diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..9ffb22d5329e4351a1ea23ccd7673d29abcd541d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02006_.png filter=lfs diff=lfs merge=lfs -text +custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02010_.png filter=lfs diff=lfs merge=lfs -text +custom_nodes/ComfyUI-sampler-lcm-alternative/SamplerLCMCycle-example.png filter=lfs diff=lfs merge=lfs -text diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/.gitignore b/custom_nodes/ComfyUI-Advanced-ControlNet/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/LICENSE b/custom_nodes/ComfyUI-Advanced-ControlNet/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/README.md b/custom_nodes/ComfyUI-Advanced-ControlNet/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f0ef1591ce959f7d55caf97b5b7afba2bb6bc7d3 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/README.md @@ -0,0 +1,151 @@ +# ComfyUI-Advanced-ControlNet +Nodes for scheduling ControlNet strength across timesteps and batched latents, as well as applying custom weights and attention masks. The ControlNet nodes here fully support sliding context sampling, like the one used in the [ComfyUI-AnimateDiff-Evolved](https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) nodes. Currently supports ControlNets, T2IAdapters, and ControlLoRAs. Kohya Controllllite support coming soon. + +Custom weights allow replication of the "My prompt is more important" feature of Auto1111's sd-webui ControlNet extension.
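+
+For intuition, here is a minimal sketch of how such soft weights can be computed. This is illustrative only - it assumes the descending per-output scaling used by sd-webui's soft weights (base 0.825), and is not this repo's exact code:
+
+```python
+# Hedged sketch: per-layer soft weights for the 13 ControlNet outputs.
+# One end of the output stack is scaled down exponentially, weakening the
+# control signal there; this repo's weight nodes expose a flip_weights
+# option to reverse the order. The 0.825 default is an assumption.
+def scaled_soft_weights(base_multiplier: float = 0.825) -> list[float]:
+    return [base_multiplier ** float(12 - i) for i in range(13)]
+
+print(scaled_soft_weights())  # [0.0995..., ..., 0.825, 1.0]
+```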
+ +ControlNet preprocessors are available through [comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux) nodes. + +## Features +- Timestep and latent strength scheduling +- Attention masks +- Soft weights to replicate the "My prompt is more important" feature from the sd-webui ControlNet extension, and also change the scaling. +- ControlNet, T2IAdapter, and ControlLoRA support for sliding context windows. + +## Table of Contents: +- [Scheduling Explanation](#scheduling-explanation) +- [Nodes](#nodes) +- [Usage](#usage) (will fill this out soon) + + +# Scheduling Explanation + +The two core concepts for scheduling are ***Timestep Keyframes*** and ***Latent Keyframes***. + +***Timestep Keyframes*** hold the values that guide the settings for a controlnet, and begin to take effect based on their start_percent, which corresponds to the percentage of the sampling process. They can contain masks for the strengths of each latent, control_net_weights, and latent_keyframes (specific strengths for each latent), all optional. + +***Latent Keyframes*** determine the strength of the controlnet for specific latents - all they contain is the batch_index of the latent and the strength the controlnet should apply for that latent. As a concept, latent keyframes achieve the same effect as a uniform mask with the chosen strength value. + +![advcn_image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/e6275264-6c3f-4246-a319-111ee48f4cd9) + +# Nodes + +The ControlNet nodes provided here are the ***Apply Advanced ControlNet*** and ***Load Advanced ControlNet Model*** (or diff) nodes. The vanilla ControlNet nodes are also compatible and can be used almost interchangeably - the only difference is that **at least one of these nodes must be used** for Advanced versions of ControlNets to take effect (important for sliding context sampling, like with AnimateDiff-Evolved). + +Key: +- 🟩 - required inputs +- 🟨 - optional inputs +- 🟦 - start as widgets, can be converted to inputs +- 🟥 - optional input/output, but not recommended to use unless needed +- 🟪 - output + +## Apply Advanced ControlNet +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/dc541d41-70df-4a71-b832-efa65af98f06) + +Same functionality as the vanilla Apply ControlNet (Advanced) node, except with Advanced ControlNet features added to it. Automatically converts any ControlNet from ControlNet loaders into an Advanced version. + +### Inputs +- 🟩***positive***: conditioning (positive). +- 🟩***negative***: conditioning (negative). +- 🟩***control_net***: loaded controlnet; will be converted to an Advanced version automatically by this node, if it's a supported type. +- 🟩***image***: images to guide controlnets - if the loaded controlnet requires it, they must be preprocessed images. If one image is provided, it will be used for all latents. If more images are provided, each image will be used for its own latent. If there are not enough images to meet the latent count, the images will repeat from the beginning to match vanilla ControlNet functionality. +- 🟨***mask_optional***: attention masks to apply to controlnets; basically, decides what part of the image the controlnet should apply to (and the relative strength, if the mask is not binary). As with the image input, if you provide more than one mask, each can apply to a different latent. +- 🟨***timestep_kf***: timestep keyframes to guide controlnet effect throughout sampling steps.
+ +- 🟨***latent_kf_override***: override for latent keyframes, useful if no other features from timestep keyframes are needed. *NOTE: this latent keyframe will be applied to ALL timesteps, regardless of whether there are other latent keyframes attached to connected timestep keyframes.* +- 🟨***weights_override***: override for weights, useful if no other features from timestep keyframes are needed. *NOTE: this weight will be applied to ALL timesteps, regardless of whether there are other weights attached to connected timestep keyframes.* +- 🟦***strength***: strength of controlnet; 1.0 is full strength, 0.0 is no effect at all. +- 🟦***start_percent***: sampling step percentage at which the controlnet should start to be applied - no matter what start_percent is set on timestep keyframes, they won't take effect until this start_percent is reached. +- 🟦***stop_percent***: sampling step percentage at which the controlnet should stop being applied - no matter what start_percent is set on timestep keyframes, they will stop taking effect once this stop_percent is reached. + +### Outputs +- 🟪***positive***: conditioning (positive) with applied controlnets +- 🟪***negative***: conditioning (negative) with applied controlnets + +## Load Advanced ControlNet Model +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/4a7f58a9-783d-4da4-bf82-bc9c167e4722) + +Loads a ControlNet model and converts it into an Advanced version that supports all the features in this repo. When used with the **Apply Advanced ControlNet** node, there is no reason to use the timestep_keyframe input on this node - use timestep_kf on the Apply node instead. + +### Inputs +- 🟥***timestep_keyframe***: optional and likely unnecessary input to have the ControlNet use selected timestep_keyframes - should not be used unless you need to. Useful if this node is not attached to an **Apply Advanced ControlNet** node but you still want to use a Timestep Keyframe, or to use TK_SHORTCUT outputs from ControlWeights in the same scenario. Will be overridden by the timestep_kf input on the **Apply Advanced ControlNet** node, if one is provided there. +- 🟨***model***: model to plug into the diff version of the node. Some controlnets are designed to receive the model; if you don't know what this does, you probably don't want to use the diff version of the node. + +### Outputs +- 🟪***CONTROL_NET***: loaded Advanced ControlNet + +## Timestep Keyframe +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/c6f2a86e-fc96-4f8b-b976-7c2062a6eba2) + +Scheduling node across timesteps (sampling steps) based on the set start_percent. Chaining Timestep Keyframes allows ControlNet scheduling across sampling steps (percentage-wise), through a timestep keyframe schedule. + +### Inputs +- 🟨***prev_timestep_kf***: used to chain Timestep Keyframes together to create a schedule. The order does not matter - the Timestep Keyframes sort themselves automatically by their start_percent. *Any Timestep Keyframe contained in prev_timestep_keyframe that has the same start_percent as this Timestep Keyframe will be overwritten.* +- 🟨***cn_weights***: weights to apply to the controlnet while this Timestep Keyframe is in effect. Must be compatible with the loaded controlnet, or an error will be thrown explaining what weight types are compatible. If inherit_missing is True and no cn_weights are passed in, the last-used weights in the timestep keyframe schedule will be reused.
*If the Apply Advanced ControlNet node has a weight_override, the weight_override will be used during sampling instead of cn_weights.* +- 🟨***latent_keyframe***: latent keyframes to apply to the controlnet while this Timestep Keyframe is in effect. If inherit_missing is True and no latent_keyframe is passed in, the last-used latent keyframes in the timestep keyframe schedule will be reused. *If the Apply Advanced ControlNet node has a latent_kf_override, the latent_kf_override will be used during sampling instead of latent_keyframe.* +- 🟨***mask_optional***: attention masks to apply to controlnets; basically, decides what part of the image the controlnet should apply to (and the relative strength, if the mask is not binary). As with mask_optional on the Apply Advanced ControlNet node, either one mask can apply to all latents, or individual masks can apply to individual latents. If inherit_missing is True and no mask_optional is passed in, the last-used mask_optional in the timestep keyframe schedule will be reused. It is NOT overridden by mask_optional on the Apply Advanced ControlNet node; the two will be used together. +- 🟦***start_percent***: sampling step percentage at which this Timestep Keyframe qualifies to be used. Acts as the 'key' for the Timestep Keyframe in the timestep keyframe schedule. +- 🟦***strength***: strength of the controlnet; multiplies the controlnet by this value, basically applied alongside the strength on the Apply ControlNet node. If set to 0.0, the controlnet will have no effect for the duration of this Timestep Keyframe, which also increases sampling speed by skipping that work. +- 🟦***null_latent_kf_strength***: strength to assign to latents that are unaccounted for in the passed-in latent_keyframes. Has no effect if no latent_keyframes are passed in, or if no batch_indices are unaccounted for in the latent_keyframes during sampling. +- 🟦***inherit_missing***: determines whether to reuse values from previous Timestep Keyframes for optional values (cn_weights, latent_keyframe, and mask_optional) that are not included on this Timestep Keyframe. To inherit only specific inputs, use default inputs. +- 🟦***guarantee_usage***: when true, even if another Timestep Keyframe ahead of this one in the schedule has a start_percent closer to the current sampling percentage, this Timestep Keyframe will still be used for one step before moving on to the next selected Timestep Keyframe in the following step. Whether or not the Timestep Keyframe is used, its inputs are still accounted for inherit_missing purposes. + +### Outputs +- 🟪***TIMESTEP_KF***: the created Timestep Keyframe, which can either be chained to another Timestep Keyframe or plugged into a Timestep Keyframe input. + +## Latent Keyframe +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/7eb2cc4c-255c-4f32-b09b-699f713fada3) + +A singular Latent Keyframe; selects the strength for a specific batch_index. If the batch_index is not present during sampling, it will simply have no effect. Can be chained with any other Latent Keyframe-type node to create a latent keyframe schedule. + +### Inputs +- 🟨***prev_latent_kf***: used to chain Latent Keyframes together to create a schedule. *If a Latent Keyframe contained in prev_latent_keyframes has the same batch_index as this Latent Keyframe, it will take priority over this node's value.* +- 🟦***batch_index***: index of the latent in the batch to apply controlnet strength to. Acts as the 'key' for the Latent Keyframe in the latent keyframe schedule.
+ - 🟦***strength***: strength of the controlnet to apply to the corresponding latent. + +### Outputs +- 🟪***LATENT_KF***: the created Latent Keyframe, which can either be chained to another Latent Keyframe or plugged into a Latent Keyframe input. + +## Latent Keyframe Group +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/5ce3b795-f5fc-4dc3-ae30-a4c7f87e278c) + +Allows creating Latent Keyframes via individual indices or python-style ranges. + +### Inputs +- 🟨***prev_latent_kf***: used to chain Latent Keyframes together to create a schedule. *If any Latent Keyframes contained in prev_latent_keyframes have the same batch_index as this Latent Keyframe, they will take priority over this node's version.* +- 🟨***latent_optional***: the latents expected to be passed in for sampling; only required if you wish to use negative indices (they will be automatically converted to real values). +- 🟦***index_strengths***: string list of indices or python-style ranges of indices to assign strengths to; a parsing sketch follows the node descriptions below. If latent_optional is passed in, it can contain negative indices or ranges that contain negative numbers, python-style. The different indices must be comma-separated. Individual latents can be specified by ```batch_index=strength```, like ```0=0.9```. Ranges can be specified by ```start_index_inclusive:end_index_exclusive=strength```, like ```0:8=strength```. Negative indices are possible when latent_optional has an input, with a string such as ```0,-4=0.25```. +- 🟦***print_keyframes***: if True, will print the Latent Keyframes generated by this node for debugging purposes. + +### Outputs +- 🟪***LATENT_KF***: the created Latent Keyframe, which can either be chained to another Latent Keyframe or plugged into a Latent Keyframe input. + +## Latent Keyframe Interpolation +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/7986c737-83b9-46bc-aab0-ae4c368df446) + +Allows creating Latent Keyframes with interpolated values in a range. + +### Inputs +- 🟨***prev_latent_kf***: used to chain Latent Keyframes together to create a schedule. *If any Latent Keyframes contained in prev_latent_keyframes have the same batch_index as this Latent Keyframe, they will take priority over this node's version.* +- 🟦***batch_index_from***: starting batch_index of the range, included. +- 🟦***batch_index_to***: end batch_index of the range, excluded (python-style range). +- 🟦***strength_from***: starting strength of the interpolation. +- 🟦***strength_to***: end strength of the interpolation. +- 🟦***interpolation***: the method of interpolation. +- 🟦***print_keyframes***: if True, will print the Latent Keyframes generated by this node for debugging purposes. + +### Outputs +- 🟪***LATENT_KF***: the created Latent Keyframe, which can either be chained to another Latent Keyframe or plugged into a Latent Keyframe input. + +## Latent Keyframe Batched Group +![image](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet/assets/7365912/6cec701f-6183-4aeb-af5c-cac76f5591b7) + +Allows creating Latent Keyframes via a list of floats, such as with Batch Value Schedule from the [ComfyUI_FizzNodes](https://github.com/FizzleDorf/ComfyUI_FizzNodes) nodes. + +### Inputs +- 🟨***prev_latent_kf***: used to chain Latent Keyframes together to create a schedule. *If any Latent Keyframes contained in prev_latent_keyframes have the same batch_index as this Latent Keyframe, they will take priority over this node's version.* +- 🟦***float_strengths***: a list of floats that will correspond to the strength of each Latent Keyframe; the batch_index is the index of each float value in the list. +- 🟦***print_keyframes***: if True, will print the Latent Keyframes generated by this node for debugging purposes. + +### Outputs +- 🟪***LATENT_KF***: the created Latent Keyframe, which can either be chained to another Latent Keyframe or plugged into a Latent Keyframe input.
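+
+As referenced above, the ```index_strengths``` format of the **Latent Keyframe Group** node can be summarized with a short parsing sketch. This is an illustrative approximation, not the node's actual implementation; in particular, treating a bare index without ```=``` as full strength is an assumption:
+
+```python
+# Hypothetical parser for the index_strengths string format described above.
+def parse_index_strengths(spec: str, latent_count: int | None = None) -> dict[int, float]:
+    def resolve(idx: int) -> int:
+        # negative indices resolve python-style against the latent count,
+        # which is only known when latent_optional is connected
+        if idx < 0:
+            if latent_count is None:
+                raise ValueError("negative indices require latent_optional")
+            idx += latent_count
+        return idx
+
+    strengths: dict[int, float] = {}
+    for entry in spec.split(","):
+        indices, _, strength_str = entry.strip().partition("=")
+        strength = float(strength_str) if strength_str else 1.0  # assumed default
+        if ":" in indices:
+            # python-style range: start inclusive, end exclusive
+            start, end = (resolve(int(v)) for v in indices.split(":"))
+            for i in range(start, end):
+                strengths[i] = strength
+        else:
+            strengths[resolve(int(indices))] = strength
+    return strengths
+
+# parse_index_strengths("0=0.9,2:5=0.5") -> {0: 0.9, 2: 0.5, 3: 0.5, 4: 0.5}
+```
+
+Likewise, the **Latent Keyframe Interpolation** node's output can be pictured with a small easing sketch; the curve shapes below are common easing functions and are assumptions - the node's actual curves may differ:
+
+```python
+import math
+
+# Hypothetical easing curves for the interpolation methods named above.
+def ease(t: float, method: str) -> float:
+    if method == "linear":
+        return t
+    if method == "ease-in":
+        return 1.0 - math.cos(t * math.pi / 2)  # slow start
+    if method == "ease-out":
+        return math.sin(t * math.pi / 2)  # slow end
+    if method == "ease-in-out":
+        return (1.0 - math.cos(t * math.pi)) / 2
+    raise ValueError(method)
+
+def interpolated_strengths(index_from: int, index_to: int, strength_from: float,
+                           strength_to: float, method: str) -> dict[int, float]:
+    n = index_to - index_from  # index_to is excluded, python-style
+    return {index_from + i: strength_from + (strength_to - strength_from) * ease(i / max(n - 1, 1), method)
+            for i in range(n)}
+```
+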
+# There are more nodes to document and show usage for - will add this soon! TODO diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/__init__.py b/custom_nodes/ComfyUI-Advanced-ControlNet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e70bf901eb061b1159b4e8b134fa4957821b0bdd --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/__init__.py @@ -0,0 +1,3 @@ +from .control.nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33dc1bd12658d1029e2223faaa7342d205a3884b Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd102a89a1848f7b454d1df4eddf6827cff8c47d Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..625851bfc189b0573dbd57d115fb70f1e0ef60a6 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51d19085631c0e40a2c70c126384be74ccd59eb0 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/control.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..409598a504bbe6b011326cd0e2e60089e6906a1e Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e035b99bc13f7eb182aa918c1baeadf1e158088 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/deprecated_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-310.pyc new file mode 100644 index
0000000000000000000000000000000000000000..aa3df992bef211eae23fed19d714058bdeae467d Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..996f8b222962b556561fe0af846c0af435f6d806 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/latent_keyframe_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b0390a033717b832aa937bcb32bea57163f21f2 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4efdaf3379ce7bc78c60bcddd3694dc65a31c1ed Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/logger.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe3771610ee6610d90aceb69a4bd09b4e55c676b Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf089018d9254c315615fc0c0d54216f3829af9e Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6b415564252f89d122343aac6e1cd45ac9c36f Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c3cbfd3082e9f29db5e420141646a8cf02ee294 Binary files /dev/null and b/custom_nodes/ComfyUI-Advanced-ControlNet/control/__pycache__/weight_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/control.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/control.py new file mode 100644 index 0000000000000000000000000000000000000000..18cbffdb914986836c4d714b60ce9b1a31a9cc54 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/control.py @@ -0,0 +1,770 @@ +from typing import Union 
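+# This module implements Advanced-ControlNet's core logic: schedulable control weights, latent/timestep keyframe groups, and Advanced subclasses wrapping ComfyUI's ControlNet, ControlLora, and T2IAdapter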
+from torch import Tensor +import torch + +import comfy.utils +import comfy.controlnet as comfy_cn +from comfy.controlnet import ControlBase, ControlNet, ControlLora, T2IAdapter, broadcast_image_to + + +def get_properly_arranged_t2i_weights(initial_weights: list[float]): + new_weights = [] + new_weights.extend([initial_weights[0]]*3) + new_weights.extend([initial_weights[1]]*3) + new_weights.extend([initial_weights[2]]*3) + new_weights.extend([initial_weights[3]]*3) + return new_weights + + +class ControlWeightType: + DEFAULT = "default" + UNIVERSAL = "universal" + T2IADAPTER = "t2iadapter" + CONTROLNET = "controlnet" + CONTROLLORA = "controllora" + CONTROLLLLITE = "controllllite" + + +class ControlWeights: + def __init__(self, weight_type: str, base_multiplier: float=1.0, flip_weights: bool=False, weights: list[float]=None, weight_mask: Tensor=None): + self.weight_type = weight_type + self.base_multiplier = base_multiplier + self.flip_weights = flip_weights + self.weights = weights + if self.weights is not None and self.flip_weights: + self.weights.reverse() + self.weight_mask = weight_mask + + def get(self, idx: int) -> Union[float, Tensor]: + # if weights is not none, return index + if self.weights is not None: + return self.weights[idx] + return 1.0 + + @classmethod + def default(cls): + return cls(ControlWeightType.DEFAULT) + + @classmethod + def universal(cls, base_multiplier: float, flip_weights: bool=False): + return cls(ControlWeightType.UNIVERSAL, base_multiplier=base_multiplier, flip_weights=flip_weights) + + @classmethod + def universal_mask(cls, weight_mask: Tensor): + return cls(ControlWeightType.UNIVERSAL, weight_mask=weight_mask) + + @classmethod + def t2iadapter(cls, weights: list[float]=None, flip_weights: bool=False): + if weights is None: + weights = [1.0]*12 + return cls(ControlWeightType.T2IADAPTER, weights=weights,flip_weights=flip_weights) + + @classmethod + def controlnet(cls, weights: list[float]=None, flip_weights: bool=False): + if weights is None: + weights = [1.0]*13 + return cls(ControlWeightType.CONTROLNET, weights=weights, flip_weights=flip_weights) + + @classmethod + def controllora(cls, weights: list[float]=None, flip_weights: bool=False): + if weights is None: + weights = [1.0]*10 + return cls(ControlWeightType.CONTROLLORA, weights=weights, flip_weights=flip_weights) + + @classmethod + def controllllite(cls, weights: list[float]=None, flip_weights: bool=False): + if weights is None: + # TODO: make this have a real value + weights = [1.0]*200 + return cls(ControlWeightType.CONTROLLLLITE, weights=weights, flip_weights=flip_weights) + + +class StrengthInterpolation: + LINEAR = "linear" + EASE_IN = "ease-in" + EASE_OUT = "ease-out" + EASE_IN_OUT = "ease-in-out" + NONE = "none" + + +class LatentKeyframe: + def __init__(self, batch_index: int, strength: float) -> None: + self.batch_index = batch_index + self.strength = strength + + +# always maintain sorted state (by batch_index of LatentKeyframe) +class LatentKeyframeGroup: + def __init__(self) -> None: + self.keyframes: list[LatentKeyframe] = [] + + def add(self, keyframe: LatentKeyframe) -> None: + added = False + # replace existing keyframe if same batch_index + for i in range(len(self.keyframes)): + if self.keyframes[i].batch_index == keyframe.batch_index: + self.keyframes[i] = keyframe + added = True + break + if not added: + self.keyframes.append(keyframe) + self.keyframes.sort(key=lambda k: k.batch_index) + + def get_index(self, index: int) -> Union[LatentKeyframe, None]: + try: + return 
self.keyframes[index] + except IndexError: + return None + + def __getitem__(self, index) -> LatentKeyframe: + return self.keyframes[index] + + def is_empty(self) -> bool: + return len(self.keyframes) == 0 + + def clone(self) -> 'LatentKeyframeGroup': + cloned = LatentKeyframeGroup() + for tk in self.keyframes: + cloned.add(tk) + return cloned + + +class TimestepKeyframe: + def __init__(self, + start_percent: float = 0.0, + strength: float = 1.0, + interpolation: str = StrengthInterpolation.NONE, + control_weights: ControlWeights = None, + latent_keyframes: LatentKeyframeGroup = None, + null_latent_kf_strength: float = 0.0, + inherit_missing: bool = True, + guarantee_usage: bool = True, + mask_hint_orig: Tensor = None) -> None: + self.start_percent = start_percent + self.start_t = 999999999.9 + self.strength = strength + self.interpolation = interpolation + self.control_weights = control_weights + self.latent_keyframes = latent_keyframes + self.null_latent_kf_strength = null_latent_kf_strength + self.inherit_missing = inherit_missing + self.guarantee_usage = guarantee_usage + self.mask_hint_orig = mask_hint_orig + + def has_control_weights(self): + return self.control_weights is not None + + def has_latent_keyframes(self): + return self.latent_keyframes is not None + + def has_mask_hint(self): + return self.mask_hint_orig is not None + + + @classmethod + def default(cls) -> 'TimestepKeyframe': + return cls(0.0) + + +# always maintain sorted state (by start_percent of TimestepKeyFrame) +class TimestepKeyframeGroup: + def __init__(self) -> None: + self.keyframes: list[TimestepKeyframe] = [] + self.keyframes.append(TimestepKeyframe.default()) + + def add(self, keyframe: TimestepKeyframe) -> None: + added = False + # replace existing keyframe if same start_percent + for i in range(len(self.keyframes)): + if self.keyframes[i].start_percent == keyframe.start_percent: + self.keyframes[i] = keyframe + added = True + break + if not added: + self.keyframes.append(keyframe) + self.keyframes.sort(key=lambda k: k.start_percent) + + def get_index(self, index: int) -> Union[TimestepKeyframe, None]: + try: + return self.keyframes[index] + except IndexError: + return None + + def has_index(self, index: int) -> int: + return index >=0 and index < len(self.keyframes) + + def __getitem__(self, index) -> TimestepKeyframe: + return self.keyframes[index] + + def __len__(self) -> int: + return len(self.keyframes) + + def is_empty(self) -> bool: + return len(self.keyframes) == 0 + + def clone(self) -> 'TimestepKeyframeGroup': + cloned = TimestepKeyframeGroup() + for tk in self.keyframes: + cloned.add(tk) + return cloned + + @classmethod + def default(cls, keyframe: TimestepKeyframe) -> 'TimestepKeyframeGroup': + group = cls() + group.keyframes[0] = keyframe + return group + + +# used to inject ControlNetAdvanced and T2IAdapterAdvanced control_merge function + + +class AdvancedControlBase: + def __init__(self, base: ControlBase, timestep_keyframes: TimestepKeyframeGroup, weights_default: ControlWeights): + self.base = base + self.compatible_weights = [ControlWeightType.UNIVERSAL] + self.add_compatible_weight(weights_default.weight_type) + # mask for which parts of controlnet output to keep + self.mask_cond_hint_original = None + self.mask_cond_hint = None + self.tk_mask_cond_hint_original = None + self.tk_mask_cond_hint = None + self.weight_mask_cond_hint = None + # actual index values + self.sub_idxs = None + self.full_latent_length = 0 + self.context_length = 0 + # timesteps + self.t: Tensor = None + 
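# number of conds batched together for the current step (e.g. cond and uncond); set in prepare_current_timestep +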
self.batched_number: int = None + # weights + override + self.weights: ControlWeights = None + self.weights_default: ControlWeights = weights_default + self.weights_override: ControlWeights = None + # latent keyframe + override + self.latent_keyframes: LatentKeyframeGroup = None + self.latent_keyframe_override: LatentKeyframeGroup = None + # initialize timestep_keyframes + self.set_timestep_keyframes(timestep_keyframes) + # override some functions + self.get_control = self.get_control_inject + self.control_merge = self.control_merge_inject#.__get__(self, type(self)) + self.pre_run = self.pre_run_inject + self.cleanup = self.cleanup_inject + + def add_compatible_weight(self, control_weight_type: str): + self.compatible_weights.append(control_weight_type) + + def verify_all_weights(self, throw_error=True): + # first, check if override exists - if so, only need to check the override + if self.weights_override is not None: + if self.weights_override.weight_type not in self.compatible_weights: + msg = f"Weight override is type {self.weights_override.weight_type}, but loaded {type(self).__name__} " + \ + f"only supports {self.compatible_weights} weights." + raise WeightTypeException(msg) + # otherwise, check all timestep keyframe weights + else: + for tk in self.timestep_keyframes.keyframes: + if tk.has_control_weights() and tk.control_weights.weight_type not in self.compatible_weights: + msg = f"Weight on Timestep Keyframe with start_percent={tk.start_percent} is type " + \ + f"{tk.control_weights.weight_type}, but loaded {type(self).__name__} only supports {self.compatible_weights} weights." + raise WeightTypeException(msg) + + def set_timestep_keyframes(self, timestep_keyframes: TimestepKeyframeGroup): + self.timestep_keyframes = timestep_keyframes if timestep_keyframes else TimestepKeyframeGroup() + # prepare first timestep_keyframe related stuff + self.current_timestep_keyframe = None + self.current_timestep_index = -1 + self.next_timestep_keyframe = None + self.weights = None + self.latent_keyframes = None + + def prepare_current_timestep(self, t: Tensor, batched_number: int): + self.t = t + self.batched_number = batched_number + # get current sampling timestep + curr_t: float = t[0] + prev_index = self.current_timestep_index + # if there is a next keyframe, loop through and see if we need to switch to it + if self.timestep_keyframes.has_index(self.current_timestep_index+1): + for i in range(self.current_timestep_index+1, len(self.timestep_keyframes)): + eval_tk = self.timestep_keyframes[i] + # keyframe is in effect once its start_t has been reached (start_percent <= current percent) + if eval_tk.start_t >= curr_t: + self.current_timestep_index = i + self.current_timestep_keyframe = eval_tk + # keep track of control weights, latent keyframes, and masks, + # accounting for inherit_missing + if self.current_timestep_keyframe.has_control_weights(): + self.weights = self.current_timestep_keyframe.control_weights + elif not self.current_timestep_keyframe.inherit_missing: + self.weights = self.weights_default + if self.current_timestep_keyframe.has_latent_keyframes(): + self.latent_keyframes = self.current_timestep_keyframe.latent_keyframes + elif not self.current_timestep_keyframe.inherit_missing: + self.latent_keyframes = None + if self.current_timestep_keyframe.has_mask_hint(): + self.tk_mask_cond_hint_original = self.current_timestep_keyframe.mask_hint_orig + elif not self.current_timestep_keyframe.inherit_missing: + del self.tk_mask_cond_hint_original + self.tk_mask_cond_hint_original = None + # if guarantee_usage, stop searching for other TKs + if
self.current_timestep_keyframe.guarantee_usage: + break + # if eval_tk is outside of percent range, stop looking further + else: + break + + # if index changed, apply overrides + if prev_index != self.current_timestep_index: + if self.weights_override is not None: + self.weights = self.weights_override + if self.latent_keyframe_override is not None: + self.latent_keyframes = self.latent_keyframe_override + + # make sure weights and latent_keyframes are in a workable state + # Note: each AdvancedControlBase should create their own get_universal_weights class + self.prepare_weights() + + def prepare_weights(self): + if self.weights is None or self.weights.weight_type == ControlWeightType.DEFAULT: + self.weights = self.weights_default + elif self.weights.weight_type == ControlWeightType.UNIVERSAL: + # if universal and weight_mask present, no need to convert + if self.weights.weight_mask is not None: + return + self.weights = self.get_universal_weights() + + def get_universal_weights(self) -> ControlWeights: + return self.weights + + def set_cond_hint_mask(self, mask_hint): + self.mask_cond_hint_original = mask_hint + return self + + def pre_run_inject(self, model, percent_to_timestep_function): + self.base.pre_run(model, percent_to_timestep_function) + self.pre_run_advanced(model, percent_to_timestep_function) + + def pre_run_advanced(self, model, percent_to_timestep_function): + # for each timestep keyframe, calculate the start_t + for tk in self.timestep_keyframes.keyframes: + tk.start_t = percent_to_timestep_function(tk.start_percent) + # clear variables + self.cleanup_advanced() + + def get_control_inject(self, x_noisy, t, cond, batched_number): + # prepare timestep and everything related + self.prepare_current_timestep(t=t, batched_number=batched_number) + # if should not perform any actions for the controlnet, exit without doing any work + if self.strength == 0.0 or self.current_timestep_keyframe.strength == 0.0: + control_prev = None + if self.previous_controlnet is not None: + control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number) + if control_prev is not None: + return control_prev + else: + return None + # otherwise, perform normal function + return self.get_control_advanced(x_noisy, t, cond, batched_number) + + def get_control_advanced(self, x_noisy, t, cond, batched_number): + pass + + def calc_weight(self, idx: int, x: Tensor, layers: int) -> Union[float, Tensor]: + if self.weights.weight_mask is not None: + # prepare weight mask + self.prepare_weight_mask_cond_hint(x, self.batched_number) + # adjust mask for current layer and return + return torch.pow(self.weight_mask_cond_hint, self.get_calc_pow(idx=idx, layers=layers)) + return self.weights.get(idx=idx) + + def get_calc_pow(self, idx: int, layers: int) -> int: + return (layers-1)-idx + + def apply_advanced_strengths_and_masks(self, x: Tensor, batched_number: int): + # apply strengths, and get batch indeces to null out + # AKA latents that should not be influenced by ControlNet + if self.latent_keyframes is not None: + latent_count = x.size(0)//batched_number + indeces_to_null = set(range(latent_count)) + mapped_indeces = None + # if expecting subdivision, will need to translate between subset and actual idx values + if self.sub_idxs: + mapped_indeces = {} + for i, actual in enumerate(self.sub_idxs): + mapped_indeces[actual] = i + for keyframe in self.latent_keyframes: + real_index = keyframe.batch_index + # if negative, count from end + if real_index < 0: + real_index += latent_count if 
self.sub_idxs is None else self.full_latent_length + + # if not mapping indeces, what you see is what you get + if mapped_indeces is None: + if real_index in indeces_to_null: + indeces_to_null.remove(real_index) + # otherwise, see if batch_index is even included in this set of latents + else: + real_index = mapped_indeces.get(real_index, None) + if real_index is None: + continue + indeces_to_null.remove(real_index) + + # if real_index is outside the bounds of latents, don't apply + if real_index >= latent_count or real_index < 0: + continue + + # apply strength for each batched cond/uncond + for b in range(batched_number): + x[(latent_count*b)+real_index] = x[(latent_count*b)+real_index] * keyframe.strength + + # null them out by multiplying by null_latent_kf_strength + for batch_index in indeces_to_null: + # apply null for each batched cond/uncond + for b in range(batched_number): + x[(latent_count*b)+batch_index] = x[(latent_count*b)+batch_index] * self.current_timestep_keyframe.null_latent_kf_strength + # apply masks, resizing mask to required dims + if self.mask_cond_hint is not None: + masks = prepare_mask_batch(self.mask_cond_hint, x.shape) + x[:] = x[:] * masks + if self.tk_mask_cond_hint is not None: + masks = prepare_mask_batch(self.tk_mask_cond_hint, x.shape) + x[:] = x[:] * masks + # apply timestep keyframe strengths + if self.current_timestep_keyframe.strength != 1.0: + x[:] *= self.current_timestep_keyframe.strength + + def control_merge_inject(self: 'AdvancedControlBase', control_input, control_output, control_prev, output_dtype): + out = {'input':[], 'middle':[], 'output': []} + + if control_input is not None: + for i in range(len(control_input)): + key = 'input' + x = control_input[i] + if x is not None: + self.apply_advanced_strengths_and_masks(x, self.batched_number) + + x *= self.strength * self.calc_weight(i, x, len(control_input)) + if x.dtype != output_dtype: + x = x.to(output_dtype) + out[key].insert(0, x) + + if control_output is not None: + for i in range(len(control_output)): + if i == (len(control_output) - 1): + key = 'middle' + index = 0 + else: + key = 'output' + index = i + x = control_output[i] + if x is not None: + self.apply_advanced_strengths_and_masks(x, self.batched_number) + + if self.global_average_pooling: + x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3]) + + x *= self.strength * self.calc_weight(i, x, len(control_output)) + if x.dtype != output_dtype: + x = x.to(output_dtype) + + out[key].append(x) + if control_prev is not None: + for x in ['input', 'middle', 'output']: + o = out[x] + for i in range(len(control_prev[x])): + prev_val = control_prev[x][i] + if i >= len(o): + o.append(prev_val) + elif prev_val is not None: + if o[i] is None: + o[i] = prev_val + else: + o[i] += prev_val + return out + + def prepare_mask_cond_hint(self, x_noisy: Tensor, t, cond, batched_number, dtype=None): + self._prepare_mask("mask_cond_hint", self.mask_cond_hint_original, x_noisy, t, cond, batched_number, dtype) + self.prepare_tk_mask_cond_hint(x_noisy, t, cond, batched_number, dtype) + + def prepare_tk_mask_cond_hint(self, x_noisy: Tensor, t, cond, batched_number, dtype=None): + return self._prepare_mask("tk_mask_cond_hint", self.current_timestep_keyframe.mask_hint_orig, x_noisy, t, cond, batched_number, dtype) + + def prepare_weight_mask_cond_hint(self, x_noisy: Tensor, batched_number, dtype=None): + return self._prepare_mask("weight_mask_cond_hint", self.weights.weight_mask, x_noisy, t=None, cond=None, batched_number=batched_number, 
dtype=dtype, direct_attn=True) + + def _prepare_mask(self, attr_name, orig_mask: Tensor, x_noisy: Tensor, t, cond, batched_number, dtype=None, direct_attn=False): + # make mask appropriate dimensions, if present + if orig_mask is not None: + out_mask = getattr(self, attr_name) + if self.sub_idxs is not None or out_mask is None or x_noisy.shape[2] * 8 != out_mask.shape[1] or x_noisy.shape[3] * 8 != out_mask.shape[2]: + self._reset_attr(attr_name) + del out_mask + # TODO: perform upscale on only the sub_idxs masks at a time instead of all to conserve RAM + # resize mask and match batch count + multiplier = 1 if direct_attn else 8 + out_mask = prepare_mask_batch(orig_mask, x_noisy.shape, multiplier=multiplier) + actual_latent_length = x_noisy.shape[0] // batched_number + out_mask = comfy.utils.repeat_to_batch_size(out_mask, actual_latent_length if self.sub_idxs is None else self.full_latent_length) + if self.sub_idxs is not None: + out_mask = out_mask[self.sub_idxs] + # make cond_hint_mask length match x_noise + if x_noisy.shape[0] != out_mask.shape[0]: + out_mask = broadcast_image_to(out_mask, x_noisy.shape[0], batched_number) + # default dtype to be same as x_noisy + if dtype is None: + dtype = x_noisy.dtype + setattr(self, attr_name, out_mask.to(dtype=dtype).to(self.device)) + del out_mask + + def _reset_attr(self, attr_name, new_value=None): + if hasattr(self, attr_name): + delattr(self, attr_name) + setattr(self, attr_name, new_value) + + def cleanup_inject(self): + self.base.cleanup() + self.cleanup_advanced() + + def cleanup_advanced(self): + self.sub_idxs = None + self.full_latent_length = 0 + self.context_length = 0 + self.t = None + self.batched_number = None + self.weights = None + self.latent_keyframes = None + # timestep stuff + self.current_timestep_keyframe = None + self.next_timestep_keyframe = None + self.current_timestep_index = -1 + # clear mask hints + if self.mask_cond_hint is not None: + del self.mask_cond_hint + self.mask_cond_hint = None + if self.tk_mask_cond_hint_original is not None: + del self.tk_mask_cond_hint_original + self.tk_mask_cond_hint_original = None + if self.tk_mask_cond_hint is not None: + del self.tk_mask_cond_hint + self.tk_mask_cond_hint = None + if self.weight_mask_cond_hint is not None: + del self.weight_mask_cond_hint + self.weight_mask_cond_hint = None + + def copy_to_advanced(self, copied: 'AdvancedControlBase'): + copied.mask_cond_hint_original = self.mask_cond_hint_original + copied.weights_override = self.weights_override + copied.latent_keyframe_override = self.latent_keyframe_override + + +class ControlNetAdvanced(ControlNet, AdvancedControlBase): + def __init__(self, control_model, timestep_keyframes: TimestepKeyframeGroup, global_average_pooling=False, device=None): + super().__init__(control_model=control_model, global_average_pooling=global_average_pooling, device=device) + AdvancedControlBase.__init__(self, super(), timestep_keyframes=timestep_keyframes, weights_default=ControlWeights.controlnet()) + + def get_universal_weights(self) -> ControlWeights: + raw_weights = [(self.weights.base_multiplier ** float(12 - i)) for i in range(13)] + return ControlWeights.controlnet(raw_weights, self.weights.flip_weights) + + def get_control_advanced(self, x_noisy, t, cond, batched_number): + # perform special version of get_control that supports sliding context and masks + return self.sliding_get_control(x_noisy, t, cond, batched_number) + + def sliding_get_control(self, x_noisy: Tensor, t, cond, batched_number): + control_prev = None + if 
self.previous_controlnet is not None: + control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number) + + if self.timestep_range is not None: + if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]: + if control_prev is not None: + return control_prev + else: + return None + + output_dtype = x_noisy.dtype + + # make cond_hint appropriate dimensions + # TODO: change this to not require cond_hint upscaling every step when self.sub_idxs are present + if self.sub_idxs is not None or self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: + if self.cond_hint is not None: + del self.cond_hint + self.cond_hint = None + # if self.cond_hint_original length greater or equal to real latent count, subdivide it before scaling + if self.sub_idxs is not None and self.cond_hint_original.size(0) >= self.full_latent_length: + self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original[self.sub_idxs], x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device) + else: + self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device) + if x_noisy.shape[0] != self.cond_hint.shape[0]: + self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number) + + # prepare mask_cond_hint + self.prepare_mask_cond_hint(x_noisy=x_noisy, t=t, cond=cond, batched_number=batched_number, dtype=self.control_model.dtype) + + context = cond['c_crossattn'] + # uses 'y' in new ComfyUI update + y = cond.get('y', None) + if y is None: # TODO: remove this in the future since no longer used by newest ComfyUI + y = cond.get('c_adm', None) + if y is not None: + y = y.to(self.control_model.dtype) + timestep = self.model_sampling_current.timestep(t) + x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) + + control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(self.control_model.dtype), y=y) + return self.control_merge(None, control, control_prev, output_dtype) + + def copy(self): + c = ControlNetAdvanced(self.control_model, self.timestep_keyframes, global_average_pooling=self.global_average_pooling) + self.copy_to(c) + self.copy_to_advanced(c) + return c + + @staticmethod + def from_vanilla(v: ControlNet, timestep_keyframe: TimestepKeyframeGroup=None) -> 'ControlNetAdvanced': + return ControlNetAdvanced(control_model=v.control_model, timestep_keyframes=timestep_keyframe, + global_average_pooling=v.global_average_pooling, device=v.device) + + +class T2IAdapterAdvanced(T2IAdapter, AdvancedControlBase): + def __init__(self, t2i_model, timestep_keyframes: TimestepKeyframeGroup, channels_in, device=None): + super().__init__(t2i_model=t2i_model, channels_in=channels_in, device=device) + AdvancedControlBase.__init__(self, super(), timestep_keyframes=timestep_keyframes, weights_default=ControlWeights.t2iadapter()) + + def get_universal_weights(self) -> ControlWeights: + raw_weights = [(self.weights.base_multiplier ** float(7 - i)) for i in range(8)] + raw_weights = [raw_weights[-8], raw_weights[-3], raw_weights[-2], raw_weights[-1]] + raw_weights = get_properly_arranged_t2i_weights(raw_weights) + return ControlWeights.t2iadapter(raw_weights, self.weights.flip_weights) + + def get_calc_pow(self, idx: int, layers: int) -> int: + # match how 
T2IAdapterAdvanced deals with universal weights + indeces = [7 - i for i in range(8)] + indeces = [indeces[-8], indeces[-3], indeces[-2], indeces[-1]] + indeces = get_properly_arranged_t2i_weights(indeces) + return indeces[idx] + + def get_control_advanced(self, x_noisy, t, cond, batched_number): + # prepare timestep and everything related + self.prepare_current_timestep(t=t, batched_number=batched_number) + try: + # if sub indexes present, replace original hint with subsection + if self.sub_idxs is not None: + # cond hints + full_cond_hint_original = self.cond_hint_original + del self.cond_hint + self.cond_hint = None + self.cond_hint_original = full_cond_hint_original[self.sub_idxs] + # mask hints + self.prepare_mask_cond_hint(x_noisy=x_noisy, t=t, cond=cond, batched_number=batched_number) + return super().get_control(x_noisy, t, cond, batched_number) + finally: + if self.sub_idxs is not None: + # replace original cond hint + self.cond_hint_original = full_cond_hint_original + del full_cond_hint_original + + def copy(self): + c = T2IAdapterAdvanced(self.t2i_model, self.timestep_keyframes, self.channels_in) + self.copy_to(c) + self.copy_to_advanced(c) + return c + + def cleanup(self): + super().cleanup() + self.cleanup_advanced() + + @staticmethod + def from_vanilla(v: T2IAdapter, timestep_keyframe: TimestepKeyframeGroup=None) -> 'T2IAdapterAdvanced': + return T2IAdapterAdvanced(t2i_model=v.t2i_model, timestep_keyframes=timestep_keyframe, channels_in=v.channels_in, device=v.device) + + +class ControlLoraAdvanced(ControlLora, AdvancedControlBase): + def __init__(self, control_weights, timestep_keyframes: TimestepKeyframeGroup, global_average_pooling=False, device=None): + super().__init__(control_weights=control_weights, global_average_pooling=global_average_pooling, device=device) + AdvancedControlBase.__init__(self, super(), timestep_keyframes=timestep_keyframes, weights_default=ControlWeights.controllora()) + # use some functions from ControlNetAdvanced + self.get_control_advanced = ControlNetAdvanced.get_control_advanced.__get__(self, type(self)) + self.sliding_get_control = ControlNetAdvanced.sliding_get_control.__get__(self, type(self)) + + def get_universal_weights(self) -> ControlWeights: + raw_weights = [(self.weights.base_multiplier ** float(9 - i)) for i in range(10)] + return ControlWeights.controllora(raw_weights, self.weights.flip_weights) + + def copy(self): + c = ControlLoraAdvanced(self.control_weights, self.timestep_keyframes, global_average_pooling=self.global_average_pooling) + self.copy_to(c) + self.copy_to_advanced(c) + return c + + def cleanup(self): + super().cleanup() + self.cleanup_advanced() + + @staticmethod + def from_vanilla(v: ControlLora, timestep_keyframe: TimestepKeyframeGroup=None) -> 'ControlLoraAdvanced': + return ControlLoraAdvanced(control_weights=v.control_weights, timestep_keyframes=timestep_keyframe, + global_average_pooling=v.global_average_pooling, device=v.device) + + +class ControlLLLiteAdvanced(ControlNet, AdvancedControlBase): + def __init__(self, control_weights, timestep_keyframes: TimestepKeyframeGroup, device=None): + AdvancedControlBase.__init__(self, super(), timestep_keyframes=timestep_keyframes, weights_default=ControlWeights.controllllite()) + + +def load_controlnet(ckpt_path, timestep_keyframe: TimestepKeyframeGroup=None, model=None): + control = comfy_cn.load_controlnet(ckpt_path, model=model) + # TODO: support controlnet-lllite + # if is None, see if is a non-vanilla ControlNet + # if control is None: + # controlnet_data = 
comfy.utils.load_torch_file(ckpt_path, safe_load=True) + # # check if lllite + # if "lllite_unet" in controlnet_data: + # pass + return convert_to_advanced(control, timestep_keyframe=timestep_keyframe) + + +def convert_to_advanced(control, timestep_keyframe: TimestepKeyframeGroup=None): + # if already advanced, leave it be + if is_advanced_controlnet(control): + return control + # if exactly ControlNet returned, transform it into ControlNetAdvanced + if type(control) == ControlNet: + return ControlNetAdvanced.from_vanilla(v=control, timestep_keyframe=timestep_keyframe) + # if exactly ControlLora returned, transform it into ControlLoraAdvanced + elif type(control) == ControlLora: + return ControlLoraAdvanced.from_vanilla(v=control, timestep_keyframe=timestep_keyframe) + # if T2IAdapter returned, transform it into T2IAdapterAdvanced + elif isinstance(control, T2IAdapter): + return T2IAdapterAdvanced.from_vanilla(v=control, timestep_keyframe=timestep_keyframe) + # otherwise, leave it be - might be something I am not supporting yet + return control + + +def is_advanced_controlnet(input_object): + return hasattr(input_object, "sub_idxs") + + +# adapted from comfy/sample.py +def prepare_mask_batch(mask: Tensor, shape: Tensor, multiplier: int=1, match_dim1=False): + mask = mask.clone() + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[2]*multiplier, shape[3]*multiplier), mode="bilinear") + if match_dim1: + mask = torch.cat([mask] * shape[1], dim=1) + return mask + + +# applies min-max normalization, from: +# https://stackoverflow.com/questions/68791508/min-max-normalization-of-a-tensor-in-pytorch +def normalize_min_max(x: Tensor, new_min = 0.0, new_max = 1.0): + x_min, x_max = x.min(), x.max() + return (((x - x_min)/(x_max - x_min)) * (new_max - new_min)) + new_min + +def linear_conversion(x, x_min=0.0, x_max=1.0, new_min=0.0, new_max=1.0): + return (((x - x_min)/(x_max - x_min)) * (new_max - new_min)) + new_min + + +class WeightTypeException(TypeError): + "Raised when weight not compatible with AdvancedControlBase object" + pass diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/control_lllite.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/control_lllite.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/control_lllite.py @@ -0,0 +1 @@ + diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/deprecated_nodes.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/deprecated_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..a64ac9b697ea50672c10a395f489f7609a3e2f1c --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/deprecated_nodes.py @@ -0,0 +1,103 @@ +import os + +import torch + +import numpy as np +from PIL import Image, ImageOps +from .control import ControlWeights, LatentKeyframeGroup, TimestepKeyframeGroup, TimestepKeyframe +from .logger import logger + + +class LoadImagesFromDirectory: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT") + FUNCTION = "load_images" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/deprecated" + + def load_images(self, directory: str, image_load_cap: int = 0, 
start_index: int = 0): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + dir_files = os.listdir(directory) + if len(dir_files) == 0: + raise FileNotFoundError(f"No files in directory '{directory}'.") + + dir_files = sorted(dir_files) + dir_files = [os.path.join(directory, x) for x in dir_files] + # start at start_index + dir_files = dir_files[start_index:] + + images = [] + masks = [] + + limit_images = False + if image_load_cap > 0: + limit_images = True + image_count = 0 + + for image_path in dir_files: + if os.path.isdir(image_path): + continue + if limit_images and image_count >= image_load_cap: + break + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") + images.append(image) + masks.append(mask) + image_count += 1 + + if len(images) == 0: + raise FileNotFoundError(f"No images could be loaded from directory '{directory}'.") + + return (torch.cat(images, dim=0), torch.stack(masks, dim=0), image_count) + + +class TimestepKeyframeNodeDeprecated: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), + }, + "optional": { + "control_net_weights": ("CONTROL_NET_WEIGHTS", ), + "t2i_adapter_weights": ("T2I_ADAPTER_WEIGHTS", ), + "latent_keyframe": ("LATENT_KEYFRAME", ), + "prev_timestep_keyframe": ("TIMESTEP_KEYFRAME", ), + } + } + + RETURN_TYPES = ("TIMESTEP_KEYFRAME", ) + FUNCTION = "load_keyframe" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def load_keyframe(self, + start_percent: float, + control_net_weights: ControlWeights=None, + t2i_adapter_weights=None, # accepted for the optional input declared above, but unused by this deprecated node + latent_keyframe: LatentKeyframeGroup=None, + prev_timestep_keyframe: TimestepKeyframeGroup=None): + if not prev_timestep_keyframe: + prev_timestep_keyframe = TimestepKeyframeGroup() + # pass by keyword so weights/keyframes do not land in the strength/interpolation slots + keyframe = TimestepKeyframe(start_percent, control_weights=control_net_weights, latent_keyframes=latent_keyframe) + prev_timestep_keyframe.add(keyframe) + return (prev_timestep_keyframe,) diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/latent_keyframe_nodes.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/latent_keyframe_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..2fde61e80e5be290786d14bdcfb8ef6b322c39af --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/latent_keyframe_nodes.py @@ -0,0 +1,283 @@ +from typing import Union +import numpy as np +from collections.abc import Iterable + +from .control import LatentKeyframe, LatentKeyframeGroup +from .control import StrengthInterpolation as SI +from .logger import logger + + +class LatentKeyframeNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "batch_index": ("INT", {"default": 0, "min": -1000, "max": 1000, "step": 1}), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + }, + "optional": { + "prev_latent_kf": ("LATENT_KEYFRAME", ), + } + } + + RETURN_NAMES = ("LATENT_KF", ) + RETURN_TYPES = ("LATENT_KEYFRAME", ) + FUNCTION = "load_keyframe" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def load_keyframe(self, + batch_index: int, + strength: float, + prev_latent_kf: LatentKeyframeGroup=None, + prev_latent_keyframe: LatentKeyframeGroup=None, # old name +
): + prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf + if not prev_latent_keyframe: + prev_latent_keyframe = LatentKeyframeGroup() + else: + prev_latent_keyframe = prev_latent_keyframe.clone() + keyframe = LatentKeyframe(batch_index, strength) + prev_latent_keyframe.add(keyframe) + return (prev_latent_keyframe,) + + +class LatentKeyframeGroupNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "index_strengths": ("STRING", {"multiline": True, "default": ""}), + }, + "optional": { + "prev_latent_kf": ("LATENT_KEYFRAME", ), + "latent_optional": ("LATENT", ), + "print_keyframes": ("BOOLEAN", {"default": False}) + } + } + + RETURN_NAMES = ("LATENT_KF", ) + RETURN_TYPES = ("LATENT_KEYFRAME", ) + FUNCTION = "load_keyframes" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def validate_index(self, index: int, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int: + # if part of a range, leave as-is + if is_range: + return index + # otherwise, validate index + # validate not out of range - only when latent_count is passed in + if latent_count > 0 and index > latent_count-1: + raise IndexError(f"Index '{index}' out of range for the total {latent_count} latents.") + # if negative, validate not out of range + if index < 0: + if not allow_negative: + raise IndexError(f"Negative indices not allowed, but index was {index}.") + conv_index = latent_count+index + if conv_index < 0: + raise IndexError(f"Index '{index}', converted to '{conv_index}', is out of range for the total {latent_count} latents.") + index = conv_index + return index + + def convert_to_index_int(self, raw_index: str, latent_count: int = 0, is_range: bool = False, allow_negative = False) -> int: + try: + return self.validate_index(int(raw_index), latent_count=latent_count, is_range=is_range, allow_negative=allow_negative) + except ValueError as e: + raise ValueError(f"Index '{raw_index}' must be an integer.") from e + + def convert_to_latent_keyframes(self, latent_indices: str, latent_count: int) -> set[LatentKeyframe]: + # e.g. with latent_count=16, "0=0.9,4:8=0.5,-4=0.25" yields keyframes 0=0.9, 4..7=0.5, and 12=0.25 + # (negative indices count back from the end of the known latents) + if not latent_indices: + return set() + int_latent_indices = [i for i in range(0, latent_count)] + allow_negative = latent_count > 0 + chosen_indices = set() + # parse string - allow positive ints, negative ints, and ranges separated by ':' + groups = latent_indices.split(",") + groups = [g.strip() for g in groups] + for g in groups: + # parse strengths - default to 1.0 if no strength given + strength = 1.0 + if '=' in g: + g, strength_str = g.split("=", 1) + g = g.strip() + try: + strength = float(strength_str.strip()) + except ValueError as e: + raise ValueError(f"Strength '{strength_str}' must be a float.") from e + if strength < 0: + raise ValueError(f"Strength '{strength}' cannot be negative.") + # parse range of indices (e.g.
2:16) + if ':' in g: + index_range = g.split(":", 1) + index_range = [r.strip() for r in index_range] + start_index = self.convert_to_index_int(index_range[0], latent_count=latent_count, is_range=True, allow_negative=allow_negative) + end_index = self.convert_to_index_int(index_range[1], latent_count=latent_count, is_range=True, allow_negative=allow_negative) + # if latents were passed in, base indices on the known latent count + if len(int_latent_indices) > 0: + for i in int_latent_indices[start_index:end_index]: + chosen_indices.add(LatentKeyframe(i, strength)) + # otherwise, assume the indices are valid + else: + for i in range(start_index, end_index): + chosen_indices.add(LatentKeyframe(i, strength)) + # parse individual indices + else: + chosen_indices.add(LatentKeyframe(self.convert_to_index_int(g, latent_count=latent_count, allow_negative=allow_negative), strength)) + return chosen_indices + + def load_keyframes(self, + index_strengths: str, + prev_latent_kf: LatentKeyframeGroup=None, + prev_latent_keyframe: LatentKeyframeGroup=None, # old name + latent_optional=None, # matches the optional input name declared in INPUT_TYPES + print_keyframes=False): + prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf + if not prev_latent_keyframe: + prev_latent_keyframe = LatentKeyframeGroup() + else: + prev_latent_keyframe = prev_latent_keyframe.clone() + curr_latent_keyframe = LatentKeyframeGroup() + + latent_count = -1 + if latent_optional: + latent_count = latent_optional['samples'].size()[0] + latent_keyframes = self.convert_to_latent_keyframes(index_strengths, latent_count=latent_count) + + for latent_keyframe in latent_keyframes: + curr_latent_keyframe.add(latent_keyframe) + + if print_keyframes: + for keyframe in curr_latent_keyframe.keyframes: + logger.info(f"keyframe {keyframe.batch_index}:{keyframe.strength}") + + # keyframes from prev_latent_keyframe take priority over this node's values + for latent_keyframe in prev_latent_keyframe.keyframes: + curr_latent_keyframe.add(latent_keyframe) + + return (curr_latent_keyframe,) + + +class LatentKeyframeInterpolationNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "batch_index_from": ("INT", {"default": 0, "min": -10000, "max": 10000, "step": 1}), + "batch_index_to_excl": ("INT", {"default": 0, "min": -10000, "max": 10000, "step": 1}), + "strength_from": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "strength_to": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT], ), + }, + "optional": { + "prev_latent_kf": ("LATENT_KEYFRAME", ), + "print_keyframes": ("BOOLEAN", {"default": False}) + } + } + + RETURN_NAMES = ("LATENT_KF", ) + RETURN_TYPES = ("LATENT_KEYFRAME", ) + FUNCTION = "load_keyframe" + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def load_keyframe(self, + batch_index_from: int, + strength_from: float, + batch_index_to_excl: int, + strength_to: float, + interpolation: str, + prev_latent_kf: LatentKeyframeGroup=None, + prev_latent_keyframe: LatentKeyframeGroup=None, # old name + print_keyframes=False): + + if (batch_index_from > batch_index_to_excl): + raise ValueError("batch_index_from must be less than or equal to batch_index_to_excl.") + + if (batch_index_from < 0 and batch_index_to_excl >= 0): + raise ValueError("batch_index_from and batch_index_to_excl must be either both non-negative or both negative.") + + prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf + if not prev_latent_keyframe: + prev_latent_keyframe =
LatentKeyframeGroup() + else: + prev_latent_keyframe = prev_latent_keyframe.clone() + curr_latent_keyframe = LatentKeyframeGroup() + + steps = batch_index_to_excl - batch_index_from + diff = strength_to - strength_from + if interpolation == SI.LINEAR: + weights = np.linspace(strength_from, strength_to, steps) + elif interpolation == SI.EASE_IN: + # quadratic ease-in: slow start, fast finish + index = np.linspace(0, 1, steps) + weights = diff * np.power(index, 2) + strength_from + elif interpolation == SI.EASE_OUT: + # quadratic ease-out: fast start, slow finish + index = np.linspace(0, 1, steps) + weights = diff * (1 - np.power(1 - index, 2)) + strength_from + elif interpolation == SI.EASE_IN_OUT: + # cosine ease-in-out: slow start and slow finish + index = np.linspace(0, 1, steps) + weights = diff * ((1 - np.cos(index * np.pi)) / 2) + strength_from + + for i in range(steps): + keyframe = LatentKeyframe(batch_index_from + i, float(weights[i])) + curr_latent_keyframe.add(keyframe) + + if print_keyframes: + for keyframe in curr_latent_keyframe.keyframes: + logger.info(f"keyframe {keyframe.batch_index}:{keyframe.strength}") + + # keyframes from prev_latent_keyframe take priority over this node's values + for latent_keyframe in prev_latent_keyframe.keyframes: + curr_latent_keyframe.add(latent_keyframe) + + return (curr_latent_keyframe,) + + +class LatentKeyframeBatchedGroupNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "float_strengths": ("FLOAT", {"default": -1, "min": -1, "step": 0.001, "forceInput": True}), + }, + "optional": { + "prev_latent_kf": ("LATENT_KEYFRAME", ), + "print_keyframes": ("BOOLEAN", {"default": False}) + } + } + + RETURN_NAMES = ("LATENT_KF", ) + RETURN_TYPES = ("LATENT_KEYFRAME", ) + FUNCTION = "load_keyframe" + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def load_keyframe(self, float_strengths: Union[float, list[float]], + prev_latent_kf: LatentKeyframeGroup=None, + prev_latent_keyframe: LatentKeyframeGroup=None, # old name + print_keyframes=False): + prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf + if not prev_latent_keyframe: + prev_latent_keyframe = LatentKeyframeGroup() + else: + prev_latent_keyframe = prev_latent_keyframe.clone() + curr_latent_keyframe = LatentKeyframeGroup() + + # if received a normal float input, create no new keyframes + if type(float_strengths) in (float, int): + logger.info("No batched float_strengths passed into Latent Keyframe Batch Group node; will not create any new keyframes.") + # if iterable, attempt to create LatentKeyframes with the chosen strengths + elif isinstance(float_strengths, Iterable): + for idx, strength in enumerate(float_strengths): + keyframe = LatentKeyframe(idx, strength) + curr_latent_keyframe.add(keyframe) + else: + raise ValueError(f"Expected strengths to be an iterable input, but was {type(float_strengths).__name__}.") + + if print_keyframes: + for keyframe in curr_latent_keyframe.keyframes: + logger.info(f"keyframe {keyframe.batch_index}:{keyframe.strength}") + + # keyframes from prev_latent_keyframe take priority over this node's values + for latent_keyframe in prev_latent_keyframe.keyframes: + curr_latent_keyframe.add(latent_keyframe) + + return (curr_latent_keyframe,) diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/logger.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..b23b82fa5b8456e87b3b86da21a3f07bed6682b6 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/logger.py @@ -0,0 +1,36 @@ +import sys +import copy +import logging + + +class ColoredFormatter(logging.Formatter): + COLORS = { + "DEBUG": "\033[0;36m", # CYAN + "INFO": "\033[0;32m", # GREEN +
"WARNING": "\033[0;33m", # YELLOW + "ERROR": "\033[0;31m", # RED + "CRITICAL": "\033[0;37;41m", # WHITE ON RED + "RESET": "\033[0m", # RESET COLOR + } + + def format(self, record): + colored_record = copy.copy(record) + levelname = colored_record.levelname + seq = self.COLORS.get(levelname, self.COLORS["RESET"]) + colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}" + return super().format(colored_record) + + +# Create a new logger +logger = logging.getLogger("Advanced-ControlNet") +logger.propagate = False + +# Add handler if we don't have one. +if not logger.handlers: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(ColoredFormatter("[%(name)s] - %(levelname)s - %(message)s")) + logger.addHandler(handler) + +# Configure logger +loglevel = logging.INFO +logger.setLevel(loglevel) diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/nodes.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..3794ac769f0082e8d4b6750a8dbecdb930bd1613 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/nodes.py @@ -0,0 +1,243 @@ +import numpy as np +from torch import Tensor + +import folder_paths + +from .control import load_controlnet, convert_to_advanced, ControlWeights, ControlWeightType,\ + LatentKeyframeGroup, TimestepKeyframe, TimestepKeyframeGroup, is_advanced_controlnet +from .control import StrengthInterpolation as SI +from .weight_nodes import DefaultWeights, ScaledSoftMaskedUniversalWeights, ScaledSoftUniversalWeights, SoftControlNetWeights, CustomControlNetWeights, \ + SoftT2IAdapterWeights, CustomT2IAdapterWeights +from .latent_keyframe_nodes import LatentKeyframeGroupNode, LatentKeyframeInterpolationNode, LatentKeyframeBatchedGroupNode, LatentKeyframeNode +from .deprecated_nodes import LoadImagesFromDirectory +from .logger import logger + + +class TimestepKeyframeNode: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), + }, + "optional": { + "prev_timestep_kf": ("TIMESTEP_KEYFRAME", ), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "cn_weights": ("CONTROL_NET_WEIGHTS", ), + "latent_keyframe": ("LATENT_KEYFRAME", ), + "null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "inherit_missing": ("BOOLEAN", {"default": True}, ), + "guarantee_usage": ("BOOLEAN", {"default": True}, ), + "mask_optional": ("MASK", ), + #"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ), + } + } + + RETURN_NAMES = ("TIMESTEP_KF", ) + RETURN_TYPES = ("TIMESTEP_KEYFRAME", ) + FUNCTION = "load_keyframe" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes" + + def load_keyframe(self, + start_percent: float, + strength: float=1.0, + cn_weights: ControlWeights=None, control_net_weights: ControlWeights=None, # old name + latent_keyframe: LatentKeyframeGroup=None, + prev_timestep_kf: TimestepKeyframeGroup=None, prev_timestep_keyframe: TimestepKeyframeGroup=None, # old name + null_latent_kf_strength: float=0.0, + inherit_missing=True, + guarantee_usage=True, + mask_optional=None, + interpolation: str=SI.NONE,): + control_net_weights = control_net_weights if control_net_weights else cn_weights + prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf + if not prev_timestep_keyframe: + prev_timestep_keyframe = 
TimestepKeyframeGroup() + else: + prev_timestep_keyframe = prev_timestep_keyframe.clone() + keyframe = TimestepKeyframe(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength, + control_weights=control_net_weights, latent_keyframes=latent_keyframe, inherit_missing=inherit_missing, guarantee_usage=guarantee_usage, + mask_hint_orig=mask_optional) + prev_timestep_keyframe.add(keyframe) + return (prev_timestep_keyframe,) + + +class ControlNetLoaderAdvanced: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "control_net_name": (folder_paths.get_filename_list("controlnet"), ), + }, + "optional": { + "timestep_keyframe": ("TIMESTEP_KEYFRAME", ), + } + } + + RETURN_TYPES = ("CONTROL_NET", ) + FUNCTION = "load_controlnet" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝" + + def load_controlnet(self, control_net_name, + timestep_keyframe: TimestepKeyframeGroup=None + ): + controlnet_path = folder_paths.get_full_path("controlnet", control_net_name) + controlnet = load_controlnet(controlnet_path, timestep_keyframe) + return (controlnet,) + + +class DiffControlNetLoaderAdvanced: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "control_net_name": (folder_paths.get_filename_list("controlnet"), ) + }, + "optional": { + "timestep_keyframe": ("TIMESTEP_KEYFRAME", ), + } + } + + RETURN_TYPES = ("CONTROL_NET", ) + FUNCTION = "load_controlnet" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝" + + def load_controlnet(self, control_net_name, model, + timestep_keyframe: TimestepKeyframeGroup=None + ): + controlnet_path = folder_paths.get_full_path("controlnet", control_net_name) + controlnet = load_controlnet(controlnet_path, timestep_keyframe, model) + if is_advanced_controlnet(controlnet): + controlnet.verify_all_weights() + return (controlnet,) + + +class AdvancedControlNetApply: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "control_net": ("CONTROL_NET", ), + "image": ("IMAGE", ), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) + }, + "optional": { + "mask_optional": ("MASK", ), + "timestep_kf": ("TIMESTEP_KEYFRAME", ), + "latent_kf_override": ("LATENT_KEYFRAME", ), + "weights_override": ("CONTROL_NET_WEIGHTS", ), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING") + RETURN_NAMES = ("positive", "negative") + FUNCTION = "apply_controlnet" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝" + + def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, + mask_optional: Tensor=None, + timestep_kf: TimestepKeyframeGroup=None, latent_kf_override: LatentKeyframeGroup=None, + weights_override: ControlWeights=None): + if strength == 0: + return (positive, negative) + + control_hint = image.movedim(-1,1) + cnets = {} + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + + prev_cnet = d.get('control', None) + if prev_cnet in cnets: + c_net = cnets[prev_cnet] + else: + # copy, convert to advanced if needed, and set cond + c_net = convert_to_advanced(control_net.copy()).set_cond_hint(control_hint, strength, (start_percent, end_percent)) + if is_advanced_controlnet(c_net): + # apply optional parameters and overrides, if provided + if 
timestep_kf is not None: + c_net.set_timestep_keyframes(timestep_kf) + if latent_kf_override is not None: + c_net.latent_keyframe_override = latent_kf_override + if weights_override is not None: + c_net.weights_override = weights_override + # verify weights are compatible + c_net.verify_all_weights() + # set cond hint mask + if mask_optional is not None: + mask_optional = mask_optional.clone() + # if not in the form of a batch, make it so + if len(mask_optional.shape) < 3: + mask_optional = mask_optional.unsqueeze(0) + c_net.set_cond_hint_mask(mask_optional) + c_net.set_previous_controlnet(prev_cnet) + cnets[prev_cnet] = c_net + + d['control'] = c_net + d['control_apply_to_uncond'] = False + n = [t[0], d] + c.append(n) + out.append(c) + return (out[0], out[1]) + + +# NODE MAPPING +NODE_CLASS_MAPPINGS = { + # Keyframes + "TimestepKeyframe": TimestepKeyframeNode, + "LatentKeyframe": LatentKeyframeNode, + "LatentKeyframeGroup": LatentKeyframeGroupNode, + "LatentKeyframeBatchedGroup": LatentKeyframeBatchedGroupNode, + "LatentKeyframeTiming": LatentKeyframeInterpolationNode, + # Conditioning + "ACN_AdvancedControlNetApply": AdvancedControlNetApply, + # Loaders + "ControlNetLoaderAdvanced": ControlNetLoaderAdvanced, + "DiffControlNetLoaderAdvanced": DiffControlNetLoaderAdvanced, + # Weights + "ScaledSoftControlNetWeights": ScaledSoftUniversalWeights, + "ScaledSoftMaskedUniversalWeights": ScaledSoftMaskedUniversalWeights, + "SoftControlNetWeights": SoftControlNetWeights, + "CustomControlNetWeights": CustomControlNetWeights, + "SoftT2IAdapterWeights": SoftT2IAdapterWeights, + "CustomT2IAdapterWeights": CustomT2IAdapterWeights, + "ACN_DefaultUniversalWeights": DefaultWeights, + # Image + "LoadImagesFromDirectory": LoadImagesFromDirectory +} + +NODE_DISPLAY_NAME_MAPPINGS = { + # Keyframes + "TimestepKeyframe": "Timestep Keyframe 🛂🅐🅒🅝", + "LatentKeyframe": "Latent Keyframe 🛂🅐🅒🅝", + "LatentKeyframeGroup": "Latent Keyframe Group 🛂🅐🅒🅝", + "LatentKeyframeBatchedGroup": "Latent Keyframe Batched Group 🛂🅐🅒🅝", + "LatentKeyframeTiming": "Latent Keyframe Interpolation 🛂🅐🅒🅝", + # Conditioning + "ACN_AdvancedControlNetApply": "Apply Advanced ControlNet 🛂🅐🅒🅝", + # Loaders + "ControlNetLoaderAdvanced": "Load Advanced ControlNet Model 🛂🅐🅒🅝", + "DiffControlNetLoaderAdvanced": "Load Advanced ControlNet Model (diff) 🛂🅐🅒🅝", + # Weights + "ScaledSoftControlNetWeights": "Scaled Soft Weights 🛂🅐🅒🅝", + "ScaledSoftMaskedUniversalWeights": "Scaled Soft Masked Weights 🛂🅐🅒🅝", + "SoftControlNetWeights": "ControlNet Soft Weights 🛂🅐🅒🅝", + "CustomControlNetWeights": "ControlNet Custom Weights 🛂🅐🅒🅝", + "SoftT2IAdapterWeights": "T2IAdapter Soft Weights 🛂🅐🅒🅝", + "CustomT2IAdapterWeights": "T2IAdapter Custom Weights 🛂🅐🅒🅝", + "ACN_DefaultUniversalWeights": "Force Default Weights 🛂🅐🅒🅝", + # Image + "LoadImagesFromDirectory": "Load Images [DEPRECATED] 🛂🅐🅒🅝" +} diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/reference_nodes.py b/custom_nodes/ComfyUI-Advanced-ControlNet/control/reference_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..6879f9771c1a14e38505f1e1007ce9e4bc2e99ba --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/reference_nodes.py @@ -0,0 +1,12 @@ +class AnimateDiffLoaderWithContext: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("MODEL",) + CATEGORY = "" \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/control/weight_nodes.py 
b/custom_nodes/ComfyUI-Advanced-ControlNet/control/weight_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f80d6075aec5fef0d8ee86f1306d40e14a4859b0 --- /dev/null +++ b/custom_nodes/ComfyUI-Advanced-ControlNet/control/weight_nodes.py @@ -0,0 +1,201 @@ +from torch import Tensor +import torch +from .control import TimestepKeyframe, TimestepKeyframeGroup, ControlWeights, get_properly_arranged_t2i_weights, linear_conversion +from .logger import logger + + +WEIGHTS_RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT") + + +class DefaultWeights: + @classmethod + def INPUT_TYPES(s): + return { + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" + + def load_weights(self): + weights = ControlWeights.default() + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class ScaledSoftMaskedUniversalWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK", ), + "min_base_multiplier": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), + "max_base_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), + #"lock_min": ("BOOLEAN", {"default": False}, ), + #"lock_max": ("BOOLEAN", {"default": False}, ), + }, + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" + + def load_weights(self, mask: Tensor, min_base_multiplier: float, max_base_multiplier: float, lock_min=False, lock_max=False): + # normalize mask + mask = mask.clone() + x_min = 0.0 if lock_min else mask.min() + x_max = 1.0 if lock_max else mask.max() + if x_min == x_max: + mask = torch.ones_like(mask) * max_base_multiplier + else: + mask = linear_conversion(mask, x_min, x_max, min_base_multiplier, max_base_multiplier) + weights = ControlWeights.universal_mask(weight_mask=mask) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class ScaledSoftUniversalWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "base_multiplier": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 1.0, "step": 0.001}, ), + "flip_weights": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" + + def load_weights(self, base_multiplier, flip_weights): + weights = ControlWeights.universal(base_multiplier=base_multiplier, flip_weights=flip_weights) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class SoftControlNetWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "weight_00": ("FLOAT", {"default": 0.09941396206337118, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_01": ("FLOAT", {"default": 0.12050177219802567, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_02": ("FLOAT", {"default": 0.14606275417942507, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_03": ("FLOAT", {"default": 0.17704576264172736, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_04": ("FLOAT", {"default": 0.214600924414215, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_05": ("FLOAT", {"default": 0.26012233262329093, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_06": ("FLOAT", 
{"default": 0.3152997971191405, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_07": ("FLOAT", {"default": 0.3821815722656249, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_08": ("FLOAT", {"default": 0.4632503906249999, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_09": ("FLOAT", {"default": 0.561515625, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_10": ("FLOAT", {"default": 0.6806249999999999, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_11": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_12": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "flip_weights": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" + + def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, + weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): + weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, + weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] + weights = ControlWeights.controlnet(weights, flip_weights=flip_weights) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class CustomControlNetWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "weight_00": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_01": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_02": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_04": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_05": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_06": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_07": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_08": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_09": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_10": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_11": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_12": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "flip_weights": ("BOOLEAN", {"default": False}), + } + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" + + def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, + weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): + weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, + weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] + weights = ControlWeights.controlnet(weights, flip_weights=flip_weights) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class SoftT2IAdapterWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "weight_00": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 10.0, 
"step": 0.001}, ), + "weight_01": ("FLOAT", {"default": 0.62, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_02": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "flip_weights": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter" + + def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights): + weights = [weight_00, weight_01, weight_02, weight_03] + weights = get_properly_arranged_t2i_weights(weights) + weights = ControlWeights.t2iadapter(weights, flip_weights=flip_weights) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + + +class CustomT2IAdapterWeights: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "weight_00": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_01": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_02": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), + "flip_weights": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = WEIGHTS_RETURN_NAMES + FUNCTION = "load_weights" + + CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter" + + def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights): + weights = [weight_00, weight_01, weight_02, weight_03] + weights = get_properly_arranged_t2i_weights(weights) + weights = ControlWeights.t2iadapter(weights, flip_weights=flip_weights) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) diff --git a/custom_nodes/ComfyUI-Advanced-ControlNet/requirements.txt b/custom_nodes/ComfyUI-Advanced-ControlNet/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-Custom-Scripts/.gitignore b/custom_nodes/ComfyUI-Custom-Scripts/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ed8ebf583f771da9150c35db3955987b7d757904 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/.gitignore @@ -0,0 +1 @@ +__pycache__ \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Custom-Scripts/LICENSE b/custom_nodes/ComfyUI-Custom-Scripts/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..acf31806a2507192169eca2137ea9ceb7685f51c --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 pythongosssss + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/README.md b/custom_nodes/ComfyUI-Custom-Scripts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3d6037c4aafdd4de86ed0f3d112d9c931aff9ede
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/README.md
@@ -0,0 +1,394 @@
+# ComfyUI-Custom-Scripts
+
+# Installation
+
+1. Clone the repository:
+`git clone https://github.com/pythongosssss/ComfyUI-Custom-Scripts.git`
+to your ComfyUI `custom_nodes` directory
+
+   The script will then automatically install all custom scripts and nodes.
+   It will attempt to use symlinks and junctions to prevent having to copy files and keep them up to date.
+
+- For uninstallation:
+  - Delete the cloned repo in `custom_nodes`
+  - Ensure `web/extensions/pysssss/CustomScripts` has also been removed
+
+# Update
+1. Navigate to the cloned repo e.g. `custom_nodes/ComfyUI-Custom-Scripts`
+2. `git pull`
+
+# Features
+
+## Autocomplete
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/b5971135-414f-4f4e-a6cf-2650dc01085f)
+Provides embedding and custom word autocomplete. You can view embedding details by clicking on the info icon on the list.
+Define your list of custom words via the settings.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/160ef61c-7d7e-49d0-b60f-5a1501b74c9d)
+You can quickly default to danbooru tags using the Load button, or load/manage other custom word lists.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/cc180b35-5f45-442f-9285-3ddf3fa320d0)
+
+## Auto Arrange Graph
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/04b06081-ca6f-4c0f-8584-d0a157c36747)
+Adds a menu option to auto arrange the graph in order of execution; this makes very wide graphs!
+
+## Always Snap to Grid
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/66f36d1f-e579-4959-9880-9a9624922e3a)
+Adds a setting to make moving nodes always snap to grid.
+
+## [Testing] "Better" Loader Lists
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/664caa71-f25f-4a96-a04a-1466d6b2b8b4)
+Adds custom Lora and Checkpoint loader nodes that can show preview images: just place a png or jpg next to the file and it'll display in the list on hover (e.g. sdxl.safetensors and sdxl.png).
+Optionally enable subfolders via the settings:
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/e15b5e83-4f9d-4d57-8324-742bedf75439)
+Adds an "examples" widget to load sample prompts, trigger words, etc.:
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ad1751e4-4c85-42e7-9490-e94fb1cbc8e7)
+These should be stored in a folder matching the name of the model, e.g.
if it is `loras/add_detail.safetensors`, put your example files in `loras/add_detail/*.txt`
+To quickly save a generated image as the preview to use for the model, you can right-click an image on a node, select Save as Preview, and choose the model to save the preview for:
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/9fa8e9db-27b3-45cb-85c2-0860a238fd3a)
+
+## Checkpoint/LoRA/Embedding Info
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/6b67bf40-ee17-4fa6-a0c1-7947066bafc2)
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/32405df6-b367-404f-a5df-2d4347089a9e)
+Adds a "View Info" menu option to view details about the selected LoRA or Checkpoint. To view embedding details, click the info button when using embedding autocomplete.
+
+## Constrain Image
+Adds a node for resizing an image to a maximum and minimum size, optionally cropping if required.
+
+## Custom Colors
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/fa7883f3-f81c-49f6-9ab6-9526e4debab6)
+Adds a custom color picker to nodes & groups.
+
+## Favicon Status
+![image](https://user-images.githubusercontent.com/125205205/230171227-31f061a6-6324-4976-bed9-723a87500cf3.png)
+![image](https://user-images.githubusercontent.com/125205205/230171445-c7202a45-b511-4d69-87fa-945ad44c063f.png)
+Adds a favicon and title to the window; the favicon changes color while generating, and the window title includes the number of prompts in the queue.
+
+## Image Feed
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/caea0d48-85b9-4ca9-9771-5c795db35fbc)
+Adds a panel showing images that have been generated in the current session. You can control the direction that images are added and the position of the panel via the ComfyUI settings screen, and the size of the panel and the images via the sliders at the top of the panel.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ca093d38-41a3-4647-9223-5bd0b9ee4f1e)
+
+## KSampler (Advanced) denoise helper
+Provides a simple method to set custom denoise on the advanced sampler.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/42946bd8-0078-4c7a-bfe9-7adb1382b5e2)
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7cfccb22-f155-4848-934b-a2b2a6efe16f)
+
+## Lock Nodes & Groups
+![image](https://user-images.githubusercontent.com/125205205/230172868-5c5a943c-ade1-4799-bf80-cc931da5d4b2.png)
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/cfca09d9-38e5-4ecd-8b73-1455009fcd67)
+Adds a lock option to nodes & groups that prevents you from moving them until unlocked.
+
+## Math Expression
+Allows for evaluating complex expressions using values from the graph. You can input `INT`, `FLOAT`, `IMAGE` and `LATENT` values.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/1593edde-67b8-45d8-88cb-e75f52dba039)
+Other nodes' values can be referenced via the `Node name for S&R` property (set via the `Properties` menu item on a node) or the node title.
+Supported operators: `+ - * /` (basic ops) `//` (floor division) `**` (power) `^` (xor) `%` (mod)
+Supported functions: `round(num, dp?)` `floor(num)` `ceil(num)` `randomint(min,max)`
+If using a `LATENT` or `IMAGE` you can get the dimensions using `a.width` or `a.height` where `a` is the input name.
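+For example, with an `IMAGE` connected to input `a`, an expression like the following (an illustrative expression using the operators and functions listed above, not from the original README) computes half the image width, rounded up to a multiple of 64:
+
+```
+ceil(a.width / 2 / 64) * 64
+```
+
+The node outputs the result as both `INT` and `FLOAT`, so it can be wired into things like resize dimensions or sampler steps.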
+
+## Node Finder
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/177d2b67-acbc-4ec3-ab31-7c295a98c194)
+Adds a menu item for following/jumping to the executing node, and a menu to quickly go to a node of a specific type.
+
+## Preset Text
+![image](https://user-images.githubusercontent.com/125205205/230173939-08459efc-785b-46da-93d1-b02f0300c6f4.png)
+Adds a node that lets you save and use text presets (e.g. for your 'normal' negatives).
+
+## Quick Nodes
+![image](https://user-images.githubusercontent.com/125205205/230174266-5232831a-a03b-4bf7-bc8b-c45466a0bc64.png)
+Adds various menu items to some nodes for quickly setting up common parts of graphs.
+
+## Play Sound
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/9bcf9fb3-5898-4432-a974-fb1e17d3b7e8)
+Plays a sound when the node is executed, either after each prompt or only when the queue is empty (useful when queuing multiple prompts).
+You can customize the sound by replacing the mp3 file `web/extensions/pysssss/CustomScripts/assets/notify.mp3`.
+
+## [WIP] Repeater
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ec0dac25-14e4-4d44-b975-52193656709d)
+Node allows you to either create a list of N repeats of the input node, or create N outputs from the input node.
+You can optionally decide if you want to reuse the input node, or create a new instance each time (e.g. a Checkpoint Loader would want to be re-used, but a random number would want to be unique).
+TODO: Type safety on the wildcard outputs to require match with input
+
+## Show Text
+![image](https://user-images.githubusercontent.com/125205205/230174888-c004fd48-da78-4de9-81c2-93a866fcfcd1.png)
+Takes input from a node that produces a string and displays it; useful for things like interrogators, prompt generators, etc.
+
+## Show Image on Menu
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/b6ab58f2-583b-448c-bcfc-f93f5cdab0fc)
+Shows the current generating image on the menu at the bottom; you can disable this via the settings menu.
+
+## String Function
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/01107137-8a93-4765-bae0-fcc110a09091)
+Supports appending and replacing text.
+`tidy_tags` will add commas between parts when in `append` mode.
+`replace` mode supports regex replace by using `/your regex here/` and you can reference capturing groups using `\number` e.g.
`\1` (a worked sketch follows the Reroute Primitive section below).
+
+## Touch Support
+Provides basic support for touch screen devices; it's not perfect, but better than nothing.
+
+## Widget Defaults
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/3d675032-2b19-4da8-a7d7-fa2d7c555daa)
+Allows you to specify default values for widgets when adding new nodes; the values are configured via the settings menu.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7b57a3d8-98d3-46e9-9b33-6645c0da41e7)
+
+## Workflows
+Adds options to the menu for saving + loading workflows:
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/7b5a3012-4c59-47c6-8eea-85cf534403ea)
+
+## Workflow Images
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/06453fd2-c020-46ee-a7db-2b8bf5bcba7e)
+Adds menu options for importing/exporting the graph as SVG or PNG, showing a view of the nodes.
+
+## (Testing) Reroute Primitive
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/8b870eef-d572-43f9-b394-cfa7abbd2f98)
+Provides a node that allows rerouting primitives.
+The node can also be collapsed to a single point that you can drag around.
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/a9bd0112-cf8f-44f3-af6d-f9a8fed152a7)
+Warning: Don't use normal reroutes or primitives with these nodes; the combination isn't tested, and this node replaces their functionality.
+
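+As the worked sketch promised in the String Function section above, here is a rough Python equivalent of its regex `replace` mode (illustrative only: it assumes `text_a` is the source text, `text_b` the pattern, and `text_c` the replacement, and it skips the plain-text path):
+
+```python
+import re
+
+text_a = "a photo of a cat, two cats"   # source text
+text_b = "/cat(s?)/"                    # wrapping in /slashes/ enables regex mode
+text_c = r"dog\1"                       # \1 references capturing group 1
+
+pattern = text_b[1:-1]                  # strip the surrounding slashes
+print(re.sub(pattern, text_c, text_a))  # -> "a photo of a dog, two dogs"
+```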
+
+## WD14 Tagger
+Moved to: https://github.com/pythongosssss/ComfyUI-WD14-Tagger
+
+## Link Render Mode
+![image](https://github.com/pythongosssss/ComfyUI-Custom-Scripts/assets/125205205/ad3be76b-43b1-455e-a64a-bf2a6571facf)
+Allows you to control how the links between nodes are rendered, choosing between straight, linear & spline, e.g. Straight.
+
+
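+All of the Python nodes in this pack follow ComfyUI's standard registration pattern, which you can see throughout the `py/` folder. A minimal sketch of that pattern (the `ExampleNode` name is hypothetical, not a node shipped by this pack):
+
+```python
+class ExampleNode:
+    @classmethod
+    def INPUT_TYPES(s):
+        # Each input is a (type, options) tuple keyed by widget name
+        return {"required": {"text": ("STRING", {"multiline": True})}}
+
+    RETURN_TYPES = ("STRING",)   # output socket types
+    FUNCTION = "run"             # method ComfyUI calls on execution
+    CATEGORY = "utils"           # menu placement
+
+    def run(self, text):
+        return (text.upper(),)
+
+
+# ComfyUI discovers nodes via these module-level mappings
+NODE_CLASS_MAPPINGS = {"ExampleNode|pysssss": ExampleNode}
+NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode|pysssss": "Example Node 🐍"}
+```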
+
+
+# Changelog
+
+## 2023-09-22
+### Minor
+- ✨ Use Civitai image as preview
+- 🐛 CTRL+Enter on autocomplete will no longer accept the suggestion, as it is the shortcut for queuing a prompt.
+- 🐛 Fix using numbers in widget defaults
+- ✨ Support setting node properties (e.g. title, colors) via widget defaults
+
+## 2023-09-13
+### New
+- ✨ Ability to "send" an image to a Load Image node in either the current or a different workflow
+### Minor
+- ✨ Add support for A1111 autocomplete CSV format
+- ✨ Allow setting custom node for middle click to add node
+
+## 2023-09-10
+### Minor
+- 🐛 Fix rendering new lines in workflow image exports
+
+## 2023-09-08
+### New
+- ✨ Add Load + Save Text file nodes; you can configure the allowed directories in the `user/text_file_dirs.json` file
+### Minor
+- 🎨 Show autocomplete alias word on popup
+- ✨ Add setting to disable middle click from adding a reroute node
+- 🎨 Add prompt for setting custom column count on image feed (click the column count label)
+
+## 2023-09-07
+### New
+- ✨ Support Unicode (e.g. Chinese) and word aliases in autocomplete.
+
+## 2023-09-05
+### Minor
+- 🎨 Disable autocomplete on math node
+- 🐛 Fix Show Text node always resizing on update
+- 🎨 Better adding of preview image to menu (thanks to @zeroeightysix)
+- 🎨 UX improvements for image feed (thanks to @birdddev)
+- 🐛 Fix Math Expression expression not showing on updated ComfyUI
+
+## 2023-08-30
+### Minor
+- 🎨 Allow jpeg lora/checkpoint preview images
+- ✨ Save ShowText value to embedded image metadata
+
+## 2023-08-29
+### Minor
+- ✨ Option to auto insert `, ` after autocomplete
+- 🎨 Exclude arrow keys from triggering autocomplete
+- 🐛 Split paths by `\` and `/` on Windows for submenus
+
+## 2023-08-28
+### New
+- ✨ Add custom autocomplete word list setting
+- ✨ Support autocomplete word priority sorting
+- ✨ Support autocomplete matching anywhere in the word rather than requiring it to start with the typed text
+
+## 2023-08-27
+### New
+- ✨ Add Checkpoint info
+- ✨ Add embedding autocomplete
+- ✨ Add embedding info
+### Major
+- ♻️ Refactor LoRA info
+
+## 2023-08-26
+### Minor
+- 🐛 Fix using text widget values in Math Expression not casting to number
+- 🎨 Fix padding on lightbox next arrow
+
+## 2023-08-25
+### Minor
+- ♻️ Support older versions of Python
+
+## 2023-08-24
+### Minor
+- 🐛 Fix extracting links from LoRA info notes
+
+## 2023-08-23
+### Major
+- 🚨 Update to use `WEB_DIRECTORY` feature instead of manual linking/copying web files
+
+## 2023-08-22
+### New
+- ✨ Math Expression now supports IMAGE and LATENT inputs; to access the dimensions use `a.width`, `b.height`
+- 🎨 Removed STRING output on Math Expression, now draws the result onto the node
+
+## 2023-08-21
+### New
+- ✨ Allow custom note (named {file}.txt) to show in LoRA info
+- ✨ Query Civitai API using the model hash to provide link
+
+## 2023-08-20
+### New
+- ✨ Add LoRA Info menu option for displaying LoRA metadata
+### Minor
+- 🐛 Fix crash on preset text replacement (thanks to @sjuxax)
+
+## 2023-08-19
+### New
+- ✨ Add support for importing JPG files with embedded metadata (e.g.
from Civitai)
+### Minor
+- 🐛 Fix crash on graph arrange where LiteGraph sometimes stores links to deleted nodes
+- 🐛 Fix a couple of rendering issues in workflow export
+
+## 2023-08-18
+### New
+- ✨ Add "example" widget to custom LoRA + Checkpoint loader allowing you to quickly view saved prompts, triggers, etc.
+- ✨ Add quick "Save as Preview" option on images to save generated images for models
+
+## 2023-08-16
+### New
+- ✨ Add repeater node for generating lists or quickly duplicating nodes
+### Minor
+- 🐛 Support quick Add LoRA on custom Checkpoint Loader
+- ✨ Support `randomint(min,max)` function in math node
+- 🎨 Use relative imports to support proxied URLs not on root path (thanks to @mcmonkey4eva)
+
+## 2023-08-13
+### Minor
+- ✨ Support `round` `floor` `ceil` functions in math node
+- 🐛 Fix floor division in math node
+
+## 2023-08-12
+### New
+- 🎨 Image feed now uses a lightbox for showing images
+### Minor
+- 🎨 Better loader lists now support images named `{name}.preview.png`
+
+## 2023-08-11
+### Minor
+- ✨ Enable filter box on submenus
+
+## 2023-08-05
+### Major
+- 🚨 The ComfyUI Lora Loader no longer has subfolders; due to compatibility issues you need to use my Lora Loader if you want subfolders, which can be enabled/disabled on the node via a setting (🐍 Enable submenu in custom nodes)
+### New
+- ✨ Add custom Checkpoint Loader supporting images & subfolders
+- ✨ Add Play Sound node for notifying when a prompt is finished
+### Minor
+- ✨ Quick Nodes supports new LoRA loader ("Add 🐍 LoRA")
+- ♻️ Disable link render mode if ComfyUI has native support
+
+## 2023-08-04
+### Minor
+- ✨ Always snap to grid now applies on node resize
+- 🐛 Fix reroute primitive widget value not being restored on reload
+- ✨ Workflows now reuse last filename from load & save - save must be done by the submenu
+
+## 2023-08-02
+### New
+- ✨ Add "Always snap to grid" setting that does the same as holding shift, aligning nodes to the grid
+### Minor
+- 🚨 No longer populates image feed when it's closed
+- 🐛 Allow lock/unlock of multiple selected nodes
+
+## 2023-08-01
+### Minor
+- 🎨 Image feed now uses comfy theme variables for colors
+- 🐛 Link render mode redraws graph on change of setting instead of requiring mouse move
+
+## 2023-07-30
+- 🎨 Update to image feed to make it more user friendly, change image size to column count, various other tweaks (thanks @DrJKL)
+
+## 2023-07-30
+### Major
+- 🐛 Fix issue with context menu (right click) not working for some users after Lora script updates
+### New
+- ✨ Add "Custom" option to color menu for nodes & groups
+### Minor
+- 🐛 Fix String Function values converted to unconnected inputs outputting the text "undefined"
+
+## 2023-07-29
+### New
+- ✨ Added Reroute Primitive combining the functionality of reroutes + primitives, also allowing collapsing to a single point.
+- ✨ Add support for exporting workflow images as PNGs and optional embedding of metadata in PNG and SVG
+### Minor
+- ✨ Remove new lines in Math Expression node
+- ✨ String function is now an output node
+- 🐛 Fix conflict between Lora Loader + Lora submenu causing the context menu to behave strangely (#23, #24)
+- 🎨 Rename "SVG -> Import/Export" to "Workflow Image" -> Import/Export
+
+## 2023-07-27
+### New
+- ✨ Added custom Lora Loader that includes image previews
+### Minor
+- ✨ Add preview output to string function node
+- 📄 Updated missing/out of date parts of readme
+- 🐛 Fix crash on show image on menu when set to not show (thanks @DrJKL)
+- 🐛 Fix incorrect category (util vs utils) for math node (thanks @DrJKL)
+
+## 2023-07-27
+### Minor
+- ✨ Save Image Feed close state
+- 🐛 Fix unlocked group size calculation
+
+## 2023-07-21 + 22
+### Minor
+- 🐛 Fix preset text incompatibility with Impact Pack (thanks @ltdrdata)
+
+## 2023-07-13
+### New
+- ✨ Add Math Expression node for evaluating expressions using values from the graph
+### Minor
+- ✨ Add settings for image feed location + image order
+
+## 2023-06-27
+### Minor
+- 🐛 Fix unlocking group using incorrect size
+- ✨ Save visibility of image feed
+
+## 2023-06-18
+### Major Changes
+- ✨ Added auto installation of scripts and `__init__` (thanks @TashaSkyUp)
+- ♻️ Reworked folder structure
+- 🚨 Renamed a number of nodes to include `pysssss` to prevent name conflicts
+- 🚨 Remove Latent Upscale By as it is now a built-in node in ComfyUI
+- 🚨 Removed Anime Segmentation to its own repo
+### New
+- ✨ Add Link Render Mode setting to choose how links are rendered
+- ✨ Add Constrain Image node for resizing images to a min/max resolution with optional cropping
+- ✨ Add Show Image On Menu to include the latest image output on the menu
+- ✨ Add KSamplerAdvanced simple denoise prompt for configuring the node using steps + denoise
+- 🎨 Add sizing options to Image Feed
+
+### Other
+- ♻️ Include [canvas2svg](https://gliffy.github.io/canvas2svg/) for SVG export in assets to prevent downloading at runtime
+- 🎨 Add background color (using theme color) to exported SVG
+- 🐛 Fix Manage Widget Defaults to work with new ComfyUI settings dialog
+- 🐛 Increase Image Feed z-index to prevent node text overlapping
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/__init__.py b/custom_nodes/ComfyUI-Custom-Scripts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae43dd2090ce782074bf72f2b7f49ad28dff1a5c
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/__init__.py
@@ -0,0 +1,25 @@
+import importlib.util
+import glob
+import os
+import sys
+from .pysssss import init, get_ext_dir
+
+NODE_CLASS_MAPPINGS = {}
+NODE_DISPLAY_NAME_MAPPINGS = {}
+
+if init():
+    py = get_ext_dir("py")
+    files = glob.glob(os.path.join(py, "*.py"), recursive=False)
+    for file in files:
+        name = os.path.splitext(file)[0]
+        spec = importlib.util.spec_from_file_location(name, file)
+        module = importlib.util.module_from_spec(spec)
+        sys.modules[name] = module
+        spec.loader.exec_module(module)
+        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
+            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
+        if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
+            NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
+
+WEB_DIRECTORY = "./web"
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
diff --git
a/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22e81cee8501937df75cac29a95702bf31d2bc5f Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3646d1de2c7fe62b5fcd8457a7370611a5db410 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9cdfa7f965a2dd10dc8dd20d248fcce7f658ec2 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56d1e6e17367f769d40c59e6618affd58df54f51 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/__pycache__/pysssss.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8b3a794153568f62a130caff935aa240d3f56c0 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe7a657cb9566464beec995985f63d56408b2514 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/autocomplete.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2e758dfcff42a7e190ed23dc77c40809eb6d6f9 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8001c7e3e0bd9d860233567a4c2d02e1a106ae1 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/better_combos.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7aed3ff94d5a5a9c971a4992cd1287425ba8d4b5 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-310.pyc differ diff --git 
a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04de4fbed1bb9bb1f53563e9610501aefdbfae2c Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/constrain_image.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa4e95085372c2969241b7b6754e6fd81f04a211 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2b198aef2111593b4a43a19568442be10d7f809 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/math_expression.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8172f3937334b86837c56f11f4ff74d04e7c6fda Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914e29674d5045f6c17ad29eb67c454c46778603 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/model_info.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b0e1de8b6900904867f7d2cfc74514ce66e5b48 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a368c398226ce34fdb6140384d6ea5e9812c7977 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/play_sound.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c39901b6c5139986d1a3d619fb58e802cccaae5 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c3c3389e77129d028b1633da3f3bfc984d94f71 Binary files /dev/null and 
b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/repeater.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32347c1e2e027a553e4a55bd927249f29b702238 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06215d46d4364f0e6444cac68d0099d4b2704629 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/reroute_primitive.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2642f1b69c46a3b2ba240803c4c8c3781606055 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab5a547d78fe31601276c4969d9958b01cf3160 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/show_text.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12d567b3b62d3eee292f596cdd5352d2fa4e9330 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2609c176fc7d766d52a75326b5696cdc63ffb1f Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/string_function.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62effd1986fcfb841669f378549289bca1251e9c Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c22e6d54f893f7f4fcd5813485e146e1abe8882c Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/text_files.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b94ae4b2c626bf94e1bd1864455a0419167d6037 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-311.pyc b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb604a5dd90c0744a5ecf8d3fb43b7875c9e7908 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/py/__pycache__/workflows.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py b/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py new file mode 100644 index 0000000000000000000000000000000000000000..abf838dffb75fcd02da44c59ee2c343aa6060295 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/py/autocomplete.py @@ -0,0 +1,22 @@ +from server import PromptServer +from aiohttp import web +import os + +dir = os.path.abspath(os.path.join(__file__, "../../user")) +if not os.path.exists(dir): + os.mkdir(dir) +file = os.path.join(dir, "autocomplete.txt") + + +@PromptServer.instance.routes.get("/pysssss/autocomplete") +async def get_autocomplete(request): + if os.path.isfile(file): + return web.FileResponse(file) + return web.Response(status=404) + + +@PromptServer.instance.routes.post("/pysssss/autocomplete") +async def update_autocomplete(request): + with open(file, "w", encoding="utf-8") as f: + f.write(await request.text()) + return web.Response(status=200) diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py b/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py new file mode 100644 index 0000000000000000000000000000000000000000..a7ed34e521cc4b7ff72f5193ecc785565ade6fdc --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/py/better_combos.py @@ -0,0 +1,135 @@ +import glob +import os +from nodes import LoraLoader, CheckpointLoaderSimple +import folder_paths +from server import PromptServer +from folder_paths import get_directory_by_type +from aiohttp import web +import shutil + + +@PromptServer.instance.routes.get("/pysssss/view/{name}") +async def view(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + image_path = folder_paths.get_full_path( + type, name) + if not image_path: + return web.Response(status=404) + + filename = os.path.basename(image_path) + return web.FileResponse(image_path, headers={"Content-Disposition": f"filename=\"{filename}\""}) + + +@PromptServer.instance.routes.post("/pysssss/save/{name}") +async def save_preview(request): + name = request.match_info["name"] + pos = name.index("/") + type = name[0:pos] + name = name[pos+1:] + + body = await request.json() + + dir = get_directory_by_type(body.get("type", "output")) + subfolder = body.get("subfolder", "") + full_output_folder = os.path.join(dir, os.path.normpath(subfolder)) + + if os.path.commonpath((dir, os.path.abspath(full_output_folder))) != dir: + return web.Response(status=400) + + filepath = os.path.join(full_output_folder, body.get("filename", "")) + image_path = folder_paths.get_full_path(type, name) + image_path = os.path.splitext( + image_path)[0] + os.path.splitext(filepath)[1] + + shutil.copyfile(filepath, image_path) + + return web.json_response({ + "image": type + "/" + os.path.basename(image_path) + }) + + +@PromptServer.instance.routes.get("/pysssss/examples/{name}") +async def get_examples(request): + name = request.match_info["name"] + pos = 
name.index("/") + type = name[0:pos] + name = name[pos+1:] + + file_path = folder_paths.get_full_path( + type, name) + if not file_path: + return web.Response(status=404) + + file_path_no_ext = os.path.splitext(file_path)[0] + examples = [] + if os.path.isdir(file_path_no_ext): + examples += map(lambda t: os.path.relpath(t, file_path_no_ext), + glob.glob(file_path_no_ext + "/*.txt")) + + return web.json_response(examples) + + +def populate_items(names, type): + for idx, item_name in enumerate(names): + + file_name = os.path.splitext(item_name)[0] + file_path = folder_paths.get_full_path(type, item_name) + + if file_path is None: + print(f"(pysssss:better_combos) Unable to get path for {type} {item_name}") + continue + + file_path_no_ext = os.path.splitext(file_path)[0] + + for ext in ["png", "jpg", "jpeg", "preview.png"]: + has_image = os.path.isfile(file_path_no_ext + "." + ext) + if has_image: + item_image = f"{file_name}.{ext}" + break + + names[idx] = { + "content": item_name, + "image": f"{type}/{item_image}" if has_image else None, + } + names.sort(key=lambda i: i["content"].lower()) + + +class LoraLoaderWithImages(LoraLoader): + @classmethod + def INPUT_TYPES(s): + types = super().INPUT_TYPES() + names = types["required"]["lora_name"][0] + populate_items(names, "loras") + return types + + def load_lora(self, **kwargs): + kwargs["lora_name"] = kwargs["lora_name"]["content"] + return super().load_lora(**kwargs) + + +class CheckpointLoaderSimpleWithImages(CheckpointLoaderSimple): + @classmethod + def INPUT_TYPES(s): + types = super().INPUT_TYPES() + names = types["required"]["ckpt_name"][0] + populate_items(names, "checkpoints") + return types + + def load_checkpoint(self, **kwargs): + kwargs["ckpt_name"] = kwargs["ckpt_name"]["content"] + return super().load_checkpoint(**kwargs) + + +NODE_CLASS_MAPPINGS = { + "LoraLoader|pysssss": LoraLoaderWithImages, + "CheckpointLoader|pysssss": CheckpointLoaderSimpleWithImages, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LoraLoader|pysssss": "Lora Loader 🐍", + "CheckpointLoader|pysssss": "Checkpoint Loader 🐍", +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py b/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py new file mode 100644 index 0000000000000000000000000000000000000000..c3631b3c6f76880a310d03271dd10f6c5abb8b84 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/py/constrain_image.py @@ -0,0 +1,71 @@ +import torch +import numpy as np +from PIL import Image + +class ConstrainImage: + """ + A node that constrains an image to a maximum and minimum size while maintaining aspect ratio. + """ + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "max_width": ("INT", {"default": 1024, "min": 0}), + "max_height": ("INT", {"default": 1024, "min": 0}), + "min_width": ("INT", {"default": 0, "min": 0}), + "min_height": ("INT", {"default": 0, "min": 0}), + "crop_if_required": (["yes", "no"], {"default": "no"}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "constrain_image" + CATEGORY = "image" + OUTPUT_IS_LIST = (True,) + + def constrain_image(self, images, max_width, max_height, min_width, min_height, crop_if_required): + crop_if_required = crop_if_required == "yes" + results = [] + for image in images: + i = 255. 
* image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)).convert("RGB") + + current_width, current_height = img.size + aspect_ratio = current_width / current_height + + constrained_width = max(min(current_width, min_width), max_width) + constrained_height = max(min(current_height, min_height), max_height) + + if constrained_width / constrained_height > aspect_ratio: + constrained_width = max(int(constrained_height * aspect_ratio), min_width) + if crop_if_required: + constrained_height = int(current_height / (current_width / constrained_width)) + else: + constrained_height = max(int(constrained_width / aspect_ratio), min_height) + if crop_if_required: + constrained_width = int(current_width / (current_height / constrained_height)) + + resized_image = img.resize((constrained_width, constrained_height), Image.LANCZOS) + + if crop_if_required and (constrained_width > max_width or constrained_height > max_height): + left = max((constrained_width - max_width) // 2, 0) + top = max((constrained_height - max_height) // 2, 0) + right = min(constrained_width, max_width) + left + bottom = min(constrained_height, max_height) + top + resized_image = resized_image.crop((left, top, right, bottom)) + + resized_image = np.array(resized_image).astype(np.float32) / 255.0 + resized_image = torch.from_numpy(resized_image)[None,] + results.append(resized_image) + + return (results,) + +NODE_CLASS_MAPPINGS = { + "ConstrainImage|pysssss": ConstrainImage, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ConstrainImage|pysssss": "Constrain Image 🐍", +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py b/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py new file mode 100644 index 0000000000000000000000000000000000000000..decebdc5a52fdf0860a9989e33ac3d6fa3c362fa --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/py/math_expression.py @@ -0,0 +1,190 @@ +import ast +import math +import random +import operator as op + +operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Pow: op.pow, + ast.BitXor: op.xor, + ast.USub: op.neg, + ast.Mod: op.mod +} + +# TODO: restructure args to provide more info, generate hint based on args to save duplication +functions = { + "round": { + "args": (1, 2), + "call": lambda a, b = None: round(a, b), + "hint": "number, dp? 
= 0" + }, + "ceil": { + "args": (1, 1), + "call": lambda a: math.ceil(a), + "hint": "number" + }, + "floor": { + "args": (1, 1), + "call": lambda a: math.floor(a), + "hint": "number" + }, + "min": { + "args": (2, None), + "call": lambda *args: min(*args), + "hint": "...numbers" + }, + "max": { + "args": (2, None), + "call": lambda *args: max(*args), + "hint": "...numbers" + }, + "randomint": { + "args": (2, 2), + "call": lambda a, b: random.randint(a, b), + "hint": "min, max" + }, + "randomchoice": { + "args": (2, None), + "call": lambda *args: random.choice(args), + "hint": "...numbers" + }, +} + +autocompleteWords = list({ + "text": x, + "value": f"{x}()", + "showValue": False, + "hint": f"{functions[x]['hint']}", + "caretOffset": -1 +} for x in functions.keys()) + + +class MathExpression: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "expression": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": { + "words": autocompleteWords, + "separator": "" + }}), + }, + "optional": { + "a": ("INT,FLOAT,IMAGE,LATENT", ), + "b": ("INT,FLOAT,IMAGE,LATENT",), + "c": ("INT,FLOAT,IMAGE,LATENT", ), + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", + "prompt": "PROMPT"}, + } + + RETURN_TYPES = ("INT", "FLOAT", ) + FUNCTION = "evaluate" + CATEGORY = "utils" + OUTPUT_NODE = True + + @classmethod + def IS_CHANGED(s, expression, **kwargs): + if "random" in expression: + return float("nan") + return expression + + def get_widget_value(self, extra_pnginfo, prompt, node_name, widget_name): + workflow = extra_pnginfo["workflow"] + node_id = None + for node in workflow["nodes"]: + name = node["type"] + if "properties" in node: + if "Node name for S&R" in node["properties"]: + name = node["properties"]["Node name for S&R"] + if name == node_name: + node_id = node["id"] + break + if "title" in node: + name = node["title"] + if name == node_name: + node_id = node["id"] + break + if node_id is not None: + values = prompt[str(node_id)] + if "inputs" in values: + if widget_name in values["inputs"]: + return values["inputs"][widget_name] + raise NameError(f"Widget not found: {node_name}.{widget_name}") + raise NameError(f"Node not found: {node_name}.{widget_name}") + + def get_size(self, target, property): + if isinstance(target, dict) and "samples" in target: + # Latent + if property == "width": + return target["samples"].shape[3] * 8 + return target["samples"].shape[2] * 8 + else: + # Image + if property == "width": + return target.shape[2] + return target.shape[1] + + def evaluate(self, expression, extra_pnginfo, prompt, a=None, b=None, c=None): + expression = expression.replace('\n', ' ').replace('\r', '') + node = ast.parse(expression, mode='eval').body + + lookup = {"a": a, "b": b, "c": c} + + def eval_expr(node): + if isinstance(node, ast.Num): + return node.n + elif isinstance(node, ast.BinOp): + return operators[type(node.op)](float(eval_expr(node.left)), float(eval_expr(node.right))) + elif isinstance(node, ast.UnaryOp): + return operators[type(node.op)](eval_expr(node.operand)) + elif isinstance(node, ast.Attribute): + if node.value.id in lookup: + if node.attr == "width" or node.attr == "height": + return self.get_size(lookup[node.value.id], node.attr) + + return self.get_widget_value(extra_pnginfo, prompt, node.value.id, node.attr) + elif isinstance(node, ast.Name): + if node.id in lookup: + val = lookup[node.id] + if isinstance(val, (int, float, complex)): + return val + else: + raise TypeError( + f"Compex types (LATENT/IMAGE) need to reference their 
width/height, e.g. {node.id}.width")
+            raise NameError(f"Name not found: {node.id}")
+        elif isinstance(node, ast.Call):
+            if node.func.id in functions:
+                fn = functions[node.func.id]
+                l = len(node.args)
+                if l < fn["args"][0] or (fn["args"][1] is not None and l > fn["args"][1]):
+                    if fn["args"][1] is None:
+                        toErr = " or more"
+                    else:
+                        toErr = f" to {fn['args'][1]}"
+                    raise SyntaxError(
+                        f"Invalid function call: {node.func.id} requires {fn['args'][0]}{toErr} arguments")
+                args = []
+                for arg in node.args:
+                    args.append(eval_expr(arg))
+                return fn["call"](*args)
+            raise NameError(f"Invalid function call: {node.func.id}")
+        else:
+            raise TypeError(node)
+
+        r = eval_expr(node)
+        return {"ui": {"value": [r]}, "result": (int(r), float(r),)}
+
+
+NODE_CLASS_MAPPINGS = {
+    "MathExpression|pysssss": MathExpression,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "MathExpression|pysssss": "Math Expression 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py b/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9b4daf7df5a176d2c09a7e0a5e1e01f8b1b350a
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/model_info.py
@@ -0,0 +1,115 @@
+import hashlib
+import json
+from aiohttp import web
+from server import PromptServer
+import folder_paths
+import os
+
+
+def get_metadata(filepath):
+    with open(filepath, "rb") as file:
+        # https://github.com/huggingface/safetensors#format
+        # 8 bytes: N, an unsigned little-endian 64-bit integer, containing the size of the header
+        header_size = int.from_bytes(file.read(8), "little", signed=False)
+
+        if header_size <= 0:
+            raise BufferError("Invalid header size")
+
+        # validate that the full header was actually read
+        header = file.read(header_size)
+        if len(header) < header_size:
+            raise BufferError("Invalid header")
+
+        header_json = json.loads(header)
+        return header_json["__metadata__"] if "__metadata__" in header_json else None
+
+
+@PromptServer.instance.routes.post("/pysssss/metadata/notes/{name}")
+async def save_notes(request):
+    name = request.match_info["name"]
+    pos = name.index("/")
+    type = name[0:pos]
+    name = name[pos+1:]
+
+    file_path = None
+    if type == "embeddings":
+        name = name.lower()
+        files = folder_paths.get_filename_list(type)
+        for f in files:
+            lower_f = f.lower()
+            if lower_f == name:
+                file_path = folder_paths.get_full_path(type, f)
+            else:
+                n = os.path.splitext(f)[0].lower()
+                if n == name:
+                    file_path = folder_paths.get_full_path(type, f)
+
+            if file_path is not None:
+                break
+    else:
+        file_path = folder_paths.get_full_path(type, name)
+    if not file_path:
+        return web.Response(status=404)
+
+    file_no_ext = os.path.splitext(file_path)[0]
+    info_file = file_no_ext + ".txt"
+    with open(info_file, "w") as f:
+        f.write(await request.text())
+
+    return web.Response(status=200)
+
+
+@PromptServer.instance.routes.get("/pysssss/metadata/{name}")
+async def load_metadata(request):
+    name = request.match_info["name"]
+    pos = name.index("/")
+    type = name[0:pos]
+    name = name[pos+1:]
+
+    file_path = None
+    if type == "embeddings":
+        name = name.lower()
+        files = folder_paths.get_filename_list(type)
+        for f in files:
+            lower_f = f.lower()
+            if lower_f == name:
+                file_path = folder_paths.get_full_path(type, f)
+            else:
+                n = os.path.splitext(f)[0].lower()
+                if n == name:
+                    file_path = folder_paths.get_full_path(type, f)
+
+            if file_path is not None:
+                break
+    else:
+        file_path = folder_paths.get_full_path(type, name)
+    if not file_path:
+        return web.Response(status=404)
+
+    try:
+        meta = get_metadata(file_path)
+
+
+@PromptServer.instance.routes.post("/pysssss/metadata/notes/{name}")
+async def save_notes(request):
+    name = request.match_info["name"]
+    pos = name.index("/")
+    type = name[0:pos]
+    name = name[pos+1:]
+
+    file_path = None
+    if type == "embeddings":
+        name = name.lower()
+        files = folder_paths.get_filename_list(type)
+        for f in files:
+            lower_f = f.lower()
+            if lower_f == name:
+                file_path = folder_paths.get_full_path(type, f)
+            else:
+                n = os.path.splitext(f)[0].lower()
+                if n == name:
+                    file_path = folder_paths.get_full_path(type, f)
+
+            if file_path is not None:
+                break
+    else:
+        file_path = folder_paths.get_full_path(type, name)
+    if not file_path:
+        return web.Response(status=404)
+
+    file_no_ext = os.path.splitext(file_path)[0]
+    info_file = file_no_ext + ".txt"
+    with open(info_file, "w") as f:
+        f.write(await request.text())
+
+    return web.Response(status=200)
+
+
+@PromptServer.instance.routes.get("/pysssss/metadata/{name}")
+async def load_metadata(request):
+    name = request.match_info["name"]
+    pos = name.index("/")
+    type = name[0:pos]
+    name = name[pos+1:]
+
+    file_path = None
+    if type == "embeddings":
+        name = name.lower()
+        files = folder_paths.get_filename_list(type)
+        for f in files:
+            lower_f = f.lower()
+            if lower_f == name:
+                file_path = folder_paths.get_full_path(type, f)
+            else:
+                n = os.path.splitext(f)[0].lower()
+                if n == name:
+                    file_path = folder_paths.get_full_path(type, f)
+
+            if file_path is not None:
+                break
+    else:
+        file_path = folder_paths.get_full_path(type, name)
+    if not file_path:
+        return web.Response(status=404)
+
+    try:
+        meta = get_metadata(file_path)
+    except Exception:
+        meta = None
+
+    if meta is None:
+        meta = {}
+
+    file_no_ext = os.path.splitext(file_path)[0]
+
+    info_file = file_no_ext + ".txt"
+    if os.path.isfile(info_file):
+        with open(info_file, "r") as f:
+            meta["pysssss.notes"] = f.read()
+
+    hash_file = file_no_ext + ".sha256"
+    if os.path.isfile(hash_file):
+        with open(hash_file, "rt") as f:
+            meta["pysssss.sha256"] = f.read()
+    else:
+        with open(file_path, "rb") as f:
+            meta["pysssss.sha256"] = hashlib.sha256(f.read()).hexdigest()
+        with open(hash_file, "wt") as f:
+            f.write(meta["pysssss.sha256"])
+
+    return web.json_response(meta)
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py b/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2e6cf15bc92d979ff100d12f9352d889108a202
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/play_sound.py
@@ -0,0 +1,40 @@
+# Hack: string type that is always equal in not equal comparisons
+class AnyType(str):
+    def __ne__(self, __value: object) -> bool:
+        return False
+
+
+# Our any instance wants to be a wildcard string
+any = AnyType("*")
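+
+# Illustration (assumed graph-validation behavior): ComfyUI rejects a connection
+# when the two socket types compare "!=", so a string whose __ne__ is always
+# False matches anything, e.g. AnyType("*") != "IMAGE" evaluates to False.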
+
+
+class PlaySound:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "any": (any, {}),
+            "mode": (["always", "on empty queue"], {}),
+            "volume": ("FLOAT", {"min": 0, "max": 1, "step": 0.1, "default": 0.5})
+        }}
+
+    FUNCTION = "nop"
+    INPUT_IS_LIST = True
+    OUTPUT_NODE = True
+    RETURN_TYPES = ()
+
+    CATEGORY = "utils"
+
+    def IS_CHANGED(self, **kwargs):
+        return float("NaN")
+
+    def nop(self, any, mode, volume):
+        return {"ui": {"a": []}, "result": ()}
+
+
+NODE_CLASS_MAPPINGS = {
+    "PlaySound|pysssss": PlaySound,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "PlaySound|pysssss": "PlaySound 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py b/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d83c67e2132b9db708ef087ad788d22c58c80c2
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/repeater.py
@@ -0,0 +1,46 @@
+# Hack: string type that is always equal in not equal comparisons
+class AnyType(str):
+    def __ne__(self, __value: object) -> bool:
+        return False
+
+
+# Our any instance wants to be a wildcard string
+any = AnyType("*")
+
+
+class Repeater:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "source": (any, {}),
+            "repeats": ("INT", {"min": 0, "max": 5000, "default": 2}),
+            "output": (["single", "multi"], {}),
+            "node_mode": (["reuse", "create"], {}),
+        }}
+
+    RETURN_TYPES = (any,)
+    FUNCTION = "repeat"
+    OUTPUT_NODE = False
+    OUTPUT_IS_LIST = (True,)
+
+    CATEGORY = "utils"
+
+    def repeat(self, repeats, output, node_mode, **kwargs):
+        if output == "multi":
+            # Multi outputs are split to individual nodes on the frontend when serializing
+            return ([kwargs["source"]],)
+        elif node_mode == "reuse":
+            # When reusing we have a single input node, repeat that N times
+            return ([kwargs["source"]] * repeats,)
+        else:
+            # When creating new nodes, they'll be added dynamically when the graph is serialized
+            return (list(kwargs.values()),)
+
+
+NODE_CLASS_MAPPINGS = {
+    "Repeater|pysssss": Repeater,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "Repeater|pysssss": "Repeater 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py b/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b67bbfa89d6c7cf39a22795d93d4ecb1fd48aca
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/reroute_primitive.py
@@ -0,0 +1,59 @@
+# Hack: string type that is always equal in not equal comparisons
+class AnyType(str):
+    def __ne__(self, __value: object) -> bool:
+        return False
+
+
+# Our any instance wants to be a wildcard string
+any = AnyType("*")
+
+
+class ReroutePrimitive:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {"value": (any, )},
+        }
+
+    @classmethod
+    def VALIDATE_INPUTS(s, **kwargs):
+        return True
+
+    RETURN_TYPES = (any,)
+    FUNCTION = "route"
+    CATEGORY = "utils"
+
+    def route(self, value):
+        return (value,)
+
+
+class MultiPrimitive:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {},
+            "optional": {"value": (any, )},
+        }
+
+    @classmethod
+    def VALIDATE_INPUTS(s, **kwargs):
+        return True
+
+    RETURN_TYPES = (any,)
+    FUNCTION = "listify"
+    CATEGORY = "utils"
+    OUTPUT_IS_LIST = (True,)
+
+    def listify(self, **kwargs):
+        return (list(kwargs.values()),)
+
+
+NODE_CLASS_MAPPINGS = {
+    "ReroutePrimitive|pysssss": ReroutePrimitive,
+    # "MultiPrimitive|pysssss": MultiPrimitive,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "ReroutePrimitive|pysssss": "Reroute Primitive 🐍",
+    # "MultiPrimitive|pysssss": "Multi Primitive 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py b/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0fb564d43ea30eb9adc76fc1bf3c4246d97818f
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/show_text.py
@@ -0,0 +1,37 @@
+class ShowText:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "text": ("STRING", {"forceInput": True}),
+            },
+            "hidden": {
+                "unique_id": "UNIQUE_ID",
+                "extra_pnginfo": "EXTRA_PNGINFO",
+            },
+        }
+
+    INPUT_IS_LIST = True
+    RETURN_TYPES = ("STRING",)
+    FUNCTION = "notify"
+    OUTPUT_NODE = True
+    OUTPUT_IS_LIST = (True,)
+
+    CATEGORY = "utils"
+
+    def notify(self, text, unique_id=None, extra_pnginfo=None):
+        if unique_id and extra_pnginfo and "workflow" in extra_pnginfo[0]:
+            workflow = extra_pnginfo[0]["workflow"]
+            node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id[0]), None)
+            if node:
+                node["widgets_values"] = [text]
+        return {"ui": {"text": text}, "result": (text,)}
+
+
+NODE_CLASS_MAPPINGS = {
+    "ShowText|pysssss": ShowText,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "ShowText|pysssss": "Show Text 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py b/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..532e9744896e7d3a20f34347e1e9a6f25b2dd3df
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/string_function.py
@@ -0,0 +1,54 @@
+import re
+
+class StringFunction:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "action": (["append", "replace"], {}),
+                "tidy_tags": (["yes", "no"], {}),
+                "text_a": ("STRING", {"multiline": True}),
+                "text_b": ("STRING", {"multiline": True}),
+            },
+            "optional": {
+                "text_c": ("STRING", {"multiline": True})
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    FUNCTION = "exec"
+    CATEGORY = "utils"
+    OUTPUT_NODE = True
+
+    def exec(self, action, tidy_tags, text_a, text_b, text_c=""):
+        # Converted inputs are sent as the string 'undefined' if not connected
+        if text_a == "undefined":
+            text_a = ""
+        if text_b == "undefined":
+            text_b = ""
+        if text_c == "undefined":
+            text_c = ""
+
+        tidy_tags = tidy_tags == "yes"
+        out = ""
+        if action == "append":
+            out = (", " if tidy_tags else "").join(filter(None, [text_a, text_b, text_c]))
+        else:
+            if text_c is None:
+                text_c = ""
+            if text_b.startswith("/") and text_b.endswith("/"):
+                regex = text_b[1:-1]
+                out = re.sub(regex, text_c, text_a)
+            else:
+                out = text_a.replace(text_b, text_c)
+        if tidy_tags:
+            # Collapse doubled spaces and stray commas left by the join/replace
+            out = out.replace("  ", " ").replace(" ,", ",").replace(",,", ",").replace(",,", ",")
+        return {"ui": {"text": (out,)}, "result": (out,)}
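+
+# Usage sketch for the "replace" action above: wrapping text_b in slashes switches
+# to regex substitution, e.g. text_a="a1b22c", text_b="/\d+/", text_c="#" -> "a#b#c";
+# anything else falls through to a plain str.replace.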
"").join(filter(None, [text_a, text_b, text_c])) + else: + if text_c is None: + text_c = "" + if text_b.startswith("/") and text_b.endswith("/"): + regex = text_b[1:-1] + out = re.sub(regex, text_c, text_a) + else: + out = text_a.replace(text_b, text_c) + if tidy_tags: + out = out.replace(" ", " ").replace(" ,", ",").replace(",,", ",").replace(",,", ",") + return {"ui": {"text": (out,)}, "result": (out,)} + +NODE_CLASS_MAPPINGS = { + "StringFunction|pysssss": StringFunction, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "StringFunction|pysssss": "String Function 🐍", +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py b/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py new file mode 100644 index 0000000000000000000000000000000000000000..5b4f9174f9faca37a77c5ee06f7ef57726ca4725 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/py/text_files.py @@ -0,0 +1,192 @@ +import os +import folder_paths +import json +from server import PromptServer +import glob +from aiohttp import web + + +def get_allowed_dirs(): + dir = os.path.abspath(os.path.join(__file__, "../../user")) + file = os.path.join(dir, "text_file_dirs.json") + with open(file, "r") as f: + return json.loads(f.read()) + + +def get_valid_dirs(): + return get_allowed_dirs().keys() + + +def get_dir_from_name(name): + dirs = get_allowed_dirs() + if name not in dirs: + raise KeyError(name + " dir not found") + + path = dirs[name] + path = path.replace("$input", folder_paths.get_input_directory()) + path = path.replace("$output", folder_paths.get_output_directory()) + path = path.replace("$temp", folder_paths.get_temp_directory()) + return path + + +def is_child_dir(parent_path, child_path): + parent_path = os.path.abspath(parent_path) + child_path = os.path.abspath(child_path) + return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path]) + + +def get_real_path(dir): + dir = dir.replace("/**/", "/") + dir = os.path.abspath(dir) + dir = os.path.split(dir)[0] + return dir + + +@PromptServer.instance.routes.get("/pysssss/text-file/{name}") +async def get_files(request): + name = request.match_info["name"] + dir = get_dir_from_name(name) + recursive = "/**/" in dir + # Ugh cant use root_path on glob... lazy hack.. 
+
+
+def is_child_dir(parent_path, child_path):
+    parent_path = os.path.abspath(parent_path)
+    child_path = os.path.abspath(child_path)
+    return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path])
+
+
+def get_real_path(dir):
+    dir = dir.replace("/**/", "/")
+    dir = os.path.abspath(dir)
+    dir = os.path.split(dir)[0]
+    return dir
+
+
+@PromptServer.instance.routes.get("/pysssss/text-file/{name}")
+async def get_files(request):
+    name = request.match_info["name"]
+    dir = get_dir_from_name(name)
+    recursive = "/**/" in dir
+    # Ugh, can't use root_path on glob... lazy hack..
+    pre = get_real_path(dir)
+
+    files = list(map(lambda t: os.path.relpath(t, pre),
+                     glob.glob(dir, recursive=recursive)))
+
+    if len(files) == 0:
+        files = ["[none]"]
+    return web.json_response(files)
+
+
+def get_file(root_dir, file):
+    if file == "[none]" or not file or not file.strip():
+        raise ValueError("No file")
+
+    root_dir = get_dir_from_name(root_dir)
+    root_dir = get_real_path(root_dir)
+    full_path = os.path.join(root_dir, file)
+
+    if not is_child_dir(root_dir, full_path):
+        raise ReferenceError()
+
+    return full_path
+
+
+class TextFileNode:
+    RETURN_TYPES = ("STRING",)
+    CATEGORY = "utils"
+
+    @classmethod
+    def VALIDATE_INPUTS(self, root_dir, file, **kwargs):
+        self.file = get_file(root_dir, file)
+        return True
+
+    def load_text(self, **kwargs):
+        with open(self.file, "r") as f:
+            return (f.read(), )
+
+
+class LoadText(TextFileNode):
+    @classmethod
+    def IS_CHANGED(self, **kwargs):
+        return os.path.getmtime(self.file)
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "root_dir": (list(get_valid_dirs()), {}),
+                "file": (["[none]"], {
+                    "pysssss.binding": [{
+                        "source": "root_dir",
+                        "callback": [{
+                            "type": "set",
+                            "target": "$this.disabled",
+                            "value": True
+                        }, {
+                            "type": "fetch",
+                            "url": "/pysssss/text-file/{$source.value}",
+                            "then": [{
+                                "type": "set",
+                                "target": "$this.options.values",
+                                "value": "$result"
+                            }, {
+                                "type": "validate-combo"
+                            }, {
+                                "type": "set",
+                                "target": "$this.disabled",
+                                "value": False
+                            }]
+                        }],
+                    }]
+                })
+            },
+        }
+
+    FUNCTION = "load_text"
+
+
+class SaveText(TextFileNode):
+    @classmethod
+    def IS_CHANGED(self, **kwargs):
+        return float("nan")
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "root_dir": (list(get_valid_dirs()), {}),
+                "file": ("STRING", {"default": "file.txt"}),
+                "append": (["append", "overwrite", "new only"], {}),
+                "insert": ("BOOLEAN", {
+                    "default": True, "label_on": "new line", "label_off": "none",
+                    "pysssss.binding": [{
+                        "source": "append",
+                        "callback": [{
+                            "type": "if",
+                            "condition": [{
+                                "left": "$source.value",
+                                "op": "eq",
+                                "right": '"append"'
+                            }],
+                            "true": [{
+                                "type": "set",
+                                "target": "$this.disabled",
+                                "value": False
+                            }],
+                            "false": [{
+                                "type": "set",
+                                "target": "$this.disabled",
+                                "value": True
+                            }],
+                        }]
+                    }]
+                }),
+                "text": ("STRING", {"forceInput": True, "multiline": True})
+            },
+        }
+
+    FUNCTION = "write_text"
+
+    def write_text(self, root_dir, file, append, insert, text):
+        if append == "new only" and os.path.exists(self.file):
+            raise FileExistsError(self.file + " already exists and 'new only' is selected.")
+        with open(self.file, "a+" if append == "append" else "w") as f:
+            is_append = f.tell() != 0
+            if is_append and insert:
+                f.write("\n")
+            f.write(text)
+
+        return super().load_text()
+
+
+NODE_CLASS_MAPPINGS = {
+    "LoadText|pysssss": LoadText,
+    "SaveText|pysssss": SaveText,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "LoadText|pysssss": "Load Text 🐍",
+    "SaveText|pysssss": "Save Text 🐍",
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py b/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py
new file mode 100644
index 0000000000000000000000000000000000000000..ade13a11a16322166a7340dd156659c771c7ff07
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/py/workflows.py
@@ -0,0 +1,59 @@
+from server import PromptServer
+from aiohttp import web
+import os
+import inspect
+import json
+import importlib
+import sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+import pysssss
+
+root_directory = os.path.dirname(inspect.getfile(PromptServer))
+workflows_directory = os.path.join(root_directory, "pysssss-workflows")
+workflows_directory = pysssss.get_config_value(
+    "workflows.directory", workflows_directory)
+
+NODE_CLASS_MAPPINGS = {}
+NODE_DISPLAY_NAME_MAPPINGS = {}
+
+
+@PromptServer.instance.routes.get("/pysssss/workflows")
+async def get_workflows(request):
+    files = []
+    for dirpath, directories, filenames in os.walk(workflows_directory):
+        for file in filenames:
+            if file.endswith(".json"):
+                files.append(os.path.relpath(os.path.join(
+                    dirpath, file), workflows_directory))
+    return web.json_response(list(map(lambda f: os.path.splitext(f)[0].replace("\\", "/"), files)))
+
+
+@PromptServer.instance.routes.get("/pysssss/workflows/{name:.+}")
+async def get_workflow(request):
+    file = os.path.abspath(os.path.join(
+        workflows_directory, request.match_info["name"] + ".json"))
+    if os.path.commonpath([file, workflows_directory]) != workflows_directory:
+        return web.Response(status=403)
+
+    return web.FileResponse(file)
+
+
+@PromptServer.instance.routes.post("/pysssss/workflows")
+async def save_workflow(request):
+    json_data = await request.json()
+    file = os.path.abspath(os.path.join(
+        workflows_directory, json_data["name"] + ".json"))
+    if os.path.commonpath([file, workflows_directory]) != workflows_directory:
+        return web.Response(status=403)
+
+    if os.path.exists(file) and not json_data.get("overwrite", False):
+        return web.Response(status=409)
+
+    sub_path = os.path.dirname(file)
+    if not os.path.exists(sub_path):
+        os.makedirs(sub_path)
+
+    with open(file, "w") as f:
+        f.write(json.dumps(json_data["workflow"]))
+
+    return web.Response(status=201)
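+
+# Request sketch for the route above (assumed client payload):
+#   POST /pysssss/workflows {"name": "dir/my-workflow", "workflow": {...}, "overwrite": true}
+# Responses: 201 saved, 409 file exists without overwrite, 403 name escapes the
+# workflows directory.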
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json
new file mode 100644
index 0000000000000000000000000000000000000000..251e3c720067fc06b4458db51cb8c12182c05dee
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.example.json
@@ -0,0 +1,7 @@
+{
+    "name": "CustomScripts",
+    "logging": false,
+    "workflows": {
+        "directory": "C:\\ComfyUI-Workflows"
+    }
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ca06420135d024f925a0d70d5dd0328c69b271e
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.json
@@ -0,0 +1,4 @@
+{
+    "name": "CustomScripts",
+    "logging": false
+}
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py
new file mode 100644
index 0000000000000000000000000000000000000000..a61a5547045f38b2eb337e5827f173c49142adc1
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/pysssss.py
@@ -0,0 +1,290 @@
+import asyncio
+import os
+import json
+import shutil
+import inspect
+import aiohttp
+from server import PromptServer
+from tqdm import tqdm
+
+config = None
+
+
+def is_logging_enabled():
+    config = get_extension_config()
+    if "logging" not in config:
+        return False
+    return config["logging"]
+
+
+def log(message, type=None, always=False, name=None):
+    if not always and not is_logging_enabled():
+        return
+
+    if type is not None:
+        message = f"[{type}] {message}"
+
+    if name is None:
+        name = get_extension_config()["name"]
+
+    print(f"(pysssss:{name}) {message}")
+
+
+def get_ext_dir(subpath=None, mkdir=False):
+    dir = os.path.dirname(__file__)
+    if subpath is not None:
+        dir = os.path.join(dir, subpath)
+
+    dir = os.path.abspath(dir)
+
+    if mkdir and not os.path.exists(dir):
+        os.makedirs(dir)
+    return dir
+
+
+def get_comfy_dir(subpath=None, mkdir=False):
+    dir = os.path.dirname(inspect.getfile(PromptServer))
+    if subpath is not None:
+        dir = os.path.join(dir, subpath)
+
+    dir = os.path.abspath(dir)
+
+    if mkdir and not os.path.exists(dir):
+        os.makedirs(dir)
+    return dir
+
+
+def get_web_ext_dir():
+    config = get_extension_config()
+    name = config["name"]
+    dir = get_comfy_dir("web/extensions/pysssss")
+    if not os.path.exists(dir):
+        os.makedirs(dir)
+    dir = os.path.join(dir, name)
+    return dir
+
+
+def get_extension_config(reload=False):
+    global config
+    if not reload and config is not None:
+        return config
+
+    config_path = get_ext_dir("pysssss.json")
+    if not os.path.exists(config_path):
+        log("Missing pysssss.json, this extension may not work correctly. Please reinstall the extension.",
+            type="ERROR", always=True, name="???")
+        print(f"Extension path: {get_ext_dir()}")
+        return {"name": "Unknown", "version": -1}
+    with open(config_path, "r") as f:
+        config = json.loads(f.read())
+    return config
+
+
+def link_js(src, dst):
+    src = os.path.abspath(src)
+    dst = os.path.abspath(dst)
+    if os.name == "nt":
+        try:
+            import _winapi
+            _winapi.CreateJunction(src, dst)
+            return True
+        except Exception:
+            pass
+    try:
+        os.symlink(src, dst)
+        return True
+    except Exception:
+        import logging
+        logging.exception('')
+        return False
+
+
+def is_junction(path):
+    if os.name != "nt":
+        return False
+    try:
+        return bool(os.readlink(path))
+    except OSError:
+        return False
+
+
+def install_js():
+    src_dir = get_ext_dir("web/js")
+    if not os.path.exists(src_dir):
+        log("No JS")
+        return
+
+    should_install = should_install_js()
+    if should_install:
+        log("It looks like you're running an old version of ComfyUI that requires manual setup of web files; it is recommended you update your installation.", "warning", True)
+    dst_dir = get_web_ext_dir()
+    linked = os.path.islink(dst_dir) or is_junction(dst_dir)
+    if linked or os.path.exists(dst_dir):
+        if linked:
+            if should_install:
+                log("JS already linked")
+            else:
+                os.unlink(dst_dir)
+                log("JS unlinked, PromptServer will serve extension")
+        elif not should_install:
+            shutil.rmtree(dst_dir)
+            log("JS deleted, PromptServer will serve extension")
+        return
+
+    if not should_install:
+        log("JS skipped, PromptServer will serve extension")
+        return
+
+    if link_js(src_dir, dst_dir):
+        log("JS linked")
+        return
+
+    log("Copying JS files")
+    shutil.copytree(src_dir, dst_dir, dirs_exist_ok=True)
+
+
+def should_install_js():
+    return not hasattr(PromptServer.instance, "supports") or "custom_nodes_from_web" not in PromptServer.instance.supports
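+
+# Summary of the logic above: newer ComfyUI advertises "custom_nodes_from_web" in
+# PromptServer.instance.supports and serves each extension's web/js itself, so any
+# stale link or copy under web/extensions/pysssss is removed; older builds get a
+# junction/symlink into that folder, falling back to a plain copy.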
+
+
+def init(check_imports=None):
+    log("Init")
+
+    if check_imports is not None:
+        import importlib.util
+        for imp in check_imports:
+            spec = importlib.util.find_spec(imp)
+            if spec is None:
+                log(f"{imp} is required, please check requirements are installed.",
+                    type="ERROR", always=True)
+                return False
+
+    install_js()
+    return True
+
+
+def get_async_loop():
+    loop = None
+    try:
+        loop = asyncio.get_event_loop()
+    except Exception:
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+    return loop
+
+
+def get_http_session():
+    loop = get_async_loop()
+    return aiohttp.ClientSession(loop=loop)
+
+
+async def download(url, stream, update_callback=None, session=None):
+    close_session = False
+    if session is None:
+        close_session = True
+        session = get_http_session()
+    try:
+        async with session.get(url) as response:
+            size = int(response.headers.get('content-length', 0)) or None
+
+            with tqdm(
+                unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1], total=size,
+            ) as progressbar:
+                perc = 0
+                async for chunk in response.content.iter_chunked(2048):
+                    stream.write(chunk)
+                    progressbar.update(len(chunk))
+                    # Only fire the callback when the rounded percentage changes
+                    if update_callback is not None and progressbar.total is not None and progressbar.total != 0:
+                        last = perc
+                        perc = round(progressbar.n / progressbar.total, 2)
+                        if perc != last:
+                            await update_callback(perc)
+    finally:
+        if close_session and session is not None:
+            await session.close()
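+
+# Usage sketch (hypothetical URL and callback) for the downloader above:
+#   async def on_progress(perc):
+#       print(f"{perc:.0%}")
+#   with open("model.bin", "wb") as f:
+#       await download("https://example.com/model.bin", f, on_progress)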
+
+
+async def download_to_file(url, destination, update_callback=None, is_ext_subpath=True, session=None):
+    if is_ext_subpath:
+        destination = get_ext_dir(destination)
+    with open(destination, mode='wb') as f:
+        # Must be awaited; otherwise the coroutine never runs and the file is left empty
+        await download(url, f, update_callback, session)
+
+
+def wait_for_async(async_fn, loop=None):
+    res = []
+
+    async def run_async():
+        r = await async_fn()
+        res.append(r)
+
+    if loop is None:
+        try:
+            loop = asyncio.get_event_loop()
+        except Exception:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+    loop.run_until_complete(run_async())
+
+    return res[0]
+
+
+def update_node_status(client_id, node, text, progress=None):
+    if client_id is None:
+        client_id = PromptServer.instance.client_id
+
+    if client_id is None:
+        return
+
+    PromptServer.instance.send_sync("pysssss/update_status", {
+        "node": node,
+        "progress": progress,
+        "text": text
+    }, client_id)
+
+
+async def update_node_status_async(client_id, node, text, progress=None):
+    if client_id is None:
+        client_id = PromptServer.instance.client_id
+
+    if client_id is None:
+        return
+
+    await PromptServer.instance.send("pysssss/update_status", {
+        "node": node,
+        "progress": progress,
+        "text": text
+    }, client_id)
+
+
+def get_config_value(key, default=None, throw=False):
+    split = key.split(".")
+    obj = get_extension_config()
+    for s in split:
+        if s in obj:
+            obj = obj[s]
+        else:
+            if throw:
+                raise KeyError("Configuration key missing: " + key)
+            else:
+                return default
+    return obj
+
+
+def is_inside_dir(root_dir, check_path):
+    root_dir = os.path.abspath(root_dir)
+    if not os.path.isabs(check_path):
+        check_path = os.path.abspath(os.path.join(root_dir, check_path))
+    return os.path.commonpath([check_path, root_dir]) == root_dir
+
+
+def get_child_dir(root_dir, child_path, throw_if_outside=True):
+    child_path = os.path.abspath(os.path.join(root_dir, child_path))
+    if is_inside_dir(root_dir, child_path):
+        return child_path
+    if throw_if_outside:
+        raise NotADirectoryError(
+            "Saving outside the target folder is not allowed.")
+    return None
diff --git a/custom_nodes/ComfyUI-Custom-Scripts/user/autocomplete.txt b/custom_nodes/ComfyUI-Custom-Scripts/user/autocomplete.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eb0ce5f2a83cca5c0d08c0b86c5c0b5cab5dac80
--- /dev/null
+++ b/custom_nodes/ComfyUI-Custom-Scripts/user/autocomplete.txt
@@ -0,0 +1,100000 @@
+1girl,4114588
+solo,3426446
+highres,3008413
+long hair,2898315
+commentary request,2610959
+breasts,2252741
+looking at viewer,2017032
+blush,1987494
+smile,1847264
+short hair,1530601
+open mouth,1521316
+bangs,1446592
+blue eyes,1192965
+multiple girls,1097189
+skirt,1072680
+blonde hair,1070839
+large breasts,1051375
+brown hair,1046479
+simple background,1033328
+shirt,956884
+black hair,951555
+hair ornament,909358
+absurdres,889451
+red eyes,874986
+thighhighs,871535
+hat,860554
+gloves,859727
+1boy,852461
+bad id,850533
+long sleeves,847925
+white
background,842744 +dress,813386 +original,800843 +ribbon,776028 +bow,773755 +touhou,769563 +navel,762061 +bad pixiv id,714106 +2girls,713683 +photoshop (medium),702997 +holding,696989 +animal ears,695502 +cleavage,672733 +hair between eyes,661745 +bare shoulders,637748 +commentary,635624 +brown eyes,632789 +twintails,632535 +medium breasts,624978 +jewelry,622350 +sitting,612015 +very long hair,601723 +underwear,596262 +closed mouth,589051 +nipples,575314 +school uniform,569565 +green eyes,568502 +blue hair,547897 +standing,532306 +purple eyes,520583 +collarbone,504733 +panties,495559 +monochrome,470985 +tail,470651 +jacket,469260 +translated,459108 +swimsuit,457313 +full body,446282 +closed eyes,442906 +hair ribbon,440003 +kantai collection,436085 +yellow eyes,433326 +weapon,420275 +ponytail,419615 +upper body,417829 +purple hair,415161 +white shirt,413176 +pink hair,412002 +ass,411747 +comic,405140 +braid,404122 +flower,400183 +ahoge,395736 +:d,376147 +short sleeves,375920 +hair bow,372662 +greyscale,371431 +hetero,369606 +white hair,361604 +male focus,359240 +heart,350239 +pantyhose,345097 +bikini,340940 +sidelocks,336241 +thighs,332714 +nude,330496 +red hair,327685 +cowboy shot,324472 +pleated skirt,322440 +sweat,322381 +hairband,320155 +multicolored hair,318977 +translation request,318605 +earrings,316173 +small breasts,315987 +grey hair,314794 +boots,313016 +lying,303118 +censored,300048 +outdoors,296370 +frills,295934 +parted lips,292170 +detached sleeves,289409 +one eye closed,287946 +food,286933 +japanese clothes,282381 +multiple boys,278355 +green hair,278290 +wings,277342 +open clothes,275169 +sky,271218 +necktie,271120 +horns,268888 +penis,266519 +shoes,265277 +fate (series),260162 +glasses,258562 +shorts,253401 +barefoot,252703 +serafuku,246424 +pussy,245806 +teeth,245489 +solo focus,238837 +sleeveless,238082 +day,238001 +alternate costume,236618 +choker,236347 +tongue,235358 +pointy ears,228958 +socks,227884 +black gloves,225896 +elbow gloves,218936 +hairclip,218010 +fang,215043 +midriff,211220 +striped,211065 +puffy sleeves,207646 +shiny,204926 +looking back,203075 +belt,202376 +sword,198728 +collared shirt,197795 +pants,195599 +official art,194421 +artist name,192428 +cloud,192323 +black thighhighs,191915 +indoors,186361 +tears,184656 +cat ears,184506 +fate/grand order,183978 +white gloves,181876 +virtual youtuber,179776 +3girls,178540 +hair flower,177530 +signature,177272 +dark skin,176102 +hand up,175705 +spread legs,173323 +silver hair,172873 +cum,172618 +2boys,170921 +hood,170424 +sex,169737 +idolmaster,168860 +miniskirt,168572 +tongue out,167571 +wide sleeves,167392 +on back,166746 +fingerless gloves,166594 +blunt bangs,166178 +bowtie,164885 +black skirt,164625 +armpits,163586 +pink eyes,163024 +english commentary,163005 +sailor collar,160457 +medium hair,158791 +kimono,158659 +pokemon,157511 +water,154351 +grey background,153495 +necklace,151304 +black legwear,151031 +off shoulder,147990 +chibi,147648 +bag,146939 +hair bun,146389 +clothes lift,146105 +cape,145528 +from behind,145487 +star (symbol),145380 +stomach,145363 +scarf,145354 +twitter username,144376 +bra,143316 +nail polish,142958 +orange hair,142662 +yuri,142648 +white dress,141629 +sweatdrop,141422 +holding weapon,141258 +black footwear,141200 +armor,140506 +rabbit ears,139273 +white panties,139230 +mole,136877 +hair over one eye,135697 +grin,135287 +uniform,134600 +:o,133841 +huge breasts,133500 +blurry,132946 +black eyes,132362 +character name,132310 +apron,132263 +looking at another,130747 +vest,128632 
+black dress,126840 +arm up,125800 +mosaic censoring,125723 +high heels,125612 +vaginal,125586 +red bow,125049 +twin braids,125048 +flat chest,123020 +arms up,122759 +shiny hair,122714 +side ponytail,121984 +bracelet,121896 +collar,121712 +covered nipples,121585 +feet,118991 +dated,118815 +from side,118032 +aqua eyes,117472 +vocaloid,116231 +sweater,115033 +white thighhighs,114692 +two-tone hair,114591 +speech bubble,114527 +leotard,114043 +red ribbon,112196 +two side up,111533 +english text,111457 +dark-skinned female,111075 +open jacket,110983 +tree,110120 +sketch,109877 +cup,109805 +blue sky,109339 +puffy short sleeves,108813 +lips,108716 +zettai ryouiki,107347 +blue skirt,106323 +hololive,106179 +official alternate costume,104403 +groin,104033 +coat,103266 +fingernails,103161 +wet,102577 +genshin impact,102416 +cat tail,102123 +v-shaped eyebrows,101398 +bad twitter id,101357 +streaked hair,101072 +black jacket,101020 +neckerchief,100738 +head tilt,100424 +crop top,99832 +white legwear,99720 +see-through,99593 +orange eyes,98788 +gradient,98281 +hand on hip,98165 +azur lane,98104 +gun,97949 +shiny skin,97431 +animal ear fluff,97071 +idolmaster cinderella girls,96349 +wrist cuffs,96175 +pillow,96077 +book,95815 +sleeves past wrists,95421 +plaid,95011 +looking to the side,95005 +torn clothes,94967 +artist request,94937 +maid,94840 +legs,94507 +grey eyes,94164 +parted bangs,93999 +pokemon (game),93498 +kneehighs,93476 +sash,93173 +military,93012 +maid headdress,92606 +black pantyhose,92414 +cosplay,92051 +petals,91785 +black panties,91619 +hands up,90878 +bare arms,90755 +fur trim,90513 +pubic hair,90201 +gradient background,89409 +symbol-shaped pupils,89395 +fox ears,89386 +one-piece swimsuit,89360 +loli,89189 +short shorts,88997 +ascot,88550 +dutch angle,87871 +black shirt,87107 +clothing cutout,87061 +eyelashes,85352 +open shirt,85035 +no humans,84788 +bare legs,84736 +bar censor,84656 +dress shirt,84293 +sparkle,84241 +mole under eye,84024 +window,83889 +kneeling,83779 +lowres,83651 +pokemon (creature),83524 +4girls,82439 +single braid,82234 +bodysuit,82016 +sleeveless shirt,81849 +hug,81795 +v,81652 +no bra,81328 +strapless,81041 +bell,80835 +saliva,80784 +double bun,80626 +aqua hair,80439 +uncensored,80159 +black headwear,80049 +black ribbon,79781 +military uniform,79091 +bed,78857 +blood,78550 +completely nude,78500 +md5 mismatch,78359 +hoodie,77706 +hatsune miku,77659 +sideboob,77602 +scan,77541 +4koma,77129 +pussy juice,76721 +profile,76676 +black bow,76644 +covered navel,76607 +tattoo,76518 +skindentation,76414 +gradient hair,76251 +makeup,76191 +neck ribbon,75973 +leaning forward,75874 +thigh strap,75182 +mask,74898 +muscular,74818 +no panties,74617 +multiple views,74566 +capelet,74472 +witch hat,74456 +banned artist,74331 +arknights,73750 +anus,73515 +copyright name,73482 +:3,73360 +alternate hairstyle,72994 +fruit,72810 +underboob,72760 +detached collar,72705 +night,71711 +depth of field,71646 +sleeveless dress,71607 +floating hair,71535 +headband,71524 +buttons,71479 +commission,71189 +^ ^,71068 +cameltoe,70817 +blue dress,70407 +copyright request,70143 +cum in pussy,70131 +fox tail,70111 +toes,70067 +side-tie bikini bottom,69831 +bottomless,69405 +black bikini,69362 +shadow,69346 +blurry background,69181 +glowing,69161 +nose blush,69041 +feet out of frame,68989 +red skirt,68553 +rose,68202 +fake animal ears,67687 +swept bangs,67401 +bed sheet,67177 +hakurei reimu,67154 +holding hands,66981 +chain,66832 +headgear,66794 +facial hair,66568 +turtleneck,66393 +bird,66278 
+6+girls,66274 +siblings,66220 +headphones,66160 +colored skin,66044 +ocean,66029 +arm support,64840 +heterochromia,64687 +low twintails,64568 +animal,64448 +halterneck,64444 +umbrella,64428 +frown,64110 +beret,63886 +leaf,63502 +thigh boots,63404 +pov,63396 +embarrassed,63012 +on bed,62508 +one side up,62483 +white headwear,62443 +back,62309 +from above,62263 +fangs,62161 +watermark,62108 +garter straps,62076 +ass visible through thighs,61978 +kirisame marisa,61859 +blue background,61519 +non-web source,61251 +highleg,61030 +scar,60550 +white bikini,60509 +on side,60439 +transparent background,60400 +plaid skirt,60393 +mahou shoujo madoka magica,60061 +upper teeth,59984 +wariza,59972 +blue bow,59909 +mouth hold,59852 +traditional media,59832 +beach,59766 +chair,59725 +parody,59713 +wavy hair,59694 +facial mark,59372 +bandages,59350 +looking away,59190 +female pubic hair,59087 +black choker,58999 +hair tubes,58971 +blush stickers,58896 +shirt lift,58495 +expressionless,58455 +drill hair,58376 +chinese clothes,58184 +brown footwear,57952 +grabbing,57712 +obi,57499 +arms behind back,57461 +eating,57289 +holding sword,57204 +thick thighs,57195 +no shoes,56933 +clothes pull,56785 +heart-shaped pupils,56777 +pantyshot,56545 +topless,56515 +thigh gap,56394 +soles,56234 +short dress,56103 +looking down,56025 +phone,55900 +symbol-only commentary,55769 +fire emblem,55684 +skirt lift,55609 +eyepatch,55472 +magical girl,55341 +stuffed toy,55100 +floral print,55065 +bound,55064 +black shorts,54852 +flying sweatdrops,54783 +wavy mouth,54739 +crossed arms,54738 +hair intakes,54519 +playboy bunny,54374 +girls und panzer,54361 +piercing,54341 +border,54335 +formal,54153 +moon,54017 +love live!,54003 +abs,53468 +leg up,53348 +black pants,53296 +half-closed eyes,53060 +sandals,53019 +from below,52997 +erection,52994 +cover,52966 +cleavage cutout,52916 +sunlight,52668 +table,52641 +single hair bun,52348 +happy,52216 +oral,52161 +red dress,52030 +cat,51593 +squatting,51398 +pink background,51164 +underwear only,51062 +scrunchie,51015 +sunglasses,50834 +chinese commentary,50625 +testicles,50503 +school swimsuit,50489 +halo,50174 +cum on body,50156 +bdsm,50025 +dark-skinned male,49985 +white footwear,49945 +trembling,49688 +mob cap,49616 +blazer,49474 +ring,49342 +wolf ears,49081 +sleeping,49004 +game cg,48639 +final fantasy,48401 +standing on one leg,48365 +backpack,48307 +light brown hair,48178 +eyes visible through hair,48041 +knee boots,47982 +bob cut,47943 +lingerie,47880 +breast grab,47836 +hat ribbon,47823 +thick eyebrows,47814 +partial commentary,47802 +katana,47622 +demon girl,47534 +stuffed animal,47470 +bat wings,47403 +cardigan,47357 +white skirt,47352 +girls' frontline,47253 +frilled dress,47245 +;d,47230 +korean commentary,47220 +crossed legs,47212 +white jacket,47149 +suspenders,47108 +3boys,46808 +helmet,46660 +remilia scarlet,46507 +hood down,46434 +cloudy sky,46289 +antenna hair,46277 +outstretched arms,46024 +cellphone,45981 +crying,45824 +tank top,45779 +cross,45683 +polka dot,45581 +bottle,45552 +grass,45395 +aged down,45353 +bug,45344 +fire,45168 +undressing,45146 +5girls,45141 +crossover,45133 +suit,45094 +crown,44967 +tiara,44951 +feathers,44917 +bent over,44874 +frilled skirt,44768 +breasts out,44629 +light smile,44594 +high ponytail,44590 +holding food,44458 +knife,44420 +pectorals,44318 +animated,44307 +looking up,44282 +couple,44275 +own hands together,44229 +straddling,44128 +denim,44013 +white bow,43897 +black hairband,43861 +x hair ornament,43849 +horse ears,43703 +eyebrows,43694 
+> <,43669 +hair bobbles,43638 +wing collar,43595 +on stomach,43546 +blue shirt,43462 +plant,43377 +areolae,43051 +flandre scarlet,42909 +tan,42823 +lipstick,42805 +outstretched arm,42794 +short twintails,42636 +fellatio,42576 +letterboxed,42511 +bondage,42481 +izayoi sakuya,42221 +robot,42197 +blue ribbon,42082 +girl on top,41971 +white flower,41860 +umamusume,41812 +curtains,41691 +lifted by self,41638 +precure,41585 +animal print,41438 +pointing,41370 +muscular male,41194 +monster girl,41181 +cat girl,41138 +polearm,41085 +pink bow,40937 +revision,40905 +juliet sleeves,40829 +slit pupils,40815 +^^^,40758 +sex from behind,40715 +all fours,40712 +spiked hair,40593 +sisters,40474 +sharp teeth,40407 +crescent,40234 +staff,40001 +granblue fantasy,39964 +hand on own chest,39894 +blue sailor collar,39729 +panty pull,39645 +white socks,39620 +frilled sleeves,39612 +cherry blossoms,39487 +?,39399 +red necktie,39396 +blue jacket,39315 +blouse,39159 +clenched teeth,38917 +jojo no kimyou na bouken,38901 +black background,38739 +towel,38689 +goggles,38666 +cover page,38572 +shaded face,38546 +brooch,38462 +wind,38440 +bike shorts,38398 +head wings,38365 +green skirt,38314 +otoko no ko,38307 +casual,38305 +ground vehicle,38250 +loafers,38014 +gauntlets,37942 +elf,37925 +pink panties,37674 +no pants,37660 +t-shirt,37635 +black bra,37603 +red flower,37584 +demon horns,37512 +fate/stay night,37270 +fox girl,37220 +multiple tails,37183 +building,37145 +shoulder armor,37060 +striped panties,36949 +wristband,36873 +motion lines,36787 +kiss,36774 +black socks,36701 +single thighhigh,36666 +messy hair,36664 +between breasts,36603 +breast press,36557 +surprised,36500 +hat bow,36274 +horse girl,36064 +butterfly,36053 +sheath,36047 +kemono friends,35930 +child,35743 +group sex,35715 +skirt set,35596 +ribbon trim,35463 +character request,35404 +rabbit tail,35388 +third eye,35361 +drooling,35292 +sneakers,35275 +instrument,35253 +clenched hand,35216 +gem,35178 +red shirt,35118 +hakama,35114 +red footwear,35093 +sex toy,35043 +fishnets,34988 +box,34914 +pale skin,34900 +tassel,34815 +musical note,34787 +red bowtie,34624 +admiral (kancolle),34565 +rope,34553 +revealing clothes,34527 +wet clothes,34510 +genderswap,34387 +candy,34365 +blue archive,34329 +ear piercing,34294 +side-tie panties,34277 +facial,34202 +covering,34193 +foreshortening,34125 +steam,34069 +nature,34021 +dog ears,33972 +adapted costume,33970 +anal,33953 +pink dress,33885 +star (sky),33841 +demon tail,33782 +idolmaster (classic),33758 +portrait,33688 +peaked cap,33414 +holding gun,33259 +scenery,33196 +armband,33179 +duplicate,33144 +waist apron,33107 +convenient censoring,33082 +night sky,32890 +breath,32879 +ejaculation,32863 +lace trim,32786 +artoria pendragon (fate),32683 +veil,32627 +china dress,32585 +black nails,32538 +gundam,32500 +alice margatroid,32361 +arms behind head,32284 +tokin hat,32268 +couch,32265 +kochiya sanae,32161 +interlocked fingers,32054 +bandaid,31988 +patchouli knowledge,31880 +love live! 
school idol project,31866 +lace,31864 +microphone,31853 +white apron,31834 +bara,31814 +male pubic hair,31808 +front-tie top,31744 +nijisanji,31616 +hand fan,31602 +hakama skirt,31594 +mole under mouth,31592 +bow panties,31582 +strapless dress,31483 +bridal gauntlets,31482 +holding cup,31435 +yaoi,31359 +mecha,31357 +glowing eyes,31344 +black leotard,31321 +no headwear,31248 +anger vein,31201 +clothes writing,31010 +...,30973 +white ribbon,30947 +christmas,30909 +konpaku youmu,30875 +breasts apart,30806 +bikini top only,30801 +jingle bell,30800 +straight hair,30760 +baseball cap,30749 +cirno,30568 +string bikini,30499 +cum on breasts,30492 +hair over shoulder,30475 +hair flaps,30433 +yellow background,30363 +twin drills,30363 +skin tight,30287 +torn legwear,30238 +:<,30130 +feathered wings,30068 +semi-rimless eyewear,30065 +web address,30064 +hooded jacket,30061 +angry,29877 +facing viewer,29718 +check translation,29708 +light purple hair,29602 +motor vehicle,29565 +cloak,29555 +yakumo yukari,29449 +wolf tail,29391 +eyewear on head,29306 +red nails,29296 +claws,29221 +carrying,29183 +light particles,29145 +side braid,29144 +corset,29104 +micro bikini,29023 +red background,28919 +multiple penises,28902 +pelvic curtain,28877 +mary janes,28806 +knees up,28793 +red headwear,28775 +tentacles,28751 +index finger raised,28574 +bad link,28568 +danganronpa (series),28517 +rifle,28514 +purple dress,28510 +clothing aside,28494 +grey shirt,28474 +smartphone,28466 +bright pupils,28447 +tareme,28419 +multicolored clothes,28388 +:p,28336 +beard,28254 +vertical stripes,28250 +paizuri,28182 +red neckerchief,27971 +single horn,27939 +black necktie,27938 +extra ears,27907 +strap slip,27821 +licking,27773 +finger to mouth,27753 +short hair with long locks,27649 +striped thighhighs,27538 +close-up,27532 +puffy nipples,27512 +white border,27492 +red jacket,27412 +scar on face,27395 +french braid,27351 +androgynous,27341 +snow,27295 +tsurime,27266 +buckle,27227 +full moon,27179 +neck bell,27141 +pom pom (clothes),27021 +eye contact,27001 +forehead,26988 +two-tone background,26942 +upskirt,26901 +striped legwear,26881 +areola slip,26870 +komeiji koishi,26856 +furry,26814 +seiza,26683 +face,26664 +yellow bow,26605 +blue bikini,26512 +pink flower,26508 +genderswap (mtf),26485 +spoilers,26453 +lens flare,26401 +hololive english,26341 +1other,26331 +armlet,26089 +gift,26038 +pink nails,25973 +spikes,25954 +hand on own face,25906 +wading,25848 +between legs,25847 +pendant,25825 +side slit,25804 +shameimaru aya,25795 +handgun,25777 +dual persona,25729 +desk,25687 +camisole,25666 +abyssal ship,25640 +masturbation,25475 +brown gloves,25420 +skin fang,25364 +faceless,25353 +restrained,25331 +plate,25316 +horse tail,25316 +alcohol,25311 +green bow,25155 +after sex,25132 +curvy,25098 +handjob,25080 +spoken heart,25034 +broom,25027 +cross-laced footwear,24988 +ribbed sweater,24980 +o-ring,24978 +sleeves rolled up,24977 +k-on!,24924 +maid apron,24909 +heavy breathing,24889 +ball,24763 +hair scrunchie,24745 +low ponytail,24734 +drinking glass,24709 +eyeshadow,24691 +santa hat,24644 +grey skirt,24582 +cowgirl position,24564 +breast hold,24517 +highleg leotard,24386 +personification,24381 +fujiwara no mokou,24374 +headwear removed,24296 +red gloves,24272 +clenched hands,24262 +forest,24259 +dress lift,24243 +persona,24231 +tray,24174 +reisen udongein inaba,24152 +doujin cover,24103 +headset,24083 +machinery,24061 +cropped legs,24049 +partially submerged,24018 +high heel boots,23988 +floating,23983 +zipper,23962 +blue 
headwear,23914 +wide hips,23905 +smoke,23887 +tanlines,23866 +low-tied long hair,23754 +hair rings,23751 +legs up,23744 +jpeg artifacts,23734 +halloween,23682 +backlighting,23682 +pencil skirt,23665 +single glove,23625 +fish,23610 +hood up,23603 +sweater vest,23523 +large pectorals,23481 +pink skirt,23480 +reflection,23471 +clothed sex,23395 +flying,23358 +lyrical nanoha,23350 +hong meiling,23337 +half updo,23331 +neon genesis evangelion,23309 +sleeves past fingers,23302 +spot color,23297 +panties under pantyhose,23256 +arm behind back,23254 +freckles,23242 +starry sky,23223 +colored sclera,23187 +kemonomimi mode,23177 +white bra,23176 +nontraditional miko,23158 +asymmetrical legwear,23139 +strapless leotard,23111 +sailor dress,23108 +holding book,23081 +puffy long sleeves,23064 +tearing up,23045 +brown background,23006 +arm at side,23006 +akemi homura,22991 +happy birthday,22956 +long fingernails,22893 +blue nails,22871 +red bikini,22870 +!,22798 +garter belt,22643 +dual wielding,22618 +6+boys,22607 +rain,22602 +cuffs,22587 +red-framed eyewear,22586 +santa costume,22545 +fingering,22544 +walking,22523 +bubble,22498 +cropped jacket,22477 +komeiji satori,22476 +epaulettes,22442 +idolmaster shiny colors,22395 +frilled bikini,22378 +crossdressing,22372 +hands,22364 +innertube,22356 +ribbon-trimmed sleeves,22260 +dragon horns,22250 +teacup,22215 +suzumiya haruhi no yuuutsu,22207 +legs apart,22194 +naughty face,22170 +out of frame,22166 +clothed female nude male,22153 +contrapposto,22148 +pixel-perfect duplicate,22143 +fate/extra,22107 +thong,22074 +cake,21989 +kaname madoka,21959 +black wings,21916 +pink ribbon,21910 +demon wings,21896 +condom,21888 +arm behind head,21879 +oni horns,21850 +veins,21792 +white pantyhose,21766 +short ponytail,21739 +rape,21666 +bowl,21655 +world witches series,21632 +hat ornament,21622 +beads,21608 +denim shorts,21571 +saigyouji yuyuko,21570 +curly hair,21567 +purple background,21559 +green background,21559 +black-framed eyewear,21535 +pokemon swsh,21472 +one-hour drawing challenge,21439 +holding phone,21428 +gym uniform,21397 +knee up,21382 +cumdrip,21331 +spread pussy,21318 +fur collar,21299 +school bag,21279 +resolution mismatch,21273 +blue shorts,21260 +:q,21228 +doggystyle,21218 +lolita fashion,21152 +sun hat,21101 +pauldrons,21077 +black sailor collar,21066 +skeb commission,21062 +empty eyes,21017 +goggles on head,21006 +outside border,21005 +spoken ellipsis,21004 +4boys,20974 +blue flower,20947 +pink shirt,20922 +pocket,20913 +under-rim eyewear,20890 +toned,20886 +cum in mouth,20875 +shiny clothes,20826 +bulge,20802 +long legs,20790 +topless male,20775 +science fiction,20765 +alternate breast size,20763 +hand on another's head,20754 +crying with eyes open,20749 +purple skirt,20734 +third-party edit,20730 +skull,20719 +jitome,20710 +green dress,20660 +hand in own hair,20616 +meme,20570 +monster,20563 +outline,20557 +new year,20550 +sunset,20521 +toenails,20508 +futanari,20473 +buruma,20439 +thighband pantyhose,20412 +kaga (kancolle),20409 +light blush,20406 +shield,20367 +inubashiri momiji,20360 +spear,20344 +hands on hips,20299 +emphasis lines,20280 +aged up,20218 +v arms,20157 +frilled shirt collar,20143 +frilled apron,20142 +female masturbation,20075 +white sleeves,20053 +asymmetrical hair,20041 +animal hands,19971 +high-waist skirt,19914 +running,19896 +o o,19839 +breast squeeze,19831 +dog,19799 += =,19775 +track jacket,19752 +competition swimsuit,19744 +long skirt,19707 +tied hair,19687 +princess connect!,19639 +striped bikini,19632 +ice,19624 +angel 
wings,19602 +mole on breast,19583 +watch,19574 +minigirl,19533 +dog tail,19533 +teddy bear,19523 +legs together,19520 +black serafuku,19458 ++ +,19447 +impossible clothes,19440 +gold trim,19390 +head rest,19373 +ass grab,19331 +faceless male,19281 +blue footwear,19265 +white skin,19215 +white pupils,19207 +single earring,19183 +hat removed,19155 +blue theme,19146 +bespectacled,19137 +folded ponytail,19114 +ghost,19106 +tress ribbon,19093 +turret,19076 +toaru majutsu no index,19075 +purple bow,19074 +paw pose,19074 +pouch,19029 +red scarf,18995 +plump,18956 +popsicle,18942 +shawl,18917 +blue one-piece swimsuit,18912 +rabbit,18900 +poke ball,18895 +source request,18880 +sarashi,18853 +brown skirt,18801 +cigarette,18756 +knees together feet apart,18710 +hand in pocket,18695 +white sailor collar,18661 +yukata,18599 +age difference,18593 +hand between legs,18511 +hair down,18507 +brown pantyhose,18501 +sheathed,18484 +bow (weapon),18463 +dragon girl,18445 +black belt,18417 +open coat,18401 +drinking straw,18388 +twins,18381 +league of legends,18379 +high collar,18261 +upside-down,18217 +oni,18212 +page number,18162 +serious,18132 +jeans,18107 +black coat,18090 +smirk,18072 +black vest,18049 +object insertion,18042 +brown jacket,18020 +light rays,18002 +robe,17999 +thought bubble,17995 +military hat,17990 +injury,17972 +yellow shirt,17962 +yakumo ran,17920 +pose,17917 +;),17913 +mini hat,17901 +lollipop,17851 +blue panties,17824 +fate/apocrypha,17818 +yellow ribbon,17801 +dakimakura (medium),17770 +hoop earrings,17740 +striped shirt,17705 +braided ponytail,17702 +street fighter,17701 +tabard,17689 +sideburns,17658 +patreon username,17637 +boku no hero academia,17635 +bouncing breasts,17628 +circlet,17617 +butt crack,17600 +pajamas,17588 +eyebrows hidden by hair,17578 +missionary,17567 +crystal,17505 +lantern,17491 +@ @,17466 +red rose,17462 +striped bow,17429 +wide-eyed,17420 +ofuda,17387 +| |,17362 +contemporary,17338 +water drop,17323 +top hat,17261 +tentacle hair,17235 +kagamine rin,17226 +:t,17218 +breastplate,17216 +chocolate,17188 +threesome,17136 +jumping,17110 +blurry foreground,17110 +white shorts,17099 +rock,17092 +hairpin,17056 +partially fingerless gloves,17000 +hug from behind,17000 +sports bra,16995 +handbag,16957 +white hairband,16956 +blue necktie,16954 +outstretched hand,16939 +oekaki,16927 +moriya suwako,16923 +height difference,16908 +konpaku youmu (ghost),16896 +mirror,16875 +chopsticks,16874 +object hug,16851 +belt buckle,16830 +wooden floor,16787 +bouquet,16782 +pink lips,16773 +black bowtie,16771 +blood on face,16763 +colored eyelashes,16747 +full-face blush,16743 +rumia,16720 +stairs,16678 +paper,16670 +mug,16655 +miki sayaka,16633 +valentine,16630 +shoulder bag,16609 +sportswear,16601 +white kimono,16572 +snowing,16554 +mountain,16498 +kazami yuuka,16466 +covering breasts,16463 +animal hood,16458 +star hair ornament,16448 +underwater,16437 +kaenbyou rin,16412 +holding umbrella,16407 +wedding dress,16404 +miko,16365 +city,16310 +mature male,16303 +cropped torso,16278 +green jacket,16221 +glint,16214 +animated gif,16197 +vibrator,16181 +the legend of zelda,16167 +shimakaze (kancolle),16163 +snake,16157 +adjusting clothes,16152 +meme attire,16138 +reiuji utsuho,16106 +doll,16102 +enmaided,16090 +open book,16077 +video,16064 +panties aside,16026 +bloomers,16026 +backless outfit,15986 +fighting stance,15978 +hair bell,15975 +cum on hair,15956 +sign,15942 +pout,15942 +borrowed character,15903 +saber,15901 +bandaged arm,15871 +waving,15862 +hibiki (kancolle),15846 +multicolored 
eyes,15831 +chen,15801 +motion blur,15791 +ice cream,15787 +skirt hold,15762 +forehead mark,15756 +crossed bangs,15740 +mature female,15739 +colored inner hair,15721 +realistic,15720 +aircraft,15710 +strawberry,15701 +mouse ears,15693 +one eye covered,15692 +raised eyebrows,15690 +headpiece,15649 +orange bow,15631 +spoon,15582 +nose,15576 +love live! sunshine!!,15546 +text focus,15538 +partially translated,15521 +panties around one leg,15517 +card,15500 +!?,15469 +turtleneck sweater,15464 +pink bra,15459 +round teeth,15448 +brother and sister,15426 +fire emblem heroes,15359 +hands in pockets,15357 +nurse cap,15348 +kamishirasawa keine,15332 +fate/zero,15311 +remodel (kantai collection),15309 +front-tie bikini top,15302 +visor cap,15259 +yellow flower,15220 +fence,15207 +strike witches,15201 +bandana,15183 +clitoris,15179 +center opening,15176 +eighth note,15150 +hair tie,15147 +touken ranbu,15137 +white pants,15101 +leash,15080 +mustache,15063 +grey jacket,15004 +anklet,14998 +white one-piece swimsuit,14995 +short eyebrows,14960 +holding staff,14952 +white leotard,14951 +hime cut,14950 +green headwear,14949 +palm tree,14943 +geta,14943 +sand,14930 +card (medium),14925 +dragon,14907 +tatara kogasa,14899 +fire emblem: three houses,14893 +covering mouth,14842 +kawashiro nitori,14839 +drink,14826 +sailor hat,14821 +stubble,14817 +anchor symbol,14817 +pokemon sm,14808 +blue skin,14806 +idolmaster million live!,14804 +dildo,14802 +mash kyrielight,14763 +green shirt,14753 +top-down bottom-up,14749 +antennae,14747 +door,14740 +can,14739 +arrow (projectile),14735 +fantasy,14723 +orange background,14699 +overflow,14692 +sitting on person,14680 +on floor,14671 +scar across eye,14642 +fork,14624 +alternate color,14576 +hip focus,14575 +round eyewear,14572 +hinanawi tenshi,14557 +xenoblade chronicles (series),14553 +dragon tail,14520 +mouth mask,14503 +blindfold,14496 +frog hair ornament,14489 +asymmetrical bangs,14481 +mario (series),14473 +adjusting hair,14462 +indie virtual youtuber,14440 +spread arms,14434 +purple flower,14419 +honkai (series),14415 +yu-gi-oh!,14393 +camera,14391 +arrow (symbol),14380 +furrowed brow,14359 +interracial,14357 +licking lips,14335 +fake tail,14302 +sakura kyouko,14289 +tall image,14288 +wand,14282 +arched back,14252 +zoom layer,14247 +off-shoulder dress,14236 +sharp fingernails,14227 +folding fan,14212 +black one-piece swimsuit,14210 +straw hat,14209 +arms at sides,14208 +green ribbon,14203 +double v,14200 +long dress,14191 +tomoe mami,14189 +music,14184 +apple,14173 +sword art online,14151 +overwatch,14148 +jacket on shoulders,14147 +emblem,14133 +head out of frame,14110 +mixed-language commentary,14109 +sun,14095 +lowleg,14092 +blanket,14056 +hanging breasts,14053 +sunflower,14052 +anime screencap,14040 +final fantasy xiv,14039 +arm warmers,14013 +rabbit girl,14010 +honkai impact 3rd,14001 +white wings,13996 +labcoat,13993 +gag,13985 +groping,13984 +railing,13970 +yokozuwari,13969 +paid reward available,13961 +cannon,13956 +dragon ball,13943 +shigure (kancolle),13917 +alternate hair length,13908 +pink footwear,13905 +paid reward,13905 +veiny penis,13884 +heart hair ornament,13882 +wince,13874 +black bodysuit,13869 +bandeau,13856 +gothic lolita,13847 +final fantasy vii,13847 +horizon,13838 +overwatch 1,13837 +brown headwear,13823 +purple ribbon,13799 +holding clothes,13746 +houraisan kaguya,13743 +name tag,13741 +furry female,13727 +leg lift,13723 +bun cover,13710 +muscular female,13695 +shota,13694 +goatee,13686 +unbuttoned,13681 +pink bikini,13673 +highleg 
panties,13662 +pasties,13658 +dog girl,13657 +chinese text,13631 +branch,13622 +angel,13613 +floral background,13605 +re:zero kara hajimeru isekai seikatsu,13561 +beanie,13556 +blue kimono,13553 +hand on own cheek,13546 +partially visible vulva,13544 +clock,13537 +jack-o'-lantern,13531 +hitodama,13529 +black sclera,13529 +smoking,13527 +koakuma,13523 +red hairband,13493 +imageboard desourced,13452 +android,13447 +hand on headwear,13434 +off-shoulder shirt,13409 +lactation,13407 +swimsuit under clothes,13405 +underbust,13389 +protected link,13387 +brown legwear,13377 +kill la kill,13352 +hat flower,13343 +eyeball,13325 +untied,13307 +microskirt,13292 +back-to-back,13287 +red cape,13281 +petticoat,13277 +tales of (series),13272 +purple shirt,13265 +dot nose,13246 +tabi,13215 +food on face,13187 +ahegao,13174 +basket,13173 +nipple slip,13170 +vambraces,13154 +ass focus,13135 +bookshelf,13130 +gakuran,13118 +logo,13083 +criss-cross halter,13078 +splatoon (series),13074 +black sleeves,13067 +large areolae,13059 +drawstring,13038 +kongou (kancolle),13037 +wolf girl,13030 +pointless censoring,13029 +argyle,13015 +armored dress,13013 +fairy wings,13011 +collared dress,13005 +character doll,12997 +suspender skirt,12964 +cabbie hat,12961 +grabbing from behind,12952 +tail ornament,12950 +lace-up boots,12947 +purple nails,12923 +saliva trail,12884 +silhouette,12870 +bandaid on face,12849 +pocky,12847 +pokemon (anime),12840 +unzipped,12820 +blue pants,12814 +size difference,12809 +hand on another's shoulder,12806 +large penis,12800 +mystia lorelei,12778 +monogatari (series),12763 +white scarf,12762 +headdress,12755 +white coat,12747 +covered mouth,12744 +kneepits,12743 +silent comic,12740 +taut clothes,12738 +gangbang,12731 +blue gloves,12720 +crotch seam,12697 +onsen,12668 +gift box,12665 +gohei,12653 +red vest,12631 +painttool sai (medium),12622 +yellow ascot,12616 +butterfly hair ornament,12583 +anchor,12562 +leaning back,12554 +drinking,12535 +everyone,12507 +gochuumon wa usagi desu ka?,12494 +blue bowtie,12493 +inazuma (kancolle),12492 +retro artstyle,12485 +ibuki suika,12472 +inaba tewi,12458 +on head,12457 +cow print,12451 +military vehicle,12428 +cow ears,12426 +no nose,12421 +winter clothes,12407 +holding flower,12405 +naruto (series),12398 +sundress,12392 +shrug (clothing),12383 +witch,12379 +guitar,12353 +naval uniform,12351 +highleg swimsuit,12351 +center frills,12342 +bridal garter,12325 +pen,12307 +2koma,12304 +pool,12267 +electricity,12266 +arm grab,12263 +wind lift,12255 +tiles,12230 +heart censor,12220 +bishoujo senshi sailor moon,12190 +toenail polish,12173 +guilty gear,12172 +black cape,12171 +frog,12165 +akagi (kancolle),12165 +glass,12163 +photo (medium),12162 +souryuu asuka langley,12157 +spaghetti strap,12151 +checkered clothes,12150 +hijiri byakuren,12148 +red lips,12145 +adjusting eyewear,12084 +brown thighhighs,12070 +flag,12028 +ganyu (genshin impact),12009 +mittens,11985 +headpat,11952 +code geass,11931 +muneate,11928 +salute,11910 +wedding ring,11908 +bound wrists,11907 +reading,11899 +bangle,11895 +selfie,11825 +chestnut mouth,11792 +ankle boots,11787 +scythe,11786 +facing away,11780 +korean text,11779 +double-breasted,11770 +blue thighhighs,11769 +flipped hair,11763 +purple bikini,11757 +waitress,11747 +classroom,11726 +female child,11715 +nazrin,11693 +absurdly long hair,11693 +xenoblade chronicles 2,11692 +poke ball (basic),11680 +hair stick,11666 +chromatic aberration,11658 +talking,11638 +strap gap,11635 +airplane,11595 +vision (genshin impact),11591 +uneven 
legwear,11564 +cheerleader,11562 +magic,11546 +car,11540 +beachball,11528 +smug,11509 +pinafore dress,11500 +food-themed hair ornament,11500 +toaru kagaku no railgun,11497 +wristwatch,11481 +one piece,11470 +flat cap,11468 +mahou shoujo lyrical nanoha,11465 +fur-trimmed jacket,11444 +lucky star,11438 +cleft of venus,11424 +nun,11410 +inverted nipples,11385 +pulled by self,11373 +kita high school uniform,11362 +notice lines,11349 +earmuffs,11342 +ugoira,11336 +kagamine len,11303 +light,11299 +shirt tucked in,11294 +hand to own mouth,11292 +gagged,11279 +pom pom (cheerleading),11264 +bikini under clothes,11264 +nurse,11250 +houjuu nue,11233 +potted plant,11225 +horn ornament,11221 +bucket,11209 +bukkake,11201 +rigging,11199 +road,11191 +tenryuu (kancolle),11181 +hands on own chest,11181 +stud earrings,11177 +battle,11160 +spoken question mark,11149 +danganronpa 2: goodbye despair,11132 +armored boots,11128 +bare back,11114 +nipple tweak,11108 +animal costume,11106 +tatami,11056 +eyeliner,11025 +ninja,11023 +mecha musume,11011 +sideways glance,11007 +triangular headpiece,11003 +foot focus,10995 +hakama short skirt,10991 +tamamo (fate),10988 +sexually suggestive,10985 +light blue hair,10982 +cow horns,10974 +style parody,10965 +kono subarashii sekai ni shukufuku wo!,10959 +assault rifle,10949 +neptune (series),10944 +lamp,10942 +holding poke ball,10934 +snake hair ornament,10922 +fire emblem fates,10921 +femdom,10893 +bench,10877 +blazblue,10852 +>:),10844 +yasaka kanako,10840 +candle,10837 +yuudachi (kancolle),10824 +yagokoro eirin,10824 +controller,10824 +bush,10823 +mahou shoujo lyrical nanoha strikers,10820 +:>,10817 +overalls,10801 +happy sex,10798 +scales,10787 +hair over eyes,10778 +tube top,10773 +naked shirt,10772 +back bow,10769 +bow bra,10764 +cityscape,10762 +the king of fighters,10756 +mask on head,10740 +bags under eyes,10738 +bad anatomy,10733 +riding,10725 +biting,10724 +ragnarok online,10698 +dragon quest,10687 +megurine luka,10682 +crown braid,10672 +brown belt,10669 +bridal veil,10666 +sailor shirt,10661 +pokemon bw,10652 +white sweater,10645 +mini crown,10624 +brown dress,10601 +open fly,10597 +fairy,10593 +stuffed bunny,10590 +pink headwear,10568 +ikazuchi (kancolle),10558 +hair behind ear,10549 +sarong,10542 +joints,10538 ++++,10519 +japanese armor,10512 +source smaller,10491 +pov hands,10489 +ice wings,10488 +covering crotch,10487 +shade,10482 +purple panties,10472 +toeless legwear,10463 +orgasm,10460 +triangle mouth,10422 +dark,10404 +cable,10398 +red kimono,10395 +wet shirt,10393 +peach,10378 +jeanne d'arc alter (fate),10372 +creature,10372 +hugging own legs,10357 +reaching out,10351 +key,10347 +bird wings,10342 +fishnet pantyhose,10335 +inazuma eleven (series),10331 +red hakama,10318 +pleated dress,10312 +bad tumblr id,10310 +space,10301 +mizuhashi parsee,10296 +halloween costume,10290 +blue leotard,10290 +doujinshi,10264 +air bubble,10259 +steaming body,10257 +randoseru,10253 +5boys,10239 +print legwear,10238 +bathing,10224 +lolita hairband,10207 +claw pose,10200 +fate/extra ccc,10177 +persona 4,10175 +oil-paper umbrella,10174 +pilot suit,10167 +holster,10166 +food in mouth,10131 +indian style,10130 +heart of string,10118 +school desk,10088 +alternate eye color,10088 +breast pocket,10084 +own hands clasped,10067 +painting (medium),10057 +single side bun,10044 +paw print,10043 +print kimono,10039 +red panties,10034 +dagger,10032 +against wall,10028 +hands on own face,10021 +laughing,10019 +hibiscus,10016 +touhou (pc-98),10010 +red choker,10009 +yellow bikini,9999 
+abigail williams (fate),9984 +hair up,9976 +wet hair,9947 +fubuki (kancolle),9944 +confetti,9935 +heart hands,9933 +petite,9929 +one breast out,9929 +akiyama mio,9929 +star-shaped pupils,9927 +lace-trimmed legwear,9927 +blue rose,9912 +3d,9889 +polka dot background,9885 +akatsuki (kancolle),9884 +alternate hair color,9882 +cross-laced clothes,9880 +snowflakes,9878 +plugsuit,9877 +carrot,9863 +black collar,9861 +string,9855 +frilled shirt,9848 +bath,9829 +paw gloves,9820 +drunk,9815 +zuikaku (kancolle),9782 +white outline,9778 +autumn leaves,9767 +tate eboshi,9766 +marker (medium),9747 +incest,9747 +bracer,9732 +whisker markings,9730 +holding bag,9724 +bodystocking,9724 +scabbard,9704 +pumpkin,9698 +crescent hair ornament,9693 +stretching,9688 +anal object insertion,9669 +sack,9665 +tengen toppa gurren lagann,9659 +parasol,9653 +cum in ass,9653 +raccoon ears,9643 +office lady,9635 +bat (animal),9635 +source larger,9630 +star print,9619 +low wings,9610 +ribbon choker,9607 +cone hair bun,9604 +red legwear,9596 +fujimaru ritsuka (female),9592 +umineko no naku koro ni,9585 +tasuki,9574 +asymmetrical wings,9574 +hand on another's face,9560 +facepaint,9552 +wine glass,9535 +voiceroid,9527 +wrist scrunchie,9514 +computer,9510 +torii,9508 +feather hair ornament,9508 +holding bottle,9505 +nier (series),9502 +sakuragaoka high school uniform,9496 +teapot,9492 +holding fan,9485 +fake horns,9482 +zipper pull tab,9480 +navel cutout,9472 +higurashi no naku koro ni,9457 +antlers,9454 +check commentary,9453 +watercraft,9445 +axe,9444 +self upload,9443 +habit,9399 +shingeki no kyojin,9380 +purple gloves,9378 +tiger ears,9366 +arm tattoo,9353 +magatama,9348 +string panties,9334 +danganronpa v3: killing harmony,9333 +haori,9324 +rozen maiden,9310 +fire emblem awakening,9300 +belly,9296 +black sweater,9293 +cum on clothes,9289 +mismatched legwear,9274 +red thighhighs,9271 +lace-trimmed panties,9271 +pixel art,9259 +slippers,9256 +black border,9256 +brown shirt,9248 +nipple piercing,9247 +rebuild of evangelion,9244 +happy new year,9238 +transparent,9225 +chinese zodiac,9209 +shibari,9202 +fujimaru ritsuka (male),9199 +holding tray,9195 +furry male,9194 +tight,9176 +wagashi,9173 +blood on clothes,9163 +rwby,9158 +artist logo,9153 +purple jacket,9149 +nagato (kancolle),9145 +toyosatomimi no miko,9141 +vampire,9130 +one-piece tan,9119 +grey dress,9113 +pink kimono,9104 +derivative work,9092 +idolmaster cinderella girls starlight stage,9091 +morichika rinnosuke,9076 +hip vent,9076 +haruna (kancolle),9072 +hamakaze (kancolle),9065 +garrison cap,9055 +bikini skirt,9046 +tied shirt,9041 +pixiv request,9038 +fang out,9038 +precum,9029 +yellow neckerchief,9019 +bald,9000 +crescent hat ornament,8996 +highleg bikini,8992 +image sample,8988 +mouse tail,8974 +tachi-e,8970 +thighlet,8969 +hand on own head,8968 +nosebleed,8967 +knee pads,8964 +magic circle,8939 +curled horns,8929 +fate testarossa,8922 +giant,8920 +macross,8916 +suzumiya haruhi,8915 +open cardigan,8911 +grabbing own breast,8911 +evil smile,8908 +arm strap,8903 +black kimono,8888 +tiger print,8874 +purple headwear,8872 +red shorts,8866 +raglan sleeves,8860 +fishnet legwear,8853 +leg ribbon,8848 +condom wrapper,8824 +asymmetrical clothes,8812 +pantyhose pull,8809 +sample watermark,8805 +layered dress,8805 +draph,8805 +very short hair,8796 +holding knife,8795 +hoshiguma yuugi,8794 +princess carry,8789 +hand on own chin,8788 +striped background,8786 +midriff peek,8770 +hair censor,8765 +legwear under shorts,8759 +deep skin,8751 +short kimono,8748 +after 
vaginal,8748 +aura,8740 +dancing,8727 +shoulder blades,8725 +graphite (medium),8721 +green bikini,8717 +blue bra,8717 +nero claudius (fate),8715 +looking afar,8697 +kicking,8675 +scared,8670 +smile precure!,8665 +presenting,8661 +half gloves,8648 +link,8642 +layered sleeves,8629 +pokemon dppt,8620 +head wreath,8619 +layered skirt,8617 +pink jacket,8613 +pikachu,8605 +wall,8602 +manly,8592 +tea,8580 +male child,8570 +torpedo,8569 +purple thighhighs,8567 +balloon,8565 +kashima (kancolle),8564 +tail raised,8551 +over shoulder,8547 +yellow jacket,8546 +crop top overhang,8526 +nier automata,8525 +nakano azusa,8523 +one knee,8508 +o-ring top,8508 +male underwear,8504 +open kimono,8500 +mechanical arms,8499 +pigeon-toed,8495 +mother and daughter,8494 +w,8476 +used condom,8475 +disposable cup,8472 +hat feather,8471 +paintbrush,8468 +yellow dress,8456 +tohsaka rin,8442 +kagiyama hina,8437 +open hand,8435 +mermaid,8435 +shanghai doll,8432 +nagato yuki,8431 +shoulder cutout,8429 +green skin,8429 +backless dress,8422 +o-ring bikini,8421 +kimetsu no yaiba,8420 +leather,8407 +gap (touhou),8404 +reference sheet,8392 +blue vest,8384 +greaves,8379 +lace-trimmed bra,8349 +public indecency,8342 +in container,8321 +animal on head,8319 +shimenawa,8298 +red ascot,8289 +company name,8270 +sound effects,8254 +ooarai school uniform,8254 +naked towel,8254 +bare pectorals,8252 +futa with female,8251 +naruto,8242 +photo (object),8239 +yellow skirt,8226 +lineart,8226 +sunbeam,8222 +colorized,8207 +squiggle,8206 +glowing eye,8203 +field,8201 +limited palette,8197 +bandaged leg,8193 +symmetrical docking,8192 +out-of-frame censoring,8189 +watercolor (medium),8179 +maple leaf,8176 +star earrings,8173 +nishizumi miho,8173 +gawr gura,8170 +bamboo,8162 +mmf threesome,8159 +holding microphone,8159 +wriggle nightbug,8158 +rem (re:zero),8152 +tokyo afterschool summoners,8148 +blue neckerchief,8136 +architecture,8134 +multiple persona,8128 +leg grab,8127 +holding polearm,8120 +gameplay mechanics,8111 +imminent kiss,8105 +houshou marine,8079 +mononobe no futo,8075 +daiyousei,8071 +bedroom,8071 +tsukihime,8061 +breast sucking,8060 +tracen school uniform,8051 +hirasawa yui,8042 +polka dot bow,8039 +huge ass,8038 +thumbs up,8030 +ryuujou (kancolle),8030 +red leotard,8017 +clothes around waist,8014 +sleeveless turtleneck,7998 +on couch,7980 +raiden shogun,7970 +arm cannon,7962 +halftone,7942 +grey pants,7940 +naked apron,7937 +bound arms,7922 +kyubey,7919 +senki zesshou symphogear,7910 +imminent penetration,7910 +body fur,7899 +peeing,7892 +shiki eiki,7886 +animification,7884 +pee,7862 +singing,7860 +green nails,7859 +shoulder tattoo,7855 +explosion,7853 +checkered floor,7849 +shackles,7844 +onozuka komachi,7843 +undercut,7821 +double penetration,7808 +pencil,7800 +under covers,7798 +tile floor,7794 +hair over breasts,7790 +bra lift,7783 +suzuya (kancolle),7770 +slingshot swimsuit,7770 +brown pants,7769 +idol,7764 +ruins,7761 +fate/kaleid liner prisma illya,7760 +summer,7758 +cross necklace,7749 +bang dream!,7748 +falling,7743 +cowbell,7737 +strap,7731 +striped dress,7721 +plaid vest,7720 +raccoon tail,7710 +iron cross,7698 +print shirt,7693 +vines,7692 +head fins,7676 +erune,7674 +blue hairband,7674 +leggings,7665 +mushroom,7654 +purple legwear,7651 +tank,7649 +^o^,7644 +scathach (fate),7643 +dark persona,7640 +artist self-insert,7639 +skirt pull,7627 +house,7626 +avatar (ff14),7625 +white cape,7622 +pubic tattoo,7617 +stitches,7615 +vampire (game),7610 +mega man (series),7606 +purple footwear,7600 +sleeve cuffs,7594 +black cat,7585 
+name connection,7576 +fox mask,7565 +super smash bros.,7561 +eyewear removed,7558 +annoyed,7556 +spoken exclamation mark,7553 +misaka mikoto,7544 +ranguage,7543 +spiked bracelet,7541 +genderswap (ftm),7538 +second-party source,7528 +holding hair,7528 +vaginal object insertion,7521 +face-to-face,7513 +long sideburns,7505 +black scarf,7499 +black hoodie,7498 +chainsaw man,7489 +3koma,7482 +usami renko,7481 +platform footwear,7478 +huge weapon,7470 +television,7469 +patreon reward,7465 +arm under breasts,7464 +yukkuri shiteitte ne,7447 +one-eyed,7443 +bathroom,7442 +sad,7437 +bikini armor,7437 +hachimaki,7436 +tilted headwear,7433 +trigger discipline,7431 +takamachi nanoha,7431 +bikini pull,7431 +shoulder pads,7429 +asymmetrical docking,7423 +beige background,7421 +sakazuki,7416 +doughnut,7407 +toramaru shou,7387 +babydoll,7383 +ringed eyes,7380 +french kiss,7370 +head scarf,7362 +pants pull,7359 +sandwiched,7343 +inactive account,7337 +illyasviel von einzbern,7332 +cutoffs,7319 +duel monster,7315 +hand on own knee,7303 +yorha no. 2 type b,7294 +invisible chair,7290 +pokemon bw2,7277 +headphones around neck,7276 +tifa lockhart,7272 +0 0,7271 +saki,7267 +fundoshi,7264 +hand on own thigh,7256 +scar on cheek,7248 +narrow waist,7243 +inkling,7238 +tiptoes,7233 +camouflage,7233 +black neckerchief,7231 +houshou (kancolle),7222 +old,7214 +white capelet,7210 +sweater dress,7207 +persona 5,7205 +bread,7205 +imaizumi kagerou,7203 +id card,7199 +monster hunter (series),7191 +white rose,7179 +necktie between breasts,7170 +cunnilingus,7168 +ayanami rei,7163 +maribel hearn,7153 +m legs,7153 +jeanne d'arc (fate),7153 +holding animal,7148 +starry background,7147 +orange skirt,7144 +shorts under skirt,7140 +tinted eyewear,7139 +joseph joestar,7127 +dragon ball z,7126 +crescent moon,7119 +go-toubun no hanayome,7112 +partially unbuttoned,7108 +kamen rider,7108 +striped necktie,7102 +futon,7095 +earphones,7095 +x-ray,7092 +white eyes,7092 +two-tone dress,7092 +splashing,7092 +tiger & bunny,7083 +checkered background,7079 +white choker,7075 +dripping,7073 +star hat ornament,7069 +grey footwear,7069 +ore no imouto ga konna ni kawaii wake ga nai,7054 +wide shot,7050 +big hair,7046 +sliding doors,7042 +wardrobe malfunction,7035 +covered eyes,7021 +casual one-piece swimsuit,7020 +voice actor connection,7019 +anchor hair ornament,7019 +bobby socks,7015 +foot out of frame,7014 +leaf on head,7012 +arm garter,7012 +holding another's wrist,7011 +cow girl,7006 +two tails,6992 +tainaka ritsu,6990 +smoking pipe,6990 +;o,6988 +bad source,6985 +animal focus,6984 +yuudachi kai ni (kancolle),6983 +split,6983 +grey skin,6981 +danganronpa: trigger happy havoc,6981 +body writing,6981 +brick wall,6978 +broom riding,6974 +matoi ryuuko,6950 +untied bikini,6948 +constricted pupils,6948 +shigure kai ni (kancolle),6940 +no pupils,6939 +handcuffs,6937 +on ground,6925 +navel piercing,6924 +aqua nails,6918 +fur-trimmed sleeves,6909 +leaf hair ornament,6903 +soaking feet,6894 +purple kimono,6892 +frilled panties,6888 +breast rest,6884 +winter,6880 +facial tattoo,6880 +foreskin,6879 +frilled bow,6873 +cross-section,6872 +bonnet,6871 +yin yang,6868 +disembodied limb,6862 +holding gift,6861 +lumine (genshin impact),6856 +holding fruit,6856 +object on head,6855 +pink theme,6848 +arm ribbon,6848 +pink rose,6835 +cookie,6834 +shoes removed,6821 +tiger tail,6819 +orange shirt,6810 +handheld game console,6807 +white fur,6803 +blue neckwear,6799 +spiked collar,6798 +rolling eyes,6782 +standing sex,6781 +persona 3,6766 +serval (kemono friends),6760 
+ffm threesome,6752 +murakumo (kancolle),6734 +nishikino maki,6731 +forehead jewel,6728 +watermelon,6723 +1990s (style),6719 +producer (idolmaster),6717 +arm guards,6716 +single shoe,6713 +fireworks,6710 +jeanne d'arc alter (avenger) (fate),6693 +sonoda umi,6690 +purple theme,6685 +multiple others,6682 +folded,6681 +bridge,6679 +rimless eyewear,6678 +sheet grab,6664 +partially colored,6664 +rice,6650 +sailor senshi uniform,6649 +blue scarf,6649 +chalkboard,6648 +yellow necktie,6647 +syringe,6641 +afterimage,6632 +gyaru,6626 +badge,6624 +northern ocean princess,6619 +engrish text,6616 +interspecies,6613 +heart brooch,6611 +green panties,6606 +armpit crease,6605 +bakemonogatari,6604 +sitting on lap,6603 +neck ring,6594 +symbol in eye,6592 +amatsukaze (kancolle),6589 +himekaidou hatate,6584 +asymmetrical gloves,6580 +high school dxd,6576 +anniversary,6576 +print bikini,6573 +ushio (kancolle),6562 +bleach,6559 +water bottle,6558 +toned male,6555 +purple skin,6554 +!!,6553 +shibuya rin,6546 +implied sex,6546 +stool,6541 +monitor,6535 +fat,6532 +east asian architecture,6522 +short over long sleeves,6514 +content rating,6504 +shoe soles,6496 +sake,6496 +ripples,6492 +submachine gun,6491 +inazuma eleven go,6484 +oversized clothes,6482 +lion ears,6475 +toujou nozomi,6471 +beer,6469 +green vest,6456 +:/,6453 +red theme,6452 +rainbow,6451 +hammer,6447 +hooded cloak,6446 +brothers,6442 +atago (kancolle),6440 +bishounen,6436 +heart earrings,6435 +egg,6429 +skirt removed,6428 +red horns,6428 +dappled sunlight,6426 +scroll,6425 +okita souji (fate),6425 +red skin,6424 +chibi inset,6422 +torn shirt,6415 +giantess,6411 +d:,6405 +karakasa obake,6402 +pointing at viewer,6392 +official style,6388 +fucked silly,6381 +yukikaze (kancolle),6380 +jaggy lines,6374 +yuru yuri,6360 +kaku seiga,6356 +red pants,6354 +pixiv fantasia,6349 +sniper rifle,6348 +mini top hat,6348 +lowleg panties,6343 +tying hair,6339 +feeding,6338 +shoukaku (kancolle),6337 +brown sweater,6320 +multicolored background,6315 +fringe trim,6315 +rabbit hair ornament,6310 +hu tao (genshin impact),6308 +skin-covered horns,6307 +planet,6303 +frilled bra,6303 +murasa minamitsu,6295 +pink thighhighs,6293 +yazawa nico,6287 +hard translated,6282 +sepia,6277 +print panties,6263 +spy x family,6262 +princess zelda,6257 +licking penis,6257 +erection under clothes,6256 +error,6254 +chess piece,6253 +bathtub,6252 +blue choker,6250 +white bodysuit,6241 +goat horns,6237 +downblouse,6237 +fur-trimmed coat,6218 +ayase eli,6214 +cherry,6211 +motorcycle,6209 +brown coat,6209 +third-party source,6208 +kafuu chino,6202 +bikini bottom only,6201 +undershirt,6192 +off-shoulder sweater,6190 +micro shorts,6189 +torogao,6187 +grey shorts,6187 +jiangshi,6186 +to heart 2,6182 +fur-trimmed gloves,6173 +inkling girl,6170 +3:,6168 +straight-on,6165 +body blush,6159 +white tank top,6158 +front ponytail,6158 +playing instrument,6153 +police,6150 +christmas tree,6139 +cushion,6137 +chaldea uniform,6137 +yellow bowtie,6134 +senran kagura,6130 +belt pouch,6129 +zhongli (genshin impact),6128 +grey legwear,6127 +tamamo no mae (fate/extra),6122 +ears through headwear,6121 +power lines,6102 +ribs,6098 +miqo'te,6096 +holding stuffed toy,6095 +otonokizaka school uniform,6085 +cyborg,6079 +cardcaptor sakura,6076 +panties removed,6071 +\m/,6071 +pinky out,6067 +zzz,6059 +perspective,6054 +faulds,6052 +race queen,6051 +coin,6048 +nagae iku,6043 +yamato (kancolle),6033 +holding plate,6032 +lamppost,6028 +joseph joestar (young),6024 +game controller,6024 +girl sandwich,6019 +sleepy,6008 +long 
coat,5995 +scissors,5991 +uwabaki,5987 +horse,5987 +sound,5980 +bride,5975 +seductive smile,5968 +reverse cowgirl position,5964 +bear ears,5956 +nijisanji en,5949 +chest hair,5948 +birthday,5947 +reclining,5945 +pun,5944 +taut shirt,5943 +punching,5939 +dirty,5936 +bead bracelet,5936 +shell,5933 +akebono (kancolle),5933 +tail wagging,5931 +latex,5927 +clothed male nude female,5920 +dawn (pokemon),5915 +fishnet thighhighs,5914 +sheep horns,5913 +shouting,5911 +platinum blonde hair,5911 +broken,5909 +the legend of zelda: breath of the wild,5908 +pokemon xy,5907 +kariginu,5907 +on chair,5896 +shirt pull,5894 +heads together,5894 +green theme,5893 +track suit,5889 +black capelet,5885 +white belt,5875 +kujo jotaro,5874 +kitakami (kancolle),5873 +pink legwear,5871 +nervous,5862 +plaid shirt,5859 +multi-strapped bikini,5859 +hata no kokoro,5855 +frilled hairband,5847 +bead necklace,5843 +white bowtie,5842 +beamed eighth notes,5840 +huge ahoge,5837 +manjuu (azur lane),5833 +holding bouquet,5833 +pillow hug,5828 +leaning to the side,5825 +light frown,5824 +loose socks,5823 +animalization,5823 +looping animation,5821 +male swimwear,5818 +brown shorts,5816 +pervert,5810 +take your pick,5808 +green necktie,5807 +gilgamesh (fate),5802 +saucer,5797 +blank eyes,5793 +holding instrument,5791 +cum string,5791 +blue sweater,5790 +summer uniform,5788 +bruise,5787 +pointy hair,5782 +nekomata,5782 +marvel,5773 +warship girls r,5771 +playing card,5771 +trait connection,5769 +coffee,5764 +usada pekora,5757 +harness,5755 +hair slicked back,5755 +unfinished,5753 +torso grab,5751 +jester cap,5750 +kaito (vocaloid),5747 +fat mons,5747 +aikatsu! (series),5743 +blue cape,5742 +fish tail,5737 +ibaraki kasen,5729 +oversized object,5714 +ball gag,5712 +blue legwear,5707 +white hoodie,5706 +mahou shoujo lyrical nanoha a's,5706 +shirakami fubuki,5705 +rensouhou-chan,5696 +skeleton,5692 +striped bowtie,5690 +old man,5683 +red capelet,5680 +jujutsu kaisen,5680 +black neckwear,5679 +planted,5675 +xd,5662 +matou sakura,5660 +grey sweater,5658 +hair pulled back,5650 +assertive female,5645 +cooking,5639 +horn ribbon,5637 +lap pillow,5636 +beach umbrella,5634 +bad deviantart id,5634 +grey gloves,5621 +tentacle sex,5620 +spoken musical note,5613 +lillie (pokemon),5609 +kumoi ichirin,5609 +doll joints,5609 +megumin,5608 +poolside,5607 +splatoon 1,5604 +suggestive fluid,5602 +merry christmas,5600 +bubble skirt,5598 +last origin,5596 +aqua background,5594 +green shorts,5593 +red coat,5589 +kurodani yamame,5587 +nightgown,5584 +multicolored dress,5584 +burger,5581 +pregnant,5579 +waist cape,5576 +bandaid on leg,5576 +brown cardigan,5572 +shuten douji (fate),5570 +over-kneehighs,5570 +bra pull,5569 +mouse,5568 +animal hat,5563 +kasodani kyouko,5562 +no eyes,5561 +green kimono,5559 +green footwear,5547 +fur,5546 +purple bowtie,5532 +speed lines,5530 +soga no tojiko,5530 +tokiwadai school uniform,5529 +white collar,5528 +kotatsu,5526 +macross frontier,5524 +to love-ru,5522 +yawning,5520 +novel illustration,5511 +twisted torso,5508 +clipboard,5503 +holding bow (weapon),5502 +prinz eugen (kancolle),5497 +pink bowtie,5496 +whip,5495 +purple bra,5492 +mechanical halo,5492 +expressions,5488 +pectoral cleavage,5483 +black tank top,5479 +wine,5476 +atelier (series),5474 +holding chopsticks,5469 +skull hair ornament,5463 +cum on ass,5461 +miyako yoshika,5459 +loincloth,5459 +hoshii miki,5443 +plaid scarf,5440 +pillar,5440 +cow tail,5438 +damaged,5437 +weapon over shoulder,5434 +paper fan,5433 +female orgasm,5426 +love live! 
nijigasaki high school idol club,5425 +playing games,5424 +kotobuki tsumugi,5422 +shelf,5417 +polka dot panties,5416 +real life,5415 +ooyodo (kancolle),5407 +standing split,5406 +stand (jojo),5402 +helltaker,5402 +braided bun,5399 +queen's blade,5393 +female admiral (kancolle),5389 +superhero,5380 +gundam 00,5378 +astolfo (fate),5377 +river,5376 +cardboard box,5368 +time paradox,5360 +mona (genshin impact),5354 +ear blush,5351 +biceps,5346 +grapes,5344 +dead or alive,5344 +axis powers hetalia,5344 +sweating profusely,5343 +castle,5343 +street,5337 +kobayashi-san chi no maidragon,5331 +pink gloves,5327 +elsword,5326 +darling in the franxx,5324 +hiiragi kagami,5317 +baseball bat,5317 +female pervert,5309 +between fingers,5309 +flat color,5308 +darjeeling (girls und panzer),5308 +gigantic breasts,5305 +red collar,5303 +:|,5302 +footjob,5299 +tatsuta (kancolle),5297 +flag print,5295 +grey headwear,5290 +aqua necktie,5290 +wet panties,5288 +new super mario bros. u deluxe,5288 +chun-li,5288 +black horns,5288 +dokidoki! precure,5282 +argyle legwear,5281 +police uniform,5280 +pokemon hgss,5280 +mutsu (kancolle),5277 +holding spoon,5275 +tape,5274 +orb,5271 +=3,5271 +single mechanical arm,5268 +see-through sleeves,5265 +grey thighhighs,5259 +huge penis,5258 +onee-shota,5256 +female ejaculation,5254 +dark background,5254 +vento aureo,5253 +breast envy,5253 +super crown,5250 +bare tree,5250 +cu chulainn (fate),5247 +little busters!,5245 +split mouth,5243 +laptop,5243 +shark tail,5242 +unmoving pattern,5220 +animal collar,5218 +waistcoat,5216 +forehead protector,5215 +bestiality,5204 +sparkling eyes,5203 +dressing,5203 +thighhighs under boots,5197 +diamond (shape),5196 +checkered,5196 +asashio (kancolle),5195 +red sweater,5189 +cookie (touhou),5185 +single sock,5183 +black blindfold,5183 +diagonal stripes,5180 +pink choker,5178 +red bra,5176 +clannad,5174 +long braid,5172 +tengu-geta,5171 +gridman universe,5165 +penguin,5163 +;p,5157 +sweater lift,5147 +may (pokemon),5143 +lily (flower),5139 +kijin seija,5137 +extra arms,5130 +book stack,5130 +danmaku,5124 +hand on own stomach,5117 +competition school swimsuit,5113 +tassel earrings,5111 +black rock shooter,5110 +extra eyes,5109 +metroid,5104 +breast suppress,5103 +patreon logo,5101 +serval print,5099 +hand on own ass,5099 +company connection,5096 +v-neck,5094 +shopping bag,5085 +minami kotori,5084 +frilled thighhighs,5083 +dog tags,5081 +covering face,5076 +blue coat,5076 +bursting breasts,5075 +yellow footwear,5071 +kaban (kemono friends),5070 +chain-link fence,5068 +nishizumi maho,5067 +albino,5066 +costume switch,5064 +animal nose,5064 +dango,5062 +adjusting headwear,5061 +multicolored skin,5060 +uterus,5059 +trading card,5052 +tail ribbon,5052 +meat,5046 +topknot,5044 +holding hat,5044 +clownpiece,5044 +zombie,5042 +monocle,5038 +kinomoto sakura,5038 +facing another,5036 +byleth (fire emblem),5033 +charm (object),5032 +heart cutout,5027 +hammer (sunset beach),5027 +yandere,5023 +twilight,5023 +no socks,5023 +keqing (genshin impact),5016 +purple lips,5008 +pot,5007 +exhibitionism,5004 +emiya shirou,5001 +final fantasy vii remake,4996 +water gun,4994 +fur-trimmed cape,4994 +hands on own cheeks,4989 +clothes removed,4987 +bokeh,4987 +rosa (pokemon),4980 +falling petals,4979 +internal cumshot,4978 +quiver,4973 +flame,4973 +wolf,4971 +sanpaku,4964 +mori calliope,4957 +angel beats!,4955 +bad nicoseiga id,4951 +holding eyewear,4948 +fur-trimmed capelet,4944 +meiko (vocaloid),4941 +ebifurya,4940 +panty & stocking with garterbelt,4939 +teardrop,4938 
+armpit peek,4930 +archer (fate),4928 +heartcatch precure!,4926 +shorts pull,4924 +itsumi erika,4923 +c.c.,4923 +dougi,4922 +pyra (xenoblade),4915 +ship,4914 +bismarck (kancolle),4914 +tail bow,4913 +thong bikini,4909 +closed umbrella,4909 +reaching,4907 +megami magazine,4905 +braided bangs,4905 +traditional bowtie,4904 +koha-ace,4903 +yae miko,4897 +blood splatter,4897 +autumn,4891 +samus aran,4889 +knight,4887 +aki minoriko,4887 +sendai (kancolle),4886 +moaning,4885 +onigiri,4880 +infection monitor (arknights),4880 +mordred (fate),4877 +gym shirt,4874 +ooi (kancolle),4872 +military jacket,4871 +hydrangea,4866 +minamoto no raikou (fate),4856 +microphone stand,4856 +fox shadow puppet,4854 +holding card,4849 +hilda (pokemon),4842 +anal beads,4842 +heart print,4834 +open vest,4833 +izumi konata,4828 +firing,4821 +dimples of venus,4818 +cum on tongue,4815 +hieda no akyuu,4809 +ninomae ina'nis,4808 +marnie (pokemon),4805 +thigh holster,4803 +prosthesis,4803 +stomach bulge,4802 +fake screenshot,4802 +single bare shoulder,4793 +caesar anthonio zeppeli,4792 +cowboy hat,4783 +leg warmers,4781 +rose petals,4780 +lance,4779 +pokemon legends: arceus,4778 +holding fork,4774 +breast lift,4773 +akashi (kancolle),4773 +flower knight girl,4772 +convenient leg,4770 +noodles,4769 +paper lantern,4767 +family,4767 +phantasy star,4765 +floating object,4765 +father and daughter,4765 +turn pale,4763 +yuzuki yukari,4761 +solid circle eyes,4760 +roswaal mansion maid uniform,4760 +puzzle & dragons,4759 +jumpsuit,4758 +flower field,4756 +bicycle,4753 +pillow hat,4746 +cross earrings,4743 +unsheathing,4735 +colored tips,4731 +robot joints,4727 +amami haruka,4720 +colored pencil (medium),4719 +train interior,4718 +mind control,4718 +amputee,4717 +fox,4712 +anchovy (girls und panzer),4706 +takao (kancolle),4704 +sekibanki,4700 +skyscraper,4698 +multiple 4koma,4689 +yellow hairband,4687 +pink sweater,4685 +minato aqua,4685 +electric guitar,4684 +peeking out,4681 +musashi (kancolle),4681 +glaring,4677 +vase,4666 +green gloves,4656 +large bow,4655 +yoko littner,4654 +condom in mouth,4654 +watashi ga motenai no wa dou kangaetemo omaera ga warui!,4653 +full armor,4653 +visor,4650 +striped ribbon,4645 +junko (touhou),4640 +leotard under clothes,4639 +old school swimsuit,4638 +chick,4638 +knees to chest,4636 +bone,4632 +pokemon oras,4631 +oripathy lesion (arknights),4631 +disembodied penis,4624 +side-by-side,4622 +holding wand,4622 +gold,4617 +yor briar,4616 +split-color hair,4616 +sleeveless jacket,4613 +heart ahoge,4613 +pink hairband,4612 +looking ahead,4610 +double handjob,4608 +open hoodie,4603 +chest tattoo,4599 +purple leotard,4598 +heart-shaped box,4598 +single wing,4596 +navel hair,4596 +waves,4595 +nero claudius (fate/extra),4590 +okita souji (koha-ace),4582 +knees,4577 +brown bow,4577 +shiranui (kancolle),4576 +strap pull,4574 +yellow nails,4573 +spring onion,4569 +textless version,4562 +solid oval eyes,4562 +elbow pads,4562 +wo-class aircraft carrier,4549 +chest jewel,4548 +plaid bow,4547 +ladle,4547 +kitsune,4546 +frilled choker,4546 +beer mug,4544 +kirby (series),4542 +pixiv sample,4541 +resized,4540 +hibike! 
euphonium,4540 +swimsuit pull,4538 +kousaka honoka,4537 +trident,4536 +pussy peek,4534 +weapon on back,4533 +picture frame,4526 +red wings,4525 +nanami chiaki,4523 +apex legends,4520 +halter dress,4516 +revolver,4510 +d.va (overwatch),4509 +blinking,4507 +haruyama kazunori,4502 +futatsuiwa mamizou,4501 +sazanami (kancolle),4500 +yellow sclera,4498 +side braids,4497 +disgaea,4496 +gourd,4495 +naked ribbon,4486 +flight deck,4483 +super robot wars,4482 +medusa (fate),4481 +aether (genshin impact),4480 +asuna (sao),4479 +red bodysuit,4478 +vehicle focus,4474 +ssss.gridman,4471 +souryuu (kancolle),4471 +sukuna shinmyoumaru,4465 +heart-shaped chocolate,4465 +impossible shirt,4463 +tsukino usagi,4462 +notebook,4461 +blue hoodie,4460 +short necktie,4459 +bra strap,4458 +shushing,4452 +utility pole,4451 +mixed bathing,4451 +hooded coat,4451 +shijou takane,4450 +lowleg bikini,4446 +whistle,4441 +ringlets,4441 +hiei (kancolle),4441 +arthropod girl,4441 +grabbing another's hair,4440 +panty peek,4439 +holding ball,4439 +aiguillette,4438 +striped skirt,4436 +>:(,4434 +akiyama yukari,4429 +suigintou,4424 +samidare (kancolle),4423 +no mouth,4420 +popped collar,4418 +aki shizuha,4418 +mahou shoujo madoka magica: hangyaku no monogatari,4412 +stardust crusaders,4411 +fedora,4410 +ro-500 (kancolle),4405 +crazy eyes,4399 +mythra (xenoblade),4398 +ear covers,4397 +senketsu,4395 +striped scarf,4394 +lock,4393 +holding can,4393 +detached wings,4393 +amiya (arknights),4391 +yamashiro (kancolle),4388 +holding strap,4387 +nitroplus,4384 +reverse trap,4380 +gears,4380 +money,4375 +grabbing own ass,4369 +sailor,4368 +diamond wa kudakenai,4366 +monster girl encyclopedia,4362 +faceless female,4359 +pinstripe pattern,4355 +hands on own knees,4351 +dark blue hair,4350 +pov crotch,4348 +maebari,4345 +brown vest,4344 +garreg mach monastery uniform,4341 +single elbow glove,4338 +hk416 (girls' frontline),4336 +waking up,4335 +kisaragi chihaya,4335 +fur-trimmed dress,4334 +dungeon ni deai wo motomeru no wa machigatteiru darou ka,4331 +reverse grip,4326 +gumi,4323 +projectile cum,4322 +kirishima (kancolle),4322 +verniy (kancolle),4320 +fullmetal alchemist,4320 +heart pasties,4318 +cane,4316 +winter coat,4313 +slime (substance),4313 +showgirl skirt,4312 +white necktie,4310 +orange bikini,4307 +gloria (pokemon),4301 +watson amelia,4300 +dio brando,4294 +evening,4293 +throne,4290 +ice cream cone,4288 +letty whiterock,4287 +tower,4286 +skinny,4278 +business suit,4272 +orange (fruit),4270 +kaguya-sama wa kokurasetai ~tensai-tachi no renai zunousen~,4270 +amagami,4270 +kiryuuin satsuki,4260 +waterfall,4259 +lightning,4257 +polka dot bikini,4256 +battle tendency,4255 +bandaid on nose,4254 +poking,4253 +wedgie,4252 +squatting cowgirl position,4250 +asymmetrical sleeves,4247 +bow hairband,4244 +unbuttoned shirt,4243 +minase iori,4243 +digimon,4242 +print skirt,4241 +naruto shippuuden,4238 +mizuki hitoshi,4238 +blue fire,4230 +poster (object),4229 +inazuma eleven,4229 +has bad revision,4229 +statue,4226 +mordred (fate/apocrypha),4222 +butterfly wings,4221 +aikatsu!,4214 +baby,4213 +monster boy,4208 +dark elf,4203 +crack,4203 +hoshizora rin,4202 +wrist ribbon,4200 +white bloomers,4198 +kikuchi makoto,4198 +crotchless,4194 +thigh grab,4190 +bustier,4190 +crowd,4189 +medium skirt,4185 +magia record: mahou shoujo madoka magica gaiden,4177 +machine gun,4177 +towel on head,4173 +pearl necklace,4173 +streaming tears,4172 +bullet,4171 +collaboration,4166 +cropped,4161 +pocket watch,4158 +blood on hands,4155 +surgical mask,4153 +red 
scrunchie,4152 +boy on top,4151 +boat,4150 +torn dress,4147 +takanashi kiara,4145 +bb (fate),4145 +white neckerchief,4144 +pink necktie,4142 +trench coat,4139 +fate/prototype,4131 +iowa (kancolle),4129 +no pussy,4128 +bear,4126 +yellow shorts,4124 +klee (genshin impact),4124 +whiskers,4122 +red buruma,4120 +kasumi (kancolle),4119 +blue bodysuit,4118 +tomboy,4115 +letter,4115 +ex-keine,4112 +crab,4108 +bound legs,4104 +venti (genshin impact),4101 +open hands,4097 +spread anus,4090 +green pants,4088 +vertical-striped shirt,4084 +oppai loli,4079 +ribbon-trimmed legwear,4078 +fatal fury,4077 +animal on shoulder,4077 +jeanne d'arc (ruler) (fate),4075 +bra visible through clothes,4073 +locked arms,4069 +toilet,4067 +crotch,4064 +bodypaint,4063 +snowman,4061 +skullgirls,4059 +dress pull,4059 +drawing,4059 +kouji (campus life),4058 +nekomata okayu,4056 +kitchen,4049 +yellow theme,4043 +defeat,4043 +vertical-striped thighhighs,4042 +toeless footwear,4037 +imagining,4034 +ganaha hibiki,4034 +saber alter,4032 +locker,4032 +hot,4032 +one-punch man,4030 +landscape,4030 +tile wall,4029 +hecatia lapislazuli,4026 +tsundere,4024 +energy,4024 +flower knot,4023 +yuubari (kancolle),4022 +atago (azur lane),4022 +leg tattoo,4021 +gintama,4021 +ryouou school uniform,4020 +cat cutout,4020 +oshino shinobu,4018 +hand on another's cheek,4017 +orange bowtie,4016 +no legwear,4012 +guilty gear xrd,4012 +wine bottle,4010 +fusion,4010 +splatoon 2,4008 +milk,4008 +imminent rape,4008 +towel around neck,4006 +argyle background,4003 +idolmaster 1,4000 +frilled collar,3999 +fire emblem: the blazing blade,3999 +fat man,3999 +jacket removed,3995 +evil grin,3994 +crescent pin,3991 +soccer uniform,3990 +kiso (kancolle),3990 +shimamura uzuki,3989 +assisted exposure,3986 +pirate hat,3985 +byleth (fire emblem) (female),3980 +steins;gate,3975 +frilled pillow,3975 +gardevoir,3972 +uruha rushia,3969 +yellow gloves,3962 +tiger,3956 +orange dress,3956 +bird tail,3955 +yellow headwear,3951 +two-sided fabric,3951 +checkered skirt,3949 +unsheathed,3948 +long tongue,3947 +cat lingerie,3946 +watanabe you,3944 +taihou (azur lane),3944 +monster musume no iru nichijou,3944 +road sign,3939 +death,3935 +zero two (darling in the franxx),3934 +crow,3931 +sagging breasts,3930 +bat print,3930 +anal tail,3930 +asahina mikuru,3925 +hatching (texture),3920 +phantasy star online 2,3919 +kiseru,3919 +cat hair ornament,3912 +bikini aside,3909 +library,3907 +hand on another's chin,3905 +red (pokemon),3904 +torn skirt,3902 +resident evil,3900 +very dark skin,3899 +muted color,3899 +back cutout,3899 +starfish,3895 +ash ketchum,3891 +alien,3886 +print bow,3882 +yellow panties,3881 +virgin killer sweater,3880 +you gonna get raped,3877 +suite precure,3873 +1koma,3871 +green bowtie,3870 +sleeves pushed up,3866 +morrigan aensland,3865 +blunt ends,3865 +on desk,3864 +fingers together,3862 +teacher,3860 +sideways mouth,3859 +long bangs,3858 +nintendo switch,3853 +mask removed,3852 +heart background,3851 +osomatsu-san,3850 +defloration,3848 +bikini tan,3847 +torn pants,3844 +gundam build fighters,3844 +1980s (style),3844 +princess peach,3841 +cloud strife,3841 +costume,3834 +cropped shirt,3828 +soap bubbles,3827 +soul gem,3825 +raised eyebrow,3825 +game console,3825 +hiryuu (kancolle),3824 +itomugi-kun,3814 +uchiwa,3813 +halftone background,3811 +formidable (azur lane),3810 +planted sword,3809 +meltryllis (fate),3809 +takagaki kaede,3808 +track pants,3806 +mechanical wings,3806 +prinz eugen (azur lane),3802 +doctor (arknights),3800 +school,3797 +purple necktie,3797 
+stick,3796 +karyl (princess connect!),3795 +shy,3794 +djeeta (granblue fantasy),3794 +texas (arknights),3789 +suitcase,3788 +shirasaka koume,3782 +romaji text,3782 +os-tan,3782 +bubble blowing,3775 +album cover,3775 +spoken blush,3769 +cum pool,3762 +colorful,3762 +unaligned breasts,3761 +pink skin,3761 +tartaglia (genshin impact),3758 +command spell,3758 +taking picture,3757 +gradient eyes,3756 +graf zeppelin (kancolle),3753 +ultimate madoka,3748 +employee uniform,3746 +sagisawa fumika,3745 +musical note hair ornament,3745 +blue wings,3745 +skadi (arknights),3744 +holding shield,3741 +mandarin orange,3740 +armchair,3739 +multicolored jacket,3738 +holding candy,3736 +clover,3736 +red eyeshadow,3734 +dragon wings,3734 +shirogane noel,3733 +lifebuoy,3732 +pouring,3729 +eromanga sensei,3726 +school chair,3725 +obijime,3725 +winter uniform,3724 +gundam seed,3719 +holding paper,3717 +striped pantyhose,3711 +male masturbation,3708 +pointing up,3707 +dc comics,3706 +mountainous horizon,3705 +carrot hair ornament,3704 +v over eye,3702 +silk,3699 +blue butterfly,3694 +feet up,3693 +demon,3692 +floppy ears,3691 +husband and wife,3690 +kagerou project,3686 +coffee mug,3686 +wakasagihime,3685 +tissue box,3682 +backboob,3682 +blood on weapon,3681 +clone,3677 +power armor,3674 +yurucamp,3669 +shingeki no bahamut,3669 +barcode,3669 +heart necklace,3668 +multicolored skirt,3667 +2021,3667 +half-closed eye,3665 +against glass,3662 +blood from mouth,3658 +:>=,3651 +black suit,3649 +little witch academia,3648 +leather jacket,3648 +blue sleeves,3647 +shark girl,3646 +off-topic,3646 +infinite stratos,3646 +houseki no kuni,3645 +ear bow,3644 +food focus,3643 +black cloak,3643 +lamia,3639 +thong leotard,3638 +orange jacket,3638 +holding pen,3638 +snout,3637 +koizumi hanayo,3632 +white camisole,3630 +lake,3629 +the pose,3627 +red rope,3627 +swimming,3626 +multicolored nails,3626 +smokestack,3625 +fisheye,3623 +fur-trimmed boots,3621 +see-through silhouette,3620 +black skin,3619 +jar,3618 +bandaid on knee,3616 +medusa (rider) (fate),3615 +komaeda nagito,3609 +kishin sagume,3607 +paizuri under clothes,3604 +flexible,3603 +red moon,3601 +two-tone skin,3598 +blue buruma,3598 +recording,3594 +puddle,3594 +button gap,3592 +sode,3590 +hoshimachi suisei,3590 +pointing at self,3584 +bremerton (azur lane),3582 +inugami korone,3579 +cat hood,3579 +american flag legwear,3578 +waist bow,3577 +dark nipples,3577 +orange headwear,3575 +mitakihara school uniform,3574 +kyon,3572 +holding own arm,3571 +aiming,3570 +purple hairband,3569 +underboob cutout,3568 +frilled hat,3568 +backwards hat,3568 +toradora!,3567 +thorns,3567 +naka (kancolle),3566 +hestia (danmachi),3564 +kirby,3563 +takatsuki yayoi,3560 +ambiguous gender,3556 +sono bisque doll wa koi wo suru,3555 +osomatsu-kun,3555 +white ascot,3554 +no nipples,3552 +nanamori school uniform,3550 +earth (planet),3548 +cheek-to-cheek,3547 +novelty censor,3543 +blue socks,3542 +lanyard,3541 +demon slayer uniform,3541 +arm hug,3539 +miyamoto musashi (fate),3535 +flaccid,3535 +sakura miko,3534 +shrine,3533 +pince-nez,3532 +grey cardigan,3532 +leaning,3530 +dolphin shorts,3529 +robot ears,3528 +belt collar,3527 +hose,3522 +aerith gainsborough,3520 +shop,3519 +holding cigarette,3519 +rice shower (umamusume),3518 +metal collar,3516 +brown scarf,3515 +rei no himo,3514 +dated commentary,3512 +black rock shooter (character),3511 +fate/hollow ataraxia,3510 +baggy pants,3508 +nude cover,3506 +grey pantyhose,3505 +green legwear,3505 +falling leaves,3504 +digital media player,3503 +paradis 
military uniform,3500 +bowsette,3499 +banana,3499 +sleeveless kimono,3495 +purple bodysuit,3494 +skirt suit,3491 +showering,3491 +spider lily,3488 +spread ass,3486 +yumemi riamu,3484 +shide,3483 +kanzaki ranko,3482 +selene (pokemon),3481 +paimon (genshin impact),3478 +film grain,3475 +what,3474 +belfast (azur lane),3474 +magazine scan,3473 +spider web,3472 +neon trim,3465 +bikini lift,3465 +mini wings,3463 +sirius (azur lane),3462 +pole,3460 +octopus,3460 +mother and son,3460 +super sonico,3458 +rooftop,3457 +cat paws,3456 +aqua bow,3456 +aran sweater,3455 +eula (genshin impact),3453 +promotional art,3450 +horror (theme),3446 +framed,3438 +kunai,3434 +arm around shoulder,3431 +kisume,3428 +improvised gag,3428 +ishtar (fate),3427 +cu chulainn (fate/stay night),3427 +mejiro mcqueen (umamusume),3424 +jougasaki mika,3424 +fishing rod,3423 +seaport princess,3422 +black flower,3422 +yu-gi-oh! duel monsters,3420 +fog,3420 +wizard hat,3418 +multiple wings,3418 +imminent vaginal,3416 +rapier,3415 +2others,3415 +chicken,3412 +tani takeshi,3410 +strapless bikini,3410 +chuunibyou demo koi ga shitai!,3410 +narukami yuu,3409 +mahou sensei negima!,3407 +glowing weapon,3405 +print gloves,3404 +pentagram,3404 +lily white,3403 +brand name imitation,3400 +painting (object),3398 +kissing cheek,3397 +pompadour,3396 +red sailor collar,3394 +photo background,3394 +holding broom,3392 +zero no tsukaima,3390 +;q,3387 +yellow sweater,3386 +zouri,3384 +cage,3384 +netorare,3383 +pokemon frlg,3381 +ikari shinji,3381 +macaron,3380 +kiryu coco,3380 +asuna (blue archive),3373 +interface headset,3372 +brown fur,3371 +trefoil,3370 +purple rose,3370 +laevatein (touhou),3369 +fur hat,3369 +fire emblem: mystery of the emblem,3369 +furude rika,3368 +naked sweater,3367 +pink scarf,3366 +steel ball run,3365 +wrench,3364 +amane kanata,3364 +i-19 (kancolle),3362 +hair tucking,3359 +adjusting swimsuit,3359 +food print,3357 +toe scrunch,3356 +cat boy,3355 +needle,3354 +folded fan,3353 +remote control,3351 +fairy (kancolle),3351 +slave,3350 +v-fin,3348 +tokoyami towa,3345 +energy sword,3345 +date a live,3345 +takebe saori,3344 +leaning on person,3341 +violin,3340 +hands in hair,3338 +demon boy,3338 +open door,3334 +green sailor collar,3334 +american flag,3333 +steepled fingers,3332 +hypnosis,3331 +tekken,3330 +tsushima yoshiko,3329 +paint.net (medium),3324 +butt plug,3322 +too many,3321 +swimsuit aside,3320 +perky breasts,3320 +opaque glasses,3320 +afloat,3318 +snk,3317 +collared jacket,3315 +blue hakama,3315 +gokou ruri,3311 +lower teeth,3310 +idolmaster million live! theater days,3310 +sheep,3309 +zuihou (kancolle),3308 +single sleeve,3308 +lightning bolt symbol,3308 +hololive indonesia,3308 +excalibur (fate/stay night),3307 +cake slice,3307 +pantylines,3306 +mascara,3306 +barnaby brooks jr.,3306 +keyboard (computer),3304 +print dress,3303 +boku wa tomodachi ga sukunai,3303 +holding bowl,3298 +sushi,3297 +2022,3294 +cream,3292 +policewoman,3289 +official wallpaper,3287 +elbow rest,3287 +armpit hair,3287 +sailor moon,3284 +framed breasts,3284 +aqua (konosuba),3284 +drum,3283 +o-ring bottom,3282 +sanya v. 
litvyak,3281 +real life insert,3281 +ereshkigal (fate),3281 +copyright,3281 +lower body,3279 +kuroki tomoko,3276 +maria-sama ga miteru,3275 +calligraphy brush,3274 +contrail,3273 +print bowtie,3269 +explosive,3267 +floor,3265 +left-handed,3263 +american flag dress,3262 +two-tone shirt,3257 +grey hoodie,3257 +oozora subaru,3253 +nitta minami,3253 +kakyoin noriaki,3253 +eiyuu densetsu,3253 +cynthia (pokemon),3253 +vertical-striped legwear,3250 +egg vibrator,3250 +figure,3249 +dungeon and fighter,3249 +duel,3249 +zombie land saga,3247 +transformers,3246 +scope,3245 +sleeves past elbows,3244 +orange ribbon,3242 +food on body,3241 +cammy white,3241 +kanon (kurogane knights),3232 +train,3231 +heart choker,3231 +transformation,3229 +office chair,3228 +orange flower,3227 +fins,3225 +eila ilmatar juutilainen,3221 +stylus,3211 +loose necktie,3211 +unicorn (azur lane),3209 +medical eyepatch,3209 +suction cups,3208 +frilled gloves,3205 +map,3201 +circle,3201 +character profile,3201 +commissioner upload,3200 +shiny pokemon,3199 +egyptian,3197 +staring,3196 +hand under clothes,3196 +love live! school idol festival,3192 +fanbox reward,3191 +elden ring,3189 +concept art,3189 +test tube,3187 +paper bag,3184 +string of fate,3181 +frilled legwear,3181 +animal crossing,3181 +kaburagi t. kotetsu,3178 +ahri (league of legends),3176 +spacecraft,3173 +lion tail,3172 +dancer,3172 +mouse girl,3170 +uneven eyes,3168 +grey vest,3168 +bra removed,3166 +capri pants,3162 +flower-shaped pupils,3161 +ichinose shiki,3158 +green leotard,3154 +striped tail,3152 +furisode,3152 +binoculars,3152 +bandaids on nipples,3151 +final fantasy xi,3149 +rudder footwear,3147 +i-58 (kancolle),3147 +sonic (series),3146 +lemon,3146 +horn bow,3146 +envelope,3146 +stocking (psg),3145 +yahari ore no seishun lovecome wa machigatteiru.,3141 +striped bra,3141 +reverse outfit,3141 +maid bikini,3141 +after anal,3141 +holding towel,3138 +lucina (fire emblem),3136 +two-tone fur,3131 +medicine melancholy,3126 +corrin (fire emblem),3125 +evangelion: 3.0 you can (not) redo,3124 +ookami mio,3123 +basketball,3122 +rod of remorse,3121 +milestone celebration,3120 +lunasa prismriver,3119 +hand on own arm,3119 +tight pants,3116 +sennen sensou aigis,3116 +daiwa scarlet (umamusume),3115 +kotomine kirei,3114 +baozi,3110 +bad feet,3110 +serena (pokemon),3108 +partially undressed,3108 +open dress,3108 +middle finger,3107 +yagami hayate,3106 +sake bottle,3106 +candy apple,3106 +multiple crossover,3104 +blue scrunchie,3102 +bkub,3102 +pizza,3101 +hinata hajime,3101 +footwear bow,3101 +alice in wonderland,3100 +maya (kancolle),3099 +rubber duck,3098 +machikado mazoku,3098 +hooded capelet,3098 +hair beads,3097 +scan artifacts,3095 +meiji schoolgirl uniform,3095 +kamisato ayaka,3093 +uranohoshi school uniform,3091 +pussy juice stain,3090 +commander (azur lane),3090 +pink leotard,3089 +breast slip,3088 +chewing gum,3087 +arm belt,3087 +naganami (kancolle),3086 +father and son,3086 +taur,3084 +banner,3084 +pussy juice trail,3083 +haruno sakura,3083 +blue capelet,3083 +mismatched gloves,3080 +kokkoro (princess connect!),3080 +swim trunks,3079 +pumps,3078 +hair strand,3078 +covered collarbone,3076 +the legend of zelda: twilight princess,3074 +fur-trimmed headwear,3074 +yuri!!! 
on ice,3073 +stage,3073 +happy halloween,3073 +nightcap,3072 +no shirt,3071 +pink shorts,3070 +pokemon masters ex,3069 +finger gun,3069 +yamakaze (kancolle),3066 +pokemon rse,3065 +pink cardigan,3064 +squirrel ears,3061 +holding pokemon,3061 +digimon (creature),3061 +bandage over one eye,3060 +blue pantyhose,3057 +official alternate hairstyle,3056 +parfait,3055 +holding mask,3055 +submerged,3054 +ouma kokichi,3054 +anastasia (idolmaster),3051 +shore,3049 +holding camera,3049 +son goku,3048 +kindergarten uniform,3048 +partially unzipped,3045 +hiiragi tsukasa,3045 +background text,3043 +gradient sky,3038 +penis on face,3037 +drop shadow,3037 +flashing,3036 +shotgun,3035 +grabbing another's ass,3032 +murasame (kancolle),3029 +lineup,3025 +heart-shaped eyewear,3025 +gothic,3025 +kitagawa marin,3023 +project sekai,3022 +goggles around neck,3022 +ejaculating while penetrated,3022 +arm rest,3020 +multiple hair bows,3018 +duck,3017 +gym shorts,3016 +deep penetration,3014 +obliques,3013 +gae bolg (fate),3013 +riding crop,3012 +multiple rings,3007 +kirito,3006 +hairpods,3005 +lip biting,3002 +tony taka,3001 +dress bow,3001 +colored pubic hair,3001 +reverse suspended congress,2998 +ear ornament,2998 +polos crown,2996 +shirogane naoto,2992 +stirrup legwear,2991 +energy gun,2990 +red headband,2989 +cuts,2989 +yukihana lamy,2988 +stomach tattoo,2987 +mercy (overwatch),2986 +forked eyebrows,2984 +tomoe (symbol),2983 +tales of vesperia,2983 +piggyback,2979 +kingdom hearts,2979 +green bra,2979 +pokemon adventures,2978 +lying on person,2977 +doremy sweet,2977 +throwing,2975 +kuromorimine military uniform,2975 +white vest,2967 +leotard aside,2967 +hair twirling,2966 +harpy,2963 +florence nightingale (fate),2963 +sakurauchi riko,2962 +paint splatter,2962 +upright straddle,2961 +japari symbol,2960 +nia teppelin,2959 +edelgard von hresvelg,2959 +holding another's arm,2957 +hiding,2954 +leg hair,2952 +higuchi madoka,2952 +black armor,2952 +obiage,2951 +single kneehigh,2948 +misty (pokemon),2948 +artbook,2948 +scar on chest,2947 +pink neckerchief,2947 +frilled swimsuit,2947 +sailor bikini,2946 +clothes grab,2946 +toy,2944 +tablet pc,2944 +body markings,2944 +kujo jolyne,2943 +bat hair ornament,2943 +metal gear (series),2942 +reverse bunnysuit,2941 +jack the ripper (fate/apocrypha),2939 +suit jacket,2937 +squirrel tail,2935 +shimada arisu,2935 +katyusha (girls und panzer),2934 +giorno giovanna,2933 +plaid dress,2928 +kay (girls und panzer),2928 +trainer (umamusume),2927 +irrumatio,2927 +w arms,2926 +prosthetic arm,2926 +hands in opposite sleeves,2926 +asymmetrical footwear,2925 +holding underwear,2924 +chips (food),2923 +darkness,2922 +ump45 (girls' frontline),2921 +shovel,2921 +bandaged hand,2921 +earbuds,2917 +hyouka,2916 +seagull,2915 +anus peek,2915 +makima (chainsaw man),2914 +hands on another's shoulders,2914 +galaga,2914 +z1 leberecht maass (kancolle),2910 +yellow scarf,2909 +holding lollipop,2909 +gold ship (umamusume),2906 +sheryl nome,2905 +bone hair ornament,2905 +audible speech,2904 +purple vest,2900 +hands on another's face,2897 +grey sailor collar,2897 +egasumi,2896 +audible music,2896 +midoriya izuku,2893 +fresh precure!,2891 +blue-framed eyewear,2890 +penis awe,2887 +purple umbrella,2886 +uzumaki naruto,2885 +sandwich,2884 +slime girl,2883 +spitroast,2877 +tomato,2876 +holding box,2874 +magazine (weapon),2873 +arm around waist,2872 +ayanami (azur lane),2870 +viewfinder,2869 +futaba anzu,2868 +orange bodysuit,2866 +layered bikini,2864 +speaker,2863 +street fighter v,2862 +megaphone,2861 
+shikishi,2860 +bleeding,2860 +taihou (kancolle),2859 +shenhe (genshin impact),2858 +ouro kronii,2858 +pecorine (princess connect!),2857 +z3 max schultz (kancolle),2856 +alternate legwear,2854 +miniboy,2853 +loose belt,2850 +feather boa,2845 +treble clef,2844 +xiao (genshin impact),2841 +takarada rikka,2840 +miura azusa,2837 +fire emblem: the binding blade,2837 +lion girl,2836 +iskandar (fate),2836 +bamboo forest,2836 +waver velvet,2835 +kuma (kancolle),2834 +ruu (tksymkw),2830 +evangelion: 2.0 you can (not) advance,2829 +grenade,2828 +chest harness,2828 +humanization,2827 +licking finger,2826 +pancake,2824 +surreal,2821 +grinding,2821 +gas mask,2821 +spoken squiggle,2820 +letterman jacket,2820 +pubic hair peek,2819 +jacket around waist,2819 +soda can,2816 +fighting,2816 +plantar flexion,2815 +goggles on headwear,2812 +bubble tea,2812 +single leg pantyhose,2810 +murasaki shion,2808 +finger on trigger,2808 +shirt tug,2807 +newspaper,2806 +shishiro botan,2804 +nami (one piece),2804 +hyuuga hinata,2804 +horned headwear,2804 +spice and wolf,2801 +scowl,2800 +saihara shuuichi,2800 +lifting person,2800 +akigumo (kancolle),2800 +fire emblem: path of radiance,2799 +skates,2798 +lily pad,2798 +magazine cover,2797 +vegetable,2796 +ghost tail,2796 +bird ears,2796 +arms around neck,2796 +multi-tied hair,2795 +bunny print,2794 +shooting star,2793 +green neckwear,2793 +orange hairband,2792 +beer can,2791 +traditional youkai,2790 +bag charm,2790 +a1,2790 +warspite (kancolle),2789 +jonathan joestar,2789 +pokephilia,2787 +hagiwara yukiho,2787 +midair,2785 +samurai spirits,2784 +guro,2784 +5koma,2784 +mario,2782 +i-401 (kancolle),2782 +tokai teio (umamusume),2781 +narmaya (granblue fantasy),2781 +guilty gear strive,2781 +shirai kuroko,2780 +deepthroat,2780 +2020,2779 +mikasa ackerman,2777 +hair tie in mouth,2777 +in tree,2774 +spoken interrobang,2773 +snowflake hair ornament,2773 +piano,2773 +blade (galaxist),2772 +jougasaki rika,2770 +izumi sagiri,2770 +in box,2770 +extra,2770 +leaf print,2769 +eevee,2768 +chloe von einzbern,2768 +fusou (kancolle),2766 +ceiling,2764 +tiger girl,2762 +ryuuguu rena,2762 +food on head,2762 +pith helmet,2761 +yellow rose,2760 +uraraka ochako,2760 +mochi,2760 +left-to-right manga,2759 +dark souls (series),2759 +open-chest sweater,2758 +crotch rope,2758 +pyonta,2756 +coat on shoulders,2755 +watson cross,2754 +thumb ring,2754 +elizabeth bathory (fate),2754 +aisaka taiga,2753 +idolmaster side-m,2752 +frying pan,2752 +after fellatio,2752 +heart in eye,2751 +uneven gloves,2750 +reizei mako,2750 +kanzashi,2750 +thinking,2749 +yuki miku,2748 +ribbed dress,2748 +non-human admiral (kancolle),2745 +boo tao (genshin impact),2745 +blue gemstone,2745 +motoori kosuzu,2743 +undertale,2742 +stained glass,2742 +fire emblem: radiant dawn,2742 +ranma 1/2,2741 +humanoid robot,2740 +matsunaga kouyou,2739 +common raccoon (kemono friends),2739 +bodysuit under clothes,2738 +bike shorts under skirt,2738 +hand over own mouth,2736 +working!!,2735 +winged arms,2735 +midorikawa nao,2734 +vertical-striped dress,2732 +hand on head,2732 +hagoromo,2732 +gloom (expression),2732 +panda,2731 +stitched,2729 +yugake,2727 +neck ruff,2726 +heart in mouth,2724 +ankle ribbon,2724 +orange sky,2721 +raccoon girl,2720 +flip-flops,2720 +hunter x hunter,2718 +usami sumireko,2717 +making-of available,2715 +breastless clothes,2715 +object namesake,2714 +city lights,2713 +leon (pokemon),2712 +caterpillar tracks,2711 +sparkle background,2708 +sheep ears,2708 +kumano (kancolle),2705 +lelouch lamperouge,2702 +hoto cocoa,2702 
+sekaiju no meikyuu,2701 +chemise,2701 +taimanin (series),2700 +christmas ornaments,2700 +writing,2699 +peeing self,2699 +satonaka chie,2698 +american flag bikini,2698 +barbara (genshin impact),2697 +torn bodysuit,2696 +lotus,2696 +button badge,2696 +red belt,2694 +numbered,2694 +flats,2692 +sayonara zetsubou sensei,2691 +puppet,2690 +debris,2690 +ruby rose,2687 +holding axe,2687 +kise yayoi,2685 +holo,2682 +spill,2680 +public nudity,2680 +four-leaf clover,2680 +beatrice (umineko),2680 +mankanshoku mako,2679 +remote control vibrator,2678 +chained,2677 +bird on head,2677 +leopard print,2676 +kote,2675 +bar (place),2675 +purple pantyhose,2674 +rubber boots,2673 +monster hunter (character),2673 +cat ear panties,2673 +shoe dangle,2671 +shared clothes,2671 +pretty (series),2671 +shakugan no shana,2670 +orange necktie,2670 +higashikata josuke,2670 +collage,2670 +wide ponytail,2669 +pink scrunchie,2669 +gekkoukan high school uniform,2668 +covering eyes,2667 +diffraction spikes,2665 +nanashi mumei,2664 +cupcake,2663 +white scrunchie,2662 +paint,2662 +grey panties,2662 +hyuuga (kancolle),2661 +lotion,2660 +maekawa miku,2658 +aoba (kancolle),2656 +kama (fate),2654 +dragon ball super,2653 +striker unit,2651 +raihan (pokemon),2651 +pokemon sm (anime),2651 +naga u,2651 +lycoris recoil,2651 +shiranui mai,2650 +orange nails,2649 +holding scythe,2649 +long eyelashes,2644 +charlotte (madoka magica),2644 +shell casing,2643 +bento,2642 +red sleeves,2641 +happinesscharge precure!,2641 +emilia (re:zero),2641 +tokitsukaze (kancolle),2639 +akizuki ritsuko,2639 +shoulder carry,2637 +ear ribbon,2635 +cheese,2633 +paw shoes,2632 +gaping,2630 +yellow kimono,2629 +kiyohime (fate),2629 +thank you,2628 +arm hair,2627 +tunic,2624 +gender request,2624 +layered clothes,2622 +breast curtains,2622 +yunomi,2621 +stuffed cat,2618 +pinching,2618 +moonlight,2617 +keyhole,2617 +rabbit hood,2614 +medal,2614 +lace panties,2614 +ch'en (arknights),2613 +ace attorney,2613 +gem uniform (houseki no kuni),2612 +earclip,2610 +arms under breasts,2610 +world war ii,2606 +hip bones,2606 +hallway,2606 +green scarf,2606 +urethra,2605 +luna child,2605 +sword of hisou,2604 +au ra,2604 +pokemon rgby,2603 +exercise,2603 +candy cane,2602 +robin (fire emblem),2600 +mobile suit,2600 +domino mask,2600 +quad tails,2598 +flat ass,2598 +fetal position,2598 +fennec (kemono friends),2598 +pirate,2597 +goldfish,2597 +fate/prototype: fragments of blue and silver,2597 +corruption,2596 +reindeer antlers,2590 +on shoulder,2590 +star sapphire,2588 +star in eye,2588 +jean (genshin impact),2588 +wedding,2583 +multiple bows,2583 +furry with non-furry,2583 +hayate no gotoku!,2582 +stethoscope,2581 +jealous,2581 +oboro (kancolle),2580 +black ascot,2579 +abstract,2579 +kousaka kirino,2578 +hand in panties,2578 +tomoe gozen (fate),2576 +quarter note,2576 +cross hair ornament,2576 +unzan,2575 +holding dagger,2573 +padlock,2572 +pudding,2571 +light green hair,2571 +huge nipples,2570 +boko (girls und panzer),2566 +helmet removed,2565 +fashion,2565 +year of the tiger,2564 +ashigara (kancolle),2564 +volleyball,2563 +single sidelock,2561 +satsuki (kancolle),2561 +leg lock,2561 +illustrious (azur lane),2559 +bangs pinned back,2559 +orange footwear,2557 +kyuubi,2557 +single detached sleeve,2553 +dice,2553 +snot,2551 +kogal,2551 +made in abyss,2550 +pant suit,2549 +ladder,2549 +latin cross,2548 +electric fan,2548 +bea (pokemon),2548 +yohane,2547 +aiming at viewer,2546 +hand on another's chest,2545 +seamed legwear,2542 +jintsuu (kancolle),2542 +horseshoe ornament,2541 +ears 
down,2541 +brush,2540 +mole on thigh,2539 +mogami (kancolle),2539 +kanon,2539 +uzuki (kancolle),2538 +zombie pose,2537 +weiss schnee,2537 +fischl (genshin impact),2536 +over-rim eyewear,2535 +year of the ox,2534 +torn sleeves,2534 +kara no kyoukai,2534 +skirt tug,2533 +kiyoshimo (kancolle),2533 +pink apron,2530 +crescent earrings,2530 +purple wings,2529 +urakaze (kancolle),2526 +qing guanmao,2526 +mega man (classic),2526 +green thighhighs,2526 +makise kurisu,2525 +hidamari sketch,2525 +2018,2525 +kagerou (kancolle),2524 +incredibly absurdres,2524 +flaming eye,2523 +coattails,2523 +ramen,2522 +pillarboxed,2522 +happy valentine,2522 +burn scar,2522 +stone ocean,2521 +go! princess precure,2519 +subtitled,2518 +profanity,2518 +trash can,2516 +bullpup,2516 +2016,2516 +duffel bag,2515 +paddle,2514 +nakiri ayame,2514 +soldier,2513 +shower head,2513 +purple choker,2513 +feather trim,2513 +two-tone skirt,2512 +lappland (arknights),2511 +looking at penis,2509 +turtle shell,2508 +wet swimsuit,2507 +vibrator under clothes,2507 +futa (nabezoko),2507 +sunburst,2506 +st. gloriana's school uniform,2506 +wide spread legs,2505 +natsuiro matsuri,2505 +bra peek,2505 +mitsudomoe (shape),2504 +yellow fur,2503 +nitocris (fate),2503 +green hairband,2503 +takami chika,2498 +finger to cheek,2498 +isokaze (kancolle),2497 +fur-trimmed hood,2497 +sunny milk,2496 +owl,2496 +orange scrunchie,2496 +2019,2496 +fairy tail,2495 +emiya kiritsugu,2494 +tachibana arisu,2491 +negligee,2491 +asuna (bunny) (blue archive),2491 +doorway,2490 +soccer ball,2489 +ainu clothes,2489 +soulcalibur,2484 +kallen stadtfeld,2484 +yorigami shion,2483 +melty blood,2483 +spade (shape),2482 +miyafuji yoshika,2482 +alternate universe,2482 +seashell,2481 +artoria pendragon (lancer) (fate),2481 +erica hartmann,2480 +come hither,2480 +shiranui flare,2479 +crossed ankles,2479 +clitoral stimulation,2479 +heart pillow,2478 +shark,2477 +strap between breasts,2476 +eyepatch bikini,2476 +breast tattoo,2476 +rose print,2475 +mismatched bikini,2475 +coffee cup,2474 +chest sarashi,2474 +m.u.g.e.n,2472 +utau,2471 +mobile suit gundam,2471 +anime coloring,2471 +sleepwear,2470 +pig,2470 +bass guitar,2470 +star ocean,2469 +alternate headwear,2469 +pastry,2468 +exusiai (arknights),2465 +hand on another's hip,2464 +unryuu (kancolle),2463 +brown bag,2462 +red sclera,2459 +wrestling outfit,2458 +shibari over clothes,2458 +fire emblem: genealogy of the holy war,2458 +nipple rings,2454 +cross-shaped pupils,2453 +red gemstone,2452 +locker room,2452 +diagonal bangs,2452 +brown theme,2452 +untied panties,2451 +snowflake print,2451 +lifted by another,2450 +carnelian,2450 +assault lily,2450 +suspension,2449 +nessa (pokemon),2449 +spandex,2447 +koshimizu sachiko,2447 +photo-referenced,2446 +corrin (fire emblem) (female),2446 +bandaid on pussy,2446 +weibo username,2445 +sigh,2445 +blue belt,2445 +raincoat,2442 +g-string,2442 +diarmuid ua duibhne (lancer) (fate),2442 +blade,2441 +multiple horns,2440 +rebecca (keinelove),2439 +hatsuyuki (kancolle),2439 +embers,2439 +skirt around one leg,2438 +clothes in mouth,2437 +orange gloves,2436 +orange choker,2436 +futari wa precure,2436 +final fantasy vi,2435 +two-footed footjob,2433 +mismatched footwear,2433 +2015,2432 +see-through shirt,2431 +sheep girl,2430 +hishikawa rikka,2429 +talons,2425 +shirt removed,2425 +lights,2424 +flame-tipped tail,2424 +anya (spy x family),2423 +chef hat,2421 +warugaki (sk-ii),2420 +arm held back,2420 +shaved ice,2419 +ido (teketeke),2418 +yellow scrunchie,2417 +sweets,2417 +hayami kanade,2417 +naoetsu high 
school uniform,2416 +head-mounted display,2416 +doll hug,2415 +orgy,2414 +holding panties,2414 +black cardigan,2414 +giving,2413 +walk-in,2412 +cold,2412 +dizzy (guilty gear),2411 +sun symbol,2410 +mallet,2409 +zero suit,2407 +nakano nino,2407 +magazine (object),2407 +frilled kimono,2407 +wa maid,2406 +shiratsuyu (kancolle),2406 +hisahiko,2405 +cigar,2405 +akitsu maru (kancolle),2405 +striped jacket,2404 +green coat,2404 +mika (girls und panzer),2402 +asakura ryouko,2402 +simon (ttgl),2400 +kitauji high school uniform,2399 +horizontal pupils,2399 +calendar (medium),2399 +grey coat,2397 +white robe,2396 +spacesuit,2394 +sitting on face,2394 +skull and crossbones,2392 +mouth drool,2392 +ooarai military uniform,2391 +scar on nose,2390 +ixy,2390 +white theme,2389 +oshiro project,2389 +houjou satoko,2389 +taigei (kancolle),2388 +tokyo ghoul,2387 +scar on arm,2386 +frilled hair tubes,2386 +arm around neck,2386 +typo,2385 +project diva (series),2384 +2017,2384 +ram (re:zero),2383 +checkered scarf,2383 +holding person,2382 +tuxedo,2381 +cum on boy,2380 +bow (bhp),2380 +holding flag,2378 +hand grab,2377 +undersized clothes,2375 +ribbed shirt,2375 +clip studio paint (medium),2375 +wa2000 (girls' frontline),2374 +bloom,2374 +pink bodysuit,2373 +gertrud barkhorn,2373 +nervous smile,2372 +oda nobunaga (fate),2370 +green sweater,2370 +aoki reika,2369 +prone bone,2367 +swirl lollipop,2363 +look-alike,2363 +alice (alice in wonderland),2362 +updo,2361 +uneven sleeves,2361 +text-only page,2361 +cum on stomach,2361 +rice bowl,2360 +puckered lips,2358 +suspender shorts,2357 +kirisawa juuzou,2357 +sword girls,2355 +hairy,2355 +muvluv,2354 +red pupils,2353 +o3o,2351 +callie (splatoon),2349 +sakamata chloe,2348 +dragon quest iii,2348 +don-chan (usada pekora),2348 +toothbrush,2347 +dog boy,2347 +princess,2346 +futami mami,2346 +hand on another's back,2345 +red one-piece swimsuit,2344 +atelier ryza,2342 +tombstone,2341 +bit gag,2341 +rubbing eyes,2339 +open shorts,2339 +kirima syaro,2339 +plastic bag,2338 +super saiyan,2337 +dd (ijigendd),2337 +yoshida yuuko (machikado mazoku),2333 +screaming,2332 +nakano miku,2331 +white feathers,2330 +louise francoise le blanc de la valliere,2330 +gundam seed destiny,2330 +thigh sex,2328 +d-pad,2328 +millipen (medium),2327 +multiple belts,2326 +mohawk,2326 +spotlight,2325 +orc,2325 +convenient arm,2325 +bird girl,2325 +jackal ears,2323 +club (weapon),2323 +nape,2322 +bomber jacket,2322 +hand on another's arm,2321 +shouji,2320 +flower pot,2320 +dark magician girl,2320 +orange theme,2319 +animal penis,2319 +accelerator (toaru majutsu no index),2318 +patterned background,2316 +nichijou,2316 +room,2314 +on lap,2314 +beach towel,2314 +hair spread out,2313 +female pov,2313 +tam o' shanter,2312 +still life,2312 +santa bikini,2311 +power symbol,2310 +carpet,2309 +harusame (kancolle),2308 +carrot necklace,2307 +creator connection,2306 +dusk,2305 +nagisa kaworu,2304 +tied up (nonsexual),2303 +small penis,2303 +black headband,2303 +saratoga (kancolle),2302 +flask,2302 +asphyxiation,2302 +yellow legwear,2301 +egyptian clothes,2301 +tally,2300 +heart tattoo,2300 +hatsuzuki (kancolle),2299 +brown ribbon,2299 +red sash,2296 +cameo,2295 +bakugou katsuki,2295 +phimosis,2293 +iesupa,2291 +hadanugi dousa,2290 +path,2289 +golden kamuy,2289 +talisman,2288 +evening gown,2288 +holding smoking pipe,2287 +makinami mari illustrious,2286 +linea alba,2286 +hexagram,2286 +disembodied head,2286 +pokemon (classic anime),2285 +hamu koutarou,2283 +:i,2283 +tachibana kanade,2281 +smiley face,2281 +number 
tattoo,2281 +kamijou touma,2280 +saenai heroine no sodatekata,2279 +holding pom poms,2277 +side cutout,2276 +brave witches,2276 +bow bikini,2276 +red pantyhose,2274 +multiple earrings,2274 +diagonal-striped bow,2274 +sunrise,2272 +chihuri,2271 +arcueid brunestud,2271 +kou hiyoyo,2270 +kani biimu,2270 +triangle,2269 +merlin prismriver,2268 +smell,2266 +takao (azur lane),2265 +shana,2265 +faux traditional media,2265 +roman numeral,2264 +futa with male,2264 +steam censor,2263 +shimakaze (kancolle) (cosplay),2261 +police hat,2260 +hill,2260 +condom on penis,2260 +re-class battleship,2259 +broken glass,2259 +curry,2257 +michishio (kancolle),2256 +katsushika hokusai (fate),2256 +monkey tail,2255 +cover image,2255 +mouse (computer),2254 +russian text,2253 +drumsticks,2253 +diadem,2253 +ufo,2251 +ichimi,2251 +hamster,2251 +diving mask,2251 +shinrabanshou,2247 +cyclops,2246 +ushiromiya battler,2245 +purple sleeves,2245 +multicolored legwear,2244 +tree shade,2243 +retrofit (azur lane),2242 +covered abs,2241 +yura (kancolle),2240 +shinryaku! ikamusume,2240 +sink,2238 +pink-framed eyewear,2238 +ranka lee,2237 +marie (splatoon),2237 +the legend of zelda: ocarina of time,2235 +fourth wall,2235 +u-511 (kancolle),2234 +bandaid on cheek,2234 +desert,2233 +tri tails,2230 +surtr (arknights),2230 +blue lips,2230 +hino akane (smile precure!),2229 +shuuchiin academy school uniform,2228 +short sword,2226 +new game!,2226 +green choker,2226 +chainsaw,2226 +lyrica prismriver,2224 +karin (blue archive),2224 +big belly,2223 +trick or treat,2222 +kiana kaslana,2221 +bad hands,2221 +quill,2219 +ice cube,2219 +shared scarf,2218 +aqua bikini,2217 +cyberpunk,2215 +striped socks,2214 +houtengeki,2213 +fur coat,2213 +breast bondage,2213 +kashiwazaki sena,2212 +eraser,2212 +cinderella girls gekijou,2211 +kanna kamui,2210 +anal fingering,2210 +string of flags,2209 +black scrunchie,2209 +tennis uniform,2208 +octarian,2207 +agnes tachyon (umamusume),2207 +scratches,2205 +torch,2204 +strangling,2204 +microdress,2203 +kamado nezuko,2203 +kantoku,2201 +downscaled,2200 +spider web print,2199 +cervix,2199 +beak,2199 +street fighter iv (series),2198 +melting,2198 +kaenbyou rin (cat),2198 +grey sky,2198 +spatula,2197 +mutsuki (kancolle),2196 +isuzu hana,2196 +large tail,2195 +kyonko,2193 +yes! 
precure 5,2192 +driving,2192 +satchel,2191 +ankle cuffs,2191 +miyu edelfelt,2190 +jellyfish,2190 +finger in another's mouth,2190 +phone screen,2189 +chitanda eru,2186 +tail through clothes,2185 +lana (pokemon),2185 +yahagi (kancolle),2181 +ethan (pokemon),2181 +holding arrow,2179 +hikarizaka private high school uniform,2179 +studded belt,2178 +cliff,2178 +yua (checkmate),2177 +tribadism,2177 +praying,2176 +green cape,2176 +d-pad hair ornament,2176 +torn cape,2175 +shoulder spikes,2175 +rectangular eyewear,2175 +holding sheath,2175 +ribbed legwear,2174 +feather-trimmed sleeves,2174 +yukine chris,2172 +black fur,2171 +sideless outfit,2170 +ojipon,2170 +hand on hilt,2169 +samurai,2168 +ritual baton,2168 +purple pants,2168 +laffey (azur lane),2168 +yuuki makoto,2166 +oni mask,2166 +gimp (medium),2166 +toshinou kyouko,2165 +prayer beads,2165 +enterprise (azur lane),2165 +tippy (gochiusa),2164 +restaurant,2164 +red umbrella,2161 +feather hair,2161 +shading eyes,2160 +keyboard (instrument),2160 +t-head admiral,2159 +calendar (object),2159 +insignia,2158 +haguro (kancolle),2158 +crossed bandaids,2158 +matching outfit,2157 +dirty face,2156 +yelan (genshin impact),2155 +sitting on desk,2155 +magical musket,2153 +bb (baalbuddy),2153 +open collar,2152 +kirigiri kyouko,2152 +fine art parody,2151 +final fantasy iv,2151 +creature and personification,2151 +masao,2150 +amamiya ren,2150 +milk bottle,2149 +sleeping upright,2148 +clothed pokemon,2148 +yaegashi nan,2147 +oshiro project re,2147 +open skirt,2147 +dynamax band,2146 +cave,2144 +caught,2144 +plum blossoms,2143 +mechanical parts,2142 +i-8 (kancolle),2142 +horseback riding,2142 +shiseki hirame,2141 +kabedon,2140 +dessert,2140 +crotchless panties,2140 +puyopuyo,2139 +napoleon bonaparte (fate),2139 +drawing tablet,2139 +tamamo cat (fate),2138 +starter pokemon trio,2138 +akai haato,2135 +final fantasy tactics,2133 +ear tag,2133 +curtain grab,2133 +strapless bra,2132 +cooperative fellatio,2131 +hands on own head,2130 +clitoral hood,2130 +racket,2129 +no eyewear,2129 +final fantasy ix,2129 +doki doki literature club,2129 +black bag,2125 +fur-trimmed legwear,2124 +abukuma (kancolle),2124 +broken horn,2123 +shinku,2122 +nisemonogatari,2122 +liquid,2122 +hanekawa tsubasa,2121 +sweater around waist,2120 +hungry clicker,2120 +spoken anger vein,2117 +houjou hibiki,2117 +kousaka tamaki,2115 +flower (symbol),2115 +akizuki (kancolle),2115 +tone (kancolle),2114 +anzio school uniform,2114 +??,2114 +durarara!!,2112 +black camisole,2112 +when you see it,2111 +smelling,2110 +butterfly print,2110 +;3,2110 +seiken densetsu,2108 +amber (genshin impact),2108 +meitantei conan,2107 +black tail,2106 +town,2103 +shark hair ornament,2102 +futami ami,2102 +blue ascot,2102 +wire,2101 +white nails,2101 +st. 
gloriana's military uniform,2100 +tailcoat,2099 +kemono friends 3,2099 +colored tongue,2099 +abubu,2098 +dreadlocks,2097 +festival,2096 +enpera,2096 +bare hips,2096 +abstract background,2096 +white cloak,2095 +light bulb,2094 +lace-trimmed dress,2094 +uchiha sasuke,2093 +carrying over shoulder,2093 +arabian clothes,2093 +reisalin stout,2092 +attack,2092 +tankini,2091 +naked coat,2091 +cat ear headphones,2090 +gundam build fighters try,2089 +constellation,2087 +chain necklace,2082 +hooded cape,2081 +graffiti,2081 +hair horns,2080 +single vertical stripe,2079 +atlanta (kancolle),2079 +akagi (azur lane),2079 +white headband,2077 +whistle around neck,2077 +pulled by another,2077 +pulling,2076 +impossible leotard,2076 +white cat,2075 +aida mana,2075 +mega pokemon,2074 +strappy heels,2073 +jeanne d'arc alter (swimsuit berserker) (fate),2073 +blue oak,2073 +blue cardigan,2073 +mini-hakkero,2072 +video camera,2071 +surfboard,2071 +neck tattoo,2071 +rocket launcher,2070 +purple cape,2070 +enoshima junko,2070 +black rose,2069 +two-tone bikini,2068 +striped pants,2068 +plaid bikini,2068 +hews,2068 +church,2068 +budget sarashi,2068 +tie clip,2067 +patterned clothing,2067 +takanashi rikka,2066 +spread wings,2066 +jet,2066 +chinese knot,2066 +two-tone jacket,2065 +ano hi mita hana no namae wo bokutachi wa mada shiranai.,2065 +junketsu,2064 +yoga pants,2063 +spilling,2063 +black apron,2063 +arch,2062 +minecraft,2061 +sweatband,2060 +gran (granblue fantasy),2059 +bolt action,2059 +hammer and sickle,2058 +omelet,2057 +green lips,2057 +> o,2057 +trumpet,2056 +kisaragi (kancolle),2056 +product placement,2055 +standing on liquid,2054 +shokuhou misaki,2054 +yorigami jo'on,2052 +french fries,2052 +vial,2051 +matsuura kanan,2051 +screencap redraw,2050 +evolutionary line,2050 +youkai watch,2048 +twitching penis,2048 +holding condom,2048 +holding sign,2046 +moss,2045 +egg (food),2045 +briefs,2045 +kurosawa dia,2043 +amagi yukiko,2042 +single hair intake,2041 +turban,2039 +god eater,2039 +glowstick,2039 +fleeing,2038 +overlord (maruyama),2037 +belly chain,2037 +instant loss,2036 +gold chain,2035 +yang xiao long,2034 +qiqi (genshin impact),2034 +denim skirt,2033 +wild arms,2032 +nishikigi chisato,2032 +mudrock (arknights),2032 +street fighter zero (series),2031 +manga (object),2030 +akari (pokemon),2030 +guilty crown,2029 +veranda,2028 +kagari atsuko,2028 +covering nipples,2028 +thigh ribbon,2026 +tegaki,2026 +darker than black,2025 +mayuzumi fuyuko,2024 +bb (fate/extra),2024 +pokemon gsc,2023 +nero claudius (swimsuit caster) (fate),2023 +monocle hair ornament,2023 +stage lights,2022 +ia (vocaloid),2022 +hilbert (pokemon),2021 +eren yeager,2021 +azumanga daioh,2021 +gown,2020 +action,2020 +clothes hanger,2019 +white cardigan,2018 +nail,2018 +cat hat,2018 +tail bell,2017 +spiked armlet,2017 +clothes down,2017 +cat print,2017 +battleship princess,2016 +spread fingers,2015 +nib pen (medium),2015 +naked sheet,2014 +ikamusume,2013 +idolmaster dearly stars,2013 +black lips,2010 +kurumi erika,2009 +energy wings,2009 +slugbox,2008 +nonna (girls und panzer),2008 +ankle socks,2008 +akuma homura,2008 +scissor blade,2007 +rowlet,2007 +kal'tsit (arknights),2007 +grey bow,2007 +finger in own mouth,2007 +winged hat,2006 +mop,2006 +mechanical legs,2006 +shako cap,2004 +pool ladder,2004 +perrine h. 
clostermann,2004 +kappougi,2004 +holding doll,2004 +ueyama michirou,2003 +bowl hat,2003 +aqua skirt,2003 +ump9 (girls' frontline),2002 +purple shorts,2002 +kunikida hanamaru,2002 +hands on headwear,1999 +potato chips,1998 +multicolored wings,1998 +dreaming,1998 +whale,1997 +penis grab,1997 +oversized animal,1996 +minaba hideo,1996 +vertical-striped skirt,1995 +purple scarf,1995 +dust,1995 +reverse upright straddle,1994 +star wars,1993 +spanked,1993 +head grab,1993 +charlotte e. yeager,1993 +vector trace,1991 +shikinami (kancolle),1990 +laurel crown,1990 +see-through legwear,1989 +tusks,1988 +pill,1988 +matara okina,1986 +boris (noborhys),1986 +setsubun,1984 +kamikaze (kancolle),1984 +hand on another's thigh,1983 +fire emblem echoes: shadows of valentia,1983 +false smile,1982 +tongue piercing,1980 +bloodborne,1980 +vocaloid append,1979 +sleep molestation,1979 +akaza akari,1978 +ise (kancolle),1977 +korean clothes,1976 +dyed bangs,1976 +ishiyumi,1975 +impossible bodysuit,1975 +sketchbook,1974 +shuriken,1974 +shiroko (blue archive),1974 +three-dimensional maneuver gear,1973 +raising heart,1973 +long pointy ears,1973 +gusset,1973 +crotch rub,1973 +tamamo no mae (swimsuit lancer) (fate),1972 +hoshizora miyuki,1972 +team rocket,1971 +tales of xillia,1971 +bandaged head,1971 +senjougahara hitagi,1970 +ribbon-trimmed skirt,1970 +fur scarf,1970 +zounose,1969 +sweet potato,1969 +kid icarus,1969 +yu-gi-oh! gx,1967 +fire emblem: the sacred stones,1967 +boxing gloves,1966 +blur censor,1965 +jun'you (kancolle),1963 +two-tone swimsuit,1962 +gambier bay (kancolle),1962 +wind chime,1961 +turtle,1961 +han juri,1961 +nakano yotsuba,1960 +nose bubble,1959 +car interior,1959 +bronya zaychik,1958 +bad proportions,1958 +nengajou,1957 +hands on lap,1957 +teenage,1954 +plunging neckline,1953 +miyuki (kancolle),1953 +feather earrings,1953 +prehensile hair,1952 +child on child,1951 +kujikawa rise,1950 +inflatable toy,1949 +diluc (genshin impact),1949 +mechanical horns,1947 +beamed sixteenth notes,1947 +tsukudani (coke-buta),1946 +gangut (kancolle),1946 +devil may cry (series),1946 +ankle lace-up,1946 +afro,1946 +promare,1945 +flower earrings,1944 +anastasia (fate),1943 +clothed animal,1942 +glowing sword,1941 +baseball,1941 +wife and wife,1940 +taiyaki,1939 +rariatto (ganguri),1939 +poptepipic,1939 +wooden bucket,1938 +two-tone gloves,1938 +pink sleeves,1938 +grimace,1937 +in bucket,1935 +fou (fate),1935 +wreath,1934 +looking at object,1934 +honda mio,1934 +scepter,1933 +symmetry,1932 +dragon's crown,1932 +clothed masturbation,1930 +black feathers,1930 +median furrow,1929 +have to pee,1929 +final fantasy v,1929 +shinki (touhou),1928 +crepe,1928 +warship,1927 +shared umbrella,1927 +fire emblem cipher,1927 +school hat,1925 +comb,1925 +wrestling,1923 +lynette bishop,1923 +kizuna akari,1923 +kaga (azur lane),1923 +aria,1923 +panty (psg),1922 +high contrast,1922 +hassan of serenity (fate),1922 +cube,1922 +birthday cake,1922 +saten ruiko,1921 +picture (object),1921 +clothing request,1920 +wide image,1918 +suiseiseki,1918 +skewer,1918 +on table,1918 +tsuda nanafushi,1917 +raiden mei,1917 +tanuki,1916 +mod3 (girls' frontline),1916 +boxers,1915 +transparent umbrella,1914 +swim briefs,1914 +ohtsuki yui,1914 +pola (kancolle),1913 +shark hood,1911 +ryougi shiki,1910 +octoling,1909 +annotated,1907 +tripping,1906 +purple coat,1906 +drawer,1906 +ryuuou no oshigoto!,1905 +spinning,1904 +lawson,1903 +failure,1903 +cheek pinching,1903 +shino (ponjiyuusu),1902 +gegege no kitarou,1902 +piplup,1901 +borrowed garments,1901 +blue 
serafuku,1901 +bb (swimsuit mooncancer) (fate),1901 +ribbon-trimmed clothes,1900 +hair through headwear,1899 +between thighs,1898 +flute,1897 +drawfag,1896 +union jack,1893 +barrel,1893 +sangonomiya kokomi,1892 +yu mei-ren (fate),1890 +stray pubic hair,1890 +pillow grab,1890 +corpse,1890 +bunny pose,1890 +squid,1889 +tsunako,1888 +sports bikini,1887 +red sky,1887 +orange kimono,1887 +counter,1886 +clover hair ornament,1886 +turnaround,1885 +galaxy angel,1884 +back tattoo,1884 +sayori (neko works),1883 +la+ darknesss,1883 +tokkuri,1882 +slime (creature),1882 +avatar (ff11),1882 +i-168 (kancolle),1881 +covered face,1881 +lyra (pokemon),1880 +katsuki yuuri,1880 +hex maniac (pokemon),1880 +sonozaki mion,1879 +aoki hagane no arpeggio,1879 +lion,1878 +irisviel von einzbern,1877 +mating press,1875 +utawarerumono,1874 +spider girl,1874 +harvin,1874 +toast,1873 +juice box,1873 +ikkitousen,1873 +cheek poking,1873 +white male underwear,1872 +soap,1870 +avogado6,1869 +playing with own hair,1867 +holding cat,1867 +two-tone bowtie,1866 +face to breasts,1865 +caustics,1864 +hanfu,1863 +cocktail glass,1859 +chaps,1859 +nude filter,1858 +hands on another's head,1858 +black hakama,1858 +kurosawa ruby,1857 +bite mark,1857 +yuugumo (kancolle),1856 +shoebill (kemono friends),1856 +kanikama,1856 +head bump,1855 +gate,1855 +viktor nikiforov,1854 +spear the gungnir,1853 +fu hua,1853 +spider-man (series),1851 +safety pin,1851 +low twin braids,1851 +stone,1850 +porkpie hat,1850 +heart hands duo,1848 +vending machine,1847 +touhou tag dream,1847 +mallow (pokemon),1847 +balcony,1847 +holding basket,1846 +column lineup,1846 +mushoku tensei,1844 +black sports bra,1844 +inkling boy,1843 +polka dot bra,1842 +holding controller,1840 +watashi ni tenshi ga maiorita!,1839 +hand in another's hair,1839 +christa renz,1839 +warcraft,1838 +tatsumaki,1838 +springfield (girls' frontline),1838 +light trail,1838 +akeome,1838 +shadowverse,1837 +nose piercing,1837 +licking nipple,1837 +alarm clock,1837 +volleyball uniform,1834 +vita,1834 +pocky day,1834 +energy ball,1834 +jakuzure nonon,1833 +chart,1833 +daisy,1832 +69,1832 +torn shorts,1831 +grey-framed eyewear,1831 +yellow choker,1829 +makigumo (kancolle),1829 +burning,1829 +sleeveless sweater,1828 +blood stain,1828 +naked cape,1827 +bisexual female,1827 +fried egg,1826 +brown cape,1826 +neptune (neptune series),1825 +nachi (kancolle),1825 +lolita channel,1825 +final fantasy x,1825 +charizard,1825 +under table,1824 +hyur,1824 +squirrel,1823 +macross delta,1823 +head back,1823 +w (arknights),1822 +tissue,1822 +dominatrix,1822 +shoujo kageki revue starlight,1821 +cosmetics,1821 +koyama shigeru,1820 +meltryllis (swimsuit lancer) (fate),1819 +breath of fire,1819 +p-head producer,1818 +diamond-shaped pupils,1818 +orange shorts,1817 +ibaraki douji (fate),1817 +ok sign,1816 +multicolored gloves,1816 +vivio,1815 +industrial pipe,1815 +tsukino mito,1814 +strawberry shortcake,1814 +rainbow order,1814 +mash kyrielight (dangerous beast),1814 +prostitution,1813 +kochou shinobu,1813 +hakos baelz,1813 +bow-shaped hair,1813 +koihime musou,1812 +keychain,1811 +if they mated,1811 +noumi kudryavka,1810 +large hat,1810 +holding paintbrush,1810 +aqua dress,1810 +pointer,1809 +miyamoto frederica,1809 +impregnation,1809 +idolmaster 2,1808 +suzuran (arknights),1807 +pangya,1807 +jack-o' challenge,1807 +checkered necktie,1807 +censored nipples,1807 +mochi au lait,1806 +elizabeth bathory (fate/extra ccc),1806 +orange pekoe (girls und panzer),1805 +train station,1804 +striped sleeves,1804 +snake tail,1804 
+circle cut,1804 +lace bra,1803 +bound ankles,1803 +watering can,1801 +purple sweater,1801 +silence suzuka (umamusume),1800 +nintendo,1800 +lemon slice,1800 +ujimatsu chiya,1798 +shrimp,1798 +nico robin,1798 +homurahara academy school uniform,1798 +checkered kimono,1798 +pokemon journeys,1797 +traffic light,1795 +sofra,1795 +whisk,1794 +tsukishiro saika,1794 +rabbit house uniform,1793 +sakura miku,1792 +pearl (gemstone),1792 +earpiece,1792 +choukai (kancolle),1792 +cyberpunk (series),1791 +kitsunerider,1790 +against tree,1790 +aldnoah.zero,1789 +senran kagura shoujo-tachi no shin'ei,1788 +shared food,1787 +looking over eyewear,1787 +to love-ru darkness,1786 +hairdressing,1786 +rug,1785 +tedeza rize,1784 +magatama necklace,1783 +scar on forehead,1782 +mirko,1782 +finger to own chin,1782 +fluffy,1781 +flower wreath,1781 +drill,1781 +clover print,1781 +mysterious heroine xx (fate),1780 +akagi miria,1780 +skull earrings,1779 +nice nature (umamusume),1779 +harem outfit,1779 +amagi brilliant park,1779 +two-tone ribbon,1777 +lyn (fire emblem),1777 +jeanne d'arc alter santa lily (fate),1777 +yellow bra,1776 +orange scarf,1776 +harem,1776 +:x,1776 +tape gag,1774 +parted hair,1774 +hara (harayutaka),1774 +johnny joestar,1773 +blake belladonna,1773 +multicolored bodysuit,1772 +happy tears,1771 +brendan (pokemon),1771 +board game,1771 +sleeping on person,1770 +sesshouin kiara,1770 +in food,1770 +dixie cup hat,1769 +chasing,1769 +mostima (arknights),1768 +footprints,1768 +star choker,1767 +sidesaddle,1767 +phantom blood,1767 +you're doing it wrong,1766 +m4 carbine,1765 +saki achiga-hen,1764 +refrigerator,1764 +maru-yu (kancolle),1764 +lysithea von ordelia,1764 +ceiling light,1764 +capcom,1762 +multicolored swimsuit,1761 +pale color,1760 +nagatsuki (kancolle),1760 +eye mask,1760 +bandaid on arm,1760 +plaid necktie,1758 +yuzuriha inori,1757 +love plus,1757 +nero claudius (bride) (fate),1754 +monkey,1754 +akamatsu kaede,1754 +melonbooks,1753 +identity censor,1753 +color guide,1753 +soul eater,1752 +fewer digits,1752 +muvluv alternative,1751 +gatling gun,1751 +android 18,1751 +strawberry print,1749 +railroad tracks,1749 +green gemstone,1749 +bomb,1749 +tennis racket,1748 +katsuragi (kancolle),1747 +destruction,1747 +on grass,1746 +faucet,1746 +mizuki makoto,1745 +cloud print,1745 +stripper pole,1744 +crossbow,1744 +beltbra,1744 +shinjou akane,1743 +poncho,1742 +hitachi magic wand,1742 +loose clothes,1740 +sinoalice,1739 +kenzaki makoto,1739 +cafe,1739 +condom packet strip,1738 +air,1738 +atelier ryza 1,1736 +spoken sweatdrop,1735 +minami (colorful palette),1735 +pond,1734 +lilith aensland,1734 +cockpit,1734 +takara miyuki,1733 +cracked skin,1733 +persona 3 portable,1732 +dirty clothes,1732 +kin-iro mosaic,1731 +nia (xenoblade),1730 +gloved handjob,1730 +dragon quest iv,1728 +butler,1728 +aqua bowtie,1727 +haikyuu!!,1726 +grey socks,1725 +bardiche,1725 +anti-materiel rifle,1725 +urusei yatsura,1723 +tomose shunsaku,1723 +ginhaha,1723 +hina (blue archive),1722 +doyagao,1722 +alternate form,1722 +mima (touhou),1721 +holding syringe,1721 +body hair,1721 +akatsuki kirika,1721 +taisa (kari),1720 +index (toaru majutsu no index),1720 +senpai ga uzai kouhai no hanashi,1719 +sakamoto mio,1719 +hiro (darling in the franxx),1719 +spitting,1718 +dove,1718 +looking at phone,1717 +birdcage,1717 +cheating (relationship),1716 +yowane haku,1715 +gojou satoru,1714 +fishing,1714 +bandage on face,1714 +drone,1713 +mabinogi,1711 +full nelson,1711 +tsunomaki watame,1710 +sinon,1710 +love live! 
superstar!!,1710 +koizumi itsuki,1710 +champagne flute,1710 +rosalina,1708 +deviantart username,1708 +shima rin,1707 +after kiss,1707 +tama (kancolle),1706 +aqua ribbon,1706 +aegis (persona),1706 +multicolored bikini,1705 +straitjacket,1704 +shinomiya kaguya,1704 +saiguchi otoufu,1704 +no wings,1704 +alley,1704 +thick arms,1702 +phosphophyllite,1702 +nanashi (nlo),1702 +pepperoni (girls und panzer),1701 +javelin (azur lane),1701 +mace,1700 +lucky beast (kemono friends),1700 +futanari masturbation,1700 +winged helmet,1699 +selfcest,1699 +ink,1699 +morpeko,1698 +purple sky,1697 +last order (toaru majutsu no index),1697 +holding hammer,1697 +noshiro (kancolle),1696 +brown sailor collar,1695 +mixed media,1692 +kyoukaisenjou no horizon,1692 +yellow vest,1691 +tachibana hibiki (symphogear),1691 +impossible dress,1691 +mask pull,1690 +teruzuki (kancolle),1689 +spider,1689 +belko,1689 +sparks,1688 +holding drink,1688 +surprise kiss,1687 +sonia (pokemon),1687 +hilda valentine goneril,1687 +yayoi (kancolle),1684 +enkyo yuuichirou,1684 +purple sailor collar,1683 +frilled capelet,1683 +tickling,1682 +morikubo nono,1681 +freediving,1681 +briefcase,1681 +mimikaki,1680 +cowboy western,1680 +suzukaze (kancolle),1679 +leg belt,1679 +dolphin,1679 +artificial vagina,1679 +variations,1678 +pussy juice puddle,1678 +sticker,1677 +roller skates,1677 +pain,1677 +hands on own thighs,1677 +white horns,1676 +plectrum,1676 +on person,1675 +open bra,1673 +mizuno ami,1673 +fellatio gesture,1673 +pole dancing,1672 +circle name,1672 +alice gear aegis,1672 +tube,1671 +gorget,1671 +brushing hair,1671 +missile,1670 +milkpanda,1670 +hooves,1670 +voyakiloid,1669 +shin megami tensei,1668 +rotom,1667 +eye focus,1667 +eastern dragon,1667 +crescent facial mark,1667 +character print,1667 +blue apron,1667 +yume nikki,1666 +black lagoon,1666 +torpedo tubes,1665 +rubble,1665 +breast curtain,1665 +barcode tattoo,1665 +tanabata,1664 +sitting in tree,1664 +agawa ryou,1664 +kaedehara kazuha,1663 +frilled ribbon,1663 +overskirt,1662 +bird on hand,1662 +kannagi,1661 +deer ears,1661 +yellow-framed eyewear,1660 +the legend of luo xiaohei,1660 +sen no kiseki,1660 +light areolae,1660 +skull print,1659 +pocky kiss,1658 +hanamura yousuke,1658 +fine fabric emphasis,1658 +buck teeth,1658 +shinsengumi,1657 +gotland (kancolle),1657 +pripara,1655 +kuromorimine school uniform,1655 +kirino ranmaru,1655 +signum,1654 +miyo (ranthath),1654 +patch,1653 +makoto nanaya,1653 +implied futanari,1653 +holly,1653 +kuroshio (kancolle),1652 +confession,1652 +hisona (suaritesumi),1651 +breasts on head,1651 +beam rifle,1651 +avatar (series),1651 +yuuji (and),1650 +shirayuki (kancolle),1650 +andou (girls und panzer),1650 +dark souls i,1649 +asashio kai ni (kancolle),1648 +adjusting gloves,1648 +x x,1647 +song name,1647 +pokemon go,1647 +breast smother,1647 +pokemon xy (anime),1646 +mattaku mousuke,1646 +high-waist pants,1646 +wooden sword,1645 +hanasaki tsubomi,1645 +fanny pack,1645 +cabinet,1645 +pink hoodie,1644 +yuureidoushi (yuurei6214),1643 +yasogami school uniform,1643 +thighhighs pull,1643 +my little pony,1643 +legwear garter,1643 +noel vermillion,1642 +helm,1642 +chiyoda momo,1641 +asui tsuyu,1641 +brown capelet,1640 +super robot,1639 +see-through leotard,1639 +puffy shorts,1639 +free!,1639 +swing,1638 +nail art,1638 +through clothes,1637 +red socks,1637 +coral,1637 +yellow thighhighs,1636 +tales of graces,1635 +dragon ball (classic),1633 +cousins,1633 +inuyasha,1632 +wa (genryusui),1631 +red armor,1631 +dress tug,1631 +scarlet devil mansion,1630 +reflective 
floor,1629 +holostars,1629 +matou kariya,1628 +kizuna ai inc.,1628 +holding pencil,1628 +confused,1628 +tangzhuang,1627 +shiomi syuko,1627 +seiken densetsu 3,1627 +m-da s-tarou,1627 +producer (idolmaster cinderella girls anime),1626 +kill me baby,1625 +aerial fireworks,1625 +worried,1624 +toga himiko,1624 +star halo,1624 +gundam tekketsu no orphans,1624 +very long fingernails,1622 +oda nobunaga (koha-ace),1622 +leopard ears,1622 +world of warcraft,1621 +zeta gundam,1620 +tracer (overwatch),1620 +spring (season),1620 +see-through dress,1620 +naked jacket,1620 +baseball uniform,1619 +ak-12 (girls' frontline),1619 +st. louis (azur lane),1618 +saber lily,1618 +matsuno karamatsu,1618 +\||/,1618 +pokemon on head,1617 +biker clothes,1617 +seigaiha,1616 +polka dot dress,1615 +radio antenna,1614 +upscaled,1613 +menu,1613 +cross scar,1613 +kasumi (doa),1612 +insect wings,1612 +flexing,1612 +white suit,1611 +starry sky print,1611 +luigi's mansion,1611 +tokyo 7th sisters,1610 +pink one-piece swimsuit,1610 +after paizuri,1610 +n (pokemon),1609 +puchimasu!,1608 +tarot,1607 +shiomi kotone,1607 +hair in mouth,1607 +german clothes,1607 +palms,1606 +grabbing another's chin,1606 +asashimo (kancolle),1606 +aino minako,1606 +kalashnikov rifle,1605 +incoming gift,1605 +strapless shirt,1604 +patchwork skin,1604 +lucario,1604 +sugar lyric,1603 +plaid background,1603 +beni shake,1603 +otonashi kotori,1602 +in the face,1602 +hair brush,1602 +breasts on glass,1602 +sumiyao (amam),1601 +ofuda on clothes,1601 +lio fotia,1601 +fortissimo,1601 +fate/extella,1601 +darkness (konosuba),1601 +ribbon bondage,1600 +leaning on object,1600 +seiran (touhou),1599 +naginata,1599 +lip piercing,1599 +cyberpunk edgerunners,1599 +black robe,1599 +autobot,1599 +monsterification,1598 +kizuna ai,1598 +lord el-melloi ii case files,1597 +xenoblade chronicles 3,1596 +ningguang (genshin impact),1596 +komi-san wa komyushou desu,1596 +cucumber,1596 +2014,1596 +shimazaki mujirushi,1595 +minamino kanade,1595 +matsuno jyushimatsu,1595 +marker,1595 +dowsing rod,1595 +o-ring choker,1594 +beckoning,1593 +my-hime,1592 +magatama earrings,1592 +yae sakura,1591 +horikawa raiko,1591 +hataraku saibou,1591 +green-framed eyewear,1591 +sword art online: code register,1590 +pencil dress,1590 +moroboshi kirari,1590 +messy,1590 +untucked shirt,1589 +stuck,1588 +skin fangs,1588 +star twinkle precure,1587 +moon (ornament),1587 +matsuno ichimatsu,1587 +dodoco (genshin impact),1587 +matsuno osomatsu,1586 +high-waist shorts,1586 +cherry blossom print,1586 +tokusatsu,1585 +drill locks,1585 +inoue takina,1584 +barbell piercing,1584 +red hoodie,1583 +milk carton,1583 +greatsword,1583 +leaf (pokemon),1582 +poke ball symbol,1581 +tales of the abyss,1580 +kamiya nao,1580 +chocolate bar,1580 +chain leash,1580 +pink vest,1579 +torn,1578 +suzumiya haruhi no shoushitsu,1578 +hanyuu,1578 +horned helmet,1577 +role reversal,1576 +teasing,1575 +other focus,1575 +mimura kanako,1575 +crotch plate,1575 +claw (weapon),1575 +star necklace,1574 +bondage outfit,1574 +unconscious,1573 +oumae kumiko,1573 +dragon quest v,1573 +projected inset,1572 +lum,1572 +hachimiya meguru,1571 +tactical clothes,1570 +blinds,1570 +arashio (kancolle),1570 +lounge chair,1569 +hugtto! 
precure,1569 +gabriel dropout,1569 +fish girl,1569 +embellished costume,1569 +white sash,1568 +mechanical pencil,1568 +sendai kai ni (kancolle),1567 +wheel,1566 +heckler & koch,1566 +hand on thigh,1566 +faux figurine,1566 +rensouhou-kun,1565 +jinx (league of legends),1565 +chandelier,1565 +red cloak,1564 +frozen (disney),1564 +holstered weapon,1563 +senjou no valkyria (series),1562 +ayanami (kancolle),1562 +wooden wall,1561 +tamako market,1561 +leotard pull,1560 +hakurei reimu (cosplay),1560 +color connection,1560 +artoria pendragon (lancer alter) (fate),1560 +sorcerer's sutra scroll,1559 +ha akabouzu,1559 +felicia (vampire),1559 +yorha no. 9 type s,1558 +suzutsuki (kancolle),1558 +single pauldron,1558 +peeking,1558 +sendai hakurei no miko,1557 +polo shirt,1557 +gem (symbol),1557 +danganronpa 3 (anime),1556 +crease,1556 +health bar,1555 +bikini top removed,1555 +bad drawr id,1555 +neocoill,1554 +hand on own leg,1554 +creayus,1554 +two-handed,1553 +souseiseki,1553 +sakata gintoki,1553 +latex bodysuit,1553 +isuzu (kancolle),1553 +shikigami,1552 +park bench,1552 +hikimayu,1552 +ensemble stars!,1552 +yu-gi-oh! 5d's,1551 +yoimiya (genshin impact),1551 +toosaka asagi,1551 +stiletto heels,1551 +magi the labyrinth of magic,1551 +elite ii (arknights),1551 +bamboo broom,1551 +robin (fire emblem) (female),1550 +misumi nagisa,1550 +lusamine (pokemon),1548 +leg hold,1548 +watatsuki no yorihime,1547 +sakurai momoka,1547 +old woman,1547 +kinugasa (kancolle),1547 +nishizumi shiho,1546 +german text,1546 +agano (kancolle),1545 +yamamoto souichirou,1544 +ushiwakamaru (fate),1544 +sunburst background,1544 +plaid bowtie,1544 +dagashi kashi,1543 +bishamonten's pagoda,1543 +japanese crested ibis (kemono friends),1542 +i-class destroyer,1542 +vshojo,1541 +holding leash,1541 +groin tendon,1541 +cupless bra,1541 +striped gloves,1540 +princess king boo,1540 +fireflies,1539 +large insertion,1538 +icho private high school uniform,1538 +holding whip,1538 +video game,1537 +green hoodie,1537 +cuddling,1537 +hagoita,1536 +suppressor,1535 +rappa (rappaya),1535 +viera,1534 +red cross,1534 +gold earrings,1534 +bowing,1534 +yellow cardigan,1533 +spiked tail,1533 +leafa,1533 +futa with futa,1532 +ushanka,1531 +spine,1531 +gold hairband,1531 +checkerboard cookie,1530 +graveyard,1529 +taut dress,1528 +staff (music),1528 +mars symbol,1528 +etna (disgaea),1528 +manhattan cafe (umamusume),1527 +konoshige (ryuun),1527 +has censored revision,1527 +different reflection,1527 +circled 9,1527 +orange legwear,1525 +special week (umamusume),1524 +rosehip (girls und panzer),1524 +northern white-faced owl (kemono friends),1524 +lipstick tube,1524 +choko (cup),1524 +shallow water,1522 +fingers,1522 +little red riding hood,1521 +instrument case,1521 +pentacle,1520 +handsfree ejaculation,1520 +bear hair ornament,1520 +holding sack,1519 +hojo karen,1518 +holding stick,1517 +cowlick,1517 +reach-around,1515 +visible air,1514 +mei (overwatch),1514 +hexagon,1514 +fff threesome,1514 +eagle,1514 +twitter,1513 +single gauntlet,1513 +ohara mari,1513 +naked hoodie,1513 +cosplay photo,1513 +head between breasts,1512 +yuuhagi (amaretto-no-natsu),1511 +strap-on,1511 +fox boy,1511 +ribbon-trimmed collar,1510 +gensou suikoden,1510 +ramune,1509 +notepad,1509 +neon lights,1509 +libeccio (kancolle),1509 +crazy smile,1509 +ushiromiya ange,1508 +monokuma,1508 +mahjong,1508 +kusazuri,1508 +hirasawa ui,1508 +power suit,1507 +wisteria,1506 +harukawa maki,1506 +aqua theme,1506 +wing hair ornament,1505 +whipped cream,1505 +puffy detached sleeves,1505 +nontraditional 
playboy bunny,1505 +tulip hat,1504 +pageratta,1503 +ghost in the shell,1503 +office,1502 +itadori yuuji,1502 +gamagoori ira,1502 +chanta (ayatakaoisii),1501 +wolf boy,1500 +funami yui,1500 +saren (princess connect!),1499 +pacifier,1499 +perineum,1498 +painting (action),1498 +musashi kai ni (kancolle),1498 +mikazuki munechika,1498 +angelina (arknights),1498 +xenosaga,1497 +sakugawa school uniform,1495 +eternity larva,1495 +edward elric,1495 +ayu (mog),1495 +kirigaya suguha,1494 +japanese flag,1494 +hatsune miku (append),1494 +collared cape,1494 +beach chair,1494 +bayonetta (series),1494 +matsuryuu,1493 +tokisaki kurumi,1492 +sword over shoulder,1492 +love letter,1492 +fallen down,1492 +pussy piercing,1490 +morpeko (full),1490 +medium dress,1490 +levi (shingeki no kyojin),1489 +kissing forehead,1489 +green neckerchief,1489 +simplified chinese text,1488 +oshida (girls und panzer),1488 +mega man x (series),1488 +kawakaze (kancolle),1488 +ceres fauna,1487 +pawpads,1485 +kagura (gintama),1485 +artoria pendragon (alter swimsuit rider) (fate),1485 +monkey d. luffy,1484 +gundam unicorn,1484 +power (chainsaw man),1483 +earth (ornament),1483 +dinosaur,1483 +animal feet,1483 +fate/unlimited codes,1482 +blueberry,1482 +skyla (pokemon),1481 +rotational symmetry,1481 +kujou sara,1481 +stationary restraints,1480 +satou kazuma,1480 +bremerton (scorching-hot training) (azur lane),1480 +sincos,1479 +nilou (genshin impact),1479 +gengar,1479 +arm between breasts,1478 +air conditioner,1478 +honolulu (azur lane),1477 +tonda,1476 +truth,1475 +pani poni dash!,1475 +raimon,1474 +morgan le fay (fate),1474 +mismatched pupils,1474 +mascot,1474 +ribbon in mouth,1473 +kibito high school uniform,1473 +tall female,1472 +sweatshirt,1472 +spiked shell,1472 +mother (game),1472 +kirakira precure a la mode,1472 +hooded sweater,1472 +widowmaker (overwatch),1471 +nyantype,1471 +centaur,1471 +tail censor,1469 +sakura taisen,1468 +has downscaled revision,1468 +candlestand,1468 +ramlethal valentine,1467 +frilled cuffs,1467 +caution tape,1467 +holding needle,1466 +green apron,1466 +caressing testicles,1466 +uzaki-chan wa asobitai!,1465 +thigh cutout,1465 +kamui gakupo,1465 +clothed female nude female,1465 +age progression,1465 +a certain high school uniform,1465 +tera online,1464 +playstation portable,1464 +tiger boy,1463 +petting,1463 +white-framed eyewear,1462 +tsumiki mikan,1462 +tohru (maidragon),1462 +francesca lucchini,1462 +boned meat,1461 +title,1460 +tan background,1460 +stuffed shark,1460 +purple heart,1460 +dengeki moeou,1460 +cotton candy,1460 +camilla (fire emblem),1460 +yd (orange maru),1459 +truck,1459 +reindeer costume,1459 +onmyoji,1459 +presenting armpit,1458 +kasugano sakura,1458 +sanshoku dango,1457 +kiyosumi school uniform,1457 +isonami (kancolle),1457 +ebi 193,1457 +cure peace,1457 +bendy straw,1457 +leonardo da vinci (fate),1456 +heart tail,1456 +blood from eyes,1456 +overgrown,1455 +yang guifei (fate),1454 +kishibe rohan,1454 +garden,1454 +dog days,1454 +holding lantern,1453 +2013,1453 +kashuu kiyomitsu,1452 +fish hair ornament,1452 +kemono friends v project,1451 +karin (bunny) (blue archive),1451 +hand on another's ass,1451 +aqua shirt,1451 +yellow skin,1450 +shirase sakuya,1450 +chibi usa,1450 +hand puppet,1449 +grey necktie,1449 +ashiya douman (fate),1449 +yagasuri,1448 +matsuno choromatsu,1448 +diamond button,1448 +fushiguro megumi,1447 +berserk,1447 +palette (object),1446 +vegeta,1445 +plant girl,1445 +flame print,1445 +toranoana,1444 +scene reference,1444 +santa dress,1443 +arm wrap,1443 +ymir (shingeki 
no kyojin),1442 +sports car,1442 +spiked club,1442 +null (nyanpyoun),1442 +halberd,1442 +buzz cut,1442 +head chain,1441 +viewer holding leash,1440 +palutena,1440 +okamisty,1440 +magnifying glass,1440 +katahira masashi,1440 +eureka seven (series),1440 +drying,1440 +camcorder,1440 +pink blood,1439 +tukiwani,1438 +puffy pants,1437 +martha (fate),1437 +helicopter,1437 +chocolate on body,1437 +reference inset,1436 +hanging,1436 +katori (kancolle),1435 +ilya kuvshinov,1435 +black sash,1434 +agahari,1434 +xenoblade chronicles 1,1433 +untying,1433 +poke ball print,1433 +nijigasaki academy school uniform,1433 +mouth veil,1433 +fiery hair,1433 +the moon studio,1432 +buttjob,1431 +artoria pendragon (swimsuit ruler) (fate),1431 +used tissue,1430 +uiharu kazari,1430 +tina branford,1430 +penis outside,1430 +omaru polka,1430 +holding shoes,1430 +2010,1430 +wiping tears,1429 +morning,1429 +asakura toru,1429 +2012,1429 +invisible penis,1428 +sitting on object,1427 +fujibayashi kyou,1427 +my little pony: friendship is magic,1426 +horns through headwear,1426 +vibrator under panties,1425 +tight shirt,1425 +striped kimono,1425 +silver fox (kemono friends),1424 +oral invitation,1424 +holding innertube,1424 +spanking,1423 +santa boots,1423 +ringo (touhou),1423 +mechanization,1423 +enemy aircraft (kancolle),1423 +donation box,1423 +spirit,1422 +purple one-piece swimsuit,1422 +makizushi,1422 +fangs out,1422 +back-seamed legwear,1422 +myoukou (kancolle),1421 +multiple condoms,1420 +weibo logo,1419 +ovum,1418 +handkerchief,1418 +butcha-u,1418 +shirt in mouth,1417 +mikage takashi,1417 +kaeya (genshin impact),1417 +final fantasy xv,1417 +timestamp,1416 +bowl cut,1416 +ass cutout,1416 +alternate skin color,1416 +nyantcha,1415 +holding clipboard,1415 +column,1415 +batman (series),1415 +ar tonelico,1415 +tree stump,1414 +skadi the corrupting heart (arknights),1414 +side-tie leotard,1413 +grey scarf,1413 +dirndl,1412 +cropped vest,1412 +open window,1411 +crane (machine),1411 +pixiv id,1410 +as109,1410 +ruffling hair,1409 +final fantasy viii,1409 +inazuma eleven go chrono stone,1408 +expressive hair,1407 +bear girl,1407 +jeanne d'arc (swimsuit archer) (fate),1406 +tashkent (kancolle),1405 +non-humanoid robot,1405 +narrowed eyes,1405 +mole on neck,1405 +lipstick mark,1405 +konjiki no yami,1405 +inflation,1405 +thigh pouch,1404 +elsa (frozen),1404 +cle masahiro,1403 +shiba inu,1402 +neck,1402 +molestation,1402 +helena blavatsky (fate),1402 +spread toes,1401 +party hat,1401 +jersey,1401 +hino rei,1401 +show by rock!!,1400 +jessie (pokemon),1400 +pectoral grab,1399 +noire (neptune series),1399 +yotsubato!,1398 +window shade,1398 +ragna the bloodedge,1398 +plaid headwear,1398 +midna,1398 +jirai kei,1398 +sonozaki shion,1397 +raised fist,1397 +ib,1397 +curtsey,1397 +yuri lowell,1396 +twilight (spy x family),1396 +onmyouji,1396 +harem pants,1396 +marie antoinette (fate),1395 +hand on another's stomach,1395 +pavement,1394 +panda ears,1394 +gym storeroom,1394 +gamepad,1394 +black mask,1394 +tsukikage yuri,1393 +popuko,1393 +le malin (azur lane),1393 +anti (untea9),1393 +after ejaculation,1393 +utility belt,1392 +skyline,1392 +pet play,1392 +goblin slayer!,1392 +yu-gi-oh! 
arc-v,1391 +kamio misuzu,1391 +fujima takuya,1391 +osakabe-hime (fate),1390 +kemomimi-chan (naga u),1390 +grey wolf (kemono friends),1390 +grey fur,1390 +doughnut hair bun,1390 +bloop (gawr gura),1390 +ribbed bodysuit,1389 +leather belt,1389 +bayonetta,1389 +takeuchi takashi,1388 +takafumi,1388 +fukumaru koito,1388 +excessive pubic hair,1388 +higashi setsuna,1387 +satono diamond (umamusume),1386 +ito noizi,1386 +mysterious heroine x alter (fate),1385 +expressive clothes,1385 +triforce,1384 +tk8d32,1384 +nian (arknights),1384 +mizumoto tadashi,1384 +baguette,1384 +suzuhira hiro,1383 +omurice,1383 +niwatazumi,1383 +monkey ears,1383 +symboli rudolf (umamusume),1382 +h&k hk416,1382 +\o/,1382 +spell card,1381 +haruhisky,1381 +yuffie kisaragi,1380 +scathach skadi (fate),1380 +index fingers together,1380 +ezo red fox (kemono friends),1380 +brown kimono,1380 +sitting on stairs,1379 +matsuno todomatsu,1379 +jervis (kancolle),1379 +endeavor (boku no hero academia),1379 +charlotte dunois,1379 +beige sweater,1379 +alternate weapon,1378 +treasure chest,1377 +stone lantern,1377 +radio,1376 +mcdonald's,1376 +madotsuki,1376 +furukawa nagisa,1376 +failure penguin,1376 +claw ring,1376 +summon night,1375 +polka dot swimsuit,1375 +pointless condom,1375 +brushing teeth,1375 +sangvis ferri,1374 +furutaka (kancolle),1374 +jeanne d'arc alter (ver. shinjuku 1999) (fate),1373 +39,1373 +orange thighhighs,1372 +oguri cap (umamusume),1372 +micro panties,1371 +jean pierre polnareff,1371 +elesa (pokemon),1371 +konno junko,1370 +fake facial hair,1370 +hidden eyes,1369 +hayashimo (kancolle),1369 +eyewear on headwear,1369 +takodachi (ninomae ina'nis),1368 +ishikei,1368 +redesign,1367 +people,1367 +mega man legends,1367 +coca-cola,1367 +yamato-no-kami yasusada,1366 +nanairogaoka middle school uniform,1365 +nagi no asukara,1365 +excessive cum,1365 +tatsumi kanji,1364 +takoyaki,1362 +pink sailor collar,1362 +natsume rin,1362 +open pants,1361 +nishi kinuyo,1361 +mahou girls precure!,1361 +dakimakura (object),1361 +orange sailor collar,1360 +holding helmet,1360 +eromame,1360 +carrying under arm,1359 +tapir tail,1358 +propeller,1358 +looking at animal,1358 +cardfight!! 
vanguard,1358 +yellow wings,1357 +mamiya (kancolle),1357 +lisa (genshin impact),1357 +kanji,1357 +kamina (ttgl),1357 +hoshi syoko,1357 +holding brush,1357 +di gi charat,1357 +alpha transparency,1357 +red-tinted eyewear,1356 +palm leaf,1356 +fujiwara chika,1355 +countdown,1355 +black buruma,1355 +ajirogasa,1355 +tiki (fire emblem),1354 +bear print,1354 +turtleneck dress,1353 +nanasaki ai,1353 +male playboy bunny,1353 +imu sanjo,1353 +yaoyorozu momo,1352 +oversized shirt,1352 +cow boy,1352 +bulma,1352 +2011,1352 +tribal,1351 +short jumpsuit,1351 +sakura futaba,1351 +arcana heart,1351 +yuusha de aru,1350 +goat ears,1350 +eureka seven,1350 +baseball mitt,1350 +kami nomi zo shiru sekai,1349 +glomp,1349 +finger to face,1349 +yoshikawa chinatsu,1348 +globe,1348 +talking on phone,1347 +cupping hands,1347 +niiko (gonnzou),1346 +mihono bourbon (umamusume),1346 +happi,1346 +halloween bucket,1346 +foot up,1346 +saria (arknights),1345 +gundam zz,1345 +takamaki anne,1344 +sidepec,1344 +priest,1344 +fighter jet,1344 +byleth (fire emblem) (male),1344 +forehead-to-forehead,1343 +yuno (hidamari sketch),1342 +shiromanta,1342 +momosuzu nene,1342 +kris (pokemon),1342 +kawashina (momen silicon),1342 +murakumo kai ni (kancolle),1341 +impaled,1341 +elin,1341 +shantae (series),1340 +albedo (genshin impact),1340 +cat teaser,1339 +shin guards,1338 +masturbation through clothes,1338 +venus symbol,1337 +stomach cutout,1337 +little red riding hood (grimm),1337 +leg between thighs,1337 +kamoi (kancolle),1336 +ark royal (kancolle),1336 +tate no yuusha no nariagari,1335 +spread pussy under clothes,1335 +no testicles,1335 +mahou shoujo lyrical nanoha vivid,1335 +glitch,1335 +blue headband,1335 +to heart,1334 +lotion bottle,1334 +hauchiwa,1334 +blonde pubic hair,1334 +aaaa (quad-a),1334 +ultra series,1333 +pink pants,1333 +mizuhara aki,1333 +idol clothes,1333 +quiz magic academy,1332 +overcoat,1332 +oshawott,1332 +hand on eyewear,1332 +brown necktie,1332 +sakuma mayu,1331 +italian flag,1330 +hand on breast,1330 +alice margatroid (pc-98),1330 +aldehyde,1330 +saga,1328 +naked kimono,1328 +mixing bowl,1328 +debt,1328 +screen,1327 +cutesexyrobutts,1327 +vividred operation,1326 +novel cover,1326 +lucifer (helltaker),1324 +holding handheld game console,1324 +diving mask on head,1324 +tsukioka kogane,1323 +tales of symphonia,1323 +specter (arknights),1323 +sheet music,1323 +roll (mega man),1323 +twincest,1322 +ichikawa feesu,1322 +snorkel,1321 +reiuji utsuho (bird),1320 +pillbox hat,1320 +m4a1 (girls' frontline),1320 +yes-no pillow,1319 +komi shouko,1319 +death note,1319 +kimono skirt,1318 +incoming food,1318 +gyee,1318 +u.a. school uniform,1317 +stadium,1317 +prosthetic leg,1317 +holding leaf,1317 +stone floor,1316 +pantyhose under shorts,1316 +clock tower,1316 +katekyo hitman reborn!,1315 +red hood,1314 +production art,1314 +nike,1314 +kicchou yachie,1314 +black theme,1314 +twitching,1313 +squirrel girl,1313 +incoming attack,1313 +april fools,1313 +yukishiro honoka,1312 +tsukumo benben,1312 +tropical drink,1312 +ribbed sleeves,1312 +pokemon dppt (anime),1312 +jako (jakoo21),1312 +cloth gag,1312 +. .,1312 +haiyore! 
nyaruko-san,1311 +nekotoufu,1310 +mayano top gun (umamusume),1310 +flip phone,1310 +asanagi,1310 +silver (pokemon),1309 +fertilization,1309 +ryu (street fighter),1308 +rainbow gradient,1308 +pokemon platinum,1308 +mishima kurone,1308 +jaguar ears,1308 +flower request,1308 +abigail williams (swimsuit foreigner) (fate),1308 +saku usako (rabbit),1307 +onikobe rin,1307 +kettle,1307 +stab,1306 +potion,1306 +imizu (nitro unknown),1306 +dei shirou,1306 +school briefcase,1305 +dido (azur lane),1305 +umikaze (kancolle),1304 +pokemoa,1304 +crosswalk,1304 +bodice,1304 +shirosato,1303 +okita souji alter (fate),1303 +myoudouin itsuki,1303 +lube,1303 +drawing bow,1303 +humiliation,1302 +slapping,1301 +kaguya luna,1301 +sailor moon redraw challenge (meme),1300 +hoshiguma (arknights),1300 +dragon ball fighterz,1300 +under night in-birth,1299 +shining (series),1299 +plaid panties,1299 +grey bra,1299 +thrusters,1298 +intravenous drip,1298 +cato (monocatienus),1298 +orange slice,1297 +hagikaze (kancolle),1297 +frapowa,1297 +nekomusume,1296 +morino rinze,1296 +sonic the hedgehog,1295 +grey border,1295 +food-themed clothes,1295 +cheering,1295 +log,1294 +lize helesta,1294 +joseph joestar (old),1294 +princess principal,1293 +orange ascot,1293 +jojo pose,1293 +elio (pokemon),1293 +multicolored bow,1292 +godzilla (series),1292 +full-body tattoo,1292 +cagliostro (granblue fantasy),1292 +bruno bucciarati,1292 +blowing kiss,1292 +nori tamago,1291 +broken chain,1291 +artoria caster (fate),1291 +sekina,1290 +print bra,1290 +plaid pants,1290 +homare (fool's art),1290 +stone wall,1289 +kuwayama chiyuki,1289 +fumio (rsqkr),1289 +breathing fire,1289 +an-94 (girls' frontline),1289 +karaagetarou,1288 +chest belt,1288 +brown-framed eyewear,1288 +7th dragon (series),1288 +origami,1287 +oouso,1287 +komano aunn,1287 +k-suwabe,1287 +little boy admiral (kancolle),1286 +lily of the valley,1286 +leather gloves,1286 +murakami suigun,1285 +haramura nodoka,1285 +frottage,1285 +nippleless clothes,1284 +negom,1284 +wooden table,1283 +oomuro sakurako,1283 +nishi koutarou,1283 +gilles de rais (caster) (fate),1283 +soccer,1282 +cleaning,1282 +alchemy stars,1282 +tamura yuri,1281 +strike witches: suomus misfits squadron,1281 +scooter,1281 +poker chip,1281 +kino makoto,1281 +yuuka (blue archive),1280 +tsurumaru kuninaga,1280 +totoki airi,1280 +tail grab,1280 +ranma-chan,1280 +marina (splatoon),1280 +blanc (neptune series),1280 +sengoku nadeko,1279 +cow,1279 +yes,1277 +sweatpants,1277 +blue eyeshadow,1277 +tail between legs,1276 +stream,1276 +platform heels,1276 +horosuke,1276 +unitard,1275 +theresa apocalypse,1275 +my-otome,1275 +makuwauri,1275 +g11 (girls' frontline),1275 +full-package futanari,1275 +yuyushiki,1274 +goblin,1274 +steampunk,1273 +fumihiko (fu mihi ko),1273 +wrist guards,1272 +wakabayashi toshiya,1272 +ike (fire emblem),1272 +bianca (pokemon),1272 +onee-loli,1271 +harry potter (series),1271 +.live,1271 +shouhou (kancolle),1270 +riesz,1270 +hiroki (yyqw7151),1270 +gekkan shoujo nozaki-kun,1270 +hatsune miku (cosplay),1269 +bird on shoulder,1269 +natsuki subaru,1268 +twisted wonderland,1267 +tohno akiha,1267 +sleeveless hoodie,1267 +dr rex,1266 +blue fur,1266 +striped shorts,1265 +reflective water,1265 +panty straps,1265 +ninomiya asuka,1265 +multiple sources,1265 +mechanical tail,1265 +gloves removed,1265 +cup ramen,1265 +world flipper,1264 +torn panties,1264 +shoelaces,1264 +ootsuki wataru,1264 +dragon quest xi,1264 +torn pantyhose,1263 +prisma illya,1263 +pokedex number,1263 +hand net,1263 +etorofu (kancolle),1263 +clitoris 
piercing,1263 +camellia,1262 +altera (fate),1262 +deer,1261 +android 21,1261 +stove,1260 +single strap,1260 +respirator,1260 +ornate ring,1260 +bad perspective,1260 +u u,1259 +tokiko (touhou),1259 +pom pom hair ornament,1259 +m4 sopmod ii (girls' frontline),1259 +kilye kairi,1259 +chest strap,1259 +partially underwater shot,1258 +higuchi kaede,1258 +hands on feet,1258 +hamaya,1258 +su-san,1257 +purple tail,1257 +mimikyu,1257 +wooden fence,1256 +mithra (ff11),1256 +maturiuta sorato,1255 +cat on head,1255 +bridget (guilty gear),1255 +kurokoma saki,1254 +hakama pants,1254 +balancing,1254 +alphes (style),1254 +happoubi jin,1253 +e.o.,1253 +doraemon,1253 +yokochou,1252 +street fighter iii (series),1252 +oreki houtarou,1252 +yano toshinori,1251 +green flower,1251 +gaoo (frpjx283),1251 +new school swimsuit,1250 +kasumi kai ni (kancolle),1250 +tail wrap,1249 +shinkai no valkyrie,1249 +scorbunny,1249 +akizuki ryo,1249 +sponge,1248 +nishida satono,1248 +aru (blue archive),1248 +arashi (kancolle),1248 +ankle strap,1248 +veiny breasts,1247 +shinda sekai sensen uniform,1247 +noelle (genshin impact),1247 +kaamin (mariarose753),1247 +eggplant,1247 +yordle,1246 +polka dot legwear,1246 +werewolf,1245 +pink collar,1245 +bad end,1244 +sobu high school uniform,1243 +phonograph,1243 +cooperative paizuri,1243 +black pubic hair,1243 +traditional clothes,1242 +hana kazari,1242 +atalanta (fate),1242 +the legend of zelda: skyward sword,1241 +roronoa zoro,1241 +lossy-lossless,1241 +fletcher (kancolle),1241 +beatmania,1241 +st ar-15 (girls' frontline),1240 +pink bag,1240 +lace-trimmed sleeves,1240 +koyanskaya (fate),1240 +blue sash,1240 +black male underwear,1240 +tales of zestiria,1239 +latex gloves,1239 +imminent fellatio,1239 +desk lamp,1239 +arch bishop (ragnarok online),1239 +miyamoto musashi (swimsuit berserker) (fate),1238 +honma meiko,1238 +electrokinesis,1238 +brown hairband,1238 +wattson (apex legends),1237 +kimi no na wa.,1237 +bookmark,1237 +bolo tie,1237 +red bandana,1236 +diana cavendish,1236 +diagonal-striped necktie,1236 +animal ear legwear,1236 +shower (place),1235 +severed head,1235 +head on another's shoulder,1235 +fur-trimmed skirt,1235 +alisa ilinichina amiella,1235 +super mario galaxy,1234 +panties on head,1234 +kujou karasuma,1234 +sana channel,1233 +nanase nao,1233 +lord camelot (fate),1233 +himemori luna,1233 +kirin (armor),1232 +hourai doll,1232 +jojolion,1231 +cure marine,1231 +kadotani anzu,1230 +skateboard,1229 +single pantsleg,1229 +mole on ass,1229 +dangle earrings,1229 +yellow hoodie,1228 +saint seiya,1228 +kuma (persona 4),1228 +kourindou tengu costume,1228 +nepgear,1227 +natori sana,1227 +murasaki shikibu (fate),1227 +gold armor,1227 +german flag,1227 +breast conscious,1227 +namazuo toushirou,1226 +boots removed,1226 +araragi koyomi,1226 +maikaze (kancolle),1225 +blue horns,1225 +bandaid on hand,1225 +pokemon on shoulder,1224 +kasane teto,1224 +minna-dietlinde wilcke,1223 +gyro zeppeli,1223 +enemy lifebuoy (kancolle),1223 +undone necktie,1222 +sunazuka akira,1222 +multicolored shirt,1222 +high kick,1222 +furry with furry,1222 +frilled socks,1221 +todoroki shouto,1220 +schwarz (arknights),1220 +picnic basket,1220 +marianne von edmund,1220 +usashiro mani,1219 +paper airplane,1219 +hachikuji mayoi,1219 +katawa shoujo,1218 +eyebrow piercing,1218 +tablecloth,1217 +jacket partially removed,1217 +blue poison (arknights),1217 +pote (ptkan),1216 +hand on own shoulder,1216 +frederica bernkastel,1216 +clearite,1216 +z23 (azur lane),1215 +keroro gunsou,1215 +ivan karelin,1215 +siege 
(arknights),1214 +piers (pokemon),1214 +lapels,1214 +grey kimono,1214 +ohisashiburi,1213 +koharu (blue archive),1213 +chainmail,1213 +age regression,1213 +shinai,1212 +shin jigen game neptune vii,1212 +hat tip,1212 +2009,1212 +whispering,1211 +runes,1211 +high school fleet,1211 +engiyoshi,1211 +animal bag,1211 +perpendicular paizuri,1210 +ijiranaide nagatoro-san,1210 +gundam build divers,1210 +friends,1210 +sunscreen,1209 +doctor,1209 +club (shape),1209 +tokitarou (fate),1208 +ooshio (kancolle),1208 +kigurumi,1208 +cocktail dress,1208 +yellow pupils,1207 +soup,1207 +sasaki chie,1207 +no gloves,1207 +nearly naked apron,1207 +nanakusa suzuna,1207 +kimono pull,1207 +beidou (genshin impact),1207 +teamwork,1206 +shokugeki no souma,1206 +skull mask,1205 +shortstack,1205 +sex machine,1205 +sailor mercury,1205 +orange neckerchief,1205 +curry rice,1205 +aqua panties,1205 +playstation controller,1204 +mitsumoto jouji,1204 +kagamihara nadeshiko,1204 +honebami toushirou,1204 +sheya,1203 +lace-trimmed skirt,1203 +koruri,1203 +kimi kiss,1203 +kasumi (skchkko),1203 +hoshino fumina,1203 +flashback,1203 +flash,1203 +colorado (kancolle),1203 +slap mark,1202 +ryona,1202 +pants rolled up,1201 +misaki kurehito,1201 +kirijou mitsuru,1201 +gofu,1201 +odd one out,1200 +metal gear solid,1200 +hop (pokemon),1200 +duffel coat,1200 +dark penis,1200 +no game no life,1199 +gweda,1199 +stroking own chin,1198 +pravda school uniform,1198 +mery (yangmalgage),1198 +stepped on,1197 +sonya (kill me baby),1197 +richelieu (kancolle),1197 +impossible swimsuit,1197 +silica,1196 +kousaka reina,1196 +green bodysuit,1196 +unya,1195 +teruterubouzu,1195 +athletic leotard,1195 +teireida mai,1194 +shiny legwear,1194 +pokemon sv,1194 +goma (gomasamune),1194 +dust cloud,1194 +decensored,1194 +tohsaka tokiomi,1193 +turtleneck leotard,1192 +robin (fire emblem) (male),1192 +real world location,1192 +pie,1192 +peeping,1192 +caren hortensia,1192 +warhammer 40k,1191 +sausage,1191 +grey bikini,1191 +bulbasaur,1191 +sona (league of legends),1190 +plaid neckwear,1190 +papakha,1190 +molten rock,1190 +maebara keiichi,1190 +koto inari,1190 +nagi (kannagi),1189 +girlfriend (kari),1189 +burnt clothes,1189 +brown bikini,1189 +bad nijie id,1189 +utano,1188 +sobble,1188 +ranni the witch,1188 +kero,1188 +tomoeda elementary school uniform,1187 +gathers,1187 +furutani himawari,1187 +bartender,1187 +midare toushirou,1186 +daidouji tomoyo,1186 +alpaca suri (kemono friends),1186 +wiping face,1185 +sick,1185 +shirobako,1185 +purple capelet,1185 +mawaru penguindrum,1185 +harp,1185 +fingering through clothes,1185 +wading pool,1184 +kudamaki tsukasa,1184 +deerstalker,1184 +christmas lights,1184 +short sidetail,1183 +dead or alive 5,1183 +busou shinki,1183 +snivy,1182 +slime (genshin impact),1182 +meito (maze),1182 +flustered,1182 +cellphone picture,1182 +bath yukata,1182 +watermelon bar,1181 +sunflower hair ornament,1181 +amanogawa kirara,1181 +tent,1180 +mismatched pubic hair,1180 +kanabou,1180 +eurasian eagle owl (kemono friends),1179 +carrying person,1179 +acrylic paint (medium),1179 +sensei (blue archive),1178 +hand on lap,1178 +excalibur morgan (fate),1178 +angel and devil,1178 +yomu (sgt epper),1177 +two-tone legwear,1177 +ikuchan kaoru,1177 +futaba channel,1177 +fake mustache,1177 +yopparai oni,1176 +striped sweater,1176 +ladybug,1176 +naegi makoto,1175 +ta-class battleship,1174 +split screen,1174 +twitter logo,1173 +zen (kamuro),1172 +wheelchair,1171 +suspended congress,1171 +holographic interface,1171 +dot mouth,1171 +da capo,1171 +asymmetrical horns,1171 
+wig,1170 +naked bandage,1170 +milking machine,1169 +dissidia final fantasy,1169 +sandals removed,1168 +medea (fate),1168 +himura kiseki,1168 +hawaiian shirt,1168 +year of the rat,1167 +yabuki kentarou,1167 +super robot wars original generation,1167 +oyashio (kancolle),1167 +oikawa shizuku,1167 +magical mirai (vocaloid),1167 +inset,1167 +fireplace,1167 +ciel (tsukihime),1167 +butterfly on hand,1167 +blue collar,1167 +winged footwear,1166 +two-sided cape,1166 +shimada fumikane,1166 +masukuza j,1166 +cheek bulge,1166 +ribbed leotard,1165 +pyrokinesis,1165 +karukan (monjya),1163 +blender (medium),1163 +tiered tray,1162 +pink pupils,1162 +living clothes,1162 +leg armor,1162 +emoji,1162 +ema,1162 +turning head,1161 +tulip,1161 +spoken character,1161 +sparkler,1161 +rook (chess),1161 +pipimi,1161 +on bench,1161 +nekopara,1161 +nahida (genshin impact),1161 +machine,1161 +fake wings,1161 +animal hug,1161 +on vehicle,1160 +native american,1160 +tempura,1159 +ru-class battleship,1159 +open robe,1159 +digimon adventure,1159 +chitose (kancolle),1159 +cheek squash,1159 +ankh,1159 +sidelighting,1158 +seishun buta yarou,1158 +pasta,1158 +giving up the ghost,1158 +final fantasy xii,1158 +yunamaro,1157 +yellow bodysuit,1156 +va-11 hall-a,1156 +sekai seifuku: bouryaku no zvezda,1156 +kamiyama high school uniform (hyouka),1156 +hands on another's cheeks,1156 +uvula,1155 +crate,1155 +tengu,1154 +igarashi futaba (shiromanta),1154 +bruise on face,1154 +monika (doki doki literature club),1153 +ferris wheel,1153 +denji (chainsaw man),1153 +asamura hiori,1153 +single fingerless glove,1152 +jjune,1152 +irys (hololive),1152 +anila (granblue fantasy),1152 +torn scarf,1151 +sento isuzu,1151 +large testicles,1151 +cum on self,1151 +baltimore (azur lane),1151 +fake antlers,1150 +potato,1149 +opening door,1149 +kanroji mitsuri,1149 +dairi,1149 +belt boots,1149 +kururugi suzaku,1148 +hooded track jacket,1148 +constellation print,1148 +black leggings,1148 +kesa,1147 +iphone,1147 +cum on hands,1147 +pink coat,1146 +full-length zipper,1146 +fountain,1146 +sword art online: alicization,1145 +open belt,1145 +laser,1145 +champion uniform,1145 +assam (girls und panzer),1145 +art brush,1145 +two-tone hairband,1144 +snail,1144 +ramchi,1144 +penis size difference,1144 +leather boots,1144 +black wristband,1144 +aqua headwear,1144 +abe nana,1144 +through wall,1143 +see-through skirt,1143 +nanatsu no taizai,1143 +large wings,1143 +chest of drawers,1143 +stitched face,1142 +hitting,1142 +arm blade,1142 +alpaca ears,1142 +nisekoi,1141 +load bearing vest,1141 +kagami hirotaka,1141 +saniwa (touken ranbu),1140 +crystal hair,1140 +zeta (granblue fantasy),1139 +tensei shitara slime datta ken,1139 +shantae,1139 +asutora,1139 +wraith (apex legends),1138 +shidare hotaru,1138 +seele vollerei,1138 +candy hair ornament,1138 +bubukka,1138 +neckwear grab,1137 +m16a1 (girls' frontline),1137 +ipod,1137 +honeycomb (pattern),1137 +holding pillow,1137 +tropical-rouge! 
precure,1136 +swd3e2,1136 +ryuuko no ken,1136 +modeus (helltaker),1136 +behind another,1136 +kusanagi motoko,1135 +bus stop,1135 +brown panties,1135 +breast expansion,1135 +white mage,1134 +gladion (pokemon),1134 +ballerina,1134 +voyeurism,1133 +tanaka kusao,1133 +hooded cardigan,1133 +cum on penis,1133 +bow earrings,1133 +bishop (chess),1133 +aa megami-sama,1133 +tima,1132 +nagatoro hayase,1132 +headwear request,1132 +boruto: naruto next generations,1132 +they had lots of sex afterwards (meme),1131 +hand on shoulder,1131 +green (pokemon),1131 +plate armor,1130 +park,1130 +leopard tail,1130 +kid icarus uprising,1130 +james (pokemon),1130 +fairy knight tristan (fate),1130 +theft,1129 +nikorashi-ka,1129 +chalk,1129 +anilingus,1129 +world trigger,1128 +rydia (ff4),1128 +kamille (vcx68),1128 +artoria pendragon (alter swimsuit rider) (second ascension) (fate),1128 +wrestling ring,1127 +urushihara satoshi,1127 +super sentai,1127 +optionaltypo,1127 +mikuma (kancolle),1127 +marie (girls und panzer),1127 +check copyright,1127 +utensil in mouth,1126 +hands on ass,1126 +gym,1126 +dream c club (series),1126 +lizard tail,1125 +i-26 (kancolle),1125 +battle axe,1125 +waero,1124 +ready to draw,1124 +newhalf,1124 +grey ribbon,1124 +fur-trimmed cloak,1124 +bikesuit,1124 +yukinoshita yukino,1123 +vibrator in thighhighs,1123 +silver trim,1123 +meow (nekodenki),1123 +kuromiya,1123 +kohaku (tsukihime),1123 +flashlight,1123 +emperor penguin (kemono friends),1123 +yuigahama yui,1122 +orange panties,1122 +monster hunter rise,1122 +laundry,1122 +island,1122 +heartbeat,1122 +chikuma (kancolle),1122 +suzuki toto,1121 +dandon fuga,1121 +bow (music),1121 +lettuce,1120 +kamado tanjirou,1120 +hair ears,1120 +victor (pokemon),1119 +touhoku kiritan,1119 +minami-ke,1119 +kitasan black (umamusume),1119 +ketchup,1119 +furrification,1119 +dark aura,1119 +ammunition belt,1119 +zara (azur lane),1118 +unicorn,1118 +uchikake,1118 +makai senki disgaea,1118 +hoshino (blue archive),1118 +archery,1118 +tsuruya,1117 +fumizuki (kancolle),1117 +asagumo (kancolle),1117 +aoshima,1117 +yui (angel beats!),1116 +isshiki (ffmania7),1116 +hatsukaze (kancolle),1116 +final fantasy xiii,1116 +petals on liquid,1115 +jaguar (kemono friends),1115 +dorohedoro,1115 +disgust,1115 +beatmania iidx,1115 +sideways,1114 +minamoto sakura,1114 +hatsushimo (kancolle),1114 +haniyasushin keiki,1114 +fuuin no tsue,1114 +bai lao shu,1114 +mismatched sleeves,1113 +ikeuchi tanuma,1113 +.hack//,1113 +huang baoling,1112 +frankenstein's monster (fate),1112 +digitigrade,1112 +bat ears,1112 +weapon request,1111 +selen tatsuki,1111 +red oni,1111 +motherly,1111 +satin,1110 +sakuramon,1110 +triangle print,1109 +shuffle!,1109 +ruler,1109 +cropped shoulders,1109 +konohagakure symbol,1108 +slayers,1107 +nu-13,1107 +marvel cinematic universe,1107 +grimoire,1107 +greyscale with colored background,1107 +boo (mario),1107 +shards,1106 +overcast,1106 +orange fur,1106 +lace-trimmed gloves,1106 +hidefu kitayan,1106 +hickey,1106 +orange pantyhose,1105 +namori,1105 +momozono love,1105 +kazenokaze,1105 +akitsuki karasu,1105 +soda bottle,1104 +riding pokemon,1104 +pod (nier automata),1104 +gift bag,1104 +fiery horns,1104 +commander (girls' frontline),1104 +queen (chess),1103 +noai nioshi,1103 +mizuno ai,1102 +hakui koyori,1102 +boudica (fate),1102 +yin (darker than black),1101 +sailor venus,1101 +ootachi,1101 +french text,1101 +yukito (dreamrider),1100 +off-shoulder bikini,1100 +green sleeves,1100 +evangelion: 3.0+1.0 thrice upon a time,1100 +drum (container),1100 +cactus,1100 
+spacezin,1099 +oven mitts,1099 +oryo (oryo04),1099 +implied yuri,1099 +iizuki tasuku,1099 +gradient legwear,1099 +gakkou gurashi!,1099 +futa on male,1098 +lalafell,1097 +izumi-no-kami kanesada,1097 +grey sleeves,1097 +eyewear strap,1097 +torpedo launcher,1096 +satou kibi,1096 +kawashima momo,1096 +closers,1096 +blue armor,1096 +round window,1095 +legs over head,1095 +shirt grab,1094 +ribbon-trimmed dress,1094 +guiding hand,1094 +cat day,1094 +blend s,1094 +ballet slippers,1093 +bad food,1093 +littorio (kancolle),1092 +archived source,1092 +vodka (umamusume),1091 +vaulting horse,1091 +luggage,1091 +battle rifle,1091 +back focus,1091 +torn jeans,1090 +rising sun,1090 +pink pantyhose,1090 +mechanical ears,1090 +ichigo mashimaro,1090 +eyebrow cut,1090 +adjusting legwear,1090 +yanfei (genshin impact),1089 +test plugsuit,1089 +striped one-piece swimsuit,1089 +niijima makoto,1089 +moisture (chichi),1089 +holding water gun,1089 +enkidu (fate),1089 +comparison,1089 +yoshi tama,1088 +stuffed winged unicorn,1088 +rance (series),1088 +low neckline,1088 +high priest (ragnarok online),1088 +akairiot,1088 +abuse,1088 +yamashita shun'ya,1087 +sundae,1087 +shared bathing,1087 +rias gremory,1087 +g gundam,1087 +coffin,1087 +upshorts,1086 +toilet use,1086 +nemoto hina,1086 +multiple monochrome,1086 +dream c club,1086 +consensual tentacles,1086 +compass rose halo,1086 +squirtle,1085 +soap censor,1085 +minamoto no raikou (swimsuit lancer) (fate),1085 +flock,1085 +weight conscious,1084 +tsukuyomi shirabe,1084 +title parody,1084 +chat log,1084 +berserker (fate/zero),1084 +tharja (fire emblem),1083 +knees apart feet together,1083 +honey,1083 +boar,1083 +bird mask,1083 +strap lift,1082 +karakai jouzu no takagi-san,1082 +armin arlert,1082 +shiina yuika,1081 +salaryman,1081 +patterned,1081 +fubuki (one-punch man),1081 +climbing,1081 +yamashiro (azur lane),1080 +slaine troyard,1080 +shell bikini,1080 +passionlip (fate),1080 +non non biyori,1080 +kako (kancolle),1080 +dumbbell,1080 +buruma pull,1080 +liquid hair,1079 +holding balloon,1079 +guido mista,1079 +chimney,1079 +6 (yuchae),1079 +tanzaku,1078 +shihouin yoruichi,1078 +leona heidern,1078 +heart-shaped lock,1078 +covering ass,1078 +sailor mars,1077 +hawks (boku no hero academia),1077 +fubuki kai ni (kancolle),1077 +collared coat,1077 +ammunition,1077 +tokimeki memorial,1076 +tamaki iroha,1076 +split ponytail,1076 +pixiv,1076 +pennant,1076 +haru urara (umamusume),1076 +doitsuken,1076 +clog sandals,1076 +watatsuki no toyohime,1075 +user interface,1075 +red neckwear,1075 +nowaki (kancolle),1075 +mask around neck,1075 +lucas (pokemon),1075 +keizoku military uniform,1075 +butterfly net,1075 +aestus estus,1075 +wall of text,1074 +low tied hair,1074 +hayasui (kancolle),1074 +chocolate on breasts,1074 +yuuki yuuna wa yuusha de aru,1073 +weighing scale,1073 +rita rossweisse,1073 +odin sphere,1073 +mouth pull,1073 +lycoris uniform,1073 +drawing sword,1073 +steering wheel,1072 +gradient clothes,1072 +anchor print,1072 +kitchen knife,1071 +swim cap,1070 +saunders military uniform,1070 +mechanical hands,1070 +luigi,1070 +looking at breasts,1070 +glowing wings,1070 +cure sunshine,1070 +b-ginga,1070 +zara (kancolle),1069 +spoken object,1069 +pharah (overwatch),1069 +hoshizuki (seigetsu),1069 +birthmark,1069 +xuangzang sanzang (fate),1068 +torn swimsuit,1068 +lpip,1068 +ginkgo leaf,1068 +dark green hair,1068 +cooler,1068 +champagne,1068 +c:,1068 +back-print panties,1068 +ass support,1068 +ashford academy uniform,1068 +washing,1067 +uncommon stimulation,1067 +screwdriver,1067 
+hoshimiya ichigo,1067 +castlevania,1067 +cait aron,1067 +spreader bar,1066 +onbashira,1066 +karna (fate),1066 +dice hair ornament,1066 +iris (pokemon),1065 +cum on pussy,1065 +reindeer,1064 +pinching sleeves,1064 +legs folded,1064 +light censor,1063 +hanetsuki,1063 +striped horns,1062 +decepticon,1062 +bow choker,1062 +pouty lips,1061 +no lineart,1061 +jimiko,1061 +irida (pokemon),1061 +button eyes,1061 +anal hair,1061 +princess daisy,1060 +violet evergarden (series),1059 +master sword,1059 +horse penis,1059 +fukuzawa yumi,1059 +fish print,1059 +uryuu ryuunosuke,1058 +nakamura yuri,1058 +mahou shoujo ikusei keikaku,1058 +torn leotard,1056 +nata (tool),1056 +haro,1056 +cure beauty,1056 +carpaccio (girls und panzer),1056 +candy wrapper,1056 +tearing clothes,1055 +phallic symbol,1055 +oribe yasuna,1055 +k/da (league of legends),1055 +drum set,1055 +bow legwear,1055 +borrowed design,1055 +pink wings,1054 +mikeou,1054 +alternate language,1054 +morning glory,1053 +kyouka (princess connect!),1053 +joutouguu mayumi,1053 +tennis ball,1052 +holding game controller,1052 +ashido mina,1052 +angel (kof),1052 +yukie (kusaka shi),1051 +picnic,1051 +orange-tinted eyewear,1051 +lighter,1051 +beans,1051 +arrow through heart,1051 +sabrina (pokemon),1050 +eu03,1050 +dropping,1050 +caenis (fate),1049 +sky-freedom,1048 +oyari ashito,1048 +medb (fate),1048 +katou asuka,1048 +hizaka,1048 +fume,1048 +blue tongue,1048 +blade & soul,1048 +arisu (blue archive),1048 +aria company uniform,1048 +3others,1048 +purple fire,1047 +"don't say ""lazy""",1047 +clothes,1047 +bede (pokemon),1047 +wrinkled skin,1046 +sword behind back,1046 +reines el-melloi archisorte,1046 +puppet strings,1046 +america (hetalia),1046 +sanji (one piece),1045 +kuavera,1045 +johnston (kancolle),1045 +fake cover,1045 +cum in clothes,1045 +breastfeeding,1045 +ange katrina,1045 +wakizashi,1044 +uchuu senkan yamato,1044 +squidbeak splatoon,1044 +medjed (fate),1044 +galaxy expedition team survey corps uniform,1044 +eve (elsword),1044 +cum on feet,1044 +chi-chi (dragon ball),1044 +brown bodysuit,1044 +azura (fire emblem),1044 +kashino (azur lane),1043 +gurande (g-size),1043 +doily,1043 +artificial eye,1043 +umbreon,1042 +cutting board,1042 +bicorne,1042 +ama mitsuki,1042 +cyndaquil,1041 +yuuki mikan,1040 +magical mirai miku,1040 +guitar case,1040 +angora rabbit,1040 +yuzuki choco,1039 +tricorne,1039 +tomoe hotaru,1039 +rachel alucard,1039 +holding legs,1039 +brown bowtie,1039 +reze (chainsaw man),1038 +loungewear,1038 +heart button,1038 +fishnet top,1038 +dusk (arknights),1038 +bus,1038 +suzuhara lulu,1037 +strawberry hair ornament,1037 +pill earrings,1037 +mole above mouth,1037 +hooded robe,1037 +hairjob,1037 +comforting,1037 +pearl bracelet,1036 +fishnet gloves,1036 +duster,1036 +detexted,1036 +brown socks,1036 +very long sleeves,1035 +rex (xenoblade),1035 +hisakawa hayate,1035 +cure blossom,1035 +colonel aki,1035 +thai text,1034 +sylveon,1034 +newtype,1034 +mochizuki (kancolle),1034 +hand on forehead,1034 +yoshio (55level),1033 +party popper,1033 +key visual,1033 +combat boots,1033 +awa,1033 +yellow apron,1032 +training bra,1032 +shironeko project,1032 +grocery bag,1032 +aquarion (series),1032 +survey corps (emblem),1031 +puckered anus,1031 +pink hakama,1031 +forked tongue,1031 +cheek press,1031 +plaid jacket,1030 +millia rage,1030 +holding baseball bat,1030 +hand to head,1030 +elira pendora,1030 +telescope,1029 +subaru nakajima,1029 +sleeveless turtleneck leotard,1029 +scaramouche (genshin impact),1029 +licking another's face,1029 +ban! 
(bansankan),1029 +after rape,1029 +tsunade (naruto),1028 +smokestack hair ornament,1028 +katou megumi,1028 +imminent anal,1028 +shichimenchou,1027 +jumpy dumpty,1027 +summer festival,1026 +headless,1026 +detective,1026 +miyamizu mitsuha,1025 +inuyama aoi,1025 +healin' good precure,1025 +bored,1025 +asteroid ill,1025 +grey neckerchief,1024 +eroe,1024 +soda,1023 +round image,1023 +chongyun (genshin impact),1023 +sharp toenails,1022 +spit take,1021 +scathach (swimsuit assassin) (fate),1021 +hisakawa nagi,1021 +head down,1021 +erect clitoris,1021 +blue sclera,1021 +arjuna (fate),1021 +shoulder strap,1020 +pilot,1020 +kureiji ollie,1020 +kazanari tsubasa,1020 +glaceon,1020 +denim jacket,1020 +camouflage jacket,1020 +vore,1019 +seo tatsuya,1019 +sanageyama uzu,1019 +messenger bag,1019 +tonee,1018 +poke ball theme,1018 +oohashi high school uniform,1018 +holding jacket,1018 +eiri (eirri),1018 +cross print,1018 +winry rockbell,1017 +tooth necklace,1017 +kazama iroha,1017 +fkey,1017 +estellise sidos heurassein,1017 +brown cloak,1017 +yarn,1016 +xingqiu (genshin impact),1016 +tepig,1016 +stealth sex,1016 +lute (instrument),1016 +katsuragi misato,1016 +inui toko,1016 +hospital bed,1016 +snack,1015 +kars (jojo),1015 +guided breast grab,1015 +grey nails,1015 +fate/strange fake,1015 +egg hair ornament,1015 +blue cloak,1015 +tsurumaki maki,1014 +necktie grab,1014 +loose bowtie,1014 +horikawa kunihiro,1014 +6+others,1014 +upshirt,1013 +radiation symbol,1013 +prehensile tail,1013 +kos-mos,1013 +hokuto no ken,1013 +grey belt,1013 +campfire,1013 +yin yang orb,1012 +tada riina,1012 +pink socks,1012 +outstretched leg,1012 +assassin's creed (series),1012 +kongou kai ni (kancolle),1011 +wetsuit,1010 +vertical-striped pantyhose,1010 +tokino sora,1010 +liz to aoi tori,1010 +basketball uniform,1010 +mirai nikki,1009 +final fantasy vii advent children,1009 +bunny-shaped pupils,1009 +paw print background,1008 +motorcycle helmet,1008 +kaneki ken,1008 +extra mouth,1008 +shuujin academy uniform,1007 +nintendo ds,1007 +clapping,1007 +torn gloves,1006 +tonfa,1006 +thermometer,1006 +pointy footwear,1006 +otokuyou,1006 +kamikita komari,1006 +jokanhiyou,1006 +mauve,1005 +komaki manaka,1005 +cis (carcharias),1005 +the legend of zelda: a link between worlds,1004 +tantou,1004 +sorceress (dragon's crown),1004 +roma (kancolle),1004 +rocket,1004 +popplio,1004 +panzerkampfwagen iv,1004 +noh mask,1004 +kishinami hakuno (female),1004 +rabbit yukine,1003 +poster (medium),1003 +love live! school idol festival all stars,1003 +ice skates,1003 +diaper,1003 +single horizontal stripe,1002 +sakiyamama,1002 +kaijuu,1002 +hair flip,1002 +dragalia lost,1002 +accidental exposure,1002 +player 2,1001 +nursery rhyme (fate),1001 +mutsuki (blue archive),1001 +multiple braids,1001 +majo no tabitabi,1001 +getsuyoubi no tawawa,1001 +choufu shimin,1001 +walkie-talkie,1000 +st. 
gloriana's (emblem),1000 +niconico,1000 +nanachi (made in abyss),1000 +kotonoha akane,1000 +charmander,1000 +body freckles,1000 +red apron,999 +prinz eugen (unfading smile) (azur lane),998 +niichi (komorebi-palette),998 +holding stylus,998 +canvas (object),998 +belly grab,998 +asa no ha (pattern),998 +maruki (punchiki),997 +hot dog,997 +cure black,996 +crocodilian tail,996 +clarisse (granblue fantasy),996 +brown leotard,996 +white tail,995 +ushiromiya maria,995 +soul worker,995 +frisk (undertale),995 +elezen,995 +bombergirl,995 +under kotatsu,994 +taimanin asagi,994 +kitashirakawa tamako,994 +fantia reward,994 +rosaria (genshin impact),993 +pearl (splatoon),993 +abigail williams (traveling outfit) (fate),993 +yellow leotard,992 +star of david,992 +cocktail,992 +albedo (overlord),992 +akali,992 +sand sculpture,991 +king hassan (fate),991 +kashiwamochi yomogi,991 +cowboy boots,991 +mizunashi akari,990 +tokyo ghoul:re,989 +multiple straps,989 +frog print,989 +covering one eye,989 +akkijin,989 +catstudioinc (punepuni),988 +takagi-san,987 +snake hair,987 +sakimichan,987 +marth (fire emblem),987 +holding scissors,987 +chan co,987 +breast focus,987 +tamamo cross (umamusume),986 +rectangular mouth,986 +in cup,986 +ib (ib),986 +glowing horns,986 +bulletproof vest,986 +broken weapon,986 +applying makeup,986 +asaya minoru,985 +ugly man,984 +tsukumo sana,984 +ryoji (nomura ryouji),984 +pentagon (railgun ky1206),984 +naked overalls,984 +looking at mirror,984 +kugisaki nobara,984 +goma (yoku yatta hou jane),984 +ferry (granblue fantasy),984 +asian,984 +voice actor,983 +serizawa asahi,983 +rokuwata tomoe,983 +kazagumo (kancolle),983 +good end,983 +eyjafjalla (arknights),983 +embodiment of scarlet devil,983 +crayon,983 +violet evergarden,982 +suzukaze aoba,982 +sora (kingdom hearts),982 +kooh,982 +goat girl,982 +fushigi no umi no nadia,982 +disney,982 +choujigen game neptune,982 +mitake ran,981 +furigana,981 +colored shadow,981 +arisugawa natsuha,981 +7010,981 +vampire costume,980 +maoyuu maou yuusha,980 +heart balloon,980 +corn,980 +berry,980 +troll face,979 +cure march,979 +blue overalls,979 +yuri plisetsky,978 +red bag,978 +lasterk,978 +king (chess),978 +trash bag,977 +tools,977 +off-shoulder jacket,977 +moogle,977 +hibiki (blue archive),977 +gundam suisei no majo,977 +bc freedom military uniform,977 +tsukumo yatsuhashi,975 +taneshima popura,975 +shorts under dress,975 +piromizu,975 +mahou tsukai no yoru,975 +saiyan armor,974 +makaino ririmu,974 +kuchiki rukia,974 +dildo riding,974 +bowser,974 +yanyo (ogino atsuki),973 +tsushima (kancolle),973 +torn jacket,973 +reins,973 +osaki amana,973 +ninja mask,973 +annin musou,973 +reiner braun,972 +momota kaito,972 +aikatsu stars!,972 +rotom phone,971 +lactation through clothes,971 +kitazawa shiho,971 +espeon,971 +eishin flash (umamusume),971 +yoroizuka mizore,970 +uzaki hana,970 +thighhighs over pantyhose,970 +takeba yukari,970 +scarf over mouth,970 +kuroko no basuke,970 +gundam wing,970 +cure happy,970 +war,969 +vanishing point,969 +thread,969 +sailor senshi,969 +odawara hakone,969 +intrepid (kancolle),969 +human scabbard,969 +easel,969 +akitsushima (kancolle),969 +uchi no hime-sama ga ichiban kawaii,968 +telstar,968 +haruna kai ni (kancolle),968 +dudou,968 +facepalm,967 +hoshikawa sara,966 +cd,966 +bibi (tokoyami towa),966 +bandaged neck,966 +uccow,965 +nishieda,965 +frogtie,965 +anzio military uniform,965 +toast in mouth,964 +square 4koma,964 +ink tank (splatoon),964 +hoop,964 +duel academy uniform (yu-gi-oh! 
gx),964 +calico,964 +zabuton,963 +vikala (granblue fantasy),963 +raphtalia,963 +penises touching,963 +hina ichigo,963 +green hakama,963 +wood,962 +traditional chinese text,961 +tateyama ayano,961 +takanami (kancolle),961 +manabe nodoka,961 +heart o-ring,961 +hat over one eye,961 +ear biting,961 +corded phone,961 +saotome alto,960 +jill valentine,960 +drying hair,960 +bosshi,960 +ark order,960 +alt text,960 +yellow sash,959 +red eyeliner,959 +pursed lips,959 +print headwear,959 +million arthur (series),959 +lucoa (maidragon),959 +doukyuusei another world,959 +frilled shorts,958 +eye of horus,958 +blank stare,958 +yotsuba alice,957 +ririko (zhuoyandesailaer),957 +pet bowl,957 +overall shorts,957 +onii-chan wa oshimai,957 +momo velia deviluke,957 +mo (kireinamo),957 +gomennasai,957 +counter:side,957 +white tiger,956 +toilet paper,956 +socks removed,956 +sakazaki freddy,956 +pornography,956 +multicolored cape,956 +misumi (macaroni),956 +live2d,956 +kino no tabi,956 +karina lyle,956 +griffin & kryuger military uniform,956 +cum on pectorals,956 +cevio,956 +x-men,955 +team rocket uniform,955 +snowball,955 +screw,955 +kujou karen,955 +cure melody,955 +sakuraba yuuki,954 +red apple,954 +backless leotard,954 +among us,954 +under tree,953 +tentacles under clothes,953 +shirayuki hime,953 +jin (mugenjin),953 +araragi karen,953 +the legend of zelda: the wind waker,952 +raimon soccer uniform,952 +pier,952 +kemachiku,952 +huge bow,952 +folding chair,952 +benghuai xueyuan,952 +white snake,951 +rice on face,951 +nate (pokemon),951 +miss cloud,951 +meltryllis (swimsuit lancer) (first ascension) (fate),951 +mechanical eye,951 +double vertical stripe,951 +dark areolae,951 +bubble tea challenge,951 +blind,951 +sakurazawa izumi,950 +hand on another's leg,950 +from outside,950 +eto (ikumika),950 +chef uniform,950 +wiping sweat,949 +key necklace,949 +afrobull,949 +sucy manbavaran,948 +shirow masamune,948 +purple eyeshadow,948 +no tail,948 +daruma doll,948 +alice (alice in wonderland) (cosplay),948 +meowth,947 +bandolier,947 +transparent wings,946 +toudou shimako,946 +purple belt,946 +obiwan,946 +kayneth el-melloi archibald,946 +xenogears,945 +tsuruse,945 +footwear ribbon,945 +tanaka mamimi,944 +sonoda chiyoko,944 +setz,944 +keystone,944 +cat ear legwear,943 +airfield princess,943 +white serafuku,942 +tiger stripes,942 +niwatari kutaka,942 +japan (hetalia),942 +ichigo (darling in the franxx),942 +danua,942 +slashing,941 +shopping,941 +sailor jupiter,941 +non (z-art),941 +kozakura marry,941 +fleur de lapin uniform,941 +voile,940 +shirabi,940 +rtil,940 +purple scrunchie,940 +pink pajamas,940 +chrono trigger,940 +alphonse elric,940 +samneco,939 +popcorn,939 +pokemon lgpe,939 +pixiv fantasia 5,939 +nanodesu (phrase),939 +musou isshin (genshin impact),939 +katsura hinagiku,939 +hanten (clothes),939 +glaive (polearm),939 +eyewear hang,939 +multiple riders,938 +mega man zero,938 +hand on another's waist,938 +telekinesis,937 +single boot,937 +kepi,937 +chewing,937 +aono3,937 +triangle earrings,936 +skirt basket,936 +shark costume,936 +saitama (one-punch man),936 +kfr,936 +grookey,936 +>o<,936 +wet dress,935 +satou yuuki,935 +nanao naru,935 +hands on own ass,935 +yuuki setsuna (love live!),934 +okazaki yumemi,934 +holding fishing rod,934 +head on pillow,934 +floating island,934 +bbb (friskuser),934 +akinbo (hyouka fuyou),934 +yellow pants,933 +tail piercing,933 +rebecca (cyberpunk),933 +koutetsujou no kabaneri,933 +divine gate,933 +aztodio,933 +animal on lap,933 +saber (weapon),932 +purple-framed eyewear,932 +pas 
(paxiti),932 +instagram username,932 +flying kick,932 +yagisaka seto,931 +sister princess,931 +naked scarf,931 +himouto! umaru-chan,931 +geewhy,931 +dotted line,931 +coin hair ornament,931 +tentacle pit,930 +tales of berseria,930 +kissing hand,930 +elaina (majo no tabitabi),930 +anjou naruko,930 +otter ears,929 +kula diamond,929 +gonzarez,929 +wrist wrap,928 +takato kurosuke,928 +swimsuit cover-up,928 +star brooch,928 +shoujo kakumei utena,928 +pencil case,928 +mandarin collar,928 +cleaver,928 +boxer briefs,928 +vertical-striped bikini,927 +team 9,927 +cheshire (azur lane),927 +pink capelet,926 +omori,926 +ogami kazuki,926 +kara (color),926 +bokken,926 +beige jacket,926 +barbed wire,926 +baiken,926 +amagi (kancolle),926 +rainbow hair,925 +pixiv fantasia last saga,925 +otohime (youngest princess),925 +okabe rintarou,925 +may (guilty gear),925 +lowleg pants,925 +komiya kaho,925 +kino (kino no tabi),925 +kine,925 +cu chulainn alter (fate),925 +character censor,925 +texture,924 +nagian,924 +kaisen chuui,924 +fur-trimmed kimono,924 +black corset,924 +ballet,924 +red border,923 +palms together,923 +dandelion,923 +united kingdom (hetalia),922 +looking outside,922 +japari bun,922 +hisui (tsukihime),922 +dvd cover,922 +dark souls iii,922 +astolfo (sailor paladin) (fate),922 +yorha type a no. 2,921 +the legend of korra,921 +sousouman,921 +orange pants,921 +mota,921 +jmg,921 +ibuki fuuko,921 +foam,921 +arthur pendragon (fate),921 +adapted turret,921 +school rumble,920 +pushing,920 +nakano itsuki,920 +falchion (fire emblem),920 +brick,920 +assassin (fate/zero),920 +amulet,920 +tokisadame school uniform,919 +phoenix crown,919 +oyama mahiro,919 +mash kyrielight (dangerous beast) (cosplay),919 +bee (deadflow),919 +barefoot sandals,919 +sleeve garter,918 +single stripe,918 +porch,918 +multicolored horns,918 +aragaki ayase,918 +streamers,917 +stain,917 +scar on stomach,917 +aquarion evol,917 +sieg (fate),916 +sakino shingetsu,916 +oktavia von seckendorff,916 +triple penetration,915 +sig sauer,915 +iizunamaru megumu,915 +hand on leg,915 +blank censor,915 +youkan,914 +print jacket,914 +multiple swords,914 +lord of the mysteries,914 +henshin,914 +analog clock,914 +son gohan,913 +sidewalk,913 +shindou takuto,913 +sawa azusa,913 +hanbok,913 +diamond (gemstone),913 +pointy breasts,912 +keith goodman,912 +industrial piercing,912 +aquarium,912 +the legend of zelda: tears of the kingdom,911 +studio ghibli,911 +shibasaki shouji,911 +jigglypuff,911 +florence nightingale (trick or treatment) (fate),911 +commandant teste (kancolle),911 +between pectorals,911 +nijisanji kr,910 +isabelle (animal crossing),910 +brown wings,910 +yuuki (sao),909 +unzipping,909 +kinu (kancolle),909 +hellsing,909 +zoids,908 +len (tsukihime),908 +fujisaki chihiro,908 +untue,907 +peace symbol,907 +no eyebrows,907 +houndstooth,907 +hifumi (blue archive),907 +bilingual,907 +studded bracelet,906 +searchlight,906 +portrait (object),906 +ichihara nina,906 +frilled ascot,906 +boobplate,906 +yuna (ff10),905 +papers,905 +noose,905 +lupin iii,905 +flower ornament,905 +elf (stroll in the woods),905 +child drawing,905 +backpack removed,905 +ar-15,905 +suomi (girls' frontline),904 +polka dot headwear,904 +hourglass,904 +haniwa (statue),904 +fur-trimmed shorts,904 +crystal ball,904 +bird legs,904 +bamboo steamer,904 +yu yu hakusho,903 +white umbrella,903 +third-party watermark,903 +seaweed,903 +ragho no erika,903 +misaka imouto,903 +kiriya aoi,903 +hydrokinesis,903 +tohno shiki,902 +manga cover,902 +icing,902 +holding vegetable,902 +witches of africa,901 
+strawberry panties,901 +sono hanabira ni kuchizuke wo,901 +merlin (fate),901 +akanbe,901 +yarn ball,900 +striped hoodie,900 +puffy cheeks,900 +miia (monster musume),900 +anglerfish,900 +sparse pubic hair,899 +okina ika,899 +hase yu,899 +elvaan,899 +drugs,899 +cute & girly (idolmaster),899 +purple hakama,898 +nekomonogatari,898 +komatsu eiji,898 +highschool of the dead,898 +ears visible through hair,898 +rabbit boy,897 +pendant choker,897 +jaguar print,897 +drinking straw in mouth,897 +torchic,896 +ass shake,896 +ankle wrap,896 +tenkyuu chimata,895 +spooning,895 +santa gloves,895 +sano toshihide,895 +print shorts,895 +crystal earrings,895 +bandaid on forehead,895 +zidane tribal,894 +yume shokunin,894 +uehara ayumu,894 +oda uri,894 +monster hunter: world,894 +kurokawa eren,894 +hotarumaru,894 +brown collar,894 +bayonet,894 +antique firearm,894 +sei (kaien kien),893 +mashuu (neko no oyashiro),893 +lion (kemono friends),893 +lala satalin deviluke,893 +kousaka kyousuke,893 +flaming weapon,893 +aquila (kancolle),893 +yellow butterfly,892 +tieria erde,892 +penguin hood,892 +len'en,892 +kidou senkan nadesico,892 +fur boots,892 +doujima nanako,892 +cheren (pokemon),892 +catholic,892 +scar on neck,891 +nier,891 +musaigen no phantom world,891 +holding pizza,891 +twin turbo (umamusume),890 +micaiah (fire emblem),890 +lace-trimmed hairband,890 +guided penetration,890 +end card,890 +aisha landar,890 +vaporeon,889 +valkyrie,889 +smoking gun,889 +slime (dragon quest),889 +owl ears,889 +notched ear,889 +chado,889 +ayamy,889 +sound horizon,888 +saddle,888 +mitsumi misato,888 +mioda ibuki,888 +lulu (league of legends),888 +fur cape,888 +fuantei,887 +blowing,887 +azasuke,887 +aviator sunglasses,887 +arms (game),887 +yuuki hagure,886 +narancia ghirga,886 +jinguu (4839ms),886 +personal ami,885 +hikawa hina,885 +green wings,885 +audience,885 +tantei opera milky holmes,884 +kami jigen game neptune v,884 +hammann (azur lane),884 +credits,884 +brown hoodie,884 +beige shirt,884 +waiter,883 +godzilla,883 +coconut,883 +star platinum,882 +scottie (phantom2),882 +riyo (lyomsnpmp),882 +mordred (memories at trifas) (fate),882 +lion boy,882 +euryale (fate),882 +cable knit,882 +side drill,881 +qqqrinkappp,881 +majo no takkyuubin,881 +fusu (a95101221),881 +convenience store,881 +cleveland (azur lane),881 +america,881 +yellow bag,880 +tassel hair ornament,880 +seiren (suite precure),880 +metal slug,880 +concert,880 +white bag,879 +tenchi muyou!,879 +sugiura ayano,879 +recorder,879 +public use,879 +lyria (granblue fantasy),879 +little nuns (diva),879 +hikawa shou,879 +cum on legs,879 +linear hatching,878 +kamogawa tanuki,878 +check character,878 +brown apron,878 +polka dot skirt,877 +mother 2,877 +dress removed,877 +type 95 (girls' frontline),876 +tail feathers,876 +pinwheel,876 +mukai takumi,876 +miyamoto musashi (swimsuit berserker) (second ascension) (fate),876 +hishaku,876 +grey theme,876 +final fight,876 +destiny child,876 +dennou coil,876 +vira (granblue fantasy),875 +urin,875 +serebi ryousangata,875 +nursing handjob,875 +nibutani shinka,875 +nia (blade) (xenoblade),875 +kouhaku nawa,875 +ino (magloid),875 +camouflage pants,875 +bernadetta von varley,875 +xiangling (genshin impact),874 +torinone,874 +orange sweater,874 +kusaka souji,874 +inoue orihime,874 +graf zeppelin (azur lane),874 +yamagumo (kancolle),873 +yama no susume,873 +tenken (gotannda),873 +nakano ichika,873 +goh (pokemon),873 +coke-bottle glasses,873 +yellow coat,872 +umigarasu (kitsune1963),872 +space helmet,872 +oimanji,872 +multicolored fur,872 +komaku 
juushoku,872 +kenkou cross,872 +hologram,872 +goodsmile racing,872 +baggy clothes,872 +side slit shorts,871 +sengoku basara,871 +nyarlathotep (nyaruko-san),871 +loose shirt,871 +claude von riegan,871 +celestia ludenberg,871 +traffic cone,870 +themed object,870 +striped headwear,870 +polka dot scrunchie,870 +plaid bra,870 +objectification,870 +miyu (blue archive),870 +meyoco,870 +marie rose,870 +ikari manatsu,870 +blood on knife,870 +vibrator cord,869 +line (naver),869 +konbu wakame,869 +kasaki nozomi,869 +diva (hyxpk),869 +airship,869 +shiroko (swimsuit) (blue archive),868 +playing,868 +mitsudomoe,868 +lei lei,868 +heads-up display,868 +hataraku maou-sama!,868 +food stand,868 +fairy knight gawain (fate),868 +checkered sash,868 +washing machine,867 +ro635 (girls' frontline),867 +nenohi (kancolle),867 +hibiki (cheerleader) (blue archive),867 +double w,867 +crotch cutout,867 +bass clef,867 +araki hirohiko (style),867 + ,867 +pastel colors,866 +okumura haru,866 +kohinata miho,866 +jetto komusou,866 +checkered flag,866 +bullet hole,866 +yellow belt,865 +tenjouin asuka,865 +sakurajima mai,865 +pink camisole,865 +multiple piercings,865 +strapless swimsuit,864 +st. chronica academy uniform,864 +puff and slash sleeves,864 +prison clothes,864 +kamelie,864 +green capelet,864 +ultra ball,863 +napkin,863 +undressing another,862 +sakura chiyo,862 +lenna charlotte tycoon,862 +jeweled branch of hourai,862 +hole,862 +hatsuharu (kancolle),862 +dress flower,862 +sleeveless coat,861 +melon bread,861 +inverted cross,861 +double \m/,861 +clothes between breasts,861 +chiyoda (kancolle),861 +pouring onto self,860 +kase daiki,860 +annie leonhardt,860 +adventure time,860 +two-tone headwear,859 +maplestory,859 +aino megumi,859 +adidas,859 +visor (armor),858 +ushiromiya jessica,858 +nadia la arwall,858 +hand on another's neck,858 +double dildo,858 +cropped hoodie,858 +chibi miku,858 +x3,857 +steven stone,857 +pickaxe,857 +nonomi (blue archive),857 +hands on own stomach,857 +hand on own neck,857 +cross choker,857 +bear costume,857 +tsugu (vtuber),856 +round-bottom flask,856 +minato hitori,856 +hatoba tsugu,856 +anna (frozen),856 +shinama,855 +moona hoshinova,855 +garnet til alexandros xvii,855 +force of will,855 +dante (devil may cry),855 +black umbrella,855 +adapted uniform,855 +undead,854 +sasaki saku,854 +monster musume no iru nichijou online,854 +mahjong tile,854 +ifrit (arknights),854 +donguri suzume,854 +cure sunny,854 +wringing clothes,853 +psychic,853 +painterly,853 +net,853 +momoe nagisa,853 +aozaki aoko,853 +yuuki (princess connect!),852 +hikawa sayo,852 +furudo erika,852 +easter egg,852 +yagen toushirou,851 +virgin killer outfit,851 +tsukihime (remake),851 +miyanaga saki,851 +maizono sayaka,851 +kimono lift,851 +cursive,851 +cetacean tail,851 +arachne,851 +queen's blade rebellion,850 +momio,850 +lace gloves,850 +hilichurl (genshin impact),850 +hayasaka ai,850 +facing to the side,850 +cutting hair,850 +brown sweater vest,850 +aika (series),850 +redrawn,849 +racing miku,849 +ogata chieri,849 +alisa (girls und panzer),849 +silent hill (series),848 +shizuka rin,848 +murata range,848 +laura bodewig,848 +jeno,848 +fate/requiem,848 +dirty feet,848 +cu chulainn (caster) (fate),848 +sperm cell,847 +rider belt,847 +lighthouse,847 +jingei (kancolle),847 +green bag,847 +crazy,847 +unusually open eyes,846 +splatoon 3,846 +mummy costume,846 +monster energy,846 +grave,846 +broken heart,846 +wa lolita,845 +sakuragi mano,845 +mega man (character),845 +magical ruby,845 +genshiken,845 +ayane (doa),845 +vspo!,844 +kasumigaoka 
utaha,844 +kanojo okarishimasu,844 +hiyou (kancolle),844 +heel up,844 +faris scherwiz,844 +cardigan vest,844 +allister (pokemon),844 +tsab ground military uniform,843 +sparrow,843 +sleep mask,843 +romaji commentary,843 +lux (league of legends),843 +hat with ears,843 +cure white,843 +cropped arms,843 +cecilia alcott,843 +aki (girls und panzer),843 +ten'ou haruka,842 +snap-fit buckle,842 +screentones,842 +project moon,842 +maou (maoyuu),842 +lucy heartfilia,842 +hinghoi,842 +hair cubes,842 +deadnooodles,842 +art nouveau,842 +alastor (shakugan no shana),842 +accel world,842 +\n/,842 +sugimura tomokazu,841 +platinum (arknights),841 +p90,841 +multicolored kimono,841 +kanbaru suruga,841 +yt (wai-tei),840 +roy (fire emblem),840 +mole on stomach,840 +futasub,840 +blue bag,840 +satin panties,839 +panty lift,839 +owari no seraph,839 +lamb-oic029,839 +golf club,839 +yume no owari,838 +pine tree,838 +nagato (azur lane),838 +maka albarn,838 +kazano hiori,838 +hizuki yayoi,838 +hacka doll,838 +ceobe (arknights),838 +blew andwhite,838 +wheelbarrow,837 +satou daiji,837 +looking through legs,837 +bradamante (fate),837 +apron lift,837 +acoustic guitar,837 +lambda (kusowarota),836 +kishida mel,836 +exhausted,836 +brown nails,836 +arare (kancolle),836 +yumehara nozomi,835 +tanikaze (kancolle),835 +star pasties,835 +seiun sky (umamusume),835 +oversized food,835 +komusou (jinrikisha),835 +green one-piece swimsuit,835 +denpa onna to seishun otoko,835 +cracked wall,835 +chalice,835 +breast poke,835 +ano natsu de matteru,835 +alice zuberg,835 +wataboushi,834 +osaki tenka,834 +nudist,834 +murata himeko,834 +kotoyoro,834 +holding saucer,834 +holding breath,834 +baton (conducting),834 +zabaniyya (housamo),833 +tube dress,833 +trunks (dragon ball),833 +training corps (emblem),833 +photo inset,833 +matsumoto rangiku,833 +madou monogatari,833 +komeshiro kasu,833 +fate/grand order arcade,833 +enty reward,833 +chikorita,833 +bralines,833 +romper,832 +heart lock (kantai collection),832 +cerberus (helltaker),832 +tsurezure children,831 +tentacles on male,831 +jack dempa,831 +ishii hisao,831 +grill,831 +gaou (umaiyo puyoman),831 +agnes digital (umamusume),831 +yuzuhara konomi,830 +nt00,830 +nekomusume (gegege no kitarou 6),830 +mikazuki (kancolle),830 +lack,830 +jirou kyouka,830 +bianca (dq5),830 +storefront,829 +naked tabard,829 +lyrics,829 +jin kisaragi,829 +blaze (arknights),829 +alternate wings,829 +yukataro,828 +yokoyama nao,828 +yamato nadeshiko,828 +tsurumaki kokoro,828 +tire,828 +pravda military uniform,828 +kusanagi tonbo,828 +glowing butterfly,828 +eden academy uniform,828 +black garter belt,828 +yellow sleeves,827 +pramanix (arknights),827 +lillian girls' academy uniform,827 +ibuki notsu,827 +green pantyhose,827 +dragon quest viii,827 +struggling,826 +reinforce zwei,826 +red fur,826 +rakuen tsuihou,826 +orange hoodie,826 +konnyaku (kk-monmon),826 +gymnastics,826 +aki99,826 +akashi (azur lane),826 +xp-tan,825 +witch (madoka magica),825 +skirt flip,825 +matsuwa (kancolle),825 +chi-hatan military uniform,825 +breast padding,825 +urethral insertion,824 +ucmm,824 +shoulder holster,824 +shimazu yoshino,824 +purple gemstone,824 +kondou taeko,824 +inumuta houka,824 +how to,824 +dragging,824 +takoluka,823 +monomi (danganronpa),823 +jouga maya,823 +hidden face,823 +gills,823 +crime prevention buzzer,823 +unamused,822 +teen titans,822 +squinting,822 +spaghetti,822 +shirabe ako,822 +polar bear,822 +penis peek,822 +niwarhythm,822 +nanao yuriko,822 +mtu (orewamuzituda),822 +monety,822 +kou mashiro,822 +himejima akeno,822 
+glowing hair,822 +aqua gloves,822 +suou momoko,821 +magukappu,821 +kichihachi,821 +fairy knight lancelot (fate),821 +eva 02,821 +dressing another,821 +windowsill,820 +pink ascot,820 +mogudan,820 +miyako (hidamari sketch),820 +masochism,820 +mahou shoujo madoka magica movie 1 & 2,820 +kirishima touka,820 +anchor necklace,820 +taihou (enraptured companion) (azur lane),819 +sirius (azure horizons) (azur lane),819 +penis in panties,819 +ke-ta,819 +group picture,819 +amplifier,819 +senran kagura shinovi versus,818 +rosario+vampire,818 +priest (ragnarok online),818 +pokemon rse (anime),818 +kloah,818 +kisaragi shintarou,818 +inu x boku ss,818 +dimitri alexandre blaiddyd,818 +color trace,818 +circle skirt,818 +brown bra,818 +samuel b. roberts (kancolle),817 +m1911,817 +hand under shirt,817 +garchomp,817 +vibrator on nipple,816 +strike witches: kurenai no majo-tachi,816 +riyo (lyomsnpmp) (style),816 +parka,816 +moomin,816 +lambdadelta,816 +iroha (samurai spirits),816 +ichiba youichi,816 +fire emblem warriors: three hopes,816 +catchphrase,816 +armpit cutout,816 +angela balzac,816 +zero (mega man),815 +tail ring,815 +scar on leg,815 +revy (black lagoon),815 +mashiro miru,815 +maria cadenzavna eve,815 +guts (berserk),815 +grass wonder (umamusume),815 +diego brando,815 +tetsu (kimuchi),814 +rimuru tempest,814 +yellow (pokemon),813 +hayashiya zankurou,813 +grey hairband,813 +cygames,813 +arms around waist,813 +animal skull,813 +92m,813 +robaato,812 +jaguar tail,812 +ghost costume,812 +uta no prince-sama,811 +tutu,811 +thermos,811 +sutahiro (donta),811 +pink belt,811 +pectoral press,811 +multicolored scarf,811 +canopy bed,811 +rolling suitcase,810 +lm (legoman),810 +yellow raincoat,809 +the ring,809 +sling,809 +shoulder cannon,809 +rainbow mika,809 +pendant watch,809 +kiss-shot acerola-orion heart-under-blade,809 +fake nails,809 +cosmic break,809 +corrin (fire emblem) (male),809 +zara (poolside coincidence) (azur lane),808 +yamada elf,808 +warioware,808 +season connection,808 +multiple legs,808 +mataro (matarou),808 +imperial japanese army,808 +anchor choker,808 +suigetsu,807 +stained panties,807 +mamuru,807 +gold bikini,807 +dungeon meshi,807 +watanabe akio,806 +surcoat,806 +sagiri (kancolle),806 +pokemon tcg,806 +octoling girl,806 +lord el-melloi ii,806 +lattice,806 +anegasaki nene,806 +y.ssanoha,805 +sciamano240,805 +neo politan,805 +nardack,805 +multi-strapped panties,805 +dota (series),805 +asahina mirai,805 +akagashi hagane,805 +pit (kid icarus),804 +morino hon,804 +mikagami hiyori,804 +grey cape,804 +ganguro,804 +diona (genshin impact),804 +umekichi,803 +pigeon,803 +ojou-sama pose,803 +no blindfold,803 +mumei (kabaneri),803 +mizumizuni,803 +miuku (marine sapphire),803 +leafeon,803 +karaoke,803 +eyepatch removed,803 +ao no exorcist,803 +string bra,802 +silence (arknights),802 +purple hoodie,802 +mohammed avdol,802 +leash pull,802 +german flag bikini,802 +furukawa (yomawari),802 +elphelt valentine,802 +echo (circa),802 +cosmog,802 +bath stool,802 +3d background,802 +wall clock,801 +shin sangoku musou,801 +ookami (game),801 +harada takehito,801 +hands on hilt,801 +star ocean till the end of time,800 +prinny,800 +parsley-f,800 +kazama asuka,800 +caffein,800 +brown bear (kemono friends),800 +yuuji (yukimimi),799 +unowen,799 +teana lanster,799 +sado (kancolle),799 +rounded corners,799 +merunyaa,799 +leg wrap,799 +hatted pokemon,799 +group name,799 +ehoumaki,799 +zanntetu,798 +yamagishi fuuka,798 +starbucks,798 +st. 
theresa's girls academy school uniform,798 +kanpa (campagne 9),798 +justice (helltaker),798 +jeanne d'arc alter (avenger) (third ascension) (fate),798 +coat removed,798 +club3,798 +choujikuu yousai macross,798 +araragi tsukihi,798 +sakawa (kancolle),797 +onion,797 +inline skates,797 +criis-chan,797 +cat costume,797 +baka to test to shoukanjuu,797 +sucrose (genshin impact),796 +kotonoha aoi,796 +sitting on rock,795 +no eyepatch,795 +mamaloni,795 +izayoi aki,795 +gotou (nekocat),795 +d.gray-man,795 +cure moonlight,795 +character signature,795 +blank speech bubble,795 +tape measure,794 +super soaker,794 +stole,794 +snowflake background,794 +ribbon braid,794 +nipple clamps,794 +minakami (flyingman555),794 +kukie-nyan,794 +kira yoshikage,794 +hand gesture,794 +green tea,794 +dumpling,794 +danganronpa another episode: ultra despair girls,794 +cowboy bebop,794 +brown choker,794 +avatar (wow),794 +aqua footwear,794 +andira (granblue fantasy),794 +yui (princess connect!),793 +wheat,793 +porurin,793 +persona 5 the royal,793 +mysterious heroine x (fate),793 +murasame kai ni (kancolle),793 +mattari yufi,793 +makoto (street fighter),793 +kazuma muramasa,793 +high school dxd born,793 +dishwasher1910,793 +collared vest,793 +boa hancock,793 +asakaze (kancolle),793 +aozora market,793 +akina tsukako,793 +7th dragon,793 +yumi (senran kagura),792 +ulrich (tagaragakuin),792 +takamori aiko,792 +taimanin suit,792 +ryouka (suzuya),792 +maneki-neko,792 +izumi mei,792 +hiro (hirohiro31),792 +arcane: league of legends,792 +toon link,791 +super mario bros. 1,791 +pichu,791 +luoxiaohei,791 +kaiou michiru,791 +hetza (hellshock),791 +hemogurobin a1c,791 +female protagonist (pokemon go),791 +curtained hair,791 +bib,791 +no mole,790 +kisaragi (azur lane),790 +hornet (kancolle),790 +female saniwa (touken ranbu),790 +cum in nose,790 +chrono cross,790 +brick floor,790 +boy sandwich,790 +black undershirt,790 +2008,790 +yuuki juudai,789 +white sports bra,789 +white butterfly,789 +vertical-striped panties,789 +street fighter ii (series),789 +nironiro,789 +lightsaber,789 +inflatable raft,789 +energy drink,789 +arm cuffs,789 +tall,788 +splatoon 2: octo expansion,788 +kousaka kirino's school uniform,788 +flareon,788 +flag background,788 +beltskirt,788 +astronaut,788 +usada hikaru,787 +scathach (piercing bunny) (fate),787 +kankan33333,787 +jeanne d'arc (third ascension) (fate),787 +hau (pokemon),787 +harukaze (kancolle),787 +frozen,787 +flaming sword,787 +bunching hair,787 +purple neckerchief,786 +pokemon move,786 +paper crane,786 +multicolored headwear,786 +litten,786 +kamukura izuru,786 +jellytits-7,786 +holding tail,786 +girls und panzer senshadou daisakusen!,786 +excited,786 +shorts around one leg,785 +hanasakigawa school uniform,785 +chinese new year,785 +chicken (food),785 +wallet,784 +spray can,784 +sand castle,784 +orange goggles,784 +nib pen (object),784 +handstand,784 +bagged fish,784 +arm above head,784 +takeda harumi (shiromanta),783 +sawamura spencer eriri,783 +riichu,783 +kobayakawa sae,783 +garry (ib),783 +card parody,783 +alpha (yukai na nakamatachi),783 +snow bunny,782 +mundane utility,782 +lopunny,782 +kappa,782 +jizeru (giselebon),782 +evillious nendaiki,782 +cutout above navel,782 +stats,781 +shinano (azur lane),781 +natsu megumi,781 +mutsuki (moonknives),781 +moyazou (kitaguni moyashi seizoujo),781 +grenade launcher,781 +gradient dress,781 +dissolving,781 +condom belt,781 +coffee-kizoku,781 +yamanaka ino,780 +trigram,780 +sideways hat,780 +mudkip,780 +hammock,780 +ball and chain restraint,780 +admiral graf spee 
(azur lane),780 +melon,779 +honeycomb background,779 +foliage,779 +chita (ketchup),779 +char's counterattack,779 +bean bag chair,779 +beam saber,779 +arinu,779 +yunyun (konosuba),778 +vert (neptune series),778 +stakes of purgatory,778 +print necktie,778 +ominaeshi (takenoko),778 +official alternate hair length,778 +matikane tannhauser (umamusume),778 +star guardian (league of legends),777 +sanzen'in nagi,777 +mikazuki akira!,777 +koyorin,777 +fur cloak,777 +finger biting,777 +dota 2,777 +bicycle basket,777 +super real mahjong,776 +senjou no valkyria 1,776 +rx-78-2,776 +lightning farron,776 +kuro kosyou,776 +koyama yuzu,776 +cartoon bone,776 +teacher and student,775 +takei hisa,775 +takasaki yuu,775 +inaba shiki,775 +hands on shoulders,775 +edmond dantes (fate),775 +watarui,774 +twitter sample,774 +orange leotard,774 +multiple heads,774 +kohinata miku,774 +kinchaku,774 +hino akane (idolmaster),774 +electrical outlet,774 +tasaka shinnosuke,773 +shigino sohuzi,773 +saru,773 +nipple bar,773 +nejiri hachimaki,773 +eureka,773 +credits page,773 +colored nipples,773 +bar stool,773 +type-moon,772 +multicolored footwear,772 +elephant,772 +clothes between thighs,772 +yo-yo,771 +tail hug,771 +orange sleeves,771 +cartridge,771 +brynhildr (fate),771 +windmill,770 +unasaka ryou,770 +tsuchinoko (kemono friends),770 +onija tarou,770 +mummy,770 +luna nova school uniform,770 +kerchief,770 +jack-o' valentine,770 +hair flowing over,770 +dokomon,770 +ass-to-ass,770 +aoki (fumomo),770 +uchuu senkan yamato 2199,769 +pannacotta fugo,769 +licking armpit,769 +kamisato ayato,769 +ichigo hitofuri,769 +cure peach,769 +bullying,769 +aqua jacket,769 +yamabuki inori,768 +takakura himari,768 +herada mitsuru,768 +swinging,767 +sukusuku hakutaku,767 +sephiroth,767 +linked piercing,767 +kikumon,767 +kiki (majo no takkyuubin),767 +hololive idol uniform,767 +holding ribbon,767 +hinatsuru ai,767 +eldritch abomination,767 +yuni (princess connect!),766 +sextuplets,766 +oshiete! galko-chan,766 +in water,766 +cursor,766 +curren chan (umamusume),766 +amagi (azur lane),766 +umanosuke,765 +two-handed handjob,765 +tanned cirno,765 +rosmontis (arknights),765 +leaf umbrella,765 +kapatarou,765 +kamitsubaki studio,765 +joy-con,765 +hands on another's hips,765 +guzma (pokemon),765 +toon (style),764 +tengu mask,764 +nonco,764 +moose (kemono friends),764 +kabuto (helmet),764 +cephalopod eyes,764 +avatar: the last airbender,764 +saunders school uniform,763 +gotyou,763 +gogiga gagagigo,763 +family crest,763 +eva 01,763 +elbows on table,763 +amazuyu tatsuki,763 +amazon (taitaitaira),763 +zora,762 +speaking tube headset,762 +print sarong,762 +post-apocalypse,762 +harigane shinshi,762 +hairu,762 +grey capelet,762 +fantasy earth zero,762 +elizabeth bathory (brave) (fate),762 +boxcutter,762 +akame ga kill!,762 +z-ring,761 +yuuki haru,761 +tented shirt,761 +pointy nose,761 +matoi (pso2),761 +dorothea arnault,761 +amasora taichi,761 +zooey (granblue fantasy),760 +sneezing,760 +ri-class heavy cruiser,760 +pink cape,760 +pet,760 +misunderstanding,760 +ini (inunabe00),760 +humboldt penguin (kemono friends),760 +hakurei shrine,760 +haimura kiyotaka,760 +clothes tug,760 +tongs,759 +mon-musu quest!,759 +meka (overwatch),759 +konpaku youki,759 +bookbag,759 +band uniform,759 +2007,759 +weightlifting,758 +sugar cube,758 +senji muramasa (fate),758 +petenshi (dr. 
vermilion),758 +momoko (momopoco),758 +milla maxwell,758 +kouno hikaru,758 +jacket pull,758 +atago (stunning speedster) (azur lane),758 +abigail williams (third ascension) (fate),758 +tumblr username,757 +saber alter (ver. shinjuku 1999) (fate),757 +massage,757 +maruyama aya,757 +blood in hair,757 +super saiyan 1,756 +seat,756 +sakimori dan,756 +sakagami tomoyo,756 +oomori yuuko,756 +ibuki (street fighter),756 +group hug,756 +ear wiggle,756 +ara haan,756 +yellow cape,755 +wata do chinkuru,755 +ryoumen sukuna (jujutsu kaisen),755 +pumpkin hair ornament,755 +personality switch,755 +note,755 +aki rosenthal,755 +tea set,754 +pink border,754 +oekakizuki,754 +long neck,754 +kiritto,754 +holding bucket,754 +hinata hideki,754 +harime nui,754 +clothes theft,754 +774 (nanashi),754 +wisespeak,753 +pizza slice,753 +natalia (idolmaster),753 +double fox shadow puppet,753 +chrom (fire emblem),753 +capcom fighting jam,753 +butter,753 +beatrix (granblue fantasy),753 +apple inc.,753 +shampoo (ranma 1/2),752 +ornament,752 +mole on body,752 +mk (mod0),752 +meer campbell,752 +kobuichi,752 +kiku hitomoji,752 +hayabusa,752 +clumsy nun (diva),752 +clown,752 +bishamonten's spear,752 +toaru majutsu no index: new testament,751 +senran kagura new wave,751 +sakura oriko,751 +redrop,751 +nagara (kancolle),751 +mikakunin de shinkoukei,751 +lipgloss,751 +legband,751 +hand on headphones,751 +aqua bra,751 +aoba moca,751 +animal on arm,751 +wardrobe error,750 +variable fighter,750 +udon (shiratama),750 +the king of fighters xv,750 +spiked choker,750 +shaking,750 +panties under buruma,750 +kagami mochi,750 +journey to the west,750 +heart collar,750 +gawain (fate),750 +g36 (girls' frontline),750 +cart,750 +caitlyn (league of legends),750 +y'shtola rhul,749 +upturned eyes,749 +oni-noboru,749 +omikuji,749 +m200 (girls' frontline),749 +keep out,749 +hair dryer,749 +galo thymos,749 +year of the dog,748 +trish una,748 +tandohark,748 +takayaki,748 +n.g.,748 +minamura haruki,748 +holding envelope,748 +fishing line,748 +ene (kagerou project),748 +blue feathers,748 +adjusting panties,748 +zeon,747 +yumeko (touhou),747 +toe ring,747 +senkawa chihiro,747 +roon (azur lane),747 +mismatched eyebrows,747 +iori rinko,747 +gokkun,747 +full metal panic!,747 +feater (arknights),747 +argyle sweater,747 +tight dress,746 +tears of themis,746 +sora ginko,746 +sayagata,746 +saibou shinkyoku,746 +orange skin,746 +em (totsuzen no hakike),746 +dithering,746 +chin strap,746 +butterfly brooch,746 +buster sword,746 +american flag print,746 +shoujo kitou-chuu,745 +rakugaki-biyori,745 +pyra (pro swimmer) (xenoblade),745 +pokemon emerald,745 +pinstripe shirt,745 +lap pillow invitation,745 +kurokoshou (emuburemu123),745 +dragon quest ii,745 +cheek pull,745 +braixen,745 +air groove (umamusume),745 +pegasus knight uniform (fire emblem),744 +pegasus,744 +lizard,744 +lisbeth,744 +latex legwear,744 +inu (aerodog),744 +grand piano,744 +crescent print,744 +calem (pokemon),744 +thick lips,743 +strike the blood,743 +star tattoo,743 +sol badguy,743 +polka dot shirt,743 +kopaka (karda nui),743 +holding ladle,743 +blue border,743 +bb (swimsuit mooncancer) (second ascension) (fate),743 +abigail williams (swimsuit foreigner) (third ascension) (fate),743 +mossacannibalis,742 +mika pikazo,742 +juice,742 +halcon,742 +floating cape,742 +fatal frame,742 +dog penis,742 +dialogue box,742 +breast cutout,742 +purple horns,741 +pagoda,741 +oversized limbs,741 +nelson (kancolle),741 +naoe riki,741 +live a hero,741 +holomyth,741 +hand mirror,741 +god eater burst,741 +beetle,741 
+zheng,740 +splattershot (splatoon),740 +shashaki,740 +ryusei hashida,740 +rita mordio,740 +prostration,740 +nanaroku (fortress76),740 +log horizon,740 +frilled leotard,740 +fish boy,740 +plume,739 +platelet (hataraku saibou),739 +kurou (quadruple zero),739 +jigoku shoujo,739 +white day,738 +takane lui,738 +niliu chahui,738 +momiji mao,738 +messy room,738 +kuki shinobu,738 +korra,738 +kanabun,738 +flower over eye,738 +floating scarf,738 +yuudachi (azur lane),737 +taokaka,737 +okobo,737 +kasuga yukihito,737 +japan,737 +futari wa precure splash star,737 +tomimi (arknights),736 +photorealistic,736 +nintendo 3ds,736 +logo parody,736 +kotoba noriaki,736 +kawakami rokkaku,736 +kairi (kingdom hearts),736 +gokotai,736 +dead master,736 +yuigaoka school uniform,735 +yamu (reverse noise),735 +wooden chair,735 +tofuubear,735 +ssss.dynazenon,735 +sorimachi-doufu,735 +sonia nevermind,735 +popped button,735 +neck tassel,735 +manya (dq4),735 +height chart,735 +flower bracelet,735 +sitting backwards,734 +sajo yukimi,734 +rena erindel,734 +mifune miyu,734 +kido tsubomi,734 +kawata hisashi,734 +forced orgasm,734 +acerola (pokemon),734 +king halo (umamusume),733 +implied fellatio,733 +ichikawa hinana,733 +gerudo,733 +ferret,733 +erubo,733 +bazett fraga mcremitz,733 +winding key,732 +shoukaku (azur lane),732 +shokudaikiri mitsutada,732 +saki chisuzu,732 +pink fur,732 +mount fuji,732 +iori (blue archive),732 +head on chest,732 +dock,732 +behind back,732 +x-shaped pupils,731 +world cup,731 +wizard,731 +whoosaku,731 +super creek (umamusume),731 +subterranean animism,731 +soviet,731 +sauna,731 +onsoku inu,731 +narita brian (umamusume),731 +levitation,731 +jeanne d'arc (swimsuit archer) (first ascension) (fate),731 +hungry,731 +game boy,731 +crowbar,731 +seki (red shine),730 +rune factory,730 +regina (dokidoki! precure),730 +minotaur,730 +michael f91,730 +mami mogu mogu,730 +drag-on dragoon,730 +ameyama denshin,730 +ako (blue archive),730 +youjo senki,729 +x,729 +sailor saturn,729 +miyase mahiro,729 +ivy,729 +clock eyes,729 +cardigan around waist,729 +torture,728 +swire (arknights),728 +ring gag,728 +pneuma (xenoblade),728 +kimmy77,728 +jetpack,728 +eirika (fire emblem),728 +cloth,728 +beam,728 +yamamura sadako,727 +steak,727 +sousou (sousouworks),727 +sakimori (hououbds),727 +radish,727 +okazaki tomoya,727 +master 3 (housamo),727 +infinity,727 +huyumitsu,727 +ear protection,727 +dragon ball super broly,727 +print mug,726 +pixiv fantasia t,726 +okita sougo,726 +momo no sukebe,726 +ingrid brandl galatea,726 +hirose koichi,726 +head on hand,726 +diesel-turbo,726 +cluseller,726 +agent aika,726 +white armor,725 +pov across table,725 +nigou,725 +knotted penis,725 +kaine (nier),725 +hasumi (blue archive),725 +dress swimsuit,725 +book hug,725 +berry (pokemon),725 +agrias oaks,725 +shinapuu,724 +shimmer,724 +puppy,724 +pectoral focus,724 +mitya,724 +mikan (chipstar182),724 +gunblade,724 +green sclera,724 +tennouji rina,723 +st. 
louis (luxurious wheels) (azur lane),723 +sparkle print,723 +sekaiju no meikyuu 1,723 +natori (kancolle),723 +mary (ib),723 +extra faces,723 +carmilla (fate),723 +body armor,723 +ad,723 +sleeve grab,722 +skull necklace,722 +pussy juice drip through clothes,722 +nearl (arknights),722 +minato ojitan,722 +mikko (girls und panzer),722 +matoba risa,722 +kogitsunemaru,722 +bear tail,722 +ammunition pouch,722 +wakan tanka,721 +tsuji santa,721 +small-clawed otter (kemono friends),721 +moyoron,721 +mast,721 +dragon boy,721 +diving,721 +animal ear headwear,721 +tokyo revengers,720 +taihou (forbidden feast) (azur lane),720 +solid eyes,720 +numbers (nanoha),720 +nitocris (swimsuit assassin) (fate),720 +meltryllis (swimsuit lancer) (second ascension) (fate),720 +joy ride,720 +strapless bottom,719 +pokoten (pokoten718),719 +lina inverse,719 +kuzuryuu kennosuke,719 +en'en no shouboutai,719 +douluo dalu,719 +white sarong,718 +tsukumo (soar99),718 +shin (new),718 +osiimi,718 +oonusa,718 +lavinia whateley (fate),718 +kiwi (fruit),718 +hitodama print,718 +french flag,718 +cream on face,718 +akaboshi koume,718 +wrong feet,717 +white rabbit (alice in wonderland),717 +vsinger,717 +vane (granblue fantasy),717 +sickle,717 +seijun,717 +pixiv fantasia fallen kings,717 +ingo (pokemon),717 +hori (hori no su),717 +yurigaoka girls academy school uniform,716 +two-tone bow,716 +satanichia kurumizawa mcdowell,716 +nogi takayoshi,716 +model kit,716 +helltaker (character),716 +gate - jieitai ka no chi nite kaku tatakaeri,716 +eunectes (arknights),716 +cure heart,716 +condom box,716 +setsuna f. seiei,715 +pekeko (pepekekeko),715 +noble witches,715 +mitre,715 +hanami,715 +fujisaki hikari,715 +bouncing ass,715 +blue sarong,715 +battleship,715 +yamato (one piece),714 +marumikan,714 +kuro suto sukii,714 +keizoku school uniform,714 +isobe noriko,714 +inami mahiru,714 +enemy naval mine (kancolle),714 +charlotte (anime),714 +celes chere,714 +track uniform,713 +silver dress,713 +pink eyeshadow,713 +kishida shiki,713 +9law,713 +serial experiments lain,712 +ryuu ga gotoku (series),712 +oshino ougi,712 +narita taishin (umamusume),712 +meta knight,712 +merlin (fate/prototype),712 +jill stingray,712 +finana ryugu,712 +evil,712 +cure rhythm,712 +40hara,712 +solid circle pupils,711 +shell necklace,711 +polka dot ribbon,711 +paul bunyan (fate),711 +mudou eichi,711 +kinkymation,711 +green socks,711 +ganbare douki-chan,711 +food-themed earrings,711 +flower necklace,711 +filia (skullgirls),711 +epic seven,711 +yellow sky,710 +sitting on bench,710 +shared object insertion,710 +maid day,710 +kazami yuuka (pc-98),710 +jockstrap,710 +hospital gown,710 +holding cane,710 +himajin noizu,710 +guard rail,710 +goldenglow (arknights),710 +curiosities of lotus asia,710 +testicle grab,709 +rou-kyuu-bu!,709 +nikka edvardine katajainen,709 +naomi (girls und panzer),709 +kojima saya,709 +flower tattoo,709 +clothesline,709 +arona (blue archive),709 +yamper,708 +t2r,708 +sirius (scorching-hot seirios) (azur lane),708 +onegai my melody,708 +mmm threesome,708 +kikuzuki (kancolle),708 +haruno haruka,708 +green scrunchie,708 +elizabeth bathory (first ascension) (fate),708 +elesis (elsword),708 +butterfly sitting,708 +brown tail,708 +box of chocolates,708 +bikini shorts,708 +ankle grab,708 +projekt red (arknights),707 +pomu rainpuff,707 +me-tan,707 +inazuma eleven go galaxy,707 +fujimaru ritsuka (male) (polar chaldea uniform),707 +toudou yurika,706 +throwing knife,706 +spanish commentary,706 +sd gundam,706 +rei (rei's room),706 +muririn,706 +hair between breasts,706 
+cross-laced legwear,706 +crewmate (among us),706 +cleave gag,706 +boushi-ya,706 +rei no pool,705 +nora higuma,705 +mother 3,705 +mash kyrielight (swimsuit of perpetual summer),705 +hunter (bloodborne),705 +hagoromo lala,705 +chocobo,705 +belldandy,705 +swastika,704 +sf-a2 miki,704 +sewing,704 +seal (animal),704 +reisen (touhou bougetsushou),704 +nikaidou saki,704 +mat,704 +kariya masaki,704 +hoshino ruri,704 +galaxy,704 +eye black,704 +culter,704 +army,704 +team fortress 2,703 +shiratama (shiratamaco),703 +multicolored stripes,703 +lowleg skirt,703 +kotobuki (tiny life),703 +ancesra,703 +alolan vulpix,703 +swordsouls,702 +possessed,702 +licking foot,702 +isolated island oni,702 +hook,702 +gundam build divers re:rise,702 +g (genesis1556),702 +uneven twintails,701 +sock pull,701 +sion eltnam atlasia,701 +sainan high school uniform,701 +pumpkin hat,701 +muuran,701 +mokufuu,701 +leather pants,701 +eretto,701 +dragonfly,701 +dinergate (girls' frontline),701 +dark sakura,701 +caliburn (fate),701 +aoi (annbi),701 +acorn,701 +yellow tank top,700 +torriet,700 +reinforce,700 +orange vest,700 +on motorcycle,700 +mosho,700 +mae (maesanpicture),700 +heaven's feel,700 +hands on ground,700 +fat rolls,700 +expression chart,700 +character hair ornament,700 +box art,700 +abe kanari,700 +salad,699 +roboco-san,699 +pink sky,699 +norasuko,699 +error musume,699 +year of the pig,698 +touwa erio,698 +touching,698 +totokichi,698 +sazaki ichiri,698 +platinum the trinity,698 +perth (kancolle),698 +osashin (osada),698 +no-kan,698 +miracle mallet,698 +kuzuha (nijisanji),698 +iwashi dorobou -r-,698 +ichinose kotomi,698 +holding own foot,698 +heracles (fate),698 +harusame (unmei no ikasumi),698 +cool-kyou shinja,698 +between toes,698 +aqua neckerchief,698 +above clouds,698 +zuikaku (azur lane),697 +triplets,697 +takumi (fire emblem),697 +shiina mayuri,697 +mashiro yukiya,697 +kabocha torute,697 +jun (seojh1029),697 +breath of fire ii,697 +absolutely everyone,697 +whiskey,696 +vacuum cleaner,696 +time bokan (series),696 +soukou akki muramasa,696 +rabbit costume,696 +mosha,696 +leonardo da vinci (rider) (fate),696 +greek clothes,696 +crawling,696 +buruma aside,696 +at classics,696 +tomioka giyuu,695 +sussurro (arknights),695 +suicide,695 +prototype,695 +okinami (kancolle),695 +i-13 (kancolle),695 +hong (white spider),695 +holding letter,695 +grey bodysuit,695 +genyaky,695 +zucchini,694 +utsumi erice,694 +sadism,694 +martial arts belt,694 +lace choker,694 +knight (chess),694 +kirisawa juuzou (character),694 +cherry hair ornament,694 +akita neru,694 +affectionate,694 +yusa kozue,693 +turn a gundam,693 +titan (shingeki no kyojin),693 +pool of blood,693 +pixiv username,693 +ooarai (emblem),693 +nakasu kasumi,693 +kyuudou,693 +kawashima ami,693 +inkwell,693 +igarashi kyou (eroe),693 +hoshino (girls und panzer),693 +homu,693 +satou shouji,692 +makai senki disgaea 2,692 +long earlobes,692 +kagematsuri,692 +tanya degurechaff,691 +shared speech bubble,691 +long tail,691 +kurosaki ichigo,691 +dream soul,691 +detached hair,691 +2k-tan,691 +zankuro,690 +yamada aoi,690 +waifu2x,690 +shinonome nano,690 +nyan,690 +kirlia,690 +iwakura lain,690 +holding head,690 +green eyeshadow,690 +foot hold,690 +fidgeting,690 +shishio chris,689 +sakimiya (inschool),689 +rhodes island logo,689 +kusugawa sasara,689 +italian text,689 +irako (kancolle),689 +gashi-gashi,689 +fukuda haru,689 +folded hair,689 +bunny hat,689 +aurora,689 +raichu,688 +kissing penis,688 +feldt grace,688 +deco (geigeki honey),688 +squall leonhart,687 +long toenails,687 
+kurikara,687 +naked cloak,686 +melon22,686 +enma ai,686 +takimoto hifumi,685 +stheno (fate),685 +splatter,685 +severed limb,685 +parrot,685 +pants tucked in,685 +modare,685 +kisetsu,685 +in palm,685 +i.u.y,685 +grimoire of alice,685 +erika (pokemon),685 +crescent rose,685 +contrast,685 +yu-gi-oh! zexal,684 +vi (league of legends),684 +sumeragi kohaku,684 +stone stairs,684 +socha,684 +sen to chihiro no kamikakushi,684 +sei shounagon (fate),684 +mononoke hime,684 +k hiro,684 +isshi pyuma,684 +glove biting,684 +fishnet bodysuit,684 +cola,684 +alternate pectoral size,684 +orange cape,683 +multicolored hairband,683 +mimonel,683 +mewtwo,683 +implied yaoi,683 +fuya (tempupupu),683 +yuri sakazaki,682 +shoujo shuumatsu ryokou,682 +shindan maker,682 +sengoku musou,682 +semiramis (fate),682 +seiru (prairie),682 +satake minako,682 +roxie (pokemon),682 +mamerakkkkko,682 +kodomo no jikan,682 +hoshi san 3,682 +hair on horn,682 +checkered shirt,682 +black hanekawa,682 +1970s (style),682 +zun (style),681 +sora no otoshimono,681 +pochi (pochi-goya),681 +yosuga no sora,680 +star fox,680 +roke (taikodon),680 +puru-see (hoshizuki (seigetsu)),680 +oberon (fate),680 +kneesocks (psg),680 +ibuki tsubasa,680 +holding another's hair,680 +do m kaeru,680 +blue tank top,680 +tsukimiya ayu,679 +sweaty clothes,679 +otokura yuuki,679 +kickboard,679 +jabara tornado,679 +holding jewelry,679 +fuzichoco,679 +fujimaru ritsuka (female) (polar chaldea uniform),679 +bulletin board,679 +sophie (tales),678 +self hug,678 +oil,678 +multiple moles,678 +masu,678 +lyre,678 +heart-shaped eyes,678 +grandia,678 +facebook username,678 +black heart,678 +arataki itto,678 +watching television,677 +umishima senbon,677 +ryugasaki rene,677 +raptor7,677 +neko majin,677 +musical note print,677 +mole on cheek,677 +kuradoberi jam,677 +konpeto,677 +gasai yuno,677 +g41 (girls' frontline),677 +aragaki shinjirou,677 +yumeno himiko,676 +yappen,676 +kaga3chi,676 +hair lift,676 +fingersmile,676 +asbel lhant,676 +tenjou utena,675 +single epaulette,675 +sand cat (kemono friends),675 +princess of moonbrook,675 +mysterious heroine x alter (first ascension) (fate),675 +konno tohiro,675 +kawakami mai,675 +hibiki (zerocodo),675 +broken window,675 +afterglow,675 +tying,674 +the king of fighters xiv,674 +sage (dq3),674 +no mask,674 +morgana (persona 5),674 +mochizuki anna,674 +fumitan (humitan),674 +falling feathers,674 +bokura wa ima no naka de,674 +symbolism,673 +shun (blue archive),673 +procreate (medium),673 +meta,673 +kanaria,673 +joystick,673 +izayoi liko,673 +implied fingering,673 +hirano katsuyuki,673 +hentai ouji to warawanai neko.,673 +h&k ump,673 +gomtang,673 +cum in container,673 +yasuyuki,672 +two-sided dress,672 +trafalgar law,672 +tendou akane,672 +solid snake,672 +roll caskett (mega man),672 +multiple drawing challenge,672 +koumajou densetsu,672 +kamen rider w,672 +heart on chest,672 +donkey kong (series),672 +adjusting necktie,672 +yuri (doki doki literature club),671 +watercolor pencil (medium),671 +the iron of yin and yang,671 +pink-tinted eyewear,671 +perfume bottle,671 +green tail,671 +grecale (kancolle),671 +elysia (honkai impact),671 +daiaru,671 +covering one breast,671 +syrup,670 +silence girl,670 +punishing: gray raven,670 +pendulum,670 +nijimura okuyasu,670 +nakagawa natsuki,670 +guardian tales,670 +cheria barnes,670 +candlelight,670 +bb (swimsuit mooncancer) (third ascension) (fate),670 +bag of chips,670 +yuusha to maou,669 +track and field,669 +soap bottle,669 +shrimp tempura,669 +ribbon-trimmed headwear,669 +purple ascot,669 +perfect 
cherry blossom,669 +no jacket,669 +natsuki teru,669 +makuran,669 +lance (pokemon),669 +himekawa yuki,669 +hand over eye,669 +duel disk,669 +double horizontal stripe,669 +digital dissolve,669 +cast,669 +black clothes,669 +behind-the-head headphones,669 +zen33n,668 +wixoss,668 +toutetsu yuuma,668 +sister cleaire,668 +propeller hair ornament,668 +mutual masturbation,668 +moose ears,668 +mogami shizuka,668 +lapras,668 +kuroboshi kouhaku,668 +curled fingers,668 +corrupted metadata,668 +calligraphy brush (medium),668 +bikini bottom removed,668 +arthropod limbs,668 +yuzuna99,667 +sky print,667 +seishou elementary school uniform,667 +hanasaku iroha,667 +chamaji,667 +atdan,667 +alena (dq4),667 +world is mine (vocaloid),666 +vietnamese dress,666 +shamoji,666 +sekaiju no meikyuu 3,666 +pointing at another,666 +panicking,666 +moke ro,666 +itou yuuji,666 +fujimura taiga,666 +froggy nun (diva),666 +blue-tinted eyewear,666 +black keys (type-moon),666 +baby bottle,666 +shinshuu maru (kancolle),665 +ryuzaki kaoru,665 +le malin (listless lapin) (azur lane),665 +holding skull,665 +hardhat,665 +gun to head,665 +futatsuki hisame,665 +focused,665 +23 (real xxiii),665 +warrior,664 +tokyo-3 middle school uniform,664 +takana shinno,664 +shotgun shell,664 +sakata kintoki (fate),664 +rhea (fire emblem),664 +nose ring,664 +noripachi,664 +mordred (swimsuit rider) (fate),664 +military helmet,664 +michairu,664 +joestar birthmark,664 +bike shorts under shorts,664 +abmayo,664 +tooth,663 +team skull,663 +sling bikini top,663 +scylla,663 +satou kuuki,663 +nanashiro gorou,663 +matsuda (matsukichi),663 +kasuga ayumu,663 +jirachi,663 +jason (kaiten kussaku kikou),663 +hoshimiya kate,663 +hinata channel,663 +hasegawa kobato,663 +golden snub-nosed monkey (kemono friends),663 +cure sword,663 +baffu,663 +yanagi (nurikoboshi),662 +studded collar,662 +shikushiku (amamori weekly),662 +rikatan,662 +qr code,662 +qp:flapper,662 +new jersey (azur lane),662 +milky way,662 +kishinami (kancolle),662 +juurouta,662 +fukuji mihoko,662 +thoma (genshin impact),661 +kumiko shiba,661 +kasuga maru (kancolle),661 +kaiga,661 +igote,661 +first aid kit,661 +electric plug,661 +crumbs,661 +breaking,661 +walking on liquid,660 +very wide shot,660 +suisei no gargantia,660 +selvaria bles,660 +otonashi yuzuru,660 +nekomiya hinata,660 +nahaki,660 +muzzle flash,660 +kobayashi chisato,660 +hospital,660 +groom,660 +getou suguru,660 +dopey (dopq),660 +breast milk,660 +boota (ttgl),660 +arc the lad,660 +hitotsuki nebura,659 +goto p,659 +floating book,659 +cat bag,659 +animare,659 +shinshin,658 +riza hawkeye,658 +natsu (anta tte hitoha),658 +mega man battle network,658 +male hand,658 +koi,658 +ilulu (maidragon),658 +hashitsuki nata,658 +fist bump,658 +dekomori sanae,658 +armpit sex,658 +school days,657 +pink umbrella,657 +ness (mother 2),657 +kotegawa yui,657 +kaze no tani no nausicaa,657 +jacket over swimsuit,657 +incoming kiss,657 +flapping,657 +easter,657 +yukoku kiriko,656 +yuki arare,656 +yuel (granblue fantasy),656 +single ear cover,656 +sako (bosscoffee),656 +pokemon usum,656 +makicha (sasurainopink),656 +grey wings,656 +blob,656 +akuma no riddle,656 +whip marks,655 +sunnysideup,655 +ruffle compatible,655 +poronegi,655 +pineapple,655 +goat,655 +cyrillic,655 +crane (animal),655 +bust cup,655 +bison cangshu,655 +age comparison,655 +spade hair ornament,654 +skeleton print,654 +new horizon,654 +kyoukai no kanata,654 +gesugao,654 +crazy straw,654 +claymore,654 +blood bag,654 +aono miki,654 +ak-47,654 +wu zetian (fate),653 +testicle sucking,653 +teeth hold,653 
+taimanin yukikaze,653 +ryuusei (mark ii),653 +neki (wakiko),653 +marshmallow,653 +just the tip,653 +chikan,653 +camouflage headwear,653 +barbariank,653 +amestris military uniform,653 +unconventional media,652 +suletta mercury,652 +slim legs,652 +shope,652 +scheherazade (fate),652 +orihara izaya,652 +medallion,652 +kashu (hizake),652 +jungle,652 +holding money,652 +groceries,652 +golden arms,652 +fennekin,652 +doraemon (character),652 +diavolo,652 +yamakasa,651 +mew,651 +kobayashi (maidragon),651 +ishikkoro,651 +implied masturbation,651 +guin guin,651 +erza scarlet,651 +copy ability,651 +bound together,651 +blue shawl,651 +bathrobe,651 +white bird,650 +shinoasa,650 +shimokirin,650 +sheffield (azur lane),650 +nakajima yuka,650 +manatsu no yo no inmu,650 +kano shuuya,650 +ingrid (capcom),650 +finger sucking,650 +craft essence (fate),650 +chikuwa.,650 +bowler hat,650 +blood on arm,650 +uehara himari,649 +saren (summer) (princess connect!),649 +sans,649 +ryuuichi (f dragon),649 +pokemon bw (anime),649 +pelt,649 +jude mathis,649 +higashikata josuke (jojolion),649 +ganondorf,649 +fuuzasa,649 +food art,649 +fiery wings,649 +afuro terumi,649 +yes! precure 5 gogo!,648 +popqn,648 +pop-up story,648 +persona 4: the ultimate in mayonaka arena,648 +neru (blue archive),648 +misumi (niku-kyu),648 +kiyama satoshi,648 +ink (medium),648 +igarashi kyoko,648 +happiness!,648 +gold choker,648 +cat mask,648 +blue poison (shoal beat) (arknights),648 +artoria caster (second ascension) (fate),648 +9a-91 (girls' frontline),648 +lili (tekken),647 +gorou (genshin impact),647 +god eater 2: rage burst,647 +exposed pocket,647 +dissolving clothes,647 +amami rantarou,647 +wakamezake,646 +two-sided skirt,646 +sugimoto saichi,646 +queen elizabeth (azur lane),646 +mizuryu kei,646 +maya kai ni (kancolle),646 +katagiri sanae,646 +jean bart (azur lane),646 +iruma miu,646 +chen (cat),646 +checkered dress,646 +ar tonelico ii,646 +yatadera narumi,645 +yamashichi (mtseven),645 +volcano,645 +veiny arms,645 +uchi emiri,645 +tamagoyaki,645 +ma rukan,645 +jessica albert,645 +honey strap,645 +chocola (nekopara),645 +biwa lute,645 +ballpoint pen (medium),645 +arle nadja,645 +weasel ears,644 +tomoe gozen (swimsuit saber) (fate),644 +through screen,644 +suzuki jun,644 +star ocean anamnesis,644 +spider-man,644 +sitting on shoulder,644 +sheik,644 +rifyu,644 +no legs,644 +nakoruru,644 +mefomefo,644 +ejami,644 +duckling,644 +youtube,643 +unfastened,643 +tumblr sample,643 +potara earrings,643 +nunnally lamperouge,643 +mermaid melody pichi pichi pitch,643 +love hina,643 +jeanne d'arc (granblue fantasy),643 +hakurei reimu (pc-98),643 +blazblue: chronophantasma,643 +at2.,643 +animal slippers,643 +wrist bow,642 +suminagashi,642 +sanada akihiko,642 +print sleeves,642 +power connection,642 +platform boots,642 +ogata hyakunosuke,642 +mei (maysroom),642 +kuro chairo no neko,642 +ghost pose,642 +galko,642 +fukawa touko,642 +blue bandana,642 +webbed hands,641 +vampy,641 +usekh collar,641 +simoun,641 +necktie removed,641 +lielos,641 +klan klein,641 +kamizono (spookyhouse),641 +gwen stacy,641 +zyugoya,640 +utage (arknights),640 +torisan,640 +toe-point,640 +takiki,640 +sonohara anri,640 +soldier (dq3),640 +shrugging,640 +ousaka shizuku,640 +hogwarts school uniform,640 +brown flower,640 +bococho,640 +bad yandere id,640 +underbutt,639 +stile uniform,639 +single hair ring,639 +ray gun,639 +protecting,639 +patchwork clothes,639 +kuga natsuki,639 +kame (kamepan44231),639 +hand wraps,639 +chef,639 +cellphone charm,639 +bc freedom school uniform,639 +wing ears,638 
+uni (neptune series),638 +shell hair ornament,638 +rewrite,638 +pubic stubble,638 +poison (final fight),638 +pixelated,638 +phoenix,638 +otaku,638 +luma (mario),638 +koutaro,638 +kiyohime (swimsuit lancer) (fate),638 +exploding clothes,638 +euphemia li britannia,638 +azusa (blue archive),638 +wild arms 2,637 +tama (tamago),637 +re:creators,637 +licking ear,637 +kamen rider revice,637 +hei (darker than black),637 +exercise ball,637 +dirt,637 +cutlass (girls und panzer),637 +clownfish,637 +checkered legwear,637 +arano oki,637 +yuzu momo,636 +yozora mel,636 +yamabukiiro,636 +varia suit,636 +uni8,636 +roundel,636 +optimus prime,636 +nanoless,636 +holding beachball,636 +head,636 +gentsuki,636 +flying teardrops,636 +floating weapon,636 +eldridge (azur lane),636 +character age,636 +avengers (series),636 +amazon (dragon's crown),636 +siro (dennou shoujo youtuber siro),635 +senomoto hisashi,635 +out of character,635 +one-piece thong,635 +nihonga,635 +morgiana,635 +midway princess,635 +mibu natsuki,635 +isshiki iroha,635 +frilled sailor collar,635 +formation girls,635 +flat chest grab,635 +dennou shoujo youtuber siro,635 +stitched mouth,634 +soft serve,634 +nail bat,634 +mel (melty pot),634 +lucy (cyberpunk),634 +givuchoko,634 +eel,634 +darkmaya,634 +capriccio,634 +aikawa ryou,634 +roy mustang,633 +rape face,633 +otter tail,633 +necro (nekurodayo),633 +namaniku atk,633 +kamio reiji (yua),633 +kairunoburogu,633 +greenteaneko,633 +gravity daze,633 +caryo,633 +t-head trainer,632 +shimaidon (sex),632 +nakatani nio,632 +multiple torii,632 +meisho doto (umamusume),632 +kirisame marisa (cosplay),632 +izuna (blue archive),632 +dikko,632 +converse,632 +uranaishi (miraura),631 +two-finger salute,631 +takasu ryuuji,631 +shogi,631 +saijo juri,631 +rpk-16 (girls' frontline),631 +red tail,631 +object on breast,631 +negev (girls' frontline),631 +mizuumi (bb),631 +minazuki karen,631 +maki (seventh heaven maxion),631 +kaizuka inaho,631 +hair color connection,631 +eris greyrat,631 +cracked floor,631 +xiujia yihuizi,630 +starlight academy uniform,630 +sitting on table,630 +single wrist cuff,630 +shnva,630 +queen's gate,630 +negative space,630 +little red riding hood (grimm) (cosplay),630 +hyakumantenbara salome,630 +gekota,630 +blue pupils,630 +bee,630 +barry (pokemon),630 +riven (league of legends),629 +restroom,629 +licking breast,629 +kanshou & bakuya (fate),629 +hatake kakashi,629 +gmkj,629 +cattail,629 +welt (kinsei koutenkyoku),628 +sonson (eleven),628 +seatbelt,628 +nagase haruhito,628 +missile pod,628 +mishaguji,628 +harano,628 +erere,628 +chi-class torpedo cruiser,628 +chara (undertale),628 +braiding hair,628 +yellow-tinted eyewear,627 +year of the rooster,627 +wakaba (kancolle),627 +used condom on penis,627 +tambourine,627 +tail around leg,627 +rengoku kyoujurou,627 +originium arts (arknights),627 +okita j. 
souji (fate),627 +medusa (lancer) (fate),627 +measuring,627 +komone ushio,627 +gensoukoumuten,627 +flail,627 +cura,627 +beretta 92,627 +before and after,627 +tonari no totoro,626 +stuffed penguin,626 +single head wing,626 +pizza box,626 +nottytiffy,626 +nipple pull,626 +matsukaze tenma,626 +lunamaria hawke,626 +hizuki akira,626 +yuuno scrya,625 +whislash (arknights),625 +waving arms,625 +sailor chibi moon,625 +renkin san-kyuu magical pokaan,625 +person on head,625 +oboro muramasa,625 +mayo riyo,625 +kyouda suzuka,625 +ken masters,625 +horseshoe,625 +hayashi custom,625 +bunny day,625 +alternate hair ornament,625 +sixteenth note,624 +purple-tinted eyewear,624 +nameo (judgemasterkou),624 +midriff sarashi,624 +leone abbacchio,624 +lance of longinus,624 +kaname madoka (cosplay),624 +flcl,624 +akashio (loli ace),624 +ui (blue archive),623 +tama (tamatamo),623 +purple socks,623 +ohta yuichi,623 +nijiura maids,623 +misu kasumi,623 +mikoyan,623 +mega man zx,623 +masayo (gin no ame),623 +male underwear pull,623 +front zipper swimsuit,623 +bolt,623 +blue umbrella,623 +bacius,623 +uni mate,622 +toyokawa fuka,622 +silva (granblue fantasy),622 +nekobungi sumire,622 +mushi024,622 +magallan (arknights),622 +lolibaba,622 +liang xing,622 +jeanne d'arc (swimsuit archer) (second ascension) (fate),622 +fu-ta,622 +dragon ball gt,622 +vahn yourdoom,621 +tsuyuri kanao,621 +table sex,621 +super pochaco,621 +soraka (league of legends),621 +rough sex,621 +military operator,621 +keebo,621 +hoodie lift,621 +decapitation,621 +crotch grab,621 +compass,621 +ushizaki urumi,620 +tekken 7,620 +spider-man: into the spider-verse,620 +side-tie peek,620 +scoop neck,620 +rioshi,620 +raven (dc),620 +pig ears,620 +nagare,620 +mikozin,620 +mejiro dober (umamusume),620 +knives between fingers,620 +john doe,620 +fine motion (umamusume),620 +aegir (azur lane),620 +utsugi yuuki,619 +udagawa tomoe,619 +kurokote,619 +invisible man,619 +greninja,619 +germany,619 +fanning self,619 +drugged,619 +chandelure,619 +yukichi (eikichi),618 +visual novel,618 +split theme,618 +shugo chara!,618 +roxy migurdia,618 +plaid ribbon,618 +pinstripe suit,618 +nishimata aoi,618 +nagami yuu,618 +multitasking,618 +micro bra,618 +kobayakawa rinko,618 +gao,618 +erwin (girls und panzer),618 +black clover,618 +bat girl,618 +bamboo blade,618 +weapon bag,617 +warning sign,617 +small nipples,617 +sewayaki kitsune no senko-san,617 +rabbit on head,617 +paw print pattern,617 +otter,617 +miwa shirow,617 +loaded interior,617 +hyrule warriors,617 +frog girl,617 +camonome,617 +bridal legwear,617 +zuwai kani,616 +vanilla (nekopara),616 +tracen swimsuit,616 +sen (astronomy),616 +michii yuuki,616 +identity v,616 +grey leotard,616 +cuboon,616 +bigrbear,616 +arms between legs,616 +apartment,616 +against railing,616 +wokada,615 +samson (skullgirls),615 +midori (blue archive),615 +meimaru inuchiyo,615 +macross 7,615 +karyl (summer) (princess connect!),615 +infinote,615 +homura subaru,615 +flonne,615 +centauroid,615 +butz klauser,615 +alice cartelet,615 +ysys,614 +shamal,614 +saigusa haruka,614 +neck garter,614 +karory,614 +i-no,614 +hyuuga azuri,614 +energy beam,614 +carapace,614 +stuffed dog,613 +orange jumpsuit,613 +nakamura regura,613 +momoi (blue archive),613 +magical boy,613 +listening to music,613 +lifebuoy ornament,613 +kumadano,613 +kotatsu (g-rough),613 +hand tattoo,613 +half-skirt,613 +gold necklace,613 +front-print panties,613 +emoticon,613 +destroyer princess,613 +colored stripes,613 +brown corset,613 +breasts on table,613 +bismarck (azur lane),613 +atelier totori,613 
+white mask,612 +mizutani eri,612 +food on breasts,612 +felyne,612 +dilation tape,612 +brand new animal,612 +youkoso jitsuryoku shijou shugi no kyoushitsu e,611 +wide face,611 +vyrn (granblue fantasy),611 +tsurui,611 +sora no kiseki,611 +shirasagi chisato,611 +runny makeup,611 +piyokichi,611 +minase nayuki,611 +melia antiqua,611 +kingdom hearts ii,611 +ishuzoku reviewers,611 +hollow eyes,611 +fingering through panties,611 +cum on glasses,611 +casing ejection,611 +can't be this cute,611 +bathhouse,611 +u u zan,610 +tribal tattoo,610 +shibuya kanon,610 +saint quartz (fate),610 +hayasaka mirei,610 +haoni,610 +ditto,610 +crocodilian,610 +censored text,610 +caro ru lushe,610 +aster crowley,610 +african wild dog (kemono friends),610 +valkyrie profile,609 +toddlercon,609 +takaharu,609 +sukja,609 +sayo samonji,609 +penis to breast,609 +nii manabu,609 +magic knight rayearth,609 +imperishable night,609 +elise (fire emblem),609 +aile (crossroads),609 +sato shin,608 +pancake stack,608 +mikazuki yozora,608 +gray (fate),608 +emotionless sex,608 +cian yo,608 +uzuki sayaka,607 +paizuri invitation,607 +natsuki (doki doki literature club),607 +matsubara kanon,607 +kawayabug,607 +broken armor,607 +temple,606 +spider-gwen,606 +razor (genshin impact),606 +pastry bag,606 +kittysuit,606 +jashin-chan dropkick,606 +irene (arknights),606 +hekiga (freelot),606 +green belt,606 +floating clothes,606 +chloe (princess connect!),606 +black survival,606 +asuna (sao-alo),606 +astolfo (saber) (fate),606 +asahina aoi,606 +announcement celebration,606 +animal on hand,606 +amane hasuhito,606 +paper stack,605 +milotic,605 +mikumo guynemer,605 +honoka (doa),605 +hisasi,605 +hima,605 +high tops,605 +hanzou,605 +hacka doll 3,605 +graf eisen,605 +gilgamesh (caster) (fate),605 +gae dearg (fate),605 +fanbox username,605 +arrancar,605 +wet towel,604 +soldier: 76 (overwatch),604 +pearl earrings,604 +musket,604 +majin android 21,604 +kurona,604 +kashino (hot springs relaxation) (azur lane),604 +isometric,604 +crane game,604 +clothes pin,604 +sankuma,603 +rinne no lagrange,603 +powerpuff girls z,603 +pov doorway,603 +paint splatter on face,603 +mazinger (series),603 +implied kiss,603 +goutokuji mike,603 +dr.p,603 +determined,603 +cinderace,603 +chomusuke,603 +cauldron,603 +yellow one-piece swimsuit,602 +wiz (konosuba),602 +takamiya rion,602 +sakura (fire emblem),602 +okusawa misaki,602 +matilda fiship,602 +ittokyu,602 +hanna-justina marseille,602 +genzoman,602 +embroidery,602 +chai (drawingchisanne),602 +yoshizawa kasumi,601 +rui (sugar3),601 +roots,601 +rin yuu,601 +prison,601 +nishii (nitroplus),601 +livestream,601 +kotobuki (momoko factory),601 +kamisimo 90,601 +infirmary,601 +iggy (jojo),601 +chocolate cake,601 +brown neckerchief,601 +yuuri (shoujo shuumatsu ryokou),600 +wallpaper (object),600 +selection university military uniform,600 +saigado,600 +roki (hirokix),600 +powerpuff girls,600 +metroid (creature),600 +kumada masaru,600 +kokudou juunigou,600 +graduation,600 +gladiator sandals,600 +2006,600 +yorha no. 
2 type b (cosplay),599 +walther,599 +two-tone leotard,599 +syuri22,599 +shirotsumekusa,599 +saw,599 +prosciutto,599 +iwbitu,599 +fuuma nagi,599 +engrish commentary,599 +crypto (apex legends),599 +bear panties,599 +yoshitomi akihito,598 +velvet crowe,598 +triangle hair ornament,598 +travel attendant,598 +throat microphone,598 +rainbow background,598 +phosphophyllite (ll),598 +pavolia reine,598 +oda ken'ichi,598 +nedia (nedia region),598 +mia (fire emblem),598 +jolteon,598 +grisaia (series),598 +greatmosu,598 +cthulhu mythos,598 +yumizuka satsuki,597 +wrestle angels,597 +tsukiyo (skymint),597 +saya no uta,597 +sairenji haruna,597 +rui shi (rayze ray),597 +nippon professional baseball,597 +mozukuzu (manukedori),597 +la pluma (arknights),597 +j7w,597 +hooded dress,597 +freyja wion,597 +devil survivor,597 +assassin cross (ragnarok online),597 +smart falcon (umamusume),596 +simulated fellatio,596 +scalpel,596 +naganami kai ni (kancolle),596 +magician,596 +katarina (league of legends),596 +half mask,596 +energy cannon,596 +caracal (kemono friends),596 +atago (summer march) (azur lane),596 +stick poster,595 +shippou (pattern),595 +shannon (umineko),595 +omaru gyuunyuu,595 +noctis lucis caelum,595 +jibril (no game no life),595 +grimm's fairy tales,595 +gastly,595 +eas,595 +clawed gauntlets,595 +chameleon (ryokucha combo),595 +beijuu,595 +admire vega (umamusume),595 +yuto (dialique),594 +tron bonne (mega man),594 +tori (minamopa),594 +spaceship hair ornament,594 +silverash (arknights),594 +shimada chiyo,594 +raven (tales),594 +quick waipa,594 +panties over pantyhose,594 +musouzuki,594 +latias,594 +kirishima satoshi,594 +holding swimsuit,594 +hiten (hitenkei),594 +fuukadia (narcolepsy),594 +cropped sweater,594 +bottle miku,594 +blemishine (arknights),594 +arnest,594 +alina gray,594 +yokosuka girls marine high school uniform,593 +trolling,593 +tennis,593 +temari (naruto),593 +suzuka gozen (fate),593 +right-over-left kimono,593 +ricegnat,593 +raccoon,593 +polearm behind back,593 +pacific rim,593 +nyuu (manekin-eko),593 +muppo,593 +malenia blade of miquella,593 +long labia,593 +kotoyama,593 +hounori,593 +cure lovely,593 +ashiomi masato,593 +tolkien's legendarium,592 +sega,592 +pochincoff,592 +munakata (sekimizu kazuki),592 +minegumo (kancolle),592 +makabe mizuki,592 +kalina (girls' frontline),592 +implied anal,592 +congratulations,592 +yadokari genpachirou,591 +surfing,591 +sanrio,591 +rukuriri (girls und panzer),591 +proposal,591 +morinaka kazaki,591 +leaf background,591 +kitashirakawa chiyuri,591 +keqing (opulent splendor) (genshin impact),591 +judith (tales),591 +i-14 (kancolle),591 +high school dxd infinity,591 +hair over one breast,591 +gundam age,591 +gold footwear,591 +four-leaf clover hair ornament,591 +fading,591 +cookie run,591 +chacharan,591 +xenovia quarta,590 +sekiro: shadows die twice,590 +nuu (nu-nyu),590 +narumi arata,590 +nagioka,590 +legwear removed,590 +himesaka noa,590 +gundam g no reconguista,590 +chocolate making,590 +tokyo (city),589 +spicy nun (diva),589 +sasha braus,589 +perseus (azur lane),589 +nyong nyong,589 +masakichi (crossroad),589 +ainu,589 +...?,589 +super masara,588 +shigure ui (vtuber),588 +resident evil village,588 +penguin costume,588 +lisa lisa,588 +killua zoldyck,588 +kazemaru ichirouta,588 +hiradaira chisaki,588 +f-ism,588 +cupboard,588 +ayatsuji tsukasa,588 +akchu,588 +vrchat,587 +volleyball net,587 +toggles,587 +spas-12 (girls' frontline),587 +self exposure,587 +red tank top,587 +pyrrha nikos,587 +nagishiro mito,587 +m.m,587 +keyblade,587 +huge dildo,587 
+holding chocolate,587 +fast food,587 +ett,587 +catsuit,587 +bekkankou,587 +argyle cutout,587 +wooloo,586 +symbol,586 +side-tie skirt,586 +poi,586 +metal boots,586 +matikanefukukitaru (umamusume),586 +marugoshi (54burger),586 +laundry basket,586 +kariyushi shirt,586 +gradient skin,586 +flailing,586 +e20,586 +tsubaki yayoi,585 +tony tony chopper,585 +tashkent (azur lane),585 +square enix,585 +sailor collar lift,585 +sachito,585 +morishima haruka,585 +frame arms girl,585 +flippers,585 +ellen baker,585 +drum magazine,585 +deutschland (azur lane),585 +combat knife,585 +bravely default (series),585 +ai ai gasa,585 +yuusha series,584 +wii fit,584 +walzrj,584 +triangular eyewear,584 +seeu,584 +sam browne belt,584 +rurouni kenshin,584 +o-ring swimsuit,584 +namamo nanase,584 +mari (blue archive),584 +lily black,584 +grey tank top,584 +glowing hand,584 +glock,584 +dashed eyes,584 +broken sword,584 +boa (brianoa),584 +anmi,584 +zero no kiseki,583 +wind turbine,583 +transparent censoring,583 +shuri (84k),583 +police car,583 +panda hood,583 +kama (first ascension) (fate),583 +fate/extra ccc fox tail,583 +dairoku ryouhei,583 +aurahack,583 +;(,583 +yuuki yuuna,582 +yatterman,582 +tony guisado,582 +tebi (tbd11),582 +soccer spirits,582 +purple tongue,582 +nanami yachiyo,582 +mukyuu,582 +jewelry removed,582 +honma himawari,582 +goggles removed,582 +crow (siranui),582 +chito (shoujo shuumatsu ryokou),582 +brown sleeves,582 +black coat (kingdom hearts),582 +baba (baba seimaijo),582 +asamiya athena,582 +artstation sample,582 +ange (princess principal),582 +y2,581 +teardrop tattoo,581 +tanashi (mk2),581 +sayu (genshin impact),581 +ryo (tg290),581 +ping hai (azur lane),581 +otoi rekomaru,581 +neoartcore,581 +kitten (gravity daze),581 +karutamo,581 +kanzuki karin,581 +janus (kancolle),581 +isaki (gomi),581 +holding own tail,581 +ghost in the shell stand alone complex,581 +flying paper,581 +yaneko uta,580 +village,580 +unfairr,580 +tanamachi kaoru,580 +shirosaki hana,580 +sakaguchi karina,580 +saitou masatsugu,580 +saga (arknights),580 +rope belt,580 +pantyhose under swimsuit,580 +multiple weapons,580 +kfc,580 +key (company),580 +hand rest,580 +folder,580 +doyouwantto,580 +douki-chan (douki-chan),580 +deer antlers,580 +cabbage,580 +sexual coaching,579 +scouter,579 +print swimsuit,579 +natsuya (kuttuki),579 +megurigaoka high school uniform,579 +lifting,579 +leo (fire emblem),579 +kuroyukihime,579 +iahfy,579 +firelock,579 +6u (eternal land),579 +yutakasan-love,578 +twintails day,578 +totodile,578 +ryosios,578 +litwick,578 +leaning on rail,578 +lane line,578 +korie riko,578 +invisible,578 +hina (swimsuit) (blue archive),578 +hands on another's chest,578 +gold bracelet,578 +game boy (original),578 +cash register,578 +berusuke (beru no su),578 +bc freedom (emblem),578 +zima (arknights),577 +zannen onna-kanbu black general-san,577 +purple cardigan,577 +natsume kyousuke,577 +lord knight (ragnarok online),577 +kyogoku shin,577 +kedama milk,577 +kan'u unchou,577 +jcm2,577 +ginga nakajima,577 +fitting room,577 +clarent (fate),577 +akasaai,577 +undefined fantastic object,576 +tajima yuuki,576 +shining (arknights),576 +saya (saya no uta),576 +royal penguin (kemono friends),576 +priest (dq3),576 +popsicle stick,576 +neta,576 +minato yukina,576 +kurot,576 +inuinui,576 +entangled,576 +whip sword,575 +temari ball,575 +stomping,575 +roto,575 +ribbon-trimmed bra,575 +pinned,575 +m16,575 +lansane,575 +jintsuu kai ni (kancolle),575 +ice cream float,575 +greem bang,575 +flower on head,575 +cumulonimbus cloud,575 +apple caramel,575 
+yellow socks,574 +tro,574 +orca,574 +ogasawara sachiko,574 +octopath traveler,574 +morikura en,574 +mizuho (kancolle),574 +mercedes von martritz,574 +meadow,574 +justice gakuen,574 +azuma aya,574 +adsouto,574 +senbei,573 +railroad crossing,573 +ptilopsis (arknights),573 +nigo (aozoragarou),573 +narration,573 +kitahara tomoe (kitahara koubou),573 +kama (swimsuit avenger) (fate),573 +hitsukuya,573 +friend (nanashi mumei),573 +ef,573 +dodging,573 +canteen,573 +bucket hat,573 +aoi kumiko,573 +alp,573 +yamanaka sawako,572 +taut swimsuit,572 +pastel (medium),572 +nostrils,572 +nijisanji id,572 +menu board,572 +male on futa,572 +ge-b,572 +ganesagi,572 +cum inflation,572 +cowering,572 +azuki osamitsu,572 +wafer stick,571 +van,571 +toshishikisai,571 +souryuu asuka langley (cosplay),571 +sankakusui,571 +rosa (arknights),571 +phoenix wright,571 +mvv,571 +mamemaki,571 +kamina shades,571 +hanging plant,571 +beige cardigan,571 +aegis sword (xenoblade),571 +60+fps,571 +wool (miwol),570 +wii fit trainer,570 +weights,570 +viper,570 +tag,570 +storm,570 +komi zumiko,570 +koizumi mahiru,570 +kibina high school uniform,570 +captain (kemono friends),570 +amasawa yuuko,570 +amano kouki,570 +agatsuma zenitsu,570 +sazanami mio,569 +rossweisse,569 +red mittens,569 +mocha (cotton),569 +liya,569 +holding branch,569 +heles,569 +han (jackpot),569 +glowing tattoo,569 +fukahire (ruinon),569 +female service cap,569 +anya alstreim,569 +4chan,569 +tokoro megumi,568 +sun wukong,568 +romani archaman,568 +nonomi (swimsuit) (blue archive),568 +massachusetts (azur lane),568 +mannequin,568 +light blue background,568 +kisaragi nana,568 +inverted bob,568 +hamanami (kancolle),568 +chrysanthemum,568 +chaesu,568 +alpha signature,568 +against fourth wall,568 +tank focus,567 +stalk in mouth,567 +ru zhai,567 +noi (dorohedoro),567 +miyagoe yoshitsuki,567 +kanzakietc,567 +incoming hug,567 +himemushi momoyo,567 +fallout (series),567 +emily (pure dream),567 +diamond (houseki no kuni),567 +delinquent,567 +tsukui kachou,566 +tenmaso,566 +ruukii drift,566 +prank,566 +pallad,566 +mikazuki neko,566 +ikusaba mukuro,566 +grima (fire emblem),566 +gingham,566 +gekitotsu! 
joshikousei oiroke sensha gundan,566 +fur-trimmed footwear,566 +flower in mouth,566 +falken (yutozin),566 +chili pepper,566 +bubba (watson amelia),566 +tamamo cat (third ascension) (fate),565 +tajima ryuushi,565 +sailor uranus,565 +removing jacket,565 +orange rose,565 +nekoya (liu),565 +milfeulle sakuraba,565 +kisaragi momo,565 +hibari (senran kagura),565 +dollar sign,565 +chakram,565 +asseylum vers allusia,565 +yoi naosuke,564 +tonarikeru,564 +strawberry panic!,564 +spoken food,564 +sequential,564 +natsuiro egao de 1 2 jump!,564 +kafuu chino's school uniform,564 +izawa (bhive003),564 +homu (honkai impact),564 +hakuryuu (inazuma eleven),564 +fruit on head,564 +frilled camisole,564 +drowning,564 +chained wrists,564 +bull,564 +azusa (hws),564 +white blood cell (hataraku saibou),563 +vampire (azur lane),563 +valorant,563 +takase muh,563 +sweater pull,563 +sky lantern,563 +russia (hetalia),563 +redcomet,563 +rebe11,563 +naganohara mio,563 +math,563 +mai natsume,563 +hair wings,563 +guweiz,563 +chihiro (kemonomichi),563 +wooden horse,562 +tamago (yotsumi works),562 +striped hairband,562 +spanish text,562 +scar on mouth,562 +pretty rhythm,562 +onii-shota,562 +nishihara isao,562 +minazuki (kancolle),562 +microwave,562 +kuroka (high school dxd),562 +kimi ga nozomu eien,562 +kanon (umineko),562 +hypno,562 +hi you (flying bear),562 +handshake,562 +eye trail,562 +cow costume,562 +arm across waist,562 +arcanine,562 +afterword,562 +yoshikawa yuuko,561 +two-tone coat,561 +torn bike shorts,561 +seraziel,561 +rokugou daisuke,561 +raspberry,561 +ophelia (fire emblem),561 +nao (mabinogi),561 +missing limb,561 +mignon,561 +kouda suzu,561 +ichihaya,561 +haru (nakajou-28),561 +h kasei,561 +fukai ryosuke,561 +cougar (cougar1404),561 +302,561 +taishi (picchiridou),560 +super robot wars og saga mugen no frontier,560 +skadi (waverider) (arknights),560 +shorts removed,560 +rooster,560 +mutual hug,560 +minase akiko,560 +maji de watashi ni koi shinasai!,560 +kozue akari,560 +ica,560 +gin'ichi (akacia),560 +frostnova (arknights),560 +covering ears,560 +buront,560 +beatrice (re:zero),560 +bald eagle,560 +arm around back,560 +yamagou ayumi,559 +tarnished (elden ring),559 +tales of xillia 2,559 +star ocean the second story,559 +sora (arknights),559 +peony (flower),559 +muvluv total eclipse,559 +moth,559 +ky kiske,559 +comiket,559 +bunbun,559 +bayonetta 2,559 +anchor earrings,559 +wuxian (the legend of luoxiaohei),558 +shun (small) (blue archive),558 +santa claus,558 +red mask,558 +pigeon666,558 +ojamajo doremi,558 +melony (pokemon),558 +martha (swimsuit ruler) (fate),558 +kuena,558 +kenshin187,558 +kar98k (girls' frontline),558 +itoshiki nozomu,558 +fuuen (akagaminanoka),558 +dora ita,558 +box tie,558 +ace of hearts,558 +vertical-striped kimono,557 +seikan hikou,557 +ryuki (ryukisukune),557 +rhine lab logo,557 +poke ball (legends),557 +ouno (nounai disintegration),557 +orizen,557 +ore twintail ni narimasu,557 +lvl (sentrythe2310),557 +icomochi,557 +hand on wall,557 +getter robo,557 +casino (casinoep),557 +body switch,557 +aviator cap,557 +v-mag,556 +tsukiman,556 +tomori nao,556 +tanabe (fueisei),556 +suke (singekijyosei),556 +sengoku bushouki muramasa,556 +sawao,556 +purple fur,556 +mountain (arknights),556 +mega man x (character),556 +kutan,556 +kadomatsu,556 +gesture,556 +ashtray,556 +wii fit trainer (female),555 +sombra (overwatch),555 +short bangs,555 +qingxin flower,555 +projectile lactation,555 +pitcher,555 +okada izou (fate),555 +layered kimono,555 +imai lisa,555 +holding megaphone,555 +hirschgeweih antennas,555 
+hinamatsuri,555 +circle formation,555 +anal fluid,555 +ai takurou,555 +totooria helmold,554 +stance,554 +smelling clothes,554 +rice paddy,554 +reku,554 +raiden mei (herrscher of thunder),554 +psycho-pass,554 +orange bra,554 +na-ga,554 +mob psycho 100,554 +koku,554 +hiiragi yuzu,554 +fun bo,554 +forehead tattoo,554 +flower in eye,554 +dsr-50 (girls' frontline),554 +disguise,554 +dhole (kemono friends),554 +canned coffee,554 +azumanga daioh's school uniform,554 +yorumi rena,553 +yakob labo,553 +turtwig,553 +sothis (fire emblem),553 +reverse translation,553 +lavender background,553 +jtveemo,553 +hoshino hinata,553 +holding surfboard,553 +fuura kafuka,553 +fujinami (kancolle),553 +floating rock,553 +cure princess,553 +cumdump,553 +blood in mouth,553 +bindi,553 +z46 (azur lane),552 +tsuji renta,552 +team magma,552 +sweep tosho (umamusume),552 +surio,552 +pants under skirt,552 +nekoume,552 +mutou yuugi,552 +minust,552 +kurugaya yuiko,552 +kei jiei,552 +hidaka ai,552 +hard translated (non-english),552 +hands on own breasts,552 +ginko (silver fox),552 +dirigible,552 +damao yu,552 +daizu (melon-lemon),552 +clefairy,552 +clara (girls und panzer),552 +asuka (senran kagura),552 +aether foundation employee,552 +yandere trance,551 +weee (raemz),551 +superman (series),551 +side-tie swimsuit,551 +see-through jacket,551 +sasaki chiho,551 +rei (pokemon),551 +josou seme,551 +iwamoto eiri,551 +gj-bu,551 +flattop,551 +evelynn (league of legends),551 +dragonite,551 +crotch zipper,551 +asticassia school uniform,551 +ryuuhou (kancolle),550 +mikisugi aikurou,550 +lightning bolt print,550 +less,550 +koizumi itsuki (female),550 +kayoko (blue archive),550 +hm (hmongt),550 +fu hua (herrscher of sentience),550 +bianka durandal ataegina,550 +adjusting buruma,550 +yumiya,549 +twrlare,549 +tri braids,549 +taketora suzume,549 +saotome ranma,549 +petra gurin,549 +pachirisu,549 +ozymandias (fate),549 +luo tianyi,549 +lexington (warship girls r),549 +james moriarty (archer) (fate),549 +interior,549 +hands on another's thighs,549 +ergot,549 +blunt tresses,549 +tenshin amaguri (inobeeto),548 +sin sack,548 +shinonono houki,548 +niina ryou,548 +materia,548 +huang lingyin,548 +hooded vest,548 +eva beatrice,548 +chibi on head,548 +char aznable,548 +celebi,548 +caesar (girls und panzer),548 +bruce lee's jumpsuit,548 +tukino neru,547 +toukaairab,547 +segami daisuke,547 +pokemon ears,547 +okazaki ushio,547 +namauni,547 +murder,547 +munmu-san,547 +lancelot (fate/grand order),547 +karate gi,547 +hxxg,547 +hori yuko,547 +hiememiko,547 +fist pump,547 +egg laying,547 +cuff links,547 +chess,547 +bobblehat,547 +bertolt hoover,547 +wyvern,546 +sunhyun,546 +race vehicle,546 +po-ju,546 +makeup brush,546 +koi wa sensou (vocaloid),546 +high five,546 +hand over face,546 +furoshiki,546 +charlotta (granblue fantasy),546 +casino,546 +battle spirits,546 +arezu (pokemon),546 +aiba yumi,546 +admiral hipper (azur lane),546 +yakihebi,545 +tear grants,545 +suou pavlichenko,545 +summon night 3,545 +shinozuka jouji,545 +ouma tokiichi,545 +mini witch hat,545 +holographic monitor,545 +green-tinted eyewear,545 +dragon print,545 +crutch,545 +cross tie,545 +coffee pot,545 +bagpipe (arknights),545 +uzaki tsuki,544 +terrajin,544 +taikyokuturugi,544 +sobbing,544 +seihekiog,544 +sakata nemuno,544 +princess serenity,544 +phantom of the kill,544 +ooji mochizou,544 +kuroi mimei,544 +gwendolyn (odin sphere),544 +fruit cup,544 +frilled umbrella,544 +cure pine,544 +checkered bow,544 +asaka karin,544 +akagi kurage,544 +ahoge wag,544 +young link,543 +tsukudani norio,543 
+timbougami,543 +penis on ass,543 +ononoki yotsugi,543 +namie-kun,543 +mitha,543 +mio (xenoblade),543 +mikumikudance (medium),543 +lying on lap,543 +long nose,543 +kushieda minori,543 +fusuma,543 +free style (yohan1754),543 +fanning face,543 +colored shoe soles,543 +villager (animal crossing),542 +special feeling (meme),542 +shone,542 +seto kousuke,542 +panties under shorts,542 +mukaido manaka,542 +madoka runes,542 +kirito (sao-ggo),542 +kaidou minami,542 +jug,542 +holding shirt,542 +high wizard (ragnarok online),542 +guoba (genshin impact),542 +cigarette pack,542 +bokukawauso,542 +banana peel,542 +accident,542 +wet skirt,541 +standard bearer,541 +scratching head,541 +rust,541 +ri-net,541 +punk,541 +pop'n music,541 +midorikawa you,541 +holding hose,541 +finger to another's mouth,541 +dungeon,541 +annotation request,541 +altaria,541 +ace combat,541 +zenon (for achieve),540 +takanashi kei (hitsujikan),540 +sinking,540 +single knee pad,540 +shirt overhang,540 +shared earphones,540 +nekobaka,540 +mika (blue archive),540 +konishi (koconatu),540 +heshikiri hasebe,540 +flygon,540 +fire emblem: thracia 776,540 +elly (touhou),540 +dualshock,540 +creeper,540 +camisole lift,540 +washing hair,539 +voms,539 +sugimoto reimi,539 +sidecut,539 +scratching cheek,539 +paint can,539 +onesie,539 +mutsuki kai ni (kancolle),539 +megido72,539 +lockheart,539 +jump rope,539 +ittla,539 +hasumi souji (eroe),539 +gundam hathaway's flash,539 +greek toe,539 +fish bone,539 +edogawa conan,539 +atelier ryza 2,539 +amaterasu (ookami),539 +utawarerumono: itsuwari no kamen,538 +two-tone cape,538 +taka (takahirokun),538 +tachibana taki,538 +starfish hair ornament,538 +shirt around waist,538 +sazanami konami,538 +norman maggot,538 +nico nico nii,538 +mythra (massive melee) (xenoblade),538 +koi (koisan),538 +inuyasha (character),538 +holding toy,538 +glitter,538 +extraction,538 +delicious party precure,538 +too many cats,537 +takanashi ringo,537 +saionji hiyoko,537 +pointing forward,537 +masami chie,537 +kokonoe rin,537 +kiso kai ni (kancolle),537 +kakegurui,537 +ichigaya arisa,537 +hand print,537 +cutting,537 +cross pasties,537 +belarus (hetalia),537 +yamada rei (rou),536 +valentine (skullgirls),536 +toujou koneko,536 +pixiv fantasia sword regalia,536 +nami junior high school uniform,536 +mega gardevoir,536 +kagemori michiru,536 +heiwajima shizuo,536 +black garter straps,536 +bipod,536 +yonaga angie,535 +weasel girl,535 +tsubasa (abchipika),535 +thumbs down,535 +mitsumomo mamu,535 +mitarashi kousei,535 +jacket girl (dipp),535 +ichii yui,535 +fur-trimmed collar,535 +cure diamond,535 +cervical penetration,535 +beach mat,535 +yoshi,534 +too literal,534 +tomobe kinuko,534 +tokarev (girls' frontline),534 +tang keke,534 +stripper,534 +saury,534 +sakaki imasato,534 +rosa farrell,534 +rei (sanbonzakura),534 +neon-tetora,534 +mismatched sclera,534 +kurashima tomoyasu,534 +holding torpedo,534 +holding leg,534 +gaijin 4koma (meme),534 +foot worship,534 +emolga,534 +eiyuu senki,534 +detached leggings,534 +yellow horns,533 +trash,533 +silly (marinkomoe),533 +schwertkreuz,533 +print hoodie,533 +plasma-chan (kancolle),533 +piccolo,533 +naso4,533 +naked capelet,533 +momose (oqo),533 +miss fortune (league of legends),533 +mahjong soul,533 +lime (fruit),533 +jabot,533 +finalcake,533 +feet on chair,533 +covered penis,533 +yumesato makura,532 +walker,532 +tenryuu kai ni (kancolle),532 +tape bondage,532 +stocks,532 +shirakiin ririchiyo,532 +planet hair ornament,532 +okiraku nikku,532 +kobayakawa yutaka,532 +hayakawa aki,532 +eye reflection,532 
+cross-eyed,532 +cellien (kemono friends),532 +bache (azur lane),532 +annette fantine dominic,532 +akimoto komachi,532 +akane (blue archive),532 +zannen na hito,531 +vergil (devil may cry),531 +venera-sama,531 +valkyrie (fate),531 +shinmai maou no testament,531 +noya makoto,531 +night sparrow love,531 +nannacy7,531 +konshin,531 +ibara mayaka,531 +hatakaze (kancolle),531 +hades (game),531 +green rose,531 +fireball,531 +eunie (xenoblade),531 +endou mamoru,531 +beer bottle,531 +battery indicator,531 +uranami (kancolle),530 +shulk (xenoblade),530 +samip,530 +rento (rukeai),530 +poyo (hellmayuge),530 +portal (object),530 +lambda-11,530 +kyo (kuroichigo),530 +holding teapot,530 +harimoji,530 +glands of montgomery,530 +felutiahime,530 +crow (la+ darknesss),530 +chocolate banana,530 +brain,530 +abyssal admiral (kancolle),530 +yana (chihuri),529 +welrod mkii (girls' frontline),529 +underpec,529 +ring fit adventure,529 +resident evil 4,529 +quadruple amputee,529 +nazi,529 +kuriyama mirai,529 +kizumonogatari,529 +kimura takahiro,529 +inoue sora,529 +hoshino miyako (wataten),529 +heart pendant,529 +gabriel tenma white,529 +dullahan,529 +drag-on dragoon 3,529 +ajishio,529 +yuano,528 +toutenkou,528 +psyduck,528 +nousagi (usada pekora),528 +ms. fortune (skullgirls),528 +mahou shoujo ikusei keikaku unmarked,528 +kusakabe misao,528 +koume keito,528 +hidden star in four seasons,528 +genji (overwatch),528 +covered clitoris,528 +yamanbagiri kunihiro,527 +war hammer,527 +venusaur,527 +souryu,527 +ndgd,527 +luke (dydansgur),527 +kaleidostick,527 +kaetzchen,527 +is that so,527 +high-waist pantyhose,527 +head only,527 +haneoka school uniform,527 +celica (fire emblem),527 +bird on arm,527 +utaite (singer),526 +single thigh boot,526 +prussia (hetalia),526 +oversized forearms,526 +matsukaze (kancolle),526 +futari wa precure max heart,526 +christmas stocking,526 +check artist,526 +arm over head,526 +tusia,525 +suzunone rena,525 +surprised arms,525 +suikawari,525 +senko (sewayaki kitsune no senko-san),525 +rubber gloves,525 +robin hood (fate),525 +randomboobguy,525 +pyramid (structure),525 +ots-14 (girls' frontline),525 +lugia,525 +hand on another's hand,525 +fast food uniform,525 +elfenlied22,525 +claw foot bathtub,525 +cheshire cat (alice in wonderland),525 +blue camisole,525 +aircraft carrier oni,525 +yae (mono110),524 +tamamo no mae (swimsuit lancer) (second ascension) (fate),524 +rozaliya olenyeva,524 +rindou mikoto,524 +nero claudius (olympian bloomers) (fate),524 +lonely,524 +listen!!,524 +king dedede,524 +gosick,524 +foot dangle,524 +fold-over boots,524 +doorknob,524 +blue tunic,524 +binary,524 +two-tone footwear,523 +ticket,523 +the legend of zelda: majora's mask,523 +synecdoche,523 +soranamae,523 +sesshouin kiara (swimsuit mooncancer),523 +sayori (doki doki literature club),523 +ribbon-trimmed panties,523 +parari (parari000),523 +overall skirt,523 +okitakung,523 +kotatsu (kotatsu358),523 +kobeya uniform,523 +green tank top,523 +dwarf,523 +adult neptune,523 +yuuki tatsuya,522 +yapo (croquis side),522 +yakushiji saaya,522 +virgilia (umineko),522 +torn wings,522 +russian commentary,522 +rasukaru,522 +norizou type-r,522 +ninian (fire emblem),522 +kozaki yuusuke,522 +kagurazaka asuna,522 +iori junpei,522 +danna ga nani wo itte iru ka wakaranai ken,522 +dakku (ogitsune),522 +baking,522 +yan qing (fate),521 +teardrop facial mark,521 +submarine,521 +senri gan,521 +puyopuyo fever,521 +pants around one leg,521 +kamukamu (ars),521 +itou (onsoku tassha),521 +ikaruga (senran kagura),521 +hinata kaho,521 +hazama,521 
+fiammetta (arknights),521 +drifloon,521 +carousel,521 +yun jin (genshin impact),520 +tottotonero,520 +sword on back,520 +super heroine boy,520 +sukoya kana,520 +singlet,520 +pekoyama peko,520 +natsume eri,520 +mouth,520 +matanonki,520 +kurasuke,520 +janong,520 +isshiki akane,520 +heart (organ),520 +green fire,520 +glansjob,520 +fi-san,520 +broccoli,520 +bkub (style),520 +ankleband,520 +60mai,520 +wingull,519 +urinal,519 +shitty admiral (phrase),519 +rag,519 +princess of the crystal,519 +nakiri erina,519 +nadegata,519 +kamiya tomoe,519 +hoshino (swimsuit) (blue archive),519 +horn grab,519 +holding with tail,519 +crumbles,519 +chris redfield,519 +blending,519 +batta (ijigen debris),519 +backless swimsuit,519 +zoids genesis,518 +tatebayashi sakurako,518 +tanaka gandamu,518 +shuu (inazuma eleven),518 +shuten douji (first ascension) (fate),518 +shintarou,518 +sailor neptune,518 +purple sash,518 +notched lapels,518 +no feet,518 +li xiaolang,518 +elevator,518 +donburi (donburikazoku),518 +arlecchino (genshin impact),518 +yumi (bow),517 +tamamo cat (first ascension) (fate),517 +starshadowmagician,517 +stacking,517 +rural,517 +qurare magic library,517 +pocari sweat,517 +nowi (fire emblem),517 +nana asta deviluke,517 +lapel pin,517 +interview,517 +funeral dress,517 +emmet (pokemon),517 +busujima saeko,517 +yano mitsuki,516 +whiteboard,516 +tsubaki (blue archive),516 +terry bogard,516 +suzi q,516 +strength (black rock shooter),516 +snorlax,516 +side-tie bottom,516 +risotto nero,516 +kimoshi,516 +haunter,516 +hair ornament removed,516 +face punch,516 +crushing,516 +compound eyes,516 +amakusa shirou (fate),516 +stuffed unicorn,515 +madoka aguri,515 +face down,515 +evangeline a.k. mcdowell,515 +cork,515 +cat on shoulder,515 +bear hood,515 +abigail williams (second ascension) (fate),515 +yuyushiki's school uniform,514 +yoake mae yori ruri iro na,514 +yagami hikari,514 +tucked penis,514 +toji no miko,514 +mondi hl,514 +meth (emethmeth),514 +malina (helltaker),514 +limiter (tsukumo sana),514 +kuririn,514 +konoe kanata,514 +kasugano urara (yes! 
precure 5),514 +higashiyama kobeni,514 +giant male,514 +efe,514 +dogeza,514 +camera phone,514 +bamboo shoot,514 +aqua (kingdom hearts),514 +zoya petrovna vecheslova,513 +vajra (granblue fantasy),513 +tsukumiya amane,513 +soy sauce,513 +oono aya,513 +lacus clyne,513 +kongou (aoki hagane no arpeggio),513 +kiira,513 +kanno takanori,513 +hummy (suite precure),513 +hirasaka makoto,513 +hestia (danmachi) (cosplay),513 +giratina,513 +fumitsuki (minaduki 6),513 +cure dream,513 +crocs,513 +bee girl,513 +vodka,512 +vertical-striped bra,512 +thong aside,512 +tanaka ryou,512 +side-tie shirt,512 +shiroshi (denpa eshidan),512 +ryuu.,512 +red feathers,512 +manino (mofuritaionaka),512 +maboroshi mochi,512 +long shirt,512 +kousaki rui,512 +kemono friends 2,512 +ji no,512 +instagram logo,512 +frankenstein's monster (swimsuit saber) (fate),512 +da capo ii,512 +closet,512 +at computer,512 +asia argento,512 +arima senne,512 +aioi yuuko,512 +watanon (gakushokutei),511 +sweet lolita,511 +reverse spitroast,511 +qys3,511 +mafuyu (chibi21),511 +kurione (zassou),511 +kirishima eijirou,511 +jam,511 +gon freecss,511 +dorsal fin,511 +butter-t,511 +brown serafuku,511 +blue oath,511 +aquaegg,511 +846-gou,511 +zaxwu,510 +south park,510 +record,510 +otosama,510 +mismatched eyelashes,510 +manaka lala,510 +lilina (fire emblem),510 +gallade,510 +flipper,510 +depressed,510 +dantewontdie,510 +cockroach,510 +chessboard,510 +akagi (fmttps),510 +wada kazu,509 +sticky note,509 +shouji nigou,509 +shinkai shoujo (vocaloid),509 +sakura trick,509 +national shin ooshima school uniform,509 +mashima hiro,509 +market stall,509 +kindergarten,509 +implied pantyshot,509 +holding wrench,509 +gigantamax,509 +dotted background,509 +dmm,509 +bird print,509 +azur lane: slow ahead,509 +amazon (company),509 +yae sakura (gyakushinn miko),508 +uchiha sarada,508 +stuffed panda,508 +soukyuu no fafner,508 +shirley fenette,508 +san (mononoke hime),508 +occultic;nine,508 +no sclera,508 +komakoma (magicaltale),508 +judgement (helltaker),508 +jjanda,508 +isabella valentine,508 +harley quinn,508 +handjob gesture,508 +depth charge,508 +c-string,508 +brass knuckles,508 +ame.,508 +u.n. 
spacy,507 +toujou sakana,507 +toraishi 666,507 +tonpuu,507 +shiny swimsuit,507 +sakuya tsuitachi,507 +pushing away,507 +narue,507 +nanashino,507 +lindaroze,507 +king's raid,507 +kataginu,507 +ima-no-tsurugi,507 +glass bottle,507 +gentoo penguin (kemono friends),507 +frilled jacket,507 +finger frame,507 +devil may cry 5,507 +coin purse,507 +yorita yoshino,506 +tomokichi,506 +stained clothes,506 +squeezing,506 +sauce,506 +olga marie animusphere,506 +okuda yasuhiro,506 +mole on arm,506 +mechanic,506 +masara (chuujou),506 +kirisame marisa (pc-98),506 +kawakami sadayo,506 +hoshi no tsue,506 +headphones removed,506 +genderswap (otf),506 +futatsuiwa mamizou (human),506 +doom (series),506 +carried breast rest,506 +benikurage (cookie),506 +aqua kimono,506 +amagi (amagi626),506 +achiga school uniform,506 +sheikah,505 +sazaki kaoruko,505 +presenting panties,505 +oversized breast cup,505 +miyashita ai,505 +makacoon,505 +holding heart,505 +headlight,505 +hawk,505 +final fantasy x-2,505 +duke (inu daimyou),505 +band,505 +anko (gochiusa),505 +yukimura aoi,504 +xenosaga episode iii,504 +white hood,504 +white garter straps,504 +stanley lau,504 +softmode,504 +scorpion tail,504 +policeman,504 +persona 2,504 +pelvic curtain lift,504 +omachi (slabco),504 +matsuda yuusuke,504 +jolly roger,504 +jewelpet (series),504 +bloodshot eyes,504 +abandon ranka,504 +wavy eyes,503 +translucent,503 +stomach growling,503 +siirakannu,503 +satou atsuki,503 +phantom ix row,503 +luciela r. sourcream,503 +kamen rider ooo (series),503 +island fox (kemono friends),503 +hierophant green,503 +hieroglyphics,503 +hakuo school uniform,503 +grey bowtie,503 +foxyreine,503 +cum through clothes,503 +zentradi,502 +treecko,502 +niiya,502 +mg42,502 +mawile,502 +kawachi koorogi,502 +in mouth,502 +fern,502 +cargo pants,502 +calligraphy,502 +benienma (fate),502 +xander (fire emblem),501 +tiger costume,501 +sakuramori kaori,501 +pipe in mouth,501 +maya fey,501 +marill,501 +mahouka koukou no rettousei,501 +magikarp,501 +ishikei (style),501 +ideolo,501 +hoshina hikaru,501 +horn ring,501 +hello kitty,501 +hand on floor,501 +hanadera nodoka,501 +fighter (granblue fantasy),501 +black vs white,501 +aroused,501 +vignetting,500 +tsugumomo,500 +sprinkles,500 +shigatake,500 +seraph,500 +pixiv fantasia new world,500 +pink diamond 765 (idolmaster),500 +nerv,500 +holding footwear,500 +cheekbones,500 +chabashira tenko,500 +broken eyewear,500 +ao-chan (ninomae ina'nis),500 +ai-chan (tawawa),500 +victorica de blois,499 +tokyo (great akuta),499 +tokiha mai,499 +takayama toshinori,499 +seramikku,499 +sekaiju no meikyuu 5,499 +resident evil 2,499 +rainbow six siege,499 +lily (vocaloid),499 +harada (sansei rain),499 +gladiia (arknights),499 +gemu555,499 +beach volleyball,499 +animal head,499 +shaking head,498 +puuzaki puuna,498 +nipple cutout,498 +moth girl,498 +kuroi moyamoya,498 +komori kuzuyu,498 +kirero,498 +hands in pocket,498 +female assassin (fate/zero),498 +blame!,498 +absol,498 +yonu (yonurime),497 +tsab air military uniform,497 +tokyo mew mew,497 +shaymin,497 +rotary phone,497 +nu carnival,497 +narciso anasui,497 +myama,497 +mansion,497 +fumo (doll),497 +food request,497 +ejaculation under clothes,497 +daikon,497 +container,497 +ayakura juu,497 +aoi nagisa (metalder),497 +after masturbation,497 +tsukareta san,496 +tatsumi ray,496 +tantrum,496 +tachibana makoto,496 +siegfried (fate),496 +ryouma (galley),496 +reshiram,496 +print scarf,496 +planted umbrella,496 +mizumoto yukari,496 +miyama-zero,496 +lace border,496 +kurapika,496 +kore wa zombie desu ka?,496 
+kantele,496 +forced smile,496 +ebiblue,496 +dango daikazoku,496 +chrome dokuro,496 +wild tiger,495 +vulpix,495 +studded choker,495 +shuga (soranote),495 +short yukata,495 +sameha ikuya,495 +rucchiifu,495 +puru two,495 +nagasioo,495 +muku (muku-coffee),495 +kadose ara,495 +infukun,495 +horiguchi yukiko,495 +hand on glass,495 +grey one-piece swimsuit,495 +fudou yuusei,495 +face in pillow,495 +eichi yuu,495 +cure twinkle,495 +cure rosetta,495 +boshi (a-ieba),495 +arm between legs,495 +ao banana,495 +victorian,494 +ushiromiya lion,494 +senkou hanabi,494 +saiste,494 +pecorine (summer) (princess connect!),494 +kiran (fire emblem),494 +iroyopon,494 +idolmaster live for you!,494 +hanasaka houcha,494 +desert eagle,494 +cure beat,494 +carrying bag,494 +blastoise,494 +barasuishou,494 +arcade cabinet,494 +ao usagi,494 +alcremie,494 +adam's apple,494 +yakiimo,493 +white heart,493 +wada arco,493 +tsurugi kyousuke,493 +shepherd0821,493 +senki zesshou symphogear xd unlimited,493 +sadamoto yoshiyuki,493 +pawn,493 +kiryu manzoku,493 +hoshikawa lily,493 +hijiri rei,493 +hidari (left side),493 +gumroad reward,493 +forked tail,493 +evoker,493 +dumbbell nan kilo moteru?,493 +anti-aircraft,493 +amusement park,493 +akino shuu,493 +zettai karen children,492 +yuran,492 +wiping,492 +the elder scrolls,492 +takatsuki ichi,492 +signpost,492 +shan,492 +saraki,492 +read or die,492 +piyodera mucha,492 +pixiv fantasia 3,492 +nakabayashi zun,492 +mordred (swimsuit rider) (first ascension) (fate),492 +monk,492 +minigun,492 +min min (arms),492 +kekemotsu,492 +holding microphone stand,492 +holding chain,492 +fleur de lis,492 +enema,492 +e-hentai sample,492 +delmogeny uniform,492 +corsage,492 +yoshioka yoshiko,491 +trigger (company),491 +street fighter 6,491 +ran'ou (tamago no kimi),491 +pan (mimi),491 +noses touching,491 +nipple-to-nipple,491 +monori rogue,491 +mint blancmanche,491 +koyubi (littlefinger1988),491 +kawai (purplrpouni),491 +kamonari ahiru,491 +jack frost,491 +holding notebook,491 +hangar,491 +hakozaki serika,491 +grid background,491 +geshumaro,491 +food bite,491 +crayon shin-chan,491 +blue rose (tiger & bunny),491 +blazblue: central fiction,491 +antonio lopez,491 +adachi tooru,491 +zorua,490 +undone neck ribbon,490 +skating,490 +priestess (goblin slayer!),490 +pamiat merkuria (azur lane),490 +lifted by tail,490 +kousei (public planet),490 +koiwai yotsuba,490 +hinoa,490 +hinata yukari,490 +enna alouette,490 +demonbane,490 +arashio kai ni (kancolle),490 +yui (sao),489 +ushiromiya rosa,489 +two-tone hoodie,489 +mechanical,489 +make up in halloween! 
(umamusume),489 +keenh,489 +kallen kaslana,489 +huge testicles,489 +holding cross,489 +holding another's leg,489 +hikage eiji,489 +hat belt,489 +hanafuda,489 +grey apron,489 +glory wall,489 +coconut tree,489 +cobblestone,489 +bridle,489 +bayonetta 1,489 +weno,488 +ushiromiya natsuhi,488 +tofu,488 +thighhighs removed,488 +suzushiro kurumi,488 +stopwatch,488 +shizuki hitomi,488 +scanty (psg),488 +satsuki kai ni (kancolle),488 +playing sports,488 +nakahira guy,488 +mountain of faith,488 +mononobe alice,488 +magatama hair ornament,488 +keuma,488 +izumi sai,488 +imazon,488 +fabulous,488 +asirpa,488 +alice carroll,488 +alex ahad,488 +zaku ii,487 +wang liu mei,487 +tsukasawa takamatsu,487 +subaru duck,487 +south dakota (kancolle),487 +sound voltex,487 +onigirikun,487 +myrtenaster,487 +lying on water,487 +luicent,487 +cure muse (yellow),487 +cream (nipakupa),487 +aikatsu friends!,487 +yajirushi (chanoma),486 +takitarou,486 +son goten,486 +shiratsuyu kai ni (kancolle),486 +sei shoujo,486 +resident evil 5,486 +playing with another's hair,486 +oda non,486 +nudist beach uniform,486 +nobita (makoto7060355),486 +nathan seymour,486 +mochizuki shiina,486 +masked,486 +lockon stratos,486 +kasugano sora,486 +elbow sleeve,486 +dsmile,486 +beaker,486 +yamada tae,485 +xblaze,485 +sw (taco),485 +ssalbulre,485 +shokuyou mogura,485 +rubbing,485 +raw egg lent,485 +puppet rings,485 +portgas d. ace,485 +minarai,485 +manhwa,485 +kuromiya raika,485 +kissing neck,485 +intestines,485 +indomitable (azur lane),485 +faustsketcher,485 +ear grab,485 +da capo i,485 +crotchless pants,485 +amezawa koma,485 +alphonse (white datura),485 +zuikaku kai ni (kancolle),484 +yuuki rika,484 +suspenders slip,484 +stapler,484 +smartphone case,484 +side-seamed legwear,484 +shermie (kof),484 +sheep costume,484 +sasaki akebi,484 +ruton-niki,484 +poring,484 +oda nobukatsu (fate),484 +naizuri,484 +maruzensky (umamusume),484 +male doctor (arknights),484 +making-of,484 +laserflip,484 +kirakishou,484 +holding bra,484 +haneru,484 +hand on own forehead,484 +gazing eye,484 +gae buidhe (fate),484 +four goddesses online: cyber dimension neptune,484 +falling card,484 +elizabeth liones,484 +dusk ball,484 +diploma,484 +covered testicles,484 +bars,484 +amisu,484 +ak-15 (girls' frontline),484 +yami yuugi,483 +sugoi dekai,483 +shaman king,483 +rice hat,483 +pravda (emblem),483 +party,483 +pairan,483 +nonohara yuzuko,483 +naka kai ni (kancolle),483 +mtu virus,483 +merman,483 +medieval,483 +mdr (girls' frontline),483 +maple leaf print,483 +komori kiri,483 +hakusai (tiahszld),483 +gradient wings,483 +efukei,483 +buried,483 +bedivere (fate),483 +watching,482 +takao (aoki hagane no arpeggio),482 +syhan,482 +sophia (fire emblem),482 +soejima shigenori,482 +rory mercury,482 +robert e. o. 
speedwagon,482 +nina (fire emblem),482 +kutsuki kai,482 +izumi tsubasu,482 +iwasaki minami,482 +inktober,482 +hiburi (kancolle),482 +hedgehog,482 +heart maebari,482 +deer girl,482 +daive,482 +american beaver (kemono friends),482 +aliasing,482 +zekkyon,481 +yakisobapan tarou & negitoro-ko,481 +winged hair ornament,481 +shibuki ran,481 +purple collar,481 +neit ni sei,481 +lucas (mother 3),481 +living hair,481 +kujou ichiso,481 +konoha (kagerou project),481 +ken (koala),481 +kanno naoe,481 +fujibayashi ryou,481 +flying button,481 +elma (maidragon),481 +brock (pokemon),481 +blocking,481 +billboard,481 +baccano!,481 +ayato,481 +wedge heels,480 +trophy,480 +torn coat,480 +tobi (one),480 +star driver,480 +seele (alter ego),480 +saipaco,480 +runny nose,480 +red nose,480 +project diva 2nd,480 +nipple chain,480 +markings,480 +luke fon fabre,480 +kirisaki chitoge,480 +kinugasa kai ni (kancolle),480 +kekkai sensen,480 +kawanishi shinobu,480 +kakure eria,480 +gustav (telomere na),480 +enlightened byleth (female),480 +dark clouds,480 +clear sky,480 +bacon,480 +american flag shirt,480 +amane suzuha,480 +adaman (pokemon),480 +achilles (fate),480 +windsock,479 +wakura (gcdan),479 +takafuji kako,479 +shuzi,479 +reptile girl,479 +metal gear solid v,479 +masuishi kinoto,479 +ikezawa hanako,479 +ikeda jun (mizutamari),479 +hitsugi no chaika,479 +english audio,479 +disco brando,479 +ayanepuna,479 +artsy-rc,479 +armored core,479 +arm on head,479 +alvin (tales),479 +yu yu,478 +usopp,478 +uncle and niece,478 +stiletto (weapon),478 +shibacha,478 +sangokushi taisen,478 +rex k,478 +record of lodoss war,478 +orange headband,478 +nono hana,478 +nero claudius (bride) (second ascension) (fate),478 +matsudaira touko,478 +hope's peak academy school uniform,478 +gunslinger girl,478 +fever,478 +echidna (re:zero),478 +bilibili,478 +stitched arm,477 +playground,477 +miyamori aoi,477 +misaka worst,477 +kitten,477 +jasmine (pokemon),477 +gunp,477 +elizabeth (persona),477 +cross-body stretch,477 +carton,477 +ashley (warioware),477 +yu-ta,476 +table tennis paddle,476 +sasamori tomoe,476 +rosuuri,476 +nucomas,476 +monikano,476 +judo fuu,476 +itou chika,476 +inconvenient breasts,476 +hanekoto,476 +gensou suikoden v,476 +f.s.,476 +eho (icbm),476 +curled up,476 +cure passion,476 +circe (fate),476 +blue oni,476 +bad leg,476 +ayakashi (monkeypanch),476 +uu~,475 +tsumami kanzashi,475 +sitting on water,475 +shirayuki chiyo,475 +seneto,475 +scp foundation,475 +oozora akari,475 +natsuki rin,475 +naruko (naruto),475 +low-cut armhole,475 +hood (azur lane),475 +grea (shingeki no bahamut),475 +garou: mark of the wolves,475 +dan (kumadan),475 +croissant,475 +choke hold,475 +barbara (summertime sparkle) (genshin impact),475 +yoshizawa tsubaki,474 +valkyrie drive,474 +toilet stall,474 +text messaging,474 +shopping cart,474 +pepper,474 +kasuga mirai,474 +holding another's foot,474 +hitec,474 +happy facial,474 +hanauna,474 +gundam 0083,474 +fish tank,474 +fathom,474 +courtney (pokemon),474 +cherry print,474 +caramelldansen,474 +astaroth (shinrabanshou),474 +anti-aircraft gun,474 +anato finnstark,474 +akino sora,474 +ace of spades,474 +yellow border,473 +udon,473 +tweyen (granblue fantasy),473 +tsurukame,473 +snake print,473 +roxas,473 +noshiro (azur lane),473 +mud,473 +miyasu risa,473 +killer queen,473 +kikuyoshi (tracco),473 +flip flappers,473 +black-tailed prairie dog (kemono friends),473 +yukikaze panettone,472 +yamagishi yukako,472 +tripod,472 +time stop,472 +shimushu (kancolle),472 +shikitani asuka,472 +rockhopper penguin (kemono friends),472 
+pdf available,472 +oar,472 +northern italy (hetalia),472 +makoto (princess connect!),472 +kunreishiki,472 +hoozuki no reitetsu,472 +hange zoe,472 +exoskeleton,472 +dungeons and dragons,472 +cozy,472 +chocolat (momoiro piano),472 +yuugiri (zombie land saga),471 +wanaata,471 +waddle dee,471 +ryo (botugo),471 +ookuma nekosuke,471 +matsumae ohana,471 +l hakase,471 +kinta (distortion),471 +kiana kaslana (herrscher of the void),471 +impossible sweater,471 +hugh (pokemon),471 +heart panties,471 +glitchedpuppet,471 +gengetsu (touhou),471 +eudetenis,471 +choujigen game neptune mk2,471 +arisa bannings,471 +airi (queen's blade),471 +yellow sailor collar,470 +tighnari (genshin impact),470 +tanaka kotoha,470 +tanaka asuka,470 +striped neckerchief,470 +shellvi,470 +rouge the bat,470 +rolling,470 +pregnancy test,470 +pokachu,470 +mazaki anzu,470 +jessica (arknights),470 +fake magazine cover,470 +elena (street fighter),470 +chuunibyou,470 +buriki,470 +baron bunny (genshin impact),470 +a-chan (hololive),470 +yamasaki wataru,469 +usb,469 +the world,469 +takozonesu,469 +surrounded,469 +shichigatsu,469 +rinotuna,469 +precum drip,469 +pixiv fantasia age of starlight,469 +octane (apex legends),469 +mdf an,469 +massakasama,469 +komori kei,469 +jacket lift,469 +hyuuga saki,469 +holding remote control,469 +ho-oh,469 +flagpole,469 +changing room,469 +blaziken,469 +toothpick,468 +tank helmet,468 +nabeshima tetsuhiro,468 +master (vocaloid),468 +mai (touhou),468 +kazuma (kazumav),468 +hitomaru,468 +henshako,468 +el condor pasa (umamusume),468 +darjeeling (reley),468 +breath of fire v,468 +atelier sophie,468 +anbe yoshirou,468 +aircraft carrier princess,468 +weapon case,467 +unbuttoned shorts,467 +tomu (tomubobu),467 +padlocked collar,467 +oryou (girls und panzer),467 +lyn (blade & soul),467 +kanpani girls,467 +hisen kaede,467 +heidimarie w. 
schnaufer,467 +girl holding a cat (kancolle),467 +flower (vocaloid),467 +cinnabar (houseki no kuni),467 +blue pubic hair,467 +a channel,467 +two-sided jacket,466 +torichamaru,466 +russian flag,466 +pola (azur lane),466 +ousama ranking,466 +ookurikara,466 +o-ring belt,466 +miura-n315,466 +metal gear rising: revengeance,466 +medaka box,466 +measurements,466 +lain paterson,466 +kaburagi kaede,466 +hands on headphones,466 +commentary typo,466 +brown horns,466 +aurora (arknights),466 +amagai tarou,466 +zodiac,465 +wanted,465 +tsukemon,465 +shadow the hedgehog,465 +plaid legwear,465 +makai senki disgaea 3,465 +kojo anna,465 +iron saga,465 +hiraga matsuri,465 +hijikata toushirou,465 +hairstyle switch,465 +grimsley (pokemon),465 +ginkgo,465 +fio germi,465 +ewa (seraphhuiyu),465 +cure berry,465 +catura (granblue fantasy),465 +bravely default: flying fairy,465 +arin,465 +zakusi,464 +warfarin (arknights),464 +thick eyelashes,464 +tateha (marvelous grace),464 +saburou (hgmg),464 +regu (made in abyss),464 +queen of sheba (fate),464 +nilitsu,464 +nero claudius (swimsuit caster) (third ascension) (fate),464 +kishiyo,464 +heart-shaped boob challenge,464 +heanna sumire,464 +grizzly mkv (girls' frontline),464 +fumio (ura fmo),464 +fakepucco,464 +eugeo,464 +emma verde,464 +chobits,464 +aqua hairband,464 +anna miller,464 +ameno (a meno0),464 +tokiani,463 +toe cleavage,463 +texas (winter messenger) (arknights),463 +talulah (arknights),463 +square,463 +shirokami project,463 +sewing needle,463 +red cardigan,463 +peach (momozen),463 +oryou (fate),463 +omamori himari,463 +morag ladair (xenoblade),463 +makio (makiomeigenbot),463 +heart facial mark,463 +head on head,463 +fushigiboshi no futago hime,463 +front-tie bra,463 +damda,463 +zack fair,462 +yetworldview kaze,462 +xion (kingdom hearts),462 +widow's peak,462 +watarase jun,462 +ume (plumblossom),462 +sixten,462 +shingyou (alexander-13),462 +reaper (overwatch),462 +piano keys,462 +ormille,462 +nekomamire,462 +kuronuko neero,462 +kuresento,462 +hunched over,462 +gorilla,462 +fur-trimmed shirt,462 +flintlock,462 +evangelion (mecha),462 +errant,462 +dragon ball (object),462 +yone kinji,461 +weapon focus,461 +uzumaki boruto,461 +sasayuki,461 +rachnera arachnera,461 +pink tank top,461 +ning hai (azur lane),461 +neco-arc,461 +kl,461 +haman karn,461 +hakika,461 +gyarados,461 +dog hair ornament,461 +clamp (circle),461 +bonkara (sokuseki maou),461 +ayasugi tsubaki,461 +aburaage,461 +47agdragon,461 +404 (girls' frontline),461 +yasume yukito,460 +wacchi,460 +usoneko,460 +twig,460 +too many frills,460 +tinker bell,460 +shin'ya (nanp),460 +saber (cosplay),460 +radish p,460 +poop,460 +pink tail,460 +peroponesosu.,460 +kedamono kangoku-tou,460 +kamen rider fourze (series),460 +jeanne d'arc (girl from orleans) (fate),460 +iino miko,460 +hiiragi yuuichi,460 +furyou michi ~gang road~,460 +eiwa,460 +einhart stratos,460 +e10,460 +charisma break,460 +beifeng han,460 +asakuraf,460 +ana (overwatch),460 +yuugen,459 +yatogami tooka,459 +witchblade,459 +tracen training uniform,459 +tiffy (nottytiffy),459 +takaman (gaffe),459 +pirate costume,459 +oven,459 +okama,459 +multicolored neckwear,459 +metal skin,459 +mega man star force,459 +kingdom hearts birth by sleep,459 +hu dako,459 +hinata yuu,459 +hikigaya hachiman,459 +higurashi kagome,459 +hazawa tsugumi,459 +gummy (arknights),459 +gabiran,459 +food insertion,459 +drop earrings,459 +deviantart sample,459 +cut-in,459 +brown lips,459 +akeyama kitsune,459 +whale hair ornament,458 +sora wo kakeru shoujo,458 +shampoo bottle,458 +red pubic 
hair,458 +mutsuki (azur lane),458 +kneeless mermaid,458 +kaminagi (kaminagi-tei),458 +a (phrase),458 +wsman,457 +wreckage,457 +white pubic hair,457 +waterring,457 +warspite (azur lane),457 +swallowing,457 +shorts tan,457 +shibafu (glock23),457 +shakujou,457 +refraction,457 +komasan,457 +kaname buccaneer,457 +goomy,457 +baba konomi,457 +yoshizaki mine,456 +wamuu,456 +urban legend in limbo,456 +sumisu (mondo),456 +space print,456 +serika (blue archive),456 +otome game no hametsu flag shika nai akuyaku reijou ni tensei shite shimatta,456 +nekoguruma,456 +movie poster,456 +love laika (idolmaster),456 +innerboob,456 +hikami sumire,456 +hijikata toshizou (fate),456 +gavial (arknights),456 +big eyes,456 +azumi kazuki,456 +anemone (eureka seven),456 +air shakur (umamusume),456 +wild arms 1,455 +viral (ttgl),455 +spiral,455 +soulcalibur iv,455 +shiraishi tsumugi,455 +school girl strikers,455 +madjian,455 +long jacket,455 +kiyama hiroto,455 +jema,455 +girls' frontline neural cloud,455 +gia,455 +front-seamed legwear,455 +fried rice,455 +food on hand,455 +erina pendleton,455 +eriko (princess connect!),455 +cyan (show by rock!!),455 +crest,455 +case,455 +capura lin,455 +akira,455 +akemi homura (cosplay),455 +akatsuki akane,455 +vincent valentine,454 +videl,454 +tsurusaki takahiro,454 +touhoku zunko,454 +torn bikini,454 +tappa (esperanza),454 +sui-feng,454 +squigly (skullgirls),454 +spread navel,454 +sleigh,454 +silver (chenwen),454 +red curtains,454 +paragasu (parags112),454 +murata tefu,454 +mr. c.b. (umamusume),454 +minakami mai,454 +maze (gochama ze gohan),454 +ling (arknights),454 +license plate,454 +kurata rine,454 +kawase seiki,454 +inteleon,454 +feguimel,454 +emori miku project,454 +dirty pair,454 +digital clock,454 +covering another's eyes,454 +c-button,454 +bubble background,454 +breast pillow,454 +born-to-die,454 +blue robe,454 +black mage,454 +beaver ears,454 +:c,454 +yamada (gotyui),453 +yagami taichi,453 +tadano kagekichi,453 +source filmmaker (medium),453 +shirogane tsumugi,453 +shirogane (platinum),453 +seiran (mousouchiku),453 +sankuro (agoitei),453 +rei (cookie),453 +mipha,453 +mel/a,453 +low-braided long hair,453 +kalmahul,453 +jacket over shoulder,453 +ina (gokihoihoi),453 +hiro (hidamari sketch),453 +himejoshi,453 +grisaia no kajitsu,453 +gantz,453 +fran (ff12),453 +fortified suit,453 +doma umaru,453 +deer tail,453 +byleth (summer) (fire emblem) (female),453 +amuro tooru,453 +4others,453 +water yoyo,452 +togepi,452 +texas (willpower) (arknights),452 +tenga,452 +seto no hanayome,452 +price tag,452 +pikachu (cosplay),452 +okota mikan,452 +mousepad (medium),452 +morrigan aensland (cosplay),452 +minoto,452 +magic: the gathering,452 +kanae (nijisanji),452 +kamishima kanon,452 +jibanyan,452 +japan self-defense force,452 +indian clothes,452 +holed coin,452 +cccpo,452 +blue tail,452 +bident,452 +azuma yuki,452 +ayagi daifuku,452 +yuzu (blue archive),451 +utsurogi angu,451 +soma peries,451 +rhythmic gymnastics,451 +piranha plant,451 +pet food,451 +naruse mai,451 +muchi maro,451 +missile228,451 +mishou mai,451 +loose thighhigh,451 +kugimiya atsuki,451 +ekz (robotekz),451 +damian desmond,451 +chou-10cm-hou-chan,451 +biwa hayahide (umamusume),451 +banchou,451 +august von parseval (azur lane),451 +yaranaika,450 +walpurgisnacht (madoka magica),450 +starraisins,450 +shoujo kageki revue starlight -re live-,450 +shingyouji tatsuya,450 +saku (osake love),450 +saku (kudrove),450 +rororina fryxell,450 +red armband,450 +oil painting (medium),450 +miko (royal milk),450 +mikagami sou,450 +merchandise,450 
+laura la mer,450 +latios,450 +kouyouen academy school uniform,450 +kazutake hazano,450 +jingasa,450 +jiji (aardvark),450 +hannya,450 +gokotai's tigers,450 +galge.com,450 +floating skull,450 +elf (dragon's crown),450 +duct tape,450 +drain (object),450 +cinnamiku,450 +caidychen,450 +brown pubic hair,450 +blood on leg,450 +atelier meruru,450 +another,450 +;<,450 +tokumi yuiko,449 +star facial mark,449 +shin'ya (shin'yanchi),449 +ikusaba daisuke,449 +hadou nejire,449 +green headband,449 +gorgeous mushroom,449 +gin'you haru,449 +double amputee,449 +confrontation,449 +chikado,449 +candy (smile precure!),449 +birii,449 +arcade,449 +yandere sample,448 +tokunou shoutarou,448 +tank shell,448 +takao (beach rhapsody) (azur lane),448 +quetzalcoatl (fate),448 +myon (phrase),448 +minami kana,448 +metroid dread,448 +mechanical buddy universe,448 +koinobori,448 +kitsu chiri,448 +jiji (majo no takkyuubin),448 +honjou raita,448 +hayashi kewi,448 +half note,448 +child carry,448 +bronya zaychik (silverwing: n-ex),448 +ange vierge,448 +allelujah haptism,448 +aegir (housamo),448 +yutazou,447 +yamabuki high school uniform,447 +tone kai ni (kancolle),447 +takemori shintarou,447 +rfb (girls' frontline),447 +papi (monster musume),447 +package,447 +onsen tamago (hs egg),447 +narusawa ryouka,447 +kafu,447 +high elf archer (goblin slayer!),447 +francis drake (fate),447 +edna (tales),447 +ebisuzawa kurumi,447 +blowhole,447 +ar tonelico iii,447 +yagate kimi ni naru,446 +ubanis,446 +top wo nerae 2!,446 +tachimi (basue),446 +tabigarasu,446 +skeptycally,446 +shamo (koumakantv),446 +sengoku rance,446 +scale armor,446 +sakamoto (nichijou),446 +purple butterfly,446 +matou shinji,446 +kelvin hiu,446 +iris heart,446 +illyasviel von einzbern (beast style),446 +hachiko (hati12),446 +green serafuku,446 +eruruw,446 +drifters,446 +crt,446 +chata maru (irori sabou),446 +blood on breasts,446 +biplane,446 +yudepii,445 +watermelon slice,445 +tsunashi hajime,445 +takane manaka,445 +synthesizer v,445 +star-shaped eyewear,445 +sakurazaki setsuna,445 +russia,445 +ponytail korosuke,445 +p90 (girls' frontline),445 +ookami-san,445 +navi,445 +muryotaro,445 +mizuki yukikaze,445 +miyaura sanshio,445 +looking at hand,445 +kurukurumagical,445 +kishinami hakuno (male),445 +kazekoshi school uniform,445 +heattech leotard,445 +gwen (league of legends),445 +guilty gear xx,445 +grandia i,445 +field of blades,445 +female commander (girls' frontline),445 +elize lutus,445 +doushimasho,445 +centorea shianus,445 +bemani,445 +amazuki jou,445 +white flag,444 +takaya n,444 +rkrk,444 +pikmin (series),444 +orange planet uniform,444 +onegai teacher,444 +nijou noriko,444 +nao (ritsancrossover),444 +mousetrap,444 +lace background,444 +kuji-in,444 +kazenoko,444 +kannazuki no miko,444 +implied extra ears,444 +hinamori amu,444 +gaming chair,444 +gachou,444 +doudanuki masakuni,444 +cygnet (azur lane),444 +cross-laced sleeves,444 +cinnamoroll,444 +arm on shoulder,444 +amy rose,444 +tsunashi kaoru,443 +tiger i,443 +taiki shuttle (umamusume),443 +tachitsu teto,443 +sharing food,443 +rouka (akatyann),443 +orange socks,443 +nanase haruka (free!),443 +mizuki (flowerlanguage),443 +megahiyo,443 +mataichi mataro,443 +iroha (blue archive),443 +index fingers raised,443 +hans christian andersen (fate),443 +goyain,443 +gendou pose,443 +fried chicken,443 +diagram,443 +cure miracle,443 +bridal lingerie,443 +bandaged fingers,443 +baby princess,443 +astolfo (memories at trifas) (fate),443 +wss (nicoseiga19993411),442 +wrist straps,442 +welsh corgi,442 +vehicle interior,442 +tagme,442 
+sugiyuu,442 +shin godzilla,442 +oddish,442 +nena trinity,442 +naked chocolate,442 +mickey mouse,442 +matsuoka rin,442 +hume,442 +hotate-chan,442 +halo (series),442 +formidable (the lady of the beach) (azur lane),442 +daitou (kancolle),442 +china (hetalia),442 +black-haired demon girl (shimmer),442 +aomushi (mushamusha),442 +ahenn,442 +yoshida hideyuki,441 +torn hat,441 +todoroki yachiyo,441 +suruga (xsurugax),441 +prince of lan ling (fate),441 +nipple penetration,441 +misogi (princess connect!),441 +matsuoka miu,441 +katsura kotonoha,441 +iono (pokemon),441 +holding frying pan,441 +dodai shouji,441 +armored leotard,441 +torn cloak,440 +tokkyu,440 +sen (sansui),440 +ry (yagoyago),440 +rouman academy uniform,440 +poppi (xenoblade),440 +pettan p,440 +neru (bunny) (blue archive),440 +miyazaki nodoka,440 +korwa,440 +kippu,440 +kaiji,440 +jagaimo (kkamja),440 +ikemeru19,440 +huke,440 +doukutsu monogatari,440 +darumoon,440 +annie (league of legends),440 +akagi towa,440 +ainy,440 +yukie (peach candy),439 +yellow pantyhose,439 +worm,439 +uchiha itachi,439 +tyrannosaurus rex,439 +tanigawa kanna,439 +takano miyo,439 +rebecca miyamoto,439 +professor shinonome,439 +ogino chihiro,439 +noumen,439 +niwatori kokezou,439 +musuko ga kawaikute shikatanai mazoku no hahaoya,439 +morgan (fire emblem),439 +marota,439 +kuso miso technique,439 +inanaki shiki,439 +helena (azur lane),439 +fur jacket,439 +fujishima shinnosuke,439 +flaming eyes,439 +cleavage reach,439 +chipa (arutana),439 +cheese-kun,439 +cat on lap,439 +akatsuki uniform,439 +abazu-red,439 +vest lift,438 +tea (nakenashi),438 +sakuragi ren,438 +sakuraba hikaru (loveindog),438 +red carpet,438 +rattle,438 +pizza hut,438 +nyama,438 +negresco,438 +naruko hanaharu,438 +mole on pussy,438 +large hands,438 +kouda tomohiro,438 +knitting,438 +karamoneeze,438 +houshin engi,438 +gate of babylon (fate),438 +elf-san wa yaserarenai.,438 +beige skirt,438 +asada hachi,438 +aqua shorts,438 +amashiro natsuki,438 +amamiya hibiya,438 +amae koromo,438 +yashajin ai,437 +wolf costume,437 +star ornament,437 +spray bottle,437 +shin megami tensei v,437 +shimakaze-kun,437 +shiina excel,437 +sakamoto ryouma (fate),437 +red male underwear,437 +pochita (chainsaw man),437 +penis shadow,437 +mirai akari project,437 +meandros,437 +kagura suzu,437 +flying fish,437 +elpeo puru,437 +eclipse,437 +chinomaron,437 +branch (blackrabbits),437 +wactor production,436 +vf-1,436 +type 97 (girls' frontline),436 +suzunari shizuku,436 +piapro,436 +mochizuki kei,436 +mahou shoujo lyrical nanoha the movie 1st,436 +european clothes,436 +endou saya,436 +eizouken ni wa te wo dasu na!,436 +comet,436 +clothed male nude male,436 +broly (dragon ball super),436 +broken halo,436 +sakura yuki (clochette),435 +saga frontier 2,435 +resident evil 3,435 +police badge,435 +obese,435 +nishizono mio,435 +mochiyuki,435 +misteor,435 +mirai akari,435 +miorine rembran,435 +massala,435 +mahora academy middle school uniform,435 +leopard girl,435 +kawagishi keitarou,435 +k mugura,435 +himiko (fate),435 +haganef,435 +grass root youkai network,435 +death (entity),435 +concrete,435 +cock ring,435 +captain america,435 +batman,435 +azuuru,435 +yellow camisole,434 +wrapper,434 +square mouth,434 +sports festival,434 +so moe i'm gonna die!,434 +shimura shinpachi,434 +rosa (tears of themis),434 +panties (pantsu-pirate),434 +nichika (nitikapo),434 +moriya's iron rings,434 +mizunashi (second run),434 +mappaninatta,434 +mahou senki lyrical nanoha force,434 +lzh,434 +kinoshita ichi,434 +hand on table,434 +gogalking,434 +gilgamesh (immoral 
biker jacket) (fate),434 +enjaku izuku,434 +crustacean,434 +chamaruku,434 +burnt,434 +azazel (helltaker),434 +atelier rorona,434 +aoki shizumi,434 +amamiya kokoro,434 +yumekui merry,433 +weasel tail,433 +tiffania westwood,433 +tamamo no mae (swimsuit lancer) (third ascension) (fate),433 +sesshoumaru,433 +seed,433 +roman clothes,433 +princess (princess principal),433 +oowada mondo,433 +ogipote,433 +mole on armpit,433 +minimap,433 +magnet,433 +large syringe,433 +kumbhira (granblue fantasy),433 +kriss vector,433 +kojima takeshi,433 +koishi day,433 +kawashima mizuki,433 +hanging scroll,433 +fujino shizuru,433 +froslass,433 +emotional engine - full drive,433 +crystalfly (genshin impact),433 +bubble bath,433 +3.1-tan,433 +siraha,432 +sawayaka samehada,432 +rikku (ff10),432 +paper mario,432 +naegi (naegidokoro),432 +luna (sailor moon),432 +kuuchuu yousai,432 +kanden sky,432 +haramaki,432 +fiery tail,432 +eyecatch,432 +dragonslayer (sword),432 +diamond hairband,432 +cure honey,432 +coat dress,432 +yuumei,431 +tokoroten (hmmuk),431 +tokisaki mio,431 +school swimsuit flap,431 +ryogo,431 +oda takayuki,431 +newo (shinra-p),431 +namonashi,431 +moe moe kyun!,431 +mimi (princess connect!),431 +manta ray,431 +laboratory,431 +kupaa,431 +kimura natsuki,431 +ishimaru kiyotaka,431 +inner senshi,431 +in bowl,431 +huanxiang heitu,431 +fishbowl,431 +echj,431 +deltarune,431 +dandara (karakure),431 +chimchar,431 +camouflage bikini,431 +ama usa an uniform,431 +yuuka nonoko,430 +yana (nekoarashi),430 +whimsicott,430 +virtuareal,430 +utsusumi kio,430 +the golden smurf,430 +tachibana roku,430 +pov across bed,430 +pina (sao),430 +on roof,430 +omone hokoma agm,430 +nuezou,430 +nausicaa,430 +mitsumine yuika,430 +master spark,430 +marimo danshaku,430 +magnemite,430 +learning with manga! 
fgo,430 +kobayashi yuuji,430 +jonylaser,430 +hachi (chihagura),430 +female butler,430 +d-m (dii emu),430 +cerebella (skullgirls),430 +cecil harvey,430 +cable tail,430 +big head,430 +artoria pendragon (lancer alter) (royal icing) (fate),430 +zange,429 +toga,429 +tamamura gunzo,429 +sukemyon,429 +shochuumimai,429 +ryou-san,429 +porforever,429 +pants removed,429 +nanashii (soregasisan),429 +miwabe sakura,429 +matsuda chiyohiko,429 +mario kart,429 +jiroo,429 +honda,429 +faceplant,429 +blue innertube,429 +baton (weapon),429 +ameto yuki,429 +amasawa natsuhisa,429 +zessica wong,428 +wiping mouth,428 +torn thighhighs,428 +soredemo ayumu wa yosetekuru,428 +shin'yashiki,428 +sharingan,428 +screen light,428 +saijou claudine,428 +morii shizuki,428 +kayari buta,428 +heater,428 +futaki kanata,428 +floating card,428 +elbow on knee,428 +ear cleaning,428 +coronavirus pandemic,428 +blood on bandages,428 +bikini removed,428 +adjusting scarf,428 +woofycakes,427 +villetta nu,427 +ugatsu matsuki,427 +tendou maya,427 +takeda hiromitsu,427 +swiss roll,427 +spray paint,427 +plain doll,427 +persimmon,427 +penis over one eye,427 +martial arts,427 +maou alba,427 +mahou shoujo lyrical nanoha a's portable: the battle of aces,427 +leon (mikiri hassha),427 +konngara (touhou),427 +kazetto,427 +kadoc zemlupus,427 +idolish 7,427 +hiiragi hazime,427 +hanging light,427 +fly,427 +centipede,427 +bae.c,427 +asyura kumo,427 +ankle bow,427 +zepher (makegumi club),426 +tsuzuri (tuzuri),426 +tsutsukakushi tsukiko,426 +tonbokiri (touken ranbu),426 +tales of symphonia knight of ratatosk,426 +symmetrical hand pose,426 +ooiwa wataru,426 +okumura rin,426 +ninimo nimo,426 +nanaponi,426 +"mogyutto ""love"" de sekkin chuu!",426 +miyamoto issa,426 +miri (ago550421),426 +lm7 (op-center),426 +kyon no imouto,426 +komainu,426 +kebab,426 +kamezaemon,426 +human furniture,426 +hat over eyes,426 +fubuki shirou,426 +for adoption,426 +dying,426 +dinosaur tail,426 +cheer (cheerkitty14),426 +yoko juusuke,425 +waltrud krupinski,425 +sono midoriko,425 +shinonome ena,425 +sailor moon (cosplay),425 +nagayori,425 +multicolored leotard,425 +muk (monsieur),425 +makai senki disgaea 4,425 +lowleg shorts,425 +lana's mother (pokemon),425 +iori (swimsuit) (blue archive),425 +ikeda chitose,425 +himegi,425 +heart straw,425 +hakama shorts,425 +familiar,425 +dakemakura-koubou,425 +ana coppola,425 +xiaoxi0619,424 +winning ticket (umamusume),424 +white wrist cuffs,424 +toudori,424 +the witcher (series),424 +skull on head,424 +ramudia (lamyun),424 +pine (bombergirl),424 +oogaki chiaki,424 +necomi,424 +movie theater,424 +monika weisswind,424 +ikeshita moyuko,424 +holding shovel,424 +dorei to no seikatsu ~teaching feeling~,424 +bonsai,424 +black male swimwear,424 +alpaca tail,424 +akou roushi,424 +2005,424 +zanshomimai,423 +yujup,423 +top wo nerae!,423 +sakayama shinta,423 +pokemon tail,423 +ooarai naval school uniform,423 +naruto: the last,423 +marui mitsuba,423 +maeda toushirou,423 +m&m (mickey and mackey),423 +lilith (machikado mazoku),423 +leonie pinelli,423 +kuro yuzu,423 +kanna (plum),423 +kaiba seto,423 +ikegami akane,423 +hachune miku,423 +green cardigan,423 +buran buta,423 +boomerang,423 +animal ear headphones,423 +vinegar doppio,422 +trunks (future) (dragon ball),422 +to be continued,422 +super sailor moon,422 +stained sheets,422 +single mechanical hand,422 +shiwasu no okina,422 +shaft look,422 +saturday night fever,422 +royal robe,422 +red track suit,422 +quanxi (chainsaw man),422 +power-up,422 +ponponmaru,422 +pan (dragon ball),422 +neko (minato aqua),422 +naked 
suspenders,422 +nakamura takeshi,422 +mao lian (nekokao),422 +mana (remana),422 +machamp,422 +kiratto pri chan,422 +kamiyoshi rika,422 +ichijou hotaru,422 +haibane renmei,422 +gunbuster pose,422 +five-seven (girls' frontline),422 +edobox,422 +danshi koukousei no nichijou,422 +cbt,422 +bed frame,422 +arm on knee,422 +aircraft carrier,422 +accio,422 +yuu (higashi no penguin),421 +witch's labyrinth,421 +white len (tsukihime),421 +tupet,421 +sword art online alternative: gun gale online,421 +pandora hearts,421 +lotte jansson,421 +loliconder,421 +kousaka umi,421 +kokka han,421 +jingai modoki,421 +hugging own tail,421 +gyakushuu no fantasica,421 +dedenne,421 +curious,421 +crosshatching,421 +cinderella girls card parody,421 +cham cham,421 +butterfly tattoo,421 +artoria pendragon (swimsuit archer) (fate),421 +ars almal,421 +yuasan,420 +sumeragi lee noriega,420 +souda kazuichi,420 +ninja slayer,420 +nekomata naomi,420 +myrtle (arknights),420 +mutsu-no-kami yoshiyuki,420 +muffin,420 +mauser 98,420 +luvdisc,420 +klara (pokemon),420 +iron man,420 +immobilization,420 +holding test tube,420 +gerudo set (zelda),420 +genos,420 +finger heart,420 +craft lawrence,420 +brown border,420 +white mittens,419 +whistling,419 +sola-ui nuada-re sophia-ri,419 +sekira ame,419 +saigyouji yuyuko's fan design,419 +riolu,419 +prison school,419 +piledriver (sex),419 +otomachi una,419 +op na yarou,419 +oda nobunaga (swimsuit berserker) (fate),419 +naoi ayato,419 +mukka,419 +mr. squeaks (hakos baelz),419 +mihoyo,419 +kotonomiya yuki,419 +incineroar,419 +holding tablet pc,419 +hexagon hair ornament,419 +gift art,419 +fushigi no dungeon,419 +ex-rumia,419 +chuunioniika,419 +child gilgamesh (fate),419 +boyshort panties,419 +atsushi toushirou,419 +arm sling,419 +alcina dimitrescu,419 +yuuki rito,418 +yoban,418 +stage connection,418 +primarina,418 +ninoko,418 +monochrome background,418 +miyanaga teru,418 +mazinger z,418 +mage (dungeon and fighter),418 +kusano (torisukerabasu),418 +kannko bokujou,418 +gold city (umamusume),418 +film strip,418 +deetamu,418 +choco chip,418 +atoki,418 +zero gravity,417 +wrestling mask,417 +uppercut,417 +touhou lost word,417 +stuffed frog,417 +striped vest,417 +sony,417 +silverxp,417 +sasaki kojirou (fate),417 +sasahara yuuki,417 +neferpitou,417 +namine,417 +nail polish bottle,417 +martina (dq11),417 +lilish,417 +leina,417 +lei,417 +kirigakure (kirigakure tantei jimusho),417 +katsushika hokusai (swimsuit saber) (fate),417 +kairakuen umenoka,417 +harunatsu akito,417 +hakase fuyuki,417 +hairband removed,417 +frostleaf (arknights),417 +eggshell,417 +d;,417 +chicago-x,417 +blue pajamas,417 +black sky,417 +bear boy,417 +andou shuki,417 +wakamo (blue archive),416 +tadano magu,416 +sinon (sao-alo),416 +shu-mai,416 +shiba itsuki,416 +severed hair,416 +re ghotion,416 +pupps,416 +musashi kai (kancolle),416 +mugetsu (touhou),416 +kinoshita hideyoshi,416 +kid (chrono cross),416 +kanzaki kaori,416 +kantai collection (anime),416 +italy,416 +hikage (senran kagura),416 +haniwa (leaf garden),416 +hand on another's knee,416 +grid,416 +gouta (nagishiro6624),416 +fuji (pixiv24804665),416 +cracking knuckles,416 +cloba,416 +asa no ha (awayuki),416 +aqua thighhighs,416 +aile (mega man zx),416 +abukuma kai ni (kancolle),416 +.flow,416 +yoru (chainsaw man),415 +umamusume summer story (umamusume),415 +traffic baton,415 +shunichi,415 +shiori (princess connect!),415 +shinza bansho series,415 +shikanoin heizou,415 +sakutarou (umineko),415 +sakura bakushin o (umamusume),415 +prince of wales (azur lane),415 +no arms,415 +nikoo,415 
+mogu (au1127),415 +mirei,415 +mil (xration),415 +langbazi,415 +kataoka yuuki,415 +howl no ugoku shiro,415 +hoshizora ikuyo,415 +glove bow,415 +flayn (fire emblem),415 +flaming hand,415 +fingers to mouth,415 +es (xblaze),415 +cure flora,415 +cowfee,415 +covered anus,415 +cassandra alexandra,415 +bomber grape,415 +baretto (karasi07),415 +alicia florence,415 +agent 8 (splatoon),415 +abo (hechouchou),415 +7th dragon 2020,415 +zebra print,414 +yonomori kobeni,414 +xingzhi lv,414 +tsunderia,414 +tougou mimori,414 +takunomi,414 +studying,414 +stahlhelm,414 +shouji ayumu,414 +ree (re-19),414 +red star,414 +needy girl overdose,414 +mudrock (silent night) (arknights),414 +mikazuchi zeus,414 +klaius,414 +katanagatari,414 +kaname aomame,414 +holding torch,414 +hashibira inosuke,414 +hand over another's mouth,414 +forever 7th capital,414 +fire emblem: new mystery of the emblem,414 +csyday,414 +bowtie removed,414 +black babydoll,414 +white sky,413 +white bandeau,413 +tristan (fate),413 +tomoe gozen (swimsuit saber) (first ascension) (fate),413 +tidus,413 +tatami to hinoki,413 +taiwan (hetalia),413 +sukage,413 +sore wa bokutachi no kiseki,413 +shiraha maru,413 +sakiyo cake,413 +ronald mcdonald,413 +redlhzz,413 +qin liangyu (fate),413 +oogai daiichi middle school uniform,413 +nagi ryou,413 +mikage sekizai,413 +marvel vs. capcom,413 +kon-kit,413 +kinona,413 +kazaana,413 +haguhagu (rinjuu circus),413 +fukuyama mai,413 +friedrich der grosse (azur lane),413 +flamethrower,413 +energy blade,413 +cupping glass,413 +chaos online,413 +carrying overhead,413 +bound torso,413 +battlefield,413 +ao no kiseki,413 +119,413 +ys,412 +typhlosion,412 +space ishtar (fate),412 +shipii (jigglypuff),412 +poprication,412 +pixiv-tan,412 +mitsuba choco,412 +long sword,412 +liliya olenyeva,412 +katagiri hinata,412 +hamada yoshikazu,412 +gene (pso2),412 +fake halo,412 +distr,412 +diamond hair ornament,412 +chevalier d'eon (fate),412 +cafekun,412 +aika granzchesta,412 +yin yang print,411 +takemaru (housamo),411 +sleeve bow,411 +shimejinameko,411 +reloading,411 +purple pubic hair,411 +pandea work,411 +monorus,411 +millhiore f. 
biscotti,411 +marushin (denwa0214),411 +lydian academy uniform,411 +lunchbox,411 +long eyebrows,411 +lissa (fire emblem),411 +kyogre,411 +kisaragi kai ni (kancolle),411 +jazz jack,411 +hand on ear,411 +h2so4,411 +glass slipper,411 +fukube satoshi,411 +cream puff,411 +bennett (genshin impact),411 +baileys (tranquillity650),411 +zangief,410 +yuzuki gao,410 +vertical-striped pants,410 +tamamo cat (second ascension) (fate),410 +rohitsuka,410 +robaco,410 +niy (nenenoa),410 +licking testicle,410 +kago no tori,410 +iona,410 +hypnosis mic,410 +hinako note,410 +ha-class destroyer,410 +glove pull,410 +glass table,410 +comah,410 +arena (company),410 +akita toushirou,410 +22/7,410 +zawameki,409 +yoshida keiji,409 +whitney (pokemon),409 +valtor,409 +tsukumihara academy uniform (fate/extra ccc),409 +torii sumi,409 +tokyo big sight,409 +syroh,409 +suterii,409 +sorai shin'ya,409 +shun (rokudena-shi),409 +shikito,409 +rayquaza,409 +pononozo,409 +perona,409 +osananajimi neko,409 +nui sociere,409 +nakatani iku,409 +moekibara fumitake,409 +leviathan (skullgirls),409 +kawasumi mai,409 +himeya company uniform,409 +grey suit,409 +glasgow smile,409 +gintarou (kurousagi108),409 +different shadow,409 +ball and chain (weapon),409 +audio jack,409 +art shift,409 +ampharos,409 +amania orz,409 +airi (the infernal temptress),409 +wrapped candy,408 +wataame27,408 +vsk-94 (girls' frontline),408 +violence,408 +vertical foregrip,408 +undersized animal,408 +toritora,408 +tomohiro kai,408 +tekehiro,408 +shiwasu horio,408 +shinozuka atsuto,408 +sae (hidamari sketch),408 +radar,408 +n:go,408 +maracas,408 +mano aloe,408 +makoto daikichi,408 +mailbox (incoming mail),408 +koshirae tsurugi,408 +kobamiso (kobalt),408 +kawacy,408 +hatsune mikuo,408 +final fantasy iii,408 +enami katsumi,408 +dragon quest vii,408 +deep wound,408 +dark room,408 +coco's,408 +celestial being uniform,408 +aroma sensei,408 +angewomon,408 +yuihico,407 +unlimited blade works (fate),407 +suzuya kai ni (kancolle),407 +suzutsuki kurara,407 +sunaba suzume,407 +sora 72-iro,407 +sketch eyebrows,407 +shiki (psychedelic g2),407 +rubber band,407 +ray-k,407 +purple apron,407 +pom pom earrings,407 +pacific,407 +nonono (mino),407 +mizuki (koko lost),407 +mahoromatic,407 +liskarm (arknights),407 +jack-o'-lantern hair ornament,407 +holding lipstick tube,407 +greenmarine,407 +brown sash,407 +tsuutenkaaku,406 +tokonone,406 +ten desires,406 +sword writing,406 +super mario odyssey,406 +steven universe,406 +sleeveless duster,406 +shuten douji (halloween caster) (fate),406 +shin'you (kancolle),406 +sakura chiyono o (umamusume),406 +sak (lemondisk),406 +pickup truck,406 +pa-15 (girls' frontline),406 +over the knee,406 +nozo (hitomiz),406 +mujinbensin,406 +maruyama saki,406 +lycanroc,406 +laharl,406 +kuboken,406 +ichiyou moka,406 +happa (cloverppd),406 +hanako (blue archive),406 +gunpla,406 +germany (hetalia),406 +episode number,406 +cornelia li britannia,406 +control rod,406 +comiket 100,406 +camouflage shirt,406 +belfast (shopping with the head maid) (azur lane),406 +baseball jersey,406 +aria pokoteng,406 +ar tonelico i,406 +wool,405 +urara meirochou,405 +trowel,405 +tomoyohi,405 +thumb sucking,405 +syndra,405 +sakurai touko,405 +sakaki,405 +puck (re:zero),405 +pear,405 +painted clothes,405 +numbered panels,405 +mu-12,405 +michishio kai ni (kancolle),405 +lunatic gun,405 +kakizaki (chou neji),405 +jean kirchstein,405 +incense,405 +himitsu (hi mi tsu 2),405 +haruyuki (yukichasoba),405 +gravel (arknights),405 +grani (arknights),405 +fujikawa,405 +enokuma uuta,405 +earphones 
removed,405 +breast mousepad,405 +bifidus,405 +428,405 +35p (sakura miko),405 +zero (code geass),404 +z-epto (chat-noir86),404 +takara akihito,404 +shampoo,404 +sebastian piyodore,404 +penis ribbon,404 +pearl thong,404 +papajay (jennygin2),404 +onineko-chan,404 +maestrale (kancolle),404 +lucy maria misora,404 +log pose,404 +igawa asagi,404 +holly hair ornament,404 +himeyamato,404 +ga-rei,404 +donkey kong,404 +digimon tamers,404 +conte di cavour (kancolle),404 +chun-li (cosplay),404 +cessa,404 +cattleya,404 +uzuki (cookie),403 +tonbi,403 +tianzi,403 +sugue tettou,403 +ryunnu,403 +orange belt,403 +melone,403 +masa masa,403 +maria (hayate no gotoku!),403 +kamijou kyousuke,403 +hybrid,403 +hungary (hetalia),403 +himemiya anthy,403 +hikawa iona,403 +high school dxd pi,403 +hashtag,403 +completion time,403 +cagalli yula athha,403 +bloomers pull,403 +banette,403 +angra mainyu (fate),403 +aki (akikaze asparagus),403 +akemiho tabi nikki,403 +youta,402 +wild arms 3,402 +vent arbre,402 +taut leotard,402 +suurin (ksyaro),402 +specter the unchained (arknights),402 +sitting on head,402 +shizuma yoshinori,402 +rei kun,402 +prostate milking,402 +poco (asahi age),402 +playing piano,402 +peso (cheese company),402 +pearl hair ornament,402 +nagato kai ni (kancolle),402 +motomiya mitsuki,402 +mocchi (mocchichani),402 +kiriyama taichi,402 +hooters,402 +hater (hatater),402 +guchico,402 +green cloak,402 +golden apple,402 +fairy maid (touhou),402 +caitlin (pokemon),402 +bb (swimsuit mooncancer) (first ascension) (fate),402 +bandaid on head,402 +amazon warrior,402 +akuma (street fighter),402 +akira (coffee curry),402 +after bathing,402 +yaman,401 +voltron (series),401 +tsuki ni kawatte oshioki yo,401 +torino aqua,401 +taiko drum,401 +sticker on face,401 +sepia background,401 +sawatari makoto,401 +sakiryo kanna,401 +sailor swimsuit (idolmaster),401 +saeki hokuto,401 +rinoa heartilly,401 +red robe,401 +pointing weapon,401 +nanao (mahaya),401 +mineva lao zabi,401 +matsuoka gou,401 +marukyuu ameya,401 +kawagoe pochi,401 +junkpuyo,401 +hozuki momiji,401 +holding pole,401 +hijikawa arashi,401 +hashimoto takashi,401 +hair over face,401 +fur cuffs,401 +eliwood (fire emblem),401 +chrono crusade,401 +christmas present,401 +chii,401 +casting spell,401 +caburi,401 +bursting ass,401 +arc the lad ii,401 +amino (tn7135),401 +against window,401 +.hack//g.u.,401 +yura kai ni (kancolle),400 +wrestle angels survivor,400 +very long tongue,400 +togami byakuya,400 +tenkuu no shiro laputa,400 +takemura sessyu,400 +sylvie (dorei to no seikatsu),400 +swimsuit removed,400 +rosetta (granblue fantasy),400 +reu (cookie),400 +peeling,400 +naoto (tulip),400 +momoshiki tsubaki,400 +hosomi shizuko,400 +ebizome,400 +dragon quest x,400 +coffee table,400 +canaan,400 +undyne,399 +toyama kasumi,399 +t.m. 
opera o (umamusume),399 +supply depot princess,399 +scratching,399 +sakuramochi (sakura frappe),399 +poison,399 +nikaidou kou,399 +la pucelle,399 +koretsuki azuma,399 +ink bottle,399 +gazacy (dai),399 +dragon quest vi,399 +clow card,399 +clothes in front,399 +chima q,399 +battleship water oni,399 +ayanero taicho,399 +yoako,398 +transmission tower,398 +top pull,398 +tanihara natsuki,398 +swimsuit skirt,398 +suichuu hanabi,398 +sparrowl,398 +shimadoriru,398 +sen (granblue fantasy),398 +ribbon between breasts,398 +pgm hecate ii,398 +oso (toolate),398 +nakano yuka,398 +muroku (aimichiyo0526),398 +legend of the cryptids,398 +kibisake,398 +kakao (noise-111),398 +hoppege,398 +holding fish,398 +hillly (maiwetea),398 +hassan of the cursed arm (fate),398 +enami hakase,398 +drawing on another's face,398 +chicken leg,398 +atte nanakusa,398 +aida takanobu,398 +achiki,398 +wily beast and weakest creature,397 +victorious (azur lane),397 +v over mouth,397 +teruui,397 +takoyaki (roast),397 +sweater tug,397 +rom (neptune series),397 +oda nobuna no yabou,397 +money tuck,397 +mashiro aa,397 +majoccoid,397 +lotte no omocha!,397 +kizaki yuuri,397 +kazuharu kina,397 +isegawa yasutaka,397 +hero (dq11),397 +exlic,397 +dragon@harry,397 +devil may cry 3,397 +deaver,397 +daichi (tokoya),397 +canopy (aircraft),397 +akitsu maru kai (kancolle),397 +zaku,396 +yan (nicknikg),396 +waist hug,396 +thomasz,396 +tachikoma (mousou teikoku),396 +super mushroom,396 +suou patra,396 +shirokitsune,396 +san-pon,396 +roropull,396 +robot animal,396 +riku (kingdom hearts),396 +recurring image,396 +noihara himari,396 +noco (adamas),396 +midnight bliss,396 +lee (colt),396 +kumamiko,396 +kamen rider dcd,396 +kakage,396 +ike eveland,396 +hyudora,396 +hyakkaou academy uniform,396 +heinrike prinzessin zu sayn-wittgenstein,396 +endou hiroto,396 +dunkerque (azur lane),396 +covering with blanket,396 +boulder,396 +awning,396 +anya melfissa,396 +ankle garter,396 +yuki (touhou),395 +vignette tsukinose april,395 +v-shaped eyes,395 +ueda ryou,395 +tokugawa matsuri,395 +tarakanovich,395 +takeda yukimura,395 +takahero,395 +symmetrical pose,395 +starfighter,395 +star ocean the last hope,395 +sakai kyuuta,395 +number pun,395 +nohito,395 +nakajima (girls und panzer),395 +multiple monitors,395 +multicolored ribbon,395 +mousepad (object),395 +mizuga,395 +mike inel,395 +magister,395 +mage staff,395 +kurosaki chitose,395 +kotozume yukari,395 +katayama kei,395 +k/da akali,395 +iwatooshi,395 +inunoko.,395 +inui sajuna,395 +hoto cocoa's school uniform,395 +holding mirror,395 +hijiri (resetter),395 +gryffindor,395 +ghislaine dedoldia,395 +enjo kouhai,395 +devilman,395 +covering chest,395 +backless panties,395 +alternative girls,395 +all might,395 +a-ka,395 +yuiga naoha,394 +tiger cub,394 +tails (sonic),394 +sigurd (fate),394 +shiraitodai school uniform,394 +sasairebun,394 +sasaame,394 +sansei muramasa,394 +sakaki yuuya,394 +rozalin,394 +peacock feathers,394 +oyama mihari,394 +no bangs,394 +nishizawa,394 +misaki nonaka,394 +kiryuuin ragyou,394 +holding racket,394 +holding cake,394 +hamushima,394 +gin (shioyude),394 +ga-rei zero,394 +fujitaka nasu,394 +fujiko f fujio (style),394 +flannery (pokemon),394 +energy weapon,394 +dragon tattoo,394 +double scoop,394 +carol malus dienheim,394 +board,394 +batsubyou,394 +aunt and niece,394 +amuro ray,394 +aircraft carrier water oni,394 +vomiting,393 +tirpitz (azur lane),393 +shibari under clothes,393 +self breast sucking,393 +nnyara,393 +mutual orgasm,393 +murata yuusuke,393 +mobu,393 +minami chiaki,393 +marina ismail,393 
+koumei (twinameless),393 +inkling (language),393 +if (neptune series),393 +grey ascot,393 +giant monster,393 +foo fighters,393 +egyptian mythology,393 +dra,393 +deel (rkeg),393 +yume 2kki,392 +voltron: legendary defender,392 +tennis court,392 +smoke trail,392 +sitting sideways,392 +reverse fellatio,392 +navy (navy.blue),392 +nagant revolver (girls' frontline),392 +muzzle,392 +m bison,392 +kuroshitsuji,392 +klein moretti,392 +kannatsuki noboru,392 +kana anaberal,392 +holding egg,392 +galaxy angel rune,392 +female ejaculation through clothes,392 +byakugan,392 +binsen,392 +bikini tug,392 +ansatsu kyoushitsu,392 +zombie fairy (touhou),391 +xephyrks,391 +takamichi,391 +tactical surface fighter,391 +subachi,391 +stitched torso,391 +sleeping bag,391 +shiratama (hockey),391 +shark print,391 +saikawa yusa,391 +rubik's cube,391 +piripun,391 +nian (unfettered freedom) (arknights),391 +naz,391 +mp40 (girls' frontline),391 +monkey girl,391 +menacing (jojo),391 +lace legwear,391 +l (death note),391 +koi dance,391 +kanna (cookie),391 +isekai maou to shoukan shoujo no dorei majutsu,391 +holding coin,391 +grilling,391 +frankenstein's monster,391 +france (hetalia),391 +forbidden scrollery,391 +cue stick,391 +aty (summon night),391 +aono shimo,391 +american mcgee's alice,391 +yukiwo,390 +suzuho hotaru,390 +single earphone removed,390 +shoulder sash,390 +shigure ryuunosuke,390 +rosemi lovelock,390 +pokemon mystery dungeon,390 +muffin top,390 +miata (miata8674),390 +maullarmaullar,390 +magnet (vocaloid),390 +macross: do you remember love?,390 +lucifero,390 +litchi faye ling,390 +ligne claire,390 +kyuuba melo,390 +kingdom hearts iii,390 +ken (coffee michikusa),390 +haibara ai,390 +glass shards,390 +gengorou,390 +erlenmeyer flask,390 +emofuri,390 +elsword (character),390 +doronjo,390 +animal helmet,390 +alakoala,390 +tsukimura suzuka,389 +serena (yu-gi-oh!),389 +rpg,389 +risui (suzu rks),389 +raiou,389 +princess principal game of mission,389 +pectoral docking,389 +oden,389 +neropaso,389 +nakayama miyuki,389 +mizushima (p201112),389 +minatsuki randoseru,389 +kokuchuutei,389 +kiawe (pokemon),389 +kei (keigarou),389 +iz (asteroid ill),389 +harisen,389 +demon's souls,389 +coppelion,389 +clothes on floor,389 +avatar (mabinogi),389 +zuizi,388 +yuuma (skirthike),388 +yoru nai,388 +yaza,388 +yagyuu (senran kagura),388 +turkey (food),388 +tamada tamaki,388 +saitou naoki,388 +redjuice,388 +poketch,388 +paint on clothes,388 +o-ring panties,388 +neko neko koneko,388 +nassukun,388 +matoi ryuuko (cosplay),388 +majikina mina,388 +kokonoe (blazblue),388 +ko yu,388 +keita (tundereyuina),388 +humping,388 +holding key,388 +hibiki yuuta,388 +fingers to cheeks,388 +back cover,388 +ayase yue,388 +astesia (arknights),388 +akagi ritsuko,388 +snow globe,387 +shimo (depthbomb),387 +rojiura satsuki: chapter heroine sanctuary,387 +ooji,387 +nokoppa,387 +narutaki shin,387 +nanashin naomi,387 +mythra (radiant beach) (xenoblade),387 +momoji (lobolobo2010),387 +metamoran vest,387 +maria holic,387 +layla (idolmaster),387 +kuzuryuu fuyuhiko,387 +kerberos blade,387 +kamen rider 555,387 +kabu (pokemon),387 +green armband,387 +girls und panzer saishuushou,387 +dark labia,387 +crescent necklace,387 +character mask,387 +candelabra,387 +bust chart,387 +amano don,387 +admiral (kancolle) (cosplay),387 +vlad iii (fate/apocrypha),386 +tab head,386 +syunzou,386 +sweater around neck,386 +seta kaoru,386 +saori (blue archive),386 +saemonza (girls und panzer),386 +pepsi,386 +penguin tail,386 +panels,386 +orange juice,386 +levantine,386 +lead pipe,386 +knit 
hat,386 +kindergarten bag,386 +katalina (granblue fantasy),386 +irisviel von einzbern (angel's song),386 +implied cunnilingus,386 +imperial japanese navy,386 +hand on own elbow,386 +goshiki suzu,386 +gingerbread man,386 +exiled warrior leina,386 +engo (aquawatery),386 +dramatical murder,386 +devil summoner,386 +cum on fingers,386 +crucifixion,386 +bluethebone,386 +ayase hazuki,386 +arguing,386 +akky (akimi1127),386 +yagami iori,385 +toolbox,385 +tabitha (zero no tsukaima),385 +sylvain jose gautier,385 +squchan,385 +shirokane rinko,385 +sengoku collection,385 +penis under another's clothes,385 +nishida asako,385 +miyao ryuu,385 +miyako hito,385 +mimofu (fullhighkick),385 +metata,385 +licking neck,385 +kroos (arknights),385 +kidou yuuto,385 +himemiya chikane,385 +golem,385 +gin00,385 +gayarou,385 +cowboy,385 +colored eyepatch,385 +chocolate cornet,385 +cad (caddo),385 +bursting pectorals,385 +aria gakuen school uniform,385 +zinnia (pokemon),384 +wooper,384 +white headdress,384 +vertical-striped shorts,384 +unleashed,384 +tsujino akari,384 +suicune,384 +snowball (overwatch),384 +shirokuma a,384 +new generations (idolmaster),384 +natuna natu,384 +nakaaki masashi,384 +kunoichi tsubaki no mune no uchi,384 +kasen kanesada,384 +karasusou nano,384 +jiru (jirufun),384 +iroiro yaru hito,384 +imminent hug,384 +icicle,384 +holding bell,384 +headlock,384 +fishing hook,384 +emyo,384 +boom barrier,384 +aslindsamure,384 +amayadori machi,384 +akane souichi,384 +age of ishtaria,384 +86 -eightysix-,384 +yostxxx,383 +urushibara ruka,383 +tsab executive military uniform,383 +tokita monta,383 +toka (marchlizard),383 +tanaka shoutarou,383 +suplex,383 +sophie neuenmuller,383 +smegma,383 +shopping basket,383 +sho-n-d,383 +shennai misha,383 +pottsness,383 +playstation,383 +necklace removed,383 +midna (true),383 +maruto!,383 +kingdom hearts 358/2 days,383 +kakumeiki valvrave,383 +f-zero,383 +booth seating,383 +bikini around one leg,383 +bbc-chan,383 +ario,383 +ambiguous red liquid,383 +yu 65026,382 +single sandal,382 +roar yell! 
tracen academy cheerleading squad (umamusume),382 +plumeria,382 +mushishi,382 +mugino shizuri,382 +male underwear peek,382 +kohsaka jun,382 +km yama,382 +kanzaki muyu,382 +holding photo,382 +holding necklace,382 +grey choker,382 +giraffe ears,382 +fifiruu,382 +excessive pussy juice,382 +dog (mixed breed) (kemono friends),382 +de ruyter (kancolle),382 +cu-no,382 +condensation,382 +buneary,382 +bikini in mouth,382 +ass tattoo,382 +ash arms,382 +androgyne symbol,382 +airani iofifteen,382 +yowai totoko,381 +white uniform,381 +tsukigi,381 +tomoe mami (cosplay),381 +staryu,381 +st.germain-sal,381 +shounen (hogehoge),381 +shark fin,381 +ryouko (tenchi muyou!),381 +revolver knuckle,381 +powering up,381 +nunchaku,381 +nav (itsnav),381 +mars (cookie),381 +kenmochi touya,381 +katsuragi (senran kagura),381 +kaorihero,381 +ivysaur,381 +itto (mentaiko),381 +holding map,381 +hane yuki,381 +full mouth,381 +euphonium,381 +bonyari high school uniform,381 +blue suit,381 +tomatto (@ma!),380 +tobiichi origami,380 +tamamo no mae (spring casual) (fate),380 +pleated shirt,380 +owarimonogatari,380 +open bodysuit,380 +okitsugu,380 +mofu namako,380 +miniskirt pirates,380 +ment,380 +medusa (shingeki no bahamut),380 +itolife,380 +hubert von vestra,380 +hand under swimsuit,380 +green fur,380 +grape hair ornament,380 +glory hole,380 +gladiolus amicitia,380 +gensou suikoden ii,380 +eurasian tree sparrow,380 +eotech,380 +dog costume,380 +bremerton (kung fu cruiser) (azur lane),380 +big nose,380 +bag removed,380 +asa (coco),380 +74,380 +whale tail (clothing),379 +waving arm,379 +vei (vtuber),379 +tsubasa tsubasa,379 +tksand,379 +thorns (arknights),379 +tamayan,379 +starting block,379 +spiked footwear,379 +slide,379 +silhouette demon,379 +shizuru (princess connect!),379 +sheep tail,379 +pako (pousse-cafe),379 +nino (fire emblem),379 +nendoroid,379 +nemo (fate),379 +mizuhara chizuru,379 +minah (chaesu),379 +marl kingdom,379 +kansou samehada,379 +inugami kira,379 +ikazu401,379 +hinoka (fire emblem),379 +grey flower,379 +flint (girls und panzer),379 +faye valentine,379 +enpe,379 +email address,379 +decidueye,379 +creature on head,379 +collar grab,379 +clinging,379 +ch'en (ageless afterglow) (arknights),379 +catherine (game),379 +bishoujo mangekyou,379 +ama-tou,379 +akane (bunny) (blue archive),379 +vigna (arknights),378 +tunnel,378 +toothpaste,378 +techi (techi35499),378 +team aqua,378 +shinomiya himawari,378 +sherlock shellingford,378 +scolding,378 +sama samasa,378 +red blood cell (hataraku saibou),378 +pliers,378 +nara shikamaru,378 +mint,378 +maniacpaint,378 +legacy of lunatic kingdom,378 +kuraue hinata,378 +kugelschreiber,378 +iei,378 +holding dildo,378 +hagakure tooru,378 +giraffe,378 +gale kawaii,378 +fujishima-sei ichi-gou,378 +enomoto takane,378 +dyson (edaokunnsaikouya),378 +cocktail shaker,378 +chocolate on face,378 +beige fur,378 +arika yumemiya,378 +yoru no yatterman,377 +wasabi60,377 +uenoryoma,377 +tsune (tune),377 +thigh straddling,377 +testa,377 +sunny (omori),377 +siesta (zero no tsukaima),377 +shoulder guard,377 +sekirei,377 +sananana (cookie),377 +ringo-chan (otokuyou),377 +ram (neptune series),377 +picking up,377 +piano bench,377 +nurarihyon no mago,377 +nekominase,377 +multicolored tail,377 +morty (pokemon),377 +menace,377 +labia piercing,377 +kilesha,377 +kanzaki hiro,377 +k10k,377 +jirou (chekoro),377 +itsuka kotori,377 +istina (arknights),377 +frilled armband,377 +eyeshield 21,377 +doukyuusei,377 +btmr game,377 +ao no kanata no four rhythm,377 +abigail williams (emerald float) (fate),377 +yamasan,376 
+wakamatsu372,376 +umakuchi shouyu,376 +ukagaka,376 +torigoe takumi,376 +tart (food),376 +tachibana miya,376 +suou mikoto (school rumble),376 +spurs,376 +shorts aside,376 +reticulated giraffe (kemono friends),376 +resident evil 6,376 +racchi.,376 +producer (idolmaster anime),376 +osakana (denpa yun'yun),376 +oro (sumakaita),376 +orange apron,376 +om (nk2007),376 +nina kosaka,376 +naidong (artist),376 +monoglove,376 +mammon (umineko),376 +light machine gun,376 +lace-trimmed shirt,376 +kson,376 +hoshiguma yuugi (promo),376 +hitomi (doa),376 +headwear switch,376 +feral lemma,376 +dead or alive xtreme,376 +cafe-chan to break time,376 +black bullet,376 +bisexual male,376 +baseball helmet,376 +2000s (style),376 +zipper skirt,375 +z flag,375 +xinyan (genshin impact),375 +whisper (youkai watch),375 +toujou kirumi,375 +tenobe,375 +tansuke,375 +summertime render,375 +sky high,375 +shimabara elena,375 +shift (waage),375 +shamakho,375 +satellite dish,375 +reno (azur lane),375 +print footwear,375 +pia carrot (series),375 +naked paint,375 +nakamura yukitoshi,375 +morisobo,375 +magisa (granblue fantasy),375 +lying card,375 +kyoto animation,375 +kotatu (akaki01aoki00),375 +k-on! movie,375 +heart wand,375 +haaton (akai haato),375 +fisting,375 +esidisi,375 +esdeath,375 +ebifly,375 +chespin,375 +arsenixc,375 +archie (pokemon),375 +anise tatlin,375 +zen'in maki,374 +yamashita takahiro,374 +watarase piro,374 +wake up girls!,374 +tanigaki genjirou,374 +suspenders pull,374 +super saiyan 2,374 +skitty,374 +sh (562835932),374 +scarf removed,374 +princess tutu,374 +portal (series),374 +petra macneary,374 +mylene jenius,374 +muoto,374 +mp7 (girls' frontline),374 +morino donguri,374 +masanaga (tsukasa),374 +kanniiepan,374 +kamihama university affiliated school uniform,374 +in-universe location,374 +himeragi yukina,374 +hanpenmaru,374 +hamao,374 +cthugha (nyaruko-san),374 +chigusa minori,374 +catching,374 +bubuzuke,374 +bibi02,374 +basin,374 +asakawa (outeq),374 +zenra,373 +unacchi (nyusankin),373 +tales of phantasia,373 +tako-san wiener,373 +shoulder blush,373 +shengtian,373 +serizawa (serizawaroom),373 +satou sei,373 +sakuranomiya maika,373 +s.a.t.8 (girls' frontline),373 +roshin yuukai (vocaloid),373 +racecar,373 +pop (electromagneticwave),373 +pleinair,373 +plaid apron,373 +pink outline,373 +paywall censor,373 +margay (kemono friends),373 +load bearing equipment,373 +leaf-pattern stripe,373 +katarina claes,373 +kajin (kajinman),373 +impa,373 +holding binoculars,373 +hanzo (overwatch),373 +folded clothes,373 +down jacket,373 +done (donezumi),373 +bigdead,373 +aruruw,373 +alice liddell (american mcgee's alice),373 +abenattou,373 +yayoi maka,372 +tomoe,372 +the elder scrolls v: skyrim,372 +takatora,372 +sweeping,372 +sleeve ribbon,372 +shexyo,372 +sasai saji,372 +rock paper scissors,372 +rice shower (make up vampire!) 
(umamusume),372 +red suit,372 +onono imoko,372 +ntw-20 (girls' frontline),372 +nanahime,372 +meltrandi,372 +kuroi (liar-player),372 +kotoribako,372 +kokone (coconeeeco),372 +kogami akira,372 +kemurikusa,372 +holding toothbrush,372 +hatterene,372 +hato haru,372 +hanada yanochi,372 +ghiaccio,372 +deyui,372 +bolverk,372 +bad artstation id,372 +atarashi ako,372 +alca (wakatanka4),372 +yamamoto arifred,371 +wide oval eyes,371 +uousa-ou,371 +sword of the creator,371 +smol ame,371 +ryomou shimei,371 +rantana (lalalalackluster),371 +qosic,371 +popsicle in mouth,371 +pokemon on lap,371 +penis over eyes,371 +parasoul (skullgirls),371 +parachute,371 +okuri banto,371 +noto kurumi,371 +mullet,371 +mihama chiyo,371 +jigatei (omijin),371 +ikayaki,371 +dawn,371 +crotchless pantyhose,371 +cassidy (overwatch),371 +caren hortensia (amor caren),371 +blvefo9,371 +ayunda risu,371 +ayasaki hayate,371 +atelier live,371 +amada ken,371 +agent (girls' frontline),371 +yakusoku no neverland,370 +yagami light,370 +snowboard,370 +saratoga (azur lane),370 +saileach (arknights),370 +ruru amour,370 +riding bicycle,370 +ransusan,370 +quad drills,370 +nikki kyousuke,370 +nathaniel pennel,370 +mayonnaise,370 +marcille,370 +magowasabi,370 +lum (cosplay),370 +lady maria of the astral clocktower,370 +kobushi abiru,370 +kedama (touhou),370 +katou danzou (fate),370 +igayan,370 +height,370 +frenda seivelun,370 +fjsmu,370 +dome,370 +crater,370 +black bandeau,370 +arisaka ako,370 +amazon position,370 +aa-12 (girls' frontline),370 +a.x.,370 +yamabuki (yusuraume),369 +white babydoll,369 +weapon merchant cattleya,369 +wataboku,369 +verone gakuin school uniform,369 +ume (noraneko),369 +tyone,369 +tenako (mugu77),369 +shishi juuroku,369 +sasahara wakaba,369 +peanuts,369 +panties over garter belt,369 +pajamas pull,369 +okame nin,369 +nyamota,369 +nice nature (run&win) (umamusume),369 +nababa,369 +monster hunter portable 3rd,369 +magical sapphire,369 +libre,369 +j@ck,369 +iwasama masami,369 +idw (girls' frontline),369 +hiroya juuren,369 +hatsunatsu,369 +hamaken. 
(novelize),369 +frilled straps,369 +firefighter,369 +ebimomo,369 +chapayev (azur lane),369 +cape removed,369 +black jumpsuit,369 +aningay,369 +video game cover,368 +treasure,368 +tokimeki memorial 2,368 +sugano manami,368 +sprite,368 +saxophone,368 +richelieu (azur lane),368 +print ribbon,368 +poaro,368 +nebula,368 +mahira (granblue fantasy),368 +lisia (pokemon),368 +jiangshi costume,368 +harris hero,368 +gomipomi,368 +gake no ue no ponyo,368 +fur-trimmed bikini,368 +ekita kuro,368 +collei (genshin impact),368 +cheese trail,368 +bural chingu,368 +bowwow (hamju94),368 +akaga hirotaka,368 +:s,368 +wave hair ornament,367 +usagihime,367 +to heart 2 ad,367 +tarutaru,367 +speh,367 +satou jun,367 +saru getchu,367 +sakurai rihoko,367 +sakakiba misogi,367 +saga frontier,367 +racing,367 +pixie cut,367 +orca hood,367 +noripro,367 +lava (arknights),367 +kurotofu,367 +kurogarasu,367 +kodamari,367 +kamaboko,367 +hatsumoude,367 +hat tug,367 +hamuzou,367 +gatchaman crowds,367 +garnet,367 +energy barrier,367 +cyphers,367 +convertible,367 +cheek rest,367 +cassock,367 +bouncing penis,367 +autofacial,367 +acolyte (ragnarok online),367 +zdrada (helltaker),366 +v4x,366 +tsukimori usako,366 +takeshima eku,366 +skyfire (arknights),366 +scary monsters (stand),366 +ring box,366 +rindou (p41neko),366 +rice cooker,366 +red sports bra,366 +puyo (puyopuyo),366 +postbox (outgoing mail),366 +plaid shorts,366 +miles edgeworth,366 +merc storia,366 +matoyama,366 +maromi (am97),366 +lenalee lee,366 +jun (aousa0328),366 +incoming punch,366 +hunter (ragnarok online),366 +holding scroll,366 +h&k ump45,366 +gold coin,366 +dark halo,366 +comic kairakuten,366 +bow skirt,366 +zan (harukahime),365 +yukikaze (azur lane),365 +wild geese,365 +tem10,365 +suujiniku,365 +snm (sunimi),365 +shimatani azu,365 +ringo sui,365 +refeia,365 +red wine,365 +person between breasts,365 +pascal (tales),365 +murakami yuichi,365 +makihitsuji,365 +love cacao,365 +leopard (yatterman),365 +krudears,365 +kankitsunabe (citrus),365 +izayoi sakuya (cosplay),365 +hxd,365 +ha-ru,365 +europa (granblue fantasy),365 +deku suke,365 +dating,365 +claire redfield,365 +chastity belt,365 +blue outline,365 +bang-you,365 +architect (girls' frontline),365 +aqua leotard,365 +adachi to shimamura,365 +yoekosukii,364 +whitesmith (ragnarok online),364 +waffle,364 +takakamo shizuno,364 +snowball22,364 +self-portrait,364 +sasasegawa sasami,364 +project diva f,364 +playstation vita,364 +piukute062,364 +nopon,364 +merryweather,364 +kuja,364 +ketchup bottle,364 +kenjou akira,364 +jahy-sama wa kujikenai!,364 +irohara mitabi,364 +houndoom,364 +hippopotamus (kemono friends),364 +grim (azur lane),364 +gardenia (pokemon),364 +funnels (gundam),364 +barding,364 +baku-p,364 +at gunpoint,364 +ai-wa,364 +yuusha ou gaogaigar,363 +windows,363 +sophitia alexandra,363 +shinmai (kyata),363 +sekiya asami,363 +ooba minori,363 +narmaya (summer) (granblue fantasy),363 +mizusawa mao,363 +lucca ashtear,363 +little witch nobeta,363 +kidou keisatsu patlabor,363 +kaf (kamitsubaki studio),363 +jilu,363 +hitoto,363 +hermit purple,363 +four of a kind (touhou),363 +fate/extella link,363 +ez6,363 +empty pool,363 +cornrows,363 +comic kairakuten beast,363 +baba arumi,363 +alisa southerncross,363 +aki (neyuki41028),363 +agent 3 (splatoon),363 +zero (drag-on dragoon),362 +yami to boushi to hon no tabibito,362 +tasting,362 +suneate,362 +starting future (umamusume),362 +shino (eefy),362 +sad smile,362 +ring-con,362 +pepe (jonasan),362 +nekopuchi,362 +loose bikini,362 +kiikii (kitsukedokoro),362 +kayabakoro,362 +kagami 
uekusa,362 +jabami yumeko,362 +ipad,362 +huqu,362 +hockey mask,362 +hechi,362 +headshop,362 +giraffe (ilconte),362 +choukai kai ni (kancolle),362 +bandana over mouth,362 +atalanta alter (fate),362 +yue (chinese wife diary),361 +walkure romanze,361 +topu,361 +tomo (sjim),361 +threo (granblue fantasy),361 +sky: children of the light,361 +satsuki neko,361 +samegami,361 +ryuuzaki ichi,361 +prison cell,361 +pinup (style),361 +okkotsu yuuta,361 +nyanko daisensou,361 +nori (hidamari sketch),361 +nac000,361 +mejiro ardan (umamusume),361 +komichi aya,361 +ikea shark,361 +hirano toushirou,361 +green sweater vest,361 +green heart,361 +goodsmile company,361 +fukae (kancolle),361 +diisuke,361 +deadpool,361 +character cutout,361 +aqua sailor collar,361 +virtua fighter,360 +uni create,360 +uesugi fuutarou,360 +traffic,360 +super zombie,360 +studiozombie,360 +snow white,360 +shimakaze (azur lane),360 +shakemi (sake mgmgmg),360 +protagonist (smtv),360 +paint roller,360 +nagineko,360 +mosako,360 +miyasaka ryou,360 +milly ashford,360 +long fingers,360 +leotard peek,360 +lei fang,360 +horikou,360 +grief seed,360 +gggg,360 +fujiwara akina,360 +fallen angel,360 +dies irae,360 +chisel,360 +chiester sisters,360 +catria (fire emblem),360 +breath of fire iv,360 +black outline,360 +bandaid on neck,360 +ayuma sayu,360 +ayase arisa,360 +arutera,360 +amano chiharu,360 +aku no musume (vocaloid),360 +adjusting bra,360 +you (noanoamoemoe),359 +silver background,359 +re:act,359 +pun2,359 +penis on head,359 +nyotengu,359 +nebusoku,359 +natsuumi manatsu,359 +mizushina minato,359 +mirai hikari,359 +manboobs,359 +lilligant,359 +landmark,359 +koikatsu (medium),359 +kiyomin,359 +kanta (kanta 077),359 +kaibutsu oujo,359 +joltik,359 +hijiri tsukasa,359 +hero's daughter (dq5),359 +gumroad username,359 +fud,359 +f7(eiki),359 +colette brunel,359 +chagen kokimu,359 +celty sturluson,359 +brushing another's hair,359 +akinaie,359 +2ch,359 +yomi (senran kagura),358 +torimaru,358 +titanfall (series),358 +siu (siu0207),358 +serperior,358 +seikon no qwaser,358 +pussy cutout,358 +pink sash,358 +penetration gesture,358 +nyoro~n,358 +nyon (cookie),358 +nonoririn,358 +nitocris (swimsuit assassin) (second ascension) (fate),358 +multiple insertions,358 +mother's day,358 +mifune shioriko,358 +meis (terameisu),358 +mathias leth,358 +lucifer (umineko),358 +kuguri oimo,358 +kudelia aina bernstein,358 +kitakami reika,358 +kim eb,358 +ibuki douji (fate),358 +holocouncil,358 +hole on body,358 +franka (arknights),358 +flat screen tv,358 +evolution,358 +erio mondial,358 +disneyland,358 +deego (omochi bazooka),358 +collar tug,358 +chimera,358 +bike shorts pull,358 +baram,358 +alpaca,358 +71,358 +yuizaki kazuya,357 +vomit,357 +tiger paws,357 +suzumiya haruhiko,357 +sharena (fire emblem),357 +ruri rarako,357 +riko (made in abyss),357 +penis tentacles,357 +palla (fire emblem),357 +obui,357 +nyoro (nyoronyoro000),357 +nier (granblue fantasy),357 +misono karin,357 +meiou setsuna,357 +masuyama ryou,357 +little bel (azur lane),357 +licking another's cheek,357 +kurowa,357 +kink (tortoiseshell),357 +kadeart,357 +ina (1813576),357 +hayakawa harui,357 +green tunic,357 +greatm8,357 +cycling uniform,357 +clitoris slip,357 +butterfree,357 +blazpu,357 +big hero 6,357 +beige vest,357 +:<>,357 +zaphn,356 +yakumo yukari (young),356 +wario,356 +takehana note,356 +sliding,356 +sakura hiyori,356 +ryu (ryu's form site),356 +naegi komaru,356 +mosin-nagant,356 +monobe yuri,356 +mobius (honkai impact),356 +misaki mei,356 +millennium puzzle,356 +mikleo (tales),356 +miazi,356 
+meowfficer (azur lane),356 +maiku,356 +kuroonehalf,356 +kuromorimine (emblem),356 +kamen rider kuuga (series),356 +jormungand,356 +hayama kazusa,356 +haku (p&d),356 +gotou hisashi,356 +fin e ld si laffinty,356 +essex (azur lane),356 +diyusi (cookie),356 +diving suit,356 +cuddling handjob,356 +chorimokki,356 +cardiogram,356 +brown hakama,356 +wolf paws,355 +unicorn gundam,355 +touyoko momoko,355 +tokiomi tsubasa,355 +tokido saya,355 +tamura manami,355 +taiyou (kancolle),355 +see-through sarong,355 +saigyouji yuyuko (living),355 +saigyou ayakashi,355 +rindou (radical dream),355 +plague doctor mask,355 +panda costume,355 +observerz,355 +moon phases,355 +mizuki kotora,355 +megatron,355 +matsushita yuu,355 +lancelot (fate/zero),355 +koh (minagi kou),355 +himuro shunsuke,355 +haruichi (komikal),355 +greenhouse,355 +gomu (chewinggom),355 +fujiwara hajime,355 +dosugon,355 +dannex009,355 +chousoku henkei gyrozetter,355 +caeda (fire emblem),355 +ayanami rei (cosplay),355 +alexmaster,355 +yuzuruka (bougainvillea),354 +yuki onna,354 +x fingers,354 +walking away,354 +valkyrie (apex legends),354 +tan (inka),354 +starmie,354 +squat toilet,354 +sookmo,354 +shigatsu wa kimi no uso,354 +shaymin (land),354 +peeking through fingers,354 +oosaka kanagawa,354 +nisshin (kancolle),354 +nippon ichi,354 +natsu no koucha,354 +mono (moiky),354 +momendoufu,354 +m16a1,354 +lampshade,354 +label girl (dipp),354 +kurokuro,354 +kazeno,354 +kamo kamen,354 +japari bus,354 +ibaraki natou,354 +haruki (colorful macaron),354 +handsome wataru,354 +gyate gyate,354 +frieza,354 +erica (naze1940),354 +dermar,354 +daijoubu? oppai momu?,354 +cum on floor,354 +croissant (arknights),354 +common dolphin (kemono friends),354 +chieru (princess connect!),354 +charlotte corday (fate),354 +bus interior,354 +bad arm,354 +ark royal (azur lane),354 +akiyama mizuki,354 +aina (mao lian),354 +yuri petrov,353 +wlop,353 +undone sarashi,353 +tetsurou (fe+),353 +teepo (tales),353 +takino tomo,353 +suna no wakusei (vocaloid),353 +stuffed octopus,353 +sorani (kaeru0768),353 +silica (sao-alo),353 +selfie stick,353 +seiya kou,353 +ro-class destroyer,353 +ribbon panties,353 +oz (genshin impact),353 +onodera kosaki,353 +olivia (fire emblem),353 +nunucco,353 +niko (tama),353 +mysta rias,353 +matatabi (2ccp),353 +marumoru,353 +latifa fleuranza,353 +kyuuso inukami,353 +kuronyan,353 +jan (lightdragoon),353 +in heat,353 +homura (senran kagura),353 +fujoshi,353 +flx,353 +female goblin,353 +elysia (miss pink elf) (honkai impact),353 +dlanor a. 
knox,353 +digital thermometer,353 +digimon adventure 02,353 +crown hair ornament,353 +asari nanami,353 +angela (seiken densetsu 3),353 +alchemist (ragnarok online),353 +aila jyrkiainen,353 +yen sign,352 +tsukahara hibiki,352 +tera zip,352 +shidou irina,352 +seaplane,352 +rotom dex,352 +rom (20),352 +reinama,352 +raiden (metal gear),352 +plaid trim,352 +pink check school (idolmaster),352 +pac-man (game),352 +naruse hirofumi,352 +mikura (kancolle),352 +living room,352 +kita senri,352 +kagerou (kers),352 +inoue jun'ichi,352 +hirose yasuho,352 +hakai shin,352 +gorgon (fate),352 +froakie,352 +filming,352 +felix hugo fraldarius,352 +dars (recolors),352 +cum in cup,352 +chm (macharge),352 +checkered bikini,352 +camus (dq11),352 +bakugadou,352 +asuzemu,352 +aestheticc-meme,352 +zumi (zumidraws),351 +zekrom,351 +yuubari kai ni (kancolle),351 +victory gundam,351 +uneg,351 +team skull grunt,351 +soramuko,351 +sanpachishiki (gyokusai-jima),351 +relay baton,351 +prywinko,351 +print pajamas,351 +piyodesu,351 +pandemonica (helltaker),351 +nenobi (nenorium),351 +ne-class heavy cruiser,351 +mia (gute-nacht-07),351 +melina (elden ring),351 +marui koishi,351 +marble (toy),351 +mana (tsurubeji),351 +koyashaka,351 +konayama kata,351 +kemomimi oukoku kokuei housou,351 +kai'sa,351 +kageyama shigeo,351 +ikuno dictus (umamusume),351 +honey come chatka!!,351 +holding mop,351 +holding magnifying glass,351 +hidan no aria,351 +hayama eishi,351 +haruka (senran kagura),351 +hamaguchi ayame,351 +death-sensei (mori calliope),351 +celtic knot,351 +ayaya~,351 +album cover redraw,351 +white garter belt,350 +twilight sparkle,350 +tsukuyomi moonphase,350 +strike witches zero,350 +south dakota (azur lane),350 +shez (fire emblem),350 +sekai saisoku no panda,350 +sakurame,350 +sableye,350 +pija (pianiishimo),350 +okita j. souji (first ascension) (fate),350 +nikichen,350 +nidy,350 +nagayama yuunon,350 +mole on shoulder,350 +meteor,350 +koito yuu,350 +kazu (muchuukai),350 +kageshio (276006),350 +irohara,350 +implied pregnancy,350 +horned mask,350 +heroman,350 +hero neisan,350 +gokuu (acoloredpencil),350 +fluorescent lamp,350 +dos (james30226),350 +daifuku (yukihana lamy),350 +cello,350 +blue jumpsuit,350 +ada wong,350 +95-tan,350 +xenoblade chronicles 2: torna - the golden country,349 +water balloon,349 +unhappy,349 +takayama haruka,349 +takasugi shinsuke,349 +suu (monster musume),349 +sturm (granblue fantasy),349 +slowpoke,349 +senran kagura new link,349 +senji (tegone spike),349 +sakurai makoto (custom size),349 +riliane lucifen d'autriche,349 +ogin (girls und panzer),349 +miya (miyaruta),349 +microsoft,349 +liquor,349 +kunashiri (kancolle),349 +kodachi (kuroyuri shoukougun),349 +kamen rider 01 (series),349 +inuzumi masaki,349 +inoshira,349 +hebitsukai-san,349 +headbutt,349 +grayfia lucifuge,349 +biyon,349 +bikini day,349 +battle girl high school,349 +akabane (zebrasmise),349 +yukian,348 +yasuda akira,348 +vox akuma,348 +tier harribel,348 +takayama sayoko,348 +super blackjack,348 +spiked hairband,348 +somechime (sometime1209),348 +shirogane hina,348 +sayakata katsumi,348 +ruined for marriage,348 +orange coat,348 +ojisan to marshmallow,348 +no thank you!,348 +new jersey (exhilarating steps!) 
(azur lane),348 +nagu,348 +mikoko (kemomimi oukoku kokuei housou),348 +metal,348 +medarot,348 +maett,348 +mabing,348 +lime slice,348 +kashima yuu,348 +jeanne d'arc alter (avenger) (first ascension) (fate),348 +harumachi nagaaki,348 +gunpod,348 +gar32,348 +fire extinguisher,348 +cymbals,348 +cosaten,348 +cliffheart (arknights),348 +cd case,348 +big bad wolf (grimm),348 +alexander (fate),348 +zagreus (hades),347 +you work you lose,347 +yoshida masaki,347 +yellow umbrella,347 +ump40 (girls' frontline),347 +tokiwa midori (kyokutou funamushi),347 +toad (mario),347 +tenten (naruto),347 +target,347 +swablu,347 +songover,347 +shennong (housamo),347 +shamare (arknights),347 +sakura taisen iii,347 +remodel (warship girls r),347 +red bull,347 +pixiv fantasia 4,347 +nonowa,347 +nihilego,347 +naked necktie,347 +minccino,347 +miku symphony (vocaloid),347 +mass effect (series),347 +mashima shima,347 +lunacle,347 +ko~cha,347 +kippuru,347 +hinahino,347 +changpao,347 +book focus,347 +blue mary,347 +black general,347 +basil (omori),347 +yoye (pastel white),346 +yamauchi noriyasu,346 +tuteurfars shin,346 +steed (steed enterprise),346 +sneasel,346 +single garter strap,346 +simulacrum (titanfall),346 +sekaiju no meikyuu 2,346 +sabamen,346 +re:shimashima,346 +puuakachan,346 +poppy (league of legends),346 +miyauchi renge,346 +me!me!me!,346 +mashayuki,346 +maribel (dq7),346 +krekkov,346 +kisaragi zwei,346 +kem kem,346 +kamen rider build (series),346 +kagura mea,346 +jester,346 +izuna (swimsuit) (blue archive),346 +ikari gendou,346 +gag removed,346 +forest (4423),346 +ehrrr,346 +druaga no tou,346 +dean (momodean),346 +citron 82,346 +cirno day,346 +checkered neckwear,346 +bike jersey,346 +arsene lupin iii,346 +arcane jinx,346 +ancient princess menace,346 +akaza,346 +wheat field,345 +tales of eternia,345 +shuuko (s h uuko),345 +shoe removed,345 +shikei,345 +sela (23),345 +sabitsuki,345 +peke,345 +ogros,345 +nochita shin,345 +murakami (girls und panzer),345 +mishima hiroji,345 +mahou shoujo lyrical nanoha the movie 2nd a's,345 +last exile,345 +kuroemon,345 +kujou danbo,345 +kirara fantasia,345 +kaoling,345 +irua,345 +ips cells,345 +horn (instrument),345 +harukigenia,345 +futanari pov,345 +flynn scifo,345 +felix argyle,345 +core (mayomayo),345 +beeswax (arknights),345 +bakery,345 +azki (hololive),345 +anceril sacred,345 +amano pikamee,345 +akatsuki kai ni (kancolle),345 +aka tawashi,345 +yoshimoto (dear life),344 +unyu,344 +trapped,344 +tategami aoi,344 +taishi (moriverine),344 +super saiyan 4,344 +seren (staphy),344 +see-through swimsuit,344 +saturn (planet),344 +ryota (ry o ta),344 +robin (arknights),344 +recursion,344 +pen (medium),344 +necro (guilty gear),344 +monster strike,344 +momose rio,344 +miyao miya,344 +mayura2002,344 +looking at food,344 +lightsource,344 +light cruiser oni,344 +ikkyuu,344 +hokuto (scichil),344 +hazuki natsu,344 +hay,344 +griffith (berserk),344 +grey eyeshadow,344 +goalkeeper,344 +gengoroumaru (ambidextrous),344 +front slit,344 +fighter (7th dragon),344 +dj,344 +daimaou ruaeru,344 +coffee beans,344 +call of duty,344 +broken mirror,344 +big boss,344 +azuma (azur lane),344 +asahina mafuyu,344 +aoi manabu,344 +xiao wu (douluo dalu),343 +tachikawa mimi,343 +striped clothes,343 +spike spiegel,343 +sorey (tales),343 +soba (saz),343 +shigure asa,343 +razor,343 +rakurakutei ramen,343 +raika9,343 +peacock (skullgirls),343 +oyatsu (mk2),343 +orie (under night in-birth),343 +ningen (ningen96),343 +new mask of hope,343 +nanateru,343 +music video,343 +matches,343 +lobster,343 +knifed,343 +kisaragi yuu 
(fallen sky),343 +kaoruru (sakuno2911),343 +ilsa (granblue fantasy),343 +ichi (ichikai),343 +hitotose rin,343 +hatsune (princess connect!),343 +giraffe horns,343 +fuji shinobu,343 +enrico pucci,343 +chidori kaname,343 +captain falcon,343 +canal,343 +boxing,343 +bekotarou,343 +atg (wttoo0202),343 +anzu (o6v6o),343 +zasshu,342 +yepnean,342 +yaotome urushi,342 +yagi (ningen),342 +tapir ears,342 +sorcerer (ragnarok online),342 +silent hill 2,342 +short-sleeved sweater,342 +shauna (pokemon),342 +servbot (mega man),342 +sandalphon (granblue fantasy),342 +running track,342 +riz,342 +rinpoo chuan,342 +rider-tan,342 +razalor,342 +presenting foot,342 +nozaki umetarou,342 +morizono shiki,342 +mizuhara koyomi,342 +mage (ragnarok online),342 +lucknight,342 +kuya (hey36253625),342 +kunihiro hajime,342 +koikawa minoru,342 +kino (m6t2a),342 +kimishima ao,342 +kawazoe tamaki,342 +jakuri (ar tonelico),342 +hoozuki (hoozuki no reitetsu),342 +hanato (seonoaiko),342 +hanamiya natsuka,342 +eonsang,342 +dolphin hair ornament,342 +cyclone (reizei),342 +bad bcy id,342 +yokoshima (euphoria),341 +yellow tail,341 +usatarou,341 +urban,341 +tomozero,341 +tomoshibi hidekazu,341 +tetsujin momoko,341 +tennouboshi uzume,341 +shugao,341 +shiina noriko,341 +senbon-zakura (vocaloid),341 +sasa onigiri,341 +riku (rikkuru),341 +queen's blade grimoire,341 +perseus (unfamiliar duties) (azur lane),341 +patterned legwear,341 +octoling boy,341 +night elf (warcraft),341 +nemu (nebusokugimi),341 +monrooru,341 +makkachin,341 +lifeline (apex legends),341 +krile mayer baldesion,341 +holding briefcase,341 +growlithe,341 +france,341 +dorontabi,341 +brighid (xenoblade),341 +aya shachou,341 +asahi breweries,341 +alice: madness returns,341 +25-ji night code de. (project sekai),341 +zafira,340 +yu mei-ren (first ascension) (fate),340 +white corset,340 +warlock (ragnarok online),340 +wan'yan aguda,340 +volkner (pokemon),340 +tuna,340 +train station platform,340 +takakura shouma,340 +slug,340 +senmu (0107),340 +red camisole,340 +ranpha franboise,340 +persona dancing,340 +owari akane,340 +ocarina,340 +noah (tettsui-sole),340 +mochizuki ryouji,340 +minea (dq4),340 +midnight (banyex),340 +mashiro (rikuya),340 +leopard boy,340 +kane-neko,340 +jinrui wa suitai shimashita,340 +irelia,340 +inflatable dolphin,340 +ikeda kazumi,340 +hirabitai,340 +hatsuzuki 527,340 +grim aloe,340 +flower choker,340 +final fantasy ii,340 +doko tetora,340 +crisis core final fantasy vii,340 +blue male underwear,340 +barbell,340 +ariverkao,340 +angel girl (shimmer),340 +amane ruri,340 +yui toshiki,339 +xter,339 +weidashming,339 +vertical comic,339 +ugoira conversion,339 +thompson submachine gun,339 +takorice,339 +symmetra (overwatch),339 +societte (granblue fantasy),339 +short braid,339 +school of fish,339 +prinz heinrich (azur lane),339 +ponsuke (pon00000),339 +poniko,339 +pako (pakosun),339 +pac-man eyes,339 +oberon (third ascension) (fate),339 +nishiuri,339 +nekomusume (gegege no kitarou 5),339 +muffin (sirumeria),339 +mizuki (arknights),339 +miyai sen,339 +melona,339 +mameshiba,339 +luxray,339 +kuro no kiseki,339 +karin (p&d),339 +ishida akira,339 +i-47 (kancolle),339 +hyakuhachi (over3),339 +hinase (cookie),339 +gununu,339 +garland (decoration),339 +fjorm (fire emblem),339 +fengmo,339 +ephraim (fire emblem),339 +dreepy,339 +darkrai,339 +crotch kick,339 +coyote (kemono friends),339 +clueless,339 +chocomiru,339 +broly (dragon ball z),339 +body pillow,339 +barcode scanner,339 +arikawa satoru,339 +yukiu con,338 +yuki (popopo),338 +yoon cook,338 +yamabuki saaya,338 
+wattaro,338 +volo (pokemon),338 +vice-versa (skullgirls),338 +underwear theft,338 +tome of the night sky,338 +t-elos,338 +super saiyan blue,338 +submission hold,338 +sou (soutennkouchi),338 +shirogane miyuki,338 +shake-o,338 +sakura empire (emblem),338 +pyukumuku,338 +plackart,338 +nontraditional school swimsuit,338 +mine fujiko,338 +mikasa (azur lane),338 +michelle (bang dream!),338 +locomotive,338 +lee-enfield (girls' frontline),338 +kurusugawa ayaka,338 +kanekiyo miwa,338 +kamille bidan,338 +kamata yuuya,338 +ikurauni,338 +hinatsuki mikan,338 +harbor,338 +hand on another's penis,338 +fruit tree,338 +frilled shawl,338 +ebido,338 +draenei,338 +dorsiflexion,338 +cum bubble,338 +cross (crossryou),338 +arceus,338 +aqua skin,338 +apt,338 +angel mort,338 +andou mahoro,338 +yuuyu (777),337 +wobbuffet,337 +ttomm,337 +too bad! it was just me! (meme),337 +tkgsize,337 +tidsean,337 +st. michael's school uniform,337 +silence glaive,337 +severed arm,337 +ralts,337 +polka dot pillow,337 +piano print,337 +oyakodon (sex),337 +noren,337 +myrrh (fire emblem),337 +mitsudoue,337 +mirror image,337 +minazuki juuzou,337 +merry nightmare,337 +kosshii (masa2243),337 +kos-mos ver. 4,337 +knights of the round uniform,337 +knife holster,337 +intertwined tails,337 +haga yui,337 +grumpy nun (diva),337 +franky (one piece),337 +florence nightingale (trick or treatment) (fate) (cosplay),337 +fbc,337 +ech,337 +diglett,337 +diagonal-striped neckwear,337 +coin rand,337 +clarevoir,337 +chocolate chip cookie,337 +charin,337 +butterfly ornament,337 +bonnie (pokemon),337 +bokutachi wa benkyou ga dekinai,337 +akatsuki hijiri,337 +airisubaka,337 +adjusting leotard,337 +zenkou,336 +yuuki (yuyuki000),336 +wing brooch,336 +unconnected marketeers,336 +tristana,336 +suito,336 +spongebob squarepants,336 +sato ame,336 +pidgey,336 +ozen,336 +nameless dagger (fate),336 +market,336 +lord of vermilion,336 +kurumi (touhou),336 +kamia (not found),336 +incoming drink,336 +hololive fantasy,336 +grey overalls,336 +fruit hat ornament,336 +force-feeding,336 +foodgasm,336 +crown removed,336 +core,336 +breast contest,336 +bad instagram id,336 +atoshi,336 +arthropod boy,336 +aoki ume,336 +adventurer (ff14),336 +zoroark,335 +yakisoba,335 +vector (girls' frontline),335 +ushiromiya eva,335 +take it home,335 +swan,335 +spiked gauntlets,335 +souza samonji,335 +shoshinsha mark,335 +satoupote,335 +quiz magic academy the world evolve,335 +prolapse,335 +pikmin (creature),335 +on box,335 +ogawa shou,335 +night vision device,335 +nia (nia4294),335 +negahami,335 +momo (higanbana and girl),335 +mayogii,335 +masha,335 +maken-ki!,335 +kurose kousuke,335 +kuro (be ok),335 +kuragari,335 +juuban middle school uniform,335 +hinagi (fox priest),335 +hands on thighs,335 +foaming at the mouth,335 +ellen (touhou),335 +demi-chan wa kataritai,335 +dark precure,335 +cleopatra (fate),335 +blue tabard,335 +ashe (league of legends),335 +arizuka (catacombe),335 +anti (0324),335 +agarest senki (series),335 +abbystea,335 +wakfu,334 +uguisumaru,334 +takanashi souta,334 +stop sign,334 +sorimura youji,334 +shoulder grab,334 +shoe locker,334 +shirahane nao,334 +shinx,334 +shared blanket,334 +red mage,334 +playing card theme,334 +persona 4 the golden,334 +onigiri print,334 +neck biting,334 +mirin chikuwa,334 +memories off,334 +leon s. kennedy,334 +koshigaya komari,334 +kikumaru bunta,334 +holding fireworks,334 +hirato (kancolle),334 +gogeta,334 +ganari ryuu,334 +fujieda uzuki,334 +c.c. 
lemon,334 +blueberry (5959),334 +akai sashimi,334 +ae-3803,334 +abigail williams (swimsuit foreigner) (first ascension) (fate),334 +yatai,333 +yaomai,333 +takagi (tansuke),333 +stone mask (jojo),333 +scizor,333 +multiple hands,333 +morgan (fire emblem) (female),333 +meeko,333 +material-s,333 +leona (league of legends),333 +laura matsuda,333 +kamiya maneki,333 +jonsun,333 +fuyutsuki (kancolle),333 +florence nightingale (fate) (cosplay),333 +dengeki g's,333 +canister,333 +ask (askzy),333 +animation paper,333 +amedamacon,333 +alicia (granblue fantasy),333 +against fence,333 +wakasa yuuri,332 +the witcher 3,332 +suspenders gap,332 +shirt slip,332 +shin'en (gyokuro company),332 +shalon,332 +rina-chan board,332 +paw print soles,332 +oda nobunaga (maou avenger) (fate),332 +m4 sherman,332 +lumo 1121,332 +long arms,332 +koyanskaya (assassin) (first ascension) (fate),332 +koshika rina,332 +kaigen 1025,332 +isedaichi ken,332 +ichininmae no lady,332 +ham (points),332 +green border,332 +gradient skirt,332 +fur choker,332 +folks (nabokof),332 +edward teach (fate),332 +deva battle suit,332 +chest guard,332 +bort,332 +ayase fuuka,332 +ali baba saluja,332 +z/x,331 +the owl house,331 +terakoya,331 +sukeban,331 +suguri,331 +sonsaku hakufu,331 +simoyuki,331 +shirosuzu,331 +shiragiku hotaru,331 +shiokonbu,331 +shinekalta,331 +satomi (black scarecrow),331 +saint-louis (azur lane),331 +ryne,331 +puzzle,331 +project diva,331 +photobomb,331 +oshiki hitoshi,331 +okayado,331 +no,331 +mad hatter (alice in wonderland),331 +kuwashima rein,331 +kokona (blue archive),331 +kim hyung tae,331 +kamen rider den-o (series),331 +ishimiso (ishimura),331 +holding dog,331 +hisae (hisae collect),331 +g'raha tia,331 +forced,331 +end of evangelion,331 +beowulf (fate),331 +bandaged wrist,331 +aozaki touko,331 +a6m zero,331 +yoisaki kanade,330 +winn,330 +white wristband,330 +watarai keiji,330 +tooo,330 +tiv,330 +tan yang (kancolle),330 +taimanin asagi kessen arena,330 +suyasuyabi,330 +suna (s73d),330 +sukajan,330 +starscream,330 +shpo,330 +shihou (g-o-s),330 +sariel (touhou),330 +san diego (azur lane),330 +sacred heart,330 +red blindfold,330 +queen,330 +puzzle piece,330 +purple cloak,330 +print sweater,330 +penguin logistics logo,330 +pan-pa-ka-paaan!,330 +nakano maru,330 +nakamura hinato,330 +mouri ran,330 +mimino kurumi,330 +kiri futoshi,330 +keyaki chimaki,330 +jean (sea breeze dandelion) (genshin impact),330 +iws 2000 (girls' frontline),330 +ilya (princess connect!),330 +ichiren namiro,330 +harukara (7th dragon),330 +h&k mp5,330 +flower (vocaloid4),330 +eiko carol,330 +egawa satsuki,330 +diagonal-striped bowtie,330 +cuteg,330 +cromachina,330 +berabou,330 +arched soles,330 +ace trainer (pokemon),330 +totoro,329 +suou-sensei,329 +suien,329 +so-bin,329 +sakuyu,329 +rin-sin,329 +plate carrier,329 +pitchfork,329 +pastry box,329 +panties around ankles,329 +oosaki shin'ya,329 +ole tower,329 +minami koyogi,329 +majutsushi orphen,329 +lightning bolt hair ornament,329 +kvlen,329 +kuppipi ramutarou,329 +kite,329 +kentaurosu,329 +jill 07km,329 +inflatable orca,329 +holding menu,329 +hitoribocchi no marumaru seikatsu,329 +girls und panzer ribbon no musha,329 +flora (dq5),329 +comic aun,329 +coffee maker (object),329 +chups,329 +basketball hoop,329 +arashi chisato,329 +youko-shima,328 +yoruny,328 +yomosaka,328 +wangchuan de quanyan,328 +tsukikage oyama,328 +throwing needles,328 +tabata hisayuki,328 +sydus,328 +steve rogers,328 +star of life,328 +splashbrush,328 +satomura kyou,328 +rejean dubois,328 +ram hachimin,328 +puchiman,328 +orange-framed 
eyewear,328 +musha miko tomoe,328 +migihidari (puwako),328 +liduke,328 +kito (sorahate),328 +kei (soundcross),328 +ishtar (fire emblem),328 +hyakko,328 +gouenji shuuya,328 +gii,328 +fiora (xenoblade),328 +extra penises,328 +chihaya (clothing),328 +champagne bottle,328 +arm pillow,328 +amagiri (kancolle),328 +adachi sakura,328 +yen,327 +x-ray vision,327 +wiggling toes,327 +watanore,327 +wall lamp,327 +usami ichika,327 +tsubobot,327 +toothbrush in mouth,327 +tales of arise,327 +standing on object,327 +rustle,327 +regular mow,327 +red sarong,327 +particle cannon case,327 +ootomo takuji,327 +nitamako (sakamalh),327 +nishiide kengorou,327 +munakata atsumi,327 +misaka 12003-gou,327 +miru tights,327 +marui hitoha,327 +maeshima shigeki,327 +limgae,327 +kyougoku touya,327 +kurusugawa himeko,327 +kokomine cocona,327 +kitaku jikan (ktk jkn),327 +kagemusha,327 +island (kossori),327 +isayama yomi,327 +hiraga saito,327 +goshiki agiri,327 +futami eriko,327 +food fantasy,327 +fainting,327 +creature on shoulder,327 +catnnn,327 +carnival phantasm,327 +beige dress,327 +bakkanki,327 +walther wa 2000,326 +uyama hajime,326 +undine (guilty gear),326 +ulrich von hutten (azur lane),326 +training,326 +taurine 8000mg,326 +source mismatch,326 +samurai jacket (cyberpunk),326 +pillory,326 +north carolina (azur lane),326 +mysterious heroine x alter (second ascension) (fate),326 +multicolored coat,326 +matsumi kuro,326 +kozou (soumuden),326 +kitakami kai ni (kancolle),326 +kaiza (rider000),326 +hyanna-natsu,326 +guile,326 +foreskin pull,326 +fae (fire emblem),326 +eyjafjalla (summer flowers) (arknights),326 +doremi,326 +cow hat,326 +bad hentai-foundry id,326 +amaha masane,326 +ai-generated,326 +yokota mamoru,325 +well,325 +water slide,325 +venom (marvel),325 +ursula hartmann,325 +surtr (colorful wonderland) (arknights),325 +stone walkway,325 +sniper scope,325 +single tear,325 +sign around neck,325 +shunsuke,325 +shiro (no game no life),325 +shinkyoku soukai polyphonica,325 +shima-shuu,325 +pinta (ayashii bochi),325 +miyazono kawori,325 +millie parfait,325 +mashiroiro symphony,325 +less end,325 +kodomo no hi,325 +kinako (shiratama mochi),325 +kawai makoto,325 +karochii,325 +kamen rider ryuki (series),325 +jelly bean,325 +inoue takuya (tactactak),325 +hermione granger,325 +hand grip,325 +hanazono tae,325 +gundam 08th ms team,325 +flonne (fallen angel),325 +feet on table,325 +dyun,325 +cloud hair ornament,325 +carnelian (arknights),325 +byulzzi,325 +blood+,325 +arm armor,325 +an yasuri,325 +aliza (granblue fantasy),325 +yumenosaki school uniform,324 +yu mei-ren (swimsuit lancer) (fate),324 +translucent bunnysuit,324 +tongue grab,324 +take (trude1945oneetyan),324 +sword art online: alicization - war of underworld,324 +starry moon,324 +sitting on railing,324 +sheikah slate,324 +rotated,324 +rakudai ninja rantarou,324 +prism project,324 +prince of lorasia,324 +pokemon bdsp,324 +pointing down,324 +plusle,324 +pink butterfly,324 +on water,324 +my melody,324 +milk (yes! 
precure 5),324 +mikagami mamizu,324 +kanokon,324 +jahy,324 +holding feather,324 +head hug,324 +hao (patinnko),324 +geppewi,324 +fujimaru ritsuka (female) (brilliant summer),324 +flight attendant,324 +dearmybrothers,324 +darry adai,324 +daito,324 +cyno (genshin impact),324 +crocodile,324 +cierra (ra-bit),324 +chomoran,324 +checkered haori,324 +bauble,324 +aoba (smartbeat),324 +air mattress,324 +abigail williams (festival outfit) (fate),324 +yukimoto shuuji (gurigura),323 +x-t3al,323 +wireless,323 +standing cunnilingus,323 +spicy moo,323 +spain (hetalia),323 +shohei (piranha5hk),323 +sasaki akira (ugc),323 +sakuragi matsuri,323 +sabaku chitai,323 +phone booth,323 +pegging,323 +pegasus wings,323 +pc-98 (style),323 +oukawa yuu,323 +orange sclera,323 +nns (sobchan),323 +nanami touko,323 +nakadori (movgnsk),323 +myoudou gakuen middle school uniform,323 +mata (matasoup),323 +magion02,323 +linked collar,323 +kuroda bb,323 +kureha mitsushige,323 +kujou hikari,323 +koissa,323 +kamioka shun'ya,323 +kaminari denki,323 +jan azure,323 +houkago play,323 +hk416 (black kitty's gift) (girls' frontline),323 +hita (hitapita),323 +grenade pin,323 +fox mccloud,323 +edytha rossmann,323 +daye bie qia lian,323 +d-rex,323 +crosshair pupils,323 +christmas wreath,323 +caulifla,323 +bouncing,323 +black tube top,323 +applekun,323 +aerial battle,323 +utopia,322 +tilt-shift (azalanz),322 +taki (soulcalibur),322 +takeya yuki,322 +takahashi tetsuya,322 +star censor,322 +sora to umi,322 +shiodome miuna,322 +sharp sign,322 +ruuto (sorufu),322 +rossiu adai,322 +oyaji-sou,322 +ogiue chika,322 +nargacuga (armor),322 +nanase miori,322 +mori yuki,322 +misdreavus,322 +mercury (element),322 +maru (marg0613),322 +library of ruina,322 +last period,322 +larcei (fire emblem),322 +kirisame mia,322 +kazama souta,322 +kagamine len (append),322 +jizou,322 +hat pin,322 +goe (g-o-e),322 +gingham apron,322 +fujiwara no mokou (young),322 +expressive tail,322 +clam,322 +cinderella bust,322 +boombox,322 +amami amayu,322 +al azif,322 +akechi gorou,322 +abbreviated karakusa,322 +thai commentary,321 +takeda aranobu,321 +stitched leg,321 +sheita,321 +shamir nevrand,321 +sakamoto ryuuji,321 +reiwa,321 +racing suit,321 +puyo (puyotopia),321 +prier,321 +pon de ring,321 +pinky swear,321 +pee stain,321 +nanaken nana,321 +miwano rag,321 +mismagius,321 +medicine,321 +mamemochi,321 +ling xiaoyu,321 +koromaru (persona),321 +kiyohime (swimsuit lancer) (first ascension) (fate),321 +hyakka ryouran samurai girls,321 +gun on back,321 +greek mythology,321 +fu hua (azure empyrea),321 +fengxi (the legend of luoxiaohei),321 +dragonair,321 +dagashi (daga2626),321 +breath of fire iii,321 +anya's heh face (meme),321 +amagi-chan (azur lane),321 +akiha rumiho,321 +zaizen tokiko,320 +victorian maid,320 +u-non (annon'an),320 +takemi tae,320 +side-tie dress,320 +shisei (kyuushoku banchou),320 +sakaki (noi-gren),320 +saint seiya omega,320 +rio rollins,320 +red riding hood (sinoalice),320 +oxygen mask,320 +on crescent,320 +nishida megane,320 +nekonyaa (girls und panzer),320 +mudra,320 +meiji ken,320 +mauser c96,320 +lobotomy corporation,320 +kuroba rapid,320 +konpeitou,320 +kidouko (zakusi),320 +kagamine rin (append),320 +k2isu,320 +iridescent,320 +hydreigon,320 +hanya (hanya yashiki),320 +gotou keiji,320 +cooperative handjob,320 +chiba toshirou,320 +carrot print,320 +buttercup (ppg),320 +bunbunmaru,320 +boutonniere,320 +anza tomo,320 +yuyu (00365676),319 +yusha m,319 +uneven footwear,319 +ukyo rst,319 +ukiyo-e,319 +tengu (tetuo kun),319 +super smashing summer vacation 
(umamusume),319 +shot glass,319 +shinigami,319 +public vibrator,319 +prompto argentum,319 +ortlinde (fate),319 +nina (breath of fire ii),319 +multiple anal,319 +mizin kosutin,319 +minun,319 +metagross,319 +meme50,319 +lynus,319 +long beard,319 +latin text,319 +late for school,319 +konoe konoka,319 +kitsuneno denpachi,319 +kitiroku,319 +kimeemaru,319 +kanoe soushi,319 +instagram,319 +hugging another's tail,319 +howl (howl no ugoku shiro),319 +holding drumsticks,319 +hiranko,319 +gundam 0080,319 +erkaz,319 +dildo under clothes,319 +costume chart,319 +cleaning brush,319 +between labia,319 +yoichi (umagoya),318 +wii,318 +vertical-striped jacket,318 +umino mokuzu (shizumisou),318 +ultraman (1st series),318 +tsuki tokage,318 +tales of destiny 2,318 +takano yuki (allegro mistic),318 +stuffed sheep,318 +sparkle hair ornament,318 +pony,318 +poker table,318 +ostrich,318 +orushibu,318 +ooshio kai ni (kancolle),318 +nihongami,318 +neco,318 +nagomi (mokatitk),318 +monobeno,318 +missing eye,318 +long hoodie,318 +laser sight,318 +knuckles the echidna,318 +kinfuji,318 +kinagase tsumugu,318 +keroro,318 +kaguyuzu,318 +kagura (azumanga daioh),318 +iwasaki takashi,318 +incase,318 +hijiki (hijikini),318 +h-new,318 +galleon (granblue fantasy),318 +furau,318 +food on clothes,318 +fingering from behind,318 +dog hood,318 +deras,318 +chikuwa,318 +bruised eye,318 +boxing ring,318 +barret wallace,318 +ankha (animal crossing),318 +aladdin (magi),318 +547th sy,318 +zhu xian,317 +zerocat,317 +typing,317 +tokiame,317 +starry sky bright (idolmaster),317 +shamisen,317 +saiyan,317 +sage joh,317 +mister donut,317 +metal gear solid 4,317 +matsurika youko,317 +leia rolando,317 +lazyprocrastinator,317 +kousetsu samonji,317 +kazusa (blue archive),317 +kasai shin,317 +kaedeko (kaedelic),317 +hai ookami,317 +falkyrie no monshou,317 +dragunov svd,317 +dr pepper,317 +dolls in pseudo paradise,317 +dearonnus,317 +cyicheng,317 +chiester410,317 +balance scale,317 +azumi (myameco),317 +arcie albano,317 +aogisa,317 +alternate color school swimsuit,317 +uchuu no stellvia,316 +try (lsc),316 +the little mermaid,316 +swav,316 +ssrb,316 +sniper (ragnarok online),316 +skateboarding,316 +sakura ani,316 +rudoni,316 +pinstripe vest,316 +originium (arknights),316 +monoko,316 +mitsunara,316 +minakata sunao,316 +maitetsu,316 +lass (pokemon),316 +langrisser,316 +lace-trimmed choker,316 +kuroko tetsuya,316 +kurokami fubuki,316 +knot,316 +kin'iro ryotei (umamusume),316 +ka (marukogedago),316 +icehotmilktea,316 +hoshinoumi academy uniform,316 +hishi amazon (umamusume),316 +hisau maiya,316 +hero (dq5),316 +godsworn alexiel,316 +gatau,316 +fukurou (owl222),316 +fujimoto rina,316 +fluttershy,316 +feena (grandia),316 +evil eyes,316 +dishes,316 +chounorin,316 +carnation,316 +c (control),316 +ayakumo,316 +alolan raichu,316 +ak-12,316 +white leggings,315 +weavile,315 +vivi ornitier,315 +twin (tt lsh),315 +team galactic,315 +single bridal gauntlet,315 +pink sarong,315 +pidgeot,315 +okonogi yuuko,315 +moe (hamhamham),315 +indianapolis (azur lane),315 +in bottle,315 +goomba,315 +giovanni (pokemon),315 +fujishiro emyu,315 +food as clothes,315 +flower on liquid,315 +flamingo,315 +finnish text,315 +emily stewart,315 +eagle union (emblem),315 +duoyuanjun,315 +dise,315 +bullet (blazblue),315 +bob (biyonbiyon),315 +bear paws,315 +avalon (fate/stay night),315 +anarogumaaa,315 +yaruku,314 +xia (ryugo),314 +white devil,314 +washington (kancolle),314 +uni (plastic girl),314 +trusty bell,314 +torn bra,314 +tiamat (fate),314 +tanaka mako,314 +table tennis,314 +snort,314 
+silvervale,314 +shoulder belt,314 +sanshita,314 +salt shaker,314 +ring necklace,314 +red panda ears,314 +rasahan,314 +peplos,314 +off-shoulder one-piece swimsuit,314 +nurse witch komugi-chan,314 +lulu-chan92,314 +kourindou,314 +koruse,314 +kamu (geeenius),314 +kama (second ascension) (fate),314 +interstellar,314 +hector (fire emblem),314 +haruhata mutsuki,314 +haruhara haruko,314 +gin (ginshari),314 +gensokyo,314 +flood,314 +crypton future media,314 +chaika trabant,314 +camouflage skirt,314 +byte (allbyte),314 +brief (psg),314 +boyaking,314 +blood drip,314 +ace of diamond,314 +wishiwashi,313 +valkyrie drive -mermaid-,313 +tenchisouha,313 +sweater tucked in,313 +suwa yasai,313 +sendrawz,313 +sanshoku amido,313 +rough time school (idolmaster),313 +red sun,313 +potato pot,313 +peroro (blue archive),313 +paladin (final fantasy),313 +nono (top wo nerae 2!),313 +nelson (azur lane),313 +nekometaru,313 +morinaga miki,313 +milo (pokemon),313 +machete,313 +lupinus virtual games,313 +kihara tsumugu,313 +kao no nai tsuki,313 +kamiyama aya,313 +iwamoto zerogo,313 +hjl,313 +hella p,313 +hands on own knee,313 +green bandana,313 +glove in mouth,313 +flaming skull,313 +ermes costello,313 +decora,313 +dangomushi,313 +daitai konna kanji,313 +contortion,313 +backwards virgin killer sweater,313 +ataru (cha2batake),313 +ashelia b'nargin dalmasca,313 +after war gundam x,313 +tropical,312 +tomari (veryberry00),312 +the lord of the rings,312 +terupancake,312 +team magma uniform,312 +takatsuki nato,312 +spirytus tarou,312 +sila (carpen),312 +saria (the legend of zelda),312 +revealing layer,312 +resting,312 +pataneet,312 +pantyhose removed,312 +nuzzle,312 +nam (valckiry),312 +munna,312 +mokke (mokke99),312 +male swimwear challenge,312 +llenn (sao),312 +kabocha (monkey4),312 +jiaozi,312 +ignis scientia,312 +idaten93,312 +ichigo (cookie),312 +hina ningyou,312 +hiiringu,312 +hajime (hajime-ill-1st),312 +ground shatter,312 +grandfather and grandson,312 +dancer (final fantasy),312 +cross-laced cutout,312 +criin,312 +cordelia (fire emblem),312 +chiri (atlanta),312 +black tea,312 +atelier escha & logy,312 +alolan exeggutor,312 +akino ell,312 +akashiya moka,312 +zonda (solid air),311 +wrapped up,311 +weedy (arknights),311 +urabi (tomatohouse),311 +trickster,311 +taneda yuuta,311 +suzume inui,311 +summon night 2,311 +suetake (kinrui),311 +squeezable (artist),311 +sirius symboli (umamusume),311 +satsuriku no tenshi,311 +sakura-sou no pet na kanojo,311 +safi,311 +rioka (southern blue sky),311 +ribbon-trimmed bikini,311 +raikoart,311 +pyramid head,311 +pumpkinspicelatte,311 +piglet,311 +pig tail,311 +oshiruko (uminekotei),311 +onomeshin,311 +nymph (sora no otoshimono),311 +mousou (mousou temporary),311 +monado,311 +merlin (fate/prototype) (second ascension),311 +masaru.jp,311 +lillly,311 +kyokucho,311 +kohaku (yua),311 +kintsuba (shiranui flare),311 +kawai ritsu (rits meg),311 +kamen rider wizard (series),311 +in bag,311 +igarashi kyouhei,311 +holding spring onion,311 +holding spatula,311 +hogtie,311 +gradient horns,311 +gold saint,311 +go robots,311 +ginga eiyuu densetsu,311 +fuchi (nightmare),311 +daiba nana,311 +crosier,311 +clemont (pokemon),311 +canyon,311 +buzz,311 +balalaika (black lagoon),311 +ant,311 +akizora momiji,311 +akazutsumi momoko,311 +yu-gi-oh! 
vrains,310 +yellow (among us),310 +vivi (eve no hakoniwa),310 +victory pose,310 +the yuudachi-like creature,310 +teemo,310 +task (s task80),310 +sody,310 +sinzan,310 +shoujo to ura roji,310 +shimoneta to iu gainen ga sonzai shinai taikutsu na sekai,310 +shimamura hougetsu,310 +self harm,310 +saeki tatsuya,310 +roxanne (pokemon),310 +otonashi haruna,310 +nksk,310 +mimoto (aszxdfcv),310 +makina nakajima,310 +kuurunaitsu,310 +just as planned (meme),310 +japanese tankery league (emblem),310 +hirai yuzuki,310 +henshin pose,310 +hand under skirt,310 +grate,310 +gibson les paul,310 +ekakibito,310 +driver (kamen rider),310 +dejiko,310 +cow hood,310 +comiching,310 +cirilla lin,310 +chougei (kancolle),310 +chocolate syrup,310 +bandai,310 +bag over head,310 +arakune,310 +aqua pantyhose,310 +alter servant,310 +yadapot,309 +wrist cutting,309 +valkyrie (p&d),309 +u.a. gym uniform,309 +triad primus (idolmaster),309 +toque blanche,309 +take (shokumu-taiman),309 +symphogear pendant,309 +subway,309 +stuffed lion,309 +sougetsu (yosinoya35),309 +slippers removed,309 +shiranagi,309 +shiny footwear,309 +shinkon santaku,309 +shikino yuki,309 +senju (snz0),309 +ryuugamine mikado,309 +ranf,309 +ramza beoulve,309 +protean assassin melona,309 +papyrus (undertale),309 +miyamoto musashi (third ascension) (fate),309 +meira (touhou),309 +marimo (yousei ranbu),309 +marceline abadeer,309 +mamedenkyuu (berun),309 +lace-trimmed garter belt,309 +kitami yuzu,309 +kida masaomi,309 +kaede (sayappa),309 +jikatarou,309 +isse,309 +inuyama nanami,309 +hippo (hirople),309 +hashi,309 +haori io,309 +hand focus,309 +funny valentine,309 +floating head,309 +fei (maidoll),309 +deviantart logo,309 +cross-laced slit,309 +condom left inside,309 +bomhat,309 +bili girl 33,309 +asaga aoi,309 +an-94,309 +akakage red,309 +yuuta (monochrome),308 +towel around waist,308 +thunder (girls' frontline),308 +shinguuji sakura,308 +scrape,308 +sakura mochi,308 +sailboat,308 +putting on jewelry,308 +playstation 4,308 +nanatsuiro drops,308 +nakigitsune,308 +mihama junior high school uniform,308 +mashuu masaki,308 +luigi torelli (kancolle),308 +little girl admiral (kancolle),308 +kira tsubasa,308 +kion-kun,308 +kanmiya shinobu,308 +kamen rider gaim (series),308 +kaiten muten-maru,308 +kagerou kai ni (kancolle),308 +hand on own crotch,308 +go back!,308 +girly boy,308 +fujii masahiro,308 +framed image,308 +flying nimbus,308 +fire emblem: shadow dragon,308 +falcon,308 +easy chair,308 +cutoff jeans,308 +cutiefly,308 +colo (nagrolaz),308 +book holster,308 +autoarousal,308 +ashley graham,308 +antarcticite,308 +alucard (hellsing),308 +akamaru,308 +yatsuhashi kyouto,307 +waterkuma,307 +vilde loh hocen,307 +underground,307 +unagi (kobucha blaster),307 +toto nemigi,307 +tight top,307 +staff of homa (genshin impact),307 +sohin,307 +shingo (missing link),307 +sesshouin kiara (swimsuit mooncancer) (second ascension),307 +senren banka,307 +sakurato ototo shizuku,307 +rivalry,307 +purple robe,307 +pole (ppp1409),307 +pikaole,307 +nozomi tsubame,307 +ninja (final fantasy),307 +miyamoto musashi (first ascension) (fate),307 +leaning on table,307 +knitting needle,307 +kindred (league of legends),307 +kagachi saku,307 +jiino,307 +io (granblue fantasy),307 +houjou satoshi,307 +hot vr,307 +holding creature,307 +face grab,307 +ezreal,307 +ben 10,307 +arf,307 +aponia (honkai impact),307 +anyamal tantei kirumin zoo,307 +ankkoyom,307 +tsurugi hagane,306 +tsukushi akihito,306 +tickle torture,306 +sword art online: memory defrag,306 +sunna (nilsunna),306 +stirring,306 +sprigatito,306 
+spiked knuckles,306 +snake earrings,306 +shinguuji korekiyo,306 +selphie tilmitt,306 +seitokai yakuindomo,306 +seal impression,306 +sanari (quarter iceshop),306 +running bond,306 +rex lapis (genshin impact),306 +rella,306 +quagsire,306 +putcher,306 +pac-man,306 +ouhara lolong,306 +nigirizushi,306 +nian (zhanian),306 +necktie between pectorals,306 +neck flower,306 +nanami kento,306 +mutsu (layergreen),306 +mini flag,306 +m950a (girls' frontline),306 +kittan bachika,306 +kiman,306 +kagero (fire emblem),306 +iron blood (emblem),306 +housewife,306 +hinata shouyou,306 +diane (nanatsu no taizai),306 +dede (qwea 00000),306 +cyancapsule,306 +comiket 94,306 +chestnut,306 +captain america (series),306 +blue male swimwear,306 +banpai akira,306 +yuuzii,305 +yuri (dirty pair),305 +yuni (precure),305 +yohane bonaventura,305 +xe (execut3r),305 +wendy marvell,305 +underwear writing,305 +tentacle grab,305 +tenjou ryuka,305 +tang sanzang,305 +tales of legendia,305 +tako (plastic protein),305 +takecha,305 +swimsuit lift,305 +suzuki kyoutarou,305 +sherlock holmes (fate),305 +sekaiju no meikyuu 4,305 +sayaka (saru getchu),305 +sanshuu middle school uniform,305 +sakizaki saki-p,305 +saitou (lynx-shrike),305 +ribbon-trimmed gloves,305 +ratatatat74,305 +princess bonnibel bubblegum,305 +portland (azur lane),305 +penelo,305 +painwheel (skullgirls),305 +nyifu,305 +narutomaki,305 +nanahara fuyuki,305 +minato tomoka,305 +mikeran (mikelan),305 +meitoro,305 +machine pistol,305 +kurosaki mea,305 +kujira naoto,305 +kitashirakawa anko,305 +ikaros,305 +hook hand,305 +historical event,305 +hirose (mokiki),305 +hand on ground,305 +green jumpsuit,305 +fighting game,305 +dev (dev0614),305 +chibirisu,305 +changing clothes,305 +carbuncle (puyopuyo),305 +aoi ogata,305 +akisora,305 +.me,305 +yoshino (date a live),304 +viola (seed),304 +u35,304 +turntable,304 +toyota,304 +tossing,304 +testament (guilty gear),304 +strelizia,304 +spire,304 +sino (sionori),304 +shining wind,304 +shameimaru aya (newsboy),304 +seyren windsor,304 +sengoku taisen,304 +saikin yatotta maid ga ayashii,304 +racetrack,304 +quad braids,304 +power item (touhou),304 +ooi kai ni (kancolle),304 +mitsugu,304 +lord of heroes,304 +lolidom,304 +kyouno madoka,304 +kofunami nana,304 +kezune (i- -i),304 +horkeu kamui,304 +hemokinesis,304 +hanokage,304 +haguro kai ni (kancolle),304 +fuumi (radial engine),304 +fukuda shuushi,304 +coin on string,304 +broken mask,304 +aurochs (kemono friends),304 +aubrey (omori),304 +apios1,304 +antonio salieri (fate),304 +animal hair ornament,304 +yamashiro takane,303 +wooden spoon,303 +western comics (style),303 +u-1146,303 +tennis no ouji-sama,303 +teddy (khanshin),303 +tan (tangent),303 +shoujo donburi,303 +sculpture,303 +schwarzesmarken,303 +sarashina ruka,303 +saku anna,303 +nuriko-kun,303 +ninjatou,303 +natsu dragneel,303 +mixed-language text,303 +michihasu,303 +lunch (lunch3),303 +lamb (league of legends),303 +lady (devil may cry),303 +kusanagi kyou,303 +krul tepes,303 +komatsuzaki rui (style),303 +kisw2010,303 +kirito (sao-alo),303 +keyring,303 +kazami mizuho,303 +kanda mizuki,303 +junsuina fujunbutsu,303 +jp06,303 +hyena ears,303 +honzuki no gekokujou,303 +hololive alternative,303 +hippopotamus ears,303 +himiya jouzu,303 +heian,303 +hazuki ruu,303 +hand on railing,303 +hagino kouta,303 +famicom,303 +devil may cry 4,303 +cure magical,303 +caution,303 +book on lap,303 +blue nipples,303 +batsu,303 +asahina hikage,303 +arm cutout,303 +arbok,303 +akino hamo,303 +urushi,302 +tsuujou kougeki ga zentai kougeki de ni-kai kougeki no okaasan 
wa suki desu ka?,302 +toki wo kakeru shoujo,302 +take shinobu,302 +sgk,302 +seikimatsu occult gakuin,302 +rose (street fighter),302 +praying mantis,302 +plum (arch),302 +ono misao,302 +ohishi izumi,302 +nobi nobita,302 +nejikyuu,302 +nakano sora,302 +mimikaki (men bow),302 +meteor shower,302 +mazume,302 +maccha (mochancc),302 +kannazuki hato,302 +juusan kihei bouei ken,302 +ichigo 100 percent,302 +hymen,302 +handa roco,302 +fyuo,302 +forte stollen,302 +chihunhentai,302 +blood elf (warcraft),302 +badcompzero,302 +atsushi (aaa-bbb),302 +ataru (ataru-littlebird),302 +asagami fujino,302 +aohashi ame,302 +alchera,302 +wrists extended,301 +vanripper (style),301 +urara (ckt),301 +uguisu mochi (ykss35),301 +tof,301 +tail fin,301 +spirit blossom (league of legends),301 +smelling flower,301 +shiden (sashimi no wife),301 +santa alter,301 +randou,301 +penis piercing,301 +papercraft (medium),301 +nu gundam,301 +nokuhashi,301 +nekozuki yuki,301 +nase,301 +moonlight flower,301 +misaki akeno,301 +minami yume,301 +metera (granblue fantasy),301 +mask over one eye,301 +lunch (dragon ball),301 +kyouno,301 +kiana kaslana (herrscher of flamescion),301 +kazuno sarah,301 +kajiki yumi,301 +jaho,301 +izmir,301 +iroha (shiki),301 +holding scepter,301 +hiramitsu hinata,301 +henrietta de tristain,301 +harusame (rueken),301 +h&k g11,301 +gaston18,301 +engine,301 +earth eleven,301 +dialga,301 +cutie honey,301 +comic party,301 +chouzetsusaikawa tenshi-chan,301 +cheelai,301 +beehive hairdo,301 +azumi (girls und panzer),301 +animal around neck,301 +yuihira asu,300 +yae miko (fox),300 +taiho shichauzo,300 +spork,300 +souchou,300 +snow on head,300 +shiraishi yoshitake,300 +shiosoda,300 +sakura hibiki,300 +reuniclus,300 +polarityplus,300 +penis measuring,300 +paint musume,300 +oyu udon,300 +ougi hina,300 +onnaski,300 +nonaka ritsu,300 +ninin ga shinobuden,300 +ninetales,300 +nagao kagetora (fate),300 +motoori shiro,300 +morpeko (hangry),300 +momohime,300 +locket,300 +lego,300 +keytar,300 +kagura gumi,300 +hazuki (tsukuyomi),300 +grey male underwear,300 +fukutarou (enji127),300 +foot on head,300 +ekao,300 +dynamite,300 +dress shoes,300 +cross-laced bikini,300 +clitoris ring,300 +casino card table,300 +bulleta,300 +breasts on tray,300 +bondrewd,300 +ayuayu (shouyu no sato),300 +ayasaka,300 +asmodeus (umineko),300 +allenes,300 +aleksandra i. 
pokryshkin,300 +akadako,300 +touhou gouyoku ibun,299 +togekiss,299 +tatsunami youtoku,299 +suzume miku,299 +strike witches: katayoku no majo-tachi,299 +sideroca (arknights),299 +shiro (reptil),299 +shell earrings,299 +roromiya karuta,299 +ookuma satomi,299 +ochinsama,299 +nakasone haiji,299 +mitsurugi meiya,299 +metal gear solid 3,299 +lemon print,299 +kitagawa yuusuke,299 +kise ryouta,299 +kirby 64,299 +kei (dirty pair),299 +kama (third ascension) (fate),299 +kagamine rin (cosplay),299 +hoshino souichirou,299 +honolulu (umbrella girl) (azur lane),299 +hiei kai ni (kancolle),299 +harihisa,299 +hakaba (dairiseki),299 +hajin,299 +green armor,299 +fue (rhomphair),299 +faputa,299 +ebisu eika,299 +dragon kid,299 +dragapult,299 +chakabo,299 +araki hina,299 +aketa mikoto,299 +yamiochi umekichi,298 +yagokoro,298 +welding mask,298 +tsuyadashi shuuji,298 +tama (songe),298 +sleeveless turtleneck crop top,298 +shiitake nabe tsukami,298 +shian (my lonly life.),298 +sasa kichi,298 +ryoma (fire emblem),298 +rimuu,298 +purple sarong,298 +pullover,298 +pokedex,298 +one (cevio),298 +ogami tamaki,298 +nijino yume,298 +nekki basara,298 +myusha,298 +lowleg pantyhose,298 +koyama hirokazu,298 +ikeda shouko,298 +hou (hachiyou),298 +hotel room,298 +holding marker,298 +green ascot,298 +gokuhara gonta,298 +glowing mouth,298 +glass door,298 +gainaxtop,298 +estelle bright,298 +doyora,298 +dk.senie,298 +daitaku helios (umamusume),298 +captain nemo (fate),298 +black veil,298 +black lady,298 +beige coat,298 +aumann,298 +ambience synesthesia,298 +akatsuki uni,298 +a.i. voice,298 +yuki miku (2014),297 +yamakawa,297 +washbowl,297 +terraria,297 +spool,297 +southern italy (hetalia),297 +skeletal arm,297 +sentai,297 +sekiro,297 +sandstar,297 +sameda koban,297 +roller coaster,297 +removing eyewear,297 +orange horns,297 +oil lamp,297 +nez-box,297 +nejikirio,297 +nakai (waitress),297 +misekai 555,297 +mikomachi (35machi),297 +mazjojo,297 +luke pearce (tears of themis),297 +kiana kaslana (white comet),297 +kawakaze kai ni (kancolle),297 +katatsuka kouji,297 +kasumi (princess connect!),297 +johan (johan13),297 +irony,297 +iced latte with breast milk (meme),297 +hakui ami,297 +guts (kill la kill),297 +furen e lustario,297 +fishnet sleeves,297 +feraligatr,297 +fart,297 +faceoff,297 +everlasting summer,297 +ebina nana,297 +destroyer,297 +d4dj,297 +coffret (heartcatch precure!),297 +camping,297 +bunny slippers,297 +bukkuri,297 +board eraser,297 +asobi ni iku yo!,297 +alleyne (queen's blade),297 +advanced nurturing high school uniform,297 +zetsumame,296 +yukineko1018,296 +willard h. 
wright,296 +vibrator in thigh strap,296 +urbosa,296 +terminator (series),296 +spoken expression,296 +rumi (girls und panzer),296 +rotix,296 +reina prowler,296 +rachel gardner,296 +paopao,296 +padded coat,296 +ouran high school host club,296 +nelliel tu odelschwanck,296 +nejime,296 +naoki miki,296 +moetan,296 +miruto netsuki,296 +minosu,296 +m4 sopmod ii,296 +kiddy grade,296 +itatatata,296 +hyurasan,296 +hinami (hinatamizu),296 +himeno (chainsaw man),296 +greek text,296 +glasses nun (diva),296 +giraffe print,296 +galaktika,296 +franziska von karma,296 +fal (girls' frontline),296 +facebook logo,296 +emori miku,296 +cube85,296 +cocktail umbrella,296 +cirenk,296 +chiaki rakutarou,296 +bunk bed,296 +bluefield,296 +beudelb,296 +bell (oppore coppore),296 +battery,296 +asterisk kome,296 +araido kagiri,296 +alternate footwear,296 +alison (alison airlines),296 +zouni soup,295 +zinno,295 +z.taiga,295 +yoshito,295 +white bandana,295 +wakamo (swimsuit) (blue archive),295 +veronica (dq11),295 +tsuyuri kumin,295 +tsunetsuki matoi,295 +tsareena,295 +striped pajamas,295 +shundou heishirou,295 +seo jaiil,295 +scientist,295 +sakurai energy,295 +saitou ena,295 +provence (arknights),295 +penis on pussy,295 +p19,295 +nora wanko,295 +nishikitaitei-chan,295 +nidalee,295 +murabito c,295 +moribe (rabumanyo),295 +mochiya marosuke,295 +menat,295 +meiya neon,295 +marie antoinette (swimsuit caster) (fate),295 +m14 (girls' frontline),295 +lizardman,295 +kotona elegance,295 +koala,295 +kitano (kitanosnowwhite),295 +kanamori sayaka,295 +izumi mahiru,295 +ishihara masumi,295 +holding bone,295 +hati105,295 +grappler baki,295 +gishu,295 +gift wrapping,295 +fudou akio,295 +crocodile (one piece),295 +bisharp,295 +bili girl 22,295 +bear paw hammer,295 +yokune ruko,294 +wainscoting,294 +usugumo (kancolle),294 +tuxedo de cat,294 +tsukamoto minori,294 +tsu (lovesick1964),294 +torn kimono,294 +tonari no kyuuketsuki-san,294 +tonami yuma,294 +titanfall 2,294 +takeashiro,294 +takano natsuki,294 +spread armpit,294 +souya (kancolle),294 +socks over pantyhose,294 +scar on hand,294 +right-to-left comic,294 +pinecone,294 +ohitashi netsurou,294 +nimura yuuji,294 +nasuno (nasuno42),294 +miyuki ruria,294 +mieharu,294 +metal gloves,294 +matsumi yuu,294 +master 4 (housamo),294 +luviagelita edelfelt,294 +lotosu,294 +lloule,294 +kuromayu,294 +kiyal bachika,294 +kinosuke (sositeimanoga),294 +kanola u,294 +k/da ahri,294 +hanomido,294 +gocoli,294 +gakuen utopia manabi straight!,294 +fire keeper,294 +final fantasy i,294 +crimson viper,294 +clothed bath,294 +chloe (pokemon),294 +cat panties,294 +asami sato,294 +acronym,294 +zono (inokura syuzo029),293 +yuyaiyaui,293 +yune (fire emblem),293 +veronica (fire emblem),293 +usatsuka eiji,293 +tsukineko,293 +totatokeke,293 +tenco's story,293 +subarashiki kono sekai,293 +sogaya,293 +sock dangle,293 +snowball fight,293 +shirayuki tomoe,293 +seelie (genshin impact),293 +satou lilly,293 +purin jiisan,293 +princess lover,293 +pretty rhythm rainbow live,293 +pizzasi,293 +pincushion,293 +palkia,293 +noeyebrow (mauve),293 +niwamaru (niwarhythm),293 +nina (breath of fire v),293 +naik,293 +mist (fire emblem),293 +miki sayaka (cosplay),293 +luminyu,293 +large buttons,293 +kukui (pokemon),293 +jigen daisuke,293 +hiroyuki,293 +heavenly boat maanna,293 +gravity falls,293 +fuyou kaede,293 +cure aqua,293 +cedar (artist),293 +butterfly-shaped pupils,293 +bicycle helmet,293 +athrun zala,293 +aijou karen,293 +wringing skirt,292 +wallace (pokemon),292 +vintage microphone,292 +ushiromiya george,292 +titania (sao),292 
+tenugui,292 +tailjob,292 +swing set,292 +super sailor chibi moon,292 +stamp mark,292 +sowel (sk3),292 +snow halation,292 +sledgehammer,292 +shorts under shorts,292 +shin megami tensei iii: nocturne,292 +selcky,292 +sawachika eri,292 +russian clothes,292 +purple sister,292 +print apron,292 +organs,292 +nero (devil may cry),292 +nerine,292 +mirage (apex legends),292 +masterwork apocalypse,292 +majima gorou,292 +m1903 springfield,292 +lillithlauda,292 +kuonji alice,292 +kirishima kai ni (kancolle),292 +kazami karasu,292 +kai-ri-sei million arthur,292 +kaguya madoka,292 +jiffic,292 +imitating,292 +ico,292 +hyrule warriors: age of calamity,292 +horokusa (korai),292 +hokuro ryuuseigun,292 +hiyori (rindou66),292 +himura moritaka,292 +hands on floor,292 +go-1,292 +futaba sana,292 +futaba aoi (vividred operation),292 +fall guys,292 +detonator,292 +cinderella,292 +anzio (emblem),292 +anchor ornament,292 +alien (movie),292 +akishima kei,292 +akata itsuki,292 +akanagi youto,292 +aikir (jml5160),292 +wall-eyed,291 +vladilena millize,291 +touhoku itako,291 +torn bodystocking,291 +tied to chair,291 +thigh belt,291 +sub-res,291 +snapping fingers,291 +signora (genshin impact),291 +shooting glasses,291 +sakuma jirou,291 +ryo (liver sashi daisuki!),291 +oogami sakura,291 +okunoda miyoi,291 +nosuku,291 +nakkar,291 +monsieur,291 +marionette,291 +mane,291 +kotori (takanashi kiara),291 +koishi komeiji's heart-throbbing adventure,291 +katsuobushi (eba games),291 +katiko,291 +karimei,291 +kantaka,291 +kamen rider blade (series),291 +kakao (chocolate land),291 +juexing (moemoe3345),291 +hoshina tomoko,291 +holding skirt,291 +green sky,291 +gorilla (bun0615),291 +fukuroumori,291 +emil chronicle online,291 +egg yolk,291 +e volution,291 +deedlit,291 +cicada,291 +chiton,291 +chimame-tai,291 +chacha (fate),291 +brll,291 +aqua lips,291 +aoi kujira,291 +amonitto,291 +american football uniform,291 +yuku (kiollion),290 +wetland,290 +waiting,290 +vermeil (arknights),290 +toi8,290 +tile roof,290 +thrud (fate),290 +suisogenshi,290 +silent hill 3,290 +shrine bell,290 +science,290 +sala mander,290 +sail,290 +retsumaru,290 +ootori emu,290 +nishimura eri,290 +nest,290 +nekoshoko,290 +moyashimon,290 +marida cruz,290 +maou beluzel,290 +mahou shoujo oriko magica,290 +kuoh academy school uniform,290 +kogatarou,290 +kochiya (gothope),290 +kazabuki poni,290 +jagabutter,290 +jaco,290 +hot air balloon,290 +food between breasts,290 +fatal frame 2,290 +emma (yakusoku no neverland),290 +eargasm,290 +dynamic pose,290 +don (29219),290 +demizu posuka,290 +cnm,290 +clash,290 +chiyo (pk19981234),290 +chiester45,290 +charging forward,290 +catfight,290 +caterpillar,290 +candice (pokemon),290 +axent wear,290 +ar (rikuesuto),290 +aegislash,290 +2011 sendai earthquake and tsunami,290 +yude,289 +vanilla h,289 +undone neckerchief,289 +studded garter belt,289 +sidonia no kishi,289 +shirako (kirupi),289 +shinomu (cinomoon),289 +shadow of the colossus,289 +sesield,289 +samsung,289 +rerrere,289 +pui pui molcar,289 +ppk (girls' frontline),289 +popopoka,289 +otegine,289 +orion (orionproject),289 +nyori,289 +nekoi mie,289 +nagumo haruya,289 +mx2j (nsh6394),289 +multicolored capelet,289 +marie rose (devilish servant against the splashing waves),289 +mahoujin guruguru,289 +lina kontiola,289 +liechtenstein (hetalia),289 +kokuriu,289 +kamizaki risa,289 +k2 (girls' frontline),289 +ichikawa masahiro,289 +horizontal bar,289 +holding suitcase,289 +hip armor,289 +hiiragi shinoa,289 +hatsune miku (nt),289 +hajika,289 +grey horns,289 +florina (fire emblem),289 +felicia 
(fire emblem),289 +creamer (vessel),289 +crazy diamond,289 +costume request,289 +cilan (pokemon),289 +chorefuji,289 +brook (one piece),289 +brain freeze,289 +block,289 +bellhenge,289 +bankoku ayuya,289 +anabuki tomoko,289 +xinzoruo,288 +winking (animated),288 +ushiwakamaru (swimsuit assassin) (fate),288 +tokisaki asaba,288 +svd (girls' frontline),288 +soundwave (transformers),288 +shaved head,288 +sewing pin,288 +scamp (kancolle),288 +sailor pluto,288 +riesbyfe stridberg,288 +ranma (kamenrideroz),288 +push-ups,288 +photokano,288 +percival (granblue fantasy),288 +oiran,288 +norizc,288 +moebell,288 +mizuyan,288 +mitsuki yuuya,288 +midori foo,288 +meto31,288 +mc axis,288 +mayu (vocaloid),288 +maccha,288 +lilith (saikin yatotta maid ga ayashii),288 +latex boots,288 +koohee,288 +knocking,288 +holding bird,288 +gyaruo,288 +great ball,288 +glaucus (arknights),288 +gibagiba,288 +furniture,288 +floating fortress (kancolle),288 +exaxuxer,288 +enosan,288 +crash,288 +brown mittens,288 +banana boat,288 +yuzuki roa,287 +yagami (mukage),287 +wet pantyhose,287 +usume shirou,287 +urotsuki,287 +umiroku,287 +tsuchiya (girls und panzer),287 +tosa (azur lane),287 +tenkubashi tomoka,287 +takeyuu,287 +summer wars,287 +stormcow,287 +sk8 the infinity,287 +sheffield (kancolle),287 +ruteko (ruko220),287 +reimu endou,287 +pixiv logo,287 +perepere-kun,287 +panties on penis,287 +panties in mouth,287 +no headgear,287 +nishino flower (umamusume),287 +nicoby,287 +narynn,287 +mighty no. 9,287 +m14,287 +listener (inugami korone),287 +keiko (mitakarawa),287 +jadenkaiba,287 +imai kazunari,287 +heavy cruiser princess,287 +hana (pangya),287 +grenade hair ornament,287 +green322,287 +gearous,287 +gaiko kujin,287 +eyebrows visible through hat,287 +elysia de lute ima,287 +dorothy (princess principal),287 +cutlass (sword),287 +corrupted file,287 +cool & sexy (idolmaster),287 +clair (pokemon),287 +buster shirt,287 +brigitte (overwatch),287 +bakuon!!,287 +assassin (ragnarok online),287 +arm over shoulder,287 +10eki (tenchou),287 +zas m21 (girls' frontline),286 +zarya (overwatch),286 +xblaze code: embryo,286 +wrist flower,286 +vivid strike!,286 +traffic mirror,286 +tokyo tower,286 +tenrai,286 +sword print,286 +st. 
lucia academy school uniform,286 +spiked penis,286 +spidu,286 +seliph (fire emblem),286 +rattata,286 +puririn,286 +puffy chest,286 +pu-en,286 +poco (backboa),286 +placard,286 +osisio,286 +ookami ryouko,286 +okumura yukio,286 +nyto (girls' frontline),286 +ninai,286 +nero claudius (bride) (third ascension) (fate),286 +neck grab,286 +monarch (azur lane),286 +mitake eil,286 +menpoo,286 +marugoshi teppei,286 +macross frontier: itsuwari no utahime,286 +m16a1 (boss) (girls' frontline),286 +louise (touhou),286 +koflif,286 +kitayama miuki,286 +kimono removed,286 +jadf,286 +iumi urura,286 +inu sakuya (nejikirio),286 +hoppip,286 +holding ice cream,286 +holding another's head,286 +hikaru no go,286 +hey taisyou,286 +hedge,286 +head steam,286 +hataraku saibou black,286 +hakamada hinata,286 +green collar,286 +gomamiso (gomamiso sp),286 +girl cafe gun,286 +fu hua (phoenix),286 +eyes in shadow,286 +dungeon travelers 2,286 +despair,286 +crobat,286 +conch,286 +ch'en the holungday (arknights),286 +bounsweet,286 +blue-senpai,286 +balaclava,286 +ast,286 +ability card (touhou),286 +yae sakura (goushinnso memento),285 +wet face,285 +vest over shirt,285 +underskirt,285 +tiemu (man190),285 +thor (marvel),285 +takatsuki ichika,285 +takamine nadare,285 +taisho (gumiyuki),285 +swirl,285 +skj,285 +shield print,285 +savannah,285 +rejection,285 +potg (piotegu),285 +pink feathers,285 +penthesilea (fate),285 +origami cyclone,285 +nagayoshi subaru,285 +monkey boy,285 +mitsumine mashiro,285 +minami saki,285 +miku day,285 +kuroinu ~kedakaki seijo wa hakudaku ni somaru~,285 +ki (kk-sk-ray),285 +jas,285 +inue shinsuke,285 +head on table,285 +hands on legs,285 +gomzi,285 +gacha-m,285 +friday the 13th,285 +elfen lied,285 +dororo (tezuka),285 +denki showgun,285 +curly brace,285 +cupping hand,285 +cunnilingus through clothes,285 +cosmic (crownclowncosmic),285 +chaki (teasets),285 +bowalia,285 +ashita (2010),285 +yutamaro,284 +wing ornament,284 +weather report,284 +warframe,284 +utage (summer flowers) (arknights),284 +tsukigami runa,284 +tomoyuki kotani,284 +tied sleeves,284 +text in eyes,284 +swordsman (ragnarok online),284 +sheep nun (diva),284 +saizu nitou gunsou,284 +rumble roses,284 +putting on shoes,284 +purple headband,284 +pumpkaboo,284 +powerlesssong,284 +portal 1,284 +peni parker,284 +ouendan,284 +mortarboard,284 +mizuta kenji,284 +miyako (naotsugu),284 +mejiro palmer (umamusume),284 +masaki sasami jurai,284 +makai tenshi djibril,284 +kounosu satori,284 +kingprotea (fate),284 +kienbiu,284 +katagiri hachigou,284 +kampfer,284 +kamen rider kuuga,284 +iwatani naofumi,284 +hinoue itaru,284 +hazuki nagisa,284 +hagiya masakage,284 +cu chulainn (fate/prototype),284 +christian louboutin (brand),284 +chigasaki yukari,284 +bola (weapon),284 +bad end precure,284 +azure striker gunvolt,284 +10mo,284 +white tube top,283 +white jumpsuit,283 +wasabi (sekai),283 +virtual on,283 +vertical-striped gloves,283 +vashperado,283 +tj-type1,283 +the saga of darren shan,283 +the emperor (arknights),283 +tawawa challenge,283 +stella unibell,283 +steampunk (liarsoft),283 +stealth masturbation,283 +ssambatea,283 +smelling penis,283 +rune factory 4,283 +resort boin,283 +pokemon: twilight wings,283 +paradeus,283 +ohland,283 +nora valkyrie,283 +murasaki shikibu (swimsuit rider) (fate),283 +multiple hat bows,283 +monocle chain,283 +mizuhara yuu,283 +melody (projektmelody),283 +megumiya,283 +maruma (maruma gic),283 +kiss day,283 +kieyza,283 +jyt,283 +julia (fire emblem),283 +joy (pokemon),283 +ishikawa luna,283 +hori masayuki,283 +heart hair bun,283 +heart 
eyepatch,283 +hatching,283 +handrail,283 +gurren-lagann,283 +fuusuke (fusuke208),283 +elona,283 +doxy,283 +dildo under panties,283 +deal with it (meme),283 +cure mint,283 +cthulhu,283 +courreges ace,283 +copano rickey (umamusume),283 +chung seiker,283 +captured,283 +captive bead ring,283 +cannibalism,283 +calcio,283 +bonnie (rsg),283 +blue gk,283 +between buttocks,283 +baking sheet,283 +amai nekuta,283 +adiane,283 +abe yoshitoshi,283 +zone of the enders,282 +yuuki akira,282 +yagami kou,282 +yabuki kana,282 +xretakex,282 +wooden box,282 +wolfrun,282 +witch mercy,282 +watao,282 +vento,282 +van gogh (fate),282 +tsuruga school uniform,282 +tenkuu no crystalia,282 +suna (sandworks),282 +shoulder cape,282 +shattered,282 +santa matsuri,282 +repede (tales),282 +red bandeau,282 +pokemon on arm,282 +negi (ulog'be),282 +naufaldreamer,282 +natsumi akira,282 +milim nava,282 +meliodas,282 +longcat (meme),282 +koban (gold),282 +kakine teitoku,282 +injection,282 +hyper blossom,282 +houkago teibou nisshi,282 +honekoneko (psg),282 +hirune (konekonelkk),282 +hatoyama itsuru,282 +grey cat,282 +grandfather clock,282 +fujimoto hideaki,282 +floating headgear,282 +face between breasts,282 +diary,282 +cure chocolat,282 +censored food,282 +awakened miki,282 +artwhirl mahou gakuen no otome-tachi,282 +akaneya,282 +zipper dress,281 +yuki miku (2017),281 +vanripper,281 +toriniku senshi chikinman,281 +togata mirio,281 +text censor,281 +takabushi kengo,281 +suenari (peace),281 +splat roller (splatoon),281 +single-shoulder dress,281 +shiratsuki shiori,281 +shin strap,281 +red shawl,281 +racing miku (2022),281 +purple border,281 +oosuki mamako,281 +nome (nnoommee),281 +nanakusa nichika,281 +moyori,281 +mortal kombat,281 +momomiya ichigo,281 +laces,281 +kuroda kazuya,281 +kuroda akimi,281 +kousaka china,281 +kaenuco,281 +jewelpet twinkle,281 +hoshikuzu witch meruru,281 +horibe hiderou,281 +hirunagi,281 +hasumi (gym uniform) (blue archive),281 +hashimoto nyaa,281 +hanged,281 +hagane soushi,281 +goose,281 +glass writing,281 +female gunner (dungeon and fighter),281 +eye print,281 +doppelganger,281 +diamond clan outfit,281 +dead by daylight,281 +dduck kong,281 +dark orb (madoka magica),281 +crumpled paper,281 +crimvael,281 +collagen,281 +citrus (saburouta),281 +chikuma kai ni (kancolle),281 +chamupei,281 +c.c. 
lemon (character),281 +busou renkin,281 +bumblebee (transformers),281 +bucket on head,281 +brave girl ravens,281 +aqua one-piece swimsuit,281 +amane misa,281 +african penguin (kemono friends),281 +abua,281 +aardwolf (kemono friends),281 +3four,281 +zundarepon,280 +yusa emi,280 +yellow lips,280 +vehicle request,280 +ukokkei,280 +tsurumi chiriko,280 +tsukioka tsukiho,280 +tenki no ko,280 +tama (tama-s),280 +t k g,280 +sumisaki yuzuna,280 +smash invitation,280 +saitou hajime (fate),280 +s-now,280 +prince of samantoria,280 +prime,280 +oka mochi,280 +naitou kouse,280 +nabe,280 +mystia lorelei (bird),280 +mogiki hayami,280 +mameeekueya,280 +loading screen,280 +layered gloves,280 +large ears,280 +koffing,280 +kitazinger,280 +joker (card),280 +iwato kasumi,280 +huei nazuki,280 +houzouin oniwaka,280 +holding plant,280 +hiroe rei,280 +hellandheaven,280 +hair scarf,280 +gaap (umineko),280 +fuji kiseki (umamusume),280 +flower bed,280 +distortion,280 +cub,280 +chocoan,280 +ceylon (arknights),280 +braces,280 +bloodhound (apex legends),280 +beelzebub (umineko),280 +allen walker,280 +98-tan,280 +yqgkg,279 +wonder festival,279 +vanguard princess,279 +u yuz xx,279 +twumi,279 +the evil within,279 +tamamon,279 +shirayuki mizore,279 +shibori kasu,279 +shelf bra,279 +scolipede,279 +safety glasses,279 +riv (rivleaf),279 +rikumaru,279 +reisen udongein inaba (cosplay),279 +racer (magnet),279 +q azieru,279 +pururut,279 +prince,279 +pinky ring,279 +penny polendina,279 +paya (zelda),279 +orenchi no meidosan,279 +nanami haruka,279 +money gesture,279 +mizushima saki,279 +miramikaru riran,279 +miltank,279 +long scarf,279 +kuromi,279 +kshimu,279 +kinnikuman,279 +kamichu!,279 +k (anime),279 +jason voorhees,279 +holding crossbow,279 +hisayuki hirokazu,279 +hayakawa akari,279 +goribote,279 +gisyo,279 +garnish,279 +fiery background,279 +factory,279 +electro emilia,279 +dying message,279 +creator (ragnarok online),279 +chawan (yultutari),279 +carriage,279 +captain (honkai impact),279 +buckler,279 +bucket of water,279 +bailingxiao jiu,279 +azuki azusa,279 +akita hika,279 +aka ringo,279 +adjusting goggles,279 +yomiko readman,278 +watanabe yoshihiro,278 +test,278 +sushio,278 +striped pillow,278 +ryuuzaki umi,278 +rokudenashi majutsu koushi to akashic record,278 +paula (mother 2),278 +pathfinder (apex legends),278 +nekono rin,278 +nanase meruchi,278 +naked belt,278 +mortar,278 +morimoto kiyona,278 +mizoguchi keiji,278 +misaka shiori,278 +mega man x dive,278 +matsumotoke,278 +leblanc (league of legends),278 +kurono yuu,278 +kokutou mikiya,278 +kibanda gohan,278 +kesoshirou,278 +itai no wa iya nano de bougyoryoku ni kyokufuri shitai to omoimasu,278 +igawa sakura,278 +i-400 (kancolle),278 +hitowa,278 +hasshaku-sama,278 +hand fan writing,278 +grand theft auto,278 +g-spring goddess (ishiyumi),278 +furious,278 +destroyer (girls' frontline),278 +color drain,278 +bwell,278 +bust measuring,278 +black sarong,278 +beige pants,278 +beelzebub (helltaker),278 +artoria pendragon (lancer alter) (royal icing) (fate) (cosplay),278 +amaryllis gumi,278 +alternate muscle size,278 +40010prototype,278 +wicke (pokemon),277 +viy (fate),277 +tousen,277 +toriel,277 +too many sex toys,277 +takishima asaka,277 +taihou (temptation on the sea breeze) (azur lane),277 +starfire,277 +smeared lipstick,277 +ski goggles,277 +shishiou,277 +scarlet ibis (kemono friends),277 +sakamoto mineji,277 +saemon (tonpura),277 +print eyepatch,277 +pokemon rgby (prototype),277 +pina korata,277 +phantasy star portable 2,277 +nikuku (kazedesune),277 +nightstand,277 +nextoad,277 
+makimura shunsuke,277 +luger p08,277 +luca kaneshiro,277 +liru,277 +lipps (idolmaster),277 +lefthand,277 +kiryuu moeka,277 +kinako (40hara),277 +kaede (yumesaki kaede),277 +ka-class submarine,277 +ipuu (el-ane koubou),277 +inu (marukome),277 +houston (kancolle),277 +hoshi (snacherubi),277 +hecatia lapislazuli (earth),277 +groudon,277 +grey cloak,277 +endou okito,277 +ear down,277 +doomguy,277 +ddal,277 +columbina (genshin impact),277 +clown nose,277 +bluebird,277 +blowing smoke,277 +ballistic shield,277 +aoyama sumika,277 +ao jun,277 +allen avadonia,277 +zeta gundam (mobile suit),276 +yoga,276 +uzumaki himawari,276 +ushiyama ame,276 +tedeza rize's school uniform,276 +sweets bird,276 +stuffing,276 +shiwa (siwaa0419),276 +seisenshi dunbine,276 +scooby-doo,276 +sara (granblue fantasy),276 +salpin,276 +saishi,276 +saikawa riko,276 +peach maki,276 +nipple torture,276 +neko (ganecooo),276 +mutou kurihito,276 +morgana (league of legends),276 +minaha (playjoe2005),276 +matsunaga ryo,276 +maou-jou de oyasumi,276 +makai no juumin,276 +linhardt von hevring,276 +kenuu (kenny),276 +janna (league of legends),276 +holding rope,276 +hand on hand,276 +gym challenge uniform,276 +gunner (sekaiju),276 +garlean,276 +fanning crotch,276 +emon-yu,276 +ctrlz77,276 +corsola,276 +cinque (nanoha),276 +blue sports bra,276 +araiguma-san,276 +adeleine,276 +2004,276 +yuunamida uyu,275 +yamato suzuran,275 +yamamomo (plank),275 +warabeda meijii,275 +tomotsuka haruomi,275 +tokyo exe girls,275 +tateishi kureha,275 +tapir,275 +tales of destiny,275 +stick figure,275 +spraying,275 +sora (blue archive),275 +sonoda yuu,275 +shoumaru (gadget box),275 +shauntal (pokemon),275 +sanada clan (emblem),275 +rolling girl (vocaloid),275 +ribeyrolles (girls' frontline),275 +ren san,275 +railgun,275 +qian wu atai,275 +pipa (instrument),275 +palette swap,275 +ohara hiroki,275 +nightingale (arknights),275 +nibiiro shizuka,275 +negi springfield,275 +nazuna (hidamari sketch),275 +my dear vampire (idolmaster),275 +mutsutake,275 +mofuaki,275 +mitsu (mitsu art),275 +loba (apex legends),275 +lifeguard,275 +li shuwen (fate),275 +kyoeiki,275 +kitajima yuuki,275 +kidmo,275 +kawakami masaki,275 +ka2,275 +honolulu (kancolle),275 +holding candle,275 +hifumi (swimsuit) (blue archive),275 +haruka natsuki,275 +hamburger steak,275 +frankenstein's monster (cosplay),275 +fortune arterial,275 +flirting,275 +edea lee,275 +ea (fate/stay night),275 +cai geng,275 +blue hawaii,275 +asakura rikako,275 +aqua vest,275 +amibazh,275 +77gl,275 +witch craft works,274 +window1228,274 +vampire (vocaloid),274 +soyosoyo,274 +souta (karasu no ouchi),274 +shigunyan,274 +scissorhold,274 +salamence,274 +opagi,274 +noise,274 +nirap,274 +nikku (ra),274 +morio (poke orio),274 +mayo (becky2006),274 +matsu-sensei,274 +marchen,274 +macross frontier: sayonara no tsubasa,274 +little blue whale (kancolle),274 +kiwi slice,274 +kiryuu kazuma,274 +kimidori emiri,274 +jairou,274 +hollow knight,274 +hasebe yuusaku,274 +green pajamas,274 +goblin slayer,274 +gainax,274 +futon (kitsune tsuki),274 +freng,274 +floating breasts,274 +fallenshadow,274 +eunos,274 +dragon's dogma,274 +dr. 
slump,274 +dabi (boku no hero academia),274 +brown shawl,274 +bloody marie (skullgirls),274 +blaze the cat,274 +black sweater vest,274 +belphegor (umineko),274 +baton,274 +bastet (p&d),274 +bakuya,274 +asama tomo,274 +akira (kadokawa),274 +akatsuki (log horizon),274 +xin yu hua yin,273 +watsuki ayamo,273 +uno ryoku,273 +the only shoe,273 +teoi (good chaos),273 +spoken zzz,273 +sogawa,273 +saiba (henrietta),273 +ronove (umineko),273 +p-chan,273 +overman king gainer,273 +oomiya shinobu,273 +oohira sunset,273 +oekaki musume,273 +nyonn24,273 +nou (nounknown),273 +natalia luzu kimlasca lanvaldear,273 +nakigitsune's fox,273 +miya (tsumazukanai),273 +mario (cosplay),273 +maaryan (to heart),273 +kochiya sanae (cosplay),273 +kkopoli,273 +kamotama,273 +jack-o'-lantern earrings,273 +iv stand,273 +inuarashi,273 +iijima yun,273 +hyde (tabakko),273 +hanarito,273 +hachi (8bit canvas),273 +green buruma,273 +goback,273 +fn scar,273 +female orc,273 +fal maro,273 +defy (girls' frontline),273 +david martinez,273 +cure macaron,273 +carina (xiaowoo),273 +black gold saw,273 +bazooka (gundam),273 +bald girl,273 +archetype earth,273 +zanamaoria,272 +yusa (angel beats!),272 +youtube logo,272 +whale hat,272 +wet legwear,272 +ursula charistes,272 +ultra kaijuu gijinka keikaku,272 +udagawa ako,272 +trente,272 +suzumura sango,272 +sugina miki,272 +spoken x,272 +soul evans,272 +sled,272 +shoulder plates,272 +shimure (460),272 +shigure ui,272 +ridy (ri sui),272 +rainbow dash,272 +ppshex,272 +open bikini,272 +oleana (pokemon),272 +naked skirt,272 +miki purasu,272 +mibry (phrysm),272 +maxie (pokemon),272 +kuzu kow,272 +kazawa (tonzura-d),272 +karen (pokemon),272 +kani club,272 +ji dao ji,272 +ichio,272 +hood (james x),272 +hoe,272 +himura kenshin,272 +hands on another's back,272 +haku (sen to chihiro no kamikakushi),272 +german suplex,272 +futaba miwa,272 +fizintine,272 +fighting master alleyne,272 +farm,272 +ex idol,272 +cleats,272 +bound thighs,272 +bomber,272 +banana (girls' frontline),272 +baku taso,272 +axsen,272 +angel devil (chainsaw man),272 +anastasia (swimsuit archer) (fate),272 +amakusa juuza,272 +adjusting shoe,272 +yukino minato,271 +white veil,271 +victini,271 +velvet (odin sphere),271 +tomari mari,271 +tied ears,271 +tenka touitsu chronicle,271 +teletha testarossa,271 +taichou haori,271 +tactile paving,271 +stuffed bird,271 +strong zero,271 +slow start,271 +sido (slipknot),271 +shidou hikaru,271 +salmon88,271 +ringozaka mariko,271 +polka dot pajamas,271 +pink sclera,271 +noctchill (idolmaster),271 +naga the serpent,271 +monicanc,271 +mind reading,271 +memory,271 +l'ecole des cinq lumieres school uniform,271 +kamen rider double,271 +idolmaster xenoglossia,271 +hayanami (kancolle),271 +gochou (kedama),271 +ghost print,271 +frilled sash,271 +feena fam earthlight,271 +facing up,271 +e16a zuiun,271 +drew (drew213g),271 +cure lemonade,271 +colis,271 +bokujou monogatari,271 +black gothic dress (idolmaster),271 +bell earrings,271 +awakening (sennen sensou aigis),271 +akishimo (kancolle),271 +aki663,271 +akamoku,271 +ahiru (princess tutu),271 +white romper,270 +wakino keibun,270 +virtuous contract,270 +velvet scarlatina,270 +ushiromiya kyrie,270 +tsuji kazuho,270 +tanaka rikimaru,270 +stu dts,270 +shiver (splatoon),270 +scanlines,270 +saunders (emblem),270 +sat-c,270 +ryuji (ikeriu),270 +rito453,270 +rinto (rint rnt),270 +rib:y(uhki),270 +pyz (cath x tech),270 +purple sclera,270 +power girl,270 +ophelia phamrsolone,270 +nyanya,270 +nanase aoi,270 +musen-shiki sanhankikan,270 +mochitsuki,270 +mitsurugi sugar,270 +maho 
moco,270 +mabinogi heroes,270 +kurebayashi noe,270 +kudou fuyuka,270 +kousaka yukiho,270 +kiriya (552260),270 +kawarajima kou,270 +kaoming,270 +iwado anna,270 +ishikirimaru,270 +hisuian zorua,270 +helena (kancolle),270 +hakuryou high school uniform,270 +green umbrella,270 +doujima ryoutarou,270 +djmax,270 +dew drop,270 +dera mochimazzui,270 +deemo,270 +danball senki,270 +chikotam,270 +bioshock (series),270 +armpit focus,270 +ana (rznuscrf),270 +amo (shibu3),270 +akiyama rinko,270 +airborne,270 +yaso shigeru,269 +voyager (fate),269 +toriyama akira (style),269 +takaramonozu,269 +taa (acid),269 +super sass (girls' frontline),269 +stalking,269 +silver footwear,269 +shizuna kaede,269 +seras victoria,269 +selection university (emblem),269 +sawaizumi chiyu,269 +sasamori karin,269 +saihate (vocaloid),269 +rhapsody,269 +rakudai kishi no cavalry,269 +pointy hat,269 +pink fire,269 +ogre,269 +nuka (nvkka),269 +nt-d,269 +nori (seaweed),269 +nanakusa hazuki,269 +mochi.f,269 +mascot costume,269 +maihama ayumu,269 +lao jiu,269 +kuhotaka,269 +kudou shin'ichi,269 +kore ga watashi no goshujin-sama,269 +kimizuka aoi,269 +kibushi,269 +kei (bekei),269 +kagayaki homare,269 +human chair,269 +helma lennartz,269 +heirou,269 +haruka (blue archive),269 +gyozanuko,269 +green sash,269 +gatchan,269 +funny glasses,269 +fuji fujino,269 +food theft,269 +eva 00,269 +downtown no gaki no tsukai ya arahende!!,269 +cygnus (cygnus7),269 +budew,269 +bud,269 +broken wall,269 +breast awe,269 +begging,269 +asada shino,269 +artoria pendragon (swimsuit archer) (first ascension) (fate),269 +arisaka,269 +arimura yuu,269 +aoshidan school uniform,269 +anemone (flower),269 +alraune,269 +akizone,269 +aizen kunitoshi,269 +? block,269 +zun,268 +yoshimo,268 +urotan,268 +urashima kotetsu,268 +uchuu patrol luluco,268 +tmp (girls' frontline),268 +tile ceiling,268 +thin (suzuneya),268 +the great ace attorney,268 +string phone,268 +spike ball,268 +sitting on animal,268 +shining hearts,268 +shamu meruruusa,268 +shadow hearts,268 +screencap inset,268 +saya (mychristian2),268 +ruhika,268 +romi (346 ura),268 +pokimari,268 +otter spirit (touhou),268 +oono kanako,268 +nightmare77zx,268 +neku (neku draw),268 +nefertari vivi,268 +menou kaname,268 +lecia (granblue fantasy),268 +kokonose haruka,268 +kakyuusei,268 +julia (idolmaster),268 +ice cream cup,268 +ibaraki douji (swimsuit lancer) (fate),268 +honkai: star rail,268 +head on ass,268 +hazuki (sutasuta),268 +growlanser,268 +gojou wakana,268 +fuzukikai,268 +fei rune,268 +extended downblouse,268 +creek (moon-sky),268 +coroha,268 +cooking pot,268 +cocozasa,268 +chiroshiron,268 +chimunge,268 +bukimi isan,268 +billiard ball,268 +anyannko,268 +zenless zone zero,267 +yoshinon,267 +yadomi jinta,267 +urutsu sahari,267 +tsukinami kousuke,267 +tokyo mirage sessions fe,267 +tamura hiyori,267 +tama (soon32281),267 +suzushiro (suzushiro333),267 +standing on person,267 +soulcalibur vi,267 +soukou kihei votoms,267 +shiranui mai (cosplay),267 +shirako miso,267 +shatter,267 +samsung sam,267 +ripping,267 +omniscient reader's viewpoint,267 +nyotaimori,267 +mukiki,267 +moruchi (rinaka moruchi),267 +miyu edelfelt (beast style),267 +mazeran,267 +matataku,267 +man (man-room),267 +mamo williams,267 +lynn minmay,267 +life fiber,267 +lantern festival,267 +konomi (kino konomi),267 +kikoka (mizuumi),267 +iguro obanai,267 +hylian shield,267 +holding another's tail,267 +hatsutori hajime,267 +h&k mp7,267 +giovanna (guilty gear),267 +gaou (babel),267 +fish.boy,267 +dakusuta,267 +cure fortune,267 +bsapricot,267 +benizuwai,267 +bell 
pepper,267 +barista,267 +alisa reinford,267 +akira shiun,267 +;>,267 +wangphing,266 +vertical-striped socks,266 +vanilla (miotanntann),266 +usagi nagomu,266 +twinpoo,266 +togashi yuuta,266 +tarou tachi,266 +tape on nipples,266 +static,266 +sovetskaya rossiya (azur lane),266 +skinny dipping,266 +shironeko yuuki,266 +senkou no ronde,266 +sanbasou,266 +reinhardt (overwatch),266 +pesci,266 +nyatokanyaru,266 +nu (plastic eraser),266 +namidame,266 +multicolored panties,266 +moth wings,266 +mejiro bright (umamusume),266 +leviathan (mega man),266 +lace-trimmed headwear,266 +kuro293939 (rasberry),266 +komakusa sannyo,266 +kokoro (darling in the franxx),266 +kikurage (plastic people),266 +kamiki mirai,266 +kamen rider ooo,266 +kamen rider decade,266 +ichizen (o tori),266 +hyouju issei,266 +higekiri (touken ranbu),266 +hana mori,266 +goutokuji miyako,266 +goat tail,266 +floating sword,266 +fi (zelda),266 +dragonaut,266 +chiyingzai,266 +bulge to ass,266 +aztec,266 +asriel dreemurr,266 +amanda o'neill,266 +aisaki emiru,266 +zhong lanzhu,265 +yuuyuu (yuuki1771),265 +world war i,265 +tyranitar,265 +tina armstrong,265 +thief (final fantasy),265 +the transformers (idw),265 +template,265 +tamamo no mae (third ascension) (fate),265 +takenoko no you,265 +takayama maria,265 +surtr (liberte echec) (arknights),265 +sune (mugendai),265 +song of broken pines (genshin impact),265 +shiritsu justice gakuen,265 +shez (fire emblem) (female),265 +shenbei xiaoqiu,265 +sen1986,265 +scryed,265 +saliva drip,265 +sakura kyouko (cosplay),265 +romeo to cinderella (vocaloid),265 +raimon natsumi,265 +prushka,265 +plume (arknights),265 +pisuke,265 +mp40,265 +mineta minoru,265 +maylene (pokemon),265 +mawaru (mawaru),265 +matsubara kaoru,265 +lutherniel,265 +lilith aensland (cosplay),265 +kyousaru,265 +kumashiro maya,265 +kitsune spirit (doitsuken),265 +kirisato itsuki,265 +kiraki,265 +kat (bu-kunn),265 +kakyuusei 2,265 +kaede (mmkeyy),265 +hungry nun (diva),265 +hiiragi fuyuki,265 +hige habahiro,265 +gaketsu,265 +fukuro daizi,265 +flower over mouth,265 +fist in hand,265 +fighter (dungeon and fighter),265 +fangdan runiu,265 +fami (yellow skies),265 +eredhen,265 +cuffs-to-collar,265 +cross-laced top,265 +corn dog,265 +chuck (psg),265 +chikuwa (tks),265 +bronya zaychik (herrscher of reason),265 +axel (kingdom hearts),265 +aunt and nephew,265 +asuku (69-1-31),265 +asterios (fate),265 +artillery,265 +alcremie (strawberry sweet),265 +akino komichi,265 +akagi-chan (azur lane),265 +...!,265 +zenobia (fate),264 +yuu (kfc),264 +yano erika,264 +yamamoto kazue,264 +white curtains,264 +wax,264 +visible ears,264 +ushiki yoshitaka,264 +tina fate,264 +tenneko yuuri,264 +tamagoroo,264 +sword maiden,264 +swampert,264 +starpiece memories (idolmaster),264 +spider apple,264 +souma (ordures),264 +shimizu akina,264 +sharpedo,264 +sengoku otome,264 +sempon (doppio note),264 +satellite,264 +qin shi ming yue,264 +projektmelody,264 +pinstripe dress,264 +pensuke,264 +partially annotated,264 +natsume yuujinchou,264 +mosquito coil,264 +mina cream,264 +mile (off8mile),264 +mary stuart,264 +ludger will kresnik,264 +leaf bikini,264 +ladic,264 +kuon (utawarerumono),264 +kitasaya ai,264 +kinuhata saiai,264 +kinon bachika,264 +kiguchiko,264 +kansaiben,264 +k' (kof),264 +invisible air (fate),264 +ichii tooru,264 +hominamia,264 +holding scarf,264 +hatsune miku (vocaloid4),264 +hangaku,264 +gomashi (goma),264 +ganto,264 +fukuda noriko,264 +flanvia,264 +fangxiang cuoluan,264 +cyril brooklyn,264 +cure mermaid,264 +cubone,264 +coyucom,264 +cherry tomato,264 +bra slip,264 
+asymmetrical armor,264 +arjuna alter (fate),264 +aqua flower,264 +aki poi,264 +abe suke,264 +z.o.b,263 +yuuki kira,263 +yukishiro arte,263 +yoshiki,263 +yagami makino,263 +urd (aa megami-sama),263 +theresa apocalypse (valkyrie pledge),263 +sunameri oishii,263 +suimya,263 +step and repeat,263 +shiraue yuu,263 +shino (comic penguin club),263 +shigure (azur lane),263 +shiba miyuki,263 +rukitsura,263 +pinkgermy,263 +packge,263 +otome wa boku ni koishiteru,263 +orange one-piece swimsuit,263 +oktoberfest,263 +obidome,263 +nissan,263 +nakabayashi reimei,263 +muteppona hito,263 +mr.romance,263 +minase (takaoka nanase),263 +mifuru,263 +may (gundam build divers re:rise),263 +lam (ramdayo),263 +kuroi susumu,263 +kotoyoshi yumisuke,263 +koorimizu,263 +kirisaki byakko,263 +kionant,263 +husband and husband,263 +hiiragi souren,263 +higashi shino,263 +gao (gaolukchup),263 +esureki,263 +easonx,263 +camouflage shorts,263 +battle angel alita,263 +anna (fire emblem),263 +akagi: yami ni oritatta tensai,263 +agetama,263 +african wild dog print,263 +;t,263 +yuuki chihiro,262 +wally (pokemon),262 +w over eye,262 +vespa,262 +uchuuneko,262 +two-tone bodysuit,262 +tsuezu,262 +totoki86,262 +thor (deep rising),262 +takakura kanba,262 +shirt tan,262 +shiina (angel beats!),262 +senju muramasa,262 +reel (riru),262 +prinz eugen (final lap) (azur lane),262 +pjrmhm coa,262 +papika (flip flappers),262 +orange wings,262 +nago,262 +model,262 +missing tooth,262 +miso soup,262 +mattress,262 +makai senki disgaea 5,262 +magic kaito,262 +logan cure,262 +lcl,262 +kuga tsukasa,262 +joshiraku,262 +inuno rakugaki,262 +ikeda (cpt),262 +human stacking,262 +hugging another's leg,262 +holding pocky,262 +hands on own legs,262 +gin moku,262 +flower basket,262 +figure skating,262 +eraser head (boku no hero academia),262 +door handle,262 +damegane,262 +clothed robot,262 +changye,262 +benitama,262 +ball busting,262 +badminton racket,262 +arash (fate),262 +yurume atsushi,261 +yellow sweater vest,261 +wakabayashi iori,261 +usami (danganronpa),261 +twister,261 +tsukiriran,261 +tomoo (tomo),261 +swim cap removed,261 +suzu (cookie),261 +sunohara youhei,261 +starcraft,261 +spiked boots,261 +sitting on car,261 +sitonai (fate),261 +shimotsuki potofu,261 +sen'yuu yuuji,261 +sakura chiyo (konachi000),261 +purple pupils,261 +purple armor,261 +pollux (fate),261 +omamori,261 +night wizard,261 +nakatama kyou,261 +mountain han,261 +mokku,261 +minamon (vittel221),261 +love hotel,261 +long ribbon,261 +kugui kiyunemu,261 +konkito,261 +kawamura tenmei,261 +kamen rider kabuto (series),261 +jeneral,261 +hizamaru (touken ranbu),261 +hatsushimo kai ni (kancolle),261 +hand in pants,261 +goomrrat,261 +endro!,261 +ddari,261 +dark knight (final fantasy),261 +cum in panties,261 +cottontailtokki,261 +c.r.,261 +accidental pervert,261 +yamaku high school uniform,260 +world of warships,260 +watanuki kaname,260 +tetsubuta,260 +tarachine,260 +takami masahiro,260 +suzuki (girls und panzer),260 +suou tatsuya,260 +snow shelter,260 +skirt grab,260 +shingoku no valhalla gate,260 +seolla schweizer,260 +sch,260 +sazamiso rx,260 +satomi yoshitaka,260 +sakura (medilore),260 +rosette (roze-ko),260 +rockruff,260 +riru,260 +pantyhose around one leg,260 +osumoto1,260 +oshiaki,260 +omoomomo,260 +oda nobunaga (swimsuit berserker) (first ascension) (fate),260 +nun bora,260 +nicky w,260 +misaki high school uniform,260 +mine (peak),260 +mhk (mechamania),260 +komusun,260 +kamishirasawa keine (hakutaku),260 +iruma kamiri,260 +inohara masato,260 +high school dxd new,260 +heather mason,260 +hane 
(hanegoya),260 +griffin,260 +glastonbury1966,260 +fried rice0614,260 +folding screen,260 +downpants,260 +double anal,260 +djheycha,260 +diives,260 +devil survivor 2,260 +detached hood,260 +dehya (genshin impact),260 +cocoloco,260 +cheesecake,260 +backbeako,260 +av idol,260 +apollo justice: ace attorney,260 +alternate shiny pokemon,260 +ajitarou (setsu),260 +aaru (tenrake chaya),260 +zakki,259 +yuuji overall,259 +yuki miku (2019),259 +xbox 360,259 +watering,259 +tsuaii,259 +toaru kagaku no mental out,259 +tape on pussy,259 +sucking male nipple,259 +spiked armor,259 +spacecraft interior,259 +ship's wheel,259 +shin (dorohedoro),259 +shimhaq,259 +sexual harassment,259 +sakura neko,259 +re mii,259 +raphiel shiraha ainsworth,259 +qi lolita,259 +pokemon: the electric tale of pikachu,259 +overhead line,259 +okayparium,259 +nikkari aoe,259 +nanana (nanana iz),259 +mushi gyouza,259 +murosaki miyo,259 +misnon the great,259 +matryoshka (vocaloid),259 +luse maonang,259 +lala tramont,259 +lace-up,259 +kooribata,259 +kino (kino konomi),259 +kingin,259 +kakuzatou (koruneriusu),259 +k-pop,259 +inoshishi (ikatomo),259 +imi uzi,259 +holding reins,259 +holding notepad,259 +hakumen,259 +hachiouji naoto,259 +green pubic hair,259 +gogatsu fukuin,259 +danganronpa 10th anniversary costume,259 +connie springer,259 +conductor,259 +citemer,259 +churchill (tank),259 +bardock,259 +bandaged foot,259 +baltimore (after-school ace) (azur lane),259 +ashigara kai ni (kancolle),259 +akagi shun,259 +wishiwashi (solo),258 +white mage (fft),258 +turtleneck crop top,258 +tridisart,258 +teenage mutant ninja turtles,258 +tailred,258 +tailmon,258 +t-34,258 +super robot wars the lord of elemental,258 +sombrero,258 +snom,258 +sadahiro (chicken nugget gyuuniku aji),258 +reiq,258 +red butterfly,258 +power fist,258 +pomegranate,258 +paint tube,258 +narusegawa naru,258 +mori toshiaki,258 +matsutani,258 +lit fuse,258 +kurobe natsumi (shiromanta),258 +koseki reina,258 +koonago,258 +konata gazel,258 +katou haruaki,258 +ishida seito,258 +inaba mob (touhou),258 +high-visibility vest,258 +hayakawa tazuna,258 +haryuu (poetto),258 +hand to forehead,258 +fuuma kotarou (fate),258 +extra legs,258 +empoleon,258 +elie macdowell,258 +duskull,258 +date (senpen),258 +dainamitee,258 +black order uniform,258 +arai nobu,258 +amane kurumi,258 +yuzuki karu,257 +yorda,257 +yakuza,257 +yakitomato,257 +x navel,257 +wolfgang amadeus mozart (fate),257 +white rhinoceros (kemono friends),257 +uraharukon,257 +udon entertainment,257 +turisasu,257 +stroking another's chin,257 +shinazugawa sanemi,257 +scholar (final fantasy),257 +sakieko,257 +sakaki maki,257 +sagat,257 +saeki sora,257 +round table,257 +ridley,257 +red panda,257 +predicament bondage,257 +poppy (flower),257 +panties under bike shorts,257 +ox horns,257 +ohlia,257 +noba,257 +noa (blue archive),257 +nagasone kotetsu,257 +mojarin (kihara mojarin),257 +mikazuki sara,257 +matsui yasutsugu,257 +leona west,257 +lagann,257 +kiya machi,257 +kira yamato,257 +kataku musou,257 +jon (st05254),257 +io (pso2),257 +ikuyoan,257 +helena blavatsky (swimsuit archer) (fate),257 +heart-shaped gem,257 +heart-shaped buckle,257 +headgear removed,257 +hazuki ren,257 +escalator,257 +chikuwa savy,257 +canaan (character),257 +atahuta,257 +arm scrunchie,257 +amamiya elena,257 +zootopia,256 +yokoyama chika,256 +yggdrasil (granblue fantasy),256 +warrior of light,256 +venat (ff14),256 +unicorn (long-awaited date) (azur lane),256 +umyonge (lkakeu),256 +translucent hair,256 +tootsuki saryou ryouri gakuen uniform,256 +tinnies,256 +the legend 
of zelda: a link to the past,256 +teranen,256 +tachibana momoka,256 +tachibana himeko,256 +shiro kuma shake,256 +shiodome oji,256 +shimohara sadako,256 +seitokai no ichizon,256 +samurai (7th dragon series),256 +raven cronwell,256 +rangu,256 +rabi-ribi,256 +persona 1,256 +onsen tamago (nurumayu onsen),256 +no symbol,256 +neck fur,256 +ndtwofives,256 +meteion,256 +mendou kusai,256 +megumi (girls und panzer),256 +march-bunny,256 +lace-trimmed bikini,256 +kyuuri (miyako),256 +kusanagi nene,256 +kurokaji,256 +kei kei,256 +implied paizuri,256 +hula hoop,256 +headshot,256 +hagiwara rin,256 +gelatin,256 +fennec fox,256 +elizabeth bathory (second ascension) (fate),256 +dragoon (final fantasy),256 +dancho (dancyo),256 +croquette,256 +caterpie,256 +atago (school traumerei) (azur lane),256 +asahina hiyori,256 +anzai romi,256 +amano jack,256 +akitokage,256 +adepta sororitas,256 +usagi-san,255 +unime seaflower,255 +toriatto gununu,255 +tanemura koyori,255 +tamaki kotatsu,255 +tadano akira,255 +supermarket,255 +square neckline,255 +spiked anklet,255 +shirayuki towa,255 +senriyama school uniform,255 +rokkotsu,255 +roegadyn,255 +ripu (lipi),255 +prosthetic hand,255 +pixiv fate/grand order contest 2,255 +opened by self,255 +nollety,255 +noa (nagareboshi),255 +nezuko,255 +minami kotori (bird),255 +min-naraken,255 +marshall k,255 +lady avalon (fate),255 +kyou (fr39),255 +kris (deltarune),255 +konan (naruto),255 +kolshica,255 +koizumi amane,255 +kiwame (touken ranbu),255 +kirise mitsuru,255 +kama (swimsuit avenger) (third ascension) (fate),255 +junkrat (overwatch),255 +inokuma youko,255 +ice skating,255 +hitoshura,255 +hibiki ryouga,255 +galgrease,255 +frozen frog,255 +fatkewell,255 +fall guy,255 +eye pop,255 +draco centauros,255 +disco ball,255 +deoxys,255 +curse maker,255 +bulging eyes,255 +ataruman,255 +anchorage water oni,255 +amemiya ruki,255 +after battle,255 +aburidashi zakuro,255 +yoshi mi yoshi,254 +yamada ako,254 +yaia (granblue fantasy),254 +white rabbit (alice in wonderland) (cosplay),254 +whisperain (arknights),254 +whip (kof),254 +vs,254 +uro,254 +tsubaki (tatajd),254 +trapinch,254 +towa herschel,254 +tactics ogre,254 +suzuno fuusuke,254 +sofy,254 +snk heroines: tag team frenzy,254 +shibainu niki,254 +seishou music academy uniform,254 +sashimi,254 +sakura ryuuken,254 +rod (rod4817),254 +rarity,254 +rakeemspoon,254 +quintuplets,254 +poro (league of legends),254 +ooji cha,254 +noss (rariatto),254 +nanjo hikaru,254 +naked robe,254 +naitou ryuu,254 +munashi mujou,254 +mika (1020mk),254 +megao 3rd,254 +matsuda hikari,254 +machiko (beard),254 +long horns,254 +kujiran,254 +kon kanaho,254 +king (vocaloid),254 +king (snk),254 +ichi (ichi.colors),254 +human tower,254 +hubert ozwell,254 +huanxiang huifeng,254 +hopeless masquerade,254 +hakuda tofu,254 +gazer (monster girl encyclopedia),254 +gambeson,254 +fishine,254 +excellen browning,254 +compound bow,254 +coffeekite,254 +catbell,254 +billiards,254 +bewear,254 +be (o-hoho),254 +aloe (quiz magic academy),254 +akisome hatsuka,254 +airplane interior,254 +zooey (summer) (granblue fantasy),253 +white album 2,253 +weapon behind back,253 +waka (wk4444),253 +vibrator in anus,253 +uta (kuroneko),253 +toeto (vocaloid),253 +tenko (gintenko),253 +takaishi takeru,253 +taira tsukune,253 +simao (x x36131422),253 +shalltear bloodfallen,253 +sentimental graffiti,253 +scribble,253 +raspberyl,253 +primordial jade winged-spear (genshin impact),253 +ouroboros (girls' frontline),253 +non (nuebako),253 +namako (takorin),253 +mvp,253 +mckeee,253 +leviathan (umineko),253 +koshiro 
itsuki,253 +kishibe,253 +japanese postal mark,253 +janne cherry,253 +jaeger (pacific rim),253 +iriam,253 +imaichi,253 +illuso,253 +huyase,253 +hosizora mikoto,253 +holding up,253 +hito komoru,253 +himeno kanon,253 +giggling,253 +geetsu,253 +fusou (azur lane),253 +eko,253 +die (die0118),253 +delivery,253 +dark souls ii,253 +dark elven forest ranger,253 +crosshair,253 +cream on body,253 +cracker,253 +convention greeting,253 +chi-hatan school uniform,253 +chain chronicle,253 +candace (genshin impact),253 +band-width,253 +astel leda,253 +asami asami,253 +anonamos,253 +angel (evangelion),253 +akiyoshi (tama-pete),253 +aardwolf ears,253 +<|> <|>,253 +5others,253 +0930erina,253 +yan-yam,252 +thompson (girls' frontline),252 +tatekawa mako,252 +takeya yuuki,252 +takaya noriko,252 +tadanoshi kabane,252 +summer pockets,252 +sts,252 +solar (happymonk),252 +sogabe toshinori,252 +slouching,252 +shaw (arknights),252 +seragaki aoba,252 +sensen,252 +senjou no valkyria 3,252 +release date,252 +polaroid,252 +nemu mohu,252 +naoko-san,252 +muka tsuku,252 +moyu marginal,252 +miya9,252 +meltryllis (third ascension) (fate),252 +makino (ukiuo),252 +maki (natoriumu),252 +lion dance,252 +komaki ikuno,252 +koma midori,252 +kikurage (crayon arts),252 +kaneru,252 +kanba girls high school uniform,252 +k (gear labo),252 +j.k.,252 +ishimu,252 +illustration.media,252 +iida tenya,252 +hayase mitsuki,252 +hashida itaru,252 +guinea pig,252 +gradient kimono,252 +fake censor,252 +ero kaeru,252 +dirt road,252 +denhijou niki,252 +crystal sword,252 +cracked glass,252 +cinder fall,252 +buruma around one leg,252 +blossom (ppg),252 +bebebe (pepegakii),252 +araiguma,252 +amatsuka uto,252 +alfonse (fire emblem),252 +akaoni (zumt3548),252 +zaki (zaki btw),251 +yamaha,251 +wooden door,251 +toshifumi,251 +tkhs,251 +tesun (g noh),251 +tapioka (oekakitapioka),251 +takao (full throttle charmer) (azur lane),251 +sumeragi aika,251 +suerte,251 +stuffed dolphin,251 +space cat (meme),251 +sigama,251 +satin bra,251 +sashizume soutarou,251 +sara (touhou),251 +rika (touhou),251 +richard (tales),251 +putting on headwear,251 +plaid sleeves,251 +pinkie pie,251 +ouhashi,251 +organization xiii,251 +onjouji toki,251 +omori (omori),251 +ohasi,251 +normaland,251 +nonohara miki,251 +noise (lesion949),251 +nobori ranzu,251 +mutou mato,251 +mother and child,251 +merlusa,251 +marvel vs. 
capcom 3,251 +lorenz hellman gloucester,251 +lorelei (pokemon),251 +lily (artist),251 +lieza (arc the lad),251 +kurororo rororo,251 +kou (haijindeath),251 +karol capel,251 +kaptivate,251 +kaneaki mukku,251 +kaga (battleship) (azur lane),251 +jigsaw puzzle,251 +ichinomiya (blantte),251 +ichiei,251 +human toilet,251 +hoojiro,251 +hideko (l33l3b),251 +hi-ho-,251 +hanami kotoha,251 +gold experience,251 +glowing flower,251 +evandragon,251 +ember celica (rwby),251 +dragon quest dai no daibouken,251 +darkkanan,251 +dan (orange train),251 +clam shell,251 +chiya (urara meirochou),251 +bradamante (first ascension) (fate),251 +betock,251 +balding,251 +athenawyrm,251 +anzu ame,251 +ame to kimi to,251 +alessandra susu,251 +ainz ooal gown,251 +adjusting footwear,251 +abo (kawatasyunnnosukesabu),251 +2014 fifa world cup,251 +zubat,250 +yoshida kazumi,250 +uumaru,250 +transformers prime,250 +tenka hyakken,250 +tanaka io (craftstudio),250 +taihou (phoenix's spring song) (azur lane),250 +sigm@,250 +satsuki imonet,250 +punching bag,250 +product girl,250 +pheromosa,250 +pepper0,250 +penguins performance project (kemono friends),250 +oku tamamushi,250 +oginome ringo,250 +niyon (granblue fantasy),250 +night battle idiot,250 +nami (nyaa),250 +mizuki seira,250 +misha (hoongju),250 +mimura kaoru,250 +mickeysmith,250 +lulu (ff10),250 +lora (xenoblade),250 +lineage,250 +kuroshinki,250 +kosame daizu,250 +knight (dungeon and fighter),250 +kinomoto sakura (cosplay),250 +kikuta,250 +kazari tayu,250 +kaname nagi,250 +kanachirou,250 +kamen rider kiva (series),250 +hikari (komitiookami),250 +ginko,250 +ga geijutsuka art design class,250 +fletcher mk ii (kancolle),250 +female tourist c (arknights),250 +fate testarossa (impulse form),250 +elbows on knees,250 +chitosezaka suzu,250 +bread slice,250 +blood on wall,250 +bakusou kyoudai let's & go!!,250 +arcana heart 2,250 +altera the santa (fate),250 +yuya (night lily),249 +yuragisou no yuuna-san,249 +yu-gi-oh! 
sevens,249 +yahako,249 +xingchen,249 +wrestler,249 +wacom,249 +uzaki (jiro),249 +u-47 (azur lane),249 +transformer,249 +toast (gesture),249 +tentaclejob,249 +tangled,249 +suzuya juuzou,249 +superb bird-of-paradise (kemono friends),249 +suka,249 +subaru (yachika),249 +sooon,249 +skirt caught on object,249 +shu yamino,249 +shiina kuro,249 +seki hiromi,249 +sceptile,249 +sakurada jun,249 +roomba,249 +print pants,249 +outer senshi,249 +okada akane,249 +nina (girls und panzer),249 +murkrow,249 +mmm (ji1945),249 +mitchi,249 +minazuki tsuyuha,249 +mina (pokemon),249 +milking handjob,249 +mikazuki augus,249 +maya g,249 +material-l,249 +kyokutou hentai samurai,249 +kasuga (kasuga39),249 +kameponde,249 +kamaboko (ossann0125),249 +jedi,249 +inishie,249 +implied bisexual,249 +hyouuma,249 +hota,249 +hhh (wave),249 +harmonia,249 +hands on another's stomach,249 +gemi,249 +gabriel evangel,249 +fundoshi aside,249 +frustrated,249 +electrocution,249 +double-blade,249 +dart,249 +coin (ornament),249 +chowbie,249 +char (2v 26),249 +cell (dragon ball),249 +bunny vibrator,249 +boruhis,249 +artorias the abysswalker,249 +amou kanade,249 +akg,249 +action pizazz,249 +a9b (louis814),249 +wild arms 5,248 +wave print,248 +watashi (jintai),248 +u (the unko),248 +tarmo,248 +supon,248 +summoning,248 +stack,248 +snow white and the seven dwarfs,248 +shining resonance,248 +sakeharasu,248 +rihito (usazukin),248 +rhinoceros beetle,248 +pyon-kichi,248 +phichit chulanont,248 +peril,248 +ovaries,248 +oshimizu nako,248 +ooide chousuke,248 +nyaru (nyaru 4126),248 +musashi (azur lane),248 +morogami ryou,248 +mochizuki chiyome (fate),248 +meion,248 +mareep,248 +manga panel redraw,248 +makishima saori,248 +lysandre (pokemon),248 +lloyd irving,248 +lewdamone,248 +lapis lazuli (houseki no kuni),248 +kuzumosu,248 +koyanskaya (chinese lostbelt outfit) (fate),248 +kanapy,248 +indosou,248 +ia (ias1010),248 +holding shower head,248 +hanazawa suou,248 +gensou suikoden i,248 +flametail (arknights),248 +dr. 
stone,248 +comic exe,248 +comic anthurium,248 +cobra (animal),248 +chansey,248 +chain chomp,248 +carlo montie,248 +camieux,248 +assault visor,248 +arms on knees,248 +anne bonny (fate),248 +adjusting another's clothes,248 +yozakura (senran kagura),247 +watermelon print,247 +uzui tengen,247 +utensil,247 +tohsaka aoi,247 +syatey,247 +steelix,247 +sami (object dump),247 +pomu,247 +pikomarie,247 +ox ears,247 +ombok diving and delivery services,247 +nyatasha nyanners,247 +nokishita,247 +nicutoka,247 +nada haruka,247 +myuu (arisumeria),247 +muu (mumumer),247 +mumu (mumunyan),247 +meganium,247 +kyoto,247 +kotohime (touhou),247 +kisaragi tsurugi,247 +itachou,247 +ikeda kana,247 +hug (yourhug),247 +hishimochi,247 +hirowa nagi,247 +hinata momo,247 +henyaan (oreizm),247 +hattori masaki,247 +haruri,247 +hajime kaname,247 +eliza (skullgirls),247 +ecru,247 +dross,247 +drifblim,247 +delphox,247 +cosplay request,247 +corn cob,247 +comiket 99,247 +arung samudra (cessa),247 +artemis (fate),247 +aoi masami,247 +aoblue,247 +akamatsu ken,247 +admiral suwabe,247 +yunekoko,246 +wolf hood,246 +white (among us),246 +uona telepin,246 +uboa,246 +tsuyokiss,246 +tsukiyo no ribbon,246 +toten (der fuhrer),246 +takahata yuki,246 +stun gun,246 +stuffed fish,246 +sozan,246 +sayla mass,246 +sasakura,246 +renton thurston,246 +q (ed69),246 +pumpkinpan,246 +pudding (skymint 028),246 +palmtop tiger,246 +oniku (shimofuri-ke),246 +nobeta,246 +nat the lich,246 +nanotsuki,246 +nakayama festa (umamusume),246 +muten roushi,246 +melon soda,246 +mayoi neko overrun!,246 +mash kyrielight (senpai killer outfit),246 +lunafreya nox fleuret,246 +lenneth valkyrie,246 +kunkun,246 +kiritzugu,246 +kaname tatsuya,246 +kagune (tokyo ghoul),246 +kagiana,246 +itaco,246 +ishtar (swimsuit rider) (fate),246 +iris (mega man),246 +iori shirou,246 +horned hood,246 +homestuck,246 +himawari-san,246 +higa yukari,246 +hiding behind another,246 +hidebou,246 +hagure kedama,246 +gunner 2 (sekaiju),246 +grey hakama,246 +green pupils,246 +excessive nosebleed,246 +enderman,246 +crono (chrono trigger),246 +chise (blue archive),246 +canking,246 +bouncing testicles,246 +bond (spy x family),246 +backbeard,246 +ashwatthama (fate),246 +antispiral nia,246 +aizawa sakuya,246 +afukuro,246 +zhu bajie,245 +yukichi (sukiyaki39),245 +vocaloid boxart pose,245 +ushiwakamaru (swimsuit assassin) (first ascension) (fate),245 +tsukumogami,245 +thanatos (hades),245 +tezuka rin,245 +tatsuya (guild plus),245 +taimanin rpgx,245 +sora no woto,245 +siren (azur lane),245 +shouna mitsuishi,245 +shanyao jiang tororo,245 +shadow (persona),245 +sarukaiwolf,245 +same anko,245 +purinpurin,245 +pudgeruffian,245 +precum string,245 +pikl (elira pendora),245 +ouroboros,245 +ors anime renders,245 +olimar,245 +oka asahi,245 +neet,245 +nadeara bukichi,245 +multicolored polka dots,245 +molcar,245 +minashirazu,245 +luna platz (mega man),245 +lr hijikata,245 +li (lithium0522),245 +kyuutou (kyuutouryuu),245 +kusagami style,245 +kurotsuchi nemu,245 +kurochiroko,245 +kukka,245 +kouzuki kei,245 +koshigaya natsumi,245 +kokkoro (summer) (princess connect!),245 +koe no katachi,245 +khakis,245 +kayako (tdxxxk),245 +kawashiro mitori,245 +kanju,245 +isuka,245 +irisu fuyumi,245 +ilfa (to heart),245 +human (warcraft),245 +houchi shoujo,245 +horned hat,245 +holding pickaxe,245 +holding own wrist,245 +hinata sora,245 +hinami047,245 +himukai kyousuke,245 +hachijou (kancolle),245 +green dew,245 +gotou toushirou,245 +google,245 +gigantic penis,245 +genie,245 +french cruller,245 +flowey (undertale),245 +fanning,245 +fake 
video,245 +facing down,245 +eye beam,245 +eremes guile,245 +eno yukimi,245 +drawn ears,245 +dragon ball heroes,245 +doukyuusei 2,245 +coughing,245 +clothed after sex,245 +chocokin,245 +celebration,245 +blood on ground,245 +beyblade,245 +asymmetrical pants,245 +amy (suisei no gargantia),245 +amaa (chou dennou jidai),245 +alisaie leveilleur,245 +akitetsu,245 +yamato aki,244 +wakamesan,244 +vyragami,244 +uno makoto,244 +turtleneck jacket,244 +tcb,244 +tai (nazutai),244 +sugimori ken (style),244 +sitting on food,244 +shower curtain,244 +rose bush,244 +reien girl's academy school uniform,244 +pilot helmet,244 +piku,244 +penetration through clothes,244 +o-ring legwear,244 +nanashi noiji,244 +nana kagura,244 +mutsumi masato,244 +musharna,244 +mizuki (mizuki ame),244 +minamoto shizuka,244 +max melon,244 +marisasu (marisa0904),244 +lucie,244 +kuma (crimsonvanilla),244 +kono bijutsubu niwa mondai ga aru!,244 +kodama fumika,244 +kitayuki kajika,244 +kingdom hearts i,244 +kikugetsu,244 +kawamura reo,244 +kamui (gintama),244 +jinbaori,244 +in (ain),244 +ikebukuro akiha,244 +ice cream spoon,244 +i-pan,244 +holding comb,244 +hildr (fate),244 +hello kitty (character),244 +ezio auditore da firenze,244 +cum on figure,244 +bat tattoo,244 +azumi inori,244 +arulumaya,244 +articuno,244 +arm wrestling,244 +ansel (arknights),244 +alakazam,244 +akahito,244 +ah-lyong lee,244 +advent cirno,244 +add (elsword),244 +zapdos,243 +yonezawa natsumi,243 +yellow gemstone,243 +vice (kuronekohadokoheiku),243 +ttgl eyecatch,243 +trim brush,243 +tensugi takashi,243 +tanuki (ame to kimi to),243 +tana (fire emblem),243 +suisai (suisao),243 +sui (camellia),243 +store clerk,243 +shouu-kun,243 +scathacha (granblue fantasy),243 +saratoga (warship girls r),243 +raboot,243 +quiff,243 +pretzel,243 +peacock,243 +oounabara to wadanohara,243 +onikokko,243 +ogihara mach,243 +natu,243 +nanohana (november.),243 +mura karuki,243 +mug writing,243 +magical mirai miku (2016),243 +luigi di savoia duca degli abruzzi (kancolle),243 +long island (azur lane),243 +kusakabe (kusakabeworks),243 +kokuzou,243 +keizoku (emblem),243 +kanan,243 +kaitou kid,243 +juuni kokuki,243 +jinki,243 +izanagi (persona 4),243 +ishigaki takashi,243 +hover bike,243 +holding ring,243 +hitou nami,243 +head under skirt,243 +giant brush,243 +fixro2n,243 +five nights at freddy's,243 +fc (efushii),243 +douya (233),243 +double exposure,243 +dengeki hime,243 +darth vader,243 +cloud tattoo,243 +chypre (heartcatch precure!),243 +chiron (fate),243 +chikaretsu,243 +camera flash,243 +burbur,243 +breathing,243 +bradamante (third ascension) (fate),243 +black delmo,243 +binder,243 +bakumatsu rouman,243 +augustine sycamore,243 +arsalan (housamo),243 +applejack,243 +apple pie,243 +animated png,243 +alisha diphda,243 +akusema,243 +aipom,243 +weibo id,242 +wangxiii,242 +vileplume,242 +upset,242 +teshima nari,242 +sweater jacket,242 +studio microphone,242 +southern tamandua (kemono friends),242 +sou (tuhut),242 +snow white (sinoalice),242 +shisoneri,242 +satan (umineko),242 +sakura no tomoru hi e,242 +riverbank,242 +ribucci,242 +remembering,242 +qualidea code,242 +potpourri (heartcatch precure!),242 +poipoi purin,242 +pixiv red,242 +patting,242 +nitako,242 +nightmare,242 +mujina,242 +mizuno youko,242 +minion 2 (zannen onna-kanbu black general-san),242 +masamune-kun no revenge,242 +linne,242 +kuren,242 +kino aki,242 +katsushika hokusai (traveling outfit) (fate),242 +katsuragi keima,242 +kaleido star,242 +izuna (shinrabanshou),242 +izumi koushirou,242 +iwatobi hiro,242 +ironmouse,242 +inuburo,242 
+ibarazaki emi,242 +houkago no pleiades,242 +homulilly,242 +hitsuki rei,242 +hinata nonoka,242 +green tongue,242 +golisopod,242 +goldfish scooping,242 +fujiyoshi harumi,242 +fire emblem gaiden,242 +fellatio under mask,242 +executive mishiro,242 +dolphin wave,242 +circular saw,242 +boot removed,242 +bellezza felutia,242 +baocaizi,242 +azumarill,242 +avatar (pso2),242 +angelina (distinguished visitor) (arknights),242 +agumon,242 +13 (spice!!),242 +z-ton,241 +yellow moon,241 +yamaguchi yuu,241 +tying footwear,241 +traffic barrier,241 +tosen jordan (umamusume),241 +toddler,241 +tobisawa,241 +tari tari,241 +tackle,241 +suzumiya haruhi-chan no yuuutsu,241 +st. feles gakuen uniform,241 +springfield (stirring mermaid) (girls' frontline),241 +slipper bathtub,241 +single arm warmer,241 +shosei,241 +ribombee,241 +reverse grip handjob,241 +renamon,241 +purple buruma,241 +primula,241 +pomodorosa,241 +orion (bear) (fate),241 +nipi27,241 +miyako (rgrayt),241 +miyabi akino,241 +lindy harlaown,241 +lighting cigarette,241 +kuzunoha raidou,241 +konoe ototsugu,241 +katou marika,241 +kamen rider ex-aid (series),241 +intravaginal futanari,241 +ikura nagisa,241 +ice climber,241 +huan li,241 +holding newspaper,241 +head biting,241 +hakkai,241 +geass,241 +gambler club,241 +donkey kong country,241 +dog walking,241 +cure rouge,241 +coral reef,241 +captain yue,241 +canada (hetalia),241 +belgium (hetalia),241 +beatrice (princess principal),241 +ayla (chrono trigger),241 +aomine daiki,241 +aoi nagisa,241 +akimoto dai,241 +yuzuriha (active homing),240 +yuujo,240 +yukizuki chikuba,240 +yukijirushi,240 +yellow outline,240 +yanagi yuu,240 +yagoo,240 +world of tanks,240 +virginia knights,240 +unapoppo,240 +tsuru (clainman),240 +the loud house,240 +tao (kadoya),240 +tamahana,240 +sumiya nadateru,240 +starly,240 +soukou makura,240 +slipping,240 +sleep bubble,240 +shitty t-shirt naval base,240 +shinomiya karen,240 +serafall leviathan,240 +seeker,240 +sasha (haguhagu),240 +sai-go,240 +roaring,240 +professor (ragnarok online),240 +penguin 3-gou,240 +paperclip,240 +oppai mochi,240 +olympics,240 +miyakure,240 +meerkat (kemono friends),240 +mayo chiki!,240 +marle (chrono trigger),240 +malon,240 +lena (zoal),240 +legend of mana,240 +landing,240 +kobaji,240 +kizuato,240 +kisaragi gunma,240 +kenron toqueen,240 +kel (omori),240 +indomitable (ms. 
motivationless maid) (azur lane),240 +icon (computing),240 +hotpot,240 +holding quill,240 +holding chainsaw,240 +hakusai ponzu,240 +gtunver,240 +green sarong,240 +game model,240 +fate/unlimited blade works,240 +eyeball hair ornament,240 +eiserne jungfrau,240 +dissidia 012 final fantasy,240 +crawling dreams,240 +cocq taichou,240 +chii (tsumami tsumamare),240 +cafe (cafe-chan to break time),240 +blueprint,240 +audrey burne,240 +amagase touma,240 +abandoned,240 +zephid,239 +yonaga (masa07240),239 +wireless sex toy controller,239 +virginia maxwell,239 +viking,239 +turewindwalker,239 +tomb raider,239 +tatsumi koutarou,239 +tail biting,239 +supershiruco,239 +stargazing,239 +southern ocean oni,239 +shimizudani ryuuka,239 +shiina mashiro,239 +satou toshiyuki,239 +rizu-kyun,239 +ribbon-trimmed thighhighs,239 +remilia scarlet (cosplay),239 +rake,239 +quiet (metal gear),239 +pool table,239 +pony play,239 +ogino (oginogino),239 +numbers' uniform,239 +night raven college uniform,239 +nicetack,239 +nemoto yuuma,239 +nakiri alice,239 +mutsu kai ni (kancolle),239 +mizunoe kotaru,239 +mitty (made in abyss),239 +mew ichigo,239 +mebae,239 +manga time kirara,239 +kurotama,239 +kumiho,239 +isamu-ki (yuuki),239 +ippers,239 +illyasviel von einzbern (swimsuit archer),239 +ikarin,239 +hoxutixu,239 +hisako (angel beats!),239 +hirasawa seiji,239 +hiramedousa,239 +heavy object,239 +hands on another's shoulder,239 +grey sclera,239 +gascogne (azur lane),239 +fujiko (emnprmn),239 +flying saucer,239 +face in ass,239 +eye socket,239 +evertale,239 +eve (chihuri),239 +emma august,239 +double (skullgirls),239 +darkside,239 +cotton swab,239 +compact (cosmetics),239 +ciel (mega man),239 +churuya,239 +ben-tou,239 +bandaid on shoulder,239 +ass smack,239 +aruman,239 +angelina (summer flowers) (arknights),239 +zellam,238 +wooden lantern,238 +ugume,238 +u-1196,238 +tsukimi dango,238 +tressa colzione,238 +tom clancy's the division,238 +teltelhousi,238 +taxi,238 +tassel choker,238 +takenouchi sora,238 +takei junko,238 +taguchi takahiro,238 +six fanarts challenge,238 +sit-up,238 +sinensian,238 +sherry birkin,238 +ryota-h,238 +rotom (normal),238 +rina atherina,238 +rikudou inuhiko,238 +pumpkin pants,238 +print hairband,238 +poker,238 +okuba,238 +north abyssor,238 +nier reincarnation,238 +nanahamu,238 +mons pubis,238 +minamoto chizuru,238 +meng ziya,238 +mejiro ryan (umamusume),238 +mcgunngu,238 +maeya susumu,238 +maekawa suu,238 +mac star,238 +kusuribe,238 +kumatora,238 +kosokosomaru (higashi taishi),238 +kazepana,238 +kageyama shien,238 +juggling,238 +jaune arc,238 +itou nanami,238 +itamochi,238 +instant ramen,238 +horse boy,238 +hormone koijirou,238 +honchu,238 +hebino rai,238 +haxorus,238 +hair half undone,238 +hagure keg,238 +gothic architecture,238 +golden delmo,238 +fur (clothing),238 +fox hat,238 +flower eyepatch,238 +expulse,238 +dobermann (arknights),238 +cum on armpits,238 +bokusatsu tenshi dokuro-chan,238 +blindfold lift,238 +bakuzan,238 +ao+beni,238 +anteater ears,238 +animeflux,238 +ze (phrase),237 +yuzuriha (under night in-birth),237 +velzhe,237 +tsukino wagamo,237 +tipii,237 +taka-michi,237 +suicide squad,237 +star pillow,237 +sora no amagumo,237 +sid story,237 +shukusuri,237 +shipwreck,237 +sekomumasada sensei,237 +seele vollerei (stygian nymph),237 +sasaoka gungu,237 +rose (pokemon),237 +richter belmont,237 +ribbon (kirby),237 +rezi,237 +retorillo,237 +relationship graph,237 +rei (tonbo0430),237 +paintrfiend,237 +outbreak company,237 +nyarla (osiimi),237 +nikaidou chizuru,237 +nattou,237 +natsu (blue archive),237 
+namahamu (hmhm 81),237 +murakami tomoe,237 +mont blanc (food),237 +momoirone,237 +mole on collarbone,237 +mo ne,237 +messy sleeper,237 +meditation,237 +light switch,237 +kurata sayuri,237 +kozakura (dictionary),237 +kirisawa saki,237 +kirin kakeru,237 +keita naruzawa,237 +katsura kotarou,237 +ichihisa,237 +hot chocolate,237 +hisoka morow,237 +grey bag,237 +gachimuchi,237 +frog costume,237 +freija crescent,237 +folait,237 +fatal frame 4,237 +fake beard,237 +echidna (queen's blade),237 +ddangbi,237 +daifukumochi (akaaokiiwo),237 +cel shading,237 +aruma jiki,237 +arcana heart 3,237 +anno88888,237 +a.a (aa772),237 +yuzumiya mono,236 +yukico-tan,236 +yokai (yokai0401),236 +wizard (ragnarok online),236 +washing back,236 +vanilla ice,236 +unown,236 +twitter-san,236 +twin angel,236 +trigun,236 +through window,236 +talkex,236 +sumeragi kaguya,236 +solopipb,236 +sikijou77o,236 +shoejob,236 +shishigami (sunagimo),236 +shiromiya asuka,236 +shinonome asu,236 +rikugou (rikugou-dou),236 +priestess,236 +pola (seaside coincidence) (azur lane),236 +pincers,236 +pallas (arknights),236 +paizuri on lap,236 +oruyanke (shirakami fubuki),236 +nenchi,236 +narumi tsuyu,236 +nano (nanosize),236 +mustard,236 +munchlax,236 +maine (honzuki no gekokujou),236 +maid cafe,236 +lost universe,236 +linda,236 +lappland (refined horrormare) (arknights),236 +kurumi (kantoku),236 +kurozawa yui,236 +kuroe shizuku,236 +kinjo kuromomo,236 +kendo,236 +kamiya midori,236 +kabayaki unagi,236 +joy division,236 +inuyama mana,236 +i-19 (azur lane),236 +huwari (dnwls3010),236 +hubrael,236 +heebee,236 +hecatia lapislazuli (moon),236 +hamaguri (hamaguri1234),236 +hama! (3toshinhmkz),236 +hakata toushirou,236 +hagakure yasuhiro,236 +gurimjang,236 +gunner-l,236 +gulp5959,236 +golf,236 +glowing petals,236 +ekko (league of legends),236 +ear pull,236 +ear chain,236 +diorama,236 +cutie mark,236 +coreytaiyo,236 +constance von nuvelle,236 +cidney aurum,236 +christmas tree hair ornament,236 +chameleon man (three),236 +cat's cradle,236 +canned food,236 +breasts on back,236 +bishoujo senshi sailor moon crystal,236 +avatar icon,236 +asahikawa hiyori,236 +arai-san mansion,236 +apple slice,236 +ammunition box,236 +akizuki shigure,236 +4shi,236 +yuuzuki (re'ef),235 +yuuki (irodo rhythm),235 +yuuichi (tareme paradise),235 +yume no tsue,235 +yamanokami eaka,235 +x arms,235 +wonderful rush,235 +utakata (kochou no yume),235 +touya (the-moon),235 +tibetan fox (kemono friends),235 +tenpesuto,235 +sumo,235 +strike witches: aurora no majo,235 +strawberry blossoms,235 +sketchbook full colors,235 +sekine shiori,235 +sakurano ru,235 +rum raisin (chihiromakita19),235 +rougetsu (eclipse),235 +roserade,235 +rolo lamperouge,235 +rainbow eyes,235 +poverty,235 +piyoko (uruha rushia),235 +pink babydoll,235 +philosopher's stone,235 +owler,235 +on railing,235 +no male underwear,235 +murakami kou (raye),235 +melon-chan,235 +meiko (inuarashi),235 +mata hari (fate),235 +master 2 (housamo),235 +marius von hagen (tears of themis),235 +magaki ryouta,235 +m.o.m.o.,235 +light cruiser princess,235 +koumajou densetsu 2,235 +kazuno leah,235 +karura (utawarerumono),235 +jeremiah gottwald,235 +james buchanan barnes,235 +jacy,235 +inkerton-kun,235 +hori shin,235 +honotai,235 +holy grail (fate),235 +hk416 (starry cocoon) (girls' frontline),235 +hie (hiememiko),235 +henet hene,235 +harumiya,235 +haoro,235 +hair over crotch,235 +gundam aerial,235 +goriate,235 +gambe,235 +flat sign,235 +evan yang,235 +eliskalti,235 +crotchless swimsuit,235 +babywearing,235 +anzu (ensemble stars!),235 +25-ji 
miku,235 +yuri kuma arashi,234 +window (computing),234 +vs seeker,234 +valtava,234 +unsomnus,234 +tsukimirin,234 +tsubasa chronicle,234 +thanatos (persona),234 +tamagogayu1998,234 +swiftsure (azur lane),234 +shimano natsume,234 +shamonabe,234 +sewing machine,234 +sensha otoko,234 +saihate (d3),234 +sagamiso,234 +renne (eiyuu densetsu),234 +raimon school uniform,234 +raikou,234 +psychedelic,234 +playstation 2,234 +panda girl,234 +palanquin ship,234 +overexposure,234 +oshiruko (tsume),234 +ootsuka shin'ichirou,234 +nonohachi,234 +nakajima konta,234 +mosin-nagant (girls' frontline),234 +mizu asato,234 +matsuda arisa,234 +manaria friends,234 +makai,234 +lace-trimmed collar,234 +kousaku,234 +kirche augusta frederica von anhalt zerbst,234 +khyle.,234 +kerok (joniko1110),234 +katsuwo (cr66g),234 +kaburamaru,234 +itohana,234 +inyucchi,234 +hourou musuko,234 +gon-san,234 +furret,234 +frankenstein's monster (swimsuit saber) (first ascension) (fate),234 +fairy knight gawain (second ascension) (fate),234 +dytm,234 +delia ketchum,234 +brown scrunchie,234 +blue stripes,234 +black santa costume,234 +bird boy,234 +bible (object),234 +beaver tail,234 +azuumori,234 +aurora sya lis kaymin,234 +arcade stick,234 +arai togami,234 +aragami oga,234 +anyan (jooho),234 +aihara yuzu,234 +yuuki eishi,233 +yasuda suzuhito,233 +winged heart,233 +water world,233 +wanta (futoshi),233 +tokitou muichirou,233 +tocky,233 +throat bulge,233 +tamamo no mae (jk) (fate),233 +takatsuki akira,233 +takamine noa,233 +tajima yoshikazu,233 +taisa (cookie),233 +sky girls,233 +sills,233 +sesshouin kiara (lily),233 +scimitar,233 +peaceful,233 +orphen,233 +oran berry,233 +okita j. souji (third ascension) (fate),233 +nyum,233 +noda (angel beats!),233 +narumiya (narumiya),233 +nanami lucia,233 +miyamasuzaka girls' academy uniform,233 +miyako (princess connect!),233 +miv4t,233 +miroku san-ju,233 +mercedes (odin sphere),233 +manhole cover,233 +machimura komori,233 +lisa (ponyo),233 +light stick,233 +kurumi momoka,233 +kotobuki tsukasa,233 +koohiitei shinbo,233 +komaru (himouto! 
umaru-chan),233 +kibihimi,233 +karuha,233 +kagawa ichigo,233 +kabeu mariko,233 +iori sei,233 +ice crystal,233 +honda naoki,233 +hagioshi,233 +gumiya,233 +gretel (sinoalice),233 +goodra,233 +giratina (origin),233 +garie tuman,233 +full-length mirror,233 +frioniel,233 +foreskin insertion,233 +female doctor (arknights),233 +elis (touhou),233 +ear twitch,233 +comic penguin club,233 +birth,233 +assisted stretching,233 +aphrodisiac,233 +aleriia v,233 +abe takakazu,233 +yuuki keisuke,232 +yurikawa,232 +yukiusagi (gaballscreen&blaze),232 +yui (sao-alo),232 +valley,232 +ukraine (hetalia),232 +tsunbeji,232 +tennis net,232 +tenma tsukasa,232 +table tennis ball,232 +souma hiroomi,232 +skuld (aa megami-sama),232 +shinkansen henkei robo shinkalion,232 +seungju lee,232 +scarecrow,232 +sawada yuusuke,232 +sasorigatame,232 +sarong lift,232 +rob ishi,232 +ress,232 +re (re 09),232 +pila-pela,232 +payphone,232 +outie navel,232 +okada (hoooojicha),232 +nekomura iroha,232 +nanobana kinako,232 +nagato-chan,232 +miyamori school uniform,232 +manticore (arknights),232 +mad (hazukiken),232 +konka,232 +kneepit sex,232 +kilart,232 +izumi yukiru,232 +hot pants (sbr),232 +hanenbo,232 +hanada hyou,232 +hama chon,232 +hakamichi shizune,232 +gundou mirei,232 +green camisole,232 +grandfather and granddaughter,232 +g36c (girls' frontline),232 +g-tetsu,232 +fumihiro,232 +fuecoco,232 +fagi (kakikaki),232 +emanon123,232 +drainpipe,232 +douji,232 +double bass,232 +decantering,232 +cure milky,232 +country connection,232 +choukou sennin haruka,232 +breast strap,232 +breakfast,232 +bodysuit pull,232 +biting clothes,232 +bandana waddle dee,232 +aposine,232 +antinomy of common flowers,232 +ankle wings,232 +amano hina (tenki no ko),232 +alchemy,232 +year of the monkey,231 +yak (kemono friends),231 +xenoblade chronicles x,231 +weasel,231 +v legs,231 +unsinkable sam,231 +the king of fighters xiii,231 +tarbo (exxxpiation),231 +string pull,231 +stepping,231 +soon (c-u-soon),231 +sin-go,231 +sibyl,231 +shaving,231 +rokko,231 +quilava,231 +plan (planhaplalan),231 +ottosfoxhole,231 +nazo no kanojo x,231 +nagato yuuki,231 +mizunashi kenichi,231 +minami haruka,231 +mikan (5555),231 +matterhorn (arknights),231 +masou gakuen hxh,231 +madogawa,231 +lasto,231 +lala (monster musume),231 +kyuri tizu,231 +kuro oolong,231 +kougi hiroshi,231 +kiyohime (swimsuit lancer) (third ascension) (fate),231 +kenken,231 +kanzaki kureha,231 +kan'u,231 +kamiyama high school uniform (project sekai),231 +judd (splatoon),231 +idolmaster movie,231 +idea,231 +hwansang,231 +holding cape,231 +hirokiku,231 +heroine (dq4),231 +grey rose,231 +gilse,231 +fur capelet,231 +fukuoka katsumi,231 +fishnet armwear,231 +eyepatch lift,231 +e sky rugo,231 +decorations,231 +cure yell,231 +cucouroux (granblue fantasy),231 +crimson comics,231 +clover (flower),231 +carro veloce cv-33,231 +bald eagle (kemono friends),231 +azuki yui,231 +ayase midori,231 +aoba tsugumi,231 +yuki miku (2011),230 +yorukun,230 +waka (shark waka),230 +wailord,230 +wafer,230 +volkor,230 +viewer on leash,230 +urec,230 +traumatized,230 +touhou cannonball,230 +toro (konirio),230 +tonguejob,230 +the terminator,230 +sleeping beauty,230 +silver chariot,230 +shimizu kiyoko,230 +saya pr,230 +rune (pixiv 25170019),230 +plant hair,230 +plaid sailor collar,230 +pink ocean,230 +phantom kingdom,230 +nishikata,230 +neko (yanshoujie),230 +morita kazuaki,230 +mononoke,230 +mikoto akemi,230 +mickey krog,230 +lina davis,230 +lazy lazy (idolmaster),230 +kuzumomo,230 +kumagai haito,230 +kiryuu makoto,230 +kinosaki,230 +kawakaze (azur 
lane),230 +kaiboukan no. 4 (kancolle),230 +juugonichi (wheeliex2),230 +impossible towel,230 +holy pumpkin,230 +hayyan,230 +gris swimsuit,230 +gomano rio,230 +gesogeso,230 +frilled vest,230 +etopen,230 +emperor penguin,230 +emerada (xenogears),230 +curse (023),230 +cure bloom,230 +cosine,230 +clockwork,230 +chou-10cm-hou-chan (teruzuki's),230 +cerberus (shingeki no bahamut),230 +bubble pipe,230 +billie (meng gong fang),230 +belt removed,230 +bard,230 +banagher links,230 +apollo justice,230 +ace attorney investigations,230 +aano (10bit),230 +zanzi,229 +yamaori (yamaorimon),229 +wolf link,229 +tower of fantasy,229 +threat,229 +the big o,229 +tetrodotoxin,229 +taki minashika,229 +takamura yui,229 +suzumiya haruhi (cosplay),229 +suoiresnu,229 +star wand,229 +square pupils,229 +sora yori mo tooi basho,229 +soba,229 +sirfetch'd,229 +sian,229 +shu-z,229 +shogi piece,229 +scat,229 +sawaguchi mai,229 +sakura taisen v,229 +roberta (black lagoon),229 +risumi (taka-fallcherryblossom),229 +rin2008,229 +rakka (haibane),229 +purple serafuku,229 +puni (miku plus),229 +panties day,229 +pachira,229 +oxygen tank,229 +oroborus,229 +oono tsutomu,229 +ogata tei,229 +norza,229 +nogami aoi,229 +nesoberi,229 +nakata sae,229 +murakami hisashi,229 +multiple vaginal,229 +mu mashu,229 +momotsuki gakuen school uniform,229 +mistletoe,229 +layered shirt,229 +lara croft,229 +kobeya,229 +klaudia valentz,229 +kisamu (ksmz),229 +kim yura (goddess mechanic),229 +kawajiri kosaku,229 +kasuga sunao,229 +kannari,229 +joker (dc),229 +j.moon,229 +hanny (uirusu chan),229 +h&k g36,229 +gundam barbatos,229 +goumudan,229 +ghostblade,229 +focke wulf,229 +firefighter jacket,229 +feiqizi (fkey),229 +eggman (pixiv28975023),229 +crab on head,229 +club hair ornament,229 +chemical structure,229 +cat pillow,229 +cammy white (cosplay),229 +breathing on hands,229 +blue blood,229 +benio (dontsugel),229 +alternate height,229 +54hao,229 +zeronis,228 +yoo tenchi,228 +woodland camouflage,228 +wolf spirit (touhou),228 +vinhnyu,228 +utahane,228 +upotte!!,228 +tsuchii (ramakifrau),228 +tanaka kaori,228 +swkl:d,228 +sugi,228 +splush wave,228 +skai kun,228 +single mechanical leg,228 +shaped lollipop,228 +seiken densetsu 2,228 +samuel oak,228 +salt,228 +s-a-murai,228 +rpg-7,228 +removing bra,228 +paizuri over clothes,228 +newtype flash,228 +neuroi,228 +multi-strapped bikini bottom,228 +moon print,228 +meruru (oreimo),228 +mephist,228 +maru-yu-san,228 +marta lualdi,228 +maritan (pixelmaritan),228 +manjoume jun,228 +magatsuchi shouta,228 +kubocha,228 +kisaragi ryou (sougetsu-tei),228 +kirby: star allies,228 +kinntarou,228 +kakiman,228 +junk gaming maiden,228 +jakko,228 +iwamoto sora,228 +idoly pride,228 +ibuo (ibukht1015),228 +hone (honehone083),228 +hentai key,228 +henreader,228 +heavy,228 +hair over mouth,228 +gundula rall,228 +girl (deemo),228 +gif artifacts,228 +fujikawa daichi,228 +frisbee,228 +french horn,228 +flying whale,228 +fighter (dq3),228 +dodome-iro mayonnaise,228 +disgaea d2,228 +danimaru,228 +crossbone gundam,228 +clumsy,228 +chiyo (rotsurechiriha),228 +chihiro (khorosho),228 +chaos;head,228 +beritabo,228 +alien1452,228 +adeptus astartes,228 +zero (miraichizu),227 +zebra ears,227 +yuno (suke yuno),227 +yuisis (granblue fantasy),227 +your diary,227 +ycco (estrella),227 +xu fu (fate),227 +wilhelmina braunschweig ingenohl friedeburg,227 +whistle frog,227 +villyane,227 +vertical-striped headwear,227 +uemukai dai,227 +trouble spirit!,227 +torso,227 +tokuno yuika,227 +steins;gate 0,227 +spots,227 +soulcalibur v,227 +sora no method,227 +siun,227 
+shirono,227 +shinjitsu,227 +shichirin,227 +severed hand,227 +sendou aichi,227 +scirocco (kancolle),227 +sbs,227 +roh nam kyung,227 +ren kougyoku,227 +reitou mikan,227 +queen's blade unlimited,227 +powered buttercup,227 +persona eyes,227 +paint on body,227 +oopartz yang,227 +o-ring collar,227 +nonohara akane,227 +naruse mio,227 +nanaroba hana,227 +nakaichi (ridil),227 +mitsuki meia,227 +mikoshiba mikoto,227 +makinaru,227 +macaron hair ornament,227 +louise halevy,227 +leonmandala,227 +kurenai karasu,227 +kokutou nikke,227 +kinshi no ane,227 +kimura kaere,227 +kemu (guruguru dan),227 +kathryne keyron,227 +jirou tachi,227 +jeanex,227 +izumo neru,227 +iya na kao sare nagara opantsu misete moraitai,227 +inuzuka bouru,227 +horosho,227 +holding whistle,227 +holding watering can,227 +h,227 +gypsy (ragnarok online),227 +giuseppe garibaldi (kancolle),227 +fushigi ebi,227 +fujiyama ichiha,227 +fujieda miyabi,227 +food-themed ornament,227 +firo (tate no yuusha no nariagari),227 +feather fan,227 +emil (nier),227 +emia renya,227 +elysion,227 +elina,227 +doupo cangqiong,227 +double dealing character,227 +digimon xros wars,227 +charmeleon,227 +bubbles (ppg),227 +bottle to cheek,227 +book on head,227 +black hole,227 +beize (garbage),227 +ao no roku-gou,227 +alice (sinoalice),227 +air gear,227 +yeklsa,226 +yankee,226 +wraith's kunai,226 +wiping forehead,226 +utu (ldnsft),226 +uo denim,226 +tsuzuya (knt31),226 +tsukishima hajime,226 +tokkihouse,226 +togedemaru,226 +suna (sunaipu),226 +sumapan,226 +sounding,226 +soapland,226 +sitting on box,226 +silver bikini,226 +shiny chariot,226 +shachiku succubus no hanashi,226 +searching,226 +ryara,226 +propaganda,226 +ootsuki momiji,226 +no more heroes,226 +nikaidou (dorohedoro),226 +ni-class destroyer,226 +nekokyun,226 +kureha (ironika),226 +kuon (kwonchanji),226 +kouhai-chan (douki-chan),226 +kodama yuu,226 +kendama,226 +kagamihara sakura,226 +jam (nandade),226 +itsuki kousuke,226 +iris anemone,226 +ichimi renge,226 +holding mallet,226 +higanbana no saku yoru ni,226 +haruken,226 +harry potter,226 +halphelt,226 +hainchu,226 +hai to hickory,226 +girlfriend (houkago play),226 +fuyuno haruaki,226 +futami yayoi,226 +enkidu (weapon) (fate),226 +elizabeth f. 
beurling,226 +element bending,226 +egami,226 +dr graevling,226 +dola (nijisanji),226 +dino (dinoartforame),226 +dan evan,226 +daida,226 +crown of thorns,226 +chixiao,226 +caress,226 +bible black,226 +beniko (ymdbnk),226 +belly-to-belly,226 +alexander dinh,226 +yuura,225 +yuta agc,225 +wing print,225 +white stripes,225 +white lion (kemono friends),225 +wancozow,225 +voodoo doll,225 +volcarona,225 +uzumaki kushina,225 +tsurusaki yuu,225 +tottoto tomekichi,225 +symphony regalia,225 +swimsuit theft,225 +super famicom,225 +spicy bardo,225 +soren (fire emblem),225 +single garter,225 +shin getter robo,225 +satsuki rin,225 +ruchi,225 +ribbon-trimmed shirt,225 +relm arrowny,225 +qblade,225 +paper hat,225 +panty tug,225 +otonashi saya,225 +omar dogan,225 +ol-chan (oouso),225 +nyan (reinyan 007),225 +neps-l,225 +native american headdress,225 +nakaseko kaori,225 +moe2019,225 +mixed-sex combat,225 +mitsukoshi (department store),225 +mine (weapon),225 +minato fumi,225 +metal belt,225 +mash kyrielight (cosplay),225 +mamimi (mamamimi),225 +lilim (monster girl encyclopedia),225 +licking weapon,225 +korrina (pokemon),225 +kirusu,225 +kidnapping,225 +kazo,225 +kavka,225 +katou (osoraku),225 +k pring,225 +judy hopps,225 +johnny (from scratch),225 +jittsu,225 +holoforce,225 +holding pot,225 +hino minato (spec.c),225 +heart hair,225 +head on arm,225 +goliath doll,225 +goldeen,225 +gobanme no mayoi neko,225 +garimpeiro,225 +gaiters,225 +gag around neck,225 +fleet,225 +fairy tone,225 +escha malier,225 +elaine (pokemon),225 +easy (aqk7bdqt),225 +dry humping,225 +drawn wings,225 +doseisan,225 +digimon frontier,225 +dfer,225 +cathedral,225 +byourou,225 +bell-bottoms,225 +battoujutsu stance,225 +barrett m82,225 +attsun (atsushi jb),225 +asutora-chan,225 +ashiwara yuu,225 +artemis (sailor moon),225 +arcee,225 +yuuhi alpha,224 +yukikaze kai ni (kancolle),224 +yamcha pose (meme),224 +wrestle angels survivor 2,224 +void princess (elsword),224 +view between legs,224 +up sleeve,224 +united states medal of honor,224 +tifa lockhart (cosplay),224 +thumbprint cookie,224 +taro (taro),224 +tantan men (dragon),224 +strawberry parfait,224 +starman (mario),224 +soredemo machi wa mawatteiru,224 +sleepover,224 +sitting in window,224 +shindouji school uniform,224 +sakana (ryuusui-tei),224 +relaxed,224 +real drive,224 +ragnarok masters,224 +priscilla the crossbreed,224 +pokegear,224 +phantasmagoria of flower view,224 +p (tidoriashi),224 +nagasawa shin,224 +murasaki iro,224 +misha (ohds101),224 +misaki juri,224 +masaki (ekakiningen),224 +maehara shinobu,224 +la pluma (summer flowers) (arknights),224 +kuroba dam,224 +kuro mushi,224 +kurisu sai,224 +kuria (clear trip second),224 +kuraki suzuna,224 +kukumomo,224 +konboi-eg,224 +komame (st beans),224 +kobashi daku,224 +kitsune (kazenouta),224 +kanojo x kanojo x kanojo,224 +kakone,224 +ishida yamato,224 +hiiragi tomoka,224 +hanazome dotera,224 +grandmother and granddaughter,224 +ghetsis (pokemon),224 +gazelle ears,224 +g-room honten,224 +fuse tail,224 +forte (shingeki no bahamut),224 +falcoon,224 +exif rotation,224 +egyptian loli (surio),224 +death stranding,224 +dangmill,224 +cypress,224 +curse maker 2,224 +cpu (hexivision),224 +cashier,224 +bone print,224 +baby carry,224 +artoria caster (first ascension) (fate),224 +alita,224 +463 jun,224 +yurikuta tsukumi,223 +yukoku roberu,223 +wonder woman,223 +unison (nanoha),223 +unel,223 +transparent raincoat,223 +tonchinkan,223 +takamura wamu,223 +stomach punch,223 +stinger,223 +spiked pauldrons,223 +shirt cut meme,223 +shiro seijo to kuro 
bokushi,223 +shigekikkusu,223 +shacho (ko no ha),223 +sayika,223 +sakakura juuzou,223 +sakae general school uniform,223 +realmbw,223 +purrloin,223 +poppi qtpi (xenoblade),223 +papi (papiron100),223 +papa no iu koto wo kikinasai!,223 +pages,223 +nonono (nononotea),223 +nearl the radiant knight (arknights),223 +nanael,223 +nagana sayui,223 +multicolored sky,223 +mokeo,223 +mojo,223 +mizutamako,223 +mirai (sugar),223 +milka (milk4ppl),223 +mike156,223 +melopun,223 +melanbread,223 +mapar,223 +maki keigo,223 +kyjsogom,223 +kurogane gin,223 +kujo jotaro's pose (jojo),223 +kotatsu kaya,223 +konoe a. mercury,223 +kisaragi miyu,223 +kiri (2htkz),223 +kinokomushi,223 +kereno,223 +kamehameha,223 +jumpluff,223 +jet set radio,223 +hornet (azur lane),223 +hiki niito,223 +hasegawa chisame,223 +gagraphic,223 +fukiyose seiri,223 +fish skeleton,223 +feather duster,223 +fajyobore,223 +derauea,223 +corticarte apa lagranges,223 +cecilia lynne adelhyde,223 +boke-chan,223 +bodskih,223 +blue reflection,223 +batacchi (mashimashi butter),223 +bakutendou,223 +bad neck,223 +ayya sap,223 +ars goetia,223 +arito arayuru,223 +apologizing,223 +anila (summer) (granblue fantasy),223 +amelia wil tesla seyruun,223 +ame (uten cancel),223 +7-eleven,223 +3 small spiders,223 +zukky,222 +yuzuki kei,222 +yuuka (gym uniform) (blue archive),222 +yuugo (atmosphere),222 +yuccoshi,222 +yasaka minato,222 +xxzero,222 +wet spot,222 +vanillite,222 +uchiu kazuma,222 +u2 (5798239),222 +tsujimoto natsumi,222 +tsugumi seishirou,222 +trombone,222 +toph bei fong,222 +togame,222 +tiger hood,222 +throtem,222 +sunbathing,222 +stereo,222 +speculum,222 +songjikyo,222 +shuriken hair ornament,222 +shukinuko,222 +shidou mariya,222 +seychelles (hetalia),222 +sanbou,222 +sage (granblue fantasy),222 +rokka no yuusha,222 +rapunzel (disney),222 +qkat (arikawa-dou),222 +putting on gloves,222 +otogibara era,222 +on pillow,222 +nessie (respawn),222 +nagatsukiin,222 +muuba,222 +muffet,222 +mosaic background,222 +moriyama shiemi,222 +mizuno kurage,222 +miraculous ladybug,222 +megurine luka (toeto),222 +marching band,222 +lilka eleniak,222 +kikuchi tsutomu,222 +kannagi kaname,222 +implied handjob,222 +holding earphones,222 +hitomi sensei no hokenshitsu,222 +hisanuma sayu,222 +hisame genta,222 +hiroki ree,222 +hibiscus (arknights),222 +hayase ruriko (yua),222 +hat around neck,222 +gedou (ge ge gedou),222 +fujisaki yuu,222 +dratini,222 +dengeki bunko,222 +cross-species cosplay,222 +cracked screen,222 +chinchongcha,222 +bushidou (sekaiju),222 +brown feathers,222 +bonfire,222 +betchan,222 +belial (granblue fantasy),222 +battlefield (series),222 +ayase honoka,222 +arcane vi,222 +akinashi yuu,222 +aibumi,222 +zeco,221 +youshuu,221 +yomiyama north junior high school uniform,221 +yn red,221 +yasakani an,221 +wok,221 +vegetto,221 +vardan,221 +uterine prolapse,221 +two-tone shorts,221 +two-tone panties,221 +tsumura tokiko,221 +tsukimi,221 +tsab naval military uniform,221 +tobimura,221 +stone free,221 +snoring,221 +snapchat,221 +skull belt,221 +shirataki kaiseki,221 +seiken no blacksmith,221 +saw cleaver,221 +revenant (apex legends),221 +reitaisai,221 +rash guard,221 +purple nipples,221 +print male underwear,221 +planetarian,221 +omake,221 +monaim,221 +mienshao,221 +midori niku,221 +mashima saki (mashimasa),221 +magatsumagic,221 +magai akashi,221 +leonmitchelli galette des rois,221 +kyogoku-uru,221 +krokobyaka,221 +kojima (blue stardust),221 +kobone,221 +kagura hikari,221 +jampen,221 +irori,221 +hoto mocha,221 +holding goggles,221 +grabbing own thigh,221 +gero zoukin,221 
+fuck-me shirt,221 +fu hua (valkyrie accipiter),221 +frye (splatoon),221 +front-hook bra,221 +flugel (kaleido scope-710),221 +ferdinand von aegir,221 +eiden (nu carnival),221 +cum in navel,221 +colored text,221 +chocho (homelessfox),221 +cassette player,221 +carro pino,221 +bucchake (asami),221 +boruto: naruto the movie,221 +bondage mittens,221 +black surge night,221 +black souls,221 +bikini briefs,221 +an2a,221 +aihara mei,221 +@@@,221 +yururi nano,220 +yoko littner (cosplay),220 +yakumo ran (fox),220 +xichii,220 +westxost (68monkey),220 +usagi koushaku,220 +uesugi u. kyouko,220 +torn boots,220 +tio plato,220 +takumi (scya),220 +takamura kazuhiro,220 +strong,220 +spiked ball and chain,220 +severa (fire emblem),220 +scar on back,220 +rokudou mukuro,220 +roarke (lavenderincubus),220 +ricochet-gou,220 +r-binon,220 +pumpkin costume,220 +phamoz,220 +paru rari,220 +paper child,220 +panty mask,220 +neonbeat,220 +naruse chisato,220 +musujime awaki,220 +monsterverse,220 +mofurun (mahou girls precure!),220 +misonou hirokichi,220 +minarai shachou,220 +melt (vocaloid),220 +kusada souta,220 +kso,220 +kotatsu (kotatsu3),220 +knightmare frame,220 +kiya shii,220 +kazana (sakuto),220 +katou keiko,220 +kappa mob (touhou),220 +kamen rider revi,220 +kagawa yuusaku,220 +juna,220 +jenevan,220 +itou nobue,220 +inflatable armbands,220 +indirect kiss,220 +ikari (aor3507),220 +ikaasi,220 +holding gohei,220 +holding crown,220 +hohehohe,220 +hayate immelmann,220 +hastur (nyaruko-san),220 +haruta (806060),220 +hamadaichi,220 +haganemaru kennosuke,220 +gambol shroud,220 +galibo,220 +gakky,220 +fur bikini,220 +eternal return: black survival,220 +energy drain,220 +earasensha,220 +cyberpunk 2077,220 +cum on food,220 +code geass: boukoku no akito,220 +choco (chocolate shop),220 +chireiden,220 +chastity cage,220 +butterfly on head,220 +brodie helmet,220 +blushyspicy,220 +azuki (azuki-taste),220 +asteroid,220 +asagi yuna,220 +arisugawa himari,220 +arino hiroshi,220 +ar (lover boy),220 +aonaga heri,220 +altina orion,220 +akarui kioku soushitsu,220 +zpolice,219 +zone-tan,219 +zone-archive,219 +yuki miku (2021),219 +yuki miku (2013),219 +yu-ri,219 +xephonia,219 +waai fu (arknights),219 +twintelle (arms),219 +translucent skin,219 +therion (octopath traveler),219 +testicle peek,219 +tenjin hidetaka,219 +tales of hearts,219 +suzushina yuriko,219 +sui (suizilla),219 +strike witches 1991,219 +stairwell,219 +st (youx1119),219 +sho (runatic moon),219 +shibusun,219 +schreibe shura,219 +sakusyo,219 +saegusa wakaba,219 +roulette table,219 +rolling bubbles,219 +rein (futagohime),219 +postcard,219 +pinstripe pants,219 +pinakes,219 +penguin 2-gou,219 +osuman toruko,219 +ookiku furikabutte,219 +nozomi (princess connect!),219 +no entry sign,219 +no detached sleeves,219 +nanaya shiki,219 +murai shinobu,219 +mount lady,219 +miyasaka miyabi,219 +mirrored text,219 +mary read (fate),219 +mapo tofu,219 +maku ro,219 +kyouka (halloween) (princess connect!),219 +kuuro kuro,219 +koito otonoshin,219 +kamen rider zi-o (series),219 +ippongui,219 +inoue takuya,219 +howard alt-eisen,219 +hinasaki you,219 +hephaestus (housamo),219 +gondola,219 +fuuna,219 +fukudahda,219 +fukazaki,219 +fire emblem: shadow dragon and the blade of light,219 +eve santaclaus,219 +dema hmw,219 +daiishori,219 +cross ange,219 +caracol,219 +butterfly earrings,219 +bunny panties,219 +border break,219 +awa yume,219 +arrow in body,219 +arakawa tarou,219 +alternate facial hair,219 +aegis (nerocc),219 +admiral (warship girls r),219 +zombification,218 +yuriwhale,218 +yamcha,218 
+yamasuta,218 +witch (puyopuyo),218 +vins-mousseux,218 +uut,218 +underbarrel grenade launcher,218 +umihotaru harumare,218 +two-sided cloak,218 +tenkuu nozora,218 +tamatsukuri misumaru,218 +sunrise stance,218 +sports utility vehicle,218 +sophie twilight,218 +shigetake (buroira),218 +sett,218 +scorpion,218 +saratoga mk ii (kancolle),218 +sakuragi hinako,218 +sakura megumi,218 +rita rossweisse (umbral rose),218 +q-bee,218 +poliwag,218 +pokemon on back,218 +pix (league of legends),218 +pin,218 +piercing through clothes,218 +petra ral,218 +pasutel,218 +noto (soranoto),218 +nipple rub,218 +nightcat,218 +newhalf with male,218 +nanakaku,218 +munetani mashiro,218 +muguet,218 +miyagawa takane,218 +minion 1 (zannen onna-kanbu black general-san),218 +mikoto (oi plus),218 +mikimoto haruhiko,218 +mightyena,218 +mahou shoujo lyrical nanoha innocent,218 +magic trick,218 +low braid,218 +lee-enfield,218 +kujo holy,218 +kikkoumon,218 +kayon (touzoku),218 +karbo,218 +kakogawa tarou,218 +junwool,218 +jaku sono,218 +izumi akane,218 +hou (ppo),218 +holding paddle,218 +heero yuy,218 +hanasaki miyabi,218 +fiora (league of legends),218 +fender stratocaster,218 +deluxe<<<,218 +curss,218 +computer tower,218 +chisumi,218 +chintora0201,218 +canal vorfeed,218 +braveman,218 +box (hotpppink),218 +blazblue remix heart,218 +battle bunny riven,218 +arms around back,218 +arm scarf,218 +anabel (pokemon),218 +amahara subaru,218 +akebi-chan no serafuku,218 +akahi242,218 +air jordan,218 +zako (arvinry),217 +yukizome chisa,217 +youkai watch 2,217 +yan pai,217 +wild and horned hermit,217 +white nightgown,217 +watabe koharu,217 +waist cutout,217 +udin (kureiji ollie),217 +tote bag,217 +topia,217 +toku (ke7416613),217 +teruru,217 +terence t. d'arby,217 +swiss flag,217 +swanna,217 +stupa13a,217 +sniper,217 +shinano toushirou,217 +shikibe ayaka,217 +setmen,217 +sesena yau,217 +sen no kiseki iv,217 +salmon run (splatoon),217 +ruku (alicecreation),217 +purple suit,217 +ponytail holder,217 +pandain,217 +otonashi meru,217 +open towel,217 +oiled,217 +ne an ito,217 +nana (ice climber),217 +muu rian,217 +morte (arknights),217 +monoe,217 +miu (miuuu 721),217 +minoa (lastswallow),217 +mikhail buran,217 +midori (misuriru8),217 +mattari illust,217 +luz noceda,217 +las91214,217 +kurusu kimihito,217 +kiyoh bachika,217 +kawashima sapphire,217 +kannagi itsuki,217 +junpaku karen,217 +jaw drop,217 +jade curtiss,217 +indesign,217 +hujikok,217 +honolulu (summer accident?!) (azur lane),217 +holding whisk,217 +helena blavatsky (third ascension) (fate),217 +height conscious,217 +hecatia lapislazuli (cosplay),217 +harusawa,217 +gurageida,217 +gundam exia,217 +gumi (v3 megpoid),217 +gucchiann,217 +glaze lily,217 +gijang,217 +flandre scarlet (cosplay),217 +elu (nijisanji),217 +elhaym van houten,217 +cu chulainn alter (third ascension) (fate),217 +colored pencil,217 +coat of arms,217 +chou-10cm-hou-chan (hatsuzuki's),217 +booota,217 +blanka,217 +bell cranel,217 +ara ara,217 +amano keita,217 +alm (fire emblem),217 +11eyes,217 +zhibuji loom,216 +yasojima nejiro,216 +yakitate!! 
japan,216 +yagi toshinori,216 +wild arms 4,216 +vittorio veneto (azur lane),216 +under skirt,216 +u jie,216 +tsukuyomi komoe,216 +tsukamoto yakumo,216 +tokimeki memorial 1,216 +tenjou tenge,216 +team rocket grunt,216 +takomeshi,216 +suspenders hanging,216 +super robot wars 30,216 +sunken cheeks,216 +sumaki shungo,216 +strip game,216 +spoken skull,216 +sneer,216 +shigureru,216 +shadows house,216 +sengoku musou 2,216 +sam yang,216 +sakura kotetsu,216 +saito katuo,216 +saikai academy uniform,216 +rum (girls und panzer),216 +romancing saga,216 +rizzl,216 +repair bucket,216 +reoen,216 +red wristband,216 +red goggles,216 +pyramid (geometry),216 +piro (orip),216 +photon ray (fate),216 +photo album,216 +persona 5: dancing star night,216 +paint stains,216 +no choker,216 +nel zelpher,216 +ne-on,216 +nakatokung,216 +moto murabito,216 +moon stick,216 +monster farm,216 +miwa yoshikazu,216 +matokechi,216 +licking hand,216 +kuzuki souichirou,216 +kuroshio kai ni (kancolle),216 +ke-su,216 +johnny funamushi,216 +jakelian,216 +impostor (among us),216 +imageboard colors,216 +hinata natsumi,216 +heart belt,216 +hazuki haru,216 +hands on own chin,216 +green butterfly,216 +grandia lee,216 +gorou (darling in the franxx),216 +godees,216 +gaia (ff14),216 +fumiko (mesushi),216 +fuepo,216 +food wrapper,216 +feet only,216 +doraeshi,216 +doppel (bonnypir),216 +cumdrip onto panties,216 +colonel sanders,216 +christina (princess connect!),216 +choujuu gigaku,216 +canards,216 +boku to koi suru ponkotsu akuma.,216 +barbara (dq6),216 +aoin,216 +akira (been0328),216 +airport,216 +agrius metamorphosis,216 +218,216 +yuuhi homare,215 +wehrmacht,215 +vofan,215 +toyosaki shu,215 +tien (granblue fantasy),215 +tantaka,215 +takitsubo rikou,215 +synthesizer,215 +sword of dios,215 +supergirl,215 +stuffed tiger,215 +stmast,215 +stasis tank,215 +smother,215 +slytherin,215 +slow loop,215 +shina shina,215 +sharumon,215 +saber marionette j,215 +ribbed swimsuit,215 +retoree,215 +rear-view mirror,215 +ramanda,215 +raira academy uniform,215 +r3dfive,215 +qilin (mythology),215 +poin,215 +phoenix wright: ace attorney - dual destinies,215 +patricia martin,215 +parasite eve,215 +orimura ichika,215 +open arms,215 +olive laurentia,215 +nyoibo,215 +neopure,215 +mutton chops,215 +mmm,215 +miyoshi sana,215 +meguru (megurunn),215 +mashiro (blue archive),215 +mabuchoco m,215 +levy mcgarden,215 +lens flare abuse,215 +ld (luna dial398),215 +lady avalon (second ascension) (fate),215 +konno makoto,215 +kizuki aruchu,215 +kekocha,215 +kanbe kotori,215 +kagura tohru,215 +june,215 +inumaki toge,215 +inari one (umamusume),215 +hoshino yumemi,215 +holding necktie,215 +holding baby,215 +hasekura rei,215 +hand on weapon,215 +grim reaper,215 +great pyrenees,215 +giuniu,215 +gipsy danger,215 +gardening,215 +ganbare goemon,215 +emerane,215 +dyed ahoge,215 +disintegration,215 +condom in clothes,215 +boater hat,215 +beatrix (ff9),215 +ayanami kai ni (kancolle),215 +asou shin,215 +arikanrobo,215 +ariake (kancolle),215 +aoi tobira,215 +anzumame,215 +animal in clothes,215 +alchemaniac,215 +akali (legacy),215 +acchii (akina),215 +.sin,215 +zaregoto series,214 +zangyaku-san,214 +wooden pencil,214 +veteran mercenary echidna,214 +ultraman,214 +ultima (fft),214 +tsumugi (princess connect!),214 +toridamono,214 +tilm,214 +teu (navy),214 +tamiya akito,214 +tales of innocence,214 +suzuki arisa,214 +suzaku (zaku6584),214 +stew,214 +stalker (ragnarok online),214 +sonken,214 +sig sauer p226,214 +shigaraki (strobe blue),214 +serizawa mutsuki,214 +ryuinu,214 +rosary,214 +rokuichi,214 
+rensyu,214 +pumpkinsinclair,214 +pokemon unite,214 +otomedius,214 +open pajamas,214 +ohitsu,214 +ofuda on pussy,214 +nose art,214 +ninomotonino,214 +nigiri (ngr24),214 +navel focus,214 +narushima kanna,214 +nana (raiupika),214 +namco,214 +nakamura sumikage,214 +nagareboshi,214 +mkiiiiii,214 +mirage farina jenius,214 +mijinko (rioriorio),214 +lemontea,214 +lanxi zhen,214 +laco soregashi,214 +kitarou,214 +jyushimatsu's girlfriend,214 +jakob (fire emblem),214 +jacket tug,214 +hiroshimaben,214 +hijab,214 +hattori shizuka,214 +hakuishi aoi,214 +goroo (eneosu),214 +gengetsu chihiro,214 +futakoi,214 +fukumaaya,214 +fairy fencer f,214 +euryale (third ascension) (fate),214 +eiji (eiji),214 +edward wong hau pepelu tivrusky iv,214 +dowman sayman,214 +dark pit,214 +cure gelato,214 +chrono harlaown,214 +chinchou,214 +cape hold,214 +calamity queller (genshin impact),214 +buta tamako,214 +brown ascot,214 +brick road,214 +brat (brabrabrat00),214 +blaccura,214 +beta (inazuma eleven),214 +barashiya,214 +axew,214 +anz32,214 +angol mois,214 +akane-iro ni somaru saka,214 +yukiguni yuu,213 +waist poke ball,213 +viva!!,213 +upanishi mariko,213 +ukulele,213 +turn a gundam (mobile suit),213 +tsunashima shirou,213 +tokiti,213 +tetsuwan atom,213 +terimayo,213 +taikogane sadamune,213 +sv-98 (girls' frontline),213 +super-shorty (girls' frontline),213 +stiff tail,213 +stella vermillion,213 +sorano aoi,213 +sora (efr),213 +sophocles (pokemon),213 +sidon,213 +shigehiro (hiroi heya),213 +sezok,213 +sbel02,213 +sawatari (sado),213 +rororo,213 +rena lanford,213 +reddizen,213 +pozyomka (arknights),213 +poligon (046),213 +pkp (girls' frontline),213 +pansy,213 +orange lips,213 +nyagakiya,213 +no hairband,213 +moltres,213 +mochirong,213 +loran cehack,213 +lila decyrus,213 +lakshmibai (fate),213 +lainart,213 +kusuriuri (mononoke),213 +kogane (staygold),213 +kita (kitairoha),213 +kindergarten teacher,213 +karei,213 +kamijo haruna,213 +jimmy madomagi,213 +inside creature,213 +hyocorou,213 +hyakkimaru (dororo),213 +hoop skirt,213 +hera (p&d),213 +green male underwear,213 +fujinoki (horonabe-ken),213 +formaggio,213 +enelis,213 +dolphin girl,213 +debidebi debiru,213 +cyrus (pokemon),213 +chariot (black rock shooter),213 +castlevania: rondo of blood,213 +ashiroku (miracle hinacle),213 +1up,213 +zizi (zz22),212 +yukimitsuki,212 +white blindfold,212 +wax seal,212 +waraningyou,212 +wagon,212 +vertical-striped swimsuit,212 +umarutsufuri,212 +tsurugi minko,212 +tsukasa jun,212 +tramp stamp,212 +tomiokasena,212 +thumb biting,212 +team plasma,212 +tangela,212 +slam dunk (series),212 +sheepd,212 +scuba,212 +sakurai yumeko,212 +sabachiyo land,212 +romulus (fate),212 +reimei (r758120518),212 +recycling symbol,212 +rapidash,212 +puracotte,212 +punch-out!!,212 +puge,212 +penis milking,212 +pekopokox,212 +oppai challenge,212 +ootori tatta,212 +ningguang (orchid's evening gown) (genshin impact),212 +narwhal (kemono friends),212 +narumiya yume,212 +nanika (azumi inori),212 +namakuby,212 +mku,212 +miyama amehiko,212 +mind break,212 +mikan box,212 +mexico,212 +male underwear aside,212 +lunatic (tiger & bunny),212 +kyururu (kemono friends),212 +kunio-kun series,212 +kou1,212 +kinhasu,212 +kikuchi seiji,212 +karasuma chitose,212 +kamen rider drive (series),212 +kageyama tobio,212 +infernape,212 +hoshizaki reita,212 +holding ears,212 +holding drinking straw,212 +hijikata-san (m.m),212 +hercule barton,212 +helianthus (girls' frontline),212 +hayakawa pao,212 +haruna (aoki hagane no arpeggio),212 +guy cecil,212 +gundam f91,212 +grasslands,212 +glowing 
lines,212 +gazebo,212 +foxvulpine,212 +final fantasy iv the after,212 +erwin smith,212 +dress in mouth,212 +do (4-rt),212 +cure ace,212 +cryokinesis,212 +colored condom,212 +coconut (nekopara),212 +clarinet,212 +blizzard,212 +asazuki norito,212 +arthur ko,212 +arakawa under the bridge,212 +ai shite! homun,212 +1041 (toshikazu),212 +zerosu (take out),211 +yaguo,211 +water lily flower,211 +usami taiga,211 +tokiwa (mukoku),211 +tnolize,211 +the shining,211 +tar-21 (girls' frontline),211 +taisa (lovemokunae),211 +tail insertion,211 +spines,211 +source quote parody,211 +silveroid,211 +shunsei (muratou),211 +shikuta maru,211 +psychic hearts,211 +pinecone (arknights),211 +panther chameleon (kemono friends),211 +panda hair ornament,211 +okuto,211 +nidoking,211 +moose (moosemitchell2),211 +modeling,211 +mito ikumi,211 +miku (darling in the franxx),211 +maria traydor,211 +mamiyama,211 +long nipples,211 +lisianthus,211 +lipstick ring,211 +libiadan,211 +lemonade,211 +ledjoker07,211 +krystal,211 +kouki kuu,211 +kiyohime (third ascension) (fate),211 +kaze-hime,211 +kawano takuji,211 +kaisenpurin,211 +kai (akamekogeme),211 +isaki kaname,211 +inamitsu shinji,211 +ichi-go,211 +ibuki (azur lane),211 +i heart...,211 +hyena girl,211 +hong meiling (panda),211 +hole in wall,211 +holding grenade,211 +hirose sumire,211 +higaragi,211 +harukaze unipo,211 +haqua d'rot herminium,211 +green suit,211 +granado espada,211 +feather necklace,211 +fay (fay axl),211 +eyewear in mouth,211 +exposed muscle,211 +endou (zettai bluenoid),211 +elbe (azur lane),211 +dx,211 +dogoo,211 +clock hands,211 +cibo,211 +charles schulz (style),211 +chain belt,211 +cezaria,211 +capera,211 +cain highwind,211 +bulge press,211 +borderlands (series),211 +bomberman,211 +bafarin,211 +atra mixta,211 +asakusa midori,211 +arceonn,211 +araizumi rui,211 +anthuria,211 +american football,211 +203wolves,211 +yuezheng ling,210 +yayaka,210 +wooden ceiling,210 +ventus (kingdom hearts),210 +tsukino (nakajimaseiki),210 +tomato rice,210 +tirotata,210 +suzuran (spring praise) (arknights),210 +soukuu kizuna,210 +silver pubic hair,210 +shiraki meiko,210 +shimakaze (soundz of bell),210 +shijima (sjmr02),210 +sex ed,210 +seto sun,210 +servant (danganronpa),210 +sekiguchi miiru,210 +seele vollerei (swallowtail phantasm),210 +sanoba witch,210 +s gentian,210 +ryou sakazaki,210 +rain mikamura,210 +raichiyo33,210 +predator (movie),210 +oohara kyuutarou,210 +nanin,210 +nakajou,210 +multicolored shorts,210 +multi (to heart),210 +mieruko-chan,210 +melaton,210 +left 4 dead,210 +lee hyeseung,210 +kuo shenlin,210 +konori mii,210 +koiso usu,210 +kisaragi kiriha,210 +kiryu tsukasa (idolmaster),210 +kero sweet,210 +katsudansou,210 +kangetsu (fhalei),210 +kaitou tenshi twin angel,210 +izayoi seishin,210 +ibara riato,210 +i-203 (kancolle),210 +himeji mizuki,210 +gotou moyoko,210 +gigamessy,210 +giant tree,210 +garen (league of legends),210 +galaxia (sword),210 +freezing (series),210 +franz (217franz),210 +flags of all nations,210 +facing back,210 +eiffel tower,210 +closure (arknights),210 +chimecho,210 +cassandra (seishun katsu sando),210 +blazblue variable heart,210 +birthday party,210 +baisi shaonian,210 +azuse neko,210 +armored vehicle,210 +ariga tou,210 +amagasa yun,210 +all the way through,210 +akimaki yuu,210 +akikaze tsumuji,210 +yuujin (yuzinn333),209 +yulong (journey to the west),209 +yoshiheihe,209 +x&x&x,209 +wu zetian (first ascension) (fate),209 +whale print,209 +wartortle,209 +ushigome rimi,209 +ursula (23),209 +tomioka jirou,209 +thomas 8000,209 +teruyof,209 +tanabe 
kyou,209 +symbiote,209 +suou,209 +sun tattoo,209 +sola7764,209 +skull collar,209 +shoukaki (earthean),209 +shougun (chuckni1),209 +shinzui (fantasysky7),209 +shiki (catbox230123),209 +seven sisters high school uniform,209 +serika (swimsuit) (blue archive),209 +sereneandsilent,209 +santa (sunflower),209 +rocking chair,209 +rhongomyniad (fate),209 +reiga (act000),209 +raynare,209 +rariemonn,209 +playstation 5,209 +perfumer (arknights),209 +pen in pocket,209 +parvati (fate),209 +ousaka nozomi,209 +otabek altin,209 +oogure ito,209 +on wall,209 +namine ritsu,209 +mozu (peth),209 +morry,209 +momdroid (mechanical buddy universe),209 +mole above eye,209 +moeki yuuta,209 +mirai (kemono friends),209 +miraa (chikurin),209 +mimura zaja,209 +milky rose,209 +mendou saya,209 +meme (me!me!me!),209 +maria balthasar,209 +light persona,209 +lavolpe (yagisaka seto),209 +kousetsu,209 +kevbot,209 +kenshiro,209 +july,209 +jenet behrn,209 +jason (fate),209 +ikune juugo,209 +ijima yuu,209 +hoshimi junna,209 +hitotsuyanagi riri,209 +hikanyan,209 +hidori (hibi toridori),209 +heel pop,209 +heart-shaped food,209 +hamsterfragment,209 +gray fullbuster,209 +gen (enji),209 +galarian ponyta,209 +fly (marguerite),209 +duraludon,209 +douki-kun (douki-chan),209 +clyde s,209 +cervus,209 +capybara,209 +bon (rump),209 +blueorca,209 +black butterfly,209 +birdy cephon altirra,209 +bajima shouhei,209 +arrow in head,209 +alphys,209 +akira (meltyhip),209 +3;,209 +yukimasa (nkk145),208 +white male swimwear,208 +wakabayashi tomoka,208 +vf-25,208 +unadon,208 +tsudero,208 +tavern,208 +sumeragi hamao,208 +straylight (idolmaster),208 +step-siblings,208 +speech stab,208 +smock,208 +slingshot,208 +shuumatsu no izetta,208 +shroud of magdalene,208 +shiramori yuse,208 +sako (35s 00),208 +ragnell,208 +raft,208 +polka dot hairband,208 +phoebe (pokemon),208 +ousaka nanami,208 +onibi (foxhound4185),208 +okiyumi kase,208 +naha78,208 +miyamura miyako,208 +mirai (senran kagura),208 +minazuki haruka,208 +mikurou (nayuta),208 +medea (lily) (fate),208 +manyako (mohumohu),208 +luna-p,208 +lucy (elfen lied),208 +kongiku,208 +kitada mo,208 +kishi nisen,208 +karaage bou,208 +kamui (kill la kill),208 +kamen rider hibiki (series),208 +jurrig,208 +itou kaiji,208 +itoshiki rin,208 +irie miyuki,208 +inuyama tamaki,208 +humming,208 +hsin,208 +honda takaharu,208 +hibiki dan,208 +hareno chiame,208 +hamazura shiage,208 +grey serafuku,208 +greco roman (spiral brain),208 +godzilla (shin),208 +giant snake,208 +gantz suit,208 +gagaga girl,208 +fate testarossa (true sonic form),208 +evelysse (star ocean),208 +ether core,208 +director chimera (spy x family),208 +detached arm,208 +date pun,208 +crimson avenger (elsword),208 +cream cod,208 +colossal titan,208 +coco (disney),208 +chunpai,208 +bulga,208 +asacoco,208 +aryuma772,208 +arisu kazumi,208 +apron hold,208 +anatomy,208 +adult baby,208 +00047,208 +zero hime,207 +yuki usagi (snowcanvas),207 +yui tsuruno,207 +yamane masahiro,207 +yamanashi taiki,207 +yaezawa natori,207 +warp pipe,207 +wafu (youzora samo18),207 +wachiwo,207 +ushiromiya krauss,207 +uryuu minene,207 +tsunemori akane,207 +thunder,207 +terebi (shimizu1996),207 +superpig,207 +sunoharasou no kanrinin-san,207 +sulking,207 +suan ringo,207 +speckticuls,207 +sitting on pillow,207 +shachoo.,207 +sasachin (k+w),207 +salt bae (meme),207 +rope walking,207 +r-king,207 +odayan,207 +nohara shinnosuke,207 +myouga (plant),207 +mobile trace suit,207 +middle w,207 +mei miya,207 +legendary super saiyan,207 +kusanagi kaoru,207 +kurokawa otogi,207 +kukuri,207 +kubiwa (kutan),207 
+kimijima sara,207 +key frame,207 +kawatsu yuuki,207 +karibuchi hikari,207 +kanna asuke,207 +kangoku kou,207 +isolated island princess,207 +ironwork,207 +inflatable whale,207 +horizon ariadust,207 +honzawa yuuichirou,207 +hizagawa rau,207 +himukai yuuji,207 +himawari-san (character),207 +hikawa79,207 +highway,207 +hands on another's waist,207 +hachisuka kotetsu,207 +gyari (imagesdawn) (style),207 +elfheim,207 +don (rg06268),207 +digimon adventure tri.,207 +death note (object),207 +david liu,207 +convenient head,207 +chemistry,207 +checkered headwear,207 +caren hortensia (amor caren) (second ascension),207 +baanin,207 +aya brea,207 +ankimo (tokino sora),207 +altair (re:creators),207 +agnamore,207 +2018 fifa world cup,207 +zacian,206 +yuuri nayuta,206 +yusano,206 +yui (ceremonial) (princess connect!),206 +yoohi,206 +yofukashi no uta,206 +yami kawaii,206 +washington (azur lane),206 +wanimaru,206 +velma dace dinkley,206 +ueno-san wa bukiyou,206 +tosura-ayato,206 +tori udon,206 +tomusooya,206 +the-sinner,206 +tetra,206 +tegaki draw and tweet,206 +tabletorgy,206 +swedish flag,206 +sugimori ken,206 +spider tattoo,206 +snake bondage,206 +shiroe (log horizon),206 +serio (to heart),206 +saltydanshark,206 +rybiok,206 +roger (guilty gear),206 +risty,206 +riho,206 +quaxly,206 +purple camisole,206 +princess chain chomp,206 +primrose azelhart,206 +plus sign,206 +patting lap,206 +outo eguchi,206 +orchid,206 +orange border,206 +object behind back,206 +norimaki arale,206 +neeko (aldehyde),206 +nanamomo rio,206 +musyne xsk,206 +minami mirei,206 +merurulince rede arls,206 +meipoi,206 +lakilolom,206 +kyou 039,206 +kuroi mato,206 +kumao mofumofu,206 +kotobuki (stealth sendan),206 +kagami kuro,206 +jade leech,206 +izumi (stardustalone),206 +igarasy,206 +hyakuen raitaa,206 +holding dress,206 +hellhound (monster girl encyclopedia),206 +head between pecs,206 +hand on own penis,206 +gunjou row,206 +grabbing own arm,206 +goat boy,206 +gin (oyoyo),206 +gensou suikoden iii,206 +genkung,206 +galhound,206 +fruit punch,206 +feh (fire emblem heroes),206 +falkner (pokemon),206 +erhu,206 +elemental (creature),206 +delthea (fire emblem),206 +celica a. 
mercury,206 +cairngorm (houseki no kuni),206 +boyano,206 +blue whale,206 +blue sweater vest,206 +benesse,206 +ayumaru (art of life),206 +axolotl,206 +ariel (disney),206 +argyle dress,206 +aoyama blue mountain,206 +yonekura hisaki,205 +yomako,205 +yamaori,205 +yakumo yukari (cosplay),205 +xenomorph,205 +will anthonio zeppeli,205 +vega (street fighter),205 +utsurogi akira,205 +unsfrau,205 +unname,205 +undone bra,205 +tsuchimikado motoharu,205 +tree of life,205 +torinari (dtvisu),205 +tokihama jirou,205 +tekken tag tournament 2,205 +teaching,205 +sushikuugo (suisen),205 +suruga kreuz,205 +string around finger,205 +straw (stalk),205 +steenee,205 +shirt under shirt,205 +shiroshisu,205 +shirasu youichi,205 +serizawa akane,205 +salt (salty),205 +queen's blade white triangle,205 +pokemon gsc (prototype),205 +pink lady mage,205 +pile bunker,205 +orange cardigan,205 +ophilia clement,205 +one - kagayaku kisetsu e,205 +omucchan (omutyuan),205 +nidai nekomaru,205 +ni (221),205 +momoi komomo,205 +miyako (xxxbibit),205 +lee (dragon garou),205 +kunifuto,205 +kugimiya rie,205 +kozou (rifa),205 +kntrs (knyrs),205 +knights of blood uniform (sao),205 +kawaii boku to 142's (idolmaster),205 +kasuga ayumu (haruhipo),205 +kanemaki thomas,205 +kaname junko,205 +k00s,205 +izumi masashi,205 +inayama,205 +ie (raarami),205 +hung (arknights),205 +holding pointer,205 +hinoshita akame,205 +hinomoto madoka,205 +hinata ichi,205 +hakama lift,205 +green hood,205 +gooey (kirby),205 +georgette lemare,205 +falin thorden,205 +enoshima iki,205 +dobunezumi,205 +coffee cat,205 +chiharu (9654784),205 +breasts day,205 +bokutachi wa hitotsu no hikari,205 +basa rutan,205 +baakurou,205 +atelier ayesha,205 +artist self-reference,205 +araneesama,205 +ape,205 +anshinmama,205 +aka ume,205 +adapted object,205 +abekawa,205 +2003,205 +zombie mogura,204 +yoshioka chie,204 +womi,204 +wii remote,204 +white hakama,204 +wailmer,204 +wabi (wbsk),204 +umigraphics,204 +tsukana (saba mizore),204 +tougetsu gou,204 +test score (paper),204 +teranekosu,204 +tehepero,204 +tadano hitohito,204 +tachikawa mushimaro,204 +suzuame yatsumi,204 +suomi kp/-31,204 +sun print,204 +space station,204 +sokura (mochichitose),204 +shiromuku,204 +ryoutan,204 +rie petoriyacowa,204 +red panda tail,204 +raiden (raiden labo),204 +public masturbation,204 +poinsettia,204 +pixiv fantasia wizard and knight,204 +pinstripe legwear,204 +pikacchi,204 +phantasy star online,204 +onegai twins,204 +omutatsu,204 +nogizaka haruka no himitsu,204 +nishiuri warito,204 +nike (0306),204 +natsuki (ukiwakudasai),204 +nanameda kei,204 +namiki (remiter00),204 +multicolored hoodie,204 +morihito,204 +monaka curl,204 +moe2021,204 +mizuno (okn66),204 +misawa hiroshi,204 +medjed (fate) (cosplay),204 +masaki aeka jurai,204 +madara inosuke,204 +labrys (persona),204 +kyuri,204 +kuromame (8gou),204 +kurokan (kokkyou oudan),204 +kosegawa shiromi,204 +kofune ushio,204 +koa (phrase),204 +kiyama harumi,204 +kiss chart,204 +kazune (baumkuchen),204 +katou hazuki,204 +katari,204 +karlwolf,204 +kaai yuki,204 +ikuta takanon,204 +holding magazine,204 +holding gloves,204 +history,204 +hikichi sakuya,204 +hiei (yu yu hakusho),204 +hat loss,204 +gordie (pokemon),204 +goban,204 +gible,204 +genocider shou,204 +fujii jun,204 +flipping food,204 +first high school uniform,204 +eight tohyama,204 +echo saber,204 +dreadtie,204 +dorothy west,204 +delsaber,204 +dasoku sentarou,204 +christmas sweater,204 +chiba mamoru,204 +cheytac m200,204 +caspar von bergliez,204 +brave sword x blaze soul,204 +bloom2425,204 +black bird,204 +beige 
shorts,204 +beanstalk (arknights),204 +aruka (alka p1),204 +arnval,204 +apricot sakuraba,204 +apple hair ornament,204 +aoi tiduru,204 +anti-rain (girls' frontline),204 +akagi asahito,204 +yamabushi kunihiro,203 +yakiniku,203 +xi gundam,203 +wolf-chan (wataame27),203 +wamu (chartreuse),203 +wadani hitonori,203 +vitarka mudra,203 +vista-tan,203 +urabe mikoto,203 +umikaze kai ni (kancolle),203 +trim marks,203 +tionishia,203 +tiny evil,203 +tetsuo,203 +tayutama,203 +taurus mask,203 +tamute (2580rs),203 +swimsuit costume,203 +sumeragi seisuke,203 +streetcar,203 +spiked mace,203 +sofmap background,203 +shigemiya kyouhei,203 +shabana may,203 +rogue titan,203 +reigen arataka,203 +rama (yu-light8),203 +r.o.d the tv,203 +quattro vageena,203 +pyotr (madoka magica),203 +pine,203 +pants under dress,203 +orochi itto,203 +noccu,203 +noah (xenoblade),203 +new york,203 +nev (nevblindarts),203 +nekota susumu,203 +nazu-na,203 +natori youkai,203 +namikaze minato,203 +mr.lime,203 +mokomoko yanakku,203 +miyazawa kengo,203 +minarai zouhyou,203 +mickey mouse ears,203 +mercy rabbit,203 +masurao (sekaiju),203 +majima yuki,203 +kurono kurumu,203 +kurasawa moko,203 +ksk (semicha keisuke),203 +koharu rikka,203 +kayama kenji,203 +jianmo sl,203 +isopod,203 +ines fujin (umamusume),203 +indivisible,203 +horsea,203 +hong bai,203 +hole in chest,203 +harukaze doremi,203 +hakua ugetsu,203 +garderobe uniform,203 +game cartridge,203 +gainoob,203 +fujioka haruhi,203 +fish in mouth,203 +final fantasy tactics advance,203 +domyoji karin,203 +dojikko pose,203 +digging,203 +diddy kong,203 +cross mirage,203 +color ink (medium),203 +ciel phantomhive,203 +cake hair ornament,203 +buntaichou,203 +ayul (ayulneri 92),203 +astolfo (saber) (third ascension) (fate),203 +armor removed,203 +algerie (azur lane),203 +akino coto,203 +aikura (twilight dusk),203 +aiko (kanl),203 +abu,203 +yuemanhuaikong,202 +yamato (muchuu paradigm),202 +yamamoto keigo,202 +windforcelan,202 +watosu,202 +wada sachiko,202 +tyrone,202 +turnip,202 +tsunamayo,202 +tsukuyo (gintama),202 +tsuki no i-min,202 +touon,202 +touhou danmaku kagura,202 +tony stark,202 +tasmanian devil ears,202 +takaku toshihiko,202 +taesi,202 +tachibana marika,202 +suzune yuuji,202 +sneaking,202 +single knee boot,202 +sha wujing,202 +sannomiya shiho,202 +sakura nene,202 +ryuugazaki rei,202 +ring dream,202 +reia,202 +rama (fate),202 +quan (kurisu tina),202 +project pochama,202 +pokemon rgby (style),202 +plug,202 +petrification,202 +penguin girl,202 +paripi koumei,202 +pagong,202 +padparadscha (houseki no kuni),202 +oshio (dayo),202 +omuretsu,202 +noise (tsuzuki),202 +nano (syoutamho),202 +nanbu kaguya,202 +mokokiyo (asaddr),202 +mogumo,202 +miyamoto musashi (second ascension) (fate),202 +mimi houllier von schwarzlang,202 +mikado shiina,202 +mg mg,202 +meteor (arknights),202 +mephistopheles (fate),202 +mega man 2,202 +manako,202 +majin tantei nougami neuro,202 +machoke,202 +lace sleeves,202 +kurosususu,202 +kuroki rei,202 +kuragare,202 +kunabishi,202 +kuma (bloodycolor),202 +kuen (kuennn12),202 +kouzuki hajime,202 +kosuzume,202 +knight (ragnarok online),202 +knife in head,202 +knee to chest,202 +kissing object,202 +katou shinobu,202 +katori (kancolle) (cosplay),202 +junketsu duelion,202 +jewel butt plug,202 +jessie rasberry,202 +japan maritime self-defense force,202 +inumori sayaka,202 +incoming pocky kiss,202 +inazuma japan,202 +ichinose minori,202 +houhou (black lack),202 +holding pumpkin,202 +hiyohiyo,202 +hatena yousei,202 +h&k ump9,202 +groping motion,202 +grand archer (elsword),202 +golden 
sun,202 +fnc (girls' frontline),202 +fischl (ein immernachtstraum) (genshin impact),202 +female priest (dungeon and fighter),202 +entei,202 +edogawa nao,202 +dragon quest ix,202 +dogs: bullets & carnage,202 +danganronpa s: ultimate summer camp,202 +cogecha,202 +chrollo lucilfer,202 +chris (konosuba),202 +chikage (sister princess),202 +bowser jr.,202 +boku no kokoro no yabai yatsu,202 +boku dake ga inai machi,202 +boar mask,202 +banchiku,202 +aruya (flosrota),202 +arisa (shadowverse),202 +aoi kimi,202 +alicia melchiott,202 +akagi shigeru,202 +agekichi (heart shape),202 +admiral paru,202 +zeta (summer) (granblue fantasy),201 +yunoha thrul,201 +yuki miku (2015),201 +yuki maccha (yukimattya10),201 +yozakura quartet,201 +yondemasu yo azazel-san.,201 +yakitori,201 +wild arms xf,201 +valkyrie no densetsu,201 +umapyoi densetsu,201 +tugumi0w0,201 +treehouse,201 +they (kiman),201 +terumii,201 +tentacle clothes,201 +tamanegiinyo,201 +tam-u,201 +sugimoto isao,201 +strapless coat,201 +steel beam,201 +stalactite,201 +source quote,201 +shurelia (ar tonelico),201 +shijou saikyou no deshi ken'ichi,201 +shanguier,201 +sengoku saga,201 +sekihan,201 +sasaki mutsumi,201 +ryuuka sane,201 +robotics;notes,201 +risutaru,201 +postage stamp,201 +panzer,201 +osterei,201 +nyanmaru,201 +nose hook,201 +nogisaka kushio,201 +nikame,201 +nekko (momosuzu nene),201 +mm (mm chair),201 +miyasaka miyu,201 +minimaru,201 +million dreams (idolmaster),201 +mia fey,201 +meowstic,201 +masurao 2 (sekaiju),201 +mashu (control),201 +mash kyrielight (swimsuit of perpetual summer ver.02),201 +male lactation,201 +makabe gorou,201 +mahoraba,201 +loremaster (helltaker),201 +lomocya,201 +live for the funk,201 +leaning over,201 +kuromori yako,201 +kung fu,201 +kopianget,201 +konpaku youmu (cosplay),201 +kokeshi,201 +kentan (kingtaiki),201 +katsurai yoshiaki,201 +kamiya yuu,201 +japanese wolf (kemono friends),201 +industrial,201 +howa type 89,201 +hoshi ryouma,201 +histoire,201 +hebitsukai,201 +haijin,201 +haiba 09,201 +gusu,201 +gofelem,201 +gangut dva (kancolle),201 +fukusuke hachi-gou,201 +fujibayashi sheena,201 +faubynet,201 +es (eisis),201 +donald duck,201 +doll (ib),201 +dokuro-kun (houshou marine),201 +detached pants,201 +dead space,201 +daffodil,201 +croagunk,201 +covering head,201 +cong1991,201 +comic cover,201 +clare (claymore),201 +chocojax,201 +choborau nyopomi,201 +ceiling fan,201 +cape lift,201 +caligula (game),201 +bras d'honneur,201 +banishment,201 +audino,201 +anubis (monster girl encyclopedia),201 +alzi xiaomi,201 +achan (blue semi),201 +zuo daoxing,200 +zegapain,200 +yijian ma,200 +yasai (getsu),200 +untied footwear,200 +united states navy,200 +umibouzu (niito),200 +uehiro,200 +typewriter,200 +tsuchibayashi makoto,200 +true tears,200 +tora (xenoblade 2),200 +tamaoki benkyou,200 +swivel chair,200 +super sailor mercury,200 +smoker (one piece),200 +sleeveless bodysuit,200 +shumiko (kamenokoueki),200 +shiraishi minoru,200 +shinjiro,200 +selector wixoss,200 +say hana,200 +ryuuta (msxtr),200 +rectangular pupils,200 +raizen high school uniform,200 +purple rope,200 +pon yui,200 +paparazzi,200 +ootori (kyoya-ohtori),200 +nullma,200 +nukoyarou,200 +nishi yuuko,200 +negija,200 +miyabino (miyabi1616),200 +mieu (tales),200 +make a contract,200 +lust (fma),200 +lowe (slow),200 +latte art,200 +latex leotard,200 +lampent,200 +kz oji,200 +kurumi (lycoris recoil),200 +kuramitsu mihoshi,200 +kurama (yu yu hakusho),200 +kuchiku i-kyuu,200 +koufuku graffiti,200 +kanke (yonkuma),200 +kaede (shijie heping),200 +junk,200 +judal,200 +jigoku sensei 
nube,200 +iris black games,200 +houraku,200 +holding petal,200 +hi iro,200 +hazuki watora,200 +hata-tan (rui (hershe)),200 +grimms notes,200 +ginta,200 +gen (black factory),200 +garuku,200 +fukaiton,200 +fuente,200 +fn fal,200 +flint (pokemon),200 +flash game,200 +final fantasy type-0,200 +fed (giba),200 +fading border,200 +emiya alter,200 +emily stock,200 +domotolain,200 +dinosaur costume,200 +cuffed,200 +chize,200 +chiyo chichi,200 +chikage (kinokodou),200 +chicken costume,200 +catherine,200 +capsule,200 +black jaguar (kemono friends),200 +bio lab,200 +beedrill,200 +beast wars,200 +august,200 +ashe ubert,200 +ancient destroyer oni,200 +anachronism,200 +501st joint fighter wing,200 +2equal8,200 +yorktown (azur lane),199 +yoplait,199 +yamana akane,199 +wander (shadow of the colossus),199 +walkure (macross delta),199 +upright restraints,199 +toy car,199 +tokyo yamane,199 +taiki (6240taiki),199 +suzumeko,199 +stuffed carrot,199 +steam locomotive,199 +sophia esteed,199 +snoopy,199 +shirakawa kotori,199 +shimotsuki shio,199 +shiika yuno,199 +shefu,199 +sen no kiseki iii,199 +sei shounagon (swimsuit berserker) (fate),199 +satou mari,199 +sabotender,199 +rody roughnight,199 +rin (inuyasha),199 +renetan,199 +rean schwarzer,199 +pull out,199 +predator,199 +pon de lion,199 +pierre bichelberger,199 +paper kabuto,199 +osanai (shashaki),199 +oomuro hanako,199 +nineo,199 +mondaiji-tachi ga isekai kara kuru sou desu yo?,199 +momoniku (taretare-13),199 +mogamiya honu,199 +mityubi,199 +messy (efuya),199 +meatball,199 +mash kyrielight (ortenaus),199 +lolimate,199 +lisbeth (sao-alo),199 +kusuha mizuha,199 +kurowan,199 +kuroi nyan,199 +kujira lorant,199 +koge donbo,199 +kanya pyi,199 +jeffr,199 +jackal tail,199 +iri flina,199 +image macro (meme),199 +horaki hikari,199 +hoo bamon,199 +holding jar,199 +hiro (chumo),199 +harada (basashi),199 +greaseberries,199 +gold ship (run revolt launcher) (umamusume),199 +fine (futagohime),199 +fakkuma,199 +eating hair,199 +drawn whiskers,199 +deborah (dq5),199 +damian doyle (cyphers),199 +cross-laced dress,199 +code vein,199 +circuit board,199 +chiroru (cheese-roll),199 +capsule servant,199 +bokuman,199 +bloodstained: ritual of the night,199 +black rock shooter (character) (cosplay),199 +belmond banderas,199 +asameshi,199 +archetto (arknights),199 +ame-chan (needy girl overdose),199 +alpaca girl,199 +akitsuki itsuki,199 +airandou,199 +108 gou,199 +zui zui dance,198 +yoshinari you,198 +yasuhara ema,198 +xp home-tan,198 +xatu,198 +whorled clouds,198 +white pajamas,198 +vinland saga,198 +ungagged,198 +underwater sex,198 +tsuyuzaki mahiru,198 +tsuuhan,198 +tsuno no hito,198 +trample,198 +tobi (nekomata homara),198 +tetuw,198 +tetsuwan birdy decode,198 +terada tera,198 +tensui no sakuna-hime,198 +st. 
hermelin school uniform,198 +sinkai,198 +shiroko (cycling) (blue archive),198 +sena (xenoblade),198 +sea slug,198 +ryuki@maguro-ex,198 +roselia (pokemon),198 +reverse ryona,198 +ratna petit,198 +ran (bukeranwu),198 +pukara,198 +prisoner,198 +polyle,198 +planetary ring,198 +penguin 1-gou,198 +painting nails,198 +octagram,198 +neneka (princess connect!),198 +nekomiya ryuu,198 +nekolina,198 +nakahara komugi,198 +nagatekkou,198 +murasaki shikibu (swimsuit rider) (first ascension) (fate),198 +multiple scars,198 +monoyoshi sadamune,198 +miyuki kazuya,198 +miyamoto ryuuichi,198 +midnight (boku no hero academia),198 +metal gear solid 2,198 +mayer (arknights),198 +maydrawfag,198 +mafuyu (kanden shoujyo),198 +light valkyrie (p&d),198 +latex dress,198 +laevatein (tail),198 +kukri,198 +kokonoe tsubaki,198 +kohaku.,198 +kiba satoshi,198 +jounouchi katsuya,198 +jack daniel's,198 +hunyan,198 +hundred,198 +holding blanket,198 +hirota fruit,198 +himehina channel,198 +high belt,198 +gojarun,198 +fur shawl,198 +flower brooch,198 +fig sign,198 +epilepsy warning,198 +enlightened byleth (male),198 +emg (christain),198 +emet-selch,198 +elf k,198 +dura,198 +disappointed,198 +creeparka,198 +comic hotmilk,198 +color switch,198 +chin piercing,198 +charles-henri sanson (fate),198 +chameleon,198 +cassette tape,198 +buresu,198 +bunny headphones,198 +black cat d.va,198 +ark (morita hitomi),198 +any (lucky denver mint),198 +amity blight,198 +alkemanubis,198 +aggron,198 +aerodactyl,198 +4suke,198 +yuuhi riri,197 +yoshida on,197 +yamacchi,197 +xenoblade chronicles: future connected,197 +wireless earphones,197 +vivivoovoo,197 +victorious (kancolle),197 +umitonakai,197 +tokiwa midori,197 +the seven deadly sins,197 +the legend of zelda: link's awakening,197 +terada ochiko,197 +tasora,197 +soya (torga),197 +sith,197 +shyvana,197 +shinkon gattai godannar!!,197 +seiou gakuen school uniform,197 +sakai (motomei),197 +saintshiro,197 +rose background,197 +ravel phenex,197 +pink pubic hair,197 +pikazo,197 +patricia (stylish marunage),197 +osakabe-hime (swimsuit archer) (fate),197 +na insoo,197 +mushiro (nijie728995),197 +mozuwaka,197 +monster hunter 3,197 +manaphy,197 +male protagonist (pokemon go),197 +lunala,197 +love triangle,197 +li shuwen (young) (fate),197 +kupala,197 +kuonji ukyou,197 +kouji (kari),197 +komb,197 +kazami ruku,197 +kataro,197 +kashuu (b-q),197 +kanzarin,197 +k/da evelynn,197 +janis (hainegom),197 +ifuji shinsen,197 +horrified,197 +hooded shirt,197 +hiwatashi nazuna,197 +hatakenaka (kamagabuchi),197 +hakuouki shinsengumi kitan,197 +furisuku,197 +exhaust,197 +eunectes (forgemaster) (arknights),197 +earth ekami,197 +dress flip,197 +dive ball,197 +dire wolf (kemono friends),197 +chacha (ss 5087),197 +black panther,197 +black cola,197 +beancurd,197 +bauxite,197 +b3 wingman,197 +arima miyako,197 +ano ko wa toshi densetsu,197 +allen (makaroll),197 +aldra (queen's blade),197 +akihime sumomo,197 +ajax (azur lane),197 +aida kensuke,197 +zaizen aoi,196 +you shimizu,196 +year of the goat,196 +yandere simulator,196 +yamaioni (sasakama),196 +wonder woman (series),196 +white-stew,196 +utamaro,196 +tostantan,196 +torahime (roland00),196 +tokunaga (tales),196 +tatsuta kai ni (kancolle),196 +takizawa asuka,196 +takagi saya,196 +super sailor venus,196 +stationery,196 +star trek,196 +star ocean integrity and faithlessness,196 +spade earrings,196 +skis,196 +shironeko haru,196 +shirogane kei,196 +shi huang di (fate),196 +senjou no valkyria 2,196 +seaport summer princess,196 +samoore,196 +ryu genshin77,196 +royal navy,196 +rei (rei 
rr),196 +redjet,196 +pointing sword,196 +pitching,196 +oohoshi awai,196 +onigirya (nekomata okayu),196 +olivia (pokemon),196 +nuno (pppompon),196 +noe noel,196 +ninja gaiden,196 +nihongou (touken ranbu),196 +nate mitotsudaira,196 +namiuchigiwa no muromi-san,196 +nakatsu shizuru,196 +motoi hiroumi,196 +mistorene callus,196 +meyrin hawke,196 +melings (aot2846),196 +lovestruck,196 +koisuru asteroid,196 +kirby and the forgotten land,196 +kieta,196 +kawanobe,196 +karinto yamada,196 +kaqo,196 +kanna asumi,196 +itoichi.,196 +in locker,196 +homework,196 +hk (hk),196 +ginji (sakaki summer),196 +genjii (touhou),196 +gauge,196 +fruits basket,196 +flaming halo,196 +finger to eye,196 +fergus mac roich (fate),196 +emia wang,196 +elizabeth tower,196 +didloaded,196 +davi (dokidoki! precure),196 +chikanoko,196 +blind girl (popopoka),196 +blacksaikou,196 +binbougami ga!,196 +azusa (cookie),196 +asakura sakura,196 +aqua choker,196 +aqua-framed eyewear,196 +akashi kaoru,196 +abara heiki,196 +yumaru (marumarumaru),195 +yue (show-ei),195 +yu-ves,195 +yellow pajamas,195 +yamaguchi shinnosuke,195 +wolverine,195 +wanderer (ragnarok online),195 +vulcan salute,195 +vest removed,195 +varus,195 +utsushimi kemii,195 +teddiursa,195 +tea party,195 +tadokoro nurikabe,195 +surcouf (azur lane),195 +striped scrunchie,195 +stirrups,195 +spilled milk,195 +space dandy,195 +solokov (okb-999),195 +smoke grenade,195 +shungikuten,195 +shizuku (kantoku),195 +shirt on shoulders,195 +shikimi (yurakuru),195 +shibata rai,195 +serino itsuki,195 +savan,195 +salmon,195 +sakurada shiro,195 +saiga-12 (girls' frontline),195 +rukinya (nyanko mogumogu),195 +reno (ff7),195 +ray (pixiv9514208),195 +rail wars!,195 +power pro kun pocket,195 +poch4n,195 +pen (pen3),195 +oricorio,195 +omizu (omz),195 +odd (hin yari),195 +oborotsuki kakeru,195 +nina (breath of fire iv),195 +natsumekinoko,195 +na tarapisu153,195 +mukunoki nanatsu,195 +moe2018,195 +minegishi ayano,195 +miko fly,195 +mikan (bananoha),195 +meerkat ears,195 +matsuo (matuonoie),195 +masayu,195 +masamuuu,195 +mamimu (ko cha 22),195 +makishima azusa,195 +magilou (tales),195 +mafia,195 +lutecia alpine,195 +kuwada yuuki,195 +kurihara sakura,195 +kikyou (inuyasha),195 +kana (fire emblem),195 +kaga ai,195 +jeanne d'arc (summer) (granblue fantasy),195 +iyami,195 +hplay,195 +homare (suzu no oka),195 +hatsume mei,195 +happy meek (umamusume),195 +gundam sentinel,195 +gneisenau (azur lane),195 +fossil,195 +fiodo,195 +doritos,195 +cure whip,195 +criss-cross back-straps,195 +chrisandita,195 +chop,195 +chocoblood,195 +buuwa,195 +belphegor (reborn),195 +basketball court,195 +awara kayu,195 +arurandeisu,195 +aquaplus,195 +aoi yuuka (ao no kokoro),195 +ao oni (onioni-aoi),195 +akazawa red,195 +yuza,194 +yuuki aoi,194 +yukimiya (parupunta),194 +yoshida ryouko,194 +yashiro seika,194 +yamano remon,194 +wynn (yu-gi-oh!),194 +white shawl,194 +vanessa (kof),194 +tsuchikure,194 +tororo ten,194 +tonami kanji,194 +tommy (kingdukeee),194 +tiger ii,194 +tesshii (riza4828),194 +suzumiya haruka,194 +spark,194 +soto,194 +sollyz,194 +shuten douji (festival outfit) (fate),194 +shigure s,194 +sengoku aky,194 +senbei (senbe i),194 +seicoh,194 +scarf bow,194 +salome (one piece),194 +sabo (one piece),194 +ryokucha michi,194 +polka dot trim,194 +pokemon egg,194 +plap,194 +perfume (band),194 +penguin logistics (arknights),194 +open jumpsuit,194 +noel (noel-gunso),194 +nipple bells,194 +ninniku (ninnniku105),194 +nina wang,194 +neruzou,194 +nekojira,194 +nauribon,194 +namu (nurui cha),194 +nakagawa nana,194 +moketa,194 +mishima 
kazuya,194 +matryoshka doll,194 +master ball,194 +little mac,194 +lina (michihasu),194 +lin yuhsia (arknights),194 +limbus company,194 +lichtenberg figure,194 +kuune rin,194 +kuroki tomoki,194 +kurisu-kun,194 +kotobuki hajime,194 +korekara no someday,194 +konpaku youki (ghost),194 +kono lolicon domome,194 +kobayakawa miyuki,194 +kisaragi yuri,194 +kihou no gotoku dmc,194 +jaguar girl,194 +jack (slaintheva),194 +iromeki overdrive,194 +hakugyokurou,194 +hagino makoto,194 +ganassa,194 +game screenshot,194 +flick,194 +fire emblem warriors,194 +figure four sitting,194 +emboar,194 +dusknoir,194 +dog print,194 +credit card,194 +corner,194 +commander (last origin),194 +comala (komma la),194 +city forest online,194 +ciloranko,194 +christina sierra,194 +chiester00,194 +captain earth,194 +call (mighty no. 9),194 +cafe maid,194 +bokura no live kimi to no life,194 +angel of light nanael,194 +aizawa yuuichi,194 +aikome (haikome),194 +7gao,194 +zeus (inazuma eleven),193 +zebstrika,193 +yowamushi pedal,193 +yonaga san,193 +yassy,193 +x-ray glasses,193 +western dragon,193 +wakamiya eve,193 +viperxtr,193 +type 100 (girls' frontline),193 +toxtricity,193 +touran-sai,193 +toro3,193 +torn sleeve,193 +tokarev tt-33,193 +tetris,193 +takana,193 +tail masturbation,193 +suoni (deeperocean),193 +steve (minecraft),193 +spheal,193 +sotoba,193 +scarlet (studioscr),193 +saruanu,193 +sape (saperon black),193 +sangoku musou 1,193 +ruschuto,193 +ruka (piyopiyopu),193 +rowan,193 +rito,193 +resident evil 2 (remake),193 +prishe,193 +pointing to the side,193 +pink wristband,193 +pink armor,193 +penis on stomach,193 +order of the black knights uniform,193 +ogre (illustogre),193 +nukkoru,193 +noragami,193 +no.6,193 +nin (female),193 +nicoseiga sample,193 +nemui333,193 +nanostar,193 +n (dai n honpo),193 +multicolored pants,193 +morino bambi,193 +monk (final fantasy),193 +monk (fft),193 +moire,193 +miyamoto rei,193 +mitchell (dynxcb25),193 +miniature,193 +minami koharu,193 +materclaws,193 +mariah (jojo),193 +mac-10,193 +mabanna,193 +maachin,193 +loen,193 +light brown background,193 +kupa (jesterwii),193 +kumo desu ga nani ka?,193 +kotobukiya bishoujo,193 +kokutou azaka,193 +kito koruta,193 +ki (adotadot),193 +keffiyeh,193 +kdm (ke dama),193 +kasukabe akira,193 +kacyu,193 +iris (flower),193 +io (sinking=carousel),193 +inari konkon koi iroha.,193 +ibuki maya,193 +hololive china,193 +hirai yukio,193 +helmet over eyes,193 +hamaburicchi,193 +guilty gear x,193 +gerwalk,193 +gashapon,193 +emperpep,193 +elle mel martha,193 +dancer (three houses),193 +cinnamon (nekopara),193 +chiune (yachi),193 +chigusa asuha,193 +chaba (hortensia),193 +cerestia of life,193 +bunny earrings,193 +brushing own hair,193 +brekkist,193 +big bad wolf,193 +balkenkreuz,193 +automail,193 +astarotte ygvar,193 +anubis (mythology),193 +android 17,193 +amamitsu kousuke,193 +akichin (atelier baguri),193 +akabashi yuusuke,193 +air defense princess,193 +abigail williams (swimsuit foreigner) (second ascension) (fate),193 +zasha,192 +yuzu (fruit),192 +yuuki nao,192 +yunuki uta,192 +yukinojou yakan,192 +yellow jumpsuit,192 +yan wan,192 +winda (yu-gi-oh!),192 +wara (warapro),192 +wakaba sprout,192 +ushinawareta mirai wo motomete,192 +triela,192 +trellis,192 +touka (utawarerumono),192 +tank turret,192 +sunrise (company),192 +stitched neck,192 +stifled laugh,192 +stellar loussier,192 +sr-3mp (girls' frontline),192 +soyoong jun,192 +skade,192 +shampoo hat,192 +sekiyu (spartan),192 +sato art,192 +sao satoru,192 +sandogasa,192 +samoyed (dog),192 +red xiii,192 +pyonsuke (pyon2 
mfg),192 +poppy (poppykakaka),192 +polka dot hair,192 +playstation 3,192 +phantom brave,192 +pelvic curtain aside,192 +patty fleur,192 +ouri (aya pine),192 +otoki raku,192 +osame,192 +old man (guin guin),192 +olchas,192 +okonomiyaki,192 +nekobell,192 +music stand,192 +muginami,192 +mitarashi dango,192 +minazuki noumu,192 +mayachi (amuriya),192 +maruruk,192 +maru (sara duke),192 +marking on cheek,192 +majin buu,192 +machi (7769),192 +m37 (girls' frontline),192 +lunalu (granblue fantasy),192 +lgbt pride,192 +kurosu aroma,192 +kuri giepi,192 +kiriha (tsugumomo),192 +karasuno volleyball uniform,192 +kanau,192 +kagami taiga,192 +kaeranu kaeru,192 +jingo,192 +jellyfish (splatoon),192 +iwatobi high school uniform,192 +issun,192 +ion (cation),192 +inishie kumo,192 +hououji fuu,192 +holding headphones,192 +holding cigar,192 +hermit crab,192 +harpoon,192 +haraya manawari,192 +hand on another's crotch,192 +griffin & kryuger,192 +gesture request,192 +gakubuchi aiko,192 +fin funnels,192 +error message,192 +elysia (herrscher of human:ego) (honkai impact),192 +dark samus,192 +dark magician,192 +cure grace,192 +cure egret,192 +crona (soul eater),192 +cecilia (shiro seijo to kuro bokushi),192 +captain amari,192 +bokota (bokobokota),192 +bird/binary,192 +binayu,192 +bettle (b s a n),192 +bellossom,192 +behind ear,192 +bagua,192 +ayame (gundam build divers),192 +ayame (0419),192 +yellow capelet,191 +wonderlands x showtime (project sekai),191 +werkbau,191 +unbuttoning,191 +ujikintoki tamaryu,191 +tsurugi (blue archive),191 +toy airplane,191 +torterra,191 +tekken 6,191 +tatsumiya kagari,191 +takoneru,191 +t t,191 +sylvia (huajiuhuajiu),191 +sukonbu (shirakami fubuki),191 +suicidal girl (hamsterfragment),191 +story of eastern wonderland,191 +spiritomb,191 +solru,191 +shikabane itsuka,191 +serge (chrono cross),191 +senano-yu,191 +seasons,191 +schezo wegey,191 +satyr,191 +saria (the law) (arknights),191 +sapphire (sapphire25252),191 +richard (ri39p),191 +rainbow text,191 +r0g0b0,191 +quality,191 +princess ruto,191 +polteageist,191 +poke kid (pokemon),191 +pixie willow (voice actor),191 +piston,191 +parking lot,191 +parakeet,191 +padoru (meme),191 +orange sarong,191 +ogry ching,191 +nylon,191 +novcel,191 +norita,191 +noel (sora no method),191 +nibosi,191 +naruse yuu,191 +najimi shin,191 +material-d,191 +matemi,191 +mashue,191 +martha (swimsuit ruler) (third ascension) (fate),191 +mari (omori),191 +laby (elsword),191 +kouhai-chan (tawawa),191 +kotoura-san,191 +kokotetsu,191 +kobo kanaeru,191 +kimi ga shine,191 +kashimashi,191 +kamura (armor),191 +itsumo nokoru,191 +holding gem,191 +hikari niji,191 +gozz,191 +golbat,191 +gareth (fate),191 +fukube tamaki,191 +first aid,191 +fakemon,191 +doran (doran7280),191 +domon kasshu,191 +damage numbers,191 +dalc rose,191 +daiwa scarlet (trifle vacation) (umamusume),191 +cordelia glauca,191 +concentrating,191 +common bottlenose dolphin (kemono friends),191 +collared leotard,191 +chachi (azuzu),191 +cardboard,191 +belly rub,191 +bellows (suisei no gargantia),191 +ayuayu (chisuke ayu),191 +athena glory,191 +assassin's creed ii,191 +aquarian age,191 +akatsuki no goei,191 +acryl,191 +2002,191 +yurichtofen,190 +yoshiku (oden-usagi),190 +yamato junji,190 +watery eyes,190 +wasabi (legemd),190 +waktaverse,190 +valkyrie (vnd),190 +uppi,190 +umonebi,190 +umaibou,190 +tsuchimiya,190 +tokuura,190 +thompson/center contender,190 +tatapopo,190 +tasogare otome x amnesia,190 +tamu (mad works),190 +suzunoki rin,190 +sports bra lift,190 +snatti,190 +shellder,190 +shaped pubic hair,190 +sdf-1,190 
+sazae-san,190 +salamander,190 +sakura sora,190 +sakura quest,190 +ruo (tariki hongan),190 +red ribbon army,190 +rags,190 +pidove,190 +partially immersed,190 +outlaw star,190 +orz (kagewaka),190 +ochikobore fruit tart,190 +nokishita kumoemon,190 +nier (young),190 +nagato yuki-chan no shoushitsu,190 +nagasawa tougo,190 +muramatsu sakura,190 +morisova,190 +mogskg,190 +michiking,190 +mechuragi,190 +mass production eva,190 +love lab,190 +left-hand drive,190 +lantern on liquid,190 +kusakabe yuuki (to heart 2),190 +kurousagi yuu,190 +kuriboh,190 +krirk,190 +kotomuke fuurin,190 +kissing hair,190 +kinsenka momi,190 +kanchou,190 +kaida michi,190 +kagami sumika,190 +juvia lockser,190 +jacques de molay (foreigner) (fate),190 +ishikawa goemon xiii,190 +hjz (artemi),190 +hide (hideout),190 +henrietta (gunslinger girl),190 +gundam 00 a wakening of the trailblazer,190 +gobera,190 +gekijouban hibike! euphonium,190 +fuse,190 +furukawa sanae,190 +fletchling,190 +flat chest joke,190 +face to pecs,190 +evo grim,190 +emia (castilla),190 +elven forest maker,190 +duke of york (azur lane),190 +dha,190 +dawn (pokemon) (cosplay),190 +cyanide-whale,190 +crybringer,190 +communism,190 +clipe,190 +chiba sadoru,190 +chameleon tail,190 +capriccyo,190 +boca,190 +blue hood,190 +bakuman,190 +asatsuki (fgfff),190 +archer (fate) (cosplay),190 +arancia,190 +animal request,190 +amekosame,190 +akujiki59,190 +aku (dejigiga),190 +akb48,190 +akaiha (akaihasugk),190 +aida rayhunton,190 +zol,189 +yuzu-aki,189 +yuuma (renkin san-kyuu magical pokaan),189 +yomogi (black-elf),189 +yellowroom,189 +vortex vanquisher (genshin impact),189 +veiny hands,189 +urine meter,189 +underworld (ornament),189 +tool belt,189 +teto (nausicaa),189 +teffish,189 +sweet home,189 +suzumi (fallxalice),189 +spoiler (automobile),189 +spectral (series),189 +spas-12,189 +space yoko,189 +snow white (mahoiku),189 +signo aaa,189 +shuten douji (fate) (cosplay),189 +shio (futatsumami),189 +shinobu (ninin ga shinobuden),189 +shiina kokomi,189 +selkie (fire emblem),189 +sasago kaze,189 +ryuushen,189 +ryuji (red-truth),189 +ryokan,189 +rune knight (ragnarok online),189 +rumie,189 +rimuru tempest (slime),189 +reri,189 +rayvon,189 +print scrunchie,189 +politician,189 +otaku room,189 +onsoku no sonic,189 +nipple tassels,189 +nicole demara,189 +nekoboshi sakko,189 +nanananana,189 +morisawa haruyuki,189 +miyata sou,189 +miso katsu,189 +mino tarou,189 +mii (nintendo),189 +mermaid costume,189 +medic,189 +matching shanghai,189 +matayoshi,189 +matarou (matarou072),189 +married,189 +mantis girl,189 +madyy,189 +lotad,189 +kuroneko no toorimichi,189 +kuroki (ma-na-tu),189 +kourourin,189 +kisairo kaede,189 +kirishima (aoki hagane no arpeggio),189 +kinagi yuu,189 +kimi to boku no mirai,189 +kashima (kancolle) (cosplay),189 +kansuke,189 +kagome (traumatize),189 +jashin-chan,189 +itsuwa,189 +insect cage,189 +ilog,189 +illustrious (morning star of love and hope) (azur lane),189 +ibuki pon,189 +honest axe,189 +holding lighter,189 +gothic wa mahou otome,189 +gift card,189 +fuwa minato,189 +funeral,189 +five-seven (gun),189 +fairy knight lancelot (second ascension) (fate),189 +dsr-50 (highest bid) (girls' frontline),189 +dragon nest,189 +cross edge,189 +constanze amalie von braunschbank-albrechtsberger,189 +cirno (cosplay),189 +cirno-nee,189 +chiwino,189 +casca (berserk),189 +buddha,189 +botan (yu yu hakusho),189 +beyond the nobles (idolmaster),189 +bastard!!,189 +bad aspect ratio,189 +awa toka,189 +asmr,189 +armored skirt,189 +aqua sweater,189 +aoba kokona,189 +amamiya minato,189 +akizuki 
akina,189 +akisaka yamoka,189 +accordion,189 +yunagi amane,188 +yoshida iyo,188 +yatagarasu (tsukumo sana),188 +yamin (cookie),188 +yamaguchi homupe,188 +xo,188 +webbed feet,188 +wakagi repa,188 +toy gun,188 +toriyama akira,188 +tokio neo,188 +tnt,188 +tamamo no mae (sable mage) (fate),188 +taker pov,188 +takayama toshiaki,188 +taimanin murasaki,188 +superman,188 +suomi (midsummer pixie) (girls' frontline),188 +star guardian lux,188 +spark (pokemon),188 +sokrates (touhou),188 +snow boots,188 +shuz (dodidu),188 +shirakawako,188 +shiokko (murasaki shion),188 +shining tears,188 +shiguma (signalmass),188 +senmu (senmudayo),188 +sanaki kirsch altina,188 +samurott,188 +sakuya (p&d),188 +sainohikari,188 +ryokushiki (midori-ya),188 +rowboat,188 +ro (aahnn),188 +removing legwear,188 +purple tank top,188 +potto,188 +phone with ears,188 +peanut,188 +parororo,188 +oruka (kamituki0827),188 +original remodel (kantai collection),188 +open bag,188 +norakura (nr kura),188 +nonon (xenonbot),188 +neve,188 +neneneji,188 +natsumiya yuzu,188 +naka (cheeseyeast),188 +nacchan (ohisashiburi),188 +mon3tr (arknights),188 +minerva (fire emblem),188 +meruccubus (merunyaa),188 +mayusaki yuu,188 +matsukura nemu,188 +margaret (persona),188 +mairimashita! iruma-kun,188 +maboroshi no ginzuishou,188 +lycanroc (midnight),188 +lucio (overwatch),188 +leonat,188 +konno takashi,188 +kiriman (souldeep),188 +kintaro,188 +kinmedai pink,188 +kapuchii,188 +kamen rider agito (series),188 +innovators (gundam 00),188 +i.takashi,188 +hrd,188 +hikasa tomoshika,188 +held down,188 +heavy machine gun,188 +hatsuyume,188 +hamster costume,188 +gu li,188 +green horns,188 +gleision adain,188 +gleam,188 +giratina (altered),188 +garden of the sun,188 +fushirun rung,188 +formidable (timeless classics) (azur lane),188 +female trainer (umamusume),188 +emil castagnier,188 +dragon ball super super hero,188 +dp-12 (girls' frontline),188 +dog paws,188 +desktop,188 +d.koutya,188 +circussion,188 +buri (retty9349),188 +brown robe,188 +bob (you-u-kai),188 +bencao gangmu,188 +beef,188 +banjo-kazooie,188 +autofellatio,188 +arm across chest,188 +araki495,188 +aosora kamiya,188 +anteater tail,188 +anabone,188 +akihiyo,188 +absinthe (arknights),188 +aardwolf tail,188 +1960s (style),188 +zi se,187 +zeke von genbu (xenoblade),187 +yoropa,187 +xayah,187 +xaxaxa,187 +vocaloid (lat-type ver),187 +usano mimi,187 +urasekai picnic,187 +united kingdom,187 +umi monogatari,187 +ukata,187 +toriga naku,187 +tomoeda middle school uniform,187 +takase kanan,187 +tahya,187 +t-back,187 +suzuki24,187 +sunflower seed,187 +starry sky (game),187 +star!! (idolmaster),187 +spoken star,187 +skull ornament,187 +single letter,187 +shoto (vtuber),187 +shoe loss,187 +shizuko (blue archive),187 +shiny luminous,187 +shiki (senran kagura),187 +sasaki makie,187 +saruchitan,187 +saijou haruki,187 +saaya (kirome),187 +rexlent,187 +red star (toranecomet),187 +reammara,187 +rain lan,187 +purple bag,187 +pt imp group,187 +pot on head,187 +polka dot camisole,187 +platinum (shimmering dew) (arknights),187 +pantsing,187 +pansage,187 +otama (atama ohanabatake),187 +organ derwald,187 +nueco,187 +ngetyan,187 +muse (amaburi),187 +motteke! 
serafuku,187 +monkey mask,187 +mizukoshi (marumi),187 +mimyo,187 +mikage kirino,187 +midoriiro no shinzou,187 +metal slug attack,187 +meroune lorelei,187 +melantha (arknights),187 +mega man zx advent,187 +mazinger z (mecha),187 +marinesnow,187 +mahou kyuuri,187 +luna lia,187 +lordgenome,187 +kyan reki,187 +kuroba kaito,187 +kumamoto aichi,187 +kud wafter,187 +knee guards,187 +kirschtaria wodime,187 +kintoki (sakura miko),187 +katagiri himeko,187 +karasuma kuraha,187 +karabako,187 +kale (dragon ball),187 +jack-o'-lantern print,187 +iosys,187 +imagawa akira,187 +hosokawa kanako,187 +hanging lantern,187 +haku (sabosoda),187 +haioku colonel,187 +giant robo,187 +frilled boots,187 +foodification,187 +falco lombardi,187 +elliemaplefox,187 +earplugs,187 +duel masters,187 +d-frag!,187 +cyber (cyber knight),187 +compa,187 +clutching chest,187 +cinkai,187 +chupa chups,187 +cenangam,187 +c.cu,187 +c-ms (girls' frontline),187 +butcherboy,187 +browning m2,187 +bobobo,187 +blue wristband,187 +blue bird,187 +bitchcraft123,187 +baymax,187 +automatic giraffe,187 +ashiyama yoshinori,187 +ardbert (ff14),187 +arabian oryx (kemono friends),187 +aqua scarf,187 +april,187 +alder (pokemon),187 +agenasu,187 +yui (niikyouzou),186 +yajuu,186 +wonder egg priority,186 +vegetation,186 +usa mimi,186 +tokyogenso,186 +thunderbolt fantasy,186 +takashi (huzakenna),186 +t@ke-g,186 +susie (deltarune),186 +sophie (howl no ugoku shiro),186 +sonshoukou,186 +sola,186 +snowman hair ornament,186 +shikano sumiaki,186 +setra,186 +sawada tsunayoshi,186 +sarhce,186 +sandbag,186 +sakurea,186 +rou (rou22),186 +rock lee,186 +red stone of aja,186 +plumeria (pokemon),186 +planter,186 +orangina,186 +orange tank top,186 +orange pupils,186 +optical illusion,186 +neone,186 +nekoyashiki pushio,186 +natasha (sekai seifuku),186 +narumiya (empty cafe),186 +naokomama,186 +nanabuluku,186 +naked snake,186 +nakamura kanko,186 +misakura nankotsu,186 +mega man x4,186 +may of doom,186 +matsuo chizuru,186 +matatabi maru,186 +mashimashi,186 +mama (mama hiro),186 +mairo,186 +mail,186 +kyutai x,186 +kou (garakuta teikoku),186 +kobi (piliheros2000),186 +kira-kira sensation!,186 +kimi ga aruji de shitsuji ga ore de,186 +kakumayu,186 +junes,186 +joseph joestar (tequila),186 +jin young-in,186 +ion (tales),186 +ikezawa shin,186 +hong meiling (cosplay),186 +homunculus (artist),186 +hitsugaya toushirou,186 +hisho collection,186 +hat on back,186 +gyuudon,186 +gyari (imagesdawn),186 +group incest,186 +fuurin (omaemona),186 +furukawa itsuse,186 +fatherly,186 +elma leivonen,186 +eiku,186 +drizzile,186 +corset piercing,186 +cla (finesoda),186 +chocolate covered,186 +chaise longue,186 +baru (val-val),186 +azula,186 +asura (asurauser),186 +arita haruyuki,186 +anne (shingeki no bahamut),186 +amakura mio,186 +alphard (canaan),186 +alicia renato (yashiro sousaku),186 +akebono kai ni (kancolle),186 +yuzutei,185 +yuuki aine,185 +yushika,185 +yukimi,185 +yashin (yasinz),185 +yaoshi jun,185 +yangsion,185 +yang guifei (third ascension) (fate),185 +wrinkled fabric,185 +wigglytuff,185 +u u (mooooooou),185 +tug,185 +tsunemi aosa,185 +trainer (idolmaster),185 +toucan,185 +tejina senpai (series),185 +team skull uniform,185 +takaomi (orenchi no maidosan),185 +tadashi,185 +switch01,185 +sunanuko (ramuneko),185 +summon night 5,185 +striped camisole,185 +snow white (grimm),185 +skylight,185 +sita vilosa,185 +shoukaku (sororal wings) (azur lane),185 +shionne (tales),185 +sanjiro (tenshin anman),185 +saeki touma,185 +ribbon hair,185 +razia,185 +rasusurasu,185 +protocol omega,185 +piyotan 
(girls und panzer),185 +pikapikapop (idolmaster),185 +parka (summersketch),185 +otabe sakura,185 +ooyama (angel beats!),185 +olivier mira armstrong,185 +oliver (vocaloid),185 +oborofu,185 +nozaki tsubata,185 +nonoyama,185 +naru (andante),185 +narberal gamma,185 +nagy,185 +mp5 (girls' frontline),185 +motokonut,185 +moshimoshibe,185 +meowy (chainsaw man),185 +mask lift,185 +margay print,185 +mannack,185 +maburaho,185 +lovers (game),185 +kusarigama,185 +kurinton,185 +kuri (kurigohan),185 +kuga huna,185 +koyanskaya (foreigner) (first ascension) (fate),185 +kouya no kotobuki hikoutai,185 +komimiyako,185 +kitamurashu,185 +kiona (giraffe kiona),185 +karasu (naoshow357),185 +kakizome,185 +kahili (pokemon),185 +kagura chitose,185 +juliana (pokemon),185 +jovejun,185 +jivke,185 +imminent gangbang,185 +hyougintou,185 +hooded bodysuit,185 +holostars english,185 +holding sketchbook,185 +hermione (azur lane),185 +hand on window,185 +hal (goshujinomocha),185 +gundam narrative,185 +graph,185 +gouguru,185 +girls' frontline 2: exilium,185 +fruit background,185 +firecrackers,185 +fechirin,185 +fatal frame 3,185 +eternity (shadeh),185 +endori,185 +earth-chan,185 +dou,185 +daidai ookami,185 +corviknight,185 +comic lo,185 +chest armor,185 +bongo cat,185 +atem,185 +alex (street fighter),185 +akira ferrari,185 +aeru,185 +yume no tobira,184 +yotsuya miko,184 +yoshitani motoka,184 +yazaki (yazakc),184 +wrong hand,184 +walkman,184 +walfie (style),184 +wa-class transport ship,184 +vestia zeta,184 +vertical-striped scarf,184 +usuzumi hatsumi,184 +uruu gekka,184 +unxi,184 +triple scoop,184 +tare (tonikaku magaru),184 +tamagawa yukimaru,184 +taji (crowview),184 +tachibana jun'ichi,184 +suda (yuunagi enikki),184 +starmilk,184 +spoken light bulb,184 +speedo (company),184 +sitting on torii,184 +shiwo,184 +shinoda hajime,184 +sawatari izumi,184 +sasakura (calicchio),184 +sa-ya2,184 +royal navy (emblem),184 +rkp,184 +rectangle,184 +railroad signal,184 +pp tenshi t-shirt,184 +power bottom,184 +pink gemstone,184 +peach hat ornament,184 +otsumami (bu-bu-heaven),184 +oka (bananashoe),184 +ogura eisuke,184 +nullken,184 +needle sword,184 +necktie in mouth,184 +nanohana kohina,184 +mugetsu2501,184 +mu-pyon,184 +mizuki kotori (yu-gi-oh!),184 +mitarashi anko,184 +mitaka asa,184 +minatsuki alumi,184 +matching hairstyle,184 +macintosh,184 +macaroni hourensou,184 +lvi,184 +kusaba (kusabashiki),184 +konnosuke,184 +koko hekmatyar,184 +kohinata hoshimi,184 +kissuisou uniform,184 +kanoe yuuko,184 +kamenozoki momomo,184 +kamen rider (1st series),184 +japan ground self-defense force,184 +isu,184 +ichinose uruha,184 +ice cream on face,184 +hondaranya,184 +himadera,184 +hikaru (ofuton at5),184 +high-low skirt,184 +hida tatsuo,184 +headboard,184 +hakonnbo,184 +hacka doll 2,184 +golden shower,184 +gogono pan'ya,184 +gendo0032,184 +gary oak,184 +garlic,184 +futaba akane,184 +fushisha o,184 +furutaka kai ni (kancolle),184 +fox child (doitsuken),184 +female slayer (dungeon and fighter),184 +familymart,184 +fainted,184 +eimi (blue archive),184 +ebanoniwa,184 +doghouse,184 +djmax portable,184 +comic girls,184 +checkered wall,184 +bungou stray dogs,184 +brown one-piece swimsuit,184 +bridgeless bra,184 +bobobo-bo bo-bobo,184 +blank page,184 +bamboo fence,184 +bakugou mitsuki,184 +arms on table,184 +aos,184 +ankea (a-ramo-do),184 +anal fisting,184 +amane (dream c club),184 +alracoco,184 +akihiro altland,184 +3 3,184 +yuran (kuen-hien),183 +yuki miku (2018),183 +yoshika (music480069),183 +yewang19,183 +yamada anna,183 +xxxholic,183 +xiangzi box,183 
+wet.elephant,183 +vivillon,183 +ugif,183 +toudou chise,183 +torkoal,183 +tearju lunatique,183 +teabag,183 +sylphy (amaburi),183 +suprii,183 +star wars: the force awakens,183 +spoken flying sweatdrops,183 +sideroca (light breeze) (arknights),183 +shiguma rika,183 +sekihara umina,183 +screw in head,183 +scarlet weather rhapsody,183 +sanotsuki,183 +sakura romu,183 +sakine meiko,183 +saionji sekai,183 +saberiii,183 +romancing saga 2,183 +rolling pin,183 +ro-500 (kancolle) (cosplay),183 +raya (uk 0128),183 +rance,183 +poa mellhen,183 +pactio,183 +orihi chihiro,183 +onsen musume,183 +okyou,183 +okita sawa,183 +nuko (mikupantu),183 +none (kameko227),183 +nephlite,183 +nemunemu (candy paddle),183 +nanakusa nazuna (yofukashi no uta),183 +nakabayashi yoshitaka's maid uniform,183 +mudrock (obsidian) (arknights),183 +moupii (hitsuji no ki),183 +meika hime,183 +mega stone,183 +matsuri (teriyaki),183 +manhole,183 +laios thorden,183 +kuwata leon,183 +kurokaze no sora,183 +kochou kanae,183 +knees out of frame,183 +kimagure orange road,183 +kapuru 0410,183 +kana616,183 +kamen rider fourze,183 +kaede johan nouvel,183 +iro ame (amewaagada),183 +internet explorer,183 +holding palette,183 +hol horse,183 +hisakawa chin,183 +hiei (azur lane),183 +heixiu,183 +hatenna,183 +haruba negi,183 +group profile,183 +garoudo (kadouhan'i),183 +gajeel redfox,183 +frfr,183 +excalibur (fate/prototype),183 +enoo,183 +endou masatoshi,183 +e-co,183 +dryad,183 +dollar bill,183 +dhalsim,183 +dakkalot,183 +cruiser,183 +chro,183 +charger,183 +bowieknife,183 +axe r18,183 +aurea juniper,183 +ashes,183 +ash (titanfall 2),183 +^jj^,183 +yuzurizaki nero,182 +washio sumi wa yuusha de aru,182 +vi3r6ein,182 +tyouseki,182 +ttk (kirinottk),182 +toyoman,182 +tetsukuzu tetsuko,182 +tedain,182 +tachibana rei,182 +sunga2usagi,182 +stickam,182 +spicy,182 +soranagi,182 +shirokuma (nankyoku),182 +servant x service,182 +saionji kotoka,182 +rope marks,182 +rope (arknights),182 +rikume,182 +quarter rest,182 +presea combatir,182 +precure all stars,182 +popup,182 +otokamu,182 +orange cat,182 +ohagi1010,182 +nura (oaaaaaa),182 +noyomidx,182 +nobori,182 +name john,182 +naked labcoat,182 +mutang,182 +murata himeko (vermillion knight),182 +mool yueguang,182 +minamoto (mutton),182 +megaman.exe,182 +mass effect,182 +mary cochran,182 +makumaxu,182 +loooyd,182 +litsvn,182 +linmiee,182 +kuranosuke,182 +kujo jotaro (cosplay),182 +kokura masashi,182 +koga koharu,182 +keishin,182 +kazuki-mendou,182 +karatakewari,182 +kamigishi akari,182 +kamen rider faiz,182 +joseph oda,182 +ichiyan,182 +hoshiringo0902,182 +hinaki (hinaki 0102),182 +haru (hiyori-kohal),182 +handjob over clothes,182 +hanamura teruteru,182 +hanamasa ono,182 +hai to gensou no grimgar,182 +gulpin,182 +green innertube,182 +greek cross,182 +floorplan,182 +firefox,182 +firefly,182 +fa no hito,182 +espurr,182 +efuri (riarea00),182 +domo1220,182 +diantha (pokemon),182 +detective pikachu,182 +dazed,182 +daidou (demitasse),182 +cslucaris,182 +cross-laced sandals,182 +cockatiel,182 +chouno ami,182 +cheating (competitive),182 +casul,182 +black bloomers,182 +bellflower,182 +awkward,182 +ash crimson,182 +aotan nishimoto,182 +aokihoshi,182 +amano kozue,182 +albacore (azur lane),182 +aiz wallenstein,182 +against door,182 +adjusting another's hair,182 +a song of ice and fire,182 +ziro (zirorong),181 +yuuki anju,181 +yu 3,181 +yopan danshaku,181 +yana mori,181 +yamaishi (mukoubuti),181 +yamada naoko (hideko1227),181 +yahagi kai ni (kancolle),181 +wilhelmina carmel,181 +weezing,181 +watarabe keiichi,181 +trucy 
wright,181 +titan (titanfall),181 +tetsuwan birdy,181 +tank destroyer,181 +tanaka ahiru,181 +super sailor mars,181 +striped tank top,181 +spiked thighlet,181 +shiwo (siwosi),181 +shiromoru (yozakura rety),181 +shinia,181 +shigaraki tomura,181 +shida kazuhiro,181 +senjuushi (series),181 +sazanami kai (kancolle),181 +sakura hanpen,181 +sakazakinchan,181 +ryouna (senran kagura),181 +rolua,181 +rillaboom,181 +rie (reverie),181 +ricotta elmar,181 +rebis,181 +psyche3313,181 +pokobee,181 +pet shaming,181 +owl girl,181 +orihara mairu,181 +onion knight,181 +nyonyonba tarou,181 +non (nobu),181 +noise tanker,181 +nishiumi yuuta,181 +niijima sae,181 +neptune (azur lane),181 +neet co.,181 +nanase kureha,181 +morishita naochika,181 +monoku,181 +moe (blue archive),181 +miyuki (miyuki0529),181 +mitsuyo (mituyo324),181 +miito shido,181 +meika mikoto,181 +marui futaba,181 +mahito,181 +lucia (punishing: gray raven),181 +lovely x cation 2,181 +looney tunes,181 +lasso,181 +kurusu kanako,181 +kurono yuzuko,181 +kitajima kaede,181 +kirikirimai (kkm),181 +kerosene heater,181 +katari (ropiropi),181 +karakuri chachamaru,181 +kamo (kamonabe 44),181 +kamen rider 1,181 +kagiyama (clave),181 +jogging,181 +jeanne (bayonetta),181 +jackal boy,181 +ishigaki (kancolle),181 +ihobus,181 +ichirino minagi,181 +human head,181 +hozumi kaoru,181 +holding star,181 +holding plectrum,181 +hip attack,181 +hidden blade,181 +haguruma c,181 +haevest,181 +green robe,181 +globus cruciger,181 +gigi andalusia,181 +fujimaru ritsuka (female) (cosplay),181 +fate/tiger colosseum,181 +eevee (cosplay),181 +eel hat,181 +dobrynya nikitich (fate),181 +dekosuke,181 +december,181 +daram (shappydude),181 +danna (karatekikku),181 +daimaou k,181 +daidou ayumu,181 +count zero,181 +corrin (summer) (fire emblem) (female),181 +corpse party,181 +breakdance,181 +bouncing pecs,181 +bird hair ornament,181 +billy the kid (fate),181 +beige footwear,181 +battle damage,181 +atsumi yoshioka,181 +arisaka mashiro,181 +aoyama motoko,181 +alphinaud leveilleur,181 +alfort (may0508),181 +aku no meshitsukai (vocaloid),181 +akakura,181 +zirba,180 +yoshino norihito,180 +yakui,180 +xenosaga episode i,180 +weedle,180 +waha,180 +w tails cat,180 +un-go,180 +udukikosuke,180 +tsukuda0310,180 +tsukiori,180 +too-ye,180 +tisshu (karutamo),180 +tiger (kemono friends),180 +the simpsons,180 +tank interior,180 +sy4,180 +star vs the forces of evil,180 +sorapoi,180 +skirt around ankles,180 +shindou chihiro,180 +shimashiro itsuki,180 +sheeta,180 +see-through bodysuit,180 +scraggy,180 +sasaki kanna (kaedeko),180 +sansha san'you,180 +sano manjirou,180 +saiki kusuo no psi nan,180 +sabana,180 +rusky,180 +rampart1028,180 +quote (doukutsu monogatari),180 +pop filter,180 +pepatiku,180 +pebble,180 +omelet tomato,180 +nijihashi sora,180 +namazu (yamasonson),180 +multicolored bra,180 +mugichoko (mugi no choko),180 +mozukun43,180 +moon rabbit,180 +mega man volnutt,180 +marijuana,180 +mahcdai,180 +lunar tear,180 +linkle,180 +lineage 2,180 +liliruca arde,180 +laegjarn (fire emblem),180 +koyoi mitsuki,180 +konjiki no gash!!,180 +kokujuuji,180 +kita hinako,180 +kiroranke,180 +kimura shuuichi,180 +kenoka,180 +kazuboh,180 +kazama akira,180 +kasa list,180 +kamen rider blade,180 +kairaku historie,180 +kagiyama shachou,180 +jurge,180 +isemagu,180 +initial d,180 +hunie (series),180 +hinomaru (futagun),180 +hikashima (shiofune),180 +higuchi isami,180 +high school dxd hero,180 +heran hei mao,180 +helena douglas,180 +hekomii,180 +hector (fate),180 +harukawa moe (style),180 +hama (22ji kara 24ji),180 +graves (league 
of legends),180 +goshoguruma,180 +ghost hair ornament,180 +galbany (tsgororin),180 +furuyama itaru,180 +food-themed background,180 +fate/empire of dirt,180 +eruru (erl),180 +elincia ridell crimea,180 +edging underwear,180 +dodomori,180 +dandelion seed,180 +daeho cha,180 +d-floe,180 +colosseum,180 +clover theater,180 +cheshire cat (alice in wonderland) (cosplay),180 +cheetah ears,180 +brown suit,180 +blair (soul eater),180 +blaidd the half-wolf,180 +bee doushi,180 +band shirt,180 +ayyk92,180 +asics,180 +antonio salieri (second ascension) (fate),180 +angeldust,180 +akashi kuniyuki,180 +aikei ake,180 +ai arctic warfare,180 +aardwolf print,180 +6koma,180 +zelos wilder,179 +zelitto,179 +youngster (pokemon),179 +yorha type p no. 2,179 +yoneyama mai,179 +yabai gorilla,179 +winter schnee,179 +waffle cone,179 +vyugen,179 +viola (pokemon),179 +unicorn girl,179 +uni (melm),179 +twitter-san (character),179 +tsunashi takuto,179 +tsukumizu yuu,179 +tsukamoto tenma,179 +tornado,179 +too many books,179 +thupoppo,179 +tera hiroshi,179 +tankard,179 +tanasuke,179 +taihou (sweet time after school) (azur lane),179 +suga hideo,179 +stellated octahedron,179 +stank,179 +southern ocean war princess,179 +single-lens reflex camera,179 +shinmai fukei kiruko-san,179 +shingen seiji,179 +shichinose,179 +sha2mo,179 +seo yuzuki,179 +senmura,179 +seaport water oni,179 +sakurajima saromako,179 +sakuna-hime,179 +sai (naruto),179 +ryou@ryou,179 +royal,179 +river city girls,179 +ririkuto,179 +repairing,179 +reika (clovia studio),179 +red male swimwear,179 +prydwen (fate),179 +professor layton,179 +print innertube,179 +pita ten,179 +piko piko hammer,179 +phantasy star universe,179 +petagon,179 +paw hair ornament,179 +otogi-juushi akazukin,179 +on food,179 +okuma mai,179 +oda nobunaga (swimsuit berserker) (second ascension) (fate),179 +obi-wan kenobi,179 +nhaliz,179 +nami nami (belphegor-5812),179 +nah (fire emblem),179 +mutio,179 +morgan (fire emblem) (male),179 +mitsukai dokuro,179 +misato (princess connect!),179 +minneapolis (azur lane),179 +marie (persona 4),179 +lying on another,179 +leslyzerosix,179 +lal'c mellk mal,179 +kukuri (mawaru),179 +kiryuuin satsuki (cosplay),179 +keishi surota,179 +kamen rider black (series),179 +kamameshi gougoumaru,179 +kai yuuki,179 +k0ng,179 +jujutsu tech uniform,179 +johan andersen,179 +iris yuma,179 +inusaka,179 +inou shin,179 +hyuuga neji,179 +hsuliherng,179 +helvetica std,179 +hassen (8cm),179 +hardboiled egg,179 +gb (doubleleaf),179 +fujimaru ritsuka (female) (royal brand),179 +fried egg on toast,179 +foregrip,179 +flower braid,179 +fary5,179 +fairy (jintai),179 +f-15 eagle,179 +eringi (rmrafrn),179 +ekra,179 +drednaw,179 +dorua (dollar),179 +daki (kimetsu no yaiba),179 +dain,179 +constantine xi (fate),179 +captain america: the winter soldier,179 +bt-42,179 +bra (dragon ball),179 +besmiled,179 +ayaki,179 +atelier firis,179 +assassin's creed,179 +asou (asabu202),179 +arisugawa otome,179 +anpanman,179 +alto seneka,179 +alternate element,179 +akaza akane,179 +zhen lu,178 +zangoose,178 +yunioshi,178 +youmou usagi,178 +wakaouji ichigo,178 +vania600,178 +umekko,178 +ueda masashi (style),178 +towel on legs,178 +tosa (hometown zest) (azur lane),178 +torn ribbon,178 +tomato (lsj44867),178 +terra (kingdom hearts),178 +tentacruel,178 +tenkuu sphere,178 +tauros,178 +takumi (rozen garten),178 +suishougensou,178 +spell,178 +soviet flag,178 +silver bell,178 +sigurd hosenfeld,178 +shiki (shiki1230),178 +sakuremi,178 +rimo,178 +reversed,178 +rem (re:zero) (cosplay),178 +ray-akila,178 +quna (pso2),178 
+ppsh-41,178 +pleading face emoji,178 +peach hair ornament,178 +p-nekor,178 +ozawa akifumi,178 +otoshidama,178 +oteruko (wanabeee),178 +oono tetsuya,178 +obaoba (monkeyix),178 +nove (nanoha),178 +normad,178 +noeomi,178 +no navel,178 +nita (onakatohoppe),178 +nemuri nemu,178 +nami (league of legends),178 +mr. game & watch,178 +mouse on head,178 +mochii,178 +milli little,178 +meiji (meiji770),178 +master dojo uniform,178 +magenta (atyana),178 +lilith (yamibou),178 +kyouran kazoku nikki,178 +kurochijo,178 +kurobuta gekkan,178 +knights of the round table (fate),178 +kitajima sara,178 +kernel killer,178 +keiran (ryo170),178 +kandanchi,178 +kamu (kamuuei),178 +kamikaze kaitou jeanne,178 +inari (inariya),178 +imai midori,178 +hiyama kiyoteru,178 +hisaishi kanade,178 +himeyuri sango,178 +himeyuri ruri,178 +hatch,178 +halkawa501,178 +haiki (tegusu),178 +graf zeppelin (beachside urd) (azur lane),178 +geo (yukishitadou),178 +gamuo,178 +fuwa kokone,178 +finger to tongue,178 +fadingz,178 +ember (selen tatsuki),178 +ebi frion (natsuiro matsuri),178 +drinking pee,178 +doberman,178 +dishwashing,178 +chunithm,178 +chinese lantern (plant),178 +chinatsu (blue archive),178 +charamells,178 +character hood,178 +bowling ball,178 +bokkun (doyagaobyo),178 +bnw (umamusume),178 +bioshock infinite,178 +artoria pendragon (alter swimsuit rider) (first ascension) (fate),178 +alligator,178 +akehi yuki,178 +5danny1206,178 +3d glasses,178 +0-den,178 +#compass,178 +yukirin,177 +yukin (es),177 +yukiko (tesseract),177 +yuki miku (2012),177 +usugiri bacon,177 +usaka ray,177 +umedama nabu,177 +tsukikage nemu,177 +trung trac (fate),177 +transformers animated,177 +toujou (toujou ramen),177 +tenshinhan,177 +spirit blossom ahri,177 +spiked helmet,177 +spiked gloves,177 +solosis,177 +sima naoteng,177 +shiota nagisa,177 +shiino sera,177 +segamark,177 +ryu-akt,177 +rindou (awoshakushi),177 +right-hand drive,177 +reiesu (reis),177 +popo (ice climber),177 +ponkotsu,177 +pinwheel hair ornament,177 +pikachu ears,177 +owafu,177 +ookami maito,177 +natsuki marina,177 +nasa logo,177 +murota yuuhei,177 +montemasa,177 +momoiro lettuce,177 +mole (animal),177 +minette (skullgirls),177 +mimic,177 +mayo (miyusa),177 +matasabu,177 +maple (bofuri),177 +madotsukumo,177 +lots of laugh (vocaloid),177 +lingshalan,177 +kyurem,177 +kurokoori,177 +kougami shin'ya,177 +koryuu (gackter10),177 +kogaku kazuya,177 +kishizuka kenji,177 +kishitani shinra,177 +kimura (ykimu),177 +kaoru (gensou yuugen-an),177 +kakitama,177 +kakiko210,177 +julius caesar (fate),177 +joshua (shisanli934),177 +jin akhr,177 +jessica (granblue fantasy),177 +jane mere,177 +jack-o'-lantern ornament,177 +ikkitousen great guardians,177 +ichimegasa,177 +ice pack,177 +i.f.s.f,177 +holding flask,177 +go (board game),177 +gm (mobile suit),177 +ghost sweeper mikami,177 +genya (genya67),177 +gentoku,177 +gaara (naruto),177 +fur-trimmed mittens,177 +floral arch,177 +ekko (ejami),177 +edoya inuhachi,177 +dusk (everything is a miracle) (arknights),177 +double vaginal,177 +dokuro deluxe,177 +demitri maximoff,177 +dead or alive 6,177 +darabuchi,177 +dana zane,177 +cutie honey (character),177 +cure star,177 +cisyo,177 +chen bin,177 +carbuncle (final fantasy),177 +broad shoulders,177 +azuma kiyohiko,177 +awai880,177 +asymmetrical breasts,177 +asada sadao,177 +aogiri penta,177 +anthony (madoka magica),177 +altair ibn la-ahad,177 +aki (sangetusei),177 +zbrush (medium),176 +z282g,176 +yuruto,176 +yunimaru,176 +yoshida seiji,176 +yajuu senpai,176 +wingheart,176 +udppagen,176 +tsukumihara academy uniform 
(fate/extra),176 +tripdancer,176 +toushi ryoku,176 +toruneko,176 +tomozo kaoru,176 +tomo (tmtm mf mf),176 +toba hiyoko,176 +the legend of zelda: spirit tracks,176 +tengxiang lingnai,176 +tejina senpai,176 +takimoto yukari,176 +tail pull,176 +sumi keiichi,176 +stuffed orca,176 +stenciled rose,176 +spoken animal,176 +size comparison,176 +silvally,176 +shiramine rika,176 +shaymin (sky),176 +shaojiang,176 +seviper,176 +sawati,176 +sakkan,176 +roura,176 +red (among us),176 +raticate,176 +rashinban musume,176 +rakeru (dokidoki! precure),176 +putama,176 +poison ivy,176 +pin.s,176 +persona 4: dancing all night,176 +ox girl,176 +overwatch 2,176 +orihara kururi,176 +orange heart,176 +orange butterfly,176 +onix,176 +one finger selfie challenge (meme),176 +on flower,176 +nyororiso (muyaa),176 +nozomi fuuten,176 +nipple tag,176 +netnavi,176 +natsuki kruger,176 +nanaume (shichimi tougarashi),176 +nagato (great fox's respite) (azur lane),176 +mystical high collar,176 +morigami (morigami no yashiro),176 +mizuno mumomo,176 +mizuki nana,176 +miyoshino,176 +miyamoto musashi (swimsuit berserker) (first ascension) (fate),176 +miy@,176 +mi (pic52pic),176 +mesousa,176 +memento vivi,176 +maria (umineko),176 +maosame,176 +makochin,176 +madan no ou to vanadis,176 +lurantis,176 +lin (breath of fire),176 +lavender (flower),176 +kurorichin,176 +kuroo tetsurou,176 +kure-nai,176 +konakona,176 +kobe shinbun,176 +kizaki ren,176 +kinokohime,176 +kazamatsuri fuuka,176 +kayahara,176 +kawanakajima,176 +kaonashi,176 +kanijiru,176 +kani aruki (bucket crawl),176 +kaneko (novram58),176 +kanade izuru,176 +kamimiya,176 +kagami chihiro,176 +jinno megumi,176 +izetta,176 +isekai joucho,176 +inkbrush (splatoon),176 +imaichi moenai ko,176 +ikuno (darling in the franxx),176 +hua-j,176 +hoshiguma (patrolling ronin) (arknights),176 +holding butterfly net,176 +hmage,176 +hitotsubashi yurie,176 +hirosato,176 +hinemosu notari,176 +hikobae,176 +hibari kyouya,176 +hazmat suit,176 +haneramu,176 +gundam msv,176 +grey pubic hair,176 +grasshopper,176 +gotcha!,176 +giant armadillo (kemono friends),176 +gattai,176 +fuyube rion,176 +fudou akira,176 +fruit bowl,176 +friday night funkin',176 +fie claussell,176 +fantasista doll,176 +extra pupils,176 +euphemism,176 +erufuda-san,176 +empty picture frame,176 +doku corne,176 +comfey,176 +colorful palette,176 +clitoris tweak,176 +catfish,176 +burmecian,176 +buran (kure),176 +bunny and fox world,176 +body horror,176 +black bandana,176 +bbbb fex,176 +bachera,176 +australian devil (kemono friends),176 +asahi kuroi,176 +artem wing (tears of themis),176 +anus cutout,176 +ankle bell,176 +zone of the enders 2,175 +yuzu-soft,175 +yuki usagi (mofurafu),175 +yoshiwa tomo,175 +yellowpaint.,175 +yamcha (cocololi),175 +wenny02,175 +vanity table,175 +ukami,175 +tsuttsu,175 +toucailao,175 +teleport,175 +taiwan,175 +star ocean first departure,175 +snot trail,175 +sino42,175 +silky anteater (kemono friends),175 +shirt down,175 +shirafuji kyouko,175 +senran kagura estival versus,175 +sea turtle,175 +sakuraba rola,175 +sakura musubi,175 +sabet (young ouo),175 +ryouzou,175 +rodney (azur lane),175 +remington arms,175 +raphael kirsten,175 +power strip,175 +phoenix wright: ace attorney - spirit of justice,175 +penpen,175 +pee in container,175 +paras,175 +paper chain,175 +panzerfaust,175 +othinus,175 +orca girl,175 +opening,175 +onineko,175 +ohako (ohako1818),175 +naomi (fantasia),175 +nanahyakuichi middle school uniform,175 +naked raincoat,175 +nakamura rohane,175 +nakajima atsuko,175 +muunyan (yumenekoya),175 +mutyakai,175 +moru 
(monaka),175 +morioka itari,175 +monster hunter x,175 +monosenbei,175 +momoko (palemon),175 +miya utsutsu,175 +misooden,175 +miaogujun,175 +mashio,175 +march,175 +makaria,175 +lucia (scott malin),175 +latooni subota,175 +kuroba u,175 +kuriyuzu kuryuu,175 +kokutan kitsunen,175 +kohakope,175 +kodama (wa-ka-me),175 +ko kita,175 +kiriko (araragikoyomi),175 +kikimifukuri,175 +kido jou,175 +kazuha's friend (genshin impact),175 +kanten,175 +kanaria hisagi,175 +k jie,175 +k+,175 +jewel (the black canvas),175 +jammers,175 +ishikawa hideki,175 +husky,175 +hurricane glass,175 +houmitsu,175 +hobble,175 +hinazuki ririna,175 +hand on own wrist,175 +hamayumiba sou,175 +gya (144),175 +guilty dragon,175 +grimlight,175 +green outline,175 +grand sphere,175 +gourry gabriev,175 +fuwa fuwa dog,175 +fuuka reventon,175 +flora (fire emblem),175 +fixed,175 +en pointe,175 +crest worm,175 +cofffee,175 +cleo everlastin,175 +character watermark,175 +carrot pillow,175 +cargo shorts,175 +carbon12th,175 +calyrex,175 +beat angel escalayer,175 +bai (granblue fantasy),175 +ayakaze ryuushou,175 +armillary sphere,175 +amaguchi chiyoko,175 +aisutabetao,175 +100 percent orange juice,175 +013 (hamsasuke),175 +yotarou (aoki hagane no arpeggio),174 +yokoyama naoki,174 +yellow innertube,174 +yasumo (kuusouorbital),174 +white chocolate,174 +voicevox,174 +vittorio veneto (warship girls r),174 +uru (uru0000),174 +tying apron,174 +tsuta no ha,174 +trung nhi (fate),174 +tota (sizukurubiks),174 +tomidoron,174 +the lego group,174 +thanabis,174 +taya oco,174 +tapisuke,174 +tamura hiro,174 +takeya masami,174 +t-ray,174 +swimsuit tug,174 +super cub,174 +starfox1015,174 +stake,174 +sphinx (toaru majutsu no index),174 +soft focus,174 +sirills,174 +shinonome akito,174 +semi-transparent,174 +sango,174 +salmonid,174 +sakura moyon,174 +sakishima hikari,174 +rondo bell,174 +roadhog (overwatch),174 +reality arc (sinoalice),174 +ranger (ragnarok online),174 +quimbaya airplane,174 +queen of spades symbol,174 +priapus a. tarou,174 +popoman,174 +pinch (nesume),174 +pechika,174 +ooyun,174 +on rock,174 +nyan koi!,174 +nu-class light aircraft carrier,174 +nose picking,174 +ninamo,174 +nima (niru54),174 +neko to chiyo,174 +nakayama yukiji,174 +myoudou gakuen high school uniform,174 +munakata kyousuke,174 +multiple moons,174 +moose tail,174 +mochizuki mochi,174 +mochinue,174 +miyoshi karin,174 +mirukurim,174 +mimi-chan,174 +mikoma sanagi,174 +medium request,174 +makoto (summer) (princess connect!),174 +maka (morphine),174 +majo shuukai de aimashou,174 +magic school uniform,174 +mafia kajita,174 +lieqi hun,174 +liepard,174 +kusukusu,174 +kushida you,174 +kunugigaoka middle school uniform,174 +kimetsu gakuen,174 +kimarin,174 +kenny mccormick,174 +kelvin-trainerk,174 +karunabaru,174 +kannon ouji,174 +kanno izuka,174 +kanikou,174 +kai (link2262),174 +justin (grandia),174 +julia chang,174 +izuoku,174 +iroha (iroha matsurika),174 +ichinose777,174 +hoshino kagari,174 +ho-class light cruiser,174 +hanamaru youchien,174 +halter shirt,174 +hakodate omiko,174 +green brooch,174 +ga320aaa,174 +fujy,174 +felt (re:zero),174 +faye (fire emblem),174 +fallout 4,174 +eternity sword series,174 +esper nyanko,174 +eighth rest,174 +dr. 
eggman,174 +display case,174 +display,174 +comiket 95,174 +comic unreal,174 +color timer,174 +cleffa,174 +choyeon,174 +candy bar,174 +breaking bad,174 +borushichi,174 +arpeggio kaga,174 +aqua scrunchie,174 +animedia,174 +anagumasan,174 +akatsuki miho,174 +advance wars,174 +zenno rob roy (umamusume),173 +yuuhi (arcadia),173 +yunsang,173 +yuko (uc yuk),173 +yukari (yukari21653710),173 +yoshimi (blue archive),173 +yokoyoko (nazonazo),173 +yellow overalls,173 +yamauchi (conan-comy),173 +yamato (sword),173 +yakin byoutou,173 +voltorb,173 +uraichishi,173 +tsuritama,173 +tsubasa ryuuji,173 +tsana (lansane),173 +tonotyama,173 +tomoya kankurou,173 +tenkawa nayuta,173 +takao (school romanza) (azur lane),173 +tadano (toriaezu na page),173 +tabi (tabisumika),173 +suzuki masaru,173 +sunflower print,173 +stepladder,173 +standing missionary,173 +spaghe,173 +skadi the corrupting heart (sublimation) (arknights),173 +shizuru viola,173 +shishinon,173 +shisantian,173 +shining (silent night) (arknights),173 +shi chimi,173 +shenmue,173 +senhappyaku,173 +scuba gear,173 +sankarea,173 +samuimu,173 +sakana (flame sakana),173 +sagemaru-br,173 +sag (karehabase),173 +ryuushou,173 +ryofu housen,173 +ribbon earrings,173 +ribbon bra,173 +reeds,173 +ratsuku kinoko,173 +r1,173 +print pillow,173 +print leotard,173 +polka dot scarf,173 +polar opposites,173 +pants under shorts,173 +pancham,173 +outer glow,173 +ototsu kei,173 +on moon,173 +oharu-chan,173 +ocean bottom,173 +nyatabe,173 +new submarine princess,173 +nakagawa kanon,173 +nagomi yui,173 +mushroom cloud,173 +muse (rainforest),173 +metal gear solid peace walker,173 +meola,173 +marching band baton,173 +manoji,173 +mankanshoku sukuyo,173 +makinami (kancolle),173 +mahou shoujo madoka magica plus,173 +lynchis,173 +letdie1414,173 +leila malcal,173 +lancelot (granblue fantasy),173 +kusakanmuri,173 +kurokawa chiaki,173 +konmori (kinbou sokai),173 +kitazawa hagumi,173 +kinoshita hinata,173 +kanda done,173 +kamaboko red,173 +kakyouin chieri,173 +kaho okashii,173 +jaye (arknights),173 +hitsuji bako,173 +hiro (pqtks113),173 +hermes (kino no tabi),173 +hara kenshi,173 +hanzaki jirou,173 +hand around neck,173 +gujianshaonu,173 +grey robe,173 +gongba laoge,173 +gine,173 +fujimaki (angel beats!),173 +firolian,173 +fender,173 +enjoy mix,173 +elemental master (elsword),173 +dvd case,173 +dolce (dolsuke),173 +dohna dohna issho ni warui koto o shiyou,173 +danganronpa/zero,173 +clarice (idolmaster),173 +choice,173 +chipmunk,173 +cheetah print,173 +bsapricot (vtuber),173 +blastbeat,173 +betabeet,173 +bandages over eyes,173 +atsutoku,173 +asimo953,173 +ashe (overwatch),173 +artstation username,173 +aokura shou,173 +ankle scrunchie,173 +alternate sleeve length,173 +akuma (st.takuma),173 +akechi kokoro,173 +zeraora,172 +yuuki makoto (radiant),172 +yuu (1007yu),172 +yukiharu,172 +yoshino ryou,172 +yayoi sakura,172 +wryyyyyyyyyyyyyyyyyyyy,172 +won (az hybrid),172 +whoisshe,172 +weapon connection,172 +wazakita,172 +wardrobe,172 +velouria (fire emblem),172 +tsu-class light cruiser,172 +tree of savior,172 +tousaki shiina,172 +tortoise,172 +suwaneko,172 +suke (momijigari),172 +sugiyama genshou,172 +stuffed whale,172 +spoken number,172 +soleil (fire emblem),172 +skarmory,172 +simosi,172 +silfa (to heart),172 +shuuzo3,172 +shuttlecock,172 +segawa onpu,172 +sakino asuka,172 +sakikumo (sakumo),172 +sagamimok,172 +sachiel,172 +ryuk,172 +ryoubi (senran kagura),172 +rurudo lion,172 +rugby uniform,172 +romulus quirinus (fate),172 +rikiddo (tise 00),172 +pump action,172 +print cape,172 +prehensile 
tongue,172 +poppi alpha (xenoblade),172 +ponpon,172 +philomelalilium,172 +phantump,172 +petilil,172 +paloma piquet,172 +palette suit,172 +outstretched legs,172 +orange pubic hair,172 +onii-chan dakedo ai sae areba kankeinai yo ne,172 +oimo (imoyoukan),172 +nyaa (nnekoron),172 +nishiyama (whatsoy),172 +nishi iori,172 +nightshirt,172 +neneru,172 +namiorii,172 +namco x capcom,172 +nakamura eight,172 +nagase kaede,172 +mugi (banban53),172 +mugen senshi valis,172 +mirisha,172 +miriam (bloodstained),172 +massachusetts (dressed to impress) (azur lane),172 +maremay0513,172 +maple (nekopara),172 +maguro (mawaru sushi),172 +lute (fire emblem),172 +loading (verjuice),172 +light beam,172 +kubyou azami,172 +kojima emi,172 +kishido temma,172 +kimura shiki,172 +kiana kaslana (void drifter),172 +katwo,172 +katori (mocchidou),172 +kama (weapon),172 +kabaji,172 +jouzaburou (joe3),172 +jima,172 +jiggly girls,172 +ifuji sakura,172 +huxiao (mistlakefront),172 +huang (granblue fantasy),172 +holding cloth,172 +hmdark-9,172 +hand to own face,172 +halfling,172 +h2 (h20000000),172 +gwendolyn tennyson,172 +futaba rio,172 +futaba riho,172 +fushiguro touji,172 +eris (asobi ni iku yo!),172 +earth federation,172 +donedone,172 +delibird,172 +comic rin,172 +chloe lemaire,172 +cherry earrings,172 +cerberus,172 +burp,172 +bunji,172 +bronya zaychik (haxxor bunny),172 +blu-ray cover,172 +blacksmith (ragnarok online),172 +black sun,172 +bikkuriman,172 +beeeeen,172 +beautifly,172 +bb (bb shot!) (fate),172 +artoria caster (third ascension) (fate),172 +amagi kai (kancolle),172 +alpha (alpha91),172 +akasode (tyaramu),172 +akano murasaki,172 +afterburner,172 +2l (2lsize),172 +21yc (september breeze),172 +zifu,171 +wilderness bandit risty,171 +watagashi yui,171 +wario land,171 +waraji,171 +virtual reality,171 +viroa,171 +vilor,171 +vertical-striped vest,171 +unicorn (the gift of spring) (azur lane),171 +tsuzumi,171 +tsuina,171 +tosyeo,171 +tom-neko (zamudo akiyuki),171 +ticcy,171 +temachii,171 +takom,171 +tai0201,171 +sushoyushi,171 +sunakumo,171 +soborou,171 +sitrus berry,171 +silent princess,171 +sig mcx (girls' frontline),171 +shuuji (shumi),171 +shironeko sanbou,171 +shirai yuyu,171 +shimotsuki eight,171 +shika (s1ka),171 +sheska xue,171 +serval (kemono friends) (cosplay),171 +sarasadou dan,171 +sangatsu no lion,171 +samus aran (cosplay),171 +samurai champloo,171 +sakuya (sister princess),171 +sailor senshi costume,171 +ripe.c,171 +ribbon bangs,171 +rabbit (tukenitian),171 +papa (shimeguru),171 +open box,171 +non (wednesday-classic),171 +nandemo iu koto wo kiite kureru akane-chan (voiceroid),171 +naked cardigan,171 +mystic square,171 +musubi,171 +modeseven,171 +mochizuki hijiri,171 +mk 18 carbine,171 +mizuno asami,171 +mewkledreamy,171 +merchant (ragnarok online),171 +matsumoto rise,171 +march ab,171 +manaka hitomi,171 +mana kakkowarai,171 +makura (y makura),171 +magolor,171 +mad hatter (alice in wonderland) (cosplay),171 +lamb,171 +la pucelle (mahoiku),171 +kusaka kokage,171 +kurokami medaka,171 +kon manatsu,171 +kinbakuman,171 +kimidake,171 +kazama jin,171 +kanzaki h. 
aria,171 +kabiinyo (kab),171 +jessie (pokemon) (cosplay),171 +holding pocket watch,171 +hinomoto hikari,171 +hinamatsuri touko,171 +hatchet (axe),171 +grandia bing,171 +graham aker,171 +googly eyes,171 +eucliwood hellscythe,171 +etou (cherry7),171 +emlan,171 +emirio (emirio110),171 +emilico (shadows house),171 +elisia valfelto,171 +elina (captain of the royal guard),171 +dorothy haze,171 +dlsite.com,171 +deformed,171 +clockshow,171 +chibita,171 +catsmoon,171 +blindfold removed,171 +bed invitation,171 +bebe (bebe pp),171 +b.a.b.e.l. uniform,171 +azusawa kohane,171 +asyde,171 +aqua pants,171 +aoba (akibajun),171 +amnesia (idea factory),171 +amane sora,171 +amakura mayu,171 +aladdin (disney),171 +ace,171 +8-gou (mechanist08),171 +zuttokodomo,170 +yuki (yukin0128),170 +yuchi (salmon-1000),170 +yanyan (shinken gomi),170 +winnie the pooh,170 +wengwengchim,170 +vivy: fluorite eye's song,170 +usami mizuki,170 +uchino kazuhisa,170 +tsukimi eiko,170 +tobacco (tabakokobata),170 +tatsumiya mana,170 +suo sango,170 +suihei sen,170 +sprout,170 +splat charger (splatoon),170 +sonoda ken'ichi,170 +sirataki umauma,170 +simulated footjob,170 +sigui (queen's blade),170 +shimatori (sanyyyy),170 +she-ra and the princesses of power,170 +seihou,170 +seaplane tender princess,170 +scaffolding,170 +satogo,170 +sato-pon,170 +sagiri (kancolle) (cosplay),170 +sagara sousuke,170 +rurudo,170 +rottytops,170 +rhydon,170 +renta (deja-vu),170 +ranyu,170 +pote-mm,170 +polka dot apron,170 +pirotess,170 +pierre norano,170 +otonashi kiruko,170 +otemoto (baaaaloooo),170 +orange bag,170 +oota youjo,170 +note-chan,170 +nolia,170 +nishiki koi,170 +nijimoto hirok,170 +nepolabo,170 +natsuki (natukituki),170 +mullmull02,170 +mochizuki ado,170 +misuguu,170 +megumin (cosplay),170 +maruchi,170 +manle,170 +manaka non,170 +maiii (smaii i),170 +magical mirai miku (2017),170 +m249,170 +lu xueqi (zhu xian),170 +loki (marvel),170 +loki (fire emblem),170 +kurokawa makoto,170 +kurokawa izumi,170 +kuroganeruto,170 +koutarou (girl power),170 +kou (granblue fantasy),170 +kisuke (akutamu),170 +kishibe taiga,170 +kikuichi monji,170 +keepout,170 +kaya rio,170 +katsuoboshi,170 +kamishiro rui,170 +kamen rider kiva,170 +kafun,170 +jungon kim,170 +jiyu2,170 +jin musou,170 +jack atlas,170 +iwamushi,170 +illustrious (maiden lily's radiance) (azur lane),170 +ico (character),170 +hoshikawa mafuyu,170 +holding sponge,170 +hiruno,170 +hirano aya,170 +hero (omori),170 +hat basket,170 +hasegawa langa,170 +harmonist11,170 +green feathers,170 +gou lianlian dogface,170 +gotou junji,170 +gotland andra (kancolle),170 +g yuusuke,170 +fukada ichika,170 +five star stories,170 +falling star,170 +eyewear lift,170 +eden's zero,170 +death the kid,170 +darren shan,170 +da-cart,170 +cunnilingus gesture,170 +chiba kirino,170 +calin,170 +buthikireta,170 +bolze,170 +battle spirits: shounen toppa bashin,170 +ayuteisyoku,170 +asaki takayuki,170 +ariyoshi gen,170 +arche klein,170 +aono (f i s),170 +amos' bow (genshin impact),170 +akashi (live a hero),170 +aizawa eiko,170 +aihara enju,170 +agemono,170 +agarest senki,170 +aerisdies,170 +zygarde,169 +zukky000,169 +yukiunag1,169 +yor briar (cosplay),169 +yoga mat,169 +xuhuai (the legend of luoxiaohei),169 +wonder festival 2007,169 +wet male underwear,169 +vice (kof),169 +ushiromiya rudolf,169 +twirl baton,169 +tussy,169 +troll (homestuck),169 +toono minagi,169 +suntail,169 +stielhandgranate,169 +snow is,169 +snow fox,169 +slot machine,169 +slime boy,169 +shiroko (reku),169 +shimomoto,169 +shera l. 
greenwood,169 +shantae: half-genie hero,169 +sexy no jutsu,169 +seitei (04seitei),169 +sarena,169 +s zenith lee,169 +rikoma,169 +richelieu (fleuron of the waves) (azur lane),169 +raburebo,169 +purple tabard,169 +pompmaker1,169 +penis sheath,169 +oni costume,169 +ok-ray,169 +oerba dia vanille,169 +october,169 +noelle silva,169 +nidoran,169 +narutaru,169 +nanaka mai,169 +muloli,169 +mosquito,169 +micha jawkan,169 +meloetta,169 +master artoria,169 +mankanshoku matarou,169 +makako (yume bouei shoujo tai),169 +littorio (azur lane),169 +lion costume,169 +leather choker,169 +le temeraire (azur lane),169 +kusana (dudqja602),169 +konparu nozomi,169 +kirahoshi ciel,169 +kanbayashi makoto,169 +kakao rantan,169 +jing ke (fate),169 +jibaku shounen hanako-kun,169 +itou korosuke,169 +ise (0425),169 +inubashiri momiji (wolf),169 +ikuhana niiro,169 +houchou toushirou,169 +holding cookie,169 +hatsuse izuna,169 +haruno shiobana,169 +hakuoro,169 +habetrot (fate),169 +gunnthra (fire emblem),169 +ghostbusters,169 +fukunaga kazuhiro,169 +foot pussy,169 +egk513,169 +denaseey,169 +culture japan,169 +cream lemon,169 +collared capelet,169 +cigarette butt,169 +christie (doa),169 +chiro,169 +chiri (ch!),169 +chichibu (watson),169 +buratei marii,169 +buizel,169 +broom surfing,169 +bluekomadori,169 +black jack (series),169 +big bad wolf (cosplay),169 +berserker armor,169 +azuma yukihiko,169 +ayane (blue archive),169 +artpatient,169 +arrest,169 +a:n (angrynum),169 +zundamon,168 +zakuro (rariatto),168 +yuugumo kai ni (kancolle),168 +yukia (firstaid0),168 +yellow sarong,168 +yamato kai ni (kancolle),168 +wataru (nextlevel),168 +venom snake,168 +two-sided headwear,168 +tricycle,168 +tororo,168 +tokoyami fumikage,168 +taguchi kenji (omaep),168 +suzunosuke (sagula),168 +sunsun69,168 +sunahara wataru,168 +suiroh (shideoukami),168 +stigma1101,168 +stephanie dora,168 +star741,168 +sig sauer 556,168 +siberian tiger (kemono friends),168 +shiromiya mimi,168 +sharuru (dokidoki! 
precure),168 +shameimaru aya (crow),168 +seahorse,168 +sawamura daichi,168 +saotome haruna,168 +sakura koharu,168 +removing glove,168 +qbz-95,168 +princess (7th dragon),168 +plank,168 +pf,168 +panther ears,168 +panpa,168 +oshioki sweetie,168 +osamu (jagabata),168 +onion (lemlaml),168 +onedoo,168 +on stool,168 +omanyte,168 +offbeat,168 +object head,168 +nonbire,168 +noi mine,168 +nekonyan,168 +needless,168 +nasuna,168 +musunde hiraite rasetsu to mukuro (vocaloid),168 +moriya shrine,168 +momogaa (girls und panzer),168 +moe2017,168 +mizutame tori,168 +missing teeth,168 +mins (minevi),168 +milfa (to heart),168 +matsumoto sarina,168 +masamune (ff7),168 +marion-ville,168 +mandricardo (fate),168 +maki oze,168 +link (cosplay),168 +lid,168 +kurosuke (nora),168 +kirby d a,168 +kimi ni todoke,168 +kecleon,168 +juuni taisen,168 +io (princess connect!),168 +instant ip,168 +imai kana,168 +ijimeka (meme),168 +ichimedoo,168 +hozuki kaede,168 +hisui (cookie),168 +hinata (user rjkt4745),168 +haru (nature life),168 +gypsy,168 +game & watch,168 +galarian zigzagoon,168 +fuyouchu,168 +fishnet fabric,168 +etan14,168 +encasement,168 +dress aside,168 +dewgong,168 +dark sky,168 +code: nemesis (elsword),168 +chibikko (morihito),168 +bottle cap,168 +bearhug,168 +axe kick,168 +athena (series),168 +arima senka,168 +andou you,168 +alice margatroid (cosplay),168 +alber,168 +akito (sub707),168 +akieda,168 +98se-tan,168 +yoshida inuhito,167 +yajiro masaru,167 +victreebel,167 +umezawa itte,167 +tun,167 +trust me,167 +trinity seven,167 +toufu mentaru zabuton,167 +tono (rt0no),167 +tokki,167 +tendo teru,167 +sword art online the movie: ordinal scale,167 +swaying,167 +suzushiro yukari,167 +sukebewe,167 +star trail,167 +shitappa,167 +shiroshouzoku,167 +shiraishi an,167 +shinn asuka,167 +shikinami kai ni (kancolle),167 +shigemitsu jun,167 +satou rin (gobugabuge),167 +salama (amaburi),167 +saegusa kii,167 +rubi-sama,167 +roe,167 +rimururu,167 +rateratte,167 +ramiya ryou,167 +quadruple wielding,167 +pringles,167 +pointing gun,167 +pink mittens,167 +picpicgram,167 +patchouli knowledge (cosplay),167 +owa (ishtail),167 +outseal,167 +nekosugi (hoshi),167 +nekomugiharu,167 +neeko (league of legends),167 +natsuki (silent selena),167 +mutsuba fumi,167 +masaki (machisora),167 +mahou shoujo (raita),167 +maa (forsythia1729),167 +liu liaoliao,167 +lily (gentsuki),167 +kurowana,167 +kuroi nanako,167 +komi shuuko,167 +kizuna ai (a.i. 
games),167 +kitano yuusuke,167 +kisaragi mizu,167 +kiryuuin aoi,167 +katsuragi niya,167 +kamyu,167 +k.ty (amejin),167 +junior27016,167 +jinako carigiri,167 +jean-jacques leroy,167 +is ii,167 +ingrid (taimanin murasaki),167 +imigimuru,167 +ikue fuuji,167 +human village (touhou),167 +holding riding crop,167 +healther,167 +harukanaru toki no naka de,167 +hamashima shigeo,167 +hakubi washuu,167 +gum (gmng),167 +gomi ichigo,167 +ginklaga,167 +fujimiya yahiro,167 +freedom nakai,167 +fou (ssqseeker),167 +florence nightingale (third ascension) (fate),167 +ezoshika,167 +echizen (hvcv),167 +doom eternal,167 +dianna soreil,167 +deto,167 +core drill,167 +chopsticks in mouth,167 +chirashi (so),167 +car seat,167 +bulbonne,167 +broken arm,167 +breloom,167 +breast fondle,167 +blue-eyes white dragon,167 +black eyeshadow,167 +binggong asylum,167 +beige headwear,167 +bakuretsu hunters,167 +back to the future,167 +atlantic puffin (kemono friends),167 +ashitaka,167 +annindoufu (oicon),167 +amemiya nazuna,167 +akiteru (akiteru98),167 +afuro,167 +zoe (league of legends),166 +ziling,166 +zasshu nigou,166 +yuuhi kurenai,166 +yumesaki nana,166 +yuasa makoto,166 +yoshida akihiko,166 +yorktown cv-5,166 +yogisya,166 +yellow feathers,166 +yawaraka black,166 +x-23,166 +winter soldier,166 +tsumi no hahen (debris),166 +tsukko (3ki2ne10),166 +tokyo necro,166 +tokomichi,166 +tokoi,166 +terras,166 +tariah furlow,166 +takano itsuki,166 +suta furachina,166 +sussurro (summer flowers) (arknights),166 +skorpion (girls' frontline),166 +sinakyo,166 +shirayuri sakura,166 +shingyoku (touhou),166 +she-hulk,166 +rune factory 3,166 +rugal bernstein,166 +rome romedo,166 +roman (sound horizon),166 +rita rossweisse (artemis),166 +print hakama,166 +pink horns,166 +ozzingo,166 +oreo,166 +on animal,166 +object insertion from behind,166 +no hair ornament,166 +nikomi (nikomix),166 +nada namie,166 +moneybag,166 +moffle (ayabi),166 +minimalism,166 +mikami mika,166 +mareanie,166 +mamel 27,166 +magicians (zhkahogigzkh),166 +m3 lee,166 +lever action,166 +leeron littner,166 +layla prismriver,166 +kyuukon (qkonsan),166 +kv-2,166 +kusano shinta,166 +kuroe (sugarberry),166 +kogame,166 +kitsune maru,166 +kinoebi,166 +kefla (dragon ball),166 +kayura yuka,166 +katara,166 +kaniya shiku,166 +kameyan,166 +juggling club,166 +isis (p&d),166 +imouto sae ireba ii,166 +hoshimiya mashiro,166 +hong doo,166 +hometa,166 +hellagur (arknights),166 +hajimari no kiseki,166 +guard vent jun,166 +garnet rod,166 +game over,166 +gakusen toshi asterisk,166 +fumi (nijisanji),166 +freeze-ex,166 +flying witch,166 +empew,166 +emerald (pokemon),166 +elizabeth (bioshock infinite),166 +dodonpachi,166 +disdain,166 +deerling,166 +daiteikoku,166 +charisma guard,166 +ca (maeda koutarou),166 +brown rose,166 +bremerton (day-off date) (azur lane),166 +brandkojo,166 +brand-new friend (umamusume),166 +bokoboko (pandagapanda1),166 +bismarck (warship girls r),166 +bimmy,166 +ayukawa madoka,166 +araki (qbthgry),166 +aimee (emi),166 +after,166 +7ife,166 +yokohama kaidashi kikou,165 +yamazaki sousuke,165 +yamato maya,165 +yamakaze ran,165 +words worth,165 +weed (astarone),165 +wakatsuki you,165 +unlight,165 +unagimaru,165 +tuba,165 +tsunami jousuke,165 +transformed ditto,165 +toe seam,165 +todding,165 +tentacool,165 +takanashi-a,165 +tachimukai yuuki,165 +studded jacket,165 +straw hat pirates,165 +star nun (diva),165 +squeeze bottle,165 +spark621,165 +sonic the hedgehog (classic),165 +solar eclipse,165 +snow white (disney),165 +siva (executor),165 +sistine fibel,165 +siino,165 +shutou mq,165 
+shiroi karasu,165 +shiro (sewayaki kitsune no senko-san),165 +shira (sirairo116),165 +shimana (cs-ts-az),165 +scone,165 +sawamura eijun,165 +savage (arknights),165 +rotte (1109),165 +ririmon,165 +rimu (kingyo origin),165 +ribi,165 +pure white memories (idolmaster),165 +proto man,165 +poshi (ginmokusei),165 +porygon,165 +popoin,165 +poem,165 +pearl fey,165 +paprika shikiso,165 +pandoria (xenoblade),165 +ouma shuu,165 +oobari masami,165 +nina (pastime),165 +natsupa,165 +narumi nanami,165 +nakamura kusata,165 +nakai hisao,165 +momotarou densetsu,165 +momiji (binbougami ga!),165 +miyata (lhr),165 +mitsuru (darling in the franxx),165 +mireyu,165 +minarai tenna,165 +mihifu,165 +micchan (ohisashiburi),165 +mayonaka taruho,165 +masters of the universe,165 +maru daizu (aqua6233),165 +maou sadao,165 +mantine,165 +mameojitan,165 +lunarisaileron,165 +leogust,165 +lennon,165 +laundry pole,165 +kuronami (lvi),165 +koruta (nekoimo),165 +koi suru kanojo no bukiyou na butai,165 +knight (hollow knight),165 +kimtoxic,165 +kazuma kaneko,165 +kara age,165 +kantoku (style),165 +kaminari doon,165 +isao,165 +io naomichi,165 +ima (lm ew),165 +igarashi (nogiheta),165 +idol heroes (idolmaster),165 +hz (murder license ),165 +hyoin,165 +hulk,165 +hot melon,165 +hoshino ouka,165 +horse racing track,165 +horizon (apex legends),165 +horikita suzune,165 +hisamura natsuki,165 +hip hop,165 +higa-tsubasa,165 +heart-shaped pubic hair,165 +hamedoragon,165 +gino weinberg,165 +getbackers,165 +fu hua (night squire),165 +framed insect,165 +february,165 +fan speaking,165 +extra digits,165 +extra breasts,165 +enmanuelart20,165 +dracu-riot!,165 +crossed swords,165 +crooked eyewear,165 +cibo (killy),165 +chig,165 +carrot (one piece),165 +burusuta,165 +bracelet girls,165 +bottoms1237,165 +blue babydoll,165 +beni kurage,165 +bedwetting,165 +bagpipe (queen no. 1) (arknights),165 +azusa (swimsuit) (blue archive),165 +ateoyh,165 +atage,165 +aspara,165 +asch (tales),165 +ane naru mono,165 +amatsuka megumi (gj-bu),165 +amatsuka mao,165 +alma armas,165 +alexi (tits!),165 +aek-999 (girls' frontline),165 +zigzagdb,164 +zhuge kongming (honkai impact),164 +yusake san,164 +yukiyuki 441,164 +you2,164 +yostar,164 +yanje,164 +yamato (uchuu senkan yamato),164 +y naf,164 +wind sneaker (elsword),164 +wendy's,164 +warui ga watashi wa yuri janai,164 +uwu,164 +type 89 i-gou,164 +turbo byakuren,164 +tsuruta himeko,164 +toshizou (0714),164 +togutogu,164 +theresa apocalypse (starlit astrologos),164 +tel-o,164 +taruya,164 +tamahiyo,164 +synchroaki,164 +sukabu,164 +star sticker,164 +star cutout,164 +sinlaire,164 +sikorsky,164 +shimaji,164 +shadaloo dolls,164 +senya (dq11),164 +sankomichi,164 +saijaku muhai no bahamut,164 +s.m.s.,164 +pure pure,164 +pirate ship,164 +pecjob,164 +osu! tatakae! 
ouendan,164 +one piece: strong world,164 +olys,164 +oikura sodachi,164 +nut (food),164 +nr noir,164 +nose shade,164 +norman (yakusoku no neverland),164 +nina (breath of fire iii),164 +nhk ni youkoso!,164 +muji (uimss),164 +mokoke,164 +mito (go!go!king!),164 +mismatched wings,164 +minimized,164 +mage (dq3),164 +kure (kure ng),164 +kurau kii,164 +koto (instrument),164 +kemono jihen,164 +keigi,164 +jupiter symbol,164 +junko (blue archive),164 +jikihatiman,164 +irui guneden,164 +iced tea,164 +houden eizou,164 +hiryuu (azur lane),164 +hirota tsuu,164 +hiragana oufu,164 +hichiko,164 +hexagram hair ornament,164 +haruna (blue archive),164 +harada miyuki,164 +hanayamata,164 +half-life,164 +hakou (barasensou),164 +glorious (azur lane),164 +genetic (ragnarok online),164 +fusion dance,164 +fugetsu taku,164 +franken fran,164 +foster's home for imaginary friends,164 +fir tree,164 +famas,164 +eyewear switch,164 +esukee,164 +ende (chihuri),164 +electrode (pokemon),164 +ebinku,164 +dresstrip,164 +diepod,164 +deoxys (normal),164 +deku (dekunosu),164 +cyberbots,164 +crossed fingers,164 +craytm,164 +cowter,164 +constriction,164 +chouun shiryuu,164 +cham-p,164 +calne ca,164 +c-low,164 +bobby pin,164 +barn,164 +baltimore (black ace) (azur lane),164 +bad reflection,164 +azuki (nekopara),164 +ashishun,164 +ao madoushi,164 +zebra,163 +yutsumoe,163 +yushe quetzalli,163 +yumeno uta,163 +yukikaze (kancolle) (cosplay),163 +yukihira souma,163 +yellow headband,163 +yae (ganbare goemon),163 +whipping,163 +voodoothur,163 +tsumitani daisuke,163 +tobi (discharge cycle),163 +teramoto kaoru,163 +tdk,163 +tanaka hime,163 +takuzui,163 +suzuki sayaka,163 +stopwatch around neck,163 +squid hat,163 +shuckle,163 +shoe flower,163 +shirase (shirose),163 +senjuushi: the thousand noble musketeers,163 +sarfata,163 +sano (hospital 0434),163 +ryou-ouki,163 +relena peacecraft,163 +relationshipping,163 +rabbit (wlsdnjs950),163 +pinb,163 +phantom (happinesscharge precure!),163 +ooshima tomo,163 +okazaki yasuha,163 +nijitama shiyushiyu,163 +nanno koto,163 +nana g,163 +namihaya,163 +naka akira,163 +nagatsuki sanae,163 +murano,163 +mou tama maru,163 +monchan rev3,163 +moeanime,163 +mirino,163 +mexican standoff,163 +metapod,163 +matra milan,163 +mathew (srmmk mce),163 +marui shiro,163 +mari audio,163 +malenia goddess of rot,163 +maekakekamen,163 +lunatone,163 +love wing bell,163 +lock cole,163 +lask,163 +kurono nekomaru,163 +komajirou,163 +kokubunji suou,163 +kobinbin,163 +kirishima romin,163 +kendy (revolocities),163 +kawakami princess (umamusume),163 +karaginu mo,163 +kanoko (pattern),163 +kanna hisashi,163 +jinroku,163 +jeep,163 +iris (asteroid ill),163 +inkey,163 +inaba rinne,163 +ieiri shoko,163 +ichijou hikaru,163 +horns pose,163 +honyang,163 +hitori bocchi,163 +hifumi kei,163 +hajime (caramel toone),163 +gun case,163 +godzillapigeon1,163 +gibun (sozoshu),163 +geike,163 +fubuki (azur lane),163 +fishing net,163 +eye symbol,163 +expedition uniform,163 +ettone,163 +eternal sailor moon,163 +elf (houtengeki),163 +echizen (n fns17),163 +dier (girls' frontline),163 +deirdre (fire emblem),163 +dedue molinaro,163 +dango (uni 520),163 +comic sans,163 +colorado (azur lane),163 +cis (tiger & bunny),163 +chunyan,163 +chobi (akchu),163 +cherubi,163 +cellphone strap,163 +camisole over clothes,163 +borderless panels,163 +blood trail,163 +black fundoshi,163 +biohazard symbol,163 +bf. 
(sogogiching),163 +battle athletes,163 +ayamisiro,163 +austria (hetalia),163 +asakura otome,163 +artist progress,163 +armbinder,163 +ark kan,163 +another eidos-r,163 +ameyu (rapon),163 +ame usako,163 +alecto 0,163 +aks-74u,163 +akebi komichi,163 +akasata,163 +ai yori aoshi,163 +adagaki aki,163 +258n,163 +zun (artist),162 +zip available,162 +zetton,162 +zaregoto tsukai no deshi,162 +yuuki hb,162 +yayoi (egoistic realism),162 +yamakaze kai ni (kancolle),162 +yamai,162 +yae sakura (flame sakitama),162 +wei xiao,162 +watanabe yasuaki,162 +waku (ayamix),162 +usamata,162 +ura tomomi,162 +tweezers,162 +tunapon01,162 +tsuyomaru,162 +transformers (live action),162 +torn vest,162 +tokira nozumi,162 +theresa (arknights),162 +theodore (persona),162 +tenzin (arknights),162 +tennohi,162 +tenbou,162 +telnyashka,162 +teekyuu,162 +tachikoma,162 +tachibana yuu,162 +suisen-21,162 +sugaishi,162 +standing double penetration,162 +spitting blood,162 +sorano (12gou),162 +sinzen,162 +singing! (k-on!),162 +sinaooo,162 +shuppet,162 +shishiou no mofumofu,162 +shino (syllable),162 +shibi,162 +shiba nanasei,162 +sesshouin kiara (swimsuit mooncancer) (first ascension),162 +seinen,162 +samidare (hoshi),162 +sakura ayane,162 +sagging testicles,162 +robo (chrono trigger),162 +ribbon-trimmed underwear,162 +reed (arknights),162 +raitho,162 +puru (ex-150),162 +princess athena,162 +oweee,162 +otome gee sekai wa mob ni kibishii sekai desu,162 +oozora itsuki,162 +oosawara sadao,162 +ogami ren,162 +nori senbei,162 +nishizono shinsuke,162 +nishimura kinu,162 +nekoguchi,162 +natori (natorism),162 +naruse ibara,162 +nagasarete airantou,162 +my little pony equestria girls,162 +mutsu (snail),162 +mutant,162 +montpelier (azur lane),162 +molyb,162 +mocchisake,162 +miwa mitsune,162 +minion 3 (zannen onna-kanbu black general-san),162 +minami rena,162 +mikel (4hands),162 +mia (fai1510),162 +matsuyuki atsumu,162 +mailman,162 +mage (disgaea),162 +m4 sopmod ii jr,162 +ladies versus butlers!,162 +kyoyakyo,162 +kuzuvine,162 +kusunoki midori,162 +kuromari (runia),162 +kurihara kenshirou,162 +kinjyou (shashaki),162 +kido (choushouya),162 +kamishiro seren,162 +kahlua (artist),162 +jitomi monoe,162 +jimsdaydreams,162 +irene (kanniiepan),162 +inverted colors,162 +inre kemomimi,162 +innertube with ears,162 +ilyana (fire emblem),162 +ienaga mugi,162 +hoshizora e kakaru hashi,162 +horn (arknights),162 +hoothoot,162 +honchkrow,162 +hitomin (ksws7544),162 +hisuian typhlosion,162 +hinata hajime (awakened),162 +hara tetsuo (style),162 +happy tree friends,162 +hanagata,162 +greater roadrunner (kemono friends),162 +goho mafia! kajita-kun,162 +ginga tetsudou 999,162 +gauze,162 +fuuka (blue archive),162 +futatsuki eru,162 +futaba suetsuki,162 +furorina,162 +fernandia malvezzi,162 +feather dress,162 +fateline alpha,162 +eris (konosuba),162 +erect sawaru,162 +dora v nu,162 +coyote ragtime show,162 +cooling pad,162 +conte di cavour nuovo (kancolle),162 +chirigami-san,162 +blackrabbitsoul,162 +bellsprout,162 +ass press,162 +asclepius (fate),162 +aratama (a-tama),162 +aona (anagasaki),162 +aloe (kenkou3733),162 +ale (ale halexxx),162 +aiuti,162 +age switch,162 +yuuichi (reductionblack),161 +yunodon (sugar maple),161 +yu-gi-oh! 
the dark side of dimensions,161 +young zelda,161 +youhei 64d,161 +yoshida nobuyoshi,161 +xiaoshou xiansheng,161 +webcam,161 +wanaca,161 +uta macross sumaho deculture,161 +urameshi yusuke,161 +tusk (stand),161 +tropius,161 +tomo-graphy,161 +theamazingspino,161 +teoshiguruma,161 +tani takeshi (character),161 +tangaroa (housamo),161 +takenashi eri,161 +tabe koji,161 +suzumori 521,161 +suzuka utako,161 +st. hilde academy of magic uniform,161 +spinning top,161 +space adventure cobra,161 +soso (chlgksk110),161 +sophia (granblue fantasy),161 +smelling hair,161 +slacks,161 +shoukaku kai ni (kancolle),161 +shiranui (nakiri ayame),161 +shimohira reika,161 +senzzang,161 +selesia upitiria,161 +sega dreamcast,161 +see-through panties,161 +scyther,161 +sasaki tatsuya,161 +sangoku infinity,161 +rixia mao,161 +riddle rosehearts,161 +reiha (penetrate),161 +rei (tdn ng),161 +rangycrow,161 +pyonpyonmaru,161 +project bunny,161 +production kawaii,161 +pickelhaube,161 +pectoral squeeze,161 +okbnkn,161 +no smoking,161 +nerotarou,161 +nenosame,161 +mugicha (mugicha0929),161 +motohara moka,161 +mosta (lo1777789),161 +mofu,161 +miyazen,161 +mitsurugi tsurugi,161 +mirai denki,161 +mazenda (3378),161 +matou zouken,161 +maruse nisanosuke,161 +mallizmora,161 +m1918 bar,161 +lycanroc (midday),161 +lvlv,161 +love train,161 +licking blade,161 +leria v,161 +lace-trimmed ribbon,161 +kusunoki yukimura,161 +kuroduki (pieat),161 +kurachi mizuki,161 +kotori photobomb,161 +kokoro (hakui koyori),161 +king crimson (stand),161 +kihaiu,161 +kidnapped,161 +kawai rie,161 +kanora,161 +kaneshiya sitara,161 +kamiki uutarou,161 +kamen rider wizard,161 +jet yowatari,161 +insult,161 +ikezawa kazuma,161 +ignatz victor,161 +ichigai (hayawossan),161 +houmatu awa,161 +hisou tensoku,161 +hinata masaki,161 +gymnastics ribbon,161 +grimgrim,161 +funnyari,161 +fuka (hk nemo),161 +frederic chopin,161 +folinic (arknights),161 +flyer,161 +facebook,161 +expectations/reality,161 +esuyuki,161 +ensemble girls!,161 +egyptian art,161 +drie,161 +dav-19,161 +danmaku comments,161 +dabadhi,161 +chiyo (ane naru mono),161 +china,161 +charging device,161 +broken ground,161 +breast zipper,161 +bidoof,161 +basketball jersey,161 +barry nah,161 +ayano yuu (sonma 1426),161 +atsumi jun,161 +arima (arima bn),161 +arashiki mamizu,161 +animage,161 +amatsukaze (kancolle) (cosplay),161 +amairo islenauts,161 +akitsuki (oenothera),161 +yuuri (saikin yatotta maid ga ayashii),160 +yoo (tabi no shiori),160 +yami (m31),160 +yachima tana,160 +yachi hitoka,160 +wewe,160 +wakku kan,160 +toudou itsumi,160 +torii eriko,160 +takei ooki,160 +swirlix,160 +suzumiya akane,160 +suzuki nene,160 +super saiyan 3,160 +suihi,160 +sob (submar1089),160 +snozaki,160 +snake tattoo,160 +shylily,160 +shouma keito,160 +shiomizu (swat),160 +shiokazunoko,160 +shadowsinking,160 +scott malin,160 +saya (twrlare),160 +sakimori toji,160 +saboten teishoku,160 +ryou (shirotsumesou),160 +ruru (gi xxy),160 +roido (taniko-t-1218),160 +rasis,160 +progression,160 +prinz eugen (cordial cornflower) (azur lane),160 +print umbrella,160 +priapus,160 +poyoyo (nakiri ayame),160 +power rangers,160 +poi (last origin),160 +peace@pieces,160 +pachinko,160 +osechi,160 +orange (touhou),160 +onigunsou,160 +one piece film: red,160 +oldlim,160 +oku (okumen),160 +okiru,160 +nikola tesla (fate),160 +nephenee (fire emblem),160 +nea (chihuri),160 +moe2015,160 +mizuki sei,160 +miyashiro ryuutarou,160 +miru (mill 36),160 +mini-ikamusume,160 +minagiku,160 +midorikawa hana,160 +mg5 (girls' frontline),160 +mepikari,160 +matsumiya kiseri,160 
+man face,160 +majokko (kancolle),160 +majestic prince,160 +luna (shadowverse),160 +loundraw,160 +lor (roasyerizyonirapi),160 +ling yao,160 +kurukuru (sekai seifuku),160 +kuroi suna,160 +komekko,160 +kokubunji koyori,160 +kiznaiver,160 +kittew,160 +kirome (kamipaper),160 +katami shinta,160 +kanitama (putyourhead),160 +kamen rider vice,160 +kamen rider ryuki,160 +jl tan,160 +jet engine,160 +itsuka shidou,160 +hyouta (yoneya),160 +huniepop,160 +hong kong (hetalia),160 +hololive gamers,160 +holding skewer,160 +holding flashlight,160 +hitoyo (baffu),160 +hisuian zoroark,160 +hisuian growlithe,160 +hisakata souji,160 +hinata mutsuki,160 +hetareeji,160 +headlamp,160 +hawe king,160 +hanazono shizuma,160 +habatakuhituji,160 +gundam wing endless waltz,160 +gouda nagi,160 +goinkyo,160 +ganbare ganbare (itou life),160 +finn the human,160 +entrance,160 +emiya kiritsugu (assassin),160 +eluthel,160 +dromarch (xenoblade),160 +drang (granblue fantasy),160 +double grinding,160 +donquixote doflamingo,160 +deca purio,160 +deathalice,160 +criminal girls,160 +construction,160 +clouble,160 +cherrypin,160 +chabo (fuketsudan),160 +bunny mask,160 +brooke (mathias leth),160 +boro,160 +bikini bridge,160 +azure-maya,160 +aotori,160 +anna (sennen sensou aigis),160 +amatori chika,160 +alto-00,160 +akino shin,160 +agent 4 (splatoon),160 +after rain,160 +admiral shiro (shino),160 +yuu (arcadia),159 +yuigahama yui's mother,159 +yue (shemika98425261),159 +yama raja (elsword),159 +xelloss,159 +warabino matsuri,159 +wancho,159 +vivid bad squad (project sekai),159 +vistamp,159 +unimon e,159 +triple wielding,159 +touka gettan,159 +touhou hisoutensoku,159 +totoharu (kujirai minato),159 +tonchan,159 +tokimeki memorial 4,159 +the king of fighters all-stars,159 +taiga (ryukyu-6102-8),159 +suzuya (azur lane),159 +suzuki hina,159 +strider (video game),159 +stan marsh,159 +sphinx,159 +sora no iro mizu no iro,159 +smeargle,159 +shoulder massage,159 +shirane koitsu,159 +shintani tsushiya,159 +shinkuukan (tetsuo3),159 +shin no tenpii,159 +shameimaru aya (cosplay),159 +sawa (snack yoshie),159 +sauvignon,159 +sasaki haise,159 +saotome ako,159 +sabujiroko,159 +revolverwing,159 +red corset,159 +recycle bin,159 +ray (yakusoku no neverland),159 +psd (psdgai),159 +projekt red (light breeze) (arknights),159 +popotan,159 +polka dot necktie,159 +pinki (shounenkakuseiya),159 +pilokey,159 +phoenix wright: ace attorney - trials and tribulations,159 +phantom (arknights),159 +pelipper,159 +papino,159 +ouse (otussger),159 +ouka (ra-raradan),159 +otosaka yuu,159 +osamada meika,159 +opposing sides,159 +oiun,159 +ochikazuki ni naritai miyazen-san,159 +noeru,159 +nipple biting,159 +neko sensha,159 +mokorei,159 +mochi trail,159 +mochacot,159 +miyasaka naco,159 +mitsuboshi colors,159 +mitarai shouta,159 +mitama mudimudi,159 +minamoto no raikou (swimsuit lancer) (second ascension) (fate),159 +megami tensei,159 +manio,159 +machine-doll wa kizutsukanai,159 +lysander z,159 +leopard,159 +leather armor,159 +lamia loveless,159 +kyou (ningiou),159 +kurusu shou,159 +kuroya shinobu,159 +komiyama kotomi,159 +komasawa (fmn-ppp),159 +kokonoka,159 +kishida-shiki,159 +kesuida,159 +kawashiro nitori (cosplay),159 +karan,159 +kanda yuu,159 +kamen rider kabuto,159 +january,159 +isna (footprintsofisna),159 +irma,159 +inou-battle wa nichijou-kei no naka de,159 +imminent paizuri,159 +ico (green bullet),159 +ibara azuki,159 +hougu souji,159 +hiyajou maho,159 +head between thighs,159 +hatozuki tsumiki,159 +hashiko nowoto,159 +grimm (rwby),159 +grey camisole,159 +gravity daze 2,159 
+gradient gloves,159 +goth risuto,159 +goredolf musik,159 +ginzake (mizuumi),159 +german army,159 +full moon wo sagashite,159 +fly agaric,159 +floppy disk,159 +flamebringer (arknights),159 +felarya,159 +fei lio mao,159 +erechan,159 +elsa granhilte,159 +el shaddai,159 +eclair martinozzi,159 +double spoiler,159 +dizzy (feeling),159 +cyawa,159 +cuphead (game),159 +ciel alencon,159 +cheshirrr,159 +cecil damon,159 +castlevania: portrait of ruin,159 +black underwear,159 +bengus,159 +bat ornament,159 +bababababan,159 +avicebron (fate),159 +astraea (sora no otoshimono),159 +arcade sona,159 +aqua socks,159 +aoi suzu,159 +animal between breasts,159 +amemiya taiyou,159 +akatoro (nightlord),159 +after fingering,159 +a.hebmuller,159 +404 logo (girls' frontline),159 +2drr,159 +yuki18r,158 +yu ni t,158 +yoshida hajime,158 +white album,158 +wanko (takohati8),158 +uzuki aki,158 +ump45 (agent lop rabbit) (girls' frontline),158 +uganda (ugandam 00),158 +uchiha obito,158 +two-tone sweater,158 +tougarashi hideyu,158 +tohsaka rin (cosplay),158 +tina (closers),158 +tibbers,158 +tenkuu no craft fleet,158 +tanishi (tani4),158 +tanigawa yuzu,158 +takase asagiri,158 +sutora binsuke,158 +suki! yuki! maji magic (vocaloid),158 +striped bike shorts,158 +strike witches 1940,158 +stage curtains,158 +squeezing testicles,158 +sohn woohyoung,158 +sisters ~natsu no saigo no hi~,158 +shirosaba,158 +sheriff badge,158 +serval,158 +sen no kiseki ii,158 +sebastian michaelis,158 +sasanomesi,158 +sakurai unan,158 +sakakidani,158 +run p (aveton),158 +relaxjon,158 +rebecca chambers,158 +quuni,158 +quasarcake,158 +pringles can,158 +pouncing,158 +poncho s,158 +pomeranian (dog),158 +painpa,158 +oza watto,158 +oyasu (kinakoyamamori),158 +onsokumaru,158 +okuragon,158 +northern ocean princess (cosplay),158 +noixen,158 +nogi yasuhito,158 +nenbuta,158 +navel insertion,158 +nasaniliu,158 +nanashiki fuuka,158 +mori nagayoshi (fate),158 +minami juujisei,158 +mimelond,158 +meguru (cookie),158 +matsumoto noriyuki,158 +manatsuki manata,158 +malcorond,158 +kumatora tatsumi,158 +kongou (kancolle) (cosplay),158 +kobuushi,158 +knight's & magic,158 +knee blush,158 +kingdom key,158 +kataru (ubw-emiya),158 +kasa,158 +kanbara satomi,158 +kamen rider hibiki,158 +kagamine rin (roshin yuukai/hard rkmix),158 +kaeruyama yoshitaka,158 +jasdavi,158 +iogi (iogi k),158 +ichiroku (sakumogu-029),158 +i-26 (azur lane),158 +huangdanlan,158 +hood (warship girls r),158 +homeko,158 +hiroshi (hunter-of-kct),158 +hazekura mikitaka,158 +hand over heart,158 +hanasaki komugi,158 +gyaheung,158 +grune,158 +grenville (azur lane),158 +geodude,158 +frog (chrono trigger),158 +foreplay,158 +fire hydrant,158 +final fantasy crystal chronicles,158 +esther shen,158 +emerald sustrai,158 +duca degli abruzzi (azur lane),158 +driftkingtw,158 +dlanon,158 +deidara (naruto),158 +danua (summer) (granblue fantasy),158 +croupier,158 +chutohampa,158 +chaos (warhammer),158 +carmilla (swimsuit rider) (fate),158 +cardigan (arknights),158 +capsule corp,158 +bugsy (pokemon),158 +brionne,158 +breast pump,158 +bou,158 +bone necklace,158 +barasui,158 +azul ashengrotto,158 +azomo,158 +azhdaha (genshin impact),158 +athrun1120,158 +asakura yume,158 +apron tug,158 +anbe masahiro,158 +amog,158 +amagaeru (hylathewet),158 +alluka zoldyck,158 +akisa (12023648),158 +agata (agatha),158 +acog,158 +yuki onna (nurarihyon no mago),157 +yazuki gennojou,157 +yandr4hope,157 +willow,157 +wedge (tyrl stride),157 +wang yujia,157 +wamudraws,157 +waisshu (sougyokyuu),157 +vayne (league of legends),157 +uon taraku,157 
+ugeppa,157 +tsuruki shizuka,157 +treadmill,157 +touching ears,157 +takanashi yomi,157 +takamura,157 +supreme (brand),157 +star guardian soraka,157 +splattershot jr (splatoon),157 +spartan (halo),157 +shirogane (cufsser),157 +shia flatpaddy,157 +shamisen (suzumiya haruhi),157 +seofon (granblue fantasy),157 +scopedog,157 +scene (arknights),157 +saruno (eyesonly712),157 +sanwari (aruji yume),157 +runway,157 +romana,157 +rimukoro,157 +ribbon baton,157 +princess leia organa solo,157 +primary stage,157 +over the nose gag,157 +osora (judithandlilith),157 +ore no kanojo to osananajimi ga shuraba sugiru,157 +november,157 +nomura tetsuya,157 +noble academy school uniform,157 +no hands,157 +no anus,157 +nemunemu semi,157 +neko danshaku,157 +naho (pi988y),157 +mushihime-sama,157 +murata,157 +munyuu,157 +muhi11234,157 +mole on crotch,157 +mokyutan,157 +minasenagi,157 +minagi mikoto,157 +mimasi osuwari,157 +mikisai,157 +menstruation,157 +melusine (ibuki notsu),157 +marikawa shizuka,157 +malino (dream maker),157 +m1 garand (girls' frontline),157 +longlong (drasdr7513),157 +kuroe shizuku (cosplay),157 +komachi pochi,157 +kokoro connect,157 +kitsune-neko,157 +kisasage kouta,157 +kinugasa yuuichi,157 +kingdra,157 +katsuragi yako,157 +katori (quietude),157 +karma (nakiri ayame),157 +kaleido ruby,157 +kageyama torako,157 +jun (princess connect!),157 +jin (xenoblade),157 +infi,157 +implied incest,157 +hyottoko mask,157 +hoshino yuumi,157 +hoshino ichika (project sekai),157 +hookah,157 +hongbao,157 +hodaka natsumi,157 +hitomi o,157 +hirota (masasiv3),157 +headwear writing,157 +haiiro teien,157 +garana,157 +fumafu,157 +flannel,157 +fenrir (fenlil0316),157 +feather-trimmed jacket,157 +fan la norne,157 +elleciel.eud,157 +dusty (gravity daze),157 +doukyo's,157 +dom,157 +danaka,157 +daitoshokan no hitsujikai,157 +caveman,157 +brown cat,157 +brown-tinted eyewear,157 +borrowed hairstyle,157 +blacksmith,157 +belly poke,157 +bakuretsu tenshi,157 +bait and switch,157 +asteion,157 +ass ripple,157 +arsh (thestarwish),157 +alzano school uniform,157 +acea4,157 +abarai renji,157 +zhainan s-jun,156 +yodare (3yami8),156 +yo-class submarine,156 +yagiri namie,156 +wulazula,156 +wolflong,156 +wadanohara,156 +vivy,156 +utane uta,156 +ursaring,156 +type 95 (summer cicada) (girls' frontline),156 +thumb to mouth,156 +tepen (tptptpn),156 +tawashi1623,156 +tasmanian devil (kemono friends),156 +suzuhara touji,156 +suuru,156 +super sailor jupiter,156 +suiheisen made nan mile?,156 +suguri (character),156 +stingray,156 +sodapop (iemaki),156 +small lady serenity,156 +slave tattoo,156 +shirt under dress,156 +seikai no senki,156 +sebas murasaki,156 +roll.exe (mega man),156 +ribbon-trimmed bow,156 +pondel,156 +pidgeotto,156 +peterhl,156 +perrault (last origin),156 +obstagoon,156 +nishizono chigusa,156 +nishizawa ayumu,156 +nishimiya shouko,156 +ng (kimjae737),156 +nasii,156 +nanu (pokemon),156 +nandaba naota,156 +namaru (summer dandy),156 +moomintroll,156 +momiji oroshi,156 +miwasiba,156 +mister (black and white),156 +mgk968,156 +mesprit,156 +meipu hm,156 +mayuzumi kai,156 +martha (swimsuit ruler) (second ascension) (fate),156 +marivel armitage,156 +maple tree,156 +majamari,156 +magical mirai miku (2019),156 +lucidsky,156 +lobelia (saclia),156 +lifeguard chair,156 +layla (genshin impact),156 +kunisaki yukito,156 +kotoura haruka,156 +korok,156 +komomo (ptkrx),156 +kokomi (aniesuakkaman),156 +koin (foxmark),156 +kiramarukou,156 +kimagure blue,156 +kikuri (touhou),156 +kibadori rue,156 +katsushika pachi,156 +kasamoto eri,156 +karin (naruto),156 
+kamiya zuzu,156 +kagura yuuki,156 +judge,156 +jiyuuyuu,156 +isshiki momo,156 +in orbit,156 +ichimura kanata,156 +hushabye,156 +hotaru iori,156 +holding handcuffs,156 +hinoe (right-hnxx03),156 +higashigure,156 +hibiki (blue archive) (cosplay),156 +hero (dq4),156 +heart-shaped mouth,156 +harukatron,156 +hariyama (toubou tengoku),156 +hand under shorts,156 +gunpla boy (ishiyumi),156 +gudon (iukhzl),156 +growth,156 +gridman (ssss),156 +grape-kun,156 +granblue fantasy versus,156 +gotou kenji,156 +gonoike biwa,156 +gesugesu ahoaho,156 +genmu senki leda,156 +garage,156 +frillish,156 +flying boat,156 +fenrir (shingeki no bahamut),156 +ebira,156 +ebihara naho,156 +dustpan,156 +dior-zi,156 +date masamune (sengoku basara),156 +cocked eyebrow,156 +chihayafuru,156 +broken plate,156 +bakusai,156 +azumaya hironaru,156 +archer (ragnarok online),156 +aosaki yukina,156 +aori sora,156 +anatomical nonsense,156 +ame 816,156 +abukobato,156 +zaku ii s char custom,155 +yuusha no kuse ni namaiki da,155 +yuuki kei,155 +yuki to hana,155 +ysk!,155 +yoosai,155 +yasuri shichika,155 +yasohachi ryou,155 +watakarashi,155 +volkswagen,155 +vikala (blooming summer wallflower) (granblue fantasy),155 +unohana retsu,155 +tuxedo kamen,155 +thomas edison (fate),155 +the silmarillion,155 +tenshinranman,155 +team galactic uniform,155 +talonflame,155 +tales of rebirth,155 +taihou (muse) (azur lane),155 +susuki grass,155 +sushi (sashimise),155 +sumia (fire emblem),155 +sugo6969,155 +spoon straw,155 +soviet army,155 +skywalker0610,155 +skylader,155 +sivir,155 +sitting on tree stump,155 +sin kiske,155 +shousan (hno3syo),155 +shounen jump,155 +shantae and the pirate's curse,155 +septet (zrca janne),155 +sekiutsu maria tarou,155 +scathach skadi (third ascension) (fate),155 +saiku (zvlku),155 +saeki nao,155 +sadakage,155 +ryuu (breath of fire v),155 +ringeko-chan,155 +renata alekseevna tsvetaeva,155 +raoh (hokuto no ken),155 +ponkan 8,155 +polka dot innertube,155 +pokemon battle,155 +pochimaru (marumaru wanwan),155 +pile,155 +phonecard,155 +pedophile,155 +oomuro-ke,155 +omc,155 +newhalf with female,155 +neme,155 +natsume takashi,155 +naruse mamoru,155 +mutsuki riichi,155 +murasaki (senran kagura),155 +mukunokino isshiki,155 +moshihimechan,155 +moonywitcher,155 +momoi azuki,155 +mizusaki tsubame,155 +miyuki rei,155 +miyahara takuya,155 +milcho,155 +michiru (amphibian),155 +mexif,155 +meowstic (female),155 +maxa',155 +matsubayashi souta,155 +matsu (kancolle),155 +masaki tenchi,155 +marx (kirby),155 +majin gappa,155 +m590 (girls' frontline),155 +luozhu (the legend of luoxiaohei),155 +lifting covers,155 +laundromat,155 +l85,155 +kylin,155 +kuga zankurou,155 +komikado sachi,155 +kolin,155 +kinoshita sakura,155 +kendou itsuka,155 +kazukoto,155 +kazami chiu,155 +kari (kakko k),155 +kaneki ichika,155 +kakuno,155 +kagetsu tooya,155 +jesus,155 +iuui,155 +isu (is88),155 +iseki (kuroshura no tabiji),155 +isaac foster,155 +inubouzaki fuu,155 +ikezaki misa,155 +ian chase,155 +houjou sophie,155 +hopping,155 +hioyami,155 +hentaki,155 +hedgehog ears,155 +hazakura satsuki,155 +haruhina purple,155 +hamster ears,155 +hacka doll 1,155 +gyoju (only arme nim),155 +greed (fma),155 +gourgeist,155 +gibson,155 +genek,155 +fumizuki (azur lane),155 +fudou yukimitsu,155 +fox hair ornament,155 +firewood,155 +finnish flag,155 +exploration,155 +ever 17,155 +enarane,155 +elfnein,155 +dorayaki,155 +darius (league of legends),155 +dainsleif (genshin impact),155 +cinderella series,155 +bousouzoku,155 +binato lulu,155 +barioth (armor),155 +barbecue,155 +azurill,155 +ayra 
(fire emblem),155 +asahigaoka school uniform,155 +aozora nan,155 +aomoro,155 +andava,155 +altronage,155 +alice (megami tensei),155 +akiba monaka,155 +accessories,155 +a.b.a,155 +zinogre (armor),154 +yatomi,154 +xkirara39x,154 +white wolf,154 +ushio kai ni (kancolle),154 +unclasped,154 +ume-sensei,154 +u.a. cheerleader uniform,154 +toy block,154 +tooyama rin,154 +thomas hewitt,154 +tequila (arknights),154 +tatsunokosso,154 +takashi shirogane,154 +takahashi mugi,154 +swadloon,154 +suzuki ayane,154 +super robot wars og saga mugen no frontier exceed,154 +sunohara mei,154 +suikakitsu shiro,154 +striped footwear,154 +steyr aug,154 +spykeee,154 +somehira katsu,154 +shoori (migiha),154 +shiny and normal,154 +shinsengumi (gintama),154 +shin koihime musou,154 +shadow puppet,154 +sextuplet (osomatsu-kun),154 +serra (fire emblem),154 +senou natsuru,154 +sen (sen0910),154 +sawatari riko,154 +satsumi,154 +sarong removed,154 +sabaton,154 +sa-dui,154 +ryuumonbuchi touka,154 +rudeus greyrat,154 +robot girls z,154 +rin (royal),154 +repi,154 +reiga mieru,154 +plate stack,154 +plastic memories,154 +pizza (pizzania company),154 +pepper shaker,154 +pegashi,154 +paper mario: the thousand year door,154 +oversized zipper,154 +ouga saki,154 +otome function,154 +orca hair ornament,154 +ookami ryousuke,154 +oden (th-inaba),154 +oda nanami,154 +object in swimsuit,154 +nuclear weapon,154 +norazura,154 +noki (affabile),154 +nohhun,154 +nasu no yoichi,154 +nakamura tetsuya,154 +naginami,154 +moti coi,154 +mm!,154 +miyukiyo,154 +miho (mi),154 +maeha,154 +m1918 (girls' frontline),154 +m-rs,154 +lunar,154 +lrk,154 +lilaccu,154 +lanturn,154 +kuromu (underporno),154 +kure~pu,154 +koya (koya x 00),154 +kiriu,154 +king cobra (kemono friends),154 +king,154 +kim dokja,154 +kidachi,154 +ken (haresaku),154 +kazumi yoshiyuki,154 +kaya8,154 +jiu (sdesd3205),154 +inaba haneru (animare),154 +ikeda chizuru,154 +ichinose hajime,154 +i-chu,154 +hydrant (kasozama),154 +hoso-inu,154 +horz,154 +hokori sakuni,154 +hirokawa kouichirou,154 +hinamori (18ar0),154 +highleg dress,154 +herozu (xxhrd),154 +here's johnny! 
(meme),154 +heiwa (murasiho),154 +hashiro,154 +hansel (granblue fantasy),154 +hand on own foot,154 +han-0v0,154 +halu-ca,154 +gurasion (gurasion),154 +gretel (granblue fantasy),154 +gekkou (geccomajin),154 +furukawa herzer,154 +flou (flou art),154 +female footjob,154 +f6,154 +el (girls und panzer),154 +dugtrio,154 +double cheek kiss,154 +cum on back,154 +boryeon (last origin),154 +blood on chest,154 +black sister,154 +b-pang,154 +azumi akitake,154 +azen (mntimcczgrtn),154 +applin,154 +anpan,154 +anchorage (azur lane),154 +amphibia,154 +yunar,153 +yukke,153 +yukizen,153 +yueko (jiayue wu),153 +yoyokkun,153 +yoshiwo,153 +yomoyama yotabanashi,153 +yokaze japan,153 +ymir (queen's blade),153 +x anus,153 +wynn the wind charmer,153 +wrist extended,153 +wooden staff,153 +wendy (wendy's),153 +warehouse,153 +wanke,153 +wagashi928,153 +vash the stampede,153 +vaseraga,153 +umemaro (siona0908),153 +tyoko tanuki16,153 +tsumetai (tsurunoka),153 +tsube aika,153 +toono mizuki,153 +test tube rack,153 +team yell,153 +taishou yakyuu musume,153 +t-pose,153 +suzuki (company),153 +suppository,153 +suesan,153 +stepping stones,153 +steamroller,153 +stealth fellatio,153 +starkamisan,153 +spiked dildo,153 +sora (sky s04),153 +smallfry (splatoon),153 +sin (sin52y),153 +simon belmont,153 +shovelwell,153 +shion (tensei shitara slime datta ken),153 +sewer grate,153 +sewaddle,153 +sengo muramasa (touken ranbu),153 +seizon honnou valkyria (idolmaster),153 +scarf on head,153 +saliva swap,153 +saaya (suisei no gargantia),153 +rururiaru,153 +red (girllove),153 +red: pride of eden,153 +push-button,153 +poochyena,153 +plug (piercing),153 +pig girl,153 +orange hair ornament,153 +netoge no yome wa onna no ko janai to omotta?,153 +nekomu,153 +neko ni chikyuu,153 +neko-san (dim.dream),153 +namazu,153 +my700,153 +mutsuki face,153 +mr. 
mime,153 +mishiro (ixtlolton),153 +matsuki (mikipingpong),153 +matsubara yuuna,153 +masaki (msk064),153 +masaharu,153 +marona (phantom brave),153 +magical mirai miku (2018),153 +luminous arc,153 +last exile: gin'yoku no fam,153 +kyubey (cosplay),153 +kuzumiya yuyu,153 +kuse (0201),153 +kurofood,153 +kobeya (tonari no kobeya),153 +kizami nori to yamaimo,153 +kazekawa nagi,153 +kauto,153 +katakana,153 +kashimoto riko,153 +karate,153 +kanagawa okinami ura,153 +jellcaps,153 +jamadhar,153 +jagdpanzer 38(t),153 +it (stephen king),153 +isaki tanaka,153 +irisu shoukougun!,153 +invisible floor,153 +hyakuya yuuichirou,153 +hoshihuri,153 +horn flower,153 +hiroyama hiroshi,153 +high-cut armor,153 +herio,153 +haori himo,153 +hanasato minori,153 +hanako151,153 +hakkasame,153 +h'aanit (octopath traveler),153 +golgi hon,153 +gmgt (gggggg3),153 +gino,153 +funyariko,153 +fumizuki kai ni (kancolle),153 +fouriasensei,153 +fortnite,153 +eria (yu-gi-oh!),153 +dress grab,153 +dragon quest i,153 +double breast sucking,153 +devilukez,153 +dancing stars on me!,153 +cornet espoir,153 +common vampire bat (kemono friends),153 +chou-10cm-hou-chan (suzutsuki's),153 +cherry in the sun,153 +chariko,153 +bison (arknights),153 +ayase-mio,153 +asahina mitsuru,153 +arcane caitlyn,153 +anna nishikinomiya,153 +american football (object),153 +amakusa (hidorozoa),153 +amahane madoka,153 +alipheese fateburn xvi,153 +akikawa yayoi (umamusume),153 +akari (princess connect!),153 +akai shuuichi,153 +zefrableu,152 +yuzu modoki,152 +yutsuka (amyucca),152 +yotsunoha,152 +yashiro (kancolle),152 +yamato iori,152 +xox xxxxxx,152 +wspread,152 +woobat,152 +wolf o'donnell,152 +winged unicorn,152 +vaan,152 +unryuu kai (kancolle),152 +tympole,152 +tsuchiya ako,152 +touyama nao,152 +totenkopf,152 +throwing poke ball,152 +tayuya (naruto),152 +tanaka yuuichi,152 +tamakaga,152 +taku1122,152 +tachibana omina,152 +sumaga,152 +sugimoto gang,152 +striped leotard,152 +splatter background,152 +sooru0720,152 +snuffy (vtuber),152 +skyrick9413,152 +shuri yasuyuki,152 +shouryuuken,152 +shikishima mirei,152 +sawa jaaji,152 +satou samu,152 +samejima mamimi,152 +roux louka,152 +rafaelaaa,152 +quistis trepe,152 +puchiko,152 +post and rail fence,152 +plaid socks,152 +pink buruma,152 +pimp,152 +pieck finger,152 +p-chan (p-90),152 +orchis,152 +oniyama831,152 +okunin,152 +ocha (ochappie),152 +nude modeling,152 +nogoodlife,152 +ninoude (ninoude44),152 +morifumi,152 +monster hunter 4,152 +mk23 (girls' frontline),152 +mini santa hat,152 +microphone wand,152 +michimaru (michi),152 +male priest (dungeon and fighter),152 +male harem,152 +lungmen dollar,152 +little princess,152 +lace-up top,152 +kurono,152 +kureaki (exit),152 +krizalid,152 +kiryu coco (dragon),152 +kinsenka,152 +kinom (sculpturesky),152 +kfp,152 +kapura,152 +kaito (vocaloid3),152 +kaidou j1,152 +kaela kovalskia,152 +jungle gym,152 +jq,152 +iron tager,152 +inukai isuke,152 +inui sanagi,152 +illumi zoldyck,152 +ikemura hiroichi,152 +hope estheim,152 +hooreng,152 +hime (kaibutsu oujo),152 +herensuge girls academy school uniform,152 +heigani,152 +heco (mama),152 +hasegawa kodaka,152 +hare hare yukai,152 +hane (azelye),152 +hachiroku (maitetsu),152 +grey sweater vest,152 +gold bar,152 +gimmy,152 +giant otter (kemono friends) (kuro (kurojill)),152 +gamecube,152 +fortune (last origin),152 +flour,152 +firelight ekko,152 +fire flower,152 +finland (hetalia),152 +ereraero,152 +enuni,152 +dream hunter rem,152 +dragonmaterial,152 +dougan calpis con,152 +dokugamine riruka,152 +diving helmet,152 +demekyon,152 
+delutaya,152 +cosmos (flower),152 +code: empress (elsword),152 +clefable,152 +cigarette holder,152 +choudenji robo combattler v,152 +chikichi,152 +cheshire (summery date!) (azur lane),152 +buna shimeji (keymush),152 +briar rose (sinoalice),152 +bra on head,152 +bouen,152 +blackmail,152 +bikko,152 +bba1985,152 +bad drawcrowd id,152 +avatar 2.0 project,152 +arashiya,152 +angelic angel,152 +anew returner,152 +akagi (warship girls r),152 +aircraft catapult,152 +zuo wei er,151 +z3 max schultz (kancolle) (cosplay),151 +yuuki shuri,151 +yukkuri abuse,151 +yamadori yoshitomo,151 +yamada hifumi,151 +yaekaidou,151 +xero,151 +whoopsatro,151 +warainaku,151 +warabi mochi (ehimedaisuki),151 +wakiyama tamami,151 +vierzeck,151 +uka-no-mitama-no-kami (inakon),151 +towa monaka,151 +toushinden,151 +toudou naoya,151 +toudou aoi (jujutsu kaisen),151 +tohoho (hoshinoyami),151 +title page,151 +teaspoon,151 +team yell grunt,151 +tamandua ears,151 +suruga dbh,151 +sunagimo (nagimo),151 +string of light bulbs,151 +skull573,151 +shinano (dreams of the hazy moon) (azur lane),151 +sheva alomar,151 +sherlock holmes,151 +sencha (senchat),151 +seishou middle school uniform,151 +school nurse,151 +sara (kurome1127),151 +ruquia,151 +rosenkreuzstilette,151 +rokurou rangetsu,151 +rinarisa,151 +registeel,151 +raythalosm,151 +rathalos,151 +raditz,151 +queen of hearts (alice in wonderland),151 +q-gaku,151 +purple outline,151 +project x zone,151 +pretty-purin720,151 +pokemon the movie: the power of us,151 +playing with hair,151 +platform,151 +pink headband,151 +pile of skulls,151 +peg leg,151 +pardofelis (honkai impact),151 +osaragi hazumu,151 +orange peel,151 +orange eyeshadow,151 +opal (pokemon),151 +onigiri (ginseitou),151 +okpriko,151 +nyanko,151 +nuda,151 +noriuma,151 +no scar,151 +nixeu,151 +musume (yuunama),151 +mushroom on head,151 +moritan,151 +monokubo,151 +mizuki ryuu,151 +miyako (blue archive),151 +miiko (drops7),151 +miakis (gensou suikoden),151 +mebaru,151 +matsuri (matsuike),151 +master chief,151 +lufi ays,151 +love live! sunshine!! 
the school idol movie over the rainbow,151 +kws,151 +kurou (bcrow),151 +kurokin,151 +kumonji aruto,151 +kumasteam,151 +kumano (azur lane),151 +kudo shinobu,151 +kubo tite (style),151 +koi to senkyo to chocolate,151 +kirisaki akihito,151 +ken-1,151 +kawaruhi,151 +kangoku senkan,151 +kamen rider den-o,151 +joseph lee,151 +jont,151 +igrene (fire emblem),151 +himetsuki luna,151 +hibimegane,151 +helena blavatsky (swimsuit archer) (third ascension) (fate),151 +haura akitoshi,151 +hane (hanetsuki),151 +hands on penis,151 +hanabana tsubomi,151 +gwayo,151 +glasses day,151 +getting over it,151 +geomancer (fft),151 +genso,151 +game boy advance,151 +gaki kyonyuu,151 +fuuka academy uniform,151 +fushimi gaku,151 +fukase,151 +fruit tart,151 +flyable heart,151 +finger gun to head,151 +fabric,151 +ex albio,151 +erotibot,151 +enpitsu01,151 +emma millstein,151 +dismemberment,151 +crosscounter,151 +cradle (artist),151 +counting,151 +coughing blood,151 +conveyor belt,151 +cofepig,151 +christophe giacometti,151 +chopping,151 +chloe (school festival) (princess connect!),151 +chise (swimsuit) (blue archive),151 +charlotte aulin,151 +cefca palazzo,151 +candle wax,151 +campo flicker (kemono friends),151 +butakoma 300g,151 +broken condom,151 +blonde dog girl (ri-net),151 +berserkert,151 +benjamin kirby tennyson,151 +batter,151 +batta (kanzume quality),151 +ayanokouji rem,151 +ayano naoto,151 +ayano (ayn398),151 +aug (girls' frontline),151 +asuna (stacia),151 +astra militarum,151 +asicah,151 +asagiri youko,151 +ao (flowerclasse),151 +andreana (arknights),151 +amana (pocketkey),151 +alucard (castlevania),151 +albino (a1b1n0623),151 +adelbert steiner,151 +yuzuki himuka,150 +yuzuki (yuduame),150 +yuki maru (yukimaru 1),150 +yufukiri,150 +yoshiki360,150 +yoriteruru,150 +yatsuka (846),150 +yashiro kizuku,150 +yanmarson,150 +william shakespeare (fate),150 +waver velvet (sensha otoko),150 +urara (sumairuclover),150 +totoro bus stop,150 +totaku (musha prune),150 +tohno shiki (2),150 +thumb in pocket,150 +the caves of steel,150 +tewarusa,150 +team flare,150 +tamada heijun,150 +tamachi kuwa,150 +taker (flamestorm),150 +suzumori uina,150 +sutoroa,150 +surge concerto,150 +suraimu (suraimuraimu),150 +sun-d,150 +stuffed pig,150 +splatoon (manga),150 +souzou forest (vocaloid),150 +soubi,150 +so-class submarine,150 +shinto,150 +shika miso,150 +shears,150 +shared thought bubble,150 +shadow chaser (ragnarok online),150 +sasaki (suzumiya haruhi),150 +sai (weapon),150 +saber lion,150 +ryota tentei,150 +rorobomb,150 +rizu (rizunm),150 +riddle joker,150 +red outline,150 +raybar,150 +qubeley,150 +phase connect,150 +pestle,150 +perfect cell,150 +panties around one ankle,150 +otoshiro seira,150 +orange tail,150 +open cloak,150 +ookamiuo,150 +nina williams,150 +nightclub,150 +naba (take tonbo),150 +moura (kenyuugetu),150 +moroboshi ataru,150 +monk (ragnarok online),150 +mof,150 +miyano ururu,150 +milkshake,150 +mikeco,150 +mikami komata,150 +metal owl (aden12),150 +medic (sekaiju),150 +mauko (girls und panzer),150 +masirosu,150 +maku-raku,150 +majo no ie,150 +maho (yakimorokoshi),150 +lyra-kotto,150 +kuwabara kazuma,150 +kujibiki unbalance,150 +kriegsmarine,150 +komatsu ibuki,150 +kirisu mafuyu,150 +kirikan,150 +kirasaka sayaka,150 +kashiwa mochi (food),150 +k jin,150 +jumpsuit around waist,150 +iuro,150 +ijiro suika,150 +holding legwear,150 +holding carrot,150 +hirase yuu,150 +hiide,150 +healthyman,150 +hatachi,150 +haru (kyou),150 +hanazono yurine,150 +hamachamu,150 +half slime-chan,150 +hacko,150 +gunsmith cats,150 +grovyle,150 +grey 
sports bra,150 +grey lips,150 +geronimo (fate),150 +gensou suikoden iv,150 +fujiwara warawara,150 +fishbowl helmet,150 +female abyssal admiral (kancolle),150 +est (fire emblem),150 +ero waifu,150 +eeeeee,150 +eavesdropping,150 +dragon claw,150 +double middle finger,150 +daidou sayo,150 +cum in throat,150 +cross (weapon),150 +crescent choker,150 +clair vaux bernardus,150 +chosen undead,150 +chocolate on hand,150 +chiyoko (oman1229),150 +cheken,150 +championship belt,150 +chaigidhiell,150 +c.honey,150 +business card,150 +blue screen of death,150 +blackbuck (kemono friends),150 +bird nest,150 +beelzebub (manga),150 +becky blackbell,150 +bead anklet,150 +battle effectiveness award,150 +batou,150 +autumn-sacura,150 +aru ra une,150 +antaria,150 +alex (alexandoria),150 +akiha (attract),150 +ajifurai,150 +2010 fifa world cup,150 +zoza,149 +zipang (zip@ng works),149 +yukari (rihenara doll),149 +yue (tada no saboten),149 +yotsuyu goe brutus,149 +yosaku (roach),149 +xilmo,149 +wooden bridge,149 +wood carving tool,149 +white tunic,149 +whale shark,149 +weather vane,149 +wakui rumi,149 +wagashi (dagashiya),149 +venomrobo,149 +ulquiorra cifer,149 +ub1mo,149 +u rin,149 +tsukasa yuuki,149 +tsuka,149 +toru nagase,149 +to heart 2 xrated,149 +teresa (claymore),149 +temu,149 +tasmanian devil tail,149 +taro (ultrataro),149 +takanae kyourin,149 +taishakuten (onmyoji),149 +suzuharu toufu,149 +sura (ragnarok online),149 +substitute (pokemon),149 +souryuu (azur lane),149 +shouni (sato3),149 +shiromanta (character),149 +shirihime,149 +shelly (pokemon),149 +shedinja,149 +shared drink,149 +setu kurokawa,149 +setsuna (fire emblem),149 +segment display,149 +sawkm,149 +satomachi,149 +salanchu,149 +sakra devanam (elsword),149 +ryekie (live a hero),149 +richelieu (warship girls r),149 +regice,149 +reflect (gawr gura),149 +red cucumber,149 +rangen,149 +quinella,149 +punishment,149 +pon (shind 997),149 +pon (ponidrop),149 +polka dot umbrella,149 +pink robe,149 +onodera karen,149 +ohara tometa,149 +occult ball,149 +note2000,149 +nogizaka haruka,149 +nobile1031,149 +niles (fire emblem),149 +niku-name,149 +neki-t,149 +natsu hotaru,149 +nanami sano,149 +nagimiso,149 +na! 
(na'mr),149 +muchourin,149 +mizuki gyokuran,149 +minami aomori,149 +mayl sakurai (mega man),149 +matsukawa (pale scarlet),149 +mary skelter,149 +marth (fire emblem awakening),149 +marinette dupain-cheng,149 +margaretha sorin,149 +malik caesars,149 +makiaato,149 +maison ikkoku,149 +maiden in black,149 +mahou shoujo lyrical nanoha a's portable: the gears of destiny,149 +maga-g,149 +madarame harunobu,149 +macross plus,149 +m.tokotsu,149 +lycanroc (dusk),149 +linde (fire emblem),149 +leather skirt,149 +landolt tamaki,149 +kurogane no linebarrel,149 +kriemhild gretchen,149 +konnyaku (food),149 +knifedragon,149 +klein (sao),149 +kawasemi27,149 +karokuchitose,149 +karigurashi no arrietty,149 +kani fish,149 +jing hu,149 +j.h.,149 +izuka daisuke,149 +itasha,149 +inflatable shark,149 +inaeda kei,149 +ikuchan kaoru (character),149 +iihara nao,149 +ichibi,149 +hut,149 +hawawa-chan (shiro kuma shake),149 +hamster girl,149 +hamamoto ryuusuke,149 +half moon,149 +gujira,149 +fuuki (te fuukin),149 +fushitasu,149 +fujiya takao,149 +eric (tianqijiang),149 +entry plug,149 +endsmall min,149 +edogawa roman,149 +diamond earrings,149 +des,149 +dark angel olivia,149 +ciri,149 +chris (mario),149 +choukai (azur lane),149 +chii-kun (seedyoulater),149 +chen hai (azur lane),149 +cbgb,149 +carte,149 +caenis (swimsuit rider) (fate),149 +bread bun,149 +blue bandeau,149 +belfast (iridescent rosa) (azur lane),149 +beldum,149 +bee costume,149 +august von parseval (the conquered unhulde) (azur lane),149 +arisen (dragon's dogma),149 +argyle scarf,149 +arena,149 +arai harumaki,149 +apple on head,149 +ancotaku,149 +ameth (princess connect!),149 +alisa boskonovich,149 +aetherion,149 +achievement unlocked,149 +zeroyon (yukkuri remirya),148 +zenigata kouichi,148 +zcune,148 +yuzu bath,148 +yuyanshu13,148 +yuu (yuyukaikan),148 +youkan (food),148 +yogurt,148 +yasuo (league of legends),148 +yasui riosuke,148 +wonderland wars,148 +waira,148 +veigar,148 +valkyrie connect,148 +ushiromiya kinzou,148 +two-tone kimono,148 +tsukikusa,148 +tomosuke,148 +toma (asagayatei),148 +textbook,148 +takashia (akimototakashia),148 +tafuto,148 +tada no nasu,148 +suzutarou gunsou,148 +suzuri (tennenseki),148 +sugimeno,148 +sue (bg-bros),148 +spirit blossom kindred,148 +spasm,148 +sousui hani,148 +skull and crossed swords,148 +siamese cat,148 +shourou kanna,148 +shirousagi uyu,148 +shigofumi,148 +shigenobu,148 +shachihoko,148 +servant card (fate/grand order),148 +sdorica,148 +scruffyturtles,148 +satsuki mayuri,148 +satou sakie,148 +saru (style),148 +saejin oh,148 +rumo,148 +rin (kemurikusa),148 +rikka (holostars),148 +rei (guilty gear),148 +reco,148 +rainbow bikini,148 +quilt (game),148 +pushing face,148 +pullcart,148 +pocari (sq551),148 +pico (picollector79),148 +pholia,148 +pasdar,148 +panzer waltz,148 +panda hat,148 +orange capelet,148 +ooyama kina,148 +okera,148 +nori (akusei shinseibutsu),148 +nogi sonoko,148 +nobuyoshi-zamurai,148 +nishigori atsushi,148 +nikism,148 +neck bobbles,148 +mushroom print,148 +moroyan,148 +morokoshi (tekku),148 +morino ichigo,148 +monster hunter frontier,148 +mokyu,148 +mochiki,148 +mizuki riko,148 +mistrail,148 +mikomiko (mikomikosu),148 +mercury symbol,148 +mejiro mcqueen (ripple fairlady) (umamusume),148 +maruwa tarou,148 +mannosuke,148 +maburu (lojyq1eur3e8bit),148 +ludvico private girls' academy school uniform,148 +lenora (pokemon),148 +l (matador),148 +kyokugen dasshutsu,148 +kuronuma sawako,148 +kuroihato,148 +kuchibashi (9180),148 +kokoa ninniku,148 +kokihanada,148 +kodoku no gourmet,148 +kneeing,148 +kishuku gakkou 
no juliet,148 +kirisaki seeker,148 +kikivi,148 +kadoi aya,148 +jum-p,148 +jirusu,148 +jiraiya (naruto),148 +jason kim,148 +irisu kyouko,148 +illyasviel von einzbern (swimsuit archer) (second ascension),148 +ikaheigen,148 +ibuki munemasa,148 +hyuuga hanabi,148 +human dog,148 +hk (nt),148 +hiwatari rin,148 +hisin,148 +harukana receive,148 +hareta,148 +hapi (fire emblem),148 +hands on another's neck,148 +gomashio (goma feet),148 +glowing feather,148 +girotin ginza,148 +girlish number,148 +getsuyou yasumi,148 +gerotan,148 +gargoyle,148 +gal gamer ni homeraretai,148 +fuugetsu oreha ikiru,148 +fubuki atsuya,148 +frilled garter,148 +fir (fire emblem),148 +federica n. doglio,148 +fairy knight tristan (second ascension) (fate),148 +erakin,148 +england,148 +eizen (tales),148 +dick gumshoe,148 +dantes ward,148 +dango remi,148 +cat o' nine tails,148 +card game,148 +broken moon,148 +breast curtain lift,148 +bluez,148 +bloomers around one leg,148 +bioluminescence,148 +avatar (lineage 2),148 +asya,148 +asahiro,148 +amagamido,148 +alia (mega man),148 +alhaitham (genshin impact),148 +agnes oblige,148 +ace of diamonds,148 +yuzuki kihiro,147 +yuunagi (0217),147 +yukiyanagi,147 +yuimari,147 +yagyuu kyuubei,147 +xfate,147 +white tiger print,147 +wake up girls! stage no tenshi,147 +vyn richter (tears of themis),147 +volumen hydragyrum (fate),147 +vermillion akiha,147 +vane (vane0),147 +undone bowtie,147 +twinmyniad (fate),147 +tsukiumi,147 +togami byakuya (danganronpa 2),147 +tendou nabiki,147 +ten-chan (eternal s),147 +tama (hiroshige 36),147 +takara joney,147 +swinub,147 +sukesan,147 +spotted hair,147 +spiral-only eyes,147 +solaire of astora,147 +smelling underwear,147 +skiing,147 +sketching,147 +silvertsuki,147 +sig (puyopuyo),147 +shiranui kai ni (kancolle),147 +shibuya (tokyo),147 +seulbi lee,147 +seto midori,147 +sencha (senta 10),147 +scout (tf2),147 +saipin,147 +removing shoes,147 +rakko (r2),147 +raiden mei (valkyrie bladestrike),147 +pn (wnsl216),147 +planeptune,147 +pia carrot e youkoso!! 
3,147 +peri (fire emblem),147 +people's republic of china flag,147 +oshage (osyage921),147 +osana reimu,147 +osafune kairi,147 +oro (zetsubou girl),147 +orimoto rika,147 +oosaki takahito,147 +octillery,147 +noukatu,147 +nokoru sora,147 +nitori aiichirou,147 +nikke (cherish),147 +nick wilde,147 +nero claudius (modern costume of crimson) (fate),147 +national basketball association,147 +nao (dream c club),147 +nanashinayuzu mochi,147 +nahu,147 +mukkushi,147 +mugen no fantasia,147 +motojima hakka,147 +momozu komamochi,147 +mokona,147 +mixplin,147 +mikripkm,147 +mazda,147 +maruino,147 +makita maki,147 +luminous,147 +lucid (maplestory),147 +lococo:p,147 +lightning glare,147 +leather suit,147 +l'avenir academy uniform,147 +kusakami akira,147 +kurozako,147 +kouyafu,147 +komota (kanyou shoujo),147 +kiritani (marginal),147 +kawaii dake ja nai shikimori-san,147 +kaori (princess connect!),147 +kachayori,147 +just be friends (vocaloid),147 +jianren,147 +jaguar boy,147 +izumi ako,147 +itou ayachi,147 +ishida mia,147 +inu3,147 +hunya,147 +hector rivera,147 +heart ring top,147 +hazuki kasane,147 +hayashi (l8poushou),147 +hashimoto fumie,147 +hamada (super tachioyogi kyousou),147 +hairstyle connection,147 +hair vines,147 +hair extensions,147 +gunxsword,147 +ghost quartz (houseki no kuni),147 +genshin impact sticker redraw (meme),147 +gabayo,147 +fuyuno taka,147 +fujie-yz,147 +frikulu,147 +foot tickling,147 +flask (pandora),147 +f-14 tomcat,147 +energy shield,147 +el cazador de la bruja,147 +dunkerque (summer sucre) (azur lane),147 +dopamine70,147 +daibouken! yukeyuke osawari island,147 +d@i,147 +color halftone,147 +ciel (elsword),147 +childhood friend-chan (ramchi),147 +character select,147 +cat (nyanko daisensou),147 +castor (fate),147 +caleb thomas,147 +bad tinami id,147 +back turned,147 +azuki akizuki,147 +auer,147 +arrow (tamawo222),147 +armored aircraft carrier princess,147 +arcaea,147 +arc system works,147 +amii,147 +amamiya chiharu,147 +alchemist (girls' frontline),147 +against rock,147 +adrenaline!!!,147 +zipper footwear,146 +yoshikawa kazunori,146 +yorha commander,146 +welrod mk2,146 +utsugi noriyuki,146 +used artificial vagina,146 +uniform vest,146 +umibudou,146 +umeboshitora,146 +trombone (sex act),146 +towel rack,146 +tohsaka rin (fate/extra),146 +tobi (kotetsu),146 +tengen toppa gurren lagann: parallel works,146 +takemura kou,146 +sutorora,146 +super robot wars x-omega,146 +submarine princess,146 +statue of liberty,146 +snowflake earrings,146 +shizuku (hunter x hunter),146 +shichiten shichitou,146 +shakunetsu no takkyuu musume,146 +seraphine (league of legends),146 +senri akane,146 +scientific name,146 +scharfschutze,146 +sabakan (iizuka48),146 +ryuuta (cure ryuuta),146 +ryuuga shou,146 +roina (effj7473),146 +ribbon-trimmed hairband,146 +reindeer girl,146 +prism magical,146 +princess tutu (character),146 +ponyta,146 +ponkotsu (ayarosu),146 +pink sweater vest,146 +pillow fight,146 +persocon93,146 +originium slug (arknights),146 +oboro (fire emblem),146 +nogi wakaba wa yuusha de aru,146 +no brand girls,146 +nine (liuyuhao1992),146 +niimi kaoru,146 +nibo (att 130),146 +nekokun,146 +navy cross,146 +nana mikoto,146 +nakajou tatsuya,146 +naitou kirara,146 +myht,146 +murasakio,146 +muneshiro (hitsuji kikaku),146 +mouse girl (yuuki (yuyuki000)),146 +momose kurumi,146 +moe (phrase),146 +misora (princess connect!),146 +minillustration,146 +mingke,146 +mega charizard x,146 +maze yuri,146 +mashiro (nijisanji),146 +maria robotnik,146 +map (blue catty),146 +mahou no tenshi creamy mami,146 +luka (mon-musu 
quest!),146 +lozelia,146 +lanzi (415460661),146 +kurokawa (silve),146 +kroos the keen glint (arknights),146 +kokaki mumose,146 +kikuchi mataha,146 +kie (wylee2212),146 +kaze makase,146 +katase shima,146 +kashiwagi kano,146 +kasashi (kasasi008),146 +karasuma yayoi,146 +kakudai (hujikolp),146 +kagesaki yuna,146 +kabayaki (kabayaki eel),146 +jupiter (planet),146 +judau ashta,146 +joui,146 +joshua bright,146 +jikan sokougun,146 +japanese cylindrical postbox,146 +jako (toyprn),146 +inuga anahoru,146 +inasaki shirau,146 +iga tamaki,146 +ibara dance,146 +humboldt penguin,146 +hotaryuso,146 +himura kiseki (style),146 +himemiya touri,146 +hero (dq8),146 +henohenomoheji,146 +henken,146 +heaven's door,146 +hara yui,146 +hanna rudel,146 +hage2013,146 +hadouken,146 +haagen-dazs,146 +h&k g3,146 +gus (clarkii),146 +ginji74,146 +gigokku,146 +gas can,146 +fuyuno yuuki,146 +fullbokko heroes,146 +fuiba fuyu,146 +floyd leech,146 +farfetch'd,146 +eyewear view,146 +esouko,146 +elina kuroe no daarin,146 +ear scrunchie,146 +drying clothes,146 +dororo (character),146 +daidouji kira,146 +dagon (housamo),146 +da capo iii,146 +compression sleeve,146 +company of heroes,146 +cloud focus,146 +circus,146 +cioccolata,146 +chuby mi,146 +chariot,146 +character pillow,146 +chakapi,146 +caved,146 +castell,146 +cafe (chuu no ouchi),146 +bukurote,146 +bmw,146 +black widow,146 +baretto (firearms 1),146 +barbara parker,146 +azu torako,146 +ayuman,146 +arato asato,146 +aramachi,146 +aoi hiro,146 +animal ears helmet,146 +akagi (paradise amaryllis) (azur lane),146 +aizawa kazuha,146 +agravain (fate),146 +agito (nanoha),146 +3 (sanyako1),146 +3104 (3104milkshake),146 +zorome (darling in the franxx),145 +zac (league of legends),145 +yuru-chara,145 +yellow male underwear,145 +wnb mark,145 +winston (overwatch),145 +watermelon seeds,145 +visor lift,145 +violet (flower),145 +uki atsuya,145 +turnip kabura,145 +touma kazusa,145 +tooi aoiro,145 +tir mcdohl,145 +tina sprout,145 +takasugi kou,145 +taka (tsmix),145 +tactics,145 +taco,145 +sweden (hetalia),145 +suzuno (bookshelf),145 +super robot wars z,145 +suicide boy,145 +staraptor,145 +spacecolonie,145 +socks over thighhighs,145 +snorunt,145 +shishigami bang,145 +shen (league of legends),145 +seero,145 +scuba tank,145 +sayo tanku,145 +sanagi torajirou,145 +sakura (usashiro mani),145 +rem sora410,145 +reiji (gundam bf),145 +rajang,145 +raijuu (bakanara),145 +quro (black river),145 +psylocke,145 +portable stove,145 +playerunknown's battlegrounds,145 +pika (kai9464),145 +painttool sai,145 +ozkh,145 +ooshima ryou,145 +onaya masakazu,145 +ogiso setsuna,145 +o-nashi neko,145 +norway (hetalia),145 +noeru (noellemonade),145 +nishiki areku,145 +nipple press,145 +nezumi (no.6),145 +nel-c,145 +neco spirit,145 +naegino sora,145 +mutsuki (new year) (blue archive),145 +morisawa chiaki,145 +mogi yasunobu,145 +mizuya nao,145 +miyo (miyomiyo01),145 +mirakichi,145 +micro uzi (girls' frontline),145 +metis (persona),145 +merry program,145 +mega lopunny,145 +makuro,145 +magneton,145 +machita chima,145 +luluco,145 +leonidas (fate),145 +kusakabe wakaba,145 +kuo (kuo114514),145 +kon5283,145 +koishi chikasa,145 +knotting,145 +kmnz,145 +kitahama (siroimakeinu831),145 +kazuneko (wktk1024),145 +kamen rider ghost (series),145 +kakuchoshi,145 +kaitou saint tail,145 +joosi,145 +jochuu-san,145 +ichiki 1,145 +ibuki douji (swimsuit berserker) (fate),145 +hippopotamus,145 +hatsuseno alpha,145 +harlequin,145 +haou taikei ryuu knight,145 +hanging food,145 +hand on another's mouth,145 +gomeifuku,145 +glance,145 +g-taste,145 +eva 
16-gouki,145 +erstin ho,145 +eri (boku no hero academia),145 +eijima moko,145 +edmond honda,145 +duan henglong,145 +donson,145 +dobato,145 +deemo (character),145 +dancer's costume (dq),145 +comic bavel,145 +clift,145 +chou shittou caduceus,145 +charlotte hazellink,145 +capitan (tsyoujo),145 +candela (pokemon),145 +buchi maru,145 +brown sky,145 +botan (clannad),145 +berryverrine,145 +beauty and the beast,145 +babu,145 +ashen one (dark souls 3),145 +american football helmet,145 +amatari sukuzakki,145 +ajino (sakanahen),145 +30-06,145 +.hack//tasogare no udewa densetsu,145 +zelgadiss graywords,144 +yukimin (yukihana lamy),144 +yuki shuuka,144 +you ni ge shaobing,144 +yoshino chidori,144 +yen-mi,144 +yamai kaguya,144 +yakimi 27,144 +yakan (kusogaki teikoku),144 +yagami hiroki,144 +white lips,144 +waku waku 7,144 +ushimittsu,144 +unpale,144 +under clothes,144 +ultimecia,144 +two-sided coat,144 +touya (konpekitou),144 +tokumaro,144 +tm (hanamakisan),144 +tiptoe kiss,144 +the road to el dorado,144 +tell your world (vocaloid),144 +tayo,144 +takeshisu,144 +t-asama,144 +switch,144 +swimwear (module),144 +susanna hopkins,144 +super mario 3d world,144 +sunny day song,144 +sudachi (calendar),144 +strider hiryuu,144 +stg44,144 +st.kuma,144 +sneaking suit,144 +smash ball,144 +silverlight,144 +silver rain,144 +shukufuku no campanella,144 +shishimaru ken'ya,144 +shiroi suna no aquatope,144 +school emblem,144 +scarf pull,144 +sazabi,144 +satsuki yukimi,144 +saruei,144 +sandwich cookie,144 +sachi (sao),144 +qm,144 +print towel,144 +ponyo,144 +poi (goldfish scoop),144 +pidge gunderson,144 +panbai,144 +oyama yoihaya,144 +opanchu (hakusen),144 +okumari,144 +oarfish,144 +noritama (gozen),144 +noill,144 +nisoku hokou (vocaloid),144 +narita top road (umamusume),144 +muneneko,144 +multiple cats,144 +motion slit,144 +mori (unknown.),144 +mono lith,144 +monizumi ishikawa,144 +momo no kanzume,144 +moeta kaoruko,144 +miyazaki byou,144 +mitsuzuri ayako,144 +mirai akari's new virtual youtuber illustration contest,144 +military rank insignia,144 +matrix16,144 +maru ccy,144 +makino ruki,144 +majo to hyakkihei,144 +link (aa30),144 +lava the purgatory (arknights),144 +kushizaki (vtuber),144 +kurosawa rin (aikatsu!),144 +kuroeda-san,144 +kouta.,144 +kona ming,144 +kirikuchi riku,144 +king saw,144 +key kun,144 +keke (kokorokeke),144 +kayle (league of legends),144 +kawachi rin,144 +kantori,144 +kamiko kana,144 +kakesu,144 +kakaobataa,144 +kaginoni,144 +jj (ssspulse),144 +jett (valorant),144 +j yak47,144 +izumi reina,144 +ixima,144 +iwai ryou,144 +impossible hair,144 +ichikawa noa,144 +humagear headphones,144 +hoshibuchi,144 +horikoshi kouhei,144 +hoojiro (found1093),144 +hollomaru,144 +hitotsuki nanoka,144 +hira (nanika no heya),144 +hinata nao,144 +higuchi konomi,144 +hi ye,144 +hemo (hemoroda),144 +heiwari kanade,144 +healing animal,144 +hanazuki (azur lane),144 +hajime (ak-r),144 +hachigatsu no cinderella nine,144 +h&k psg1,144 +foongus,144 +floating city,144 +fire helmet,144 +fatal fury cap,144 +face filter,144 +exit sign,144 +executor (arknights),144 +eventh7,144 +escavalier,144 +engineer (tf2),144 +eevee ears,144 +dreamer (girls' frontline),144 +dragoon,144 +doduo,144 +daydream (zhdkffk21),144 +cuora (arknights),144 +chucolala,144 +choumi wuti (xueye fanmang zhong),144 +chloe von einzbern (beast style),144 +cheshire cat (monster girl encyclopedia),144 +chaos;child,144 +celia kumani entory,144 +cain (gunnermul),144 +bra-ban!,144 +bisected,144 +barbariccia,144 +bandaid on stomach,144 +asymmetrical bodysuit,144 +asymmetrical 
arms,144 +asou yuuko,144 +ashiya douman (second ascension) (fate),144 +ascii art,144 +asagiri asagi,144 +armadillo ears,144 +akino subaru,144 +aizawa hikaru,144 +yumi yumi,143 +yuki miku (2022),143 +youkai watch (object),143 +yoshinoya (hidamari sketch),143 +yayoichi (yoruyoru108),143 +yatsuha kanan,143 +yatosaki haru,143 +yaosera,143 +yagyuu munenori (fate),143 +yadokugaeru,143 +wu ganlan cai,143 +watanuki banri,143 +wagaya no oinari-sama,143 +wadapen,143 +wadante,143 +viviana (arknights),143 +uta (one piece),143 +ura (05131),143 +unagiman,143 +tsuyuka (sunny spot),143 +transforming clothes,143 +tomimi (silent night) (arknights),143 +togo ai,143 +titiduki (manman-ya),143 +tifa lockhart's refined dress,143 +the king of fighters 2001,143 +ten'i (ikkitousen),143 +taut skirt,143 +tamatoys,143 +tajima yukie,143 +t-okada,143 +sunday silence (racehorse),143 +sunday31,143 +striped suit,143 +sseopik,143 +so dakki,143 +sinko,143 +shuumatsu nani shitemasu ka?,143 +shiwasu takashi,143 +sex pistols (stand),143 +senran kagura peach beach splash,143 +sayoyonsayoyo,143 +sawara65,143 +satsuki mei (sakuramochi),143 +sanom,143 +sahuyaiya,143 +rokugatsu t,143 +rino (princess connect!),143 +renka (sutegoma25),143 +priite hari (torriet),143 +popman3580,143 +pink sports bra,143 +pile of books,143 +palace of dragon (idolmaster),143 +oyuwari,143 +osa (osaosa),143 +ohnuma kurumi,143 +odds & ends (vocaloid),143 +not present,143 +nodding,143 +nina einstein,143 +niii (memstapak),143 +natsuhiko,143 +nanashi (ganesagi),143 +myoukou kai ni (kancolle),143 +murio,143 +multiple fusions,143 +mosuko,143 +momoda yasuhito,143 +mizunomoto,143 +mizuki (kutan),143 +minikon,143 +minakamirin,143 +mg renders,143 +mephisto (arknights),143 +mega lucario,143 +master asia,143 +magus (chrono trigger),143 +magical mirai miku (2021),143 +little blue (guin guin),143 +lithuania (hetalia),143 +lily (granblue fantasy),143 +len (hand linke),143 +lamborghini,143 +kutata,143 +kururu (little princess),143 +kuro (kuroneko no kanzume),143 +konkichi (flowercabbage),143 +kobayashi hiyoko,143 +king (nanatsu no taizai),143 +kido saori,143 +khiara (personal ami),143 +kazooie (banjo-kazooie),143 +kashiwa kiseri,143 +kanzaki hideri,143 +kamishiro ryuu,143 +kamihime project,143 +kamen rider build,143 +kaburagi yuki,143 +jon (pixiv31559095),143 +izumiyuhina,143 +itou eito,143 +italia (kancolle),143 +inuzuka kiba,143 +indonesian text,143 +in-hyuk lee,143 +ikura (food),143 +hoshino madoka,143 +hinadan,143 +hazuki (nyorosuke),143 +hat bobbles,143 +harmonica,143 +greatodoggo,143 +gmot,143 +gangut (azur lane),143 +gahaku,143 +furahata gen,143 +fukase ayaka,143 +frilled necktie,143 +frenulum piercing,143 +flashbang,143 +flamie speeddraw,143 +fender telecaster,143 +eruthika,143 +dx (dekusu),143 +duo maxwell,143 +drag-on dragoon 1,143 +douma (kimetsu no yaiba),143 +doria (p f dolia),143 +dog (shiba inu) (kemono friends),143 +dido (anxious bisque doll) (azur lane),143 +crash bandicoot (series),143 +clip,143 +chroche latel pastalie,143 +chitozen (pri zen),143 +cheetah tail,143 +camel000,143 +camel,143 +caligula (fate),143 +buttercup redraw challenge (meme),143 +bread eating race,143 +bokuto koutarou,143 +bandaid hair ornament,143 +bandaged chest,143 +bald spot,143 +baketsuya,143 +backflip,143 +azuma seiji,143 +atelier lydie & suelle,143 +alolan meowth,143 +allenby beardsley,143 +akiyasu,143 +akito (d30n26),143 +akino takehiko,143 +akahige,143 +ahsoka tano,143 +23 (candy chapus),143 +zuizou,142 +yuunagi middle school uniform,142 +youhei (testament),142 +yoshii 
akihisa,142 +yonekura kengo,142 +yilx,142 +yatagarasu (game),142 +versailles no bara,142 +uso (ameuzaki),142 +ueno (ueno-san wa bukiyou),142 +ubizo,142 +two-tone pants,142 +two-tone bra,142 +tsunko (9v2 q),142 +tropical kiss,142 +totsuka saika,142 +tomomimi shimon,142 +tk (angel beats!),142 +tied sweater,142 +tianzhong zhongtian,142 +theodore riddle,142 +themaestronoob,142 +text print,142 +tenshi no inai 12-gatsu,142 +tashigi,142 +sutera (granblue fantasy),142 +sunohara ayaka,142 +suisa (mizsai),142 +stiel,142 +stardew valley,142 +sonikey0 0,142 +sofia valmer,142 +snow leopard,142 +shuutou haruka,142 +shouhei,142 +shiteyan'yo,142 +shiro (octet),142 +shinburu,142 +shin sakura taisen,142 +sett (league of legends),142 +sara (gundam build divers),142 +sakura hime,142 +sakana (saka11205),142 +sakaki (utigi),142 +ruukoto,142 +rsef,142 +rokushou,142 +rojiko,142 +rodney (warship girls r),142 +rocha (aloha ro cha),142 +rkrk12,142 +redjack 036,142 +r44,142 +queasy s,142 +pikachu tail,142 +peter parker,142 +orgasm denial,142 +objection,142 +numbered flag,142 +nokanok,142 +nijihara ink,142 +nekokotei,142 +necromancer,142 +nakahara misaki,142 +myucel foalan,142 +mujin wakusei survive,142 +mugshot,142 +moira (nijisanji),142 +mochizuki nozomu,142 +mizuki kyou,142 +miyoshi (triple luck),142 +miyamae shiho (jack dempa),142 +mixed gender duel,142 +minatosaiga,142 +mian (dream c club),142 +melty+,142 +matsushima michiru,142 +maryland (kancolle),142 +mandibles,142 +liquid clothes,142 +like and retweet,142 +kuso otoko,142 +kongou mitsuko,142 +king k. rool,142 +keith (voltron),142 +kazeharu,142 +kazama raita,142 +kawajiri shinobu,142 +kaneko tsukasa,142 +kamoto tatsuya,142 +kamikoshi sorawo,142 +kama (swimsuit avenger) (first ascension) (fate),142 +kagura mizuki,142 +k52,142 +jeff andonuts,142 +janyhero,142 +izumi minami,142 +ishii akira,142 +inui (jt1116),142 +ina (inadahime),142 +in basket,142 +ikeno daigo,142 +hyakumangoku masurao,142 +holmemee,142 +hinazuki kayo,142 +himegami aisa,142 +hasaya,142 +harukawa moe,142 +hapu (pokemon),142 +haku89,142 +hai (h81908190),142 +gsusart,142 +greenopi,142 +goto (sep),142 +gonster,142 +fujishima kousuke,142 +frog hood,142 +frilled coat,142 +face of the people who sank all their money into the fx (meme),142 +evolvingmonkey,142 +evil eye sigma,142 +erogos,142 +ekans,142 +drowzee,142 +doyachii,142 +dokidoki sister aoi-chan,142 +dilated pupils,142 +deathsmiles,142 +danby merong,142 +cube x cursed x curious,142 +cryska barchenowa,142 +crotchless leotard,142 +chyoel,142 +chel (the road to el dorado),142 +cha goma,142 +card in mouth,142 +calamity jane (fate),142 +butterchalk,142 +bunny ear legwear,142 +braided beard,142 +bound knees,142 +bol (liliymimi),142 +bodycon,142 +black overalls,142 +barre,142 +barmaid,142 +azuma toh,142 +azu-taro,142 +asuna (doruru-mon),142 +ashiya douman (third ascension) (fate),142 +around corner,142 +arondight (fate),142 +aranara (genshin impact),142 +alicia testarossa,142 +akke,142 +akashi seijuurou,142 +akasaka yuzu,142 +aimobake,142 +aether foundation uniform,142 +abutomato,142 +49s-aragon,142 +zooming in,141 +zijou,141 +zero-theme,141 +z-move,141 +yunoru,141 +yun lee,141 +yuhi (hssh 6),141 +yug,141 +yokoyari mengo,141 +yellow diamond (houseki no kuni),141 +yasoji (16321578),141 +xiao dianshi,141 +uzuki kou,141 +utsugi kotoko,141 +ushimi ichigo,141 +urim (paintur),141 +uiri-na,141 +ueda suzuho,141 +type 95 (narcissus) (girls' frontline),141 +tucana,141 +ttanuu.,141 +toshinoshin,141 +togruta,141 +tamiku (shisyamo609),141 +takasugi heppu,141 
+tajikarao (housamo),141 +suzumi tamao,141 +surcouf (loisirs balneaires) (azur lane),141 +suikamaru,141 +star wars: the clone wars,141 +spongebob squarepants (character),141 +splat bomb (splatoon),141 +space ishtar (second ascension) (fate),141 +sousakubito,141 +sorami kanata,141 +sonia strumm (mega man),141 +someya mako,141 +skirt in mouth,141 +shirt behind neck,141 +shinouji matsurika,141 +shinori,141 +shadow (artist),141 +serin199,141 +sanka rea,141 +sanada yukimura (sengoku basara),141 +sameya,141 +sakurai yuuto (shiromanta),141 +saki (hxaxcxk),141 +sakai hamachi,141 +roulette,141 +roonhee,141 +romancing saga 3,141 +rocket ship,141 +riku (wana),141 +renri no chigiri wo kimi to shiru,141 +removing pasties,141 +red tabard,141 +rainbow skirt,141 +pocari66,141 +pinky pop hepburn,141 +picter,141 +parody request,141 +oxxo (dlengur),141 +oonaka ito,141 +ooba wakako,141 +ontaros,141 +ompf,141 +okiura,141 +ojo (dfreak),141 +oga raito,141 +nora cat channel,141 +nora cat,141 +ninja (ragnarok online),141 +neck pillow,141 +nb (pixiv594732),141 +nancou (nankou),141 +nanana (chicken union),141 +nai diffusion,141 +nagare hyougo,141 +my pet tentacle monster,141 +morichika rinnosuke (cosplay),141 +momoyama mirai,141 +mks,141 +miz (mizillustration),141 +miri (tobira no mukou),141 +minami seira,141 +mikan-uji,141 +mettaton,141 +metroid fusion,141 +meinya (made in abyss),141 +meimei (p&d),141 +meguro fukuzou,141 +mankanshoku barazou,141 +mana khemia (series),141 +makidera kaede,141 +maco spl,141 +mach caliber,141 +lumarianne20,141 +lilica felchenerow,141 +lambent light,141 +kusakabe maron,141 +kurenai no buta,141 +kuga yuuma,141 +koza900,141 +kobanzame,141 +kinuko (kinucakes),141 +kazami youka (yokochu),141 +kanipanda,141 +kanaria (fuusenkazura),141 +kanacho,141 +kamekichi,141 +jodhpurs,141 +jehyun,141 +izumi (blue archive),141 +iyou,141 +indigo (arknights),141 +ijuuin hokuto,141 +ice (ice aptx),141 +huyuharu0214,141 +honda tamanosuke,141 +holding string,141 +hisama kumako,141 +herb,141 +hattori masahiko,141 +hatsune (summer) (princess connect!),141 +hata kenjirou,141 +hasumi (hasubatake39),141 +harmony (splatoon),141 +hamo (dog),141 +haluka (aixioo),141 +hair undone,141 +hagimura suzu,141 +h&k usp,141 +gzei,141 +glenn andrean,141 +garo (series),141 +fuwafuwa time,141 +fujiwara gacho,141 +fubuki (kancolle) (cosplay),141 +foot on face,141 +final fantasy fables,141 +fault!!,141 +fang assassin irma,141 +family computer robot,141 +erica fontaine,141 +ephnel,141 +enjoji michiru,141 +edinburgh (azur lane),141 +dreamlight2000,141 +dragon riding,141 +dimension w,141 +dildo harness,141 +decapre,141 +daiba canon,141 +cure black (cosplay),141 +cosmic & funny (idolmaster),141 +chokutou,141 +chipmunk ears,141 +btraphen,141 +brazil,141 +blush visible through hair,141 +black beat,141 +bikini bottom aside,141 +basquash!,141 +aties20,141 +asakura nemu,141 +arkapami,141 +aoyagi touya,141 +amagi (volfuji),141 +alphy,141 +akisha,141 +akaneko (redakanekocat),141 +ajiriko,141 +aikawa chinatsu,141 +1672,141 +14sai bishoujo (shoutarou),141 +zyra,140 +yuzuyomogi,140 +yunomiya agari,140 +youkai ankake,140 +yasaka mahiro,140 +yamanobe tomo,140 +yamaada,140 +wrapped bento,140 +wooden bench,140 +wolf (league of legends),140 +vyolfers,140 +urayama (backmountain),140 +unfinished background,140 +tsukimoto kizuki,140 +truss,140 +tomoeri,140 +tomitake jirou,140 +theater,140 +terminator 2: judgment day,140 +team7,140 +tatsumi kon,140 +takubon,140 +sugiura,140 +stroma,140 +striped bandeau,140 +star panties,140 +sovetskaya belorussiya (azur 
lane),140 +soranagi yuki,140 +smile (rz),140 +shuucream (syuichi),140 +shoebill,140 +shizune (naruto),140 +shingeki no bahamut: genesis,140 +shiki,140 +shihou matsuri,140 +sera (serappi),140 +sentret,140 +senshiya,140 +seno (senohime),140 +savi (byakushimc),140 +sasamashin,140 +saki (blue archive),140 +saibashi,140 +rose (tales),140 +rimone,140 +revolver ocelot,140 +pterosaur,140 +prinz eugen (symphonic fate) (azur lane),140 +politoed,140 +pitfall,140 +persona 5 scramble: the phantom strikers,140 +peg,140 +p answer,140 +over the mouth gag,140 +oumi neneha,140 +orga,140 +ningen mame,140 +nero claudius (bride) (fate) (cosplay),140 +nekotewi,140 +naked dogeza,140 +nakatsukasa tsubaki,140 +mzet,140 +momomeno (7th dragon),140 +mitsuki felicia,140 +mismatched earrings,140 +mimi (mimi puru),140 +mash kyrielight (formal dress),140 +lyle dylandy,140 +kztk,140 +kyousougiga,140 +kyou zip,140 +kurokoeda,140 +kumai natsu,140 +kojiki-life,140 +koizumo,140 +kodachi,140 +kirino ranmaru (mixi max jeanne d'arc),140 +kimino tomonari,140 +kemeko deluxe,140 +kasuga (sengoku basara),140 +itamidome,140 +ishibori eregomos,140 +irohakaede,140 +insane black rock shooter,140 +implied rape,140 +iltusa,140 +horse dildo,140 +hizukiryou,140 +hitode,140 +hisha (kan moko),140 +hinayuki usa,140 +heel-less heels,140 +heavyrain (arknights),140 +hands over own mouth,140 +handplug,140 +handot (d yot ),140 +ham,140 +gym leader badge,140 +gin fragrans,140 +geneva bowers,140 +g-self,140 +fur-trimmed leotard,140 +fanta,140 +faba (pokemon),140 +etotama,140 +eclair (kiddy grade),140 +dusclops,140 +droid,140 +disembodied eye,140 +diana (league of legends),140 +dangling,140 +crazy raccoon,140 +chyan,140 +chobipero,140 +chobi (penguin paradise),140 +checkered panties,140 +canadian flag,140 +bushidou 2 (sekaiju),140 +breast biting,140 +blue mittens,140 +black bracelet,140 +azuma minatsu,140 +azmaria hendric,140 +arslan senki,140 +aquila (azur lane),140 +aoki lapis,140 +akashi yuuna,140 +ak-74,140 +ak-12 (quiet azure) (girls' frontline),140 +abe inori,140 +yzpyn,139 +yuugiri (u-slash),139 +yusa (yusa0751),139 +you gonna get eaten,139 +yashahime (momotarou densetsu),139 +xiaodi,139 +woollen cap,139 +windart,139 +wataru (zazazazazazawa),139 +walfie,139 +ushio to tora,139 +urako,139 +tsuchimiya kagura,139 +tsuchifumazu,139 +toshi (1-147),139 +torn capelet,139 +tirarizun,139 +tenzeru,139 +ten (tenchan man),139 +talho yuuki,139 +takeda kanryuusai,139 +takanashi kotori,139 +takahashi (k2ta7),139 +suru (ksoo420),139 +surge (pokemon),139 +super mario rpg,139 +striped apron,139 +star butterfly,139 +spring (object),139 +south dakota kai (kancolle),139 +soul hackers,139 +sora (men0105),139 +shishimai,139 +shiromiso,139 +shirayuki (arknights),139 +shinomiya natsuki (uta no prince-sama),139 +shinjitsu (true ride),139 +shimofuri kaeru,139 +shimada minami,139 +senzaicha kasukadoki,139 +sasaki sakiko,139 +saiki yuzuri,139 +rx boss,139 +ronindude,139 +remoraid,139 +qin (7833198),139 +poland (hetalia),139 +poipole,139 +pill hair ornament,139 +phara suyuf,139 +pecha berry,139 +patipat asavasena,139 +pastel ink,139 +oza osuwari,139 +ototobe,139 +oniku (oishii oniku),139 +oneechanbara,139 +obmas,139 +nyaroon,139 +nutkingcall,139 +nr (cmnrr),139 +notes,139 +nishi (count2.4),139 +nightea,139 +nidoran (male),139 +nidaime (doronbo),139 +nemone,139 +neco meito,139 +namatyoco,139 +nakabeni yua,139 +myoukou pose,139 +muscle car,139 +monster musume no oisha-san,139 +mofuringu,139 +mila babicheva,139 +mikono suzushiro,139 +miko machi,139 +matsuno chifuyu,139 
+marvelous sunday (umamusume),139 +marie antoinette (third ascension) (fate),139 +manarou,139 +makadamixa,139 +mahou shoujo ikusei keikaku restart,139 +maeda risou,139 +luluko,139 +life vest,139 +lemon hair ornament,139 +kyoutsuugengo,139 +kotorai,139 +kittona,139 +kiriririn,139 +kirieppa,139 +ke-ta (style),139 +kawanabe,139 +katsugeki/touken ranbu,139 +kanzaki aoi (kimetsu no yaiba),139 +kamen rider agito,139 +kagari6496,139 +k11 (girls' frontline),139 +jin rou,139 +itsuki tasuku,139 +itsuki sayaka,139 +isa,139 +igni tion,139 +ichijou (kr neru0),139 +icecake,139 +humpback whale,139 +howling,139 +horse mask,139 +honma (honmatomohiro),139 +honlo,139 +hiromaster sinta jh,139 +hiro (dismaless),139 +hidebo,139 +heart lollipop,139 +hatsuko,139 +harada miyo,139 +hanamichi ran,139 +hair weapon,139 +gunslinger stratos,139 +girly running,139 +fuchitoro,139 +freyjadour falenas,139 +frankie foster,139 +frankenstein's monster (swimsuit saber) (second ascension) (fate),139 +flaaffy,139 +figma,139 +fenrir (fenriluuu),139 +feint721,139 +fake box art,139 +ermine,139 +envy (fma),139 +dracule mihawk,139 +dorianpanda,139 +digital camouflage,139 +d. (ddot2018),139 +cuddly octopus,139 +cteno,139 +contender (girls' frontline),139 +combos & doodles,139 +cloyster,139 +cinia pacifica,139 +chopstick rest,139 +chikuwa (odennabe),139 +checkered bowtie,139 +cecily cambell,139 +castlevania: symphony of the night,139 +castform,139 +capybara (kemono friends),139 +butterfly on nose,139 +bruce wayne,139 +broken bottle,139 +briska,139 +branded,139 +boar boy,139 +belly dancing,139 +bear hat,139 +bccp,139 +bashamichi,139 +barakamon,139 +az toride,139 +asa (swallowtail),139 +aoyashio rin,139 +anibache,139 +alina (arknights),139 +akasaka (qv92612),139 +aino heart,139 +aaru sentou shuudan,139 +3u,139 +2-g,139 +zuikaku (the wind's true name) (azur lane),138 +yuu (alsiel),138 +yuki hime haruka,138 +ys viii lacrimosa of dana,138 +yonema,138 +yatsuhashi kakera,138 +yanagi hirohiko,138 +yamai yuzuru,138 +yamada koutarou,138 +xiho (xi ho ),138 +wet shorts,138 +watermelon beachball,138 +water wheel,138 +waka (yuuhagi (amaretto-no-natsu)),138 +venus blade,138 +venomoth,138 +usa b,138 +unizou,138 +tri,138 +touzai (poppin phl95),138 +tiktok,138 +tatsuno newo,138 +tanemura arina,138 +tanaka the wizard,138 +takita (takita3411),138 +takeponi,138 +sugarbeat,138 +sugamo,138 +sudachips,138 +studded gloves,138 +sticky fingers (stand),138 +spiral staircase,138 +spiked belt,138 +sounan desuka?,138 +songchuan li,138 +son of droid (mechanical buddy universe),138 +shuffle (songdatiankong),138 +shiruko (food),138 +shiro maru,138 +shipu (gassyumaron),138 +shichouson,138 +september,138 +sand cat print,138 +sakuramarusan,138 +saigyouji yuyuko (cosplay),138 +rukako,138 +rock bison,138 +robin (dc),138 +ritsu (roboroboro),138 +retsuna,138 +rapid punches,138 +ramie (ramie541),138 +purple feathers,138 +pug,138 +president maa,138 +pinky to mouth,138 +philia (sao),138 +pectoral lift,138 +oze (xyz go go11),138 +otoutogimi,138 +ocha (popopogg),138 +noir,138 +nekoyama,138 +nama shirasu,138 +mr. 
koiwai,138 +moudoku (decopon3rd),138 +motivator,138 +morina nao,138 +moorina,138 +moonjunk,138 +mooncake,138 +mochida arisa,138 +mizuno keisuke,138 +miyabi (senran kagura),138 +miton (turuyasann),138 +misty lake,138 +miso (misomiso 154),138 +meiko (vocaloid3),138 +megastructure,138 +madana (xesa7885),138 +lucia (pangya),138 +luchador mask,138 +loaf of bread,138 +light in heart,138 +leilan (p&d),138 +kyoko (kunio-kun),138 +kyo-hei (kyouhei),138 +kuronezumi,138 +kumakichi (cost-lost),138 +kudou (ooabareteng),138 +kuase,138 +koto (colorcube),138 +komatsuzaki rui,138 +koikishi purely kiss,138 +kirin toroika,138 +king of unlucky,138 +kinakomoti,138 +kim possible,138 +kikimi,138 +kida kuro mu,138 +kerykeion,138 +kemono friends pavilion,138 +kasumi komo,138 +kamitsuki manmaru,138 +kakizaki megu,138 +kaitou jeanne,138 +ishida to asakura,138 +io takuya,138 +inzup,138 +inutose,138 +implied vibrator,138 +ichifuji nitaka,138 +huang gua,138 +houshou hanon,138 +hoshara,138 +hono,138 +holding mushroom,138 +holding coat,138 +hiya gohan,138 +hinoru saikusa,138 +himematsu school uniform,138 +hermmy,138 +heart no kuni no alice,138 +heart hands failure,138 +hana (fire emblem),138 +hakuleg,138 +gradient ribbon,138 +gouketsuji ichizoku,138 +gohei (aoi yuugure),138 +gingami (giluziru),138 +garterbelt (psg),138 +future card buddyfight,138 +futsuu no joshikousei ga locodol yattemita,138 +fur-trimmed vest,138 +fishnet socks,138 +fir3born,138 +fii-tan the figure,138 +faust (arknights),138 +fartooth (arknights),138 +etogami kazuya,138 +egypt,138 +ebitetsu,138 +distress,138 +dinner,138 +dhiea,138 +d.y.x.,138 +cecilia schariac,138 +calimero (honey come chatka!!),138 +bulges touching,138 +buddhism,138 +brown thoroughbred (kemono friends),138 +bougu,138 +black jack (character),138 +bikini bottom pull,138 +bashen chenyue,138 +banana slice,138 +ban hada,138 +balrog (street fighter),138 +azelf,138 +ayanakitori,138 +asuka ryou,138 +aruva,138 +arifureta shokugyou de sekai saikyou,138 +aqua hoodie,138 +any (trueblue),138 +annie (skullgirls),138 +anglerfish costume,138 +amanagi seiji,138 +adomi,138 +acchi kocchi,138 +2b-ge,138 +zihad,137 +zhuore zhi hen,137 +yuutarou (fukiiincho),137 +yuuka (o.t.kingdom),137 +yuri seo,137 +yofukashi,137 +yamuraiha,137 +yamask,137 +yamagami karuta,137 +yakult,137 +window fog,137 +white dog,137 +wasp,137 +warrior (final fantasy),137 +warlock (granblue fantasy),137 +videocassette,137 +vanillaware,137 +umamusume: cinderella gray,137 +twitch plays pokemon,137 +tsukamoto kensuke,137 +trish (devil may cry),137 +trevo (trevoshere),137 +tombow mono,137 +tokiwadai school swimsuit,137 +takeuchi ryousuke,137 +swapnote,137 +stag beetle,137 +sibata maru,137 +shiitake taishi,137 +sato imo,137 +sasahiro,137 +sally (luna-arts),137 +sajittari,137 +saikyou ginga ultimate zero ~battle spirits~,137 +sagae haruki,137 +rizento,137 +reppuu (kancolle),137 +removing mask,137 +reina (black spider),137 +range finder,137 +raiya atelier,137 +puka puka,137 +pram (phantom kingdom),137 +plaid capelet,137 +paneled background,137 +oversized flower,137 +osamu yagi,137 +nishimi shin,137 +neviril,137 +nazuka (mikkamisaki),137 +navy,137 +nattororo,137 +mutsu (azur lane),137 +morncolour,137 +monobe tsukuri,137 +monk (sekaiju),137 +mona lisa,137 +mofumofu channel,137 +miz 003,137 +misha arsellec lune,137 +minami (apricot tea),137 +mime jr.,137 +miko embrace,137 +miga (migao),137 +midorikawa ryuuji,137 +merry-san,137 +maya (azur lane),137 +mature (kof),137 +matsuda yasuke,137 +mao (6r),137 +manjirou (manji illust),137 +mahjong 
table,137 +m1 garand,137 +lexaiduer,137 +lazulight,137 +langley1000,137 +kurai nao,137 +kowiru,137 +konomori kanon,137 +kishimen,137 +kiki (majo no takkyuubin) (cosplay),137 +kibellin,137 +kenix,137 +kay yu,137 +katoroku,137 +kariya (mizore),137 +karakari,137 +kanzaki sumire,137 +kangaskhan,137 +kamisama ni natta hi,137 +kagura nana,137 +kagerou (shadowmage),137 +jyuru,137 +juusenki l-gaim,137 +jikkyou powerful pro yakyuu,137 +jeanne d'arc (azur lane),137 +is (kamen rider 01),137 +inia sestina,137 +in shopping cart,137 +ikkitousen dragon destiny,137 +ichikawa ryuunosuke,137 +hitmontop,137 +hasekura chiaki,137 +hanna england,137 +hakano shinshi,137 +gundam card builder,137 +gomashio ponz,137 +gokubuto mayuge,137 +glowing earrings,137 +fuyuzuki gato,137 +fuu fuu,137 +fungi (genshin impact),137 +fujisaka kimihiko,137 +fujimaru ritsuka (female) (tropical summer),137 +flipnote studio (medium),137 +firewatch (arknights),137 +fairey swordfish,137 +emaciated,137 +ema skye,137 +ebi (eeotoko),137 +dqn (dqnww),137 +dokuro-san,137 +daniel j. d'arby,137 +cyrus albright,137 +cup noodle,137 +crusch karsten,137 +crotch stomping,137 +courier (arknights),137 +cottonee,137 +commander shepard,137 +clima-tact,137 +cirima,137 +chipp zanuff,137 +cherry tree,137 +charlotte (fire emblem),137 +chai xianghua,137 +cancell,137 +bunny background,137 +breath of fire i,137 +blood spray,137 +bishi (bishi),137 +beppu mitsunaka,137 +battleship summer princess,137 +azuma (azuma10270724),137 +athena cykes,137 +athena (p&d),137 +asuka (junerabitts),137 +ariko youichi,137 +areola piercing,137 +apple bunny,137 +aoi sora (pairan),137 +ametama (runarunaruta5656),137 +amano maya,137 +alpha pokemon,137 +alina (girls und panzer),137 +ace of clubs,137 +z1 leberecht maass (azur lane),136 +yume kawaii,136 +yukimaru217,136 +yuki miku (2020),136 +whitem (whiteemperor2020),136 +white jabot,136 +wendi (nanoha),136 +wapokichi,136 +vertigris,136 +utx school uniform,136 +ursus empire logo,136 +united states army,136 +tsuzuri (tu-san house),136 +tsuki suigetsu,136 +tsubaki tsubaru,136 +tia-chan,136 +temari rin,136 +summoner (final fantasy),136 +sukkirito (rangusan),136 +sugar bowl,136 +sue (grandia),136 +stormtrooper,136 +spy (tf2),136 +spawnfoxy,136 +souya ichika,136 +sonjow4,136 +skiploom,136 +shy guy,136 +shishidou akiha,136 +shinonoko,136 +shigureteki,136 +sandshrew,136 +sakurasawa sumi,136 +sakura tsubame,136 +saki (ar tonelico),136 +sakenomi (cookie),136 +ryuu ga gotoku 0,136 +rednian,136 +raisa pottgen,136 +raccoon hood,136 +pyro (tf2),136 +pushing down,136 +porco galliard,136 +pokemon ranger,136 +pochadon,136 +plaid shawl,136 +plaid pillow,136 +piroshiki123,136 +pinsir,136 +parfaitlate,136 +ox (baallore),136 +otonari,136 +oozon (ozon),136 +okazaki takeshi,136 +octopus boy,136 +nyakonro (nekonro),136 +nuqura,136 +notora,136 +nikki (swapnote),136 +nihei tsutomu,136 +nene (sengoku musou),136 +nemu (nora),136 +natsuki yuu (amemizu),136 +nami makoto,136 +nagato yuki (cosplay),136 +mugen gakuen school uniform,136 +moth antennae,136 +mosaic,136 +mishiro (andante),136 +mia taylor,136 +merxkialis,136 +mappe (778exceed),136 +mami (apsaras),136 +macaw,136 +linreplica,136 +leysritt (fate),136 +lexington (cv-16) (warship girls r),136 +kumichou (kumichoubox),136 +kumamoto (bbtonhk2),136 +kubo tite,136 +koru koruno,136 +kissing nose,136 +kimura shigetaka,136 +key in head,136 +kayumidome,136 +kashiwaba tomoe,136 +karasawa-40,136 +kamen rider black rx (series),136 +kagome misaki,136 +kagerou days (vocaloid),136 +john (nakoto),136 +jet black,136 
+inugami-ke no ichizoku pose,136 +inon,136 +ikeda masateru,136 +i-la,136 +holstein friesian cattle (kemono friends),136 +hiro hiroki,136 +harunoibuki,136 +hanetu,136 +hanasaki mahiru,136 +hajimete no gal,136 +gynoid talk,136 +gravure swimsuit (idolmaster),136 +grail,136 +georugu13,136 +gastrodon,136 +gasp,136 +game of thrones,136 +fujisaka lyric,136 +fang (arknights),136 +faicha,136 +exion (neon),136 +era (erasion),136 +energy tank,136 +ellin meiji,136 +ein (cowboy bebop),136 +eguchi sera,136 +eerr,136 +drifting,136 +dragon slayer ornstein,136 +donquixote rocinante,136 +demento,136 +dark jeanne,136 +dacchi,136 +cyborg 009,136 +cupless bikini,136 +creepy eyes,136 +creat,136 +cocoon (loveririn),136 +ciel sacred,136 +chin gero,136 +cherrim,136 +bunny ears prank,136 +blazblue: cross tag battle,136 +beast,136 +bao (vtuber),136 +ayatsuki sugure,136 +ayakase riberi,136 +aron,136 +archer (pokemon),136 +akabane youko,136 +agonasubi,136 +100th black market,136 +00 gundam,136 +zattape,135 +yveltal,135 +yuyu (yuyuworks),135 +yuuki sonisuke,135 +yoshimon,135 +yon (letter),135 +yari,135 +yamaneko (tkdrumsco),135 +yamane akira,135 +yamamoto akira,135 +yahiro (epicopeiidae),135 +xiaosan ye,135 +xiaosamiao,135 +wing censor,135 +white gakuran,135 +waist brooch,135 +viper (valorant),135 +ultraviolet light,135 +ukumo uichi,135 +tyako 089,135 +theta (ponkotsu works),135 +the king of fighters '97,135 +taiga takeru,135 +switzerland (hetalia),135 +sturmgeschutz iii,135 +smite,135 +small stellated dodecahedron,135 +shiromochi sakura,135 +shima (wansyon144),135 +serah farron,135 +satsuki (miicat),135 +sasaki fuuka,135 +saryuu (iriomote),135 +santana (jojo),135 +sankyaku tako,135 +sakuragi kei,135 +safety razor,135 +runa (artist),135 +rugo,135 +rin (blue archive),135 +reverse footjob,135 +rampart (apex legends),135 +qunqing,135 +qing wu,135 +potaaju,135 +pocketland,135 +pkpokopoko3,135 +pixiv fantasia revenge of the darkness,135 +pixel (yuxian),135 +piggy bank,135 +pic postcard,135 +pentagon (shape),135 +pauline (mario),135 +panzerkampfwagen panther,135 +panda tail,135 +palmon,135 +pain-lucky777,135 +ozaki (tsukiko3),135 +otter girl,135 +orochimaru (naruto),135 +oomori (kswmr),135 +on banana,135 +ofuda on nipples,135 +nyanmilla,135 +nyahu (nyahu 77),135 +nvl,135 +noumin,135 +notte,135 +natsuk,135 +naop (anything),135 +nanonin,135 +mzh,135 +mushroom (osh320),135 +mtyy,135 +mousse (ranma 1/2),135 +moketto,135 +mochi (mochi444420),135 +mix (aquarion),135 +misawa maho,135 +midoribox,135 +metal crab (arknights),135 +merii (mazohaha),135 +meimi k,135 +mega milk,135 +meat day,135 +matty (zuwzi),135 +maruti bitamin,135 +mamyouda,135 +madaraki fran,135 +mad max,135 +luca truelywaath,135 +locked slide,135 +li meiling,135 +lead white (tsurunoka),135 +lavelis,135 +lactating into cup,135 +la-na,135 +kyle broflovski,135 +kuroki tsutomu,135 +ksvk (girls' frontline),135 +kozume kenma,135 +koopa troopa,135 +kokono coco,135 +kohshibasaki,135 +kizaki (volvox),135 +kiriyama,135 +kipi-san,135 +kin niku,135 +kildir,135 +kawaraya a-ta,135 +kaninn,135 +junny,135 +jeto (jetopyon),135 +itsumizu,135 +ink stamp,135 +imaishi hiroyuki,135 +im (badmasa),135 +holding duster,135 +holding chalk,135 +hiiro (alchemy stars),135 +hibi89,135 +haseo (.hack//),135 +hase neet,135 +harumaki,135 +haru (tsuritama),135 +harau,135 +haitekudasai takamine-san,135 +guncannon,135 +godzilla: king of the monsters,135 +god gundam,135 +glowing hands,135 +ginzuki ringo,135 +ghost earrings,135 +ge xi,135 +gallon,135 +fujita (condor),135 +frogsnake,135 +following,135 
+finger cots,135 +feower (granblue fantasy),135 +feipin zhanshi,135 +eldegoss,135 +digdug006,135 +cure scarlet,135 +cresselia,135 +covered piercing,135 +corona timir,135 +cocoperino,135 +clivenzu,135 +chikuwa (yurucamp),135 +cherico,135 +chaldea logo,135 +chainsword,135 +cd player,135 +castanets,135 +carry me,135 +bren lmg,135 +beerus,135 +bebinn,135 +batgirl,135 +barbie (franchise),135 +azuma lim,135 +azami (kagerou project),135 +asususususu,135 +astg,135 +ashita no nadja,135 +asatani tomoyo,135 +asagao minoru,135 +armored aircraft carrier oni,135 +arm pouch,135 +aria (sister princess),135 +american flag pants,135 +amelie planchard,135 +amagi shino,135 +altar,135 +alice girls shiny heart,135 +akanesanzou,135 +akame (akame ga kill!),135 +akagi (sk0127aho),135 +air jordan 1,135 +aina ardebit,135 +absolute duo,135 +a hat in time,135 +zombie-andy,134 +yuubararin,134 +yurizaki mira,134 +yukinoshita haruno,134 +yukihiro ayaka,134 +yuki miku (2010),134 +yizhirenben,134 +yellow tongue,134 +yamato damashi,134 +yamashiro (summer offensive?) (azur lane),134 +xian jian qi xia zhuan (series),134 +wuwusan,134 +winda priestess of gusto,134 +unlovely (happinesscharge precure!),134 +ukai saki,134 +uchisaki himari,134 +tsukimi (xiaohuasan),134 +tsuchinoko,134 +tropical fish,134 +tonari (ichinichime azuma),134 +togo (korlsj1235),134 +the promised time: silks & three riddles (umamusume),134 +tamura yukari,134 +takurowo,134 +takanashi touka,134 +takamine takane,134 +sumeragi tomo,134 +squirting liquid,134 +spencer sais,134 +sp0i0ppp,134 +sophie (toast of the town) (tales),134 +sonya (fire emblem),134 +solomon (fate),134 +sims (azur lane),134 +shitou (4h),134 +shiromikan,134 +shinma x keishou! ragnabreak,134 +shikido (khf),134 +shapoco,134 +senpai-san (douki-chan),134 +samsara (shuukeura),134 +sakuraba aoi,134 +sakura ichiko,134 +rogue (ragnarok online),134 +richou (zerozero1101),134 +rezodwel,134 +reunion soldier (arknights),134 +renshu usodayo,134 +reminiscence,134 +r daneel olivaw,134 +purah,134 +polygamy,134 +polka dot shorts,134 +pinstripe skirt,134 +pemu,134 +pawn (dragon's dogma),134 +patamon,134 +paralyzer,134 +pandora (p&d),134 +paladin (sekaiju),134 +pajamas challenge (meme),134 +overpass,134 +orga itsuka,134 +oosaki minato,134 +oomasa teikoku,134 +nyazui,134 +nyatrix,134 +nisetanaka,134 +nigelungdayo,134 +namusanbou,134 +nae (rno),134 +morinaga kobato,134 +mizushima asa,134 +mishiro shinza,134 +minahoshi taichi,134 +mg34,134 +megami magazine deluxe,134 +matsumoto yoriko,134 +marshtomp,134 +mao (darker than black),134 +mandragora,134 +magical halloween,134 +maetel,134 +machop,134 +lwmmg (girls' frontline),134 +looking at hands,134 +lillipup,134 +lie ren,134 +li xingke,134 +leavanny,134 +larvitar,134 +l-elf,134 +kuronohana,134 +kuroimori,134 +kuroda (nanchara flight),134 +kurahana chinatsu,134 +kurage1,134 +koubou (cowbow kun),134 +kotteri,134 +kosuke haruhito,134 +korisei,134 +kohanayuki,134 +kkuem,134 +kinako (marubotan),134 +key (gaigaigai123),134 +kerorokjy,134 +kazuhira miller,134 +kayanogura,134 +kanopan,134 +kanata (kanade pa),134 +kabayaki namazu,134 +jynx,134 +justice league,134 +joukamachi no dandelion,134 +jonya,134 +jnt,134 +itou hachi,134 +ishida umi,134 +irisviel von einzbern (caster),134 +inyuppo,134 +inner moka,134 +ina zuma,134 +ikeda yasuhiro,134 +ichiryuu tsumiki,134 +hyonee,134 +hu sea,134 +houlen yabusame,134 +honeypot,134 +homaderi,134 +holox,134 +hiyo kiki,134 +hiro yoshinaka,134 +hidan (naruto),134 +hapymaher,134 +han'you no yashahime,134 +hamahama,134 +haiyi,134 +guilty gear 
2,134 +golbeza,134 +gins,134 +ghost in the shell lineup,134 +genryuusai maki,134 +fuyuichi,134 +fuutou shizune,134 +fuju,134 +fujirin,134 +from argonavis,134 +floating castle,134 +fiz (fizintine),134 +festa11307070,134 +fei fakkuma,134 +f-22 raptor,134 +exhaust pipe,134 +exeggutor,134 +ethel (xenoblade),134 +eria the water charmer,134 +erebus (azur lane),134 +epic armageddon,134 +enterprise (warship girls r),134 +elysium (arknights),134 +drake (azur lane),134 +dolechan,134 +digivice,134 +dewprism,134 +comiket 96,134 +clear glass (mildmild1311),134 +cero (last2stage),134 +centiskorch,134 +caroline (persona 5),134 +cardboard box gundam,134 +c (neta),134 +buchikaki,134 +borderlands 2,134 +bibi (love live!),134 +benimura karu,134 +bayonetta 3,134 +apron pull,134 +annie mei project,134 +anna williams,134 +animal-themed umbrella,134 +anetai toyone,134 +ame yamori,134 +ame-san,134 +216,134 +zunta,133 +zigzagoon,133 +zero kara hajimeru mahou no sho,133 +zenyatta (overwatch),133 +zawawa (satoukibi1108),133 +zakirsiz,133 +yuyi,133 +yupiteru,133 +you can eat the girl,133 +yoshikawa hideaki,133 +yk (pixiv43531291),133 +yappa muri,133 +webclap,133 +watchmen,133 +warzard,133 +wakamatsu hirotaka,133 +umesato middle school uniform,133 +tsuki ni yorisou otome no sahou,133 +tousaki umiko,133 +torotei,133 +toriseru (thesuperhero),133 +too many chicks,133 +tokonome mamori,133 +toaru kagaku no railgun s,133 +tenseiani,133 +tenoo12,133 +tear ring saga,133 +tanuki yousuke,133 +tanakara,133 +takanaga kouhei,133 +takamori haruka,133 +takahara ayumi,133 +taka (taka wo kukuru),133 +taka-kun,133 +surumeri (baneiro),133 +sukumo (kemutai),133 +suda ayaka,133 +starheart,133 +spray,133 +shopyun,133 +shiva (final fantasy),133 +shion (mirudakemann),133 +shinjin-chan (douki-chan),133 +shindou kei (ef),133 +shiburingaru,133 +satoimo chika,133 +sasanoneko,133 +sasanon (sasapoliton),133 +sasakuma kyouta,133 +sara valestein,133 +rynzfrancis,133 +rumia tingel,133 +rko (a470350510),133 +ribero,133 +raru (nanaharararu),133 +rai (sakuranbo sugar),133 +priest (dungeon and fighter),133 +pretty rhythm aurora dream,133 +poliwhirl,133 +pnatsu,133 +peroncho,133 +panzuban,133 +ouran high school uniform,133 +othel (hatimorris),133 +orange bandana,133 +object in panties,133 +nyakelap,133 +nut (hardware),133 +neonfloyd,133 +neckwear request,133 +natsukawa sarasa,133 +narae,133 +nanjou asuka,133 +nanakusa amane,133 +najuco (naju0517),133 +nagara (azur lane),133 +muzzle brake,133 +mugen ouka,133 +morisshii (morishiey),133 +momoe maria,133 +mizuno,133 +miyamae porin,133 +minyom,133 +minamiya mia,133 +mikao (eanv5385),133 +midorino eni,133 +mega man 11,133 +matsuura nanase,133 +matsuno canel,133 +mato spectoru,133 +maruyaa (malya1006),133 +mark gavatino,133 +manuela casagranda,133 +maenchu,133 +maaru (akira428),133 +lucina (spring) (fire emblem),133 +lino-lin,133 +lethe (fire emblem),133 +ky.,133 +kuon yashiro,133 +ktsecond,133 +kosai takayuki,133 +kokeshi (yoi no myoujou),133 +kobo (cobo 0609),133 +kobayakawa sena,133 +kirby super star,133 +kincora,133 +kazuchi,133 +kataokasan,133 +kamiki sekai,133 +kama (swimsuit avenger) (second ascension) (fate),133 +kahadu (kawazu),133 +juni (street fighter),133 +jt dong-agyoku,133 +john doe shinobu,133 +jellicent,133 +jekyll and hyde (fate),133 +jazztaki,133 +izuru (timbermetal),133 +izumi masamune,133 +itou ryuusei,133 +incise soul,133 +incense burner,133 +ichinose tokiya,133 +ichiban ushiro no daimaou,133 +hyoudou michiru,133 +hunterkay,133 +hot limit,133 +honoka chiffon,133 +hongbaise raw,133 +holding 
wallet,133 +holding stomach,133 +hoimi slime,133 +hizuki aya,133 +hinoe (dd works),133 +hino hino,133 +hilt,133 +hibiscus print,133 +heartbeat (module),133 +haru (konomi 150),133 +hara shoutarou,133 +h&k mp5k,133 +green apple,133 +gozu farm,133 +gouf,133 +giroro,133 +girl with a blonde braid (tomoshibi hidekazu),133 +game boy color,133 +fuurin asumi,133 +funitarefu,133 +fumei (mugendai),133 +fujimaru ritsuka (female) (chaldea combat uniform),133 +foreign blue,133 +firis mistlud,133 +final fantasy xiii-2,133 +fare gate,133 +fakir (princess tutu),133 +faiz azhar,133 +esaka,133 +eriyama e,133 +ek masato,133 +egnigem cenia,133 +ebata risa,133 +doughnut innertube,133 +din (flypaper),133 +darkest dungeon,133 +cure custard,133 +chitetan,133 +chi-hatan (emblem),133 +char's counterattack - beltorchika's children,133 +cargo net,133 +call of duty: modern warfare 2,133 +blanche (pokemon),133 +black cellien (kemono friends),133 +barawa,133 +balancing on head,133 +ayyh,133 +aya roushi,133 +aya (lezon),133 +asakura noi,133 +aramaru,133 +arakure,133 +ankh necklace,133 +anchorage oni,133 +amagaeru (amapippi052525),133 +akuno hideo,133 +akira miku ver,133 +airplane wing,133 +abra,133 +221 (tsutsuichi),133 +.hack//games,133 +zabel zarock,132 +yutarou,132 +yorra villeneuve,132 +yomikawa aiho,132 +yamashiro kai ni (kancolle),132 +yamaha tsui,132 +yakka,132 +xiaomu,132 +xiao chichi,132 +weno's blonde original character,132 +wasabi,132 +vhs artifacts,132 +venus flytrap,132 +utakata (azaka00),132 +unya (unya-unya),132 +unibrow,132 +ueauwa,132 +uchiuchi keyaki,132 +turbulence,132 +tribadism through clothes,132 +transparent border,132 +toron (mino106),132 +timeskip,132 +thigh bow,132 +tatata,132 +tashkent (the bound cruiser) (azur lane),132 +taruhi,132 +tamamo (mon-musu quest!),132 +tamade chiyu,132 +suzuki rion,132 +sunkern,132 +sticky (stickysheep),132 +star balloon,132 +snowflake ornament,132 +single-shoulder shirt,132 +siddham,132 +sidarim,132 +shiraori,132 +shino hajime,132 +shindol,132 +shanoa,132 +serving,132 +senjou no electro girl,132 +sema (vivaviva 02),132 +sebu illust,132 +scrafty,132 +scary sex,132 +sawaya (mizukazu),132 +satsuma age,132 +saint tail,132 +ryuntack,132 +rose neru,132 +robina,132 +ralsei,132 +raidon,132 +purple pajamas,132 +project.c.k.,132 +portuguese text,132 +ponpon (tosico),132 +pixiv fantasia 2,132 +pink seito,132 +pedestal,132 +paper cutout,132 +paladins,132 +otomore (shashaki),132 +orel cruise,132 +okita souji alter (swimsuit saber) (fate),132 +okamura nao,132 +okajima rokuro,132 +ochiai miyabi,132 +noblesse (elsword),132 +nako (nonrain),132 +nachi kai ni (kancolle),132 +murasame (senren),132 +multiple necklaces,132 +mrkg (arsfatuus),132 +mizukami ryouma,132 +miyu (matsunohara),132 +misaka kaori,132 +milk churn,132 +merry (diameri),132 +meal,132 +maruzensky (blasting off summer night) (umamusume),132 +maki ikazuya,132 +machine (nier),132 +lrl (last origin),132 +le malin (mercredi at the secret base) (azur lane),132 +kurohara yuu,132 +kurkoboltsi,132 +kujaku mai,132 +koza game,132 +kiseijuu,132 +kisei2,132 +kirino souya,132 +kimi no kokoro wa kagayaiteru kai?,132 +kim kwang hyun,132 +killer t (hataraku saibou),132 +katoryu gotoku,132 +karuta (karuta01),132 +karen (sister princess),132 +kanokoga,132 +kanameya,132 +kamiki izumo,132 +kakifly,132 +kaimuari,132 +jurassic park,132 +izanami (persona),132 +ittoki otoya,132 +improvised umbrella,132 +ibaraki kasen (cosplay),132 +houdukixx,132 +himekawa (shashaki),132 +hiita (yu-gi-oh!),132 +hekoningyou (waraningyou),132 +hecha (01964237),132 
+haccan,132 +gyuunyuu nomio,132 +gu luco,132 +gramophone miku,132 +fuurinkan high school uniform,132 +frilled footwear,132 +fraux,132 +faust (guilty gear),132 +ezaki papiko,132 +enraku tsubakura,132 +elephant ears,132 +edward geraldine,132 +dynamax,132 +dogpile,132 +deilan12,132 +deep (deep4946),132 +custom maid 3d 2,132 +cp00,132 +comic tenma,132 +clothes rack,132 +chiyomaru (yumichiyo0606),132 +chicke iii,132 +chiachun0621,132 +cheryl (pokemon),132 +checkered ribbon,132 +censored gesture,132 +castlevania: order of ecclesia,132 +carl clover,132 +card holster,132 +caption,132 +calling,132 +cafe stella to shinigami no chou,132 +bookstore,132 +blender,132 +bend,132 +azuma hazuki,132 +aston machan (umamusume),132 +arnold-s,132 +archlich,132 +arata iri,132 +alia's carnival!,132 +akirannu,132 +aiba uiha,132 +ahagon umiko,132 +admiral graf spee (peaceful daily life) (azur lane),132 +7zu7,132 +yuzuki yukari (shizuku),131 +yui.h,131 +youshu ohepe,131 +yourfreakyneighbourh,131 +xanax025,131 +wurmple,131 +wa2000 (date in the snow) (girls' frontline),131 +violetshit,131 +vatista,131 +type 97 chi-ha,131 +towel on one shoulder,131 +tongue tattoo,131 +tomiwo,131 +tom nook (animal crossing),131 +the hunchback of notre dame,131 +tenkuu no escaflowne,131 +tanino gimlet (umamusume),131 +talim,131 +takigawa yuu,131 +takenoko no sato (food),131 +taira yuuki,131 +tacco (tikeworld),131 +tabuchi (tabuchisanchi),131 +suou amane,131 +stuffed dragon,131 +sprue,131 +slowbro,131 +shizuka joestar,131 +shin megami tensei iv,131 +shikniful,131 +shibakame,131 +shaft,131 +sequins,131 +sekishiro mico,131 +saya (blue archive),131 +sawai natsuha,131 +sanuki (kyoudashya),131 +rio wezley,131 +rapeseed blossoms,131 +rance 10,131 +raigou,131 +q (control),131 +pump (pumpqmuq),131 +prophecy (rakuichimonji),131 +prisma illya (cosplay),131 +prehistoric animal,131 +potemayo,131 +potekite,131 +ponta (velmar),131 +pinoko (pnk623),131 +persian,131 +paru paru,131 +otomeza ryuseigun,131 +orimiya mai,131 +onimaru gonpei,131 +ominous shadow,131 +oinari-sama (kemono friends),131 +o-ring dress,131 +ngayope,131 +neyuki rei,131 +nestkeeper,131 +natsuki (gedo),131 +natsu (sinker8c),131 +na kyo,131 +n-mix,131 +music s.t.a.r.t!!,131 +motsuaki,131 +moko (moko/moko),131 +mocha (naturefour),131 +mizuno sao,131 +mizu (lzzrwi603),131 +miemia,131 +midori (310ri 21),131 +mejiro haruhiko,131 +matsusaka gyuu,131 +matou byakuya,131 +masaki (celesta),131 +martha (swimsuit ruler) (first ascension) (fate),131 +maria (fire emblem),131 +luna (reclaimed land),131 +lobo (fate),131 +laevatein (fire emblem),131 +lace collar,131 +kz nagomiya,131 +kurumi (recycllamo),131 +kuroshiro (ms-2420),131 +kujou kazuya,131 +kufei,131 +kubota masaki,131 +krenz,131 +kou futoshi,131 +kongari tokei,131 +kogasa-san's father,131 +kiryuu michiru,131 +kawasaki tadataka,131 +kawanuma uotsuri,131 +kasuga souichi,131 +kamen rider gaim,131 +justin leyva (steamy tomato),131 +ja'far (magi),131 +itsumi (itumiyuo),131 +irono (irtyu),131 +inubouzaki itsuki,131 +inoue seita,131 +ilya kuvshinov (style),131 +ikuya koimori,131 +ichikawa kyoutarou,131 +ichidai taisa,131 +hoyon,131 +holding bow (ornament),131 +hiryuu kai ni (kancolle),131 +himedanshi,131 +hijouguti,131 +hibiki mio,131 +hentai (1985),131 +helloimtea,131 +hayase misa,131 +hashiri nio,131 +graviton beam emitter,131 +gram 9,131 +giryu,131 +funakura,131 +fukujima kiwi,131 +fujioka,131 +flamel symbol,131 +fionn mac cumhaill (fate/grand order),131 +ferrari,131 +eyes of grima,131 +emelie (cyancapsule),131 +elsevilla,131 +elbowing,131 +edomae 
lunar,131 +dynamo roller (splatoon),131 +doumou,131 +dizi930,131 +deviljho,131 +detective pikachu (character),131 +death flag,131 +dartboard,131 +comic megastore,131 +cocked hammer,131 +cleft chin,131 +chiwa (chiwa0617),131 +chiimako,131 +chihiro (blue archive),131 +cheetah (kemono friends),131 +bondson,131 +bojji,131 +black cat (series),131 +bf 109,131 +berukko,131 +beiyi,131 +bayleef,131 +avery (pokemon),131 +atom,131 +ashino,131 +arsene (persona 5),131 +arrow (jojo),131 +anbutter siruko,131 +amasaki yusuke,131 +amakara surume,131 +aliceblue,131 +akiyama cz4a,131 +akai ringo (ookami-san),131 +against table,131 +afterglow (bang dream!),131 +admiral graf spee (girl's sunday) (azur lane),131 +abaratani kurou,131 +zepar,130 +yotsuboshi-imai,130 +whismur,130 +watanabe kanako,130 +vulcanus (disgaea 4),130 +ukan muri,130 +type 99 dive bomber,130 +twilight (go! princess precure),130 +tunamayo (dsasd751),130 +tribute (tributism),130 +too many rabbits,130 +tokyo ravens,130 +togetic,130 +terror (azur lane),130 +tenzou crossunite,130 +taowu (20809),130 +takeuchi aya,130 +take (draghignazzo),130 +takatsu keita,130 +sweater removed,130 +superhero landing,130 +super orion (fate),130 +sun-3,130 +slav squatting,130 +shuu-0208,130 +shisshou senkoku,130 +shirosu,130 +shinmon akika,130 +shinjou hinaki,130 +shikou sakugo (qqap9gt9k),130 +shanks (one piece),130 +shaka (staito0515),130 +shadow (shadows house),130 +seseren,130 +senyoku no sigrdrifa,130 +sayousuke,130 +saren (christmas) (princess connect!),130 +sana!rpg,130 +samurai (final fantasy),130 +sakofu,130 +sakaki shizuka,130 +ryuno,130 +rumoon,130 +rui (dream c club),130 +rourou ill,130 +roang,130 +reihou19,130 +puti devil,130 +poko chin,130 +plachta,130 +pipidan,130 +pink cloak,130 +pineapple slice,130 +pie slice,130 +paul bunyan (third ascension) (fate),130 +ohoho,130 +ochazuke,130 +noire (fire emblem),130 +nito (nshtntr),130 +nikka (cryptomeria),130 +nemophila (flower),130 +mushiking,130 +multicolored buttons,130 +mpien,130 +motono (sakamotono),130 +mokuzou (moku ssbu),130 +menea,130 +melissabelle,130 +maruku,130 +maria campbell,130 +mai (dragon ball),130 +lafiel,130 +kurowa (curowa),130 +kuri choko,130 +kurarin,130 +kouenji sayuka,130 +koshimizu ami,130 +konohana lucia,130 +kono yuusha ga ore tueee kuse ni shinchou sugiru,130 +kokouno oyazi,130 +koborii (amaburi),130 +kitazawa (embers),130 +kirigami,130 +keshin (inazuma eleven),130 +katsushika hokusai (third ascension) (fate),130 +karin bluez3,130 +kami-sama no memo-chou,130 +kamatama,130 +kaleina (ricegnat),130 +kakaon,130 +kaizeru,130 +kaede (blue archive),130 +joman,130 +javelin (spear),130 +iwanori,130 +itachi kanade,130 +isla (plastic memories),130 +ikoku meiro no croisee,130 +ikazuchi no senshi raidy,130 +ichi-jirushi,130 +holding trident,130 +holding skateboard,130 +henriiku (ahemaru),130 +he-class light cruiser,130 +haraegushi,130 +hanikami kanojo,130 +hagetapo,130 +hachikuji,130 +guvava,130 +gond,130 +gohpot,130 +glass teapot,130 +fu hua (shadow knight),130 +frilled babydoll,130 +freyja (fire emblem),130 +floating screen,130 +flak jacket,130 +familiar (madoka magica),130 +embroidered bra,130 +ekubo (mob psycho 100),130 +ehart,130 +dolri,130 +denonbu,130 +denden,130 +day and night,130 +crying aqua (meme),130 +conto,130 +comiket 93,130 +combee,130 +caustic (apex legends),130 +caucasian,130 +cap'n cuttlefish,130 +baselard,130 +asterozoa,130 +armored core: for answer,130 +archer (disgaea),130 +arablue,130 +appleq,130 +aohane,130 +anchor hat ornament,130 +amitie (puyopuyo),130 +alcremie (vanilla 
cream),130 +akiba hideki,130 +aki (akisora hiyori),130 +aiko (renkin san-kyuu magical pokaan),130 +aikawa chiho,130 +a-soul,130 +;|,130 +7 calpis 7,130 +yuuma (u-ma),129 +yumibakama meme,129 +yumi sayaka,129 +yumekamaborosh,129 +yukiyanagi raki,129 +yoshizoe eiko,129 +yoshimoto (carpsukidayo),129 +yakumo nanahara,129 +xiao rui rui,129 +xiangtu,129 +wristlet,129 +working,129 +weeds,129 +wakataku,129 +uhyoko,129 +tsunsuki (naobe009),129 +torn sweater,129 +toriny,129 +tony kuusisto,129 +the great ace attorney: adventures,129 +the children,129 +texas the omertosa (arknights),129 +terra battle,129 +tenhiro naoto,129 +teitan high school uniform,129 +taniguchi,129 +sorono,129 +snowcie,129 +sleeve rolled up,129 +simulated facial,129 +shiozaki yuji,129 +shion (no.6),129 +shimizu pem,129 +shibata masahiro,129 +sharpffffff,129 +shamisen (syami sen),129 +setanta (fate),129 +self-mutilation,129 +seiken gakuin no maken tsukai,129 +scorpion girl,129 +satou aji,129 +sakamuke,129 +sakai eri (illustratoreri),129 +sajou ayaka,129 +sabakuomoto,129 +romero (zombie land saga),129 +rodney (melagal),129 +rock howard,129 +regigigas,129 +reala (tales),129 +re:zero kara hajimeru isekai seikatsu: lost in memories,129 +qiongsheng,129 +qingmingtongzi,129 +pushcart,129 +porygon-z,129 +portal 2,129 +plains zebra (kemono friends),129 +pizza delivery,129 +pie in face,129 +palette knife,129 +padded jacket,129 +orfevre (umamusume),129 +oretachi ni tsubasa wa nai,129 +ohyo,129 +oerba yun fang,129 +norasame (dagako),129 +noelle (kfc) (genshin impact),129 +nobamo pieruda,129 +nns146,129 +niku (ni23ku),129 +nephthys2356,129 +natsu narumi,129 +mumumu (road),129 +multicolored cloak,129 +muku (apupop),129 +moriya suwako (cosplay),129 +moe2016,129 +mochimochi (xseynao),129 +mk-5,129 +mikuni mizuki,129 +meshimase idol,129 +meen (ouaughikepdvrsf),129 +marley military uniform,129 +manectric,129 +mamonomusume to no seikatsu ~ramia no baai~,129 +mall,129 +makun dx,129 +maki (blue archive),129 +m (neteitai10),129 +m60,129 +lunch (lunchicken),129 +leonardo watch,129 +lan mao akko,129 +lamia hygieia,129 +kyoku tou,129 +kurofude anna,129 +kuroda kunika,129 +kuramoto takato,129 +kumaori jun,129 +kumano kai ni (kancolle),129 +kukimaru,129 +kouba,129 +koguma (super cub),129 +kobayashi yoshitaka,129 +kikou souseiki mospeada,129 +katase minami,129 +kasuga yuuki,129 +karyl (new year) (princess connect!),129 +kaneda shoutarou,129 +kanade (kanadeya),129 +kagehito,129 +kagami toufu,129 +kaerunoashi,129 +jubei (blazblue),129 +joey jones,129 +jilpoong17,129 +iseya shiki,129 +irokari,129 +inumoto,129 +inui nagi,129 +inkblot,129 +hun,129 +holding finger,129 +hinata yuu (atelierhinata),129 +hentai kamen,129 +heiyan shenshi,129 +hayashio (kancolle),129 +haruna (kore wa zombie desu ka?),129 +harino646,129 +hanada kirame,129 +hack (apex legends),129 +h2o footprints in the sand,129 +gyosone,129 +gullinbursti (housamo),129 +gouma reifuden izuna,129 +genshi,129 +gangster,129 +furfur,129 +furfrou,129 +fred04142,129 +foot massage,129 +exercise machine,129 +etou kanami,129 +emden (azur lane),129 +electrodes,129 +diola (granblue fantasy),129 +devilot de deathsatan ix,129 +dddoochi1,129 +daisi gi,129 +courtroom,129 +cocoa (cafe-hotcocoa),129 +christmas tree costume,129 +chisato (missing park),129 +chimachi,129 +chihuahua,129 +capybara-san,129 +can to cheek,129 +caiman (dorohedoro),129 +bouncing hair,129 +boko (girls und panzer) (cosplay),129 +billy herrington,129 +bilibili xiaolu,129 +beru,129 +berserker (granblue fantasy),129 +beige scarf,129 +bedside,129 +barbatos 
(genshin impact),129 +aurica nestmile,129 +applepie (12711019),129 +amemiya sekira,129 +amamiya yuuko,129 +akuma,129 +akine (kuroyuri),129 +akatsuki hiro,129 +agatsuma kaede,129 +aang,129 +7001,129 +yuya (oshidori),128 +yuuki shin,128 +yuugenmagan,128 +yuu (yumezakura),128 +yuu201023,128 +yukifuri tsuyu,128 +yone,128 +yocchi (tenyunkel),128 +yaya (machine-doll),128 +yamada michiru,128 +winged fusiliers,128 +wing hug,128 +will (pokemon),128 +wavy eyebrows,128 +warp star,128 +wang yuanji,128 +wa2000 (op. manta ray) (girls' frontline),128 +valis,128 +uosaasou,128 +umbrella riding,128 +tuskryo,128 +tube socks,128 +tsushi,128 +triage x,128 +togame momoko,128 +toaster,128 +tk31,128 +tiffa adill,128 +thresh (league of legends),128 +teri terio,128 +tenshi no 3p!,128 +tea (cafe-chan to break time),128 +tataki tuna,128 +tamamo no mae (fate/extra) (cosplay),128 +tamakorogashi,128 +tama yu,128 +talon (league of legends),128 +taiko no tatsujin,128 +suzuki rika,128 +stuffed seal,128 +storage room,128 +sorato (astllatte),128 +snowsant (arknights),128 +skirt under kimono,128 +simulated paizuri,128 +sigurd (fire emblem),128 +shoulder-to-shoulder,128 +shipping container,128 +shiory,128 +shining star,128 +shingyoku (female),128 +sewenan,128 +seirei tsukai no blade dance,128 +sebastian castellanos,128 +sd gundam g-generation,128 +sbgu,128 +sasabunecafe,128 +sakusakusakurai,128 +sakura hanatsumi,128 +sa9no,128 +roberta (arknights),128 +ritateo,128 +risky boots,128 +rei (princess connect!),128 +recording studio,128 +raven (honkai impact),128 +rance (dokidoki! precure),128 +rabiiandrain,128 +print coat,128 +precure all stars new stage: mirai no tomodachi,128 +poriuretan,128 +pool party (league of legends),128 +polka dot bowtie,128 +pleated sleeves,128 +pillow sex,128 +people's liberation army,128 +panda print,128 +nopetroto,128 +nitro (mugityaoisii),128 +nishiya futoshi,128 +nichibotsu (kitaziman),128 +nameless (rynono09),128 +nakasawa kei,128 +mushroom girl,128 +motobi (mtb umk),128 +morita (moritania),128 +mitsumine raimu,128 +miria marigold mackenzie,128 +mimic chest,128 +mia0309,128 +mercedes-benz,128 +matsushita makako,128 +master nemesis,128 +maroonabyss,128 +mari (gym uniform) (blue archive),128 +malasada,128 +maho (princess connect!),128 +machiko ryou,128 +lupusregina beta,128 +luciana mazzei,128 +lif (lif & ref),128 +leaf98k,128 +layer (mega man),128 +latale,128 +kuzu (miduhana),128 +kurusu tatsuya,128 +kurousagi (mondaiji),128 +kurama norihito,128 +kujuu shikuro,128 +kouga (mutsumi),128 +kotaka,128 +kos-mos re:,128 +kori (trouble spirit),128 +koi ni naritai aquarium,128 +kochikame,128 +kinketsu,128 +kieed,128 +kemuri haku,128 +kawai rou,128 +kanro ame (ameko),128 +kaneblob,128 +kagaya you,128 +kadan (ad1999),128 +kabutoyama,128 +juli (street fighter),128 +john su,128 +jiao mao,128 +j adsen,128 +inniyik,128 +indra (arknights),128 +ikamagu,128 +igusaharu,128 +iga tomoteru,128 +ichijou hitoshi,128 +hotel01,128 +hosshiwa,128 +hisakawa riho,128 +helel ben shalem,128 +harle (chrono cross),128 +half-timbered,128 +h.n.elly (kirsten),128 +gunner (final fantasy),128 +gregor (tsurunoka),128 +go-it,128 +giraffe tail,128 +gedou (shigure seishin),128 +gecchu,128 +gear hair ornament,128 +galactic empire,128 +gachon jirou,128 +finger to head,128 +ferris wheel interior,128 +fallout 3,128 +fa yuiry,128 +examination,128 +eustace (granblue fantasy),128 +escalayer,128 +drumming,128 +dragon (arms),128 +doora (dora0913),128 +docozi,128 +deg (deguana),128 +danjou sora,128 +dab (dance),128 +cutie panther,128 +crowded,128 
+corrugated galvanised iron sheet,128 +convention,128 +collision,128 +chungu,128 +chris4708,128 +charybdis (azur lane),128 +chane laforet,128 +cawang,128 +bumping,128 +brazilian flag,128 +boukun habanero,128 +bob-omb,128 +blazblue: continuum shift,128 +black mutou,128 +bitter crown,128 +beenic,128 +beastars,128 +bead choker,128 +bastet (houtengeki),128 +baron (nsbaron),128 +baozishark,128 +bamboo print,128 +azure meraki,128 +azure (armor),128 +azna,128 +aymr (fire emblem),128 +are you my master,128 +aoi (buzhuen444),128 +anju (mocomocousagi),128 +anakin skywalker,128 +amimi,128 +amagi (wending waters serene lotus) (azur lane),128 +african rock python (kemono friends),128 +40 (0f0urw),128 +zhuxiao517,127 +yuuzuki hijiri,127 +yuumin,127 +yuuka seisen,127 +yuniiho,127 +yoruhachi,127 +yoriyuki chiyo,127 +yom (ymayma00ss),127 +xerneas,127 +wreck-it ralph,127 +wowoguni,127 +world masterpiece theater,127 +viola (majo no ie),127 +uxie,127 +ultra instinct,127 +uiroutsuji yumihiko,127 +tweet,127 +touryou,127 +toudou erena,127 +tooth gap,127 +toaruocha,127 +templus,127 +tamayura,127 +tamamo no mae (swimsuit lancer) (first ascension) (fate),127 +tamaki ui,127 +takemikazuchi (muvluv),127 +takamiya ren,127 +suwakana,127 +sugi87,127 +sudowoodo,127 +sucking,127 +studiolg,127 +street fighter ex (series),127 +stomach day,127 +star guardian ahri,127 +star bracelet,127 +space shuttle,127 +solrock,127 +so-taro,127 +skorpion vz. 61,127 +shinozuki takumi,127 +shenqi (toid310),127 +sharekoube,127 +shadowgrave,127 +seura isago,127 +sethxzoe,127 +senbon tsuki,127 +sc naru,127 +satou (3366 s),127 +samba,127 +sakura akari,127 +sakuma rinko,127 +sakagami ayumi,127 +sadaharu,127 +s-head commander,127 +ryuubi gentoku,127 +rumwik,127 +rulue (puyopuyo),127 +rukino saki,127 +roshin,127 +robotech,127 +riftgarret,127 +ribbon-trimmed apron,127 +republic of china flag,127 +redol,127 +rathalos (armor),127 +ran system,127 +rakuyo (bloodborne),127 +ragecndy,127 +puffer fish,127 +profnote,127 +printer,127 +premature ejaculation,127 +poko (mammypoko),127 +pokemon center,127 +po fu,127 +pengy (granblue fantasy),127 +paul (pokemon),127 +parasite,127 +page tear,127 +otsukemono,127 +open labcoat,127 +ootsuka mahiro,127 +ootori chihaya,127 +onimaru (onimal7802),127 +okuyama saori,127 +octodeco,127 +nogami takeshi,127 +nino (sunaba suzume),127 +nezha (fate),127 +nekoi hikaru,127 +nanashi maru,127 +nakaba (mode),127 +mousse (arknights),127 +mono land,127 +mono (shadow of the colossus),127 +mon mon,127 +momoki run,127 +mm (yoromu),127 +mkm (mkm storage),127 +miori celesta,127 +mimoza (96mimo414),127 +matsunaga777,127 +matsukuzu,127 +matsu 84,127 +masaki kei,127 +marley (pokemon),127 +marisu,127 +maki daikichi,127 +m ko (maxft2),127 +kururu (rhapsody),127 +kuromukuro,127 +kuroino (kuroyashiro),127 +kuki panda (wkdwnsgk13),127 +kongou (azur lane),127 +komachi narota,127 +kobuta,127 +kitou sakeru,127 +kirishima riona,127 +king of prism by prettyrhythm,127 +kandori,127 +kaii shoukougun,127 +kagura rei,127 +kagamine len (cosplay),127 +kaerunoko,127 +kaeru (kaerism),127 +kaburagi yasutaka,127 +jaguarman (fate),127 +izumi makoto,127 +irezumi,127 +inamori mika,127 +implied after sex,127 +ichinose haru,127 +holographic clothing,127 +holding spork,127 +holding carton,127 +hitmonlee,127 +hisui (stapspats),127 +hirai chika,127 +hentai oyaji,127 +heavy (tf2),127 +healing,127 +hasegawa shin'ya,127 +haru57928031,127 +hand sonic,127 +hallessena,127 +hakuryuu (azur lane),127 +guhanshounen,127 +goodbye sengen (vocaloid),127 +goldfish print,127 +girls 
bravo,127 +ginmu,127 +fur gloves,127 +fukutchi,127 +flower trim,127 +fii-tan,127 +eternal arcadia,127 +e-liter 3k (splatoon),127 +drew (pokemon),127 +dragon radar,127 +double thumbs up,127 +dongho kang,127 +dojirou,127 +densetsu kyojin ideon,127 +denkaisui,127 +darumaka,127 +dancouga (series),127 +cyber v,127 +cure white (cosplay),127 +cramorant,127 +construction site,127 +coco (yes! precure 5),127 +cid highwind,127 +chataro1412,127 +cereal,127 +carol (skullgirls),127 +calpara,127 +cacnea,127 +byakuya reki,127 +brown tank top,127 +brigid (fire emblem),127 +brera sterne,127 +bravely second: end layer,127 +big.g,127 +baiguio (zhu xian),127 +bag on head,127 +auction,127 +atlas academy uniform,127 +asanaya,127 +araki kanao,127 +anezaki mamori,127 +amano yukiteru,127 +akigumo kai ni (kancolle),127 +akabane hibame,127 +after insertion,127 +accessory tan,127 +4690 (tkpbsk),127 +zummy,126 +zatsune miku,126 +zaitsu,126 +zacian (crowned),126 +yuzuhara haruka,126 +yuui hutabakirage,126 +yohane shimizu,126 +y (036 yng),126 +weri,126 +vira (summer) (granblue fantasy),126 +video call,126 +vibrator bulge,126 +usui harusame,126 +ushi (newrein),126 +usagimiko,126 +unconventional vehicle,126 +umitsuki natsuzou,126 +ukimukai,126 +type 79 (girls' frontline),126 +two-tone wings,126 +tp (kido 94),126 +tougou hifumi,126 +totte,126 +tomajiyama,126 +tokunaga akimasa,126 +tian (my dear),126 +tenmu shinryuusai,126 +tanuki (kemono friends),126 +tank cupola,126 +tan sweater,126 +tamasaka makoto,126 +tamanoi peromekuri,126 +takoyaki neko-san,126 +sword art online: hollow fragment,126 +sweet devil (vocaloid),126 +sunege (hp0715),126 +summon night 4,126 +sou (pale 1080),126 +sothe (fire emblem),126 +simon (n.s craft),126 +shiao,126 +seox (granblue fantasy),126 +se-u-ra,126 +scarecrow (girls' frontline),126 +sawamura hikaru,126 +sashima,126 +saint october,126 +rumiya9i,126 +ruda (ruda e),126 +rorosuke,126 +robographer,126 +riza wildman,126 +ribbon-trimmed swimsuit,126 +remsor076,126 +regirock,126 +red serafuku,126 +r dorothy wayneright,126 +princess royale,126 +pos (shiratama-ya),126 +pomesaurus,126 +plushmallow,126 +pia carrot e youkoso!!,126 +pencil mustache,126 +pekomama,126 +parkgee,126 +orange buruma,126 +ootani mikoto,126 +oniilus,126 +oboro (taimanin asagi),126 +no parking sign,126 +nicole pmonachi,126 +namaonpa,126 +namako (namacotan),126 +nakayama tooru,126 +nakagawa waka,126 +muvluv alternative (anime),126 +mutsuki tooru,126 +moor,126 +moomoo milk,126 +mokkori9,126 +miniwa tsumiki,126 +mikuni oriko,126 +mikan (ama no hakoniwa),126 +metroid prime,126 +masquerain,126 +mania (fd6060 60),126 +mame,126 +maidforge,126 +mahoutsukai no yakusoku,126 +mahou shoujo ai,126 +magician's red,126 +maeshimashi,126 +mabel pines,126 +maam,126 +lubikaya,126 +looped braids,126 +limeblock,126 +lectern,126 +latenight,126 +kokorozashi,126 +koi han,126 +kio naoki,126 +kinomiki nobori,126 +kinoko no yama,126 +kingdom of kazimierz logo,126 +king ghidorah,126 +killy,126 +kihel heim,126 +kiduta cielo,126 +kerosene lamp,126 +keikenchi (style),126 +kasanui,126 +karaage3,126 +kannabi no mikoto,126 +kamiya kaoru,126 +kamen rider black,126 +kamaboko senshi,126 +kadabra,126 +jourd4n,126 +john zhang,126 +jigen (cookie),126 +jenga,126 +jasmine (disney),126 +ivy (sena0119),126 +itsuki kuro,126 +ikawa waki,126 +ice cream cone spill,126 +hyena tail,126 +hortense,126 +holding club,126 +holding boots,126 +hiragana,126 +hiiragi miki,126 +herohero (higashi no dou),126 +heavy splatling (splatoon),126 +hayami saori,126 +haruno shuu,126 +hare 
(tetterutei),126 +hand guard,126 +grimlock,126 +grey outline,126 +girl (mokufuu),126 +girl's avenue,126 +ginopi,126 +geregere (lantern),126 +galvantula,126 +fuuchouin kazuki,126 +fukunoki tokuwa,126 +fujii tomo,126 +followers favorite challenge,126 +flo (guilty hearts),126 +fish (food),126 +fatina,126 +fataaa,126 +fanta (the banana pistols),126 +engulfing lightning (genshin impact),126 +east sha2,126 +dragonstone,126 +din (raiden),126 +darnell,126 +cross-laced skirt,126 +cromwellb,126 +corked bottle,126 +cofagrigus,126 +cobalta,126 +cinko,126 +chocolate doughnut,126 +chiruto,126 +chiester556,126 +chicken nuggets,126 +chase (pokemon),126 +censer,126 +cdash817,126 +catgirl0926,126 +catball1994,126 +canape (canape0130),126 +butter knife,126 +boston terrier,126 +boko (maniacpurple),126 +blood writing,126 +beliatan,126 +bad vulva,126 +autopaizuri,126 +atsumisu,126 +ashino moto,126 +apocalypse,126 +aosora (mizore),126 +antelope horns,126 +amicis,126 +amamine,126 +akizuki ritchan,126 +akitaka akita,126 +akiamare,126 +aisha clanclan,126 +acubi tomaranai,126 +aak (arknights),126 +aa2233a,126 +888myrrh888,126 +zoids chaotic century,125 +zamudelin,125 +yuuno (yukioka),125 +yukinoshiro,125 +yuio58ok,125 +yoo mina,125 +yoicha,125 +yinpa (wanone500511),125 +yazawa oke,125 +yamamura umi,125 +yamamoto shuuhei,125 +yakumo mitama,125 +xiumu bianzhou,125 +with you,125 +washing hands,125 +vietnam (hetalia),125 +usa (dai9c carnival),125 +uonuma yuu,125 +tsurugi hikaru,125 +tsukuru (seki sabato),125 +tori (10ri),125 +thundergotch,125 +tentsuu (tentwo),125 +tennousu athena,125 +tenma saki,125 +tenk,125 +tenchuu,125 +taniguchi jun'ichirou,125 +tanaji,125 +takigawa geenito,125 +taiki (luster),125 +tachibana shiro (idolmaster),125 +t-34-85,125 +suzu (tg 390),125 +super robot wars judgement,125 +sukiyo,125 +sougishi ego,125 +sogabe shuuji,125 +snorkel in mouth,125 +sitting on roof,125 +shouno kotarou,125 +shiratama mikan,125 +shiraki shiori,125 +shijou mitsue,125 +shez (fire emblem) (male),125 +serizawa fumino,125 +scathegrapes,125 +sb lama,125 +sasori (naruto),125 +sasakama (sasagaki01),125 +sakurabe notos,125 +sakura szm,125 +saino,125 +sagimori arata,125 +saegusa akina,125 +ry thae,125 +roresu,125 +roark (pokemon),125 +riley (pokemon),125 +rifu (643763873),125 +retaining wall,125 +restraints,125 +rally vincent,125 +r-301 carbine,125 +putimaxi,125 +punk girl (pokemon),125 +prism ark,125 +print thighhighs,125 +porygon2,125 +pori (kmt pori),125 +pixiv azriel,125 +pastel chime,125 +partially opaque glasses,125 +paper balloon,125 +oshiruko (oshiruco 212048),125 +ookashippo,125 +omyo (myomyomyo22),125 +nice nature (umamusume) (cosplay),125 +news,125 +nennen,125 +neko musume michikusa nikki,125 +nazal,125 +nanotaro,125 +nagumo (nagumon),125 +nagi (shunsuke-manage),125 +model tank,125 +miyamoto musashi (swimsuit berserker) (third ascension) (fate),125 +miurahha,125 +minori (senran kagura),125 +matsuzaki miyuki,125 +mato (mozu hayanie),125 +mastgg,125 +mashiro blan de windbloom,125 +maruchan.,125 +marionette (excle),125 +marin (the legend of zelda),125 +maitake (kinokonabe hinanjo),125 +maguta,125 +lordol,125 +liu lan,125 +kouzuki tsubasa (musou kaidou),125 +kokekokko coma,125 +kohakugin,125 +kochoko,125 +kinu kai ni (kancolle),125 +kinoko,125 +kimi tsuru,125 +kenkon no washi,125 +kazuaki,125 +kawaty,125 +katai (nekoneko0720),125 +karuta (card game),125 +kana (fire emblem) (female),125 +kamehito,125 +kagehara hanzou,125 +kabocha head,125 +jurassic world,125 +jason voorhees (cosplay),125 +iyakun,125 +ixmmxi,125 +indonesian 
commentary,125 +igglybuff,125 +holding candy cane,125 +hk416 (herbal-flavored hard candy) (girls' frontline),125 +hisakawa tetsudou,125 +hiiro h,125 +higashigunkan,125 +henkuma,125 +hell,125 +held up,125 +hatafuta,125 +hamoto,125 +hair ornament request,125 +habu rin,125 +h@ll,125 +gugure! kokkuri-san,125 +glameow,125 +ghirahim,125 +getsumen suibaku ver. a(c),125 +genmukan,125 +gaothun,125 +gag manga biyori,125 +fumino tamaki,125 +fujii eishun,125 +french commentary,125 +fpanda,125 +fireball (series),125 +fionna the human girl,125 +farah oersted,125 +fansub,125 +eto (tokyo ghoul),125 +epona,125 +elizabeth bathory (halloween caster) (fate),125 +ekus (xo ekus),125 +eiscue,125 +ecell,125 +delica,125 +crote,125 +conifer,125 +coco adel,125 +chocolate misu,125 +chipmunk tail,125 +checkered vest,125 +charlemagne (fate),125 +canti,125 +boros (ouro kronii),125 +bong (0u0bon),125 +blissey,125 +bishooji,125 +bib (bibboss39),125 +bekkourico,125 +behind tree,125 +barking,125 +bangboo (zenless zone zero),125 +bad end happy,125 +aurora e. juutilainen,125 +asagiri shiori,125 +artoria pendragon (alter swimsuit rider) (third ascension) (fate),125 +arrietty,125 +argyle skirt,125 +aqua sarong,125 +aqua horns,125 +ame no kisaki academy uniform,125 +alice (tales),125 +alex (minecraft),125 +aitsugawa rui,125 +adolf hitler,125 +abyssal crane princess,125 +abacus,125 +zerokosan,124 +zenshin,124 +yuuki-chan (kanabun),124 +yugen99,124 +yuba no shirushi,124 +yua (argaoffroad),124 +yiduan zhu,124 +year of the rabbit,124 +yatsuki yura,124 +xiaoyin li,124 +wolf pelt,124 +weasel (close-to-the-edge),124 +warming hands,124 +waluigi,124 +violence kumahina,124 +velia,124 +tsukuyo (blue archive),124 +towako (10wk0),124 +toudou shion,124 +tostos,124 +toru k,124 +thirty 8ght,124 +the adventures of sherlock holmes,124 +teemu taiga,124 +tarayama,124 +tank (container),124 +tama kai ni (kancolle),124 +takeu,124 +takemoto izumi (style),124 +sword art online progressive,124 +susie (kirby),124 +suntory,124 +sunflora,124 +suenaga mirai,124 +strawberry earrings,124 +sora no kanata no dystopia x shitei,124 +sinbad (magi),124 +sidecar,124 +shockwave (transformers),124 +shingyoku (male),124 +shadow hearts ii,124 +sasa kazamori,124 +sarutobi ayame,124 +sarashiki tatenashi,124 +saotome genma,124 +sakura ran,124 +ryuuki (hydrangea),124 +rurima (cookie),124 +ribbon removed,124 +rias-coast,124 +remy (elsword),124 +reinhardt (fire emblem),124 +reference photo,124 +red-p,124 +rebreather,124 +razor blade,124 +r o ha,124 +poliwrath,124 +pokestar studios,124 +pixiv fantasia 1,124 +pippi (pixiv 1922055),124 +peter strasser (azur lane),124 +paco,124 +otogi kyouka,124 +ore no nounai sentakushi ga gakuen love-comedy wo zenryoku de jama shiteiru,124 +ogre (dq10),124 +nishimura (nianiamu),124 +nishigoori yuuko,124 +nibbling,124 +nbo,124 +natsushi,124 +napo,124 +muro,124 +moriya naoki,124 +misono gakuen school uniform,124 +minase shuu,124 +miles morales,124 +mid-stride,124 +mettaton ex,124 +meslamtaea (weapon),124 +meridian project,124 +melowh,124 +mega man x: command mission,124 +maxim tomato,124 +matsuba (idiotlantern),124 +masquerade mask,124 +maru-sun,124 +maanu,124 +m16a2,124 +luminous (madoka magica),124 +luigi (cosplay),124 +leander (azur lane),124 +laura kinney,124 +kureta (nikogori),124 +kumaartsu,124 +korea (hetalia),124 +kobuchizawa shirase,124 +kobayashi tetsuya,124 +knee strap,124 +kitakouji hisui,124 +kisara (yu-gi-oh!),124 +kirita (noraring),124 +kirai shouen,124 +kimuti-g,124 +ken ill,124 +katzeh,124 +kasaneko,124 +kamizaki hibana,124 
+kaiman garupan,124 +justine (persona 5),124 +julius pringles,124 +joko jmc,124 +joakim sandberg,124 +jerry can,124 +jack (wkm74959),124 +iowa (pacific),124 +inuo (inuo x),124 +illusionk,124 +igloo,124 +idunn (fire emblem),124 +ibis douglas,124 +hyoudou issei,124 +holding screwdriver,124 +holding cage,124 +hirose madoka,124 +hinomoto oniko,124 +hinamori momo,124 +hina (genshin impact),124 +hasunokaeru,124 +has cropped revision,124 +hands on another's arms,124 +hanabatake chaika,124 +hamtaro (series),124 +hakama pull,124 +greythroat (arknights),124 +gimp suit,124 +futoshi (darling in the franxx),124 +furukawa lemon,124 +fummy,124 +fujiwara minaho,124 +fujimaru ritsuka (male) (tropical summer),124 +flower shop,124 +finger tattoo,124 +fender jazz bass,124 +eugen (granblue fantasy),124 +eihi,124 +edwin (cyberdark impacts),124 +edoya pochi,124 +edgar roni figaro,124 +e-fa-dorn,124 +dulldull,124 +dot pupils,124 +doki doki majo shinpan,124 +crimecrime,124 +coat lift,124 +cloud retainer (genshin impact),124 +chon (chon33v),124 +chiyami,124 +chikaya,124 +cheunes,124 +chatot,124 +character hat,124 +catheter,124 +caracal ears,124 +caibao return,124 +buta (uhoiiotoko),124 +blood sword,124 +biya (1024),124 +banssee,124 +azusagawa tsukino,124 +azuma tokaku,124 +ayesha altugle,124 +avrora (azur lane),124 +ass hair,124 +arsk,124 +aoki ume (style),124 +anubis (houtengeki),124 +anizi,124 +amane sou,124 +alice360,124 +akanasu,124 +aito,124 +aco (koaya ako),124 +yuzuzukushi,123 +yune (ikoku meiro no croisee),123 +yukihama,123 +yukibuster z,123 +yonedatomo mizu,123 +yoiti,123 +yatsune rika,123 +yamifuka-san (hoshi san 3),123 +venice,123 +valvatorez (disgaea),123 +usuzawa sae,123 +upside-down kiss,123 +unbalance,123 +umagenzin,123 +uka (color noise),123 +touma kisa,123 +tom and jerry,123 +the hobbit,123 +the grim adventures of billy & mandy,123 +the baddest evelynn,123 +taunting,123 +takamoto akisa,123 +taikoubou,123 +tahita1874,123 +taedu,123 +sunny gurlukovich,123 +subaru (houkago no pleiades),123 +striped coat,123 +stratos 4,123 +squid game,123 +souya agb (kancolle),123 +soldier (tf2),123 +slug girl,123 +shuuichi (gothics),123 +shroomish,123 +shinozaki sayoko,123 +shinano yura,123 +shin'en-san (shin'en),123 +shihou haru,123 +shiba koen middle school uniform,123 +sarazanmai,123 +sanae (satansanae),123 +sakamoto tatsuma,123 +ryuubi,123 +ross tran,123 +romg,123 +rin (yu-gi-oh!),123 +rhasta,123 +red tie,123 +queue,123 +projector,123 +pontasu,123 +pokemon heroes: latios & latias,123 +podium,123 +pingo,123 +ping hai (summer vacation) (azur lane),123 +pd (pdpdlv1),123 +parasect,123 +osobachan,123 +oruto (ort+),123 +ootori subaru,123 +ongeki,123 +ok sign over eye,123 +ntw-20,123 +nonosaki,123 +night clothes,123 +netarou,123 +nachoneko,123 +muraryo,123 +mudo (saji),123 +moped,123 +monolith (suibou souko),123 +momoi satsuki,123 +mof's silver haired twintailed girl,123 +moby (elsword),123 +mo (mocopo),123 +miyafuji miina,123 +miwa kasumi,123 +mitsu king,123 +minowa gin,123 +minidraco,123 +minato mio,123 +minamisawa atsushi,123 +mihato senba,123 +miel (lessontome),123 +meiro (yuu),123 +mecha-fiora,123 +maruyo,123 +marco (one piece),123 +majiang,123 +machismo fuji,123 +lune zoldark,123 +lotus pedestal,123 +logix ficsario,123 +kuroame (kurinohana),123 +kuranami shiki,123 +krabby,123 +koto (kyousougiga),123 +kosuke (bb),123 +konoe yuba,123 +konarofu,123 +koga norio,123 +kiryuu kaoru,123 +kihane atsusane,123 +kaosu (kaosu0905),123 +kanzaki akari,123 +kantarou (8kan),123 +kaneda shoutarou's bike,123 +kandata nijou,123 +kaji 
ryouji,123 +kafra uniform,123 +jouvru,123 +jindai komaki,123 +jester (dq3),123 +jajala,123 +itsukia,123 +isou nagi,123 +isosceles triangle (xyzxyzxyz),123 +ishiusu,123 +ishigami yuu,123 +inu (puputizy),123 +inu777,123 +inaka keikou,123 +ibispaint (medium),123 +ibaraki douji (swimsuit lancer) (first ascension) (fate),123 +hu kotora,123 +holographic touchscreen,123 +hinata (pure pure),123 +hinata (blue archive),123 +himadarou,123 +hikimaru,123 +hiiro yuki,123 +higashikata daiya,123 +haniwagi (hal),123 +hands on stomach,123 +hakuurei amano,123 +grabbing another's hand,123 +gogoco,123 +glycine bleumer,123 +ginnkei,123 +giant hand,123 +gemini,123 +ganbaru pose,123 +gagaimo,123 +fuu (samurai champloo),123 +futomomomoe,123 +furnace,123 +fur-trimmed pants,123 +fujihara (haguhagu),123 +fuji yoshida,123 +frog headband,123 +flan (seeyouflan),123 +farrah (granblue fantasy),123 +eta,123 +eric cartman,123 +embroidered panties,123 +ekuesu,123 +egooo,123 +eelektross,123 +earlgrey,123 +digital hazard,123 +destroyer water oni,123 +demon cleric,123 +dazzle paint,123 +dark haired kappa,123 +danhu,123 +da mao banlangen,123 +crusader (tank),123 +con potata,123 +clea (geee13),123 +claudette (queen's blade),123 +chiaki kurihara,123 +charlotte corday (third ascension) (fate),123 +cathead,123 +carving,123 +carro armato p40,123 +calm mashiro,123 +calla lily,123 +bug (artist),123 +boxing shorts,123 +bomber hat,123 +boar hood,123 +baron (x5qgeh),123 +barbarian tk,123 +bannouyakunta,123 +balmung (fate/apocrypha),123 +baji keisuke,123 +ayakashi triangle,123 +atago hiroe,123 +ass on glass,123 +ashlock (arknights),123 +asagao to kase-san,123 +archvermin,123 +antlers through headwear,123 +antelope ears,123 +akasaka mamoru,123 +yukianesa,122 +yst,122 +yoyohara tsukasa,122 +youko (girls und panzer),122 +yeneny,122 +yanutorie,122 +yamada maya (infinite stratos),122 +wwe,122 +white towel,122 +white lily,122 +weight,122 +watanabe minori,122 +walther ppk,122 +viridi,122 +violette,122 +usagino suzu,122 +umiu geso,122 +two-tone neckwear,122 +tora jun,122 +tokiwata soul,122 +tights day,122 +terry (pixiv3274443),122 +taut sweater,122 +tanaka rie,122 +tamagotchi,122 +tama (aquarion),122 +takamachi nanoha (exceed mode),122 +suzuka (rekkyo),122 +sutei (giru),122 +super tama musume,122 +sugita tomokazu,122 +stitching,122 +stem,122 +star of lakshmi,122 +spareribs,122 +souryuu (double dragon),122 +slimy,122 +skull-shaped hat,122 +skeletal hand,122 +sishenfan,122 +sironora,122 +sirachi,122 +sin11111,122 +silver the hedgehog,122 +signalviolet,122 +siduri (fate),122 +shouryouuma,122 +shizuki michiru,122 +shishin (shishintei),122 +sentou mecha xabungle,122 +semi truck,122 +scathach skadi (swimsuit ruler) (fate),122 +satin dress,122 +sarutobi asuma,122 +saltwater crocodile (kemono friends),122 +sakuradou,122 +ryuu ryuu,122 +ryuu (tsukinoyuki),122 +roto (kanae),122 +roshiakouji-chan,122 +rocket punch,122 +rinnegan,122 +rikumoto yoshiyuki,122 +renka (cloudsaikou),122 +red jumpsuit,122 +project diva extend,122 +prinz eugen (profusion of flowers) (azur lane),122 +primm,122 +pointed footwear,122 +planted knife,122 +pekoni (peconi),122 +pearl7,122 +panda (jujutsu kaisen),122 +otoyomegatari,122 +original race uniform (umamusume),122 +oosanshouuo-san,122 +oomuro nadeshiko,122 +onsen symbol,122 +onceskylark,122 +once 11h,122 +old snake,122 +oimo (oimkimn),122 +nzeneee,122 +nose pinch,122 +nofuture,122 +no animal ears,122 +nishina toriko,122 +nicorima,122 +natsu natsuna,122 +naoki eguchi,122 +nanase miri,122 +nanao futaba,122 +namba emi,122 
+nakukoroni,122 +nagase minato,122 +murenase! shiiton gakuen,122 +murani,122 +moru,122 +miyazawa tsutomu,122 +mirakururu,122 +mint adenade,122 +miniru,122 +minior,122 +minato yoshihiro,122 +milinda brantini,122 +miitoban,122 +mega man 9,122 +mechanical spine,122 +matsunaga garana,122 +maru usagi,122 +maria renard,122 +mari0ball,122 +makai shin trillion,122 +mady (madine08260),122 +lumiere,122 +looking at screen,122 +le malin (sleepy sunday) (azur lane),122 +l ract,122 +l'arachel (fire emblem),122 +kuwahara taiki,122 +kugi ta hori taira,122 +koukaku no regios,122 +koga (pokemon),122 +kochipu,122 +koba,122 +kisaragi chiyuki,122 +kira yoshikage (jojolion),122 +kimura seiko,122 +khn (kihana),122 +kawasumi (japonica),122 +kasugano tobari,122 +karakusa (pattern),122 +kanosawa,122 +kaname tomohisa,122 +kajaneko,122 +k5 (girls' frontline),122 +k/da kai'sa,122 +isshuukan friends,122 +isaac clarke,122 +isa (peien516),122 +integra hellsing,122 +in bubble,122 +ihachisu,122 +hyakuya mikaela,122 +huziko32,122 +huyandere,122 +honda super cub,122 +hiyoshi hana,122 +himaneko.,122 +himamo,122 +herokick,122 +heijialan,122 +heart out of chest,122 +haya taro pochi,122 +hasu (velicia),122 +haruru minamo ni!,122 +haruki reimari,122 +gurifu,122 +greetload,122 +great grey wolf sif,122 +grandia ii,122 +ghost trick,122 +genny (fire emblem),122 +fuwa daisuke,122 +future style (module),122 +fuji tsugu,122 +enterprise (wind catcher) (azur lane),122 +en@rain,122 +elizabeth (gintama),122 +ecoman,122 +echidna (p&d),122 +duck hunt,122 +dressing room,122 +dojikko,122 +diner,122 +diablo,122 +dekasudachin,122 +dark cure (yes! precure 5),122 +darahan,122 +cure summer,122 +cubicle,122 +chuchu (show by rock!!),122 +chroneco,122 +chou shippai-saku,122 +chilchuck,122 +cherino (blue archive),122 +chabaneko,122 +can't show this,122 +calvin klein,122 +brown camisole,122 +bow swimsuit,122 +blue vert,122 +beige legwear,122 +bakeneko,122 +bai linqin,122 +avengers: infinity war,122 +assisted masturbation,122 +ashley winchester,122 +ashikaga chachamaru,122 +asa (1asa-0-asa1),122 +aro (charged time out),122 +annelotte,122 +amaichi esora,122 +alice (pandora hearts),122 +action figure,122 +34no404,122 +#unicus (idolmaster),122 +zukki (suzukio),121 +yuusa,121 +youkai hyakki-tan!,121 +yamaguchi (shinigami-no-barad),121 +y (khakiyawn),121 +wax play,121 +washing another,121 +vf-1j,121 +vayneeeee,121 +usamochi.,121 +two (tsu (lovesick1964)),121 +tutsucha illust,121 +tsurumi tokushirou,121 +triple-q,121 +toku (yhpv8752),121 +tk4,121 +thkani,121 +teroru,121 +tenbin gashira,121 +teikoku jokyoku,121 +takenaka,121 +take toshiaki,121 +supernew,121 +sugar (dndi888),121 +stylistic,121 +spacelongcat,121 +soramame (corndog),121 +slime taoshite 300 nen shiranai uchi ni level max ni nattemashita,121 +sinobi illust,121 +shortcake,121 +sho bu 1116,121 +sho (sumika),121 +shitacemayo,121 +shinsou hitoshi,121 +shikai,121 +shibafu (glock23) (style),121 +serizawa chikaru,121 +seraphina (disgaea),121 +sea scorpion (umisasori),121 +sazanami shione,121 +sawsbuck,121 +sawitou mizuki,121 +sao (0060),121 +sakakibara satomi,121 +sakai yuuji,121 +rupinesu,121 +rengar,121 +redlight,121 +quincy (warship girls r),121 +qiyana (league of legends),121 +premier ball,121 +post,121 +poo (mother 2),121 +plamja-sama,121 +pinocchio (sinoalice),121 +pink mercy,121 +paradox live,121 +paintale,121 +owain (fire emblem),121 +otosaka ayumi,121 +otonashi kyouko,121 +omochishiki,121 +omnic,121 +olive,121 +noivern,121 +noamem,121 +no-rin,121 +nise nanatsura,121 +nin nakajima,121 +niizuka 
(c-drop),121 +nekoita,121 +nekobayashi,121 +nari (cougar1404),121 +nanase riku,121 +mustard seeds,121 +muk,121 +mspaint (medium),121 +mouth piercing,121 +morrighan,121 +moekyon,121 +mochizuki momiji,121 +miyatsuki itsuka,121 +mitsumine (ookami no oyashiro),121 +mitsu yomogi,121 +miazuma sarin,121 +mg4 (girls' frontline),121 +meerkat tail,121 +medama oyaji,121 +maneki-neko (fujifuji),121 +makino kanna,121 +mahan,121 +luthica preventer,121 +lunamoon,121 +luimiart,121 +low-cut,121 +little girl saniwa (touken ranbu),121 +laoan,121 +kusunoki shio,121 +kuchiki byakuya,121 +kray foresight,121 +konohana saku,121 +komeiji koishi (cosplay),121 +koji (wildlhz),121 +kohinata aoi (dokidoki sister aoi-chan),121 +kisaragi kokoro (hazuki),121 +kikunosukemaru,121 +kiana kaslana (knight moonbeam),121 +kentairui,121 +ken (shutenndouji1),121 +katsudon (food),121 +kate (alice catharsis),121 +kashiwadokoro,121 +kano (wi3028),121 +kannagi rei,121 +kana (okitasougo222),121 +kachima,121 +juliet nao zhang,121 +johnny (guilty gear),121 +huyusilver,121 +hirasawa shizuku,121 +hikigaya komachi,121 +high-waist sideboob,121 +hayasaka akira,121 +hamada pengin,121 +haine,121 +hagoromo gitsune,121 +gurajio,121 +gradient nails,121 +gertrud (madoka magica),121 +german commentary,121 +genba neko,121 +garou (one-punch man),121 +gaius (tales),121 +fuussu (21-kazin),121 +futase hikaru,121 +furukawa wanosuke,121 +funikurikurara,121 +fujiwara hazuki,121 +fish request,121 +fenyon,121 +fearless night,121 +execution,121 +etna (kuzuyu),121 +esia mariveninne,121 +envy,121 +emurin,121 +drossel von flugel,121 +dragon costume,121 +domino high school uniform,121 +dogfight,121 +cynthia (fire emblem),121 +cottage,121 +code: battle seraph (elsword),121 +cinderella (sinoalice),121 +churro,121 +chijou noko,121 +cerberus (kemono friends),121 +celtic cross,121 +catwoman,121 +catsubun (kkst0904),121 +cait sith (ff7),121 +bokurano,121 +blowing in ear,121 +bloodline,121 +black blood,121 +big man (splatoon),121 +beanis,121 +bakura ryou,121 +ayase ena,121 +aya shobon,121 +axia-chan,121 +atoatto,121 +arunira,121 +aruma (sawayaka tokuko miruku),121 +ari hinoko,121 +aquila yuna,121 +ankle gun,121 +anegawa eiji,121 +anderain,121 +ameno shigure,121 +amamiya (abaros),121 +akudama drive,121 +akai ronii,121 +adyisu,121 +adjusting bow,121 +absolum art,121 +15 (tooka),121 +.hack//sign,121 +zombina,120 +zerocastle,120 +yuukichi,120 +yuuki1103,120 +yukino bijin (umamusume),120 +yui hiroshi,120 +yoshiwo (kanikama),120 +yoru (yowuyoru),120 +yamase,120 +wild flower,120 +wi-z garage,120 +whistlerx,120 +wachi yuri,120 +vistake,120 +v yuusha no kuse ni namaiki da r,120 +uruo,120 +urethral beads,120 +uno (game),120 +umetsu yasuomi,120 +type 91 armor-piercing shell,120 +tsurugi (the horizon),120 +tokyo sky tree,120 +ten no hoshi,120 +ten'inkou korin,120 +tamagogogo,120 +takio (kani sama),120 +takeuma,120 +takepon1123,120 +takahashi ren,120 +takahashi reiko,120 +syandega,120 +sura sura,120 +suomi (korvatunturi pixie) (girls' frontline),120 +sumeragi shion,120 +stretched limb,120 +stack (sack b7),120 +srw battle screen,120 +somersault,120 +solgaleo,120 +skeletal wings,120 +signal flag,120 +shun'ya (daisharin36),120 +shogo (4274732),120 +shizuko (swimsuit) (blue archive),120 +shizuka (queen's blade),120 +shirma,120 +shirase maki,120 +shionji yuuko,120 +shion uzuki,120 +shiki haruomi,120 +seaking,120 +scar on breasts,120 +satyr (granblue fantasy),120 +sand writing,120 +salazzle,120 +sailor moon narikiri bra set,120 +saga inu,120 +riki (xenoblade),120 +revice driver,120 
+reunion logo (arknights),120 +reroi,120 +reno (biggest little cheerleader) (azur lane),120 +recorder case,120 +ravenclaw,120 +ralf jones,120 +pumpkin scissors,120 +prostate,120 +print sash,120 +primamiya,120 +potion (moudamepo),120 +popukar (arknights),120 +popon ta,120 +popoi,120 +plant on head,120 +pickle,120 +peach11 01,120 +p7 (girls' frontline),120 +ots-12 (girls' frontline),120 +ornate,120 +orikuchi hirata,120 +oosawa maria,120 +ochakai shin'ya,120 +nori gorou,120 +noppo,120 +ninnin (shishitou),120 +night angel (last origin),120 +nidoqueen,120 +naruse maria,120 +nacht,120 +n-zap (splatoon),120 +myon (tokipi),120 +mushroom hat,120 +mush820823,120 +msp sammy,120 +mrdotd,120 +mosu (korikan),120 +monica kruszewski,120 +molotov cocktail,120 +mocha (snowflake),120 +miyake hinata,120 +minpei ichigo,120 +mikurun,120 +metroid: zero mission,120 +medli,120 +may,120 +matatagi hayato,120 +masato (mirai koubou),120 +maruneko,120 +marin (myuy 3),120 +makise minami,120 +mahou tsukai no yome,120 +luna (my little pony),120 +luffyko,120 +lokman lam,120 +leg wings,120 +lachesis (fire emblem),120 +kyukkyu-kun,120 +kusakabe satsuki,120 +kurasawa kyoushou,120 +kuma (jk0073),120 +kouichi eiji,120 +konabetate,120 +kiba manami,120 +kawasaki kazuhiko,120 +kase tomoka,120 +kanzen bouon,120 +kannazuki genshi,120 +kallen kaslana (sixth serenade),120 +kakinoki mikan (kari),120 +kaito (kaixm),120 +kageira,120 +kabocha usagi,120 +jingai makyou,120 +jenny (pokemon),120 +izra,120 +ira (dokidoki! precure),120 +interlocked mars and venus symbols,120 +holding hair ornament,120 +hizuki higure,120 +hiyamizu yukie,120 +hitsuji takako,120 +hinasumire,120 +hiiragi shino,120 +highly responsive to prayers,120 +higeneko,120 +heracross,120 +hazuki kyou,120 +haruyukiko,120 +harada makoto,120 +handlebar,120 +hand on own throat,120 +griseo,120 +grey tail,120 +gorgonzola (paradisegom),120 +golf ball,120 +ginyu force pose,120 +gigi d.g.,120 +gamerag,120 +ga rune pose,120 +fujisawa takashi,120 +florence nightingale (santa) (fate),120 +firing at viewer,120 +false arm,120 +esoragoto,120 +ducklett,120 +duca,120 +drunkoak,120 +dotted quarter note,120 +dickbomber,120 +denki,120 +dekosukentr,120 +dango hair ornament,120 +danfer3,120 +dance in the vampire bund,120 +cum bath,120 +crystal exarch,120 +cryamore,120 +corrector yui,120 +cody travers,120 +claire stanfield,120 +cicin mage (genshin impact),120 +chouun,120 +choco (chocovix112),120 +chloe valens,120 +cherno alpha,120 +cherche (fire emblem),120 +checkered apron,120 +champion (ragnarok online),120 +chamba,120 +cecilia (fire emblem),120 +catra,120 +casshern sins,120 +caee penguin,120 +bonkiru,120 +blsh,120 +blazblue phase 0,120 +bitten apple,120 +biting hair,120 +bird hat,120 +beruko14,120 +beitemian,120 +bard (final fantasy),120 +atha (leejuiping),120 +asterisk (idolmaster),120 +asakura hayate,120 +apollo (hu maple),120 +angel31424,120 +altera moontail,120 +alabama (azur lane),120 +akazawa izumi,120 +ajna (indivisible),120 +aislinn wishart,120 +adjustable wrench,120 +2 fuel 4 ammo 11 steel,120 +yuzuki yukari (vocaloid4),119 +yusao,119 +yuri leclerc,119 +yuna (sao),119 +yukishiro (hitsuji),119 +yo mo,119 +yaya hiyayaka,119 +yashiro momoka,119 +yanase miyuki,119 +yamataka,119 +yaki mayu,119 +whooo-ya,119 +wee (weeyy),119 +viper v16,119 +utomo,119 +utatane piko,119 +uraki (tetsu420),119 +ty lee,119 +tusk act1,119 +tunamayochan,119 +tugo,119 +tsuti,119 +tsukikanade,119 +tsugumi (guilty crown),119 +toyota saori,119 +toki (hokuto no ken),119 +tiankong pie ai,119 +terufuu,119 +tantei wa mou 
shindeiru,119 +tanaka kunihiko,119 +tanaka hirotaka,119 +takanashi akihito,119 +tahra,119 +tachibana yuu (yakitomato),119 +t.m.revolution,119 +surskit,119 +subaru (subachoco),119 +stuffed chicken,119 +steeb,119 +star guardian jinx,119 +south korean flag,119 +skirt rolled up,119 +sirokohi,119 +single vambrace,119 +shuumatsu no valkyrie,119 +shirai hinako,119 +shinonoko (tubamecider),119 +shining musume,119 +sesame seeds,119 +segway,119 +sega mega drive,119 +same (sendai623),119 +sakurai shin'ichi,119 +sakuma ritsu,119 +sakigake!! cromartie koukou,119 +sailor mercury (cosplay),119 +ryuunosuke (luckyneco),119 +rinhee,119 +rero (bigdoorbig2),119 +renzu (lens 02),119 +red tassel,119 +red panda girl,119 +recharging,119 +rebecca bluegarden,119 +princess peach (cosplay),119 +pooh,119 +pixie (megami tensei),119 +pink serafuku,119 +pink curtains,119 +pincurchin,119 +persica (girls' frontline),119 +patricia thompson,119 +paper doll,119 +paopa ship,119 +paffy pafuricia,119 +p-suke,119 +otako (galko),119 +orsola aquinas,119 +orange peel (artist),119 +okazuwa shake,119 +o (crazyoton46),119 +nyuubara reona,119 +nuko (shoujo shuumatsu ryokou),119 +novady,119 +noizu (noi hr),119 +nise maou kanizeru,119 +nira (vira),119 +nintendo 64,119 +nijou aki,119 +nichiru,119 +nezumidoshi,119 +neko daruma,119 +nancy lee,119 +nakamura sandayo,119 +murakami meishi,119 +mmmilk,119 +mixed martial arts,119 +mitsuba greyvalley,119 +mito mashiro,119 +mint (arknights),119 +meowstic (male),119 +menoziriath,119 +melodica,119 +mc lita,119 +marowak,119 +mao (expuella),119 +manda (manda9n),119 +mako (azuumori),119 +makirin,119 +makigai,119 +lollipop chainsaw,119 +leorio paladiknight,119 +leo de la iglesia,119 +leo/need (project sekai),119 +leather strap,119 +layer cake,119 +lap,119 +kyouyama anna,119 +kurono kito,119 +krono tokage,119 +kosobin,119 +konoe subaru,119 +kaze no klonoa,119 +katou teppei,119 +kashiwagi chisame,119 +izumi rei,119 +izumi kouhei,119 +izayoi miku,119 +individuals (idolmaster),119 +immaterial and missing power,119 +hinata hibari,119 +hevn,119 +heart arms duo,119 +harumi (harumix),119 +hanazawa kana,119 +habara meguru,119 +habara (danshi koukousei),119 +gunkanmaki,119 +gray bear,119 +grappling hook,119 +goron,119 +goldowl,119 +gladiator,119 +fuyuno kamome,119 +futaba hazuki,119 +fujisaka kuuki,119 +formicid,119 +flowerxl,119 +fire emblem (tiger & bunny),119 +female commander (azur lane),119 +etsunami kumita,119 +enomoto hina,119 +eagle spirit (touhou),119 +doromame,119 +doormat,119 +doku-chan (dokkudokudoku),119 +diathorn,119 +denpaken pochi,119 +denmark (hetalia),119 +delcatty,119 +days in a flash (umamusume),119 +dark jewel (gem oblivion),119 +cosmikaizer,119 +cornelia (umineko),119 +clothes gag,119 +claire harvey,119 +chitose (usacan),119 +chingling,119 +chaosringen,119 +centurion (tank),119 +catherine (fire emblem),119 +carim gracia,119 +cancer (zjcconan),119 +callan (callancoe),119 +burgh (pokemon),119 +braviary,119 +bird costume,119 +bing zizi,119 +azuazu 0405,119 +aura bella fiora,119 +asobi asobase,119 +asellus (saga frontier),119 +arudebido,119 +archeops,119 +anita king,119 +anastasia (swimsuit archer) (third ascension) (fate),119 +amy sorel,119 +alternate size,119 +almic,119 +akechi shizuku,119 +akai kitsune,119 +ai tenshi densetsu wedding peach,119 +adjusting hood,119 +adell (disgaea),119 +212 (kisaragi),119 +zetsumu,118 +zenkoku seifuku bishoujo grand prix,118 +yuri briar,118 +yoshino momiji,118 +yoshida hirofumi,118 +yoruhoshi owl,118 +yoo joonghyuk,118 +yawata maru (kancolle),118 +yang lee,118 
+yamikyon,118 +yamazaki tsukune,118 +white horse,118 +watanabe masafumi (masafumi 127),118 +visket53,118 +veilrain,118 +usuki (graygreed),118 +uniqlo,118 +ultra seven (series),118 +u-joe,118 +two-handed sword,118 +tsukioka kirio,118 +trigger heart exelica,118 +toramaru shou (tiger),118 +terasu mc,118 +tamaki ako,118 +taku michi,118 +takajo kyoji,118 +suzushiro haruka,118 +suzumori,118 +suou sakura,118 +sungwon,118 +sunburn,118 +studio,118 +stapled,118 +st bernard,118 +sports sunglasses,118 +spearow,118 +soriz,118 +sorano eika,118 +sonsoso,118 +sogiita gunha,118 +shirogane souju,118 +shimanoji,118 +screenshot,118 +scoreboard,118 +sawatari mitsuki,118 +satellizer el bridget,118 +sasucchi95,118 +sasakura momiji,118 +sakurasawa yukino,118 +sakanomachico,118 +saitou kengo,118 +rutile (houseki no kuni),118 +running on liquid,118 +royalwatts,118 +rem galeu,118 +rebutsu,118 +rato,118 +ratise,118 +ramb chop,118 +r-type,118 +quinzhee,118 +project upd8,118 +podenco (arknights),118 +plus step,118 +pito (sh02327),118 +penguin maru (penginmaru),118 +pectoral envy (meme),118 +pastime774,118 +pantheon (league of legends),118 +pandarou,118 +orange outline,118 +orange bullet,118 +okomeuma,118 +ohigetan,118 +object request,118 +obei teitoku,118 +nozama tomoko,118 +noname (nowhere),118 +nishizumi tsuneo,118 +niku (hamuchang),118 +nash (na-si),118 +narumi yuu (bunbukudou),118 +nappa,118 +nanami ayane,118 +nanam (nanam sk),118 +namekuji (namekuzu),118 +monsters inc.,118 +moe2020,118 +mochi (circle rin),118 +misumaru yurika,118 +miruki,118 +minapo,118 +mikudayoo,118 +mihatarou,118 +migu (migmig),118 +meimone,118 +megurine luka (cosplay),118 +medic 2 (sekaiju),118 +mechanic (ragnarok online),118 +meago,118 +matsuno chiya,118 +mashinatsu,118 +mango (mgo),118 +manana (matoi1111),118 +malariya,118 +mahito (jujutsu kaisen),118 +magna carta,118 +maeda koutarou,118 +macross zero,118 +lunacats,118 +lucifer (shingeki no bahamut),118 +lucie (millie parfait),118 +luca (yu-gi-oh!),118 +lsls,118 +locon,118 +lilith (fire emblem),118 +liclac,118 +lebring,118 +laphicet (tales),118 +lao meng,118 +lahmu (fate),118 +laharl-chan,118 +lady bat,118 +kure kirika,118 +kronshtadt (azur lane),118 +kouzuki yuniko,118 +kondou isao,118 +komadori renge,118 +kodama,118 +kisa (k isa),118 +kawakami shuuichi,118 +karasu raven,118 +kaita (mokamilkcup),118 +kaishaku,118 +kagura suzu (.live),118 +kagari ayaka,118 +jojobirdz,118 +jmao,118 +japan racing association,118 +jam-orbital,118 +jake the dog,118 +irouha,118 +irohasu (sasagarasu),118 +iris mysteria!,118 +inigo (fire emblem),118 +indian,118 +inaba himeko,118 +illyasviel von einzbern (cosplay),118 +ikura hato,118 +ichimonme (ichi),118 +ichihara yuuko,118 +hoshi ori yume mirai,118 +harenchi,118 +hanebado!,118 +hanayama (inunekokawaii),118 +hanako (jibaku shounen hanako-kun),118 +hanahanamaki,118 +haizai,118 +habanero pepper,118 +gundam mk ii,118 +goofy,118 +gokukoku no brynhildr,118 +gogo tomago,118 +gimp mask,118 +getter-1,118 +gen'ei wo kakeru taiyou,118 +gemini sunrise,118 +gebijiade 89,118 +gauna,118 +fn fnc,118 +fetus,118 +featherine augustus aurora,118 +fantongjun,118 +fankupl,118 +exeggcute,118 +essex (warship girls r),118 +enoshima junko (cosplay),118 +eliza (tekken),118 +eden (honkai impact),118 +dragon quest yuusha abel densetsu,118 +dododo dadada,118 +digimon story: cyber sleuth,118 +devilman crybaby,118 +denchu (kazudentyu),118 +dark konoha,118 +dabi (dabibubi),118 +cream the rabbit,118 +cr72,118 +comic x-eros,118 +cigarette kiss,118 +chuuta (+14),118 +character single,118 +carrot 
(robosquat),118 +carimarica,118 +camera around neck,118 +calculator,118 +buruma under skirt,118 +boy and girl sandwich,118 +black leopard (kemono friends),118 +bijin onna joushi takizawa-san,118 +ashiya hiro,118 +asashimo kai ni (kancolle),118 +arykei,118 +ao-shiba,118 +animegao,118 +angel gabriel.,118 +amasa mitsunaru,118 +amaretto (girls und panzer),118 +aken,118 +zzyzzyy,117 +zk (zk gundan),117 +zain (omaru polka),117 +yuuki nobuteru,117 +yutapon,117 +youjo (creek (moon-sky)),117 +yonerinoko (banberabu),117 +ylgr (fire emblem),117 +xue lu,117 +waddle doo,117 +vf-1s,117 +vandread,117 +uramakaron,117 +unyon,117 +uncle rabbit ii,117 +ttosom,117 +touge hiro,117 +tondamanuke,117 +toma tokage,117 +tayelle ebonclaw,117 +task owner,117 +takizawa kyouko,117 +ta girls school uniform,117 +swordfish,117 +sunaya yanokura,117 +sui (tsuruhibiki),117 +suction cup dildo,117 +stuffed fox,117 +strelka belca,117 +stella hoshii,117 +soil,117 +soccer field,117 +sky cappuccino,117 +shizuru (summer) (princess connect!),117 +shirakawa nanaka,117 +shiori (kamioka shun'ya),117 +shenaidi,117 +shark boy,117 +shadman,117 +sendou erika,117 +seele vollerei (starchasm nyx),117 +satosi,117 +sasurai susuki,117 +sasami-san@ganbaranai,117 +saionji reimi,117 +saiko dagashi,117 +ryuuneart,117 +ryochapu,117 +rowen j. ilbert,117 +rita rossweisse (fallen rosemary),117 +reido (reido c),117 +rebellion (sword),117 +rebecca streisand,117 +rassie s,117 +ran (9ens2000),117 +rackam (granblue fantasy),117 +pressing,117 +pet shop,117 +pepperoni,117 +pearl clan outfit,117 +pdxen,117 +panties under swimsuit,117 +pain (naruto),117 +pacific racing team,117 +onoe,117 +okoge senbei,117 +oka (umanihiki),117 +official cosplay,117 +o (rakkasei),117 +nut megu,117 +numako,117 +nowareno (higashi shino),117 +nowa,117 +nip to chip,117 +ninamori eri,117 +neko lu (wofud453),117 +natsume maya,117 +nabe0721,117 +myu-po,117 +muunai,117 +music box,117 +muscular child,117 +mozuya murasaki,117 +mospeada,117 +morisawa yuu,117 +mofu-mofu after school,117 +mobile,117 +mizunashi hayate,117 +micho,117 +michael jackson,117 +meloetta (aria),117 +matt (pokemon),117 +matsusatoru kouji,117 +martina crespi,117 +maro (lij512),117 +marisa (fire emblem),117 +map (object),117 +mao yu,117 +makita (twosidegekilove),117 +maita rui,117 +madobe nanami,117 +machinosuke,117 +m870 (girls' frontline),117 +m203,117 +lydia601304,117 +lycoris princess,117 +lost technology,117 +loped,117 +lk (lk00),117 +leo (mafuyu),117 +leiur darahim,117 +latex panties,117 +lailah (tales),117 +kyufe,117 +kyouna,117 +kurosaki ruri,117 +kurashima chiyuri,117 +kousa (black tea),117 +kmitty,117 +kitaooji satsuki,117 +kirifrog,117 +karasawa toshiyuki,117 +kanibasami,117 +kamishiro (rsg10679),117 +kamazuki suzuno,117 +kaiware-san,117 +kagami kira,117 +kaga (everlasting killing stone) (azur lane),117 +kaerre,117 +julius will kresnik,117 +joshi kousei,117 +jorin,117 +jinjin,117 +jet ski,117 +jasony,117 +jaga note,117 +jack van burace,117 +jack the ripper (fate/apocrypha) (cosplay),117 +izuna (gouma reifuden izuna),117 +izayoi (blazblue),117 +isekai ojisan,117 +inne sulistya robin,117 +inamimi (sheep1733),117 +iincho (airi8751),117 +ichikawa saasha,117 +hys-d,117 +hugo andore,117 +hosoda naoto,117 +horn piercing,117 +honoji,117 +honey and clover,117 +holding rock,117 +hitmonchan,117 +himegi you,117 +hidezi,117 +heoningu,117 +helios (sailor moon),117 +hekicha,117 +hat kid,117 +haku le,117 +guardian-panda,117 +greenkohgen,117 +goku black,117 +gloss,117 +ginko (nico),117 +geoffroy's cat (kemono friends),117 
+gema,117 +gatling santouhei,117 +ganbaruzoi,117 +gamers!,117 +fori,117 +fish costume,117 +female protagonist (pokemon go) (cosplay),117 +felicia (vampire) (cosplay),117 +eustass captain kid,117 +equipment layout,117 +elisabeth von wettin,117 +egg implantation,117 +dough,117 +dominica s. gentile,117 +diving regulator,117 +den (kur0 yuki),117 +dancer (ragnarok online),117 +cure coral,117 +cum swap,117 +cooperative footjob,117 +chiyo goya,117 +children's book,117 +chest tuft,117 +cglas,117 +cabbit,117 +brown male underwear,117 +broken egg,117 +bowling,117 +bowgun,117 +bobomaster,117 +bloody wolf (elden ring),117 +black mittens,117 +bernese mountain dog,117 +banboro (technobot),117 +arkatopia,117 +ardenlolo,117 +aranea highwind,117 +aozora middle school uniform,117 +anna (ikeuchi tanuma),117 +anchorage princess,117 +amulet heart,117 +american flag panties,117 +amei sumeru,117 +akihabara (tokyo),117 +aikawa tatsuki,117 +aikatsu on parade!,117 +ace rocket,117 ++++ (artist),117 +zygocactus,116 +zuki,116 +yuzuki tsuzuru,116 +yuusha ou gaogaigar final,116 +yellow collar,116 +yeero,116 +yaten kou,116 +yamato (battleship),116 +yagyuu juubei (hyakka ryouran),116 +white tiger (kemono friends),116 +wakame,116 +variable geo,116 +urokozuki,116 +urn,116 +uq holder!,116 +unaligned ears,116 +umbrella (skullgirls),116 +uekura eku,116 +ueda katsuhito,116 +tsuzura saki,116 +trailer,116 +toi (number8),116 +titi-chan (nezumi inu),116 +tirpitz (warship girls r),116 +tenpura (tenpura621),116 +teardrop-framed glasses,116 +taut shorts,116 +target practice,116 +tamanami (kancolle),116 +tamaki mari,116 +takeuchi takashi (style),116 +takase mizuki,116 +takapiko,116 +sylphine,116 +suzuragi karin,116 +suzune (senran kagura),116 +sunaba (nczd5875),116 +suisen toire (moko924),116 +sugisaki yuu,116 +succubus (ragnarok online),116 +strawberry milk,116 +stg44 (girls' frontline),116 +starting brighty (idolmaster),116 +spoken mars symbol,116 +sound wave,116 +slap mark on face,116 +single over-kneehigh,116 +silvia van hossen,116 +shokuyou koori,116 +shiika sadamasa,116 +senzaki ema,116 +selena (fire emblem fates),116 +sei zenra jogakuen,116 +sasamaru chimaki,116 +salt (seasoning),116 +sakuro,116 +sakurato tsuguhi,116 +sakebuotoko,116 +sakamata,116 +sadamatsu ryuuichi,116 +sabi (pokemon),116 +run elsie jewelria,116 +rudolph von stroheim,116 +reon (saikyou),116 +renekton,116 +recruiters (disney),116 +raven branwen,116 +rainbow print,116 +ragequit,116 +precis neumann,116 +praetor suit,116 +popuru,116 +pokkora,116 +pizza man,116 +piston ring,116 +pink innertube,116 +piiko (aa doushiyou),116 +persona q (series),116 +penlight,116 +pedicure,116 +panther tail,116 +palace,116 +otome youkai zakuro,116 +oshino meme,116 +orimura chifuyu,116 +oreshika,116 +orange male underwear,116 +ookanehira (touken ranbu),116 +omegamon,116 +nurami (shouji),116 +nori (norimakigumo),116 +noein,116 +nino (arakawa),116 +nimirom,116 +nightmare cat,116 +nidoran (female),116 +neptune symbol,116 +nectar (fujiya),116 +natsume (menthol),116 +natasha romanoff,116 +myuton,116 +mural,116 +mulberry (arknights),116 +muchigaku,116 +mouse boy,116 +moruga,116 +moriguchi nao (naonao),116 +moonbell,116 +mona (warioware),116 +mokumokuren (atariya kyoushitsu),116 +moko (mokochisa),116 +mizuamememe,116 +mimizuku (sky: children of the light),116 +mimizubare,116 +mille,116 +milihime taisen,116 +metindone,116 +memeno,116 +memekko,116 +marisayaka,116 +marco albiero,116 +mango,116 +mae (fire emblem),116 +m99 (girls' frontline),116 +lymle lemuri phi,116 +log-mi (tonpuu),116 +lium,116 
+linjie,116 +leo (senran kagura),116 +leizi (arknights),116 +kuraishi tanpopo,116 +kudou (sikisiki0000),116 +kriemhild (fate),116 +koutarosu,116 +kounoike tsuyoshi,116 +kotori (blue archive),116 +kono naka ni hitori imouto ga iru!,116 +kodama (mmt uf),116 +koda1ra,116 +kobayashi ritz,116 +kitsune-tsuki (getter),116 +kirishima kano,116 +kingchenxi,116 +kedama (kedama akaza),116 +kazakura,116 +kanda aya,116 +kamitsurugi ouka,116 +kaminary,116 +kamikaze explorer!,116 +kaizoku sentai gokaiger,116 +juuban high school uniform,116 +itou (mogura),116 +irotsuya,116 +irotoridori no sekai,116 +iron princess ymir,116 +imachireki,116 +ichinose honami (youjitsu),116 +ichimaru gin,116 +hyogonosuke,116 +hyodo rena,116 +humiyou,116 +howhow notei,116 +hose nozzle,116 +hoopa,116 +holding golf club,116 +hino ryutaro,116 +heart pouch,116 +hazumi otoya,116 +hasegawa keita,116 +haruyonoto,116 +haneoka meimi,116 +hachinatsu,116 +guttary,116 +goriyaku,116 +golem (pokemon),116 +gokujou seitokai,116 +gogo (detteiu de),116 +ginkgo guild uniform,116 +gimnang,116 +gertrude (umineko),116 +gegera,116 +gambling,116 +fuyuki (neigedhiver),116 +futaba hotaru,116 +fusion swords,116 +furai,116 +freedom gundam,116 +frappuccino,116 +florence nightingale (chaldea lifesavers) (fate),116 +flesh,116 +fle0423,116 +feiton,116 +father's day,116 +evomanaphy,116 +eusine (pokemon),116 +drop trap,116 +dokshuri,116 +diablo 3,116 +dewott,116 +destiny gundam,116 +darkpulsegg,116 +custom (cus-tom),116 +cure cosmo,116 +crystalherb,116 +colis (regunm772),116 +chris re5,116 +choujuushin gravion,116 +chingisu,116 +cheong ha,116 +chahei,116 +ceobe (unfettered) (arknights),116 +carmelina (granblue fantasy),116 +boys anti tank rifle,116 +blood on dress,116 +bleed through,116 +black knight (granblue fantasy),116 +belt bag,116 +axl low,116 +arm out of sleeve,116 +aoinu,116 +angela (project moon),116 +andou ruruka,116 +amakano ~second season~,116 +akira (viper),116 +akikazu mizuno,116 +\(^o^)/,116 +159cm,116 +zerodiamonds (voice actor),115 +yuuto (yu-gi-oh!),115 +yukichin,115 +yomi (p&d),115 +yelan xing xuan,115 +yaoto,115 +yamaboshi private high school uniform,115 +xes (xes 5377),115 +winchester model 1897,115 +wenquangua,115 +walkway,115 +vesperia,115 +utatanecocoa,115 +umou (may65879),115 +ukita uuko,115 +tumblr,115 +train 90,115 +toy sword,115 +toward the terra,115 +torikissa!,115 +toraneko 2,115 +tobi0728,115 +teruki kuma,115 +tekuteku (yuuhodou),115 +team plasma grunt,115 +tapu lele,115 +tanaka ken'ichi,115 +tanaka ayumu,115 +tales weaver,115 +sudou noboru,115 +strawberry cake,115 +spiral power,115 +spear of cassius,115 +spade-m,115 +sneasler,115 +sleep talking,115 +sima (startwitch),115 +shiro9jira,115 +shiranui (azur lane),115 +shiomiya shiori,115 +shiki (yuureidoushi (yuurei6214)),115 +sherryqq,115 +sheep boy,115 +seorang,115 +satou satoru,115 +satin underwear,115 +saolin (wudangx),115 +sano sanoko,115 +sailor mars (cosplay),115 +sage (ragnarok online),115 +sade abyss,115 +ryuu ga gotoku 1,115 +riko (k riko),115 +renpc,115 +reki (user rcrd4534),115 +reio reio,115 +red-eyes macadamiachoco,115 +rasukusekai,115 +punkish gothic (idolmaster),115 +puffy dress,115 +portrait of exotic girls,115 +pig ggul,115 +pekoneko,115 +ozaneko,115 +orniflop,115 +ohihil,115 +ogino jun,115 +ogasawara rinko,115 +odachu,115 +noctowl,115 +niwaka potato,115 +nishigoori lutz,115 +nishigoori loop,115 +nishigoori axel,115 +necrophilia,115 +nayuhi (yukimuu14),115 +nanaran,115 +nabi (uz02),115 +my life as a teenage robot,115 +mutsuno hekisa,115 +mustache print,115 
+mushisotisis,115 +musashino sekai,115 +murasaki saki,115 +mozuno (mozya 7),115 +mottsun (i 40y),115 +morino shoutarou,115 +mono (mono60),115 +momoiro,115 +mitama (fire emblem),115 +mirage (rairudiseu),115 +miketsukami soushi,115 +merrytail,115 +medb (swimsuit saber) (fate),115 +marumi,115 +mari (dream c club),115 +makarou,115 +makaimura,115 +maintenance musume (kancolle),115 +ma-2 (konkon kitakitsune),115 +llowoll,115 +leonardo 16sei,115 +lana (hyrule warriors),115 +kyoumachi seika,115 +kyokou suiri,115 +kuon (nokokopopo),115 +koukaku no pandora,115 +kokoro (doa),115 +kof: maximum impact,115 +klegsart,115 +kitano tomotoshi,115 +keqing (lantern rite) (genshin impact),115 +kayama benio,115 +kamekoya sato,115 +kako kai ni (kancolle),115 +kage yuu,115 +kagaminomachi no kaguya,115 +jonasan (bad-t),115 +ji guang-hong,115 +jecht,115 +jaffar (fire emblem),115 +jabberwock (monster girl encyclopedia),115 +j. league,115 +iwawa,115 +impossible spell card,115 +hosaka dx,115 +honami (yths4221),115 +holofive,115 +holding bomb,115 +hizuki miya,115 +high score girl,115 +hasebe akira,115 +hario 4,115 +happening18,115 +hand around waist,115 +hanazuka ryouji,115 +hamada kiyo,115 +hakuto (28syuku),115 +gum (vivid garden),115 +gochisousama (tanin050),115 +geso smith,115 +fujinohara akihira,115 +fizz (pixiv34498626),115 +fhang,115 +feng (skullgirls),115 +fate testarossa (cosplay),115 +ezo brown bear (kemono friends),115 +eymbee,115 +ereshkigal (third ascension) (fate),115 +emu (toran),115 +eiken,115 +drinking fountain,115 +dojo,115 +dog child (doitsuken),115 +dasdokter,115 +dark dream,115 +dandelion (girls' frontline),115 +curtain call challenge (meme),115 +crucifix,115 +cp9a,115 +cow girl (goblin slayer!),115 +cona kinaco,115 +colt single action army,115 +cogita (pokemon),115 +chloris garden,115 +cherrim (sunshine),115 +charles babbage (fate),115 +captain liliana,115 +blindfold slip,115 +black rock shooter (game),115 +bikkuriman (style),115 +bar soap,115 +baniran dorosu,115 +balflear,115 +bad apple!!,115 +ayanami (niconico) (azur lane),115 +asahi (uwa),115 +arqa,115 +arnold tsang,115 +angel chromosome xx,115 +aneros,115 +anchors (mono eye os),115 +amau (kosmos),115 +alios arvin,115 +akiyama yoshiko,115 +akesaka iku,115 +ajiro shinpei,115 +aihara shouta,115 +abi (abimel10),115 +aak,115 +9a-91,115 +7th dragon 2020-ii,115 +38 (sanjuuhachi),115 +zener card,114 +zb,114 +yuzuyu (hyui),114 +yuusha (maoyuu),114 +yuri shoutu,114 +yukinon (tsuki koubou),114 +yukiiti,114 +yoshiron,114 +yonoki,114 +yomawari (series),114 +ying yi,114 +yellowseeds,114 +yamiyono moruru,114 +yamadori ofuu,114 +yakou (4507770),114 +xude,114 +winona (pokemon),114 +warashi,114 +violin case,114 +viktoriya ivanovna serebryakov,114 +valkyrie drive -siren-,114 +uzuki tsukuyo,114 +uruha (yw1109),114 +uehara miyako,114 +uchida shou,114 +tsukimoto aoi,114 +tsuki wa higashi ni hi wa nishi ni,114 +transform (pokemon),114 +touhou sangetsusei,114 +touhou bougetsushou,114 +toranosuke,114 +tonegawa yukio,114 +tomo (ryo i so ),114 +tobisawa misaki,114 +thief (ragnarok online),114 +thea (nekojira),114 +terujirou (masateruteru),114 +terebi-chan,114 +tenjin kotone (channel),114 +tenjin kotone,114 +tamaki (princess connect!),114 +tamago (eva1314056),114 +takaoka yukari,114 +takanashi hiyori,114 +taiko sticks,114 +sylphiette (mushoku tensei),114 +swiss cheese,114 +swimmer (pokemon),114 +suzumetarou,114 +sugi 214,114 +standing on three legs,114 +spider-man (miles morales),114 +snowboarding,114 +skipping,114 +single bang,114 +sign language,114 +shutter,114 
+shokatsuryou,114 +shiratama dango,114 +shiny heart (alice girls),114 +shinanoya (satanicad),114 +shimoda masaya,114 +shiina aoi,114 +shichimiya satone,114 +shared coat,114 +secretary-san (zannen onna-kanbu black general-san),114 +sd bigpie,114 +saya (sayaya),114 +sanger zonvolt,114 +saginomiya isumi,114 +rude (ff7),114 +ronye arabel,114 +rong yi tan,114 +roco (rocoroco1115),114 +riyo (riyontoko),114 +rivier (kuzuyu),114 +rinne berlinetta,114 +ria,114 +red flag,114 +red carnation,114 +re:stage!,114 +radiostarkiller,114 +qtian,114 +poniko (lielos),114 +polorinken,114 +pikurusu,114 +pikipek,114 +percival (fate),114 +pepperbox revolver,114 +peanut (shokan),114 +paundo2,114 +passenger pigeon (kemono friends),114 +paperfinger,114 +oono imo,114 +ogera okera,114 +ofuda on head,114 +odysseus (fate),114 +nue,114 +npa (sirokurokuma),114 +norimaki (haru koubou),114 +nora to oujo to noraneko heart,114 +nokachoco114,114 +nishimori yusa,114 +nishihara tetsuya,114 +nexas,114 +nagiha kuten,114 +more more jump! (project sekai),114 +monsuu (hoffman),114 +miyashita miki,114 +mitu yang,114 +misha (pita ten),114 +misashi (raichi821),114 +mini cu-chan (fate),114 +mina tepes,114 +meteora osterreich,114 +meicha,114 +mei-fang,114 +matsunaka hiro,114 +master 1 (housamo),114 +mario kaneda,114 +maki soutoki,114 +mago (gengennikoniko),114 +maemi (maemi12),114 +madara sai,114 +luzi,114 +london inu,114 +little cocon (umamusume),114 +lim jaejin,114 +lesser panda (kemono friends),114 +lelei la lalena,114 +lavender dress,114 +larxene,114 +kyabakurabakufu,114 +kuziaaizuk,114 +kurohanya (niliu chahui),114 +kuro (zhurunbo1997),114 +kunaboto,114 +kuhouin murasaki,114 +koukoku,114 +kohige,114 +kohaku muro,114 +kock k,114 +kirisita,114 +kinoshita neko,114 +kiiro kimi,114 +kevin.g.tuck,114 +kate (shadows house),114 +kaos art,114 +kakiha makoto,114 +kaatsukun,114 +juukishi cutie bullet,114 +juliet starling,114 +jinbe (one piece),114 +izumi noa,114 +itou shin'ichi,114 +inward v,114 +instrument on back,114 +ichika (ichika87),114 +ichijou ayaka,114 +iceberg,114 +houkisei,114 +hinanosuke,114 +hilamaru,114 +hiiro (kikokico),114 +hentai elf to majime orc,114 +heart tail duo,114 +hatori piyoko,114 +haseru (ginku mh),114 +haseneko,114 +hands on another's knees,114 +hamaru (s5625t),114 +haku (grimjin),114 +hachimitsu hinako,114 +habanero-tan,114 +gumilkx,114 +guest art,114 +granblue fantasy (style),114 +gourai,114 +god of war,114 +go-kart,114 +gneisenau (nightmarish succubus) (azur lane),114 +giga omega,114 +ganesha (fate),114 +gakkou de seishun!,114 +fura,114 +fujisawa yayoi (uchuu no stellvia),114 +finneon,114 +fabri,114 +etyaduke,114 +equation,114 +engage kiss,114 +ein (long cake),114 +duckman,114 +dots,114 +doge,114 +dog on head,114 +dilation belt,114 +dieci (nanoha),114 +daraz,114 +dance dance revolution,114 +crow (gravity daze),114 +cress (pokemon),114 +coo,114 +coaster,114 +clover (lapis lazure),114 +cink-knic,114 +cho!cho!,114 +chiyonekoko,114 +chinese white dolphin (kemono friends),114 +cave interior,114 +card between breasts,114 +cafeteria,114 +brown sclera,114 +bomi,114 +body bridge,114 +blood-c,114 +beyblade: burst,114 +batatata77,114 +banjo (banjo-kazooie),114 +b (kana0816),114 +auko,114 +atsuko (blue archive),114 +aroa (aroamoyasi),114 +arikindows10,114 +arata (xin),114 +aqua cardigan,114 +applying sunscreen,114 +aochoku,114 +anosillus ii,114 +animal on back,114 +angelfish,114 +anapom,114 +amaya haruko,114 +amajiki tamaki,114 +akasia,114 +akagi kai ni (kancolle),114 +akademeia uniform,114 +airi (ogami kazuki),114 +against 
mirror,114 +aegis (takunomi),114 +a maru,114 +a-rise,114 +zhongye yu,113 +zentreya (vtuber),113 +yuzuyunagi,113 +yuutama2804,113 +yuria (kittyluv),113 +yuri lowell (reliable senior),113 +yukimura chizuru,113 +yamiiv,113 +yamako (state of children),113 +xiaobanbei milk,113 +wasabi shoujo,113 +vaporwave,113 +vajra (object),113 +utatane hiroyuki,113 +unexistarts,113 +unadare,113 +toujou aya,113 +toraneko,113 +too much food,113 +too many scoops,113 +togusa masamu,113 +toasty scones,113 +theresa apocalypse (luna kindred),113 +the monkey,113 +the atko,113 +taser,113 +taru neko,113 +tamaru tokihiko,113 +take tonbo,113 +takao (d-frag!),113 +taimadou gakuen 35 shiken shoutai,113 +syringe gun,113 +suzuno naru,113 +suzuki moeko,113 +sunset nostalgie (idolmaster),113 +subaru (brand),113 +stuffed mouse,113 +studded,113 +srpzk,113 +sniper (tf2),113 +small penis humiliation,113 +sitting on hand,113 +sierra mikain,113 +sho (shoichi-kokubun),113 +shinryou rei,113 +shining blade,113 +shimesaba kohada,113 +shibainu,113 +shangzi,113 +sero hanta,113 +satsuki misuzu,113 +sakurasaka,113 +sakaki yumiko,113 +saionji mary,113 +ryuuno6,113 +ryuujou kai ni (kancolle),113 +roubai academy school uniform (old),113 +rino cnc,113 +ring hair ornament,113 +riju,113 +resident evil 3 (remake),113 +reki (haibane),113 +regdic,113 +rana,113 +polpol,113 +polilla,113 +poco24,113 +piyomon,113 +pisoshi,113 +piro,113 +piloting,113 +phantasmagoria of dim.dream,113 +pedestrian bridge,113 +osaragi mitama,113 +osakabe-hime (swimsuit archer) (second ascension) (fate),113 +omoide no marnie,113 +ogata rizu,113 +odd taxi,113 +oak leaf,113 +novice (ragnarok online),113 +northern water princess,113 +northampton (kancolle),113 +natsuki shuri,113 +naruse shiroha,113 +naoki (2rzmcaizerails6),113 +nao (naaa 195),113 +namisaki yuka,113 +myaku-myaku,113 +mutou mame,113 +murabito,113 +moyachii,113 +moreshan,113 +moorish idol,113 +momo no suidou-sui,113 +momo (kancolle),113 +momo (breath of fire),113 +miyamaru,113 +mirufuaa,113 +mintoaisu,113 +minami kenjirou,113 +meron,113 +meihemu,113 +medusa (rider) (third ascension) (fate),113 +medic (tf2),113 +matsuo yuusuke,113 +maskwolf,113 +mashimaro tabetai,113 +masaki itsuki,113 +maryquant,113 +marumai,113 +martinreaction,113 +marsen,113 +marmaladica,113 +mario tennis,113 +manaka (pdx),113 +mahou shoujo lyrical nanoha reflection,113 +madhatter hello,113 +lone nape hair,113 +lipstick mark on crotch,113 +kuwahara sayako,113 +kurodeko,113 +kudou makoto,113 +ktokei (kurokku-tokei),113 +koujima shikasa,113 +kita (7kita),113 +kibutsuji muzan,113 +khezu,113 +kexue,113 +kazuoki,113 +kasumi seiki,113 +kaneko hiraku,113 +kamishiro rize,113 +kagaya kuki,113 +kaga kai ni (kancolle),113 +jyon,113 +joypyonn,113 +jin (sirius-j),113 +jh,113 +jesse (pixiv34586727),113 +irizaki mei,113 +inushima,113 +ini (mocomocccos),113 +implied tail plug,113 +ibukichi,113 +howa type 64,113 +hongryeon (last origin),113 +honda tohru,113 +homing (areya),113 +hiro1984,113 +hirabaru kenji,113 +hino (2nd life),113 +hidarikiki,113 +hi-nu gundam,113 +hero's son (dq5),113 +here (hr rz ggg),113 +hawkeye (marvel),113 +hato no suisou,113 +happy (fairy tail),113 +hagino chiaki,113 +grimgrimoire,113 +greave (asterism),113 +gold belt,113 +gloom (pokemon),113 +gentle sasaki,113 +gen (genetrix),113 +garyou,113 +gankutsuou,113 +futaba aoi (naomi),113 +full metal jacket,113 +fukunaga yukito,113 +fue (tsuzuku),113 +food writing,113 +flare,113 +fish tattoo,113 +fiona belli,113 +feather collar,113 +facominn,113 +enoki p,113 +enma-chan,113 +elisabeth 
blanctorche,113 +elf all-stars datsuijan,113 +eichan (eichanidfi),113 +ei (marutendon),113 +ecclesia (yu-gi-oh!),113 +dvalin (genshin impact),113 +dream catcher,113 +domination,113 +dog (duck hunt),113 +diantha (granblue fantasy),113 +diamond cutout,113 +delusion (genshin impact),113 +dead-robot,113 +dateya torahachi,113 +dateko,113 +danemaru,113 +cure ange,113 +colossus,113 +cicada block (meme),113 +chitose yuma,113 +chikiso,113 +chamchami,113 +chahashiraozen,113 +cecile croomy,113 +cat slippers,113 +carcano m91/38 (girls' frontline),113 +cafe sourire,113 +brush stroke,113 +broken umbrella,113 +bolter,113 +boku no kanojo sensei,113 +bois de justice,113 +bayashiko,113 +baltimore (muse) (azur lane),113 +bachi,113 +b.c.n.y.,113 +ayahi 4,113 +auruo bossard,113 +athena (granblue fantasy),113 +asbestos (arknights),113 +article,113 +ariados,113 +archerko (himura kiseki),113 +aora,113 +animal pillow,113 +angelise ikaruga misurugi,113 +amg (nwmnmllf),113 +amakusa tobari,113 +alolan ninetales,113 +akatsuki blitzkampf,113 +airsoft,113 +ai-chan (honkai impact),113 +aer (tengqiu),113 +ace combat zero,113 +1=2,113 +yuuryuu nagare,112 +yurishiro ginko,112 +yumi (careca398),112 +yubel,112 +youhe hino,112 +yamada masaki,112 +wargreymon,112 +wakie,112 +vient,112 +vice (alchemy stars),112 +usuta sumire,112 +under umbrella,112 +uki violeta,112 +twinbox school,112 +tsukimi (ryouiki ridatsu),112 +tsubasa (kureha),112 +tonmoh,112 +tomatika,112 +thorfinn,112 +theresa apocalypse (celestial hymn),112 +tetrapod,112 +tetopetesone,112 +tanyatonya,112 +takepon,112 +takatsukasa yue,112 +takatisakana,112 +tail fondling,112 +sylvanas windrunner,112 +sybilla,112 +swim swim,112 +sv001 (metal slug),112 +sunday aki,112 +souno kazuki,112 +sochie heim,112 +shuvi (shuvi1125),112 +shirubaburu,112 +shiratama kitsune,112 +shibano kaito,112 +seteth (fire emblem),112 +servine,112 +sensory deprivation,112 +sekitaku,112 +secretbusiness,112 +script,112 +sawarakajin,112 +saturn symbol,112 +sakayaki (burning soul),112 +saitou kaede (yama no susume),112 +rozea (graphmelt),112 +rooru kyaabetsu,112 +ron weasley,112 +renatus.z,112 +remotaro,112 +reiko holinger,112 +rectal prolapse,112 +re:lief ~shin'ai naru anata e~,112 +ran straherz,112 +raiden mei (striker fulminata),112 +raiden mei (lightning empress),112 +princess (sekaiju),112 +primogem,112 +porom,112 +pokopi,112 +pocky in mouth,112 +plaid coat,112 +pk (girls' frontline),112 +pink nightgown,112 +picoli1313,112 +petra leyte,112 +petra johanna lagerkvist,112 +pendulum clock,112 +peach panther (kemono friends),112 +passionlip (third ascension) (fate),112 +osamu,112 +ootani nonno,112 +onishima homare,112 +okujou no yurirei-san,112 +oboe,112 +nyorotono,112 +not on shana,112 +nobuchi,112 +nekoha shizuku,112 +neckerchief removed,112 +nanpou (nanpou0021),112 +nana (nanalog76),112 +namuko,112 +namikawa kuroha,112 +nameless bard (genshin impact),112 +namakawa,112 +nail clippers,112 +nagomian,112 +nagidango,112 +nadja applefield,112 +mysterious heroine x alter (third ascension) (fate),112 +motoba kirie,112 +moth1,112 +monpuchi,112 +momoiro taisen pairon,112 +model ship,112 +mito (calcomer),112 +minazuki kyouko,112 +mikicat,112 +meji aniki,112 +medical scrubs,112 +mantyke,112 +makoto (minami-ke),112 +maki natsuo,112 +majorina,112 +magako,112 +m0ti,112 +luminous witches,112 +lucy steel,112 +loli bushi,112 +loincloth lift,112 +licking navel,112 +liar lawyer,112 +leng wa guo,112 +ledo (suisei no gargantia),112 +le triomphant (azur lane),112 +lazy eye,112 +layered legwear,112 +laguna loire,112 +lace-up 
thighhighs,112 +kyousaku,112 +kyapinetzu,112 +kusakabe mei,112 +kurumi noah,112 +kurono mitsuki,112 +krazehkai,112 +kouguchi moto,112 +kotone11152,112 +kotobuki utage,112 +kokoyashi,112 +kokoro (vocaloid),112 +kiyuu,112 +kitamura (bamboo),112 +keroro7,112 +keiki8296,112 +kava,112 +katsumi-kun,112 +kashi kosugi,112 +karo-chan,112 +kani nayuta,112 +kami tora,112 +kakura kurumi,112 +kakuna,112 +kairos+,112 +joka (night gate),112 +jenson tw,112 +japanese black bear (kemono friends),112 +idora (idola),112 +ibuki (abuki),112 +hyulla,112 +huge clitoris,112 +horoyuki (gumizoku),112 +horikwawataru,112 +horiizumi inko,112 +honshou aru,112 +hitsujibane shinobu,112 +hijirikawa masato,112 +high school dxd cross,112 +head on knees,112 +harukaruha,112 +harpie lady,112 +hamburglar,112 +hachiko of castling,112 +graysheartart,112 +gorillaz,112 +goma (11zihisin),112 +ghostly field club,112 +gaze on me! outfit (umamusume),112 +gamryous,112 +gad guard,112 +fuooooo,112 +frey (rune factory),112 +frederica sawyer,112 +foul detective satori,112 +fl studio,112 +favonius sword (genshin impact),112 +fanshu,112 +fan yang (jiuge),112 +eyewear around neck,112 +empty,112 +emilio (tetsukazu no ao),112 +elza straherz,112 +elucidator,112 +elma (xenoblade x),112 +elekid,112 +dragon arm,112 +donkey ears,112 +dodrio,112 +doctor who,112 +dm owr,112 +detective pikachu (movie),112 +dennryuurai,112 +dcwj,112 +dave cheung,112 +dark sun gwyndolin,112 +cul,112 +cucco,112 +crownslayer (arknights),112 +comiket 97,112 +colonel olcott (fate),112 +coffee-milk-moumou,112 +codpiece,112 +clockwork rabbit,112 +cloak removed,112 +cizzi,112 +chou (meteorite3),112 +chichi band,112 +cheese (cheese koubou),112 +censored violence,112 +casshern,112 +canyne,112 +canvas 2,112 +caisena,112 +buzzwole,112 +brushing,112 +brown overalls,112 +bobokuboboku,112 +blaze (artist),112 +biting hand,112 +bicute bunnies miku,112 +barbara gordon,112 +bamboo screen,112 +asuka momoko,112 +ashiyu (ashu-ashu),112 +aoi shiro,112 +annelotte (princess knight),112 +anko (kirikiri),112 +ambriel (arknights),112 +amamiya manami,112 +alolan marowak,112 +akiyama hayato,112 +7-tan,112 +zurikishi,111 +zeroillya,111 +zed (league of legends),111 +zack (haiuinndo),111 +yuuya (blue scarab),111 +yuriyuri (ccc),111 +yuksi,111 +you06,111 +yottan,111 +yashemao qishi,111 +yamada yui,111 +yamada ichizoku,111 +waltz (tram),111 +wakaba hinata,111 +venipede,111 +utakata,111 +uchiha madara,111 +tsukimori hiro,111 +ts422,111 +triangle (instrument),111 +trail,111 +toucannon,111 +tooru (jux),111 +toku sonchou,111 +toki (toki ship8),111 +tibino,111 +terai (teraimorimori),111 +tenmiyakiyo,111 +tenko kuugen,111 +tazuma (u283),111 +tasting plate,111 +tanaka masayoshi,111 +tama satou,111 +takatsuki kahiro,111 +takanashi hikari,111 +taillow,111 +tail flower,111 +tadokoro teppei,111 +tabris-xx,111 +super mario sunshine,111 +suou kuyou,111 +stalagmite,111 +spread anus under clothes,111 +sousou,111 +someyaya,111 +slm,111 +sina (pokemon),111 +siesta (tantei wa mou shindeiru),111 +shirota69,111 +shiroaisa,111 +shiimai,111 +shigure (fire emblem),111 +shiba yuuki,111 +seo kouji,111 +semikichi,111 +sella (fate),111 +scorpion5050,111 +satou (kuso-neet),111 +sasagawa (haikaiki),111 +saotome kazuko,111 +sakurai aoi,111 +sakomizu haruka,111 +saine,111 +sabotaged condom,111 +rsk (tbhono),111 +rosmino,111 +roro (gunvolt),111 +riyo servant (bunnygirl) (fate),111 +rina (kemurikusa),111 +recolored,111 +re leaf,111 +ramochi (auti),111 +pupi (rain prophet),111 +ppsh-41 (girls' frontline),111 +power symbol-shaped pupils,111 
+poho,111 +playground equipment (kemono friends pavilion),111 +planted arrow,111 +phenne,111 +phat smash,111 +period (anony 83),111 +pellas (panix2383),111 +pekora (jashin-chan dropkick),111 +overwatch (logo),111 +orange sekaii,111 +ohse,111 +ogawa maiko,111 +nyaou,111 +nyantiu,111 +nosepass,111 +non-repeating animation,111 +nocchi (perfume),111 +no (xpxz7347),111 +niwabuki,111 +nine-colored deer,111 +neon genesis evangelion gakuen datenroku,111 +naoki (endofcentury102),111 +nanakusa,111 +n kamui,111 +morizo cs,111 +mole on leg,111 +mizukikushou,111 +mizuki yuuma,111 +miyashiro sousuke,111 +mitsukazu (nijigen complex),111 +mint (dewprism),111 +minakami rinka,111 +mimori suzuko,111 +mimamui,111 +mikagura gakuen kumikyoku,111 +mika melatika,111 +menthako,111 +menotama,111 +medusa (rider) (fate) (cosplay),111 +matanukinuki,111 +masuda (yousaytwosin),111 +mars (planet),111 +manicure,111 +maitora,111 +magaeshi,111 +lkeris,111 +lil-la (yu-gi-oh!),111 +liita (dusk snow),111 +lee hoon,111 +kuri (animejpholic),111 +kurappii,111 +kumakou,111 +kouzuki (reshika213),111 +konogi nogi,111 +konno mitsune,111 +komiya kuniharu,111 +kokkoro (real) (princess connect!),111 +kokiri,111 +klaus von reinhertz,111 +kinoshita shizuka,111 +kinniku buster,111 +kiichi hougen (fate),111 +kettenkrad,111 +ken-ji,111 +kazuha nanako,111 +katou kaiou,111 +katamari damacy,111 +kashiyuka,111 +kaolla su,111 +kaniko (tsukumo sana),111 +kamen rider black rx,111 +kajimakoto,111 +kageharu,111 +kagarimachi konatsu,111 +kaga (kancolle) (cosplay),111 +juzumaru tsunetsugu,111 +jenny wakeman,111 +japanese white-eye,111 +izayoi no hanayome,111 +idia shroud,111 +hybridmink,111 +hooded pajamas,111 +honey whip (module),111 +holding anchor,111 +hisako (6anmbblfnjueeff),111 +hirooka masaki,111 +hiita the fire charmer,111 +hicha nomu,111 +herb bundle,111 +hazard stripes,111 +harui (hr x9 ),111 +hare (blue archive),111 +hands on own leg,111 +guillotine,111 +grimmsnarl,111 +grey pupils,111 +gradient bikini,111 +giant penguin (kemono friends),111 +galuf halm baldesion,111 +funada kiito,111 +fua yuu,111 +frosmoth,111 +fox print,111 +florian (pokemon),111 +false limb,111 +expo2025,111 +etomai,111 +enumiyaa,111 +dragon yukano,111 +dipper pines,111 +dick grayson,111 +diancie,111 +danna (tsukimisou),111 +daisy (dq),111 +daifuku,111 +cupless babydoll,111 +cu chulainn (second ascension) (fate),111 +creamy mami,111 +cone,111 +coffee grinder,111 +climbing ladder,111 +clara dolls (madoka magica),111 +cl (summer sama),111 +choko egg,111 +choi mochimazzui,111 +chabudai (table),111 +cerberus (last origin),111 +celestia (my little pony),111 +carly nagisa,111 +blue santa costume,111 +black cat (marvel),111 +black armband,111 +beast boy (dc),111 +bd ayknn,111 +bankai,111 +bakko,111 +bai qi-qsr,111 +azusa mifuyu,111 +asahina mikuru (adult),111 +arai hiroki,111 +apple tree,111 +aoi thomas,111 +aoi kyosuke,111 +anna (drw01),111 +andou ringo,111 +ancolatte (onikuanco),111 +an-94 (silent rouge) (girls' frontline),111 +amifumi inko,111 +amano kusatsu,111 +akuta hinako,111 +aizen (syoshiyuki),111 +add (fate),111 +act (xadachit),111 +abomasnow,111 +a deer of nine colors,111 +666,111 +zodiac (sekaiju),110 +yykuaixian,110 +yuuki terumi,110 +yunkaasu (kakushiaji),110 +yu (bosshi),110 +yokaze (yokajie),110 +yanagi kiyora,110 +yamaguchi ugou,110 +yamada ranga,110 +yae sakura (darkbolt jonin),110 +yae kasumi,110 +wo-class aircraft carrier (cosplay),110 +western parotia (kemono friends),110 +weight gain,110 +warhammer fantasy,110 +wanpaku pixels,110 +wakazato haruna,110 
+venonat,110 +ushas,110 +urobuchi gen,110 +unkempt,110 +umxzo,110 +uewtsol,110 +tsukiji,110 +tpi ri,110 +torajirou (toraneko zirou),110 +toon zelda,110 +tianel ent,110 +they're not panties,110 +theresa apocalypse (sakura rondo),110 +taroumaru (genshin impact),110 +tanaka ginji,110 +tadokoro megumi,110 +suzu (torikissa!),110 +suv,110 +super robot wars z2,110 +sumii,110 +subdermal port,110 +studio sunadokei,110 +striped towel,110 +string tie,110 +stiyl magnus,110 +starless,110 +spiked shoes,110 +spas-12 (midsummer fruit) (girls' frontline),110 +sonic boom (game),110 +somejima,110 +sig sauer mpx,110 +shoji sakura,110 +shionty,110 +shinnasuka025,110 +sengoku kamuri,110 +seki suzume,110 +sega saturn,110 +see-through bra,110 +seaside,110 +satou sasara,110 +sasuke (sasuke no sato),110 +sakyumama (kedama milk),110 +sakuragi rian,110 +sailing ship,110 +saeki sayaka,110 +ryudraw,110 +ruan chen yue,110 +rokushaku neko,110 +rikose,110 +rey (star wars),110 +retweet (meme),110 +resident evil revelations,110 +rena (renasight),110 +ren (endscape20),110 +red (sygna suit) (pokemon),110 +rean (r ean),110 +raised fists,110 +rahato,110 +purple haze (stand),110 +pulp piroshi,110 +pov dating,110 +ponnetsu,110 +pokemon cafe mix,110 +playboy bunny swimsuit,110 +pkpkpppk,110 +picube525528,110 +paprika private academy school uniform,110 +p.i.t.d,110 +otona no moeoh,110 +otoca d'or,110 +ojay tkym,110 +octoshot (splatoon),110 +nyx,110 +nurugamer-kouyouju,110 +nise (basilsis),110 +ninomae ina'nis (artist),110 +ning hai (warship girls r),110 +niji (nijioki),110 +nekonyan (inaba31415),110 +nekomiya nono,110 +natsuki coco,110 +nanase kurumi (menhera-chan),110 +nametake,110 +montage,110 +mont blanc (heartcatch ayaya),110 +mona (genshin impact) (cosplay),110 +momoya show-neko,110 +momimaki,110 +mo (pixiv9929995),110 +miru,110 +miriel (fire emblem),110 +minamoto no raikou (swimsuit lancer) (first ascension) (fate),110 +mikasayaki,110 +midarezaki kyouka,110 +messikid,110 +megane chuu,110 +matador,110 +martini,110 +marron,110 +magus tale,110 +mafumafu,110 +macciatto (aciel02),110 +lo xueming,110 +liumang tu shua p zhan,110 +lino chang,110 +lickitung,110 +licking cum,110 +lene (fire emblem),110 +lalala222,110 +labret piercing,110 +kuruton486,110 +kurosawa kiyotaka,110 +kurabayashi aya,110 +kozu (bloomme1 me),110 +kotarou (rakugaki shirushi),110 +kotaro-nosuke,110 +koiiro soramoyou,110 +koi drake,110 +kinukawa chinatsu,110 +kinu (azur lane),110 +king (nadia),110 +kikuchi michitaka,110 +kekekeke,110 +kashi,110 +karuizawa kei,110 +karakuri musha,110 +kanna kanaki,110 +kamo (yokaze),110 +kainkout,110 +kaifuku jutsushi no yarinaoshi ~sokushi mahou to skill copy no chouetsu heal~,110 +kagto (alterna),110 +kaauchi,110 +johnathan mar,110 +john (a2556349),110 +jewelry bonney,110 +jet (pw3234),110 +itou noemi,110 +itano circus,110 +ishida arata,110 +inika,110 +idol time pripara,110 +iceland (hetalia),110 +hoshina meito,110 +hop3,110 +hood basket,110 +holding shell,110 +hiruma andon,110 +hinomoto reiko,110 +heybot!,110 +henriette mystere,110 +hechi (hechi322),110 +hebameki,110 +hawlucha,110 +harmony's clownfish (splatoon),110 +hanayagi kaoruko,110 +godzilla (legendary),110 +glowworm (warship girls r),110 +galarian rapidash,110 +gabumon,110 +fur bracelet,110 +fumizuki misoka,110 +fumiko (miruyuana),110 +fujimoto akio,110 +flight goggles,110 +fingerprint,110 +fediel (granblue fantasy),110 +fct,110 +eye (okame nin),110 +evolution championship series,110 +estinien varlineau,110 +eriko (summer) (princess connect!),110 +enperuto (yarumi),110 
+ellipsis (mitei),110 +ekm,110 +eiji (monochromexd),110 +ebisu (dorohedoro),110 +dyresbroom,110 +donkey,110 +domestic na kanojo,110 +digimon savers,110 +devil (housamo),110 +dangan neko,110 +crystal shoujo,110 +cross akiha,110 +croconaw,110 +claus (mother 3),110 +clare (543),110 +chungmechanic,110 +chiri (o9o9),110 +chili (pokemon),110 +chao lingshen,110 +captain syrup,110 +bruno (pokemon),110 +blue moon,110 +blue corset,110 +blitzen,110 +black bridal gauntlets,110 +billy kane,110 +belt chain,110 +azure striker gunvolt 2,110 +ayukko (forest village),110 +attyon,110 +atawatsho,110 +atago kinue,110 +ashina merou,110 +artnip,110 +arachnid,110 +aotsu karin,110 +aono (aonocrotowa),110 +aoihitsuji,110 +aoi yusuke,110 +aoba kozue,110 +anti (ssss.gridman),110 +anny (yoai),110 +angel blade,110 +amakura (am as),110 +aluce,110 +alternate design,110 +akatsuki no yona,110 +akatsuki ikki,110 +akane yuki,110 +akagi rio,110 +aka6,110 +ak-74m,110 +achakura,110 +acesrulez,110 +ace combat 7,110 +absol (dkqthf),110 +a (aaaaaaaaaaw),110 +a.t. zero,110 +723/nanahumi,110 +300,110 +147,110 +00 qan[t],110 +zhi zhi/zu zu,109 +zelsius,109 +zamazenta,109 +yuuki mix,109 +yoshioka mitsuko,109 +yellow quokka,109 +yatani row,109 +yat sen (azur lane),109 +yamiko,109 +yamayoshi tanosuke,109 +yamano (yamanoh),109 +wumumu,109 +wounds404,109 +wiping nose,109 +wei miao,109 +wato (ko),109 +water tank,109 +watanabe no tsuna (fate),109 +vulcan (arknights),109 +vault suit,109 +valve,109 +utsugi (skydream),109 +unagi sango,109 +uhouhogorigori,109 +ueda torini,109 +tuka luna marceau,109 +tsukuyomi ai,109 +tower of god,109 +toriningen,109 +tonton (tonz159),109 +tomifumi,109 +toma (shinozaki),109 +toketou,109 +toe sucking,109 +toaru majutsu no index: endymion no kiseki,109 +tks (526tks),109 +tita russell,109 +thief,109 +teeta j,109 +te toga,109 +tatara kogasa (umbrella),109 +tanupo,109 +tailblue,109 +suzushi moruto,109 +suzume (princess connect!),109 +suta (clusta),109 +super mario world,109 +suishin tenra,109 +stroller,109 +single elbow pad,109 +siegfried (granblue fantasy),109 +siberian chipmunk (kemono friends),109 +shouhou (azur lane),109 +shoohee,109 +shocking party,109 +shiroyukimajima,109 +shinsono shiroko,109 +shingetsu takehito,109 +shin kouchuu ouja mushiking,109 +shimetta seiya,109 +shijuuhatte,109 +sekimo,109 +saint-louis (holy knight's resplendence) (azur lane),109 +runaru,109 +rope around neck,109 +roman torchwick,109 +ritsuki,109 +rin (fuwarin),109 +repulse (azur lane),109 +reptile,109 +reno (reno bunnino) (azur lane),109 +redpoke,109 +qlakwnd,109 +puyopuyo 7,109 +pump,109 +puffy lips,109 +puff (go! 
princess precure),109 +psd available,109 +porary,109 +pocket square,109 +play button,109 +pink bandana,109 +ping myu ring (tandohark),109 +phantasy star portable 2 infinity,109 +pepper fever,109 +paseri (cookie),109 +parent and child,109 +pansear,109 +panpour,109 +orihime,109 +ophelia (madoka magica),109 +onigiri (ocelot),109 +onboro (clunker),109 +onao,109 +okapi (kemono friends),109 +nougami neuro,109 +noshiro kai ni (kancolle),109 +norue,109 +normin (tales),109 +nonaka yuki,109 +nogi (acclima),109 +no neckwear,109 +nina (breath of fire i),109 +neshia (tsaqif0309),109 +nekoneko,109 +nashidrop,109 +naoyama masaru,109 +naoto (yandereheaven),109 +nanamura,109 +namamake,109 +nakachiruno,109 +nagare ryoma,109 +n'doul,109 +myowa,109 +mygrimoire,109 +monaka ooji,109 +mmrailgun,109 +mk (masatusaboten),109 +mitsuki mitsuno,109 +mirea,109 +melailai,109 +medb (alluring chief warden look) (fate),109 +marshadow,109 +maron (kagamikunn),109 +marin (umi monogatari),109 +marie antoinette (swimsuit caster) (third ascension) (fate),109 +mamiina,109 +maid leotard,109 +mago,109 +magical mirai miku (2020 summer),109 +m k,109 +luke skywalker,109 +living weapon,109 +little match girl,109 +lacrosse,109 +lace-trimmed apron,109 +lab zero games,109 +kyudoli,109 +kutsuno,109 +kuso zako choroin nishiga hachi,109 +kusano houki,109 +kuroneko liger,109 +kureha yuna,109 +kurano kun chi no futago jijou,109 +kouhei (sxmas),109 +kosaka chihiro,109 +kobayashi chizuru,109 +kikai sentai zenkaiger,109 +keikesu,109 +kawasaki,109 +kasai amane,109 +karakuri pierrot (vocaloid),109 +kamitsuki shion,109 +kaieda hiroshi,109 +kaburagi tomoe,109 +jurakin,109 +jiro (ninetysix),109 +j young,109 +izumi (nagashi),109 +ishiyuki,109 +iris (takunomi),109 +iris (konosuba),109 +inui nakiru,109 +incoming call,109 +in cage,109 +i.t.o daynamics,109 +i-504 (kancolle),109 +hua jianghu zhi bei mo ting,109 +hototogisu (hot to gis),109 +hoshina satoya,109 +hiyori (princess connect!),109 +hinomaru (kotoba),109 +hinazuka ryou,109 +himegoto,109 +hikabe sakuho,109 +higokumaru,109 +hayabusa (spacecraft),109 +harumi kajika,109 +hands on eyewear,109 +halftone texture,109 +gufu (guffuumu),109 +grass skirt,109 +golurk,109 +glue,109 +giant pangolin (kemono friends),109 +g3 (girls' frontline),109 +funnyfunny,109 +fuji aoi,109 +fu yukari,109 +forehead flick,109 +flasso,109 +fl-chan,109 +farmer,109 +f-cla,109 +ethan (arknights),109 +esu (tasoesu),109 +elizabeth thompson,109 +dynamite pussy cat,109 +dragon ball minus,109 +dr. 
mario (game),109 +dofresh,109 +dmsco1803,109 +dhfz181,109 +cure sparkle,109 +cum on gloves,109 +crusaders quest,109 +combat medic ziegler,109 +colophon,109 +coldcat.,109 +clarine (fire emblem),109 +cinque izumi,109 +childhood friend (ominaeshi),109 +charlotte (seiken densetsu 3),109 +chaika bogdan,109 +cavall the 2nd,109 +camisole pull,109 +burning love (phrase),109 +bun (bbb81bun),109 +bud (korovsme),109 +blazblue: calamity trigger,109 +black rope,109 +beaten,109 +badminton,109 +a~chan,109 +attouteki yuugi mugen souls,109 +atou rie,109 +atelier lulua,109 +asakura kazumi,109 +arisawa tatsuki,109 +apostle,109 +aoandon,109 +anvil,109 +anal hook,109 +amiya (guard) (arknights),109 +alternate body size,109 +akira (orenchi no maidosan),109 +akihazama,109 +aimori meguru,109 +aaeru,109 +zuko,108 +zinfyu,108 +zbura,108 +zambiie,108 +yuugo (yu-gi-oh!),108 +yutohiroya,108 +yoshizawa hikoto,108 +yoshikita popuri,108 +yonah,108 +yatsu murasaki,108 +yasaka shuu,108 +yaoyao (genshin impact),108 +wii u,108 +weisuoxin,108 +wani (perfect han),108 +wallace pires,108 +uchi no maid ga uzasugiru!,108 +tsukimi 50,108 +toichi,108 +tinker bell (disney),108 +tiese schtrinen,108 +team spica's trainer,108 +tangrowth,108 +tango (tn500),108 +tamezou,108 +takayama kisai,108 +super sailor saturn,108 +super famicom gamepad,108 +sunsun2812,108 +sun wukong (cosplay),108 +substance20,108 +spinda,108 +spinarak,108 +spartacus (fate),108 +soulcalibur iii,108 +sola (solo0730),108 +sket dance,108 +sizma,108 +shirouzu mairu,108 +shiitake (love live! sunshine!!),108 +shangri-la (azur lane),108 +settyaro,108 +setona (daice),108 +sentape,108 +senoo aiko,108 +schierke (berserk),108 +scavenger (arknights),108 +sayama yoshiki,108 +sakura (doors),108 +saimon tamaki,108 +sabamori,108 +ryu shou,108 +rosia (show by rock!!),108 +roasting,108 +riki6,108 +reichsadler,108 +rankasei,108 +queen aldra,108 +papico (ice cream),108 +panties around one finger,108 +pamphlet,108 +paladin,108 +oz ma33,108 +ougi (ihayasaka),108 +otsukare,108 +osmanthus blade,108 +oscar francois de jarjayes,108 +orianna (league of legends),108 +ocza,108 +nazo (mystery),108 +natsunagi takaki,108 +natsumi (ragtime),108 +nanakura nanane,108 +nail biting,108 +nail (hollow knight),108 +muska,108 +muse dash,108 +murrue ramius,108 +muramasa mikado,108 +munseonghwa,108 +moyan,108 +monster hunter 3 g,108 +miyawaki sana,108 +misaki yuu,108 +miri (cherryjelly),108 +minase (mmakina),108 +mimuni362,108 +mileina vashti,108 +mikami (mkm0v0),108 +meteorite (arknights),108 +meimu (infinity),108 +mathiamo13,108 +marui,108 +marufuji ryou,108 +march hare (alice in wonderland),108 +makihara arina,108 +mahou shoujo taisen,108 +magician (china),108 +lime (saber j),108 +lily bloomerchen,108 +lafter frankland,108 +lacrosse stick,108 +kuro yanagi,108 +kumoi takashi,108 +kuma yuu,108 +krs (karasu),108 +krieg (skullgirls),108 +kratos aurion,108 +kozy,108 +kon (bleach),108 +kokorono arika,108 +kitahara mio,108 +kirarin369,108 +ki-sikil (yu-gi-oh!),108 +kerorira,108 +kazumasa,108 +katharine ohare,108 +kashiwagi azusa,108 +karahai (31448823),108 +kanmuri (hanyifan30338),108 +kanekan,108 +kamuo,108 +kamitani george,108 +kakimoto nao,108 +kajino (aosansai),108 +kaine (nier) (cosplay),108 +kaiko,108 +k kanehira,108 +juubaori mashumaro,108 +junkyard,108 +jong tu,108 +jarvan iv (league of legends),108 +ip police tsuduki chan,108 +inhye,108 +immersed,108 +hotori (sion),108 +hirako,108 +hiraizumi (mugenkidousha),108 +hippogriff,108 +hinabita,108 +hina misora,108 +higashiyama kazuko,108 +headband removed,108 
+he wants to order (meme),108 +hasune,108 +hao (haozz),108 +hammann (rebellious summer) (azur lane),108 +hamburger-chan (hundredburger),108 +gukurosawa01,108 +green overalls,108 +gold can,108 +garamgum,108 +ganmaganmo,108 +furim,108 +flying car,108 +flower (vocaloid3),108 +flint (arknights),108 +flay allster,108 +fire emblem engage,108 +fire axe,108 +figure skating dress,108 +feli (puyopuyo),108 +fancybetty,108 +edward newgate,108 +edmond (nu carnival),108 +doskoinpo,108 +dominator (gun),108 +dildo gag,108 +desaku,108 +dark haruka,108 +dan-98,108 +cuon (kuon),108 +cum in bowl,108 +command and conquer,108 +collapsed,108 +cockatoo,108 +clover earrings,108 +chyoling,108 +chocolate on foot,108 +chii aruel,108 +chibi maruko-chan,108 +chevalier d'eon (maid knight) (fate),108 +chaa (korone-ze),108 +cellval,108 +capybara girl,108 +burnt food,108 +bunny earmuffs,108 +bullet bill,108 +blue whale (kemono friends),108 +bandaid on thigh,108 +azu,108 +ayo (ayosanri009),108 +ayatori (aytr),108 +ayatoki-1,108 +awai shiro,108 +asta (black clover),108 +asakura masatoki,108 +asahina momoko,108 +asagi (kabocha oukoku),108 +arsenal,108 +arawi keiichi (style),108 +aono meri,108 +aoi hana,108 +aoi ch.,108 +ambulance,108 +amano hagoromo,108 +alisia0812,108 +akazukin chacha,108 +age conscious,108 +abyss mage (genshin impact),108 +aa-12,108 +zhaoyebai,107 +zero (zero kara hajimeru mahou no sho),107 +zero-go,107 +yuugen no tei,107 +yurigasaki lulu,107 +yurano (upao),107 +yukiyoshi mamizu,107 +yukitsuki kokoro,107 +yuki (asayuki101),107 +ytoy,107 +yozakura tama,107 +youmu-kun,107 +yamato (kancolle) (cosplay),107 +yamano sachi,107 +yamanami keisuke (fate),107 +yamagami lucy,107 +yai (hachihito),107 +xiaolang,107 +wolf's gravestone (genshin impact),107 +watanuki fuuri,107 +waramori fuzuka,107 +viprpg,107 +valerie (pokemon),107 +usaslug (tsukumo sana),107 +uroko (mnr),107 +ume (datsuryokugen),107 +tsurime-chan,107 +totonii (totogoya),107 +toda kazuki,107 +tenryuu (kancolle) (cosplay),107 +tekuho no habo,107 +tekkaman blade,107 +tamandua tail,107 +swimsuit over clothes,107 +suzuna (princess connect!),107 +suzumiya haruhi (young),107 +suzuki jirou,107 +suke (share koube),107 +strea (sao),107 +stare down,107 +spyke (splatoon),107 +snake boy,107 +sitting on arm,107 +shoe-ji,107 +shiruko,107 +shirouzu myuuta,107 +shirogane sara,107 +shiro albino,107 +shiguang dailiren,107 +shiba yuuji,107 +sharing,107 +sg (esujii),107 +senya fuurin,107 +seisen cerberus,107 +sawada manami,107 +satake shougo,107 +sandslash,107 +saltyicecream,107 +sakuramori sumomo,107 +sakuramochi1003,107 +sabrith ebonclaw,107 +ryp,107 +rororogi mogera,107 +rookidee,107 +red tube top,107 +rauto,107 +rapa (heisei strawberry),107 +queen (band),107 +quarterlift,107 +qooo003,107 +price list,107 +preview,107 +precia testarossa,107 +potetos7,107 +porsche,107 +pillow (nutsfool),107 +picket fence,107 +peter pan (disney),107 +patrat,107 +party horn,107 +ozu yugurou,107 +ovelia atkascha,107 +organ (instrument),107 +orange (bibiko),107 +oolay-tiger (voice actor),107 +oasis (magnitude711),107 +noveske n4,107 +nisego,107 +naoko (naonocoto),107 +nagata gata,107 +n2midori,107 +muromaki,107 +murasaki nyaa,107 +multicolored bowtie,107 +mouth insertion,107 +mountain pukuichi,107 +moegi0926,107 +mochizuki mina,107 +mochi (k620803n),107 +miyama yuuna,107 +miyakawa106,107 +miya ur,107 +mishuo (misuo69421),107 +mirufui,107 +minutachi,107 +military coat,107 +milin (orange maru),107 +milim nova,107 +midare toushirou (kiwame),107 +mid-autumn festival,107 +metallican,107 +mejiro 
mcqueen (end of sky) (umamusume),107 +mechrailgun,107 +matsushita,107 +maro nie,107 +marcy (chrono cross),107 +mao (disgaea),107 +magnezone,107 +macne series,107 +lyza (made in abyss),107 +lovely x cation,107 +linfi-muu,107 +lich,107 +lewdkuma,107 +leotard lift,107 +lemrina vers envers,107 +lee sin,107 +lavender panties,107 +landing gear,107 +lalah sune,107 +kusari hime: euthanasia,107 +kurun (kurun777),107 +kuro the divine heir,107 +kurebayashi juri,107 +kunizuka yayoi,107 +kunitori,107 +kumamon,107 +kiui (dagk8254),107 +kitchen hood,107 +kikurina,107 +keith claes,107 +kasu kazu,107 +kannagi cocoa,107 +kamishiro mai (capti-no),107 +kaijumilk (milkchaotea),107 +kagura mutsuki,107 +kagari liroi,107 +julietta juris,107 +jorori,107 +jixing renou,107 +jin nai,107 +jedah dohma,107 +jean (gunnhildr's legacy) (genshin impact),107 +izumi kanata,107 +ishizu ishtar,107 +inui shinju,107 +independence (azur lane),107 +impossible vest,107 +ichinose (sorario),107 +hrothgar,107 +hozumi rino,107 +hover vehicle,107 +hoshino lily,107 +hoshimiya mukuro,107 +hoshi mikan,107 +honda takeshi,107 +holding glowstick,107 +himeno mikoto,107 +himejima gyoumei,107 +highleg buruma,107 +heriyama,107 +hani haya,107 +hands under legs,107 +hammerhead shark,107 +hajike akira,107 +gum (jsr),107 +guardians of the galaxy,107 +grimmelsdathird,107 +goshuushou-sama ninomiya-kun,107 +golden retriever,107 +godot (ace attorney),107 +gnai,107 +gedoooo,107 +gc3,107 +gawr gura (cosplay),107 +garfield,107 +gangsta hold,107 +fz064,107 +fusou kai ni (kancolle),107 +fukumitsu (kirarirorustar),107 +fukasaku aoi,107 +fujimaru (green sparrow),107 +fujibejifu,107 +fubuki (blue archive),107 +finger horns,107 +fe (tetsu),107 +fantastic beasts and where to find them,107 +face in hands,107 +elijah baley,107 +eien no aselia,107 +echigo mitsutaka,107 +ebi puri (ebi-ebi),107 +earthworm,107 +duto,107 +durin (arknights),107 +dungeon toaster,107 +dunbine,107 +draven,107 +dororon enma-kun,107 +dore (gilles dore),107 +disgaea rpg,107 +dirty ero,107 +digital camera,107 +deyuyama,107 +detroit: become human,107 +deogho (liujinzy9854),107 +deino (pokemon),107 +ddt (darktrident),107 +data (mega man),107 +dark willow,107 +danmakuman,107 +daigoman,107 +cz75 (girls' frontline),107 +comp h's,107 +commander shepard (female),107 +comet (teamon),107 +combusken,107 +cocoon,107 +claudia hortensia,107 +chtholly nota seniorious,107 +chocolate on pussy,107 +chiroron,107 +chiriko (atlanta),107 +chela77,107 +chef no kimagure salad,107 +carp,107 +camouflage tank top,107 +c (rahit),107 +bugles,107 +bride (fire emblem),107 +breast sucking through clothes,107 +bootjob,107 +blue21,107 +beyond the vibes (idolmaster),107 +bastion (overwatch),107 +bail,107 +azelweien,107 +artery gear,107 +aoi yun,107 +aogiri koukou game club,107 +annie mei,107 +ankle flower,107 +angelise reiter,107 +andrewcockroach,107 +amano tora,107 +almeida (granblue fantasy),107 +akutsu (yuumi),107 +akutsu (demodori),107 +akira ituki,107 +akino (princess connect!),107 +akazaki yasuma,107 +aizawa chizuru,107 +aikawa ruru,107 +aida yuu,107 +aiba asagi,107 +abusoru,107 +abiko yuuji,107 +7lark,107 +3.14,107 +1nupool,107 +14 (vision5032),107 +01 0219,107 +001 (darling in the franxx),107 +zone (artist),106 +zhiyou ruozhe,106 +zebrablack,106 +yozora (1wkdgusdn),106 +yokkora,106 +ymd (holudoun),106 +yamashita jiro,106 +yamaguchi satoshi,106 +yamaguchi kisaragi,106 +yakumo ran (cosplay),106 +yakibuta (shimapow),106 +yaeno muteki (umamusume),106 +xiaji,106 +wolksheep,106 +winged bag,106 +wendy o. 
koopa,106 +wash cloth,106 +volyz,106 +vococo,106 +vo1ez,106 +vmat,106 +vittorio veneto (the flower of la spezia) (azur lane),106 +vitaminechan,106 +vietnam war,106 +unicorn gundam banshee,106 +un-known (ninesix),106 +umeboshi (lazy lazy),106 +ultimate madoka (cosplay),106 +uisu (noguchipint),106 +uguisu kagura,106 +tutou jiang,106 +tsukimonogatari,106 +trance tina branford,106 +toratora (nanahaba),106 +tooya daisuke,106 +to@st,106 +tiamat (momokuri mannen),106 +thomas the tank engine,106 +the beatles,106 +tetsua rei,106 +take (illustrator),106 +takatun223,106 +takada akemi,106 +svc chaos,106 +sui (petit comet),106 +straizo,106 +sterkenburg cranach,106 +sora (silent square),106 +sonchi,106 +siqi (miharuu),106 +single bare leg,106 +shize (coletti),106 +shiron (shiro n),106 +shirane taito,106 +shinopoko,106 +shinei nouzen,106 +shimakoma,106 +shigurio,106 +shaa,106 +seascape,106 +schwarz (skyline) (arknights),106 +saturndxy,106 +saotome mary,106 +saotome genma (panda),106 +sangou shizuku,106 +saitou chiwa,106 +sailor star fighter,106 +sahara386,106 +sadida,106 +rouge (makeup),106 +robo8,106 +riyun (halodark),106 +ribbon girl (arms),106 +revali,106 +reptile boy,106 +reinhard van astrea,106 +ray.s,106 +queen of sunlight gwynevere,106 +qinshi-ji,106 +puton,106 +ptrd-41,106 +project krone (idolmaster),106 +princess hilda,106 +pirates of the caribbean,106 +petelgeuse romaneeconti,106 +panty gag,106 +oxygen (0220),106 +oriana thomason,106 +orange overalls,106 +oono mayu,106 +olga discordia,106 +okawaii koto,106 +ogami,106 +nuneno,106 +northern goshawk (kemono friends),106 +nishino eri,106 +nijimura keicho,106 +netamaru,106 +neon palette,106 +necono (nyu6poko),106 +natsuba002,106 +nana (manaita koumuten),106 +nakajima youko,106 +nakajima ryou,106 +muzuki uruu,106 +murakami natsumi,106 +moutama,106 +moro no ichizoku,106 +monoto,106 +mizuyoukan (mikususannda),106 +misako (kunio-kun),106 +mikogami riko,106 +miho (last origin),106 +mem,106 +melting halo,106 +measuring cup,106 +may chang,106 +may (spring 2021) (pokemon),106 +matsuo yukihiro,106 +marriage certificate,106 +marie antoinette (swimsuit caster) (second ascension) (fate),106 +maitou,106 +luck & logic,106 +laundry dragonmaid,106 +ladybug (character),106 +kyoichi (live a hero),106 +kurusugawa serika,106 +kururi,106 +kuroshiro00,106 +kuro (tbm9187),106 +koomoi,106 +klonoa,106 +kiyoura setsuna,106 +ken marinaris,106 +keihh,106 +kazari jun,106 +kazami kazuki,106 +katsura masakazu,106 +kanae (inorin05kanae),106 +kamisuki,106 +kamen rider saber (series),106 +kage houshi,106 +justrube,106 +josie rizal,106 +jito mesuki nitouhei,106 +japanese otter (kemono friends),106 +jagd,106 +issho ni training,106 +isabelle (shadowverse),106 +ippus,106 +ignite module,106 +i-402 (aoki hagane no arpeggio),106 +hyde (under night in-birth),106 +hurdle,106 +hoshi rasuku,106 +honkivampy,106 +holding bullet,106 +hoe-l,106 +hobby (azur lane),106 +hirume of heavenly incense,106 +himuhino,106 +hime granzchesta,106 +hikosan,106 +higa423,106 +hataraki ari,106 +happiny,106 +hanshin tigers,106 +hands on own shoulders,106 +hand on goggles,106 +gundam arsenal base,106 +gears of war,106 +fuuga (perv rsity),106 +fujisaki shiori,106 +fujimaru ritsuka (male) (mage's association uniform),106 +fujimaru ritsuka (female) (mage's association uniform),106 +flower tact,106 +floette,106 +flamenco dress,106 +fan over face,106 +falinks,106 +falconry,106 +evolutionary stone,106 +evangelyne,106 +eun (elsword),106 +epi zero,106 +emoto reishi,106 +elise (league of legends),106 +electrostimulation,106 
+electivire,106 +ekaterina kurae,106 +eisuto,106 +eien no sai tori,106 +eichisu,106 +e=mc2 (mosimosi),106 +double insertion,106 +dotsuco,106 +donutman,106 +dive to zone,106 +destiny (game),106 +demon pillar (fate),106 +def (chronowarld),106 +deep aqua mirror,106 +cuivre,106 +cradily,106 +comiket 88,106 +clear card,106 +chuzenji,106 +chikujouin magane,106 +chikugen shiina,106 +cheek biting,106 +cat stretch,106 +carcano m1891 (girls' frontline),106 +caiman pool,106 +cacturne,106 +c8oyk,106 +busou shoujo machiavellism,106 +buntan,106 +broken cup,106 +blue angel,106 +bleach: the thousand-year blood war,106 +been,106 +bara (03 bara ),106 +ayuanlv,106 +ashu,106 +asgore dreemurr,106 +arctic fox (kemono friends),106 +anoa,106 +andy bogard,106 +andou tomoya,106 +an-telin,106 +amesawa mokke,106 +all-out attack,106 +algerie (white sand paradise) (azur lane),106 +alfyn greengrass,106 +akumi (yoclesh),106 +aki eda,106 +aisaka sayo,106 +agovitch,106 +8 (yamalu),106 +78rr,106 +16 ban,106 +0 (znanimo),106 +zipping,105 +yuzuki yukari (onn),105 +yurun,105 +yukimura tsubame,105 +yukimi unagi,105 +yukihiro,105 +yudaoshan,105 +yotsuboshi academy uniform,105 +yoshizane akihiro,105 +yonoisan,105 +yokohachi,105 +yarizui sen,105 +yamanbagiri chougi,105 +yaise,105 +yada ei,105 +y.i. (lave2217),105 +xia you qing,105 +world's end dancehall (vocaloid),105 +vetina,105 +valkenhayn r. hellsing,105 +uzukinoko,105 +urubida,105 +ursica,105 +uno (nanoha),105 +unidentified nk,105 +unicycle,105 +umi (srtm07),105 +umeno ryuuji,105 +ukrainian flag,105 +twogie,105 +twoframe,105 +tropical liquor,105 +torajimaneko,105 +tora tentei,105 +tira misu,105 +timer,105 +tian guan ci fu,105 +theodor bachstein,105 +the hermit,105 +tetsu tissue,105 +tarou2,105 +tarayuki,105 +tansan daisuki,105 +tamayume,105 +takuan,105 +taki eri,105 +takami akio,105 +takamatsu,105 +suzuneko (yume no kyoukai),105 +super creek (chiffon ribbon mummy) (umamusume),105 +sumomomo momomo,105 +sugiura midori,105 +space invaders,105 +sousou no frieren,105 +snowflake choker,105 +skirt under dress,105 +sk02,105 +shutter shades,105 +shitou,105 +shironekokfp,105 +shino-o,105 +shin kawasaki,105 +shimotsuki keisuke,105 +seto (asils),105 +seraphim (kore wa zombie desu ka?),105 +sentouin hakenshimasu!,105 +senjou no valkyria 4,105 +self-propelled artillery,105 +sei000,105 +seffyna,105 +sb (coco1),105 +sayu030b,105 +saren (real) (princess connect!),105 +sakakura (sariri),105 +rowing,105 +riyo (aokiwazumi),105 +rikui (rella2930),105 +regition,105 +red lipstick tube,105 +rbx lamen,105 +quina quen,105 +prophet chu,105 +pride (fma),105 +porkjinta,105 +popola,105 +polt,105 +plastic bottle,105 +pixiv fate/grand order contest 1,105 +pish,105 +pink rope,105 +pikachi,105 +paper mario 64,105 +ozu kanon,105 +ouroboros (granblue fantasy),105 +oribe shiori,105 +on plate,105 +omastar,105 +okano kei,105 +ohashi (hashidate),105 +officer caitlyn,105 +nuernberg (azur lane),105 +ntk (7t5),105 +not for sale,105 +nortuet,105 +nona,105 +noiz,105 +nekomimi kanon,105 +natsuya (natuya777),105 +natsuki mikuru,105 +nasus,105 +museum,105 +moriyama shijimi,105 +morisoban,105 +mori airi,105 +mono (recall),105 +monaka (siromona),105 +moa (show by rock!!),105 +mixing console,105 +misaka (missa),105 +mini cooper,105 +milk (cookie),105 +might guy,105 +mia flatpaddy,105 +mg42cat-k1ng,105 +maribelle (fire emblem),105 +mako-chan (minami-ke),105 +maggi,105 +mad max: fury road,105 +macaronk,105 +lowlight kirilenko,105 +loggy,105 +litra (ltr0312),105 +lili mdoki,105 +leonardo da vinci (swimsuit ruler) (fate),105 
+leilin,105 +leaf earrings,105 +kyouichi,105 +kusahagane,105 +kurusu tomari,105 +kurahashi riko,105 +kumehara chiyota,105 +kujira hebi,105 +kriem,105 +kouyama mitsuki,105 +koumo,105 +kotohara hinari,105 +konpotsu,105 +konohana hikari,105 +kon futaba,105 +komugi (lee),105 +koketsu (koketsu-ya),105 +kobayashi nyoromichi,105 +kitsune udon,105 +kitagou fumika,105 +kine-c,105 +kijinaka mahiro,105 +kida yuu,105 +kiana kaslana (divine prayer),105 +ki (druaga),105 +kazuhiro (tiramisu),105 +kay (utd),105 +kawa mura,105 +kare huang,105 +karasuma chitose (girlish number),105 +karakuzu red,105 +kanyoko (yuzukano 17),105 +kankito,105 +kamen rider geats (series),105 +kaiki deishuu,105 +izumo,105 +isya,105 +isuzu (log horizon),105 +ishimari,105 +ishida uryuu,105 +inari (ambercrown),105 +in sack,105 +id :invaded,105 +ichino tomizuki,105 +ichijou (pani poni),105 +hoppe illust,105 +holding collar,105 +hishigata,105 +hirofumi (orenchi no maidosan),105 +hinooka shuuji,105 +hima (mizu ni tsuyoi),105 +hatsune negame,105 +hatsune miku (vocaloid3),105 +hat leaf,105 +hasaha,105 +hand on own ankle,105 +hakomaru (pandora box),105 +h.carrrrrrot,105 +gunbuster,105 +grimmjow jaegerjaquez,105 +greedent,105 +gomamon,105 +gengoroh,105 +gavel,105 +gatchapowa,105 +gager (girls' frontline),105 +fur-trimmed choker,105 +fukushima masayasu,105 +fujiki maka,105 +frozen lake,105 +frillish (male),105 +formula one,105 +fal (fal-se-0),105 +duck (duck hunt),105 +drayden (pokemon),105 +doppel (monster musume),105 +dm (dai miao),105 +dimension witch (elsword),105 +devola,105 +de da xianyu,105 +darmanitan,105 +cure la mer,105 +crested porcupine (kemono friends),105 +cra,105 +collaboration request,105 +chinyan,105 +chest mouth,105 +cassiopeia (league of legends),105 +carleen (alchemy stars),105 +caenis (swimsuit rider) (first ascension) (fate),105 +busujima (kusahana),105 +burger hair ornament,105 +bug bite,105 +bronya zaychik (wolf's dawn),105 +british,105 +blue rose sword,105 +blue delmo,105 +blazer removed,105 +black bean,105 +bellsaltr,105 +behoimi,105 +bangalore (apex legends),105 +azmodan,105 +asoka,105 +asakura kukuri,105 +artoria pendragon (swimsuit ruler) (fate) (cosplay),105 +arrow print,105 +apple ringo,105 +annytf,105 +ami (orenchi no maidosan),105 +akisoba,105 +aimitsu (ibityuttyu),105 +zero two (kirby),104 +zenos yae galvus,104 +zaku ii f/j,104 +yuya (pixiv37335712),104 +yuna (biya (1024)),104 +yulie ahtreide,104 +yuki kawachi,104 +yukari (princess connect!),104 +yue (lov n n),104 +yuasa hiromi,104 +yoshioka saki,104 +yashichii,104 +wynaut,104 +wk (low-f),104 +waterswing,104 +waffen-ss,104 +volkswagen beetle,104 +verta (verlaine),104 +urita (vivivinicol),104 +uranus symbol,104 +unbirthing,104 +umeki otoha,104 +uki (room 405),104 +ujikintoki ginka,104 +uesaka sumire,104 +udaruyo,104 +twistedgrim,104 +tsumi guilty,104 +tsubaki (kunoichi tsubaki no mune no uchi),104 +toyohara etsuko,104 +toxtricity (low key),104 +to-class light cruiser,104 +tipo (tipoplaza),104 +tiffany lords,104 +tied jacket,104 +tentomon,104 +tayako,104 +tamiya incorporated,104 +tallinn (azur lane),104 +takebi,104 +takahashi umori,104 +takadoya,104 +taira no kagekiyo (fate),104 +tailtiu (fire emblem),104 +tabris,104 +syurimp,104 +swellow,104 +suzushiro (gripen39),104 +suzune rai,104 +succubus (disgaea),104 +stunfisk,104 +stuffed cow,104 +steelblue mirage,104 +st+1,104 +spyglass,104 +sp//dr,104 +sonri,104 +skyrail,104 +sky striker ace - raye,104 +sjw kazuya,104 +shirosame,104 +shiroma (mamiko),104 +shiraishi (siraisi00),104 +shinazugawa genya,104 +shin 
getter-1,104 +shima (pepekekeko),104 +shiina mafuyu,104 +sawashiro miyuki,104 +sasaki ran,104 +sanmoto gorouzaemon,104 +saint seiya: the lost canvas,104 +saiki kusuo,104 +ryuuguu komachi (idolmaster),104 +ruin re birth,104 +rocking horse,104 +riuichi,104 +ribbed gloves,104 +rff (3 percent),104 +remington 870,104 +radar dish,104 +pronghorn (kemono friends),104 +print male swimwear,104 +precum through clothes,104 +porupurucha,104 +poorly translated,104 +pon (0737),104 +polishing,104 +pokka,104 +plug gag,104 +pixel heart,104 +pinzu,104 +petal print,104 +p2 (uxjzz),104 +oyster,104 +ortfine fredericka von eylstadt,104 +onna kishi (maoyuu),104 +ohara michiru,104 +ogre (granblue fantasy),104 +octotrooper,104 +nyamaso,104 +nuppunuppu,104 +nishigaki nana,104 +ninjask,104 +niko p,104 +newey,104 +natsushiro,104 +natsuko (bluecandy),104 +nanami mami,104 +myuto (advent retribution),104 +mutou hana,104 +munou na nana,104 +muninshiki,104 +mukuroi,104 +moyui (myi 005),104 +morimiya masayuki,104 +monteriakitto,104 +miya (hooooo),104 +missouri (warship girls r),104 +miso (b7669726),104 +misaki (princess connect!),104 +mini dragon,104 +milk puppy,104 +mikipa,104 +mikami,104 +mikagami ei,104 +miike (992058),104 +metallica (majo to hyakkihei),104 +meredy (tales),104 +menna (0012),104 +me!me!me! dance (meme),104 +maximilian jenius,104 +matsunaga (haku),104 +matou sakura (fate/extra),104 +maria (syake-uni),104 +manabe jouji,104 +makihara shiho,104 +mahou shoujo ikusei keikaku limited,104 +mage (7th dragon),104 +london,104 +llama,104 +lightning bolt earrings,104 +liebe,104 +li sakura,104 +legskin,104 +ldl,104 +lace-trimmed bow,104 +komusou (komusou1),104 +kokkoro (ceremonial) (princess connect!),104 +ko-on (ningen zoo),104 +kiriko (overwatch),104 +khalitzburg,104 +kayune niu,104 +karaage,104 +kanoe (gallery walhalla),104 +kaguura (kagu),104 +kagari (rewrite),104 +juugoya neko,104 +july (darker than black),104 +jennifer walters,104 +jane t. godfrey,104 +itogari,104 +irusu,104 +iparupua,104 +io (code vein),104 +internet,104 +infraton,104 +i want you,104 +honda futayo,104 +hirasawa minami,104 +hinomori shizuku,104 +hanpen,104 +hajime no ippo,104 +gouda toshirou,104 +goddess of victory: nikke,104 +glados,104 +gaia memory,104 +gaelio bauduin,104 +fule,104 +food on hair,104 +flarefox,104 +fishing lure,104 +filia ul copt,104 +fennel (pokemon),104 +fallopian tubes,104 +explosion gag,104 +excadrill,104 +eo masaka,104 +enomoto hidehira,104 +echoes (stand),104 +ear sex,104 +dubwool,104 +doorbell,104 +donnpati,104 +dirty deeds done dirt cheap,104 +deathlock-san,104 +dawn (palentine's 2021) (pokemon),104 +dabuki,104 +d.o.c. 
health drone,104 +curry gohan,104 +coupe50,104 +color-coded,104 +codename: kids next door,104 +clair lasbard,104 +chunrijun (springer),104 +chijo,104 +cheek-to-breast,104 +charlotte izoard,104 +chamu (chammkue),104 +chachie,104 +bullet necklace,104 +brush (medium),104 +blooper (mario),104 +blitzle,104 +black shawl,104 +black lilith (last origin),104 +black battler,104 +black-headed ibis (kemono friends),104 +bianca (pokemon heroes),104 +berg-yb,104 +banba mahiru,104 +bakuhatsu bros.,104 +azura cecillia,104 +auui,104 +atom (tetsuwan atom),104 +atelier shallie,104 +astrologian (final fantasy),104 +asmodeus (shinrabanshou),104 +asle,104 +asakura you,104 +asai miki,104 +arts shirt,104 +artoria pendragon (alter swimsuit rider) (fate) (cosplay),104 +arlonn,104 +argyle sweater vest,104 +aosora neko,104 +aoki shin,104 +ao iro,104 +anal ball wear,104 +ame (amechan17391739),104 +amano megumi wa suki darake!,104 +amano megumi,104 +albert wesker,104 +akitsuchi shien,104 +aimaina,104 +aikawa arisa,104 +adjusting collar,104 +action taimanin,104 +ac130,104 +abenobashi mahou shoutengai,104 +abaraya,104 +774 inc.,104 +74 (teduka),104 +zz gundam,103 +zeniyan,103 +zarutsu,103 +yurin leciel,103 +yoshida ayumi,103 +yasaikakiage,103 +yang wen-li,103 +yamato tachibana,103 +xiang yu (fate),103 +wiz anniversary,103 +wishiwashi (school),103 +wing gundam zero custom,103 +wilma bishop,103 +wataichi meko,103 +wasabi (wasalongt),103 +warwick,103 +urban legend,103 +uran (uran-factory),103 +unabara mitsuki,103 +umbrella stand,103 +tsukigami chronica,103 +toxel,103 +touki matsuri,103 +torracat,103 +tooku nomura (artist),103 +tomono rui,103 +tomas (kaosu22),103 +tia (cocorosso),103 +three monkeys,103 +the last of us,103 +tapris chisaki sugarbell,103 +takuyarawr,103 +takashima,103 +takamachi nanoha (cosplay),103 +takagi hideaki,103 +taiyaki (astre),103 +tabby cat,103 +syokuuuuuuuuumura,103 +suzuka hime,103 +super mecha champions,103 +sunimu,103 +sumisumii,103 +strawberry bra,103 +sonia (p&d),103 +slayers try,103 +slayer of demons,103 +slavya-chan,103 +shirow (mha03133),103 +shiroobi (whitebeltmaster),103 +shirazumi lio,103 +shinko windy (umamusume),103 +shimura tae,103 +shaga ayame,103 +setsuko ohara,103 +senzoc,103 +sendou yukari,103 +sekka (kageshio),103 +sakurano kurimu,103 +sakura sakura,103 +sakuma tsukasa,103 +saix,103 +saibai shounen,103 +s&w m&p,103 +risshu,103 +renown (warship girls r),103 +rend,103 +rayno,103 +rabirin (precure),103 +quasimodox,103 +ponpon (ponpon62126),103 +pokan (xz1128),103 +pohwaran,103 +pikomint,103 +pig (kemono friends),103 +penis and testicles touching,103 +pegasus seiya,103 +peeing in cup,103 +paintedmike,103 +otoo (izanagi),103 +omnitrix,103 +oinari (tensaizoku),103 +nose pads,103 +nomayo,103 +noeejang,103 +no image,103 +niwakaame (amayadori),103 +nitaka (fujikichi),103 +ningyo hime (sinoalice),103 +nicporim,103 +nga (ray ray),103 +nekou izuru,103 +nanto yaya,103 +nanami (suikoden),103 +nana (kemono friends),103 +nabenko,103 +n9+,103 +musashiya chougenbou,103 +murata himeko (battle storm),103 +muranisaki,103 +muramura hito,103 +mumu,103 +moruta (sunu-pio),103 +monster hunter mezeporta kaitaku-ki,103 +momo (gundam build divers),103 +mizuki maya,103 +miso panda,103 +mironomeo,103 +minuspal,103 +minowa sukyaru,103 +minato aya,103 +mila (doa),103 +mighty yukiko,103 +mifune ryuuko,103 +microspace,103 +metal baseball bat,103 +melleau,103 +marluxia,103 +marie antoinette (alter) (fate),103 +manticore (monster girl encyclopedia),103 +lunch boxer,103 +luna (sailor moon) (human),103 +luna 
(mi-chanman),103 +lorem (mazohaha),103 +lodbyy,103 +lloyd bannings,103 +liang cun rakuna,103 +lee ji-eun,103 +lavi,103 +larten crepsley,103 +kuroneko douji,103 +kuramochi kyouryuu,103 +kunugi miyaco,103 +krabby (artist),103 +koujiro frau,103 +kotetsu isane,103 +komi (komiking),103 +kiyose akame,103 +kishibe (chainsaw man),103 +kirisame ga furu mori,103 +kimuwaipu,103 +kimura ryu,103 +kimura neito,103 +kawakami tomoko,103 +katou itsuwa,103 +katanakko daisuki,103 +kaoru (alicemakoto),103 +juke,103 +iwahana,103 +irodori (irotoridori),103 +ireading,103 +invisible wall,103 +inhabituels,103 +hyuuga (aoki hagane no arpeggio),103 +hosoo,103 +homura hinase,103 +holding orb,103 +hiyopuko,103 +hitchhiking,103 +hijiri myouren,103 +hiichisato,103 +highleg shorts,103 +head arms,103 +hayashi sakura,103 +hato kenjirou,103 +haruka shiya,103 +hanabi (ocha),103 +hairclip removed,103 +ha ze,103 +guttia,103 +gundam seed astray,103 +gulim,103 +gorebyss,103 +g28 (girls' frontline),103 +fumika,103 +fumi11gou,103 +fukurahagi uomaru,103 +frillish (female),103 +female titan,103 +fallout new vegas,103 +f91 gundam,103 +evulchibi,103 +ernesto de la cruz,103 +erina (rabi-ribi),103 +enemy vessel (warship girls r),103 +emma sheen,103 +elena trafalgar,103 +elbow on table,103 +eijsvl,103 +eevee tail,103 +ear focus,103 +duosion,103 +drill ponytail,103 +ditienan ddn,103 +deutschland (service time?!) (azur lane),103 +denpa (denpae29),103 +decadence (anime),103 +dartrix,103 +dark matter,103 +dark magician girl (cosplay),103 +damenaito,103 +dakunesu,103 +contact lens,103 +closed labcoat,103 +cliffjumper,103 +ckst,103 +chiyoko (tykchiyo),103 +chipmunk girl,103 +chinese armor,103 +chin,103 +chikuma (azur lane),103 +chie (ishikei),103 +chen hai (vestibule of wonders) (azur lane),103 +cencoroll,103 +c2 kikan,103 +build driver,103 +breast slap,103 +boken fantasy,103 +boiling,103 +bnc (bunshi),103 +blue cat (precure),103 +bladder,103 +behelit,103 +bedroll,103 +ballet boots,103 +bagelbomb,103 +asuka hina,103 +asterisk (asterism),103 +asou misaki,103 +asanaka yomogi,103 +aritsuno,103 +aqua gemstone,103 +apple core,103 +aozoraichi,103 +aouma,103 +aoi (princess connect!),103 +aoguu,103 +anpanman (character),103 +andrew (duel angel),103 +ameya nihachi,103 +ame (ame025),103 +alice (queen's gate),103 +akari (raigou),103 +akaashi keiji,103 +admiral arisugawa,103 +1996,103 +1-up mushroom,103 +.hack//link,103 +zinbei,102 +zenonzard,102 +zefa (neoaltemice),102 +yuma (coffee-milk-moumou),102 +youtube creator award,102 +yotsubato! 
pose,102 +yonebayashi saiko,102 +yomotsuki road,102 +yamaiwa shuuhai,102 +writing on wall,102 +winged kuriboh,102 +wikipedia,102 +wheel o feet,102 +weshika,102 +wave (world wide wave),102 +vivi (ac8231),102 +vanquished queens,102 +v (devil may cry),102 +umino iruka,102 +uesugi kenshin (rance),102 +u-81 (azur lane),102 +turiganesou800,102 +trojan green asteroid,102 +train (clothing),102 +toyoi yuuta,102 +touken ranbu: hanamaru,102 +tomoe marguerite,102 +tomoe gozen (traveling outfit) (fate),102 +toichi (ik07),102 +time mage,102 +tiko (trouble spirit),102 +terazaki kaoru,102 +tatsuki (irodori) (style),102 +takenaka hanbee (oda nobuna no yabou),102 +taion (xenoblade),102 +suiten nikkou amaterasu yanoshisu ishi,102 +subarashiki hibi,102 +storm attacker,102 +staple,102 +sovetskaya rossiya (the lackadaisical lookout) (azur lane),102 +slavic clothes,102 +skull tattoo,102 +sindri,102 +sigilyph,102 +shuragyoku mami,102 +shokkin,102 +shishanmo,102 +shirow (crazy),102 +shira yu ki,102 +shiomi (lowrise),102 +shino sto,102 +shindou sugata,102 +setta shu,102 +sellel,102 +seiren,102 +segawa izumi,102 +scarle yonaguni,102 +savanna striped giant slug (kemono friends),102 +satou hina (kamisama ni natta hi),102 +sasaki raito,102 +sari,102 +sarah bryant,102 +saku (saku1151),102 +ronopu,102 +ritao kamo,102 +ricocheting,102 +rheez,102 +reindeer (kemono friends),102 +razy (skuroko),102 +racing miku (2014),102 +pvc parfait,102 +politics,102 +photocopier,102 +phantom of inferno,102 +peter pan collar,102 +peragura,102 +paracelsus,102 +oyabun (higashi),102 +otsumami (otsu-mugi),102 +otono fei,102 +otaku ni koi wa muzukashii,102 +oswald (odin sphere),102 +omuraisu echizen,102 +omega alpha,102 +okitafuji,102 +okakasushi,102 +oisin,102 +nyxerebos,102 +nyoriko,102 +nuka (nukamochi),102 +nosa kouko,102 +noa (granblue fantasy),102 +nito (siccarol),102 +nishihara yasoko,102 +ninnzinn,102 +ng knight lamune & 40,102 +namori (style),102 +nakano (2 mannaka),102 +nagata nagato,102 +nagashii kouhei,102 +muromi-san,102 +mukuba,102 +mountain dew,102 +mother 1,102 +moon rabbit extra (touhou),102 +moo yoshio,102 +mogeko (okegom),102 +mog,102 +moero! justice gakuen,102 +miyama-san,102 +mitsugi,102 +mitsu (tendou itsuki),102 +missarilicious,102 +mikeneko (utaite),102 +midorima shintarou,102 +menstrual pad,102 +mellozzo,102 +megami paradise,102 +medium tank,102 +medb (swimsuit saber) (second ascension) (fate),102 +mechanical tentacles,102 +maximal,102 +masaki nanaya,102 +marnie (summer 2021) (pokemon),102 +marnie (omoide no marnie),102 +marco bodt,102 +marching,102 +makoto (genshin impact),102 +m mance,102 +luna (konosuba),102 +love guitar rod,102 +lord's blade ciaran,102 +lix,102 +liquid snake,102 +li (rob ishi),102 +li'l judd (splatoon),102 +legends of runeterra,102 +laurels,102 +laura s. 
arseid,102 +lan hikari (mega man),102 +kurosawa tetsu,102 +kurimochi chizuru,102 +kuchisake-onna,102 +kshatriya,102 +kouka (mrakano5456),102 +kotowari (newtype kenkyuujo),102 +kiri (sub-res),102 +kilva lollop,102 +kill bill,102 +kemo chiharu,102 +kemigawa mondo,102 +keito (keito-ya),102 +katsu (katsupainter),102 +karibuchi takami,102 +karadborg,102 +kanpyou (hghgkenfany),102 +kamori sayaka,102 +kamo ashi,102 +kamo (gafas),102 +kamisama dolls,102 +kaho (sister princess),102 +k (sktchblg),102 +jupiter (idolmaster),102 +jpc,102 +janine (pokemon),102 +jacket grab,102 +ivy (sparrowl),102 +inhabituels estudios,102 +iku2727,102 +igarashi youhei,102 +ichinose kaede,102 +hullabaloo,102 +hover board,102 +hosomitimiti,102 +hoshiibara mato,102 +holding lamp,102 +hizashi no naka no real,102 +hisuian decidueye,102 +hex grid,102 +hero shot (splatoon),102 +hero (dq1),102 +heles (summer) (granblue fantasy),102 +hanyae,102 +hanetsuka,102 +handle,102 +hanamuke,102 +gunnjou yosio,102 +grizz,102 +grandmother and grandson,102 +gold teeth,102 +gobou 1000,102 +glalie,102 +girls' frontline commander (xiujia yihuizi),102 +gibraltar (apex legends),102 +gau (n00 shi),102 +garun wattanawessako,102 +furby,102 +front braid,102 +freddy fazbear,102 +fairy knight lancelot (first ascension) (fate),102 +eufoniuz,102 +erospanda,102 +erezu,102 +elizabeth bathory (brave) (fate) (cosplay),102 +doruji,102 +doro (sabarudoro),102 +dirge of cerberus final fantasy vii,102 +dildo reveal,102 +diagonal-striped skirt,102 +desco (disgaea),102 +demon costume,102 +decoy00xx,102 +dead spike,102 +dark illuminate (idolmaster),102 +danshaku,102 +dani-ikapi,102 +dai-erie,102 +cyllene (pokemon),102 +cosplay pikachu,102 +cosmic bear,102 +clothes on bed,102 +cia (hyrule warriors),102 +choco chip (ekitai idou),102 +chis (js60216),102 +chao (sonic),102 +chan1moon,102 +cat ornament,102 +cang se ye hua,102 +camerupt,102 +cabba,102 +byu (orihazuma),102 +bronzong,102 +bodyguard,102 +blackberry (fruit),102 +black star,102 +black pasties,102 +bismarck (beacon of the iron blood) (azur lane),102 +bikini over clothes,102 +berry's,102 +beats by dr. 
dre,102 +beach house,102 +bartholomew roberts (fate),102 +barred window,102 +balnom,102 +ayame iro (toumei dolce),102 +au (d elite),102 +ashraely,102 +ashita no kimi to au tame ni,102 +ash ketchum (cosplay),102 +asahi (zabaniyan),102 +arato hisako,102 +arai sumiko,102 +aoinu (aoinu 99),102 +aohada bocchi,102 +angelia (girls' frontline),102 +amira (shingeki no bahamut),102 +akaneyu akiiro,102 +aioi u,102 +aihara nana,102 +after cunnilingus,102 +yuudachi (shogun of snowballs) (azur lane),101 +yuli (yulipo),101 +yukizumi remon,101 +yukina (yu yu hakusho),101 +yue teitoku,101 +yin midnight,101 +yandere-chan (ramchi),101 +yami bakura,101 +yamaguchi tamon,101 +yada (xxxadaman),101 +yabu q,101 +xlnosearch (xlxiaoluodiaofa),101 +xiao qiang (overseas),101 +white rope,101 +vibrator on penis,101 +vent (object),101 +vectorek,101 +ursula (takunomi),101 +unused character,101 +unown a,101 +universe,101 +ump9 (shiba investigator) (girls' frontline),101 +type 97 (peony) (girls' frontline),101 +turretless tank,101 +tsugutoku,101 +tr-6,101 +toy story,101 +tosh (imonade ryouchou),101 +torso (hjk098),101 +the lion king,101 +the elder scrolls iv: oblivion,101 +teruchan,101 +taro-k,101 +tapu koko,101 +tanjiu,101 +taketatsu ayana,101 +takanashi sora,101 +sword between breasts,101 +suyohara,101 +sugisaki miku,101 +sugawara koushi,101 +strike gundam,101 +sprite art,101 +spring2013,101 +speedl00ver,101 +space ishtar (first ascension) (fate),101 +soyoking,101 +sotto,101 +sitting on branch,101 +sirius (azur lane) (cosplay),101 +sin (kami148),101 +siera (sieracitrus),101 +shumai (sm318),101 +shometsu-kei no teruru,101 +shiromako,101 +shione (niliu chahui),101 +shinovi,101 +shindou hikaru,101 +shimon (31426784),101 +shiba yuusuke,101 +shaving cream,101 +seiten ragnarok,101 +saya (scabbard),101 +sasaki (glass1138),101 +sanninme no haru,101 +sandbox,101 +sakura taisen ii,101 +sage (valorant),101 +safutsuguon,101 +ryuuji teitoku,101 +roulette animation,101 +ronisuke,101 +rirene rn,101 +ripe-tan,101 +riela marcellis,101 +resident evil 1,101 +reinhardtzar,101 +rebar,101 +rearing,101 +raw meat,101 +ramina (baallore),101 +rakujin,101 +radio tower,101 +putting on legwear,101 +punto,101 +puniru wa kawaii slime,101 +priscilla (fire emblem),101 +princess laura,101 +portuguese commentary,101 +plastic wrap,101 +pink clouds,101 +pilot (titanfall 2),101 +pig nose,101 +pharos,101 +pharamacom,101 +paryi project,101 +pantograph,101 +panipo,101 +ooyama imo,101 +ojiri shin'ya,101 +ohisashiburi (style),101 +observer alpha (azur lane),101 +o-ring thigh strap,101 +nori chazuke,101 +nogiguchi kohiro,101 +nippori honsha,101 +new super marisa land,101 +natto soup,101 +nantaimori,101 +nanna (fire emblem),101 +namuna (7th dragon),101 +nagai gojitsudan no nechronica,101 +nabe puyo,101 +morumoru x,101 +morelull,101 +momihige,101 +ml,101 +mizushima oonari,101 +misaki shiki,101 +minawa,101 +minatoasu,101 +minamoto no raikou (swimsuit lancer) (third ascension) (fate),101 +massan,101 +maru takeo,101 +marina (blue archive),101 +manji taba,101 +mahou shoujo kazumi magica,101 +madara hio,101 +macchiato (jae-min cho),101 +lyrical lyric,101 +lubrication,101 +lq saku,101 +lord of vermilion iii,101 +leonhardt (arknights),101 +leo kliesen,101 +legomaru,101 +kushidama minaka,101 +kotsu,101 +kotone a,101 +kokkoro (new year) (princess connect!),101 +kj (k777),101 +kimidori (ico),101 +kh (kh 1128),101 +kemoyuri,101 +keijo!!!!!!!!,101 +kayu,101 +katou yuuki,101 +karpin,101 +kani nyan,101 +kamisa,101 +kakuzu (naruto),101 +kagano ai,101 +kaga sumire,101 +juu 
(juuzi),101 +junekun,101 +jinno hikari,101 +jinbei (clothes),101 +jean roque lartigue,101 +javelin (beach picnic!) (azur lane),101 +james sunderland,101 +izure (pixiv16392012),101 +ivan wang,101 +itou tatsuya,101 +isonade orca,101 +ishihara saika,101 +is-2,101 +iris amicitia,101 +inumi,101 +initial,101 +ininiro shimuro,101 +imi tavor tar-21,101 +illustrious (muse) (azur lane),101 +igor (persona),101 +ichino,101 +houshou marine (cosplay),101 +hishi akebono (umamusume),101 +hirano masanori,101 +hibikino high school uniform,101 +herunia kokuoji,101 +hero (dq7),101 +henry (fire emblem),101 +harumi shinju,101 +harapeko,101 +hands on own neck,101 +hand milking,101 +hagiko,101 +hachachi,101 +gyouza teishoku,101 +gunparade march,101 +gundam age-1,101 +guild girl (goblin slayer!),101 +greece (hetalia),101 +gominami,101 +godwkgodwk,101 +gin (tttetu123),101 +gihuta hiroshi,101 +gift between breasts,101 +gaiking,101 +furutani kaede,101 +funii,101 +fukuro ooji,101 +fujimaru ritsuka (female) (decisive battle chaldea uniform),101 +fortress,101 +ford,101 +etou misaki (idolmaster),101 +enajii,101 +elf all-stars datsuijan 2,101 +el joey,101 +eiscue (ice),101 +eguegu,101 +edogawakid,101 +ebi tendon,101 +e (you33ou),101 +dudley,101 +drawn tail,101 +double arm hug,101 +donkey girl,101 +devil mercy,101 +demon (monster girl encyclopedia),101 +dano,101 +daewoo k2,101 +d.va (overwatch) (cosplay),101 +cubchoo,101 +crossed out,101 +cowengium,101 +cioccolatodorima,101 +charles zi britannia,101 +chain blades,101 +chabi (amedama),101 +ccccc (jpn m0),101 +camilla (spring) (fire emblem),101 +caffeccino,101 +buying condoms,101 +buta tsuzumi,101 +bubble head nurse,101 +brushing another's teeth,101 +book of the azure sky,101 +bluekalmia,101 +blinkblink art,101 +bikkusama,101 +bell tower,101 +bat earrings,101 +basilisk (manga),101 +baseball stadium,101 +bang dacy,101 +ball bra,101 +balance (superdust),101 +bakugan,101 +babydoll t-shirt,101 +awei (aweirua),101 +asukagawa chise,101 +arapi,101 +animal ears (norankkori),101 +anima (togashi),101 +amemura ramuda,101 +amayadori-tei,101 +amano yo-ki,101 +amagai yukino,101 +alkaid (.hack//),101 +aka kitsune,101 +ajimita,101 +aikawa (dorohedoro),101 +aer7o,101 +;/,101 +ze (wzfnn001),100 +yuuhi (cookie),100 +yui 7,100 +ym (distance819),100 +yasuno (airy light),100 +yashiro nene,100 +yama tatsuo,100 +yadokari (migihashi),100 +xo (xo17800108),100 +wet floor,100 +watari1118,100 +w55674570w,100 +vasavi shakti (fate),100 +v ap art,100 +urabe michiru,100 +united states marine corps,100 +trieste (azur lane),100 +toweringman,100 +tovio rogers,100 +touyama sabu,100 +toriburi,100 +tony man+,100 +tom q (tomtoq),100 +the cold,100 +team fortress (series),100 +tall grass,100 +takitsume shino,100 +tadd (tatd),100 +tachikawa ayaka,100 +t-800,100 +superbusty,100 +sunagakure symbol,100 +sumeragi hakua,100 +sukone tei,100 +sukima (crie),100 +suigetsu (watermoon-910),100 +strappado,100 +step arts,100 +spirit (super smash bros.),100 +soul link,100 +sora (no game no life),100 +sonny brisko,100 +somray,100 +smelling armpit,100 +shise (agepan),100 +shiroie mika,100 +shiroi hakuto,100 +shining needle castle,100 +shiki (samurai spirits),100 +shihira tatsuya,100 +sewer,100 +sekka yufu,100 +sejuani,100 +scarlet witch,100 +satsuyo,100 +sasoura,100 +sansan (dongfangzhong111),100 +sanemichi hajime,100 +sakuraba neku,100 +sakihata rimi,100 +sagara riri,100 +rorschach,100 +ronpaxronpa,100 +romancing saga minstrel song,100 +rir-chan,100 +ringouulu,100 +reaper (tera online),100 +rakko (makarakko),100 +production 
note,100 +prince of wales (warship girls r),100 +plus9,100 +plaid swimsuit,100 +pantsu majirou,100 +pantsu-ripper,100 +pak ce,100 +pai-chan (nao),100 +otomachi una (talkex),100 +open poke ball,100 +onizuka takuto,100 +omega symbol,100 +ohhhhhhtsu,100 +oba-min,100 +nursery rhyme,100 +numpopo,100 +npcpepper,100 +north carolina (the heart's desire) (azur lane),100 +nontao,100 +nijiko (c2cs4q),100 +netural,100 +netherlands (hetalia),100 +necktie on head,100 +nayuta69,100 +natsuzuka-san no himitsu,100 +nasipasuta,100 +nakuta,100 +najar,100 +naba chizuru,100 +muzzuleflash,100 +muteki soda,100 +multicolored scrunchie,100 +mouse costume,100 +mosuke,100 +moriki takeshi,100 +mon-musu quest: paradox,100 +mokottsu,100 +moja (gibson),100 +mofumofu (lorelei),100 +mk48 (girls' frontline),100 +mizutama,100 +miyata gakuman,100 +miyanokouji mizuho,100 +miura rinaldi,100 +miteiru (shirakami fubuki),100 +misplaced genitals,100 +mirimo,100 +mintol (qool+),100 +minfilia warde,100 +minase inori,100 +mikumikudance,100 +migumigu,100 +mig (36th underground),100 +mi no take,100 +melpha,100 +max-k,100 +matsuu (akiomoi),100 +master (4th),100 +marine day,100 +mani oniniku,100 +male spitroast,100 +makino harumaki,100 +maiqo,100 +madeline (woofycakes),100 +lumineko,100 +love handles,100 +leopard (kemono friends),100 +leggings pull,100 +leg cutout,100 +lancet-2 (arknights),100 +kurokami (kurokaminohito),100 +kurame,100 +kurakumo nue,100 +kurage,100 +kunieda,100 +kudou jun,100 +kradebii,100 +kosaka shunpei,100 +konami kirie,100 +kokechan,100 +knee brace,100 +kirika towa alma,100 +kawada tomoko,100 +kawabata yoshihiro,100 +kauru00,100 +kasukabe saki,100 +kashiwagi tsubasa,100 +kankurou,100 +kaniharu,100 +kanamemo,100 +kamon rider,100 +kakuzatou (satousatotototo),100 +kagaya miki,100 +k.m.station,100 +juuban elementary school uniform,100 +juliona trans,100 +jon henry nam,100 +joker (stjoker),100 +jiusan naitan,100 +izuna nie,100 +iwi tavor,100 +itsutsu,100 +itou ittousai (sengoku bushouki muramasa),100 +ishii takamori,100 +ishihara (kuniyoshi),100 +irukatto,100 +ios (os),100 +inha,100 +iced coffee,100 +honda masazumi,100 +hirosuke (psychexx),100 +hershel layton,100 +heidi (arknights),100 +haruna (azur lane),100 +harmaline,100 +harada minoru,100 +happy easter,100 +grand chase,100 +gondom,100 +golduck,100 +gobi (jtnyrwbkohiqsuldg),100 +goat-chan (enarane),100 +ghost (tama),100 +geese howard,100 +gaze (thompon),100 +gas station,100 +fur-trimmed bra,100 +fujimi suzu,100 +frederica baumann,100 +forrest (fire emblem),100 +five nights at freddy's: security breach,100 +five-seven (cruise queen) (girls' frontline),100 +firstw1,100 +finnel (ar tonelico),100 +final fantasy brave exvius,100 +fh moya,100 +feng mouren,100 +feebas,100 +fanbook,100 +f6f hellcat,100 +ernesti echevalier,100 +envyvanity,100 +enouchi ai,100 +elze (s01047480836),100 +elsa maria (madoka magica),100 +ei ei mun!,100 +draco malfoy,100 +downcast eyes,100 +dorothy (arknights),100 +donkey (kemono friends),100 +dokiyuri,100 +dodojune,100 +denchuubou,100 +deadman wonderland,100 +dead rising,100 +cupid,100 +croix meridies,100 +cookie clicker,100 +click (arknights),100 +claude frollo,100 +clark still,100 +chien zero,100 +cheese kang,100 +charro,100 +charlotte pudding,100 +brown haired glasses elf (houtengeki),100 +braco,100 +bollard,100 +blue petals,100 +blue door,100 +beige border,100 +bartolomeobari,100 +azit (down),100 +autocannon,100 +aru (citrine drplt),100 +artemis of the blue,100 +archer (summer casual) (fate),100 +animal skeleton,100 +aneco (westsea1227),100 +amber 
(darker than black),100 +amano soraha,100 +alisa (kuro no sakura),100 +akizuki (17769498),100 +akitaru oubi,100 +akitaka mika,100 +ai-assisted,100 +aerosmith (stand),100 +a-ktoo,100 +15k,100 +13-gou,100 +yuuki nae,99 +yuni channel,99 +yuni (yuni channel),99 +yumemi (kiowa),99 +yotsugi,99 +yasuri nanami,99 +yamigo,99 +wild wet west,99 +white-brown,99 +watashiya kaworu,99 +virus-g,99 +utanone shion,99 +usamin (artist),99 +usalia (disgaea),99 +uraeus,99 +under night in-birth exe:late[st],99 +umeboshi,99 +ukke,99 +tsun'ichi (tsun1),99 +torinoko (miiko draw),99 +toplow,99 +toono yayoi,99 +tomatomato (tomato kanzume),99 +tomato slice,99 +toad (animal),99 +tigrex,99 +tic-tac-toe,99 +teddy (lingerie),99 +tarokennsann,99 +tanghulu,99 +tami (nekoneto),99 +takatsuki p,99 +takashina masato,99 +suzuki puramo,99 +suicchonsuisui,99 +sugiyama nobuhiro,99 +su guryu,99 +stellarspin,99 +star night snow (vocaloid),99 +spinning bird kick,99 +skydive,99 +silent hill 4,99 +shounan (kancolle),99 +shinra tsubaki,99 +shimimaru,99 +shimamura charlotte,99 +shiira,99 +shenlong (dragon ball),99 +shenanimation,99 +shake sawa,99 +serving cart,99 +scr.e,99 +scarlettear33,99 +satoyasu,99 +sasetsu,99 +sakumichi,99 +sakaki natsuki,99 +saitou yahu,99 +saegusa matsuri,99 +saber 01,99 +ryuu ga gotoku 7,99 +ryuu (multitask),99 +ruohire9,99 +rumenia (ao2is),99 +robot cat,99 +riot music,99 +rinko (mg54),99 +riko (sorube),99 +rewolf,99 +renjouji beru,99 +reddit,99 +re:rin,99 +racing miku (2013),99 +queen bonjourno,99 +purple babydoll,99 +puk (puk 2020),99 +proton (pokemon),99 +price,99 +pony r,99 +piyopoyo,99 +pinstripe camisole,99 +pink tattoo,99 +pignite,99 +paladin 2 (sekaiju),99 +oxfords,99 +oso-teki kinoko gijinka zukan,99 +oskar vega,99 +osawari tantei,99 +ooike teru,99 +nurikabe (mictlan-tecuhtli),99 +nqrse,99 +nohara misae,99 +noda shuha,99 +nightmare (arknights),99 +nekonote (nekono paraiso),99 +natsuzakura yuuki,99 +nametakenoko,99 +nameko (osawari tantei),99 +namanie,99 +namagome negi,99 +nakura hakuto,99 +nakazawa aki,99 +murasakibara atsushi,99 +murakumo (senran kagura),99 +mumumu,99 +mujiha,99 +mugi (iccomae),99 +morning glory print,99 +morinaga (brand),99 +monaka (gatinemiku),99 +momoko (momoko14),99 +mokota mememe,99 +misato (summer) (princess connect!),99 +misaka imouto 10032's cat,99 +minttchocok,99 +mintchoco (orange shabette),99 +mikogami nagisa,99 +metronome,99 +meiji (charisma serve),99 +medicine bottle,99 +maya (aoki hagane no arpeggio),99 +mashiro (swimsuit) (blue archive),99 +marshall d. teach,99 +marco rossi,99 +makino tomoyasu,99 +makaze (t-junction),99 +majo (pastamajo),99 +machi (hunter x hunter),99 +lumineon,99 +lulu (tales),99 +ludicolo,99 +love r,99 +little tail bronx,99 +little illustrious (azur lane),99 +lemon earrings,99 +kyundoo,99 +kyodairobo,99 +kyamu (qqea92z9n),99 +kusui aruta,99 +koyukkuri,99 +koyo akio,99 +kouno takaaki,99 +kotora (toranoana),99 +kotonoman,99 +kongou iroha,99 +kokuto (kurousagi1210),99 +kohaku (dr. stone),99 +knife in mouth,99 +kitazume kumie,99 +kishi torajirou,99 +kiriyama sakura,99 +kintarou (kintarou's room),99 +kino makoto's school uniform,99 +kimoi girls (meme),99 +kidou senkan nadesico - prince of darkness,99 +kemurin,99 +keionism,99 +keinesandayoooo,99 +kazuki seihou,99 +kawasumi ayako,99 +katsura harufumi,99 +kasuka108,99 +karma (league of legends),99 +karisawa erika,99 +kareha,99 +kaiboukan no. 
30 (kancolle),99 +kagaya tsubomi,99 +kadota kyouhei,99 +jyura,99 +jon taira,99 +jikasei,99 +ishikawa sparerib,99 +iris chateaubriand,99 +instocklee,99 +inoue yoshihisa,99 +innocent red,99 +implied prostitution,99 +imo bouya,99 +iganashi1,99 +idk-kun,99 +ichihachiyon,99 +i0525,99 +housou-chan,99 +honmakaina kudou,99 +holiday0606,99 +hobak,99 +hissatsukun,99 +himuro kane,99 +higata akatsuki,99 +hidaka mai,99 +henry bird 9,99 +head kiss,99 +harutarou (orion 3boshi),99 +hand on breasts,99 +hakuren,99 +hajikkoneko,99 +hair dye,99 +hair around ear,99 +hachita (odangoya),99 +gudanco (nyanmeowzer),99 +gouda takeshi,99 +gothitelle,99 +ginga ojou-sama densetsu yuna,99 +gigandal federation,99 +genderswap (otm),99 +gayprince,99 +gang of heaven,99 +furuhashi chinatsu,99 +fundoshi pull,99 +fuka (kantoku),99 +fugi jis,99 +fubuzilla (shirakami fubuki),99 +frilled innertube,99 +flyswatter,99 +flowers (innocent grey),99 +fatigues,99 +excalibolg,99 +emiya-san chi no kyou no gohan,99 +elenab,99 +elena ivlyushkina,99 +electabuzz,99 +eightman,99 +e-kingdom,99 +dragoner,99 +dokan (dkn),99 +dodonpachi saidaioujou,99 +dentist,99 +dawalixi,99 +dark link,99 +daitabashi kinu,99 +daidai (daidai826),99 +d-ryuu,99 +cutting mat,99 +crystal wings,99 +cress albane,99 +cracked egg,99 +contest winner,99 +conqueror of shambala,99 +commando (movie),99 +colombia pose,99 +coffeedog,99 +cobra no oyatsu,99 +closed game,99 +christie monteiro,99 +chiemo (xcem),99 +chibi miku (mayo riyo),99 +chen (cosplay),99 +castle of cagliostro,99 +castanic,99 +carmilla (swimsuit rider) (third ascension) (fate),99 +cardigan removed,99 +captain freedom,99 +candeloro,99 +business casual,99 +bunny nun (diva),99 +brave frontier,99 +boroboro no elf-san o shiawaseni suru kusuri uri-san,99 +blood on feet,99 +bianka durandal ataegina (valkyrie gloria),99 +beige,99 +barbie (character),99 +bandage on knee,99 +ayamine kei,99 +av-98 ingram,99 +archstreal,99 +april (arknights),99 +ange (angeban93),99 +amiya (newsgirl) (arknights),99 +alpha omega nova,99 +alice liddell (heart no kuni no alice),99 +akimichi chouji,99 +agasang,99 +adachi fumio,99 +a20 (atsumaru),99 +9aki,99 +94plum,99 +3d rod!,99 +33 gaff,99 +2sham,99 +2001,99 +1nilla',99 +100%,99 +0jae,99 +zynxy,98 +zeppy (azur lane),98 +zenigata mawari,98 +z.m. 
(school913102),98 +yurumawari,98 +yun yun (doupo cangqiong),98 +yukimi koume,98 +yudesoba,98 +ye fan,98 +yashiro kasumi,98 +yao fueifuei,98 +yamikota,98 +xiafuizui,98 +xerxes break,98 +who framed roger rabbit,98 +welchino,98 +wd (1106592840),98 +war fan,98 +walking bike,98 +volley-bu-chan (tawawa),98 +viper rsr,98 +urigarasu,98 +urayamashiro (artist),98 +umi suzume,98 +ulti (one piece),98 +ukuru (kancolle),98 +uchida yuka,98 +ty 1865,98 +tsuru-chan,98 +tr (hareru),98 +tori (driftwood),98 +toranoe,98 +too many knives,98 +tomcat (moconeko),98 +thinking emoji,98 +thancred waters,98 +terriermon,98 +tere asahi,98 +team magma grunt,98 +tanto (tc1995),98 +tanabe (studio tiamat),98 +takopii no genzai,98 +takayama mizuki,98 +takamine midori,98 +takamachi nanoha (aggressor mode),98 +taiki ken,98 +taiga (ookami mio),98 +tagawa gengo,98 +swat,98 +sunao (wokada),98 +subaki (fire emblem),98 +splatter print,98 +sorairo len,98 +soneda akane,98 +smoochum,98 +sirurabbit,98 +sinon (solus),98 +sightseer (pokemon),98 +shokupan (slvk12),98 +shirai tanuki,98 +shiohachi,98 +shinozuka arashi,98 +shinken!!,98 +shimoguchi tomohiro,98 +shima renzou,98 +shikimori (kawaii dake ja nai),98 +shaving crotch,98 +sergestid shrimp in tungkang,98 +sena airi,98 +sealand (hetalia),98 +scarf grab,98 +sawa2,98 +sankaku,98 +sailor venus (cosplay),98 +sacanahen,98 +reizou,98 +redgart,98 +rebecca (fire emblem),98 +ratryu,98 +rat huang,98 +rarami,98 +prunce (precure),98 +prism recollection!,98 +polka dot kimono,98 +pokemon ability,98 +plunger,98 +pectoral pillow,98 +papa-kun (destiny549-2),98 +oz vessalius,98 +over zenith,98 +osandon,98 +oricorio (pom-pom),98 +oomura yuusuke,98 +onabe no shime,98 +on piano,98 +omiza somi,98 +ogata rina,98 +nuudoru,98 +nrvnqsr chaos,98 +notori d,98 +nopeys,98 +nightmare blood (idolmaster),98 +nemui (nemui),98 +nemesis (resident evil),98 +neichiru,98 +neckerchief between breasts,98 +nazo kichi,98 +natsume aya,98 +natsu no ame,98 +nanpuu,98 +namidate,98 +nagase yuuki,98 +nadie,98 +myoushun,98 +mym (dragalia lost),98 +mura (kiyohime),98 +muku-chan (muroku),98 +mordred (swimsuit rider) (second ascension) (fate),98 +monica (little witch nobeta),98 +momoi nanabei,98 +momioka risa,98 +miyuzu,98 +miryoryo9,98 +miracle paint (vocaloid),98 +mexican dress,98 +meto (metrin),98 +mentaiko mamire,98 +medibang paint (medium),98 +mazamuno,98 +matsuoka michihiro,98 +matsuoka kiyone,98 +mario (mario portal),98 +marblehead (azur lane),98 +mal poi,98 +maimuro,98 +maeda hiroyuki,98 +m87 black hole,98 +lulu heika,98 +lost one zero,98 +lolliedrop,98 +lich (monster girl encyclopedia),98 +libe (hentai elf to majime orc),98 +leiq,98 +lavender shirt,98 +kurata mashiro,98 +kuku px,98 +kuinji 51go,98 +koyoi (iroha (iroha matsurika)),98 +koumajou densetsu 1,98 +komekueyo,98 +koma neko,98 +kobayashi kabura,98 +koakuma (cosplay),98 +kizuki miki,98 +kissing ear,98 +kishimoto masashi,98 +kirara bernstein,98 +kin mokusei,98 +kiki (koikuchikinako),98 +kenzo 093,98 +kazagumo kai ni (kancolle),98 +kawajiri hayato,98 +karasumi (aiseec),98 +karanak,98 +kabuto daigo,98 +jotti,98 +jessica rabbit,98 +jago dibuja,98 +izumi kei,98 +iwashi (iwashi008),98 +itou makoto,98 +itou ikuko,98 +isurugi noe,98 +isawo (lucanus19),98 +intrepid (azur lane),98 +infinite stratos academy uniform,98 +inconvenient tail,98 +inari1369,98 +idolmaster one for all,98 +ibushi (oath),98 +huion,98 +hotate rayan,98 +hoshi usagi,98 +honjitsu no shuyaku,98 +holding nunchaku,98 +hjm,98 +hissaa (starlight express),98 +hirokazu,98 +hinata (fire emblem),98 +hika (cross-angel),98 
+hifumi hajime,98 +hakamii,98 +hahifuhe,98 +h2o (ddks2923),98 +green blood,98 +gliscor,98 +ginrei (giant robo),98 +german shepherd,98 +genmon,98 +genka ichien,98 +gcg,98 +gawain (artist),98 +gauss 3<,88 +22 (tak ja),88 +zaveid (tales),87 +zap renfro,87 +za warudo,87 +z.nov,87 +yuuya (yu-ya's),87 +yuuraku yuraku,87 +yuunagi (seventh heaven),87 +yuuki kazuhito,87 +yuu (tiny evil),87 +yutian alice,87 +yurashima,87 +yukime (jigoku sensei nube),87 +yuki (cencoroll),87 +yuki7128,87 +yukaa,87 +yu hydra,87 +yorurokujuu,87 +yonic symbol,87 +yazawa nico (cosplay),87 +yatsuha (hachiyoh),87 +yatanukikey,87 +yarawi,87 +yamoto koki,87 +xkit,87 +xiaoguimist,87 +wheelie,87 +wet thighhighs,87 +wendy testaburger,87 +wendell,87 +weepinbell,87 +watashi nouryoku wa heikinchi de tte itta yo ne!,87 +watase sanetoshi,87 +wally99,87 +wakaba (kemurikusa),87 +waist hold,87 +vomiting rainbows,87 +viki (suikoden),87 +uzura kazuhisa,87 +uzaki yanagi,87 +usumy,87 +ushimochi,87 +urrrt,87 +ujiie moku,87 +udder,87 +twinkle crusaders,87 +tweaking own nipple,87 +turo (pokemon),87 +tsurupy,87 +tsumeki,87 +triuni,87 +traveler (journey),87 +totto (naka),87 +toshi hiroshi,87 +tonbo,87 +tomoe (fdhs5855),87 +tkbn r,87 +titan (generic) (shingeki no kyojin),87 +tiara (fairy fencer f),87 +temari114,87 +tarma roving,87 +taric,87 +tareko,87 +taono kinoko,87 +tan skirt,87 +tamuhi,87 +takeda yuuko,87 +takase yuu,87 +takahashi rie,87 +tachibana rui,87 +swoobat,87 +suzumiya haruhi no tsuisou,87 +super metroid,87 +sunao nako,87 +sugisaka umi,87 +suffolk (azur lane),87 +succubus (mabinogi),87 +stomach ache,87 +stoat girl,87 +stewsui,87 +stellar theater,87 +sprout (33510539),87 +spectral souls,87 +sou (mgn),87 +sorbet,87 +snowstorm,87 +slurping,87 +skull brooch,87 +skillet,87 +skeleton (helltaker),87 +sitting on drum,87 +silent sinner in blue,87 +shooting gallery,87 +shitogi eris,87 +shishio,87 +shiori (xxxsi),87 +shikaya,87 +shiina miyako,87 +shii (kairi-t-k0317),87 +shi-chen,87 +shanghmely,87 +seong mi-na,87 +sekhmet of death,87 +sein (nanoha),87 +seele0907,87 +sawed-off shotgun,87 +sawarineko,87 +satyuas,87 +satou akane,87 +satoshi (peso727),87 +satonaka narumi,87 +satetsu,87 +sana423,87 +saki (saki paint),87 +saikachi (ogre tree),87 +sai (bankoru),87 +sagami jun,87 +sa ioio,87 +ryusei (ryusei b),87 +rosenburg engel (idolmaster),87 +rose (elsword),87 +rooseputo 02,87 +roon (dark red grin) (azur lane),87 +robinexile,87 +rk (rktorinegi),87 +rinmei,87 +rikorin,87 +removing sock,87 +recare,87 +rahxephon,87 +rafchu,87 +pong (vndn124),87 +po0000000000,87 +ph.,87 +petralka anne eldant iii,87 +penis biting,87 +penguin hat,87 +patrick star,87 +passenger (arknights),87 +park ongjol,87 +panzerkampfwagen ii,87 +pantheon eve,87 +palossand,87 +palanquin d.va,87 +ozadomi,87 +otosume ruiko,87 +ori (momonimo),87 +orange sash,87 +ootato,87 +onionworkshop,87 +okosama lunch (sendan),87 +oichi (sengoku basara),87 +ogami shirou,87 +odaodaoaoda,87 +norinco,87 +nitta io,87 +nishi minami,87 +nishi juuji,87 +nin (lion),87 +nezha (the legend of luoxiaohei),87 +new pokemon snap,87 +nekololisama,87 +negiwo,87 +neg (101neg),87 +nazo no anime-dan,87 +nayozane (worker7),87 +nayoshi (r-744),87 +natsuiro xx,87 +nano-nano pudding,87 +nananana nanana,87 +nagoooon 114,87 +nagahama megumi,87 +mystic eyes of death perception,87 +mysterious heroine x (third ascension) (fate),87 +murata isshin,87 +muji-muji daruma (genshin impact),87 +muchabou,87 +mt. 
silver,87 +motto (night wear),87 +mori kotarou,87 +moonslanstudio,87 +monokini,87 +momiji (ninja gaiden),87 +mogeko castle,87 +mofumofuen (idolmaster),87 +moeru! jiten,87 +miyashiro karin,87 +misakino kukuru,87 +mirai suzu,87 +mirai (happy-floral),87 +minos (jashin-chan dropkick),87 +minase kaya,87 +minari (minari37),87 +millia jenius,87 +mikanniro,87 +midori aoi,87 +michiru donut,87 +miche,87 +mib mamizou,87 +meruru (oreimo) (cosplay),87 +menori,87 +melty blood: type lumina,87 +melty (suisei no gargantia),87 +melisaongmiqin,87 +mechanical boots,87 +mayoln,87 +matsuyama kumiko,87 +matsuki ringo,87 +matilda (matildax),87 +mass effect 3,87 +marnie (pokemon) (cosplay),87 +maria tachibana,87 +margery daw,87 +maqin,87 +manggapaegtoli,87 +mamoswine,87 +maccyman,87 +lunia,87 +lrh0123,87 +lotus position,87 +lingerie (aki3240),87 +lace-trimmed kimono,87 +la corda d'oro,87 +kuuki shoujo,87 +kusuriuri-san,87 +kushana,87 +kumajirou (hetalia),87 +kowata makoto,87 +koto suomi,87 +kongou sensei,87 +kohaku hearts,87 +kodama shichirou,87 +kito (kito2),87 +kiana mai,87 +kiana kaslana (valkyrie ranger),87 +kazami yuuka (cosplay),87 +kayou (kayou-bi),87 +katsura kokoro,87 +katou emiri,87 +kashmir,87 +kashiwagi yamine,87 +kari kenji,87 +kanimaru,87 +kaneko (bblogtinhan),87 +kamioka saki,87 +kafuu chino (cosplay),87 +jiraiya (housamo),87 +jiraiken,87 +jingle bell earrings,87 +jake muller,87 +jack (darkshero),87 +ivenglynn,87 +itoji,87 +ironashi,87 +irie kyousuke,87 +in cell,87 +ikeda emi,87 +hourai ninjin,87 +hououmaru rei,87 +hound 2 (sekaiju),87 +hk416 (midnight evangelion) (girls' frontline),87 +hitotsuki no yagi,87 +hiroki (hirokiart),87 +himiko (third ascension) (fate),87 +himajin (starmine),87 +hiiragi nana,87 +herme (ohisashiburi),87 +heracles (housamo),87 +helena kai (kancolle),87 +heart arrow,87 +hayama hayato,87 +hashimoto takayoshi,87 +haruwaka 064,87 +happy sugar life,87 +hangleing,87 +half-soles,87 +hair wagging,87 +gradient sleeves,87 +gozaemon,87 +goripan,87 +gomibuta kuzutarou,87 +gold ship (racehorse),87 +gold horns,87 +goal,87 +girafarig,87 +ginga hyouryuu vifam,87 +gaogaigar,87 +fuyube gin (huyube),87 +fuwafuwa-chan (kamiyoshi rika),87 +fujimaru (bluebrand),87 +fractalmagnolia,87 +foch (azur lane),87 +fake nose,87 +excel saga,87 +eureka seven ao,87 +etzali,87 +ennui heiwa miku,87 +emoji censor,87 +eltnum,87 +ellie (the last of us),87 +elf (dq10),87 +eir (fire emblem),87 +eihire,87 +eggshell hat,87 +dywx poison,87 +dynamax ball,87 +du mogu,87 +drums (artist),87 +double \n/,87 +doromizu,87 +dodok (gj77230),87 +dinosaur king,87 +detached ahoge,87 +demmy,87 +curly eyebrows,87 +cuna (qunya),87 +cum on tail,87 +cradling head,87 +constellation costume,87 +cole,87 +chinjuu hibakichi,87 +cheetah girl,87 +cardigan under jacket,87 +car crash,87 +cancer,87 +camp of hunger,87 +bunny ayumi,87 +bunches,87 +bummerdude,87 +brynhildr (swimsuit berserker) (fate),87 +brown hood,87 +broiler,87 +bowl stack,87 +blue dragon,87 +bloodycat,87 +blackhole-chan,87 +beni (bluebluesky),87 +belfraw martini,87 +bashosho,87 +banica conchita,87 +ballet dress,87 +ayame (tenchuu),87 +awatake takahiro,87 +avril vent fleur,87 +asura (asr4000),87 +assassin,87 +ashita no joe,87 +arthur (code geass),87 +artery gear: fusion,87 +arm shield,87 +arima nana,87 +arima kousei,87 +ariaridoradora,87 +aoi yuuji,87 +aoi minamo,87 +antiqq,87 +anteater,87 +amakusa shino,87 +althea (sakiya0000),87 +akthree0,87 +akanishi erika,87 +ajimu najimi,87 +ai-kun,87 +agatha (pokemon),87 +acid trip,87 +achromaru,87 +aaoshigatoutoi,87 +2d dating,87 
+2c=galore,87 +zooanime,86 +zonotaida,86 +zombie-ya reiko,86 +zhanzheng zi,86 +zeke yeager,86 +zashiki-warashi,86 +zairen,86 +yuuhi (ages),86 +yu mochi (kamiinu),86 +youbou,86 +you rei (blowback),86 +you naka,86 +yazwo,86 +yatyou,86 +yanmega,86 +yami shoujo,86 +yameta takashi,86 +yako,86 +xxxceller,86 +x-overd,86 +wotagei,86 +whispy woods,86 +warlock (sekaiju),86 +wakabayashi-kun ga nekasete kurenai,86 +vex (league of legends),86 +vault boy,86 +utu,86 +usuke (u skeeep),86 +unikurage,86 +umenomori chise,86 +uchuu ika,86 +uchio kazumasa,86 +two-tone necktie,86 +twinbee,86 +tsukise miwa,86 +trinity blood,86 +tourmaline,86 +tomodachi (tomofanart),86 +tokimeki memorial girl's side,86 +toki (tokihakashi),86 +tog (shoten),86 +tm (gf-tm),86 +therese (granblue fantasy),86 +the rolling girls,86 +teru zeta,86 +teramoto tomiko,86 +teikoku shounen,86 +teeth print,86 +tears to tiara,86 +tatsuta age,86 +tatl,86 +taro,86 +tanning oil,86 +talos (housamo),86 +takasegawa yui,86 +syou (endemic species),86 +suzui narumi,86 +sunfish,86 +street fighter x tekken,86 +strawberry heart,86 +standing restraints,86 +standing on another's head,86 +space ishtar (third ascension) (fate),86 +sousei no aquarion,86 +somei ooo,86 +slurpuff,86 +slave-chan (mino),86 +sl86,86 +skeleton (minecraft),86 +sitting on fence,86 +shuna (tensei shitara slime datta ken),86 +shouin,86 +shiromi (ringo),86 +shinohara natsuki,86 +shiina aya,86 +severed leg,86 +senshi (dungeon meshi),86 +saxasa kisuk,86 +satan (puyopuyo),86 +sacha,86 +saano chia,86 +sa (h28085),86 +ryuu (breath of fire ii),86 +ryu seung,86 +ryouya,86 +rutorifuki,86 +rusellunt,86 +rpameri,86 +romantic saga of beauty & devil,86 +robert garcia,86 +roadi3,86 +ro-a,86 +rinrin (927413),86 +rin (rin niji),86 +revealing swimsuit (dq),86 +resident evil 0,86 +rescue,86 +release the spyce,86 +reid hershel,86 +reco love,86 +rebecca (one piece),86 +rayet areash,86 +raindrop746079,86 +quess paraya,86 +quarian,86 +qian renxue (douluo dalu),86 +prophosphere,86 +prinz eugen (warship girls r),86 +poseich,86 +ponta (kounoike tsuyoshi),86 +pokiyu,86 +pinkcap,86 +petya (darker than black),86 +pecorine (new year) (princess connect!),86 +pearl (steven universe),86 +osana najimi (komi-san wa komyushou desu),86 +oriue wato,86 +nitocris (third ascension) (fate),86 +nishiga hachi,86 +negative,86 +narrative formation,86 +nanome to,86 +nanboku,86 +nagant m1895,86 +muvluv alternative strike frontier,86 +muscle envy,86 +muraicchi (momocchi),86 +ms. 
marvel,86 +mr.yun,86 +moeki (moeki0329),86 +moai (aoh),86 +miyazaki miyako,86 +mitsuru (madeinore),86 +miquella (elden ring),86 +mining helmet,86 +minami touma,86 +mimi wo sumaseba,86 +mikamin,86 +mika mikan,86 +midler,86 +mg kurino,86 +mental cube (azur lane),86 +menomorute,86 +menmen (menmen13s),86 +melli (pokemon),86 +meister otome uniform,86 +medicham,86 +mecco,86 +mc liz,86 +mating (animal),86 +massage chair,86 +maru (314),86 +maririn,86 +maria theresa (granblue fantasy),86 +maou (maoudaisukiya),86 +malga naruze,86 +makki (tobaccos),86 +maes hughes,86 +maclone,86 +machi (wm),86 +lunaluna (queen's blade),86 +lucky chloe,86 +lilith (p&d),86 +lilirulu,86 +light and night love,86 +lifeline (a384079959),86 +lethe-shion,86 +leaphere,86 +leah (airisubaka),86 +ldl (bcw1025),86 +kyoukyan,86 +kurumiya hato,86 +kuroshio maki,86 +kumacy,86 +koopo,86 +komori-san wa kotowarenai!,86 +kitano (zekkyon),86 +kishima kouma,86 +kisekisaki,86 +kirameki haruaki,86 +kinpatsu-chan (rucchiifu),86 +kinniku tarou,86 +kingu (fate),86 +key hair ornament,86 +kerno,86 +kensight328,86 +keki chogyegi,86 +kazesawa sora,86 +kazami haruki,86 +kayjae,86 +kawamoto hinata,86 +kate iwana,86 +kasutaso,86 +karasu-san (syh3iua83),86 +kaoru348,86 +kanmoku-san,86 +kanesada keishi,86 +kanbara akihito,86 +kajikawa yahiro,86 +kaigun bakuryou,86 +kagura (inuyasha),86 +kafka (arknights),86 +kaede to suzu,86 +jname,86 +jingburger,86 +jigoku meguri,86 +jellal fernandes,86 +isshoku (shiki),86 +ishigami kazui,86 +iren lovel,86 +iphone 11 pro,86 +invincible dragon (last origin),86 +iczer (series),86 +icym,86 +ichifuji nitaka (phase nine),86 +hyuse,86 +hood (lady of the shining sun) (azur lane),86 +honnryou hanaru,86 +holding rattle,86 +hige shinshi,86 +hide448,86 +hero (dka),86 +hemorina,86 +helen (idolmaster),86 +hcnone,86 +hazakura chikori,86 +hawkeye (seiken densetsu 3),86 +hatsuharu kai ni (kancolle),86 +hathaway noa,86 +hat bell,86 +haruse hiroki,86 +haruka armitage,86 +hanenosaki,86 +handler (monster hunter world),86 +hand on sheath,86 +hanayori jyoshiryou,86 +haine (howling),86 +haiiro gundan,86 +hachimi,86 +growlanser iv,86 +gotointhepark,86 +god eater 3,86 +gloria (summer 2021) (pokemon),86 +giri giri sisters,86 +gio (maroon0924),86 +gibbous moon,86 +ghost of tsushima,86 +geroika,86 +gemini seed,86 +garbodor,86 +garasuita,86 +gaimoon,86 +futatsuiwa mamizou (tanuki),86 +furfrou (natural),86 +fujita hidetoshi,86 +friedbun,86 +fio (nier),86 +female seth (street fighter),86 +fang qiao,86 +exposed brain,86 +existence,86 +electro cicin mage (genshin impact),86 +eclair (food),86 +earth defense force,86 +drum master (granblue fantasy),86 +doldol (rkwowlqrp),86 +dmith,86 +divergenceok,86 +dfd,86 +desmond miles,86 +deekei,86 +dead animal,86 +daenarys,86 +d-ten,86 +crystal maiden,86 +crusader (ragnarok online),86 +crossed belts,86 +crib,86 +corphish,86 +coco bandicoot,86 +coat rack,86 +chi lian (qin shi ming yue),86 +cheng xiaoshi,86 +charimei,86 +chano hinano,86 +champagne (azur lane),86 +chagamaka,86 +chabudai,86 +centurii-chan (artist),86 +cecilia (pangya),86 +cavalry,86 +captain mizuki,86 +c-da,86 +butajima john,86 +bunny head,86 +bu li,86 +brown long-eared bat (kemono friends),86 +bobobong,86 +blue leggings,86 +blowing whistle,86 +bloody handprints,86 +blonde girl (itou),86 +blanc neige,86 +blade runner,86 +bittersweet (dalcoms),86 +bisuke-tan,86 +big-d,86 +beshi,86 +beauty (pokemon),86 +bangosu,86 +ballroom e youkoso,86 +baddap sleed,86 +b:ga,86 +azzz (7334080),86 +azuma takeshi (andon kandelaar),86 +azuhira,86 +azama (fire emblem),86 
+awestruck,86 +ass freckles,86 +asatsuki (cookie),86 +arm slave (mecha),86 +argyle shirt,86 +arashiyama hotori,86 +aoha (twintail),86 +ankh (ooo),86 +anbj,86 +anarchojs,86 +amon (lord of the mysteries),86 +amitie florian,86 +amazons quartet,86 +alexandrite (houseki no kuni),86 +akiba's trip,86 +akenoin soumon,86 +akara kai,86 +akai suzaku,86 +aizawa chihiro,86 +ailu elf,86 +aidumi,86 +aegir (swimsuit) (housamo),86 +adachi yousuke,86 +acrylicstroke,86 +aaron huton (cyphers),86 +234 (1234!),86 +zizi niisan,85 +zhongwu chahui,85 +zebra girl,85 +yuuki mizuho,85 +yumenouchi chiharu,85 +yukiman,85 +yukata lift,85 +yuhito (ablbex),85 +yotaro,85 +yoshi (crossmind),85 +yoko.u,85 +yennefer of vengerberg,85 +yatagarasu,85 +yasu,85 +yamayoshi,85 +yamanome,85 +wori,85 +wing tattoo,85 +whyhelbram,85 +west 24,85 +vsk-94,85 +vill-v,85 +vietnam,85 +vialnite,85 +valitran,85 +utamaru (konomix),85 +ushiro muki,85 +usapyon,85 +uraki,85 +union jack bikini,85 +under the moon,85 +ugai yuichi,85 +u (mikaduki0720),85 +tsurugi hina,85 +tsukiori sasa,85 +tsukino murakumo,85 +tsuchiya kouta,85 +tseng,85 +triple horizontal stripe,85 +touhaku,85 +torisudesu,85 +too many birds,85 +tona bnkz,85 +thighhigh dangle,85 +thetis (last origin),85 +tel,85 +team galactic grunt,85 +tata (tataice),85 +tamura masafumi,85 +tama two (fukuya),85 +taliyah,85 +takuteks,85 +takabow,85 +tail brushing,85 +super saiyan god,85 +suou tsukasa,85 +sudako (dai011305),85 +ssamjang (misosan),85 +sptbird,85 +spice girl (stand),85 +soragane (banisinngurei),85 +soraao0322,85 +solar system,85 +soburi,85 +so nagi,85 +simon shades,85 +siebold (pokemon),85 +shovel knight,85 +shituzi,85 +shitou (1992116210),85 +shiny rod,85 +shiei no sona-nyl,85 +severed finger,85 +sera karen,85 +senoo kaori,85 +senki zesshou symphogear xv,85 +sei asagiri,85 +seedot,85 +schala zeal,85 +satou chagashi,85 +satofuji masato,85 +sashimono,85 +san-x,85 +sakura ab,85 +saji crossroad,85 +ryoumoto ken,85 +ru2n131,85 +roru (lol dessin),85 +rokki hero,85 +rim (kamitsubaki studio),85 +rennerei,85 +renji (orange ize),85 +reload9 yohji,85 +refia,85 +rappy,85 +rance quest,85 +rakuji tarahi,85 +rail (silverbow),85 +qbspdl,85 +pro-p,85 +pretzel bikini,85 +poses,85 +portmanteau,85 +popcorn 91,85 +pink cat,85 +piloswine,85 +panda (pandadesu),85 +pale eye,85 +ouya (maboroshimori),85 +outstretched foot,85 +oueo,85 +orobou,85 +orange (meitoku),85 +okita souji alter (swimsuit saber) (first ascension) (fate),85 +null suke,85 +noraico,85 +no name ninja,85 +nazunakku,85 +natsuzuka (ryou),85 +narusawa yui,85 +narusan beam2,85 +nanami to konomi no oshiete abc,85 +nanachise7,85 +nakarai takumi,85 +mytyl,85 +munakata (hisahige),85 +mukiguri,85 +muhogame,85 +motoi ayumu,85 +motchie,85 +mokuren (kunoichi tsubaki no mune no uchi),85 +moegi emo,85 +mizuno poppo,85 +miyai haruki,85 +mitama mayo,85 +misa (kaeruhitode),85 +mirakurun,85 +mine (wizard),85 +miku miku ni shite ageru (vocaloid),85 +mike luckas,85 +metal gear (robot),85 +mechanica (arms),85 +mecha danshi,85 +masuo,85 +masu shu,85 +mapi,85 +mad father,85 +machimote taikou,85 +ma tsukasa,85 +luren max,85 +lunar 2: eternal blue,85 +logiclr,85 +lims (neko2lims),85 +lilith-soft,85 +lilac (p-f easy),85 +licking stomach,85 +laslow (fire emblem),85 +kuzuryu io,85 +kuya (nu carnival),85 +kuroha uma,85 +kunimura hakushi,85 +kozomezuki,85 +konsu konsuke,85 +kokonobi,85 +koji (kohei66),85 +koio,85 +kobi420,85 +klin (girls' frontline),85 +kkry99,85 +kito (coyo),85 +kiske,85 +kishimen udn,85 +kisara (tales),85 +kirby's dream land 3,85 +king bradley,85 +kichin 
yarou,85 +kezi,85 +ken (1057168252),85 +kawamura toshie,85 +kawaku,85 +kasasasagi,85 +karasuma ryuu,85 +kaori (azumanga daioh),85 +kamen rider v3 (series),85 +kamen rider amazon,85 +kamdia,85 +kakizaki misa,85 +kaitan,85 +kagami (galgamesion),85 +jun wei,85 +jude mathis (school uniform),85 +jon shicchiou,85 +joker (2019),85 +john crayton,85 +jeanne d'arc alter santa lily (fate) (cosplay),85 +izumi soujirou,85 +iury padilha,85 +iro (boo iro),85 +imuraya ayuka,85 +illustrious (never-ending tea party) (azur lane),85 +ikelag,85 +iiimirai,85 +ihara asta,85 +iccoco,85 +i (kaiyou),85 +i-201 (kancolle),85 +honedge,85 +holysnow,85 +hiyama yuu (wee259hiyama),85 +helping,85 +heaven,85 +heart stickers,85 +hayanye,85 +hawawa,85 +harusame (user wawj5773),85 +haru aki,85 +hano haruka,85 +hannya (arknights),85 +hand on belt,85 +haishiki,85 +gyaza,85 +grimoire weiss,85 +grey collar,85 +gran saga,85 +gossa-tei,85 +gomosawa,85 +gogatsu no renkyuu,85 +glensaru,85 +gasterblaster,85 +ganik,85 +fuwa (precure),85 +fuguno,85 +frog hat,85 +flandre day,85 +flan-maman (goma),85 +finishing move,85 +feel (nasitaki),85 +executioner (girls' frontline),85 +excavator,85 +ese shinshi,85 +enjutsu,85 +enatsu,85 +eiyuu (eiyuu04),85 +eichi (skskdi12z),85 +edwin huang,85 +ear fondling,85 +dragon age,85 +double chin,85 +donut (zoza),85 +dog hat,85 +delphinium (darling in the franxx),85 +darkmuleth,85 +danna,85 +dagashi (place),85 +cutting own hair,85 +cupa (at2.),85 +cum in eye,85 +culotte (hosenrock),85 +crazy grin,85 +coomer (meme),85 +columbia (azur lane),85 +coffee-break,85 +clint barton,85 +clear (dramatical murder),85 +cleaning windows,85 +clarion,85 +cinccino,85 +chisa,85 +chilla (arms),85 +cherry (saber j),85 +chemical-x,85 +checkered jacket,85 +chanmura,85 +carol danvers,85 +carnivine,85 +capcom vs snk,85 +campbell gichou,85 +bugles on fingers,85 +bugle,85 +brough superior,85 +boku no risou no isekai seikatsu,85 +bnari,85 +bill cipher,85 +belle (disney),85 +bathroom scale,85 +bassa,85 +barrett,85 +baby carrier,85 +babo,85 +azazel (azazel-san),85 +ayumu (zenkou),85 +axia krone,85 +avengers: endgame,85 +asselin bb ii,85 +asiri senpai,85 +asagiri mai,85 +aro 1801,85 +arishima alice,85 +apple brk,85 +aoki yuriko,85 +aoki ryuusei spt layzner,85 +aoinagi,85 +aoi yuki,85 +aoi toori,85 +anya (spy x family) (cosplay),85 +anmita (rarutos),85 +amazake (drink),85 +amawa kazuhiro,85 +amatsume akira,85 +alban knox,85 +akutaa,85 +akr (qpqpqp),85 +akm,85 +akiyama nenehisa,85 +airy (bravely default),85 +adjusting shorts,85 +adachi shingo,85 +aayh,85 +6maker,85 +500 dollar four foot tall mareep (meme),85 +zky (oekaky),84 +yuzuttan,84 +yuuki (yukigeshou hyouka),84 +yumeno tanuki,84 +yooki (winter cakes),84 +yolanda,84 +yian kut-ku,84 +yellow armor,84 +yasukuni kazumasa,84 +yasu (segawahiroyasu),84 +xion32,84 +wr (karakusa senbei),84 +withered,84 +witch (left 4 dead),84 +white overalls,84 +white-corner,84 +wassnonnam,84 +walkalone,84 +w (1999 kyu),84 +vomi agogo,84 +uthy,84 +unconventional guitar,84 +ume (kancolle),84 +uedrk yamato,84 +tsurugi (swimsuit) (blue archive),84 +tsumiki akeno,84 +tsuka (handle),84 +tsubasa19900920,84 +trypophobia,84 +toe socks,84 +tk (butakuma),84 +timcanpy,84 +tied dress,84 +thwackey,84 +the incredibles,84 +the amazing world of gumball,84 +testicles on face,84 +tatenashi high school uniform,84 +tamano nae,84 +talulah the fighter (arknights),84 +takeko spla,84 +takeda sun,84 +takatan,84 +suzunami (kancolle),84 +suzuki sonoko,84 +supermarine spitfire,84 +super sailor uranus,84 +sunako (veera),84 +sumire (blue 
archive),84 +suika (atelier-yuu),84 +strawberry prince,84 +stick jitb,84 +starry hair,84 +spanish flag,84 +space channel 5,84 +sousai shoujo teien,84 +sorashima (117),84 +sonic adventure 2,84 +soldering iron,84 +skyhouse,84 +skunk ears,84 +sketti,84 +sier (girls' frontline),84 +sienna (henken),84 +shukurin,84 +shirt hold,84 +shirousa,84 +shirinda fureiru,84 +shinkansen,84 +shingo (picturepuzzle),84 +shikakui kyomu,84 +sheldon (splatoon),84 +shapes,84 +sebunsu,84 +scroll tube,84 +sayori (oskn),84 +satou memeko,84 +sasuga kei,84 +saruton,84 +sangai senki,84 +sakasa (guranyto),84 +sahara1127,84 +rosetta passel,84 +ristarte (kono yuusha ga ore tueee kuse ni shinchou sugiru),84 +ripple (mahoiku),84 +ringpearl,84 +rikkii (likito yuzu is),84 +remi (mozzaremi),84 +refrigerator interior,84 +red (pokemon) (cosplay),84 +recettear,84 +rear naked choke,84 +r (corolla),84 +quincy (nu carnival),84 +puni (atelier),84 +project luminasys,84 +prinplup,84 +potemkin (guilty gear),84 +pornhub,84 +plymouth (azur lane),84 +pig costume,84 +peperon (peperou),84 +penguin frontier,84 +peas,84 +paradise (swd3e2),84 +paperwork,84 +panorama,84 +oyu no kaori,84 +orifushi akina,84 +oono akira,84 +one man's dream ii,84 +on mecha,84 +nyucha,84 +nyarlathotep,84 +noelle holiday,84 +nitro+ chiral,84 +nishijima kai,84 +nikku (nzaf5832),84 +nightwing,84 +night demon,84 +nice boat (meme),84 +ngo,84 +neil dylandy,84 +natsushima memo,84 +narutaki fuuka,84 +narmaya (holiday) (granblue fantasy),84 +nanao (naoekaki25),84 +namiki meiko,84 +nakiri asahi,84 +nagato (kancolle) (cosplay),84 +naftosaur,84 +musashi (kancolle) (cosplay),84 +murmansk (azur lane),84 +murakami teruaki,84 +motoki (hisano motoki),84 +mostima (spellbreaker) (arknights),84 +moshimo ashita ga hare naraba,84 +mizusoba,84 +mizuno midori,84 +mizuki (kogetsu-tei),84 +misato hao,84 +military police brigade (emblem),84 +mikomu,84 +mike (mikeneko),84 +migu (iws2525),84 +midou tsukasa,84 +mian (3zandora),84 +mi mi ham,84 +messer ihlefeld,84 +may (2747513627),84 +matsurisu,84 +matsu (bandan),84 +maruyama,84 +maru (sasayama chikage),84 +mammon (reborn),84 +maku (l-u),84 +makoushi,84 +makinohara shouko,84 +maki (letusgomaki),84 +maihama minami high school uniform,84 +magni dezmond,84 +magmar,84 +ma-yu,84 +lycoris fubuki,84 +lucena winter,84 +lloyd asplund,84 +liyou-ryon,84 +listening,84 +lipstick mark on penis,84 +lioreo,84 +li shuwen (old) (fate),84 +lessar,84 +legoshi,84 +laughing man (ghost in the shell),84 +lalazyt,84 +lace hairband,84 +l.bou,84 +kuro-kun (nablack),84 +kumasawa (dkdkr),84 +kukkumann,84 +kt (kusare171),84 +koyomisa,84 +koutetu yarou,84 +koti,84 +koneko mari,84 +koko (koko3),84 +koisuru otome to shugo no tate,84 +kofa (ikyurima),84 +knight (fft),84 +kiritani haruka,84 +kinou no shika,84 +kindandowa,84 +kinatsu souju,84 +kimitoshiin,84 +kimblee,84 +khibiki,84 +kemono friends festival,84 +kel-tec,84 +kawa akira,84 +kannagi noel,84 +kamo 3,84 +kama (chocolate heaven) (fate),84 +kakutasu,84 +kakult2017,84 +kako (kemono friends),84 +kajiwara sora,84 +kaichou wa maid-sama!,84 +kaguya (kagaminomachi no kaguya),84 +kaejunni,84 +kaatoso,84 +k@bu,84 +jitsu hidari,84 +jin grey paladin,84 +jenmin12,84 +jeane (gensou suikoden),84 +jd (bibirijd),84 +jane maxwell,84 +jaina proudmoore,84 +ishiki (okota),84 +isako rokurou,84 +ink on face,84 +imizu (nitro unknown) (character),84 +imaizumi teruhiko,84 +ignis,84 +iganseijin,84 +ibuki (ibuki0118),84 +ibaraki shun,84 +hyugo,84 +hyou (hyouga617),84 +howe (azur lane),84 +hourai kochou,84 +hot drink,84 +hms orion (siirakannu),84 
+hirokazu (analysis-depth),84 +hirasaka ine,84 +hinase (jet hearts),84 +higashi no eden,84 +hestia (neko itachi),84 +hekikuu (kanaderuyume),84 +head fuse,84 +hatomugi (mugicha),84 +hasegawa fumi,84 +hari611,84 +hansel (sinoalice),84 +haiteku,84 +hachiya shohei,84 +gyudong123,84 +guriin,84 +guntank,84 +gowther,84 +gotcha! girl (pokemon),84 +good ass day,84 +gnns,84 +glutton,84 +george the bomb,84 +genjuu rou,84 +gavial the invincible (arknights),84 +gasuto (kamikami),84 +garukichi,84 +ganon,84 +gakkou no kaidan (anime),84 +fuurin rei,84 +fujiwara no mokou (cosplay),84 +fujimaru ritsuka (female) (anniversary blonde),84 +fujimachine (clayten),84 +fruit on liquid,84 +frederick (fire emblem),84 +fox-pop vli,84 +food-themed hat ornament,84 +follen (639594),84 +female mage (dungeon and fighter),84 +fcc,84 +father (diva),84 +excalibur galatine (fate),84 +eureka 814,84 +esu (transc),84 +eromettsu,84 +emu (emum),84 +eigaka,84 +egawa kusumi,84 +ecaflip,84 +e sdss,84 +e 2,84 +dydydyok,84 +dwarf (dragon's crown),84 +dress straps,84 +donald trump,84 +domoge,84 +doburoku (daiginjou),84 +dj copy and paste,84 +depo (typebaby505),84 +densuke,84 +debi tarou,84 +danpu,84 +daina (encore62),84 +crab man,84 +cr-r,84 +color coordination,84 +cologne (heartcatch precure!),84 +cloud9,84 +chiruru96,84 +child assassin (fate/zero),84 +chiba saori,84 +chemist (fft),84 +carcass (artist),84 +calpis118,84 +burnet (pokemon),84 +buchi0122,84 +booch,84 +bomssp,84 +blue mage,84 +bitter melon,84 +big o (mecha),84 +betoko,84 +beatrice (wild arms),84 +bbk (13zk),84 +basculegion,84 +baka-man,84 +bagon,84 +badger ears,84 +ayukawa tenri,84 +avatar (mabinogi heroes),84 +au11,84 +atsajh,84 +atoli (.hack//),84 +atair,84 +ashisi,84 +asagi marin,84 +ariko,84 +arata toshihira,84 +aozora kyuuji,84 +aonoji,84 +aoi (ittla),84 +anno masato,84 +annaka haruna,84 +andou inari official,84 +ander (at2.),84 +amemiya sayaka,84 +amakano,84 +alina l,84 +akiyoshi haru,84 +aki no urara no akane-iro shoutengai,84 +akahoshi kenji,84 +aizen sousuke,84 +aizawa kotarou,84 +ahat (ragnarok online),84 +adol christin,84 +adjusting headphones,84 +achikita chinami,84 +abyssal twin princess (white),84 +a-shacho,84 +zebra tail,83 +zapfish,83 +yuzuha (yuzuya),83 +yuxian youka,83 +yua (your diary),83 +yoshino keiko,83 +yorugami rei,83 +yoizuki (azur lane),83 +yi er fan club (vocaloid),83 +yellow teeth,83 +yanagi wakana,83 +yanagi (tsukiakari),83 +yamino kenji,83 +yairo (sik s4),83 +yahiro pochi,83 +xiling,83 +wringing,83 +wonder acute (umamusume),83 +wiffle gag,83 +whole note,83 +white delmo,83 +wat (worldlog),83 +viego (league of legends),83 +vf-31,83 +vegas (akg),83 +uzuki makio,83 +usotsuki,83 +usaginezumi,83 +urode,83 +unown o,83 +uninigumi,83 +ueda metawo,83 +turtonator,83 +tug of war,83 +tsujizen,83 +tsuchiyamada mitsuko,83 +trianon,83 +toyota sprinter trueno,83 +touto seiro,83 +totem,83 +torn necktie,83 +tmari,83 +the end (phrase),83 +tenchi muyou! 
uchuu hen,83 +team aqua uniform,83 +taroumaru (gakkou gurashi),83 +tape recorder,83 +taoru (t kiji),83 +tanline peek,83 +tanikake yoku,83 +taniguchi gou,83 +taishou,83 +table of contents,83 +suto (iamsuto),83 +sushi pizza rrr,83 +sunshine (1638509769),83 +suminoya kureha,83 +subway station,83 +striped sarong,83 +streets of rage,83 +stray cat (jojo),83 +steamed egg,83 +spiked horns,83 +souike,83 +soeda ippei,83 +siwasunohige,83 +shinonon (iso shino),83 +shinobu (tyno),83 +shinki (shinki59),83 +shijima (tanfn),83 +sheer gloves,83 +seripa,83 +self bondage,83 +seboneko,83 +sean matsuda,83 +scathach (fate) (cosplay),83 +saver (artbysaver),83 +satou reika,83 +sando (dukedevil),83 +samurai jack,83 +sakura (ukagaka),83 +sakasana (kaisen teikoku),83 +sakagami umi,83 +saint onii-san,83 +sage (mami1210),83 +sagas293,83 +saezuru usagi,83 +ryuujou (azur lane),83 +romeo and juliet,83 +robot sex,83 +ritsuko kubel kettenkrad,83 +rinwell (tales),83 +rico (gunslinger girl),83 +ricci,83 +retsuto,83 +renka (renkas),83 +reinhard von lohengramm,83 +redbaron,83 +rche (beatmania),83 +ray peng,83 +ramenwarwok,83 +ragnarok (demon sword),83 +purugly,83 +purple wristband,83 +purinnssu,83 +primiera (saga),83 +powered ciel,83 +pompompurin,83 +plivyou,83 +piyo (pixiv 2308057),83 +piers nivans,83 +panties on breasts,83 +pangoro,83 +ots-14 (ruler of the banquet) (girls' frontline),83 +oshiba ken,83 +orchid (maplestory),83 +orange collar,83 +oppai oppai,83 +opening eyes,83 +onaka sukisuki,83 +oh (aung ae),83 +numaguro (tomokun0808),83 +noyamano ringo,83 +northern parliament (emblem),83 +noako,83 +niwaka yuan,83 +nishio rina,83 +nina (wokada),83 +nephthys (p&d),83 +naui kudan,83 +narmaya (valentine) (granblue fantasy),83 +narita tamezou,83 +nantoka maru,83 +nanasuou,83 +nanako (melty blood),83 +name (oiuio),83 +myth1carts,83 +mysterious ranmaru x (fate),83 +myrrh (arknights),83 +myoya,83 +muslim,83 +mumaya,83 +muji body fitting sofa,83 +mr. nothing (arknights),83 +moshi (atelier33-4),83 +mogutofuoes,83 +mochoeru,83 +mo23,83 +mizutsuki rei,83 +misekiss,83 +miria (claymore),83 +mioda 69ch,83 +mino (udonge),83 +minazuki sarami,83 +minamoto mamechichi,83 +minagi hiyori,83 +military police,83 +migi (mm86262394),83 +midori (kancolle),83 +micon,83 +met (mega man),83 +men in black,83 +mega blaziken,83 +medu (rubish),83 +mayo (mayomr29),83 +masou shizuka,83 +maruro,83 +marui (koedame),83 +marina hayami,83 +marguerite (one piece),83 +marble-v,83 +manekineko5319,83 +mandaring,83 +manannan mac lir (fate),83 +maji (etonato),83 +mahou tsukai to kuroneko no wiz,83 +mahou shoujo taisen contest 1,83 +mahou shoujo suzune magica,83 +mabahn,83 +ma2,83 +m. 
lee lunsford,83 +luxury ball,83 +loalo,83 +littleshrimp,83 +lily strosek,83 +lastdark,83 +lakitu,83 +laio,83 +kyuutame,83 +kusanagi kikoku,83 +kururun (precure),83 +kurozero,83 +kuroneko (kuroneko works),83 +kuroino (0kuro8ino6),83 +kurogane ken,83 +kuonji shinra,83 +kuko,83 +kujou subaru,83 +kozakura (i s 15),83 +kouzuki nana,83 +kousaka alice,83 +kotobukiya,83 +korwa (summer) (granblue fantasy),83 +konno akikaze,83 +kokuu haruto,83 +kokorowatari,83 +koki (ryoushikiha),83 +koiwai yoshino,83 +koh rd,83 +kochi michikaze,83 +kissing foot,83 +kisaki oni,83 +kiryuu takahisa,83 +kiritomo koroha,83 +kirishima shizuku,83 +king kong (series),83 +king (tekken),83 +kine (kirby),83 +kimihagu,83 +kaze (kazesan),83 +kaze (fire emblem),83 +katsushika hokusai (festival outfit) (fate),83 +kasu (kasunohikari),83 +kappa worker (tag dream),83 +kaosu kun,83 +kamina pose,83 +kamijou sadame,83 +kama (beast iii/l) (fate),83 +kaliningradg,83 +kaku-san-sei million arthur,83 +kakei sumire,83 +ka941,83 +jungle wa itsumo hare nochi guu,83 +juna crawford,83 +jojo no kimyou na bouken: eyes of heaven,83 +johnny (nyansama0412se),83 +japari coin,83 +jaguar,83 +izumikuma,83 +isuzu ren,83 +ishizu kayu,83 +iseshi,83 +indai (3330425),83 +inai uchi,83 +imminent death,83 +illumination stars (idolmaster),83 +idunn & idunna,83 +ichimatsu nana,83 +ichigeki sacchuu!! hoihoi-san,83 +hygogg,83 +hufy,83 +hoshikawa tsukimi,83 +hitachi mako,83 +hiroichi,83 +hino kahoko,83 +hikasa youko,83 +heisa,83 +headwear with attached mittens,83 +hakumei to mikochi,83 +hakuhou (ende der welt),83 +haimei1980,83 +hahihu1782,83 +h8k,83 +guitar (guitars),83 +guan dao,83 +green nipples,83 +gotenks,83 +gomio (bb-k),83 +gomadoka,83 +golden rose,83 +gold headband,83 +gentiane (girls' frontline),83 +genda koujirou,83 +garin,83 +game club project,83 +gabriel (granblue fantasy),83 +futase hijiri,83 +furusawa yoriko,83 +funi mu9,83 +fumi (butakotai),83 +fujimori tonkatsu,83 +ft-17,83 +frog panties,83 +freyja (p&d),83 +french battleship princess,83 +fou (fate) (cosplay),83 +fffukuzawa,83 +fake scrollbar,83 +emilia percival,83 +elizabeth bathory (third ascension) (fate),83 +elemental gelade,83 +dudeunderscore,83 +dodory,83 +dim sum,83 +diamond pickaxe,83 +dezel (tales),83 +devin elle kurtz,83 +devil heavens,83 +detroit metal city,83 +dancer (fft),83 +daitou academy school uniform,83 +daisukerichard,83 +cutlery,83 +cure marine (cosplay),83 +cordelia (saga),83 +colorful kanan,83 +coelacanth,83 +coco jumbo,83 +clubroom,83 +clown mask,83 +clamperl,83 +chuor (chuochuoi),83 +chiba shuusaku,83 +chat noir,83 +cham fau,83 +chagama teishoku,83 +cat paw,83 +caim (drag-on dragoon),83 +buko (bukosuki),83 +bubble wand,83 +bshi edayo,83 +breast massage,83 +blazblue insignia,83 +bishoujo terror,83 +bigxixi,83 +bellavoirr,83 +beeswax (weisser sand) (arknights),83 +beam cannon,83 +beagle,83 +bang (gesture),83 +bandaid on ear,83 +bakusou kyoudai let's & go!! 
max,83 +bad face,83 +azure (capriccio),83 +ayase naru,83 +awayuki ramika,83 +avengers: age of ultron,83 +astraea (fate),83 +ashe (under the moon),83 +asako (itiba),83 +asahi rokka,83 +asa ni haru,83 +armguards,83 +arlmuffin,83 +arind yudha,83 +argule0901,83 +aquamu,83 +apple print,83 +aoyama kotoha (mitsuboshi colors),83 +andira (summer) (granblue fantasy),83 +an fyhx,83 +amo (silent bomb19),83 +amethyst (houseki no kuni),83 +amanda kenny,83 +altines,83 +akujiki musume conchita (vocaloid),83 +akiakane,83 +akasaka asa,83 +akaitera,83 +aduare,83 +acguy,83 +abondz,83 +1999,83 +0417nao,83 +zyunya,82 +zweihander,82 +ziz glover,82 +zashiki usagi,82 +yuzu shio,82 +yukimura hyouga,82 +yui ko,82 +yuguraniru,82 +yueqin (monnpiano),82 +yoshinatsu,82 +yoshida akihiko (style),82 +yoneme mei,82 +yanhe,82 +yamane takao,82 +watashi no tame ni nuginasai!,82 +wa2000 (ballroom interlude) (girls' frontline),82 +vox,82 +vichya dominion (emblem),82 +usami nanako,82 +urshifu,82 +urashimasakatasen,82 +uninori,82 +underlighting,82 +unbuckled,82 +umirororo,82 +uliel,82 +ukraine,82 +udon (memai no kabe),82 +toshi punk,82 +toshi (little-fluffy-cloud),82 +torottye,82 +torn neckerchief,82 +tokonoma,82 +titanic (movie),82 +time machine,82 +tilde (ice cube),82 +tikoh,82 +tiger (p),82 +tiasis,82 +tarabagani,82 +takt op. destiny,82 +takeshima tsutako,82 +takena-c,82 +synchroman,82 +swinging legs,82 +suzune hapinesu,82 +surume (clavis),82 +surfing orange,82 +super mario world 2: yoshi's island,82 +sun shang xiang,82 +suigintou (cosplay),82 +suga leon,82 +standing on chair,82 +springfield (queen in radiance) (girls' frontline),82 +sponty,82 +soyubee,82 +soul (tamashii),82 +sothis (fire emblem) (cosplay),82 +songmil,82 +solcha,82 +sobi (dnenqkswja),82 +snake box sneak,82 +skunk tail,82 +sitting on wall,82 +sieyarelow,82 +shutsuri,82 +shut hell (character),82 +shun soku,82 +shromann,82 +show (rinnetenshow),82 +shiratama mochi,82 +shiraishi yuzuki,82 +shinobi (sekaiju),82 +shimeno puni,82 +shiira (nya rl),82 +shiiki (love-saber),82 +shano hiyori,82 +see-through body,82 +scarlet dango,82 +sasaki ryou,82 +sakuraidai,82 +sako (namocake),82 +saitou shunsuke,82 +saeki shun,82 +saboten,82 +rupee,82 +runta,82 +rumiko (rmeco),82 +rumia (compacthuman),82 +rogue (7th dragon),82 +rinkah (fire emblem),82 +rikopin,82 +reverse (bluefencer),82 +revenge,82 +ressha sentai toqger,82 +renren,82 +reia 76,82 +reason (ficafe),82 +real bout high school,82 +rapunzel,82 +ranger (azur lane),82 +rakuto mangan,82 +racing miku (2011),82 +queen of hearts (card),82 +qianzhu,82 +qianqian,82 +puyopuyo quest,82 +prototype (game),82 +pornstar,82 +poporing,82 +ponzu rui,82 +pomu (pomu me),82 +pokemon ranger uniform,82 +pokemon gym,82 +pirason,82 +patriot (arknights),82 +ozu shion,82 +otakeyan,82 +osmanthus,82 +orange armor,82 +ooshio (azur lane),82 +onna shunin kishi mieko,82 +on liquid,82 +okishiji en,82 +ogata mamimi,82 +norba shino,82 +noragami sota,82 +niur,82 +nirvash,82 +nira (nira box),82 +neukkom,82 +nekosuke (oxo),82 +nc731,82 +natsui tuna,82 +nasakixoc,82 +nao-08,82 +nannung,82 +nanako (to heart 2),82 +nana nakano,82 +namiki itsuki,82 +nama udon,82 +nagi (akito),82 +monohoshizao,82 +momobami kirari,82 +momi,82 +moja (moja4192),82 +moekichi,82 +moai (moai world),82 +mizushirazu,82 +miyabi urumi,82 +miraroma,82 +minashiro soushi,82 +mimiyama kiichi,82 +mimikyu (cosplay),82 +mikoto (ff9),82 +miguel rivera,82 +mian lang,82 +mezul,82 +medoi,82 +matsuda takato,82 +mato tsuyoi,82 +mato seihei no slave,82 +masupa kiriu,82 +mash rene figaro,82 +mary (granblue 
fantasy),82 +manyuu kaeru tasuke sansei,82 +manjoume fumi,82 +mandy (grim adventures),82 +mandrill,82 +mairudo (mildcoffee1117),82 +mage (warcraft),82 +mad burnish,82 +m (m073111),82 +m37 (summer parader) (girls' frontline),82 +m16a4,82 +love morocc,82 +linoone,82 +li qingning (the legend of luoxiaohei),82 +lavie head,82 +lapis re:lights,82 +kyokugen dasshutsu: 9 jikan 9 nin 9 no tobira,82 +kuzuryuu yaichi,82 +kty (04),82 +koyanskaya (assassin) (second ascension) (fate),82 +kousaka shigure,82 +kouji (astral reverie),82 +konoha (arcana heart),82 +kohinata sora,82 +kizuki erika,82 +kitchen dragonmaid,82 +kisaki nana,82 +kirimori toya,82 +king (one-punch man),82 +kinako (nurupoga),82 +kimi to boku,82 +keyhole panties,82 +kevin kaslana,82 +keureu (tiqkdtiqkd10),82 +ken to mahou to gakuen mono,82 +kayano ai,82 +kasseus maximus,82 +kamen rider v3,82 +kahlua marjoram,82 +kachikachipiroo,82 +kabukimonogatari,82 +juliet persia,82 +jeanne d'arc (formal dress) (fate),82 +ixion saga,82 +itou hikari,82 +ito22oji,82 +inushi,82 +inori (xyz5568),82 +inazuma eleven (game),82 +ina (gonsora),82 +imi negev,82 +ifpark (ifpark.com),82 +ichika (ichika manga),82 +ichijou eika,82 +hyuuman,82 +hua cheng,82 +hozenkakari,82 +holding flower pot,82 +holding dice,82 +hoicyo,82 +hiyuu (hiyualice),82 +hisuian braviary,82 +hironii (hirofactory),82 +hirococo,82 +hiragi ringo,82 +hide and seek,82 +hello planet (vocaloid),82 +heart attack,82 +hazuki gean,82 +hayami yuujirou,82 +hataya,82 +haruto yuki,82 +hands on another's wrists,82 +hand to hand,82 +hana (mew),82 +half-split chopsticks,82 +hakutaku,82 +haje,82 +h&k g41,82 +gundam thunderbolt,82 +griffon (last origin),82 +gretel (black lagoon),82 +gravel (modeling night) (arknights),82 +goya (xalbino),82 +gokou tamaki,82 +glass wall,82 +genis sage,82 +geiru toneido,82 +garter-velvet,82 +fyu-neru,82 +fuyu,82 +fur-trimmed thighhighs,82 +fujiwara aya,82 +fujinuma satoru,82 +forced partners,82 +floatzel,82 +file cabinet,82 +fikkyun,82 +fever-san,82 +feet together,82 +faucre the evil overlord,82 +eye poke,82 +extra hands,82 +eruption,82 +elfriend (shiranui flare),82 +dvd,82 +dutchko,82 +dustox,82 +dou-t,82 +dizmathik,82 +denken,82 +denial,82 +demyx,82 +delorean,82 +daylightallure,82 +daniela dimitrescu,82 +dairyo3,82 +daiji pt,82 +da (datako),82 +cure papaya,82 +cote,82 +copycat (dryfood),82 +comiket 92,82 +colorfag,82 +cloud meadow,82 +clothes iron,82 +circle garyuu,82 +cinderella dream (idolmaster),82 +chouchin obake,82 +choroli (chorolin),82 +chiruku,82 +charlie nash,82 +chain paradox,82 +centipede girl,82 +calm (artist),82 +c-kyuu,82 +byuub,82 +buster machine 7,82 +blue scales,82 +black prince (azur lane),82 +black joa,82 +binzume yousei,82 +bianka durandal ataegina (bright knight: excelsis),82 +betei,82 +bead curtain,82 +basukechi,82 +barok van zieks,82 +baltimore (finish line flagbearer) (azur lane),82 +backless pants,82 +b.c,82 +ayane (swimsuit) (blue archive),82 +aussa the earth charmer,82 +astaroth (p&d),82 +ashiyu,82 +asc11,82 +aru (abyssinia),82 +art556 (girls' frontline),82 +arios (orochi yuta),82 +arado balanga (xchd7538),82 +aqua hisui,82 +appo (36786257),82 +aouji,82 +anzai miyako,82 +amidada,82 +alice parade,82 +alenka,82 +al aoi aoba,82 +akefumi,82 +akamaru saasha,82 +acchi (koiyimknp),82 +abyssal twin princess (black),82 +a kun,82 +3di project,82 +2020 summer olympics,82 +108 (toowa),82 +zwei (rwby),81 +zutto mayonaka de ii no ni,81 +ziz (pantwo),81 +zasshu tamashii,81 +zantyarz,81 +zan partizanne,81 +z46 (her first summer) (azur lane),81 +yuzhi,81 +yuuzaki,81 
+yume keikaku,81 +yukichiro,81 +yui sora,81 +yokota takumi,81 +yata (yatao zzz),81 +yanma,81 +xlscaler,81 +xi xeong,81 +wujia xiaozi,81 +wsfw,81 +windblade,81 +wind a breath of heart,81 +wildcat (kusonemi),81 +wei (promise 0820),81 +wedding cake,81 +washio sumi,81 +wani (mezo),81 +wang-pac,81 +visqi,81 +villain pose,81 +velociraptor,81 +uron-rei,81 +urase shioji,81 +unomi,81 +ueno tsuki,81 +type 95 ha-gou,81 +tube top lift,81 +tsuzuri (itosousou),81 +tsuzuki shiori,81 +tsukiyama shuu,81 +triton (housamo),81 +trinity glassfield,81 +trigger,81 +tries,81 +transgender flag,81 +toy train,81 +totsuki tooka,81 +tonio trussardi,81 +tomaco,81 +toki to shishin,81 +tob,81 +tnt (aaaazzzz),81 +thundercracker,81 +throne room,81 +the wizard of oz,81 +tentai senshi sunred,81 +taroimo (00120014),81 +tamanegiya,81 +tama (tamakaka1031),81 +takeda mika,81 +takamichis211,81 +tachibana sakuya (god eater),81 +swallow (bird),81 +suna kiririto,81 +sukuemon,81 +suketto sanjou!,81 +sueyuu,81 +stup-jam,81 +stump cover,81 +spike,81 +soveno,81 +sougetsu izuki,81 +somebody (leiking00),81 +solokitsune,81 +soft & wet,81 +small testicles,81 +sleeves removed,81 +skysign ft,81 +single half glove,81 +silentdin,81 +sig sauer mcx,81 +shrimpman,81 +shiyun,81 +shitodo kuroji,81 +shishou (cookie),81 +shiritori,81 +shiratama yomogi,81 +shino megumi,81 +shing (sorairo factory),81 +shin subarashiki kono sekai,81 +shikkoku no sharnoth,81 +shijie jianfa,81 +senhaku,81 +sekiya naru,81 +scotch (cookie) (style),81 +scavia10,81 +saryuu evan,81 +sara crispino,81 +saji genpou (true),81 +saitou (tiger & bunny),81 +runamatu,81 +ruin guard (genshin impact),81 +ruei (chicking),81 +rosalie de hemricourt de grunne,81 +romiy,81 +roi (liu tian),81 +renoa yu,81 +ren (dramatical murder),81 +regalia the three sacred stars,81 +re: cutie honey,81 +raseruta,81 +rainmaker,81 +raidy,81 +radioneet,81 +quattro (nanoha),81 +quatre raberba winner,81 +puyocha,81 +puriketsu corgy,81 +professor um,81 +priest (tera online),81 +pov bullying,81 +potemki11,81 +pork,81 +poop on a stick,81 +ponytail girl (kamisimo 90),81 +pomelomelon,81 +plants vs zombies,81 +plant request,81 +plant monster,81 +pirochi,81 +pink doragon,81 +ping hai (warship girls r),81 +pillion,81 +petit ramune,81 +penis hug,81 +partial bodysuit,81 +panqlao,81 +oumi (rsag),81 +otsumami (02mami),81 +orix buffaloes,81 +onimusha soul,81 +omiso (omiso),81 +okudaira akira,81 +okosama lunch,81 +ohmu,81 +oggy,81 +ogakuru,81 +nys,81 +noe (ppppriver),81 +nobiiru arm,81 +ninto,81 +nicholas (azur lane),81 +next purple,81 +nemes,81 +negimiso1989,81 +nayu tundora,81 +natural sign,81 +narcissism,81 +napstablook,81 +nakura haru,81 +nakahara (takayama toshinori),81 +nagehazushi,81 +nagakura (seven walkers),81 +myouren temple,81 +my sunshine,81 +muuyiie,81 +mutsuki (tsugaidanuki),81 +mutsuki (ama245),81 +muelsyse (arknights),81 +mordred (formal dress) (fate),81 +mordeth,81 +moose,81 +moonlit,81 +mononobe kyoma,81 +mongguri,81 +mofetousu furuna,81 +mo:oku,81 +mizuki shiranui,81 +miyamoto konatsu,81 +mitsuki sohara,81 +mito itsuki,81 +mirui,81 +mirin (granblue fantasy),81 +miria harvent,81 +mireille bouquet,81 +minazuki shigure,81 +mimengfeixue,81 +mikawa miso,81 +mi8pq,81 +mezashi (mezamashi set),81 +metal wings,81 +meidri,81 +mega absol,81 +mcp150 plus,81 +mcdobo,81 +maxima,81 +matsubara tsuyoshi,81 +materializing,81 +marubonman,81 +marshall (wahooo),81 +marse (rokudaime),81 +mars expedition,81 +mario kart wii,81 +marigold,81 +marguerite fatima,81 +manabe rika,81 +majisuka gakuen,81 +maizken,81 +mairin (pokemon),81 
+mahou shoujo lyrical nanoha detonation,81 +mad kimo,81 +macne nana,81 +lukeskytera,81 +loli bitch island,81 +liliya (kaetzchen),81 +li zhu,81 +leipzig (azur lane),81 +lapis lazuli (steven universe),81 +lanlanlap,81 +land striker,81 +kurosu jun,81 +kurosaki kazui,81 +kuronoiparoma,81 +kuroe (madoka magica),81 +kurama (naruto),81 +kuhuku006f86,81 +ksenolog,81 +koyuri shouyu,81 +kouzuki anna,81 +koto tsubane,81 +konayuki fururi,81 +komori shuri,81 +komine,81 +kokumotsu,81 +kojo (0124),81 +kirin tarou,81 +kick-ass,81 +kevn,81 +kelinch1,81 +kei-co,81 +kawajuu,81 +kanon (wild arms 2),81 +kagurazaki shizuki,81 +kaga rin,81 +juniper actias,81 +jubeat,81 +jougenmushi,81 +joint lock,81 +jjw1029,81 +jiyu (jiyusi),81 +jinjide shaonian,81 +jeralt reus eisner,81 +jean havoc,81 +jc shikikan,81 +izumi rion,81 +izakaya,81 +iwasaki masakazu,81 +itsuki (houkago no pleiades),81 +irozuku sekai no ashita kara,81 +inochigake demo tarinai no sa,81 +infinity gauntlet,81 +in cauldron,81 +imaizumin-chi wa douyara gal no tamariba ni natteru rashii,81 +ijac ray,81 +icwine,81 +ice reizou,81 +hyadain no kakakata kataomoi - c,81 +huyan zhuo (fate),81 +huntail,81 +houkago climax girls (idolmaster),81 +hotechige,81 +hoshikuzu,81 +holding tripod,81 +holding cable,81 +hishimiya tsugaru,81 +hinokumo f,81 +hikashou,81 +higyaku no noel,81 +hero (do-belman),81 +hellmatio,81 +hector rivera (alive),81 +heatran,81 +hayate x blade,81 +hata4564,81 +haruka (tactics),81 +haru (haruxxe),81 +harrymiao,81 +hand gagged,81 +hamuhamu,81 +hachitani (sunaba suzume),81 +green sports bra,81 +greco-roman architecture,81 +gopnik,81 +gonzalez (machita chima),81 +glider,81 +glass teacup,81 +glacia (pokemon),81 +girlycard,81 +girls' generation,81 +gift (game),81 +genpatsu (cookie),81 +gas,81 +gaius (nikoniko2),81 +fuwamoko momen toufu,81 +fukuda935,81 +froth,81 +fragile: sayonara tsuki no haikyo,81 +fox girl (jaco),81 +foothold trap,81 +foo (pixiv54892036),81 +firo prochainezo,81 +finish line,81 +fine (symphogear),81 +fighter (dragon's crown),81 +fictional sister,81 +ferdinand (honzuki no gekokujou),81 +facehugger,81 +eva (hq8nn1ef),81 +energy spear,81 +eclosion,81 +drinking from condom,81 +dribbling (basketball),81 +dragon dildo,81 +double driver,81 +dorothy (sinoalice),81 +dori (genshin impact),81 +dissidia final fantasy opera omnia,81 +denki-gai no hon'ya-san,81 +delta 9,81 +dagappa,81 +curren chan (sakutsuki ma cherie) (umamusume),81 +crown print,81 +crashtv,81 +cote d'azur widowmaker,81 +cloud strife (cosplay),81 +claude kenni,81 +chuppa (katotsuba),81 +chirico cuvie,81 +chibiterasu,81 +ceobe (summer flowers) (arknights),81 +cavalry (maslow),81 +caravan stories,81 +camui1104,81 +body soaping,81 +bocchi the rock!,81 +blue rope,81 +black rose dragon,81 +binder clip,81 +beauty (bobobo-bo bo-bobo),81 +beanstalk (gift uncompleted) (arknights),81 +battleship girl,81 +bano akira,81 +bangqiao yan,81 +aulbath,81 +audiodude,81 +audio-technica,81 +ashley (pokemon),81 +asashio (azur lane),81 +asaba yuu,81 +aoiro (t aoiro123),81 +aobe mahito,81 +anparu,81 +anniechromes,81 +aniplex,81 +ame (conronca),81 +ambipom,81 +amayofuu,81 +amatiz,81 +amano yuu,81 +amano yoshitaka (style),81 +amagami (makise tsubaki),81 +alt (ctrldel),81 +alchemist (sekaiju),81 +akutabe,81 +akagi (plum and snow) (azur lane),81 +akagi (kancolle) (cosplay),81 +aikawa touma,81 +ai (aria),81 +agarest senki 2,81 +adele balfetto,81 +aboutama,81 +34 (sanjushi),81 +zuiun (kancolle),80 +zoirun,80 +zifletts,80 +z1 leberecht maass (kancolle) (cosplay),80 +yyy (zelda10010),80 +yuzukaze rio,80 +yun 
(simoun),80 +yufine (epic seven),80 +yotsuba (sister princess),80 +yobai,80 +yasato,80 +yamabuki (yamabuki xxxxx),80 +yakumo beni,80 +xin hua,80 +xigbar,80 +xiayehongming,80 +xiaoju xiaojie,80 +veryhardloco,80 +utsumi karmin,80 +usubeni sakurako,80 +uraomote,80 +unhappy refrain (vocaloid),80 +udakyo,80 +uchiha symbol,80 +tuoer,80 +tsukimori madoka,80 +trisha elric,80 +touka kureha,80 +touhara asuha,80 +tougo,80 +toudou kasumi,80 +torii5011,80 +tori rui,80 +toilet seat,80 +tika (mika4975),80 +tianhu (the legend of luoxiaohei),80 +thunderseal,80 +three sizes,80 +thief (disgaea),80 +teratsuki,80 +tarakoutibiru,80 +tansho,80 +tanisi (hosimade),80 +tamago gohan,80 +takurou,80 +takuan (takuanlunch),80 +takeno koko,80 +tabitha (pokemon),80 +synth (iahfy),80 +suzutsuki kanade,80 +suzuakks,80 +sunao (souis),80 +sui (isekai ojisan),80 +steven a. starphase,80 +spring rider,80 +soukitsu,80 +snow (gi66gotyo),80 +slugma,80 +skull kid,80 +siruphial,80 +sirosoil,80 +silverstar017,80 +shown,80 +shoujo mahou gaku littlewitch romanesque,80 +shopping district,80 +shiren (monochrome structure),80 +shion (kizuro),80 +shio (7203802),80 +shino (mijinko),80 +shinkaui,80 +shima chiyo,80 +shiina sakurako,80 +shibuya takami,80 +shachi kamaboko,80 +sewing kit,80 +seto (harunadragon),80 +seraphita (xenogears),80 +senpai (tawawa),80 +selim bradley,80 +seadra,80 +scrapped princess,80 +sawada marina,80 +satsuki inari,80 +saniko (honchu),80 +sanazura hiroyuki,80 +sally (bofuri),80 +sakon04,80 +sakayanagi arisu,80 +sailing,80 +rpk-16,80 +rowya,80 +robe lift,80 +rivalz cardemonde,80 +rita rossweisse (phantom iron),80 +riding machine,80 +ribbon (happinesscharge precure!),80 +reverse prayer,80 +retweet chicks,80 +reset kalar,80 +removing bandaid,80 +religious offering,80 +regis altare,80 +redrabbit44,80 +red flowers,80 +ramiel,80 +ragfes,80 +qizhu,80 +ps5 chan,80 +project diva f 2nd,80 +professor nemo (fate),80 +preyanan suwanpramote,80 +poppin'party,80 +ponita,80 +polka dot hoodie,80 +pleasure-treasure,80 +pinguinkotak,80 +pikachu costume,80 +pickle (grappler baki),80 +phantom thief lapin,80 +pear sauce,80 +pazu,80 +park junkyu,80 +paper man,80 +panza,80 +pamela ibiss,80 +p0ckylo,80 +orii (orii i),80 +oozora haruka (harukana receive),80 +oni gini,80 +okinawa habu (kemono friends),80 +okapi tail,80 +ogs (orgasm88),80 +oda nobuna,80 +oda kippoushi (fate),80 +obsydia (nijisanji),80 +numel,80 +number10 (hagakure),80 +nukoosama,80 +nomiya (no 38),80 +niku harumaki,80 +nelson (warship girls r),80 +natsu (nattiyann),80 +nasubin (nasubisamurai15),80 +narutaki fumika,80 +narumizg,80 +nanjou terumasa,80 +namo,80 +naked hood,80 +nakamura naoto,80 +nagisa (psp2i),80 +mushoku no hourousha,80 +musanix,80 +murakumo (kancolle) (cosplay),80 +moyashi baasuto,80 +motsu (selshia12),80 +mospeada (mecha),80 +mortar headd,80 +moral cacoethes,80 +monorino,80 +monarch (black gerard) (azur lane),80 +momae makku,80 +mole on nose,80 +mokyuko,80 +moge-hera,80 +mizuna (water emp-tei),80 +mizuhashi kaori,80 +miyazawa fuuka,80 +miyake shinobu,80 +miwajou,80 +mitsuki (naruto),80 +misa pika,80 +mior,80 +minxei,80 +minazuki mizu,80 +mimme (haenakk7),80 +milssak,80 +metal wire,80 +messiah cage,80 +meow nyang,80 +men'youjan,80 +meltan,80 +melan blue,80 +meito,80 +megami device,80 +mega charizard y,80 +media (pani poni),80 +may9,80 +matsuri (hidamari sketch),80 +matsura (nagatosan),80 +maskman,80 +masiromu,80 +mashiro kta,80 +masabodo,80 +maplestory 2,80 +maonatten,80 +mainichi compile heart,80 +maguro (ma-glo),80 +magcargo,80 +macula marius,80 +lyy,80 +lyna 
(yu-gi-oh!),80 +lumen (gunvolt),80 +luetzow (azur lane),80 +lucifel (el shaddai),80 +luca (jasdavi),80 +lisa (seiken no blacksmith),80 +lich (granblue fantasy),80 +liang qi,80 +lever,80 +lee (monsterheart),80 +leavv,80 +ldfe mk2,80 +lazlo (gensou suikoden),80 +lavenza (persona 5),80 +lang (chikage36),80 +lan xiezi,80 +lafolie,80 +kyuusugi toku,80 +kyouou ena,80 +kyouno aki,80 +kylo ren,80 +kwaejina,80 +kurosuke (hipparu),80 +kurorettsu,80 +kumichou (ef65-1118-ef81-95),80 +kukuru (arc the lad),80 +kudou chitose,80 +konohata mira,80 +komeo15,80 +kitagawa mahiro,80 +kise saki,80 +kim bae-eo,80 +kijima saki,80 +kaya (nari1-24),80 +katsuragi kai (kancolle),80 +kata rosu,80 +kasugayama high school uniform,80 +kasuga yui,80 +kapiten70,80 +kanda (ura-kanda),80 +kanaoto neiro,80 +kana (fire emblem) (male),80 +kakeku,80 +kaguya (srx61800),80 +jungle crow (kemono friends),80 +juanmao,80 +js kaga (kancolle),80 +jiujiuyatou (yayanzz),80 +izumo neko,80 +italian flag print,80 +isis eaglet,80 +inukaze yamu,80 +inu-t,80 +inoue kiyoshirou,80 +innocent cluster,80 +in pot,80 +imoko (neeko's sister),80 +ikameshi (ika meshi),80 +iiha toobu,80 +ii orc no hi,80 +ichinose hana,80 +ichijou takakiyo,80 +ibex,80 +hyaku shiki,80 +hua ben wuming,80 +houru,80 +houraisan chouko,80 +hoshinomiya girls' high school uniform,80 +hoshiiro girldrop,80 +honoboooono,80 +homura910210,80 +holding clothes hanger,80 +hogen (housamo),80 +hmax,80 +hito (nito563),80 +hilling care,80 +higurehiiro,80 +hayase mina,80 +haruo (clownberry),80 +haruna mao,80 +hands on another's arm,80 +hand in mouth,80 +hallelujah essaim,80 +hajimete no koi ga owaru toki (vocaloid),80 +guillotine cross (ragnarok online),80 +gotoba sora,80 +gothorita,80 +gomi (kaiwaresan44),80 +golden time,80 +glowing skin,80 +giuseppina ciuinni,80 +girly air force,80 +georgi popovich,80 +gan-viking,80 +gainos priestess melpha,80 +gabite,80 +futoshi,80 +fushuu,80 +fumitan admoss,80 +fujikawa arika,80 +frostcyco,80 +frip,80 +freed turing,80 +frau bow,80 +forced perspective,80 +folding bicycle,80 +fengya,80 +fengling (furin-jp),80 +face in crotch,80 +etchimune,80 +enya geil,80 +enni,80 +en (enxxx),80 +ema (kuguiema),80 +elephant tail,80 +elc (arc the lad),80 +dvddvd (meme),80 +dungeons & dragons: shadow over mystara,80 +drsunshine,80 +digimon card game,80 +desert tech mdr,80 +dennou tenshi jibril,80 +deatheach,80 +david (fate),80 +daruia (sabitare),80 +cure parfait,80 +cross channel,80 +cremia,80 +cramped,80 +coffeechicken,80 +clay,80 +classy-black-haired girl (hayabusa),80 +circus (studio),80 +chukachuka,80 +christiane friedrich,80 +chloroform,80 +chin (motio7201),80 +cheschorv,80 +chalkboard sign,80 +cattleya regina games,80 +cat breakdancing (meme),80 +card with aura,80 +cantaloupe,80 +camera lens,80 +cacodemon,80 +cable tie,80 +c-wing,80 +burn the witch,80 +burger king,80 +brws,80 +bright noa,80 +boin,80 +body jewelry,80 +blade master (elsword),80 +black bustier,80 +beruka (fire emblem),80 +bamuth,80 +azumaya toushirou,80 +asymmetrical eyebrows,80 +astolfo (sparkling frills) (fate),80 +astgenne (arknights),80 +aster arcadia,80 +asakura ryou,80 +asai ichiko,80 +asahana jun,80 +arknights: endfield,80 +arctic hare (kemono friends),80 +apple (ygbhjdbiulsg),80 +aopanda,80 +anonymous (4chan),80 +anima miko,80 +aniece (modeseven),80 +anemachi,80 +anchor tattoo,80 +ameko53133,80 +aliens,80 +aladdin (sinoalice),80 +akebono kai (kancolle),80 +akaza (kimetsu no yaiba),80 +agetake,80 +agemasen! 
(meme),80 +ace attorney investigations 2,80 +abe hikaru,80 +a nightmare on elm street,80 +@shun,80 +13o,80 +zubora na kintoki,79 +zhuzi,79 +zeshgolden,79 +zero a,79 +zen juraku,79 +zen'in mai,79 +zanku,79 +yuuki. (limit),79 +yutaka7,79 +yumekui,79 +yume (grimgar),79 +yukisame,79 +yukina (masyumaro610),79 +yukimochi (yume),79 +yukikawa sara,79 +yukarite,79 +yuiofire,79 +yuama (drop),79 +yu cha,79 +young savage,79 +yougen kitsune,79 +you haruka,79 +you (granblue fantasy),79 +yoshizuki kumichi,79 +yoshiie,79 +yorousa (yoroiusagi),79 +yjy,79 +yizumi,79 +yaopei,79 +yanase mai,79 +yakitori (oni),79 +xelgot,79 +xecty ein,79 +wutu (1183238220),79 +wooden railing,79 +wonder zone,79 +where's wally,79 +wet bra,79 +wario land shake,79 +wakaki tamiki,79 +wachi (hati1186),79 +vibrator on clitoris,79 +veight,79 +vegetablenabe,79 +valentine (guilty gear),79 +utachy,79 +urahara,79 +unk kyouso,79 +universal bulin (azur lane),79 +unaji,79 +ume (ittla),79 +ueshita,79 +uchouten kazoku,79 +tyuga,79 +tugeneko,79 +tsukumo yuuma,79 +tsukumi (tkmillust),79 +tsuda akira,79 +tsuchiya akira,79 +tsubameyado,79 +trimmau (fate),79 +tomyoo,79 +tokita arumi,79 +toki kureha,79 +tobacco,79 +tiramii,79 +time lapse,79 +the legend of zelda: oracle of seasons,79 +ten (urusei yatsura),79 +temir,79 +tanziya (codlxcold),79 +tanbonota46,79 +tan shirt,79 +takanashi,79 +taira takehiro,79 +taggo,79 +tagane,79 +t'au,79 +sumo (komokomo1201),79 +suiren (mystia6b),79 +sugihara (sugihara2000),79 +suga koutarou,79 +station memories,79 +star pin,79 +ssn (sasa8u9r),79 +spadelake,79 +snake-eyed kanako,79 +sky guy art,79 +skating rink,79 +siro (asteblanc),79 +single drill,79 +shoujo ramune,79 +shougayaki (kabayaki 3),79 +shockwave,79 +shirono mirai,79 +shirofuku yukie,79 +shimura takako,79 +shikkoku neko,79 +shigatsu (4gate),79 +sernia iori flameheart,79 +sena chifuyu,79 +sekiri,79 +seira (mermaid melody pichi pichi pitch),79 +scrunchie removed,79 +satou aoi,79 +sasahara kanji,79 +sangou (girls und panzer),79 +sami (3a3i3a3i),79 +sakurai haruto,79 +sakura card,79 +sagiri (ulpha220),79 +sagattoru,79 +sagara misae,79 +sabagebu!,79 +ruuku (azukieru),79 +rozu ki,79 +ronoue genji,79 +roco (katsuya1011),79 +rocking school (idolmaster),79 +rippajun,79 +ringetsu,79 +ringabel,79 +rickenbacker 4001,79 +rick (kirby),79 +richard viii,79 +reversi,79 +reulem,79 +renee (negative hero),79 +rekise,79 +red clouds,79 +red babydoll,79 +recruitment bag (arknights),79 +raine sage,79 +raina,79 +radiata stories,79 +pururun z,79 +print capelet,79 +prat rat,79 +porcupine ears,79 +poponko,79 +poodle,79 +poi poifu,79 +pocari sweat (artist),79 +pnoji,79 +player (god eater burst),79 +pk (mukasihasakana),79 +pinattsu,79 +phoenix wright: ace attorney - justice for all,79 +persimmon (lsxh3),79 +peorth (aa megami-sama),79 +pedestrian lights,79 +paz ortega andrade,79 +parade,79 +ortho shroud,79 +ornate clothes,79 +orein,79 +oolong,79 +ooishi kuraudo,79 +onigiri noka,79 +on counter,79 +old maid,79 +okome (ricecandy),79 +officer,79 +nyatoran (precure),79 +nyami,79 +noumin joemanyodw,79 +nomura teruya,79 +noise (mokusei),79 +nimu,79 +nilgiri (girls und panzer),79 +nijou katame,79 +nifuji hirotaka,79 +nicole (lion),79 +natural (module),79 +narita rumi,79 +nanbo ataru (attall),79 +namekian,79 +namaata,79 +nakamura hinata,79 +naga (pixiv70891418),79 +muta poo,79 +motomiya daisuke,79 +mori sonou,79 +mole on back,79 +mokoiscat,79 +mogg,79 +mofuji,79 +miyuki yaya,79 +mitus,79 +mitaka,79 +misora inaho,79 +mismatched animal ear colors,79 +mirai (mirai76 ),79 +millium orion,79 +mille 
(dieci),79 +mikumo osamu,79 +mijinko (83nabe),79 +mickey mouse (cosplay),79 +mia (world flipper),79 +mg (mica),79 +metang,79 +melmaid,79 +mayano top gun (sunlight bouquet) (umamusume),79 +matsukai mao,79 +mash kyrielight (fgo x lawson),79 +mariwai (marireroy),79 +manly tears,79 +mana (418208360),79 +mamenomoto,79 +mal (malberrybush),79 +makuhita,79 +makai tenshi djibril 2,79 +mahito (tranjistor),79 +maeda shiori,79 +mado (mukade tou),79 +maccha cocoa (mococo),79 +luftwaffe,79 +liquid weapon,79 +liaowen,79 +letter hair ornament,79 +leeannpippisum,79 +latvia (hetalia),79 +lancelot (code geass),79 +kuurubiyuutei gankyou,79 +kuroume (aihikarikuroume24),79 +kuronomine,79 +kurono genbu,79 +kurokuma (kuro kumagaya),79 +kurohal,79 +kuroha ai,79 +kurobane,79 +korokoro daigorou,79 +komine sachi,79 +kokonoe miya,79 +kokkuri-san (gugukoku),79 +kodiak bear (kemono friends),79 +kodamasawa izumi,79 +kno1,79 +kisuke,79 +kissing shoulder,79 +kiriuzasu,79 +kikurage (sugi222),79 +kerorin,79 +kenzaki ririka,79 +kel-tec rfb,79 +keiryuu seo,79 +kazamaki matsuri,79 +kazakami yuu,79 +kasugano haruka,79 +karla (fire emblem),79 +kareya,79 +kanase (mcbrwn18),79 +kamejiro (sasakame),79 +kaleka,79 +kakizato,79 +kageru (mofnyan),79 +kagamine rin (roshin yuukai),79 +kaga cape,79 +k (li),79 +juri (nattoutomato),79 +junketsu no maria,79 +jun'you kai ni (kancolle),79 +john hathway,79 +jimon asuta,79 +jijii (nicoseiga91467756),79 +jigokuraku,79 +japan air self-defense force,79 +ivris,79 +italian commentary,79 +isuzu kai ni (kancolle),79 +ishizaka ryuudai,79 +iron bars,79 +irogomi,79 +iro (sekaixiro),79 +irisu makina,79 +inuko (redconstellation),79 +inubashiri momiji (cosplay),79 +inoue kousei,79 +indonesian clothes,79 +indeedee,79 +inabi,79 +inaba teitoku,79 +imura (shiro maru),79 +ikujitto,79 +ike masato,79 +ika,79 +igarashi daiji,79 +ice shard,79 +ibuki douji (swimsuit berserker) (first ascension) (fate),79 +ian olympia,79 +i b b e,79 +"i""s",79 +hyperbudd,79 +how to train your dragon,79 +houraisen runa,79 +hot kakigoori,79 +hoshimawa,79 +hoshi no samidare,79 +hornet (hollow knight),79 +hongcasso,79 +honey dipper,79 +homeo,79 +holding magazine (weapon),79 +hiyori mizuki,79 +himitsucalibur (fate),79 +himeshaga,79 +himesato maria,79 +himenomikan,79 +hiiragi nemu,79 +hieung,79 +hiba (p-p-purin),79 +hellnyaa,79 +heart arms,79 +hatamoto (kotoba),79 +hasegawa chisato,79 +harpy (closers),79 +hand wave,79 +hamada miku,79 +guren nishiki,79 +gunsou1350,79 +gungun (hakutou7887),79 +grey umbrella,79 +gradient shirt,79 +gou d,79 +gomashiwo o,79 +glisten,79 +gilbert nightray,79 +gekkou ookami,79 +ganyu (genshin impact) (cosplay),79 +galarian slowpoke,79 +futabu,79 +futaba 841,79 +fuse (apex legends),79 +fujin yumi (fire emblem),79 +frilled cape,79 +fluffydango,79 +fireman's carry,79 +fina (sa47rin5),79 +famepeera,79 +eyyy,79 +europa (summer) (granblue fantasy),79 +epuko,79 +energy arrow,79 +emma (victorian romance emma),79 +ellen (majo no ie),79 +elbing (azur lane),79 +dwyer (fire emblem),79 +dvd (object),79 +dream academy uniform,79 +dragon (monster girl encyclopedia),79 +dracovish,79 +double buttjob,79 +donovan baine,79 +dokyuu hentai hxeros,79 +digitan (porforever),79 +dido (muse) (azur lane),79 +despicable me,79 +demon mages,79 +dears,79 +dean stark,79 +day walker1117,79 +dasha,79 +danbooru (site),79 +dai-oki,79 +dacho,79 +d futagosaikyou,79 +cure miracle (ruby style),79 +cruhteo,79 +cotta (heleif),79 +conveyor belt sushi,79 +cocona vatel,79 +cocoasabure,79 +clarissa arwin,79 +clamp (circle) (style),79 +clalaclan philias,79 
+choujuu kishin dancouga,79 +chocoshi,79 +chikuwabu,79 +caterpillar (alice in wonderland),79 +cala,79 +cagliostro (symphogear),79 +bunchou (bunchou3103),79 +buchi holes,79 +bow (paper mario),79 +bloody wings,79 +blood hood,79 +blonde girl (popopoka),79 +black rose (.hack//),79 +bismarck (kancolle) (cosplay),79 +banchii,79 +azumi risa,79 +asuka keisuke,79 +asa (teng zi),79 +armored gloves,79 +armaldo,79 +arkray,79 +arjent,79 +aria wintermint,79 +aojiru,79 +ankai (rappelzankai),79 +android girl (itou),79 +anda (pennyroyal tea),79 +anastasia (swimsuit archer) (second ascension) (fate),79 +amy (madoka magica),79 +ameshizuku natsuki,79 +amano yoshitaka,79 +alexstrasza,79 +akari seisuke,79 +akaneman,79 +akane hazuki,79 +ak-47 (girls' frontline),79 +ak-15,79 +adagumo no saragimaru,79 +aburisamon,79 +9is,79 +90i,79 +88 (einnimnech),79 +1000,79 +yuusha ni narenakatta ore wa shibushibu shuushoku wo ketsui shimashita.,78 +yuuri (yu-gi-oh!),78 +yuuna minato,78 +yuto takumi,78 +yuino (fancy party),78 +yuiki (cube),78 +yeti (creature),78 +year of the snake,78 +yasiro,78 +yago8 pp3,78 +ya-man,78 +xiaojiaju,78 +wyrdeer,78 +wrist blades,78 +women's wallet,78 +wilbell voll-ersleid,78 +white trim,78 +white robin,78 +wazukani,78 +watanuki kimihiro,78 +wanda maximoff,78 +v2 gundam,78 +utage (disguise) (arknights),78 +urethral fingering,78 +tsuda takatoshi,78 +trento (azur lane),78 +touma (tomatooo018),78 +toshi,78 +tonberry,78 +tomozu,78 +tomatita,78 +toima (beat0829),78 +tl (xdd),78 +tira (soulcalibur),78 +tatsuno malm,78 +tateishi shima,78 +taneda risa,78 +tamagokake gohan,78 +tama launcher,78 +takkayuuki,78 +takeyama (angel beats!),78 +takano ui,78 +takanashi nazuna,78 +taiyou no kiba dougram,78 +tagame (tagamecat),78 +tachibana sylphynford,78 +tachibana hibiki (symphogear) (another),78 +tabe ayumu,78 +sylpheed,78 +sword mastery academy uniform,78 +suzushiro akane,78 +supercell,78 +sumishi (sumisi 3),78 +suminoe takako,78 +static cling,78 +stargazy pie,78 +starfleet uniform,78 +star rod,78 +st parasu,78 +soroi mitsuzou,78 +something (omori),78 +sock on penis,78 +snow (game),78 +simone mandl,78 +silltare,78 +silent hill (movie),78 +shirogane lilly,78 +shiro youduki,78 +shiranui kaede,78 +shiraba (sonomama futene),78 +shinshia,78 +shinra kusakabe,78 +shima (landsuzume),78 +shiime,78 +shack,78 +seventeen (st17215),78 +sena izumi (ensemble stars!),78 +scythana,78 +satin sheets,78 +sasasi,78 +sasaki rika,78 +sasaki (dkenpisss),78 +saru (monkey magic3),78 +sanallite (tsukumo sana),78 +sakusaku,78 +sakake asobi,78 +saitou natsuki,78 +sagami rin,78 +s10021,78 +ryuugajou nanana no maizoukin,78 +rune factory 5,78 +rumia (darkness),78 +rope gag,78 +rin (torikissa!),78 +richard suwono,78 +ribbon bar,78 +reverse paizuri,78 +restaint,78 +reptileye,78 +renshiu,78 +renakobonb,78 +reef (sanomsai products),78 +red (warioware),78 +reco love gold beach,78 +ranken,78 +randolph orlando,78 +qwilfish,78 +quinn (league of legends),78 +potato (air),78 +pooor,78 +pontaro18,78 +plum,78 +pleiadean,78 +plasma cutter,78 +pizanuko,78 +piza rokumai,78 +pinkwaters,78 +philena ivy,78 +petoka,78 +pensive,78 +pelican (s030),78 +pan-nya,78 +pads,78 +otohime mutsumi,78 +orca (kemono friends),78 +onion (onion and pi-natto),78 +onimusha,78 +olivia (kureiji ollie),78 +okutama tarou,78 +okuri ookami,78 +oimo (14sainobba),78 +nuime,78 +nozomu144,78 +northern little sister,78 +non (nonzile),78 +nomura taeko,78 +nishiwaki yuuri,78 +nishinoya yuu,78 +nishijima waon,78 +new wave (idolmaster),78 +nekomura otako,78 +neige hausen,78 +natsusora wakana,78 
+natsukawa kuina,78 +narusegawa riko,78 +naraka (nijisanji),78 +namamo (kura),78 +nagomi yayado,78 +nagi (exsit00),78 +musukichi,78 +mumistar,78 +multicolored necktie,78 +mugon,78 +motsuba,78 +mototenn,78 +motatei,78 +monk 2 (sekaiju),78 +mo (deciliter),78 +miyuli,78 +mitosansan,78 +misaka imouto 10032,78 +miracle nikki,78 +minamino kanata,78 +mimi (halloween) (princess connect!),78 +mikan toshi,78 +mika miche,78 +migchip,78 +michael roa valdamjong,78 +message in a bottle,78 +meru,78 +meikko-chan (j7w),78 +mechanical dragon,78 +mayafufu,78 +matcha (mattyan),78 +masa ni,78 +marirero a,78 +maractus,78 +mapyuhin za puremiyamu,78 +mamahaha,78 +mako mori,78 +makin tama,78 +majin vegeta,78 +majin go!,78 +mainz (azur lane),78 +magical mirai len,78 +magical astronomy,78 +madara hato,78 +machinist (final fantasy),78 +maaka karin,78 +luis cammy,78 +lucyfer,78 +lola bunny,78 +lobelia carlini,78 +lieselotte sherlock,78 +lens,78 +leaf skirt,78 +lasts,78 +lantern earrings,78 +l.k,78 +kyouyasai4423,78 +kyoukai no rinne,78 +kyoffie12,78 +kuzunoha amehiko,78 +kuzugitsune (inarikami),78 +kurosawa kazuto,78 +kurosaki shun,78 +kurosaki coco,78 +kuronaga,78 +kuran (mkmrl),78 +kurage modoki,78 +kuon (break through),78 +kunoichi (sengoku musou),78 +kunimitsu (9nimi2),78 +kuma daigorou,78 +kugimiya madoka,78 +kubfu,78 +kt. (kaisou-notagui),78 +koutate,78 +kona sleipnir,78 +kogasa-san's mother,78 +kofji (cookie),78 +kobayashi rindou,78 +kobato.,78 +klink,78 +kiritanpo (food),78 +kfp employee (takanashi kiara),78 +kenran butou sai,78 +kennen,78 +kazuo daisuke,78 +kazuma (scryed),78 +katwu (gensou ninja yashiki),78 +katsuten,78 +katsukare,78 +karin.,78 +kara no shoujo,78 +kanzaki shiori,78 +kamura reiri,78 +kamidanomi,78 +kaina (tsubasakuronikuru),78 +kageyama ritsu,78 +kagetomo midori,78 +jintsuu (azur lane),78 +java sparrow,78 +iyagatteru kimi ga suki,78 +iwauchi tomoki,78 +ishitsu kenzou,78 +ishii takuma,78 +inazuma eleven ares no tenbin,78 +ikazuchi akira,78 +idon,78 +ibrahim (nijisanji),78 +hyena boy,78 +hydra splatling (splatoon),78 +houraisan kaguya (cosplay),78 +hololive error,78 +holding chess piece,78 +holding bunny,78 +hita (hizokuseikogeki),78 +hiroe (cosmos blue-02 421),78 +hiro (hirohiro gorira),78 +himemiya shuang,78 +himarisu (hida mari),78 +hige wo soru. soshite joshikousei wo hirou.,78 +hayashi keita,78 +hashima izumi,78 +haori haruki,78 +hangover,78 +hanging legs,78 +hanamonogatari,78 +hana (hana mo arashi mo),78 +hal360,78 +haikawa hemlen,78 +haiiro (hi ghi ro),78 +gunlance,78 +gryebooks,78 +growling,78 +ground,78 +green lantern (series),78 +gomulgong,78 +girls und panzer gekijouban,78 +gg-chan,78 +getter robo (1st series),78 +genkai zero,78 +gatari,78 +fuyumi kazuki,78 +fuyuki030,78 +fuurinji miu,78 +futagawa fumi,78 +frilled bracelet,78 +frieren,78 +frankenstein,78 +flower swing,78 +flour (cookie),78 +floro (7th dragon),78 +flick (sal23),78 +film reel,78 +filha,78 +fallen tree,78 +fake whiskers,78 +f-35 lightning ii,78 +eyebrows visible through mask,78 +ensis exorcizans,78 +enjou sakuya,78 +emily armond,78 +em s,78 +elk115,78 +elise von dietrich,78 +eleaaa,78 +eilinna,78 +eientei,78 +eichi,78 +ego trigger,78 +effie (fire emblem),78 +e.m.m.i. 
(metroid),78 +dreamoon,78 +dobure18,78 +dining room,78 +deviruchi hat,78 +death parade,78 +deadprince,78 +daruk,78 +daring tact (umamusume),78 +daigaga,78 +d.va (gremlin),78 +curled tail,78 +cure amour,78 +cradling phone,78 +cozyu,78 +coo (kirby),78 +cherub,78 +chapayev (the captive cavalier) (azur lane),78 +chang wufei,78 +cartagra,78 +cake mogo,78 +butterflyfish,78 +brown outline,78 +bound toes,78 +boom microphone,78 +book to mouth,78 +book strap,78 +bomb devil (chainsaw man),78 +blaine (pokemon),78 +biting tongue,78 +biretta,78 +bird on leg,78 +bingwei huang,78 +berserker r,78 +bell sleeves,78 +beleven,78 +barber pole,78 +bana (stand flower),78 +ballroom,78 +bakushi (kaeritai0609),78 +baiguiyu,78 +bad end march,78 +bad ass,78 +backstage,78 +azumaya koyuki,78 +ayase miya,78 +auru t,78 +astral chain,78 +askeladd,78 +arnold (jojo),78 +armless,78 +aoi usagi,78 +anti-eyebrow piercing,78 +anonymous drawfag,78 +anni minto,78 +anne of green gables,78 +ameoto,78 +amco,78 +amano kazumi,78 +alu.m (alpcmas),78 +alolan boy,78 +alexiel (pixiv6211566),78 +akimichi chouchou,78 +akeboshi kagayo,78 +akagami no shirayukihime,78 +aikawa ren,78 +agwing86,78 +ace combat 04,78 +zumi tiri,77 +zubatto (makoto),77 +zassou maruko,77 +zaizen touko,77 +yuusaki riko,77 +yuuko (030 yuko),77 +yuu (isis7796),77 +yuno385,77 +yuni (school festival) (princess connect!),77 +yukiko hime,77 +yori (shitsuon),77 +yoracrab,77 +yonezuka ryou,77 +yoko (nz g),77 +yoimoriyoru,77 +ygo (kintsuba),77 +yamoge,77 +yamamoto takeshi,77 +yagaminoue,77 +xyanaid,77 +xkaishaku,77 +wuliu heihuo,77 +woman yelling at cat (meme),77 +wolffeld price,77 +wolf tengu extra (touhou),77 +wolf (raidou-j),77 +wenzheng147,77 +wcdonalds,77 +wang man,77 +wanaxtuco,77 +walrein,77 +waku,77 +wakamepiza,77 +waiting for kiss,77 +vexen,77 +venus eleven vivid!,77 +urasuji samurai,77 +unique (pixiv12704744),77 +umegiri ameto,77 +umapyoi (phrase),77 +twc (p-towaco),77 +tuuuh,77 +turn x,77 +turkey min,77 +tricky 46,77 +tranquillianusmajor,77 +toshibou (satsukisou),77 +torso flash,77 +toropp,77 +torn hoodie,77 +tona-gura!,77 +tokopi,77 +tine (fire emblem),77 +the king of fighters xii,77 +teria saga,77 +tatsuya (atelier road),77 +tate yukimi,77 +tasogare mimi,77 +takumi namuchi,77 +takatoo nanase,77 +taekwon kim,77 +t jiroo (ringofriend),77 +suzuko (star8383),77 +suyamori,77 +susumu-sensei,77 +sushi geta,77 +surgery,77 +supocon,77 +suguro ryuuji,77 +steely dan,77 +soya (sys ygo),77 +soumakyo,77 +sock bow,77 +soba (sobaya1938),77 +small hands,77 +sleeping with eyes open,77 +sl8 (girls' frontline),77 +skywarp,77 +sinanju,77 +side-tie costume,77 +shuumatsu no harem,77 +shu (loveeater),77 +shouma (bravespiritya),77 +shizuki aya,77 +shiro sousu,77 +shionootsu,77 +shiohara shin'ichi,77 +shiny pantyhose,77 +shinonome haru,77 +shinoi,77 +shin yuya,77 +shiguri,77 +sheryl nome (cosplay),77 +shati,77 +seppuku,77 +sen'yuu.,77 +sela (08180074),77 +sekka koyori,77 +sekino roko,77 +sekai de ichiban oppai ga suki!,77 +seedflare,77 +security guard,77 +second heaven,77 +sealeo,77 +samurai sentai shinkenger,77 +sakurayu haru,77 +sakurai izumi,77 +sakaki chizuru,77 +saito yoko,77 +sabito (kimetsu),77 +s.e.e.s,77 +ryouku,77 +ryoma (rym 369),77 +rya (elden ring),77 +rurun rururica,77 +rope necklace,77 +roman imperial,77 +robba-san (wangphing),77 +rikei ga koi ni ochita no de shoumeishitemita,77 +rhapsode,77 +reyn (xenoblade),77 +rankebu,77 +ragnarock city,77 +r-type nirvana,77 +qianqiu wanxia,77 +qian ye (qys3),77 +python (fire emblem),77 +pre (preecho),77 +praise the sun,77 +pixiv 
succubus,77 +pipette1223,77 +pink stripes,77 +pienahenggou,77 +phalanx 2 (sekaiju),77 +pepsi nex,77 +patricia (madoka magica),77 +paryi,77 +oyaman,77 +orimoto mimana,77 +"ore ga ojou-sama gakkou ni ""shomin sample"" toshite rachirareta ken",77 +ookawa wataru,77 +ookamiden,77 +oodenta mitsuyo,77 +ooarai (ibaraki),77 +onomiya,77 +onda takeshi,77 +omiya (louise-louis-lucille),77 +oman (evld),77 +okitsune (okitsune-sama),77 +okawa friend,77 +oirin,77 +odori momoha,77 +oddsnail,77 +oda eiichirou (style),77 +ochanoko (get9-sac),77 +number print,77 +nozaki sakura,77 +noneon319,77 +nona moth,77 +nomeazog,77 +no hair bow,77 +nirvana (blazblue),77 +nico (devil may cry),77 +neuroi girl,77 +netachou,77 +neneko-n,77 +nekoyanagi matasaburou,77 +nekomegane,77 +naru 0,77 +naoki (shibu asa ryo),77 +nanashi mumei (owl),77 +namakemono (u446644k),77 +nagisa (blue archive),77 +nagai wataru,77 +nabe-box,77 +n-mori,77 +mutsuki (iroha (iroha matsurika)),77 +murasaki hisato,77 +murasaki-yuri,77 +mugiusagi,77 +mourning,77 +motsunuki,77 +morihama karute,77 +mora (genshin impact),77 +moon gate,77 +moo (umineko),77 +momoiro tanuki,77 +mokkei,77 +mmu,77 +mmmmmkun,77 +mitsuru (pixiv 34028718),77 +mishima toshihiro,77 +misana,77 +misaka imouto 9982,77 +minion (despicable me),77 +miniature ranni,77 +mini shako cap,77 +minamito,77 +michiru (air),77 +michele crispino,77 +meo,77 +meigo arisa,77 +matcha cream v,77 +matatabi (nigatsu),77 +masuyama kei,77 +masn (moxi),77 +masaya ichika,77 +mary cagle,77 +marvel vs. capcom 2,77 +maruishi,77 +maru shion,77 +marie makise,77 +maria marionette,77 +manah,77 +mame nabe donko,77 +malos (xenoblade),77 +makita (vector1525),77 +makiri akira,77 +machino henmaru,77 +lyn (shunao),77 +lupin dive,77 +lovely labrynth of the silver castle,77 +looking inside,77 +little sister (seojh1029),77 +lino (lilyparty07),77 +leon0705,77 +leite jokin,77 +leena (chrono cross),77 +leaf panties,77 +laxus dreyar,77 +langrisser mobile,77 +ladydevimon,77 +kyuuketsuki sugu shinu,77 +kuroi paseri,77 +kurasaki fuuko,77 +kunishige keiichi,77 +kousaka maria,77 +kouhara yuyu,77 +koriarredondo,77 +koraidon,77 +kook,77 +konno kengo,77 +konbari tariumu,77 +komainu (yamaha1997),77 +koko (kamitsubaki studio),77 +koi ni kanmi o soete,77 +kogarasumaru (touken ranbu),77 +koga rejini,77 +knt02142769,77 +knolling,77 +kiyone suzu,77 +kiran (kiranpln),77 +kirameki high school uniform,77 +kio shimoku,77 +kidokawa seishuu,77 +kemi neko,77 +kcccc,77 +kashii eiji,77 +kaminogi haruka,77 +kamimura chika,77 +kamen rider amazon (series),77 +kamanatsu,77 +kaizuka yuki,77 +kainown,77 +k3rd,77 +jun (goodgun7),77 +john (tiger & bunny),77 +jo (bakuretsu tenshi),77 +jk gumi (nijisanji),77 +jiam009,77 +jeanne d'arc alter (festival outfit) (fate),77 +jagercoke,77 +iwao (pixiv258710),77 +ishinoyari,77 +iron maiden,77 +iriya no sora ufo no natsu,77 +io (maryann blue),77 +inemuri uno,77 +impossible skirt,77 +imai tetsuya,77 +ichinose (ichinose1592),77 +ibuki suika (cosplay),77 +huangquan dong (sinchi),77 +hua yi shan xin zhi yue,77 +hospital king,77 +hoshizora no babylon,77 +homu (seven deadly sins),77 +holding luggage,77 +hiyama yuki,77 +hirokazu sasaki,77 +himation,77 +hidaka kouyou,77 +hei (tonarinohey),77 +heart ring bottom,77 +hawoku ishibare,77 +hasisisissy,77 +harumi sawara,77 +happy maker!,77 +hanpan,77 +hano (hanos91),77 +hand in bikini,77 +hana n. 
fountainstand,77 +half-life 2,77 +hajimete no orusuban,77 +haiba arisa,77 +h&k g36c,77 +glowing markings,77 +gerome (fire emblem),77 +gensou suikogaiden,77 +gecko,77 +garan co,77 +ganyu (china merchants bank) (genshin impact),77 +gamarenji,77 +fuyu no yoru miku,77 +fuurin kingyou,77 +futoumeido,77 +futon fly away,77 +furuki ayaginutaira,77 +funpjinju,77 +fujiyama,77 +fujimoto satoru,77 +fujimori shiki,77 +fujima sakura,77 +fueiku,77 +freddy krueger,77 +food on legs,77 +floor lamp,77 +fervent idiot,77 +fai (fai-storage),77 +eyes visible through eyewear,77 +exif thumbnail surprise,77 +eunnieverse,77 +eugle na,77 +enmu (kimetsu no yaiba),77 +ending,77 +end of eternity,77 +emonyu,77 +durandal (fire emblem),77 +dunsparce,77 +druddigon,77 +dr. wily (mega man),77 +dodododo,77 +djmax respect,77 +dendrobium schema,77 +dekochin hammer,77 +deep web underground,77 +daisy cutter,77 +daisy (working!!),77 +daichi (daichi catcat),77 +da (bobafett),77 +cyberlive,77 +cutout below navel,77 +cutesexyrobutts (style),77 +crotchless bloomers,77 +cross-laced shorts,77 +cpqm,77 +couter,77 +coupon (skyth),77 +counter-strike (series),77 +cortoony,77 +condensed milk,77 +cloud hair,77 +claude (housamo),77 +ciel nosurge,77 +chuuou higashiguchi,77 +choo choo train,77 +chobii (hamgyomu),77 +chihaya gunzou,77 +chibi chibi,77 +chain earrings,77 +chaciooh,77 +cat (trickster),77 +carvanha,77 +captain tsubasa,77 +caltina (pepekekeko),77 +caloriemate,77 +butterfly choker,77 +bushmaster acr,77 +burenbo,77 +bun150,77 +brycen (pokemon),77 +bouno satoshi,77 +bokoblin,77 +boise (azur lane),77 +blue tube top,77 +blue sealad,77 +blonde hair-chan (ramchi),77 +bliss barson,77 +bisuke (k step2009),77 +biscuit krueger,77 +bikini top aside,77 +big sister (seojh1029),77 +bettie (pokemon),77 +bea (bropmlk),77 +bbeedol,77 +b rock,77 +aya drevis,77 +axel syrios,77 +awesomeerix,77 +aurora (disney),77 +at field,77 +asuka (louyun),77 +ashiya shirou,77 +arm mounted weapon,77 +aoyama nanami,77 +aojiru (shiro to kuro no mukyou),77 +anouetto,77 +ankoiri,77 +angeling,77 +angel french,77 +amu (258shin),77 +amazon (azur lane),77 +amaya enaka,77 +amane rosylily,77 +alteisen,77 +alphes,77 +akkun to kanojo,77 +akira (yuibnm71),77 +akatsuki shimeji,77 +akai akasaki,77 +ajin (sakurai gamon),77 +ai gon deroga,77 +adobe photoshop,77 +adhesive bra,77 +active raid,77 +academy ahri,77 +7100potechi,77 +104,77 +zura (phrase),76 +zhishi ge fangzhang,76 +yuzuriha (atelier liang),76 +yuurei yashiki,76 +yuuichi (bobobo),76 +yururi-ra,76 +yuri hyuga,76 +yukome,76 +yukkyun,76 +yuinshiel asteria,76 +yu-gi-oh! 
tag force,76 +yoshiten,76 +yoshinogai,76 +yokuran,76 +yokoyama kouji,76 +yf-19,76 +yellow sports bra,76 +yasehattagi,76 +yamadori seika,76 +yac (mokkori),76 +xyunx,76 +xingchee,76 +x-drive (symphogear),76 +wweed,76 +wu zetian (swimsuit caster) (fate),76 +win opz,76 +wedo,76 +washpan,76 +wacky races,76 +vuccha,76 +vima,76 +victor von gerdenheim,76 +vibrava,76 +venus bikini,76 +utani (punishment),76 +uss enterprise (cv-6),76 +ushisuke,76 +ushiromiya hideyoshi,76 +unown n,76 +unown i,76 +unbuttoned dress,76 +un tan,76 +umino mizu,76 +ultrabinou,76 +ultra magnus,76 +uchida aya,76 +tubumi,76 +tsunokakushi,76 +tsunenorip,76 +tsukishita kaoru,76 +tsujigiri,76 +trevenant,76 +topdylan,76 +tooku0,76 +tomo (tomorag7),76 +tiny (tini3030),76 +time mage (fft),76 +thick outlines,76 +theory (xenoblade),76 +the legend of zelda (nes),76 +the dark knight,76 +teruya (6w6y),76 +tenkaichi nihon saikyou bugeisha ketteisen,76 +ten piboshi,76 +temujin (housamo),76 +tarutobi,76 +tao pai pai,76 +takao (kancolle) (cosplay),76 +takamiya nasuno,76 +takagirock,76 +sweater girl,76 +suzuya aki,76 +suzuki iruma,76 +sunday se7en,76 +suigetsu koubou,76 +strelka,76 +steel ball,76 +spiral heart moon rod,76 +space jam,76 +space elevator,76 +soo-hyon lee,76 +sonoda mitsuki,76 +someno haru,76 +smelling ass,76 +silverwing,76 +silent hill 1,76 +shroedinger,76 +shisaki tayu,76 +shiromiya rei,76 +shirogane takeru,76 +shiratama (monster1553),76 +shine!! (idolmaster),76 +shimabara yuuhi,76 +shiitake,76 +shiiba nae,76 +shigure (sigre),76 +shie (m417),76 +sheep hood,76 +shedding,76 +shaian,76 +semi (p-poco),76 +seattle (azur lane),76 +satobitob,76 +sasatabekung,76 +sango (53box),76 +sanada tatsuki,76 +samonasu17,76 +ryuudou issei,76 +ryoushi chicken soup grass big chungus,76 +ryokucha manma,76 +roku no hito,76 +roku (tsua-kihuyu),76 +rog rockbe,76 +rodoreamon,76 +rivets,76 +rilliona (yu-gi-oh!),76 +reisei zero,76 +referee,76 +red overalls,76 +ramuya (lamb),76 +rahwia,76 +r2pi,76 +quatthro,76 +qiao xing,76 +punched,76 +pumpkin skirt,76 +pudding (zoza),76 +present mic,76 +predacon,76 +poroi (poro586),76 +pon (cielo),76 +poleyn,76 +plover,76 +pigone,76 +patrick colasour,76 +parabora (nipplemokuba),76 +pan koujou,76 +pallas's cat (kemono friends),76 +padoruu,76 +ozeu0916,76 +oyuyu,76 +oyasuminjyutsu,76 +otome wa boku ni koishiteru futari no elder,76 +oshiruko (oshiruko s2),76 +oricorio (sensu),76 +orange pajamas,76 +ooshio7734,76 +ookamisama,76 +omega 2-d,76 +oluha,76 +oju (ouka),76 +oguro (moyashi 2-hon),76 +officer vi,76 +nyaring943,76 +nyamou,76 +numenoko,76 +npn,76 +nonomura sora,76 +nobusawa osamu,76 +nito minatsuki,76 +nishikawa youko,76 +nishi masakazu,76 +nisha (elsword),76 +nikusho,76 +nikai kara momoshio,76 +nidorina,76 +nero claudius (fate/extra) (cosplay),76 +nengoro,76 +necrozma,76 +natsuki subaru (cosplay),76 +namespace,76 +myeolchi,76 +mush (mushlicious),76 +musco,76 +mumumu (three emu),76 +mukai,76 +moroha,76 +mochizuki honami,76 +mo dao zu shi,76 +mizumizu (phoenix),76 +miyakodori takayuki,76 +mirin pengin,76 +minya (nyanko daisensou),76 +minnie mouse ears,76 +mikko leminen,76 +mikazuki (azur lane),76 +mifilinah jegell,76 +michelle cheung,76 +melancholic (vocaloid),76 +mega gengar,76 +mayuzumi,76 +mayumi thyme,76 +maximum impact ii,76 +mattang,76 +matryoshka (borscht),76 +material sniper,76 +mass effect 2,76 +marutaya,76 +maru (maru1625),76 +marie en carlsberg,76 +marie (atelier),76 +manila envelope,76 +manatu kato,76 +mamonomusume to no seikatsu,76 +mamemo (daifuku mame),76 +mamemix,76 +mall link suit,76 +majiro 
(mazurka),76 +luke triton,76 +luke (kyeftss),76 +liquid-in-glass thermometer,76 +lilith (lilithchan),76 +lewdishsnail,76 +leppa berry,76 +leonne (futagohime),76 +leica,76 +laventon (pokemon),76 +lanz (xenoblade),76 +kusoyuridanchi,76 +kurosawa shouichi,76 +kurosawa rei,76 +kuon michiyoshi,76 +kunikida,76 +kumakichi (toshigat),76 +kuma jet,76 +koucha indian,76 +kondou ryunosuke,76 +konami,76 +kokuhaku jikkou iinkai,76 +kokoa-chan (pan (mimi)),76 +koizumi hitsuji,76 +koi ga saku koro sakura doki,76 +kobashi,76 +klefki,76 +kitamura yuusaku,76 +kirino kasumu,76 +kirin (monster hunter),76 +king kazma,76 +kimyo,76 +kibou,76 +keikou ryuudou,76 +kawaguchi youhei,76 +kasuga misora,76 +kashimu,76 +karna (santa) (fate),76 +kamura gimi,76 +kamishiro rin,76 +kamisama no you na kimi e,76 +kajuu,76 +kagura (onmyoji),76 +kagami tina,76 +jyushiko (osomatsu-san),76 +junior clown car,76 +jung freud,76 +julius juukulius,76 +jettoburikku,76 +ishiwatari daisuke,76 +iris (material sniper),76 +inui sekihiko,76 +ino (tellu0120),76 +ink wash painting,76 +indo curry,76 +impact (font),76 +imp mercy,76 +iceky,76 +ice cream scoop,76 +i-13 (azur lane),76 +hyena,76 +hyadain no joujou yuujou,76 +hunting,76 +hukahito,76 +hovering kousin,76 +hoshiuta,76 +hoshi (xingspresent),76 +hoozuki shia,76 +hiyo kotori,76 +hiyashi mirano,76 +hiratsuka shizuka,76 +hilda (under night in-birth),76 +hihachi,76 +hidari kagetora,76 +heripiro,76 +heisei,76 +headphone actor (vocaloid),76 +hayashi kotoyo,76 +hayama kotono,76 +hattrem,76 +hato yu-ki,76 +hardgore alice,76 +hans christian andersen (adult) (fate),76 +gupipy,76 +gundam gp-02 physalis,76 +guitar little sister (hitomi o),76 +gu deulmok,76 +grumpy,76 +ground gundam,76 +gr greeze,76 +golden egg,76 +godzilla (2014),76 +godharo1,76 +gnosis (genshin impact),76 +glowing nipples,76 +gipsy underground,76 +gilgamesh (establishment) (fate),76 +geoduck,76 +genji tsuushin agedama,76 +gemi 25,76 +gekka nanako,76 +ga bunko,76 +g.h (gogetsu),76 +fuuma kotarou (tenkaichi),76 +fukuzawa yukichi (egoist867),76 +fujiwara zakuro,76 +fujisaki rei,76 +frit 2,76 +file112056,76 +fiery ears,76 +fei fong wong,76 +fake translation,76 +es-ther,76 +ennis,76 +eleonora viltaria,76 +drop kick,76 +draw-till-death,76 +dosu (doseven),76 +dos cat,76 +doruka,76 +dl2go,76 +diieru,76 +depth charge projector,76 +deep web underground (character),76 +dadachyo,76 +cyzir visheen,76 +cure peace (princess form),76 +cruiser d.va,76 +cruccu,76 +crimson typhoon,76 +cookbook,76 +construction worker,76 +cola (gotouryouta),76 +climbing tree,76 +ckln,76 +cicin,76 +chullo,76 +chou megami shinkou noire gekishin black heart,76 +choso (jujutsu kaisen),76 +chika (orange pop),76 +chelle ingham,76 +chat (tales),76 +chacha (akazukin chacha),76 +cha kuro (limo),76 +cha (kancolle),76 +captain america (cosplay),76 +capoeira,76 +burari,76 +bugita,76 +brey,76 +bow shirt,76 +bow removed,76 +bound feet,76 +bouhatei tetora,76 +bou shaku,76 +bonryuu,76 +bonobono,76 +boingo,76 +blood moon (league of legends),76 +blonde girl (cloba),76 +bikupan,76 +beyond the wishes (idolmaster),76 +beast wars: transformers,76 +baizhu (genshin impact),76 +bait,76 +bagel,76 +bad gun anatomy,76 +bad end beauty,76 +bachibachi (tisen),76 +azuma satori,76 +azu (azusayumix),76 +atou haruki,76 +atelier marie,76 +atchy,76 +asymmetrical earrings,76 +asura (onmyoji),76 +asura (elsword),76 +asakawa shinka,76 +asahina mikuru (cosplay),76 +arufa (hourai-sugar),76 +arm on table,76 +arene (arknights),76 +archer (cool and wild) (fate),76 +archen,76 +araragi ayune,76 +aqua sky,76 +aozora 
taf,76 +aoten,76 +anmitsu (dessert),76 +amata sora,76 +amacha,76 +alex mercer,76 +akira (ying),76 +akinoya,76 +akino kaede,76 +akatsuki (akatsukishiki),76 +akaho sakura,76 +akagikou,76 +aino nagisa,76 +aichi shiho,76 +ahiru (duck),76 +agnes joubert,76 +agemaki wako,76 +adjusting strap,76 +5tb,76 +41 (taskmaster41),76 +2qba,76 +1998,76 +1997,76 +15citron,76 +zutta,75 +zumi (neronero126),75 +zerogravitas,75 +zazie rainyday,75 +zanki,75 +yzak joule,75 +yuririn poi,75 +yunkaiming,75 +yumasaki walker,75 +yukinami (paru26i),75 +yukimaro yukkii,75 +yoyoyotto,75 +yoshii,75 +yonomori benio,75 +yonehara sousuke,75 +yeyebirdie,75 +yayuyoyayuyo,75 +xuan li (the legend of luoxiaohei),75 +woominwoomin5,75 +winged wand,75 +whitebeard pirates,75 +watchog,75 +wajuniorbox,75 +vocaloid (tda-type ver),75 +vik (xypt7474),75 +venus (planet),75 +vegetable noda,75 +utsugi uyu,75 +usui yoshito (style),75 +useless,75 +usavich,75 +usaki (ama),75 +urushia (okame nin),75 +unown r,75 +ume (yume uta da),75 +umashio,75 +ujiie mutsumi,75 +tsunoko,75 +tsa,75 +trigger-chan,75 +toukan,75 +tomoe (persona 4),75 +tokimeki memorial girl's side 2nd kiss,75 +toinana,75 +tofuvi,75 +tingle,75 +thrasir (fire emblem),75 +the legend of zelda: the minish cap,75 +the hand (stand),75 +tharja (fire emblem) (cosplay),75 +teti,75 +teko,75 +tear ring saga: utna heroes saga,75 +tatahai,75 +tanizaki yukari,75 +tangerine (dudu),75 +tanaka (colorcorn),75 +tamamo (yagi),75 +tama (love hina),75 +takase hina,75 +takamin apaman,75 +tajyador,75 +taisai,75 +swordwaltz,75 +sv-98,75 +suzumiya seika,75 +suzu (nagasarete airantou),75 +super smash bros. logo,75 +super robot wars v,75 +sunyukun,75 +striped cape,75 +streamingsun,75 +strada,75 +spoken sparkle,75 +split tail,75 +sonaworld,75 +snow villiers,75 +snake armband,75 +slit,75 +slayer (guilty gear),75 +siroooo,75 +siren purifier (azur lane),75 +side handle teapot,75 +shuichi wada,75 +shokatsuryou koumei,75 +shizu (9394marimo),75 +shiun'in sora,75 +shirt half tucked in,75 +shirokujira,75 +shiretoko rin,75 +shipyard,75 +shino (shinderera),75 +shimotsuma,75 +shimotsuki juugo,75 +shimon,75 +shilin,75 +shibagami,75 +shantak (nyaruko-san),75 +sha (isago),75 +sengoku koihime ~otome kenran sengoku emaki~,75 +see-through shawl,75 +sea spray,75 +schelz,75 +sayossa (pak-front),75 +sanukiske,75 +sansaro rii,75 +sane-person,75 +sakimiya iruka,75 +saki hajime,75 +saitou nicole,75 +safari jacket,75 +saegusa yukika,75 +ryuuto (vocaloid),75 +running towards viewer,75 +rune factory frontier,75 +rukotaro,75 +ruca milda,75 +rome,75 +rokosu (isibasi403),75 +riou (gensou suikoden),75 +ringorou (idolmaster),75 +rido (ridograph),75 +rider kick,75 +rice planting,75 +revolver (game),75 +redundant-cat,75 +recette lemongrass,75 +rasupekuto,75 +rai (newtype xm-x1),75 +qiu ju,75 +purity seal,75 +pureji oshou,75 +provence (casual vacation) (arknights),75 +profitshame,75 +pop-up pirate,75 +ponyaru,75 +ponpoko,75 +pokio,75 +poharo,75 +platelet (hataraku saibou) (cosplay),75 +pla4neta,75 +pilky,75 +pika (pokemon),75 +phione,75 +phazer,75 +pentagon (uzus),75 +pennywise,75 +pelican,75 +pedobear,75 +pawn (chess),75 +paskmel,75 +para-sol,75 +pan-ooh,75 +ostwindprojekt,75 +oshin0 (zheng),75 +orochi (youkai watch),75 +open cockpit,75 +onizuka saori,75 +onionsketch,75 +okumori boy,75 +okino tsukasa,75 +oekakimannga,75 +o medal,75 +null maru,75 +nootomo,75 +nomal,75 +noiz (dramatical murder),75 +noe yuuhi,75 +nodori710,75 +niwa hitomi,75 +nicohi,75 +nhk (voiceroid),75 +nemun (tamizzz),75 +nazuna (flower knight girl),75 +natsuno suika,75 
+natsume yuji,75 +naruse nagi,75 +naomi evans,75 +nanase (nns 6077),75 +nagi to (kennkenn),75 +nachoz (nachozart),75 +nachisuke,75 +muryuuin tayun,75 +mumyuu,75 +mouth submerged,75 +moto mitsuashi,75 +monmusu harem,75 +mona (pact of stars and moon) (genshin impact),75 +momoyama hinase,75 +mojake,75 +mogami kai ni (kancolle),75 +moana (movie),75 +mizune (winter),75 +miyu edelfelt (cosplay),75 +miruzawa akechi,75 +mini koala,75 +minazuki haruka (twin angel),75 +minase rio,75 +mikagemaru (mikage000),75 +mikado nagi,75 +miitara,75 +miharu sena kanaka,75 +midori (uchuu patrol luluco),75 +meido yomi,75 +mega banette,75 +mayer,75 +may lee,75 +masco,75 +masako (sabotage-mode),75 +mapi (mup1228),75 +makomo (kimetsu),75 +makita yoshiharu,75 +mak (kainemaru),75 +majodou,75 +madore,75 +macosee,75 +machina,75 +maaru (shironeko project),75 +luc (suikoden),75 +liya nikorov,75 +lithographica,75 +linux,75 +lileep,75 +lightning returns: final fantasy xiii,75 +leanne (fire emblem),75 +leaf hat ornament,75 +laika (sputnik2nd),75 +l'antica (idolmaster),75 +kyak bamboo,75 +kutar22,75 +kurotsuchi mayuri,75 +kurotobi rarumu,75 +kurosawa haruto,75 +kuropan (crow panther),75 +kuroko (piii),75 +kuno (runkunochan),75 +kumasan (kumazonjp),75 +kumanz,75 +kris (sygna suit) (pokemon),75 +kouno miyako,75 +koumi haruka,75 +kouichi (kouichi-129),75 +kooemong,75 +komugiko no mori,75 +komii,75 +kohagura ellen,75 +koeda (k83 4),75 +koala forest military uniform,75 +kkaebing,75 +kitere,75 +kisa (kisa-kisa5900),75 +king kouta,75 +kikukawa yukino,75 +kenkou zenrakei suieibu umishou,75 +kazane-wind,75 +kazamaki matsuri (female),75 +katsuma rei,75 +katana zero,75 +kasugai (de-tteiu),75 +kanisawa yuuki,75 +kamishiro sui,75 +kamen rider demons,75 +kakan (amka),75 +kagi f,75 +kage kara mamoru!,75 +kagamine len (vocaloid4),75 +juria0801,75 +jun (noiji guren 0220),75 +judas (tales),75 +jouwan,75 +jiraiya (persona 4),75 +jinguu shion,75 +jindai high school uniform,75 +jikuno,75 +jasminka antonenko,75 +jackal,75 +izumida,75 +ivory (25680nico),75 +itsutsuki,75 +isshin (sasayamakids),75 +isara gunther,75 +isabe (girls und panzer),75 +iron paladin (elsword),75 +inumine aya,75 +inseki tarou,75 +inamori futayo,75 +imuro,75 +imu (senran kagura),75 +iizuka haruko,75 +iinchou (trouble spirit),75 +idolmaster platinum stars,75 +idle animation,75 +ice2002,75 +hyury,75 +houtou,75 +horie yui,75 +holding thermometer,75 +hinata (hinata-ur),75 +himuro hitoshi,75 +himitsu keisatsu (vocaloid),75 +hiiro60,75 +high speed!,75 +hifumi (3b x),75 +hidaka ryou,75 +hibioes,75 +hfp~kubiao,75 +hermes (ff14),75 +harukappa,75 +harakawa tamako,75 +hanjuku choco-pai,75 +hanji (hansi),75 +hagoromo komachi (idolmaster),75 +habataki academy uniform,75 +gyorui (amezari),75 +guuchama,75 +gothita,75 +goshingo1,75 +gomiyama,75 +goma (u p),75 +gold dress,75 +gnome,75 +glowing penis,75 +gdgd fairies,75 +garushaa wolfein,75 +gangplank (league of legends),75 +gakuen taisen valkyries,75 +futapi,75 +fujioka-kuma,75 +frenchmaid (made in frenchmaid),75 +fox hood,75 +foomi,75 +foam mustache,75 +fluffy legwear,75 +flippy (happy tree friends),75 +felipe godoy,75 +fed by viewer,75 +fake ad,75 +estel freesia,75 +escape from tarkov,75 +enonko,75 +emoillu,75 +emo (ricemo),75 +emiya shirou (prisma illya),75 +eitri (fire emblem),75 +edpan,75 +edenfox,75 +drum bath,75 +dragon half,75 +dory,75 +donatello (tmnt),75 +dkaki,75 +derby (dabidabi),75 +denpa teki na kanojo,75 +de-chan (belko),75 +darkereve,75 +dare ga tame no alchemist,75 +cynical (llcbluckg c004),75 +cure happy (princess form),75 
+crushed,75 +crown patisserie (umamusume),75 +craig tucker,75 +crab claw,75 +covering anus,75 +cooper (azur lane),75 +coffeeslice,75 +clark kent,75 +citron80citron,75 +chueog,75 +chro (rulurullu),75 +chiaroscuro,75 +cheshire (cait sith crooner) (azur lane),75 +chernyyvo,75 +chamame,75 +cereal box,75 +celestine lucullus,75 +carrera,75 +carla j. luksic,75 +camouflage scarf,75 +calen (time bokan),75 +caam serenity of gusto,75 +buoy,75 +bunny girl (yuuhagi (amaretto-no-natsu)),75 +bunny (trickster),75 +bungaku shoujo,75 +brynhildr (cheer for master) (fate),75 +brown dust,75 +british admiral (y.ssanoha),75 +boy's club,75 +borongo,75 +bleedman,75 +blackball,75 +bitchen,75 +bending,75 +beauty swimsuit (idolmaster),75 +bearer of the curse,75 +bb (swimsuit mooncancer) (fate) (cosplay),75 +bandaid on ass,75 +bakebake (touhou),75 +baigao,75 +babydoll lift,75 +ayerscarpe (arknights),75 +ayanami (rock 'n' demon) (azur lane),75 +awesome face,75 +award ribbon,75 +asyura7,75 +asdj,75 +arutopian,75 +arrow through apple,75 +aroma (go! princess precure),75 +armpit stubble,75 +armadillo tail,75 +argentina,75 +apricot (d-floe),75 +anita (vampire),75 +ancient ys vanished,75 +amiba48,75 +alice kei (lemon-jiru),75 +alice (as109),75 +alexis kerib,75 +akiyoshi yoshiaki,75 +akira (kaned fools),75 +akagi yuuto,75 +aika zero,75 +aika r-16,75 +ah-kun,75 +agtt25333,75 +afro puffs,75 +abimaru gup,75 +abduction,75 +7/11 (fukuso),75 +3e,75 +30 minutes sisters,75 +1925 (vocaloid),75 +12cat,75 +0123456789,75 +zunkome,74 +zhayin-san,74 +zero in,74 +zekkyou (h9s9),74 +zarory,74 +yuunagi seshina,74 +yuuki yuu,74 +yusan,74 +yuro,74 +yumeno naka,74 +yumehiko,74 +yoshida (ne),74 +yohan12,74 +yo-jin,74 +yinzhai,74 +yeun,74 +yazawa kotarou,74 +yatogami fuma,74 +yato (fire emblem),74 +yaruwashi,74 +yanagihara tantoui,74 +yakuto007,74 +yah yah 6,74 +xing cai,74 +white sister ram,74 +white (kekkai sensen),74 +warekara,74 +wamusato haru,74 +vladbacescu,74 +viper f40,74 +vienna (vtuber),74 +vf-1 super,74 +venom (guilty gear),74 +van darkholme,74 +uzuki tooru,74 +uzuki (azur lane),74 +uturo,74 +useq1067,74 +unusually visible,74 +unown t,74 +uni ikura,74 +umegiri hifumi,74 +ume (illegal bible),74 +tyrant sugawara,74 +twitch logo,74 +tsuzaki aoba,74 +tsukishima general high school uniform,74 +tsukimiya miyabi,74 +tsengyun,74 +tryndamere,74 +trap door,74 +train conductor,74 +toujou basara,74 +toromera,74 +torn tank top,74 +torn apron,74 +tooru,74 +tokura eiko,74 +tokidoki bosotto roshia-go de dereru tonari no arya-san,74 +tk28,74 +the olphy,74 +the legend of zelda: oracle of ages,74 +the legend of dragoon,74 +tetsudou musume,74 +teruhashi kokomi,74 +terrarium,74 +teresa (mazohaha),74 +tennessee (azur lane),74 +tendou karen,74 +tefco,74 +talnory,74 +tales of the world radiant mythology 2,74 +takuji yuusaku,74 +takeda seiji,74 +takanamushi,74 +taisinkoku,74 +sync (tales),74 +swedish text,74 +suwaiya,74 +supokon! 
sports wear complex,74 +sun trial,74 +suicabar72,74 +stone torii,74 +steve zheng,74 +stable,74 +sonic the hedgehog (archie comics),74 +soapy,74 +smjim1986,74 +skwovet,74 +sinomi,74 +shuutai,74 +shoulder angel,74 +shouen kigashi,74 +shokugyo,74 +sho (wnmf3234),74 +shiron (moze),74 +shiren (ourboy83),74 +shirayuki (sister princess),74 +shiori2525,74 +shinkai kanata,74 +shinagawa mikuzu,74 +shimo-san,74 +shima (sh1mamu),74 +shiki eiki (cosplay),74 +shiki (shikki46),74 +shichibukai,74 +shibuya (kurokamishain),74 +shibainu kisetsu,74 +shi ecchi,74 +shepherd's crook,74 +shelving book,74 +shaggy susu,74 +sf choujigen densetsu rall,74 +serike w,74 +serafleur,74 +seitokai nimo anawa aru!,74 +seikishi melty lovers,74 +seidou (tukinomiyako),74 +scorpio,74 +schwann oltorain,74 +schneewittchen,74 +sanshokuin sumireko,74 +sandcasks,74 +sakurano otoha,74 +sakuraba hinano,74 +sakata gintoki (cosplay),74 +sakaki youma,74 +sakai wakana,74 +sakai waka,74 +saishuu heiki kanojo,74 +sailor star healer,74 +saana-kun,74 +s-purple,74 +rutsubo,74 +rutee katrea,74 +ruriwo (ruriwo1894),74 +rune slayer (elsword),74 +roggenrola,74 +rococo urupa,74 +rocher-hd,74 +risu (dorohedoro),74 +ripu (sherypton),74 +ringo yuyu,74 +riddhe marcenas,74 +ribahara aki,74 +riai (onsen),74 +reporter waddle dee,74 +renshirenji,74 +red pajamas,74 +really? really!,74 +re-class battleship (cosplay),74 +rahulk (forever 7th capital),74 +purin (purin0),74 +public yotsuba middle school uniform,74 +public service announcement,74 +presa (tales),74 +pozesuke,74 +porno (dohna dohna),74 +piroshiky,74 +pipi,74 +pip boy,74 +phiphi-au-thon,74 +pfalz,74 +persicaria (girls' frontline nc),74 +panoramango,74 +pandako,74 +oyamada gamata,74 +otoufu (wddkq314band),74 +otoshiro kosame,74 +orochi (fire emblem),74 +orguss,74 +orange gemstone,74 +orange camisole,74 +ooyodo kai (kancolle),74 +online neet,74 +ohshioyou,74 +ogata zen,74 +nuzleaf,74 +nursing fingering,74 +nsfwolf,74 +nonono nagata,74 +nohtuy,74 +no harness,74 +nishi itsumi,74 +nipple ribbon,74 +nintendo switch lite,74 +ninopal,74 +nihimaru,74 +new jersey (snow-white ceremony) (azur lane),74 +nellko agogo,74 +nekobus,74 +neko nabe,74 +neko mata,74 +negi mugiya,74 +nearly naked coat,74 +natsu (927013),74 +narumi yuu (imitmoon),74 +naotsugu (log horizon),74 +nanatuki13,74 +nanashi (74 nanashi),74 +nanao (aoyamahikari),74 +nakahara mizuki,74 +naishi-chan,74 +nagisawa yuu,74 +nagatani (nagata2),74 +munuu,74 +mugicha (sukimachaya),74 +moyuru,74 +motsuni (artist),74 +moth (artist),74 +morino harifu,74 +moribuden,74 +montanyaoh,74 +monster collection,74 +momomiya mion,74 +momo uzura,74 +mojo (dennou coil),74 +mo-fu,74 +mizura,74 +mizuno kaede,74 +misono kirika,74 +misagi nagu,74 +miona yui,74 +minori (log horizon),74 +mini nobu (fate),74 +minecart,74 +minamo (pixiv17726065),74 +mimelex,74 +mima sachi,74 +mikazuki mika,74 +mikan no shiru,74 +mikage nao,74 +mearian,74 +mctom,74 +mayuzumi yukie,74 +mato kuroi,74 +marvelous,74 +maritaki,74 +marilyn monroe,74 +mandalorian,74 +manaka (sumiyao),74 +malachite,74 +makisige,74 +majime joe,74 +magical mirai rin,74 +mackintosh rose,74 +lyse hext,74 +lyon (fire emblem),74 +lugosi ela,74 +ltt challenger,74 +low poly,74 +lost child,74 +liv (punishing: gray raven),74 +liong,74 +lillet blan,74 +lightning background,74 +lieselotte achenbach,74 +lieri bishop,74 +liangban xiexu,74 +leotard tug,74 +leopold stotch,74 +leaf fan,74 +large belt,74 +large-spotted genet (kemono friends),74 +lalatia-meai,74 +kyukyutto (denryoku hatsuden),74 +kyou (nekoneko),74 +kyoshinhei,74 +kuroyoru 
umidori,74 +kurosaki,74 +kuroha neko,74 +kuro (jyupiter),74 +kumita (sumica),74 +kugi (kugi-xiv),74 +kuga hiroto,74 +kou 2008,74 +konohana kitan,74 +kon (kin219),74 +kominato ruuko,74 +klavier gavin,74 +kito (clamp5656),74 +kita ichiban,74 +kistina,74 +kirari (kira rin9),74 +kiragi (fire emblem),74 +kinokosuke,74 +kimura akiyoshi,74 +kimineri,74 +kiki fushigi,74 +kijo kouyou (fate),74 +kichijou agata,74 +keaton (fire emblem),74 +kawasaki saki,74 +katsuki mari,74 +katoributa a,74 +katagirinanoka,74 +kari (karinimooreha),74 +karamiti,74 +kaneshiro matoi,74 +kancolle arcade,74 +kamiya (mennu),74 +kamachi (kamati0maru),74 +kalinka cossack (mega man),74 +kajiyama hiroshi,74 +kagarino kirie,74 +kafuru (senran kagura),74 +judo,74 +joyfull (terrace),74 +joseph desaulniers,74 +joey koguma,74 +javelin (slow ahead!) (azur lane),74 +iseno yajin,74 +isemori,74 +inomata mutsumi,74 +ingway (odin sphere),74 +inata17ta,74 +imperium of man,74 +ikei,74 +ikamusume (cosplay),74 +ichiko (osomatsu-san),74 +hotline miami,74 +hosoi kouzou,74 +homura (haku89),74 +holding picture,74 +hitoi,74 +hisagi (puchimaple),74 +hiromin,74 +hirasaka yotsuyu,74 +hinako (sister princess),74 +himemurasaki,74 +hildegard von krone,74 +hidaka0503,74 +heckler kai,74 +hatsuga (dmaigmai),74 +harukana (harukana 10),74 +han soo-min (hanny),74 +hal emmerich,74 +hakusai (hksicabb),74 +hakoiri nyanko,74 +haiiro (immature),74 +guilty kiss (love live!),74 +gud0c,74 +guan yinping,74 +gouenji yuuka,74 +gonzaburou,74 +gnome (last origin),74 +glowing headgear,74 +gloamy,74 +ginn (hzh770121),74 +ginga e kickoff!!,74 +gibson sg,74 +gen (susono01),74 +gekka kaguya (urabata),74 +gaichi,74 +gaia (girls' frontline),74 +fujino miyabi,74 +fujii satoshi,74 +fujii maki,74 +fred0092,74 +formalin,74 +forest of magic,74 +follower (yagisaka seto),74 +florist,74 +floppy sleeves,74 +flieger,74 +final fantasy tactics a2,74 +ff frbb122,74 +fenrich (disgaea),74 +feng you,74 +father gascoigne,74 +fake transparency,74 +ezomori nozomu,74 +ethlyn (fire emblem),74 +eska bamel,74 +enoch,74 +enforcer (arknights),74 +emor18 shikeko,74 +emina&aki,74 +elena olegovna owen,74 +elena (ff7),74 +eiyuu senki gold,74 +eha7y,74 +eating flower,74 +dunceneygak,74 +duck innertube,74 +dorris,74 +dorothea arnault (cosplay),74 +don3,74 +dokuganryuu,74 +derori,74 +denki anma,74 +demon king (in),74 +deck chair,74 +ddolbang,74 +dashigarayama,74 +danball senki w,74 +dai (mebae16),74 +daft punk,74 +cyberconnect2,74 +cuphead,74 +crown (symbol),74 +crow hogan,74 +crimo,74 +cradle,74 +concealed weapon,74 +comptiq,74 +colt python,74 +codename wa sailor v,74 +civilization (series),74 +chun lanlanlan,74 +chris (kof),74 +chiru (sanifani),74 +chimaki (impressiveanarchy),74 +chikusan nurseman,74 +check (check book),74 +chang chun (azur lane),74 +chain sumeragi,74 +cha sakura,74 +carole peppers,74 +butterfly necklace,74 +budesonide,74 +browning m1919,74 +bronya zaychik (valkyrie chariot),74 +brashear lushert,74 +bow print,74 +boar ears,74 +blue hallelujah,74 +blackfangs,74 +bismarck (coat of arms),74 +bear bag,74 +baggy shorts,74 +azzie (az man studios),74 +azuki (azuki shot),74 +ayanobro,74 +ata (tsumari),74 +asparagus (girls und panzer),74 +arayama reiichi,74 +araki rena,74 +araki jeccy,74 +araco,74 +aphilia (kiyomin),74 +aoi (houkago no pleiades),74 +anonymous (japanese),74 +anne shirley,74 +anastasia hoshin,74 +amrkdrw,74 +amano ameno,74 +amakura,74 +alma01,74 +akamatsu yui,74 +akai (ugokashitari),74 +akai (riaakai),74 +akabane karma,74 +aikawa ayumu,74 +aibo (gorgeous mushroom),74 +ai-chan's sister 
(tawawa),74 +ahr studio,74 +agnese sanctis,74 +ae iueo,74 +aamond,74 +7:08,74 +2poet,74 +1991 (blz),74 +18 (backstreetno18),74 +zkstxxx,73 +zemi mama,73 +z'gok,73 +yuuma (noel),73 +yu mei-ren (festival outfit) (fate),73 +yasu (pixiv),73 +yashima takahiro,73 +yandere no onna no ko ni shinu hodo aisarete nemurenai cd,73 +yamano uzura,73 +yakumo (nu carnival),73 +yahiro (heartseek000),73 +yagi (sinnsyou),73 +xiaoyu,73 +xiaolumiaoliya,73 +xiacheng tatsuya,73 +wushier,73 +wrinkled frown (detective pikachu),73 +wooden bathtub,73 +wet jacket,73 +wawako mama,73 +wanko to lily,73 +vorona,73 +urokong,73 +unown s,73 +umiru,73 +umihara kawase (character),73 +ukimesato,73 +uichi,73 +uchi no musume no tame naraba ore wa moshikashitara maou mo taoseru kamo shirenai.,73 +ubi (ekdus6080),73 +u-556 (azur lane),73 +twiska (doubitian),73 +tuze111,73 +tutinako,73 +tukino (panna),73 +toujou ruby,73 +toudou misa,73 +totthii0081,73 +tomojo,73 +tomboy childhood friend (cccpo),73 +tokai teio (beyond the horizon) (umamusume),73 +todoko (osomatsu-san),73 +timato,73 +tiktok logo,73 +the king of fighters xi,73 +the fool,73 +testicles touching,73 +temptation,73 +tembin 3,73 +tearfish,73 +tatekami seri,73 +tashiro tetsuya,73 +tanu0706,73 +tanakalma,73 +tanabe,73 +tamomoko,73 +tamaki fuyu,73 +takarazaki school uniform,73 +takahan,73 +tabiaki (cosy catastrophe),73 +swiftsure (midsummer special service) (azur lane),73 +suzutsuki (azur lane),73 +suzume (rance),73 +susumu,73 +susato mikotoba,73 +suisei (kancolle),73 +suga kyoutarou,73 +stuffed turtle,73 +steven quartz universe,73 +squash,73 +southern ocean war oni,73 +so shio,73 +smoothie,73 +slokai (iron saga),73 +sleeve pull,73 +skinsuit,73 +sissel,73 +sigrun (fire emblem),73 +shyrei faolan,73 +shuuhei (shoohey),73 +shugo19,73 +shropshire (azur lane),73 +shouten pegasus mix mori,73 +shou937,73 +shirota dai,73 +shion (peach momozen),73 +shinken-zemi,73 +shideboo (shideboh),73 +shichijou natori,73 +shibatenko,73 +shainea,73 +shadowverse (anime),73 +sexual dimorphism,73 +setu (shining12),73 +sessue,73 +seol,73 +sentou yousei yukikaze,73 +senel coolidge,73 +seijou academy uniform,73 +sasaki yukinojou,73 +sakurai tomoki,73 +sajou manaka,73 +saeuchobab,73 +rush (mega man),73 +royboy,73 +romance of the three kingdoms,73 +rods,73 +ringo-chan (danshi koukousei),73 +rikudou koushi,73 +rifsom,73 +rhyperior,73 +renren (ah renren),73 +ren kon,73 +ren hakuryuu,73 +regult,73 +re:i,73 +rasen manga,73 +ranger squirrel,73 +ranger (kancolle),73 +rampage 2nd,73 +raccoon boy,73 +r (ryo),73 +pyro jack,73 +purple shawl,73 +ptsd,73 +prinz adalbert (azur lane),73 +pp-19 bizon,73 +pottery,73 +pop-up book,73 +pisces,73 +pioneer neckerchief,73 +pink moon,73 +phantom breaker,73 +pen to mouth,73 +peke-kun,73 +panapana,73 +pai chan,73 +otto suewen,73 +otoko no ko wa meido fuku ga osuki!?,73 +orz (orz57),73 +one room,73 +oishinbo,73 +off (game),73 +odogaron (armor),73 +nyome991,73 +nyoijizai,73 +nyano21,73 +numa (minus 4k),73 +nuka cola06,73 +norunollu,73 +nori20170709,73 +norakuro nero,73 +nomu (29 nom),73 +noir vesper,73 +noel (tsukihime),73 +noel (nnoelllll),73 +noah (jasdavi),73 +no penis,73 +nnz,73 +niso,73 +nishino miyuki,73 +nintendo switch (personification),73 +nikubanare,73 +niku114514810,73 +nephtim (world flipper),73 +nekosama shugyouchuu,73 +near,73 +natalia poklonskaya,73 +nana (elfen lied),73 +nagii yuki,73 +mysterious idol x alter (fate),73 +mutation,73 +murakami fumio,73 +multi-strapped bra,73 +mortar (weapon),73 +mokew,73 +modern afro,73 +mochiko (uyu omochi),73 +mochiko (mocchikkoo),73 
+moai,73 +mizunezumi,73 +miya atsumu,73 +misti,73 +mirin (coene65),73 +minamina,73 +millenia (grandia),73 +milk junkies,73 +mile (mil2),73 +mild (trmsasasa),73 +mikanbako (aitatadon3),73 +mikado (winters),73 +miito (meeeeton333),73 +merpperoni,73 +mermaid dress,73 +meg (bakuretsu tenshi),73 +mecha eli-chan (fate),73 +marine nemo (fate),73 +maodouzi,73 +mantis akiyama,73 +manoko,73 +mamepon,73 +makitoshi0316,73 +makabe kazuki,73 +majokko a la mode,73 +maimoto keisuke,73 +maid-chou (maoyuu),73 +mai ball!,73 +magpul,73 +magic circuit,73 +maeda kousuke,73 +mae (nahabaru),73 +lyna the light charmer,73 +luxord,73 +lovelovemaid,73 +louis&visee,73 +looking at watch,73 +lisara restall,73 +liara t'soni,73 +lens (arknights),73 +lecca aisu,73 +leafwow,73 +le malin (muse) (azur lane),73 +latina (uchi no musume no tame naraba),73 +kyrie florian,73 +kyou no go no ni,73 +kyatto ninden teyandee,73 +kurumi moka,73 +kurogane (majesticrune),73 +kouda hayato (e-gis),73 +kotatiyu,73 +kodama (koda mat),73 +kkia,73 +kitamura eri,73 +kishi mieko,73 +kill time communication,73 +kgr,73 +kay faraday,73 +katou (hyaena),73 +kashiwamochi (kashiwakashiwa),73 +karakai jouzu no (moto) takagi-san,73 +karada,73 +kanonno earhart,73 +kannei,73 +kamimura akiko,73 +kakitsubata tsukune,73 +kabru,73 +jyu-so,73 +jungyun99,73 +jin (crocus),73 +jidaigeki,73 +jetfire,73 +jet enduro,73 +jericho (girls' frontline),73 +jarckius,73 +james ghio,73 +ixia (ixia424),73 +iseki mitsuharu,73 +internet explorer (webcomic),73 +indo (mdtanaka2007),73 +illnott,73 +ifrit (housamo),73 +ideon,73 +ichinomiya eruna,73 +ichino nanasuke,73 +ice horns,73 +ibaraki douji (swimsuit lancer) (third ascension) (fate),73 +horizon zero dawn,73 +honoka (summer angel on the shore) (doa),73 +homil22,73 +hollow mouth,73 +holding neckwear,73 +hoerutarou,73 +hobbang,73 +hiyori-o,73 +hit-girl,73 +hiramatsu tadashi,73 +hilda (pokemon) (cosplay),73 +heyzan,73 +heresy,73 +hellboy,73 +helena blavatsky (swimsuit archer) (first ascension) (fate),73 +headache,73 +hazuki shizuku,73 +hatagaya,73 +harumi hana,73 +haru (matatabi sanjou),73 +harper (pokemon),73 +handcuff dangle,73 +hanabusa kokoro,73 +hally,73 +half-track,73 +hakka0320,73 +haido (ryuuno kanzume),73 +hageshii nakano,73 +gurin33,73 +gummy (summer flowers) (arknights),73 +growlanser i,73 +greypidjun,73 +green mask,73 +grace (sound voltex),73 +gomusin,73 +gm (ggommu),73 +glowing clothes,73 +gigantamax charizard,73 +gibson flying v,73 +geordo stuart,73 +galzoo island,73 +gaius (fire emblem),73 +gachapin,73 +fuyuichi monme,73 +fuyou-chan,73 +future gpx cyber formula,73 +furagu,73 +fukukitaru,73 +fueru nattou,73 +flapper shirt,73 +fisheye placebo,73 +fila,73 +fez hat,73 +fengli (709622571),73 +felli loss,73 +fat buu,73 +fantasy world umanest (umamusume),73 +extra tails,73 +european hare (kemono friends),73 +esan (llamaesan),73 +eru,73 +erkerut,73 +erika wagner,73 +endou araya,73 +emoi do,73 +elden ring (object),73 +el (sound horizon),73 +ekusiregaia,73 +duran (seiken densetsu 3),73 +duck print,73 +dootmoon,73 +djeeta (summer) (granblue fantasy),73 +digimoji,73 +detached legs,73 +destroyed,73 +dave rapoza,73 +dark elf (lineage 2),73 +dante (nu carnival),73 +dai-xt,73 +cure sunshine (cosplay),73 +cure muse (black),73 +circle a,73 +chuuten (clam chowder),73 +choujikuu seiki orguss,73 +chiwa (chiwawanwan1206),73 +chen bingyou,73 +chacha (tyatya),73 +cenco,73 +catchouli (hazuki ruu),73 +cariboy,73 +cardigan lift,73 +calpis,73 +c01a (cola),73 +bushidou 4 (sekaiju),73 +buri hamachi,73 +brownie (last origin),73 +bosstseng,73 
+boney,73 +bon nob,73 +bomb item (touhou),73 +bokarokaku,73 +blue bracelet,73 +bloody0rabby,73 +blazblue alternative: dark war,73 +biomega,73 +bellibolt,73 +bath of blood,73 +bat genome,73 +baphomet (grizz),73 +bandaged ear,73 +baku (creature),73 +bajitohfu,73 +bagpipes,73 +bad google+ id,73 +bad cghub id,73 +b gumi,73 +awakusu akane,73 +asuka shirou,73 +asakawa remon,73 +artofkuzu,73 +arima kouichi,73 +argyle bikini,73 +arawi keiichi,73 +anzu 1026,73 +anna (granblue fantasy),73 +ankokuboshi kurome,73 +animal on chest,73 +angelica rafa redgrave,73 +andrew hanbridge,73 +andrea doria (warship girls r),73 +amlichan,73 +amatsuka urara,73 +amane (honey come chatka!!),73 +amaha miu,73 +alvis (last origin),73 +alternate eyewear,73 +alicesoft,73 +akinakesu-chan,73 +akebono kt,73 +akari ryuryuwa,73 +akane harurou,73 +akadama,73 +ajimu kaede,73 +air guitar,73 +ainezu,73 +ainchase ishmael,73 +aida rion,73 +ai the somnium files,73 +agitha,73 +aebj,73 +adjusting bowtie,73 +acmeholic,73 +ace trappola,73 +abyssal jellyfish princess,73 +504 gateway,73 +2n5,73 +zhaktnf,72 +zanpakutou,72 +zabuton dorobou,72 +yuushiba,72 +yushima,72 +yupi (yuyupiko01),72 +yunare,72 +yumeno (rubbercup),72 +yuki touko,72 +yuki hotaru,72 +yugi (magical-dreamer),72 +ysayle dangoulain,72 +yoroi nau,72 +yomogawa ayame,72 +yeooudam,72 +yas (35373),72 +yamabuki zarame,72 +xiaoyuan (you can eat the girl),72 +xiaoling (kyouno),72 +xiao qiao,72 +x-6,72 +wizard (dragon's crown),72 +wig removed,72 +whitesnake (stand),72 +white sister rom,72 +whirlpool,72 +wet fundoshi,72 +watanabe ignica,72 +wakana rei,72 +vil schoenheit,72 +van-s,72 +valsione,72 +utsumi erice (swimsuit avenger),72 +under bridge,72 +uh-60 blackhawk,72 +ufo robo grendizer,72 +u.b m1s2s,72 +type 99 cannon,72 +tudon (donut),72 +ttrop,72 +ttmry bonbon,72 +tsumayouji (tumayog),72 +triangle heart,72 +trace (pokemon),72 +toys drive,72 +toutouhai,72 +touka (nonaka ritsu),72 +toshise.,72 +toshi (anime coloring),72 +torikoro,72 +tora tsugumi,72 +toothbrush mustache,72 +tomiyama akiji,72 +tomato juice,72 +tokomaya keita,72 +tiger (tiger-kimu),72 +theresia van astrea,72 +tessaku ro,72 +teru suzu,72 +terrier (dog),72 +tenyo0819,72 +temmie,72 +tehen,72 +tasukete eirin,72 +tao jun,72 +tanaka shinbei (fate),72 +tamura (kouititamura),72 +tamakagura inari,72 +takuya kame,72 +takeuchi naoko,72 +takenoko mgrc,72 +takemasa,72 +takanaru,72 +taeminhyeon,72 +ta152 (graf zeppelin),72 +swire (honor and splendor) (arknights),72 +suzumaru,72 +sunshine creation,72 +sukiyaki,72 +stechkin aps,72 +spoken letter,72 +speed limit sign,72 +space maria,72 +soyanrai,72 +sousaku kanojo no renai koushiki,72 +soshina nohito,72 +sorahachi (sora823),72 +sora shitatoge,72 +slam dunk (basketball),72 +side mirror,72 +shokushi yuu,72 +shirt straps,72 +shiro font,72 +shino yuki,72 +shino (r shughart),72 +shinkawa youji,72 +shining finger,72 +shin sekai yori,72 +shimushu pose,72 +shimofuri,72 +shimekazari,72 +shiki (no-reply),72 +shigure (kancolle) (cosplay),72 +shigino kisumi,72 +shelter (music video),72 +sheep (trickster),72 +shaun healey,72 +setsuna (miraichizu),72 +senra banshou,72 +seena kanon,72 +seeds328,72 +satou makura,72 +satire,72 +satan (idolmaster),72 +sashimi nasu,72 +saotome rei,72 +sano br,72 +sakurakouji kinako,72 +sakura puchirou,72 +sakifox,72 +saga 2,72 +sacred seven,72 +sachiiro hanamizuki,72 +ryuuno stadtfeld,72 +ryuuhou kai ni (kancolle),72 +ryuuguuji ken,72 +rurine luna,72 +roosterteeth,72 +ronin (zeth total),72 +robou no stone,72 +roaru (gyuren),72 +rizky (strated),72 +rioru (rioru v v),72 +rin 
(shelter),72 +riffle hunter,72 +reindeer hood,72 +reimusan (jotti),72 +raplus,72 +rankiryuu,72 +ran komomo,72 +raijin-bh,72 +raigh (fire emblem),72 +r93 (girls' frontline),72 +qingye ling,72 +q (a72pgc),72 +puchimirin,72 +private garden,72 +privacy screen,72 +pota (nabrinko),72 +popporunga,72 +pinoko,72 +pink mask,72 +phong anh,72 +penis ornament,72 +pengrani,72 +peke (xoxopeke),72 +pastel palettes (bang dream!),72 +pasoputi,72 +park noah,72 +paris,72 +panna444,72 +page,72 +owarine miku,72 +oshi no ko,72 +osaru (yuuen-dou),72 +orqz,72 +orita enpitsu,72 +oricorio (baile),72 +order-sol,72 +open cuffs,72 +oono makoto,72 +oonamazu,72 +oniro,72 +okuma707,72 +o-yuki,72 +noshime ruka,72 +nonomura ayumi,72 +nomura (buroriidesu),72 +nishikujic,72 +nishiki kazue,72 +nihohohi,72 +ni (ippozenshin),72 +neko pan,72 +neats,72 +naze,72 +nasuhara anastasia,72 +naruko (instrument),72 +nambu type 14,72 +namataro,72 +nakaya 106,72 +nakano tomokazu,72 +nagi itsuki,72 +myuria tionysus,72 +my little pogchamp (meme),72 +murazono,72 +mr.blackcat666,72 +mossari poteto,72 +moriyama daisuke,72 +monty python,72 +momiji7728,72 +moi moi7,72 +moffle,72 +mochitsu jou,72 +moana waialiki,72 +mizusawa mimori,72 +miyu (tiny evil),72 +miyauchi lemmy,72 +miyabi (miyabeeya),72 +mit (necomit),72 +mishima akane,72 +misaki (blue archive),72 +misaka misuzu,72 +minus (sr mineka),72 +mine (odasol),72 +min (120716),72 +mile (noukin),72 +mikemono yuu,72 +mikage baku,72 +microphone cord,72 +mew mint,72 +merida angel,72 +meoyo,72 +memeo (candy house),72 +megumi kei,72 +meet,72 +max domikov,72 +match (idleslumber),72 +mars people,72 +marimo (artist),72 +mankanshoku mako (cosplay),72 +mana khemia 2,72 +malleus draconia,72 +makura (makura0128),72 +magui3,72 +magearna (normal),72 +maegami-chan (tawawa),72 +m6a seiran,72 +lyrinne,72 +lizta,72 +lion hair,72 +legs on table,72 +leenim,72 +ledyba,72 +leaning against motorcycle,72 +leaf clothing,72 +lavoy (kurakuro),72 +kutouten,72 +kurogane daichi,72 +kunou tatewaki,72 +kune-kune,72 +kunashiri (etorofu),72 +kuki rei,72 +krookodile,72 +kozureokami20,72 +kotoji,72 +kosumi,72 +koha,72 +kodai susumu,72 +kissing back,72 +kisaragi ichigo,72 +kira keita,72 +kinta (kinta no mousou),72 +kingdom hearts x,72 +kimura daisuke,72 +kimikage yui,72 +kim da-yoon,72 +kichi (kitiokitioo),72 +kh-fullhouse,72 +keypot,72 +kazamatsuri matsuri,72 +kawami nami,72 +kawaguchi (mojacome),72 +katagiri chisato,72 +karv,72 +kankitsurui (house of citrus),72 +kana tako,72 +kamita,72 +kamen rider ex-aid,72 +kamen rider accel,72 +kakkou,72 +kaguyuu,72 +kaguya (sinoalice),72 +kagenui yozuru,72 +kagami kino,72 +juujou hiyori,72 +jura cambri,72 +joshi shougakusei hajimemashita,72 +joe higashi,72 +jian xia qing yuan (series),72 +jaw,72 +iwanaga tm,72 +ivan (ffxazq),72 +itou ben,72 +isla (kof),72 +ishihara souka,72 +ishibashi yosuke,72 +inami hatoko,72 +icons (1452697582),72 +hounyouin,72 +hoshizora no shita,72 +honda motocompo,72 +homex,72 +hiragi rin,72 +hijirisawa shonosuke,72 +higofushi,72 +hero-chan (hanauna),72 +hera-ur (p&d),72 +hawk (nanatsu no taizai),72 +hattori yuu,72 +hat under hood,72 +harie (granblue fantasy),72 +hara takehito,72 +hanna wind,72 +hanjuku tomato,72 +hanbu hantarou,72 +hana kon (17aaammm),72 +hamano kaiji,72 +hair focus,72 +haiku,72 +haeil2,72 +hade na kangofu,72 +gunjima souichirou,72 +gundam side story: the blue destiny,72 +guardian chara,72 +gradient cape,72 +gotou hitori,72 +gossifleur,72 +gold creator award,72 +gm,72 +gluttony (fma),72 +glassjill,72 +ginoza nobuchika,72 +gilles de rais (saber) (fate),72 
+giant panda (kemono friends),72 +garjana,72 +galarian meowth,72 +gaitoou,72 +fyama,72 +fuyusaka iori,72 +furisode girl (pokemon),72 +fujiwara riyu,72 +free hugs,72 +freckles nun (diva),72 +fourth of july,72 +fiora (fire emblem),72 +finger to another's cheek,72 +ferret ears,72 +fan to mouth,72 +exelica,72 +eugenio2nd,72 +epel felmier,72 +eou,72 +emma woods,72 +eku,72 +early type,72 +dreamysuite,72 +drake (the golden hind's respite) (azur lane),72 +deretta,72 +death devil,72 +dazai osamu (bungou stray dogs),72 +daruma karei,72 +cure flamingo,72 +cuirass,72 +crustle,72 +crossbone gundam x-1,72 +creta (taku10),72 +co botan,72 +cleo (dragalia lost),72 +clear insertion,72 +clala,72 +circle echime,72 +chushengdao,72 +chu kai man,72 +christopher columbus (fate),72 +chokotto sister,72 +chiro (bocchiropafe),72 +child (isoliya),72 +chikorita85,72 +chiko (kanhogo),72 +chidorigafuchi aine,72 +chester ocampo,72 +chester burklight,72 +cheetahman (1ddghfr78cswc),72 +central princess,72 +captain america: civil war,72 +caduceus,72 +burnt green tea,72 +bumcha,72 +buggy the clown,72 +budweiser,72 +boxer dansi,72 +bluepony,72 +bluedemon13,72 +bizarre rain,72 +bionekojita,72 +bindong,72 +biako,72 +bear tsukasa,72 +bad poipiku id,72 +azu (kirara310),72 +awayawa pic,72 +asparagus,72 +ash (phantom brave),72 +asanuma,72 +asakurashinji,72 +artist glove,72 +artificial flower,72 +archery dojo,72 +aquamarine (gemstone),72 +aqua belt,72 +apring,72 +apophis (monster girl encyclopedia),72 +aosode,72 +anzi,72 +annlu vazzel,72 +anna (watamote),72 +ameiro,72 +ame (amemgmgmg),72 +amagasahigasa,72 +altina (fire emblem),72 +alter ego malevolent (granblue fantasy),72 +alice (ragnarok online),72 +al (ahr),72 +akisu karasu,72 +akise aru,72 +akimoto katherine reiko,72 +aki tomoya,72 +aizawa tomomi,72 +aiura,72 +airplane hair ornament,72 +aimpoint,72 +aife (zana),72 +aiba kou,72 +404,72 +1ssakawaguchi,72 +1055,72 +zyl,71 +zuifeng tenkai,71 +zhao linger,71 +zettai bouei leviathan,71 +zero point,71 +yuzuki yukari (lin),71 +yuyumatsu,71 +yuusei tsukiro,71 +yuuma (yuma1111),71 +yukko,71 +yoshikawa kikyou,71 +yoshida takuma,71 +yoshida seiko,71 +yonemura kouichirou,71 +yomoyama (toirets),71 +yogukasu,71 +yesterday wo utatte,71 +yamamura ken,71 +yamamoto nori,71 +yamada-kun to 7-nin no majo,71 +xiao-mei,71 +world map,71 +witch's weapon,71 +waveform,71 +watase rei,71 +walter white,71 +walluka,71 +venus love me chain,71 +valac clara,71 +v8,71 +usuki (usukine1go),71 +urkt 10,71 +uni (oni unicorn),71 +u-110 (azur lane),71 +tylwing,71 +tuzhate,71 +turquoise iro,71 +tsunaso (lack of sunlight),71 +tsunashi youta,71 +tsumayouji (dekosoko),71 +tsukishima kei,71 +tsuka-ito,71 +trowa barton,71 +tron,71 +triangle bullet,71 +tracer fire,71 +toyux2,71 +totally spies,71 +torn umbrella,71 +topo (bacchustab),71 +tooculi,71 +tongkkangi,71 +tom (pixiv10026189),71 +tofu (tf07px),71 +timitarcat,71 +theresa apocalypse (twilight paladin),71 +teucer (genshin impact),71 +tennis dress,71 +tashkent (muse) (azur lane),71 +tamamo no mae (tailmaid strike) (fate),71 +tamadra,71 +taketori monogatari,71 +takane hibiki,71 +taharu kousuke,71 +tachibana chitose,71 +sword art online: fatal bullet,71 +suzuran (lostlands flowering) (arknights),71 +suzuki masahisa,71 +sutokame,71 +sutaa sutoringuzu yori,71 +sudach koppe,71 +stuffed horse,71 +striped innertube,71 +striped headband,71 +striped cardigan,71 +strawberry pocky,71 +steyr iws 2000,71 +steward b,71 +star guardian lulu,71 +souri,71 +soul patch,71 +sosa,71 +sorethroat,71 +song request,71 +soma cruz,71 +solana 
(pokemon),71 +snow white (queen's blade),71 +siyudi (cookie),71 +silas (fire emblem),71 +shishou (doragyurosu),71 +shinobi 4 (sekaiju),71 +shinketsu kanyu,71 +shinjin succubus (konosuba),71 +shin megami tensei: strange journey,71 +shiinotic,71 +shiina minatsu,71 +shibuki kamone,71 +shelter,71 +severed torso,71 +set square,71 +serizawa enono,71 +sendai (nazonomono),71 +seki (l0410706268),71 +sega hard girls,71 +sd gundam gaiden,71 +scheris adjani,71 +scarfy,71 +sbbs,71 +satin clothes,71 +sashou mihiro,71 +sasagawa kyouko,71 +saruei (vtuber),71 +sakushin,71 +sakurapochi,71 +saburoo,71 +saber alter (cosplay),71 +s-goon,71 +ryuu (breath of fire iii),71 +ryuko redraw (meme),71 +ryochan96154,71 +ryan greythorn,71 +runa7,71 +rugby ball,71 +rudia of the moon smile,71 +rozer,71 +royale style (idolmaster),71 +riria (liry a flower),71 +rin (rin7kan7),71 +revive revival,71 +reki (arequa),71 +reika (dream c club),71 +regina (dino crisis),71 +redial (vocaloid),71 +rapunzel (sinoalice),71 +randy (awesomevillage),71 +ran (urusei yatsura),71 +rakugakiraid,71 +raijuu,71 +r. blackriver agogo,71 +qubeley mk ii,71 +pryce (pokemon),71 +protagonist (tokimemo gs3),71 +print tank top,71 +princess leia organa solo (cosplay),71 +prince albert,71 +presenting pussy,71 +piercing pull,71 +phrygian cap,71 +phantasy star zero,71 +persona 4: the ultimax ultra suplex hold,71 +perisie (star ocean),71 +penis in thighhigh,71 +paravene,71 +pantyhose under trousers,71 +p/a ~potential ability~,71 +orihime (cosplay),71 +oribe mafuyu,71 +ore ga suki nano wa imouto dakedo imouto janai,71 +orbeetle,71 +on stairs,71 +on ( l0 ),71 +ofuro mantarou,71 +official manga,71 +o-los,71 +nurse nemo (fate),71 +nove (legge),71 +not afraid anymore,71 +nora (salatto),71 +nokkusuart,71 +noise (symphogear),71 +nodoka glasses,71 +nodoka (blue archive),71 +niwatoriya,71 +niseneko (mofumofu ga ienai),71 +nico (hero300),71 +nichts (fatalbug896),71 +nekoto rina,71 +nekone (utawarerumono),71 +nanashi mushi,71 +nanaichi,71 +nakanishi tatsuya,71 +nabooru,71 +nabana (bnnbnn),71 +murasaki (lightsource),71 +multicolored sleeves,71 +multicolored rose,71 +mukade (siieregannsu),71 +mori shin risuku,71 +mordekaiser,71 +moonsorrow,71 +momopanda,71 +mofupaka,71 +moegi homi,71 +mob face,71 +mob-sensei,71 +mitche,71 +minted,71 +mini mamu,71 +mineta naoki,71 +minazuki tooru,71 +miluke,71 +mikazukimo,71 +mikane sebiri,71 +mezzo forte,71 +melynx,71 +mello,71 +mega ring,71 +meesuke,71 +matsuno matsuyo,71 +mathilda lando,71 +marukei (kuramaruk),71 +marlon (pokemon),71 +marie (pixiv31942978),71 +mardjan,71 +marcy wu,71 +mana matitia (okame nin),71 +majoca majoluna,71 +mahotama,71 +magician (module),71 +luoxiaobai,71 +lunch,71 +lunar: the silver star,71 +luna (fizintine),71 +ludaf,71 +looker (pokemon),71 +lock seed,71 +lipstick mark on chest,71 +lilynna blueberry,71 +leg holster,71 +larger bmx,71 +laoyepo,71 +lamellar armor,71 +lady in red (ib),71 +kyle dunamis,71 +kuune,71 +kusakabe rei,71 +kurusu yuzuko,71 +kurozatou owata,71 +kurotsuki (luowei99),71 +kurabayashi,71 +kujou shion,71 +kuga yoshito,71 +koutetsu tenshi kurumi,71 +komi shin'ya,71 +kokenashi,71 +kobushime (sorobochi),71 +kizuna ai (elegant),71 +kitsune (scaz),71 +kirishima (azur lane),71 +kiri celea,71 +kinjero,71 +kingdom hearts chain of memories,71 +king slime,71 +king george v (azur lane),71 +kinako (462),71 +kimi ni matsuwaru mystery,71 +kijibato 123-gou,71 +kazunari,71 +kawara yun,71 +kara (color1087),71 +kannazuki tamaki,71 +kamon (shinshin),71 +kamen rider zi-o,71 +kakuteru sudachi,71 +kakei,71 
+kaibutsu,71 +kaguya hime,71 +justaway,71 +jungle cat (kemono friends),71 +joseph stalin,71 +jill (fire emblem),71 +jelly shrimp,71 +jamil viper,71 +jacques de molay (foreigner) (third ascension) (fate),71 +izumi wakoto,71 +iwaya,71 +itami youji,71 +italian senate porn livestream,71 +isbeyvan,71 +iron man (cosplay),71 +irisviel von einzbern (cosplay),71 +inukami!,71 +innoarukugyou,71 +ink stain,71 +indian elephant (kemono friends),71 +inazuma eleven go vs danball senki w,71 +imdsound,71 +idolmaster starlit season,71 +hz (helu 2),71 +hug-tan (precure),71 +huang (volt0526),71 +horns through hood,71 +holding shuriken,71 +hizaki gamma,71 +hip gear,71 +hinnyuu-chan (iku),71 +himitsu no jugyou,71 +hime to boin,71 +hikari (mitsu honey),71 +heaven (kanji),71 +headband around neck,71 +hayama marin,71 +hatori you,71 +harusame-r,71 +haru (renol),71 +harryych,71 +harpie lady 1,71 +hanako (disgaea),71 +hanabi (senran kagura),71 +hamu agaki,71 +hako (gyhujikolp),71 +haji (hajiko),71 +h2so4kancel,71 +gundam x,71 +gunbreaker (final fantasy),71 +gummy bear,71 +gu yuena,71 +greil,71 +grappling gloves,71 +granbelm,71 +gou (double trigger),71 +goten (510gensoku),71 +golden boy,71 +getumentour,71 +genjiguruma,71 +gatsby ssl,71 +garutaisa,71 +gamer driver,71 +gacha,71 +g-senjou no maou,71 +fuyu mi,71 +future fish,71 +fur-trimmed armor,71 +fujiki kouta,71 +freenote mr,71 +franciscoetchart,71 +flapping ears,71 +fizrotart,71 +fishnet leotard,71 +fight ippatsu! juuden-chan!!,71 +fasnakegod,71 +falco arrow,71 +fake blood,71 +fairy knight gawain (first ascension) (fate),71 +faech,71 +facebook-san,71 +f-4 phantom ii,71 +ex-s gundam,71 +ewokakukaede,71 +ethan winters,71 +erise,71 +eriimyon,71 +eric lowery,71 +ergo proxy,71 +eos (ff14),71 +entrails,71 +english flag,71 +emerald herald,71 +elly (tonari no kyuuketsuki-san),71 +ello,71 +elisa (girls' frontline),71 +elaine,71 +eikou no guardian battle,71 +ei ei okotta?,71 +egret,71 +efmoe,71 +effects pedal,71 +ecu8080,71 +echiru39,71 +ebiri fy,71 +dungeons & princess,71 +drapion,71 +dracula (castlevania),71 +doomcomic,71 +doctor magus,71 +disco elysium,71 +dino crisis,71 +diarmuid ua duibhne (saber) (fate),71 +dhelmise,71 +dental gag,71 +den (den zuri555),71 +dangaiou,71 +damascus (dearigazu2001),71 +daibanchou,71 +daiakuji,71 +d jirooo,71 +cynthia riddle,71 +cuvie,71 +cui yifei,71 +cross of saint andrew,71 +crayon (medium),71 +coyopotato,71 +cling,71 +clementine (overlord),71 +clay (pokemon),71 +classy cranberry's,71 +cinderella (queen's blade grimoire),71 +choir (artist),71 +chocolate milk,71 +cho marisa,71 +chirosuke,71 +chiki yuuko,71 +chiisan,71 +cheogtanbyeong,71 +chanchanko (clothes),71 +champuru,71 +chamber (suisei no gargantia),71 +cattleya (flower knight girl),71 +carmilla (summertime mistress) (fate),71 +butsu menhou,71 +burst bomb (splatoon),71 +brynhildr romantia,71 +brown umbrella,71 +bronya zaychik (snowy sniper),71 +bronto burt,71 +boonie hat,71 +body chain,71 +bluerabbit gap,71 +blue halo,71 +blood sucking,71 +blazing heart (elsword),71 +black light,71 +bell orgel,71 +bean mr12,71 +battle chatelaine,71 +banana gyuunyuu,71 +bad singing,71 +ayase sayuki,71 +axolotl girl,71 +australia,71 +auron,71 +aura (aurapls),71 +asahina,71 +arlizi,71 +ariyon,71 +ari (bleum),71 +apricot (yamai),71 +aospanking,71 +aonogura,71 +aogiri sei,71 +antilous,71 +angel's swimsuit (idolmaster),71 +anakoluth,71 +ameimo,71 +amano nene (vtuber),71 +amamiya rizumu,71 +amada,71 +alois trancy,71 +alien nine,71 +akizuki maku,71 +akaba chizuru,71 +aizawa azusa,71 +air current,71 +aiko 
(less),71 +agni gandiva,71 +act-age,71 +accidental kiss,71 +abiko zyozi,71 +abarabone,71 +9nojo,71 +8takenokonosato8,71 +210ten,71 +1044kiro,71 +zinczinc ka,70 +yuuyuu (yuko),70 +yuu (twisted wonderland),70 +yume miru kusuri,70 +yukia (yukia 777),70 +yuki kaori,70 +yuhazu (rhe-rhe),70 +yuasa,70 +young justice,70 +yoshioka (haco),70 +yorck (azur lane),70 +yojio (2188),70 +yo-ba yo,70 +yki,70 +yellow heart,70 +yayoi kotoyuki,70 +yatteyan'yo,70 +yani tama,70 +yangshangshu,70 +yamanori (yamanori56),70 +yagokoro eirin (cosplay),70 +yagi (kyuhyun),70 +xianyusamuel,70 +wine (2148 wine),70 +wenz,70 +watashi kininarimasu,70 +wang-xi,70 +wakame (vhai26uhykmqb9k),70 +vinyl (vinyyl bag),70 +vile (mega man),70 +vickie (cryingrobot),70 +usami tokishige,70 +uni (bom19850101),70 +turkey,70 +tsuzuki masumi,70 +tsunekun,70 +tsunakawa,70 +tsumaseu,70 +tsumamigui,70 +tsukumo kazuki,70 +tsukuda hayato,70 +tsukkomi,70 +tree print,70 +transparent footwear,70 +trance (hijiri),70 +touin rina,70 +toudou kirin,70 +toudou charo,70 +torn flag,70 +tongkkangi (streamer),70 +tomokoji,70 +tomoe tamao,70 +tokimachi eisei,70 +tohsaka sakura,70 +to heart 2 dungeon travelers,70 +three k (spiritus no honoo),70 +thorton (pokemon),70 +the baddest akali,70 +terra formars,70 +teba motoko,70 +tawawa group,70 +tasuku (otomebotan),70 +tarutaru yamaoka,70 +tang san,70 +tanaka yutaka,70 +tamuhei (6nasiki),70 +talunilu uu3,70 +talgi,70 +takkun (flcl),70 +takayama chihiro,70 +takahashi akira,70 +tabata hidenori,70 +sword clash,70 +suzuki sakura,70 +surikogi,70 +super robot wars og the inspector,70 +sugita ranpaku,70 +subaru noji,70 +star guardian janna,70 +soramame pikuto,70 +sokka,70 +snowciel,70 +slit throat,70 +slayer (dungeon and fighter),70 +shuu illust,70 +shizuoxing kof,70 +shishima eichi,70 +shirou (bear) (fate),70 +shiro (nitto),70 +shiohari kanna,70 +shinomiya runa,70 +shinohara shinome,70 +shino (osaru),70 +shin (highest1192),70 +shin1ar24,70 +shimura nana,70 +shikino (sikinonono),70 +shika (shika0),70 +shijimi kozou,70 +shiguko,70 +shigeru,70 +shellos,70 +sheep sleep,70 +seesaw,70 +season (artist),70 +sayuwan,70 +sarah sauge,70 +sanpeita,70 +samuraisamurai,70 +saku (kazana),70 +sakimiya mafu,70 +saitou gabio,70 +saitani umetarou,70 +saint mary's academy uniform,70 +sacchan (nyaromeron),70 +s-yin,70 +ryman,70 +rrr (reason),70 +roroichi,70 +rocket raccoon,70 +rock balancing,70 +riyo servant (babydoll) (fate),70 +rikotan,70 +riking,70 +rick and morty,70 +rhajat (fire emblem),70 +reon (dainagon azuki),70 +relay race,70 +red scales,70 +razuzyamu,70 +rance ix,70 +ranbu hararin,70 +rainyazurehoodie,70 +radlionheart,70 +qi hui,70 +purple haired elf (houtengeki),70 +puddingx2,70 +public address system,70 +prusena (haevest),70 +print bag,70 +primal kyogre,70 +praxis (xenoblade),70 +poyo (shwjdddms249),70 +pollux (housamo),70 +poco muerte,70 +pluto symbol,70 +plico (nicoma),70 +planetarium,70 +pizzzica,70 +piyo (ppotatto),70 +pixiv festa,70 +pink overalls,70 +perfume (cosmetics),70 +peachems (gemu),70 +pastel memories,70 +pachimari,70 +owada (kousonhuchi),70 +osuk2,70 +orange (among us),70 +ooka (rkyu),70 +ono ikumi,70 +onii-san (tawawa),70 +one year artist progress record,70 +okukawa minako,70 +okita,70 +ogadenmon,70 +odagiri futaba,70 +oborogumo takamitsu,70 +obon,70 +null (chronix),70 +nova (starcraft),70 +noi (noi 2y),70 +noco (pixiv14976070),70 +no10,70 +ninja (yzpyn),70 +ninja (tera online),70 +nimu (vtuber),70 +nikayu,70 +nijimaarc,70 +newt scamander,70 +nergigante,70 +nemusuke,70 +negy,70 +nasa,70 +narusawa sora,70 +naoe 
kanetsugu (hyakka ryouran),70 +nanasumi (pantie party project),70 +nanase nanami (gundam build divers),70 +nanako (houkago no pleiades),70 +nakajima rei,70 +nairobi song,70 +nagayo,70 +nagasawa (tthnhk),70 +nachiru,70 +myero,70 +muutsuki,70 +muteki choujin zambot 3,70 +mushibugyou,70 +multiple hats,70 +multicolored sweater,70 +mukuro (sakiyo cake),70 +mt.somo,70 +morag ladair (obligatory leave) (xenoblade),70 +mooofoo,70 +monster hunter stories 2,70 +monokuro (sekahate),70 +monaka natsume,70 +moejin,70 +mochizuki himari,70 +mizuki takehito,70 +mizuchi (mizuchi7118),70 +miyama rikka,70 +miyama (kannsannn),70 +mix (wkupmix),70 +mitoko (tsuchikure),70 +minase tamaki,70 +minari (lmina09),70 +mina (sio0616),70 +mimana,70 +miki (hoozuki no reitetsu),70 +mikan 29344886,70 +miharu (ttt),70 +metadora,70 +megaera (hades),70 +medea,70 +mecha on girl,70 +mazinkaiser,70 +matsuuni,70 +matsuno opa,70 +matsuki akira,70 +masuku (saint mask),70 +masu gitsune,70 +marie antoinette (festival outfit) (fate),70 +maria tr,70 +marco polo (azur lane),70 +mansu,70 +mankun,70 +maltese tiger (kemono friends),70 +male underwear removed,70 +makarov pm,70 +mahou no princess minky momo,70 +macan (housamo),70 +ma nyan (nyao mao nyao),70 +m ganzy,70 +m bison (cosplay),70 +lychee,70 +luupechi,70 +luchador,70 +love live! the school idol movie,70 +love ball,70 +little noah,70 +lisuchi,70 +licking leg,70 +leveen,70 +lerico213,70 +leon magnus,70 +legend of queen opala,70 +leer.meer,70 +lee (saraki),70 +larsa ferrinas solidor,70 +kyaro (kyaro54),70 +kuroinu,70 +kurogane otome,70 +kuro goma (kakkou11),70 +kunoichi-chan (iroha (iroha matsurika)),70 +kousaka nobaku,70 +koubuin yuuhi,70 +koooogasya,70 +konokiya,70 +konneko,70 +komori kinoko,70 +kiwi of ruin,70 +kirino aya,70 +kiriki haruomi,70 +king of spades,70 +kikimora (monster girl encyclopedia),70 +kiiro no nantoka,70 +kiev (azur lane),70 +kick scooter,70 +kibii mocha,70 +kgt (pixiv12957613),70 +kazuko (towa),70 +kazuha (saku kn),70 +kazama gorou,70 +kayou (sennen sensou aigis),70 +katana man (chainsaw man),70 +kashii (amoranorem),70 +karasu (jjwh2832),70 +kanojo mo kanojo,70 +kanna (chaos966),70 +kalk (azur lane),70 +kaito (sawayakasawaday),70 +kairi (kai ri),70 +kagiyama hina (cosplay),70 +kafu (cevio),70 +k-ya.,70 +jyu ichi,70 +junkt729,70 +junjunforever,70 +jude maverick,70 +johnston (kancolle) (cosplay),70 +jofang,70 +jockey,70 +jk-chan (oouso),70 +jinyuan712,70 +jin (granblue fantasy),70 +jesse pinkman,70 +jason todd,70 +jamir,70 +jajao,70 +izumi bell,70 +iwata satoru,70 +itsuki jun,70 +inukoko,70 +instanttnoodle,70 +inase shin'ya,70 +imaizumi kagerou (wolf),70 +ikkitousen xtreme xecutor,70 +ikebata homura,70 +ikataruto,70 +ikari yui,70 +if f,70 +ichinomiya kantarou,70 +ichikura (bk),70 +hostage,70 +hoshiakari (c2 kikan),70 +homura (silver blaze),70 +holding stethoscope,70 +holding pitchfork,70 +holding another's thighs,70 +hitoshi,70 +higebu,70 +hecate (shakugan no shana),70 +headstand,70 +head rub,70 +harley quinn (cosplay),70 +happy party train,70 +hanchou (shirokun555),70 +hanavbara,70 +hamu (plot sy),70 +hamidashi creative,70 +hakuginnosora,70 +hakugei (re:zero),70 +hagi (ame hagi),70 +hachisuka goemon,70 +gyup,70 +gyn chair,70 +guel jeturk,70 +grapploct,70 +granbull,70 +gooster,70 +goenitz,70 +glamour works,70 +girdle,70 +ginjyasei,70 +giant insect,70 +geometry,70 +genista (darling in the franxx),70 +gbeeee,70 +garo:honoo no kokuin,70 +ganida boushoku,70 +gabu (az210309),70 +g ieep,70 +furuhashi fumino,70 +fruit sandwich,70 +francoise arnoul,70 +france shoujo,70 
+fpminnie1,70 +folded twintails,70 +flower bikini,70 +fenix uniform,70 +felnemo,70 +fatal frame 5,70 +fantasy seal,70 +eye chart,70 +ev (dai00888),70 +eureka brider,70 +euclase (houseki no kuni),70 +eternatus,70 +escalation heroines,70 +eru-sennin,70 +ere (2516325),70 +eldritch loreteller (idolmaster),70 +eldar,70 +dumpster,70 +dualsense,70 +drid,70 +drawing board,70 +double lariat (vocaloid),70 +dodo (bird),70 +dmc: devil may cry,70 +devilhs,70 +dashi (minzoku gb),70 +dana (hapong07),70 +daisy mitsumata,70 +dada (dolce),70 +d3a,70 +cut-here line,70 +curling iron,70 +cu chulainn (fate/extra),70 +crow 3434,70 +cranidos,70 +cracked mask,70 +cqc,70 +cosmo (465lilia),70 +cordless phone,70 +convenient breasts,70 +compass (instrument),70 +colored armpit hair,70 +code geachu lelouch of the calamity,70 +clapperboard,70 +clair (fire emblem),70 +cinderella (disney),70 +choudenji machine voltes v,70 +chika (princess connect!),70 +chiji komari,70 +chihiro (chrnt),70 +chesnaught,70 +chen zi,70 +check source,70 +chankyone,70 +cetera,70 +cbow,70 +cbi cbi,70 +cartoon network,70 +c take0141,70 +byte (grunty-hag1),70 +bullet trail,70 +bouhatei (t-back),70 +boogiepop,70 +bombergirl573,70 +boba,70 +blaze fielding,70 +bismarck kai (kancolle),70 +bismarck drei (kancolle),70 +bingo tarte,70 +bibimbub,70 +basch fon ronsenburg,70 +barinbo,70 +bandaid on breast,70 +bamboo memory (umamusume),70 +bakushiishi at,70 +baby's-breath,70 +azitama atsushi (attyuu),70 +ayuko (ayuco),70 +asava (hutaitenn),70 +arnold schwarzenegger,70 +arin sel,70 +aries muu,70 +aria (opera),70 +arepko,70 +ara haan (celestial fox),70 +aoshidan (emblem),70 +aoi rena,70 +aoi (gegege no kitarou),70 +amo (amo9612),70 +ame usari,70 +amakusa shirou (third ascension) (fate),70 +alto2019,70 +alten,70 +altair floone,70 +aika warazu,70 +agarwood,70 +aduo,70 +abigail williams (fate) (cosplay),70 +9rimson,70 +.l.l,70 +zzzpani,69 +zetta (phantom kingdom),69 +zest,69 +yyy246,69 +yuya090602,69 +yuusuke (5yusuke3),69 +yuna (spn28u79),69 +yukkuri youmu to hontou wa kowai cthulhu shinwa,69 +yuki tomoshi,69 +yukari yukke,69 +yoshii dan,69 +yomogi dango,69 +yo na,69 +yasushi,69 +yamakawa michiko,69 +yamabukiiro (browncat),69 +xie lian,69 +xiao gen,69 +wristwear,69 +wraith (apex legends) (cosplay),69 +wo jianqiang fu guo,69 +windows xp,69 +wille (fatalbug896),69 +wattsu,69 +watayoshi (suiiho),69 +waneella,69 +wakatsuki sana,69 +virizion,69 +virgosdf,69 +victory belles,69 +verynezumi,69 +versiris,69 +vampire (little devil in white) (azur lane),69 +utsunomiya toramaru,69 +utilizator,69 +ushi 424,69 +usameruti,69 +united states air force,69 +uni (nico02),69 +umumu (pilcre),69 +twilight (spy x family) (cosplay),69 +twarda8,69 +tsunamino yuu,69 +tsuna (al dente),69 +tsumetsume zerii,69 +tottsuan,69 +tonomayo,69 +tomoe gozen (swimsuit saber) (third ascension) (fate),69 +toadstool (natadekoko),69 +the grateful dead (stand),69 +tenjouin saki,69 +telaform,69 +teiryoku lolita,69 +teikoku senki,69 +tazu,69 +taurus,69 +tatutaniyuuto,69 +tatatan (ts kon2),69 +taniguchi daisuke (surumenium),69 +tanaka ichi,69 +tanaka deshirittoru,69 +tamura satomi,69 +takiya makoto,69 +take no ko (dodon),69 +takatsuki tsukasa,69 +takashima yuuna,69 +takappe,69 +takanashi koyomi,69 +takahashi rumiko,69 +taira momen,69 +tainaka satoshi,69 +tail on face,69 +taikodon,69 +taigi akira,69 +tai gong wang (fate),69 +tachibana hiroki,69 +taba comya,69 +sugar song and bitter step,69 +suelle marlen,69 +stigmamyu,69 +spiked jacket,69 +specter (undercurrent) (arknights),69 +soumendaze,69 +sorayama natsume,69 
+sophia f shirring,69 +smelling feet,69 +slowking,69 +ski pole,69 +sinccubi,69 +sime (echo),69 +shuten douji (housamo),69 +shoonia,69 +shogun (sekaiju),69 +shiro (deadman wonderland),69 +shirley fennes,69 +shirayuri kaguya,69 +shira ichigo (ichigohou),69 +shino (yaruki nai yatsu),69 +shimanoko,69 +shima tetsuo,69 +sentarou,69 +segawa misato,69 +seato-hao,69 +sdwing,69 +scott pilgrim,69 +scotch (meitantei conan),69 +scientist (zannen onna-kanbu black general-san),69 +scar (fma),69 +sazanka,69 +satsumaimo pai,69 +satsuki itsuka,69 +satozaki kiko,69 +satou saori,69 +satan (devilman),69 +sas (ls08b),69 +sarah (pokemon),69 +saple,69 +sanzu river,69 +sakurai chisato,69 +sakigake generation!,69 +ryuujou (kancolle) (cosplay),69 +ryoi,69 +rui (o-rui),69 +ruby (jewelpet),69 +roku 6,69 +roger smith,69 +rnd.jpg,69 +ritual,69 +rilakkuma,69 +ribao,69 +rery,69 +ravness loxaerion,69 +r2-d2,69 +quarantine 722 wraith,69 +qi'e (penguin),69 +pulley,69 +puffy anus,69 +print vest,69 +postmark,69 +polka dot jacket,69 +polka (trusty bell),69 +plain,69 +phantasy star ii,69 +peron (niki2ki884),69 +pasya-pasya,69 +pastel chime continue,69 +pangolin ears,69 +padded gloves,69 +p!k@ru,69 +ozawa reido,69 +oumi sanaka,69 +operator 6o,69 +oollnoxlloo,69 +onka,69 +omnisucker,69 +omega sisters,69 +okura-chan (at nya5),69 +ojou (galko),69 +oguri oguri (meme),69 +odin (fire emblem),69 +oboro (iron saga),69 +object on pectorals,69 +nukigee mitai na shima ni sunderu watashi wa dou surya ii desu ka?,69 +nogizaka mika,69 +ninten,69 +neyuki no gen'ei,69 +nekonabe ao,69 +nato-kun,69 +nanoca flanka,69 +nannaru (nananana),69 +nanami neru,69 +nana (732 kaiten),69 +namidako,69 +nakta,69 +nakada rumi,69 +n.a.,69 +muya (uyamuya jj),69 +muteki kanban musume,69 +mushihime-sama futari,69 +multicolored umbrella,69 +mulan,69 +mukai suzu,69 +mouri motonari (sengoku basara),69 +moroheiya (moroheiya-works),69 +morino ryoushi,69 +mokoppe,69 +mogmog,69 +moe on drop,69 +mochisaka mitsuki,69 +mizu,69 +mittelt,69 +mitsuru-kun (kusanagi tonbo),69 +mitarai ryouta,69 +misuke (gyouran),69 +mishima heihachi,69 +minky momo,69 +milo and akouo,69 +mijinkouka,69 +mielang,69 +michou,69 +metal bikini,69 +merry milk,69 +meroko yui,69 +merchant,69 +meditite,69 +meadow (morphinecaca),69 +matsumoto mitsuaki,69 +matsumoto kouko,69 +matsuki miyu,69 +master yi,69 +masatoki,69 +masakichi (heppouku),69 +maronie.,69 +marlboro,69 +marie antoinette (swimsuit caster) (first ascension) (fate),69 +mara (megami tensei),69 +maou-sama to kekkonshitai,69 +manabe jin'ichirou,69 +mana khemia,69 +mamita,69 +makoto jon,69 +makoto ikemu,69 +maki (huran),69 +majic lin,69 +magrona channel,69 +magokorokurage,69 +maachi (fsam4547),69 +m1 abrams,69 +lux (alpa),69 +luode huayuan,69 +lucius (fire emblem),69 +lotus (elico),69 +llamrei (fate),69 +lime hair ornament,69 +lilith (evangelion),69 +leona (granblue fantasy),69 +leex,69 +leaf nun (diva),69 +kzcjimmy,69 +kyou (ittla),69 +kusami toka naku au,69 +kuroki mashiro,69 +kuro nezumi inu,69 +kumaneko,69 +koyori,69 +kotera ryou,69 +komachi tsugumi,69 +knatb,69 +kissai,69 +kirita asami,69 +kirby's return to dream land,69 +kimisaki school uniform,69 +kiku (kicdoc),69 +kikou senki dragonar,69 +kep (ahokep),69 +kengan (series),69 +keisuke (0320030103200301),69 +keid,69 +kazetsubaki kuriko,69 +katy (artist),69 +katri (unwilling orc),69 +kareshi kanojo no jijou,69 +karasuro,69 +kanzaki sora,69 +kanjou jouki,69 +kanini,69 +kanda satoshi,69 +kamen no hito,69 +kakita (92m),69 +kaisanbutsu,69 +kageng,69 +kaden (fire emblem),69 +kabi (zcwd8845),69 
+juujouin mari,69 +jun (aoerm),69 +judge (girls' frontline),69 +joy (pokemon) (cosplay),69 +jogan,69 +joe shimamura,69 +jiushi shijiu,69 +jinsei,69 +jibakurei (elite unchi),69 +jian xia qing yuan online 3,69 +jeffrey10,69 +jazz (transformers),69 +jayce (league of legends),69 +japanese macaque,69 +jagayamatarawo,69 +jacqueline baumgold,69 +izumo kasumi (nijisanji),69 +iwai ku tsuki,69 +itsuki (itsukiovo),69 +itose ikuto,69 +itete,69 +isora,69 +ishioto,69 +isekai shinige ojousama,69 +isada (daisa),69 +irohatomo,69 +inoue makito,69 +ino futon,69 +implied ejaculation,69 +igarashi ikki,69 +ichijou raku,69 +ichigo daifuku,69 +ibaraki douji's arm,69 +ibara.,69 +i-iv (longman),69 +i-400 (aoki hagane no arpeggio),69 +hydrafxx,69 +human tug of war,69 +hug and suck,69 +hoshizuki suzu,69 +hoshina utau,69 +hoshi umi,69 +horn lance,69 +hooded kimono,69 +holding turret,69 +holding eyepatch,69 +holding cooking pot,69 +hiyoko daiou,69 +hitogome,69 +hirama hirokazu,69 +himenokouji akiko,69 +hiei (aoki hagane no arpeggio),69 +hiccup,69 +hibikino-san-chi wa eroge-ya-san!,69 +hero (dq6),69 +henrietta (zankuro),69 +heart bikini,69 +head on another's stomach,69 +hayama teru,69 +hatakeyama tsukushi,69 +harano kaguyama,69 +happy lesson,69 +hanato kobato,69 +hanae (blue archive),69 +hall jion,69 +hakurei botan,69 +hakua (hka art),69 +haku (fate),69 +hagiwara onsen,69 +hades project zeorymer,69 +gym trainer (pokemon),69 +gyaku tsubasa,69 +guy (final fight),69 +gundam alex,69 +gronru (alchemy stars),69 +greymon,69 +grey shawl,69 +grey mask,69 +gotou jun,69 +google chrome,69 +gonzaburo,69 +goma tonbi,69 +gold experience requiem,69 +glowing armor,69 +glider (artist),69 +glasses girl (nameo),69 +glasgow (azur lane),69 +fuzzamorous,69 +fuyuki minami,69 +fuwa hyouka,69 +futaba-san chi no kyoudai,69 +furukawa yui,69 +fumizuki homura,69 +fujisaki (si da),69 +fujimaru ritsuka (female) (true ether chaldea uniform),69 +frost,69 +flower girl (yuuhagi (amaretto-no-natsu)),69 +flea (chrono trigger),69 +flamberge (kirby),69 +finger ribbon,69 +fiat 500,69 +fei long,69 +eva (metal gear),69 +eus ing,69 +eternalspherex,69 +estelle (arknights),69 +erikku (kata235),69 +eorx,69 +enaki (mtmti),69 +elisanne,69 +elakan,69 +ekina (1217),69 +eda (black lagoon),69 +echosdoodle,69 +east01 06,69 +dress jacket,69 +dr. 
voir (nanosheep),69 +dove pixie,69 +dovahkiin,69 +doru riheko,69 +donphan,69 +dokuna,69 +doko demo issho,69 +doi masayuki,69 +docoi,69 +distance,69 +demirinz,69 +dayama,69 +darren geers,69 +daetta (granblue fantasy),69 +dadadanoda,69 +cygnet (royal fanfare) (azur lane),69 +cutiebell,69 +cure blossom (cosplay),69 +crinoline,69 +cranberry spencer,69 +cranberry (mahoiku),69 +coula cat,69 +coopa,69 +cocorip,69 +cocoablue23,69 +cleaning glasses,69 +circle anco,69 +cinnamon roll,69 +chushou jiang,69 +chousokabe motochika (sengoku basara),69 +choroko (osomatsu-san),69 +chkuyomi,69 +chin-chin,69 +chiko (beroro),69 +cheeze (akizone),69 +channel ppp,69 +chako (chakoxxx),69 +casper (deathsmiles),69 +calnarsa,69 +cable car,69 +bug spray,69 +brown sports bra,69 +bowsette jr.,69 +bon (moegomi),69 +blood drop,69 +bilibilida,69 +beit (idolmaster),69 +beelzebumon,69 +beatless,69 +baby penguin,69 +baariya,69 +ayama yuiya,69 +ayakashi,69 +aya kobayashi,69 +autodefenestration,69 +augu (523764197),69 +atom (@tom),69 +atago (lily-white vow) (azur lane),69 +asakiri koko,69 +as val,69 +aru16,69 +artoria pendragon (swimsuit archer) (third ascension) (fate),69 +aroha j,69 +aria.,69 +aratami isse,69 +arami o 8,69 +aoba misaki,69 +anorith,69 +anna (princess connect!),69 +andvari (last origin),69 +anbasa (amaneyuz13),69 +analog kenshi (moto),69 +amber (gemstone),69 +amazou,69 +amano tooko,69 +amane nishiki,69 +alternate animal ears,69 +alma beoulve,69 +akatsuki kojou,69 +akashi senju,69 +aisha callaaisha,69 +ai kusunoki,69 +agnes claudel,69 +6 yin,69 +50yen,69 +1921494015,69 +zuoteng lucha,68 +zatsu,68 +yuzuki yuno,68 +yuupon,68 +yuudachi kai ni (kancolle) (cosplay),68 +yuu (oosakazaijyuu),68 +yusuki (fukumen),68 +yupopo (hidame),68 +ys (ytoskyoku-57),68 +yotsumi shiro,68 +yayata (884),68 +yasu (umineko),68 +yasaka kanako (cosplay),68 +yan (gyee),68 +yamakawa umi,68 +yamada maya (yamdmay),68 +yakusa,68 +yakuri,68 +yagiushi (sinnpatilove),68 +ximsol182,68 +xiaolong (touhoufuhai),68 +xblaze lost: memories,68 +wooden stool,68 +wing grab,68 +white breath,68 +waterstaring,68 +wamdus (granblue fantasy),68 +wallaby (girls und panzer),68 +wakahiko,68 +wadanaka,68 +wacca005,68 +w (idolmaster),68 +vy1,68 +voruvoru,68 +virtual anto channel,68 +viewtiful joe,68 +vel'koz,68 +veiny testicles,68 +vacuum bed,68 +uta no prince-sama maji love 2000 percent,68 +unu (unucence),68 +unraveling,68 +ultraman z (series),68 +ultraman (hero's comics),68 +ulala,68 +ukiwakisen,68 +ujac,68 +type 80 (girls' frontline),68 +tuye (arknights),68 +tsuyuki (yukitgraph),68 +tsuma to mama to boin,68 +tsukishiro yukito,68 +tsukishima makoto,68 +triumph (expression),68 +triss merigold,68 +triceratops,68 +transpot nonoko,68 +toshihiro,68 +tori no ou,68 +too many pikachu,68 +tongari,68 +tombo (majo no takkyuubin),68 +tofu1601,68 +toes up,68 +todoroki fuyumi,68 +tobin (fire emblem),68 +tit (bird),68 +tirudo29,68 +tipsy,68 +timo wei95,68 +timmyyen,68 +thundurus,68 +thrux,68 +thighhigh gaiters,68 +thief (dungeon and fighter),68 +the mandalorian,68 +the kogado (idolmaster),68 +tekka maki (wafuu-bune),68 +tefutene,68 +teddy demon,68 +tapioka (coconuts),68 +taiga kazame,68 +taht (that is mm),68 +t.a.k.,68 +sweater vest lift,68 +sumiyoshi kanako,68 +sumi-chan (sachito),68 +stuffed dinosaur,68 +stop motion,68 +stealth set (zelda),68 +star fox adventures,68 +speedpaint,68 +spectral force,68 +spectacled caiman (kemono friends),68 +sousuke (ponyo),68 +souma kyou,68 +soot,68 +soma natsumi,68 +soldier game,68 +snowman print,68 +smilodon (kemono friends),68 +slam,68 
+sizzlipede,68 +sion (9117),68 +sie kensou,68 +shulliy baudelair,68 +shizuki soujuurou,68 +shirt over head,68 +shirokaba114,68 +shirohime quest,68 +shirayuki (akagami no shirayukihime),68 +shirafuji natsuki,68 +shirafuji ene,68 +shining gundam,68 +shin (amnesia),68 +shimeji simulation,68 +shiki hinako,68 +shiki (0802makimari),68 +shikato miyo,68 +shikada kokonotsu,68 +shichi (ratorin53),68 +shari cote,68 +sgb,68 +seven deadly sins,68 +serena (sygna suit) (pokemon),68 +sera (mega man),68 +sayuu hanten,68 +sawara noa,68 +satou saya,68 +satou masuki,68 +sarujie (broken monky),68 +sara666,68 +sangsoo jeong,68 +sage (final fantasy),68 +rynn (rynn cube),68 +ryanpei,68 +ruua (idaten93),68 +ruoshui (the legend of luoxiaohei),68 +ruggie bucchi,68 +ruberule,68 +rii abrego,68 +rennes,68 +renkonmatsuri,68 +renka (senran kagura),68 +ren (gh),68 +reinoenu (anon),68 +rei (granblue fantasy),68 +reckless fist (elsword),68 +rebecca anderson,68 +razmi (indivisible),68 +ratto (mobilis 1870),68 +rapha galthena,68 +ral-san,68 +raineru (gryffindor131),68 +race bib,68 +r-99 smg,68 +quilt,68 +quercus (arknights),68 +qingchen (694757286),68 +qbz-97,68 +purunyara,68 +purple pussy,68 +purikura,68 +pullups,68 +project a.d.a.,68 +potty,68 +poorgom,68 +pokey,68 +poketoon,68 +platin (alios),68 +plaid scrunchie,68 +plaid footwear,68 +piyoyanagi,68 +pixiv shadow,68 +piisu,68 +pietani397,68 +photo shoot,68 +phoenix wright: ace attorney,68 +petrel (pokemon),68 +pantyhose tug,68 +pa-15 (high school thrills) (girls' frontline),68 +ozawa ari,68 +otakummm,68 +osatou (character),68 +orange-pengin,68 +ookura miyako,68 +ookawara kunio,68 +oohirakeisuke,68 +oohhya,68 +onigensou,68 +onasuba,68 +okuva,68 +okia,68 +o-sirius,68 +nslacka,68 +noshima,68 +northman,68 +noba (veronicanoha),68 +nm (tshell2761),68 +nissan skyline,68 +nishizono midori,68 +nishinomiya suzu,68 +nintendo 64 controller,68 +nepnep connect: chaos chanpuru,68 +neon katt,68 +neo-porte,68 +nemo (nameless920),68 +natsume (natsume melio),68 +nanami yuuno,68 +namori (character),68 +naked bowtie,68 +nagase iori,68 +myuri (spice and wolf),68 +mystic-san,68 +my (iuogn4yu),68 +musical note necklace,68 +mushroom hair ornament,68 +murasaki (fioletovyy),68 +multicolored thighhighs,68 +mu46016419,68 +morimiya aono,68 +model airplane,68 +miyazakit,68 +miyakouji,68 +misuta710,68 +minerva (re:zero),68 +mindoll,68 +mima chi,68 +mileena,68 +miero,68 +meruem,68 +mengxin huazha,68 +mella,68 +melissa kinrenka,68 +mekakushi code (vocaloid),68 +meiza endust,68 +megami magazine creators,68 +mega man 3,68 +matagitii,68 +mary clarissa christie,68 +maru-kichi,68 +marshall,68 +mandytsune,68 +mameyanagi,68 +male-female symbol,68 +magnolia,68 +mafurako,68 +mackia,68 +machine robo,68 +m2hb (girls' frontline),68 +lyodi,68 +loose panties,68 +lofi girl (youtube),68 +little blonde girl (kozato),68 +like an ero-doujin,68 +lhu (barappra1),68 +lamune,68 +ladd russo,68 +laboto,68 +kz 609,68 +kyoukai senki,68 +ky (nimbusky),68 +kuronushi (genshin impact),68 +kuroneko (kuroi-neko),68 +kuromiya (def lp),68 +kuro (ao no exorcist),68 +kurekore,68 +ku-ba,68 +kt,68 +ko~n,68 +koyanskaya (assassin) (third ascension) (fate),68 +koutari yuu,68 +koujaku,68 +komuro chinami,68 +komano aunn (komainu),68 +kolulu (granblue fantasy),68 +kokukyukeo,68 +kohitsuji ai,68 +knight (shichigatsu),68 +kiyo (yamazoe1122),68 +kisou (kisou00),68 +kiryuu kyousuke,68 +kingsman: the secret service,68 +kimura,68 +kemomimi refle!,68 +kemejiho,68 +kcco (girls' frontline),68 +kazanock,68 +katsu (food),68 +katano sukune,68 +kasuga ichiban,68 
+kashiwa (iersansi),68 +kasako (komeshiro kasu),68 +karashi (asarikarasi),68 +karahara shima,68 +kaorin minogue,68 +kanie seiya,68 +kanechi,68 +kana (kuaua),68 +kana (haibane),68 +kamipani!,68 +kamen rider garren,68 +kali belladonna,68 +kaitou reiko,68 +k-rha's,68 +juliet capulet,68 +ju 87,68 +jolaeng-i,68 +jitsu wa watashi wa,68 +jirafuru,68 +jill valentine (cosplay),68 +jiang ye kiri,68 +isobee,68 +ishitsu tadashi,68 +isaac dian,68 +inugahora an,68 +inoue tomii,68 +inaba yui,68 +impidimp,68 +imp (doom),68 +imminent breast grab,68 +imazawa,68 +illyasviel von einzbern (dress of heaven),68 +ichimatsu shiro,68 +ice block,68 +i am homeko,68 +hyuuga takashi,68 +hyuga zen,68 +hwansang jungdog,68 +huoji (wonderturkey),68 +hrid (fire emblem),68 +hoshi kubi,68 +horizontal comic,68 +homare (homaredai),68 +hisayaki kyuu,68 +hiroshix31,68 +hiokirasu,68 +hinanawi tenshi (cosplay),68 +hinamatsuri (manga),68 +himuro ayame,68 +hiiragi akao,68 +henz,68 +heineken,68 +height mark,68 +heart cheeks,68 +haysaca a. smithee,68 +hayama jun'ichi,68 +hatsune miku graphy collection,68 +hashimochi,68 +haruyuki 14,68 +harklight,68 +hanami dango (zzldango),68 +haku (naruto),68 +hakoneko (marisa19899200),68 +hakka (88hk88),68 +h sueun,68 +gupunetsu,68 +great fairy (zelda),68 +grand theft auto: san andreas,68 +goku-chan,68 +godsh0t,68 +glorybringer (granblue fantasy),68 +geolim,68 +gensoukitan,68 +gamera (series),68 +gal dolva!!,68 +future,68 +futaribeya,68 +fushimimukai hinako,68 +fusataka shikibu,68 +fukuso hilbert kuukan,68 +fujinami ryuunosuke,68 +fujima (k114),68 +fuji tarawi,68 +fuenyuan,68 +fruits fulcute!,68 +francisca (kirby),68 +fox wife (batta (kanzume quality)),68 +fossa (kemono friends),68 +flower anklet,68 +feathered dragon,68 +fbi,68 +family guy,68 +erdtree (elden ring),68 +elmerulia fryxell,68 +elliot leucosia,68 +elevator operator,68 +doyamona,68 +doro au,68 +dongqing zaozigao,68 +dive,68 +dice gt,68 +diabla (elsword),68 +detached cape,68 +denjyou23,68 +dena.ei,68 +degenbrecher (arknights),68 +de (deys),68 +dalrye v3,68 +daikoku osamu,68 +cz2128 delta,68 +cure beauty (princess form),68 +crystal night party (idolmaster),68 +crimrose,68 +coral hair ornament,68 +clitoris torture,68 +clip (weapon),68 +cinderella (grimm),68 +churi (oxxchurixxo),68 +choco-chan,68 +china moeka,68 +chiliarch (elsword),68 +chieezuik,68 +cheesewoo,68 +chauke,68 +cat's eye,68 +castella,68 +cassin (azur lane),68 +carrot pin,68 +carole & tuesday,68 +capricorn,68 +capcom vs snk 2,68 +canada,68 +c (theta),68 +bzs (kage no shinobu),68 +butterfly swords,68 +burn mark,68 +bunnelby,68 +botamochi (exwelder),68 +bmo,68 +bluethebone (character),68 +bleachers,68 +black stripes,68 +black pajamas,68 +black hayate,68 +blacephalon,68 +bintz,68 +biburi (precure),68 +betti (maron),68 +bespin,68 +bellri zenam,68 +bbolalus,68 +bb-8,68 +bataan (azur lane),68 +bass (mega man),68 +bandeau lift,68 +ayano (katou),68 +ayanami (nightfall raiment) (azur lane),68 +aubz,68 +attenborough cortitch,68 +asortofcolorfag,68 +arugeri,68 +arthur pendragon (white rose) (fate),68 +arong,68 +arnval mk2,68 +armlock,68 +arinotowatari,68 +aries,68 +archibald (adahalt389zp),68 +arc (arc the lad),68 +aquarius camus,68 +aqua sleeves,68 +aqua pupils,68 +april (coyote ragtime show),68 +aomizuan,68 +anoshabu,68 +announ (kurotya),68 +anne bonny (swimsuit archer) (first ascension) (fate),68 +angel sanctuary,68 +amiba (nerdamiba),68 +amearare,68 +alistar (league of legends),68 +alicia (valkyrie profile 2),68 +aleikats,68 +akka,68 +akari (pokemon) (cosplay),68 +airi (akamichiaika),68 
+airbrush (medium),68 +aino yumeri,68 +aim-9 sidewinder,68 +aili (aliceandoz),68 +ah-64 apache,68 +aerlai,68 +adachi tenka,68 +abeen jhong,68 +aaron (pokemon),68 +a-kiraa (whisper),68 +3j dangan,68 +144 (riesztan),68 +zooming out,67 +zel-sama,67 +zb-26 (girls' frontline),67 +zaniaii,67 +yuzuha (utawarerumono),67 +yuzu (konohana kitan),67 +yuusha masatoshi,67 +yuuno,67 +yuki ahiru,67 +yukemuriganmo,67 +yuh,67 +yua (bokubo0806),67 +yoshida (rodeo),67 +yoshi (moco1),67 +yiyu qing mang,67 +yasuhara roku,67 +yasashii naizou,67 +yanmaami,67 +yancy (pokemon),67 +yamanobe saya,67 +xylophone,67 +xtears kitsune,67 +wooden shield,67 +windranger (dota),67 +windfeathers,67 +weiss,67 +water tower,67 +watanuki nao,67 +watanabe atsuko,67 +wasabijoyu76,67 +warfarin (casual vacation) (arknights),67 +wanne,67 +wanderlucia,67 +walter c. dornez,67 +walrus,67 +wally (where's wally),67 +vuvuzela,67 +volbeat,67 +vivian (paper mario),67 +vanir,67 +vanilla (arknights),67 +uss iowa (bb-61),67 +ushita kaoruko,67 +usami tsuitachi,67 +usada kensetsu,67 +ura-omote lovers (vocaloid),67 +unico kasumi,67 +unaware,67 +ultone (neisiss),67 +uchicchii,67 +type 64 (girls' frontline),67 +tying headband,67 +two-tone vest,67 +turtleneck swimsuit,67 +tsukamichi fumi,67 +tsubomioka happa,67 +tractor,67 +touko 56,67 +tomoshiki,67 +tomo-chan wa onna no ko,67 +toda jun,67 +tnr (horiko1525),67 +titania (final fantasy),67 +tirpitz (snow-melting summer) (azur lane),67 +time tunnel (madoka magica),67 +ticktack chicken,67 +tibarn (fire emblem),67 +the sounds of autumn (umamusume),67 +the binding of isaac,67 +tetsujin 28-gou,67 +tenjou tsuki,67 +tengen toppa gurren-lagann (mecha),67 +teddy bear sex,67 +tawara touta (fate),67 +tashiromotoi,67 +tar-21 (night at the bar) (girls' frontline),67 +tapioka chaso,67 +tape censor,67 +tanutika,67 +tane juu-gou,67 +tamen de gushi,67 +tamako love story,67 +tamaki iori,67 +tales of link,67 +takasugi shinsaku (fate),67 +taisen hot gimmick,67 +sword cane,67 +swalot,67 +sutei (arece15),67 +supullim,67 +super saiyan rose,67 +sumi (oyasumie),67 +sugapi,67 +suemitsu dicca,67 +subject 67 (helltaker),67 +striped bodysuit,67 +stahn aileron,67 +squid girl,67 +sokutenkun,67 +sleeveless jumpsuit,67 +slash-ex,67 +size hermitage,67 +siseru samurai,67 +sirius (white rose) (azur lane),67 +sigma (mega man),67 +shrinking,67 +shiya,67 +shiratori serano,67 +shinozaki-san ki wo otashikani,67 +shining force exa,67 +shiho (yuuhagi (amaretto-no-natsu)),67 +shia (pita ten),67 +shenteita,67 +shenmue ii,67 +shal.e,67 +sex doll,67 +setzer gabbiani,67 +serina ranshi,67 +seo haruto,67 +sengoku hime,67 +sen (daydream 53),67 +seigetsu academy uniform,67 +seafood,67 +scottie (pokemon),67 +sazanka (kunoichi tsubaki no mune no uchi),67 +sawamura tomoki,67 +satori (ymoy),67 +sashinami shouko,67 +saotome academy uniform,67 +santoku knife,67 +sakino saku,67 +sakamoto kengo,67 +saka i hirokadu,67 +saimoe,67 +ryuya,67 +ryans,67 +ry-spirit,67 +roro (sghona10),67 +robot neoanthropinae polynian,67 +robot dog,67 +riria (happy strawberry),67 +rin lingsong,67 +riinu (ir-n),67 +ri qing,67 +renge (bishoujo mangekyou),67 +reijing etrn,67 +rei (cosmic break),67 +red bean paste,67 +reanbell,67 +rasengan,67 +rappelling,67 +qiuzhi huiyi,67 +promotions,67 +project fairy (idolmaster),67 +princess spirit,67 +princess aeolian,67 +primal groudon,67 +priest (warcraft),67 +priana,67 +porsche 911,67 +popii (yuuta679),67 +pol winner,67 +pokken tournament,67 +pokeyugami,67 +pokemon wings,67 +poinia,67 +plaid hairband,67 +pineapple print,67 +pia (botamochinjufu),67 
+pet cone,67 +perlica (arknights),67 +patzzi,67 +paradox (parapa),67 +panamuru,67 +pamiat merkuria (caged minx) (azur lane),67 +p-nekoe,67 +oyaji,67 +oshiri seijin,67 +orifushi natsumi,67 +ootsuki makoto,67 +onoe serika,67 +oninamako,67 +omaida takashi,67 +nyanmaru (ememing),67 +nuye,67 +nozz177,67 +nonohara nyorai,67 +niu illuminator,67 +nito nazuna,67 +nishitsuki tsutomu,67 +nishino tsukasa,67 +nishikasai munieru,67 +nipple flick,67 +nikaidou shinku,67 +nicca (kid nicca),67 +nia (fancy sundress) (xenoblade),67 +nevin (flyskying),67 +nemuro nao,67 +neko atsume,67 +natsuzora kanata,67 +natsume (decadence),67 +national football league,67 +naruto: road to ninja,67 +naosuke (morioka shachuu),67 +nanju bami,67 +nande koko ni sensei ga!?,67 +nanae,67 +namakura neo,67 +naltal,67 +naked kappougi,67 +nagare yoshimi,67 +nagami suzuka,67 +naarann,67 +myaaco,67 +murata himeko (blood rose),67 +moyo (amaniwa),67 +mountain tim,67 +mottsun,67 +morinaga korune,67 +mori hikiko,67 +moon uniform (houseki no kuni),67 +momose hisashi,67 +mohya,67 +mochizou,67 +mobu 45,67 +mlynar (arknights),67 +mizuno (soutaman),67 +mitsubishi motors,67 +mitsubachi koucha,67 +mio5,67 +minj kim,67 +minior (core),67 +mihira (tainosugatayaki),67 +midiman,67 +mhg (hellma),67 +mesu nie onna kyoushi,67 +meriko,67 +meracle,67 +mechari,67 +mazda rx-7,67 +maumen,67 +matsura ichirou,67 +mata-aro,67 +maru (cookie),67 +marie (sister princess),67 +manamitsu,67 +manabe itsuki,67 +mamezou,67 +maku (wasabishock),67 +major,67 +maid swimsuit,67 +mai (t-5),67 +mahdi,67 +magical mirai miku (2022),67 +lon'qu (fire emblem),67 +loftwing,67 +little mare,67 +litchipix,67 +life is strange,67 +lewisia aquablue,67 +leo,67 +lemur ears,67 +lei lei (cosplay),67 +ledian,67 +leaf (studio),67 +lawrence (shiro seijo to kuro bokushi),67 +lavender quartz,67 +kushina anna,67 +kusakabe misuzu,67 +kuryuu kohaku,67 +kurusu natsume,67 +kurosu gatari,67 +kurome (akame ga kill!),67 +kuroiwa yuuki,67 +kuroba aki,67 +kuro minamo,67 +kurenaidahlia,67 +kurattes,67 +kurafuji sachi,67 +kupocun,67 +kunisaki rena,67 +kubota rin,67 +kotikomori,67 +korandamu,67 +kooji (macoji),67 +kokonoha mikage,67 +koikeya,67 +koga tomoe,67 +kirishima kanna,67 +kirihara misaki,67 +kinzoku bat,67 +kindaichi shounen no jikenbo,67 +kimi no iru machi,67 +ki (mxxxx),67 +kellymonica02,67 +keis (locrian1357),67 +kechin (oreteki18kin),67 +kazuto san,67 +kazuhito (1245ss),67 +kazanari genjuurou,67 +kayano kaede,67 +katsuragi misato (cosplay),67 +kasappi,67 +karita (kali lgk),67 +kare (0621utak),67 +kana (user rkuc4823),67 +kamiya hiroshi,67 +kamidori alchemy meister,67 +kamen rider 2,67 +kaleidomoon scope,67 +kaku yone,67 +kaio (watagami),67 +kaie,67 +kaidou (one piece),67 +kagari atsuhiro,67 +kaga (white fox's new year) (azur lane),67 +kabun (suzuna jct),67 +juban,67 +joh pierrot,67 +jissouseki,67 +jijo (kakitama),67 +jiji (381134808),67 +jegan,67 +jay xu,67 +jarv,67 +jacob dream world,67 +iwaki kouji,67 +itsumoto hiroharu,67 +isekai meikyuu de harem wo,67 +iris libre (emblem),67 +ip,67 +io (io oekaki),67 +inu chikushou,67 +inu8neko,67 +innes (fire emblem),67 +inga,67 +ina (inadiary),67 +impressionism,67 +imai nobume,67 +ildy,67 +iki hiyori,67 +iga (okame nin),67 +ieufg,67 +icetea,67 +ibuki hasu,67 +hunter (girls' frontline),67 +hunehoura,67 +hoshino hitsuki,67 +hoshikuzu pan,67 +horse head,67 +honeymilk0252,67 +hirata katsuzou,67 +hiraga daidai,67 +hinase haruka,67 +hikari (haibane),67 +hijiri byakuren (cosplay),67 +hido (meori apu da),67 +henry townshend,67 +helen roro,67 +heart wings,67 +head 
between knees,67 +hbb,67 +hazuki kurumi,67 +hatsu (first snow),67 +hasumi takashi,67 +harvest,67 +haruhikohiko,67 +haru (citron citron),67 +haobuguniao,67 +han solo,67 +hami yura,67 +hamakaze (azur lane),67 +hakutaku (hoozuki no reitetsu),67 +hako momiji,67 +hajime (kin'you club),67 +hachi (orange),67 +guilmon,67 +grozny (azur lane),67 +grimay,67 +grado labs,67 +goushou,67 +gotcha! boy (pokemon),67 +gotagotay,67 +gorudazo,67 +gorigo,67 +gokou hinata,67 +goki buri,67 +go-lurk,67 +girutea,67 +giorno giovanna's pose (jojo),67 +gigalith,67 +gia kon,67 +ghost in the shell: sac 2045,67 +gekato,67 +gallerian marlon,67 +gala (16901040),67 +future studio (artist),67 +futaba tsukushi,67 +furui suguri,67 +fumikiri,67 +fukano youichi,67 +fujioka toki,67 +fujimon,67 +fujikido kenji,67 +fudatsuki no kyouko-chan,67 +from hat trick,67 +flower (gynoid talk),67 +floodlights,67 +fiddlesticks,67 +female warrior (disgaea),67 +exa (koyuru),67 +ero zemi,67 +ensinen,67 +enoko (zqfebi),67 +emuchi,67 +emile bertin (azur lane),67 +emil nekola,67 +ells,67 +elf (ii orc no hi),67 +ekokuice,67 +eeyasu,67 +ecoco,67 +eat0123,67 +dutch flag,67 +drawstring bag,67 +drasna (pokemon),67 +doughnut hair ornament,67 +doubutsu sentai zyuohger,67 +dia (world flipper),67 +deed (nanoha),67 +dancer 2 (sekaiju),67 +daikuuji ayu,67 +daiginjou,67 +daicon,67 +cyclops (x-men),67 +cutter (arknights),67 +cunt punt,67 +crossbone gundam ghost,67 +crescentia fortuna,67 +crayfish,67 +cotta,67 +cooking idol ai! mai! main!,67 +cloritin,67 +christine (arknights),67 +chouon senshi borgman,67 +chocolate on legs,67 +chinese hat,67 +chiha (abo ecm mk25),67 +chieru (school festival) (princess connect!),67 +chelsea (akame ga kill!),67 +checkered bra,67 +chariki,67 +chair tipping,67 +cerecere (sailor moon),67 +caron (rall),67 +cao hong anh,67 +camel ears,67 +cala t maki 72,67 +c@rbon,67 +bun-o,67 +bowling alley,67 +botan m,67 +borockman,67 +blueman,67 +blue lock,67 +blue (among us),67 +blade ride,67 +black mage (fft),67 +berry jou,67 +bard (sekaiju),67 +banxuan c2ka,67 +banglinh1997,67 +b suke,67 +azusagawa kaede,67 +ayuto,67 +astor alexander,67 +ash (fire emblem),67 +ao oni,67 +anitore! 
ex,67 +anice farm,67 +angry sex,67 +angelina kudou shields,67 +angelan,67 +ane,67 +andrew kreiss,67 +anakaris,67 +amripo,67 +amatsuki rei,67 +alphabet,67 +alicecrazy,67 +alcxome,67 +akiae (hayj14),67 +akeiro kaikitan,67 +aizen (housamo),67 +aito (indigorabbit),67 +aitanikofu,67 +aisora,67 +aion kiu,67 +after handjob,67 +aero (mega man),67 +adjusting mask,67 +ace combat 5,67 +aburaya tonbi,67 +7aka ne11,67 +695 (mukuko),67 +3x3 eyes,67 +3838383,67 +35,67 +0208ramune,67 +zuikaku (kancolle) (cosplay),66 +ziogon,66 +zibun owl,66 +zhixie jiaobu,66 +zanunoneko,66 +zanshi,66 +yuzuno kaori,66 +yuzu gin (pika97),66 +yuufreak,66 +yuu (yu0221f),66 +yunagi (arukumaruta),66 +yuna (rutera),66 +yuka yukiusa,66 +yuasa akira,66 +yts takana,66 +youkan (tako),66 +yoshihara maito,66 +yori (princess connect!),66 +yomotsuka tsukasa,66 +yeurei,66 +yazawa kokoro,66 +yasiromann,66 +yasai yuuki,66 +yan531,66 +yamashio maru (kancolle),66 +xm8 (girls' frontline),66 +xiao ren,66 +xaxak,66 +xaldin,66 +wucanming,66 +wlper,66 +wing gundam,66 +war of the visions: final fantasy brave exvius,66 +vietnamese text,66 +utsumi erice (mission start),66 +uso-kun,66 +unyl-chan,66 +unownglyphics,66 +unown h,66 +uni (ieatpockey),66 +unfezant,66 +umitsubame,66 +ui (rot),66 +uho (uhoyoshi-o),66 +uguisu (happy turn),66 +ugg boots,66 +ueng,66 +u4284g,66 +type 74,66 +twomoon,66 +tsuu (tu-3),66 +tsurugi ai (seikan hitchhiker),66 +tsukishima shizuku,66 +tsukishima koko,66 +tsuki tsuki!,66 +triptych,66 +transformers shattered glass,66 +toyasu aina,66 +totororo,66 +toryuu,66 +toramaru shou (cosplay),66 +tora (ushio to tora),66 +toei animation,66 +to-den (v-rinmiku),66 +tko (kkk66),66 +tino (ikeuchi tanuma),66 +throwing person,66 +the fool (tarot),66 +teru sakura,66 +tenko (yuureidoushi (yuurei6214)),66 +tengu nouzu,66 +tenchi muyou! 
ryou-ouki,66 +team rainbow rocket grunt,66 +taotao,66 +tantanmen72,66 +tango,66 +tanaka hitoriaruki,66 +tama (speedgrapher),66 +takigraphic,66 +takano suzu,66 +takanami kai ni (kancolle),66 +taiyou gakuen uniform,66 +taiga shinjirou,66 +tadpole tail,66 +tachibana hinano (vtuber),66 +sutekina awa,66 +suna co,66 +summoner (league of legends),66 +sumisuzu,66 +sumeshi (ambivalince),66 +sugina fujitaka,66 +strike cannon,66 +straw (yokubou hiroba),66 +stone (shirokanipe ranran),66 +stocking (psg) (cosplay),66 +standby,66 +stage of magic (idolmaster),66 +spritzee,66 +spaghetti and meatballs,66 +sonsaku,66 +sitting on books,66 +sir arthur (makaimura),66 +sinister,66 +single shoulder pad,66 +sindre,66 +sima,66 +sill plain,66 +shougi (116),66 +shooting428star,66 +shiwasuzuki,66 +shiranui (wasuresateraito),66 +shionosuke,66 +shino duka,66 +shinkaisoku,66 +shinebell,66 +shin ultraman,66 +shin sangoku musou 6,66 +shimeko,66 +shide kouri,66 +shen li,66 +sheep (kemono friends),66 +shacttainw,66 +senjouhara nira,66 +seal script,66 +santa claus (chainsaw man),66 +samuraig,66 +same no fukahire,66 +sam porter bridges (cosplay),66 +sakurazari hotori,66 +sakurano,66 +sakura hitsuji,66 +sakura akami,66 +sakura (39ra),66 +saklo,66 +sake dong,66 +ryuu (breath of fire iv),66 +ryushin,66 +rune factory 1,66 +ruizu (takakisan503),66 +rotating light,66 +ring 411,66 +rindoriko,66 +rilex lenov,66 +resident evil 7,66 +reiji-rj,66 +reia hana,66 +recri,66 +ranka lee (cosplay),66 +rakuga kiya,66 +rakku (10219563),66 +raichi (ddq0246),66 +racing miku (2015),66 +purple sweater vest,66 +purple hood,66 +puni puni handmaid,66 +protagonist (love and producer),66 +pretty sammy (character),66 +popful mail,66 +pokemon ranger 1,66 +placemat,66 +pixiv trainer,66 +pino (jashin-chan dropkick),66 +pillo,66 +pigeon pose,66 +pi tayuko,66 +peter (gvb),66 +penelope (hathaway's flash),66 +patterned hair,66 +pandora (re:zero),66 +pandaki (aki),66 +panda hero (vocaloid),66 +others,66 +osuzu akiomi,66 +ornate armor,66 +ori (yellow duckling),66 +ore no kome,66 +ookami kodomo no ame to yuki,66 +oni noodle,66 +onanie master kurosawa,66 +omura06,66 +olivine (nu carnival),66 +ohizumi daisaku,66 +ohagi (ymnky),66 +off-shoulder coat,66 +oekaki taro,66 +nse,66 +nora ichigo,66 +nooca,66 +noire kooshe,66 +noeru (putty),66 +nmz (namazu),66 +nishina hima,66 +nishikiori jin,66 +nine alpha,66 +nikaidou yuzu,66 +nightmare-doom,66 +niedersachsen military uniform,66 +nicola (granblue fantasy),66 +nico-mo,66 +neziren14,66 +nero (nilu),66 +nene (10575936),66 +nekozawa yukari,66 +negi suppository,66 +nefrubi,66 +naye,66 +naokado,66 +nanaminn,66 +namagaki yukina,66 +nakareki,66 +nakane nata,66 +naji yanagida,66 +nagishy,66 +mystra77,66 +mutual breast sucking,66 +muimui,66 +mugishima orie,66 +mugen (samurai champloo),66 +mr h.l.,66 +motoi (spieler),66 +morichika shuuto,66 +mooning,66 +mono (freerotary),66 +monet (one piece),66 +momoyama nozomu,66 +momosuke (ishakry),66 +momonosuke (u-ma991028),66 +momoka (blue archive),66 +momijigari,66 +momigara (mmgrkmnk),66 +mogullaz,66 +miyashita,66 +miyanome,66 +mixflavor,66 +mito tsubaki,66 +mink (dragon half),66 +minagokoro,66 +millennium ring,66 +milk tea,66 +mikuriya jin,66 +midou (grk12138),66 +midori (fire emblem),66 +miclot,66 +michael wazowski,66 +metal man,66 +mepple,66 +meno,66 +menma kozo,66 +menma222,66 +meng xiao jiong,66 +memories off 1,66 +memai,66 +meltryllis (fate) (cosplay),66 +meina (atlanta),66 +megurumiru,66 +mega man x8,66 +medjed (mythology),66 +matsukaze yukiji,66 +matoimaru (arknights),66 +matoi,66 
+master 5 (housamo),66 +masrur,66 +mario kart 8,66 +mario grant,66 +marekamico,66 +malva (pokemon),66 +makimura miki,66 +maid-san wa taberu dake,66 +mai (avatar),66 +magrona,66 +magic xiang,66 +maetaku,66 +mabataki,66 +ma2acworks,66 +lyseria christaria,66 +lucy (pokemon),66 +lucian (pokemon),66 +lotus root,66 +loose pants,66 +liss meier (koyashaka),66 +lisa (pso2),66 +light (luxiao deng),66 +lexaeus,66 +leo whitefang,66 +lemming no suana,66 +laura rolla,66 +lanchester smg,66 +lance (lancelliu),66 +lana (fire emblem),66 +laffey (white rabbit's oath) (azur lane),66 +kyashii (a3yu9mi),66 +kuzuryuu amane,66 +kuze shizuka,66 +kutta,66 +kurumi nui,66 +kurozu,66 +kuroshiroemaki,66 +kuroi-tsuki,66 +kurihara chiyo,66 +kurigura (sketch san),66 +kumasawa chiyo,66 +kukiha,66 +kujira gunsou,66 +kotori (cheerleader) (blue archive),66 +kotomine risei,66 +koro (tyunnkoro0902),66 +kong lang (skywave),66 +kondoru,66 +komowata haruka,66 +komatsu (sakanae),66 +koji (kojikojimdw),66 +kitamura sora,66 +kissing eye,66 +kishibe ayaka,66 +kisaragi yuu (re:lucks),66 +kiramashi (satsujinki),66 +kian,66 +ketopon,66 +kengan ashura,66 +keiya,66 +kazoku game,66 +kate (idolmaster),66 +kasandra (xenoblade),66 +karoine,66 +kanaka,66 +kamoi (kancolle) (cosplay),66 +kamen rider kuuga (ultimate form),66 +kamei,66 +kamado (pokemon),66 +kama iruka,66 +kakenari,66 +kakashichi,66 +kajiya kurogane,66 +kaiman,66 +kai tomohisa,66 +kagalin,66 +kabane (follabi),66 +kab00m chuck,66 +justeeeeth,66 +jun'you maru,66 +joshi kousei rich thots,66 +joseph joestar (young) (cosplay),66 +joeychen,66 +jk bitch sannin musume!,66 +jin yuuichi,66 +jeanne d'arc alter (mystery treasure) (fate),66 +ivan the terrible (fate),66 +iskandar (sensha otoko) (fate),66 +ishida baru,66 +isekai shokudou,66 +irisviel von einzbern (halloween princess),66 +iris (en'en no shouboutai),66 +iranon (new iranon),66 +io enishi,66 +inoue haruka (haruharu210),66 +inaba meguru,66 +implied murder,66 +imasogari,66 +imada kozue,66 +iijima renka,66 +ii (ayanepuna),66 +igunuk,66 +ichiko (ichi),66 +ichihime,66 +ichihara2929,66 +hscatter,66 +hrtyuk,66 +houjou yutori,66 +holding snowball,66 +holding jug,66 +hiromumaru,66 +hiro (725611),66 +hino shinnosuke,66 +hino hinako,66 +hilda (beelzebub),66 +hikap,66 +hekiten,66 +havoc-chan,66 +hattori heiji,66 +haruto (hirokazu1001),66 +harusabin,66 +harukanaru toki no naka de 3,66 +haruka ni aogi uruwashi no,66 +haruhitooo,66 +hara kazuhiro,66 +hanakuso,66 +half-nightmare,66 +hakumei (hakumei to mikochi),66 +haaselia,66 +gun in mouth,66 +great mazinger,66 +goji (five clock),66 +gods (1073337800),66 +goatman (umineko),66 +ginnoturu,66 +giganticbuddha,66 +giga mermaid,66 +ghost rule (vocaloid),66 +ghast,66 +ggubii0225,66 +georgia (pokemon),66 +geno (mario),66 +genda,66 +gemi ningen,66 +gekijigen tag: blanc + neptune vs zombie gundan,66 +gasper vladi,66 +garland (ff1),66 +gang road joker,66 +g home,66 +fuyukawa motoi,66 +fujitaka (akasora),66 +fujita canaria,66 +fujifuji924,66 +frilled underwear,66 +freya (valkyrie profile),66 +frankseven,66 +feather print,66 +f4u corsair,66 +f-tani,66 +externally piloted mecha,66 +esuthio,66 +estonia (hetalia),66 +eruka frog,66 +empty x embryo,66 +ellias ainsworth,66 +east german,66 +ears up,66 +dusty heaven,66 +dummy,66 +drruraguchi,66 +dragon knight (dota),66 +domco,66 +doku sasori,66 +doctor zexxck,66 +djmax technika,66 +dima (girls' frontline),66 +dilapidated,66 +desspie,66 +denfunsan,66 +deis,66 +death star,66 +db (dokidoki! 
precure),66 +daxz240r,66 +danshi koukousei ga mahou shoujo ni naru hanashi,66 +danish flag,66 +dancho (danch),66 +dal-gi,66 +daicon bunny girl,66 +daichan mona,66 +d-kureta,66 +cyrano,66 +cyan (among us),66 +cyaca ab,66 +cut man,66 +cup6542,66 +creeeen jjang,66 +covering navel,66 +couzone,66 +cornelius (odin sphere),66 +constantia s2,66 +collair (cono),66 +cloud background,66 +class change,66 +choking on object,66 +chocolate (jitong),66 +chigusa kasumi,66 +cheonyeon-hi,66 +chen gong (fate),66 +charlotte katakuri,66 +charlotte corday (swimsuit caster) (first ascension) (fate),66 +chaos code,66 +chakuma (yiyh1468),66 +cat and rabbit,66 +canvas (cocktail soft),66 +caesty,66 +butterfly background,66 +brups tv,66 +brown headband,66 +break blade,66 +bonsly,66 +bokuso,66 +bokujou monogatari: waku waku animal march,66 +bleach: zanpakutou ibun hen,66 +black wyrm (last origin),66 +black hood,66 +black bull (emblem),66 +binzoko megane (san-inch),66 +bianco (mapolo),66 +ben 10: alien force,66 +bellows camera,66 +beige gloves,66 +beehive,66 +beegle,66 +beast ball,66 +ball (gundam),66 +baba kanade,66 +azurda (xenoblade),66 +azuman,66 +ayuzawa misaki,66 +ayuhara hiiro,66 +ayamiya fumi,66 +avocado,66 +asuna (i luv),66 +asumi kana,66 +astesia (starseeker) (arknights),66 +ashinano hitoshi,66 +asatte no houkou,66 +artisticjinsky,66 +arina nary,66 +arihara ema,66 +archer (fft),66 +arabic text,66 +aoneco,66 +anrakutei kukuru,66 +announcement,66 +angkor (elsword),66 +angelic layer,66 +angel (drag-on dragoon),66 +ancotsubu,66 +amasa-hikae,66 +amano miu,66 +amakawa akito,66 +amahara pekozaemon,66 +akizuki hakuto,66 +akino ochiba,66 +akina t,66 +akatsuki usagi,66 +akatsuki (kancolle) (cosplay),66 +akashi (yojouhan),66 +airspace,66 +aimf,66 +ai wa muteki,66 +agielba,66 +adjusting hair ornament,66 +abyss watcher,66 +aa44,66 +a5m,66 +7t,66 +1930s (style),66 +zxpfer,65 +zjl baiqishi,65 +zion,65 +zero (ray 0805),65 +yuxyon,65 +yuuta (yuuta0312),65 +yuumi (league of legends),65 +yuuki nona,65 +yuu (plasm),65 +yurizuka (sergeant heart),65 +yukki (rffcq251),65 +yukitsuba hina,65 +yuki madoka,65 +yu (bkks),65 +yoshimura kentaro,65 +yaya hinata,65 +yasunao (yasunao-z),65 +yasuda,65 +yani (nokori life1),65 +yamamoto keiji,65 +yamabushi tengu,65 +yamabuki kasumi,65 +wolf mask,65 +winchester model 1887,65 +whoopin,65 +white tabard,65 +white armband,65 +whiscash,65 +wakipiiiii,65 +wakabayashi ikuno,65 +wa2000 (haunted castle) (girls' frontline),65 +vyse,65 +very long ears,65 +vanilla (last origin),65 +utaha (blue archive),65 +ushi-oni (monster girl encyclopedia),65 +urokoda,65 +urianger augurelt,65 +unusablenameaaa,65 +unholy sanctuary,65 +umitsuki,65 +umeume (totoya),65 +ueno musashi,65 +uchiwa design,65 +type 10 (tank),65 +turian,65 +tuqi pix,65 +tsumumi (kandume103),65 +tsukushino mitsugu,65 +tsukishiro mona (shiromanta),65 +tsuji yuzu,65 +tsubaki (tsubakiyasan),65 +truejekart,65 +transparent bag,65 +torinosukei,65 +torii earrings,65 +tori (hiyoko bazooka),65 +topadori,65 +tooth earrings,65 +tokine (maikaze),65 +the scream,65 +the north face,65 +the caster chronicles,65 +the boss,65 +the boogie,65 +the bible,65 +tezuka osamu (style),65 +tensei shitara ken deshita,65 +telepathy,65 +teen (teen629),65 +team rainbow rocket,65 +tax2rin,65 +tatakau shisho,65 +tatakae!! 
iczer-1,65 +tamukoro,65 +tamasan,65 +tamamo no mae (sexy bikini) (fate),65 +takashi (calla),65 +takano chizuru,65 +takanashi misha,65 +takahiro (rikky),65 +tagosaku (tatsukiuma0329),65 +tachibana ichika,65 +tabasco,65 +suzushiro atsushi,65 +suzunari arare,65 +suzume (maid-san wa taberu dake),65 +suzuhane suzu,65 +suzu-batsu,65 +suwa amaki,65 +sumeragi taito,65 +sukuna136,65 +string choker,65 +strawberry background,65 +storm cloud,65 +stirrup footwear,65 +staryume,65 +starbucks siren,65 +standing on torii,65 +st.microscope,65 +spirtie,65 +soumen,65 +souffle pancake,65 +sonic adventure,65 +son gohan (future),65 +soldier blue,65 +sokona (sosokona),65 +snail shell,65 +smoliv,65 +smoke heart,65 +smash is for good boys and girls,65 +sly,65 +sloth (animal),65 +shuzen kokoa,65 +shouji mezou,65 +shotadom,65 +shirley (fate),65 +shirano,65 +shining nikki,65 +shimashima nezumi,65 +shimada sarasara,65 +shima riu,65 +shichinin no online gamers,65 +shefi (princess connect!),65 +shan grila,65 +seaplane tender water princess,65 +sawashi (ur-sawasi),65 +satou rina,65 +sasumata jirou,65 +sarugaki hiyori,65 +saotome suzume,65 +sanuki (zigzagflamberge),65 +sanso,65 +sangrde,65 +sangoku hime,65 +same-san,65 +sakura yuuya,65 +sakura nitouhei,65 +saimon (tales),65 +sailor fundoshi,65 +sagara sanosuke,65 +sagan (skullgirls),65 +saeki ritsuka,65 +ryuuzouji usagi,65 +ryusei2u,65 +ryein,65 +rusty soul,65 +rurutie (utawarerumono),65 +rumia (cosplay),65 +ruma imaginary,65 +rufus shinra,65 +roki (307033),65 +rocoroco,65 +rico brzenska,65 +ribbon no kishi,65 +renz (rirene rn),65 +ren hakuei,65 +removing coat,65 +rei (usabiba),65 +reflex sight,65 +red garter belt,65 +ranulf (fire emblem),65 +queen of spades,65 +q-v (levia),65 +pu-chin,65 +project diva x,65 +project a-ko,65 +princess yan,65 +poppi (reimu endou),65 +poland,65 +pokemon breeder (pokemon),65 +play of the game,65 +plaid blanket,65 +pink tube top,65 +pile of corpses,65 +persona x detective,65 +persona trinity soul,65 +peptide,65 +penki,65 +penis in pantyhose,65 +pei (sumurai),65 +patting back,65 +panty bulge,65 +palom,65 +p90 (the girl from b.e.l) (girls' frontline),65 +otone,65 +oroshipon zu,65 +oppaiserothicc,65 +open sign,65 +onitsuka natsumi,65 +oka yuuichi,65 +oga-san,65 +nullhachi,65 +nuko-d,65 +nozu (thukuhuku),65 +noodle (gorillaz),65 +nonaka haru,65 +nojima minami,65 +nobutake (nobu0),65 +noboru (kamine204136),65 +no armor,65 +nishiro nya,65 +ninsaki (9saki),65 +nike (mahoujin guruguru),65 +nestea,65 +neriash,65 +neongun,65 +nei,65 +natsugumo (kancolle),65 +nandz,65 +nanakawa (nanasoon),65 +nameta neko,65 +nakasima-syouta,65 +nakamura tatsunori,65 +nagumo tetora,65 +na53,65 +mythra (xenoblade) (cosplay),65 +myao (o3o333),65 +my-otome s.ifr,65 +muuten,65 +musashibo benkei (fate),65 +mummification (bound),65 +mugimugigo,65 +mousse (duck) (ranma 1/2),65 +moukaku,65 +mot,65 +moroes,65 +moro (like the gale!),65 +moonface,65 +money-shaped pupils,65 +momochi (orrizonte),65 +moira (overwatch),65 +model building,65 +mma gloves,65 +mizutama (mao11260510),65 +mizu (dl7613),65 +mizoro tadashi,65 +miyakura shiiha,65 +miura miki,65 +mita chisato,65 +mistress 9,65 +misonikomi,65 +miry,65 +miroku (kyon35),65 +minis,65 +minato yu (0514),65 +minato yu,65 +minato (zegapain),65 +minami kotori's mother,65 +minami haruya,65 +minagawa yuki,65 +milla maxwell (tox2),65 +mikuning,65 +mid (gameshe),65 +michael-x,65 +metako (machikado mazoku),65 +meltyrice,65 +medusa (jashin-chan dropkick),65 +medara,65 +mayuri (date a live),65 +matsuo hiromi,65 +matamataro,65 +maroppe,65 
+marmyadose (fate),65 +mark iv tank,65 +marishiten,65 +mario kart tour,65 +marimo,65 +mandrake,65 +mana (super real mahjong),65 +mama (nier),65 +mako gai,65 +makishima rin,65 +makingtawawa,65 +maid ane (maoyuu),65 +magna carta crimson stigmata,65 +maga-reimu,65 +machi kyouko,65 +lydie marlen,65 +lisa buijteweg,65 +lio (tsukuyomi-tei),65 +lingmuzi,65 +liking,65 +librarian,65 +level up,65 +leonardo da vinci (active sailor) (fate),65 +lee (arknights),65 +ledo vassar,65 +league of angels,65 +latex bra,65 +lary,65 +langrisser ii,65 +langlang,65 +laguna (granblue fantasy),65 +laceysx,65 +kyousuke nanbu,65 +kyokutei bakin (fate),65 +kyo (maae00),65 +kurosaki honoka,65 +kuro futoshi,65 +kuro ari (pixiv),65 +kuouzumiaiginsusutakeizumonokamimeichoujin mika,65 +kumatoshi,65 +kumashou (nabeyama kaidou),65 +kronie (ouro kronii),65 +kriss sison,65 +kris (fire emblem),65 +kouno harumi,65 +kouchou,65 +korpokkur kne,65 +korotsuke,65 +kooan,65 +konmamion,65 +komeko meko (mecolate),65 +kokorominton,65 +kohiruimaki karen,65 +kohinata yukari,65 +knight blazer,65 +klee (genshin impact) (cosplay),65 +kingdom hearts 3d dream drop distance,65 +killjoy (valorant),65 +kilabo,65 +kikkou sadamune,65 +kid (kidocchi),65 +kenji (8zidayo),65 +kaze tachinu,65 +kawawa sakurako,65 +kawaoka sachio,65 +katou akatsuki,65 +katabami (flower knight girl),65 +karasuma amiru,65 +kanonari,65 +kanoe youshi,65 +kano (nakanotakahiro1029),65 +kannonzaka doppo,65 +kannagi tsukasa,65 +kamek,65 +kalifa (one piece),65 +kago (htpxr),65 +kagemusya,65 +kaekae,65 +k.blank,65 +k-y,65 +junior (gogalking),65 +juliet capulet (cosplay),65 +jr4rt,65 +jornyhail,65 +jndfh,65 +jintetsu,65 +jenkins (azur lane),65 +jcdr,65 +izurumi,65 +itsumi mita,65 +itome (funori1),65 +isshitaira,65 +iriya kana,65 +iria animi,65 +intersection,65 +inatomi hibiki,65 +inahori,65 +imari kurumi,65 +imachi (staccato squirrel),65 +ika (4801055),65 +ichinosenen,65 +hula,65 +hoshizuki kaede,65 +hoshikawa (hoshikawa gusuku),65 +hosh,65 +hootsie (nanashi mumei),65 +honya lala,65 +holding oar,65 +hisui (user zvkr2432),65 +hiromon,65 +hinohoshi ataru,65 +hinemaru,65 +himegami kodama,65 +himaya,65 +hiiragi anri,65 +higashikata tomoko,65 +hichi,65 +hibiscus the purifier (arknights),65 +heroine (dra+koi),65 +hero-san to moto onna kanbu-san,65 +heiseikorotaisei,65 +heikokuru1224,65 +hear (kpmf4732),65 +headbanging,65 +haya bs,65 +hatsukoi 1/1,65 +hashiko (neleven),65 +haru ichigo,65 +handa shin'ichi,65 +hand in another's pocket,65 +hanamoto hagumi,65 +hanamiya nagisa,65 +hana (h6n6 matsu),65 +hakase satomi,65 +hajimete no otsukai,65 +haishiba ame,65 +haine rammsteiner,65 +hagaa,65 +gxp,65 +gutalalaman,65 +gulliver,65 +grey feathers,65 +grelxb,65 +graphics card,65 +grandyoukan,65 +grandma (cookie clicker),65 +gradient necktie,65 +gouda takeru,65 +goton goton,65 +gorilla (kemono friends),65 +goodbye,65 +godekasu,65 +girls symphony,65 +gias-ex-machella,65 +giantcavemushroom,65 +georgios (fate),65 +geometrie,65 +genesect,65 +gemini saga,65 +garrod ran,65 +gardevoir (fashionable),65 +ganmo (takane lui),65 +gan balance,65 +gaiking: legend of daikuu maryuu,65 +gabrieltenma77,65 +fuyukayui,65 +futanari-sama (mdf an),65 +furururu,65 +furufuru fugu,65 +fujimura (marina),65 +fujimaru ritsuka (female) (the three great heroes),65 +footsies,65 +fokwolf,65 +flowers-imh,65 +flit asuno,65 +fingers to cheek,65 +fary,65 +farming,65 +eyepatch pull,65 +extreme dangling,65 +eve (gundam build divers re:rise),65 +ero daisuki,65 +erica ainsworth,65 +enfuku,65 +elfilin,65 +eiji,65 +eeshin (eishin5584),65 
+eerisyn,65 +edmond dantes (monte cristo selection) (fate),65 +draculina (last origin),65 +dra+koi,65 +dota: dragon's blood,65 +dolustoy,65 +dolfini,65 +dog shadow puppet,65 +dodota,65 +distracted boyfriend (meme),65 +devotion,65 +detached ears,65 +dekitate evo! revo! generation! (idolmaster),65 +decus,65 +dawn of the golden witch,65 +david hrusa,65 +date a live: date a bullet,65 +dark falz apprentice,65 +dangan kurabu,65 +dakkusu,65 +dadami,65 +da qiao,65 +cynthia the mission,65 +custom udon,65 +cuntboy,65 +crystal carillon,65 +cream (cream),65 +cotton kanzaki,65 +compilation,65 +competition,65 +compensated molestation,65 +colt 1851 navy,65 +cocoa (cocoa0191),65 +citolo,65 +chinanago7010,65 +chikariya,65 +chibitan,65 +chewtle,65 +charle (fairy tail),65 +champi,65 +chameleon girl,65 +chagu,65 +cathy graham,65 +cat earrings,65 +cat (kemono friends),65 +camouflage panties,65 +cagliostro (halloween) (granblue fantasy),65 +buried stars,65 +brynhildr (swimsuit berserker) (first ascension) (fate),65 +boxing headgear,65 +bolin,65 +body piercings,65 +blood on gloves,65 +bitey (arknights),65 +bishamon,65 +bingansuan jiamouren,65 +bikini jeans,65 +beryl benito,65 +berezovich kryuger (girls' frontline),65 +benelli m1014,65 +beargguy iii,65 +bean bag,65 +barikios,65 +banajune,65 +baiwei er hao ji,65 +bad fanbox id,65 +bachou,65 +ayaoshiro,65 +ayane (princess connect!),65 +awono nanaumi,65 +atago (azur lane) (cosplay),65 +asu no yoichi,65 +astraea f,65 +ass mousepad,65 +asana tsukune,65 +asahi (ge nyu),65 +asa inu,65 +as val (girls' frontline),65 +arusuko,65 +artstation logo,65 +arnas (yoru no nai kuni),65 +armadillo,65 +arim0k0,65 +ariaria (netsuki),65 +ariana (pokemon),65 +aqua capelet,65 +aqua cape,65 +aqua camisole,65 +aosa (michikusakan),65 +aonoe,65 +ant girl,65 +annie (splatoon),65 +anima yell!,65 +angela salas larrazabal,65 +american oppai-san,65 +amatani mutsu,65 +amakasu barley tenji,65 +aloy (horizon),65 +alice or alice,65 +alice (bishoujo mangekyou),65 +alexia lynn elesius,65 +alc (ex2 lv),65 +akutoku no judgement (vocaloid),65 +akutagawa manbou,65 +ako suke,65 +akiyama mio (cosplay),65 +akiwashi,65 +akazawa kureha,65 +akamaru (naruto),65 +ajia (otya3039),65 +aji kosugi,65 +air man,65 +aida taketo,65 +adon (street fighter),65 +adda,65 +abineko,65 +abercrombie (azur lane),65 +a-o a,65 +5t (5t 000),65 +3d custom girl,65 +zui ai shuang mawei,64 +zoo min,64 +zetsuriinu (kairyougata),64 +zamasu,64 +zakuro (otome youkai zakuro),64 +yuzuki n dash,64 +yuuki haruna,64 +yungoos,64 +yuna (mega man),64 +yukimimi,64 +yuki mizore,64 +yodori,64 +year of the dragon,64 +yayoi and nagi,64 +yashio rui,64 +yaruky,64 +yamamoto nanako,64 +yahisa tsukiko,64 +yagi (shiro yagi),64 +xing muhen,64 +wuguno ziran juan,64 +whitewill,64 +white glint,64 +white cane,64 +whisperain (tremble cold) (arknights),64 +wakabayashi isamu,64 +vivit,64 +viral1112,64 +veiny thighs,64 +utsugi mikoto,64 +usako (usako1031),64 +urushihara hanzou,64 +uouokuma,64 +unyuu,64 +under bed,64 +ugogogesik,64 +uchako,64 +tumbler,64 +tthal,64 +tsukinowa kumatarou,64 +tsukinon,64 +tsukimiya ringo,64 +troll,64 +travis touchdown,64 +touyarokii,64 +torpedo (gad3757),64 +toris,64 +tooyama saku,64 +tomato cyuki,64 +tokinon,64 +toki/,64 +tojou michiru,64 +tierno (pokemon),64 +tidehunter (dota),64 +tianye toshi,64 +three-legged race,64 +theodor edelbach,64 +thanos,64 +tengo (maotengo),64 +tatsuta (kancolle) (cosplay),64 +tasuku (tusktouhou4),64 +tapestry,64 +tamaki mitsune,64 +takuto meyers,64 +takeluuu,64 +takagi mitsukuni,64 +taisui xingjun (fate),64 
+tail lights,64 +taihou (kancolle) (cosplay),64 +szzz k,64 +susukitten,64 +super novice (ragnarok online),64 +sunflower fairy (touhou),64 +summary,64 +suma (kimetsu no yaiba),64 +suegorou (mousou tokkyuu),64 +subaru331,64 +starscourge radahn,64 +stadiometer,64 +srwsrx (gp03dsrx),64 +spotted fur,64 +sparrow (artist),64 +spada belforma,64 +souko souji,64 +sou tamae,64 +sory,64 +sora (suguri),64 +sonia branche,64 +someoka ryuugo,64 +somemiya suzume,64 +sofusan1526,64 +so dasui1,64 +smlltb,64 +slaking,64 +sita (fate),64 +silk (marvel),64 +sidney (pokemon),64 +shizupu,64 +shirushiru (saitou888),64 +shiomachi,64 +shinomiya ayase,64 +shinki kakusei melty maiden,64 +shibuya rin (cosplay),64 +sharrkan,64 +shao (newton),64 +senryoko,64 +senno aki,64 +sekaihebi,64 +sawatari miko,64 +sawano akira,64 +satsuya,64 +santystuff,64 +sanma (tabet ),64 +sangokushi puzzle taisen,64 +sandstorm,64 +salovesy,64 +salandit,64 +sakuyamon,64 +sakutaishi,64 +sakura kakumei,64 +saco (cgmore),64 +saber (summer battle clothes) (fate),64 +s vileblood,64 +ryan tien,64 +rumiak,64 +rumi (rarumi11),64 +royl,64 +rotting,64 +rosie (animal crossing),64 +roku (warrock),64 +roas01b,64 +rio (g (genesis1556)),64 +rinrin (sister princess),64 +rima (rimarip),64 +rhea (0u0),64 +rest in peace (phrase),64 +reebok,64 +red lining,64 +rebake,64 +raving rabbids,64 +rani viii,64 +rakia (ds00309),64 +rachel shiori guardian,64 +r-wade,64 +puppetjackmj,64 +printemps (love live!),64 +priest (dq3) (cosplay),64 +power stone,64 +polar bear (kemono friends),64 +planted spear,64 +pipe bomb,64 +picocopi,64 +piano-alice,64 +pekanpeka,64 +paris (fate),64 +panzerkampfwagen iii,64 +pantyshot through reflection,64 +panties over clothes,64 +panprika,64 +panikuru yuuto,64 +owari hajime,64 +overlord (overlord80000),64 +orange cloak,64 +ono mochiko,64 +omaru (0marufestival),64 +oliver evans,64 +okutomi fumi,64 +okita souji (hakuouki),64 +ogiwara sayu,64 +off-shoulder leotard,64 +ocha (oteaaa),64 +nyto adeline (girls' frontline),64 +nutcracker,64 +nullpooo,64 +nuko miruku,64 +noruren,64 +nonstandard furigana,64 +noname (metaldragonfly),64 +noda (yncoon),64 +no toes,64 +nimble fabric,64 +nilan 1999,64 +nijimura kei,64 +niji (rudduf232),64 +nekoyama nae,64 +nekomaru,64 +neck warmer,64 +neal d. 
anderson,64 +natsume k,64 +natsume hinako,64 +naruta iyo,64 +naruse,64 +naru (ul),64 +naotan,64 +nanaly fletch,64 +namiri,64 +nami (teranen),64 +namerakaweb,64 +nagashiro rouge,64 +nadia kim,64 +myahogao,64 +munakata isaomi,64 +mugipot,64 +mugika,64 +muffled,64 +mt (ringofive),64 +mouthful mode,64 +morito (sidonia no kishi),64 +morita yuu,64 +morita gurutamin,64 +moody blues (stand),64 +momotaros,64 +momomaron,64 +momo kyun sword,64 +mizuno (pixiv31352320),64 +miyukiko,64 +miyako draw,64 +miyagi ratona,64 +mixed harem,64 +mitsurugi ryouko,64 +miss siamour,64 +misaki yuria,64 +miranko,64 +minato shachiko,64 +minashiro orihime,64 +minase lin,64 +minase koito,64 +mimizuku (mmiganaru),64 +millicent (elden ring),64 +mikoshiba momotarou,64 +mikami hotaka,64 +midori miyako,64 +michael trinity,64 +mew zakuro,64 +mew lettuce,64 +mejiro dober (vacation safir) (umamusume),64 +mejikara scene,64 +megastore comics,64 +megarisu,64 +meekohopanes,64 +mayuge inu,64 +matsuri6373,64 +matsumoto maya,64 +master utsushi,64 +mashin sentai kiramager,64 +maruta kentarou,64 +maruhachi (maruhachi record),64 +marie (dies irae),64 +maou prier,64 +manjyufroth,64 +manaka (gunjooou),64 +mamono hunter youko,64 +malamar,64 +makamati,64 +mahou tsukai tai!,64 +magby,64 +madoromi no yakusoku,64 +luca angeloni,64 +luan teng,64 +lottery,64 +long riders!,64 +lofter username,64 +live twin ki-sikil,64 +list,64 +linmiu (smilemiku),64 +lincoro,64 +lin (hokuto no ken),64 +licking tail,64 +li sushang,64 +leonardo da vinci (rider) (second ascension) (fate),64 +legs behind head,64 +lanse dai mao,64 +landolt c,64 +lamprey,64 +lairon,64 +lace-trimmed camisole,64 +kyusoukyu,64 +kyouryuu sentai zyuranger,64 +kyonyuu fantasy,64 +kusada,64 +kuronosu (yamada1230),64 +kuroneko shiro,64 +kurohitsuji lim,64 +kurenai hanpen,64 +kupikuuu,64 +kumatani,64 +kukuru (dq8),64 +kukua (mosushi),64 +koyubi right,64 +kousi sirazawa,64 +konome noi,64 +konataeru,64 +kokomachi,64 +koko (kishibe),64 +kocchi muite baby (vocaloid),64 +kobayashi gen,64 +kneading,64 +kiyohime (swimsuit lancer) (second ascension) (fate),64 +kittan (cve27426),64 +kitou kaitai,64 +kitora (kisekinonameko),64 +kisaragi yakumo,64 +kinnan,64 +kimberly ann possible,64 +ki-51 (ampullaria),64 +kazunehaka,64 +kataphrakt (aldnoah.zero),64 +katahane,64 +kashiwagi yuuna,64 +karyl (real) (princess connect!),64 +karosu maker,64 +karla (kimidori3),64 +karen le cao,64 +karashi (tou gara shi),64 +kanzaki hitomi,64 +kanon (ikamiso),64 +kanojo (ogino atsuki),64 +kanchuumimai,64 +kanata (kanata onion),64 +kamippoina (vocaloid),64 +kamina (ttgl) (cosplay),64 +kamen rider evil,64 +kage (ousama ranking),64 +juuoumujin no fafnir,64 +jun the swan,64 +jontxu,64 +jinani,64 +jellicent (female),64 +jean bart (uninhibited bloodstone) (azur lane),64 +jajka (girls und panzer),64 +jaenbba,64 +izuki (toneya),64 +itsumip,64 +itou nobuhiro,64 +itomi sayaka,64 +ishida (ishida to asakura),64 +interlocked venus symbols,64 +indeedee (female),64 +indarias (genshin impact),64 +implied bestiality,64 +imouto no okage de mote sugite yabai,64 +imoichi,64 +ikkei dou,64 +ikaruga luca,64 +ieiieiiei,64 +idola phantasy star saga,64 +ichi ka,64 +ice shaver,64 +hzk,64 +hyoon (sockgyu),64 +hyness,64 +huazha01,64 +howler (owler),64 +howa type 64 (girls' frontline),64 +hourei tenten,64 +houjou reika,64 +hoshi no gen,64 +horiishi horuto,64 +homura minori,64 +holiday (pangjelly),64 +holding hands is lewd,64 +hitsujisnow,64 +hitaki yuu,64 +hirakata kana,64 +hinatsuru (kimetsu no yaiba),64 +hinata kokage,64 +himino seika,64 +hilda (hilda),64 
+hieda (hiedanoaqn),64 +hibari hina,64 +heart guitar,64 +harutask,64 +harusaki nodoka,64 +harunagi,64 +harukaze koucha,64 +haruka poi,64 +haru (re ilust),64 +haru (oomr005),64 +hanidebi! honey & devil,64 +hanasaki kaoruko,64 +han yijie,64 +han'eri,64 +hamster wheel,64 +ham na-bi,64 +hachiman (douno),64 +gundam gp-03 stamen,64 +gunba (5meters),64 +gumiten,64 +greyface,64 +green wristband,64 +greece,64 +great lungmen logo,64 +gradient neckerchief,64 +gore (white gore),64 +gokurin,64 +giorgio claes,64 +gingerbread house,64 +gilgamesh (sensha otoko) (fate),64 +ghost (psg),64 +gemuo,64 +gat (korean traditional hat),64 +garrus vakarian,64 +garnet (steven universe),64 +gamiani zero,64 +gamhwa,64 +galarian zapdos,64 +gakkou de atta kowai hanashi,64 +gaiidraws,64 +g9 (jiiku),64 +fuuna thise,64 +funikura,64 +full-body blush,64 +fuku (r-i-h-o),64 +fujimaru arikui,64 +fujikusa,64 +fujigakubou,64 +fuji-k,64 +fruit dildo,64 +fran (reborn),64 +form code,64 +fn f2000,64 +firework background,64 +fenrirr,64 +eyepiece,64 +extra tongue,64 +exit tunes,64 +exga,64 +evinist,64 +enka (bcat),64 +engacyo (engacyo39800yen),64 +emu alice,64 +empress (dmfd),64 +emperor penguin (kemono friends) (cosplay),64 +elf (lineage 2),64 +eichikei (hakuto),64 +eeryuu (2004107),64 +edy nelson,64 +echigoya takeru,64 +ebi-rom,64 +ebi-chan (gawr gura),64 +easy breezy,64 +e.t.,64 +dukemon,64 +duffy,64 +dragon tactics,64 +dr. mundo,64 +dot r,64 +donkey tail,64 +dodo (kemono friends),64 +dnf duel,64 +disembodied tongue,64 +dirty pair flash,64 +diana (sailor moon),64 +deneb rove,64 +delmin (show by rock!!),64 +deavor lover,64 +daze (kagerou project),64 +date a live: spirit pledge,64 +daro,64 +damodar,64 +daible,64 +d-boy,64 +cutthroat (akudama drive),64 +cure earth,64 +cum on headwear,64 +csy,64 +crotchless bikini,64 +crop top lift,64 +crea rosenqueen,64 +coraman,64 +combattler v (robot),64 +cojibou,64 +cleaning weapon,64 +claymore (mine),64 +chuchu (kirby),64 +chuatury panlunch,64 +chrysa ( sa0ru),64 +christiane barkhorn,64 +chitose (azur lane),64 +chirun0,64 +chill ykon,64 +char b1,64 +castlevania (netflix),64 +carbink,64 +can (honkai impact),64 +cake hat,64 +buruma removed,64 +buchi (y0u0ri ),64 +bowsette (cosplay),64 +bon (bonbon315),64 +bokuya,64 +bokkusu,64 +bnahabra (armor),64 +bloodcandy,64 +black dog,64 +bitter glasse (umamusume),64 +billy katagiri,64 +big bob (arknights),64 +bent spoon,64 +belgian flag,64 +bb-28,64 +battle spirits: shounen gekiha dan,64 +banonefans,64 +balut (7676i),64 +b3b,64 +ayer,64 +ayanami (lunar demon) (azur lane),64 +ayama nano,64 +ayacho,64 +avery (skullgirls),64 +avatar (lineage),64 +asura fantasy online,64 +asu hare,64 +assistant waddle dee,64 +ashita wa hitsuji,64 +ashe (mega man),64 +ash (pixiv53802),64 +asahina yurina,64 +ariura kanna,64 +ariel (novel),64 +areaaaron,64 +ao (time-leap),64 +anivia,64 +anise azeat,64 +android 16,64 +ame-rain,64 +amaterasu (p&d),64 +alpha (acerailgun),64 +akr et,64 +akiyama kenta,64 +akitaka takaaki,64 +akashi (sorobochi),64 +akanebi,64 +akagiakemi,64 +akagi (ruby-laced beauty) (azur lane),64 +akabeko,64 +aizawa ema,64 +aibek,64 +ahn,64 +against pillar,64 +abe (kumayu),64 +96tuki,64 +1 mutsuki,64 +10mk,64 +zygarde core,63 +zunusama,63 +zukaketawagase,63 +zoushi kanai,63 +zossie (pokemon),63 +zink (zink mchn),63 +zetsuen no tempest,63 +zero (inazuma eleven),63 +zefyu,63 +zaraku,63 +yway1101,63 +yuusha keisatsu j-decker,63 +yuuri lessen,63 +yuukami (wittsu),63 +yurishia farandole,63 +yune (ayanepuna),63 +yumari nakura,63 +yukiyama momo,63 +yua (tick-tack),63 +yozu 
(yozu0420),63 +youko (tactics),63 +yoshinao (yoshinao 0203),63 +yoshimura thi mai,63 +yoshiharu,63 +yonekura kihiro,63 +yoma,63 +yoka1chi,63 +yellow serafuku,63 +yazuka,63 +yatagawa nazuki,63 +yasuto (eria151),63 +yami marik,63 +yamaiso,63 +yako noir (kei-ne),63 +yakepu,63 +yabe satoshi,63 +wrwr,63 +work gloves,63 +wei (kaminari0411),63 +wataro (watawatawatapon),63 +watanabe ayasa (jackdenpa),63 +wasa (wanosabi),63 +wanotsuku,63 +wannai kinuho,63 +vortex,63 +virtuous treaty,63 +vigwer,63 +vanilla (nicolla),63 +uzura (moimoi),63 +uni (gugurutan),63 +uncle sam,63 +unbalance unbalance,63 +umizakura tachimi,63 +umino haruka (harukaumino6),63 +umebayashi saki,63 +ultra (511455329),63 +ulrik,63 +uka,63 +ujikintoki,63 +uchiko,63 +tyrogue,63 +tsukishiro hitomi,63 +tsukimaru,63 +tsukii,63 +tsukasa 0913,63 +tsukamoto shuuichi,63 +tsugikuni yoriichi,63 +tsubakuro yume,63 +touhou m-1 grand prix,63 +torokeru none,63 +tooyama kazuha,63 +toon (noin),63 +tonbidou,63 +tomboo,63 +tokuto-kun,63 +tiziano,63 +tiry,63 +tiger pelt,63 +tibonobannsann,63 +thurim6,63 +the memories of phantasm,63 +the chicken that appears in the middle of cookie,63 +tezunuri,63 +tezcatlipoca (housamo),63 +terumin (yuganda sebone),63 +terran,63 +tenma umatarou,63 +temutemutemu,63 +tatsuri (forest penguin),63 +tatsuma daisuke,63 +tate (pokemon),63 +tartu (azur lane),63 +tarte (hodarake),63 +tamase miki,63 +takai isshiki,63 +taguchi makoto,63 +tachibana ginchiyo (sengoku musou),63 +tabao,63 +sylph kim,63 +swing!!,63 +suzuki nago,63 +suzuhara sakura,63 +surtr (housamo),63 +summon night swordcraft story,63 +sukuna hikona,63 +suiyou dou de shou,63 +suika (suika-dokei),63 +suekane kumiko,63 +stella chen yui,63 +stari,63 +standing on shoulder,63 +squalo,63 +sprout on head,63 +springfield (o holy night) (girls' frontline),63 +spread nipple,63 +spiked bat,63 +spiceg,63 +sonomura maki,63 +solkorra,63 +sokka (kbs),63 +sohee,63 +soaking hands,63 +snale,63 +smudge,63 +small kyubey,63 +skypixter,63 +skuntank,63 +skull cup,63 +skirt aside,63 +sizuka (takuma0),63 +siroyuki,63 +sinko (sinsin),63 +sii artatm,63 +sight,63 +shocho (shaojiujiu),63 +shirahane yukina,63 +shimada mayu,63 +shima saki,63 +shikoro,63 +seu kaname,63 +seto miyako,63 +sesame street,63 +series connection,63 +seri (yuukasakura),63 +seong mi-na (bural chingu),63 +senbei (avocadochaya),63 +sei shoujo ~seido ikusei gakuen~,63 +sbd dauntless,63 +sazanami (ripple1996),63 +say'ri (fire emblem),63 +sawwei005,63 +sawa (sawasaku),63 +satou akira,63 +satoru wada,63 +satoma makoto,63 +sato (hekiga ni nemuru),63 +sapphire (nine),63 +sanagi (diohazard),63 +sanada keisui,63 +san francisco (azur lane),63 +samail,63 +sakurai tamako,63 +sakuragi hanamichi,63 +sakura inu,63 +sakazuki (akainu),63 +saint estera gakuin no shichinin no majo,63 +sailor star maker,63 +sagta panggang,63 +sagara arisa,63 +saeki kaori,63 +ryuga (balius),63 +rufflet,63 +rubii,63 +romeo montague,63 +rokunen,63 +riz (ravel dc),63 +ririko (fhnngririko),63 +rinne (sennen sensou aigis),63 +riichi (rouchi),63 +rhinoceros girl,63 +rhine (overtonerhine),63 +reverie metherlence,63 +rerebrace,63 +ren (zero second),63 +rei kazami,63 +rave,63 +rapua qive,63 +rakkyo,63 +rain (sao),63 +rai (rai-s),63 +raffina (puyopuyo),63 +queen of hearts (alice in wonderland) (cosplay),63 +qi2341,63 +puttee,63 +puma (brand),63 +protagonist (yomawari),63 +potemayo (character),63 +polka dot footwear,63 +pokki (sue eus),63 +pokemon the movie: i choose you!,63 +playing with another's ears,63 +planking,63 +pkp pecheneg,63 +pkm,63 +pitcher plant,63 +pipkin 
pippa,63 +pinki o64,63 +pinkboy,63 +philyshy (alchemy stars),63 +peony (fire emblem),63 +peko-chan,63 +pegitan (precure),63 +peco peco,63 +peachyp,63 +pawmi,63 +paula (suikoden),63 +patty (fire emblem),63 +parodius,63 +panty (psg) (cosplay),63 +palm-fist tap,63 +paleatus,63 +pakotaroh,63 +ouzisamafe,63 +osshouri55,63 +orimoto izumi,63 +oprince,63 +ono (0 no),63 +omegu,63 +okada manabi,63 +obata takeshi,63 +o/p.com,63 +nyoron (fudegatana),63 +norma beatty,63 +norikoi,63 +noriheita,63 +noise paper,63 +niruanu (nitayam),63 +nikaidou reika,63 +nikaidou benimaru,63 +night strait princess (black),63 +nicholas f,63 +neri sachiko,63 +neptune (planet),63 +neo geo battle coliseum,63 +nenehotoso,63 +nekokan masshigura,63 +nekohane ryou,63 +necro-san,63 +ne f g o,63 +natsumi-chan (kanabun),63 +murase48,63 +murasaki daidai etsuo,63 +muchin jousha,63 +mrnn,63 +mouth guard,63 +moth (diddms1999),63 +moshoko (mizuneroku),63 +monster hunter xx,63 +mofun,63 +mofmof (sousa),63 +mochi hanpen,63 +mizuse ruka,63 +mizuki shiori,63 +miyu greer,63 +miyamoto musashi (traveling outfit) (fate),63 +miyaguchi kei,63 +mistynight,63 +missnips,63 +miss monochrome (character),63 +miss monochrome,63 +migikata no chou (vocaloid),63 +michi kuso,63 +michelangelo (tmnt),63 +metallica (stand),63 +meronshiroppu,63 +meri-san,63 +mentholatum,63 +megawatt,63 +mega mewtwo y,63 +meer campbell (cosplay),63 +mecha (alswp),63 +maze,63 +mazakura senju,63 +mao ge,63 +manjuu teishoku,63 +makura wet,63 +maki aida factor,63 +majormilk,63 +mahou shoujo tart magica,63 +magical mirai miku (2020 winter),63 +maggot,63 +madanai (morisumeshi),63 +mad369,63 +macuahuitl,63 +m1 helmet,63 +lunderhaus cord,63 +luna (mujin wakusei survive),63 +lumine (genshin impact) (cosplay),63 +lock earrings,63 +liver city,63 +lipstick writing,63 +linith,63 +lilyglazed,63 +lily evans,63 +lian yao,63 +leone,63 +lancheu,63 +kyuusui gakari,63 +kyuu (chiu850513),63 +kuzukiri (riezenagel),63 +kuze kiriha,63 +kuu nekoneko,63 +kusumoto shizuru,63 +kurogoma (meganegurasan),63 +kuon itsuki,63 +kuon (shinrabanshou),63 +kumoko (kumo desu ga nani ka?),63 +koyama rikako,63 +kotamaru (pajama soft),63 +kon'ya wa neko-chan,63 +komiyam a,63 +koala (one piece),63 +kivo,63 +kite (.hack//),63 +kitahara aki,63 +kisekae,63 +kirishima shouko,63 +kirima (user danf8787),63 +kirarazaka marina,63 +king kittan,63 +kilalesi,63 +kidd coega,63 +ki no nekko,63 +kenzen robo daimidaler,63 +kazenemuri,63 +kazato fuuchi,63 +kayn (league of legends),63 +karei (zeroseed),63 +karako (osomatsu-san),63 +kanzeon,63 +kankitsu kei,63 +kanata (mizubenisumutori),63 +kamota (momokomati),63 +kamishirasawa keine (cosplay),63 +kamiizumi yasuna,63 +kamesys,63 +kamen rider stronger (series),63 +kamabo ko,63 +kaka kittens,63 +kajiji,63 +kaer sasi dianxia,63 +k suke (weibo),63 +junkei,63 +juneau (azur lane),63 +jun (rojiura jack),63 +joujima yuuki,63 +joshi kousei no mudazukai,63 +jonathan h,63 +jidao huashi,63 +jhin,63 +jfjf,63 +jerry3912,63 +james bond (series),63 +izuminanase,63 +iwasaki kouji,63 +iwao178,63 +itohime,63 +ithaca m37,63 +ishimori sakana,63 +isbn,63 +isago (ica),63 +ipod ad,63 +ioriwu8,63 +inoue toro,63 +inoue miyako,63 +innocent grey,63 +ines (arknights),63 +imu (lom),63 +improvised weapon,63 +imae megumi,63 +imacchi,63 +iiiroha,63 +ichigo junior high uniform,63 +iaidou,63 +hyshirey,63 +hoshino lala,63 +hoshibudou,63 +honne dell,63 +hondoumachi koharu,63 +hizakozouzu,63 +hisakabe oto,63 +hinoya,63 +hinotama (hinotama422),63 +hina (bird salty),63 +himaruya hidekazu,63 +hiiragi mayuki,63 +higashitani 
fumito,63 +hibiki (kancolle) (cosplay),63 +hexelica,63 +hetano yokoduki,63 +herowarz,63 +herdier,63 +henemimi,63 +helluva boss,63 +heimu (heim kstr),63 +hcz n,63 +hazuki mina (darker than black),63 +hazuki gyokuto,63 +hayasaka miura,63 +hayaken sarena,63 +hatomaru (hatomaru56),63 +haryudanto,63 +haroukitei kigurumi,63 +hanyu,63 +hangetsuban sonshou,63 +hanen (borry),63 +hako roku,63 +hakan,63 +haiperion buzan,63 +haibara sakuya,63 +hage ta,63 +hachimitsu honey,63 +habuki,63 +gyaru v,63 +gundam age-1 titus,63 +gummyrise,63 +gray fox,63 +grasshopper inoue,63 +giga-tera,63 +giant mushroom,63 +gas pump nozzle,63 +ganymede (overwatch),63 +futanari-chan (akiamare),63 +futaba neiko,63 +furumeta,63 +fumirumochigashin,63 +fujishiro kokoa,63 +fox wife (doitsuken),63 +forte (symbol),63 +forniphilia,63 +fiorayne (monster hunter),63 +finger writing,63 +fai d. flowright,63 +extension cord,63 +extended magazine,63 +evernight goddess,63 +evening rabbit,63 +ester ein astrada,63 +epiphyllum,63 +emperors saga,63 +emperor (ff2),63 +elizabeth bathory (cinderella rider) (fate),63 +ekusera,63 +ebbilin,63 +dvach-tan,63 +du meishin,63 +dsr-50 (weapon),63 +drops mint,63 +dragon quest swords,63 +donbee (food),63 +domodesu,63 +diving penguin,63 +diva (blood+),63 +dh-zone,63 +decoponmagi,63 +daylight919,63 +dark hunter 4,63 +cum on legwear,63 +cryptract,63 +crushed can,63 +crossed wrists,63 +cross-laced pants,63 +coupe,63 +collarbone piercing,63 +clownpiece (cosplay),63 +cloaca,63 +claymore (sword),63 +cici,63 +chroah vatel,63 +choujigen taisen neptune vs sega hard girls,63 +choomoranma,63 +chiyoganemaru,63 +chinpan,63 +chinbotsu,63 +chikahii,63 +checkered pillow,63 +chapter number,63 +chany,63 +chainsaw devil,63 +chaashuu,63 +cero (cerocero),63 +catherine cover parody,63 +can (canzume),63 +camera waddle dee,63 +calcifer,63 +byulrorqual,63 +bump (volleyball),63 +broken door,63 +british army,63 +bradamante (second ascension) (fate),63 +boukou-chan (tokiwata soul),63 +bohegao,63 +bobbles,63 +blonde onee-san (sky-freedom),63 +blaze (blaze pso2),63 +blacknight (arknights),63 +bill (pokemon),63 +bernard-jou iwaku.,63 +beast spear,63 +basculin,63 +bardiche (riot zanber stinger),63 +bandaid on foot,63 +badhand,63 +bad end sunny,63 +baal (shiomachi),63 +azuki (lizzy),63 +azazel1944,63 +ayumu (ayumu3659),63 +ayatak0517,63 +ayanami (low-key idol @confused) (azur lane),63 +ats (ats2nd),63 +atlach-nacha,63 +aoki kanji,63 +aogami high school uniform,63 +angelic buster,63 +anastasia (swimsuit archer) (first ascension) (fate),63 +amiami,63 +alva,63 +alph (sancheck),63 +alomomola,63 +akiiro renka,63 +akegata tobari,63 +akari (blue archive),63 +akairo no mako,63 +akaino (akaomi),63 +ak-alfa (girls' frontline),63 +aina saharin,63 +aida mai,63 +agria (tales),63 +aether sage (elsword),63 +adnachiel (arknights),63 +adagumo no yaorochi,63 +actress,63 +abe yasushi (umizoi tibet),63 +96nokimihito,63 +7ban,63 +zrero,62 +zhenlin,62 +zelo-lee,62 +z35 (azur lane),62 +yuu (derodero),62 +yuru yuri's starting,62 +yuno (mioalice),62 +yummy yoi,62 +yukino super,62 +yudough,62 +yu (xcapriccioso),62 +ysmmzr,62 +youshun (naturaljuice),62 +youkai fox (wild and horned hermit),62 +yoshimune (b12f),62 +yonezawa mao,62 +yomawari,62 +yokon2199,62 +yitiao er-hua,62 +yellow shawl,62 +yazawa kokoa,62 +yaya (yayaa 00),62 +yanagi ryuu,62 +yamamoto shikaku,62 +yakisoba ohmori,62 +yagi2013,62 +yagai gakushuu,62 +xuanlin jingshuang,62 +xing,62 +wooni,62 +wonder momo,62 +wonbin lee,62 +wonawo,62 +wilted flower,62 +whitefrost dragonewt filene,62 +white petals,62 
+waving flag,62 +wanko,62 +wanao,62 +wake,62 +wada chiyon,62 +volibear,62 +virtua fighter 5,62 +veko,62 +veiny tentacles,62 +vados (dragon ball),62 +uzu hi,62 +usamin,62 +unplugged line,62 +unown k,62 +unknown mother goose (vocaloid),62 +universal federation army uniform,62 +undersized breast cup,62 +unbuttoned sleeves,62 +umiu (hoge),62 +umino chika,62 +tuna21,62 +tsuzuchii,62 +tsunekawa niwasuke,62 +tsunami (tenchi muyou!),62 +tsumujikaze koyori,62 +tsukune (takane lui),62 +transparent seat,62 +tower of saviors,62 +toshiya,62 +torrent (elden ring),62 +toranashi,62 +top gun,62 +tomoyuki hino,62 +tokito yu,62 +toaru kagaku no accelerator,62 +tiz arrior,62 +tie fighter,62 +tian nya,62 +thief (dq3),62 +thea (fire emblem),62 +the euro front,62 +tetsu (excalibur920),62 +tensai ouji no akaji kokka saiseijutsu,62 +tenji,62 +tege (tege xxx),62 +teasmacker,62 +tanuki koubou,62 +tano,62 +tan tan pou,62 +tamakko,62 +takura mahiro,62 +takagi seiniku,62 +tacch,62 +sybian,62 +sword in head,62 +sweden,62 +supo01,62 +super paper mario,62 +suntory nomu,62 +sugiyama mio,62 +string in mouth,62 +sterben,62 +steel-toe boots,62 +star gladiator,62 +spot the differences,62 +soyo2106,62 +soupchan,62 +soulcalibur ii,62 +soukun s,62 +sorashu,62 +soranokakera01,62 +solar milk,62 +smog,62 +smack,62 +sinsora,62 +sinohira rin,62 +sieg (sherman 69),62 +shoutaro saito,62 +shiunnkaku,62 +shishio makoto,62 +shirayuki (warship girls r),62 +shirakawa kokona,62 +shinohara rei,62 +shinitagari shoujo to shokujinki-san,62 +shining resonance collection of visual materials,62 +shin sekaiju no meikyuu,62 +shin murasame,62 +shigurui,62 +shidatsu takayuki,62 +sharon rainsworth,62 +shared cape,62 +sevie,62 +serilly (puyopuyo),62 +seraphim throne,62 +sea angel,62 +scbstella,62 +saturday (hokawazu),62 +satsuki (gogotaru),62 +sasquatch (vampire),62 +sasahara natsuki (val bi ole),62 +sarkany csont landzsa,62 +sandile,62 +sandansu,62 +sakurada nori,62 +sakurada akane,62 +sakura yuu,62 +sakura hane,62 +sakizou,62 +saileach (appreciate fragrance) (arknights),62 +saber (pure night dress) (fate),62 +ryuusama,62 +ryu3224,62 +ruto (petatann),62 +runamonet,62 +royal arsenal,62 +roxanne (isekai meikyuu de harem wo),62 +rokushaku fundoshi,62 +rip van winkle,62 +rinrin kai,62 +rima rima ri,62 +ridget (suisei no gargantia),62 +regal bryan,62 +red fire,62 +reborn,62 +rakugaki-chan,62 +raito47,62 +rage (rojiura),62 +ragdoll (boku no hero academia),62 +quetzalcoatl (samba santa) (fate),62 +queen's blade spiral chaos,62 +qinglai haiji,62 +purple nightgown,62 +pupa jiang,62 +puffer fish vomiting water (meme),62 +psychos,62 +princess zelda (cosplay),62 +poogie,62 +pomp (qhtjd0120),62 +pola (1021),62 +pokemon: lucario and the mystery of mew,62 +pokarii zuu,62 +pochi-t,62 +pmlial,62 +plusout,62 +playback,62 +pixel maritan,62 +pinstripe jacket,62 +pink bracelet,62 +pine (yellowpine112),62 +peridot (steven universe),62 +paper boat,62 +panther girl,62 +pallapalla (sailor moon),62 +otorimonogatari,62 +oshi taberu,62 +osakabe-hime (swimsuit archer) (first ascension) (fate),62 +oriharaizaya819,62 +origamine ouka,62 +oobako,62 +onodera raika,62 +onigiri hair ornament,62 +one way sign,62 +okuzaki akira,62 +okonon (kado colda),62 +okiq,62 +ogata,62 +ocarino,62 +notchi,62 +nos,62 +nona (goodboy),62 +nomuo (shiromi),62 +nompang,62 +noel seeker,62 +nob1109,62 +niu ju (orange bull),62 +nishiro ryoujin,62 +nishina masato,62 +niku (dance-siva),62 +nightgown lift,62 +nichigeckoh,62 +nice holystone,62 +ngc20701,62 +neon ui,62 +nekoyaso,62 +nekokami,62 +neko no ongaeshi,62 
+natsumi chorisuke,62 +natsukon,62 +natsuiro kiseki,62 +natsu no arashi!,62 +naruko (nalcoro),62 +naotosi,62 +naoton,62 +nana (kurisustinah),62 +namine0079,62 +nakaishow,62 +nail gun,62 +nae-nae,62 +nabana,62 +nab,62 +myurumyuru,62 +murayama (high school dxd),62 +multiple panties,62 +mukkun,62 +muichimon,62 +muhamado,62 +motu0505,62 +motoyon,62 +morugen,62 +mofuo,62 +mo (ine mao),62 +mizuno tera,62 +miyabi (037),62 +mistilteinn (closers),62 +misery (doukutsu monogatari),62 +misawa kei,62 +misa (jjin miryeon),62 +mirror twins,62 +mirei-san (suterii),62 +mira (kendeshi),62 +mio-muo1206,62 +minsk (azur lane),62 +miku with you (vocaloid),62 +mikkusushi,62 +mik yanase,62 +migita hibiki,62 +migi,62 +mightyhonk,62 +mei (abliss),62 +megu (pixiv9460065),62 +mazel (mazel star),62 +masuneko,62 +marion phauna,62 +marion (high school dxd),62 +maria (maria0304),62 +mano youko,62 +mana30row,62 +mammoth,62 +makio (kimetsu no yaiba),62 +makino yume,62 +maka (hyougenbu),62 +lyas,62 +lxkate,62 +lunatic psyker (elsword),62 +luna nyann,62 +luciferion,62 +lucia morgan,62 +lisa eostre,62 +lipstick mark on ass,62 +lion (warship girls r),62 +linaria (ookinahitomi),62 +lily rain,62 +lilac,62 +lightofheaven,62 +lichiko,62 +level.21,62 +lechuza,62 +lanmei,62 +langrisser iii,62 +lancer (weapon),62 +laila (queen's blade),62 +lace-trimmed thighhighs,62 +kyu,62 +kyoushirou to towa no sora,62 +kyonyuu-chan (iku),62 +kyomu (7641),62 +kyan-dog,62 +kuze,62 +kushami deso,62 +kurumi (princess connect!),62 +kurone,62 +kuraryu,62 +kuramoto erika,62 +ktovhinao,62 +koyaya,62 +kouko,62 +kome (vm500),62 +kokuyou,62 +kokesa kerokero,62 +kodamazon,62 +koari,62 +kitaooji hanabi,62 +kiseijou rei,62 +kisaragi yuki,62 +kirishima kaito,62 +kimidori-san,62 +kikka (kicca choco),62 +khan the swift,62 +kenja no deshi wo nanoru kenja,62 +kemurikusa (object),62 +kemono friends kingdom,62 +keikei (kitty colors),62 +kazamatsuri touma,62 +kayakooooo,62 +kawausoman,62 +kawana misaki,62 +kashiwagi yuuma,62 +karin koenig,62 +karin (a62826704),62 +kanisawa kinu,62 +kan lee,62 +kamoi hayato,62 +kamimura haruka,62 +kaito (vocaloid) (cosplay),62 +kaidan restaurant,62 +kagerou (gigayasoma),62 +kagami kazuya,62 +kadokura (whokdkr),62 +kabuto kouji,62 +jumping dogeza,62 +js 9 (girls' frontline),62 +jowell she,62 +jinguuji marimo,62 +japanese tankery league judge uniform,62 +jana schirmer,62 +j.c.14,62 +izumi luna (akitsu taira),62 +izumi iori,62 +izmir (summer) (granblue fantasy),62 +iwa (iwafish),62 +itou yukino,62 +it's ok to touch,62 +it's j.j. 
style!,62 +ishiwari,62 +isekai quartet,62 +isakawa megumi,62 +iosys parody,62 +ikag,62 +igarashi ran (igatz),62 +ichihara chiaki,62 +ibaraki douji (swimsuit lancer) (second ascension) (fate),62 +hyun9164,62 +hydrangea hair ornament,62 +houzouin inshun (fate),62 +house of cards,62 +hoshitetsu ringo,62 +hoshi nawoki,62 +horse costume,62 +horn speaker,62 +holding tongs,62 +holding belt,62 +hoihoi-san,62 +hogara,62 +hinano (sky-freedom),62 +hina sasaki,62 +hijiki meshi,62 +hijabolic,62 +hiiro yuya,62 +higero (wataten),62 +hidakarumen,62 +henki (orange),62 +helioptile,62 +heavy cruiser summer princess,62 +head mirror,62 +hazumi rio,62 +hazuki futahi,62 +hawthorn,62 +hatsuyuki sakura,62 +haruya (lajoon),62 +harune aira,62 +haru431,62 +hara yumiko,62 +hanzow t,62 +hands on own cheek,62 +handa seishuu,62 +hal (sakurajam),62 +hair blush,62 +gyuutarou (kimetsu no yaiba),62 +gundam barbatos lupus rex,62 +green little,62 +grater,62 +gotou nao,62 +gonta (gshoutai),62 +golden axe,62 +girl from the illusionary world,62 +geonjeonji,62 +gedou danshaku,62 +gao (naodayo),62 +galatea (claymore),62 +galarian moltres,62 +ga015,62 +g.t,62 +fuwaffy,62 +fuurai (resuty),62 +fuumin (youkai watch),62 +furaido,62 +fujiki yuusaku,62 +fors wall,62 +flower ring,62 +florges,62 +floral dress,62 +fiora ariete,62 +felicia-val,62 +feitan,62 +fate/type redline,62 +fal (falketto),62 +exposed bone,62 +et.m,62 +enu (roco roco44),62 +enami (e373),62 +emya,62 +elder cousin (igarashi kyouhei),62 +eko.art,62 +eddie brock,62 +eddelrittuo,62 +echizen ryooma,62 +dying (dying0414),62 +dragon crisis!,62 +doku gorira,62 +doctor strange,62 +disguised pyra (xenoblade),62 +denkitori,62 +deep one kyomu to mugen no fragment,62 +date (mamanonamaebot),62 +daniel deng,62 +dai0,62 +daefny,62 +cyron tanryoku,62 +cure march (princess form),62 +cuderia von feuerbach,62 +cradling,62 +cota,62 +con potage,62 +clawed boots,62 +chuushuu meigetsu miku,62 +chronoir,62 +chitose sana,62 +cherie espoir,62 +checkered blanket,62 +cassandra cain,62 +cannonball ~neko neko machine mou-race!~,62 +call e,62 +cafe no zombi-ko,62 +cafe cuties (league of legends),62 +burmy,62 +btms666,62 +bribery,62 +boogbogex,62 +bluesnail,62 +blue (ao maru),62 +black straps,62 +black panther (film),62 +battletech,62 +battlement,62 +barboach,62 +baltimore (evening breeze minuet) (azur lane),62 +bacun,62 +azuma sawayoshi,62 +atalanta alter (first ascension) (fate),62 +at knifepoint,62 +ass worship,62 +ash-greninja,62 +asclepius (nanoha),62 +asazuki kanai,62 +asashio kai ni (kancolle) (cosplay),62 +arslan,62 +ariesu watanabe,62 +areola measuring,62 +aragami,62 +ar-57 (girls' frontline),62 +applying bandages,62 +appletun,62 +aomaru (shta-chu-jisuiai),62 +aoi kanan,62 +aogami pierce,62 +anzu (01010611),62 +anomalocaris,62 +ano (gccx8784),62 +andou hiroyuki,62 +anchun (quail0503),62 +ana (mother),62 +amore1989,62 +amemori sayo,62 +amano onsa,62 +amamiya sakura,62 +amagase lyle,62 +alvin lee,62 +akina422,62 +akayan,62 +akari acura,62 +akaito,62 +akai maho,62 +akai ito,62 +akafuku pukoemon,62 +akabane rin,62 +aizawa hiroshi,62 +aira (endless5515),62 +agent 416 (girls' frontline),62 +abigail williams (animejapan 2018) (fate),62 +abe (roiz),62 +8-bit,62 +2gou,62 +2f sq,62 +2006 fifa world cup,62 +123 (tyamaguch),62 +zootan,61 +zoisite (sailor moon),61 +zhudacaimiao,61 +zhiyu moke,61 +zetsuyo chimayo,61 +zethia,61 +zero (code geass) (cosplay),61 +yuyusu (cookie),61 +yuuki makoto (ensemble stars!),61 +yuuki (yukinko-02727),61 +yunohito,61 +yumuto (spring1786),61 +yuki akira,61 +yuk233,61 +yuge 
(yuge bakuhatsu),61 +youamo,61 +yoshitake,61 +yoshioka yuki,61 +yoshikawa hiro,61 +york (azur lane),61 +yorha no. 9 type s (cosplay),61 +yellow male swimwear,61 +yellow brooch,61 +yasutake,61 +yappariga,61 +yamano rokamizu,61 +yamaguchi yoshimi,61 +yalmyu,61 +xingchen (cosplay),61 +x-ray film,61 +wulie errr,61 +world election,61 +wookyung,61 +wight (monster girl encyclopedia),61 +white santa costume,61 +wawamachi (wawamachis),61 +wata,61 +wangjook (wj),61 +wandenreich,61 +wakatsuki misato,61 +vss vintorez,61 +void (guilty crown),61 +violinist of hameln,61 +vinsmoke reiju,61 +villager (minecraft),61 +viletta badam,61 +vikavolt,61 +vermilion city school uniform,61 +vanessa lewis,61 +vampire (gogalking),61 +valueless0000,61 +uzume (gino),61 +usui seri,61 +ultra recon squad uniform,61 +uehara mutsuki,61 +uechin ewokaku,61 +tyrantrum,61 +tumtumisu,61 +tsumamigui 3,61 +tsuji,61 +tsuchimi rin,61 +transformers armada,61 +touwa meme,61 +too many flowers,61 +tonari no seki-kun,61 +tomo (machinemess),61 +tomatojam,61 +tokumei wombat,61 +toe gojuu,61 +title screen,61 +titanfall,61 +tira 27,61 +tiphereth a (project moon),61 +tiki (dragon's crown),61 +tikano,61 +think mark think! (meme),61 +thesale,61 +the great ace attorney 2: resolve,61 +the creation of adam,61 +tendou kasumi,61 +tarpaulin,61 +taokaka (cosplay),61 +tanuki costume,61 +tanu,61 +tania (little witch nobeta),61 +tanaka gorbachev,61 +tam (ragnarok online),61 +takeawatch,61 +tajador (ooo combo),61 +tai (2vs),61 +t2,61 +t-track,61 +suzumi (hetza),61 +suzuki tsuta,61 +surfacage,61 +summoner aldra,61 +sumi elias,61 +suketoudara (artist),61 +sugar (food),61 +storm drain,61 +sten gun,61 +star wars: revenge of the sith,61 +spoink,61 +souen hiro,61 +soruna (nell),61 +solution epsilon,61 +smile (.flow),61 +sky striker ace - roze,61 +sizuru (garasuame0206),61 +silver gloves,61 +silco (arcane),61 +siki2046,61 +sidoh (dqb2),61 +sibelurabbi,61 +si10ra,61 +shuvi (no game no life),61 +showerz,61 +shouyan,61 +shocho,61 +shishidou imoko,61 +shirou (shiro uzr),61 +shiratakiseaice,61 +shinozaki ayumi,61 +shinomiya shiori,61 +shinohayu the dawn of age,61 +shin01571,61 +shikishima (eiri),61 +shiina yuuki,61 +shaneru,61 +serious graphics,61 +seri (vyrlw),61 +sergei (pattundo),61 +serena (palentine's 2021) (pokemon),61 +sera masumi,61 +senri (senri sen),61 +senmen kinuko,61 +sengoku chidori,61 +sena shiori (idolmaster),61 +sen no rikyu (fate),61 +seikoku no dragonar,61 +sea anemone,61 +sawasa,61 +satoimoya,61 +sapphire rhodonite,61 +sanbi (reku),61 +san sheng wan,61 +san mamiya,61 +san francisco,61 +same 2009,61 +salem (rwby),61 +sakura tale,61 +sakura rock,61 +sakamoto clan (emblem),61 +saitou hajime (hakuouki),61 +saida nika,61 +sai (saipoko),61 +safeguard (blame!),61 +saegusa riko,61 +saegusa mayumi,61 +saegusa futaba,61 +sad cat dance (meme),61 +ryon (ryonhei),61 +ryoku sui,61 +rurukichi,61 +ruri (aohada bocchi),61 +rr (suisse200),61 +rozarin,61 +rowdon,61 +ross (clumzero),61 +ronguuta,61 +romi,61 +rokushiru (muzai-p),61 +rococolove,61 +rocket boots,61 +robocop,61 +ro risu,61 +ring fit trainee,61 +riko (kujira215),61 +ribbed socks,61 +reizouko,61 +reiza,61 +red fundoshi,61 +rebirth42000,61 +reaper (final fantasy),61 +re-l mayer,61 +rascal,61 +rapping,61 +ranpha (princess connect!),61 +rampardos,61 +raisuta,61 +raika (rune (pixiv 25170019)),61 +rae (offrecord),61 +purple moon,61 +punch line,61 +ptrd (girls' frontline),61 +protagonist (elona),61 +protagonist (doki doki literature club),61 +princess sapphire,61 +pretzel pose,61 +pretty guardian sailor moon,61 
+porridge,61 +pori (poritan81),61 +pool party leona,61 +pokemon 3: the movie - spell of the unown: entei,61 +pokemon: the first movie - mewtwo strikes back,61 +pmx,61 +pleo,61 +plectrum in mouth,61 +piyoko,61 +pistachiocream,61 +pirano,61 +pioneer movement,61 +pink male underwear,61 +pink loli (rinechun),61 +pharaoh (cat),61 +persona q2: new cinema labyrinth,61 +pepper project,61 +penis in swimsuit,61 +peeing in bottle,61 +pecopecosupipi,61 +peafowl (kemono friends),61 +paul robertson,61 +panzer dragoon,61 +pantherlily,61 +pandora (mega man),61 +panamaman,61 +paladin (baalbuddy),61 +p (flavorppp),61 +ozaki mirai,61 +osiris the sky dragon,61 +orpheus (persona),61 +ophelia (bigrbear),61 +oomichi miyabi,61 +okii,61 +okiba ga nai!,61 +okanoyuno,61 +ohnishi yuriko,61 +nyamo,61 +nue day,61 +note (dragon ball),61 +nogchasaeg (karon2848),61 +nobel gundam,61 +niwata0,61 +nishikawa (fe),61 +nishikata chii,61 +nintendo dsi,61 +ninniku mashimashi,61 +nickit,61 +nean,61 +nazgul,61 +narongchai singhapand,61 +nabari no ou,61 +mystic knight,61 +mysterious heroine xx (fate) (cosplay),61 +myrtle (light gold celebration) (arknights),61 +muyoshito,61 +multiple wielding,61 +multicolored neckerchief,61 +mugi-co,61 +mprichin,61 +mp7 (lollipop ammo) (girls' frontline),61 +mouse marisa (yuasan),61 +morujii,61 +morokoshitaroh,61 +morizono wakana,61 +morikawa (futomayu),61 +morganagod,61 +mon momu,61 +model zx (mega man),61 +model x (mega man),61 +mobile suit gundam the origin,61 +mo xiaoxue,61 +mizuto (o96ap),61 +mizuno nanatsu,61 +mizumori (xcllcx),61 +mizuki sasahara,61 +miyamoto ruri,61 +miyabi mt-b,61 +mitsuba-sama (milkba-ng),61 +mist train girls,61 +miss crane (fate),61 +misaki shizuno,61 +misaka mikoto (cosplay),61 +mirin.,61 +minobu jentoru,61 +minatsuki kou,61 +mikumo (lpmkookm),61 +meziosaur,61 +mewo,61 +metalwasabi,61 +metae,61 +meneru,61 +melty q melromarc,61 +meltryllis (swimsuit lancer) (fate) (cosplay),61 +matsuzaka satou,61 +mask around one ear,61 +mashin eiyuuden wataru,61 +marufuji izumi,61 +marino (mega man),61 +marina (mrn9),61 +marie rose (cosplay),61 +mare s. 
ephemeral,61 +mantou xiang,61 +makise kurisu (cosplay),61 +mail (popful mail),61 +mai kobayashi,61 +mahou no star magical emi,61 +mag (mag42),61 +mafiaduck (nilan),61 +madkaiser,61 +maco22,61 +macaron background,61 +mac-10 (girls' frontline),61 +lydia macarthur,61 +lvans,61 +lows.,61 +love money rock'n'roll,61 +loose neck ribbon,61 +link cable,61 +linde (octopath traveler),61 +limbo,61 +lillymon,61 +lilia chocolanne,61 +licking self,61 +licking dildo,61 +levi9452,61 +leonard mitchell,61 +lempika,61 +lemonrou,61 +leaf (sygna suit) (pokemon),61 +lazuri7,61 +layered swimsuit,61 +latte (precure),61 +laovaan,61 +landorus,61 +ladymade star,61 +lace-up heels,61 +lace-trimmed leotard,61 +kurosaki makoto,61 +kuroneko (fragrant olive),61 +kuroda kuwa,61 +kurobe sclock,61 +kurihara mari (prison school),61 +kurige horse,61 +kureha goya,61 +kuraikurairey,61 +kumichou (nakasato-gumi),61 +kuka (princess connect!),61 +kubrick stare,61 +ksg (girls' frontline),61 +kouga yun,61 +kosmos beta,61 +kosaki wit,61 +koraku gekki,61 +kommo-o,61 +komaro-chan,61 +koiso kenji,61 +kohinore,61 +kohaku (kohagura),61 +kobayakawa horan,61 +kizuna ai (anniversary),61 +kitami erika,61 +kisukekun,61 +kiss to lord to darjeeling,61 +kirikaze ren,61 +kiiroi tamago,61 +kicchi (tmgk),61 +kei (keigarou) (style),61 +kazura,61 +kazumiya rio,61 +kaze minoru so-ru,61 +kawarage,61 +kawaguchi yukihiro,61 +katsura miya,61 +katsuki hiroko,61 +kata meguma,61 +kashiwagi hatsune,61 +kashiwagi haruko,61 +kase (kurimuzone oruta),61 +karumaruka circle,61 +karbuitt,61 +karaage (ta-na),61 +kanichiri,61 +kamura poku,61 +kamoku nagi,61 +kamen no maid guy,61 +kajou ayame,61 +kachuten,61 +just because!,61 +jtaka,61 +jotman,61 +jonathan joestar's pose (jojo),61 +joker game,61 +jisue10,61 +jippe,61 +jika-tabi,61 +jet set radio future,61 +jeibii,61 +jean-louis (yuasa rengou),61 +jacob (housamo),61 +izumi kikaku,61 +izu lemon,61 +isono satoshi,61 +irvine kinneas,61 +iron maiden jeanne,61 +inoue jun,61 +inou takashi,61 +in pocket,61 +in'youchuu,61 +imminent hand holding,61 +imi galil,61 +ill (0022),61 +ilfriede von feulner,61 +ikusy,61 +ichijo daisuke,61 +ichijiku,61 +ichihyaku nanajuu,61 +ichi (13xxx),61 +hydaelyn,61 +humvee,61 +huai diao de zongzi,61 +hs2,61 +hospital rokunin no ishi,61 +hoshijiro shizuka,61 +hoshifuri sosogu,61 +horezai,61 +honeycoming royalsweet,61 +honey badger (girls' frontline),61 +holy roman empire (hetalia),61 +holland novak,61 +holding clover,61 +hokuto (hokuto pk),61 +hokaze junko,61 +hiwonoafu,61 +hisone to masotan,61 +hiruma youichi,61 +hip flask,61 +hindenburg,61 +himuro akari,61 +hikikomori,61 +hikariz,61 +hiiragi mine,61 +higurashi ryuuji,61 +higa izuru (idsuru),61 +hidepoin,61 +hhhori,61 +hellsing: the dawn,61 +heijitsu (paapuu),61 +hecchi (blanch),61 +heart hunter (module),61 +hc (razel1),61 +hazuki shino,61 +hazuki (nature.),61 +hatsukano you,61 +haruto (harut n),61 +harumi chihiro,61 +hand on mirror,61 +hanbun no tsuki ga noboru sora,61 +hanasaki work spring,61 +haikimono shounen,61 +gundam ms igloo,61 +grey scrunchie,61 +gordon freeman,61 +gordo,61 +gomrang,61 +gol d. 
roger,61 +glowworm (azur lane),61 +glowing hot,61 +gligar,61 +gindoro,61 +gijou mitsumi,61 +gene (gear gene),61 +geduan,61 +galatea (fate),61 +gakki (gaku suru),61 +fuyuni0307,61 +fuwafuwa 35,61 +fuuko chan,61 +funamusea,61 +funami mari,61 +fumyuun,61 +full cowling (boku no hero academia),61 +fujimura shizuru,61 +fuji minako,61 +frozen ii (disney),61 +frilled nightgown,61 +frilled corset,61 +francine (daijaemon),61 +frame arms,61 +fox costume,61 +forsyth (fire emblem),61 +food truck,61 +fireseal,61 +finger marks,61 +ferris eris,61 +feater (dojo star) (arknights),61 +favilia,61 +fat (artist),61 +evangeline a.k. mcdowell (adult),61 +ethan forsythe,61 +ernesto de la cruz (alive),61 +enterprise (pacific),61 +emukon,61 +emukami,61 +emelia pris,61 +elwing,61 +eli conifer,61 +electric kettle,61 +einamu,61 +ein (phantom),61 +edowan,61 +ebiten (manga),61 +ebiebiebio,61 +ebenholz (arknights),61 +ducati,61 +dress pants,61 +dpea9,61 +donkey kong (game),61 +dolphenry,61 +doboshiru,61 +dj sona,61 +didi esmeralda,61 +dialogue options,61 +destiny (takt op.),61 +deoxys (attack),61 +demonion ii: maou to sannin no joou,61 +deeezel,61 +dango-chan (4shi),61 +daicon iv,61 +cure precious,61 +cunimura1584,61 +cthugha (housamo),61 +cruel gz,61 +cromwaits,61 +crazypen,61 +crab on shoulder,61 +crab hair ornament,61 +compression shirt,61 +comichipota,61 +comic grape,61 +colored shoe interior,61 +collie,61 +cloverse6,61 +cloudxmoe,61 +clona,61 +claes,61 +circlek,61 +cima garahau,61 +cilica,61 +cid nan garlond,61 +ciconia no naku koro ni,61 +christianity,61 +chocotto715,61 +chise (saishuu heiki kanojo),61 +chiko (mizuho),61 +chiko (d04099),61 +chidori (@rom),61 +cherry panties,61 +chen yang yang,61 +chatsune (white lolita),61 +champ+,61 +centi (nimu),61 +cea se,61 +cavorite ball,61 +camouflage trim,61 +california (azur lane),61 +calenda (kemono friends),61 +bugs bunny,61 +brooklyn (azur lane),61 +bronzor,61 +bronze parrot,61 +bridge piercing,61 +bosack,61 +bonjin (pageratta),61 +boey (fire emblem),61 +bochibochi (gyuuniku 6000),61 +blizzard (company),61 +black soldier,61 +black rock shooter: dawn fall,61 +bizen,61 +birdhouse,61 +bindle,61 +baumku techen,61 +balloon animal,61 +baker nemo (fate),61 +baker at bat,61 +bachou mouki,61 +aya (oneechanbara),61 +awan0918,61 +awake,61 +awachi,61 +aversa (fire emblem),61 +auguste,61 +augusta (granblue fantasy),61 +audie (animal crossing),61 +asurada yui,61 +asukaru (magika ru),61 +asta rindo,61 +ashitaba tomorou,61 +ash (rainbow six siege),61 +artificial wings,61 +ark ford,61 +arimoto wataru,61 +arigato (rmskrtkdlqj),61 +ari (shichigatsu),61 +argentea (darling in the franxx),61 +araya souren,61 +aosuke (ayakawa akito),61 +aoshima sakana,61 +aono tsukune,61 +aoi (aoisaka),61 +anyuu,61 +anming,61 +ankoromochi,61 +animal on leg,61 +anderson m0311,61 +an'no natsume,61 +amatsutsumi,61 +amamiya marron,61 +amagi korona,61 +amagaya (rat-tat-tat),61 +algernon (housamo),61 +alf874,61 +akira slide,61 +akinoko,61 +akimitsu-dono,61 +akiko 141,61 +aki inu,61 +akebi (kakororo),61 +aizenpochi,61 +aizawa takeru,61 +airgetlam (fate),61 +ailac coega,61 +aigan tenshi cheery pink,61 +aharen-san wa hakarenai,61 +aguri (aguri0406-aoi),61 +adachi (ioioi),61 +abe tsukumo,61 +298yenomiso,61 +1st-mn,61 +021 shiro,61 +00 raiser,61 +zinnkousai3850,60 +zinan,60 +ziku driver,60 +zhixiang zhi,60 +zhineart,60 +zentai,60 +zell dincht,60 +yxyyxy,60 +yuzhou,60 +yuubari (kancolle) (cosplay),60 +yuri (chocho q),60 +yummy (yumyumyummy),60 +yukiusagi1983,60 +yukishiro mahiro,60 +yukino sayuri,60 +yukikaze (warship 
girls r),60 +yueguang zhanglang,60 +yosimura,60 +yoshizumi kazuyuki,60 +yoshitome miharu,60 +yooani,60 +yoneko okome,60 +yokura (yukilina),60 +yellow halo,60 +yaten,60 +yan lie,60 +yami anko,60 +yamato kai (kancolle),60 +yajirushi kaku,60 +yagyuu katsuda,60 +yadoso,60 +yabby,60 +xue qi ye (dndtmbdue),60 +xiao qiong,60 +worthlessvalor,60 +wonchun,60 +winged victory mercy,60 +windia (deathsmiles),60 +wichita (azur lane),60 +whirlipede,60 +weyas kayur,60 +water beryl,60 +wakura yuina,60 +wakanita,60 +waiai,60 +waccha primagi!,60 +volkswagen type 2,60 +volkies,60 +veemon,60 +vante,60 +valhalla valkyries,60 +v1v404,60 +usuaji,60 +uruuru,60 +uru uzuki,60 +uroko (pattern),60 +urakata (uracata),60 +upa (steins;gate),60 +unown l,60 +umbrella octopus,60 +ultra seven,60 +uchino maiko,60 +uchida maaya,60 +type 3 chi-nu,60 +type: null,60 +twinkle star (idolmaster),60 +turret (portal),60 +tullece,60 +tuchinokoeffect,60 +tsuyama mutsuki,60 +tsurumaru tsuyoshi (umamusume),60 +tsumire (takane lui),60 +tsukeo,60 +trunkdiary,60 +triangle heart 3,60 +triandra (fire emblem),60 +trevor (pokemon),60 +transparent butterfly,60 +toy boat,60 +towamin,60 +toudou (dolce),60 +totto,60 +totteri,60 +toromi chuuka,60 +toor 0111,60 +tonnura-san,60 +tomoyo after,60 +toma (amnesia),60 +tokufumi,60 +tokimeki check in,60 +togi9999,60 +tnonizyou,60 +tina (pixiv37050289),60 +tianliang duohe fangdongye,60 +thunder hawk,60 +therrao,60 +teyuruun,60 +teena (granblue fantasy),60 +tara baka ni,60 +tanono,60 +tamiko (paseri),60 +tama (tmfy5),60 +takeda hinata,60 +takano masayuki,60 +tak (karasuki),60 +swordsman (sekaiju 4),60 +suno (imydream),60 +suinose,60 +sugawara takurou,60 +subway (company),60 +submarine 707r,60 +studioqube,60 +strawberry-chan,60 +stereogram,60 +stephanie brown,60 +springfield (classic witch) (girls' frontline),60 +spoken venus symbol,60 +splash-o-matic (splatoon),60 +souma mizuki,60 +somna,60 +somber,60 +snufkin,60 +snowflake pendant,60 +slumcat,60 +sloth (fma),60 +skye (hcnone),60 +skilled lookouts (kancolle),60 +sinon (sao:hf),60 +sin mal,60 +silver meteor,60 +sierokarte,60 +shunrai,60 +shunga youkyu,60 +showdown,60 +shoulder devil,60 +sho (sho lwlw),60 +shizuna rem misurugi,60 +shizu-chan,60 +shirogane (fox),60 +shirasawa kazane,60 +shiny tambourine,60 +shinobe,60 +shin (sin-maniax),60 +shimotsuki iko,60 +shikyouin hibiki,60 +shiki natsume,60 +shijukara (great tit),60 +shepherd,60 +shazhiqiao,60 +shaggy rogers,60 +sevi (seviyummy),60 +seseragi azuma,60 +seo akira,60 +senzi,60 +sengoku gensoukyoku,60 +seikon no arcana,60 +see-through (psg),60 +sebire,60 +scapular,60 +sayu (mio-mosa),60 +sawatari fuu,60 +satsuki (quiz magic academy),60 +sasha chii,60 +sarana,60 +sanada taketo,60 +samako,60 +sakusya2honda,60 +sakuramochi n,60 +sakata kintoki (heian warrior attire) (fate),60 +sakai yume,60 +saitama (antitankromeo),60 +saisoku no yukkuri,60 +saikunartworks,60 +sagano yuuji,60 +saeba ryou,60 +ryuuenji tasuku,60 +ryn (rinorea),60 +ruru (lulubuu),60 +rowan (pokemon),60 +rose pacifica,60 +romeo montague (cosplay),60 +rolling sleeves up,60 +roko roko (doradorazz),60 +rohgun,60 +rob lucci,60 +rivals,60 +ribbon (rabi-ribi),60 +rhys (fire emblem),60 +rhinoceros ears,60 +reuri (tjux4555),60 +remington acr,60 +relius clover,60 +refrigerator magnet,60 +red tunic,60 +red circle,60 +rebel alliance,60 +ramu-on@ shinon,60 +rainsp,60 +qoopie,60 +qbu-88 (girls' frontline),60 +push down,60 +purple bandana,60 +pulque,60 +pukonuu,60 +psyren,60 +princess white rose,60 +princess waltz,60 +princess melody (idolmaster),60 +powder puff,60 
+potion (pokemon),60 +poppippoo (vocaloid),60 +polka dot sleeves,60 +pokemon card,60 +pochi-a,60 +plaid kimono,60 +place name,60 +phino,60 +pepeo,60 +pen-pineapple-apple-pen,60 +pecorine (real) (princess connect!),60 +peco (pockleberry),60 +pclbang,60 +pawniard,60 +papel,60 +panzerkampfwagen viii maus,60 +palow,60 +paji,60 +pachi (sugiyama0306),60 +paayan (hagakinorock54),60 +oshi ga budoukan itte kuretara shinu,60 +ooba jun,60 +onomi9ta,60 +onizuka hime,60 +omuni,60 +okyao,60 +okakan,60 +oginouchihara yuki,60 +oge (ogeogeoge),60 +o2 (o2mm),60 +nyan cat,60 +nureha (log horizon),60 +noumu (pixiv),60 +nona drops,60 +noise reduction,60 +nobuya,60 +noah (little noah),60 +niwa toriko,60 +nitamago,60 +ninjara (arms),60 +nil sunna,60 +nikitan (niki),60 +new balance,60 +nerine (flower knight girl),60 +neopara,60 +nekoyanagi kirio,60 +nekota tsuna,60 +nekonote (neko-no-te92),60 +nekoma hikaru,60 +neko-hime (neko-hime),60 +negativezero,60 +neeko's mother,60 +nazrin (cosplay),60 +natsu yume nagisa,60 +nanopai kakumeikokonoyu,60 +nanami ao,60 +namyo,60 +nami qi,60 +myu (neorosi),60 +mysterious heroine x (fate) (cosplay),60 +muzinneki,60 +muu shuwuu,60 +musunde hiraite (mh5jta),60 +mushi mushi ex,60 +muppets,60 +munya,60 +munuko,60 +mukoujima tenro,60 +mugimugis,60 +motto notto,60 +mosquito girl,60 +morochin (mo loss an),60 +morishima hitoshi,60 +mori yashiro (konkon oyashiro),60 +mordred (fate/apocrypha) (cosplay),60 +monster hunter stories,60 +mona (2ch),60 +momose hikaru,60 +mofu07519,60 +moegi (honey 122),60 +mochi-iri kinchaku,60 +mnjs,60 +mizutani megumi,60 +mizuno makoto (green),60 +mizukoshi mayu,60 +mizuki gai,60 +mizoreame,60 +miyabi (ninin ga shinobuden),60 +mitty (made in abyss) (furry),60 +mitsurugi lia,60 +mitama ~shinobi~,60 +mistress (dungeon and fighter),60 +mission impossible (bkub),60 +mirokuji yuuya,60 +mirim,60 +miriam hildegard von gropius,60 +ming (3952862),60 +minertime,60 +minazuki minao,60 +minase nagi,60 +minamihama yoriko,60 +minaka shobu,60 +mina carolina,60 +million chain,60 +millaarc,60 +milcona,60 +mikoto (stbk),60 +mikazuki shigure,60 +mifu (b24vc1),60 +midoriya inko,60 +mexico salamander (kemono friends),60 +metal sonic,60 +mesh,60 +meru02295238,60 +merryhachi,60 +mei ichi,60 +mea (hwaksal),60 +mayhem art,60 +mattie (ego trigger),60 +matilda (tank),60 +marik ishtar,60 +maria (space maria),60 +mari (little crown),60 +mari (doraerin0),60 +maou (mitosansan),60 +mamecchi,60 +makken,60 +makkamu,60 +makano mucchi,60 +mak066,60 +maekawa,60 +madcore,60 +m134 minigun,60 +lunatic (ragnarok online),60 +lunasanguinis,60 +lord guyis,60 +live twin lil-la,60 +liu guniang,60 +ling mou,60 +lily (shiei no sona-nyl),60 +light hello (umamusume),60 +lif (ragnarok online),60 +lianna (fire emblem),60 +levi elipha,60 +leonidas,60 +leona of blood & iron,60 +lemur tail,60 +leaf lsd,60 +latex pants,60 +larva tiamat (fate),60 +laputa robot,60 +lapis (sennen sensou aigis),60 +lanyaojun,60 +kyouki,60 +kurumi (koutetsu tenshi kurumi),60 +kurona (neko musume michikusa nikki),60 +kurojishi (zannen onna-kanbu black general-san),60 +kuroinu (sonoba shinogi),60 +kudou (gst910),60 +koyuki (azumaya999),60 +koyoi (ruka),60 +koyama shigeto,60 +koumei (harmonizer),60 +korai (horokusa),60 +koopa clown car,60 +koneko (nonnkimono),60 +knit sweater,60 +kliff (fire emblem),60 +kjerag logo,60 +kizuna ai (musician),60 +kitchen scale,60 +kisakinomiya chihaya,60 +kinosaki yuki,60 +kinaco 4738,60 +kikurage (dorohedoro),60 +kiki-yu,60 +kidatsu! 
dungeons lord,60 +kei-chan (ultimatekeichan),60 +kazuki kan,60 +kazehaya shouta,60 +kazane mari,60 +katorea,60 +katakai,60 +katagiri yuuhi,60 +katagawa mika,60 +kasumi (suikoden),60 +kashiyama,60 +kashiwa (3920kashiwa),60 +kashiru,60 +kartana,60 +karonaru,60 +karenina (punishing: gray raven),60 +karasu btk,60 +karanagare 4,60 +kanojo tachi no ryuugi,60 +kanno fumi,60 +kamumiya,60 +kamue,60 +kamen rider geats,60 +kakouen myousai,60 +kakon,60 +kaga nazuna,60 +kaga kouko,60 +kaeru kenshi,60 +kaburagi t. kotetsu (cosplay),60 +k.o.,60 +jyako (bara-myu),60 +joker (dc) (cosplay),60 +joe rikiichi,60 +jiman,60 +jiliang jiying yumao,60 +jellyfish girl,60 +jay87k,60 +jacknavy,60 +izumo ayuka,60 +izumi kanagi,60 +izumi (swimsuit) (blue archive),60 +island lagoon,60 +isekai wa smartphone to tomo ni.,60 +isabelle (acerailgun),60 +ironhide,60 +iroha karuta,60 +interrupted,60 +inaba reito,60 +imada hidehito,60 +ilsa (summer) (granblue fantasy),60 +ichikawa,60 +husagin,60 +hourainingyou,60 +hotarugusa (onmyoji),60 +honolulu (among the stalls) (azur lane),60 +honjou mikaze,60 +hiyori sou,60 +hiyori (nyatto),60 +hiver laurant,60 +hitsugi katsugi no kuro,60 +hitobashira alice (vocaloid),60 +hisa tsuki,60 +hirasaka hatsune,60 +himemiya ruri,60 +himawari (hishu),60 +higa,60 +hetaren (ramark),60 +hero charger (splatoon),60 +hepas (haevest),60 +helltaker dance,60 +helen (claymore),60 +helbindi (fire emblem),60 +heart bra,60 +hauto-san,60 +hatopoo (beach7pijon),60 +hashira 14,60 +harukanaru toki no naka de 4,60 +haru ion,60 +harry mason,60 +hariyama,60 +happy synthesizer (vocaloid),60 +hands on own crotch,60 +hamburger hat,60 +ha (hura76752775),60 +guru,60 +gurepyon,60 +grotle,60 +green santa costume,60 +gomibukurokarasu,60 +getto,60 +gecko4488,60 +gascogne (tropical environs acclimation service equipment) (azur lane),60 +gardavwar,60 +gagajya,60 +fuyuumi ai,60 +fuu (futian),60 +fushimimukai sanae,60 +fushicho,60 +funaya (a2brasd),60 +fukaziroh (sao),60 +frelia (ar tonelico),60 +foxhound (azur lane),60 +fou-lu,60 +fn scar 17,60 +first ken,60 +finaltakenoko,60 +fif (granblue fantasy),60 +ffcreatyuuki,60 +fai fai,60 +f.l.u.d.d.,60 +f-18 hornet,60 +esp guitars,60 +error1945,60 +enoki art,60 +emunise,60 +emu (eomou),60 +einar (personal ami),60 +eina tulle,60 +ebr-kii,60 +dracula,60 +dotsu (wtnbkwiyd),60 +dorothy gale,60 +dorisu2,60 +dokudoku913,60 +dodo (yatotoyatoto),60 +docu (doppel),60 +djeeta (granblue fantasy) (cosplay),60 +dh (brink of memories),60 +devil bringer,60 +desperado (yotaro),60 +dayshiart,60 +dark magical circle,60 +dark eclair,60 +daitirumoesu,60 +daidouji (senran kagura),60 +daga kotowaru,60 +cyrillic commentary,60 +cum on chest,60 +cross-laced swimsuit,60 +craft-cs,60 +coupy pencil (medium),60 +conomi-c5,60 +cona ember (kkamja),60 +combat s-ko,60 +comb over,60 +coin flip,60 +cno,60 +clam curry,60 +chomikuplus,60 +chocolate clothes,60 +chiri to mato,60 +chilli 646,60 +chiika (cure cherish),60 +chiba yuriko,60 +chi xiao (arknights),60 +cheeky little star,60 +charizard pose,60 +chance maker,60 +celestial globe,60 +cathyl,60 +castration,60 +canon memphis,60 +camouflage legwear,60 +camouflage bra,60 +calico m950,60 +byron (pokemon),60 +byoin,60 +byakko (kemono friends),60 +by spoon,60 +burn-up,60 +bunting,60 +broken pillar,60 +broken heart print,60 +broken arrow,60 +braum (league of legends),60 +boss,60 +bokujou monogatari: yasuragi no ki,60 +boku girl,60 +blue (happinesscharge precure!),60 +blanket hug,60 +birdie (street fighter),60 +bibyo,60 +beluga dolphin,60 +bella (a-soul),60 +belfast (the pledge 
of claddagh) (azur lane),60 +bekkomi,60 +beek,60 +beak hold,60 +baraba baba,60 +banana oekaki,60 +baldr sky,60 +bakanoe,60 +baijiin poison,60 +azami masurao,60 +ayukawa miyuki,60 +ayase yuuki (mikan mochi),60 +aya tori,60 +asasow,60 +asamiyajy,60 +as4gi,60 +arms on head,60 +arkfield,60 +arisegawa arle,60 +ariel (yu-gi-oh!),60 +archellaura,60 +araquanid,60 +arakawa (aintnoroom),60 +aoki sei,60 +antidote,60 +anna (omoide no marnie),60 +android (os),60 +anaroguya,60 +amethyst (steven universe),60 +amemura (caramelo),60 +amatsuki hotaru,60 +amaroku neko,60 +amano akari,60 +alternative facts in eastern utopia,60 +alpha (ypalpha79),60 +alknasn,60 +akiba rika,60 +aka kan,60 +aji (coastal area),60 +aizawa85,60 +aisia,60 +aida (chinhung0612),60 +ai xiao meng,60 +agro (shadow of the colossus),60 +adele (fate),60 +a082,60 +a-na,60 +a-10 thunderbolt ii,60 +76gpo,60 +73suke,60 +286c,60 +zygarde (50%),59 +zumizu,59 +zippo lighter,59 +zhou yu (ppaaqz1995),59 +zhi jiyang,59 +zhen long,59 +zakku (kya--193),59 +yuncha,59 +yumeoji shiori,59 +yukinokouji nobara,59 +yukimura anzu,59 +yuki56,59 +yukasu,59 +yui (princess) (princess connect!),59 +yugiri perserte,59 +yuchio,59 +youkoso! sukebe elf no mori e,59 +yotsuyu,59 +yostar pictures,59 +yoshika fuumi,59 +yomi (indigoriver),59 +yokohama,59 +ymr,59 +yanagi joe,59 +yan vismok,59 +yan-baru,59 +yamaguchi mami,59 +xong,59 +xiin,59 +xenosaga episode ii,59 +x-wing,59 +wwwazxc,59 +wild mane (arknights),59 +whispering for nothing,59 +werewolf costume,59 +wedding peach,59 +watashi no oshi wa akuyaku reijou,59 +warumono tomii,59 +walk cycle,59 +wakaokami wa shougakusei,59 +waha (artist),59 +wabiushi,59 +vlad iii (fate/extra),59 +vivid507,59 +virgo,59 +vhs (girls' frontline),59 +varamill,59 +vajra (macross),59 +uyuyuun,59 +uwabami breakers,59 +utsuhostoria,59 +uryuu sakuno,59 +urano ura,59 +un (le monde-alternatif),59 +ug (ugg),59 +uehara ayaka,59 +uchino chika,59 +tyranu,59 +tyenka7728,59 +twice (boku no hero academia),59 +tsushima (kaikyo),59 +tsunogiri,59 +tsukasa-emon,59 +toyosaki aki,59 +toyomi 13,59 +toy robot,59 +tornadus,59 +torinoko city (vocaloid),59 +torikabuto (flower knight girl),59 +tonkatsu,59 +tonan leopard,59 +tomatolover16,59 +tokkyuu mikan,59 +tokimeki memorial girl's side 4th heart,59 +toenketsu,59 +tiona hyryute,59 +timeline,59 +time,59 +tianlluo,59 +thumbtack,59 +thorr (fire emblem),59 +the art mage,59 +tazbun,59 +taxidermy,59 +taste of a liar (meme),59 +tangle,59 +tales of the tempest,59 +takatsuki yoshino,59 +takatou rui,59 +takakura,59 +taiyou akari,59 +taguchi shouichi,59 +tachanka (rainbow six siege),59 +syu45,59 +symbol ricochet,59 +swimsuit under swimsuit,59 +sweetonedollar,59 +suzurino,59 +suzuno,59 +suzune kotora,59 +suzuki ken'ya,59 +suppa (hagakuresuppa),59 +super sonic,59 +super robot wars t,59 +sunafuki tabito,59 +summer rose,59 +submarine sandwich,59 +striker eureka,59 +stella bremer,59 +squemezzo,59 +sprite sheet,59 +sploot,59 +spider boy,59 +spider-man (cosplay),59 +spica (sumaga),59 +sowamame,59 +sota,59 +soraru,59 +sonota taisei,59 +solty revant,59 +soejima shigenori (style),59 +sliggoo,59 +sky surfing,59 +sinmo (dolomang),59 +sigma 2018,59 +sig 516,59 +shoukan yuusha to f-kei kareshi,59 +shosudo,59 +shiwashiwa no kinchakubukuru,59 +shirubaa,59 +shironekoban,59 +shiroi inu,59 +shirogami seisho,59 +shiro yurine,59 +shiro-inu,59 +shiro-chan (mignon),59 +shirai yu,59 +ship in a bottle,59 +ship deck,59 +shiono (0303),59 +shinonome kazuhiko,59 +shimotsuki miri,59 +shimesaba (simesabaikka),59 +shikihime zoushi,59 +shiki makoto,59 
+shiikeru,59 +shichijou aria,59 +she holds my weakness (meme),59 +sekine irie,59 +secre swallowtail,59 +scrotum piercing,59 +saya (majo no tabitabi),59 +saucepan,59 +satou yasu,59 +sapporo beer,59 +sandrone (genshin impact),59 +samurai calibur,59 +samidare (kancolle) (cosplay),59 +samantha (admiral bahroo),59 +sakuyabm,59 +sakuraniku umatarou,59 +sakuragi akira,59 +sakura momoko,59 +sakura honoka (srhk0623),59 +saki (little crown),59 +sakana-ko,59 +sailor moon musical,59 +saibe,59 +sabu (sabudenego),59 +ryo-suzuki,59 +ruuen rouga,59 +rupika (pso2),59 +rulebreaker,59 +riokasen,59 +rinmmo,59 +ri (qrcode),59 +relio db318,59 +regan (hatsumi),59 +reagan long,59 +ranan,59 +ran (7th dragon),59 +rainys bill,59 +raiden shogun (cosplay),59 +quick man,59 +quan zhi gao shou,59 +qrow branwen,59 +qiqu,59 +q (street fighter),59 +purple (among us),59 +pukao,59 +ptilopsis (serenity) (arknights),59 +psychogun,59 +prinz eugen (kancolle) (cosplay),59 +positive passion (idolmaster),59 +ponta (matsuokazieg),59 +plaid collar,59 +petag2,59 +persona q: shadow of the labyrinth,59 +parasite oyatsu,59 +pantyhose under buruma,59 +pandora party project,59 +pallette (mega man),59 +paku paku,59 +ozyasadayo,59 +ouma bunshichirou,59 +osanai yuuta,59 +organ rn,59 +orchestra,59 +open drawer,59 +onsen nakaya,59 +okeno kamoku,59 +okamura hijiki,59 +odoc,59 +octobrush (splatoon),59 +ochanomizu hiroshi,59 +obelisk,59 +nyako (lhq3p),59 +nvidia,59 +noshimasa,59 +nonohara hime,59 +noise (hakusen),59 +noda kotone,59 +nobusnow,59 +nobu baka,59 +nobicco,59 +noah fantasy,59 +njike,59 +nishiyama maruta,59 +nipple sleeves,59 +nipa~,59 +nina klatt,59 +niiyan,59 +newhalf with newhalf,59 +nerua,59 +nekoyuu,59 +nekomanma (byougatei),59 +nekoman (nukomann),59 +nekokan-nekokan,59 +neko (k),59 +nefertari (tsukudani),59 +needle (needlebomb),59 +nebukuro,59 +nayuta-kanata,59 +natsuhara,59 +nas-o,59 +narwhal,59 +naoto (96neko96),59 +nami no manimani,59 +nakkasu,59 +nahanmin,59 +nagoriyuki,59 +nagihashi koko,59 +nagare kei,59 +nagaoka shiho,59 +myr (p&d),59 +muzet (tales),59 +munaage,59 +multiple style parody,59 +muki (muki kunxd),59 +mr. 
satan,59 +motonaga hiroya,59 +moto toshi,59 +mortal kombat x,59 +moritaka (housamo),59 +moriichi,59 +morichan,59 +monmonism,59 +momono shiori,59 +molly hale,59 +moja (rainpoow),59 +moizumi shipon,59 +mochityoko,59 +mizuki saori,59 +miyasumi (jam session),59 +miyamae tomoka,59 +miya (tokumei),59 +mitsuki (goomoku),59 +mita kurumi,59 +misuzu (iridescence),59 +mirusa,59 +miroku (inuyasha),59 +miracles of the holy night (umamusume),59 +mintes,59 +minaho kazuto,59 +minagawa yuuhi,59 +milady (mechanical buddy universe),59 +mikuni (open-ranka),59 +mikoto cluster,59 +mikoto (mikoto r a),59 +mikogami rimu,59 +mikochi (hakumei to mikochi),59 +mikaze ai,59 +mikaku,59 +mikafen,59 +mii (popotan),59 +mgn0isi,59 +mepo (raven0),59 +meow (space dandy),59 +menoko,59 +mayuzumi makiha,59 +mayutsuba mono,59 +mavis vermilion,59 +matatabi (karukan222),59 +mashu (masyu ut),59 +marufuji shou,59 +mao (alepricos),59 +mano (narumi arata),59 +manmi,59 +manga time kirara carat,59 +mandudaein,59 +mamadasky,59 +major league baseball,59 +mahou shoujo ikusei keikaku jokers,59 +magnetism,59 +magmortar,59 +magazine ejection,59 +maeda inuchiyo,59 +lunaraven,59 +louise (fire emblem),59 +lop rabbit ears,59 +locksuke,59 +leon (sygna suit) (pokemon),59 +lemur,59 +lee siu,59 +le delicatessen,59 +lazymimium,59 +lazengann,59 +last chronicle,59 +large ribbon,59 +la folia rihavein,59 +ky (ky990533),59 +kuzan (aokiji),59 +kuuchuu buranko,59 +kusaka eiki,59 +kurosujuu,59 +kurosawa tomo,59 +kurokiri6630,59 +kuroino (poco),59 +kurogoma (haruhi3),59 +kurogane (tsubasa chronicle),59 +kurageso,59 +kumanomi,59 +kugimiya kei,59 +krita (medium),59 +koyuiko,59 +kouno (uiyoyo199),59 +kotohane,59 +korona,59 +koron chris,59 +koori chikage,59 +kono oozora ni tsubasa wo hirogete,59 +kono aozora ni yakusoku wo,59 +konatsu karasu,59 +konagi (konotuki),59 +komo da,59 +kohinata raichi,59 +kiu (dear deer),59 +kitin,59 +kite (hunter x hunter),59 +kit (studio),59 +kisuu (oddnumberr ),59 +kissing leg,59 +kise (swimmt),59 +kisaragi hina,59 +kiritani846,59 +kirenenko (usavich),59 +kinokoko-kino,59 +king boo,59 +kimagure ringo,59 +kikuru (vtuber),59 +kikkaiki,59 +khornette quest,59 +kenken (keuken),59 +ken-san,59 +keli bang,59 +kcar66t,59 +kazeha (starrysky),59 +kazami ryouya,59 +kazama jun,59 +kawamoto akari,59 +kasukabe tarou,59 +kash-phia,59 +karakoro,59 +kamine ayaka,59 +kaishinshi,59 +kairi630,59 +kagelantern,59 +kafeifeifeifeifeifei jiemo jun,59 +kaburaya seiden,59 +k9999,59 +juu satoshi,59 +juri (shiningred),59 +juno emmons,59 +joker (smile precure!),59 +john joseco,59 +johan liebert,59 +jinnai enta,59 +jess (jelee),59 +jeanne d'arc (mystery treasure) (fate),59 +jammeryx,59 +jai (whany1998),59 +jacuzzi splot,59 +jack howl,59 +jabittoson,59 +izuna jinkuro,59 +iwashi mizuni,59 +iwaki hazuki,59 +iteza (flapper girl 25),59 +isoi reiji,59 +ishikawa mami,59 +ironatsuki,59 +inuyama akari,59 +inukoro (spa),59 +inukai mofu,59 +inui arihiko,59 +inugami akito,59 +inue ao,59 +inoue marina,59 +inkune,59 +inhaling,59 +incredible ecclesia the virtuous,59 +imo norio,59 +imanaka koutarou,59 +icywood,59 +ichimonji batsu,59 +ichimatsu kohina,59 +hyuuga kai ni (kancolle),59 +huyou (awegk),59 +hornjob,59 +hopper,59 +honi,59 +holding to chest,59 +hmk84,59 +hiyake-chan,59 +hitobashira (1746164306),59 +hiseki (tknkkm),59 +hinata (ryohinata),59 +himekawa kotone,59 +himekawa hayuru,59 +higashikata joshu,59 +hibi tsuna,59 +heika (heikahp),59 +heidi (gray all),59 +hayato (meromoni),59 +hayami ayumi,59 +hayame (m ayame),59 +hatoneko,59 +hato moa,59 +hastur,59 +haruna hisui,59 +haru 
(d-s-c),59 +haru (beastars),59 +haoto luna,59 +hanging from tree,59 +hane (15188414),59 +hand maid may,59 +hala (pokemon),59 +haban (haban35),59 +gyoukan (jfxc),59 +gyan,59 +guriko (mossari),59 +gray (fire emblem),59 +glowing tail,59 +girlsay,59 +ginnote,59 +gigantic testicles,59 +gen (gen 7254),59 +gats (nougats),59 +gas tank,59 +gabi braun,59 +fuyutsuki asuto,59 +fusenryo,59 +fumotono mikoto,59 +fujimaru ritsuka (female) (halloween royalty),59 +frilled garter belt,59 +fractale,59 +floralia,59 +fjt (fjym2),59 +finland,59 +finger to nose,59 +ferdy's lab,59 +fabiola iglesias,59 +fa mulan (disney),59 +f-una,59 +eva solo,59 +etna (disgaea) (cosplay),59 +eternal melody,59 +eruza,59 +enraenra (youkai watch),59 +eien no juu nana-sai,59 +earthquake,59 +e.g.o (project moon),59 +dwebble,59 +duoj ji,59 +ds (ndsl),59 +dragon star2,59 +dp28 (girls' frontline),59 +downes (azur lane),59 +dofus,59 +diting (the legend of luoxiaohei),59 +devy lobotomy,59 +deredere,59 +denim dress,59 +deme (karesuki),59 +damian wayne,59 +daikan'yama ebisu,59 +daichi nono,59 +dahuang,59 +dada (esoragoto),59 +d tomoki,59 +czeslaw meyer,59 +cygnet (sea star on shore) (azur lane),59 +cutting clothes,59 +cutepet,59 +cure black pose,59 +cum on bed,59 +cuckoo clock,59 +controlline3,59 +comiket 84,59 +comforter,59 +clitoris leash,59 +clitoris clamp,59 +ciel arc,59 +chou chou,59 +chitu hefeng zhong,59 +chitose-kun wa ramune bin no naka,59 +chinese mythology,59 +chiaki (shigofumi),59 +chest cutout,59 +charjabug,59 +cb,59 +canon (company),59 +canoe,59 +buzheng61241,59 +bus stop sign,59 +buffalo bell,59 +bseibutsu,59 +brown jumpsuit,59 +brown hair girl (nagioka),59 +brazuca,59 +bobamiruku,59 +blueberry hair ornament,59 +blemishine (moon catastrborn) (arknights),59 +blackheart,59 +black egrets,59 +bird of paradise flower,59 +birch (pokemon),59 +berkut (fire emblem),59 +bepo,59 +bearwitch,59 +beartic,59 +battle spirits: burning soul,59 +batabiru,59 +bartholomew fatima,59 +barrel (live a hero),59 +bang5410,59 +bamboo scroll,59 +balthus von albrecht,59 +bakugan battle brawlers,59 +azukiko,59 +ayuko (ayuko54bornthisway),59 +ayatan-kiri,59 +ayakichi,59 +awashima seri,59 +awarinko,59 +atelier iris,59 +atatatamu,59 +ataka takeru,59 +at4,59 +ashfair,59 +asami lilith,59 +art deco,59 +archer alter (fate),59 +apple peel,59 +ao (aoblueao),59 +antonio salieri (first ascension) (fate),59 +another story,59 +ankoro mochi,59 +angelene,59 +ange serena,59 +ancient,59 +anba kohaku,59 +amoonguss,59 +amazu (kurozu),59 +amamiya poran,59 +amafuyu,59 +alisa mikhailovna kujou,59 +alexander anderson,59 +albacore (warship girls r),59 +akym,59 +akiha gongen (housamo),59 +akari maki,59 +aira kanae,59 +aikawa aika,59 +aida (aidia),59 +agnes digital (lovely jiangshi) (umamusume),59 +afternooners,59 +aerokinesis,59 +aduti momoyama,59 +ad-6-0001a,59 +aaaninja,59 +a flat chest is a status symbol,59 +852 box,59 +360 (taiyo360),59 +'o'ne,59 +zouni (xavier),58 +zero project,58 +z23 (philosophy sensei) (azur lane),58 +yztp (sanmery),58 +yuuta kanami,58 +yuuno arashiko,58 +yuniiho (vtuber),58 +yumeno yume,58 +yume (454565427),58 +yukiri (l ii),58 +yozora,58 +youmu day,58 +youkai watch 3,58 +yoshinari kou,58 +yoshikawa tomoko,58 +yokoi rumi,58 +yohaku aki,58 +yilan un,58 +yeruen,58 +year connection,58 +yatogami tenka,58 +yasuyori (housamo),58 +yasumori zen,58 +yamineko,58 +yajima caroline,58 +xin zhao,58 +x chitch,58 +wojak,58 +wo you yibei jia wanli,58 +witches 5,58 +windyakuma,58 +white gorilla (okamoto),58 +webp-to-png conversion,58 +webley revolver,58 +watori re,58 
+watata13,58 +watarai asuka,58 +wasabichan,58 +wan sheng jie,58 +waeba yuusee,58 +vvcephei,58 +vic viper,58 +venn (vebonbon),58 +vdonburi,58 +v gundam,58 +unown c,58 +unmei no clan battle,58 +un nm5sy,58 +ume (326310),58 +ug333333,58 +uchuu kaizoku captain harlock,58 +u (lastcrime),58 +u-d,58 +tynamo,58 +tyke,58 +twice (group),58 +tsuyukusa (eeta7273),58 +tsuyudakunanoda,58 +tsutsumi kinuka,58 +tsutsugami gai,58 +tsukishiro mina,58 +tsukishiro kou,58 +trick-or-treating,58 +tri-stringer (splatoon),58 +tougou tsuyoshi,58 +touchuu kasou,58 +tosaka (tosaka0001),58 +torque,58 +tora (torayarou),58 +tooboshoo,58 +tonelico (fate),58 +tomotake yoshino,58 +tomato manme (voice actor),58 +toho10min,58 +toho-77,58 +tigern,58 +tiger rmn,58 +thymilph,58 +thors military academy branch campus uniform,58 +tesagure! bukatsu-mono,58 +tenkomori (nirastrator),58 +temurei (vovovov),58 +telomere,58 +tatsuno (saya-tatsuno),58 +tapi,58 +tanaka misa,58 +tanaka (chainsaw man),58 +tamaki (tamaki illust),58 +takashima hiromi,58 +taiki (ozone),58 +tahnya,58 +table flip,58 +sxupxdxxy,58 +swordsman (sekaiju),58 +swindler (akudama drive),58 +swiftsure (study hall cyclamen) (azur lane),58 +suzunone seven,58 +suzuna (najaran),58 +suvin (mononochi),58 +superboy,58 +sunset shimmer,58 +sunomiya sana,58 +sunapua,58 +sunadori oruka,58 +sumishuu,58 +sukeo (nunswa08),58 +sukeno yoshiaki,58 +sugai (4ugaii),58 +stuffed monkey,58 +studio tri4,58 +steam (platform),58 +station,58 +stardrop,58 +spiz,58 +souler (fresh precure!),58 +sosuke (yrmon),58 +sonic speed monkey,58 +sonic boom,58 +sonic and the black knight,58 +somalisu,58 +snowflyer,58 +sniper wolf,58 +snegovski,58 +skywaker,58 +skull choker,58 +skeletal tail,58 +siren (p&d),58 +siren (devilman),58 +siren,58 +shy gal,58 +shouwaru,58 +shoura,58 +shou mai,58 +shitodo aoji,58 +shirt bow,58 +shirotaka (5choume),58 +shiroi ko (otokuyou),58 +shirayuki mutsumi,58 +shiratsuyu (nagatukihakuro),58 +shindou (fuguri),58 +shimoku reyu,58 +shimofuji jun,58 +shikishima krile,58 +shihoru (grimgar),58 +shi-2,58 +sherumaru (korcht06),58 +shenhua,58 +setouchi kurage,58 +setia,58 +serra (sennen sensou aigis),58 +seiya (iiseven),58 +see-through coat,58 +se.a,58 +sd sengokuden,58 +scorpion tattoo,58 +sawawse,58 +sawatari reika,58 +satsuki kei,58 +sasasa (nashi),58 +sarcophagus,58 +saotome hiyori,58 +saiykik,58 +sagittarius,58 +s ryouchi,58 +s ibane,58 +ryokucha (i cobalt),58 +ryao,58 +rushian,58 +routes,58 +roten (rotenburo),58 +ringo komachi,58 +retri,58 +renzu (mushishi),58 +renaissance clothes,58 +red uniform,58 +red hayao,58 +rakuma kanori,58 +radio (mei99),58 +racket ti1,58 +raamen,58 +queen draco (fate),58 +quan (fire emblem),58 +qqqne,58 +qie,58 +qi yuan zhi yu,58 +puck (berserk),58 +project x zone 2,58 +priscilla asagiri,58 +princess kraehe,58 +priecia,58 +potion lilac,58 +pota (bluegutty),58 +porocha,58 +polka dot pants,58 +polka dot neckwear,58 +plaque,58 +planetes,58 +pinoaisu,58 +pinkiepies2,58 +pico (p i c o),58 +pia carrot e youkoso!! 
g.o.,58 +personality core,58 +peku (science santa-san),58 +pedal,58 +peachpulsar,58 +peach luo,58 +patoto,58 +parker-hale idw,58 +papo,58 +panzerkampfwagen 38(t),58 +paladin (warcraft),58 +paco (eien mikan),58 +p.i.l.,58 +over the collar (idolmaster),58 +otogari adonis,58 +oro (street fighter),58 +oricorio (pa'u),58 +orc (ii orc no hi),58 +oratoza,58 +orange male swimwear,58 +ootori kanae (soukou akki muramasa),58 +ooeyama ken,58 +oobashin,58 +oo gata ken,58 +onsen tengoku,58 +onguuchi,58 +omizu (h2o),58 +omega rio,58 +okubyou yuuki,58 +okinami kai ni (kancolle),58 +oke (okeya),58 +okari,58 +oingo,58 +nz75 (girls' frontline),58 +nyaxxy,58 +nunotaba shinobu,58 +numachi doromaru,58 +nonono (1399900),58 +nohara hiroshi,58 +noda miki,58 +nobunaga no yabou,58 +nishinosora yoichi,58 +nishigoori takeshi,58 +ninomiya rui,58 +nine (lord of heroes),58 +nii (ao no exorcist),58 +nier (lover by the lily lake) (granblue fantasy),58 +ni piao xiancai,58 +nerunerunerune,58 +nekoro (nekokabuto),58 +nekolook,58 +nekobatake,58 +nameneko (124),58 +naked vest,58 +my daily life in the otaku club isn't normal,58 +mutsuki ginji,58 +murayamawataru,58 +munmu,58 +multicolored choker,58 +mujin gattai kisaragi,58 +muaooooo,58 +movie reference,58 +morris1611,58 +moro (mononoke hime),58 +moppo,58 +moldavite,58 +mogtate,58 +mm (kemeko deluxe),58 +miyamizu yotsuha,58 +miyako yoshika (living),58 +miyajima reiji,58 +miss fortune (idolmaster),58 +miso tanuki,58 +mishy,58 +mirutu,58 +mirunai,58 +mimippu,58 +miloku,58 +mikumix,58 +mikami yuuki (nl8you),58 +mihoto kouji,58 +mihama machi,58 +midori (searchlight),58 +midori (cafe-chan to break time),58 +miclone,58 +mi-24,58 +meyamu,58 +mercenary (zero kara hajimeru mahou no sho),58 +meng ge 3 (565571710),58 +melanie malachite,58 +mayu (airmods),58 +matsuzaki yutaka,58 +matsu symbol,58 +masunosushi,58 +master gundam,58 +mashiro botan,58 +mash kyrielight (enma-tei uniform),58 +masco (character),58 +maruze circus,58 +marty mcfly,58 +mariana princilla,58 +mari (delightfully fuckable and unrefined!!),58 +march 7th (honkai: star rail),58 +manaka misato,58 +mame-p,58 +makihige,58 +makaroni,58 +mahan (warship girls r),58 +magi-cu,58 +mafen,58 +macaroni and cheese (artist),58 +luthier (fire emblem),58 +lutecia syndrome,58 +luo.,58 +luna (epic seven),58 +luft,58 +lotpi,58 +long breast curtain,58 +liowig,58 +linus falco,58 +licking thigh,58 +leon v,58 +lena sayers,58 +lavinia whateley (emerald float) (fate),58 +latex skirt,58 +last night good night (vocaloid),58 +ladfa,58 +lace-trimmed veil,58 +kyo9999,58 +kyder,58 +kyaku tatsu,58 +kuzukago (bitchmaker),58 +kuudere,58 +kusumoto touka,58 +kusano,58 +kurokuro illust,58 +kurodani yamame (spider),58 +kurikabacha,58 +kurashina yuzuki,58 +kunai uri,58 +kujikimi,58 +kugatsu tooka,58 +kousaka rino,58 +koto seori,58 +kosencha,58 +koretsuna,58 +korea,58 +konimaru,58 +kollerss,58 +kogawa,58 +kofune mio,58 +kno (anahita),58 +knight servant,58 +kmbk0209,58 +kiyomasa ren,58 +kiryuu aoko,58 +kha'zix,58 +kevin (seiken densetsu 3),58 +kemonomichi (blue black),58 +kemomin nosuke,58 +keffiy,58 +kea (eiyuu densetsu),58 +kaz (shade),58 +kawacchi hirohiro,58 +katoki hajime,58 +karupisusawa,58 +karis (elsword),58 +kanzi,58 +kanon (sinto),58 +kano hito,58 +kani kanizawa,58 +kanayama nao,58 +kamin,58 +kamiki mikami,58 +kaimuu (fahrenheit724),58 +kaidou mitsuki,58 +kaguya (queen's blade),58 +kaga (exalted white wings) (azur lane),58 +kachi,58 +k701,58 +jupiter europe,58 +judge martin,58 +johann trinity,58 +jlien-,58 +jinu (jinusenpai),58 +jean popo,58 +jchoy,58 
+jaeger (girls' frontline),58 +jack of spades,58 +izuriha kagari,58 +izumo no okuni (fate),58 +itsukushima takako,58 +itsuka tenma no kuro usagi,58 +itagaki hako,58 +irodorimidori,58 +incloud,58 +imtmcomics,58 +imperial german flag,58 +illumi999,58 +iko (i-my-16),58 +ikaruga,58 +idemi-iam,58 +ichini (aaaraaaaaaaaa),58 +ichiko oharu,58 +ichikei,58 +ichigoame,58 +ibuibuyou,58 +ibui matsumoto,58 +i want my hat back,58 +hover hand,58 +hotel transylvania,58 +hitode (ziburi),58 +hitachi sou,58 +hirotaka0125,58 +hiro (user negm3832),58 +hina (hinalovesugita),58 +himono xeno,58 +hikyakuashibi,58 +hiiragi tsubomi,58 +hiiragi kei,58 +high-waist panties,58 +helena blavatsky (christmas) (fate),58 +hei (heiyohei),58 +heart sutra,58 +hayato rikin,58 +hatsuki kaname,58 +hatake michi,58 +haruki (haruki678),58 +harryham harry,58 +harpoon gun,58 +harlequin-wheels,58 +hara hikaru,58 +hanaichi (hana13),58 +hakugin006,58 +hair straightener,58 +hair bondage,58 +hadean92,58 +gunner (dungeon and fighter),58 +guame,58 +grizzly mkv (weekend cop) (girls' frontline),58 +grisaia no meikyuu,58 +greentree,58 +googoogaagaa,58 +gomeisa (live a hero),58 +god knows...,58 +glasses boy (osomatsu-san),58 +giwa,58 +geriyarou,58 +gensui (auoua),58 +genkai tokki monster monpiece,58 +genesic gaogaigar,58 +gauche (tales),58 +galaxy fight,58 +galarian linoone,58 +gaien (willtame),58 +futaru usagi,58 +futaba jun,58 +fur-trimmed panties,58 +fujimoto atsuko,58 +fujimaru ritsuka (male) (anniversary blonde),58 +fudo shin,58 +fragran0live,58 +foreground text,58 +fmu,58 +flower panties,58 +flabebe (red),58 +fish eye (sailor moon),58 +feb itk,58 +feather (granblue fantasy),58 +ethyria,58 +ero shocker,58 +equal (heartstrings),58 +envy baby (vocaloid),58 +endless eight,58 +ender lilies quietus of the knights,58 +emu (losspass),58 +elementalist (dungeon and fighter),58 +eita 789,58 +egg carton,58 +edward teach (final ascension) (fate),58 +edo.,58 +ebihara ai,58 +e-note,58 +dunkerque (afternoon venus) (azur lane),58 +droite (tales),58 +dream world (touhou),58 +dorothy (shingeki no bahamut),58 +don quixote (limbus company),58 +doi shizuha,58 +dodome ichika,58 +do9bessa,58 +digimon adventure: (2020),58 +darkgreyclouds,58 +dana (ys),58 +daluto (hitomi555),58 +daitai sotogawa (futomomo),58 +daiki,58 +daijoubu da mondai nai,58 +cure magical (ruby style),58 +cum on mask,58 +cubism,58 +cu chulainn (fate/stay night) (cosplay),58 +cor leonis,58 +commissioner insert,58 +combo counter,58 +coelacanth (gyotaku),58 +cocaduel,58 +clearpage,58 +chong (547342983),58 +chocolate on penis,58 +chiru (218mg),58 +chimosaku,58 +child-box,58 +chil0107,58 +chikuwa (tikuwaumai ),58 +chikkuru,58 +chii (sbshop),58 +cecilia (acerailgun),58 +caramel,58 +carabiner,58 +cannonball,58 +california sea lion (kemono friends),58 +caladbolg (fate),58 +cac itinose,58 +buran (22x),58 +bungaku shoujo (danshi koukousei),58 +broken chopsticks,58 +bring stabity,58 +boy meets girl,58 +bound leg,58 +bomberman jetters,58 +bomb (final fantasy),58 +bocchi-ya-nawi,58 +bloodberry,58 +blood lad,58 +blonde ponytail girl (mdf an),58 +blade catching,58 +blackjunk,58 +bionicle,58 +betrayal,58 +bent,58 +ben 10: omniverse,58 +beaver,58 +beatrix bremer,58 +bastet (mythology),58 +baronia,58 +bandeau pull,58 +ball guy,58 +ayuya naka no hito,58 +awayuki tobari,58 +autogyro,58 +aura kingdom,58 +aura (a440),58 +audi,58 +atsuage (kakinop),58 +atric18,58 +ataru (7noise),58 +asutarou,58 +asano kazunari,58 +asaba0327,58 +arya (tianhua),58 +aruvina (gu luco),58 +armored core 5,58 +arimatang,58 +are (arearee),58 
+arai teruko,58 +arai kei,58 +aqua border,58 +aokiku,58 +aoi tori,58 +anthurium,58 +anthiea,58 +angdo (kakajo26),58 +ane to boin,58 +amuii,58 +amino kohaku,58 +amane (funfun),58 +alvis hamilton,58 +alien (psr992),58 +akibakei kanojo,58 +akado harue,58 +ajidot,58 +aisu (icicleshot),58 +aion,58 +aida riko,58 +aharen reina,58 +a-er (akkij0358),58 +88942731ne,58 +1bitheart,58 +1341398tkrtr,58 +10r (tulipano),58 +zuima,57 +zig90,57 +zettai yarumoni,57 +zeruel,57 +zatou,57 +zamazenta (crowned),57 +zain (jiha3905),57 +z.dk,57 +yuzua,57 +yuutsuki hina,57 +yuura shiu,57 +yuuki (yuuki333),57 +yun (neo),57 +yukirin (nagatoyuki ibukifuko),57 +yuki sizuku,57 +yui (kuro kosyou),57 +yue yue,57 +yougata,57 +yoshio (440),57 +yoshida ken'ichi,57 +ying swei (azur lane),57 +yig yuki (yig-gha),57 +yi lee,57 +yellow hakama,57 +year of the horse,57 +yarizakura hime,57 +yamayu,57 +yamano (151515),57 +yakuru,57 +yagi mutsuki,57 +yadoyuki,57 +yada yada,57 +ya99ru,57 +y o u k a,57 +xxoom,57 +xwu,57 +xuan (mixflavor),57 +xiao lin jiu,57 +wooser (character),57 +wooden cup,57 +wood cube,57 +wol (wol 927),57 +wingul (tales),57 +wilhelm (re:zero),57 +white serval (kemono friends),57 +white mage (cosplay),57 +white album (stand),57 +west virginia (azur lane),57 +wendy crawford,57 +wendy (shepherd0821),57 +watson amelia (cosplay),57 +washinomiya koori,57 +warabin (suteki denpun),57 +wakamochi-ta,57 +voyager (third ascension) (fate),57 +virtual boy,57 +violet (ninomae ina'nis),57 +vincent brooks,57 +viiiper,57 +vidar,57 +vermilli000n,57 +vehicalization,57 +vamjulian,57 +valkyrie profile anatomia,57 +valkyrie (last origin),57 +uyuki (ouun),57 +utekiro,57 +uta no prince-sama: shining live,57 +uta-garuta,57 +usagi1923,57 +usada hikaru (cosplay),57 +urie kuki,57 +uri uri,57 +unown d,57 +uma (ninjin misaki),57 +ugwa,57 +uexkull,57 +ueda yumehito,57 +u-tan,57 +tyuraba,57 +type 4 (girls' frontline),57 +turks,57 +tsuwabuki (souma (ordures)),57 +tsuneki hikari,57 +tsuneaki (dosnan),57 +tsugumi (uzurabird),57 +tsubame (minami haruka),57 +tsubaki kureha,57 +tree 13,57 +traditional dress,57 +toyota hiace,57 +touhou (game),57 +totallyiryanic,57 +too mizuguchi,57 +too many in mouth,57 +tomokane,57 +tokugawa sen,57 +toho (kihon ha yappa),57 +todo (masa3373),57 +tobade (tbdfactory),57 +tk (takotsuboya),57 +tin (mixflavor),57 +thousand memories,57 +theobrobine,57 +the doctor,57 +the 3rd super robot wars alpha,57 +tempy (rinsia),57 +tatsuyoshi,57 +tatsuwo,57 +tarantula,57 +tani (tanidesuyo),57 +tana (garyuh-chitai),57 +tamase tama,57 +tamamo cat (lostroom outfit) (fate),57 +tallgeese (lgeesel),57 +taking off,57 +takatsurt,57 +takase haruhiko,57 +takasaki manabu,57 +tai (e3d1p),57 +tachibana wataru (123tsuki),57 +tachibana chata,57 +swarm,57 +suzushina,57 +suzumura kirie,57 +susanghan messenger,57 +sunflower petals,57 +sun zha,57 +sun jing,57 +sumisu (rinsumi),57 +suke akurou,57 +sugimura runa,57 +su (honkai impact),57 +stylized blood,57 +sturkey,57 +stuffed snake,57 +stock pot,57 +stick grenade,57 +steyr tmp,57 +stella glow,57 +star guardian pet,57 +ssumbi,57 +spawn,57 +spam (spamham4506),57 +sovetskaya belorussiya (relaxation stratagem) (azur lane),57 +souffle gamberetto,57 +sotoka rakita,57 +soriya,57 +sorakura shikiji,57 +sonozaki noriko,57 +sk816,57 +site of grace,57 +sirius enjoliao,57 +single scoop,57 +simz,57 +simon blackquill,57 +silicon creature,57 +sig 550,57 +siegmeyer of catarina,57 +shuang ye,57 +showing,57 +short tail,57 +shoron,57 +shizumi satou,57 +shirota mizuki,57 +shiromoca,57 +shiromamekei,57 +shirabe shiki,57 +shiosato 
jun,57 +shimo (shimo332215),57 +shimazaki mari,57 +shima tokio,57 +shiliuyexiaohei,57 +shiba tomoshibi,57 +shao (shaorouhong),57 +shampoo (cat),57 +sennen suisei,57 +sengoku shinobu,57 +selim spark,57 +seitokaichou-chan (minato ojitan),57 +scup,57 +scorpio milo,57 +scaramouche (wanderer) (genshin impact),57 +sazaki hiyoru,57 +satella (re:zero),57 +sasa ichi,57 +sanada yukimura (hyakka ryouran),57 +salvatore (disgaea),57 +sakura ino,57 +sakkama,57 +sakana (packetsjp),57 +saitou shuka,57 +sagara kazuto,57 +ryou (kimagure),57 +rurisakura,57 +ruiten,57 +royal bitter juice,57 +rock-bomber,57 +ritz malheur,57 +ritence,57 +risu (ritharte),57 +rikudou yura,57 +riko (machikado mazoku),57 +rickshaw,57 +ribbon girl (module),57 +ri-ko,57 +rengoku (first ascension) (fate),57 +rem (death note),57 +relaxing,57 +reisa (blue archive),57 +reiei 8,57 +redauto,57 +red brooch,57 +recipe,57 +rebel pilot,57 +rata (m40929),57 +rascal (n119),57 +rapt (47256),57 +ran (shugo chara!),57 +rakkogawa rinro,57 +raion (inoueraion),57 +rainli,57 +ragnarok origin,57 +qingxin gua yu,57 +pyra (xenoblade) (cosplay),57 +purple blindfold,57 +producer (idolmaster side-m anime),57 +print bandana,57 +pride chicken,57 +pretty mundane,57 +ponpon nun,57 +pltrgst,57 +plasma sword,57 +plait,57 +pitch (kirby),57 +pink shawl,57 +pineco,57 +philia felice,57 +pera,57 +pegasus (sailor moon),57 +peach ornament,57 +patches (from software),57 +patapon,57 +passionlip (fate) (cosplay),57 +parubinko,57 +parasite eve the 3rd birthday,57 +parallela66,57 +paraietta,57 +oversized plant,57 +oto,57 +osiaarah,57 +osatsu,57 +osanai,57 +orchid (arknights),57 +ooki futaba,57 +ono (ohno ha),57 +oneirio,57 +one piece film: gold,57 +olivia (asobi asobase),57 +okii (oh ki ik),57 +ojou (nameo),57 +ohta yuu,57 +odradek,57 +numahana,57 +ntw-20 (the aristocrat experience) (girls' frontline),57 +nozarashi satoru,57 +norfolk (azur lane),57 +noppo (tarstation),57 +noka (noka8103),57 +nod,57 +no undershirt,57 +nn tasu,57 +nishin (nsn 0822),57 +nishijou nanami,57 +nipple leash,57 +nioti,57 +nika (nikasa an),57 +nico o0,57 +nero claudius (formal dress) (fate),57 +neko miyabi (artist),57 +ncontrail (mgax7527),57 +nayutalien,57 +natsume (hidamari sketch),57 +natsu yasai,57 +nanao (skb48 n),57 +nanakusa nazuna (kouji (campus life)),57 +nanahira,57 +namatarou,57 +nalica (animeflux),57 +nakasaki hydra,57 +nakagomiyuki415,57 +najo,57 +nagumo kuu,57 +mushiboy,57 +munak,57 +multiple bracelets,57 +muhamaru yuni,57 +mr.milk caramel,57 +mozuku (mozuuru0323),57 +moyamu,57 +motokazu (dontokodon),57 +mori toshimichi,57 +monster kid (undertale),57 +mono 02,57 +momori,57 +momo tomato,57 +molatoliamu,57 +mogiko,57 +moekon,57 +mochitsuki karen,57 +mochigome (ununquadium),57 +mochi nabe,57 +mizuiro raika,57 +mizuhara hayari,57 +miyoshi yun,57 +miyane aki (radical dash),57 +miwa (ahyuck),57 +mitsukichi,57 +miton-chan (miton (turuyasann)),57 +misumi kouji,57 +mistral (metal gear rising),57 +miru (ormille),57 +mirai ticket,57 +mikoto kei,57 +mihono bourbon (code:glassage) (umamusume),57 +microphone waddle dee,57 +michirutnk,57 +michelle (ooo-anteat),57 +messina (jojo),57 +merryj,57 +memento (sennen sensou aigis),57 +meguno (wokada),57 +mechazawa shin'ichi,57 +matsuo shogo,57 +masaki gaillard,57 +marmot tail,57 +marie antoinette (animejapan 2016) (fate),57 +mao (endro!),57 +manyuu hikenchou,57 +manbagi rumiko,57 +man (trance),57 +mamu t7s,57 +mama (marui shiro),57 +makoto ono,57 +makino nanami,57 +maho (drmaho1),57 +magomago,57 +magi magi magician gal,57 +maeashi,57 +lyxu,57 +luu,57 +lunar 
wing,57 +lucky keai,57 +lucilius (granblue fantasy),57 +lucia: crimson abyss (punishing: gray raven),57 +lost elle,57 +loss time memory (vocaloid),57 +lipstick mark on thighs,57 +linda b,57 +linaria (granblue fantasy),57 +limha lekan,57 +lily c. sherbet,57 +lilith (ayanami rei),57 +libus,57 +li jianliang,57 +li dailin,57 +leyna koch,57 +lerome,57 +leon fou bartford,57 +lee byung hee,57 +lap pov,57 +lamium (artist),57 +laffey (bunny idol @unmotivated) (azur lane),57 +kyou kara ore wa loli no himo!,57 +kurumi kobayashi,57 +kuroida,57 +kuro namako,57 +kuro (miku plus),57 +kuro (1968),57 +kurata tome,57 +kunogi ai,57 +kracko,57 +krab (fumekrab),57 +kotobuki toro,57 +kotatsu-spirit,57 +konparu uran,57 +kongya,57 +komuro takashi,57 +komazuki (komaworks240),57 +komaru (himouto! umaru-chan) (cosplay),57 +koma yoichi,57 +kojima kirie,57 +kohatsuka,57 +kodou mikoto,57 +koashi mutsumi,57 +kizuna kirameku koi iroha,57 +kizaki emi,57 +kishibe rohan wa ugokanai,57 +kisaragi yaya,57 +kisaragi attention (vocaloid),57 +kiriyama rei,57 +kirihane,57 +kirigakure symbol,57 +kinokko,57 +killing bites,57 +kemonomichi,57 +keldeo (ordinary),57 +keitomato,57 +kazemaru (arknights),57 +kawakami tetsuya,57 +kawa-v,57 +katie (sennen sensou aigis),57 +katakori sugita,57 +katagiri non,57 +kasuteru,57 +kasugano tsubaki,57 +kasoku souchi,57 +kasetsu,57 +kasane,57 +karu (ishiken),57 +kaomoji (sagor00237),57 +kanda momo,57 +kamishiro natsume,57 +kamen rider cross-z,57 +kamato il,57 +kagami matsuri,57 +k-mame,57 +jyouden,57 +jumpsuit pull,57 +jisu lee,57 +jipponwazaari,57 +jinnosuke,57 +jet kimchrea,57 +jariinu (gomasionori),57 +izumi kouyou,57 +iyamato,57 +iwamotochou geinousha,57 +itsuka todoku ano sora ni,57 +island (game),57 +ishida shouya,57 +ishida mitsunari (sengoku musou),57 +ishida mitsunari (sengoku basara),57 +iris (tb33064667),57 +inakoi,57 +inaba-no-kuni-tottori,57 +imura setsuna,57 +imuhata shimuro,57 +imitation black (vocaloid),57 +imatomo mahya,57 +illusion connect,57 +ikasoke (likerm6au),57 +ijuin megumi,57 +how to make sushi (meme),57 +houshiruri,57 +hoshi akari,57 +holding trophy,57 +hokuto shun,57 +hockey stick,57 +hizuki aki,57 +hizaki ryuuta,57 +hino matsuri (osananajimi ni najimitai),57 +hinaki eiji,57 +hime hajime,57 +hilimuc,57 +hilde (counter:side),57 +hikage monogatari,57 +hijikata toshizou (golden kamuy),57 +hijikata keisuke,57 +high school! 
kimengumi,57 +hibiki wataru,57 +heavily armed high school girls,57 +hcsb,57 +hazamada toshikazu,57 +hatutaro,57 +hattori hanzo uruka,57 +hattifattener,57 +hatsuno xxx,57 +hatena (nazequestion),57 +hassan of serenity (lostroom outfit) (fate),57 +hashimo yuki,57 +haruno (kuromitugozen),57 +harpuia (mega man),57 +harem gain,57 +haregama shiina,57 +hano luno,57 +hanno,57 +hand on headset,57 +hand on another's hat,57 +hanazawa ma,57 +hamura mayu,57 +haitukun,57 +hair mustache,57 +hafuku,57 +h26r,57 +gung,57 +gundam 00p,57 +grayfox,57 +gradient border,57 +gotoo,57 +goe,57 +glaz (rainbow six siege),57 +glamrock freddy,57 +girl friends (manga),57 +gill (street fighter),57 +gastrodon (east),57 +garrote,57 +garl vinland,57 +garasuno,57 +galarian articuno,57 +gal gun (series),57 +gacha (ssaketen),57 +g0ringo,57 +furutaka (azur lane),57 +funeral kimono,57 +fukurou (suga0930),57 +fujiwara no iyozane,57 +fuji izumi,57 +fre,57 +frame (idolmaster),57 +forretress,57 +fomantis,57 +finger in another's navel,57 +figure stand,57 +fight cloud,57 +fig,57 +fancy glasses,57 +f-16 fighting falcon,57 +eye (mememem),57 +european princess,57 +eunos roadster,57 +eternity (last origin),57 +etama quomo,57 +esg (essagna),57 +erodrunky,57 +era (traveling bird),57 +equal (melomelopunch),57 +epee,57 +emori el,57 +emergency exit,57 +elfleda mirjasdottir,57 +ekusa takahito,57 +eitarou (kaminari neko),57 +eisu (eith),57 +effie (street fighter),57 +edwintarm,57 +edo tatsuki,57 +eddie (guilty gear),57 +edamame (food),57 +e-nya,57 +duke of york (warship girls r),57 +drawinglee,57 +drawing on own face,57 +drawdream1025,57 +dororosso,57 +doran (dorannomai),57 +don (street fighter),57 +domas,57 +dokka no kuni no kokuou,57 +djibril (makai tenshi djibril),57 +dirty hands,57 +dio eraclea,57 +dimension-3 (idolmaster),57 +diabolic esper (elsword),57 +denim vest,57 +delichuu,57 +deko isu,57 +defensive wall,57 +deep grand,57 +dbdkitty,57 +dark angel metatron (p&d),57 +daphne ann blake,57 +daijuuji kurou,57 +cyrille (shining force exa),57 +cum on crotch,57 +cuilan,57 +crossette (xenoblade),57 +crewmate (among us) (cosplay),57 +crea dorosera,57 +crawdaunt,57 +cool your head,57 +cloud kingdom,57 +ciela (yuuhagi (amaretto-no-natsu)),57 +chyopeuteu,57 +chuck (harfmoondark),57 +christine (kuro (kuronell)),57 +chocolate on clothes,57 +chloe (sciamano240),57 +chinatsu (suzutsuki kurara),57 +chef kawasaki,57 +charlotte (last origin),57 +char aznable (cosplay),57 +ceruledge,57 +cerberus arts,57 +cat shi,57 +casablanca (azur lane),57 +carrot on stick,57 +carlos oliveira,57 +campanula-star,57 +call of duty 4,57 +caisan,57 +buchou (houkago play),57 +brawly (pokemon),57 +brave fencer musashiden,57 +brasowa,57 +branwen,57 +bonnie (bunny gif),57 +bob (bobtheneet),57 +boardwalk,57 +bloop (archives),57 +binan xian lu,57 +big mouth,57 +benko,57 +beniko,57 +beni-bana,57 +bela dimitrescu,57 +bayonetta (cosplay),57 +bandaged knees,57 +ban (ban62460424),57 +balrog (doukutsu monogatari),57 +ballista (sekaiju),57 +backwards text,57 +azuma mayumi,57 +azibuda,57 +ayase asagi,57 +astolfo monster cosplay (meme),57 +ashida machi,57 +asha,57 +asashio (kancolle) (cosplay),57 +asama (drift in),57 +arjuna (versus) (fate),57 +arihara nanami,57 +argon,57 +architect (nvw model) (girls' frontline),57 +archienemy,57 +arara cocoa,57 +arai kuma,57 +ar nosurge,57 +aqua centolm,57 +aozora no mieru oka,57 +aoyama mio,57 +aonagi ibane,57 +andychen,57 +amunero (kyouka jinrui amunero),57 +amanooni touri,57 +alterlesott,57 +alteil,57 +alpaca hair ornament,57 +alice (fate),57 +akira 
tooru,57 +akira (usausa),57 +akiki (kiki),57 +akariko,57 +akakokko (niro azarashi),57 +akai homura,57 +aka tonbo (lovetow),57 +airani iofifteen (artist),57 +airagency1005,57 +aia amare,57 +against bookshelf,57 +aether gazer,57 +adarin,57 +academy d.va,57 +abe no seimei (onmyoji),57 +3finger hand,57 +123hamster,57 +114514,57 +00tea,57 +zuikillme,56 +zongzi,56 +zen (jirakun),56 +zawar379,56 +yuutarou,56 +yuurinmoku,56 +yuuko24,56 +yuuki shushu,56 +yuubi,56 +yuneri (azuki akizuki),56 +yumoteliuce,56 +yumeji kiriko,56 +yukimura shinya,56 +yuki onna (ring dream),56 +yui (sky-freedom),56 +you mizuno,56 +yoshizuki iori,56 +yoshiragi,56 +yoshinaga yunosuke,56 +yorihime nao,56 +yomegane,56 +yimu,56 +yashi nagomi,56 +yang zheng yu,56 +yamaguchi yuu (norisu),56 +yamada ayumi,56 +y&k,56 +xiao (ye yiaoxi),56 +xandier59,56 +wire cutters,56 +windshield,56 +wiggling,56 +whois,56 +wes (pokemon),56 +wei li,56 +water masturbation,56 +waribashi-p,56 +wan nyan a la mode!,56 +waist sash,56 +waa! okami,56 +vu (oyavun),56 +void 0,56 +vlfdus 0,56 +virtual clinic,56 +virgo shaka,56 +vesves (sailor moon),56 +v.v.,56 +utsuwa,56 +utsuro atomo,56 +uns (sdsxsoverlord),56 +unory,56 +uno ichi,56 +unizama,56 +unicorn (a dream of pure vows) (azur lane),56 +un'you (kancolle),56 +umekobushi,56 +ueshige suzu,56 +tying tie,56 +two-cat-walk,56 +tweek tweak,56 +tracyton,56 +tracen academy,56 +toyosatomimi no miko (cosplay),56 +touya akira,56 +touwa iyo,56 +tour guide from the underworld,56 +torricelli (azur lane),56 +toranoo,56 +tomohiro (duo),56 +tomo takino,56 +tomcat,56 +tombsakura,56 +tolone (xenogears),56 +tohru (maidragon) (cosplay),56 +toguro otouto,56 +tobe sunaho,56 +tia (4017342),56 +the last supper,56 +tfqr,56 +tezurumozuru,56 +tequila,56 +tepes,56 +tensa zangetsu (bankai),56 +tenpa (tenpa2190),56 +tatsunoko vs capcom,56 +taoru,56 +tanukikouji midori,56 +tam a mat,56 +takitou,56 +takemi kaoru,56 +take mamoru,56 +takatou suzunosuke,56 +takashi (onikukku),56 +takara yukari,56 +takara tooru,56 +takamaru (taka1220),56 +takahashi fuuko,56 +tairitsu (arcaea),56 +taira daichi,56 +tachibana kyouko,56 +super sass,56 +super robot wars destiny,56 +suparu (detteiu),56 +summoner (ragnarok online),56 +sumi otto,56 +sukeban (smg) (blue archive),56 +sugino (patrassyar),56 +subakeye,56 +striped choker,56 +string play spider baby (meme),56 +strawberry slice,56 +spiky-eared pichu,56 +sotsual deliheal,56 +soramame tomu,56 +sonomiya karen,56 +sonken chuubou,56 +sofia (toushinden),56 +soda (sodachuxd),56 +slowpit,56 +sleeping animal,56 +skyloop19,56 +sk tori,56 +siragagaga,56 +single tooth,56 +single hand,56 +silverchariotx,56 +silver legwear,56 +sigsbee (warship girls r),56 +sigma rio,56 +shuten douji (dress for demons) (fate),56 +shuizhanglang,56 +shout lines,56 +shoujo,56 +shouhou-san daisuki teitoku,56 +shoe print,56 +shiun (siun 5513),56 +shiumai,56 +shiromo (pui pui molcar),56 +shiromitsu suzaku,56 +shirofugu,56 +shiro shougun,56 +shirakaba risa,56 +shiny trinity (idolmaster),56 +shingo.,56 +shimotsuki aoi,56 +shimashima (simasima 23),56 +shibasaki erika,56 +shanghai bisu,56 +shadow naoto,56 +shadow hearts from the new world,56 +shachiku-san wa youjo yuurei ni iyasaretai.,56 +shabomu,56 +setou kenji,56 +sentaku nori,56 +senbata-rou,56 +selkiro,56 +seitsuji,56 +seikaisuru kado,56 +seijun (seijun01),56 +sawaki souemon tadayasu,56 +saw (movie),56 +satou rikidou,56 +sasakamakue,56 +sasahara satsuki,56 +sarutobi sasuke,56 +samuel rodrigues,56 +sakurayashiki nisei,56 +sakuramochi usa,56 +sakamoto atsumu,56 +sakakibara kouichi,56 +saikre,56 
+saiga-12,56 +saebashi,56 +sadi-chan,56 +saddlebags,56 +ryuuseitai (ensemble stars!),56 +ryo (sora-ryu),56 +rx7649,56 +ruru (phantom world),56 +rungsak sontayanont,56 +royal guard (ragnarok online),56 +roswaal l. mathers,56 +rosenkreuzstilette freudenstachel,56 +romancing saga re;universe,56 +rody (hayama yuu),56 +rock garden,56 +risuo,56 +rio (rio 01),56 +rio.lw,56 +rikizo,56 +riding boar,56 +rico (pico-ba),56 +riccovich,56 +ribbon-trimmed kimono,56 +ren (dears),56 +reiuji utsuho (cosplay),56 +red queen (sword),56 +rec,56 +rantia,56 +ranger (sekaiju),56 +rand (gyee),56 +rafael-m,56 +rabbid,56 +r-e-l-o-a-d,56 +qi maid,56 +q (hololive),56 +q-bee (aaru),56 +python (girls' frontline),56 +puzzle piece hair ornament,56 +purple male underwear,56 +protagonist (tokimemo gs2),56 +prism,56 +premier league,56 +post guild war celebration,56 +poking nose,56 +piscina,56 +pimopi,56 +pillow bite,56 +phosphophyllite (gemstone),56 +phoenix wings,56 +petal censor,56 +perlmuttt,56 +peone (panapana),56 +penny (pokemon),56 +penis growth,56 +pedocchi,56 +peaked lapels,56 +payday (series),56 +patreon,56 +patissier (gindoro),56 +pastel,56 +parupin,56 +party whistle,56 +parfait (hawawa),56 +panjandrum,56 +panda inu,56 +ozma lee,56 +otto (nanoha),56 +ototachibana academy uniform,56 +osananajimi ni najimitai,56 +osaka (city),56 +oreomanko,56 +oreki genya,56 +oota takeshi,56 +oomiya io,56 +oohashi akiko,56 +ono toshihiro,56 +oniku (oniku029029mm),56 +onbin ha,56 +okuwaka tsubomi,56 +okayu (headless),56 +oharu,56 +ogawa shinji (symphogear),56 +oda nobunaga (drifters),56 +ocelot (kemono friends),56 +o-ring strap,56 +nunnun (shunka shuutou),56 +nukunuku,56 +nugi (armenci),56 +nueno meisuke,56 +nstlgie,56 +non (mangaka),56 +noel maresse ascot,56 +niwa makoto,56 +nishisan (mikurosu),56 +nishikawa ari,56 +nishi yasuaki,56 +ninose shizuku,56 +nino (ninouchi irazu),56 +nincada,56 +niking,56 +nijou makoto,56 +niimi haruka (photokano),56 +nightingale (an elegy) (arknights),56 +neuron (exceed),56 +netzach (project moon),56 +neriwasabi,56 +neopets,56 +nekopote,56 +neginoki,56 +neet de otaku na kunoichi to naze ka dousei hajimemashita,56 +nazume mikuru,56 +nazrin (mouse),56 +nausea,56 +natsuno riku,56 +natarle badgiruel,56 +narinn,56 +nao (doublexdutch),56 +nanase-chan ntr!,56 +nameko houshi,56 +nal (nal's pudding),56 +nagura setsuko,56 +nagul,56 +naemperor,56 +mystical power plant,56 +myslot,56 +musha sabu,56 +murasame oshizu,56 +mung (larnec),56 +munashichi,56 +mugino0515,56 +motofumi,56 +morisato keiichi,56 +morgan le fay (chaldea satellite station) (fate),56 +momoiro tunozemi,56 +moco (moco 28),56 +mizuki (ekakichan),56 +miwa konekomaru,56 +mitsugetsu,56 +mishina eikichi,56 +miravi,56 +miranda (wakfu),56 +miraidon,56 +miracle belltier,56 +mipple,56 +minazuki (jo),56 +minazuki (azur lane),56 +minai karte,56 +midori boushi,56 +meso (goma-meso),56 +meso-meso,56 +memories off sorekara,56 +meitarou,56 +mega ampharos,56 +mega altaria,56 +mechagodzilla,56 +mecha shoujo taisen z,56 +mazuka kei,56 +mavis dracula,56 +matsuyama nozomu,56 +matsu kitsune,56 +massager,56 +masollyn,56 +marakasu (chinkasu),56 +manmaru (love live!),56 +mandibuzz,56 +mandalay (boku no hero academia),56 +mame komari,56 +maisaki,56 +mai (future) (dragon ball),56 +magi in wanchin basilica,56 +maco (macoillust),56 +machico maki,56 +lynne,56 +lyn (summer) (fire emblem),56 +luxu,56 +luxiem,56 +littlecloudie,56 +litsilium,56 +lisanna strauss,56 +lingcod dayu,56 +lime-iro senkitan,56 +lillian ljungstrom,56 +leni milchstrasse,56 +lemon t,56 +lemo (dragon ball),56 
+legolas,56 +lego brick,56 +legacy zechs,56 +leg brace,56 +laura (fire emblem),56 +larry butz,56 +lambo,56 +kyoro (cothurnus),56 +kyaroru,56 +kuutamo,56 +kuuki (rakam),56 +kurumiya (krmy p),56 +kuroshiro kanae,56 +kuroo (project apricot),56 +kurono kuro,56 +kurokona,56 +kurohikage,56 +kuro guren,56 +kuno touya,56 +kunitarou-art,56 +kumbhira (summer) (granblue fantasy),56 +krs (kqrqsi),56 +koyomi (kamen rider wizard),56 +konohana,56 +komaki midori,56 +komadera,56 +kokonoe tamaki,56 +koeri,56 +kodama naoko,56 +koda,56 +knight (7th dragon),56 +km2o4,56 +kjelle (fire emblem),56 +kittaka kaname,56 +kira kazuki,56 +kimoko,56 +kimofest,56 +kieeyo,56 +kiddy girl-and,56 +kidanieru,56 +kiasa,56 +key earrings,56 +ket-c,56 +keshigomu,56 +keropii,56 +kelp,56 +keita (kta0),56 +kedama (ughugjydthjdf),56 +kazuki kotobuki,56 +kawamura kousuke,56 +kawakabe momoka,56 +kawai honoka,56 +kawahara chisato,56 +katejina loos,56 +katakata un,56 +kashiwagi chizuru,56 +karin (viper),56 +kareha aki,56 +kappamaru,56 +kanzaki (kusomiso),56 +kanimura ebio,56 +kanbayashi shiori,56 +kanae (sekaiju),56 +kamen rider x (series),56 +kakiikada,56 +kai aki,56 +kahis (sweetbriar),56 +kagayama hajime,56 +kadokawa,56 +junkyard dog mk iii,56 +juliana eberhardt,56 +jubei-chan,56 +ju hyeon-mi,56 +jonejung,56 +john (ichigo mashimaro),56 +joe (sk8),56 +joaowbenfica,56 +jin hayato,56 +jiete,56 +jian huang,56 +jerry (tom and jerry),56 +jeran (ggokd),56 +jeanne d'arc (saintess of the sea) (azur lane),56 +javier estrada,56 +jas (annkoromochi),56 +iwami kyuuto,56 +isuzu (an icy cat),56 +istina (bibliosmia) (arknights),56 +issign,56 +isinose (ilxodidli),56 +ishigami senkuu,56 +ishida atsuko,56 +irokiiro,56 +irikawa,56 +io (summer) (princess connect!),56 +inuue kiyu,56 +introvert-kun,56 +indica,56 +in refrigerator,56 +ilias,56 +ihara saeko,56 +iduhara jugo,56 +ido romeko,56 +huacai,56 +hsmoji,56 +hozumi sousei,56 +hoshimiya mery,56 +hoplite,56 +hooded top,56 +honjou nia,56 +hongcha (roels),56 +homei (honkai impact),56 +holly (monster farm),56 +hitoyoshi zenkichi,56 +hiss,56 +hisany-spacecrayon,56 +hiryuu (kugelcruor),56 +hirano toshihiro,56 +hira-san,56 +hino mamori,56 +hino eiji,56 +hinata keiichi,56 +hinahara hajime,56 +himo,56 +himeka chiyako,56 +hikyou tanken fam & ihrie,56 +hi no tori,56 +herikawa koishi,56 +heidern,56 +hearthstone,56 +healer (disgaea),56 +hazuki mizuho,56 +hayashida airi,56 +hatsune miku expo,56 +hate shinaku aoi kono sora no shita de....,56 +hasebe yutaka,56 +haruoto alice gram,56 +harada isuka,56 +hanako (idolmaster),56 +hanagaki takemichi,56 +hakumaiya,56 +hako reeema,56 +haigure pose,56 +hachiya,56 +hacco mayu,56 +guo582,56 +gundam dynames,56 +gundam double x,56 +gundam breaker mobile,56 +gridley (azur lane),56 +green lantern,56 +gouma hyudor,56 +gold gloves,56 +gogeyama,56 +glowing heart,56 +ginka sima,56 +gelgoog,56 +gdd (vkejr702),56 +gas pump,56 +gao kawa,56 +g-clef (suite precure),56 +fylus,56 +fushimi saruhiko,56 +fumetsu no anata e,56 +fukua (skullgirls),56 +fujiwara shun'ichi,56 +fujimaru ritsuka (male) (chaldea combat uniform),56 +fujikura yuu,56 +fujikiri yana,56 +freckles-san (houkago play),56 +franlol,56 +franky franklin,56 +fran (tensei shitara ken deshita),56 +fox (trickster),56 +forneus 0,56 +foreskin biting,56 +food-themed bag,56 +fireflysnow,56 +finger twirl,56 +fina (eternal arcadia),56 +fideo aldena,56 +fengyin shici guozi,56 +feesh,56 +evil twin lil-la,56 +eureka (eureka-0075),56 +etheldreda,56 +ero ragnarok offline,56 +enbizaka no shitateya (vocaloid),56 +ena (enaa97),56 +emo (mikan),56 
+electrike,56 +electric razor,56 +ekaki kukuma,56 +ekaapetto,56 +effort star,56 +ecien,56 +e neko,56 +dutch girl (last origin),56 +dun stallion (fate),56 +drinking blood,56 +drift (transformers),56 +dreamusun,56 +dreambig,56 +dr.wolf,56 +dotted half note,56 +dix-neuf,56 +dismassd,56 +din,56 +diluc (red dead of night) (genshin impact),56 +dice earrings,56 +destroid,56 +den (fma),56 +deltoids,56 +deeple,56 +dawn02,56 +darth wooser,56 +dark valkyrie (p&d),56 +dama,56 +dahlia,56 +dagger (fizrotart),56 +cyannism,56 +cwind,56 +crestren,56 +cressey (porforever),56 +cream (stand),56 +comet (azur lane),56 +collared cloak,56 +clown (ragnarok online),56 +cleanerjay,56 +cinder block,56 +chris armin,56 +chouhi ekitoku,56 +chocolate statue,56 +chocolate fountain,56 +chirumakuro,56 +chinako,56 +child abuse,56 +chika (toumei kousoku),56 +chiizu namasu,56 +cheetah,56 +chapman's zebra (kemono friends),56 +chabasu,56 +catapult,56 +castella (food),56 +caribun,56 +can can bunny,56 +campione!,56 +bushiroad,56 +bunzou,56 +bunny cutout,56 +broken tooth,56 +boyd (fire emblem),56 +borr,56 +book stand,56 +boogiepop (character),56 +boltund,56 +bloodcatblack,56 +blood on axe,56 +bleeding from forehead,56 +blanket grab,56 +black delmo leader,56 +black burakku,56 +black-tailed gull (kemono friends),56 +binware,56 +big daddy,56 +beretta px4,56 +benten,56 +battle standard,56 +baseness,56 +barn owl,56 +barack obama,56 +banana fish,56 +bakuten shoot beyblade,56 +baek changpo,56 +bad pawoo id,56 +bad newgrounds id,56 +azu ankoowl,56 +az0212,56 +ayasato karen,56 +ayasaki yuu,56 +axis deer (kemono friends),56 +axel almer,56 +awa suna,56 +aubrey (neal d. anderson),56 +asisuki,56 +aruse yuushi,56 +arima kana,56 +appleseed (appleseed art),56 +aozora stripe,56 +aoi umi no tristia,56 +anko kinako,56 +animahakim,56 +amatsuka fubuki,56 +amabie,56 +aloe (ishuzoku reviewers),56 +alien vs. 
predator,56 +alice mana,56 +album name,56 +aladdin (character),56 +akito (ao's club),56 +akira (otokuyou),56 +akino irori,56 +aka-kabuto no gema,56 +ajirui,56 +ags (last origin),56 +agnidevi,56 +afterschool school idol (love live!),56 +advance wars: dual strike,56 +adultonly,56 +acfun,56 +aatrox,56 +@ichigo,56 +9a-91 (bullets cafe) (girls' frontline),56 +7mb yut,56 +467 (artist),56 +3gs,56 +04sora40,56 ++1 (yakusoku0722),56 +zzz (orchid-dale),55 +zuowen,55 +zouzou,55 +zhean li,55 +zero130,55 +ze (0enmaitake),55 +z.i,55 +z-move trainer pose,55 +yuufuushi,55 +yuta0115,55 +yurina (rune (pixiv 25170019)),55 +yurara (aroma42enola),55 +yugami gooshu,55 +yue yue1102,55 +yu-chan,55 +yowaneko,55 +youko (inukami),55 +you died,55 +yotubawatane,55 +yotsuba (kyokugen dasshutsu),55 +yoshitsuki,55 +yoshino (mfmfpng),55 +yoshiki (bpmaxm),55 +yoggi (stretchmen),55 +yeyuanqianqianqian,55 +yawai tofu,55 +yasu (yossy),55 +yanngoto,55 +yamato (naruto),55 +yamada chickenko,55 +yakusuke,55 +yakumo mikage,55 +xin lan deng,55 +xiao ma,55 +x (manga),55 +x95 (girls' frontline),55 +x6suke,55 +world of darkness,55 +woogi (duguddnr),55 +white base,55 +wen-m,55 +webcounter,55 +web (cookie),55 +watayuki,55 +washtub,55 +wanmei shijie,55 +waichi,55 +viktorf,55 +vikpie,55 +vibrator in leg garter,55 +verus,55 +vertical-striped apron,55 +ventricosus,55 +vent (mega man),55 +vanilmirth (ragnarok online),55 +uxu,55 +ushinomiya,55 +urikurage,55 +unown y,55 +unown u,55 +unown f,55 +umori hinako,55 +umi (pixiv6861961),55 +umetsu yukinori,55 +uketsuke succubus (konosuba),55 +ufo (kirby),55 +u-min,55 +tyler garden,55 +two-tone scarf,55 +tucking in,55 +tsumidango,55 +tsukimura (d24f4z8j3t),55 +tsukasa tsubasa,55 +tsukasa kinako,55 +tsuchimikado natsume,55 +touhou unreal mahjong,55 +tottsuman,55 +torn sarong,55 +toriko no shimai ~midara ni moteasobareru unmei no mayoigo~,55 +toriko (hatoriko),55 +tori (otori),55 +top-exerou,55 +toomi maya,55 +tongue suck,55 +tomo ro,55 +tkln,55 +tinkle bell,55 +tied to stake,55 +tick! tack!,55 +through ground,55 +throh,55 +tenkasu (aster4545),55 +teng zhai zi,55 +teisuu,55 +tefu,55 +tea/pot,55 +taut pants,55 +tatsuno toshiko,55 +tarou (user tpmh7442),55 +tapu fini,55 +tanakahazime,55 +tamori tadaji,55 +tall crown,55 +takemoto uruka,55 +takasakiyama monmon,55 +takanashi minato,55 +tairyou-bata,55 +tachiuo (arines),55 +t-elos re:,55 +syuuen,55 +syrup (yes! 
precure 5),55 +symboli kris s (umamusume),55 +swain (league of legends),55 +suzushiro mizuki,55 +suzumori asuka,55 +surock,55 +surgeon,55 +superstar (granblue fantasy),55 +super robot wars original generation gaiden,55 +sumomo kpa,55 +summergoat,55 +sumi wo hakuneko,55 +sukurizo!,55 +subaru (794829485),55 +string lights,55 +strawberry tart,55 +stoned,55 +stella (sound horizon),55 +stealth handjob,55 +starbottle,55 +staravia,55 +spoken symbol,55 +spider (minecraft),55 +sparklenaut,55 +soul,55 +sonya (schan),55 +sonosakiburi,55 +somnium,55 +soletta orihime,55 +solatorobo,55 +sofue (girls und panzer),55 +skunk girl,55 +sitting on log,55 +single greave,55 +sin (btc86amme),55 +silmeria valkyrie,55 +siku199,55 +siegfried (sword),55 +shura (granblue fantasy),55 +shota-kun (sky-freedom),55 +shorts rolled up,55 +shooting range,55 +shokei shoujo no virgin road,55 +shisotex,55 +shiroe adele,55 +shirakawa mey,55 +shio butter (obscurityonline),55 +shinjitsu no kuchi,55 +shining world,55 +shimano,55 +shima 16bit,55 +shilleka,55 +shikke gnsn,55 +shiba 0,55 +shano-pirika,55 +shanaharuhi,55 +senoo aoi,55 +scissors lise,55 +schach nouera,55 +sate,55 +saratoga (seven seas of rest) (azur lane),55 +sano souichi,55 +sanguu mitsuba,55 +sakutake (ue3sayu),55 +sakuraba chiyo,55 +sakotach,55 +sakiko (gekiama),55 +saionji usagi,55 +saiko67,55 +saijou karin,55 +sae (renkyuuake),55 +s16xue,55 +ryougi shiki (third ascension),55 +rynn (acerailgun),55 +ruthenium77's character,55 +roy focker,55 +rotom (heat),55 +ronna,55 +rodan,55 +robert kim,55 +rinea (fire emblem),55 +rin (yukameiko),55 +ribbon-trimmed shorts,55 +return of the mount hua sect,55 +resurreccion,55 +resident evil revelations 2,55 +repikinoko,55 +rensouhou-chan (cosplay),55 +ren (fragile),55 +removable censorship,55 +reku hisui,55 +receptionist girl (amagi shino),55 +raruru,55 +rakuko,55 +rakavka,55 +raiz art works,55 +raiden mei (crimson impulse),55 +raichi (litchixlitchi),55 +rai32019,55 +pururun,55 +pumpkin print,55 +proton pack,55 +pretz,55 +potato house,55 +porqueloin,55 +poo (donkan gokko),55 +pomeko,55 +poking self,55 +plucking petals,55 +pixiescout,55 +pikiru,55 +piennamekuzi,55 +physisyoon,55 +photokinesis,55 +phony (cevio),55 +peshita,55 +pentagram necklace,55 +peas (peas0125),55 +pandamonium,55 +palpitoad,55 +pakuchii,55 +pajamas lift,55 +painter-lhb,55 +p-51 mustang,55 +overalls pull,55 +ourobunny,55 +otsunabe (naabe delta),55 +oshou (o shou),55 +orebalgum,55 +oracle (shinkai no valkyrie),55 +opiu,55 +ooarai marine tower,55 +ontake2009,55 +onodera haru,55 +one (drag-on dragoon),55 +omizu chihiro,55 +olympic rings,55 +olmatown,55 +okano hajime,55 +okamoto miyu,55 +okaasan to issho,55 +ohtagaki yasuo,55 +ohgaki m,55 +ogawa hidari,55 +ogarasu,55 +nuko 0108,55 +nuancho,55 +nosetissue,55 +nomio,55 +noja,55 +noir (4chan),55 +noel vermillion (cosplay),55 +noa ikeda,55 +nishizaki eimu,55 +neo-traditionalism of japan,55 +natsuyuki,55 +natsume suzuri,55 +natsuki yuu (minamino tsuki),55 +natasha (pommier),55 +natalia kaminski,55 +narukawa hime,55 +narukami arashi,55 +naonakamura,55 +nanamo ul namo,55 +nanafuton,55 +nan (gokurou),55 +nakamura kana,55 +nakamachi kana,55 +nakahara kaihei,55 +nai (daiduolaoge),55 +nagiichi,55 +naganami (azur lane),55 +nadir,55 +nachetanya loei piena augustra,55 +mutsumi326,55 +musse egret,55 +mushi baibai,55 +mukkuru,55 +mukamo (inujita),55 +mukai (ojimashu),55 +moyashi mou2,55 +movie camera,55 +mos,55 +morning wood,55 +morimura konoha,55 +momo (fruits box),55 +mofuko,55 +mizumidori,55 +miyako (kuavera),55 +miura (rnd.jpg),55 
+minatasiro,55 +minami machi,55 +miltiades malachite,55 +miku (yuuki (yuyuki000)),55 +mikoko1,55 +mike zakarius,55 +mikaze oto,55 +mijuku dreamer,55 +miiko (somnolent),55 +mifuyu (princess connect!),55 +michalis (fire emblem),55 +mezzo (orbanga21),55 +meron nouka,55 +megumoke,55 +megafaiarou (talonflame 810),55 +mayosuke,55 +maya (kancolle) (cosplay),55 +mawashi,55 +matsunaga ayane,55 +matsuda tsubaki,55 +matsubara aoi,55 +matantei loki ragnarok,55 +mash kyrielight (grand new year),55 +mary (14476764),55 +marudeningen,55 +marshadow (gloom),55 +marneko,55 +mario & luigi rpg,55 +marian e. carl,55 +marcy dog,55 +mandragoria,55 +manabe yoshihisa,55 +mamoru-kun ni megami no shukufuku wo!,55 +mameko,55 +makoto nanaya (cosplay),55 +makino yuna,55 +making faces,55 +maiko (setllon),55 +mahkn,55 +mahiro takeumi,55 +magnolia eclair ii,55 +magnhild,55 +mac 4229,55 +m3 (gardevoir m3),55 +luozhou pile,55 +luminous arc 2,55 +lorein,55 +lombre,55 +lira mist,55 +lipstick mark on leg,55 +ling huanxiang,55 +lieserl albert einstein,55 +li chestnuts,55 +lexington (azur lane),55 +lenxiao,55 +lazy,55 +lakshmi (p&d),55 +lace garter belt,55 +l'opiniatre (azur lane),55 +kyosu,55 +kuzu no honkai,55 +kuz,55 +kusaka kabane,55 +kuroneko sakon,55 +kurokiba ryou,55 +kurokage,55 +kuroinyan,55 +kurohiko,55 +kuro (hitsugi katsugi no kuro),55 +kumoya yukio,55 +kumada kazushi,55 +kuma (kumahoihoi),55 +kujou tsukiyo,55 +kousuke0912,55 +koukou (climacool),55 +kouchuu,55 +koube shio,55 +kotomi (happy colors),55 +koshimizu takayuki,55 +kono sekai no katasumi ni,55 +konami mecha musume,55 +koko (oyasuminasai),55 +koito (bowstringsmall),55 +kobeni,55 +koakuma teeri to kyuuseishu!?,55 +ko-yuyuko (shin'en),55 +kjech,55 +kiyosumi hurricane,55 +kiyohara hiro,55 +kivat-bat iii,55 +kissing thigh,55 +kiseki raki,55 +kisara (engage kiss),55 +kisaki mio,55 +kiryuu kazuma (cosplay),55 +kiriya (aprikose 0312),55 +kirishima eriko,55 +kirigaya touko,55 +kirie (kouya no kotobuki hikoutai),55 +kinokoutarou,55 +kimidori (kimidoriri),55 +killy doodle,55 +kikistark,55 +khoaphan96,55 +keruberosu-a,55 +kepra iii,55 +kenao,55 +keikyu (tiltedcube),55 +keijou (cave),55 +kei (hidden),55 +kei (fortune),55 +kei (dirty pair flash),55 +kawamoto momo,55 +katahira (hiyama),55 +kashiwagi kaede,55 +karva (granblue fantasy),55 +kangoku (series),55 +kamuraaa 615,55 +kamukamu6392,55 +kamiura,55 +kamisato ayaka (heytea),55 +kakeami,55 +kajatony,55 +kagura tsukune,55 +kagura ittou,55 +kagi,55 +ka ei volltis,55 +ka1se1,55 +k-ta,55 +jyon104,55 +junda,55 +jum (parallel parasol),55 +jomae gen,55 +jm1990henan,55 +jaws (movie),55 +izumo (azur lane),55 +itunes,55 +ittumozzz,55 +ishihara megumi,55 +isara mao,55 +iowa (kancolle) (cosplay),55 +initial f,55 +inakami,55 +inagaki minami,55 +in hat,55 +illumise,55 +ichiroku,55 +ichigo-chan (mignon),55 +ice pick,55 +hunewearl,55 +howan (show by rock!!),55 +houkai (collapse illust),55 +hotaruika niji,55 +hori airi,55 +honon,55 +honda hiroto,55 +hombre tigre (housamo),55 +holy ring,55 +hollow (bleach),55 +holding laptop,55 +hiroshi (2443303124),55 +hiratsuka tomoya,55 +hirari,55 +hinomars19,55 +hinata fuyuki,55 +hinaki,55 +hinai paulette,55 +hinagiku-19,55 +himemiya anthy (cosplay),55 +hikari hachi,55 +hijiri-tukasa,55 +higa kanata,55 +hieda no are,55 +hideousbeing,55 +hey xander,55 +hermione (pure white angel) (azur lane),55 +helloet11,55 +hej (mugmnm51),55 +heibaise jiangshi,55 +hazuki (pacco),55 +hattori mitsuru,55 +hat on chest,55 +hasu (zatsugami),55 +harunori (hrnrx),55 +harpy (puyopuyo),55 +harpy (nukomasu),55 +happosai,55 
+happiness lilys,55 +hanepochi,55 +hanaarts,55 +hami (lvct),55 +hamanashi (trapiorra),55 +hamalu,55 +halulu,55 +hakuri,55 +ha.skr (hasukara),55 +gyuunyuu bin,55 +gwenpool,55 +guild wars,55 +guardian (breath of the wild),55 +grilled corn,55 +grey sash,55 +grete m. gollob,55 +greenteaneko-chan,55 +goskt10trr,55 +goma irasuto,55 +golden wings,55 +gnsn aile022,55 +glowing crystal,55 +gloucester (azur lane),55 +ginjuuji,55 +gina boyd,55 +ghdwid,55 +gg amang,55 +geokinesis,55 +gensuke (ryun),55 +g-tz,55 +fw 190,55 +fuyusuke (hz+),55 +fuuro (johnsonwade),55 +fushimi inari taisha,55 +furuya jun,55 +fumi (fumifumi-graffiti),55 +fujiyama takashi,55 +fujiwara no mokou (phoenix),55 +fujisaki kaon,55 +fujieda honami,55 +fugtrup,55 +fudeyama (fudeco),55 +fubuki (warship girls r),55 +front to back,55 +formidable (azur lane) (cosplay),55 +foot kabedon,55 +floor ni maou ga imasu,55 +flash tomo,55 +fire truck,55 +female builder (dqb2),55 +fedsnk,55 +fatal frame 1,55 +fast-r,55 +fantastic four,55 +exploud,55 +eva mashiro,55 +eva 08,55 +eungi,55 +eternal tiare,55 +ereshkigal (bitter sweet) (fate),55 +epitaph (1122),55 +eon (greencastle),55 +eno konoe,55 +engineer nemo (fate),55 +en (dorohedoro),55 +elf no radral,55 +elec bobo,55 +ekidona,55 +eds,55 +edoara,55 +edo phoenix,55 +edea cluster,55 +durant,55 +dune (artist),55 +duke tougou,55 +duel academy uniform (yu-gi-oh! 5d's),55 +drogoth,55 +dream c club zero,55 +dramatica,55 +dragon ball z kami to kami,55 +dragalge,55 +dorok,55 +doll house,55 +dokonjou gaeru,55 +dmyotic,55 +dmitriy samarkin,55 +djmn c,55 +diaodiao,55 +deroo,55 +delta zone,55 +delf,55 +deany,55 +darkavey,55 +dai yasude,55 +daenerys targaryen,55 +d. joko,55 +cyaneko,55 +cuisses,55 +crymsie,55 +corsetman,55 +cop craft: dragnet mirage reloaded,55 +connor (detroit),55 +condom in hair,55 +come at me bro,55 +class number,55 +cinnamon (mega man),55 +christine garland,55 +chong feigiap,55 +chokuro,55 +chitose rin,55 +chikage (blackclownery),55 +chihong de tianshi,55 +chihaya (roshin),55 +chiden,55 +cheery pink,55 +charybdis (red chamber of healing) (azur lane),55 +chargeman ken!,55 +chanosuke,55 +chamuring,55 +chabashira sae,55 +cglange,55 +cete (controllingtime),55 +celestea tera,55 +cat food (vocaloid),55 +cardiac arrest,55 +canarinu,55 +camping chair,55 +c3 piyo,55 +burning clothes,55 +btooom!,55 +brown twintails girl (mdf an),55 +borusen,55 +bonple military uniform,55 +bon,55 +boleum (dgrpluna),55 +bokutachi no remake,55 +bokuden,55 +boku no pico,55 +blue armband,55 +blu-ray,55 +blade arcus from shining,55 +black getter,55 +big dipper,55 +bianyuanqishi,55 +bh001,55 +beta (muvluv),55 +berusa (berutoo),55 +berserker rage,55 +berserker (final fantasy),55 +benedia,55 +belfast (azur lane) (cosplay),55 +bechu,55 +barbariana (last origin),55 +barachan,55 +bangsutur,55 +balusah,55 +bacho,55 +b3,55 +azumaya (miyui001),55 +azazel ameri,55 +ayamori mimi,55 +ayaki d,55 +asuka 120 percent,55 +astesia (frlibre on the palace) (arknights),55 +ash blossom & joyous spring,55 +asana,55 +asahina akane (nijisanji),55 +asagiri kogen,55 +artemisia,55 +arihara tsubasa,55 +arigiep,55 +arigato usagi,55 +areadbhar (fire emblem),55 +aqua sclera,55 +aoppoi oto,55 +aoiro 050,55 +aodu fumiyoshi,55 +anubis (surio),55 +another rider (zi-o),55 +ann blyth,55 +anchovy (girls und panzer) (cosplay),55 +amy limiette,55 +among us eyes (meme),55 +amira,55 +amiba00,55 +ami dixie,55 +amekasaikuta,55 +amayo thranana,55 +amamiya kabosu,55 +amagimei,55 +alto clef,55 +alps no shoujo heidi,55 +almond,55 +alha,55 +alcd,55 +akutare (disgaea),55 
+aki sakura,55 +akehoshi subaru,55 +akaya shiki,55 +ajahweea,55 +aizawa masaya,55 +aisa (micuma),55 +airi (quilt),55 +aino (acilealaulica),55 +aimai (luckyfive),55 +ailiner7060,55 +ailane (show by rock!!),55 +aihara yukino,55 +aho girl,55 +adilisia lenn mathers,55 +adapted weapon,55 +a-plug,55 +.52 gal (splatoon),55 +"""rouhou"" ore no iinazuke ni natta jimiko ie dewa kawaii shikanai.",55 +zundacroquette,54 +zukan (db tyoutyo),54 +zouhyou (at4190),54 +zodiac 4 (sekaiju),54 +zodd (berserk),54 +zeni,54 +zen (raspberry),54 +zanasta0810,54 +zacian (hero),54 +yuyake hino,54 +yuushoku,54 +yuukauta,54 +yuu (asaiyuji),54 +yutou (yutou75),54 +yurikawa saki,54 +yumemo,54 +yukimura usagi,54 +yukimaru,54 +yukidaruma718,54 +yukibi (ykb),54 +yuiazu (unit),54 +yuhuan,54 +yucco kxoxc,54 +yp (pypy 5 ),54 +youmicitrustea,54 +yoshi egg,54 +yoroiden samurai troopers,54 +yoi (207342),54 +yatsu seisakusho,54 +yasuhiko yoshikazu,54 +yanazuri (kimossari337),54 +yanazuki,54 +yanagi yagiaji,54 +yamori (stom),54 +yamatogawa,54 +yamada naoko (kodamayanao),54 +yaeno nadeshiko,54 +yadone kanna,54 +xubai,54 +xinta,54 +xiao guan (headdress),54 +x drake,54 +world flags,54 +won (toufunokado),54 +wind7626,54 +wikipe-tan,54 +wickellia,54 +whitebear,54 +whisking,54 +wheatley,54 +wet dream,54 +weien,54 +watanabe saki,54 +warlock 2 (sekaiju),54 +war of genesis,54 +wanko to kurasou,54 +w (fugue) (arknights),54 +volga (azur lane),54 +victor (tama e akira),54 +verse,54 +vergil mon,54 +venus rumble,54 +vapors,54 +van fanel,54 +vampire princess miyu,54 +vaike (fire emblem),54 +usami wataru,54 +uououoon,54 +unown m,54 +uni (reborn),54 +ujuju,54 +uchuu kyoudai,54 +u0rei,54 +tsukinami kasumi,54 +tsukamori shuuji,54 +tsuduki-chan,54 +tsuchinoko (muni muni),54 +triple amputee,54 +transformers energon,54 +toyosatomimi no miko (owl),54 +touhou ayaria epic,54 +tonegawa anju,54 +tonayon,54 +tomte (housamo),54 +tomoyo kai,54 +tomo (princess connect!),54 +tom (tom and jerry),54 +tokiwa nanaka,54 +togawa mayuu,54 +todoroki sora,54 +tobi (pixiv41237754),54 +tilia (ar tonelico),54 +tiki (pangya),54 +thedurianart,54 +the tentacle professor,54 +the magician (tarot),54 +the last story,54 +tevit (tevit15),54 +testament (fate),54 +terufu-chan,54 +tendosora,54 +taro (honyarara00),54 +tange sakura,54 +tanaka yoshitake,54 +tanaka keiko,54 +tales of the world radiant mythology 3,54 +taki rentaro,54 +takanashi kiara (phoenix),54 +tajima nao,54 +taiga hiroyuki,54 +taichi (tomo),54 +tacshojosora,54 +suzaku (kemono friends),54 +super robot wars w,54 +super robot wars og moon dwellers,54 +super robot wars dd,54 +sukekiyo (skky 0),54 +suee,54 +subaru (.hack//),54 +striped underwear,54 +straight cougar,54 +stechkin (girls' frontline),54 +star wars: return of the jedi,54 +star guardian miss fortune,54 +star block,54 +spot (arknights),54 +spike (my little pony),54 +soviet pioneer,54 +soubriquetrouge,54 +sonogami rinne,54 +sonic the hedgehog (cosplay),54 +soleil (soleilmtfbwy03),54 +solarisu,54 +sohaya-no-tsurugi,54 +smooooch,54 +smoked cheese,54 +ska.harumi,54 +sidelocks tied back,54 +shuukenyuu,54 +shuru y,54 +shrek (series),54 +shourin bonzu,54 +shizuko hideyoshi,54 +shishamo (masato k),54 +shishamo@,54 +shiromaru (maniado),54 +shiro (fire emblem),54 +shiramine (srmn09),54 +shinozaki akina,54 +shikajima shika,54 +shigure (blue archive),54 +shibayuki,54 +shibata mizuki,54 +shiba hibino,54 +shenmue iii,54 +shelgon,54 +sheep hat,54 +shanoa (vtuber),54 +shadow moon,54 +sero3eta,54 +sengoku blade,54 +selene (pokemon) (cosplay),54 +seisen no iberia,54 +seirei no 
moribito,54 +sea lion,54 +sclera (asteroid ill),54 +sawch cls,54 +sauropod,54 +satonaka chie (ooya-san wa shishunki),54 +sara (jewelpet twinkle),54 +sapling,54 +sangyou haikibutsu a,54 +san sami,54 +sally whitemane,54 +sakuru,54 +sakurada hane,54 +sakura ai ke,54 +sakou yukie,54 +sakisato kiriko,54 +sakanaokashi,54 +saiga tokihito,54 +saekiya sabou,54 +sabiirodoramu,54 +ryuuki garyuu,54 +rx hts,54 +rukito,54 +rosettastone,54 +rosa (pokemon) (cosplay),54 +rori chuushin,54 +robbie the rabbit,54 +rinna mayfield,54 +ring fit trainee (female),54 +riko201008,54 +rick g earth,54 +reticule,54 +resident evil code: veronica,54 +reizoku ichiba,54 +rei (rei 9146),54 +rayn,54 +raymon,54 +rakisuto,54 +raika grace,54 +raida (j5einmnjp3r49k6),54 +rabittofaa,54 +raayu (0u rayu),54 +ra-pen,54 +quickie,54 +qixiong ruqun,54 +qb 516,54 +putty (phantom brave),54 +pudding (tonpuu),54 +princess wriggle,54 +princess maker (series),54 +pororokka (macareo),54 +pool monitor part-timer (dytm),54 +pooka (odin sphere),54 +poni (rito),54 +pollenoxide,54 +piroaki,54 +pinku (vtuber),54 +pink floyd,54 +pin.x,54 +picrew,54 +photographer,54 +phoebe (granblue fantasy),54 +pharaoh,54 +perfect blue,54 +pencil sharpener,54 +peagunz,54 +peachette,54 +pat attackerman,54 +parusu (ehyfhugj),54 +pa-15 (alluring larkspur) (girls' frontline),54 +ouka (.hack//),54 +otter kawauso,54 +otoshiro noel,54 +osomatsu (nanameno),54 +oscar (fire emblem),54 +osanai shiina,54 +oridays,54 +oregano (olgn eao),54 +orchid mantis,54 +orange tabard,54 +ooya-san wa shishunki,54 +oobayashi mori,54 +oniichan no koto nanka zenzen suki janain dakara ne!!,54 +on (isk1812),54 +omega rei,54 +olwen (fire emblem),54 +okino ryuuto,54 +official alternate hair color,54 +odmised,54 +obvious statement,54 +obsidian slasher,54 +nyantan,54 +nyaa28,54 +nuwanko,54 +nura rikuo,54 +nugaa,54 +norah bright,54 +nonaka kurumi,54 +noel cerquetti,54 +noah (sound voltex),54 +no earrings,54 +nishikawa honami,54 +nishijima ren,54 +nina (ninageya),54 +nes,54 +nemubusoku,54 +nemu (isaya),54 +nemomo,54 +nemari (nemaru 0907),54 +nekohuman221 (tsun221),54 +nectar,54 +necro (street fighter),54 +nearl (shimmering dew) (arknights),54 +nazuna shizuku,54 +nayuko,54 +navigator (alchemy stars),54 +nattou mazeo,54 +natsukimonou,54 +narumi tsubame,54 +naru (wish field),54 +nansen ichimonji,54 +nankotsu,54 +nanakase yashiro,54 +naked pumpkin,54 +nakamori aoko,54 +nakajima nishiki,54 +naga u-chan,54 +nachiya,54 +nabe (ingenmame),54 +muwa12,54 +mustadio bunansa,54 +munehiro (21plus),54 +multico,54 +morimi saki,54 +mori marimo,54 +moratorian,54 +mope,54 +moong gya,54 +moon presence,54 +monokuro (snog),54 +monjiro (sorobochi),54 +mongarit,54 +monarch butterfly,54 +mona (shingeki no bahamut),54 +momoiro oji,54 +mola mola,54 +moetarou,54 +mode (mode-n),54 +mochi (m0chi0000),54 +moccy,54 +mnemosyne,54 +mk (lazymk),54 +mizutani shizuku,54 +mizuryuland,54 +miyuu,54 +miyadeguchi mizuchi,54 +miyabi tsuzuru,54 +missing finger,54 +mishima kazumi,54 +minyo,54 +minkye,54 +ming (mg),54 +minamino souta,54 +midori no makibaoo,54 +miami-chan (ryusei hashida),54 +mgmg 1012,54 +meteor (bard's holiday) (arknights),54 +messier number,54 +merry (grimgar),54 +menjou hare,54 +meiji milk chocolate,54 +medemoisellecu,54 +mayu (kaikan change),54 +max (pangya),54 +maud,54 +matsubara hidenori,54 +matchlock,54 +mascara wand,54 +mark kruger,54 +marchen awakens romance,54 +maple (cyakapon),54 +manorea,54 +mandragora (final fantasy),54 +mamoru (arizona),54 +mam233,54 +malin (kof),54 +makani kohitujito,54 +majin shoujo,54 +magami 
eiko,54 +m134,54 +lyrical denko,54 +luizhtx,54 +love deluxe,54 +loudred,54 +logknn,54 +liusang,54 +little viktoria,54 +lira,54 +lillie (pokemon) (cosplay),54 +lian (aohada bocchi),54 +leotard sweater,54 +leoheart,54 +lemon blossoms,54 +larva,54 +lapucelle (arasoo1210),54 +lace-up sleeves,54 +kyou ami!,54 +kutsugen kanna (mikouken),54 +kusiyan,54 +kuroshiro gurei,54 +kuromu,54 +kura noi,54 +kunzite (sailor moon),54 +kunoichi kaen,54 +kumeta kouji,54 +kumano (fancy waves) (azur lane),54 +kumakura mariko,54 +kuma-tan,54 +kukkuru,54 +kuji toi,54 +kreuzer 00,54 +koyomiuta,54 +kouno sachiko,54 +kosuke orz,54 +kongeraatio,54 +konatsu miyu,54 +konata (knt banri),54 +konachiu,54 +kon (kdash),54 +kokoro ga sakebitagatterunda.,54 +koi kakeru shin-ai kanojo,54 +koeda (koeda1kg),54 +klug (puyopuyo),54 +kizuki rei,54 +kiyo (chaoschyan),54 +kitsune udon (ai br),54 +kisaragi mic,54 +kirishima yurika,54 +kirimatsu,54 +kirby's dream land,54 +king kong,54 +kimochi,54 +kim jin sung,54 +kiki (re 6xxx),54 +kickylian,54 +kibasen,54 +keikenchi,54 +keenbiscuit,54 +kazukingu,54 +kazami kuku,54 +kawasaki toiro,54 +kawara pigeon,54 +kawai rika,54 +katsura dendou,54 +katsura (+araka),54 +kasetsu 03,54 +karo karo,54 +kapiko,54 +kanojo ga aitsu ni sareta koto,54 +kaneda eiji,54 +kanamura ren,54 +kanamori reiko,54 +kaminari ryuunosuke,54 +kamen rider amazons,54 +kamehima,54 +kame (3t),54 +kalina ann (weapon),54 +kaimo (mi6kai),54 +kaen miso,54 +junjun (sailor moon),54 +junjam,54 +jungki dr,54 +joshua (tubie),54 +jirai-chan (masayo),54 +jinpaitugounan,54 +jervis (kancolle) (cosplay),54 +jageungansik,54 +jack skellington,54 +ja mong,54 +izumi curtis,54 +izulizuru,54 +itoucon,54 +isaku,54 +isadora finnsdottir,54 +irene (claymore),54 +irabu ichirou,54 +inuwaka akane,54 +indian flag,54 +inaresi,54 +imaizumi (imaizumin-chi),54 +icebox46,54 +iapoc,54 +hydra,54 +hyakusei,54 +hungrydurp,54 +housen elis,54 +house dragonmaid,54 +hou no ka,54 +hoplitx,54 +homri,54 +hokkyoku hotaru,54 +hiyoko (chick's theater),54 +hitotsu yane no tsubasa no shita de,54 +hitorigaoka,54 +hisanoworld,54 +hirota masatane,54 +hiro (hankakudouga),54 +hinosaki,54 +hinomori shiho,54 +hinata hanabi,54 +hina (cassissoda),54 +high jump,54 +hige (hige2),54 +hibarino tuyuri,54 +herayoshi,54 +hayaoki (asagi-iro seishun-bu),54 +hawkeye (granblue fantasy),54 +hatabou,54 +hat rack,54 +haru (ririne9999rine),54 +hannah santos,54 +hanesaki ayano,54 +hanagin,54 +han-gyaku-sei million arthur,54 +hammerman benkei,54 +hakaze kaoru,54 +guri otoko,54 +gundam lost war chronicles,54 +guess who,54 +guatemala,54 +grail-kun,54 +graham specter,54 +gouf custom,54 +goidou yui,54 +glowing fist,54 +glorious success,54 +glidesloe,54 +glasses kappa,54 +giselle collette vingt,54 +girock,54 +gigantamax meowth,54 +geo siador,54 +genkidaun,54 +gecotan,54 +gau,54 +gara ayuri,54 +gakei3,54 +fuyoyo,54 +fusui,54 +funnel,54 +funamushi (funa),54 +funada ui,54 +full service (mazjojo),54 +fujita (speedlimit),54 +fujisaki yuu (nkrm),54 +fujisaki ribbon,54 +fujisaki miyabi,54 +fujimaru ritsuka (male) (cosplay),54 +fujimaru ritsuka (male) (brilliant summer),54 +fudatsuki kyouko,54 +fortress (sekaiju),54 +footstool,54 +fong pudding,54 +folded braid,54 +flying squirrel,54 +finni chang,54 +finger touching,54 +final gear,54 +fetishist,54 +fenghu (huli),54 +femto (berserk),54 +family portrait,54 +fam fan fan,54 +ez 1011,54 +extra teeth,54 +exercise bike,54 +eric toner,54 +enotou (enotou moi),54 +eneco,54 +emma frost,54 +emilia (re:zero) (cosplay),54 +elk,54 +eldridge (holy night's embrace) (azur lane),54 
+einheadt,54 +e draw paint,54 +duel avatar,54 +drivesuit,54 +drip (meme),54 +dreamparty,54 +dre,54 +drawcrowd sample,54 +dr.beeeee,54 +douyougen,54 +dotekabocha,54 +dot triangle,54 +dorothy (bishoujo mangekyou),54 +doppel (madoka magica),54 +dombear,54 +doha skylightscent,54 +dobito mn,54 +do it yourself!!,54 +divergence eve,54 +directional arrow hair ornament,54 +denson,54 +demiurge,54 +dekapan,54 +ded (housamo),54 +deck of cards,54 +decchi oyabun,54 +dead man's questions,54 +damenano104,54 +daiichi ruby (umamusume),54 +daigada,54 +daifuku (tkja3555),54 +curtain (posuinochuanglian),54 +cum milking,54 +culotte,54 +cosmicmind,54 +compass rose,54 +comic cune,54 +cla (torinabe),54 +cirucci sanderwicci,54 +chuck (pokemon),54 +chroong,54 +chronoa,54 +chou saotome kenkyuujo,54 +chiot (god eater),54 +chichi guai,54 +chevrolet,54 +chen shu fen,54 +chan hone pkpk,54 +chabashira tatsukichi,54 +cerise (pokemon),54 +ceramic man,54 +catwalk (walkway),54 +cat zipper,54 +candy store,54 +candy print,54 +candy (art book),54 +call f,54 +c2-chan,54 +burujawa,54 +bouzu (bonze),54 +borsalino (kizaru),54 +bong,54 +boned-woo,54 +bondage gear,54 +bojue (hakus 1128),54 +bloody queen (elsword),54 +bling,54 +black hair-chan (ramchi),54 +bl/ess,54 +bite addict,54 +binchou-tan,54 +bidet,54 +bianka durandal ataegina (palatinus equinox),54 +bianka durandal ataegina (dea anchora),54 +betterman,54 +beniazumaru,54 +beni-chan (beijuu),54 +bellringer angel,54 +beastmaster (final fantasy),54 +bbb (33kudo),54 +battlefield 1,54 +battle magician (elsword),54 +baeg mi,54 +azarashi (azrsot),54 +aym (ash3ash3ash),54 +auzenhaito,54 +asuda,54 +assal (sennen sensou aigis),54 +asano shimon,54 +asakura (ishida to asakura),54 +asa (xametaler),54 +aryll,54 +arthur pendragon alter (fate),54 +aroddst4,54 +arion canvas,54 +ariga nao,54 +argyle kimono,54 +archetto (publicity strategy) (arknights),54 +arbbun,54 +arakumo gakuen,54 +appleseed,54 +apoidea,54 +aoki kaede,54 +aoi hinata,54 +aoba project,54 +aoaso,54 +ao (aocooler),54 +another eden,54 +another blood,54 +anjingkuxiao,54 +angel leotard,54 +andrea vaeyl,54 +amochin,54 +amanatsu yuzuka,54 +amagasa nadame,54 +alraune (p&d),54 +ale nqki,54 +alarm siren,54 +aki (chromaticclip),54 +akagawa007,54 +aizawa tomo,54 +airi (blue archive),54 +aihara academy school uniform,54 +aida rikako,54 +aida kan mori,54 +agyou sonokou l,54 +agatha christie no meitantei poirot to marple,54 +afterl!fe,54 +after (artist),54 +a grim reminder,54 +a (sofi3103),54 +a7m reppuu,54 +80yakky,54 +8055,54 +69 (tranquilo),54 +5tatsu,54 +58 (opal 00 58),54 +4qw5,54 +440,54 +43 pon,54 +23ichiya,54 +1000000000,54 +zumochi,53 +zuihou de miao pa si,53 +zodiac 2 (sekaiju),53 +zixiong zix,53 +zircon (houseki no kuni),53 +yuzuru (xxelysionxx),53 +yuzukarin,53 +yuurei-chan,53 +yuuki (silent moon),53 +yuuji (yujikazakiri),53 +yunmi 0527,53 +yuna (deadawon),53 +yumeno kanade,53 +yume yoroi,53 +yukishiro tomoe,53 +yukirar,53 +yuki (princess connect!),53 +yukarigawa yumiya,53 +yuhica,53 +yubelluna,53 +yu ikedon,53 +young wang,53 +youkai watch jam: youkai gakuen y,53 +you (esparda),53 +yoshizawa sumire,53 +yorimitsu,53 +yori (y rsy),53 +yonago miko,53 +yoga kimimaro,53 +yoclesh,53 +ymm007xx,53 +yellow (vocaloid),53 +yashiro (silver will),53 +yapi (yabai toki),53 +yakushimaru ryota,53 +xiao qi,53 +xiandao1213,53 +wrath (fma),53 +world heroes,53 +winemvee,53 +white thoroughbred (kemono friends),53 +white-aster,53 +wet pants,53 +wenhe,53 +wavesheep,53 +watagi michelle,53 +wasabisuke,53 +warai,53 +wakabayashi makoto,53 +wada don,53 
+vigoroth,53 +veigue lungberg,53 +uzuki noboru (denchuu shoujo),53 +utsunomiya-sen meguri,53 +utaori,53 +ussr-tan,53 +uss lexington (cv-16) (y.ssanoha),53 +usotsuki hime to moumoku ouji,53 +ushitsuchi,53 +usagyaru (rangu),53 +uruc,53 +uranus (planet),53 +uniform number,53 +underwear around one leg,53 +under ground,53 +unachika,53 +umineco 1,53 +umasanjin,53 +ufotable,53 +uematsu koboshi,53 +two-tone,53 +tuxedo kamen (cosplay),53 +turbine,53 +ttk (ehohmaki),53 +tsunono,53 +tsuji airi,53 +tsubakiyama parry,53 +tripped,53 +trevor c. belmont,53 +trap (drthumt),53 +tracer (overwatch) (cosplay),53 +torture instruments,53 +torneko,53 +torn sack,53 +toranyun (akitsu taira),53 +tonta (tonta1231),53 +tonito,53 +tomihero,53 +tokumei sentai go-busters,53 +tokinhr,53 +tofu (tofulabo),53 +tim rhymeless,53 +tileable,53 +tilarna exedilika,53 +thebrushking,53 +the crawling city,53 +techgian,53 +tarumaru,53 +tanken driland,53 +tamonmaru,53 +tamama,53 +takopii,53 +taki (nakochan),53 +takeshi (mononohu20),53 +takatsuki shiori,53 +takahashi (yakitomato),53 +taka (sanoujo358),53 +tachibana (x quick),53 +sweater under jacket,53 +suzuno ito,53 +suzukawa koume,53 +suryua,53 +sunameri (pixiv3564245),53 +sukoyaka93,53 +sudou kayo,53 +studded armlet,53 +stoic seraphim,53 +steve chopz,53 +spoo,53 +souther,53 +souma haruto,53 +soriku,53 +sorauta,53 +soramimi (seiga),53 +sonzai soumei,53 +soe,53 +sode no shirayuki (shikai),53 +snake mouth,53 +slow dancer (jojo),53 +sky wbo,53 +skorupi,53 +sion eltnam sokaris,53 +siegbert (fire emblem),53 +shuma gorath,53 +shoujo shin'iki shoujo tengoku,53 +shiromomo,53 +shiro oolong-cha,53 +shirakawa miyako,53 +shiomiya iruka,53 +shin (sinsin12121),53 +shin-chan (evangelion),53 +shiki seishirou (eroe),53 +shiina (koufuku graffiti),53 +shibuya 109,53 +shibao aoyama,53 +sheer heart attack,53 +shaman,53 +sgt crisis,53 +serizawa madoka,53 +serapias alice,53 +seraphina,53 +sento (iroiro gottani),53 +senon,53 +senjochi janai,53 +sengoku ace,53 +sendou emi,53 +sen no hatou tsukisome no kouki,53 +sekibanki day,53 +seismitoad,53 +seio girls' academy uniform,53 +seifuku rakuen,53 +sd gundam sangokuden,53 +scream (movie),53 +schwarz (presents) (arknights),53 +scherazard harvey,53 +sawsbuck (spring),53 +sawatari shizuku,53 +satou kaede,53 +sasisage,53 +sasane,53 +sanshouuo,53 +sanpati,53 +sano emma,53 +sanntouhei,53 +samejima minoru,53 +sakuya (sao),53 +sakuria,53 +sakuemon,53 +sakanasoko,53 +sakaki rin,53 +saionji mikoto,53 +saida kazuaki,53 +sage (dq3) (cosplay),53 +saeki ai,53 +ryu hayabusa,53 +rurikoke,53 +rui rubellent,53 +rudeus (haevest),53 +royal guard set (zelda),53 +roy koopa,53 +rotom (wash),53 +rosalyn (hololive),53 +rollingswitch,53 +ripple star queen,53 +rin ateria,53 +rider watch,53 +rickert kai,53 +rick.black,53 +riba,53 +rfb (how rfb stole xmas) (girls' frontline),53 +ren (witch's weapon),53 +rei oe,53 +reference work request,53 +red-d,53 +rathian,53 +raranokusu,53 +rapute (migihidari),53 +ranch jjba,53 +rance 03 leazas kanraku,53 +rama (amaru),53 +raijinto (fire emblem),53 +rabinidaddo,53 +quartett!,53 +qt (space dandy),53 +pull,53 +pu uq,53 +prowl (transformers),53 +prosthetic weapon,53 +prehistoric,53 +pramanix (caster's frost) (arknights),53 +potti-p,53 +porco rosso (character),53 +platypus,53 +pitohui (sao),53 +pink leotard (dq),53 +pilot uniform,53 +piledriver (wrestling),53 +pikat,53 +pieces (series),53 +pie chart,53 +physics,53 +phrecklesart,53 +phantom of the opera (fate),53 +peeing on penis,53 +passport,53 +parfait chocolat second brew,53 +paper background,53 +panana,53 
+oxygen mask (oxygenmask233),53 +otototo,53 +otomachi una (sugar),53 +osoko (osomatsu-san),53 +orochi leona,53 +orie hakua,53 +options,53 +ophelia (painting),53 +ooba juri,53 +ono daisuke,53 +oniyuru,53 +onion rings,53 +onimaru miki,53 +oni tengu,53 +one outs,53 +okey,53 +okara,53 +oishii ishiwata,53 +ocha (hutuumikan),53 +oboro (utawarerumono),53 +nyuukazai,53 +nyopu,53 +nyc,53 +nyaph,53 +nyaa-tan,53 +nunu (league of legends),53 +nuka cola,53 +nu (qjqmfqjqmf02),53 +noririn,53 +noogie,53 +nonomaro,53 +nomikata,53 +noirly,53 +nohotoke honne,53 +niwa nagahide (oda nobuna no yabou),53 +nishimuku meri,53 +ninym ralei,53 +ninomae ina'nis (cosplay),53 +nikumeron,53 +nikuji-kun,53 +nika nanaura,53 +niea,53 +new (lk),53 +neroma shin,53 +nekojishi,53 +nee chanto shiyou yo!,53 +nari (narikashi),53 +nara shikadai,53 +nanashi (soregashi),53 +nanakorobi nene,53 +nanairo reincarnation,53 +namek,53 +namatame chitaru,53 +nakonako,53 +nagae iku (cosplay),53 +mythbreakers (hololive english),53 +murayama ryouta,53 +murata (igaratara),53 +murasaki kajima,53 +murasa minamitsu (cosplay),53 +mumulatte,53 +mr2d,53 +mr.takealook,53 +moyahara,53 +morishima hodaka (tenki no ko),53 +moonandmist,53 +monowe,53 +momo walnut,53 +moedredd,53 +moe (splatoon),53 +moe (bosshi),53 +mochisuna,53 +miyabi (miura105),53 +miya (chocolate holic),53 +miura yumiko,53 +mishima yoshikatsu,53 +miramu (ramu450471),53 +minamoto no raikou (traveling outfit) (fate),53 +minahamu,53 +minafuni,53 +miki sauvester,53 +miimu (nelreg3),53 +michinoku (hiking miusan18),53 +michimoyo,53 +mgx0,53 +messiah (game),53 +mersoleil03,53 +mermaid (artist),53 +menghuan tian,53 +memphis (azur lane),53 +melon hair ornament,53 +meiz,53 +meia,53 +medusa (mythology),53 +mayuri kaichou,53 +maya yukiko,53 +maya schrodinger,53 +matsukaze rin,53 +matsubara saya,53 +mat play,53 +mashugure,53 +mary jane watson,53 +marvin (omarvin),53 +mario & sonic at the olympic games,53 +maria (junketsu no maria),53 +manya sora,53 +makino momiji (artist),53 +maitake (loose),53 +mai (maika 04),53 +mahou shoujo nante mou ii desukara.,53 +magnifire,53 +mage (bikini warriors),53 +madcocoon,53 +macken,53 +machida (ojimashu),53 +ma5,53 +lunarclinic,53 +luna noah,53 +luna (gunfire),53 +luna2,53 +lumi (merryweather),53 +luft7star,53 +luen kulo,53 +lucky (1045044604),53 +lucia (lunar),53 +loveless,53 +lolipaedq,53 +loika,53 +lin lee koo,53 +limalisha,53 +liberty leading the people,53 +lfacras,53 +lewdzure,53 +levia (shepherd0821),53 +level9kaito,53 +levasoj,53 +leon (fire emblem),53 +lemontyoisy r18,53 +leebigtree,53 +lee ji-min,53 +lal!role,53 +lagiacrus,53 +la galissonniere (azur lane),53 +kyuupura,53 +kyuu (pinpo),53 +kyariko,53 +kusunokimizuha,53 +kurosawa minamo,53 +kuromitu (kageneko),53 +kuromaru9,53 +kuro ai,53 +kureha (angelite),53 +kuramoto kaya,53 +kuko (flower knight girl),53 +kouno ibuki,53 +kotaroukuroo,53 +kotarou (yukina1721),53 +kos-owl,53 +kono yo no hate de koi wo utau shoujo yu-no,53 +kono2noko,53 +komori met,53 +komodo dragon (kemono friends),53 +komedawara,53 +kokutou mimi,53 +kokage-san,53 +kojou,53 +kogalashi,53 +knives (knives777),53 +kneehighs removed,53 +klinklang,53 +kizuna akari (a.i. 
voice),53 +kizuna ai (cosplay),53 +kiwamu,53 +kitsune saiguu,53 +kitou en,53 +kitagawa unagi,53 +kisaragi gentarou,53 +kisaragi alice,53 +kirigaya hitsugi,53 +kirara (inuyasha),53 +king ghidorah (godzilla: king of the monsters),53 +kin-san (sasuraiga),53 +kimi (hearty yuuki),53 +kiliko-san,53 +keny,53 +kenharu,53 +kengzeta,53 +keizou,53 +kawaii inu5,53 +katsuragi nantoka,53 +katanako,53 +kasumi (kancolle) (cosplay),53 +kaori (sky-freedom),53 +kanzume,53 +kanzaki tomoyo,53 +kannuki hisui,53 +kannoaki,53 +kanisaka shizuku,53 +kanikame,53 +kanase kanon,53 +kanaritu,53 +kamen rider chalice,53 +kakugari kyoudai,53 +kaiba,53 +kai schren,53 +kago1205,53 +juusou kikou dancouga nova,53 +juuka@100neko,53 +june mina,53 +ju-zika,53 +joutarou,53 +jojo no kimyou na bouken: all star battle,53 +jiukuzi18797,53 +jinkai yamizawa,53 +jian (weapon),53 +jervis (azur lane),53 +jaw titan,53 +jacknife,53 +jackasss,53 +izumiyamisono,53 +izu (tea value lord),53 +itsumi erika's loader,53 +itocoh,53 +ishida masayuki,53 +ishida kana,53 +iseria (epic seven),53 +isao (wasamoti),53 +iri-neko,53 +invincible candy,53 +inuboe,53 +intron depot,53 +inkling (cosplay),53 +implied death,53 +imminent suicide,53 +illustrious (illustrious ball) (azur lane),53 +ikuya daikokudou,53 +idu michito,53 +ichinose yuu,53 +ichihara kazuma,53 +hyuuga masamune,53 +hyuuga (azur lane),53 +hyoumon (saihokutan),53 +hu tao (genshin impact) (cosplay),53 +hrist valkyrie,53 +houjichaoic,53 +horonamin,53 +hone kawa,53 +hone (koppun),53 +holoro,53 +holmy (holmesdmode),53 +hokusen,53 +hokenshitsu no shinigami,53 +hizuki miu,53 +hitomi kazuya,53 +hiroserii,53 +hiromyan,53 +hiroaki (huruhonya),53 +hirako shinji,53 +hinata-bokko (sanpo fuumi),53 +himetsuba,53 +hikami dan,53 +hh,53 +hero (10cl3),53 +helmet (touhu812),53 +hekirake,53 +healer girl (yuuhagi (amaretto-no-natsu)),53 +hataraku ufo,53 +hasebe aya,53 +harurun,53 +haruka karibu,53 +haruka (haruka channel),53 +haniyama kurofo,53 +handsofmidaz,53 +hana87z,53 +hamioura,53 +hal (ojou),53 +hakkotsu shitai,53 +guricoogen,53 +guchagucha,53 +gris (vertigris),53 +grainne (fate),53 +gokotai (kiwame),53 +gogopaint,53 +glowing mushroom,53 +glorious azure costume (umamusume),53 +giraffe girl,53 +ghost rider,53 +genocide kitten,53 +gen uma mai,53 +gangsta,53 +fuwakuyuu,53 +futou ryouko,53 +full frontal,53 +fukuda fukutarou,53 +fujita (dorohedoro),53 +fujinozu,53 +fujimaru ritsuka (female) (starlight fest),53 +ford mustang,53 +folded wings,53 +flower bra,53 +floe,53 +feather-trimmed coat,53 +father (fma),53 +excaliblader,53 +ewokaku kitsune,53 +evdokiya infernalis,53 +eun bari,53 +eumi 114,53 +eufonie,53 +etsusa oohashi,53 +eroneko-san,53 +eriko (nakaeri gogo),53 +erection under blanket,53 +enuma ru,53 +entombed air defense guardian princess,53 +emina (emina&aki),53 +emile elman,53 +eltonel,53 +elle vianno,53 +elle sweet,53 +elizabeth bathory (fate/extra ccc) (cosplay),53 +element hunters,53 +electronic cigarette,53 +eldridge (the inquisitive recluse) (azur lane),53 +elbow cutout,53 +edamameoka,53 +durindana 7,53 +duo chromatic,53 +drawinik,53 +doushite,53 +donatello versace,53 +dodgeball,53 +djayo,53 +divine child of rejuvenation,53 +discord,53 +disappear,53 +dhjs 0010,53 +devil may cry 2,53 +denzel,53 +densya t,53 +den (denwhat),53 +dekapoi,53 +degarashi (ponkotsu),53 +deer boy,53 +dedeen,53 +ddongu,53 +dayoon,53 +date naoto,53 +dancing flower,53 +daeraeband,53 +d midiror,53 +cwilocky,53 +crystal tail,53 +crazy crazy (idolmaster),53 +courreges accel,53 +countdown timer,53 +cosmo (chainsaw man),53 +conmimi,53 
+concentric circles,53 +comiket 89,53 +comcom,53 +collateral damage studios,53 +code geass: fukkatsu no lelouch,53 +coco maru,53 +coach,53 +cleveland (gentry knight) (azur lane),53 +chronicle 2nd,53 +choujikuu kidan southern cross,53 +chipmunk costume,53 +chino (ch no),53 +chikafumikou,53 +chienon,53 +chie rumiko,53 +chibibro,53 +charlie magne,53 +chaborin,53 +chabenmus,53 +cementite,53 +celine jules,53 +ce-tan,53 +carracosta,53 +card background,53 +can zhu,53 +caitlin (fall 2021) (pokemon),53 +buru-dai,53 +burger malfunction,53 +bunny on shoulder,53 +bunny1219,53 +bthx,53 +brmameng,53 +brioche d'arquien,53 +braided tail,53 +boxer,53 +bouffalant,53 +botan mochito,53 +bobbbob,53 +blunderbuss,53 +blaster (splatoon),53 +black frost,53 +biting arm,53 +benisumomo,53 +beni shouga,53 +beni (pokemon),53 +beck (mighty no. 9),53 +bear mask,53 +baskin-robbins,53 +baozha gangbi,53 +banjo,53 +azusa (azunyan12),53 +azuki nagamitsu (touken ranbu),53 +aymusk,53 +aye-aye (kemono friends),53 +ayanami (demon's finest dress) (azur lane),53 +axolotl (minecraft),53 +augetsix,53 +atcesolcyc,53 +asou yuma,53 +asn s,53 +aruciii,53 +artificial eyes,53 +arteslav,53 +arsene,53 +arrokuda,53 +arisue tsukasa,53 +aranami shibuki,53 +araizumi rui (style),53 +apita (apitaro),53 +aoi choko (aoichoco),53 +aoba anoa,53 +ao (sodalite),53 +anywhere knock,53 +anpolly,53 +anna (small night),53 +anju,53 +angry birds,53 +angemon,53 +ana (tateana juukyo),53 +amicia michella,53 +amesarasa,53 +amemiya taiyou (mixi max zhuge kongming),53 +amatsuka rikka,53 +amakoke,53 +alternate species,53 +alternate ears,53 +alpaca carlesi,53 +algae (5455454541),53 +albert chamomille,53 +akira b,53 +akikan!,53 +akeyama,53 +akabane (pixiv3586989),53 +aka tonbo (lililil),53 +aihara-rina,53 +aftamc,53 +adricarra,53 +adaajt,53 +acfun girl,53 +abbey road,53 +aata1007,53 +96dgd,53 +7nanappe,53 +72 (nananatsu),53 +61cm quadruple (oxygen) torpedo mount,53 +191karasu,53 +zukanosuke,52 +zoner,52 +ziro (daydozen),52 +zhumojian,52 +zhen ji,52 +zatanna zatara,52 +zaku i,52 +z19 hermann kunne (azur lane),52 +yuxing yuhang,52 +yuuna katsumi,52 +yuumaru (you-mya),52 +yuuki yuuna wa yuusha de aru: hanayui no kirameki,52 +yuuki shin'ichi,52 +yunomi (yunomi imonuy),52 +yukinoshita (shaonjishi),52 +yukiaka,52 +yuizayomiya,52 +yui (summer) (princess connect!),52 +yui (marine-drive),52 +yui (kari),52 +yuel,52 +youzu (youzuyozu),52 +yonekura (bakuzen),52 +yf (hbyg),52 +yellow hood,52 +yebisu,52 +yaziri,52 +yataba,52 +yahoo!,52 +yaa-kun,52 +xuuikie ashe,52 +xanxus,52 +wu yao jun,52 +wormadam,52 +wolfchev,52 +witchcrafter madame verre,52 +winni,52 +wind glider,52 +white sweater vest,52 +what i watched what i expected what i got (meme),52 +wendy garrett,52 +welding,52 +wd1113,52 +watanukin (etson122127),52 +watanae yuuka,52 +wasabi-chan (eha7y),52 +wano (azayakam),52 +waltz (dance),52 +wakagashira,52 +w.d. 
gaster,52 +vycma,52 +vivi (dakemakura),52 +vine whip,52 +vf-0,52 +velvet room,52 +vanguard (azur lane),52 +v-room,52 +uyu (soda uyu),52 +utayoi (umakatare),52 +usuusu,52 +ushihashiru,52 +usagigenki,52 +urabe rika,52 +uori,52 +una (mazinger),52 +umishima rinta,52 +ulquiorra0,52 +ueno naoka,52 +ubukata shinji,52 +twice12314,52 +tsuyuhara miu,52 +tsutsui misa,52 +tsuseki,52 +tsurumaki kazuya,52 +tsukidate chiyo,52 +tsuki no iwakasa,52 +tsuiru,52 +tsugumi amon,52 +tsuga,52 +triangle choke,52 +towa rui (artist),52 +toshiharu (s narutoshi),52 +torimahera,52 +torawar,52 +tonomiya68,52 +tomycase,52 +tomoyan (nyakkoya),52 +tomoya (artist),52 +tomid,52 +tomboy-chan (aestheticc-meme),52 +tomatsu haruka,52 +toguchi masaya,52 +toe shoes,52 +tobatoinu,52 +tiramisu,52 +tink (disgaea),52 +tiltrotor,52 +tiamat (granblue fantasy),52 +thigh focus,52 +the sims,52 +the birth of venus,52 +thanksgiving,52 +thai student uniform,52 +terminus est,52 +teraguchi,52 +tepengu,52 +tennomifune academy uniform,52 +ten-chan (pan (mimi)),52 +tekken revolution,52 +teiiku,52 +team rainbow rocket uniform,52 +tatami (loop),52 +tashimo,52 +tarowo,52 +taroji,52 +tampon,52 +tamon ketsuyuki,52 +tamarinfrog,52 +tama (new island),52 +takobe,52 +take yaki,52 +takasu yasuko,52 +takanashi otoha,52 +takami ryou,52 +takahashi record,52 +taka yanagi,52 +taka t,52 +taihou (warship girls r),52 +t bone (06tbone),52 +suzuno (kazahanasu),52 +suzumiya annko,52 +suzuka,52 +sumirou-kun,52 +sumida kichi,52 +suikyou (aqua cities),52 +subaru impreza,52 +stuffed pegasus,52 +striped wrist cuffs,52 +striped wall,52 +stern starks,52 +stella no mahou,52 +ssbaby,52 +sponsor,52 +spoken paw,52 +spider-man noir,52 +sousuke (sauceke),52 +souryuuin akemi,52 +souryuu kai ni (kancolle),52 +soreyuke! uchuu senkan yamamoto youko,52 +sora-bozu,52 +snowchild,52 +smith & wesson,52 +slow motion,52 +slovenly,52 +sleepless (wrysmile),52 +skarner,52 +sisoha,52 +siriuflong,52 +sinzire,52 +silcoon,52 +signal 1120,52 +shy (series),52 +shosho oekaki,52 +shoooohhhh,52 +shoboon,52 +shisui,52 +shirow masamune (style),52 +shirotsume souwa,52 +shiromonefu,52 +shinotarou (nagunaguex),52 +shinonome ukyu,52 +shinocco,52 +shinobu jacobs,52 +shin yomawari,52 +shimadouma,52 +shiizaki hinaki,52 +shiina (vuurrood),52 +shigaoka,52 +shida (xwheel),52 +shiboru,52 +shan gui yu yao,52 +shalnark,52 +shaffelli,52 +shadow (ff6),52 +seruel,52 +serph,52 +seri (zankuro),52 +sentaku-bune,52 +senna (league of legends),52 +senjougahara pose,52 +seminoyu,52 +sekiyu.,52 +sekisei (superego51),52 +sekien no inganock,52 +seisen school uniform,52 +see you,52 +scourge regalia,52 +scotch (blaze),52 +scatterbug,52 +saxyun,52 +satsuki yuu (awairo),52 +satansoft1,52 +sasai saki,52 +sasagawa kanon,52 +sarutobi konohamaru,52 +sarah mcdougal,52 +sapphire satou,52 +sangoku taichi,52 +same (g shark),52 +samazuka mashiro,52 +sam ashton,52 +sakula,52 +sakuama,52 +sakamoto sue,52 +sakamoto himemi,52 +sail (sail-away),52 +sagiri yuuko,52 +safu,52 +saffron (flower knight girl),52 +sachi (yumemayoi),52 +saboten7,52 +s.e.m (idolmaster),52 +ryuuguu otohime,52 +ryuu ga gotoku 4,52 +ryoutsu,52 +ryohgo narita (mangaka),52 +ryofu,52 +ryisu (deluxe<<<),52 +rubyaano (ducl323),52 +ru (famia),52 +rourouki,52 +rosa (holiday 2019) (pokemon),52 +rogue one: a star wars story,52 +rktlek159,52 +rise of the guardians,52 +riochan,52 +rio -rainbow gate!-,52 +ring (ring fit adventure),52 +riku (melty drop),52 +rikiel,52 +retumihari,52 +rena (watamote),52 +reluvy,52 +rekurieeru,52 +rei-chan (konachiu),52 +red (konkichi),52 +rasetsu001,52 
+ramona (the prey),52 +qitoli,52 +purutoppu (toranohige),52 +pumpkin mask,52 +prossss,52 +powder (arcane),52 +pov adoring,52 +possessive,52 +poponpin,52 +polki,52 +poking penis,52 +poke ball hair ornament,52 +piyobomu,52 +piper thibodeau,52 +pink suit,52 +pink nose,52 +pink (among us),52 +pink-haired girl (kay yu),52 +penis in glove,52 +pecchii,52 +panther pink (precure),52 +pam-pam (precure),52 +pacifica northwest,52 +otogi tetsurou,52 +otogi:spirit agents,52 +osanai shouko,52 +oruta (owata saber),52 +ooama no ake no mitori,52 +old coco,52 +okuda nao,52 +oku 1225,52 +okoru ringo,52 +oki (koi0koi),52 +ogata matake,52 +ogata airi,52 +oekaki-daisuki-dessu,52 +odin (azur lane),52 +odagiri sakura,52 +oda koziki,52 +oberonia rhea,52 +nyanyanoruru,52 +nyangoroo (nekopanchi bashibashi),52 +nwon'yo pasun,52 +nuenya,52 +ns (ntrsis),52 +noredo nug,52 +nool,52 +nonji (sayglo halo),52 +nokke o,52 +nokataro,52 +noes,52 +nishikino maki's mother,52 +ninyo (user pddg5877),52 +ninja (fft),52 +nina tucker,52 +niichi (niichi21),52 +niconico rpg,52 +nerunnn,52 +neru (neruneruru),52 +nemissa,52 +nekoge,52 +neibii,52 +nay,52 +naruse jun,52 +narusawa (njzc2582),52 +nanatsuka,52 +nana (series),52 +namako mikan,52 +nakatsugi kyuuma,52 +nakasaku-p,52 +nak yama,52 +naib subedar,52 +nagomiya (shousha),52 +nagami tami,52 +nagaburo imoni,52 +muumuu (sirufuruteienn),52 +mutsuki kaya,52 +mushroom (artist),52 +mugura,52 +mubouou aasaa,52 +mouth focus,52 +mousse (food),52 +motto! haramase! honoo no oppai isekai chou ero succubus gakuen!,52 +motomiya ryou,52 +morioka hideyuki,52 +moninora,52 +mon (manarestra),52 +momoman (pink voltage),52 +mochizuki kazuto,52 +mochinabe,52 +mobukichi,52 +mobu (wddtfy61),52 +mo ying yu,52 +mizuno eita,52 +mizukamakiri,52 +miyazaki (watamote),52 +miura cat,52 +missing poster,52 +miso (mimimiso),52 +mirai nostalgia,52 +miochun,52 +minton,52 +minoshi,52 +mimura (nnnnnnnnmoo),52 +mimori (blue archive),52 +mimlememel,52 +mimitoke,52 +mimiga,52 +milkor mgl,52 +mila (yuta27315),52 +mikoto (mio),52 +mikebosi,52 +mikami reiko,52 +mihoshi (gundam bf),52 +miharin,52 +mig-29,52 +mhr,52 +mezzo danger service agency,52 +memories off 6,52 +melda deitz,52 +meguri uguisu,52 +mea (brunhilde),52 +maya (dq11),52 +matsuoka (mtok 0),52 +master of epic,52 +mash kyrielight (under the same sky),52 +mase yuuko,52 +masato (mk),52 +maru (memoriatechnica),52 +mamesi (suhk8583),52 +mame usagi,52 +malkuth (project moon),52 +makoto (mk10),52 +makinan,52 +maille-breze (azur lane),52 +maid (mechanical buddy universe),52 +mai (xskdizzy),52 +macaron (amaburi),52 +mable west,52 +maako (yuuyake.),52 +ma-hain-scarlet,52 +m alexa,52 +m4 sopmod ii (devourer of the feast) (girls' frontline),52 +lyn (bridal) (fire emblem),52 +luomo,52 +lucy (rusi-juren328),52 +love potion,52 +lord boros,52 +loftcat,52 +lmonster guai,52 +lizchief,52 +lissandra (league of legends),52 +lion (macross frontier),52 +linez,52 +lickilicky,52 +leonzo,52 +lentiyay,52 +len (cat),52 +lemmy koopa,52 +leather collar,52 +lear (pokemon),52 +leaf (arknights),52 +laserbeak,52 +lasa (lasa1116),52 +laphy,52 +lala (yamada goroku),52 +la liga,52 +kyokugen dasshutsu adv: zennin shibou desu,52 +kusunoki (gwzx5574),52 +kusujinn,52 +kuru2pantu,52 +kuroyuki,52 +kurotea,52 +kurosu tsugutoshi,52 +kuraki hiro,52 +kudou michiya,52 +koyama sao,52 +koukawa asuka,52 +kouichirou,52 +kottungyang,52 +kototoki,52 +konjiki no yami (cosplay),52 +kokuyouseki,52 +kokusan moyashi,52 +kokorori-p,52 +kokoro navi,52 +kokko (014kko),52 +kojima ayami,52 +koji (koji-a),52 +koide natsuno,52 +koi to 
uso,52 +kogarashi (wind of winter),52 +knight gundam,52 +km (nijie104352),52 +kloe rinz,52 +kk724,52 +kiwi (bird),52 +kitkat,52 +kirou (kiruyuu1210),52 +kirara (arknights),52 +kinsou no vermeil,52 +kinoji,52 +kingudejii,52 +kikuhara karin,52 +kermit the frog,52 +kenko (a143016),52 +ken sogen,52 +ken-chan,52 +kemono no souja erin,52 +keisan,52 +keiko rin,52 +kazuki kisuke,52 +kaze fukeba nanashi,52 +kayase,52 +kawauchi (kaz7ry),52 +kawamura ami,52 +katuhata,52 +katsushika hokusai (painting summer) (fate),52 +kato (mogumoguokome),52 +katakura shinji,52 +kasumi yozora,52 +kashiwabara en,52 +kashiba aira,52 +karioi yuu,52 +karin (yashiro sousaku),52 +karamimame,52 +kapkan (rainbow six siege),52 +kanoe (kanoe502),52 +kanengomi,52 +kan (tachi),52 +kameno sachi,52 +kamen rider x,52 +kai1up,52 +kachou (ojimashu),52 +kaburagi (decadence),52 +kabe ni hamatte ugokenai!,52 +kaabon meshi,52 +k-takano,52 +jun'iku,52 +jumbo,52 +julioalqae,52 +juju,52 +joke,52 +jobski,52 +jin (avatar),52 +jesus (saint onii-san),52 +jax (league of legends),52 +james moriarty (gray collar) (fate),52 +james (vocaloid),52 +itoo,52 +itokon300,52 +ishida masatsuki,52 +isetnation,52 +isaki (shimesaba),52 +isaka wasabi,52 +inko,52 +infinity (kkx132),52 +inami anju,52 +ika (hinatu1992),52 +igul,52 +ichijouji ken,52 +hunk (voltron),52 +huijin zhi ling,52 +huge moon,52 +hpa (foolish factory),52 +houtani yukitoshi,52 +houjuu nue (cosplay),52 +hoto cocoa (cosplay),52 +hornet (warship girls r),52 +honjou masato,52 +holding kettle,52 +hokkaido nippon-ham fighters,52 +hiva+,52 +hitman (game),52 +hinase kaguya,52 +himiko (btooom!),52 +hikoboshi,52 +highwayman (darkest dungeon),52 +higa norio,52 +hiepita (1014),52 +hibitono,52 +heymans breda,52 +hexafusion,52 +hellyon white,52 +heliolisk,52 +heijou institute school uniform,52 +hdkg,52 +hayashi ekyuu,52 +haurchefant greystone,52 +hattori toko,52 +hat writing,52 +harukawa syuria (jack dempa),52 +hardsuit,52 +hanatsuka,52 +hamster hood,52 +hamakaze (kancolle) (cosplay),52 +hallohi,52 +ha-chan (mahou girls precure!),52 +h016,52 +gwen poole,52 +gurina,52 +gumball,52 +grumpig,52 +green scales,52 +green facial hair,52 +gragas,52 +gold panties,52 +gogoat,52 +glico,52 +glass bowl,52 +gitano (arknights),52 +girl arms,52 +ginyu force,52 +ghostface,52 +gepard m1 (girls' frontline),52 +gen'en (sioherashi),52 +gekokujou (vocaloid),52 +gehrman the first hunter,52 +gear senshi dendou,52 +gavrof,52 +game-style,52 +futaba otohiro,52 +furuhara,52 +furapechi,52 +fur-trimmed sweater,52 +fukumoto nobuyuki (style),52 +front innocent,52 +freshtango,52 +frederika (hitsugi no chaika),52 +frayed clothes,52 +foxykuro,52 +foxeleos,52 +fongban illust,52 +flufflixx,52 +fisticuffs club,52 +fishnet shirt,52 +eyewear pull,52 +eyebrows visible through headband,52 +ewen egeburg,52 +esubui,52 +ermao wu,52 +epiko (aki),52 +emperor (stand),52 +elm (pokemon),52 +elbow on arm,52 +eila yagyu,52 +ei tantan,52 +egami tsubaki,52 +efyuru,52 +edmond dantes (first ascension) (fate),52 +ebippoid,52 +eahta (granblue fantasy),52 +e-kichi,52 +dyson,52 +dynazenon (character),52 +dygenguar,52 +dur-nar (arknights),52 +drops (ragnarok online),52 +dragon poker,52 +dp-28,52 +double footjob,52 +doseisan (dosei-san),52 +doridori,52 +donburi,52 +doku hebi,52 +dobu (moor),52 +dna^2,52 +djinn equip,52 +discharge mochi,52 +disassembly,52 +diol twee,52 +dinosaur riding,52 +digimon universe: appli monsters,52 +dexter's laboratory,52 +dexp,52 +desire driver,52 +desert eagle (girls' frontline),52 +deecha,52 +dedeko,52 +dc (doughertyevans),52 +dark 
schneider,52 +dao trong le,52 +dalmatian,52 +dakun,52 +daftbonkers,52 +cure peace (cosplay),52 +culdcept,52 +crusher joe,52 +cross-laced panties,52 +cristalavi,52 +crested ibis,52 +count of monte cristo (gankutsuou),52 +connect (madoka magica),52 +concord (azur lane),52 +comiket 87,52 +colonnade,52 +cold (hoshinoskull),52 +cocolo (co co lo),52 +cm lynarc,52 +clasp,52 +claire francois,52 +city (arawi keiichi),52 +cigar cat,52 +chunta,52 +chocpocalypse,52 +chloe no requiem,52 +chirorian,52 +chimimo,52 +chimaro,52 +chikuwa (majihima),52 +chiki (botugo),52 +chigusa,52 +chidejika,52 +chibitalia (hetalia),52 +chestnut thoroughbred (kemono friends),52 +chest stand,52 +cheria barnes (little imp),52 +cheesestyx,52 +charmal,52 +chariot.f,52 +chang'e,52 +cerberus (houtengeki),52 +cat tower,52 +caron (straycat910),52 +carinae,52 +card between fingers,52 +cadie,52 +c home,52 +bzerox,52 +byousoku 5 centimeter,52 +butter curry,52 +burning arisa,52 +bunta ru,52 +bunny-shaped eyewear,52 +bulge tsuki,52 +buffalo,52 +buddy complex,52 +breathing tube,52 +breast size switch,52 +bounen no xam'd,52 +boot straps,52 +bodyboard,52 +bloomers removed,52 +blitzball,52 +black cloud,52 +binchou-tan (character),52 +biittertaste,52 +belafu (human),52 +bel (cyancapsule),52 +bebe-tan,52 +bebatch,52 +beatrice (umineko) (cosplay),52 +beast wars ii,52 +bbbannooo,52 +batako (pixiv54063972),52 +baru (bar 0405),52 +bara (totocos),52 +bandaged tail,52 +banba shin'ya,52 +baltoy,52 +baka (mh6516620),52 +backbreaker,52 +azure luna,52 +azuma kei,52 +azrael (blazblue),52 +aylwin (azur lane),52 +ayamoto,52 +avataro sentai donbrothers,52 +atmospheric reentry,52 +asymmetrical bikini,52 +ashleyloob,52 +ashi ura,52 +aruma (shinrabanshou),52 +arukiru,52 +artsheops,52 +artist painter,52 +aria tenetorisu,52 +arhah,52 +archerfish (warship girls r),52 +aramaki scaltinof,52 +aoya (ayoyame18),52 +aononchi,52 +aoiakira553,52 +anoko (darenokoanoko),52 +animal band panties,52 +anima,52 +angelia avallone,52 +angel statue,52 +andyface,52 +amino dopple,52 +amelia (fire emblem),52 +amatsuka poi,52 +amatsuji,52 +amatsu misora ni!,52 +amashun,52 +amano otoha,52 +amakano 2,52 +amaimon,52 +alyn (fairy fencer f),52 +alter (kxk7357),52 +alle gro,52 +alisa nilsen,52 +albert maverick,52 +alaskan klee kai,52 +akizuki nagi,52 +akiyama (yamagoya),52 +akane soir,52 +akagi gishou,52 +akabeco,52 +aiu .,52 +aincrad,52 +ah (pixiv62888100),52 +agent 3 (splatoon 3),52 +adventurequest worlds,52 +ado (singer),52 +adjusting skirt,52 +absent,52 +abigail lincoln,52 +a (user vtsy8742),52 +a.q.u.a,52 +8ne (nitika127),52 +53c,52 +333 (pixiv8601259),52 +0-toki,52 +zombie no afureta sekai de ore dake ga osowarenai,51 +zhourues,51 +zerg (starcraft),51 +zee n3,51 +zaza (x-can01),51 +zankyou no terror,51 +yuu (yuu ammy),51 +yuria the witch,51 +yumemomosaka,51 +yukikasa,51 +yukihime (mofumofu2225),51 +yuki shiro,51 +yukari (momoko),51 +yudetama,51 +youtube username,51 +yoss 3,51 +yoshihara seiichi,51 +yoshida yoshitsugi,51 +yodobashi yuo,51 +yi yu,51 +yellow wristband,51 +yellow rope,51 +yellow cloak,51 +yawarabi juubee,51 +yasu rintarou,51 +yanagisawa naoko,51 +yamazaki sagaru,51 +yamazaki mitsuko,51 +yamane masato,51 +yamagarasu,51 +yam (yamap mako),51 +yakihoko,51 +yadamon (neverland),51 +xxxsoiu1,51 +xlyami,51 +xia yu yao,51 +wolt (fire emblem),51 +winterfall (artenh),51 +wingman,51 +will smith slapping chris rock (meme),51 +webley (girls' frontline),51 +waterskiing (meme),51 +water blue new world,51 +washi no tosaka,51 +warspite (kancolle) (cosplay),51 +walther p99,51 +walnut,51 
+wallpaper forced,51 +wagomu17,51 +vy2,51 +vundo (gyee),51 +vladimir putin,51 +vividblue,51 +vice (kamen rider revice),51 +venus (p&d),51 +veil over eyes,51 +veerinly,51 +uya (yurukah),51 +usukuchi (impasto life),51 +usayoshi (touhopu2),51 +usami masamune,51 +urotare,51 +urashima keitarou,51 +uraraku shimuni,51 +urabe katsuto,51 +untitled goose game,51 +unajuu (set mk),51 +ultraman zero,51 +uasi,51 +twi'lek,51 +tv camera,51 +tsuruma konoe,51 +transformers cybertron,51 +toyu,51 +toyoda izumi,51 +towtow redoland,51 +touou,51 +toudou jinpachi,51 +torinoesa,51 +torii (kedamatori),51 +tora (trampjing),51 +top! clover (idolmaster),51 +toomi yuna,51 +tonan (l0l0l0l0l0l),51 +tomoki tomonori,51 +tomoegata naginata,51 +tiona,51 +tinsel,51 +timmy (animal crossing),51 +tidal wave,51 +the last remnant,51 +the fairly oddparents,51 +tekito03,51 +teinba,51 +tegami bachi,51 +teddy bear hair ornament,51 +tecoyuke,51 +tearing paper,51 +tea (retroz),51 +tatsuhiko,51 +tateyama kenjirou,51 +tasselcat,51 +tartarus,51 +tao (tao15102),51 +tanya (granblue fantasy),51 +talesofmea,51 +taku57,51 +takechii,51 +takasago tomoe,51 +takamiya nao,51 +taka (suigendou),51 +taiyou kai ni (kancolle),51 +tachikawa kei,51 +t k,51 +syno,51 +suzu (suzuko),51 +suu (shugo chara!),51 +suspension bridge,51 +surumeika (ninfiiiir),51 +suou tamaki,51 +sunred,51 +sunligh mao,51 +sunasu-tamako,51 +sun wukong (rwby),51 +succubus (monster girl encyclopedia),51 +stuffed squid,51 +strip poker,51 +straight-arm salute,51 +steven seagal,51 +star harmony academy uniform,51 +st. louis (spirits in the snow) (azur lane),51 +spung,51 +spiffydc,51 +spekkio36,51 +spanking momoko,51 +southampton (azur lane),51 +soungruan mian mao,51 +sougetsu eli,51 +sorbet (hotel01),51 +soraizumi,51 +sopdet (p&d),51 +skyhood,51 +skull fucking,51 +sitting on tail,51 +sitting on own tail,51 +simty (lemon ginger),51 +silvia (fire emblem),51 +silver tokki,51 +silver hair-chan (ramchi),51 +silence (pixiv18541142),51 +sieglinde jeremiah,51 +side-seamed gloves,51 +shokujinki-san,51 +shokuen (oxstl),51 +shokabatsuki,51 +shitodo hooaka,51 +shitajiki,51 +shishigami leona,51 +shiromiza kana,51 +shirayuki shoushirou,51 +shiratori suzune,51 +shintaisou,51 +shinsaku (stan-art),51 +shinonome megu,51 +shinomiya rina,51 +shinohara kenji,51 +shino sherwood,51 +shinkai no shachi,51 +shinjou akane (cosplay),51 +shining,51 +shin sekaiju no meikyuu 2,51 +shikabane hime,51 +shiinoyuko,51 +shiina yuu,51 +shiba (s hi ba ),51 +shi zhuzi da,51 +shenmue the animation,51 +sheep hair ornament,51 +shangri-la,51 +seryu oekaki,51 +serara (log horizon),51 +seraphitalg,51 +sehra klatt,51 +scathach skadi (second ascension) (fate),51 +sazh katzroy,51 +save the queen,51 +satori (transient wind),51 +satoimo (3311 mi),51 +sasasasa,51 +sasaki tamaru,51 +sasaki shou,51 +sandara,51 +samurai (movemusic),51 +samue,51 +sammohung,51 +samekichi,51 +salamander (jonbonjovi82),51 +sakurada yuuta,51 +sakuraba (cerisier x),51 +sakura misaki (sakura densetsu),51 +sakura (urusei yatsura),51 +sakazuki sudama,51 +saiyuki,51 +saitogiulio,51 +saijo1201,51 +sachisudesu,51 +saane,51 +ryu (17569823),51 +ryousuke (tukr5384),51 +ruyi jingu bang,51 +rutarou,51 +ruby suguri,51 +ronin (disgaea),51 +roivas,51 +rockrock (arknights),51 +rochiko (bgl6751010),51 +ririfu,51 +rinne (mizunosato),51 +rihito akane,51 +removing bra under shirt,51 +rem (artist),51 +reiko (super cub),51 +reference request,51 +redeye (artist),51 +red wrist cuffs,51 +raver,51 +rapunzel (grimm),51 +random (ningen modoki),51 +randall boggs,51 +rakikoko,51 +raionsan,51 
+rachel (ninja gaiden),51 +r pascal,51 +r g b,51 +pyroar,51 +purple leggings,51 +purobe,51 +prototype bulin mkii (azur lane),51 +princess witches,51 +prince demande,51 +poyeop,51 +potato wedges,51 +porky minch,51 +poppypilf,51 +poni (poni arknights),51 +polla,51 +polish flag,51 +pochacco,51 +pjkka,51 +pisces aphrodite,51 +pintail (sword girls),51 +pinocchio-p,51 +pink trim,51 +pimmy,51 +pikumin,51 +pikario (precure),51 +phobos (vampire),51 +pepipopo,51 +pentakill (league of legends),51 +penis on shoulder,51 +pecking,51 +peargor,51 +payu (pyms11),51 +paptimus scirocco,51 +pappii (paprika shikiso),51 +pangolin tail,51 +paimon (magi),51 +pagye,51 +pack of dogs,51 +p38 (girls' frontline),51 +p-chan (mitsuta52),51 +ozu (agito100001),51 +oyster (artist),51 +oura rukako,51 +ots-14 (flurry of petals) (girls' frontline),51 +otomedius excellent,51 +osaname riku,51 +orthros,51 +oritako,51 +orangette,51 +orange tunic,51 +orange hakama,51 +orange541,51 +onogami tetsuya,51 +onimiko,51 +one piece: stampede,51 +omodaka romu,51 +olivia (mobseka),51 +okome (minagisama),51 +okita souji,51 +okemai,51 +okada nana,51 +ohayosayonara,51 +ogura toast,51 +oetaro,51 +odibil,51 +odagiri hidetoshi,51 +oda nobuhime,51 +oca,51 +o x,51 +nyuu,51 +nyarimia,51 +nyanta (log horizon),51 +nunun,51 +noria,51 +norapeko,51 +nonderi,51 +noie (neunteedelstein),51 +no vest,51 +no.gomesu,51 +nitta yui,51 +nitizyo,51 +nishiumi rin,51 +nishimura chiharu,51 +nishikuromori,51 +nippon ichi (neptune series),51 +ninjago,51 +nijizuki shino,51 +nightmare-kck,51 +niduannowu,51 +nico-tine,51 +nichijo,51 +nice (artist),51 +nicchi,51 +ni02 (asahi nini),51 +nether angel (housamo),51 +nephila clavata,51 +nekuraneko,51 +nekoto maruta,51 +nekomonogatari white,51 +nekodayo22,51 +neko (hansinn),51 +nazo no murasame chiaki,51 +naze youka,51 +natsuzora no perseus,51 +natsuki karin,51 +natsukawa masuzu,51 +natsugou shinogi,51 +nase hiroomi,51 +narurun (final123),51 +nanni jjang,51 +nanase (street fighter),51 +nanasaki nicole,51 +nana (darling in the franxx),51 +namonakisyura,51 +nameless (kof),51 +namakeruda,51 +naki ringo,51 +nakashi masakumi,51 +nagaro,51 +mystery skulls,51 +mysterious man (fire emblem),51 +murata ryou,51 +muraji0419,51 +muike,51 +mtmy,51 +mouri toushirou,51 +monferno,51 +momiji kei,51 +mochiko tsuru,51 +mobile doll may,51 +mizuno uchi,51 +mizu kane,51 +mizu cx,51 +mizoredama1,51 +miyako (rabochicken),51 +miya star saa,51 +miya-ki (miya key),51 +mituura,51 +mitsuki (toriaezu),51 +mita ryuusuke,51 +mistsplitter reforged (genshin impact),51 +mister bushido,51 +misoiri (gokutsubushi),51 +miracle wave,51 +mine (akame ga kill!),51 +minamimoto shou,51 +mikuri ouda,51 +mikoto (fire emblem),51 +mii gunner (smash ultimate),51 +mii brawler,51 +michiko to hacchin,51 +michi (iawei),51 +micchamu,51 +mi bait,51 +metalgreymon,51 +menbou (menbow3v),51 +mekimeki,51 +megane poni,51 +megachu!,51 +matudo yuu,51 +matsuno matsuzou,51 +mash kyrielight (welcome to the travelling circus!),51 +masayoshi,51 +masa (mirage77),51 +mary read (swimsuit archer) (fate),51 +mamiya t,51 +mamiya,51 +mamezara,51 +maka (mksrw),51 +mahoubin (totemo hot dayo),51 +machida sawako,51 +macchoko,51 +luck and pluck,51 +lua (yu-gi-oh!),51 +love live! 
school idol festival after school activity,51 +long table,51 +long-haired girl (ishiyumi),51 +loituma,51 +little buddy (splatoon),51 +linfa (futari midara),51 +lined paper,51 +limitless skye,51 +lillie (special costume) (pokemon),51 +lilith (lilycious),51 +lif (fire emblem),51 +leo aiolia,51 +leng sediao kafei,51 +lemontea (ekvr5838),51 +leki,51 +lego minifig,51 +leafar,51 +layla hamilton,51 +langrisser iv,51 +lakiston,51 +labia clamps,51 +la campanella,51 +kyoujixxxx,51 +kyandii,51 +kuza brs,51 +kuyoumi,51 +kusariuta,51 +kururu (keroro gunsou),51 +kuruma hajime,51 +kuroshio (zung-man),51 +kuromoto-kun (rina masimaro),51 +kurihara touko,51 +kura ekaki,51 +kulve taroth,51 +kukuri (tsugumomo),51 +koushisong,51 +kotou (ko-tou),51 +kotobukkii (yt lvlv),51 +koropokkuru,51 +koronu korinne,51 +kooeiatd111020,51 +kongsi,51 +konbu ame,51 +komuzuka,51 +komurapk,51 +kokoro rista!,51 +kizakura kouichi,51 +kiwwwwwi,51 +kiwi (cyberpunk),51 +kitsune (persona 4),51 +kitakaze (azur lane),51 +kisuu,51 +kiritani riria,51 +kirishima ayato,51 +kirin3145,51 +kirihara tatsugoro torayasu,51 +kirihara natsuki,51 +kinryuu,51 +kino (kino buro),51 +kimi ga ita kisetsu,51 +kim kitsuragi,51 +kim kaphwan,51 +kg-6 sleipnir,51 +keisea,51 +keikain yura,51 +kedo mitsuharu,51 +kazu (rakugakino-to),51 +kayoko (panchlora),51 +kawanami eito,51 +kawahara megumi,51 +kawa683,51 +katsuragi (webmaster909),51 +katou takeko,51 +katou tabihito,51 +kassai kassai,51 +karaage teishoku (regret1994),51 +kanno fumiroku,51 +kanikama25,51 +kani onigiri (shottare),51 +kanbara takuya,51 +kamikitayotsuba,51 +kamen rider ixa,51 +kamashi,51 +kalpas (honkai impact),51 +kakinomai,51 +kaki s,51 +kaisu,51 +kaichou118,51 +kagutsuchi (l'archange),51 +kagosumi,51 +kagamin boo,51 +kabuyou,51 +kaburagi sui,51 +kabu (yuyuibob),51 +k-rei,51 +jyako,51 +juno (azur lane),51 +jun (rellik & redrum),51 +jonathan morris,51 +jonathan kim,51 +jokebag,51 +johnrokk,51 +jo tuesday19,51 +jn3,51 +jiuri jiuhao,51 +jean grey,51 +james moriarty (ruler) (fate),51 +jaaku (ra 9rara),51 +j7w shinden,51 +j-20,51 +izura mari,51 +izumi sagiri (cosplay),51 +izumi (racer),51 +ivan flores,51 +its just suppi,51 +ishida sui,51 +isaya (pixiv4541633),51 +isaki uta,51 +irokohaku,51 +iosefka,51 +invincible marisa,51 +ine (zx o4),51 +ikuchi osutega,51 +ikeda (hayato),51 +igo miku,51 +ifelt (tamaki zutama),51 +idolmaster stella stage,51 +idolmaster sp,51 +ichiyanagi yumihiko,51 +ichinose kazuya,51 +ice cream kanojo,51 +hyacinth,51 +hummer,51 +huang li ling,51 +how to draw manga,51 +house tag denim,51 +horang4628,51 +honehone,51 +hollow ichigo,51 +hole in head,51 +holding wreath,51 +holding bandaid,51 +hkeno,51 +hitoha,51 +hisuian samurott,51 +historical connection,51 +hirayama kanna,51 +hino kagutsuki,51 +himawari (kawaisounaedesu),51 +hilling (ousama ranking),51 +hilda (cross ange),51 +hijiyama takatoshi,51 +hihara you,51 +hide yoshino,51 +heraldry,51 +hera-is (p&d),51 +helicopter hair,51 +helena k sink,51 +heke,51 +hehehzb,51 +heart hat ornament,51 +heart-shaped ornament,51 +hasuyawn,51 +hashimoto mari,51 +harvest moon (vtuber),51 +harutsuki (azur lane),51 +harurie,51 +harohapi! shinonome megu-chan no oheya,51 +haonfest,51 +hanada (cobalt003),51 +hanabishi miki,51 +han sooyoung,51 +hajime shindo,51 +hachimitsuboi,51 +gyakumushi,51 +guzheng,51 +guernical,51 +grizzly mkv (teddy transform!) 
(girls' frontline),51 +grimnir,51 +goro,51 +golden shrimp balls (genshin impact),51 +glowing liquid,51 +ginny weasley,51 +giji eizan,51 +ggpercent,51 +getter robo g,51 +gerph,51 +gebura (project moon),51 +fuwawa (fuwawa617),51 +futagojima,51 +fusion suit,51 +furaffinity username,51 +funa (sakana),51 +fujimino daisuke,51 +fujimaru (kinakomucch),51 +fugee (granblue fantasy),51 +fubuki (fakemonkey0224),51 +frosted glass,51 +frilled square (idolmaster),51 +freikugel (weapon),51 +fox girl (togutogu),51 +foote (azur lane),51 +fom (lifotai),51 +flatculture,51 +fish head,51 +fireworks print,51 +fierce deity,51 +ferret-san,51 +fengshen chino,51 +fender jazzmaster,51 +falulu,51 +f jun,51 +f4f wildcat,51 +ezlo,51 +evers,51 +etwahl,51 +etsem,51 +etherlite,51 +esper mami,51 +eriku (aoi tori),51 +endou tatsuya,51 +emushake,51 +emina,51 +elnowar seylan,51 +eleuseus,51 +eleanor (maou-sama to kekkonshitai),51 +elbow spikes,51 +elbe (time to show off?) (azur lane),51 +el shaddoll winda,51 +ekkusu kyuuzu,51 +eimi (harris hero),51 +ehryel,51 +echoes act3,51 +ebibaachan,51 +earthree gundam,51 +drive-thru,51 +dragon quest heroes,51 +dragon on shoulder,51 +dororo (keroro gunsou),51 +doppelganger arle,51 +dong (wandong44944),51 +donarudo,51 +dollhouse view,51 +dogxfish,51 +diablos,51 +deus ex,51 +desha (ousama ranking),51 +desert voe set (zelda),51 +demio,51 +delruki,51 +dee jay,51 +dautsen,51 +daruma owl,51 +daru (kumakumadon),51 +daromeon,51 +dante (dmc: devil may cry),51 +dangorou (yushi-art),51 +daleth (sky: children of the light),51 +cyberunique,51 +cure berry (cosplay),51 +cryturtle,51 +crew neck,51 +cosmic baton girl comet-san,51 +conkeldurr,51 +concrete revolutio,51 +comitia,51 +comic potpourri club,51 +code of princess,51 +cocytus (wind master),51 +clover (game cg),51 +claw mark,51 +classic (zildjian33),51 +citrus (place),51 +cinder (norasuko),51 +chum (splatoon),51 +chroma (chroma000),51 +christina mackenzie,51 +chocofox,51 +chm,51 +chitose kumi (hitotose),51 +chaotic-unknown,51 +chain print,51 +centaur (azur lane),51 +censored with cum,51 +cattleya yvette la baume le blanc de la fontaine,51 +catherine (granblue fantasy),51 +cat's tongue,51 +cascade badge,51 +carnelian (shining dew) (arknights),51 +captain marvel,51 +calder,51 +cain art811,51 +cai-man,51 +butterfly girl,51 +buro (muse dash),51 +bunker hill (azur lane),51 +bum hico,51 +buddha (saint onii-san),51 +bridget (guilty gear) (cosplay),51 +brest (azur lane),51 +boris airay,51 +booklet,51 +boo (takagi),51 +bone (stare),51 +boku no mirai wa koi to kakin to,51 +bo-tsu (hyrkgk),51 +blue revolver,51 +blind girl's dog (popopoka),51 +black star (module),51 +black knight (fire emblem),51 +bishamonten,51 +biriri (spacezin),51 +billbung,51 +beluo77,51 +beijiushui,51 +beam gun,51 +beam (chainsaw man),51 +bastiodon,51 +bariko,51 +banitya,51 +banira (ri0115ka),51 +banira (oocooocooocoo),51 +bang (one-punch man),51 +bandou marimo,51 +band (skymole01),51 +ball pit,51 +bajou takurou,51 +baekto (last origin),51 +b5n,51 +azure (alchemy stars),51 +azuma (no488888),51 +ayase hikaru,51 +ayanashi kunio,51 +ayakashi rumble!,51 +aya (min412),51 +aya02ka,51 +awatsuki maaya,51 +asymmetrical irises,51 +asuma (hanezu),51 +asu (asoras),51 +asou natsumi,51 +ashton anchors,51 +armor girls project,51 +armband removed,51 +arm around leg,51 +arisue kanako,51 +area 15,51 +arcade (architecture),51 +aqua tongue,51 +appi,51 +aoria,51 +aonome,51 +ansel (casual vacation) (arknights),51 +ann (ann58533111),51 +angelina nanatsu sewell,51 +angel costume,51 +ametsukana yago,51 +amatsuka 
seika,51 +amano sakuya,51 +alvis (xenoblade),51 +alice third macy,51 +alice (grimlight),51 +alfred (kounoike tsuyoshi),51 +akatsuki yakyou,51 +akase rai,51 +akaji (alpha0107),51 +akai toshifumi,51 +akafuyu (arknights),51 +ak-12 (age of slushies) (girls' frontline),51 +aiyoku no eustia,51 +aikatsu! photo on stage!!,51 +aiba-tsukiko,51 +ahute,51 +agarest senki zero,51 +agachi (shunya12300724),51 +afnroll,51 +afghan hound,51 +aerospray (splatoon),51 +adjusting sock,51 +addams family,51 +acerola (fall 2020) (pokemon),51 +a.l.l.,51 +91007,51 +66head,51 +3ldkm,51 +2t (tanakatakuto),51 +172cm,51 +1641 (chfhrtor94),51 +zoryc,50 +zombieman,50 +zis,50 +zettai shougeki,50 +zero (kirby),50 +zefai,50 +zastava m21,50 +z23 (schwarze hochzeit) (azur lane),50 +yuutousei,50 +yuukyuu no euphoria,50 +yuuki mitsuru,50 +yuuk33,50 +yutsukidayo,50 +yuriniel,50 +yurigaoka nayuki,50 +yupachu,50 +yuko666,50 +yukitaka,50 +yukine (noragami),50 +yuki (vicious),50 +yubisaki connection,50 +yt (yt ty),50 +youko kurama,50 +youkai mountain,50 +you are already dead,50 +yotsuba satsuki,50 +yonab,50 +yoko (shiocolor),50 +yjs0803123,50 +yellowparrot,50 +yatsuashimon,50 +yanzhan,50 +yanagui,50 +yamanami kousuke,50 +yamamoto yuu,50 +yamamoto (ymmt is sexy),50 +yamada yuuji,50 +y0ung,50 +xoco,50 +xianyu liang,50 +wrapped,50 +white lotus,50 +white java sparrow,50 +wawawa (hisuterisisusa),50 +warming,50 +warabi (danngo-mitarasi),50 +wakaba (wata ridley),50 +vrkdh,50 +virus,50 +viper ctr,50 +violetsang,50 +vincent wu,50 +vietnamese clothes,50 +vgaming,50 +verslll,50 +venus blood,50 +vauquelin (azur lane),50 +valgiris,50 +uvogin,50 +usami natsuki,50 +usagi nui,50 +urepito honke,50 +urameshiya,50 +unko man,50 +umino hotate,50 +umi hire,50 +ugonba (howatoro),50 +uchuu senkan yamato 2202: ai no senshi-tachi,50 +uchimura chiaki,50 +uchi wa mou enki dekinai.,50 +tyrfing (fire emblem),50 +two sidriver,50 +twisted breasts,50 +tsuzuki kei,50 +tsuzuki (e ci),50 +tsumagomi izumo,50 +tsukiyama minako,50 +tsukimi kirara,50 +trumpet creeper,50 +transparent hand,50 +transformers victory,50 +toy mouse,50 +towel pull,50 +towa (towa akqj10),50 +touxing no diluka,50 +touhou meikyuu,50 +tongue clamp,50 +tonari no kishida,50 +tommy (animal crossing),50 +toluda,50 +tokugawa ieyasu (sengoku otome),50 +toilet paper tube,50 +togainu no chi,50 +toa,50 +tlandolt69,50 +tin man,50 +time signature,50 +tiger hair ornament,50 +thoma avenir,50 +themis (ff14),50 +thegreyzen,50 +the winged dragon of ra,50 +the rumble fish,50 +the oni (ao oni),50 +the genesis,50 +the baddest kai'sa,50 +the 2nd super robot wars og,50 +thatched roof,50 +thana,50 +thai girl (okpriko),50 +temjin,50 +tegami (kch7683),50 +tec,50 +taut bodysuit,50 +tasuro kuzuha,50 +tan (carbon),50 +tamaki iroha (swimsuit costume),50 +tamaext,50 +tama project,50 +takizawa akira,50 +takerusilt,50 +takeda emi,50 +takechi otoya,50 +takao (the lessons of spring) (azur lane),50 +takao (azur lane) (cosplay),50 +takanashi tsubasa,50 +taiga mahoukan,50 +tadakichi-san,50 +tachibana mizuki,50 +tachibana gin,50 +syyn (syyndev),50 +symfo,50 +sword of exorcism,50 +sweethex,50 +suzuki tsudumi,50 +suzuki noritaka,50 +suzu head,50 +suteinua,50 +suminoe ako,50 +sul-lin-a,50 +su----per cute,50 +stormstx,50 +star wars: rebels,50 +st. 
louis (azur lane) (cosplay),50 +srw cover,50 +squire (fft),50 +springveiv,50 +springsuit,50 +spoken emoji,50 +special type 2 launch ka-mi,50 +spar,50 +space colony,50 +soul linker (ragnarok online),50 +souffle rosetti,50 +sou (shichigatsu),50 +soshite ashita no sekai yori,50 +sorinozuka renshou,50 +sorakase sawa,50 +sonomi,50 +sonika,50 +solomon (megido72),50 +soldier (dq3) (cosplay),50 +softboiled egg,50 +snowquill set (zelda),50 +skying,50 +skirt tied over head,50 +sinner! (sillygirl),50 +simipour,50 +side ahoge,50 +shun no shun,50 +shounen no abyss,50 +shoco (sco labo),50 +shiroyasha,50 +shirogane rio (artist),50 +shirika,50 +shiraishi urara,50 +shino (moment),50 +shino-puchihebi,50 +shijuu hachi,50 +shijimi (osumashi),50 +shiitake (gensoudou),50 +sherman (egnk2525),50 +shayoo,50 +sharuru (dokidoki! precure) (human),50 +sharl0ck,50 +shaka sign,50 +seyo,50 +seta (monyun),50 +servant,50 +sentou,50 +senkawa minato,50 +senju (uroakono),50 +semahiro,50 +self wedgie,50 +sechi (stst1850),50 +scathach skadi (swimsuit ruler) (first ascension) (fate),50 +saz (sazin764),50 +sayamai miyabi,50 +sawatari shingo,50 +sawada kanako,50 +sasaki kaori,50 +sasahara koujirou,50 +sari sumdac,50 +sarashina kau,50 +sara bon,50 +saniwa (katsugeki/touken ranbu),50 +sanii,50 +sangyou haikibutsu (turnamoonright),50 +samurai (fft),50 +sakuya (ookami),50 +sakura momo,50 +sakidesu,50 +sajipen,50 +sailor chibi chibi,50 +saifu (sisutakh),50 +sacchi,50 +ryuukyuu,50 +ryuujin no senpai,50 +ryu (street fighter) (cosplay),50 +ruwoka,50 +ruten (onakasukusuku),50 +ruo zhishi chu jian,50 +ruida,50 +roto (cosplay),50 +rotisserie,50 +roomi,50 +roboko (sekai seifuku),50 +rktsm,50 +rita (shingeki no bahamut: genesis),50 +rison,50 +retia adolf,50 +republic of korea army,50 +rengoku senjurou,50 +remoyona,50 +reki (hidan no aria),50 +recipe (object),50 +ravenousruss,50 +rattle drum,50 +raonal97,50 +ram (re:zero) (cosplay),50 +quarium,50 +qt0ri,50 +qoray7,50 +qmo (chalsoma),50 +qianshui baodan,50 +purple haze feedback,50 +probopass,50 +probe regalia,50 +princess juliette,50 +prbili,50 +potato (oriha94),50 +popopon,50 +polyhedron2,50 +pink (ohasi),50 +pingkypen,50 +pikaro,50 +pikachu libre,50 +pico (boku no pico),50 +peter quill,50 +penguin4,50 +peaceyj,50 +parsley,50 +parchment,50 +papercider,50 +paper mario: the origami king,50 +p9 (susisasimi),50 +oyuki (otozuki mozuku),50 +ou negi,50 +otto no inu aida ni... 
~watashi odosarete anata ni ienai koto shimashita~,50 +oto nagi,50 +oto (rozeko),50 +orange sports bra,50 +optimus primal,50 +oppai kenmin,50 +oozora hiro,50 +ookouchi ricca,50 +ooba minato,50 +onodera (einsatz),50 +okuzumi yuiko,50 +okitarou (okiyo),50 +oitsuki (getsumen diver),50 +ogura shion,50 +ogu,50 +ofuro,50 +office lady (madoka magica),50 +oekakiism,50 +obelisk the tormentor,50 +nyarko,50 +nukosann,50 +notoro,50 +nosir onadat,50 +noppo-san,50 +nomiya37564,50 +noeunjung93,50 +no+bi=,50 +nishishi,50 +nishikiyama akira,50 +nishi hayato,50 +nini3piyo,50 +nikubou maranoshin,50 +night watcher (elsword),50 +night sky sword,50 +nigari (ngari 0115),50 +nidhoggn,50 +nico (blue archive),50 +nezumi (majisuka gakuen),50 +netherlands,50 +nero claudius (red lightning) (fate),50 +nerinn (artist),50 +neneko (rariatto),50 +nekoyama iori,50 +neko7,50 +negoya,50 +natuich,50 +natsuhime yuran,50 +nasunoko,50 +nasu rei,50 +nash latkje,50 +nara haramaung,50 +naomasap,50 +nanjou yoshino,50 +nanaba,50 +nakamura nagare,50 +nakamu 405,50 +najica dengeki sakusen,50 +nago (kirby),50 +nagamo sakana,50 +mwwhxl,50 +murakami mozu (cyclone),50 +mukku,50 +mukai hinata,50 +mueririko,50 +muchimaro-chan,50 +mr lobster,50 +moss (2225028),50 +mosaic art,50 +morimiya (kuroro),50 +morikawa yuki,50 +moose girl,50 +moon (majora's mask),50 +mon (monmon2133),50 +momosemocha,50 +momijizuki luna,50 +mokuyou,50 +mokarooru,50 +moira burton,50 +mogu (kanikama mgmg),50 +miyu (vampire princess miyu),50 +miyu (miy u1308),50 +miyaulait,50 +miyamoto akiyo,50 +miyakoto,50 +miyaguchi hiromi,50 +misuko (sbelolt),50 +misaki (1n1m),50 +minami shizuku,50 +minakami kaori,50 +milluun,50 +millgua,50 +mikaze,50 +mikawa sansen,50 +miji doujing daile,50 +mico3,50 +melting scarlet,50 +mei ford,50 +mazeru (jisjifin),50 +mayumochini,50 +mayonnaise (ringo gakuen),50 +mattyazuki,50 +matsuzaka (matsuzakagyu 29),50 +matsuri kyuuta,50 +matsudora124,50 +matsubara honoka,50 +masuchi,50 +mashin,50 +masami (souzou jinsei sanka),50 +mary-san,50 +marisa stole the precious thing,50 +marimo tarou,50 +manse,50 +manme,50 +male with breasts,50 +makkuro,50 +makiya,50 +make america great again,50 +maimu,50 +magpul masada,50 +magikano,50 +magic carpet,50 +maekawa yuichi,50 +madarame,50 +lyrical ds,50 +lune (kaminomi),50 +luca balsa,50 +ltotyht,50 +lourie,50 +lopunny (cosplay),50 +long torso,50 +lispict,50 +lirseven,50 +lipstick mark on stomach,50 +lip balm,50 +lion's roar (genshin impact),50 +lin hu (nekojishi),50 +lily white (cosplay),50 +lily servant,50 +lilo & stitch,50 +li meifon,50 +leos vincent,50 +left 4 dead 2,50 +lee roha,50 +leather vest,50 +layla (sound horizon),50 +lari,50 +lakestep55,50 +kyowosuke,50 +kyo (kyokuto016),50 +kyabekko,50 +kuzumochi,50 +kusanagi koyori,50 +kusaka ryuuji,50 +kurudaz,50 +kurojishi,50 +kuroe ayaka,50 +kurobuchi numama,50 +kurashina asuka,50 +kurasaki cority,50 +kurara-chan (suzutsuki kurara),50 +kuraki,50 +kunoichi (disgaea),50 +kumako (sono328),50 +kubota shinji,50 +koubu,50 +kotobuki (medetai),50 +kotneciii,50 +koroneri,50 +kongou (battleship),50 +konasu (sndk1480),50 +komugi (hunter x hunter),50 +kokusoji,50 +kokkoro (princess) (princess connect!),50 +koishikawa,50 +koiiro byoutou (vocaloid),50 +koigakubo momoko,50 +kohinata hayami,50 +kodera ju,50 +kobiemon,50 +kobayashi (oksk0x0),50 +kiyakyuu,50 +kishiro azuhito,50 +kishi youko,50 +kirima syaro (cosplay),50 +kirame kirai,50 +kinocopro,50 +kikukawa azami,50 +keven (ewdx3754),50 +kero (cosplay),50 +ken'ichi (silvian125jp),50 +kemu,50 +keishi (shining star),50 +keenii (kenny86),50 
+kdc (tamaco333),50 +kazu sanbon,50 +kazamatsuri kazari,50 +kayo (watamote),50 +kawasumi (tk2k jade),50 +kawamura rukanan,50 +kawamura raichi,50 +katori shin'ichi,50 +katori (pso2),50 +kath,50 +katelynn (afrobull),50 +karin (fineyanny),50 +kannazuki okuto,50 +kamu (simp3),50 +kamonegi (meisou1998),50 +kamiya shion,50 +kamiomutsu,50 +kamen rider meteor,50 +kamachi kamachi-ko,50 +kakushigoto,50 +kaji ryouko,50 +kai harn,50 +kagura tsuna,50 +kagura demuri,50 +kaga (warship girls r),50 +kacchu musume,50 +kabakura tarou,50 +ka92,50 +juvenile (vocaloid),50 +juukyuu,50 +jupiter (pokemon),50 +julia8857,50 +joint (drug),50 +john wick,50 +jinx (dc),50 +jikeshi,50 +jgsdf type 07 tank natchin,50 +jewel knights,50 +jason voorhees (kotobukiya bishoujo),50 +james p. sullivan,50 +jack frost (rise of the guardians),50 +j-e-m-g,50 +izumi kaori (twinbox),50 +iwana,50 +itto maru,50 +itaro,50 +issa (sorako45),50 +ishimo,50 +isai shizuka,50 +inunekostudio,50 +inhoso,50 +inasa orange,50 +inarou (rakugakiproject),50 +imminent punch,50 +iijima kana,50 +ierotak,50 +idachi,50 +ichimoku ren,50 +ichi inaru,50 +icelernd,50 +ice queen (adventure time),50 +i-401 (kancolle) (cosplay),50 +i-25 (azur lane),50 +hutuu (1121),50 +hullzion,50 +hukahire0120,50 +huatu jiang,50 +htol#niq: hotaru no nikki,50 +hoshino sora (enuni),50 +honya (maho a),50 +hong soon-jae,50 +hod (project moon),50 +hiyorou,50 +hiki togu,50 +hikarikmy,50 +hex tails,50 +herbarium,50 +hen-shin-shou-jo metamol maiden,50 +helena (meta) (azur lane),50 +heiyizhushenxia,50 +hazuki (nukisasi),50 +hayate-s,50 +hayashi naoharu,50 +hayabusa koi,50 +hatsu inu,50 +hatotaurus (ookami mio),50 +hassan of serenity (merry sheep) (fate),50 +hassan (sink916),50 +haruno tomoya,50 +harubato,50 +harpie girl,50 +hanaon,50 +hanakai momo,50 +hana (dqkmb362),50 +han (ozrfour),50 +halakadira,50 +hakuun (m2230),50 +hako (hakosanhanasan),50 +hachimillion,50 +h&k mg5,50 +gyoubu ippei,50 +gyonikun,50 +gyarusatan,50 +gumball machine,50 +gregory (fnaf),50 +gravel,50 +grapple pilot (titanfall 2),50 +gracidea,50 +goru (cure golgom),50 +goroku,50 +gordon ramsay,50 +gokurakuin sakurako,50 +gilbert bougainvillea,50 +geta (epicure no12),50 +ge zhong kuaile,50 +gaikotsu kishi-sama tadaima isekai e o dekake-chuu,50 +fuzzlogik,50 +fuyono neru,50 +futaba riho (cosplay),50 +fushimi sameta,50 +fusa (starless2323),50 +fur sleeves,50 +fumimi,50 +fumikiri (dake no hito),50 +fujisaki yua,50 +fujimaru ritsuka (male) (royal brand),50 +fujibayashi suzu,50 +fuji hyorone,50 +fu (mushibun),50 +freddie mercury,50 +fox girl (mdf an),50 +forbin (azur lane),50 +foge,50 +flynn rider,50 +flying red barrel,50 +floette (red),50 +flamenco,50 +firenzesaika,50 +fire valkyrie,50 +finger puppet,50 +ferry (summer) (granblue fantasy),50 +fennery (show by rock!!),50 +fencing,50 +faejunkie,50 +f-2,50 +eye hair ornament,50 +ex-trident,50 +etuzan jakusui,50 +enoshito,50 +enkichi totan,50 +en (paorasuteki),50 +emilia hermit,50 +elrowa,50 +elie wayne,50 +eileen coney (shepherd0821),50 +eikaa,50 +edge maverick,50 +e7 (runaway162),50 +dylan keith,50 +drawdrawdeimos,50 +drakente,50 +drakee,50 +dragoncastle,50 +dragon shiryuu,50 +donuttypd,50 +donar0217,50 +dojipan,50 +doctor ferdinand,50 +dj.adonis,50 +dita liebely,50 +disco,50 +dinikee,50 +diamond princess no yuuutsu,50 +devil-v,50 +denkou choujin gridman,50 +decorating,50 +death angel komachi,50 +daroach,50 +dark repulser,50 +dark night (darkotl),50 +dao,50 +danraz0r,50 +daitoutei,50 +daihatsu (landing craft),50 +czk,50 +cygnet (an offer to be maid) (azur lane),50 +curling,50 +cure 
melody (cosplay),50 +cucchiore,50 +ctiahao,50 +crumbling,50 +crow tengu extra (touhou),50 +crossdraw holster,50 +croix raoul,50 +creepypasta,50 +cramp,50 +cpu,50 +covering another's mouth,50 +contract,50 +comizku,50 +colorful x melody (vocaloid),50 +clear echoes,50 +civia,50 +cimeri,50 +chuck pires,50 +chotto,50 +chiyosuke (nosuke21),50 +chickenvomit,50 +cheese (sonic),50 +cheek on glass,50 +chawanmushi (ebikamaboko),50 +chain gun,50 +chachazero,50 +cedric (gear art),50 +ceasar ian muyuela,50 +castlevania: dawn of sorrow,50 +cap (dkdm-d),50 +candy boy,50 +cafenami,50 +cacao (lamune),50 +byuura (sonofelice),50 +bunny symbol,50 +bunkyo takemi,50 +brown pupils,50 +brikot,50 +bridget evans,50 +bound wings,50 +boppin,50 +boole (sangha0301),50 +bookmarkahead,50 +bokustar fox,50 +bokura no 16bit warz (vocaloid),50 +boku wa ohime-sama ni narenai,50 +bocha (gotipoke),50 +bluecher (azur lane),50 +blue mask,50 +blue-haired boy (how to draw manga),50 +blossoming new year's karuta contest (umamusume),50 +blooregard q kazoo,50 +blackish 961sp (idolmaster),50 +bin1998,50 +biko pegasus (umamusume),50 +beiske (de skb3),50 +beat (mega man),50 +bean sprout,50 +battle girl (pokemon),50 +bandana removed,50 +banchengping@126,50 +banana (hirane1988),50 +balsa,50 +badger tail,50 +azuchi momo,50 +ayase tamaki,50 +ayabe kihachirou,50 +atelier ryza 3,50 +atelier elie,50 +asymmetrical shirt,50 +asmodeus alice,50 +ashigara (aoki hagane no arpeggio),50 +asemu asuno,50 +asagi (bombergirl),50 +aruka,50 +armored personnel carrier,50 +arin66,50 +ari don,50 +argyle necktie,50 +arctic wolf (kemono friends),50 +aratagawa nikei,50 +araimooah,50 +aoyama reo,50 +aosaki yato,50 +aoi anna,50 +anne (bravely second),50 +ankoku tiger,50 +animal cutout,50 +angel links,50 +andou nene,50 +amygdala,50 +amirun,50 +amekaze yukinatsu,50 +ameiarts,50 +amayouri,50 +amanoyayuki,50 +aliese martini,50 +alice-whiteberry,50 +alexi oekaki,50 +akira (umihan),50 +akira (aky-la),50 +aki no jikan,50 +akatsuki (azur lane),50 +akamtvahosi,50 +akai mi ga bakuhatsu shita,50 +aircraft request,50 +ahr tato,50 +ah-negitorow,50 +acrobatics,50 +aciddrop (arknights),50 +absolutemabbak,50 +aaaabo,50 +a lone prayer,50 +a flow,50 +a106,50 +9ma param,50 +96neko,50 +54cr,50 +501st joint fighter wing (emblem),50 +3ok,50 +3kuma,50 +2zuz4hru,50 +zxzx,49 +zweilous,49 +zono (rinkara-sou),49 +ziko,49 +zanburg,49 +yuyaki (senran kagura),49 +yuuma (pumpkin),49 +yuukyuu gensoukyoku,49 +yuuki miyabi,49 +yuukaku,49 +yuujin (pageratta),49 +yuugiri ayano,49 +yuucho (cololal),49 +yurufuwa milk,49 +yuri alpha,49 +yuri (quiz magic academy),49 +yura (ub4u),49 +yule log,49 +yukiyoshi,49 +yukiji shia,49 +yukihiko (sky sleep),49 +yu (uza),49 +ys ix monstrum nox,49 +youtan,49 +yorktown (warship girls r),49 +yonab (yonab),49 +yon yon (shikawafu),49 +yaruo,49 +yapo (mess),49 +yang38,49 +yammy,49 +yamano rinrin,49 +yae kaori,49 +y jinn,49 +xperiajoker,49 +xino,49 +xiatian (beatmania iidx),49 +xia xiang (ozicha),49 +x.x,49 +wukong (league of legends),49 +wu lun wujin,49 +wrist wings,49 +wringing dress,49 +wooden beam,49 +wing (aiastor),49 +windmill (company),49 +winberrl,49 +willump,49 +wicker basket,49 +white scales,49 +wei yu,49 +weapon name,49 +water cooler,49 +wata neo,49 +washiwa,49 +wargaming japan,49 +warabimoti yoz,49 +walfas,49 +waku2kitchen,49 +wakame-chan,49 +wakaba0801,49 +vs knight lamune & 40 fire,49 +void prowler wraith,49 +vladimir (league of legends),49 +virion (fire emblem),49 +vf-1 strike,49 +vespid (girls' frontline),49 +very big eyes,49 +vellark,49 +vegacolors,49 +valor 
(league of legends),49 +vaio,49 +va,49 +utai yumi,49 +usui ryuu,49 +uss missouri (bb-63),49 +usio ueda,49 +usa (yamai),49 +uriah-oyu,49 +united nations,49 +unfezant (male),49 +undershaft,49 +umeru (admiral bahroo),49 +umbilical cord,49 +ukatsu juuzou,49 +u tnmn,49 +u emper,49 +u2 (u2 85269),49 +type 90,49 +tycho science,49 +twobee,49 +two-tone neckerchief,49 +two-tone cloak,49 +twin-7646,49 +tuesday simmons,49 +tsuruhime,49 +tsurugi yuuichi,49 +tsumihoroboshi-hen,49 +tsukushima shijima,49 +trista (makkoivenus),49 +triangle cutout,49 +transformers car robots,49 +train attendant,49 +towa rui,49 +toutaku,49 +toujirou,49 +tori (eherotori),49 +topsu,49 +tomoshibi no majo,49 +tomon (slash0410),49 +toktin zq,49 +tokinomiya kamui,49 +todoroki kyouko,49 +tocope,49 +tita nium,49 +tile background,49 +tiefling,49 +thranduil,49 +the thing,49 +the king of fighters 2000,49 +the best schoolmate (idolmaster),49 +thatch,49 +tetra takamine,49 +teston,49 +terrako,49 +terimayo (sonnne farbe),49 +tenebria (epic seven),49 +temperature,49 +temmie chang,49 +tarisa manandal,49 +tariho (robotanime),49 +tanned girl (kamisimo 90),49 +tani,49 +tama azusa hatsu,49 +takuya (acceed),49 +takayama dan,49 +takashi moritaka,49 +takasaki (rock rock),49 +taira no fumikado,49 +tailbox,49 +tad s,49 +tachibana hina,49 +t lege d,49 +t (dyuhuhudyukusi),49 +sword world,49 +sword master (dungeon and fighter),49 +swirling,49 +sushoartworks,49 +survival yaiba,49 +surfboard (wrestling),49 +supobi,49 +super mario bros. 2,49 +supahbeefcakes,49 +sungpark,49 +summon ticket (fate),49 +sumire (anti (0324)),49 +sukoyaka gyuunyuu,49 +sukiya,49 +sujiko (125motimoti),49 +suimame,49 +sugomori tsuru (artist),49 +sugii tsukasa,49 +subfiction,49 +stuffed squirrel,49 +striped wristband,49 +stretch marks,49 +stitched fingers,49 +steward (arknights),49 +steph (afrobull),49 +stardust dragon,49 +star guardian syndra,49 +sr soba,49 +spread the wings!! 
(idolmaster),49 +spoken flower,49 +spiral background,49 +spiral (senra garou),49 +spider-ham,49 +spewpa,49 +sotsunaku,49 +soranokaze15,49 +sophia (p5s),49 +sonic world adventure,49 +sonech,49 +snowdrop (flower),49 +snow strawberry (idolmaster),49 +small head,49 +sleeve,49 +siyumu,49 +sixteenpo,49 +sitting on hair,49 +sitting on cloud,49 +sister (arakawa),49 +sion flina,49 +shuiro (frog-16),49 +shoko makiko,49 +shiroton (kazamineko),49 +shirotaso0818,49 +shirono kuma,49 +shirohebi (monster girl encyclopedia),49 +shirayuki ren,49 +shirayuki mishiro,49 +shiratori hime,49 +shirataki jiro,49 +shirasu (mashiro (rikuya)),49 +shion (len'en),49 +shio,49 +shinsetsu spice and wolf,49 +shinomy,49 +shinku (cosplay),49 +shinjou kanae (teekyuu),49 +shin'ya natsuki,49 +shimamura miwa,49 +shiki no akari,49 +shiki karuta,49 +shiitake (mm0820),49 +shiika (idolmaster),49 +shige,49 +shichimi (ftlvampire32),49 +shibanashi miso,49 +shiba tatsuya,49 +shennong (summer) (housamo),49 +shamone (skullgirls),49 +shallotte elminus,49 +shaito,49 +seushiki (ponti-ron),49 +set7,49 +serakoutarou,49 +sengoku asuka zero,49 +senbiki (nonono69),49 +seiros (fire emblem),49 +seidouzan,49 +segaxtu,49 +sasebono mari,49 +sasana,49 +sasaki kouhei,49 +sasakawa arumi,49 +sandman (sbr),49 +samurai jack (character),49 +salukino,49 +salty (cherrypie),49 +salamander (monster girl encyclopedia),49 +sakuya (utawarerumono),49 +sakuramachi touko,49 +sakurada kanade,49 +sakuma,49 +sakuba nao,49 +saint seiya saintia sho,49 +saint-louis (alluring administrator) (azur lane),49 +saika ryougi,49 +sai ichirou,49 +sai (saikun),49 +sagamihara sakyou,49 +sabi wasabi,49 +saazbaum,49 +ryuuguu rena (cosplay),49 +ryuhirohumi,49 +ryugeru baran,49 +ryouki (34388923),49 +rylaera,49 +ruu (queen's blade),49 +rupe paperu,49 +runemill,49 +rukiroki,49 +rui (pokemon),49 +rudy (rariatto),49 +rozaliya olenyeva (fervent tempo),49 +rouge (power stone),49 +rose tomas,49 +rodway,49 +rodori gesu,49 +rizeass,49 +rital,49 +riria,49 +rippootai,49 +rinx,49 +rinrin (user tvcf4347),49 +rijjin,49 +ries argent,49 +riding bird,49 +riding bean,49 +richter abend,49 +retto,49 +ren kouha,49 +removing helmet,49 +remotarou,49 +reki (dezuko),49 +reisalin stout (cosplay),49 +reinesia el arte cowen,49 +regls,49 +reddit username,49 +rebecca arcane,49 +ray littlechamber,49 +ranju aira,49 +raihan (anniversary 2022) (pokemon),49 +rafflesia (flower),49 +racoon-kun,49 +qing lan,49 +px4 storm (girls' frontline),49 +puu (kari---ume),49 +procreate (software),49 +prino hawell,49 +prin dog,49 +poporon (jashin-chan dropkick),49 +popo take,49 +pokira,49 +poking with penis,49 +pokemon xd,49 +poise,49 +pocketbook,49 +plue,49 +plastic moon,49 +plaid pajamas,49 +pixiv cat kingdom,49 +pippilipi,49 +ping hai (osmanthus moon rabbit) (azur lane),49 +pinecone (sing a song) (arknights),49 +pillar buster,49 +pile (seiyuu),49 +pikupiku,49 +piccolo daimaou,49 +piaisai,49 +people's liberation army air force,49 +penis head,49 +penguinbox,49 +penguin musume,49 +pcs shousa,49 +parayang,49 +pandora (nat the lich),49 +palmer (pokemon),49 +paddy field,49 +paaru,49 +pa da wan,49 +oyashimakanya,49 +oumagadoki doubutsuen,49 +ou taiga,49 +ottoman,49 +otsu natsu,49 +ostrich (kemono friends),49 +orn,49 +oriuo q,49 +ore no natsuyasumi,49 +orange clouds,49 +ootsuka mai,49 +ooshima aki,49 +oono eiko,49 +omega (mega man),49 +omega (final fantasy),49 +olmine,49 +oktyabrskaya revolyutsiya (kancolle),49 +okoge (simokaji),49 +okami ranka,49 +okada izou (dog) (fate),49 +oinari yukke,49 +oimari,49 +odabuts,49 +occluder,49 +oasis,49 
+o22no,49 +nzz,49 +nyarumi,49 +nyano,49 +nyamnyam0502,49 +nunuko (mu661129),49 +nuenue,49 +nstime23,49 +nose genki,49 +nordic niku,49 +no hood,49 +no cloak,49 +niwatori gunsou,49 +nitta emi,49 +nitaimoimo,49 +nishinaka takashi,49 +nishi,49 +nintendo switch pro controller,49 +niko (aiai03012),49 +niihashi noel,49 +night seeker (sekaiju),49 +niandni,49 +nfb-zmc,49 +neytirix,49 +nevakuma (fanfanas),49 +neru (flareuptf1),49 +nemurism,49 +nemuaki,49 +nemo ringo,49 +nekono osero,49 +neco-arc chaos,49 +neck lift,49 +nayoung wooh,49 +naomi (agent aika),49 +naname (danbooru maker),49 +nanairo souga,49 +nanahi toshi,49 +namuru,49 +namuro,49 +naharyou,49 +nagisano,49 +nagase yutaka,49 +n15e,49 +myung-jin lee,49 +mystical,49 +mynare,49 +mx0,49 +musuka (muska),49 +mustard sfm,49 +musketeer,49 +murasaki nakoruru,49 +multilingual,49 +mujun atama,49 +mudou setsuna,49 +mousou zei (vocaloid),49 +mou (piooooon),49 +motocross saito,49 +mothim,49 +mother spider demon (kimetsu no yaiba),49 +mosquito musume,49 +morinas,49 +morimi ashita,49 +moriah,49 +mori calliope (cosplay),49 +moonlight butterfly,49 +monster maker,49 +monokuma (cosplay),49 +momomo12,49 +mogera81,49 +mk (mikka),49 +mk001black,49 +mizushiro kanon,49 +mizuse kiseki,49 +mizame,49 +miyazawa yukino,49 +miyar2d2,49 +miyamura izumi,49 +mitsuki (mitsukitei),49 +miso pan,49 +misia (ichiri),49 +miruku (cutesuu),49 +minu,49 +mine riko,49 +minatsuki hitoka,49 +minamiya natsuki,49 +minakuchi takashi,49 +milll 77,49 +mikomo0106,49 +mikaelya (voice actor),49 +mighty (series),49 +mibushiro,49 +meteoride,49 +metalgarurumon,49 +metabee,49 +menma (enaic31),49 +mengo,49 +melon slice,49 +melon-chan (fg),49 +mejiro family doctor,49 +meili portroute,49 +mebi il,49 +maxima enfield,49 +matsuno kuusuke,49 +matsunaga sara,49 +matou sakura (cosplay),49 +mashimaru (muzikluva),49 +mashima himeko (show by rock!!),49 +mashima (sumachi),49 +marui maru,49 +marotti,49 +marmalade mum,49 +marisa to alice no cookie storia,49 +marie parfacy,49 +marie ange,49 +marianne vi britannia,49 +maria (arakawa),49 +mari (swimsuit) (blue archive),49 +maosen,49 +manuel castanon,49 +malice stella,49 +makita haruka,49 +maki michaux,49 +maid (etan14),49 +maico (a218),49 +mahou shounen miracle hachirou,49 +maguchimo,49 +magical princess,49 +magical mirai kaito,49 +machi youko,49 +m500 (girls' frontline),49 +m1 carbine,49 +lvl374,49 +lux (dera ix),49 +lune (chikaretsu),49 +lunalu9,49 +luke venus,49 +lukas (fire emblem),49 +lost robin rondo,49 +lost ark,49 +loodncrood,49 +long island (long island - indoor slacker) (azur lane),49 +loeldrive,49 +lliissaawwuu2,49 +ll 0109,49 +little sister,49 +little nurse,49 +lirin (bae.c),49 +lirica,49 +linmei quan,49 +lina inverse (dota 2),49 +lilithmon,49 +lgw7,49 +lewis (girls' frontline),49 +let's draw pretty girls challenge,49 +leen (grandia),49 +leech girl,49 +leaning on weapon,49 +layzner,49 +latex top,49 +large shoes,49 +lanhacy,49 +laffey (snow rabbit and candy apple) (azur lane),49 +lady j,49 +kyuubi (youkai watch),49 +kyou (nodobotokemax),49 +kuzuryuu natsumi,49 +kuuneru,49 +kutsushita (tokati r),49 +kusama takato,49 +kurumizaka ruri,49 +kurosawa (kurosawakyo),49 +kuro ageha,49 +kunoichi demo koi ga shitai (vocaloid),49 +kuma kuma kuma bear,49 +kuku123,49 +kujo josefumi,49 +ku-ini,49 +kraken,49 +kr ohoshisama,49 +kouzuki yuuko,49 +koumoto madoka,49 +kono subarashii sekai ni shukufuku wo!: kurenai densetsu,49 +konase (non stop!),49 +komugi (mugiwaraclub),49 +komori takahiro,49 +komasi,49 +kol49,49 +kojiro337,49 +kohinata sumomo,49 +koga sayoko,49 +kodama 
yuuki,49 +koahri,49 +kitsunemiyoru,49 +kisui (28992125),49 +kirari hikaru,49 +kirara jump,49 +kinoshita ringo (no-rin),49 +kin no kutsu gin no kutsu,49 +kimipiyo (tenmiyakiyo),49 +kimberly (street fighter),49 +kii (azur lane),49 +kida sukizou,49 +kgo,49 +kenkaizar,49 +kemono friends 3: planet tours,49 +kemile,49 +katori youko,49 +katina tarask,49 +katayama minami,49 +kase atsushi,49 +karinaga raizan,49 +karakuri circus,49 +kappa modoki,49 +kanzaki miku,49 +kaneko naoya,49 +kaname (emanalc),49 +kamoi tsubame,49 +kamikaze (azur lane),49 +kamen rider stronger,49 +kamen rider genm,49 +kamakama (kdmorh kamakama),49 +kama (tour outfit) (fate),49 +kali (p&d),49 +kakuzatou (cubesugar03196),49 +kakihito shirazu,49 +kainohito,49 +kaihime,49 +kaien advance,49 +kaho (blue archive),49 +kaguyahime,49 +junshiki,49 +jung wonjo,49 +juby,49 +jonbur man,49 +jitama (bou),49 +jinxel world,49 +jin (phoenixpear),49 +jikei,49 +jaws (okdol0928),49 +jason chan,49 +janome gochou,49 +jail scaglietti,49 +jackalope,49 +jack (haist),49 +iyau,49 +itsuki (s2 129),49 +itou misei,49 +issei,49 +ishtar (fate) (cosplay),49 +ishinarimaru shouten,49 +isana615,49 +iron claw,49 +iori yuzuru,49 +iolite link,49 +inukai sumiharu,49 +inkstone,49 +inaka 44,49 +imminent grope,49 +image fill,49 +image comics,49 +imaani,49 +ilya (christmas) (princess connect!),49 +iida kotori,49 +ichitaro,49 +ichinose (kurui96),49 +ichinashi,49 +ichinana (dametetujin17),49 +ichina (osabakitina),49 +iandrea,49 +i-168 (azur lane),49 +hyrule castle,49 +hugo ardanach,49 +houshasei gokiburi,49 +houjouin seika,49 +hoshino char,49 +hoshino banchou,49 +horse legs,49 +horii kumi,49 +hollyhock,49 +holding telescope,49 +holding tanzaku,49 +hohetomaru,49 +hk21 (girls' frontline),49 +historical,49 +hime (suguri),49 +himduldago,49 +hilgendorf's tube-nose bat (kemono friends),49 +hikarusorano,49 +hiiragi ken,49 +higurashi towa,49 +higanbana (higanbana no saku yoru ni),49 +hiburi (kancolle) (cosplay),49 +hibikileon,49 +helmet-chan (girls und panzer),49 +hell machina,49 +hell angel,49 +heiwajima kasuka,49 +hazumi aileen,49 +hayashi maka,49 +hayanpool,49 +hatoichi reno,49 +haruna mahiru,49 +harukey,49 +harukaze chiharu,49 +harry (namayake),49 +harlock,49 +hansel (black lagoon),49 +hand in bra,49 +hanazawa teruki,49 +hanako-san (toire no hanako-san),49 +hami (hami-gerden),49 +hakiata,49 +haki (boogbogex),49 +hair fruit,49 +haibarasaika,49 +hacker (7th dragon),49 +ha neko,49 +gwanlamcha,49 +gurumin (gurruguru),49 +gupaon,49 +guildmarm (monster hunter),49 +gucci,49 +guan yu,49 +grilled fish,49 +green dam,49 +great gozu,49 +gradient scarf,49 +gozen4ji,49 +gouhou yuri fuufu hon,49 +gotou yuuko,49 +gordius wheel (fate),49 +golden gun,49 +goji (8jikan roudou),49 +goemon,49 +gnsn tori,49 +glacier,49 +girutaabo,49 +girl with golden shoe,49 +gilgamesh (final fantasy),49 +ghgnvm,49 +getter dragon,49 +get,49 +gecko moria,49 +gari (apollonica),49 +garfiel tinsel,49 +gaoerji,49 +gandalf,49 +gamou maki,49 +galarian yamask,49 +fuyuki8208,49 +fushihara-san,49 +fushigiboshi no futago hime gyu,49 +fur-tipped tail,49 +fukuyama jun,49 +fukushi ryouhei,49 +fujisaki kyouya,49 +folding clothes,49 +fnc (upotte!!),49 +fn-49 (girls' frontline),49 +flower to mouth,49 +fizz (league of legends),49 +finn (fire emblem),49 +ferrothorn,49 +feng ling (fenglingwulukong),49 +excel,49 +evil twin ki-sikil,49 +evil ryu,49 +eushufeal,49 +escort princess,49 +erory6 (ro),49 +erina der vogelweid,49 +episode title,49 +enomoto noa,49 +emi (emi43),49 +eien no kurayami,49 +ebonyxh,49 +dyarikku (vtuber),49 +dramatic 
stars (idolmaster),49 +dr altruist,49 +dr. bug,49 +double ok sign,49 +double-decker hamburger bun,49 +dotz,49 +doting parent,49 +doorknoble,49 +donggua bing cha,49 +dive! (love live!),49 +dincat,49 +dice156,49 +diana (kaminomi),49 +diamond mouth,49 +dhokidoki,49 +deuce spade,49 +detergent,49 +dekisugi hidetoshi,49 +david king (dead by daylight),49 +danmaku!!,49 +dandadan,49 +dance studio,49 +daga,49 +da raku,49 +cytomander,49 +cyborg ninja,49 +cyan aeolin,49 +cure white pose,49 +curcuma (flower knight girl),49 +crost,49 +coyomin,49 +corrin (fire emblem) (dragon),49 +corona-chan,49 +cooler (dragon ball),49 +colette (kono bijutsubu niwa mondai ga aru!),49 +coffeiz p,49 +cody (dross),49 +clarice di lanza,49 +ciel (bird/binary),49 +chronosth1,49 +christmas elf,49 +choujin gakuen gowcaizer,49 +choco (moyasi),49 +chiutake mina,49 +chirang,49 +chimera (chuu2koi),49 +chihyaa,49 +chibimame,49 +chaudlapin,49 +charlotte christine de colde,49 +charlotte (shironeko project),49 +charlie brown,49 +chaou,49 +chan qi (fireworkhouse),49 +catcar0983,49 +cassidy (pokemon),49 +cassandra dimitrescu,49 +cascoon,49 +cappuccino (drink),49 +cape tug,49 +cannian dada,49 +calum (existence),49 +cafemoka-septro,49 +byakuya0315,49 +burmy (plant),49 +burger skater (idolmaster),49 +bunny pin,49 +budgiepon,49 +bubble man,49 +bt-7274,49 +brooklyn (kancolle),49 +bronya zaychik (yamabuki armor),49 +boyfriend (friday night funkin'),49 +boomei (nanashi mumei),49 +bmkro,49 +blue unitard,49 +blue heart,49 +blowing on food,49 +black (among us),49 +big al,49 +bertha (pokemon),49 +bending backward,49 +benchen06,49 +belt feed,49 +beheeyem,49 +beater,49 +banzai nekomimi,49 +bakumatsu rouman dainimaku,49 +b (papiopine),49 +b-52 stratofortress,49 +azoth knife,49 +ayanokouji kiyotaka,49 +ayane ichijou,49 +ayame (no ohana),49 +awa iro,49 +aviator girls,49 +aurorus,49 +asura cryin',49 +assembling,49 +ass biting,49 +askr (mymy),49 +ashiya kouhei,49 +ashina genichirou,49 +asami yurumu,49 +as (ashyblown),49 +artia,49 +arizona (azur lane),49 +aria (okuda08808008),49 +arata,49 +arabian architecture,49 +aqueduct,49 +aquarion logos,49 +aqua outline,49 +apsara (elsword),49 +apron aside,49 +applying gag,49 +aoyama yuuga,49 +aotsuki ushio,49 +aoi akua,49 +anya cocolova,49 +annes (g (genesis1556)),49 +angon623,49 +ang,49 +andromedako,49 +amiya (fresh fastener) (arknights),49 +ameya kirika,49 +ameli (girls' frontline),49 +ambulocetus,49 +amano yukiko (youkai watch),49 +alolan persian,49 +alfred,49 +alex (stardew valley),49 +akuma shitsuji to kuroi neko,49 +akariya kuubu,49 +akagi kouhei,49 +aiue0,49 +aitaso,49 +aibon,49 +agu (antyosan),49 +agnes tachyon (umamusume) (cosplay),49 +afei (sfma3248),49 +acid (acid-field),49 +abarerumidori,49 +810 (dadmiral),49 +5saiji,49 +1995,49 +112san,49 +00tuma00,49 +zoo,48 +zhano kun,48 +zero (jckz2334),48 +yuyuko (yuyucocco),48 +yuyukana,48 +yuuten,48 +yuubari (azur lane),48 +yuri (dirty pair flash),48 +yuna (kuma kuma kuma bear),48 +yukina (kabaneri),48 +yuki haru,48 +yuki (otosuki),48 +yukeyf,48 +yuanagae,48 +yotsuha (little squash),48 +yosyo,48 +yossy (yossy1130),48 +yoroi mikoto,48 +yazaki kallin,48 +yaya (y8ay8a),48 +yatagarasu (kemono friends),48 +yasuraoka hanabi,48 +yasaka mao,48 +yarne (fire emblem),48 +yamori shiki,48 +yamato alexander,48 +yamanin zephyr (umamusume),48 +yamanaka inojin,48 +yam (htt),48 +yagami kamiya,48 +yafu,48 +ya kexi,48 +xhunzei,48 +wishbone,48 +winter gloves,48 +whisper (ragnarok online),48 +wet buruma,48 +water censor,48 +water boiler,48 +washanapple,48 +warable,48 +void termina,48 
+violet (closers),48 +viewtiful joe (character),48 +victor grantz,48 +vchan,48 +valiant vixen (pso2),48 +valiant (azur lane),48 +utaka (anyoanyot),48 +usuzan school uniform,48 +usas-12 (girls' frontline),48 +ursula (xenoblade),48 +uro (uroboros),48 +unown w,48 +unown g,48 +unown b,48 +unou (unou mha),48 +unicron (brous),48 +underbite,48 +umu (um),48 +umbrella gun,48 +ultraman tiga (series),48 +uetaku,48 +uehara,48 +uchida mayumi,48 +ucc coffee,48 +u-sama (u summer0719),48 +tyrande whisperwind,48 +typhoon,48 +tt-392,48 +tsuna (so da77),48 +tsukuyomi sasami,48 +tsukiiro,48 +tsuke (maholabo),48 +tsukasa (.hack//),48 +tsuka (blind side),48 +tsubomi fujiko,48 +tree costume,48 +toyota 86,48 +toxapex,48 +towelket wo mou ichido,48 +toushirou (sugarwhite1046),48 +toshinobu40,48 +toshi aki,48 +too many bats,48 +tonbanlove,48 +tomoeda high school uniform,48 +tomitayaki,48 +tokiniwa,48 +tokiji,48 +tokeshi,48 +togusa saburou,48 +togare,48 +tikuwanwa,48 +tiea,48 +three of hearts,48 +thomas bangalter,48 +thedoujin,48 +the prince (katamari damacy),48 +the hanged man (tarot),48 +thaumazo,48 +thai flag,48 +tetratech,48 +tenjouin fubuki,48 +tayashigu (suisyounohosizora),48 +taut jacket,48 +tartaros online,48 +tani takuya,48 +tanashi miyoko,48 +tanaka kii,48 +tamifull,48 +tamaki (diarie inaiinaibaa),48 +takemaru08,48 +take (office-t),48 +takahiko,48 +takafuji yuna,48 +taka shida,48 +taishi22,48 +tadakuni,48 +tachibana isana,48 +tabi boots,48 +sword art online: hollow realization,48 +swon (joy200892),48 +super buu,48 +sunset skyline,48 +sunohara youko,48 +sunahara yoshimi,48 +sumiyoshi chisato,48 +sumika (rrz03),48 +suikou (genshin impact),48 +suikka,48 +su34ma,48 +stunky,48 +straw doll,48 +stork,48 +sticks,48 +sthesia awar,48 +static electricity,48 +spiked legwear,48 +soubee1,48 +soranakidayo,48 +soramiruku,48 +sonparesu,48 +sonomura,48 +somsom,48 +solid&etc,48 +solaris (sinhyg),48 +solar kaichuudentou,48 +sol badguy (cosplay),48 +smol mumei,48 +slimification,48 +skyfire (temperature difference) (arknights),48 +sisuko1016,48 +sindoll,48 +silica (silica silylate),48 +siberian husky (kemono friends),48 +shut (go! 
princess precure),48 +shujin,48 +shugami,48 +shovel knight (character),48 +shoujo l,48 +shokubai phantom girl,48 +shokill,48 +shoggoth,48 +sho-tan (thedoujin),48 +shizuku (omamori himari),48 +shishikai,48 +shiroto iku,48 +shirasaki aloe,48 +shirai yuuri,48 +shinobi (game),48 +shinkyoku soukai polyphonica aphonic songbird,48 +shinkawa youji (style),48 +shining ark,48 +shinae,48 +shikigami no shiro,48 +shikai (iesiki 56),48 +shijou hinako,48 +shiina aki,48 +sheya tin,48 +sheriff woody,48 +sharona (alchemy stars),48 +shaonav,48 +shanghai man,48 +shakeko (shake5),48 +seven-branched sword,48 +seth (street fighter),48 +serota,48 +separated legs,48 +senran kagura burst,48 +sekiguchi kanami,48 +seidouzan soccer uniform,48 +segawa rikako,48 +sebakanken,48 +scp-173,48 +scotch (cookie),48 +school connection,48 +sazanami tarou,48 +sawatari kazuma,48 +sasami (ki),48 +saru 000,48 +saphentite neikes,48 +santa lily,48 +sania (arc the lad),48 +sangokushi ranbu,48 +sanamisa,48 +sana (37pisana),48 +samenoido,48 +sakusakumonoka,48 +sakuramotikun,48 +sakura nanako,48 +sakuna,48 +saku hinoko,48 +saitou yuu,48 +s040784,48 +ryuko azuma,48 +ryu (masu nantoka-san),48 +rv,48 +rur (ml ruru),48 +rune factory oceans,48 +rubber chicken,48 +rosen-tai,48 +roin,48 +rogue (warcraft),48 +rock climbing,48 +robocap,48 +rizu-kyun (cosplay),48 +rita henschel,48 +rita drake,48 +riromomo,48 +rion flina,48 +rikka (dq9),48 +rii (pixiv11152329),48 +ridley timberlake,48 +riderman,48 +ribbon-trimmed vest,48 +rei taylor,48 +regieleki,48 +red trim,48 +red disappointment,48 +ravio,48 +rapa rachi,48 +ranzal (dragalia lost),48 +ranhatu,48 +randis,48 +radiant light,48 +rachel (pokemon),48 +rabe26k,48 +qingxiao kiyokiyo,48 +puck100ml,48 +projecttiger,48 +progress bar,48 +princess yoshi,48 +president (danshi koukousei),48 +poptepipic pose,48 +pop (smile precure!),48 +ponnyu12,48 +ponekusan,48 +poco.,48 +pnk crow,48 +piyoco,48 +pitui1996,48 +pinkxxxna,48 +pink (konkichi),48 +pikmin 2,48 +piana (atelier),48 +phoenix print,48 +peeing on viewer,48 +peconica,48 +pazma,48 +patriarch xtasy,48 +parum39,48 +parfait desu,48 +parda siko,48 +parazan d,48 +paprika,48 +panyatteria,48 +panties over leggings,48 +palmeros,48 +palina (pokemon),48 +pai kebon baa,48 +p (p25234112),48 +ozoi,48 +oyuki (urusei yatsura),48 +ouse kohime,48 +oshouyu tabetai,48 +osafune girls academy uniform,48 +oranguru,48 +ookaji hiroyuki,48 +oogami koga,48 +oofuji wataru,48 +on (onon2659),48 +omochi monaka,48 +omega-f,48 +okatora,48 +okaohito1,48 +ohayashi55,48 +odyssey 21,48 +ochappa,48 +o-ishi,48 +nyto (generic) (girls' frontline),48 +nunosei,48 +nukunuku (hinataboltuko),48 +nuclear throne,48 +ntw-20 (op. 
blazing sun) (girls' frontline),48 +ntny,48 +noshiro (hold the ice) (azur lane),48 +nonstop story,48 +nonone (the9thxsheep),48 +noii,48 +nohara himawari,48 +nocturne (league of legends),48 +no.aei,48 +nithros,48 +nisina,48 +nishizawa yoshiko,48 +nishimawari kouro,48 +nippori,48 +nina (maurururoa),48 +nidou,48 +nicomi.com,48 +nia teppelin (cosplay),48 +nepty (silkey vod),48 +nekoma kotomitsu,48 +nekobayashi (nekoforest),48 +neko usagi,48 +neko no wakusei,48 +nehitsuji (syatihokoga),48 +nefertari (fate),48 +necromancer (sekaiju),48 +ndkazh,48 +nayuta (una),48 +natsumi (gurande),48 +natsume riu,48 +nassss,48 +nappii (nappy happy),48 +naoise,48 +nao (okt8538),48 +nanase ren,48 +nameless king,48 +nakaba,48 +nailah (fire emblem),48 +mystia lorelei (cosplay),48 +muhut,48 +mu (caligula),48 +mori yoshihara,48 +monza (saeumon),48 +monica adenauer,48 +monaco (hetalia),48 +momoko (bombergirl),48 +moment (moment 607),48 +mold,48 +moji (mojimozis),48 +mogomaco,48 +mofu mofuko (ryusei hashida),48 +mochikushi,48 +mocco (mocco san1),48 +mmk,48 +mizz peachy (voice actor),48 +mizuki-chan (kanabun),48 +miyako yoshika (cosplay),48 +miwa uni,48 +mivioppai,48 +miss spencer,48 +miruru (rune (pixiv 25170019)),48 +mirai yashima,48 +minagi (gogogo),48 +milk (tonpuu),48 +mikado mariya,48 +mihara chiharu,48 +midori (green tea),48 +microsd (pibo),48 +microa,48 +mg3 (girls' frontline),48 +merumeru626,48 +merontomari,48 +melissa moretti,48 +meliadoul tengille,48 +mekabu,48 +mek number,48 +mei mei (jujutsu kaisen),48 +megane (artist),48 +megalo box,48 +meeting,48 +medic (sekaiju 4),48 +maya (culture),48 +mattsu,48 +matsuo masago,48 +matsugane youko,48 +matsu (sekirei),48 +matoki misa,48 +matmaj,48 +masakikazuyoshi,48 +masakano masaka,48 +marujin,48 +marufuji hirotaka,48 +maru (maru1105),48 +margit eberbach,48 +marc (red barrel),48 +manya (dq4) (cosplay),48 +mamiya-kunchi no itsutsugo jijou,48 +malty s melromarc,48 +makoto yabe,48 +makishima shougo,48 +makie fujiyuki,48 +makarori (noah),48 +mahiro (akino-suisen),48 +magicalmushroom,48 +magenta magenta,48 +magatsu izanagi,48 +magamoto,48 +mafuyu hemp,48 +mada (shizhou),48 +macota (cookie),48 +machio naruzou,48 +m3 submachine gun,48 +lymsleia falenas,48 +luyheg,48 +luka (shironeko project),48 +ludmila lourie,48 +lotus eaters,48 +looking at flowers,48 +long28,48 +long-tailed tit,48 +lom (lom lom 8),48 +little enterprise (azur lane),48 +lisa pacifist,48 +ling shen hua,48 +lilianei,48 +ligton1225,48 +licking panties,48 +liche (wiggly liche),48 +liang feng qui ye,48 +leopard (artist),48 +leon geeste,48 +leivinia birdway,48 +leather bikini,48 +league card,48 +leadin the sky,48 +lard (kumazakiyuta),48 +kyuukyoku shinka shita full dive rpg ga genjitsu yori mo kusoge dattara,48 +kyado (amaterasu),48 +kuzumotsu,48 +kusuke,48 +kushikawa hatoko,48 +kusano kouichi,48 +kurosaki asami,48 +kuroneko w1nter,48 +kuromiko shoujo,48 +kuroha (rockluo213),48 +kurita shin'ichi,48 +kuratch,48 +kujou kiyo,48 +kricketot,48 +kousaka yami,48 +koufukutei yumeko,48 +koucha shinshi,48 +kore (korewa13th),48 +kochiya yuriko,48 +kochiya hizuki,48 +kochira koufuku anshin iinkai desu (vocaloid),48 +kleinlight,48 +kiyoichi (acacia soukyoukai),48 +kitazume hiroyuki,48 +kiryuu touga,48 +kirby: triple deluxe,48 +kine (warabi mk501),48 +kikuchi youko,48 +kiddie ride,48 +kida mochi,48 +kevin ethan levin,48 +keltan,48 +kellzallday,48 +kei-chan (atlas kei),48 +kazuha (kazuha1003),48 +kazto furuya,48 +kawai masaki,48 +katsuragi hana,48 +katia grineal,48 +katagiri (a1466502),48 +kasuga rurito,48 +kashisuover,48 
+karina,48 +kaoruko (unkrk55),48 +kano (kanokano44),48 +kan satomi,48 +kamuinii,48 +kamiowl,48 +kamen rider evol,48 +kamen rider diend,48 +kamemushi (hoozuki),48 +kakusei kenkyuu idol lab,48 +kakuchou no ou,48 +kakmxxxny06,48 +kakkou (su),48 +kakinouchi narumi,48 +kakincho,48 +kagurazaka yuna,48 +kaedeno yuu,48 +kacka,48 +jyuuji,48 +juuzawa juu,48 +juuden,48 +jusis albarea,48 +june lin milliam,48 +john (kakurenbo),48 +johanna (pokemon),48 +jmanvelez,48 +jinn (housamo),48 +jinkou-kuu,48 +jamrolypoly,48 +jamgom,48 +jacky (aerial rave),48 +jackie (arknights),48 +jacket around neck,48 +izumi yuuji (trace kouhosei),48 +iwane masaaki,48 +iwaha (iwabajunki06),48 +itokatsu (garou),48 +isumi (yangyan),48 +isone kotoha,48 +iseki shuuichi,48 +iroha uta (vocaloid),48 +ippen shinde miru?,48 +inuzuka tsumugi,48 +inui kentarou,48 +intruder (girls' frontline),48 +interrogation,48 +inkanii,48 +indee,48 +impossible armor,48 +ikra (katacoly),48 +ikea,48 +ikara,48 +ignia (elsword),48 +igashiko,48 +idass (idass16),48 +ichiru (yuzu-an),48 +ichijou haruhiko,48 +ichi rin,48 +ice fishing,48 +ice cream hair ornament,48 +hyoubu kyousuke,48 +hunter (the owl house),48 +how to draw manga redraw challenge (meme),48 +hotathino,48 +hoshino arika,48 +hoodier,48 +honokan,48 +hong kong,48 +homunculus,48 +holding cue stick,48 +hiyoko no ko,48 +hisuian sneasel,48 +hisui (kimochi),48 +hirotake,48 +hiro (h-net),48 +hindu mythology,48 +hinawa,48 +himamushi nyuudou,48 +hikaru ga chikyuu ni itakoro,48 +hihizaru badass,48 +higashiyama hayato,48 +hida kizuna,48 +hibarigaoka yuki,48 +hi-go!,48 +herlock sholmes,48 +heremia,48 +hei jin,48 +heal ball,48 +hazel0217,48 +hayashi ichirou,48 +hayase fukami,48 +hayapi (sinsin08051),48 +hattori junko,48 +hatsukoi limited,48 +hasumi eri,48 +hashiyamoto,48 +hasekura (hachinochun),48 +harusaki chiwa,48 +harurukan,48 +harukazedori ni tomarigi wo,48 +haru (primavera),48 +haneto,48 +hanami yuzuka,48 +hanakanzarashi,48 +hanaan,48 +hamigaking,48 +half-shirt,48 +hakuryuu (mixi max zhuge kongming),48 +hajimete no oisha-san,48 +hair bow removed,48 +haidara,48 +haemori ako,48 +hachikuji mayoi (cosplay),48 +ha9na,48 +gyuunyuu (mashika),48 +guzzlord,48 +guu (jungle wa itsumo),48 +guru (nicocco),48 +gurren,48 +gundam g-saviour,48 +gundam ez8,48 +guchiaki,48 +gsh-18 (girls' frontline),48 +grey asa,48 +great saiyaman,48 +grape stomping,48 +gradient neck ribbon,48 +gplnbeat,48 +gouache (medium),48 +goshingyu-sama (kemono friends),48 +goroumaru,48 +google play,48 +gomoku,48 +golett,48 +goemon1110,48 +glycyrrhizae,48 +glowing arrow,48 +girl (shinitagari shoujo to shokujinki-san),48 +gi (melmail),48 +ghost in the shell arise,48 +ggatip,48 +georgia max coffee,48 +gauma,48 +gapangman,48 +ganbaru (woainini),48 +galore,48 +galilei donna,48 +gaihan umeboshi mitchell,48 +g perarikku,48 +g3 (g310b3),48 +g.g.lemon,48 +fuurisuto,48 +futari ecchi,48 +futabaaf,48 +futaba anzu (cosplay),48 +fururu (tales),48 +furea (genjuu no mori),48 +funakubo hiroko,48 +fukuoka tarou,48 +fukuhara ann,48 +fujisaki yuusuke,48 +fujino shion,48 +frank west,48 +francis de lariatte,48 +floral flowlove,48 +floating hat,48 +fled,48 +film border,48 +fat cat (ff14),48 +fare,48 +fan hair ornament,48 +exusiai (city rider) (arknights),48 +executioner smough,48 +evangelion: 1.0 you are (not) alone,48 +eunram,48 +etidekailo,48 +error dot,48 +erin (kemono no souja erin),48 +eri na,48 +enne kl,48 +enn matien,48 +eneru (enepuni),48 +emdo (norabbit),48 +elza (ishuzoku reviewers),48 +elgyem,48 +electric toothbrush,48 +eigetu,48 +effole (fairy fencer f),48 
+ecchi na bunny-san wa kirai?,48 +eau,48 +ear bar,48 +duck hair ornament,48 +dragon (dakemakura),48 +dr. molly simon,48 +doublade,48 +doremy sweet (baku),48 +doomguy (cosplay),48 +domik,48 +dog lead,48 +dobermann (lieutenant) (arknights),48 +do konjouuo,48 +djcomps,48 +disuto,48 +distant,48 +diana jakobsson,48 +dha315,48 +despa (ousama ranking),48 +demon power (elsword),48 +delita heiral,48 +death gun,48 +dclockwork,48 +dark anus,48 +dancing pallbearers (meme),48 +daizan (mount position),48 +daisy oak,48 +daiso,48 +cure flower,48 +crazy laugh,48 +cqqz0707,48 +cow mask,48 +covering nose,48 +cossack dance,48 +coruthi,48 +common chimpanzee (kemono friends),48 +comfy,48 +colored pussy,48 +collar up,48 +cofi (eternal),48 +cobalion,48 +clutching clothes,48 +clothes on shoulders,48 +clogs,48 +cleric,48 +claire rouge,48 +cinderella (vocaloid),48 +chuuou academy school uniform,48 +chonbo (artist),48 +choco (rune (pixiv 25170019)),48 +chiya58,48 +chinchilla ears,48 +chiffon (fresh precure!),48 +chibinon,48 +cheunchin,48 +chemicals,48 +changchun (warship girls r),48 +chamber dragonmaid,48 +ceejles,48 +cauchemar (p&d),48 +catherine kyoubashi,48 +castlevania: aria of sorrow,48 +carnival,48 +carcano,48 +carbon roller (splatoon),48 +candy cane (rumble roses),48 +camouflage hoodie,48 +call of duty: black ops,48 +c.seryl,48 +bruenhilde (azur lane),48 +brown hair-chan (ramchi),48 +breast band,48 +brazier,48 +boxman,48 +bougainvillea (flower),48 +boss coffee,48 +bosacius (genshin impact),48 +borokuro,48 +bonten,48 +bojue yu yaojing 695657,48 +bobo (6630978),48 +blue card,48 +blowing nose,48 +binding discoloration,48 +bicycle seat,48 +better call saul,48 +benjamin button suukina jinsei,48 +bati15,48 +barleyshake,48 +banbuu (zeromugen),48 +bakki,48 +badamon,48 +b-25 mitchell,48 +azumawari (azumofu),48 +azazel (last origin),48 +avatar base,48 +autodesk 3ds max (medium),48 +attic,48 +atelier viorate,48 +ashujou,48 +asashin (asn),48 +asaki yukiko,48 +arzuros (armor),48 +arval (fire emblem),48 +arttoru,48 +artsy-theo,48 +art of neight,48 +arl,48 +arisaka hatsune,48 +aries shion,48 +ariel (kumo desu ga nani ka?),48 +arbuz budesh,48 +arara milk,48 +arakawa hiromu,48 +aqua tank top,48 +ao fujimori,48 +antweiyi,48 +another code,48 +ankle belt,48 +angled foregrip,48 +andromeda shun,48 +andou tsubaki,48 +andou minawa,48 +amou mari,48 +amido (compassion273),48 +amefukura art,48 +amazio komeko,48 +alpaca huacaya (kemono friends),48 +akuma homura (cosplay),48 +akina (schism),48 +akihila,48 +akagi masafumi,48 +aizawa yasumi,48 +aizawa shin,48 +ahsiu,48 +ahnei,48 +after suko,48 +adolfine galland,48 +addy (@vegeebs),48 +actor connection,48 +abel (street fighter),48 +abe (f!mixture),48 +a.f.o,48 +^p^,48 +7:24,48 +720 72,48 +300 heroes,48 +2gong (9ujin ),48 +2ch.ru,48 +00kashian00,48 +001machi,48 +zonana,47 +zipper leotard,47 +zipgaemi,47 +zima (ready to go) (arknights),47 +ziggs,47 +zges,47 +zeri (league of legends),47 +zenzai (zenzaio72),47 +zarameru (tsukumo),47 +zap-nik,47 +yuusha tokkyuu might gaine,47 +yuuki tsubasa,47 +yuuhei,47 +yuudadou,47 +yuri ai,47 +yurako-san (tama),47 +yuragi (nukomomo),47 +yuno (p2eocene),47 +yumeha tseru,47 +yukimura kaname,47 +yukimi (yagi),47 +yukikaze (sovereign of the swing) (azur lane),47 +yukana,47 +yugo asuma,47 +youei (maiko),47 +you hashira,47 +yotti,47 +yotsuya yumi,47 +yoruda,47 +yomotsuka makoto,47 +yomogi (monster hunter),47 +yinori,47 +yinlu tongzi,47 +ying jing meng,47 +yeti (mu kaiten),47 +yeorem,47 +yellow robe,47 +yellow bloomers,47 +ye (ran chiiipye),47 +yadu nadu,47 +xxxx 
saki,47 +xsinon,47 +x-shaped eyewear,47 +wo chi xiangjiao pi,47 +wingtemple,47 +wilted ahoge,47 +whitter,47 +white2013,47 +weizen,47 +wei wuxian,47 +wata (rakopepa),47 +washizuka shou,47 +wash,47 +warren louw,47 +wari (nirodo26),47 +warabimochi kinako,47 +wagamama high spec,47 +vuxer,47 +vullaby,47 +vorupi,47 +vocaloid (sour-type ver),47 +vivian james,47 +virtual youtuber shiten'nou,47 +vincent nightray,47 +vanilla (v-palace),47 +vampire (the breath of spring) (azur lane),47 +vafar7,47 +uzuki takeru,47 +utawarerumono radio,47 +usao (313131),47 +urue,47 +unown p,47 +unoobang,47 +uni (vocaloid),47 +unbuttoned skirt,47 +ukitake juushirou,47 +uboar,47 +type-dog,47 +tweetdian,47 +tungsten (kwfr4544),47 +tuck,47 +tube top pull,47 +tsuuyakukan reni,47 +tsuuma,47 +tsuruhime yachiyo,47 +tsunano (koi pink),47 +tsumugi wenders,47 +tsujin bohboh,47 +tsugaru (co-walker),47 +trans-am,47 +toumei answer (vocaloid),47 +toufu (toufu 53),47 +toto (lom),47 +toruglose,47 +torahime,47 +tomohi,47 +tom skender,47 +tokutokenzen,47 +tohsaka rin (formalcraft),47 +toake mikage,47 +to gemichi,47 +tlla,47 +tiramisu651,47 +time travel,47 +tianya beiming,47 +thundercats,47 +the star (tarot),47 +the sealed esoteric history,47 +the maa,47 +the dream of the fisherman's wife,47 +the batter (off),47 +tetsujin 28,47 +tes (unpre),47 +tenzen (netspike),47 +tent interior,47 +tatara kogasa (cosplay),47 +tarou (shironeko project),47 +taranboman,47 +taniya raku,47 +tanikku,47 +tania (dq6),47 +tamakake,47 +takanoru,47 +takano hayato,47 +takanashi kiyomi,47 +takahashi kouta,47 +tael,47 +t misaomaru,47 +sys.ao,47 +syou (crecre),47 +synergetic suit,47 +symposium of post-mysticism,47 +sword oratoria,47 +sweet ann,47 +swatty (can can bunny),47 +suzushiro (daikon793),47 +suzume yuu,47 +suparaisu,47 +suo (sndrta2n),47 +sun hair ornament,47 +summertime record (vocaloid),47 +summer lesson,47 +summer-d (dodojune),47 +sukoyaka middle school uniform,47 +sukimi,47 +suiso (owp),47 +suica,47 +strictly mecha,47 +strib und werde,47 +strawberry syrup,47 +straw hats jolly roger,47 +star conflict,47 +star color pen,47 +star-ring,47 +srro yo,47 +splattershot pro (splatoon),47 +soramame1110,47 +sora no tori,47 +solanikieru,47 +smolev,47 +slender man,47 +slakoth,47 +sky (shantae),47 +sir aaron,47 +sin gun woo,47 +silayloe,47 +sigmart03,47 +side-tie shorts,47 +shrie,47 +shoujo-tachi wa kouya wo mezasu,47 +shoko (super real mahjong),47 +shirokami gakuen,47 +shiro tsugumi,47 +shiratama (xsrtmx),47 +shirataki,47 +shirasu don,47 +shirase ui,47 +shirako mugi,47 +shinonome mozuku,47 +shino (.hack//),47 +shine hausen,47 +shindou mikeko,47 +shindou erina,47 +shimo hisae,47 +shima yukiwa,47 +shikinyan,47 +shikibu honoka,47 +shiina mahiru,47 +shihou (g-o-s) (style),47 +shi oo,47 +shenqi de (9),47 +sharktuna,47 +shands,47 +sgt lonely,47 +seth,47 +seshiya,47 +sekiguchi yuria,47 +scrap iron,47 +scathach (fire emblem),47 +scarz,47 +sayuri (k pring),47 +saya (casual) (blue archive),47 +sawaki ken,47 +saul goodman,47 +satou odori,47 +sasame yaya,47 +sapphira nyx,47 +saotome shino (chigusa minori),47 +santa bra,47 +sano naoi,47 +samurai flamenco,47 +samaru (seiga),47 +salome (fate),47 +salmiakki,47 +sakurada maon,47 +sakura inu (itoyatomo),47 +saki usagi,47 +sakamiya tsuto,47 +sailor uranus (cosplay),47 +sagami (dei shirou),47 +saeki mika,47 +saber (type-moon racing) (fate),47 +s.o.n.g. 
uniform,47 +ryuusei world actor,47 +ruuko-san,47 +runako,47 +rumaki,47 +rugatsuki,47 +rudo (rudorudo0917),47 +rubber soul,47 +rotan,47 +rope ladder,47 +rokuon,47 +robokeh,47 +roberto,47 +riri (ririwaldorf),47 +riki (archf),47 +rider gashat,47 +ride armor,47 +reyna (valorant),47 +reverse x-ray,47 +reverse bikini armor,47 +retei,47 +resistance japan,47 +ren huozhe,47 +remora works,47 +rekenber guard,47 +refrain no chika meikyuu to majo no ryodan,47 +redlark (r083),47 +red sweater vest,47 +red lightning,47 +rasson,47 +raspberry hair ornament,47 +raind,47 +rack,47 +queen marika the eternal,47 +qoom,47 +purple mii,47 +purestream (arknights),47 +puppypaww,47 +pupitar,47 +punished pigeon,47 +ponnu (nikeytina),47 +pomki,47 +polka dot sarong,47 +polka dot gloves,47 +poke puff,47 +ploki,47 +playing flute,47 +player 2 (cloba),47 +pixiv's virtual youtuber audition,47 +pisuta (yamashiro),47 +pink moon stick,47 +pikaremon,47 +phantasy star online 2 new genesis,47 +peter griffin,47 +pet bed,47 +pesogin,47 +perio 67,47 +penny loafers,47 +pastahands,47 +parvati (p&d),47 +paolo antonio aguasin,47 +panne (fire emblem),47 +paku paku desuwa,47 +pakapom,47 +paimon genshin7,47 +pablo uchida,47 +p3ta (yio),47 +p2yong,47 +p2020 (pistol),47 +ouka mai,47 +otxoa60,47 +otonari no tenshi-sama ni itsu no mani ka dame ningen ni sarete ita ken,47 +oshimoto yuri,47 +oropi,47 +orokanahime,47 +organic,47 +orbital path,47 +oppi (ksop28),47 +operator 21o,47 +ootengu (onmyoji),47 +oooqqq,47 +onio,47 +oneshot (game),47 +onee-chan no yuuwaku,47 +omi (tyx77pb r2),47 +okotan (dewdrops),47 +ojyou,47 +ogasawara akiko,47 +odaiba middle school uniform,47 +o (jshn3457),47 +nyoon,47 +nurse dragonmaid,47 +nurse akali,47 +ntm,47 +norun,47 +norse mythology,47 +norada,47 +non-circular lens flare,47 +noir vinocacao,47 +noguchi takayuki,47 +no nut november,47 +nishijima masumi,47 +nisha labyrinth (elsword),47 +nima (nimamann),47 +niizuki (azur lane),47 +nicole (usako),47 +nichi keito,47 +nezunezu,47 +newmoonshira,47 +neva,47 +neumo,47 +neota,47 +nemigi tsukasa,47 +nemeneko 6,47 +nekomancer (granblue fantasy),47 +nekojiri,47 +neko (naomi),47 +neilos,47 +nehani (tworship),47 +nazi flag,47 +natsume masako,47 +natsuhachi,47 +naporu,47 +napoleon crossing the alps,47 +nanahoshi ren,47 +nametake (nekoyasya12172),47 +nakau,47 +naijiaer,47 +nagiriku912,47 +nagarakawa shiraku,47 +nabeya sakihana,47 +myuutau tadakichi,47 +myuga66666,47 +mutilation,47 +musse (falkyrie no monshou),47 +multicolored sarong,47 +mullany (azur lane),47 +motoyama tomomichi,47 +moroda shiori,47 +morino kiriko,47 +morimoto chio,47 +morihaw,47 +mootecky,47 +monotiina,47 +mon-chan,47 +mom 29 mom,47 +mokopekko,47 +moda (mo da 3),47 +mochizuki yomogi,47 +mochizuki nonoka,47 +mochizuki anko,47 +mochiyuki (gigfor1),47 +mmm (xbit),47 +mizusawa nodoka,47 +miysin,47 +miura kentarou,47 +miura hajime,47 +mitsuru (habihapi),47 +misakana,47 +miri (miri0xl),47 +miracan,47 +mira kimishima,47 +miotama,47 +mion (htol#niq),47 +minnku,47 +ming (5unri5e666),47 +minatsuki (m1natsuk1),47 +miles (gyee),47 +mikuroron,47 +mikoze yui,47 +mikage sakurako,47 +mihashi (re4 glo),47 +midori-chan (myslot),47 +mexican flag,47 +meunhongcha,47 +methynecros,47 +metal max,47 +metadoll,47 +mellow rag,47 +meisamu,47 +megurine luki,47 +megazone 23,47 +megawatt (arms),47 +mega metagross,47 +medb (first ascension) (fate),47 +mclaren,47 +mazinkaiser (robot),47 +mayu-tan no kung fu,47 +matthew (yo matthew),47 +matsuse daichi,47 +master hand,47 +mashiro (solopipb),47 +maru (hachi 47923),47 +martha (santa) (fate),47 +maria 
(ff2),47 +mao xin'ai,47 +manyuu chifusa,47 +manbou no ane,47 +manaka de ikuno!!,47 +mana (artist),47 +makoto osamu,47 +makinakid,47 +majo koi nikki,47 +majin bone,47 +magical emi,47 +magical amber,47 +macchaume,47 +lvl3toaster,47 +luye yuan,47 +lumu yuanxiang,47 +love fetish,47 +loona (helluva boss),47 +lobsteranian,47 +little nightmares,47 +liren44,47 +lipstick mark on neck,47 +lina (interlude),47 +lilia (null),47 +licking paw,47 +libra douko,47 +leonard bistario harway,47 +lemonmelon00,47 +leather shorts,47 +laurent (fire emblem),47 +large pasta,47 +land of caromag,47 +lactic acid bacteria,47 +lace-trimmed capelet,47 +kyouko's father (madoka magica),47 +kyattsu,47 +kuzel (bonolangje),47 +kuuyuu,47 +kusunoki kaho,47 +kusaka kou,47 +kurose yuuki,47 +kuromasa shisei,47 +kuromajo-san ga tooru!!,47 +kuroinu9,47 +kuroiani,47 +kurogane (blackxsilver),47 +kurata yumi,47 +kurai yonaka,47 +kurai masaru,47 +kurai (cry),47 +kunagisa tomo,47 +kumamiya,47 +kuma (2ch),47 +kuchinawa,47 +kronshtadt (begin the rush!) (azur lane),47 +krebskrum,47 +koyopi,47 +koya,47 +kouzuki (majc8345),47 +kotono,47 +kotoha (kotoha65),47 +kotetu han,47 +kotaken,47 +kosame koori,47 +konoshita kiyomasa,47 +konoha no sekai jijou (vocaloid),47 +kondou (acid),47 +komala,47 +komagome azuzu,47 +kokose,47 +knora,47 +kiyosumi kuro,47 +kitsutsuki (dzgu4744),47 +kitora ai,47 +kitami reika,47 +kissing breast,47 +kisaragi maaya,47 +kirsten (arknights),47 +kirisawa tokito,47 +kirigiri jin,47 +kirie nozomi,47 +kira (kira dra),47 +kimura fumie,47 +killer (one piece),47 +kent (azur lane),47 +kenshin kagemitsu,47 +kel-tec ksg,47 +keira (haevest),47 +kcn,47 +kazura enji,47 +kazami yuuji,47 +kawwa,47 +kawaniwa,47 +kawamoto toshihiro,47 +kawaii rowa,47 +katyusha (gindoro),47 +katou umi,47 +kataasa-ko,47 +kasumi (blue archive),47 +kashiwara mana,47 +karintou1485,47 +kapu rinko,47 +kanti15,47 +kano (coyotehunt),47 +kanimiso (juutilainen77),47 +kanamura will,47 +kamome,47 +kamen america (comic),47 +kamado nezuko (cosplay),47 +kama (satoyan),47 +kaiend,47 +kagura hinata,47 +kagerofu,47 +kagehira mika,47 +kabashima yousuke,47 +k no hito,47 +k nekoneko,47 +k2pudding,47 +junka-sakura,47 +jun (kyurisin),47 +jugemu (qqkyon),47 +jougen,47 +johssnny,47 +johnnie,47 +jinzou enemy (vocaloid),47 +jingle (mhb729),47 +jenma-chan,47 +james hotate,47 +izou (one piece),47 +iyayo,47 +itsuki shu,47 +itsuka shiori,47 +itori (clarice note),47 +israel,47 +iris yi,47 +inxst,47 +inusurvivor,47 +inukai purin,47 +inugami korone (dog),47 +intelli village no zashiki-warashi,47 +inori (princess connect!),47 +infinity (module),47 +imminent fingering,47 +iketsuko,47 +igaiga,47 +ifrit (final fantasy),47 +idolmaster poplinks,47 +iczer-1,47 +ichi kq,47 +icarus (azur lane),47 +i:p masquerena,47 +hyou itto,47 +human homeosta,47 +houshou marine (artist),47 +houhokekyo,47 +hoshifuri iku,47 +honnou (kjs9504),47 +hongye feixue,47 +holiday,47 +holding vase,47 +holding lightsaber,47 +holding gag,47 +holding by the ears,47 +hitodama (madatohi),47 +hisui (syarute),47 +hina (ohinao),47 +hikari (arcaea),47 +high x joker (idolmaster),47 +hidora art,47 +hidoi,47 +hidden camera,47 +hidan no aria aa,47 +hemoon,47 +heimerdinger,47 +heavyrain (tranquil moss) (arknights),47 +heat-soft,47 +healing wand,47 +haze,47 +hayami ritsu,47 +hayakawa sonchou,47 +hashimoto kurara,47 +hashimoto (frog flag),47 +harukawa fuki,47 +harukagi,47 +harry (dudwofla),47 +harp note (mega man),47 +hariken,47 +haribote (desk of coulomb),47 +hapu,47 +hanyang type 88 (girls' frontline),47 +hanenashi,47 +hana no yo,47 
+hamtaro,47 +hami dmg,47 +halluel (granblue fantasy),47 +hakusan yoshimitsu,47 +haji,47 +haihai (skeleton),47 +hachirobe,47 +hachini,47 +hachimitsu-b,47 +hachiko (kota091986),47 +h shai,47 +gyumao (housamo),47 +guy-manuel de homem-christo,47 +guraa,47 +gun devil (chainsaw man),47 +groose,47 +green rope,47 +gravity suit,47 +gradient sarong,47 +gradient neckwear,47 +gorou naoki,47 +google (asdek18),47 +gonzozeppeli,47 +gold leotard,47 +gladius,47 +gigi,47 +gigantamax pikachu,47 +ghost ogre & snow rabbit,47 +ghillie suit,47 +george joestar ii,47 +general liu (girls' frontline),47 +gekkougahara miaya,47 +gasoline,47 +gaen izuko,47 +futami kito,47 +futaba aoi (asobi ni iku yo!),47 +fused zamasu,47 +furu (retr0e),47 +fukai (yas lions),47 +fubuki rinne,47 +frs2,47 +frogging,47 +frogadier,47 +frilled hakama,47 +freya (chobits),47 +freia kagami,47 +francie gerard,47 +fortress girl,47 +formica (vtuber),47 +fogged glasses,47 +floating crown,47 +flick-the-thief,47 +flash man,47 +fiona frost,47 +finger in ear,47 +filigree,47 +ficus finis,47 +fg42,47 +fei mao,47 +es (cah60700),47 +eromkk,47 +enoshima electric railway,47 +enel,47 +endymion (sailor moon),47 +eliwood (fire emblem) (cosplay),47 +eleven supernova,47 +elena (grandia),47 +ekuramani,47 +eine (eine dx),47 +echoes act1,47 +echizen murasaki,47 +easty,47 +earthspirit (arknights),47 +duke pantarei,47 +due (nanoha),47 +driftingtiger,47 +drag-on dragoon 2,47 +dou tanuki,47 +dorowa (drawerslove),47 +dorothy catalonia,47 +dorothy (marchen awakens romance),47 +door to heaven,47 +dong ji,47 +dong hole,47 +doma taihei,47 +doctor strange (series),47 +diten,47 +dismaiden,47 +dingding (chongsangjun),47 +digital (digital001),47 +diandianzai,47 +diamond tiara,47 +diabolic-mario,47 +dia de muertos,47 +dh ead,47 +dewpider,47 +devonrex,47 +dentaku music,47 +dejio,47 +deer head,47 +dayuh,47 +daydream show,47 +dawit,47 +date hajime,47 +dashingicecream,47 +dark meta knight,47 +daniel renard,47 +dakuryuu,47 +daizu sanchi,47 +dairoku tenma,47 +dadadada tenshi,47 +d.k,47 +cutie moon rod,47 +cubehero,47 +cream on breasts,47 +crash man,47 +cow (life of maid),47 +cotton ball,47 +cosmicsnic,47 +cornelia (atelier),47 +corette,47 +contra,47 +condiment,47 +concon-collector,47 +comiket 86,47 +colored smoke,47 +code geass: lost colors,47 +clovisxvii,47 +clipping nails,47 +clarisse de cagliostro,47 +clacker,47 +choujin koukousei-tachi wa isekai demo yoyuu de ikinuku you desu!,47 +chonmage,47 +chocolate on pectorals,47 +chocolate mint,47 +cho'gath,47 +chishibuki hiyoko,47 +chicken-mushroom skewer (genshin impact),47 +chibikki,47 +chest protector,47 +cherokee (1021tonii),47 +chelsea (7th dragon),47 +chawalit adsawawalanon,47 +charlotte shalzen,47 +charcoal (medium),47 +chaos witch quelaag,47 +chang koehan,47 +chain whip,47 +cevoy,47 +ceru,47 +centi mnkt,47 +cekonikova,47 +cayenne garamonde,47 +car keys,47 +caleana,47 +cafe little wish,47 +buzzer,47 +butch (pokemon),47 +burunuu (bullnukko),47 +bulzizzang,47 +bruno (yu-gi-oh!),47 +bou (maimoca501),47 +bossan 3310,47 +bobby36446329,47 +bless you (module),47 +blade regalia,47 +black tongue,47 +bird hood,47 +binchou maguro,47 +berrykanry,47 +bellona (azur lane),47 +bell mccamp (warship girls r),47 +behalter,47 +bear (artist),47 +baten (gei-vorugu),47 +baseu,47 +backbend,47 +babe (fate),47 +azuma shoujuan,47 +azalea (flower),47 +ayakashi hyakkiyakou,47 +avatar generator,47 +autism wanderer,47 +aus vaka,47 +aurebesh,47 +audio visualizer,47 +atlus,47 +astolfo (fate) (cosplay),47 +ashley taylor,47 +asaki yuki,47 +arsene lupin iii 
(cosplay),47 +arrowhead (r-type),47 +arnoul,47 +ara (sora-ageha),47 +aotu world,47 +aor saiun,47 +aokaze (mimi no uchi),47 +aoi renji,47 +ao ringo,47 +anzu (hanamaru youchien),47 +anzio princess,47 +anre (granblue fantasy),47 +anput (nsfwolf),47 +ano fuji,47 +anho,47 +angelica (k3lly),47 +aneimo,47 +amelie-san (nogi takayoshi),47 +amatsuka haruka,47 +amarotamaro,47 +amai hiyo,47 +amaha rihoko,47 +amagumo,47 +alternate uniform,47 +altena (fire emblem),47 +alma elma,47 +allen m. sumner (azur lane),47 +akuma ouji to ayatsuri ningyou,47 +akit (15jamjam),47 +akira howard,47 +akira agarkar yamada,47 +akino kabocha,47 +akilico,47 +akazukin (otogi-juushi akazukin),47 +akaza chacha,47 +akausuko,47 +akae neo,47 +akabane yu,47 +ajigo,47 +airomed,47 +aguhari,47 +agent legend,47 +aerith gainsborough (cosplay),47 +abuji,47 +aasara,47 +aaru (kerogero1582),47 +502nd joint fighter wing,47 +404 (artist),47 +163 (shiromeshi),47 +0shino,47 +0k0j0,47 +zymonasyh,46 +zonko,46 +zoey (pokemon),46 +zippo teifujou,46 +zhao shuwen,46 +zhadanzhawugui,46 +zero two (darling in the franxx) (cosplay),46 +zerg309,46 +zektbach,46 +zahlhamelina,46 +z5987,46 +yuzuriha p,46 +yuzuna hiyo,46 +yuzudaze,46 +yuukyuu ponzu,46 +yuuki shishin,46 +yuuki hana (jtnp5334),46 +yunotimo,46 +yukiou,46 +yukimachi tounosuke,46 +yuki (nanao yuki),46 +yugirlpict,46 +youko (monster musume),46 +yoshioka pochi,46 +yoshi 92,46 +yoru (xueyinye),46 +yoppy,46 +yomi (yomi14 pyaon),46 +yokoe (mealtime),46 +yna,46 +yihsien,46 +yatani (do9z),46 +yashio kaito,46 +yari no yuusha no yarinaoshi,46 +yarai miu,46 +yanagi shinsuke,46 +yamija,46 +yamaguchi sapuri,46 +yakitori (yakitori06),46 +yahankkwo,46 +xyomouse,46 +xiaobei,46 +xi yuu,46 +xi ying,46 +wu-qiao,46 +wtparadise,46 +woof,46 +wooden tray,46 +wood gradient hair,46 +wolf hat,46 +wingedwasabi,46 +wing gundam zero,46 +windcaller,46 +will-o'-the-wisp (mythology),46 +watashinabe,46 +warfarin (the feast) (arknights),46 +voodoo,46 +virginia kissless,46 +vifam,46 +vibrator under pantyhose,46 +veronica type-asc,46 +vergolophus,46 +valkyrie skirt,46 +uzutanco,46 +urasanmyaku,46 +unplugged,46 +una (pochincoff),46 +uma (oopartz yang),46 +ultraman geed (series),46 +ukm-2000 (girls' frontline),46 +ueda kana,46 +uchiyama lammy,46 +u 5ham0,46 +tyris flare,46 +type 0 fighter model 52,46 +two of spades,46 +twintailed girl (ishiyumi),46 +ttaji (pass35),46 +tsushima aki,46 +tsukino shizuku,46 +tsukamoto takashi,46 +trodh,46 +triangle halo,46 +tres-iques,46 +toyota supra,46 +toy(e),46 +towel on arm,46 +toro (shiro),46 +toranpo rintarou,46 +tonogai yoshiki,46 +tonikaku kawaii,46 +tonestarr,46 +tone rion,46 +tokyo xanadu,46 +tokyo necro suicide mission,46 +tokui sora,46 +tokihara sayuki,46 +tokage setsuna,46 +togusa,46 +togekk0,46 +toboso yana,46 +tobari (brokenxxx),46 +toba minami,46 +toaru kagaku no dark matter,46 +tim (a9243190a),46 +thwomp,46 +three (drag-on dragoon),46 +thomas the tank engine (character),46 +the a-team,46 +tetsumi,46 +tetora (yumejihuka),46 +tenjiku nezumi,46 +tenchou no matsumoto,46 +teisel bonne (mega man),46 +teikoku gensui,46 +tatwuku,46 +tatsuyoshi (zawahomura),46 +tatara (alnath may),46 +tasasakiamagu,46 +tanada,46 +tamamono atae,46 +takunama,46 +takasu ayako,46 +takase (harakiri),46 +takarada kaneo,46 +takamu,46 +takajou joujirou,46 +takahashi masaki,46 +taira kosaka,46 +taichi (yirkorn),46 +tai yuan (azur lane),46 +tagu,46 +taburakashi,46 +sylvia (dq11),46 +sword fight,46 +swiss army knife,46 +swimsuit around one leg,46 +suzumo70,46 +suzu danna,46 +susutouka,46 +surf (pokemon),46 +super robot 
wars advance,46 +suou katsuya,46 +suminoe riko,46 +suiren to shion,46 +su (gyee),46 +strigidae,46 +stress,46 +stratosphere (coom1017),46 +straight-laced footwear,46 +storia,46 +steam from mouth,46 +starship troopers,46 +sr-3mp,46 +squid neetommy,46 +spring onion print,46 +sp9 (girls' frontline),46 +soumenhiyamugi,46 +souma kisa,46 +souji kurokawa,46 +sora-bakabon,46 +son goku (cosplay),46 +sokuse kienta,46 +soku (bluerule-graypray),46 +sokomushi,46 +soga kaede,46 +snowdreams -lost in winter-,46 +sleeveless turtleneck dress,46 +sledding,46 +skyfish (kemono friends),46 +skura01,46 +skull knight (berserk),46 +sion001250,46 +silver bullet (ecc12 8),46 +sig (granblue fantasy),46 +shuka (taupe),46 +shou xun bu liang,46 +shokkaa (shmz61312),46 +shisen,46 +shiro (houseki no kuni),46 +shirakawa yuuko,46 +shiragi,46 +shinohara asuma,46 +shinobu akira (madoka magica),46 +shinidamachuu,46 +shindou ai,46 +shinatsukou,46 +shinatose izana,46 +shieldon,46 +shidaidaka,46 +shi shi ji,46 +shain,46 +seth (fire emblem),46 +sesshouin kiara (swimsuit mooncancer) (cosplay),46 +serori (koredemoka),46 +serie a,46 +serena (pokemon) (cosplay),46 +senkyoushi gondolf,46 +senketsu-kisaragi,46 +senji muramasa (second ascension) (fate),46 +seifuku!,46 +see-through kimono,46 +scleriteaeaeaggadah,46 +satou takeshi,46 +satou pikuta,46 +satou (su9arsatou),46 +satomi rentarou,46 +satochi,46 +sata (sat),46 +sasaki yukina,46 +sasaki bullet,46 +sasaj,46 +sarmat,46 +sarasa (kanbi),46 +sara kodama,46 +sapphicneko (sapphicneko),46 +saogokushi,46 +sanzu haruchiyo,46 +sanada (teketo),46 +sana hamada,46 +sakusa kiyoomi,46 +sakura mai (photokano),46 +sakuma hiragi,46 +saki (viper),46 +sake (kadai),46 +sakata ginko,46 +sakamoto bin,46 +sakaki tsui,46 +sakaki soshi,46 +saitou rokuro,46 +saitou hajime (third ascension) (fate),46 +sagatsune,46 +ryuuzouji akane,46 +ryuuko (oggy),46 +runep,46 +rummy 73,46 +ruku (ruku 5050),46 +royal flush,46 +rosamia (granblue fantasy),46 +rope bridge,46 +rizuriri,46 +ririvery,46 +rintaro komori,46 +rindou ringdong,46 +riley fairfeather,46 +rg (gumin),46 +reversible,46 +resuta,46 +ressue (gomgom),46 +resisting,46 +rennala queen of the full moon,46 +ren kun,46 +remy (street fighter),46 +reis duelar,46 +regu (reg95),46 +regain,46 +red ninja,46 +red (neptune series),46 +rdg red data girl,46 +raymond busujima,46 +rasha,46 +rakkou,46 +rainbow wings,46 +raikou (gotaishu),46 +radar chart,46 +ra mun5239,46 +pz-15,46 +purple kecleon,46 +puni (punycolors),46 +pulp fiction,46 +public urination,46 +pt@i,46 +psychic parrot,46 +prostate massager,46 +pronama-chan,46 +project-sp,46 +programming live broadcast,46 +professor layton vs. 
phoenix wright: ace attorney,46 +princess form (princess connect!),46 +poteto (potetosarada123),46 +pot-palm,46 +popularity contest,46 +poo (saku),46 +poncho (31103105),46 +pokke (pokke-chan),46 +poison916,46 +pk (pmd-explorers),46 +pittman (alchemy stars),46 +pinnn,46 +pink pet bottle,46 +pillbug,46 +pikao,46 +picarto.tv,46 +pi (.hack//),46 +peter pan (character),46 +pemoyashi (tianoblue),46 +pekikokko,46 +pee-kay,46 +pecohophop,46 +peach blossom,46 +pc angel,46 +patricia schade,46 +patori,46 +parufeito,46 +papers please,46 +pantie painting,46 +pano (mohayayamai),46 +pandora-ex,46 +pako (moto paco),46 +oz sorcerer (elsword),46 +ouma mana,46 +otou mamayo,46 +otaku heishi,46 +oshiruko (shiruko),46 +osanpogakari,46 +orator (fft),46 +orange serafuku,46 +orange blossoms,46 +ophiuchus shaina,46 +oonami kizuna,46 +ookado tsubasa,46 +oohashi sumika,46 +onimusha: dawn of dreams,46 +omoi yo hitotsu ni nare,46 +okikurumi (ookami),46 +okema,46 +oinari (koheihei1109),46 +oi shibako,46 +oda nobunaga (sengoku collection),46 +obybuss,46 +numanoan,46 +noyuki (3702),46 +noda yasuyuki,46 +no headband,46 +no emblem,46 +niya,46 +niwatori panda,46 +nishino kanako,46 +nishijou myu,46 +niratama-don,46 +ninagawa amuro,46 +nimuno (munimuni),46 +nils (fire emblem),46 +nila (cyancapsule),46 +niizuma lovely x cation,46 +niameresp,46 +neuralyzer,46 +nero claudius (swimsuit caster) (fate) (cosplay),46 +nephrite (sailor moon),46 +neneko (yumeria),46 +nemo (piisukun),46 +nekokobushi,46 +nekogoro,46 +nekobox,46 +neko nadeshiko,46 +nehelenia (sailor moon),46 +needle (hollow knight),46 +nazo no hito,46 +natsuyasumi.,46 +natsume nono,46 +narumi suzune,46 +naoe yamato,46 +nanamiyuki,46 +nakatsugawa ui,46 +nakamura ayamo,46 +nakaji (user snap3353),46 +nakahara-kun no kahogo na imouto,46 +nagomi tozakura,46 +nagikiho,46 +nagi aoi,46 +naganadel,46 +nagamon,46 +na (oagenosuke),46 +n36hoko,46 +n2ewu,46 +n0r0,46 +mzyk,46 +myuhuaki,46 +my mai tonight,46 +mutsu ane daisuki,46 +mute (mute89539160),46 +murasaki shikibu (swimsuit rider) (third ascension) (fate),46 +mumumu hoshibito,46 +multicolored hair bobbles,46 +mugensaku,46 +mr. 
chin,46 +mouse pointer,46 +moseley,46 +morton koopa jr.,46 +morridow (girls' frontline),46 +moon reflection,46 +montmorency margarita la fere de montmorency,46 +moni (credo),46 +monaco (rmn02),46 +momota (sunaba suzume),46 +momoiro guardian,46 +momogesomaru,46 +mokutan (link machine),46 +mobius final fantasy,46 +mktr (princess mktr),46 +mizuto umi (blue monday),46 +mizuki (mz),46 +mizuhashi parsee (cosplay),46 +miyuki (9029009),46 +miyazaki hiyori,46 +miyake taishi,46 +mittens removed,46 +mitsuji mitsuyoshi,46 +mitsuishi kotono,46 +mita mitauzo,46 +missingno.,46 +missing stars,46 +miss black,46 +misono chiaki,46 +misaki kurehito (style),46 +mirror's edge,46 +mirakurun (cosplay),46 +mira (pokemon),46 +minstrel (ragnarok online),46 +minpou (nhk),46 +minenami ryou,46 +minau37,46 +mimosa vermillion,46 +mimiru (mimill),46 +mimi (picarto.tv),46 +military base,46 +milia leclerc,46 +miki (shugo chara!),46 +mihoshi middle school uniform,46 +migu (migu room),46 +mienfoo,46 +michael.r,46 +miatsushi,46 +mia (golden sun),46 +mi (liki1020),46 +mgg (x arte),46 +metal hero,46 +metal gear rex,46 +mercy (overwatch) (cosplay),46 +menogias (genshin impact),46 +melfina (outlaw star),46 +mega man 8,46 +meer rowe,46 +mechazinaida,46 +mecha eli-chan mk.ii (fate),46 +mdr (cocktail observer) (girls' frontline),46 +mazingkaizer,46 +matsui celine,46 +mathilda (fire emblem),46 +masutaauu,46 +masikakuma,46 +marutani,46 +maru (umc a),46 +maru (lagrange),46 +maronee san,46 +maromi gou,46 +marimony manumonica,46 +maple colors,46 +mao (yotaro),46 +manzai,46 +malluel (granblue fantasy),46 +maiko (mimi),46 +magician (elsword),46 +magical mirai miku (2013),46 +madland,46 +macho ojiji,46 +machi fuka,46 +m (mrtarou),46 +m1919a4 (girls' frontline),46 +m1904 mastiff,46 +lyra (pokemon) (cosplay),46 +lyon rem helsos,46 +lymee,46 +luxuria,46 +lunarscent,46 +lsr,46 +lr (last remote 514),46 +lofi girl,46 +loch ness monster,46 +lincoln loud,46 +linatai,46 +light oooo,46 +liete (grandia),46 +lieselotte w. 
dorssia,46 +lidia sobieska,46 +liar princess,46 +liangzi tai hongcha,46 +lewyn (fire emblem),46 +leon luis,46 +lenk64,46 +laugh 111,46 +larry koopa,46 +langya beike,46 +laki,46 +ladder cutout,46 +kyuumoto kuku,46 +kyohei,46 +kutsunohito,46 +kushala daora,46 +kusanagi motoko (cosplay),46 +kusabue mitsu,46 +kurumiyasan ns,46 +kuroinu 2,46 +kuro (baseball0000),46 +kuribulb,46 +kurenai (red ninja),46 +kureha misaki,46 +kurano ema,46 +kumaori,46 +kumaccyo,46 +kuma piv,46 +kuchisake-onna (ishiyumi),46 +krutta fan,46 +koushi rokushiro,46 +kou (wagaya),46 +korin (shironeko project),46 +kontahsm,46 +konnyaku yuuna,46 +kongbai,46 +kondraki,46 +kompeitou (lemon garden),46 +komorebi ni yureru tamashii no koe,46 +kogetail,46 +ko-chin,46 +knucklecurve,46 +knight lautrec of carim,46 +knee grab,46 +kkaags,46 +kizinori,46 +kiyonaka rei,46 +kitsune choukan,46 +kitsugai sese,46 +kiso (kancolle) (cosplay),46 +kishimoto masashi (style),46 +kishimoto ayase,46 +kiryuu yoshiya,46 +kiriyama machi,46 +kirigoe mima,46 +kiri sakura,46 +kinoruru toiro,46 +kinnikuman (character),46 +kim hana,46 +kiki (koba),46 +kijo kouyou (third ascension) (fate),46 +kid buu,46 +kensei (v2),46 +kemo (pokka),46 +kekkaishi,46 +kazuto izumi,46 +kazeco,46 +kazahari kazuho,46 +katsuto,46 +katoyo85,46 +katia waldheim,46 +katase waka,46 +kat (mazume),46 +kasuga aya,46 +kasamatsu yukio,46 +kariki hajime,46 +karasu hito,46 +karasaki,46 +kaos,46 +kanae (aab),46 +kamogawa (kamogawa sodachi),46 +kaminosaki,46 +kamino saori,46 +kamen rider live,46 +kame,46 +kalluto zoldyck,46 +kakotomirai,46 +kakikorori,46 +kajun faycott,46 +kaida bola,46 +kai28,46 +kagutsuchi (victoragna),46 +kagura (senran kagura),46 +kaenbyou rin (cosplay),46 +jyaco,46 +juu p,46 +julia (cowboy bebop),46 +johannes krauser ii,46 +jinrou judgment,46 +jinbei,46 +jeanne d'arc (inazuma eleven),46 +jdw,46 +jasmin darnell,46 +japan animator expo,46 +janload1ng,46 +izuno kenta,46 +izumi kyouka (bungou stray dogs),46 +iwanishi,46 +ivy eveland,46 +ivis,46 +itsme takk,46 +italian flag bikini,46 +isoroku (haifuri),46 +ishigaki tetsurou,46 +ishidaki,46 +irise ryouji,46 +iris (arknights),46 +irie tamaki,46 +invincible (series),46 +internet explorer (merryweather),46 +insomnia-chan,46 +inma kourin devil carnival,46 +initsukkii,46 +incoming headpat,46 +imaru (yashiro19950425),46 +imagawa yoshimoto (sengoku collection),46 +iizuka tatsuki,46 +iguchi yuka,46 +igatto,46 +ice man,46 +hyuuga kizuna,46 +hyou-kun,46 +hyokkori tamago,46 +hwoarang (tekken),46 +huiqian,46 +hpflower,46 +howl's moving castle (novel),46 +houtei9,46 +houkiboshi,46 +hosokawa kazuko,46 +hong yun ji,46 +holstaur (monster girl encyclopedia),46 +holding walkie-talkie,46 +holding hoodie,46 +hokutoro64,46 +hiyoko (kokeko),46 +hiwakana6121,46 +hitachiin hikaru,46 +hinamikan,46 +hinamayo,46 +himeshima koukichi,46 +hilbert (fall 2020) (pokemon),46 +hikigaya 0926,46 +higi (rodriguez),46 +hidaka koharu,46 +hibiki reine,46 +hi (wshw5728),46 +hentaix,46 +henpei saboten,46 +hen zemi,46 +hello lady,46 +heiyz,46 +hei ling,46 +head on knee,46 +hayashi ryouta,46 +hayabusa yuki,46 +hatyuuruinohito,46 +hasekura airi,46 +harukasu (mememememo),46 +haruka-san,46 +harpe,46 +hariyaa,46 +haragaita i,46 +happy margaret!,46 +haohi (less),46 +hansal,46 +hand on mask,46 +hand biting,46 +hanamura shoma,46 +hanakoizumi yoruno,46 +ham pon,46 +hal (h lambda l),46 +haku hakujou daimaou,46 +haku (utawarerumono),46 +hakone yumoto,46 +hair rollers,46 +ha youn,46 +gz (gzdteee),46 +guruto,46 +gurasan (happinesscharge precure!),46 +gundam seed c.e. 
73: stargazer,46 +gukukimu,46 +gueira (made in abyss),46 +gudakoguda,46 +gs (onsen syndicate),46 +growlanser vi,46 +grouse01,46 +grey mittens,46 +grendizer,46 +great knife,46 +grazing (livestock),46 +gougasha akira,46 +golshi's first place pose,46 +golden sun: dark dawn,46 +golden marriage,46 +gokusai kaibi,46 +glitch censor,46 +giuseppe garibaldi (azur lane),46 +girls book maker ~shiawase no libretto~,46 +gilmang,46 +genera-x,46 +genbu (kemono friends),46 +gdat,46 +gaydio zrong,46 +garapon,46 +ganbari mascarpone,46 +gammatelier,46 +gambier bay mk ii (kancolle),46 +galdino,46 +gakuen kino,46 +gaige,46 +gaha,46 +fuyukonbu,46 +futoshi slim,46 +fullbottle,46 +fukuko fuku,46 +fujou kirie,46 +fujiwara aoi,46 +fujimaru ritsuka (male) (decisive battle chaldea uniform),46 +fuha mika,46 +fucodoku,46 +frigga (last origin),46 +fray myste,46 +fragarach (fate),46 +found modori,46 +flynn (smt4),46 +fletcher (kancolle) (cosplay),46 +flametail (sport for all) (arknights),46 +flame style,46 +fiona (border break),46 +fiat,46 +fermium.ice,46 +fenrir (ghost (tama)),46 +feater (gourmet) (arknights),46 +fam,46 +fake pregnancy,46 +faceset,46 +faceplate,46 +fabarm sat-8,46 +evil-dei,46 +eternal bloom (idolmaster),46 +esper (saga 2),46 +enemy tantou,46 +emmeryn (fire emblem),46 +elel185,46 +electra (nadia),46 +elan ceres,46 +elaine auclair,46 +egawa hiromi,46 +eel boy,46 +edelgard von hresvelg (cosplay),46 +edamame (barium),46 +eccma417,46 +eatbara,46 +dusk of oolacile,46 +drop tank,46 +dress slip,46 +drake (pokemon),46 +dr.latency's freak report,46 +double halo,46 +doriy,46 +doctor doom,46 +dithered background,46 +dingo egret,46 +dille blood,46 +diabolik lovers,46 +deoxys (defense),46 +dekkano!!,46 +deadly chaser (elsword),46 +dawito,46 +darling0221,46 +dark elementalist lux,46 +dankesang,46 +dalehan,46 +daisx (dais0115),46 +curry bowl,46 +curecycadura,46 +cure fleuret,46 +cu-rim,46 +counter strike: global offensive,46 +cosette coalhearth,46 +cora stt,46 +cookie hair ornament,46 +constantia cantacuzino,46 +comotaro,46 +coffeenougat 1985,46 +classicaloid,46 +circle k sunkus,46 +cierra (riviera),46 +chuuka ichiban!,46 +chuujou kagetsu,46 +christy mii,46 +chorogon,46 +chocolate sable,46 +chlorophytum,46 +chito04,46 +chimimo (character),46 +chigiri kurenai,46 +chen (somanychens),46 +cheese wheel,46 +checkered pants,46 +charolic (girls' frontline 2),46 +chained sarkaz girl,46 +center (majisuka gakuen),46 +cecilia helmold,46 +cecilia flower (genshin impact),46 +catxuan,46 +catherine (rakurakutei ramen),46 +catherine: full body,46 +cat busters,46 +casshan robot hunter,46 +captain america: the first avenger,46 +burning at the stake,46 +brown-haired cat girl (kevbot),46 +bo xun lin,46 +blurry vision,46 +blueriest,46 +blue swim trunks,46 +blue nightgown,46 +blue facial hair,46 +bloom into me15,46 +blaze (burst feline) (arknights),46 +blackmore,46 +black headphones,46 +birijian,46 +biiji,46 +bibico,46 +benten (ioj3446),46 +bencao gangmu (anquan sy),46 +beat (dragon ball),46 +barkhorn0331,46 +bangoul,46 +back slit,46 +b-man,46 +b-daman,46 +azusayumi meme,46 +ayuka,46 +ayu (iyokanya),46 +awata mokane,46 +average-hanzo,46 +assassin (granblue fantasy),46 +asmis hara,46 +ashinowoto,46 +ashigara (housamo),46 +asarokuji,46 +asao (vc),46 +asakura mao,46 +artix entertainment,46 +aro,46 +arnval mk2 tempesta,46 +armrest,46 +ardnades,46 +apollo (fate),46 +aona (noraneko),46 +aoko (myut7287),46 +aoki uru,46 +aojiru (yume 2kki),46 +aoi yuuta,46 +aoi (kiyokiyoaomushi),46 +aoba (kona),46 +anzu (sumisaki yuzuna),46 +antica (bigrbear),46 
+annyeongbangawo,46 +anison,46 +angelica (gunslinger girl),46 +ane-suisei,46 +andy2465,46 +amu (nsk0),46 +amarisu,46 +amanda werner,46 +amagiri yune,46 +alpaca club,46 +alolan sandslash,46 +alolan muk,46 +all you need is kill,46 +alice-yuki,46 +aletta,46 +akizuki kouyou,46 +akiyama otome,46 +akire (akireru shoujo),46 +akikaze shirakumo,46 +akaneko (idaten93),46 +akanako,46 +akagishi k,46 +aka (shoumae),46 +aiba ruri,46 +ahim de famille,46 +aelion draws,46 +adeshi (adeshi0693119),46 +adam (nier automata),46 +a-chan senpai,46 +770mk,46 +2nii,46 +1kudamo1,46 +zukki0731,45 +zourion,45 +zhudouzi,45 +zheyi parker,45 +zatu,45 +zas m21 (white queen) (girls' frontline),45 +zarbon,45 +yves bigerel,45 +yuzuki kaoru,45 +yuzu-chan,45 +yuyumi (yuurei),45 +yuusenmai (momochieri),45 +yuukyan (ucan85),45 +yuu azma,45 +yuruno,45 +yuriko2755,45 +yurie the last scholar,45 +yumyum,45 +yukoring,45 +yukimochi kinako,45 +yukimi daifuku (food),45 +yuki shizuku,45 +yukata (yume 2kki),45 +yuha (yh-krm),45 +yuguru,45 +yudoufu (yudouhu 1212),45 +yoyoyoyou,45 +youmak,45 +yoshitake rika,45 +yoru (sword),45 +yonezu kenshi,45 +yomiuri giants,45 +yogurting,45 +yoaferia,45 +yf-21,45 +yellow (konkichi),45 +yasuu!,45 +yangu shigekiyo,45 +yamashita kurowo,45 +yamamura saki,45 +yagara (kishimen f),45 +ya 4004,45 +ya.yu.,45 +xr650r,45 +xnalara,45 +xbox controller,45 +xabungle (mecha),45 +x-com,45 +wonderbat,45 +witcher medallion,45 +whitemop jog,45 +wet pavement,45 +wendy corduroy,45 +weather,45 +weapon removed,45 +watari taichi,45 +wasp (anthyutena),45 +warbonnet,45 +waga mama capriccio,45 +vvy,45 +vf-19,45 +vermeil (kinsou no vermeil),45 +veinte,45 +vectors,45 +vanessa (fire emblem),45 +vana,45 +van arkride,45 +vampire knight,45 +uzura (bonnet225),45 +utsunomiya hetaru,45 +usapenpen2019,45 +uratanuki,45 +urano suzu,45 +unicorn gundam phenex,45 +ultraman tarou (series),45 +uko (moi08),45 +ujiga waita,45 +ugono takenoko,45 +ufo princess valkyrie,45 +u-47 (rookie rider) (azur lane),45 +tuya bairon,45 +tsutsuji (hello x 2),45 +tsushima naoto,45 +tsukinomori school uniform,45 +tsuchihara ai,45 +tsubuta hiro,45 +tsubakiya,45 +triple anal,45 +trey clover,45 +tre (nanoha),45 +toyomaru,45 +toyo maru,45 +toy hammer,45 +touko (toukoneko),45 +toujou yuuki,45 +toto (kuro toto),45 +tor ai,45 +top speed,45 +too many bows,45 +tomatoman01c,45 +tokyo yakult swallows,45 +tokuchi toua,45 +tokorot,45 +togatamaki,45 +tissuebox (artist),45 +tim tim machine,45 +theycallhimcake,45 +the day of sagittarius,45 +thats not it,45 +test card,45 +teruteru-deru,45 +tententensan,45 +tenten (kitty),45 +tekkowang,45 +tefec,45 +teeburu,45 +ted (suikoden),45 +te28,45 +tayama midori,45 +tate (donnguriumai),45 +tataru taru,45 +tarutaru (ryousuke),45 +tarou (run),45 +taneichi (taneiti),45 +tanaka212,45 +tamae (pixiv40276),45 +tallgeese,45 +taking cover,45 +takeuchi yuka,45 +takeakigaku,45 +take-mikazuchi,45 +takayoshi,45 +takahashi shin,45 +takahara,45 +taka (0taka),45 +tail removed,45 +tail rape,45 +ta kaana,45 +syerii,45 +syaha,45 +syachiiro,45 +switchblade,45 +suzurikawa euphrasie ruika,45 +suzumia (daydream),45 +suzuki80,45 +suzuen,45 +suzaki aya,45 +surstromming,45 +surprised cat (matataku),45 +super affection,45 +suna (sunagawa),45 +sumomo (raviklx50uc2r2d),45 +sumomo,45 +sumi mi,45 +sumi-e,45 +sugar mountain,45 +strainer,45 +stocking stuffer,45 +steven mack,45 +starlime,45 +stardust (chen'ai weiding),45 +star-kiss,45 +stag (snobby snob),45 +staff riding,45 +spotted tail,45 +spotted hyena (kemono friends),45 +spica (sennen sensou aigis),45 +spark utamaro,45 
+sowan (last origin),45 +soviet navy,45 +soul of cinder,45 +sora (yukimi shiro),45 +some1else45,45 +soma (sennen sensou aigis),45 +soffa,45 +snarkhunt,45 +smile cubic!,45 +sky child,45 +skull heart,45 +skeleton costume,45 +sio2 (nisankakeiso),45 +sino (mechanized gallery),45 +sinnra art,45 +silvers rayleigh,45 +silver spoon,45 +silver hairband,45 +siha,45 +sigurd (memories with my lover) (fate),45 +sigil,45 +shuiyaoximushi,45 +shuga (mhwii),45 +shoa tksm,45 +shizu (kino no tabi),45 +shizu25,45 +shirohebidou,45 +shirogane kasane,45 +shiraha (pixiv10239953),45 +shiori (tateha),45 +shiohana,45 +shino (pixia),45 +shingetsu nagisa,45 +shingeki no bahamut: virgin soul,45 +shin'ya (330696),45 +shimeta hiromitsu,45 +shimazawa noriko,45 +shikanari,45 +shijima tohiro,45 +shibamine takashi,45 +shiawase usagi,45 +sherry lai,45 +shared sweater,45 +shaomei rin,45 +shaohua hatsune miku,45 +shaiapouf,45 +seydlitz (azur lane),45 +seventh holy scripture,45 +seth (under night in-birth),45 +sesius (haevest),45 +sensor,45 +senke kagerou,45 +senju hashirama,45 +sen nai,45 +sen (sennosenn1127),45 +sekiranun graffiti (vocaloid),45 +seigetsu kotaku,45 +seelehan,45 +secret agent ~kishi gakuen no shinobi naru mono~,45 +seaport princess (cosplay),45 +sd gundam world heroes,45 +scarlet23i0,45 +scarab,45 +scamp (kancolle) (cosplay),45 +sayaka-chan (gashi-gashi),45 +satou shin'ya,45 +sat (yukipoha),45 +sara manta,45 +sara gallagher,45 +sanson (nadia),45 +sangue llia,45 +samanator club,45 +saltycaramel,45 +salsa (trusty bell),45 +sakusaku (sakusakucrab),45 +sakuratsuki,45 +sakurano miya,45 +sakura no uta,45 +sakatsuki,45 +saiun (kancolle),45 +sailen0,45 +sabo rina,45 +ryuu ga gotoku kiwami,45 +ryunbi,45 +ryiel rayford,45 +ruuto (ruto3),45 +ruuji familon,45 +ruri honjou,45 +runia,45 +runastark,45 +rukiana,45 +rome35793562,45 +roku (345),45 +robobot armor,45 +robert j case,45 +riz3,45 +riruno,45 +rinrin (ppnk2835),45 +rikuwo,45 +rico (mega man),45 +rhine,45 +rennkuu,45 +reizo ne,45 +reisun001,45 +reio,45 +reimin,45 +reibun (raven1119),45 +rei no sakura sousetsu (module),45 +rei (ilust9999),45 +regua,45 +red leather,45 +red dead redemption,45 +ravaniz,45 +rapapa,45 +ranger (warship girls r),45 +rambo,45 +rakuraku,45 +rakuchii (rurituvo),45 +rakkuguy,45 +rage of the dragons,45 +rabii,45 +quicksilver,45 +purple overalls,45 +pubic hair pull,45 +psyche oreca,45 +prpr friends,45 +prisoner (elden ring),45 +print bodysuit,45 +priestess (arknights),45 +pretty rhythm dear my future,45 +prelati (symphogear),45 +power pro kun pocket 9,45 +power pro appli,45 +poscorn617,45 +popompon,45 +pom77,45 +poke200,45 +point (vocaloid),45 +pochi katou,45 +platinumcorundum,45 +plaster (2501),45 +planol note,45 +piyo (sqn2idm751),45 +pipi damowang,45 +piper perri surrounded (meme),45 +pipe organ,45 +pinkymomo,45 +pilder,45 +pikomaro,45 +pigeon (wejil),45 +persona 4 the animation,45 +persona 3: dancing moon night,45 +penelope (azur lane),45 +peanutc,45 +pc (z yu),45 +pavapo,45 +parace l'sia,45 +p416,45 +ozawa yuu,45 +overhead door,45 +otsushimai,45 +otimusyairoha,45 +osx,45 +osakabe-hime (foxy lady) (fate),45 +orochi shermie,45 +oretsuu,45 +optical sight,45 +oonishi shunsuke,45 +onsen mikan,45 +onozuka komachi (cosplay),45 +onimonogatari,45 +oligarchomp,45 +ol mahonanoka,45 +oku hideki,45 +okonogi tetsurou,45 +okazu (eightstudio),45 +okannigeru,45 +oisiimizu,45 +oikawa shizuku (cosplay),45 +ogiwara kyouko,45 +ogi (sham00),45 +oda raika,45 +object riding,45 +o-yatsu,45 +o-minato,45 +o'neill cylinder,45 +nprmtp,45 +nota ika,45 +nosuta,45 +nogi 
momoko,45 +noelia ponce,45 +nodame cantabile,45 +nk cell (hataraku saibou),45 +nisshisshi,45 +ninomiya masataka,45 +ninjunker,45 +nine years in a coma (meme),45 +nijiura 7 (meme),45 +nights into dreams,45 +nigane,45 +niffler,45 +nico (doa),45 +nichimatsu seri,45 +nekomo,45 +nekokan (cat's cradle),45 +near (sound voltex),45 +nazi war ensign,45 +nayutan sei kara no buttai,45 +natsu-no-kamisama,45 +nasunael,45 +nashoki,45 +nasa yu,45 +nano (mianhua maoqiu),45 +nanno hachirou,45 +nanaya777,45 +nanase (under night in-birth),45 +nanami (nanami811),45 +nana (mizukas),45 +nalukikiki,45 +nakagami takashi,45 +nai gai hongcha,45 +nagute,45 +mustard bottle,45 +murasin,45 +muneyuki,45 +mugikoma,45 +mstm,45 +morisaki kurumi,45 +mopiwo,45 +monopoly,45 +monkey d. luffy (cosplay),45 +monja (monja0521),45 +monegi,45 +mokutan mmmm,45 +moi2m3,45 +mogura (nichijou),45 +mochizuki saku,45 +mochizuki jun,45 +mnmktn,45 +mizutsune (armor),45 +mizuno kakeru,45 +mizuki miyu,45 +mizui kaou,45 +mizore arius,45 +mizone,45 +miyuki (nyaa),45 +miyuika,45 +miyapo,45 +miyaguchi kanna,45 +mitty (made in abyss) (human),45 +mitsuki mouse,45 +mitsuka souji,45 +mistermagnolia,45 +mister rhino (wangzisama1001),45 +misogi (halloween) (princess connect!),45 +mismatched underwear,45 +misakichintin,45 +misaki yuu (misaki 1192),45 +minowa hikari,45 +mikepon (ota office),45 +mikan imo,45 +migiwa kazuha,45 +mighty switch force!,45 +midori (kimi ga shine),45 +michaellee4,45 +miao yao cha,45 +meyuu (kso),45 +mercury (planet),45 +mephilas seijin,45 +meiji (meizi493),45 +megotokyo,45 +meginalter,45 +meettheton,45 +mchi,45 +matuda (matudayazo),45 +matsuoka shuuzou,45 +mashiro tomoya,45 +mas (5734ghji),45 +marine (ahute),45 +marina (noill),45 +mariandale,45 +maochao,45 +malzahar,45 +maltese cross,45 +mallet (instrument),45 +male fighter (dungeon and fighter),45 +makochan42,45 +makishima yuusuke,45 +makenevemoiine,45 +makar,45 +majolica le fay,45 +maiko (yoshida308),45 +mahousho,45 +magical mirai meiko,45 +magical antique,45 +magi (cu-little2),45 +maehara nina,45 +mad bear (tiger & bunny),45 +maboroshi dandy,45 +m82a1 (girls' frontline),45 +m26 pershing,45 +m1 bazooka,45 +lynxgriffin,45 +lusamine (sygna suit) (pokemon),45 +lunica,45 +lump of sugar,45 +lucina (fire emblem) (cosplay),45 +lowain (granblue fantasy),45 +loussier ellerensia,45 +loud,45 +llpfmfc,45 +living jar (elden ring),45 +little thunder,45 +limousine,45 +lilium443,45 +liane mistlud,45 +leoharju,45 +lenny face,45 +leje39,45 +leglus,45 +lee-taro,45 +leafa (sao:im),45 +lch,45 +laurelfalcon,45 +lanlanlu (809930257),45 +lamp p9,45 +lafrenze,45 +lady nagant,45 +lace dress,45 +lace-up legwear,45 +kzm,45 +kyuuakaku,45 +kyaradain,45 +kuzakura mika,45 +kuusen madoushi kouhosei no kyoukan,45 +kurukuru (korekita),45 +kurouso (meikyoushisui),45 +kurose nao,45 +kurosawa karura,45 +kurono hyouka,45 +kuromu (cr0711),45 +kurojiya,45 +kuroi k-ta,45 +kurobeko (kur0bek0),45 +kurisu ams,45 +kuno,45 +kumaneko rococo,45 +kuma teikoku,45 +kukku,45 +kujou mikuru,45 +kujiramaru,45 +krjenl,45 +krile mayer baldesion (ff14),45 +kozuka yasushi,45 +koyanagi hanako,45 +koutetsu no majo annerose,45 +kouri (kyorosuukeeeeeee),45 +koroneko p0w0q,45 +kornod,45 +kor meteor,45 +konununui,45 +konoha (nozomi tsubame),45 +konekoneko (indonesia),45 +konachan sample,45 +kokorin,45 +kokollet,45 +kokoda kouji,45 +kokaji sukoya,45 +kogawawaki,45 +kofumi (nxme5555),45 +kocona,45 +kobeni hijiko,45 +knife behind back,45 +knbilove,45 +kk (kkgame7733),45 +kitanxjk,45 +kitakami kai (kancolle),45 +kissing stomach,45 +kise chiharu,45 
+kisaragi tsubasa,45 +kir (khw66136132),45 +king of hearts (card),45 +king arthur (mythology),45 +kindo,45 +kinagi (3307377),45 +kikurage tom.,45 +kihara atsurou,45 +kido keiji,45 +keraton,45 +kemi (kemi433),45 +keith shadis,45 +kchair02 (k02-yan),45 +kazeto,45 +kaze ga tsuyoku fuiteiru,45 +kawashiro nitori (kappa),45 +kawara hajime,45 +kawahara makoto,45 +katkichi,45 +kasumi (doa) (cosplay),45 +kasane ao,45 +karrablast,45 +karasuma sakura,45 +kappe reeka,45 +kanonno grassvalley,45 +kannagi yuuri,45 +kangoku senkan 2,45 +kanda aoi,45 +kanagata sugumi,45 +kamihara mizuki,45 +kallen kaslana (ritual imayoh),45 +kakerayuri,45 +kaede (ragnarock city),45 +kabaya kousuke,45 +kabata (mechisan),45 +k29,45 +k-ma,45 +jude (minority lilac),45 +jou (mono),45 +johnson zhuang,45 +jnsdh,45 +jinko (monster girl encyclopedia),45 +jiliang ji ying,45 +ji mag (artist),45 +jerry,45 +jeina (sumiyao (amam)),45 +jane doe (john doe),45 +jandare,45 +itou katsunobu,45 +ishihama masashi,45 +isetta,45 +iscario (forever 7th capital),45 +irodori koukou gasshoubu yori,45 +irkawaza,45 +iris wilson,45 +ionocidium (flower knight girl),45 +inui achu,45 +insyu,45 +inside clothes,45 +insect on finger,45 +inoue takumi,45 +inkarmat,45 +inishie no megami to houseki no ite,45 +immortals: muvluv alternative,45 +imminent double penetration,45 +imminent bestiality,45 +ikusa katato,45 +ikuhashi muiko,45 +iizuka yuzu,45 +ichimoku (tansio5),45 +ichi10art,45 +ice sculpture,45 +ibuki kouko,45 +ibuki (ibukiro1003),45 +houshou (azur lane),45 +horiuchi osamu,45 +hori kyouko,45 +hooded leotard,45 +homua,45 +homare (g hmr88),45 +holocure,45 +hole in ears,45 +hiwa industry,45 +hishi (k-xaby),45 +hisame (fire emblem),45 +hiroyuki takahashi,45 +hirosaki kanade,45 +hippopotas,45 +hercules (1997 film),45 +hentai kuwa,45 +hell's kitchen,45 +helen parr,45 +hei chuan gui,45 +hegemon edelgard,45 +heaven ascended dio,45 +heart ring choker,45 +hayamaso,45 +hashiribe akira,45 +hashimoto w.s.,45 +harutoshi,45 +haruichi (sazanami complex),45 +hare-kon.,45 +hanzo (2929),45 +hanuu,45 +hane (feathe02),45 +hakurou (onmyoji),45 +hakinikui kutsu no mise,45 +hair net,45 +haggy,45 +hadurin (zdmzy),45 +hachiware,45 +hachimitsucoffee,45 +h-01,45 +gyuuki (yuyuyu),45 +gweon sua,45 +gungnir (toaru),45 +gundou musashi,45 +gundam virtue,45 +guglielmo,45 +gucha (netsu),45 +grubbin,45 +goto hime,45 +goldion hammer,45 +gobul (armor),45 +gipple,45 +gingham skirt,45 +gift eternal rainbow,45 +gesoking,45 +geromonja teitoku,45 +gerigoo,45 +gd choco,45 +gattsun,45 +ganzyu i,45 +gangnam style,45 +galaco,45 +gagame,45 +fuuun ishin dai shogun,45 +fuusha,45 +futurama,45 +futomayu-chan (sinohira rin),45 +futatsuiwa mamizou (cosplay),45 +futago no haha seihonnou,45 +furumiya neko,45 +furiruno,45 +furafura,45 +fuguu-chan,45 +fuetakishi,45 +frofrofrost,45 +fraud,45 +fishing gear,45 +fish-flavored toast,45 +fire stone,45 +fiona mayfield,45 +fio piccolo,45 +fingers between toes,45 +finger cannon,45 +final fantasy crystal chronicles: echoes of time,45 +fii fii (feefeeowo),45 +felt whitesnow,45 +feena (ys),45 +falslander,45 +fake scan,45 +f.k (hs225kr),45 +exice-zero,45 +espen olsen saetervik,45 +enusabu (enusub),45 +enterprise (anniversary ride) (azur lane),45 +enishi96,45 +empress (last origin),45 +emje (uncover),45 +ellie niunai zhuyi zhe,45 +eli clark,45 +electric wind instrument,45 +eldigan (fire emblem),45 +ego6,45 +edain (fire emblem),45 +drawingddoom,45 +doumyouji cocoa,45 +dougram,45 +doryudory,45 +dormouse (monster girl encyclopedia),45 +doren,45 +dorami,45 +dongyue sakura,45 
+donaldakron,45 +dolce (rune factory),45 +dive bomber,45 +diolemonde,45 +dice coffeedox,45 +di qiu wang shi,45 +devil lo,45 +devi (elsword),45 +deoxys (speed),45 +denshinbashira (bashirajio!),45 +denjinq,45 +decadriver,45 +death box (apex legends),45 +dc (makezzal),45 +darth vader (cosplay),45 +dantalian no shoka,45 +damiaodi,45 +daikazoku63,45 +dahlia (pokemon),45 +da ji,45 +czc (deko),45 +cynthia (sygna suit) (renegade) (pokemon),45 +cure slum,45 +cure miracle (sapphire style),45 +cure lovely (lollipop hip hop),45 +cupen,45 +cryo cicin mage (genshin impact),45 +crow-chan (karasu raven),45 +crouching start,45 +cottontail (voice actor),45 +cookie jar,45 +colored tears,45 +code: electra (elsword),45 +clover (hi-per pinch),45 +clover-shaped pupils,45 +clobbopus,45 +cliana rimskaya,45 +clash kuro neko,45 +cing,45 +cinemagraph,45 +ciawasemono,45 +chuunenpi,45 +chuo8008,45 +chun 1234567,45 +chuki (lydia),45 +choko (chokotto sister),45 +chisuke 1104,45 +chiku (gesu),45 +chika (keiin),45 +chicchana yukitsukai sugar,45 +chibi vanille,45 +cheri berry,45 +cheno (amakuchi mustard),45 +cheep cheep,45 +chasen,45 +character pin,45 +chaka3464,45 +cero320,45 +centaur no nayami,45 +cave (neptune series),45 +catapult (arknights),45 +cat nose,45 +cat loaf,45 +cassini m bisuko,45 +carrot earrings,45 +carol anderson,45 +carina verritti,45 +caramel (caramelmilk),45 +canna (granblue fantasy),45 +camus (fire emblem),45 +caeldori (fire emblem),45 +burter,45 +bundou seika,45 +bun'ya (0874cirno141),45 +bucephalus (fate),45 +bu4321,45 +brown leggings,45 +brill p,45 +brand of sacrifice,45 +bowser (cosplay),45 +bounce,45 +borubikku,45 +blowgun,45 +blazbluefairy,45 +blackberry cookie,45 +black reaper,45 +black lemon-chan,45 +bitten,45 +bitou raimu,45 +bishounen series,45 +benny (fire emblem),45 +beelstarmon,45 +be9459,45 +battlefield 4,45 +basilis9,45 +bard 2 (sekaiju),45 +baphomet,45 +banderasu,45 +ban airi,45 +balsamic vinegar,45 +bag on lap,45 +azzalea,45 +azaz (last origin),45 +azarashing day,45 +ayataka,45 +ayano rika,45 +awakumo,45 +asuke yuki,45 +asuka mirai (aikatsu friends!),45 +astoria (azur lane),45 +ashley mizuki robbins,45 +armorganger,45 +arisa glennorth,45 +ariga hitoshi,45 +arai chie,45 +aquarium (visual novel),45 +aphmau,45 +aoyama shunsuke,45 +aoilio,45 +aoi tori (purple software),45 +aoi sora no neosphere,45 +anz (starry),45 +ano54,45 +annno ans,45 +anna (gekkou no carnevale),45 +anidante,45 +ancient destroyer princess,45 +amanatsu,45 +amamizu shizuku,45 +amamizu (myofuu kai),45 +am24,45 +almond tofu (genshin impact),45 +allze,45 +alicia rue,45 +alic miao,45 +aldegon (rariatto),45 +akitsuki tsukasa,45 +akisuko,45 +akisame kou,45 +akiomi aiko,45 +akaya (pixiv20545353),45 +akatsuki francesca,45 +akatsuki (osamaru36),45 +akamura saki,45 +akamomo,45 +aimo (aimo1214),45 +aika (eternal arcadia),45 +aesop carl,45 +adolescence (vocaloid),45 +action heroine cheer fruits,45 +ach,45 +acefish,45 +aburage (motimotigg20),45 +abataa,45 +a.ringo,45 +79inko,45 +72yan,45 +456,45 +3books,45 +33paradox,45 +1950s (style),45 +10hmugen,45 +zieru,44 +zhi xie,44 +zerogura,44 +zeri (zeristudio),44 +zee (zee sub),44 +zarude,44 +zangeki no reginleiv,44 +zafina (tekken),44 +yyi,44 +yuzu kiro,44 +yuzaki tsukasa,44 +yuuyu (moco20111115),44 +yuu (primenumber7),44 +yutori (clockwork chicken),44 +yunkru,44 +yukihi,44 +yukichi (tsuknak1),44 +yuki onna (onmyoji),44 +yube (skyhacker),44 +younger twin sister (muninshiki),44 +youko (santarose),44 +youka (gensou kyoudan),44 +yoshinoya seine,44 +yoshino (gunform),44 +yoshimune,44 +yoshii 
kyoko,44 +yoovora,44 +yokige,44 +yokaranu yuuna,44 +yo-nashi,44 +yi cat,44 +yggdra yuril artwaltz,44 +yellow sponge,44 +yasaka kazuki,44 +yanagiwara maron,44 +yamazaki ryuuji,44 +yamato (aoki hagane no arpeggio),44 +yamate kyouko,44 +yamaneko ken,44 +yamanata,44 +yamakonbu,44 +yamakake (tororo1293),44 +yakushiji megumi,44 +yakisoba (kaz2113),44 +yagisawa teru,44 +xssh,44 +xiao me,44 +wuduo,44 +world of final fantasy,44 +wizardry,44 +wizardriver,44 +witch (dungeon and fighter),44 +wing ribbon,44 +wimple,44 +wife and wife and wife,44 +white water,44 +whale girl,44 +washington (warship girls r),44 +warlock (granado espada),44 +wadaka,44 +waa153,44 +vinne,44 +vicchan,44 +vessel of sin,44 +vertical-striped bow,44 +veralde,44 +vera nair,44 +vera collins,44 +varshahn,44 +usuiken,44 +usuda hiro,44 +ushiyama tatsuma,44 +urumi (urumi00),44 +uraha,44 +uohhhhhhhhh! (meme),44 +unstableboiler,44 +unscpro,44 +unova mother (bw),44 +unitsu,44 +unicorn (little star songstress) (azur lane),44 +under fire,44 +uncats,44 +ultra beam,44 +uirina,44 +ufo koikoi,44 +uchida shuu,44 +uchi no pet jijou,44 +turna98,44 +tsuyuta kome,44 +tsurukawasha,44 +tsumugi (halloween) (princess connect!),44 +tsukimiya kamiko,44 +tsukiji uogashi sandaime,44 +tsugou makina,44 +tsuchinoto,44 +tsubaki (yi),44 +travo,44 +tranquill,44 +traene (sorai shin'ya),44 +toyoura (23066494),44 +towrituka,44 +tousou (touken ranbu),44 +touching toes,44 +torn sarashi,44 +torla16,44 +torii koyomi,44 +toona,44 +toku kekakewanko,44 +toki (tokinokogiri),44 +togeshiro azami,44 +tiger mask,44 +tiffany lilith stella,44 +tian ling (qys3),44 +thunder stone,44 +thousand sunny,44 +those girls,44 +the little mermaid (andersen),44 +the hermit (tarot),44 +the empress (tarot),44 +tenpou gensui,44 +tennouji nae,44 +tenmaru,44 +tenhi tsukitori,44 +tengawara,44 +tendou kisara,44 +temple gate,44 +temoshi,44 +tekla,44 +tekito midori,44 +tecchen,44 +tear (recettear),44 +tb (spr1110),44 +tattsun (blue&hazel),44 +tanukimaso,44 +tanoshii meat,44 +tank (artist),44 +tanaken,44 +tanaka (cow),44 +tamami (jonsun),44 +takashima remi,44 +takano kou,44 +takamiya mizuki,44 +takamiya honoka,44 +takai sayaka,44 +taihou (azur lane) (cosplay),44 +tablet-tan,44 +syringe holster,44 +sypha belnades,44 +svveetberry,44 +suzume (simple0091),44 +sutochan,44 +supershrimpcakes,44 +super robot wars mx,44 +super nintendo,44 +sunset nivaris,44 +suneo (goten),44 +sumemako,44 +sukeroku (treeee),44 +sugac,44 +suction bomb (splatoon),44 +suchi (fsur7444),44 +stuffed giraffe,44 +stuffed elephant,44 +stroking beard,44 +steel mask,44 +stc,44 +spiked wings,44 +spider web background,44 +spec,44 +spawn (spawn),44 +soul eater not!,44 +soujirou (new game!),44 +sorairo innocent,44 +sophie (fire emblem),44 +sooperman,44 +songstress,44 +song ren,44 +sogegin,44 +so nagi (artist),44 +snake costume,44 +smeared blood,44 +slingshot tan,44 +six (little nightmares),44 +siren (borderlands),44 +sion 0d1m,44 +simonov (girls' frontline),44 +silver skirt,44 +siegfried kircheis,44 +side split,44 +shuninshunin,44 +shounen hollywood,44 +shoumetsu toshi 2,44 +shiso azuki,44 +shirou,44 +shiro (tower of fantasy),44 +shiraui tsumugi,44 +shiontaso,44 +shinobu (princess connect!),44 +shinku p,44 +shinju-kan uniform,44 +shinigami wyvern,44 +shinigami0139,44 +shindou mitsuko,44 +shin megami tensei: if...,44 +shimoogawa,44 +shimashi (mori no sato),44 +shikidouji,44 +shigen,44 +shigatsu (soratobuusagi),44 +shibuya susano,44 +shelby seraphine,44 +sharan (dungeon and fighter),44 +shamshel,44 +shaliva,44 +shakata (syakatan),44 +sexy 
commando gaiden: sugoiyo! masaru-san,44 +senryuu shoujo,44 +senra (singer),44 +senketsu (scarf),44 +sena kizahashi,44 +seifer almasy,44 +scarecrow (twooz),44 +scandinavia peperoncino,44 +saturn-freak,44 +satou yuu,44 +sasha kruschschev,44 +sasaki youko,44 +sarina (tosiyukiryousuke),44 +sarah stone,44 +sangoku romance,44 +sandragh,44 +sanderson,44 +sandayu (housamo),44 +samidareura,44 +samemanma,44 +sakurafubuki nel,44 +sakura (honkai impact),44 +sake barrel,44 +sakamoto maaya,44 +sakamoto kazuya,44 +sakai jin,44 +saitou kakkou,44 +sagoromo 04,44 +saegusa miko,44 +sabiimo25,44 +sabatuki,44 +ryuukishi bloody saga,44 +ryouko (lovelovela),44 +ryou (kemurikusa),44 +ryan (dq4),44 +rustysalmon,44 +roots (hair),44 +room603,44 +roland (fate),44 +robin hood (summer hunter) (fate),44 +ro (igris-geo),44 +ro-ichi,44 +rkzrok,44 +ringo ame,44 +rindo,44 +rikka (rikka331),44 +rifleman1130,44 +rhemora,44 +revya (female),44 +renkin arthur,44 +rena illusion,44 +reit,44 +red tulip,44 +red girl (yuuhagi (amaretto-no-natsu)),44 +red beryl (houseki no kuni),44 +recoil,44 +realdoll,44 +re-ka,44 +rayman (series),44 +rawrden,44 +ramune (cherrykde),44 +rakku (rkrmz0n),44 +raiden mei (shadow dash),44 +ragna the bloodedge (cosplay),44 +radjeong,44 +rable,44 +rabbit+tank form,44 +r93 (holiday lucky star) (girls' frontline),44 +qiangzi,44 +qi sili,44 +python (snake legs),44 +pyrite (ironbunz),44 +purple (jobseeking),44 +purdoy25,44 +print neckerchief,44 +princess of the crystal (cosplay),44 +princess evangile,44 +princess dress,44 +pretty x cation,44 +prairie (mega man),44 +pr (puru),44 +power cord,44 +pouring onto pussy,44 +potatopanicking,44 +poppin' up (love live!),44 +popo (popopuri),44 +pon de lion (cosplay),44 +pomeranianko,44 +polygonal,44 +poinikusu,44 +plant boy,44 +pick'k,44 +pham thai chau,44 +petite miku,44 +penny crygor,44 +penis chart,44 +pen spinning,44 +pectorals on glass,44 +pearl anklet,44 +patricia wagon,44 +party parrot (meme),44 +parn (lodoss),44 +parai0,44 +pachio (patioglass),44 +overine19,44 +ots-12,44 +oshiyon,44 +osanpo02,44 +orion-m,44 +orihira,44 +orange eyewear,44 +oppai ball,44 +ophis (high school dxd),44 +ooshima miwa,44 +ookusa manami,44 +onoda yura,44 +one piece film: z,44 +okurapuchi,44 +okita ababa,44 +okayashi,44 +ogi (torikari),44 +ogasawara,44 +official art inset,44 +ofelia (gogalking),44 +odeclea,44 +oda nobunaga (sengoku otome),44 +oda-sama (yatterman),44 +ochibana ame,44 +nyxkz,44 +nyaon oekaki,44 +nuntarou (niudon kajika),44 +north kaiou,44 +no ahoge,44 +niyadepa,44 +nitoridio,44 +nishizono honoka,44 +nishio kouhaku,44 +nishiki ryouma,44 +nipple push,44 +ninjinshiru,44 +niimura akane,44 +nigiribashi,44 +night strait princess (white),44 +nigekiri sisters (umamusume),44 +nevermind,44 +nessa (fractale),44 +neroshi,44 +nekozombie,44 +negiko,44 +neee-t,44 +nazuki nazu,44 +nayuyu1105,44 +natsumi (natumi06),44 +natsume remon,44 +narumi (uminari),44 +naraku (senran kagura),44 +napkin holder,44 +naotaka,44 +nanbu chitose,44 +nanashi mumei's horror painting,44 +nanase yoshino,44 +nanase (amagamido),44 +nanakorobi yaoki,44 +nana-ine,44 +namaniku (nmnk2020),44 +naked cat,44 +nakamura misaki,44 +nagiru,44 +na2,44 +n7 armor,44 +mygiorni,44 +muyang,44 +muteki koujin daitarn 3,44 +musou orochi 2,44 +mrt mr,44 +moy64904958,44 +morishita michiru,44 +morimoto hirofumi,44 +moosopp,44 +mono (mono mon),44 +monk 4 (sekaiju),44 +money rain,44 +momofukki,44 +molly,44 +mokichi (nvzy3n),44 +moblin,44 +mmlu (honwa karesu),44 +mizuhara arisa,44 +miyahara ruri,44 +miyabe makoto,44 +miura akane,44 
+mistimagi,44 +misoradeko,44 +miokuri,44 +mio (dream c club),44 +mint (flower knight girl),44 +minatsuki nao,44 +minami ikkei,44 +millcutto,44 +mikujin (mikuzin24),44 +mikoto freesia scarlet (goma),44 +mikanoisi,44 +mikan-ya,44 +miia's mother,44 +migumi (niiya),44 +migimaki (migi mawashi),44 +migata,44 +midnight (arknights),44 +michaela (evillious nendaiki),44 +mice (rune factory),44 +mg36 (girls' frontline),44 +mew pudding,44 +messengers (bloodborne),44 +mess kit,44 +meryl star,44 +merue,44 +merlin (nanatsu no taizai),44 +merii,44 +menokonome,44 +memetaroh,44 +melua melna meia,44 +mega man 4,44 +mega man: powered up,44 +meg (granblue fantasy),44 +mazinger z: infinity,44 +mayokichi,44 +maya sawamura anderson,44 +matchaneko,44 +mastermind (elsword),44 +master (gyee),44 +mashiroma zenima,44 +mashiko kaoru,44 +masato (josou jinja),44 +maritan,44 +marinasu,44 +maria (saidaioujou),44 +mappo m2,44 +mao (mizuki kotora),44 +manatsu no yoru no yuki monogatari,44 +mammoth (kemono friends),44 +mamezuka takashi,44 +mameroku,44 +mamamoo,44 +makiemon,44 +mak,44 +majin (kiidoumajin),44 +maiq06,44 +magnum boost,44 +magneto,44 +maenoo,44 +madokan suzuki,44 +macross 2,44 +macross: the first,44 +m320,44 +m32,44 +m1897 (girls' frontline),44 +luka millfy,44 +ludwig von koopa,44 +lucia (ogino atsuki),44 +lucerna lunam,44 +lu xueqi tongren ye,44 +lu bu (fate),44 +lrpanda00,44 +loxodon,44 +lotus hair ornament,44 +longcloud,44 +litleo,44 +lingxia,44 +ling xiang,44 +lime (among us),44 +lilac (k32420276),44 +leviathan (the seven deadly sins),44 +levia-san,44 +leto (arknights),44 +lemres (puyopuyo),44 +lemon pan,44 +lemon-chan,44 +legionnaire,44 +leclle,44 +lavie,44 +lauren iroas,44 +late,44 +laserdisc cover,44 +lappet-faced vulture (kemono friends),44 +lala (g (genesis1556)),44 +kyuunosuke (lactis09),44 +kyousa38,44 +kyojinjoa,44 +kyanpero,44 +kuzehashi akari,44 +kuyukian3,44 +kuto tubuyaki,44 +kuso bba,44 +kusaka yuuya,44 +kurutsuki,44 +kurt robinson,44 +kuroki manatsu,44 +kurokami,44 +kurohane,44 +kurogoma,44 +kuro wa shinoru,44 +kuribayashi shino,44 +kukurus,44 +kuki sanban,44 +krokorok,44 +kricketune,44 +kozakura shion,44 +koyubita,44 +koyomi (shinshia),44 +kouson q,44 +kousaka chihaya,44 +koucha maru,44 +koube an,44 +kotarou (kot mochi),44 +koshou shichimi,44 +koshi (koshi vortex),44 +konoekihei,44 +konishiki (52siki),44 +konbuni,44 +komoe (hinagatu),44 +koma kitsune (kururito),44 +kom0980,44 +kokumu,44 +kohinata hikari,44 +kmikan,44 +klein (fire emblem),44 +kizaki erika,44 +kita no miko,44 +kishimasa,44 +kiryuu kurou,44 +kirudai,44 +kiriya haruhito,44 +kirisaki kyouko (toloveru),44 +kiri (lwp01 lav),44 +kirameki mamika,44 +kimura naoki,44 +kimiyoshi natsumi,44 +kimigabuchi,44 +kima,44 +kikujin,44 +kikaijima mogana,44 +kibisakura2,44 +ki-43 hayabusa,44 +keshin armed,44 +kero kero keroppi,44 +kentou kanami,44 +kemoribon,44 +keke (kekekue),44 +kazamatsuri rinna,44 +katsuragi chikagi,44 +kateikyoushi no onee-san,44 +karasu h,44 +kaprice,44 +kanojo no seiiki,44 +kanata no astra,44 +kanashi kumo,44 +kamotsu yasai,44 +kamkac,44 +kamino ryuuya,44 +kamenakake,44 +kamen rider ouja,44 +kamen rider kick hopper,44 +kamabokopic,44 +kallen kaslana (sin hunter),44 +kakaaru,44 +kajun,44 +kajii supana,44 +kaho 0102,44 +kagamin bocchi,44 +kado,44 +kac-pdw (girls' frontline),44 +kaburi chiko,44 +k3 (girls' frontline),44 +justice (tarot),44 +justice (guilty gear),44 +juralumin,44 +jun (ash),44 +julian mintz,44 +jowy atreides-blight,44 +joshu-san,44 +joney,44 +johnalay,44 +john zerowb,44 +jitome-chan (tawawa),44 
+jintianhuahualema,44 +jinguuji jakurai,44 +jimeko,44 +jiayu long,44 +jhonwalker,44 +jhc kai,44 +jellypon,44 +jeice,44 +jeanne d'arc alter (ver. shinjuku 1999) (fate) (cosplay),44 +jeancle abel meuniere,44 +jay b lee,44 +jay (tales),44 +jaken,44 +jag ging,44 +jack in the box (toy),44 +ivy (flower knight girl),44 +itsumi erika's gunner,44 +itou yoshiyuki,44 +isumi (i-spixi),44 +isuke,44 +ishitsuki ( 0101 831),44 +isaac (golden sun),44 +irori (hearth),44 +iro 178,44 +iris yayoi,44 +inumaru akagi,44 +inuinu (project october),44 +inspector gadget,44 +inkspirate,44 +inada roku,44 +imoto yuki,44 +imai asami,44 +imagine breaker,44 +im catfood,44 +ikazuchi no senshi raidy ii,44 +ikazuchi (kancolle) (cosplay),44 +ii fuufu no hi,44 +ignis no meiyaku kishi,44 +iggy koopa,44 +igawa asagi (cosplay),44 +idlecum,44 +ice scream,44 +hybrid cat,44 +hukairi botan,44 +hue oo,44 +hudak,44 +houzumi kaede,44 +hoshimaemi,44 +hoshiko (419401x),44 +hoshikawa hotaru,44 +hoshi (arakawa),44 +horse pose,44 +hood over eyes,44 +honma meiko (cosplay),44 +honey (space dandy),44 +homeless,44 +holding sickle,44 +holding shawl,44 +holding party popper,44 +holding hairband,44 +hobgoblin (touhou),44 +ho-oh (artist),44 +hizuki ayumi,44 +hiyori chisha,44 +hiyama izumi (wttdh),44 +hiyama hikaru,44 +hitori (htr t),44 +hiteihime (katanagatari),44 +hisana,44 +hiromachi nanami,44 +hippowdon,44 +hinoka (allen),44 +hino hikaru,44 +hiki-wota,44 +hikataso,44 +hikaru (asteriskring),44 +hikari to mizu no daphne,44 +helix fossil,44 +headphones over headwear,44 +haydee (gankutsuou),44 +hatsune miku (if),44 +hatomura (tareneko club),44 +hasunoue keroppi,44 +hashimoto shin,44 +haruchika,44 +haro button badge,44 +happyongdal,44 +happycloud,44 +happy chaos,44 +haowei wu,44 +hantsuki (ichigonichiya),44 +haneten kagatsu,44 +hanazono kirara,44 +hanamura mai,44 +hanamaru hareru,44 +hanadi (hndboo),44 +hamayumi (genshin impact),44 +hamachi hazuki,44 +hakuyou-choun,44 +hakuuyori,44 +haho,44 +hachikyaku rozunieru,44 +gunshot wound,44 +guided crotch grab,44 +guangsupaomian,44 +grey tongue,44 +grey male swimwear,44 +green suspenders,44 +gradient bow,44 +grabber tool,44 +gosama,44 +goron (phde2424),44 +gomibako (shirokumatei),44 +godzilla (cosplay),44 +gobou (gbu),44 +glowing arm,44 +gishki ariel,44 +gin (meitantei conan),44 +gime,44 +gilbert guilford,44 +gigawix,44 +geru,44 +gensi,44 +gen (bividgen),44 +gatchu,44 +garnet cradle,44 +gaoyang ou nisang,44 +ganno,44 +ganmo,44 +game screenshot background,44 +gall force,44 +gaku (wheel of lunokhod),44 +gabranth (ff12),44 +g.i. 
joe,44 +fuu'un,44 +future princess,44 +futoshi ame,44 +fushimi yuzuru,44 +fushimi inari,44 +funasshii,44 +fumiko (throughx2),44 +fukuinu,44 +fuku d,44 +fujiwara tatsuroo,44 +fujiwara mizuki,44 +fujimo ruru,44 +frostmourne,44 +frontal wedgie,44 +frog on head,44 +freedom wars,44 +franken stein (soul eater),44 +fo~do,44 +foot bath,44 +flower border,44 +flik (gensou suikoden),44 +five (drag-on dragoon),44 +fishnet panties,44 +fish earrings,44 +final smash,44 +final fantasy vii ever crisis,44 +fidget spinner,44 +ff gradient,44 +fall guy (cosplay),44 +fairy knight gawain (like a lady) (fate),44 +f-lags (idolmaster),44 +ezakishii,44 +eyeball bracelet girl (fkey),44 +eso (toory),44 +eskimofox,44 +erotanuki,44 +epoxy putty,44 +enterprise (starlight oath) (azur lane),44 +endou aya,44 +endo mame,44 +emukae mukae,44 +elsa (frozen) (cosplay),44 +elesia,44 +elasticity,44 +eiyuu chronicle,44 +edward teach,44 +educk,44 +ed edd n eddy,44 +ebi no hito,44 +eat666,44 +dyne gallon,44 +duximeng,44 +dust (konosuba),44 +dtcy,44 +drilbur,44 +double sided wrench,44 +dosei,44 +dornroschen,44 +dooru,44 +dolphin print,44 +doku yanagi,44 +dnsdltkfkd,44 +dlkdhy,44 +dleung,44 +dinosaur hood,44 +diao chan,44 +desktop army,44 +des moines (warship girls r),44 +denden taiko,44 +deboo,44 +dead space 2,44 +ddaomphyo,44 +daydremec,44 +danishi,44 +dangmyo,44 +dandel,44 +dam,44 +daien,44 +d-1,44 +cytus,44 +cutlass,44 +cutesu (cutesuu),44 +cure happy (cosplay),44 +cum on skirt,44 +cum bucket,44 +cue!,44 +cryogonal,44 +crimson lotus moth (genshin impact),44 +crime scene,44 +core gundam ii,44 +core fighter,44 +connie maheswaran,44 +collar chain,44 +cokua,44 +cokecoco,44 +coco kaine,44 +clock lock works (vocaloid),44 +clere,44 +cielo (zaki),44 +chun lo,44 +chrono (chrono crusade),44 +christinya,44 +christina (real) (princess connect!),44 +chomo (asymmate),44 +chloe (granblue fantasy),44 +chiyoda (azur lane),44 +chitose kiiro,44 +chipokan,44 +chihara minori,44 +chigusa hana,44 +chicken (chickenx),44 +chica,44 +cherry (lucky star),44 +chenaze57,44 +chemistry set,44 +chara soon,44 +chaos 0829,44 +chao ho (azur lane),44 +chama (painter),44 +celsius (tales),44 +cath (fire emblem),44 +casablanca (cheer squad on break) (azur lane),44 +caruta,44 +car-15,44 +cape grab,44 +cam (cammero95713700),44 +caleen keemosn,44 +c.c.r (ccrgaoooo),44 +bulkhead,44 +brown armor,44 +bokura wa minna kawaisou,44 +body bag,44 +bm94199,44 +bling (wnsdud34),44 +blackheartxiii,44 +bkub duck,44 +bis (jsr),44 +binding,44 +big zam,44 +besuyama,44 +benawi,44 +belle (katahane),44 +believe again,44 +behemoth,44 +beat (jsr),44 +bean bandit,44 +beak (girls' frontline),44 +battlesuit,44 +bassoon,44 +basilisk,44 +barricade,44 +banjiao qingniu,44 +bakenekomata,44 +baimeme,44 +baige0,44 +azel (laevateinn495),44 +aye,44 +ayase aria,44 +ayano no koufuku riron (vocaloid),44 +ayane (nagasarete airantou),44 +ayanami (pulse of the new year) (azur lane),44 +ayamo kaoru,44 +aya shiro423,44 +awa (bihidasu),44 +auge (akd),44 +atychi,44 +atamonica,44 +asutorii,44 +assassin's creed: brotherhood,44 +asphyxia17,44 +ash lynx,44 +asaoka (0x0),44 +asame21,44 +asahi (vjss4548),44 +aryus,44 +aru (new year) (blue archive),44 +arisugawa dice,44 +areno,44 +arduina (arduin art),44 +araragi soushi,44 +arahiko,44 +ar-10,44 +aquila marin,44 +apple da-ze,44 +aphrodite (fate),44 +aoyagi neko,44 +aotsuba,44 +aoi itsuki,44 +another project,44 +animal on face,44 +ani (aniya),44 +andonoz,44 +ame ame,44 +amd,44 +amana raika,44 +amamiya ren (cosplay),44 +amamiya mei,44 +alternate bottom wear,44 +alter ego 
(danganronpa),44 +aloupeep (enna alouette),44 +almaz von almadine adamant,44 +aliasse,44 +alexa pasztor,44 +akirara (ishiyumi),44 +akinoji (akiponpon27),44 +aki (aki k6),44 +aki (1360505188),44 +ake (ake54),44 +akatsuki (kuon),44 +akatsuki (akatsuki blitzkampf),44 +akasha (syakoba),44 +akamiso (k074510),44 +akagi sena,44 +akabane koume,44 +ajikan (azican),44 +aituoku,44 +air pump,44 +aika himena,44 +ai-bon,44 +aho no sakata,44 +aguruma (yukisita03),44 +afghanis-tan,44 +aestus domus aurea,44 +adjusting headset,44 +adelaide grand marnier,44 +adam smasher,44 +ace attorney investigations: miles edgeworth,44 +abrakadabra2012,44 +a will,44 +a0lp,44 +8 (e uu88),44 +88 flak (ash arms),44 +80mph,44 +808,44 +676643396dolce,44 +52ikaros,44 +343rone,44 +3000 xiao chun,44 +1629doyasa,44 +15kasikaze15,44 +zutsuyaku,43 +zishanjiang,43 +zin (goraku hiroba),43 +zeroyama,43 +zel (ishuzoku reviewers),43 +zealyush,43 +zanscare,43 +zain,43 +yuzuki (rinka01),43 +yuzucky,43 +yuuta (tokoton hirune hiyori),43 +yuuge (hangoban),43 +yurikawa hana,43 +yunarebun,43 +yun (dust-i1),43 +yummy (donyat1983),43 +yukimichi (nieko),43 +yu li,43 +ys vi ark of napishtim,43 +youtike,43 +youta (asatsukidou),43 +youkan (mattya youkan),43 +you (nanban319),43 +yoshinobori,43 +yoshino junpei,43 +yomorin,43 +yomitsuna,43 +yomatsuri akari,43 +yohinori,43 +ymir fritz,43 +yin yang earrings,43 +yigra don,43 +yellow tunic,43 +yazawa mana,43 +yatsushima tetsuya,43 +yano (404878),43 +yancha gal no anjou-san,43 +yamada ichirou (hypnosis mic),43 +yagitori,43 +yagiryu,43 +yabacha,43 +ya yan,43 +y xun,43 +xoaiu,43 +xinuo223,43 +wuzhiang liufu,43 +wringing hair,43 +woogi,43 +wiola magica,43 +wing earrings,43 +wiglett,43 +wet kimono,43 +werbellia,43 +wendy (honkai impact),43 +welkin gunther,43 +wekapipo,43 +webang111,43 +weather report (stand),43 +wayforward,43 +watercolor background,43 +water spring,43 +watchi,43 +wasabi sushi,43 +wankosoba (wanwan soba),43 +wangqi,43 +wandu muk,43 +wakkuchin20,43 +wagnaria uniform,43 +wa ga ne,43 +vitamin quest,43 +viria13,43 +vika (fire emblem),43 +victor (tales),43 +vice granscenic,43 +vibrator over clothes,43 +vf-31j,43 +vex shadow (league of legends),43 +vestaria saga,43 +venusflower,43 +venus (skullgirls),43 +vanquice,43 +uso da,43 +urataros,43 +urashima haruka,43 +unown v,43 +unown j,43 +unohana tsukasa,43 +undressable,43 +ump45 (diamond flower) (girls' frontline),43 +umezu kazuo (style),43 +ultraman suit,43 +ui (majiwi),43 +ufkqz,43 +type a kou-hyouteki,43 +twitter banner,43 +turtleneck bodysuit,43 +tsunami samemaru,43 +tsumugu otome to taishou no tsuki,43 +tsukugu,43 +tsukikusa megumi,43 +tracey sketchit,43 +totomiya,43 +tosaka teru,43 +torizousui,43 +torino kawazu,43 +torinannkotsukushi,43 +top gear,43 +too many eyes,43 +too many belts,43 +tongari boushi no atelier,43 +tomo wakui,43 +tomatology3,43 +tokugawa soyo,43 +tokorinowa,43 +togashi kuzuha,43 +toddifons (arknights),43 +tione hyryute,43 +tio (005),43 +tihoro1609,43 +thunder kitty wattson,43 +three-wheeler,43 +thorsten erdt,43 +thors military academy uniform,43 +thomas (aoakumasan),43 +the lego movie,43 +the kite,43 +the emperor (tarot),43 +the baron,43 +teru-chan (aoi tori),43 +teppen,43 +tenkomori (bug kura),43 +teke (exploration),43 +teke,43 +team plasma uniform,43 +tea ceremony,43 +tayuura (kuwo),43 +taut bikini,43 +tare-katsu,43 +tarai (silica5),43 +tape dispenser,43 +tanshio,43 +tanokura mon,43 +tanaka tom,43 +tanaka saeko (haikyuu!!),43 +tamo (tamokuteki kuukan),43 +takimi haru,43 +takeout container,43 +take (take143000),43 +takawashi 
nagisa,43 +takatsuki kasumi,43 +takapin,43 +takano jiyuu,43 +takahashi keitarou,43 +takahashi (te6-6ba),43 +takada yuuzou,43 +tachibana amane (amane01a),43 +tac-50 (girls' frontline),43 +tabiutaonpu,43 +t (toddy t),43 +t'challa,43 +sytokun,43 +syntier13,43 +sww13,43 +sweet madame (genshin impact),43 +suzune nia,43 +suzumori kuroku,43 +suzumiya haruhi no tomadoi,43 +suzuki mei,43 +suzui shiho,43 +suzette (sa9no),43 +survival friends,43 +sunset (porforever),43 +summoner (fft),43 +suitenan,43 +suika soda,43 +suika aji,43 +suiguutou (juuni kokuki),43 +sugar sugar rune,43 +suenaga (progressive),43 +su-47 berkut,43 +stuffed otter,43 +striped umbrella,43 +striped leggings,43 +striped capelet,43 +strange klug,43 +stormcallart,43 +star destroyer,43 +star breaker,43 +stanchion,43 +spore (ragnarok online),43 +spoon bending,43 +spinel (9057),43 +spiked,43 +spica (vocaloid),43 +spectrier,43 +spade tattoo,43 +souto (0401),43 +sotomichi,43 +sos-dan logo,43 +soror,43 +sorceress (dragon's crown) (cosplay),43 +sora no manimani,43 +solwyvern,43 +sockjob,43 +social kasu (mob oji katu),43 +soc nau,43 +snap my choker (phrase),43 +smooth criminal,43 +sly930105,43 +slush (norasuko),43 +sleepwalking,43 +slapping with penis,43 +skullcap,43 +sizque,43 +sivatherium (kemono friends),43 +sinape,43 +sin faye,43 +sin (hankotsu bunny),43 +simone aragon,43 +sima yi,43 +silver choker,43 +silver (metal),43 +silenxe,43 +side-tie legwear,43 +shuuyu,43 +shutumon,43 +shousetsu,43 +shotan,43 +shoggoth (monster girl encyclopedia),43 +shoe box,43 +shizuhime,43 +shishiou gai,43 +shirotae moyashi,43 +shiro (bombergirl),43 +shiro-hane,43 +shirley warwick,43 +shirato jin,43 +shirakawa mayumi,43 +shirai momota,43 +shirahane suou,43 +shionji ax,43 +shinzousan,43 +shinonome neko-tarou,43 +shinkai makoto,43 +shimono (utapre),43 +shimantogawa,43 +shillo,43 +shijou sadafumi,43 +shijima (agkm),43 +shigure kasumi,43 +shida yuudai,43 +shiawase okiba,43 +sheth (gentsuki),43 +sherlotta,43 +sheriff,43 +shenhai (2556146833),43 +shen woo,43 +she-ra,43 +sharasohju,43 +shampoo challenge,43 +shallistera (atelier),43 +shadow rise,43 +sg (under siiiiii),43 +sex underworld e youkoso!,43 +setsuna trip (vocaloid),43 +seth (closers),43 +sesshouin kiara (beast iii/r),43 +senkane,43 +senchimee,43 +sena mikoto,43 +sena (mineruba),43 +seiya hoshiko,43 +seishun fragile,43 +see-through pants,43 +see-through mask,43 +sebychu,43 +seallllion,43 +scottish fold,43 +scathach (formal dress) (fate),43 +sayama norika,43 +satsuki harunobu,43 +satsuki (notsachiko),43 +satchii,43 +sasihmi,43 +sasaoka tatsu,43 +sasaki masakatsu,43 +sarnath,43 +sanjouno haruhime,43 +sanctuary-of-apricot,43 +sakusan yousoeki,43 +sakurano asahi,43 +sakippo (sakippo0),43 +sakikagami,43 +sakayaya,43 +sakaokasan,43 +saitama seibu lions,43 +sagano aoi,43 +sadahara inako,43 +sachishiro pengin,43 +saber kitty (disgaea),43 +saamon (salmonkomaku),43 +ryuujin naga,43 +ryuu (monster girl encyclopedia),43 +ryu-tan,43 +ryo chimo,43 +ryakusun,43 +rururara,43 +rune (ru-nn),43 +rugby,43 +rosemon,43 +rokusaki coney,43 +rokurokubi,43 +rokudou itsuki,43 +robot (pixiv 42325944),43 +robo-ky,43 +robe slip,43 +ro-ga (kurumaya),43 +riventla nuck,43 +rinoko,43 +rindou aya (meshimase idol),43 +riku (kemurikusa),43 +rikaritta aries,43 +rika eastre,43 +rifufu,43 +rico (jackdoa),43 +ric (fwpbox),43 +rey za burrel,43 +revy (black lagoon) (cosplay),43 +retsuko,43 +repunit,43 +reiroukan misaya,43 +rei (breath of fire),43 +rei-kun,43 +re:ia,43 +rca,43 +rayleigh scale,43 +raven (fire emblem),43 +ranchuu (akamusume),43 +raku 
rakugaki,43 +rakan (league of legends),43 +rairyuu,43 +qualia qu,43 +qp shooting,43 +puyue,43 +purple trim,43 +puropera (puropera),43 +purism egoist,43 +purino party,43 +punc p,43 +public bondage,43 +print hair,43 +prinny (series),43 +preschooler (pokemon),43 +pray (furei),43 +pote (aonibi kairou),43 +porusasu,43 +porun,43 +popgun (22882502),43 +ponchi,43 +pon takahanada,43 +polaris (shinrabanshou),43 +poe (528yuzunon),43 +pochimaru (vtuber),43 +plushcharm,43 +pixiv bottlecap,43 +pisu,43 +phara,43 +perseus (fate),43 +persephone (p&d),43 +perry,43 +pearlscale0818,43 +patatata,43 +pastel (twinbee),43 +passimian,43 +panda copt,43 +palcomix,43 +pakunoda,43 +paint on fingers,43 +p90 (scarlet turbo) (girls' frontline),43 +overhaul (boku no hero academia),43 +ougon musou kyoku,43 +otoufu (gotouhu),43 +otonashi kotori (cosplay),43 +oskar von reuenthal,43 +orange moon,43 +orange-shaped earrings,43 +open can,43 +oone0206,43 +ooba eimi,43 +one-armed hug,43 +onda aka,43 +olivert reise arnor,43 +older twin sister (muninshiki),43 +ohagi (hurimaro metayaki),43 +oh? you're approaching me? (meme),43 +office lady taiwan,43 +ochikata kage,43 +obi spin,43 +novus rue,43 +nosuri,43 +nora-toro,43 +noele (toosaka asagi),43 +noberuge,43 +no tattoo,43 +no more eiga dorobou,43 +no hairclip,43 +no freckles,43 +nn (eogks),43 +niyasuke (yama),43 +nixtutyannh,43 +nitogebeto,43 +nitefise,43 +nissan gt-r,43 +nissan fairlady z,43 +nishita,43 +nishimura (prism engine),43 +nise pakuman-san,43 +niro (sikabanekurui),43 +nirai kanai,43 +niniidawns,43 +nina klein,43 +niko (oneshot),43 +nijou touka,43 +nig 18,43 +nifl (fire emblem),43 +nicky blake,43 +netojuu no susume,43 +nessa (pokemon) (cosplay),43 +nero claudius (bath robe) (fate),43 +nendoroya,43 +nell (pangya),43 +nekomata (disgaea),43 +nekohige,43 +nein (album),43 +neighbor quartz,43 +neck piercing,43 +naver username,43 +nautilus (league of legends),43 +nattapuumuu,43 +natsu (hottopeppa3390),43 +naoya (devil survivor),43 +nanjou akimasa,43 +nanina (nijnan),43 +nanatsu no umi,43 +nanasuke,43 +nanase rumi,43 +nanase nanami,43 +nanamiya natsumi,43 +nanai,43 +nanagane educational institution,43 +namuru (kurinton),43 +nakagawa kanon (pixiv32798535),43 +naguramu,43 +nagisa nagi,43 +nagisa (kantoku),43 +nagisa (imizogami),43 +nagatsuki take,43 +mylovelydevil,43 +mutsutsu,43 +mutsuki albino,43 +musteflott419,43 +musso (gyee),43 +musashimaru,43 +murata mine,43 +murata himeko (scarlet fusion),43 +murakumo1987,43 +muraichi,43 +muck (artist),43 +mrs.pumpkin no kokkei na yume (vocaloid),43 +mrr 05,43 +mr. x (resident evil),43 +mr. 
j.w,43 +mouth beam,43 +mounting,43 +mountain hare (kemono friends),43 +morty smith,43 +monousa,43 +monoheiya,43 +monmo mu,43 +moni monico,43 +momo no sei (onmyoji),43 +momo-deary,43 +momikocu,43 +mokkosu.,43 +mogijabgo,43 +mogeko (mogeko castle),43 +mochen,43 +mocacoco339,43 +mo qingxian,43 +mizushiro takuya,43 +mizuki (quilt),43 +mizukanayuki,43 +mizore akihiro,43 +miyasemao,43 +mitsuha (bless blessing),43 +mitosa,43 +miton (ton321),43 +misaki (kyal 001),43 +misago (525),43 +miri1120,43 +mio (fastest lap),43 +mini (pixiv6327751),43 +minamo25,43 +minami toshimi,43 +minakami yuki,43 +mimi pearlbaton,43 +mikannsisyou,43 +mikami (vitamin quest),43 +mika-shi,43 +mii (makosuke),43 +miffy (character),43 +mif,43 +miette (pokemon),43 +midarin,43 +mid-boss (disgaea),43 +micosiva,43 +michi,43 +meteos,43 +metaring,43 +mendou shuutarou,43 +mejiro palmer (devil in the moonlight) (umamusume),43 +mejiro mcqueen (racehorse),43 +medusa (kid icarus),43 +mattie,43 +matori yoshika (character),43 +matano seiko,43 +masshu (shu 123426),43 +mary (soul hackers),43 +maru (pixiv51714255),43 +mario golf,43 +marielle (log horizon),43 +mao (code geass),43 +malshi edroad,43 +makoji (yomogi),43 +makimaki makky7,43 +makai tenshi djibril 4,43 +major (hellsing),43 +maineko ruru,43 +maijima karen,43 +maha (gentsuki),43 +magical mirai miku (2015),43 +maeda mic,43 +machi wt,43 +m1014 (girls' frontline),43 +lwj,43 +luzzi (milllim),43 +lute (apocalypselibrary),43 +luncheon meat umai,43 +luna aegis (closers),43 +lost one no goukoku (vocaloid),43 +living (pixiv5031111),43 +live union,43 +littorio (calabria aurea) (azur lane),43 +link163353,43 +line-san,43 +lily (ender lilies),43 +lillia greyrat,43 +lillia (league of legends),43 +libeuo (liveolivel),43 +li sushang (jade knight),43 +leonardo da vinci (azur lane),43 +legendarysoulii,43 +legend (tiger & bunny),43 +lefty10,43 +lee (punishing: gray raven),43 +le terrible (azur lane),43 +lb (muraihou),43 +layered panties,43 +larum (fire emblem),43 +lars alexandersson,43 +larry foulke,43 +langley (kancolle),43 +langley (azur lane),43 +lancefate,43 +lace-trimmed swimsuit,43 +l.tea,43 +kyokugen chikan tokuiten 2,43 +kyapu-10,43 +kwrrrrrr,43 +kuzu suzumi,43 +kusuriya no hitorigoto,43 +kuruuya,43 +kurumitsu,43 +kuroshin,43 +kurosaki sayoko,43 +kuroneko pantsu,43 +kurogiri,43 +kuro kinkan,43 +kuro (grf),43 +kuro4221,43 +kuri (shibimame),43 +kunon,43 +kumauwo,43 +kuma (happylocation),43 +kuma-hina (31 violence),43 +kujou rin,43 +kuhnowushi,43 +kuga utao,43 +kudou akira,43 +kudoi,43 +kudakeru,43 +kuangtai (amami ryoko),43 +ku roiko,43 +ktrtokyo,43 +kouryuu densetsu villgust,43 +kouhai-chan (fukutchi),43 +koubakotone,43 +kotobuki reiji,43 +korogoro (mago0057),43 +konohana enishi,43 +komore,43 +komiya nonoka,43 +komaichi,43 +kojijima,43 +kohaku (rune factory),43 +kmtk,43 +kko (um7mr),43 +kiyohisa,43 +kite flying,43 +kita senju,43 +kirishina (raindrop-050928),43 +kireina (osiimi),43 +kirby: right back at ya,43 +kinohara kossuta,43 +kino hitoshi,43 +king gainer over!,43 +kimi no tonari de koishiteru!,43 +kikunojo (one piece),43 +kibitarou,43 +kent mame,43 +keiki (juuni kokuki),43 +keepvalley,43 +kazuma (theworld000021),43 +kazuki mai,43 +kawai maria,43 +kate (sketchbook full colors),43 +katase yuu,43 +kasaki sakura,43 +karyuu koujo,43 +karen (karenendo),43 +kapebeansies,43 +kanki (kibunhasaikou),43 +kangmoro,43 +kanata (sentiment),43 +kana yukino,43 +kan0nakan0,43 +kamishiro rio,43 +kamikita futago,43 +kalim al-asim,43 +kalawarner,43 +kakueki-teisha,43 +kakari,43 +kain fuery,43 +kagami yuu,43 
+kafei,43 +k2shh,43 +k.ei,43 +justminor,43 +julius yu,43 +julius (fire emblem),43 +johnnyyyyy,43 +jirou (ramen),43 +jinlin,43 +jigen daisuke (cosplay),43 +jida,43 +jewelpet magical change,43 +jenny dolittle,43 +jennifer (shepherd0821),43 +japan railways,43 +jaoooo,43 +james potter,43 +jakomurashi,43 +jagged sword,43 +jaeyun,43 +jack krauser,43 +j2l,43 +j.xh,43 +j-chad,43 +iya na kao sare nagara opantsu misete moraitai yo wa pantsu ga mitai zo,43 +iya maid,43 +itou satoshi,43 +itou junji,43 +ito (silk9f),43 +itirirenn,43 +ite fuji,43 +itame moyashi,43 +isshin (kaxz),43 +iskaydi,43 +iskandar (fate) (cosplay),43 +ishita (sunagimomo),43 +ishino satoshi,43 +iroha (nullpo),43 +insulting viewer,43 +inferno (nanbu14),43 +indomitable marie,43 +inase,43 +inanome me,43 +inaho178,43 +imminent fight,43 +imelda rivera,43 +ilyfon133,43 +ilohasvio,43 +ikki,43 +ikeno kaede,43 +ignis (last origin),43 +igarashi kei,43 +idol revolution,43 +idaku,43 +ichinomiya kou,43 +iceblue,43 +ice cream crepe,43 +i am l,43 +i (deichi),43 +hyper police,43 +hyper highspeed genius,43 +hutago,43 +husun wei,43 +husband and wives,43 +hu tao (lawson) (genshin impact),43 +hresvelgr,43 +hozuki ferrari,43 +houseki hime,43 +houkiri nemu,43 +hotaru (ss801),43 +hoshika ranoe,43 +horseshoe print,43 +hop step jumpers,43 +hoozuki suigetsu,43 +holographic keyboard,43 +hollow knight (character),43 +hole in face,43 +holding cigarette pack,43 +holding charm,43 +hokkaido,43 +hoenn (jgm1102),43 +hiyashi mikan,43 +hiyari (hiyarilol),43 +hirotaka (hrtk990203),43 +hirose (10011),43 +hirano kouta (hsotd),43 +hippie,43 +hinamizawa hinami,43 +hinako (anzu15),43 +himiko (persona 4),43 +hikari no,43 +hikari50503,43 +hikari-chan (kanabun),43 +hiiragi ryou,43 +hiiragi matsuri,43 +higashiyama (higashiyama honpo l.t.d),43 +hiei-chan (azur lane),43 +hexagon print,43 +hetare (hetare013),43 +heris ardebit,43 +heisei yutorin,43 +hei yksk,43 +hecarim,43 +hebina masayoshi,43 +hazuki ryou,43 +hay bale,43 +hato (kosobin),43 +hat tassel,43 +hashimoto kokai,43 +hasekura noizu,43 +haru hina,43 +harlow garden,43 +hariko,43 +hanta96,43 +hansel and gretel,43 +hand on another's ear,43 +hanawa kaoru,43 +hanakuma chifuyu,43 +hanai haruki,43 +hamaya shin'ichi,43 +hak (akatsuki no yona),43 +hagiwara kazushi,43 +hacksaw,43 +hachimaru (ediciusa),43 +h (158cm),43 +h-a-j-i-m-a-r-i-u-t-a-!!,43 +gusty10rk,43 +gundam astray red frame,43 +gunbam,43 +gumoyu,43 +guider to the eternal edge,43 +gu-rahamu omega x,43 +group battle,43 +groin attack,43 +grisaia phantom trigger,43 +green (konkichi),43 +graph paper,43 +googerm,43 +gokugetsu momo,43 +goetia (fate),43 +gochiwa,43 +gloria (devil may cry),43 +gladiia (return) (arknights),43 +giulio cesare (azur lane),43 +giant leaf,43 +ghost driver,43 +gennai ao,43 +gemba (dlfms75),43 +gel (chiericyan),43 +garden eel,43 +gandaresu baran,43 +fuzuki hajime,43 +fuurin restia,43 +fuu torutanme,43 +fuu-chan (fujiwara gacho),43 +futayamam2,43 +futaki shiki,43 +fushigi mahou fun fun pharmacy,43 +fur-trimmed waist cape,43 +fukutaichou badge,43 +fukuoka softbank hawks,43 +fukui sora,43 +fujiwara hisashi,43 +fufu (fufuichi04),43 +from ground,43 +friedrich der grosse (dark raiments of gagaku) (azur lane),43 +fremea seivelun,43 +frederica irving,43 +fraxure,43 +four (drag-on dragoon),43 +fluorart,43 +fluff kevlar,43 +flower censor,43 +flare gun,43 +flapple,43 +fino bloodstone,43 +female admiral (kancolle) (cosplay),43 +fantasy earth,43 +falcom,43 +faith connors,43 +faerie (seiken densetsu 3),43 +eyewear on clothing,43 +ewokakuman,43 +eva 13,43 +esmyrelda 
maximus,43 +erk (fire emblem),43 +eris.y (7hai),43 +erich von rerugen,43 +eps3rd,43 +enter enter mission!,43 +enpou,43 +enkidu (sensha otoko) (fate),43 +enk 0822,43 +eniwa shii,43 +endo yohane,43 +end roll,43 +emoncake.,43 +ememtrp,43 +els,43 +elreyiens,43 +eiroyi,43 +educational,43 +edalyn clawthorne,43 +ebikawa kanetake,43 +eating contest,43 +durandal (kei),43 +dulse (pokemon),43 +drink me,43 +drawn horns,43 +dragon providence,43 +dousaki shin'ya,43 +dominia yizkor,43 +domestic violence,43 +dolphro-kun,43 +doinaka,43 +dog mask,43 +dog-san,43 +doctor magus 4,43 +disc (needless),43 +dif (difman),43 +dhyana mudra,43 +detached horns,43 +deneve,43 +decay,43 +deadpan,43 +dd (giogio99),43 +"dateless bar ""old adam""",43 +date wingfield reiko,43 +dante (devil may cry) (cosplay),43 +dai (uhyoko1102151),43 +d kake2,43 +d-zhai,43 +d'orsay heels,43 +culton,43 +cult of the lamb,43 +cu chulainn (prisma illya),43 +crying emoji,43 +crunchyroll,43 +crunchobar,43 +crueltear,43 +cropped head,43 +crimson kaiserin,43 +creamer packet,43 +crasher wake,43 +coupon,43 +coronavirus,43 +conope,43 +color issue,43 +cocontma,43 +coalossal,43 +climbing wall,43 +clamps,43 +cjrb1228,43 +chunlieater,43 +chu (huaha1320),43 +chouzetsu yarou,43 +chise (ichiri),43 +chipped sword,43 +chinoru,43 +chidori nekoro,43 +chibi on shoulder,43 +cheshire (ragnarok online),43 +cherry third,43 +cheety (show by rock!!),43 +charlotte linlin,43 +chaos space marine,43 +chan'nu,43 +chamomile,43 +chalk outline,43 +cendrillon (vocaloid),43 +cele (310v3),43 +cat shop,43 +carrier,43 +carole stanley,43 +card pendant,43 +candy corn,43 +cancer death mask,43 +c96 (girls' frontline),43 +bygddd5,43 +byako (srktn),43 +burnin (boku no hero academia),43 +burijittou,43 +bunbun (midukikome),43 +bullet girls,43 +bubble girl (boku no hero academia),43 +brynhildr (tome),43 +brown buruma,43 +broken bone,43 +bream-tan,43 +bracket,43 +bovyng,43 +bosutonii,43 +bonobono (character),43 +bondo (borndragon),43 +bolero (bo le ro66),43 +blue shell (mario),43 +blue (konkichi),43 +blouse removed,43 +blackma (pagus0012),43 +black nipples,43 +bingsardina,43 +biggs (ff7),43 +bible bullet,43 +bettykwong,43 +betanya,43 +benares (honkai impact),43 +beerko,43 +beehunter (arknights),43 +bb (fate/extra) (cosplay),43 +batman (cosplay),43 +basil (st 1),43 +banshouya ena,43 +banging,43 +bai qi (love and producer),43 +bad e-hentai id,43 +azumane asahi,43 +ayaya,43 +ayanami (warship girls r),43 +ayamatazu,43 +ay (1054105084),43 +autumn-north,43 +august1st,43 +atea,43 +astral buddy,43 +aston martin,43 +asougi rin,43 +ashita kura,43 +ashermes,43 +asahashi tsuyu,43 +asada okina,43 +artoria pendragon (swimsuit archer) (second ascension) (fate),43 +artificial academy 2,43 +arthur (fire emblem),43 +armpit onigiri,43 +armored bodysuit,43 +arisugawa bii,43 +arisato yui,43 +aqua wings,43 +apron basket,43 +apo (apos2721),43 +aoten (aoiroarekore),43 +aosiro-michi,43 +aomi maika,43 +anzu0130,43 +anubis (stand),43 +anshan (azur lane),43 +annihilate ray,43 +anlucea,43 +anette (pso2),43 +anbu,43 +anakin sky (utcc),43 +amiibo,43 +amicis (amisic),43 +amatsuka hikaru,43 +amano taiki,43 +amakawa hano,43 +ama asagi,43 +altorealize,43 +although she hurriedly put on clothes (meme),43 +alondite,43 +alfin,43 +alexis (zkstxxx),43 +akuto,43 +akuma nihmune,43 +akuma (ogino-m),43 +akqne,43 +akio (akio1124),43 +akidenmania,43 +akaya,43 +aizawa (teaminazuma),43 +aikura chihiro,43 +aikawa fuuri,43 +ai kotoba iii (vocaloid),43 +ai cao,43 +ah-1 cobra,43 +age fx,43 +agano (azur lane),43 +aethos,43 +aesice,43 
+aegisfate,43 +advance wars: days of ruin,43 +adjusting earrings,43 +abyssal nimbus princess,43 +aboreen,43 +abc (type5 labyrith),43 +abarai575,43 +abab xiaxia,43 +9so (ponchon),43 +8c,43 +888,43 +8'108,43 +5pb (neptune series),43 +29 (artist),43 +1 (kawaseha),43 +14c,43 +zongren,42 +zoe (spacezin),42 +zi-dabu,42 +zeta plus,42 +zeorymer,42 +zb (xucz8527),42 +z18 (azur lane),42 +yuzumame,42 +yuzuko,42 +yuzukineko,42 +yuzu lemon,42 +yuuyan,42 +yuuutsu shan,42 +yuuppi,42 +yuukome (tekunon),42 +yuujoduelist,42 +yuugure (azur lane),42 +yuugumo (kancolle) (cosplay),42 +yuu-yuu,42 +yurul,42 +yuri (anachronic),42 +yuraiko,42 +yunsuku,42 +yuna (ff10) (cosplay),42 +yun-yang,42 +yumesaki kaede (game club project),42 +yumekawa ruru,42 +yuit (queen's blade),42 +yuineko,42 +yui (seiga),42 +yufeng kaete,42 +yue xiao e,42 +yuan long,42 +yu sa1126,42 +yu-gi-oh! (toei),42 +youngjijii,42 +youl,42 +youko (onmyoji),42 +yotogi (yotogi luminary),42 +yoshioka haru,42 +yoshimi,42 +yoshida kochou,42 +yorick (league of legends),42 +yori (a a yori),42 +yonasawa,42 +yomogi uehara,42 +yok01,42 +yk,42 +yggdrasil (sao),42 +yayaziiii,42 +yatsucchie,42 +yato (arknights),42 +yasuda genshou,42 +yasu suupatenin,42 +yasaka (astray l),42 +yaruse,42 +yang harim,42 +yanase aki,42 +yanagise,42 +yanagiba kiriko,42 +yamu (yamuyama web),42 +yamata no orochi (kemono friends),42 +yakurope-moko,42 +yaeba,42 +xiongbingbisata,42 +xing hai,42 +xiang wan wei wan,42 +xiamianliele,42 +xexu,42 +xbsx,42 +wszkii,42 +wolfina,42 +wojtek (ido),42 +winged animal,42 +william tell (fate),42 +willfin,42 +wild arms: million memories,42 +white leg warmers,42 +whirlwind,42 +welch vineyard,42 +wazd0183,42 +watashishi,42 +watabe keisuke,42 +washboard,42 +wang guo nian,42 +wakka,42 +vt,42 +vk-47 flatline,42 +vision (marvel),42 +viridian-c,42 +vengeful hannya (onmyoji),42 +varyu,42 +v-22 osprey,42 +uzumaki naruto (cosplay),42 +utawarerumono: futari no hakuoro,42 +us2s,42 +unwrapping,42 +umio (neptune series),42 +umikaze (azur lane),42 +umajiri gyuunyuu,42 +ultraman trigger (series),42 +uh-1 iroquois,42 +ueda toraji,42 +ue (xjhu3558),42 +uduki (nissi),42 +udonko072,42 +uchuu kaizoku sara,42 +uchi no musume ni te wo dasuna!,42 +tyobimiru,42 +twelve,42 +tusk act4,42 +tsuzurao,42 +tsurikichi obasan,42 +tsuno (nicoseiga11206720),42 +tsukunendo,42 +tsuki jin,42 +tsubuki (ron-bb69),42 +tre kronor,42 +tower of dragon,42 +touzoku arthur,42 +touyu (yuruyuruto),42 +touko (kira2yue),42 +touhou musou kakyou,42 +tougetsu matsuri,42 +tottema,42 +toriumi harumi,42 +torashiro eiji,42 +tooka,42 +tonzura,42 +tonton (mathcaca24),42 +tonbury,42 +tomoe (itigo),42 +tokiki (tomok1),42 +tokihanatareshi,42 +tokage (kaamin),42 +toi1et paper,42 +tina topia,42 +tiger mask (series),42 +tifg39,42 +thriller,42 +thore (nathalukpol),42 +the lovers (tarot),42 +the king of red lions,42 +the king of fighters 2003,42 +tharkis,42 +tetsukan,42 +teruteru12,42 +tenryou sena,42 +tenamaru,42 +ten kurairakon teikoku-gun,42 +tekkai blade,42 +teikoku,42 +te to te try on,42 +tasuku,42 +tara (szzj7733),42 +tanyuu karibusa,42 +tanuma miyuki,42 +tanpopo hayabusa-maru,42 +tankcay,42 +tamagoboro,42 +tamafurin,42 +taku (user nxgk7748),42 +takeuchi kou,42 +takanashi tsumugi,42 +takamizawa natsuki,42 +takahashi meishin,42 +taka-chan,42 +tadashi hamada,42 +tadano souko,42 +tadano shiroko,42 +tadano myoushi,42 +sylvie (isekai maou),42 +sylvia (konosuba),42 +sweet flower,42 +swan lake,42 +suzuna isurugi,42 +suzumusi114,42 +suzuhara izumiko,42 +suzu (susan slr97),42 +sutei (xfzdarkt),42 +sutegoro shiina,42 
+surgeonfish,42 +super sailor chibi moon (stars),42 +sumeragi sunao,42 +sumeragi (black rose),42 +sue (fire emblem),42 +stuffed crocodile,42 +striped sash,42 +straight razor,42 +ssong2,42 +squirrel boy,42 +spp-1 (girls' frontline),42 +spas-12 (goblin huntress) (girls' frontline),42 +soul edge (weapon),42 +soukoku no arterial,42 +sore (whirlwind),42 +soranona (soranosuke),42 +solitude rain (love live!),42 +sode no shirayuki,42 +social network,42 +snow on body,42 +smolder (kanel),42 +smol gura,42 +skym (kumei),42 +sky diver xipuria,42 +sks,42 +skorpion (crimson starlet) (girls' frontline),42 +siw0n,42 +sitar,42 +sister nana,42 +sirene (last origin),42 +siren (mythology),42 +single slipper,42 +single hair tube,42 +silversirius,42 +silverjow,42 +sigure-zzzz,42 +shuten douji (lostroom outfit) (fate),42 +shunga (shun608),42 +shumichi,42 +shumiao,42 +shounen democratica,42 +shoujo material,42 +short-haired girl (osomatsu-san),42 +shishio (artist),42 +shiratori ryuushi,42 +shirasaki tsugumi,42 +shiramori sawa,42 +shirakawa (whitemist),42 +shirakami fubuki (cosplay),42 +shiraishi asuka,42 +shiori (jonsun),42 +shion (kof),42 +shinsei (easycross1226),42 +shinonome (cookie),42 +shindoine,42 +shindoi akio,42 +shin mazinger shougeki! z-hen,42 +shimamura joe,42 +shikishima fugen,42 +shijohane,42 +shiina shian,42 +shigure (shigure 43),42 +shien (tatunokoshien00),42 +shiabisu,42 +shell to ear,42 +shadowgale,42 +shadow kirby,42 +shaddoll fusion,42 +sezamyan,42 +sensaki chihiro,42 +seneo,42 +semidou jun,42 +selenoah,42 +seki oriko,42 +sei dorei gakuen,42 +scarlett (artist),42 +satomi (745684552),42 +sasorina,42 +sasano shiki,42 +sarekoube,42 +sarayashiki junior high school uniform,42 +sanmi tenten,42 +sanada-x,42 +sammy (bestsammy),42 +sama (sama24654684),42 +sakurakouji tsukuyomi,42 +sakurai masahiro,42 +sakura rin,42 +saitaniya ryouichi,42 +saint-germain (symphogear),42 +sai koro,42 +sabou san-shitsu kokoro,42 +sabertooth cat,42 +s (tenshi no kiss),42 +ryanreos,42 +runaway girl (jojo),42 +runawate56,42 +run the 9tails,42 +rum raisin,42 +ruby (ff9),42 +rougetsu,42 +roteri (roteri 69),42 +rorona s.,42 +roe (d-c -b),42 +rodimus,42 +robin (unlimited world),42 +road bicycle,42 +ritalem,42 +riou (pooh920),42 +rinko riban,42 +rikei-chan (tawawa),42 +rib (rib delesetong),42 +retrospective 53 minutes,42 +reno (summer spin-off) (azur lane),42 +renge (ngetyan),42 +renault,42 +remington model 700,42 +regidrago,42 +red-eared slider (kemono friends),42 +reccu,42 +reah (ys),42 +ratana satis,42 +rasputin (fate),42 +ranemu,42 +rakusai (saisai garou),42 +rafters,42 +quest receptionist (monster hunter 3 ultimate),42 +qing yunyi,42 +qin liangyu (chainsaw of the dead) (fate),42 +qawsedrftgyhujikolp,42 +pushing cart,42 +purplevortex,42 +pua,42 +psuede,42 +professor ozpin,42 +prinny ~ore ga shujinkou de iinsuka?~,42 +princess leona,42 +precure netorare (meme),42 +porthole,42 +porno dianno,42 +poppy bros jr,42 +popori,42 +pomf,42 +police motorcycle,42 +pokemon tower ghost,42 +pokemon ranger 2,42 +poechan chan,42 +piyo,42 +piruluk,42 +pirorun,42 +pink delmo,42 +piano wire,42 +persia (mahou no yousei persia),42 +peperoncirno,42 +penis wrap,42 +peakjump,42 +paulo barrios,42 +parappa the rapper,42 +papepox2,42 +panties around toe,42 +panda (heart sink),42 +paint gun,42 +pacifica casull,42 +ozyako,42 +oywj,42 +oxenia,42 +owasaki,42 +oversized insect,42 +outfit connection,42 +oukafafafa,42 +otome domain,42 +otochichi,42 +osananajimi wa daitouryou,42 +oro ponzu,42 +oriotorai makiko,42 +orichalcum reycal,42 +orange mikan,42 +oqwda,42 
+oppai hoodie,42 +opening can,42 +onmyoji: the card game,42 +onaji (sokudo seigen),42 +olivia (yh),42 +ohiensis,42 +oekakiboya,42 +nyx (hades),42 +nyamsas,42 +numyumy,42 +numenume (powa-ogutyogutyo),42 +numarinko,42 +nukekip,42 +np (slipbounds),42 +notte (dragalia lost),42 +norisukep,42 +norigure18,42 +noptidha lukchup,42 +nonaka yuu,42 +nomo (16 16),42 +nolmo,42 +nolma7,42 +nojima chika,42 +nipye,42 +ninozen,42 +nine (dark),42 +nimbus (world flipper),42 +niconico id,42 +ni (2shi),42 +next white,42 +netorase,42 +netflix,42 +nero claudius (idol emperor) (fate),42 +neris (shining hearts),42 +nerdyart1,42 +neptune (neptune series) (cosplay),42 +nekotawawa,42 +nekomiya noru (yuduki710),42 +nekojirou,42 +nekoda kuro,42 +neko cyber (module),42 +necolab,42 +natsumi kei,42 +natsujiru,42 +nathan drake,42 +naoya (naoya ee),42 +naohiro,42 +nanozenzen,42 +nanase sena,42 +nanamiso,42 +nanami (virtuareal),42 +nana to kaoru,42 +nakoya (nane cat),42 +nakashima (middle earth),42 +nail file,42 +nahonanaho,42 +nagato (battleship),42 +nagamori mizuka,42 +nadayui,42 +nabe (crow's head),42 +n1k,42 +myriam (saga),42 +myholo tv,42 +murata himeko (arctic kriegsmesser),42 +muranushi sayuri,42 +muon,42 +mulberry (plant crude drug) (arknights),42 +mugupo,42 +mugimaru,42 +mr tangsuyuk,42 +mozu suka,42 +mozu (fire emblem),42 +mou (mooooow),42 +motsutoko,42 +moto (otemoto02),42 +mosin-nagant (moonlit ocean) (girls' frontline),42 +moryu,42 +morse code,42 +morisoba (silent hill),42 +moriguchi yuu,42 +mononoke (empty),42 +mononobe no futo (cosplay),42 +monolith (object),42 +momochi chia,42 +mogi yuusuke,42 +mochimochimochi,42 +mochiko (x-game),42 +mobile armor,42 +mob ojisan,42 +mn (zig r14),42 +mm30212,42 +mizuiro 32,42 +miyasato haruka,42 +miyaoi,42 +miyako (mongcocoa),42 +miyabi (miyabi r18),42 +miura takehiro,42 +mitsusaka mitsumi,42 +mitsurou,42 +mitsuki yuu,42 +mitsuki ponzu,42 +mitsubishi lancer evolution,42 +mitsu (nonoko05),42 +misogi (misogi1341),42 +miri (miri 1m),42 +miranjo,42 +mirai no bokura wa shitteru yo,42 +minami shin (zenshuu bougyo),42 +mina (shingeki no bahamut),42 +mimosa (flower),42 +milkychu,42 +milk mustache,42 +miles (fma),42 +mikuzukin (module),42 +mikimo nezumi,42 +midori (kemurikusa),42 +micro uzi,42 +michiyon,42 +mia alice,42 +mia (39565899),42 +mhong,42 +merric (fire emblem),42 +men's young,42 +melompan,42 +melissa mao,42 +meke (77842928),42 +mek,42 +medori,42 +matsuura ayane,42 +matokichi,42 +mashima taichi,42 +masa (masa-koba),42 +mark (heaven),42 +maria (ogino atsuki),42 +mamiya sakura,42 +mami akira,42 +mamedenchi,42 +mailbag,42 +maidensnow no youkai dai-makyou,42 +mahou no yousei persia,42 +maha5,42 +magnolia arch,42 +maggie mui,42 +magatan,42 +mafu,42 +madara (natsume yuujinchou),42 +maboroshi juuhime,42 +m1887 (girls' frontline),42 +m-a-v-e-r-i-c-k,42 +lzd,42 +lyoung0j,42 +luong,42 +luelue zi,42 +love love princess,42 +lorian (elder prince),42 +lord of walkure,42 +loli fox girl (mdf an),42 +little prinz eugen (azur lane),42 +lis zhu long,42 +lindoh flores,42 +lim (ramu),42 +lili levinas,42 +lilele (granblue fantasy),42 +light elementalist lux,42 +leotard removed,42 +leonis g,42 +leona garstein,42 +lem,42 +leehwa,42 +leech queen (matilda fiship),42 +lee sun young,42 +lee (gyee),42 +lechonk,42 +le chevalier d'eon,42 +ldd.ek,42 +lawnielle,42 +laser pointer,42 +large mouse (monster girl encyclopedia),42 +laozhanshi,42 +lanyingchengzhi,42 +laina (show by rock!!),42 +lactmangan,42 +lace-trimmed babydoll,42 +labyrista,42 +kz-kura,42 +kyuubi (kemono friends),42 +kys (k-k2),42 +kyoma 
(yellowxcake),42 +kyanduru,42 +kushabiria,42 +kuronyanko,42 +kuronekozero,42 +kuromitsu maria,42 +kuro (kurojill),42 +kureson (hayama baa),42 +kuon bb,42 +kunimitsu (tekken),42 +kumuiutabito,42 +kumagapaniti,42 +kukicha,42 +krampus (grizz),42 +koutetsu (fe steel stone),42 +kousaka yuuma,42 +kouhai-chan (mignon),42 +kotsu masumi,42 +kototora,42 +kotomozou,42 +kotobuki shiro,42 +kotobuki minako,42 +kord (girls' frontline),42 +kono sanorou,42 +kona (mmmkona),42 +komecchi,42 +koma (neko musume michikusa nikki),42 +kolmio,42 +kokoro na,42 +kokonoi hajime,42 +koi wa ameagari no you ni,42 +koharu (morikura en),42 +kiyotaki keika,42 +kitara koichi,42 +kitana,42 +kitagawa marin (cosplay),42 +kiss kiss drain,42 +kisaragi (criminal girls),42 +kirov (azur lane),42 +kirishima akari,42 +kirinkirin,42 +kirara yakubou,42 +kirakira patisserie uniform,42 +kinokorec,42 +kinoeneko,42 +kimikimi,42 +kikuma kaya,42 +kihara amata,42 +kibashiba,42 +keyaki (mora ll),42 +keta (psychetangle),42 +kelvena (xenogears),42 +kefir,42 +kazushiki midori,42 +kazesayuru,42 +kazemachi haruka,42 +kazami miki,42 +kaz,42 +kawai sasami,42 +katou taira,42 +katou ryouichi,42 +katakura supipi,42 +katakoriku,42 +kasukabe tsumugi,42 +kasugano sakura (cosplay),42 +karu (ricardo 2628),42 +karohroka,42 +karin (rei862),42 +kare,42 +kardie,42 +karan koron,42 +kaori-san (angelo),42 +kanami (bishop),42 +kanade suzu,42 +kamiya agari,42 +kamishiro rita,42 +kamisada himari,42 +kaminari qpi,42 +kaminari,42 +kamille areopagita,42 +kaiyi,42 +kaisou (0731waka),42 +kaima,42 +kagimura hazuki,42 +kaga (battleship) (kancolle),42 +kadaj,42 +kachou fuugetsu (onmyoji),42 +kabu usagi,42 +jyunhh,42 +jyun,42 +jyami,42 +juudai,42 +june (ne z asa),42 +jsih,42 +joshua (fire emblem),42 +jomy marquis shin,42 +john mactavish,42 +jocheong,42 +jisoo kim,42 +jerun,42 +jeong surim,42 +jeanne d'arc (drifters),42 +japa,42 +jankovsky,42 +jack-in-the-box,42 +j.2,42 +itou youko,42 +ito (itokayu),42 +irina jelavic,42 +ipheion (flower knight girl),42 +insarability,42 +ing0123,42 +inflatable chair,42 +indigo (tylwing),42 +inari (sennen sensou aigis),42 +inahime (sengoku musou),42 +imomochi,42 +illyasviel von einzbern (swimsuit archer) (third ascension),42 +ikeda usao,42 +ike (altitude attitude),42 +ikameshi (nega96396),42 +iihoneikotu,42 +idol janshi suchie-pai,42 +idnar,42 +ideologue!,42 +idenshi hina,42 +ichimatsu (anaumemondai),42 +ichi (pixiv6373491),42 +i g1ax,42 +hyuuga makoto,42 +hys122211,42 +hymmnos,42 +hyakutarou (momotar0 4),42 +hyakunichigo ni shinu wani,42 +humpty dumpty,42 +huey laforet,42 +huan yu,42 +hozu (hozumi),42 +howa type 89 (girls' frontline),42 +hoshino (nia hoshino),42 +hoshina suzu,42 +hoshimaru daichi,42 +hood pull,42 +hontani toshiaki,42 +honeyberry (arknights),42 +honda hanako,42 +holding scale,42 +holding paper airplane,42 +holding bandages,42 +hokkaido (artist),42 +hoji (hooooooooji1029),42 +hkn (ringya),42 +hiwatari makoto,42 +hitachiin kaoru,42 +hisuian lilligant,42 +hism 25 (tsumari),42 +hisami nanami,42 +hisame (shinrabanshou),42 +hirondo,42 +hiroki bzdsk,42 +hira taira,42 +hinoki bayashi,42 +hinoa (hinoa1113),42 +himokawa udon,42 +himeki chifuyu,42 +hiboshi daizu,42 +hi no tori (kemono friends),42 +hessra,42 +heroherotom,42 +herja,42 +hebi (yurari),42 +heart ribbon,42 +headdesk,42 +hayasugi hayato,42 +hasegawamorito,42 +harusameriburo,42 +haruka gracia,42 +harugamitsu,42 +harry du bois,42 +harpyia (last origin),42 +hanging on arm,42 +haneiro,42 +hane riu,42 +hanasaki coa,42 +hanabi (yuruneko0624),42 +hamutz meseta,42 +hamstarhand,42 +hakurei 
reimu (fox),42 +hakata no shio,42 +hakase yurisuki,42 +hajime (gitoriokawaii),42 +hair between horns,42 +haibara ayako,42 +hagimorijia,42 +hachino mugi,42 +gypceros (armor),42 +gyorui (makjita),42 +guts man,42 +guraedo-chungchoon,42 +guraasan,42 +gunuaki,42 +gundori,42 +gundam epyon,42 +grimoire ~shiritsu grimoire mahou gakuen~,42 +grenadier,42 +green screen,42 +great mazinger (robot),42 +grandis granva,42 +gouken,42 +gouen no soleil,42 +gong cha,42 +golden lore,42 +glowing nails,42 +giygas,42 +ginmaru,42 +gillian chen,42 +giant otter (kemono friends),42 +ghost hands,42 +geshiko (girls und panzer),42 +george joestar,42 +georg prime,42 +ganyu (qilin) (genshin impact),42 +gamera,42 +gallia (saint seiya omega),42 +galarian darumaka,42 +galarian darmanitan,42 +gagamatsu,42 +gagaga,42 +gabal docker,42 +fuzuki yuu,42 +funuyu,42 +fukuyama naoto,42 +fukemachi,42 +fujita mariko,42 +fujinomiya neko,42 +fuji noyume,42 +fuji kakei,42 +fuchi,42 +frog boy,42 +free sex sign,42 +fortress 2 (sekaiju),42 +fortisselle,42 +forklift,42 +food delivery box,42 +flinch,42 +five of hearts,42 +fist fight,42 +fino ko,42 +feo ul,42 +fengsao hua tanzhang,42 +felt,42 +feena (shingeki no bahamut),42 +feelition,42 +favonius greatsword (genshin impact),42 +farmer (sekaiju),42 +fallen angel ero maid costume,42 +fallen-leaves,42 +failnaught (fire emblem),42 +eyes visible through headwear,42 +evolto,42 +evelyn (pokemon),42 +etou toshiko,42 +enta shiho,42 +ene mizunoawa,42 +endend (shinia),42 +emera (bombergirl),42 +embroidered legwear,42 +ema yuzuru,42 +elsie-san (oshiruko),42 +elec man,42 +eleanor albertine le blanc de la blois de la valliere,42 +ela (rainbow six siege),42 +efreezerarts,42 +ebisumaru (ebisumaru3),42 +ebisu kana,42 +ebine toshio,42 +duzimura,42 +dream c club gogo.,42 +drakeposting (meme),42 +drain (evork festa),42 +double teacher life,42 +double barrels,42 +dot-matrix,42 +dosu (yodosu),42 +doctor (granblue fantasy),42 +dnangel,42 +disembodied torso,42 +dilation insertion,42 +dian cecht,42 +deviantart,42 +deunan knute,42 +den1208,42 +demimond23,42 +delmo commander,42 +deki (dekiebi),42 +defibrillator,42 +dearche kings claudia,42 +dead line,42 +daruma-san ga koronda,42 +dangling eye,42 +dalian,42 +daikokuten (fate),42 +daiiichukiii,42 +cycloneyukari,42 +cyclamen (flower knight girl),42 +cuso4 suiwabutu,42 +cuba (hetalia),42 +cthulhu (poptepipic),42 +crane stance,42 +cow skull,42 +cototiworld,42 +cosmos (dff),42 +cornelius alba,42 +core (girls' frontline),42 +cook,42 +control stick,42 +confessional,42 +commissar,42 +comiket 85,42 +colossus (granblue fantasy),42 +collateral damage,42 +cockroach girl,42 +clynxen,42 +clothes shop,42 +cleopatra (third ascension) (fate),42 +cleavage (game),42 +clear,42 +claudia (saga),42 +choko (last period),42 +chocolate on ass,42 +choco fashion,42 +chiyo (miichiyochin),42 +chiba sayaka,42 +chi ya,42 +chi gura-ya,42 +chi (hnnmemi),42 +chi (chiwa),42 +chi-rol,42 +chewbacca,42 +chelodoy,42 +charles ausburne (azur lane),42 +cen (cenll),42 +ccjn,42 +caterpillar girl,42 +castille (phantom brave),42 +carrot works,42 +carmilla (shepherd0821),42 +capoki,42 +capitan wei,42 +canopri comic,42 +camomi,42 +calling card,42 +c.c. 
(cosplay),42 +c-3po,42 +byeon dha,42 +bvucki36gzoeq1c,42 +buta-don,42 +busou kanojo,42 +buntatta,42 +bunny pajamas,42 +bucket spill,42 +brown eyeshadow,42 +broken vase,42 +broca (arknights),42 +breast reduction,42 +brady (fire emblem),42 +borisx,42 +boridongja,42 +booth babe,42 +bonza,42 +bohemian rhapsody,42 +boco,42 +boba fett,42 +blaster master zero,42 +black rhinoceros (kemono friends),42 +bitou daisukenojou,42 +bioshock 1,42 +bingbingzi,42 +besthetz,42 +bertille althusser,42 +berlinetta (pixiv fantasia),42 +beer keg,42 +beer crate,42 +beauty (zoza),42 +be (ronironibebe),42 +batsuma,42 +bartholomew kuma,42 +barber,42 +baicha oqqa,42 +bado (kotoba noriaki),42 +bad girl,42 +azir,42 +ayase eli (cosplay),42 +avant garde (artist),42 +aurastack,42 +atorosu,42 +atlanta (warship girls r),42 +asuto3,42 +asuna (sao) (cosplay),42 +asou kasumi,42 +asmodeus (megido72),42 +ashiga oreta,42 +aschen brodel,42 +asami yuriko,42 +artifact (genshin impact),42 +armored legwear,42 +arima natsubon,42 +arima kishou,42 +ardyn izunia,42 +archer (dragon nest),42 +arakumo gakuen soccer uniform,42 +arabian,42 +aqua (konosuba) (cosplay),42 +aotsuki kaoru,42 +aono yami,42 +aoman de cangshu,42 +aoi kyouka,42 +ano hito,42 +anjou (yancha gal),42 +anjerain,42 +animal balloon,42 +anhao1224,42 +angel cage,42 +andou aiko,42 +anata to mita sakura,42 +amu (doubutsu sentai zyuohger),42 +amedama (akaki 4207),42 +ambush,42 +amazume ryuuta,42 +amaura,42 +amatou3,42 +amano takumi,42 +amamiya rindou,42 +amamiya aki,42 +amakase miharu,42 +amagami rukichi,42 +alternate wing color,42 +alice in musicland (vocaloid),42 +alex milne,42 +alessa gillespie,42 +alcina dimitrescu (cosplay),42 +alchemist 2 (sekaiju),42 +albert (shingeki no bahamut),42 +akkgsyk,42 +akira-riku,42 +akimoto (akimomomoto),42 +akika 821,42 +akihabara dennou gumi,42 +akieru nomaki,42 +akiba's trip 2,42 +akashiro sen,42 +ainili,42 +ago maguro,42 +ag 00000,42 +african elephant (kemono friends),42 +aek-999 (babe driver) (girls' frontline),42 +aegissanp,42 +admiral minami kazusa,42 +adelbert (madoka magica),42 +actinium89,42 +acrux,42 +ace (fft-0),42 +9ji,42 +9degree,42 +8gou,42 +81 (mellowry),42 +66ta1yak1,42 +5pb.,42 +04bou,42 +zoids shinseiki/zero,41 +zogok (okekan),41 +zhuge liang,41 +zettai muri no akira,41 +zechs merquise,41 +zarashi,41 +zaku ii fz kai,41 +z16 friedrich eckoldt (warship girls r),41 +yuzuki yukari (cosplay),41 +yuwan 2333,41 +yuuki (nijiiro palette),41 +yuuhi korona,41 +yuria of londor,41 +yupii,41 +yuni (irohasuiroiro),41 +yumeoi kakeru,41 +yumemizuki,41 +yukimura hajime,41 +yukimaru (gojo),41 +yuki (12cut),41 +yuki1977,41 +yui (daijun),41 +yuge sasatarou,41 +yufu kyouko,41 +yu-gi-oh! 
duel links,41 +you'a,41 +yoshishi (yosisitoho),41 +yoshijima ataru,41 +yoshi (nagatoro),41 +yoohei (pizzadev),41 +yonomo,41 +yoneyu,41 +yokoshima tadao,41 +yoda,41 +yj (yojo san),41 +yin lan xue,41 +yghm,41 +yellow buruma,41 +yasuzumi yoriko,41 +yamamoto akane,41 +yamaki suzume,41 +yaho (yaho0211),41 +yagami kagami,41 +yabuki shingo,41 +xyufsky,41 +xuelan,41 +xiwang xintu,41 +ximubingo,41 +xehanort,41 +wudi sao nian,41 +wozora,41 +wotori,41 +wormadam (plant),41 +wings of iron: hazy tales (umamusume),41 +windbreaker,41 +white whale,41 +white buruma,41 +whipberry,41 +when mama isn't home (meme),41 +whac-a-mole,41 +wenu (kirikirimai),41 +wednesday addams,41 +wauwa,41 +watercolor effect,41 +wata (akawata),41 +wang chen,41 +walter sullivan,41 +wallabee beetles,41 +wahiko (black bastard),41 +wagashi (okashina),41 +vsk-94 (night on the silver bay) (girls' frontline),41 +voice (vocaloid),41 +viorie,41 +viola (trusty bell),41 +vincent t (oriaarts),41 +vesper (pixiv3568),41 +vera (punishing: gray raven),41 +venus versus virus,41 +vee (vtuber),41 +valkyrie drive -bhikkhuni-,41 +vajra (summer) (granblue fantasy),41 +vagina dentata,41 +uuroncha,41 +usutominsutaa,41 +ushijima wakatoshi,41 +uri (uryu002),41 +urashima kanako,41 +upside-down book,41 +unown x,41 +unneul,41 +unagi189,41 +umami (sakeikura),41 +ueyasu,41 +uesugi ren,41 +uemoto masato,41 +uehara ayuko,41 +uchiha sasuke (cosplay),41 +u-doku,41 +type-1 energy sword,41 +two-tone tail,41 +twitch username,41 +twirling weapon,41 +tuzki,41 +tuoni,41 +tugmix,41 +tsuyazaki kokage,41 +tsukushi sasa,41 +tsukito (leaf moon82),41 +tsukinogi (arknights),41 +tsuki hana,41 +tsubutarou,41 +tsuburaya mana,41 +trombe,41 +troll (warcraft),41 +triple vaginal,41 +trento (summer's hotness?) (azur lane),41 +traveler (artbbt),41 +tr-6 woundwort,41 +toyosu toyosu,41 +towako (akane shinsha),41 +touma (tsunamiharuru),41 +total drama,41 +tosh (arc the lad),41 +torte (triggerhappy),41 +torn skin,41 +tori (qqqt),41 +tool kit,41 +too many hearts,41 +tone (kancolle) (cosplay),41 +tomoe hiyori,41 +tokiziku,41 +tlman,41 +tktk135,41 +timaking,41 +thunderbird type-blue,41 +thunderbird,41 +thumb,41 +theodolite,41 +thejunebug,41 +the moon (tarot),41 +the high priestess (tarot),41 +the hierophant (tarot),41 +the flash,41 +the dark mangaka,41 +tettabuzz,41 +tetsu (cencoroll),41 +terrakion,41 +tenya mizuki,41 +tennosuke (tejons),41 +tei-o,41 +teddyellow,41 +tavn,41 +tautiki,41 +taro (disgaea),41 +tarantulaines,41 +tapu bulu,41 +tanya (darker than black),41 +tanima yuri,41 +tamichan,41 +tamaxi,41 +tamaoka kagari,41 +takuan (takuan0907),41 +takeshita kenjirou,41 +takeru (hashiru11275),41 +takenoko seijin,41 +takemoto yasuhiro,41 +takarada rikka (cosplay),41 +takanashi rikka (cosplay),41 +takamatsu (yamajiai),41 +takahashi (ichigo no katamari),41 +taka (copyrobot),41 +taiki (juuni kokuki),41 +taihei tengoku,41 +tachibana rino,41 +tachibana chizuru (kimi to boku),41 +tachibana akira,41 +t miyanagi,41 +t.o.d,41 +t-rex na kanojo,41 +t-5000 (girls' frontline),41 +sylas (league of legends),41 +sweetest music,41 +suupii,41 +sutera sea,41 +susukihotaru,41 +susan (rakeemspoon),41 +surre academy uniform,41 +suppi,41 +suneru,41 +sully (fire emblem),41 +suiiryu,41 +suica koubou,41 +sui hi sf,41 +sui (aruko91),41 +sugar sound,41 +sucking on multiple breasts,41 +su konbu,41 +su ggushi,41 +stufquin,41 +stuffed raccoon,41 +stitch (lilo & stitch),41 +stir255,41 +stealstitaniums,41 +starstruckdon,41 +sprite (drink),41 +spot-billed duck (kemono friends),41 +spirit chiasma,41 +sovetsky soyuz (azur lane),41 
+souto,41 +south114,41 +souseiki aster gate,41 +souma yuki,41 +souki lankni,41 +souji (senran kagura),41 +sosser,41 +sorata123,41 +sonokawa momoka,41 +sonnano ari,41 +soma (ar tonelico),41 +soaryuna,41 +snoot challenge,41 +smi (enraku),41 +sll,41 +slee,41 +siri,41 +sippo-soft,41 +siosiosolty,41 +singed,41 +simplecar,41 +simisear,41 +simca,41 +silence (frosted breath) (arknights),41 +sig sauer p220,41 +sierra,41 +shokikanes,41 +shocker,41 +sho yai,41 +shizuri (neet de otaku na kunoichi to naze ka dousei hajimemashita),41 +shiyashiki,41 +shirotsuki shirone,41 +shiratama (irodoli),41 +shippitsu,41 +shiotsuki kazuya,41 +shinoe nun,41 +shin (hokuto no ken),41 +shin9tani,41 +shimizu naotaka,41 +shimizu megumi,41 +shimakaze (warship girls r),41 +shikimori kazuki,41 +shifumame,41 +shidare (youh4016),41 +shidaccc,41 +shibuya hajime,41 +shibamoto thores,41 +shario finieno,41 +sharin no kuni himawari no shoujo,41 +shannan (fire emblem),41 +shakti (elsword),41 +shadow queen,41 +sette (nanoha),41 +setsuri (tubaki),41 +serenade (sinohi),41 +senri mana (princess connect!),41 +sendou airi,41 +semovente 75/18,41 +selkie,41 +seijo senki,41 +search bar,41 +sea monster,41 +scuffed,41 +scp-076-2,41 +scal2let,41 +sayonara444,41 +sawamaharu,41 +satou (danganronpa),41 +satonishi,41 +sasurai,41 +sashiromiya sasha,41 +sarikyou,41 +sardine,41 +sapphism no gensou,41 +santos,41 +sanoharu,41 +sanasanayukikuni,41 +sana (tiny evil),41 +saliva on penis,41 +saki tsurugi,41 +sake (utopia modoki),41 +sakatsu ohane,41 +saitou tsukasa,41 +saiko aida (pkmn soda),41 +saigo (ip police tsuduki chan),41 +sai tamako,41 +safurantora,41 +saegusa mikoto,41 +sader,41 +s sasaki 09140,41 +ryuutetsu,41 +ryuukitsu koushu,41 +rye (hyn uka),41 +rwby ice queendom,41 +ruvik,41 +ruochongsang,41 +running blades,41 +runerigus,41 +ru 251,41 +rpr,41 +royal flare,41 +roxy,41 +rokuroku (xd p),41 +rodriguez (kamwing),41 +rocket girls,41 +robonyan,41 +roamu 65,41 +rm (rm32),41 +rita rossweisse (spina astera),41 +risem,41 +riry,41 +ringo roadagain,41 +rin mokkomoko,41 +rin (muse dash),41 +rimocon (vocaloid),41 +riding boots,41 +rhombus,41 +reminiscence202,41 +remedei,41 +rekareka,41 +reireimu,41 +rei (farta litia),41 +reeze (reezely),41 +reen kadorer,41 +red hare (fate),41 +raymie0124,41 +rascala39,41 +rare candy,41 +ransa,41 +ramnik5,41 +raidensan,41 +radioactive,41 +racco,41 +r2sais,41 +r-one,41 +queen qko,41 +quality cabbage,41 +qiu yue (vtuber),41 +qianze chia,41 +pzkpfwi,41 +pulling back,41 +prunus girl,41 +prinz heinrich (fireworks and tapestries) (azur lane),41 +powder,41 +pouring onto another,41 +popuri (fushigi mahou fun fun pharmacy),41 +poppi pipopapo,41 +popogori,41 +pop sound blossom (idolmaster),41 +pool party caitlyn,41 +pongari,41 +pollution,41 +plawres sanshirou,41 +plasbott,41 +pip bernardotte,41 +pingtsi (chainsaw man),41 +pinchuu,41 +pig tattoo,41 +pig boy,41 +phantom gundam,41 +phantasy star portable,41 +phantasy star online 2 the animation,41 +pet (sekaiju),41 +peninsula (disappearedstump),41 +pemoko,41 +pekoo (pekota),41 +pegasus koga,41 +payday 2,41 +patio,41 +parvati iv,41 +parfait (lamune),41 +papion,41 +pantalone (genshin impact),41 +panel gag,41 +palulap,41 +paintbrush hair ornament,41 +ozawa yumi,41 +oyasumi punpun,41 +outsuki,41 +outside of play area,41 +ourai no gahkthun,41 +ougi (u to4410),41 +otogi ryuuji,41 +otoboke-san,41 +otamashimai,41 +oshino hazure,41 +os9,41 +orisu atto,41 +ootori chacha,41 +ootori amane,41 +ooshima towa,41 +ooki1089,41 +onyuuuu,41 +onizuka ao,41 +onizaki kirara,41 +olesyaspitz,41 +okuno naru 
(exoprsa),41 +okku,41 +oga tatsumi,41 +odaiba girls high school uniform,41 +octopus devil (chainsaw man),41 +nyanko (marl kingdom),41 +nuts (hazel-nuts),41 +numazume,41 +nukoji,41 +nono kotori,41 +nonexistent memories (jujutsu kaisen),41 +nonbiri monban,41 +noir39,41 +nogi wakaba,41 +nogami shouko,41 +no coat,41 +niya (blue archive),41 +nishi tanuki,41 +nirak,41 +nipio,41 +ning hai (moon palace rabbit) (azur lane),41 +nikujaga (food),41 +nijou izumi,41 +nijino yurika,41 +niiyama nico,41 +niimura (csnel),41 +nigel uno,41 +nia i,41 +nezielmi,41 +new jersey (pacific),41 +nerokuro,41 +neoko,41 +nemu (haibane),41 +nejiresyake,41 +neji (ultramarinesunset),41 +necronomicon,41 +nayru,41 +natsumikan,41 +natsume asato,41 +natsu (360c),41 +nasuuni,41 +nashirasauce,41 +nashiki (5tella),41 +narumi nakuru,41 +naoya come,41 +nao akinari,41 +nao (otosuki),41 +nanashin,41 +nanao parakeet,41 +nanami mizuki,41 +naname (fossama1),41 +nanaly (sennen sensou aigis),41 +nanakagi satoshi,41 +namiko,41 +nameco (nameco h),41 +nam (nam990902),41 +nakakouji ayano,41 +nakahara (fukufuku),41 +nakadera akira,41 +nagumo (qmzp10),41 +nagatsukiariake,41 +nagata ozu,41 +nagachika hideyoshi,41 +naga (staygarden),41 +mzoo39,41 +mz (yeye ai chipao mian),41 +mythic live,41 +mush,41 +musashino kazuhiko,41 +multiple penis fellatio,41 +moyatarou,41 +movie thief,41 +mouse hood,41 +morphin e,41 +morizono rikka,41 +morisu,41 +morimasakazu,41 +moric,41 +morganite (houseki no kuni),41 +monster world,41 +monotosu,41 +monmon,41 +momonoko noko,41 +momochi zabuza,41 +moboj13,41 +mizutani tooru,41 +mizuno minato,41 +mizukoshi mio,41 +mizoreshi,41 +mizoredama,41 +miz,41 +mixer (cooking),41 +miss surfersparadise,41 +misoshiru (meridianchild312),41 +misono chiaya,41 +mirrrrr,41 +miranda lawson,41 +miori (alice parade),41 +minono aki,41 +mimizou,41 +mime (fft),41 +mikajima saki,41 +mikage (shibi),41 +mika (under night in-birth),41 +mika (lycoris recoil),41 +miiru,41 +mihirogi uta,41 +miffy,41 +mido (mido chen),41 +midnight (midnightstream3),41 +michigan,41 +mephisto pheles,41 +meme-tan (bana nan26),41 +mejina,41 +meiyan (boyimachao),41 +meily,41 +mcrc science,41 +mcgillis fareed,41 +mayumio88,41 +maya (tirolpop),41 +matutoya,41 +matsubara ryuu,41 +matayoshi (nopple 1000),41 +masamika10086,41 +marz von ludowing,41 +mary anning (fate),41 +marusan,41 +maruput,41 +maruna (maru01),41 +maroyan,41 +marnie (palentine's 2022) (pokemon),41 +marinon,41 +marija (muse dash),41 +march hare (alice in wonderland) (cosplay),41 +marble (stone),41 +marble (marblesized),41 +manmaru tamago,41 +maniwa kamakiri,41 +manda (luts7602),41 +manami sangaku,41 +mamo (fortune-mm),41 +mamezou (mamechan182),41 +mallard,41 +male warrior (disgaea),41 +maldives,41 +malboro,41 +makoto (dandelion),41 +magma chipmunk,41 +magical tale,41 +magical drop,41 +mad maggie (apex legends),41 +mad (kusakabe),41 +macbook,41 +mac-11,41 +mabuta kayumi,41 +m79,41 +m4a1 (suspender of time) (girls' frontline),41 +lugh (fire emblem),41 +lucia (fire emblem),41 +love cube,41 +loup,41 +losse (personal ami),41 +longzaibei01,41 +lleu,41 +liwendala,41 +liuliu,41 +lipstick mark on breast,41 +link (shounen captain),41 +ling (vivianling),41 +linch,41 +life neko72,41 +liangfen,41 +lepoule,41 +leo (reiga),41 +lemonice,41 +legioss,41 +leech,41 +lecturing,41 +lean (konosuba),41 +lay's,41 +lawnmower,41 +latutou1,41 +lamento,41 +kyou,41 +kyo (kyo21413),41 +kyl490,41 +kyama,41 +kusubii,41 +kushibi,41 +kusari,41 +kuroi mizore,41 +kurasaki cosmos,41 +kuramoto chinatsu,41 +kuramachi anna,41 +kumojacky,41 +kumashiro 
izuta,41 +kumahara,41 +kujira pasta,41 +kuimu lang,41 +kubomi 943,41 +kryztar,41 +krt736,41 +kris (fire emblem) (female),41 +koyama tomosato,41 +kousaka rui,41 +kouno ruruka,41 +kotomura akane,41 +koto (marron marron),41 +kotengu,41 +kotamun,41 +kosmo1214,41 +kosanmaka,41 +kondou totetsu,41 +kon (tokyo ravens),41 +kon-el,41 +komiya nigi,41 +komaruri,41 +komari mhy,41 +komainu ears,41 +kokura asahi,41 +kokemomo (makesound),41 +koizuka koyume,41 +koiken otome,41 +kogure yuuya,41 +kobapyon,41 +knee spikes,41 +knck,41 +klein (sao-alo),41 +kiyomasa (dangan),41 +kive,41 +kitsune23star,41 +kitsu+3,41 +kisaragi you,41 +kirome toru 2,41 +kirihara izumi,41 +kinese (katasutorohu),41 +kimimaru,41 +kimae,41 +kikwi,41 +kiki okina,41 +kijinkutsu,41 +kido jirou,41 +kiba mikoto,41 +kettsu,41 +kentap,41 +kenchi,41 +keishin academy uniform,41 +kbyd (idolmaster),41 +kazuraba kouta,41 +kazumi (madoka magica),41 +kazaya,41 +kazari asami,41 +kawasaki raimu,41 +katsuragi mari,41 +katana (life is beautiful),41 +katakura nayuuki,41 +kasugai haruko,41 +kasane ted,41 +karui (naruto),41 +karthus,41 +karon official,41 +karon (vtuber),41 +karioda,41 +kari okome,41 +karas,41 +karameru (character),41 +karada asobi,41 +karaage karara,41 +kappa mame,41 +kano-0724,41 +kanno naoshi,41 +kanbayashi mizuki,41 +kanaya azami,41 +kamen rider super-1,41 +kamekichi27,41 +kama (fate) (cosplay),41 +kalalasan,41 +kakyoxx,41 +kajishima masaki,41 +kagamine rin (if),41 +k ototo,41 +k liss s,41 +k0nfette,41 +jusc0,41 +juniper (artist),41 +julis-alexia von riessfeld,41 +ju-ok,41 +jsscj,41 +josou jinja,41 +john kafka,41 +john k. pe-ta,41 +jiu fanglianhua,41 +jinnai10,41 +jinlu tongzi,41 +jinguuji kuesu,41 +jie xian (tsuki),41 +jian,41 +jiachong jun z,41 +ji yue,41 +jesus burgess,41 +jeritza von hrym,41 +jalm,41 +jakuzure nonon (cosplay),41 +j-max japan,41 +izubuchi yutaka,41 +iyokawa,41 +iwashi gimu,41 +itsuka neru,41 +itou (golem inc),41 +issho ni gohan tabetai,41 +isamu dyson,41 +irorigumi,41 +iroha (samurai spirits) (cosplay),41 +iroha741852963,41 +iokawa karada,41 +io (onisarashi),41 +inzanaki,41 +inverted moon of the heavens,41 +inuyama kuroe,41 +insect collection,41 +incro300,41 +inaho (world flipper),41 +impossible necktie,41 +imo mushi,41 +imaoka,41 +imagawa yoshimoto (sengoku otome),41 +imac,41 +ilia amitola,41 +ikushima (danshi koukousei),41 +ikk,41 +iiros,41 +iida riho,41 +ido e itaru mori e itaru ido,41 +ido (nothing679),41 +ide naomi,41 +ichiya (obey),41 +ichijou kou,41 +ichijou kokona,41 +ichigozaka middle school uniform,41 +ichiba ko'ushi,41 +icelandic flag,41 +ice flower,41 +ice (doragon),41 +iberis (ogami kazuki),41 +ibata shouta,41 +hum (ten ten),41 +huang (darker than black),41 +hrna,41 +houjou kuniko,41 +hototogisu (7kanu 7ra),41 +hosshi (nariagari),41 +hoshinokaoru,41 +hoshiguma yuugi (cosplay),41 +honey badger (gun),41 +honda tamaki,41 +homerun ken,41 +holding sex toy,41 +holding nose,41 +holding frame,41 +hokuto (tokuho),41 +hokkana,41 +hobble dress,41 +hiya,41 +hitoki (kokusei1977),41 +hitachi izuru,41 +hisaki (morenabe),41 +hiota (kuhi 0301),41 +hino (moca),41 +hinata mizuiro,41 +hinata ema (aikatsu friends!),41 +hime (ohime pkg),41 +hikaru 310,41 +hijiri (xxhizirixx),41 +hiiragii (hiiragi 0404),41 +hige (com),41 +hievasp,41 +hieda no amu,41 +hidaka hokuto,41 +hibino hibiki,41 +hibika,41 +hibana (enen no shoubotai),41 +heridy,41 +helena blavatsky (fate) (cosplay),41 +headmistress fiora,41 +hazama (hazamazama),41 +hayashi tsugumi,41 +hayana neru,41 +hatsune haruka,41 +hatou kei,41 +hathor (p&d),41 +hashihime,41 +haruno 
suzune,41 +harukaze sensation!,41 +harpie queen,41 +harold berselius,41 +harakune (mugennero),41 +harabacho (gkfkqkch1),41 +handheld fan,41 +hand over another's eyes,41 +hancho,41 +hanazono serena,41 +hanatsuki,41 +hanabusa (xztr3448),41 +hamster (hanmster),41 +haku wi,41 +hair bun girl (nagioka),41 +hage tashuumi,41 +h.dupp (nama aakiruu),41 +h&k mark 23,41 +gyuunyuukeepaa,41 +gyudan (t1k 7),41 +gvzzgl,41 +gustaf,41 +gureshi db,41 +gunvolt,41 +guild cq,41 +gs-mantis,41 +grimace (mcdonald's),41 +gremyashchy (azur lane),41 +green pepper,41 +grandpa gohan,41 +granberia,41 +goyacchi,41 +goodsun sunkumi,41 +golden tabby tiger (kemono friends),41 +gokkun tororojiru,41 +going merry,41 +gohanduck,41 +godtier (homestuck),41 +goat legs,41 +gnosis (arknights),41 +girl who trained on mt. haku (touhou),41 +ginro (dr. stone),41 +gigapuri,41 +ghost (modern warfare 2),41 +getter robo arc,41 +geometric pattern,41 +genzaburoh,41 +gentletiger,41 +gensou suikoden tierkreis,41 +gemini kanon,41 +gekoge satoru,41 +gata2013,41 +garuda,41 +garrett hanna,41 +garnet (sumaga),41 +garickson,41 +galarian farfetch'd,41 +fuzinoe 13b,41 +fuugetsu (sanmunyudai),41 +futaba morishita,41 +funkysatou,41 +fumizuki academy uniform,41 +fukushima uichi,41 +fukumimi,41 +fukujon,41 +fukasugi aiko,41 +fujishiro otone,41 +fujisawa naoyuki,41 +fujisaki (hjsk),41 +fujimori yuu,41 +fujii shino,41 +freudia neuwahl,41 +foro (planet cage),41 +fondue,41 +flotation belt,41 +fleurdelis (yu-gi-oh!),41 +fist shaking,41 +fishnet swimsuit,41 +finch (xenoblade),41 +filicia heideman,41 +figure 17,41 +field radio,41 +fiamma of the right,41 +ferry (halloween) (granblue fantasy),41 +female knight (guardian tales),41 +female crusader (dungeon and fighter),41 +fcp,41 +fate/grand order: first order,41 +fate/grand carnival,41 +fat step-sister (orizen),41 +farina (fire emblem),41 +fang zhenjun,41 +fall (aki),41 +fake play button,41 +fake hisui (cookie),41 +fairyfloss,41 +fairy knight lancelot (third ascension) (fate),41 +face cutout,41 +eyelash curler,41 +evanyi,41 +eva 03,41 +eska (cookie),41 +ero condo,41 +erect!,41 +epis,41 +enyon moon5,41 +enryuu (rmxs3488),41 +enone,41 +engawa (rarenago),41 +eminya 27,41 +ella of the sky,41 +elite beat agents,41 +el (canon jihad),41 +eight-tailed fox nari,41 +eelektrik,41 +edel (ikeuchi tanuma),41 +ebihara minase,41 +dumuzid (fate),41 +dream c club uniform,41 +drampa,41 +dragon install,41 +dragon fruit,41 +dragon ball z fukkatsu no f,41 +doujin work,41 +dotaku (wran8845),41 +dom (animal crossing),41 +dokuga,41 +dina (sennen sensou aigis),41 +digimon survive,41 +diforland,41 +diamond dust,41 +dew (7302235),41 +depe,41 +denkichi,41 +demons driver,41 +dekomegane,41 +dedeyong,41 +dawapat,41 +darr1o,41 +darius burst,41 +darandy,41 +daimon masaru (danganronpa),41 +daikoku (housamo),41 +dai nikucho,41 +dadamori,41 +daakuro,41 +d-stop,41 +cybertron,41 +cure mofurun,41 +cum in shoe,41 +crash landing,41 +cranberry,41 +cowardly lion,41 +cosmos (the crying moon),41 +corn potako,41 +core1013,41 +congqian you ke xing,41 +comic megastore h,41 +comet-kun,41 +colorful drop (module),41 +cokuto1,41 +cockadooodledoo,41 +cloneko (zelmeledf2),41 +cloche hat,41 +clitoris pull,41 +claw hammer,41 +claire rieveldt,41 +ciao churu,41 +chuuko demo koi ga shitai!,41 +chua churam,41 +christian private white clover academy school uniform,41 +chokuboron ryou,41 +chocolat (noucome),41 +chloe lilith stella,41 +chisato and takina kicking each other's butt (meme),41 +chiru,41 +chinese food,41 +chilly (kirby),41 +chijimetaro,41 +chiisana kanojo no 
serenade,41 +chibigou,41 +chi no,41 +chest cannon,41 +chapayev (white cavalier's respite) (azur lane),41 +chagoon,41 +ch-47 chinook,41 +censored anus,41 +cecelia (arknights),41 +ccaw,41 +cbj-ms,41 +caws (girls' frontline),41 +cawfield,41 +catumi (ta938 ka23),41 +catneylang,41 +carrot glace,41 +caren hortensia (amor caren) (first ascension),41 +captain ginyu,41 +captain commando,41 +cap105,41 +canvas (medium),41 +callarinc,41 +cabbage soft,41 +c-chaos,41 +butterfly on hair,41 +bula,41 +browning hi-power,41 +brocon,41 +brigitte stark,41 +breast punch,41 +brayanong999,41 +box on head,41 +bowsan,41 +bolbbangbbang,41 +bokiboki333,41 +boarded windows,41 +blue oak (sygna suit),41 +blue apple,41 +blowing candle,41 +blind prince,41 +blazing souls,41 +blank (ff9),41 +black claws,41 +bitaraga,41 +bisquii,41 +beta (joutarou),41 +belt bow,41 +bella (sennen sensou aigis),41 +belka dog,41 +beelzebub-jou no okinimesu mama.,41 +bbakasatang,41 +bb-28 (cosplay),41 +baywatch,41 +battlefield 3,41 +battle programmer shirase,41 +basculin (red),41 +bari (destiny child),41 +bare legs girl (kamizaki hibana),41 +barbata,41 +bannou bunka nekomusume,41 +banana takemura,41 +ban (bannyata),41 +ban (777purin),41 +balor (housamo),41 +baiola,41 +bai winchester,41 +bad end night (vocaloid),41 +bacher,41 +azuma (lily-white longing) (azur lane),41 +azling,41 +azkn,41 +ayatori (sensei heroism),41 +aurora (kanachirou),41 +astromech droid,41 +assassin's creed: revelations,41 +aska (anakoluth),41 +asai makoto,41 +asahina samidare,41 +arx-7 arbalest,41 +arukanu,41 +artillery imp,41 +arthas menethil,41 +arksign,41 +ariel org,41 +architect (frame arms girl),41 +arcana famiglia,41 +aratani tomoe,41 +arama (genshin impact),41 +aqua eyeshadow,41 +aqua coat,41 +aqua apron,41 +aoba shigeru,41 +aoba rinka,41 +ao (1226ao),41 +anubituf,41 +another code (elsword),41 +ano (sbee),41 +annie (destiny child),41 +annerose redrum,41 +ankha zone,41 +animal ear request,41 +andou ryuu,41 +ana (vvvvor),41 +amraam120c,41 +amopui,41 +amecha,41 +amano uzura,41 +amamiya yuki,41 +amado (shin),41 +altyane hetata,41 +alessi,41 +alderion-al,41 +akira (mr akira),41 +akira (aristole),41 +akechi mitsuhide (oda nobuna no yabou),41 +akatsuki no kiseki,41 +akatoki!,41 +akashiro yulice,41 +akashi maho,41 +akapug621,41 +akakitsu,41 +aiueo1234853,41 +aisawa natsu,41 +aircraft carrier summer princess,41 +ainudraws,41 +aguila,41 +a (naruto),41 +9s0ykoyama117,41 +7fuji 06,41 +5-volt,41 +4tb (4tera byte),41 +490,41 +zonzu,40 +zipper bikini,40 +zin (mame denkyu),40 +zi nu (qin shi ming yue),40 +zhu xiang,40 +zhu mianzi,40 +zgxuke,40 +zero the flash,40 +zero808w,40 +zamius,40 +yuzutosen,40 +yusukesan,40 +yuniyuni,40 +yumeshima kanata,40 +yumeno ruruka,40 +yukihiko (tyabobo),40 +yuito (yuitokobunasbs0),40 +yuichi (sp sakura yoshi),40 +yuia,40 +yui (real) (princess connect!),40 +yui (ogino atsuki),40 +youaresober,40 +yosugara shou,40 +yoshimi50,40 +yoruillust,40 +yomi yasou,40 +yohaku,40 +yensh,40 +yelsh,40 +yellow leaves,40 +ye zi you bei jiao ju ge,40 +yda,40 +yato (yama0x1),40 +yasiro (oyasiro35),40 +yashiki yuuko,40 +yashigaras,40 +yaowu,40 +yanggang,40 +yang nari,40 +yanagin (danshi koukousei),40 +yan kodiac,40 +yamo (sky2world),40 +yamaura tamaki,40 +yamashita akira,40 +yamasafu,40 +yamagishi chihiro,40 +yamada sakura,40 +yamabuki7979,40 +yama-michi,40 +yakkuro,40 +xooku,40 +xixing si yao meng,40 +xi chen chen,40 +wuming,40 +wolf's rain,40 +witches in 7th base,40 +windows 10,40 +whitebc,40 +wedge (ff7),40 +wawatiku,40 +wave motion gun,40 +wattson (pokemon),40 +water 
pump,40 +warrior (sekaiju),40 +wario land 4,40 +wani02,40 +wadachitokakeru,40 +vodka13,40 +vivian (lancerhd),40 +vhumiku,40 +versen,40 +venelana gremory,40 +velvet rose (idolmaster),40 +vaulted ceiling,40 +vault girl,40 +vasily (golden kamuy),40 +vanink,40 +vampire (eloi's blessing) (azur lane),40 +valsione r,40 +vagrant story,40 +usotsuki mii-kun to kowareta maa-chan,40 +uso ewin,40 +usasaki shiro,40 +urshifu (single),40 +urd (p&d),40 +unpasu,40 +unmaker,40 +uncharted,40 +unacha,40 +umemegn,40 +uhana,40 +uesugi kyoushirou,40 +ueno chiyoko,40 +type 90 kyu-maru,40 +type 79 (nine-tail fox) (girls' frontline),40 +type 0 fighter model 21,40 +tyamurai33,40 +two (drag-on dragoon),40 +two-tone thighhighs,40 +tv show,40 +tukno,40 +ttusee5,40 +tsukuyomi luna,40 +tsukudato,40 +tsukimushi,40 +tsuki miso,40 +tsuji tatsuya,40 +true damage (league of legends),40 +triangle-shaped pupils,40 +triangle!,40 +tri-brigade ferrijit the barren blossom,40 +train hb,40 +tracy reznik,40 +toy tank,40 +toujou mina,40 +toufuu,40 +totoya z,40 +torisu,40 +torimeiro,40 +torao (torakmn),40 +ton ton tontoro,40 +tomatomagica,40 +tomako,40 +tokoshibyra,40 +tokino,40 +togin,40 +tobari (pure pure),40 +tkd dkt,40 +tio (konjiki no gash!!),40 +tiger dojo,40 +threo (eternal's summer vacation) (granblue fantasy),40 +the iizumi,40 +tetrahedron,40 +testest,40 +tenshin kagehisa,40 +tendou souun,40 +tendou kaoru,40 +tebukuro,40 +tarrasque (fate),40 +tanith (fire emblem),40 +tamaki iroha (pajamas costume),40 +tama (mahoiku),40 +tama (gintama),40 +tales of the world radiant mythology,40 +takumin dx,40 +takanashi kozue,40 +takanashi kiara (cosplay),40 +taiso samurai,40 +taino kou,40 +tail pussy,40 +taemin,40 +tachi (weapon),40 +sylvie paula paula,40 +suzumi (ccroquette),40 +suzukuri karin-chan,40 +suzuha (nozomi tsubame),40 +summoner (dungeon and fighter),40 +sukocchi moruto,40 +suginakara (user ehfp8355),40 +sugar+spice!,40 +suetsugi konoha,40 +su meen,40 +stylish energy (module),40 +strelitzia (kingdom hearts),40 +stoat (kemono friends),40 +statuette,40 +star plus one,40 +star guardian poppy,40 +star fox assault,40 +stand my heroes,40 +stamp-sheet,40 +sports panties,40 +spleeny,40 +spider genome,40 +spider-man (toei),40 +spice,40 +spelunker,40 +space pirate (metroid),40 +sozoremi,40 +sorrau,40 +sophie (693432),40 +sonyntendo,40 +sonic riders,40 +soniani,40 +somasoutaro,40 +solid snake (cosplay),40 +soap (user kghh4755),40 +snow (housamo),40 +snare drum,40 +smilesmile1312,40 +smalley (azur lane),40 +slime knight,40 +sl10 d,40 +skc,40 +sirokuro daruma,40 +silvia de alisia,40 +silverpixia,40 +silicon magic,40 +sigina,40 +siesta (artist),40 +sie-sie,40 +shuangfeng,40 +showers-u,40 +shokuen shiwe,40 +shishi osamu,40 +shirushiru,40 +shiro (muku),40 +shirayuki shion,40 +shiratama (siratama ll),40 +shirasaya,40 +shiranui kazuki,40 +shinsuke (moccori),40 +shinonome (game hakkutsu tai),40 +shinka musume channel,40 +shinka musume,40 +shinaso (sachi-machi),40 +shin (irowanioedo),40 +shimizu tomoki,40 +shimanaka arihito,40 +shijiani,40 +shigumo (shigeru),40 +shhis (idolmaster),40 +sherad,40 +shellos (east),40 +shearing,40 +shaytan (sound horizon),40 +shamuichi,40 +seuga,40 +sethkiel,40 +serika (new year) (blue archive),40 +serie niai,40 +senri (nazerine),40 +sengoku koihime x,40 +sena tsubasa,40 +sekaiju no meikyuu x,40 +sekaiitinoki,40 +seiryouinryousui,40 +sei (shinkai parallel),40 +sega game gear,40 +scout uniform,40 +scorpion tsuchida,40 +school kid (pokemon),40 +school fanfare,40 +scathach (first ascension) (fate),40 +sayuuiede,40 +satouin 
reiko,40 +sato (sato 1 11),40 +sasugano ruki,40 +sasagawa satarou,40 +sarashi pull,40 +sanzhuangwangcat,40 +sano sho,40 +sanitized (splatoon),40 +sanhon,40 +sanada nako,40 +sana.c,40 +samui (naruto),40 +samo (shichun samo),40 +sami (advance wars),40 +same face,40 +sam desu,40 +salomon (housamo),40 +sally acorn,40 +salamandinay,40 +sakuraba himari,40 +sakura soujirou,40 +sakura mami,40 +sakura (superbunnys),40 +sakimichan (style),40 +sakazaki akira,40 +sakaumi,40 +sakata kaname,40 +sakanaya nakasa,40 +safai,40 +sadistic music factory (vocaloid),40 +sada noriko,40 +sachiko (omame),40 +sabi1234,40 +ryoune yami,40 +ryeon (bluetom1),40 +rui (gsr1982),40 +rudia,40 +rpg fudousan,40 +royal flush heroes,40 +rotom (frost),40 +rope around waist,40 +rook hunt,40 +ronaldo castroneves,40 +rolled up paper,40 +rocomani,40 +rocker-chic,40 +rnfhv,40 +rita bernal,40 +risk hunters,40 +risa (pokemon),40 +riri yo,40 +ringo orihara,40 +rin kyoutarou,40 +rin (princess connect!),40 +rick sanchez,40 +ribimura,40 +ribbed tank top,40 +rethnick,40 +renze l,40 +rensei,40 +ren zotto,40 +ren (733),40 +ren'ai boukun,40 +remon (10112),40 +reincarnation,40 +redoredo (godprogress),40 +realive,40 +ratchet altair,40 +raru0310,40 +rapp (grandia),40 +ranshin,40 +ranpu,40 +rangers (arknights),40 +ran nagisa,40 +ramutaizumu,40 +raimone (nekokirinv3),40 +raihan (pokemon) (cosplay),40 +raiden mei (danzai spectramancer),40 +raideen (series),40 +racing miku (2012),40 +quick ball,40 +qqqmei,40 +qmiqun,40 +qiu tong,40 +qiandaiyiyu,40 +purasu no konbu,40 +prisma illya (zwei form),40 +prinz rupprecht (azur lane),40 +presia zenoskis,40 +poyason,40 +poyadevil,40 +power (lu power),40 +poruserin,40 +ponzu (udon),40 +ponimu,40 +pokemon the movie: secrets of the jungle,40 +pokemon between legs,40 +pogchamp,40 +planet with,40 +pineapple (pine),40 +pikapika hoppe,40 +pig penis,40 +phonon (under night in-birth),40 +peter xiao,40 +petals in mouth,40 +period,40 +penguru 086,40 +penelope and me,40 +pen holder,40 +pekodam,40 +patrasche (re:zero),40 +patenusu,40 +pastels,40 +panties over bodysuit,40 +paisley,40 +paintbrush rack,40 +paint in hair,40 +paindude,40 +oxykoma,40 +overshirt,40 +ougi kaname,40 +otogi resurrection,40 +otani (gloria),40 +otaku (artist),40 +osatou (soul of sugar),40 +osananajimi (hanekoto),40 +orpheus (inazuma eleven),40 +orinpachu,40 +ootsuka akio,40 +oomikado aoi,40 +onuj2$,40 +onko,40 +oniyan,40 +on finger,40 +on fence,40 +on ceiling,40 +omochimochi,40 +omega.ep,40 +omase (mnnk umai),40 +omaehadareda-uso,40 +om (carbohydratism),40 +olly (ollycrescent),40 +okusama wa mahou shoujo,40 +oinari risuru,40 +ogura tubuan,40 +ogura anko,40 +ofuton zeb,40 +odajima kosuzu,40 +oda (101511a),40 +ochanomizu mirie,40 +observatory,40 +nyuu (pixiv12143565),40 +nyasa,40 +nurse angel ririka sos,40 +noroiko,40 +nono i831,40 +noko morokoshi,40 +noki,40 +noel anderson,40 +noda megumi,40 +no goggles,40 +nnoitra gilga,40 +nitta yasunari,40 +nishimo,40 +ninonuko,40 +nina antalk,40 +nikaidou shion,40 +nihnfinite8,40 +nigouu,40 +nerukichikatafukuma,40 +neon genesis evangelion: iron maiden,40 +nene romanova,40 +nekomiyanono,40 +nekomia (kaptivate),40 +nekomata okayu (cosplay),40 +nekkikamille,40 +negura meru,40 +neetsr,40 +nazoko,40 +nautica (transformers),40 +natsumoka,40 +natsume (tsu-na),40 +nasubi (1ra1ri1ko2cho1mi2na),40 +nasu kinoko,40 +naoharu (re barna),40 +naoel (naoel art),40 +nankaichimu,40 +nanjou kei,40 +nanika (nnkgh),40 +nancy1209,40 +nanairo fuusen,40 +namuya,40 +nami (cassette),40 +nakamura aika,40 +naitou maia,40 +nagase sayaka,40 
+nabeshiki (rakuneko yashiki),40 +naavs,40 +na-code (gurich),40 +mysteryctu,40 +mylene rafa holfort,40 +myaco9,40 +muto (uadocjon 21),40 +murasame reine,40 +mumen rider,40 +mulin,40 +mugishima,40 +mri,40 +mozuyun,40 +mothra (godzilla: king of the monsters),40 +mortal kombat 9,40 +morpho knight,40 +moriyama yuuki,40 +moriya,40 +morimaiko,40 +moriko06,40 +mori girl,40 +moonyan,40 +moon (yfsp7823),40 +monster world iv,40 +monotarou (danganronpa),40 +monkey d. garp,40 +mondo pop,40 +momoyuki (snow fox),40 +momo (last origin),40 +mokyusuke,40 +mogupon,40 +moekan,40 +moedog,40 +mochako (motyako),40 +mizonaki,40 +miyoshi saya,40 +miyazaki-san (mmyyzk),40 +miyano akihiro,40 +miyama tsubaki me,40 +miurin,40 +mitsuya takashi,40 +mitsunari miyako,40 +mirai millennium,40 +miogrobin,40 +mio sasuga,40 +mintchoco (mmn2),40 +mint (cerbi),40 +minna no rhythm tengoku,40 +minn (kangjm1107),40 +minma,40 +mineco000,40 +minashiro tsubaki,40 +minase nagisa,40 +minami hana (ghettoyouth),40 +mimura ryou,40 +mimiko (jujutsu kaisen),40 +millennium rod,40 +milk ko,40 +milephunter,40 +mikura0317,40 +mikuni saho,40 +mikoto (my-otome),40 +mikoshiba seijuurou,40 +miki-san (danna ga),40 +mikanururu,40 +mii gunner (smash 4),40 +mihono bourbon (ghosty and the magic of halloween) (umamusume),40 +mihan77108047,40 +mihaeru,40 +mia (miaflocon),40 +mgong520,40 +metarogu,40 +metallica,40 +met-tha,40 +mermaid misty (pokemon),40 +merlin (camelot & co) (fate),40 +merchant91,40 +meltymaple,40 +melty sweet (idolmaster),40 +meltryllis (second ascension) (fate),40 +mega sableye,40 +meemo,40 +mechanic (granblue fantasy),40 +mecha hisui,40 +mazeshi,40 +mayreel (guardian tales),40 +mayakaek,40 +max (ssss.gridman),40 +matterhorn (beach guard) (arknights),40 +matt frank,40 +matsuppoi,40 +matatabeat,40 +masa (p-piyo),40 +maryland (azur lane),40 +marron glace,40 +marrbl,40 +marisuku,40 +marie antoinette (formal dress) (fate),40 +marche mk14,40 +manzai sugar,40 +maniani,40 +manami tatsuya,40 +mamotte shugogetten!,40 +mameneko (leos vincent),40 +mallow (mario),40 +male protagonist (live a hero),40 +makaizou,40 +maka neko,40 +mahou arms,40 +mage (335656),40 +magallan (shaved-ice memories) (arknights),40 +mag (phantasy star),40 +maechuu,40 +madopen,40 +madam s,40 +ma2da,40 +m-shiganai,40 +lyre (fire emblem),40 +lulu season,40 +luki,40 +lucia (toaru majutsu no index),40 +lucent heart,40 +ltlx 7000 (girls' frontline),40 +lowro (en),40 +lounge,40 +lothric (younger prince),40 +lostdog121,40 +long fall boots,40 +loli ruri,40 +loincloth aside,40 +loggins (jojo),40 +lo-ta,40 +live for venus (idolmaster),40 +lionela heinze,40 +lineflo,40 +linca (atelier),40 +leris muma,40 +leonart,40 +leon (idolmaster),40 +leoleonardk10,40 +leo thasario,40 +leo (feeling goooood),40 +lent marslink,40 +lennah,40 +leki ixion,40 +leikangmin,40 +leicia,40 +leap frog,40 +leaf bra,40 +lcddem,40 +lazgear,40 +lanlanlancho,40 +lace ribbon,40 +kyrie,40 +kyer,40 +kv-1,40 +kuzumochi (kuzumochiya),40 +kuutei senki,40 +kushinaka,40 +kurotora865 90,40 +kurosu rino,40 +kurosawa sae,40 +kurosawa noriko,40 +kuro (blackpgmickey),40 +kuonji miyu,40 +kuon (hasumi (hasubatake39)),40 +kunoichi (game),40 +kumonosuke,40 +kumon kaito,40 +kumaane,40 +kukuruyo,40 +kujou ume,40 +kujiin mika,40 +kuchiki manabu,40 +kuberu e pastillage,40 +krampus (housamo),40 +kozakura ryou,40 +koyuki (snow fox),40 +kouzuki fukurou,40 +kougami kanon,40 +kougai,40 +kotomaru (sirouko9511),40 +kooei,40 +konoe sunao,40 +konna-nani,40 +komai hasuki,40 +koma (qqq111),40 +koinumaru-san,40 +koi de wa naku,40 +kogane mushi,40 
+knifehead,40 +kneeling girl (kancolle),40 +knee boobs,40 +kivo some 18 (voice actor),40 +kitsuta,40 +kitsunekotori,40 +kitsunegasaki tametsugu (tenka hyakken),40 +kitsun8,40 +kitakami (kancolle) (cosplay),40 +kiseno,40 +kisaragi gold star,40 +kiriya erika,40 +kirishima sakura,40 +kirishima mana,40 +kirishima (kancolle) (cosplay),40 +kirin (kemono friends),40 +kirihara youichi,40 +kirihara torajyuro tatsumune,40 +kirara ookami,40 +kira miki,40 +kinosuke (pattaba),40 +kinoko (benitengudake),40 +kino hazuki,40 +kiniro (mushihime-sama),40 +kinezono rio,40 +kimopoleis,40 +kimakkun,40 +kika,40 +kiiro (cocoa080),40 +kii (theory),40 +kibazoku,40 +kevin graham,40 +kento matsuura,40 +kendo (artist),40 +keienu0,40 +kazunoko (saria001),40 +kayanuma kiko,40 +kayama kouji,40 +kawaiipony2,40 +kawaii voltage wattson,40 +kawai miruku,40 +katsuragimay18h,40 +katsuragi (azur lane),40 +katekari yuusuke,40 +kashisu mint,40 +karya,40 +karl gerat,40 +kari (artist),40 +kapool,40 +kanzaki souma,40 +kankurou (naruto),40 +kanae funwa,40 +kana (maple926),40 +kameron,40 +kamen rider skull,40 +kamakura (city),40 +kakeyu,40 +kajiri kamui kagura,40 +kai (pixiv12466647),40 +kagura (oneechanbara),40 +kagura (gintama) (cosplay),40 +kago-tan,40 +kagerou (kancolle) (cosplay),40 +kaette kita ultraman,40 +kacchuu,40 +k.k. slider (animal crossing),40 +k'lyn,40 +juubako,40 +jtr,40 +josef axner,40 +jon eve,40 +jojiart,40 +joanna (persona 5),40 +jinrai (owl12),40 +jinguu yakumo,40 +jigoku inu,40 +jichi,40 +jiaoshouwen,40 +jian jing,40 +jeff17,40 +jamaica (azur lane),40 +jaimito,40 +jackary,40 +jack (jackdou),40 +izuna masaru,40 +izumo akatsuki,40 +izumi mitsuki,40 +izumi (kisshot1126),40 +izaac,40 +iwashi 111,40 +itsuwa (continue),40 +itou shizuka,40 +itou (itsuko),40 +isuzu yuri,40 +israfil (housamo),40 +isami don,40 +iron fence,40 +iotower,40 +inte (whitewolftail),40 +insect on head,40 +inraku no ketsuzoku,40 +inoue yukihiro,40 +inflatable flamingo,40 +indonesian high school uniform,40 +inazuma (kancolle) (cosplay),40 +inana umi,40 +impossible pants,40 +ikkaisai,40 +ikegami noroshi,40 +ikari shinji raising project,40 +ikanomaru,40 +idoko,40 +ichino (ichinon),40 +ichikawa kazuhide,40 +ian samson,40 +i reibun,40 +i-beam,40 +hyuu (sing-dog),40 +huyusaki taiga,40 +hutaba123,40 +hull shoes,40 +huleito,40 +huizhiyin,40 +huangjin shizi,40 +hot tub,40 +hot rod (transformers),40 +hosoi mieko,40 +hosimo,40 +hoshikuzu (milkyway792),40 +hoshigaki (kyuukp),40 +hoshi no girls odyssey,40 +horoscope,40 +horn bell,40 +horiguchi hiroshi,40 +honey lemon,40 +honda takashi (enorea),40 +homura shinji,40 +hole in sock,40 +holding ruler,40 +holding pokedex,40 +holding hair brush,40 +holding gourd,40 +holding cd,40 +holding boxcutter,40 +holding bow (music),40 +hitbox,40 +hiseki erio,40 +hisato ar,40 +hirume,40 +hirayama eiji,40 +hira (otemoto84),40 +hips in air,40 +hinomoto aoi,40 +himuro kinu,40 +himishiro,40 +himeno shikimi,40 +hiko (hiko224556),40 +hikaru (mikan0407),40 +hiiragi shou,40 +hiiragi najica,40 +hidehirou,40 +hiccup horrendous haddock iii,40 +hibird,40 +hibino nozomu,40 +hibana,40 +hibachi (object),40 +hg ni koisuru futari,40 +helena blavatsky (swimsuit archer) (second ascension) (fate),40 +heeri,40 +heath41,40 +heartsix,40 +headhunting permit (arknights),40 +hayami sena,40 +hayami hiro,40 +haun (exodinary),40 +hatsuru koto naki mirai yori,40 +hatori kumi,40 +hatomugi seika,40 +hatch (7th dragon),40 +hata no kokoro (cosplay),40 +hasshaku-sama (cosplay),40 +hassan (dq6),40 +hashikure taro,40 +haruna (citrus love i),40 +haruka na sora,40 
+haruka kuromiya,40 +haruakira,40 +harry potter and the cursed child,40 +hard gay,40 +hard drive,40 +happytreefriendspikapika,40 +hanneman von essar,40 +haninozuka mitsukuni,40 +hani (udauda),40 +hane yoshiyumi,40 +hamazaki reina,40 +hallucination,40 +halloween (movie),40 +hakuduki18,40 +hajun (hey sangha),40 +hajime monji,40 +hachi (hachi sin),40 +habutae kyusetsu,40 +gym pants,40 +guy fawkes mask,40 +gumi (fwjn7284),40 +guided kiss,40 +grimsley (sygna suit) (pokemon),40 +grey pajamas,40 +grel (r6hgvu5),40 +greenteamousou,40 +graphos,40 +goliath (girls' frontline),40 +godzilla: planet of the monsters,40 +god hand,40 +gnar (league of legends),40 +glock 17,40 +gingham (amalgam),40 +gigantamax (other),40 +giant anteater (kemono friends),40 +gerbera tetra,40 +george weasley,40 +gekiteki na beefsteak,40 +geinoujin kakuzuke check,40 +geegee (granblue fantasy),40 +geckolion,40 +gask (architect 2d),40 +garyljq,40 +garderobe swimsuit,40 +gallows carradine,40 +galarian slowbro,40 +gaeun (counter:side),40 +g4265059,40 +fuuto tantei,40 +fusuma (ramunezake),40 +furyouhin (bubumelon),40 +furuya hotaru,40 +furuta nimura,40 +fuotchan,40 +full metal panic? fumoffu,40 +fukuroda takaharu,40 +fujimaki nora,40 +fuji den fujiko,40 +fue (fuef),40 +fudanshi,40 +freesia (flower),40 +forastero,40 +foil,40 +fleur (personal ami),40 +flauschtraut,40 +flatfield,40 +flareze (porforever),40 +five of spades,40 +fitness boxing,40 +fire man,40 +fig (lchijiku),40 +ffyak,40 +ferret tail,40 +farnese (berserk),40 +farfalia,40 +farah (legend of queen opala),40 +fantasy bishoujo juniku ojisan to,40 +fantastic belltier,40 +euphoria (clockup),40 +etsuo,40 +etoile,40 +ethan (sygna suit) (pokemon),40 +erusen (des-arms),40 +erubesuto,40 +eringi oishii,40 +eol 9,40 +ensemble girls (artist),40 +enjou tomoe,40 +enia (eniaart),40 +end of the golden witch,40 +elina (eri15),40 +elfir traum,40 +ekaterina orange,40 +ekaki no mime,40 +eileen (virtua fighter),40 +eighth wonder,40 +ei flow,40 +eggloaf,40 +ede,40 +ecolo (puyopuyo),40 +ecole du ciel,40 +echidna (monster girl encyclopedia),40 +ebinomi,40 +ebihurai,40 +ebba,40 +dva,40 +duke of york (prestige of the glorious formula) (azur lane),40 +ducking,40 +drunken master,40 +drenched-in-sunlight,40 +drakloak,40 +dragoon (selen tatsuki),40 +dragon empery (emblem),40 +doomfist (overwatch),40 +donkoni,40 +don't you ever stop (meme),40 +dolphin earrings,40 +dokkaebi (rainbow six siege),40 +dogmatika ecclesia the virtuous,40 +dmitry grozov,40 +divus crewel,40 +diving block,40 +dicorndl,40 +diamond-shaped brooch,40 +dexio (pokemon),40 +devil (monster girl encyclopedia),40 +derpy hooves,40 +deodorant,40 +dejiko (cosplay),40 +decal,40 +dearka elsman,40 +dconan owo,40 +darcy (pixiv11949485),40 +darah,40 +darach (pokemon),40 +danyotsuba (yuureidoushi (yuurei6214)),40 +danny phantom,40 +danny lee,40 +daniella,40 +daniel oduber,40 +dan fei,40 +daiyousei (cosplay),40 +dagon (megido72),40 +da capo iv,40 +cypher 05,40 +cyou shigen,40 +cygnus hyouga,40 +cybuster,40 +cure magical (sapphire style),40 +cupitan (granblue fantasy),40 +crystallization,40 +cross neko,40 +cross-laced gloves,40 +counting money,40 +coso-ri,40 +corgi (corgi0322),40 +condom on nipples,40 +codemofel,40 +cocoa cookie,40 +coco (artist),40 +cloudlou,40 +clonion,40 +clming,40 +clitoris peek,40 +claudia enfield,40 +claudia (granblue fantasy),40 +cirrika,40 +chun bae,40 +choke (amamarin),40 +chloe withers,40 +chinese spoon,40 +chiba lotte marines,40 +cherry stem knot,40 +cherry peachcat,40 +cheer-chan (tawawa),40 +checkered sleeves,40 
+chark14,40 +chapa kari,40 +chaos (dungeon and fighter),40 +chamnaitu,40 +celine (sen no kiseki),40 +celia claire,40 +ceey,40 +cecilia glinda miles,40 +cecile (porforever),40 +cavalier hat,40 +castlevania: legacy of darkness,40 +casteliacone,40 +carcinization,40 +captainkuma,40 +candy jar,40 +camouflage helmet,40 +calligraphy pen (medium),40 +cal devens,40 +cake (isiofb),40 +caimu,40 +cailin020,40 +cagliostro (summer) (granblue fantasy),40 +byakudan kagome,40 +butterfly on shoulder,40 +burent,40 +bureikubureido,40 +bungee jumping,40 +bullet in mouth,40 +bug catcher (pokemon),40 +bubuki buranki,40 +bubble wrap,40 +broly culo (meme),40 +broken fence,40 +bottle cap challenge,40 +boston crab,40 +blue tears (infinite stratos),40 +blossom (gizen'yasan),40 +bloody roar,40 +bloody moon,40 +blood on teeth,40 +black panther (marvel),40 +black garter,40 +biskekun,40 +birch,40 +bico (bicoris),40 +biafura,40 +bethlehem (alchemy stars),40 +beriko (dotera house),40 +bellelba (pokemon),40 +bearded seal (kemono friends),40 +battle koala,40 +barseisui,40 +baron (varon666),40 +barnaby brooks jr. (cosplay),40 +barioth,40 +barbary lion (kemono friends),40 +bambi-25,40 +balus,40 +bakunetsu god finger,40 +b d one,40 +b-29 superfortress,40 +azuri909,40 +azarashi (snmfloowern),40 +ayase non,40 +ayase miko,40 +axanael,40 +auxtasy,40 +automatic door,40 +au7,40 +ato (bfj315),40 +atai,40 +asunogear,40 +assassin's creed iv: black flag,40 +ashida yuri,40 +asai (00:05),40 +asagiri ai,40 +artin (boogbogex),40 +arquebus,40 +arowana2111,40 +arobiro,40 +arnaud g. vasquez,40 +arizuki shiina,40 +arisa gunhale,40 +arinsu (kodamamaimai),40 +arikawa rui,40 +aria (seiken no blacksmith),40 +arcana trust,40 +arcade ahri,40 +araki mitsuru,40 +ar-57,40 +apron grab,40 +appare! tenka gomen,40 +aotsu umihito,40 +aoto (ar tonelico),40 +aono 99,40 +aoi karin,40 +anocurry,40 +annyui,40 +anna mcbein,40 +anna graem,40 +angel wish,40 +an-chan (ananna0315),40 +amiya (planter) (arknights),40 +amedan,40 +amazaki ria,40 +amatsuka hotaru,40 +alternate neckwear,40 +alouette (la pucelle),40 +alloyrabbit,40 +allos,40 +all nippon airways,40 +alien alien (vocaloid),40 +alfin reise arnor,40 +akuma de oshioki!,40 +aks-74,40 +akovo,40 +akou (phoenix777),40 +akirame,40 +akipeko,40 +akari ga yatte kitazo (vocaloid),40 +akari (bokujou monogatari),40 +akakokko,40 +akachouchin,40 +akabino,40 +aizawa asahi (unbalance),40 +aiu404l,40 +airi (alice or alice),40 +airbrush,40 +aikawa kizuna,40 +aikawa jun,40 +ahn dongshik,40 +ahirun,40 +ags-30 (girls' frontline),40 +aglovale (granblue fantasy),40 +agate crosner,40 +adworse,40 +admiral graf spee (a novel anniversary) (azur lane),40 +adeptus mechanicus,40 +acid,40 +acceptor,40 +abe no seimei (fate),40 +abara hanbee,40 +aasu kirishita,40 +a re,40 +a kirima,40 +a821,40 +9wa,40 +45 (diagonal45angle),40 +3ldk,40 +2003 server,40 +1z10,40 +12 (xaenedju),40 +1202 koge,40 +zuu (qq770463651),39 +zuru,39 +zumi6,39 +zocehuy,39 +zless,39 +zest (lossol),39 +zenya,39 +yzk knmn,39 +yuuyami no mikazuki,39 +yus,39 +yurikawa midori,39 +yuri lowell (true knight),39 +yunoji yusuke,39 +yumi (tuzisaka),39 +yukishiro akira,39 +yukino (yukinosora1126),39 +yukinimaru,39 +yuki shin,39 +yuki hikari,39 +yugiri mistwalker,39 +yue (qtxyjiang),39 +yu (8dgc4mfc),39 +yoshinari atsushi,39 +yoshii yumi,39 +yoshihara motoki,39 +yoshida yuuko (machikado mazoku) (cosplay),39 +yoshida nishi,39 +yoru (0 0yoru),39 +yorozu,39 +yoroi kabuto,39 +yoongonji,39 +yomo,39 +yomare,39 +yokubari saboten,39 +yokomine ibuki,39 +yesod (project moon),39 +yatsunagi (oyasumi 
sumika),39 +yatonokami kanata,39 +yat anshin uchuu ryokou,39 +yasaidon,39 +yao (pixiv9482697),39 +yangyieva,39 +yan ge,39 +yamoto,39 +yamiyo ni odore,39 +yami nabe,39 +yamauta,39 +yamanoskk,39 +yamanaka koyomi,39 +yamanaka kotetsu,39 +yamamoto canponi,39 +yamagami mozuku,39 +yamae saki,39 +yakin byoutou san,39 +yakin byoutou ni,39 +yacht,39 +yace,39 +xx asui,39 +xing xiao,39 +xiawenjie,39 +xiao shi lullaby,39 +xi zhujia de rbq,39 +wuhuo,39 +wtuw,39 +wrato,39 +wool (kurokrkr),39 +wolfgang mittermeyer,39 +wolf (fate),39 +wkdnlwoddl,39 +wizard girl ambitious,39 +wilt and blush,39 +willow park,39 +white freckles,39 +whale ornament,39 +wet tail,39 +wet lens,39 +wen jr,39 +weedhollow (dokuran),39 +was775,39 +warin,39 +war thunder,39 +wantacchi,39 +wallet chain,39 +wakaya hana,39 +wabuki (ochigan),39 +w-t,39 +vulnificus,39 +von.franken,39 +venis,39 +vato falman,39 +vanadis,39 +uva,39 +usura,39 +usanekorin,39 +usagi (786113654),39 +usaba (usabara c),39 +urshifu (rapid),39 +ungaro,39 +undeedking,39 +un (kyousougiga),39 +umemoto (konna),39 +ukeuke,39 +uhhgaoh,39 +uenomigi,39 +uchigatana,39 +type speed,39 +type 56-1 (girls' frontline),39 +two soyjaks pointing (meme),39 +two of hearts,39 +twinkle star sprites,39 +tuzaixia,39 +turing love,39 +tsukuyomi mana,39 +tsukushi (toxicdoll),39 +tsukkon,39 +tsukizaki shizuka,39 +tsukimizake,39 +tsukimiya kaede,39 +tsukimi shokudouki,39 +tsukikawa chiri,39 +tsujisaki (coa3),39 +true love story,39 +trimcolor,39 +transparent breasts pads,39 +transformers: war for cybertron trilogy,39 +tqg 07,39 +toyohama nodoka,39 +towada-san (thank39),39 +touya kotonari,39 +toushin toshi iii,39 +touma saya,39 +toto (caaaaarrot),39 +toshiue lesson,39 +toroshio,39 +toriko no hime,39 +toranpuman,39 +torakichi37,39 +too many weapons,39 +tonight at the ligne droite (umamusume),39 +tokikago yuri,39 +tohru (dragon) (maidragon),39 +todoriki rin,39 +tmtkn1,39 +tmt,39 +tktk-tnk,39 +tira (elfenlied22),39 +tiguruvurumudo vuorun,39 +tightrope,39 +tigern (tigern28502735),39 +tiger hat,39 +tierla,39 +ticonderoga (azur lane),39 +thwwshark,39 +thundurus (incarnate),39 +thumbs in pockets,39 +thief (fft),39 +the unlimited: hyoubu kyousuke,39 +the last guardian,39 +the lamb (cult of the lamb),39 +the iconoclasts,39 +the gate (fma),39 +thank you friends!!,39 +tewo (tewowet),39 +tetsuyo,39 +tetori (tetolisto),39 +terry (dq6),39 +teriton,39 +tera l,39 +tequila marjoram,39 +tenpura noraneko,39 +ten cws,39 +tekin,39 +teio (teiotei),39 +teardrop earring,39 +tavor (m tavor),39 +taurus aldebaran,39 +tatsumi (sekizu),39 +tasu tasuta,39 +tanarot,39 +tanan,39 +tamtamdi,39 +tamaya,39 +tamago sando,39 +takumi mizuki,39 +takashiro hiroko,39 +takao (88499191),39 +takae (poupee en biscuit),39 +takacchi,39 +taka (hiroa),39 +tail slapping,39 +tail pillow,39 +tagawa katsuya,39 +tachibana hinata (fabiniku),39 +tabasa,39 +syuya,39 +syukonbu,39 +sylveon (cosplay),39 +syake (wadanohara),39 +sword world 2.0,39 +suzurikawa sasha,39 +suzuki kokono,39 +sutaku77224,39 +suppy,39 +suou mikoto (k),39 +sunohara nana,39 +sunkilow,39 +suicide bomb,39 +sugino miyo,39 +sugihara manami,39 +suehiro anna,39 +subaru (choukou sennin haruka),39 +suaynnai wanzi,39 +striped arm warmers,39 +stella (black rock shooter),39 +star saber (transformers),39 +standing fellatio,39 +squidward tentacles,39 +spoken panties,39 +spock,39 +soyokaze,39 +sowitchraw (cellphiena),39 +souhi,39 +soresaki,39 +sora no kanata no dystopia,39 +sora (aki00),39 +sophiaenju,39 +sophia (punishing: gray raven),39 +sony kisaragi,39 +sonia (special costume) (pokemon),39 
+songjiangcc,39 +snowflake necklace,39 +smash daisaku,39 +slamming door,39 +skyt2,39 +skkc 128,39 +skinnytorch,39 +skeptical,39 +six neon,39 +siroringo,39 +simon (kappa no kawanagare),39 +simisage,39 +silvercandy gum,39 +silanduqiaocui,39 +sikinose val,39 +sigrunen,39 +sidorov,39 +shutou suzu,39 +shuruken (yuumo),39 +shukei,39 +shogun 1 (sekaiju),39 +shirorenge (huruhuru),39 +shironekonokiki,39 +shiro (mofuaki),39 +shirikon,39 +shiren (fuurai no shiren),39 +shippo (shishizaru),39 +shinonome satsuki,39 +shino skk,39 +shinmon megumi,39 +shinjou sadagiri,39 +shinchou ni kansuru kousatsu,39 +shin (shin k009),39 +shimogami ataru,39 +shimizu eiichi,39 +shiki haru,39 +shikai yue,39 +shiiba tsumugi,39 +shido mel,39 +shibuzoh,39 +sheryama,39 +sherman firefly,39 +shellos (west),39 +she li (lynxm),39 +shamare (echo of the horrorlair) (arknights),39 +shakkiyi,39 +sh-60 seahawk,39 +setsuna (inuyasha),39 +set (mythology),39 +serene (gusarme),39 +sephiroth (cosplay),39 +senkou high school uniform,39 +senifu,39 +senhime,39 +sendai (kancolle) (cosplay),39 +sen light,39 +selina kyle,39 +sekai (cevio),39 +sei 8220,39 +sei (bakuretsu tenshi),39 +sebas (isekai shinige ojousama),39 +scratching ass,39 +scott bennett,39 +schrodinger,39 +schoolgirls love tentacles,39 +saya6382,39 +sawara (starligtvision),39 +sawanoguchi sae,39 +sawamin,39 +savuxan,39 +satsuki suzuran,39 +satou satomi,39 +satou masaki,39 +satolive20,39 +saseum (kao husband),39 +sasakura ayato,39 +sarika,39 +sarasara shoyu,39 +saran,39 +sanada jp,39 +san zhi chun,39 +same ningen,39 +salad bowl,39 +sakurasakimasu4,39 +sakurai (kage),39 +sakuragi tooru,39 +sakura kiyomi,39 +sakiika0513,39 +sakenotorii,39 +sakamoto days,39 +sajiwa (namisippo),39 +sagano mito,39 +sadu dotharl,39 +sadatou ayano,39 +sada (sadahalu),39 +saboten pose,39 +sa-3 mozambique,39 +ryze,39 +ruriruno,39 +rureizi,39 +rubicante,39 +rtari,39 +rott ur,39 +rope (summer flowers) (arknights),39 +room you can't get out of unless you x (meme),39 +romani (zelda),39 +rolf (fire emblem),39 +roku kyuu,39 +roku-jou,39 +rocm (nkkf3785),39 +rockman xover,39 +robosaa (roboco),39 +ro96cu,39 +ririri (user rkrv7838),39 +riri (kemurikusa),39 +ringo (nanaprin),39 +riku (jonsun),39 +ribbon legwear,39 +rettou joutou (vocaloid),39 +reptilian,39 +renee,39 +renboukouji akira,39 +relief,39 +red corruption,39 +red-eyes black dragon,39 +recoome,39 +real intention,39 +rausu (undeadmachine),39 +ratel (kemono friends),39 +randal orlando,39 +rakshata chawla,39 +rae (rob ishi),39 +radishkek,39 +racoona,39 +rachael foley,39 +racaseal,39 +ra coon,39 +quiet (metal gear) (cosplay),39 +queen sectonia,39 +qtarou,39 +qbase,39 +pyuu to fuku! jaguar,39 +pyrefly,39 +puribate (helgoland),39 +psylduck,39 +psychic force,39 +psychic (7th dragon),39 +protagonist (devil survivor 2),39 +propane tank,39 +progrise key,39 +pratty,39 +practicing,39 +pps submachine gun,39 +pps-43 (girls' frontline),39 +poru (tohopunk),39 +portgas d. 
anne,39 +popping,39 +ponnu (tnpn2yw),39 +pokemon: jirachi: wish maker,39 +poini (king-scarlet-dead),39 +po pooch,39 +pk machine gun,39 +pizza cutter,39 +piyo piyo apron,39 +pixie-bob (boku no hero academia),39 +pinkmm,39 +pill on tongue,39 +picture book,39 +phoenix ikki,39 +phineas and ferb,39 +philippines,39 +pg (pege544),39 +peter pan,39 +person on back,39 +peppedayo ne,39 +peppe,39 +pepo (flower knight girl),39 +penis envy,39 +peng yong,39 +pekorin (precure),39 +pedestrian crossing sign,39 +paw ornament,39 +patdarux,39 +parune chigetsu,39 +panasonynet,39 +pajaman,39 +pairleaf,39 +page one (one piece),39 +padme amidala,39 +p22 (girls' frontline),39 +oz (gerbera7),39 +outer science (vocaloid),39 +oui lion,39 +ou (swdp),39 +otsuki38,39 +otou (otou san),39 +otonashi io,39 +otonashi fumiko,39 +otodama tamako,39 +ossan zabi 190,39 +oshiruko kizoku,39 +osakabe makie,39 +orrdriver,39 +ororooops,39 +orihalchon,39 +ore monogatari!!,39 +orange bloomers,39 +oozora ako,39 +ootsuka kotora,39 +ootori naru,39 +oota minoru,39 +ookura kazuya,39 +ookubo rumi,39 +ookouchi shino,39 +oogama (youkai watch),39 +oofxyphxia,39 +onna mahoutsukai (maoyuu),39 +omega na hito,39 +okuda manami,39 +okojo,39 +oklahoma (azur lane),39 +okenokoneko,39 +oka megumi,39 +ohara rinne,39 +ohagi (food),39 +oda kou,39 +nyto mercurows (girls' frontline),39 +nyoichi (ekaini),39 +nyan (themare),39 +null qq,39 +noyuki1204,39 +nora (act2),39 +noka (blackheart1118),39 +noise (suite precure),39 +noda eishi,39 +no armwear,39 +nm222,39 +nishizawa momoka,39 +nise6,39 +ninomae ichijiku,39 +nikaime,39 +nikaidou aya,39 +nijou ryuu,39 +nightmare (mazohaha),39 +night seeker 2 (sekaiju),39 +nicole watterson,39 +nezumi otoko,39 +nez-doll,39 +new orleans (azur lane),39 +nepgyaa,39 +nemou,39 +nemesis sudou,39 +nekoronbusu,39 +nekoreset13,39 +nekokyu,39 +neko zukin,39 +necromancer 2 (sekaiju),39 +necking,39 +navigator,39 +naval mine,39 +natsumegu,39 +natsuki iori,39 +natali (rune (pixiv 25170019)),39 +narakuuu,39 +narafume,39 +napoleon bonaparte (ladies & gentlemen) (fate),39 +naora yusuke,39 +nao (necomugi),39 +nanashiki,39 +nanami natsuki,39 +nanako (jujutsu kaisen),39 +nana kusun,39 +nana895,39 +namo (goodbyetears),39 +namgic,39 +nakajima akihiko,39 +najashi,39 +naitou-kun,39 +nagumo midori,39 +naginomori gakuen high school uniform,39 +nagi (fire emblem),39 +nagasode (48789563),39 +naga-no,39 +nafta,39 +n.u.n.s.,39 +myg,39 +mutsu (kitakaze berry),39 +musical touken ranbu,39 +mushiao,39 +musashi (aoki hagane no arpeggio),39 +murikajin,39 +murasaki-sin,39 +muraachi,39 +ms.assistant,39 +mouri kogoro,39 +mountaintop,39 +motto! haramase! 
honoo no oppai isekai ero mahou gakuen!,39 +mother brain,39 +mossberg 500,39 +morning musume,39 +moriyama a,39 +morisaki nao,39 +moriririnn,39 +moon gundam,39 +monushi,39 +monster in kamata,39 +monopollyan,39 +monono,39 +monomono,39 +monique (arknights),39 +momiji (lucario),39 +mogmogura,39 +mobius 1,39 +mm (pomeme4),39 +mk 14 ebr,39 +mizuki hau,39 +mizu no,39 +miyuara,39 +miyu ottavia,39 +miyamoto musashi (swimsuit berserker) (fate) (cosplay),39 +miyamoto (dominocube6),39 +miyabi shigure,39 +miya (zawarudo),39 +misaki (summer) (princess connect!),39 +miracle!,39 +mink (dramatical murder),39 +minibike,39 +minew,39 +minerva (p&d),39 +minazuki (0038),39 +minase yuka,39 +minamori noeru,39 +mimosa211,39 +mimimi (echonolog),39 +mikuni souichirou,39 +mihashi ren,39 +micolash host of the nightmare,39 +michudx,39 +mhunter 45,39 +metadio,39 +mesou-san,39 +merokonbu0,39 +mercelida ygvar,39 +meranoreuka (naokentak),39 +melonpan (artist),39 +meloettta,39 +melo (meromero),39 +mellchi,39 +melissa seraphy,39 +mebachi,39 +mea koenig,39 +mcyu,39 +maybe,39 +maxwell manzoku,39 +max caulfield,39 +matsuro meru,39 +matsurika (j297021302),39 +matsunami rumi,39 +matsuda juukou,39 +matou,39 +masumofu,39 +masao (ebi no osushi),39 +maruno ball,39 +marilyn monroe (cosplay),39 +marianne (unholy sanctuary),39 +maokai,39 +mao zedong,39 +mao (yuureidoushi (yuurei6214)),39 +mamoru (mamoru jinja),39 +mamo (dokidoki! precure),39 +makoto (roketto-massyumaro),39 +makimachi misao,39 +maki yahiro,39 +maki (makidai2024),39 +majorita (disgaea),39 +maid-san to boin damashii,39 +maguro (minase1684),39 +magical mirai luka,39 +magehound,39 +maestrale (azur lane),39 +mado akira,39 +maco,39 +machino (nidavellir),39 +mabui,39 +mab pa-15,39 +m3 (girls' frontline),39 +m0 chi,39 +m.a.d mafia is all dead,39 +lyric (hina9111),39 +lushuao,39 +luobo (nsnr8754),39 +lungs,39 +lucian (league of legends),39 +lovezawa,39 +lovely idol,39 +louis vuitton (brand),39 +lotter75,39 +lost passage,39 +loni dunamis,39 +lizzydom,39 +little princess (guardian tales),39 +linia pacifica,39 +lily (wanko),39 +lian (pokemon),39 +leviathan (final fantasy),39 +letty (ogami kazuki),39 +leomodesto,39 +leina ashta,39 +lei bailin,39 +le malin (azur lane) (cosplay),39 +lasso of truth,39 +lamborghini countach,39 +laffey (bunny clerk?) (azur lane),39 +lady pearl,39 +lady jewelpet,39 +kz ripo,39 +kyouraku shunsui,39 +kyounami,39 +kyoudyu,39 +kusunoki rikka,39 +kusunoki masashige,39 +kushinada (p&d),39 +kurusu piyo,39 +kururi (oekaki nikki),39 +kurokawa keita (haozz),39 +kuroiwa cookie,39 +kurirou,39 +kure (beniya),39 +kurda smahlt,39 +kunichika yuu,39 +kumo (kumo hsc0216),39 +kumo (kumo8159),39 +kumamakura kurumi,39 +kulolin,39 +kujou hyotarouo,39 +ku koro,39 +ktm,39 +kt kkz,39 +ksfactory,39 +kozzz y,39 +koyoka,39 +kouen miska,39 +kou (inaba),39 +kotona matome,39 +kotohatoko510,39 +kotobuki haruki,39 +kotetsu kiyone,39 +kote (dew),39 +korone,39 +komepan,39 +kokkoro (princess connect!) 
(cosplay),39 +koisuru natsu no last resort,39 +koenma,39 +kodachi nagi,39 +koaki,39 +ko->u,39 +knowa,39 +kkr rkgk,39 +kizuna encounter,39 +kitaya,39 +kishinuma yoshiki,39 +kirushi (killcy),39 +kirochef,39 +kirishima reiko,39 +kirishima kotone,39 +kira kira,39 +kinoto kanade,39 +kinosaki (green patio),39 +kinoko (shikimylove),39 +kino books,39 +kina (446964),39 +kimagure temptation,39 +killingrock,39 +killer instinct,39 +kikuchi (xpoz),39 +kijima matako,39 +kibimoka,39 +kiara (kenshin187),39 +kh (tanakananataka),39 +keyfanjun,39 +kent0320,39 +kemono fabric,39 +kemeko (kemeko deluxe),39 +kekyo,39 +keisuke desu~,39 +kei (limitedgirl),39 +keemoringo,39 +kebaboishii,39 +kazue kato,39 +kazehana,39 +kayoko (new year) (blue archive),39 +kayaku (banban),39 +kawamatsu yoshinori,39 +katsu ryouji,39 +kato (kato),39 +kasumi (summer) (princess connect!),39 +kasuga nozomi,39 +karomura,39 +karasu kame ex,39 +kanna ryouto,39 +kankitsu (94rz),39 +kankara nashi,39 +kanii rate,39 +kanemaru (knmr fd),39 +kandagawa jet girls,39 +kanda saki,39 +kanai23831347,39 +kamiya ogawa,39 +kamimon,39 +kamikaze (kancolle) (cosplay),39 +kamikawa tamaki,39 +kamen rider zx,39 +kamen rider super-1 (series),39 +kamada issei,39 +kalua,39 +kakubayashi tsuyoshi,39 +kagami leo,39 +kaga yuuki,39 +kadou,39 +kadota hiromi,39 +kaburaya,39 +kaaya,39 +k young03,39 +k@non,39 +k19,39 +k-dam,39 +juri (blue archive),39 +junyou (azur lane),39 +junexp,39 +juju coo shnamur,39 +judy hopps (cosplay),39 +ju (old505),39 +joze,39 +jongmin,39 +jizaikagi,39 +jishimaru,39 +jell (jell y fish),39 +jeetdoh,39 +jeanne d'arc (swimsuit archer) (fate) (cosplay),39 +jeanne d'arc,39 +jdori,39 +jayun,39 +jandy,39 +jadeite (sailor moon),39 +ivuki,39 +itunes card,39 +ittan momen,39 +itou shiori,39 +itou kanae (amagami),39 +itachi (3dt),39 +isshiki ichika (murakami suigun),39 +isekai cheat magician,39 +irue,39 +irei yukitoshi,39 +iovebly,39 +inumimi moeta,39 +inui seishu,39 +inugami kotarou,39 +ingrid sorveig sorgrims,39 +ingerdoll,39 +infinity sword (elsword),39 +ine (ineinnen),39 +indian wolf (kemono friends),39 +ille (xcpa7325),39 +ili (dream c club),39 +iku! 
iku!!,39 +ikki (tue sein bestes),39 +iiru,39 +iida keiko,39 +ignis (blazblue),39 +igawa sakura (cosplay),39 +idoukunn,39 +idolmaster side-m live on stage!,39 +ichitaka (1015ichitaka),39 +ich.,39 +hypnos (hades),39 +hype beast crypto,39 +hyejin (black survival),39 +hunting era,39 +hunter (left 4 dead),39 +hunter.g,39 +hungarian flag,39 +hugo (suikoden iii),39 +hrk173,39 +houzuki michiru,39 +hotaru (htol#niq),39 +hosoime,39 +hoshizaki akari,39 +hoshino kirara,39 +hoshino darts,39 +hoshimura makina,39 +hoshikoi tinkle,39 +hoshiful,39 +hoono yurumu,39 +honba misaki,39 +hollow song of birds,39 +hk416 (fang) (girls' frontline),39 +hitokuchii,39 +hishizaki shaia (gowcaizer),39 +hisame (nekousatan),39 +hirose mariko,39 +hirasawa geko,39 +hinatabokko,39 +himuro kirie,39 +himitsu ~kuro no chikai~ (vocaloid),39 +himemiya alice,39 +hime-chan no ribbon,39 +himawari!,39 +high chair,39 +higanbana (flower knight girl),39 +hibaneim,39 +hi-yo,39 +hewie,39 +helpig,39 +hello kitty to issho!,39 +heca,39 +heatmor,39 +heart ahoge duo,39 +hayashi akemi,39 +hawkeye girl (mechanical buddy universe),39 +hatsuyukisou (flower knight girl),39 +hatimiz,39 +hat over hat,39 +hassystants,39 +harushino,39 +harunohotaru,39 +harunoha,39 +harukabo,39 +haru (arser doil),39 +harken browning,39 +harada shoutarou,39 +hanazawa yusaku,39 +hananokouji kurara,39 +hanahubuki1991,39 +hamllock,39 +hamita (rikopin ika),39 +hameln (sinoalice),39 +hamagurihime,39 +haki,39 +hakata-san,39 +haiba lev,39 +hagiya kaoru,39 +hachinan tte sore wa nai deshou!,39 +h haluhalu415,39 +h@ruichi,39 +gyuu kaku (gyu400),39 +gurochii,39 +gunzan,39 +gunbai,39 +guild sweetheart,39 +gruier serenity,39 +groot,39 +greil (cosplay),39 +grandia online,39 +graf zeppelin (kancolle) (cosplay),39 +gozaru,39 +gotou (pixiv37128),39 +goshenite (houseki no kuni),39 +goldfishu,39 +goggles around arm,39 +glasses case,39 +gk,39 +gino knab,39 +gingerbullet,39 +gingerbread cookie,39 +gigantamax cinderace,39 +giga drill break,39 +geworin,39 +geumgang (odd snail),39 +genki dama,39 +genk,39 +general dynamics lwmmg,39 +geeto gaadian,39 +garma zabi,39 +gamora,39 +galil (girls' frontline),39 +galacta knight,39 +gaku ou,39 +gadget trial,39 +gabao.,39 +g.yamamoto,39 +fuyuri (tibirobo),39 +fuurin,39 +furisode girl kali,39 +fune (fune93ojj),39 +fukuyama jeanne sachie,39 +fujita saki,39 +fujita ayano (40hara),39 +fue (lars0713),39 +fudama,39 +fu (tk1189227dhy),39 +freideugoi,39 +freefall,39 +fred weasley,39 +fotia of fireside,39 +fortythree,39 +former capital,39 +forge,39 +foot on breast,39 +food on penis,39 +fomnant,39 +folkssoul,39 +flat escardos,39 +flame toys,39 +fishofthelakes,39 +first-chan (loalo),39 +fiore forvedge yggdmillennia,39 +findoworld,39 +fgz,39 +ferroseed,39 +felicita,39 +feather skirt,39 +favaro leone,39 +fan ju,39 +f 1chan,39 +ezusuke,39 +exusiai (wild operation) (arknights),39 +excalibur (soul eater),39 +eve valerne,39 +eva200499,39 +european water princess,39 +etesumsom,39 +essex (craft fairytail) (azur lane),39 +eris jerand,39 +enokorogusa (flower knight girl),39 +enoki 3106,39 +energon,39 +endou michiko,39 +endivinity,39 +en (shisui no utage),39 +emu (marico w),39 +empire state building,39 +emily (overwatch),39 +emily (last origin),39 +elsynien,39 +elise schwarzer,39 +elie (rave),39 +elephant hat,39 +eldridge (kitty idol) (azur lane),39 +eldelita (rakurakutei ramen),39 +elastigirl,39 +eddie (mega man),39 +edasaki banri,39 +e-liter 4k (splatoon),39 +dynamite-kit,39 +duca degli abruzzi (lustrous onyx sirenetta) (azur lane),39 +dragon ryuuhou,39 +dragon age: origins,39 
+dr. daji,39 +dosanko,39 +dorachan r,39 +dodge,39 +doctor octopus,39 +dm (nguyen dm95),39 +dkxlek,39 +diz (diznaoto),39 +dixie clemets,39 +dido (magicc),39 +di qi gang guang,39 +devil gundam,39 +device,39 +denji (mugitomato),39 +denim shirt,39 +demstouts,39 +demon hunter (warcraft),39 +deep current wattson,39 +decoration (idolmaster),39 +deba,39 +dead (inhabituels),39 +dc9spot,39 +darius iii (fate),39 +dankestofdans,39 +daniella (sennen sensou aigis),39 +daniel deves,39 +dainyuu (dgls),39 +dain (bishop m),39 +daigaku jitome,39 +daidouji tomoyo (cosplay),39 +daidailong,39 +cyril (fire emblem),39 +cynthia marguerite,39 +cynd,39 +cyclonus,39 +cyborg (dc),39 +cyber elves,39 +curling stone,39 +cure peach (cosplay),39 +cube (cube00),39 +crystal shard,39 +crucible (doom),39 +crrispy shark,39 +crowanon,39 +crossco,39 +creamsea,39 +cream starter (stand),39 +courage the cowardly dog,39 +cotton boll,39 +cork gun,39 +conqueror of the eternals,39 +concertina,39 +cokata,39 +cocura,39 +cochlea1313,39 +clive winslett,39 +clinic,39 +cliff fittir,39 +cleaned,39 +classic squiffer (splatoon),39 +cindy campbell,39 +chupimaro,39 +chuntarou (kimetsu no yaiba),39 +chumugi,39 +chu,39 +chrono clock,39 +chrom (spring) (fire emblem),39 +chrisanother,39 +chorogy,39 +chocomarybadend,39 +chiyo (no3baki),39 +chiyaru,39 +chivo (kalchivo),39 +chinchilla girl,39 +chikku (k2753),39 +chiigo,39 +chiem,39 +chie (lcddem),39 +chibimi,39 +chi meng (hua jianghu zhi bei mo ting),39 +cherry girls,39 +cheria barnes (innocent maiden),39 +checking watch,39 +checkered ceiling,39 +charlotte (madoka magica) (cosplay),39 +character counter request,39 +char's deleted affair,39 +changpan hutao,39 +chained tan,39 +castlevania iii: dracula's curse,39 +castform (rainy),39 +carue,39 +carmilla (swimsuit rider) (first ascension) (fate),39 +carlos toshiki,39 +captain kirb,39 +capricorn shura,39 +cao-cao,39 +call of cthulhu,39 +cacomistle (artist),39 +butterfly affection,39 +burgundy (pokemon),39 +bun (hiyokomame),39 +bumgae,39 +buff bard,39 +bucky (chainsaw man),39 +buchou chinke,39 +bt (shio oninko),39 +brown pajamas,39 +bronya rand,39 +broken leg,39 +bramblefix,39 +boyakki,39 +bound fingers,39 +boulder badge,39 +borg (alien nine),39 +boomina the maidroid,39 +boldore,39 +boh stick,39 +blue seed,39 +blue destiny 01,39 +blood moon akali,39 +blonde catgirl (oota yuuichi),39 +blizzomos,39 +blindfold down,39 +black tears,39 +black soccer ball,39 +black kyurem,39 +bigoru,39 +benson (azur lane),39 +beast king (sekaiju),39 +bead belt,39 +beach boy (stand),39 +bbb (fabio8552),39 +bauer (girls und panzer),39 +barnacle,39 +barbaracle,39 +banister,39 +bakedanuki (touhou),39 +babymetal,39 +baby 5,39 +baburo,39 +azer,39 +ayatudura,39 +ayakashi onmyouroku,39 +ayakashi kyoushuutan,39 +ayakashi h,39 +awa (rosemarygarden),39 +august (coyote ragtime show),39 +atsuyu,39 +atlanta (kancolle) (cosplay),39 +asymmetrical eyewear,39 +asutoro (s--t),39 +asumi sena,39 +astrid (fire emblem),39 +asano akira,39 +asagizuisen,39 +asagi toshikazu,39 +artemis (p&d),39 +armored shoes,39 +arisu reiji,39 +ariane (gaikotsu kishi-sama tadaima isekai e o dekake-chuu),39 +aren (fubuki-46),39 +arcana,39 +arcade miss fortune,39 +arc de triomphe,39 +arai 12,39 +ar (3779609928),39 +aqua (popogori),39 +aplerichi,39 +aplche,39 +aonuma neiru,39 +aomi one,39 +antenna (draconian),39 +anneliese,39 +animankan,39 +angelmaster,39 +amusphere,39 +amou yuu,39 +amisaki ryouko,39 +amiami (company),39 +amezuku,39 +ameya,39 +ameru.miro,39 +amaoto,39 +amamiya tsubaki,39 +alti,39 +altessa (futagohime),39 
+alstede brand,39 +alpha omega,39 +alita: battle angel,39 +alicia charlotte,39 +alicetaria february,39 +alger wilson,39 +alexander (fma),39 +akira ray,39 +akira (natodaisuki58),39 +akebisousaku,39 +akatuki taku,39 +akatsubon,39 +akashic chronicle,39 +akagi (deep crimson poppy) (azur lane),39 +aka (440626),39 +ak 5,39 +aira blanc neige galdinius,39 +aiovia,39 +aiha-deko,39 +agent 47,39 +ageha (ray-k),39 +ag+ (atelieriji),39 +adjusting sleeves,39 +adette kistler,39 +ada vessalius,39 +acorn hair ornament,39 +acca 13-ku kansatsu-ka,39 +a (shiei no sona-nyl),39 +a (kyousougiga),39 +a'gen (the legend of luoxiaohei),39 +88 taho,39 +80isiiii,39 +8-foot joe,39 +70 oku no bun no 1,39 +5plus,39 +1hao (@cerbero64),39 +13cm,39 +1340smile,39 +0:00,39 ++tic nee-san,39 +zun hat,38 +zuikaku (girls und panzer),38 +zoku owarimonogatari,38 +zixia (msl),38 +zhao tianyou,38 +zhao (pixiv12947327),38 +zero one driver,38 +zero (katana zero),38 +zenkai magine,38 +zawa (onomatopoeia),38 +zangya,38 +zaki (2872849),38 +zabaniyya (halloween) (housamo),38 +yustinos,38 +yuria (hokuto no ken),38 +yuni (via junk),38 +yumizuru eleanora,38 +yumeno koto,38 +yulha 06,38 +yukumo (armor),38 +yukki bunny,38 +yuhji,38 +yuge mugito,38 +yubi yubi (inugami korone),38 +yu iseol (return of the mount hua sect),38 +yotubeya,38 +yottur,38 +yostuba0704,38 +yoshitani (aminosan),38 +yorurumo,38 +yomoya oc10,38 +yomitrooper,38 +yokoi rego,38 +yokaze (xxxdisxxx),38 +yoimon,38 +yohsoro,38 +yma,38 +yinqi,38 +yellow bandeau,38 +yaxiya,38 +yatonokami nayuta,38 +yarareimu,38 +yanda,38 +yami ga fukami,38 +yamashita majime,38 +yamamura hiroki,38 +yakuta tetsuya,38 +yakimochi stream,38 +yakan (madoromio),38 +yaginuma io,38 +yaebane,38 +xx momomo xx,38 +xtango,38 +xpmc,38 +xingqiu (aoi no okina) (genshin impact),38 +xbox series x,38 +xanadu avici,38 +x-blades,38 +wuziky00,38 +wusie2,38 +wriggle nightbug (cosplay),38 +wjsn,38 +wing piercing,38 +whoareuu,38 +weiss winterprison,38 +watarumi,38 +watagashikn,38 +waruwarutsu,38 +war counselor iji,38 +wanyuwa,38 +wall crash,38 +wakanu,38 +wakana (nagomoo),38 +wakame mi,38 +w.q.y,38 +vusc,38 +vistahero,38 +vintem,38 +victoria (damegane),38 +vicar amelia,38 +vibncent,38 +vf-27,38 +verniy (kancolle) (cosplay),38 +vermouth (meitantei conan),38 +vergil (devil may cry) (cosplay),38 +venus chain (sailor moon),38 +venomania kou no kyouki (vocaloid),38 +venera-sama (cosplay),38 +vanilla the rabbit,38 +vanica zogratis,38 +vacuum tube,38 +uzuki mei,38 +uzuki eri,38 +uwded 207,38 +uwazumi,38 +usukawa,38 +ushiwakamaru (fate) (cosplay),38 +urgot,38 +ura musi,38 +unohana no sakuyahime,38 +uno uzume,38 +uno usaya,38 +uni (uni-strain),38 +uneune,38 +undyne the undying,38 +undone ascot,38 +umesato yukino,38 +umeno shii,38 +umaro,38 +ultraman orb (series),38 +ubwmitekure,38 +two-legged horse (kanji),38 +twitch (league of legends),38 +tulip (idolmaster),38 +tufang,38 +tsyn,38 +tsunemoku,38 +tsunagi first middle school uniform,38 +tsumugine rei,38 +tsumi to batsu (vocaloid),38 +tsukisaka sayu,38 +tsukiringo,38 +tsukimizu (ordeal),38 +tsukasaki aoi,38 +tsuji shinnosuke,38 +tsuji aya,38 +tsuchimikado maika,38 +tsubaki-sama wa sakihokore nai,38 +trouble witches,38 +tristana (girls und panzer),38 +traver009,38 +traptrix rafflesia,38 +transparent horns,38 +towa1,38 +toukashi (2tou9),38 +torikai hazuki,38 +torazou,38 +too many dogs,38 +tono munekage,38 +tonki,38 +tomoe gozen (swimsuit saber) (second ascension) (fate),38 +tomiya (tomiya2117),38 +tokiaki,38 +tirtouga,38 +tiger panties,38 +tianyu jifeng,38 +thrown,38 +the world (tarot),38 
+the cecile,38 +the0neulost,38 +thaumana,38 +thai clothes,38 +tenor saxophone,38 +ten of hearts,38 +tempuru,38 +telepathic sex,38 +tazukichi,38 +tatsumi (akame ga kill!),38 +tatiana wisla,38 +tatiana (fire emblem),38 +tank truck,38 +tang-du,38 +tanaka nunu,38 +tamara,38 +tamano hinagiku,38 +tamago tyoko (ijen0703),38 +tales of vesperia: the first strike,38 +taku (fishdrive),38 +takapii,38 +takamaki anne (cosplay),38 +takahashi tetsuo,38 +taiyou (tori no su studio),38 +tail garter,38 +tail concerto,38 +tachibana (tach011194),38 +tachi (mtd),38 +taamo yu,38 +taachika,38 +t.r,38 +suzuna takano,38 +suzumura tomo,38 +suzukimadaka,38 +suzuhara misaki,38 +suzako,38 +sumiyao (sumiyao (amam)),38 +sukumaraku,38 +suisui (hakkindou),38 +sugoihi,38 +suemizu yuzuki,38 +stutter,38 +stunk,38 +studio khara,38 +stragus magus,38 +stone pillar,38 +starsd,38 +spika (pangya),38 +soyoghi,38 +soutsuki naru,38 +soumu (kehotank),38 +souma (soumadian),38 +sorceress sellen,38 +sonosaki kazebayashi,38 +sola (sola04),38 +snowman costume,38 +smoke ring,38 +sleepfool,38 +slan (berserk),38 +skidrow,38 +sirius black,38 +silent sakia,38 +signalkj,38 +sideswipe,38 +sialeeds falenas,38 +shuri (riri shu),38 +shuko hime,38 +shui lan er,38 +shufflebox,38 +shoukichi (shony),38 +short hair fox girl (mdf an),38 +shnider,38 +shizuku (game),38 +shishina,38 +shirosaki rin,38 +shiroimoufu,38 +shiroi ume,38 +shirasagi mayuri,38 +shiranori,38 +shirakino,38 +shiraki rika,38 +shirakamii,38 +shirakami itsuki,38 +shiou tsuyukusa,38 +shiori series,38 +shion (shinrabanshou),38 +shinsoku ikkon (idolmaster),38 +shinonome86,38 +shinomaru,38 +shino satoru,38 +shinma daigo,38 +shimizu kokeshi,38 +shimada (dmisx),38 +shima (niconico),38 +shikube,38 +shijima kiriko,38 +shiina chizuru,38 +shidoh279,38 +shibugaki matsuri,38 +shibasaki roka,38 +shake (ferit3),38 +setsuka,38 +serizawa kamo (fate),38 +seri gnsn,38 +sephikowa,38 +sensei (denki-gai),38 +sennin mode,38 +sencha (coldcolor),38 +selby,38 +seki (vtuber),38 +seirei (mimi toka),38 +seiken tsukai no world break,38 +seabook arno,38 +scissor seven,38 +sawatari ginbee haruomi,38 +satou usuzuku,38 +satou kuroon,38 +satori0121,38 +sasorichamaru,38 +sasanoha toro,38 +sasaki shounen,38 +sara scorpion,38 +sanya v. 
litvyak (cosplay),38 +sanada ikki,38 +san mokmok05,38 +san (mononoke hime) (cosplay),38 +sakurayume kome,38 +saki (14793221),38 +sakamina,38 +sak1023,38 +saints row,38 +sailor chibi moon (cosplay),38 +saikura noushu,38 +saiguuji sachi,38 +sagara1990,38 +s poi l,38 +ryuusei date,38 +ryuu (breath of fire i),38 +ryan domonica,38 +rune master (sekaiju),38 +ruka (princess connect!),38 +rouzille,38 +rose (dragon crisis!),38 +rope snake,38 +ronin,38 +rolled up newspaper,38 +rokumon tengai mon colle knight,38 +rojiura-cat,38 +robo-fortune,38 +robata,38 +rnkgmn,38 +rita mordio (catgirl waitress),38 +risutan,38 +ringocha (appleteatea),38 +ringoanu,38 +rick dias,38 +ribeyrolles 1918,38 +rg veda,38 +resha (cosmic break),38 +reon-shi,38 +reika (iamreika),38 +reborns gundam,38 +rebellion (ragnarok online),38 +re;lord dai san shou,38 +raven (guilty gear),38 +raven (artist),38 +rathke,38 +ranko no ane,38 +ranger (final fantasy),38 +rando seru,38 +rana (vocaloid),38 +ramp,38 +ramon (kof),38 +rakuhei,38 +rakugaki suruhito,38 +raito (latek),38 +rairaisuruyo,38 +rain (aaru),38 +rae (hexedwithluck),38 +racing miku (2018),38 +quelaag's sister,38 +queen of hearts symbol,38 +queen complex,38 +pythagora-switch,38 +purumia,38 +purple tunic,38 +puri puri prisoner,38 +pumo (kapuchiya),38 +puchi puri yucie,38 +protagonist (light and night love),38 +prinz (chainsaw man),38 +primarch,38 +prima aspallas,38 +pp: pianissimo,38 +pp-90 (girls' frontline),38 +pp-19 (girls' frontline),38 +power tool,38 +power drill,38 +porikeracchou,38 +pokefan cheng,38 +pocche-ex,38 +pm tii (matuko1024),38 +plesiosaur,38 +placenta (sidonia no kishi),38 +pirapirapirapira,38 +pink poison,38 +pink lipstick tube,38 +pieta,38 +phinx,38 +phina (fire emblem),38 +phantom thief,38 +perfect hole,38 +pencil to face,38 +pecorine (princess connect!) (cosplay),38 +papaia (quentingqoo),38 +panties under bodysuit,38 +panther boy,38 +pandy (geistbox),38 +pamiat merkuria (sweet cherry memories) (azur lane),38 +paffel,38 +padded walls,38 +pacha (pachastuff),38 +ouken,38 +otsukimi recital (vocaloid),38 +osage gankyou,38 +orkz,38 +origami yukari,38 +orie mishiro,38 +orico,38 +ophelia,38 +operating table,38 +opengear,38 +ootsuka you,38 +ootani yuri,38 +ooshiro youkou,38 +onigawara rin,38 +onegai! 
ranking,38 +on ball,38 +okina (805197),38 +okamoto manami,38 +okada haruna,38 +oimo 0imo,38 +oi ke,38 +ohagi,38 +ogata hiro,38 +ochanomizu doggu,38 +occult soda,38 +obligation chocolate,38 +object on bulge,38 +nyako (idolmaster),38 +nyakkuru,38 +nyahpa20,38 +numera goomy,38 +numakura manami,38 +nt50,38 +noyama (noyama8888),38 +novel (object),38 +nose tape,38 +northeast mountain,38 +nonokuro,38 +nonddu,38 +nona (yeun),38 +nokuran,38 +nokia (harusion),38 +noe 8ban,38 +no control,38 +nitoro-star,38 +nishimura nanami,38 +nishikawa eito,38 +nishihama middle school uniform,38 +nisei muramasa,38 +niounomiya izumu,38 +nina dragnot,38 +nillin,38 +nil,38 +nijino saki,38 +nijie,38 +nihoshi (bipedal s),38 +nicoco,38 +nia (littlestars),38 +neytharone (drill (emilio)),38 +nevercrymoon,38 +nerisuke,38 +neosnim,38 +nene (oda nobuna no yabou),38 +nemeko,38 +nekoyanagi (azelsynn),38 +nekoya minamo,38 +nekomitei,38 +nekokokazuma,38 +nekoemonn,38 +negurie,38 +necrosmos,38 +necromorph,38 +necoring862,38 +necojishi,38 +ndo2,38 +nayutarou (nyt kag),38 +natsume (iravati-4u),38 +natsuki yoru,38 +natsuki straight,38 +natsuki nori,38 +natsuki mitsu,38 +natsuki-chan (natsuki teru),38 +natora einus,38 +nanora,38 +nanoningen (anapoko),38 +nanashi (shin megami tensei iv final),38 +nanasaki,38 +nanairo,38 +nanahoshi kou,38 +nakazawa minori,38 +nakano futaba,38 +nakai (zabuton makura),38 +naguy (nagui),38 +nagi ria,38 +nagi (watamote),38 +nagatu usagi,38 +nadia la arwall (cosplay),38 +nachi (aoki hagane no arpeggio),38 +nabuta375,38 +nabatame hitomi,38 +myouga,38 +mutual impregnation,38 +mutsu (gintama),38 +mushoku loli,38 +mumumu (ahomoidasuyo),38 +mugicho (kdks5554),38 +mugiccha2,38 +mugen kageno,38 +mudrock colossus (arknights),38 +mucchan,38 +mrxinom,38 +mr-poritan,38 +mp41 (girls' frontline),38 +mousou dairinin,38 +mouse hair ornament,38 +moopiekun,38 +moonlaw,38 +mononofu ~shirayuri kassen maihime~,38 +monita (matataku),38 +monio,38 +monini,38 +mondoart1,38 +momin,38 +mokusa,38 +mohei,38 +mogami rio,38 +mogami (azur lane),38 +mochi547,38 +mocchiri oyaji,38 +mobuko (akita komachi),38 +mmm3sushi,38 +mizutsune,38 +miyuki (yxbt7),38 +miyan (oceanmaiden),38 +miyake achi,38 +miyabi (miyabi),38 +mitsuki tayura,38 +mitsuha (kentairui),38 +mito (sao),38 +misu (miisuuu),38 +missile (ghost trick),38 +misonikomiii,38 +mismatched horns,38 +mishima ryo,38 +misfit funny,38 +misaki (chess08),38 +mirun (funimani),38 +miracle hoshi,38 +minnie mouse (cosplay),38 +minnie may hopkins,38 +minazuki kashou,38 +minato (mntnm),38 +minami (niiya),38 +milky tea,38 +mikisato,38 +mikawaya,38 +mikagura seisa,38 +mii aki,38 +michiko malandro,38 +michael myers,38 +mhs,38 +meumiharagane,38 +mesme,38 +mercury xeno,38 +meloetta (pirouette),38 +megurimu,38 +megatron (idw),38 +mega man x1,38 +meeeeeeco359,38 +medi gun,38 +meddy.exe (mega man),38 +mechanical bull,38 +maxgonta,38 +matsuoka chie,38 +matsumoto mifuyu,38 +matatabi (flower),38 +master sgt mine,38 +masquerade channel,38 +mashiro rima,38 +masaki shino,38 +maru-yu (kancolle) (cosplay),38 +marshal (animal crossing),38 +mario party,38 +marin (ragnarok online),38 +marie rudel,38 +margaret (rune factory),38 +maple syrup,38 +maomao (kusuriya no hitorigoto),38 +mao (shining tears),38 +mamorunokoto,38 +mamima,38 +male pregnancy,38 +male futanari,38 +makita (homosapiensu),38 +makishima yumi,38 +makino (sinobusan),38 +makai wars,38 +makai senki disgaea 6,38 +maikeru (dk maikel),38 +mahou shoujo ikusei keikaku aces,38 +magic research,38 +maetenkan,38 +madeleine (fantasista doll),38 +m onna senka,38 
+m-eine,38 +m-eiji,38 +lynx ears,38 +lycoris challenge (meme),38 +lucy loud,38 +lucy kaneshiro,38 +luci ole,38 +"lu""",38 +lower (vocaloid),38 +louis lloyd-judson,38 +lorum piercing,38 +loporrit,38 +loly aivirrne,38 +loli hooker,38 +lola (pangya),38 +loki (danmachi),38 +loiza,38 +lloyd (mother),38 +liveactors,38 +liskarm (overload) (arknights),38 +lip (panel de pon),38 +lion hood,38 +lint roller,38 +ling si,38 +ling s,38 +lilium0235,38 +liberty manurung,38 +leska (arara cafe au lait),38 +ledgem (rhapsody),38 +lattice mast,38 +latex corset,38 +lariat,38 +lady (pokemon),38 +laaaicha,38 +l4wless,38 +kyou kara maou!,38 +kyo kaneko,38 +kymp,38 +kyara akaro,38 +kuwahara hazuki,38 +kusuda aina,38 +kushiya inaho,38 +kusakabe asako,38 +kusaka maichi,38 +kurokoshou (spicyland),38 +kurohebi,38 +kuroda hikari,38 +kureha (sound voltex),38 +kurano yae,38 +kuragehime,38 +kuonji shizuka,38 +kunieda (miniaturegarden),38 +kumaneko (kumaneko1138),38 +kukuchi heisuke,38 +krulcifer einfolk,38 +kroos (the mag) (arknights),38 +kronya (fire emblem),38 +kozukata yuuri,38 +koyuki (2smj),38 +koyama sousuke,38 +kowai (iamkowai),38 +kouzaku mitori,38 +koutetsu jeeg,38 +kouda kouji,38 +kotobuki shiiko,38 +korokoro,38 +kooriyama ichirou,38 +konpasu,38 +konatsu hare,38 +kome-kome (precure),38 +kokurikozaka kara,38 +kohigashi hitona,38 +kohei nakaya,38 +koa (koh a),38 +kiyochii,38 +kitakubu katsudou kiroku,38 +kishibe (young) (chainsaw man),38 +kiririn,38 +kirikawa ikumu,38 +kiri (trouble spirit),38 +kiran (fire emblem) (female),38 +kinubari nerune,38 +kintaros,38 +kinosaki reisui,38 +kingyo 114,38 +king (ougon kingyo-bachi),38 +kim yoon (gondom),38 +kim nag-seo,38 +kill la kill final episode scissor blade relay (meme),38 +kikuko (kazu),38 +kikou-kai galient,38 +kikino,38 +kijiyama north high school,38 +kid cobra,38 +khnchak,38 +key the metal idol,38 +kesha,38 +keller enasa,38 +kei (keiuu),38 +kei (keiclear),38 +kei1115,38 +keele zeibel,38 +kayaba ka-1,38 +kawosu shikou,38 +katori rea,38 +katee,38 +kataochi chuuko,38 +kasukabe you,38 +kasei yukimitsu,38 +kasakuri,38 +karyl (princess) (princess connect!),38 +karteira,38 +kariza,38 +kari-kenji,38 +karanashi mari,38 +kanrobi,38 +kanou ayumi,38 +kannagi miyabi,38 +kanikama (character),38 +kani (kaniya),38 +kangaruu (momotk1109),38 +kangaroo girl,38 +kanchigai,38 +kamiya yukihiro,38 +kamiya (amamiko),38 +kamen rider knight,38 +kamala khan,38 +kamaboko (milky holmes),38 +kalista,38 +kaiven,38 +kairoushu (dones01127),38 +kagura (prism ark),38 +kagemitsu g4,38 +kagaya (oshiriudon),38 +kagari shuusei,38 +kagamine len no bousou (vocaloid),38 +kaga (aircraft carrier),38 +kadoya tsukasa,38 +kadomaru misa,38 +k-rumi,38 +justin bailey,38 +juse rino,38 +junko (touhou) (cosplay),38 +jungtong,38 +junako,38 +jun jun,38 +jokerpang,38 +joker.z,38 +johnson ting,38 +joel (the last of us),38 +jiseki,38 +jinsai sa sa,38 +jin yun,38 +jijing zishui,38 +jay phenrix,38 +janoukyo19,38 +jang won,38 +izana minagi,38 +iyo (fate),38 +ixpellia,38 +iwatobi-chan,38 +iwato1712,38 +iwami sayaka,38 +itsu (artist),38 +itou souichi,38 +itoshiki mikoto,38 +italian wolf (kemono friends),38 +isis (ragnarok online),38 +ishtar (formal dress) (fate),38 +ishikawa fumi,38 +ishii (isuwie),38 +iscan (pokemon),38 +isabelle du monceau de bergendal,38 +iro (waterloafer),38 +irisrey,38 +iris (fall 2022) (pokemon),38 +irino,38 +io-catalyst,38 +inuinuo gataken,38 +inugami korone (cosplay),38 +intercom,38 +ink (ink01 ),38 +ink.,38 +inflating,38 +inconvenient ass,38 +inago,38 +in jar,38 +imouto no katachi,38 +imminent vore,38 
+imageboard,38 +illumination,38 +ikoma tatsuhito,38 +ikoan,38 +igau,38 +iga oboro,38 +icon 315,38 +icon (sugarless yogurt),38 +ichinose kazuki,38 +ichika (blue archive),38 +ichi/mine,38 +ibuki sakura (sgw v07),38 +ibuki imina,38 +hyakujuu-ou golion,38 +hwayoung,38 +hurin raika,38 +hovercraft,38 +houshou (kancolle) (cosplay),38 +hot dog bun,38 +hosoinogarou,38 +hoshino sora,38 +hoshihara hikaru,38 +hosekisho richard-shi no nazo kantei,38 +hori hiroaki,38 +honey trap (hero-san to moto onna kanbu-san),38 +hone shoukan,38 +hondarai,38 +hona (pixiv7939518),38 +holy mami,38 +hollyyn,38 +holes,38 +holding organ,38 +hm mono,38 +hitsujin,38 +hitomebore,38 +hitokoe,38 +hirasawa yui (cosplay),38 +hinata yume,38 +hinapo,38 +hin,38 +hiko (lg612),38 +hikawa maria,38 +hiisu (s-1104-d),38 +hiiragi natsume,38 +higurashi akane,38 +high braid,38 +higanbana (onmyoji),38 +hiepita97,38 +hidarikata,38 +hhy,38 +herishop,38 +help,38 +heizou (hezo3361),38 +hei meiling,38 +hedgehog girl (yukimoto shuuji (gurigura)),38 +heartki,38 +heart bubbles,38 +hayata aya,38 +hayami rasenjin,38 +hatoya hato,38 +hata (pixiv4102938),38 +hasbro,38 +haruta (user dndp3458),38 +haru (yomawari),38 +haru (inuarashi),38 +hanzawa821,38 +hankachi (okayama012),38 +hanasaki akane,38 +hanasaka yui,38 +hanada no kiwami,38 +hanabusaraleigh,38 +hana azuki,38 +hamu fukurou,38 +hachikoo (astatine),38 +hachi (gaoo),38 +gurantsu,38 +gunslinger (granblue fantasy),38 +gundam gp-01 zephyranthes,38 +guilhermerm,38 +guardias,38 +guanghe zuoyong de de yezi,38 +guan hat,38 +griddle,38 +grey leggings,38 +great magami,38 +grass wonder (saint jade healer) (umamusume),38 +graphic equalizer,38 +grand blue,38 +gracehoo,38 +gotou matabei,38 +gorakujin,38 +goose (untitled goose game),38 +gon (hoozuki no reitetsu),38 +goke shike (altamira05),38 +gogot,38 +glorybringer (granblue fantasy) (cosplay),38 +ginga tetsudou no yoru,38 +giant squid,38 +gesundheit (artist),38 +genzou (me genzo),38 +gentiana,38 +gazef stronoff,38 +gatling033,38 +gari gari-kun,38 +gargoyle (nadia),38 +gamma (inazuma eleven),38 +gambit,38 +gakuko,38 +gaida,38 +gaden,38 +gabu kichi,38 +gaak11977,38 +g ig98,38 +fyuria (agarest senki),38 +fuse midori,38 +furiae,38 +fur armlet,38 +funkid,38 +funbolt,38 +fukudori,38 +fukami nana,38 +fukabori sumiyo,38 +fujinomiya rio,38 +fujieda hiro,38 +fuji (rua-258),38 +fuguriya,38 +fuchsia,38 +fubuki (pekesan),38 +fu shun (azur lane),38 +frostce,38 +friedrich der grosse (zeremonie of the cradle) (azur lane),38 +freudian slip,38 +frederica greenhill,38 +fortress (nanoha),38 +forehead writing,38 +food-themed creature,38 +fleur blanc,38 +flare earlgrande gioral,38 +five-seven (adventures of fenfen) (girls' frontline),38 +fim-92 stinger,38 +figure stage,38 +fidough,38 +festa!! 
hyper girls pop,38 +fen fen fen fen,38 +faux text,38 +famas (girls' frontline),38 +falke (street fighter),38 +falco grice,38 +fairy knight tristan (third ascension) (fate),38 +eydis (sao),38 +even (even yiwen),38 +eve (nier automata),38 +eve (kenzen),38 +eve (2nas) (elsword),38 +euden,38 +etorofu (kancolle) (cosplay),38 +eterno,38 +etchi inoha sukidesuka,38 +etceteraart,38 +esythqua,38 +estus flask,38 +erythroblast (hataraku saibou),38 +ero-god,38 +ermuzibu,38 +eriyama,38 +erica (acerailgun),38 +ergouzi echo,38 +enomoto yuiko,38 +enico,38 +encanto,38 +emura subaru,38 +emupii maid promotion master,38 +emu (trigger),38 +emptycicada,38 +emanuella porlallora,38 +ema (shirotsume souwa),38 +elflorri,38 +elemental hero neos,38 +elee0228,38 +electric flower,38 +ekubo (ciaobero),38 +efu,38 +eencya,38 +edward kenway,38 +ediblepanda,38 +eden (shiroki yuutsu),38 +edelgard (isekai maou),38 +edchi,38 +eco (petticoat),38 +eba rin,38 +earl grey (girls und panzer),38 +dynasty ahri,38 +dvdraw,38 +dum sticky note,38 +du yaoye (arknights),38 +dragonith,38 +draco (monster musume),38 +doya,38 +dousunnen,38 +double bikini,38 +dot heit,38 +doroti.,38 +dorei k,38 +donoteat,38 +dongdong (0206qwerty),38 +domino's pizza,38 +dodoss dont,38 +dj sakura,38 +dipping,38 +dierbeibanjia,38 +di yi xing zian,38 +dhomochevsky,38 +devout,38 +detonator orgun,38 +derrick berg,38 +deni m,38 +debris (game),38 +deathwing,38 +daweykun,38 +darklux,38 +dancouga,38 +dakushido,38 +daison,38 +daifukumochi (qquuiieett),38 +cutie honey (character) (cosplay),38 +curry bread,38 +cure pine (cosplay),38 +csi,38 +cryptid,38 +cray (breath of fire),38 +cracking egg,38 +cp2980606,38 +cozie178,38 +corona (brand),38 +corki,38 +copy x (mega man),38 +conte ryuu,38 +comet (comet-san),38 +code: ultimate (elsword),38 +cocoa fuumi,38 +clyde donovan,38 +clutter,38 +clover days,38 +clone trooper,38 +clickdraws,38 +clearfile,38 +clause,38 +clark (159123),38 +city below,38 +cinderella (grimm) (cosplay),38 +churio,38 +chung1000,38 +chu chu (xenogears),38 +chocomoch,38 +chiyo (genshin impact),38 +chinchilla (animal),38 +chikuwabuta,38 +cheerio,38 +chaki-yam,38 +chae ara,38 +chaakusu,38 +catocala,38 +catch,38 +carla yeager,38 +cardinal armand,38 +caracorn,38 +cala maria (cuphead),38 +butterfly hat ornament,38 +bute (butegram),38 +bunsen burner,38 +bungou to alchemist,38 +bukatsu kikaku,38 +broken teeth,38 +broken finger,38 +boyfriend (houkago play),38 +bouto (paranoia),38 +bonten karasu,38 +bonesaw,38 +boku (isizakitakasi),38 +bob ross,38 +blue track suit,38 +blue pussy,38 +blue garter straps,38 +blue curtain,38 +blowtorch,38 +bloated,38 +blanchat,38 +blade of the immortal,38 +bismarcho,38 +bisco (letitbleed),38 +bird type girl system,38 +binu (nadenade),38 +binding blade (weapon),38 +binbinsuke,38 +binah (project moon),38 +billbine,38 +bigegg,38 +berotore,38 +beatus creation,38 +beat (trusty bell),38 +bburi,38 +bbaltong,38 +bazelgeuse,38 +barokkusu,38 +banana batter,38 +bam (s2leona),38 +balloon vine (flower knight girl),38 +bai xiao,38 +bad multiple views,38 +babamba,38 +azu (azu401),38 +ayumi (x-blades),38 +ayomo ro,38 +ayatsuri-doll,38 +aya (star),38 +aug para (girls' frontline),38 +auditorium,38 +ataru (ataru squall),38 +asuma shin,38 +astrid hofferson,38 +ascot removed,38 +ascot (rayearth),38 +asami you,38 +asakura nanao,38 +asakura hao,38 +asakou (n morninglight),38 +asakaze risa,38 +art itou,38 +arriet (shingeki no bahamut),38 +armais (me-chan337),38 +arisawa masaharu,38 +arino ayarei,38 +arhoangel,38 +ares (fire emblem),38 +arena of valor,38 +arashiyama 
sayoko,38 +arare mochiko,38 +arado balanga,38 +aquarium tunnel,38 +aper,38 +apapico,38 +aoba chise,38 +anzu yotsuba,38 +anyotete,38 +anxflower,38 +anotherxalice,38 +anna (girls' frontline),38 +anji lanuo,38 +animal earmuffs,38 +angruoxin,38 +angela balzac (cosplay),38 +angel (kof) (cosplay),38 +anchoku 0621,38 +amy26,38 +amiyakinyu,38 +amiko (frostedchocolate),38 +amejaga,38 +amaya yuu,38 +amano kokoko,38 +amane (amnk1213),38 +amamami prime,38 +amai shuga,38 +amahira,38 +amage kanade,38 +alto168,38 +alto,38 +alice garnet nakata,38 +alice-type underwear,38 +alfred (bloodborne),38 +alcremie (love sweet),38 +alchemist,38 +alannoran,38 +akuama,38 +akira-tama,38 +akimegu m,38 +akiba's trip the animation,38 +akechi (826988799),38 +akatuti,38 +akatsuki nagisa,38 +akatsuki akira,38 +akasha terminal,38 +aize,38 +airi (queen's blade unlimited),38 +aine (haibane),38 +aile strike gundam,38 +ahira yuzu,38 +ahase hino,38 +adictreader,38 +ada badguy,38 +achmad faisal,38 +a-king,38 +99aj,38 +53,38 +1860 (ichi),38 +156m,38 +110 gou,38 +103mol/l,38 +0v0 (l seohui),38 +zzzearly,37 +zooya,37 +ziyue,37 +zhi (yammycheese),37 +zhao 190,37 +zeradok,37 +zeppeki shoujo,37 +zenith greyrat,37 +zed (wild arms),37 +zarik kajiwara,37 +z24 (azur lane),37 +z-chan,37 +yuzuki karin,37 +yuzu (kimagure kankitsurui),37 +yuuuun0218,37 +yuusha-chan (gassaku no hito),37 +yuuki (snowhouse),37 +yuuichi katou,37 +yuugure,37 +yuu (hi lite),37 +yutsu,37 +yurigera 8959,37 +yurayura,37 +yunohara izumi,37 +yuni 0205,37 +yumiyokiak,37 +yulia valkova,37 +yuitsuki1206,37 +yuichiitan,37 +yui (spica),37 +yugeoryouki,37 +yu 416416,37 +youzen,37 +youu (midgard),37 +yousuke (yosk),37 +yosu,37 +yossyzero,37 +yoshimura masato,37 +yoshiikirablr,37 +yoshihiro (yoshihiro12190),37 +yoshidaworks,37 +yoshida haru,37 +yoriha yashiro,37 +yonagi kei,37 +yona (edenkasuga),37 +yo-suke,37 +yn1982,37 +yi zhi ai xi,37 +yi (199702090505),37 +yf-29,37 +yellow tulip,37 +yayanri,37 +yasubaru,37 +yarr,37 +yangus,37 +yanagi ryuuta,37 +yamisawa,37 +yamijam,37 +yamato mikoto,37 +yamamoto-genryuusai shigekuni,37 +yamajun (junyamaekaki),37 +yamada (fanfantaisa),37 +yaha-kui zashunina,37 +yagi (yagiumaaai),37 +yabataso,37 +xintianou,37 +xenon (kona-card),37 +writing on hand,37 +wrist belt,37 +wreathlit noel,37 +wooden deck,37 +wonderful magic (idolmaster),37 +wonder festival 2005,37 +wokashiya,37 +wo cai bushi zhushou,37 +wizwu,37 +wizarmon,37 +winter wonder lulu,37 +william.b,37 +wenfei ye,37 +weighing breasts,37 +waymay,37 +watashi,37 +watariganikun,37 +wasavi 8,37 +wakaba iro no quartet,37 +vvv,37 +vuipui,37 +vostok (vostok061),37 +vlad king (boku no hero academia),37 +vincent (hiyakuen),37 +victoria dahlgrun,37 +vert farbreton,37 +veca,37 +vaison,37 +uto ki te,37 +uta (yagashiro25),37 +usukawa (uskw sr),37 +usakou,37 +usagikoya,37 +uro (m369),37 +ura tomoya,37 +unown q,37 +unown !,37 +unosuke,37 +unohana kotoha,37 +unicorn (warship girls r),37 +uni tenten,37 +uni mmtab,37 +unajuu (food),37 +umi (k mpk),37 +umbrella hair ornament,37 +umapan,37 +uhou renka,37 +ugetsu (chimere/marie),37 +uesugi kenshin (sengoku otome),37 +uchuu sentai kyuuranger,37 +u nagi,37 +type 97 torpedo bomber,37 +twitter verified checkmark,37 +turpentine (pin),37 +turkish flag,37 +turkey leg,37 +ttc,37 +tsuutenkaku,37 +tsuruko turuta,37 +tsuru (680597),37 +tsurara0128,37 +tsuko (25mnts),37 +tsukimiya sara,37 +tsukihime souka,37 +tsugumi-chan (sora),37 +trouble witches neo,37 +triple vertical stripe,37 +triple action thunder,37 +trench knife,37 +training room,37 +trainer minamizaka,37 +traffic cone 
on head,37 +tr (lauralauraluara),37 +touya (roukaku),37 +touya (log horizon),37 +toumin,37 +total,37 +toshi makoto,37 +tororo inniina,37 +toro tarou,37 +tornadus (incarnate),37 +torn tabard,37 +toritori,37 +toriko,37 +toothless,37 +too much burger,37 +toniwing,37 +tokyo metropolitan police department,37 +tokoyami towa (cosplay),37 +tokomon,37 +tobitori,37 +toa (kitakaze setsuna),37 +tmku,37 +timburr,37 +till (idaten93),37 +tikuwazyousyou,37 +tifa lockhart's sporty dress,37 +thompson (solowingfh),37 +the sun (tarot),37 +the roma-like snowman,37 +the path,37 +the o,37 +the legend of zelda: four swords,37 +that's your girlfriend (meme),37 +thailand,37 +tgp11s hawkeye (mechanical buddy universe),37 +tettere,37 +tetsumaki,37 +tengai makyou,37 +tea (kino no tabi),37 +tauburn (star driver),37 +tartar,37 +tara olphoros,37 +tapioka,37 +tao ren,37 +tanaka jouji,37 +tamakibi,37 +tama (tm suac),37 +tama (nyan koi),37 +takiteru,37 +takeshiko,37 +takataka,37 +takamura masaya,37 +taiger,37 +tafuu (tortafu),37 +tachibanaei,37 +tabletop rpg,37 +tabby (.hack//),37 +syuraime 0,37 +syuntyu katze,37 +sysen,37 +sylia stingray,37 +sweets lingerie,37 +swedish uniform,37 +swan boat,37 +suzuneko-rin,37 +suzumoto mayu,37 +suzume anko,37 +support,37 +super karna (fate),37 +super danganronpa another 2,37 +sumoffu,37 +sumeshi,37 +sukireto,37 +suika (game),37 +suguharu86,37 +sugeno tomoaki,37 +suframare,37 +su2525,37 +su-57,37 +su-37,37 +str11x,37 +sterilesoil,37 +stan (grandblue fantasy),37 +sseli,37 +squirrel girl (yuuhagi (amaretto-no-natsu)),37 +spy,37 +sponge cake,37 +sploosh-o-matic (splatoon),37 +splash free,37 +sparseseethe,37 +soxy,37 +sorahoshi kirame,37 +sora tokumo,37 +sonolar,37 +sonic the hedgehog (2006),37 +sonic team,37 +sonia (fire emblem),37 +somwang 07,37 +sodemaru unagi,37 +sobamushi mo,37 +slammo,37 +skype,37 +skyfiss,37 +skull ring,37 +skinny jeans,37 +siska leontyne,37 +sirfy,37 +sinsa (alchemy stars),37 +single fishnet legwear,37 +silver (twisted wonderland),37 +siloteddy,37 +siam (meow13),37 +shynee (p&d),37 +shy (ribboneels),37 +shurakrgt,37 +shroomsworth (pmd-explorers),37 +shroomia,37 +shoving,37 +shoveling,37 +shounan no tamasu,37 +shoukin500,37 +shou (hanasakukoroni),37 +shokora momiji,37 +shoko (moccom),37 +shogu (shoguchime),37 +shochiku,37 +shizuku (shinrabanshou),37 +shizukagata naginata,37 +shiva (housamo),37 +shito miu (40hara),37 +shirouko,37 +shirokuma cafe,37 +shiro kanae,37 +shirayuri sakuya,37 +shippou (inuyasha),37 +shiony regis,37 +shiny lips,37 +shinozaki akira,37 +shino aki,37 +shinmeiji rinn,37 +shinkansen henkei robo shinkalion z,37 +shinigami bocchan to kuro maid,37 +shinidei,37 +shindou takuto (mixi max oda nobunaga),37 +shindayomon,37 +shimoda-kon,37 +shimeji (fantasista doll),37 +shima katsuki,37 +shiki no miko,37 +shii (cocoa),37 +shigatsu shizuki,37 +shiden (t41xz),37 +shibao,37 +shi er xian,37 +sherry leblanc,37 +shephira (cert),37 +shelmet,37 +shared sense,37 +sharaku koji,37 +shantae (cosplay),37 +shako (syakoba3),37 +shadow sae,37 +shabon,37 +sh22,37 +seven star,37 +seven colors of the wind,37 +seven (sao),37 +serjatronic,37 +serizawa katsumi,37 +serana,37 +senjitsu musou,37 +sengoku hanafuda kassen,37 +sen-jou,37 +selena (punishing: gray raven),37 +sekine hajime,37 +seitarou,37 +seibzehn,37 +secelia dote,37 +sebastians (madoka magica),37 +seabread,37 +sea nami,37 +scylla (monster girl encyclopedia),37 +scorpius malfoy,37 +scathach skadi (swimsuit ruler) (second ascension) (fate),37 +scarlet macaw,37 +satou sara,37 +sateriasis venomania,37 +sasugano roki,37 
+sasaki kotone,37 +saotome shizuno,37 +sanada ryou,37 +samidare yui,37 +samayoi,37 +sakurano mimito,37 +sakuramai (sakuramai 0525),37 +sakurai nana (moshichi),37 +sakura yuu (hzjy8485),37 +sakura len,37 +sakura kaede,37 +sakura futaba (cosplay),37 +saiya,37 +saionji yuri,37 +sain (fire emblem),37 +saikyou tomomi,37 +saetusum,37 +sadomochi,37 +sachico66,37 +sable able (animal crossing),37 +saberrung,37 +sabano niwatori,37 +ryu-san,37 +ryan jampole,37 +ruun (abcdeffff),37 +runesque,37 +rukuriritea,37 +rukia (incise soul),37 +ru&pauda (artist),37 +route 66,37 +rouko (shichizai shichifuku),37 +rotom (mow),37 +rotom (fan),37 +rosumerii,37 +rosaria (to the church's free spirit) (genshin impact),37 +rosa (hoshino),37 +rorretsim,37 +roku (ntbr fate),37 +rococo (girl cafe gun),37 +robin (gift of wild) (arknights),37 +rna (angel-smelter),37 +river (river ga),37 +ritz (h322),37 +ritsu (iqpi),37 +rise (alice or alice),37 +rina (hunyan),37 +rin (sen to chihiro no kamikakushi),37 +rilafm345,37 +rikuson,37 +riku (ukir125),37 +rikko (peso),37 +ricotta (ys),37 +ribbed hat,37 +rianastia flugel,37 +revia serge,37 +remonoart,37 +religion,37 +redluck,37 +red liquid (artist),37 +red innertube,37 +record jacket,37 +re cation,37 +razu (rus),37 +rawan,37 +raven (notorious teacher) (tales),37 +ravage (transformers),37 +ramiro de la cruz,37 +ralf,37 +rainbowscreen,37 +raimone26,37 +raiju (monster girl encyclopedia),37 +racing miku (2017),37 +rabbitbrush,37 +r-ko (rayla),37 +quiss,37 +qixi cui xing,37 +qinni,37 +qidai,37 +qianqianjie,37 +qi==qi,37 +purple male swimwear,37 +psycho soldier,37 +print (medium),37 +princess shokora,37 +primcoco,37 +primal hearts 2,37 +prehensile toes,37 +pra (prapon0904),37 +portgas d. rouge,37 +pop tab,37 +poo,37 +polyvora,37 +pocket mirror,37 +pixelflag,37 +pippin (pippin sol),37 +pioxpioo,37 +pinkbell,37 +ping pong (manga),37 +pina co lada (gate),37 +piko han,37 +pikatsu,37 +pi (math),37 +phoenix (azur lane),37 +philiera,37 +phantom rose,37 +phantom (mega man),37 +pfle,37 +peter (miku plus),37 +persia (rune factory),37 +penguu (green528),37 +peacoat,37 +parviz (gundam build divers re:rise),37 +paragus (dragon ball z),37 +papeapoo,37 +panko (drive co),37 +panettone (girls und panzer),37 +pandaun,37 +paaam,37 +p no hito,37 +p00nipooni,37 +ozeki miyabi,37 +oubou,37 +osu!,37 +origin (fefnir nightload),37 +orange innertube,37 +open beta (vtuber),37 +ooyodo (kancolle) (cosplay),37 +oohara tetsuya,37 +ooe kanade,37 +onomachi haruka,37 +onoda sakamichi,37 +onitobico,37 +onene,37 +omgamilla,37 +olympus (apex legends),37 +olaf (league of legends),37 +okome 2g2g,37 +okamoto natsuhi,37 +ojamu (kotarou),37 +ohguro mariya,37 +ogu (oguogu),37 +ogata kouji,37 +obobkkp,37 +nuggetkouhai,37 +nspa (spa-jcs),37 +nrmya,37 +noran,37 +nomura kasumi,37 +nomad (housamo),37 +noir corne (arknights),37 +nohoho (kakikonchi),37 +nodu,37 +noah (p&d),37 +nntn,37 +nishino hikoji,37 +nishimiya yuzuru,37 +niradama (nira2ratama),37 +nio altugle,37 +ninon (princess connect!),37 +nill,37 +nikotama mai,37 +nikori,37 +niko (silent.whitesnow),37 +nights (character),37 +nick carlyle,37 +next black,37 +new year's eve,37 +netsuke,37 +nemurase hime kara no okurimono (vocaloid),37 +nemupon (goodlucky),37 +nelke to densetsu no renkinjutsushi tachi,37 +neku397,37 +nekoyanagi reo,37 +nekomimimix,37 +nekomicha,37 +nekoma karin,37 +nekojima,37 +nekojarashi (yuuga),37 +neiigal,37 +necocafe lili,37 +nea (nongta2002),37 +nayuta ggg,37 +natsusemi,37 +nationale volksarmee,37 +naruto maki,37 +naraku (inuyasha),37 +nappooz,37 +nanjou 
(sumeragimishiro),37 +nang z1,37 +nanatsuki (arca-nize),37 +nanase mizuho,37 +naname ushiro,37 +namekuji ojiichan,37 +namae hamada,37 +nakoruru (cosplay),37 +nakashima naomi,37 +nakarai keijin,37 +nakamura 3sou,37 +nakajima nobuyo,37 +nakahara mai,37 +nakahara chuuya,37 +naitou,37 +nagisa (maid in heaven),37 +nagi (xx001122),37 +nagatsuki (azur lane),37 +nagase takeshi,37 +nagainosfw,37 +nabu (d4ng4nn6bu12),37 +nabeyaki neko,37 +na222222,37 +mystic (fft),37 +mydeerwitch,37 +mxwbr,37 +mutsuki hiiro,37 +mushiuta,37 +mushiki k,37 +murechika,37 +murata tomohide,37 +munakata reishi,37 +mukuo,37 +muimi (princess connect!),37 +mugi no hikyaku,37 +ms anne (erubo),37 +ms06s,37 +mozu 1oo,37 +moyashi koubou,37 +motomiki,37 +mother superior (diva),37 +mother13fucker,37 +moryo,37 +monowire,37 +monmonhomon,37 +momotose (hzuu xh4),37 +momotarekawa,37 +momi yuro,37 +mohg lord of blood,37 +moeoh ex,37 +mochi730,37 +mobugorilla,37 +mobile suit gundam: cucuruz doan's island,37 +mizushima serika,37 +mizushima kai,37 +mizuki haruka,37 +mizuhashi parusui,37 +miyata souji,37 +miyanii (myanie),37 +miyamayomema,37 +miyama mizuki (hoshihoshi1220),37 +miwatari renge,37 +mitsuki hana,37 +mitsui honoka,37 +mitarai yuuna,37 +mitaonsha,37 +mitamura-kun (landcell),37 +missiles,37 +mismagius (cosplay),37 +mishiyomi kazumi,37 +misawa hanei,37 +misawa daichi,37 +misasagi tasuku,37 +misao,37 +mios1225,37 +miokikoeru,37 +mio1030,37 +mintsu (ichi--kun),37 +minoominoomi,37 +mining,37 +minamoto-kun monogatari,37 +minami kawa,37 +milil,37 +mikiky,37 +mikanmochi,37 +mifune chihaya,37 +midori (mido0021),37 +miakiuehashi,37 +mia karnstein,37 +metal fight beyblade,37 +merc (merc storia),37 +menou (virgin road),37 +meiko (vocaloid) (cosplay),37 +mei (overwatch) (cosplay),37 +meganei,37 +mdnk,37 +mayonnaise bottle,37 +mayana (bbpp),37 +matsuo bashou (sengoku collection),37 +matsuhisa (ryo-tsuda1),37 +matsu (sekaowaoneok),37 +matou sakura (imaginary around),37 +matori (penguin batake),37 +materia (frame arms girl),37 +masakappa,37 +marsh badge,37 +marimotencho,37 +marco diaz,37 +mar-c!,37 +maon,37 +manticore (invisible dirge) (arknights),37 +mansu (user pnmp4287),37 +mania street,37 +mangattan,37 +mandolin,37 +manannan mac lir (first ascension) (fate),37 +manabe mana,37 +mamiya miya,37 +mamedanuki,37 +mako (avatar),37 +makihara izumi,37 +maki honoka,37 +makaron,37 +majicjiang,37 +maita,37 +maid in heaven,37 +mai dog love,37 +macartura08,37 +maca (macaca12),37 +maa (maa1),37 +luxuriou s,37 +lux arcadia,37 +luvluvvox,37 +lure,37 +lunastra,37 +lumia waber,37 +luffie,37 +lu bu,37 +lser116,37 +lout of count's family,37 +lost property control organization (samidare),37 +lkdv,37 +lizi (st3648),37 +living with hipstergirl and gamergirl,37 +living shadow,37 +liu bei,37 +little legend,37 +lip van winkle,37 +linked gag,37 +lillu,37 +lillin,37 +lilith (megami paradise),37 +lilia creative,37 +lijupy,37 +lightningstrikes,37 +light gun,37 +lifestream,37 +licking another's lips,37 +lewis gun,37 +leopard 2,37 +leon (leon the professional),37 +leeis cool,37 +leather bag,37 +le mars (azur lane),37 +layered stories zero,37 +lapis lazuline,37 +landorus (incarnate),37 +lalaco godspeed,37 +lakenightbug,37 +lace-trimmed shorts,37 +labotamochi,37 +kyurin (sunnydelight),37 +kyu (wein-seria),37 +kyoubashi amane,37 +kyonta,37 +kyle (suikoden),37 +kuzuryuu momoko,37 +kusanagi kei,37 +kurotani kyoukutsu,37 +kurosaki karin,37 +kuromiyagyo,37 +kuroinu momotarou,37 +kuro75268,37 +kuro-hero,37 +kurano mikoto,37 +kunami himehiko,37 +kumanosita,37 +kumagitsune,37 
+kukoi,37 +kubo takako,37 +koyami tsukito,37 +kowarekake no orgel,37 +kousaka kure,37 +kouba nobu,37 +kou v05first,37 +kotsuru kari,37 +kotokoto (jumon),37 +konno rei,37 +komito,37 +kom (1323736),37 +kokushibou,37 +kokonidarekairu,37 +kokko (kokko3045),37 +koizumi kazuaki production,37 +koizumi chika,37 +kogiso,37 +kogasa-san's sister,37 +kodai heiki,37 +kobayashi jin,37 +ko-ran,37 +kneeichigo,37 +kkato,37 +kizuna akari (tsubomi),37 +kiyo (kyokyo1220),37 +kitsune kemono,37 +kitamiya genbu,37 +kiso azuki,37 +kishiwada robin,37 +kishirika kishirisu,37 +kisaragi shuuji,37 +kisaragi reona,37 +kisaragi ren (vtuber),37 +kiryuu mina,37 +kiri toichi,37 +kiri nada,37 +kiomota,37 +kinose azusa,37 +kimiko (zakusi),37 +kimijima yayoi,37 +killmonger,37 +kikuta kouichi,37 +kiki kaikai,37 +kieran (fire emblem),37 +kie (yospcd),37 +kiduguch,37 +kiaoekakishitai,37 +ki lllorz,37 +kentarou,37 +kent (fire emblem),37 +kenki fujioka,37 +kazuki yone,37 +kazu (really in hot water now),37 +kazaharu matsuhata,37 +kayuma,37 +kawaraya koh,37 +kawamura takayasu,37 +katsuki yousuke,37 +katia (fantasista doll),37 +katheryne (genshin impact),37 +kate-fox,37 +kashino (maid for mayhem) (azur lane),37 +kasaran,37 +karute,37 +karasuma kyousuke,37 +kaopon,37 +kanata ryou,37 +kamonohashi (girls und panzer),37 +kaminashi nozomi,37 +kamen rider saber,37 +kamatori pokari,37 +kamado tanjirou (cosplay),37 +kaiba mokuba,37 +kadowaki miku,37 +kabi akaru,37 +k3 (dolphin brain),37 +k.pumpkin,37 +juumonji kaho,37 +justin (sera tony),37 +justice committee club member (blue archive),37 +joffre (azur lane),37 +jio (nayutarooo),37 +jichou senshi,37 +jiaxi daze,37 +ji dan,37 +jhcrow3,37 +jeremy anninos,37 +jean-paul,37 +jawli,37 +javelin (blissful purity) (azur lane),37 +jason peng,37 +japan world cup,37 +janjan umatarou,37 +jack masser,37 +iza,37 +itou chitose,37 +itoi toi,37 +ito t20a,37 +isoi haruki,37 +isobe47,37 +isakysaku,37 +isabeau (smt),37 +iris (sennen sensou aigis),37 +ipev,37 +inumaru (sougen no marogoya),37 +inui's meltran,37 +inu0831,37 +intertwined hair,37 +information sheet,37 +indonesia,37 +india,37 +inagaki mami,37 +in-ear earphones,37 +imperfect cell,37 +ikusa megami (series),37 +ikoma (kabaneri),37 +ikki the vikki,37 +ikaruga (knight's & magic),37 +iizuka ena,37 +ii tea,37 +ihrie,37 +iguana henshuu-chou,37 +icetea774,37 +iberiko (soinesitai),37 +hyper muteki (artist),37 +hyakkihei,37 +hukutuuprunes,37 +huazang,37 +houkago play 3,37 +hoshinomori chiaki,37 +hoshi wo miru hito,37 +horo 27,37 +hopebiscuit,37 +hongou shio,37 +honami takase ambler,37 +holding tie,37 +holding pill,37 +holding on,37 +holding manga,37 +hiyunagi,37 +hiyaori (hiyahiyaval),37 +hits (hitstts),37 +hirata yurisa (ghettoyouth),37 +hirata ryou,37 +hinomori anzu,37 +hino ryuu,37 +hinata tino,37 +hinano,37 +hinakurukuru,37 +hinagiku lala,37 +hina saori (himitsu),37 +himerinco,37 +hilary flail,37 +hijirime laeria,37 +hieihirai,37 +hidejiu,37 +hide (lindalindalinda),37 +hida mari,37 +henriette (fire emblem),37 +hellsinker,37 +hei yan-m82a1,37 +heart-shaped breath,37 +headcrab,37 +head removed,37 +hazard trigger,37 +hayosena,37 +hatski.sin,37 +hashiguchi takashi,37 +hasesese,37 +harutimu,37 +harusaruhi,37 +harukawa (itsuka-itsukaichi),37 +haruka-chan (pan (mimi)),37 +haru-chan,37 +harin 0,37 +hargon,37 +haramin3,37 +hanpen (nijigasaki),37 +hanaori kotoha,37 +hanakoizumi an,37 +hanakago,37 +hanairo heptagram,37 +hanachirusato (genshin impact),37 +hana-tamago,37 +han seol,37 +halo removed,37 +hakuryuu (slice dice and serve) (azur lane),37 +hakozaki chika,37 
+haitaka,37 +haibara nanaka,37 +habbitrot,37 +h (2de1flf8),37 +gym teacher,37 +guunhanchi,37 +gunkata,37 +gundou misuzu,37 +gundam breaker battlogue,37 +gumball watterson,37 +guee (ebifry teishoku),37 +gucchi,37 +gu jian qi tan,37 +gtolin chang,37 +griffon (devil may cry 5),37 +grey nipples,37 +green trim,37 +green hill zone,37 +green (among us),37 +greatpengh,37 +great auk (kemono friends) (carasohmi),37 +gomatarou (pixiv196136),37 +gomabura,37 +goma azarasi,37 +gold collar,37 +gohan (gohanchang),37 +gmanee,37 +glowing jewelry,37 +gleipnir (series),37 +glan (159cm),37 +gioha,37 +gina 61324,37 +gigi-chan,37 +ghost girl,37 +generator,37 +geistbox,37 +gauna 491,37 +garo:vanishing line,37 +garex,37 +gardnerverse,37 +garbage doll,37 +gamyuu (gamyu),37 +gamukami,37 +galaxea,37 +galarian weezing,37 +gaia (another eidos),37 +gabriel (dadaist),37 +fuzino,37 +fuuka,37 +futaba masumi,37 +fushigi no gensokyo,37 +furui,37 +furofuki daikon,37 +furi,37 +fumiomiomi,37 +fulunukko,37 +fukaya rin,37 +fujioka yatsufusa,37 +fujimiya,37 +fuji takanasu,37 +fuchibeppu chiyuri,37 +fuchi (0616tk),37 +fubuki (busou shinki),37 +fubuchun,37 +frilled pants,37 +frenzy (transformers),37 +french army,37 +fractal,37 +for all time,37 +footjob from behind,37 +fooring,37 +fonewearl,37 +flint (mother 3),37 +flandre (kaibutsu oujo),37 +flame princess,37 +firewatch (wilted cypress) (arknights),37 +fire dragon (inazuma eleven),37 +filir (ragnarok online),37 +fenrir (vehicle),37 +fengguan,37 +falsetto (suite precure),37 +falia the queen of the mountains,37 +fairy (girls' frontline),37 +eyewon (precure),37 +exodia the forbidden one,37 +eximmetry,37 +evileye (overlord),37 +eve (cloud meadow),37 +evan (cloud meadow),37 +ethan (pokemon) (cosplay),37 +esojima gary,37 +esearu,37 +ernula,37 +eringya (marl kingdom),37 +endou tatsumi,37 +enchi,37 +enamiru,37 +ena (quilt),37 +emmett brown,37 +emma (pokemon),37 +elvafirst,37 +elocca,37 +elliot (zkstxxx),37 +elbow on another's shoulder,37 +eijitsu,37 +edward elric's son,37 +eclair (girls und panzer),37 +echidna (last origin),37 +eccentriky,37 +drawcia,37 +drag009,37 +dosaken,37 +doreking,37 +dongsheng,37 +domo (domo kizusuki),37 +dola,37 +dodoria,37 +dodon gadon,37 +doctor (last origin),37 +dmanya,37 +dixie kong,37 +diving board,37 +dino (reborn),37 +dezuko (dezuko no heya),37 +deviljho (armor),37 +devil may cry 1,37 +denpa rasaito,37 +deno (denomina0),37 +denimcatfish,37 +dende (dndn222),37 +den'ei shoujo,37 +dededeteiu,37 +dead people,37 +daughter (bakuretsu hunters),37 +darli dagger,37 +dararito,37 +daniel fielding,37 +danheng (honkai: star rail),37 +dance with devils,37 +daifuku (usagi pie),37 +daichengqi,37 +dai mahou touge,37 +dai-gurren,37 +cyjalway,37 +cut-away,37 +curry (dbt),37 +cure yum-yum,37 +cure spicy,37 +cure peach (angel),37 +cure girl,37 +cu chulainn (megami tensei),37 +csc00014,37 +crow (aaaaaaa068345),37 +croriin,37 +crazy (zoza),37 +cracklecradle,37 +covering body,37 +courtyard,37 +coup (shun soku),37 +core gundam,37 +coon,37 +coney,37 +comomo (tk),37 +command and conquer: red alert 2,37 +columns ke,37 +coffgirl,37 +code geass: genesic re;code,37 +cocoon (yuming4976),37 +cocon (cocon q),37 +cloud.d,37 +clothes gagged,37 +claus lester,37 +claudia madobe,37 +claude faustus,37 +clara v,37 +cishi nianshao,37 +ciosuii,37 +cindy moon,37 +cinders,37 +cika,37 +choujigen game neptune sisters vs sisters,37 +chorisow (delta chord),37 +chocoboo,37 +chizu (fiute),37 +chiro (norishiro michiro),37 +chinpo ni wa katenakatta yo,37 +chilunchilun,37 +chichi chichi,37 +cheat kusushi no slow 
life,37 +chase! (love live!),37 +chappa (kaetodo),37 +chamooi,37 +chama kou,37 +chai (vtuber),37 +catharine blitzen,37 +castform (sunny),37 +carrie fernandez,37 +carmen (persona 5),37 +captain marvelous,37 +candy island (idolmaster),37 +cana alberona,37 +cain (granblue fantasy),37 +cagliostro (youthful uniform) (granblue fantasy),37 +caesar8149,37 +c-sha,37 +byoukitakashi,37 +byoubyou,37 +bursting,37 +burnt hair,37 +bunny eyepatch,37 +bumblebee (film),37 +build burning gundam,37 +bsq,37 +bristle,37 +brick st,37 +briareos hecatonchires,37 +bra tug,37 +bou (sen to chihiro no kamikakushi),37 +boomina,37 +bokyo,37 +body modification,37 +bob-the-bison,37 +bo9 (bo9 nc),37 +blz,37 +blunt-katana,37 +blue veil,37 +blue light,37 +blue blindfold,37 +bloomers on head,37 +blood on shoes,37 +blackwatch genji,37 +better girls,37 +bergmite,37 +beretta 93r,37 +ben-day dots,37 +belka,37 +bebitera,37 +beato2528,37 +bbmasa,37 +battle mage (dungeon and fighter),37 +batten japari dan,37 +baryan,37 +barbarian set (zelda),37 +bandaged horns,37 +bandabekken,37 +baka ouji persia,37 +bad weibo id,37 +bacharu (vtuber),37 +b-2 spirit,37 +azumaya akira,37 +azumake (azumakei),37 +azaka (rionrita),37 +az (shotatteiiyone),37 +ayumi tooru,37 +ayarin103,37 +ayanami (grade a sailor uniform) (azur lane),37 +ayamu (igakato),37 +axleaki,37 +axis (gundam),37 +aves plumbum9,37 +aurora (last origin),37 +augma,37 +atkm2,37 +atianshi,37 +asuka (viper),37 +astarte (strike the blood),37 +asskiler,37 +asshimar,37 +asm ln,37 +ashida ichirou,37 +asha (monster world),37 +ascoeur,37 +asakura yuuna,37 +asakura maina,37 +asai genji,37 +asahina natsuki,37 +asahi yanagi,37 +aruto (shake onigiri),37 +arnaud tegny,37 +armd,37 +arin (fanfan013),37 +aria the avvenire,37 +argentinian flag,37 +araki hirohiko,37 +araishi maro,37 +aosta (arknights),37 +aoi usagi (marinebluerabbit),37 +aoi mug,37 +aohitsugi samatoki,37 +aogami,37 +anne (wixoss),37 +angelo sauper,37 +andromeda (p&d),37 +anco (platanity),37 +amy razor,37 +ami thompson,37 +amawashi miku,37 +alternate horns,37 +altas,37 +alolan sandshrew,37 +almohada,37 +allegretto,37 +alikap,37 +aleksander nikolaevich her,37 +alatreon,37 +akms,37 +akishisu (air balance),37 +akino sayuri,37 +aki (broccoli-t),37 +akatsuki no amaneka to aoi kyojin,37 +akari (shichigatsu),37 +akaname,37 +akai miho,37 +akadou,37 +aka unto,37 +air kon,37 +aino osaru,37 +aikei,37 +aiban,37 +ai 1003,37 +ai (popotan),37 +ah yoshimizu,37 +agu,37 +against locker,37 +afuganisu-tan,37 +aemu (august life),37 +admiral hipper (muse) (azur lane),37 +acute (vocaloid),37 +acky bright,37 +acid head,37 +acha,37 +a9712mob,37 +a2ki,37 +a-91 (girls' frontline),37 +9 (saki-ooo-kiyu),37 +9150namihana,37 +4st injection,37 +4b,37 +3mm,37 +1s44c,37 +1b,37 +zonsters,36 +zon nura,36 +ziran juan,36 +zikryzero,36 +zhiyan li,36 +zeus (fate),36 +zepar (megido72),36 +zemzk,36 +zawashu,36 +zanka (the-only-neat),36 +zandan zero to na!?,36 +za yu a,36 +z2 georg thiele (azur lane),36 +z23 (breezy doubles) (azur lane),36 +yuumano yuuki,36 +yuuki koutarou,36 +yuuheisyoujyo,36 +yuugai choujuu,36 +yutsuki,36 +yurichi (artist),36 +yunoki rina,36 +yumingtongxue,36 +yumikoyama49,36 +yukiya (shiya),36 +yukishiro reika,36 +yuki tarou,36 +yuki (ookami kodomo),36 +yugiri princesca,36 +yuezheng longya,36 +yuanmaru,36 +yu chang (42680610),36 +yu-ri (kurione-sha),36 +younomiti,36 +yoshizaki mine (style),36 +yoshino charles,36 +yoruusagi,36 +yoruniyoruyoshi,36 +yorha type a no. 
2 (cosplay),36 +yorei (death0813),36 +yongzhe mei hong,36 +yone f15,36 +yomo (rb crr),36 +yoke,36 +yeonjun park,36 +yellow kirby,36 +yeh (354162698),36 +ycyc,36 +yazumi (yazzz),36 +yata misaki,36 +yasaka hitsugi,36 +yaplus,36 +yanwulazy,36 +yanagi fuyumi,36 +yan (situyan0303),36 +yami shigeru,36 +yamatsuki sou,36 +yamashita bungo,36 +yamanbagiri kunihiro (kiwame),36 +yamada gogogo,36 +yakkun,36 +yakisoba spill,36 +yajima mirei,36 +yaco (nuitnotte),36 +y0u0k,36 +xuan zhi yue ying,36 +xsk (ruanmumu),36 +xiongshouji,36 +xion (pulilulu),36 +writing on ass,36 +world w academy uniform,36 +workout clothes,36 +wood man,36 +wing umbrella,36 +wing diver,36 +windtalker,36 +wigglytuff (pokeacc),36 +white negligee,36 +white kyurem,36 +white-faced varre,36 +whirl (transformers),36 +wheat print,36 +werlosk,36 +wei ji,36 +wankosukii,36 +wanibuchi emoko,36 +wall-e,36 +wakum,36 +wakamiya henri,36 +wakaho riku,36 +waha~,36 +wabaki,36 +vsk-94 (christmas eve detective) (girls' frontline),36 +vivid bikini (idolmaster),36 +visual prison,36 +virtualcity-ex,36 +viktor (gensou suikoden),36 +vickyycy99,36 +verity (pokemon),36 +ventriloquism,36 +vegetto (xeno),36 +vasily (run211),36 +vanitas no carte,36 +vacation,36 +v (govvvvw),36 +v (cyberpunk),36 +uu3cm,36 +uta (xuyansong1996),36 +usui sachi,36 +usami renko (cosplay),36 +usamaru67pi,36 +uricotake,36 +uri-tan,36 +upturned umbrella,36 +uno (mon chat noir),36 +umu (phrase),36 +umino chika (character),36 +umenodo,36 +ume (driveume),36 +ukrainian text,36 +ukiukikiwi2525,36 +uit-25 (kancolle),36 +uirou (uirou1),36 +ueda kou,36 +uc-lab,36 +tyrunt,36 +two of diamonds,36 +turu,36 +tunomon,36 +tsuzuki maki,36 +tsuru no ongaeshi,36 +tsuneda,36 +tsumikiy,36 +tsukushi (741789),36 +tsukuda akemi,36 +tsujisaki,36 +tsuji643163271,36 +tsuchu,36 +trigun maximum,36 +trico (character),36 +transparent bikini,36 +traits,36 +traces,36 +tousang,36 +tourbox,36 +touma rui,36 +tote col,36 +toromi (samegami),36 +torn footwear,36 +toramimi-senpai,36 +torako (yotsubato!),36 +tooda riko,36 +too low salary,36 +tomo (otosuki),36 +tokugawa landine,36 +tokeijikake no ley line,36 +togakushi touko,36 +to love-ru darkness: idol revolution,36 +tirol chocolate,36 +timpani,36 +tifa lockhart's exotic dress,36 +thoto,36 +thistle (dungeon meshi),36 +the scarlet devil,36 +the raineman,36 +the flash (series),36 +the devil (tarot),36 +thanatos eros,36 +tera (trs82341711),36 +tenkyou no alderamin,36 +tenihaba nana,36 +tekuteku aruko,36 +teke-emon,36 +team moka,36 +tea stalk,36 +tatti art,36 +tatsumaki (cosplay),36 +tasuketemama,36 +tao mongarten,36 +tamura-chan,36 +tamago kago,36 +takuma sakazaki,36 +takezaka tarou,36 +takenaka hideo,36 +takaya tomohide,36 +takashiro (takashiro factory),36 +takase kou,36 +takasaki chidori,36 +takarada rikka's mother,36 +takanashi sora (soramyon),36 +takanashi nao,36 +takamine koyuki,36 +takami rin,36 +takada satsuki,36 +taira no chouki,36 +tachiki (naruki),36 +tabako-bon,36 +ta ma on,36 +t-hiko,36 +t-65 x-wing,36 +synn032,36 +syatihoko,36 +swatting,36 +suzukaze rin,36 +sutagu,36 +susu,36 +sumiya akihiro,36 +sukuneko,36 +sukly,36 +suiua,36 +suiseiseki (cosplay),36 +suiren (flower knight girl),36 +suigyoku (module),36 +suigi,36 +su xiao jei,36 +strong stars story,36 +striated caracara (kemono friends),36 +strash,36 +stilt house,36 +stegosaurus,36 +starmyu,36 +starbirbz,36 +sss (komojinos3),36 +spriggan (last origin),36 +spoken dollar sign,36 +spica parfait,36 +south (monookibako),36 +souma kira,36 +soseji (tjduswjd),36 +sos galactic patrol,36 +sooma4869,36 +sonyaneko,36 +sonozaki 
akane,36 +sonic x,36 +sonic the hedgehog (film),36 +sonao,36 +so korokoro,36 +snowsakurachan,36 +sleevejob,36 +skiddo,36 +ski boots,36 +sivamaron,36 +sister friede,36 +sin moriyama,36 +sig sauer p320,36 +sice (fft-0),36 +shunkaku,36 +shuma (daitokei),36 +shui ran moon,36 +shrek,36 +shouhou (kancolle) (cosplay),36 +shota-kun (shinjiro),36 +shiyo yoyoyo,36 +shiva (granblue fantasy),36 +shirt aside,36 +shirouri,36 +shiromaru illust,36 +shio (ayanepuna),36 +shiny (module),36 +shinpei (shimpay),36 +shinonome kokona,36 +shinigami no ballad,36 +shinigami (tukiyomiikuto),36 +shina 000,36 +shin megami tensei i,36 +shin maboroshi,36 +shimogamo yasaburou,36 +shimeji wyvern,36 +shimanakao (shimanaka sushi),36 +shimada (simada bu),36 +shima chizuru,36 +shima6644,36 +shikosour,36 +shikishima gangu,36 +shiki takuto,36 +shiki karin,36 +shiho elis,36 +shiemasu,36 +shidomura,36 +shian (trouble spirit),36 +shi ma86,36 +shenqi xiao hong zai nali,36 +shenq,36 +shenhe (genshin impact) (cosplay),36 +sharpening,36 +sharon kreuger,36 +shamrock,36 +shadeofshinon,36 +sex teacher tsuyoshi,36 +seven (11),36 +seto tinami,36 +sergei strelka,36 +serene (riviera),36 +sera (dds),36 +seok,36 +seo hiroshi,36 +senrireiri,36 +senkou tobaku,36 +senin liku,36 +sen-asanagi,36 +self milking,36 +sekken kasu barrier,36 +sei (seiryuuden),36 +scorpion (mortal kombat),36 +scorch mark,36 +schwarz bruder,36 +scheherazade (magi),36 +sayuma,36 +sayasama,36 +sayano (yakob labo),36 +satsuki miya,36 +satoimo sanda,36 +sate hatena,36 +sata anri,36 +sasara (fantasista doll),36 +sasaki maguro,36 +sasaka yayoi,36 +saria (stronghold) (arknights),36 +sarah kerrigan,36 +sankaku umako,36 +san mon,36 +samuraichamp,36 +sakuraume,36 +sakurajousui neko,36 +sakurai takahiro,36 +sakurai mikage,36 +sakurai kanade,36 +sakuma sanosuke,36 +sakugan,36 +sako rk 95 (upotte!!),36 +saki (koutetsu tenshi kurumi),36 +sakeno rarukan,36 +sakana45,36 +saiki rider,36 +sachiko (hayashi custom),36 +saber (royal dress) (fate),36 +sa tsuko,36 +s nyaau,36 +s (happycolor 329),36 +ryuusaki rei,36 +ryuuri (aoithigo),36 +ryuu32,36 +ryutaros,36 +ryudo (grandia),36 +ryairyai,36 +run sho,36 +rukkhadevata (genshin impact),36 +rui (kimetsu no yaiba),36 +rudang,36 +rota (bitmap1022),36 +romania (hetalia),36 +role player: okayu shimai no nenmaku potrait - gurigucha live!,36 +rokusai,36 +rokuroubuna,36 +rmb-93 (girls' frontline),36 +rivalun,36 +ririclub,36 +rirashi,36 +ripple star,36 +ringomaru,36 +ring toss,36 +rimamo,36 +rima (princess connect!),36 +rice (okome no naru ki),36 +ribbonsnek,36 +ribbon-trimmed footwear,36 +rezia,36 +rezeharu,36 +resident evil darkside chronicles,36 +rengeteki (touhou),36 +ren (tainca2000),36 +removing,36 +reman kamuy,36 +relila,36 +reishin (tenpurasoba),36 +reina (maitetsu),36 +reiko (tofuubear),36 +reddgeist,36 +redblacktac,36 +red card,36 +red button,36 +red arremer,36 +red (dq8),36 +reactive armor,36 +re eva,36 +rayuse,36 +rayrie,36 +rate rapiku,36 +rashid (street fighter),36 +rapama,36 +ramta,36 +raichu (cosplay),36 +ragi (pluie),36 +radiant historia,36 +radagon of the golden order,36 +racism,36 +r-18 jii,36 +quiz,36 +qian renxue zhuye,36 +pyroar (male),36 +pygmalion. 
(group),36 +puteru,36 +purerin,36 +pumpkin dance (meme),36 +pu lona,36 +psycho mantis,36 +protest,36 +producer (idolmaster side-m),36 +prince of wales (the laureate's victory lap) (azur lane),36 +pppppan,36 +potiri02,36 +potion maker,36 +pork pie sailor hat,36 +por,36 +pope (ragnarok online),36 +pope,36 +ponsu (ponzuxponzu),36 +pokestop,36 +podenco (wake up from a nap) (arknights),36 +plumber,36 +pisipisi,36 +pirozhki,36 +pirate (sekaiju),36 +pink no ayumi!,36 +pink leggings,36 +pink jumpsuit,36 +pineapp panda,36 +pima mashiro,36 +pilot chair,36 +pillow straddling,36 +pig snout,36 +pig mask,36 +piano (agneschen),36 +photo booth,36 +philuffy aingram,36 +pheasant,36 +phantom of the opera,36 +phantas-moon,36 +penyo1989,36 +pengin (takeoff425),36 +peekaboo,36 +peachpii,36 +peach (airline),36 +paruno,36 +parkour,36 +parking garage,36 +paratrooper,36 +paprika (character),36 +panther print,36 +panah,36 +pan jing (the legend of luoxiaohei),36 +pall,36 +pale fox (kemono friends),36 +pafekui,36 +packing peanuts,36 +pacific rim: uprising,36 +oxp (okipuu),36 +owannu,36 +ouu min,36 +ouma kennosuke tokisada,36 +otome kaibou (vocaloid),36 +otoishi akira,36 +osomatu-sama,36 +oskar (jasdavi),36 +osa (osakana1217),36 +oritonagi,36 +orihara kozue,36 +ooyama bokuchi,36 +oomikado itsuki,36 +ookoshi hidetake,36 +ooki ikutoshi,36 +ooka (ohkaworks!),36 +onsokuzekuu,36 +onoda masahito,36 +onna kishi no shiro,36 +onemegawatt,36 +omao,36 +oku (2964 okn),36 +okowa 0141,36 +okome2028,36 +oki kuro,36 +okamoto fujio,36 +oka mariko,36 +oishi kuwagata,36 +ohkubo atsushi,36 +ogre battle,36 +ofly (ofly252),36 +odajima mayu,36 +o1118,36 +nyuuhin,36 +nyanpassu~,36 +nyanko kaitou,36 +nyala (nyala 766),36 +november 11 (darker than black),36 +nova (warframe),36 +norwegian flag,36 +northstar (titanfall),36 +noritake,36 +norato,36 +noodle-y,36 +nonosaki tsubasa,36 +nomuraumu,36 +nomomono eraser,36 +nogami ryoutarou,36 +nof,36 +nobuyuki,36 +noah (livas),36 +noa p,36 +no sense of shame,36 +nkise,36 +niwa haruki,36 +nishiwaki yuuko,36 +nishinomiya saku,36 +nishimiya momo,36 +nishigyou teraa,36 +niseoto,36 +nils nielsen,36 +nikuman (samara),36 +nijiomu,36 +nijigen dream fever (vocaloid),36 +nijie-tan,36 +nigoolas,36 +nightmare (kirby),36 +nia (x x02),36 +neo geo,36 +nekopantsu (blt),36 +nekonetoru take,36 +nekomata (youkai hyakki-tan!),36 +necromancer (final fantasy),36 +nawol,36 +natsume konoha,36 +natori sayaka,36 +narynn (character),36 +narukaze minamo,36 +nanigashi (xla009),36 +nanataroo 7,36 +nanami kanata,36 +nanaji (7ymf),36 +nanaironokabi,36 +nal (naru-1),36 +nakumonaga uma,36 +nakano hinata,36 +nakano haito,36 +nakamura (marakimi),36 +nakajima yua,36 +naizo (kimosugimasu),36 +naidong,36 +nagura shiro,36 +nagi (siki2n),36 +nagase daisuke,36 +nagano mamoru,36 +nabatani,36 +na1 pkmn,36 +n3o2,36 +mzrz,36 +mysterious eyes (idolmaster),36 +mxsoundtube,36 +muutsu (maxwell0916),36 +mussyu danachan,36 +muruchi-,36 +murayama yuiri,36 +muramasa (so-hi-shikan),36 +mura kuroe,36 +mumu yu mu,36 +mumu vosp,36 +multicolored butterfly,36 +mughi,36 +mtk (souko),36 +msg01,36 +mouse on shoulder,36 +mount whip,36 +moti ken,36 +morrigan (dragon age),36 +moriton,36 +monsoon (metal gear rising),36 +monowheel,36 +mono (mono zzz),36 +monkey jon,36 +mon eree,36 +momo alto,36 +mole on ear,36 +mogmogyasai,36 +mogami (warship girls r),36 +mofuka,36 +mod fashion,36 +moai21,36 +mkt,36 +mksm,36 +mizumizzumiz,36 +mizukami satoshi (world trigger),36 +mizu mochi,36 +miyotarou,36 +miyauchi hikage,36 +miyano mai,36 +miyama fugin,36 +miyako (miyako lplover),36 
+miya (24toys),36 +miy 001,36 +mistral nereis,36 +misohagi,36 +miro (katsushikashibamata),36 +mint (uchi no pet jijou),36 +minior (red core),36 +milk box (leoleo963852741),36 +mikami rika,36 +mikago kotaro,36 +midori no hibi,36 +microsoft office,36 +micopp,36 +michitose michiru,36 +michelle hoefener,36 +mia clementis,36 +meythia,36 +meuneyu,36 +metroid prime 2: echoes,36 +methyl key,36 +meronpanna (mikoniito),36 +mephia,36 +meowlian,36 +mentos,36 +memories off#5,36 +meisa,36 +mechanical skirt,36 +mdoctkscb,36 +mclaren p1,36 +mayata,36 +matsumoto (vivy),36 +matsuda98,36 +mathnote,36 +matangomu-chan,36 +massuru,36 +mashitaka,36 +mashiro io,36 +masaoka tomomi,36 +maruhana,36 +margarita blankenheim,36 +marcus (ff9),36 +manual,36 +maniwa koumori,36 +mamegohan,36 +malik al-sayf,36 +makoron117117,36 +makina00,36 +makimura kaori,36 +maki chitose,36 +makai (touhou),36 +maimu (polka),36 +magician servant,36 +magentapeel,36 +maewix (artist),36 +madaraki veronica,36 +mabuta (byc0yqf4mabye5z),36 +m3 (mmm003),36 +m249 saw (girls' frontline),36 +m&m's,36 +lynn lambretta,36 +luonawei,36 +lumbbyz,36 +lowell (ouranoss2kanata),36 +lovehammer,36 +loup-garou,36 +looking at pussy,36 +lockpick,36 +liu mei fan,36 +lisesharte atismata,36 +lip ring,36 +linhe de chuangzi,36 +lin-lin,36 +lilynette gingerbuck,36 +lilith (borderlands),36 +levin sword,36 +letty whiterock (cosplay),36 +letta,36 +leonardo (fire emblem),36 +leo-dont-want-to-be-a-painter,36 +lemon snail,36 +leknaat,36 +lee on,36 +leaving,36 +layfon alseif,36 +laxia (ys),36 +law (tales),36 +lank (lankdesu),36 +lani (pink pink),36 +landship,36 +l.n,36 +l-gaim,36 +kyouran souryuu,36 +kyoungi nyang,36 +kyoukai (kingdom),36 +kyoudai no jouji jijou 2,36 +kyosuke1413koba,36 +kyoji (tmtrymetm),36 +kuromu (kaeru),36 +kuro n314,36 +kuro (chrono),36 +kurisugawa kii,36 +kurihara mari,36 +kuri dora,36 +kurenaiz,36 +kurayashiki tae,36 +kurarika,36 +kuo,36 +kumuo (mirakurufusao),36 +kumeri0804,36 +kumashiro,36 +kumacchi,36 +kukikomori,36 +kuga kokage,36 +kudou taiki,36 +kuchibue (tanima club),36 +ku99 (kugugu),36 +kraken (monster girl encyclopedia),36 +kotonemaru,36 +kotoba (1074421015),36 +koshirae kenji,36 +korg,36 +koppa mijinko (series2023),36 +koori (haraiso),36 +konota ko,36 +konoe haruka,36 +kono dio da (meme),36 +kondou (dioptrie),36 +kometa (kome 17),36 +komame (wanton),36 +komagarita,36 +kokutou,36 +kokuoh,36 +kokonotsunbai,36 +kojima genta,36 +koiwai flora,36 +koibumi,36 +koharuko (khrkhrk),36 +kkochmeli,36 +kiyomiya,36 +kitorakito,36 +kitayama shizuku,36 +kita (higan),36 +kisaragi tomi,36 +kisaragi mifuyu,36 +kisaku,36 +kiryuu aika,36 +kiriyama2109,36 +kirishima goro (55541),36 +kirieroido iii,36 +kirby and the amazing mirror,36 +kiraware,36 +kingyo (g-fish),36 +king game,36 +king gainer,36 +kimukimu,36 +kimi omou koi,36 +killian phegor,36 +killer bee,36 +kiki witch,36 +kikan (kikanoe),36 +kikaider (series),36 +kihel heim (cosplay),36 +kido airaku,36 +khloe aliapoh,36 +key747h,36 +keroko (frolicfrogs),36 +kenmochi shiena,36 +kendo club president (rangu),36 +kemuri jatarou,36 +kazami (kuroro),36 +kaya xavier,36 +kaworu (kaw lov),36 +kawamura yuzuriha,36 +kawamori shouji,36 +kawaiipenpen,36 +katuu,36 +katase yuki,36 +katase (high school dxd),36 +kataoka megu,36 +katakura kojuurou,36 +karipaku,36 +karashino,36 +kanzaki ayane,36 +kanzaki akihito,36 +kanisaka,36 +kani bonara,36 +kamio yuunosuke,36 +kamen rider mach,36 +kalata,36 +kakeshou,36 +kajichan,36 +kaiyoko star,36 +kaguya luna (cosplay),36 +kagura takeshi,36 +kageyama shinobi,36 +kagawa rin,36 +kagami ei,36 
+kagami (haret46),36 +kabos,36 +kabane,36 +justin hsu,36 +justeaze lizrich von einzbern,36 +junsun,36 +junji,36 +jun (ittla),36 +john marica,36 +johannes voss,36 +jo area,36 +jne,36 +jingb dx,36 +jifuwabe,36 +jagdpanther,36 +jade (ghostblade),36 +jack of hearts,36 +jack cooper,36 +izumi (sachikara),36 +izumi39,36 +izanami hifumi,36 +iwasaki minako,36 +itsia,36 +its not you sit down,36 +itou mikoto,36 +itokatsu,36 +ist lei mikan,36 +ishizuka chihiro,36 +ishihara usumi,36 +isaac netero,36 +irohero,36 +iritoa,36 +iris (ryou@ryou),36 +irenji,36 +irene (ogami kazuki),36 +ippachi,36 +ionosphere,36 +invader zim,36 +insect pin,36 +inraku no miko ntr,36 +inoshin (inixia1748),36 +indiana jones,36 +indeedee (male),36 +imu (acmg3475),36 +implied object insertion,36 +ikedan,36 +iida nana,36 +ifurita,36 +iftuoma,36 +ie (nyj1815),36 +ichimi tougarashi,36 +ichikawa meisa,36 +ichihi (spinon),36 +ichiban renga,36 +ice king,36 +ibashi roni,36 +ib (yu-gi-oh!),36 +i don't have a single regret in my life,36 +i am jemboy,36 +i-riya,36 +i-19 (pillowy paradise) (azur lane),36 +i-19 (kancolle) (cosplay),36 +hzk (ice17moon),36 +hyouka (yashiro sousaku),36 +hyakute gyojin,36 +hwaen,36 +huo linger (wanmei shijie),36 +human pyramid,36 +huli daxian,36 +hukasikasi,36 +hosizora (sparetime),36 +hoshizora tetsudou to shiro no tabi,36 +hoshino erika,36 +hoshiineko,36 +honjou hayate,36 +home (houmei),36 +holmy (show by rock!!),36 +hokoodo,36 +hokkamuri,36 +hn (artist),36 +hmng,36 +hiyashinssu,36 +hitotu no naka,36 +hito komoru (style),36 +hit-kun,36 +hisui hearts,36 +hiroshi (hiroshixhiss),36 +hiroikara (smhong04),36 +hiro (spectral force),36 +hiro (hibikigaro),36 +hiraoka kanae (akatsuki usagi),36 +hinoki (hinoki-buro),36 +hinase kei,36 +hinami riku,36 +hinagiku lulu,36 +himesuzu,36 +himekawa fubuki,36 +hime (crunchyroll),36 +higashi (azm),36 +hidari (coletica),36 +hi (ibisf5umauma),36 +hemoglosso,36 +heinrich (fernanderuddle),36 +hebrew text,36 +hebereke black,36 +heaven burns red,36 +headphone-chan (splatoon),36 +he (eve),36 +hayami kyuuen,36 +hayabusa hideki,36 +hato niku,36 +hat launch,36 +hasumi urara,36 +hashibiro kou (garapiko p),36 +hasewo,36 +haruue erii,36 +haruka4413,36 +haru (ryosios),36 +harima mika,36 +hare (yamihuji),36 +hard-degenerate,36 +harada chie,36 +haniwa-dako,36 +hand on knees,36 +hand on back,36 +hanashiro yuuka,36 +hanamaru-s,36 +hana (ookami kodomo),36 +han gong,36 +hamihe,36 +hamafugu,36 +halloween baelz,36 +hakusan yoshimitsu's fox,36 +hakumei kosen,36 +hakamichi hideaki,36 +hakama removed,36 +hajilove -making lovers-,36 +hadome,36 +hachikei,36 +gyatto624,36 +gyari (bird),36 +guragief,36 +gundam barbatos lupus,36 +gudadan,36 +guchuko,36 +grilled eel,36 +greymon (nodoame1215),36 +grape vine,36 +grand cupido,36 +grace (suisei no gargantia),36 +gorogoro (sfx),36 +goldeen (cosplay),36 +godzilla vs kong,36 +goddess kiss,36 +glow (vocaloid),36 +glock 18c,36 +glenn (chrono cross),36 +girl dm,36 +gingrjoke,36 +gingham legwear,36 +gero-gh,36 +gawain (code geass),36 +gangure (disemboweled),36 +g3pen,36 +g-ist,36 +fuyuki jun,36 +furuya satoru,36 +furisode (pixilvina),36 +fur sweater,36 +funamushi nomore,36 +fumio (snnmfmw),36 +fukuzou,36 +fujizarashi,36 +fruit hair ornament,36 +front mission,36 +frilled lizard (kemono friends),36 +frank araya,36 +foxxarius,36 +foot wraps,36 +foot wings,36 +foot on back,36 +food fighter 441,36 +flower (kowarekake),36 +flogger,36 +fley3black,36 +fle en,36 +flash suppressor,36 +flammie,36 +finger on nose,36 +fia (riviera),36 +fflora,36 +fergus mac roich (young) (fate),36 
+fenrir (housamo),36 +femuto,36 +febrie,36 +fasorasi,36 +fams (group),36 +fakegeo,36 +face hold,36 +evilblade,36 +evil (okame nin),36 +eve no jikan,36 +etihw,36 +esuto,36 +erty113,36 +eric proctor,36 +eri muuton,36 +enpu (ufo),36 +eno (mauritz stiller),36 +enk,36 +engi threepiece,36 +energy whip,36 +emmxd325,36 +emily (pandora hearts),36 +emilia (krt girls),36 +eileen (a-soul),36 +eikiri eimu,36 +eggnog cookie,36 +egg (cknlun),36 +eden (sennen sensou aigis),36 +ed (end),36 +eblmeka,36 +e-0057,36 +dydoe,36 +dusting,36 +drillhorn sword,36 +dress suit,36 +dreamsyndd,36 +doushindou,36 +dorowa no hito,36 +dormouse (alice in wonderland),36 +donki (yeah),36 +domo-kun,36 +doma umaru (cosplay),36 +dokkoida,36 +doham,36 +dobok,36 +dizzy (guilty gear) (cosplay),36 +district 9,36 +dinyc,36 +din djarin,36 +dimple,36 +die (ohisashiburi),36 +die (f mega),36 +diagonal-striped bikini,36 +dgk,36 +desu,36 +demitas,36 +dekappara futoriusu,36 +deepa mitra,36 +dazol,36 +datsuyuru,36 +darwin's game,36 +darunia,36 +darudana,36 +dana (pokemon),36 +daji (monster strike),36 +daitokuji biko,36 +d.sum,36 +d-nobi,36 +d-art,36 +cybela kuto,36 +cy fros,36 +curry man,36 +cure princess (sherbet ballet),36 +cubesona,36 +cthylla (chaos code),36 +crusty sean,36 +crescent wand,36 +crab print,36 +counter-strike,36 +cooking oil,36 +computer chip,36 +colt (monster farm),36 +collared bikini,36 +collage background,36 +coffee milk,36 +coconat summer,36 +coat partially removed,36 +clear regulus,36 +claudia emma cross,36 +circuit,36 +cipher (ace combat),36 +cinnamon stick,36 +ciela lapana,36 +choukai (kancolle) (cosplay),36 +choujuushin gravion zwei,36 +chocolate cigarette,36 +chloe price,36 +chizuru-chan kaihatsu nikki,36 +chinetsu15,36 +chinchilla tail,36 +china jersey (meme),36 +chin on palm challenge,36 +chikuwa (tikuwa),36 +chikkinage nage,36 +chiki (chikibw),36 +chihiro ayaka,36 +chie hallard,36 +chesed (project moon),36 +cherrim (overcast),36 +cherim,36 +cheng,36 +checklist,36 +check my note (idolmaster),36 +charcoalo,36 +chanohata tamami,36 +channee (cluseller),36 +change (437483723),36 +chako nejio,36 +celine (to love-ru),36 +celery,36 +caro-xy,36 +caren (mermaid melody pichi pichi pitch),36 +caooll,36 +cao cao,36 +canno,36 +camouflage gloves,36 +calamity mary,36 +cachet,36 +byo (kuro usagi),36 +button prompt,36 +burningblossom,36 +bungaw,36 +buffering,36 +brown sarong,36 +brk 603,36 +bright memories (idolmaster),36 +bremerton (azur lane) (cosplay),36 +bra in mouth,36 +bosch 1/64,36 +boooo-im,36 +booker dewitt,36 +booba (meme),36 +bonnou-chan,36 +bon clay,36 +boise (sheepish sapphire) (azur lane),36 +boa sorte,36 +blue bird (akitsu taira),36 +bloopers,36 +blockun,36 +blanco026,36 +blade of mercy,36 +black tri-stars,36 +black moon,36 +biyora,36 +bismarck (battleship),36 +bird on lap,36 +bijian de linghun,36 +big band,36 +bifurcated jaw,36 +bidarian,36 +beth (shepherd0821),36 +bentoss detritus,36 +benson moretti,36 +beluga whale,36 +becky-4545,36 +beating,36 +bbul horn,36 +battlecruiser,36 +basement,36 +baobab,36 +bandam,36 +banana popsicle,36 +banamons,36 +bambietta basterbine,36 +baizu (guszx),36 +badou nails,36 +bachira meguru,36 +b.bor,36 +azumadori tsugumi,36 +azuma shouko,36 +azuma kyoutarou (artist),36 +azu (azzz),36 +azalea (love live!),36 +ayuko91,36 +aya rato,36 +avtechno!,36 +avalugg,36 +austrian flag,36 +atokniiro,36 +athyra,36 +asumi (000),36 +asugi (fire emblem),36 +asteria (driftingprhp),36 +assless swimsuit,36 +asprach,36 +ashinamaturi,36 +asazuke25,36 +asahi (sakanasakana),36 +asagiri gen,36 +arvis (fire 
emblem),36 +aruki,36 +arugou,36 +arthur fleck,36 +armkreuz,36 +arm on thigh,36 +ariorihaberi,36 +arima kanae,36 +arch lapin,36 +arcane viktor,36 +arcadia090,36 +arc phone,36 +arai ako,36 +aqua fur,36 +apu spills his tendies (meme),36 +aphrodite (suite precure),36 +aotsuki takao,36 +aopoke,36 +aonik,36 +aoki mei,36 +aoki daisuke,36 +aoi sora-maru,36 +aoi-tama,36 +ann yosh,36 +anko koubou,36 +anja (madoka magica),36 +animal on ass,36 +animal background,36 +angelica ainsworth,36 +amy (bakuretsu tenshi),36 +amorphous,36 +amidasketchbook,36 +ameya (okemu ame),36 +amatsuka watayuki,36 +amase (yagami666),36 +amanosora,36 +amano misao (battle programmer shirase),36 +amamori kohan,36 +amamiya ten'ya,36 +amakuri3000,36 +amaha tsubasa,36 +amaguri (guri 0330),36 +alternate tail,36 +alpha (smashbox),36 +alouette (mega man),36 +almonde jagger,36 +alicia (queen's blade),36 +ali al-saachez,36 +alfa romeo,36 +alexandria (xenoblade),36 +alcremie (berry sweet),36 +alba (senyuu),36 +akiyama jungorou,36 +akitama,36 +akisame ruu,36 +akihara ryou,36 +aki (suterii),36 +akatsuki (aktk511),36 +akasaka aka,36 +akaisu,36 +aka shiro kiiro,36 +aisha (sennen sensou aigis),36 +airbo,36 +aira (qwedcxza49),36 +air qh,36 +air hockey,36 +ailu,36 +aiko (aiko 54),36 +aiamu iamu,36 +ahri (league of legends) (cosplay),36 +agasa (akasa anodan),36 +after sweet kiss,36 +aeus,36 +aerosol,36 +aekun cben,36 +aeiou (yoako),36 +adunba rell,36 +adelie penguin (kemono friends),36 +acxg,36 +aburai yui,36 +abarai ichika,36 +96neko mtm,36 +8981,36 +6mint,36 +6274,36 +4410 (kanimiso),36 +402 (o0 xxx),36 +3tohei,36 +329kome,36 +2 nostg,36 +125buri,36 +zyaki,35 +zluu,35 +zinpati,35 +zijiang m99,35 +zihacheol,35 +zettai ryouiki sex royale!!,35 +zeroki (izuno),35 +zergling (cdror1004),35 +zenon zogratis,35 +zengi,35 +zen (weishanzhe),35 +zawa (zawzawranran2),35 +z23 (upgrade failure?!) (azur lane),35 +yy,35 +yuzin,35 +yuyu (yuyudesu0806),35 +yuyu (flip flappers),35 +yuuki miaka,35 +yuuki (yunky373),35 +yuuki (ashitahare),35 +yuudachi (the bride of solomon) (azur lane),35 +yutoriko (candy0905),35 +yuran (erubo),35 +yunopan chako,35 +yumeneko nyaa,35 +yumejidake,35 +yume koucha,35 +yukiyaii,35 +yukitourou,35 +yukiru akitera,35 +yukiguni eringi,35 +yuki rin,35 +yuka (mikuxluka),35 +yuiki yaya,35 +yuib3 (yuibitch),35 +yugimaru (sugar),35 +yuelight,35 +yuebaihezi,35 +yu jiu,35 +yu-gi-oh! 
go rush!!,35 +yrel,35 +yosida komati,35 +yoshitoki (kisshin),35 +yoshinaga haru,35 +yoshimori isa,35 +yoshimitsu,35 +yoshidanoe,35 +yoshida morohe,35 +yorozuya hyakuhachi,35 +yoritomo (housamo),35 +yonko,35 +yong jie wujian,35 +yoncha,35 +yohioloid,35 +yohchi,35 +yogetsu high school uniform,35 +yja61,35 +yatsunote,35 +yashino 84,35 +yaotome gaku,35 +yao haa dushi,35 +yanagino (yanagino3),35 +yamunashi,35 +yamaimo torotoro,35 +yamada jirou,35 +yamabuki ryuu,35 +yama bikko,35 +yakumo koishi,35 +yakkey,35 +yagoro kusuriya,35 +yagami coco,35 +yache,35 +xue fengzi,35 +xpisigma,35 +xiaoxiao nanjue buyaokeng,35 +xane (fire emblem),35 +wormhole,35 +witchcraft,35 +witch springs,35 +winter (winterinkoakuma),35 +windows 7,35 +winbay01,35 +wimifu,35 +willy pete,35 +wide brim,35 +whyt,35 +wheelie (kirby),35 +wh1te,35 +wet floor sign,35 +weser (azur lane),35 +weill,35 +weddie (dq10),35 +wayne chan,35 +watari yuu (haskey),35 +washizu iwao,35 +waiko,35 +wadatsumi garland,35 +visbou,35 +viola (instrument),35 +vgerotica,35 +venuscho,35 +valona,35 +valhalla0707,35 +utara canaria,35 +usuba kagero,35 +usami sumireko (cosplay),35 +usalxlusa,35 +ursula raiment,35 +unou (mousou deguchi),35 +unoshima kanaka,35 +une,35 +undone bikini,35 +unconventional vibrator,35 +umi no tarako,35 +umehara emika,35 +ultramarines,35 +ultraman tarou,35 +uiu,35 +uiokv,35 +ui-chan no niizuma diary,35 +ueki-chan,35 +uda megumi,35 +uchuuneko (vtuber),35 +type-alpha,35 +tsurugi yasuyuki,35 +tsukidaruma,35 +tsuki yuuhi,35 +tsujieiri,35 +tsuizi,35 +tsugai kogarashi (vocaloid),35 +tsuda minami,35 +tsuchiya (1315444),35 +trumpet boy (meme),35 +truffleduster,35 +tron: legacy,35 +tribe cool crew,35 +traptrix atrax,35 +transformers super-god masterforce,35 +touhou mystia's izakaya,35 +touhoku rakuten golden eagles,35 +toto mame,35 +torofu,35 +torn flipper,35 +toriseka,35 +torio (mocd1985),35 +torii hair ornament,35 +tonyo (milky crown),35 +tomtomjm,35 +tomorrow (konosuba),35 +tomo futoshi,35 +tomako (tatihitoe),35 +tokyo clanpool,35 +tokurei sochi dantai stella jogakuin c3 bu,35 +tokiwa senkei,35 +tokitou yuichirou,35 +tokisaka ena,35 +toketa-sekai,35 +togashi (choco-bakama kitchen),35 +toga (toganawa),35 +todoroki (xttn9dul),35 +tkr (lovedelic99),35 +timins,35 +tiler (tiler00),35 +tienao,35 +thunder force,35 +three twosix,35 +three little pigs (sinoalice),35 +thorn (ashthorn),35 +thirty-second note,35 +the hammer,35 +thanatos eros (cosplay),35 +thailand (hetalia),35 +tgxx3300,35 +tetose,35 +tessou tsuzuri,35 +teruteru (teru teru),35 +teruki,35 +teostra,35 +tenshi mikadokuni,35 +tenni noboru,35 +ten (tentojidon),35 +tekoki karaoke,35 +team instinct,35 +tbf avenger,35 +tatyaoekaki,35 +tateshina,35 +tatemiya saiji,35 +tark (318),35 +tansug (tansuk88),35 +tanpakuroom,35 +tanikaze nagate,35 +tang xinzi,35 +tanemon,35 +tanaka ryuunosuke,35 +tanaka (ueno-san wa bukiyou),35 +tana (tanabio),35 +tamari (flawless),35 +tamamo cat (third ascension) (fate) (cosplay),35 +tamam gnsn,35 +tamaki iroha (swimsuit ver.),35 +tamaki (doa),35 +tama (ponz3o1),35 +takuji (dekosenn),35 +takokichi,35 +takeuchi hiroshi,35 +takemitsu-zamurai,35 +takejun,35 +takayuki hiyori,35 +tail mouth,35 +tail in mouth,35 +tadatomo (housamo),35 +tachibana kukuri,35 +t6 ti,35 +t3x,35 +t0m (projektmelody),35 +t-72,35 +symboli rudolf (archer of the white moon) (umamusume),35 +swallowtail butterfly,35 +suzuna (summer) (princess connect!),35 +suzume (summer) (princess connect!),35 +suzuki hayase,35 +suzuki gou,35 +suzuhara (13yuuno),35 +suzakuin tsubaki,35 +suyi-j,35 +suweeka,35 +susukida 
(sususabu0710),35 +super robot wars k,35 +super famicom cartridge,35 +sunsuke,35 +sunmil,35 +sumi (joker),35 +sumashi,35 +sui25jiyuu,35 +sueyen,35 +subarashii pose,35 +stregoicavar,35 +sthk,35 +stephanie sybydlo,35 +stephanie priscilla,35 +status bar,35 +starpoint lance (fire emblem),35 +star wars: the empire strikes back,35 +standing at attention,35 +ss39,35 +sr-71 blackbird,35 +spring water,35 +specterz,35 +sp (sweet potato),35 +souya akira,35 +south park: the stick of truth,35 +sophie hatter,35 +songwut ouppakarndee,35 +sole survivor (female),35 +softenni,35 +soap dispenser,35 +snafu (snafy snu),35 +sky background,35 +skullworms,35 +sizu,35 +sirakaro,35 +sira (user dswn7488),35 +single inverted nipple,35 +silk flower (genshin impact),35 +silhouette sakura,35 +sierra (ws),35 +si kongqi (hua jianghu zhi bei mo ting),35 +shynesslady,35 +shunnyun,35 +shun'ei,35 +shounibyou (shonibyodayo),35 +shoulder necklace,35 +shotel,35 +shotarou,35 +shoot,35 +shivue,35 +shisha no karada,35 +shirotaegiku (flower knight girl),35 +shirokuro (oyaji),35 +shirokuro (lapinnoir),35 +shirokuma1414,35 +shiroi suzume,35 +shiro ami,35 +shiratori aria,35 +shiratama (mofutto),35 +shiraishi sara,35 +shipu (toppintetratorten),35 +shiori kirihito,35 +shintani kyou,35 +shinsekai keikaku sinsekai city project,35 +shinonome,35 +shinoda eri,35 +shino (gouma reifuden izuna),35 +shinji in a chair (meme),35 +shindou rei,35 +shimanto youta,35 +shimamoto harumi,35 +shikimiya mana,35 +shikibu mayu,35 +shiisaa,35 +shiina minori,35 +shigureszku,35 +sheryth,35 +sheet bite,35 +sheathing,35 +shatte judevesten,35 +sharla (mujin wakusei survive),35 +shared jacket,35 +shaomin,35 +shamo (ptwmjgda),35 +shame,35 +shaketarako,35 +setsuna (nijisanji),35 +serizawa nae,35 +seqet-aru,35 +sentou kouhei kitsune,35 +senou aoi,35 +senjimon kayumi,35 +sendou hachi,35 +sena monaco,35 +sena (blue archive),35 +sen (sen69),35 +selly55,35 +seijo no maryoku wa bannou desu,35 +sega saturn (sega hard girls),35 +sefushi,35 +schedule,35 +scathach (makyou sergeant) (fate),35 +scarlett,35 +scapegoat,35 +scale print,35 +sayuumigi,35 +sayo wini,35 +sayaka (harris hero),35 +sawsbuck (winter),35 +sawsbuck (autumn),35 +satou tatsuhiro,35 +satou iruno,35 +satori day,35 +satomura akane,35 +satomi hinako,35 +sasagawa ryohei,35 +sarasa misa,35 +sarai,35 +sarah zabiarov,35 +saotome mirai,35 +saon101,35 +sangheili,35 +sandaconda,35 +sanada momen,35 +salia (cross ange),35 +sakurai ruka,35 +sakurai akane (girlfriend),35 +sakuragi miria,35 +sakura (flower knight girl),35 +sakou mochi,35 +sakazakimay,35 +sakamoto desu ga?,35 +sakaki takaya,35 +sakaki kuuya,35 +saimin seishidou,35 +saichi (meme+),35 +sagittarius aioros,35 +sagace,35 +saga taichi,35 +s k (shiro karasu),35 +ryuryu mt,35 +ryougi shiki (second ascension),35 +ryoku (kemurikusa),35 +rwk,35 +rushi (bloodc),35 +rururaroru,35 +runescape,35 +rumeha (aormsj22),35 +rumble (transformers),35 +rumble (league of legends),35 +rukialice,35 +ruka192,35 +rudolph the red nosed reindeer,35 +rryiup,35 +rona,35 +rolento,35 +rokudou rinne,35 +rokico,35 +rodeorodeo,35 +robbery,35 +rixch,35 +ririka (#compass),35 +riri zuran,35 +rinu (stpri),35 +rinta (reyte),35 +rin no youchuu,35 +riko (shuz),35 +rikkunia,35 +riding shark,35 +ricken (fire emblem),35 +richard crazyman,35 +rezoeline,35 +reu daikon,35 +resident evil outbreak,35 +reno 0901,35 +renn,35 +renkonv,35 +renge miyamoto,35 +rena (sky-freedom),35 +remi puguna,35 +relife,35 +relax (artist),35 +reisen udongein inaba (bunny),35 +rei no mizugi,35 +rei (9086),35 +reginn (fire emblem),35 
+red hood (dc),35 +red garter straps,35 +rebecca hopkins,35 +rasielcochma,35 +raps (yohomeboyraps),35 +ranran 3939,35 +ran (mitama ~shinobi~),35 +raion (soraelf),35 +raining blood,35 +raikoh (paradiso guardian),35 +raiden (hayabusa),35 +racing miku (2020),35 +raccoon costume,35 +rabienu,35 +rabi-tan,35 +rabbity art,35 +quincy (azur lane),35 +quin (himegata alice),35 +queen zeal,35 +queen of hatred,35 +qianshuhao,35 +q9q,35 +q18607,35 +putotyra (ooo combo),35 +pussy juice in mouth,35 +purpleninfy,35 +punishedplume,35 +pumpkinpaii,35 +pukun,35 +psidubs,35 +prosperous peace (genshin impact),35 +prometheus (movie),35 +print bed sheet,35 +pretty x cation 2,35 +pravin rao santheran,35 +powai pichori,35 +pov finger frame,35 +poririna,35 +poppy girl (surio),35 +pongu,35 +pole2walker2,35 +pokemon on leg,35 +plug (feng-yushu),35 +pleated shorts,35 +playing card print,35 +pizza delivery sivir,35 +pixiv robot wars,35 +pixie (monster farm),35 +piuta,35 +pinky iwata,35 +pink tulip,35 +pink garter belt,35 +pingqiong xiansheng,35 +pine-chan ver. 1,35 +pikuseru,35 +pikuharu,35 +phi brain puzzle of god,35 +pharaoh (monster girl encyclopedia),35 +penthesilea (amazones ceo) (fate),35 +penpen (penpen1942),35 +penis tattoo,35 +penguin highway,35 +pecorin,35 +pc (personification),35 +pawoo username,35 +parua,35 +pariston hill,35 +parental advisory,35 +papyrus (font),35 +paper on head,35 +panties around feet,35 +pallo,35 +palette project,35 +paingumi,35 +p-gnesium,35 +overalls removed,35 +oujima tamaki,35 +ots-14 (destined love) (girls' frontline),35 +otomachi una (spicy),35 +oslight,35 +osabe tom,35 +orisa (overwatch),35 +orippa,35 +orihika,35 +oreshki,35 +orange (satsurikukuma),35 +opened by another,35 +ootsutsuki kaguya,35 +oota ushio,35 +oota tamon,35 +oofusa shizuko,35 +ooe chizuko,35 +ongyageum,35 +one3four!,35 +omuraashu,35 +omodaka (nitera1041),35 +olivia (shkm2443),35 +olette,35 +ol-chan (ol mahonanoka),35 +okudera momiji,35 +okudera miki,35 +okazaki norie,35 +okada izou (second ascension) (fate),35 +oissu tiwassu,35 +oeyama,35 +odoro (nicoseiga81184094),35 +o'bannon (warship girls r),35 +nyotengu (scarlet-tinged hot spring vacation) (doa),35 +nyoro mutou,35 +nyokkiiiiin,35 +numachi rouka,35 +nuker (nukerdraws),35 +nuime (nuishiron),35 +nue (tayutama),35 +novadada,35 +notinversion,35 +norino moto,35 +nontan (nontanexx),35 +nontan,35 +nonjake,35 +noku (eco),35 +no horns,35 +nmkranker,35 +nkyoku,35 +nkshoujo,35 +nkgmgs,35 +niwatori takeshi,35 +nitta hiroto,35 +nishimura konomi,35 +nise maou dokuzeru,35 +nirasawa hiyoko,35 +nina saotome,35 +nikukaiq,35 +nikon (company),35 +niko (toitoi210),35 +nikaidou mari,35 +nijinosaki dia,35 +nihudau,35 +nie xiaoqian,35 +nickii25,35 +ngra,35 +ngirln4,35 +ne~pon? 
x rai pon!,35 +never gonna give you up,35 +neutrophil,35 +nesskain,35 +nekotama (artist),35 +neko yuuko,35 +neko kuruto,35 +nehan (granblue fantasy),35 +negishio,35 +nazotyu,35 +natumiao,35 +natsune ilasuto,35 +natedecock,35 +nashi y,35 +naruka (ynarukay),35 +naru (kuma),35 +narmaya (the black butterfly) (granblue fantasy),35 +narashika asuka,35 +napalmbit,35 +nano (nanojyaa),35 +naniiro,35 +nanba mutta,35 +namama (namama82),35 +nakajou amane,35 +nakajima sanae,35 +nagisa k,35 +nagii,35 +nagi raiun,35 +nagato (azur lane) (old design),35 +nagase jun,35 +nagasawa yuki (assault lily),35 +myu (quiz magic academy),35 +myouga teien,35 +my doll house,35 +muzzle (trigger),35 +mutsu (kancolle) (cosplay),35 +mutou megumi,35 +musume shimai,35 +muruaka,35 +muraya yoshihisa,35 +munakata shiho,35 +multicolored text,35 +multi-strapped swimsuit,35 +mukoujima takurou,35 +mujib,35 +mugi maccha,35 +mugi (twinbox),35 +mugai (tamac),35 +muchousha,35 +mucc (ren0522),35 +mrpeanut 88,35 +mr.monster (araido kagiri),35 +motoko (fe25),35 +moscow,35 +mos yen,35 +moritakusan,35 +moonbeam,35 +monsters university,35 +monosuke,35 +monofunny,35 +monochro blue sky (vocaloid),35 +mono (nekogoya),35 +monjja,35 +momozukuku,35 +momomochi,35 +momoko (momokyun),35 +momo-chan (dagasi),35 +moi (latte art),35 +mochizuki meiko,35 +mo (mainiti omoti),35 +mmm73,35 +mji (emucchi),35 +mizushima (kazami4),35 +mizuno yun,35 +mizuno (iori-amu),35 +mizu ramen,35 +miyata akira,35 +miyanoshita satsuki,35 +miyamoto iroha,35 +miya (akumatokeiyaku),35 +miu pachi,35 +mituyota 76,35 +mitsukasa ayase,35 +misty sheikh,35 +miridereningen,35 +mireille (.hack//),35 +mira (sumaga),35 +minoda (mndh),35 +mini 4wd,35 +mincho,35 +minawa (hemo),35 +minato hiromu,35 +minamoto mamori,35 +minamito yui,35 +minami rio,35 +millie chliette,35 +milksasa,35 +mikoto paint,35 +mikokomiyazawa,35 +miki (virtuareal),35 +mike (mikenekotei),35 +mikawa (xxcrisp),35 +mikaponcho,35 +mikan (mikataaaa),35 +migi tonari,35 +michishio (kancolle) (cosplay),35 +miamuly,35 +mi tarou0412,35 +meyshi,35 +meranie,35 +meow25meow,35 +mento,35 +mennsuke,35 +meltyvixen,35 +mell (dmwe3537),35 +mela (pokemon),35 +mega sceptile,35 +mega salamence,35 +mega man (character) (cosplay),35 +mega audino,35 +meg maru2,35 +medea (fate) (cosplay),35 +mbt64kmb,35 +maya (borderlands),35 +mavezar,35 +maury (azur lane),35 +matthew (fire emblem),35 +matrix of leadership,35 +matahei,35 +masyu jyaga,35 +master xehanort,35 +mask on breasts,35 +mashiroyu,35 +masa tarou,35 +maron (quintet colors),35 +marion quinn,35 +mareeta (fire emblem),35 +maou to ore no hangyakuki,35 +maou no kuse ni namaiki da!,35 +manya drhr,35 +mangekyou sharingan,35 +manatee,35 +mame (ballet2604),35 +mamaprofennn,35 +malphite,35 +makunouchi ushio,35 +makigami kimiko,35 +majokko megu-chan,35 +maji de watashi ni koi shinasai! 
s,35 +maguroido,35 +magpie,35 +magazine rack,35 +maeda keiji,35 +machi (skyward sword),35 +m o (prftz),35 +m2 (guvn5745),35 +lyuri (riviera),35 +lynx tail,35 +lxjun 09,35 +lumiphi,35 +lume sangria (chrysa),35 +lucio (granblue fantasy),35 +lookhow,35 +lion space,35 +limi26,35 +lihuashuangxiang,35 +liftoff,35 +lieze lotte,35 +lezard valeth,35 +let me solo her,35 +leomon,35 +lens no mukougawa,35 +leni loud,35 +lemonpear,35 +leftporygon,35 +leaning in,35 +leaf hat,35 +lazy orange,35 +layla alstroemeria,35 +layered capelet,35 +lavender (flower knight girl),35 +lava the purgatory (dusk wisteria) (arknights),35 +lan se fangying,35 +lamorak (granblue fantasy),35 +ladygt93,35 +ladiva,35 +kyouta 22,35 +kyouhaku 2,35 +kyoro ina,35 +kuu (haibane),35 +kururu,35 +kurorekishiman,35 +kuroobi (armor),35 +kuromine hiraya,35 +kuroi hitsuji,35 +kuro (kurokami),35 +kurikoma komaru,35 +kurano tomoka,35 +kunimitsu ii,35 +kunieda shiho,35 +kujou amane,35 +kujira (knave),35 +ksaiki,35 +kouryuu kagemitsu,35 +kourin no machi lavender no shoujo,35 +kotomine kirei (cosplay),35 +korezyanai,35 +koohii koucha maru,35 +konya (chocolate palette),35 +konno shimako,35 +kokoro toshokan,35 +kokone (vocaloid),35 +koko (pokemon),35 +kojirou!,35 +koitsu (blue),35 +koike sadaji,35 +koiiro marriage,35 +koguma105,35 +kogarashi51,35 +koga (cookie),35 +kobayashi kobako,35 +kobayashi aika,35 +koba (jdhikdjdkfiwufh),35 +koaraya,35 +kllsiren,35 +klimspree,35 +kleavor,35 +kkusak (kkusag ks),35 +kkokko,35 +kiyu fuyuki,35 +kitimoop,35 +kitazato shigure,35 +kiryuu tsukasa (citrocube),35 +kiryuu sento,35 +kirisawa shinji,35 +kiririn51,35 +kirihara jyazue,35 +kirby's adventure,35 +kirara (gundam bf),35 +kira boshi27,35 +kingdom death,35 +kinata (area7),35 +kimi to boku to eden no ringo,35 +kimahri ronso,35 +kiko (kikobooom),35 +kiki lala,35 +kiki (shepherd0821),35 +kiichi (9ta),35 +khezu (armor),35 +keicha (kmoekaki),35 +keep this a secret from everyone in class,35 +keanu reeves,35 +kcalb,35 +kazi,35 +kaze no stigma,35 +kazari hisa,35 +kazahana chiruwo,35 +kawakami bansai,35 +kawaii joushi o komarasetai,35 +katrina elesius,35 +katou miyako,35 +katayama makoto,35 +katai uji,35 +kashuu kiyomitsu (kiwame),35 +kasai tatsuyoshi,35 +kaoru miki,35 +kanzaki yukiko,35 +kanoya rui,35 +kanatarou,35 +kan (pyy c12),35 +kamiazuma touka,35 +kamen rider zero-two,35 +kamen rider eternal,35 +kamejikiriga,35 +kame (pixiv),35 +kalian,35 +kakuzatou (boxxxsugar),35 +kakino nashiko,35 +kakei juubei,35 +kajio (maburo),35 +kaijuu no. 
8,35 +kagamine rinto,35 +k2h,35 +k164,35 +juujou shion,35 +juri (yu yu hakusho),35 +jura,35 +juno bernal,35 +jun (real) (princess connect!),35 +judgement (tarot),35 +josephine (twin tail rabbit),35 +jjuwaap,35 +jin roh,35 +jin2,35 +jikunyaga,35 +jiisan baasan wakagaeru,35 +jiege,35 +jebura,35 +janitor,35 +janghwa (last origin),35 +jakkun,35 +jagi (jagi souken),35 +jack rockhardt,35 +jack (identity v),35 +j (onjj),35 +izumi yura,35 +itsuki (nanairo megane),35 +isuzu (uzushi),35 +isis (terrorist group),35 +ishii yuriko,35 +ishigaki tamaki,35 +isekai no seikishi monogatari,35 +isamu,35 +isabeau de baviere (madoka magica),35 +irumyuui,35 +iriya (lonesome),35 +iris hallet,35 +irezumi-san (ozka),35 +ippaiccha,35 +ipod touch,35 +inugami gyoubu (kemono friends),35 +interstellar rhapsody,35 +insemination,35 +inoue kikuko,35 +inoue jun'ya,35 +inkyubeiteo,35 +inawata,35 +impossible underwear,35 +imasara maki,35 +imaizumi kagerou (cosplay),35 +ilis,35 +ilassa (elh),35 +ikutsuki shuuji,35 +ikemeru19's delivery boy,35 +iizuna (milky walker),35 +idolmaster cinderella girls u149,35 +ideon gauge,35 +ichii maina,35 +ichi (antonym),35 +iceringer,35 +ibaraki douji (onmyoji),35 +i-chandraws,35 +hypertan,35 +hyoutan tan,35 +hyou (pixiv3677917),35 +hyaluron,35 +hunting horn,35 +hua ge pi,35 +hu su,35 +howl (howl no ugoku shiro) (cosplay),35 +howard (mujin wakusei survive),35 +houten (dre a mer),35 +houmei,35 +houjin exaxxion,35 +hotpants (i'm hot yet!),35 +hotogi shirayuki,35 +hot plate,35 +hoshikawa koharu,35 +horuda,35 +hontai bai,35 +honorikiti,35 +honeycomb print,35 +honey calamari,35 +honest rate,35 +holding water,35 +holding mouse (computer),35 +holding bento,35 +hms monarch (siirakannu),35 +hizuki shino,35 +hiyoyogi,35 +hiyoku no crosspiece,35 +hitotsubashi inari,35 +hiru-kun no ami,35 +hiroya masaharu,35 +hiroshi (jasinloki),35 +hiren,35 +hiraoka masamune,35 +hirame guard,35 +hinokami sakura,35 +hinata kanata,35 +hinata (echoloveloli),35 +hinamori (m nmy01),35 +hina (hinamatsuri),35 +himezaki aoi,35 +himeshita johnny mafuyu,35 +himeko (sky-freedom),35 +hikusa,35 +hijiri misaki,35 +higuchi kaede (swing!!),35 +hidaka toworu,35 +hephaestion (fate),35 +heo (tkgm),35 +henshin!,35 +helena harper,35 +helena (warship girls r),35 +heikouidou (seraeno),35 +hei zhi shi,35 +hechima-bushi,35 +hecatoncheir,35 +heaven's melody,35 +heathcliff,35 +hearts recollection,35 +haze00,35 +hayaneko (ceceragi),35 +hayami yoichi,35 +hatigatunoneko,35 +hasumushi,35 +hastune,35 +hashibi rokou,35 +haruno (kanimeshi),35 +haruirokomici,35 +harry potter and the philosopher's stone,35 +harapeko (886011),35 +harajuku mimi,35 +harahachibu ajinosuke,35 +happi xfyg,35 +hanyuu shion,35 +hanusu,35 +hanging bridge,35 +handlebar mustache,35 +hand glasses,35 +hanamori suzu (hkgbkk8),35 +hanadi detazo,35 +hanabusa lisa,35 +han do-yoon,35 +hamaeru,35 +half rest,35 +hal jordan,35 +hakuto hotaru,35 +hakusen-hiki,35 +hakkatou,35 +hakama-chan (aa),35 +hajime (wkpz8247),35 +hacosumi,35 +hacha (hachaowo),35 +gyorui (katsuwo drawing),35 +gwyn lord of cinder,35 +gurumi mami,35 +gundam deathscythe,35 +guldo,35 +grisha yeager,35 +gridknight (ssss.gridman),35 +grell sutcliff,35 +green ponpoko (module),35 +greasymeta,35 +graverobber (darkest dungeon),35 +grape (pixiv27523889),35 +gps,35 +gotou kiichi,35 +golden axe (weapon),35 +gold ship (umamusume) (cosplay),35 +gold one-piece swimsuit,35 +gokou ruri (cosplay),35 +godtail,35 +glenn radars,35 +ginshachi,35 +gingham dress,35 +ghost (monster girl encyclopedia),35 +geppuntei dappun,35 +gavial (combat medic) 
(arknights),35 +gateau mocha,35 +gambian rat (kemono friends),35 +gajumaru09,35 +gainer sanga,35 +gabyo nuno,35 +g munyo,35 +g4m,35 +g-tenko-r,35 +fuyuno (kiddyland),35 +fuyu urara,35 +futsuka (dzone),35 +fuse takuro,35 +furumi showichi,35 +furisode girl katherine,35 +furan (pixiv20237436),35 +fujiwara takumi's toyota trueno ae86,35 +fujisaki eru,35 +fu-ha jin,35 +frogbians,35 +frilled headwear,35 +freya (danmachi),35 +freddy krueger (cosplay),35 +francisco valle,35 +fp-45 liberator,35 +foxfire ahri,35 +fore (va-11 hall-a),35 +flattened,35 +flag custom,35 +fkskii65,35 +fish pillow,35 +final fantasy xii revenant wings,35 +fikusipu,35 +felsus,35 +felix (felix901123),35 +fefnir (mega man),35 +fatalis,35 +fanbox,35 +exposed clitoris,35 +eva 06,35 +emizel (disgaea),35 +emerson tung,35 +embroidered dress,35 +elizabeth bathory (first ascension) (fate) (cosplay),35 +elincie yerthrop,35 +elicia hughes,35 +elf village,35 +elchi cargo,35 +elbia hernaiman,35 +ekaki-ya okamoto,35 +eichi (wild chicken),35 +ei (akinosakae),35 +edgar valden,35 +ecstasy,35 +ecoas,35 +ebura din,35 +e-bushi,35 +dyarikku,35 +duan zhu,35 +drop.b,35 +drill hand,35 +drawing kanon,35 +dracula (cosplay),35 +doutei (one-punch man),35 +double eyepatch,35 +double-decker bus,35 +donut hole (vocaloid),35 +dongye1551,35 +dokyuu afro,35 +dokkoi shoo,35 +dohalim (tales),35 +doatobira,35 +diver down (stand),35 +discworld,35 +dildo under mask,35 +diarmuid ua duibhne (sensha otoko) (fate),35 +dianche miao (18826),35 +di le to,35 +deus ex: human revolution,35 +deuce (fft-0),35 +detective pikachu (game),35 +deshima shin,35 +dende,35 +demimushi,35 +dekooop,35 +decarabi,35 +death (tarot),35 +dearoliha,35 +dear vocalist,35 +danxing aipangzi,35 +dansai bunri no crime edge,35 +dango ya,35 +da cider,35 +d4y suisei,35 +czva,35 +czech flag,35 +cyu ta,35 +cyberdoll may,35 +cutie honey universe,35 +cuilein-anbar (genshin impact),35 +cuicuijiao,35 +cube (alma),35 +cruz schild,35 +crow0cc,35 +crmanzana,35 +crash bandicoot,35 +cpr,35 +coyote,35 +cottone (highjethoo),35 +cottan,35 +cooking mama,35 +cocoa (cafe-chan to break time),35 +clover heart's,35 +cleansed crystal mirror,35 +claus valca,35 +civilization v,35 +chuhaibane,35 +chourui keiko,35 +chongqing (warship girls r),35 +choister,35 +choco la tea,35 +chloe von einzbern (cosplay),35 +chloe (elsword),35 +chip (kirby),35 +chim,35 +chiaki shin'ichi,35 +cheryl (arc the lad),35 +cherry (urusei yatsura),35 +cherino (hot spring) (blue archive),35 +chelsea arcot,35 +chatan nakiri,35 +chapter0p,35 +chaleu,35 +cereza,35 +cerberus (megami tensei),35 +centinel303,35 +cello (little princess),35 +caustic crayon,35 +cauliflower,35 +cat (ghost trick),35 +cassio yoshiyaki,35 +carro (watarui),35 +captured alien,35 +canizzz,35 +caliper,35 +cake (adventure time),35 +butudan butugu,35 +busujima funya,35 +buranko (marchen),35 +budgerigar (bird),35 +bu tika,35 +bromide,35 +broken (7589468),35 +brave neptune,35 +brain powered,35 +bouquet toss,35 +borrowing race,35 +boribeya,35 +bone (armor),35 +bonbori,35 +bokujou monogatari: hajimari no daichi,35 +bodhi wushushenghua,35 +bobobo-bo bo-bobo (character),35 +bluk berry,35 +blue napoleon (idolmaster),35 +blame gakuen!,35 +blackpink,35 +blackberry-chan,35 +black snake,35 +big cat shan,35 +big boss (cosplay),35 +bidiu (the legend of luoxiaohei),35 +beryl (shinrabanshou),35 +benitoite (houseki no kuni),35 +ben jackson,35 +belluch,35 +bell (hellchan3),35 +behemoth (final fantasy),35 +beelzebub (beelzebub-jou),35 +bat background,35 +baseball base,35 +basculin (blue),35 +banshee 
sister (mechanical buddy universe),35 +bailey (azur lane),35 +bahamut lagoon,35 +baby mario,35 +baby be'el,35 +baba lamune,35 +b gent,35 +b.leaf,35 +azusa (granblue fantasy),35 +ayasegawa yumichika,35 +aya (jonsun),35 +awilda (p&d),35 +atsushima you,35 +atsumare! fushigi kenkyuubu,35 +atorasu,35 +atlanta (azur lane),35 +atelier30,35 +atarime,35 +astrid (pokemon),35 +astolfo (sugar vacation) (fate),35 +assassin (fate/zero) (cosplay),35 +asrbpy,35 +asio (asiogimuto),35 +ashuku,35 +asakura mihono,35 +asahiru ban,35 +asaba yuuta,35 +arwing,35 +arutoria (187497382),35 +art jam,35 +art gallery,35 +arshes nei,35 +aroma0501,35 +arisaka kazuki,35 +ariel (mecha),35 +archangel (helltaker),35 +arcade gamer fubuki,35 +arc2urus,35 +arai29,35 +aqua bodysuit,35 +apple bloom,35 +aphrodite (shuumatsu),35 +aphelios,35 +aokawa daisuke,35 +antiquewhite,35 +anti-earth bomb,35 +annlin,35 +anna (or),35 +anmochi mochi,35 +ankoman,35 +animatic,35 +anientte,35 +anesthesia (rumble roses),35 +ane yome concerto,35 +andrew leung,35 +ana medaiyu,35 +ana (warioware),35 +amenosorashido,35 +ameno sagiri (yuragisou no yuuna-san),35 +ame8desu,35 +amazima mangetu,35 +amatsuka yuuna,35 +amaterasu (fate),35 +amanogami dai,35 +amano haruka (sakogamitokuan),35 +amakawa tamawo,35 +amakara000,35 +alvida (one piece),35 +alt (apou),35 +alolan dugtrio,35 +alois rangeld,35 +alicia (kagawa yuusaku),35 +alice claudia,35 +alex (totally spies),35 +akuru (akr369akr),35 +akizuki buranko,35 +akikusa peperon,35 +akasaka koutarou,35 +akaisuto,35 +aka ruuko,35 +ajc,35 +aizawa sumie,35 +aira (exp),35 +aigami shion,35 +ai the somnium files: nirvana initiative,35 +agnes boulange,35 +afrika korps,35 +aeyga,35 +adzuma nishi,35 +ach (zjakskwdf),35 +abyss,35 +abukawa honpo,35 +absurd fox,35 +a ichiro,35 +a-line,35 +5 106y,35 +45 (mdf an),35 +2v (joyeong),35 +2c,35 +2980,35 +24 (24phage),35 +239 239,35 +0.05,35 +zzo0,34 +zyuden sentai kyoryuger,34 +zusshii (libertasgenesis),34 +zuman (zmnjo1440),34 +zugan (berugkamp),34 +zetsu (naruto),34 +zetallis,34 +zed o' brien,34 +zamazenta (hero),34 +zakuro0508,34 +z-jun.dd-zhong,34 +yyillust,34 +yuzawa,34 +yuzaki tatami,34 +yuyu (spika),34 +yuya kyoro,34 +yuuri shachi,34 +yuuren kyouko,34 +yuuki kanade,34 +yuuki chigusa,34 +yuuguu settai,34 +yutaka (yutakadeath),34 +yuri lowell (light fedrock uniform),34 +yuri-yuri,34 +yureru (junn7603),34 +yupachi,34 +yunyun (canaan),34 +yunomi (kyuusuu),34 +yunlongchen,34 +yuni (manyutin),34 +yumemi gachiko,34 +yumei,34 +yukishita miyuri,34 +yukishiro nanako,34 +yuki hishaku,34 +yukaribe setsuko,34 +yuichirou,34 +yui (new year) (princess connect!),34 +yuetsuki (ash),34 +yucca (sui linx),34 +yubaba,34 +yua serufu,34 +yu skl,34 +ytrall,34 +yoyomura,34 +youkai pad,34 +you guo chaocai,34 +yosuzu,34 +yoshiyoshiwa,34 +yoshimura tatsumaki,34 +yoshimura ken'ichirou,34 +yoshimizu amine,34 +yonpii,34 +yong-gok,34 +yonesuke,34 +yonecchi,34 +yone (kaguudou),34 +yomi (incubator),34 +yolang and yolang,34 +yogurt bakudan,34 +yoco n,34 +yinyoushirenmaotouying,34 +yimamiantang,34 +yiler,34 +yiku (sim7368),34 +yeqing (gorgeous mushroom),34 +yao ren gui,34 +yanuk,34 +yano (spirit1022),34 +yamine kuro,34 +yamazaki kana,34 +yamazaki (now printing),34 +yamashita ikuto,34 +yamanobe miki,34 +yamakaze (kancolle) (cosplay),34 +yamakamu,34 +yamada maririn,34 +yakushi kabuto,34 +yakumo hamaji,34 +yakota (usuk-yako),34 +yaegaki erika,34 +yae (yae ringo),34 +xyh,34 +xobox,34 +xeno a,34 +wreck-it ralph (character),34 +wooden bowl,34 +wontam,34 +wonder project j2,34 +wodom pod,34 +wkar,34 +wiori (mashiro 
miracle),34 +winu (hertrion),34 +winsankemonodou,34 +windworker,34 +will of the abyss,34 +wii version,34 +whole rest,34 +white witch (lack),34 +whiskey project,34 +welve,34 +weissritter,34 +weedy (candy cape) (arknights),34 +"watashi ga suki nara ""suki"" tte itte!",34 +watahashi yasumi,34 +wasami (a27678193a),34 +ward,34 +warakusa,34 +wamawmwm,34 +wakasa reo,34 +wakan tanka (fashionista swimsuit),34 +waguruma!,34 +wa (wa),34 +w.i.t.c.h.,34 +vriska serket,34 +vit,34 +vision test,34 +virusotaku,34 +virtu.al,34 +violet detector,34 +viola (soulcalibur),34 +veterinarian,34 +vans,34 +vane (halloween) (granblue fantasy),34 +vampire's sister (gogalking),34 +uzaki hitomi,34 +utsugi tsuguha,34 +uta (tokyo ghoul),34 +usui (tripcube),34 +uss des moines (ca-134),34 +usagi niku,34 +urza pranaice,34 +urushihara satoshi (style),34 +urooooboe,34 +urashima (hidoro mgmg),34 +ura dora,34 +unown z,34 +unigon (kitsutsuki),34 +uni sirasu,34 +uni (unexistarts),34 +undine,34 +un lim,34 +un4lord,34 +ume neko (otaku-nyanko),34 +ultra00,34 +uirou-zazami,34 +ubunchu!,34 +ubume (onmyoji),34 +u2suke,34 +type 97 shotgun (girls' frontline),34 +twirling gun,34 +twinpon,34 +twelve (zankyou no terror),34 +tupai (touhou),34 +tsuzura (pixiv74922627),34 +tsurumi rumi,34 +tsunami (sorudora),34 +tsumura chita,34 +tsukumo san,34 +tsukumo kazuita,34 +tsukimi (yukinagi),34 +tsuki (akatsuki no goei),34 +tsukasaki ryouko,34 +tsujigaito satoha,34 +tsugumi takakura,34 +tsuchiyama niu,34 +tsubakigaoka metropolitan high school uniform,34 +transistor (game),34 +traffic officer,34 +tpamjdsta (usatokurasu),34 +toyohara mitsuki,34 +toyo (toyozine2009),34 +toyo (toyoyomi),34 +toxic (toxicv),34 +touma hikaru,34 +touhou costume examination (meme),34 +tortinita fine,34 +torn belt,34 +torishimo,34 +torigoe gakuen school uniform,34 +toooka,34 +too much fluff,34 +tonia,34 +toni infante,34 +tomotototo,34 +tommy region,34 +tomi27,34 +tomahawk,34 +tokiwa kurumi,34 +to-ru,34 +tinpam,34 +tini,34 +time bomb,34 +tied jumpsuit,34 +tiamat (last origin),34 +thumb in beltline,34 +thor (marvel) (cosplay),34 +theend,34 +the sky crawlers,34 +the legend of zelda: tri force heroes,34 +the king of fighters '96,34 +the girl and the robot,34 +the endsinger,34 +the dark knight rises,34 +the chariot (tarot),34 +the boy (the last guardian),34 +tetsuzankou,34 +tetsukui,34 +tess turner,34 +teru (kai teru ),34 +teresa beria,34 +terauchi kiyo,34 +tendril,34 +tendo (zhazhatiantong),34 +tatsuji,34 +tatsubuchi (todoiru),34 +tatatsu,34 +tarnyang (queen's blade),34 +tarako supa,34 +tarako,34 +tapestry -you will meet yourself-,34 +tanzawa chizuru,34 +tanahashi suzune,34 +tanagawa makoto,34 +tamo (gaikogaigaiko),34 +tamaki (tamaki599),34 +tama (nyanko daisensou),34 +takssmask,34 +takeno (hashi falcon),34 +takejirog,34 +takefumi,34 +takasaki misaki (koi to uso),34 +takagi junjirou,34 +takada naho,34 +takada gan,34 +taiyou no yuusha fighbird,34 +taiyou iwaku moe yo chaos,34 +taitan,34 +tachiagare! 
orc-san,34 +table tennis net,34 +t-back spats,34 +syu.mu,34 +suzushiro,34 +suzumiya rin,34 +suzuki kana,34 +sushida hayari,34 +surrender,34 +suomi (blissful mission) (girls' frontline),34 +sunnywang03,34 +summer days,34 +sumiregawa nenene,34 +sumiosmith,34 +sumino akasuke,34 +sukinako ga megane wo wasureta,34 +suki,34 +suikario,34 +suiji,34 +sugata dski,34 +sudhiro sappurisa,34 +subete ga f ni naru,34 +subdue ezwei,34 +striped nails,34 +striped ascot,34 +stitched hand,34 +stitched eye,34 +sticky honey roast (genshin impact),34 +steel peach,34 +stayblack,34 +star (ikaruga),34 +ssss ve,34 +srgrafo,34 +squeaky (artist),34 +spizzy,34 +spiking,34 +sphinx awlad,34 +sparky (arms),34 +souzaipan,34 +sousei no onmyouji,34 +souma ren,34 +sophie (middle-school mascot) (tales),34 +sonny boy,34 +sonic the hedgehog (idw),34 +sona (yuio58ok),34 +so myeolchi,34 +sniper (sekaiju),34 +snifflesmp4,34 +smoker (left 4 dead),34 +smersh,34 +slums,34 +slow damage,34 +slouch hat,34 +skyspace,34 +sk (sk-g),34 +sixteenth rest,34 +siplick,34 +simon brezhnev,34 +simekirikowai,34 +silver leotard,34 +sigurd (first ascension) (fate),34 +sigetch,34 +shujinkou (eiyuu senki),34 +shoujo sect,34 +shoujo hatsudouki,34 +shou (ahiru shinobu),34 +shokushu-chan,34 +shivie aika,34 +shishi otome,34 +shirousagi (sirousagi1998),34 +shirona mikoto,34 +shirogane mitsuki,34 +shiro (hakukosui),34 +shiratsu (white-seaside),34 +shiratama liliana,34 +shiranui (kancolle) (cosplay),34 +shirakawa ryouko,34 +shione (memento forest),34 +shio kuzumochi,34 +shinozaki ai (corrector yui),34 +shinonome soichiro,34 +shinonome chiharu,34 +shinon (fire emblem),34 +shinomiya naka,34 +shinomiya kyouya,34 +shino (pharmacy),34 +shinjou izumi,34 +shindou akane,34 +shinalpha,34 +shimoyama mutsumi,34 +shimazaki1152,34 +shimasato,34 +shiina hikaru,34 +shigure ama,34 +shigure (attack i-490!) (azur lane),34 +shigatsugata,34 +shibata katsuie (oda nobuna no yabou),34 +shibarikini,34 +shiba (pixiv244),34 +sherumaa (shellmound),34 +shem-ha,34 +she-venom,34 +shawl lapels,34 +shardanic,34 +shapiro,34 +shanzha (fruit),34 +shanpao,34 +seung mo kim,34 +setsuna (kyoushirou to towa no sora),34 +seto yuuki,34 +sesame ange,34 +serizawa ulala,34 +sera haruna,34 +sera (doubutsu sentai zyuohger),34 +senkou no clarias,34 +sena (illust sena),34 +sekai seifuku kanojo,34 +seiyo academy uniform,34 +seinarukana,34 +seer,34 +sebastian (dokidoki! precure),34 +sebas tian,34 +screen door,34 +scout movement,34 +sayuco,34 +sayo ayato,34 +sawayuzu,34 +sawamura chizuru,34 +satsuki yomi,34 +satou toshio (suisuisuisui),34 +satonaka kei,34 +satellite cannon,34 +sashimi0gou,34 +sasameki koto,34 +sasahara nonoka,34 +sao (saowee),34 +sanmian (chidarakeno),34 +sane (zoza),34 +sakurazuka ren,34 +sakuraoo,34 +sakurai yukino,34 +sakurai natsuka,34 +sakuragi raia,34 +sakura yunuto,34 +sakura (ichisakupink),34 +sakana~ (meme),34 +saitu miki,34 +saimoe 2006,34 +sailor saturn (cosplay),34 +saikou-iro aurora,34 +sage (tick! 
tack!),34 +saeki teru,34 +sadone,34 +saboten (flower knight girl),34 +s hitorigoto3,34 +s gundam,34 +s.advent,34 +s-m!le yuu,34 +ryu hari,34 +ryouta,34 +ryaku,34 +ruty (mafuyu),34 +ruru (heat haze),34 +ruo,34 +ruku rx,34 +ruined king: a league of legends story,34 +rui yuda,34 +rottenweiler,34 +rotoscoping,34 +rosele,34 +rosehip t72,34 +roon (viridescent lullaby) (azur lane),34 +ronarona,34 +rolycoly,34 +rokuya (68 zusao),34 +rokuno,34 +roki (vocaloid),34 +rit3set,34 +rishetta (30ms),34 +risemu (c liesem),34 +riruru,34 +rinne (pretty rhythm),34 +rin (catherine),34 +rikuo (whace),34 +rikimatsu ariko,34 +riding animal,34 +richard ranashinha de vulpian,34 +rich s,34 +ribbon-trimmed ornament,34 +ribbon-trimmed choker,34 +ribbon-trimmed bloomers,34 +rhineheim,34 +rhine lab (arknights),34 +revolcane,34 +renaissance,34 +remus john lupin,34 +remomon (sdnn8578),34 +reminiscence re:collect,34 +remil,34 +reith,34 +reimei (1988),34 +regeneration,34 +reflector (photography),34 +reflector (ookami),34 +redman,34 +redline,34 +redchicken,34 +red10,34 +recotasan,34 +rawst berry,34 +rally car,34 +raindrop print,34 +rain (nadroj31),34 +radio.broom,34 +radiant soul (elsword),34 +radek ken,34 +queen serenity,34 +qian yi,34 +q-feuille,34 +pyongtaro,34 +purimari,34 +pudding (arknights),34 +pub,34 +psyco gundam,34 +psycho-puzzle,34 +prototype-d,34 +princess piranha plant,34 +princess bullet bill,34 +priest (ragnarok online) (cosplay),34 +prelati's spellbook,34 +ppk (mach tempest) (girls' frontline),34 +power,34 +poripori,34 +pop star,34 +poooka,34 +pondeomaru,34 +pon-chan (mikeneko),34 +pollen,34 +polka dot towel,34 +poker face (vocaloid),34 +poisonousgas,34 +poe dameron,34 +pnikatro,34 +plesioth,34 +planet earrings,34 +plainwhite,34 +piyopiyomen,34 +piyo (piyoko528),34 +pipi o (pipioo08),34 +pipelining,34 +pinlin,34 +pinko (inazume-panko),34 +pink hood,34 +pineapple hair ornament,34 +pillow (blackpirou),34 +pikachu pop star,34 +pikachu hood,34 +pi0w0pi,34 +phonic,34 +phantom (focus) (arknights),34 +perri (mnemosine),34 +perfumer (species plantarum) (arknights),34 +perfellcsaiko,34 +peregrine falcon (kemono friends),34 +pentomo (petra gurin),34 +pentakill sona,34 +penis face,34 +pelvic thrust,34 +pedestrians only sign,34 +pd-x,34 +paw panties,34 +patrick fche,34 +parnkgjas,34 +panbukin,34 +pa-15 (marvelous yam pastry) (girls' frontline),34 +p-kana,34 +oz (maplestory),34 +ouran,34 +ou kijin,34 +orcbarbies,34 +ootoriryouran gakuen school uniform,34 +oop,34 +oomikado himari,34 +ooeyama,34 +ono itaru,34 +one touch,34 +onacia,34 +oliver hamlin,34 +okusan,34 +okumura eiji,34 +okonogi noi,34 +okita kyouko,34 +okappixv,34 +okamen,34 +oisih,34 +odin (final fantasy),34 +odecono3,34 +octavia melody,34 +ocelot print,34 +obake no q-tarou,34 +o imotarou,34 +nyame,34 +nya lapucea,34 +nuriko (fushigi yuugi),34 +nukarumi noren,34 +nue (phrase),34 +np (edage no hazama),34 +no~ma,34 +nose (oekaki1825),34 +norori,34 +noro (tokyo ghoul),34 +norihe,34 +nor nao,34 +nopal,34 +nonohara himeko,34 +nonodera minku,34 +nonneim,34 +nonfiction!! 
(love live!),34 +nomeoil,34 +noiretox,34 +nogisaka motoka,34 +noel kreiss,34 +noe aoikaba,34 +noa yj,34 +no ribbon,34 +nnn-ollll,34 +nishio nishiki,34 +nishikigi chisato (cosplay),34 +nips (ohnips),34 +ninny spangcole,34 +nikumaru,34 +nikuko (galko),34 +nikujag96737782,34 +nightmare (soulcalibur),34 +nightcoat,34 +nigatsu (fevrali),34 +next frontier (idolmaster),34 +newovermind,34 +net ball,34 +nero augustus,34 +nepsuka (hachisuka),34 +nepodayo,34 +nemu (ceres fauna),34 +nemo (nadia),34 +nelo anjuurou,34 +nekotama shun,34 +nekosination,34 +neimi (fire emblem),34 +negatone,34 +nayuta (chainsaw man),34 +natural another one 2nd belladonna,34 +natt (made in abyss),34 +natsume atsuko,34 +napo8593,34 +nanashiba (banntlla),34 +nanami kazusa,34 +nanami (punishing: gray raven),34 +nanahara shie,34 +nanachi (made in abyss) (human),34 +namino.,34 +namiharuru,34 +nakamura hiro (nobody10ch),34 +naju soreiyu,34 +nagatsuki rio,34 +nagato (guardian fox's shining furisode) (azur lane),34 +nac nac,34 +nabeo,34 +mytho (princess tutu),34 +mystic (tera online),34 +myria loussier,34 +myanyuki h,34 +muvluv altered fable,34 +mutual foot licking,34 +mutsuki shougatsu,34 +mutou kazuki,34 +mutekikyuu believer,34 +mushuu,34 +muro (ninimohu),34 +murakata,34 +murabatodenki,34 +mungduck,34 +multiple shikishi,34 +mugicaan1,34 +mugheyart,34 +muay thai,34 +mr. chang,34 +mozuo,34 +morrow (hitodama-x),34 +morino hon (style),34 +moriiiiiiiiiinn,34 +monogram,34 +monkey1468,34 +mongz,34 +mone,34 +momodora: reverie under the moonlight,34 +momo (idolish 7),34 +momo (gomenne),34 +moleshi,34 +mogura2009,34 +mode aim,34 +moco (captain earth),34 +mochi (touhou),34 +mmm ss,34 +mizumi (artist),34 +mizukoshi saki,34 +mizukimaru,34 +mizuki toko,34 +mizuha (pixiv56938668),34 +mizugi kanojo,34 +miyata (miyatami07),34 +miyasutou,34 +miyako nagi,34 +mitsuya bonjin,34 +mitsucho,34 +mister ajikko,34 +mist dragon,34 +miraino tsubasa,34 +minette,34 +minecraft sword,34 +minazuki chika,34 +minato kageaki,34 +minase ruruu,34 +minamoto kouichi,34 +mimo lm,34 +mimo (pokemon),34 +mimizuku auru,34 +milla the temporal magician,34 +mikoshi matsuri,34 +mikiharu,34 +mikey uo,34 +midjourney,34 +miaodiande yuanshoumiao,34 +mian (dream c club) (cosplay),34 +meu meu,34 +metro-goldwyn-mayer,34 +metamorphy (elsword),34 +meson,34 +mekameka shii,34 +meisho doto (dot-o'-lantern) (umamusume),34 +mei mu,34 +megumi yakiniku,34 +mega garchomp,34 +mb0sco,34 +mayuzumi yukino,34 +mayuge1017,34 +mayu (yuizaki kazuya),34 +mayaa,34 +may (anniversary 2022) (pokemon),34 +matsuya (pile),34 +matcho,34 +matchadzuke,34 +matangom,34 +maruke,34 +marshall lee,34 +maro (nikeneko523),34 +marimo (momiage),34 +mariachi,34 +marci (dota),34 +mapuru,34 +maorzshu,34 +maokyu,34 +manzi,34 +manasseh,34 +mana (gakuburu),34 +mamoru mikokoro,34 +mamiya akari,34 +maman (shuugetsu karasu),34 +mama x holic,34 +makinon tm,34 +maji moji rurumo,34 +main battle tank,34 +maiini,34 +mahou shoujo lyrical nanoha strikers sound stage x,34 +magical charming!,34 +madaragi,34 +mada (mk333),34 +mad moxxi,34 +macross flashback 2012,34 +machi (nagasarete airantou),34 +mabui (poloon),34 +mabelmine,34 +maa (roninnn),34 +m202,34 +lyra (sygna suit) (pokemon),34 +lyoo (cacj5482),34 +lv21,34 +luxiel,34 +lunging,34 +luma li,34 +luetzow (everlasting blood banquet) (azur lane),34 +lucu lucu,34 +lucifer (shin megami tensei),34 +lucadark art,34 +love (pspdspsp),34 +louis (left 4 dead),34 +longhorn,34 +long (chainsaw man),34 +loki 78,34 +loki (matantei loki ragnarok),34 +little specter (elsword),34 +little armory,34 
+lirilias,34 +liremi,34 +liptan,34 +ling (doraling12),34 +lin (tower of fantasy),34 +limbs,34 +likaou,34 +liiko,34 +licking shoulder,34 +licking back,34 +lichtendahl tsery,34 +leyu,34 +leonardo da vinci,34 +lena (azur lane),34 +lazflamme,34 +laura toth,34 +latex shorts,34 +lamsass,34 +laikaken,34 +laika sputnik (shepherd0821),34 +lactone,34 +l-trap,34 +kyouka (real) (princess connect!),34 +kycilia zabi,34 +kuuya (utawarerumono),34 +kushima kamome,34 +kusa chuu,34 +kurumi tsuchi,34 +kurotama (avino),34 +kurosu (nyakelap),34 +kuroshiro no tsubasa,34 +kuromori suzu,34 +kuromitsu nene,34 +kuroleo,34 +kurokoge013,34 +kuroi yasu,34 +kuroha eve,34 +kuroba.k,34 +kuro uso-ko,34 +kurisu kokone,34 +kurihara nagisa,34 +kuresu (alice-pma),34 +kureneko,34 +kurashita tsukimi,34 +kuraki mizuna,34 +kuraka,34 +kuontendou,34 +kunsei hamu,34 +kumo no mukou yakusoku no basho,34 +kumano (kancolle) (cosplay),34 +kudakitsune (kaien kun),34 +kristoff (frozen),34 +krekkball,34 +koushou academy school uniform,34 +kous (onetwojustice),34 +koukyou,34 +kouchi ayako,34 +koucha inu,34 +kou oishi,34 +koto-channel,34 +kotera (koterabyte),34 +kosma,34 +kos-mos ver. 3,34 +koromogae maya,34 +kori (etinop),34 +korean traditional hat,34 +konoe fumina,34 +kondate (inugrm),34 +kon (k0n16),34 +komuro takahiro,34 +komatinohu,34 +komadori ui,34 +koma neko (natukawasaku),34 +kojima tsuma,34 +kohinata yuuma,34 +kohaku teru,34 +kofucchi,34 +kobe,34 +ko-man,34 +knives out,34 +knights templar,34 +knights chronicle,34 +klein (honkai impact),34 +kizuta (barometz),34 +kiteretsu daihyakka,34 +kitami tsuzuka,34 +kishiri (sakurasaku xyli),34 +kishiinu,34 +kisaragi mizuto,34 +kiryuu reia,34 +kiryuu nanami,34 +kirishima sagiri,34 +kirishima noa,34 +kirin kai-ii,34 +kiraboshi (star driver),34 +kintsugi,34 +kino mayumi,34 +kingdom of victoria logo,34 +king jikochuu,34 +kimura takako,34 +kimura masahiro,34 +kimpanzi,34 +kimidorix32,34 +kimi to boku no saigo no senjou arui wa sekai ga hajimaru seisen,34 +kikusaka kochou,34 +kiku (ks5832),34 +kikokugai,34 +kikka kitamoto,34 +kiiro,34 +ki min-woo,34 +keypad,34 +kensei (ciid),34 +kemonosuke (kmskths),34 +keldeo (resolute),34 +kei1 833,34 +kebohan,34 +kazumi schlierenzauer,34 +kazama touko,34 +kazairo kotone,34 +kawaii hito,34 +katsuki toshiya,34 +katou setsuko,34 +katou-chan (shiromanta),34 +katori (katokichi),34 +kasuga kusunoki,34 +kashiwagi yomogi,34 +karuna (madlax),34 +karna (formal dress) (fate),34 +karmatron y los transformables,34 +karkat vantas,34 +karasuke d,34 +karakuri shoujo,34 +karakasa (murakumo koushou),34 +kanzume shima,34 +kanzato shin,34 +kanzaki ranko (cosplay),34 +kantoqyoiko,34 +kansuke (bubuduke),34 +kanogi,34 +kano (singer),34 +kanina shizuka,34 +kanasaki,34 +kanami9731,34 +kamogawa girls' high school uniform,34 +kamisama to unmei (series),34 +kamisama no inai nichiyoubi,34 +kamisakai,34 +kaminaga kouko,34 +kameseru,34 +kamen rider leangle,34 +kakuu,34 +kako (reku),34 +kajiwara shikaji,34 +kajitsu no hito,34 +kajimiya (kaji),34 +kaizin rumble,34 +kainou yoshiko,34 +kaijuicery,34 +kagosaka mahiro,34 +kagikake,34 +kafkasea,34 +kafei (fkey),34 +kabeyama heigorou,34 +k-san,34 +junito715,34 +june (squeezable),34 +julio (precure),34 +judy (animal crossing),34 +judith (glamorous maid) (tales),34 +jubjub (monster girl encyclopedia),34 +jr.,34 +joshua-42-as,34 +josh corpuz,34 +josette,34 +josal,34 +joka (hiwai),34 +jin-lian,34 +jiajiuli,34 +jed henry,34 +jangmo-o,34 +jack bright,34 +jack (kairuhaido),34 +j. 
geil,34 +izuna kazuki,34 +izayoi cha,34 +iyo (nanaka-0521),34 +iwazoukin,34 +ivxxx,34 +itou kanae,34 +itosiki zetu,34 +itaru (kidomh63),34 +ishino mikoto,34 +ise dango,34 +isayama mei,34 +isane,34 +isabella (yakusoku no neverland),34 +ioryogi,34 +inushida (dogsheepsheep),34 +inubousaki aya,34 +intestine hair,34 +infini,34 +inazuma kick,34 +inari,34 +impulse gundam,34 +ilovetani,34 +ilhi,34 +ikr (artist),34 +ikisugi shokudou,34 +iketatsu shizuru,34 +ikeda yuuji,34 +ikebana,34 +iinuma kaoru,34 +iguchi akari,34 +icoo,34 +ichizon,34 +ichinose ibuki,34 +ichihashi makoto,34 +icelee,34 +ibara,34 +hzw3,34 +huaji,34 +hua ming wink,34 +howling (busou shinki),34 +hourai girl (touhou),34 +houjou (takagi-san),34 +houjoh (7th-heaven),34 +hottan!,34 +hotatechoco (hotariin),34 +hosokawa miki,34 +hoshiyume yashiro,34 +hoshiyuki aruru,34 +hoshiyui tsukino,34 +horizon (counter:side),34 +horikoshi jirou,34 +hoodie tug,34 +honolulu (manjuu mischief) (azur lane),34 +honneamise no tsubasa,34 +honjou ranko,34 +honda civic,34 +homoo...,34 +homer simpson,34 +holding lifebuoy,34 +hms thunderer (siirakannu),34 +hiya (dkr),34 +hitsuji (sheepsb22),34 +hitomilook,34 +hit (dragon ball),34 +hisaki yukari,34 +hisakawa sora,34 +hiroyuki koto,34 +hiroshiko (restart),34 +hirobakar,34 +hiroaki (hiropon pj),34 +hiraken,34 +hipster,34 +hippopotamuso,34 +hinann bot,34 +hinamizawa kurumi,34 +himuka roko,34 +himawari no kyoukai to nagai natsuyasumi,34 +hijiribe ruri,34 +hijiki (hijiri st),34 +hiiragi inori,34 +higashikata josuke (cosplay),34 +hifumitaka,34 +hide (acceed),34 +hibiki tohru,34 +hero roller (splatoon),34 +hermithessa,34 +heo sung-moo,34 +heiyu,34 +hayase sou,34 +hayasaka hiyori,34 +hatyo,34 +hatsune miku (vocaloid4) (chinese),34 +hatomugi (hato6g),34 +hataraki kuma,34 +hat girl (umamusume),34 +hasumikaoru,34 +hassan of serenity (fate) (cosplay),34 +haruka athena,34 +hannyag,34 +hanata,34 +hananon,34 +hanamoto tenka,34 +hanamizuki (flower knight girl),34 +hanami shione,34 +hanami (horizon),34 +hanabusu arisu,34 +hamo (maca-roon),34 +hakushoku n,34 +hakuro109,34 +hagure yuusha no estetica,34 +hadashi no kenji,34 +hachi-kun,34 +h&k hk417,34 +gz (gyoju),34 +gyess963,34 +gundam kyrios,34 +gunbelt,34 +gumbat,34 +guizhong (genshin impact),34 +grogu,34 +gripen (girly air force),34 +grey headband,34 +gremio (gensou suikoden),34 +green mittens,34 +great white shark,34 +great dane,34 +grandmother (little red riding hood),34 +grancrest senki,34 +gomora,34 +gomihitosi,34 +golden eagle (kemono friends),34 +gisela (madoka magica),34 +gintama movie 2: yorozuya yo eien nare,34 +ginga patrol jaco,34 +gin ji,34 +gin daikon,34 +gigantamax gengar,34 +gig (soul cradle),34 +ghost-pepper,34 +getter arc,34 +gear print,34 +garrison regiment (emblem),34 +garon (fire emblem),34 +gantz sword,34 +gamako,34 +galil ar (upotte!!),34 +gakuen alice,34 +g36 (pure white cornflower) (girls' frontline),34 +fuu (frol),34 +futo-inu,34 +futa-futa,34 +fusou (kancolle) (cosplay),34 +furutori,34 +furudori yayoi,34 +fur-trimmed shawl,34 +fukiishi ayako,34 +fukami naoyuki,34 +fukagawa kazumi,34 +fujita yukihisa,34 +fujisawa kamiya,34 +fujisaki nadeshiko,34 +fujinoki nene,34 +fujima emiri,34 +fuguriya (monoton),34 +fuchigami mai,34 +frilled hood,34 +freely tomorrow (vocaloid),34 +freddie (gundam build divers re:rise),34 +frea,34 +franco il nero,34 +fran (gudanco (nyanmeowzer)),34 +four of hearts,34 +foodtoyc,34 +flyx2,34 +flynn scifo (student body president),34 +flying animal,34 +fluffy-pokemon,34 +flip (diiolive),34 +flaxvivi,34 +flat tire,34 +flan (zhd91),34 
+fishsyrup,34 +fishneak,34 +finger piercing,34 +fine falke,34 +fetch,34 +ferry (santa minidress) (granblue fantasy),34 +fernandez (fearless night),34 +felice,34 +feather-trimmed gloves,34 +father and child,34 +fastener (psg),34 +fasces,34 +faithom,34 +fairyjack,34 +ex saki,34 +everyntrge,34 +evarella (berserk),34 +eumenes (fate),34 +estelle (cieluscian),34 +esmeralda (disney),34 +eske,34 +esbeliben,34 +erspace,34 +erin (granblue fantasy),34 +entrenching tool,34 +enterprise (heroic finery) (azur lane),34 +enriend,34 +enoshima,34 +eno (whiskeyne),34 +engawa suguru,34 +elwing ra-nah sylphith,34 +ellowas,34 +ellone,34 +ellie (kisaragi yuu),34 +elizabeth (tomas21),34 +elis 120%,34 +egyuuu,34 +efira,34 +eden grenze,34 +eddie (gyee),34 +echi kanichi,34 +ebi (daidalwave),34 +earth badge,34 +e snow jp,34 +dystopia,34 +dynamotor,34 +dune (series),34 +dry bones,34 +dressy alice (idolmaster),34 +dragoon (fft),34 +dracul,34 +dqn (rokomoko0),34 +double fellatio,34 +dospi,34 +donne anonime,34 +dongdong,34 +dong-jin rice-hime,34 +don patch,34 +dokuromaru,34 +doki doki oyako lesson,34 +dodonpachi daioujou,34 +dim lighting,34 +die letzte (tc),34 +dice members (danganronpa),34 +diana caprice,34 +dellu (geenymous),34 +decarabia,34 +dead body (among us),34 +ddalrim,34 +datli64,34 +daru,34 +danchu (danchu0628),34 +danbi2021,34 +dakian,34 +dailybloopy,34 +daihannya nagamitsu (touken ranbu),34 +d (xxl30433461),34 +d-tomoyo (thekingkas),34 +cum on eyewear,34 +cu-sith,34 +crowgod,34 +cross world,34 +cross patty,34 +croissant (seeker) (arknights),34 +crocea mors (rwby),34 +cream soda,34 +crazy hand,34 +costone,34 +coria,34 +contender (flowerful maid) (girls' frontline),34 +computer club president (suzumiya haruhi),34 +commercial,34 +comma,34 +color banding,34 +cola miku,34 +code,34 +close game/offline (project sekai),34 +clock hair ornament,34 +cleric beast,34 +clawitzer,34 +claris (group),34 +cithis,34 +cinnamon sabaku,34 +christina (tanaka the wizard),34 +chong wuxin,34 +chiyoda momoka,34 +chinkyuu,34 +chihaya megumi,34 +chiba chinatsu,34 +chiaki lsd,34 +chi yei,34 +chewycandy,34 +cheshire (the cat and the white steed) (azur lane),34 +cherry blossom (sk8),34 +cheona (last origin),34 +chemaru (a8l),34 +charlotte francia,34 +character portrait,34 +chain's,34 +chagataaa,34 +ceo and bodyguard,34 +cats yone,34 +cat pajamas,34 +cat cutout panties,34 +cat (masterwork apocalypse),34 +cartesian chart,34 +carmilla (re:zero),34 +camel clutch,34 +call of duty: modern warfare 3,34 +californian sea otter (kemono friends),34 +cako asida,34 +cacao (cacaomgmg),34 +byuune,34 +byougaku,34 +butaneko,34 +bunny pillow,34 +bundesliga,34 +brz,34 +brown (among us),34 +brilliant pagoda or haze castle,34 +brigette (pokemon),34 +brick (atelier brick),34 +breast rings,34 +breast pull,34 +braid girl (enmu's recruits),34 +brachydios,34 +bowtruckle,34 +bow bloomers,34 +booth tomato,34 +boobies uniform,34 +bonnou-s-rice,34 +bomb man,34 +bokuno,34 +boingoo,34 +bohe,34 +bluebell,34 +blue reflection tie,34 +blue garter belt,34 +blue bloomers,34 +blaze (minecraft),34 +blanca (fate),34 +blackdomo,34 +blackarachnia,34 +black tiara,34 +black mamba (kemono friends),34 +billy lee black,34 +billie,34 +bikuto ryuu,34 +big mac,34 +bielin,34 +bibi (bibi47881139),34 +bi yao zhuye,34 +beruzumi-m,34 +beppo (granblue fantasy),34 +bennopi,34 +benjamin4679,34 +benibara nadeshiko,34 +behind moon,34 +beatrix kiddo,34 +bear slippers,34 +bear ringer,34 +barururunru,34 +bart. 
d,34 +bard (ragnarok online),34 +barbara (genshin impact) (cosplay),34 +bao (s 888),34 +bans,34 +banjoo,34 +bad patreon id,34 +back peek,34 +b6n tenzan,34 +azuazuazu19,34 +ayase yuka,34 +ayase sakimi,34 +ayano rena,34 +aya-on (miria00),34 +atsuko,34 +asuka (busou shinki),34 +asmodeus (the seven deadly sins),34 +ashleigh hetrick,34 +asano tomoya,34 +asake ameji,34 +artina,34 +artemis (junketsu no maria),34 +arrow earrings,34 +arimura shion,34 +arimura hinae,34 +arimon (dfrgty12),34 +argyle coat,34 +argos (ff14),34 +arescr,34 +apj,34 +apis (ousama ranking),34 +aozame takao,34 +aomaxuanzexuexi,34 +aoki rei,34 +aoi kao (lsz7106),34 +aoi chiruko,34 +aoba shou,34 +ao no kitsune,34 +anonimasu,34 +annie barrs,34 +angoha,34 +angelo (ff8),34 +anastasia (shvibzik snow) (fate),34 +anal wine,34 +amick (americanomix),34 +amefurin,34 +amayakan,34 +amaya uw,34 +amase (siki696),34 +amamorient,34 +also sprach zarathustra (neco),34 +almostdeadbydawn,34 +alisia heart,34 +alina gray (halloween ver.),34 +alfa (alpharig),34 +alex (sandora),34 +albus severus potter,34 +akubi (fyfy),34 +akke (akke299),34 +akizuki maria,34 +akito (owata11),34 +akita momoko,34 +akira (sayo dayoo),34 +akira (cookie),34 +aki (yunkana),34 +aketa chika,34 +akechi mitsuhide (fate),34 +akasabi,34 +aju222,34 +aji082,34 +aizawa yoshihiko,34 +aizawa sachi,34 +airalin (mark folks),34 +aikawa you,34 +aijima cecil,34 +ai nige,34 +ai (re:lief),34 +agrt,34 +agent (ikuoikuo),34 +agarest senki mariage,34 +afrostar,34 +adriana visconti,34 +adjusting shirt,34 +adelie (space dandy),34 +adachi taeko,34 +adachi rei,34 +acomu414,34 +abekawa kinako,34 +aankau,34 +aalge,34 +a's wonderland,34 +^v^,34 +9tsumura,34 +9h,34 +82jhin,34 +77,34 +5health,34 +4b-enpitsu,34 +3692materia,34 +2gold,34 +10million,34 +10box seisakujo,34 +1059,34 +1-gou (111touban),34 +zunda mochi,33 +zombie ke,33 +zilvern,33 +zi ye (hbptcsg2),33 +zhuotian,33 +zhen panxie,33 +zhang wei yi,33 +zhai,33 +zerozaki hitoshiki,33 +zentaiteki ni sensation,33 +zenra1112,33 +zenpouji isaku,33 +zelus,33 +zedoraart,33 +ze xia,33 +zara (queen's blade),33 +zao (housamo),33 +zanyak,33 +zantetsu (gesogeso),33 +zant,33 +zagashira,33 +zabaniyya (fashionista swimsuit) (housamo),33 +z46 (girls' relay) (azur lane),33 +yuzushiro,33 +yuzuriha inori (cosplay),33 +yuzuaji,33 +yuusha raideen,33 +yuuri (vpxh7525),33 +yuunagi yuu,33 +yuukyuu no campanella,33 +yuukou,33 +yuuki makoto (cosplay),33 +yuuki hiko,33 +yuu (warm water),33 +yurooe,33 +yunomachi,33 +yuno (black clover),33 +yumemiya subaru,33 +yukirei,33 +yukino kanae,33 +yukineko,33 +yukimura tokine,33 +yukimai,33 +yukihiroyuki,33 +yuki miya,33 +yuki daruma92,33 +yukatama,33 +yuiyui (konosuba),33 +yuitanpo,33 +yue zi,33 +yuduki (tt-yuduki),33 +yu arin,33 +ysk (yusuke),33 +your tie is crooked,33 +you taira,33 +you marino,33 +yoshikawa hazure,33 +yoshida sei,33 +yoshida (nono ko),33 +yonyon (yotayota honpo),33 +yome sagashi ga hakadori sugite yabai.,33 +yoku wakaru gendai mahou,33 +yoko-ya manjirou,33 +yoga doujou (misawajima),33 +yjsnpi interview (meme),33 +yingji (zszero),33 +yigali xinji,33 +yi l chen 5,33 +yet you,33 +yellow mittens,33 +yatsushiro nanaka,33 +yatogami kurou,33 +yamato (083),33 +yamasaki tomoya,33 +yamagishi saki,33 +yamada yuuki,33 +yamada saburou,33 +yakubeni,33 +yakov feltsman,33 +yaibaken,33 +yaburebouki akuta,33 +xiaojishangtian,33 +xia lan,33 +xe-shine (ishiyumi),33 +x<,33 +x.x.d.x.c,33 +wtcolor,33 +woshinon,33 +wjs07,33 +with ribbon,33 +witch hunter robin,33 +witch's heart,33 +wiping pussy,33 +winx club,33 +wilbur (animal crossing),33 
+white loincloth,33 +white-paka,33 +whislash (glory purple) (arknights),33 +wheel of fortune (tarot),33 +weathergirl,33 +weapon shop,33 +weapon on floor,33 +wazuka na shiawase,33 +wawawama,33 +wataya arata,33 +washi (micino),33 +war wolf (last origin),33 +war paint,33 +wade99,33 +w4pz,33 +void specialist wraith,33 +vladimir elliot kirilenko,33 +violence devil (chainsaw man),33 +vica,33 +vi (ena0930),33 +vepley (girls' frontline 2),33 +vanta-black,33 +vanna,33 +vampirella (character),33 +valbar (fire emblem),33 +vagabond,33 +uzuki kai,33 +uzaki hana (cosplay),33 +uttt,33 +utsuro no hako to zero no maria,33 +utada hikari,33 +usuzumi kei,33 +usoco,33 +usami haru,33 +uratari,33 +urami,33 +ura (ura-tennislove),33 +up (mmmmmmmmss),33 +unoone01,33 +unko samurai,33 +under the desk (idolmaster),33 +ume (pickled plum),33 +umatachi tsugumi,33 +umaguti,33 +ukitanisu,33 +ueno tomo,33 +uemoto sumire,33 +ucchii,33 +ubo (dbsgurdbsk),33 +tyson hesse,33 +type 56 assault rifle,33 +two of clubs,33 +two-sided capelet,33 +tubuan oisii,33 +tu tora2,33 +tsuyuki yuki,33 +tsuruya-senpai,33 +tsuruki noki,33 +tsuri baka,33 +tsukushite ageru no ni!,33 +tsukino (show by rock!!),33 +tsukimi daifuku,33 +tsugu0302,33 +tsugikuni michikatsu,33 +tsuda kousuke,33 +tsubaki (flower knight girl),33 +trixie lulamoon,33 +trencker,33 +travel,33 +transparent weapon,33 +transparent headwear,33 +track marks,33 +touzuki suzuya,33 +touzokuou bakura,33 +toutoumi,33 +touming renjian,33 +toukoku sasaiko,33 +touka (shirotsume souwa),33 +toudou kyoushirou,33 +toshiyu (10shi yu),33 +toro zai,33 +toriko no shizuku ~natsu no gouka kyakusen de kegasareru otome-tachi~,33 +torikichi (tsukikuma enthalpy),33 +toppo,33 +topper (nu carnival),33 +tooaya,33 +toni (artist),33 +tomoe you,33 +tomj,33 +tokimori aoi,33 +tobi (daidaikukan),33 +titania frost,33 +tiru,33 +tiger i (personification),33 +thundurus (therian),33 +three of diamonds,33 +thor (toaru majutsu no index),33 +the milkmaid,33 +the grudge,33 +the godfather,33 +the god of death,33 +the girls of armament,33 +the demonata,33 +thatob,33 +tewatasanaiinu,33 +tetsu (aurvandil),33 +tesomu,33 +terada tera (style),33 +tera2001,33 +tentacle and witches,33 +tenjouin miruku,33 +tengan kazuo,33 +temptation h,33 +teketeke (tekenotteke),33 +tauta (meshia8787),33 +tatsuo (tty0607),33 +tatsuki (debris),33 +tatiana kirgetova,33 +tatebayashi miharu,33 +tat (prototype2d),33 +taruto (takug0812),33 +taracomax,33 +tanuki (metaltanuki),33 +tanoma suzume,33 +taniguchi (female),33 +tang elen,33 +tamidro,33 +tame,33 +tamayo (kimetsu no yaiba),33 +tamariyazuna,33 +tamaki (tamaki pic),33 +tam (tam0804),33 +takekushi meguru,33 +take no ko (4919400),33 +takatsu kokone,33 +takatoiyori,33 +takanya,33 +takanoori middle school uniform,33 +takano akira,33 +takana shinno (character),33 +takahashi note,33 +taka (tokyoboogienight),33 +taiyang xiao long,33 +taisos,33 +taishou tanaka,33 +taichou furyou,33 +tachibana yuu (shika),33 +tachibana surimu,33 +tachi yure,33 +table knife,33 +t.k.o,33 +szayelaporro granz,33 +syrene (kyuri tizu),33 +swordfish ii,33 +suzuka (once),33 +suzaku (oekaki no sekai),33 +sushisalmon95,33 +sushi 171,33 +supernatural (tv series),33 +super sonico (cosplay),33 +sunaipu (age-zero),33 +sumioo (sumikko no ousama),33 +sumimoto ryuu,33 +suiyou taruta,33 +suimin (sui 0y0),33 +suikaxd(zhanweiheng1992),33 +suika (kinokoh),33 +sugarbell,33 +sugar (one piece),33 +suehachi (hikage),33 +sucking both nipples,33 +succulent plant,33 +subverse,33 +studio ghibli (style),33 +streets of rage 4,33 +stranger things,33 
+stolichnaya (vodka),33 +stick bernard,33 +steller's sea lion (kemono friends),33 +stay puft,33 +starry night,33 +star tail (alice girls),33 +standplay,33 +sqloveraven,33 +splatterhouse,33 +spinzaku,33 +spider itou,33 +spamton g. spamton,33 +sowichi,33 +sore ga seiyuu!,33 +sorcerer rogier,33 +sorai mahiru (fukahire),33 +sora (zwz030),33 +sonou momoka,33 +solange blanchefleur de luxe,33 +soho reina,33 +soemy,33 +sm156,33 +slow motion (vocaloid),33 +sleep kirby,33 +skirt bow,33 +sirpent,33 +sin devil trigger,33 +sin (hitonatsu),33 +sin.x,33 +sin.,33 +silver luster tagore,33 +shyi,33 +shuri (oshiro project),33 +shunta,33 +shugogetten shaolin,33 +shu-ten,33 +shredder (tmnt),33 +shoulder support,33 +shougun-sama wa otoshigoro,33 +shoot the bullet,33 +shishikura seiji,33 +shishidou takane,33 +shishamo (scholacco),33 +shirosei mochi,33 +shirokuroma 29,33 +shirokuro (monochrome0226),33 +shiroi noria,33 +shiratsuyu (kancolle) (cosplay),33 +shirato sayuri,33 +shiraki ai,33 +shiraishi kouhei,33 +shirai sanjirou,33 +shirai kuroko (cosplay),33 +shippo (skink),33 +shinonome ryouko (juusan kihei bouei ken),33 +shinoji (shin status),33 +shinohara seiko,33 +shinjou kanae,33 +shinjin-chan (belko),33 +shinigami kiraki,33 +shimo (yatagarasu),33 +shimada arisu (cosplay),33 +shimabara ushio,33 +shihaku rare,33 +shige (moe-ren.net),33 +shiden akira,33 +shichisaburo,33 +shiboritoranaide onna shounin-san,33 +shibazaki yousuke,33 +shibaya toshimi,33 +shi huang di (third ascension) (fate),33 +shep (stickysheep),33 +shells,33 +shelkopryad,33 +sheba (golden sun),33 +sharu (dog days),33 +shark (gomtang),33 +severed tail,33 +sevens road witch,33 +seven (scissor seven),33 +setins,33 +seshiro (which501),33 +servants holding aphrodite's breasts (meme),33 +serdyukov (girls' frontline),33 +seraphim call,33 +sera narumu,33 +separated wrists,33 +senpai (oekakizuki),33 +sendou yuzuka,33 +selection project,33 +sekisei,33 +sekigan,33 +seiya (artist),33 +seiten (queen's blade),33 +seihouin erika,33 +seggs (meme),33 +sega hatsumi,33 +seeing stars,33 +scotishfold,33 +scorecard,33 +scissors hair ornament,33 +scarletsky,33 +sayonara wo oshiete,33 +sayo (kiki kaikai),33 +saty-rokuji,33 +satsukitane mikako,33 +satoutakahumi,33 +satou kivi,33 +satou (una-vittima),33 +satou,33 +satori (sa bird08),33 +sato (samanosuke 0710),33 +sasaki junya,33 +sasaki (sid328),33 +sasahara natsuki (hyper police),33 +sarara,33 +saphir (sailor moon),33 +santarou,33 +sansui.aoba,33 +sans (cosplay),33 +sanroku 3,33 +sangobob,33 +sand on skin,33 +sancking (fatekl),33 +san moto,33 +salve (8947279),33 +salsa tortilla,33 +sal,33 +sakurameguri shirou,33 +sakurako-san no ashimoto ni wa shitai ga umatteiru,33 +sakurai kouji,33 +sakeko,33 +saitou hajime (rurouni kenshin),33 +saintpaulia (flower knight girl),33 +sailor galaxia,33 +saikawa (0902k137),33 +sahara kazumitsu,33 +sado yasutora,33 +sabutarou,33 +saburou hiji,33 +saberillya2,33 +saber (fire emblem),33 +s35,33 +ryuugajou nanana,33 +ryousan gataki,33 +ryourou (edexigerero),33 +runachikku,33 +royal tea,33 +rowkiba,33 +rota (078),33 +rostina cosmos,33 +rosetta,33 +rosemary bergamot,33 +rose (dragoon),33 +rosamia badam,33 +roland (library of ruina),33 +roki (shouka65),33 +rodent,33 +robocop (character),33 +rivet (vvrn2825),33 +ritsu (re710pngn),33 +rita vrataski,33 +risa (pixiv23908854),33 +ris,33 +rinoda mano,33 +rin-chan now! 
(vocaloid),33 +rimsuk,33 +rifu (hunihuni1130),33 +rifa tellu anelethea,33 +rie-co,33 +rideword (ragnarok online),33 +ricardo milos,33 +rererere mon,33 +reniirean,33 +remyfive,33 +reido1177,33 +reese (mk001black),33 +red eyewear,33 +rayearth (character),33 +rasa k,33 +rarorimiore,33 +rari (badominton),33 +ranger (elsword),33 +ramuda (guilty931),33 +rajaki (oboro muramasa),33 +raira,33 +rainforest,33 +rainbow badge,33 +rain of arrows,33 +raimu (yuzu-raimu),33 +raiden punching armstrong (meme),33 +ragnarok online 2: legend of the second,33 +rabbitcamilla,33 +r-15 (series),33 +queen-zelda,33 +quake,33 +qnakamura,33 +qingfeng canying,33 +qianhai,33 +q-tarou,33 +puffin,33 +pubic tattoo through clothing,33 +psychicjin,33 +protagonist (lost property control organization),33 +propeller hat,33 +programming (topic),33 +priscilla (claymore),33 +princess party,33 +prestige edition (league of legends),33 +poruneko,33 +poking head,33 +pointing spider-man (meme),33 +pogo stick,33 +pochio,33 +pochiharu,33 +pocchipocchi,33 +plug cryostat,33 +planted axe,33 +plaid sweater,33 +plaid sarong,33 +pio (potion maker),33 +pink usagi,33 +pink dot balloon (idolmaster),33 +pine (angel4195202),33 +pierre yoshio,33 +phenice walholl,33 +phasmophobia,33 +phantasmic,33 +petsuo (penpen),33 +peter white,33 +peter (peter6409),33 +peta (snc7),33 +penomena,33 +pen guin15,33 +pelleas (fire emblem),33 +peanuts (ennuim),33 +paya (paya19891115),33 +paul von oberstein,33 +pato (ptro),33 +patchouligo,33 +pariya,33 +paperclip hair ornament,33 +pandegg,33 +palru s2,33 +p kotarou,33 +p08 (girls' frontline),33 +ozu (yojouhan),33 +ouroboros (lord of the mysteries),33 +otokobara,33 +otaple,33 +osu5i,33 +oshimaidebu,33 +oscar gonzalez loyo,33 +osagiri shuka,33 +orioto,33 +orinte,33 +orgus (f-kare),33 +orebelt,33 +orbis terra,33 +orangec,33 +opera vectra,33 +ootomo sourin (sengoku otome),33 +oosato haya,33 +ooka (skavler),33 +onuma kuma,33 +onock,33 +onnomono,33 +onigiri yumi09,33 +onigiri (vtuber),33 +onigiri (mmorpg),33 +omedetou! 
(meme),33 +olga hodrewa,33 +okinu (okinu dane),33 +okami (kami soubi),33 +oira (kagaribi),33 +ohjin,33 +ohil (ohil822),33 +oh jiyu,33 +oguri cap (miraculous white star) (umamusume),33 +office worker (pokemon),33 +odo 7ta,33 +ochite iku niizuma,33 +ochi (ochi1094),33 +oboro kai (kancolle),33 +obake-chan (yozora mel),33 +nyto alina (girls' frontline),33 +nyctea snow,33 +nurse (silent hill),33 +nunuan,33 +numadaira,33 +null2deoru,33 +nukui hayu,33 +nukidoki!,33 +nozomi (summer) (princess connect!),33 +nouzui,33 +norton campbell,33 +noppera-bou,33 +nojiko,33 +noel (mermaid melody pichi pichi pitch),33 +noboes,33 +nobell (bell no5),33 +nlitz,33 +nix (ak-style),33 +niwacho,33 +nishina kazuki,33 +nirasaki hinata,33 +nikumocchi,33 +nikaido yamato,33 +nik ibi,33 +nicol ascart,33 +nezumoto,33 +new orleans (warship girls r),33 +new moon,33 +nero tol scaeva,33 +neon (valorant),33 +neo-tk..,33 +nenemaru,33 +nemachi,33 +nelnal,33 +nellen,33 +nelio,33 +nekoyashiki nekomaru,33 +nekokawaigari,33 +nekojita (ika neko46),33 +neko sheep,33 +neiless neiro,33 +negationtenmaru,33 +nefushutan no yoroi,33 +neckwear lift,33 +ncww rinichi,33 +natsunoyuu,33 +natsumi schwarz,33 +natsume mio,33 +natsume asako,33 +natsuki (natsuyasumi.),33 +natsu no iro no nostalgia,33 +natsu (norari kurari),33 +natalia luzu kimlasca lanvaldear (beloved princess),33 +natalia (idolmaster) (cosplay),33 +narcissu,33 +nanmokaken,33 +nanashishi,33 +nanao mugi,33 +nanami yachiyo (pajamas costume),33 +nanahosiryuuki,33 +nana (krt girls),33 +nameko face (osawari tantei),33 +namaiki!,33 +namae ga kimaranai man,33 +nakonbu,33 +nako ryu,33 +nakazuki yuuna,33 +nakano kinzan,33 +nakahara sumi,33 +naitou shouko,33 +nagone mako,33 +nago celica,33 +nagase riko,33 +nagant revolver (astral bond) (girls' frontline),33 +nadeshiko rin,33 +nachi (herousa),33 +myuuu ay,33 +myuu1995,33 +myuracchi (ayashii hon'ya),33 +my (mylilla811),33 +muvluv alternative chronicles,33 +mustard (pokemon),33 +muso-comet,33 +musical note earrings,33 +muroto aki,33 +muroi (fujisan0410),33 +murasame maru,33 +munchie (dq8),33 +multiple penetration,33 +mui mui (snk),33 +mugman,33 +mugen dai,33 +mu fengchun,33 +mrs. kujo,33 +mrploxykun,33 +mr.lostman,33 +mr. and mrs. 
smith,33 +mp443,33 +mo~zu,33 +mozzu,33 +mozan,33 +moyashi san4,33 +mountain bicycle,33 +motsushi,33 +motorii,33 +motoko (ambiy),33 +mother lumi (matilda fiship),33 +morino (harpoomoss),33 +mori tarou,33 +mori shinji,33 +moonlight ~omoide no hajimari~,33 +monster girl island,33 +monotsuki,33 +mono 1010,33 +monara,33 +monable,33 +mona (destiny child),33 +momozono momo (high school dxd),33 +momoji (momojihiha),33 +momohara kana,33 +moku (racie45bv),33 +mojisan (ebimo),33 +mojipittan,33 +mogami yoshiaki (sengoku collection),33 +mocollie,33 +mochizuki erena,33 +mochigana,33 +mochamillll,33 +mm39572,33 +mizuno rin,33 +mizuna tomomi,33 +mizukiri fuurai,33 +mizukabe,33 +miyoshi nao (miyoshist),33 +miyashiro,33 +miyakawa-ke no kuufuku,33 +miyachu,33 +miya (pure lemon),33 +mixivsky,33 +miu (c blue),33 +mitsuba (watergarden),33 +mitsu masaya,33 +mitorizu 02,33 +mitora5,33 +mist (rune factory),33 +missmoonified (voice actor),33 +miss barbara,33 +miso bon,33 +miseo (mrkglove),33 +misaki runo,33 +mirage koas,33 +mirabilis (fire emblem),33 +minuo,33 +minior (blue core),33 +mini crewmate (among us),33 +minghecanyue,33 +minecraft pickaxe,33 +mind rape,33 +minami natsuno,33 +minael,33 +mimolette (galaxy angel),33 +milreaf,33 +mikhail n,33 +mikannu,33 +mikanagi ibuki,33 +mikan (wanko),33 +mie (sukinako ga megane wo wasureta),33 +midorikaze fuwari,33 +michi ta (masquerade),33 +michelin,33 +mia (kuja999),33 +mi2mi2 minmi,33 +mg3,33 +meta-tron,33 +merueki,33 +merican sack,33 +menthuthuyoupi,33 +melk (7th dragon),33 +meliyannn,33 +meganeko (battle spirits),33 +mee (sohin),33 +meat armor,33 +mcq,33 +mayoko (sisqo 5003),33 +mayoiga,33 +mayo cha,33 +mattyakinako (odango imomushi),33 +matsuha shuu,33 +mateus upd,33 +matebashi,33 +matching earrings,33 +masuko mika,33 +massachusetts (kancolle),33 +masi masio,33 +mashed potatoes,33 +masana hatuse,33 +masamune oekaki,33 +marupuni,33 +martha (aerial drive) (fate),33 +maria rasputin,33 +maoi,33 +manun-chan,33 +manny ambassada,33 +mandara misaki,33 +mamiya marie,33 +mame (yangqi787),33 +mamagogo (gomaep),33 +makurano neena,33 +mako (macomaco7),33 +makai penguin,33 +maiori 00,33 +maiden astraea,33 +maid imouto (maoyuu),33 +mahou tsukai ni taisetsu na koto,33 +mahou senshi louie,33 +mahou no tame no shoujo club,33 +mahado,33 +magika no kenshi to basileus,33 +magical grim,33 +mafia (vocaloid),33 +machi futo,33 +macayase,33 +maazyu,33 +maasan,33 +maabou,33 +m4 gun,33 +m-chan (kinbakuman),33 +luvents3,33 +lumpychan,33 +luli,33 +lukiarab,33 +lucuha,33 +lucifer (helltaker) (cosplay),33 +lost july,33 +longship,33 +log cabin,33 +locking,33 +liyu li,33 +livegun,33 +lit candle,33 +ling dianxia,33 +linebarrel,33 +lin xue ya,33 +liminarity,33 +lilith (shinrabanshou),33 +lilim (shingeki no bahamut),33 +lilim (megami tensei),33 +libeccio (azur lane),33 +liangchanxingmingrixiang,33 +li zeyan,33 +letter pose,33 +les chevaucheurs,33 +lentain,33 +lena liechtenauer,33 +lena dai,33 +lemon raimu,33 +lemon89h,33 +lein,33 +legion (titanfall 2),33 +laurie (personal ami),33 +laura stuart,33 +lataedelan,33 +last embryo,33 +lapis lazuli (gemstone),33 +lanxjourney,33 +langrisser v,33 +lan wangji,33 +laffey (white rabbit welcomes the spring) (azur lane),33 +la13,33 +kyuu you,33 +kyousin,33 +kyo722,33 +kuze matsuri,33 +kuusou code plus,33 +kutori pan'ya,33 +kusunoki suzu,33 +kururunpa,33 +kuroto yamaneko,33 +kurono mika,33 +kuromori (1010845110),33 +kurokurokuro,33 +kuroda kanna,33 +kurenai907,33 +kurebayashi yuzuki,33 +kurahashi (kancolle),33 +kujou sakurako,33 +kugatunohito,33 +kuchinashi 
(needless),33 +kubota junichirou,33 +krusier,33 +koyomi hare nanaka,33 +kowaremashita,33 +kousaka yuuji,33 +kousaka ayano,33 +kouotsu,33 +kou (01310808),33 +kotegiri gou,33 +koshirae,33 +koshi-kun,33 +koromo (kinu),33 +konomu0522,33 +konigstigerchan,33 +komeiji koishi (cat),33 +koku 666,33 +kokoperiiche,33 +koke ojisan,33 +kojima hirokazu,33 +kohaku (fuu),33 +kogyokuapple,33 +kogarasu1st,33 +koboke (scscsc),33 +kmcgold30,33 +kkkhosuke,33 +kizuchi r,33 +kiyohime (kiyohime in ribbons) (fate),33 +kiya hajime,33 +kiwa (pokemonwars),33 +kisugae,33 +kiss (stand),33 +kisaragi miyuki,33 +kisaragi akane,33 +kiriyama yui,33 +kirii,33 +kira hitomiko,33 +kira! mankai smile (idolmaster),33 +kinoshita yuuko,33 +kino haruc,33 +kino-sr,33 +kingfisher,33 +kinako (moment),33 +kinako928,33 +kimono girl (pokemon),33 +kimihara himeno,33 +kimberly (azur lane),33 +kikuyarou,33 +kikuta mokutaro,33 +kikukawa norihiko,33 +kikuchi tae,33 +kikimimi 612,33 +kiki (herayoshi),33 +kijima hyouka,33 +kii (monster musume),33 +kide koushin,33 +ki no rapika,33 +key (kagibangou),33 +kenshin (kenshin3),33 +kenny (poe90),33 +kengo (granblue fantasy),33 +ken the eagle,33 +kemukemuke,33 +keijimohumohu,33 +keep calm and carry on,33 +kdkaikai,33 +kbn317,33 +kazumi yoshizu,33 +kazu-chan,33 +kazeoto kirito,33 +kazanami,33 +kayama tamami,33 +kawamochi (tddm3573),33 +kawai shizuka,33 +katou kouki,33 +kataruruni,33 +katagi ren,33 +katachi noboru nishikino,33 +kasuga shun,33 +kashiwagi kazuhiro,33 +karyl (princess connect!) (cosplay),33 +karambit,33 +kaoswald,33 +kanou sumire,33 +kannagi ai,33 +kanikani (juicy),33 +kanda shouichi,33 +kanataria,33 +kanarai taru,33 +kanade,33 +kamunika,33 +kamomura ayane,33 +kamiya yuuji,33 +kamishiro ryouga,33 +kamira naito,33 +kaminashi yamato,33 +kamikita keiko,33 +kalsept,33 +kaleido ruby (cosplay),33 +kakoikaren,33 +kajo,33 +kairi (oro-n),33 +kainazuki,33 +kaida haru,33 +kai (nyanko daisensou),33 +kahama youko,33 +kagoya1219,33 +kagome (pattern),33 +kagemaro,33 +kagaya nene,33 +kaena swaya,33 +kadowaki satoshi,33 +kadomaki shinnosuke,33 +kadaka (9m),33 +k kymz,33 +juneplums,33 +julius belkisk harway,33 +julion (akesuzu),33 +jukuta tsu,33 +jojofon,33 +jkisaradu,33 +jiuyesang,33 +jitsudan,33 +jishou f-rank no oniisama ga game de hyouka sareru gakuen no chouten ni kunrin suru sou desu yo?,33 +jiseki rena,33 +jinmen-gyo (kemono friends),33 +jin yi dui,33 +jeremy chong,33 +jeong sana,33 +jazz kawa sodom,33 +jay156,33 +jannong,33 +janne d'arc,33 +james cabello,33 +jakqbigone,33 +jacknaiff,33 +jack-barro,33 +jaana kettu,33 +j (ppxx3543),33 +j.c. 
staff,33 +j-peg,33 +izumo konata,33 +izna (iznatic),33 +iwashi (nisankatanso),33 +itsumi (kaptivate),33 +itsuki (sengoku basara),33 +it's super effective,33 +ispin charles,33 +isora hibari,33 +iso (nh15mint),33 +island turtle,33 +isekai sakaba no sextet,33 +iru may (akairiot),33 +ipod nano,33 +invisible object,33 +inverto,33 +inuzumi,33 +inuwaka nazuna,33 +inukai michiru,33 +inuhiko (istdog),33 +inubousaki shian,33 +inu fuji,33 +inu (cookie),33 +internet survivor,33 +instance domination,33 +inside out,33 +inose mai,33 +inmu-kun,33 +index (toaru majutsu no index) (cosplay),33 +inamochi keiichirou,33 +inaba tewi (bunny),33 +implied pornography,33 +impero (azur lane),33 +imp (impractical),33 +immature blue,33 +imaikuy0,33 +ilmeria von leinweber,33 +ikurumi kaoru,33 +ikuhiro (19nnnkti16),33 +ikeshiki-chuujou,33 +ikeda akihisa,33 +ikari gendou (cosplay),33 +ikami (rockhardridefree),33 +igarashi miyuki,33 +ienaga kano,33 +iefukurou,33 +idolmaster shiny festa,33 +ichiya1115,33 +ichineko.,33 +ichiman nisen yen,33 +iceojin,33 +ice keki,33 +ibuki douji (swimsuit berserker) (second ascension) (fate),33 +ian dimas,33 +i-58 (kancolle) (cosplay),33 +hyp,33 +hyou haku,33 +huuyu 1z,33 +huoyi (zzh100200),33 +hunter (great one) (bloodborne),33 +huamuan huamuan,33 +housengo,33 +hotel dusk,33 +hoso miyuki,33 +hoshizaki rika (kanojo mo kanojo),33 +hoshitsuki miki,33 +hoshigaki (hsa16g),33 +hoshi ichi,33 +hororo,33 +hoozuki (otome youkai zakuro),33 +honoo teruki,33 +honegai,33 +homomomomon,33 +homare (princess connect!),33 +holding shorts,33 +holding pants,33 +holding flame,33 +holding drawing,33 +hitotoshite jiku ga bureteiru,33 +hitopm,33 +hitohira,33 +hitoguchi (hioxhio),33 +hisuian goodra,33 +hiruno ushiro,33 +hirono hiro,33 +hippopotas (male),33 +hinuma yuuri,33 +hinbackc,33 +hinase (twoxout),33 +hina (xoxo),33 +hina (one piece),33 +himuro rikka,33 +himemura saki,33 +himekiss,33 +hime apple,33 +hikiyama towa,33 +hikagi tatsuhiko,33 +hijiki (deriku4),33 +hiiraki asuka,33 +hiiragi (hanamaru youchien),33 +hiharo,33 +high elf (warcraft),33 +higashiyama seika,33 +hiep studio,33 +hideyuki i,33 +hesocha,33 +heroman (robot),33 +heriki (trkj),33 +henrietta (log horizon),33 +hen-tie,33 +hellme,33 +hellk111,33 +helevu,33 +hedge clippers,33 +heart bracelet,33 +he c92,33 +hd-hlh-3h,33 +hboxgames,33 +hazime,33 +hayase akira,33 +hawker harrier,33 +hatamichi mihiro,33 +hasutani taki,33 +hasegawa suzuho,33 +hasegawa akemi,33 +harusame tsubaki,33 +haruna konomi,33 +haruhito1211,33 +haru (haru2079),33 +hanegasaki academy uniform,33 +hanaki yuka,33 +han juri (cosplay),33 +hakuto momiji,33 +hakobako,33 +hajime x cross,33 +hajimari wa kimi no sora,33 +haiyun,33 +hair raising,33 +hai yoru,33 +hachimitsukyuuto,33 +hachiko (0088),33 +hachihito,33 +hachi (hachikai),33 +haburashi,33 +habane kotori,33 +h&k xm8,33 +gyarakushi shokudou,33 +gunspike,33 +gunslinger (ragnarok online),33 +guilty crown lost christmas,33 +groin punch,33 +green fox (sasaame),33 +green bandeau,33 +greed packet unlimited,33 +greatwhite1122,33 +greater bird-of-paradise (kemono friends),33 +grapple,33 +goumonsha,33 +gospel (mazohaha),33 +goro simpson,33 +gore magala,33 +gong,33 +goichi,33 +glass (tate no yuusha no nariagari),33 +gipehtyboo,33 +giorno giovanna (cosplay),33 +gilgamesh (modern costume of volupte) (fate),33 +gge (gebback dark),33 +gerichan,33 +gerda (fate),33 +gentle2nd,33 +genocide cutter,33 +genkai,33 +garnet (the rumble fish),33 +gareth (swimsuit saber) (fate),33 +gamjolno,33 +game show,33 +gal to otaku wa wakari aenai.,33 +fx-05 (girls' 
frontline),33 +fuyutarou,33 +futaori arisa,33 +futago monad,33 +futa yami,33 +fumotewi,33 +fullani,33 +fujiwara miyabi (aikatsu!),33 +fujita kanbe,33 +fujishima (raving phantom),33 +fujimaruu,33 +fujimaru ritsuka (male) (chaldea pathfinder),33 +fujiko sugi,33 +frilled bowtie,33 +frey (fisheye placebo),33 +french maid nidalee,33 +fred jones,33 +frances royce,33 +foxy (kof),33 +foxy (fnaf),33 +foxinshadow,33 +fox devil (chainsaw man),33 +foch (rainbow rendezvous) (azur lane),33 +flower tank (touhou),33 +florida-chan (ryusei hashida),33 +flight highschool,33 +fliegerhammer,33 +fletchinder,33 +flat chastity cage,33 +filho rossi,33 +filet (kai himo),33 +fidget (dust: an elysian tail),33 +fgm-148 javelin,33 +fetishy,33 +feriowind,33 +fencing suit,33 +fencer (sekaiju),33 +fei er,33 +fate/protoreplica,33 +fantasia re:build,33 +fang xue jun,33 +fallen of albaz,33 +face stretching,33 +eye in palm,33 +executioner maid (centuriic),33 +excalibur (warframe),33 +exabyte (parallax05),33 +evasong,33 +espgaluda,33 +escaflowne,33 +eroge!,33 +erimiko,33 +erica blandelli,33 +ereshkigal (youming niangniang) (fate),33 +enu naitsu,33 +enogu,33 +engraved,33 +endou akira,33 +emperor zuou,33 +emperor of mankind,33 +emergence (shindol),33 +elvetie,33 +elsi,33 +elna,33 +ellen baker (cosplay),33 +el fuerte,33 +eileen galvin,33 +eflunn (emilylunn),33 +eeeeeiti aka,33 +edward-el,33 +edita (tanaka the wizard),33 +eda,33 +echt,33 +ecchisage,33 +ebrietas daughter of the cosmos,33 +ebichiri sunday,33 +ebi senbei,33 +dutch (black lagoon),33 +dust: an elysian tail,33 +durian,33 +dragon gal,33 +dorago (doraemon4),33 +dorachefu,33 +doonatsu.,33 +donkey kong country 2,33 +dondo,33 +donald duck sailor hat,33 +don corneo,33 +dolphin penis,33 +dokuta,33 +dokudokudoku,33 +doko ka no hosono,33 +doitsuudon,33 +dogdogbhh,33 +dnk,33 +divine bustier (dq),33 +disguised zorua,33 +dirty legwear,33 +dinef,33 +dilemma (vocaloid),33 +digimon ghost game,33 +diaoxian kuangmo,33 +diabolo,33 +devoured by darkness,33 +denim2,33 +dengeki gx,33 +denchi more power,33 +demon slayer (dungeon and fighter),33 +dell,33 +defiaz (infinity),33 +defenestration,33 +deerling (autumn),33 +decoration disorder disconnection,33 +de lisle carbine,33 +david semsei,33 +daruizen,33 +darui hito,33 +darkside ochinpo burst,33 +dark emperors,33 +dangyu (danganpa),33 +dance pad,33 +daikinbakuju,33 +daby,33 +da akana xiv,33 +d-cao,33 +cute honey,33 +custard,33 +cure southern cross,33 +cure princess (macadamia hula dance),33 +ctn sasuke,33 +crowgirl,33 +crow (nichijou),33 +crotch ribbon,33 +cp9,33 +comiket 82,33 +cometch,33 +colonial marine,33 +coll (erichankun),33 +coin (pokemon),33 +coat stash,33 +clover (manga),33 +clockwork planet,33 +clip studio paint,33 +cleru (cleruuuuu),33 +cleo (suikoden),33 +claire victorious,33 +circus66,33 +ciel (toosaka asagi),33 +chuu (rinet),33 +chroniko,33 +choumoku (toriko b c),33 +choujo (kakitama),33 +choppy,33 +chomiso,33 +chocola flex,33 +chloe (melkor mancin),33 +chiyou yoyuchi,33 +chio-chan no tsuugakuro,33 +chinese robot kid,33 +childofa,33 +chiiutsu (cheewts),33 +chiiririn,33 +chihiro (onigiri),33 +chiharudaaaaaaa,33 +chiha,33 +chierishu,33 +chidouin sara,33 +chichi kurage ss,33 +chibi-moth,33 +chiba erika,33 +cherry blossom cookie,33 +cheemsburger (doge),33 +checkered footwear,33 +charamel,33 +chagara,33 +celtic,33 +celeste (animal crossing),33 +catwyz,33 +cattleya (houtengeki),33 +catiua powell,33 +carta issue,33 +carciphona,33 +carbonara hontyotyo,33 +cao xiong,33 +camille (league of legends),33 +calculus,33 +caiothevici,33 +cai 
yuan,33 +cafe cuties soraka,33 +c turtle,33 +bysau,33 +butterfly mask,33 +butterfly (love live!),33 +buster dress,33 +bura,33 +bunyip (monster girl encyclopedia),33 +bunny puppet,33 +bunny (d-rex),33 +buchou,33 +bruce lee,33 +branding iron,33 +bracelet removed,33 +bowcan,33 +bouquet (blue dragon),33 +boss (gindoro),33 +borscht (artist),33 +bookshelf pov,33 +boogeyman (housamo),33 +bonfurai,33 +boku to maou,33 +bochicemetery,33 +blue man,33 +blitzwing,33 +blinders,33 +blackletter,33 +black wetsuit,33 +black lotus (accel world),33 +black gemstone,33 +black (artist),33 +bizet,33 +bitaro,33 +bird skull,33 +bilbo baggins,33 +bickle (bickle1983),33 +bianca (agent aika),33 +bi ting (xia lan),33 +beruche (sailor moon),33 +berserk ryuuko,33 +bellona (epic seven),33 +beidan,33 +bei ju luoxuan wan,33 +beckey9415,33 +bebe1999,33 +beat shooter (idolmaster),33 +beast titan,33 +baund doc,33 +battle academia (league of legends),33 +batai,33 +bashou (senran kagura),33 +banana hair ornament,33 +ballista 2 (sekaiju),33 +bakemono no ko,33 +bactrian camel (kemono friends),33 +backdrop,33 +back hair,33 +baby face (stand),33 +b allbrack,33 +azuki (aduki),33 +azriel (no game no life),33 +ayumu (ayumumkrg),33 +aymmogh,33 +ayin (project moon),33 +ayame kotoko,33 +ayachin,33 +aya-0w0,33 +aw,33 +ava (ava31),33 +aussummer,33 +atsukan,33 +aticotta,33 +asymmetrical jacket,33 +astruma2,33 +assassin's creed: unity,33 +asou yui,33 +ashtoreth illacrimo,33 +ashley rosemarry,33 +ashitano kirin,33 +ashigara (azur lane),33 +asheta7,33 +asd13,33 +asano sisters project,33 +asanagi no aquanauts,33 +asame shinbun,33 +asakawa-san (8107ka),33 +asahina nono,33 +asahikawa yuuma,33 +asagiri aya,33 +asagi shigure,33 +aruuin,33 +arukime,33 +aru (arudes),33 +artoria caster (fate) (cosplay),33 +arthur (fire emblem fates),33 +arkuny,33 +arisa crain femiluna,33 +arin (1010 ssu),33 +arianna caledonia,33 +ariadoa (kono yuusha ga ore tueee kuse ni shinchou sugiru),33 +arcana (swd3e2),33 +arayuki (sakurafubuki),33 +ar ru (ar tonelico),33 +appas,33 +apollo (persona 2),33 +apex legends mobile,33 +aoki minami (+box),33 +aoi sena,33 +aoi miyabi,33 +aoi hane,33 +aoi (amazu),33 +aoba rena,33 +anson jun,33 +anshinzawa sasami,33 +anri anriette,33 +anotoki ashi,33 +anna sakura,33 +anna lindhurst,33 +anko anko,33 +animaniacs,33 +aniki gunsou,33 +angelous lazward,33 +angellyuna,33 +angelic serenade,33 +aneunyeoja,33 +anetarou,33 +anejiru,33 +andre grandier,33 +an-bl,33 +amyu (amm asl pa),33 +amida arca,33 +ameiro pk,33 +amego,33 +ame tame,33 +amayadori uki,33 +amatya,33 +amatsumara (housamo),33 +amanomiya jun,33 +amane yuki,33 +amane tsukuyo,33 +alternator,33 +alpine marmot (kemono friends),33 +alpha omega (cosplay),33 +allez0525,33 +allenerie,33 +alicia priss,33 +alexei dinoia,33 +alcremie (mint cream),33 +alayna danner,33 +alan stuart,33 +akudato,33 +akuan (7jackpot7),33 +akira atsushi,33 +akiori koromo,33 +akimichi,33 +akikaze cp,33 +akatsuki urara,33 +akasaki koyo,33 +akao ppai,33 +akanegasaki sora,33 +akane yumiko,33 +akane (getwild23),33 +akahuzi,33 +akagi (aircraft carrier),33 +aka-san to kyuuketsuki,33 +ajiko ajio,33 +aji paba,33 +aiya kyuu,33 +aishiteruze baby,33 +aisha (saga),33 +airplane ornament,33 +airmisuzu,33 +aina 156cm,33 +ain houfanghuashui,33 +aikohgin,33 +ai ken,33 +ai dongdong,33 +ahhien,33 +adosan,33 +adella the nun,33 +acr (girls' frontline),33 +achilles (fearless diver) (fate),33 +acaco,33 +abyssal chishima princess,33 +absorbing,33 +abisswalker8,33 +^3^,33 +8041mm,33 +7nite,33 +6kusa,33 +507th joint fighter wing,33 +2020s (style),33 
+1g no izumi,33 +1994,33 +0w0,33 +zu-mebio-da,32 +ziz (housamo),32 +zero q 0q,32 +zephryion,32 +zendamu,32 +zen wistalia,32 +zeku (furrock),32 +zek (zecola),32 +zaso,32 +zambot 3,32 +zakk,32 +yxxrem,32 +yuzuki yukari (rei),32 +yuzu ichika,32 +yuzouni,32 +yuyuyu (pixiv19949705),32 +yuusuke-kun,32 +yusuke (shiota),32 +yurina,32 +yunvshen,32 +yuniko moontail,32 +yumesaki emiko,32 +yumeori amu,32 +yumekawa yui,32 +yukiusagi (yukiusa),32 +yukimori nene,32 +yukihira makoto,32 +yukibina,32 +yuki (yuki3243),32 +yuki (snowmaiden),32 +yuiko (yuiyuiko 108),32 +yuikannon,32 +yui (yomawari),32 +yui930,32 +yuetsu,32 +yu@genkoochu (5tsukino),32 +yu-bird,32 +yousisi,32 +younger sister (elona),32 +youjo modoki,32 +you (yoyou),32 +yoshiyanmisoko,32 +yoshino (minami-ke),32 +yorishiem,32 +yonezawa masaru,32 +yomisawa tsukino,32 +yomemi,32 +yof (sc gazer),32 +yinyong yu,32 +yinghuahua,32 +yiga clan,32 +yeyuan33,32 +yeti (monster girl encyclopedia),32 +yellowking hiro,32 +yellowfur,32 +yellow suit,32 +yellow scales,32 +yazuwo,32 +yashiro (sakananohone),32 +yas bitch slay (meme),32 +yang fang leiden,32 +yanfei u,32 +yandama,32 +yamika,32 +yamamoto youko,32 +yamamoto yamato,32 +yamaguchi mika,32 +yamagami miori,32 +yam (yam6056),32 +yaguchi haruo,32 +yagi (yagi5art),32 +yaegashi taichi,32 +yae miko (cosplay),32 +ya chuifeng lai,32 +xp-pen,32 +xionghan de guangtou,32 +xingye,32 +xiao xiao chuan,32 +xiao shei..,32 +xian miao,32 +xia lan bi ting chao hua,32 +xenon (simlacurm),32 +wowaka,32 +wooser no sono higurashi,32 +woodsbench,32 +wonder festival mascots,32 +witoi (roa),32 +winter (winter168883),32 +winning gundam,32 +william d porter (warship girls r),32 +wii fit trainer (male),32 +whitelily bread,32 +wayukako,32 +waveracer d.va,32 +wave505,32 +watereffect.net,32 +water pipe,32 +wataru kuri,32 +watabonten,32 +wata9mm no,32 +wasabi karasi,32 +warts,32 +wararu (user uecx7457),32 +warabe (waraghi),32 +wang cuilan,32 +wander last (vocaloid),32 +wallwalking,32 +wakamiya shinobu,32 +wait,32 +vu-hakase,32 +voice-ore,32 +vogue (magazine),32 +vocky,32 +viveka (haguruma c),32 +vitotraps,32 +violet parr,32 +vintage dress (module),32 +vinashyra,32 +vietnamese commentary,32 +vicineko,32 +vellu (geenymous),32 +velceed,32 +variangel,32 +vampirella,32 +valeera sanguinar,32 +vaal hazak,32 +uzuratani (uzu),32 +utai meika,32 +uta-chan (pan (mimi)),32 +ut pictura poesis,32 +ustn (usatan),32 +usirome,32 +uranfu,32 +urajirogashi,32 +unko yoshida,32 +unitaka,32 +uniphon,32 +unier,32 +unfezant (female),32 +unagidog,32 +unagi (popotan),32 +unadon (unadoom),32 +umesasami,32 +umenokouji aoi,32 +umber00,32 +ultraman r/b,32 +ultraman belial,32 +ukino youko,32 +udberg eldol,32 +u-jin,32 +tyotto ko i,32 +turupiko,32 +tufted puffin (kemono friends),32 +tsuzaki tsunomi,32 +tsuyuri eri,32 +tsutsumi akari,32 +tsuruba (tsu41014812),32 +tsuru (nekopanchi),32 +tsukito yayoi,32 +tsukino omame,32 +tsuki to laika to nosferatu,32 +tsuioku (908026270),32 +tsuchida satsuki,32 +tsuburaya mitsuhiko,32 +triple bun,32 +trinity universe,32 +triggertop,32 +trieste (rooftop lunch break) (azur lane),32 +triangle heart 3 lyrical toy box,32 +traptrix myrmeleo,32 +toy bits,32 +towel bunny,32 +touyama maki,32 +toumi (sr),32 +tougyuu yukio,32 +totoro on,32 +toto (twooz),32 +toshizaki shouma,32 +tororoto,32 +torn robe,32 +toriniku (suikyou),32 +too many scars,32 +tomonao,32 +tomoe mami (swimsuit ver.),32 +tomie,32 +tom marvolo riddle,32 +tokusou sentai dekaranger,32 +tokitou akari,32 +toine,32 +todoroki kakeru,32 +todo-akira,32 +tiyi (tiyi a09),32 +tit horse,32 +tiger 
mask w,32 +tiger lily,32 +throat,32 +three of clubs,32 +thorns (comodo) (arknights),32 +third kureko,32 +thighhigh removed,32 +theironmountain,32 +the thing not quite sure what it is,32 +the last of us 2,32 +the king of fighters '95,32 +the beast (vocaloid),32 +tethys (fire emblem),32 +terrorist,32 +tennouji yuugo,32 +tenjouin katsura,32 +tendou rushuna,32 +tendenbarabara,32 +ten'yoku,32 +teki (kakari),32 +teka,32 +teitei,32 +teiputi,32 +team flare grunt,32 +tapping shoulder,32 +tantanmen,32 +tamanegi (genpi),32 +tamamo no mae (police fox) (fate),32 +tamaizumi hiyoko,32 +tallinn (nostalgic pilsner) (azur lane),32 +takuto kira,32 +takuro (taku3949),32 +taku-tama,32 +tako 8 yaki,32 +takayashiro sayuki,32 +takatsu karino,32 +takasimareki,32 +takasaki misaki (yuru yuri),32 +takamaru (akisora drawing),32 +takahashi rumiko (style),32 +takahashi kazuki,32 +taka-f,32 +taiki shuttle (bubblegum memories) (umamusume),32 +tachikawa,32 +tachiinu,32 +tabgraphics,32 +t@kuyoa,32 +t373412,32 +t0petar0,32 +syn,32 +sylux,32 +syertse,32 +sword kirby,32 +sword hilt,32 +swepot,32 +sweetheart sona,32 +suzuya (maru),32 +suzushika (13 96),32 +suzumebachi (shikai),32 +suzuka g,32 +suzugaeru,32 +sutein,32 +susinoyama,32 +survey corps,32 +surtr (fire emblem),32 +superdiviatomic,32 +super masara ahegao,32 +suou mira,32 +sumeshiruko,32 +sumadera kasumi,32 +sui (fujiwara gacho),32 +sugimoto (niboshiumai),32 +sugawa maiko,32 +stucco,32 +strength (tarot),32 +stoll (tmxhf),32 +steelwire,32 +stb-chan,32 +starmine (manga),32 +starlan,32 +ssong-ga,32 +sr-25,32 +squarevr,32 +square live,32 +spring onion hair ornament,32 +spookie,32 +spinning wheel,32 +spinning teacup,32 +spica (spica 1510),32 +spence (azur lane),32 +special tan,32 +spade echo,32 +soujirou seta,32 +sou mei,32 +sora megumu,32 +sooya,32 +sone (takahiro-osone),32 +soliera (pokemon),32 +softball,32 +sofa (enogunomu),32 +sleeper hold,32 +skyde kei,32 +skulllee,32 +sk (buta),32 +sitting on liquid,32 +sitonai (third ascension) (fate),32 +sister quest,32 +sing152,32 +since,32 +simon eckert,32 +silver crow,32 +siho (ricchil),32 +sigyn erster,32 +siguya,32 +sia namsbinpeni,32 +shuuzen (shu-zen),32 +shunori,32 +shumai (food),32 +shuffling cards,32 +shuaigegentou,32 +shou jian yu,32 +shori bun,32 +shokorate,32 +shizune (homare),32 +shizuku (artist),32 +shiyoo,32 +shiruppo,32 +shirt rolled up,32 +shiratoriko,32 +shioya (soooooolt),32 +shiomi madoka,32 +shinonomemayoyo,32 +shinomiya satsuki,32 +shinobi (ps2),32 +shino laila,32 +shinka (yonkun121),32 +shinagawa hiroki,32 +shinachiku (uno0101),32 +shin megami tensei ii,32 +shimoku,32 +shimizu kaoru,32 +shimashima-ace,32 +shimao kazu,32 +shimamura uzuki (cosplay),32 +shikano (oshiro project),32 +shiira kan,32 +shiina risa,32 +shiina kagari,32 +shigure wasa,32 +shiawase graffiti,32 +shi wuxia,32 +sherry polnareff,32 +sheery sbox,32 +shatou (c-com),32 +shared innertube,32 +shangorilla,32 +shakuyaku (flower knight girl),32 +shadow the hedgehog (game),32 +seven d3t,32 +seven (fft-0),32 +setsugeka tumugi,32 +seta soujirou,32 +seseri aoba,32 +server,32 +serakawa,32 +sentouryoku 5,32 +sensei! 
tsugi wa battle no jikan desu.,32 +sengoku nadeko (cosplay),32 +seiken no faeries,32 +seele,32 +seal (pukozin),32 +screw hair ornament,32 +scp-073,32 +school yard,32 +school bus,32 +sazare (sazare az),32 +sazanka bianca,32 +sayonara no asa ni yakusoku no hana wo kazarou,32 +sayococco,32 +sayaka ikku,32 +sawyer (pokemon masters ex),32 +sawashiro kei,32 +satta naoto,32 +satsuki (konohana kitan),32 +satonaka chie (cosplay),32 +satoji (ochanomkmskry),32 +sasoribi-dekine,32 +sasasa (pixiv1790125),32 +sarasa hanna,32 +sapporo momoko,32 +sanzenkai no avatar,32 +santa65,32 +sansenokumo,32 +sanooxo,32 +sanohiramugi,32 +sano jinya,32 +sandy bash,32 +samurai (kasizuki),32 +sami briggs,32 +sally (pacch0614),32 +sakurame kurame,32 +sakurame kirie,32 +sakurakkuma,32 +sakuragi megu,32 +sakura miko (cosplay),32 +sakura mikan (chirizakura),32 +sakuma shiki,32 +sakaya313,32 +sakai-saka,32 +saiyki,32 +sai hinoki,32 +sai (abyss05),32 +saborou,32 +s-ghost,32 +ryuuhou (azur lane),32 +ryuuga (cookie),32 +ryukadomatsu,32 +ryu (gaquarium),32 +ryouki (senran kagura),32 +rykysd,32 +ruwo benzen,32 +ruthtria (bloodline),32 +rurubell,32 +runamochi,32 +rum (falkyrie no monshou),32 +ruiko (sad),32 +rui wa tomo wo yobu,32 +roxy rex,32 +roundschen,32 +round shelf,32 +roti,32 +rose22,32 +ron (lovechro),32 +rom (show by rock!!),32 +rokuro (ryvius),32 +rokoido12,32 +rockyroo,32 +robin7188,32 +road closed to vehicles sign,32 +rn (radon'ya),32 +rk (cc15915r),32 +rizelmine,32 +risubokkuri,32 +rise (rise19851203),32 +rionoil,32 +riolabo,32 +rindou ruri,32 +rina (crystalrina),32 +riful,32 +rice shower (umamusume) (cosplay),32 +reneph,32 +renais cardiff shishiou,32 +relly,32 +reki (tiny fin),32 +reka,32 +reizei hisako,32 +reina (xipuria),32 +reina (phantom rose),32 +reikakrzk,32 +rei (456789io),32 +regina mercedes,32 +refuto,32 +redum4,32 +red sheet,32 +red seiryu,32 +red plate (kyuu45),32 +red (transistor),32 +red (angry birds),32 +reborns,32 +rayhwang,32 +randou serika,32 +rammus,32 +ramiki,32 +rainbow mika (cosplay),32 +rain mikamura (cosplay),32 +railway gun,32 +radiosity (yousei),32 +racing miku (2016),32 +racing miku (2010),32 +r3ydart,32 +qutori,32 +queen (snow white),32 +q haoyu,32 +pvtskwerl,32 +puremage,32 +pumpkin earrings,32 +pul (gks569),32 +puchi-pochi,32 +ptrs-41,32 +protoman.exe,32 +proto messiah,32 +project winter,32 +prinz eugen (kindred evening spirits) (azur lane),32 +princess kakyuu,32 +pregnancy mark,32 +powered suit (toaru),32 +potionu,32 +pot of greed,32 +pororikin,32 +poopy,32 +poono,32 +ponzu yuki,32 +polygon project,32 +poker face failure,32 +poker-face-008,32 +pokemon pocket monsters,32 +pokemon: arceus and the jewel of life,32 +poison mushroom,32 +pocahontas,32 +plugging ears,32 +pleasedbyviolet (voice actor),32 +pjman,32 +piz,32 +piyomi,32 +pisu 1107,32 +piroya (shabushabu),32 +pirihiba,32 +pipoo,32 +pink heart,32 +pingu (series),32 +pinako rockbell,32 +pilaf,32 +pickle pee pump-a-rum crow,32 +phoenix (x-men),32 +phantom (the legend of zelda),32 +petsematary,32 +petit dragon,32 +persephone ii,32 +penny (inspector gadget),32 +patricia caulfield,32 +pascal (free spirit) (tales),32 +pasca kanonno,32 +parsee day,32 +paresthesia,32 +parakeet girl (yukimoto shuuji (gurigura)),32 +paprika (artist),32 +panos (ssgpanos),32 +pandora (saint seiya),32 +panasonic corporation,32 +palm tree print,32 +pale background,32 +paku romi,32 +padko,32 +oyakorodesu,32 +owju (ouju),32 +ouma zi-o,32 +ouka september,32 +ouer moyu,32 +otuming,32 +ototarou,32 +otoha (h2o),32 +otk,32 +otenki studio,32 +oshiego-chan (mignon),32 
+orouu,32 +original zero,32 +ore deshita,32 +orca (pochincoff),32 +oozeki koyui,32 +oosaka naru,32 +ooi kai (kancolle),32 +oogie boogie,32 +onii-chan kiss no junbi wa mada desu ka?,32 +onibahime (sennen sensou aigis),32 +omo (h98013114),32 +omega quintet,32 +omanjuu mascot,32 +okura shito,32 +okuma yuugo,32 +okita mitsuba,32 +okada izou (i'm one dapping fella) (fate),32 +oka asaha,32 +ojou-sama wa sunao ni narenai,32 +ojama yellow,32 +oichi (pokemon),32 +ogyadya,32 +oginouchihara rei,32 +oginome momoka,32 +oda masaru,32 +octoman,32 +oboro (ragnarok online),32 +nyanpe,32 +nuu (liebe sk),32 +nuruo (pixiv39067734),32 +nuruhachi (honki),32 +nurie,32 +numeri,32 +numahata tofu.,32 +nui (shepherd0821),32 +nu (chrono trigger),32 +npon515,32 +nouzu,32 +notziegler,32 +note55885,32 +northeastern ocean princess (roshiakouji-chan),32 +northampton (azur lane),32 +northa (fresh precure!),32 +noro assumed,32 +nonsugar,32 +nonono futaba,32 +nonomori (anst nono),32 +nonette enneagram,32 +nonaka ai,32 +noh seong-min,32 +nogami (minamiituki),32 +noaki,32 +noa (shironeko project),32 +nkgw,32 +nipple injection,32 +nintoku,32 +nintendo ds lite,32 +ninniku (mfu7324),32 +ningu,32 +ning (smc),32 +nina kalinina,32 +nina (words worth),32 +nillith,32 +nikyu,32 +nikcesco,32 +nikaidou kouhei,32 +nijigen project,32 +niino,32 +nihility,32 +nighttsound,32 +nightmarejan,32 +nightingale (gundam),32 +night gaunt (monster girl encyclopedia),32 +nieto tokage,32 +nido (sebamaster),32 +niao sun,32 +nez n,32 +nettsuu,32 +nessie (kska),32 +neru fia,32 +nemurimangetsu,32 +nemesis no juukou (vocaloid),32 +nekoseki rion,32 +nekogurui minako-san,32 +nekodason,32 +neko kuriya,32 +neko (natsuiroclassic),32 +neko-kun,32 +nejitsu (nukomasu),32 +necronomicon (persona 5),32 +navka (hetza),32 +nattsu (nattu888 8),32 +natts (yes! precure 5),32 +natsuya (pucelle),32 +natsuno hamuto,32 +natsume nadeshiko,32 +natsu mikan (level9),32 +natsu97,32 +natano hisanori,32 +nasuka@hiyokko,32 +nashimochi 4,32 +nashigami tsubute,32 +narugami yuzuriha,32 +narcian (fire emblem),32 +nanohana jiyuu,32 +nanatsuki nana,32 +nanatsu fuji,32 +nanase (ribonshitoron),32 +nananichi,32 +nanaki (mkmk 915),32 +nanahikari rami,32 +nana 0253,32 +nalse,32 +nakamura yukihiro,32 +nakama yasukata,32 +naka noboru,32 +naka (nicovideo14185763),32 +naka-san,32 +naito mare,32 +nagato (great fox's white gown) (azur lane),32 +naga (fire emblem),32 +na bia,32 +my room,32 +my melody (cosplay),32 +mxr,32 +mutsushika misumi,32 +mutsuki (kancolle) (cosplay),32 +mutou tooru,32 +musumi renga,32 +musashi (horizon),32 +muryo,32 +murgoten,32 +murasame (kancolle) (cosplay),32 +murasaki tsutsuji,32 +murasaki atsushi,32 +murakumo takeru,32 +munimuni kinoko,32 +munechika,32 +mumei (pokolv),32 +mukago (kimetsu no yaiba),32 +mugen silhouette,32 +mr bowater,32 +mr123goku123,32 +mr. 
rime,32 +mozzi,32 +moyashi (m-planter),32 +moyashi (karamisouma),32 +mouse print,32 +mounted dildo,32 +motsuni (lxxe1120),32 +mosomoso,32 +moru moru moru,32 +morishima kon,32 +more deban,32 +moose (moosu193),32 +moon ball,32 +mood swing,32 +monako (sora527),32 +momono megumi,32 +momoi saki,32 +momohara natsuki,32 +momo (shinigami no ballad),32 +molu stranger,32 +mokuren (mozukukirai88),32 +mojo jojo,32 +moge-ko,32 +mochizuki mari,32 +mkz,32 +mizuno (hal0527),32 +mizuhara erika,32 +mizu bonbori,32 +miyu10366,32 +miyata sayaka,32 +miya kazutomo,32 +miya clochette,32 +mitsuki kaede,32 +misumi takasumi,32 +mishima ssuru,32 +mishima psycho,32 +miracle (miracle1980),32 +mini piano,32 +minatsume,32 +minato usagi,32 +minase shia,32 +minarei,32 +minamoto no raikou (fate) (cosplay),32 +minami kotori (cosplay),32 +mimiru (.hack//),32 +mimi balguerie,32 +mima tokiko,32 +milkriot,32 +mikumari yusa,32 +mikazukisou,32 +mikami shiori,32 +mikage mika,32 +mii swordfighter,32 +mii-chan,32 +midori matsukaze,32 +midori makibaoo,32 +midas money,32 +michishio nagasumi,32 +michikusa (roadksa),32 +mezashi gohan,32 +mew (words worth),32 +metalinu,32 +meowth (cosplay),32 +mentai mochi,32 +menou kururu,32 +melmetal,32 +mele retanagua,32 +melantha (letters from wessex) (arknights),32 +meishou (cantabile1222),32 +meinoss,32 +meilleure chocolat,32 +megumegu,32 +mega scizor,32 +mega man x3,32 +mega blastoise,32 +mee don,32 +meda,32 +mechanical owl,32 +mazarimon,32 +mayu-mayu1026,32 +matsyumaro,32 +matsunaga tsubame,32 +matsumura fuuka,32 +matano maya,32 +masquerade,32 +masiro,32 +mashita (candy apricot),32 +masaki nonoka,32 +masa ashe,32 +masa (neku),32 +marumaru no shuyaku wa wareware da!,32 +mark (fire emblem: the blazing blade),32 +maria (housamo),32 +mari (twrlare),32 +marginal skip,32 +mapyarong,32 +maoyao-ll,32 +maou mikage,32 +manzairaku,32 +manreeree,32 +manip,32 +manda schank,32 +mamiya (sheena-1125),32 +makiyuki,32 +major 2nd,32 +maicching machiko-sensei,32 +maho (fii-tan the figure),32 +magiquone,32 +magical nuko-lenlen (vocaloid),32 +madsensei,32 +maddestmao,32 +madarame ikkaku,32 +madame ping (genshin impact),32 +mad (artist),32 +maboroshineko,32 +maboroshi chouji,32 +m-ko (hoojiro (found1093)),32 +lyuritis (yoru no nai kuni),32 +lumakini,32 +lulli,32 +ludwig the accursed,32 +luai,32 +lovebird,32 +love plus plus,32 +lori loud,32 +lor starcutter,32 +lololo (kirby),32 +lndrmnn,32 +living armor,32 +littorio (the glory of naples) (azur lane),32 +little xia (elsword),32 +little match girl (sinoalice),32 +lisu,32 +lisa (deel),32 +linna yamazaki,32 +limone (gurande),32 +limelam06,32 +liliana hart,32 +lightning bolt necklace,32 +liangyilin,32 +leopardtiger,32 +leftame,32 +leesuyeon,32 +lc 7v2,32 +lazycoffee (wyen iscordo),32 +lazy guang guang,32 +layla (suptomat),32 +last shooting,32 +lass (pokemon) (cosplay),32 +langjiao,32 +lancelot (smalock),32 +labrador retriever,32 +l85a1 (upotte!!),32 +l.j.,32 +l-gaim mk ii,32 +kyuusenbinore (gavion),32 +kyuukyoku choujin r,32 +kyrie canaan,32 +kuurimuart,32 +kusunokinawate,32 +kurumayama,32 +kurosaki yuzu,32 +kurosaki isshin,32 +kurohachiboku,32 +kuroda matsurika,32 +kuroda kanbee (sengoku bushouki muramasa),32 +kurobis,32 +kuro (bombergirl),32 +kuramochi youichi,32 +kuraken,32 +kuragesaki,32 +kurage (kurakurapix),32 +kura (shironagasu02),32 +kunimoto ori,32 +kumeki (kk4615),32 +kumataka,32 +kumano09 (yaecha0),32 +kukyo,32 +kujo jolyne (cosplay),32 +kudou asuka,32 +kuchibiru (lipblue),32 +kubu kurin,32 +kubonouchi eisaku,32 +kubo yurika,32 +koyama (gantz0409jp),32 +kousaka 
mayuri,32 +kougasha,32 +koto,32 +koromoya,32 +konoike (pepe expect),32 +konishi saki,32 +kondou mikoto,32 +konako,32 +komugi (wataame27),32 +komori atsushi,32 +komori (komo ricecake),32 +komio (do@ho),32 +komeo,32 +komamura sajin,32 +koma kiri aoko,32 +kokumaren,32 +kokoro6636,32 +koibito doushi de suru koto zenbu,32 +koge-owl,32 +knife in hair,32 +kiyu (zuyu),32 +kitt (yu-gi-oh!),32 +kitsune onee-san (shuugetsu karasu),32 +kitayama,32 +kitanaga,32 +kitagawa jun,32 +kissbell,32 +kiss ato kiss will change my relation with you,32 +kise hiyoko,32 +kirihara mana,32 +kirani,32 +kirakira monstars,32 +kinoshita rumi,32 +kinoshita (air hike),32 +kinomiya yukari,32 +kingguyver,32 +king halo (noble white cheer attire) (umamusume),32 +kinako (karasu nomaru),32 +kimono tug,32 +kimidori (dera kimidori),32 +kimi to issho ni,32 +kill me dance,32 +kijo momiji,32 +kidchan,32 +kickstarter,32 +kicking at viewer,32 +kibayashi kimori,32 +keva (liltkeva),32 +keiou yuugekitai,32 +keesuke (kkosyoku),32 +kazuukarazu,32 +kazumu,32 +kayuu,32 +kawai fuguri,32 +kavies,32 +kauchoro (namikazemakase),32 +katsutake,32 +katou mayumi,32 +katou juri,32 +kaskia,32 +kasa jizou,32 +karol capel (girly),32 +karna (versus) (fate),32 +karen uji,32 +karen erra,32 +karamas,32 +karakura,32 +kano (hanayori jyoshiryou),32 +kanikanitengoku,32 +kanakubo homare,32 +kanade kotonoha,32 +kanabuso,32 +kanaa (apple tea z),32 +kan liu (666k),32 +kamoroosaazu,32 +kamishakujii renko,32 +kamishakujii (poppenheim),32 +kamino maihu,32 +kaminarichyan,32 +kamina1978,32 +kami mitsugu (kamiken),32 +kamen rider birth,32 +kame house,32 +kamakani (kanikama8192),32 +kaleid,32 +kakouton,32 +kaki gohri,32 +kakera (comona base),32 +kai toshiki,32 +kai (ootamuno12),32 +kaho (ramb),32 +kaho (amal135),32 +kaguya-san (nantyu-erosada),32 +kagu (a hazy moon),32 +kagemaru (kagemaru321),32 +kagami ryouko,32 +kagami masara,32 +kagami kyousuke,32 +kafuu kaya,32 +kabuto tong,32 +kabayaki,32 +k ei3k,32 +k-sk style,32 +k-bone,32 +juu ame,32 +justin paul,32 +jupachi18,32 +jung myung lee,32 +jump king,32 +juliana (megami paradise),32 +jukebox priest (mechanical buddy universe),32 +joy (shenmue),32 +joy (cyber x heaven),32 +john hamish watson,32 +jmori44,32 +jing king of bandits,32 +jigoku no misawa (style),32 +jianshu,32 +jiageya (atojian keikaku),32 +jey rain,32 +jellen aura,32 +jeanne d'arc alter (holy night supper) (fate),32 +jean bart (kancolle),32 +jayamon,32 +janus cascade,32 +jamil (granblue fantasy),32 +jak,32 +jaeger (rainbow six siege),32 +jackal (warship girls r),32 +jabra (one piece),32 +j na,32 +j-unit (lilykiss),32 +itou mikan,32 +itou aya,32 +itaba atsushi,32 +isokaze (new year's campaign) (azur lane),32 +ishida akira (seiyuu),32 +isekai ni tobasaretara papa ni nattandaga,32 +irori (irorixc),32 +irina luminesk,32 +irarei (araisanblog),32 +inui/byte,32 +instant (ioarthus),32 +insecticide,32 +inquisition (warhammer),32 +inomata mamoru,32 +inkan,32 +injuu gakuen la blue girl,32 +ingrid (sennen sensou aigis),32 +imouto (hanekoto),32 +imjayu,32 +imasan,32 +imari maria,32 +imamura ryou,32 +imai hiyoko,32 +ikeda ruriko,32 +ijuuin mei,32 +ii-chan,32 +ida rintarou,32 +ichimonji akane,32 +ice dragon (artist),32 +ibuki meno,32 +ibuki (clover club),32 +ibuki (blue archive),32 +ia (ilwmael9),32 +i-pin,32 +hyoshiki,32 +human (totomono),32 +huddle,32 +huanghyy,32 +hua ye,32 +houmuari,32 +houin kyouko,32 +hoshino aoi (la huynh hai than),32 +hoshikage wataru,32 +honnouji gakuen,32 +honey select,32 +homutatu,32 +homco,32 +holding sarong,32 +holding rocket launcher,32 +holding 
pinwheel,32 +holding halo,32 +holding digimon,32 +hokuna rin,32 +hizen tadahiro,32 +hiyori7,32 +hiyoku,32 +hiviki n'alchemy,32 +hitsuji kumo,32 +hitotose hirune,32 +hirokawa tomo,32 +hiro kazuki,32 +hiro9779,32 +hirayama yukio,32 +hinomaru zumou,32 +hina-uta,32 +himekawa donki,32 +hilda (stella glow),32 +hikoboshi (cosplay),32 +hikagen yoshio,32 +hiiro (coinxtossxdive),32 +hiiragi tsumugi,32 +hii (hii101),32 +high heel sneakers,32 +high elf,32 +hifumi (art sky littel),32 +hieda no akyuu (cosplay),32 +hidamarinet,32 +hidaka kouki,32 +hidaka aoi,32 +hidaka ajiko,32 +hicham habchi,32 +hibun tsukasa,32 +hesoten,32 +heron,32 +heroic age,32 +hermana larmo,32 +henry (dq5),32 +henoeno,32 +helenium (flower knight girl),32 +heathcliff blanchett,32 +heat man,32 +heart trace,32 +heart-shaped hole,32 +head swap,32 +hayun,32 +hayashi takeo,32 +hayashi (kanzume),32 +hayasaka (a865675167774),32 +hayama sayako,32 +hatsukoi zombie,32 +hatsanxp,32 +hasegawa haruka,32 +has lossy revision,32 +harukawa tomomi,32 +haru kanata,32 +haru akira,32 +harry ord,32 +harii (janib5kc),32 +harada mutei,32 +harada midori,32 +happy meal,32 +happa freee,32 +hanon (nonty),32 +hano9789,32 +hanma yuujirou,32 +hands on another's thigh,32 +hand in buruma,32 +hanazawa,32 +hanavvi,32 +hananenmi,32 +hanamomo (flower knight girl),32 +hanachirasu,32 +hamu 767,32 +halo: reach,32 +hakujou academy uniform,32 +hakuba ouji,32 +haiyahaiyohai,32 +hair iron,32 +haiki (hakkyoii06),32 +haiero,32 +haerge,32 +hacking,32 +habanero-neesan,32 +haaam,32 +ha ku ronofu jin,32 +ha en,32 +h.pn,32 +gz (gzxd),32 +gyuu mao,32 +gyusukiudon,32 +gyan (akenosuisei),32 +gyakuten sekai no denchi shoujo,32 +guumin,32 +gununu (pixiv),32 +gungrave,32 +gundam mk v,32 +guinevere (fire emblem),32 +guinble,32 +guang yiren,32 +greek flag,32 +goyouga-deann,32 +gou (tomero),32 +goshi-san,32 +goruti,32 +gorgon (third ascension) (fate),32 +gordin (fire emblem),32 +gomi (hakumaiteacher),32 +gomesu (gomes0343),32 +gold skin,32 +gohan beast,32 +glowing staff,32 +gillis,32 +gijxgij,32 +gigachad (meme),32 +gidget,32 +giant skeleton,32 +ghost (among us),32 +gettewi,32 +getter-2,32 +geshi,32 +german flag print,32 +gbmah,32 +gb hm,32 +gaw ha leecee,32 +gatotsu stance,32 +gatakk,32 +gargling,32 +gao guangyue,32 +ganzu,32 +gannen harst,32 +gameso,32 +galio,32 +gakuen tengoku,32 +fumiya-taketatsu,32 +fumi (fumibeing),32 +full moon (full moon wo sagashite),32 +fukuen misato,32 +fukiya (fumiakitoyama),32 +fukamine riko,32 +fuka-chan,32 +fujiwara naeka,32 +fujiwara,32 +fujimoto you,32 +fujii shingo,32 +frightening (zoza),32 +francesca (pokemon),32 +frag,32 +forte (crystalplanet00),32 +force (fossan 01),32 +flying tree frog,32 +fluffydus,32 +flower facial mark,32 +flippy (cripine111),32 +flappy,32 +flab,32 +fjsk,32 +fisher903,32 +fish and chips,32 +fiora pellerin,32 +fintowing,32 +finn (star wars),32 +finger on eyewear,32 +fine motion (titania) (umamusume),32 +fimyuan,32 +filia (star ocean),32 +festering desire (genshin impact),32 +fernanda suarez,32 +fern (sousou no frieren),32 +feraltintinsimp,32 +femboy hooters (meme),32 +fellbeast,32 +feijitian,32 +feel,32 +fate/grand order waltz in the moonlight/lostroom,32 +fantia username,32 +fairy knight gawain (ice warrior) (fate),32 +fail (djmax),32 +e~ji,32 +ezuki luna,32 +eye twitch,32 +exren,32 +exeter (azur lane),32 +eutopia (love live!),32 +eurasian beaver (kemono friends),32 +etmc1992,32 +esencey,32 +erze (king's raid),32 +eron,32 +eri (erikiri),32 +eri (artist),32 +erementa,32 +eno (joqeve),32 +enki (juuni kokuki),32 +enemy ootachi,32 
+endou mishiro,32 +emily hu,32 +elven knight (dungeon and fighter),32 +ellen (folkssoul),32 +elbow gauntlets,32 +eirudy,32 +ecou,32 +eboshi,32 +earth defence force 5,32 +e (h798602056),32 +duke nukem (series),32 +ducktales,32 +dslr,32 +drift girls,32 +drawn on eyes,32 +drapri guu-ta-life 2,32 +dossei,32 +doremifa rondo (vocaloid),32 +doraemon (character) (cosplay),32 +dolores (kof),32 +dollinger,32 +dog nose,32 +diyap,32 +divine spirit (touhou),32 +diva duo (mechanical buddy universe),32 +disposable camera,32 +dino (shepherd0821),32 +dimension (module),32 +dias flac,32 +diarmuid ua duibhne (lancer) (fate) (cosplay),32 +diaper changing,32 +dia (hong),32 +devil's hand (ishiyumi),32 +dethmath,32 +desert pattern,32 +deroichi,32 +dengeki daioh,32 +demeter (fate),32 +deerling (summer),32 +deck (architecture),32 +deathwingxiii,32 +death knight (warcraft),32 +ddukae,32 +darusu,32 +dark flame master,32 +dark duck,32 +dao dao,32 +dansei virtual youtuber bacharu,32 +danfango,32 +dan mora,32 +damubomu,32 +daimonji ryugon,32 +daichi (hayate1328),32 +dahe zhuang (yishi fanhua),32 +cyaron (love live!),32 +curse seal,32 +cure marine (super silhouette),32 +cube (jsr),32 +cryptid crab,32 +crusty (log horizon),32 +cross-laced shirt,32 +cro (pixiv14643743),32 +counting sheep,32 +conconcon1031,32 +common kingfisher,32 +commander shepard (male),32 +colomar,32 +coiled cord,32 +code: sariel (elsword),32 +cocytus (overlord),32 +cocoro moto,32 +coco (eogks),32 +clover point,32 +cloud ya,32 +clear cross,32 +clavat,32 +clarisse (fire emblem),32 +citan uzuki,32 +chuuko anpu,32 +chronica,32 +chouginga gurren-lagann,32 +cho aniki,32 +chloe ardenne,32 +chiyu (kumataro0x0),32 +chisiro unya (unya draw),32 +chirosuke (nameless),32 +chinchin kemokemo,32 +child's play,32 +chikuwa (rinka),32 +chikattochikachika (kaguya-sama wa kokurasetai),32 +chigusa nana,32 +chicobo,32 +chiba nagisa,32 +chi lian qiju zhu,32 +chi's sweet home,32 +chcn,32 +charge blade,32 +character balloon,32 +chantez arpinion,32 +chanchan,32 +chan sang,32 +cermia (epic seven),32 +cerevisiae-tan,32 +celestial s,32 +celestial (suzumiya haruhi),32 +cecily fairchild,32 +caw=zoo,32 +cat eyes (alice girls),32 +cas cassis,32 +carrying pole,32 +carcass,32 +calvin & hobbes,32 +calvaires,32 +calbee (potato chips),32 +caius qualls,32 +ca2la,32 +byleth (fire emblem) (female) (cosplay),32 +byeoljagga,32 +buu (buu02),32 +burgerpants,32 +bunbee (yes! precure 5),32 +budd root,32 +bryan fury,32 +broom ribbon,32 +brandish,32 +brain drain (skullgirls),32 +brace,32 +bowed wings,32 +bori (3910234),32 +bookcage,32 +boo 1,32 +blue raincoat,32 +blue moon (module),32 +blue drop,32 +blue-eyed girl (hayabusa),32 +blonde shrine maiden from a future era (touhou),32 +blazefire saber,32 +blaz b. 
aros,32 +blade runner 2049,32 +blade (nu carnival),32 +blacktheif,32 +blacknight (summer flowers) (arknights),32 +blackberry hair ornament,32 +black tunic,32 +bitte,32 +birman h,32 +bionic commando,32 +biloxi (azur lane),32 +bill (left 4 dead),32 +bigdog,32 +best jeanist,32 +bernard wiseman,32 +berich (7th dragon),32 +benelli m4,32 +behind cover,32 +bebe 0620,32 +bear yutaka,32 +bear head,32 +beamed thirty-second notes,32 +bea (pokemon) (cosplay),32 +bat (hokuto no ken),32 +barley juice,32 +bari dal,32 +barb-tan,32 +banshee (last origin),32 +baltan seijin,32 +bake-danuki (genshin impact),32 +baby steps,32 +b6,32 +azuremo,32 +azuki-iro,32 +azm (mochanepore),32 +azito7,32 +ayabe lilyna,32 +atagumo yuba,32 +asuna (blue archive) (cosplay),32 +assi,32 +aspis,32 +ashihara chihiro,32 +ashei,32 +ascot between breasts,32 +asatsuyu sayoko,32 +asashio (the transfer student underneath the dancing petals) (azur lane),32 +asamori mizuki,32 +asagi-so,32 +aruhi ohimesama ni natteshimatta ken ni tsuite,32 +arson,32 +armored animal,32 +arisku,32 +arishiki,32 +argyle bow,32 +archangel (p&d),32 +arca (summon night),32 +ararecoa,32 +arantheus,32 +aranasi,32 +aralez,32 +aqwiz,32 +appleale19,32 +apple poison,32 +apple (luffy123),32 +aphrodite (hades),32 +aoshima kanae,32 +aomi isara,32 +aoirnn,32 +aoi ren,32 +aoha yuuki,32 +anzelotte,32 +anyacchi,32 +anya pandaria,32 +anubis (z.o.e.),32 +ant-man,32 +anju (utawarerumono),32 +aner (qqan00),32 +ane hoshimaru,32 +andou natsuki,32 +ancoloyuki,32 +anadapta,32 +amou june,32 +amor,32 +amidamaru,32 +ami7,32 +ame (candycircle),32 +amatake akewo,32 +amashi (qcmrq906),32 +amano miyabi,32 +amano kenpi,32 +amane shinobu,32 +amakura kei,32 +amaki-aria,32 +almiria bauduin,32 +alloc (playable one),32 +alice lendrott,32 +alexandra viktorovna dashkova,32 +alexander lloyds,32 +alexa (pokemon),32 +alea,32 +akuakuran,32 +akizuki kanna,32 +akitsuki ria,32 +akikaze rui,32 +aki a0623,32 +akashi (akashimichi),32 +akahito (genshin impact),32 +akahaneko,32 +akagi ritsuko (cosplay),32 +akabane iori,32 +ak1222dece,32 +aizawa marimo,32 +after the rain,32 +africa,32 +aether foundation employee (cosplay),32 +admiral graf spee (world's cutest) (azur lane),32 +adlet myer,32 +adeltrud walter,32 +addam origo,32 +adachi masahiro,32 +ace trainer (pokemon) (cosplay),32 +ace akira,32 +abyssal sun princess,32 +abukuma (azur lane),32 +abudala,32 +absurdly fat mons,32 +abemorioka,32 +aa-2153,32 +a kite,32 +a-tsuki,32 +a-1 pictures,32 +78 (tky8),32 +746kuchiku,32 +714 (leg200kr),32 +5ya,32 +5alive,32 +57friend,32 +4me 4ma,32 +3735geriragouki,32 +2y (tsuyu),32 +28aarts,32 +10011018,32 +0x3,32 +0000 (byoubyou),32 +zygarde (complete),31 +zygarde (10%),31 +zuosi zhichu,31 +zundamochi (sera),31 +zuihou (kancolle) (cosplay),31 +zorim,31 +zokenwatarushi,31 +zhang fei,31 +zetz,31 +zenji029,31 +zelgius (fire emblem),31 +zeko,31 +zeixique,31 +zangetsu (shikai),31 +z36 (azur lane),31 +z23 (the banquet's honor student) (azur lane),31 +z20 karl galster (azur lane),31 +yuzuki yukari's younger twin brother,31 +yuzuki ryouta,31 +yuzuha (vtuber),31 +yuutenji mishio,31 +yuurakudou kurono,31 +yuunagi kanade,31 +yuuki nanase,31 +yuuforia,31 +yuu kurema,31 +yusya,31 +yuro (mangasukinoyuro),31 +yuri meichi,31 +yuren,31 +yura 458,31 +yuo0,31 +yunael,31 +yumeno shiori,31 +yume aoi,31 +yuma (yuuma pants),31 +yukipo,31 +yukine,31 +yuki (luupechi),31 +yuki-ichigo,31 +yui (imprinting),31 +yuffie kisaragi (cosplay),31 +yueyue no hand,31 +yu cheng hong,31 +ytuorvi,31 +ys origin,31 +youyou (yoyoyo),31 +yousan (pixiv540898),31 +yoshitaro 
(almendra),31 +yoshioka kumiko,31 +yoshikanakamura,31 +yoshida shouyou,31 +yomo renji,31 +yokoshima (qb74pnkp),31 +yohia,31 +yodokawa (yukko),31 +yiqiang,31 +yezhi na,31 +yayoi b lutwidge,31 +yatorishino xam,31 +yatatashira,31 +yao bikuni (onmyoji),31 +yansae81,31 +yanobrk,31 +yankee41,31 +yangli daxian,31 +yang yang,31 +yanagita (daitai 2 ton),31 +yanagisawa masahide,31 +yanagi koharu,31 +yamakumo,31 +yamakawa kouji,31 +yamada uiro,31 +yamada auto,31 +yakata (artist),31 +yagumo kengou,31 +yagiri seiji,31 +yagi (s1120411),31 +yae (eky 567),31 +xinshijie de akalin,31 +xiaowei (xxx29042536),31 +xiaoshan jiang,31 +xia oekaki,31 +xi-u,31 +xelvy,31 +xbox one,31 +x-change,31 +wz.29 (girls' frontline),31 +wukloo,31 +wu zetian (swimsuit caster) (first ascension) (fate),31 +world teacher -isekaishiki kyouiku agent-,31 +world's edge,31 +wootsang,31 +wooden fish,31 +wokami,31 +wminiminiw,31 +wl6yugi8go1,31 +witch (dmfd),31 +witch's garden,31 +wisconsin (pacific),31 +winton kidd,31 +windsurfing,31 +windfish's egg,31 +white blood cell (hataraku saibou) (cosplay),31 +wet leotard,31 +wet ground,31 +welding torch,31 +water battle,31 +waremokou (flower knight girl),31 +wankoro mochi,31 +walking mushroom (dungeon meshi),31 +wajima maki,31 +wae,31 +vvvmung,31 +vmax-ver,31 +vividyellow,31 +virgin mary,31 +viorate platane,31 +violet (ac8231),31 +vexxxxa,31 +vertical-striped sweater,31 +velchi,31 +veggie,31 +varys truss,31 +var (weapon),31 +vanilla (nekomist),31 +van gogh (second ascension) (fate),31 +valkyrie-zero,31 +uzumi (uzumi yosi),31 +uz3d,31 +uungunover,31 +utsumi erice (swimsuit avenger) (second ascension),31 +usseewa,31 +uss yorktown (cv-10) (y.ssanoha),31 +usarinko,31 +us@myo,31 +ursaluna,31 +uranoyoru,31 +uogokoro-kun,31 +unwrap me body bow,31 +unown ?,31 +uni96 (uknee96),31 +un403lucky,31 +umino (anesthesia),31 +umi owl,31 +umbreon (cosplay),31 +uesugi hidehiko,31 +uehara sayoko,31 +uchuu senshi baldios,31 +uchiha mikoto,31 +ubel blatt,31 +uatemyrice,31 +u (mypace),31 +tyuuboutyauyo,31 +tytree crowe,31 +tytania,31 +tyrca (venus blood),31 +type 3 active sonar,31 +twitter strip game,31 +twinkle eye,31 +turn of the golden witch,31 +tukemono6,31 +tsurumi kazane,31 +tsumuri,31 +tsukimichi,31 +tsukikaze aki,31 +tsukihiko (kagerou project),31 +tsujiori,31 +tsui (kojiya),31 +tsuaaa,31 +tsathoggua (housamo),31 +true blue,31 +trkz tmwk,31 +trigonometry,31 +trigger (ace combat),31 +toyo (c8),31 +toya kento,31 +touya (tottoo-to),31 +tourniquet,31 +touhou sky arena,31 +touching tail,31 +torn bag,31 +torimachi kazami,31 +torigoshi crow,31 +torasigure,31 +tooya (gin'iro koubou),31 +tonnelee,31 +tongue twister,31 +tomoe (blue archive),31 +tombiiwa,31 +tokitoki (commando),31 +tokita kouichi,31 +tokime shizuka,31 +tokarev (a couple's journey) (girls' frontline),31 +tojorin,31 +toilet brush,31 +togashi yuu,31 +toda ayu,31 +tobi (mue86),31 +toaster (arms),31 +tkbnmnm,31 +titus alexius,31 +tirofinire,31 +tio (grandia),31 +tialoft e tromea,31 +ti keep,31 +ti2,31 +thumbelina,31 +thimble,31 +thi fure,31 +the super mario bros. 
movie,31 +the iris swordsoul,31 +the batman (2022),31 +thar chandran,31 +tetisuka,31 +tenton (henatyo),31 +tenshin no kehyaku tanuki,31 +tenroy,31 +tellu (sailor moon),31 +telepurte,31 +tekaru,31 +tech box (azur lane),31 +teca (ryeol),31 +tawa (ookami mio),31 +tatsunoko 777,31 +tatsumi (psmhbpiuczn),31 +tatsumaki udon,31 +tatsuki (pizza no tempra),31 +tatara maguro,31 +tatara kenshiro,31 +tanu (ace ikachan),31 +tanikaze (azur lane),31 +tanaka souichirou,31 +tamtam,31 +tamayo,31 +tamaki (209),31 +tamagoyaki pan,31 +tama go,31 +talesshop,31 +tales of phantasia: narikiri dungeon x,31 +takuyasaeki,31 +takuan (a mood home),31 +takizawa seidou,31 +takenaka hanbee (sengoku basara),31 +take-run-atelier,31 +takatsuki arunashi,31 +takashiro chidori,31 +taka (takahiro si),31 +taiyou no promia,31 +taiyaki a,31 +taishi karibe,31 +tachibana senzou,31 +tachibana meiko,31 +t1ger spuma,31 +syr flover,31 +syobonne,31 +sylvia christel,31 +sylphiel nels lahda,31 +sy-l-via,31 +sword of seiros,31 +switch axe,31 +sweetie belle,31 +suzushiro sayu,31 +suzusato rinka,31 +suusuke,31 +sussex (azur lane),31 +surutsu,31 +suruga ataru,31 +surps,31 +super sailor pluto,31 +super mario-kun,31 +super-saiya-0173,31 +sumiyoshi rocket,31 +sumi hei,31 +sulong form,31 +sula (s ra760),31 +sugou asuka,31 +sugiura jirou,31 +sugino tomohito,31 +sugarette,31 +steel ingot,31 +star bit,31 +standing leg lock,31 +stahl (fire emblem),31 +stacker pentecost,31 +ssm (ssm82048039),31 +ssam (samel10),31 +srco,31 +squirrel costume,31 +spring (trigger),31 +spiral warrior,31 +sphie,31 +spam,31 +space xu fu (fate),31 +sou 230,31 +soto miyako,31 +sosya ku,31 +sortiliena serlut,31 +sopranino,31 +sophie (sennen sensou aigis),31 +soooooook2,31 +solidus snake,31 +soldier's set (zelda),31 +sogekishu (sni8er),31 +sogdin,31 +softp3ach,31 +snowflake liliput (idolmaster),31 +smokey (alchemy stars),31 +smirnoff (vodka),31 +sleepy69,31 +skull (disgaea),31 +skarltano,31 +sio genshin,31 +sino (rtlsino),31 +sino (oyasumi hf),31 +silver (color),31 +silva (enkyo yuuichirou),31 +silber 1224,31 +sig (gyee),31 +side part,31 +shuimo,31 +shoutoku taishi,31 +shoudou kotoha,31 +shou (karigurashi no arrietty),31 +shooting through heart,31 +shooting girl,31 +shon,31 +shiva (tairakuten),31 +shiten rekka karin,31 +shiryuu akira,31 +shiro no musume (vocaloid),31 +shiro hakuchou,31 +shiro (kiron),31 +shiro (acad1213),31 +shiro (46isou),31 +shirazu ginshi,31 +shiratsuki,31 +shiranui inori,31 +shiranui hansode,31 +shiranagi masa,31 +shirakami fubuki (artist),31 +shiraishi takashi,31 +shiragami youko,31 +shiongaze,31 +shinyu xingyu,31 +shinonono tabane,31 +shinmai maou no testament burst,31 +shining dream,31 +shinatsuhiko yae,31 +shin mazinger zero,31 +shin (dragon ball),31 +shimomura izumi,31 +shime mura,31 +shimazoenohibi,31 +shimashima salmon,31 +shimabara,31 +shima kujira,31 +shigatsu itsuka,31 +shianebulae,31 +sherry (langrisser),31 +shellin burgundy,31 +sheila e,31 +sheeney (muku),31 +shayla-shayla,31 +shawli,31 +sharifah ozdil,31 +shanxin (the legend of luoxiaohei),31 +sham fu,31 +shah,31 +shadow (kingdom hearts),31 +shaco,31 +sgt hartman,31 +sgawarananto,31 +sexy gals (idolmaster),31 +seset,31 +serini (pixiv fantasia),31 +sera (judgemint),31 +seo (tqhgud016),31 +sentry,31 +sensei (tawawa),31 +senpai (kuro senpai),31 +senoo chihogi,31 +senacolada,31 +selka zuberg,31 +sega dreamcast (sega hard girls),31 +seera finis victoria,31 +seele (honkai: star rail),31 +seed uniform (ff8),31 +scp-040-jp,31 +sconce,31 +schwerer gustav,31 +school wear (idolmaster),31 +scarlet ohara,31 
+scarf girl (tiger & bunny),31 +sazaki susumu,31 +satsuyu ito,31 +satorichan,31 +satomi naoko,31 +sasuke (ganbare goemon),31 +sassakntm,31 +saskia gutekunst,31 +sashisu,31 +sashacall,31 +sasaki toshiyuki,31 +sarashiki kanzashi,31 +sarah adiemus,31 +sarah-san (mignon),31 +sap,31 +sanwa (koyabu2171),31 +santa panties,31 +santa (kaisou hikuutei),31 +sankichi (croquette crab),31 +saniiiwan,31 +sangekimaru,31 +sanasedayo,31 +samurai (zoza),31 +samuel b. roberts mk ii (kancolle),31 +salty eyes,31 +sakurai muto,31 +sakuraebi chima,31 +sakura mafumi,31 +sakura kyouko (swimsuit costume),31 +sakura bitmap,31 +saku (amespi),31 +sakashita yuzuyu,31 +sakanobo (sushi1021),31 +sakana kidori,31 +sakamoto shuuji,31 +sakamaki izayoi,31 +sakaki kayumu,31 +saiun sigma,31 +saitou sakae,31 +saitou chiho,31 +saitama (one-punch man) (cosplay),31 +saikoro (et1312),31 +sahara (charlotte),31 +sadcat,31 +sabusuka,31 +sabo rin,31 +saber beam,31 +sabamisob,31 +saaal653,31 +sa/tsu/ki,31 +s o i,31 +ryuuna (shining tears),31 +ryuuga nanamaru,31 +ryugue,31 +ryu jiao,31 +ryou (ryo 217cafe),31 +ryoko (game x over),31 +rynn (seibu),31 +ryner lute,31 +rurumo,31 +ruoganzhao,31 +ruo (cruzada),31 +ruka (cookie),31 +ruit,31 +ruint,31 +ruby (nox),31 +rozarita,31 +roubai academy school uniform (new),31 +rosso fantasma,31 +rosa tsubomi,31 +rokuya nagi,31 +rokuji,31 +roisa,31 +roderika (elden ring),31 +rodan (godzilla: king of the monsters),31 +robin sena,31 +rita ya,31 +ririka (ab-yuri),31 +rimibure,31 +rikudou reika,31 +rigurudo (tensei shitara slime datta ken),31 +ricken,31 +richea spodune,31 +rich h1ll,31 +riako,31 +rh0012,31 +retorou,31 +resurrection,31 +rerere,31 +repulse (warship girls r),31 +reo (haozz),31 +renyu1012,31 +renpu girls school uniform,31 +rekka yamato,31 +reimaco,31 +regenerate-x,31 +reducto,31 +redman (character),31 +redforge,31 +red pond,31 +red mask (arizuka),31 +red dragon archfiend,31 +red blood cell (hataraku saibou) (cosplay),31 +red ace,31 +red (saga frontier),31 +re:n ne,31 +razz berry,31 +raydango,31 +rara419,31 +rapiko,31 +raoul (raoul77),31 +ranka (tonbo),31 +ranger 2 (sekaiju),31 +ran (elsword),31 +ramirisu,31 +raijin,31 +racing miku (cosplay),31 +rachel ransom,31 +quruiqing,31 +qumaoto,31 +quiz nanairo dreams,31 +quincy,31 +qplus,31 +qing,31 +qi (bleachcxn),31 +qi1san,31 +pyron,31 +pyonkichi,31 +putin (usavich),31 +purring,31 +purple innertube,31 +punt (kienbiu),31 +pucho,31 +provocation,31 +protagonist (romancing saga 2),31 +prologue rouge (idolmaster),31 +principal (hidamari sketch),31 +princess bitch,31 +prince of persia,31 +prince of lan ling (eastern socialite attire) (fate),31 +priest77,31 +poyosuke,31 +power glove (nintendo),31 +potato7192,31 +portugal,31 +porcupine,31 +porcelain,31 +poppo sutchy,31 +popopo,31 +popeye the sailor,31 +ponto1588,31 +ponpu-chou,31 +ponpoko (vtuber),31 +polish clothes,31 +pochi (askas is god),31 +playstation symbols,31 +player avatar prototype (woman) (kemono friends),31 +planting,31 +plaid umbrella,31 +plaid bikini top,31 +plaid ascot,31 +pixiv forest,31 +pisapipi,31 +pipikopipiko,31 +pino ko,31 +pink garter straps,31 +pink-haired nun (skeb),31 +pikakoya,31 +pig print,31 +piaroo,31 +piao miao,31 +pi (zcvj3588),31 +pewposterous,31 +peroronti,31 +perman (series),31 +people's liberation army navy,31 +pentagram earrings,31 +penis nipples,31 +penguin uwu,31 +peanut butter,31 +pavianne (ragnarok online),31 +patalliro!,31 +paruma umu,31 +paravane,31 +parasite eve ii,31 +paralysis,31 +panya,31 +panties under leotard,31 +panties over bike shorts,31 +panda (azarashi 
suki),31 +pajant,31 +paisu-chan (yashiro sousaku),31 +painnico,31 +pai (1111),31 +pafe yuzuran,31 +ozma,31 +oyuyamio,31 +oyashio kai ni (kancolle),31 +oumi hi,31 +ouhashi (yumenosukima),31 +ouchi ni kaeru made ga mashimaro desu,31 +otyaume 1910,31 +ots-44 (girls' frontline),31 +otome no teikoku,31 +otogi yuugi,31 +ossou rocket,31 +osananajimi ga zettai ni makenai lovecome,31 +oriko (nicolai),31 +oribe ririko,31 +orangutan,31 +opera brest,31 +open leotard,31 +ooki bonta,31 +oofuji reiichirou,31 +ontama,31 +oni chichi,31 +okuma tanukichi,31 +okiya subaru,31 +okinawa,31 +okaya mrh,31 +oizumi,31 +ohako,31 +octorok,31 +ocaca (chocolate-hopper),31 +obey me!: one master to rule them all!,31 +o-hako,31 +nzack,31 +nyan cafe macchiato,31 +nusisring tactical,31 +nurse no obenkyou,31 +nurse ni omakase,31 +nuri,31 +nui (nuinui0300),31 +nu wa,31 +npt shizuka,31 +nox13,31 +nowheresoul,31 +northampton (warship girls r),31 +north korea,31 +norne (fire emblem),31 +nonomiya shiho,31 +nonaprev,31 +nonaka nono,31 +noko,31 +nogizaka46,31 +noel (pixiv5459099),31 +nodocchi,31 +noda satoru,31 +no capelet,31 +no/min (noumin suijun),31 +nitumaruta,31 +nishizawa shizuku,31 +nishiwaki,31 +nishitaka,31 +nishio akira,31 +nira-chan,31 +ning rongrong (douluo dalu),31 +nina geminis,31 +nikuya (nikuniku nikuya),31 +niii,31 +niccya,31 +niall ardanach,31 +ni celery pk,31 +neziiro,31 +neyagi,31 +newmanoid,31 +newash,31 +netapai1,31 +nervegear,31 +neon (pixiv 31150749),31 +nenkou-san,31 +nekopurin (nyanko prin),31 +nekome3,31 +nekogami yaoyorozu,31 +neil (neil nim),31 +neiko,31 +nehan (gedatsu nehan),31 +negi3,31 +nda-p (threelow),31 +nazuna (log horizon),31 +navel (company),31 +natsusora aona,31 +nasu bacon,31 +narumi akiko,31 +narugino mikatan,31 +napoleon bonaparte,31 +nankyoku sakura,31 +nanatsuki sousuke,31 +nanashiba,31 +nanao yuki,31 +nana (mega man x: command mission),31 +namiki kojiro,31 +namae (areees),31 +nakata masahiko,31 +nakanoshima tae,31 +nakano kiho,31 +nakamura kuzuyu,31 +nakajima yuuko,31 +nakaga eri,31 +naive (day),31 +nagi mizuha,31 +nagatsuki misoka,31 +nagato (naruto),31 +nagatani (ngt 926),31 +nagano (5ronta),31 +nacho (nacho 1103),31 +na arva,31 +n (anniversary 2021) (pokemon),31 +mysterious heroine x alter (fate) (cosplay),31 +myouji namawe,31 +myoue,31 +muyu713,31 +muu1519,31 +mutton (user hafp8324),31 +muttiy,31 +muta kokichi,31 +musu,31 +mushroom parent,31 +mushi kei,31 +mushi hara,31 +musekinin kanchou tylor,31 +murmeltier,31 +murmansk (sceneries of pure snow) (azur lane),31 +murasawa hiroshi,31 +murasaki orange,31 +muni nuren,31 +mumu mago,31 +multicolored flower,31 +muko (kokia38),31 +muki (munikichan),31 +mukai (kof),31 +mujun-gatamari (meme),31 +muhyowo,31 +muguruma miyako,31 +mugen lion,31 +mugcan,31 +msugi,31 +mr.way,31 +mp-446 (girls' frontline),31 +mozukuzukuzuku,31 +moyashi udon,31 +mov,31 +mouthpiece,31 +mount,31 +moukin punch,31 +motsunukisuki,31 +motitoy,31 +mother (pso2),31 +mossberg 590,31 +morung,31 +moray eel,31 +moon in daylight,31 +mony,31 +monty oum (creator),31 +monster energy-chan,31 +monkey buonarroti,31 +mongkhon,31 +mondragon m1908 (girls' frontline),31 +momotarou (character),31 +momoko (kof),31 +momoko (kaeru314),31 +mogumogu fuyoudo,31 +modoki kuma,31 +model warrior julianne,31 +mochimon,31 +mocha (mochaxgm),31 +ml.e,31 +mkt (50r50r),31 +mizushima aru,31 +mizunototori,31 +mizukiyan,31 +mizuhara saki,31 +miyuu (crazy lollipop),31 +miyashiro takuru,31 +miyama sana,31 +miyako (halloween) (princess connect!),31 +miyabi reiji,31 +miya (baelfight),31 +mituki (mitukiiro),31 
+mitsuya,31 +mitomumitomu,31 +mitarashi o,31 +mistimagi (character),31 +miss goldenweek,31 +misono karin (halloween ver.),31 +misoni (mi so ni t),31 +mishido sun,31 +misaki sango,31 +miruku pan,31 +mirelia q melromarc,31 +miranda (feguimel),31 +mio (needless),31 +mio (jian wei),31 +mingakk,31 +minato hikaru,31 +minato0618,31 +minamoto momo,31 +minamixdrops,31 +minamino nanami,31 +minami mayu,31 +minakami nagara,31 +mimikkoui,31 +milmir,31 +millennium necklace,31 +milaria,31 +mikuni aoi,31 +mikodanye,31 +mikazuki tenma,31 +mikazuki (feimao),31 +miin miin,31 +mihono bourbon (umamusume) (cosplay),31 +michiyuki,31 +michia (bourgognepony),31 +michelin man,31 +mian li,31 +mf bunko,31 +messenger (41986996),31 +meru (merumumu),31 +mershark (monster girl encyclopedia),31 +merlin (fate/prototype) (third ascension),31 +mereoleona vermillion,31 +mercury black,31 +mercedes (maplestory),31 +mentaiko jojotarou,31 +meng lea,31 +mem-mem (precure),31 +melvy de florencia,31 +melissa renata,31 +melfi,31 +mele ck,31 +meitantei holmes,31 +mehve,31 +megrim haruyo,31 +mega swampert,31 +meeboo (arknights),31 +medusa (lancer) (final ascension) (fate),31 +medici (lord of the mysteries),31 +mechi,31 +mechanical bird,31 +mecha-fiora (speed cowling),31 +me 262,31 +maya (dewprism),31 +max anarchy,31 +mauro abelard,31 +matsushika,31 +matsukaze (azur lane),31 +matsuda jinpei,31 +matsuba moco,31 +masui,31 +master nabesama,31 +masked booby (kemono friends),31 +masinhwangje,31 +masao tsubasa,31 +maruyama-jp,31 +maruwa gray,31 +maru (maruplum),31 +marishiten (mar1sh110),31 +marie (splatoon) (cosplay),31 +maribel hearn (cosplay),31 +maria (silent hill),31 +margo (pokemon),31 +marching melodies (idolmaster),31 +march (coyote ragtime show),31 +manuba (yukataro),31 +maned wolf (kemono friends),31 +mallllma,31 +male mage (dungeon and fighter),31 +mako-hime,31 +makiko,31 +makai no koutaishi,31 +majora (entity),31 +majokko a la mode 2,31 +mai (popotan),31 +mahou sentai magiranger,31 +mahou chuunen ojimajo 5,31 +magical marriage lunatics!!,31 +magaqq123,31 +maddy lovecraft,31 +m240,31 +m16a4 (upotte!!),31 +luoloo,31 +luneth,31 +luli daxian,31 +lsd,31 +low horns,31 +lost-phews,31 +lordgenome (young),31 +lopmon,31 +lolicom,31 +location request,31 +lobelia (granblue fantasy),31 +lion (trickster),31 +lindwurm (last origin),31 +lina rojas,31 +limble,31 +lilybell,31 +lillie (new year's 2021) (pokemon),31 +lilithmy,31 +lilith (vanpri),31 +liliane vehlendorf,31 +lian mang,31 +lexus (artist),31 +lewdlux,31 +lew,31 +leviathan (zettai bouei leviathan),31 +leon (rune factory),31 +leon (pokemon) (cosplay),31 +leo (mobile suit),31 +lena (fire emblem),31 +leeshin0919,31 +lee nabi,31 +leatherclub scene,31 +leadale no daichi nite,31 +lava lamp,31 +latte,31 +lasty farson,31 +lastlong,31 +las vegas (accio),31 +lapaco,31 +landorus (therian),31 +lancer (deltarune),31 +lalala (kirby),31 +laing,31 +l.v.c.,31 +kyung han kim,31 +kyougoku makoto,31 +kyosuke fujiwara,31 +kykeon,31 +kyan (glira),31 +kuzya,31 +kuzunoha kyouji (sunny side street),31 +kuzuneko,31 +kuze (ira),31 +kuu (0427),31 +kusumoto miya,31 +kusanagi mikoto (artist),31 +kurumada gouichi,31 +kurosaki sasori,31 +kuroki shigewo,31 +kuroiwa brs,31 +kuroba (f-15c eagle),31 +kuro senpai to kuro yashiki no yami ni mayowanai,31 +kuriyama kuriotoko,31 +kurimu (yappen),31 +kuri-magu kuroguro,31 +kurena kukumila,31 +kurai kako,31 +kuon gramred shutleheim,31 +kunikida (female),31 +kumei,31 +kumatangent,31 +kumakuma,31 +kuilaogouhuabudongle,31 +kugayama konoka,31 +kuga hajime (world lover),31 +kudou naka,31 
+kudou aiko,31 +kuchinashi (not on shana),31 +kuchiki touko,31 +kubozuka pikurou,31 +kraken (splatoon),31 +kozimaki,31 +koyuki ekaki,31 +koyansuee,31 +koyama mai,31 +koyade,31 +koutake hiroyoshi,31 +kousoku kidou avatar drive,31 +kousaka honoka's mother,31 +kouichi09,31 +kou (kokounene),31 +kotobuki tsumugi (cosplay),31 +kotee,31 +koorizu,31 +koopalings,31 +konno natsume,31 +kon (inakon),31 +komugi (2212),31 +komiya ena,31 +kokonex,31 +koki (latte1023),31 +kokao358,31 +koiso tsukasa,31 +koi nobori,31 +koharu1807,31 +kodama (sakura yuki),31 +kobone awaki,31 +knee to face,31 +km9902226,31 +kl501,31 +kiwoseo meogneundas,31 +kiui (kogane),31 +kitsurubami,31 +kisshii (kic1224),31 +kishinami hakuno (female) (another ending),31 +kisero (kyuri tizu),31 +kisaragi koushi,31 +kisaki yuu,31 +kisaki souhei,31 +kisaki kanann,31 +kirze,31 +kiryuuin souichirou,31 +kiro (kirotayu),31 +kiris,31 +kinoshita ringo,31 +kino kuniya,31 +kingyo chuuihou!,31 +kimono dress,31 +kiiro kurumi,31 +kiefer,31 +khopesh,31 +kenja tori,31 +ken (kenta1922),31 +kelly 0w0,31 +kekai kotaki,31 +keipup,31 +keifuku-san,31 +keibleh,31 +keibi inu,31 +kei (k tmr),31 +kazuya0810,31 +kazuma kuvaru,31 +kazekiri,31 +kazami shirou (kaizou-zumi),31 +kayna (monster hunter),31 +kawazuishi,31 +kawasaki sakura (idoly pride),31 +kawamura kumi,31 +kawakami tomie,31 +kawai ritsu,31 +kawai ameri,31 +kathy (abubu),31 +katase aoko,31 +katagiri ikumi,31 +kasumi (shironeko project),31 +kasuga no tsubone (fate),31 +kasia mikolajczyk,31 +karekusa meronu,31 +karameru,31 +karakuri neko (tkfm),31 +kaoru (rena12345),31 +kaoru (mujin wakusei survive),31 +kanoe (tatsukanoe),31 +kannazuki nemu,31 +kannawa azusa,31 +kanata (kiduka),31 +kana (kwbr),31 +kamui (kurohshiro1),31 +kamu kotan,31 +kamizawa (sark),31 +kamisama minarai: himitsu no cocotama,31 +kamen rider brave,31 +kakuume,31 +kaku (one piece),31 +kakeru (fujiwara kakeru),31 +kaizuka,31 +kaibutsu-kun,31 +kaguya hime (onmyoji),31 +kageyama mari,31 +kagamigawa chloe,31 +kaeru (meriruou),31 +kackaorz1,31 +k-you (pixiv),31 +juugou taki,31 +junk doppel,31 +junik (snrj2255),31 +juliet (granblue fantasy),31 +joule (gunvolt),31 +josue pereira,31 +josephine-843,31 +jokarara,31 +joe (j studio),31 +jimmy09,31 +jill besson (vordandan),31 +jikai,31 +jiafei2o3,31 +jerky,31 +jelly fish,31 +japanese national police agency (emblem),31 +jan (janpx2012),31 +james (pokemon) (cosplay),31 +jade (dross),31 +jack sparrow,31 +j-dragon,31 +izumi kanaaki,31 +izanagi no ookami p.,31 +iz izhara,31 +itsumade (onmyoji),31 +itsukage,31 +itou chieri,31 +itodome,31 +ito (silva),31 +itkz (silentknight),31 +isozaki bebebe,31 +ise (azur lane),31 +isaraa 005,31 +iris freyja,31 +irasutoya,31 +ira megumi,31 +ippongi ryuuta,31 +inu-hariko,31 +intel,31 +inshitsu otaku ni ikareru imouto (kanojo),31 +insertsomthinawesome,31 +inora,31 +injuotoko,31 +ineuoy,31 +inapple,31 +inaki shinrou,31 +inae koron,31 +implied penetration,31 +implied anilingus,31 +imoman,31 +ikurikaito,31 +ikarimame,31 +iinikukuiita,31 +ihiro,31 +iguchi yumi,31 +igarashi sayaka,31 +ifnil,31 +idate,31 +ichinose tomoe,31 +ichinose mio,31 +ichinose honami (amakano),31 +ichijo kazuya,31 +ichigo seika,31 +ichigo-chan (yk),31 +ice cream sandwich,31 +ibuki douji (first ascension) (fate),31 +i-401 (aoki hagane no arpeggio),31 +hyouka (rat hut),31 +hyatt,31 +hya (ohyaarin),31 +hwoi (hw 123),31 +hungern (skullgirls),31 +human shield,31 +huckebein,31 +hualing,31 +hu tu,31 +how to draw an owl (meme),31 +houriigurei,31 +houman,31 +hoshikawa seira,31 +hosaka miyuki,31 +horon,31 +hoozuki 
warabe,31 +hong hongcha,31 +honami yuu,31 +holo (cosplay),31 +holding snowman,31 +holding planet,31 +holding javelin,31 +hokuto hyakuretsu ken,31 +hk416 (percussive bolero) (girls' frontline),31 +hizaki gamma (artist),31 +hiyokko ep,31 +hitsuki miyu,31 +hitokiri battousai,31 +hisiya (wldn1112),31 +hisaka (cookie),31 +hironegaika,31 +hiranara nirai,31 +hira (hinakomochi),31 +hinata mirun,31 +hinata himawari,31 +himoo yuina,31 +himexin,31 +himeno fumi,31 +himemiya niina,31 +hime kake,31 +himaruya hidekazu (style),31 +hikari no 4 senshi,31 +hikari123456,31 +hiiro 5-sai,31 +higami akabane,31 +hien rijin,31 +hideki,31 +hibiki (nilten),31 +hibana (vocaloid),31 +heyasamu,31 +heshiko disco,31 +hertz (tsuquart),31 +helicopter tail,31 +hazuki (hazu chaya),31 +haystack,31 +hayden mackenzie,31 +hayadai,31 +hatsune miku (shinkalion),31 +hatoro kuroyoshi,31 +hatoka ra5,31 +hasumi keito,31 +hashima renge (ryou@ryou),31 +hasha,31 +hasegawa taizou,31 +harusaki air,31 +haruno14,31 +haruno,31 +harumiya hiro,31 +harui (huruyi 817),31 +haru (maou-sama to kekkonshitai),31 +haro (artist),31 +harley davidson,31 +haoto (arnica leaf),31 +hands on shoulder,31 +hands in pants,31 +handkerchief biting,31 +hand drill,31 +hanchan,31 +hanasei,31 +hanako-san (gegege no kitarou),31 +hana x hana,31 +hana (den-o),31 +hamuta0404,31 +hamon ai,31 +halo infinite,31 +halo (artist),31 +hali,31 +hal-bard,31 +hakuro96,31 +hakkaku hailey,31 +haine koko,31 +hacka doll 0,31 +hachi (live union),31 +ha84no,31 +h&k mg4,31 +gym ghingnham,31 +guratan,31 +guozi li,31 +gundam gp-01 full vernian zephyranthes,31 +gumi (cosplay),31 +guihuo inferno,31 +gu (goodnight),31 +grimms echoes,31 +grim (grim adventures),31 +griffon mask,31 +gridman (character),31 +grey wolf,31 +gretel (grimm),31 +green gym girl (nagase haruhito),31 +great rune (elden ring),31 +grateful blue (idolmaster),31 +grandpa (shiromanta),31 +granatha eternal,31 +gram quartz,31 +grace o'connor,31 +goutokuji mike (cat),31 +goto kakushi,31 +gorujitai,31 +goro tame,31 +goombella,31 +gon (congchuabuoito),31 +gompang 11,31 +gompang,31 +gogo/tad,31 +gluteal fold peek,31 +gloom (irys),31 +glemy toto,31 +glastrier,31 +ginko (sekainoowari),31 +ghosty (xiaobai),31 +gezerun,31 +gentlu (precure),31 +genjitsu no yohane,31 +genieko,31 +gear second,31 +gear fifth,31 +gdhs,31 +gawain (granblue fantasy),31 +gatling raid,31 +garththedestroyer,31 +garigarigari,31 +gaon (hisuikairou),31 +gaogao (gaogaomegu),31 +gal gun double peace,31 +g li,31 +g-saviour gundam,31 +fuyu (utngrtn),31 +fuusen neko,31 +fuupo,31 +futaba yodoyume,31 +fusou (fuso0205),31 +fushimi asuha,31 +fushi,31 +furomaaju (fromage),31 +furai sen,31 +fumika bug (idolmaster),31 +fuko (fukkofuko),31 +fukkireta,31 +fuki to takenoko,31 +fukai ni nemuru oujo no abaddon,31 +fujiwara takumi,31 +fujiwara ryo,31 +fujiwara rika,31 +fujitsuki,31 +fujimoto kishino,31 +fujimna,31 +fujimiya momiji,31 +fubuki (muvluv),31 +frenlyse,31 +french clothes,31 +francis (left 4 dead),31 +four of clubs,31 +ford crown victoria,31 +force (r-type),31 +folding table,31 +fn scar 16,31 +flower fairy (osomatsu-kun),31 +floren (xenoblade),31 +flora (claymore),31 +flashlight beam,31 +firo (shiboritoranaide onna shounin-san),31 +finnish army,31 +film cartridge,31 +festenia muse,31 +fermion,31 +felicia (fire emblem) (cosplay),31 +feifei (fkey),31 +feather tails,31 +faye (front innocent),31 +fax machine,31 +fawn,31 +fate no keshin,31 +fast fashion octane,31 +fantia commission,31 +faith (sbi),31 +faefaea,31 +eyelid piercing,31 +ex-gear,31 +evemoina,31 +eve (rurudo),31 +eurica 
(ub1mo),31 +eula (genshin impact) (cosplay),31 +etoile rosenqueen,31 +eternity winner (elsword),31 +esuyukichin,31 +estcc,31 +eryngii yoko,31 +eruri (mgmn),31 +erul tron,31 +ershisi,31 +erinerin99,31 +erik burton,31 +erhuo,31 +equestrian,31 +enokimo me,31 +enjin (idolmaster),31 +engine blade,31 +enekk,31 +endo (makaron),31 +endless library,31 +enden,31 +emporio ivankov,31 +emiya shirou (cosplay),31 +emily (meago),31 +emi-tan,31 +emaason,31 +elysia watanabe,31 +ellis (agarest senki),31 +elise angel,31 +elinalise dragonroad,31 +eleking,31 +elcia harvence,31 +eit (ikeuchi),31 +eikichi (mujirushi shounen),31 +eigo ichii,31 +efi oladele,31 +edward elric's daughter,31 +edward (fire emblem),31 +edelweiss (senjou no valkyria),31 +eda (prsy3),31 +ecruteak city,31 +echidna (fire emblem),31 +e mishi,31 +dwarf (dq10),31 +dutou,31 +durga (housamo),31 +dungeon of regalias,31 +dskb,31 +dripping eye,31 +drawing equipment,31 +dragoon (girls' frontline),31 +dragon on head,31 +dragon maker,31 +doris (hololive),31 +dorei jackie,31 +dora the explorer,31 +dominoes,31 +dominico,31 +dolsig ilangnolja,31 +dolly (toaru kagaku no railgun),31 +doguu,31 +dodapan,31 +dobunomeme,31 +dishonored,31 +director's chair,31 +dio brando's pose (jojo),31 +digestion,31 +didi amen,31 +deyezi,31 +desutruction,31 +despina (queen's blade),31 +dermiss,31 +denbaa,31 +demupausagi,31 +death knight (fire emblem),31 +death (granblue fantasy),31 +deaimon,31 +dd tachibana,31 +dc24v,31 +daue,31 +date crush stories,31 +darou74,31 +darlton,31 +dark foreground,31 +dark.h,31 +dancer shantae,31 +damu ningen,31 +dama (akilay),31 +dakkoku jiro,31 +daiuchuu no hokori,31 +daishippai,31 +daifuku (pokefuka art),31 +dahadekan,31 +daeno,31 +cyobiro,31 +cygnus (maplestory),31 +cutting onions,31 +curly sue,31 +ct990413,31 +crossbone gundam x-1 full cloth,31 +cross of lorraine,31 +cromwell (tank),31 +crime city miss fortune,31 +crescent conundrum,31 +cresc-dol,31 +covering another's crotch,31 +countryball,31 +cosmo (bousoup),31 +corsair (final fantasy),31 +copper,31 +convenient hand,31 +conjaku,31 +comugico,31 +commander,31 +comiket 81,31 +collet,31 +code:realize,31 +coco (h221414),31 +coach (artist),31 +closed jacket,31 +clock tower (series),31 +cleveland (reaper fever) (azur lane),31 +clerk nagato,31 +clear (djmax),31 +classictime,31 +clammbon,31 +cirno (cookie),31 +circe (last origin),31 +cinna (ff9),31 +chucky,31 +christmas is cancelled,31 +chou chou egotistical,31 +choiark,31 +cho mo futoshi,31 +chloe (noir),31 +chiyokawa rin,31 +chiroru (7450n),31 +chirithy,31 +chiriri,31 +chippendales,31 +chimoon,31 +chikage (bloodborne),31 +chiitamu,31 +chief nuna (zcune),31 +chibi (nekomimimi),31 +chiba michinori,31 +chequita,31 +chenbo,31 +chen zhang,31 +cheerful candy (module),31 +chase (kamen rider drive),31 +chaoxi,31 +chaos ruler (granblue fantasy),31 +chandra nalaar,31 +champa (dragon ball),31 +chaji xiao bai,31 +chagama (tyagama0927),31 +cenm0,31 +cell1913,31 +celica (xenoblade x),31 +ced (fire emblem),31 +caules forvedge yggdmillennia,31 +catbii,31 +cat testicles,31 +castlevania: harmony of despair,31 +carren (granblue fantasy),31 +canute,31 +canopy (shelter),31 +candeloro's familiars,31 +canari,31 +campanella (vocaloid),31 +camouflage dress,31 +callmaichi,31 +cal ruslan,31 +c.q. 
cumber (splatoon),31 +byuey,31 +bwcloud,31 +bwanga,31 +bustafellows,31 +buru,31 +buriki one,31 +bunny ear headphones,31 +bulge to face,31 +bukiyou na senpai,31 +bugbug,31 +bubble slime,31 +bsmycc,31 +brulee,31 +breast clinging,31 +bradamante (welcome bunny) (fate),31 +brad evans,31 +box magazine,31 +bow babydoll,31 +bottle opener,31 +borubomu,31 +boku to kanojo ni furu yoru,31 +boat interior,31 +blue period,31 +blow,31 +blast-7,31 +blapan,31 +blanche fleur,31 +blair dame,31 +black torch,31 +black sheep,31 +black lobelia,31 +bit (kksbit),31 +birmingham (azur lane),31 +bilibala,31 +bi no ossan,31 +berserker (fate/zero) (cosplay),31 +beretta ar70,31 +berdly (deltarune),31 +benizika,31 +belinda (unlight),31 +beer hsk,31 +beat saber,31 +bcoca,31 +bbrangka,31 +barlunn,31 +barara peol,31 +bao (kof),31 +banysun,31 +bamme o3o,31 +bakugan new vestroia,31 +bahamut greed,31 +backstab,31 +babigonice,31 +ba ra ran,31 +b.sa (bbbs),31 +a~ un~,31 +azumada,31 +azuma yuuhi,31 +azuma tooru,31 +azuma (sospower73),31 +azuki miho,31 +azalie cait sith,31 +azai nagamasa (sengoku basara),31 +ayumi (830890),31 +ayu (ayuyu0924),31 +ayamy (vtuber),31 +awanqi,31 +awaken the power,31 +atlas (mega man),31 +atiti (ttttt945),31 +asuteka,31 +asura's wrath,31 +asuka asuka,31 +astral (yu-gi-oh!),31 +ashley (pokemon bw098),31 +ashimine arumi,31 +ashcape,31 +asatana,31 +asakura nuruma,31 +asaigai suzushi,31 +asa (y-asa),31 +asa (asa 410st),31 +arute arisu,31 +aruru no zaki,31 +aru aru,31 +arthurian legend,31 +art study,31 +arshtat falenas,31 +army-san,31 +armarouge,31 +armads (fire emblem),31 +arjend,31 +arisu shiria,31 +aristear remain,31 +arisie,31 +arisa (yuki touko),31 +ariane cevaille,31 +arcelle,31 +aral,31 +aragiken,31 +aquila favonia (genshin impact),31 +applechoc,31 +apple magician girl,31 +aose touka,31 +aoki clair,31 +aoi yugina,31 +aoi saki,31 +aogu,31 +aoba (azur lane),31 +anti-aqua,31 +annie berton,31 +ankoku broccoli,31 +animate object,31 +angie (meganekko-geki-love-1008),31 +angerykacchan,31 +angel blade punish,31 +andro juniarto,31 +andou saki,31 +anbivarens,31 +anastasia romanov,31 +amo takumi,31 +amnesia (majo no tabitabi),31 +amerika zarigani,31 +american flag skirt,31 +ame (ookami kodomo),31 +ambasa,31 +amanomori shou,31 +amagi xx,31 +amagai kosame,31 +amaemi-longing for you-,31 +aluo 7762,31 +alov,31 +allgreen,31 +alice (criminal girls),31 +alfi (tok),31 +albyee,31 +albatrosicks,31 +akr tmr,31 +akizuki (kancolle) (cosplay),31 +akiyama yuuji (naruko-tei),31 +akihare,31 +akichi mashiro,31 +akiba maid sensou,31 +akehime saki,31 +akazu kieshi,31 +akasaka4005,31 +akarino (yucco),31 +akane sawatari (chainsaw man),31 +akai katana,31 +akai ibara,31 +akagi (muse) (azur lane),31 +akabane kureha,31 +aira (dq7),31 +aimpoleume,31 +aihara kaichi,31 +aihara ai,31 +aiba (ai the somnium files),31 +aianchor,31 +ai drawing anime characters eating ramen (meme),31 +ai-chan (playstation),31 +aestivalis,31 +aek-971,31 +aegaeon (xenoblade),31 +acoco,31 +achiba,31 +ac/dc,31 +abuto,31 +abe yuichiro,31 +abbey (pui pui molcar),31 +aaku,31 +aaii,31 +a mituhashi,31 +a.j. 
(pokemon),31 +a-pose,31 +9 -nine- kokonotsu kokonoka kokonoiro,31 +99 yen (tsubura),31 +6tnut9,31 +6suan,31 +61cm triple torpedo mount,31 +5to rai,31 +5600cm,31 +447 records,31 +2001 a space odyssey,31 +10t,31 +108tooya,31 +01rosso,31 +009 re:cyborg,31 +.96 gal (splatoon),31 +zuchi00,30 +zinnadu,30 +zimu jiang,30 +zetsuhei no roran,30 +zerotwenty (020),30 +zentlardy alphabet,30 +zeno 1/128,30 +zelo6,30 +zed (trungbui42),30 +zealotus,30 +zaoanjisi,30 +z23 (serious idol @acting manager?!) (azur lane),30 +z1npool,30 +z.s.w.,30 +yuuyu (yuuyu015),30 +yuul b alwright (mythbreakers),30 +yuuki shougo,30 +yuuki sara,30 +yuuka (a0240765),30 +yuui1994,30 +yurix,30 +yuri7s0,30 +yunoji (makuswel),30 +yumekii,30 +yukkii,30 +yukisita mayu,30 +yukino ko (yukino shiratama),30 +yukimura touko,30 +yukimura seiichi,30 +yuki miku (2023),30 +yukamikayu,30 +yuguran (specs),30 +yudeika3,30 +yucca (yasabana),30 +yu (lovepayon),30 +yu'nyanko-chan (yu'nyanko),30 +ys seven,30 +yozi517,30 +you shugyouchuu,30 +yoshihiro-m,30 +yonoko k,30 +yoneda taishou,30 +yonago,30 +yield,30 +yellow bracelet,30 +yco 030601,30 +yazi114,30 +yawdeone,30 +yatufusa1,30 +yatsuo,30 +yasunoharu,30 +yasuda (fareast blade),30 +yanagi no ki,30 +yanagi haru,30 +yamato rinko,30 +yamato-no-kami yasusada (kiwame),30 +yamashiro yui,30 +yamado,30 +yamadaenako,30 +yamada rokkaku,30 +yamada naoko,30 +yamada-san wa tottemo baka nan desu,30 +yakou (innocent chapel),30 +yaguchiya tomoshi,30 +yagiwashi,30 +yagitome87,30 +yagate149,30 +xxxxakixxxx,30 +xishuu (user dvah3828),30 +xin (zinc),30 +xiaoxiao de kuangsan,30 +xiaomu (a414171448),30 +xian jian qi xia zhuan 7,30 +xia wanzi,30 +xerbatt,30 +xbox series x (personification),30 +wyx2,30 +wusebuhui,30 +wu ba pin,30 +woborou,30 +wo xuyao jiashui,30 +wizardess of oz,30 +wixoss diva(a)live,30 +witch (goblin slayer!),30 +windbard,30 +whitey,30 +white haired girl (munashi mujou),30 +wheeljack,30 +wet hat,30 +wep16night,30 +welts,30 +weltol,30 +watsuki nobuhiro,30 +water valkyrie (p&d),30 +wata0933,30 +wasanbon (atwazooka),30 +warped context,30 +warayanuko,30 +wanko (realsight),30 +wakao ruri,30 +wa--ka,30 +vulture,30 +voy@ger (idolmaster),30 +volume symbol,30 +volkan,30 +voldemort,30 +volcanion,30 +vivi-t (akagami),30 +vita clotilde,30 +viscum,30 +viridiana (girls und panzer),30 +vieny,30 +vertical-striped necktie,30 +valus (shadow of the colossus),30 +valentine (tank),30 +v buckle,30 +uuta (uturautouto),30 +uumenn,30 +uttao,30 +utsuwa0120,30 +uth 95,30 +uterus pose,30 +ushiromiya beatrice,30 +ushijima nozomi,30 +uruha rushia (cosplay),30 +uriel (alchemy stars),30 +uphir (megido72),30 +undine (seiken densetsu),30 +under the table (arknights),30 +under armour,30 +ump45 (the wish-making sorceress of fireworks) (girls' frontline),30 +umiushi (poyopacho),30 +umino tomo,30 +umino kiri,30 +ultraman taiga (series),30 +ultraman nexus (series),30 +ui (kirin),30 +ufoliving,30 +ude,30 +udan,30 +uchuu gorira,30 +tyun,30 +type 79 smg,30 +tying panties,30 +two-tone sleeves,30 +twintails girl (kamisimo 90),30 +twilight-g,30 +tutor,30 +turning around,30 +tsuzuku (hayamisyoto),30 +tsurugaya otsuu,30 +tsuru ringo,30 +tsuna (akmu8832),30 +tsukuyomi (ff14),30 +tsukumo nikyu,30 +tsukina (artist),30 +tsukimido,30 +tsujidou miyuri,30 +tsuhiki koyomi,30 +tsubudashi,30 +tsuboyarou,30 +tsubasam,30 +trolley problem,30 +triple baka (vocaloid),30 +trimbil,30 +transparent ribbon,30 +training drone (girls' frontline),30 +trainer wants to battle,30 +towor n,30 +towing,30 +towa (slow damage),30 +toushou daimos,30 +tousaki (tousakiworks),30 
+tour guide,30 +touka (fukuoka katsumi),30 +toujou nozomi (cosplay),30 +toudou kohaku,30 +tou ilie,30 +torture dance,30 +toriumi arisu,30 +torakami14,30 +topiary,30 +toothache,30 +tomurasuisen,30 +tome (wolf),30 +tomcho,30 +tokumaru,30 +tokuda shinnosuke,30 +toh.,30 +todayama kyouko,30 +tobita,30 +to ze,30 +tndkworks,30 +tizibade,30 +tim drake,30 +tild - mage a louer,30 +tibetan clothes,30 +thunder badge,30 +through panties,30 +threaded cane,30 +thistle,30 +thigh bands,30 +theoto rikka,30 +the princess and the frog,30 +the distortion detective,30 +tetsutetsu tetsutetsu,30 +teperyndroors,30 +tenshi no shippo,30 +tenpyou no kitsune,30 +tenma maemi,30 +tenchi souzou,30 +ten year artist progress record,30 +temperance (tarot),30 +tempenite,30 +techwear,30 +te ru ya,30 +tcmk,30 +tbrsnsk,30 +tatedano kabae,30 +tasouken,30 +tare (hiden no tare),30 +tarariko,30 +tarai (yamadarairai),30 +tankobukids,30 +tanaka punie,30 +tanada-bon,30 +tanacris (third kureko),30 +tamani wakashi,30 +tamamura tamao,30 +tama gotoh,30 +talia gladys,30 +takayashiro yuzuka,30 +takatsuki nao,30 +takatsuki nanami,30 +takase (takase1214),30 +takasaki aneki,30 +takamine mion,30 +taiyo akari,30 +taisa (h68533631),30 +taira,30 +tail stand,30 +tadano53,30 +tadakuni's little sister,30 +tacticsalt,30 +t-bth,30 +syuurin,30 +sylvia sherwood,30 +syacyo,30 +sweetheart (omori),30 +sven (svenners),30 +suzuno kouya,30 +suzumura yuu,30 +suzuka (suzuka9111),30 +suwi,30 +surprise buttsex,30 +surgical scar,30 +supreme candy,30 +super chat,30 +sun stark,30 +sumosamo,30 +sumiobunnya,30 +sumikaze midi,30 +sumibiya yasain,30 +sumadera yukio,30 +sukumizu tabehoudai,30 +sugiya manao,30 +suechiee,30 +sudale,30 +strawberry pop moon (idolmaster),30 +stolas (megido72),30 +steven armstrong,30 +stele,30 +steelhead (splatoon),30 +stashia,30 +st06,30 +srx,30 +srb7606,30 +spooki,30 +spiritia rosenberg,30 +spider-woman,30 +spica (yukataro),30 +spica1476,30 +sparrow (xici9527),30 +soushin souma,30 +souseiseki (cosplay),30 +soukoku,30 +sou ryuu,30 +sost fgo,30 +sorey (kamui) (tales),30 +soojie roh,30 +sonoo koo,30 +sonobe shino,30 +sono chieri,30 +sonic forces,30 +sonic cd,30 +songjo,30 +somjeu,30 +someoka yusura,30 +solo vivace (hitokuirou),30 +sollyz (sollyz),30 +solace,30 +soar,30 +smskt 25,30 +smelly penis,30 +smartbsm,30 +slow loris,30 +sleepysolaire,30 +skyrider,30 +skrelp,30 +sk jynx,30 +six shame faces,30 +sister nanashi,30 +sionsionsion,30 +silva (gentian blue) (granblue fantasy),30 +siki 222,30 +sihai (wsskdywe),30 +siege (legacy) (arknights),30 +sicile gloria,30 +shuzoku fumei,30 +shuriken sentai ninninger,30 +shura (fire emblem),30 +shuiro,30 +shuangbatian,30 +shu zo (show by rock!!),30 +shouting with hands,30 +shouta (shbz),30 +shoushu,30 +shizuoka mirei,30 +shizume genma,30 +shivaharu,30 +shishou (tensei shitara ken deshita),30 +shishamoji,30 +shirotake jinan,30 +shirogane kihen,30 +shiro wa (shiroshironix),30 +shiro hebi rei,30 +shiro amada,30 +shiro (kida kuro mu),30 +shirataki nako,30 +shiraho (color-56),30 +shiori (magical girl) (princess connect!),30 +shio poteto,30 +shio (s alt shio),30 +shinsei inazuma japan,30 +shinori (efvbji6264),30 +shinonome nemu (nemulog sn),30 +shinko gunsei,30 +shingyouji mao,30 +shin (shincrz),30 +shimura shinpachi (cosplay),30 +shimogamo yajirou,30 +shimizu kyouko,30 +shimijimi,30 +shimejirou,30 +shima juuzou,30 +shima108,30 +shiliuye feiyu,30 +shikkoku (border of season),30 +shikimori ibuki,30 +shiina rei,30 +shigino hayato,30 +shigetashigezo,30 +shiba meiji,30 +sherylnome,30 +sheep (minecraft),30 +shaving 
armpits,30 +shania,30 +shanghai,30 +shakugan no shana-tan,30 +shadow yukiko,30 +shadow man (mega man),30 +seymour,30 +setsuna (needless),30 +sesshouin kiara (cosplay),30 +servachok,30 +serri glaus,30 +serious sam,30 +seria kirmin,30 +sennotane,30 +senguyen1011,30 +senbei (roof-lock),30 +sena (ichiroku),30 +semiramis no tenbin,30 +semimarusemi,30 +selina,30 +selena (fire emblem: the sacred stones),30 +seizon senryaku,30 +seiju natsumegu,30 +segawa haruna,30 +secretarybird (kemono friends),30 +sechka,30 +seashell print,30 +sd-sos,30 +sculpting,30 +scooby-doo (character),30 +scan dust,30 +sazame,30 +sawatarou (roto),30 +sawana,30 +sawada mio,30 +saw blade,30 +savage babe,30 +saturn devouring his son,30 +satsuki (kancolle) (cosplay),30 +satoshi igarashi,30 +sasuke (ninin ga shinobuden),30 +sasaki miyuki (kaedeko),30 +sarutahiko (housamo),30 +saratoga (scharn),30 +santafe99,30 +sansetsukon no lily,30 +sangoku hime 4,30 +samia of the shifting sands,30 +samael (5211),30 +salmon (657931354),30 +salada,30 +sakurazuka miki,30 +sakurami kyouko,30 +sakurahuji-iro,30 +sakuragi chisa,30 +sakura spirit,30 +sakura machizaka stories,30 +sakura hazuki,30 +saku2621,30 +sakasagami no yura,30 +sajima yumi,30 +saitou kon,30 +saisho no nakama,30 +saint uvuv,30 +saimin enbu,30 +saikin osen - bacterial contamination - (vocaloid),30 +saijou hirokazu,30 +sahare,30 +saberstaff,30 +sa-x,30 +s4 league,30 +s.a.t.8 (pumpkin skewers) (girls' frontline),30 +ryuujou mashiro,30 +ryuu ga gotoku 3,30 +ryuu (ryuraconis),30 +ryotarou (seyana),30 +ryofuhiko,30 +ruu wan mm,30 +runmo77,30 +ruki (senyuu),30 +rukawa sara,30 +rukawa kaede,30 +rua,30 +ru-pe (gstm 0915),30 +rqtkrqtk,30 +rpk,30 +rozuberry,30 +royal air force,30 +roy (arknights),30 +routemoc,30 +roundhouse kick,30 +rosie99,30 +rollingstonex6,30 +rokujou jun,30 +rody soul,30 +roberta (summer flowers) (arknights),30 +road to dragons,30 +ritz10 (benben),30 +ritsuko (rittii),30 +rishi (kurou),30 +riqurr,30 +rio (otosuki),30 +ringosutta,30 +riku (lingsky),30 +riku (kino no tabi),30 +rikito1087,30 +rikamello,30 +rika (agent aika),30 +rije (ikeuchi tanuma),30 +riesun,30 +rideback,30 +ricca,30 +ribbed jacket,30 +reverse falls (gravity falls),30 +resplendent quetzal (kemono friends),30 +resi,30 +replica,30 +renroujiang,30 +renee shika egakan,30 +rena geminis,30 +ren zhafan paijizu xitong,30 +reitsuki kazuma,30 +reijou kanritou ~seifuku shoujo-tachi no sakunyuu reizokuki~,30 +refla,30 +red hood (kawaguchi),30 +ray (nagaseray),30 +ravi (epic seven),30 +ratti (shepherd0821),30 +ratchet & clank,30 +raptor,30 +raochuu,30 +ranpo kitan,30 +range blaster (splatoon),30 +ramune02,30 +rain wzq,30 +raicchi (raicchi96),30 +raenoreto,30 +radio exercises,30 +raccoon (trickster),30 +raccoon (potesara),30 +rabimea (ichiri),30 +qi kou,30 +pzb39 (girls' frontline),30 +pygmalion,30 +pvmivs,30 +puuko (iberiko neko),30 +putting on condom,30 +punishment game,30 +pun-rii,30 +project (league of legends),30 +prinz eugen1938,30 +prince kanata,30 +prima doll (anime),30 +predict,30 +preceptor seluvis,30 +pq (lamune),30 +ppk (foliage romance) (girls' frontline),30 +power level,30 +potepote,30 +poruneu,30 +pora 0918,30 +popo agaga,30 +ponsuke kaikai,30 +poncho (poncho),30 +pomimiko,30 +polka dot border,30 +pocahontas (disney),30 +plus1024,30 +player (god eater 2),30 +platinum disco,30 +plar0846,30 +pla0658,30 +piyopiyo (pphiyo),30 +piyo (p i y o),30 +pinkdrawz,30 +piiroinardesco,30 +pig man,30 +pictoria,30 +physis,30 +phoebus art,30 +pham thi ran,30 +peter strasser (a moment frozen in pure white) (azur lane),30 
+peta (taleslove596),30 +pesu,30 +perorisu,30 +peroppafu,30 +periscope,30 +pepsi japan cola challenge,30 +pennsylvania (azur lane),30 +penis to navel,30 +penis gag,30 +penguin hair ornament,30 +peneko,30 +pekerika,30 +paw up,30 +pause,30 +patxi (fate),30 +passimo,30 +pasheri,30 +parupome,30 +parasol kirby,30 +parallel akiba gakuen,30 +papiko (sogebusaretai),30 +panmijin99,30 +palm (mushihime-sama),30 +pallas (heritage) (arknights),30 +paizuri while penetrated,30 +pai daxing,30 +pachyphytum,30 +p.k.f,30 +p-chan (suite precure),30 +oyume,30 +oyanaku,30 +ottilie kittel,30 +otsutama takashi,30 +otsukaresanpo,30 +otonaru,30 +otachi,30 +ot (dog wasd),30 +osanzi,30 +orguss (mecha),30 +orga (orgacl),30 +oreha00701,30 +orbited,30 +orange bandeau,30 +oozora halu,30 +ootsuki mina,30 +oosaka rei,30 +oohashi maiko,30 +onomichi (city),30 +onofre wayne,30 +onodera punpun,30 +onmitsu doushin a,30 +only norisu,30 +only human,30 +onikiri,30 +onee-san (penguin highway),30 +one hundred scenes of jiangnan,30 +onasu (sawagani),30 +onakon 3-nichi-me,30 +omoti (1201208),30 +omokage ~ecchi na happening!? nandemo dontokoi!~,30 +omisoshiru,30 +omega-xis (mega man),30 +ome (mercury cigarette),30 +olivia (matilda fiship),30 +oliverror,30 +okomeito,30 +oiessw,30 +ogasawara hikari,30 +office lady (eu03),30 +odst,30 +odeko yma,30 +ochau,30 +object (vehicle),30 +oberon826,30 +o3tofu,30 +nyxview,30 +nyuusankin,30 +nyako (hitsuzineko3),30 +nutella,30 +nun (marchen),30 +number hair ornament,30 +nufucha,30 +nuekane,30 +nude beach,30 +nouhime (sengoku musou),30 +not nodu,30 +nosh,30 +norun (ru-on),30 +norte,30 +noromi,30 +nono (norabi),30 +nonh (wormoftank),30 +nonbei,30 +nomura (higurashi),30 +nokita (pinmisil),30 +nokia,30 +nodo sn,30 +no1shyv,30 +nmknf (mkn),30 +niuwajun,30 +niufog,30 +nishimikado tami,30 +nishikimaru,30 +nire nanaki,30 +ninjatic,30 +nina (tiny evil),30 +niksoncrazy,30 +nijita,30 +nihohe,30 +nicolas brown,30 +nick wilde (cosplay),30 +ni yuu,30 +nevan,30 +ness (mother 2) (cosplay),30 +nerimono (nekokoban22),30 +nemesis (tower of fantasy),30 +nekotamago,30 +nekopantsu (xxxxxxxxx0621),30 +nekonyan (nekoworld),30 +nekomiya yoshiko,30 +nekomancer (granblue fantasy) (cosplay),30 +nekoha gc,30 +nekogasuki,30 +neho-kun,30 +nay akane,30 +nax,30 +nawo (peace),30 +natuki miz,30 +nattun nattun,30 +natsuka qya,30 +natsu (rodysanp),30 +nasunasuurin,30 +narue no sekai,30 +naoko (artist),30 +nao salad,30 +nanoder,30 +nanigashi yakko,30 +nanase shie,30 +nanase kaoru,30 +nanase09rr,30 +nan (tales),30 +namu76,30 +namisi,30 +namazuo toushirou (kiwame),30 +nalai,30 +nakano (sewayaki kitsune no senko-san),30 +nakanishi toshimi,30 +nakamura (ryou),30 +nakajima atsushi (bungou stray dogs),30 +naka ushiro,30 +nail hair ornament,30 +nai (cnoadi8),30 +naguru (cyoroama),30 +naglfar,30 +nagitaro,30 +nagisa kaworu (cosplay),30 +nagihito,30 +nagidori,30 +nagi kanami,30 +nabunabu,30 +na ta53,30 +n2co,30 +mysterious heroine xx (second ascension) (fate),30 +muumin,30 +mutsumi (utawarerumono),30 +mushinosuke,30 +musashino udon,30 +murasaki (deceive138),30 +murabito sono2,30 +muni (fdrk),30 +mumu-crown,30 +multicolored pajamas,30 +muleta,30 +mujuuryoku kantai,30 +mugino kouji,30 +mugen0017,30 +mudamoro,30 +ms (momose),30 +motsupu,30 +motsu (sararia),30 +moto gp,30 +morros,30 +morph ball,30 +moritomo nozomi,30 +morisaki jiro,30 +moricky,30 +moriah saga,30 +mori (gj-bu),30 +moonshen timojim,30 +moonlight (base),30 +monu,30 +monty python's flying circus,30 +monta,30 +monsterhentai,30 +monme (yuureidoushi (yuurei6214)),30 +monk (syugyousou),30 
+momoka (abc momoka0718),30 +momingie,30 +momiji (makuran),30 +moko (alice),30 +mojibake text,30 +moiko,30 +moikaloop,30 +mohomen,30 +model z (mega man),30 +mockingeu,30 +mochida64,30 +mobu yuri,30 +mndqn,30 +mmco,30 +mizuse kotone,30 +mizuongaku,30 +mizukami satoshi,30 +mizuhi kou,30 +mizota (rovel),30 +miyoshi (m-mallow),30 +miyauchi yuusuke,30 +miyakawa hikage,30 +miyajima (anthem-anthem),30 +miyaji,30 +miya osamu,30 +miwa maku,30 +miwa hitomi,30 +miusa,30 +mitsuru (mitsu5),30 +mitsuki3s kir,30 +mitsui mana,30 +mito tomiko,30 +mitake miyao,30 +missouri (pacific),30 +mishima reika,30 +misaq,30 +misa (929382672),30 +miranda (quiz magic academy),30 +minobey,30 +minisuka to niku daigaku,30 +minior (yellow core),30 +minikama,30 +mine fukaki se ni tayutau uta,30 +minato kazumi,30 +minato aqua (cosplay),30 +mimi (ranma3049),30 +mimi (mimi3mimimi),30 +mimamori,30 +millie (paper man),30 +millennium eye,30 +milky (ishuzoku reviewers),30 +milkuriem,30 +milk (artist),30 +mil (siratamamil),30 +miko no kamiko,30 +miko+resurrection,30 +mikii,30 +mikihiro,30 +miki (viper),30 +mikanagi yuri,30 +mihamimo,30 +midou masato,30 +midomido,30 +midoku (itijikusakura),30 +michishita masaki,30 +michelle k. davis,30 +michel d'alembert,30 +micaiah chevelle,30 +mew ichigo (cosplay),30 +mescaline,30 +mesa,30 +mervia siebel,30 +merrick,30 +mephisto (suite precure),30 +meowing,30 +mentaishi,30 +memoi,30 +memi (gamemix),30 +memento mori (m.m),30 +meldine (g (genesis1556)),30 +meimei (cb mei),30 +meikou gijuku,30 +meiko (puua),30 +meia gisborn,30 +mega man 10,30 +mechanical gloves,30 +mdr (mdrmdr1003),30 +mayoichi,30 +mayan (macross),30 +maus (ash arms),30 +mau sakura,30 +matt cummings,30 +matsushima koyuki,30 +matoi isshin,30 +matetsu (nakayoshi sanfujinka),30 +matenshi (touhou),30 +matching ring,30 +masturbation day,30 +masked lady (deemo),30 +mask alice,30 +mashiro yuki,30 +mary read (swimsuit archer) (first ascension) (fate),30 +maruyama kurehiro,30 +marumarukin,30 +mars foong,30 +marino (oyasumi),30 +marine (46586031),30 +marikouji kaede,30 +marika (marie & gali),30 +marie & gali,30 +maria onnette,30 +marche radiuju,30 +maquia,30 +maple (abc2215),30 +maou toubatsu shita ato medachitakunai node guild master ni natta,30 +maou gakuin no futekigousha,30 +manfred von karma,30 +mandala,30 +mamiya otaru,30 +malay text,30 +maki makishima (makimaki),30 +maki (eru),30 +maij,30 +maid fairy tale (idolmaster),30 +mahoroba youjo kitan,30 +maebari teikoku no gyakushuu,30 +madao,30 +maccha (jam513),30 +macaron (ameto yuki),30 +mabu (dorisuto),30 +ma-ko hunter,30 +m.i.y,30 +lyner barsett,30 +luka redgrave,30 +lucky channel,30 +luchs,30 +lucferz,30 +lowah,30 +lovey (pride of eden),30 +louise francoise le blanc de la valliere (cosplay),30 +louis (beastars),30 +lou roux,30 +lotus (brand),30 +lost,30 +lobster claw,30 +live on cardliver kakeru,30 +littleamber,30 +little renown (azur lane),30 +lipstick mark on testicles,30 +linse shileska,30 +line4x,30 +lind,30 +liliraune (monster girl encyclopedia),30 +lilia (madoka magica),30 +lightning ahoge,30 +lieyan huangzi,30 +lextodrawstuff,30 +leung ka che,30 +lesser dog,30 +leopard (sorakake),30 +lendivere,30 +leliel,30 +legretta (tales),30 +leg behind shoulder,30 +leather footwear,30 +leafy (kitsuneya),30 +leaf (pokemon) (cosplay),30 +leaf (black souls),30 +laura (houtengeki),30 +latex bikini,30 +landel,30 +landacdeus,30 +lamborghini aventador,30 +kyykttk,30 +kyu kyu kyu nyaa,30 +kyouta (a01891226),30 +kyoudou maya,30 +kwakah,30 +kuwabara (medetaya),30 +kuuga (mighty),30 +kusunoki yua,30 
+kuryuu megumi,30 +kuroyoshi,30 +kurona reole,30 +kuroma (no plan),30 +kurokku,30 +kuroki masahiro,30 +kuroge (kewagyuu),30 +kurogane tekki,30 +kuroama,30 +kuon kimi,30 +kumegawa botan,30 +kumahubuki,30 +kujikawa rise (cosplay),30 +kudou asami,30 +kubota hina,30 +krohnen,30 +krace,30 +koyasu takehito,30 +koyashi24,30 +kouzuki sanae,30 +kousaka makoto,30 +kousaka honoka (cosplay),30 +kouhiipan,30 +kouenji (crispina),30 +kotone (tateha),30 +kotomaru,30 +kotegawa chisa,30 +kotatsumuri,30 +kotama (blue archive),30 +korotan,30 +koromia,30 +koowa,30 +konohanasakuya-hime,30 +konoha2014,30 +konoe nanami,30 +konno yuuki (sao),30 +konbu (hida123),30 +koma saburou,30 +kokonattsu,30 +kojiri,30 +kojima kana,30 +koisuru ojou-sama wa papa to yobitakunai,30 +koga yuika,30 +kodona,30 +kobold (monster girl encyclopedia),30 +kobayashi yoshio,30 +ko-ma,30 +kleken (alchemy stars),30 +kkuwa,30 +kkix25,30 +kkeiixxx,30 +kizaki aoi,30 +kiyovero,30 +kiyota yoshinori,30 +kiyokazu,30 +kiukoma,30 +kitiku,30 +kitamura motoyasu,30 +kiso fumihiro,30 +kishimoto maria mirai,30 +kishi kaisei,30 +kisama (0213),30 +kirisame0729,30 +kirino (blue archive),30 +kirika (peach momozen),30 +kirby's dream land 2,30 +kioroshin,30 +kinoto (ruindivinity),30 +kino ayuri,30 +kinjo no hito no nakimushi,30 +kimyouna juju,30 +kimidorin,30 +kimi to tsunagaru koi flag,30 +kim (mathias leth),30 +kiitos12,30 +kiitos,30 +kgeroua,30 +kero (tomoki-tiger),30 +kenshjn park,30 +kenmaster17,30 +kenja no mago,30 +kemu inu,30 +keith8387,30 +keishi,30 +keima ron,30 +kbisuco,30 +kazakami shun,30 +kaza,30 +kawaii onna no ko ni kouryaku sareru no wa suki desu ka?,30 +kawahara fantasia,30 +katsura yukiji,30 +katsu aki,30 +katou hiromi,30 +kat (warioware),30 +kasumisometsuki,30 +kassadin,30 +kashima fumi,30 +kashi-k,30 +karen ngao,30 +kara-age kun,30 +kappa (arakawa),30 +kaori (super real mahjong),30 +kannazuki yukito,30 +kankitukou,30 +kanitumuri,30 +kaneshiro sora,30 +kanechitomoyo,30 +kanden,30 +kanazawa hiromu,30 +kana (ky4696),30 +kaminendo,30 +kamiigusa misaki,30 +kamidan,30 +kamen rider kiva (emperor form),30 +kamen rider joker,30 +kamen rider jeanne,30 +kamen america,30 +kamatpit,30 +kamari (kama ri),30 +kaku seiga (cosplay),30 +kakikukeko,30 +kakaricho dairi,30 +kaka cheung,30 +kajiki nora,30 +kaizoku ookoku koronbusu,30 +kaitori oukoku,30 +kaiso (kaisooekaki),30 +kaiseki,30 +kaisar lidfald,30 +kairi (fotune),30 +kaije7,30 +kaii shoukougun 1,30 +kaibara elena (cookie),30 +kago (lelien7),30 +kagehi no loo,30 +kageco,30 +kageakira (shino1007),30 +kaga (battleship),30 +kaeru (ka=l),30 +kaede (bakumatsu rouman),30 +kabiyapyo,30 +k sen212,30 +k harris,30 +k 016002,30 +k3nzoteruta,30 +k12io,30 +k.s.miracle (umamusume),30 +jyaco7777,30 +juvisy,30 +juuken sentai gekiranger,30 +juse nozomu,30 +junjou karen freaks! 
-freaks romanticism-,30 +juniper's knot,30 +judge eyes,30 +juan (pokemon),30 +jqhnharqld,30 +jon snow,30 +joanna (mojo!),30 +jitsukawa ashi,30 +jisuart,30 +jirofly,30 +jessie gurtland,30 +jay (shining star),30 +japan airlines,30 +janku daruku,30 +janice (chrono cross),30 +jango-joe,30 +jane (girls und panzer),30 +james choo,30 +jakojakojako,30 +jaguarman series,30 +jacqli (artist),30 +jackie chan,30 +jacker,30 +izumo saki,30 +izabel (madoka magica),30 +iyoda mato,30 +iy tujiki,30 +ixen-fei,30 +iwai munehisa,30 +iver (reviolet),30 +ivar (tales),30 +itsukushima shrine,30 +itsudzumi,30 +itouei,30 +itou kazuki,30 +itou izumo,30 +isou doubaku,30 +isomer (girls' frontline),30 +ishikane aoi,30 +ishida hiroyasu,30 +iseria queen,30 +isagi yoichi,30 +isaac hein iii,30 +irima (doron),30 +irem,30 +irako mikan,30 +inuwi (yokose y),30 +inunoya,30 +inugami (gugukoku),30 +inu wi,30 +intertwined fate,30 +internet positif,30 +inou eita,30 +innocent girl,30 +innocent bullet,30 +indigo plateau,30 +ind (121),30 +inawa akito,30 +inagaki miiko,30 +inagaki,30 +impossible shorts,30 +imori (46296895),30 +ima soko ni iru boku,30 +illuminati,30 +ikuwataru nagomi,30 +ikkyuu tensai,30 +iichan.ru,30 +idle antics,30 +icetiina,30 +ice witch lupina,30 +ice cream cone on head,30 +ibuki mio,30 +ibuki mana,30 +hyara,30 +husui parashi,30 +hurybone,30 +huracan (vivid strike!),30 +hunter (little red riding hood),30 +hunter (azur lane),30 +humar,30 +human (lineage 2),30 +hujitaumi,30 +hujimogeo,30 +hs.50 (girls' frontline),30 +hoyo,30 +houshi,30 +hoshina hoshimi,30 +hosato mikine,30 +hooded jumpsuit,30 +honoka (ranukirai),30 +hongou kazuto,30 +honey bee (bancoth),30 +honami mikura (amino dopple),30 +homura kogetsu,30 +holykoshi,30 +holding shaker,30 +holding drawing tablet,30 +hokuyuu,30 +hokage,30 +hms conqueror (siirakannu),30 +hiyamaru,30 +hitaishou chiritori,30 +hiroshi (ao oni),30 +hirayama-h,30 +hinomoto akari,30 +hinatsu,30 +hinata (hinata123),30 +hina logi - from luck & logic,30 +hina asuka,30 +himeneko,30 +himekaidou hatate (cosplay),30 +himari (blue archive),30 +hild (aa megami-sama),30 +hiki yuichi,30 +hiiragi yuzu (cosplay),30 +hiiragi akio,30 +hidechuu,30 +hibarigaoka ruri,30 +hermes (azur lane),30 +helvetian military uniform,30 +hello hoshi wo kazoete,30 +hellcherr,30 +hell princess,30 +hekeniko,30 +heita0524,30 +heavy ball,30 +heat hawk,30 +heart challenger,30 +healer (7th dragon),30 +head on back,30 +haydee (game),30 +hayate (higan sakura),30 +hayakawa mayumi,30 +hayagiri,30 +hawk (cwmg),30 +hattori (one piece),30 +hatoya kohane,30 +hati,30 +hataraku otona no ren'ai jijou 2,30 +hasegawa urumi,30 +harumiya (meron89893),30 +harukaze soyogu,30 +haruka nana,30 +haru no hito,30 +harsh-mallowz,30 +harley (pokemon),30 +hare (aeex5727),30 +hard,30 +harakawa ken'ichi,30 +hanuma hanma,30 +hanon (heartofsword),30 +hanged man,30 +hang gliding,30 +hand under dress,30 +hanami mariya,30 +hanakoganei hibari,30 +hanabasami kyou,30 +hamada mari,30 +hakusyokuto,30 +haks,30 +hair salon,30 +hai,30 +hagino kana,30 +hagano ichigo,30 +haduki tohru,30 +hades1580,30 +haarmades,30 +haamon (harmon),30 +h&k sl8,30 +gyakushuu no hoshiumi,30 +guunome,30 +gus porter,30 +gunvolt chronicles luminous avenger ix,30 +gundam combat,30 +guitar little sister (hitomi o) (cosplay),30 +guhua67,30 +gugalanna,30 +guard (girls' frontline),30 +gtgt nishiteyaru,30 +ground gm,30 +green leggings,30 +green cat,30 +green babydoll,30 +greater honeyguide (kemono friends),30 +grease,30 +grayllust,30 +gravy,30 +grausummon,30 +graffiti tracer,30 +gouda takeo,30 +gouda 
hiroaki,30 +gou haihaihaihai,30 +good breasts day,30 +gonzz (gon2rix),30 +gokurakuin miito,30 +gogochi,30 +gogo,30 +gogg,30 +goeniko,30 +gobots,30 +go (mumuke),30 +glove cuffs,30 +gloomy (leonzo030),30 +glasslip,30 +giru (dragon ball),30 +gigi (whoopsatro),30 +gigantamax eevee,30 +ghound,30 +ghost (starcraft),30 +getter-3,30 +gen (street fighter),30 +gemini paradox,30 +geiyajin,30 +gd. fengzi,30 +garuru (pripara),30 +garo,30 +gareki (pandanokami),30 +gao (kohozuna),30 +gantai- (gxntai),30 +gamma 2,30 +gamma (ogino jun),30 +galo (warship girls r),30 +gakudayo,30 +gakkou no seiiki,30 +gabuccc,30 +fymrie,30 +fuzuki yoshihiro,30 +fuujin (ff8),30 +futakoi alternative,30 +furumero,30 +furawast,30 +fuoco,30 +fumizuki kou,30 +fullta (ikemeru19),30 +fukushima masaru,30 +fuka (mizno fuka),30 +fujiwara no shirogane no sanra,30 +fujita hiroyuki,30 +fujino kiyoshi,30 +fujimi nemu,30 +frontera,30 +frogcage,30 +frilled sweater,30 +french toast,30 +fool iketani,30 +fondolger,30 +folding stock,30 +fm77 (artist),30 +flytrapxx,30 +flower hat,30 +flower collar,30 +florbetriz,30 +flatwoods monster,30 +flaming arrow,30 +fischl (genshin impact) (cosplay),30 +fiona fox,30 +finger counting,30 +fine art,30 +filament (ar tonelico),30 +fiery clothing,30 +fiend (juniper's knot),30 +fi (atelier),30 +fhilippedu,30 +fenixman12,30 +feng mao mc,30 +feihong,30 +fee (fire emblem),30 +fatima (luminous arc),30 +famicom cartridge,30 +falcon (girls' frontline),30 +fairy (breath of fire),30 +facejob,30 +facedesk,30 +f-rhine,30 +extra horns,30 +ex hien,30 +eurika (falkyrie no monshou),30 +eurasian lynx (kemono friends),30 +eunnieboo,30 +eula (pizza hut) (genshin impact),30 +eugene chaud (mega man),30 +eroppu,30 +erimiyaman,30 +ericsakura,30 +ereshkigal (fate) (cosplay),30 +ephtracy,30 +enseisong,30 +ennishi,30 +enma (mythology),30 +emuson,30 +emodelas,30 +emo fashion,30 +emma (dq11),30 +embroidered garter belt,30 +elven founder (ishiyumi),30 +elise (koakuma teeri to kyuuseishu!?),30 +eisenwane (sword girls),30 +einherjar azena,30 +egypt (hetalia),30 +eel girl,30 +edward elric (cosplay),30 +edward confronts shou tucker (scene),30 +edgar valtinas,30 +ecchuu fundoshi,30 +ecchi na bunny-san wa kirai? 2,30 +eburi a,30 +ebola-chan,30 +ebiura akane,30 +ebisu senri,30 +earmuffs removed,30 +dual squelcher (splatoon),30 +dryseaweed,30 +drum (acta2436),30 +drow ranger (dota),30 +dream eater,30 +dragreder,30 +dragon tail steak,30 +dragk,30 +dr. 
gero (staedtler 0508),30 +douzen,30 +doshiko,30 +dorara9002,30 +donnel (fire emblem),30 +dolls order,30 +dolcexxx,30 +dog girl (doitsuken),30 +dodonpachi daifukkatsu,30 +doctor masube,30 +doctor (arknights) (cosplay),30 +dioreta (asicah),30 +dilaih,30 +digimon world -next 0rder-,30 +dig dug,30 +dieselmine,30 +dew-spiegel,30 +desyana laurentia,30 +destroyer (7th dragon 2020),30 +den dengaku,30 +demon core,30 +deku,30 +defense of the ancients,30 +defaultz,30 +deer skull,30 +deception,30 +deadbeat (calliope mori),30 +dazzle (shinkonryu),30 +dark mint,30 +daria (haguruma c),30 +daniaeru,30 +danchino,30 +dameyo,30 +daika (last origin),30 +daico,30 +dai toro,30 +daemonette,30 +cyde,30 +cyberspace,30 +cuvelia,30 +curry udon,30 +cure rhythm (cosplay),30 +cure lovely (cherry flamenco),30 +cryska (rune ocarina),30 +crys (dai),30 +crown of insight,30 +crossbow bolts,30 +cristina valenzuela,30 +creature on lap,30 +crayonchewer,30 +crash fever,30 +corroserum (arknights),30 +coronation,30 +cornelia (girl cafe gun),30 +cordelia (bridal) (fire emblem),30 +cor369,30 +coolisushi,30 +condom balloon,30 +commander (forever 7th capital),30 +cody's art,30 +code009,30 +coda (ankoprpr3700),30 +coco mercury,30 +coco (r5m),30 +cloud horizon,30 +class no gyaru ga nazeka ore no gimai to nakayoku natta.,30 +clarisse (soleil blanc) (granblue fantasy),30 +clariskrays,30 +claptrack,30 +cio hakatano,30 +chuukarudoruhu,30 +churayuki,30 +chubb,30 +chu chu (shoujo kakumei utena),30 +christoph aurel arland,30 +chitose mame,30 +chinese bellflower,30 +chinchira,30 +china railway girl,30 +chilakkk,30 +chiem (vtuber),30 +chie's mom (ishikei),30 +chidori kou,30 +chichikoucha,30 +chi4,30 +chevrolet corvette,30 +cherre,30 +cherinova,30 +checkered swimsuit,30 +charlott camile herlingum,30 +char (angel beats!),30 +chanifge,30 +chamochi,30 +chajott64,30 +chae na-ra,30 +ccllsaikou,30 +cayna (leadale no daichi nite),30 +cave spider,30 +cathy idx,30 +castle-3 (arknights),30 +carlos marlon,30 +card crusher,30 +captcha,30 +captain (hellsing),30 +canae0,30 +camp buddy,30 +camp,30 +camilla regina,30 +calcite (arknights),30 +cagnazzo,30 +cacn,30 +cable (marvel),30 +buruxugi,30 +bunny paws,30 +bunker,30 +bulldog,30 +bucephalus.tvt,30 +bubble (arknights),30 +bteele,30 +bruxish,30 +brown bandana,30 +broken shield,30 +breast implants,30 +brand (league of legends),30 +boy (pixiv17542438),30 +boushi (nandemo),30 +borscht (food),30 +border collie,30 +booby trap,30 +bondo,30 +bokujoukun,30 +boku no edamame,30 +bodies,30 +bobble-chan (splatoon),30 +blue hair-chan (ramchi),30 +blue dragon ral omega grad,30 +blue (nanostar),30 +bloom (irys),30 +bloody crow of cainhurst,30 +blitz (rainbow six siege),30 +blackcliff slasher (genshin impact),30 +black knight (dark souls),30 +black hands,30 +black canary,30 +black-backed jackal (kemono friends),30 +bismarck (iron black elysium) (azur lane),30 +biscuit griffon,30 +bing gang,30 +biker,30 +bigur,30 +big bad wolf (grimm) (cosplay),30 +bibeak (arknights),30 +beta (dream hunter rem),30 +bessho emma,30 +bes-low,30 +beryl (blueberylpie),30 +berserker (tera online),30 +bercouli (sao),30 +belldot,30 +beer tap,30 +beelzebub (granblue fantasy),30 +bea (palentine's 2022) (pokemon),30 +battleborn,30 +battle subway,30 +battle spirits: brave,30 +battle angel alita: last order,30 +battery aida ni misu ga deta,30 +bashikou,30 +barber chair,30 +bara no maria,30 +bantian yindang,30 +banbiiiino0303,30 +banana print,30 +ban tang,30 +ban (one ban7241),30 +balladeluce,30 +baldur's gate,30 +bakyu-n!!,30 +baku (bakunooekaki),30 
+baiyin,30 +bails,30 +bah (dragon ball),30 +badluck,30 +bad boy,30 +back scrubber,30 +azure kite,30 +azumi tooru,30 +azumi hagumu,30 +azumamutuki,30 +azumakuro,30 +azaya (kuroi azaya),30 +azalea4,30 +aynoh,30 +aymeric de borel,30 +ayaya (ayaya ri),30 +ayasaki wakana,30 +aya003030,30 +awazake (2wairu),30 +augmented reality,30 +atum (stand),30 +atui (utawarerumono),30 +atori12,30 +ato (ml cc g),30 +atlas (titanfall),30 +ateka,30 +at-at,30 +asugano subaru,30 +aspear berry,30 +asou asuna,30 +asmo deus,30 +ashinagi (bijutu1),30 +ashi,30 +ash (closers),30 +asai mugi,30 +asahina suzuka,30 +asahi nayuta,30 +arx-8 laevatein,30 +arturaxia,30 +artsunisiju,30 +arthur (granblue fantasy),30 +artemis (third ascension) (fate),30 +art room,30 +armored corset,30 +ark royal (coast watcher) (azur lane),30 +arikui (ooooalikui),30 +ariel23795,30 +aridonall,30 +argent-ag,30 +arcle (kuuga),30 +archaic sealed heat,30 +arashiyama jun,30 +arashio (azur lane),30 +aramaki daisuke,30 +araka luto,30 +appare-ranman!,30 +aozora hayato,30 +aomi riru,30 +aoi hatori,30 +aoi akira,30 +aoi (lightsource),30 +aoba miu,30 +ao yasai,30 +ao-yume,30 +anzai105,30 +antonio (gyee),30 +answer (guilty gear),30 +ano ko wa chuunibyou tenshi!!,30 +annerose vajra,30 +ankh (ankh 05),30 +anju emma,30 +angelina rocca,30 +andou yuna,30 +amulet coin,30 +ammonite,30 +amino (li0a4),30 +ami hideto,30 +ameya shioichi,30 +amelie mcgregor,30 +amatsuki colors,30 +amanohana,30 +amane tsukasa,30 +amanda evelyn earl,30 +amamiya momona,30 +amakawa mayu,30 +amaetai hi wa soba ni ite,30 +ama no jaku (vocaloid),30 +ama diver,30 +alyssa (irotoridori no hoshi),30 +altina (shining blade),30 +altessimo (idolmaster),30 +alpha (dream hunter rem),30 +alpachiiino,30 +alolan rattata,30 +alicemagic,30 +alice blanche,30 +alice (odin sphere),30 +alice (grimms notes),30 +alice (baalbuddy),30 +alfonso san valiante,30 +alek reyes,30 +akutsumi,30 +akuta michi,30 +akura (arc the lad),30 +akuma gaoru,30 +aku no onna kanbu: perigee new moon,30 +akita inu,30 +akiosketch,30 +akikan (credit),30 +akeha (nier reincarnation),30 +akatsuki makoto,30 +akame (chokydaum),30 +akaike,30 +akagi anna,30 +akagi (dawn phoenix's summons) (azur lane),30 +akabei,30 +ak2,30 +ak-74u (girls' frontline),30 +aji77,30 +aizawa natsumi,30 +aisha (ash),30 +aikawa akane,30 +aiiro bokujuu,30 +aibara mitsuki,30 +ai (ai1470),30 +ai-chan's mother (tawawa),30 +ahika (akihasakuri),30 +agata katsuhira,30 +against chalkboard,30 +agahat,30 +ag ss41,30 +afba,30 +aeruusa,30 +aerobics,30 +aeon (skullgirls),30 +adenine (artist),30 +adcd,30 +adcalcium,30 +adam kapowski,30 +adachi (nogami nono),30 +acchan,30 +acasta (azur lane),30 +abyssal pacific princess,30 +abyssal hunters logo,30 +abigail (final fight),30 +a (show by rock!!),30 +2r- (pixiv8168114),30 +2dswirl,30 +21grams,30 +21 (eotyq58d6do16cs),30 +2027 (submarine2027),30 +12.7cm twin gun mount,30 +0roshioji3ran,30 +zyunsei777,29 +zwei (santanawamuujojo),29 +zuihou kai ni (kancolle),29 +zola project,29 +zo ochi2,29 +zinfian,29 +zimu,29 +zhuang yao,29 +zhang xiaobai,29 +zettai meikyuu grimm,29 +zetta (phantom kingdom) (book),29 +zerase (gensou suikoden),29 +zeikomi,29 +zanki zero,29 +zangi (lkt2012),29 +z23 (the eyecatch in the rye?) 
(azur lane),29 +yuzuki (rurirara),29 +yuzuki (hmr813k),29 +yuzuki4no,29 +yuuki karin,29 +yuuki homura,29 +yuuji (itadon),29 +yuuhi (at-speakeasy),29 +yuugji,29 +yuudachi (kancolle) (cosplay),29 +yuucho,29 +yuu (super real mahjong),29 +yuu (guruko),29 +yuu (archaic smile),29 +yumi (soak1111),29 +yumemiru prima girl,29 +yumekuro,29 +yumekijiiro,29 +yuli you gua,29 +yuli (pop'n music),29 +yuko (taxidermy),29 +yukky snow,29 +yukishiro mafuyu,29 +yukihomu,29 +yukihira itsuka,29 +yuki len,29 +yuki (agent aika),29 +yukage,29 +yubisaki milk tea,29 +yuan (ziranran),29 +yu xiu,29 +yu mei-ren (fate) (cosplay),29 +yowamidori,29 +youyan,29 +your throne,29 +yougashi,29 +yoshinaga masahiro,29 +yoshimo (yoshimo1516),29 +yoshida tooru,29 +yoshiaki (yosiaki ml),29 +yooguru,29 +yoo sena (jagaimo (kkamja)),29 +yong (mg),29 +yone (qtron),29 +yon (yonana),29 +yokke,29 +yoi (tokoyoi),29 +yinzinmiemie,29 +yeolyeo,29 +yellow devil (mega man),29 +yellow cat,29 +yeager (tales),29 +ybee,29 +yayoi asuka,29 +yayoi (shichigatsu),29 +yasha (endless requiem),29 +yasaka mai,29 +yaoyorozu-kobo,29 +yaoku,29 +yanagi kawa,29 +yamaya oouemon,29 +yama usagi (onmyoji),29 +yaka (kk1177),29 +yajuuraku,29 +yairenko,29 +yaiba (7th dragon iii),29 +yai ayanokoji (mega man),29 +yahoo,29 +yae sakura (shuffle!),29 +yachiwo,29 +xxinainaxx,29 +xsorax812,29 +xinghuo,29 +xetton,29 +x xith x,29 +wujiemang,29 +wu zetian (swimsuit caster) (third ascension) (fate),29 +wrappings,29 +wizard (ii orc no hi),29 +witoru,29 +winniconan,29 +william leonard,29 +will (willanator93),29 +wiene,29 +widowmaker (overwatch) (cosplay),29 +white soccer ball,29 +white magician pikeru,29 +white l,29 +wgm oekaki,29 +weregarurumon,29 +webbing,29 +weapon girls,29 +wazy hemisphere,29 +water dress,29 +watashi no oniichan,29 +washing face,29 +war machine,29 +wander (cordabyss),29 +wanda (wonder festival),29 +wanco (7200rpm),29 +wakatsuki shuri,29 +wakaba girl,29 +waitress (pokemon),29 +waistband,29 +vyo,29 +vulcan,29 +vsjojo,29 +vrco,29 +volcano badge,29 +volcano (liao),29 +voidwalker wraith,29 +viwop,29 +viscount shishamo,29 +verseir 001,29 +velocidrome,29 +velocesmells,29 +vector (kitty paws) (girls' frontline),29 +vdv,29 +varniskarnis,29 +vanille (hotel01),29 +vanessa (luminous arc),29 +valkyrie (lord of vermilion),29 +uyoshi,29 +uyalago,29 +usui ryuusan,29 +usui nagi,29 +usui kagerou,29 +uss albacore (ss-218),29 +ushiwakamaru (swimsuit assassin) (third ascension) (fate),29 +ushiwakamaru (corrupted) (fate),29 +urakaze (azur lane),29 +urabe miyabi,29 +untied shoe,29 +untan,29 +unomiki,29 +unicron,29 +underbar summer,29 +un (un0044),29 +umisaki,29 +umberblack,29 +ulpian (arknights),29 +uld macaron,29 +uhui,29 +uenoike (194753),29 +ueno haruki,29 +uchimura reimi,29 +uchimaki subaru,29 +type 97 te-ke,29 +twt,29 +twin-bush disguise,29 +twai,29 +tuxedo jacket,29 +turboflower,29 +tukisiro nao,29 +tukasa,29 +tucciao,29 +tsuzuki (flee away),29 +tsuyuxxx,29 +tsushima hina,29 +tsurugi yashiro,29 +tsumugi (summer) (princess connect!),29 +tsukiyomi ikuto,29 +tsukiko (meltdown),29 +tsujikami ayaka,29 +tsuji keisuke,29 +tsuina-chan,29 +tsugihagi (chainsaw man),29 +tsuan,29 +trick (dorama),29 +transparent gloves,29 +trampoline,29 +toutou (fufu0o),29 +touch (manga),29 +totorosu,29 +toshipiyo,29 +toronto (yamu),29 +torn curtains,29 +torizaki kuyuri,29 +torisumi horou,29 +tori (torinchi),29 +torc,29 +torayamato,29 +toraneko555,29 +tong shui,29 +tomonaga squadron pilot (kancolle),29 +tomathao,29 +tomari shinnosuke,29 +tokyo tenma,29 +tokiya seigo,29 +tokitsukaze (kancolle) (cosplay),29 
+tokiha takumi,29 +toki (1243309499),29 +tokeru,29 +tochibi,29 +tobisawa mana,29 +tobimaru,29 +to-ya (to-ya can),29 +tnmrdgr,29 +tko (hhan),29 +tirnanogin industries,29 +tiny tina,29 +tina foster,29 +timy,29 +tild framith,29 +tiger mask (object),29 +tichiel juspian,29 +thirteen (scissor seven),29 +thicopoyo,29 +theiamillis gre fortorthe,29 +theakingu,29 +the last comer,29 +the dragon knights,29 +tetsusaiga,29 +tetsuox (housamo),29 +tetsuo (watson),29 +tetsuko (tetsuko009),29 +testicle tattoo,29 +teshigawara katsuhiko,29 +teru (teru11061),29 +teostra (armor),29 +tenteru,29 +tenrai (temple),29 +tenguro,29 +ten ryuu sadaaki,29 +teketeke,29 +teires (teir3s),29 +tefun (tehun),29 +tect,29 +technical difficulties,29 +team valor,29 +tea (084630000),29 +tawashi (tawashisan),29 +tatiana vasilievna stalina,29 +tate yuuichi,29 +tasutekete,29 +tari,29 +taremayu (kikai tomato),29 +tardis,29 +tanuxu,29 +tanaka (fate),29 +tamochi (tamochi213),29 +tama home,29 +tama (dragon ball),29 +tama (05728),29 +tam tambourine,29 +takojiru,29 +takizi,29 +takapi 3,29 +takanashi izumi,29 +takahasho,29 +takahashi mariko,29 +takagi takumi,29 +taka radjiman,29 +taka (takalight),29 +tajima (minagawa),29 +tainaka ritsu (cosplay),29 +tahm kench,29 +tagicrabbu,29 +tabobox,29 +syldra,29 +sweta (sakerflc),29 +sweetsoupman,29 +suzuki-shi,29 +suzuka gozen (third ascension) (fate),29 +suzu (sub-res),29 +suyu38,29 +suu2510,29 +susu (ysy),29 +surf,29 +super robot wars f,29 +supe (yuusyasupepen),29 +sung-eun (unleashed),29 +sunao (70 the),29 +sumihara satoka,29 +sumi (u-kar1n-89m0),29 +sumeragi amane,29 +suketoudara,29 +sukeban deka,29 +suhara (protea),29 +suezo,29 +sucho,29 +succubus familiar,29 +su-33,29 +stylecase,29 +street hopper (idolmaster),29 +strawberry nauts,29 +stratoz,29 +storybook,29 +stola,29 +stim pilot (titanfall 2),29 +steven steel,29 +stephen potter (azur lane),29 +stealth bondage,29 +starfox (artist),29 +star platinum (game),29 +standing on box,29 +spiked ear piercing,29 +spider-man: across the spider-verse (part one),29 +spats (footwear),29 +soviet navy flag,29 +souyoku,29 +souya yuki,29 +southern cross,29 +soushuu senshinkan gakuen hachimyoujin,29 +soul badge,29 +sosai salada,29 +sorting hat,29 +sorakado ao,29 +soraeda,29 +sora (dkssud6580),29 +sonya jonah,29 +sonic mania,29 +solidus,29 +sokimi (sosojovan),29 +soi,29 +sockinajar,29 +small horns,29 +sleipnir (last origin),29 +sleeping beauty (character),29 +skyape,29 +skirt cutout,29 +ska,29 +six heart princess,29 +siroromo,29 +sioagya,29 +simonadventure,29 +simelu,29 +sig sauer p228,29 +shuku,29 +shu (pjxv7485),29 +shrimp cc,29 +shouko (airen),29 +shoukichi usagi,29 +shoujo rei (vocaloid),29 +shougakusei,29 +shotgun speed loader,29 +shota-kun (g (genesis1556)),29 +shorts tug,29 +shonasan,29 +shoko-tan,29 +shohje,29 +shockwhite3,29 +shiyou (kouda suzu),29 +shiu kazuki,29 +shisu (binzo3),29 +shisonoha,29 +shishigami (mononoke hime),29 +shiruko (27anko),29 +shirosa,29 +shiro q~,29 +shiranui (onmyoji),29 +shirakawa hotaru,29 +shiokari monaka,29 +shiny forehead,29 +shinomiya akino,29 +shinano (warship girls r),29 +shinadume,29 +shin jia,29 +shimoe,29 +shimabara minami,29 +shima hinako,29 +shijou yuzuki,29 +shiho huit,29 +shigure rangetsu,29 +shidou mana,29 +sherry cromwell,29 +shared hat,29 +shared artificial vagina,29 +shapeson,29 +shadow cat (u9nej2qzq9vzxcf),29 +shachi (one piece),29 +sg (suuchan),29 +sex life,29 +severed penis,29 +seven of diamonds,29 +setouchi (blackse),29 +setamo map,29 +seriko (seo77000),29 +senta (ysk 0218),29 +senju tobirama,29 +sengoku 
driver,29 +sen rikyuu,29 +sen hisame,29 +selena recital,29 +seeso2d,29 +see-through cape,29 +scp-682,29 +school project,29 +scarmiglione,29 +scarlet (ff7),29 +scaleph,29 +saz8720,29 +sayu (shirotsume souwa),29 +sawa (kite),29 +savoia s.21,29 +sasamura kaede,29 +sasamori anna,29 +sasaki sakichi,29 +sasa kanako,29 +sarah (suikoden),29 +sara (arorasyeimi),29 +saotome nagi,29 +sanze (gokiburi kirai),29 +sanyu (ry thae),29 +sanwenyu ganlao,29 +santouka,29 +sanpo (sanpo 1027),29 +sanjou sorata,29 +sandbag (smash bros),29 +sananan,29 +sanami (sanami t 38),29 +sanadafelix,29 +samura hiroaki,29 +salon pixiv,29 +salar de uyuni,29 +sakuragouti,29 +sakuma hideko,29 +sakigake!! otokojuku,29 +saki (the atko),29 +saki (otsushimai),29 +sakazu mekasuke,29 +sakamoto ryouma (lancer) (fate),29 +sakais3211,29 +saizeriya,29 +saionji kyouichi,29 +saints row: the third,29 +sailor crest,29 +sailor beach wear (no.s project),29 +saiki kusuko,29 +saichuu (more1208),29 +sagisawa fumika (cosplay),29 +saeki kayako,29 +saejima haruka,29 +saegusa ibara,29 +saane (monster musume),29 +ryuutsuki basetsu,29 +ryuunagi hyouga,29 +ryuukichi,29 +ryuuki (pokemon),29 +ryuuga sazanami,29 +ryuu ga gotoku isshin,29 +ryomou,29 +ryokuno green,29 +rwael,29 +rutiwa,29 +rust (game),29 +ruruie (shinrabanshou),29 +runa (user guwn7382),29 +runa (maritan),29 +rudy (ikeuchi tanuma),29 +rudo,29 +ruche,29 +royal starlet (idolmaster),29 +royal candy,29 +roy campbell,29 +row (dq11),29 +rose lalonde,29 +roro (shirobako),29 +robot girl,29 +robisonjr,29 +rizna lanfebit,29 +riyu (yulevy),29 +riyo (ryon 96),29 +rishiya,29 +riputon (lipton sabou),29 +rioko,29 +ringe (ngetyan),29 +rindou matsuri,29 +riko-m,29 +ricroot,29 +rico tta,29 +ribbon-trimmed pants,29 +revenge of dragoon,29 +restya,29 +rera,29 +repu (rep sha),29 +reo (re2kn),29 +rents (akirents),29 +removing headwear,29 +remomon0,29 +remiria100,29 +remilia scarlet (bat),29 +reki connect,29 +reina de medishi,29 +rein00,29 +rei shabu,29 +regal blue tang,29 +refile,29 +red nipples,29 +red haired cure (bomber girls precure) (happinesscharge precure!),29 +red bloomers,29 +red (aba02),29 +rebaa,29 +real robot regiment,29 +ready player one,29 +re (tori),29 +rduuroorn,29 +rb2,29 +razurimu,29 +raviel,29 +raunchy ninja,29 +ratatos browntail (arknights),29 +ramram (arms),29 +ramona v flowers,29 +rakkyhappy,29 +raki kr,29 +rairateru,29 +rainkeru,29 +rag uralo,29 +radical highway,29 +rabbit print,29 +rabbit ears antenna,29 +rabbit+tank form (black hazard),29 +r04315,29 +quiz quest,29 +queenie (mechanical buddy universe),29 +qubeley papillon,29 +qbu-88,29 +q-chiang,29 +pussy steam,29 +pussy squeeze,29 +puranaria,29 +pupil g,29 +punchy (animal crossing),29 +puma tiger scorpion,29 +puchipu,29 +puchidori,29 +prune (bombergirl),29 +protagonist (susanghan messenger),29 +professor sabaku,29 +prinz eugen (azur lane) (cosplay),29 +presence,29 +pre sktch,29 +power pro kun pocket 13,29 +posom,29 +ponjiritsu,29 +pom,29 +pokemon tcg gb,29 +pokemon between breasts,29 +png pant (bus),29 +pn (ltpn 257),29 +platina77,29 +pisti,29 +piro (piro r),29 +piro (exp 66),29 +piririnegi,29 +pimi (ringsea21),29 +piku184,29 +pikojirokurao,29 +picnicic,29 +picking fruit,29 +pia carrot e youkoso!! 
2,29 +pi (pppppchang),29 +phosphora,29 +peonia (pokemon),29 +penis bow,29 +penguin (one piece),29 +pegasus j crawford,29 +peachpa,29 +peachcak3,29 +pc engine,29 +pazuzu438,29 +pazuu,29 +pavolia reine (peahen),29 +paundit,29 +pauld,29 +patty (artist),29 +patissier,29 +pas' black-haired catperson,29 +park bokja,29 +parasite crest,29 +par.,29 +papaya,29 +pantyhose over swimsuit,29 +pantaloons,29 +pandemonium,29 +panapana (pixiv 12562150),29 +p shiki,29 +p.kibi,29 +p-40 warhawk,29 +oyk (signx),29 +oxoxox,29 +overpowering,29 +overidea,29 +overblot,29 +ou-sama no propose,29 +otter costume,29 +otacool,29 +ota (ota-0000),29 +oshida bob,29 +oshi no love yori koi no love,29 +oscar pine,29 +osanai sanday,29 +orochi (kof),29 +orimiya yui,29 +orihiro0614,29 +origami hina,29 +orgun,29 +orange wristband,29 +orange coffee,29 +oppaisagara,29 +openvl,29 +open hatch,29 +opalisart,29 +opalheart,29 +ootori ouka,29 +oota kuniyoshi face (meme),29 +oota,29 +ookamikakushi,29 +ooishi ryuuko,29 +ooi choon liang,29 +oniku (kusomushi onick),29 +onigiri no gu,29 +on toilet,29 +olympia (pokemon),29 +okken,29 +oki kouji,29 +ohya ichiko,29 +ohma,29 +ohirune,29 +ogino atsushi,29 +ogata tank,29 +ofukuro-sama,29 +oenothera (flower knight girl),29 +oekakiyari,29 +octoball,29 +ocha (kinnkonnsousai),29 +ocancan dancing school,29 +oboro neko,29 +oberon (sao),29 +nyuunzi,29 +nyt (nagane),29 +nyr50ml,29 +nyaruin,29 +nyaromon,29 +nyanyanyanyanyanyanya! (vocaloid),29 +nyansky,29 +nurumu,29 +numazoko namazu,29 +numan athletics,29 +nowsicaa,29 +noske,29 +northampton kai (kancolle),29 +norio minami,29 +norayinu,29 +noor7,29 +nonohara nagisa,29 +noguruma,29 +nodokana yuki,29 +noda megumi (artist),29 +nocturne krumenker (nocxturne),29 +noah ebalon,29 +no.1 machine,29 +nioh,29 +ninon beart,29 +nininisama,29 +nimura ruruko,29 +nikaidou arashi,29 +nijimura's father,29 +niizuma wa serafuku,29 +nicecream,29 +newflame,29 +new york yankees,29 +new battleship princess,29 +nerugal,29 +neru5,29 +neptunite (houseki no kuni),29 +neofreet,29 +nenekirimaru,29 +nemuri miyako,29 +nemimini,29 +nellu (geenymous),29 +nekonote (neconote),29 +nekonin,29 +nekomimi mode (tsukuyomi),29 +nekodosaiun,29 +neko0634,29 +neichii,29 +negev (little drifter) (girls' frontline),29 +neck hold,29 +nazo no diaboro,29 +nayuta (scarlet-noize),29 +navarre (fire emblem),29 +nattsume (natttsume),29 +natsume minami,29 +natsuhiko (pixiv40944),29 +natsu (soulcalibur),29 +natch imaycz,29 +nashimoto yukari,29 +nashi juni,29 +nase yukana,29 +nasca dopant,29 +naruyan (arashiboushi2),29 +narumi remon,29 +nariie shin'ichirou,29 +nankam,29 +nancy makuhari,29 +nanase kokono,29 +nanana narang,29 +nanami izu,29 +nanaka (princess connect!),29 +nanairo kouro,29 +nanahara kaoruko,29 +namikishiho,29 +nal (studio ng),29 +naked costume,29 +nakadashima,29 +nairofu,29 +nai kitsu,29 +nagumo shinobu,29 +nagiyamasugi,29 +nagisa otoha,29 +nagasaki yuko,29 +naala,29 +naaga sonomono,29 +myless,29 +mycstea,29 +my eight,29 +musupon214,29 +musumi kiyoka,29 +murasaki shikibu (purple eye) (fate),29 +murasaki (kyokugen dasshutsu),29 +munya (otikamohe),29 +mumu2126,29 +multicolored pubic hair,29 +mukuro (yu yu hakusho),29 +mui (muica),29 +mugendramon,29 +mucuun yin,29 +muchakushoku,29 +mua (sleeping earth),29 +mrhunking,29 +mr nini,29 +mozya,29 +mozu taiya,29 +mozu (pixiv12188108),29 +mowsovsky,29 +mousou youjo,29 +moubokobokodayo,29 +motsu (kk 3),29 +motouchi naruka,29 +motomura kouichi,29 +mosaiq (lovechild),29 +moruhinesan,29 +moromoro 0p0,29 +moriya ririka,29 +morinaoekaki,29 +moonlight's anti-soul 
(touhou),29 +moonlgnance,29 +moonbyul (mamamoo),29 +mookie (e mook9),29 +monugaeru,29 +monty python and the holy grail,29 +monorisu,29 +monogoi no succubus oyako o mitsuketakara ijimete yaru koto ni shita ww,29 +mondo (crazy raccoon),29 +momona (mvv),29 +momohime ryouran! sengoku asuka,29 +mokuzou,29 +modoi,29 +moco,29 +mobuchin,29 +mobius (daughter of corals) (honkai impact),29 +mmmakaron888,29 +mk 23 pistol,29 +mizushima sayori,29 +mizusawa hikaru,29 +miyoshi kaya,29 +miyazaki chisaki,29 +miyao gaku,29 +miyamoto takashi,29 +miyamo chio,29 +miyama waka,29 +miyama leaves,29 +miyaji ryunosuke,29 +miyabi (run),29 +miura asahi,29 +mitsuko,29 +mitsukii,29 +mitsuhashitaeko,29 +mitonoya saki,29 +miter,29 +miss siamour (human),29 +misogi (real) (princess connect!),29 +misheng liu yin,29 +misawa elena,29 +misaki (doa),29 +misaki (agent aika),29 +miriallia haw,29 +mio (ressha sentai toqger),29 +minori yumeko,29 +minneapolis (wild huntress schoolgirl) (azur lane),29 +mini splatling (splatoon),29 +minerva (fire emblem awakening),29 +minemura,29 +minazuki kotoko,29 +minasato hibiki,29 +minamoto kouji,29 +mina (o414),29 +mimoza,29 +mimori (cotton heart),29 +mimika (puetilun),29 +millia il velch cutrettola turan,29 +millais alloy,29 +miko (35nikomi),29 +miki miki,29 +mikeneko mari,29 +mikasa (battleship),29 +miichinori,29 +migu,29 +migiue,29 +midgardsormr,29 +michael jackson (cosplay),29 +miang hawwa,29 +mg42 (girls' frontline),29 +meto (cat),29 +metal gear ray,29 +mesopota,29 +merellyne,29 +menreiki,29 +memessa,29 +melusmelon,29 +melon syrup,29 +melang b,29 +mel medarda,29 +meiya,29 +mei (maple 152),29 +megazord,29 +megara (disney),29 +mega venusaur,29 +mega gyarados,29 +mega diancie,29 +mechanical hair,29 +mazushii,29 +matutoki nara05,29 +matsushima kei,29 +matsu takeshi,29 +matoba,29 +matdoljangin,29 +matchuri,29 +massakasama (style),29 +mashiro yozakura,29 +masaki kazusa,29 +mary (identity v),29 +maruuchi shouko,29 +maruta maruta,29 +maruo (mokurentenpu),29 +maruman,29 +marison (aono107),29 +marcus (rnarccus),29 +maplum,29 +mao (tales),29 +manzoku-san,29 +mantou yu,29 +manosdetrapo,29 +mano (m1n0f2e1),29 +manbou (manvow),29 +mamo murata,29 +mamiru (42105269),29 +mameneko (pai),29 +maliketh the black blade,29 +malaysia,29 +makuro (inmomakuro),29 +makochii,29 +makna armour,29 +majoora,29 +maji (majibomber),29 +mail (mail gell),29 +maiden with eyes of blue,29 +maid koubou,29 +mahou shoujo ikusei keikaku queens,29 +magicami,29 +magical mirai rin (2019),29 +magical mirai len (2019),29 +magical canan,29 +maggea22,29 +maezono koharu,29 +madoushi s,29 +madolche magileine,29 +madhand,29 +maco (crea-0328),29 +machias regnitz,29 +m1918 (bunny girl) (girls' frontline),29 +lznustrpo,29 +lynx (chrono cross),29 +lydian academy swimsuit,29 +lusamine fused (pokemon),29 +lunaria,29 +luna (punishing: gray raven),29 +luna (kaminomi),29 +luke uehara,29 +luerstine,29 +lucy van pelt,29 +lucrezia noin,29 +lucifer (the seven deadly sins),29 +lu xun,29 +lsunl,29 +lrul,29 +lovewolf5122,29 +louie (pikmin),29 +lotus wand,29 +lotus earrings,29 +lotion play,29 +lord tenma (touhou),29 +long fangs,29 +lone wolf,29 +lolzis,29 +lol (harikofu),29 +llmia4,29 +lixsium,29 +lithium10mg,29 +liquid metal slime (dq),29 +lionet0806,29 +lio convoy,29 +linked sausages,29 +linbai22,29 +limfoman,29 +limbo (game),29 +lilith (unxi),29 +lierre,29 +licoco,29 +lich king,29 +li zhiheng,29 +li xiangfei,29 +li se,29 +lexis yayoi,29 +levi russel,29 +level difference,29 +leslychoco15,29 +leopardon,29 +lemon browning,29 +leandro franci,29 +lazoomaiga,29 
+layout plan,29 +laura nissinen,29 +laughing octopus,29 +laruna (granblue fantasy),29 +lart art1,29 +laolao (granblue fantasy),29 +lan ren hui,29 +lan (gyee),29 +laika (slime taoshite 300 nen),29 +lacolon,29 +kyuujou komachi,29 +kyoukya (kyouya02),29 +kyon (kyouhei-takebayashi),29 +kyomunohi,29 +kyary pamyu pamyu,29 +kyako youkan,29 +kyabetsutarou,29 +kuzuhana,29 +kusunoki (escude),29 +kusumori shiba,29 +kurusu ren,29 +kurusu alexandra,29 +kuroyagi,29 +kurotsuki hiiragi,29 +kurosteel ds,29 +kurono kiria,29 +kuroneko jiji,29 +kuromu shado,29 +kuromitsu (9633 kmfr),29 +kurokami no onna,29 +kuroda (kuro yyy),29 +kureihii,29 +kurasuke's maid girl,29 +kurakake clara,29 +kunoichi zero,29 +kunio-kun,29 +kunimasa ayami,29 +kung fu panda,29 +kumo suzume,29 +kuma yum24,29 +kubota jun,29 +koyuki (kimetsu no yaiba),29 +kouzuki kazuna,29 +kouzu shou,29 +kouseki0024,29 +kousaka junpei,29 +kounaien (comic1217),29 +koujisako,29 +kougekiryoku,29 +kota111519,29 +konoyasoul,29 +konna ko ga itara boku wa mou,29 +kongbai huanxiang,29 +kongai,29 +kon (kemono jihen),29 +komori sana,29 +kome (kokomoti),29 +komatsuzaki umi,29 +kolibri (girls' frontline),29 +kokoro magic a to z,29 +kokonoe misui,29 +kojikoji,29 +kohinata mangetsu,29 +koh (user kpcu7748),29 +kogashirou,29 +kogakunama,29 +kodomonomikata (noikurezant),29 +kobotoke nagi,29 +knight (elsword),29 +kneesocks senritsu,29 +knee cutout,29 +kmgrru,29 +klim nick,29 +kksukeke,29 +kiyukiyutan,29 +kitsune no ko,29 +kitou akari,29 +kiteman442,29 +kitahara aya,29 +kita nayuta,29 +kit,29 +kisugi hitomi,29 +kishita yuu,29 +kisaragi pana,29 +kiryuu iyori,29 +kirishima sakura (hundred),29 +kiri ph,29 +kirby's epic yarn,29 +kirawus (golden kamuy),29 +kira yoshihiro,29 +kinoshita tomomi,29 +kinoshita sumie,29 +kino707,29 +kingprotea (second ascension) (fate),29 +king arthur (eiyuu senki),29 +king (dakemakura),29 +kimo suna,29 +kimi to kanojo to kanojo no koi.,29 +kimi e okuru sora no hana,29 +kikuchi milo,29 +kikouken,29 +kikiki (hiya mikan),29 +kienan lafferty,29 +kidon,29 +kiana kaslana (white comet) (cosplay),29 +kezime,29 +kesa kujiru,29 +kensaint,29 +kebei,29 +kazuno (horizon),29 +kazuki sanbi,29 +kaya (hydego),29 +kawanishi yuuji,29 +kawaii girl (avogado6),29 +kawai kei (artist),29 +kawahagi-tei,29 +kawachi (hina),29 +kawabeako,29 +katzueki,29 +katubusi kisimen,29 +katsushika hokusai (fate) (cosplay),29 +katrina (romancing saga),29 +katann,29 +kasy,29 +kasuga haruhi,29 +kasshoku danchi e youkoso,29 +kashiwada kiiho,29 +kasasagi07,29 +karou (lhscck302),29 +karateka (baromaru),29 +kapirusu,29 +kaon (kyoushirou to towa no sora),29 +kanzaki sayaka (hg ni koisuru futari),29 +kanzaki aoi (true blue),29 +kantoku collection,29 +kanraku,29 +kannovaku,29 +kannasetsuna,29 +kanna kamui (dragon) (maidragon),29 +kanna kamui (cosplay),29 +kandori makoto,29 +kanatsuki tatsuya,29 +kamiwazumi maya,29 +kamishiro maiku,29 +kamineko,29 +kami otona,29 +kamen rider amazon alpha,29 +kamatani yuuki,29 +kamakurako,29 +kakusei avenger,29 +kaiouken,29 +kaikoinu,29 +kaigaraori,29 +kaigan,29 +kaguya-hime no monogatari,29 +kagari tuepesyu,29 +kagaku chop,29 +kaede (harutan109),29 +kadotarou,29 +kado (hametunoasioto),29 +kaden (muxt8423),29 +k-nattoh,29 +k-bose,29 +jyuma,29 +juusensha koubou,29 +jururu,29 +jun (nad-j),29 +jun (junko),29 +ju yorimoto (ranten yume),29 +ju-on,29 +joya no kane,29 +joujou,29 +josalyn visenad,29 +jolker,29 +jogo (jujutsu kaisen),29 +joeian,29 +jliaan,29 +jip,29 +jinri shijie,29 +jewel (umamusume),29 +jenna (golden sun),29 +jeanne d'arc alter santa lily (summer little) 
(fate),29 +jaycee (tekken),29 +javelin (energetic idol @120% motivation!) (azur lane),29 +jasmine (deltora quest),29 +jashin doruton,29 +japanese skink,29 +jangsunyo,29 +jangif,29 +jademoon,29 +jack-o'-lantern head,29 +jack-ddd-no13,29 +jaack,29 +j (let's & go),29 +izumi shin'ichi,29 +izumi roka,29 +izumi natsuka,29 +izanami (p&d),29 +iyama nami,29 +iwatozaki mamoru,29 +iwamine shuu,29 +iwabuchi haruka,29 +itou ebiko,29 +ito,29 +isorashi,29 +iskanderednaksi,29 +ishtar (elsword),29 +ishikawa masakazu,29 +ishikawa goemon xiii (cosplay),29 +isaac macdougal,29 +ironing board,29 +iron fist alexander,29 +iroha kaede,29 +iris krug,29 +iria zeiram the animation,29 +irene (jojo),29 +iralion,29 +ipo-chan,29 +inubashiri momo (suna),29 +insect glaive,29 +inoue takina (cosplay),29 +inose riku,29 +inkzooka (splatoon),29 +inica,29 +infel (ar tonelico),29 +indy k,29 +indian rhinoceros (kemono friends),29 +inazuma legend japan,29 +inari kuromu,29 +inari jin,29 +impmon,29 +implied orgasm,29 +imi jericho,29 +imdrunkontea,29 +imai miu,29 +ima (luce365),29 +illyasviel von einzbern (swimsuit archer) (first ascension),29 +illaoi,29 +ikumo taisuke,29 +ikeya (ikeya314),29 +igniz (kof),29 +iga (nonono tsuushin),29 +ievan polkka (vocaloid),29 +idunn (megami tensei),29 +idoraad,29 +ichinose natsuki,29 +ice floe,29 +ice dragon,29 +icchiramen,29 +ibuki suika (watermelon),29 +hyetta (elden ring),29 +hybrid (1212apro),29 +hyakkihime,29 +huracan,29 +hulkbuster,29 +huan shi tian tong,29 +hsch,29 +hp23,29 +housulu,29 +houraiji kyuushou,29 +hound (transformers),29 +hottate,29 +hotateyuki,29 +hosokawa gracia,29 +hoshino yura,29 +hoshino yachiho,29 +hoshiinasake,29 +hose between breasts,29 +horace (pokemon),29 +hoopa (unbound),29 +hong da,29 +honeymelon,29 +honey badger,29 +honda nsx,29 +holobirds,29 +holding utensil,29 +hobbit,29 +ho2nium,29 +hizuki reiya,29 +hizikit,29 +hitsuki (hiidukii),29 +hitagiri,29 +hisakawa haru,29 +hirano kouta (style),29 +hiramoto akira,29 +hirakawa,29 +hirakata masahiro,29 +hiraga pikurosu,29 +hiraba 6018,29 +hionhk,29 +hinya (wabi),29 +hinamushi (shokuyou),29 +himukai yuusuke,29 +himiko (eiyuu senki),29 +himekouji yuki,29 +hime takeo,29 +hillary clinton,29 +hilda (special costume) (pokemon),29 +hikuushi series,29 +hikari yui,29 +hiiro,29 +high-waist bikini,29 +higashikata rina,29 +hide (rightstuff annex),29 +hida naria,29 +hibiki (bagawa),29 +hiballista,29 +hey cre,29 +henginnnnnn,29 +hemyi,29 +helena (may i have this dance?) 
(azur lane),29 +hekiyama yurika,29 +height switch,29 +heidi (alps no shoujo heidi),29 +hei yu,29 +hei d,29 +heheneko,29 +heather37,29 +heat (dds),29 +he-man,29 +hayasaka,29 +hayakawa ai,29 +hawkeye gough,29 +hatsuharu (azur lane),29 +hatachi8p,29 +hat ring,29 +hashimoto sana,29 +hashima chihiro,29 +hashiguma,29 +hasemi ryou,29 +hasaki (alice ruru),29 +has watermarked revision,29 +harustein,29 +haruno hime,29 +haruka (new year) (blue archive),29 +harujiya (setugetuka),29 +haruharo (haruharo 7315),29 +harpy (monster girl encyclopedia),29 +harpie lady 2,29 +harp seal (kemono friends),29 +harikona,29 +harasaki nozomi,29 +harari,29 +happy happy friends,29 +haoyuan,29 +hanui,29 +hanemikakko,29 +hand in jacket,29 +hanayono menora,29 +hana arare,29 +hana (me!me!me!),29 +han (hehuihuihui),29 +hamza touijri,29 +hamidasu,29 +hama (sleeps),29 +halluci,29 +hakuhouin ayano,29 +hakata tonkotsu ramens,29 +hakasesuitchi,29 +haishima xv,29 +hairy pikachu,29 +hagakure hiroko,29 +hae-young na,29 +hadi,29 +hachirodesu,29 +hachi duchi,29 +hachi (lgm),29 +gyahu,29 +guy (fire emblem),29 +gurugnsn,29 +guriddopitto,29 +guodon,29 +gun zi (i&eyes),29 +guman project,29 +gu xun er (doupo cangqiong),29 +gu tao,29 +gsn (nocturne),29 +grune (tales),29 +groza,29 +growlanser v,29 +grim-evilnov,29 +grillby,29 +gretia,29 +gretel jeckeln,29 +greta (pokemon),29 +greig (dq11),29 +green ranger,29 +greavard,29 +graphig,29 +grandia iii,29 +gr (gule),29 +goyoyoo,29 +gotou masaki,29 +gorohati,29 +goro desu,29 +gopher (soul eater),29 +gooak,29 +gomix,29 +gollizo,29 +golden frieza,29 +gokokukyou,29 +glunk (kirby),29 +gluko,29 +glowing veins,29 +gliding,29 +girls und panzer: atsumare! minna no senshadou!!,29 +girl with a pearl earring,29 +ginho,29 +ginger root,29 +ging freecss,29 +gin (gin937),29 +gigigimimi,29 +gigan,29 +gie (gienara),29 +giant salamander,29 +giant robo (mecha),29 +ggyoku,29 +gfpebs,29 +gespenst,29 +gerusyu,29 +genn 00o,29 +gender transitioning,29 +geminiboar,29 +geara doga,29 +gazelle,29 +gassun,29 +gash bell,29 +garaudon,29 +gap (pdmng),29 +galvatron,29 +galient,29 +gachimuchi de dosukebe na kateikyoushi no oniisan to sugosu natsu,29 +gabu,29 +gaanc 23 (tomosuge),29 +g36c (you who steps up) (girls' frontline),29 +g-sky goddess (ishiyumi),29 +fyy2333,29 +fuyukono,29 +fuunyon,29 +futon tataki,29 +futami shion,29 +futami masaki,29 +furo (harirate),29 +furansiumu,29 +fura (wind rahu),29 +fur skirt,29 +full armor gundam,29 +fukunomiya koko,29 +fujiwara yoshito,29 +fujito (call f ),29 +fujishino shikino,29 +fujishima kazuya,29 +fujisawa aya (gundam build divers),29 +fujisaki mana,29 +fujimoto shirou,29 +fujimaru ritsuka (female) (chaldea pathfinder),29 +fujii yakumo,29 +fujii tomoyuki,29 +fuchima,29 +fu-girl,29 +frown (wonderland85711),29 +frog mask,29 +fried squid,29 +freddyhalloween,29 +fox daughter (doitsuken),29 +florence nightingale (divine princess of the storm) (fate),29 +fletches,29 +fleki,29 +flear,29 +firepo,29 +fiore (baru),29 +finger wrap,29 +figure four leglock,29 +fhara,29 +ffflilil,29 +ffc,29 +fey (unknown artist),29 +ferdinand marl e,29 +fenneko,29 +fen zuo,29 +female commander (forever 7th capital),29 +felia hanakata,29 +fei (songyijie),29 +faton,29 +fate/extra record,29 +fanatio synthesis two,29 +fan no hitori,29 +famitsu,29 +fallstreak hole,29 +falce,29 +exile (elona),29 +excela noa aura,29 +eviryun,29 +evil-ss,29 +euryale (fate) (cosplay),29 +eun soyeon (lustyfox),29 +eugene sevenstark,29 +etsuransha no rei,29 +essex (a trip down route 66) (azur lane),29 +esper girl,29 +esha,29 +ershin,29 +eris (cosmic 
break),29 +erinan,29 +erika sato,29 +erika (sygna suit) (pokemon),29 +epomeno,29 +emu 1316,29 +emilou apacci,29 +emanon (ice),29 +elusya,29 +elsa (g557744),29 +elsa (demonbane),29 +elliot craig,29 +elizabeth bathory (japan) (fate),29 +eir (machi),29 +eicy (alchemy stars),29 +eichi (0903275),29 +eggplant costume,29 +eeteru,29 +educational broadcasting system,29 +edoben,29 +edboy,29 +edamamezooooo,29 +echipashiko,29 +ebisumaru,29 +ebisuke,29 +ear tug,29 +dysoor,29 +dyogrammaton,29 +dynorz,29 +dutch text,29 +duster coat,29 +dudlesnoodles,29 +drum major,29 +drooping,29 +drawn halo,29 +dragon knight (dungeon and fighter),29 +drachea rannak,29 +dou (doudouzi),29 +dortin,29 +dorothy (granblue fantasy),29 +dorasu,29 +dopoing,29 +doongdang,29 +donquixote family,29 +donald duck (cosplay),29 +dominion,29 +dollyspica,29 +dohyo123123,29 +doggy god's street,29 +diter-trsey,29 +disgaea team attack,29 +dimensional hole,29 +dima,29 +digimon crest,29 +diagonal-striped legwear,29 +dexter,29 +deraken,29 +demonbane (mecha),29 +deltora quest,29 +delphinium (flower knight girl),29 +death 13,29 +death2990,29 +dasulchan,29 +darus5,29 +daruma is god,29 +dark nebula,29 +dark magician beth,29 +dark bride,29 +dannoura yuuki,29 +dandruff,29 +damien dozias,29 +daihatsu,29 +daichi (daiti1318),29 +cytus ii,29 +cynthia rou,29 +cy9,29 +curtis (pokemon),29 +curacoa (azur lane),29 +cum on neck,29 +cum on horns,29 +cryptocurrency,29 +crow (show by rock!!),29 +cross regalia,29 +cremanata,29 +crafting,29 +cracked shell,29 +coyote tango,29 +cover-d,29 +costume combination,29 +cornflower,29 +copochui,29 +cooper (blazing tennis battle) (azur lane),29 +cookin,29 +control tower,29 +constellation (warship girls r),29 +conflict (module),29 +confetti ball,29 +comanie,29 +color works,29 +color contrast,29 +cologne (ranma 1/2),29 +collapsing,29 +cokio,29 +cocoka,29 +cobushii (arms),29 +cnanao,29 +clumeal,29 +closing book,29 +clock print,29 +cliov,29 +clammy zell,29 +cissnei,29 +circlet princess,29 +cinque (fft-0),29 +cilfy,29 +chun'ai zhanshen milili,29 +chuck preston,29 +chrocatz,29 +chrissy (animal crossing),29 +chougoukin kurobikari,29 +chokota,29 +choia,29 +choi bounge,29 +choco taberusan,29 +chmyxo,29 +chizi,29 +chiyu (cotoritori),29 +chisuzu mei,29 +chiro (pez777),29 +chikawa shibainu,29 +chikada haruko,29 +chick print,29 +chiave (arknights),29 +chey,29 +chest rig,29 +chernobog (housamo),29 +cheese hair ornament,29 +checkered cape,29 +cheburashka,29 +chausson,29 +chamuhz,29 +chamoro,29 +ceph (greatyazawa1819),29 +celene (3di),29 +cchhii3,29 +cattleya baudelaire,29 +catsila,29 +cater diamond,29 +cat between legs,29 +casket,29 +carol0905,29 +carmen (project moon),29 +care label,29 +capelin s,29 +cannelle (sword girls),29 +canata katana,29 +camilla (kagemusha),29 +calvina coulange,29 +cailleach bheur (last origin),29 +cagliostro (granblue fantasy) (cosplay),29 +ca ba ya ki,29 +byunei,29 +byoru,29 +butcher,29 +bureoeve,29 +buranantoka,29 +buck (pokemon),29 +buchibussei,29 +brown tunic,29 +brown dog,29 +brolo,29 +broken staff,29 +broken goggles,29 +brendan (pokemon) (cosplay),29 +breasts on shoulders,29 +breakers,29 +brave new world,29 +box (hajimeyou654),29 +bookend,29 +bonnie (fnaf),29 +bonjindaaa,29 +bongnom,29 +bon bonne (mega man),29 +bokjumeoni,29 +bob (overwatch),29 +bloodhound (apex legends) (cosplay),29 +blood on mask,29 +blood in water,29 +blocking kiss,29 +blipper,29 +black widow (cosplay),29 +black knife (elden ring),29 +black devil girl,29 +bk mita,29 +bison,29 +biro-budou,29 +bingsang,29 +bifanghuanshi,29 +beyumi,29 +betty 
boop (character),29 +betty boop,29 +beshiexe,29 +beryl gut,29 +bernie sanders,29 +beri (zankuro),29 +beretta model 38 (girls' frontline),29 +benteja,29 +benjomushi,29 +bench press,29 +beko (beco 1122),29 +beisaku bei,29 +bearclaw,29 +basilisk (monster girl encyclopedia),29 +bartre (fire emblem),29 +bart simpson,29 +baron (baron-eros),29 +bananafish1111,29 +ball toss,29 +baliu,29 +baketsumuri,29 +babyls school uniform,29 +baba yasushi,29 +b-cat,29 +azuma reiji,29 +azuma fubuki,29 +azhang,29 +azanami (pso2),29 +az sainklaus,29 +ayumi (xiwu),29 +ayukisa,29 +ayashimon,29 +ayasaki yukino,29 +ayanokouji pai,29 +ayan 1593817922,29 +ayako (twrlare),29 +axehorn (ssambatea),29 +awakeningdog,29 +awa (awaawa),29 +aura (.hack//),29 +atorie,29 +aticsanir,29 +athria,29 +atelier lilie,29 +atac,29 +asuka (dream c club zero),29 +asu no yozora shoukaihan (vocaloid),29 +asano ruri,29 +asahina yori,29 +asahina yoi,29 +asahina kokomi,29 +asahimachi,29 +asagiri0700,29 +asagi1111,29 +asaba ureshiko,29 +asa (memento),29 +arumat p. thanatos,29 +artina (disgaea),29 +artificial angel (araido kagiri),29 +arthropod,29 +artemisia (pixiv fantasia),29 +arithmetician (fft),29 +ariilha12,29 +arifureta sekai seifuku (vocaloid),29 +aria vancleef,29 +arenoyoni,29 +arcade riven,29 +arasa ol haman-sama,29 +aranagi (arng 4401),29 +araburu kisetsu no otome-domo yo.,29 +aquna,29 +aqua fire,29 +aps (alice-momizi),29 +apricot (flower knight girl),29 +apapo,29 +aozukikawasemi,29 +aosuke,29 +aopiqoo,29 +aoaomzir,29 +ao homura,29 +ao (chung0u0),29 +anna mel,29 +ankuru (ankle art2),29 +animal pose,29 +aniao ya,29 +angelos armas,29 +anemos (elsword),29 +andrea averill,29 +andre roland jean gerard,29 +and uge,29 +analogue: a hate story,29 +anagura mogura,29 +anaglyph,29 +amico,29 +american psycho,29 +ameng (katena1990),29 +ambertwo (pokemon),29 +amatsuba mimina,29 +amatlas,29 +amaterasu (mythology),29 +amartbee,29 +amanoyui,29 +amanogawa subaru,29 +amano saki,29 +amano issui,29 +amamiya kanade,29 +amagiku,29 +amagasaki mikoto,29 +alsea,29 +alphe,29 +alolan diglett,29 +aligula,29 +alicianrone,29 +alice vu,29 +aldharoku,29 +alcohol (coldfront),29 +akutsu,29 +ako (zlzdf),29 +akitsu (davis0527dx),29 +akito (akitotika),29 +akiru (hokuouhuuhu),29 +akiran (r32),29 +akira ry0,29 +akinatsu meguru,29 +akiba nagito,29 +aki sora,29 +akezu,29 +akershus fortress (oshiro project),29 +akayama toshifumi,29 +akashi (welcome to sofmap!) (azur lane),29 +akari (fantasista doll),29 +akapocho,29 +akano yomi,29 +akanboh,29 +akagi mako,29 +aka syakkou,29 +aisha greyrat,29 +air groove (quercus civilis) (umamusume),29 +air defense cruiser princess,29 +aino pekonen,29 +aikurushii (idolmaster),29 +aie,29 +aiai (jsm),29 +agrias-san to love love lesson,29 +ageha,29 +agatha chris q outfit (touhou),29 +agas (vpwt7475),29 +afuu,29 +aftergardens,29 +acco (sh in),29 +acaallodola,29 +aburana (flower knight girl),29 +abel nightroad,29 +abe takaya,29 +aabtcndneefkg,29 +a way out,29 +??? 
(artist),29 +5ht,29 +416 day,29 +3ping lovers!,29 +33dot,29 +33bun3,29 +299 (horisso),29 +207,29 +2010s (style),29 +1o (ichio),29 +132,29 +111111111 (leiyao),29 +10birthday10,29 +0p (spiriti),29 +0byte,29 +0 ebj,29 +05deruta,29 +.ronde,29 +zxc,28 +zuikaku kai (kancolle),28 +zuikaku (ceremonial crane) (azur lane),28 +zuihou (hechen121),28 +zudarts lee,28 +zoku hitou meguri,28 +zoe (crownsforkings),28 +zkxandy,28 +zima (ursusio79) (arknights),28 +zim-o (2cy520),28 +zhuganchangjiadan,28 +zhoujialin,28 +zhandou greymon,28 +zeus (one piece),28 +zeta (24904340),28 +zest grangeitz,28 +zer0.zer0,28 +zebrina (show by rock!!),28 +zarisu,28 +zack (doa),28 +zabu rou,28 +zabaniyya (fashionista suit) (housamo),28 +yuzuyu (cookie),28 +yuzuyoukan,28 +yuzuriha (pixiv 14248010),28 +yuzure mon,28 +yuzun,28 +yuzukicture,28 +yuzuki (chobits),28 +yuzuha (tengokugumi),28 +yuzu sato,28 +yuuse kouichi,28 +yuurika (gorua),28 +yuuki subaru,28 +yuuki (moon child),28 +yuukagen (poipoipopoino),28 +yuu kikuchi,28 +yuu (masarunomori),28 +yusheng,28 +yurui tuhu,28 +yurikamome8160,28 +yurax-mae,28 +yunamul,28 +yumu (8181018),28 +yumina enclave,28 +yumesuke,28 +yukotaruma,28 +yukiya 0 0,28 +yukitsuki hisa,28 +yukimuro,28 +yukimizu,28 +yukiiri,28 +yuki mashiro,28 +yukadon,28 +yuigaoka music program school uniform,28 +yui (nightflight),28 +yuhka,28 +yu yun,28 +yu yin,28 +yu mei-ren (spare the idle talk) (fate),28 +yu kitsune,28 +yu-ga,28 +yousui,28 +young genji,28 +yotte,28 +yotsura,28 +yoshida saki,28 +yoruko (lily-spring),28 +yorisuke,28 +yonkuron,28 +yoni (zhang),28 +yomusugara (uzo-muzo),28 +yomogi komegura,28 +yomogi (becr),28 +yomiclone,28 +yomena,28 +yoite,28 +yohane yoshiko,28 +yilocity,28 +yidie,28 +yeluno meng,28 +yayoi shiro,28 +yayo325,28 +yatsuashi matomo,28 +yasuhito (yasu no e),28 +yasuda katsunori,28 +yashiro (lockheart),28 +yanase takayuki,28 +yammy (cellblo),28 +yamato (inraitei),28 +yamaoka46,28 +yamaneko,28 +yamamoto enji,28 +yamamoto doujin,28 +yamaki suzu,28 +yamada vanhouten,28 +yamada (ishida to asakura),28 +yakushiji ryouko no kaiki jikenbo,28 +yako mashiro,28 +yakisobaosu,28 +yahoo0124,28 +yagura miketa,28 +yagen toushirou (kiwame),28 +yagen sasami,28 +yadoumaru lisa,28 +yada masumi,28 +yachiyo mei,28 +y udumi,28 +xps,28 +xijian,28 +xianjian lingluan,28 +xhouz,28 +xenogears (mecha),28 +x2,28 +wyldstyle,28 +wuliao555,28 +wuim (mana khemia),28 +wttwj,28 +wss (32656138),28 +wrestling boots,28 +wormmon,28 +womu,28 +wolverine (kemono friends),28 +wjn-rance,28 +winged lion (dungeon meshi),28 +windy (suikoden),28 +william knights,28 +wild girls,28 +wil (fire emblem),28 +wii hola,28 +white garter,28 +welrod,28 +wednesday (starsilver),28 +weapon stand,28 +water enchantress of the temple,28 +water elemental,28 +watchdog rol (y1104280730),28 +watari shinji,28 +watanuki ron,28 +wasp girl,28 +washu junkyu,28 +wariko,28 +walking backwards,28 +wakasagihime (cosplay),28 +wada kenichi,28 +w (w64851564),28 +w.r.b,28 +vyzov tv,28 +vri (tinder box),28 +voltes v (mecha),28 +vogel schwein,28 +vizerothree,28 +vivian (divine gate),28 +virginia glynnberets,28 +viper gt1,28 +viole mai,28 +vinny (promare),28 +vincent van gogh (style),28 +village chief (hentai elf to majime orc),28 +vertical-striped leotard,28 +venus blood -frontier-,28 +vent of the front,28 +uwa (rakko),28 +utsugi sakuko,28 +utsugi lenka,28 +utako,28 +usui horokeu,28 +ushi-oneesan,28 +usfdive,28 +usano,28 +usami shiori,28 +untied swimsuit,28 +unreal night girls,28 +unknown artist of 2ch sakura kyouko thread,28 +un do,28 +ump45 (lonely traveler of eternity) 
(girls' frontline),28 +umetarou (shujinko kidori),28 +ultraman tiga,28 +ultra guardians uniform,28 +uemura hitoe,28 +uehara doh,28 +uav,28 +tzecheleaf,28 +tyranid,28 +type 61 (gundam),28 +tyourou god,28 +tsurara (pop'n music),28 +tsukkun,28 +tsukigime,28 +tsukebo,28 +tsuji tomoko,28 +tsubakihara ren,28 +true damage qiyana,28 +trouble trap laboratory,28 +tropical camouflage,28 +triceps,28 +train heartnet,28 +toyono435,28 +tower of hanoi,28 +touko (tokotoko),28 +toroi (run01211),28 +torn choker,28 +torikoriko please!!,28 +toriko no shirabe -refrain- chouritsu sareru otome-tachi to onna kyoushi,28 +toono suika,28 +too (totoo),28 +tonokawa,28 +tonnura,28 +tonko,28 +tomino yoshiyuki,28 +tomare (akashingou),28 +tom (remisaku),28 +tokio (okt0w0),28 +tokimiya rem,28 +tokimeki memorial only love,28 +toki reatle,28 +toki (rumukio),28 +toha heavy industries,28 +togashi yumeha,28 +to love-ru darkness 2nd,28 +tm (utfp3372),28 +tkgoishi,28 +tittu,28 +tissue princess,28 +tiny stars,28 +tiequan (last origin),28 +tian dian,28 +thu,28 +throat grab,28 +thomas claire,28 +thealagator,28 +the third,28 +the robots of dawn,28 +the regulars (torikissa!),28 +the legend of zelda: phantom hourglass,28 +the king of fighters '99,28 +tetsu10ru,28 +tetra (log horizon),28 +teruriu,28 +teo (telo ruka),28 +tensai bakabon,28 +tenmas,28 +ten (manga),28 +ten'on (amane09),28 +temakizushi (temakizushisand),28 +telstar 18,28 +teina,28 +teiko (gulp5959),28 +tehryu,28 +teatix,28 +team mystic,28 +tayashitsu,28 +tatsumi,28 +tatsukisan,28 +tatsukichi,28 +tarot set,28 +tareus (girls' frontline),28 +taphris,28 +tanupon,28 +tanuki (siutkm0228),28 +tannkobuiwa,28 +tanimura kaoru,28 +tangdouji (machine),28 +tangamja,28 +tanaka yuusuke,28 +tanaka keiichi,28 +tame (tame-97),28 +tamatabe,28 +tamaki shin'ichirou,28 +tamago soup,28 +tales of the rays,28 +takumi watanuki,28 +takuan (mo55ilst),28 +taku (yakumodaisuki),28 +taki zen'ichi,28 +takeshima satoshi,28 +takekawa shin,28 +takega satsu,28 +takecopter,28 +takebouzu,28 +takayanagi katsuya,28 +takatou sora,28 +takasuga tsukushi,28 +takanashi homare,28 +takamiya mana,28 +takahashi eriko,28 +taimanin asagi zero,28 +tail or,28 +taichi suzuki,28 +tagashira shinobu,28 +taekwon (ragnarok online),28 +tachibana kaoru (toosaka asagi),28 +tachi (tachibana),28 +t-hou,28 +t-90,28 +syutyou,28 +syaofoo,28 +sweeter (h110111),28 +swan white,28 +swallow zzy,28 +svv art,28 +suzunoya,28 +suzunooto shirasu,28 +suzuki rui,28 +suzuki ao,28 +suzuki anzu,28 +suzukaze (lvi),28 +surufuji,28 +surfing pikachu,28 +super sailor mercury (stars),28 +sunya (honorin-yuunibo),28 +sunnypoppy,28 +sunaguma,28 +sumita kazuasa,28 +sumika (smikas),28 +suga natsumi,28 +succubus (lord of vermilion),28 +suarokin,28 +stuffed owl,28 +studio rakkyou,28 +strip hair,28 +streyah,28 +stretched neck,28 +street gutter,28 +storage pot,28 +stellula eques,28 +stax,28 +starky (chrono cross),28 +starhorse pocket,28 +star wars: the phantom menace,28 +star wars: the last jedi,28 +star wars: a new hope,28 +star trek: the next generation,28 +star guardian xayah,28 +star fox 2,28 +stakataka,28 +ssi,28 +sriokorr,28 +sprbouuz,28 +spray poka,28 +spoken emoticon,28 +split depth,28 +sphene (houseki no kuni),28 +spanking self,28 +south dakota (warship girls r),28 +sourenkio,28 +sou (boxxx82),28 +sosogi (qtgejyrkhrng4jk),28 +sorrowny,28 +sore wa,28 +sora hasama,28 +sonoda umi (cosplay),28 +solmoniq,28 +sokabe megumi,28 +soitsu (alb),28 +softhanten,28 +so tsubasa,28 +so-ichi,28 +smelling pantyhose,28 +slope (check238),28 +slime beth,28 +sleepyowl (jobkung15),28 
+sleep (isliping),28 +slaanesh,28 +skyscraper (artist),28 +sky focus,28 +skull-shaped pupils,28 +skf,28 +siyusiyu13,28 +sirotuki ito,28 +sirotuka lambda,28 +single mitten,28 +since2019,28 +simon stafsnes andersen,28 +silvy (hiruno),28 +silvia piquet,28 +silver wolf (honkai: star rail),28 +silver chain,28 +sicily (disgaea),28 +shuusaku,28 +shuumatsu ga yattekuru! (vocaloid),28 +shuu (sirokumasabu),28 +shuiyituzi,28 +shui qian he kafei,28 +shu (arc the lad),28 +showtime illusion (idolmaster),28 +shouz,28 +shoumetsu toshi,28 +shoulder patches,28 +shooot108,28 +shokuyou pants,28 +shizuki sayaka,28 +shiu (pika),28 +shiruhino,28 +shirosaki rio,28 +shiroro,28 +shironagasu senpai,28 +shirokuroya,28 +shiro (kemurikusa),28 +shirisensha,28 +shiratsuki shino,28 +shiratori kuu,28 +shiranui kensetsu,28 +shiraniwa rin,28 +shiozaki ibara,28 +shiori (tsuchikure),28 +shion (michiking),28 +shio (shirone),28 +shinno,28 +shinganji kurenai,28 +shingai eri,28 +shinazo,28 +shin ringetsu,28 +shimusu,28 +shimoochiai touka,28 +shimakaze (the white rabbit of wonderland) (azur lane),28 +shima (6land),28 +shikoke (fizintine),28 +shikishiro konomi,28 +shikikagami sanae,28 +shijou raimu,28 +shiiton gakuen school uniform,28 +shiina tsubasa,28 +shiimo,28 +shiiba aki,28 +shigarami kyouma,28 +shidare hotaru (cosplay),28 +shichikaku,28 +shi jun ti,28 +shi-ro,28 +sherry belmont,28 +shea haulia,28 +shangri-la (utopia's collector) (azur lane),28 +shakuyouka,28 +shadow tracker elina,28 +shadow lugia,28 +shadow ball (pokemon),28 +shade (futagohime),28 +sha,28 +seven of hearts,28 +seto (yancha gal),28 +seoltang (nitro sugar),28 +sensei (shepherd0821),28 +senpaihawkkun,28 +senoo arika,28 +senntakuya,28 +senkou no flash,28 +senba (592683801),28 +sekai saikou no ansatsusha isekai kizoku ni tensei suru,28 +sekai de ichiban tsuyoku naritai!,28 +seirei911,28 +seia (tikxxx),28 +seaside sukeban (mg) (blue archive),28 +seafoamboy,28 +scarlett ann,28 +satsu,28 +satomi (n-s freedom),28 +satomatoma,28 +satelyte,28 +satan jacob,28 +sasashigure miyo,28 +sasarekoube,28 +sasamori ryouta,28 +sasamaru (sasamaru),28 +sasahara souhei,28 +sasahara (shou goi),28 +sara (uunyan),28 +saphirya,28 +sanso (kasyawamoti),28 +sano keiko,28 +sanae (jomill04),28 +sanada ririna,28 +samurai (elden ring),28 +samanta,28 +saltypoundcake,28 +sakyuu futoshi,28 +sakurazuki yura,28 +sakuraki riichi,28 +sakurai kouichi,28 +sakurai027,28 +sakurada hikari,28 +sakura cha,28 +sakuma mashiro,28 +sako (oyatutabero),28 +sakichi,28 +sakecho,28 +sailor iron mouse,28 +saiko heart (love live!),28 +saijou hinako,28 +saiga-12 (crimson navigator) (girls' frontline),28 +sageo yn,28 +safe3133,28 +sachou,28 +sachirika,28 +saburouta,28 +saburou 03,28 +s2riridoll,28 +ryuuta (akatukiryoka),28 +ryuudouji shimon no inbou,28 +ryuuama,28 +ryou (effort),28 +ryokosan,28 +ryannari,28 +russia (dangan neko),28 +rure,28 +rumi morimiya,28 +rum,28 +rugal bernstein (cosplay),28 +rubbish selecting squad,28 +rp (necho),28 +rover (animal crossing),28 +roulette roulette,28 +rossa (pixiv27548922),28 +roruri,28 +ropi (yyuichi29),28 +rooster tail,28 +ronen,28 +rommeling,28 +rollermet,28 +rokuichi (bluelamp61),28 +rodin,28 +rococomm123,28 +roco617,28 +rocket engine,28 +robosuke,28 +robert de jesus,28 +rnknmrm,28 +rj (lingshih10),28 +rizel,28 +riza dxun,28 +riyu (gauzama),28 +ritos tortilla,28 +rita mordio (exchange student),28 +ririfa,28 +riri,28 +riretsuto,28 +rinbukyoku,28 +rikkukku,28 +rika (kakera),28 +riiya (akazukin chacha),28 +riffey,28 +rick hunter,28 +rick astley,28 +richmond (azur lane),28 +reyson (fire 
emblem),28 +retar,28 +rengoku ruka,28 +ren mizuha,28 +remiss (trouble spirit),28 +remimiku,28 +relations sisterxsister,28 +rei (persona q),28 +regis (world flipper),28 +reg (artist),28 +red leggings,28 +realdragon,28 +rdc7229,28 +ray of grace,28 +rare (user vxhu8375),28 +rance vi,28 +rance 01,28 +ranamon,28 +rakko-nabe,28 +rain yoo,28 +raiden (metal gear) (cosplay),28 +raideen (mecha),28 +raide,28 +rafale revive custom ii,28 +radiohead (radio paranoia),28 +rachel (seisou fude no tabibito),28 +r3d,28 +quartz (gemstone),28 +quarter 1404,28 +qqmng,28 +qb,28 +pyytato,28 +pyonsan,28 +pydiyudie,28 +purple armband,28 +protagonist (tokimemo gs4),28 +prnt,28 +print cup,28 +print bandaid,28 +princess aurora,28 +primitive link,28 +pp-91 kedr,28 +pp-19-01,28 +poyamu,28 +porontyo 07,28 +pokasu,28 +pokashi,28 +pocky1202,28 +playstation move,28 +plant wings,28 +plant sex,28 +plain girl (kamisimo 90),28 +piyon (hunter x hunter),28 +pisaro,28 +pipi20211026,28 +pipette,28 +pinky,28 +ping-yi,28 +pikunoma,28 +pierro (genshin impact),28 +pieces / yurikago no canaria,28 +phorni (symphonic rain),28 +philippine flag,28 +philia (sao-alo),28 +phantom blade (game),28 +phantom,28 +phantasy star iv,28 +petra (granblue fantasy),28 +petopetosan,28 +peter pan (adtc7243),28 +peter huu nguyen,28 +perrine h. clostermann (cosplay),28 +perapera,28 +peppa pig (series),28 +pepo (absolute1123),28 +pepelogoo,28 +peach print,28 +paw cutout,28 +paul phoenix,28 +patchoung (aoshima),28 +party time gold (idolmaster),28 +parrying,28 +paraholix,28 +paper towel,28 +paper roll,28 +papas,28 +panzerschreck,28 +pantarou,28 +pandora smith,28 +pandemic14,28 +pandakorya,28 +paku,28 +pakky (bachera),28 +pajamei,28 +painting woman,28 +p ion,28 +ozawa,28 +oyuzaki (ayuzaki),28 +ouija,28 +otoma may,28 +otokonoko heaven,28 +os (kazos),28 +orino yushi,28 +orimoto asami,28 +orietta chrono istarica ginasteele,28 +orange robe,28 +open wetsuit,28 +open in internet explorer,28 +ootori kyouya,28 +ootori akio,28 +oomune binta,28 +onyxia,28 +onoderasan,28 +only sense online,28 +onion pikupiku,28 +onigami mutsumi,28 +onemu,28 +oneko,28 +one smoke,28 +on cloud,28 +omochi kuenai,28 +omix,28 +omega strikers,28 +omega labyrinth,28 +omae no pantsu wa nani iro da!?,28 +okota1869,28 +okome rice,28 +okita juuzou,28 +oki kiki,28 +okayu (deleted),28 +okasira (superkurounmo),28 +okamochi (container),28 +okamired,28 +ojisan f,28 +ointment,28 +oharu (mushibugyou),28 +ogashira hiromi,28 +ogamiya jin,28 +odessa silverberg,28 +octavia,28 +obyaa,28 +obakeart,28 +o-djiko,28 +nzwt,28 +nyungsep,28 +nymph,28 +nyantamaran,28 +nyanko days,28 +nyaasora,28 +nva222,28 +nurumaru yu,28 +nuri kazuya,28 +nuresuke paradise x,28 +nun (mdf an),28 +nuebunny,28 +nudiedoodles,28 +nsio,28 +novelty,28 +noshiro (xanadu's eventide) (azur lane),28 +noriko (ni noriko),28 +nori (arara105),28 +nopo (patter),28 +noora to toki no koubou,28 +nooko,28 +nonosaki akiho,28 +nonamejd,28 +nokonoko,28 +nokogiriotoko,28 +noitama,28 +noir eku,28 +noin (shinrabanshou),28 +nogchaminteu,28 +nodj,28 +nocunoct,28 +noah noah,28 +nkraae,28 +nisson,28 +nishimori misa,28 +nishikikope,28 +nishijou takumi,28 +ninton,28 +nine (fft-0),28 +nil-eve,28 +nikawa 99-do,28 +niji sugi,28 +niigaki hina,28 +nickleflick,28 +nicholas (granblue fantasy),28 +niche (tegami bachi),28 +niboss,28 +nibelart,28 +ni no sakura kouchou (module),28 +nfr,28 +neri gozu,28 +nereid (last origin),28 +nelke von luchetam,28 +nekurokonomi,28 +nekoro,28 +nekodama2000,28 +neko punch (user hddm3373),28 +nekito,28 +negishi hideto,28 +neena hagen,28 +neemui,28 
+necrozma (normal),28 +nazono mito,28 +nayuzu,28 +nayuzaki natsumi,28 +nayaase beleguii,28 +nawakena,28 +natsuno (natsunosho),28 +natsume kako,28 +natsume3304,28 +natsuhina,28 +nashida oriha,28 +naruko shoukichi,28 +narukami yuu (cosplay),28 +naruho,28 +naratani,28 +naraku,28 +napolitan,28 +naomasa (horizon),28 +nao suke,28 +nano (cherry line),28 +nannyou dojin,28 +nanao (nanao1023),28 +nanananona,28 +nanamiya,28 +nanamira bi,28 +nanami (fuku),28 +nana muted xilofon,28 +nana-shi hostler,28 +namu (112namu),28 +namiko817,28 +nameplate,28 +nako (nekono shippo75),28 +nakatsuru katsuyoshi,28 +nakano azusa (cosplay),28 +nakada eiji,28 +nai (erumaria),28 +nagase miyako,28 +naco (manacool),28 +nabeniuitagomi,28 +na sera,28 +myon2,28 +my chemical romance,28 +mutsuki masato,28 +mutenka (plfgb),28 +murakami murako,28 +muraji,28 +muneate removed,28 +mumere (9 xa9),28 +muireko,28 +mugityax,28 +mugi (user khzh5853),28 +mudbray,28 +mtmt mtmt,28 +mozu-k,28 +motto! haramase! honoo no oppai chou ero appli gakuen!,28 +mottirimuttiri,28 +motti (motthi0711),28 +mosu2,28 +mosquito coils,28 +mos (mostfunny),28 +morita yukari,28 +mori kaoru,28 +morgrem,28 +moonku,28 +monster monster,28 +monoma neito,28 +monika ellmenreich,28 +monica grace,28 +money bath,28 +mondragon rifle,28 +momonosuke (one piece),28 +momohime (dancing blade),28 +molly (skullgirls),28 +mokei,28 +mojya,28 +mojaranmo,28 +mohato official,28 +mofuruo,28 +moemoepiano,28 +mochizuki usagi,28 +mochizuki hull,28 +mochitsuki usa,28 +mochida yuka,28 +mobile legends: bang bang,28 +mo yu de jiaozi,28 +mo (mokatampe),28 +mmorpg,28 +mmmgnsn,28 +mkt (pixiv15187870),28 +mk23 (new term begins with a meow) (girls' frontline),28 +mizunashi tomo,28 +mizuabi kamome,28 +miyoshino shiki,28 +miyano tomochika,28 +miyamoto sakura ga kawaii dake no shousetsu,28 +miyamoto sakura,28 +miyamoto musashi (vagabond),28 +miyamori raira,28 +miyama09215,28 +miwa shuuji,28 +miwa satori,28 +miura tadahiro,28 +miura kazuko,28 +miura daisenpai,28 +miu (dumbxaela),28 +mittsu,28 +mitsurugi asuka,28 +miss safety,28 +mishima lisa,28 +misaki mizuki,28 +miruro (futagohime),28 +mirk,28 +mirei-yume,28 +mirage precure,28 +miomix,28 +mint (summon night),28 +mint (mintlemonade3),28 +minori (m-noir),28 +mine thrower (ore no bakudan),28 +minazumi kasane,28 +minazuki izumi,28 +minazuki aqua,28 +minatsuki (sitsu),28 +minase sakurako,28 +minase kuuru,28 +minamino karen,28 +minami-kamakura koukou joshi jitensha-bu,28 +mimoe,28 +milk-san,28 +milda (grandia),28 +mila alexander,28 +mika uni,28 +mika (moc828),28 +mijinko (barabadge),28 +mihama hitsuji,28 +migumi,28 +migita,28 +midori no umi,28 +midori (greenwood),28 +midori555,28 +michishio (azur lane),28 +michiko (identity v),28 +miaohik,28 +mi 398,28 +metaru maccha,28 +meta gun,28 +meso (gjmeso),28 +meryl santos,28 +mercedes marten,28 +melty (shining hearts),28 +melon-chan (cosplay),28 +melay (khrssc),28 +megumi cv,28 +megumi 222,28 +megumi-square,28 +megaton musashi,28 +mc-4,28 +mc,28 +mayano top gun (formation: party) (umamusume),28 +mausratten,28 +maud0239,28 +matsumotoasumu,28 +matsumoto katsuya,28 +matsuko (kazu-koto),28 +matsukan (dug pile),28 +matoma,28 +mate rin,28 +matatabi nia,28 +matatabi kikuno,28 +mata,28 +masurao bc,28 +massugu go,28 +mashilemo,28 +masatome,28 +masami-san (regdic),28 +marvelousaql (neptune series),28 +maruno,28 +maruboku,28 +markus (gyee),28 +mark sein,28 +marjoly,28 +marionette (ragnarok online),28 +mario wibisono,28 +marii pily,28 +mariel (wild arms),28 +marie mushroom,28 +marie (onegai teacher),28 +maria torres,28 +mari 
(rodoney-kiara),28 +mare6ns,28 +mare's leg,28 +march (trusty bell),28 +marcel galliard,28 +maquia (sayoasa),28 +map (map imas),28 +maomaozi,28 +manto (inazuma eleven),28 +manticore (girls' frontline),28 +mannish boy,28 +manji (nanakirio),28 +mani of machine,28 +manatsu daichi,28 +mamt56,28 +mamoi,28 +mami (hidamari sketch),28 +mali,28 +makoto (blue archive),28 +makokb,28 +makkuro rokusuke,28 +makino,28 +makimura minami,28 +make maketan,28 +makai tenshi djibril 3,28 +majesty (dungeon and fighter),28 +maintenance,28 +mai jin,28 +mai (maittingu),28 +mahou tsukai no hako,28 +mahou shoujo western girls,28 +mahou shoujo tai arusu,28 +magpul fmg-9,28 +mages.,28 +mageres,28 +mafuyun,28 +mafuri,28 +mafti nabiyu erin,28 +mafia (holostars),28 +made in heaven (stand),28 +macne coco,28 +machida ayaka,28 +machi (machi333),28 +macchou (tsubonekoya),28 +mabera,28 +maare (moyori),28 +maaranto,28 +ma2 ereki,28 +m-hit,28 +m-18,28 +lyiet,28 +luxurious sea-lord,28 +luvriel,28 +lure ball,28 +lunar eclipse,28 +luigi64,28 +lucille ernella,28 +lucifer (monster strike),28 +lua klein,28 +lovelyme,28 +love marginal,28 +loose skirt,28 +lolo (vtuber),28 +lolitaii,28 +lolinnez.,28 +logicon,28 +locked,28 +localized gravity,28 +llama8,28 +little twin stars,28 +littiecy,28 +lin lin (one-punch man),28 +lilya kyomi (fiwivt),28 +lilith bristol,28 +lieze aria,28 +liedein,28 +libra (fire emblem),28 +liang chan xing make tu,28 +li luo,28 +leung lik hang,28 +leonidas sun,28 +lennys,28 +lemonade kokoi,28 +leilah (ragnarok online),28 +leele,28 +leather chair,28 +layna scentriver,28 +last man battalion,28 +large head wings,28 +large forehead,28 +large cross,28 +lancer servant,28 +lady and the tramp,28 +lace bikini,28 +l!sten,28 +kz (kazuma-rising),28 +kyu sugardust,28 +kyoudaidon (sex),28 +kyoku hakaimado,28 +kyle marlon,28 +kyko,28 +kyarage (soda),28 +kuuron (moesann17),28 +kusuriuri (dark),28 +kusunoki tomoe,28 +kururu (koisi122),28 +kurukuruchocolate,28 +kuropani cos,28 +kurokimono001,28 +kuroki francisca yuria,28 +kuroi ginko,28 +kurogane shizuku,28 +kurogane naoto (churushiko),28 +kuroeart,28 +kurita (kuritanatsu),28 +kurisu takumi,28 +kurioshi fio,28 +kurashiki nanka,28 +kurashiki (kas0),28 +kumanoi (nichols),28 +kumagai chisato,28 +kukuri (ahagon),28 +kujou miu,28 +kujou fumi,28 +kuiqiu kq,28 +kuchifusa yogiri,28 +krtmtm,28 +kris (fire emblem) (male),28 +kozsen 810290,28 +kozakura (urasekai picnic),28 +koyomi (masayo),28 +koutetunamekuji,28 +kousaka daisuke,28 +kourou (kouroukun),28 +kouga (hipporit),28 +koube masahiko,28 +kotuzui yositune,28 +kotori (gokigen iori),28 +kotomine kirei (sensha otoko),28 +kotomickey,28 +koss,28 +korigitsune,28 +korg triton,28 +koougi,28 +kooten bergh no youhei,28 +koopa fortuna,28 +konosaka kirino,28 +konohana suzuka,28 +konoe,28 +konishi naoki,28 +kongou (warship girls r),28 +komupi,28 +komoreg,28 +komora (huran0729),28 +kokodayo,28 +koi (nisikicoi),28 +kohinata (sdu0628),28 +koebushi (mat),28 +kochou shinobu (cosplay),28 +kobiyuun,28 +ko-yan,28 +knsei,28 +knot gag,28 +knbd,28 +knb (nicoseiga53198051),28 +kmkr,28 +klang,28 +kiui,28 +kitty cat katarina,28 +kitakaze higashikaze,28 +kitagawa onitarou,28 +kitagawa mikio,28 +kishou seireiki,28 +kisaragi eiji,28 +kiritachi,28 +kirishima hijiri,28 +kirishiki sunako,28 +kirigiri kyouko (cosplay),28 +kiri (0218htt),28 +kirby air ride,28 +kio sayuki,28 +kintarou,28 +kinos (kw00789),28 +kinokino,28 +kinohe,28 +kings canyon,28 +kingin shishou,28 +kingbawoon,28 +king clawthorne,28 +killer whale (kemono friends) (stylecase),28 +kikuri yuki,28 +kikuchi moa,28 
+kihou kanshouzai,28 +kiduki kaya,28 +kicham,28 +kibstar,28 +kibagami genjuro,28 +ki-84 hayate,28 +khrnnfz,28 +kgctcg,28 +kezu,28 +key trash,28 +ketsui no hikari,28 +ketchup ninja,28 +kekel,28 +keke (kotowari),28 +keito4f,28 +keigen hichou,28 +keifuku (tatsuki),28 +kazu (k no kobeya),28 +kay (girls und panzer) (cosplay),28 +kawashima yaruki,28 +kawasemi (pocorit),28 +kawamoto satsuki,28 +kawa takatoshi,28 +katyusha's mother (girls und panzer),28 +katsuragi takuto,28 +katagiri mai,28 +kasumi (magical girl) (princess connect!),28 +kasugazaki yukino,28 +kasshoku-chan (katuo1129),28 +kassan (kassan 5a),28 +karinka,28 +karin (princess connect!),28 +karate shoukoushi kohinata minoru,28 +kaosu (silverworld),28 +kanon (sennen sensou aigis),28 +kanobitch,28 +kannagi yuuma,28 +kanitama,28 +kani (kkk kani),28 +kaneda tamago,28 +kanata mako,28 +kanae arisu,28 +kanadome miyako,28 +kamiya ueshi,28 +kamina koharu,28 +kamen rider gatack,28 +kamen rider chaser,28 +kamen rider beast,28 +kamen rider amazon omega,28 +kalun (fwme3378),28 +kaku (walletbreaker),28 +kaka (kirby126),28 +kaitou sentai lupinranger vs. keisatsu sentai patranger,28 +kaisoku hirosuko,28 +kaiki (osuppai),28 +kaicchi,28 +kai (dorohedoro),28 +kai-o,28 +kaguyano,28 +kagi (dicedkey),28 +kage no utage,28 +kagami shiori,28 +kadokura (golden kamuy),28 +kachi (kachi5100),28 +kac-pdw,28 +k31 (girls' frontline),28 +k.sho,28 +k.k.,28 +k-me,28 +juvecross,28 +junkos,28 +junko day,28 +juffles,28 +judith (barely-there black bikini) (tales),28 +juder,28 +jubilee,28 +js05 (girls' frontline),28 +jouyama yui,28 +jonathan kent,28 +john r,28 +joe (megalo box),28 +joe (crusher joe),28 +jjuha6,28 +jiyasu,28 +jimiko (yamasuta),28 +jiguang zhi aoluola,28 +jigsaw (character),28 +jigoku-san,28 +jiang xin,28 +ji ruxue (hua jianghu),28 +jewel (suikoden),28 +jessica (clivia) (arknights),28 +jenie,28 +jeje (pixiv12541114),28 +jeanne francaix,28 +jeanne d'arc alter (swimsuit berserker) (fate) (cosplay),28 +janus zeal,28 +jamesmikopi,28 +jacquelin de leon,28 +jack (jacknoeaka),28 +jabulani,28 +iyumekai,28 +ixion saga dt,28 +iwakiyamayukisatoshironanogojuurokushi akira,28 +iwa (iwa000ima),28 +ivolay,28 +ivlis,28 +itsuku,28 +itou yoshiaki,28 +itoda (spica),28 +itakurakou1993,28 +itai,28 +itaba yumi,28 +isshii13,28 +israeli flag,28 +isoroku (gojuuroku),28 +isobe,28 +ishikawa yui,28 +ishikawa purin,28 +"isekai izakaya ""nobu""",28 +irokawa ruki,28 +iroiro,28 +iroha (ff11),28 +iroai midodo,28 +iris (neco),28 +ippo tsuko002,28 +ipeulo,28 +ioruko,28 +iori 4kagetsu,28 +iop,28 +invidiata,28 +inuhiko,28 +inu (mezonsidepark),28 +instruction manual,28 +inplick,28 +inosia,28 +indiana jones (series),28 +inasaba,28 +inaba masao,28 +imuzi,28 +impforhire,28 +impasto,28 +imijikumo36,28 +imari,28 +imai takahiro,28 +ilia silvestri,28 +ikuya@,28 +ikusabe lu,28 +ikki (amnesia),28 +ikishima midari,28 +ikari warriors,28 +iii (yu-gi-oh!),28 +iduhara,28 +idol wars z,28 +ico (engawa roman),28 +ichika (quaternionxxx),28 +i-tsd,28 +hyugakomati,28 +hyafumi,28 +hutaba haru,28 +hussar,28 +husky (artist),28 +hume (artist),28 +huge 0330,28 +hsxxx,28 +hozuka (kadokawa),28 +houmornin,28 +hoshino reiji,28 +hoshino asuka,28 +hoshino aki,28 +hoshino (illyasviel),28 +horumu (norubahu),28 +horocca,28 +hongchajun,28 +hong xiu ying,28 +honeyworks,28 +honduran white bat (kemono friends),28 +honda (obon),28 +homura chika,28 +homocacti,28 +hokuto no ken shinpan no sousousei kengou retsuden,28 +hogehoge0710,28 +hn (honyori ta),28 +hiyori (higanahannnti),28 +hiura r,28 +hitoiki,28 +hisuian voltorb,28 +hisagi 
shuuhei,28 +hisaba iori,28 +hirono (hxze4434),28 +hiraoka koushi,28 +hirai hisashi,28 +hio (hiohio0306),28 +hino kuu,28 +hinduism,28 +hina (akchu),28 +himukai rin,28 +himegami shino,28 +himasen,28 +himaro,28 +hima (nichikan),28 +hijiri rue,28 +highway star (stand),28 +higemorigen,28 +higashikata jobin,28 +hiera12,28 +hiei (moonlit cruise) (azur lane),28 +hiei (kancolle) (cosplay),28 +hibino mina,28 +hibachi (dodonpachi),28 +hey (bluebell),28 +hero shot (splatoon 2),28 +hero cantare,28 +heppokokun,28 +henjo ~hen na joshi kousei amaguri senko~,28 +helmet (trailblazer003),28 +hello hello world!,28 +hekonda kan,28 +heiyuen,28 +heifetz,28 +hei kuang jun,28 +hedge trimmer,28 +hecktop,28 +hearts of iron,28 +hdoom,28 +hazuna rio,28 +hayapi,28 +hayami rinka,28 +hayami aki,28 +hatsuru 826,28 +hashimoto (yanagi-momo),28 +hashiba natsumi (animare),28 +hasegawa (hase popopo),28 +hasamimushi,28 +haruto (nyannzou789),28 +haruno (macoro),28 +harunatsu akifumi,28 +haruhana aya,28 +harudori tsugumi,28 +harstfazn,28 +haribote elegy,28 +harami,28 +hao (udon),28 +hanson (nadia),28 +hansel (grimm),28 +hanr10,28 +hanekawa tsubasa (cosplay),28 +hand truck,28 +hand on ankle,28 +hanamakura,28 +hamster tail,28 +hamiko (hakogardenmiko),28 +halfmoe,28 +hakoiri musume ~muku na shoujo wa shiroku somaru~,28 +hakisou,28 +hair bun maid (mdf an),28 +hair behind eyewear,28 +haikuro,28 +haibara you,28 +hagino chiyoko,28 +haegiwa gonbee,28 +hachijou tooya,28 +h28,28 +gyokudama (niku),28 +guu (hakahonoo),28 +gusuku luna,28 +gust-san,28 +guozimiao,28 +gundam wing dual story: g-unit,28 +gun decal,28 +guiyu (nocaudal),28 +guilty princess,28 +gucce222,28 +guardian cross,28 +guan tang baozi,28 +ground pound,28 +greyy (arknights),28 +grey (mega man),28 +greenwood,28 +green green,28 +greater dog,28 +great teacher onizuka,28 +great kichi,28 +great grail,28 +grateful shell collector,28 +grani (miraculous moment) (arknights),28 +goushu,28 +gore screaming show,28 +good twins day,28 +golion (mecha),28 +golden gate bridge,28 +godrick the grafted,28 +god hunter,28 +goatwillow,28 +go to kozukuri,28 +glass floor,28 +gladiolus,28 +gisuka yan,28 +gisarme,28 +ginyasama,28 +gimme2000,28 +gillian (va-11 hall-a),28 +giko,28 +gigginox (armor),28 +gigantamax alcremie,28 +ghangaji,28 +gevjon,28 +get down (meme),28 +gerumaga,28 +gerik (fire emblem),28 +gensei ruri,28 +gemini (vocaloid),28 +gelgoog s char custom,28 +gel (guyver123),28 +gattame,28 +gashadokuro,28 +garmr (housamo),28 +galactic nova,28 +gaius (shadow of the colossus),28 +gaien (jin morisono),28 +gagumber,28 +gado-boa,28 +gabriel pavani,28 +gaa (butsugen),28 +g7 scout,28 +fysc,28 +fuyuhi tsukika,28 +fuxiyu,28 +fuwasn1545,28 +fuuka (toy jump),28 +fuufu ijou koibito miman.,28 +fuu (koneko no yomeiri),28 +future knight,28 +futami (mg42fw190d),28 +futaba no taiko,28 +futaba lili ramses,28 +fusou (meta) (azur lane),28 +fury (movie),28 +furihata ai,28 +furball,28 +fuoore (fore0042),28 +fundoshi day,28 +fumuna,28 +fumatake,28 +fukai ao,28 +fujiwara kaoruko,28 +fujitsuna,28 +fujisee,28 +fujimoto (ponyo),28 +fujimi yomi,28 +fujimi nao,28 +fujii daisei (artist),28 +fuiyu (feuille0818),28 +fubuki (senran kagura),28 +fruit hat,28 +frozen-sad,28 +frenchthenhen,28 +freesia (granblue fantasy),28 +framboosi,28 +fox husband (doitsuken),28 +for the better right? 
(meme),28 +for-u,28 +food fight,28 +food-themed necklace,28 +food-themed hat,28 +foo fighters (stand),28 +follett (deathsmiles),28 +folko,28 +flyers,28 +flow (splatoon),28 +flora (rariatto),28 +fishkitty,28 +fish hood,28 +firepower,28 +fire flower (vocaloid),28 +fiona gilman,28 +felox08,28 +felius arwin,28 +feleven,28 +faust (makai shin trillion),28 +family tree,28 +fallout (black torch),28 +falangies,28 +fake sleeping,28 +fajar kurniawan,28 +failnaught (fate),28 +f con,28 +extra nipples,28 +exciting animal (love live!),28 +excalibur face,28 +evra von,28 +ever (nann2013),28 +evelynn (league of legends) (cosplay),28 +eve moonlit,28 +eunyoo,28 +eumme tongtong gu-i,28 +eternita,28 +espio the chameleon,28 +escape ad,28 +error (errorless),28 +eric ueda,28 +eply,28 +ep (emio parn),28 +eosinophil (hataraku saibou),28 +enki (fate/prototype),28 +enki (dragonfire),28 +enjelicious,28 +engineer kim,28 +engine sentai go-onger,28 +enemy uchigatana,28 +enderspain,28 +ender dragon,28 +emu ichigo,28 +empty (mn3k yo),28 +emmikn,28 +emile elanos,28 +emi star,28 +em crazy,28 +elran,28 +elliot march,28 +elleco,28 +elizax9x,28 +elid (girls' frontline),28 +elhddmois,28 +electra (xenoblade),28 +elbing (the throne of misfortune) (azur lane),28 +eksistere kyrenia,28 +ejection,28 +eishin flash (collect chocolatier) (umamusume),28 +eiko (tukino),28 +eichikei,28 +ehohin,28 +edward keddy,28 +eden's ritter grenze,28 +ebifryman,28 +eaves,28 +eastern and little nature deity,28 +dynamo heart,28 +dvd player,28 +duo kawa,28 +dunk tank,28 +duel love,28 +drive shot,28 +dread,28 +dragra,28 +dragon ball z dokkan battle,28 +dracozolt,28 +dr. gero (dragon ball),28 +doyou tengoku pikaraji,28 +download link,28 +double facepalm,28 +dot (poni taoyaka),28 +dorshe,28 +doraemon: nobita to tetsujin heidan,28 +doomie1,28 +donguri big,28 +donburimeshi,28 +dog hate burger,28 +dodecagram,28 +doala,28 +do2mi doreimi,28 +dna man (arms),28 +dktaka,28 +diving mask around neck,28 +disc,28 +diode (0 division),28 +dino (blend s),28 +digiegg,28 +diao (nrays),28 +diablo (isekai maou),28 +devy,28 +devastator (transformers),28 +desuno,28 +despuntater,28 +designer ojisan,28 +densha otoko,28 +delux drawings,28 +delmore,28 +deliciousmeatart,28 +deko (kamemaru),28 +debi,28 +dealesis,28 +day mello,28 +dawy,28 +dave strider,28 +daroon5,28 +dark talker,28 +dark shadow,28 +dark lemonade,28 +dark fuu,28 +dantahi01,28 +dande cat,28 +dancing blade,28 +dal (edalnem),28 +daiwa (daicon),28 +daisy mae (animal crossing),28 +daisy (flower knight girl),28 +daisufuumi,28 +daimon gorou,28 +daigo,28 +dachshund (kemono friends) (nyifu),28 +daccubus (pen),28 +daburoku,28 +dabu (dabuchan),28 +d.j (dwcg2854),28 +cylinder,28 +cut bangs,28 +cure sunshine (super silhouette),28 +cupcake-chan,28 +cundodeviant,28 +cummerbund,28 +cumdrip through panties,28 +cst,28 +crystal beast ruby carbuncle,28 +crr001,28 +crown hat,28 +crotchless shorts,28 +crossbone gundam x-2,28 +croire,28 +crochet,28 +crested hair,28 +craven (azur lane),28 +covers (kill la kill),28 +cool4noodle,28 +cookie (ppyf5328),28 +continue,28 +conception: ore no kodomo wo undekure!,28 +comrade stalin,28 +comiket 98,28 +comiccho,28 +comic sigma,28 +coma (light825),28 +color bullets,28 +colin tan,28 +codename47,28 +cocoa bean,28 +coccix,28 +cocaine,28 +clov3r,28 +cloak lift,28 +clive (fire emblem),28 +climaxmukr,28 +clevelad (azur lane),28 +ciyana,28 +circle ed,28 +cigarette candy,28 +chyraliss,28 +chuchumy,28 +chrome (mon-musu quest!),28 +chouno maika,28 +choukaku,28 +chizuko (chiduk0),28 +chiyomi,28 +chiyo (shuten dj),28 
+chiyo (chidori),28 +chitose (zenkou),28 +chise (cym23730),28 +chisato madison,28 +chiro (suzuka98),28 +chiro (chi-bu-ko),28 +chinese gundam,28 +chinatsu (kuroonehalf),28 +chillasan,28 +child of light (game),28 +chikuyama,28 +chikinman,28 +chikinan tarou,28 +chietori,28 +chicken wing,28 +chicken (kemono friends),28 +chibiki,28 +chaya mago,28 +chascoby,28 +charon (alchemy stars),28 +charisuke,28 +charge rifle,28 +charcoal,28 +character badge,28 +chane (nap cat),28 +chai mao,28 +chagama (pot),28 +cha-cha,28 +cero,28 +cellophane,28 +celistia ralgris,28 +cecil (wing r),28 +cathayan,28 +cat penis,28 +castlevania: lament of innocence,28 +castlevania: harmony of dissonance,28 +castform (snowy),28 +cassie (paladins),28 +casserole,28 +cartolaio,28 +carrotsprout,28 +carnival mask,28 +carabiniere (azur lane),28 +capitano (genshin impact),28 +canon (nyori),28 +canas (fire emblem),28 +cai,28 +cage unlimited,28 +c (pixiv3064042),28 +c4,28 +buyong22,28 +bunny black,28 +bulma (future),28 +bukkan,28 +buisen,28 +buffet,28 +buccaneer (fma),28 +bryony (pokemon),28 +brother tomita,28 +breakrabbit,28 +brave princess (ishiyumi),28 +boston dynamics,28 +borzoi,28 +bokkori,28 +bluetheater,28 +bluebreed,28 +bluebird (bluebird90),28 +blue haired girl (kamisimo 90),28 +blue (saga frontier),28 +bloody panther (last origin),28 +blood the last vampire,28 +bliss (image),28 +blacksteel worldwide (arknights),28 +black negligee,28 +black angel (elona),28 +bipup-hola,28 +bingbing,28 +billy (peach momozen),28 +bills,28 +bikininja,28 +big order,28 +beniko08,28 +benienma (third ascension) (fate),28 +benevole,28 +belted skirt,28 +bekko,28 +beastlord (sword),28 +be nantoka,28 +bbjj 927,28 +batako,28 +bassdrum,28 +baron nashor,28 +baritone (suite precure),28 +bard (league of legends),28 +baphomet jr,28 +bambi nano,28 +bad idea,28 +backscratcher,28 +b.va (overwatch),28 +azumi (madogiwa bocchi seki),28 +azuma ren,28 +azukiman,28 +azuki kurenai,28 +azazel (shingeki no bahamut),28 +ayumi (as0206),28 +ayase shichikai,28 +ayame (norie11),28 +awara chikako,28 +average,28 +aurore (takatou sora),28 +atsumi haru,28 +atobe keigo,28 +asuna elmarit,28 +asso,28 +asou renji,28 +ashina isshin,28 +ashikaga tamane,28 +aselia bluespirit,28 +asanuma katsuaki,28 +asano (shikisokuzekuu),28 +asagi (seal47),28 +arya stark,28 +arya-aiedail,28 +aroha (aroha390),28 +arka91,28 +arima yuu,28 +ariel (kagemusha),28 +ariakk,28 +argrim,28 +argon (exys),28 +areishia spirit academy uniform,28 +areazero,28 +arcedo,28 +arcbun,28 +arcafterdark,28 +arc the lad twilight of the spirits,28 +aratakosu (tako's),28 +aratake,28 +arare (op ed 000),28 +araragi yuuichi,28 +arane (lolitwin),28 +ar (maeus),28 +apupu,28 +aozora (syun8823),28 +aotan (aorin114),28 +aoinu (shuumatsugeki),28 +aohigeko,28 +aoba (warship girls r),28 +ao orin ringo,28 +anonymous (nijisanji),28 +annerica,28 +ankoku no ojisan,28 +angel lily,28 +anemia kwus,28 +anastasia (pixiv fantasia),28 +anaconda,28 +amenomori howa,28 +amefurashi,28 +ame246,28 +ambitious elf jinx,28 +amane0213,28 +amamoru 21,28 +amami reiko,28 +amakasu hisone,28 +amai shirou,28 +amagi daichi,28 +ama ane,28 +am (star wars),28 +alternate headgear,28 +alolan raticate,28 +allison (summer lesson),28 +allergy,28 +algodoo,28 +albus dumbledore,28 +akuyuu (akuyuworld),28 +ako (so crazy!?),28 +akkirarara,28 +akkii (meragold),28 +akk1,28 +akiya yukie,28 +akiko-sou,28 +akie (44265104),28 +aki yamane,28 +aki minoriko (cosplay),28 +aki (o2x x2o),28 +akeno06,28 +akebi miso,28 +akatoki 2! 
-tsumugu mahou to koboreru hikari-,28 +akasuga moyashi,28 +akasakak,28 +akasaka ryuunosuke,28 +akari (angel) (princess connect!),28 +ajrtkf44,28 +ajirou,28 +ajapar,28 +aizaki (aizkaizk),28 +aion (show by rock!!),28 +aikagi,28 +aiee,28 +aibeya,28 +ai (kaminai),28 +ahoyhoi,28 +ah zhong (mountain han),28 +agent 7,28 +agatio (golden sun),28 +agata no michi,28 +affliction (darkest dungeon),28 +af (afloatisland),28 +abysswolf,28 +abitu,28 +abdominal stretch,28 +aabitan,28 +a iri a,28 +a-iueo,28 +a-by,28 +a-545 (girls' frontline),28 +9ml,28 +92kuni92,28 +90mm single high-angle gun mount,28 +88 (vashperado),28 +7meill,28 +7eddy,28 +666haorare666,28 +60 (klioo1),28 +50k v3,28 +502nd joint fighter wing (emblem),28 +36shiri,28 +369-chan,28 +30 minutes missions,28 +2k-kun,28 +1mm (norizo),28 +0nodera,28 +07touka25,28 +zuoyou,27 +zou azarashi,27 +zorzero,27 +zombie neko,27 +zo-wa,27 +zhuge kongming (paripi koumei),27 +zhoumo fangjia,27 +zhang chunhua,27 +zhan ji tian xia,27 +zeta (vtuber),27 +zerorespect bot,27 +zenryoku batankyuu,27 +zenmai,27 +zenisu,27 +zeabolos,27 +zdl xiaobai,27 +zappa,27 +zangaku,27 +zagizagi,27 +zabuton (mgdw5574),27 +z-s-e,27 +z-ki,27 +z'gok char custom,27 +yyukke,27 +yuzutouhu ika,27 +yuzukiaz,27 +yuzuki kotona,27 +yuzuki kisa,27 +yuzuki iori,27 +yuzu juncgr,27 +yuyayuyo,27 +yuuto (chakokin),27 +yuutii,27 +yuusha yoshihiko to maou no shiro,27 +yuusha exkaiser,27 +yuuki uyu,27 +yuuki sawano,27 +yuuki (yuuk yume),27 +yuubokumin,27 +yuubari gogo,27 +yuteke key,27 +yuta (kchimuuuuu),27 +yusa tk74,27 +yusa makoto,27 +yusa1019,27 +yunico,27 +yumina elnea belfast,27 +yuma,27 +yukito mayumi,27 +yukishiro haku,27 +yukisaki mayui,27 +yukinohito (koutyanomitai),27 +yukiko (leovioykk),27 +yukihashi,27 +yukichi nya,27 +yuki (white garden),27 +yuki (idolish 7),27 +yui (josou jinja),27 +yuh 7929,27 +yu1,27 +ys iii wanderers of ys,27 +yow,27 +youryokuso (chlorophyll),27 +youngsok,27 +youmu (tomgoku2),27 +yosshy,27 +yoshitomo (koucha),27 +yoshinon (yoshinon kotori),27 +yoshinaga-san'chi no gargoyle,27 +yorutsuki (sakurekichan),27 +yoru (yoruyonaka),27 +yorck (breaker under the blood moon) (azur lane),27 +yoneya yousuke,27 +yone (league of legends),27 +yolang,27 +yokozawa (pyu-tohuku),27 +yokojima kemomi mi chuushin,27 +yogiri (hololive),27 +yilan,27 +yeyong,27 +yeng-hua,27 +yelansu,27 +yelan (genshin impact) (cosplay),27 +ye olde zipangese,27 +yarakuru,27 +yaoshan shi,27 +yano akane,27 +yamori 511,27 +yaminokisan,27 +yamiarisu,27 +yami freyja,27 +yamazaki jun,27 +yamasaki masato,27 +yamagara,27 +yamada akihiro,27 +yamada (onigori105),27 +yamada (iroha97151188),27 +yamachi (xadp7533),27 +yama (rabbit room),27 +yam spectrum,27 +yam (nekobeya),27 +yakushiji ryouko,27 +yakuoyoso,27 +yagi norihiro,27 +yagami tsurugi,27 +xun yu (1184527191),27 +xtransceiver,27 +xochi (nueeen6978),27 +xila qian tang shi,27 +xieyanbbb,27 +xiebaowang,27 +xiaoai,27 +xiao qiang sang,27 +xiao dao jun,27 +xander (spring) (fire emblem),27 +wu dong qian kun,27 +wrys (fire emblem),27 +wrecking ball,27 +wrapping paper,27 +woshihedawei,27 +woruta (soloistlist),27 +wonkrin,27 +wjstpwls4,27 +wireless mouse,27 +wiping hands,27 +winged sword,27 +willow sage0000,27 +wicker furniture,27 +whitek,27 +white sweatshirt,27 +white facial hair,27 +white dove,27 +wei,27 +webley-fosbery automatic revolver,27 +waya,27 +water in navel,27 +watari (hasumi rina),27 +watanabe akari,27 +watabow,27 +war devil (chainsaw man),27 +wandaba style,27 +walpurgisnacht's familiars,27 +wagaya no liliana-san,27 +wada katsu,27 +vox aura,27 +voltron (mecha),27 
+vivid (key),27 +vitruvian man,27 +virtua fighter 5: final showdown,27 +viola (flower knight girl),27 +vioka,27 +vhdtyzusixc7fai,27 +vf-31c,27 +verniy (warship girls r),27 +venus syndrome (idolmaster),27 +venosus,27 +velcro,27 +veil (disney),27 +veffidas feaze,27 +variasii,27 +vanzan,27 +vanguard (warship girls r),27 +vampire (aoki hagane no arpeggio),27 +vamp!,27 +uzubilla,27 +uuruung,27 +utsumi erice (swimsuit avenger) (first ascension),27 +uss wisconsin (bb-64),27 +uss lexington (cv-2),27 +ushi (genshin impact),27 +usa-pom,27 +urushizawa takayuki,27 +uro (uro zi),27 +urinary drainage bag,27 +ur-8,27 +unkmochi,27 +unicorn (azur lane) (cosplay),27 +unichiri,27 +undead unluck,27 +unconventional broom,27 +unbeller,27 +umidemi,27 +umeyuki,27 +ume (flower knight girl),27 +umbral knight (ender lilies),27 +umakoshi yoshihiko,27 +ultimate girl,27 +ukon,27 +uguu~,27 +uezato ryouhei,27 +uep,27 +uehara (higanbachi),27 +ueda hashigo,27 +uchuu teiou,27 +uchida tamaki,27 +u-511 (kancolle) (cosplay),27 +tyson tan,27 +type 88 (girls' frontline),27 +type 63 (girls' frontline),27 +twin angels,27 +twin angel break,27 +twilimi,27 +turizao,27 +tukimisou0225,27 +tuba-kun,27 +ttutto,27 +ttnap,27 +tsuwabuki masaharu,27 +tsuru (tsubasa1993621),27 +tsumugiya ururu,27 +tsukudani shirou,27 +tsukiyo rei,27 +tsukiyama shinobu,27 +tsukishiro hikari,27 +tsukimi (shironeko project),27 +tsuki ni yorisou otome no sahou 2,27 +tsujita daisuke,27 +tsuchiya ai,27 +tsuchigumo (youkai watch),27 +tsu da,27 +trunchbull,27 +truffle,27 +trophy head,27 +troncill,27 +trip (dramatical murder),27 +trilby,27 +trembling legs,27 +trapeze,27 +transparent bow,27 +training wheels,27 +tozaki makoto,27 +toys (pixiv),27 +toy soldier,27 +towne,27 +touyama (t3yama2),27 +tounyu melon,27 +totopepe888,27 +toto (flip flappers),27 +total war,27 +total9,27 +toron,27 +toripuru (tripl3),27 +torichiyo,27 +tori (torashimaneko),27 +tora-oneesan,27 +toot,27 +tony (chikaku kabin),27 +tonde buurin,27 +tomoyami,27 +tomoe (criminal girls),27 +tomine kasumi,27 +tomato hair ornament,27 +toma (dragon ball),27 +tokiko (psychopomp),27 +toine hoko,27 +toge (owannun),27 +tofucakes,27 +todoroki gou,27 +toda eulalia kotohi,27 +toa510,27 +tnt77,27 +tinker bell (disney) (cosplay),27 +timoria (elsword),27 +tikal the echidna,27 +tico,27 +thylacine (kemono friends),27 +thunderbird (monster girl encyclopedia),27 +thoth (stand),27 +the silence of the lambs,27 +the saga of larten crepsley,27 +the legend of zelda (cd-i),27 +the king of fighters '98,27 +the cecile (vtuber),27 +tess tesryon,27 +terayamaden,27 +tentenyakan,27 +tenshou akira,27 +tennis skirt,27 +tenma mitsuru,27 +tenkawa maihime,27 +tengirl,27 +ten ten (ogino atsuki),27 +ten (ch),27 +temk,27 +teeri (koakuma teeri to kyuuseishu!?),27 +teddy (pui pui molcar),27 +tear ring saga: berwick saga,27 +tea sly,27 +taturouxs,27 +tatsuki (irodori),27 +tateyama ayaka,27 +tatarigoroshi-hen,27 +tatami san tatami,27 +taro. 
(tataroro 1),27 +tarn,27 +tarako jun,27 +tap out,27 +taoer.,27 +tampon string,27 +tameiki,27 +tamaki sakura,27 +tamae (jungetsu fukou),27 +tama nya,27 +takuan (taku1219oekaki),27 +takoyaki kenken,27 +takeda shingen (sengoku collection),27 +take tw01,27 +takayama kate,27 +takatoo erika,27 +takashino (noni-nani),27 +takamachi nanoha (formula ii),27 +takakura ken (dandadan),27 +takahashi mei,27 +taishang laojun,27 +taiga joe,27 +tai (pixiv6134),27 +taguchi (igutiguti),27 +tadano comina,27 +tactical trooper (elsword),27 +tachibana mayumi,27 +tachibana-san-chi no dansei jijou,27 +taboo tattoo,27 +ta ki,27 +t0da,27 +szainx,27 +syubare,27 +sword guard stance,27 +swinery,27 +sw (2311550438),27 +sv-51,27 +suzuki ichirou,27 +suzuki aika,27 +suzukaze no melt,27 +suzuhime,27 +suzhi2333,27 +sutetete,27 +supreme king (yu-gi-oh! gx),27 +super bunny man,27 +suouin kana,27 +sunny (20597521),27 +sunken scroll (splatoon),27 +sun dou,27 +sun (sunsun28),27 +sumiwow,27 +sumipic,27 +suitokuin tenmu,27 +suisui again,27 +suisui -sweetheart swimmer-,27 +suikomu now,27 +suika (muneneko),27 +sui (komorebi),27 +sugihara azuki,27 +sugar (chicchana yukitsukai sugar),27 +succubus quest,27 +stupid movie sequels,27 +studded trim,27 +stones of dragon,27 +stoner08,27 +stiky finkaz,27 +sthesia awar nono,27 +steven stone (summer 2020),27 +sterndorf,27 +stella (flou),27 +stanbot (little witch academia),27 +ssorasora,27 +squid pose,27 +sputnik (artist),27 +sprinkling,27 +spirit blossom sett,27 +spirit albarn,27 +spinaria (shingeki no bahamut),27 +speed grapher,27 +souya touki,27 +souya agl (kancolle),27 +sousaphone,27 +sound tamashi,27 +souma chihiro,27 +souensha,27 +sos,27 +sonson,27 +sonan kyouko,27 +son karin,27 +social commentary,27 +snow on headwear,27 +snow feather (last origin),27 +snow angel,27 +snake penis,27 +snail8,27 +smol kronii,27 +smol baelz,27 +slave knight gael,27 +skybracer (genshin impact),27 +skunk,27 +skullshatterer (arknights),27 +ske48,27 +sitri (fire emblem),27 +sirofuku414,27 +sirat111,27 +sira,27 +sion (laterna magica),27 +sioinari 03,27 +sio 1234,27 +sinisistar,27 +single leg warmer,27 +sinful hime,27 +simmsy,27 +sima zhao,27 +signing,27 +signal bar,27 +sigma (counter:side),27 +shuu (ssyuu721),27 +shroud of martin,27 +shrimp cake,27 +shoulder rest,27 +shoukimaru,27 +shoukaku (kancolle) (cosplay),27 +shoe strap,27 +shishui guima,27 +shishou no deshi,27 +shiru daku settai,27 +shiroi (shiroicbe),27 +shiro mayu,27 +shiranui hazuki,27 +shiraki aeka,27 +shirajira,27 +shirai,27 +shinyae,27 +shiny floor,27 +shinonome hatsuho,27 +shinonome (ichigotsuki),27 +shinomori aoshi,27 +shino yoshihisa,27 +shino (shino-xx),27 +shino (housamo),27 +shinkaida tetsuyarou,27 +shinigami (tougetsu hajime),27 +shingao-chan,27 +shinatsu azuki,27 +shinagire (sinanohaka),27 +shinada benio,27 +shimaneko,27 +shilfy yo,27 +shikabane gorou,27 +shijou mako,27 +shijima gou,27 +shii (niku-9),27 +shichi-go-san,27 +shibuya arata,27 +shibuki oroshi,27 +shi wu you,27 +shi qi kuang beng,27 +shelly (kakuno),27 +shekinah (phantom of the kill),27 +sheep print,27 +shasu (lastochka),27 +shared straw,27 +shanghai doll (cosplay),27 +shancha,27 +shadow chie,27 +shading mismatch,27 +seven (sixplusone),27 +setouchi chie,27 +serohan,27 +serizawa shion,27 +seritsumi,27 +senpai (souzaipan),27 +sengoku esuji,27 +semen sprinkler j,27 +sek-it,27 +seiun sky (soiree de chaton) (umamusume),27 +seisenshi dunbine: new story of aura battler dunbine,27 +seijuu shining dragon,27 +seigo (seigou),27 +seha lee,27 +seeking the pearl (umamusume),27 +sea serpent,27 
+scootaloo,27 +school festival,27 +scaverle (mao),27 +scarlet nexus,27 +say (sakabin),27 +sawsbuck (summer),27 +saturn (satscarlet),27 +satou-san,27 +satori (blueinc324),27 +sato toshiya,27 +sassa (cb),27 +sasayuri (genshin impact),27 +sasakura34,27 +sasaki fumi,27 +sasaki azusa,27 +sasaki akane,27 +saruwatari akari,27 +sarablanche,27 +sara tefal,27 +saotome jin,27 +sangoku hime 3,27 +sangobana (flower knight girl),27 +sanada yukimura (sengoku musou),27 +samohichi,27 +sakurazaka yuzuki,27 +sakuraizumi yuu,27 +sakuragawanaa,27 +sakura miyuki,27 +sakura laurel (umamusume),27 +sakura dungeon,27 +saku (soreca49),27 +sakenomi akane,27 +sakatakin,27 +sakaagari hurricane,27 +saitou takeo,27 +saitou takana,27 +saitooo,27 +saionji rei,27 +sainyang (queen's blade),27 +sailor neptune (cosplay),27 +saikorodekimeru,27 +sageo,27 +sagara momoka,27 +sagami,27 +saegome,27 +sabusupi,27 +saber ruri,27 +saber class (fate),27 +sa ka (sakanoya),27 +s2u,27 +s.shimizu,27 +s.o chin,27 +rzx0,27 +ryuuki yumi,27 +ryuuguu yassuu,27 +ryumikooo,27 +ryuko lee,27 +ryo (ryoxkj),27 +ryekie (drunk tiger) (live a hero),27 +rydia arsenal,27 +ruy,27 +ruto5102,27 +rutger (fire emblem),27 +ruondea,27 +rukia moon,27 +ruka tou,27 +ruhuyu (show by rock!!),27 +rudorufu,27 +rudder,27 +rsk,27 +rpg-exen,27 +rozzi (black survival),27 +route39,27 +rou+,27 +rotom (other),27 +rothy (user cezn8425),27 +rosetta (summer) (granblue fantasy),27 +romance wa tsurugi no kagayaki 2,27 +rom (kochirasama),27 +rola (vtuber),27 +rokukatamari,27 +rokujou miyuki,27 +rokudou hijiri,27 +rock.fc,27 +robotta,27 +roblox,27 +roberta (madoka magica),27 +rizu033,27 +riuhi,27 +rita (sennen sensou aigis),27 +rishia,27 +riruku,27 +riri (ri0177),27 +riomario,27 +rinse 7,27 +rinko-san (cocoa fuumi),27 +rindou (kunoichi tsubaki no mune no uchi),27 +rindou (faker's manual),27 +rin (toriko),27 +rimworld,27 +ridge racer,27 +richard li,27 +rice porridge,27 +ricardo contreras,27 +rian (bdl),27 +rgrey00,27 +reset,27 +res2shuu,27 +remona-san,27 +remi altava,27 +reisei,27 +reinforced,27 +reiko holinger (cosplay),27 +rei (09991),27 +reebok pump,27 +redrose214,27 +redoxhn,27 +red crown (cult of the lamb),27 +red-50869,27 +recube,27 +rebaria,27 +reason!! 
(idolmaster),27 +real madrid,27 +razi,27 +raze (valorant),27 +rayxray,27 +rayfa padma khura'in,27 +raycrodu h,27 +rapuka,27 +raphael sorel,27 +ranran (iaotak),27 +ran to haiiro no sekai,27 +ran s200,27 +ran ran ru,27 +ramekin,27 +ramba ral,27 +raleigh becket,27 +raichi (quatsch),27 +radio telescope,27 +radian (paradiso guardian),27 +r31harutan,27 +qunqing123,27 +quilted clothes,27 +queen tia (mega man),27 +queen elizabeth (warship girls r),27 +queadluun-rau,27 +quanxi's group (chainsaw man),27 +quadruplets,27 +qt1 jo,27 +qmin arts,27 +qiqi (genshin impact) (cosplay),27 +qbird449,27 +q-pra,27 +puyon (puyon),27 +pussy juice in container,27 +puru (manatsu),27 +purple garter belt,27 +purple curtains,27 +purinnkawayusu,27 +pupuru (sennen sensou aigis),27 +punkish (module),27 +pumpkin shorts,27 +prinz luzifer,27 +princeton (azur lane),27 +princess king boo (cosplay),27 +priget plus,27 +prester johanna,27 +pov legs,27 +potato (popopopopo623),27 +poshii (posy),27 +poppuru,27 +popeye,27 +ponzu (beetle burner),27 +ponta (poqpon),27 +ponta (aoi),27 +ponkotta,27 +pomudachi (pomu rainpuff),27 +pommel tassel,27 +pokemon stadium,27 +pokemon ranger 3,27 +poison dart frog,27 +poe no ichizoku,27 +pochita,27 +pocchari,27 +plhsxf,27 +platinum (o0baijin0o),27 +piyoru nico,27 +pixiv gakuen,27 +pith u,27 +piss bottle,27 +pirlo,27 +pippi (osu!),27 +pink water,27 +pink negligee,27 +pink lady mage (character),27 +pikuson,27 +pike,27 +pikachu belle,27 +pig hood,27 +picolumi,27 +piatin,27 +phenomeno,27 +phaia,27 +petri dish,27 +peter chai,27 +pet carrier,27 +pepupapipooo,27 +pepsi ice cucumber,27 +peony ix,27 +peng kun,27 +peachy michi,27 +paw stick,27 +paul bunyan (festival outfit) (fate),27 +patio swing,27 +partner,27 +parkiranhonda,27 +parappa,27 +papuru (bombergirl),27 +papillon,27 +pantyhose on head,27 +panilla the revival,27 +pan chira,27 +paella,27 +p book,27 +p.a.w,27 +ozumii,27 +oxalis (flower knight girl),27 +ouro krono,27 +ouka (oukakaka),27 +otsuki (tm3n),27 +otou (otou3dayo),27 +otohime (kk23maa),27 +otaki55,27 +osu(statatatatatata),27 +osaki nana,27 +osaji0909,27 +oruserug,27 +ortina lillibel (yashiro sousaku),27 +orexxxo,27 +orange tree,27 +orange mittens,27 +oppaihobby,27 +ootaka narumi,27 +oota kouichirou,27 +oosawa fusatarou,27 +oomune mune,27 +ooki kino,27 +ookami inu (werwolf),27 +ookami ciro,27 +onsen man,27 +onsem,27 +only you recross,27 +onisuu,27 +onioohashi,27 +onigawara sharu,27 +omuretu (butterroru),27 +olivia (fire emblem) (cosplay),27 +okihara kotoha,27 +oketsu fumio,27 +okazaki beru,27 +oinari 33,27 +ogata garaiya,27 +ochrejelly,27 +ochiai (kinjo no hito no nakimushi),27 +ocarinaotw,27 +obscur,27 +o h miona,27 +nyx avatar,27 +nyuudles,27 +nyanko960121,27 +nyangorobei,27 +nyan5000,27 +nuri (yoon cook),27 +nudist beach ni shuugakuryokou de!!,27 +nudge,27 +ntake toukasaien,27 +nowoka,27 +noshiro (uncharted festival grounds?) 
(azur lane),27 +noromame,27 +nonoharak,27 +nomura fusako,27 +nola moon (girl cafe gun),27 +nohko,27 +noeru (soul64),27 +noela (cheat kusushi no slow life),27 +nobuda,27 +nitou akane,27 +nishizumi miho (cosplay),27 +nishinakajima nanpou,27 +nise maou sukaraberu,27 +nioh 2,27 +ningyo numa,27 +nine delta,27 +niiboshi reo,27 +nigori (keydoor),27 +nightstar0012,27 +nieve (rabi ribi),27 +niboshi,27 +ni no sakura senbu (module),27 +nezusuke (blue archive),27 +nexeee,27 +newon,27 +newman,27 +nervlish,27 +nero (black clover),27 +neosagi,27 +nekota chihiro,27 +nekonomi,27 +nekomusume (pekoneko),27 +nekomiya shuu,27 +nekomata (megami tensei),27 +nekomaaro,27 +nekoinu bamboo,27 +neko usagi (nekousagi jpn),27 +neko koi!,27 +neko (dakemakura),27 +negister,27 +negimapurinn,27 +neck snap,27 +necalli,27 +navy field 152,27 +natsume mina,27 +nathan spencer,27 +narukami arei,27 +narrative gundam,27 +naraba yueni,27 +napier,27 +naox,27 +nao tsukiji,27 +nanora (sero4),27 +nankaitarou chouson,27 +nanjou misao,27 +nanji3,27 +nandeyanen,27 +nanba hibito,27 +nanami kazuki,27 +nanakorobi,27 +nanahoshi yukari,27 +nan0teck,27 +namonaiteidono,27 +nami (aoi shiro),27 +namazu (dc 27546),27 +nako nya,27 +nakazeko,27 +nakamori kemuri,27 +naka no hito nado inai!,27 +naka1379,27 +naitou satoshi,27 +nahril,27 +nagoya (oshiro project),27 +nagisa iori,27 +naginoya,27 +nagase kotono,27 +nagano hinata,27 +nadashima gy,27 +nacht faust,27 +nabeshiki (nabeyashiki),27 +n.s.egg,27 +myoukou (azur lane),27 +muyue,27 +mutual penetration,27 +mutenka,27 +muskmelon,27 +mushanokouji iwai,27 +musclecar,27 +murousaisei123,27 +murasaki nami,27 +murasaki (ekyu),27 +muq,27 +multicolored vest,27 +mukimuki mayuge,27 +mugi (cookie),27 +mugen no ryvius,27 +mucus toad (monster girl encyclopedia),27 +muchi muchi pork,27 +mtr,27 +moyo (k1rakira),27 +mots,27 +moth hair ornament,27 +mot (anticycle),27 +mossu,27 +morty (fall 2021) (pokemon),27 +mortar shell,27 +morioka yasuto,27 +morinaga777,27 +mordred (memories at trifas) (fate) (cosplay),27 +morbius (film),27 +mootium,27 +monument,27 +monokid,27 +monodam,27 +mono (bluesky),27 +monkeyyan,27 +monaka,27 +momojiri aya,27 +momen102 (sji09u),27 +mokokusa,27 +mokokoiro,27 +moko (iiioookkkaaa),27 +moi (yfvlibbl9i),27 +moi'dukdum,27 +mohn (pokemon),27 +mogami noa,27 +mofge,27 +moero downhill night 2,27 +mochi mocchi,27 +mobius (suicideloli),27 +mob (dohna dohna),27 +mnk,27 +mk 12 spr,27 +mizunoto nozumi,27 +mizuno yui,27 +miyouji,27 +miyoshi (joker game),27 +miyamaki,27 +miyako910724,27 +miyako3344,27 +miyagi yasutomo,27 +miyabi juri,27 +miu (dears),27 +mitsukuni,27 +mitsuki (omezame alice),27 +mitoki 6x6,27 +mito soosu,27 +misakura julio,27 +miriam,27 +mireille lerner,27 +mio (navy field 152),27 +minus sign,27 +minoo,27 +minokasa nagi,27 +minibow,27 +minggoo,27 +minerva (blazblue),27 +minchi (lordofthemince),27 +minazuki jiyun,27 +minazuki (karashikazoku),27 +minato subaru,27 +minato (minat0),27 +minamizato ai,27 +minami (dakemakura),27 +min1910,27 +mime,27 +miltank (cosplay),27 +milleore,27 +milk (pop'n music),27 +mila (fire emblem),27 +mikuni (mikunik),27 +miki 0,27 +mikan03 26,27 +mihua mh,27 +mihama kouji,27 +migita makura,27 +mig-21,27 +midou miko,27 +midnight anime lemon angel,27 +midarezaki gekka,27 +midarezaki chika,27 +michaelfirman,27 +meteor sweepers uniform,27 +mery-chan,27 +meriibe,27 +mercedes (viper),27 +mentally deficient,27 +melusine (ff5),27 +melone (melonenbrot),27 +melon sakiguchi,27 +melon (akikan),27 +mellow4043,27 +melike,27 +mekakuri (otacon250),27 +mekakucity actors,27 +meimu,27 +mei 
(ayanepuna),27 +meguri tomoe,27 +meganemausu,27 +megajujube,27 +medusa (lancer) (fate) (cosplay),27 +medico (dohna dohna),27 +measuring stick,27 +measho,27 +meakashi-hen,27 +mayuzumi takumu,27 +mayday,27 +maya (calm),27 +may queen,27 +maximilian-destroyer,27 +matsuda touta,27 +matsuda shin,27 +matou sakura (deen s&m),27 +matcha parfait,27 +masuo (masdlivelove),27 +masuji,27 +mashle,27 +mashiro moritaka,27 +mashiro chisato,27 +mashima moyu,27 +mash kyrielight (enma-tei uniform) (cosplay),27 +masarou,27 +masanori ito,27 +marusuke,27 +marsia (arc the lad),27 +maronie (flower knight girl),27 +mario + rabbids kingdom battle,27 +mario & sonic at the rio 2016 olympic games,27 +marine benefit,27 +marika (zeddaru),27 +mariemon,27 +marble bloomers,27 +mao san,27 +mano sakurako,27 +manamachii,27 +manabu,27 +man dam,27 +maki yoshitake,27 +maki (seto no hanayome),27 +makaron611,27 +makarios (fate),27 +majima gorou (cosplay),27 +maionese,27 +maimai (game),27 +maigo,27 +mai waifu,27 +mai (senran kagura),27 +mahou tsukai sally,27 +mahou shoujo kanae,27 +maho (summer) (princess connect!),27 +magus (seiken densetsu 3),27 +mage (dq3) (cosplay),27 +magaki (kof),27 +mae (blue revolver),27 +madoka (abubu),27 +madlax,27 +madara6k,27 +madan no ou to michelia,27 +mad catz,27 +macne nana petit,27 +maclo,27 +mackerel (sabanoneko),27 +machine robo chronos no gyakushuu,27 +machin4719,27 +macaroni (piyo4508),27 +mabuchi (junk gaming maiden),27 +lycoris radiata,27 +lycion,27 +luxion (mobseka),27 +lunamoon (style),27 +luna (tsuki tsuki!),27 +lumilive,27 +luluce (30ms),27 +lugh beowulf,27 +luan loud,27 +loz 017,27 +loz,27 +lovesheng1314,27 +lovekov,27 +lovecom,27 +love machine,27 +love is blue (bayonetta),27 +love death + robots,27 +lorem ipsum,27 +lord of knights,27 +looking at ass,27 +lonyan (gurande),27 +longai,27 +lolipantherwww,27 +loke (fairy tail),27 +lixiao lang,27 +liukensama,27 +little gigant,27 +lisa 78,27 +liran (iro),27 +linger ftc,27 +lily salvatana,27 +lilith aileron,27 +lilith-lily,27 +liliana (wagaya no liliana-san),27 +lighter-than-air pancake (genshin impact),27 +lieat,27 +levka,27 +letro,27 +leon (vocaloid),27 +lemon tea (15058751917),27 +legenders (idolmaster),27 +legaia densetsu,27 +lebruitestmoi,27 +leaning against vehicle,27 +leaf-chan,27 +lantana0 0,27 +lane aim,27 +land rover,27 +lana liddell-hart,27 +lamase (41),27 +lal mirch,27 +lal mel martha,27 +lahti-saloranta m/26,27 +lagombi,27 +lackatask,27 +la coiffe (granblue fantasy),27 +kz (dbz kz),27 +kyona (konakona),27 +kyon-kyon (jashin-chan dropkick),27 +kyakya,27 +kuzaki rinko,27 +kuyuu (somari),27 +kuusen otome sky valkyries,27 +kusama daisaku,27 +kusakihara toshiyuki,27 +kurozu (hckr 96),27 +kuroyanagi ruriko,27 +kurosawa kakeru,27 +kurosaki ranmaru,27 +kurosabi neko,27 +kuroonu (gyakuro),27 +kuroniko,27 +kuroki rio,27 +kuroi-chan (kuroi moyamoya),27 +kuroda miki,27 +kuro no haijin,27 +kuro (parade),27 +kurenai yuuji,27 +kurarome,27 +kuranaga kozue,27 +kuran (yourcatissick),27 +kuori chimaki,27 +kumo ryuun,27 +kumiromi of harvest,27 +kumino (soup),27 +kumashige,27 +kumari kojika,27 +kukuri (kamisama dolls),27 +kuina (escapegoat),27 +kuhl-notes,27 +kubo shiori,27 +ktyon3,27 +kozure ookami,27 +kousou,27 +kousetsu (nonosuke),27 +koukaku,27 +kouga gennosuke,27 +koubou,27 +kou89,27 +kotosuzu,27 +kosumone,27 +koshian (taiyaki),27 +kosaka yukina,27 +korutopi,27 +koroni (nkrgs),27 +koromono,27 +konchiki,27 +konbini dmz,27 +komipe512,27 +kominato haruichi,27 +kominami asumi,27 +komaniwa pumpkin,27 +kokutou eiri,27 +kokuko (tsukiyotake),27 +kokonogi 
kisara,27 +koi wa sekai seifuku no ato de,27 +koi q!,27 +kohaku sogo,27 +kogure kakeru,27 +kogeneko,27 +kogamura uril,27 +koeln (azur lane),27 +kochouka,27 +knight of astora oscar,27 +kleine erdbeere,27 +kiwi0314,27 +kittika thaworn,27 +kitsuneko azarashi,27 +kishimoto sae,27 +kishimoto lucia raimu,27 +kisaragi yuki (sora saki),27 +kisaragi ren (mahjong soul),27 +kisaragi rei,27 +kisaragi hayato,27 +kisaragi chitose,27 +kisaragi (new year's wish) (azur lane),27 +kisaragi (kisaragi0930),27 +kisalaundry,27 +kirimiya mizuki,27 +kirima aki,27 +kirihota,27 +kirbyheimi,27 +kirby squeak squad,27 +kio is here,27 +kinoshita teitoku,27 +kingsglaive garb,27 +kinggainer,27 +king rouzer,27 +king of diamonds,27 +king (one piece),27 +kinaee,27 +kimmi,27 +kimi wo aogi otome wa hime ni,27 +kilt hide,27 +killing stalking,27 +kikuzuki tarou,27 +kikuno mira,27 +kikimora (puyopuyo),27 +kiki (tsunya),27 +kflamingo,27 +keyyan,27 +kevin herault,27 +keun ju kim,27 +kess (coffeechicken),27 +keqing (genshin impact) (cosplay),27 +kenzaki raki,27 +kenta (ittla),27 +kenjin (pageratta),27 +kemeo,27 +keino (midorinoko),27 +kei nagase,27 +kei (trouble spirit),27 +kei (0497),27 +kazanniro,27 +kazama akari,27 +kawazu kento,27 +kawarazakike no ichizoku,27 +kawa yui,27 +katou riko (niichi),27 +kasu (pixiv108801),27 +karimero (calimer0),27 +kariginu (gureviyo582),27 +karasuma tadaomi,27 +karappa,27 +kaoshuzi,27 +kanzuki yuu,27 +kanta-kun,27 +kanojo no carrera,27 +kanohi (bionicle),27 +kanogawa hiro,27 +kanna-mika,27 +kania,27 +kanaiko,27 +kanade (reveryearth),27 +kamura (rain prophet),27 +kamimori kuuraku,27 +kamihara ichi,27 +kamen rider shin,27 +kamen rider ryuga,27 +kamen rider delta,27 +kameko (denki-gai),27 +kamegawara nikuo,27 +kamakura shio,27 +kakiiro (takuya),27 +kakihou,27 +kakashino kakato,27 +kajika (kabaneri),27 +kai (kai 013),27 +kagura ren,27 +kagura mikazuchi,27 +kagari touya,27 +kagari chiho,27 +kagari (kgr 000),27 +kagamine rin/len happy 14th birthday,27 +kagamine lenka,27 +kagami kouhei,27 +kafi (cafee kuu),27 +kaede shiroppu,27 +kaede haya (lo0831lo),27 +kadowaki mai,27 +kadej,27 +kabisuke,27 +kabi killer,27 +k3nnyn3v,27 +k-sha,27 +jyn erso,27 +juujiro eru,27 +juno (element hunters),27 +juli kidman,27 +jugem-t,27 +jueduihuoli,27 +jue buzheng huo gu gu zi,27 +judy (artist),27 +judo throw,27 +jougasaki mika (cosplay),27 +jojon,27 +johnny silverhand,27 +jody know-grow-help,27 +jiu (gaydio zrong),27 +jitsu wa imouto deshita.,27 +jinrai (frame arms girl),27 +jin (mitosupa),27 +jiji (creature),27 +jetty,27 +jesus revenge,27 +jessica jefferson,27 +jericho (nanatsu no taizai),27 +jeorge (fire emblem),27 +jelonzo (splatoon),27 +jasmine t,27 +jasmine (disney) (cosplay),27 +january (coyote ragtime show),27 +jamijami,27 +jairo,27 +jaina preventer,27 +jaibus,27 +jaguchi (bbbing),27 +jagariko,27 +jade harley,27 +jack-o'-lantern cutout,27 +izumi (ko8),27 +izumi (gyee),27 +izanami kyouko,27 +iwasaki rio,27 +iwamochi,27 +iwami shouko,27 +iwagakure symbol,27 +iumu,27 +itsumo hokuto,27 +itou junji (style),27 +itoshiki majiru,27 +itoko (i t k),27 +itoi kaede,27 +itagaki atsushi,27 +issun boushi (ilmtkimoti),27 +isolde (kof),27 +ishida (danganronpa),27 +isher (liangzi tai hongcha),27 +isekai harem monogatari,27 +isarai kanara,27 +isana yashiro,27 +iro ni ide ni keri waga koi wa,27 +irise,27 +irie keisuke (handn),27 +iracco,27 +iona (wixoss),27 +io (ike ike),27 +inuyama (inuhill),27 +inufusa yuno,27 +interceptor (ff6),27 +insitsukun,27 +inoue iris (sdustz),27 +inori taimatsu,27 +innkeeper loraine,27 +inkinesss,27 +inahara,27 
+inagita,27 +inada (masu shu),27 +imouto paradise!,27 +imminent hit,27 +imari yuka,27 +imahia,27 +ikinokore! shachiku-chan,27 +ignatius (fire emblem),27 +igeta (pattern),27 +igawa,27 +ideolo (style),27 +identity (vocaloid),27 +iczer-2,27 +icy02,27 +ichimatsu akebi,27 +ichimai ugou,27 +ichibanboshi no rei,27 +ice axe,27 +ibm (ajin),27 +i!,27 +hzrn (ymj924),27 +hyde (hyde (tabakko)),27 +hy (hem oo),27 +hwanhee,27 +huu00,27 +hundun no bifang,27 +hunchback,27 +huge afro,27 +huberta von bonin,27 +huaimeng,27 +hua,27 +hs2000,27 +houshou kisaki,27 +houkagi yuu,27 +hotoke party,27 +hossy,27 +hosimaru,27 +hoshinomiya kunon,27 +hoshi no otome to rikka no shimai,27 +hoshi mirin,27 +hortensia saga,27 +horarezonu,27 +hood (rosey love poem) (azur lane),27 +honyaru (nanairo39),27 +honshou chizuru,27 +honky,27 +hong hai-er,27 +hone onna,27 +homura (aristocrat-y),27 +homa kura,27 +hom (atelier),27 +holotempus,27 +hole in ceiling,27 +holding headgear,27 +ho-cki,27 +hizuki mitsuna,27 +hiyorimi,27 +hiyami aki,27 +hitoba,27 +hisagi hotaru,27 +hirumae,27 +hirayama (hirayamaniwa),27 +hiotan,27 +hinotta,27 +hinoborukaku,27 +hinnu@ao,27 +hinata nao (iwamotochou geinousha),27 +hinata (eine blume),27 +himura yuu,27 +himeno yuka,27 +hime-chan (ramchi),27 +hillprime,27 +hikounin sentai akibaranger,27 +hiking pole,27 +hikage (sennen sensou aigis),27 +hika (ozeluk),27 +highena,27 +hiden aruto,27 +hida sayuri,27 +hershey's,27 +hero (merc storia),27 +hercules beetle,27 +heracles (fate) (cosplay),27 +henreki san,27 +helipad,27 +hejia abby,27 +hebai xiaochuan,27 +heath (fire emblem),27 +heart (kw0hahgk9nenhgs),27 +heal & squeeze,27 +headphone + musume,27 +hazama shouko,27 +hayashibara megumi,27 +hayasaka (neoneet),27 +hayami shizuku,27 +hayami momoka,27 +hayami jin,27 +haty,27 +hatsune (leaden heart07),27 +hatohara mirai,27 +hateri,27 +haruta (h-oh),27 +harusame (moyuna412),27 +harumi (haru nee to no dousei shuumatsu),27 +harukaze bou,27 +haruka faraway716,27 +harugasaki kanau,27 +haru urara (first urara saku sakura) (umamusume),27 +harry tribeca,27 +hanuu (kemomiku),27 +hanged man (stand),27 +hangaku (araara0616),27 +hanesaki nekome,27 +hanahira!,27 +hanagamigendai,27 +hamster on shoulder,27 +hamo (user zuky3273),27 +hammer and sickle bikini,27 +hall,27 +hakusoto,27 +hakubishin tamazusa,27 +hakuaki,27 +hakohako-does,27 +haitani ran,27 +haikeiyu,27 +haiji kiyose,27 +haguro (aoki hagane no arpeggio),27 +hago,27 +hachimitsu (hati718),27 +hachifuku,27 +hachi (aimu),27 +habetrot (last origin),27 +haapi jang p,27 +ha yun,27 +gzmon,27 +gyeoggi 3 ban,27 +gv natsuno,27 +guuchun,27 +guroo (shine119),27 +gundam bael,27 +gundam 00f,27 +guido (grandia),27 +guardian place,27 +grs-,27 +grilled tiger fish (genshin impact),27 +grgrton,27 +gremyashchy (ryan greythorn),27 +gregorio zeppeli,27 +great auk (kemono friends),27 +grand harem,27 +grace (kor) (racehorse),27 +grabbed breast over shoulder,27 +gougoku,27 +gotou jin,27 +gooompy,27 +gomadare (310329),27 +golden knight (elona),27 +gold egg (p&d),27 +goggle-kun (splatoon),27 +godzilla singular point,27 +godzilla final wars,27 +godai yuusaku,27 +glyph,27 +gloomy bear,27 +glavenus,27 +glassy0302,27 +girouette (mega man),27 +girls und panzer little army,27 +girls of the wild's,27 +ginryuu,27 +ginga no kou,27 +gilbart chris von muir,27 +giji-p,27 +giant cat,27 +ghost-q,27 +geso (nekomachi),27 +geronimo (third ascension) (fate),27 +gerisita,27 +genesis rhapsodos,27 +general grievous,27 +gelato (girls und panzer),27 +ge-ha,27 +gasora,27 +garudamon,27 +garcia lovelace,27 +garbancobean,27 
+ganbare! nakamura-kun!!,27 +gammei (live a hero),27 +gamma 1,27 +gals!,27 +gaki deka,27 +g=hikorou,27 +g36 (mini maid) (girls' frontline),27 +g36 (50 days with g36) (girls' frontline),27 +g-arcane,27 +fuziwara ayumu,27 +fuyuno usako,27 +fuusuke (f4989),27 +fuurin sou,27 +fuunsaiki,27 +fuuma shuriken,27 +futoshi (tekidai),27 +futaba riko,27 +fushoku,27 +fushigi na merumo,27 +furuichi takayuki,27 +furon (froon),27 +full armor unicorn gundam,27 +fukurau,27 +fukuma,27 +fuku (pukuyan),27 +fujishiro takeshi,27 +fujisaki aya,27 +fujimi keisuke,27 +fujimaru ritsuka (female) (fgo orchestra),27 +fujikura miyabi,27 +fujii yui,27 +fuji-san,27 +fudou (kakko kari),27 +fudepenbrushpen,27 +fuan no tane,27 +fs-project,27 +frog button,27 +friedbirdchips,27 +fridaynightcat,27 +frey knowles,27 +fp-6 (girls' frontline),27 +foxbat (cannonball),27 +four-leaf clover necklace,27 +food on tail,27 +folding stool,27 +fluno,27 +florges (red),27 +flickering,27 +flehmen response,27 +flaurel,27 +flash cards,27 +flam (81),27 +fk,27 +five of clubs,27 +fish hat,27 +firin,27 +fire punch,27 +fire bomber,27 +fins (pixiv29142276),27 +finn fish,27 +fillia einhart (eirgallant),27 +figurehead (figurehead67),27 +figu@mate,27 +feylin,27 +fenrir (shinkai no valkyrie),27 +felyne (cosplay),27 +felching,27 +feel nilvalen,27 +fav (mahoiku),27 +fattybot,27 +fairchild,27 +fage,27 +exocet,27 +exmile,27 +excarabu,27 +exasperation,27 +evy (mabinogi),27 +evolved virgin killer sweater,27 +eve genoard,27 +eufrik,27 +etto eat,27 +erun girl,27 +ernest,27 +eridan ampora,27 +epiki (nenekoneko0715),27 +ephraim (fire emblem) (cosplay),27 +envel203,27 +entrapta,27 +enshou,27 +enomiya milk,27 +ennui orz,27 +enkou shoujo,27 +enemy yari,27 +endou rino,27 +endou minari,27 +enchantress (dungeon and fighter),27 +emone04,27 +emilia (saga frontier),27 +emerada etuva,27 +emden (warship girls r),27 +embryo,27 +elza forte,27 +elu butyo,27 +els (ljhlee12),27 +elroadmaster,27 +elmina niet,27 +elizabetta (futagohime),27 +elisalotte,27 +elfboiii,27 +elephantus,27 +elena peoples,27 +electronic entertainment expo,27 +elach,27 +eku (threshold),27 +eini a lukkanen,27 +eight of clubs,27 +eicam,27 +ei (fran3bon),27 +ehekatl of luck,27 +egomeshi,27 +efg,27 +eddybird55555,27 +ebina hina,27 +ebimayo,27 +dynatron (mighty no. 
9),27 +duzie e,27 +dusty attenborough,27 +duck costume,27 +ds a,27 +dryad (seiken densetsu),27 +dragon sword,27 +dragon slayer (series),27 +draculala (gashi-gashi),27 +dozle zabi,27 +doushite-chan,27 +double arts,27 +dotaku,27 +dorm leader,27 +dorian oishiiyo,27 +dorcas (fire emblem),27 +doomer girl,27 +donnaoneone,27 +dondongarara,27 +dokkanohukukaityou,27 +dog slippers,27 +dog (gabriel dropout),27 +dnlin,27 +dl mask,27 +diverse system,27 +diverse order,27 +dissidia final fantasy nt,27 +dishwashing soap,27 +disembodied breast,27 +disco (sbr),27 +dire (jojo),27 +diras,27 +diluc (kfc) (genshin impact),27 +diethard ried,27 +diana (umineko),27 +diana (a-soul),27 +diamond ring,27 +diagonal-striped neckerchief,27 +detached tail,27 +destoroyah,27 +desk slam,27 +desert sorceress,27 +depayama (depaty),27 +densou (kinnikuhunter),27 +densetsu no yuusha da-garn,27 +demonlorddante,27 +demon king fish,27 +demeter (destiny child),27 +dekasugiburun,27 +defense distributed liberator,27 +defender (girls' frontline),27 +deerling (winter),27 +deep impact (umamusume),27 +decim (death parade),27 +deareditor,27 +deadpool (movie),27 +ddok,27 +ddd (nicoseiga51845241),27 +daydarion,27 +dawnlover 01,27 +dawkinsia,27 +dashinoya,27 +daru dayu,27 +darling dance (vocaloid),27 +darkwinslow,27 +darksider (star wars),27 +dark fencer (granblue fantasy),27 +dakikano,27 +dainana sugune,27 +daimon masaru (digimon savers),27 +daigorou,27 +dai zu san,27 +dai-guard,27 +daamiyan,27 +d.g,27 +cyclops (girls' frontline),27 +cyberdemon,27 +cyan sung-sun,27 +cursed sword (monster girl encyclopedia),27 +curran (dragalia lost),27 +cure honey (cosplay),27 +cure honey (coconut samba),27 +cure fortune (anmitsu komachi),27 +cure dream (cosplay),27 +cuntboy with male,27 +cum in footwear,27 +cuisine dimension,27 +cu (fsy84738368),27 +crystal hair ornament,27 +crusader (darkest dungeon),27 +crowdesu,27 +cross punisher,27 +crisis,27 +crimsonseed,27 +creature inside,27 +crazyodin,27 +crank,27 +crackingtaro,27 +cow (shadow),27 +cov-r,27 +corkscrew,27 +cony (la locura),27 +contract monster,27 +conago,27 +company captain yorshka,27 +comin,27 +comiket 83,27 +cojohn,27 +coffee1223,27 +code geass: soubou no oz,27 +code: exotic (elsword),27 +cocone fatima rosa,27 +cockatrice,27 +closz,27 +cleveland (muse) (azur lane),27 +clarith,27 +clariate,27 +clan senki,27 +claire (clarevoir),27 +citroen,27 +cicada hug,27 +chyt,27 +church bell,27 +chun (ya i memories),27 +christine ogawa,27 +choobackers32,27 +chongtian yixiao shualuanfei,27 +chobo ume,27 +chirori,27 +chip le cree,27 +chintara10,27 +chimame chronicle,27 +child (elsword),27 +chikusawa,27 +chikuma mask,27 +chikaburo,27 +chii-chan kaihatsu nikki,27 +chida daisuke,27 +chiaki riko,27 +cherry (10013717),27 +checkered shorts,27 +charlotte (ogami kazuki),27 +chanary,27 +chameleos,27 +chakku illust,27 +chaccu,27 +cervina,27 +certificate,27 +celestino cialdini,27 +catzz,27 +catmesi,27 +castle dedede,27 +carnelian (hohenlohe chillysand) (arknights),27 +carnage (marvel),27 +carmine,27 +caren hortensia (cosplay),27 +cardigan pull,27 +captain hannah,27 +cappy (kirby),27 +canele,27 +camus (uta no prince-sama),27 +camie (one piece),27 +callie (splatoon) (cosplay),27 +call h,27 +cafe cuties gwen,27 +caeda (bridal) (fire emblem),27 +c3t gg,27 +c-eye,27 +byakuran,27 +buta no liver wa kanetsu shiro,27 +busujima riou mason,27 +burying,27 +bule,27 +brunnya (fire emblem),27 +bren (girls' frontline),27 +brellom,27 +breast piercing,27 +breast beam,27 +bonklers,27 +boku 2020,27 +bogyaku no m,27 +bobu (bovyo222),27 +boar 
tail,27 +blz xxx,27 +blustar sky,27 +blue penis,27 +blooming,27 +blood on snow,27 +bloocarrot,27 +black (kekkai sensen),27 +bkyuuc,27 +bittenhard,27 +biting testicles,27 +bita,27 +bishamonten (noragami),27 +bioshock 2,27 +bimi (mgk),27 +bikini (dragon ball),27 +big bang beat,27 +bewitching elise,27 +bethly rose daisley,27 +benoit picard,27 +benisuzume (gauna),27 +benevolent leanne,27 +benbe,27 +belle (girls und panzer),27 +bekki natsumi,27 +bee and puppycat,27 +beam shield,27 +battlefield 1942,27 +battle lover scarlet,27 +battle academia lux,27 +batayu,27 +bass drum,27 +basil hawkins,27 +baschyf,27 +baroque,27 +baphomet (ragnarok online),27 +baofu,27 +bankongping,27 +banirou,27 +bangku an,27 +bangeningmeng,27 +bane,27 +bandai daisaku,27 +balancing ball,27 +balance beam,27 +bakusou k,27 +baku (onegai my melody),27 +baikamo (flower knight girl),27 +back arrow,27 +b-gata h-kei,27 +azema,27 +az (kroneko007),27 +ayao77,27 +ayame (senran kagura),27 +aya chan1221,27 +aya-chan (smoke),27 +awa (12687414),27 +avatar (movie),27 +avalo pizarro,27 +ava (a-soul),27 +autumn boar,27 +auro drm,27 +atlantis: the lost empire,27 +athenacg,27 +athena (megami tensei),27 +athena (fire emblem),27 +atelier iris grand phantasm,27 +atbk,27 +atashi no kakedashi yuusha-sama,27 +asuka pyon,27 +astdevir,27 +assa,27 +asp@juken,27 +ashita kara ganbaro,27 +ashishi,27 +asatomjj,27 +asagami (hnt16303310),27 +arumaji (kiss kill lila),27 +aru-sci,27 +artur (fire emblem),27 +artofhuan,27 +arno dorian,27 +armeyer dinze,27 +arko (acucs),27 +ark tr,27 +arima souichirou,27 +ariduka anto,27 +arian rod,27 +ariake (azur lane),27 +aria (schwarza97rw0rd),27 +area aquamarine,27 +arctozolt,27 +araya kei,27 +arawado,27 +araime yasuri,27 +aquamary,27 +ap bar,27 +aonagi hayate,27 +aona masao,27 +aoki reimu,27 +aoiyamagi4,27 +aoi subaru,27 +aoba yukichi,27 +anzu (peace@pieces),27 +ant (fenixant),27 +annyui (cookie),27 +anna kokoro (anko),27 +anmin daiteitoku,27 +anko (w-anco),27 +animal yokochou,27 +angelica (epic seven),27 +angelchama,27 +andoain (arknights),27 +ancient killers (phantom of the kill),27 +anarista,27 +anarchy ptck,27 +anaheim electronics,27 +amon koutarou,27 +amistr (ragnarok online),27 +amigo (hua cao),27 +ameyoshi,27 +americano exodus,27 +ameen naksewee,27 +amazon tree boa (kemono friends),27 +amano rino,27 +amano misaki,27 +amano kotone (ichigo jet),27 +amano ai,27 +amane satsuki,27 +amanagi el,27 +amamiya sora,27 +amamiya shiina,27 +amal-amaru,27 +amakaze sora,27 +amakata miho,27 +amakase minatsu,27 +amai-pai,27 +amagiri dia,27 +amabane nodoka,27 +alphamon,27 +alo (m-o-k-e-k-e),27 +almaria,27 +alisa landeel,27 +alisa ilinichina amiella (cosplay),27 +alielle,27 +alicia viewstream,27 +alice gear,27 +alice fiction,27 +alice (nikke),27 +alec (arc the lad),27 +alarmy,27 +alalen,27 +akutsu mabu,27 +akumu (hiziiiiii),27 +akizuno,27 +akitake seiichi,27 +akisawa machi,27 +akiranime,27 +akino (gokosei),27 +akiaki (mofumo-freak),27 +aki (pixiv57498743),27 +akatsuki reipu,27 +akatsuki rabbit,27 +akatsuki katsuie,27 +akatsuki (spacecraft),27 +akao,27 +akali (cosplay),27 +akakinndaiya,27 +aka no ripika,27 +ajishio (loli king),27 +aiura mikoto,27 +airuko (justduet),27 +airrabbityan,27 +airi (robotics;notes),27 +aimoto rinku,27 +agnimon,27 +agatsumaattsu,27 +after3310,27 +afjc,27 +aesop's fables,27 +adachi eiko,27 +acea n,27 +ababarion,27 +aaoyama,27 +aaaa (gumi niku),27 +a-teru haito,27 +a- -z (b1u49i5nsk),27 +946083d1,27 +8-ball,27 +7dango7,27 +72producer,27 +66 (roro),27 +532,27 +51 (gigamiso),27 +4hands,27 +403 (artist),27 +3dbabes,27 
+2dcg,27 +27 degrees,27 +19 okeke,27 +13844,27 +1000-chan,27 +zzzi gn,26 +zzz zhi he,26 +zwei!!,26 +zum,26 +zubon no onara,26 +zombie loan,26 +zoids wild,26 +zillionaire,26 +zigrock,26 +zhi xixi,26 +zhao yun,26 +zetxune,26 +zerotted,26 +zeroamu,26 +zero two driver,26 +zenobia (xenoblade),26 +zelda ii: the adventure of link,26 +zaqloxxx,26 +zaphylla,26 +zangetsu,26 +zang li,26 +zanak abalonic,26 +z (knkr1025),26 +z.boat,26 +yuzuriha (etra-chan wa mita!),26 +yuutopia,26 +yuuouji ouka,26 +yuunagi show,26 +yuuko (nora0x0),26 +yuuki miyaka,26 +yuu (natsuyasumi.),26 +yutif,26 +yutapo,26 +yusuke oshida,26 +yurt the silent chief,26 +yunikon,26 +yumoto motoyu,26 +yumihara hina,26 +yumiao79,26 +yume no crayon oukoku,26 +yume de yozora wo terashitai,26 +yukkronii (ouro kronii),26 +yukiyukidaihuku,26 +yukiyago,26 +yukion,26 +yukino aguria,26 +yukimi (poco),26 +yukihira furano,26 +yukihiko,26 +yukihana (awa),26 +yukiguni (ykgn),26 +yukichi (bancho99),26 +yuki miku (cosplay),26 +yuki (best010409),26 +yuke yuke!! trouble makers,26 +yukaris,26 +yukai nao,26 +yuga (abubu),26 +yue natsuki,26 +yudepan (yuri no sugata),26 +yucchan (drizzle star),26 +youken,26 +yougasu,26 +you (yawnmgmg),26 +yoshizuna,26 +yoshikoshi (mother 3),26 +yoshikawa miki,26 +yoshi-j,26 +yoruno mahiru,26 +yoru no nai kuni 2,26 +yokoshima (tirimoti),26 +yoko belnades,26 +yokkoisho (evtd8734),26 +yoboshi,26 +yoake,26 +yinanhuanle,26 +yf studio,26 +yayuyoron,26 +yatarime,26 +yata masahara,26 +yasukouchi yoshiko,26 +yashiro ryo,26 +yasaka himi,26 +yaobin yang,26 +yangsan (2991076090),26 +yaminabe (szhal14),26 +yameshoko,26 +yamato transport,26 +yamata no orochi,26 +yam (dr yammy),26 +yajuu no gankou (meme),26 +yaebi (at2.),26 +yachi (fujiyasu0616),26 +yabudatami,26 +yabame yume,26 +y tyano,26 +xxxx,26 +xuefei (snowdrop),26 +xuan chu,26 +xingnai,26 +xiayu93,26 +xiaohan6th,26 +xiaoguang (you can eat the girl),26 +xianming lin,26 +xia ekavira,26 +xi-988,26 +xano,26 +wuyue qinglu,26 +wulfsaga,26 +wslasher,26 +wosero,26 +world (magical drop),26 +wolfram von bielefeld,26 +wm (chawoo1357),26 +wittyz,26 +wirttian,26 +wire fence,26 +wilmarina noscrim,26 +wickebine tres,26 +whitemoor,26 +white innertube,26 +white helmet,26 +white angel,26 +whipping hair,26 +wazuka (wzzc),26 +watanabe ruriko,26 +watanabe kawa,26 +washout008,26 +warlock 3 (sekaiju),26 +waporif,26 +wan mame,26 +wakan tanka mugen,26 +vulcan (ejel2000),26 +vtol,26 +vsi0v,26 +voyager (second ascension) (fate),26 +vlad tepes (eiyuu senki),26 +vivid world (love live!),26 +virgo76612871,26 +vinkyfre,26 +vilepluff,26 +vigilante -boku no hero academia: illegals-,26 +very long beard,26 +vertical-striped footwear,26 +verdurous anima,26 +verdandi (p&d),26 +venom snake (cosplay),26 +veloce visrin,26 +velcozz,26 +vel (kamuo),26 +vegeta (cosplay),26 +varie7k,26 +vantsuki,26 +vanitas (vanitas no carte),26 +valgarv (slayers),26 +valerie (blue revolver),26 +valentina tavolilla,26 +vagabond (elden ring),26 +utatoki,26 +usurai,26 +ushiwakamaru (third ascension) (fate),26 +ushimaki riko,26 +ushiina,26 +urota shimapann,26 +urna,26 +urinal bottle,26 +urami koi koi urami koi.,26 +urakata hajime,26 +uraha (air),26 +ura (mukimeineko),26 +upside-down text,26 +unusualpie,26 +unknownnoname0,26 +unizo,26 +uni (rabbit beat),26 +unfortunate hero,26 +ump9 (the world's melody) (girls' frontline),26 +"ump45 (""just this time."") (girls' frontline)",26 +umigame (dragon ball),26 +umi zenbiraki,26 +umai neko,26 +ukero,26 +ukamaru,26 +uha,26 +udon-udon,26 +uchuu no senshi,26 +uchida fumiaki,26 +uchi no isourou ga sekai wo 
shouaku shiteru!,26 +uc,26 +ubuntu,26 +u-head trainer,26 +u-1212,26 +tyrant,26 +twitter sparkles,26 +twitter bird,26 +twilightend,26 +tweedledee (alice in wonderland),26 +tuning fork,26 +tt (poposujp),26 +tsuzuki kazuhiko,26 +tsunenori,26 +tsune-hime,26 +tsume3mai,26 +tsukiyama sena,26 +tsukiya sakumi,26 +tsujiya okuyasu,26 +tsugumori,26 +tsubasansan,26 +tsubaki (p&d),26 +trundle,26 +trrcmb,26 +trickster (artist),26 +trickstar (ensemble stars!),26 +trashcan lid,26 +transparent bathtub,26 +tranquilizer (bestcenter),26 +tracer (elsword),26 +toyotomi hideyoshi (sengoku basara),26 +tousaka hiyoko,26 +toujou bun,26 +tougenkyo momo,26 +totuka,26 +tornadus (therian),26 +tormod (fire emblem),26 +torisuke (koinohito),26 +tore (ksg666xxx),26 +torayamachi academy school uniform,26 +topgear,26 +tonshi,26 +tongue hold,26 +tonebird,26 +ton (artist),26 +tomoki k,26 +tomoe (queen's blade unlimited),26 +tomiya natsuki,26 +tokonaru,26 +tokiniha netai (pm8sleepy),26 +toketa (toketa15),26 +toiro gawon,26 +togepi egg,26 +toe fu,26 +todoroki rei,26 +toda fuyumi,26 +tobias leviathan,26 +to e,26 +tki,26 +tiuana rui,26 +tinysnails,26 +times square,26 +tigrevurmud vorn,26 +tifa amakura,26 +thomas emily,26 +this is fine (meme),26 +the tower (tarot),26 +the skeld,26 +the omoti,26 +the naked sun,26 +the keeper,26 +the hurting,26 +tetori rina,26 +teruru (teruru0321),26 +teru (grafroller),26 +terrace,26 +tera (aurahack),26 +tepechi,26 +tenrou kunagi,26 +tenkawa akito,26 +tenchi muyou! gxp,26 +tempty (voice actor),26 +teletelo,26 +tekkotsu (tekkotz),26 +tea leaves,26 +te okure,26 +tazawa (odamura),26 +tayutama 2,26 +tawara hiryuu,26 +taue shunsuke,26 +tattoo machine,26 +tatsu wan,26 +tatamiya,26 +tarousanlove1,26 +tapwing,26 +taplaos,26 +tanpi,26 +tanno shii,26 +tank (left 4 dead),26 +tanizakura shidare,26 +tanaka yubiseiakikana,26 +tanaka-san (danna ga),26 +tan (knock up),26 +tami moon,26 +tamanosuke,26 +tamamo no mae (type-moon racing) (fate),26 +tamajam,26 +takoyaki shoujo,26 +takoyaki pan,26 +takobue,26 +tako ashin,26 +taki noboru,26 +takenoko (flamingo garden),26 +takayama akira,26 +takasaki asuka,26 +takarai yua,26 +takahashi urara,26 +takahashi aoi,26 +takagawa sumire,26 +tajima naoto,26 +taisowbukurow,26 +taimanin kurenai,26 +tailyellow,26 +tail tale,26 +tadai nu,26 +tada-kun wa koi wo shinai,26 +tachibana sugane,26 +tachibana ritsuka,26 +t-doll contract,26 +t-55,26 +sys (suisei),26 +sword art online: infinity moment,26 +sweetwitch,26 +sweet reverie,26 +swedish flag print,26 +suzushiro haru,26 +suzumeda kaori,26 +suzuki yuma,26 +suzuke,26 +suzu (user kdex8732),26 +suzu (suzuame329),26 +suu (clover),26 +suttoboke,26 +susuki (flower knight girl),26 +surigoma,26 +surgical scissors,26 +superheroine haruhi,26 +super robot wars x,26 +super plugsuit,26 +sunomono,26 +sunahara shimako,26 +sunagawa yoshiharu,26 +sumitomo,26 +suminohirune,26 +sukuna-bikona (tokoyo no higashi),26 +sukuda mizuo,26 +sukoyasu r,26 +suiruu (yuriusu),26 +sugoidere,26 +sugito akira,26 +sugimotty nova,26 +sugarhigh,26 +sugai,26 +sue (pso2),26 +sudkampsin,26 +succubus (renetan),26 +succubus (oekakizuki),26 +subu art,26 +substitution technique,26 +stygian zinogre (armor),26 +stuart pot,26 +stu diho,26 +strike witches (lionheart witch),26 +street fighter i,26 +strawberry daifuku (food fantasy),26 +strapless dildo,26 +store room,26 +stone (ksorede),26 +stinger (splatoon),26 +stflash,26 +starfruit,26 +star guardian kai'sa,26 +star and crescent,26 +standard manufacturing dp-12,26 +stacked hats,26 +spunky knight,26 +sport girl (saberrung),26 +spoon hair 
ornament,26 +split image,26 +spikemuth,26 +spider-gwen (cosplay),26 +sphinx of giza,26 +sphere-stc,26 +special g (spg),26 +space girl (aetherion),26 +space core,26 +sovetsky soyuz (warship girls r),26 +souzan kurasuke,26 +soul hackers 2,26 +sorrysap,26 +soranana (sorabananasan),26 +sonobe kazuya,26 +sono na ha eros,26 +soma (closers),26 +sogeking,26 +softmax,26 +snowdrop (flower knight girl),26 +sniper team,26 +smol fauna,26 +sman,26 +sleepsack,26 +skill,26 +skadi's seaborn (arknights),26 +six of hearts,26 +sissela (black survival),26 +sirris of the sunless realms,26 +sing&smile (vocaloid),26 +sinad aruatjanapat,26 +silvia aizetto,26 +silverms2,26 +silverbin,26 +silver (eden),26 +signum (nanohanano77),26 +sig sauer 552,26 +siena (moratoriummaga),26 +shuttle (ksb0123),26 +shuryukan,26 +shurock,26 +shuri (9818),26 +shuizao (little child),26 +shu (hokuto no ken),26 +shower cap,26 +shousuke (skirge),26 +shop p,26 +shooing,26 +shizuru (daikoukaizidai),26 +shizukawashi sumi,26 +shishizaru,26 +shishigaj5,26 +shiroxai,26 +shiroserika,26 +shiromantou,26 +shirohanamame taichou,26 +shirogane tobari,26 +shirayuki maho,26 +shiranai love oshiete love,26 +shirakawa mayo,26 +shiraishi nagomi,26 +shiraishi mamim,26 +shiragixx,26 +shira (kunseitamago),26 +shippu man,26 +shion humine,26 +shinta (the-mattyaman),26 +shinonome ryuu,26 +shinonome natsuhi,26 +shinohara takashi,26 +shinohara sera,26 +shinogi k,26 +shinkai kiiro,26 +shinjuku cat,26 +shinba yagi,26 +shimetta masuta,26 +shima udon,26 +shima (aliceanna0518),26 +shikkoku no hono mikado,26 +shiisaa right,26 +shigure1213,26 +shida kuroha,26 +shibuya tomochika,26 +shibasaki kazuha,26 +shi0n krbn,26 +sheska (fma),26 +sheria blendy,26 +shemagh,26 +shawn flowers,26 +sharlona,26 +shantae and the seven sirens,26 +shamko,26 +shakuhachi,26 +shaapu,26 +sexting,26 +seventh happiness,26 +seven of spades,26 +serizawa yoshiko,26 +serenya,26 +seo yoon,26 +sentinel ga koku ni aru,26 +senna (bleach),26 +senko (oshiro project),26 +senjougahara hitagi (cosplay),26 +senguuji yamato,26 +sena (konosuba),26 +semi-perfect cell,26 +selvalanch,26 +self fisting,26 +selene kaguya,26 +sekigahara ei,26 +seirei fantasia,26 +seira (yuki touko),26 +seino (sasakike1305),26 +seijuro shin,26 +seihai (sailor moon),26 +seastar,26 +scyllei,26 +scratching stomach,26 +schoolish girl (idolmaster),26 +scharnhorst,26 +sb (hiratsei),26 +sayaka (ponkichi),26 +saya (blood the last vampire),26 +sawarabi (sawarabi725),26 +saviala,26 +saver (fate/prototype),26 +savannah (yugino),26 +saturday night live,26 +satsuki meguru,26 +satoumizu (j5xsyd9jk),26 +satou masayuki,26 +satom,26 +satoko (papapa sikakici),26 +satoimo (jia64097023),26 +sasha (animal crossing),26 +saraswati (kore wa zombie desu ka?),26 +sarasuty,26 +sara (tales),26 +santyoku,26 +santa fung,26 +sanshirou,26 +sano toshihide ga anata no senyou gengaman ni narimasu,26 +sania (agent aika),26 +sandrum,26 +sandeul,26 +samurai10932,26 +samuneturi,26 +samira,26 +samidare satsuki,26 +sam wells,26 +salamander (vocaloid),26 +sakuyamochi,26 +sakusan (ss-awesome),26 +sakurazuki kira,26 +sakuragi yuzuki,26 +sakura mikan,26 +sakatsuki sakana,26 +sakamoto miko,26 +sakamoto (pompa),26 +sakaki karen,26 +sakaikurinea,26 +saitou atsushi,26 +saitou ako,26 +saint (ragnarok masters),26 +saimin douga de nama iki nama omo ga nama iki suru namahousou ~jitaku,26 +saijou k,26 +saeldam,26 +sachiel (mugen),26 +sabinaok,26 +saber (lapis lazuli dress) (fate),26 +sabashi,26 +s ko,26 +s.claw,26 +s-kan,26 +ryuuzaki sakuno,26 +ryuuzaki (ereticent),26 +ryuukishi07 (style),26 
+ryumaira,26 +ryou (pixiv779953),26 +ryon y0421,26 +ryokuyou (greencosmos),26 +ryochan (papipopi),26 +rye-beer,26 +rusher,26 +rurukuru,26 +ruri ookami,26 +rune venus,26 +rune factory 2,26 +runar,26 +rumil,26 +ruisselait,26 +ruined (league of legends),26 +rudy (brave soul),26 +ruby-eye,26 +ross (senyuu),26 +roppako,26 +rondo (poccal),26 +ronaldo (kyuuketsuki sugu shinu),26 +romary stone,26 +roman buriki,26 +roland (ms pigtails),26 +rokusho,26 +rogue division agent,26 +rogu (log 264),26 +rodney (future seaside admiral) (azur lane),26 +robo misucha,26 +robin (the iconoclasts),26 +robin16,26 +rkmlady,26 +rita (sweethex),26 +rino (sennen sensou aigis),26 +rinneko (rinne paint),26 +rinmeikan girls school uniform,26 +ringo (soul hackers 2),26 +rike lee,26 +rikapo,26 +rigu (3di),26 +riding outfit,26 +rico (fbn3),26 +richard i (fate),26 +rice shower (longed-for scenery) (umamusume),26 +ri cochet,26 +rero rero,26 +rerisa (kyouno),26 +requiem for the phantom,26 +rengoku shinjurou,26 +ren (irotoridori no sekai),26 +ren'ai shimasen ka?,26 +ren'ai saiban (vocaloid),26 +rekishitai hoonoji,26 +reki (lichk),26 +rek'sai,26 +reichi,26 +rei hana (nachis514),26 +regis lucis caelum,26 +redmoa,26 +red sonja,26 +recoilless rifle,26 +realman,26 +rca connector,26 +rayphenos,26 +rathian (armor),26 +rath (fire emblem),26 +rasukii (pamiton),26 +rapbitan,26 +ranni the witch (cosplay),26 +randou rino,26 +randle,26 +ramototu,26 +ramblin' evil mushroom,26 +rald schwarz,26 +rainybluebell,26 +rainbow hair ornament,26 +rai,26 +rafu (motrer1),26 +radral,26 +rad shiba,26 +rabiane (sinisistar),26 +rabbirio,26 +r star,26 +r.h no.1 fuyumomo,26 +quildren (ike eveland),26 +quicksand,26 +queen (fft-0),26 +quatraise,26 +q qree,26 +q.a. kinshachi,26 +pyra (xenoblade) (prototype),26 +pyocomon,26 +purple mittens,26 +puroshimin,26 +pupa,26 +puffphox,26 +puff (go! princess precure) (human),26 +probably noon,26 +prez (star driver),26 +pretty (zoza),26 +powhu,26 +powered gm cardigan,26 +power pro kun pocket 10,26 +potters wheel pose,26 +postblue98,26 +poseidon (shuumatsu),26 +poppu,26 +popolocrois,26 +pool party zoe,26 +pool party miss fortune,26 +ponkotsu musume no nichijou,26 +ponishi.,26 +pome charo,26 +polpo,26 +poking ass,26 +poketto,26 +pokemon the movie 2000: the power of one,26 +poke bean,26 +podone,26 +plumeria (flower knight girl),26 +pkp (l.a.d.y) (girls' frontline),26 +piranosuke,26 +pipin try,26 +pino 0 0,26 +pink santa costume,26 +pink-chan (petenshi (dr. vermilion)),26 +pin karo,26 +pimple,26 +pietro maximoff,26 +pieces / wataridori no somnium,26 +picolette xiii,26 +pichu (cosplay),26 +phinci,26 +philtomato,26 +phantasy star i,26 +peter strasser (chronos's kalendae) (azur lane),26 +peridot (jewelpet),26 +peppa pig,26 +penis in eye,26 +pendulum (game),26 +pendreon,26 +pekakiu,26 +peeler,26 +pechika (mahoiku),26 +pechi (peeechika),26 +peacock (p-location),26 +paya (alunair),26 +partio,26 +parkjinsuky,26 +park sung woo,26 +para-medic (mgs3),26 +papiko (papiko8901),26 +panzer dragoon orta,26 +panye,26 +pandra,26 +panda (tekken),26 +pan de peace!,26 +pan (xeno) (dragon ball),26 +palm strike,26 +pakupaku choppu,26 +paisen,26 +p.a. 
works,26 +oziozi kamuy,26 +ozawa tomohiro,26 +oyamada kouta,26 +ovan (.hack//),26 +ousawa kanata,26 +ouka (kazuki seihou),26 +ouhina,26 +otsuo,26 +ototoi (eevees813),26 +otono (bkt4b),26 +otonashi maria,26 +otonashi hatsune,26 +otome riron to sono shuuhen: ecole de paris,26 +otaut-r,26 +oshiete re:maid,26 +oshiego ni kyouhaku sareru no wa hanzai desu ka?,26 +orta,26 +orokudesu,26 +orihara sachiko,26 +orichalcum reycal duo,26 +oribe tsubaki,26 +oribe aoi,26 +oresky,26 +oreko,26 +ore no ue de agaku rokunin no togime,26 +ore dake haireru kakushi dungeon,26 +opa-opa,26 +oozora haruka (danball senki),26 +ootsubo yuka,26 +ootani momoko,26 +oosawa yayoi,26 +oogure maiko,26 +onyang,26 +onna shachou,26 +only haruka,26 +onemine nene,26 +oncidium (flower knight girl),26 +omurice (roza4957),26 +omul,26 +omocha-san,26 +ommmyoh,26 +olteca (kamen rider revice),26 +olha (ys),26 +olg,26 +old-night,26 +olbern,26 +okyurita,26 +okutani toro,26 +okuro zmzm,26 +okota (pixiv),26 +okayama shinako,26 +ojyomu,26 +ojiya (fueru gohan),26 +oimo mushi,26 +off-color semen,26 +oda nobunaga (swimsuit berserker) (third ascension) (fate),26 +octopus print,26 +octopus hair ornament,26 +ochanomizu ran,26 +ocean prince,26 +obsidian (houseki no kuni),26 +oastlv,26 +o-mars,26 +nyto iso (girls' frontline),26 +nyromide,26 +nyan nyan nyan (idolmaster),26 +nyan-nyan dance,26 +nyago,26 +nuwara eliya,26 +nush (xter),26 +nuruko (nuru55555),26 +nurse angel,26 +nurikabe (character),26 +numahito,26 +nukotama,26 +nukogami (minniecatlove21),26 +nuira,26 +nue0,26 +nuavic,26 +noronosuke,26 +norimaki gajira,26 +nona (death parade),26 +non (nonbiriya mini),26 +noli-pee (little stupid),26 +noix tranche,26 +nodoka (hot spring) (blue archive),26 +noddy (kirby),26 +nochatea,26 +nobunaga hazama,26 +noboru gongenzaka,26 +no scarf,26 +no cardigan,26 +nixie tube,26 +nishizawa saburou,26 +nishiyama serina,26 +nishi kita,26 +nire hikari,26 +nini tw99,26 +ningzzz,26 +ninchan,26 +nikki (miracle nikki),26 +niki (nikism1987),26 +niizato aoi,26 +niijima makoto (cosplay),26 +niiichi 21pk,26 +nier (old),26 +niduca (hio touge),26 +nic (kevin),26 +nguyen tam lee,26 +ng sam,26 +netsuzou trap,26 +netaballerina,26 +neo zeong,26 +nene (toji no miko),26 +nemesis (girls' frontline 2),26 +nekomarieru,26 +nekoichi,26 +nekohanemocha,26 +nekoashifumare,26 +negative frames,26 +nc empire (circle),26 +navio,26 +natural wind,26 +natubudou,26 +nattsu (nicoseiga),26 +natsusaki yomi,26 +natose,26 +nasuka gee,26 +narumiya inori,26 +narukami aki,26 +narasaka touru,26 +napoleon (one piece),26 +naotaka (bh5fnkbd),26 +naonao (sherry),26 +naoki (xhju8282),26 +nano (nazuna0512),26 +nannann,26 +nanatsume,26 +nanasawa yuni,26 +nanakamado anno,26 +namasomi,26 +namakarashi,26 +nakatani seiichi,26 +nakaryo0404,26 +naiya,26 +nagi (kannagi) (cosplay),26 +nagai gou (style),26 +nac0n,26 +mzz,26 +myuka (kyouka jinrui amunero),26 +myomu,26 +mwo imma hwag,26 +mwgi,26 +mv (spacecraft),26 +mutugorou u,26 +muttan (ashiru-f),26 +murimajimuri,26 +muraosamu,26 +mura (mapisha),26 +munakata misae,26 +mumumuka,26 +muiko i,26 +mugi (mugimugi 9kv),26 +mr. 
karate,26 +mp-443 (girls' frontline),26 +moyuchocolats,26 +moyashi (oekaki touhou),26 +mouri teru,26 +mount rushmore,26 +moumoku pen gin,26 +motoharu (danshi koukousei),26 +mosou keito,26 +moses sandor,26 +moriya ako,26 +morioka moriko,26 +morinozuka takashi,26 +moriko kyoho,26 +mordecai (fire emblem),26 +moonlit bear (vocaloid),26 +moofie (vtuber),26 +monster collect,26 +monodevil,26 +mongolian spot,26 +momota ro5555,26 +momose rin,26 +momomo udameda,26 +momiahair,26 +mokere shikkan-sha,26 +mokaffe,26 +mojakkoro,26 +model a (mega man),26 +mochinoki,26 +moccasins,26 +mob3,26 +mo geng,26 +mo-mo,26 +mo-mantai,26 +mmr magazine mystery chousa han,26 +mk/ret,26 +mizuz,26 +mizuoka magu,26 +mizuno takahiro,26 +mizuno shinya,26 +mizukoshi moe,26 +mizuki (anda),26 +mizugame,26 +miyazawa midori,26 +miyazaki yukichi,26 +miyayoshi (bricola),26 +miyanogi jiji,26 +miyama tsubame,26 +miyakura haruto,26 +miyahara mimikaki,26 +mitsumata,26 +mitsubasa miu,26 +mitsu336,26 +mitomaton,26 +mito w,26 +mitasarenai pale color (project sekai),26 +mitani kanae,26 +misty hollow,26 +missile (ace attorney),26 +misato miyu,26 +mirii oreano yakumo,26 +miranda lotto,26 +mira shamaliyy,26 +minior (green core),26 +minigob,26 +ming qian luo,26 +minazuki sho,26 +minazuki (aqua-city),26 +minamo iruka,26 +minamino tsubasa,26 +minami rika,26 +minami hinata,26 +minako-san,26 +minakami kurena,26 +min suha,26 +mimuji (shirobako),26 +mille-feuille,26 +milla maxwell (maid),26 +milkytiddyboy,26 +miles-df,26 +mikejima madara,26 +mike doscher,26 +mikami riku,26 +miin (toukotouya),26 +mii (yuureidoushi (yuurei6214)),26 +miharu (ringo sui),26 +midori no ruupe,26 +midori hemo,26 +mido006,26 +midarezaki yuuka,26 +mezamero,26 +meu (spectral souls),26 +metroid: samus returns,26 +meteor (yamashou),26 +metaphor (artist),26 +metal hairband,26 +metal detector,26 +messer (mobile suit),26 +merusuke,26 +merumo,26 +meru rumi,26 +meringue,26 +meremero,26 +meowwniz,26 +meoon,26 +menad shisei,26 +memusu,26 +memory342,26 +melt (artist),26 +mekeko,26 +meisai,26 +megumu,26 +megumi (piyo7piyo9),26 +meganeno dokitsui,26 +megami kouhosei,26 +mega man star force 3,26 +mega man 5,26 +medusa (monster girl encyclopedia),26 +mechanical broom,26 +mecha kaku man,26 +mcr,26 +mchiefy,26 +may wong,26 +maxiart,26 +maws (splatoon),26 +matsuo mono,26 +matou sakura (street choco-maid),26 +maternity dress,26 +matcha7611,26 +match (scp115),26 +matado,26 +masturbation from behind,26 +master mummy (arms),26 +masso nullbuilt,26 +masamori ikemo,26 +masaki (star8moon),26 +masai no senshi,26 +maro no kanja wa gatenkei 2,26 +mario & luigi: superstar saga,26 +marina liteyears,26 +margaret (abubu),26 +marco polo (the queen of hearts) (azur lane),26 +maou skun,26 +maosanmu,26 +manryou (flower knight girl),26 +manjji,26 +manglifer,26 +mandaman,26 +manbou no suiso,26 +manannan mac lir (second ascension) (fate),26 +manami030,26 +man in the mirror (stand),26 +mamaito,26 +malinda (shingeki no bahamut),26 +male swimwear pull,26 +makusu,26 +makoto1009,26 +makka na kedamono,26 +makabe midori,26 +makabe masamune,26 +majima (lycoris recoil),26 +maianh,26 +mahou shoujo wo mucha kucha taoshitai,26 +maguro (guromaguro),26 +magure senshi,26 +maguon,26 +magnamalo,26 +magical mirai miku (2014),26 +magdalena kaczynski,26 +maco (soliddevil),26 +machine-g.a.p.,26 +maccha xxxxxx,26 +maagori,26 +m eme,26 +m950a (concert diva!) 
(girls' frontline),26 +m19 (artist),26 +lyuka,26 +lynx (animal),26 +lynn loud,26 +lyk wuyan,26 +lyib,26 +luruko61,26 +lupicam,26 +luoye,26 +luosicheng 5,26 +luo zi,26 +lunaris filia,26 +lunar (lunar 815),26 +lum berry,26 +lulumiya (abbb1233),26 +ludwig's holy blade,26 +luck gandor,26 +lucier (7th dragon),26 +lucarios,26 +lr-300,26 +lpleader,26 +lovely aina-chan,26 +louise (dragalia lost),26 +lost echoes,26 +lost driver,26 +lorna (shining hearts),26 +lopuii,26 +longhorn lance,26 +loladestiny,26 +lloyd (granblue fantasy),26 +lizlett l. chelsie,26 +liver spots,26 +liu liu,26 +lis,26 +liquid halo,26 +lip (lih8),26 +lion paw,26 +lion cub,26 +linnkou,26 +ling qingzhu (wu dong qian kun),26 +linda cube,26 +lilith (the binding of isaac),26 +liliana vess,26 +lilco,26 +light bow shekinah,26 +liger zero,26 +lieutenant dan,26 +liely,26 +lielac,26 +lida romero,26 +libertas (nyori),26 +li chunfu,26 +li-e,26 +lexus,26 +lewdlilies,26 +levia,26 +levasol defense corps,26 +lesskiss,26 +leo (warzard),26 +leo...,26 +leni (under the moon),26 +lemonade alpha,26 +lelouch lamperouge (cosplay),26 +leila (fire emblem),26 +leidami,26 +legless,26 +leaning tower of pisa,26 +league staff (pokemon),26 +lazy dungeon master,26 +lawn,26 +lautes alltags,26 +latte (klimspree),26 +lasgun,26 +laon,26 +lani (ff9),26 +langod,26 +landsknecht,26 +landing craft,26 +lancer mina,26 +lalan fengchu,26 +lajhen2651,26 +lacusblade,26 +la brava,26 +l4no,26 +l.wolf,26 +kyuri (suika),26 +kyoushoku soukou guyver,26 +kyochuu rettou,26 +kyler (sweethex),26 +kv-1 (ash arms),26 +kuukai (adexi),26 +kusunokiokonogi,26 +kusari no shoujo (vocaloid),26 +kusanagi chouen,26 +kuruto.,26 +kurusu nono,26 +kurumi rumi,26 +kurotori chiyoko,26 +kuroton@9610,26 +kuroshiba kanae,26 +kurosawa itsuki,26 +kurosaki rendou,26 +kuroneko86,26 +kuroki hiromi,26 +kurokagami ryuuko,26 +kuro kichi,26 +kuro emimi,26 +kurii chasuke,26 +kurama (urusei yatsura),26 +kurama-chan ni guitte shitara pisha tte sareta,26 +kurahika,26 +kunitachi rinka,26 +kunimura kotone,26 +kun52,26 +kumo ni notte,26 +kumakumatc,26 +kumagai yuuko,26 +kuma (kuma1 kancolle),26 +kum haehyun,26 +kul (ngsensei),26 +kuku,26 +kujiragami no tearstilla,26 +kuji kanesada,26 +kugehi,26 +kuaru (okamokomon),26 +kt cano,26 +krogan,26 +kozeni isari,26 +kowata akane,26 +koutetsushin jeeg,26 +koume (hanamaru youchien),26 +kouen,26 +korin (ra-sky07),26 +konohana inori,26 +kono healer mendokusai,26 +konghai shanren,26 +konbini dmz plus!,26 +komori-san,26 +kokutei n,26 +koi0806,26 +kohuseigetsu,26 +kogomo,26 +kogara toto,26 +kogara (frenzied kotori),26 +koga taiga,26 +kodatino,26 +koala (kemono friends),26 +kkomdastro,26 +kizuna,26 +kizu,26 +kizmel,26 +kiyu (queue),26 +kiwami133,26 +kiwakiwa,26 +kitsuneco,26 +kitsune no botan (flower knight girl),26 +kito 3 tyoki-tyoki,26 +kita e,26 +kisou nowora,26 +kishiro yukito,26 +kishida nica,26 +kischur zelretch schweinorg,26 +kisaragi saki,26 +kisaki (strange s k),26 +kirobaito,26 +kiriya naoki,26 +kirito (cosplay),26 +kirishima mizuki,26 +kirishima ikuya,26 +kirisame tarou,26 +kirin (company),26 +kiriki alice,26 +kirarigaoka middle school uniform,26 +kintoki-douji,26 +kinpun (fgxdw447),26 +king penguin (kemono friends),26 +king of clubs,26 +kinagirea,26 +kimven (wenzisama),26 +kimi ga yobu megiddo no oka de,26 +kim leeyoon,26 +killia (disgaea),26 +kikan bakumatsu ibun last cavalier,26 +kiharatta,26 +kifune mio,26 +kiba,26 +kia (tumblr),26 +kia (sekien no inganock),26 +khui,26 +khj,26 +kevin hong,26 +ketanbakar,26 +keshizumi,26 +kerberos,26 +kendo mask,26 +kellogg's,26 +keiji 
asakawa,26 +keidai 3,26 +kei s01,26 +keb00b,26 +kazuna,26 +kazuki hana,26 +kazuki (kazu-king),26 +kazufumi (kaz-newt),26 +kazra,26 +kawasumi (sanzen'in matora),26 +kawaragi yuuki,26 +kawagami raito,26 +katoshigu,26 +kasuki masato,26 +kashi (number),26 +karuushi,26 +karukozaka high school uniform,26 +karipa,26 +karen (artist),26 +kareido (kaleidoscope),26 +kapimaru,26 +kaoru kozue,26 +kanou aogu,26 +kanou aira,26 +kanoko33,26 +kano (kanograph),26 +kanna (inuyasha),26 +kaneko shizue,26 +kaneki yushi,26 +kaneda (aqid),26 +kanamori maria,26 +kanami33,26 +kaname mahiro,26 +kamui aya,26 +kamochomedesu,26 +kamo noritoshi,26 +kamitsuki,26 +kamishiro mutsuki,26 +kamisama to unmei kakumei no paradox,26 +kamioto musu,26 +kamiki akinari,26 +kamijou asahi,26 +kamijororo,26 +kamezou (kame-zo),26 +kamen rider zo,26 +kamen rider jack revice,26 +kamen rider geiz,26 +kamen rider blade (king form),26 +kakouen,26 +kakizaki hayao,26 +kaiyuna,26 +kai shiden,26 +kagura (anomalo-anima),26 +kagkfc1z,26 +kagato (artist),26 +kaden shoujo,26 +kaden (kuzek),26 +kachoo,26 +ka ji,26 +k2 (before dawn) (girls' frontline),26 +k.g (matsumoto zo),26 +juu mensou (vocaloid),26 +jushoro,26 +jure of healing,26 +junsui (omori0310),26 +june (semen sprinkler j),26 +journey to the west (1986 tv series),26 +jounoin kaho,26 +jorougumo (youkai watch),26 +joi kun (senzai hiyori),26 +john price,26 +joe okada,26 +jitsuma,26 +jiron amos,26 +jiro-knightraider,26 +jinrouki winvurga,26 +jinnouchi wabisuke,26 +jinguuji mari,26 +jin kisaragi (cosplay),26 +jihl nabaat,26 +jiangshi (chanta),26 +ji-yoon (jourd4n),26 +jessica kaios,26 +jessica de alkirk,26 +jeri20,26 +jee-hyung lee,26 +jeanne (greenmarine),26 +javelin (operation: pillow fight!) (azur lane),26 +jam (zamuchi),26 +jakou nezumi,26 +jaguar (car),26 +jagdtiger,26 +jaegan,26 +jacques de molay (saber) (fate),26 +jacqueline (show by rock!!),26 +jack heart,26 +jabuchi you,26 +izumo-ss-yoshitunedenn,26 +izumi mogu,26 +izru,26 +izayoi-saki,26 +iyojima anzu,26 +iwako (eiken3kyuboy),26 +iwaizumi hajime,26 +ivioss,26 +itsumo no you ni love & peace!!,26 +itooooofu8282,26 +ito lab,26 +itadaki shinji,26 +isumi michiru,26 +isshiki seiran,26 +isobe eiji,26 +isis-chan,26 +ishizuki mana,26 +ishizaki miwako,26 +isayama hajime (style),26 +isaf,26 +isabella (seiken densetsu 3),26 +iroidori4422,26 +iroha-kuro,26 +iria (iria zeiram the animation),26 +ioko,26 +inuu ruru,26 +inuko (ink0425),26 +inuboshi,26 +introduction,26 +inspecting,26 +inn,26 +ink (303682546),26 +infinite justice gundam,26 +ine (vtuber),26 +inception,26 +inbit,26 +inaba tsukuyo,26 +imsofckinlost,26 +imouto no seiiki,26 +imitation lover,26 +ilsa34660285,26 +iken,26 +ike ko,26 +ihara natsume,26 +ignitrix,26 +igni suu,26 +idiot sandwich (meme),26 +ichinose yuri,26 +ichikawa yoshiyuki,26 +ichijou karen,26 +ichigo (daibouken! 
yukeyuke osawari island),26 +icarus (nereid's discovery) (azur lane),26 +iberiko yuri,26 +ia-lu (kemono no souja erin),26 +i (yunyuniraaka),26 +i-19 (departure's gentle breeze) (azur lane),26 +hypoxis,26 +hyona elatiora,26 +hyattlen,26 +humio (oriba),26 +hukii,26 +hug ff14,26 +huai diao me,26 +hu (saimens),26 +houou rinka,26 +houki (majo no tabitabi),26 +houjou teppei,26 +houjou mamushi,26 +hotline miami 2: wrong number,26 +hotel yamato,26 +hostess,26 +hoshizora rin (cosplay),26 +hoshimiya yashiro,26 +hoshikuzushi,26 +horizon (sushi0126),26 +horin,26 +horie ryuu,26 +horace (skullgirls),26 +hoodie pull,26 +hood grab,26 +honeydew mei,26 +honda rei,26 +honda asuka,26 +holy alina,26 +holographic horns,26 +hogeroh,26 +hogarth pennywhistle gilligan jr.,26 +hochikisu,26 +hiwatari,26 +hitsuji nata,26 +hitori (edge),26 +hitohira (shiroringo48),26 +hisame (gocbu),26 +hiru0130,26 +hironoshousei,26 +hirokawa takemi,26 +hirayama ityu,26 +hirasawa meio,26 +hirakana,26 +hiraga0613,26 +hinoyama ena,26 +hino miwa,26 +hino akira,26 +hinase kanoto,26 +hinageshi (amaetai hi wa soba ni ite),26 +hinacalibur,26 +himuro rabi,26 +himukai yuri,26 +himono hinata,26 +himey,26 +himeno sena,26 +hikonyan,26 +hikaru (parodius),26 +hiji,26 +highvoltage,26 +highmore (arknights),26 +higashiyama shou,26 +higashi,26 +hibren,26 +hibiki yoiko,26 +hibiki kohaku,26 +hiba manaka,26 +hiba (jun),26 +hexunart,26 +hero (sekaiju),26 +henna,26 +hello kitty (character) (cosplay),26 +heki kiri,26 +heartbee,26 +headphones for animal ears,26 +head wreath removed,26 +haydee (haydee),26 +hayasui (kancolle) (cosplay),26 +hayashi (nnnsf),26 +hayami tetsu,26 +hayama kouichi,26 +hayake (chenran),26 +hay fever,26 +hatsune speed: hatsune miku roller skating music,26 +hatsune miku no gekishou (vocaloid),26 +hatchin morenos,26 +hatake hukuro,26 +hasumi shizuko,26 +hasuhasuhasu0127,26 +hashibuto,26 +harusawa yoshino,26 +harukan tiusu,26 +haru4aki,26 +harlequinwaffles,26 +harkonnen (gun),26 +harikono,26 +harigane mutsumi (harigane623),26 +harawata,26 +hara yumi,26 +happy mask salesman,26 +hanrei (dqncncilust),26 +hanei rin,26 +hand pump,26 +hanasakichu,26 +hanamizawa q-tarou,26 +hanakomiti,26 +hana (module),26 +hamilundenongdizhe,26 +hamhsi miyar,26 +hamel,26 +hal-py,26 +hakaiou: gaogaigar vs. betterman,26 +haine (summertime render),26 +hahaha,26 +hagi neco,26 +haconeri,26 +hachijou ikuko,26 +habaki,26 +haar (fire emblem),26 +h0saki,26 +gyarike,26 +gwxx3435,26 +gusto gulldo,26 +gurongi,26 +gurihiru,26 +guri,26 +gurdurr,26 +gundam vidar,26 +gundam burai,26 +gundam age-2,26 +guitaro (yabasaki taro),26 +growlanser iii,26 +grim (twisted wonderland),26 +green track suit,26 +green smoke,26 +gravy boat,26 +grandmastag,26 +gram (muneneko),26 +graf spee (warship girls r),26 +grady sisters (the shining),26 +grace blackberry,26 +gpnet,26 +goutokuji kayo,26 +gotou masahiro,26 +gotcha force,26 +gosei sentai dairanger,26 +gosegu,26 +gorokyu,26 +gorilla-shi,26 +goofy (goldgoofy357),26 +gongitsune (gongitune2),26 +gom (kooskoo),26 +god razor,26 +goblin (final fantasy),26 +go yasukuni,26 +gnlo,26 +gj,26 +giselle gewelle,26 +girls und panzer gekitou! 
maginot-sen desu!!,26 +giren,26 +gintokyhenry,26 +ginnyo,26 +gingham background,26 +gina dickinson,26 +gimicalmas,26 +gigantamax duraludon,26 +gien,26 +gggglaze,26 +get9,26 +gensou otome no okashi na kakurega,26 +gennosuke,26 +gear shift,26 +gangut (imposing warden) (azur lane),26 +gamushiro (souren),26 +gamlin kizaki,26 +gamel,26 +gals rock ambivalence (idolmaster),26 +fysr,26 +fuu-chan (precure),26 +futakabe,26 +futaba rentarou,26 +futa with newhalf,26 +furioso,26 +fuonon,26 +funako (newggo),26 +fumiyomogi,26 +fullbban g,26 +full burrrrrrst,26 +fukurou (hukurou),26 +fujiwara truffe,26 +fujiwara (suzumiya haruhi),26 +fujita asagao,26 +fujisaki (saikin yatotta maid ga ayashii),26 +fujioni,26 +fujimaru mamenosuke,26 +fuji tooya,26 +fuchida kyou,26 +frilled curtains,26 +freer,26 +frederica (sennen sensou aigis),26 +francis drake (stormy seas outfit) (fate),26 +francesca lucchini (cosplay),26 +fox udon,26 +four of diamonds,26 +forutsu,26 +forever (jojo),26 +forehead beam,26 +forced kiss,26 +for the barrel,26 +foot smother,26 +foaming waves,26 +flowerchild ueda,26 +flareza,26 +flambe,26 +fire poker,26 +fio88,26 +finger to another's nose,26 +finch,26 +fifteen (katana zero),26 +fetefeteli,26 +fen bang laoda,26 +female brawler (disgaea),26 +felix (golden sun),26 +felice qaddaf,26 +fc barcelona,26 +fauna,26 +fatui pyro agent (genshin impact),26 +fatlulu (1008),26 +fatima betrorum,26 +fantasyxing,26 +fantasy zone,26 +fantastic children,26 +fang (fairy fencer f),26 +fang (cruciata) (arknights),26 +fancyark,26 +fan (20110507),26 +familiar-possessed - wynn,26 +fallschirmjager,26 +fail whale,26 +fabled grimro,26 +f-104 starfighter,26 +ezraqi,26 +ex-rika,26 +ex-arm,26 +evol blackhole,26 +evo 3 (girls' frontline),26 +eve (blaster master zero),26 +eva 05,26 +eureka seven: pocket ga niji de ippai,26 +etra (etra-chan wa mita!),26 +et gnsn,26 +esthoric,26 +esecool,26 +eris (shadowverse),26 +equipment screen,26 +epeulu (ate5424),26 +enu (spinal),26 +enomoto (luck-by-luck),26 +energy axe,26 +emuen,26 +empress,26 +emily dyer,26 +emil1030 blue,26 +emi (green wave),26 +elysian (granblue fantasy),26 +elsa la conti,26 +elphe,26 +elita one,26 +elferan,26 +elesa (sygna suit) (pokemon),26 +ekakiuo,26 +eitoman,26 +eiscue (noice),26 +eir,26 +eien no 24-sai no shakai hito,26 +ehime mikan,26 +eden no ori,26 +echolocator (splatoon),26 +ebisque,26 +ebipon,26 +ebino mei,26 +ebina hidekazu,26 +dymao,26 +dvdarts,26 +dusa (hades),26 +dunn smith,26 +dungeon maker,26 +dryad (terraria),26 +drowzzi,26 +drias,26 +draug (fire emblem),26 +dragonfly wings,26 +dragon knight 4,26 +doujima daigo,26 +doroshii,26 +doneen69,26 +dokonjou (odossan),26 +dog treat,26 +divel qree,26 +dith ytk,26 +distillation column,26 +digimon adventure: bokura no war game,26 +dice (dharu riser),26 +dia (yvirus68),26 +dharu riser,26 +devo,26 +deutschland (demon princess' dark hour banquet) (azur lane),26 +destiny (ishida),26 +dental (dentalsan),26 +demonio (elsword),26 +delano-laramie,26 +dela delon,26 +deatte 5-fun wa ore no mono!,26 +dear stage,26 +dead rising 1,26 +davi (destiny child),26 +datemegane,26 +databook,26 +dassault rafale,26 +dark rouge,26 +dark eldar,26 +darashinai imouto ni itazura shitemita,26 +daphne (last origin),26 +danshi koukou valentine (meme),26 +danno gs,26 +dankalaning,26 +daniwae,26 +dangerousbride,26 +dangerous zombie level x,26 +damagefloor,26 +daitoku junna,26 +daisy duck,26 +daisy (pokemon),26 +daisuki na sensei ni h na onedari shichau omase na boku no/watashi no puni puni,26 +daishou,26 +daily (daily178900),26 +daiku kenzaburou,26 
+dahlia (xenoblade),26 +dae (dog-of-maou),26 +d-n,26 +cynthia (sygna suit) (pokemon),26 +cx4 storm (girls' frontline),26 +cute potato (cute potato ner),26 +cursor (medium),26 +cure tender,26 +crystalline,26 +crusoe,26 +cruise ship,26 +crotch mousepad,26 +crotalaria,26 +crossbow devil (chainsaw man),26 +crossbone gundam x-3,26 +cross yuki,26 +crevice,26 +cray,26 +cradle-song,26 +cpt (crazy raccoon),26 +cpieng,26 +courage (character),26 +coupytwo,26 +cool&create,26 +constellation hair ornament,26 +conrad (fire emblem),26 +comra,26 +comiket 78,26 +com kom,26 +color 73,26 +colette belrose,26 +coin slot,26 +coffeebeanbrush,26 +coffee tart,26 +coffee siphon,26 +codename696,26 +code: dragon blood,26 +coby,26 +clover (totally spies),26 +clothes only,26 +clothes on wall,26 +closed curtains,26 +cloire clover,26 +clitorim,26 +clear file,26 +claxton (warship girls r),26 +clauncher,26 +claudia bruford,26 +class,26 +clara (claris),26 +cipozhong yundepeitela,26 +cif,26 +chuchumy (ishiyumi),26 +chris (babo),26 +chorin,26 +chocolate strawberry,26 +chocolate cosmos (flower knight girl),26 +chocoball,26 +chloe (kuroinu),26 +chino ukana,26 +chino kawashiku,26 +chikushi nitouhei,26 +chikomayo,26 +chikaoka sunao,26 +chi wa,26 +chi (character),26 +chest (furniture),26 +cheru (sinkai ringo),26 +chengzhineixihuanxiaogou,26 +chen lio,26 +cheeseko,26 +cheadle yorkshire,26 +chaso (konshin),26 +charmy bee,26 +chama (1480),26 +chakoru,26 +chaamii,26 +cetta (cettadvd),26 +ceroblitz,26 +ceres (shingeki no bahamut),26 +centurii-chan,26 +celia alde,26 +ceilinginmyroom,26 +cavalier of the abyss,26 +cat sidhe nekoko,26 +cat helmet,26 +cat choker,26 +cat burger,26 +castlevania: circle of the moon,26 +cassius (granblue fantasy),26 +cars (movie),26 +carrie (sennen sensou aigis),26 +caron (higyaku no noel),26 +cardia beckford,26 +canned tuna,26 +camouflage background,26 +california king bed,26 +caesar anthonio zeppeli (cosplay),26 +cacaco,26 +c.z.,26 +byefrog,26 +byakudan midori,26 +butter run,26 +burn-up w,26 +burial blade,26 +burakku mutou,26 +bukui shi wo,26 +bugie,26 +budouya,26 +browning auto 5,26 +brown bandeau,26 +brothers conflict,26 +broom hatter,26 +broken spear,26 +broccoli (ohasi),26 +brick oven,26 +bradamante (festival outfit) (fate),26 +brachiosaurus,26 +bow camisole,26 +boru-boru,26 +bordeaux black (voice actor),26 +boomer (left 4 dead),26 +bookseve,26 +bonjiri (torippo222),26 +bongo drums,26 +bodysuit aside,26 +body (bacoborn),26 +bodai,26 +bochi (yamakan),26 +blue badger,26 +blue-and-yellow macaw,26 +blouson chiemi,26 +bloodrayne (videogame),26 +blood stalk,26 +blonde girl (sumiyao),26 +blonde girl (okpriko),26 +bleach: memories of nobody,26 +blanko!,26 +blanc (ameto yuki),26 +blackjack table,26 +black selena,26 +black iron great sword,26 +bistro cupid 2,26 +bistro cupid,26 +bisexual flag,26 +birdie wing: golf girls' story,26 +bird scarer,26 +binetsu kara mystery,26 +billy coen,26 +billy (gyee),26 +bigroll,26 +bexercube,26 +berrypop,26 +beniyuki pangya,26 +benimaru,26 +beni0012,26 +ben (ahan uhun 345),26 +belt bracelet,26 +behindxa,26 +beetle horn,26 +bealphareth,26 +bbci,26 +baymax (cosplay),26 +bathym (housamo),26 +batavia princess,26 +baron of hell,26 +baritone saxophone,26 +bardiche (scythe form),26 +bapio,26 +bandaged ankle,26 +banbon,26 +bamboozler 14 (splatoon),26 +ballot,26 +ballista,26 +baku (ff9),26 +baked potato,26 +bai kongque,26 +baharu,26 +back pain,26 +b nosk101,26 +azya,26 +azurcentauri,26 +azumi haruhito,26 +azuma sara,26 +azee gurumin,26 +azana shiyuga,26 +ayu (sweetfish man),26 +ayato mabu,26 
+ayasa,26 +ayamori miyako,26 +ayakadegozans,26 +axis (monori rogue),26 +awooo,26 +awane kokoro,26 +auto (mega man),26 +auraaa,26 +auo123,26 +atlurkabuterimon,26 +atk7,26 +ati (sekien no inganock),26 +athena av,26 +ateru,26 +atelier judie,26 +asta (asicah),26 +assisted paizuri,26 +ask (dreaming cat),26 +ashitaba kemo,26 +ashita no sakuya,26 +ashika (yftcc948),26 +asano (asamusuko),26 +asamiya shiina,26 +asada ryou,26 +asa kusa 99,26 +artwolfaja,26 +arte (evillious nendaiki),26 +arsmagna,26 +arsloid,26 +ars magna,26 +aromatisse,26 +armory,26 +armored collar,26 +ariori haberi,26 +arima keitarou,26 +ariatorai,26 +arc draws,26 +arakawa,26 +aquaman (series),26 +aqua sash,26 +aqua-lia,26 +apple (suikoden),26 +apollo (kaminomi),26 +aphex twin,26 +aozaku (hatake no niku),26 +aone hiiro,26 +aoiakamaou,26 +aoi kiriko,26 +aoi aruma,26 +aoharuto,26 +aoba (aunana),26 +aoaoaoao (baiyanheibing),26 +ao no rupika,26 +anya hepburn,26 +anubisu-no-sinpan,26 +antique telephone,26 +anta baka?,26 +ansem seeker of darkness,26 +animist,26 +angry german kid,26 +angry dog noises,26 +angel daisy,26 +ange (granblue fantasy),26 +anata no shiranai kangofu,26 +anastasia (under the same sky) (fate),26 +anastacia of astora,26 +amyucheu,26 +amerika juu pan,26 +american flag swimsuit,26 +amepati,26 +ame999,26 +amber (5 22 lili),26 +amattle,26 +amarabi (tobira),26 +amane tari,26 +amane a (007 uiro),26 +amamiya (re-amamiya),26 +amaki ikuma,26 +amakawa ginga,26 +amaimochi,26 +amagi manami,26 +am1m,26 +alyx vance,26 +alter ego conjurer (granblue fantasy),26 +alraune (monster girl encyclopedia),26 +alopias,26 +almira,26 +aliza (ragnarok online),26 +alister agrew,26 +alina james,26 +albus (skullgirls),26 +al guang,26 +al (arupaka),26 +akizuki kei,26 +akira-kun (ishiyumi),26 +akimotsu (akim x),26 +akiiro,26 +aki toshi,26 +akelp,26 +akatsuki hayane,26 +akashi (akashi's in the red nya!) 
(azur lane),26 +akaneiro no kyoukaisen,26 +akane (cookie),26 +akane (akane0012),26 +akamizuki (akmzk),26 +akabuchi megane,26 +ajula wachula,26 +aisha (shironeko project),26 +aisarenakute mo kimi ga iru (vocaloid),26 +air master,26 +aiqing,26 +aimusu,26 +aimi (sky-freedom),26 +aika (konshin),26 +aihara (keitora),26 +ahoge removed,26 +against vehicle,26 +afro (kngotezo),26 +afpl (parrotz4),26 +adore (adoredesu),26 +adam (lord of the mysteries),26 +adam (erubo),26 +acura (gunvolt),26 +acheru maru,26 +aberu514,26 +a ichi,26 +a-shi (lion81923),26 +9tt6,26 +9room,26 +91 days,26 +7tp,26 +7melon,26 +72 (mmmmkk),26 +5tsukado,26 +5plus5,26 +2wink (ensemble stars!),26 +2bro.,26 +241 (nishiki),26 +20th century boys,26 +175x172nyrn,26 +12 billion yen incident,26 +1-4daithi,26 +0x0082,26 +06erunium,26 +0141zucker,26 +00e fgo,26 +.hack//quantum,26 +#104,26 +zzzzoka,25 +zubi (skylinezb),25 +zp hn02,25 +zotari,25 +zoma,25 +zoids wild zero,25 +zizizy,25 +ziu,25 +zipper legwear,25 +zinnia silane,25 +zhuo ying,25 +zhu (nitamagr),25 +zhenyuan (journey to the west),25 +zhao shixuan,25 +zettai tenshi kurumi-chan,25 +zet (globalgears),25 +zerotabi1210,25 +zeroblood,25 +zerion,25 +zeolch,25 +zen'in naoya,25 +zelc-face,25 +zeiss,25 +zatsuon,25 +zap,25 +zaku ii f2,25 +zafuri (yzrnegy),25 +z umeshi,25 +yzr (yzr99),25 +yygnzm,25 +yuzb,25 +yuuya bridges,25 +yuuri (fukuroudou),25 +yuukaze (sakazaki freddy),25 +yuu yuu (netaeshi58),25 +yuu (re:lief),25 +yusa aoi (kazoku game),25 +yuri lowell (cosplay),25 +yumesphere,25 +yumegi atsuki,25 +yume oukoku to nemureru 100-nin no ouji-sama,25 +yume no hana (sbac0019),25 +yukisa,25 +yukimaru ai,25 +yuki no city,25 +yuki (sumaburalove),25 +yuki (fuguneko),25 +yukari miyuri,25 +yui 4293,25 +yui (linaw-key08),25 +yuho kim,25 +yueyue,25 +yueye (blbl-y),25 +yu-twilight,25 +yu-no,25 +yoyterra,25 +yoyohachi,25 +young cricket,25 +yoshizawa tamae,25 +yosafire,25 +yongtae,25 +yongoh,25 +yokkest,25 +yohi,25 +yizhibao,25 +yhorm the giant,25 +yellow temperance,25 +yeedee,25 +yazoo,25 +yazawa nico's mother,25 +yatsuhashi (pekemiddle),25 +yashita saki,25 +yasaka (high school dxd),25 +yanows,25 +yanagita kousuke,25 +yanagiba sakana,25 +yan (yan 36k),25 +yamipika,25 +yaminabe (honnouji no kaninabe),25 +yamazaki rin,25 +yamashita toshinari,25 +yamashita (ueno-san wa bukiyou),25 +yamamura sadako (cosplay),25 +yamamura miwa,25 +yamakeitokokoro,25 +yamaguchi tadashi,25 +yamada taeko,25 +yamada sawa,25 +yamada eiji,25 +yamada 3,25 +yamabuki sayuki,25 +yaeno miho,25 +yabai,25 +y skk,25 +xukong,25 +xrjingx,25 +xo160,25 +xiaomai yorik,25 +xiao lu,25 +xeno (xenoglitch),25 +x-kulon,25 +wuwuwu (kriswu555),25 +wuke euco,25 +wroggi (armor),25 +wriggle day,25 +wormadam (trash),25 +worgen,25 +woman with a parasol,25 +wolbach,25 +witch king of angmar,25 +wing bow,25 +windy (clow card),25 +windows logo,25 +windows 95,25 +wincalblanke,25 +william ruzicka,25 +wiccan,25 +white xxxx,25 +weltall,25 +weatheroid,25 +weathernews,25 +wcks0774,25 +wboss,25 +wave 61,25 +wattson (apex legends) (cosplay),25 +watsuji aya,25 +watermelon tourmaline (houseki no kuni),25 +water stone,25 +watanuki hibiki,25 +watanabe mayumi,25 +wata ramune,25 +wapiko,25 +wang yi,25 +wang-sensei,25 +wamosukeda,25 +wamizu,25 +wall-e (character),25 +walker gallia,25 +wakky,25 +wakamoto norio,25 +wakami shion,25 +wachiko,25 +voc,25 +vldhomecenter,25 +vladimir lenin,25 +vk16.02 leopard (ash arms),25 +vippaloid,25 +victory (dog),25 +vicsen-u5,25 +vesta zc,25 +vertical-striped bodysuit,25 +velox,25 +van grants,25 +valeria,25 +v a i r,25 +v2 assault-buster 
gundam,25 +uzurako,25 +uzicha,25 +uto uto,25 +utahoshi kengo,25 +usu32,25 +uso (campus),25 +usas-12,25 +usamimikurage,25 +usami eru,25 +usagiplanet7,25 +usagihop,25 +urban camouflage,25 +urasoe (oshiro project),25 +unlimited saga,25 +unjou no fairy tale,25 +unizuma eleven,25 +union flag,25 +unconventional gun,25 +una kata,25 +ump45 (winter journey) (girls' frontline),25 +umitsuki (kurage no shokushu),25 +umisea,25 +uminagi karan,25 +umehime,25 +ume2888,25 +ultraman zero the movie,25 +ultraman trigger,25 +ullr (last origin),25 +ulith (wixoss),25 +ujimatsu chiya (cosplay),25 +uji (966qrr),25 +uisaki hinano,25 +ui97,25 +uehara (dormmmmir ),25 +ueda yuu,25 +ueda hanako,25 +uchida kayoko,25 +ubox,25 +u.s.a.,25 +u-hi,25 +u-96 (azur lane),25 +tyrea (xenoblade),25 +tyranno kenzan,25 +type 81 carbine (girls' frontline),25 +type 0 reconnaissance seaplane,25 +tyasuzu,25 +twobey,25 +twintails (mantids),25 +twinbell,25 +turma da monica,25 +turing (gyee),25 +tukinen,25 +ttopang,25 +ttakuann,25 +tsuyuri (doujin work),25 +tsuyukina fuzuki,25 +tsushima shuu,25 +tsuruya (l re10 l),25 +tsurukame (mihomi),25 +tsuru hiromi,25 +tsukuyomi (kamikimi),25 +tsukioka misasa,25 +tsukinami yuu,25 +tsukimura,25 +tsukimi (pan (mimi)),25 +tsan dire,25 +trip dancer,25 +transformers: revenge of the fallen,25 +toyota karina,25 +towa kiseki,25 +tourin fuwa,25 +toujou shufu,25 +touji no sato,25 +touge chayako,25 +toudou takatora (sengoku bushouki muramasa),25 +torn unitard,25 +torn ascot,25 +torion hei,25 +tori (matsuda (matsukichi)),25 +tora (net1nen),25 +topophilia,25 +topo (musashiden),25 +tongue scarf,25 +tonari no jk ni odosarete iribitararetemasu,25 +tomone,25 +tomoe gozen (first valentine) (fate),25 +tomo (552252),25 +tommy oliver,25 +tomida tomomi,25 +tokujo-chan,25 +tokinohiyoko,25 +tokimeki general girls x,25 +tokikane mikan,25 +tokiame (style),25 +token black,25 +tokai teio (umamusume) (cosplay),25 +toiku,25 +tohno motosumi,25 +togemon,25 +tobiuo (62masa62),25 +to6 l,25 +tmk,25 +tipsytrains,25 +tiihatanono,25 +tiger tattoo,25 +throwing money,25 +three ramen musketeers,25 +three plates,25 +thecovertgarden,25 +textured hair,25 +tetsuo (amenohutikoma),25 +tetsukado shin,25 +tetsuhige,25 +tesla violet,25 +teshima noriko,25 +terun,25 +terra (dc),25 +tensou sentai goseiger,25 +tensei kyuuketsuki san wa ohirune ga shitai,25 +tenma kenzou,25 +tenma-gav,25 +tenkuubashi aika,25 +tenkuu no otome-tachi,25 +tenbin no la dea ~ikusa megami memoria~,25 +ten of spades,25 +temple (artist),25 +tella,25 +teitoku,25 +tedeza rize (cosplay),25 +technical,25 +team flare uniform,25 +tayu (yuntayu),25 +tatu,25 +tatsuta rindou,25 +tasogare,25 +tashkent (the blue snoozer) (azur lane),25 +tarutaru gungun,25 +taropun,25 +tarokazu,25 +tarkus,25 +tarakan,25 +tanzhujiuyue,25 +tanpaku-chan,25 +tangobat,25 +tandem bicycle,25 +tan (kiriya0752),25 +tamo imai,25 +tamo (nama-yatsuhashi),25 +tamatama,25 +tamaki rinko,25 +tamaki (tmk-poison),25 +tamaki (summer) (princess connect!),25 +tamaki (glass bottle),25 +tamai shiina,25 +talon widowmaker,25 +tales of pixiv,25 +takuto kira (cat),25 +takozonesu (cosplay),25 +takimiya kazutaka,25 +takigi,25 +taki reki,25 +takezuchi,25 +takeno omoti,25 +takashi mare,25 +takashi (harukasaigusa),25 +takase shin'ya,25 +takasaki mako,25 +takao kazunari,25 +takanashi shiori,25 +takanashi sei,25 +takanashi iori (ichiyou moka),25 +takanashi hinami,25 +takamiya sora,25 +takamiya mio,25 +takami (manda),25 +takajou yuna,25 +takagi kick,25 +takagi akito,25 +tailor,25 +taeko (tao),25 +tadase kairi,25 +tachibana yui (natsu ga owaru 
made),25 +tachibana kyouka (jin),25 +tachibana kazumi,25 +tachibana kana,25 +tachibana hiro (yakitomato),25 +tachibana (suterii),25 +taboolicious,25 +tabazi,25 +taaru (taru),25 +t zhonglang,25 +syringe in head,25 +sye-,25 +swordsman 2 (sekaiju 4),25 +sweet aviation model div.,25 +sweep tosho (monopolizing the chill?) (umamusume),25 +suzuna (fkpw5754),25 +suzumi atsushi,25 +suzuki mirei,25 +suya000,25 +sutee (ripobita),25 +suruga kasune,25 +supure647,25 +super sailor venus (stars),25 +super doll licca-chan,25 +supepepe,25 +suo niao,25 +sunko,25 +summon night swordcraft story 2,25 +sumizome (genshin impact),25 +sumima,25 +sumi (gfgf 045),25 +sumeragi subaru,25 +sumachii,25 +sulphur-crested cockatoo,25 +sukumizu 2,25 +suimin,25 +suginoji,25 +sugaya nowa,25 +sugawara esuko,25 +suga koharu,25 +sucking tail,25 +su (noonrema),25 +styrofoam,25 +stuffed koala,25 +studio ring,25 +studio lights,25 +stremitelny (azur lane),25 +street dog,25 +stranger mukou hadan,25 +stmoon,25 +steve fox,25 +stealthmaria,25 +starhump,25 +star saber,25 +star platinum (cosplay),25 +star build strike gundam,25 +staccato,25 +soy chicken,25 +sowb,25 +sotcho,25 +sorase (srsrs 000),25 +soranaka ame,25 +sophia (front innocent),25 +sonna koto ura no mata urabanashi desho?,25 +sonic the hedgehog 2 (film),25 +sollux captor,25 +sol (tvtjk7ubec),25 +soggates-nyan (amurka-chan),25 +sodom,25 +sodeya itsuki,25 +socrates (odin sphere),25 +social distancing,25 +socha (pixiv99744),25 +so-on,25 +snow white (grimm) (cosplay),25 +snow print,25 +snow fairy story (vocaloid),25 +snow (sentouin hakenshimasu!),25 +snek (terupancake),25 +snake charmer,25 +smolly poli,25 +smol ina,25 +smol calli,25 +smears,25 +sleigh presty,25 +slapping penis,25 +sizzler plate,25 +six of diamonds,25 +siwan yuan (4oooomanyen),25 +sister blood,25 +sisco,25 +sinclair (limbus company),25 +silver sleeves,25 +sig sauer p239,25 +sieru,25 +siege (city destroyer) (arknights),25 +shymiruku,25 +shuujin/kami hikouki (vocaloid),25 +shuuen no shiori project,25 +shuu (mniarnoakou),25 +shuten douji (under the same sky) (fate),25 +shuri (saidaioujou),25 +shunki gentei poco a poco!,25 +shukketsubo,25 +shub-niggurath,25 +shuangsen,25 +shrimqsleeq,25 +shoukaku (aircraft carrier),25 +short messy bangs,25 +shizuma yuho,25 +shishamo (abc shishamo),25 +shirt basket,25 +shirota (takoyaki 110721),25 +shironagasu-tou e no kikan,25 +shiromi (15741279),25 +shirokuma (reirako-reirako),25 +shiroabe,25 +shiratama shima,25 +shiraishi mako,25 +shiotan,25 +shioe monjirou,25 +shio midori,25 +shiny chariot (cosplay),25 +shintou,25 +shinji (metal-chan),25 +shinigami sama,25 +shin kamen rider prologue,25 +shin getter robo vs neo getter robo,25 +shimuro (mentsukidou),25 +shimakaze (world's speediest bunny waitress) (azur lane),25 +shiki kyouzoku,25 +shikarii,25 +shika tsui,25 +shijiuqaq,25 +shii (seaside720),25 +shigure (kemonomichi),25 +shift car,25 +shift (shiftillust),25 +shidou (x 2903),25 +shichimiso,25 +shibainu goro,25 +sherlock (bbc),25 +sher (imnotsher),25 +sheeg,25 +sharon holygrail,25 +shang bu huan,25 +shana (cosplay),25 +shan-n,25 +shamu,25 +shaman (dungeon and fighter),25 +shakti kareen,25 +sha (nz2),25 +sha-pei sahei,25 +setter (seven stars),25 +serruria (flower knight girl),25 +serpico (berserk),25 +serbu super-shorty,25 +sep,25 +seo hyesung,25 +sensei (hitagi3594),25 +senri (yukataro),25 +senmi aki,25 +sengoku yaraideka,25 +sengoku kiyosumi,25 +sengoku (one piece),25 +sendouin kaede,25 +senbon,25 +sen (sen42724788),25 +selene (ff14),25 +selena (soccer spirits),25 +sekiro (cosplay),25 
+sekiha love-love tenkyoken,25 +sekiguchi (odd taxi),25 +sekibanki (cosplay),25 +seityr,25 +seitokaichou (seitokaichou to sayono-kun),25 +seisou fude no tabibito,25 +segawa akane,25 +see-through shorts,25 +seath the scaleless,25 +seaside sukeban (smg) (blue archive),25 +seal (seal1102),25 +sea slug girl,25 +scw (girls' frontline),25 +scroll lock (scrool5),25 +scarlet desires,25 +saya (chocolate capsule),25 +sawaragi kyouka,25 +sauro dante,25 +satou yoshimi,25 +satou kazuma (cosplay),25 +sasamiya saya,25 +sasami (hallo),25 +sasame yozuru,25 +sasaki yuki,25 +sasabe opika,25 +sarisa highwind tycoon,25 +sapphirez39,25 +sanyuejiuri,25 +sanyang003,25 +sano (merrymerry),25 +sangcoon,25 +sanako (tsubakiiro),25 +sanada (tony2035176),25 +samurai 7,25 +samezuma jouji,25 +same-hada,25 +sam (totally spies),25 +salome (phantom kingdom),25 +sakuraba tamamo,25 +sakura kaoru,25 +sakura (yari no yuusha no yarinaoshi),25 +sakuhou3390,25 +saki (oneechanbara),25 +sakawa (azur lane),25 +sakata kintoki (third ascension) (fate),25 +sakata3,25 +sakamoto masaru,25 +saizaki minori,25 +saitou nekoichi,25 +saito (pigrank),25 +saishuu shiken kujira,25 +saionji reika (ginga e kickoff!!),25 +saionji makoto,25 +saionji leo,25 +saimin class wonderful,25 +saijou yurika,25 +saiba mirai,25 +sai (idolmaster),25 +sahara jun,25 +saginuma osamu,25 +sagami fuu,25 +sagami (aikodesyo),25 +saga (saga kancolle),25 +saeki takashi,25 +sado tarou,25 +sada-chan (tawawa),25 +sabotencc,25 +sabikui bisco,25 +ryusho,25 +ryus (ordinaryuzu),25 +ryuka,25 +ryuji ohara,25 +ryoushin no shakkin,25 +ryouke kaoru,25 +ryou-tan+,25 +ryopie,25 +ryoga,25 +rykard lord of blasphemy,25 +rue (dewprism),25 +ruby (tower of fantasy),25 +rubia natwick,25 +rubbing neck,25 +rrr ato,25 +rroar8,25 +rr (rr2),25 +rpg gakuen,25 +royal penguin,25 +rovintus,25 +rotating brushes,25 +rose quartz universe,25 +rose guns days,25 +rosa (fuu'un),25 +rori (artist),25 +rori (4chan),25 +rondel,25 +roncele,25 +rolling vistamp,25 +roland-gin,25 +rokuromi,25 +roku (hikokeng),25 +roche (p&d),25 +roamingtuna,25 +rla13753,25 +riyuta,25 +rising hopper,25 +rino (wonderland) (princess connect!),25 +rino (kurumi),25 +rinka (ruuku),25 +ringo apple,25 +rindou rinna,25 +rin5325,25 +rikotan (vtuber),25 +riko (maki-y318),25 +rikko (jellyberry),25 +riita iga,25 +riinougat,25 +rifling,25 +riel (yua),25 +riel,25 +ricarla borgnine,25 +ricardo soldato,25 +riboshika,25 +reycal,25 +rexfaxsex,25 +reta su gohan,25 +restroom sign,25 +ren wu ying,25 +remitei03,25 +rem (hinotomi),25 +relic buster (granblue fantasy),25 +reignite,25 +redamon,25 +red sonja (comics),25 +red-and-green macaw,25 +re:zero kara kasaneru isekai seikatsu,25 +rayman origins,25 +ray lovelock,25 +raven's bite,25 +rao ruki,25 +ramudayajirushi,25 +rakuen (nethvn),25 +raji (aranmax),25 +raizou,25 +raise a suilen,25 +raion (t12k1ro3),25 +raine (acke2445),25 +rain sunny,25 +rain (regen),25 +raikou104,25 +raiden shuga,25 +r2 online,25 +qwenthur barbotage,25 +queen of pain (dota),25 +queen of diamonds,25 +queen of clubs,25 +qu (punishing: gray raven),25 +qingshui ai,25 +pz,25 +pyonko,25 +puzzle (vocaloid),25 +purple santa costume,25 +punpun,25 +puni y y,25 +puka (wild arms),25 +puckjjick (belbesi19),25 +psi (583278318),25 +pruzhka (wardi113),25 +prinz adalbert (after-hours service) (azur lane),25 +print cloak,25 +principality of wy (hetalia),25 +princess princess,25 +princess mercury,25 +princess holiday,25 +prima,25 +pride-kun,25 +prema-ja,25 +precum pool,25 +poyoyon chihiro,25 +power suit (metroid),25 +powa (36iii),25 +potato iida,25 +poshul 
(chrono cross),25 +poroze,25 +popolocroits,25 +popcornflakes,25 +polskash,25 +pollo (evillious nendaiki),25 +pollity,25 +pole vault,25 +pokowachikusu,25 +pokemon ranger and the temple of the sea,25 +pokemon 4ever - celebi: the voice of the forest,25 +pokeblock,25 +pochacco (cosplay),25 +pluto (planet),25 +plume (junkpuyo),25 +plugg (kirby),25 +playmaker,25 +plastic little,25 +pjmiyo,25 +pixshed,25 +pirukusu,25 +pink pasties,25 +pineapple (a30930s),25 +pina (blue archive),25 +pikachu rock star,25 +pika mouse,25 +phares,25 +peyton gee,25 +pesu (penpen),25 +peropero saimin,25 +pepsi2330,25 +peppermint jet,25 +pengin pina,25 +patient zero,25 +passion harp,25 +parmesan (168n),25 +parachute pants,25 +pappappao,25 +panzerfaust (skullgirls),25 +panties under bloomers,25 +pang (sdorica),25 +pancake nun (diva),25 +paldean wooper,25 +palbo (hshhhh321),25 +pakisu-tan,25 +p.k.,25 +ozzzzy,25 +oz (manga),25 +oxstar,25 +ousuki konome,25 +ousaka sora,25 +ousaka asuha,25 +oumi megumi,25 +ouka (yama),25 +oui,25 +ottomarr,25 +otonashi amane,25 +otoma (matoi0603),25 +otokawa saori,25 +otogi frontier,25 +otaku no video,25 +osu! tatakae! ouendan 2,25 +oscrol las casas,25 +osashimisan,25 +osakana e,25 +orlijiang,25 +oresama teacher,25 +oreazu,25 +orange maru,25 +orange (sal1014),25 +orange (moekibarasensei),25 +opera the vermelho,25 +open gift,25 +oozora tsubasa,25 +oozaru,25 +oosaka hierou,25 +oooranje nlj,25 +oonamuamidabutu,25 +oobari masami (style),25 +onohana,25 +onoe junki,25 +only you,25 +onizuka kimihito,25 +onionyaa,25 +onigami mei,25 +oni0417,25 +onaji class no idol-san. around me is full by a celebrity.,25 +omochi no kimochi,25 +olverse,25 +olivier (heartcatch precure!),25 +oliver koito,25 +oli,25 +old dorothy,25 +okusama ga seito kaichou!,25 +okono,25 +okiura mizuki,25 +okazaki yuma,25 +okamocheese,25 +okamin,25 +okada izou (third ascension) (fate),25 +oka ball,25 +oishi (psycho-pass),25 +oimo imoo,25 +ohtsuka miyako (calm mashiro),25 +ohno,25 +ohayou girls,25 +ohama kan'emon,25 +ogawa misaki,25 +odokawa hiroshi,25 +odennoden,25 +odawara,25 +oda takashi,25 +obanana (ahap7438),25 +nyx (mebius no wa),25 +nyora (soredemosekai),25 +nymph (last origin),25 +nyagi,25 +nya-c,25 +numi (sin),25 +nukki,25 +nujig,25 +nuclear wasabi,25 +nucco,25 +ntw-20 (xmas reindeer) (girls' frontline),25 +nrp (pesu),25 +noyamanohana,25 +note (hikahikamahiru),25 +norton,25 +north island giant moa (kemono friends),25 +noroi no megane (vocaloid),25 +nora (greenteaneko),25 +nor (reader13),25 +nonbiri jirou,25 +nolan,25 +noko351,25 +noir (nowaru),25 +nogiwa kaede,25 +nobunaga the fool,25 +nobunaga no shinobi,25 +nobi tamako,25 +no halo,25 +no ears,25 +nixie (rabi ribi),25 +niwakaike,25 +nitoni,25 +nite airen,25 +nishina kurumi,25 +nishimura haru (7th dragon),25 +nirareba,25 +nipuni,25 +nipeira,25 +nio (einhorn),25 +nintendo 3ds ll,25 +ninpuu sentai hurricanger,25 +ninny-world,25 +nininbaori,25 +nine violet,25 +nine tail (ragnarok online),25 +nina matsumoto,25 +nin fake,25 +nikuo (29niku),25 +nijuuni,25 +niisan alpha,25 +nightmare (sinoalice),25 +night vision,25 +nicoloso da recco (azur lane),25 +nicole mimi tithel,25 +nick fury,25 +nick300,25 +niche-tan,25 +nezulet,25 +new york city police department,25 +new southern battleship princess,25 +neumann ku 100,25 +net ghost pipopa,25 +nerf gun,25 +nepeta leijon,25 +neo geo pocket color,25 +nemu kotatsu,25 +nekoshin kagari,25 +nekokoneko,25 +neko suke,25 +neko nami83,25 +nekko (pixiv62998998),25 +nejimaki kagyuu,25 +negoro shuujirou,25 +neginegio,25 +negi-mamire,25 +necktie on mouth,25 
+natsuki (digretking),25 +natsuiro kokoro log,25 +natsu ga owaru made,25 +natasha (fire emblem),25 +nasa-chan,25 +naruse (0819),25 +narumi karen,25 +narisawa naruo,25 +naoto kurogane,25 +naoto (sandersoniahirahira),25 +naomi hunter,25 +naoki yukira,25 +nansui kinoko,25 +nanosheep (character),25 +nanjou satoshi,25 +nanban teishoku,25 +nanatsugumi,25 +nanatsu maka,25 +nanase yuu,25 +nanami chiaki (cosplay),25 +nanako kaitai shinsho,25 +nanai (ayinusu00),25 +nanahoshi milily,25 +nami z,25 +nami nami nami,25 +nameless hill,25 +namayakeinu,25 +namatyaba,25 +namatame tarou,25 +nama (namaiki),25 +nagoonimation,25 +nagihoko,25 +nagihara suzuna,25 +nagi springfield,25 +nagi (haruka 4),25 +nagase mana,25 +naganegi,25 +nachuraa,25 +n03+,25 +mystyhw,25 +myon rio,25 +myao (jumca my),25 +mw,25 +muyihui,25 +mutual feeding,25 +mutsuya,25 +mutsunari (crim0718),25 +mutsuki yui,25 +mushoku loli (character),25 +mushi aoi,25 +muse loss,25 +murakami mame,25 +murairamuraiari,25 +multi-strapped dress,25 +mukouyama mu,25 +mujizi,25 +mugyaclan,25 +mugen (sp7q4kv9),25 +mr yheu,25 +moyasi06 25,25 +mouse mask,25 +mousariababa,25 +mouri kazuaki,25 +mouretsu atarou,25 +mountain tapir (kemono friends),25 +motiking,25 +motida,25 +motherboard,25 +moses (fate),25 +mosamune,25 +mors gn,25 +moriya marie,25 +morishige misora,25 +morisaki hichimi,25 +morino mizu,25 +morini ochiteru,25 +mori tatsuki,25 +mori (pepekataokapepe),25 +morgan le fay (valentine witches) (fate),25 +moon carver (genshin impact),25 +monogo,25 +monofin,25 +money slap,25 +monday,25 +monane4,25 +momosiro,25 +momoayamo,25 +momikodayo,25 +momiji manjuu,25 +molten rock bath,25 +mokuzou (mokumokuzo),25 +mokki (smtkmokki),25 +mokichi812,25 +mogy88428,25 +moeroknight,25 +mochimochi mascot,25 +mocacoffee 1001,25 +mo (smileomoti),25 +mo-mo-ride,25 +mizuya chiharu,25 +mizuta marixxx,25 +mizushina hotaru,25 +mizushima kasumi,25 +mizusawa suzuka,25 +mizumiyako,25 +mizuki chika,25 +mizuhotsuki,25 +miyano (tanakeda),25 +miyamoto musashi (fate) (cosplay),25 +mitsurugi heishirou,25 +mitsumine hakuya,25 +mita kazuo,25 +mister popo,25 +misopanne,25 +misibe,25 +mischief witch,25 +misaki kyouko (suchie-pai),25 +misaki (jonsun),25 +miruru souya,25 +miru holstein,25 +mirai shousetsu arcana,25 +mirai shounen conan,25 +mira (miramita8727),25 +mippei,25 +mipe (r kkk12),25 +mink343,25 +minior (meteor),25 +mineji,25 +minazuki maya,25 +minamoto no hiromasa,25 +min,25 +milyu,25 +milksea,25 +mikumo shinden,25 +mikiki,25 +mikan riji,25 +mikame v2,25 +mika mikli,25 +mii brawler (smash ultimate),25 +mihaya (a-ta-i),25 +mihanada kanata,25 +mighty action x level 2,25 +mifumi takafumi,25 +midorikawa maki,25 +midori xu,25 +midi hazapero,25 +mid (mid skb),25 +mianbaoren,25 +mi-sya,25 +mg4 (survival club) (girls' frontline),25 +mexican clothes,25 +metto,25 +meteolance,25 +metalmorag,25 +metal sand,25 +metal akira,25 +messiah & crea,25 +meryl stryfe,25 +mervamon,25 +meru (dragoon),25 +meroko yui (bunny),25 +merkava (under night in-birth),25 +mercedes-benz g-class,25 +memi (asa no ha),25 +mementomori,25 +melusine (housamo),25 +mellow yellow (idolmaster),25 +melkcoffee,25 +mekami suzu,25 +mejiro family matriarch,25 +meitou muku,25 +meikyuu tansaku dragon princess,25 +meikyuu black company,25 +mei-mei (murenase! 
shiiton gakuen),25 +megu usagi,25 +megatron (beast wars),25 +megalateo,25 +mega man 6,25 +mega aggron,25 +meerkat,25 +me (mikannu),25 +mazu (mazumaro),25 +mazinkaiser skl,25 +maze draws,25 +mayura,25 +mayoko na kuroneko,25 +maydream,25 +may harvey,25 +may (arknights),25 +maumaou,25 +matt groening (style),25 +matsushita (matsudbox),25 +matsuri (princess connect!),25 +matsuno susumu,25 +matori (pokemon),25 +matin catorce,25 +matcha (user yyss8744),25 +master maichin,25 +mashitono desu,25 +mashiro akira,25 +masashi (excellent),25 +masamurai,25 +masamune (eight5050),25 +masaki andoh,25 +masafumi,25 +mary (marota),25 +marusa (marugorikun),25 +marupon,25 +maru0one,25 +maru-chan,25 +marta,25 +marl35,25 +markl,25 +marjorine,25 +marina (pokemon),25 +marimuu,25 +marik (artist),25 +mariero (mariello),25 +marie marigold,25 +mariano (fairy fencer f),25 +maria ross,25 +margit the fell omen,25 +marasai,25 +map background,25 +maou gakuen no hangyakusha ~ jinruihatsu no maou kouho kenzoku shoujo to ouza o mezashite nariagaru ~,25 +maocha,25 +manna (pixiv8805037),25 +manichi,25 +mameshiba (character),25 +malin falch,25 +male healer (disgaea),25 +mako (eogks),25 +makita (mugitya3776),25 +makishi yaichi,25 +makababazi,25 +maka (user rryf2748),25 +mahou sensou,25 +maho x roba -witches spiritual home-,25 +magnolia (pokemon),25 +magna carta 2,25 +magisa,25 +magahara desumi,25 +madeline (celeste),25 +maddy,25 +machinicalis,25 +machahiro (shiitake),25 +mabel able (animal crossing),25 +mabel (maou-sama to kekkonshitai),25 +m1 garand (beach princess) (girls' frontline),25 +m-ma,25 +lyra (summer 2020) (pokemon),25 +lyn (fire emblem) (cosplay),25 +luserina barows,25 +luna skylark,25 +luna rune,25 +luna 11777,25 +luna (unsomnus),25 +luna (gkluna mas),25 +luna (dota),25 +lukas thadeu,25 +lucille aleister,25 +love love life,25 +lotus pod,25 +lotte (madoka magica),25 +lollipop (zoza),25 +locomon,25 +lmin,25 +llicornia,25 +lizhp libellus aetern-ritter,25 +lithiumia,25 +liselsia cesarini,25 +lin (user uzmw2535),25 +lin (greenopi),25 +limebreaker,25 +lim aya w,25 +lily (houtengeki),25 +lilith clawthorne,25 +lilith (monster musume),25 +lilia (king's raid),25 +liki,25 +light tank,25 +lidia (damegane),25 +lic (licloud28),25 +let,25 +lest (rune factory),25 +leonir (gogalking),25 +leomon32,25 +lemontansan,25 +lemonolemone,25 +leisss,25 +leila (yurisouls),25 +legs back,25 +legal high,25 +leenvidia,25 +lee gyu-hyuk,25 +laurell weinder,25 +las vegas,25 +lantana (flower knight girl),25 +lanjiujiu,25 +lancer-tan,25 +lakuhito,25 +laevateinn (phantom of the kill),25 +ladybird8n,25 +lace-up gloves,25 +l aciel,25 +l 1753ucon,25 +l.m.b.g (idolmaster),25 +kyuubiness,25 +kyrieru,25 +kyoya (0o-7snow7-o0),25 +kyouyama (kuromon),25 +kyouka (summer) (princess connect!),25 +kyoto tower,25 +kyoshincats,25 +kuzunoha rindou,25 +kuusuke (yo suke39),25 +kuusou ryodan,25 +kushizukino ayame,25 +kusanagi yuuri,25 +kusakabe tatsuo,25 +kurusu asami (hitotose),25 +kururu (princess witches),25 +kurumi nari,25 +kurumi (zettai tenshi kurumi-chan),25 +kurou (yugato),25 +kurosu taichi,25 +kurosiro,25 +kurose rena,25 +kurokishi to shiro no maou,25 +kurokami yuuya,25 +kurohime,25 +kurohachi,25 +kurogane ikki,25 +kuroda sayuki,25 +kuro-ra,25 +kurashima nagisa,25 +kurabe juurou,25 +kuonji yume,25 +kunugi ayano,25 +kunieda aoi,25 +kuneru marta,25 +kumomachi,25 +kumoi ichirin (cosplay),25 +kumakichi (kuma-ana),25 +kudiramochi,25 +ktym 777,25 +ksartworks,25 +koyomiyoko o,25 +koyemshi,25 +koyasu kazu,25 +koyap,25 +koyano ichigo,25 +kougyoku (module),25 +kotomori ren,25 
+kotobuki ryou,25 +kosui (artist),25 +kosori (dennoukitan),25 +kosmosshuffle,25 +kosian,25 +koshii tai,25 +koshigaya tatsumi,25 +koromo take,25 +koriente,25 +kora (xenoblade),25 +konowa (kakumei),25 +konno (genshiken),25 +kona (silent913),25 +komiya yuuta,25 +kometa virtual live,25 +komachi (gao 13),25 +koma (remi 398),25 +kokotendon,25 +kokorone=pendulum!,25 +koko (kotobutyann),25 +kokido,25 +kokeshi ya,25 +kodomo no omocha,25 +kodama's elder sister (sakura yuki),25 +kodai yui,25 +kobayashi kenya,25 +ko shushu,25 +knuckle hair,25 +knocknarea (fate),25 +kmkm panna,25 +klamp,25 +kiwa (a-bell-abi),25 +kity1211 tetsu,25 +kitano sora,25 +kitamiya hatsumi,25 +kishima (ki123454321),25 +kiryu coco (cosplay),25 +kirimochi,25 +kirihime yoruka,25 +kiri (foxsnake),25 +kinutani soushi,25 +kinoko-san,25 +kinkuri (axsc8mjrt),25 +kinjou manami,25 +kingindou yumeji,25 +kingfrogs,25 +kinakonato,25 +kimijima (kimijima0301),25 +kimagureneko,25 +kikyou kiri,25 +kikuchi yume,25 +kijipoko,25 +kiichirou,25 +kihuzinz,25 +kidmukuro,25 +kicdon,25 +kiben gakuha yotsuya-senpai no kaidan,25 +khaliqa bell,25 +keyliom,25 +keykey117117,25 +kesuno,25 +kerun,25 +kensuke creations,25 +kenny (pokemon),25 +kenbu (kyoukai senki),25 +ken-sya,25 +kemonono (inchiki dou),25 +kemono friends r,25 +kelbhin,25 +keisenko,25 +keibeam,25 +kazuta (kazutan62),25 +kazuha (ichiwa),25 +kazemal,25 +kaze yaku,25 +kazami nobuko,25 +kaz (kaz323),25 +kaye (blushyspicy),25 +kay (kf1n3),25 +kaworu (1030),25 +kawazoe mariko,25 +katsura kotetsu,25 +katsuki tsubasa,25 +katrielle layton,25 +katerea leviathan,25 +katase megumi,25 +katanon (suparutan),25 +katakuri,25 +katagiri atsuko,25 +kasagland,25 +karutia (g (genesis1556)),25 +karube guri,25 +karua m,25 +karasunomiya asuka,25 +karappo (poket12),25 +karamomo,25 +kanuka clancy,25 +kanten (kanten328),25 +kansen (series),25 +kanon (ghost trick),25 +kannuki natsume,25 +kannagi ayano,25 +kanihara eiko,25 +kangbus,25 +kanemoto akari,25 +kaneko ryou,25 +kanasuke,25 +kanade rindou,25 +kan-e-senna,25 +kamu (camui),25 +kamo 0707,25 +kamikawa yuuto,25 +kamichama karin,25 +kameo,25 +kameneji,25 +kamen rider the first,25 +kamen rider gills,25 +kame no nin,25 +kamaboko bijin,25 +kama (summer enma-tei) (fate),25 +kaku sugar,25 +kakoogan,25 +kako (azur lane),25 +kaki z3,25 +kajanda,25 +kaiware (user kamu3357),25 +kaitofuuma,25 +kaidou nora-,25 +kahiika,25 +kaheru (vtuber),25 +kagura san,25 +kagura (ressha sentai toqger),25 +kagumanikusu,25 +kage (ka 9e 4su),25 +kae (suguri),25 +kae (artist),25 +kaavi,25 +ka 4maki,25 +k041800,25 +k-ailisi,25 +justinas vitkus,25 +juno (pixiv32541104),25 +jungle de ikou,25 +jun sung kwak,25 +julius monrey,25 +juiceneko,25 +juhi-huji,25 +juan romero,25 +jowol,25 +journal,25 +jorouyome-chan,25 +joint06,25 +johanna wiese,25 +joey (lilinjunyi),25 +jk-ff,25 +jitan777,25 +jinyu lao honglingjin,25 +jinguu maya,25 +jin kaze tsukai,25 +jimmy valmer,25 +jill stingray (cosplay),25 +jibako,25 +jiaoshoutongxue,25 +jeya (leej3ya),25 +jet jaguar,25 +jessie maye,25 +jersey (azur lane),25 +jellyfish hair ornament,25 +jdpr,25 +jaws,25 +jasper (steven universe),25 +jas (kda10457),25 +jagi (nexboy),25 +jack the ripper (chaldea lifesavers) (fate),25 +jack-o'-lantern (kemono friends),25 +ja-punkster,25 +izuru,25 +izumi kumi,25 +izumi (walnov),25 +iwis,25 +iwashita akemi,25 +iwaoka (sikabanenomiti park),25 +iwakura kazunori,25 +iwa (alpaca oukoku),25 +ittan momen (gegege no kitarou),25 +itsumi1021,25 +itsuki (otsugei),25 +itou masanori,25 +itoguchi (manma melon),25 +italia mondial,25 +israfel,25 +isonami kai 
ni (kancolle),25 +ishikoro,25 +isezaki eri,25 +iseshima aya,25 +ironing,25 +ireza,25 +invisible wanwan'o,25 +invasion stripes,25 +inuyou,25 +inubouzaki ayako,25 +inubana jiruno,25 +inou hiroaki,25 +inoe (noie),25 +inkstrike (splatoon),25 +inkopiko,25 +ink (pixiv25450915),25 +ingo (pokemon) (cosplay),25 +indonesian flag,25 +inari (monster girl encyclopedia),25 +inanosuke,25 +inagata,25 +in net,25 +in'youchuu shoku,25 +imuneko,25 +imo cyber,25 +imo (ryokyou),25 +imeri fuzuki,25 +imanatsu,25 +imai taki,25 +ima (minitomato123),25 +illustrator,25 +illusion (pokemon),25 +ilion,25 +iku kurumi,25 +ikemoto1001,25 +ikashun,25 +iji (u mayday),25 +iinuma chika,25 +igniculus,25 +idw (cat in the box) (girls' frontline),25 +ida (idalol490),25 +icyee,25 +ichimonji kei,25 +ichiko,25 +ichi (lucky-dog1),25 +i-coat,25 +i-8 (kancolle) (cosplay),25 +i-13 (kancolle) (cosplay),25 +hymxiaocyan,25 +hyangu,25 +hyakka onibi,25 +huyunora,25 +hunnyamai,25 +huiro,25 +huffing,25 +huaronanago,25 +htk mikan,25 +hpknight,25 +hpapo,25 +howe (pastry princess) (azur lane),25 +houkago no senpai,25 +hotei kazuha,25 +hotarubi,25 +hoshino hachirouta,25 +hoshi tanuki (shironeko project),25 +hoshi ame,25 +hongse beiyu,25 +honey (norasuko),25 +honda s2000,25 +honda (mtp),25 +honcha,25 +homurakko,25 +holding thermos,25 +holding pendulum,25 +holding mistletoe,25 +holding hourglass,25 +holding hoop,25 +holding cushion,25 +holding blindfold,25 +hokuro-chan (tawawa),25 +hokke (fryinghokke),25 +ho (h k white),25 +hizack,25 +hitte5416,25 +hitoyume,25 +hitotsukane yuuko olivia,25 +hisui (paingumi),25 +hisu (hisu ),25 +hishida haru,25 +his master's voice,25 +hiruri,25 +hirono nagi,25 +hirokawa,25 +hirohana yukiko,25 +hiroe chiharu,25 +hire (uret4788),25 +hiomaika,25 +hinoki yuu,25 +himuro tatsuya,25 +himmel (allsky83),25 +himeoka yuki,25 +himenomiya kaguya,25 +himekawa fuuka,25 +himano (artist),25 +hilda (summer 2022) (pokemon),25 +hikouseki,25 +hikagami yukiri,25 +hijinrui gakuen,25 +hijikata (shiromanta),25 +hige (yosemite),25 +hiedanotsukai,25 +hida iori,25 +hibino matsuri,25 +hibiki ao,25 +hibara eiko,25 +hibanachiku,25 +heus (nuntarou),25 +hetiru,25 +hesuke,25 +heroes of might and magic,25 +hero's shade,25 +hercequary,25 +henryk,25 +helmina lent,25 +hell and heaven,25 +hell2 (ses0297),25 +helixel,25 +heliotrope (flower knight girl),25 +helena adams,25 +heki (axis),25 +hekapoo,25 +heka=ton,25 +heitian keji,25 +hei tong shi,25 +hebinuma,25 +heavy meta-ko,25 +heather (fire emblem),25 +hazuki ruka,25 +hazakura seiso,25 +hayate (doa),25 +hayase kouichi,25 +hawkeye (fire emblem),25 +hattori kiriko,25 +hattori hanzou (hyakka ryouran),25 +hatsuji horumon,25 +hatomugi (mamotan),25 +hatakeyama yoshitaka,25 +hasumi rain,25 +hashimoto,25 +hashiko (pecopom),25 +haruno sora,25 +haruna miyabi,25 +haruiro ouse,25 +haruharu55,25 +haru hikoya,25 +haru (inamura4),25 +haregi,25 +harapekopikachu,25 +haraguro jakku,25 +hapoa,25 +hansode32,25 +hans (senjou no valkyria),25 +hanes 025,25 +hand on animal,25 +hand in thighhighs,25 +hanamutsuki,25 +hanabatake yoshiko,25 +han joon-gi,25 +hamutarou,25 +hamha s,25 +ham (eikasiahhh),25 +halhal,25 +hakuu kanaka,25 +hakurai reika,25 +hakui (b600723),25 +hakua shou,25 +haiumore,25 +hair length switch,25 +hair chair,25 +haimine,25 +haidollo,25 +hagure tanishi,25 +haguhagu (19448514),25 +hagihara asami,25 +hachimitsu ame (phoenix),25 +habu.,25 +haaru,25 +gyroscope,25 +gyopi,25 +gygerbeen rtl06,25 +gyaku oudou,25 +gwendolyn (fire emblem),25 +guzma (pokemon) (cosplay),25 +gutter,25 +guren seiten,25 +gurekoguriko,25 +gunu 
(nyzn3223),25 +gunter (fire emblem),25 +guntank (guriko),25 +gunneko,25 +gunbam sonyeon,25 +gun pointing at viewer,25 +guido (sucurapu),25 +guanhian,25 +gs pno,25 +griffon (monster girl encyclopedia),25 +greenapple,25 +greek letters,25 +gree4,25 +great pretender,25 +grassy,25 +grand knights history,25 +gradient pants,25 +gou (gzgnight),25 +goshichi shoji,25 +gorirago,25 +gorgeous takarada,25 +gondola (meme),25 +gomi yashiki,25 +gomarayu,25 +gokushufudou,25 +goat alter (narane),25 +go to paradise (idolmaster),25 +gm sniper ii,25 +gm orangeade,25 +glove spread,25 +glaug,25 +glaucus (exterminator in the square) (arknights),25 +glalda,25 +gl ztoh,25 +girls book maker ~grimm to sannin no ohime-sama~,25 +giovanni (ginga tetsudou no yoru),25 +gintarou (puipuiginta),25 +ginko (konekonoshippo),25 +ginjoo (ginjo 1116),25 +ginga kuon,25 +ginban kaleidoscope,25 +gimmy adai,25 +ghost (ghost528),25 +ggim (kdnx8758),25 +geyser,25 +getting up,25 +geroro44,25 +germaine avadonia,25 +george kurai,25 +genie (aladdin),25 +gelbooru,25 +gel banana,25 +geb (stand),25 +gasketsu,25 +gasaraki,25 +garuta (yamcha),25 +garam masala (7355873),25 +garakuta (garakuta no gomibako),25 +gao ex kaiser,25 +gantan,25 +galois,25 +galeoria,25 +galar mother,25 +gakukuru,25 +gaius worzel,25 +gabo,25 +fuyuumikou,25 +fuuma tokiko,25 +fuuma (humawww),25 +futomashi,25 +futasan,25 +fury bowser,25 +furono (fuloru),25 +furisode girl blossom,25 +fur-trimmed robe,25 +funbuns,25 +full metal panic! invisible victory,25 +fukushima nyuugyou inc,25 +fukufukupine,25 +fujiya,25 +fujitama koto,25 +fujinami tomoko,25 +fujimori mikan,25 +fujimiya sakura,25 +fuji mitsuya,25 +fuhikari,25 +fuckin' hot (kuso atsui),25 +fuchi (fuchi 1106),25 +fu r y,25 +fu-mi.a,25 +fruit stand,25 +frodo baggins,25 +friulian spear,25 +friend ball,25 +fran 690,25 +fps xilou,25 +forever star (idolmaster),25 +forecast janna,25 +food awe,25 +fogriver,25 +fn minimi,25 +fmg,25 +flywinga7,25 +fly tutu,25 +fly (pokemon),25 +flower umbrella,25 +flower box,25 +flora beast (disgaea),25 +flatbed truck,25 +fl (l-fl),25 +fixelcat,25 +five of diamonds,25 +fishcoooo,25 +fiona (agarest senki),25 +final fantasy xvi,25 +final fantasy tactics: hakuma doushi shibari,25 +filin,25 +filiananna,25 +fighter (dq3) (cosplay),25 +ffxivys,25 +fezat,25 +feng shao kky (arj0522),25 +feiyyx,25 +feferi peixes,25 +feathered cape,25 +fata morgana no yakata,25 +fasalina,25 +faruzan (genshin impact),25 +fant,25 +falling money,25 +fallen angel (untsue),25 +falcom (neptune series),25 +fake nyon (cookie),25 +fairy ranmaru:anata no kokoro otasuke shimasu,25 +fair-chan,25 +f.l.c.,25 +f.a.n.g,25 +ezume (rosehip),25 +ezel the king of fire and iron,25 +exfeet,25 +excharny,25 +examination table,25 +eve (mythology),25 +eve (alchemy stars),25 +evanstan,25 +evanescent existence,25 +eva smith,25 +eva-st-clare,25 +eushully,25 +esty erhard,25 +esmeralda (mawaru penguindrum),25 +erufa (pixiv),25 +eroriru,25 +erio patrol,25 +ereshkigal (under the same sky) (fate),25 +erdrick's sword,25 +erato (ennuigirl),25 +eonbound,25 +enmto,25 +enkaboots,25 +empty plate,25 +emojo,25 +emma hardy,25 +emlyn white,25 +emilia (yu-gi-oh!),25 +emaan,25 +em8er,25 +eloseu (haribochase),25 +elnie tachibana,25 +elle (lom),25 +elena stoddart,25 +ela angraeni (revian samuel dani),25 +eika (artist),25 +eh? 
ah sou (vocaloid),25 +egg (rxlal),25 +egg (lemyawn),25 +edmond dantes (monte cristo uniform) (fate),25 +edelweiss (wsparkz),25 +ecchi nano wa ikenai to omoimasu,25 +eboda-x,25 +ebicha,25 +eba,25 +east coast canuck,25 +easily,25 +eagle (azur lane),25 +e len,25 +e.de.n,25 +dyuba000,25 +dusttodusk,25 +duralu500,25 +dunyarzad (genshin impact),25 +duck hood,25 +druj (jahy),25 +driselle sharil,25 +drawing alpaca,25 +dragon lord,25 +dragon (trickster),25 +dr. white (wet.elephant),25 +douraku utage,25 +doumyouji haruto,25 +double flare skirt one-piece,25 +domon asuka,25 +dolores (mazohaha),25 +dollyly21,25 +dole,25 +dokidoki yandemic,25 +dodari,25 +diverdiva,25 +dist (tales),25 +disorder 6,25 +discowars,25 +dino (trexsilence),25 +digital rain,25 +dictionary,25 +dias (tajima kouki),25 +diarrhea,25 +devil jin,25 +destructor girl,25 +der untergang,25 +dent,25 +denney (sukeru ramune),25 +demorzel,25 +demon days (gorillaz),25 +demia duodectet,25 +delinquent (pokemon),25 +deilrimix,25 +degu (kemono friends),25 +defect mogeko,25 +deden,25 +deatiose,25 +death march kara hajimaru isekai kyousoukyoku,25 +dear: (utaite),25 +dclaret,25 +days (kagerou project),25 +dauchimk 1,25 +date shigezane,25 +darknessukaru,25 +dark knight (elsword),25 +dare no inarikami,25 +dappled moonlight,25 +danyo (chung0226),25 +danial,25 +dandou,25 +dana (ocana dana),25 +damarinasai (mineo),25 +daitarn 3,25 +daikou-chan,25 +dahlia (rune factory),25 +dagasitotaiyou,25 +dace (azur lane),25 +da-mii,25 +d'eon de beaumont,25 +cz-805,25 +cynthia (sygna suit) (aura) (pokemon),25 +cyborg-san (sage (mami1210)),25 +cybernetic,25 +cyberdemon no3,25 +cure mirage,25 +cure lovely (cosplay),25 +cum on underside,25 +cum on testicles,25 +cu chulainn alter (curruid coinchenn) (fate),25 +crypto (apex legends) (cosplay),25 +crow's nest,25 +crotchless bodysuit,25 +crois,25 +crabrawler,25 +cpro,25 +cosplex,25 +cosmoem,25 +corvisquire,25 +cordula (okame nin),25 +contest button,25 +constance magee,25 +commeowdore,25 +comiket 72,25 +comic koh,25 +comaza,25 +cointreau,25 +cocoroppy,25 +cocomeen,25 +cocoda,25 +coco (mermaid melody pichi pichi pitch),25 +coco3186,25 +cobraja,25 +clulu aluminal,25 +clovis la britannia,25 +clitoris sleeve,25 +cliff (pokemon),25 +cleophee,25 +clarissa (epic seven),25 +cika k,25 +ciero,25 +cidolfus orlandeau,25 +chupirinko,25 +chunhwei lee,25 +chui (weapon),25 +chugging,25 +chou-10cm-hou-chan (fuyutsuki's),25 +chonkoo,25 +chocolate misu (cosplay),25 +chloe (srgrafo),25 +chloe (real) (princess connect!),25 +chiyo (ppp 808),25 +children's day,25 +chikuwabu (yokowokazuaki),25 +chikuwa udon,25 +chikushoudou pain,25 +chikan sen'you sharyou,25 +chiizu ore,25 +chihaya (kawacy),25 +chiffon (chiruto),25 +chicken feet,25 +chiba yuudai,25 +cherry numan,25 +chenalii,25 +chen yan,25 +checkered neckerchief,25 +chat noir (granblue fantasy),25 +chaser (warship girls r),25 +charlotte lueder,25 +charles caron,25 +chanms,25 +ceremony,25 +celeste (video game),25 +celeste (granblue fantasy),25 +cavalla (azur lane),25 +cathy (yu-gi-oh!),25 +caterpillar (artist),25 +cater (fft-0),25 +catcan,25 +cat symbol,25 +cat hoodie girl (tsubaki tsubara),25 +cat-quest-sun,25 +casting couch,25 +casey (pokemon),25 +carrying clothes,25 +carrie alberta,25 +carm (ruoyeahs),25 +carla carmilla (rariatto),25 +card creature,25 +cararina,25 +car trunk,25 +capture styler,25 +captainosaka,25 +captain lamb,25 +cappccino,25 +capacitor,25 +candy (pixiv15231759),25 +canadawbd,25 +campanella (ginga tetsudou no yoru),25 +campaign hat,25 +camouflage cloak,25 +buzz lightyear,25 
+business,25 +bururai,25 +bullet line,25 +bulldog (azur lane),25 +bubblegum crisis 2040,25 +brown butterfly,25 +broken necklace,25 +broken handcuffs,25 +breast slider,25 +brandon (pokemon),25 +bowser peach,25 +boris jinneman,25 +borijoikun,25 +bordeaux (girls und panzer),25 +borchardt c-93,25 +boooshow,25 +boomslang (kemono friends),25 +body parts,25 +boar costume,25 +blush response,25 +blue wildebeest (kemono friends),25 +blue shrimp,25 +bloopiest,25 +bloodrayne,25 +blondynkitezgraja,25 +blathers (animal crossing),25 +blackspade,25 +blackcony,25 +blackbad,25 +black hair twintail girl (ichiki 1),25 +black dahlia,25 +black chemise,25 +black belt (pokemon),25 +bizarro,25 +biwa hayahide (noel rouge carol) (umamusume),25 +bitterpain,25 +bitch hime,25 +bishoujo senshi sailor moon another story,25 +bionic joshikousei (fukai ryousuke),25 +billowing cape,25 +biiko (king1015g),25 +biggumane,25 +bible black gaiden,25 +biba eichi,25 +bhm,25 +berver,25 +beowolf,25 +beltorchika irma,25 +bella (dq5),25 +bell mha,25 +bel hydra,25 +bebseo,25 +bebe (ad234 tenrou),25 +beatrice castiglioni,25 +bear paw s,25 +bead sex machine,25 +bat (coumori),25 +barraskewda,25 +barnette orangello,25 +baphomet (fate),25 +banjo (technistep),25 +bane (haibanemumi),25 +bananannu,25 +ban keiko,25 +ban bu bu duou,25 +bakuryuu sentai abaranger,25 +bakuchiku,25 +baitu,25 +baikinman,25 +baia,25 +badger,25 +bad kim,25 +backrooms (creepypasta),25 +babyseven 77,25 +ba fed kitaku,25 +b-17 flying fortress,25 +azuna (love live!),25 +azumi on,25 +azik eggers,25 +azarasi haru,25 +ayasekira,25 +ayase touka (piromizu),25 +ayase momo,25 +ayakashibito,25 +aya (sabaneko),25 +avrora (shackled saule) (azur lane),25 +autumnlll,25 +august soft,25 +audrey belrose,25 +auauun,25 +attouteki yuugi mugen souls z,25 +atelier annie,25 +asyuaffw,25 +asus,25 +asura (asura's wrath),25 +astolfo (sailor paladin) (fate) (cosplay),25 +astaroth (soulcalibur),25 +ask (densicho),25 +ashino chimado,25 +ashi (samurai jack),25 +ash sarai,25 +asaya-bigun,25 +asanagi aoi,25 +asahi haru,25 +asagiri no miko,25 +asagao (kunoichi tsubaki no mune no uchi),25 +arvalis,25 +aruu (memories),25 +aruto2498,25 +arukooru,25 +arue (konosuba),25 +arrow (en'en no shouboutai),25 +arpiel,25 +arkhangelsk (azur lane),25 +arisugawa reiko,25 +arioto,25 +arietta fine,25 +arianne the labrynth servant,25 +arianna (bloodborne),25 +aria pkmn,25 +arden (fire emblem),25 +arcturus,25 +archerfish (azur lane),25 +archer (modern black costume) (fate),25 +archangel gabriel,25 +arcade stick template,25 +arc gurren-lagann,25 +arc (ff3),25 +araki maki,25 +arachne boy,25 +ara pengin,25 +apocalypse now,25 +aozane,25 +aoyama-kun (penguin highway),25 +aorkgk,25 +aoiyui,25 +aoi nori (aoicoblue),25 +ao hito,25 +anyueh,25 +another kung fu girl,25 +anohito (tokumei),25 +annie (saga frontier),25 +anne petriceani,25 +anmaki,25 +anko (love live! sunshine!!),25 +anime tenchou,25 +anime-tamae! 
tensei no miko,25 +angrykuma,25 +angry sun,25 +anglerfish dance,25 +angela burton,25 +angel salvia,25 +anfang (chihuri),25 +ane doki,25 +anbee (arary),25 +anavel gato,25 +amo chenbe,25 +amnesia,25 +amayado rei,25 +amatsuka kosame,25 +amaton707,25 +amamiya yuumu,25 +amamiya mizuki,25 +amakara twins,25 +amagi hiiro,25 +alvin granford,25 +alver,25 +altrouge brunestud,25 +alphonse heiderich,25 +alpha (katz332),25 +allison & lillia,25 +alliance of the golden witch,25 +alisa kirsten,25 +aliori haberi,25 +alicuu girls maximum: bahamut,25 +alicia (pop'n music),25 +alice jing,25 +aleth,25 +aleste,25 +aleister crowley (toaru majutsu no index),25 +aldrich devourer of gods,25 +alcremie (ruby cream),25 +alchemist (company),25 +albreo,25 +akusera,25 +akumey,25 +akizuki airi,25 +akiyoshi fuyuka,25 +akiyama shun,25 +akiyama0818,25 +akira kira,25 +akino (christmas) (princess connect!),25 +akibotann,25 +aki shizuha (cosplay),25 +akatsuki no guuru,25 +akane rose,25 +akaname-san,25 +akaie11,25 +aircraft carrier summer oni,25 +aikosu (icos),25 +aikagi (azarashi soft),25 +aigami kaon,25 +aiba ami,25 +ai (warekaku),25 +ai (wakaba iro no quartet),25 +ai (tick! tack!),25 +ahamma,25 +agnes chevalier de milan,25 +against object,25 +aegis gundam,25 +aegis (persona) (cosplay),25 +ae (aeiu4114),25 +adelheid bernstein,25 +adachi ruri,25 +accessories switch,25 +abimaru,25 +abi (user nzav7333),25 +aardwolf girl,25 +a k i,25 +a clockwork orange,25 +a cat is fine too (meme),25 +a2t will draw,25 +a-801,25 +81diver,25 +6fu (11madhouse),25 +675 (image 675),25 +6-k-i-7,25 +3ri10te,25 +29 to jk,25 +26 (sister freedom),25 +16 (0xhsk16),25 +1/6 (vocaloid),25 +000 (jicasoe),25 +zzb,24 +zuikaku (aoki hagane no arpeggio),24 +zudomon,24 +zudah,24 +zoni-ko,24 +zoey (shepherd0821),24 +znz,24 +zn (zzzzzni),24 +zippedsquire,24 +zigza (gashi-gashi),24 +ziggy kakziga,24 +zhihaiwusheng,24 +zhengyifan7,24 +zephiel (fire emblem),24 +zenseava,24 +zenrakishi,24 +zehel az,24 +zb-26 (nook of ephemeral dreams) (girls' frontline),24 +zawapirori,24 +zarsy,24 +zaou ryuu,24 +zaogao xiaotu,24 +za (artist),24 +yuzuki yukari (a.i. voice),24 +yuusya27,24 +yuurei447,24 +yuumeibokumeimei,24 +yuumare,24 +yuuki shinjuurou,24 +yuuhi (yuyuhihi),24 +yuudachi (woofy floofy christmas night) (azur lane),24 +yuu cream,24 +yutokamizu,24 +yurinozuku1112,24 +yuri (yuri741),24 +yuri (dirty pair) (cosplay),24 +yurameku kokoro ni michita sekai de kimi no yume to yokubou wa kanau ka,24 +yura tsubasa,24 +yunohara konomi,24 +yuno setouchi,24 +yuno (ou35i),24 +yunita (hallelujah),24 +yun (outsidey),24 +yumiya rakko,24 +yumenomimizuku,24 +yumekoi,24 +yume e no ippo,24 +yukuso (dabiandang),24 +yukiya,24 +yukisuke (user gtmm7833),24 +yukishiki shilfi,24 +yukise miyu,24 +yukisaki miale,24 +yukinyan,24 +yukino (zeroshiki kouichi),24 +yukimi ai risu,24 +yuki yukki12,24 +yukanomokume,24 +yuiti hinata,24 +yuita,24 +yui (kanatamoo),24 +yuda (hokuto no ken),24 +yu-han chen,24 +yu-gi-oh! 
3d bonds beyond time,24 +ys kosato,24 +youyu (kyouno),24 +youkai kusaregedo,24 +youichi (45 01),24 +you-6-11,24 +yotsuba yuiko,24 +yoto (rinlin2),24 +yossui,24 +yoshii akira,24 +yosakuh,24 +yorkshire terrier,24 +yorha,24 +yooroongoo,24 +yoneko,24 +yomo (ym),24 +yomi yojo,24 +yomban,24 +yocchi 3601,24 +yoarashi inasa,24 +yo-rindou,24 +ying (suetmo),24 +yeougui,24 +yen (isamu-ki),24 +yellow blood,24 +yatsumura tsuyuno,24 +yatabe noa,24 +yashiro yuuya,24 +yasakana tooi,24 +yankee-chan (shashaki),24 +yang guifei (honey lake) (fate),24 +yan wen zi,24 +yamone,24 +yami reina,24 +yamawaku,24 +yamashiro (holiday offensive) (azur lane),24 +yamanoyu,24 +yamanouchi hisako,24 +yamanezumi rokikku,24 +yamamoto rurika,24 +yamamoto nanashiki,24 +yamakatsu (genrei koubou),24 +yamada naoko (trick),24 +yamabiko,24 +yakumo ling,24 +yakisake,24 +yaichino,24 +yaiba,24 +yahweh,24 +yahiko (naruto),24 +yagisawa keiichi,24 +yadokari (yadokani),24 +yaato (yamato99725444),24 +xzu,24 +xing dao,24 +xi liu,24 +xeonomi,24 +x sanders x,24 +wz (woyzeck),24 +wuju (1198979953),24 +wufaxianshi cnd,24 +wow+,24 +wonchul,24 +wolla,24 +wizard barristers: benmashi cecil,24 +witch lady (dq8),24 +wise (okaa-san online),24 +wiping blood,24 +window shopping,24 +wind chime focus,24 +willwind30,24 +wikumi,24 +wikstrom (pokemon),24 +wick (identity v),24 +white queen (date a live),24 +whether,24 +wet and messy,24 +weighted clothes,24 +wei yenwu (arknights),24 +wbfish,24 +wato samirika,24 +watamate,24 +washimine yukio,24 +waruzamurai,24 +warirui,24 +war of genesis iii,24 +wanoji,24 +wang qiu er (douluo dalu),24 +wallfloristry,24 +walkermachine,24 +wakuraba,24 +wakou tensui,24 +wako morino,24 +wakita piyosuke,24 +wakaura asaho,24 +wakami suiren,24 +wakadori,24 +w0ru,24 +vought os2u kingfisher,24 +voltkatze,24 +volt crocodile,24 +vol.7,24 +virus (dramatical murder),24 +virtual kouhou taishi project,24 +violetgrass,24 +violetcoral,24 +vinyl scratch,24 +vikala (granblue fantasy) (cosplay),24 +viibean,24 +videocassette recorder,24 +victoria cindry,24 +vian,24 +vf-4,24 +vf-25 (cosplay),24 +vest pull,24 +veronju caesar (unleashed),24 +verdia,24 +venus ark uniform,24 +venti suki,24 +venom (vocaloid),24 +vanity (monster farm),24 +vanessa (live for the funk),24 +valkyr (warframe),24 +valdgeist,24 +uzumaki,24 +uzuki sakura,24 +uzucake,24 +utauinu,24 +uta (semimaru),24 +ushio takigawa,24 +uru-arrow,24 +urokodaki sakonji,24 +urbinator17,24 +union jack print,24 +unicorn jabu,24 +uni (uni9248),24 +unfins,24 +umino mo kuzu,24 +umino ht,24 +umibouzu (gintama),24 +umetori uriri,24 +umekichi (unbalance),24 +umedairuka,24 +ultraman mebius (series),24 +ultimate weapon (armored core),24 +ultimate antihero,24 +ukyou (amnesia),24 +uiru,24 +ugusu24,24 +uesugi mihato,24 +udyr,24 +ubuyashiki kagaya,24 +u-4989,24 +u-410 (azur lane),24 +tybernation,24 +twocar,24 +two-tone jumpsuit,24 +twenty,24 +tweedledum (alice in wonderland),24 +turenne,24 +tundra,24 +tumblr logo,24 +ttheyue,24 +tsuzura amo,24 +tsuzuki yoshio,24 +tsuyu sauce,24 +tsuyoshi takaki,24 +tsutsuji (etra-chan wa mita!),24 +tsuruhisashi,24 +tsunosame,24 +tsun (tsuncha),24 +tsukubae tomoe,24 +tsuki ga kirei,24 +tsukasa takashi,24 +tsukasa (pixiv34617881),24 +tsukai yowo,24 +tsucaco,24 +tsubasa miu,24 +tryvor,24 +tripleeight,24 +tri-brigade kitt,24 +trey (fft-0),24 +trevor25527766,24 +tren,24 +trefle r,24 +trapping ranger (elsword),24 +traghetto,24 +to~fuya,24 +toyosaka,24 +tower of the sun,24 +towel lift,24 +toujou masateru,24 +toshokan sensou,24 +toryu fufu,24 +toro th,24 +torn mask,24 +torn buruma,24 
+torinoko tamago,24 +torii jungo,24 +torii (gundam),24 +torigara cha,24 +tori knkr,24 +torafuji nagi,24 +top gun: maverick,24 +tooyama midori,24 +tooru acura,24 +tonwwee,24 +tonkatsu (nagasarete airantou),24 +toni kensa (splatoon),24 +tonguewasabi,24 +tongtongtong,24 +tomoe (kemono friends) (niconico88059799),24 +tomineko (tomineko p),24 +tomaty.,24 +tomason,24 +tololi,24 +tokyo city hall,24 +tokyo babylon,24 +tokiwa png,24 +tokiwa osamu,24 +tokimeki tonight,24 +tokimatsuri eve,24 +tokiha suzumiya,24 +tohsaka tokiomi (cosplay),24 +togetsuhou,24 +toge nbo,24 +togami (tobysou526),24 +tofuboyz,24 +todoroki suyoshi,24 +todd oyamada,24 +tnaym,24 +tm-pika,24 +tladpwl03,24 +tlachtga,24 +tixie lix,24 +titanium hrk,24 +tinkerbat,24 +tine chelc,24 +timesoe,24 +tierra-sensei,24 +tiana (the princess and the frog),24 +thyrsus (fire emblem),24 +thumbcuffs,24 +thrown food,24 +thorny,24 +this is it,24 +third eye on chest,24 +thievul,24 +the transistor,24 +the prey,24 +the immaculate one,24 +thatpebble,24 +thalia,24 +tessa 1178,24 +terrorism,24 +terror (halloween terror) (azur lane),24 +terraxle,24 +teresa (iron saga),24 +terebi-,24 +terada katsuya,24 +teppuu,24 +tenten (chan4545),24 +tenshi ni narumon,24 +tennenmoe,24 +tennen ahoke,24 +tennen0201,24 +tengaku (vocaloid),24 +tenga hajime,24 +tenebrae,24 +tenchi muyou! manatsu no eve,24 +tempyou kango,24 +tempplex,24 +temari maco,24 +telephone number,24 +teito,24 +teijiro,24 +teepo,24 +teddy (mother),24 +techi (siro-white 0803),24 +teay (ttttteay),24 +tawai,24 +tavros nitram,24 +taut vest,24 +tatsunoko pro,24 +tatsumi yashiro,24 +tatsumaki senpuukyaku,24 +tatenayua,24 +tatata taira,24 +tastysalt,24 +taruk,24 +tartu (overthinking summer) (azur lane),24 +tapir girl,24 +tapa,24 +tanjel,24 +tango (soccer ball),24 +tandouji alma,24 +tanatonkub,24 +tanaka yuyuko,24 +tami yagi,24 +tamasi,24 +tamamo no mae (swimsuit lancer) (fate) (cosplay),24 +tamamo no mae (mythology),24 +tamamo (destiny child),24 +tamak rui,24 +tamachan,24 +tama (nezumi),24 +tama (chai),24 +talon feather (last origin),24 +takumi11,24 +taku pi,24 +takeuchi mariya,24 +takeda shingen (sengoku otome),24 +take5321,24 +takatsuki kanade,24 +takashima zakuro,24 +takase hiro,24 +takanoriha kasui,24 +takanon (nekomaruya),24 +takamachi momoko,24 +takakura aki,24 +takahina,24 +takahashi ryuunosuke,24 +takahashi osamu,24 +takahashi meijin no bug-tte honey,24 +taja spinner,24 +taiyouken,24 +tail strap,24 +tail around neck,24 +taeko (onsen tamago),24 +tada,24 +tackle box,24 +tachibana itsuki,24 +tachibana hinata (tokyo revengers),24 +tac-50,24 +tabigarasu (mahjong yuugen gaisha 58),24 +tabetai omochi,24 +t20210325,24 +switzerland,24 +swiftsure (beauty of white jade) (azur lane),24 +svenska flygvapnet,24 +suzuki yua,24 +suzuki shunji,24 +suzuki aina (seiyuu),24 +suzuki (cookie),24 +suzuhara shima,24 +suzugamori ren,24 +suzie (agent aika),24 +susutaketakumi,24 +susukinohukurou,24 +susuki (kutan),24 +surume (su1193),24 +suranaki,24 +super taruco,24 +super sons,24 +super sailor saturn (stars),24 +super robot wars l,24 +super mario strikers,24 +super fumina,24 +sunsirou,24 +sunahi arumi,24 +summon lw,24 +sumisumi,24 +sumi (suumiko ),24 +sumery,24 +suke 81,24 +sukasshu (mroooo),24 +suirenji kiyoharu,24 +sugiura yoshio,24 +sugise satoshi,24 +sugino (tactics),24 +sugi (shoufusha),24 +suga saru,24 +su (pixiv44447),24 +sturm (arowana kingyo),24 +stuffed eggplant,24 +stuffed alpaca,24 +studded footwear,24 +string on pinky,24 +stray hair,24 +strange ecolo,24 +stop,24 +stirring rod,24 +start!! 
true dreams (love live!),24 +staring contest,24 +stardust11,24 +starcat,24 +star wars: attack of the clones,24 +star voice (module),24 +star guardian neeko,24 +standing on branch,24 +stan lee,24 +stalk,24 +srm chi,24 +squeaky mallet,24 +sprinkler (splatoon),24 +spriggan (final fantasy),24 +spread urethra,24 +spotted skunk (kemono friends),24 +spooky-dollie,24 +spoken money,24 +spicy nun's mother (diva),24 +spats sansei,24 +sparkle earrings,24 +spacey,24 +soyaka,24 +soxkyo,24 +soushin shoujo matoi,24 +souma hatsuharu,24 +soulless,24 +soukaa (golden sash),24 +souffle sable,24 +soritari,24 +soriham,24 +sorano namida,24 +soppos,24 +sonken bundai,24 +song mia,24 +sonacia,24 +solail (faya),24 +sol badgal,24 +soju,24 +soga no tojiko (cosplay),24 +snowflake in hair,24 +snj,24 +slovak cuvac,24 +slipstream (transformers),24 +sliced meat,24 +sleepyhead,24 +skyla (holiday 2020) (pokemon),24 +skrats,24 +skllp,24 +skeleton horse,24 +sizuo 1997619,24 +sitting on chest,24 +siren (xenoblade),24 +sirbine,24 +sima nozomu,24 +silvia lautreamont,24 +silverash (york's bise) (arknights),24 +silky (last origin),24 +silicone,24 +sigppang (2shot00002),24 +sigemi,24 +sig (sfried),24 +sienna khan,24 +sic77,24 +shy (character),24 +shuusou gyoku,24 +shuu (shu-nm),24 +shunki,24 +shun (nikoru555),24 +shuga (0329tixi),24 +shuffle! memories,24 +shuen,24 +shoutarou (shoutarotttt),24 +shoujiki mura,24 +shoudoku taishi (taishi),24 +shou fuji,24 +shiying no yao,24 +shitada,24 +shishihara sawaya,24 +shirufana,24 +shirousagi una,24 +shirou kamui,24 +shiromimin,24 +shiroi yuki no princess wa (vocaloid),24 +shirogane noel (cosplay),24 +shirogane no cal to soukuu no joou,24 +shirochimaki,24 +shiro yukimichi,24 +shiro (shounen to hero),24 +shiro (shin-chan),24 +shiro (maple syrup9),24 +shirayuki usami,24 +shirayuki miho,24 +shirayuki (wind of breaking blade) (arknights),24 +shiratori yuriko,24 +shiraki (artist),24 +shiraishi (tanakeda),24 +shiragiku1991,24 +shiraga airi,24 +shirafuji tamaki,24 +shiosaki mato,24 +shion souta,24 +shion no ou,24 +shinonome tsukasa,24 +shinoko,24 +shinobu (kobanatu),24 +shino (sosuketo),24 +shinkuro sanagi,24 +shinkami hiroki,24 +shining shoot (pose),24 +shinguuji sakura (cosplay),24 +shinbross,24 +shinano eiji,24 +shinada an,24 +shimizu sorato,24 +shimakusa arou,24 +shima taka,24 +shima-tan,24 +shikapiro,24 +shika (hachilemon),24 +shijou saikyou no daimaou murabito a ni tensei suru,24 +shigeruoomi,24 +shidai,24 +shichigusa nanako,24 +shibata g ransu,24 +shibari marks,24 +shibainutank,24 +shiba cyon,24 +shi yusu,24 +sheba (xenoblade),24 +shax (megido72),24 +sharpheon,24 +shao siming guang wei,24 +shailiar,24 +shadow lady,24 +shachou batoru no jikan desu!,24 +sezaki takumi,24 +setsuna215,24 +seto (venus rumble),24 +setakman,24 +serisawa,24 +seri p tedoku,24 +seres (tales),24 +serena (yu-gi-oh!) 
(cosplay),24 +serena (konosuba),24 +sennen joyuu,24 +senkouji hagino,24 +senkaku mei,24 +senjou no pentsu,24 +sengoku basara 3,24 +sendou chika,24 +senba hikari,24 +semaphore flags,24 +sema (mekemeke king),24 +selene (dragon's dogma),24 +sekisouseki,24 +sekichuu (unholywars12),24 +seki toshinari,24 +seki tomokazu,24 +seiran (blue-orchid),24 +seiji (artman),24 +sega mega drive (sega hard girls),24 +seelean,24 +see-through gloves,24 +secret distance (project sekai),24 +sea chicken,24 +sd gundam world sangoku soketsuden,24 +scrambled egg,24 +scp,24 +scottish english text,24 +scar on ass,24 +scalizo,24 +sazanami jun,24 +sazanami (kancolle) (cosplay),24 +sayoko (sayosny2),24 +saya7,24 +sawade,24 +saw272,24 +satou (satohstc),24 +satan (the seven deadly sins),24 +sasumata,24 +sasakungoodsize,24 +saryn (warframe),24 +saru (pixiv13751598),24 +saratoga (kancolle) (cosplay),24 +saplus,24 +santou suihei,24 +santa alter (cosplay),24 +sanskrit,24 +sanshi (sannshi 34),24 +sanoi (giraffe),24 +sanaa,24 +san (winteroll),24 +samu (a117216),24 +same (carcharodon),24 +samara chan,24 +sam delatore,24 +salyut,24 +sakuraminto,24 +sakuramau,24 +sakurai ryouko,24 +sakuragi kurumi,24 +sakuragasaki fubuki,24 +sakura setsumi,24 +sakura no ame (vocaloid),24 +sakura chika,24 +sakuno shion,24 +sakumi,24 +sakugo,24 +sakisaka fuminori,24 +sakashita yomi,24 +sakasaki natsume,24 +sakamoto ahiru,24 +sakakibara mizuki,24 +sakaiya yumeno,24 +sakai chigusa,24 +saitou (ghost in the shell),24 +saikyou mahoushi no inton keikaku,24 +saijou karen,24 +sagiri mikage,24 +sagawa yumeko,24 +saeki shouji,24 +saden (magumo),24 +sacrifice (sound horizon),24 +sabanobori,24 +saab gripen,24 +s-sha,24 +ryuzu (clockwork planet),24 +ryuusei's short-haired girl,24 +ryuukeichi andromeda,24 +ryuu to sobakasu no hime,24 +ryuna (inc moon),24 +ryuda,24 +ryouten9,24 +rynisyou,24 +rvve,24 +rukira,24 +rukia (sound horison),24 +ruka (blueplus84),24 +ruinai,24 +rucksack,24 +rubill,24 +ruben de vela,24 +rta in japan,24 +rta-chan,24 +rpd,24 +roxie (skullgirls),24 +roswell ss,24 +rosalie (gothic wa mahou otome),24 +rosa cossette d'elise,24 +root (stpri),24 +rooster costume,24 +rono,24 +ronixis kenni,24 +rom (romshiro),24 +rollingcalling,24 +rokutelie,24 +rokuo016,24 +robiola (girls und panzer),24 +rlin,24 +rizihike,24 +riyan,24 +ritae,24 +rishixiyan,24 +rio (usagiya),24 +rinnku,24 +ringorenji,24 +rina (canvas+garden),24 +rin takanashi glacies,24 +rilliona the magistus of verre,24 +ril (wixoss),24 +rikona,24 +rikadoh,24 +rihhi,24 +rick (splatterhouse),24 +ribbed cardigan,24 +ria hagry,24 +ria (yfvv ria),24 +rhode,24 +rfa,24 +revived witch,24 +reticulum,24 +renko (gayosiz),24 +renga2250,24 +reinhardt (alchemy stars),24 +reina (leinqchqn),24 +reimu no yari,24 +reiji 0 g,24 +rei (pixiv 187780),24 +regiana (jubi),24 +reed (instrument),24 +reclamon,24 +reaper (overwatch) (cosplay),24 +re:birth colony,24 +re-45 auto,24 +rayu,24 +ravine,24 +raurashun,24 +ratoratah (ooo combo),24 +ratetaso,24 +ratatoskr (monster girl encyclopedia),24 +rata to,24 +rara086,24 +raputsue,24 +raptias,24 +rano (u rano),24 +rano8,24 +ran (bearsoymilk),24 +ramn,24 +ramia-yana,24 +rakugaki (artist),24 +raisun,24 +raise (ryo),24 +rainydayjp,24 +rainbow wing (pokemon),24 +rainbow-colored septentrion,24 +raimu (ranxa),24 +raimon track uniform,24 +raimon tarou,24 +rafale1008,24 +raeenay,24 +radittz,24 +radia,24 +racing miku (2021),24 +qzo (akai kitsune),24 +quilladin,24 +quick camel,24 +quichi 91,24 +queendom (love live!),24 +queen chrysalis,24 +queen (vocaloid),24 +queadluun-rea,24 +que meng 
meng,24 +quadriri (lansane),24 +qswan,24 +qiumoyixing,24 +qingjiao rou si,24 +pyroar (female),24 +pushun (muteki),24 +puppet show,24 +puni (artist),24 +pumpkin-crazy,24 +pukamon,24 +pucchan,24 +psyco gundam mk ii,24 +protagonist (ensemble girls!),24 +project gen2,24 +private ten'ou middle school uniform,24 +prinz eugen (blue oath),24 +print choker,24 +print ascot,24 +princess silver,24 +princess sailor moon,24 +princess peach's castle,24 +princess goomba,24 +primordial jade cutter (genshin impact),24 +pretty liar (idolmaster),24 +ppera,24 +poyo party,24 +power dolls,24 +posture collar,24 +possum ears,24 +porupu,24 +poptrt,24 +popopo (popopo5656),24 +pontata,24 +ponn mame,24 +ponkotsu ado,24 +pondo (peng-model),24 +pondering my orb (meme),24 +pom (soupy),24 +polora,24 +pokurouta,24 +pokilewd,24 +pokemon: zoroark: master of illusions,24 +poifuru,24 +pof (peuplierpof),24 +pochi (hetalia),24 +plusbrackets,24 +plumw,24 +platinum the trinity (cosplay),24 +plasmid,24 +placido,24 +piyopiyo,24 +pixy misa,24 +pixiv papico design contest,24 +pixiv fantasia scepter of zeraldia,24 +pixerite,24 +pixcel,24 +pink ranger,24 +pink blanket,24 +pikachu phd,24 +pierre iwashi,24 +piancaesar,24 +pi (space maria),24 +physics point,24 +phyco (pokemon),24 +photoshop flowey,24 +photography,24 +phoenix wright (cosplay),24 +philomel hartung,24 +philemon (butterfly),24 +phase,24 +petrushka,24 +peso (honopesopeso),24 +pero (pero7762),24 +peppsi (saba sabasuk0),24 +pepper cat,24 +pepper (norasuko),24 +pencil behind ear,24 +pelisse,24 +pecka,24 +paula (sennen sensou aigis),24 +patricia of end,24 +pateo,24 +passevo,24 +parsue,24 +parimu,24 +para sitism,24 +panzer (pnzrk),24 +panri,24 +pam-pam (precure) (human),24 +palpatine,24 +palidoozy-art,24 +pairon,24 +pachpachpach,24 +oyo hitsuji,24 +oyatu55k,24 +oyamada musshu,24 +oyakodon (food),24 +owner1657,24 +owaowa no mi,24 +overhead swing,24 +over the garden wall,24 +outsider 0,24 +oujano kaze,24 +oudondk,24 +otori michiru,24 +otoo hyougo,24 +otomo megane,24 +otome ga irodoru koi no essence,24 +otamon,24 +ossannoa,24 +oshiru (sealeu),24 +osana najimi,24 +oryuu,24 +orirock (arknights),24 +ophelia (merryweather),24 +ophelia (elsword),24 +ootori sakuya,24 +ooku,24 +ooishi (shiromanta),24 +oogushi aritomo,24 +onoguru,24 +onmyou taisenki,24 +on horn,24 +omotim000,24 +omochi chowder,24 +omochi (433purupuru),24 +omiiverse,24 +omega rugal,24 +omae nobuyuki,24 +olivia (god hand),24 +oliver poplan,24 +older edelfelt sister (fate),24 +old-fashioned swimsuit,24 +okuzora kohaku,24 +okuda yousuke,24 +okkobc,24 +okazakileo,24 +okabe gyoza,24 +ojou-sama wa gokigen naname,24 +oikawa momosuke,24 +odysseus eu britannia,24 +obara gaun,24 +o5o3,24 +nyoutou,24 +nykim0915,24 +nyansan oekaki,24 +nyanom,24 +nyabe,24 +nunua,24 +numemon,24 +nukumori nukumi,24 +nozimami,24 +noveske space invader,24 +notsuki miko,24 +notnoe (dxcl),24 +nosejob,24 +nori aji,24 +nordgreen,24 +nora (le-chat-noir),24 +noland (pokemon),24 +noki (potekoro),24 +nogitatsu,24 +nobuyo ninomiya,24 +noble succubus bianca,24 +nobingo,24 +noasa,24 +noa (meing),24 +niwatazumi keiko,24 +niwa ryouka,24 +niwa2wa tori,24 +niuy,24 +nita (pokemon),24 +nishinosono moe,24 +nishinishihigas,24 +nishina kakeri,24 +nise maou kikaizeru,24 +nipponia nippon,24 +niou kaoru,24 +ninja (disgaea),24 +nineball seraph,24 +nine usagi,24 +nine (zankyou no terror),24 +nilou (genshin impact) (cosplay),24 +nikoro,24 +nikomiudon 06,24 +niko (2ars),24 +nikku hikikomori,24 +nikaidou hitsugi,24 +nijiura,24 +niggurath the ancient tree branch,24 +nidou (rechlo),24 
+nichi (hibi suimin),24 +ngv3553,24 +nezunomori,24 +nezu miko,24 +nezahualpilli,24 +new kamen rider,24 +new gnsn,24 +nevada-tan,24 +nerima (neconicoban),24 +neptune vasilias,24 +neoru (cvfw7854),24 +nenekoko (rariatto),24 +nen master (dungeon and fighter),24 +nemunoya,24 +nemui (ohisashiburi),24 +nekoyo chloe,24 +nekoyama shien,24 +nekotaririn,24 +nekono,24 +nekoname tuna,24 +nekoma volleyball uniform,24 +nei (hashiko nowoto),24 +neg 50asu,24 +neferkitty,24 +necrozma (ultra),24 +neconotaki,24 +nd,24 +nazuna (nazuna a2),24 +nawate (dij),24 +navy-san,24 +navi (ivan),24 +nautilus (nadia),24 +nautilus (azur lane),24 +natuichi-7212,24 +natsutaro (sss stn),24 +natsuiro koi uta,24 +natsue,24 +national geographic,24 +nasu hanahana,24 +naruse yasuhiro,24 +narumiya suzu,24 +narumi midori,24 +naozi,24 +naoazaz,24 +nao (naobinarydigit),24 +nanumn,24 +nanokah2,24 +nanno alice,24 +nannnann,24 +nannaspad,24 +nankai,24 +nanami k bladefield,24 +nanamesohuutei,24 +nanakusa (hillwithstars),24 +nanael (queen's blade unlimited),24 +nakedgeneral,24 +nakazaki tou,24 +nakayama (hidamari sketch),24 +nakatomi ryou,24 +nakata jouji,24 +nakano elsa,24 +nakamachi machi,24 +nagao uka,24 +nagae iku (fish),24 +n (ruff),24 +n-bata,24 +myuu (hellomagic),24 +mystique,24 +mysoda,24 +muusu,24 +musicalcombusken,24 +musical note-shaped pupils,24 +musee,24 +musashi (violet moonglow) (azur lane),24 +murujimu,24 +murmur (mirai nikki),24 +muraya,24 +murasakijazi,24 +murasaki (game),24 +muraosa (conjecture),24 +murakami ginko,24 +munisuke (zrkt7883),24 +munape,24 +mukai yumiko,24 +mucus,24 +mucchiri shiitake,24 +mu yan,24 +mrw,24 +mr very,24 +mouse on hand,24 +motsu ryouri,24 +motsu rebaa,24 +motoori kosuzu (cosplay),24 +motomiya nagisa,24 +motoasako,24 +morumoru (kuromrmr),24 +moru00f,24 +morpheus (milk tea-ya),24 +morphe (granblue fantasy),24 +morning6am,24 +morita shinobu,24 +morimoto kanaru,24 +mordred (granblue fantasy),24 +morakkyo (mephilas g3),24 +mora meat (genshin impact),24 +mootor,24 +moonshiner,24 +monge baby,24 +monch (arknights),24 +monarch (white warrick) (azur lane),24 +mon0351,24 +momoza r,24 +momoshina fumika,24 +momobako,24 +momo tai,24 +momijiko,24 +momiji ayaka,24 +moka. 
tapioka,24 +mochita sei,24 +mochimomomo,24 +mobile infantry,24 +mk82 (hoonsyh),24 +mizutani hozumi,24 +mizushima airi,24 +mizusawa matsuri,24 +mizuno star,24 +mizukoshi mako,24 +mizukiri,24 +mizuhichi,24 +mizuamemochimochi,24 +miyuki nknk,24 +miyoshi,24 +miyo (aenmix),24 +miyazaki hayao (person),24 +miyamoto musashi,24 +miyamoto hikari,24 +miyamae iroha,24 +miwa ai,24 +miura kazuki,24 +mitou shoukan://blood sign,24 +mito hollyhock,24 +mithra tsukiaki (vtuber),24 +mithos yggdrasill,24 +misyagu,24 +misono denpachi,24 +misochige,24 +misenouchi,24 +misaki renka,24 +misaka (05),24 +miruka,24 +mirai (mikami mika),24 +mirage (transformers),24 +mira (world trigger),24 +minusion,24 +mint clark,24 +minori,24 +minmii (minmi078),24 +minipat (sketch wall),24 +minior (orange core),24 +minerva (spacecraft),24 +mindle (mossacannibalis),24 +minatoya ringo,24 +minato (houkago no pleiades),24 +minase suzu,24 +minami noriko,24 +minakami hina,24 +mina likering,24 +mina (cafe little wish),24 +min-mu,24 +mimit,24 +mimi n,24 +milktower,24 +milia (lord of vermilion),24 +mikoccccchan,24 +miko kubota,24 +miko (miko030751),24 +miki (miki125dragon),24 +mike (chai),24 +mikasa ackerman (cosplay),24 +mikan yuzuko,24 +mikage subaru,24 +miichi (mimimi),24 +mii (jungle de ikou),24 +mihaia,24 +migii (tenra banshou),24 +mige shijiu,24 +mie lu,24 +midna01,24 +midd night,24 +michi l (streetlamp),24 +michael casteel,24 +mice (sake nomitai),24 +meuwzza (me zwa),24 +metroid prime 3: corruption,24 +metem puella,24 +metalbolic,24 +metal upa,24 +merli (vocaloid),24 +memory (prophet5),24 +memidesuyo,24 +memento1113,24 +melody (pokemon),24 +melfina bluesky,24 +mel mellow,24 +mekongdelta,24 +meido-fuku ga mitai,24 +megabee e,24 +mega beedrill,24 +mechjunk,24 +me meel,24 +mdr (ghost trap) (girls' frontline),24 +mcnuggies (meme),24 +mcnostril,24 +mayo.,24 +mayf42,24 +may greenfield,24 +mausoleum,24 +maumaujanken,24 +matumasima,24 +matsurika,24 +matsuno (mat8k),24 +matsunaga maguro,24 +matsumoto (5okuen),24 +matsumae takumi,24 +matsui haru,24 +matsufusa ema,24 +matoba kei,24 +mato.,24 +matilda caskett (mega man),24 +mathuri,24 +masturbating while watching,24 +mashiro03,24 +mashiri,24 +mashicono,24 +mash kyrielight (dive to blue),24 +masayan (minor-ms),24 +masato ayame,24 +masapeko,24 +mary sera,24 +maruchan akai kitsune udon,24 +marubororaito,24 +martina zoana mel navratilova,24 +martian officer,24 +martha (traveling outfit) (fate),24 +maron-chan,24 +maro (maro1108),24 +maro-n,24 +marmalade,24 +maritchi,24 +marine miku,24 +mari mari,24 +marge simpson,24 +maplesights,24 +maou no hajimekata,24 +mango cat,24 +manao-ke,24 +manami (fearfac666),24 +manako (manatera),24 +manaka mitsumi,24 +manaia matawhaura hato,24 +mamono musume-tachi to no rakuen ~slime & scylla~,24 +mami (sweetcandy),24 +mamerakko-chan,24 +malphier,24 +makui (umaku ikanai),24 +mako dai ni-dai,24 +makimura (miwmiw-cage),24 +makihara nodoka,24 +maki (maki88),24 +maker,24 +makarony,24 +makami (kemono friends),24 +maitake (maitake1234),24 +maigo no te o hiku sono saki wa (project sekai),24 +mahou shoujo madoka magica: concept movie,24 +mahha warabi,24 +maha5japan,24 +magpie (vtuber),24 +magine,24 +magikoopa,24 +maggey byrde,24 +mafumofu (armor),24 +madou king granzort,24 +madarabunchow,24 +madan (kkh8936),24 +mad scientist,24 +machi (uqyjee),24 +macchatei koeda,24 +mabuchi kyoma,24 +mablex,24 +lynette bishop (cosplay),24 +luminous arc 3,24 +lumda,24 +lude (ragnarok online),24 +lucky (sweet),24 +louise halevy's mama,24 +lotte (company),24 +lost-ko,24 +lop (star wars),24 +loki 
(p&d),24 +loish,24 +logo hair ornament,24 +locketmonkey,24 +locked outside,24 +lobsterbaby99,24 +lmo,24 +lm 0063,24 +ljayu,24 +liver,24 +little lass (ishiyumi),24 +lisong shen,24 +lisher,24 +lisette vertorre,24 +lisa (lom),24 +lipstick mark on shoulder,24 +lipstick mark on arm,24 +lipe-san,24 +lion mane,24 +linnoko,24 +lilac (live a hero),24 +lila (najica),24 +liezerota,24 +lia marin,24 +li0n (kongshushiwo),24 +leyte,24 +leona ardealescu,24 +lens life,24 +lemoneko,24 +lemnear,24 +leina (queen's blade unlimited),24 +legjob,24 +legend of lemnear,24 +left out,24 +lefiya viridis,24 +leeee ro,24 +lee chaolan,24 +lea (kingdom hearts),24 +le petit prince,24 +layeyes,24 +lauren phillips lifting alice merchesi (meme),24 +larienne,24 +lanubis,24 +lana branford,24 +lamia (voice actor),24 +lada (car),24 +labia ring,24 +l'indomptable (azur lane),24 +kyubi (99b 1226),24 +kyou-chan,24 +kyo (krokikyon),24 +kuuron,24 +kutuna yui,24 +kusunoki tomori,24 +kusata murasaki,24 +kusanagi suito,24 +kusanagi matabi,24 +kurosukey,24 +kurosaki shigure,24 +kuronekokan monpetit,24 +kuromi (cosplay),24 +kuroki michi,24 +kuro no utahime,24 +kurimuzon,24 +kurimilove,24 +kurenai shinkurou,24 +kureiji ollie (artist),24 +kurasuta,24 +kurasawa makoto,24 +kuradoberi jam (cosplay),24 +kunochai,24 +kung fu cooking girls,24 +kumomiya,24 +kumio-appon,24 +kumakoro (tetsupag),24 +kukig8765,24 +kujou karen (cosplay),24 +kugayama mitsunori,24 +kubooka toshiyuki,24 +kuaile de si jun,24 +ku--ma,24 +ksvk (angel's paint brush) (girls' frontline),24 +ks-23 (girls' frontline),24 +kris bernal,24 +kredorf,24 +kraber,24 +koyanskaya (fate) (cosplay),24 +kouyou (12953910),24 +koutyousan,24 +kouryou academy uniform,24 +kouno megumi,24 +kouhai (souzaipan),24 +kougei ciel nana,24 +kotori (may queen),24 +kotone ranmaru,24 +koshiki miyuki,24 +kose takashi,24 +korira,24 +kopa,24 +konsune (tengerium),24 +konpane (ohj),24 +konomi (yappen),24 +kongouseki,24 +konbanwa01,24 +kon hoshiro,24 +komota (mikebukuro),24 +komori aimi,24 +komorebi no namikimichi,24 +komkomx,24 +komiya harumoto,24 +komikado kensuke,24 +komaki,24 +komainu akira,24 +kokuu no megami athena (armed),24 +kokoala,24 +koko (hm142533),24 +kokemozuku,24 +koke (moromiso),24 +koimomo,24 +koi suru kimochi no kasanekata,24 +koharu nosuke,24 +kogetsu tooka,24 +koga (ringozaka mariko),24 +koenigsberg (azur lane),24 +kobutya4696,24 +kobayashi shinpei,24 +koan (sailor moon),24 +koa,24 +ko yami,24 +knight emperor (elsword),24 +knapsack,24 +km (ksolee1201),24 +kkkkt,24 +kk90,24 +kizuchi kanna,24 +kizdollark,24 +kiyone (psychofox),24 +kiyohime (fate) (cosplay),24 +kitatyoco,24 +kitaru (mabo f),24 +kitaminami,24 +kitagawa (ktgw 116),24 +kisume (cosplay),24 +kiss yori saki ni koi yori hayaku,24 +kiss (rock band),24 +kishio (agedama),24 +kisaragi myau,24 +kisaragi itsuka (aufheben),24 +kiryuu mizuha,24 +kiriya obu gn,24 +kirara akaru,24 +kippeijii,24 +kinomoto (nazonoinu),24 +kineya emuko,24 +kinatsu ship,24 +kinakomochi (kazuna922),24 +kimu (risatoko),24 +kimohiko,24 +kimino yume,24 +kim han seul,24 +killing,24 +kiev (backstreet silver sonata) (azur lane),24 +kidagakash,24 +khalida trish,24 +kewpie (mazohaha),24 +kent (kariumu),24 +kenshirou (mono ken),24 +kennymoney,24 +kenmotsu chiyo,24 +ken hayasaka,24 +ken19941028,24 +kemuma,24 +kejourou (monster girl encyclopedia),24 +keiz,24 +kedamaton,24 +kb-5,24 +kazuki seto,24 +kazemura,24 +kazahana mashiro,24 +kazagumo (azur lane),24 +kayuo,24 +kawery,24 +katyusha (girls und panzer) (cosplay),24 +katou misaki,24 +katou kei,24 +katou fumitaka,24 +katorius,24 
+kate (pokemon),24 +katano sukune's bottle opener,24 +katamari,24 +katagiri ayako,24 +kasumi toshizou,24 +kasugano urara (sabagebu!),24 +kassim,24 +kashiwagi sumika,24 +kasahara tetsurou,24 +karla (kono healer mendokusai),24 +karl liversidge,24 +karijuku tomoe,24 +kari (atsuki 565),24 +kara (sam yang),24 +kapu (tetete8901),24 +kanzaki moe,24 +kanzaki megu,24 +kanou kayoko,24 +kanotype,24 +kanikaniland,24 +kanihai,24 +kang sae-byeok,24 +kanda sorata,24 +kanda (squall-rinoa),24 +kanbe piroshiki,24 +kanaya604,24 +kanamaru yuuki,24 +kamui (fire emblem),24 +kamogawa akira,24 +kamiya mitobe,24 +kamiko to seiryoku,24 +kamihitoe,24 +kamen rider punch hopper,24 +kamen rider kivala,24 +kamen rider j (movie),24 +kamen rider j,24 +kamen rider ibuki,24 +kakura yoshiki,24 +kakesu (freiheit),24 +kaixuan lushang,24 +kaitou shinshi no harahara!? white day (project sekai),24 +kaguya (force of will),24 +kagari3,24 +kaede acer,24 +kadokeshi,24 +ka maru,24 +ka koubun,24 +k (chissaiossan),24 +k@ito90p,24 +k.nock,24 +k.j.,24 +k.c,24 +jyuui,24 +jyundee,24 +jyb unknown,24 +jyan borii,24 +jwthor,24 +juu ho,24 +justsomenoob,24 +junpei (kyokugen dasshutsu),24 +junou,24 +jumpei,24 +july (coyote ragtime show),24 +julius belmont,24 +juliet starling (cosplay),24 +judeau (berserk),24 +jude mathis (butler),24 +joy (joy-max),24 +jotarozaku,24 +josou shinwa,24 +jorge joestar,24 +jokei kazoku,24 +johnny bravo (series),24 +jo an,24 +jjwww love,24 +jira,24 +jinpou anne,24 +jinkou no kuma,24 +jing li,24 +jil,24 +jiecaoxiong,24 +jevil,24 +jenny (artist),24 +jennifer yamada,24 +jenna brown,24 +jeet,24 +jeep (company),24 +jazz (fuukan),24 +jay27,24 +jaxa,24 +jasmine (flower),24 +janus (azur lane),24 +jacket (hotline miami),24 +j am,24 +izumi (izumi p),24 +izawa shizue,24 +izatama,24 +izanagi no okami,24 +iyokamioto,24 +iwatsuki,24 +iwadate yuan,24 +iuchar (fire emblem),24 +itsuki (spitbreak),24 +issycake,24 +isogai yuuji,24 +ishikawa yuga,24 +isaroishin,24 +irving-zero,24 +iris.exe (mega man),24 +iridori,24 +irene white (girl cafe gun),24 +inui (shirakawa777),24 +inu mamoru mizuki,24 +instrument request,24 +inoue mikuni,24 +innocentia,24 +ink (artist),24 +infamous,24 +ines (tachiagare! 
orc-san),24 +indol,24 +incoming letter,24 +inanami,24 +inaka gyomin,24 +inagawa yuu,24 +inabahitomi,24 +imomonono,24 +imi fumei,24 +illustica phantom,24 +ikoma minami,24 +ikaruga ibuki,24 +ika esu,24 +iiwake,24 +iiumiarts,24 +iinchou (justice gakuen),24 +igu (103milk),24 +igarashi aguri,24 +idol show time,24 +icqoo,24 +icicle fall,24 +ichinose nagi,24 +ichinose ichino,24 +ichimoku ren (onmyoji),24 +icedev,24 +ibis1,24 +iberis (flower knight girl),24 +ibarahime shizuka,24 +ibanez,24 +hyper brand,24 +hy136,24 +huziiro matutya,24 +humany,24 +huira444,24 +huan (hao7551789),24 +hu58013901,24 +htms sri ayudhya,24 +hraesvelgr (last origin),24 +hozumi sayaka,24 +howe (noble rouge) (azur lane),24 +howaitosawa papiko,24 +how to talk to short people (meme),24 +how is the progress (meme),24 +housui (g3hopes),24 +house m.d.,24 +houkago saikoro club,24 +hototogisu,24 +hoshizora hiroshi,24 +hoshinopurin,24 +hoshino mitsuki,24 +hoshino aquamarine,24 +horuta suin,24 +horu (horu 111),24 +horitomo,24 +horikoshi kouhei (style),24 +honoka (the third),24 +honkawa works,24 +honk honk (meme),24 +honenashi chicken,24 +hondo kaede,24 +honda yuita,24 +homong,24 +hollow body,24 +holding wig,24 +holding scanner,24 +hokoro,24 +hoken dayori,24 +hogi,24 +hko,24 +hk nnm,24 +hiyuki-chan,24 +hitsuka baka,24 +hitotose tanteidan,24 +hitohira onsa,24 +hitakikan,24 +hisohiso (altoblue),24 +hise,24 +hisame mon,24 +hisakawa aya,24 +hiroyama (hpzg5374),24 +hirose koharu,24 +hiro (minorstar),24 +hiro (14806390),24 +hirasawa yuu,24 +hiqu,24 +hippowdon (male),24 +hinohino,24 +hinasaki mafuyu,24 +hinaname,24 +hinako (teruki kuma),24 +himenohara suzuran,24 +himeko (honkai: star rail),24 +himejima kinoko,24 +hima hawa,24 +hilbert (pokemon) (cosplay),24 +hilary (pokemon),24 +hikotou (sao73cat),24 +hikora,24 +hiki furisode,24 +hikarinoko,24 +hikari (saidaioujou),24 +hikage (0hi kageo),24 +hijiriido miyo,24 +higezamurai (kurage gunsou),24 +hieda yawe,24 +hidekichi (09075470338),24 +hidden weapon,24 +hidaka rina,24 +hibinpo,24 +hibikase (vocaloid),24 +hibi (grangcat),24 +heru (totoben),24 +henshin!!! 
~pantsu ni natte kunkun peropero~,24 +henry henderson,24 +henry davis,24 +henry1025,24 +hemachi,24 +hel0205,24 +heitai gensui,24 +heike falke,24 +heihei de hei yan long,24 +heemin,24 +headband girl (kamisimo 90),24 +hayanse,24 +hayami iori,24 +havoc (darker than black),24 +hatsuzuki (kancolle) (cosplay),24 +hatsune miku (roshin yuukai/nitamagomix),24 +hata matsuri,24 +hasukawa isaburou,24 +haruno ichigo,24 +harumaki haruki,24 +harukanaru toki no naka de 2,24 +haru (tateha),24 +haru (calipur),24 +haru (amagamido),24 +haro art,24 +hardy (azur lane),24 +harami (qz48lr),24 +hara shouji,24 +happy turn2,24 +happamushi,24 +haoriya chie (minidraco),24 +haori (ki-na-ri),24 +haocong33,24 +hans humpty,24 +hanosuke,24 +hanimaru (h@nimaru),24 +haneda kobato,24 +hane (kirschbaum),24 +hand on thighs,24 +hand on arm,24 +hanchi hannou,24 +hanafusa itsuki,24 +hamitamako,24 +hamericano,24 +hamama2,24 +halsey powell (azur lane),24 +hakutakuanta,24 +hakutaku (granblue fantasy),24 +hakuhatsu,24 +hakase,24 +hakai no ika,24 +hajimenimodoru,24 +haishin,24 +haires,24 +hair fan,24 +haiboku no megami,24 +hagino chihiro,24 +hagino (axgh),24 +hagi (artist),24 +hafuri,24 +haebara zanka,24 +hachimikkusu,24 +gyokuto b,24 +guu (guu8),24 +guts seijin,24 +guts (berserk) (cosplay),24 +gustav karl,24 +guranaada,24 +gunsmoke,24 +gundongdejie,24 +gundam tekketsu no orphans urdr hunt,24 +gundam age-2 darkhound,24 +gumitaroo,24 +guiche de gramont,24 +grenda-san,24 +green shell (mario),24 +gravelord nito,24 +grand admiral marina,24 +gracia hughes,24 +goya (team harenchi),24 +gotouge koyoharu (style),24 +goriraneesan,24 +goowonjoon,24 +goom (goomyparty),24 +gomas,24 +goldmondsel,24 +golden spiral,24 +golden knight (granblue fantasy),24 +god of the new world,24 +god eater resonant ops,24 +gneisenau (warship girls r),24 +gmg,24 +glowing finger,24 +globburt,24 +glitch techs,24 +girl with bear (madoka magica),24 +girigiri love,24 +giraffe three,24 +giotto (reborn),24 +gimai seikatsu,24 +giggles (happy tree friends),24 +gigantamax machamp,24 +gift bow,24 +giant spider,24 +ghostdoctor,24 +ghost belle & haunted mansion,24 +geshopu,24 +genjitsu shugi yuusha no oukoku saikenki,24 +genjimaru,24 +gen (gen m gen),24 +geena preddy,24 +gedo senki,24 +gbcolor (naza),24 +gaziter,24 +gaul galette des rois,24 +gatakenjin,24 +gastornis (kemono friends),24 +gascogne (muse) (azur lane),24 +garurumon,24 +garmmy,24 +gargadia empire,24 +ganryou,24 +ganpiro,24 +gana (mknumi),24 +gamigamimissile,24 +galleon-joe,24 +galka,24 +gale (dds),24 +galaxy dungeon,24 +gaia gear,24 +gahara,24 +gadwin (grandia),24 +gabri-l,24 +ga yeah,24 +g28 (beer ranch) (girls' frontline),24 +g141,24 +fuubuu,24 +fusuma (not found),24 +fusuma (nohbrk),24 +fushimi touka,24 +fushikawa kokoro,24 +furutachi ren,24 +furizuu20,24 +furi2play!,24 +fur shirt,24 +fupoo,24 +fumo,24 +fumika asano,24 +fukunaga yumi,24 +fukumune iria,24 +fujita tatara,24 +fujita nodoka,24 +fujisawa tomio,24 +fujimaru ritsuka (female) (walking in the spring breeze),24 +fujieda yoshino,24 +fujieda kaede,24 +fuji (d38635s10),24 +fugaku (fugaku22),24 +fuel,24 +frozensoba,24 +frost fog,24 +frontier town,24 +fromchawen,24 +frilled scarf,24 +frigate,24 +fox girl (miya (miyaruta)),24 +four-leaf clover earrings,24 +fortune (azur lane),24 +forceps,24 +forced dressing,24 +food on toes,24 +folte,24 +folding,24 +flyinghigh,24 +flying guillotine,24 +fluf.p,24 +flower in drink,24 +flower-shaped hair,24 +flavia (fire emblem),24 +fish cake,24 +fireball 666,24 +fiona (xenoblade),24 +fiona (mabinogi),24 +finger to eyewear,24 +fili,24 
+fey (broken cage),24 +fernanda dias,24 +fender precision bass,24 +felpurr,24 +fei miao,24 +fecchan,24 +feca,24 +faymantra,24 +faust (limbus company),24 +fat joke,24 +farigiraf,24 +fairy tale girl (pokemon),24 +fairy knight tristan (valentine witches) (fate),24 +f4u (naitou2),24 +f-22a raptor (muvluv),24 +f-117 nighthawk,24 +eye glitter,24 +executioner's sword,24 +excellia cruz,24 +evol driver,24 +evaiyu,24 +euphemia chevalier,24 +eukrante,24 +eugenia beilschmidt,24 +etta,24 +etsuko pkmn,24 +etou fujiko,24 +erulusyro,24 +eru daydream,24 +eru (aeeu2243),24 +eriol s2,24 +epurasu,24 +epic hair,24 +enterprise (blooming peony) (azur lane),24 +enola (ebanataw),24 +eno (preno gb),24 +enne (porforever),24 +enikuma,24 +enhancement pill (girls' frontline),24 +endou aina,24 +ena (fire emblem),24 +emukae kaede (plan),24 +emudoru,24 +emily brooks,24 +emi (fizintine),24 +emeraldas,24 +emapippi,24 +elyos,24 +elsam (granblue fantasy),24 +elmo,24 +elephant girl,24 +elena (jagaimo (kkamja)),24 +elegant (sumisumi4268),24 +eleaclarisse,24 +elea,24 +elazul,24 +elanore,24 +elaine (iron saga),24 +eko (yu-shao-eko),24 +ejaeli (granblue fantasy),24 +eirene (oniro),24 +eggplus,24 +egao no daika,24 +edorai,24 +edo tensei,24 +edgar syu,24 +edelyn,24 +edea kramer,24 +edamame (buruburu),24 +ectas online,24 +echoes act2,24 +echo (pandora hearts),24 +eas (cosplay),24 +earthen miraculous sword,24 +eari (shining hearts),24 +e5 hayabusa (shinkalion),24 +dyxm,24 +dxlsmax (lizhimin),24 +dustcloth,24 +dust box,24 +duralumin,24 +dukehare,24 +duke nukem,24 +dreyfos,24 +dream smp,24 +drawing mannequin,24 +dragon quest heroes ii,24 +dr unk2020,24 +douluo dalu xiaowu zhuye,24 +dosol,24 +doriri,24 +doremy sweet (cosplay),24 +dorei himekishi to dorei jijo to no slow life,24 +don (macaron panda13),24 +dokumi,24 +dog girl (yukimoto shuuji (gurigura)),24 +dmuyaa,24 +dmc pa,24 +dk (13855103534),24 +dj-yu,24 +dizzy (artist),24 +divine (scfworks),24 +dirk strider,24 +dinosaur boy,24 +dimentio,24 +diego armando,24 +diamond hands,24 +diamond dog,24 +dhiea seville,24 +dharc (yu-gi-oh!),24 +devil breaker,24 +develop2,24 +detec bell,24 +dera fury,24 +denka houtou,24 +dendoumushi,24 +demya dalliante,24 +demonlordraizen,24 +demon parade,24 +demon (02030108),24 +demian 221,24 +del,24 +deerchip,24 +decima velanox,24 +deathclaw,24 +dear my friend,24 +deanoia,24 +deadmoon (kein2002),24 +dead rising 2,24 +dead drive,24 +dclzexon,24 +dazzling white town,24 +davecavedraws,24 +datsuko (momojam koubou),24 +darwin watterson,24 +darren,24 +dark knight (fft),24 +daredevil,24 +dapple dualies (splatoon),24 +dann of thursday,24 +dangaioh hyper combat unit,24 +dandere (tetsudan),24 +dalimao,24 +daiyousei mob (touhou),24 +daisenran!! 
sangokushi battle,24 +dairenji suzuka,24 +daime fusonzai,24 +daidouji mayura,24 +daidou shinove,24 +dai fuku,24 +dahlia hawthorne,24 +dagr (fire emblem),24 +daewoo k11,24 +d no,24 +cz scorpion evo 3,24 +cynthia (claymore),24 +cyasha,24 +cuttlefish,24 +cutie honey flash,24 +cutefreak,24 +cusozee,24 +cure waffle,24 +cure peace pose,24 +cure empress,24 +cure berry (angel),24 +cum in headwear,24 +cross piercing,24 +crescent tattoo,24 +creamyya,24 +creamyghost,24 +coyote starrk,24 +cotoh tsumi,24 +coral (summon night),24 +cooling tower,24 +conoha,24 +conception 2,24 +command and conquer: red alert 3,24 +comic orga,24 +comet543,24 +color creation,24 +colon (stpri),24 +colmack,24 +collared crop top,24 +cola-alter,24 +coffee talk,24 +code nt,24 +cocohore,24 +cocoa miel,24 +coco (hinatacoco),24 +cocktail pick,24 +cobra (cobra 63),24 +clutch shot king,24 +cleasky (idolmaster),24 +clarityblue,24 +clarissa snowflake,24 +ckhd,24 +circuit board print,24 +cigarette p,24 +cien (shikanokuni),24 +chozuru,24 +chon (klliaytong),24 +chokotto vampire!,24 +chococuco,24 +chloe (sennen sensou aigis),24 +chisuke,24 +chiro (youkai tamanokoshi),24 +chirarizushi,24 +chirakashi (chiruto),24 +children of the rune,24 +chikurin (sasamori tomoe),24 +chiharu (dididididinosaur),24 +chi zu crazy,24 +cheungchz,24 +cherryton school uniform,24 +cherrymaru,24 +chengongzi123,24 +chastel aiheap,24 +charlotte (pandora hearts),24 +chari de kita,24 +chaos drive,24 +change! ano ko ni natte kunkun peropero,24 +chall acustica,24 +chadgarciaburg,24 +chabatake,24 +cha chazi,24 +cellphone display,24 +cello case,24 +cei (sohin),24 +catwalk (modeling),24 +catch the rainbow,24 +cat peach,24 +cat ear bikini,24 +cash007,24 +casey w. coller,24 +carol (guilty crown),24 +cargo pallet,24 +capelet lift,24 +cao pi,24 +canzhajiang,24 +canopus wolph,24 +camouflage footwear,24 +camisole removed,24 +cale henituse,24 +cain (fire emblem: shadow dragon),24 +cafe choco,24 +c-ms (the wonderful adventures of goose) (girls' frontline),24 +byakusouya,24 +buttslayer,24 +burmy (trash),24 +burgerberg q-taro,24 +bunny choker,24 +bun (food),24 +buffalo (trickster),24 +buchou (kakitama),24 +brown facial hair,24 +bronya zaychik (drive kometa),24 +breeding mount,24 +bratja,24 +bowser logo,24 +bow shorts,24 +bow (breath of fire),24 +bourbone,24 +bouncy (kirby),24 +botantouki,24 +bossmonsterbani,24 +borezet,24 +book of fuxi,24 +bokujou monogatari: tsunagaru shin tenchi,24 +body slam,24 +bobblehead,24 +bmw z4,24 +blue trim,24 +blodia,24 +blister pack,24 +blaze union,24 +blaser r93,24 +blaseball,24 +blanket (kkbjah),24 +blackwatch reyes,24 +blackmoon,24 +black sig,24 +black n 12,24 +black maria (one piece),24 +black killers (phantom of the kill),24 +black cat (elona),24 +black ai,24 +bizure,24 +bittersweet lulu,24 +bita (vaderc),24 +bisuko (bisco mm),24 +bisuke (isbsk ekaki),24 +biopunk,24 +bini (xocolatl 501),24 +bingshan,24 +bikkuru,24 +bikkii,24 +betilla (rayman),24 +berisuno (beriberi0707),24 +benerokku,24 +belial (megido72),24 +bejili,24 +beepaint,24 +bee (bee and puppycat),24 +bedman,24 +beast of darkness (berserk),24 +bean sprouts (6651003),24 +be yu,24 +bbuni,24 +battlestar galactica,24 +battle principal yuumi,24 +batting stance,24 +bat signal,24 +bat pasties,24 +bashira (sennen sensou aigis),24 +banana (among us),24 +bamboo (akimotoaki),24 +balabling,24 +bakadebiru,24 +baek hyang'geum,24 +babape,24 +azusa (sukumizuya),24 +azusa (rikuriku),24 +azuma yuuki,24 +azuko (ampenm),24 +ayyataka,24 +ayuria,24 +ayanami (witch in ambush) (azur lane),24 +ayanami (azur lane) 
(cosplay),24 +ayajik,24 +axis04,24 +avenger (dungeon and fighter),24 +avalon code,24 +automaton (final fantasy),24 +aulick (azur lane),24 +aul,24 +audrey dreamweaver,24 +atsushi kenzaki,24 +asymmetrical shorts,24 +asuka r. kreutz,24 +asu kam,24 +astrotrain,24 +ashura (rg veda),24 +ashiyafuku,24 +ashigara (kancolle) (cosplay),24 +ash (cat7evy),24 +asato ai,24 +asama isami,24 +asaka hinata,24 +asahina yuuta,24 +asahina satoru,24 +asaba yuuki,24 +asa1014,24 +arzuros,24 +aruko (nac000),24 +artoise,24 +artificial world,24 +arrow to the knee,24 +arrow in mouth,24 +arrow hair ornament,24 +arno (ft3014),24 +armored core 4,24 +ariesuzu (ariessz),24 +arianna the labrynth servant,24 +aria advance,24 +ari suzushi,24 +argetlahm,24 +argentinosaurus,24 +argath thadalfus,24 +archer class (fate),24 +arasumi shii,24 +arashigaoka academy uniform,24 +aran (fire emblem),24 +aragaki nagisa,24 +arafune tetsuji,24 +aqua umbrella,24 +aqua buruma,24 +aqua-,24 +apricot (fruit),24 +aosi (wasabiranzy),24 +aonuma shun,24 +aoki masahiko,24 +aoiro 0w0,24 +aoi matsuri,24 +aoi01fenrir,24 +aobito sukoyaka bystander,24 +aobe,24 +anti the infinite holic (vocaloid),24 +anpsart,24 +anntan,24 +announcer,24 +annet myer,24 +anne (ttgl),24 +anime revolution,24 +animal pov,24 +angel pena (angelox),24 +angel (nadegata),24 +andy w hall,24 +andou mario,24 +and rira,24 +anby demara,24 +ana bi,24 +an sin,24 +an jera,24 +an (angular-angler),24 +amulet clover,24 +amiya aranha,24 +amiya64k7,24 +american civil war,24 +ameria (artist),24 +amel3di,24 +ame-kan,24 +amaurot,24 +amatsu ai,24 +amatou cacao,24 +amatarou,24 +amanohokosaka mei,24 +amane 1906,24 +amane (dream c club) (cosplay),24 +amane (7783),24 +amamiya sakurako,24 +amamiya kaoruko,24 +amamiya atsushi,24 +amagi yukiko (cosplay),24 +am88121,24 +alyssa searrs,24 +alvida (peach momozen),24 +aluppia,24 +alumina (kennmeidesu),24 +altheavin,24 +altera (fate) (cosplay),24 +altair (granblue fantasy),24 +alset,24 +alpholo v,24 +aloe (sos961111),24 +all seeing eye,24 +alien adviser,24 +alice (mary skelter),24 +alexander (final fantasy),24 +aleixa (aurahack),24 +albino (vocaloid),24 +alatreon (armor),24 +aku (samurai jack),24 +akoya (anoko konoko),24 +akn,24 +akito,24 +akita (20033203),24 +akira (jitsuimo),24 +akio (89e),24 +akebi (kakuri),24 +akazukin (mary skelter),24 +akashin,24 +akasaka shop,24 +akari (baffu),24 +akaikonomi,24 +akagi koku,24 +akagi (azur lane) (cosplay),24 +aiwm,24 +airi rei,24 +aijin (pageratta),24 +aihara kazumi,24 +ai kotoba (vocaloid),24 +aho manten,24 +ahedtra,24 +agibe,24 +agaa (masatu),24 +aegis (girls' frontline),24 +adz lrp,24 +adfx-02 morgan,24 +adam jensen,24 +adam (evangelion),24 +aako,24 +aaaaddddd,24 +a ching,24 +a chika-tastic summer,24 +a6m2-n,24 +a-m-one,24 +=3=,24 +9no kabe,24 +8rats,24 +88 (kimidori midori),24 +86thunder,24 +777nasu,24 +707arisu,24 +64 (xfour),24 +617247500,24 +30re,24 +30ml,24 +1ji45fun,24 +1984,24 +10cm twin high-angle gun mount,24 +029pom,24 +zzt (zztsama),23 +zzq,23 +zzizzi,23 +zyuwfc,23 +zy jomi,23 +zxj,23 +zxanzai,23 +zutaboro,23 +zukapin,23 +zoyu (wuedti),23 +zora set (zelda),23 +zokusuke,23 +zmore,23 +ziteng yue,23 +zimbabwefumi,23 +zienu,23 +zhaoyuan pan,23 +zhai mu q,23 +zetsu (zyej5442),23 +zetman,23 +zet (twt zet),23 +zenigata tsugiko,23 +zeng$ (zwj),23 +zearthp,23 +zearth,23 +zanza (xenoblade),23 +zane (ninjago),23 +zama masaaki,23 +z28 (azur lane),23 +z21 (azur lane),23 +yzk,23 +yuzupapa,23 +yuzumaki tajii,23 +yuzuki (wixoss),23 +yuzf,23 +yuuri (mafuyu),23 +yuukoku no moriarty,23 +yuuki susumu,23 +yuuki amane,23 +yuugiri 
(zettai karen children),23 +yuu li (glass),23 +yuu chitose,23 +yuu (vocaloid),23 +yutorin kakka,23 +yusa aoi,23 +yurudorashiru,23 +yurine,23 +yurian (user utch8788),23 +yuri (purinlove),23 +yupo 0322,23 +yupi mild,23 +yuna (yu-yu),23 +yun yun guan bo,23 +yun ling,23 +yun-chan,23 +yumeno sally,23 +yukinoko 0 3,23 +yukine chris (another),23 +yukimiya chino,23 +yukimi papino,23 +yukimi (pan (mimi)),23 +yukikaze (aaassszzz),23 +yukikana,23 +yukichi (yu-ame),23 +yuki ga tokeru mae ni (vocaloid),23 +yuki (bakumatsu rouman),23 +yukawa hideki,23 +yukagen,23 +yuka (mischief),23 +yuirinex,23 +yui (msy1302mana),23 +yui (bofuri),23 +yuhkiano,23 +yugami (mikami mika),23 +yufy,23 +yuenibushi,23 +yuela,23 +yu tin,23 +ys memories of celceta,23 +yoyoiro (sysi20),23 +yosiyuki yosizou,23 +yoshino yuusuke,23 +yoshino ns,23 +yoshimori misaki,23 +yoshi (cosplay),23 +yoru no yayoi,23 +yoru kiri,23 +yorite konoha wa kurenai ni,23 +yonari phone neon,23 +yon prmr,23 +yomoi nui,23 +yomogi mametaro,23 +yokuko zaza,23 +yoku (chocolateq),23 +yokiri,23 +yoiyoi (kannkann100),23 +yoiro (porigonn),23 +yoineko,23 +yoigoshi-hen,23 +yobanashi deceive (vocaloid),23 +ylceon,23 +yi (saver5607),23 +yi-sang (limbus company),23 +ygdm,23 +yeosi,23 +yeon bom,23 +yellow card,23 +ye xiu,23 +yayo,23 +yaya (20090410),23 +yawara a fashionable judo girl,23 +yatta ne tae-chan! (meme),23 +yasu (shijimi 8),23 +yashiro gaku,23 +yasaka pagoda,23 +yansuiyuluo,23 +yano (odd taxi),23 +yang (wuhan),23 +yamisuke (silent-black),23 +yamashiro (street corner offensive!) (azur lane),23 +yamashiro (sales clerk offensive?!) (azur lane),23 +yamamoto rintaro,23 +yama batake,23 +yama (yam0a),23 +yaku (999ykseo),23 +yaegashi isa,23 +yachika,23 +xyzal,23 +xxkuroxx,23 +xuehua,23 +xp-kun,23 +xininz,23 +xiaochentan,23 +xiao you,23 +xiao yao xiong (xy450425885),23 +xiahou dun,23 +xaruex,23 +x k o,23 +x-gun,23 +x-4kazu,23 +wormadam (sandy),23 +working slave (character),23 +worick arcangelo,23 +woon,23 +woofey,23 +wonder ride book,23 +wombat,23 +wochi,23 +wipers,23 +winning ticket (dream deliverer) (umamusume),23 +winged menace wattson,23 +wing hold,23 +windflit (arknights),23 +wild wet quest,23 +wii balance board,23 +whoru,23 +whitemaria,23 +white gemstone,23 +white-reimu,23 +wendelin von benno baumeister,23 +weisskaeppchen,23 +wearshoes,23 +weapon in mouth,23 +we-tan,23 +wazawogi (wzog),23 +water dragon,23 +watchtower,23 +watase (wataxx),23 +watarai sonoka,23 +watarai alice,23 +watanuki uchiha,23 +watanabe keisuke,23 +watage (lucky yyg),23 +watagashi unou,23 +wataame (tulip),23 +warechu,23 +warden (jadol 9),23 +warabi yuuzou,23 +wan ban,23 +walnusstinte,23 +walhalla illusion,23 +wakkigen,23 +wakaouji rui,23 +wakamiya asuka,23 +waai!,23 +w nijuuyon,23 +vulcan joseph,23 +voidsent,23 +vodka (meitantei conan),23 +vivillon (elegant),23 +virginia robertson,23 +viper m1,23 +vintage clothes,23 +vincennes (azur lane),23 +viking-dutchboy,23 +viewran,23 +vietnamese flag,23 +vesperbell,23 +vector the crocodile,23 +vauquelin (warship girls r),23 +vashti (pokemon),23 +vanship,23 +vander (arcane),23 +vampire hunter d,23 +vamp time,23 +vamp (metal gear),23 +uyuu,23 +uvao-tan,23 +uva academy uniform,23 +uub,23 +uso ashio,23 +ushi no koku mairi,23 +ushi (ushi manji),23 +usami youko,23 +usami (50-point caliper),23 +usa (cubic),23 +ururu,23 +ursula (disney),23 +urban style,23 +urara (himitsu kamitu),23 +uramoto kouji,23 +urai tami,23 +ur (wulfa),23 +ur-bracka (gogalking),23 +uokin2,23 +unova mother (bw2),23 +united states angel corps,23 +unicorn boy,23 +umikaze (kancolle) 
(cosplay),23 +umiichi kurage,23 +umeo retto,23 +umehara daigo,23 +umbrella bow,23 +ultraman z,23 +ultraman leo (series),23 +ultimate chimera,23 +ui (fuugetsuin),23 +ueno meguru,23 +ueda kazuyuki,23 +ueda hiroshi,23 +uchuu keiji gavan,23 +u u ki u u,23 +u to i,23 +u-ka (pixiv5407),23 +type 0 observation seaplane,23 +tyler (merryweather),23 +two pokemon,23 +twinrova,23 +twigg (hilda),23 +tuye (cultivate vegetation) (arknights),23 +tusk0315,23 +tuber (pokemon),23 +ttl,23 +ttegi (ddeck dg),23 +tsuzuki otome,23 +tsutsuji,23 +tsushima touko,23 +tsuruta saya,23 +tsunken,23 +tsunetarou (yasu),23 +tsuna maru,23 +tsumiki (12756023),23 +tsukune (yagi),23 +tsukiuta,23 +tsukishima orihime,23 +tsukikase,23 +tsukigime (fool ehle),23 +tsuki usagi,23 +tsukahara bokuden (sengoku collection),23 +tsugomori (remilia0398),23 +tsf monogatari,23 +triple bambi,23 +triforce earrings,23 +tri-slosher (splatoon),23 +trey ollis loffewa (kim eb),23 +tree bowbow,23 +treasure map,23 +treasure island,23 +traptrix nepenthes,23 +transforming,23 +transformers: robots in disguise (2015),23 +tp82n1r,23 +tp65pxu4,23 +tozaumo,23 +toyatei,23 +toy horse,23 +towashibuki,23 +tournament bracket,23 +touno kyouko,23 +touko (wanko to lily),23 +toukiden,23 +toudou erika,23 +touching forehead,23 +totosu,23 +totooria helmold (cosplay),23 +tot (zhxto),23 +toshiba emi,23 +torso only,23 +torry912,23 +toroko,23 +toroakikan,23 +torisoboro,23 +torino rito,23 +toriko no tsubasa ~butaiura de midara ni odoru shojo ningyou~,23 +toriharara,23 +tore-saki,23 +tora (koihime musou),23 +tooru jin,23 +tong (freedom2000),23 +tonakai (bunsuka),23 +tomooka shinpei,23 +tomogy,23 +tomatoritori,23 +tomatojiusu,23 +toma (me666nm),23 +tom-masu,23 +tokyo underground,23 +tokyo lover,23 +tohno ren,23 +tofu (bean359),23 +todatanoto,23 +toda youkon,23 +tobitaka seiya,23 +tobari susumu,23 +toaster oven,23 +tktn,23 +tktg,23 +tiphereth b (project moon),23 +tintin,23 +tiko (idadsa91),23 +tigrex (armor),23 +tie pilot,23 +tia langray,23 +thorin oakenshield,23 +thighhighs under pantyhose,23 +thehumancopier,23 +the walking dead,23 +the rock (dwayne johnson),23 +the king of fighters ex2,23 +the joy of painting,23 +the boys,23 +thanatos (sound horizon),23 +tf (tfx2),23 +teuyu,23 +teto (no game no life),23 +terui ryuu,23 +terastal,23 +tenti0619,23 +tentacle underwear,23 +tennen inari,23 +tenmei no conquista,23 +tenjuin marie,23 +tengu nimuru,23 +tenebrism,23 +tenchou (sakai),23 +tenchou (jahy),23 +tencent qq,23 +tenbatsu angel rabbie,23 +tena,23 +ten-u,23 +teku (the3dempire),23 +tehnyo,23 +teen titans go!,23 +teddy bear (mother 2),23 +techsupportdog,23 +teamfight tactics,23 +tdc24,23 +tawasiupa,23 +tasuku (user fkzv3343),23 +task baron,23 +tashite,23 +taro (116taro),23 +tapo,23 +tanya natdhipytadd,23 +tanna,23 +tanimeso,23 +tanabe ai,23 +tamu (tamurarucaffe1226),23 +tamazen,23 +tamayomi,23 +tamanotsuyu,23 +tamagoumauma,23 +tamachan (gam baru ger),23 +tama! 
(lazyturtle),23 +takumi (quilt),23 +tako otoko,23 +takka (aegis9294),23 +takigawa magister,23 +takeuchi shunsuke,23 +takemura makoto (hakushikei),23 +takata akira,23 +takano yuki (mangaka),23 +takani0721,23 +takane (lovehatsune),23 +takamine kiyomaro,23 +takamachi miyuki,23 +takahashi kazuki (style),23 +taiyou sentai sun vulcan,23 +taikoi7,23 +taguno,23 +taguchi sp,23 +tagalog text,23 +taffy (squeezable),23 +tachiuo nikominabe,23 +tachihara sayu,23 +tachibana yumeko,23 +tachibana ukyou,23 +tachibana tou,23 +tachakan,23 +taccomm,23 +tabmur,23 +tabarchie,23 +tabana,23 +syusyu 043,23 +synchronicity (vocaloid),23 +sylvia richter,23 +sword between thighs,23 +swain (legacy),23 +suzutsuki (suzutsuki hanetsuki adept!) (azur lane),23 +suzushiro (szsr),23 +suzunashi susumu,23 +suzuki yui,23 +suzuki address,23 +suzaku mikado,23 +suupuushan,23 +sutorea,23 +susukawa (susucawa),23 +susie (lord of the mysteries),23 +susan strong,23 +surume (surume 8738),23 +surprise deepthroat,23 +superman (cosplay),23 +superappleman,23 +suohachi,23 +sunway,23 +sunflower (tomodog422),23 +sunakawa mizuchi,23 +suna ko1,23 +sumxsum03,23 +sumomo (kumatanchi),23 +sumith,23 +sumisi,23 +sumi suya,23 +sumi (tkzk525),23 +sumbird,23 +sukoburu maeda,23 +suji,23 +sui (blackcatsui yoi),23 +sugita,23 +sugisaki yukiru,23 +sugatem! -sugarfull tempering-,23 +suga yoshihide,23 +sudare,23 +suchie-pai,23 +sturmjager,23 +stuffed,23 +student council president (suzumiya haruhi),23 +stray little devil,23 +stpen,23 +stonjourner,23 +stigmata,23 +stella (stella),23 +steelycross,23 +steel eel (splatoon),23 +steel chronicle,23 +starblame,23 +star trek the original series,23 +star guardian taliyah,23 +star guardian ezreal,23 +star color pendant,23 +stan (honkai impact),23 +staccato 2011,23 +st basil's cathedral,23 +st. cygnus,23 +squishy (pokemon),23 +splish,23 +spetsnaz,23 +sperm whale,23 +soushisouai note,23 +soukoku no regalia,23 +souken no cross age,23 +sougetsu saya,23 +soudayu,23 +sotie,23 +sorakuma (oycue41),23 +sora (pikasora),23 +sonoko neko,23 +sonic rush,23 +sonasan,23 +soma somari,23 +soldering,23 +snow20200,23 +sniping,23 +snake youkai (touhou),23 +snake man,23 +snake lady,23 +smokey brown,23 +smile pact,23 +slj,23 +slippy toad,23 +slime (minecraft),23 +skk,23 +skan srisuwan,23 +sizeaton,23 +sivil,23 +siva,23 +sister mermaid,23 +siori,23 +sion,23 +sinoda,23 +sin eater (ff14),23 +silver one-piece swimsuit,23 +silou b,23 +silicobra,23 +sig curtis,23 +siegfried (super cool biz) (fate),23 +siamese fighting fish,23 +si-ma,23 +shyh yue,23 +shure 55sh,23 +shun (artist),23 +shukufuku no kane no oto wa sakura-iro no kaze to tomo ni,23 +shuffle! 
essence+,23 +show chiku-by,23 +shoutmon,23 +shoukoku no altair,23 +shouko azuma,23 +shou zama,23 +shoryuki,23 +shokujin hatefukuchuu,23 +shoka,23 +shizuko (chipccchip),23 +shirojiro shiro,23 +shirogane no soleil,23 +shiro gisu,23 +shippaidayo,23 +ship interior,23 +shiori lee jeng,23 +shiono etorouji,23 +shinyafuru,23 +shinonome halltro,23 +shino (ten-m),23 +shingetsu ernesta fukami,23 +shinano (kancolle),23 +shina (sbk951121),23 +shin (world 3000),23 +shin'ichi (yokosuka220),23 +shimotsuki mika,23 +shimogamo yaichirou,23 +shimo (shimo 00),23 +shimesaba (masuraoburi),23 +shimejirou (000kk),23 +shimazaki setsuna,23 +shikkaku ishi,23 +shiisuu rattamu,23 +shiina maru,23 +shiina chieri,23 +shiijisu,23 +shigure (hot spring) (blue archive),23 +shignonpencil,23 +shigeru (rand),23 +shige (st-k),23 +shichi (tarakochan-medo),23 +shicewlysml,23 +shi xu jiu zhou,23 +sherry 0014,23 +shelling guardian (elsword),23 +sheena (gensou suikoden),23 +shaved body,23 +shaun hastings,23 +sharon les halles,23 +sharon apple,23 +shark bag,23 +shared pocket,23 +shaobao (sdhx3728),23 +shamrock (polynian),23 +shako (shaco flat),23 +sg550 (upotte!!),23 +sex through torn clothes,23 +setsuna (kaiyari),23 +seto ferb,23 +setia pradipta,23 +seshima rui,23 +serori,23 +serizawa kasumi,23 +sephira su,23 +seozo,23 +sento-kun,23 +sentinel (x-men),23 +sententia (senhariko),23 +sentaro207,23 +senpai-chan (beni shake),23 +seno lepo,23 +seno (nanrandesu),23 +senjugiku tabane,23 +senjou nanase,23 +sengoku koihime,23 +sengoku kakeru,23 +sendai (azur lane),23 +senatorwong,23 +sen no maken to tate no otome,23 \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json b/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json new file mode 100644 index 0000000000000000000000000000000000000000..15adc2255e0109be7b860d591f54187520c673aa --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/user/text_file_dirs.json @@ -0,0 +1,5 @@ +{ + "input": "$input/**/*.txt", + "output": "$output/**/*.txt", + "temp": "$temp/**/*.txt" +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js new file mode 100644 index 0000000000000000000000000000000000000000..03dc3923759ccbe8cf70eaab387b792cc0ba32ca --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/canvas2svg.js @@ -0,0 +1,1192 @@ +/*!! + * Canvas 2 Svg v1.0.19 + * A low level canvas to SVG converter. Uses a mock canvas context to build an SVG document. + * + * Licensed under the MIT license: + * http://www.opensource.org/licenses/mit-license.php + * + * Author: + * Kerry Liu + * + * Copyright (c) 2014 Gliffy Inc. + */ + +;(function() { + "use strict"; + + var STYLES, ctx, CanvasGradient, CanvasPattern, namedEntities; + + //helper function to format a string + function format(str, args) { + var keys = Object.keys(args), i; + for (i=0; i 1) { + options = defaultOptions; + options.width = arguments[0]; + options.height = arguments[1]; + } else if( !o ) { + options = defaultOptions; + } else { + options = o; + } + + if(!(this instanceof ctx)) { + //did someone call this without new? + return new ctx(options); + } + + //setup options + this.width = options.width || defaultOptions.width; + this.height = options.height || defaultOptions.height; + this.enableMirroring = options.enableMirroring !== undefined ? options.enableMirroring : defaultOptions.enableMirroring; + + this.canvas = this; ///point back to this instance! 
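+ // Pointing `canvas` back at the context keeps code written for a real 2D context
+ // (e.g. reading `ctx.canvas.width`) working when this mock is dropped in. A rough
+ // usage sketch, assuming the C2S constructor this file attaches to `window` below
+ // and the library's getSerializedSvg() accessor:
+ //   var c = new C2S(500, 500);
+ //   c.fillStyle = "red";
+ //   c.fillRect(10, 10, 100, 100);
+ //   var svgMarkup = c.getSerializedSvg(); // SVG equivalent of the canvas calls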
+ this.__document = options.document || document; + this.__canvas = this.__document.createElement("canvas"); + this.__ctx = this.__canvas.getContext("2d"); + + this.__setDefaultStyles(); + this.__stack = [this.__getStyleState()]; + this.__groupStack = []; + + //the root svg element + this.__root = this.__document.createElementNS("http://www.w3.org/2000/svg", "svg"); + this.__root.setAttribute("version", 1.1); + this.__root.setAttribute("xmlns", "http://www.w3.org/2000/svg"); + this.__root.setAttributeNS("http://www.w3.org/2000/xmlns/", "xmlns:xlink", "http://www.w3.org/1999/xlink"); + this.__root.setAttribute("width", this.width); + this.__root.setAttribute("height", this.height); + + //make sure we don't generate the same ids in defs + this.__ids = {}; + + //defs tag + this.__defs = this.__document.createElementNS("http://www.w3.org/2000/svg", "defs"); + this.__root.appendChild(this.__defs); + + //also add a group child. the svg element can't use the transform attribute + this.__currentElement = this.__document.createElementNS("http://www.w3.org/2000/svg", "g"); + this.__root.appendChild(this.__currentElement); + }; + + + /** + * Creates the specified svg element + * @private + */ + ctx.prototype.__createElement = function (elementName, properties, resetFill) { + if (typeof properties === "undefined") { + properties = {}; + } + + var element = this.__document.createElementNS("http://www.w3.org/2000/svg", elementName), + keys = Object.keys(properties), i, key; + if(resetFill) { + //if fill or stroke is not specified, the svg element should not display. By default SVG's fill is black. + element.setAttribute("fill", "none"); + element.setAttribute("stroke", "none"); + } + for(i=0; i 0) { + var group = this.__createElement("g"); + parent.appendChild(group); + this.__currentElement = group; + } + + var transform = this.__currentElement.getAttribute("transform"); + if(transform) { + transform += " "; + } else { + transform = ""; + } + transform += t; + this.__currentElement.setAttribute("transform", transform); + }; + + /** + * scales the current element + */ + ctx.prototype.scale = function(x, y) { + if(y === undefined) { + y = x; + } + this.__addTransform(format("scale({x},{y})", {x:x, y:y})); + }; + + /** + * rotates the current element + */ + ctx.prototype.rotate = function(angle){ + var degrees = (angle * 180 / Math.PI); + this.__addTransform(format("rotate({angle},{cx},{cy})", {angle:degrees, cx:0, cy:0})); + }; + + /** + * translates the current element + */ + ctx.prototype.translate = function(x, y){ + this.__addTransform(format("translate({x},{y})", {x:x,y:y})); + }; + + /** + * applies a transform to the current element + */ + ctx.prototype.transform = function(a, b, c, d, e, f){ + this.__addTransform(format("matrix({a},{b},{c},{d},{e},{f})", {a:a, b:b, c:c, d:d, e:e, f:f})); + }; + + /** + * Create a new Path Element + */ + ctx.prototype.beginPath = function(){ + var path, parent; + + // Note that there is only one current default path, it is not part of the drawing state. 
+ // See also: https://html.spec.whatwg.org/multipage/scripting.html#current-default-path + this.__currentDefaultPath = ""; + this.__currentPosition = {}; + + path = this.__createElement("path", {}, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(path); + this.__currentElement = path; + }; + + /** + * Helper function to apply currentDefaultPath to current path element + * @private + */ + ctx.prototype.__applyCurrentDefaultPath = function() { + if(this.__currentElement.nodeName === "path") { + var d = this.__currentDefaultPath; + this.__currentElement.setAttribute("d", d); + } else { + throw new Error("Attempted to apply path command to node " + this.__currentElement.nodeName); + } + }; + + /** + * Helper function to add path command + * @private + */ + ctx.prototype.__addPathCommand = function(command){ + this.__currentDefaultPath += " "; + this.__currentDefaultPath += command; + }; + + /** + * Adds the move command to the current path element, + * if the currentPathElement is not empty create a new path element + */ + ctx.prototype.moveTo = function(x,y){ + if(this.__currentElement.nodeName !== "path") { + this.beginPath(); + } + + // creates a new subpath with the given point + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("M {x} {y}", {x:x, y:y})); + }; + + /** + * Closes the current path + */ + ctx.prototype.closePath = function(){ + this.__addPathCommand("Z"); + }; + + /** + * Adds a line to command + */ + ctx.prototype.lineTo = function(x, y){ + this.__currentPosition = {x: x, y: y}; + if (this.__currentDefaultPath.indexOf('M') > -1) { + this.__addPathCommand(format("L {x} {y}", {x:x, y:y})); + } else { + this.__addPathCommand(format("M {x} {y}", {x:x, y:y})); + } + }; + + /** + * Add a bezier command + */ + ctx.prototype.bezierCurveTo = function(cp1x, cp1y, cp2x, cp2y, x, y) { + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("C {cp1x} {cp1y} {cp2x} {cp2y} {x} {y}", + {cp1x:cp1x, cp1y:cp1y, cp2x:cp2x, cp2y:cp2y, x:x, y:y})); + }; + + /** + * Adds a quadratic curve to command + */ + ctx.prototype.quadraticCurveTo = function(cpx, cpy, x, y){ + this.__currentPosition = {x: x, y: y}; + this.__addPathCommand(format("Q {cpx} {cpy} {x} {y}", {cpx:cpx, cpy:cpy, x:x, y:y})); + }; + + + /** + * Return a new normalized vector of given vector + */ + var normalize = function(vector) { + var len = Math.sqrt(vector[0] * vector[0] + vector[1] * vector[1]); + return [vector[0] / len, vector[1] / len]; + }; + + /** + * Adds the arcTo to the current path + * + * @see http://www.w3.org/TR/2015/WD-2dcontext-20150514/#dom-context-2d-arcto + */ + ctx.prototype.arcTo = function(x1, y1, x2, y2, radius) { + // Let the point (x0, y0) be the last point in the subpath. + var x0 = this.__currentPosition && this.__currentPosition.x; + var y0 = this.__currentPosition && this.__currentPosition.y; + + // First ensure there is a subpath for (x1, y1). + if (typeof x0 == "undefined" || typeof y0 == "undefined") { + return; + } + + // Negative values for radius must cause the implementation to throw an IndexSizeError exception. + if (radius < 0) { + throw new Error("IndexSizeError: The radius provided (" + radius + ") is negative."); + } + + // If the point (x0, y0) is equal to the point (x1, y1), + // or if the point (x1, y1) is equal to the point (x2, y2), + // or if the radius radius is zero, + // then the method must add the point (x1, y1) to the subpath, + // and connect that point to the previous point (x0, y0) by a straight line. 
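+ // (degenerate cases first: coincident points or a zero radius reduce to a plain lineTo)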
+ if (((x0 === x1) && (y0 === y1)) + || ((x1 === x2) && (y1 === y2)) + || (radius === 0)) { + this.lineTo(x1, y1); + return; + } + + // Otherwise, if the points (x0, y0), (x1, y1), and (x2, y2) all lie on a single straight line, + // then the method must add the point (x1, y1) to the subpath, + // and connect that point to the previous point (x0, y0) by a straight line. + var unit_vec_p1_p0 = normalize([x0 - x1, y0 - y1]); + var unit_vec_p1_p2 = normalize([x2 - x1, y2 - y1]); + if (unit_vec_p1_p0[0] * unit_vec_p1_p2[1] === unit_vec_p1_p0[1] * unit_vec_p1_p2[0]) { + this.lineTo(x1, y1); + return; + } + + // Otherwise, let The Arc be the shortest arc given by circumference of the circle that has radius radius, + // and that has one point tangent to the half-infinite line that crosses the point (x0, y0) and ends at the point (x1, y1), + // and that has a different point tangent to the half-infinite line that ends at the point (x1, y1), and crosses the point (x2, y2). + // The points at which this circle touches these two lines are called the start and end tangent points respectively. + + // note that both vectors are unit vectors, so the length is 1 + var cos = (unit_vec_p1_p0[0] * unit_vec_p1_p2[0] + unit_vec_p1_p0[1] * unit_vec_p1_p2[1]); + var theta = Math.acos(Math.abs(cos)); + + // Calculate origin + var unit_vec_p1_origin = normalize([ + unit_vec_p1_p0[0] + unit_vec_p1_p2[0], + unit_vec_p1_p0[1] + unit_vec_p1_p2[1] + ]); + var len_p1_origin = radius / Math.sin(theta / 2); + var x = x1 + len_p1_origin * unit_vec_p1_origin[0]; + var y = y1 + len_p1_origin * unit_vec_p1_origin[1]; + + // Calculate start angle and end angle + // rotate 90deg clockwise (note that y axis points to its down) + var unit_vec_origin_start_tangent = [ + -unit_vec_p1_p0[1], + unit_vec_p1_p0[0] + ]; + // rotate 90deg counter clockwise (note that y axis points to its down) + var unit_vec_origin_end_tangent = [ + unit_vec_p1_p2[1], + -unit_vec_p1_p2[0] + ]; + var getAngle = function(vector) { + // get angle (clockwise) between vector and (1, 0) + var x = vector[0]; + var y = vector[1]; + if (y >= 0) { // note that y axis points to its down + return Math.acos(x); + } else { + return -Math.acos(x); + } + }; + var startAngle = getAngle(unit_vec_origin_start_tangent); + var endAngle = getAngle(unit_vec_origin_end_tangent); + + // Connect the point (x0, y0) to the start tangent point by a straight line + this.lineTo(x + unit_vec_origin_start_tangent[0] * radius, + y + unit_vec_origin_start_tangent[1] * radius); + + // Connect the start tangent point to the end tangent point by arc + // and adding the end tangent point to the subpath. + this.arc(x, y, radius, startAngle, endAngle); + }; + + /** + * Sets the stroke property on the current element + */ + ctx.prototype.stroke = function(){ + if(this.__currentElement.nodeName === "path") { + this.__currentElement.setAttribute("paint-order", "fill stroke markers"); + } + this.__applyCurrentDefaultPath(); + this.__applyStyleToCurrentElement("stroke"); + }; + + /** + * Sets fill properties on the current element + */ + ctx.prototype.fill = function(){ + if(this.__currentElement.nodeName === "path") { + this.__currentElement.setAttribute("paint-order", "stroke fill markers"); + } + this.__applyCurrentDefaultPath(); + this.__applyStyleToCurrentElement("fill"); + }; + + /** + * Adds a rectangle to the path. 
+ */ + ctx.prototype.rect = function(x, y, width, height){ + if(this.__currentElement.nodeName !== "path") { + this.beginPath(); + } + this.moveTo(x, y); + this.lineTo(x+width, y); + this.lineTo(x+width, y+height); + this.lineTo(x, y+height); + this.lineTo(x, y); + this.closePath(); + }; + + + /** + * adds a rectangle element + */ + ctx.prototype.fillRect = function(x, y, width, height){ + var rect, parent; + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height + }, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(rect); + this.__currentElement = rect; + this.__applyStyleToCurrentElement("fill"); + }; + + /** + * Draws a rectangle with no fill + * @param x + * @param y + * @param width + * @param height + */ + ctx.prototype.strokeRect = function(x, y, width, height){ + var rect, parent; + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height + }, true); + parent = this.__closestGroupOrSvg(); + parent.appendChild(rect); + this.__currentElement = rect; + this.__applyStyleToCurrentElement("stroke"); + }; + + + /** + * Clear entire canvas: + * 1. save current transforms + * 2. remove all the childNodes of the root g element + */ + ctx.prototype.__clearCanvas = function() { + var current = this.__closestGroupOrSvg(), + transform = current.getAttribute("transform"); + var rootGroup = this.__root.childNodes[1]; + var childNodes = rootGroup.childNodes; + for (var i = childNodes.length - 1; i >= 0; i--) { + if (childNodes[i]) { + rootGroup.removeChild(childNodes[i]); + } + } + this.__currentElement = rootGroup; + //reset __groupStack as all the child group nodes are all removed. + this.__groupStack = []; + if (transform) { + this.__addTransform(transform); + } + }; + + /** + * "Clears" a canvas by just drawing a white rectangle in the current group. + */ + ctx.prototype.clearRect = function(x, y, width, height) { + //clear entire canvas + if (x === 0 && y === 0 && width === this.width && height === this.height) { + this.__clearCanvas(); + return; + } + var rect, parent = this.__closestGroupOrSvg(); + rect = this.__createElement("rect", { + x : x, + y : y, + width : width, + height : height, + fill : "#FFFFFF" + }, true); + parent.appendChild(rect); + }; + + /** + * Adds a linear gradient to a defs tag. + * Returns a canvas gradient object that has a reference to it's parent def + */ + ctx.prototype.createLinearGradient = function(x1, y1, x2, y2){ + var grad = this.__createElement("linearGradient", { + id : randomString(this.__ids), + x1 : x1+"px", + x2 : x2+"px", + y1 : y1+"px", + y2 : y2+"px", + "gradientUnits" : "userSpaceOnUse" + }, false); + this.__defs.appendChild(grad); + return new CanvasGradient(grad, this); + }; + + /** + * Adds a radial gradient to a defs tag. 
+ * Returns a canvas gradient object that has a reference to it's parent def + */ + ctx.prototype.createRadialGradient = function(x0, y0, r0, x1, y1, r1){ + var grad = this.__createElement("radialGradient", { + id : randomString(this.__ids), + cx : x1+"px", + cy : y1+"px", + r : r1+"px", + fx : x0+"px", + fy : y0+"px", + "gradientUnits" : "userSpaceOnUse" + }, false); + this.__defs.appendChild(grad); + return new CanvasGradient(grad, this); + + }; + + /** + * Parses the font string and returns svg mapping + * @private + */ + ctx.prototype.__parseFont = function() { + var regex = /^\s*(?=(?:(?:[-a-z]+\s*){0,2}(italic|oblique))?)(?=(?:(?:[-a-z]+\s*){0,2}(small-caps))?)(?=(?:(?:[-a-z]+\s*){0,2}(bold(?:er)?|lighter|[1-9]00))?)(?:(?:normal|\1|\2|\3)\s*){0,3}((?:xx?-)?(?:small|large)|medium|smaller|larger|[.\d]+(?:\%|in|[cem]m|ex|p[ctx]))(?:\s*\/\s*(normal|[.\d]+(?:\%|in|[cem]m|ex|p[ctx])))?\s*([-,\'\"\sa-z]+?)\s*$/i; + var fontPart = regex.exec( this.font ); + var data = { + style : fontPart[1] || 'normal', + size : fontPart[4] || '10px', + family : fontPart[6] || 'sans-serif', + weight: fontPart[3] || 'normal', + decoration : fontPart[2] || 'normal', + href : null + }; + + //canvas doesn't support underline natively, but we can pass this attribute + if(this.__fontUnderline === "underline") { + data.decoration = "underline"; + } + + //canvas also doesn't support linking, but we can pass this as well + if(this.__fontHref) { + data.href = this.__fontHref; + } + + return data; + }; + + /** + * Helper to link text fragments + * @param font + * @param element + * @return {*} + * @private + */ + ctx.prototype.__wrapTextLink = function(font, element) { + if(font.href) { + var a = this.__createElement("a"); + a.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", font.href); + a.appendChild(element); + return a; + } + return element; + }; + + /** + * Fills or strokes text + * @param text + * @param x + * @param y + * @param action - stroke or fill + * @private + */ + ctx.prototype.__applyText = function(text, x, y, action) { + var font = this.__parseFont(), + parent = this.__closestGroupOrSvg(), + textElement = this.__createElement("text", { + "font-family" : font.family, + "font-size" : font.size, + "font-style" : font.style, + "font-weight" : font.weight, + "text-decoration" : font.decoration, + "x" : x, + "y" : y, + "text-anchor": getTextAnchor(this.textAlign), + "dominant-baseline": getDominantBaseline(this.textBaseline) + }, true); + + textElement.appendChild(this.__document.createTextNode(text)); + this.__currentElement = textElement; + this.__applyStyleToCurrentElement(action); + parent.appendChild(this.__wrapTextLink(font,textElement)); + }; + + /** + * Creates a text element + * @param text + * @param x + * @param y + */ + ctx.prototype.fillText = function(text, x, y){ + this.__applyText(text, x, y, "fill"); + }; + + /** + * Strokes text + * @param text + * @param x + * @param y + */ + ctx.prototype.strokeText = function(text, x, y){ + this.__applyText(text, x, y, "stroke"); + }; + + /** + * No need to implement this for svg. + * @param text + * @return {TextMetrics} + */ + ctx.prototype.measureText = function(text){ + this.__ctx.font = this.font; + return this.__ctx.measureText(text); + }; + + /** + * Arc command! + */ + ctx.prototype.arc = function(x, y, radius, startAngle, endAngle, counterClockwise) { + // in canvas no circle is drawn if no angle is provided. 
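+ // Strategy: normalise both angles, compute the start/end points on the circle, then
+ // emit a single SVG elliptical-arc path command ("A") whose large-arc and sweep
+ // flags are derived from the angular span and the counterClockwise flag.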
if (startAngle === endAngle) { + return; + } + startAngle = startAngle % (2*Math.PI); + endAngle = endAngle % (2*Math.PI); + if(startAngle === endAngle) { + //circle time! subtract some of the angle so svg is happy (svg elliptical arc can't draw a full circle) + endAngle = ((endAngle + (2*Math.PI)) - 0.001 * (counterClockwise ? -1 : 1)) % (2*Math.PI); + } + var endX = x+radius*Math.cos(endAngle), + endY = y+radius*Math.sin(endAngle), + startX = x+radius*Math.cos(startAngle), + startY = y+radius*Math.sin(startAngle), + sweepFlag = counterClockwise ? 0 : 1, + largeArcFlag = 0, + diff = endAngle - startAngle; + + // https://github.com/gliffy/canvas2svg/issues/4 + if(diff < 0) { + diff += 2*Math.PI; + } + + if(counterClockwise) { + largeArcFlag = diff > Math.PI ? 0 : 1; + } else { + largeArcFlag = diff > Math.PI ? 1 : 0; + } + + this.lineTo(startX, startY); + this.__addPathCommand(format("A {rx} {ry} {xAxisRotation} {largeArcFlag} {sweepFlag} {endX} {endY}", + {rx:radius, ry:radius, xAxisRotation:0, largeArcFlag:largeArcFlag, sweepFlag:sweepFlag, endX:endX, endY:endY})); + + this.__currentPosition = {x: endX, y: endY}; + }; + + /** + * Generates a ClipPath from the clip command. + */ + ctx.prototype.clip = function(){ + var group = this.__closestGroupOrSvg(), + clipPath = this.__createElement("clipPath"), + id = randomString(this.__ids), + newGroup = this.__createElement("g"); + + this.__applyCurrentDefaultPath(); + group.removeChild(this.__currentElement); + clipPath.setAttribute("id", id); + clipPath.appendChild(this.__currentElement); + + this.__defs.appendChild(clipPath); + + //set the clip path to this group + group.setAttribute("clip-path", format("url(#{id})", {id:id})); + + //clip paths can be scaled and transformed, we need to add another wrapper group to avoid later transformations + // to this path + group.appendChild(newGroup); + + this.__currentElement = newGroup; + + }; + + /** + * Draws a canvas, image or mock context to this canvas. + * Note that all svg dom manipulation uses node.childNodes rather than node.children for IE support. + * http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#dom-context-2d-drawimage + */ + ctx.prototype.drawImage = function(){ + //convert arguments to a real array + var args = Array.prototype.slice.call(arguments), + image=args[0], + dx, dy, dw, dh, sx=0, sy=0, sw, sh, parent, svg, defs, group, + currentElement, svgImage, canvas, context, id; + + if(args.length === 3) { + dx = args[1]; + dy = args[2]; + sw = image.width; + sh = image.height; + dw = sw; + dh = sh; + } else if(args.length === 5) { + dx = args[1]; + dy = args[2]; + dw = args[3]; + dh = args[4]; + sw = image.width; + sh = image.height; + } else if(args.length === 9) { + sx = args[1]; + sy = args[2]; + sw = args[3]; + sh = args[4]; + dx = args[5]; + dy = args[6]; + dw = args[7]; + dh = args[8]; + } else { + throw new Error("Invalid number of arguments passed to drawImage: " + arguments.length); + } + + parent = this.__closestGroupOrSvg(); + currentElement = this.__currentElement; + var translateDirective = "translate(" + dx + ", " + dy + ")"; + if(image instanceof ctx) { + //canvas2svg mock canvas context. In the future we may want to clone nodes instead. + //also I'm currently ignoring dw, dh, sw, sh, sx, sy for a mock context.
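+ // The clone's <defs> children are moved into this context's <defs> (recording their
+ // ids in __ids so randomString() will not reuse them), and the clone's drawing group
+ // is re-parented here with the translate() directive appended to any transform it
+ // already carries.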
+ svg = image.getSvg().cloneNode(true); + if (svg.childNodes && svg.childNodes.length > 1) { + defs = svg.childNodes[0]; + while(defs.childNodes.length) { + id = defs.childNodes[0].getAttribute("id"); + this.__ids[id] = id; + this.__defs.appendChild(defs.childNodes[0]); + } + group = svg.childNodes[1]; + if (group) { + //save original transform + var originTransform = group.getAttribute("transform"); + var transformDirective; + if (originTransform) { + transformDirective = originTransform+" "+translateDirective; + } else { + transformDirective = translateDirective; + } + group.setAttribute("transform", transformDirective); + parent.appendChild(group); + } + } + } else if(image.nodeName === "CANVAS" || image.nodeName === "IMG") { + //canvas or image + svgImage = this.__createElement("image"); + svgImage.setAttribute("width", dw); + svgImage.setAttribute("height", dh); + svgImage.setAttribute("preserveAspectRatio", "none"); + + if(sx || sy || sw !== image.width || sh !== image.height) { + //crop the image using a temporary canvas + canvas = this.__document.createElement("canvas"); + canvas.width = dw; + canvas.height = dh; + context = canvas.getContext("2d"); + context.drawImage(image, sx, sy, sw, sh, 0, 0, dw, dh); + image = canvas; + } + svgImage.setAttribute("transform", translateDirective); + svgImage.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", + image.nodeName === "CANVAS" ? image.toDataURL() : image.getAttribute("src")); + parent.appendChild(svgImage); + } + }; + + /** + * Generates a pattern tag + */ + ctx.prototype.createPattern = function(image, repetition){ + var pattern = this.__document.createElementNS("http://www.w3.org/2000/svg", "pattern"), id = randomString(this.__ids), + img; + pattern.setAttribute("id", id); + pattern.setAttribute("width", image.width); + pattern.setAttribute("height", image.height); + if(image.nodeName === "CANVAS" || image.nodeName === "IMG") { + img = this.__document.createElementNS("http://www.w3.org/2000/svg", "image"); + img.setAttribute("width", image.width); + img.setAttribute("height", image.height); + img.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", + image.nodeName === "CANVAS" ? 
image.toDataURL() : image.getAttribute("src")); + pattern.appendChild(img); + this.__defs.appendChild(pattern); + } else if(image instanceof ctx) { + pattern.appendChild(image.__root.childNodes[1]); + this.__defs.appendChild(pattern); + } + return new CanvasPattern(pattern, this); + }; + + ctx.prototype.setLineDash = function(dashArray) { + if (dashArray && dashArray.length > 0) { + this.lineDash = dashArray.join(","); + } else { + this.lineDash = null; + } + }; + + /** + * Not yet implemented + */ + ctx.prototype.drawFocusRing = function(){}; + ctx.prototype.createImageData = function(){}; + ctx.prototype.getImageData = function(){}; + ctx.prototype.putImageData = function(){}; + ctx.prototype.globalCompositeOperation = function(){}; + ctx.prototype.setTransform = function(){}; + + //add options for alternative namespace + if (typeof window === "object") { + window.C2S = ctx; + } + + // CommonJS/Browserify + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = ctx; + } + +}()); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico new file mode 100644 index 0000000000000000000000000000000000000000..64045ab56e87879adb039b1fdb0bbbe0462143e7 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon-active.ico differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..08df2481551bb6903735fa69d658b5abfb0a5ae1 Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/favicon.ico differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5e3fdabbb12142dfa75702a0b0e0ca5e5425a7dd Binary files /dev/null and b/custom_nodes/ComfyUI-Custom-Scripts/web/js/assets/notify.mp3 differ diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js new file mode 100644 index 0000000000000000000000000000000000000000..46ad35f1e3e60aca155278d8c80a74618cf5652a --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/autocompleter.js @@ -0,0 +1,404 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; +import { api } from "../../../../scripts/api.js"; +import { $el, ComfyDialog } from "../../../../scripts/ui.js"; +import { TextAreaAutoComplete } from "./common/autocomplete.js"; +import { ModelInfoDialog } from "./common/modelInfoDialog.js"; + +function parseCSV(csvText) { + const rows = []; + const delimiter = ","; + const quote = '"'; + let currentField = ""; + let inQuotedField = false; + + function pushField() { + rows[rows.length - 1].push(currentField); + currentField = ""; + inQuotedField = false; + } + + rows.push([]); // Initialize the first row + + for (let i = 0; i < csvText.length; i++) { + const char = csvText[i]; + const nextChar = csvText[i + 1]; + + if (!inQuotedField) { + if (char === quote) { + inQuotedField = true; + } else if (char === delimiter) { + pushField(); + } else if (char === "\r" || char === "\n" || i === csvText.length - 1) { + pushField(); + if (nextChar === "\n") { + i++; // Handle Windows line endings (\r\n) + } + rows.push([]); // Start a new row + } else { + 
currentField += char; + } + } else { + if (char === quote && nextChar === quote) { + currentField += quote; + i++; // Skip the next quote + } else if (char === quote) { + inQuotedField = false; + } else { + currentField += char; + } + } + } + + if (currentField || csvText[csvText.length - 1] === ",") { + pushField(); + } + + // Remove the last row if it's empty + if (rows[rows.length - 1].length === 0) { + rows.pop(); + } + + return rows; +} + +async function getCustomWords() { + const resp = await api.fetchApi("/pysssss/autocomplete", { cache: "no-store" }); + if (resp.status === 200) { + return await resp.text(); + } + return undefined; +} + +async function addCustomWords(text) { + if (!text) { + text = await getCustomWords(); + } + if (text) { + TextAreaAutoComplete.updateWords( + "pysssss.customwords", + parseCSV(text).reduce((p, n) => { + let text; + let priority; + let value; + let num; + switch (n.length) { + case 0: + return; + case 1: + // Single word + text = n[0]; + break; + case 2: + // Word,[priority|alias] + num = +n[1]; + if (isNaN(num)) { + text = n[1]; + value = n[0]; + } else { + text = n[0]; + priority = num; + } + break; + case 4: + // a1111 csv format? + value = n[0]; + priority = +n[2]; + const aliases = n[3]; + if (aliases) { + const split = aliases.split(","); + for (const text of split) { + p[text] = { text, priority, value }; + } + } + text = value; + default: + // Word,alias,priority + text = n[1]; + value = n[0]; + priority = +n[2]; + break; + } + p[text] = { text, priority, value }; + return p; + }, {}) + ); + } +} + +class EmbeddingInfoDialog extends ModelInfoDialog { + async addInfo() { + super.addInfo(); + const info = await this.addCivitaiInfo(); + if (info) { + $el("div", { + parent: this.content, + innerHTML: info.description, + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + } +} + +class CustomWordsDialog extends ComfyDialog { + async show() { + const text = await getCustomWords(); + this.words = $el("textarea", { + textContent: text, + style: { + width: "70vw", + height: "70vh", + }, + }); + + const input = $el("input", { + style: { + flex: "auto", + }, + value: + "https://gist.githubusercontent.com/pythongosssss/1d3efa6050356a08cea975183088159a/raw/a18fb2f94f9156cf4476b0c24a09544d6c0baec6/danbooru-tags.txt", + }); + + super.show( + $el( + "div", + { + style: { + display: "flex", + flexDirection: "column", + overflow: "hidden", + maxHeight: "100%", + }, + }, + [ + $el("h2", { + textContent: "Custom Autocomplete Words", + style: { + color: "#fff", + marginTop: 0, + textAlign: "center", + fontFamily: "sans-serif", + }, + }), + $el( + "div", + { + style: { + color: "#fff", + fontFamily: "sans-serif", + display: "flex", + alignItems: "center", + gap: "5px", + }, + }, + [ + $el("label", { textContent: "Load Custom List: " }), + input, + $el("button", { + textContent: "Load", + onclick: async () => { + try { + const res = await fetch(input.value); + if (res.status !== 200) { + throw new Error("Error loading: " + res.status + " " + res.statusText); + } + this.words.value = await res.text(); + } catch (error) { + alert("Error loading custom list, try manually copy + pasting the list"); + } + }, + }), + ] + ), + this.words, + ] + ) + ); + } + + createButtons() { + const btns = super.createButtons(); + const save = $el("button", { + type: "button", + textContent: "Save", + onclick: async (e) => { + try { + const res = await api.fetchApi("/pysssss/autocomplete", { method: "POST", body: this.words.value }); + if (res.status !== 200) { + throw 
new Error("Error saving: " + res.status + " " + res.statusText); + } + save.textContent = "Saved!"; + addCustomWords(this.words.value); + setTimeout(() => { + save.textContent = "Save"; + }, 500); + } catch (error) { + alert("Error saving word list!"); + console.error(error); + } + }, + }); + + btns.unshift(save); + return btns; + } +} + +const id = "pysssss.AutoCompleter"; + +app.registerExtension({ + name: id, + init() { + async function addEmbeddings() { + const embeddings = await api.getEmbeddings(); + const words = {}; + words["embedding:"] = { text: "embedding:" }; + + for (const emb of embeddings) { + const v = `embedding:${emb}`; + words[v] = { + text: v, + info: () => new EmbeddingInfoDialog(emb).show("embeddings", emb), + }; + } + + TextAreaAutoComplete.updateWords("pysssss.embeddings", words); + } + + Promise.all([addEmbeddings(), addCustomWords()]); + + const STRING = ComfyWidgets.STRING; + const SKIP_WIDGETS = new Set(["ttN xyPlot.x_values", "ttN xyPlot.y_values"]); + ComfyWidgets.STRING = function (node, inputName, inputData) { + const r = STRING.apply(this, arguments); + + if (inputData[1]?.multiline) { + // Disabled on this input + const config = inputData[1]?.["pysssss.autocomplete"]; + if (config === false) return r; + + // In list of widgets to skip + const id = `${node.comfyClass}.${inputName}`; + if (SKIP_WIDGETS.has(id)) return r; + + let words; + let separator; + if (typeof config === "object") { + separator = config.separator; + words = {}; + if (config.words) { + // Custom wordlist, this will have been registered on setup + Object.assign(words, TextAreaAutoComplete.groups[node.comfyClass + "." + inputName] ?? {}); + } + + for (const item of config.groups ?? []) { + if (item === "*") { + // This widget wants all global words included + Object.assign(words, TextAreaAutoComplete.globalWords); + } else { + // This widget wants a specific group included + Object.assign(words, TextAreaAutoComplete.groups[item] ?? {}); + } + } + } + + new TextAreaAutoComplete(r.widget.inputEl, words, separator); + } + + return r; + }; + + TextAreaAutoComplete.globalSeparator = localStorage.getItem(id + ".AutoSeparate") ?? ", "; + app.ui.settings.addSetting({ + id, + name: "🐍 Text Autocomplete", + defaultValue: true, + type: (name, setter, value) => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: name, + }), + ]), + $el("td", [ + $el( + "label", + { + textContent: "Enabled ", + style: { + display: "block", + }, + }, + [ + $el("input", { + id: id.replaceAll(".", "-"), + type: "checkbox", + checked: value, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.enabled = checked; + setter(checked); + }, + }), + ] + ), + $el( + "label", + { + textContent: "Auto-insert comma ", + style: { + display: "block", + }, + }, + [ + $el("input", { + id: id.replaceAll(".", "-"), + type: "checkbox", + checked: !!TextAreaAutoComplete.globalSeparator, + onchange: (event) => { + const checked = !!event.target.checked; + TextAreaAutoComplete.globalSeparator = checked ? 
", " : ""; + localStorage.setItem(id + ".AutoSeparate", TextAreaAutoComplete.globalSeparator); + }, + }), + ] + ), + $el("button", { + textContent: "Manage Custom Words", + onclick: () => { + app.ui.settings.element.close(); + new CustomWordsDialog().show(); + }, + style: { + fontSize: "14px", + display: "block", + marginTop: "5px", + }, + }), + ]), + ]); + }, + }); + }, + beforeRegisterNodeDef(_, def) { + // Process each input to see if there is a custom word list for + // { input: { required: { something: ["STRING", { "pysssss.autocomplete": ["groupid", ["custom", "words"] ] }] } } } + const inputs = { ...def.input?.required, ...def.input?.optional }; + for (const input in inputs) { + const config = inputs[input][1]?.["pysssss.autocomplete"]; + if (!config) continue; + if (typeof config === "object" && config.words) { + const words = {}; + for (const text of config.words || []) { + const obj = typeof text === "string" ? { text } : text; + words[obj.text] = obj; + } + TextAreaAutoComplete.updateWords(def.name + "." + input, words, false); + } + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js new file mode 100644 index 0000000000000000000000000000000000000000..bf3690d9fc71c8a401723528ec32811d3e5c0e87 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/betterCombos.js @@ -0,0 +1,319 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; +import { $el } from "../../../scripts/ui.js"; +import { api } from "../../../scripts/api.js"; + +const CHECKPOINT_LOADER = "CheckpointLoader|pysssss"; +const LORA_LOADER = "LoraLoader|pysssss"; + +function getType(node) { + if (node.comfyClass === CHECKPOINT_LOADER) { + return "checkpoints"; + } + return "loras"; +} + +app.registerExtension({ + name: "pysssss.Combo++", + init() { + $el("style", { + textContent: ` + .litemenu-entry:hover .pysssss-combo-image { + display: block; + } + .pysssss-combo-image { + display: none; + position: absolute; + left: 0; + top: 0; + transform: translate(-100%, 0); + width: 256px; + height: 256px; + background-size: cover; + background-position: center; + filter: brightness(65%); + } + `, + parent: document.body, + }); + + const submenuSetting = app.ui.settings.addSetting({ + id: "pysssss.Combo++.Submenu", + name: "🐍 Enable submenu in custom nodes", + defaultValue: true, + type: "boolean", + }); + + // Ensure hook callbacks are available + const getOrSet = (target, name, create) => { + if (name in target) return target[name]; + return (target[name] = create()); + }; + const symbol = getOrSet(window, "__pysssss__", () => Symbol("__pysssss__")); + const store = getOrSet(window, symbol, () => ({})); + const contextMenuHook = getOrSet(store, "contextMenuHook", () => ({})); + for (const e of ["ctor", "preAddItem", "addItem"]) { + if (!contextMenuHook[e]) { + contextMenuHook[e] = []; + } + } + // // Checks if this is a custom combo item + const isCustomItem = (value) => value && typeof value === "object" && "image" in value && value.content; + // Simple check for what separator to split by + const splitBy = (navigator.platform || navigator.userAgent).includes("Win") ? 
/\/|\\/ : /\//; + + contextMenuHook["ctor"].push(function (values, options) { + // Copy the class from the parent so if we are dark we are also dark + // this enables the filter box + if (options.parentMenu?.options?.className === "dark") { + options.className = "dark"; + } + }); + + // After an element is created for an item, add an image if it has one + contextMenuHook["addItem"].push(function (el, menu, [name, value, options]) { + if (el && isCustomItem(value) && value?.image && !value.submenu) { + el.textContent += " *"; + $el("div.pysssss-combo-image", { + parent: el, + style: { + backgroundImage: `url(/pysssss/view/${encodeURIComponent(value.image)})`, + }, + }); + } + }); + + function buildMenu(widget, values) { + const lookup = { + "": { options: [] }, + }; + + // Split paths into menu structure + for (const value of values) { + const split = value.content.split(splitBy); + let path = ""; + for (let i = 0; i < split.length; i++) { + const s = split[i]; + const last = i === split.length - 1; + if (last) { + // Leaf node, manually add handler that sets the lora + lookup[path].options.push({ + ...value, + title: s, + callback: () => { + widget.value = value; + widget.callback(value); + app.graph.setDirtyCanvas(true); + }, + }); + } else { + const prevPath = path; + path += s + splitBy; + if (!lookup[path]) { + const sub = { + title: s, + submenu: { + options: [], + title: s, + }, + }; + + // Add to tree + lookup[path] = sub.submenu; + lookup[prevPath].options.push(sub); + } + } + } + } + + return lookup[""].options; + } + + // Override COMBO widgets to patch their values + const combo = ComfyWidgets["COMBO"]; + ComfyWidgets["COMBO"] = function (node, inputName, inputData) { + const type = inputData[0]; + const res = combo.apply(this, arguments); + if (isCustomItem(type[0])) { + let value = res.widget.value; + let values = res.widget.options.values; + let menu = null; + + // Override the option values to check if we should render a menu structure + Object.defineProperty(res.widget.options, "values", { + get() { + if (submenuSetting.value) { + if (!menu) { + // Only build the menu once + menu = buildMenu(res.widget, values); + } + return menu; + } + return values; + }, + set(v) { + // Options are changing (refresh) so reset the menu so it can be rebuilt if required + values = v; + menu = null; + }, + }); + + Object.defineProperty(res.widget, "value", { + get() { + // HACK: litegraph supports rendering items with "content" in the menu, but not on the widget + // This detects when its being called by the widget drawing and just returns the text + // Also uses the content for the same image replacement value + if (res.widget) { + const stack = new Error().stack; + if (stack.includes("drawNodeWidgets") || stack.includes("saveImageExtraOutput")) { + return (value || type[0]).content; + } + } + return value; + }, + set(v) { + if (v?.submenu) { + // Dont allow selection of submenus + return; + } + value = v; + }, + }); + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + const isCkpt = nodeType.comfyClass === CHECKPOINT_LOADER; + const isLora = nodeType.comfyClass === LORA_LOADER; + if (isCkpt || isLora) { + const onAdded = nodeType.prototype.onAdded; + nodeType.prototype.onAdded = function () { + onAdded?.apply(this, arguments); + const { widget: exampleList } = ComfyWidgets["COMBO"](this, "example", [[""]], app); + + let exampleWidget; + + const get = async (route, suffix) => { + const url = encodeURIComponent(`${getType(nodeType)}${suffix || ""}`); + 
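+ // e.g. for a LoraLoader node, route "examples" with the (hypothetical) filename
+ // "add_detail.safetensors" yields GET /pysssss/examples/loras%2Fadd_detail.safetensors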
return await api.fetchApi(`/pysssss/${route}/${url}`); + }; + + const getExample = async () => { + if (exampleList.value === "[none]") { + if (exampleWidget) { + exampleWidget.inputEl.remove(); + exampleWidget = null; + this.widgets.length -= 1; + } + return; + } + + const v = this.widgets[0].value.content; + const pos = v.lastIndexOf("."); + const name = v.substr(0, pos); + + const example = await (await get("view", `/${name}/${exampleList.value}`)).text(); + if (!exampleWidget) { + exampleWidget = ComfyWidgets["STRING"](this, "prompt", ["STRING", { multiline: true }], app).widget; + exampleWidget.inputEl.readOnly = true; + exampleWidget.inputEl.style.opacity = 0.6; + } + exampleWidget.value = example; + }; + + const exampleCb = exampleList.callback; + exampleList.callback = function () { + getExample(); + return exampleCb?.apply(this, arguments) ?? exampleList.value; + }; + + const listExamples = async () => { + exampleList.disabled = true; + exampleList.options.values = ["[none]"]; + exampleList.value = "[none]"; + let examples = []; + if (this.widgets[0].value?.content) { + try { + examples = await (await get("examples", `/${this.widgets[0].value.content}`)).json(); + } catch (error) {} + } + exampleList.options.values = ["[none]", ...examples]; + exampleList.callback(); + exampleList.disabled = !examples.length; + app.graph.setDirtyCanvas(true, true); + }; + + const modelWidget = this.widgets[0]; + const modelCb = modelWidget.callback; + let prev = undefined; + modelWidget.callback = function () { + const ret = modelCb?.apply(this, arguments) ?? modelWidget.value; + let v = ret; + if (ret?.content) { + v = ret.content; + } + if (prev !== v) { + listExamples(); + prev = v; + } + return ret; + }; + setTimeout(() => { + modelWidget.callback(); + }, 30); + }; + } + + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + if (this.imgs) { + // If this node has images then we add an open in new tab item + let img; + if (this.imageIndex != null) { + // An image is selected so select that + img = this.imgs[this.imageIndex]; + } else if (this.overIndex != null) { + // No image is selected but one is hovered + img = this.imgs[this.overIndex]; + } + if (img) { + const nodes = app.graph._nodes.filter( + (n) => n.comfyClass === LORA_LOADER || n.comfyClass === CHECKPOINT_LOADER + ); + if (nodes.length) { + options.unshift({ + content: "Save as Preview", + submenu: { + options: nodes.map((n) => ({ + content: n.widgets[0].value.content, + callback: async () => { + const url = new URL(img.src); + const { image } = await api.fetchApi( + "/pysssss/save/" + encodeURIComponent(`${getType(n)}/${n.widgets[0].value.content}`), + { + method: "POST", + body: JSON.stringify({ + filename: url.searchParams.get("filename"), + subfolder: url.searchParams.get("subfolder"), + type: url.searchParams.get("type"), + }), + headers: { + "content-type": "application/json", + }, + } + ); + n.widgets[0].value.image = image; + app.refreshComboInNodes(); + }, + })), + }, + }); + } + } + } + return getExtraMenuOptions?.apply(this, arguments); + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css new file mode 100644 index 0000000000000000000000000000000000000000..fa22a39c34ad8e043fe2c796411627012d0d45f9 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.css @@ -0,0 +1,61 @@ +.pysssss-autocomplete { + color: 
var(--descrip-text); + background-color: var(--comfy-menu-bg); + position: absolute; + font-family: sans-serif; + box-shadow: 3px 3px 8px rgba(0, 0, 0, 0.4); + z-index: 9999; +} + +.pysssss-autocomplete-item { + cursor: pointer; + padding: 3px 7px; + display: flex; + border-left: 3px solid transparent; + align-items: center; +} + +.pysssss-autocomplete-item--selected { + border-left-color: dodgerblue; +} + +.pysssss-autocomplete-highlight { + font-weight: bold; + text-decoration: underline; + text-decoration-color: dodgerblue; +} + +.pysssss-autocomplete-pill { + margin-left: auto; + font-size: 10px; + color: #fff; + padding: 2px 4px 2px 14px; + position: relative; +} + +.pysssss-autocomplete-pill::after { + content: ""; + display: block; + background: rgba(255, 255, 255, 0.25); + width: calc(100% - 10px); + height: 100%; + position: absolute; + left: 10px; + top: 0; + border-radius: 5px; +} + +.pysssss-autocomplete-pill + .pysssss-autocomplete-pill { + margin-left: 0; +} + +.pysssss-autocomplete-item-info { + margin-left: auto; + transition: filter 0.2s; + will-change: filter; + text-decoration: none; + padding-left: 10px; +} +.pysssss-autocomplete-item-info:hover { + filter: invert(1); +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js new file mode 100644 index 0000000000000000000000000000000000000000..a8f232e52cc2090582e67f128c9a55282deeed4b --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/autocomplete.js @@ -0,0 +1,633 @@ +import { $el } from "../../../../scripts/ui.js"; +import { addStylesheet } from "./utils.js"; + +addStylesheet(import.meta.url); + +/* + https://github.com/component/textarea-caret-position + The MIT License (MIT) + + Copyright (c) 2015 Jonathan Ong me@jongleberry.com + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ +const getCaretCoordinates = (function () { + // We'll copy the properties below into the mirror div. + // Note that some browsers, such as Firefox, do not concatenate properties + // into their shorthand (e.g. padding-top, padding-bottom etc. -> padding), + // so we have to list every single property explicitly. 
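+ // (the mirror-div approach below copies each of these onto an off-screen <div>,
+ // fills it with the text before the caret, and reads the caret's pixel position
+ // from a marker <span> holding the remainder of the value)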
+  var properties = [
+    "direction", // RTL support
+    "boxSizing",
+    "width", // on Chrome and IE, exclude the scrollbar, so the mirror div wraps exactly as the textarea does
+    "height",
+    "overflowX",
+    "overflowY", // copy the scrollbar for IE
+
+    "borderTopWidth",
+    "borderRightWidth",
+    "borderBottomWidth",
+    "borderLeftWidth",
+    "borderStyle",
+
+    "paddingTop",
+    "paddingRight",
+    "paddingBottom",
+    "paddingLeft",
+
+    // https://developer.mozilla.org/en-US/docs/Web/CSS/font
+    "fontStyle",
+    "fontVariant",
+    "fontWeight",
+    "fontStretch",
+    "fontSize",
+    "fontSizeAdjust",
+    "lineHeight",
+    "fontFamily",
+
+    "textAlign",
+    "textTransform",
+    "textIndent",
+    "textDecoration", // might not make a difference, but better be safe
+
+    "letterSpacing",
+    "wordSpacing",
+
+    "tabSize",
+    "MozTabSize",
+  ];
+
+  var isBrowser = typeof window !== "undefined";
+  var isFirefox = isBrowser && window.mozInnerScreenX != null;
+
+  return function getCaretCoordinates(element, position, options) {
+    if (!isBrowser) {
+      throw new Error("textarea-caret-position#getCaretCoordinates should only be called in a browser");
+    }
+
+    var debug = (options && options.debug) || false;
+    if (debug) {
+      var el = document.querySelector("#input-textarea-caret-position-mirror-div");
+      if (el) el.parentNode.removeChild(el);
+    }
+
+    // The mirror div will replicate the textarea's style
+    var div = document.createElement("div");
+    div.id = "input-textarea-caret-position-mirror-div";
+    document.body.appendChild(div);
+
+    var style = div.style;
+    var computed = window.getComputedStyle ? window.getComputedStyle(element) : element.currentStyle; // currentStyle for IE < 9
+    var isInput = element.nodeName === "INPUT";
+
+    // Default textarea styles
+    style.whiteSpace = "pre-wrap";
+    if (!isInput) style.wordWrap = "break-word"; // only for textarea-s
+
+    // Position off-screen
+    style.position = "absolute"; // required to return coordinates properly
+    if (!debug) style.visibility = "hidden"; // not 'display: none' because we want rendering
+
+    // Transfer the element's properties to the div
+    properties.forEach(function (prop) {
+      if (isInput && prop === "lineHeight") {
+        // Special case for <input>s because text is rendered centered and line height may be != height
+        if (computed.boxSizing === "border-box") {
+          var height = parseInt(computed.height);
+          var outerHeight =
+            parseInt(computed.paddingTop) +
+            parseInt(computed.paddingBottom) +
+            parseInt(computed.borderTopWidth) +
+            parseInt(computed.borderBottomWidth);
+          var targetHeight = outerHeight + parseInt(computed.lineHeight);
+          if (height > targetHeight) {
+            style.lineHeight = height - outerHeight + "px";
+          } else if (height === targetHeight) {
+            style.lineHeight = computed.lineHeight;
+          } else {
+            style.lineHeight = 0;
+          }
+        } else {
+          style.lineHeight = computed.height;
+        }
+      } else {
+        style[prop] = computed[prop];
+      }
+    });
+
+    if (isFirefox) {
+      // Firefox lies about the overflow property for textareas: https://bugzilla.mozilla.org/show_bug.cgi?id=984275
+      if (element.scrollHeight > parseInt(computed.height)) style.overflowY = "scroll";
+    } else {
+      style.overflow = "hidden"; // for Chrome to not render a scrollbar; IE keeps overflowY = 'scroll'
+    }
+
+    div.textContent = element.value.substring(0, position);
+    // The second special handling for input type="text" vs textarea:
+    // spaces need to be replaced with non-breaking spaces - http://stackoverflow.com/a/13402035/1269037
+    if (isInput) div.textContent = div.textContent.replace(/\s/g, "\u00a0");
+
+    var span = document.createElement("span");
+    // Wrapping must be replicated *exactly*, including when a long word gets
+    // onto the next line, with whitespace at the end of the line before (#7).
+    // The *only* reliable way to do that is to copy the *entire* rest of the
+    // textarea's content into the created <span> at the caret position.
+    // For inputs, just '.' would be enough, but no need to bother.
+    span.textContent = element.value.substring(position) || "."; // || because a completely empty faux span doesn't render at all
+    div.appendChild(span);
+
+    var coordinates = {
+      top: span.offsetTop + parseInt(computed["borderTopWidth"]),
+      left: span.offsetLeft + parseInt(computed["borderLeftWidth"]),
+      height: parseInt(computed["lineHeight"]),
+    };
+
+    if (debug) {
+      span.style.backgroundColor = "#aaa";
+    } else {
+      document.body.removeChild(div);
+    }
+
+    return coordinates;
+  };
+})();
+
+/*
+  Key functions from:
+  https://github.com/yuku/textcomplete
+  © Yuku Takahashi - This software is licensed under the MIT license.
+
+  The MIT License (MIT)
+
+  Copyright (c) 2015 Jonathan Ong me@jongleberry.com
+
+  Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+  The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+const CHAR_CODE_ZERO = "0".charCodeAt(0);
+const CHAR_CODE_NINE = "9".charCodeAt(0);
+
+class TextAreaCaretHelper {
+  constructor(el) {
+    this.el = el;
+  }
+
+  #calculateElementOffset() {
+    const rect = this.el.getBoundingClientRect();
+    const owner = this.el.ownerDocument;
+    if (owner == null) {
+      throw new Error("Given element does not belong to document");
+    }
+    const { defaultView, documentElement } = owner;
+    if (defaultView == null) {
+      throw new Error("Given element does not belong to window");
+    }
+    const offset = {
+      top: rect.top + defaultView.pageYOffset,
+      left: rect.left + defaultView.pageXOffset,
+    };
+    if (documentElement) {
+      offset.top -= documentElement.clientTop;
+      offset.left -= documentElement.clientLeft;
+    }
+    return offset;
+  }
+
+  #isDigit(charCode) {
+    return CHAR_CODE_ZERO <= charCode && charCode <= CHAR_CODE_NINE;
+  }
+
+  #getLineHeightPx() {
+    const computedStyle = getComputedStyle(this.el);
+    const lineHeight = computedStyle.lineHeight;
+    // If the char code starts with a digit, it is either a value in pixels,
+    // or unitless, as per:
+    // https://drafts.csswg.org/css2/visudet.html#propdef-line-height
+    // https://drafts.csswg.org/css2/cascade.html#computed-value
+    if (this.#isDigit(lineHeight.charCodeAt(0))) {
+      const floatLineHeight = parseFloat(lineHeight);
+      // In real browsers the value is *always* in pixels, even for unit-less
+      // line-heights. However, we still check as per the spec.
+      return this.#isDigit(lineHeight.charCodeAt(lineHeight.length - 1))
+        ? floatLineHeight * parseFloat(computedStyle.fontSize)
+        : floatLineHeight;
+    }
+    // Otherwise, the value is "normal".
+    // If the line-height is "normal", calculate by font-size
+    return this.#calculateLineHeightPx(this.el.nodeName, computedStyle);
+  }
+
+  /**
+   * Returns calculated line-height of the given node in pixels.
+   */
+  #calculateLineHeightPx(nodeName, computedStyle) {
+    const body = document.body;
+    if (!body) return 0;
+
+    const tempNode = document.createElement(nodeName);
+    tempNode.innerHTML = "&nbsp;";
+    Object.assign(tempNode.style, {
+      fontSize: computedStyle.fontSize,
+      fontFamily: computedStyle.fontFamily,
+      padding: "0",
+    });
+    body.appendChild(tempNode);
+
+    // Make sure textarea has only 1 row
+    if (tempNode instanceof HTMLTextAreaElement) {
+      tempNode.rows = 1;
+    }
+
+    // Assume the height of the element is the line-height
+    const height = tempNode.offsetHeight;
+    body.removeChild(tempNode);
+
+    return height;
+  }
+
+  getCursorOffset() {
+    const elOffset = this.#calculateElementOffset();
+    const elScroll = this.#getElScroll();
+    const cursorPosition = this.#getCursorPosition();
+    const lineHeight = this.#getLineHeightPx();
+    const top = elOffset.top - elScroll.top + cursorPosition.top + lineHeight;
+    const left = elOffset.left - elScroll.left + cursorPosition.left;
+    const clientTop = this.el.getBoundingClientRect().top;
+    if (this.el.dir !== "rtl") {
+      return { top, left, lineHeight, clientTop };
+    } else {
+      const right = document.documentElement ? document.documentElement.clientWidth - left : 0;
+      return { top, right, lineHeight, clientTop };
+    }
+  }
+
+  #getElScroll() {
+    return { top: this.el.scrollTop, left: this.el.scrollLeft };
+  }
+
+  #getCursorPosition() {
+    return getCaretCoordinates(this.el, this.el.selectionEnd);
+  }
+
+  getBeforeCursor() {
+    return this.el.selectionStart !== this.el.selectionEnd ? null : this.el.value.substring(0, this.el.selectionEnd);
+  }
+
+  getAfterCursor() {
+    return this.el.value.substring(this.el.selectionEnd);
+  }
+
+  insertAtCursor(value, offset, finalOffset) {
+    if (this.el.selectionStart != null) {
+      const startPos = this.el.selectionStart;
+      const endPos = this.el.selectionEnd;
+
+      this.el.value =
+        this.el.value.substring(0, startPos + offset) + value + this.el.value.substring(endPos, this.el.value.length);
+      this.el.selectionEnd = this.el.selectionStart = startPos + value.length + offset + (finalOffset ?? 0);
+    } else {
+      this.el.value += value;
+    }
+  }
+}
+
+/*********************/
+
+/**
+ * @typedef {{
+ *   text: string,
+ *   priority?: number,
+ *   info?: Function,
+ *   hint?: string,
+ *   showValue?: boolean,
+ *   caretOffset?: number
+ * }} AutoCompleteEntry
+ */
+export class TextAreaAutoComplete {
+  static globalSeparator = "";
+  static enabled = true;
+
+  /** @type {Record<string, Record<string, AutoCompleteEntry>>} */
+  static groups = {};
+  /** @type {Set<string>} */
+  static globalGroups = new Set();
+  /** @type {Record<string, AutoCompleteEntry>} */
+  static globalWords = {};
+
+  /** @type {HTMLTextAreaElement} */
+  el;
+
+  /** @type {Record<string, AutoCompleteEntry>} */
+  overrideWords;
+  overrideSeparator = "";
+
+  get words() {
+    return this.overrideWords ?? TextAreaAutoComplete.globalWords;
+  }
+
+  get separator() {
+    return this.overrideSeparator ??
TextAreaAutoComplete.globalSeparator; + } + + /** + * @param {HTMLTextAreaElement} el + */ + constructor(el, words = null, separator = null) { + this.el = el; + this.helper = new TextAreaCaretHelper(el); + this.dropdown = $el("div.pysssss-autocomplete"); + this.overrideWords = words; + this.overrideSeparator = separator; + + this.#setup(); + } + + #setup() { + this.el.addEventListener("keydown", this.#keyDown.bind(this)); + this.el.addEventListener("keypress", this.#keyPress.bind(this)); + this.el.addEventListener("keyup", this.#keyUp.bind(this)); + this.el.addEventListener("click", this.#hide.bind(this)); + this.el.addEventListener("blur", () => setTimeout(() => this.#hide(), 150)); + } + + /** + * @param {KeyboardEvent} e + */ + #keyDown(e) { + if (!TextAreaAutoComplete.enabled) return; + + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "ArrowUp": + e.preventDefault(); + if (this.selected.index) { + this.#setSelected(this.currentWords[this.selected.index - 1].wordInfo); + } else { + this.#setSelected(this.currentWords[this.currentWords.length - 1].wordInfo); + } + break; + case "ArrowDown": + e.preventDefault(); + if (this.selected.index === this.currentWords.length - 1) { + this.#setSelected(this.currentWords[0].wordInfo); + } else { + this.#setSelected(this.currentWords[this.selected.index + 1].wordInfo); + } + break; + case "Tab": + e.preventDefault(); + this.#insertItem(); + break; + } + } + } + + /** + * @param {KeyboardEvent} e + */ + #keyPress(e) { + if (!TextAreaAutoComplete.enabled) return; + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "Enter": + if (!e.ctrlKey) { + e.preventDefault(); + this.#insertItem(); + } + break; + } + } + + if (!e.defaultPrevented) { + this.#update(); + } + } + + #keyUp(e) { + if (!TextAreaAutoComplete.enabled) return; + if (this.dropdown.parentElement) { + // We are visible + switch (e.key) { + case "Escape": + e.preventDefault(); + this.#hide(); + break; + } + } else if (e.key === "ArrowUp" || e.key === "ArrowDown" || e.key === "ArrowLeft" || e.key === "ArrowRight") { + return; + } + if (!e.defaultPrevented) { + this.#update(); + } + } + + #setSelected(item) { + if (this.selected) { + this.selected.el.classList.remove("pysssss-autocomplete-item--selected"); + } + + this.selected = item; + this.selected.el.classList.add("pysssss-autocomplete-item--selected"); + } + + #insertItem() { + if (!this.selected) return; + this.selected.el.click(); + } + + #getFilteredWords(term) { + term = term.toLocaleLowerCase(); + + const priorityMatches = []; + const prefixMatches = []; + const includesMatches = []; + for (const word of Object.keys(this.words)) { + const lowerWord = word.toLocaleLowerCase(); + if (lowerWord === term) { + // Dont include exact matches + continue; + } + + const pos = lowerWord.indexOf(term); + if (pos === -1) { + // No match + continue; + } + + const wordInfo = this.words[word]; + if (wordInfo.priority) { + priorityMatches.push({ pos, wordInfo }); + } else if (pos) { + includesMatches.push({ pos, wordInfo }); + } else { + prefixMatches.push({ pos, wordInfo }); + } + } + + priorityMatches.sort( + (a, b) => + b.wordInfo.priority - a.wordInfo.priority || + a.wordInfo.text.length - b.wordInfo.text.length || + a.wordInfo.text.localeCompare(b.wordInfo.text) + ); + + const top = priorityMatches.length * 0.2; + return priorityMatches + .slice(0, top) + .concat(prefixMatches, priorityMatches.slice(top), includesMatches) + .slice(0, 20); + } + + #update() { + let before = 
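+          // A worked sketch (illustrative, not from the source): for a prompt
+          // ending in "best qua", the regex below captures the run of
+          // characters after the last whitespace/comma/semicolon/quote, so
+          // `before` becomes "qua" and is what #getFilteredWords above
+          // matches against the word list.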
this.helper.getBeforeCursor(); + if (before?.length) { + const m = before.match(/([^\s|,|;|"]+)$/); + if (m) { + before = m[0]; + } else { + before = null; + } + } + + if (!before) { + this.#hide(); + return; + } + + this.currentWords = this.#getFilteredWords(before); + if (!this.currentWords.length) { + this.#hide(); + return; + } + + this.dropdown.style.display = ""; + + let hasSelected = false; + const items = this.currentWords.map(({ wordInfo, pos }, i) => { + const parts = [ + $el("span", { + textContent: wordInfo.text.substr(0, pos), + }), + $el("span.pysssss-autocomplete-highlight", { + textContent: wordInfo.text.substr(pos, before.length), + }), + $el("span", { + textContent: wordInfo.text.substr(pos + before.length), + }), + ]; + + if (wordInfo.hint) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.hint, + }) + ); + } + + if (wordInfo.priority) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.priority, + }) + ); + } + + if (wordInfo.value && wordInfo.text !== wordInfo.value && wordInfo.showValue !== false) { + parts.push( + $el("span.pysssss-autocomplete-pill", { + textContent: wordInfo.value, + }) + ); + } + + if (wordInfo.info) { + parts.push( + $el("a.pysssss-autocomplete-item-info", { + textContent: "ℹ️", + title: "View info...", + onclick: (e) => { + e.stopPropagation(); + wordInfo.info(); + e.preventDefault(); + }, + }) + ); + } + const item = $el( + "div.pysssss-autocomplete-item", + { + onclick: () => { + this.el.focus(); + this.helper.insertAtCursor( + (wordInfo.value ?? wordInfo.text) + this.separator, + -before.length, + wordInfo.caretOffset + ); + setTimeout(() => { + this.#update(); + }, 150); + }, + onmousemove: () => { + this.#setSelected(wordInfo); + }, + }, + parts + ); + + if (wordInfo === this.selected) { + hasSelected = true; + } + + wordInfo.index = i; + wordInfo.el = item; + + return item; + }); + + this.#setSelected(hasSelected ? this.selected : this.currentWords[0].wordInfo); + this.dropdown.replaceChildren(...items); + + if (!this.dropdown.parentElement) { + document.body.append(this.dropdown); + } + + const position = this.helper.getCursorOffset(); + this.dropdown.style.left = (position.left ?? 0) + "px"; + this.dropdown.style.top = (position.top ?? 
0) + "px"; + } + + #hide() { + this.selected = null; + this.dropdown.remove(); + } + + static updateWords(id, words, addGlobal = true) { + const isUpdate = id in TextAreaAutoComplete.groups; + TextAreaAutoComplete.groups[id] = words; + if (addGlobal) { + TextAreaAutoComplete.globalGroups.add(id); + } + + if (isUpdate) { + // Remerge all words + TextAreaAutoComplete.globalWords = Object.assign( + {}, + ...Object.keys(TextAreaAutoComplete.groups) + .filter((k) => TextAreaAutoComplete.globalGroups.has(k)) + .map((k) => TextAreaAutoComplete.groups[k]) + ); + } else if (addGlobal) { + // Just insert the new words + Object.assign(TextAreaAutoComplete.globalWords, words); + } + } +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js new file mode 100644 index 0000000000000000000000000000000000000000..7533e7e5e93beee798123a8ed84d36a50d2a4702 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/binding.js @@ -0,0 +1,244 @@ +// @ts-check +// @ts-ignore +import { ComfyWidgets } from "../../../../scripts/widgets.js"; +// @ts-ignore +import { api } from "../../../../scripts/api.js"; +// @ts-ignore +import { app } from "../../../../scripts/app.js"; + +const PathHelper = { + get(obj, path) { + if (typeof path !== "string") { + // Hardcoded value + return path; + } + + if (path[0] === '"' && path[path.length - 1] === '"') { + // Hardcoded string + return JSON.parse(path); + } + + // Evaluate the path + path = path.split(".").filter(Boolean); + for (const p of path) { + const k = isNaN(+p) ? p : +p; + obj = obj[k]; + } + + return obj; + }, + set(obj, path, value) { + // https://stackoverflow.com/a/54733755 + if (Object(obj) !== obj) return obj; // When obj is not an object + // If not yet an array, get the keys from the string-path + if (!Array.isArray(path)) path = path.toString().match(/[^.[\]]+/g) || []; + path.slice(0, -1).reduce( + ( + a, + c, + i // Iterate all of them except the last one + ) => + Object(a[c]) === a[c] // Does the key exist and is its value an object? + ? // Yes: then follow that path + a[c] + : // No: create the key. Is the next key a potential array-index? + (a[c] = + Math.abs(path[i + 1]) >> 0 === +path[i + 1] + ? 
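+// Usage sketch for the PathHelper being defined here (hypothetical values,
+// following the logic above):
+//   PathHelper.get({ a: { b: [10, 20] } }, "a.b.1"); // -> 20
+//   PathHelper.get(state, '"literal"');              // -> "literal" (JSON-parsed)
+//   PathHelper.set({}, "a.b.1", 5);                  // -> { a: { b: [ <empty>, 5 ] } }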
[] // Yes: assign a new array object
+              : {}), // No: assign a new plain object
+        obj
+      )[path[path.length - 1]] = value; // Finally assign the value to the last key
+    return obj; // Return the top-level object to allow chaining
+  },
+};
+
+/***
+ @typedef { {
+   left: string;
+   op: "eq" | "ne",
+   right: string
+ } } IfCondition
+
+ @typedef { {
+   type: "if",
+   condition: Array<IfCondition>,
+   true?: Array<BindingCallback>,
+   false?: Array<BindingCallback>
+ } } IfCallback
+
+ @typedef { {
+   type: "fetch",
+   url: string,
+   then: Array<BindingCallback>
+ } } FetchCallback
+
+ @typedef { {
+   type: "set",
+   target: string,
+   value: string
+ } } SetCallback
+
+ @typedef { {
+   type: "validate-combo",
+ } } ValidateComboCallback
+
+ @typedef { IfCallback | FetchCallback | SetCallback | ValidateComboCallback } BindingCallback
+
+ @typedef { {
+   source: string,
+   callback: Array<BindingCallback>
+ } } Binding
+***/
+
+/**
+ * @param {IfCondition} condition
+ */
+function evaluateCondition(condition, state) {
+  const left = PathHelper.get(state, condition.left);
+  const right = PathHelper.get(state, condition.right);
+
+  let r;
+  if (condition.op === "eq") {
+    r = left === right;
+  } else {
+    r = left !== right;
+  }
+
+  return r;
+}
+
+/**
+ * @type { Record<string, (cb: BindingCallback, state: Record<string, any>) => Promise<void>> }
+ */
+const callbacks = {
+  /**
+   * @param {IfCallback} cb
+   */
+  async if(cb, state) {
+    // For now only support ANDs
+    let success = true;
+    for (const condition of cb.condition) {
+      const r = evaluateCondition(condition, state);
+      if (!r) {
+        success = false;
+        break;
+      }
+    }
+
+    for (const m of cb[success + ""] ?? []) {
+      await invokeCallback(m, state);
+    }
+  },
+  /**
+   * @param {FetchCallback} cb
+   */
+  async fetch(cb, state) {
+    const url = cb.url.replace(/\{([^\}]+)\}/g, (m, v) => {
+      return PathHelper.get(state, v);
+    });
+    const res = await (await api.fetchApi(url)).json();
+    state["$result"] = res;
+    for (const m of cb.then) {
+      await invokeCallback(m, state);
+    }
+  },
+  /**
+   * @param {SetCallback} cb
+   */
+  async set(cb, state) {
+    const value = PathHelper.get(state, cb.value);
+    PathHelper.set(state, cb.target, value);
+  },
+  async "validate-combo"(cb, state) {
+    const w = state["$this"];
+    const valid = w.options.values.includes(w.value);
+    if (!valid) {
+      w.value = w.options.values[0];
+    }
+  },
+};
+
+async function invokeCallback(callback, state) {
+  if (callback.type in callbacks) {
+    // @ts-ignore
+    await callbacks[callback.type](callback, state);
+  } else {
+    console.warn(
+      "%c[🐍 pysssss]",
+      "color: limegreen",
+      `[binding ${state.$node.comfyClass}.${state.$this.name}]`,
+      "unsupported binding callback type:",
+      callback.type
+    );
+  }
+}
+
+app.registerExtension({
+  name: "pysssss.Binding",
+  beforeRegisterNodeDef(node, nodeData) {
+    const hasBinding = (v) => {
+      if (!v) return false;
+      return Object.values(v).find((c) => c[1]?.["pysssss.binding"]);
+    };
+    const inputs = { ...nodeData.input?.required, ...nodeData.input?.optional };
+    if (hasBinding(inputs)) {
+      const onAdded = node.prototype.onAdded;
+      node.prototype.onAdded = function () {
+        const r = onAdded?.apply(this, arguments);
+
+        for (const widget of this.widgets || []) {
+          const bindings = inputs[widget.name][1]?.["pysssss.binding"];
+          if (!bindings) continue;
+
+          for (const binding of bindings) {
+            /**
+             * @type {import("../../../../../web/types/litegraph.d.ts").IWidget}
+             */
+            const source = this.widgets.find((w) => w.name === binding.source);
+            if (!source) {
+              console.warn(
+                "%c[🐍 pysssss]",
+                "color: limegreen",
+                `[binding ${node.comfyClass}.${widget.name}]`,
+                "unable to find source binding widget:",
+                binding.source,
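+// Example of a (hypothetical) binding a node definition could attach to a
+// widget's extra options, built from the callback types handled above:
+//   "pysssss.binding": [{ source: "mode", callback: [{
+//     type: "if",
+//     condition: [{ left: "$source.value", op: "eq", right: '"advanced"' }],
+//     true: [{ type: "set", target: "$this.value", value: "$source.value" }],
+//   }] }]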
binding + ); + continue; + } + + let lastValue; + async function valueChanged() { + const state = { + $this: widget, + $source: source, + $node: node, + }; + + for (const callback of binding.callback) { + await invokeCallback(callback, state); + } + + app.graph.setDirtyCanvas(true, false); + } + + const cb = source.callback; + source.callback = function () { + const v = cb?.apply(this, arguments) ?? source.value; + if (v !== lastValue) { + lastValue = v; + valueChanged(); + } + return v; + }; + + lastValue = source.value; + valueChanged(); + } + } + + return r; + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css new file mode 100644 index 0000000000000000000000000000000000000000..4357d9d053367d57d5b48100663244c575c3a63f --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.css @@ -0,0 +1,98 @@ +.pysssss-lightbox { + width: 100vw; + height: 100vh; + position: fixed; + top: 0; + left: 0; + z-index: 999; + background: rgba(0, 0, 0, 0.6); + display: flex; + align-items: center; + transition: opacity 0.2s; +} + +.pysssss-lightbox-prev, +.pysssss-lightbox-next { + height: 60px; + display: flex; + align-items: center; +} + +.pysssss-lightbox-prev:after, +.pysssss-lightbox-next:after { + border-style: solid; + border-width: 0.25em 0.25em 0 0; + display: inline-block; + height: 0.45em; + left: 0.15em; + position: relative; + top: 0.15em; + transform: rotate(-135deg) scale(0.75); + vertical-align: top; + width: 0.45em; + padding: 10px; + font-size: 20px; + margin: 0 10px 0 20px; + transition: color 0.2s; + flex-shrink: 0; + content: ""; +} + +.pysssss-lightbox-next:after { + transform: rotate(45deg) scale(0.75); + margin: 0 20px 0 0px; +} + +.pysssss-lightbox-main { + flex: auto; + text-align: center; +} + +.pysssss-lightbox-link { + display: inline-block; + position: relative; +} + +.pysssss-lightbox .lds-ring { + position: absolute; + left: 50%; + top: 50%; + transform: translate(-50%, -50%); +} + +.pysssss-lightbox-img { + max-height: 90vh; + max-width: calc(100vw - 130px); + height: auto; + object-fit: contain; + border: 3px solid white; + border-radius: 4px; + transition: opacity 0.2s; + user-select: none; +} + +.pysssss-lightbox-img:hover { + border-color: dodgerblue; +} + +.pysssss-lightbox-close { + font-size: 80px; + line-height: 1ch; + height: 1ch; + width: 1ch; + position: absolute; + right: 10px; + top: 10px; + padding: 5px; +} + +.pysssss-lightbox-close:after { + content: "\00d7"; +} + +.pysssss-lightbox-close:hover, +.pysssss-lightbox-prev:hover, +.pysssss-lightbox-next:hover { + color: dodgerblue; + cursor: pointer; +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js new file mode 100644 index 0000000000000000000000000000000000000000..7e074459a95fdd5a94102226910e3a963abcc8dd --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/lightbox.js @@ -0,0 +1,102 @@ +import { $el } from "../../../../scripts/ui.js"; +import { addStylesheet, getUrl, loadImage } from "./utils.js"; +import { createSpinner } from "./spinner.js"; + +addStylesheet(getUrl("lightbox.css", import.meta.url)); + +const $$el = (tag, name, ...args) => { + if (name) name = "-" + name; + return $el(tag + ".pysssss-lightbox" + name, ...args); +}; + +const ani = async (a, t, b) => { + a(); + await new Promise((r) => setTimeout(r, t)); + b(); +}; + +export class Lightbox { + constructor() { + 
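+    // Usage sketch (URLs are illustrative): gather image hrefs, then
+    //   lightbox.show(["/view?filename=a.png", "/view?filename=b.png"], 0);
+    // show() stores the list and index; update(0) then renders the current
+    // entry and toggles the prev/next arrows.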
this.el = $$el("div", "", { + parent: document.body, + onclick: (e) => { + e.stopImmediatePropagation(); + this.close(); + }, + style: { + display: "none", + opacity: 0, + }, + }); + this.closeBtn = $$el("div", "close", { + parent: this.el, + }); + this.prev = $$el("div", "prev", { + parent: this.el, + onclick: (e) => { + this.update(-1); + e.stopImmediatePropagation(); + }, + }); + this.main = $$el("div", "main", { + parent: this.el, + }); + this.next = $$el("div", "next", { + parent: this.el, + onclick: (e) => { + this.update(1); + e.stopImmediatePropagation(); + }, + }); + this.link = $$el("a", "link", { + parent: this.main, + target: "_blank", + }); + this.spinner = createSpinner(); + this.link.appendChild(this.spinner); + this.img = $$el("img", "img", { + style: { + opacity: 0, + }, + parent: this.link, + onclick: (e) => { + e.stopImmediatePropagation(); + }, + }); + } + + close() { + ani( + () => (this.el.style.opacity = 0), + 200, + () => (this.el.style.display = "none") + ); + } + + async show(images, index) { + this.images = images; + this.index = index || 0; + await this.update(0); + } + + async update(shift) { + this.index += shift; + + this.prev.style.visibility = this.index ? "unset" : "hidden"; + this.next.style.visibility = this.index === this.images.length - 1 ? "hidden" : "unset"; + + const img = this.images[this.index]; + this.el.style.display = "flex"; + this.el.clientWidth; // Force a reflow + this.el.style.opacity = 1; + this.img.style.opacity = 0; + this.spinner.style.display = "inline-block"; + await loadImage(img); + this.spinner.style.display = "none"; + this.link.href = img; + this.img.src = img; + this.img.style.opacity = 1; + } +} + +export const lightbox = new Lightbox(); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css new file mode 100644 index 0000000000000000000000000000000000000000..7c9718d030826984e9239bf5298176c7c3f10e16 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.css @@ -0,0 +1,104 @@ +.pysssss-model-info { + color: white; + font-family: sans-serif; + max-width: 90vw; +} +.pysssss-model-content { + display: flex; + flex-direction: column; + overflow: hidden; +} +.pysssss-model-info h2 { + text-align: center; + margin: 0 0 10px 0; +} +.pysssss-model-info p { + margin: 5px 0; +} +.pysssss-model-info a { + color: dodgerblue; +} +.pysssss-model-info a:hover { + text-decoration: underline; +} +.pysssss-model-tags-list { + display: flex; + flex-wrap: wrap; + list-style: none; + gap: 10px; + max-height: 200px; + overflow: auto; + margin: 10px 0; + padding: 0; +} +.pysssss-model-tag { + background-color: rgb(128, 213, 247); + color: #000; + display: flex; + align-items: center; + gap: 5px; + border-radius: 5px; + padding: 2px 5px; + cursor: pointer; +} +.pysssss-model-tag--selected span::before { + content: "✅"; + position: absolute; + background-color: dodgerblue; + left: 0; + top: 0; + right: 0; + bottom: 0; + text-align: center; +} +.pysssss-model-tag:hover { + outline: 2px solid dodgerblue; +} +.pysssss-model-tag p { + margin: 0; +} +.pysssss-model-tag span { + text-align: center; + border-radius: 5px; + background-color: dodgerblue; + color: #fff; + padding: 2px; + position: relative; + min-width: 20px; + overflow: hidden; +} + +.pysssss-model-metadata .comfy-modal-content { + max-width: 100%; +} +.pysssss-model-metadata label { + margin-right: 1ch; + color: #ccc; +} + +.pysssss-model-metadata span { + color: 
dodgerblue; +} + +.pysssss-preview { + max-width: 50%; + margin-left: 10px; + position: relative; +} +.pysssss-preview img { + max-height: 300px; +} +.pysssss-preview button { + position: absolute; + font-size: 12px; + bottom: 10px; + right: 10px; +} +.pysssss-model-notes { + background-color: rgba(0, 0, 0, 0.25); + padding: 5px; + margin-top: 5px; +} +.pysssss-model-notes:empty { + display: none; +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js new file mode 100644 index 0000000000000000000000000000000000000000..9a8c989453f777f4c3e9591c7331651858cf91d3 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/modelInfoDialog.js @@ -0,0 +1,303 @@ +import { $el, ComfyDialog } from "../../../../scripts/ui.js"; +import { api } from "../../../../scripts/api.js"; +import { addStylesheet } from "./utils.js"; + +addStylesheet(import.meta.url); + +class MetadataDialog extends ComfyDialog { + constructor() { + super(); + + this.element.classList.add("pysssss-model-metadata"); + } + show(metadata) { + super.show( + $el( + "div", + Object.keys(metadata).map((k) => + $el("div", [$el("label", { textContent: k }), $el("span", { textContent: metadata[k] })]) + ) + ) + ); + } +} + +export class ModelInfoDialog extends ComfyDialog { + constructor(name) { + super(); + this.name = name; + this.element.classList.add("pysssss-model-info"); + } + + get customNotes() { + return this.metadata["pysssss.notes"]; + } + + set customNotes(v) { + this.metadata["pysssss.notes"] = v; + } + + get hash() { + return this.metadata["pysssss.sha256"]; + } + + async show(type, value) { + this.type = type; + + const req = api.fetchApi("/pysssss/metadata/" + encodeURIComponent(`${type}/${value}`)); + this.info = $el("div", { style: { flex: "auto" } }); + this.img = $el("img", { style: { display: "none" } }); + this.imgWrapper = $el("div.pysssss-preview", [this.img]); + this.main = $el("main", { style: { display: "flex" } }, [this.info, this.imgWrapper]); + this.content = $el("div.pysssss-model-content", [$el("h2", { textContent: this.name }), this.main]); + + const loading = $el("div", { textContent: "ℹ️ Loading...", parent: this.content }); + + super.show(this.content); + + this.metadata = await (await req).json(); + this.viewMetadata.style.cursor = this.viewMetadata.style.opacity = ""; + this.viewMetadata.removeAttribute("disabled"); + + loading.remove(); + this.addInfo(); + } + + createButtons() { + const btns = super.createButtons(); + this.viewMetadata = $el("button", { + type: "button", + textContent: "View raw metadata", + disabled: "disabled", + style: { + opacity: 0.5, + cursor: "not-allowed", + }, + onclick: (e) => { + if (this.metadata) { + new MetadataDialog().show(this.metadata); + } + }, + }); + + btns.unshift(this.viewMetadata); + return btns; + } + + getNoteInfo() { + function parseNote() { + if (!this.customNotes) return []; + + let notes = []; + // Extract links from notes + const r = new RegExp("(\\bhttps?:\\/\\/[^\\s]+)", "g"); + let end = 0; + let m; + do { + m = r.exec(this.customNotes); + let pos; + let fin = 0; + if (m) { + pos = m.index; + fin = m.index + m[0].length; + } else { + pos = this.customNotes.length; + } + + let pre = this.customNotes.substring(end, pos); + if (pre) { + pre = pre.replaceAll("\n", "
<br>
"); + notes.push( + $el("span", { + innerHTML: pre, + }) + ); + } + if (m) { + notes.push( + $el("a", { + href: m[0], + textContent: m[0], + target: "_blank", + }) + ); + } + + end = fin; + } while (m); + return notes; + } + + let textarea; + let notesContainer; + const editText = "✏️ Edit"; + const edit = $el("a", { + textContent: editText, + href: "#", + style: { + float: "right", + color: "greenyellow", + textDecoration: "none", + }, + onclick: async (e) => { + e.preventDefault(); + + if (textarea) { + this.customNotes = textarea.value; + + const resp = await api.fetchApi( + "/pysssss/metadata/notes/" + encodeURIComponent(`${this.type}/${this.name}`), + { + method: "POST", + body: this.customNotes, + } + ); + + if (resp.status !== 200) { + console.error(resp); + alert(`Error saving notes (${req.status}) ${req.statusText}`); + return; + } + + e.target.textContent = editText; + textarea.remove(); + textarea = null; + + notesContainer.replaceChildren(...parseNote.call(this)); + } else { + e.target.textContent = "💾 Save"; + textarea = $el("textarea", { + style: { + width: "100%", + minWidth: "200px", + minHeight: "50px", + }, + textContent: this.customNotes, + }); + e.target.after(textarea); + notesContainer.replaceChildren(); + textarea.style.height = Math.min(textarea.scrollHeight, 300) + "px"; + } + }, + }); + + notesContainer = $el("div.pysssss-model-notes", parseNote.call(this)); + return $el( + "div", + { + style: { display: "contents" }, + }, + [edit, notesContainer] + ); + } + + addInfo() { + this.addInfoEntry("Notes", this.getNoteInfo()); + } + + addInfoEntry(name, value) { + return $el( + "p", + { + parent: this.info, + }, + [ + typeof name === "string" ? $el("label", { textContent: name + ": " }) : name, + typeof value === "string" ? $el("span", { textContent: value }) : value, + ] + ); + } + + async getCivitaiDetails() { + const req = await fetch("https://civitai.com/api/v1/model-versions/by-hash/" + this.hash); + if (req.status === 200) { + return await req.json(); + } else if (req.status === 404) { + throw new Error("Model not found"); + } else { + throw new Error(`Error loading info (${req.status}) ${req.statusText}`); + } + } + + addCivitaiInfo() { + const promise = this.getCivitaiDetails(); + const content = $el("span", { textContent: "ℹ️ Loading..." }); + + this.addInfoEntry( + $el("label", [ + $el("img", { + style: { + width: "18px", + position: "relative", + top: "3px", + margin: "0 5px 0 0", + }, + src: "https://civitai.com/favicon.ico", + }), + $el("span", { textContent: "Civitai: " }), + ]), + content + ); + + return promise + .then((info) => { + content.replaceChildren( + $el("a", { + href: "https://civitai.com/models/" + info.modelId, + textContent: "View " + info.model.name, + target: "_blank", + }) + ); + + if (info.images?.length) { + this.img.src = info.images[0].url; + this.img.style.display = ""; + + this.imgSave = $el("button", { + textContent: "Use as preview", + parent: this.imgWrapper, + onclick: async () => { + // Convert the preview to a blob + const blob = await (await fetch(this.img.src)).blob(); + + // Store it in temp + const name = "temp_preview." 
+ new URL(this.img.src).pathname.split(".")[1]; + const body = new FormData(); + body.append("image", new File([blob], name)); + body.append("overwrite", "true"); + body.append("type", "temp"); + + const resp = await api.fetchApi("/upload/image", { + method: "POST", + body, + }); + + if (resp.status !== 200) { + console.error(resp); + alert(`Error saving preview (${req.status}) ${req.statusText}`); + return; + } + + // Use as preview + await api.fetchApi("/pysssss/save/" + encodeURIComponent(`${this.type}/${this.name}`), { + method: "POST", + body: JSON.stringify({ + filename: name, + type: "temp", + }), + headers: { + "content-type": "application/json", + }, + }); + app.refreshComboInNodes(); + }, + }); + } + + return info; + }) + .catch((err) => { + content.textContent = "⚠️ " + err.message; + }); + } +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css new file mode 100644 index 0000000000000000000000000000000000000000..9a29451ab3bb313c33c593a53e2d3328872b1171 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.css @@ -0,0 +1,35 @@ +.lds-ring { + display: inline-block; + position: relative; + width: 80px; + height: 80px; +} +.lds-ring div { + box-sizing: border-box; + display: block; + position: absolute; + width: 64px; + height: 64px; + margin: 8px; + border: 5px solid #fff; + border-radius: 50%; + animation: lds-ring 1.2s cubic-bezier(0.5, 0, 0.5, 1) infinite; + border-color: #fff transparent transparent transparent; +} +.lds-ring div:nth-child(1) { + animation-delay: -0.45s; +} +.lds-ring div:nth-child(2) { + animation-delay: -0.3s; +} +.lds-ring div:nth-child(3) { + animation-delay: -0.15s; +} +@keyframes lds-ring { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js new file mode 100644 index 0000000000000000000000000000000000000000..f01315d80f4bf2dcfa67a5909f65bf8cfab19be0 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/spinner.js @@ -0,0 +1,9 @@ +import { addStylesheet } from "./utils.js"; + +addStylesheet(import.meta.url); + +export function createSpinner() { + const div = document.createElement("div"); + div.innerHTML = `
<div class="lds-ring"><div></div><div></div><div></div><div></div></div>
`; + return div.firstElementChild; +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js new file mode 100644 index 0000000000000000000000000000000000000000..cd7539d449a2ac85672fb36da4be725f352b86f8 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/common/utils.js @@ -0,0 +1,30 @@ +import { $el } from "../../../../scripts/ui.js"; + +export function addStylesheet(url) { + if (url.endsWith(".js")) { + url = url.substr(0, url.length - 2) + "css"; + } + $el("link", { + parent: document.head, + rel: "stylesheet", + type: "text/css", + href: url.startsWith("http") ? url : getUrl(url), + }); +} + +export function getUrl(path, baseUrl) { + if (baseUrl) { + return new URL(path, baseUrl).toString(); + } else { + return new URL("../" + path, import.meta.url).toString(); + } +} + +export async function loadImage(url) { + return new Promise((res, rej) => { + const img = new Image(); + img.onload = res; + img.onerror = rej; + img.src = url; + }); +} diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js new file mode 100644 index 0000000000000000000000000000000000000000..c493e70e75aeb042709cd880b1d544c8eca4b94e --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/contextMenuHook.js @@ -0,0 +1,90 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.ContextMenuHook", + init() { + const getOrSet = (target, name, create) => { + if (name in target) return target[name]; + return (target[name] = create()); + }; + const symbol = getOrSet(window, "__pysssss__", () => Symbol("__pysssss__")); + const store = getOrSet(window, symbol, () => ({})); + const contextMenuHook = getOrSet(store, "contextMenuHook", () => ({})); + for (const e of ["ctor", "preAddItem", "addItem"]) { + if (!contextMenuHook[e]) { + contextMenuHook[e] = []; + } + } + + // Big ol' hack to get allow customizing the context menu + // Replace the addItem function with our own that wraps the context of "this" with a proxy + // That proxy then replaces the constructor with another proxy + // That proxy then calls the custom ContextMenu that supports filters + const ctorProxy = new Proxy(LiteGraph.ContextMenu, { + construct(target, args) { + return new LiteGraph.ContextMenu(...args); + }, + }); + + function triggerCallbacks(name, getArgs, handler) { + const callbacks = contextMenuHook[name]; + if (callbacks && callbacks instanceof Array) { + for (const cb of callbacks) { + const r = cb(...getArgs()); + handler?.call(this, r); + } + } else { + console.warn("[pysssss 🐍]", `invalid ${name} callbacks`, callbacks, name in contextMenuHook); + } + } + + const addItem = LiteGraph.ContextMenu.prototype.addItem; + LiteGraph.ContextMenu.prototype.addItem = function () { + const proxy = new Proxy(this, { + get(target, prop) { + if (prop === "constructor") { + return ctorProxy; + } + return target[prop]; + }, + }); + proxy.__target__ = this; + + let el; + let args = arguments; + triggerCallbacks( + "preAddItem", + () => [el, this, args], + (r) => { + if (r !== undefined) el = r; + } + ); + + if (el === undefined) { + el = addItem.apply(proxy, arguments); + } + + triggerCallbacks( + "addItem", + () => [el, this, args], + (r) => { + if (r !== undefined) el = r; + } + ); + return el; + }; + + // We also need to patch the ContextMenu constructor to unwrap the parent else it fails a LiteGraph type check + const ctxMenu = LiteGraph.ContextMenu; 
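+// Sketch of how another extension could consume these hooks; the symbol
+// lookup follows the getOrSet bootstrap above, and the callback shape
+// mirrors the triggerCallbacks invocations (el, menu, args):
+//   const hooks = window[window.__pysssss__].contextMenuHook;
+//   hooks.addItem.push((el, menu, args) => {
+//     if (el?.textContent === "Title") el.style.fontWeight = "bold";
+//   });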
+ LiteGraph.ContextMenu = function (values, options) { + if (options?.parentMenu) { + if (options.parentMenu.__target__) { + options.parentMenu = options.parentMenu.__target__; + } + } + + triggerCallbacks("ctor", () => [values, options]); + ctxMenu.call(this, values, options); + }; + LiteGraph.ContextMenu.prototype = ctxMenu.prototype; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js new file mode 100644 index 0000000000000000000000000000000000000000..2975c87f234749ad65914f2e400b86173cc50a67 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/customColors.js @@ -0,0 +1,85 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; + +const colorShade = (col, amt) => { + col = col.replace(/^#/, ""); + if (col.length === 3) col = col[0] + col[0] + col[1] + col[1] + col[2] + col[2]; + + let [r, g, b] = col.match(/.{2}/g); + [r, g, b] = [parseInt(r, 16) + amt, parseInt(g, 16) + amt, parseInt(b, 16) + amt]; + + r = Math.max(Math.min(255, r), 0).toString(16); + g = Math.max(Math.min(255, g), 0).toString(16); + b = Math.max(Math.min(255, b), 0).toString(16); + + const rr = (r.length < 2 ? "0" : "") + r; + const gg = (g.length < 2 ? "0" : "") + g; + const bb = (b.length < 2 ? "0" : "") + b; + + return `#${rr}${gg}${bb}`; +}; + +app.registerExtension({ + name: "pysssss.CustomColors", + setup() { + let picker; + let activeNode; + const onMenuNodeColors = LGraphCanvas.onMenuNodeColors; + LGraphCanvas.onMenuNodeColors = function (value, options, e, menu, node) { + const r = onMenuNodeColors.apply(this, arguments); + requestAnimationFrame(() => { + const menus = document.querySelectorAll(".litecontextmenu"); + for (let i = menus.length - 1; i >= 0; i--) { + if (menus[i].firstElementChild.textContent.includes("No color")) { + $el( + "div.litemenu-entry.submenu", + { + parent: menus[i], + $: (el) => { + el.onclick = () => { + LiteGraph.closeAllContextMenus(); + if (!picker) { + picker = $el("input", { + type: "color", + parent: document.body, + style: { + display: "none", + }, + }); + picker.onchange = () => { + if (activeNode) { + if (activeNode.constructor === LiteGraph.LGraphGroup) { + activeNode.color = picker.value; + } else { + activeNode.color = colorShade(picker.value, 20); + activeNode.bgcolor = picker.value; + } + activeNode.setDirtyCanvas(true, true); + } + }; + } + activeNode = null; + picker.value = node.bgcolor; + activeNode = node; + picker.click(); + }; + }, + }, + [ + $el("span", { + style: { + paddingLeft: "4px", + display: "block", + }, + textContent: "🎨 Custom", + }), + ] + ); + break; + } + } + }); + return r; + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js new file mode 100644 index 0000000000000000000000000000000000000000..b68eafc55f6ec6f28f74ad6a24bfb2ae08097cc6 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/faviconStatus.js @@ -0,0 +1,44 @@ +import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; + +// Simple script that adds the current queue size to the window title +// Adds a favicon that changes color while active + +app.registerExtension({ + name: "pysssss.FaviconStatus", + setup() { + let link = document.querySelector("link[rel~='icon']"); + if (!link) { + link = document.createElement("link"); + link.rel = "icon"; + document.head.appendChild(link); + } + + let executing = 
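+    // Icon paths resolve relative to this module, so this assumes the
+    // extension ships assets/favicon.ico and assets/favicon-active.ico
+    // next to this script (see the update() template below).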
false; + const update = () => (link.href = new URL(`assets/favicon${executing ? "-active" : ""}.ico`, import.meta.url)); + + for (const e of ["execution_start", "progress"]) { + api.addEventListener(e, () => { + executing = true; + update(); + }); + } + + api.addEventListener("executing", ({ detail }) => { + // null will be sent when it's finished + executing = !!detail; + update(); + }); + + api.addEventListener("status", ({ detail }) => { + let title = "ComfyUI"; + if (detail && detail.exec_info.queue_remaining) { + title = `(${detail.exec_info.queue_remaining}) ${title}`; + } + document.title = title; + update(); + executing = false; + }); + update(); + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js new file mode 100644 index 0000000000000000000000000000000000000000..8f25e241a2b91a01ca95a376385bdbe572994752 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/graphArrange.js @@ -0,0 +1,91 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "pysssss.GraphArrange", + setup(app) { + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + options.push({ content: "Arrange (float left)", callback: () => graph.arrange() }); + options.push({ + content: "Arrange (float right)", + callback: () => { + (function () { + var margin = 50; + var layout; + + const nodes = this.computeExecutionOrder(false, true); + const columns = []; + + // Find node first use + for (let i = nodes.length - 1; i >= 0; i--) { + const node = nodes[i]; + let max = null; + for (const out of node.outputs || []) { + if (out.links) { + for (const link of out.links) { + const outNode = app.graph.getNodeById(app.graph.links[link].target_id); + if (!outNode) continue; + var l = outNode._level - 1; + if (max === null) max = l; + else if (l < max) max = l; + } + } + } + if (max != null) node._level = max; + } + + for (let i = 0; i < nodes.length; ++i) { + const node = nodes[i]; + const col = node._level || 1; + if (!columns[col]) { + columns[col] = []; + } + columns[col].push(node); + } + + let x = margin; + + for (let i = 0; i < columns.length; ++i) { + const column = columns[i]; + if (!column) { + continue; + } + column.sort((a, b) => { + var as = !(a.type === "SaveImage" || a.type === "PreviewImage"); + var bs = !(b.type === "SaveImage" || b.type === "PreviewImage"); + var r = as - bs; + if (r === 0) r = (a.inputs?.length || 0) - (b.inputs?.length || 0); + if (r === 0) r = (a.outputs?.length || 0) - (b.outputs?.length || 0); + return r; + }); + let max_size = 100; + let y = margin + LiteGraph.NODE_TITLE_HEIGHT; + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] = layout == LiteGraph.VERTICAL_LAYOUT ? y : x; + node.pos[1] = layout == LiteGraph.VERTICAL_LAYOUT ? x : y; + const max_size_index = layout == LiteGraph.VERTICAL_LAYOUT ? 1 : 0; + if (node.size[max_size_index] > max_size) { + max_size = node.size[max_size_index]; + } + const node_size_index = layout == LiteGraph.VERTICAL_LAYOUT ? 
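+          // Note: `layout` is declared above but never assigned, so the
+          // VERTICAL_LAYOUT comparisons here always take the horizontal
+          // branch and columns are laid out left-to-right.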
0 : 1; + y += node.size[node_size_index] + margin + LiteGraph.NODE_TITLE_HEIGHT + j; + } + + // Right align in column + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] += max_size - node.size[0]; + } + x += max_size + margin; + } + + this.setDirtyCanvas(true, true); + }).apply(app.graph); + }, + }); + return options; + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js new file mode 100644 index 0000000000000000000000000000000000000000..b47791233dce605507b71acab3defa2b97a1dd44 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/imageFeed.js @@ -0,0 +1,439 @@ +import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; +import { lightbox } from "./common/lightbox.js"; + +$el("style", { + textContent: ` + .pysssss-image-feed { + position: absolute; + background: var(--comfy-menu-bg); + color: var(--fg-color); + z-index: 99; + font-family: sans-serif; + font-size: 12px; + display: flex; + flex-direction: column; + } + .pysssss-image-feed--top, .pysssss-image-feed--bottom { + width: 100vw; + min-height: 30px; + max-height: calc(var(--max-size, 20) * 1vh); + } + .pysssss-image-feed--top { + top: 0; + } + .pysssss-image-feed--bottom { + bottom: 0; + flex-direction: column-reverse; + padding-top: 5px; + } + .pysssss-image-feed--left, .pysssss-image-feed--right { + top: 0; + height: 100vh; + min-width: 200px; + max-width: calc(var(--max-size, 10) * 1vw); + } + .pysssss-image-feed--left { + left: 0; + } + .pysssss-image-feed--right { + right: 0; + } + + .pysssss-image-feed--left .pysssss-image-feed-menu, .pysssss-image-feed--right .pysssss-image-feed-menu { + flex-direction: column; + } + + .pysssss-image-feed-menu { + position: relative; + flex: 0 1 min-content; + display: flex; + gap: 5px; + padding: 5px; + justify-content: space-between; + } + .pysssss-image-feed-btn-group { + align-items: stretch; + display: flex; + gap: .5rem; + flex: 0 1 fit-content; + justify-content: flex-end; + } + .pysssss-image-feed-btn { + background-color:var(--comfy-input-bg); + border-radius:5px; + border:2px solid var(--border-color); + color: var(--fg-color); + cursor:pointer; + display:inline-block; + flex: 0 1 fit-content; + text-decoration:none; + } + .pysssss-image-feed-btn.sizing-btn:checked { + filter: invert(); + } + .pysssss-image-feed-btn.clear-btn { + padding: 5px 20px; + } + .pysssss-image-feed-btn.hide-btn { + padding: 5px; + aspect-ratio: 1 / 1; + } + .pysssss-image-feed-btn:hover { + filter: brightness(1.2); + } + .pysssss-image-feed-btn:active { + position:relative; + top:1px; + } + + .pysssss-image-feed-menu section { + border-radius: 5px; + background: rgba(0,0,0,0.6); + padding: 0 5px; + display: flex; + gap: 5px; + align-items: center; + position: relative; + } + .pysssss-image-feed-menu section span { + white-space: nowrap; + } + .pysssss-image-feed-menu section input { + flex: 1 1 100%; + background: rgba(0,0,0,0.6); + border-radius: 5px; + overflow: hidden; + z-index: 100; + } + + .sizing-menu { + position: relative; + } + + .size-controls-flyout { + position: absolute; + transform: scaleX(0%); + transition: 200ms ease-out; + transition-delay: 500ms; + z-index: 101; + width: 300px; + } + + .sizing-menu:hover .size-controls-flyout { + transform: scale(1, 1); + transition: 200ms linear; + transition-delay: 0; + } + .pysssss-image-feed--bottom .size-controls-flyout { + transform: 
scale(1,0); + transform-origin: bottom; + bottom: 0; + left: 0; + } + .pysssss-image-feed--top .size-controls-flyout { + transform: scale(1,0); + transform-origin: top; + top: 0; + left: 0; + } + .pysssss-image-feed--left .size-controls-flyout { + transform: scale(0, 1); + transform-origin: left; + top: 0; + left: 0; + } + .pysssss-image-feed--right .size-controls-flyout { + transform: scale(0, 1); + transform-origin: right; + top: 0; + right: 0; + } + + .pysssss-image-feed-menu > * { + min-height: 24px; + } + .pysssss-image-feed-list { + flex: 1 1 auto; + overflow-y: auto; + display: grid; + align-items: center; + justify-content: center; + gap: 4px; + grid-auto-rows: min-content; + grid-template-columns: repeat(var(--img-sz, 3), 1fr); + transition: 100ms linear; + scrollbar-gutter: stable both-edges; + padding: 5px; + background: var(--comfy-input-bg); + border-radius: 5px; + margin: 5px; + margin-top: 0px; + } + .pysssss-image-feed-list:empty { + display: none; + } + .pysssss-image-feed-list div { + height: 100%; + text-align: center; + } + .pysssss-image-feed-list::-webkit-scrollbar { + background: var(--comfy-input-bg); + border-radius: 5px; + } + .pysssss-image-feed-list::-webkit-scrollbar-thumb { + background:var(--comfy-menu-bg); + border: 5px solid transparent; + border-radius: 8px; + background-clip: content-box; + } + .pysssss-image-feed-list::-webkit-scrollbar-thumb:hover { + background: var(--border-color); + background-clip: content-box; + } + .pysssss-image-feed-list img { + object-fit: var(--img-fit, contain); + max-width: 100%; + max-height: calc(var(--max-size) * 1vh); + border-radius: 4px; + } + .pysssss-image-feed-list img:hover { + filter: brightness(1.2); + }`, + parent: document.body, +}); + +app.registerExtension({ + name: "pysssss.ImageFeed", + setup() { + let visible = true; + const showButton = $el("button.comfy-settings-btn", { + textContent: "🖼️", + style: { + right: "16px", + cursor: "pointer", + display: "none", + }, + }); + + const getVal = (n, d) => { + const v = localStorage.getItem("pysssss.ImageFeed." + n); + if (v && !isNaN(+v)) { + return v; + } + return d; + }; + + const saveVal = (n, v) => { + localStorage.setItem("pysssss.ImageFeed." 
+ n, v); + }; + + const imageFeed = $el("div.pysssss-image-feed", { + parent: document.body, + }); + const imageList = $el("div.pysssss-image-feed-list"); + + const feedLocation = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.Location", + name: "🐍 Image Feed Location", + defaultValue: "bottom", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: "🐍 Image Feed Location:", + }), + ]), + $el("td", [ + $el( + "select", + { + style: { + fontSize: "14px", + }, + oninput: (e) => { + feedLocation.value = e.target.value; + imageFeed.className = `pysssss-image-feed pysssss-image-feed--${feedLocation.value}`; + }, + }, + ["left", "top", "right", "bottom"].map((m) => + $el("option", { + value: m, + textContent: m, + selected: feedLocation.value === m, + }) + ) + ), + ]), + ]); + }, + onChange(value) { + imageFeed.className = `pysssss-image-feed pysssss-image-feed--${value}`; + }, + }); + + const feedDirection = app.ui.settings.addSetting({ + id: "pysssss.ImageFeed.Direction", + name: "🐍 Image Feed Direction", + defaultValue: "newest first", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: "🐍 Image Feed Direction:", + }), + ]), + $el("td", [ + $el( + "select", + { + style: { + fontSize: "14px", + }, + oninput: (e) => { + feedDirection.value = e.target.value; + imageList.replaceChildren(...[...imageList.childNodes].reverse()); + }, + }, + ["newest first", "oldest first"].map((m) => + $el("option", { + value: m, + textContent: m, + selected: feedDirection.value === m, + }) + ) + ), + ]), + ]); + }, + }); + + const clearButton = $el("button.pysssss-image-feed-btn.clear-btn", { + textContent: "Clear", + onclick: () => imageList.replaceChildren(), + }); + + const hideButton = $el("button.pysssss-image-feed-btn.hide-btn", { + textContent: "❌", + onclick: () => { + imageFeed.style.display = "none"; + showButton.style.display = "unset"; + saveVal("Visible", 0); + visible = false; + }, + }); + + let columnInput; + function updateColumnCount(v) { + columnInput.parentElement.title = `Controls the number of columns in the feed (${v} columns).\nClick label to set custom value.`; + imageFeed.style.setProperty("--img-sz", v); + saveVal("ImageSize", v); + columnInput.max = Math.max(10, v, columnInput.max); + columnInput.value = v; + } + + imageFeed.append( + $el("div.pysssss-image-feed-menu", [ + $el("section.sizing-menu", {}, [ + $el("label.size-control-handle", { textContent: "↹ Resize Feed" }), + $el("div.size-controls-flyout", {}, [ + $el("section.size-control.feed-size-control", {}, [ + $el("span", { + textContent: "Feed Size...", + }), + $el("input", { + type: "range", + min: 10, + max: 80, + oninput: (e) => { + e.target.parentElement.title = `Controls the maximum size of the image feed panel (${e.target.value}vh)`; + imageFeed.style.setProperty("--max-size", e.target.value); + saveVal("FeedSize", e.target.value); + }, + $: (el) => { + requestAnimationFrame(() => { + el.value = getVal("FeedSize", 25); + el.oninput({ target: el }); + }); + }, + }), + ]), + $el("section.size-control.image-size-control", {}, [ + $el("a", { + textContent: "Column count...", + style: { + cursor: "pointer", + textDecoration: "underline", + }, + onclick: () => { + const v = +prompt("Enter custom column count", 20); + if (!isNaN(v)) { + updateColumnCount(v); + } + }, + }), + $el("input", { + type: "range", + min: 1, + max: 10, + step: 1, + oninput: (e) => { + updateColumnCount(e.target.value); + }, + $: (el) => { + columnInput = el; + requestAnimationFrame(() => { 
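+              // Persisted feed settings live in localStorage under the keys
+              // used by getVal/saveVal above: pysssss.ImageFeed.FeedSize,
+              // pysssss.ImageFeed.ImageSize and pysssss.ImageFeed.Visible.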
+ updateColumnCount(getVal("ImageSize", 4)); + }); + }, + }), + ]), + ]), + ]), + $el("div.pysssss-image-feed-btn-group", {}, [clearButton, hideButton]), + ]), + imageList + ); + showButton.onclick = () => { + imageFeed.style.display = "block"; + showButton.style.display = "none"; + saveVal("Visible", 1); + visible = true; + }; + document.querySelector(".comfy-settings-btn").after(showButton); + + if (!+getVal("Visible", 1)) { + hideButton.onclick(); + } + + api.addEventListener("executed", ({ detail }) => { + if (visible && detail?.output?.images) { + for (const src of detail.output.images) { + const href = `/view?filename=${encodeURIComponent(src.filename)}&type=${ + src.type + }&subfolder=${encodeURIComponent(src.subfolder)}&t=${+new Date()}`; + + const method = feedDirection.value === "newest first" ? "prepend" : "append"; + imageList[method]( + $el("div", [ + $el( + "a", + { + target: "_blank", + href, + onclick: (e) => { + const imgs = [...imageList.querySelectorAll("img")].map((img) => img.getAttribute("src")); + lightbox.show(imgs, imgs.indexOf(href)); + e.preventDefault(); + }, + }, + [$el("img", { src: href })] + ), + ]) + ); + } + } + }); + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js new file mode 100644 index 0000000000000000000000000000000000000000..7b92d4f1d6596a6d9679d3ef4c7dffe9ea297327 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/kSamplerAdvDenoise.js @@ -0,0 +1,54 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.KSamplerAdvDenoise", + async beforeRegisterNodeDef(nodeType) { + // Add menu options to conver to/from widgets + const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = origGetExtraMenuOptions?.apply?.(this, arguments); + + let stepsWidget = null; + let startAtWidget = null; + let endAtWidget = null; + for (const w of this.widgets || []) { + if (w.name === "steps") { + stepsWidget = w; + } else if (w.name === "start_at_step") { + startAtWidget = w; + } else if (w.name === "end_at_step") { + endAtWidget = w; + } + } + + if (stepsWidget && startAtWidget && endAtWidget) { + options.push( + { + content: "Set Denoise", + callback: () => { + const steps = +prompt("How many steps do you want?", 15); + if (isNaN(steps)) { + return; + } + const denoise = +prompt("How much denoise? 
(0-1)", 0.5); + if (isNaN(denoise)) { + return; + } + + stepsWidget.value = Math.floor(steps / Math.max(0, Math.min(1, denoise))); + stepsWidget.callback?.(stepsWidget.value); + + startAtWidget.value = stepsWidget.value - steps; + startAtWidget.callback?.(startAtWidget.value); + + endAtWidget.value = stepsWidget.value; + endAtWidget.callback?.(endAtWidget.value); + }, + }, + null + ); + } + + return r; + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js new file mode 100644 index 0000000000000000000000000000000000000000..010de6ee1085e3fbb4e5b3d3a381e38e261b53c7 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/linkRenderMode.js @@ -0,0 +1,57 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; + +const id = "pysssss.LinkRenderMode"; +const ext = { + name: id, + async setup(app) { + if (app.extensions.find((ext) => ext.name === "Comfy.LinkRenderMode")) { + console.log("%c[🐍 pysssss]", "color: limegreen", "Skipping LinkRenderMode as core extension found"); + return; + } + const setting = app.ui.settings.addSetting({ + id, + name: "🐍 Link Render Mode", + defaultValue: 2, + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: "🐍 Link Render Mode:", + }), + ]), + $el("td", [ + $el( + "select", + { + textContent: "Manage", + style: { + fontSize: "14px", + }, + oninput: (e) => { + setting.value = e.target.value; + app.canvas.links_render_mode = +e.target.value; + app.graph.setDirtyCanvas(true, true); + }, + }, + LiteGraph.LINK_RENDER_MODES.map((m, i) => + $el("option", { + value: i, + textContent: m, + selected: i == app.canvas.links_render_mode, + }) + ) + ), + ]), + ]); + }, + onChange(value) { + app.canvas.links_render_mode = +value; + app.graph.setDirtyCanvas(true); + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js new file mode 100644 index 0000000000000000000000000000000000000000..64cafe2818899413fc81e1c906d619ee9f949d9d --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/locking.js @@ -0,0 +1,185 @@ +import { app } from "../../../scripts/app.js"; + +// Adds lock/unlock menu item for nodes + groups to prevent moving / resizing them + +const LOCKED = Symbol(); + +function lockArray(arr, isLocked) { + const v = []; + + for (let i = 0; i < 2; i++) { + v[i] = arr[i]; + + Object.defineProperty(arr, i, { + get() { + return v[i]; + }, + set(value) { + if (!isLocked()) { + v[i] = value; + } + }, + }); + } +} + +app.registerExtension({ + name: "pysssss.Locking", + init() { + function lockGroup(node) { + node[LOCKED] = true; + } + + // Add the locked flag to serialization + const serialize = LGraphGroup.prototype.serialize; + LGraphGroup.prototype.serialize = function () { + const o = serialize.apply(this, arguments); + o.locked = !!this[LOCKED]; + return o; + }; + + // On initial configure lock group if required + const configure = LGraphGroup.prototype.configure; + LGraphGroup.prototype.configure = function (o) { + configure.apply(this, arguments); + if (o.locked) { + lockGroup(this); + } + }; + + // Allow click through locked groups + const getGroupOnPos = LGraph.prototype.getGroupOnPos; + LGraph.prototype.getGroupOnPos = function () { + const r = getGroupOnPos.apply(this, arguments); + if (r && r[LOCKED] && !new 
Error().stack.includes("processContextMenu")) return null; + return r; + }; + + // Add menu options for lock/unlock + const getGroupMenuOptions = LGraphCanvas.prototype.getGroupMenuOptions; + LGraphCanvas.prototype.getGroupMenuOptions = function (node) { + const opts = getGroupMenuOptions.apply(this, arguments); + + opts.unshift( + node[LOCKED] + ? { + content: "Unlock", + callback: () => { + delete node[LOCKED]; + }, + } + : { + content: "Lock", + callback: () => lockGroup(node), + }, + null + ); + + return opts; + }; + }, + setup() { + const drawNodeShape = LGraphCanvas.prototype.drawNodeShape; + LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) { + const res = drawNodeShape.apply(this, arguments); + + if (node[LOCKED]) { + ctx.fillText("🔒", node.size[0] - 20, -10); + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType) { + const nodesArray = (nodes) => { + if (nodes) { + if (nodes instanceof Array) { + return nodes; + } + return [nodes]; + } + return Object.values(app.canvas.selected_nodes); + }; + function unlockNode(nodes) { + nodes = nodesArray(nodes); + for (const node of nodes) { + delete node[LOCKED]; + } + app.graph.setDirtyCanvas(true, false); + } + function lockNode(nodes) { + nodes = nodesArray(nodes); + for (const node of nodes) { + if (node[LOCKED]) continue; + + node[LOCKED] = true; + // Same hack as above + lockArray(node.pos, () => !!node[LOCKED]); + + // Size is set by both replacing the value and setting individual values + // So define a new property that can prevent reassignment + const sz = [node.size[0], node.size[1]]; + Object.defineProperty(node, "size", { + get() { + return sz; + }, + set(value) { + if (!node[LOCKED]) { + sz[0] = value[0]; + sz[1] = value[1]; + } + }, + }); + // And then lock each element if required + lockArray(sz, () => !!node[LOCKED]); + } + + app.graph.setDirtyCanvas(true, false); + } + + // Add menu options for lock/unlock + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = getExtraMenuOptions ? getExtraMenuOptions.apply(this, arguments) : undefined; + + options.splice( + options.findIndex((o) => o?.content === "Properties") + 1, + 0, + null, + this[LOCKED] + ? 
{ + content: "Unlock", + callback: () => { + unlockNode(); + }, + } + : { + content: "Lock", + callback: () => lockNode(), + } + ); + + return r; + }; + + // Add the locked flag to serialization + const onSerialize = nodeType.prototype.onSerialize; + nodeType.prototype.onSerialize = function (o) { + if (onSerialize) { + onSerialize.apply(this, arguments); + } + o.locked = this[LOCKED]; + }; + + // On initial configure lock node if required + const onConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function (o) { + if (onConfigure) { + onConfigure.apply(this, arguments); + } + if (o.locked) { + lockNode(this); + } + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js new file mode 100644 index 0000000000000000000000000000000000000000..2409cf1378b85c8966d3483dc997c8fbabc0413a --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/mathExpression.js @@ -0,0 +1,35 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +app.registerExtension({ + name: "pysssss.MathExpression", + init() { + const STRING = ComfyWidgets.STRING; + ComfyWidgets.STRING = function (node, inputName, inputData) { + const r = STRING.apply(this, arguments); + r.widget.dynamicPrompts = inputData?.[1].dynamicPrompts; + return r; + }; + }, + beforeRegisterNodeDef(nodeType) { + if (nodeType.comfyClass === "MathExpression|pysssss") { + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + + const v = app.nodeOutputs?.[this.id + ""]; + if (!this.flags.collapsed && v) { + const text = v.value[0] + ""; + ctx.save(); + ctx.font = "bold 12px sans-serif"; + ctx.fillStyle = "dodgerblue"; + const sz = ctx.measureText(text); + ctx.fillText(text, this.size[0] - sz.width - 5, LiteGraph.NODE_SLOT_HEIGHT * 3); + ctx.restore(); + } + + return r; + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js new file mode 100644 index 0000000000000000000000000000000000000000..43f944f2835f56357b7701c2ef987261ceff9a8b --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/middleClickAddDefaultNode.js @@ -0,0 +1,49 @@ +import { app } from "../../../scripts/app.js"; + +const id = "pysssss.MiddleClickAddDefaultNode"; +const ext = { + name: id, + async setup(app) { + app.ui.settings.addSetting({ + id, + name: "🐍 Middle click slot to add", + defaultValue: "Reroute", + type: "combo", + options: (value) => + [ + ...Object.keys(LiteGraph.registered_node_types) + .filter((k) => k.includes("Reroute")) + .sort((a, b) => { + if (a === "Reroute") return -1; + if (b === "Reroute") return 1; + return a.localeCompare(b); + }), + "[None]", + ].map((m) => ({ + value: m, + text: m, + selected: !value ? 
m === "[None]" : m === value, + })), + onChange(value) { + const enable = value && value !== "[None]"; + if (value === true) { + value = "Reroute"; + } + LiteGraph.middle_click_slot_add_default_node = enable; + if (enable) { + for (const arr of Object.values(LiteGraph.slot_types_default_in).concat( + Object.values(LiteGraph.slot_types_default_out) + )) { + const idx = arr.indexOf(value); + if (idx !== 0) { + arr.splice(idx, 1); + } + arr.unshift(value); + } + } + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js new file mode 100644 index 0000000000000000000000000000000000000000..6b198e6c004a287b1bfbec94862701192152d800 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/modelInfo.js @@ -0,0 +1,266 @@ +import { app } from "../../../scripts/app.js"; +import { $el } from "../../../scripts/ui.js"; +import { ModelInfoDialog } from "./common/modelInfoDialog.js"; + +const MAX_TAGS = 500; + +class LoraInfoDialog extends ModelInfoDialog { + getTagFrequency() { + if (!this.metadata.ss_tag_frequency) return []; + + const datasets = JSON.parse(this.metadata.ss_tag_frequency); + const tags = {}; + for (const setName in datasets) { + const set = datasets[setName]; + for (const t in set) { + if (t in tags) { + tags[t] += set[t]; + } else { + tags[t] = set[t]; + } + } + } + + return Object.entries(tags).sort((a, b) => b[1] - a[1]); + } + + getResolutions() { + let res = []; + if (this.metadata.ss_bucket_info) { + const parsed = JSON.parse(this.metadata.ss_bucket_info); + if (parsed?.buckets) { + for (const { resolution, count } of Object.values(parsed.buckets)) { + res.push([count, `${resolution.join("x")} * ${count}`]); + } + } + } + res = res.sort((a, b) => b[0] - a[0]).map((a) => a[1]); + let r = this.metadata.ss_resolution; + if (r) { + const s = r.split(","); + const w = s[0].replace("(", ""); + const h = s[1].replace(")", ""); + res.push(`${w.trim()}x${h.trim()} (Base res)`); + } else if ((r = this.metadata["modelspec.resolution"])) { + res.push(r + " (Base res"); + } + if (!res.length) { + res.push("⚠️ Unknown"); + } + return res; + } + + getTagList(tags) { + return tags.map((t) => + $el( + "li.pysssss-model-tag", + { + dataset: { + tag: t[0], + }, + $: (el) => { + el.onclick = () => { + el.classList.toggle("pysssss-model-tag--selected"); + }; + }, + }, + [ + $el("p", { + textContent: t[0], + }), + $el("span", { + textContent: t[1], + }), + ] + ) + ); + } + + addTags() { + let tags = this.getTagFrequency(); + let hasMore; + if (tags?.length) { + const c = tags.length; + let list; + if (c > MAX_TAGS) { + tags = tags.slice(0, MAX_TAGS); + hasMore = $el("p", [ + $el("span", { textContent: `⚠️ Only showing first ${MAX_TAGS} tags ` }), + $el("a", { + href: "#", + textContent: `Show all ${c}`, + onclick: () => { + list.replaceChildren(...this.getTagList(this.getTagFrequency())); + hasMore.remove(); + }, + }), + ]); + } + list = $el("ol.pysssss-model-tags-list", this.getTagList(tags)); + this.tags = $el("div", [list]); + } else { + this.tags = $el("p", { textContent: "⚠️ No tag frequency metadata found" }); + } + + this.content.append(this.tags); + + if (hasMore) { + this.content.append(hasMore); + } + } + + async addInfo() { + this.addInfoEntry("Name", this.metadata.ss_output_name || "⚠️ Unknown"); + this.addInfoEntry("Base Model", this.metadata.ss_sd_model_name || "⚠️ Unknown"); + this.addInfoEntry("Clip Skip", this.metadata.ss_clip_skip || "⚠️ Unknown"); + + 
this.addInfoEntry( + "Resolution", + $el( + "select", + this.getResolutions().map((r) => $el("option", { textContent: r })) + ) + ); + + super.addInfo(); + const p = this.addCivitaiInfo(); + this.addTags(); + + const info = await p; + if (info) { + $el( + "p", + { + parent: this.content, + textContent: "Trained Words: ", + }, + [ + $el("pre", { + textContent: info.trainedWords.join(", "), + style: { + whiteSpace: "pre-wrap", + margin: "10px 0", + background: "#222", + padding: "5px", + borderRadius: "5px", + maxHeight: "250px", + overflow: "auto", + }, + }), + ] + ); + $el("div", { + parent: this.content, + innerHTML: info.description, + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + } + + createButtons() { + const btns = super.createButtons(); + + function copyTags(e, tags) { + const textarea = $el("textarea", { + parent: document.body, + style: { + position: "fixed", + }, + textContent: tags.map((el) => el.dataset.tag).join(", "), + }); + textarea.select(); + try { + document.execCommand("copy"); + if (!e.target.dataset.text) { + e.target.dataset.text = e.target.textContent; + } + e.target.textContent = "Copied " + tags.length + " tags"; + setTimeout(() => { + e.target.textContent = e.target.dataset.text; + }, 1000); + } catch (ex) { + prompt("Copy to clipboard: Ctrl+C, Enter", textarea.textContent); + } finally { + document.body.removeChild(textarea); + } + } + + btns.unshift( + $el("button", { + type: "button", + textContent: "Copy Selected", + onclick: (e) => { + copyTags(e, [...this.tags.querySelectorAll(".pysssss-model-tag--selected")]); + }, + }), + $el("button", { + type: "button", + textContent: "Copy All", + onclick: (e) => { + copyTags(e, [...this.tags.querySelectorAll(".pysssss-model-tag")]); + }, + }) + ); + + return btns; + } +} + +class CheckpointInfoDialog extends ModelInfoDialog { + async addInfo() { + super.addInfo(); + const info = await this.addCivitaiInfo(); + if (info) { + this.addInfoEntry("Base Model", info.baseModel || "⚠️ Unknown"); + + $el("div", { + parent: this.content, + innerHTML: info.description, + style: { + maxHeight: "250px", + overflow: "auto", + }, + }); + } + } +} + +const infoHandler = { + LoraLoader: "loras", + "LoraLoader|pysssss": "loras", + CheckpointLoader: "checkpoints", + CheckpointLoaderSimple: "checkpoints", + "CheckpointLoader|pysssss": "checkpoints", +}; + +app.registerExtension({ + name: "pysssss.ModelInfo", + beforeRegisterNodeDef(nodeType) { + const type = infoHandler[nodeType.comfyClass]; + + if (type) { + const cls = type === "loras" ?
LoraInfoDialog : CheckpointInfoDialog; + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + let value = this.widgets[0].value; + if (!value) { + return; + } + if (value.content) { + value = value.content; + } + options.unshift({ + content: "View info...", + callback: async () => { + new cls(value).show(type, value); + }, + }); + + return getExtraMenuOptions?.apply(this, arguments); + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js new file mode 100644 index 0000000000000000000000000000000000000000..d3727724b4ad09454aa77736115d81d6426be9ef --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/nodeFinder.js @@ -0,0 +1,82 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; + +// Adds a menu option to toggle follow the executing node +// Adds a menu option to go to the currently executing node +// Adds a menu option to go to a node by type + +app.registerExtension({ + name: "pysssss.NodeFinder", + setup() { + let followExecution = false; + + const centerNode = (id) => { + if (!followExecution || !id) return; + const node = app.graph.getNodeById(id); + if (!node) return; + app.canvas.centerOnNode(node); + }; + + api.addEventListener("executing", ({ detail }) => centerNode(detail)); + + // Add canvas menu options + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + options.push(null, { + content: followExecution ? "Stop following execution" : "Follow execution", + callback: () => { + if ((followExecution = !followExecution)) { + centerNode(app.runningNodeId); + } + }, + }); + if (app.runningNodeId) { + options.push({ + content: "Show executing node", + callback: () => { + const node = app.graph.getNodeById(app.runningNodeId); + if (!node) return; + app.canvas.centerOnNode(node); + }, + }); + } + + const nodes = app.graph._nodes; + const types = nodes.reduce((p, n) => { + if (n.type in p) { + p[n.type].push(n); + } else { + p[n.type] = [n]; + } + return p; + }, {}); + options.push({ + content: "Go to node", + has_submenu: true, + submenu: { + options: Object.keys(types) + .sort() + .map((t) => ({ + content: t, + has_submenu: true, + submenu: { + options: types[t] + .sort((a, b) => { + return a.pos[0] - b.pos[0]; + }) + .map((n) => ({ + content: `${n.getTitle()} - #${n.id} (${n.pos[0]}, ${n.pos[1]})`, + callback: () => { + app.canvas.centerOnNode(n); + }, + })), + }, + })), + }, + }); + + return options; + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js new file mode 100644 index 0000000000000000000000000000000000000000..e47217044cb5d97c995c6f55c907752e551dcaa4 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/playSound.js @@ -0,0 +1,25 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "pysssss.PlaySound", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "PlaySound|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = async function () { + onExecuted?.apply(this, arguments); + if (this.widgets[0].value === "on empty queue") { + if (app.ui.lastQueueSize !== 0) { + await new Promise((r) => setTimeout(r, 500)); + } + if 
(app.ui.lastQueueSize !== 0) { + return; + } + } + const url = new URL(`assets/notify.mp3`, import.meta.url); + const audio = new Audio(url); + audio.volume = this.widgets[1].value; + audio.play(); + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js new file mode 100644 index 0000000000000000000000000000000000000000..c7c22cf444a5cba6f463f76da38ab0cce4cf92e8 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/presetText.js @@ -0,0 +1,232 @@ +import { app } from "../../../scripts/app.js"; + +// Allows you to manage preset tags for e.g. common negative prompt +// Also performs replacements on any text field e.g. allowing you to use preset text in CLIP Text encode fields + +let replaceRegex; +const id = "pysssss.PresetText.Presets"; + +const getPresets = () => { + let items; + try { + items = JSON.parse(localStorage.getItem(id)); + } catch (error) {} + if (!items || !items.length) { + items = [{ name: "default negative", value: "worst quality" }]; + } + return items; +}; + +let presets = getPresets(); + +app.registerExtension({ + name: "pysssss.PresetText", + setup() { + app.ui.settings.addSetting({ + id: "pysssss.PresetText.ReplacementRegex", + name: "🐍 Preset Text Replacement Regex", + type: "text", + defaultValue: "(?:^|[^\\w])(?<replace>@(?<id>[\\w-]+))", + tooltip: + "The regex should return two named capture groups: id (the name of the preset text to use), replace (the matched text to replace)", + attrs: { + style: { + fontFamily: "monospace", + }, + }, + onChange(value) { + if (!value) { + replaceRegex = null; + return; + } + try { + replaceRegex = new RegExp(value, "g"); + } catch (error) { + alert("Error creating regex for preset text replacement, no replacements will be performed."); + replaceRegex = null; + } + }, + }); + }, + registerCustomNodes() { + class PresetTextNode { + constructor() { + this.isVirtualNode = true; + this.serialize_widgets = true; + this.addOutput("text", "STRING"); + + const widget = this.addWidget("combo", "value", presets[0].name, () => {}, { + values: presets.map((p) => p.name), + }); + this.addWidget("button", "Manage", "Manage", () => { + const container = document.createElement("div"); + Object.assign(container.style, { + display: "grid", + gridTemplateColumns: "1fr 1fr", + gap: "10px", + }); + + const addNew = document.createElement("button"); + addNew.textContent = "Add New"; + addNew.classList.add("pysssss-presettext-addnew"); + Object.assign(addNew.style, { + fontSize: "13px", + gridColumn: "1 / 3", + color: "dodgerblue", + width: "auto", + textAlign: "center", + }); + addNew.onclick = () => { + addRow({ name: "", value: "" }); + }; + container.append(addNew); + + function addRow(p) { + const name = document.createElement("input"); + const nameLbl = document.createElement("label"); + name.value = p.name; + nameLbl.textContent = "Name:"; + nameLbl.append(name); + + const value = document.createElement("input"); + const valueLbl = document.createElement("label"); + value.value = p.value; + valueLbl.textContent = "Value:"; + valueLbl.append(value); + + addNew.before(nameLbl, valueLbl); + } + for (const p of presets) { + addRow(p); + } + + const help = document.createElement("span"); + help.textContent = "To remove a preset set the name or value to blank"; + help.style.gridColumn = "1 / 3"; + container.append(help); + + dialog.show(""); + dialog.textElement.append(container); + }); + + const dialog = new app.ui.dialog.constructor(); +
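+        // Note: app.ui.dialog.constructor gives a fresh instance of ComfyUI's dialog
+        // class, so showing the preset manager doesn't clobber the shared app dialog;
+        // the "comfy-settings" class added below appears to be purely for styling.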
dialog.element.classList.add("comfy-settings"); + + const closeButton = dialog.element.querySelector("button"); + closeButton.textContent = "CANCEL"; + const saveButton = document.createElement("button"); + saveButton.textContent = "SAVE"; + saveButton.onclick = function () { + const inputs = dialog.element.querySelectorAll("input"); + const p = []; + for (let i = 0; i < inputs.length; i += 2) { + const n = inputs[i]; + const v = inputs[i + 1]; + if (!n.value.trim() || !v.value.trim()) { + continue; + } + p.push({ name: n.value, value: v.value }); + } + + widget.options.values = p.map((p) => p.name); + if (!widget.options.values.includes(widget.value)) { + widget.value = widget.options.values[0]; + } + + presets = p; + localStorage.setItem(id, JSON.stringify(presets)); + + dialog.close(); + }; + + closeButton.before(saveButton); + + this.applyToGraph = function (workflow) { + // For each output link copy our value over the original widget value + if (this.outputs[0].links && this.outputs[0].links.length) { + for (const l of this.outputs[0].links) { + const link_info = app.graph.links[l]; + const outNode = app.graph.getNodeById(link_info.target_id); + const outIn = outNode && outNode.inputs && outNode.inputs[link_info.target_slot]; + if (outIn.widget) { + const w = outNode.widgets.find((w) => w.name === outIn.widget.name); + if (!w) continue; + const preset = presets.find((p) => p.name === widget.value); + if (!preset) { + const msg = `Preset text '${widget.value}' not found. Please fix this and queue again.`; + alert(msg); + throw new Error(msg); + } + w.value = preset.value; + } + } + } + }; + } + } + + LiteGraph.registerNodeType( + "PresetText|pysssss", + Object.assign(PresetTextNode, { + title: "Preset Text 🐍", + }) + ); + + PresetTextNode.category = "utils"; + }, + nodeCreated(node) { + if (node.widgets) { + // Locate dynamic prompt text widgets + const widgets = node.widgets.filter((n) => n.type === "customtext" || n.type === "text"); + for (const widget of widgets) { + const callbacks = [ + () => { + let prompt = widget.value; + if (replaceRegex && typeof prompt.replace !== 'undefined') { + prompt = prompt.replace(replaceRegex, (match, p1, p2, index, text, groups) => { + if (!groups.replace || !groups.id) return match; // No match, bad regex? 
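+                  // groups.id is the preset name with whitespace written as dashes, so a
+                  // prompt such as "photo of a cat @default-negative" (hypothetical example)
+                  // resolves the built-in "default negative" preset from getPresets().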
+ + const preset = presets.find((p) => p.name.replaceAll(/\s/g, "-") === groups.id); + if (!preset) return match; // Invalid name + + const pos = match.indexOf(groups.replace); + return match.substring(0, pos) + preset.value; + }); + } + return prompt; + }, + ]; + if (widget.serializeValue) { + callbacks.push(widget.serializeValue); + } + + let called = false; + const serializeValue = async (workflowNode, widgetIndex) => { + const widgetValue = widget.value; + if (called) return widgetValue; + called = true; + + for (const cb of callbacks) { + widget.value = await cb(workflowNode, widgetIndex); + } + + const prompt = widget.value; + widget.value = widgetValue; + + called = false; + + return prompt; + }; + + Object.defineProperty(widget, "serializeValue", { + get() { + return serializeValue; + }, + set(cb) { + callbacks.push(cb); + }, + }); + } + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js new file mode 100644 index 0000000000000000000000000000000000000000..a52581b32ed9cc6af01f016774518d17693b2132 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/quickNodes.js @@ -0,0 +1,196 @@ +import { app } from "../../../scripts/app.js"; + +// Adds a bunch of context menu entries for quickly adding common steps + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +function getOrAddVAELoader(node) { + let vaeNode = app.graph._nodes.find((n) => n.type === "VAELoader"); + if (!vaeNode) { + vaeNode = addNode("VAELoader", node); + } + return vaeNode; +} + +function addNode(name, nextTo, options) { + options = { select: true, shiftY: 0, before: false, ...(options || {}) }; + const node = LiteGraph.createNode(name); + app.graph.add(node); + node.pos = [ + options.before ? 
nextTo.pos[0] - node.size[0] - 30 : nextTo.pos[0] + nextTo.size[0] + 30, + nextTo.pos[1] + options.shiftY, + ]; + if (options.select) { + app.canvas.selectNode(node, false); + } + return node; +} + +app.registerExtension({ + name: "pysssss.QuickNodes", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.input && nodeData.input.required) { + const keys = Object.keys(nodeData.input.required); + for (let i = 0; i < keys.length; i++) { + if (nodeData.input.required[keys[i]][0] === "VAE") { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Use VAE", + callback: () => { + getOrAddVAELoader(this).connect(0, this, i); + }, + }); + }); + break; + } + } + } + + if (nodeData.name === "KSampler") { + addMenuHandler(nodeType, function (_, options) { + options.unshift( + { + content: "Add Blank Input", + callback: () => { + const imageNode = addNode("EmptyLatentImage", this, { before: true }); + imageNode.connect(0, this, 3); + }, + }, + { + content: "Add Hi-res Fix", + callback: () => { + const upscaleNode = addNode("LatentUpscale", this); + this.connect(0, upscaleNode, 0); + + const sampleNode = addNode("KSampler", upscaleNode); + + for (let i = 0; i < 3; i++) { + const l = this.getInputLink(i); + if (l) { + app.graph.getNodeById(l.origin_id).connect(l.origin_slot, sampleNode, i); + } + } + + upscaleNode.connect(0, sampleNode, 3); + }, + }, + { + content: "Add 2nd Pass", + callback: () => { + const upscaleNode = addNode("LatentUpscale", this); + this.connect(0, upscaleNode, 0); + + const ckptNode = addNode("CheckpointLoaderSimple", this); + const sampleNode = addNode("KSampler", ckptNode); + + const positiveLink = this.getInputLink(1); + const negativeLink = this.getInputLink(2); + const positiveNode = positiveLink + ? app.graph.add(app.graph.getNodeById(positiveLink.origin_id).clone()) + : addNode("CLIPTextEncode"); + const negativeNode = negativeLink + ? app.graph.add(app.graph.getNodeById(negativeLink.origin_id).clone()) + : addNode("CLIPTextEncode"); + + ckptNode.connect(0, sampleNode, 0); + ckptNode.connect(1, positiveNode, 0); + ckptNode.connect(1, negativeNode, 0); + positiveNode.connect(0, sampleNode, 1); + negativeNode.connect(0, sampleNode, 2); + upscaleNode.connect(0, sampleNode, 3); + }, + }, + { + content: "Add Save Image", + callback: () => { + const decodeNode = addNode("VAEDecode", this); + this.connect(0, decodeNode, 0); + + getOrAddVAELoader(decodeNode).connect(0, decodeNode, 1); + + const saveNode = addNode("SaveImage", decodeNode); + decodeNode.connect(0, saveNode, 0); + }, + } + ); + }); + } + + if (nodeData.name === "CheckpointLoaderSimple") { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Add Clip Skip", + callback: () => { + const clipSkipNode = addNode("CLIPSetLastLayer", this); + const clipLinks = this.outputs[1].links ? this.outputs[1].links.map((l) => ({ ...graph.links[l] })) : []; + + this.disconnectOutput(1); + this.connect(1, clipSkipNode, 0); + + for (const clipLink of clipLinks) { + clipSkipNode.connect(0, clipLink.target_id, clipLink.target_slot); + } + }, + }); + }); + } + + if ( + nodeData.name === "CheckpointLoaderSimple" || + nodeData.name === "CheckpointLoader" || + nodeData.name === "CheckpointLoader|pysssss" || + nodeData.name === "LoraLoader" || + nodeData.name === "LoraLoader|pysssss" + ) { + addMenuHandler(nodeType, function (_, options) { + function addLora(type) { + const loraNode = addNode(type, this); + + const modelLinks = this.outputs[0].links ? 
this.outputs[0].links.map((l) => ({ ...graph.links[l] })) : []; + const clipLinks = this.outputs[1].links ? this.outputs[1].links.map((l) => ({ ...graph.links[l] })) : []; + + this.disconnectOutput(0); + this.disconnectOutput(1); + + this.connect(0, loraNode, 0); + this.connect(1, loraNode, 1); + + for (const modelLink of modelLinks) { + loraNode.connect(0, modelLink.target_id, modelLink.target_slot); + } + + for (const clipLink of clipLinks) { + loraNode.connect(1, clipLink.target_id, clipLink.target_slot); + } + } + options.unshift( + { + content: "Add LoRA", + callback: () => addLora.call(this, "LoraLoader"), + }, + { + content: "Add 🐍 LoRA", + callback: () => addLora.call(this, "LoraLoader|pysssss"), + }, + { + content: "Add Prompts", + callback: () => { + const positiveNode = addNode("CLIPTextEncode", this); + const negativeNode = addNode("CLIPTextEncode", this, { shiftY: positiveNode.size[1] + 30 }); + + this.connect(1, positiveNode, 0); + this.connect(1, negativeNode, 0); + }, + } + ); + }); + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js new file mode 100644 index 0000000000000000000000000000000000000000..23432c4d4fa89f899312a7fa4bd83a04c635a6b5 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/repeater.js @@ -0,0 +1,123 @@ +import { app } from "../../../scripts/app.js"; + +const REPEATER = "Repeater|pysssss"; + +app.registerExtension({ + name: "pysssss.Repeater", + init() { + const graphToPrompt = app.graphToPrompt; + app.graphToPrompt = async function () { + const res = await graphToPrompt.apply(this, arguments); + + const id = Date.now() + "_"; + let u = 0; + + let newNodes = {}; + const newRepeaters = {}; + for (const nodeId in res.output) { + let output = res.output[nodeId]; + if (output.class_type === REPEATER) { + const isMulti = output.inputs.output === "multi"; + if (output.inputs.node_mode === "create") { + // We need to clone the input for every repeat + const orig = res.output[output.inputs.source[0]]; + if (isMulti) { + if (!newRepeaters[nodeId]) { + newRepeaters[nodeId] = []; + newRepeaters[nodeId][output.inputs.repeats - 1] = nodeId; + } + } + for (let i = 0; i < output.inputs.repeats - 1; i++) { + const clonedInputId = id + ++u; + + if (isMulti) { + // If multi create we need to clone the repeater too + newNodes[clonedInputId] = structuredClone(orig); + + output = structuredClone(output); + + const clonedRepeaterId = id + ++u; + newNodes[clonedRepeaterId] = output; + output.inputs["source"][0] = clonedInputId; + + newRepeaters[nodeId][i] = clonedRepeaterId; + } else { + newNodes[clonedInputId] = orig; + } + output.inputs[clonedInputId] = [clonedInputId, output.inputs.source[1]]; + } + } else if (isMulti) { + newRepeaters[nodeId] = Array(output.inputs.repeats).fill(nodeId); + } + } + } + + Object.assign(res.output, newNodes); + newNodes = {}; + + for (const nodeId in res.output) { + const output = res.output[nodeId]; + for (const k in output.inputs) { + const v = output.inputs[k]; + if (v instanceof Array) { + const repeaterId = v[0]; + const source = newRepeaters[repeaterId]; + if (source) { + v[0] = source.pop(); + v[1] = 0; + } + } + } + } + + // Object.assign(res.output, newNodes); + + return res; + }; + }, + beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === REPEATER) { + const SETUP_OUTPUTS = Symbol(); + nodeType.prototype[SETUP_OUTPUTS] = function (repeats) { + if (repeats == null) { + repeats = this.widgets[0].value; + } + while 
(this.outputs.length > repeats) { + this.removeOutput(repeats); + } + const id = Date.now() + "_"; + let u = 0; + while (this.outputs.length < repeats) { + this.addOutput(id + ++u, "*", { label: "*" }); + } + }; + + const onAdded = nodeType.prototype.onAdded; + nodeType.prototype.onAdded = function () { + const self = this; + const repeatsCb = this.widgets[0].callback; + this.widgets[0].callback = async function () { + const v = (await repeatsCb?.apply(this, arguments)) ?? this.value; + if (self.widgets[1].value === "multi") { + self[SETUP_OUTPUTS](v); + } + return v; + }; + + const outputCb = this.widgets[1].callback; + this.widgets[1].callback = async function () { + const v = (await outputCb?.apply(this, arguments)) ?? this.value; + if (v === "single") { + self.outputs[0].shape = 6; + self[SETUP_OUTPUTS](1); + } else { + delete self.outputs[0].shape; + self[SETUP_OUTPUTS](); + } + return v; + }; + return onAdded?.apply(this, arguments); + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js new file mode 100644 index 0000000000000000000000000000000000000000..1b948942a8d89d07e7bab6e2cc1954c78df98f00 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/reroutePrimitive.js @@ -0,0 +1,342 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +const REROUTE_PRIMITIVE = "ReroutePrimitive|pysssss"; +const MULTI_PRIMITIVE = "MultiPrimitive|pysssss"; +const LAST_TYPE = Symbol("LastType"); + +app.registerExtension({ + name: "pysssss.ReroutePrimitive", + init() { + // On graph configure, fire onGraphConfigured to create widgets + const graphConfigure = LGraph.prototype.configure; + LGraph.prototype.configure = function () { + const r = graphConfigure.apply(this, arguments); + for (const n of app.graph._nodes) { + if (n.type === REROUTE_PRIMITIVE) { + n.onGraphConfigured(); + } + } + + return r; + }; + + const graphToPrompt = app.graphToPrompt; + app.graphToPrompt = async function () { + const res = await graphToPrompt.apply(this, arguments); + + const multiOutputs = []; + for (const nodeId in res.output) { + const output = res.output[nodeId]; + if (output.class_type === MULTI_PRIMITIVE) { + multiOutputs.push({ id: nodeId, inputs: output.inputs }); + } + } + + function permute(outputs) { + function generatePermutations(inputs, currentIndex, currentPermutation, result) { + if (currentIndex === inputs.length) { + result.push({ ...currentPermutation }); + return; + } + + const input = inputs[currentIndex]; + + for (const k in input) { + currentPermutation[currentIndex] = input[k]; + generatePermutations(inputs, currentIndex + 1, currentPermutation, result); + } + } + + const inputs = outputs.map((output) => output.inputs); + const result = []; + const current = new Array(inputs.length); + + generatePermutations(inputs, 0, current, result); + + return outputs.map((output, index) => ({ + ...output, + inputs: result.reduce((p, permutation) => { + const count = Object.keys(p).length; + p["value" + (count || "")] = permutation[index]; + return p; + }, {}), + })); + } + + const permutations = permute(multiOutputs); + for (let i = 0; i < permutations.length; i++) { + res.output[multiOutputs[i].id].inputs = permutations[i].inputs; + } + + return res; + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + function addOutputHandler() { + // Finds the first non reroute output node down the chain + 
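+      // e.g. Primitive -> Reroute -> Reroute -> KSampler: the search below follows
+      // outgoing links until it reaches a non-reroute node and returns that link,
+      // so widget type/config can be copied from a real input instead of a reroute.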
nodeType.prototype.getFirstReroutedOutput = function (slot) { + if (nodeData.name === MULTI_PRIMITIVE) { + slot = 0; + } + const links = this.outputs[slot].links; + if (!links) return null; + + const search = []; + for (const l of links) { + const link = app.graph.links[l]; + if (!link) continue; + + const node = app.graph.getNodeById(link.target_id); + if (node.type !== REROUTE_PRIMITIVE && node.type !== MULTI_PRIMITIVE) { + return { node, link }; + } + search.push({ node, link }); + } + + for (const { link, node } of search) { + const r = node.getFirstReroutedOutput(link.target_slot); + if (r) { + return r; + } + } + }; + } + + if (nodeData.name === REROUTE_PRIMITIVE) { + const configure = nodeType.prototype.configure || LGraphNode.prototype.configure; + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + const onAdded = nodeType.prototype.onAdded; + + nodeType.title_mode = LiteGraph.NO_TITLE; + + function hasAnyInput(node) { + for (const input of node.inputs) { + if (input.link) { + return true; + } + } + return false; + } + + // Remove input text + nodeType.prototype.onAdded = function () { + onAdded?.apply(this, arguments); + this.inputs[0].label = ""; + this.outputs[0].label = "value"; + this.setSize(this.computeSize()); + }; + + // Restore any widgets + nodeType.prototype.onGraphConfigured = function () { + if (hasAnyInput(this)) return; + + const outputNode = this.getFirstReroutedOutput(0); + if (outputNode) { + this.checkPrimitiveWidget(outputNode); + } + }; + + // Check if we need to create (or remove) a widget on the node + nodeType.prototype.checkPrimitiveWidget = function ({ node, link }) { + let widgetType = link.type; + let targetLabel = widgetType; + const input = node.inputs[link.target_slot]; + if (input.widget?.config?.[0] instanceof Array) { + targetLabel = input.widget.name; + widgetType = "COMBO"; + } + + if (widgetType in ComfyWidgets) { + if (!this.widgets?.length) { + let v; + if (this.widgets_values?.length) { + v = this.widgets_values[0]; + } + let config = [link.type, {}]; + if (input.widget) { + config = input.widget.config; + } + const { widget } = ComfyWidgets[widgetType](this, "value", config, app); + if (v !== undefined && (!this[LAST_TYPE] || this[LAST_TYPE] === widgetType)) { + widget.value = v; + } + this[LAST_TYPE] = widgetType; + } + } else if (this.widgets) { + this.widgets.length = 0; + } + + return targetLabel; + }; + + // Finds all input nodes from the current reroute + nodeType.prototype.getReroutedInputs = function (slot) { + let nodes = [{ node: this }]; + let node = this; + while (node?.type === REROUTE_PRIMITIVE) { + const input = node.inputs[slot]; + if (input.link) { + const link = app.graph.links[input.link]; + node = app.graph.getNodeById(link.origin_id); + slot = link.origin_slot; + nodes.push({ + node, + link, + }); + } else { + node = null; + } + } + + return nodes; + }; + + addOutputHandler(); + + // Update the type of all reroutes in a chain + nodeType.prototype.changeRerouteType = function (slot, type, label) { + const color = LGraphCanvas.link_type_colors[type]; + const output = this.outputs[slot]; + this.inputs[slot].label = " "; + output.label = label || (type === "*" ? 
"value" : type); + output.type = type; + + // Process all linked outputs + for (const linkId of output.links || []) { + const link = app.graph.links[linkId]; + if (!link) continue; + link.color = color; + const node = app.graph.getNodeById(link.target_id); + if (node.changeRerouteType) { + // Recursively update reroutes + node.changeRerouteType(link.target_slot, type, label); + } else { + // Validate links to 'real' nodes + const theirType = node.inputs[link.target_slot].type; + if (theirType !== type && theirType !== "*") { + node.disconnectInput(link.target_slot); + } + } + } + + if (this.inputs[slot].link) { + const link = app.graph.links[this.inputs[slot].link]; + if (link) link.color = color; + } + }; + + // Override configure so we can flag that we are configuring to avoid link validation breaking + let configuring = false; + nodeType.prototype.configure = function () { + configuring = true; + const r = configure?.apply(this, arguments); + configuring = false; + + return r; + }; + + Object.defineProperty(nodeType, "title_mode", { + get() { + return app.canvas.current_node?.widgets?.length ? LiteGraph.NORMAL_TITLE : LiteGraph.NO_TITLE; + }, + }); + + nodeType.prototype.onConnectionsChange = function (type, _, connected, link_info) { + // If configuring treat everything as OK as links may not be set by litegraph yet + if (configuring) return; + + const isInput = type === LiteGraph.INPUT; + const slot = isInput ? link_info.target_slot : link_info.origin_slot; + + let targetLabel = null; + let targetNode = null; + let targetType = "*"; + let targetSlot = slot; + + const inputPath = this.getReroutedInputs(slot); + const rootInput = inputPath[inputPath.length - 1]; + const outputNode = this.getFirstReroutedOutput(slot); + if (rootInput.node.type === REROUTE_PRIMITIVE) { + // Our input node is a reroute, so see if we have an output + if (outputNode) { + targetType = outputNode.link.type; + } else if (rootInput.node.widgets) { + rootInput.node.widgets.length = 0; + } + targetNode = rootInput; + targetSlot = rootInput.link?.target_slot ?? 
slot; + } else { + // We have a real input, so we want to use that type + targetNode = inputPath[inputPath.length - 2]; + targetType = rootInput.node.outputs[rootInput.link.origin_slot].type; + targetSlot = rootInput.link.target_slot; + } + + if (this.widgets && inputPath.length > 1) { + // We have an input node so remove our widget + this.widgets.length = 0; + } + + if (outputNode && rootInput.node.checkPrimitiveWidget) { + // We have an output, check if we need to create a widget + targetLabel = rootInput.node.checkPrimitiveWidget(outputNode); + } + + // Trigger an update of the type to all child nodes + targetNode.node.changeRerouteType(targetSlot, targetType, targetLabel); + + return onConnectionsChange?.apply(this, arguments); + }; + + // When collapsed fix the size to just the dot + const computeSize = nodeType.prototype.computeSize || LGraphNode.prototype.computeSize; + nodeType.prototype.computeSize = function () { + const r = computeSize.apply(this, arguments); + if (this.flags?.collapsed) { + return [1, 25]; + } else if (this.widgets?.length) { + return r; + } else { + let w = 75; + if (this.outputs?.[0]?.label) { + const t = LiteGraph.NODE_TEXT_SIZE * this.outputs[0].label.length * 0.6 + 30; + if (t > w) { + w = t; + } + } + return [w, r[1]]; + } + }; + + // On collapse shrink the node to just a dot + const collapse = nodeType.prototype.collapse || LGraphNode.prototype.collapse; + nodeType.prototype.collapse = function () { + collapse.apply(this, arguments); + this.setSize(this.computeSize()); + requestAnimationFrame(() => { + this.setDirtyCanvas(true, true); + }); + }; + + // Shift the bounding area up slightly as LiteGraph miscalculates it for collapsed nodes + nodeType.prototype.onBounding = function (area) { + if (this.flags?.collapsed) { + area[1] -= 15; + } + }; + } else if (nodeData.name === MULTI_PRIMITIVE) { + addOutputHandler(); + nodeType.prototype.onConnectionsChange = function (type, _, connected, link_info) { + for (let i = 0; i < this.inputs.length - 1; i++) { + if (!this.inputs[i].link) { + this.removeInput(i--); + } + } + if (this.inputs[this.inputs.length - 1].link) { + this.addInput("v" + +new Date(), this.inputs[0].type).label = "value"; + } + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js new file mode 100644 index 0000000000000000000000000000000000000000..c38b5e74d4e6a2298e4f429df5a5f99a06947ed2 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/showImageOnMenu.js @@ -0,0 +1,79 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; +import { $el } from "../../../scripts/ui.js"; + +const id = "pysssss.ShowImageOnMenu"; +const ext = { + name: id, + async setup(app) { + let enabled = true; + let nodeId = null; + const img = $el("img", { + style: { + width: "100%", + height: "150px", + objectFit: "contain", + }, + }); + const link = $el( + "a", + { + style: { + width: "100%", + height: "150px", + marginTop: "10px", + order: 100, // Place this item last (until someone else has a higher order) + display: "none", + }, + href: "#", + onclick: (e) => { + e.stopPropagation(); + e.preventDefault(); + const node = app.graph.getNodeById(nodeId); + if (!node) return; + app.canvas.centerOnNode(node); + app.canvas.setZoom(1); + }, + }, + [img] + ); + + app.ui.menuContainer.append(link); + + const show = (src, node) => { + img.src = src; + nodeId = Number(node); + link.style.display = "unset"; + }; + + 
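+    // Two sources feed this preview: finished images via the "executed" event below,
+    // and live sampler previews via "b_preview", which arrives as a Blob and is
+    // displayed through URL.createObjectURL further down.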
api.addEventListener("executed", ({ detail }) => { + if (!enabled) return; + const images = detail?.output?.images; + if (!images) return; + const format = app.getPreviewFormatParam(); + const src = `/view?filename=${encodeURIComponent(images[0].filename)}&type=${ + images[0].type + }&subfolder=${encodeURIComponent(images[0].subfolder)}&t=${+new Date()}${format}`; + show(src, detail.node); + }); + + api.addEventListener("b_preview", ({ detail }) => { + if (!enabled) return; + show(URL.createObjectURL(detail), app.runningNodeId); + }); + + app.ui.settings.addSetting({ + id, + name: "🐍 Show Image On Menu", + defaultValue: true, + type: "boolean", + onChange(value) { + enabled = value; + + if (!enabled) link.style.display = "none"; + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js new file mode 100644 index 0000000000000000000000000000000000000000..a16540e54c015660747308ef5d134b3751a46758 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/showText.js @@ -0,0 +1,57 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +// Displays input text on a node + +app.registerExtension({ + name: "pysssss.ShowText", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "ShowText|pysssss") { + function populate(text) { + if (this.widgets) { + const pos = this.widgets.findIndex((w) => w.name === "text"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = pos; + } + } + + for (const list of text) { + const w = ComfyWidgets["STRING"](this, "text", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = list; + } + + requestAnimationFrame(() => { + const sz = this.computeSize(); + if (sz[0] < this.size[0]) { + sz[0] = this.size[0]; + } + if (sz[1] < this.size[1]) { + sz[1] = this.size[1]; + } + this.onResize?.(sz); + app.graph.setDirtyCanvas(true, false); + }); + } + + // When the node is executed we will be sent the input text, display this in the widget + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + populate.call(this, message.text); + }; + + const onConfigure = nodeType.prototype.onConfigure; + nodeType.prototype.onConfigure = function () { + onConfigure?.apply(this, arguments); + if (this.widgets_values?.length) { + populate.call(this, this.widgets_values); + } + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js new file mode 100644 index 0000000000000000000000000000000000000000..6cdc3181d697ded420c87cdf7ae6d69de68941cc --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/snapToGrid.js @@ -0,0 +1,58 @@ +import { app } from "../../../scripts/app.js"; + +let setting; +const id = "pysssss.SnapToGrid"; +const ext = { + name: id, + init() { + setting = app.ui.settings.addSetting({ + id, + name: "🐍 Always snap to grid", + defaultValue: false, + type: "boolean", + onChange(value) { + app.canvas.align_to_grid = value; + }, + }); + + // We need to register our hooks after the core snap to grid extension runs + // Do this from the graph configure function so we still get onNodeAdded calls + const configure = LGraph.prototype.configure; + 
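+    // Core ComfyUI only snaps nodes while shift is held; the drawNode/onResize
+    // wrappers installed below briefly force app.shiftDown = true so snapping
+    // applies whenever the "Always snap to grid" setting is enabled.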
LGraph.prototype.configure = function () { + // Override drawNode to draw the drop position + const drawNode = LGraphCanvas.prototype.drawNode; + LGraphCanvas.prototype.drawNode = function () { + if (setting?.value) { + const shift = app.shiftDown; + app.shiftDown = true; + const r = drawNode.apply(this, arguments); + app.shiftDown = shift; + return r; + } + return drawNode.apply(this, arguments); + }; + + // Override node added to add a resize handler to force grid alignment + const onNodeAdded = app.graph.onNodeAdded; + app.graph.onNodeAdded = function (node) { + const r = onNodeAdded?.apply(this, arguments); + const onResize = node.onResize; + node.onResize = function () { + if (setting?.value) { + const shift = app.shiftDown; + app.shiftDown = true; + const r = onResize?.apply(this, arguments); + app.shiftDown = shift; + return r; + } + return onResize?.apply(this, arguments); + }; + return r; + }; + + return configure.apply(this, arguments); + }; + }, +}; + +app.registerExtension(ext); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js new file mode 100644 index 0000000000000000000000000000000000000000..cc150dff04a2e94b9bc768f6331964c13a96ddbe --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/stringFunction.js @@ -0,0 +1,33 @@ +import { app } from "../../../scripts/app.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +// Displays input text on a node + +app.registerExtension({ + name: "pysssss.StringFunction", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === "StringFunction|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + + if (this.widgets) { + const pos = this.widgets.findIndex((w) => w.name === "result"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = pos; + } + } + + const w = ComfyWidgets["STRING"](this, "result", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = message.text; + + this.onResize?.(this.size); + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js new file mode 100644 index 0000000000000000000000000000000000000000..40f6dcc851780077c79e7e142fd0f97104e85a09 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/swapResolution.js @@ -0,0 +1,30 @@ +import { app } from "../../../scripts/app.js"; +app.registerExtension({ + name: "pysssss.SwapResolution", + async beforeRegisterNodeDef(nodeType, nodeData) { + const inputs = { ...nodeData.input?.required, ...nodeData.input?.optional }; + if (inputs.width && inputs.height) { + const origGetExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = origGetExtraMenuOptions?.apply?.(this, arguments); + + options.push( + { + content: "Swap width/height", + callback: () => { + const w = this.widgets.find((w) => w.name === "width"); + const h = this.widgets.find((w) => w.name === "height"); + const a = w.value; + w.value = h.value; + h.value = a; + app.graph.setDirtyCanvas(true); + }, + }, + null + ); + + return r; + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/touchEvents.js 
b/custom_nodes/ComfyUI-Custom-Scripts/web/js/touchEvents.js new file mode 100644 index 0000000000000000000000000000000000000000..e983e344582208d8566cd7a6390f81db983faa62 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/touchEvents.js @@ -0,0 +1,74 @@ +import { app } from "../../../scripts/app.js"; + +// Adds mapping of touch events to mouse events for mobile. This isn't great, but it is somewhat usable + +app.registerExtension({ + name: "pysssss.TouchEvents", + setup() { + let touchStart = null; + let touchType = 0; + + function fireEvent(originalEvent, type) { + const fakeEvent = document.createEvent("MouseEvent"); + const touch = originalEvent.changedTouches[0]; + fakeEvent.initMouseEvent( + type, + true, + true, + window, + 1, + touch.screenX, + touch.screenY, + touch.clientX, + touch.clientY, + false, + false, + false, + false, + 0, + null + ); + + touch.target.dispatchEvent(fakeEvent); + if (fakeEvent.defaultPrevented) { + originalEvent.preventDefault(); + } + } + + document.addEventListener( + "touchstart", + (e) => { + // Support tap as click if it completes within a delay + if (touchStart) { + clearTimeout(touchStart); + } + touchStart = setTimeout(() => { + touchStart = null; + }, 100); + + // Left or right button down + touchType = e.touches.length === 1 ? 0 : 2; + + fireEvent(e, "mousedown"); + }, + true + ); + + document.addEventListener("touchmove", (e) => fireEvent(e, "mousemove"), true); + + document.addEventListener( + "touchend", + (e) => { + const isClick = touchStart; + if (isClick) { + // We are within the touch start delay so fire this as a click + clearTimeout(touchStart); + fireEvent(e, "click"); + } + fireEvent(e, "mouseup"); + touchType = 0; + }, + true + ); + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js new file mode 100644 index 0000000000000000000000000000000000000000..a92aa51095bc474b3cf80697aa238ad8814b4c3d --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/useNumberInputPrompt.js @@ -0,0 +1,36 @@ +import { app } from "../../../scripts/app.js"; + +const id = "pysssss.UseNumberInputPrompt"; +const ext = { + name: id, + async setup(app) { + const prompt = LGraphCanvas.prototype.prompt; + + const setting = app.ui.settings.addSetting({ + id, + name: "🐍 Use number input on value entry", + defaultValue: false, + type: "boolean", + }); + + LGraphCanvas.prototype.prompt = function () { + const dialog = prompt.apply(this, arguments); + if (setting.value && typeof arguments[1] === "number") { + // If this should be a number then update the input + const input = dialog.querySelector("input"); + input.type = "number"; + + // Add constraints + const widget = app.canvas.node_widget?.[1]; + if (widget?.options) { + for (const prop of ["min", "max", "step"]) { + if (widget.options[prop]) input[prop] = widget.options[prop]; + } + } + } + return dialog; + }; + }, +}; + +app.registerExtension(ext); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js new file mode 100644 index 0000000000000000000000000000000000000000..83bebf4f9252011977b9b0405fb660255efc0166 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/widgetDefaults.js @@ -0,0 +1,251 @@ +import { app } from "../../../scripts/app.js"; +import { $el, ComfyDialog } from "../../../scripts/ui.js"; + +// Allows you to specify custom default values for any widget on any node + +const id =
"pysssss.WidgetDefaults"; +const nodeDataKey = Symbol(); + +app.registerExtension({ + name: id, + beforeRegisterNodeDef(nodeType, nodeData) { + nodeType[nodeDataKey] = nodeData; + }, + setup() { + let defaults; + let setting; + + const applyDefaults = (defaults) => { + for (const node of Object.values(LiteGraph.registered_node_types)) { + const nodeData = node[nodeDataKey]; + if (!nodeData) continue; + const nodeDefaults = defaults[node.type]; + if (!nodeDefaults) continue; + const inputs = { ...(nodeData.input?.required || {}), ...(nodeData.input?.optional || {}) }; + + for (const w in nodeDefaults) { + const widgetDef = inputs[w]; + if (widgetDef) { + let v = nodeDefaults[w]; + if (widgetDef[0] === "INT" || widgetDef[0] === "FLOAT") { + v = +v; + } + if (widgetDef[1]) { + widgetDef[1].default = v; + } else { + widgetDef[1] = { default: v }; + } + } + } + } + }; + + const getDefaults = () => { + let items; + try { + items = JSON.parse(setting.value); + items = items.reduce((p, n) => { + if (!p[n.node]) p[n.node] = {}; + p[n.node][n.widget] = n.value; + return p; + }, {}); + } catch (error) {} + if (!items) { + items = {}; + } + applyDefaults(items); + return items; + }; + + const onNodeAdded = app.graph.onNodeAdded; + app.graph.onNodeAdded = function (node) { + onNodeAdded?.apply?.(this, arguments); + + // See if we have any defaults for this type of node + const nodeDefaults = defaults[node.constructor.type]; + if (!nodeDefaults) return; + + // Dont run if they are pre-configured nodes from load/pastes + const stack = new Error().stack; + if (stack.includes("pasteFromClipboard") || stack.includes("loadGraphData")) { + return; + } + + for (const k in nodeDefaults) { + if (k.startsWith("property.")) { + const name = k.substring(9); + let v = nodeDefaults[k]; + // Special handling for some built in values + if (name in node || ["color", "bgcolor", "title"].includes(name)) { + node[name] = v; + } else { + // Try using the correct type + if (!node.properties) node.properties = {}; + if (typeof node.properties[name] === "number") v = +v; + else if (typeof node.properties[name] === "boolean") v = v === "true"; + else if (v === "true") v = true; + + node.properties[name] = v; + } + } + } + }; + + class WidgetDefaultsDialog extends ComfyDialog { + constructor() { + super(); + this.element.classList.add("comfy-manage-templates"); + this.grid = $el( + "div", + { + style: { + display: "grid", + gridTemplateColumns: "1fr auto auto auto", + gap: "5px", + }, + className: "pysssss-widget-defaults", + }, + [ + $el("label", { + textContent: "Node Class", + }), + $el("label", { + textContent: "Widget Name", + }), + $el("label", { + textContent: "Default Value", + }), + $el("label"), + (this.rows = $el("div", { + style: { + display: "contents", + }, + })), + ] + ); + } + + createButtons() { + const btns = super.createButtons(); + btns[0].textContent = "Cancel"; + btns.unshift( + $el("button", { + type: "button", + textContent: "Add New", + onclick: () => this.addRow(), + }), + $el("button", { + type: "button", + textContent: "Save", + onclick: () => this.save(), + }) + ); + return btns; + } + + addRow(node = "", widget = "", value = "") { + let nameInput; + this.rows.append( + $el( + "div", + { + style: { + display: "contents", + }, + className: "pysssss-widget-defaults-row", + }, + [ + $el("input", { + placeholder: "e.g. CheckpointLoaderSimple", + value: node, + }), + $el("input", { + placeholder: "e.g. ckpt_name", + value: widget, + $: (el) => (nameInput = el), + }), + $el("input", { + placeholder: "e.g. 
myBestModel.safetensors", + value, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", + fontWeight: "normal", + }, + onclick: (e) => { + nameInput.value = ""; + e.target.parentElement.style.display = "none"; + }, + }), + ] + ) + ); + } + + save() { + const rows = this.rows.children; + const items = []; + + for (const row of rows) { + const inputs = row.querySelectorAll("input"); + const node = inputs[0].value.trim(); + const widget = inputs[1].value.trim(); + const value = inputs[2].value; + if (node && widget) { + items.push({ node, widget, value }); + } + } + + setting.value = JSON.stringify(items); + defaults = getDefaults(); + + this.close(); + } + + show() { + this.rows.replaceChildren(); + for (const nodeName in defaults) { + const node = defaults[nodeName]; + for (const widgetName in node) { + this.addRow(nodeName, widgetName, node[widgetName]); + } + } + + this.addRow(); + super.show(this.grid); + } + } + + setting = app.ui.settings.addSetting({ + id, + name: "🐍 Widget Defaults", + type: () => { + return $el("tr", [ + $el("td", [ + $el("label", { + for: id.replaceAll(".", "-"), + textContent: "🐍 Widget & Property Defaults:", + }), + ]), + $el("td", [ + $el("button", { + textContent: "Manage", + onclick: () => { + app.ui.settings.element.close(); + const dialog = new WidgetDefaultsDialog(); + dialog.show(); + }, + style: { + fontSize: "14px", + }, + }), + ]), + ]); + }, + }); + defaults = getDefaults(); + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js new file mode 100644 index 0000000000000000000000000000000000000000..0158f656796c2ab893301dee50d47b879fcb4ff7 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflowImage.js @@ -0,0 +1,632 @@ +import { app } from "../../../scripts/app.js"; +import { importA1111 } from "../../../scripts/pnginfo.js"; +import { ComfyWidgets } from "../../../scripts/widgets.js"; + +let getDrawTextConfig = null; +let fileInput; + +class WorkflowImage { + static accept = ""; + + getBounds() { + // Calculate the min max bounds for the nodes on the graph + const bounds = app.graph._nodes.reduce( + (p, n) => { + if (n.pos[0] < p[0]) p[0] = n.pos[0]; + if (n.pos[1] < p[1]) p[1] = n.pos[1]; + const r = n.pos[0] + n.size[0]; + const b = n.pos[1] + n.size[1]; + if (r > p[2]) p[2] = r; + if (b > p[3]) p[3] = b; + return p; + }, + [99999, 99999, -99999, -99999] + ); + + bounds[0] -= 100; + bounds[1] -= 100; + bounds[2] += 100; + bounds[3] += 100; + return bounds; + } + + saveState() { + this.state = { + scale: app.canvas.ds.scale, + width: app.canvas.canvas.width, + height: app.canvas.canvas.height, + offset: app.canvas.ds.offset, + }; + } + + restoreState() { + app.canvas.ds.scale = this.state.scale; + app.canvas.canvas.width = this.state.width; + app.canvas.canvas.height = this.state.height; + app.canvas.ds.offset = this.state.offset; + } + + updateView(bounds) { + app.canvas.ds.scale = 1; + app.canvas.canvas.width = bounds[2] - bounds[0]; + app.canvas.canvas.height = bounds[3] - bounds[1]; + app.canvas.ds.offset = [-bounds[0], -bounds[1]]; + } + + getDrawTextConfig(_, widget) { + return { + x: 10, + y: widget.last_y + 10, + resetTransform: false, + }; + } + + async export(includeWorkflow) { + // Save the current state of the canvas + this.saveState(); + // Update to render the whole workflow + this.updateView(this.getBounds()); + + // Flag that we are saving and render the canvas + getDrawTextConfig = 
this.getDrawTextConfig; + app.canvas.draw(true, true); + getDrawTextConfig = null; + + // Generate a blob of the image containing the workflow + const blob = await this.getBlob(includeWorkflow ? JSON.stringify(app.graph.serialize()) : undefined); + + // Restore initial state and redraw + this.restoreState(); + app.canvas.draw(true, true); + + // Download the generated image + this.download(blob); + } + + download(blob) { + const url = URL.createObjectURL(blob); + const a = document.createElement("a"); + Object.assign(a, { + href: url, + download: "workflow." + this.extension, + style: "display: none", + }); + document.body.append(a); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + + static import() { + if (!fileInput) { + fileInput = document.createElement("input"); + Object.assign(fileInput, { + type: "file", + style: "display: none", + onchange: () => { + app.handleFile(fileInput.files[0]); + }, + }); + document.body.append(fileInput); + } + fileInput.accept = WorkflowImage.accept; + fileInput.click(); + } +} + +class PngWorkflowImage extends WorkflowImage { + static accept = ".png,image/png"; + extension = "png"; + + n2b(n) { + return new Uint8Array([(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff]); + } + + joinArrayBuffer(...bufs) { + const result = new Uint8Array(bufs.reduce((totalSize, buf) => totalSize + buf.byteLength, 0)); + bufs.reduce((offset, buf) => { + result.set(buf, offset); + return offset + buf.byteLength; + }, 0); + return result; + } + + crc32(data) { + const crcTable = + PngWorkflowImage.crcTable || + (PngWorkflowImage.crcTable = (() => { + let c; + const crcTable = []; + for (let n = 0; n < 256; n++) { + c = n; + for (let k = 0; k < 8; k++) { + c = c & 1 ? 0xedb88320 ^ (c >>> 1) : c >>> 1; + } + crcTable[n] = c; + } + return crcTable; + })()); + let crc = 0 ^ -1; + for (let i = 0; i < data.byteLength; i++) { + crc = (crc >>> 8) ^ crcTable[(crc ^ data[i]) & 0xff]; + } + return (crc ^ -1) >>> 0; + } + + async getBlob(workflow) { + return new Promise((r) => { + app.canvasEl.toBlob(async (blob) => { + if (workflow) { + // If we have a workflow, embed it in the PNG + const buffer = await blob.arrayBuffer(); + const typedArr = new Uint8Array(buffer); + const view = new DataView(buffer); + + const data = new TextEncoder().encode(`tEXtworkflow\0${workflow}`); + // The chunk length field excludes the 4-byte "tEXt" type; the CRC covers type + data + const chunk = this.joinArrayBuffer(this.n2b(data.byteLength - 4), data, this.n2b(this.crc32(data))); + + // Position just after the IHDR chunk: 8-byte signature + 4 length + 4 type + data + 4 CRC + const sz = view.getUint32(8) + 20; + const result = this.joinArrayBuffer(typedArr.subarray(0, sz), chunk, typedArr.subarray(sz)); + + blob = new Blob([result], { type: "image/png" }); + } + + r(blob); + }); + }); + } +} + +class DataReader { + /** @type {DataView} */ + view; + /** @type {boolean | undefined} */ + littleEndian; + offset = 0; + + /** + * @param {DataView} view + */ + constructor(view) { + this.view = view; + } + + /** + * Reads N bytes and increments the offset + * @param {1 | 2 | 4 | 8} size + */ + read(size, signed = false, littleEndian = undefined) { + const v = this.peek(size, signed, littleEndian); + this.offset += size; + return v; + } + + /** + * Reads N bytes + * @param {1 | 2 | 4 | 8} size + */ + peek(size, signed = false, littleEndian = undefined) { + // Build the DataView accessor name, e.g. getUint16 or getBigUint64 + let m = ""; + if (size === 8) m += "Big"; + m += signed ? "Int" : "Uint"; + m += size * 8; + m = "get" + m; + if (!this.view[m]) { + throw new Error("Method not found: " + m); + } + + return this.view[m](this.offset, littleEndian == null ? 
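+ // Default to the endianness parsed from the TIFF header when the caller does not specify one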
this.littleEndian : littleEndian); + } + + /** + * Seeks to the specified position or by the number of bytes specified relative to the current offset + * @param {number} pos + * @param {boolean} relative + */ + seek(pos, relative = true) { + if (relative) { + this.offset += pos; + } else { + this.offset = pos; + } + } +} + +class Tiff { + /** @type {DataReader} */ + #reader; + #start; + + readExif(reader) { + const TIFF_MARKER = 0x2a; + const EXIF_IFD = 0x8769; + + this.#reader = reader; + this.#start = this.#reader.offset; + this.#readEndianness(); + + if (this.#reader.read(2) !== TIFF_MARKER) { + throw new Error("Invalid TIFF: Marker not found."); + } + + const dirOffset = this.#reader.read(4); + this.#reader.seek(this.#start + dirOffset, false); + + for (const t of this.#readTags()) { + if (t.id === EXIF_IFD) { + return this.#readExifTag(t); + } + } + throw new Error("No EXIF: TIFF Exif IFD tag not found"); + } + + #readUserComment(tag) { + this.#reader.seek(this.#start + tag.offset, false); + const encoding = this.#reader.read(8); + if (encoding !== 0x45444f43494e55n) { + throw new Error("Unable to read non-Unicode data"); + } + const decoder = new TextDecoder("utf-16be"); + return decoder.decode(new DataView(this.#reader.view.buffer, this.#reader.offset, tag.count - 8)); + } + + #readExifTag(exifTag) { + const EXIF_USER_COMMENT = 0x9286; + + this.#reader.seek(this.#start + exifTag.offset, false); + for (const t of this.#readTags()) { + if (t.id === EXIF_USER_COMMENT) { + return this.#readUserComment(t); + } + } + throw new Error("No embedded data: UserComment Exif tag not found"); + } + + *#readTags() { + const count = this.#reader.read(2); + for (let i = 0; i < count; i++) { + yield { + id: this.#reader.read(2), + type: this.#reader.read(2), + count: this.#reader.read(4), + offset: this.#reader.read(4), + }; + } + } + + #readEndianness() { + const II = 0x4949; + const MM = 0x4d4d; + const endianness = this.#reader.read(2); + if (endianness === II) { + this.#reader.littleEndian = true; + } else if (endianness === MM) { + this.#reader.littleEndian = false; + } else { + throw new Error("Invalid TIFF: Endianness marker not found."); + } + } +} + +class Jpeg { + /** @type {DataReader} */ + #reader; + + /** + * @param {ArrayBuffer} buffer + */ + readExif(buffer) { + const JPEG_MARKER = 0xffd8; + const EXIF_SIG = 0x45786966; + + this.#reader = new DataReader(new DataView(buffer)); + if (this.#reader.read(2) !== JPEG_MARKER) { + throw new Error("Invalid JPEG: SOI not found."); + } + + const app0 = this.#readAppMarkerId(); + if (app0 !== 0) { + throw new Error(`Invalid JPEG: APP0 not found [found: ${app0}].`); + } + + this.#consumeAppSegment(); + const app1 = this.#readAppMarkerId(); + if (app1 !== 1) { + throw new Error(`No EXIF: APP1 not found [found: ${app1}].`); + } + + // Skip size + this.#reader.seek(2); + + if (this.#reader.read(4) !== EXIF_SIG) { + throw new Error(`No EXIF: Invalid EXIF header signature.`); + } + if (this.#reader.read(2) !== 0) { + throw new Error(`No EXIF: Invalid EXIF header.`); + } + + return new Tiff().readExif(this.#reader); + } + + #readAppMarkerId() { + const APP0_MARKER = 0xffe0; + return this.#reader.read(2) - APP0_MARKER; + } + + #consumeAppSegment() { + this.#reader.seek(this.#reader.read(2) - 2); + } +} + +class SvgWorkflowImage extends WorkflowImage { + static accept = ".svg,image/svg+xml"; + extension = "svg"; + + static init() { + // Override file handling to allow drag & drop of SVG + const handleFile = app.handleFile; + app.handleFile = async 
function (file) { + if (file && (file.type === "image/svg+xml" || file.name?.endsWith(".svg"))) { + const reader = new FileReader(); + reader.onload = () => { + // Extract embedded workflow from desc tags + const descEnd = reader.result.lastIndexOf("</desc>"); + if (descEnd !== -1) { + const descStart = reader.result.lastIndexOf("<desc>", descEnd); + if (descStart !== -1) { + const json = reader.result.substring(descStart + 6, descEnd); + this.loadGraphData(JSON.parse(SvgWorkflowImage.unescapeXml(json))); + } + } + }; + reader.readAsText(file); + return; + } else if (file && (file.type === "image/jpeg" || file.name?.endsWith(".jpg") || file.name?.endsWith(".jpeg"))) { + if ( + await new Promise((r) => { + try { + // This shouldn't go in here but it's easier than refactoring handleFile + const reader = new FileReader(); + reader.onload = async () => { + try { + const value = new Jpeg().readExif(reader.result); + importA1111(app.graph, value); + r(true); + } catch (error) { + r(false); + } + }; + reader.onerror = () => r(false); + reader.readAsArrayBuffer(file); + } catch (error) { + r(false); + } + }) + ) { + return; + } + } + return handleFile.apply(this, arguments); + }; + } + + static escapeXml(unsafe) { + return unsafe.replaceAll("&", "&amp;").replaceAll("<", "&lt;").replaceAll(">", "&gt;"); + } + + static unescapeXml(safe) { + return safe.replaceAll("&amp;", "&").replaceAll("&lt;", "<").replaceAll("&gt;", ">"); + } + + getDrawTextConfig(_, widget) { + return { + x: parseInt(widget.inputEl.style.left), + y: parseInt(widget.inputEl.style.top), + resetTransform: true, + }; + } + + saveState() { + super.saveState(); + this.state.ctx = app.canvas.ctx; + } + + restoreState() { + super.restoreState(); + app.canvas.ctx = this.state.ctx; + } + + updateView(bounds) { + super.updateView(bounds); + this.createSvgCtx(bounds); + } + + createSvgCtx(bounds) { + const ctx = this.state.ctx; + const svgCtx = (this.svgCtx = new C2S(bounds[2] - bounds[0], bounds[3] - bounds[1])); + svgCtx.canvas.getBoundingClientRect = function () { + return { width: svgCtx.width, height: svgCtx.height }; + }; + + // Override the c2s handling of images to draw images as canvases + const drawImage = svgCtx.drawImage; + svgCtx.drawImage = function (...args) { + const image = args[0]; + // If we are an image node and not a datauri, then we need to replace it with a canvas; + // we can't convert to a data uri here as it is an async process + if (image.nodeName === "IMG" && !image.src.startsWith("data:image/")) { + const canvas = document.createElement("canvas"); + canvas.width = image.width; + canvas.height = image.height; + const imgCtx = canvas.getContext("2d"); + imgCtx.drawImage(image, 0, 0); + args[0] = canvas; + } + + return drawImage.apply(this, args); + }; + + // Implement missing required functions + svgCtx.getTransform = function () { + return ctx.getTransform(); + }; + svgCtx.resetTransform = function () { + return ctx.resetTransform(); + }; + svgCtx.roundRect = svgCtx.rect; + app.canvas.ctx = svgCtx; + } + + getBlob(workflow) { + let svg = this.svgCtx.getSerializedSvg(true); + if (workflow) { + // Embed the workflow JSON in a <desc> element so it can be parsed back out on import + svg = svg.replace("</svg>", `<desc>${SvgWorkflowImage.escapeXml(workflow)}</desc></svg>`); + } + return new Blob([svg], { type: "image/svg+xml" }); + } +} + +app.registerExtension({ + name: "pysssss.WorkflowImage", + init() { + // https://codepen.io/peterhry/pen/nbMaYg + function wrapText(context, text, x, y, maxWidth, lineHeight) { + var words = text.split(" "), + line = "", + i, + test, + metrics; + + for (i = 0; i < words.length; i++) { + test = words[i]; + metrics = 
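+ // measureText gives the rendered width; words wider than maxWidth are trimmed below and the remainder re-queued as a new word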
context.measureText(test); + while (metrics.width > maxWidth) { + // Determine how much of the word will fit + test = test.substring(0, test.length - 1); + metrics = context.measureText(test); + } + if (words[i] != test) { + words.splice(i + 1, 0, words[i].substr(test.length)); + words[i] = test; + } + + test = line + words[i] + " "; + metrics = context.measureText(test); + + if (metrics.width > maxWidth && i > 0) { + context.fillText(line, x, y); + line = words[i] + " "; + y += lineHeight; + } else { + line = test; + } + } + + context.fillText(line, x, y); + } + + const stringWidget = ComfyWidgets.STRING; + // Override multiline string widgets to draw text using canvas while saving as svg + ComfyWidgets.STRING = function () { + const w = stringWidget.apply(this, arguments); + if (w.widget && w.widget.type === "customtext") { + const draw = w.widget.draw; + w.widget.draw = function (ctx) { + draw.apply(this, arguments); + if (this.inputEl.hidden) return; + + if (getDrawTextConfig) { + const config = getDrawTextConfig(ctx, this); + const t = ctx.getTransform(); + ctx.save(); + if (config.resetTransform) { + ctx.resetTransform(); + } + + const style = document.defaultView.getComputedStyle(this.inputEl, null); + const x = config.x; + const y = config.y; + const w = parseInt(this.inputEl.style.width); + const h = parseInt(this.inputEl.style.height); + ctx.fillStyle = style.getPropertyValue("background-color"); + ctx.fillRect(x, y, w, h); + + ctx.fillStyle = style.getPropertyValue("color"); + ctx.font = style.getPropertyValue("font"); + + const line = t.d * 12; + const split = this.inputEl.value.split("\n"); + let start = y; + for (const l of split) { + start += line; + wrapText(ctx, l, x + 4, start, w, line); + } + + ctx.restore(); + } + }; + } + return w; + }; + }, + setup() { + const script = document.createElement("script"); + script.onload = function () { + const formats = [SvgWorkflowImage, PngWorkflowImage]; + for (const f of formats) { + f.init?.call(); + WorkflowImage.accept += (WorkflowImage.accept ? 
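+ // Build a comma-separated accept list for the shared file input, e.g. ".svg,image/svg+xml,.png,image/png"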
"," : "") + f.accept; + } + + // Add canvas menu options + const orig = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = orig.apply(this, arguments); + + options.push(null, { + content: "Workflow Image", + submenu: { + options: [ + { + content: "Import", + callback: () => { + WorkflowImage.import(); + }, + }, + { + content: "Export", + submenu: { + options: formats.flatMap((f) => [ + { + content: f.name.replace("WorkflowImage", "").toLocaleLowerCase(), + callback: () => { + new f().export(true); + }, + }, + { + content: f.name.replace("WorkflowImage", "").toLocaleLowerCase() + " (no embedded workflow)", + callback: () => { + new f().export(); + }, + }, + ]), + }, + }, + ], + }, + }); + return options; + }; + }; + + script.src = new URL(`assets/canvas2svg.js`, import.meta.url); + document.body.append(script); + }, +}); diff --git a/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js b/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js new file mode 100644 index 0000000000000000000000000000000000000000..df84c5178057ed7928df529b962989d21adc25c1 --- /dev/null +++ b/custom_nodes/ComfyUI-Custom-Scripts/web/js/workflows.js @@ -0,0 +1,339 @@ +import { app } from "../../../scripts/app.js"; +import { api } from "../../../scripts/api.js"; +import { $el } from "../../../scripts/ui.js"; + +// Adds workflow management +// Original implementation by https://github.com/i-h4x +// Thanks for permission to reimplement as an extension + +const style = ` +#comfy-save-button, #comfy-load-button { + position: relative; + overflow: hidden; +} +.pysssss-workflow-arrow { + position: absolute; + top: 0; + bottom: 0; + right: 0; + font-size: 12px; + display: flex; + align-items: center; + width: 24px; + justify-content: center; + background: rgba(255,255,255,0.1); +} +.pysssss-workflow-arrow:after { + content: "▼"; +} +.pysssss-workflow-arrow:hover { + filter: brightness(1.6); + background-color: var(--comfy-menu-bg); +} +.pysssss-workflow-load .litemenu-entry:not(.has_submenu):before, +.pysssss-workflow-load ~ .litecontextmenu .litemenu-entry:not(.has_submenu):before { + content: "🎛️"; + padding-right: 5px; +} +.pysssss-workflow-load .litemenu-entry.has_submenu:before, +.pysssss-workflow-load ~ .litecontextmenu .litemenu-entry.has_submenu:before { + content: "📂"; + padding-right: 5px; + position: relative; + top: -1px; +} +.pysssss-workflow-popup ~ .litecontextmenu { + transform: scale(1.3); +} +`; + +async function getWorkflows() { + const response = await api.fetchApi("/pysssss/workflows", { cache: "no-store" }); + return await response.json(); +} + +async function getWorkflow(name) { + const response = await api.fetchApi(`/pysssss/workflows/${encodeURIComponent(name)}`, { cache: "no-store" }); + return await response.json(); +} + +async function saveWorkflow(name, workflow, overwrite) { + try { + const response = await api.fetchApi("/pysssss/workflows", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ name, workflow, overwrite }), + }); + if (response.status === 201) { + return true; + } + if (response.status === 409) { + return false; + } + throw new Error(response.statusText); + } catch (error) { + console.error(error); + } +} + +class PysssssWorkflows { + async load() { + this.workflows = await getWorkflows(); + this.loadMenu.style.display = this.workflows.length ? 
"flex" : "none"; + } + + getMenuOptions(callback) { + const menu = []; + const directories = new Map(); + for (const workflow of this.workflows || []) { + const path = workflow.split("/"); + let parent = menu; + let currentPath = ""; + for (let i = 0; i < path.length - 1; i++) { + currentPath += "/" + path[i]; + let newParent = directories.get(currentPath); + if (!newParent) { + newParent = { + title: path[i], + has_submenu: true, + submenu: { + options: [], + }, + }; + parent.push(newParent); + newParent = newParent.submenu.options; + directories.set(currentPath, newParent); + } + parent = newParent; + } + parent.push({ + title: path[path.length - 1], + callback: () => callback(workflow), + }); + } + return menu; + } + + constructor() { + function addWorkflowMenu(type, getOptions) { + return $el("div.pysssss-workflow-arrow", { + parent: document.getElementById(`comfy-${type}-button`), + onclick: (e) => { + e.preventDefault(); + e.stopPropagation(); + + LiteGraph.closeAllContextMenus(); + const menu = new LiteGraph.ContextMenu( + getOptions(), + { + event: e, + scale: 1.3, + }, + window + ); + menu.root.classList.add("pysssss-workflow-popup"); + menu.root.classList.add(`pysssss-workflow-${type}`); + }, + }); + } + + this.loadMenu = addWorkflowMenu("load", () => + this.getMenuOptions(async (workflow) => { + const json = await getWorkflow(workflow); + app.loadGraphData(json); + }) + ); + addWorkflowMenu("save", () => { + return [ + { + title: "Save as", + callback: () => { + let filename = prompt("Enter filename", this.workflowName || "workflow"); + if (filename) { + if (!filename.toLowerCase().endsWith(".json")) { + filename += ".json"; + } + + this.workflowName = filename; + + const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string + const blob = new Blob([json], { type: "application/json" }); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: filename, + style: { display: "none" }, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + }, + }, + { + title: "Save to workflows", + callback: async () => { + const name = prompt("Enter filename", this.workflowName || "workflow"); + if (name) { + this.workflowName = name; + + const data = app.graph.serialize(); + if (!(await saveWorkflow(name, data))) { + if (confirm("A workspace with this name already exists, do you want to overwrite it?")) { + await saveWorkflow(name, app.graph.serialize(), true); + } else { + return; + } + } + await this.load(); + } + }, + }, + ]; + }); + this.load(); + + const handleFile = app.handleFile; + const self = this; + app.handleFile = function (file) { + if (file?.name?.endsWith(".json")) { + self.workflowName = file.name; + } else { + self.workflowName = null; + } + return handleFile.apply(this, arguments); + }; + } +} + +const refreshComboInNodes = app.refreshComboInNodes; +let workflows; + +async function sendToWorkflow(img, workflow) { + const graph = !workflow ? 
app.graph.serialize() : await getWorkflow(workflow); + const nodes = graph.nodes.filter((n) => n.type === "LoadImage"); + let targetNode; + if (nodes.length === 0) { + alert("To send the image to another workflow, that workflow must have a LoadImage node."); + return; + } else if (nodes.length > 1) { + targetNode = nodes.find((n) => n.title?.toLowerCase().includes("input")); + if (!targetNode) { + targetNode = nodes[0]; + alert( + "The target workflow has multiple LoadImage nodes, include 'input' in the name of the one you want to use. The first one will be used here." + ); + } + } else { + targetNode = nodes[0]; + } + + const blob = await (await fetch(img.src)).blob(); + const name = + (workflow || "sendtoworkflow").replace(/\//g, "_") + + "-" + + +new Date() + + new URLSearchParams(img.src.split("?")[1]).get("filename"); + const body = new FormData(); + body.append("image", new File([blob], name)); + + const resp = await api.fetchApi("/upload/image", { + method: "POST", + body, + }); + + if (resp.status === 200) { + await refreshComboInNodes.call(app); + targetNode.widgets_values[0] = name; + app.loadGraphData(graph); + app.graph.getNodeById(targetNode.id); + } else { + alert(resp.status + " - " + resp.statusText); + } +} + +app.registerExtension({ + name: "pysssss.Workflows", + init() { + $el("style", { + textContent: style, + parent: document.head, + }); + }, + async setup() { + workflows = new PysssssWorkflows(); + app.refreshComboInNodes = function () { + workflows.load(); + refreshComboInNodes.apply(this, arguments); + }; + + const comfyDefault = "[ComfyUI Default]"; + const defaultWorkflow = app.ui.settings.addSetting({ + id: "pysssss.Workflows.Default", + name: "🐍 Default Workflow", + defaultValue: comfyDefault, + type: "combo", + options: (value) => + [comfyDefault, ...workflows.workflows].map((m) => ({ + value: m, + text: m, + selected: m === value, + })), + }); + + document.getElementById("comfy-load-default-button").onclick = async function () { + if ( + localStorage["Comfy.Settings.Comfy.ConfirmClear"] === "false" || + confirm(`Load default workflow (${defaultWorkflow.value})?`) + ) { + if (defaultWorkflow.value === comfyDefault) { + app.loadGraphData(); + } else { + const json = await getWorkflow(defaultWorkflow.value); + app.loadGraphData(json); + } + } + }; + }, + async beforeRegisterNodeDef(nodeType, nodeData, app) { + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = getExtraMenuOptions?.apply?.(this, arguments); + let img; + if (this.imageIndex != null) { + // An image is selected so select that + img = this.imgs[this.imageIndex]; + } else if (this.overIndex != null) { + // No image is selected but one is hovered + img = this.imgs[this.overIndex]; + } + + if (img) { + let pos = options.findIndex((o) => o.content === "Save Image"); + if (pos === -1) { + pos = 0; + } else { + pos++; + } + + options.splice(pos, 0, { + content: "Send to workflow", + has_submenu: true, + submenu: { + options: [ + { callback: () => sendToWorkflow(img), title: "[Current workflow]" }, + ...workflows.getMenuOptions(sendToWorkflow.bind(null, img)), + ], + }, + }); + } + + return r; + }; + }, +}); diff --git a/custom_nodes/ComfyUI-Impact-Pack/.gitignore b/custom_nodes/ComfyUI-Impact-Pack/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..07f87b5e57fbed6600f7ec61c5dc0115f87f336d --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/.gitignore @@ -0,0 +1,7 @@ +__pycache__ 
+*.ini +wildcards/** +.vscode/ +.idea/ +subpack +impact_subpack \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/.gitmodules b/custom_nodes/ComfyUI-Impact-Pack/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..9180e6465120d9a6e7de990c99b7517e29cda2e3 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/.gitmodules @@ -0,0 +1,3 @@ +[submodule "subpack"] + path = subpack + url = https://github.com/ltdrdata/ComfyUI-Impact-Subpack diff --git a/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt b/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/custom_nodes/ComfyUI-Impact-Pack/README.md b/custom_nodes/ComfyUI-Impact-Pack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..282ff173fd9d7989ff55a7a239e934303d44706b --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/README.md @@ -0,0 +1,411 @@ +[![Youtube Badge](https://img.shields.io/badge/Youtube-FF0000?style=for-the-badge&logo=Youtube&logoColor=white&link=https://www.youtube.com/watch?v=AccoxDZIg3Y&list=PL_Ej2RDzjQLGfEeizq4GISeY3FtVyFmGP)](https://www.youtube.com/watch?v=AccoxDZIg3Y&list=PL_Ej2RDzjQLGfEeizq4GISeY3FtVyFmGP) + +# ComfyUI-Impact-Pack + +**Custom nodes pack for ComfyUI** +This custom node pack helps you conveniently enhance images through Detector, Detailer, Upscaler, Pipe, and more. + + +## NOTICE +* V4.20.1: Due to the feature update in `RegionalSampler`, the parameter order has changed, causing malfunctions in previously created `RegionalSamplers`. Please adjust the parameters accordingly. +* V4.12: `MASKS` has been changed to `MASK`. +* V4.7.2 isn't compatible with old versions of `ControlNet Auxiliary Preprocessor`. 
+* The selection weight syntax changed (`:` -> `::`) in V3.16. ([tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md))
+* Starting from V3.6, the latest version of ComfyUI (Aug 8, 9ccc965) is required.
+* **In versions below V3.3.1, there was an issue with the image quality generated after using the UltralyticsDetectorProvider. Please make sure to upgrade to a newer version.**
+* Starting from V3.0, nodes related to `mmdet` are optional nodes that are activated only based on the configuration settings.
+  - Through ComfyUI-Impact-Subpack, you can utilize UltralyticsDetectorProvider to access various detection models.
+* Between versions 2.22 and 2.21, there is a partial compatibility loss in the Detailer workflow. If you continue to use an existing workflow, errors may occur during execution. An additional output called "enhanced_alpha_list" has been added to Detailer-related nodes.
+* The permission error related to cv2 that occurred during the installation of Impact Pack has been patched in version 2.21.4. However, please note that the latest versions of ComfyUI and ComfyUI-Manager are required.
+* The "PreviewBridge" feature may not function correctly on ComfyUI versions released before July 1, 2023.
+* Attempting to load the "ComfyUI-Impact-Pack" on ComfyUI versions released before June 27, 2023, will result in a failure.
+* With the addition of wildcard support in FaceDetailer, the structure of DETAILER_PIPE-related nodes and Detailer nodes has changed. There may be malfunctions when using the existing workflow.
+
+
+## Custom Nodes
+* [Detectors](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/detectors.md)
+  * SAMLoader - Loads the SAM model.
+  * UltralyticsDetectorProvider - Loads an Ultralytics model to provide SEGM_DETECTOR and BBOX_DETECTOR (see the sketch after this list).
+    - Unlike `MMDetDetectorProvider`, for segm models, `BBOX_DETECTOR` is also provided.
+    - The various models available in UltralyticsDetectorProvider can be downloaded through **ComfyUI-Manager**.
+  * ONNXDetectorProvider - Loads an ONNX model to provide BBOX_DETECTOR.
+  * CLIPSegDetectorProvider - Wrapper for CLIPSeg to provide BBOX_DETECTOR.
+    * You need to install the ComfyUI-CLIPSeg node extension.
+  * SEGM Detector (combined) - Detects segmentation and returns a mask from the input image.
+  * BBOX Detector (combined) - Detects bounding boxes and returns a mask from the input image.
+  * SAMDetector (combined) - Utilizes SAM to extract the segment at the location indicated by the input SEGS on the input image and outputs it as a unified mask.
+  * SAMDetector (Segmented) - It is similar to `SAMDetector (combined)`, but it separates and outputs the detected segments. Multiple segments can be found for the same detected area, and currently, a policy is in place to group them arbitrarily in sets of three. This aspect is expected to be improved in the future.
+    * As a result, it outputs the `combined_mask`, which is a unified mask, and `batch_masks`, which are multiple masks grouped together in batch form.
+    * While `batch_masks` may not be completely separated, it provides functionality to perform some level of segmentation.
+  * Simple Detector (SEGS) - Operating primarily with `BBOX_DETECTOR`, and with the additional provision of `SAM_MODEL` or `SEGM_DETECTOR`, this node internally generates improved SEGS through mask operations on both *bbox* and *silhouette*. It serves as a convenient tool for simplifying a somewhat intricate workflow.
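+
+As a rough illustration of what an Ultralytics-based detector does under the hood (a hedged sketch, not the provider's actual code; the model file name is illustrative):
+
+```python
+from ultralytics import YOLO
+
+# Illustrative model name; Impact Pack models are downloaded via ComfyUI-Manager.
+model = YOLO("face_yolov8m.pt")
+results = model("input.png")
+
+for box in results[0].boxes:            # bbox models yield bounding boxes
+    print(box.xyxy, float(box.conf))
+if results[0].masks is not None:        # segm models additionally yield masks
+    print(results[0].masks.data.shape)
+```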
+
+* ControlNetApply (SEGS) - To apply ControlNet in SEGS, you need to use the Preprocessor Provider node from the Inspire Pack to utilize this node.
+
+* Bitwise(SEGS & SEGS) - Performs a 'bitwise and' operation between two SEGS.
+* Bitwise(SEGS - SEGS) - Subtracts one SEGS from another.
+* Bitwise(SEGS & MASK) - Performs a bitwise AND operation between SEGS and MASK.
+* Bitwise(SEGS & MASKS ForEach) - Performs a bitwise AND operation between SEGS and MASKS.
+  * Please note that this operation is performed with batches of MASKS, not just a single MASK.
+* Bitwise(MASK & MASK) - Performs a 'bitwise and' operation between two masks.
+* Bitwise(MASK - MASK) - Subtracts one mask from another.
+* Bitwise(MASK + MASK) - Combines two masks.
+* SEGM Detector (SEGS) - Detects segmentation and returns SEGS from the input image.
+* BBOX Detector (SEGS) - Detects bounding boxes and returns SEGS from the input image.
+
+* Detailer
+  * Detailer (SEGS) - Refines the image based on SEGS.
+  * DetailerDebug (SEGS) - Refines the image based on SEGS. Additionally, it provides the ability to monitor the cropped image and the refined image of the cropped image.
+    * When using 'external_seed', disable the 'seed random generate' option in the 'Detailer...' node; otherwise, the internally randomized seed forces a regeneration on every run.
+  * MASK to SEGS - Generates SEGS based on the mask.
+  * MASK to SEGS For AnimateDiff - Generates SEGS based on the mask for AnimateDiff.
+  * MediaPipe FaceMesh to SEGS - Separates each landmark from the MediaPipe facemesh image to create labeled SEGS.
+    * Usually, the size of images created through the MediaPipe facemesh preprocessor is downscaled. This node resizes the MediaPipe facemesh image to the original size given as reference_image_opt, so the sizes match during processing.
+  * ToBinaryMask - Converts a mask with alpha values ranging from 0 to 255 into a binary mask of 0 and 255; all non-zero parts are set to 255.
+  * Masks to Mask List - This node converts MASKS in batch form into a list of individual masks.
+  * Mask List to Masks - This node converts a MASK list into MASK batch form.
+  * EmptySEGS - Provides an empty SEGS.
+  * MaskPainter - Provides a feature to draw masks.
+  * FaceDetailer - Easily detects faces and improves them.
+  * FaceDetailer (pipe) - Easily detects faces and improves them (for multipass).
+  * MaskDetailer (pipe) - This is a simple inpaint node that applies the Detailer to the mask area.
+
+* `FromDetailer (SDXL/pipe), BasicPipe -> DetailerPipe (SDXL), Edit DetailerPipe (SDXL)` - These are pipe functions used in Detailer for utilizing the refiner model of SDXL.
+
+* SEGS Manipulation nodes
+  * SEGSDetailer - Performs detailed work on SEGS without pasting it back onto the original image.
+  * SEGSPaste - Pastes the results of SEGS onto the original image.
+    * If `ref_image_opt` is present, the images contained within SEGS are ignored. Instead, the image within `ref_image_opt` corresponding to the crop area of SEGS is taken and pasted. The size of the image in `ref_image_opt` should be the same as the original image size.
+    * This node can be used in conjunction with the processing results of AnimateDiff.
+  * SEGSPreview - Provides a preview of SEGS.
+    * This is used to preview the improved image through `SEGSDetailer` before merging it into the original. Prior to going through `SEGSDetailer`, SEGS contains only mask information, without image information. If fallback_image_opt is connected to the original image, SEGS without image information will generate a preview using the original image. However, if SEGS already contains image information, fallback_image_opt will be ignored.
+    * This node can be used in conjunction with the processing results of AnimateDiff.
+  * SEGSToImageList - Converts SEGS to an Image List.
+  * SEGSToMaskList - Converts SEGS to a Mask List.
+  * SEGS Filter (label) - This node filters SEGS based on the label of the detected areas.
+  * SEGS Filter (ordered) - This node sorts SEGS based on size and position and retrieves SEGs within a certain range.
+  * SEGS Filter (range) - This node retrieves only SEGs from SEGS that have a size and position within a certain range.
+  * SEGSConcat - Concatenates segs1 and segs2. If the source shapes of segs1 and segs2 differ, segs2 will be ignored.
+  * Picker (SEGS) - Among the input SEGS, you can select a specific SEG through a dialog. If no SEG is selected, it outputs an empty SEGS. Increasing the batch_size of SEGSDetailer can be used for the purpose of selecting from the candidates.
+  * DecomposeSEGS - Decomposes SEGS to allow for detailed manipulation.
+  * AssembleSEGS - Reassembles the decomposed SEGS.
+  * From SEG_ELT - Extracts detailed information from SEG_ELT.
+  * Edit SEG_ELT - Modifies some of the information in SEG_ELT.
+  * Dilate SEG_ELT - Dilates the mask of SEG_ELT.
+
+* Dilate Mask - Dilates a mask.
+  * Negative values erode the mask instead.
+
+* Pipe nodes
+  * ToDetailerPipe, FromDetailerPipe - These nodes are used to bundle multiple inputs used in the detailer, such as models and vae, ..., into a single DETAILER_PIPE, or to extract the elements that are bundled in the DETAILER_PIPE.
+  * ToBasicPipe, FromBasicPipe - These nodes are used to bundle model, clip, vae, positive conditioning, and negative conditioning into a single BASIC_PIPE, or to extract each element from the BASIC_PIPE.
+  * EditBasicPipe, EditDetailerPipe - These nodes are used to replace some elements in BASIC_PIPE or DETAILER_PIPE.
+  * FromDetailerPipe_v2, FromBasicPipe_v2 - These have the same functionality as `FromDetailerPipe` and `FromBasicPipe`, but with an additional output that directly exports the input pipe. This is useful when editing with EditBasicPipe and EditDetailerPipe.
+* Latent Scale (on Pixel Space) - This node converts latent to pixel space, upscales it, and then converts it back to latent.
+  * If upscale_model_opt is provided, it uses the model to upscale the pixels and then downscales them using the interpolation method provided in scale_method to reach the target resolution.
+* PixelKSampleUpscalerProvider - An upscaler is provided that converts latent to pixels using VAEDecode, performs upscaling, converts back to latent using VAEEncode, and then performs k-sampling (see the sketch after this list). This upscaler can be attached to nodes such as 'Iterative Upscale' for use.
+  * Similar to 'Latent Scale (on Pixel Space)', if upscale_model_opt is provided, it performs pixel upscaling using the model.
+* PixelTiledKSampleUpscalerProvider - It is similar to PixelKSampleUpscalerProvider, but it uses ComfyUI_TiledKSampler and Tiled VAE Decoder/Encoder to avoid GPU VRAM issues at high resolutions.
+  * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
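+
+The decode -> upscale -> encode -> k-sample flow described above can be outlined roughly as follows (a hedged sketch under assumed callables, not the node's actual implementation):
+
+```python
+# Conceptual flow of PixelKSampleUpscalerProvider; every callable here is an
+# assumption standing in for the corresponding ComfyUI operation.
+def pixel_ksample_upscale(latent, vae_decode, resize, vae_encode, ksample,
+                          upscale_model=None):
+    pixels = vae_decode(latent)         # latent -> pixel space
+    if upscale_model is not None:
+        pixels = upscale_model(pixels)  # model-based upscale first, if provided
+    pixels = resize(pixels)             # interpolate to the target resolution
+    latent = vae_encode(pixels)         # pixel space -> latent
+    return ksample(latent)              # refine the upscaled latent
+```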
+
+* DenoiseScheduleHookProvider - Provides a hook for IterativeUpscale that gradually changes the denoise to target_denoise as the steps progress.
+* CfgScheduleHookProvider - Provides a hook for IterativeUpscale that gradually changes the cfg to target_cfg as the steps progress.
+* PixelKSampleHookCombine - This is used to connect two PK_HOOKs. hook1 is executed first, and then hook2 is executed.
+  * If you want to change cfg and denoise simultaneously, you can combine the PK_HOOKs of CfgScheduleHookProvider and DenoiseScheduleHookProvider through PixelKSampleHookCombine.
+* NoiseInjectionHookProvider - During each iteration of IterativeUpscale, noise is injected into the latent space while varying the strength according to a schedule.
+  * You need to install the [BlenderNeko/ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) node extension.
+  * The seed serves as the initial value required for generating noise, and it increments by 1 with each iteration as the process unfolds.
+  * The source determines whether CPU noise or GPU noise is used.
+  * Currently, there is only a simple schedule available, where the strength of the noise varies from start_strength to end_strength over the course of the iterations.
+* NoiseInjectionDetailerHookProvider - The `detailer_hook` is a hook in the `Detailer` that injects noise during the processing of each SEGS.
+* CoreMLDetailerHookProvider - CoreML supports only 512x512, 512x768, 768x512, and 768x768 sampling sizes. CoreMLDetailerHookProvider fixes the upscale of the crop_region precisely to one of these sizes. When using this hook, that size is always selected, regardless of the guide_size. However, if the guide_size is too small, skipping will occur.
+
+* Iterative Upscale (Latent) - This upscaler takes an upscaler as input and splits the scale_factor into steps, then iteratively performs upscaling. It takes latent as input and outputs latent as the result.
+* Iterative Upscale (Image) - This upscaler takes an upscaler as input and splits the scale_factor into steps, then iteratively performs upscaling. It takes an image as input and outputs an image as the result.
+  * Internally, this node uses 'Iterative Upscale (Latent)'.
+
+* TwoSamplersForMask - This node can apply two samplers depending on the mask area. The base_sampler is applied to the area where the mask is 0, while the mask_sampler is applied to the area where the mask is 1 (see the sketch below).
+  * Note: A latent encoded through VAEEncodeForInpaint cannot be used.
+* KSamplerProvider - This is a wrapper that enables KSampler to be used in TwoSamplersForMask and TwoSamplersForMaskUpscalerProvider.
+* TiledKSamplerProvider - A wrapper that provides a KSAMPLER based on ComfyUI_TiledKSampler.
+  * You need to install the [BlenderNeko/ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) node extension.
+
+* TwoAdvancedSamplersForMask - Similar to TwoSamplersForMask, but the two differ in how they operate. TwoSamplersForMask performs sampling in the mask area only after all the sampling in the base area is finished. TwoAdvancedSamplersForMask, on the other hand, samples both the base area and the mask area sequentially at each step.
+* KSamplerAdvancedProvider - This is a wrapper that enables KSampler to be used in TwoAdvancedSamplersForMask.
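+
+The difference between the two variants can be sketched as follows (a hedged illustration of the blending idea, assuming tensor inputs and caller-supplied sampling callables; not the actual Impact Pack code):
+
+```python
+# TwoSamplersForMask: sample the base area fully, then the mask area.
+def two_samplers_for_mask(base_sample, mask_sample, latent, mask):
+    base_out = base_sample(latent)              # finish the base area first
+    mask_out = mask_sample(base_out)            # then resample for the mask area
+    return base_out * (1.0 - mask) + mask_out * mask
+
+# TwoAdvancedSamplersForMask: interleave base and mask sampling at every step.
+def two_advanced_samplers_for_mask(base_step, mask_step, latent, mask, steps):
+    x = latent
+    for i in range(steps):
+        x = base_step(x, i)                     # one base-area step
+        x_masked = mask_step(x, i)              # one mask-area step
+        x = x * (1.0 - mask) + x_masked * mask  # keep the masked result only under the mask
+    return x
+```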
+
+* TwoSamplersForMaskUpscalerProvider - This is an upscaler that extends TwoSamplersForMask so it can be used in Iterative Upscale.
+  * TwoSamplersForMaskUpscalerProviderPipe - pipe version of TwoSamplersForMaskUpscalerProvider.
+
+* PreviewBridge - This custom node can be used as a bridge when using the MaskEditor feature of Clipspace.
+* ImageSender, ImageReceiver - The images generated in ImageSender are automatically sent to the ImageReceiver with the same link_id.
+* LatentSender, LatentReceiver - The latent generated in LatentSender is automatically sent to the LatentReceiver with the same link_id.
+  * Furthermore, LatentSender is implemented with PreviewLatent, which stores the latent in payload form within the image thumbnail.
+  * Due to the current structure of ComfyUI, it is unable to distinguish between SDXL latents and SD1.5/SD2.1 latents. Therefore, it generates thumbnails by decoding them using the SD1.5 method.
+
+* Switch nodes
+  * Switch (image,mask), Switch (latent), Switch (SEGS) - Among multiple inputs, these select the input designated by the selector and output it. The first input must be provided, while the others are optional. However, if the input specified by the selector is not connected, an error may occur.
+  * Switch (Any) - This is a Switch node that takes an arbitrary number of inputs and produces a single output. Its type is determined when connected to any node, and connecting inputs increases the available slots for connections.
+  * Inversed Switch (Any) - In contrast to `Switch (Any)`, it takes a single input and outputs one of many. Due to ComfyUI's functional limitations, the value of `select` must be determined at the time of queuing a prompt, and while it can serve as a `Primitive Node` or `ImpactInt`, it cannot function properly when connected through other nodes.
+  * Guide
+    * When the `Switch (Any)` and `Inversed Switch (Any)` selects are transformed into primitives, it's important to be cautious because the select range is not appropriately constrained, potentially leading to unintended behavior.
+    * `Switch (image,mask)`, `Switch (latent)`, `Switch (SEGS)`, and `Switch (Any)` support the `sel_mode` param. The `sel_mode` sets the moment at which the `select` parameter is determined: `select_on_prompt` determines the `select` at the time of queuing the prompt, while `select_on_execution` determines it during the execution of the workflow. While `select_on_execution` offers more flexibility, it can potentially trigger workflow execution errors due to running nodes that may be impossible to execute within the limitations of ComfyUI. `select_on_prompt` bypasses this constraint by treating any inputs not selected as if they were disconnected. However, please note that when using `select_on_prompt`, the `select` can only be used with widgets or `Primitive Nodes` determined at queue-prompt time.
+    * There is an issue when connecting the built-in reroute node to the switch's input/output slots: it can lead to forced disconnections during workflow loading. Therefore, it is advisable not to use reroute for making such connections. However, there are no issues when using the reroute node from Pythongossss.
+
+* [Wildcards](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md) - These are nodes that support syntax in the form of `__wildcard-name__` and dynamic prompt syntax like `{a|b|c}`.
+  * Wildcard files can be used by placing `.txt` or `.yaml` files under either the `ComfyUI-Impact-Pack/wildcards` or `ComfyUI-Impact-Pack/custom_wildcards` path.
+    * You can download and use [Wildcard YAML](https://civitai.com/models/138970/billions-of-wildcards-all-in-one) files in this format.
+  * After the first execution, you can change the custom wildcards path in the `custom_wildcards` entry within the generated `ComfyUI-Impact-Pack/impact-pack.ini` file.
+  * ImpactWildcardProcessor - The text is generated by processing the wildcards in the Text. If the mode is set to "populate", a dynamic prompt is generated with each execution and the second textbox is filled in. If the mode is set to "fixed", the content of the second textbox remains unchanged.
+    * When an image is generated in "fixed" mode, the prompt used for that particular generation is stored in the metadata.
+  * ImpactWildcardEncode - Similar to ImpactWildcardProcessor, this also provides LoRA loading via `<lora:...>` tags in the prompt. Populated prompts are encoded using the clip after all the LoRA loading is done.
+    * If the `Inspire Pack` is installed, you can use **Lora Block Weight** in the form of `LBW=lbw spec;`.
+
+* Regional Sampling - These nodes offer the capability to divide regions and perform partial sampling using a mask. Unlike TwoSamplersForMask, sampling for each region is applied during each step.
+  * RegionalPrompt - This node combines a **mask** for specifying regions and the **sampler** to apply to each region to create `REGIONAL_PROMPTS`.
+  * CombineRegionalPrompts - Combines multiple `REGIONAL_PROMPTS` into a single `REGIONAL_PROMPTS`.
+  * RegionalSampler - This node performs sampling using a base sampler and regional prompts. Sampling by the base sampler is executed at each step, while sampling for each region is performed through the sampler bound to each region.
+    * overlap_factor - Specifies the amount of overlap for each region so it blends well with the area outside the mask.
+    * restore_latent - When sampling each region, restores the areas outside the mask to the base latent, preventing additional noise from being introduced outside the mask during region sampling.
+  * RegionalSamplerAdvanced - This is the advanced version of the RegionalSampler. You can control it using `step` instead of `denoise`.
+  * NOTE: The `sde` and `uni_pc` samplers introduce additional noise during each step of the sampling process. To mitigate this, when sampling each region, the `uni_pc` sampler additionally applies `dpmpp_fast`, and the `sde` sampler additionally applies `dpmpp_2m`.
+
+* KSampler (pipe), KSampler (advanced/pipe)
+
+* Image batch To Image List - Converts an image batch to an image list.
+  * This lets you handle the images generated in a multi-batch individually.
+* Make Image List - Converts multiple images into a single image list.
+* Make Image Batch - Converts multiple images into a single image batch.
+  * The number of image inputs can be increased as needed.
+
+* String Selector - Selects and returns a portion of the string. When `multiline` mode is disabled, it simply returns the string of the line pointed to by the selector. When `multiline` mode is enabled, it divides the string based on lines that start with `#` and returns them (see the example below). If the `select` value is larger than the number of items, it wraps around and counts from the first item again.
+* Combine Conditionings - Takes multiple conditionings as input and combines them into a single conditioning.
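+
+For example, with `multiline` enabled, the input below is divided into two items, so `select=0` returns the first block, `select=1` the second, and `select=2` wraps around to the first again (an illustration based on the description above; the exact widget layout may differ):
+
+```
+# daytime variant
+a photo of a cat, sunny park
+# night variant
+a photo of a cat, neon city at night
+```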
+
+* Logics (experimental) - These nodes are experimental nodes designed to implement the logic for loops and dynamic switching.
+  * ImpactCompare, ImpactConditionalBranch, ImpactInt, ImpactValueSender, ImpactValueReceiver, ImpactImageInfo, ImpactMinMax, ImpactNeg, ImpactConditionalStopIteration
+  * ImpactIsNotEmptySEGS - This node returns `true` only if the input SEGS is not empty.
+  * Queue Trigger - When this node is executed, it adds a new queue to assist with repetitive tasks. It will only execute if the signal's status changes.
+  * Queue Trigger (Countdown) - Like the Queue Trigger, it adds a queue, but only if the count is greater than 1, and it decrements the count by one each time it runs.
+  * Sleep - Waits for the specified time (in seconds).
+  * Set Widget Value - This node sets one of the optional inputs to the specified node's widget. An error may occur if the types do not match.
+  * Set Mute State - This node changes the mute state of a specific node.
+  * Control Bridge - Depending on whether the mode is set to `block` or `pass`, it changes the mute status of connected nodes. If there are nodes that require a change, the current execution is paused, the mute status is updated, and a new prompt queue is inserted.
+    * **Limitation**: Due to these characteristics, it does not function correctly when the batch count exceeds 1. Additionally, it does not guarantee proper operation when the seed is randomized or when the state of nodes is altered by actions such as `Queue Trigger`, `Set Widget Value`, or `Set Mute` before the Control Bridge.
+    * When utilizing this node, please structure the workflow in such a way that `Queue Trigger`, `Set Widget Value`, `Set Mute State`, and similar actions are executed at the end of the workflow.
+    * If you want to change the value of the seed at each iteration, please ensure that Set Widget Value is executed at the end of the workflow instead of using randomization.
+    * It is not a problem if the seed changes due to randomization, as long as it occurs after the Control Bridge section.
+    * You can find the `node_id` by checking through [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) using the format `Badge: #ID Nickname`.
+  * Experimental set of nodes for implementing loop functionality (tutorial to be prepared later / [example workflow](test/loop-test.json)).
+
+* HuggingFace - These nodes provide functionalities based on HuggingFace repository models.
+  * `HF Transformers Classifier Provider` - This is a node that provides a classifier based on HuggingFace's transformers models (see the sketch after this list).
+    * The 'repo id' parameter should contain a HuggingFace repo id. When `preset_repo_id` is set to `Manual repo id`, the manually entered repo id in `manual_repo_id` is used.
+    * e.g. 'rizvandwiki/gender-classification-2' is a repository that provides a model for gender classification.
+  * `SEGS Classify` - This node utilizes the `TRANSFORMERS_CLASSIFIER` loaded with 'HF Transformers Classifier Provider' to classify `SEGS`.
+    * The 'expr' allows forms like `label > number`, and when `preset_expr` is `Manual expr`, the expression entered in `manual_expr` is used.
+    * For example, in the case of `male <= 0.4`, if the score of the `male` label in the classification result is less than or equal to 0.4, the SEG is categorized as `filtered_SEGS`; otherwise, it is categorized as `remained_SEGS`.
+    * For supported labels, please refer to the `config.json` of the respective HuggingFace repository.
+    * `#Female` and `#Male` are not single labels but symbols that, for convenience, group multiple labels such as `Female, women, woman, ...`.
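+
+Roughly, the classification behind these nodes corresponds to the following transformers usage (a hedged sketch using the repo id mentioned above; the image file name is illustrative and this is not the node's actual code):
+
+```python
+from transformers import pipeline
+from PIL import Image
+
+# Load the image classifier from the HuggingFace repo id.
+classifier = pipeline("image-classification", model="rizvandwiki/gender-classification-2")
+
+face = Image.open("face_crop.png")  # stand-in for a cropped SEG image
+scores = {r["label"]: r["score"] for r in classifier(face)}
+
+# An expr such as `male <= 0.4` then routes the SEG to filtered_SEGS or remained_SEGS.
+print(scores, scores.get("male", 0.0) <= 0.4)
+```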
+
+## MMDet nodes
+* MMDetDetectorProvider - Loads an MMDet model to provide BBOX_DETECTOR and SEGM_DETECTOR.
+* To use the existing MMDetDetectorProvider, you need to enable the MMDet usage configuration.
+
+
+## Feature
+* Interactive SAM Detector (Clipspace) - When you right-click on a node that has 'MASK' and 'IMAGE' outputs, a context menu will open. From this menu, you can either open a dialog to create a SAM mask using 'Open in SAM Detector', or copy the content (likely mask data) using 'Copy (Clipspace)', generate a mask using 'Impact SAM Detector' from the clipspace menu, and then paste it using 'Paste (Clipspace)'.
+* Provides a feature that detects errors caused by mixing models and clips from checkpoints such as `SDXL Base`, `SDXL Refiner`, `SD1.x`, and `SD2.x` during sample execution, and reports an appropriate error message.
+
+## Deprecated
+* The following nodes have been kept only for compatibility with existing workflows, and are no longer supported. Please replace them with the new nodes.
+  * ONNX Detector (SEGS) -> BBOX Detector (SEGS)
+  * MMDetLoader -> MMDetDetectorProvider
+  * SegsMaskCombine -> SEGS to MASK (combined)
+  * BboxDetectorForEach -> BBOX Detector (SEGS)
+  * SegmDetectorForEach -> SEGM Detector (SEGS)
+  * BboxDetectorCombined -> BBOX Detector (combined)
+  * SegmDetectorCombined -> SEGM Detector (combined)
+  * MaskPainter -> PreviewBridge
+* To use the deprecated legacy nodes, you need to enable the MMDet usage configuration.
+
+
+## Ultralytics models
+* huggingface.co/Bingsu/[adetailer](https://huggingface.co/Bingsu/adetailer) - You can download face and people detection models, as well as clothing detection models.
+* ultralytics/[assets](https://github.com/ultralytics/assets/releases/) - You can download various types of detection models other than faces or people.
+
+## How to activate 'MMDet usage'
+* Upon the initial execution, an `impact-pack.ini` file will be generated in the `custom_nodes/ComfyUI-Impact-Pack` directory.
+```
+[default]
+dependency_version = 2
+mmdet_skip = True
+```
+* Change `mmdet_skip = True` to `mmdet_skip = False`:
+```
+[default]
+dependency_version = 2
+mmdet_skip = False
+```
+* Restart ComfyUI
+
+
+## Installation
+
+1. `cd custom_nodes`
+2. `git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack.git`
+3. `cd ComfyUI-Impact-Pack`
+4. (optional) `git submodule update --init --recursive`
+   * Impact Pack will automatically download the subpack during its initial launch.
+5. (optional) `python install.py`
+   * Impact Pack will automatically install its dependencies during its initial launch.
+   * For the portable version, you should execute the command `..\..\..\python_embeded\python.exe install.py` to run the installation script.
+6. Restart ComfyUI
+
+* NOTE: If an error occurs during the installation process, please refer to the [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md) for assistance.
+* You can use this [colab notebook](https://colab.research.google.com/github/ltdrdata/ComfyUI-Impact-Pack/blob/Main/notebook/comfyui_colab_impact_pack.ipynb) to launch it. This notebook automatically downloads the Impact Pack to the custom_nodes directory, installs the tested dependencies, and runs it.
+
+## Package Dependencies (if you need to set it up manually)
+
+* pip install
+  * openmim
+  * segment-anything
+  * ultralytics
+  * scikit-image
+  * piexif
+  * (optional) pycocotools
+  * (optional) onnxruntime
+
+* mim install (optional)
+  * mmcv==2.0.0, mmdet==3.0.0, mmengine==0.7.2
+
+* linux packages (ubuntu)
+  * libgl1-mesa-glx
+  * libglib2.0-0
+
+
+## Config example
+* Once you run the Impact Pack for the first time, an `impact-pack.ini` file will be automatically generated in the Impact Pack directory. You can modify this configuration file to customize the default behavior.
+  * `dependency_version` - don't touch this
+  * `mmdet_skip` - disables MMDet-based nodes and legacy nodes if `True`
+  * `sam_editor_cpu` - uses the cpu for the `SAM editor` instead of the gpu
+  * `sam_editor_model` - specifies the SAM model for the SAM editor.
+    * You can download various SAM models using ComfyUI-Manager.
+    * Path to the SAM models: `ComfyUI/models/sams`
+```
+[default]
+dependency_version = 9
+mmdet_skip = True
+sam_editor_cpu = False
+sam_editor_model = sam_vit_b_01ec64.pth
+```
+
+
+## Other Materials (auto-download on initial startup)
+
+* ComfyUI/models/mmdets/bbox <= https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth
+* ComfyUI/models/mmdets/bbox <= https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_anime-face_yolov3.py
+* ComfyUI/models/sams <= https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
+
+## Troubleshooting page
+* [Troubleshooting Page](troubleshooting/TROUBLESHOOTING.md)
+
+
+## How to use (DDetailer feature)
+
+#### 1. Basic auto face detection and refine example
+![simple](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple.png)
+* A face damaged by low resolution is regenerated at high resolution and composited back, restoring its details.
+* The FaceDetailer node is a combination of a Detector node for face detection and a Detailer node for image enhancement. See the [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/tutorial/advanced.md) for a more detailed explanation.
+* Pass the MMDetLoader's bbox model and the detection model loaded by SAMLoader to FaceDetailer. Since it performs the function of KSampler for image enhancement, its options overlap with KSampler's.
+* The MASK output of FaceDetailer provides a visualization of where the areas were detected and enhanced.
+
+![simple-orig](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple-original.png) ![simple-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/simple-refined.png)
+* You can see that the face in the image on the left has gained detail, as in the image on the right.
+
+#### 2. 2Pass refine (restore a severely damaged face)
+![2pass-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-simple.png)
+* Although two FaceDetailers can be attached together for a 2-pass configuration, the common inputs used by KSampler can be passed through DETAILER_PIPE, so using FaceDetailerPipe makes the configuration easier.
+* In the first pass, only rough outline recovery is needed, so restore at a reasonable resolution with low settings. However, increasing the dilation at this stage brings not only the face but also its surroundings into the recovery range, which is useful when you need to reshape areas beyond the face itself.
+
+![2pass-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-original.png) ![2pass-example-middle](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-1pass.png) ![2pass-example-result](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/2pass-2pass.png)
+* In the first stage, the severely damaged face is restored to some extent, and in the second stage, the details are restored.
+
+#### 3. Face Bbox(bounding box) + Person silhouette segmentation (prevents distortion of the background)
+![combination-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination.jpg)
+![combination-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination-original.png) ![combination-example-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/combination-refined.png)
+
+* The detail-emphasizing facial synthesis is delicately aligned with the contours of the face, and you can observe that it does not affect the image outside of the face.
+
+* The BBoxDetectorForEach node is used to detect faces, and the SAMDetectorCombined node is used to find the segment related to the detected face. By using the Segs & Mask node with the two masks obtained in this way, an accurate mask that intersects based on segs can be generated. If this generated mask is input to the DetailerForEach node, only the target area can be regenerated at high resolution and then composited back into the image.
+
+#### 4. Iterative Upscale
+![upscale-workflow-example](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-workflow.png)
+
+* The IterativeUpscale node enlarges an image/latent by a scale_factor. In this process, the upscale is carried out progressively by dividing it into steps (see the sketch below).
+* IterativeUpscale takes an Upscaler as an input, similar to a plugin, and uses it during each iteration. PixelKSampleUpscalerProvider is an Upscaler that converts the latent representation to pixel space and applies k-sampling.
+  * The upscale_model_opt is an optional parameter that determines whether to use the upscale function of the model base if available. Using the upscale function of the model base can significantly reduce the number of iterative steps required. If an x2 upscaler is used, the image/latent is first upscaled by a factor of 2 and then downscaled to the target scale at each step before further processing is done.
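+
+One plausible way to split a scale_factor into even steps looks like the following (illustrative only; the exact schedule IterativeUpscale uses internally may differ):
+
+```python
+# Split an overall scale factor into per-step factors by interpolating the
+# cumulative scale linearly from 1.0 to scale_factor.
+def step_factors(scale_factor: float, steps: int):
+    sizes = [1.0 + (scale_factor - 1.0) * (i / steps) for i in range(steps + 1)]
+    return [b / a for a, b in zip(sizes, sizes[1:])]
+
+print(step_factors(3.0, 3))  # ~[1.67, 1.40, 1.29]; the factors multiply to 3.0
+```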
+
+* The following images show a 304x512 image and the same image upscaled to three times its original size using IterativeUpscale.
+
+![combination-example-original](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-original.png) ![combination-example-refined](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/upscale-3x.png)
+
+
+#### 5. Interactive SAM Detector (Clipspace)
+
+* When you right-click on a node that outputs 'MASK' and 'IMAGE', a menu called "Open in SAM Detector" appears, as shown in the following picture. Clicking the menu opens a dialog where you can use SAM to generate a segment mask.
+![samdetector-menu](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-menu.png)
+
+* Clicking the left mouse button on a coordinate adds a positive prompt (shown in blue), marking an area that should be included. Clicking the right mouse button adds a negative prompt (shown in red), marking an area that should be excluded.
+* You can remove the points that were added by using the "undo" button. After selecting the points, pressing the "detect" button generates the mask. Additionally, you can adjust the fidelity slider to determine how tightly the mask follows the confidence region.
+
+![samdetector-dialog](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-dialog.jpg)
+
+* If you opened the dialog through "Open in SAM Detector" from the node, you can directly apply the changes by clicking the "Save to node" button. However, if you opened the dialog through the "clipspace" menu, you can save it to clipspace by clicking the "Save" button.
+
+![samdetector-result](https://github.com/ltdrdata/ComfyUI-extension-tutorials/raw/Main/ComfyUI-Impact-Pack/images/SAMDetector-result.jpg)
+
+* When you execute the node using the applied mask, you can observe that the image and mask are displayed separately.
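+
+Conceptually, the dialog's left/right clicks map to SAM point prompts. A minimal sketch with the segment-anything library and the checkpoint listed under "Other Materials" (illustrative, not the editor's actual code):
+
+```python
+import numpy as np
+from PIL import Image
+from segment_anything import sam_model_registry, SamPredictor
+
+# Same checkpoint the pack auto-downloads to ComfyUI/models/sams.
+sam = sam_model_registry["vit_b"](checkpoint="ComfyUI/models/sams/sam_vit_b_01ec64.pth")
+predictor = SamPredictor(sam)
+
+image_rgb = np.array(Image.open("input.png").convert("RGB"))
+predictor.set_image(image_rgb)
+
+# Left-clicks become positive points (label 1), right-clicks negative points (label 0).
+masks, scores, _ = predictor.predict(
+    point_coords=np.array([[320, 240], [100, 80]]),
+    point_labels=np.array([1, 0]),
+    multimask_output=False,
+)
+```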
+
+## Other Tutorials
+* [ComfyUI-extension-tutorials/ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-extension-tutorials/tree/Main/ComfyUI-Impact-Pack) - You can find various tutorials and workflows on this page.
+* [Advanced Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/advanced.md)
+* [SAM Application](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam.md)
+* [PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/previewbridge.md)
+* [Mask Pointer](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/maskpointer.md)
+* [ONNX Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ONNX.md)
+* [CLIPSeg Tutorial](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/clipseg.md)
+* [Extreme Highresolution Upscale](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/extreme-upscale.md)
+* [TwoSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplers.md)
+* [TwoAdvancedSamplersForMask](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoAdvancedSamplers.md)
+* [Advanced Iterative Upscale: PK_HOOK](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/pk_hook.md)
+* [Advanced Iterative Upscale: TwoSamplersForMask Upscale Provider](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/TwoSamplersUpscale.md)
+* [Interactive SAM + PreviewBridge](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sam_with_preview_bridge.md)
+* [ImageSender/ImageReceiver/LatentSender/LatentReceiver](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/sender_receiver.md)
+* [ImpactWildcardProcessor](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcardProcessor.md)
+
+
+## Credits
+
+ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI.
+
+dustysys/[ddetailer](https://github.com/dustysys/ddetailer) - DDetailer for Stable-diffusion-webUI extension.
+
+Bing-su/[dddetailer](https://github.com/Bing-su/dddetailer) - The anime-face-detector used in ddetailer has been updated to be compatible with mmdet 3.0.0, and a patch has also been applied to the pycocotools dependency for the Windows environment in ddetailer.
+
+facebook/[segment-anything](https://github.com/facebookresearch/segment-anything) - Segmentation Anything!
+
+hysts/[anime-face-detector](https://github.com/hysts/anime-face-detector) - Creator of `anime-face_yolov3`, which has impressive performance on a variety of art styles.
+
+open-mmlab/[mmdetection](https://github.com/open-mmlab/mmdetection) - Object detection toolset. `dd-person_mask2former` was trained via transfer learning using their [R-50 Mask2Former instance segmentation model](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former#instance-segmentation) as a base.
+
+biegert/[ComfyUI-CLIPSeg](https://github.com/biegert/ComfyUI-CLIPSeg) - This is a custom node that enables the use of CLIPSeg technology, which can find segments through prompts, in ComfyUI.
+
+BlenderNeko/[ComfyUI_TiledKSampler](https://github.com/BlenderNeko/ComfyUI_TiledKSampler) - The tile sampler allows high-resolution sampling even on GPUs with low VRAM.
+
+BlenderNeko/[ComfyUI_Noise](https://github.com/BlenderNeko/ComfyUI_Noise) - The noise injection feature relies on this extension.
+
+WASasquatch/[was-node-suite-comfyui](https://github.com/WASasquatch/was-node-suite-comfyui) - A powerful custom node extension for ComfyUI.
diff --git a/custom_nodes/ComfyUI-Impact-Pack/__init__.py b/custom_nodes/ComfyUI-Impact-Pack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c5f99532915af5c2a8b7254014ae6ac02ee8372
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/__init__.py
@@ -0,0 +1,428 @@
+"""
+@author: Dr.Lt.Data
+@title: Impact Pack
+@nickname: Impact Pack
+@description: This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. It also provides an iterative upscaler.
+"""
+
+import shutil
+import folder_paths
+import os
+import sys
+import traceback
+
+comfy_path = os.path.dirname(folder_paths.__file__)
+impact_path = os.path.join(os.path.dirname(__file__))
+subpack_path = os.path.join(os.path.dirname(__file__), "impact_subpack")
+modules_path = os.path.join(os.path.dirname(__file__), "modules")
+wildcards_path = os.path.join(os.path.dirname(__file__), "wildcards")
+custom_wildcards_path = os.path.join(os.path.dirname(__file__), "custom_wildcards")
+
+sys.path.append(modules_path)
+
+
+import impact.config
+import impact.sample_error_enhancer
+print(f"### Loading: ComfyUI-Impact-Pack ({impact.config.version})")
+
+
+def do_install():
+    import importlib
+    spec = importlib.util.spec_from_file_location('impact_install', os.path.join(os.path.dirname(__file__), 'install.py'))
+    impact_install = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(impact_install)
+
+
+# ensure dependency
+if not os.path.exists(os.path.join(subpack_path, ".git")) and os.path.exists(subpack_path):
+    print(f"### ComfyUI-Impact-Pack: corrupted subpack detected.")
+    shutil.rmtree(subpack_path)
+
+if impact.config.get_config()['dependency_version'] < impact.config.dependency_version or not os.path.exists(subpack_path):
+    print(f"### ComfyUI-Impact-Pack: Updating dependencies [{impact.config.get_config()['dependency_version']} -> {impact.config.dependency_version}]")
+    do_install()
+
+sys.path.append(subpack_path)
+
+# Core
+# recheck dependencies for colab
+try:
+    import impact.subpack_nodes  # This import must be done before cv2.
+
+    import folder_paths
+    import torch
+    import cv2
+    import numpy as np
+    import comfy.samplers
+    import comfy.sd
+    import warnings
+    from PIL import Image, ImageFilter
+    from skimage.measure import label, regionprops
+    from collections import namedtuple
+    import piexif
+
+    if not impact.config.get_config()['mmdet_skip']:
+        import mmcv
+        from mmdet.apis import (inference_detector, init_detector)
+        from mmdet.evaluation import get_classes
+except:
+    import importlib
+    print("### ComfyUI-Impact-Pack: Reinstall dependencies (several dependencies are missing.)")
+    do_install()
+
+import impact.impact_server  # to load server api
+
+def setup_js():
+    import nodes
+    js_dest_path = os.path.join(comfy_path, "web", "extensions", "impact-pack")
+
+    if hasattr(nodes, "EXTENSION_WEB_DIRS"):
+        if os.path.exists(js_dest_path):
+            shutil.rmtree(js_dest_path)
+    else:
+        print(f"[WARN] ComfyUI-Impact-Pack: Your ComfyUI version is outdated. 
Please update to the latest version.") + # setup js + if not os.path.exists(js_dest_path): + os.makedirs(js_dest_path) + + js_src_path = os.path.join(impact_path, "js", "impact-pack.js") + shutil.copy(js_src_path, js_dest_path) + + js_src_path = os.path.join(impact_path, "js", "impact-sam-editor.js") + shutil.copy(js_src_path, js_dest_path) + + js_src_path = os.path.join(impact_path, "js", "comboBoolMigration.js") + shutil.copy(js_src_path, js_dest_path) + + +setup_js() + +from impact.impact_pack import * +from impact.detectors import * +from impact.pipe import * +from impact.logics import * +from impact.util_nodes import * +from impact.segs_nodes import * +from impact.special_samplers import * +from impact.hf_nodes import * + +impact.wildcards.read_wildcard_dict(wildcards_path) +try: + impact.wildcards.read_wildcard_dict(impact.config.get_config()['custom_wildcards']) +except Exception as e: + print(f"[Impact Pack] Failed to load custom wildcards directory.") + +NODE_CLASS_MAPPINGS = { + "SAMLoader": SAMLoader, + "CLIPSegDetectorProvider": CLIPSegDetectorProvider, + "ONNXDetectorProvider": ONNXDetectorProvider, + + "BitwiseAndMaskForEach": BitwiseAndMaskForEach, + "SubtractMaskForEach": SubtractMaskForEach, + + "DetailerForEach": DetailerForEach, + "DetailerForEachDebug": DetailerForEachTest, + "DetailerForEachPipe": DetailerForEachPipe, + "DetailerForEachDebugPipe": DetailerForEachTestPipe, + + "SAMDetectorCombined": SAMDetectorCombined, + "SAMDetectorSegmented": SAMDetectorSegmented, + + "FaceDetailer": FaceDetailer, + "FaceDetailerPipe": FaceDetailerPipe, + "MaskDetailerPipe": MaskDetailerPipe, + + "ToDetailerPipe": ToDetailerPipe, + "ToDetailerPipeSDXL": ToDetailerPipeSDXL, + "FromDetailerPipe": FromDetailerPipe, + "FromDetailerPipe_v2": FromDetailerPipe_v2, + "FromDetailerPipeSDXL": FromDetailerPipe_SDXL, + "ToBasicPipe": ToBasicPipe, + "FromBasicPipe": FromBasicPipe, + "FromBasicPipe_v2": FromBasicPipe_v2, + "BasicPipeToDetailerPipe": BasicPipeToDetailerPipe, + "BasicPipeToDetailerPipeSDXL": BasicPipeToDetailerPipeSDXL, + "DetailerPipeToBasicPipe": DetailerPipeToBasicPipe, + "EditBasicPipe": EditBasicPipe, + "EditDetailerPipe": EditDetailerPipe, + "EditDetailerPipeSDXL": EditDetailerPipeSDXL, + + "LatentPixelScale": LatentPixelScale, + "PixelKSampleUpscalerProvider": PixelKSampleUpscalerProvider, + "PixelKSampleUpscalerProviderPipe": PixelKSampleUpscalerProviderPipe, + "IterativeLatentUpscale": IterativeLatentUpscale, + "IterativeImageUpscale": IterativeImageUpscale, + "PixelTiledKSampleUpscalerProvider": PixelTiledKSampleUpscalerProvider, + "PixelTiledKSampleUpscalerProviderPipe": PixelTiledKSampleUpscalerProviderPipe, + "TwoSamplersForMaskUpscalerProvider": TwoSamplersForMaskUpscalerProvider, + "TwoSamplersForMaskUpscalerProviderPipe": TwoSamplersForMaskUpscalerProviderPipe, + + "PixelKSampleHookCombine": PixelKSampleHookCombine, + "DenoiseScheduleHookProvider": DenoiseScheduleHookProvider, + "CfgScheduleHookProvider": CfgScheduleHookProvider, + "NoiseInjectionHookProvider": NoiseInjectionHookProvider, + "NoiseInjectionDetailerHookProvider": NoiseInjectionDetailerHookProvider, + "CoreMLDetailerHookProvider": CoreMLDetailerHookProvider, + + "BitwiseAndMask": BitwiseAndMask, + "SubtractMask": SubtractMask, + "AddMask": AddMask, + "ImpactSegsAndMask": SegsBitwiseAndMask, + "ImpactSegsAndMaskForEach": SegsBitwiseAndMaskForEach, + "EmptySegs": EmptySEGS, + + "MediaPipeFaceMeshToSEGS": MediaPipeFaceMeshToSEGS, + "MaskToSEGS": MaskToSEGS, + "MaskToSEGS_for_AnimateDiff": 
MaskToSEGS_for_AnimateDiff, + "ToBinaryMask": ToBinaryMask, + "MasksToMaskList": MasksToMaskList, + "MaskListToMaskBatch": MaskListToMaskBatch, + "ImageListToImageBatch": ImageListToMaskBatch, + + "BboxDetectorSEGS": BboxDetectorForEach, + "SegmDetectorSEGS": SegmDetectorForEach, + "ONNXDetectorSEGS": BboxDetectorForEach, + "ImpactSimpleDetectorSEGS_for_AD": SimpleDetectorForAnimateDiff, + "ImpactSimpleDetectorSEGS": SimpleDetectorForEach, + "ImpactSimpleDetectorSEGSPipe": SimpleDetectorForEachPipe, + "ImpactControlNetApplySEGS": ControlNetApplySEGS, + + "ImpactDecomposeSEGS": DecomposeSEGS, + "ImpactAssembleSEGS": AssembleSEGS, + "ImpactFrom_SEG_ELT": From_SEG_ELT, + "ImpactEdit_SEG_ELT": Edit_SEG_ELT, + "ImpactDilate_Mask_SEG_ELT": Dilate_SEG_ELT, + "ImpactDilateMask": DilateMask, + "ImpactScaleBy_BBOX_SEG_ELT": SEG_ELT_BBOX_ScaleBy, + + "BboxDetectorCombined_v2": BboxDetectorCombined, + "SegmDetectorCombined_v2": SegmDetectorCombined, + "SegsToCombinedMask": SegsToCombinedMask, + + "KSamplerProvider": KSamplerProvider, + "TwoSamplersForMask": TwoSamplersForMask, + "TiledKSamplerProvider": TiledKSamplerProvider, + + "KSamplerAdvancedProvider": KSamplerAdvancedProvider, + "TwoAdvancedSamplersForMask": TwoAdvancedSamplersForMask, + + "PreviewBridge": PreviewBridge, + "ImageSender": ImageSender, + "ImageReceiver": ImageReceiver, + "LatentSender": LatentSender, + "LatentReceiver": LatentReceiver, + "ImageMaskSwitch": ImageMaskSwitch, + "LatentSwitch": GeneralSwitch, + "SEGSSwitch": GeneralSwitch, + "ImpactSwitch": GeneralSwitch, + "ImpactInversedSwitch": GeneralInversedSwitch, + + "ImpactWildcardProcessor": ImpactWildcardProcessor, + "ImpactWildcardEncode": ImpactWildcardEncode, + + "SEGSDetailer": SEGSDetailer, + "SEGSPaste": SEGSPaste, + "SEGSPreview": SEGSPreview, + "SEGSToImageList": SEGSToImageList, + "ImpactSEGSToMaskList": SEGSToMaskList, + "ImpactSEGSToMaskBatch": SEGSToMaskBatch, + "ImpactSEGSConcat": SEGSConcat, + "ImpactSEGSPicker": SEGSPicker, + + "SEGSDetailerForAnimateDiff": SEGSDetailerForAnimateDiff, + + "ImpactKSamplerBasicPipe": KSamplerBasicPipe, + "ImpactKSamplerAdvancedBasicPipe": KSamplerAdvancedBasicPipe, + + "ReencodeLatent": ReencodeLatent, + "ReencodeLatentPipe": ReencodeLatentPipe, + + "ImpactImageBatchToImageList": ImageBatchToImageList, + "ImpactMakeImageList": MakeImageList, + "ImpactMakeImageBatch": MakeImageBatch, + + "RegionalSampler": RegionalSampler, + "RegionalSamplerAdvanced": RegionalSamplerAdvanced, + "CombineRegionalPrompts": CombineRegionalPrompts, + "RegionalPrompt": RegionalPrompt, + + "ImpactCombineConditionings": CombineConditionings, + + "ImpactSEGSLabelFilter": SEGSLabelFilter, + "ImpactSEGSRangeFilter": SEGSRangeFilter, + "ImpactSEGSOrderedFilter": SEGSOrderedFilter, + + "ImpactCompare": ImpactCompare, + "ImpactConditionalBranch": ImpactConditionalBranch, + "ImpactInt": ImpactInt, + # "ImpactFloat": ImpactFloat, + "ImpactValueSender": ImpactValueSender, + "ImpactValueReceiver": ImpactValueReceiver, + "ImpactImageInfo": ImpactImageInfo, + "ImpactMinMax": ImpactMinMax, + "ImpactNeg": ImpactNeg, + "ImpactConditionalStopIteration": ImpactConditionalStopIteration, + "ImpactStringSelector": StringSelector, + + "RemoveNoiseMask": RemoveNoiseMask, + + "ImpactLogger": ImpactLogger, + "ImpactDummyInput": ImpactDummyInput, + + "ImpactQueueTrigger": ImpactQueueTrigger, + "ImpactQueueTriggerCountdown": ImpactQueueTriggerCountdown, + "ImpactSetWidgetValue": ImpactSetWidgetValue, + "ImpactNodeSetMuteState": ImpactNodeSetMuteState, + "ImpactControlBridge": 
ImpactControlBridge, + "ImpactIsNotEmptySEGS": ImpactNotEmptySEGS, + "ImpactSleep": ImpactSleep, + + "ImpactHFTransformersClassifierProvider": HF_TransformersClassifierProvider, + "ImpactSEGSClassify": SEGS_Classify +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + "BboxDetectorSEGS": "BBOX Detector (SEGS)", + "SegmDetectorSEGS": "SEGM Detector (SEGS)", + "ONNXDetectorSEGS": "ONNX Detector (SEGS/legacy) - use BBOXDetector", + "ImpactSimpleDetectorSEGS_for_AD": "Simple Detector for AnimateDiff (SEGS)", + "ImpactSimpleDetectorSEGS": "Simple Detector (SEGS)", + "ImpactSimpleDetectorSEGSPipe": "Simple Detector (SEGS/pipe)", + "ImpactControlNetApplySEGS": "ControlNetApply (SEGS)", + + "BboxDetectorCombined_v2": "BBOX Detector (combined)", + "SegmDetectorCombined_v2": "SEGM Detector (combined)", + "SegsToCombinedMask": "SEGS to MASK (combined)", + "MediaPipeFaceMeshToSEGS": "MediaPipe FaceMesh to SEGS", + "MaskToSEGS": "MASK to SEGS", + "MaskToSEGS_for_AnimateDiff": "MASK to SEGS for AnimateDiff", + "BitwiseAndMaskForEach": "Bitwise(SEGS & SEGS)", + "SubtractMaskForEach": "Bitwise(SEGS - SEGS)", + "ImpactSegsAndMask": "Bitwise(SEGS & MASK)", + "ImpactSegsAndMaskForEach": "Bitwise(SEGS & MASKS ForEach)", + "BitwiseAndMask": "Bitwise(MASK & MASK)", + "SubtractMask": "Bitwise(MASK - MASK)", + "AddMask": "Bitwise(MASK + MASK)", + "DetailerForEach": "Detailer (SEGS)", + "DetailerForEachPipe": "Detailer (SEGS/pipe)", + "DetailerForEachDebug": "DetailerDebug (SEGS)", + "DetailerForEachDebugPipe": "DetailerDebug (SEGS/pipe)", + "SEGSDetailerForAnimateDiff": "Detailer For AnimateDiff (SEGS/pipe)", + + "SAMDetectorCombined": "SAMDetector (combined)", + "SAMDetectorSegmented": "SAMDetector (segmented)", + "FaceDetailerPipe": "FaceDetailer (pipe)", + "MaskDetailerPipe": "MaskDetailer (Pipe)", + + "FromDetailerPipeSDXL": "FromDetailer (SDXL/pipe)", + "BasicPipeToDetailerPipeSDXL": "BasicPipe -> DetailerPipe (SDXL)", + "EditDetailerPipeSDXL": "Edit DetailerPipe (SDXL)", + + "BasicPipeToDetailerPipe": "BasicPipe -> DetailerPipe", + "DetailerPipeToBasicPipe": "DetailerPipe -> BasicPipe", + "EditBasicPipe": "Edit BasicPipe", + "EditDetailerPipe": "Edit DetailerPipe", + + "LatentPixelScale": "Latent Scale (on Pixel Space)", + "IterativeLatentUpscale": "Iterative Upscale (Latent)", + "IterativeImageUpscale": "Iterative Upscale (Image)", + + "TwoSamplersForMaskUpscalerProvider": "TwoSamplersForMask Upscaler Provider", + "TwoSamplersForMaskUpscalerProviderPipe": "TwoSamplersForMask Upscaler Provider (pipe)", + + "ReencodeLatent": "Reencode Latent", + "ReencodeLatentPipe": "Reencode Latent (pipe)", + + "ImpactKSamplerBasicPipe": "KSampler (pipe)", + "ImpactKSamplerAdvancedBasicPipe": "KSampler (Advanced/pipe)", + "ImpactSEGSLabelFilter": "SEGS Filter (label)", + "ImpactSEGSRangeFilter": "SEGS Filter (range)", + "ImpactSEGSOrderedFilter": "SEGS Filter (ordered)", + "ImpactSEGSConcat": "SEGS Concat", + "ImpactSEGSToMaskList": "SEGS to Mask List", + "ImpactSEGSToMaskBatch": "SEGS to Mask Batch", + "ImpactSEGSPicker": "Picker (SEGS)", + + "ImpactDecomposeSEGS": "Decompose (SEGS)", + "ImpactAssembleSEGS": "Assemble (SEGS)", + "ImpactFrom_SEG_ELT": "From SEG_ELT", + "ImpactEdit_SEG_ELT": "Edit SEG_ELT", + "ImpactDilate_Mask_SEG_ELT": "Dilate Mask (SEG_ELT)", + "ImpactScaleBy_BBOX_SEG_ELT": "ScaleBy BBOX (SEG_ELT)", + "ImpactDilateMask": "Dilate Mask", + + "PreviewBridge": "Preview Bridge", + "ImageSender": "Image Sender", + "ImageReceiver": "Image Receiver", + "ImageMaskSwitch": "Switch (images, mask)", + "ImpactSwitch": "Switch 
(Any)", + "ImpactInversedSwitch": "Inversed Switch (Any)", + + "MasksToMaskList": "Masks to Mask List", + "MaskListToMaskBatch": "Mask List to Masks", + "ImpactImageBatchToImageList": "Image batch to Image List", + "ImageListToImageBatch": "Image List to Image Batch", + "ImpactMakeImageList": "Make Image List", + "ImpactMakeImageBatch": "Make Image Batch", + "ImpactStringSelector": "String Selector", + "ImpactIsNotEmptySEGS": "SEGS isn't Empty", + + "RemoveNoiseMask": "Remove Noise Mask", + + "ImpactCombineConditionings": "Combine Conditionings", + + "ImpactQueueTrigger": "Queue Trigger", + "ImpactQueueTriggerCountdown": "Queue Trigger (Countdown)", + "ImpactSetWidgetValue": "Set Widget Value", + "ImpactNodeSetMuteState": "Set Mute State", + "ImpactControlBridge": "Control Bridge", + "ImpactSleep": "Sleep", + + "ImpactHFTransformersClassifierProvider": "HF Transformers Classifier Provider", + "ImpactSEGSClassify": "SEGS Classify", + + "LatentSwitch": "Switch (latent/legacy)", + "SEGSSwitch": "Switch (SEGS/legacy)" +} + +if not impact.config.get_config()['mmdet_skip']: + from impact.mmdet_nodes import * + import impact.legacy_nodes + NODE_CLASS_MAPPINGS.update({ + "MMDetDetectorProvider": MMDetDetectorProvider, + "MMDetLoader": impact.legacy_nodes.MMDetLoader, + "MaskPainter": impact.legacy_nodes.MaskPainter, + "SegsMaskCombine": impact.legacy_nodes.SegsMaskCombine, + "BboxDetectorForEach": impact.legacy_nodes.BboxDetectorForEach, + "SegmDetectorForEach": impact.legacy_nodes.SegmDetectorForEach, + "BboxDetectorCombined": impact.legacy_nodes.BboxDetectorCombined, + "SegmDetectorCombined": impact.legacy_nodes.SegmDetectorCombined, + }) + + NODE_DISPLAY_NAME_MAPPINGS.update({ + "MaskPainter": "MaskPainter (Deprecated)", + "MMDetLoader": "MMDetLoader (Legacy)", + "SegsMaskCombine": "SegsMaskCombine (Legacy)", + "BboxDetectorForEach": "BboxDetectorForEach (Legacy)", + "SegmDetectorForEach": "SegmDetectorForEach (Legacy)", + "BboxDetectorCombined": "BboxDetectorCombined (Legacy)", + "SegmDetectorCombined": "SegmDetectorCombined (Legacy)", + }) + +try: + import impact.subpack_nodes + + NODE_CLASS_MAPPINGS.update(impact.subpack_nodes.NODE_CLASS_MAPPINGS) + NODE_DISPLAY_NAME_MAPPINGS.update(impact.subpack_nodes.NODE_DISPLAY_NAME_MAPPINGS) +except Exception as e: + print("### ComfyUI-Impact-Pack: (IMPORT FAILED) Subpack\n") + print(" The module at the `custom_nodes/ComfyUI-Impact-Pack/impact_subpack` path appears to be incomplete.") + print(" Recommended to delete the path and restart ComfyUI.") + print(" If the issue persists, please report it to https://github.com/ltdrdata/ComfyUI-Impact-Pack/issues.") + print("\n---------------------------------") + traceback.print_exc() + print("---------------------------------\n") + +WEB_DIRECTORY = "js" +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ab0aa305b7d527aaab00631df0f2dfd03f77468 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..693ccc509aea465e87ed6e628105bb261e828244 Binary files /dev/null and 
b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e874dc76b21351c0fa83d4046228ddfe556858c6 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/__pycache__/install.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here b/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards/put_wildcards_here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-Impact-Pack/disable.py b/custom_nodes/ComfyUI-Impact-Pack/disable.py new file mode 100644 index 0000000000000000000000000000000000000000..2d62417c14128faca59ced13bbd83d5cd8708da3 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/disable.py @@ -0,0 +1,38 @@ +import os +import sys +import time +import platform +import shutil +import subprocess + +comfy_path = '../..' + +def rmtree(path): + retry_count = 3 + + while True: + try: + retry_count -= 1 + + if platform.system() == "Windows": + subprocess.check_call(['attrib', '-R', path + '\\*', '/S']) + + shutil.rmtree(path) + + return True + + except Exception as ex: + print(f"ex: {ex}") + time.sleep(3) + + if retry_count < 0: + raise ex + + print(f"Uninstall retry({retry_count})") + +js_dest_path = os.path.join(comfy_path, "web", "extensions", "impact-pack") + +if os.path.exists(js_dest_path): + rmtree(js_dest_path) + + diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini b/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini new file mode 100644 index 0000000000000000000000000000000000000000..cfc2be426040af5434f191be00c4b93903e19735 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact-pack.ini @@ -0,0 +1,8 @@ +[default] +dependency_version = 19 +mmdet_skip = True +sam_editor_cpu = False +sam_editor_model = sam_vit_b_01ec64.pth +custom_wildcards = C:\Users\matsu\Documents\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\custom_wildcards +disable_gpu_opencv = True + diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/.gitignore b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..392e184851e95dded25b3623de11b524e9ae41b2 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/.gitignore @@ -0,0 +1,5 @@ +__pycache__ +*.ini +wildcards/** +.vscode/ +.idea/ \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0ad25db4bd1d86c452db3f9602ccdbe172438f52 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. 
diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f3700a13fb9123c50280b8c30c949eabda29b01a --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/README.md @@ -0,0 +1,18 @@ +# ComfyUI-Impact-Subpack +This extension serves as a complement to the Impact Pack, offering features that are not deemed suitable for inclusion by default in the ComfyUI Impact Pack. + +The nodes in this repository cannot be used standalone and depend on [ComfyUI-Impact-Pack](https://github.com/ltdrdata/ComfyUI-Impact-Pack). + +## Nodes +* UltralyticsDetectorProvider - This node provides an object detection detector based on Ultralytics. + * By using this Detector Provider, you can replace the existing mmdet-based detector. + + +## Credits + +ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI. + +Bing-su/[adetailer](https://github.com/Bing-su/adetailer/) - This repository provides an object detection model and features based on Ultralytics. + +huggingface/Bingsu/[adetailer](https://huggingface.co/Bingsu/adetailer/tree/main) - This repository offers various models based on Ultralytics. +* You can download other models supported by the UltralyticsDetectorProvider from here. \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76b634272221aebd46da5c0892ab6cf6d9ef9c3e Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab38ef71240ddd1a4f9d15b50ad13750a7c6b8b0 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subcore.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fe7165607ac2c66c406400fe5da29224e831bc2 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/__pycache__/subpack_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cf3657041987aa9c1709a2cf4a3d54174f1216d Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subpack_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ab4c61a2604ae5ebefa9d36d5d7e1c097865c5 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subcore.py @@ -0,0 +1,203 @@ +from
pathlib import Path +from PIL import Image + +import impact.core as core +import cv2 +import numpy as np +from torchvision.transforms.functional import to_pil_image +import torch + +try: + from ultralytics import YOLO +except Exception as e: + print(e) + print(f"\n!!!!!\n\n[ComfyUI-Impact-Subpack] If this error occurs, please check the following link:\n\thttps://github.com/ltdrdata/ComfyUI-Impact-Pack/blob/Main/troubleshooting/TROUBLESHOOTING.md\n\n!!!!!\n") + raise e + + +def load_yolo(model_path: str): + try: + return YOLO(model_path) + except ModuleNotFoundError: + # https://github.com/ultralytics/ultralytics/issues/3856 + YOLO("yolov8n.pt") + return YOLO(model_path) + + +def inference_bbox( + model, + image: Image.Image, + confidence: float = 0.3, + device: str = "", +): + pred = model(image, conf=confidence, device=device) + + bboxes = pred[0].boxes.xyxy.cpu().numpy() + cv2_image = np.array(image) + if len(cv2_image.shape) == 3: + cv2_image = cv2_image[:, :, ::-1].copy() # Convert RGB to BGR for cv2 processing + else: + # Handle the grayscale image here + # For example, you might want to convert it to a 3-channel grayscale image for consistency: + cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_GRAY2BGR) + cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY) + + segms = [] + for x0, y0, x1, y1 in bboxes: + cv2_mask = np.zeros(cv2_gray.shape, np.uint8) + cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1) + cv2_mask_bool = cv2_mask.astype(bool) + segms.append(cv2_mask_bool) + + n, m = bboxes.shape + if n == 0: + return [[], [], [], []] + + results = [[], [], [], []] + for i in range(len(bboxes)): + results[0].append(pred[0].names[int(pred[0].boxes[i].cls.item())]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(pred[0].boxes[i].conf.cpu().numpy()) + + return results + + +def inference_segm( + model, + image: Image.Image, + confidence: float = 0.3, + device: str = "", +): + pred = model(image, conf=confidence, device=device) + + bboxes = pred[0].boxes.xyxy.cpu().numpy() + n, m = bboxes.shape + if n == 0: + return [[], [], [], []] + + # NOTE: masks.data will be None when n == 0 + segms = pred[0].masks.data.cpu().numpy() + + results = [[], [], [], []] + for i in range(len(bboxes)): + results[0].append(pred[0].names[int(pred[0].boxes[i].cls.item())]) + results[1].append(bboxes[i]) + + mask = torch.from_numpy(segms[i]) + scaled_mask = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0), size=(image.size[1], image.size[0]), + mode='bilinear', align_corners=False) + scaled_mask = scaled_mask.squeeze().squeeze() + + results[2].append(scaled_mask.numpy()) + results[3].append(pred[0].boxes[i].conf.cpu().numpy()) + + return results + + +class UltraBBoxDetector: + bbox_model = None + + def __init__(self, bbox_model): + self.bbox_model = bbox_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + detected_results = inference_bbox(self.bbox_model, core.tensor2pil(image), threshold) + segmasks = core.create_segmasks(detected_results) + + if dilation > 0: + segmasks = core.dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + + for x, label in zip(segmasks, detected_results[0]): + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = core.make_crop_region(w, h, item_bbox, crop_factor) 
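+ # make_crop_region grows the detected bbox by crop_factor so the detailer gets surrounding context; the hook below may then adjust that region.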
+ + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region) + + cropped_image = core.crop_image(image, crop_region) + cropped_mask = core.crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = core.SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, label, None) + + items.append(item) + + shape = image.shape[1], image.shape[2] + return shape, items + + def detect_combined(self, image, threshold, dilation): + detected_results = inference_bbox(self.bbox_model, core.tensor2pil(image), threshold) + segmasks = core.create_segmasks(detected_results) + if dilation > 0: + segmasks = core.dilate_masks(segmasks, dilation) + + return core.combine_masks(segmasks) + + def setAux(self, x): + pass + + +class UltraSegmDetector: + bbox_model = None + + def __init__(self, bbox_model): + self.bbox_model = bbox_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + detected_results = inference_segm(self.bbox_model, core.tensor2pil(image), threshold) + segmasks = core.create_segmasks(detected_results) + + if dilation > 0: + segmasks = core.dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + + for x, label in zip(segmasks, detected_results[0]): + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = core.make_crop_region(w, h, item_bbox, crop_factor) + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(w, h, item_bbox, crop_region) + + cropped_image = core.crop_image(image, crop_region) + cropped_mask = core.crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = core.SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, label, None) + + items.append(item) + + shape = image.shape[1], image.shape[2] + return shape, items + + def detect_combined(self, image, threshold, dilation): + detected_results = inference_segm(self.bbox_model, core.tensor2pil(image), threshold) + segmasks = core.create_segmasks(detected_results) + if dilation > 0: + segmasks = core.dilate_masks(segmasks, dilation) + + return core.combine_masks(segmasks) + + def setAux(self, x): + pass \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subpack_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subpack_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..13a14e153bc009cd1f00032b13bf6d0d8c9ab16a --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/impact/subpack_nodes.py @@ -0,0 +1,42 @@ +import os +import folder_paths +import impact.core as core +import impact.subcore as subcore +from impact.utils import add_folder_path_and_extensions + +print(f"### Loading: ComfyUI-Impact-Pack (Subpack: V0.3.2)") + +model_path = folder_paths.models_dir +add_folder_path_and_extensions("ultralytics_bbox", [os.path.join(model_path, "ultralytics", "bbox")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("ultralytics_segm", [os.path.join(model_path, "ultralytics", "segm")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("ultralytics", [os.path.join(model_path, "ultralytics")], 
folder_paths.supported_pt_extensions) + +class UltralyticsDetectorProvider: + @classmethod + def INPUT_TYPES(s): + bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("ultralytics_bbox")] + segms = ["segm/"+x for x in folder_paths.get_filename_list("ultralytics_segm")] + return {"required": {"model_name": (bboxs + segms, )}} + RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR") + FUNCTION = "doit" + + CATEGORY = "ImpactPack" + + def doit(self, model_name): + model_path = folder_paths.get_full_path("ultralytics", model_name) + model = subcore.load_yolo(model_path) + + if model_name.startswith("bbox"): + return subcore.UltraBBoxDetector(model), core.NO_SEGM_DETECTOR() + else: + return subcore.UltraBBoxDetector(model), subcore.UltraSegmDetector(model) + + +NODE_CLASS_MAPPINGS = { + "UltralyticsDetectorProvider": UltralyticsDetectorProvider +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + +} diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py new file mode 100644 index 0000000000000000000000000000000000000000..9145fbe0f1d52192d507389f8158b64ca1b9fc64 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/install.py @@ -0,0 +1,32 @@ +import os +import sys +from torchvision.datasets.utils import download_url + +subpack_path = os.path.join(os.path.dirname(__file__)) +comfy_path = os.path.join(subpack_path, '..', '..', '..') + +sys.path.append(comfy_path) + +import folder_paths +model_path = folder_paths.models_dir +ultralytics_bbox_path = os.path.join(model_path, "ultralytics", "bbox") +ultralytics_segm_path = os.path.join(model_path, "ultralytics", "segm") + +if not os.path.exists(os.path.join(subpack_path, '..', '..', 'skip_download_model')): + if not os.path.exists(ultralytics_bbox_path): + os.makedirs(ultralytics_bbox_path) + + if not os.path.exists(ultralytics_segm_path): + os.makedirs(ultralytics_segm_path) + + if not os.path.exists(os.path.join(ultralytics_bbox_path, "face_yolov8m.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt", + ultralytics_bbox_path) + + if not os.path.exists(os.path.join(ultralytics_bbox_path, "hand_yolov8s.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt", + ultralytics_bbox_path) + + if not os.path.exists(os.path.join(ultralytics_segm_path, "person_yolov8m-seg.pt")): + download_url("https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8m-seg.pt", + ultralytics_segm_path) diff --git a/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d0a784681f77b24bf3c98efc34c9e5091862aad --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/impact_subpack/requirements.txt @@ -0,0 +1 @@ +ultralytics!=8.0.177 \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/install.py b/custom_nodes/ComfyUI-Impact-Pack/install.py new file mode 100644 index 0000000000000000000000000000000000000000..e1b54942a22dc8b777b5907c370881fd72469ed8 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/install.py @@ -0,0 +1,285 @@ +import os +import shutil +import sys +import subprocess +import threading +import locale +import traceback +import re + + +if sys.argv[0] == 'install.py': + sys.path.append('.') # for portable version + + +impact_path = os.path.join(os.path.dirname(__file__), "modules") +old_subpack_path = 
os.path.join(os.path.dirname(__file__), "subpack") +subpack_path = os.path.join(os.path.dirname(__file__), "impact_subpack") +subpack_repo = "https://github.com/ltdrdata/ComfyUI-Impact-Subpack" +comfy_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) + + +sys.path.append(impact_path) +sys.path.append(comfy_path) + + +# --- +def handle_stream(stream, is_stdout): + stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') + + for msg in stream: + if is_stdout: + print(msg, end="", file=sys.stdout) + else: + print(msg, end="", file=sys.stderr) + + +def process_wrap(cmd_str, cwd=None, handler=None): + print(f"[Impact Pack] EXECUTE: {cmd_str} in '{cwd}'") + process = subprocess.Popen(cmd_str, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1) + + if handler is None: + handler = handle_stream + + stdout_thread = threading.Thread(target=handler, args=(process.stdout, True)) + stderr_thread = threading.Thread(target=handler, args=(process.stderr, False)) + + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return process.wait() +# --- + + +pip_list = None + + +def get_installed_packages(): + global pip_list + + if pip_list is None: + try: + result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True) + pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()]) + except subprocess.CalledProcessError as e: + print(f"[ComfyUI-Manager] Failed to retrieve the information of installed pip packages.") + return set() + + return pip_list + + +def is_installed(name): + name = name.strip() + pattern = r'([^<>!=]+)([<>!=]=?)' + match = re.search(pattern, name) + + if match: + name = match.group(1) + + result = name.lower() in get_installed_packages() + return result + + +def is_requirements_installed(file_path): + print(f"req_path: {file_path}") + if os.path.exists(file_path): + with open(file_path, 'r') as file: + lines = file.readlines() + for line in lines: + if not is_installed(line): + return False + + return True + +try: + import platform + import folder_paths + from torchvision.datasets.utils import download_url + import impact.config + + + print("### ComfyUI-Impact-Pack: Check dependencies") + + if "python_embeded" in sys.executable or "python_embedded" in sys.executable: + pip_install = [sys.executable, '-s', '-m', 'pip', 'install'] + mim_install = [sys.executable, '-s', '-m', 'mim', 'install'] + else: + pip_install = [sys.executable, '-m', 'pip', 'install'] + mim_install = [sys.executable, '-m', 'mim', 'install'] + + + def ensure_subpack(): + import git + if os.path.exists(subpack_path): + try: + repo = git.Repo(subpack_path) + repo.remotes.origin.pull() + except: + traceback.print_exc() + if platform.system() == 'Windows': + print(f"[ComfyUI-Impact-Pack] Please turn off ComfyUI and remove '{subpack_path}' and restart ComfyUI.") + else: + shutil.rmtree(subpack_path) + git.Repo.clone_from(subpack_repo, subpack_path) + else: + git.Repo.clone_from(subpack_repo, subpack_path) + + if os.path.exists(old_subpack_path): + shutil.rmtree(old_subpack_path) + + + def remove_olds(): + global comfy_path + + comfy_path = os.path.dirname(folder_paths.__file__) + custom_nodes_path = os.path.join(comfy_path, "custom_nodes") + old_ini_path = os.path.join(custom_nodes_path, "impact-pack.ini") + old_py_path = os.path.join(custom_nodes_path, "comfyui-impact-pack.py") + + if os.path.exists(impact.config.old_config_path): + 
impact.config.get_config()['mmdet_skip'] = False + os.remove(impact.config.old_config_path) + + if os.path.exists(old_ini_path): + print(f"Delete legacy file: {old_ini_path}") + os.remove(old_ini_path) + + if os.path.exists(old_py_path): + print(f"Delete legacy file: {old_py_path}") + os.remove(old_py_path) + + + def ensure_pip_packages_first(): + subpack_req = os.path.join(subpack_path, "requirements.txt") + if os.path.exists(subpack_req) and not is_requirements_installed(subpack_req): + process_wrap(pip_install + ['-r', 'requirements.txt'], cwd=subpack_path) + + if not impact.config.get_config()['mmdet_skip']: + process_wrap(pip_install + ['openmim']) + + try: + import pycocotools + except Exception: + if platform.system() not in ["Windows"] or platform.machine() not in ["AMD64", "x86_64"]: + print(f"Your system is {platform.system()}; !! You need to install 'libpython3-dev' for this step. !!") + + process_wrap(pip_install + ['pycocotools']) + else: + pycocotools = { + (3, 8): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp38-cp38-win_amd64.whl", + (3, 9): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp39-cp39-win_amd64.whl", + (3, 10): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp310-cp310-win_amd64.whl", + (3, 11): "https://github.com/Bing-su/dddetailer/releases/download/pycocotools/pycocotools-2.0.6-cp311-cp311-win_amd64.whl", + } + + version = sys.version_info[:2] + url = pycocotools[version] + process_wrap(pip_install + [url]) + + + def ensure_pip_packages_last(): + my_path = os.path.dirname(__file__) + requirements_path = os.path.join(my_path, "requirements.txt") + + if not is_requirements_installed(requirements_path): + process_wrap(pip_install + ['-r', requirements_path]) + + # fallback + try: + import segment_anything + from skimage.measure import label, regionprops + import piexif + except Exception: + process_wrap(pip_install + ['-r', requirements_path]) + + # !! cv2 importing test must be very last !! + try: + import cv2 + except Exception: + try: + if not is_installed('opencv-python'): + process_wrap(pip_install + ['opencv-python']) + if not is_installed('opencv-python-headless'): + process_wrap(pip_install + ['opencv-python-headless']) + except: + print(f"[ERROR] ComfyUI-Impact-Pack: failed to install 'opencv-python'. Please, install manually.") + + try: + import git + except Exception: + if not is_installed('gitpython'): + process_wrap(pip_install + ['gitpython']) + + def ensure_mmdet_package(): + try: + import mmcv + import mmdet + from mmdet.evaluation import get_classes + except Exception: + process_wrap(pip_install + ['opendatalab==0.0.9']) + process_wrap(pip_install + ['-U', 'openmim']) + process_wrap(mim_install + ['mmcv>=2.0.0rc4, <2.1.0']) + process_wrap(mim_install + ['mmdet==3.0.0']) + process_wrap(mim_install + ['mmengine==0.7.4']) + + + def install(): + remove_olds() + + subpack_install_script = os.path.join(subpack_path, "install.py") + + print(f"### ComfyUI-Impact-Pack: Updating subpack") + ensure_subpack() # The installation of the subpack must take place before ensure_pip. cv2 triggers a permission error. 
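+ # Run the subpack's own install.py, then verify its requirements really were installed (falling back to a direct pip install).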
+ + if os.path.exists(subpack_install_script): + process_wrap([sys.executable, 'install.py'], cwd=subpack_path) + if not is_requirements_installed(os.path.join(subpack_path, 'requirements.txt')): + process_wrap(pip_install + ['-r', 'requirements.txt'], cwd=subpack_path) + else: + print(f"### ComfyUI-Impact-Pack: (Install Failed) Subpack\nFile not found: `{subpack_install_script}`") + + ensure_pip_packages_first() + + if not impact.config.get_config()['mmdet_skip']: + ensure_mmdet_package() + + ensure_pip_packages_last() + + # Download model + print("### ComfyUI-Impact-Pack: Check basic models") + + model_path = folder_paths.models_dir + + bbox_path = os.path.join(model_path, "mmdets", "bbox") + sam_path = os.path.join(model_path, "sams") + onnx_path = os.path.join(model_path, "onnx") + + if not os.path.exists(os.path.join(os.path.dirname(__file__), '..', 'skip_download_model')): + if not os.path.exists(bbox_path): + os.makedirs(bbox_path) + + if not impact.config.get_config()['mmdet_skip']: + if not os.path.exists(os.path.join(bbox_path, "mmdet_anime-face_yolov3.pth")): + download_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path) + + if not os.path.exists(os.path.join(bbox_path, "mmdet_anime-face_yolov3.py")): + download_url("https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_anime-face_yolov3.py", bbox_path) + + if not os.path.exists(os.path.join(sam_path, "sam_vit_b_01ec64.pth")): + download_url("https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", sam_path) + + if not os.path.exists(onnx_path): + print(f"### ComfyUI-Impact-Pack: onnx model directory created ({onnx_path})") + os.mkdir(onnx_path) + + impact.config.write_config() + + + install() + +except Exception as e: + print("[ERROR] ComfyUI-Impact-Pack: Dependency installation has failed. 
Please install manually.") + traceback.print_exc() diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js b/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js new file mode 100644 index 0000000000000000000000000000000000000000..fe1ee2f7da4e6d6096bf02ccf51ae85d633d6a27 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/comboBoolMigration.js @@ -0,0 +1,31 @@ +import { ComfyApp, app } from "../../scripts/app.js"; + +let conflict_check = undefined; + +app.registerExtension({ + name: "Comfy.impact.comboBoolMigration", + + nodeCreated(node, app) { + for(let i in node.widgets) { + let widget = node.widgets[i]; + + if(conflict_check == undefined) { + conflict_check = !!app.extensions.find((ext) => ext.name === "Comfy.comboBoolMigration"); + } + + if(conflict_check) + return; + + if(widget.type == "toggle") { + let value = widget.value; + Object.defineProperty(widget, "value", { + set: (value) => { + delete widget.value; + widget.value = value == true || value == widget.options.on; + }, + get: () => { return value; } + }); + } + } + } +}); diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/common.js b/custom_nodes/ComfyUI-Impact-Pack/js/common.js new file mode 100644 index 0000000000000000000000000000000000000000..3365c018276be0fa7a8864f92a5d627cb1013a46 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/common.js @@ -0,0 +1,78 @@ +import { api } from "../../scripts/api.js"; +import { app } from "../../scripts/app.js"; + +let original_show = app.ui.dialog.show; + +function dialog_show_wrapper(html) { + if (typeof html === "string") { + if(html.includes("IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE")) { + return; + } + + this.textElement.innerHTML = html; + } else { + this.textElement.replaceChildren(html); + } + this.element.style.display = "flex"; +} + +app.ui.dialog.show = dialog_show_wrapper; + + +function nodeFeedbackHandler(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const w = node.widgets.find((w) => event.detail.widget_name === w.name); + if(w) { + w.value = event.detail.value; + } + } +} + +api.addEventListener("impact-node-feedback", nodeFeedbackHandler); + + +function setMuteState(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + if(event.detail.is_active) + node.mode = 0; + else + node.mode = 2; + } +} + +api.addEventListener("impact-node-mute-state", setMuteState); + + +async function bridgeContinue(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + const mutes = new Set(event.detail.mutes); + const actives = new Set(event.detail.actives); + + for(let i in app.graph._nodes_by_id) { + let this_node = app.graph._nodes_by_id[i]; + if(mutes.has(i)) { + this_node.mode = 2; + } + else if(actives.has(i)) { + this_node.mode = 0; + } + } + + await app.queuePrompt(0, 1); + } +} + +api.addEventListener("impact-bridge-continue", bridgeContinue); + + +function addQueue(event) { + app.queuePrompt(0, 1); +} + +api.addEventListener("impact-add-queue", addQueue); diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js b/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js new file mode 100644 index 0000000000000000000000000000000000000000..ca58ad2f93e0e220ac6a24f84ca6da2500ffba6c --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/impact-image-util.js @@ -0,0 +1,224 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; + +function 
load_image(str) { + let base64String = canvas.toDataURL('image/png'); + let img = new Image(); + img.src = base64String; +} + +function getFileItem(baseType, path) { + try { + let pathType = baseType; + + if (path.endsWith("[output]")) { + pathType = "output"; + path = path.slice(0, -9); + } else if (path.endsWith("[input]")) { + pathType = "input"; + path = path.slice(0, -8); + } else if (path.endsWith("[temp]")) { + pathType = "temp"; + path = path.slice(0, -7); + } + + const subfolder = path.substring(0, path.lastIndexOf('/')); + const filename = path.substring(path.lastIndexOf('/') + 1); + + return { + filename: filename, + subfolder: subfolder, + type: pathType + }; + } + catch(exception) { + return null; + } +} + +async function loadImageFromUrl(image, node_id, v, need_to_load) { + let item = getFileItem('temp', v); + + if(item) { + let params = `?node_id=${node_id}&filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = await api.fetchApi('/impact/set/pb_id_image'+params, { cache: "no-store" }); + if(res.status == 200) { + let pb_id = await res.text(); + if(need_to_load) {; + image.src = `view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + } + return pb_id; + } + else { + return `$${node_id}-0`; + } + } + else { + return `$${node_id}-0`; + } +} + +async function loadImageFromId(image, v) { + let res = await api.fetchApi('/impact/get/pb_id_image?id='+v, { cache: "no-store" }); + if(res.status == 200) { + let item = await res.json(); + image.src = `view?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + return true; + } + + return false; +} + +app.registerExtension({ + name: "Comfy.Impact.img", + + nodeCreated(node, app) { + if(node.comfyClass == "PreviewBridge") { + let w = node.widgets.find(obj => obj.name === 'image'); + node._imgs = [new Image()]; + node.imageIndex = 0; + + Object.defineProperty(w, 'value', { + async set(v) { + const stackTrace = new Error().stack; + if(stackTrace.includes('presetText.js')) + return; + + var image = new Image(); + if(v && v.constructor == String && v.startsWith('$')) { + // from node feedback + let need_to_load = node._imgs[0].src == ''; + if(await loadImageFromId(image, v, need_to_load)) { + w._value = v; + if(node._imgs[0].src == '') { + node._imgs = [image]; + } + } + else { + w._value = `$${node.id}-0`; + } + } + else { + // from clipspace + w._value = await loadImageFromUrl(image, node.id, v, false); + } + }, + get() { + if(w._value == undefined) { + w._value = `$${node.id}-0`; + } + return w._value; + } + }); + + Object.defineProperty(node, 'imgs', { + set(v) { + const stackTrace = new Error().stack; + if(v && v.length == 0) + return; + else if(stackTrace.includes('pasteFromClipspace')) { + let sp = new URLSearchParams(v[0].src.split("?")[1]); + let str = ""; + if(sp.get('subfolder')) { + str += sp.get('subfolder') + '/'; + } + str += `${sp.get("filename")} [${sp.get("type")}]`; + + w.value = str; + } + + node._imgs = v; + }, + get() { + return node._imgs; + } + }); + } + + if(node.comfyClass == "ImageReceiver") { + let path_widget = node.widgets.find(obj => obj.name === 'image'); + let w = node.widgets.find(obj => obj.name === 'image_data'); + let stw_widget = node.widgets.find(obj => obj.name === 'save_to_workflow'); + w._value = ""; + + Object.defineProperty(w, 'value', { + set(v) { + if(v != '[IMAGE DATA]') + w._value = v; + }, + get() { + const stackTrace = new Error().stack; + if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && 
stackTrace.includes('app.js')) { + return "[IMAGE DATA]"; + } + else { + if(stw_widget.value) + return w._value; + else + return ""; + } + } + }); + + let set_img_act = (v) => { + node._img = v; + var canvas = document.createElement('canvas'); + canvas.width = v[0].width; + canvas.height = v[0].height; + + var context = canvas.getContext('2d'); + context.drawImage(v[0], 0, 0, v[0].width, v[0].height); + + var base64Image = canvas.toDataURL('image/png'); + w.value = base64Image; + }; + + Object.defineProperty(node, 'imgs', { + set(v) { + if (!v[0].complete) { + let orig_onload = v[0].onload; + v[0].onload = function(v2) { + if(orig_onload) + orig_onload(); + set_img_act(v); + }; + } + else { + set_img_act(v); + } + }, + get() { + if(this._img == undefined && w.value != '') { + this._img = [new Image()]; + if(stw_widget.value && w.value != '[IMAGE DATA]') + this._img[0].src = w.value; + } + else if(this._img == undefined && path_widget.value) { + let image = new Image(); + image.src = path_widget.value; + + try { + let item = getFileItem('temp', path_widget.value); + let params = `?filename=${item.filename}&type=${item.type}&subfolder=${item.subfolder}`; + + let res = api.fetchApi('/view/validate'+params, { cache: "no-store" }).then(response => response); + if(res.status == 200) { + image.src = 'view'+params; + } + + this._img = [new Image()]; // placeholder + image.onload = function(v) { + set_img_act([image]); + }; + } + catch { + + } + } + return this._img; + } + }); + } + } +}) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js b/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js new file mode 100644 index 0000000000000000000000000000000000000000..4a4d560e7e5f20ec84e98c921b3774ab2d47cbca --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/impact-pack.js @@ -0,0 +1,764 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +let wildcards_list = []; +async function load_wildcards() { + let res = await api.fetchApi('/impact/wildcards/list'); + let data = await res.json(); + wildcards_list = data.data; +} + +load_wildcards(); + + +// temporary implementation (copying from https://github.com/pythongosssss/ComfyUI-WD14-Tagger) +// I think this should be included into master!! +class ImpactProgressBadge { + constructor() { + if (!window.__progress_badge__) { + window.__progress_badge__ = Symbol("__impact_progress_badge__"); + } + this.symbol = window.__progress_badge__; + } + + getState(node) { + return node[this.symbol] || {}; + } + + setState(node, state) { + node[this.symbol] = state; + app.canvas.setDirty(true); + } + + addStatusHandler(nodeType) { + if (nodeType[this.symbol]?.statusTagHandler) { + return; + } + if (!nodeType[this.symbol]) { + nodeType[this.symbol] = {}; + } + nodeType[this.symbol] = { + statusTagHandler: true, + }; + + api.addEventListener("impact/update_status", ({ detail }) => { + let { node, progress, text } = detail; + const n = app.graph.getNodeById(+(node || app.runningNodeId)); + if (!n) return; + const state = this.getState(n); + state.status = Object.assign(state.status || {}, { progress: text ? 
progress : null, text: text || null }); + this.setState(n, state); + }); + + const self = this; + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + const state = self.getState(this); + if (!state?.status?.text) { + return r; + } + + const { fgColor, bgColor, text, progress, progressColor } = { ...state.status }; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor || "dodgerblue"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + if (progress) { + ctx.fillStyle = progressColor || "green"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, (sz.width + 12) * progress, 20, 5); + ctx.fill(); + } + + ctx.fillStyle = fgColor || "#fff"; + ctx.fillText(text, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + return r; + }; + } +} + +const input_tracking = {}; +const input_dirty = {}; +const output_tracking = {}; + +function progressExecuteHandler(event) { + if(event.detail.output.aux){ + const id = event.detail.node; + if(input_tracking.hasOwnProperty(id)) { + if(input_tracking.hasOwnProperty(id) && input_tracking[id][0] != event.detail.output.aux[0]) { + input_dirty[id] = true; + } + else{ + + } + } + + input_tracking[id] = event.detail.output.aux; + } +} + +function imgSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'ImageReceiver') { + if(nodes[i].widgets[1].value == event.detail.link_id) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.onload = (event) => { + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + }; + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + } + } + } + } +} + + +function latentSendHandler(event) { + if(event.detail.images.length > 0){ + let data = event.detail.images[0]; + let filename = `${data.filename} [${data.type}]`; + + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'LatentReceiver') { + if(nodes[i].widgets[1].value == event.detail.link_id) { + if(data.subfolder) + nodes[i].widgets[0].value = `${data.subfolder}/${data.filename} [${data.type}]`; + else + nodes[i].widgets[0].value = `${data.filename} [${data.type}]`; + + let img = new Image(); + img.src = `/view?filename=${data.filename}&type=${data.type}&subfolder=${data.subfolder}`+app.getPreviewFormatParam(); + nodes[i].imgs = [img]; + nodes[i].size[1] = Math.max(200, nodes[i].size[1]); + } + } + } + } +} + + +function valueSendHandler(event) { + let nodes = app.graph._nodes; + for(let i in nodes) { + if(nodes[i].type == 'ImpactValueReceiver') { + if(nodes[i].widgets[2].value == event.detail.link_id) { + nodes[i].widgets[1].value = event.detail.value; + + let typ = typeof event.detail.value; + if(typ == 'string') { + nodes[i].widgets[0].value = "STRING"; + } + else if(typ == "boolean") { + nodes[i].widgets[0].value = "BOOLEAN"; + } + else if(typ != "number") { + nodes[i].widgets[0].value = typeof event.detail.value; + } + else if(Number.isInteger(event.detail.value)) { + 
nodes[i].widgets[0].value = "INT"; + } + else { + nodes[i].widgets[0].value = "FLOAT"; + } + } + } + } +} + + +const impactProgressBadge = new ImpactProgressBadge(); + +api.addEventListener("stop-iteration", () => { + document.getElementById("autoQueueCheckbox").checked = false; +}); +api.addEventListener("value-send", valueSendHandler); +api.addEventListener("img-send", imgSendHandler); +api.addEventListener("latent-send", latentSendHandler); +api.addEventListener("executed", progressExecuteHandler); + +app.registerExtension({ + name: "Comfy.Impack", + loadedGraphNode(node, app) { + if (node.comfyClass == "MaskPainter") { + input_dirty[node.id + ""] = true; + } + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name == "IterativeLatentUpscale" || nodeData.name == "IterativeImageUpscale" + || nodeData.name == "RegionalSampler"|| nodeData.name == "RegionalSamplerAdvanced") { + impactProgressBadge.addStatusHandler(nodeType); + } + + if(nodeData.name === 'ImpactInversedSwitch') { + nodeData.output = ['*']; + nodeData.output_is_list = [false]; + nodeData.output_name = ['output1']; + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info) + return; + + if(type == 2) { + // connect output + if(connected){ + if(app.graph._nodes_by_id[link_info.target_id].type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].name = link_info.type; + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = link_info.type; + } + } + } + } + } + else { + if(app.graph._nodes_by_id[link_info.origin_id].type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node.outputs[link_info.origin_slot].type; + + if(origin_type == '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + if(this.inputs[i].name != 'select') + this.inputs[i].type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].name = origin_type; + } + + return; + } + + if (!connected && this.outputs.length > 1) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData')) { + if(this.outputs[link_info.origin_slot].links.length == 0) + this.removeOutput(link_info.origin_slot); + } + } + + let slot_i = 1; + for (let i = 0; i < this.outputs.length; i++) { + this.outputs[i].name = `output${slot_i}` + slot_i++; + } + + let last_slot = this.outputs[this.outputs.length - 1]; + if (last_slot.slot_index == link_info.origin_slot) { + this.addOutput(`output${slot_i}`, this.outputs[0].type); + } + + let select_slot = this.inputs.find(x => x.name == "select"); + if(this.widgets) { + this.widgets[0].options.max = select_slot?this.outputs.length-1:this.outputs.length; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + if(this.widgets[0].options.max > 0 && this.widgets[0].value == 
0) + this.widgets[0].value = 1; + } + } + } + + if (nodeData.name === 'ImpactMakeImageList' || nodeData.name === 'ImpactMakeImageBatch' || + nodeData.name === 'CombineRegionalPrompts' || nodeData.name === 'ImpactCombineConditionings' || + nodeData.name === 'ImpactSEGSConcat' || + nodeData.name === 'ImpactSwitch' || nodeData.name === 'LatentSwitch' || nodeData.name == 'SEGSSwitch') { + var input_name = "input"; + + switch(nodeData.name) { + case 'ImpactMakeImageList': + case 'ImpactMakeImageBatch': + input_name = "image"; + break; + + case 'ImpactSEGSConcat': + input_name = "segs"; + break; + + case 'CombineRegionalPrompts': + input_name = "regional_prompts"; + break; + + case 'ImpactCombineConditionings': + input_name = "conditioning"; + break; + + case 'LatentSwitch': + input_name = "input"; + break; + + case 'SEGSSwitch': + input_name = "input"; + break; + + case 'ImpactSwitch': + input_name = "input"; + } + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info) + return; + + if(type == 2) { + // connect output + if(connected && index == 0){ + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.target_id]?.type == 'Reroute') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + + if(this.outputs[0].type == '*'){ + if(link_info.type == '*') { + app.graph._nodes_by_id[link_info.target_id].disconnectInput(link_info.target_slot); + } + else { + // propagate type + this.outputs[0].type = link_info.type; + this.outputs[0].label = link_info.type; + this.outputs[0].name = link_info.type; + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = link_info.type; + } + } + } + } + + return; + } + else { + if(nodeData.name == 'ImpactSwitch' && app.graph._nodes_by_id[link_info.origin_id].type == 'Reroute') + this.disconnectInput(link_info.target_slot); + + // connect input + if(this.inputs[index].name == 'select' || this.inputs[index].name == 'sel_mode') + return; + + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node.outputs[link_info.origin_slot].type; + + if(origin_type == '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = origin_type; + } + + this.outputs[0].type = origin_type; + this.outputs[0].label = origin_type; + this.outputs[0].name = origin_type; + } + } + + let select_slot = this.inputs.find(x => x.name == "select"); + let mode_slot = this.inputs.find(x => x.name == "sel_mode"); + + let converted_count = 0; + converted_count += select_slot?1:0; + converted_count += mode_slot?1:0; + + if (!connected && (this.inputs.length > 1+converted_count)) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData') && + this.inputs[index].name != 'select') { + this.removeInput(index); + } + } + + let slot_i = 1; + for (let i = 0; i < this.inputs.length; i++) { + let input_i = this.inputs[i]; + if(input_i.name != 'select'&& input_i.name != 'sel_mode') { + input_i.name = `${input_name}${slot_i}` + slot_i++; + } + } + + let last_slot = 
this.inputs[this.inputs.length - 1]; + if ( + (last_slot.name == 'select' && last_slot.name != 'sel_mode' && this.inputs[this.inputs.length - 2].link != undefined) + || (last_slot.name != 'select' && last_slot.name != 'sel_mode' && last_slot.link != undefined)) { + this.addInput(`${input_name}${slot_i}`, this.outputs[0].type); + } + + if(this.widgets) { + this.widgets[0].options.max = select_slot?this.inputs.length-1:this.inputs.length; + this.widgets[0].value = Math.min(this.widgets[0].value, this.widgets[0].options.max); + if(this.widgets[0].options.max > 0 && this.widgets[0].value == 0) + this.widgets[0].value = 1; + } + } + } + }, + + nodeCreated(node, app) { + if(node.comfyClass == "MaskPainter") { + node.addWidget("button", "Edit mask", null, () => { + ComfyApp.copyToClipspace(node); + ComfyApp.clipspace_return_node = node; + ComfyApp.open_maskeditor(); + }); + } + + switch(node.comfyClass) { + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + case "EditDetailerPipe": + case "FaceDetailer": + case "DetailerForEach": + case "DetailerForEachDebug": + case "DetailerForEachPipe": + case "DetailerForEachDebugPipe": + { + for(let i in node.widgets) { + let widget = node.widgets[i]; + if(widget.type === "customtext") { + widget.dynamicPrompts = false; + widget.inputEl.placeholder = "wildcard spec: if kept empty, this option will be ignored"; + widget.serializeValue = () => { + return node.widgets[i].value; + }; + } + } + } + break; + } + + if(node.comfyClass == "ImpactSEGSLabelFilter") { + Object.defineProperty(node.widgets[0], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(node.widgets[1].value.trim() != "" && !node.widgets[1].value.trim().endsWith(",")) + node.widgets[1].value += ", " + + node.widgets[1].value += value; + node.widgets_values[1] = node.widgets[1].value; + } + + node._value = value; + }, + get: () => { + return node._value; + } + }); + } + + if( + node.comfyClass == "ImpactWildcardEncode" || node.comfyClass == "ImpactWildcardProcessor" + || node.comfyClass == "ToDetailerPipe" || node.comfyClass == "ToDetailerPipeSDXL" + || node.comfyClass == "EditDetailerPipe" || node.comfyClass == "BasicPipeToDetailerPipe" || node.comfyClass == "BasicPipeToDetailerPipeSDXL") { + node._value = "Select the LoRA to add to the text"; + node._wvalue = "Select the Wildcard to add to the text"; + + var tbox_id = 0; + var combo_id = 3; + var has_lora = true; + + switch(node.comfyClass){ + case "ImpactWildcardEncode": + tbox_id = 0; + combo_id = 3; + break; + + case "ImpactWildcardProcessor": + tbox_id = 0; + combo_id = 4; + has_lora = false; + break; + + case "ToDetailerPipe": + case "ToDetailerPipeSDXL": + case "EditDetailerPipe": + case "EditDetailerPipeSDXL": + case "BasicPipeToDetailerPipe": + case "BasicPipeToDetailerPipeSDXL": + tbox_id = 0; + combo_id = 1; + break; + } + + Object.defineProperty(node.widgets[combo_id+1], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Select the Wildcard to add to the text") { + if(node.widgets[tbox_id].value != '') + node.widgets[tbox_id].value += ', ' + + node.widgets[tbox_id].value += value; + } + } + }, + get: () => { return "Select the Wildcard to add to the text"; } + }); + + Object.defineProperty(node.widgets[combo_id+1].options, "values", { + set: (x) => {}, + get: () => { + return wildcards_list; + } + }); + + 
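+			// The setter above is a no-op and the getter always returns wildcards_list,
+			// so this combo reflects the live list fetched from /impact/wildcards/list
+			// without the widget ever having to be rebuilt.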
if(has_lora) {
+				Object.defineProperty(node.widgets[combo_id], "value", {
+					set: (value) => {
+						const stackTrace = new Error().stack;
+						if(stackTrace.includes('inner_value_change')) {
+							if(value != "Select the LoRA to add to the text") {
+								let lora_name = value;
+								if (lora_name.endsWith('.safetensors')) {
+									lora_name = lora_name.slice(0, -12);
+								}
+
+								node.widgets[tbox_id].value += `<lora:${lora_name}>`;
+								if(node.widgets_values) {
+									node.widgets_values[tbox_id] = node.widgets[tbox_id].value;
+								}
+							}
+						}
+
+						node._value = value;
+					},
+					get: () => {
+						return node._value;
+					}
+				});
+			}
+
+			// Preventing validation errors from occurring in any situation.
+			if(has_lora) {
+				node.widgets[combo_id].serializeValue = () => { return "Select the LoRA to add to the text"; }
+			}
+			node.widgets[combo_id+1].serializeValue = () => { return "Select the Wildcard to add to the text"; }
+		}
+
+		if(node.comfyClass == "ImpactWildcardProcessor" || node.comfyClass == "ImpactWildcardEncode") {
+			node.widgets[0].inputEl.placeholder = "Wildcard Prompt (User input)";
+			node.widgets[1].inputEl.placeholder = "Populated Prompt (Will be generated automatically)";
+			node.widgets[1].inputEl.disabled = true;
+			node.widgets[0].dynamicPrompts = false;
+			node.widgets[1].dynamicPrompts = false;
+
+			let populate_getter = node.widgets[1].__lookupGetter__('value');
+			let populate_setter = node.widgets[1].__lookupSetter__('value');
+
+			const wildcard_text_widget = node.widgets.find((w) => w.name == 'wildcard_text');
+			const populated_text_widget = node.widgets.find((w) => w.name == 'populated_text');
+			const mode_widget = node.widgets.find((w) => w.name == 'mode');
+			const seed_widget = node.widgets.find((w) => w.name == 'seed');
+
+			let force_serializeValue = async (n,i) =>
+			{
+				if(!mode_widget.value) {
+					return populated_text_widget.value;
+				}
+				else {
+					let wildcard_text = await wildcard_text_widget.serializeValue();
+
+					let response = await api.fetchApi(`/impact/wildcards`, {
+						method: 'POST',
+						headers: { 'Content-Type': 'application/json' },
+						body: JSON.stringify({text: wildcard_text, seed: seed_widget.value})
+					});
+
+					let populated = await response.json();
+
+					if(n.widgets_values) {
+						n.widgets_values[2] = false;
+						n.widgets_values[1] = populated.text;
+					}
+					populate_setter.call(populated_text_widget, populated.text);
+
+					return populated.text;
+				}
+			};
+
+			// mode combo
+			Object.defineProperty(mode_widget, "value", {
+				set: (value) => {
+					node._mode_value = value == true || value == "Populate";
+					populated_text_widget.inputEl.disabled = value == true || value == "Populate";
+				},
+				get: () => {
+					if(node._mode_value != undefined)
+						return node._mode_value;
+					else
+						return true;
+				}
+			});
+
+			// to avoid conflict with presetText.js of pythongosssss
+			Object.defineProperty(populated_text_widget, "value", {
+				set: (value) => {
+					const stackTrace = new Error().stack;
+					if(!stackTrace.includes('serializeValue'))
+						populate_setter.call(populated_text_widget, value);
+				},
+				get: () => {
+					return populate_getter.call(populated_text_widget);
+				}
+			});
+
+			wildcard_text_widget.serializeValue = (n,i) => {
+				if(node.inputs) {
+					let link_id = node.inputs.find(x => x.name=="wildcard_text")?.link;
+					if(link_id != undefined) {
+						let link = app.graph.links[link_id];
+						let input_widget = app.graph._nodes_by_id[link.origin_id].widgets[link.origin_slot];
+						if(input_widget.type == "customtext") {
+							return input_widget.value;
+						}
+					}
+					else {
+						return wildcard_text_widget.value;
+					}
+				}
+				else {
+					return wildcard_text_widget.value;
+				}
+			};
+
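+			// Serialization note: in "Populate" mode force_serializeValue (above) POSTs
+			// the raw wildcard text plus the current seed to /impact/wildcards at queue
+			// time, so the expanded prompt is what actually lands in the workflow.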
populated_text_widget.serializeValue = force_serializeValue; + } + + if (node.comfyClass == "MaskPainter") { + node.widgets[0].value = '#placeholder'; + + Object.defineProperty(node, "images", { + set: function(value) { + node._images = value; + }, + get: function() { + const id = node.id+""; + if(node.widgets[0].value != '#placeholder') { + var need_invalidate = false; + + if(input_dirty.hasOwnProperty(id) && input_dirty[id]) { + node.widgets[0].value = {...input_tracking[id][1]}; + input_dirty[id] = false; + need_invalidate = true + this._images = app.nodeOutputs[id].images; + } + + let filename = app.nodeOutputs[id]['aux'][1][0]['filename']; + let subfolder = app.nodeOutputs[id]['aux'][1][0]['subfolder']; + let type = app.nodeOutputs[id]['aux'][1][0]['type']; + + let item = + { + image_hash: app.nodeOutputs[id]['aux'][0], + forward_filename: app.nodeOutputs[id]['aux'][1][0]['filename'], + forward_subfolder: app.nodeOutputs[id]['aux'][1][0]['subfolder'], + forward_type: app.nodeOutputs[id]['aux'][1][0]['type'] + }; + + if(node._images) { + app.nodeOutputs[id].images = [{ + ...node._images[0], + ...item + }]; + + node.widgets[0].value = + { + ...node._images[0], + ...item + }; + } + else { + app.nodeOutputs[id].images = [{ + ...item + }]; + + node.widgets[0].value = + { + ...item + }; + } + + if(need_invalidate) { + Promise.all( + app.nodeOutputs[id].images.map((src) => { + return new Promise((r) => { + const img = new Image(); + img.onload = () => r(img); + img.onerror = () => r(null); + img.src = "/view?" + new URLSearchParams(src).toString(); + }); + }) + ).then((imgs) => { + this.imgs = imgs.filter(Boolean); + this.setSizeForImage?.(); + app.graph.setDirtyCanvas(true); + }); + + app.nodeOutputs[id].images[0] = { ...node.widgets[0].value }; + } + + return app.nodeOutputs[id].images; + } + else { + return node._images; + } + } + }); + } + } +}); diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js b/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js new file mode 100644 index 0000000000000000000000000000000000000000..279a938b730ec7bea16b77d9f901d7e36f3af57e --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/impact-sam-editor.js @@ -0,0 +1,636 @@ +import { app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { ComfyApp } from "../../scripts/app.js"; +import { ClipspaceDialog } from "../../extensions/core/clipspace.js"; + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +// Helper function to convert a data URL to a Blob object +function dataURLToBlob(dataURL) { + const parts = dataURL.split(';base64,'); + const contentType = parts[0].split(':')[1]; + const byteString = atob(parts[1]); + const arrayBuffer = new ArrayBuffer(byteString.length); + const uint8Array = new Uint8Array(arrayBuffer); + for (let i = 0; i < byteString.length; i++) { + uint8Array[i] = byteString.charCodeAt(i); + } + return new Blob([arrayBuffer], { type: contentType }); +} + +function loadedImageToBlob(image) { + const canvas = document.createElement('canvas'); + + canvas.width = image.width; + canvas.height = image.height; + + const ctx = canvas.getContext('2d'); + + ctx.drawImage(image, 0, 0); + + const dataURL = canvas.toDataURL('image/png', 1); + const blob = dataURLToBlob(dataURL); + + return blob; +} + +async function uploadMask(filepath, 
formData) { + await fetch('/upload/mask', { + method: 'POST', + body: formData + }).then(response => {}).catch(error => { + console.error('Error:', error); + }); + + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = `view?filename=${filepath.filename}&type=${filepath.type}`; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; + + ClipspaceDialog.invalidatePreview(); +} + +class ImpactSamEditorDialog extends ComfyDialog { + static instance = null; + + static getInstance() { + if(!ImpactSamEditorDialog.instance) { + ImpactSamEditorDialog.instance = new ImpactSamEditorDialog(); + } + + return ImpactSamEditorDialog.instance; + } + + constructor() { + super(); + this.element = $el("div.comfy-modal", { parent: document.body }, + [ $el("div.comfy-modal-content", + [...this.createButtons()]), + ]); + } + + createButtons() { + return []; + } + + createButton(name, callback) { + var button = document.createElement("button"); + button.innerText = name; + button.addEventListener("click", callback); + return button; + } + + createLeftButton(name, callback) { + var button = this.createButton(name, callback); + button.style.cssFloat = "left"; + button.style.marginRight = "4px"; + return button; + } + + createRightButton(name, callback) { + var button = this.createButton(name, callback); + button.style.cssFloat = "right"; + button.style.marginLeft = "4px"; + return button; + } + + createLeftSlider(self, name, callback) { + const divElement = document.createElement('div'); + divElement.id = "sam-confidence-slider"; + divElement.style.cssFloat = "left"; + divElement.style.fontFamily = "sans-serif"; + divElement.style.marginRight = "4px"; + divElement.style.color = "var(--input-text)"; + divElement.style.backgroundColor = "var(--comfy-input-bg)"; + divElement.style.borderRadius = "8px"; + divElement.style.borderColor = "var(--border-color)"; + divElement.style.borderStyle = "solid"; + divElement.style.fontSize = "15px"; + divElement.style.height = "21px"; + divElement.style.padding = "1px 6px"; + divElement.style.display = "flex"; + divElement.style.position = "relative"; + divElement.style.top = "2px"; + self.confidence_slider_input = document.createElement('input'); + self.confidence_slider_input.setAttribute('type', 'range'); + self.confidence_slider_input.setAttribute('min', '0'); + self.confidence_slider_input.setAttribute('max', '100'); + self.confidence_slider_input.setAttribute('value', '70'); + const labelElement = document.createElement("label"); + labelElement.textContent = name; + + divElement.appendChild(labelElement); + divElement.appendChild(self.confidence_slider_input); + + self.confidence_slider_input.addEventListener("change", callback); + + return divElement; + } + + async detect_and_invalidate_mask_canvas(self) { + const mask_img = await self.detect(self); + + const canvas = self.maskCtx.canvas; + const ctx = self.maskCtx; + + ctx.clearRect(0, 0, canvas.width, canvas.height); + + await new Promise((resolve, reject) => { + self.mask_image = new Image(); + self.mask_image.onload = function() { + ctx.drawImage(self.mask_image, 0, 0, canvas.width, canvas.height); + resolve(); + }; + self.mask_image.onerror = reject; + self.mask_image.src = mask_img.src; + }); + } + + setlayout(imgCanvas, maskCanvas, pointsCanvas) { + const self = this; + + // If it is specified as relative, using it only as a hidden placeholder for padding is recommended + 
// to prevent anomalies where it exceeds a certain size and goes outside of the window. + var placeholder = document.createElement("div"); + placeholder.style.position = "relative"; + placeholder.style.height = "50px"; + + var bottom_panel = document.createElement("div"); + bottom_panel.style.position = "absolute"; + bottom_panel.style.bottom = "0px"; + bottom_panel.style.left = "20px"; + bottom_panel.style.right = "20px"; + bottom_panel.style.height = "50px"; + + var brush = document.createElement("div"); + brush.id = "sam-brush"; + brush.style.backgroundColor = "blue"; + brush.style.outline = "2px solid pink"; + brush.style.borderRadius = "50%"; + brush.style.MozBorderRadius = "50%"; + brush.style.WebkitBorderRadius = "50%"; + brush.style.position = "absolute"; + brush.style.zIndex = 100; + brush.style.pointerEvents = "none"; + this.brush = brush; + this.element.appendChild(imgCanvas); + this.element.appendChild(maskCanvas); + this.element.appendChild(pointsCanvas); + this.element.appendChild(placeholder); // must below z-index than bottom_panel to avoid covering button + this.element.appendChild(bottom_panel); + document.body.appendChild(brush); + this.brush_size = 5; + + var confidence_slider = this.createLeftSlider(self, "Confidence", (event) => { + self.confidence = event.target.value; + }); + + var clearButton = this.createLeftButton("Clear", () => { + self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + + self.prompt_points = []; + + self.invalidatePointsCanvas(self); + }); + + var detectButton = this.createLeftButton("Detect", () => self.detect_and_invalidate_mask_canvas(self)); + + var cancelButton = this.createRightButton("Cancel", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.close(); + }); + + self.saveButton = this.createRightButton("Save", () => { + document.removeEventListener("mouseup", ImpactSamEditorDialog.handleMouseUp); + document.removeEventListener("keydown", ImpactSamEditorDialog.handleKeyDown); + self.save(self); + }); + + var undoButton = this.createLeftButton("Undo", () => { + if(self.prompt_points.length > 0) { + self.prompt_points.pop(); + self.pointsCtx.clearRect(0, 0, self.pointsCanvas.width, self.pointsCanvas.height); + self.invalidatePointsCanvas(self); + } + }); + + bottom_panel.appendChild(clearButton); + bottom_panel.appendChild(detectButton); + bottom_panel.appendChild(self.saveButton); + bottom_panel.appendChild(cancelButton); + bottom_panel.appendChild(confidence_slider); + bottom_panel.appendChild(undoButton); + + imgCanvas.style.position = "relative"; + imgCanvas.style.top = "200"; + imgCanvas.style.left = "0"; + + maskCanvas.style.position = "absolute"; + maskCanvas.style.opacity = 0.5; + pointsCanvas.style.position = "absolute"; + } + + show() { + this.mask_image = null; + self.prompt_points = []; + + this.message_box = $el("p", ["Please wait a moment while the SAM model and the image are being loaded."]); + this.element.appendChild(this.message_box); + + if(self.imgCtx) { + self.imgCtx.clearRect(0, 0, self.imageCanvas.width, self.imageCanvas.height); + } + + const target_image_path = ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src; + this.load_sam(target_image_path); + + if(!this.is_layout_created) { + // layout + const imgCanvas = document.createElement('canvas'); + const maskCanvas = 
document.createElement('canvas'); + const pointsCanvas = document.createElement('canvas'); + + imgCanvas.id = "imageCanvas"; + maskCanvas.id = "maskCanvas"; + pointsCanvas.id = "pointsCanvas"; + + this.setlayout(imgCanvas, maskCanvas, pointsCanvas); + + // prepare content + this.imgCanvas = imgCanvas; + this.maskCanvas = maskCanvas; + this.pointsCanvas = pointsCanvas; + this.maskCtx = maskCanvas.getContext('2d'); + this.pointsCtx = pointsCanvas.getContext('2d'); + + this.is_layout_created = true; + + // replacement of onClose hook since close is not real close + const self = this; + const observer = new MutationObserver(function(mutations) { + mutations.forEach(function(mutation) { + if (mutation.type === 'attributes' && mutation.attributeName === 'style') { + if(self.last_display_style && self.last_display_style != 'none' && self.element.style.display == 'none') { + ComfyApp.onClipspaceEditorClosed(); + } + + self.last_display_style = self.element.style.display; + } + }); + }); + + const config = { attributes: true }; + observer.observe(this.element, config); + } + + this.setImages(target_image_path, this.imgCanvas, this.pointsCanvas); + + if(ComfyApp.clipspace_return_node) { + this.saveButton.innerText = "Save to node"; + } + else { + this.saveButton.innerText = "Save"; + } + this.saveButton.disabled = true; + + this.element.style.display = "block"; + this.element.style.zIndex = 8888; // NOTE: alert dialog must be high priority. + } + + updateBrushPreview(self, event) { + event.preventDefault(); + + const centerX = event.pageX; + const centerY = event.pageY; + + const brush = self.brush; + + brush.style.width = self.brush_size * 2 + "px"; + brush.style.height = self.brush_size * 2 + "px"; + brush.style.left = (centerX - self.brush_size) + "px"; + brush.style.top = (centerY - self.brush_size) + "px"; + } + + setImages(target_image_path, imgCanvas, pointsCanvas) { + const imgCtx = imgCanvas.getContext('2d'); + const maskCtx = this.maskCtx; + const maskCanvas = this.maskCanvas; + + const self = this; + + // image load + const orig_image = new Image(); + window.addEventListener("resize", () => { + // repositioning + imgCanvas.width = window.innerWidth - 250; + imgCanvas.height = window.innerHeight - 200; + + // redraw image + let drawWidth = orig_image.width; + let drawHeight = orig_image.height; + + if (orig_image.width > imgCanvas.width) { + drawWidth = imgCanvas.width; + drawHeight = (drawWidth / orig_image.width) * orig_image.height; + } + + if (drawHeight > imgCanvas.height) { + drawHeight = imgCanvas.height; + drawWidth = (drawHeight / orig_image.height) * orig_image.width; + } + + imgCtx.drawImage(orig_image, 0, 0, drawWidth, drawHeight); + + // update mask + pointsCanvas.width = drawWidth; + pointsCanvas.height = drawHeight; + pointsCanvas.style.top = imgCanvas.offsetTop + "px"; + pointsCanvas.style.left = imgCanvas.offsetLeft + "px"; + + maskCanvas.width = drawWidth; + maskCanvas.height = drawHeight; + maskCanvas.style.top = imgCanvas.offsetTop + "px"; + maskCanvas.style.left = imgCanvas.offsetLeft + "px"; + + self.invalidateMaskCanvas(self); + self.invalidatePointsCanvas(self); + }); + + // original image load + orig_image.onload = () => self.onLoaded(self); + const rgb_url = new URL(target_image_path); + rgb_url.searchParams.delete('channel'); + rgb_url.searchParams.set('channel', 'rgb'); + orig_image.src = rgb_url; + self.image = orig_image; + } + + onLoaded(self) { + if(self.message_box) { + self.element.removeChild(self.message_box); + self.message_box = null; + } + + 
window.dispatchEvent(new Event('resize'));
+
+		self.setEventHandler(self.pointsCanvas);
+		self.saveButton.disabled = false;
+	}
+
+	setEventHandler(targetCanvas) {
+		targetCanvas.addEventListener("contextmenu", (event) => {
+			event.preventDefault();
+		});
+
+		const self = this;
+		targetCanvas.addEventListener('pointermove', (event) => this.updateBrushPreview(self,event));
+		targetCanvas.addEventListener('pointerdown', (event) => this.handlePointerDown(self,event));
+		targetCanvas.addEventListener('pointerover', (event) => { this.brush.style.display = "block"; });
+		targetCanvas.addEventListener('pointerleave', (event) => { this.brush.style.display = "none"; });
+		document.addEventListener('keydown', ImpactSamEditorDialog.handleKeyDown);
+	}
+
+	static handleKeyDown(event) {
+		const self = ImpactSamEditorDialog.instance;
+		if (event.key === '=') { // positive
+			self.brush.style.backgroundColor = "blue";
+			self.brush.style.outline = "2px solid pink";
+			self.is_positive_mode = true;
+		} else if (event.key === '-') { // negative
+			self.brush.style.backgroundColor = "red";
+			self.brush.style.outline = "2px solid skyblue";
+			self.is_positive_mode = false;
+		}
+	}
+
+	is_positive_mode = true;
+	prompt_points = [];
+	confidence = 70;
+
+	invalidatePointsCanvas(self) {
+		const ctx = self.pointsCtx;
+
+		for (const i in self.prompt_points) {
+			const [is_positive, x, y] = self.prompt_points[i];
+
+			const scaledX = x * ctx.canvas.width / self.image.width;
+			const scaledY = y * ctx.canvas.height / self.image.height;
+
+			if(is_positive)
+				ctx.fillStyle = "blue";
+			else
+				ctx.fillStyle = "red";
+			ctx.beginPath();
+			ctx.arc(scaledX, scaledY, 3, 0, 3 * Math.PI);
+			ctx.fill();
+		}
+	}
+
+	invalidateMaskCanvas(self) {
+		if(self.mask_image) {
+			self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height);
+			self.maskCtx.drawImage(self.mask_image, 0, 0, self.maskCanvas.width, self.maskCanvas.height);
+		}
+	}
+
+	async load_sam(url) {
+		const parsedUrl = new URL(url);
+		const searchParams = new URLSearchParams(parsedUrl.search);
+
+		const filename = searchParams.get("filename") || "";
+		const fileType = searchParams.get("type") || "";
+		const subfolder = searchParams.get("subfolder") || "";
+
+		const data = {
+			sam_model_name: "auto",
+			filename: filename,
+			type: fileType,
+			subfolder: subfolder
+		};
+
+		fetch('/sam/prepare', {
+			method: 'POST',
+			headers: { 'Content-Type': 'application/json' },
+			body: JSON.stringify(data)
+		});
+	}
+
+	async detect(self) {
+		const positive_points = [];
+		const negative_points = [];
+
+		for(const i in self.prompt_points) {
+			const [is_positive, x, y] = self.prompt_points[i];
+			const point = [x,y];
+			if(is_positive)
+				positive_points.push(point);
+			else
+				negative_points.push(point);
+		}
+
+		const data = {
+			positive_points: positive_points,
+			negative_points: negative_points,
+			threshold: self.confidence/100
+		};
+
+		const response = await fetch('/sam/detect', {
+			method: 'POST',
+			headers: { 'Content-Type': 'image/png' },
+			body: JSON.stringify(data)
+		});
+
+		const blob = await response.blob();
+		const url = URL.createObjectURL(blob);
+
+		return new Promise((resolve, reject) => {
+			const image = new Image();
+			image.onload = () => resolve(image);
+			image.onerror = reject;
+			image.src = url;
+		});
+	}
+
+	handlePointerDown(self, event) {
+		if ([0, 2, 5].includes(event.button)) {
+			event.preventDefault();
+			const x = event.offsetX || event.targetTouches[0].clientX - maskRect.left;
+			const y = event.offsetY || event.targetTouches[0].clientY - maskRect.top;
+
+			const originalX = x *
self.image.width / self.pointsCanvas.width; + const originalY = y * self.image.height / self.pointsCanvas.height; + + var point = null; + if (event.button == 0) { + // positive + point = [true, originalX, originalY]; + } else { + // negative + point = [false, originalX, originalY]; + } + + self.prompt_points.push(point); + + self.invalidatePointsCanvas(self); + } + } + + async save(self) { + if(!self.mask_image) { + this.close(); + return; + } + + const save_canvas = document.createElement('canvas'); + + const save_ctx = save_canvas.getContext('2d', {willReadFrequently:true}); + save_canvas.width = self.mask_image.width; + save_canvas.height = self.mask_image.height; + + save_ctx.drawImage(self.mask_image, 0, 0, save_canvas.width, save_canvas.height); + + const save_data = save_ctx.getImageData(0, 0, save_canvas.width, save_canvas.height); + + // refine mask image + for (let i = 0; i < save_data.data.length; i += 4) { + if(save_data.data[i]) { + save_data.data[i+3] = 0; + } + else { + save_data.data[i+3] = 255; + } + + save_data.data[i] = 0; + save_data.data[i+1] = 0; + save_data.data[i+2] = 0; + } + + save_ctx.globalCompositeOperation = 'source-over'; + save_ctx.putImageData(save_data, 0, 0); + + const formData = new FormData(); + const filename = "clipspace-mask-" + performance.now() + ".png"; + + const item = + { + "filename": filename, + "subfolder": "", + "type": "temp", + }; + + if(ComfyApp.clipspace.images) + ComfyApp.clipspace.images[0] = item; + + if(ComfyApp.clipspace.widgets) { + const index = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image'); + + if(index >= 0) + ComfyApp.clipspace.widgets[index].value = `${filename} [temp]`; + } + + const dataURL = save_canvas.toDataURL(); + const blob = dataURLToBlob(dataURL); + + let original_url = new URL(this.image.src); + + const original_ref = { filename: original_url.searchParams.get('filename') }; + + let original_subfolder = original_url.searchParams.get("subfolder"); + if(original_subfolder) + original_ref.subfolder = original_subfolder; + + let original_type = original_url.searchParams.get("type"); + if(original_type) + original_ref.type = original_type; + + formData.append('image', blob, filename); + formData.append('original_ref', JSON.stringify(original_ref)); + formData.append('type', "temp"); + + await uploadMask(item, formData); + ComfyApp.onClipspaceEditorSave(); + this.close(); + } +} + +app.registerExtension({ + name: "Comfy.Impact.SAMEditor", + init(app) { + const callback = + function () { + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }; + + const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 + ClipspaceDialog.registerButton("Impact SAM Detector", context_predicate, callback); + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.output.includes("MASK") && nodeData.output.includes("IMAGE")) { + addMenuHandler(nodeType, function (_, options) { + options.unshift({ + content: "Open in SAM Detector", + callback: () => { + ComfyApp.copyToClipspace(this); + ComfyApp.clipspace_return_node = this; + + let dlg = ImpactSamEditorDialog.getInstance(); + dlg.show(); + }, + }); + }); + } + } +}); + diff --git a/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js b/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js new file mode 100644 index 0000000000000000000000000000000000000000..01319f072923294d9a531aa296435ffa78eafe2a --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/js/impact-segs-picker.js @@ 
-0,0 +1,182 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +async function open_picker(node) { + const resp = await api.fetchApi(`/impact/segs/picker/count?id=${node.id}`); + const body = await resp.text(); + + let cnt = parseInt(body); + + var existingPicker = document.getElementById('impact-picker'); + if (existingPicker) { + existingPicker.parentNode.removeChild(existingPicker); + } + + var gallery = document.createElement('div'); + gallery.id = 'impact-picker'; + + gallery.style.position = "absolute"; + gallery.style.height = "80%"; + gallery.style.width = "80%"; + gallery.style.top = "10%"; + gallery.style.left = "10%"; + gallery.style.display = 'flex'; + gallery.style.flexWrap = 'wrap'; + gallery.style.maxHeight = '600px'; + gallery.style.overflow = 'auto'; + gallery.style.backgroundColor = 'rgba(0,0,0,0.3)'; + gallery.style.padding = '20px'; + gallery.draggable = false; + gallery.style.zIndex = 5000; + + var doneButton = document.createElement('button'); + doneButton.textContent = 'Done'; + doneButton.style.padding = '10px 10px'; + doneButton.style.border = 'none'; + doneButton.style.borderRadius = '5px'; + doneButton.style.fontFamily = 'Arial, sans-serif'; + doneButton.style.fontSize = '16px'; + doneButton.style.fontWeight = 'bold'; + doneButton.style.color = '#fff'; + doneButton.style.background = 'linear-gradient(to bottom, #0070B8, #003D66)'; + doneButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + doneButton.style.margin = "20px"; + doneButton.style.height = "40px"; + + var cancelButton = document.createElement('button'); + cancelButton.textContent = 'Cancel'; + cancelButton.style.padding = '10px 10px'; + cancelButton.style.border = 'none'; + cancelButton.style.borderRadius = '5px'; + cancelButton.style.fontFamily = 'Arial, sans-serif'; + cancelButton.style.fontSize = '16px'; + cancelButton.style.fontWeight = 'bold'; + cancelButton.style.color = '#fff'; + cancelButton.style.background = 'linear-gradient(to bottom, #ff70B8, #ff3D66)'; + cancelButton.style.boxShadow = '0 2px 4px rgba(0, 0, 0, 0.4)'; + cancelButton.style.margin = "20px"; + cancelButton.style.height = "40px"; + + const w = node.widgets.find((w) => w.name == 'picks'); + let prev_selected = w.value.split(',').map(function(item) { + return parseInt(item, 10); + }); + + let images = []; + doneButton.onclick = () => { + var result = ''; + for(let i in images) { + if(images[i].isSelected) { + if(result != '') + result += ', '; + + result += (parseInt(i)+1); + } + } + + w.value = result; + + gallery.parentNode.removeChild(gallery); + } + + cancelButton.onclick = () => { + gallery.parentNode.removeChild(gallery); + } + + var panel = document.createElement('div'); + panel.style.clear = 'both'; + panel.style.width = '100%'; + panel.style.height = '40px'; + panel.style.justifyContent = 'center'; + panel.style.alignItems = 'center'; + panel.style.display = 'flex'; + panel.appendChild(doneButton); + panel.appendChild(cancelButton); + gallery.appendChild(panel); + + var hint = document.createElement('label'); + hint.style.position = 'absolute'; + hint.innerHTML = 'Click: Toggle Selection
<br>Ctrl-click: Single Selection';
+	gallery.appendChild(hint);
+
+	let max_size = 300;
+
+	for(let i=0; i<cnt; i++) {
+		let image = new Image();
+		// NOTE: this source line was garbled in the original text; the view endpoint
+		// below is an assumption, paired with the /impact/segs/picker/count route above.
+		image.src = `/impact/segs/picker/view?id=${node.id}&idx=${i}`;
+
+		if(prev_selected.includes(i+1)) {
+			image.style.border = '2px solid #006699';
+			image.isSelected = true;
+		}
+
+		images.push(image);
+
+		image.onload = function() {
+			let ratio;
+			if(image.naturalWidth > image.naturalHeight) {
+				ratio = max_size/image.naturalWidth;
+			}
+			else {
+				ratio = max_size/image.naturalHeight;
+			}
+
+			let width = image.naturalWidth * ratio;
+			let height = image.naturalHeight * ratio;
+
+			if(width < height) {
+				this.style.marginLeft = (200-width)/2+"px";
+			}
+			else{
+				this.style.marginTop = (200-height)/2+"px";
+			}
+
+			this.style.width = width+"px";
+			this.style.height = height+"px";
+			this.style.objectFit = 'cover';
+		}
+
+		image.addEventListener('click', function(event) {
+			if(event.ctrlKey) {
+				for(let i in images) {
+					if(images[i].isSelected) {
+						images[i].style.border = 'none';
+						images[i].isSelected = false;
+					}
+				}
+
+				image.style.border = '2px solid #006699';
+				image.isSelected = true;
+
+				return;
+			}
+
+			if(image.isSelected) {
+				image.style.border = 'none';
+				image.isSelected = false;
+			}
+			else {
+				image.style.border = '2px solid #006699';
+				image.isSelected = true;
+			}
+		});
+
+		gallery.appendChild(image);
+	}
+
+	document.body.appendChild(gallery);
+}
+
+
+app.registerExtension({
+	name: "Comfy.Impack.Picker",
+
+	nodeCreated(node, app) {
+		if(node.comfyClass == "ImpactSEGSPicker") {
+			node.addWidget("button", "pick", "image", () => {
+				open_picker(node);
+			});
+		}
+	}
+});
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Impact-Pack/latent.png b/custom_nodes/ComfyUI-Impact-Pack/latent.png
new file mode 100644
index 0000000000000000000000000000000000000000..19fed324a25a7e1a2252400e7752ce5586742429
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/latent.png differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b55045405c9695fb4b907bc2492b4f7deec56f3
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-310.pyc differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e8196b652fdcc5995bd4eaca9d25ac3cb6d2852
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/config.cpython-311.pyc differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68449c84c76ef60de0f6ed9c6e0b4bd2386c99ff
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-310.pyc differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a868743e54303f8aa12b1a54d363058f5a2f0bc5
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/core.cpython-311.pyc differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc
new file mode 100644 index
0000000000000000000000000000000000000000..f9db1578226c8c0c8a8fa0279902942a16865e0f Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90995eef5a7fca7ef172cd016cefc8f8c133eca8 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/detectors.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9d7c251e92501e92e37d3d6aa1fa550cf33bd51 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f15d7befaa81d319f84fb362ee73b51485e80b46 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/hf_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e99ee0ebdf1b5547c6c7cb02d189eba5f2bdfc5c Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2f4a01104c1c59ccc67c940f0a6084e33376ec6 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_pack.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e564212f47d58bc5dd668cc5f54d00f2d4938cab Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c7af2dbdf1d0419c7a5d0b1d3f071a346091f3d Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/impact_server.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e56f1091248533457033bfb3d324de8a87d8c54 Binary files /dev/null and 
b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87980b93813a007caae93a64f16f99a4573cf52f Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/logics.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..894c0f5f5d1ab57ef97903d9947051450e0f5662 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1082db5541a65e29325352ac557485f4b94ec7b1 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/pipe.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..259d43637608193afd770c99c37ffc6de134df92 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe80fb884d935b296107ab6ecb6d23d2fb5aaa7a Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/sample_error_enhancer.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eca3cea54aa19b9ef8bb97c5179ae02fdd0a0341 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec41e065c42e68c05fc2920500b624b848154731 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/segs_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..804aa07d976edcd6edd0546185c170f39705d11d Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-310.pyc differ diff --git 
a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24436018d9ed36d2d325489b64c29f8c8eda3344 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/special_samplers.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4cedae8b4933e38974014abdced6e9eb6dde9ad Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b74f8cd4d689aa0827396f1e770ed7f450f1fe1 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/util_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2eba491c4cb1765a9e53921edfd61a131912d7a6 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fbdd91108a1b101471406e5e018927ea0dc2dba Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..976301e4c796ae71af651026c303df18d966dc07 Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-311.pyc b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f375d7709953238d2cea31d007b1684e59b739e Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/__pycache__/wildcards.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..799b0b141370a53ca25163f58c011c2db5e22cb6 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/additional_dependencies.py @@ -0,0 +1,12 @@ +import sys +import subprocess + + +def ensure_onnx_package(): + try: + import onnxruntime + except Exception: + if "python_embeded" in sys.executable or "python_embedded" in sys.executable: + subprocess.check_call([sys.executable, '-s', 
'-m', 'pip', 'install', 'onnxruntime']) + else: + # a regular (non-embedded) python does not need the '-s' (ignore user site-packages) flag + subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'onnxruntime']) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4e496782e7ca409dbb58f01fec00934a729fa50a --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py @@ -0,0 +1,66 @@ +import configparser +import os + + +version = "V4.38.2" + +dependency_version = 19 + +my_path = os.path.dirname(__file__) +old_config_path = os.path.join(my_path, "impact-pack.ini") +config_path = os.path.join(my_path, "..", "..", "impact-pack.ini") +latent_letter_path = os.path.join(my_path, "..", "..", "latent.png") + +MAX_RESOLUTION = 8192 + + +def write_config(): + config = configparser.ConfigParser() + config['default'] = { + 'dependency_version': str(dependency_version), + 'mmdet_skip': str(get_config()['mmdet_skip']), + 'sam_editor_cpu': str(get_config()['sam_editor_cpu']), + 'sam_editor_model': get_config()['sam_editor_model'], + 'custom_wildcards': get_config()['custom_wildcards'], + 'disable_gpu_opencv': get_config()['disable_gpu_opencv'], + } + with open(config_path, 'w') as configfile: + config.write(configfile) + + +def read_config(): + try: + config = configparser.ConfigParser() + config.read(config_path) + default_conf = config['default'] + + return { + 'dependency_version': int(default_conf['dependency_version']), + 'mmdet_skip': default_conf['mmdet_skip'].lower() == 'true' if 'mmdet_skip' in default_conf else True, + 'sam_editor_cpu': default_conf['sam_editor_cpu'].lower() == 'true' if 'sam_editor_cpu' in default_conf else False, + 'sam_editor_model': default_conf['sam_editor_model'] if 'sam_editor_model' in default_conf else 'sam_vit_b_01ec64.pth', + 'custom_wildcards': default_conf['custom_wildcards'] if 'custom_wildcards' in default_conf else os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")), + 'disable_gpu_opencv': default_conf['disable_gpu_opencv'].lower() == 'true' if 'disable_gpu_opencv' in default_conf else True + } + + except Exception: + return { + 'dependency_version': 0, + 'mmdet_skip': True, + 'sam_editor_cpu': False, + 'sam_editor_model': 'sam_vit_b_01ec64.pth', + 'custom_wildcards': os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")), + 'disable_gpu_opencv': True + } + + +cached_config = None + + +def get_config(): + global cached_config + + if cached_config is None: + cached_config = read_config() + + return cached_config diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py new file mode 100644 index 0000000000000000000000000000000000000000..fc66c172ed52ba5fbc14d6b196eabbacf68ad2ac --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py @@ -0,0 +1,1942 @@ +import copy +import os +from segment_anything import SamPredictor +import torch.nn.functional as F + +from impact.utils import * +from collections import namedtuple +import numpy as np +from skimage.measure import label, regionprops + +import nodes +import comfy_extras.nodes_upscale_model as model_upscale +from server import PromptServer +import comfy +import impact.wildcards as wildcards +import math +import cv2 + +SEG = namedtuple("SEG", + ['cropped_image', 'cropped_mask', 'confidence', 'crop_region', 'bbox', 'label', 'control_net_wrapper'], + defaults=[None]) + +pb_id_cnt = 0 +preview_bridge_image_id_map = {} +preview_bridge_image_name_map = {}
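+# Preview-bridge bookkeeping: preview_bridge_image_id_map maps pb_id -> (file, item), and +# preview_bridge_image_name_map maps (node_id, file) -> (pb_id, item); ids are issued by set_previewbridge_image() below.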
+preview_bridge_cache = {} + + +def set_previewbridge_image(node_id, file, item): + global pb_id_cnt + + if (node_id, file) in preview_bridge_image_name_map: + # reuse the id already issued for this (node_id, file) pair + pb_id, _ = preview_bridge_image_name_map[node_id, file] + if pb_id.startswith(f"${node_id}"): + return pb_id + + pb_id = f"${node_id}-{pb_id_cnt}" + preview_bridge_image_id_map[pb_id] = (file, item) + preview_bridge_image_name_map[node_id, file] = (pb_id, item) + pb_id_cnt += 1 + + return pb_id + + +def erosion_mask(mask, grow_mask_by): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + w = mask.shape[1] + h = mask.shape[0] + + device = comfy.model_management.get_torch_device() + mask = mask.clone().to(device) + # F.interpolate takes the spatial size in (h, w) order + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(h, w), + mode="bilinear").to(device) + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)).to(device) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :h, :w].round().cpu() + + +def ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, + refiner_negative=None): + if refiner_ratio is None or refiner_model is None or refiner_clip is None or refiner_positive is None or refiner_negative is None: + refined_latent = \ + nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise)[0] + else: + advanced_steps = math.floor(steps / denoise) + start_at_step = advanced_steps - steps + end_at_step = start_at_step + math.floor(steps * (1.0 - refiner_ratio)) + + print(f"pre: {start_at_step} .. {end_at_step} / {advanced_steps}") + temp_latent = \ + nodes.KSamplerAdvanced().sample(model, "enable", seed, advanced_steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + "enable")[0] + + if 'noise_mask' in latent_image: + # noise_latent = \ + # nodes.KSamplerAdvanced().sample(refiner_model, "enable", seed, advanced_steps, cfg, sampler_name, + # scheduler, refiner_positive, refiner_negative, latent_image, end_at_step, + # end_at_step, "enable")[0] + + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + temp_latent = \ + latent_compositor.composite(latent_image, temp_latent, 0, 0, False, latent_image['noise_mask'])[0] + + print(f"post: {end_at_step} ..
{advanced_steps + 1} / {advanced_steps}") + refined_latent = \ + nodes.KSamplerAdvanced().sample(refiner_model, "disable", seed, advanced_steps, cfg, sampler_name, scheduler, + refiner_positive, refiner_negative, temp_latent, end_at_step, + advanced_steps + 1, + "disable")[0] + + return refined_latent + + +class REGIONAL_PROMPT: + def __init__(self, mask, sampler): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + self.mask = mask + self.sampler = sampler + self.mask_erosion = None + self.erosion_factor = None + + def get_mask_erosion(self, factor): + if self.mask_erosion is None or self.erosion_factor != factor: + self.mask_erosion = erosion_mask(self.mask, factor) + self.erosion_factor = factor + + return self.mask_erosion + + +class NO_BBOX_DETECTOR: + pass + + +class NO_SEGM_DETECTOR: + pass + + +def create_segmasks(results): + bboxs = results[1] + segms = results[2] + confidence = results[3] + + results = [] + for i in range(len(segms)): + item = (bboxs[i], segms[i].astype(np.float32), confidence[i]) + results.append(item) + return results + + +def gen_detection_hints_from_mask_area(x, y, mask, threshold, use_negative): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + points = [] + plabs = [] + + # minimum sampling step >= 3 + y_step = max(3, int(mask.shape[0] / 20)) + x_step = max(3, int(mask.shape[1] / 20)) + + for i in range(0, len(mask), y_step): + for j in range(0, len(mask[i]), x_step): + if mask[i][j] > threshold: + points.append((x + j, y + i)) + plabs.append(1) + elif use_negative and mask[i][j] == 0: + points.append((x + j, y + i)) + plabs.append(0) + + return points, plabs + + +def gen_negative_hints(w, h, x1, y1, x2, y2): + npoints = [] + nplabs = [] + + # minimum sampling step >= 3 + y_step = max(3, int(w / 20)) + x_step = max(3, int(h / 20)) + + for i in range(10, h - 10, y_step): + for j in range(10, w - 10, x_step): + if not (x1 - 10 <= j and j <= x2 + 10 and y1 - 10 <= i and i <= y2 + 10): + npoints.append((j, i)) + nplabs.append(0) + + return npoints, nplabs + + +def enhance_detail(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg, + sampler_name, + scheduler, positive, negative, denoise, noise_mask, force_inpaint, wildcard_opt=None, + detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, + refiner_negative=None, control_net_wrapper=None, cycle=1): + if noise_mask is not None and len(noise_mask.shape) == 3: + noise_mask = noise_mask.squeeze(0) + + if wildcard_opt is not None and wildcard_opt != "": + model, _, positive = wildcards.process_with_loras(wildcard_opt, model, clip) + + h = image.shape[1] + w = image.shape[2] + + bbox_h = bbox[3] - bbox[1] + bbox_w = bbox[2] - bbox[0] + + # Skip processing if the detected bbox is already larger than the guide_size + if not force_inpaint and bbox_h >= guide_size and bbox_w >= guide_size: + print(f"Detailer: segment skip (enough big)") + return None, None + + if guide_size_for_bbox: # == "bbox" + # Scale up based on the smaller dimension between width and height. 
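+ # e.g. guide_size=512 with a 256x384 detected bbox gives upscale = 512 / min(256, 384) = 2.0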
+ upscale = guide_size / min(bbox_w, bbox_h) + else: + # for cropped_size + upscale = guide_size / min(w, h) + + new_w = int(w * upscale) + new_h = int(h * upscale) + + # safeguard + if 'aitemplate_keep_loaded' in model.model_options: + max_size = min(4096, max_size) + + if new_w > max_size or new_h > max_size: + upscale *= max_size / max(new_w, new_h) + new_w = int(w * upscale) + new_h = int(h * upscale) + + if not force_inpaint: + if upscale <= 1.0: + print(f"Detailer: segment skip [determined upscale factor={upscale}]") + return None, None + + if new_w == 0 or new_h == 0: + print(f"Detailer: segment skip [zero size={new_w, new_h}]") + return None, None + else: + if upscale <= 1.0 or new_w == 0 or new_h == 0: + print(f"Detailer: force inpaint") + upscale = 1.0 + new_w = w + new_h = h + + if detailer_hook is not None: + new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h) + + print(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}") + + # upscale + upscaled_image = scale_tensor(new_w, new_h, torch.from_numpy(image)) + + # ksampler + latent_image = to_latent_image(upscaled_image, vae) + + upscaled_mask = None + if noise_mask is not None: + # upscale the mask tensor by a factor of 2 using bilinear interpolation + noise_mask = torch.from_numpy(noise_mask) + upscaled_mask = torch.nn.functional.interpolate(noise_mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), + mode='bilinear', align_corners=False) + + # remove the extra dimensions added by unsqueeze + upscaled_mask = upscaled_mask.squeeze().squeeze() + latent_image['noise_mask'] = upscaled_mask + + if detailer_hook is not None: + latent_image = detailer_hook.post_encode(latent_image) + + cnet_pil = None + if control_net_wrapper is not None: + positive, cnet_pil = control_net_wrapper.apply(positive, upscaled_image, upscaled_mask) + + refined_latent = latent_image + for i in range(0, cycle): + if detailer_hook is not None and hasattr(detailer_hook, 'cycle_latent'): + refined_latent = detailer_hook.cycle_latent(i, refined_latent) + + refined_latent = ksampler_wrapper(model, seed+i, steps, cfg, sampler_name, scheduler, positive, negative, + refined_latent, denoise, + refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative) + + if detailer_hook is not None: + refined_latent = detailer_hook.pre_decode(refined_latent) + + # non-latent downscale - latent downscale cause bad quality + refined_image = vae.decode(refined_latent['samples']) + + if detailer_hook is not None: + refined_image = detailer_hook.post_decode(refined_image) + + # downscale + refined_image = scale_tensor_and_to_pil(w, h, refined_image) + + # don't convert to latent - latent break image + # preserving pil is much better + return refined_image, cnet_pil + + +def enhance_detail_for_animatediff(image_frames, model, clip, vae, guide_size, guide_size_for_bbox, max_size, bbox, seed, steps, cfg, + sampler_name, + scheduler, positive, negative, denoise, noise_mask, wildcard_opt=None, + detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, + refiner_negative=None): + if noise_mask is not None and len(noise_mask.shape) == 3: + noise_mask = noise_mask.squeeze(0) + + if wildcard_opt is not None and wildcard_opt != "": + model, _, positive = wildcards.process_with_loras(wildcard_opt, model, clip) + + h = image_frames.shape[1] + w = image_frames.shape[2] + + bbox_h = bbox[3] - bbox[1] + bbox_w = bbox[2] - bbox[0] + + # Skip processing if the detected bbox is 
already larger than the guide_size + if guide_size_for_bbox: # == "bbox" + # Scale up based on the smaller dimension between width and height. + upscale = guide_size / min(bbox_w, bbox_h) + else: + # for cropped_size + upscale = guide_size / min(w, h) + + new_w = int(w * upscale) + new_h = int(h * upscale) + + # safeguard + if 'aitemplate_keep_loaded' in model.model_options: + max_size = min(4096, max_size) + + if new_w > max_size or new_h > max_size: + upscale *= max_size / max(new_w, new_h) + new_w = int(w * upscale) + new_h = int(h * upscale) + + if upscale <= 1.0 or new_w == 0 or new_h == 0: + print(f"Detailer: force inpaint") + upscale = 1.0 + new_w = w + new_h = h + + if detailer_hook is not None: + new_w, new_h = detailer_hook.touch_scaled_size(new_w, new_h) + + print(f"Detailer: segment upscale for ({bbox_w, bbox_h}) | crop region {w, h} x {upscale} -> {new_w, new_h}") + + # upscale the mask tensor by a factor of 2 using bilinear interpolation + noise_mask = torch.from_numpy(noise_mask) + upscaled_mask = torch.nn.functional.interpolate(noise_mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), + mode='bilinear', align_corners=False) + + upscaled_mask = upscaled_mask.squeeze().squeeze() + + latent_frames = None + for image in image_frames: + image = torch.from_numpy(image).unsqueeze(0) + + # upscale + upscaled_image = scale_tensor(new_w, new_h, image) + + # ksampler + samples = to_latent_image(upscaled_image, vae)['samples'] + + if latent_frames is None: + latent_frames = samples + else: + latent_frames = torch.concat((latent_frames, samples), dim=0) + + upscaled_mask = upscaled_mask.expand(len(image_frames), -1, -1) + + latent = { + 'noise_mask': upscaled_mask, + 'samples': latent_frames + } + + if detailer_hook is not None: + latent = detailer_hook.post_encode(latent) + + refined_latent = ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + latent, denoise, + refiner_ratio, refiner_model, refiner_clip, refiner_positive, refiner_negative) + + if detailer_hook is not None: + refined_latent = detailer_hook.pre_decode(refined_latent) + + refined_image_frames = None + for refined_sample in refined_latent['samples']: + refined_sample = refined_sample.unsqueeze(0) + + # non-latent downscale - latent downscale cause bad quality + refined_image = vae.decode(refined_sample) + + if refined_image_frames is None: + refined_image_frames = refined_image + else: + refined_image_frames = torch.concat((refined_image_frames, refined_image), dim=0) + + if detailer_hook is not None: + refined_image_frames = detailer_hook.post_decode(refined_image_frames) + + refined_image_frames = nodes.ImageScale().upscale(image=refined_image_frames, upscale_method='lanczos', width=w, height=h, crop='disabled')[0] + + return refined_image_frames + + +def composite_to(dest_latent, crop_region, src_latent): + x1 = crop_region[0] + y1 = crop_region[1] + + # composite to original latent + lc = nodes.LatentComposite() + orig_image = lc.composite(dest_latent, src_latent, x1, y1) + + return orig_image[0] + + +def sam_predict(predictor, points, plabs, bbox, threshold): + point_coords = None if not points else np.array(points) + point_labels = None if not plabs else np.array(plabs) + + box = np.array([bbox]) if bbox is not None else None + + cur_masks, scores, _ = predictor.predict(point_coords=point_coords, point_labels=point_labels, box=box) + + total_masks = [] + + selected = False + max_score = 0 + for idx in range(len(scores)): + if scores[idx] > max_score: + max_score = scores[idx] + 
max_mask = cur_masks[idx] + + if scores[idx] >= threshold: + selected = True + total_masks.append(cur_masks[idx]) + else: + pass + + if not selected: + total_masks.append(max_mask) + + return total_masks + + +def make_sam_mask(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + if sam_model.is_auto_mode: + device = comfy.model_management.get_torch_device() + sam_model.to(device=device) + + try: + predictor = SamPredictor(sam_model) + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + predictor.set_image(image, "RGB") + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(segs[i].bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_predict(predictor, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + points = [] + plabs = [] + if detection_hint == "center-1": + points.append(center) + plabs = [1] # 1 = foreground point, 0 = background point + + elif detection_hint == "horizontal-2": + gap = (x2 - x1) / 3 + points.append((x1 + gap, center[1])) + points.append((x1 + gap * 2, center[1])) + plabs = [1, 1] + + elif detection_hint == "vertical-2": + gap = (y2 - y1) / 3 + points.append((center[0], y1 + gap)) + points.append((center[0], y1 + gap * 2)) + plabs = [1, 1] + + elif detection_hint == "rect-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, center[1])) + points.append((x1 + x_gap * 2, center[1])) + points.append((center[0], y1 + y_gap)) + points.append((center[0], y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "diamond-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, y1 + y_gap)) + points.append((x1 + x_gap * 2, y1 + y_gap)) + points.append((x1 + x_gap, y1 + y_gap * 2)) + points.append((x1 + x_gap * 2, y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "mask-point-bbox": + center = center_of_bbox(segs[i].bbox) + points.append(center) + plabs = [1] + + elif detection_hint == "mask-area": + points, plabs = gen_detection_hints_from_mask_area(segs[i].crop_region[0], segs[i].crop_region[1], + segs[i].cropped_mask, + mask_hint_threshold, use_small_negative) + + if mask_hint_use_negative == "Outter": + npoints, nplabs = gen_negative_hints(image.shape[0], image.shape[1], + segs[i].crop_region[0], segs[i].crop_region[1], + segs[i].crop_region[2], segs[i].crop_region[3]) + + points += npoints + plabs += nplabs + + detected_masks = sam_predict(predictor, points, plabs, dilated_bbox, threshold) + total_masks += detected_masks + + # merge every collected masks + mask = combine_masks2(total_masks) + + finally: + if sam_model.is_auto_mode: + print(f"semd to {device}") + sam_model.to(device="cpu") + + if mask is not None: + mask = mask.float() + mask = dilate_mask(mask.cpu().numpy(), dilation) + mask = 
torch.from_numpy(mask) + else: + mask = torch.zeros((8, 8), dtype=torch.float32, device="cpu") # empty mask + + return mask + + +def generate_detection_hints(image, seg, center, detection_hint, dilated_bbox, mask_hint_threshold, use_small_negative, + mask_hint_use_negative): + [x1, y1, x2, y2] = dilated_bbox + + points = [] + plabs = [] + if detection_hint == "center-1": + points.append(center) + plabs = [1] # 1 = foreground point, 0 = background point + + elif detection_hint == "horizontal-2": + gap = (x2 - x1) / 3 + points.append((x1 + gap, center[1])) + points.append((x1 + gap * 2, center[1])) + plabs = [1, 1] + + elif detection_hint == "vertical-2": + gap = (y2 - y1) / 3 + points.append((center[0], y1 + gap)) + points.append((center[0], y1 + gap * 2)) + plabs = [1, 1] + + elif detection_hint == "rect-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, center[1])) + points.append((x1 + x_gap * 2, center[1])) + points.append((center[0], y1 + y_gap)) + points.append((center[0], y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "diamond-4": + x_gap = (x2 - x1) / 3 + y_gap = (y2 - y1) / 3 + points.append((x1 + x_gap, y1 + y_gap)) + points.append((x1 + x_gap * 2, y1 + y_gap)) + points.append((x1 + x_gap, y1 + y_gap * 2)) + points.append((x1 + x_gap * 2, y1 + y_gap * 2)) + plabs = [1, 1, 1, 1] + + elif detection_hint == "mask-point-bbox": + center = center_of_bbox(seg.bbox) + points.append(center) + plabs = [1] + + elif detection_hint == "mask-area": + points, plabs = gen_detection_hints_from_mask_area(seg.crop_region[0], seg.crop_region[1], + seg.cropped_mask, + mask_hint_threshold, use_small_negative) + + if mask_hint_use_negative == "Outter": + npoints, nplabs = gen_negative_hints(image.shape[0], image.shape[1], + seg.crop_region[0], seg.crop_region[1], + seg.crop_region[2], seg.crop_region[3]) + + points += npoints + plabs += nplabs + + return points, plabs + + +def convert_and_stack_masks(masks): + if len(masks) == 0: + return None + + mask_tensors = [] + for mask in masks: + mask_array = np.array(mask, dtype=np.uint8) + mask_tensor = torch.from_numpy(mask_array) + mask_tensors.append(mask_tensor) + + stacked_masks = torch.stack(mask_tensors, dim=0) + stacked_masks = stacked_masks.unsqueeze(1) + + return stacked_masks + + +def merge_and_stack_masks(stacked_masks, group_size): + if stacked_masks is None: + return None + + num_masks = stacked_masks.size(0) + merged_masks = [] + + for i in range(0, num_masks, group_size): + subset_masks = stacked_masks[i:i + group_size] + merged_mask = torch.any(subset_masks, dim=0) + merged_masks.append(merged_mask) + + if len(merged_masks) > 0: + merged_masks = torch.stack(merged_masks, dim=0) + + return merged_masks + + +def segs_scale_match(segs, target_shape): + h = segs[0][0] + w = segs[0][1] + + th = target_shape[1] + tw = target_shape[2] + + if (h == th and w == tw) or h == 0 or w == 0: + return segs + + rh = th / h + rw = tw / w + + new_segs = [] + for seg in segs[1]: + cropped_image = seg.cropped_image + cropped_mask = seg.cropped_mask + x1, y1, x2, y2 = seg.crop_region + bx1, by1, bx2, by2 = seg.bbox + + crop_region = int(x1*rw), int(y1*rw), int(x2*rh), int(y2*rh) + bbox = int(bx1*rw), int(by1*rw), int(bx2*rh), int(by2*rh) + new_w = crop_region[2] - crop_region[0] + new_h = crop_region[3] - crop_region[1] + + cropped_mask = torch.from_numpy(cropped_mask) + cropped_mask = torch.nn.functional.interpolate(cropped_mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), + mode='bilinear', align_corners=False) 
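+ # F.interpolate expects a 4-D (N, C, H, W) tensor, hence the two unsqueeze() calls above; the squeeze() below restores the plain (h, w) mask before converting back to numpy.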
+ cropped_mask = cropped_mask.squeeze(0).squeeze(0).numpy() + + if cropped_image is not None: + cropped_image = scale_tensor(new_w, new_h, torch.from_numpy(cropped_image)) + cropped_image = cropped_image.numpy() + + new_seg = SEG(cropped_image, cropped_mask, seg.confidence, crop_region, bbox, seg.label, seg.control_net_wrapper) + new_segs.append(new_seg) + + return ((th, tw), new_segs) + + +# Used Python's slicing feature. stacked_masks[2::3] means starting from index 2, selecting every third tensor with a step size of 3. +# This allows for quickly obtaining the last tensor of every three tensors in stacked_masks. +def every_three_pick_last(stacked_masks): + selected_masks = stacked_masks[2::3] + return selected_masks + + +def make_sam_mask_segmented(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + if sam_model.is_auto_mode: + device = comfy.model_management.get_torch_device() + sam_model.to(device=device) + + try: + predictor = SamPredictor(sam_model) + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + predictor.set_image(image, "RGB") + + total_masks = [] + + use_small_negative = mask_hint_use_negative == "Small" + + # seg_shape = segs[0] + segs = segs[1] + if detection_hint == "mask-points": + points = [] + plabs = [] + + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + points.append(center) + + # small point is background, big point is foreground + if use_small_negative and bbox[2] - bbox[0] < 10: + plabs.append(0) + else: + plabs.append(1) + + detected_masks = sam_predict(predictor, points, plabs, None, threshold) + total_masks += detected_masks + + else: + for i in range(len(segs)): + bbox = segs[i].bbox + center = center_of_bbox(bbox) + x1 = max(bbox[0] - bbox_expansion, 0) + y1 = max(bbox[1] - bbox_expansion, 0) + x2 = min(bbox[2] + bbox_expansion, image.shape[1]) + y2 = min(bbox[3] + bbox_expansion, image.shape[0]) + + dilated_bbox = [x1, y1, x2, y2] + + points, plabs = generate_detection_hints(image, segs[i], center, detection_hint, dilated_bbox, + mask_hint_threshold, use_small_negative, + mask_hint_use_negative) + + detected_masks = sam_predict(predictor, points, plabs, dilated_bbox, threshold) + + total_masks += detected_masks + + # merge every collected masks + mask = combine_masks2(total_masks) + + finally: + # Temporarily disabling the switch back to CPU after inference. + # Rationale: After multiple tests and comparisons, it's concluded that not only does it fail to conserve GPU memory, + # but it also introduces additional IO overhead from transferring the model between devices. 
+ + # if sam_model.is_auto_mode: + # sam_model.to(device=torch.device("cpu")) + + pass + + mask_working_device = torch.device("cpu") + + if mask is not None: + mask = mask.float() + mask = dilate_mask(mask.cpu().numpy(), dilation) + mask = torch.from_numpy(mask) + mask = mask.to(device=mask_working_device) + else: + # Extracting batch, height and width + height, width, _ = image.shape + mask = torch.zeros( + (height, width), dtype=torch.float32, device=mask_working_device + ) # empty mask + + stacked_masks = convert_and_stack_masks(total_masks) + + return (mask, merge_and_stack_masks(stacked_masks, group_size=3)) + # return every_three_pick_last(stacked_masks) + + +def segs_bitwise_and_mask(segs, mask): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + if mask is None: + print("[SegsBitwiseAndMask] Cannot operate: MASK is empty.") + return ([],) + + items = [] + + mask = (mask.cpu().numpy() * 255).astype(np.uint8) + + for seg in segs[1]: + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +def apply_mask_to_each_seg(segs, masks): + if masks is None: + print("[SegsBitwiseAndMask] Cannot operate: MASK is empty.") + return (segs[0], [],) + + items = [] + + masks = masks.squeeze(1) + + for seg, mask in zip(segs[1], masks): + cropped_mask = (seg.cropped_mask * 255).astype(np.uint8) + crop_region = seg.crop_region + + cropped_mask2 = (mask.cpu().numpy() * 255).astype(np.uint8) + cropped_mask2 = cropped_mask2[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] + + new_mask = np.bitwise_and(cropped_mask.astype(np.uint8), cropped_mask2) + new_mask = new_mask.astype(np.float32) / 255.0 + + item = SEG(seg.cropped_image, new_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + items.append(item) + + return segs[0], items + + +class ONNXDetector: + onnx_model = None + + def __init__(self, onnx_model): + self.onnx_model = onnx_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + try: + import impact.onnx as onnx + + h = image.shape[1] + w = image.shape[2] + + labels, scores, boxes = onnx.onnx_inference(image, self.onnx_model) + + # collect feasible item + result = [] + + for i in range(len(labels)): + if scores[i] > threshold: + item_bbox = boxes[i] + x1, y1, x2, y2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + + if detailer_hook is not None: + crop_region = item_bbox.post_crop_region(w, h, item_bbox, crop_region) + + crop_x1, crop_y1, crop_x2, crop_y2, = crop_region + + # prepare cropped mask + cropped_mask = np.zeros((crop_y2 - crop_y1, crop_x2 - crop_x1)) + cropped_mask[y1 - crop_y1:y2 - crop_y1, x1 - crop_x1:x2 - crop_x1] = 1 + cropped_mask = dilate_mask(cropped_mask, dilation) + + # make items. 
just convert the integer label to a string + item = SEG(None, cropped_mask, scores[i], crop_region, item_bbox, str(labels[i]), None) + result.append(item) + + shape = h, w + return shape, result + except Exception as e: + print(f"ONNXDetector: unable to execute.\n{e}") + pass + + def detect_combined(self, image, threshold, dilation): + return segs_to_combined_mask(self.detect(image, threshold, dilation, 1)) + + def setAux(self, x): + pass + + +def mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size=1, label='A', crop_min_size=None, detailer_hook=None, is_contour=True): + drop_size = max(drop_size, 1) + if mask is None: + print("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + if isinstance(mask, np.ndarray): + pass # `mask` is already a NumPy array + else: + try: + mask = mask.numpy() + except AttributeError: + print("[mask_to_segs] Cannot operate: MASK is not a NumPy array or Tensor.") + return ([],) + + if mask is None: + print("[mask_to_segs] Cannot operate: MASK is empty.") + return ([],) + + result = [] + + if len(mask.shape) == 2: + mask = np.expand_dims(mask, axis=0) + + for i in range(mask.shape[0]): + mask_i = mask[i] + + if combined: + indices = np.nonzero(mask_i) + if len(indices[0]) > 0 and len(indices[1]) > 0: + bbox = ( + np.min(indices[1]), + np.min(indices[0]), + np.max(indices[1]), + np.max(indices[0]), + ) + crop_region = make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor + ) + x1, y1, x2, y2 = crop_region + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if x2 - x1 > 0 and y2 - y1 > 0: + cropped_mask = mask_i[y1:y2, x1:x2] + + if cropped_mask is not None: + item = SEG(None, cropped_mask, 1.0, crop_region, bbox, label, None) + result.append(item) + + else: + mask_i_uint8 = (mask_i * 255.0).astype(np.uint8) + contours, _ = cv2.findContours(mask_i_uint8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + for contour in contours: + separated_mask = np.zeros_like(mask_i_uint8) + cv2.drawContours(separated_mask, [contour], 0, 255, -1) + separated_mask = np.array(separated_mask / 255.0).astype(np.float32) + + x, y, w, h = cv2.boundingRect(contour) + bbox = x, y, x + w, y + h + crop_region = make_crop_region( + mask_i.shape[1], mask_i.shape[0], bbox, crop_factor, crop_min_size + ) + + if detailer_hook is not None: + crop_region = detailer_hook.post_crop_region(mask_i.shape[1], mask_i.shape[0], bbox, crop_region) + + if w > drop_size and h > drop_size: + if is_contour: + mask_src = separated_mask + else: + mask_src = mask_i + + cropped_mask = np.array( + mask_src[ + crop_region[1]: crop_region[3], + crop_region[0]: crop_region[2], + ] + ) + + if bbox_fill: + cx1, cy1, _, _ = crop_region + bx1 = x - cx1 + bx2 = x+w - cx1 + by1 = y - cy1 + by2 = y+h - cy1 + cropped_mask[by1:by2, bx1:bx2] = 1.0 + + if cropped_mask is not None: + item = SEG(None, cropped_mask, 1.0, crop_region, bbox, label, None) + result.append(item) + + if not result: + print(f"[mask_to_segs] Empty mask.") + + print(f"# of Detected SEGS: {len(result)}") + # for r in result: + # print(f"\tbbox={r.bbox}, crop={r.crop_region}, label={r.label}") + + # shape: (b,h,w) -> (h,w) + return (mask.shape[1], mask.shape[2]), result + + +def mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil): + parts = { + "face": np.array([0x0A, 0xC8, 0x0A]), + "mouth": np.array([0x0A, 
0xB4, 0x0A]), + "left_eyebrow": np.array([0xB4, 0xDC, 0x0A]), + "left_eye": np.array([0xB4, 0xC8, 0x0A]), + "left_pupil": np.array([0xFA, 0xC8, 0x0A]), + "right_eyebrow": np.array([0x0A, 0xDC, 0xB4]), + "right_eye": np.array([0x0A, 0xC8, 0xB4]), + "right_pupil": np.array([0x0A, 0xC8, 0xFA]), + } + + def create_segment(image, color): + image = (image * 255).to(torch.uint8) + image = image.squeeze(0).numpy() + mask = cv2.inRange(image, color, color) + + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + if contours: + max_contour = max(contours, key=cv2.contourArea) + convex_hull = cv2.convexHull(max_contour) + convex_segment = np.zeros_like(image) + cv2.fillPoly(convex_segment, [convex_hull], (255, 255, 255)) + + convex_segment = np.expand_dims(convex_segment, axis=0).astype(np.float32) / 255.0 + tensor = torch.from_numpy(convex_segment) + mask_tensor = torch.any(tensor != 0, dim=-1).float() + mask_tensor = mask_tensor.squeeze(0) + mask_tensor = torch.from_numpy(dilate_mask(mask_tensor.numpy(), dilation)) + return mask_tensor.unsqueeze(0) + + return None + + segs = [] + + def create_seg(label): + mask = create_segment(image, parts[label]) + if mask is not None: + seg = mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size=drop_size, label=label, crop_min_size=crop_min_size) + if len(seg[1]) > 0: + segs.append(seg[1][0]) + + if face: + create_seg('face') + + if mouth: + create_seg('mouth') + + if left_eyebrow: + create_seg('left_eyebrow') + + if left_eye: + create_seg('left_eye') + + if left_pupil: + create_seg('left_pupil') + + if right_eyebrow: + create_seg('right_eyebrow') + + if right_eye: + create_seg('right_eye') + + if right_pupil: + create_seg('right_pupil') + + return (image.shape[1], image.shape[2]), segs + + +def segs_to_combined_mask(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + mask = np.zeros((h, w), dtype=np.uint8) + + for seg in segs[1]: + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + + return torch.from_numpy(mask.astype(np.float32) / 255.0) + + +def segs_to_masklist(segs): + shape = segs[0] + h = shape[0] + w = shape[1] + + masks = [] + for seg in segs[1]: + mask = np.zeros((h, w), dtype=np.uint8) + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + mask = torch.from_numpy(mask.astype(np.float32) / 255.0) + masks.append(mask) + + if len(masks) == 0: + empty_mask = torch.zeros((h, w), dtype=torch.float32, device="cpu") + masks = [empty_mask] + + return masks + + +def vae_decode(vae, samples, use_tile, hook, tile_size=512): + if use_tile: + pixels = nodes.VAEDecodeTiled().decode(vae, samples, tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, samples)[0] + + if hook is not None: + pixels = hook.post_decode(pixels) + + return pixels + + +def vae_encode(vae, pixels, use_tile, hook, tile_size=512): + if use_tile: + samples = nodes.VAEEncodeTiled().encode(vae, pixels, tile_size)[0] + else: + samples = nodes.VAEEncode().encode(vae, pixels)[0] + + if hook is not None: + samples = hook.post_encode(samples) + + return samples + + +class KSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + + def 
sample(self, latent_image, hook=None): + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise) + + return nodes.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise)[0] + + +class KSamplerAdvancedWrapper: + params = None + + def __init__(self, model, cfg, sampler_name, scheduler, positive, negative): + self.params = model, cfg, sampler_name, scheduler, positive, negative + + def sample_advanced(self, add_noise, seed, steps, latent_image, start_at_step, end_at_step, + return_with_leftover_noise, hook=None, recover_special_sampler=False): + model, cfg, sampler_name, scheduler, positive, negative = self.params + + if hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent = \ + hook.pre_ksample_advanced(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise) + + if recover_special_sampler and sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']: + base_image = latent_image.copy() + else: + base_image = None + + try: + latent_image = nodes.KSamplerAdvanced().sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step, end_at_step, + return_with_leftover_noise)[0] + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + print(f"\nWARN: sampling skipped - sigma_min and sigma_max are 0") + return latent_image + + if recover_special_sampler and sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']: + compensate = 0 if sampler_name in ['uni_pc', 'uni_pc_bh2'] else 2 + sampler_name = 'dpmpp_fast' if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu'] else 'dpmpp_2m' + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + + noise_mask = latent_image['noise_mask'] + + if len(noise_mask.shape) == 4: + noise_mask = noise_mask.squeeze(0).squeeze(0) + + latent_image = \ + latent_compositor.composite(base_image, latent_image, 0, 0, False, noise_mask)[0] + + try: + latent_image = nodes.KSamplerAdvanced().sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, + positive, negative, latent_image, start_at_step-compensate, end_at_step, + return_with_leftover_noise)[0] + except ValueError as e: + if str(e) == 'sigma_min and sigma_max must not be 0': + print(f"\nWARN: sampling skipped - sigma_min and sigma_max are 0") + + return latent_image + + +class PixelKSampleHook: + cur_step = 0 + total_step = 0 + + def __init__(self): + pass + + def set_steps(self, info): + self.cur_step, self.total_step = info + + def post_decode(self, pixels): + return pixels + + def post_upscale(self, pixels): + return pixels + + def post_encode(self, samples): + return samples + + def pre_decode(self, samples): + return samples + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, 
upscaled_latent, denoise + + def post_crop_region(self, w, h, item_bbox, crop_region): + return crop_region + + def touch_scaled_size(self, w, h): + return w, h + + +class PixelKSampleHookCombine(PixelKSampleHook): + hook1 = None + hook2 = None + + def __init__(self, hook1, hook2): + super().__init__() + self.hook1 = hook1 + self.hook2 = hook2 + + def set_steps(self, info): + self.hook1.set_steps(info) + self.hook2.set_steps(info) + + def post_decode(self, pixels): + return self.hook2.post_decode(self.hook1.post_decode(pixels)) + + def post_upscale(self, pixels): + return self.hook2.post_upscale(self.hook1.post_upscale(pixels)) + + def post_encode(self, samples): + return self.hook2.post_encode(self.hook1.post_encode(samples)) + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook1.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + return self.hook2.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + +class SimpleCfgScheduleHook(PixelKSampleHook): + target_cfg = 0 + + def __init__(self, target_cfg): + super().__init__() + self.target_cfg = target_cfg + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + progress = self.cur_step / self.total_step + gap = self.target_cfg - cfg + current_cfg = cfg + gap * progress + return model, seed, steps, current_cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise + + +class SimpleDenoiseScheduleHook(PixelKSampleHook): + target_denoise = 0 + + def __init__(self, target_denoise): + super().__init__() + self.target_denoise = target_denoise + + def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, + denoise): + progress = self.cur_step / self.total_step + gap = self.target_denoise - denoise + current_denoise = denoise + gap * progress + return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, current_denoise + + +def latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, use_tile=False, tile_size=512, + save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size) + + +def latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=False, tile_size=512, + save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] * scale_factor + h = pixels.shape[1] * scale_factor + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(w), int(h), False)[0] + + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), pixels) + +def latent_upscale_on_pixel_space(samples, scale_method, scale_factor, vae, 
use_tile=False, tile_size=512, + save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + +def latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, upscale_model, new_w, new_h, vae, + use_tile=False, tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + + # upscale by model upscaler + current_w = w + while current_w < new_w: + pixels = model_upscale.ImageUpscaleWithModel().upscale(upscale_model, pixels)[0] + current_w = pixels.shape[2] + if current_w == w: + print(f"[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + if hook is not None: + pixels = hook.post_upscale(pixels) + + return vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size) + + +def latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None): + pixels = vae_decode(vae, samples, use_tile, hook, tile_size=tile_size) + + if save_temp_prefix is not None: + nodes.PreviewImage().save_images(pixels, filename_prefix=save_temp_prefix) + + w = pixels.shape[2] + h = pixels.shape[1] + + new_w = w * scale_factor + new_h = h * scale_factor + + # upscale by model upscaler + current_w = w + while current_w < new_w: + pixels = model_upscale.ImageUpscaleWithModel().upscale(upscale_model, pixels)[0] + current_w = pixels.shape[2] + if current_w == w: + print(f"[latent_upscale_on_pixel_space_with_model] x1 upscale model selected") + break + + # downscale to target scale + pixels = nodes.ImageScale().upscale(pixels, scale_method, int(new_w), int(new_h), False)[0] + + if hook is not None: + pixels = hook.post_upscale(pixels) + + return (vae_encode(vae, pixels, use_tile, hook, tile_size=tile_size), pixels) + +def latent_upscale_on_pixel_space_with_model(samples, scale_method, upscale_model, scale_factor, vae, use_tile=False, + tile_size=512, save_temp_prefix=None, hook=None): + return latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model, scale_factor, vae, use_tile, tile_size, save_temp_prefix, hook)[0] + +class TwoSamplersForMaskUpscaler: + params = None + upscale_model = None + hook_base = None + hook_mask = None + hook_full = None + use_tiled_vae = False + is_tiled = False + tile_size = 512 + + def __init__(self, scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, hook_base_opt=None, hook_mask_opt=None, + hook_full_opt=None, + tile_size=512): + + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + mask = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) + + self.params = scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae + self.upscale_model = upscale_model_opt + self.full_sampler = full_sampler_opt + self.hook_base = hook_base_opt + self.hook_mask = hook_mask_opt + self.hook_full = hook_full_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, sample_schedule, use_tiled_vae, base_sampler, 
mask_sampler, mask, vae = self.params
+
+        if len(mask.shape) == 3:
+            mask = mask.squeeze(0)
+
+        self.prepare_hook(step_info)
+
+        # upscale latent
+        if self.upscale_model is None:
+            upscaled_latent = latent_upscale_on_pixel_space(samples, scale_method, upscale_factor, vae,
+                                                            use_tile=self.use_tiled_vae,
+                                                            save_temp_prefix=save_temp_prefix,
+                                                            hook=self.hook_base, tile_size=self.tile_size)
+        else:
+            upscaled_latent = latent_upscale_on_pixel_space_with_model(samples, scale_method, self.upscale_model,
+                                                                       upscale_factor, vae,
+                                                                       use_tile=self.use_tiled_vae,
+                                                                       save_temp_prefix=save_temp_prefix,
+                                                                       hook=self.hook_mask, tile_size=self.tile_size)
+
+        return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent)
+
+    def prepare_hook(self, step_info):
+        if self.hook_base is not None:
+            self.hook_base.set_steps(step_info)
+        if self.hook_mask is not None:
+            self.hook_mask.set_steps(step_info)
+        if self.hook_full is not None:
+            self.hook_full.set_steps(step_info)
+
+    def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None):
+        scale_method, sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae = self.params
+
+        if len(mask.shape) == 3:
+            mask = mask.squeeze(0)
+
+        self.prepare_hook(step_info)
+
+        # upscale latent
+        if self.upscale_model is None:
+            upscaled_latent = latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae,
+                                                                  use_tile=self.use_tiled_vae,
+                                                                  save_temp_prefix=save_temp_prefix,
+                                                                  hook=self.hook_base,
+                                                                  tile_size=self.tile_size)
+        else:
+            upscaled_latent = latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, self.upscale_model,
+                                                                             w, h, vae,
+                                                                             use_tile=self.use_tiled_vae,
+                                                                             save_temp_prefix=save_temp_prefix,
+                                                                             hook=self.hook_mask,
+                                                                             tile_size=self.tile_size)
+
+        return self.do_samples(step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent)
+
+    def is_full_sample_time(self, step_info, sample_schedule):
+        cur_step, total_step = step_info
+
+        # make steps start from 1 instead of zero
+        cur_step += 1
+        total_step += 1
+
+        if sample_schedule == "none":
+            return False
+
+        elif sample_schedule == "interleave1":
+            return cur_step % 2 == 0
+
+        elif sample_schedule == "interleave2":
+            return cur_step % 3 == 0
+
+        elif sample_schedule == "interleave3":
+            return cur_step % 4 == 0
+
+        elif sample_schedule == "last1":
+            return cur_step == total_step
+
+        elif sample_schedule == "last2":
+            return cur_step >= total_step - 1
+
+        elif sample_schedule == "interleave1+last1":
+            return cur_step % 2 == 0 or cur_step >= total_step - 1
+
+        elif sample_schedule == "interleave2+last1":
+            return cur_step % 3 == 0 or cur_step >= total_step - 1
+
+        elif sample_schedule == "interleave3+last1":
+            return cur_step % 4 == 0 or cur_step >= total_step - 1
+
+        return False
+
+    def do_samples(self, step_info, base_sampler, mask_sampler, sample_schedule, mask, upscaled_latent):
+        if len(mask.shape) == 3:
+            mask = mask.squeeze(0)
+
+        if self.is_full_sample_time(step_info, sample_schedule):
+            print(f"step_info={step_info} / full time")
+
+            upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base)
+            sampler = self.full_sampler if self.full_sampler is not None else base_sampler
+            return sampler.sample(upscaled_latent, self.hook_full)
+
+        else:
+            print(f"step_info={step_info} / non-full time")
+            # upscale mask
+            upscaled_mask = F.interpolate(mask, size=(
+                upscaled_latent['samples'].shape[2], upscaled_latent['samples'].shape[3]),
+                                          mode='bilinear', align_corners=True)
+            upscaled_mask = upscaled_mask[:, :,
:upscaled_latent['samples'].shape[2], + :upscaled_latent['samples'].shape[3]] + + # base sampler + upscaled_inv_mask = torch.where(upscaled_mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + upscaled_latent['noise_mask'] = upscaled_inv_mask + upscaled_latent = base_sampler.sample(upscaled_latent, self.hook_base) + + # mask sampler + upscaled_latent['noise_mask'] = upscaled_mask + upscaled_latent = mask_sampler.sample(upscaled_latent, self.hook_mask) + + # remove mask + del upscaled_latent['noise_mask'] + return upscaled_latent + + +class PixelKSampleUpscaler: + params = None + upscale_model = None + hook = None + use_tiled_vae = False + is_tiled = False + tile_size = 512 + + def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, hook_opt=None, tile_size=512): + self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise + self.upscale_model = upscale_model_opt + self.hook = hook_opt + self.use_tiled_vae = use_tiled_vae + self.tile_size = tile_size + + def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space(samples, scale_method, upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model(samples, scale_method, self.upscale_model, + upscale_factor, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + refined_latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, + positive, negative, upscaled_latent, denoise)[0] + return refined_latent + + def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None): + scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params + + if self.hook is not None: + self.hook.set_steps(step_info) + + if self.upscale_model is None: + upscaled_latent = latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, hook=self.hook, + tile_size=self.tile_size) + else: + upscaled_latent = latent_upscale_on_pixel_space_with_model_shape(samples, scale_method, self.upscale_model, + w, h, vae, + use_tile=self.use_tiled_vae, + save_temp_prefix=save_temp_prefix, + hook=self.hook, + tile_size=self.tile_size) + + if self.hook is not None: + model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise = \ + self.hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, + upscaled_latent, denoise) + + refined_latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, + positive, negative, upscaled_latent, denoise)[0] + return refined_latent + + +class ControlNetWrapper: + def __init__(self, control_net, strength, preprocessor): + self.control_net = control_net + 
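+        # strength scales the ControlNet's influence; the optional preprocessor
+        # runs on the image inside apply() before the conditioning is built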
self.strength = strength + self.preprocessor = preprocessor + self.image = None + + def apply(self, conditioning, image, mask=None): + if self.preprocessor is not None: + image = self.preprocessor.apply(image, mask) + + return nodes.ControlNetApply().apply_controlnet(conditioning, self.control_net, image, self.strength)[0], image + + +class CoreMLHook(PixelKSampleHook): + def __init__(self, mode): + super().__init__() + resolution = mode.split('x') + + self.w = int(resolution[0]) + self.h = int(resolution[1]) + + self.override_bbox_by_segm = False + + def pre_decode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'][0].unsqueeze(0) + return new_samples + + def post_encode(self, samples): + new_samples = copy.deepcopy(samples) + new_samples['samples'] = samples['samples'].repeat(2, 1, 1, 1) + return new_samples + + def post_crop_region(self, w, h, item_bbox, crop_region): + x1, y1, x2, y2 = crop_region + bx1, by1, bx2, by2 = item_bbox + crop_w = x2-x1 + crop_h = y2-y1 + + crop_ratio = crop_w/crop_h + target_ratio = self.w/self.h + if crop_ratio < target_ratio: + # shrink height + top_gap = by1 - y1 + bottom_gap = y2 - by2 + + gap_ratio = top_gap / bottom_gap + + target_height = 1/target_ratio*crop_w + delta_height = crop_h - target_height + + new_y1 = int(y1 + delta_height*gap_ratio) + new_y2 = int(new_y1 + target_height) + crop_region = x1, new_y1, x2, new_y2 + + elif crop_ratio > target_ratio: + # shrink width + left_gap = bx1 - x1 + right_gap = x2 - bx2 + + gap_ratio = left_gap / right_gap + + target_width = target_ratio*crop_h + delta_width = crop_w - target_width + + new_x1 = int(x1 + delta_width*gap_ratio) + new_x2 = int(new_x1 + target_width) + crop_region = new_x1, y1, new_x2, y2 + + return crop_region + + def touch_scaled_size(self, w, h): + return self.w, self.h + + +# REQUIREMENTS: BlenderNeko/ComfyUI Noise +class InjectNoiseHook(PixelKSampleHook): + def __init__(self, source, seed, start_strength, end_strength): + super().__init__() + self.source = source + self.seed = seed + self.start_strength = start_strength + self.end_strength = end_strength + + def post_encode(self, samples, seed_idx=0): + # gen noise + size = samples['samples'].shape + seed = self.cur_step + self.seed + seed_idx + + if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS: + NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"] + InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"] + else: + raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.") + + noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0] + + # inj noise + mask = None + if 'noise_mask' in samples: + mask = samples['noise_mask'] + + strength = self.start_strength + (self.end_strength - self.start_strength) * self.cur_step / self.total_step + samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0] + + if mask is not None: + samples['noise_mask'] = mask + + return samples + + def cycle_latent(self, i, latent): + if i == 0: + return latent + else: + return self.post_encode(latent, i) + + +# REQUIREMENTS: BlenderNeko/ComfyUI_TiledKSampler +class TiledKSamplerWrapper: + params = None + + def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy): + self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, 
tile_height, tiling_strategy
+
+    def sample(self, latent_image, hook=None):
+        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
+            TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler']
+        else:
+            raise Exception("'BNK_TiledKSampler' node isn't installed.")
+
+        model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy = self.params
+
+        if hook is not None:
+            # the hook may adjust the sampling parameters and the latent itself
+            model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise = \
+                hook.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+                                 denoise)
+
+        return \
+            TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name,
+                                   scheduler,
+                                   positive, negative, latent_image, denoise)[0]
+
+
+class PixelTiledKSampleUpscaler:
+    params = None
+    upscale_model = None
+    tile_params = None
+    hook = None
+    is_tiled = True
+    tile_size = 512
+
+    def __init__(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative,
+                 denoise,
+                 tile_width, tile_height, tiling_strategy,
+                 upscale_model_opt=None, hook_opt=None, tile_size=512):
+        self.params = scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise
+        self.tile_params = tile_width, tile_height, tiling_strategy
+        self.upscale_model = upscale_model_opt
+        self.hook = hook_opt
+        self.tile_size = tile_size
+
+    def tiled_ksample(self, latent):
+        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
+            TiledKSampler = nodes.NODE_CLASS_MAPPINGS['BNK_TiledKSampler']
+        else:
+            raise Exception("'BNK_TiledKSampler' node isn't installed.")
+
+        scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params
+        tile_width, tile_height, tiling_strategy = self.tile_params
+
+        return \
+            TiledKSampler().sample(model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name,
+                                   scheduler,
+                                   positive, negative, latent, denoise)[0]
+
+    def upscale(self, step_info, samples, upscale_factor, save_temp_prefix=None):
+        scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params
+
+        if self.hook is not None:
+            self.hook.set_steps(step_info)
+
+        if self.upscale_model is None:
+            upscaled_latent = latent_upscale_on_pixel_space(samples, scale_method, upscale_factor, vae,
+                                                            use_tile=True, save_temp_prefix=save_temp_prefix,
+                                                            hook=self.hook,
+                                                            tile_size=self.tile_size)
+        else:
+            upscaled_latent = latent_upscale_on_pixel_space_with_model(samples, scale_method, self.upscale_model,
+                                                                       upscale_factor, vae,
+                                                                       use_tile=True,
+                                                                       save_temp_prefix=save_temp_prefix,
+                                                                       hook=self.hook,
+                                                                       tile_size=self.tile_size)
+
+        refined_latent = self.tiled_ksample(upscaled_latent)
+
+        return refined_latent
+
+    def upscale_shape(self, step_info, samples, w, h, save_temp_prefix=None):
+        scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = self.params
+
+        if self.hook is not None:
+            self.hook.set_steps(step_info)
+
+        if self.upscale_model is None:
+            upscaled_latent = latent_upscale_on_pixel_space_shape(samples, scale_method, w, h, vae,
+                                                                  use_tile=True, save_temp_prefix=save_temp_prefix,
+                                                                  hook=self.hook, tile_size=self.tile_size)
+        else:
+            upscaled_latent = latent_upscale_on_pixel_space_with_model_shape(samples, scale_method,
+                                                                             self.upscale_model, w, h, vae,
+                                                                             use_tile=True,
+                                                                             save_temp_prefix=save_temp_prefix,
+                                                                             hook=self.hook,
+                                                                             tile_size=self.tile_size)
+
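+        # the pixel-space upscale above can yield a latent too large for a
+        # plain KSampler pass; tiled_ksample() refines it tile-by-tile via
+        # BNK_TiledKSampler so peak VRAM stays bounded by tile_width/tile_height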
+ refined_latent = self.tiled_ksample(upscaled_latent) + + return refined_latent + + +# REQUIREMENTS: biegert/ComfyUI-CLIPSeg +class BBoxDetectorBasedOnCLIPSeg: + prompt = None + blur = None + threshold = None + dilation_factor = None + aux = None + + def __init__(self, prompt, blur, threshold, dilation_factor): + self.prompt = prompt + self.blur = blur + self.threshold = threshold + self.dilation_factor = dilation_factor + + def detect(self, image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size=1, detailer_hook=None): + mask = self.detect_combined(image, bbox_threshold, bbox_dilation) + + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + segs = mask_to_segs(mask, False, bbox_crop_factor, True, drop_size, detailer_hook=detailer_hook) + return segs + + def detect_combined(self, image, bbox_threshold, bbox_dilation): + if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS: + CLIPSeg = nodes.NODE_CLASS_MAPPINGS['CLIPSeg'] + else: + raise Exception("'CLIPSeg' node isn't installed.") + + if self.threshold is None: + threshold = bbox_threshold + else: + threshold = self.threshold + + if self.dilation_factor is None: + dilation_factor = bbox_dilation + else: + dilation_factor = self.dilation_factor + + prompt = self.aux if self.prompt == '' and self.aux is not None else self.prompt + + mask, _, _ = CLIPSeg().segment_image(image, prompt, self.blur, threshold, dilation_factor) + mask = to_binary_mask(mask) + return mask + + def setAux(self, x): + self.aux = x + + +def update_node_status(node, text, progress=None): + if PromptServer.instance.client_id is None: + return + + PromptServer.instance.send_sync("impact/update_status", { + "node": node, + "progress": progress, + "text": text + }, PromptServer.instance.client_id) + + +from comfy.cli_args import args, LatentPreviewMethod +import folder_paths +from latent_preview import TAESD, TAESDPreviewerImpl, Latent2RGBPreviewer + +try: + import comfy.latent_formats as latent_formats + + + def get_previewer(device, latent_format=latent_formats.SD15(), force=False, method=None): + previewer = None + + if method is None: + method = args.preview_method + + if method != LatentPreviewMethod.NoPreviews or force: + # TODO previewer methods + taesd_decoder_path = folder_paths.get_full_path("vae_approx", latent_format.taesd_decoder_name) + + if method == LatentPreviewMethod.Auto: + method = LatentPreviewMethod.Latent2RGB + if taesd_decoder_path: + method = LatentPreviewMethod.TAESD + + if method == LatentPreviewMethod.TAESD: + if taesd_decoder_path: + taesd = TAESD(None, taesd_decoder_path).to(device) + previewer = TAESDPreviewerImpl(taesd) + else: + print("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format( + latent_format.taesd_decoder_name)) + + if previewer is None: + previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors) + return previewer + +except: + print(f"#########################################################################") + print(f"[ERROR] ComfyUI-Impact-Pack: Please update ComfyUI to the latest version.") + print(f"#########################################################################") diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py new file mode 100644 index 0000000000000000000000000000000000000000..cbae26c11c049a9daa9fb853d45d898a6a0969c0 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py @@ -0,0 +1,329 @@ +import impact.core as core +from impact.config import MAX_RESOLUTION 
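+# Note: the detector classes below follow ComfyUI's custom-node convention:
+# INPUT_TYPES/RETURN_TYPES declare the node's sockets and FUNCTION names the
+# entry point. A minimal driving sketch (the `detector` and `image` names are
+# illustrative, not defined in this module):
+#
+#   segs = BboxDetectorForEach().doit(detector, image, threshold=0.5,
+#                                     dilation=10, crop_factor=3.0,
+#                                     drop_size=10, labels="all")[0]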
+import impact.segs_nodes as segs_nodes +import numpy as np +import impact.utils as utils + +class SAMDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", ), + "segs": ("SEGS", ), + "image": ("IMAGE", ), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"],), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_hint_use_negative": (["False", "Small", "Outter"], ) + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + return (core.make_sam_mask(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative), ) + + +class SAMDetectorSegmented: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sam_model": ("SAM_MODEL", ), + "segs": ("SEGS", ), + "image": ("IMAGE", ), + "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", + "mask-points", "mask-point-bbox", "none"],), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_hint_use_negative": (["False", "Small", "Outter"], ) + } + } + + RETURN_TYPES = ("MASK", "MASK") + RETURN_NAMES = ("combined_mask", "batch_masks") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative): + combined_mask, batch_masks = core.make_sam_mask_segmented(sam_model, segs, image, detection_hint, dilation, + threshold, bbox_expansion, mask_hint_threshold, + mask_hint_use_negative) + return (combined_mask, batch_masks, ) + + +class BboxDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, bbox_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None): + segs = bbox_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = 
segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None): + segs = segm_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook) + + if labels is not None and labels != '': + labels = labels.split(',') + if len(labels) > 0: + segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, labels) + + return (segs, ) + + +class SegmDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_detector": ("SEGM_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, segm_detector, image, threshold, dilation): + mask = segm_detector.detect_combined(image, threshold, dilation) + return (mask,) + + +class BboxDetectorCombined(SegmDetectorCombined): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 4, "min": -512, "max": 512, "step": 1}), + } + } + + def doit(self, bbox_detector, image, threshold, dilation): + mask = bbox_detector.detect_combined(image, threshold, dilation) + return (mask,) + + +class SimpleDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt=None, 
segm_detector_opt=None): + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + return (segs,) + + + def doit(self, bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt=None, segm_detector_opt=None): + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt, segm_detector_opt) + + +class SimpleDetectorForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "detailer_pipe": ("DETAILER_PIPE", ), + "image": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + def doit(self, detailer_pipe, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold): + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt, segm_detector_opt) + + +class SimpleDetectorForAnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_detector": ("BBOX_DETECTOR", ), + "image_frames": ("IMAGE", ), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "sub_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), + "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + } + 
} + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + @staticmethod + def detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt=None, segm_detector_opt=None): + + # gather segs for all frames + all_segs = [] + for image in image_frames: + image = image.unsqueeze(0) + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size) + + if sam_model_opt is not None: + mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation, + sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False) + segs = core.segs_bitwise_and_mask(segs, mask) + elif segm_detector_opt is not None: + segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size) + mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, mask) + + all_segs.append(segs) + + # create merged masks + all_masks = [] + for segs in all_segs: + all_masks += segs_nodes.SEGSToMaskList().doit(segs)[0] + + result_mask = all_masks[0] + for mask in all_masks[1:]: + result_mask += mask + + result_mask = utils.to_binary_mask(result_mask, 0.1) + + return segs_nodes.MaskToSEGS().doit(result_mask, False, crop_factor, False, drop_size) + + def doit(self, bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt=None, segm_detector_opt=None): + + return SimpleDetectorForAnimateDiff.detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size, + sub_threshold, sub_dilation, sub_bbox_expansion, + sam_mask_hint_threshold, sam_model_opt, segm_detector_opt) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..8b0e0bfd65147398f44f9d041e2427c343e28c8b --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py @@ -0,0 +1,182 @@ +from transformers import pipeline +import comfy +import re +from impact.utils import * + +hf_transformer_model_urls = [ + "rizvandwiki/gender-classification-2", + "NTQAI/pedestrian_gender_recognition", + "Leilab/gender_class", + "ProjectPersonal/GenderClassifier", + "crangana/trained-gender", + "cledoux42/GenderNew_v002", + "ivensamdh/genderage2" +] + + +class HF_TransformersClassifierProvider: + @classmethod + def INPUT_TYPES(s): + global hf_transformer_model_urls + return {"required": { + "preset_repo_id": (hf_transformer_model_urls + ['Manual repo id'],), + "manual_repo_id": ("STRING", {"multiline": False}), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + }, + } + + RETURN_TYPES = ("TRANSFORMERS_CLASSIFIER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + def doit(self, preset_repo_id, manual_repo_id, device_mode): + if preset_repo_id == 'Manual repo id': + url = manual_repo_id + else: + url = preset_repo_id + + if device_mode != 'CPU': + device = comfy.model_management.get_torch_device() + else: + device = "cpu" + + classifier = pipeline(model=url, device=device) + + return (classifier,) + + +preset_classify_expr = [ + '#Female > #Male', + '#Female < #Male', + 'female > 0.5', + 'male > 0.5', + 'Age16to25 > 0.1', + 'Age50to69 > 0.1', +] + +symbolic_label_map = { + '#Female': {'female', 'Female', 'Human Female', 'woman', 
'women', 'girl'}, + '#Male': {'male', 'Male', 'Human Male', 'man', 'men', 'boy'} +} + +def is_numeric_string(input_str): + return re.match(r'^-?\d+(\.\d+)?$', input_str) is not None + + +classify_expr_pattern = r'([^><= ]+)\s*(>|<|>=|<=|=)\s*([^><= ]+)' + + +class SEGS_Classify: + @classmethod + def INPUT_TYPES(s): + global preset_classify_expr + return {"required": { + "classifier": ("TRANSFORMERS_CLASSIFIER",), + "segs": ("SEGS",), + "preset_expr": (preset_classify_expr + ['Manual expr'],), + "manual_expr": ("STRING", {"multiline": False}), + }, + "optional": { + "ref_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/HuggingFace" + + @staticmethod + def lookup_classified_label_score(score_infos, label): + global symbolic_label_map + + if label.startswith('#'): + if label not in symbolic_label_map: + return None + else: + label = symbolic_label_map[label] + else: + label = {label} + + for x in score_infos: + if x['label'] in label: + return x['score'] + + return None + + def doit(self, classifier, segs, preset_expr, manual_expr, ref_image_opt=None): + if preset_expr == 'Manual expr': + expr_str = manual_expr + else: + expr_str = preset_expr + + match = re.match(classify_expr_pattern, expr_str) + + if match is None: + return ((segs[0], []), segs) + + a = match.group(1) + op = match.group(2) + b = match.group(3) + + a_is_lab = not is_numeric_string(a) + b_is_lab = not is_numeric_string(b) + + classified = [] + remained_SEGS = [] + + for seg in segs[1]: + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif ref_image_opt is not None: + # take from original image + cropped_image = crop_image(ref_image_opt, seg.crop_region) + + if cropped_image is not None: + cropped_image = Image.fromarray(np.clip(255. 
* cropped_image.squeeze(), 0, 255).astype(np.uint8)) + res = classifier(cropped_image) + classified.append((seg, res)) + else: + remained_SEGS.append(seg) + + filtered_SEGS = [] + for seg, res in classified: + if a_is_lab: + avalue = SEGS_Classify.lookup_classified_label_score(res, a) + else: + avalue = a + + if b_is_lab: + bvalue = SEGS_Classify.lookup_classified_label_score(res, b) + else: + bvalue = b + + if avalue is None or bvalue is None: + remained_SEGS.append(seg) + continue + + avalue = float(avalue) + bvalue = float(bvalue) + + if op == '>': + cond = avalue > bvalue + elif op == '<': + cond = avalue < bvalue + elif op == '>=': + cond = avalue >= bvalue + elif op == '<=': + cond = avalue <= bvalue + else: + cond = avalue == bvalue + + if cond: + filtered_SEGS.append(seg) + else: + remained_SEGS.append(seg) + + return ((segs[0], filtered_SEGS), (segs[0], remained_SEGS)) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py new file mode 100644 index 0000000000000000000000000000000000000000..128de0f8b1430912f7ed0f07d13b20ba26dc0a83 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py @@ -0,0 +1,2261 @@ +import os +import sys + +import comfy.samplers +import comfy.sd +import warnings +from segment_anything import sam_model_registry +from io import BytesIO +import piexif +import zipfile +import re + +import impact.wildcards + +from impact.utils import * +import impact.core as core +from impact.core import SEG +from impact.config import MAX_RESOLUTION, latent_letter_path +from PIL import Image, ImageOps +import numpy as np +import hashlib +import json +import safetensors.torch +from PIL.PngImagePlugin import PngInfo +import comfy.model_management +import base64 +import impact.wildcards as wildcards + +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') + +model_path = folder_paths.models_dir + + +# folder_paths.supported_pt_extensions +add_folder_path_and_extensions("mmdets_bbox", [os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("mmdets_segm", [os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("mmdets", [os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions) +add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'}) + + +# Nodes +class ONNXDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}} + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "load_onnx" + + CATEGORY = "ImpactPack" + + def load_onnx(self, model_name): + model = folder_paths.get_full_path("onnx", model_name) + return (core.ONNXDetector(model), ) + + +class CLIPSegDetectorProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": False}), + "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7}), + "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4}), + "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4}), + } + } + + RETURN_TYPES = ("BBOX_DETECTOR", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, text, blur, threshold, dilation_factor): + if "CLIPSeg" in 
nodes.NODE_CLASS_MAPPINGS: + return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), ) + else: + print("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. You must install biegert/ComfyUI-CLIPSeg extension to use this node.") + + +class SAMLoader: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model_name": (folder_paths.get_filename_list("sams"), ), + "device_mode": (["AUTO", "Prefer GPU", "CPU"],), + } + } + + RETURN_TYPES = ("SAM_MODEL", ) + FUNCTION = "load_model" + + CATEGORY = "ImpactPack" + + def load_model(self, model_name, device_mode="auto"): + modelname = folder_paths.get_full_path("sams", model_name) + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam = sam_model_registry[model_kind](checkpoint=modelname) + # Unless user explicitly wants to use CPU, we use GPU + device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU" + + if device_mode == "Prefer GPU": + sam.to(device=device) + + sam.is_auto_mode = device_mode == "AUTO" + + print(f"Loads SAM model: {modelname} (device:{device_mode})") + return (sam, ) + + +class ONNXDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "onnx_detector": ("ONNX_DETECTOR",), + "image": ("IMAGE",), + "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detector" + + OUTPUT_NODE = True + + def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size): + segs = onnx_detector.detect(image, threshold, dilation, crop_factor, drop_size) + return (segs, ) + + +class DetailerForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": {"detailer_hook": ("DETAILER_HOOK",), } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, 
segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1): + + image_pil = tensor2pil(image).convert('RGBA') + + enhanced_alpha_list = [] + enhanced_list = [] + cropped_list = [] + cnet_pil_list = [] + + segs = core.segs_scale_match(segs, image.shape) + new_segs = [] + + if wildcard_opt is not None: + wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt) + else: + wmode, wildcard_chooser = None, None + + if wmode in ['ASC', 'DSC']: + if wmode == 'ASC': + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1])) + else: + ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True) + else: + ordered_segs = segs[1] + + for seg in ordered_segs: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else crop_ndarray4(image.numpy(), seg.crop_region) + + mask_pil = feather_mask(seg.cropped_mask, feather) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + if wildcard_chooser is not None: + wildcard_item = wildcard_chooser.get(seg) + else: + wildcard_item = None + + enhanced_pil, cnet_pil = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, cropped_mask, force_inpaint, wildcard_item, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper, cycle=cycle) + + if cnet_pil is not None: + cnet_pil_list.append(cnet_pil) + + if not (enhanced_pil is None): + # don't latent composite-> converting to latent caused poor quality + # use image paste + image_pil.paste(enhanced_pil, (seg.crop_region[0], seg.crop_region[1]), mask_pil) + enhanced_list.append(pil2tensor(enhanced_pil)) + + if not (enhanced_pil is None): + # Convert enhanced_pil_alpha to RGBA mode + enhanced_pil_alpha = enhanced_pil.copy().convert('RGBA') + + # Apply the mask + mask_array = seg.cropped_mask.astype(np.uint8) * 255 + mask_image = Image.fromarray(mask_array, mode='L').resize(enhanced_pil_alpha.size) + enhanced_pil_alpha.putalpha(mask_image) + enhanced_alpha_list.append(pil2tensor(enhanced_pil_alpha)) + new_seg_pil = pil2numpy(enhanced_pil) + else: + new_seg_pil = None + + cropped_list.append(torch.from_numpy(cropped_image)) + + new_seg = SEG(new_seg_pil, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + image_tensor = pil2tensor(image_pil.convert('RGB')) + + cropped_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_list.sort(key=lambda x: x.shape, reverse=True) + enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True) + + return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs) + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1, detailer_hook=None): 
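+        # node entry point: delegate to do_detail() and keep only the composited
+        # image; the cropped/enhanced/alpha/cnet outputs are consumed by the
+        # *Pipe and FaceDetailer variants defined later in this file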
+ + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, cycle=cycle) + + return (enhanced_img, ) + + +class DetailerForEachPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "detailer_hook": ("DETAILER_HOOK",), + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + } + } + + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE") + RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, cycle=1): + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle) + + # set fallback image + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return (enhanced_img, new_segs, basic_pipe, cnet_pil_list) + + +class FaceDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": 
("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR", ), + "detailer_hook": ("DETAILER_HOOK",) + }} + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + @staticmethod + def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, + bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None, + refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1): + + # make default prompt as 'face' if empty prompt for CLIPSeg + bbox_detector.setAux('face') + segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size, detailer_hook=detailer_hook) + bbox_detector.setAux(None) + + # bbox + sam combination + if sam_model_opt is not None: + sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation, + sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, ) + 
segs = core.segs_bitwise_and_mask(segs, sam_mask) + + elif segm_detector is not None: + segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size) + + if (hasattr(segm_detector, 'override_bbox_by_segm') and segm_detector.override_bbox_by_segm and + not (detailer_hook is not None and not hasattr(detailer_hook, 'override_bbox_by_segm'))): + segs = segm_segs + else: + segm_mask = core.segs_to_combined_mask(segm_segs) + segs = core.segs_bitwise_and_mask(segs, segm_mask) + + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard_opt, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, cycle=cycle) + + # Mask Generator + mask = core.segs_to_combined_mask(segs) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list + + def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1, + sam_model_opt=None, segm_detector_opt=None, detailer_hook=None): + + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook, cycle=cycle) + + pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None) + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, pipe, cnet_pil_list + + +class LatentPixelScale: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "scale_method": (s.upscale_methods,), + "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.1}), + "vae": ("VAE", ), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + } + } + + RETURN_TYPES = ("LATENT","IMAGE") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None): + if upscale_model_opt is None: + latimg = core.latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae) + else: + latimg = 
core.latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae) + return latimg + + +class NoiseInjectionDetailerHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, source, seed, strength): + try: + hook = core.InjectNoiseHook(source, seed, strength, strength) + hook.set_steps((1, 1)) + return (hook, ) + except Exception as e: + print("[ERROR] NoiseInjectionDetailerHookProvider: 'ComfyUI Noise' custom node isn't installed. You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +class CoreMLDetailerHookProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": {"mode": (["512x512", "768x768", "512x768", "768x512"], )}, } + + RETURN_TYPES = ("DETAILER_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, mode): + hook = core.CoreMLHook(mode) + return (hook, ) + + +class CfgScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_cfg): + hook = None + if schedule_for_iteration == "simple": + hook = core.SimpleCfgScheduleHook(target_cfg) + + return (hook, ) + + +class NoiseInjectionHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "source": (["CPU", "GPU"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "start_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, source, seed, start_strength, end_strength): + try: + hook = None + if schedule_for_iteration == "simple": + hook = core.InjectNoiseHook(source, seed, start_strength, end_strength) + + return (hook, ) + except Exception as e: + print("[ERROR] NoiseInjectionHookProvider: 'ComfyUI Noise' custom node isn't installed. 
You must install 'BlenderNeko/ComfyUI Noise' extension to use this node.") + print(f"\t{e}") + pass + + +class DenoiseScheduleHookProvider: + schedules = ["simple"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "schedule_for_iteration": (s.schedules,), + "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 100.0}), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, schedule_for_iteration, target_denoise): + hook = None + if schedule_for_iteration == "simple": + hook = core.SimpleDenoiseScheduleHook(target_denoise) + + return (hook, ) + + +class PixelKSampleHookCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "hook1": ("PK_HOOK",), + "hook2": ("PK_HOOK",), + }, + } + + RETURN_TYPES = ("PK_HOOK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, hook1, hook2): + hook = core.PixelKSampleHookCombine(hook1, hook2) + return (hook, ) + + +class PixelTiledKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt=None, pk_hook_opt=None): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_size=max(tile_width, tile_height)) + return (upscaler, ) + else: + print("[ERROR] PixelTiledKSampleUpscalerProvider: ComfyUI_TiledKSampler custom node isn't installed. 
You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelTiledKSampleUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + "basic_pipe": ("BASIC_PIPE",) + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None): + if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS: + model, _, vae, positive, negative = basic_pipe + upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_size=max(tile_width, tile_height)) + return (upscaler, ) + else: + print("[ERROR] PixelTiledKSampleUpscalerProviderPipe: ComfyUI_TiledKSampler custom node isn't installed. 
You must install BlenderNeko/ComfyUI_TiledKSampler extension to use this node.") + + +class PixelKSampleUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "model": ("MODEL",), + "vae": ("VAE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None, tile_size=512): + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size) + return (upscaler, ) + + +class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider): + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER",) + FUNCTION = "doit_pipe" + + CATEGORY = "ImpactPack/Upscale" + + def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, + use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, tile_size=512): + model, _, vae, positive, negative = basic_pipe + upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt, + tile_size=tile_size) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProvider: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + 
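+ # base_sampler refines the whole latent each iteration while mask_sampler resamples only the masked region; full_sample_schedule (above) appears to control when an extra unmasked full pass runs ("interleaveN": every N iterations, "lastN": only the final N) (assumed semantics; see core.TwoSamplersForMaskUpscaler)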
"base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "vae": ("VAE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class TwoSamplersForMaskUpscalerProviderPipe: + upscale_methods = ["nearest-exact", "bilinear", "area"] + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "scale_method": (s.upscale_methods,), + "full_sample_schedule": ( + ["none", "interleave1", "interleave2", "interleave3", + "last1", "last2", + "interleave1+last1", "interleave2+last1", "interleave3+last1", + ],), + "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + "optional": { + "full_sampler_opt": ("KSAMPLER",), + "upscale_model_opt": ("UPSCALE_MODEL", ), + "pk_hook_base_opt": ("PK_HOOK", ), + "pk_hook_mask_opt": ("PK_HOOK", ), + "pk_hook_full_opt": ("PK_HOOK", ), + } + } + + RETURN_TYPES = ("UPSCALER", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe, + full_sampler_opt=None, upscale_model_opt=None, + pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512): + + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + _, _, vae, _, _ = basic_pipe + upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae, + base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt, + pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size) + return (upscaler, ) + + +class IterativeLatentUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("latent",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, unique_id): + w = samples['samples'].shape[3]*8 # image width + h = samples['samples'].shape[2]*8 # image height + + if temp_prefix == "": + temp_prefix = None + + upscale_factor_unit = max(0, (upscale_factor-1.0)/steps) + current_latent = samples + scale = 1 + + for i in range(steps-1): + scale += upscale_factor_unit + new_w = 
w*scale + new_h = h*scale + core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps) + print(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ") + step_info = i, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + if scale < upscale_factor: + new_w = w*upscale_factor + new_h = h*upscale_factor + core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0) + print(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ") + step_info = steps, steps + current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix) + + core.update_node_status(unique_id, "", None) + + return (current_latent, ) + + +class IterativeImageUpscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pixels": ("IMAGE", ), + "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}), + "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}), + "temp_prefix": ("STRING", {"default": ""}), + "upscaler": ("UPSCALER",), + "vae": ("VAE",), + }, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Upscale" + + def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, unique_id): + if temp_prefix == "": + temp_prefix = None + + core.update_node_status(unique_id, "VAEEncode (first)", 0) + if upscaler.is_tiled: + latent = nodes.VAEEncodeTiled().encode(vae, pixels, upscaler.tile_size)[0] + else: + latent = nodes.VAEEncode().encode(vae, pixels)[0] + + refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, unique_id) + + core.update_node_status(unique_id, "VAEDecode (final)", 1.0) + if upscaler.is_tiled: + pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0], upscaler.tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0] + + core.update_node_status(unique_id, "", None) + + return (pixels, ) + + +class FaceDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "detailer_pipe": ("DETAILER_PIPE",), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + + "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), + "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + + "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", 
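+ # the point-pattern hints ("center-1" through "diamond-4") seed a fixed layout of SAM prompt points inside each detected bbox, while the "mask-*" hints derive prompt points from the detected mask itself (assumed behavior of the SAM hint handling in impact.core)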
"rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), + "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), + "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + "sam_mask_hint_use_negative": (["False", "Small", "Outter"],), + + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, False, False, True) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Simple" + + def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, + sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None, cycle=1): + + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt, detailer_hook, \ + refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + + enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( + image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, feather, noise_mask, force_inpaint, + bbox_threshold, bbox_dilation, bbox_crop_factor, + sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, + sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, + cycle=cycle) + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, detailer_pipe, cnet_pil_list + + +class MaskDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + "basic_pipe": ("BASIC_PIPE",), + + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "mask bbox", "label_off": "crop region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), + "mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}), + + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": 
("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE", ), + "detailer_hook": ("DETAILER_HOOK",), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt") + OUTPUT_IS_LIST = (False, True, True, False, False) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/__for_test" + + def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode, + seed, steps, cfg, sampler_name, scheduler, denoise, + feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1, + refiner_basic_pipe_opt=None, detailer_hook=None): + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + # create segs + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + segs = core.mask_to_segs(mask, False, crop_factor, False, drop_size) + + enhanced_img_batch = None + cropped_enhanced_list = [] + cropped_enhanced_alpha_list = [] + + for i in range(batch_size): + enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, _, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed+i, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, mask_mode, + force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, + refiner_positive=refiner_positive, refiner_negative=refiner_negative, cycle=cycle) + + if enhanced_img_batch is None: + enhanced_img_batch = enhanced_img + else: + enhanced_img_batch = torch.cat((enhanced_img_batch, enhanced_img), dim=0) + + cropped_enhanced_list += cropped_enhanced + cropped_enhanced_alpha_list += cropped_enhanced_alpha_list + + # set fallback image + if len(cropped_enhanced_list) == 0: + cropped_enhanced_list = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha_list) == 0: + cropped_enhanced_alpha_list = [empty_pil_tensor()] + + return enhanced_img_batch, cropped_enhanced_list, cropped_enhanced_alpha_list, basic_pipe, refiner_basic_pipe_opt + + +class DetailerForEachTest(DetailerForEach): + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") + RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images") + OUTPUT_IS_LIST = (False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, + cycle=1): + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, 
model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, + cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, cycle=cycle) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list + + +class DetailerForEachTestPipe(DetailerForEachPipe): + RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", ) + RETURN_NAMES = ("image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", 'cnet_images') + OUTPUT_IS_LIST = (False, False, False, True, True, True, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, + refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None): + + model, clip, vae, positive, negative = basic_pipe + + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ + DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, + sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, + force_inpaint, wildcard, detailer_hook, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, + refiner_negative=refiner_negative, cycle=cycle) + + # set fallback image + if len(cropped) == 0: + cropped = [empty_pil_tensor()] + + if len(cropped_enhanced) == 0: + cropped_enhanced = [empty_pil_tensor()] + + if len(cropped_enhanced_alpha) == 0: + cropped_enhanced_alpha = [empty_pil_tensor()] + + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return enhanced_img, new_segs, basic_pipe, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list + + +class SegsBitwiseAndMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, mask): + return (core.segs_bitwise_and_mask(segs, mask), ) + + +class SegsBitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "masks": ("MASK",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs, masks): + return (core.apply_mask_to_each_seg(segs, masks), ) + + +class BitwiseAndMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, base_segs, mask_segs): + + result = [] + + for bseg in base_segs[1]: + cropped_mask1 = bseg.cropped_mask.copy() + crop_region1 = bseg.crop_region + + for mseg in 
mask_segs[1]: + cropped_mask2 = mseg.cropped_mask + crop_region2 = mseg.crop_region + + # compute the intersection of the two crop regions + intersect_region = (max(crop_region1[0], crop_region2[0]), + max(crop_region1[1], crop_region2[1]), + min(crop_region1[2], crop_region2[2]), + min(crop_region1[3], crop_region2[3])) + + overlapped = False + + # set all pixels in cropped_mask1 to 0 except for those that overlap with cropped_mask2 + for i in range(intersect_region[0], intersect_region[2]): + for j in range(intersect_region[1], intersect_region[3]): + if cropped_mask1[j - crop_region1[1], i - crop_region1[0]] == 1 and \ + cropped_mask2[j - crop_region2[1], i - crop_region2[0]] == 1: + # pixel overlaps with both masks, keep it as 1 + overlapped = True + pass + else: + # pixel does not overlap with both masks, set it to 0 + cropped_mask1[j - crop_region1[1], i - crop_region1[0]] = 0 + + if overlapped: + item = SEG(bseg.cropped_image, cropped_mask1, bseg.confidence, bseg.crop_region, bseg.bbox, bseg.label, None) + result.append(item) + + return ((base_segs[0], result),) + + + class SubtractMaskForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "base_segs": ("SEGS",), + "mask_segs": ("SEGS",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, base_segs, mask_segs): + + result = [] + + for bseg in base_segs[1]: + cropped_mask1 = bseg.cropped_mask.copy() + crop_region1 = bseg.crop_region + + for mseg in mask_segs[1]: + cropped_mask2 = mseg.cropped_mask + crop_region2 = mseg.crop_region + + # compute the intersection of the two crop regions + intersect_region = (max(crop_region1[0], crop_region2[0]), + max(crop_region1[1], crop_region2[1]), + min(crop_region1[2], crop_region2[2]), + min(crop_region1[3], crop_region2[3])) + + changed = False + + # subtract operation + for i in range(intersect_region[0], intersect_region[2]): + for j in range(intersect_region[1], intersect_region[3]): + if cropped_mask1[j - crop_region1[1], i - crop_region1[0]] == 1 and \ + cropped_mask2[j - crop_region2[1], i - crop_region2[0]] == 1: + # pixel is covered by both masks, so subtract it (set it to 0) + changed = True + cropped_mask1[j - crop_region1[1], i - crop_region1[0]] = 0 + else: + # pixel is not covered by both masks; leave it unchanged + pass + + if changed: + item = SEG(bseg.cropped_image, cropped_mask1, bseg.confidence, bseg.crop_region, bseg.bbox, bseg.label, None) + result.append(item) + else: + # nothing was subtracted from this seg; keep it unchanged + result.append(bseg) + + return ((base_segs[0], result),) + + + class MasksToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "masks": ("MASK", ), + } + } + + RETURN_TYPES = ("MASK", ) + OUTPUT_IS_LIST = (True, ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, masks): + if masks is None: + empty_mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") + return ([empty_mask], ) + + res = [] + + for mask in masks: + res.append(mask) + + print(f"mask len: {len(res)}") + + return (res, ) + + + class MaskListToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("MASK", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask): + if len(mask) == 1: + mask = mask[0] + if len(mask.shape) == 2: + mask = mask.unsqueeze(0) + return (mask,) + elif len(mask) > 1: + mask1 = mask[0] + if len(mask1.shape) == 2: + mask1 = mask1.unsqueeze(0) + + for mask2 in mask[1:]: + if len(mask2.shape) == 2: + 
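+ # a bare [H, W] mask is promoted to [1, H, W] so it can be concatenated along the batch dimension below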
mask2 = mask2.unsqueeze(0) + if mask1.shape[1:] != mask2.shape[1:]: + mask2 = comfy.utils.common_upscale(mask2.movedim(-1, 1), mask1.shape[2], mask1.shape[1], "bilinear", "center").movedim(1, -1) + mask1 = torch.cat((mask1, mask2), dim=0) + return (mask1,) + else: + empty_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu").unsqueeze(0) + return (empty_mask,) + + + class ImageListToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + # despite its name, this node concatenates an IMAGE list into a single IMAGE batch + def doit(self, images): + if len(images) <= 1: + return (images[0],) + else: + image1 = images[0] + for image2 in images[1:]: + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + return (image1,) + + + class ToBinaryMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "threshold": ("INT", {"default": 20, "min": 1, "max": 255}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask, threshold): + mask = to_binary_mask(mask, threshold/255.0) + return (mask,) + + + class BitwiseAndMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask1": ("MASK",), + "mask2": ("MASK",), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask1, mask2): + mask = bitwise_and_masks(mask1, mask2) + return (mask,) + + + class SubtractMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask1": ("MASK", ), + "mask2": ("MASK", ), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask1, mask2): + mask = subtract_masks(mask1, mask2) + return (mask,) + + + class AddMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask1": ("MASK",), + "mask2": ("MASK",), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask1, mask2): + mask = add_masks(mask1, mask2) + return (mask,) + + + import nodes + + + def get_image_hash(arr): + split_index1 = arr.shape[0] // 2 + split_index2 = arr.shape[1] // 2 + part1 = arr[:split_index1, :split_index2] + part2 = arr[:split_index1, split_index2:] + part3 = arr[split_index1:, :split_index2] + part4 = arr[split_index1:, split_index2:] + + # sum each quadrant + sum1 = np.sum(part1) + sum2 = np.sum(part2) + sum3 = np.sum(part3) + sum4 = np.sum(part4) + + return hash((sum1, sum2, sum3, sum4)) + + + def get_file_item(base_type, path): + path_type = base_type + + # strip the trailing " [type]" annotation and adjust the path type accordingly + if path.endswith("[output]"): + path_type = "output" + path = path[:-9] + elif path.endswith("[input]"): + path_type = "input" + path = path[:-8] + elif path.endswith("[temp]"): + path_type = "temp" + path = path[:-7] + + subfolder = os.path.dirname(path) + filename = os.path.basename(path) + + return { + "filename": filename, + "subfolder": subfolder, + "type": path_type + } + + + class PreviewBridge: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + "image": ("STRING", {"default": ""}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + + FUNCTION = "doit" + + OUTPUT_NODE = True + + CATEGORY = "ImpactPack/Util" + + def __init__(self): + super().__init__() + self.output_dir = 
folder_paths.get_temp_directory() + self.type = "temp" + self.prev_hash = None + + @staticmethod + def load_image(pb_id): + # guard against stale or unknown pb_ids to avoid a KeyError on the lookup below + is_fail = False + if pb_id not in impact.core.preview_bridge_image_id_map: + is_fail = True + + image_path, ui_item = None, None + if not is_fail: + image_path, ui_item = impact.core.preview_bridge_image_id_map[pb_id] + if not os.path.isfile(image_path): + is_fail = True + + if not is_fail: + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + if is_fail: + image = empty_pil_tensor() + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + ui_item = { + "filename": 'empty.png', + "subfolder": '', + "type": 'temp' + } + + return (image, mask.unsqueeze(0), ui_item) + + def doit(self, images, image, unique_id): + need_refresh = False + + if unique_id not in impact.core.preview_bridge_cache: + need_refresh = True + + elif impact.core.preview_bridge_cache[unique_id][0] is not images: + need_refresh = True + + if not need_refresh: + pixels, mask, path_item = PreviewBridge.load_image(image) + image = [path_item] + else: + res = nodes.PreviewImage().save_images(images, filename_prefix="PreviewBridge/PB-") + image2 = res['ui']['images'] + pixels = images + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', image2[0]['filename']) + impact.core.set_previewbridge_image(unique_id, path, image2[0]) + impact.core.preview_bridge_image_id_map[image] = (path, image2[0]) + impact.core.preview_bridge_image_name_map[unique_id, path] = (image, image2[0]) + impact.core.preview_bridge_cache[unique_id] = (images, image2) + + image = image2 + + return { + "ui": {"images": image}, + "result": (pixels, mask, ), + } + + + class ImageReceiver: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "save_to_workflow": ("BOOLEAN", {"default": False}), + "image_data": ("STRING", {"multiline": False}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + RETURN_TYPES = ("IMAGE", "MASK") + + CATEGORY = "ImpactPack/Util" + + def doit(self, image, link_id, save_to_workflow, image_data, trigger_always): + if save_to_workflow: + try: + image_data = base64.b64decode(image_data.split(",")[1]) + i = Image.open(BytesIO(image_data)) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. 
- torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return (image, mask.unsqueeze(0)) + except Exception as e: + print(f"[WARN] ComfyUI-Impact-Pack: ImageReceiver - invalid 'image_data'") + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return (empty_pil_tensor(64, 64), mask, ) + else: + return nodes.LoadImage().load_image(image) + + @classmethod + def VALIDATE_INPUTS(s, image, link_id, save_to_workflow, image_data, trigger_always): + if image != '#DATA' and not folder_paths.exists_annotated_filepath(image) or image.startswith("/") or ".." in image: + return "Invalid image file: {}".format(image) + + return True + + @classmethod + def IS_CHANGED(s, image, link_id, save_to_workflow, image_data, trigger_always): + if trigger_always: + return float("NaN") + else: + if save_to_workflow: + return hash(image_data) + else: + return hash(image) + + +from server import PromptServer + +class ImageSender(nodes.PreviewImage): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ImgSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None): + result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo) + PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']}) + return result + + +class LatentReceiver: + def __init__(self): + self.input_dir = folder_paths.get_input_directory() + self.type = "input" + + @classmethod + def INPUT_TYPES(s): + def check_file_extension(x): + return x.endswith(".latent") or x.endswith(".latent.png") + + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)] + return {"required": { + "latent": (sorted(files), ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT",) + + @staticmethod + def load_preview_latent(image_path): + if not os.path.exists(image_path): + return None + + image = Image.open(image_path) + exif_data = piexif.load(image.info["exif"]) + + if piexif.ExifIFD.UserComment in exif_data["Exif"]: + compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment] + compressed_data_io = BytesIO(compressed_data) + with zipfile.ZipFile(compressed_data_io, mode='r') as archive: + tensor_bytes = archive.read("latent") + tensor = safetensors.torch.load(tensor_bytes) + return {"samples": tensor['latent_tensor']} + return None + + def parse_filename(self, filename): + pattern = r"^(.*)/(.*?)\[(.*)\]\s*$" + match = re.match(pattern, filename) + if match: + subfolder = match.group(1) + filename = match.group(2).rstrip() + file_type = match.group(3) + else: + subfolder = '' + file_type = self.type + + return {'filename': filename, 'subfolder': subfolder, 'type': file_type} + + def doit(self, **kwargs): + if 'latent' not in kwargs: + return (torch.zeros([1, 4, 8, 8]), ) + + latent = kwargs['latent'] + + latent_name = latent + latent_path = 
folder_paths.get_annotated_filepath(latent_name) + + if latent.endswith(".latent"): + latent = safetensors.torch.load_file(latent_path, device="cpu") + multiplier = 1.0 + if "latent_format_version_0" not in latent: + multiplier = 1.0 / 0.18215 + samples = {"samples": latent["latent_tensor"].float() * multiplier} + else: + samples = LatentReceiver.load_preview_latent(latent_path) + + if samples is None: + samples = {'samples': torch.zeros([1, 4, 8, 8])} + + preview = self.parse_filename(latent_name) + + return { + 'ui': {"images": [preview]}, + 'result': (samples, ) + } + + @classmethod + def IS_CHANGED(s, latent, link_id, trigger_always): + if trigger_always: + return float("NaN") + else: + image_path = folder_paths.get_annotated_filepath(latent) + m = hashlib.sha256() + with open(image_path, 'rb') as f: + m.update(f.read()) + return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, latent, link_id, trigger_always): + if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent: + return "Invalid latent file: {}".format(latent) + return True + + +class LatentSender(nodes.SaveLatent): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "filename_prefix": ("STRING", {"default": "latents/LatentSender"}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "preview_method": (["Latent2RGB-SDXL", "Latent2RGB-SD15", "TAESDXL", "TAESD15"],) + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + OUTPUT_NODE = True + + RETURN_TYPES = () + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path): + compressed_data = BytesIO() + with zipfile.ZipFile(compressed_data, mode='w') as archive: + archive.writestr("latent", tensor_bytes) + image = image.copy() + exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}} + + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + exif_bytes = piexif.dump(exif_data) + image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True) + + @staticmethod + def prepare_preview(latent_tensor, preview_method): + from comfy.cli_args import LatentPreviewMethod + import comfy.latent_formats as latent_formats + + lower_bound = 128 + upper_bound = 256 + + if preview_method == "Latent2RGB-SD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.Latent2RGB + elif preview_method == "TAESD15": + latent_format = latent_formats.SD15() + method = LatentPreviewMethod.TAESD + elif preview_method == "TAESDXL": + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.TAESD + else: # preview_method == "Latent2RGB-SDXL" + latent_format = latent_formats.SDXL() + method = LatentPreviewMethod.Latent2RGB + + previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) + + image = previewer.decode_latent_to_preview(latent_tensor) + min_size = min(image.size[0], image.size[1]) + max_size = max(image.size[0], image.size[1]) + + scale_factor = 1 + if max_size > upper_bound: + scale_factor = upper_bound/max_size + + # prevent too small preview + if min_size*scale_factor < lower_bound: + scale_factor = 
lower_bound/min_size + + w = int(image.size[0] * scale_factor) + h = int(image.size[1] * scale_factor) + + image = image.resize((w, h), resample=Image.NEAREST) + + return LatentSender.attach_format_text(image) + + @staticmethod + def attach_format_text(image): + width_a, height_a = image.size + + letter_image = Image.open(latent_letter_path) + width_b, height_b = letter_image.size + + new_width = max(width_a, width_b) + new_height = height_a + height_b + + new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0)) + + offset_x = (new_width - width_b) // 2 + offset_y = (height_a + (new_height - height_a - height_b) // 2) + new_image.paste(letter_image, (offset_x, offset_y)) + + new_image.paste(image, (0, 0)) + + return new_image + + def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, preview_method="Latent2RGB-SDXL", prompt=None, extra_pnginfo=None): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + # load preview + preview = LatentSender.prepare_preview(samples['samples'], preview_method) + + # support save metadata for latent sharing + file = f"{filename}_{counter:05}_.latent.png" + fullpath = os.path.join(full_output_folder, file) + + output = {"latent_tensor": samples["samples"]} + + tensor_bytes = safetensors.torch.save(output) + LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath) + + latent_path = { + 'filename': file, + 'subfolder': subfolder, + 'type': self.type + } + + PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]}) + + return {'ui': {'images': [latent_path]}} + + +class ImageMaskSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "images1": ("IMAGE", ), + }, + + "optional": { + "mask1_opt": ("MASK",), + "images2_opt": ("IMAGE",), + "mask2_opt": ("MASK",), + "images3_opt": ("IMAGE",), + "mask3_opt": ("MASK",), + "images4_opt": ("IMAGE",), + "mask4_opt": ("MASK",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None, images4_opt=None, mask4_opt=None): + if select == 1: + return images1, mask1_opt, + elif select == 2: + return images2_opt, mask2_opt, + elif select == 3: + return images3_opt, mask3_opt, + else: + return images4_opt, mask4_opt, + + +class LatentSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "latent1": ("LATENT",), + }, + } + + RETURN_TYPES = ("LATENT", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"latent{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + print(f"LatentSwitch: invalid select index ('latent1' is selected)") + return (kwargs['latent1'],) + + +class ImpactWildcardProcessor: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "Select to add 
Wildcard": (["Select the Wildcard to add to the text"],), + }, + } + + CATEGORY = "ImpactPack/Prompt" + + RETURN_TYPES = ("STRING", ) + FUNCTION = "doit" + + def doit(self, *args, **kwargs): + populated_text = kwargs['populated_text'] + return (populated_text, ) + + +class ImpactWildcardEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ), + "Select to add Wildcard": (["Select the Wildcard to add to the text"], ), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + CATEGORY = "ImpactPack/Prompt" + + RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING") + RETURN_NAMES = ("model", "clip", "conditioning", "populated_text") + FUNCTION = "doit" + + @staticmethod + def process_with_loras(**kwargs): + return impact.wildcards.process_with_loras(**kwargs) + + @staticmethod + def get_wildcard_list(): + return impact.wildcards.get_wildcard_list() + + def doit(self, *args, **kwargs): + populated = kwargs['populated_text'] + model, clip, conditioning = impact.wildcards.process_with_loras(populated, kwargs['model'], kwargs['clip']) + return (model, clip, conditioning, populated) + + +class ReencodeLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_vae": ("VAE", ), + "output_vae": ("VAE", ), + "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_vae, output_vae, tile_size=512): + if tile_mode in ["Both", "Decode(input) only"]: + pixels = nodes.VAEDecodeTiled().decode(input_vae, samples, tile_size)[0] + else: + pixels = nodes.VAEDecode().decode(input_vae, samples)[0] + + if tile_mode in ["Both", "Encode(output) only"]: + return nodes.VAEEncodeTiled().encode(output_vae, pixels, tile_size) + else: + return nodes.VAEEncode().encode(output_vae, pixels) + + +class ReencodeLatentPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples": ("LATENT", ), + "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],), + "input_basic_pipe": ("BASIC_PIPE", ), + "output_basic_pipe": ("BASIC_PIPE", ), + }, + } + + CATEGORY = "ImpactPack/Util" + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + def doit(self, samples, tile_mode, input_basic_pipe, output_basic_pipe): + _, _, input_vae, _, _ = input_basic_pipe + _, _, output_vae, _, _ = output_basic_pipe + return ReencodeLatent().doit(samples, tile_mode, input_vae, output_vae) + + +class ImageBatchToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, image): + images = [image[i:i + 1, ...] 
for i in range(image.shape[0])] + return (images, ) + + +class MakeImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + images = [] + + for k, v in kwargs.items(): + images.append(v) + + return (images, ) + + +class MakeImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": {"image1": ("IMAGE",), }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + image1 = kwargs['image1'] + del kwargs['image1'] + images = [value for value in kwargs.values()] + + if len(images) == 0: + return (image1,) + else: + for image2 in images: + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + return (image1,) + + +class StringSelector: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "strings": ("STRING", {"multiline": True}), + "multiline": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "select": ("INT", {"min": 0, "max": sys.maxsize, "step": 1, "default": 0}), + }} + + RETURN_TYPES = ("STRING",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, strings, multiline, select): + lines = strings.split('\n') + + if multiline: + result = [] + current_string = "" + + for line in lines: + if line.startswith("#"): + if current_string: + result.append(current_string.strip()) + current_string = "" + current_string += line + "\n" + + if current_string: + result.append(current_string.strip()) + + if len(result) == 0: + selected = strings + else: + selected = result[select % len(result)] + + if selected.startswith('#'): + selected = selected[1:] + else: + if len(lines) == 0: + selected = strings + else: + selected = lines[select % len(lines)] + + return (selected, ) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py new file mode 100644 index 0000000000000000000000000000000000000000..2f322e1dc2eb9d917962a1d64fa50584cef449de --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py @@ -0,0 +1,456 @@ +import os +import threading + +from aiohttp import web + +import impact +import server +import folder_paths + +import impact.core as core +import impact.impact_pack as impact_pack +from segment_anything import SamPredictor, sam_model_registry +import numpy as np +import nodes +from PIL import Image +import io +import impact.wildcards as wildcards +import comfy +from io import BytesIO +import random + + +@server.PromptServer.instance.routes.post("/upload/temp") +async def upload_image(request): + upload_dir = folder_paths.get_temp_directory() + + if not os.path.exists(upload_dir): + os.makedirs(upload_dir) + + post = await request.post() + image = post.get("image") + + if image and image.file: + filename = image.filename + if not filename: + return web.Response(status=400) + + split = os.path.splitext(filename) + i = 1 + while os.path.exists(os.path.join(upload_dir, filename)): + filename = f"{split[0]} ({i}){split[1]}" + i += 1 + + filepath = os.path.join(upload_dir, filename) + + with open(filepath, "wb") as f: + f.write(image.file.read()) + + return web.json_response({"name": filename}) + else: + return 
web.Response(status=400) + + + sam_predictor = None + default_sam_model_name = os.path.join(impact_pack.model_path, "sams", "sam_vit_b_01ec64.pth") + + sam_lock = threading.Condition() + + last_prepare_data = None + + + def async_prepare_sam(image_dir, model_name, filename): + with sam_lock: + global sam_predictor + + if 'vit_h' in model_name: + model_kind = 'vit_h' + elif 'vit_l' in model_name: + model_kind = 'vit_l' + else: + model_kind = 'vit_b' + + sam_model = sam_model_registry[model_kind](checkpoint=model_name) + sam_predictor = SamPredictor(sam_model) + + image_path = os.path.join(image_dir, filename) + image = nodes.LoadImage().load_image(image_path)[0] + image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + + if impact.config.get_config()['sam_editor_cpu']: + device = 'cpu' + else: + device = comfy.model_management.get_torch_device() + + sam_predictor.model.to(device=device) + sam_predictor.set_image(image, "RGB") + sam_predictor.model.cpu() + + + @server.PromptServer.instance.routes.post("/sam/prepare") + async def sam_prepare(request): + global sam_predictor + global last_prepare_data + data = await request.json() + + with sam_lock: + if last_prepare_data is not None and last_prepare_data == data: + # already loaded: skip -- prevent redundant loading + return web.Response(status=200) + + last_prepare_data = data + + model_name = 'sam_vit_b_01ec64.pth' + if data['sam_model_name'] == 'auto': + model_name = impact.config.get_config()['sam_editor_model'] + + model_name = os.path.join(impact_pack.model_path, "sams", model_name) + + print(f"[INFO] ComfyUI-Impact-Pack: Loading SAM model '{model_name}'") + + filename, image_dir = folder_paths.annotated_filepath(data["filename"]) + + if image_dir is None: + typ = data['type'] if data['type'] != '' else 'output' + image_dir = folder_paths.get_directory_by_type(typ) + if data['subfolder'] is not None and data['subfolder'] != '': + image_dir += f"/{data['subfolder']}" + + if image_dir is None: + return web.Response(status=400) + + thread = threading.Thread(target=async_prepare_sam, args=(image_dir, model_name, filename,)) + thread.start() + + print(f"[INFO] ComfyUI-Impact-Pack: SAM model loading started (async).") + # aiohttp handlers must return a response; signal that loading has been kicked off + return web.Response(status=200) + + + @server.PromptServer.instance.routes.post("/sam/release") + async def release_sam(request): + global sam_predictor + + with sam_lock: + del sam_predictor + sam_predictor = None + + print(f"[INFO] ComfyUI-Impact-Pack: unloading SAM model") + return web.Response(status=200) + + + @server.PromptServer.instance.routes.post("/sam/detect") + async def sam_detect(request): + global sam_predictor + with sam_lock: + if sam_predictor is not None: + if impact.config.get_config()['sam_editor_cpu']: + device = 'cpu' + else: + device = comfy.model_management.get_torch_device() + + sam_predictor.model.to(device=device) + try: + data = await request.json() + + positive_points = data['positive_points'] + negative_points = data['negative_points'] + threshold = data['threshold'] + + points = [] + plabs = [] + + for p in positive_points: + points.append(p) + plabs.append(1) + + for p in negative_points: + points.append(p) + plabs.append(0) + + detected_masks = core.sam_predict(sam_predictor, points, plabs, None, threshold) + mask = core.combine_masks2(detected_masks) + + if mask is None: + return web.Response(status=400) + + image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + i = 255. 
* image.cpu().numpy() + + img = Image.fromarray(np.clip(i[0], 0, 255).astype(np.uint8)) + + img_buffer = io.BytesIO() + img.save(img_buffer, format='png') + + headers = {'Content-Type': 'image/png'} + finally: + sam_predictor.model.to(device="cpu") + + return web.Response(body=img_buffer.getvalue(), headers=headers) + + else: + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/wildcards/list") +async def wildcards_list(request): + data = {'data': impact.wildcards.get_wildcard_list()} + return web.json_response(data) + + +@server.PromptServer.instance.routes.post("/impact/wildcards") +async def populate_wildcards(request): + data = await request.json() + populated = wildcards.process(data['text'], data.get('seed', None)) + return web.json_response({"text": populated}) + + +segs_picker_map = {} + +@server.PromptServer.instance.routes.get("/impact/segs/picker/count") +async def segs_picker_count(request): + node_id = request.rel_url.query.get('id', '') + + if node_id in segs_picker_map: + res = len(segs_picker_map[node_id]) + return web.Response(status=200, text=str(res)) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/segs/picker/view") +async def segs_picker(request): + node_id = request.rel_url.query.get('id', '') + idx = int(request.rel_url.query.get('idx', '')) + + if node_id in segs_picker_map and idx < len(segs_picker_map[node_id]): + pil = segs_picker_map[node_id][idx] + + image_bytes = BytesIO() + pil.save(image_bytes, format="PNG") + image_bytes.seek(0) + + return web.Response(status=200, body=image_bytes, content_type='image/png', headers={"Content-Disposition": f"filename={node_id}{idx}.png"}) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/view/validate") +async def view_validate(request): + if "filename" in request.rel_url.query: + filename = request.rel_url.query["filename"] + subfolder = request.rel_url.query["subfolder"] + filename, base_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' in filename: + return web.Response(status=400) + + if base_dir is None: + base_dir = folder_paths.get_input_directory() + + file = os.path.join(base_dir, subfolder, filename) + + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/validate/pb_id_image") +async def view_validate(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id not in core.preview_bridge_image_id_map: + return web.Response(status=400) + + file = core.preview_bridge_image_id_map[pb_id] + if os.path.isfile(file): + return web.Response(status=200) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/set/pb_id_image") +async def set_previewbridge_image(request): + if "filename" in request.rel_url.query: + node_id = request.rel_url.query["node_id"] + filename = request.rel_url.query["filename"] + path_type = request.rel_url.query["type"] + subfolder = request.rel_url.query["subfolder"] + filename, output_dir = folder_paths.annotated_filepath(filename) + + if filename == '' or filename[0] == '/' or '..' 
in filename: + return web.Response(status=400) + + if output_dir is None: + if path_type == 'input': + output_dir = folder_paths.get_input_directory() + elif path_type == 'output': + output_dir = folder_paths.get_output_directory() + else: + output_dir = folder_paths.get_temp_directory() + + file = os.path.join(output_dir, subfolder, filename) + item = { + 'filename': filename, + 'type': path_type, + 'subfolder': subfolder, + } + pb_id = core.set_previewbridge_image(node_id, file, item) + + return web.Response(status=200, text=pb_id) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/get/pb_id_image") +async def get_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + _, path_item = core.preview_bridge_image_id_map[pb_id] + return web.json_response(path_item) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/impact/view/pb_id_image") +async def view_previewbridge_image(request): + if "id" in request.rel_url.query: + pb_id = request.rel_url.query["id"] + + if pb_id in core.preview_bridge_image_id_map: + file = core.preview_bridge_image_id_map[pb_id] + + with Image.open(file) as img: + filename = os.path.basename(file) + return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""}) + + return web.Response(status=400) + + +def onprompt_for_switch(json_data): + inversed_switch_info = {} + onprompt_switch_info = {} + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'ImpactInversedSwitch': + select_input = v['inputs']['select'] + if isinstance(select_input, list) and len(select_input) == 2: + input_node = json_data['prompt'][select_input[0]] + if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']: + inversed_switch_info[k] = input_node['inputs']['value'] + else: + inversed_switch_info[k] = select_input + + elif cls in ['ImpactSwitch', 'LatentSwitch', 'SEGSSwitch', 'ImpactMakeImageList']: + if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode']: + select_input = v['inputs']['select'] + if isinstance(select_input, list) and len(select_input) == 2: + input_node = json_data['prompt'][select_input[0]] + if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']: + onprompt_switch_info[k] = input_node['inputs']['value'] + if input_node['class_type'] == 'ImpactSwitch' and 'inputs' in input_node and 'select' in input_node['inputs']: + if isinstance(input_node['inputs']['select'], int): + onprompt_switch_info[k] = input_node['inputs']['select'] + else: + print(f"\n##### ##### #####\n[WARN] {cls}: For the 'select' operation, only 'select_index' of the 'ImpactSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs.\n##### ##### #####\n") + else: + onprompt_switch_info[k] = select_input + + for k, v in json_data['prompt'].items(): + disable_targets = set() + + for kk, vv in v['inputs'].items(): + if isinstance(vv, list) and len(vv) == 2: + if vv[0] in inversed_switch_info: + if vv[1] + 1 != inversed_switch_info[vv[0]]: + disable_targets.add(kk) + + if k in onprompt_switch_info: + selected_slot_name = f"input{onprompt_switch_info[k]}" + for kk, vv in v['inputs'].items(): + if kk != selected_slot_name and kk.startswith('input'): + disable_targets.add(kk) + + for kk in disable_targets: + del v['inputs'][kk] + + 
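+        # At this point every input slot that pointed at a non-selected switch
+        # branch has been deleted. ComfyUI only executes nodes reachable from an
+        # output through input links, so the pruned subgraphs are skipped rather
+        # than evaluated and discarded. e.g. an ImpactSwitch whose inputs were
+        # {"input1": ["4", 0], "input2": ["7", 0]} with select == 2 keeps only
+        # "input2", so node "4"'s subtree never runs.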
return json_data + + +def onprompt_for_pickers(json_data): + detected_pickers = set() + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'ImpactSEGSPicker': + detected_pickers.add(k) + + # garbage collection + keys_to_remove = [key for key in segs_picker_map if key not in detected_pickers] + for key in keys_to_remove: + del segs_picker_map[key] + + +def gc_preview_bridge_cache(json_data): + prompt_keys = json_data['prompt'].keys() + + for key in list(core.preview_bridge_cache.keys()): + if key not in prompt_keys: + print(f"key deleted: {key}") + del core.preview_bridge_cache[key] + + +def workflow_imagereceiver_update(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v and v['class_type'] == 'ImageReceiver': + if v['inputs']['save_to_workflow']: + v['inputs']['image'] = "#DATA" + + +def regional_sampler_seed_update(json_data): + prompt = json_data['prompt'] + + for k, v in prompt.items(): + if 'class_type' in v and v['class_type'] == 'RegionalSampler': + seed_2nd_mode = v['inputs']['seed_2nd_mode'] + + new_seed = None + if seed_2nd_mode == 'increment': + new_seed = v['inputs']['seed_2nd']+1 + if new_seed > 1125899906842624: + new_seed = 0 + elif seed_2nd_mode == 'decrement': + new_seed = v['inputs']['seed_2nd']-1 + if new_seed < 0: + new_seed = 1125899906842624 + elif seed_2nd_mode == 'randomize': + new_seed = random.randint(0, 1125899906842624) + + if new_seed is not None: + server.PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "seed_2nd", "type": "INT", "value": new_seed}) + + +def onprompt(json_data): + try: + json_data = onprompt_for_switch(json_data) + onprompt_for_pickers(json_data) + gc_preview_bridge_cache(json_data) + workflow_imagereceiver_update(json_data) + regional_sampler_seed_update(json_data) + except Exception as e: + print(f"[WARN] ComfyUI-Impact-Pack: Error on prompt - several features will not work.\n{e}") + + return json_data + + +server.PromptServer.instance.add_on_prompt_handler(onprompt) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..61709ce09d5410d4e75722d2df0094c1a5e5fe93 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/legacy_nodes.py @@ -0,0 +1,273 @@ +import folder_paths + +import impact.mmdet_nodes as mmdet_nodes +from impact.utils import * +from impact.core import SEG +import impact.core as core +import nodes + +class NO_BBOX_MODEL: + pass + + +class NO_SEGM_MODEL: + pass + + +class MMDetLoader: + @classmethod + def INPUT_TYPES(s): + bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("mmdets_bbox")] + segms = ["segm/"+x for x in folder_paths.get_filename_list("mmdets_segm")] + return {"required": {"model_name": (bboxs + segms, )}} + RETURN_TYPES = ("BBOX_MODEL", "SEGM_MODEL") + FUNCTION = "load_mmdet" + + CATEGORY = "ImpactPack/Legacy" + + def load_mmdet(self, model_name): + mmdet_path = folder_paths.get_full_path("mmdets", model_name) + model = mmdet_nodes.load_mmdet(mmdet_path) + + if model_name.startswith("bbox"): + return model, NO_SEGM_MODEL() + else: + return NO_BBOX_MODEL(), model + + +class BboxDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_model": ("BBOX_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + 
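+                    # dilation grows each detected mask outward (roughly by the given
+                    # number of pixels); crop_factor scales the crop region around each
+                    # bbox so the detailer receives surrounding context, not just the
+                    # tight detection box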
"dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + @staticmethod + def detect(bbox_model, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + mmdet_results = mmdet_nodes.inference_bbox(bbox_model, image, threshold) + segmasks = core.create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + items.append(item) + + shape = h, w + return shape, items + + def doit(self, bbox_model, image, threshold, dilation, crop_factor): + return (BboxDetectorForEach.detect(bbox_model, image, threshold, dilation, crop_factor), ) + + +class SegmDetectorCombined: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_model": ("SEGM_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + def doit(self, segm_model, image, threshold, dilation): + mmdet_results = mmdet_nodes.inference_segm(image, segm_model, threshold) + segmasks = core.create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + mask = combine_masks(segmasks) + return (mask,) + + +class BboxDetectorCombined(SegmDetectorCombined): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "bbox_model": ("BBOX_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 4, "min": 0, "max": 255, "step": 1}), + } + } + + def doit(self, bbox_model, image, threshold, dilation): + mmdet_results = mmdet_nodes.inference_bbox(bbox_model, image, threshold) + segmasks = core.create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + mask = combine_masks(segmasks) + return (mask,) + + +class SegmDetectorForEach: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segm_model": ("SEGM_MODEL", ), + "image": ("IMAGE", ), + "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "dilation": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + } + } + + RETURN_TYPES = ("SEGS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + def doit(self, segm_model, image, threshold, dilation, crop_factor): + mmdet_results = mmdet_nodes.inference_segm(image, segm_model, threshold) + segmasks = core.create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + crop_region = 
make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + items.append(item) + + shape = h, w + return ((shape, items), ) + + +class SegsMaskCombine: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "image": ("IMAGE", ), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Legacy" + + @staticmethod + def combine(segs, image): + h = image.shape[1] + w = image.shape[2] + + mask = np.zeros((h, w), dtype=np.uint8) + + for seg in segs[1]: + cropped_mask = seg.cropped_mask + crop_region = seg.crop_region + mask[crop_region[1]:crop_region[3], crop_region[0]:crop_region[2]] |= (cropped_mask * 255).astype(np.uint8) + + return torch.from_numpy(mask.astype(np.float32) / 255.0) + + def doit(self, segs, image): + return (SegsMaskCombine.combine(segs, image), ) + + +class MaskPainter(nodes.PreviewImage): + @classmethod + def INPUT_TYPES(s): + return {"required": {"images": ("IMAGE",), }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + }, + "optional": {"mask_image": ("IMAGE_PATH",), "image": (["#placeholder"], )}, + } + + RETURN_TYPES = ("MASK",) + + FUNCTION = "save_painted_images" + + CATEGORY = "ImpactPack/Legacy" + + def save_painted_images(self, images, filename_prefix="impact-mask", + prompt=None, extra_pnginfo=None, mask_image=None, image=None): + if image == "#placeholder" or image['image_hash'] != id(images): + # new input image + res = self.save_images(images, filename_prefix, prompt, extra_pnginfo) + + item = res['ui']['images'][0] + + if not item['filename'].endswith(']'): + filepath = f"{item['filename']} [{item['type']}]" + else: + filepath = item['filename'] + + _, mask = nodes.LoadImage().load_image(filepath) + + res['ui']['aux'] = [id(images), res['ui']['images']] + res['result'] = (mask, ) + + return res + + else: + # new mask + if '0' in image: # fallback + image = image['0'] + + forward = {'filename': image['forward_filename'], + 'subfolder': image['forward_subfolder'], + 'type': image['forward_type'], } + + res = {'ui': {'images': [forward]}} + + imgpath = "" + if 'subfolder' in image and image['subfolder'] != "": + imgpath = image['subfolder'] + "/" + + imgpath += f"{image['filename']}" + + if 'type' in image and image['type'] != "": + imgpath += f" [{image['type']}]" + + res['ui']['aux'] = [id(images), [forward]] + _, mask = nodes.LoadImage().load_image(imgpath) + res['result'] = (mask, ) + + return res diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c48f5bcd99f90315fd5a895a23c5ada3b4168f --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py @@ -0,0 +1,494 @@ +import sys +import time + +import execution +import folder_paths +import impact.impact_server +from server import PromptServer +from impact.utils import any_typ +import impact.core as core + + +class ImpactCompare: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cmp": (['a = b', 'a <> b', 'a > b', 'a < b', 'a >= b', 'a <= b', 'tt', 'ff'],), + "a": (any_typ, ), + "b": (any_typ, ), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, cmp, a, b): + if cmp == "a
= b": + return (a == b, ) + elif cmp == "a <> b": + return (a != b, ) + elif cmp == "a > b": + return (a > b, ) + elif cmp == "a < b": + return (a < b, ) + elif cmp == "a >= b": + return (a >= b, ) + elif cmp == "a <= b": + return (a <= b, ) + elif cmp == 'tt': + return (True, ) + else: + return (False, ) + + +class ImpactNotEmptySEGS: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"segs": ("SEGS",)}} + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, segs): + return (segs[1] != [], ) + + +class ImpactConditionalBranch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "cond": ("BOOLEAN", {"forceInput": True}), + "tt_value": (any_typ,), + "ff_value": (any_typ,), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, cond, tt_value, ff_value): + if cond: + return (tt_value,) + else: + return (ff_value,) + + +class ImpactConditionalStopIteration: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "cond": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = () + + OUTPUT_NODE = True + + def doit(self, cond): + if cond: + PromptServer.instance.send_sync("stop-iteration", {}) + return {} + + +class ImpactNeg: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { "value": ("BOOLEAN", {"forceInput": True}), }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("BOOLEAN", ) + + def doit(self, value): + return (not value, ) + + +class ImpactInt: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("INT", ) + + def doit(self, value): + return (value, ) + + +class ImpactFloat: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 1.0, "min": -3.402823466e+38, "max": 3.402823466e+38}), + }, + } + + FUNCTION = "doit" + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = ("FLOAT", ) + + def doit(self, value): + return (value, ) + + +class ImpactValueSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": (any_typ, ), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + "optional": { + "signal_opt": (any_typ,), + } + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + RETURN_NAMES = ("signal", ) + + def doit(self, value, link_id=0, signal_opt=None): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return (signal_opt, ) + + +class ImpactIntConstSender: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ, ), + "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = () + + def doit(self, signal, value, link_id=0): + PromptServer.instance.send_sync("value-send", {"link_id": link_id, "value": value}) + return {} + + +class ImpactValueReceiver: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "typ": (["STRING", "INT", "FLOAT", "BOOLEAN"], ), + "value": ("STRING", {"default": ""}), + "link_id": ("INT", {"default": 0, "min": 0, "max": 
sys.maxsize, "step": 1}), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic" + + RETURN_TYPES = (any_typ, ) + + def doit(self, typ, value, link_id=0): + if typ == "INT": + return (int(value), ) + elif typ == "FLOAT": + return (float(value), ) + elif typ == "BOOLEAN": + return (bool(value), ) + else: + return (value, ) + + +class ImpactImageInfo: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": ("IMAGE", ), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("batch", "height", "width", "channel") + + def doit(self, value): + return (value.shape[0], value.shape[1], value.shape[2], value.shape[3]) + + +class ImpactMinMax: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}), + "a": (any_typ,), + "b": (any_typ,), + }, + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + + RETURN_TYPES = ("INT", ) + + def doit(self, mode, a, b): + if mode: + return (max(a, b), ) + else: + return (min(a, b),) + + +class ImpactQueueTrigger: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, mode): + if(mode): + PromptServer.instance.send_sync("impact-add-queue", {}) + + return (signal,) + + +class ImpactQueueTriggerCountdown: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "count": ("INT", {"default": 10, "min": 0, "max": 0xffffffffffffffff}) + }, + "hidden": {"unique_id": "UNIQUE_ID"} + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ, "INT") + RETURN_NAMES = ("signal_opt", "count") + OUTPUT_NODE = True + + def doit(self, signal, count, unique_id): + if count > 0: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": unique_id, "widget_name": "count", "type": "int", "value": count-1}) + PromptServer.instance.send_sync("impact-add-queue", {}) + + return (signal, count) + + +class ImpactSetWidgetValue: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "widget_name": ("STRING", {"multiline": False}), + }, + "optional": { + "boolean_value": ("BOOLEAN", {"forceInput": True}), + "int_value": ("INT", {"forceInput": True}), + "float_value": ("FLOAT", {"forceInput": True}), + "string_value": ("STRING", {"forceInput": True}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, node_id, widget_name, boolean_value=None, int_value=None, float_value=None, string_value=None, ): + kind = None + if boolean_value is not None: + value = boolean_value + kind = "BOOLEAN" + elif int_value is not None: + value = int_value + kind = "INT" + elif float_value is not None: + value = float_value + kind = "FLOAT" + elif string_value is not None: + value = string_value + kind = "STRING" + else: + value = None + + if value is not None: + PromptServer.instance.send_sync("impact-node-feedback", + {"node_id": node_id, "widget_name": widget_name, "type": kind, 
"value": value}) + + return (signal,) + + +class ImpactNodeSetMuteState: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "set_state": ("BOOLEAN", {"default": True, "label_on": "active", "label_off": "mute"}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, node_id, set_state): + PromptServer.instance.send_sync("impact-node-mute-state", {"node_id": node_id, "is_active": set_state}) + return (signal,) + + +class ImpactSleep: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "signal": (any_typ,), + "seconds": ("FLOAT", {"default": 0.5, "min": 0, "max": 3600}), + } + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal_opt",) + OUTPUT_NODE = True + + def doit(self, signal, seconds): + time.sleep(seconds) + return (signal,) + + +error_skip_flag = False +try: + import sys + def filter_message(str): + global error_skip_flag + + if "IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE" in str: + return True + elif error_skip_flag and "ERROR:root:!!! Exception during processing !!!\n" == str: + error_skip_flag = False + return True + else: + return False + + sys.__comfyui_manager_register_message_collapse(filter_message) + +except Exception as e: + print(f"[WARN] ComfyUI-Impact-Pack: `ComfyUI` or `ComfyUI-Manager` is an outdated version.") + pass + + +def workflow_to_map(workflow): + nodes = {} + links = {} + for link in workflow['links']: + links[link[0]] = link[1:] + for node in workflow['nodes']: + nodes[str(node['id'])] = node + + return nodes, links + + +class ImpactControlBridge: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "value": (any_typ,), + "mode": ("BOOLEAN", {"default": True, "label_on": "pass", "label_off": "block"}), + }, + "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"} + } + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Logic/_for_test" + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("value",) + OUTPUT_NODE = True + + def doit(self, value, mode, unique_id, prompt, extra_pnginfo): + global error_skip_flag + + nodes, links = workflow_to_map(extra_pnginfo['workflow']) + + outputs = [str(links[link][2]) for link in nodes[unique_id]['outputs'][0]['links']] + + prompt_set = set(prompt.keys()) + output_set = set(outputs) + + if mode: + should_active_but_muted = output_set - prompt_set + if len(should_active_but_muted) > 0: + PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'actives': list(should_active_but_muted)}) + error_skip_flag = True + raise Exception("IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE\nIf you see this message, your ComfyUI-Manager is outdated. Please update it.") + else: + should_muted_but_active = prompt_set.intersection(output_set) + if len(should_muted_but_active) > 0: + PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'mutes': list(should_muted_but_active)}) + error_skip_flag = True + raise Exception("IMPACT-PACK-SIGNAL: STOP CONTROL BRIDGE\nIf you see this message, your ComfyUI-Manager is outdated. 
Please update it.") + + return (value, ) + + +original_handle_execution = execution.PromptExecutor.handle_execution_error + + +def handle_execution_error(**kwargs): + print(f" handled") + execution.PromptExecutor.handle_execution_error(**kwargs) + diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..b2689f6c1f9e27f87667bc94bd401b32084fa072 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/mmdet_nodes.py @@ -0,0 +1,213 @@ +import folder_paths +from impact.core import * + +import mmcv +from mmdet.apis import (inference_detector, init_detector) +from mmdet.evaluation import get_classes + + +def load_mmdet(model_path): + model_config = os.path.splitext(model_path)[0] + ".py" + model = init_detector(model_config, model_path, device="cpu") + return model + + +def inference_segm_old(model, image, conf_threshold): + image = image.numpy()[0] * 255 + mmdet_results = inference_detector(model, image) + + bbox_results, segm_results = mmdet_results + label = "A" + + classes = get_classes("coco") + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_results) + ] + n, m = bbox_results[0].shape + if n == 0: + return [[], [], []] + labels = np.concatenate(labels) + bboxes = np.vstack(bbox_results) + segms = mmcv.concat_list(segm_results) + filter_idxs = np.where(bboxes[:, -1] > conf_threshold)[0] + results = [[], [], []] + for i in filter_idxs: + results[0].append(label + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + + return results + + +def inference_segm(image, modelname, conf_thres, lab="A"): + image = image.numpy()[0] * 255 + mmdet_results = inference_detector(modelname, image).pred_instances + bboxes = mmdet_results.bboxes.numpy() + segms = mmdet_results.masks.numpy() + scores = mmdet_results.scores.numpy() + + classes = get_classes("coco") + + n, m = bboxes.shape + if n == 0: + return [[], [], [], []] + labels = mmdet_results.labels + filter_inds = np.where(mmdet_results.scores > conf_thres)[0] + results = [[], [], [], []] + for i in filter_inds: + results[0].append(lab + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +def inference_bbox(modelname, image, conf_threshold): + image = image.numpy()[0] * 255 + label = "A" + output = inference_detector(modelname, image).pred_instances + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY) + + segms = [] + for x0, y0, x1, y1 in output.bboxes: + cv2_mask = np.zeros(cv2_gray.shape, np.uint8) + cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1) + cv2_mask_bool = cv2_mask.astype(bool) + segms.append(cv2_mask_bool) + + n, m = output.bboxes.shape + if n == 0: + return [[], [], [], []] + + bboxes = output.bboxes.numpy() + scores = output.scores.numpy() + filter_idxs = np.where(scores > conf_threshold)[0] + results = [[], [], [], []] + for i in filter_idxs: + results[0].append(label) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +class BBoxDetector: + bbox_model = None + + def __init__(self, bbox_model): + self.bbox_model = bbox_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) 
+ mmdet_results = inference_bbox(self.bbox_model, image, threshold) + segmasks = create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + # bbox_size = (item_bbox[2]-item_bbox[0],item_bbox[3]-item_bbox[1]) # (w,h) + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + + items.append(item) + + shape = image.shape[1], image.shape[2] + return shape, items + + def detect_combined(self, image, threshold, dilation): + mmdet_results = inference_bbox(self.bbox_model, image, threshold) + segmasks = create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + return combine_masks(segmasks) + + def setAux(self, x): + pass + + +class SegmDetector(BBoxDetector): + segm_model = None + + def __init__(self, segm_model): + self.segm_model = segm_model + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, detailer_hook=None): + drop_size = max(drop_size, 1) + mmdet_results = inference_segm(image, self.segm_model, threshold) + segmasks = create_segmasks(mmdet_results) + + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + items = [] + h = image.shape[1] + w = image.shape[2] + for x in segmasks: + item_bbox = x[0] + item_mask = x[1] + + y1, x1, y2, x2 = item_bbox + + if x2 - x1 > drop_size and y2 - y1 > drop_size: # minimum dimension must be (2,2) to avoid squeeze issue + crop_region = make_crop_region(w, h, item_bbox, crop_factor) + cropped_image = crop_image(image, crop_region) + cropped_mask = crop_ndarray2(item_mask, crop_region) + confidence = x[2] + + item = SEG(cropped_image, cropped_mask, confidence, crop_region, item_bbox, None, None) + items.append(item) + + return (h, w), items # (h, w) shape tuple, consistent with BBoxDetector.detect + + def detect_combined(self, image, threshold, dilation): + mmdet_results = inference_segm(image, self.segm_model, threshold) + segmasks = create_segmasks(mmdet_results) + if dilation > 0: + segmasks = dilate_masks(segmasks, dilation) + + return combine_masks(segmasks) + + def setAux(self, x): + pass + + +class MMDetDetectorProvider: + @classmethod + def INPUT_TYPES(s): + bboxs = ["bbox/"+x for x in folder_paths.get_filename_list("mmdets_bbox")] + segms = ["segm/"+x for x in folder_paths.get_filename_list("mmdets_segm")] + return {"required": {"model_name": (bboxs + segms, )}} + RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR") + FUNCTION = "load_mmdet" + + CATEGORY = "ImpactPack" + + def load_mmdet(self, model_name): + mmdet_path = folder_paths.get_full_path("mmdets", model_name) + model = load_mmdet(mmdet_path) + + if model_name.startswith("bbox"): + return BBoxDetector(model), NO_SEGM_DETECTOR() + else: + return NO_BBOX_DETECTOR(), SegmDetector(model) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..91736a1ac4913220ff1255bf0c463523840b4283 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/onnx.py @@ -0,0 +1,38 @@ +import
impact.additional_dependencies +from impact.utils import * + +impact.additional_dependencies.ensure_onnx_package() + +try: + import onnxruntime + + def onnx_inference(image, onnx_model): + # prepare image + pil = tensor2pil(image) + image = np.ascontiguousarray(pil) + image = image[:, :, ::-1] # to BGR image + image = image.astype(np.float32) + image -= [103.939, 116.779, 123.68] # 'caffe' mode image preprocessing + + # do detection + onnx_model = onnxruntime.InferenceSession(onnx_model, providers=["CPUExecutionProvider"]) + outputs = onnx_model.run( + [s_i.name for s_i in onnx_model.get_outputs()], + {onnx_model.get_inputs()[0].name: np.expand_dims(image, axis=0)}, + ) + + labels = [op for op in outputs if op.dtype == "int32"][0] + scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0] + boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0] + + # filter-out useless item + idx = np.where(labels[0] == -1)[0][0] + + labels = labels[0][:idx] + scores = scores[0][:idx] + boxes = boxes[0][:idx].astype(np.uint32) + + return labels, scores, boxes +except Exception as e: + print("[ERROR] ComfyUI-Impact-Pack: 'onnxruntime' package doesn't support 'python 3.11', yet.") + print(f"\t{e}") diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..d03e837594419819f5a7c74ae4ead5f60d6b2914 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py @@ -0,0 +1,422 @@ +import folder_paths +import impact.wildcards + +class ToDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL",), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }} + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + pipe = (kwargs['model'], kwargs['clip'], kwargs['vae'], kwargs['positive'], kwargs['negative'], kwargs['wildcard'], kwargs['bbox_detector'], + kwargs.get('segm_detector_opt', None), kwargs.get('sam_model_opt', None), kwargs.get('detailer_hook', None), + kwargs.get('refiner_model', None), kwargs.get('refiner_clip', None), + kwargs.get('refiner_positive', None), kwargs.get('refiner_negative', None)) + return (pipe, ) + + +class ToDetailerPipeSDXL(ToDetailerPipe): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "refiner_model": ("MODEL",), + "refiner_clip": ("CLIP",), + "refiner_positive": ("CONDITIONING",), + "refiner_negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the 
text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL",), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }} + + +class FromDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK") + RETURN_NAMES = ("model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe + return model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, _, _, _, _ = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook + + +class FromDetailerPipe_SDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, } + + RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK", "MODEL", "CLIP", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model_opt, segm_detector_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + + +class ToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, model, clip, vae, positive, negative): + pipe = (model, clip, vae, positive, negative) + return (pipe, ) + + +class FromBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = 
"ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return model, clip, vae, positive, negative + + +class FromBasicPipe_v2: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), }, } + + RETURN_TYPES = ("BASIC_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING") + RETURN_NAMES = ("basic_pipe", "model", "clip", "vae", "positive", "negative") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe): + model, clip, vae, positive, negative = basic_pipe + return basic_pipe, model, clip, vae, positive, negative + + +class BasicPipeToDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + basic_pipe = kwargs['basic_pipe'] + bbox_detector = kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None + return (pipe, ) + + +class BasicPipeToDetailerPipeSDXL: + @classmethod + def INPUT_TYPES(s): + return {"required": {"base_basic_pipe": ("BASIC_PIPE",), + "refiner_basic_pipe": ("BASIC_PIPE",), + "bbox_detector": ("BBOX_DETECTOR", ), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "sam_model_opt": ("SAM_MODEL", ), + "segm_detector_opt": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE", ) + RETURN_NAMES = ("detailer_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + base_basic_pipe = kwargs['base_basic_pipe'] + refiner_basic_pipe = kwargs['refiner_basic_pipe'] + bbox_detector = kwargs['bbox_detector'] + wildcard = kwargs['wildcard'] + sam_model_opt = kwargs.get('sam_model_opt', None) + segm_detector_opt = kwargs.get('segm_detector_opt', None) + detailer_hook = kwargs.get('detailer_hook', None) + + model, clip, vae, positive, negative = base_basic_pipe + refiner_model, refiner_clip, refiner_vae, refiner_positive, refiner_negative = refiner_basic_pipe + pipe = model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative + return (pipe, ) + + +class DetailerPipeToBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": {"detailer_pipe": 
("DETAILER_PIPE",), }} + + RETURN_TYPES = ("BASIC_PIPE", "BASIC_PIPE") + RETURN_NAMES = ("base_basic_pipe", "refiner_basic_pipe") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, detailer_pipe): + model, clip, vae, positive, negative, _, _, _, _, _, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe + pipe = model, clip, vae, positive, negative + refiner_pipe = refiner_model, refiner_clip, vae, refiner_positive, refiner_negative + return (pipe, refiner_pipe) + + +class EditBasicPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": {"basic_pipe": ("BASIC_PIPE",), }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + }, + } + + RETURN_TYPES = ("BASIC_PIPE", ) + RETURN_NAMES = ("basic_pipe", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, basic_pipe, model=None, clip=None, vae=None, positive=None, negative=None): + res_model, res_clip, res_vae, res_positive, res_negative = basic_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + pipe = res_model, res_clip, res_vae, res_positive, res_negative + + return (pipe, ) + + +class EditDetailerPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": ("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + "segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } + + RETURN_TYPES = ("DETAILER_PIPE",) + RETURN_NAMES = ("detailer_pipe",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Pipe" + + def doit(self, *args, **kwargs): + detailer_pipe = kwargs['detailer_pipe'] + wildcard = kwargs['wildcard'] + model = kwargs.get('model', None) + clip = kwargs.get('clip', None) + vae = kwargs.get('vae', None) + positive = kwargs.get('positive', None) + negative = kwargs.get('negative', None) + bbox_detector = kwargs.get('bbox_detector', None) + sam_model = kwargs.get('sam_model', None) + segm_detector = kwargs.get('segm_detector', None) + detailer_hook = kwargs.get('detailer_hook', None) + refiner_model = kwargs.get('refiner_model', None) + refiner_clip = kwargs.get('refiner_clip', None) + refiner_positive = kwargs.get('refiner_positive', None) + refiner_negative = kwargs.get('refiner_negative', None) + + res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, res_refiner_model, res_refiner_clip, res_refiner_positive, res_refiner_negative = detailer_pipe + + if model is not None: + res_model = model + + if clip is not None: + res_clip = clip + + if vae is not None: + res_vae = vae + + if positive is not None: + res_positive = positive + + if negative is not None: + res_negative = negative + + if bbox_detector is not None: + res_bbox_detector = bbox_detector + + if 
segm_detector is not None: + res_segm_detector = segm_detector + + if wildcard != "": + res_wildcard = wildcard + + if sam_model is not None: + res_sam_model = sam_model + + if detailer_hook is not None: + res_detailer_hook = detailer_hook + + if refiner_model is not None: + res_refiner_model = refiner_model + + if refiner_clip is not None: + res_refiner_clip = refiner_clip + + if refiner_positive is not None: + res_refiner_positive = refiner_positive + + if refiner_negative is not None: + res_refiner_negative = refiner_negative + + pipe = (res_model, res_clip, res_vae, res_positive, res_negative, res_wildcard, + res_bbox_detector, res_segm_detector, res_sam_model, res_detailer_hook, + res_refiner_model, res_refiner_clip, res_refiner_positive, res_refiner_negative) + + return (pipe, ) + + +class EditDetailerPipeSDXL(EditDetailerPipe): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detailer_pipe": ("DETAILER_PIPE",), + "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}), + "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),), + "Select to add Wildcard": (["Select the Wildcard to add to the text"] + impact.wildcards.get_wildcard_list(),), + }, + "optional": { + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "refiner_model": ("MODEL",), + "refiner_clip": ("CLIP",), + "refiner_positive": ("CONDITIONING",), + "refiner_negative": ("CONDITIONING",), + "bbox_detector": ("BBOX_DETECTOR",), + "sam_model": ("SAM_MODEL",), + "segm_detector": ("SEGM_DETECTOR",), + "detailer_hook": ("DETAILER_HOOK",), + }, + } diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..01b5600671e4bc5620251eab6f0c5a4ffe625571 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/sample_error_enhancer.py @@ -0,0 +1,25 @@ +import comfy.sample +import traceback + +original_sample = comfy.sample.sample + + +def informative_sample(*args, **kwargs): + try: + return original_sample(*args, **kwargs) # pass-through wrapper: it only rewrites the error message for one known failure mode and otherwise changes nothing + except RuntimeError as e: + is_model_mix_issue = False + try: + if 'mat1 and mat2 shapes cannot be multiplied' in e.args[0]: + if 'torch.nn.functional.linear' in traceback.format_exc().strip().split('\n')[-3]: + is_model_mix_issue = True + except Exception: + pass + + if is_model_mix_issue: + raise RuntimeError("\n\n#### It seems that models and clips are mixed and interconnected between SDXL Base, SDXL Refiner, SD1.x, and SD2.x. Please verify.
####\n\n") + else: + raise e + + +comfy.sample.sample = informative_sample diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..06006786ebb1f368ccd5430d1aeae6f83625db87 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py @@ -0,0 +1,1100 @@ +import os +import sys + +import torch + +import folder_paths +import comfy +import impact.impact_server +from nodes import MAX_RESOLUTION + +from impact.utils import * +import impact.core as core +from impact.core import SEG +import impact.utils as utils + +class SEGSDetailer: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "force_inpaint": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "basic_pipe": ("BASIC_PIPE",), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), + + "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + } + } + + RETURN_TYPES = ("SEGS", "IMAGE") + RETURN_NAMES = ("segs", "cnet_images") + OUTPUT_IS_LIST = (False, True) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1, + refiner_basic_pipe_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image.shape) + + new_segs = [] + cnet_pil_list = [] + + for i in range(batch_size): + seed += 1 + for seg in segs[1]: + cropped_image = seg.cropped_image if seg.cropped_image is not None \ + else crop_ndarray4(image.numpy(), seg.crop_region) + + is_mask_all_zeros = (seg.cropped_mask == 0).all().item() + if is_mask_all_zeros: + print(f"Detailer: segment skip [empty mask]") + new_segs.append(seg) + continue + + if noise_mask: + cropped_mask = seg.cropped_mask + else: + cropped_mask = None + + enhanced_pil, cnet_pil = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + positive, negative, denoise, cropped_mask, force_inpaint, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, 
refiner_positive=refiner_positive, refiner_negative=refiner_negative, + control_net_wrapper=seg.control_net_wrapper, cycle=cycle) + + if cnet_pil is not None: + cnet_pil_list.append(cnet_pil) + + if enhanced_pil is None: + new_cropped_image = cropped_image + else: + new_cropped_image = pil2numpy(enhanced_pil) + + new_seg = SEG(new_cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs), cnet_pil_list + + def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio=None, batch_size=1, cycle=1, + refiner_basic_pipe_opt=None): + + segs, cnet_pil_list = SEGSDetailer.do_detail(image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, denoise, noise_mask, force_inpaint, basic_pipe, refiner_ratio, batch_size, cycle=cycle, + refiner_basic_pipe_opt=refiner_basic_pipe_opt) + + # set fallback image + if len(cnet_pil_list) == 0: + cnet_pil_list = [empty_pil_tensor()] + + return (segs, cnet_pil_list) + + +class SEGSDetailerForAnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image_frames": ("IMAGE", ), + "segs": ("SEGS", ), + "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), + "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE",), + "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}) + }, + "optional": { + "refiner_basic_pipe_opt": ("BASIC_PIPE",), + } + } + + RETURN_TYPES = ("SEGS",) + RETURN_NAMES = ("segs",) + OUTPUT_IS_LIST = (False,) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None): + + model, clip, vae, positive, negative = basic_pipe + if refiner_basic_pipe_opt is None: + refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None + else: + refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt + + segs = core.segs_scale_match(segs, image_frames.shape) + + new_segs = [] + + for seg in segs[1]: + cropped_image_frames = None + + for image in image_frames: + image = image.unsqueeze(0) + cropped_image = seg.cropped_image if seg.cropped_image is not None else crop_ndarray4(image.numpy(), seg.crop_region) + + if cropped_image_frames is None: + cropped_image_frames = torch.from_numpy(cropped_image) + else: + cropped_image_frames = torch.concat((cropped_image_frames, torch.from_numpy(cropped_image)), dim=0) + + cropped_image_frames = cropped_image_frames.numpy() + enhanced_image_tensor = core.enhance_detail_for_animatediff(cropped_image_frames, model, clip, vae, guide_size, guide_size_for, max_size, + seg.bbox, seed, steps, cfg, sampler_name, scheduler, + 
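+                                                                             # all frames of this segment were stacked above and are enhanced in
+                                                                             # a single call so that AnimateDiff can keep the detailed crop
+                                                                             # temporally consistent across the batch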
positive, negative, denoise, seg.cropped_mask, + refiner_ratio=refiner_ratio, refiner_model=refiner_model, + refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative) + + if enhanced_image_tensor is None: + new_cropped_image = cropped_image_frames + else: + new_cropped_image = enhanced_image_tensor.numpy() + + new_seg = SEG(new_cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None) + new_segs.append(new_seg) + + return (segs[0], new_segs) + + def doit(self, image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, + denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None): + + segs = SEGSDetailerForAnimateDiff.do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, + scheduler, denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt) + + return (segs,) + + +class SEGSPaste: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "segs": ("SEGS", ), + "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), + "alpha": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + }, + "optional": {"ref_image_opt": ("IMAGE", ), } + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Detailer" + + @staticmethod + def doit(image, segs, feather, alpha=255, ref_image_opt=None): + + segs = core.segs_scale_match(segs, image.shape) + + result = None + for i in range(image.shape[0]): + image_i = image[i].unsqueeze(0) + image_pil = tensor2pil(image_i).convert('RGBA') + for seg in segs[1]: + ref_image_pil = None + if ref_image_opt is None and seg.cropped_image is not None: + cropped_tensor = torch.from_numpy(seg.cropped_image)[i] + cropped_tensor = cropped_tensor.unsqueeze(0) + ref_image_pil = tensor2pil(cropped_tensor) + elif ref_image_opt is not None: + ref_tensor = ref_image_opt[i].unsqueeze(0) + cropped = crop_image(ref_tensor, seg.crop_region) + cropped = np.clip(255. 
* cropped.squeeze(), 0, 255).astype(np.uint8) + ref_image_pil = Image.fromarray(cropped).convert('RGBA') + + if ref_image_pil is not None: + mask_pil = feather_mask(seg.cropped_mask, feather, base_alpha=alpha) + image_pil.paste(ref_image_pil, (seg.crop_region[0], seg.crop_region[1]), mask_pil) + + image_tensor = pil2tensor(image_pil.convert('RGB')) + + if result is None: + result = image_tensor + else: + result = torch.concat((result, image_tensor), dim=0) + + return (result, ) + + +class SEGSPreview: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "alpha_mode": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = () + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + OUTPUT_NODE = True + + def doit(self, segs, alpha_mode=True, fallback_image_opt=None): + full_output_folder, filename, counter, subfolder, filename_prefix = \ + folder_paths.get_save_image_path("impact_seg_preview", self.output_dir, segs[0][1], segs[0][0]) + + results = list() + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + if len(segs[1]) > 0: + if segs[1][0].cropped_image is not None: + batch_count = len(segs[1][0].cropped_image) + elif fallback_image_opt is not None: + batch_count = len(fallback_image_opt) + else: + return {"ui": {"images": results}} + + for i in range(batch_count): + for seg in segs[1]: + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image[i] + elif fallback_image_opt is not None: + # take from original image + ref_image = fallback_image_opt[i].unsqueeze(0) + cropped_image = crop_image(ref_image, seg.crop_region).squeeze(0) + + if cropped_image is not None: + cropped_image = Image.fromarray(np.clip(255. 
* cropped_image, 0, 255).astype(np.uint8)) + + if alpha_mode: + mask_array = seg.cropped_mask.astype(np.uint8) * 255 + mask_image = Image.fromarray(mask_array, mode='L').resize(cropped_image.size) + cropped_image.putalpha(mask_image) + + file = f"{filename}_{counter:05}_.webp" + cropped_image.save(os.path.join(full_output_folder, file)) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + counter += 1 + + return {"ui": {"images": results}} + + +detection_labels = [ + 'hand', 'face', 'mouth', 'eyes', 'eyebrows', 'pupils', + 'left_eyebrow', 'left_eye', 'left_pupil', 'right_eyebrow', 'right_eye', 'right_pupil', + 'short_sleeved_shirt', 'long_sleeved_shirt', 'short_sleeved_outwear', 'long_sleeved_outwear', + 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress', 'sling_dress', + "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", + "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", + "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", + "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", + "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", + "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", + "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", + "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", + "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", + "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", + "hair drier", "toothbrush" + ] + + +class SEGSLabelFilter: + @classmethod + def INPUT_TYPES(s): + global detection_labels + return {"required": { + "segs": ("SEGS", ), + "preset": (['all'] + detection_labels, ), + "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + @staticmethod + def filter(segs, labels): + labels = set([label.strip() for label in labels]) + + if 'all' in labels: + return (segs, (segs[0], []), ) + else: + res_segs = [] + remained_segs = [] + + for x in segs[1]: + if x.label in labels: + res_segs.append(x) + elif 'eyes' in labels and x.label in ['left_eye', 'right_eye']: + res_segs.append(x) + elif 'eyebrows' in labels and x.label in ['left_eyebrow', 'right_eyebrow']: + res_segs.append(x) + elif 'pupils' in labels and x.label in ['left_pupil', 'right_pupil']: + res_segs.append(x) + else: + remained_segs.append(x) + + return ((segs[0], res_segs), (segs[0], remained_segs), ) + + def doit(self, segs, preset, labels): + labels = labels.split(',') + return SEGSLabelFilter.filter(segs, labels) + + +class SEGSOrderedFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],), + "order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}), + "take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + 
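# --- Editor's note (illustration only, not part of the diff hunk) ---
# A quick reference for the container convention used throughout this file,
# inferred from the code itself (EmptySEGS, SEGSConcat, and the SEG
# constructor calls); shown as comments so the hunk content is unchanged:
#
#   segs == (shape, seg_list)
#     shape    - the size of the image the segments came from
#                (EmptySEGS uses (0, 0); SEGSPreview reads segs[0][0]/segs[0][1])
#     seg_list - a list of SEG elements, each carrying, in order:
#                cropped_image, cropped_mask, confidence, crop_region,
#                bbox, label, control_net_wrapper
#
# Filters such as SEGSOrderedFilter (continued below) therefore only inspect
# seg.crop_region and rebuild the same (shape, [SEG, ...]) tuple for both of
# their outputs.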
CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, order, take_start, take_count): + segs_with_order = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + else: + value = y2 + + segs_with_order.append((value, seg)) + + if order: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=True) + else: + sorted_list = sorted(segs_with_order, key=lambda x: x[0], reverse=False) + + result_list = [] + remained_list = [] + + for i, item in enumerate(sorted_list): + if take_start <= i < take_start + take_count: + result_list.append(item[1]) + else: + remained_list.append(item[1]) + + return ((segs[0], result_list), (segs[0], remained_list), ) + + +class SEGSRangeFilter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + "target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent"],), + "mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}), + "min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), + "max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}), + }, + } + + RETURN_TYPES = ("SEGS", "SEGS",) + RETURN_NAMES = ("filtered_SEGS", "remained_SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, target, mode, min_value, max_value): + new_segs = [] + remained_segs = [] + + for seg in segs[1]: + x1 = seg.crop_region[0] + y1 = seg.crop_region[1] + x2 = seg.crop_region[2] + y2 = seg.crop_region[3] + + if target == "area(=w*h)": + value = (y2 - y1) * (x2 - x1) + elif target == "length_percent": + h = y2 - y1 + w = x2 - x1 + value = max(h/w, w/h)*100 + print(f"value={value}") + elif target == "width": + value = x2 - x1 + elif target == "height": + value = y2 - y1 + elif target == "x1": + value = x1 + elif target == "x2": + value = x2 + elif target == "y1": + value = y1 + else: + value = y2 + + if mode and min_value <= value <= max_value: + print(f"[in] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + elif not mode and (value < min_value or value > max_value): + print(f"[out] value={value} / {mode}, {min_value}, {max_value}") + new_segs.append(seg) + else: + remained_segs.append(seg) + print(f"[filter] value={value} / {mode}, {min_value}, {max_value}") + + return ((segs[0], new_segs), (segs[0], remained_segs), ) + + +class SEGSToImageList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + } + } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, fallback_image_opt=None): + results = list() + + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + for seg in segs[1]: + if seg.cropped_image is not None: + cropped_image = torch.from_numpy(seg.cropped_image) + elif fallback_image_opt is not None: + # take from original image + cropped_image = torch.from_numpy(crop_image(fallback_image_opt, seg.crop_region)) + else: + cropped_image = empty_pil_tensor() + + results.append(cropped_image) + + if len(results) == 0: + 
results.append(empty_pil_tensor()) + + return (results,) + + +class SEGSToMaskList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + if len(masks) == 0: + empty_mask = torch.zeros(segs[0], dtype=torch.float32, device="cpu") + masks = [empty_mask] + return (masks,) + + +class SEGSToMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("MASK",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + masks = core.segs_to_masklist(segs) + mask_batch = torch.stack(masks, dim=0) + return (mask_batch,) + + +class SEGSConcat: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs1": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + dim = None + res = None + + for k, v in list(kwargs.items()): + if v[0] == (0, 0) or len(v[1]) == 0: + continue + + if dim is None: + dim = v[0] + res = v[1] + else: + if v[0] == dim: + res = res + v[1] + else: + print(f"ERROR: source shape of 'segs1'{dim} and '{k}'{v[0]} are different. '{k}' will be ignored") + + if dim is None: + empty_segs = ((0, 0), []) + return (empty_segs, ) + else: + return ((dim, res), ) + + +class DecomposeSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS", ), + }, + } + + RETURN_TYPES = ("SEGS_HEADER", "SEG_ELT",) + OUTPUT_IS_LIST = (False, True, ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs): + return segs + + +class AssembleSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_header": ("SEGS_HEADER", ), + "seg_elt": ("SEG_ELT", ), + }, + } + + INPUT_IS_LIST = True + + RETURN_TYPES = ("SEGS", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_header, seg_elt): + return ((seg_header[0], seg_elt), ) + + +class From_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + } + + RETURN_TYPES = ("SEG_ELT", "IMAGE", "MASK", "SEG_ELT_crop_region", "SEG_ELT_bbox", "SEG_ELT_control_net_wrapper", "FLOAT", "STRING") + RETURN_NAMES = ("seg_elt", "cropped_image", "cropped_mask", "crop_region", "bbox", "control_net_wrapper", "confidence", "label") + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt): + cropped_image = torch.tensor(seg_elt.cropped_image) if seg_elt.cropped_image is not None else None + return (seg_elt, cropped_image, torch.tensor(seg_elt.cropped_mask), seg_elt.crop_region, seg_elt.bbox, seg_elt.control_net_wrapper, seg_elt.confidence, seg_elt.label,) + + +class Edit_SEG_ELT: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seg_elt": ("SEG_ELT", ), + }, + "optional": { + "cropped_image_opt": ("IMAGE", ), + "cropped_mask_opt": ("MASK", ), + "crop_region_opt": ("SEG_ELT_crop_region", ), + "bbox_opt": ("SEG_ELT_bbox", ), + "control_net_wrapper_opt": ("SEG_ELT_control_net_wrapper", ), + "confidence_opt": ("FLOAT", {"min": 0, "max": 1.0, "step": 0.1, "forceInput": True}), + "label_opt": ("STRING", {"multiline": False, "forceInput": True}), + } + } + + RETURN_TYPES = ("SEG_ELT", ) + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, seg_elt, cropped_image_opt=None, cropped_mask_opt=None, 
confidence_opt=None, crop_region_opt=None,
+             bbox_opt=None, label_opt=None, control_net_wrapper_opt=None):
+
+        cropped_image = seg_elt.cropped_image if cropped_image_opt is None else cropped_image_opt
+        cropped_mask = seg_elt.cropped_mask if cropped_mask_opt is None else cropped_mask_opt
+        confidence = seg_elt.confidence if confidence_opt is None else confidence_opt
+        crop_region = seg_elt.crop_region if crop_region_opt is None else crop_region_opt
+        bbox = seg_elt.bbox if bbox_opt is None else bbox_opt
+        label = seg_elt.label if label_opt is None else label_opt
+        control_net_wrapper = seg_elt.control_net_wrapper if control_net_wrapper_opt is None else control_net_wrapper_opt
+
+        cropped_image = cropped_image.numpy() if cropped_image is not None else None
+
+        if isinstance(cropped_mask, torch.Tensor):
+            if len(cropped_mask.shape) == 3:
+                cropped_mask = cropped_mask.squeeze(0)
+
+            cropped_mask = cropped_mask.numpy()
+
+        seg = SEG(cropped_image, cropped_mask, confidence, crop_region, bbox, label, control_net_wrapper)
+
+        return (seg,)
+
+
+class DilateMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "mask": ("MASK", ),
+            "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+        }}
+
+    RETURN_TYPES = ("MASK", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self, mask, dilation):
+        mask = core.dilate_mask(mask.numpy(), dilation)
+        return (torch.from_numpy(mask), )
+
+
+class Dilate_SEG_ELT:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "seg_elt": ("SEG_ELT", ),
+            "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
+        }}
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    # NOTE: the parameter must be named 'seg_elt' to match the input name declared
+    # in INPUT_TYPES; ComfyUI passes inputs as keyword arguments, so the original
+    # 'def doit(self, seg, dilation)' would fail with an unexpected-keyword error
+    # when the node executes.
+    def doit(self, seg_elt, dilation):
+        mask = core.dilate_mask(seg_elt.cropped_mask, dilation)
+        seg = SEG(seg_elt.cropped_image, mask, seg_elt.confidence, seg_elt.crop_region, seg_elt.bbox, seg_elt.label, seg_elt.control_net_wrapper)
+        return (seg,)
+
+
+class SEG_ELT_BBOX_ScaleBy:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "seg": ("SEG_ELT", ),
+            "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}),
+        }}
+
+    RETURN_TYPES = ("SEG_ELT", )
+
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    @staticmethod
+    def fill_zero_outside_bbox(mask, crop_region, bbox):
+        cx1, cy1, _, _ = crop_region
+        x1, y1, x2, y2 = bbox
+        x1, y1, x2, y2 = x1-cx1, y1-cy1, x2-cx1, y2-cy1
+        h, w = mask.shape
+
+        x1 = min(w-1, max(0, x1))
+        x2 = min(w-1, max(0, x2))
+        y1 = min(h-1, max(0, y1))
+        y2 = min(h-1, max(0, y2))
+
+        mask_cropped = mask.copy()
+        mask_cropped[:, :x1] = 0  # zero fill left side
+        mask_cropped[:, x2:] = 0  # zero fill right side
+        mask_cropped[:y1, :] = 0  # zero fill top side
+        mask_cropped[y2:, :] = 0  # zero fill bottom side
+        return mask_cropped
+
+    def doit(self, seg, scale_by):
+        x1, y1, x2, y2 = seg.bbox
+        w = x2-x1
+        h = y2-y1
+
+        dw = int((w * scale_by - w)/2)
+        dh = int((h * scale_by - h)/2)
+
+        bbox = (x1-dw, y1-dh, x2+dw, y2+dh)
+
+        cropped_mask = SEG_ELT_BBOX_ScaleBy.fill_zero_outside_bbox(seg.cropped_mask, seg.crop_region, bbox)
+        seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, bbox, seg.label, seg.control_net_wrapper)
+        return (seg,)
+
+
+class EmptySEGS:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {}, }
+
+    RETURN_TYPES = ("SEGS",)
+    FUNCTION = "doit"
+
+    CATEGORY = "ImpactPack/Util"
+
+    def doit(self):
+        shape = 0, 0
+        return ((shape, []),)
+
+
+class SegsToCombinedMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return
{"required": { "segs": ("SEGS",), } } + + RETURN_TYPES = ("MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, segs): + return (core.segs_to_combined_mask(segs),) + + +class MediaPipeFaceMeshToSEGS: + @classmethod + def INPUT_TYPES(s): + bool_true_widget = ("BOOLEAN", {"default": True, "label_on": "Enabled", "label_off": "Disabled"}) + bool_false_widget = ("BOOLEAN", {"default": False, "label_on": "Enabled", "label_off": "Disabled"}) + return {"required": { + "image": ("IMAGE",), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "crop_min_size": ("INT", {"min": 10, "max": MAX_RESOLUTION, "step": 1, "default": 50}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 1}), + "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "face": bool_true_widget, + "mouth": bool_false_widget, + "left_eyebrow": bool_false_widget, + "left_eye": bool_false_widget, + "left_pupil": bool_false_widget, + "right_eyebrow": bool_false_widget, + "right_eye": bool_false_widget, + "right_pupil": bool_false_widget, + }, + # "optional": {"reference_image_opt": ("IMAGE", ), } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil): + # padding is obsolete now + # https://github.com/Fannovel16/comfyui_controlnet_aux/blob/1ec41fceff1ee99596445a0c73392fd91df407dc/utils.py#L33 + # def calc_pad(h_raw, w_raw): + # resolution = normalize_size_base_64(h_raw, w_raw) + # + # def pad64(x): + # return int(np.ceil(float(x) / 64.0) * 64 - x) + # + # k = float(resolution) / float(min(h_raw, w_raw)) + # h_target = int(np.round(float(h_raw) * k)) + # w_target = int(np.round(float(w_raw) * k)) + # + # return pad64(h_target), pad64(w_target) + + # if reference_image_opt is not None: + # if image.shape[1:] != reference_image_opt.shape[1:]: + # scale_by1 = reference_image_opt.shape[1] / image.shape[1] + # scale_by2 = reference_image_opt.shape[2] / image.shape[2] + # scale_by = min(scale_by1, scale_by2) + # + # # padding is obsolete now + # # h_pad, w_pad = calc_pad(reference_image_opt.shape[1], reference_image_opt.shape[2]) + # # if h_pad != 0: + # # # height padded + # # image = image[:, :-h_pad, :, :] + # # elif w_pad != 0: + # # # width padded + # # image = image[:, :, :-w_pad, :] + # + # image = nodes.ImageScaleBy().upscale(image, "bilinear", scale_by)[0] + + result = core.mediapipe_facemesh_to_segs(image, crop_factor, bbox_fill, crop_min_size, drop_size, dilation, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil) + return (result, ) + + +class MaskToSEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def 
doit(self, mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + result = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + return (result, ) + + +class MaskToSEGS_for_AnimateDiff: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + "combined": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}), + "bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), + "contour_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Operation" + + def doit(self, mask, combined, crop_factor, bbox_fill, drop_size, contour_fill=False): + if len(mask.shape) == 3: + mask = mask.squeeze(0) + + segs = core.mask_to_segs(mask, combined, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) + + all_masks = SEGSToMaskList().doit(segs)[0] + + result_mask = all_masks[0] + for mask in all_masks[1:]: + result_mask += mask + + result_mask = utils.to_binary_mask(result_mask, 0.1) + + return MaskToSEGS().doit(result_mask, False, crop_factor, False, drop_size, contour_fill) + + +class ControlNetApplySEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "segs": ("SEGS",), + "control_net": ("CONTROL_NET",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }, + "optional": { + "segs_preprocessor": ("SEGS_PREPROCESSOR",), + } + } + + RETURN_TYPES = ("SEGS",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, segs, control_net, strength, segs_preprocessor=None): + new_segs = [] + + for seg in segs[1]: + control_net_wrapper = core.ControlNetWrapper(control_net, strength, segs_preprocessor) + new_seg = SEG(seg.cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, control_net_wrapper) + new_segs.append(new_seg) + + return ((segs[0], new_segs), ) + + +class SEGSSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}), + "segs1": ("SEGS",), + }, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + input_name = f"segs{int(kwargs['select'])}" + + if input_name in kwargs: + return (kwargs[input_name],) + else: + print(f"SEGSSwitch: invalid select index ('segs1' is selected)") + return (kwargs['segs1'],) + + +class SEGSPicker: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "picks": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}), + "segs": ("SEGS",), + }, + "optional": { + "fallback_image_opt": ("IMAGE", ), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("SEGS", ) + + OUTPUT_NODE = True + + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, picks, segs, fallback_image_opt=None, unique_id=None): + if fallback_image_opt is not None: + segs = core.segs_scale_match(segs, fallback_image_opt.shape) + + # generate candidates image + cands = [] + for seg in segs[1]: + cropped_image = None + + if seg.cropped_image is not None: + cropped_image = seg.cropped_image + elif 
fallback_image_opt is not None: + # take from original image + cropped_image = crop_image(fallback_image_opt, seg.crop_region) + + if cropped_image is not None: + cropped_image = Image.fromarray(np.clip(255. * cropped_image.squeeze(), 0, 255).astype(np.uint8)) + + if cropped_image is not None: + pil = cropped_image + else: + pil = tensor2pil(empty_pil_tensor()) + + cands.append(pil) + + impact.impact_server.segs_picker_map[unique_id] = cands + + # pass only selected + pick_ids = set() + + for pick in picks.split(","): + try: + pick_ids.add(int(pick)-1) + except Exception: + pass + + new_segs = [] + for i in pick_ids: + if 0 <= i < len(segs[1]): + new_segs.append(segs[1][i]) + + return ((segs[0], new_segs),) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..2626035efa404e9998b9cafc5a6a6ed23cffc5ba --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/special_samplers.py @@ -0,0 +1,548 @@ +import time + +import comfy +import math +import impact.core as core +from impact.utils import * +from nodes import MAX_RESOLUTION +import nodes + +class TiledKSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "padded", 'simple'], ), + "basic_pipe": ("BASIC_PIPE", ) + }} + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, seed, steps, cfg, sampler_name, scheduler, denoise, + tile_width, tile_height, tiling_strategy, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.TiledKSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, + tile_width, tile_height, tiling_strategy) + return (sampler, ) + + +class KSamplerProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "basic_pipe": ("BASIC_PIPE", ) + }, + } + + RETURN_TYPES = ("KSAMPLER",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, seed, steps, cfg, sampler_name, scheduler, denoise, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.KSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise) + return (sampler, ) + + +class KSamplerAdvancedProvider: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": 
(comfy.samplers.KSampler.SCHEDULERS, ), + "basic_pipe": ("BASIC_PIPE", ) + }, + } + + RETURN_TYPES = ("KSAMPLER_ADVANCED",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, cfg, sampler_name, scheduler, basic_pipe): + model, _, _, positive, negative = basic_pipe + sampler = core.KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative) + return (sampler, ) + + +class TwoSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent_image": ("LATENT", ), + "base_sampler": ("KSAMPLER", ), + "mask_sampler": ("KSAMPLER", ), + "mask": ("MASK", ) + }, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + def doit(self, latent_image, base_sampler, mask_sampler, mask): + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample(latent_image) + + new_latent_image['noise_mask'] = mask + new_latent_image = mask_sampler.sample(new_latent_image) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class TwoAdvancedSamplersForMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "samples": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "mask_sampler": ("KSAMPLER_ADVANCED", ), + "mask": ("MASK", ), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}) + }, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Sampler" + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + def doit(self, seed, steps, denoise, samples, base_sampler, mask_sampler, mask, overlap_factor): + + inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + new_latent_image = samples.copy() + + mask_erosion = TwoAdvancedSamplersForMask.mask_erosion(samples, mask, overlap_factor) + + for i in range(start_at_step, adv_steps): + add_noise = "enable" if i == start_at_step else "disable" + return_with_leftover_noise = "enable" if i+1 != adv_steps else "disable" + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, i, i + 1, "enable", recover_special_sampler=True) + + new_latent_image['noise_mask'] = mask_erosion + new_latent_image = mask_sampler.sample_advanced("disable", seed, adv_steps, new_latent_image, i, i + 1, return_with_leftover_noise, recover_special_sampler=True) + + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class RegionalPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK", ), + "advanced_sampler": ("KSAMPLER_ADVANCED", ), + 
}, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + def doit(self, mask, advanced_sampler): + regional_prompt = core.REGIONAL_PROMPT(mask, advanced_sampler) + return ([regional_prompt], ) + + +class CombineRegionalPrompts: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "regional_prompts1": ("REGIONAL_PROMPTS", ), + }, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + def doit(self, **kwargs): + res = [] + for k, v in kwargs.items(): + res += v + + return (res, ) + + +class CombineConditionings: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "conditioning1": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, **kwargs): + res = [] + for k, v in kwargs.items(): + res += v + + return (res, ) + + +class RegionalSampler: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "seed_2nd": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "seed_2nd_mode": (["ignore", "fixed", "seed+seed_2nd", "seed-seed_2nd", "increment", "decrement", "randomize"], ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "base_only_steps": ("INT", {"default": 2, "min": 0, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "samples": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "regional_prompts": ("REGIONAL_PROMPTS", ), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}), + "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + @staticmethod + def mask_erosion(samples, mask, grow_mask_by): + mask = mask.clone() + + w = samples['samples'].shape[3] + h = samples['samples'].shape[2] + + mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear") + if grow_mask_by == 0: + mask_erosion = mask2 + else: + kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by)) + padding = math.ceil((grow_mask_by - 1) / 2) + + mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1) + + return mask_erosion[:, :, :w, :h].round() + + def doit(self, seed, seed_2nd, seed_2nd_mode, steps, base_only_steps, denoise, samples, base_sampler, regional_prompts, overlap_factor, restore_latent, unique_id=None): + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + adv_steps = int(steps / denoise) + start_at_step = adv_steps - steps + + region_len = len(regional_prompts) + total = steps*region_len + + leftover_noise = 'disable' + if base_only_steps > 0: + if seed_2nd_mode == 'ignore': + leftover_noise = 'enable' + + samples = base_sampler.sample_advanced("enable", seed, adv_steps, samples, start_at_step, start_at_step + base_only_steps, leftover_noise, 
recover_special_sampler=False) + + if seed_2nd_mode == "seed+seed_2nd": + seed += seed_2nd + if seed > 1125899906842624: + seed = seed - 1125899906842624 + elif seed_2nd_mode == "seed-seed_2nd": + seed -= seed_2nd + if seed < 0: + seed += 1125899906842624 + elif seed_2nd_mode != 'ignore': + seed = seed_2nd + + new_latent_image = samples.copy() + base_latent_image = None + + if leftover_noise != 'enable': + add_noise = "enable" + else: + add_noise = "disable" + + for i in range(start_at_step+base_only_steps, adv_steps): + core.update_node_status(unique_id, f"{i}/{steps} steps | ", ((i-start_at_step)*region_len)/total) + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image, i, i + 1, "enable", recover_special_sampler=True) + + if restore_latent: + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + core.update_node_status(unique_id, f"{i}/{steps} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced("disable", seed, adv_steps, new_latent_image, + i, i + 1, "enable", recover_special_sampler=True) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + add_noise = 'disable' + + # finalize + core.update_node_status(unique_id, f"finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced("disable", seed, adv_steps, new_latent_image, adv_steps, adv_steps+1, "disable", recover_special_sampler=False) + + core.update_node_status(unique_id, f"{steps}/{steps} steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class RegionalSamplerAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000}), + "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), + "latent_image": ("LATENT", ), + "base_sampler": ("KSAMPLER_ADVANCED", ), + "regional_prompts": ("REGIONAL_PROMPTS", ), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Regional" + + def doit(self, add_noise, noise_seed, steps, start_at_step, end_at_step, overlap_factor, 
restore_latent, + return_with_leftover_noise, latent_image, base_sampler, regional_prompts, unique_id): + if restore_latent: + latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']() + else: + latent_compositor = None + + masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts] + masks = [np.ceil(mask).astype(np.int32) for mask in masks] + combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks)) + + inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0)) + + region_len = len(regional_prompts) + end_at_step = min(steps, end_at_step) + total = (end_at_step - start_at_step) * region_len + + new_latent_image = latent_image.copy() + base_latent_image = None + region_masks = {} + + for i in range(start_at_step, end_at_step): + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | ", ((i-start_at_step)*region_len)/total) + + cur_add_noise = "enable" if i == start_at_step and add_noise else "disable" + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced(cur_add_noise, noise_seed, steps, new_latent_image, i, i + 1, "enable", recover_special_sampler=True) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = new_latent_image.copy() + + j = 1 + for regional_prompt in regional_prompts: + if restore_latent: + new_latent_image = base_latent_image.copy() + + core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total) + + if j not in region_masks: + region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0) + region_masks[j] = region_mask + else: + region_mask = region_masks[j] + + new_latent_image['noise_mask'] = region_mask + new_latent_image = regional_prompt.sampler.sample_advanced("disable", noise_seed, steps, new_latent_image, + i, i + 1, "enable", recover_special_sampler=True) + + if restore_latent: + del new_latent_image['noise_mask'] + base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0] + new_latent_image = base_latent_image + + j += 1 + + # finalize + core.update_node_status(unique_id, f"finalize") + if base_latent_image is not None: + new_latent_image = base_latent_image + else: + base_latent_image = new_latent_image + + new_latent_image['noise_mask'] = inv_mask + new_latent_image = base_sampler.sample_advanced("disable", noise_seed, steps, new_latent_image, end_at_step, end_at_step+1, return_with_leftover_noise, recover_special_sampler=False) + + core.update_node_status(unique_id, f"{end_at_step}/{end_at_step} steps", total) + core.update_node_status(unique_id, "", None) + + if restore_latent: + new_latent_image = base_latent_image + + if 'noise_mask' in new_latent_image: + del new_latent_image['noise_mask'] + + return (new_latent_image, ) + + +class KSamplerBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = 
"sampling" + + def sample(self, basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise=1.0): + model, clip, vae, positive, negative = basic_pipe + latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise)[0] + return (basic_pipe, latent, vae) + + +class KSamplerAdvancedBasicPipe: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"basic_pipe": ("BASIC_PIPE",), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + } + } + + RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE") + FUNCTION = "sample" + + CATEGORY = "sampling" + + def sample(self, basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0): + model, clip, vae, positive, negative = basic_pipe + + if add_noise: + add_noise = "enable" + else: + add_noise = "disable" + + if return_with_leftover_noise: + return_with_leftover_noise = "enable" + else: + return_with_leftover_noise = "disable" + + latent = nodes.KSamplerAdvanced().sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise)[0] + return (basic_pipe, latent, vae) diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..4f3a372faebd074ce471bb7c168e04761e42db30 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py @@ -0,0 +1,201 @@ +from impact.utils import any_typ +import comfy_extras.nodes_mask +from nodes import MAX_RESOLUTION + +class GeneralSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1}), + "sel_mode": ("BOOLEAN", {"default": True, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False}), + }, + "optional": { + "input1": (any_typ,), + }, + "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"} + } + + RETURN_TYPES = (any_typ, "STRING", "INT") + RETURN_NAMES = ("selected_value", "selected_label", "selected_index") + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, *args, **kwargs): + selected_index = int(kwargs['select']) + input_name = f"input{selected_index}" + + selected_label = input_name + node_id = kwargs['unique_id'] + nodelist = kwargs['extra_pnginfo']['workflow']['nodes'] + for node in nodelist: + if str(node['id']) == node_id: + inputs = node['inputs'] + + for slot in inputs: + if slot['name'] == input_name and 'label' in slot: + selected_label = slot['label'] + + break + + if input_name in kwargs: + return (kwargs[input_name], selected_label, selected_index) + else: + print(f"ImpactSwitch: invalid select 
index (ignored)") + return (None, "", selected_index) + + +class GeneralInversedSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1}), + "input": (any_typ,), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = tuple([any_typ] * 100) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, input, unique_id): + res = [] + + for i in range(0, select): + if select == i+1: + res.append(input) + else: + res.append(None) + + return res + + +class ImageMaskSwitch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}), + "images1": ("IMAGE",), + }, + + "optional": { + "mask1_opt": ("MASK",), + "images2_opt": ("IMAGE",), + "mask2_opt": ("MASK",), + "images3_opt": ("IMAGE",), + "mask3_opt": ("MASK",), + "images4_opt": ("IMAGE",), + "mask4_opt": ("MASK",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None, + images4_opt=None, mask4_opt=None): + if select == 1: + return images1, mask1_opt, + elif select == 2: + return images2_opt, mask2_opt, + elif select == 3: + return images3_opt, mask3_opt, + else: + return images4_opt, mask4_opt, + + +class RemoveNoiseMask: + @classmethod + def INPUT_TYPES(s): + return {"required": {"samples": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "doit" + + CATEGORY = "ImpactPack/Util" + + def doit(self, samples): + res = {key: value for key, value in samples.items() if key != 'noise_mask'} + return (res, ) + + +class ImagePasteMasked: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "resize_source": ("BOOLEAN", {"default": False}), + }, + "optional": { + "mask": ("MASK",), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "composite" + + CATEGORY = "image" + + def composite(self, destination, source, x, y, resize_source, mask = None): + destination = destination.clone().movedim(-1, 1) + output = comfy_extras.nodes_mask.composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1) + return (output,) + + +from impact.utils import any_typ + +class ImpactLogger: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "data": (any_typ, ""), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + CATEGORY = "ImpactPack/Debug" + + OUTPUT_NODE = True + + RETURN_TYPES = () + FUNCTION = "doit" + + def doit(self, data, prompt, extra_pnginfo): + shape = "" + if hasattr(data, "shape"): + shape = f"{data.shape} / " + + print(f"[IMPACT LOGGER]: {shape}{data}") + + print(f" PROMPT: {prompt}") + + # for x in prompt: + # if 'inputs' in x and 'populated_text' in x['inputs']: + # print(f"PROMP: {x['10']['inputs']['populated_text']}") + # + # for x in extra_pnginfo['workflow']['nodes']: + # if x['type'] == 'ImpactWildcardProcessor': + # print(f" WV : {x['widgets_values'][1]}\n") + + return {} + + +class ImpactDummyInput: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + + CATEGORY = "ImpactPack/Debug" + + RETURN_TYPES = (any_typ,) + FUNCTION = "doit" + + def doit(self): + return ("DUMMY",) diff --git 
a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/utils.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b412e7c25098312f36cf928a46f1caf04b5e87ab
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/utils.py
@@ -0,0 +1,319 @@
+import torch
+import cv2
+import numpy as np
+from PIL import Image, ImageDraw, ImageFilter
+import folder_paths
+from . import config
+
+LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+
+
+def pil2numpy(image):
+    return (np.array(image).astype(np.float32) / 255.0)[np.newaxis, :, :, :]
+
+
+def pil2tensor(image):
+    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+
+def tensor2pil(image):
+    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+
+def center_of_bbox(bbox):
+    w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
+    return bbox[0] + w/2, bbox[1] + h/2
+
+
+def combine_masks(masks):
+    if len(masks) == 0:
+        return None
+    else:
+        initial_cv2_mask = np.array(masks[0][1])
+        combined_cv2_mask = initial_cv2_mask
+
+        for i in range(1, len(masks)):
+            cv2_mask = np.array(masks[i][1])
+
+            if combined_cv2_mask.shape == cv2_mask.shape:
+                combined_cv2_mask = cv2.bitwise_or(combined_cv2_mask, cv2_mask)
+            else:
+                # do nothing - incompatible mask
+                pass
+
+        mask = torch.from_numpy(combined_cv2_mask)
+        return mask
+
+
+def combine_masks2(masks):
+    if len(masks) == 0:
+        return None
+    else:
+        initial_cv2_mask = np.array(masks[0]).astype(np.uint8)
+        combined_cv2_mask = initial_cv2_mask
+
+        for i in range(1, len(masks)):
+            cv2_mask = np.array(masks[i]).astype(np.uint8)
+
+            if combined_cv2_mask.shape == cv2_mask.shape:
+                combined_cv2_mask = cv2.bitwise_or(combined_cv2_mask, cv2_mask)
+            else:
+                # do nothing - incompatible mask
+                pass
+
+        mask = torch.from_numpy(combined_cv2_mask)
+        return mask
+
+
+def bitwise_and_masks(mask1, mask2):
+    mask1 = mask1.cpu()
+    mask2 = mask2.cpu()
+    cv2_mask1 = np.array(mask1)
+    cv2_mask2 = np.array(mask2)
+
+    if cv2_mask1.shape == cv2_mask2.shape:
+        cv2_mask = cv2.bitwise_and(cv2_mask1, cv2_mask2)
+        return torch.from_numpy(cv2_mask)
+    else:
+        # do nothing - incompatible mask shape: mostly empty mask
+        return mask1
+
+
+def to_binary_mask(mask, threshold=0):
+    if len(mask.shape) == 3:
+        mask = mask.squeeze(0)
+
+    mask = mask.clone().cpu()
+    mask[mask > threshold] = 1.
+    mask[mask <= threshold] = 0.
+    return mask
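# --- Editor's illustration (not part of the diff hunk; assumes only the
# helpers defined in this file). ComfyUI images are float32 tensors shaped
# (batch, height, width, channel) with values in 0..1, and the mask helpers
# here work on 2-D float tensors; the sizes below are arbitrary. Shown as
# comments so the hunk content itself is unchanged:
#
#   from PIL import Image
#   import torch
#
#   pil_img = Image.new("RGB", (64, 32), (255, 0, 0))   # W=64, H=32
#   t = pil2tensor(pil_img)       # -> torch.Size([1, 32, 64, 3]), values in 0..1
#   back = tensor2pil(t)          # -> 64x32 RGB PIL image
#
#   soft = torch.rand(1, 32, 64)                 # (B, H, W) soft mask
#   hard = to_binary_mask(soft, threshold=0.5)   # -> (32, 64), values {0., 1.}
# --- end editor's illustration ---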
+
+
+def use_gpu_opencv():
+    return not config.get_config()['disable_gpu_opencv']
+
+
+def dilate_mask(mask, dilation_factor, iter=1):
+    if dilation_factor == 0:
+        return mask
+
+    if len(mask.shape) == 3:
+        mask = mask.squeeze(0)
+
+    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)
+
+    if use_gpu_opencv():
+        mask = cv2.UMat(mask)
+        kernel = cv2.UMat(kernel)
+
+    # 'iterations' must be passed by keyword: the third positional argument of
+    # cv2.dilate/cv2.erode is 'dst', so the original 'cv2.dilate(mask, kernel, iter)'
+    # was not passing the iteration count as intended.
+    if dilation_factor > 0:
+        result = cv2.dilate(mask, kernel, iterations=iter)
+    else:
+        result = cv2.erode(mask, kernel, iterations=iter)
+
+    if use_gpu_opencv():
+        return result.get()
+    else:
+        return result
+
+
+def dilate_masks(segmasks, dilation_factor, iter=1):
+    if dilation_factor == 0:
+        return segmasks
+
+    dilated_masks = []
+    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)
+
+    if use_gpu_opencv():
+        kernel = cv2.UMat(kernel)
+
+    for i in range(len(segmasks)):
+        cv2_mask = segmasks[i][1]
+
+        if use_gpu_opencv():
+            cv2_mask = cv2.UMat(cv2_mask)
+
+        # same fix as dilate_mask: pass the iteration count by keyword
+        if dilation_factor > 0:
+            dilated_mask = cv2.dilate(cv2_mask, kernel, iterations=iter)
+        else:
+            dilated_mask = cv2.erode(cv2_mask, kernel, iterations=iter)
+
+        if use_gpu_opencv():
+            dilated_mask = dilated_mask.get()
+
+        item = (segmasks[i][0], dilated_mask, segmasks[i][2])
+        dilated_masks.append(item)
+
+    return dilated_masks
+
+
+def feather_mask(mask, thickness, base_alpha=255):
+    pil_mask = Image.fromarray(np.uint8(mask * base_alpha))
+
+    # Create a feathered mask by applying a Gaussian blur to the mask
+    blurred_mask = pil_mask.filter(ImageFilter.GaussianBlur(thickness))
+    feathered_mask = Image.new("L", pil_mask.size, 0)
+    feathered_mask.paste(blurred_mask, (0, 0), blurred_mask)
+    return feathered_mask
+
+
+def subtract_masks(mask1, mask2):
+    mask1 = mask1.cpu()
+    mask2 = mask2.cpu()
+    cv2_mask1 = np.array(mask1) * 255
+    cv2_mask2 = np.array(mask2) * 255
+
+    if cv2_mask1.shape == cv2_mask2.shape:
+        cv2_mask = cv2.subtract(cv2_mask1, cv2_mask2)
+        return torch.clamp(torch.from_numpy(cv2_mask) / 255.0, min=0, max=1)
+    else:
+        # do nothing - incompatible mask shape: mostly empty mask
+        return mask1
+
+
+def add_masks(mask1, mask2):
+    mask1 = mask1.cpu()
+    mask2 = mask2.cpu()
+    cv2_mask1 = np.array(mask1) * 255
+    cv2_mask2 = np.array(mask2) * 255
+
+    if cv2_mask1.shape == cv2_mask2.shape:
+        cv2_mask = cv2.add(cv2_mask1, cv2_mask2)
+        return torch.clamp(torch.from_numpy(cv2_mask) / 255.0, min=0, max=1)
+    else:
+        # do nothing - incompatible mask shape: mostly empty mask
+        return mask1
+
+
+def normalize_region(limit, startp, size):
+    if startp < 0:
+        new_endp = min(limit, size)
+        new_startp = 0
+    elif startp + size > limit:
+        new_startp = max(0, limit - size)
+        new_endp = limit
+    else:
+        new_startp = startp
+        new_endp = min(limit, startp+size)
+
+    return int(new_startp), int(new_endp)
+
+
+def make_crop_region(w, h, bbox, crop_factor, crop_min_size=None):
+    x1 = bbox[0]
+    y1 = bbox[1]
+    x2 = bbox[2]
+    y2 = bbox[3]
+
+    bbox_w = x2 - x1
+    bbox_h = y2 - y1
+
+    crop_w = bbox_w * crop_factor
+    crop_h = bbox_h * crop_factor
+
+    if crop_min_size is not None:
+        crop_w = max(crop_min_size, crop_w)
+        crop_h = max(crop_min_size, crop_h)
+
+    kernel_x = x1 + bbox_w / 2
+    kernel_y = y1 + bbox_h / 2
+
+    new_x1 = int(kernel_x - crop_w / 2)
+    new_y1 = int(kernel_y - crop_h / 2)
+
+    # make sure position in (w,h)
+    new_x1, new_x2 = normalize_region(w, new_x1, crop_w)
+    new_y1, new_y2 = normalize_region(h, new_y1, crop_h)
+
+    return [new_x1, new_y1, new_x2, new_y2]
+
+
+def crop_ndarray4(npimg, crop_region):
+    x1 = crop_region[0]
+    y1 = crop_region[1]
+    x2 = crop_region[2]
+    y2 =
crop_region[3] + + cropped = npimg[:, y1:y2, x1:x2, :] + + return cropped + + +def crop_ndarray2(npimg, crop_region): + x1 = crop_region[0] + y1 = crop_region[1] + x2 = crop_region[2] + y2 = crop_region[3] + + cropped = npimg[y1:y2, x1:x2] + + return cropped + + +def crop_image(image, crop_region): + return crop_ndarray4(np.array(image), crop_region) + + +def to_latent_image(pixels, vae): + x = pixels.shape[1] + y = pixels.shape[2] + if pixels.shape[1] != x or pixels.shape[2] != y: + pixels = pixels[:, :x, :y, :] + t = vae.encode(pixels[:, :, :, :3]) + return {"samples": t} + + +def scale_tensor(w, h, image): + image = tensor2pil(image) + scaled_image = image.resize((w, h), resample=LANCZOS) + return pil2tensor(scaled_image) + + +def scale_tensor_and_to_pil(w, h, image): + image = tensor2pil(image) + return image.resize((w, h), resample=LANCZOS) + + +def empty_pil_tensor(w=64, h=64): + image = Image.new("RGB", (w, h)) + draw = ImageDraw.Draw(image) + draw.rectangle((0, 0, w-1, h-1), fill=(0, 0, 0)) + return pil2tensor(image) + + +class NonListIterable: + def __init__(self, data): + self.data = data + + def __getitem__(self, index): + return self.data[index] + + +# author: Trung0246 +def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions): + # Iterate over the list of full folder paths + for full_folder_path in full_folder_paths: + # Use the provided function to add each model folder path + folder_paths.add_model_folder_path(folder_name, full_folder_path) + + # Now handle the extensions. If the folder name already exists, update the extensions + if folder_name in folder_paths.folder_names_and_paths: + # Unpack the current paths and extensions + current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name] + # Update the extensions set with the new extensions + updated_extensions = current_extensions | extensions + # Reassign the updated tuple back to the dictionary + folder_paths.folder_names_and_paths[folder_name] = (current_paths, updated_extensions) + else: + # If the folder name was not present, add_model_folder_path would have added it with the last path + # Now we just need to update the set of extensions as it would be an empty set + # Also ensure that all paths are included (since add_model_folder_path adds only one path at a time) + folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions) + + +# wildcard trick is taken from pythongossss's +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + +any_typ = AnyType("*") diff --git a/custom_nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py new file mode 100644 index 0000000000000000000000000000000000000000..fe54532a444c4b786d3ad76d9fd21303dcf2864c --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py @@ -0,0 +1,402 @@ +import re +import random +import os +import nodes +import folder_paths +import yaml + +wildcard_dict = {} + + +def get_wildcard_list(): + return [f"__{x}__" for x in wildcard_dict.keys()] + + +def wildcard_normalize(x): + return x.replace("\\", "/").lower() + + +def read_wildcard(k, v): + if isinstance(v, list): + k = wildcard_normalize(k) + wildcard_dict[k] = v + elif isinstance(v, dict): + for k2, v2 in v.items(): + new_key = f"{k}/{k2}" + new_key = wildcard_normalize(new_key) + read_wildcard(new_key, v2) + + +def read_wildcard_dict(wildcard_path): + global wildcard_dict + for root, directories, files in os.walk(wildcard_path, 
+
+
+def process(text, seed=None):
+    if seed is not None:
+        random.seed(seed)
+
+    def replace_options(string):
+        replacements_found = False
+
+        def replace_option(match):
+            nonlocal replacements_found
+            options = match.group(1).split('|')
+
+            multi_select_pattern = options[0].split('$$')
+            select_range = None
+            select_sep = ' '
+            range_pattern = r'(\d+)(-(\d+))?'
+            range_pattern2 = r'-(\d+)'
+
+            if len(multi_select_pattern) > 1:
+                r = re.match(range_pattern, options[0])
+
+                if r is None:
+                    r = re.match(range_pattern2, options[0])
+                    a = '1'
+                    # guard against malformed counts such as "{$$a|b}"
+                    b = r.group(1).strip() if r is not None else None
+                else:
+                    a = r.group(1).strip()
+                    try:
+                        b = r.group(3).strip()
+                    except AttributeError:
+                        # group(3) is absent when only a single count is given
+                        b = None
+
+                if r is not None:
+                    if b is not None and is_numeric_string(a) and is_numeric_string(b):
+                        # PATTERN: num1-num2
+                        select_range = int(a), int(b)
+                    elif is_numeric_string(a):
+                        # PATTERN: num
+                        x = int(a)
+                        select_range = (x, x)
+
+                if select_range is not None and len(multi_select_pattern) == 2:
+                    # PATTERN: count$$
+                    options[0] = multi_select_pattern[1]
+                elif select_range is not None and len(multi_select_pattern) == 3:
+                    # PATTERN: count$$ sep $$
+                    select_sep = multi_select_pattern[1]
+                    options[0] = multi_select_pattern[2]
+
+            adjusted_probabilities = []
+
+            total_prob = 0
+
+            for option in options:
+                parts = option.split('::', 1)
+                if len(parts) == 2 and is_numeric_string(parts[0].strip()):
+                    config_value = float(parts[0].strip())
+                else:
+                    config_value = 1  # Default value if no configuration is provided
+
+                adjusted_probabilities.append(config_value)
+                total_prob += config_value
+
+            normalized_probabilities = [prob / total_prob for prob in adjusted_probabilities]
+
+            if select_range is None:
+                select_count = 1
+            else:
+                select_count = random.randint(select_range[0], select_range[1])
+
+            if select_count > len(options):
+                selected_items = options
+            else:
+                selected_items = random.choices(options, weights=normalized_probabilities, k=select_count)
+                selected_items = set(selected_items)
+
+                try_count = 0
+                while len(selected_items) < select_count and try_count < 10:
+                    remaining_count = select_count - len(selected_items)
+                    additional_items = random.choices(options, weights=normalized_probabilities, k=remaining_count)
+                    selected_items |= set(additional_items)
+                    try_count += 1
+
+            # strip the '<weight>::' prefix from each selected option
+            selected_items2 = [re.sub(r'^\s*[0-9.]+::', '', x, 1) for x in selected_items]
+            replacement = select_sep.join(selected_items2)
+
+            replacements_found = True
+            return replacement
+
+        pattern = r'{([^{}]*?)}'
+        replaced_string = re.sub(pattern, replace_option, string)
+
+        return replaced_string, replacements_found
+
+    def replace_wildcard(string):
+        global wildcard_dict
+        pattern = r"__([\w.\-+/*\\]+)__"
+        matches = re.findall(pattern, string)
+
+        replacements_found = False
+
+        for match in matches:
+            keyword = match.lower()
+            keyword = wildcard_normalize(keyword)
+            if keyword in wildcard_dict:
+                replacement = random.choice(wildcard_dict[keyword])
+                replacements_found = True
+                string = string.replace(f"__{match}__", replacement, 1)
+            elif '*' in keyword:
+                # escape '+' so it is matched literally in the glob pattern
+                subpattern = keyword.replace('*', '.*').replace('+', '\\+')
+                total_patterns = []
+                found = False
+                for k, v in wildcard_dict.items():
+                    if re.match(subpattern, k) is not None:
+                        total_patterns += v
+                        found = True
+
+                if found:
+                    replacement = random.choice(total_patterns)
+                    replacements_found = True
+                    string = string.replace(f"__{match}__", replacement, 1)
+            elif '/' not in keyword:
+                string_fallback = string.replace(f"__{match}__", f"__*/{match}__", 1)
+                string, replacements_found = replace_wildcard(string_fallback)
+
+        return string, replacements_found
+
+    replace_depth = 100
+    stop_unwrap = False
+    while not stop_unwrap and replace_depth > 1:
+        replace_depth -= 1  # prevent infinite loop
+
+        # pass1: replace options
+        pass1, is_replaced1 = replace_options(text)
+
+        while is_replaced1:
+            pass1, is_replaced1 = replace_options(pass1)
+
+        # pass2: replace wildcards
+        text, is_replaced2 = replace_wildcard(pass1)
+        stop_unwrap = not is_replaced1 and not is_replaced2
+
+    return text
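+
+
+# Illustrative inputs for the option syntax handled by process() above
+# (a sketch; the actual output depends on the RNG seed):
+#
+#   process("{red|green|blue} hair")        # picks one option, uniform odds
+#   process("{2::red|1::blue} hair")        # 'red' is twice as likely as 'blue'
+#   process("{2$$red|green|blue}")          # picks 2 options (de-duplicated), space-separated
+#   process("{1-3$$, $$red|green|blue}")    # picks 1..3 options, joined with ", "
+#   process("a {a|{b|c}} choice")           # inner braces resolve first, outer on a later pass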
+
+
+def is_numeric_string(input_str):
+    return re.match(r'^-?\d+(\.\d+)?$', input_str) is not None
+
+
+def safe_float(x):
+    if is_numeric_string(x):
+        return float(x)
+    else:
+        return 1.0
+
+
+def extract_lora_values(string):
+    # matches <lora:...> tags, capturing everything between '<lora:' and '>'
+    pattern = r'<lora:([^>]+)>'
+    matches = re.findall(pattern, string)
+
+    def touch_lbw(text):
+        return re.sub(r'LBW=[A-Za-z][A-Za-z0-9_-]*:', r'LBW=', text)
+
+    items = [touch_lbw(match.strip(':')) for match in matches]
+
+    added = set()
+    result = []
+    for item in items:
+        item = item.split(':')
+
+        lora = None
+        a = None
+        b = None
+        lbw = None
+        lbw_a = None
+        lbw_b = None
+
+        if len(item) > 0:
+            lora = item[0]
+
+            for sub_item in item[1:]:
+                if is_numeric_string(sub_item):
+                    if a is None:
+                        a = float(sub_item)
+                    elif b is None:
+                        b = float(sub_item)
+                elif sub_item.startswith("LBW="):
+                    for lbw_item in sub_item[4:].split(';'):
+                        if lbw_item.startswith("A="):
+                            lbw_a = safe_float(lbw_item[2:].strip())
+                        elif lbw_item.startswith("B="):
+                            lbw_b = safe_float(lbw_item[2:].strip())
+                        elif lbw_item.strip() != '':
+                            lbw = lbw_item
+
+        if a is None:
+            a = 1.0
+        if b is None:
+            b = 1.0
+
+        if lora is not None and lora not in added:
+            result.append((lora, a, b, lbw, lbw_a, lbw_b))
+            added.add(lora)
+
+    return result
+
+
+def remove_lora_tags(string):
+    # strips the <lora:...> tags matched above out of the prompt
+    pattern = r'<lora:[^>]+>'
+    result = re.sub(pattern, '', string)
+
+    return result
+
+
+def resolve_lora_name(lora_name_cache, name):
+    if os.path.exists(name):
+        return name
+    else:
+        if len(lora_name_cache) == 0:
+            lora_name_cache.extend(folder_paths.get_filename_list("loras"))
+
+        for x in lora_name_cache:
+            if x.endswith(name):
+                return x
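+
+
+# A quick illustration of the tag grammar consumed above (the lora name
+# "detail" is hypothetical; this is a sketch, not part of the module):
+#
+#   extract_lora_values("<lora:detail:0.8> a girl")
+#       -> [("detail", 0.8, 1.0, None, None, None)]
+#   extract_lora_values("<lora:detail:0.8:0.5:LBW=0,0,1;A=4>")
+#       -> [("detail", 0.8, 0.5, "0,0,1", 4.0, None)]
+#   remove_lora_tags("<lora:detail:0.8> a girl")
+#       -> " a girl"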
+
+
+def process_with_loras(wildcard_opt, model, clip, clip_encoder=None):
+    lora_name_cache = []
+
+    pass1 = process(wildcard_opt)
+    loras = extract_lora_values(pass1)
+    pass2 = remove_lora_tags(pass1)
+
+    for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:
+        # entries in supported_pt_extensions carry a leading dot, so compare '.<ext>'
+        if ('.' + lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:
+            lora_name = lora_name + ".safetensors"
+
+        lora_name = resolve_lora_name(lora_name_cache, lora_name)
+
+        path = folder_paths.get_full_path("loras", lora_name)
+
+        if path is not None:
+            print(f"LOAD LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}")
+
+            def default_lora():
+                return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)
+
+            if lbw is not None:
+                if 'LoraLoaderBlockWeight //Inspire' not in nodes.NODE_CLASS_MAPPINGS:
+                    print("'LBW(Lora Block Weight)' was given, but the 'Inspire Pack' is not installed; the 'LBW=' attribute will be ignored.")
+                    model, clip = default_lora()
+                else:
+                    cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']
+                    model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, "", lbw)
+            else:
+                model, clip = default_lora()
+        else:
+            print(f"LORA NOT FOUND: {lora_name}")
+
+    print(f"CLIP: {pass2}")
+
+    if clip_encoder is None:
+        return model, clip, nodes.CLIPTextEncode().encode(clip, pass2)[0]
+    else:
+        return model, clip, clip_encoder.encode(clip, pass2)[0]
+
+
+def starts_with_regex(pattern, text):
+    regex = re.compile(pattern)
+    return bool(regex.match(text))
+
+
+def split_to_dict(text):
+    pattern = r'\[([A-Za-z0-9_. ]+)\]([^\[]+)(?=\[|$)'
+    matches = re.findall(pattern, text)
+
+    result_dict = {key: value.strip() for key, value in matches}
+
+    return result_dict
+
+
+class WildcardChooser:
+    def __init__(self, items, randomize_when_exhaust):
+        self.i = 0
+        self.items = items
+        self.randomize_when_exhaust = randomize_when_exhaust
+
+    def get(self, seg):
+        if self.i >= len(self.items):
+            self.i = 0
+            if self.randomize_when_exhaust:
+                random.shuffle(self.items)
+
+        item = self.items[self.i]
+        self.i += 1
+
+        return item
+
+
+class WildcardChooserDict:
+    def __init__(self, items):
+        self.items = items
+
+    def get(self, seg):
+        text = ""
+        if 'ALL' in self.items:
+            text = self.items['ALL']
+
+        if seg.label in self.items:
+            text += self.items[seg.label]
+
+        return text
+
+
+def process_wildcard_for_segs(wildcard):
+    if wildcard.startswith('[LAB]'):
+        raw_items = split_to_dict(wildcard)
+
+        items = {}
+        for k, v in raw_items.items():
+            v = v.strip()
+            if v != '':
+                items[k] = v
+
+        return 'LAB', WildcardChooserDict(items)
+
+    elif starts_with_regex(r"\[(ASC|DSC|RND)\]", wildcard):
+        mode = wildcard[1:4]
+        raw_items = wildcard[5:].split('[SEP]')
+
+        items = []
+        for x in raw_items:
+            x = x.strip()
+            if x != '':
+                items.append(x)
+
+        if mode == 'RND':
+            random.shuffle(items)
+            return mode, WildcardChooser(items, True)
+        else:
+            return mode, WildcardChooser(items, False)
+
+    else:
+        return None, WildcardChooser([wildcard], False)
diff --git a/custom_nodes/ComfyUI-Impact-Pack/node_list.json b/custom_nodes/ComfyUI-Impact-Pack/node_list.json
new file mode 100644
index 0000000000000000000000000000000000000000..78a0b903a8006b6b536fc31f91e8990442497965
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/node_list.json
@@ -0,0 +1,4 @@
+{
+    "Segs Mask": "This node has been renamed to 'ImpactSegsAndMask'",
+    "Segs Mask ForEach": "This node has been renamed to 'ImpactSegsAndMaskForEach'"
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb b/custom_nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..6435059cb8fbe9e5e27451fa959965309b7626bf
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/notebook/comfyui_colab_impact_pack.ipynb
@@ -0,0 +1,172 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "aaaaaaaaaa"
+   },
+   "source": [
+    "Git clone the repo and install the requirements. 
(ignore the pip errors about protobuf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bbbbbbbbbb" + }, + "outputs": [], + "source": [ + "#@title Environment Setup\n", + "\n", + "from pathlib import Path\n", + "\n", + "OPTIONS = {}\n", + "\n", + "WORKSPACE = 'ComfyUI'\n", + "USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n", + "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n", + "\n", + "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n", + "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n", + "\n", + "if OPTIONS['USE_GOOGLE_DRIVE']:\n", + " !echo \"Mounting Google Drive...\"\n", + " %cd /\n", + " \n", + " from google.colab import drive\n", + " drive.mount('/content/drive')\n", + "\n", + " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n", + " \n", + " %cd /content/drive/MyDrive\n", + "\n", + "![ ! -d $WORKSPACE ] && echo \"-= Initial setup ComfyUI (Original)=-\" && git clone https://github.com/comfyanonymous/ComfyUI\n", + "%cd $WORKSPACE\n", + "\n", + "if OPTIONS['UPDATE_COMFY_UI']:\n", + " !echo \"-= Updating ComfyUI =-\"\n", + " !git pull\n", + " !rm \"/content/drive/MyDrive/ComfyUI/custom_nodes/comfyui-impact-pack.py\"\n", + "\n", + "%cd custom_nodes\n", + "!git clone https://github.com/ltdrdata/ComfyUI-Impact-Pack\n", + "%cd $WORKSPACE\n", + "\n", + "!echo -= Install dependencies =-\n", + "!pip -q install xformers -r requirements.txt\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "kkkkkkkkkkkkkk" + }, + "source": [ + "### Run ComfyUI with localtunnel (Recommended Way)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "jjjjjjjjjjjjj", + "outputId": "83be9411-d939-4813-e6c1-80e75bf8e80d" + }, + "outputs": [], + "source": [ + "!npm install -g localtunnel\n", + "\n", + "import subprocess\n", + "import threading\n", + "import time\n", + "import socket\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\")\n", + " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", + " for line in p.stdout:\n", + " print(line.decode(), end='')\n", + "\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "gggggggggg" + }, + "source": [ + "### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n", + "\n", + "You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n", + "\n", + "If you want to open it in another window use the link.\n", + "\n", + "Note that some UI features like live image previews won't work because the colab iframe blocks websockets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hhhhhhhhhh" + }, + "outputs": [], + "source": [ + "import threading\n", + "import time\n", + "import socket\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " from google.colab import output\n", + " output.serve_kernel_port_as_iframe(port, height=1024)\n", + " print(\"to open it in a window you can open this link here:\")\n", + " output.serve_kernel_port_as_window(port)\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/custom_nodes/ComfyUI-Impact-Pack/requirements.txt b/custom_nodes/ComfyUI-Impact-Pack/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4244bc45dcea95a57ac30c1996d22eeb7c7f945 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/requirements.txt @@ -0,0 +1,5 @@ +segment-anything +scikit-image +piexif +transformers +opencv-python-headless diff --git a/custom_nodes/ComfyUI-Impact-Pack/test/advanced-sampler.json b/custom_nodes/ComfyUI-Impact-Pack/test/advanced-sampler.json new file mode 100644 index 0000000000000000000000000000000000000000..f4bb5149d0277653058d055f55eb5eebd9080db1 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/advanced-sampler.json @@ -0,0 +1,976 @@ +{ + "last_node_id": 27, + "last_link_id": 46, + "nodes": [ + { + "id": 11, + "type": "EditBasicPipe", + "pos": [ + 1260, + 590 + ], + "size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 15 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 17 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 20 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 420, + 670 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 16 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 17 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, masterpiece, 1girl is sitting in the cafe terrace, (colorful hair:1.1)" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": 
"CONDITIONING", + "links": [ + 13 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, masterpiece, 1girl is sitting in the cafe terrace" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 14 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, low quality:1.4, worst quality:1.4" + ] + }, + { + "id": 10, + "type": "ToBasicPipe", + "pos": [ + 952, + 189 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 10 + }, + { + "name": "clip", + "type": "CLIP", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 13 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 14 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 15, + 19, + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 22, + "type": "FromBasicPipe", + "pos": [ + 880, + 1040 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 33 + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "clip", + "type": "CLIP", + "links": null, + "shape": 3 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 40 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 35 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 4 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe" + } + }, + { + "id": 24, + "type": "VAEDecode", + "pos": [ + 1938, + 935 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 46 + }, + { + "name": "vae", + "type": "VAE", + "link": 40 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 41 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -5, + 212 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 10 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5, + 11, + 16 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 12, + 31 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "V07_v07.safetensors" + ] + }, + { + "id": 25, + "type": "PreviewImage", + "pos": [ + 2175, + 1079 + ], + "size": { + "0": 516, + "1": 424 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ 
+ { + "name": "images", + "type": "IMAGE", + "link": 41 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 13, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1727, + 192 + ], + "size": { + "0": 355.20001220703125, + "1": 154 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 19 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 42 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "fixed", + "normal" + ] + }, + { + "id": 16, + "type": "EmptyLatentImage", + "pos": [ + 532, + 1143 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 28, + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 792, + 512, + 1 + ] + }, + { + "id": 19, + "type": "KSampler", + "pos": [ + 1194.657802060547, + 1075.971700888672 + ], + "size": [ + 315, + 473.9999771118164 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 34 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 35 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 36 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 28 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 30 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1107040072933062, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 27, + "type": "TwoAdvancedSamplersForMask", + "pos": [ + 2187, + 266 + ], + "size": [ + 315, + 426.00000762939453 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 45 + }, + { + "name": "base_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 42 + }, + { + "name": "mask_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 43 + }, + { + "name": "mask", + "type": "MASK", + "link": 44 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "TwoAdvancedSamplersForMask" + }, + "widgets_values": [ + 1107040072933062, + "fixed", + 20, + 1, + 10 + ] + }, + { + "id": 23, + "type": "PreviewBridge", + "pos": [ + 1778, + 1098 + ], + "size": { + "0": 315, + "1": 290 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "PreviewBridge" + }, + "widgets_values": [ + { + "filename": "clipspace-mask-348148.69999999925.png", + "subfolder": "clipspace", + "type": "input", + "image_hash": 492469318636598500, + "forward_filename": "ComfyUI_00001_.png", + "forward_subfolder": "", + "forward_type": "temp" + } + ] + }, + { + "id": 15, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1719, + 592 + ], + "size": { + "0": 355.20001220703125, + "1": 154 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": 
"basic_pipe", + "type": "BASIC_PIPE", + "link": 20 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 43 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "fixed", + "normal" + ] + }, + { + "id": 20, + "type": "VAEDecode", + "pos": [ + 1546, + 972 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 30 + }, + { + "name": "vae", + "type": "VAE", + "link": 31 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + } + ], + "links": [ + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 10, + 4, + 0, + 10, + 0, + "MODEL" + ], + [ + 11, + 4, + 1, + 10, + 1, + "CLIP" + ], + [ + 12, + 4, + 2, + 10, + 2, + "VAE" + ], + [ + 13, + 6, + 0, + 10, + 3, + "CONDITIONING" + ], + [ + 14, + 7, + 0, + 10, + 4, + "CONDITIONING" + ], + [ + 15, + 10, + 0, + 11, + 0, + "BASIC_PIPE" + ], + [ + 16, + 4, + 1, + 12, + 0, + "CLIP" + ], + [ + 17, + 12, + 0, + 11, + 4, + "CONDITIONING" + ], + [ + 19, + 10, + 0, + 13, + 0, + "BASIC_PIPE" + ], + [ + 20, + 11, + 0, + 15, + 0, + "BASIC_PIPE" + ], + [ + 28, + 16, + 0, + 19, + 3, + "LATENT" + ], + [ + 30, + 19, + 0, + 20, + 0, + "LATENT" + ], + [ + 31, + 4, + 2, + 20, + 1, + "VAE" + ], + [ + 33, + 10, + 0, + 22, + 0, + "BASIC_PIPE" + ], + [ + 34, + 22, + 0, + 19, + 0, + "MODEL" + ], + [ + 35, + 22, + 3, + 19, + 1, + "CONDITIONING" + ], + [ + 36, + 22, + 4, + 19, + 2, + "CONDITIONING" + ], + [ + 37, + 20, + 0, + 23, + 0, + "IMAGE" + ], + [ + 40, + 22, + 2, + 24, + 1, + "VAE" + ], + [ + 41, + 24, + 0, + 25, + 0, + "IMAGE" + ], + [ + 42, + 13, + 0, + 27, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 43, + 15, + 0, + 27, + 2, + "KSAMPLER_ADVANCED" + ], + [ + 44, + 23, + 1, + 27, + 3, + "MASK" + ], + [ + 45, + 16, + 0, + 27, + 0, + "LATENT" + ], + [ + 46, + 27, + 0, + 24, + 0, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test-sdxl.json b/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test-sdxl.json new file mode 100644 index 0000000000000000000000000000000000000000..cf510fdf3a242b233f923185c8fb22109c68268f --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test-sdxl.json @@ -0,0 +1,1989 @@ +{ + "last_node_id": 52, + "last_link_id": 150, + "nodes": [ + { + "id": 12, + "type": "CLIPTextEncodeSDXLRefiner", + "pos": [ + 480, + 990 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 11 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 13 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXLRefiner" + }, + "widgets_values": [ + 6, + 1024, + 1024, + "ugly, male, western" + ] + }, + { + "id": 14, + "type": "UltralyticsDetectorProvider", + "pos": [ + 963, + 955 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [ + 16 + ], + "shape": 3 + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": null, + "shape": 3 
+ } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ] + }, + { + "id": 18, + "type": "PreviewImage", + "pos": [ + 3270, + 810 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 20 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 15, + "type": "SAMLoader", + "pos": [ + 967, + 1086 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 17 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "CPU" + ] + }, + { + "id": 9, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 640, + -550 + ], + "size": { + "0": 400, + "1": 270 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "a closeup photograph of cute girl", + "closeup" + ] + }, + { + "id": 7, + "type": "CheckpointLoaderSimple", + "pos": [ + 60, + -580 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 2, + 6, + 7 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL/rundiffusionXL_beta.safetensors" + ] + }, + { + "id": 13, + "type": "LoadImage", + "pos": [ + 257, + 164 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 15, + 64, + 112 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "chunli.png", + "image" + ] + }, + { + "id": 10, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 640, + -230 + ], + "size": { + "0": 400, + "1": 270 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 7 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "ugly, male", + "ugly, male" + ] + }, + { + "id": 17, + "type": "PreviewImage", + "pos": [ + 3270, + 450 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 19 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 8, + "type": "CheckpointLoaderSimple", + "pos": [ + 120, + 590 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": 
"MODEL", + "links": [ + 69 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 10, + 11 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL/sd_xl_refiner_1.0_0.9vae.safetensors" + ] + }, + { + "id": 11, + "type": "CLIPTextEncodeSDXLRefiner", + "pos": [ + 483, + 738 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 10 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 70 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXLRefiner" + }, + "widgets_values": [ + 6, + 1024, + 1024, + "high quality" + ] + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 2810, + -280 + ], + "size": { + "0": 344.04876708984375, + "1": 580.6563720703125 + }, + "flags": {}, + "order": 7, + "mode": 2, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 64 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 3200, + -280 + ], + "size": { + "0": 336.36944580078125, + "1": 585.6206665039062 + }, + "flags": {}, + "order": 18, + "mode": 2, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 18 + } + ], + "title": "SDXL Base only", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 6, + "type": "ToDetailerPipeSDXL", + "pos": [ + 1199, + 379 + ], + "size": { + "0": 400, + "1": 340 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1 + }, + { + "name": "clip", + "type": "CLIP", + "link": 2 + }, + { + "name": "vae", + "type": "VAE", + "link": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 69 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 5, + "slot_index": 6 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 70 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 13, + "slot_index": 8 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 16, + "slot_index": 9 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 17, + "slot_index": 10 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": null + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 114 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 38, + "type": "PreviewImage", + "pos": [ + 3590, + -280 + ], + "size": { + "0": 336.36944580078125, + "1": 585.6206665039062 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 67 + } + ], + "title": "SDXL Base + Refiner", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 41, + "type": "BasicPipeToDetailerPipeSDXL", + "pos": [ + 2160, + 1010 + ], + "size": { + "0": 
405.5999755859375, + "1": 200 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "base_basic_pipe", + "type": "BASIC_PIPE", + "link": 87 + }, + { + "name": "refiner_basic_pipe", + "type": "BASIC_PIPE", + "link": 88 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 133 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 134, + "slot_index": 3 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 135, + "slot_index": 4 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": 136, + "slot_index": 5 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 86, + 110 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 44, + "type": "FaceDetailerPipe", + "pos": [ + 3565, + 427 + ], + "size": { + "0": 456, + "1": 902 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 104, + "slot_index": 0 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 103 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [], + "shape": 6, + "slot_index": 1 + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 105 + ], + "shape": 6, + "slot_index": 2 + }, + { + "name": "mask", + "type": "MASK", + "links": null, + "shape": 3 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 1024, + false, + 768, + 104033248204033, + "fixed", + 30, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + 0.6, + 30, + 3, + "center-1", + 30, + 0.93, + 0, + 0.7, + "False", + 10, + 0.1 + ] + }, + { + "id": 45, + "type": "PreviewImage", + "pos": [ + 4109.76494140625, + 483.81650390625 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 105 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 1, + "type": "FaceDetailerPipe", + "pos": [ + 2720, + 430 + ], + "size": { + "0": 456, + "1": 902 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 15, + "slot_index": 0 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 86 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 18, + 67, + 104, + 106 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 19 + ], + "shape": 6, + "slot_index": 1 + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 20 + ], + "shape": 6, + "slot_index": 2 + }, + { + "name": "mask", + "type": "MASK", + "links": null, + "shape": 3 + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 103 + ], + "shape": 3, + "slot_index": 4 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 1024, + false, + 768, + 
104033248204033, + "fixed", + 30, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + 0.6, + 30, + 3, + "center-1", + 30, + 0.93, + 0, + 0.7, + "False", + 10, + 0.1 + ] + }, + { + "id": 43, + "type": "ToBasicPipe", + "pos": [ + 1790, + 1130 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 142 + }, + { + "name": "clip", + "type": "CLIP", + "link": 143 + }, + { + "name": "vae", + "type": "VAE", + "link": 145, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 150 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 88, + 108 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 49, + "type": "ImpactSimpleDetectorSEGSPipe", + "pos": [ + 2236.375298828125, + 1520.8711416015626 + ], + "size": { + "0": 315, + "1": 246 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 110, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 112, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactSimpleDetectorSEGSPipe" + }, + "widgets_values": [ + 0.5, + 0, + 3, + 10, + 0.5, + 0, + 0, + 0.7 + ] + }, + { + "id": 47, + "type": "DetailerForEachPipe", + "pos": [ + 2725, + 1448 + ], + "size": { + "0": 456.5638732910156, + "1": 559.1150512695312 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 106 + }, + { + "name": "segs", + "type": "SEGS", + "link": 111, + "slot_index": 1 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 107, + "slot_index": 2 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null + }, + { + "name": "refiner_basic_pipe_opt", + "type": "BASIC_PIPE", + "link": 108 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 113 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "segs", + "type": "SEGS", + "links": null, + "shape": 3 + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "cnet_images", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "DetailerForEachPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 450265819682234, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + true, + "", + 0.2 + ] + }, + { + "id": 50, + "type": "PreviewImage", + "pos": [ + 3448.7228955078117, + 1463.962194335937 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 113 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 40, + "type": "ToDetailerPipeSDXL", + "pos": [ + 2226, + 539 + ], + "size": { + "0": 400, + "1": 340 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 125 + }, + { + "name": "clip", + "type": "CLIP", + "link": 116, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "link": 117 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 120, + "slot_index": 3 + }, 
+ { + "name": "negative", + "type": "CONDITIONING", + "link": 121 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 124, + "slot_index": 5 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 126 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 127, + "slot_index": 7 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 128 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 129 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 130, + "slot_index": 10 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 131 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": 132, + "slot_index": 12 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipeSDXL" + }, + "widgets_values": [ + "", + "SDXL/person/IU_leejieun_SDXL.safetensors" + ] + }, + { + "id": 42, + "type": "ToBasicPipe", + "pos": [ + 1899, + 906 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 137 + }, + { + "name": "clip", + "type": "CLIP", + "link": 138, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "link": 139, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 147, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 148, + "slot_index": 4 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 87, + 107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ToBasicPipe" + } + }, + { + "id": 51, + "type": "FromDetailerPipeSDXL", + "pos": [ + 1650, + 520 + ], + "size": { + "0": 393, + "1": 286 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 114 + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": null, + "shape": 3 + }, + { + "name": "model", + "type": "MODEL", + "links": [ + 125, + 137 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 116, + 138, + 143 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 117, + 139, + 145 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 120, + 147 + ], + "shape": 3, + "slot_index": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 121, + 148 + ], + "shape": 3, + "slot_index": 5 + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 129, + 133 + ], + "shape": 3, + "slot_index": 6 + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 130, + 134 + ], + "shape": 3, + "slot_index": 7 + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 131, + 135 + ], + "shape": 3, + "slot_index": 8 + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": [ + 132, + 136 + ], + "shape": 3, + "slot_index": 9 + }, + { + "name": "refiner_model", + "type": "MODEL", + "links": [ + 124, + 142 + ], + "shape": 3, + "slot_index": 10 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "links": [ + 126 + ], + "shape": 3, + "slot_index": 11 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", 
+ "links": [ + 127, + 149 + ], + "shape": 3, + "slot_index": 12 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "links": [ + 128, + 150 + ], + "shape": 3, + "slot_index": 13 + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipeSDXL" + } + } + ], + "links": [ + [ + 1, + 7, + 0, + 6, + 0, + "MODEL" + ], + [ + 2, + 7, + 1, + 6, + 1, + "CLIP" + ], + [ + 3, + 7, + 2, + 6, + 2, + "VAE" + ], + [ + 5, + 8, + 1, + 6, + 6, + "CLIP" + ], + [ + 6, + 7, + 1, + 9, + 0, + "CLIP" + ], + [ + 7, + 7, + 1, + 10, + 0, + "CLIP" + ], + [ + 8, + 10, + 0, + 6, + 4, + "CONDITIONING" + ], + [ + 9, + 9, + 0, + 6, + 3, + "CONDITIONING" + ], + [ + 10, + 8, + 1, + 11, + 0, + "CLIP" + ], + [ + 11, + 8, + 1, + 12, + 0, + "CLIP" + ], + [ + 13, + 12, + 0, + 6, + 8, + "CONDITIONING" + ], + [ + 15, + 13, + 0, + 1, + 0, + "IMAGE" + ], + [ + 16, + 14, + 0, + 6, + 9, + "BBOX_DETECTOR" + ], + [ + 17, + 15, + 0, + 6, + 10, + "SAM_MODEL" + ], + [ + 18, + 1, + 0, + 16, + 0, + "IMAGE" + ], + [ + 19, + 1, + 1, + 17, + 0, + "IMAGE" + ], + [ + 20, + 1, + 2, + 18, + 0, + "IMAGE" + ], + [ + 64, + 13, + 0, + 37, + 0, + "IMAGE" + ], + [ + 67, + 1, + 0, + 38, + 0, + "IMAGE" + ], + [ + 69, + 8, + 0, + 6, + 5, + "MODEL" + ], + [ + 70, + 11, + 0, + 6, + 7, + "CONDITIONING" + ], + [ + 86, + 41, + 0, + 1, + 1, + "DETAILER_PIPE" + ], + [ + 87, + 42, + 0, + 41, + 0, + "BASIC_PIPE" + ], + [ + 88, + 43, + 0, + 41, + 1, + "BASIC_PIPE" + ], + [ + 103, + 1, + 4, + 44, + 1, + "DETAILER_PIPE" + ], + [ + 104, + 1, + 0, + 44, + 0, + "IMAGE" + ], + [ + 105, + 44, + 2, + 45, + 0, + "IMAGE" + ], + [ + 106, + 1, + 0, + 47, + 0, + "IMAGE" + ], + [ + 107, + 42, + 0, + 47, + 2, + "BASIC_PIPE" + ], + [ + 108, + 43, + 0, + 47, + 4, + "BASIC_PIPE" + ], + [ + 110, + 41, + 0, + 49, + 0, + "DETAILER_PIPE" + ], + [ + 111, + 49, + 0, + 47, + 1, + "SEGS" + ], + [ + 112, + 13, + 0, + 49, + 1, + "IMAGE" + ], + [ + 113, + 47, + 0, + 50, + 0, + "IMAGE" + ], + [ + 114, + 6, + 0, + 51, + 0, + "DETAILER_PIPE" + ], + [ + 116, + 51, + 2, + 40, + 1, + "CLIP" + ], + [ + 117, + 51, + 3, + 40, + 2, + "VAE" + ], + [ + 120, + 51, + 4, + 40, + 3, + "CONDITIONING" + ], + [ + 121, + 51, + 5, + 40, + 4, + "CONDITIONING" + ], + [ + 124, + 51, + 10, + 40, + 5, + "MODEL" + ], + [ + 125, + 51, + 1, + 40, + 0, + "MODEL" + ], + [ + 126, + 51, + 11, + 40, + 6, + "CLIP" + ], + [ + 127, + 51, + 12, + 40, + 7, + "CONDITIONING" + ], + [ + 128, + 51, + 13, + 40, + 8, + "CONDITIONING" + ], + [ + 129, + 51, + 6, + 40, + 9, + "BBOX_DETECTOR" + ], + [ + 130, + 51, + 7, + 40, + 10, + "SAM_MODEL" + ], + [ + 131, + 51, + 8, + 40, + 11, + "SEGM_DETECTOR" + ], + [ + 132, + 51, + 9, + 40, + 12, + "DETAILER_HOOK" + ], + [ + 133, + 51, + 6, + 41, + 2, + "BBOX_DETECTOR" + ], + [ + 134, + 51, + 7, + 41, + 3, + "SAM_MODEL" + ], + [ + 135, + 51, + 8, + 41, + 4, + "SEGM_DETECTOR" + ], + [ + 136, + 51, + 9, + 41, + 5, + "DETAILER_HOOK" + ], + [ + 137, + 51, + 1, + 42, + 0, + "MODEL" + ], + [ + 138, + 51, + 2, + 42, + 1, + "CLIP" + ], + [ + 139, + 51, + 3, + 42, + 2, + "VAE" + ], + [ + 142, + 51, + 10, + 43, + 0, + "MODEL" + ], + [ + 143, + 51, + 2, + 43, + 1, + "CLIP" + ], + [ + 145, + 51, + 3, + 43, + 2, + "VAE" + ], + [ + 147, + 51, + 4, + 42, + 3, + "CONDITIONING" + ], + [ + 148, + 51, + 5, + 42, + 4, + "CONDITIONING" + ], + [ + 149, + 51, + 12, + 43, + 3, + "CONDITIONING" + ], + [ + 150, + 51, + 13, + 43, + 4, + "CONDITIONING" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git 
a/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test.json b/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test.json new file mode 100644 index 0000000000000000000000000000000000000000..56ed4239197a59dfb2fc1aa72facff138279aead --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/detailer-pipe-test.json @@ -0,0 +1,3489 @@ +{ + "last_node_id": 87, + "last_link_id": 214, + "nodes": [ + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5, + "label": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0, + "label": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, worst quality:1.4, low quality:1.4" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3, + "label": "clip" + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0, + "label": "CONDITIONING" + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, 2 girls on table " + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 473, + 609 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0, + "label": "LATENT" + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 768, + 1 + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1209, + 188 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7, + "label": "samples" + }, + { + "name": "vae", + "type": "VAE", + "link": 8, + "label": "vae" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 10 + ], + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 30, + "type": "PreviewImage", + "pos": [ + 2532, + -7 + ], + "size": { + "0": 575.2411499023438, + "1": 561.0116577148438 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 179, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 24, + "type": "SAMLoader", + "pos": [ + 861, + 1300 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 19, + 33 + ], + "shape": 3, + "slot_index": 0, + "label": "SAM_MODEL" + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 32, + "type": "BasicPipeToDetailerPipe", + "pos": [ + 1396, + 1143 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 34, + "label": "basic_pipe" + }, + { + "name": "bbox_detector", 
+ "type": "BBOX_DETECTOR", + "link": 202, + "slot_index": 1, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 33, + "slot_index": 2, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 213, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipe" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, detailed eyes, \n__face_loras__ [faint smile|surprise|laugh]", + "Select the LoRA to add to the text" + ] + }, + { + "id": 36, + "type": "MaskToImage", + "pos": [ + 2650, + 1230 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 182, + "label": "mask" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 59 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 52, + "type": "BboxDetectorSEGS", + "pos": [ + 4948, + 677 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 85, + "label": "bbox_detector" + }, + { + "name": "image", + "type": "IMAGE", + "link": 188, + "label": "image" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 87, + 160 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "BboxDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 10, + 3, + 10 + ] + }, + { + "id": 46, + "type": "DetailerPipeToBasicPipe", + "pos": [ + 4753, + 1188 + ], + "size": { + "0": 304.79998779296875, + "1": 26 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 77, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 155, + 196 + ], + "shape": 3, + "slot_index": 0, + "label": "basic_pipe" + } + ], + "properties": { + "Node name for S&R": "DetailerPipeToBasicPipe" + } + }, + { + "id": 60, + "type": "PreviewImage", + "pos": [ + 6270, + 2420 + ], + "size": { + "0": 600, + "1": 670 + }, + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 166, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 57, + "type": "PreviewImage", + "pos": [ + 5997, + 1424 + ], + "size": { + "0": 840, + "1": 640 + }, + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 144, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 54, + "type": "PreviewImage", + "pos": [ + 6486, + 705 + ], + "size": { + "0": 740, + "1": 580 + }, + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 197, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 64, + "type": "PreviewImage", + "pos": [ + 6800, + -300 + ], + "size": { + "0": 570, + "1": 590 + }, + 
"flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 156, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 4070, + 636 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 187, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 43, + "type": "MaskToImage", + "pos": [ + 4081, + 949 + ], + "size": { + "0": 176.39999389648438, + "1": 26 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 190, + "label": "mask" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 75 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 44, + "type": "PreviewImage", + "pos": [ + 4072, + 1029 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 75, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 2890, + 1250 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 59, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 22, + "type": "BasicPipeToDetailerPipe", + "pos": [ + 1396, + 866 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 17, + "label": "basic_pipe" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 201, + "slot_index": 1, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 19, + "slot_index": 2, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 212, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "BasicPipeToDetailerPipe" + }, + "widgets_values": [ + "photorealistic:1.4, best quality:1.4, detailed eyes, \n[|||] [faint smile|surprise|laugh]", + "Select the LoRA to add to the text" + ] + }, + { + "id": 75, + "type": "PreviewImage", + "pos": [ + 2600, + 1330 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 181, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 10, + "type": "PreviewBridge", + "pos": [ + 1462, + 175 + ], + "size": { + "0": 315, + "1": 290 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 10, + "label": "images" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 169, + 183 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + 
"shape": 3, + "label": "MASK" + } + ], + "properties": { + "Node name for S&R": "PreviewBridge" + }, + "widgets_values": [ + "#placeholder" + ] + }, + { + "id": 41, + "type": "PreviewImage", + "pos": [ + 4301, + 119 + ], + "size": { + "0": 492.20916748046875, + "1": 448.6293029785156 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 186, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 78, + "type": "PreviewImage", + "pos": [ + 4075, + 1364 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 189, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 863, + 183 + ], + "size": { + "0": 315, + "1": 474 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1, + "label": "model" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6, + "label": "negative" + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2, + "label": "latent_image" + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0, + "label": "LATENT" + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 885412539640489, + "fixed", + 15, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 45, + "type": "EditDetailerPipe", + "pos": [ + 4338, + 950 + ], + "size": { + "0": 284.0971374511719, + "1": 316.5133361816406 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 191, + "label": "detailer_pipe" + }, + { + "name": "model", + "type": "MODEL", + "link": null, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": null, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": null, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": null, + "label": "bbox_detector" + }, + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": null, + "label": "sam_model" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": null, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 77, + 82 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "EditDetailerPipe" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 65, + "type": "PreviewImage", + "pos": [ + 6430, + -300 + ], + "size": { + "0": 330, + "1": 250 + }, + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 157, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 53, + "type": "MaskToSEGS", + "pos": [ + 5558, + 989 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + 
"order": 38, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 88, + "label": "mask" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 138, + 154, + 195 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "MaskToSEGS" + }, + "widgets_values": [ + false, + 3, + false, + 10 + ] + }, + { + "id": 81, + "type": "DetailerForEachPipe", + "pos": [ + 6092, + 708 + ], + "size": { + "0": 329.5368957519531, + "1": 598 + }, + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 194, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 195, + "label": "segs" + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 196, + "label": "basic_pipe" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 197 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 44457634171318, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 72, + "type": "DetailerForEachDebugPipe", + "pos": [ + 5938, + -58 + ], + "size": { + "0": 330, + "1": 618 + }, + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 153, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 154, + "label": "segs" + }, + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 155, + "label": "basic_pipe" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 156 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped", + "type": "IMAGE", + "links": [ + 157 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 158 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_refined" + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "links": [ + 200 + ], + "shape": 6, + "slot_index": 3, + "label": "cropped_refined_alpha" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebugPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 66, + "type": "PreviewImage", + "pos": [ + 6430, + 30 + ], + "size": { + "0": 330, + "1": 260 + }, + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 158, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 82, + "type": "PreviewImage", + "pos": [ + 6435, + 355 + ], + "size": { + "0": 319.2451171875, + "1": 285.4361572265625 + }, + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 200, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 83, + "type": "UltralyticsDetectorProvider", + "pos": [ + 860, + 1160 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [ + 
201, + 202 + ], + "shape": 3, + "slot_index": 0, + "label": "BBOX_DETECTOR" + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": null, + "shape": 3, + "slot_index": 1, + "label": "SEGM_DETECTOR" + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "bbox/face_yolov8m.pt" + ] + }, + { + "id": 69, + "type": "DetailerForEach", + "pos": [ + 5610, + 1425 + ], + "size": { + "0": 315, + "1": 678 + }, + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 137, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 138, + "label": "segs" + }, + { + "name": "model", + "type": "MODEL", + "link": 139, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 140, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 141, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 142, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 143, + "label": "negative" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 144 + ], + "shape": 3, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "DetailerForEach" + }, + "widgets_values": [ + 256, + true, + 768, + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 50, + "type": "FromDetailerPipe", + "pos": [ + 4730, + 1460 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 82, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 139, + 161 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 140, + 162 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 141, + 163 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 142, + 164 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 143, + 165 + ], + "shape": 3, + "slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 85 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 83 + ], + "shape": 3, + "slot_index": 6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 204 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 51, + "type": "SAMDetectorCombined", + "pos": [ + 5125, + 894 + ], + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": 83, + "label": "sam_model" + }, + { + "name": "segs", + "type": "SEGS", + "link": 87, + "label": "segs" + }, + { + "name": "image", + 
"type": "IMAGE", + "link": 205, + "label": "image" + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 88 + ], + "shape": 3, + "slot_index": 0, + "label": "MASK" + } + ], + "properties": { + "Node name for S&R": "SAMDetectorCombined" + }, + "widgets_values": [ + "center-1", + 0, + 0.93, + 0, + 0.7, + "False" + ] + }, + { + "id": 85, + "type": "SEGSToImageList", + "pos": [ + 5569.134812187498, + 1289.240372597656 + ], + "size": { + "0": 304.79998779296875, + "1": 46 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 207, + "label": "segs" + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "link": 208, + "label": "fallback_image_opt" + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 209 + ], + "shape": 6, + "slot_index": 0, + "label": "IMAGE" + } + ], + "properties": { + "Node name for S&R": "SEGSToImageList" + } + }, + { + "id": 86, + "type": "PreviewImage", + "pos": [ + 6910, + 1420 + ], + "size": { + "0": 409.85064697265625, + "1": 614.9011840820312 + }, + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 209, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 39, + "type": "ToDetailerPipe", + "pos": [ + 3167, + 631 + ], + "size": { + "0": 400, + "1": 260 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 61, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 62, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 65, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 66, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 67, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 68, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 69, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 203, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 210 + ], + "shape": 3, + "slot_index": 0, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "ToDetailerPipe" + }, + "widgets_values": [ + "", + "Select the LoRA to add to the text" + ] + }, + { + "id": 76, + "type": "FaceDetailerPipe", + "pos": [ + 3648, + 641 + ], + "size": { + "0": 347.608154296875, + "1": 1060.470947265625 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 184, + "label": "image" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 210, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 186, + 188 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 187 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped_refined" + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 189 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_enhanced_alpha" + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 190 + ], + "shape": 3, + 
"slot_index": 3, + "label": "mask" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 191 + ], + "shape": 3, + "slot_index": 4, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "FaceDetailerPipe" + }, + "widgets_values": [ + 256, + true, + 768, + 284739423125169, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 10, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10 + ] + }, + { + "id": 49, + "type": "Reroute", + "pos": [ + 4967, + 568 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 211, + "label": "" + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 137, + 153, + 159, + 194 + ], + "slot_index": 0, + "label": "" + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 27, + "type": "PreviewImage", + "pos": [ + 2590, + 920 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 180, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 74, + "type": "FaceDetailer", + "pos": [ + 2050, + 580 + ], + "size": { + "0": 372.5969543457031, + "1": 1103.0477294921875 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 169, + "label": "image" + }, + { + "name": "model", + "type": "MODEL", + "link": 170, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 171, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 172, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 175, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 176, + "slot_index": 5, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "link": 177, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "link": 178, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "link": 214, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 179, + 211 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 180 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped_refined" + }, + { + "name": "cropped_enhanced_alpha", + "type": "IMAGE", + "links": [ + 181 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_enhanced_alpha" + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 182 + ], + "shape": 3, + "slot_index": 3, + "label": "mask" + }, + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "links": [ + 193 + ], + "shape": 3, + "slot_index": 4, + "label": "detailer_pipe" + } + ], + "properties": { + "Node name for S&R": "FaceDetailer" + }, + "widgets_values": [ + 256, + true, + 768, + 872368928997833, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + 0.5, + 10, + 3, + "center-1", + 0, + 0.93, + 0, + 0.7, + "False", + 10, + "" + ] + }, + { + "id": 38, + "type": "FromDetailerPipe", + "pos": [ + 2740, + 630 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 21, + "mode": 
0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 193, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 61 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 62 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 66 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 67 + ], + "shape": 3, + "slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 69 + ], + "shape": 3, + "slot_index": 6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 203 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 87, + "type": "UltralyticsDetectorProvider", + "pos": [ + 862, + 1445 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "BBOX_DETECTOR", + "type": "BBOX_DETECTOR", + "links": [], + "shape": 3, + "slot_index": 0, + "label": "BBOX_DETECTOR" + }, + { + "name": "SEGM_DETECTOR", + "type": "SEGM_DETECTOR", + "links": [ + 212, + 213 + ], + "shape": 3, + "slot_index": 1, + "label": "SEGM_DETECTOR" + } + ], + "properties": { + "Node name for S&R": "UltralyticsDetectorProvider" + }, + "widgets_values": [ + "segm/person_yolov8m-seg.pt" + ] + }, + { + "id": 77, + "type": "Reroute", + "pos": [ + 3500, + 170 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 183, + "label": "" + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 184, + 205, + 206, + 208 + ], + "slot_index": 0, + "label": "" + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 84, + "type": "SegmDetectorSEGS", + "pos": [ + 5130, + 1240 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "segm_detector", + "type": "SEGM_DETECTOR", + "link": 204, + "label": "segm_detector" + }, + { + "name": "image", + "type": "IMAGE", + "link": 206, + "label": "image" + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 207 + ], + "shape": 3, + "slot_index": 0, + "label": "SEGS" + } + ], + "properties": { + "Node name for S&R": "SegmDetectorSEGS" + }, + "widgets_values": [ + 0.5, + 10, + 3, + 1 + ] + }, + { + "id": 34, + "type": "FromDetailerPipe", + "pos": [ + 1737, + -34 + ], + "size": { + "0": 342.5999755859375, + "1": 186 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "detailer_pipe", + "type": "DETAILER_PIPE", + "link": 36, + "label": "detailer_pipe" + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": [ + 170 + ], + "shape": 3, + "slot_index": 0, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", 
+ "links": [ + 171 + ], + "shape": 3, + "slot_index": 1, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 172 + ], + "shape": 3, + "slot_index": 2, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 175 + ], + "shape": 3, + "slot_index": 3, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 176 + ], + "shape": 3, + "slot_index": 4, + "label": "negative" + }, + { + "name": "bbox_detector", + "type": "BBOX_DETECTOR", + "links": [ + 177 + ], + "shape": 3, + "slot_index": 5, + "label": "bbox_detector" + }, + { + "name": "sam_model_opt", + "type": "SAM_MODEL", + "links": [ + 178 + ], + "shape": 3, + "slot_index": 6, + "label": "sam_model_opt" + }, + { + "name": "segm_detector_opt", + "type": "SEGM_DETECTOR", + "links": [ + 214 + ], + "shape": 3, + "slot_index": 7, + "label": "segm_detector_opt" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "links": null, + "shape": 3, + "label": "detailer_hook" + } + ], + "properties": { + "Node name for S&R": "FromDetailerPipe" + } + }, + { + "id": 73, + "type": "DetailerForEachDebug", + "pos": [ + 5603, + 2282 + ], + "size": { + "0": 315, + "1": 678 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 159, + "label": "image" + }, + { + "name": "segs", + "type": "SEGS", + "link": 160, + "label": "segs" + }, + { + "name": "model", + "type": "MODEL", + "link": 161, + "label": "model" + }, + { + "name": "clip", + "type": "CLIP", + "link": 162, + "label": "clip" + }, + { + "name": "vae", + "type": "VAE", + "link": 163, + "label": "vae" + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 164, + "label": "positive" + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 165, + "label": "negative" + }, + { + "name": "detailer_hook", + "type": "DETAILER_HOOK", + "link": null, + "label": "detailer_hook" + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 166 + ], + "shape": 3, + "slot_index": 0, + "label": "image" + }, + { + "name": "cropped", + "type": "IMAGE", + "links": [ + 167 + ], + "shape": 6, + "slot_index": 1, + "label": "cropped" + }, + { + "name": "cropped_refined", + "type": "IMAGE", + "links": [ + 168 + ], + "shape": 6, + "slot_index": 2, + "label": "cropped_refined" + }, + { + "name": "cropped_refined_alpha", + "type": "IMAGE", + "links": null, + "shape": 6, + "label": "cropped_refined_alpha" + } + ], + "properties": { + "Node name for S&R": "DetailerForEachDebug" + }, + "widgets_values": [ + 256, + true, + 768, + 225176759887640, + "fixed", + 20, + 8, + "euler", + "normal", + 0.5, + 5, + true, + false, + "" + ] + }, + { + "id": 61, + "type": "PreviewImage", + "pos": [ + 6000, + 2450 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 167, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 62, + "type": "PreviewImage", + "pos": [ + 5990, + 2780 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 168, + "label": "images" + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + 26, + 474 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": 
[ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "slot_index": 0, + "label": "MODEL" + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1, + "label": "CLIP" + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "slot_index": 2, + "label": "VAE" + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors" + ] + }, + { + "id": 19, + "type": "## make-basic_pipe [2c8c61]", + "pos": [ + 502, + 860 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "vae_opt", + "type": "VAE", + "link": null, + "label": "vae_opt" + } + ], + "outputs": [ + { + "name": "BASIC_PIPE", + "type": "BASIC_PIPE", + "links": [ + 17, + 34 + ], + "shape": 3, + "slot_index": 0, + "label": "BASIC_PIPE" + } + ], + "title": "## make-basic_pipe", + "properties": { + "Node name for S&R": "## make-basic_pipe [2c8c61]" + }, + "widgets_values": [ + "SD1.5/V07_v07.safetensors", + "", + "text, watermark, worst quality:1.4, low quality:1.4" + ] + } + ], + "links": [ + [ + 1, + 4, + 0, + 3, + 0, + "MODEL" + ], + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 10, + 8, + 0, + 10, + 0, + "IMAGE" + ], + [ + 17, + 19, + 0, + 22, + 0, + "BASIC_PIPE" + ], + [ + 19, + 24, + 0, + 22, + 2, + "SAM_MODEL" + ], + [ + 33, + 24, + 0, + 32, + 2, + "SAM_MODEL" + ], + [ + 34, + 19, + 0, + 32, + 0, + "BASIC_PIPE" + ], + [ + 36, + 32, + 0, + 34, + 0, + "DETAILER_PIPE" + ], + [ + 59, + 36, + 0, + 37, + 0, + "IMAGE" + ], + [ + 61, + 38, + 0, + 39, + 0, + "MODEL" + ], + [ + 62, + 38, + 1, + 39, + 1, + "CLIP" + ], + [ + 65, + 38, + 2, + 39, + 2, + "VAE" + ], + [ + 66, + 38, + 3, + 39, + 3, + "CONDITIONING" + ], + [ + 67, + 38, + 4, + 39, + 4, + "CONDITIONING" + ], + [ + 68, + 38, + 5, + 39, + 5, + "BBOX_DETECTOR" + ], + [ + 69, + 38, + 6, + 39, + 6, + "SAM_MODEL" + ], + [ + 75, + 43, + 0, + 44, + 0, + "IMAGE" + ], + [ + 77, + 45, + 0, + 46, + 0, + "DETAILER_PIPE" + ], + [ + 82, + 45, + 0, + 50, + 0, + "DETAILER_PIPE" + ], + [ + 83, + 50, + 6, + 51, + 0, + "SAM_MODEL" + ], + [ + 85, + 50, + 5, + 52, + 0, + "BBOX_DETECTOR" + ], + [ + 87, + 52, + 0, + 51, + 1, + "SEGS" + ], + [ + 88, + 51, + 0, + 53, + 0, + "MASK" + ], + [ + 137, + 49, + 0, + 69, + 0, + "IMAGE" + ], + [ + 138, + 53, + 0, + 69, + 1, + "SEGS" + ], + [ + 139, + 50, + 0, + 69, + 2, + "MODEL" + ], + [ + 140, + 50, + 1, + 69, + 3, + "CLIP" + ], + [ + 141, + 50, + 2, + 69, + 4, + "VAE" + ], + [ + 142, + 50, + 3, + 69, + 5, + "CONDITIONING" + ], + [ + 143, + 50, + 4, + 69, + 6, + "CONDITIONING" + ], + [ + 144, + 69, + 0, + 57, + 0, + "IMAGE" + ], + [ + 153, + 49, + 0, + 72, + 0, + "IMAGE" + ], + [ + 154, + 53, + 0, + 72, + 1, + "SEGS" + ], + [ + 155, + 46, + 0, + 72, + 2, + "BASIC_PIPE" + ], + [ + 156, + 72, + 0, + 64, + 0, + "IMAGE" + ], + [ + 157, + 72, + 1, + 65, + 0, + "IMAGE" + ], + [ + 158, + 72, + 2, + 66, + 0, + "IMAGE" + ], + [ + 159, + 49, + 0, + 73, + 0, + "IMAGE" + ], + [ + 160, + 52, + 0, + 73, + 1, + "SEGS" + ], + [ + 161, + 50, + 0, + 73, + 2, + "MODEL" + ], + [ + 162, + 50, + 1, + 73, + 3, + "CLIP" + ], + [ + 163, + 50, + 2, + 73, + 4, + "VAE" + ], + [ + 164, + 50, + 3, + 73, + 5, + "CONDITIONING" + ], + [ + 165, + 
50, + 4, + 73, + 6, + "CONDITIONING" + ], + [ + 166, + 73, + 0, + 60, + 0, + "IMAGE" + ], + [ + 167, + 73, + 1, + 61, + 0, + "IMAGE" + ], + [ + 168, + 73, + 2, + 62, + 0, + "IMAGE" + ], + [ + 169, + 10, + 0, + 74, + 0, + "IMAGE" + ], + [ + 170, + 34, + 0, + 74, + 1, + "MODEL" + ], + [ + 171, + 34, + 1, + 74, + 2, + "CLIP" + ], + [ + 172, + 34, + 2, + 74, + 3, + "VAE" + ], + [ + 175, + 34, + 3, + 74, + 4, + "CONDITIONING" + ], + [ + 176, + 34, + 4, + 74, + 5, + "CONDITIONING" + ], + [ + 177, + 34, + 5, + 74, + 6, + "BBOX_DETECTOR" + ], + [ + 178, + 34, + 6, + 74, + 7, + "SAM_MODEL" + ], + [ + 179, + 74, + 0, + 30, + 0, + "IMAGE" + ], + [ + 180, + 74, + 1, + 27, + 0, + "IMAGE" + ], + [ + 181, + 74, + 2, + 75, + 0, + "IMAGE" + ], + [ + 182, + 74, + 3, + 36, + 0, + "MASK" + ], + [ + 183, + 10, + 0, + 77, + 0, + "*" + ], + [ + 184, + 77, + 0, + 76, + 0, + "IMAGE" + ], + [ + 186, + 76, + 0, + 41, + 0, + "IMAGE" + ], + [ + 187, + 76, + 1, + 42, + 0, + "IMAGE" + ], + [ + 188, + 76, + 0, + 52, + 1, + "IMAGE" + ], + [ + 189, + 76, + 2, + 78, + 0, + "IMAGE" + ], + [ + 190, + 76, + 3, + 43, + 0, + "MASK" + ], + [ + 191, + 76, + 4, + 45, + 0, + "DETAILER_PIPE" + ], + [ + 193, + 74, + 4, + 38, + 0, + "DETAILER_PIPE" + ], + [ + 194, + 49, + 0, + 81, + 0, + "IMAGE" + ], + [ + 195, + 53, + 0, + 81, + 1, + "SEGS" + ], + [ + 196, + 46, + 0, + 81, + 2, + "BASIC_PIPE" + ], + [ + 197, + 81, + 0, + 54, + 0, + "IMAGE" + ], + [ + 200, + 72, + 3, + 82, + 0, + "IMAGE" + ], + [ + 201, + 83, + 0, + 22, + 1, + "BBOX_DETECTOR" + ], + [ + 202, + 83, + 0, + 32, + 1, + "BBOX_DETECTOR" + ], + [ + 203, + 38, + 7, + 39, + 7, + "SEGM_DETECTOR" + ], + [ + 204, + 50, + 7, + 84, + 0, + "SEGM_DETECTOR" + ], + [ + 205, + 77, + 0, + 51, + 2, + "IMAGE" + ], + [ + 206, + 77, + 0, + 84, + 1, + "IMAGE" + ], + [ + 207, + 84, + 0, + 85, + 0, + "SEGS" + ], + [ + 208, + 77, + 0, + 85, + 1, + "IMAGE" + ], + [ + 209, + 85, + 0, + 86, + 0, + "IMAGE" + ], + [ + 210, + 39, + 0, + 76, + 1, + "DETAILER_PIPE" + ], + [ + 211, + 74, + 0, + 49, + 0, + "*" + ], + [ + 212, + 87, + 1, + 22, + 3, + "SEGM_DETECTOR" + ], + [ + 213, + 87, + 1, + 32, + 3, + "SEGM_DETECTOR" + ], + [ + 214, + 34, + 7, + 74, + 8, + "SEGM_DETECTOR" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/test/loop-test.json b/custom_nodes/ComfyUI-Impact-Pack/test/loop-test.json new file mode 100644 index 0000000000000000000000000000000000000000..f1633943e455307b9a043b8f5b91a19c8ec02d3d --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/loop-test.json @@ -0,0 +1,1114 @@ +{ + "last_node_id": 43, + "last_link_id": 49, + "nodes": [ + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 413, + 389 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 415, + 186 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], 
+ "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," + ] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 1451, + 189 + ], + "size": { + "0": 210, + "1": 270 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + 26, + 474 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 1 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "V07_v07.safetensors" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1209, + 188 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9, + 12 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 19, + "type": "ImpactMinMax", + "pos": [ + 2480, + 1160 + ], + "size": { + "0": 210, + "1": 78 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "*", + "link": 24 + }, + { + "name": "b", + "type": "*", + "link": 25, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactMinMax" + }, + "widgets_values": [ + false + ] + }, + { + "id": 15, + "type": "ImpactValueSender", + "pos": [ + 3520, + 1140 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "*", + "link": 39 + } + ], + "properties": { + "Node name for S&R": "ImpactValueSender" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 11, + "type": "ImageMaskSwitch", + "pos": [ + 1297, + 893 + ], + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images1", + "type": "IMAGE", + "link": 12 + }, + { + "name": "mask1_opt", + "type": "MASK", + "link": null + }, + { + "name": "images2_opt", + "type": "IMAGE", + "link": 11 + }, + { + "name": "mask2_opt", + "type": "MASK", + "link": null + }, + { + "name": "images3_opt", + "type": "IMAGE", + "link": null + }, + { + "name": "mask3_opt", + "type": "MASK", + "link": null + }, + { + "name": "images4_opt", + "type": "IMAGE", + "link": null + }, + { + "name": "mask4_opt", + "type": "MASK", + "link": null + }, + { + "name": "select", + "type": "INT", + "link": 43, + "widget": { + "name": "select", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 4, + "step": 1 + } + ] + }, + "slot_index": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageMaskSwitch" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 34, + 
"type": "ImpactConditionalBranch", + "pos": [ + 3264, + 1006 + ], + "size": { + "0": 210, + "1": 66 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "cond", + "type": "BOOLEAN", + "link": 36, + "slot_index": 0 + }, + { + "name": "tt_value", + "type": "*", + "link": 37 + }, + { + "name": "ff_value", + "type": "*", + "link": 38 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [ + 39 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactConditionalBranch" + } + }, + { + "id": 33, + "type": "ImpactInt", + "pos": [ + 3010, + 930 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 2 + ] + }, + { + "id": 35, + "type": "ImpactInt", + "pos": [ + 3000, + 1140 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 38 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 473, + 609 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 256, + 256, + 1 + ] + }, + { + "id": 13, + "type": "ImageScaleBy", + "pos": [ + 1730, + 920 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 13 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 23, + 40 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "nearest-exact", + 1.2 + ] + }, + { + "id": 41, + "type": "ImpactConditionalStopIteration", + "pos": [ + 3607, + 774 + ], + "size": { + "0": 252, + "1": 26 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "cond", + "type": "BOOLEAN", + "link": 49 + } + ], + "properties": { + "Node name for S&R": "ImpactConditionalStopIteration" + } + }, + { + "id": 32, + "type": "ImpactCompare", + "pos": [ + 2760, + 1040 + ], + "size": { + "0": 210, + "1": 78 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "*", + "link": 47 + }, + { + "name": "b", + "type": "*", + "link": 34, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "BOOLEAN", + "type": "BOOLEAN", + "links": [ + 36, + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactCompare" + }, + "widgets_values": [ + "a > b" + ] + }, + { + "id": 43, + "type": "ImpactNeg", + "pos": [ + 3210.6906854687495, + 698.6871511123657 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "BOOLEAN", + "link": 48 + } + ], + "outputs": [ + { + "name": "BOOLEAN", + "type": "BOOLEAN", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactNeg" + } + }, + { + "id": 10, + "type": "ImageReceiver", + "pos": [ + 641, + 932 + ], + "size": { + "0": 315, + "1": 200 + }, + 
"flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageReceiver" + }, + "widgets_values": [ + "ImgSender_temp_vxhgs_00001_.png [temp]", + 0 + ] + }, + { + "id": 24, + "type": "ImpactImageInfo", + "pos": [ + 2077, + 1117 + ], + "size": { + "0": 210, + "1": 86 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "IMAGE", + "link": 23 + } + ], + "outputs": [ + { + "name": "batch", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "width", + "type": "INT", + "links": [ + 25 + ], + "shape": 3 + }, + { + "name": "channel", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImpactImageInfo" + } + }, + { + "id": 42, + "type": "ImpactInt", + "pos": [ + 2483, + 983 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 47 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactInt" + }, + "widgets_values": [ + 768 + ] + }, + { + "id": 39, + "type": "ImpactValueReceiver", + "pos": [ + 1021, + 1137 + ], + "size": { + "0": 210, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "*", + "type": "*", + "links": [ + 43 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImpactValueReceiver" + }, + "widgets_values": [ + "INT", + 1, + 0 + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 872, + 217 + ], + "size": { + "0": 315, + "1": 474 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 1 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 901257808527154, + "fixed", + 5, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 36, + "type": "ImageSender", + "pos": [ + 2046, + -116 + ], + "size": [ + 914.2697004627885, + 989.0802794506753 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 40 + } + ], + "properties": { + "Node name for S&R": "ImageSender" + }, + "widgets_values": [ + "ImgSender", + 0 + ] + } + ], + "links": [ + [ + 1, + 4, + 0, + 3, + 0, + "MODEL" + ], + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 11, + 10, + 0, + 11, + 2, + "IMAGE" + ], + [ + 12, + 8, + 0, + 11, + 0, + "IMAGE" + ], + [ + 13, + 11, + 0, + 13, + 0, + "IMAGE" + ], + [ + 23, + 13, + 0, + 24, + 0, + "IMAGE" + ], + [ + 24, + 24, + 1, + 19, + 0, + "*" + ], + [ + 25, + 24, + 2, + 19, + 1, + "*" + ], + [ + 34, + 19, + 0, + 32, + 1, 
+ "*" + ], + [ + 36, + 32, + 0, + 34, + 0, + "BOOLEAN" + ], + [ + 37, + 33, + 0, + 34, + 1, + "*" + ], + [ + 38, + 35, + 0, + 34, + 2, + "*" + ], + [ + 39, + 34, + 0, + 15, + 0, + "*" + ], + [ + 40, + 13, + 0, + 36, + 0, + "IMAGE" + ], + [ + 43, + 39, + 0, + 11, + 8, + "INT" + ], + [ + 47, + 42, + 0, + 32, + 0, + "*" + ], + [ + 48, + 32, + 0, + 43, + 0, + "BOOLEAN" + ], + [ + 49, + 43, + 0, + 41, + 0, + "BOOLEAN" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/test/masks.json b/custom_nodes/ComfyUI-Impact-Pack/test/masks.json new file mode 100644 index 0000000000000000000000000000000000000000..da9c261f469a75c1513a8dfe2df9ce471dc01ebb --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/masks.json @@ -0,0 +1,622 @@ +{ + "last_node_id": 38, + "last_link_id": 52, + "nodes": [ + { + "id": 21, + "type": "SEGSToImageList", + "pos": [ + 2160, + 970 + ], + "size": { + "0": 304.79998779296875, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 41 + }, + { + "name": "fallback_image_opt", + "type": "IMAGE", + "link": 26, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 27 + ], + "shape": 6, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SEGSToImageList" + } + }, + { + "id": 5, + "type": "MaskToSEGS", + "pos": [ + 1520, + 980 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 5 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 35, + 46 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToSEGS" + }, + "widgets_values": [ + "False", + 3, + "disabled", + 10 + ] + }, + { + "id": 36, + "type": "MasksToMaskList", + "pos": [ + 2270, + 680 + ], + "size": { + "0": 158.000244140625, + "1": 26 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "masks", + "type": "MASKS", + "link": 51 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 52 + ], + "shape": 6, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MasksToMaskList" + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 35, + "type": "MaskToImage", + "pos": [ + 2480, + 680 + ], + "size": { + "0": 176.39999389648438, + "1": 38.59991455078125 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 52 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 28, + "type": "Segs & Mask ForEach", + "pos": [ + 1800, + 980 + ], + "size": { + "0": 243.60000610351562, + "1": 46 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "segs", + "type": "SEGS", + "link": 35, + "slot_index": 0 + }, + { + "name": "masks", + "type": "MASKS", + "link": 43 + } + ], + "outputs": [ + { + "name": "SEGS", + "type": "SEGS", + "links": [ + 41 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Segs & Mask ForEach" + } + }, + { + "id": 22, + "type": "PreviewImage", + "pos": [ + 2510, + 970 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + 
"link": 27 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 4, + "type": "LoadImage", + "pos": [ + 1150, + 460 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 26, + 47 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 5 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-416378.30000000075.png [input]", + "image" + ] + }, + { + "id": 33, + "type": "SAMDetectorSegmented", + "pos": [ + 1740, + 310 + ], + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "sam_model", + "type": "SAM_MODEL", + "link": 45 + }, + { + "name": "segs", + "type": "SEGS", + "link": 46 + }, + { + "name": "image", + "type": "IMAGE", + "link": 47 + } + ], + "outputs": [ + { + "name": "combined_mask", + "type": "MASK", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "batch_masks", + "type": "MASKS", + "links": [ + 43, + 51 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SAMDetectorSegmented" + }, + "widgets_values": [ + "center-1", + 0, + 0.7, + 0, + 0.7, + "False" + ] + }, + { + "id": 2, + "type": "SAMLoader", + "pos": [ + 1160, + 310 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "SAM_MODEL", + "type": "SAM_MODEL", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SAMLoader" + }, + "widgets_values": [ + "sam_vit_b_01ec64.pth", + "AUTO" + ] + }, + { + "id": 6, + "type": "MaskToImage", + "pos": [ + 2300, + 310 + ], + "size": { + "0": 176.39999389648438, + "1": 26 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 44 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 7, + "type": "PreviewImage", + "pos": [ + 2720, + 310 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 8 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 9, + "type": "PreviewImage", + "pos": [ + 2720, + 680 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 50 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 38, + "type": "Note", + "pos": [ + 2032, + 698 + ], + "size": [ + 210, + 81.49969482421875 + ], + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "MasksToMaskList node introduced\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 37, + "type": "Note", + "pos": [ + 2071, + 384 + ], + "size": [ + 281.500244140625, + 65.09967041015625 + ], + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "type of batch_masks => MASKS instead of MASK\n" + ], + "color": "#432", + "bgcolor": "#653" + } + ], + "links": [ + [ + 5, + 4, + 1, + 5, + 0, + "MASK" + ], + [ + 8, + 6, + 0, + 7, + 0, + "IMAGE" + ], + [ + 26, + 4, + 0, + 21, + 1, + 
"IMAGE" + ], + [ + 27, + 21, + 0, + 22, + 0, + "IMAGE" + ], + [ + 35, + 5, + 0, + 28, + 0, + "SEGS" + ], + [ + 41, + 28, + 0, + 21, + 0, + "SEGS" + ], + [ + 43, + 33, + 1, + 28, + 1, + "MASKS" + ], + [ + 44, + 33, + 0, + 6, + 0, + "MASK" + ], + [ + 45, + 2, + 0, + 33, + 0, + "SAM_MODEL" + ], + [ + 46, + 5, + 0, + 33, + 1, + "SEGS" + ], + [ + 47, + 4, + 0, + 33, + 2, + "IMAGE" + ], + [ + 50, + 35, + 0, + 9, + 0, + "IMAGE" + ], + [ + 51, + 33, + 1, + 36, + 0, + "MASKS" + ], + [ + 52, + 36, + 0, + 35, + 0, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/test/regional_prompt.json b/custom_nodes/ComfyUI-Impact-Pack/test/regional_prompt.json new file mode 100644 index 0000000000000000000000000000000000000000..3864d5221c7a6b399f2e3278f2535b109eb1838c --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/test/regional_prompt.json @@ -0,0 +1,1625 @@ +{ + "last_node_id": 35, + "last_link_id": 65, + "nodes": [ + { + "id": 9, + "type": "EditBasicPipe", + "pos": [ + 1210, + 1030 + ], + "size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 60 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 13 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 16 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 15, + "type": "LoadImage", + "pos": [ + -240, + 1710 + ], + "size": { + "0": 900, + "1": 900 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1572044.0999999996.png [input]", + "image" + ] + }, + { + "id": 23, + "type": "LoadImage", + "pos": [ + -240, + 3790 + ], + "size": { + "0": 920, + "1": 910 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 31 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1351518.png [input]", + "image" + ] + }, + { + "id": 26, + "type": "EditBasicPipe", + "pos": [ + 1240, + 4180 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 59 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 34 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, 
+ { + "id": 17, + "type": "EditBasicPipe", + "pos": [ + 1550, + 1740 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 57 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 21 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 7, + "type": "VAEDecode", + "pos": [ + 3660, + 1820 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 63 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 4020, + 1450 + ], + "size": { + "0": 1069.308349609375, + "1": 1128.923828125 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 10, + "type": "CLIPTextEncode", + "pos": [ + 860, + 1110 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 61 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl black hair, upper knee, (cafe:1.1)" + ] + }, + { + "id": 22, + "type": "CombineRegionalPrompts", + "pos": [ + 2810, + 1860 + ], + "size": { + "0": 287.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "regional_prompts1", + "type": "REGIONAL_PROMPTS", + "link": 48 + }, + { + "name": "regional_prompts2", + "type": "REGIONAL_PROMPTS", + "link": 49 + }, + { + "name": "regional_prompts3", + "type": "REGIONAL_PROMPTS", + "link": 50 + }, + { + "name": "regional_prompts4", + "type": "REGIONAL_PROMPTS", + "link": 64 + }, + { + "name": "regional_prompts5", + "type": "REGIONAL_PROMPTS", + "link": null + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CombineRegionalPrompts" + } + }, + { + "id": 12, + "type": "RegionalPrompt", + "pos": [ + 2030, + 1010 + ], + "size": { + "0": 418.1999816894531, + "1": 46 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 15 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 17 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 14, + "type": "EmptyLatentImage", + "pos": [ + 2740, + 1500 
+ ], + "size": { + "0": 350, + "1": 110 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 19 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 1104, + 1 + ] + }, + { + "id": 27, + "type": "CLIPTextEncode", + "pos": [ + 830, + 4260 + ], + "size": [ + 338.8743232727047, + 117.87075195312445 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 37 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl yellow pencil skirt, upper knee, (cafe:1.1)" + ] + }, + { + "id": 25, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1600, + 4180 + ], + "size": { + "0": 287.9136962890625, + "1": 106.45689392089844 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 33 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 13, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1563, + 1030 + ], + "size": { + "0": 355.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 16 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 17 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 2, + "type": "RegionalSampler", + "pos": [ + 3260, + 1820 + ], + "size": { + "0": 323.1692810058594, + "1": 597.25439453125 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 19, + "slot_index": 0 + }, + { + "name": "base_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 10 + }, + { + "name": "regional_prompts", + "type": "REGIONAL_PROMPTS", + "link": 27 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalSampler" + }, + "widgets_values": [ + 1019854126263754, + "randomize", + 30, + 1, + 5 + ] + }, + { + "id": 5, + "type": "## make-basic_pipe [2c8c61]", + "pos": [ + -2547, + 2236 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "vae_opt", + "type": "VAE", + "link": null + } + ], + "outputs": [ + { + "name": "BASIC_PIPE", + "type": "BASIC_PIPE", + "links": [ + 1, + 3, + 62 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "## make-basic_pipe", + "properties": { + "Node name for S&R": "## make-basic_pipe [2c8c61]" + }, + "widgets_values": [ + "SD1.5/epicrealism_naturalSinRC1VAE.safetensors", + "a photograph of a girl is standing in the cafe terrace, looking viewer, upper knee", + "big head, closeup" + ] + }, + { + "id": 1, + "type": "LoadImage", + "pos": [ + -260, + 778 + ], + "size": { + "0": 915.1032104492188, + "1": 860.6505126953125 + }, + "flags": {}, + "order": 4, 
+ "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1641138.7000000002.png [input]", + "image" + ] + }, + { + "id": 31, + "type": "CLIPTextEncode", + "pos": [ + 1230, + 2550 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 56 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 51 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl, green tie, upper knee, (cafe:1.1)" + ] + }, + { + "id": 33, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1890, + 2470 + ], + "size": { + "0": 305.4067687988281, + "1": 106 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 53 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 30, + "type": "EditBasicPipe", + "pos": [ + 1610, + 2480 + ], + "size": { + "0": 178, + "1": 126 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 58 + }, + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 51 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": null + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EditBasicPipe" + } + }, + { + "id": 6, + "type": "FromBasicPipe", + "pos": [ + -1813, + 2226 + ], + "size": { + "0": 241.79998779296875, + "1": 106 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 3 + } + ], + "outputs": [ + { + "name": "model", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "vae", + "type": "VAE", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": [], + "shape": 3, + "slot_index": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe" + } + }, + { + "id": 34, + "type": "FromBasicPipe_v2", + "pos": [ + 699, + 2163 + ], + "size": { + "0": 267, + "1": 126 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 62 + } + ], + "outputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "links": [ + 57, + 58, + 59, + 60 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "model", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + 
"links": [ + 55, + 56, + 61 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 63 + ], + "shape": 3, + "slot_index": 3 + }, + { + "name": "positive", + "type": "CONDITIONING", + "links": null, + "shape": 3 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FromBasicPipe_v2" + } + }, + { + "id": 20, + "type": "RegionalPrompt", + "pos": [ + 2230, + 1720 + ], + "size": { + "0": 278.79998779296875, + "1": 57.09715270996094 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 28 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 23 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 18, + "type": "CLIPTextEncode", + "pos": [ + 1180, + 1820 + ], + "size": { + "0": 292.0009765625, + "1": 115.41679382324219 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photorealistic:1.4, 1girl pink jacket, upper knee, (cafe:1.1)" + ] + }, + { + "id": 32, + "type": "RegionalPrompt", + "pos": [ + 2280, + 2450 + ], + "size": { + "0": 278.79998779296875, + "1": 57.09715270996094 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 65 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 52 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 64 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 24, + "type": "RegionalPrompt", + "pos": [ + 2040, + 4160 + ], + "size": { + "0": 278.79998779296875, + "1": 47.54190444946289 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 31 + }, + { + "name": "advanced_sampler", + "type": "KSAMPLER_ADVANCED", + "link": 32 + } + ], + "outputs": [ + { + "name": "REGIONAL_PROMPTS", + "type": "REGIONAL_PROMPTS", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "RegionalPrompt" + } + }, + { + "id": 35, + "type": "LoadImage", + "pos": [ + -274, + 2727 + ], + "size": { + "0": 900, + "1": 900 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-1594007.5999999996.png [input]", + "image" + ] + }, + { + "id": 21, + "type": "KSamplerAdvancedProvider", + "pos": [ + 1840, + 1740 + ], + "size": { + "0": 305.4067687988281, + "1": 106 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 24 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + 
} + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 8, + "dpm_fast", + "sgm_uniform" + ] + }, + { + "id": 4, + "type": "KSamplerAdvancedProvider", + "pos": [ + 2742, + 1681 + ], + "size": { + "0": 355.20001220703125, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "basic_pipe", + "type": "BASIC_PIPE", + "link": 1 + } + ], + "outputs": [ + { + "name": "KSAMPLER_ADVANCED", + "type": "KSAMPLER_ADVANCED", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvancedProvider" + }, + "widgets_values": [ + 5, + "dpm_fast", + "simple" + ] + } + ], + "links": [ + [ + 1, + 5, + 0, + 4, + 0, + "BASIC_PIPE" + ], + [ + 3, + 5, + 0, + 6, + 0, + "BASIC_PIPE" + ], + [ + 7, + 2, + 0, + 7, + 0, + "LATENT" + ], + [ + 9, + 7, + 0, + 8, + 0, + "IMAGE" + ], + [ + 10, + 4, + 0, + 2, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 13, + 10, + 0, + 9, + 4, + "CONDITIONING" + ], + [ + 15, + 1, + 1, + 12, + 0, + "MASK" + ], + [ + 16, + 9, + 0, + 13, + 0, + "BASIC_PIPE" + ], + [ + 17, + 13, + 0, + 12, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 19, + 14, + 0, + 2, + 0, + "LATENT" + ], + [ + 21, + 18, + 0, + 17, + 4, + "CONDITIONING" + ], + [ + 23, + 21, + 0, + 20, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 24, + 17, + 0, + 21, + 0, + "BASIC_PIPE" + ], + [ + 27, + 22, + 0, + 2, + 2, + "REGIONAL_PROMPTS" + ], + [ + 28, + 15, + 1, + 20, + 0, + "MASK" + ], + [ + 31, + 23, + 1, + 24, + 0, + "MASK" + ], + [ + 32, + 25, + 0, + 24, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 33, + 26, + 0, + 25, + 0, + "BASIC_PIPE" + ], + [ + 34, + 27, + 0, + 26, + 4, + "CONDITIONING" + ], + [ + 37, + 6, + 1, + 27, + 0, + "CLIP" + ], + [ + 48, + 12, + 0, + 22, + 0, + "REGIONAL_PROMPTS" + ], + [ + 49, + 20, + 0, + 22, + 1, + "REGIONAL_PROMPTS" + ], + [ + 50, + 24, + 0, + 22, + 2, + "REGIONAL_PROMPTS" + ], + [ + 51, + 31, + 0, + 30, + 4, + "CONDITIONING" + ], + [ + 52, + 33, + 0, + 32, + 1, + "KSAMPLER_ADVANCED" + ], + [ + 53, + 30, + 0, + 33, + 0, + "BASIC_PIPE" + ], + [ + 55, + 34, + 2, + 18, + 0, + "CLIP" + ], + [ + 56, + 34, + 2, + 31, + 0, + "CLIP" + ], + [ + 57, + 34, + 0, + 17, + 0, + "BASIC_PIPE" + ], + [ + 58, + 34, + 0, + 30, + 0, + "BASIC_PIPE" + ], + [ + 59, + 34, + 0, + 26, + 0, + "BASIC_PIPE" + ], + [ + 60, + 34, + 0, + 9, + 0, + "BASIC_PIPE" + ], + [ + 61, + 34, + 2, + 10, + 0, + "CLIP" + ], + [ + 62, + 5, + 0, + 34, + 0, + "BASIC_PIPE" + ], + [ + 63, + 34, + 3, + 7, + 1, + "VAE" + ], + [ + 64, + 32, + 0, + 22, + 3, + "REGIONAL_PROMPTS" + ], + [ + 65, + 35, + 1, + 32, + 0, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md new file mode 100644 index 0000000000000000000000000000000000000000..1284b1f29eaf2e1eb0a7309ea3816d5d2259a4c5 --- /dev/null +++ b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/TROUBLESHOOTING.md @@ -0,0 +1,72 @@ +## When a permission error occurs during the installation process (on Windows) + +* There are cases where the package you are trying to install is already being used by another custom node that has been loaded. + * This issue occurs only on Windows. +* Please close ComfyUI and execute install.py directly using Python in the custom_nodes/ComfyUI-Impact-Pack directory. + * In case **portable** version: + 1. goto **ComfyUI_windows_portable** directory in **cmd** + 2. 
execute ```.\python_embeded\python.exe -s custom_nodes\ComfyUI-Impact-Pack\install.py```
+  * If you use a **venv**:
+    1. activate the venv
+    2. execute ```python -s custom_nodes\ComfyUI-Impact-Pack\install.py```
+  * Other environments:
+    1. Modify the path to 'python' according to your Python environment.
+    2. execute ```(YOUR PYTHON) -s custom_nodes\ComfyUI-Impact-Pack\install.py```
+
+
+## If the nodes of the Impact Pack hang during execution
+
+* Issues like this may arise during dilation-related processing, depending on the compatibility of your environment.
+* Set `disable_gpu_opencv = True` in the `ComfyUI-Impact-Pack/impact-pack.ini` file. Occasionally, issues arise when OpenCV's GPU mode is enabled, depending on the environment.
+
+ e.g.
+```
+[default]
+dependency_version = 17
+mmdet_skip = True
+sam_editor_cpu = False
+sam_editor_model = sam_vit_b_01ec64.pth
+custom_wildcards = /home/me/github/ComfyUI/custom_nodes/ComfyUI-Impact-Pack/custom_wildcards
+disable_gpu_opencv = True
+```
+
+## An error occurs when importing Ultralytics
+```
+ AttributeError: 'Logger' object has no attribute 'reconfigure'
+
+ or
+
+ AttributeError: 'Logger' object has no attribute 'encoding'
+```
+* Update `ComfyUI-Manager` to V1.1.2 or above.
+
+
+## An error occurs related to 'cv2'
+
+```
+ AttributeError: module 'cv2' has no attribute 'setNumThreads'
+```
+
+* Update 'opencv-python' and 'opencv-python-headless' to the latest version.
+ * Once you have updated to the latest version, you can downgrade back to 4.6.0.66 if needed.
+ * For the portable version, navigate to the portable installation directory in the command prompt and enter the following command:
+
+ ```
+ .\python_embeded\python.exe -m pip install -U opencv-python opencv-python-headless
+ ```
+
+ * When using the WAS node suite or reactor nodes, the latest version may not work as expected. You can downgrade using the following command:
+
+ ```
+ .\python_embeded\python.exe -m pip install -U opencv-python==4.6.0.66 opencv-python-headless==4.6.0.66
+ ```
+
+
+## Distortion on Detailer
+
+* This issue may be caused by a bug in xformers 0.0.18. If you encounter it, try adjusting the `guide_size` parameter.
+
+![example](black1.png)
+
+![example](black2.png)
+* In the examples above, `guide_size` was changed from 256 to 192.
diff --git a/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa9cd8c8abbffe8ae2e50618898dbfa169fe461b
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black1.png differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png
new file mode 100644
index 0000000000000000000000000000000000000000..b14f2c10151741deeb9bd84dd5f77e9613c5cfc0
Binary files /dev/null and b/custom_nodes/ComfyUI-Impact-Pack/troubleshooting/black2.png differ
diff --git a/custom_nodes/ComfyUI-Impact-Pack/uninstall.py b/custom_nodes/ComfyUI-Impact-Pack/uninstall.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d62417c14128faca59ced13bbd83d5cd8708da3
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/uninstall.py
@@ -0,0 +1,38 @@
+import os
+import sys
+import time
+import platform
+import shutil
+import subprocess
+
+comfy_path = '../..'
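+# NOTE (comment added for clarity): comfy_path is a relative path; it assumes
+# this script is run with the working directory set to
+# custom_nodes/ComfyUI-Impact-Pack, so '../..' resolves to the ComfyUI root.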
+
+def rmtree(path):
+    # Retry a few times; on Windows, files can be locked or marked read-only.
+    retry_count = 3
+
+    while True:
+        try:
+            retry_count -= 1
+
+            if platform.system() == "Windows":
+                # Clear the read-only attribute recursively so removal can proceed.
+                subprocess.check_call(['attrib', '-R', path + '\\*', '/S'])
+
+            shutil.rmtree(path)
+
+            return True
+
+        except Exception as ex:
+            print(f"ex: {ex}")
+            time.sleep(3)
+
+            if retry_count < 0:
+                raise
+
+            print(f"Uninstall retry({retry_count})")
+
+# Remove the web extension files that install.py copied into ComfyUI's web directory.
+js_dest_path = os.path.join(comfy_path, "web", "extensions", "impact-pack")
+
+if os.path.exists(js_dest_path):
+    rmtree(js_dest_path)
+
+
diff --git a/custom_nodes/ComfyUI-Impact-Pack/wildcards/put_wildcards_here b/custom_nodes/ComfyUI-Impact-Pack/wildcards/put_wildcards_here
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt b/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f8d0606f8de5a728a864d224a9ae0af4f77a9a7f
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/flower.txt
@@ -0,0 +1,9 @@
+rose
+orchid
+iris
+carnation
+lily
+daisy
+chrysanthemum
+daffodil
+dahlia
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt b/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2a58330357dbf4fc6d94879cb449b87d04a88d51
--- /dev/null
+++ b/custom_nodes/ComfyUI-Impact-Pack/wildcards/samples/jewel.txt
@@ -0,0 +1,9 @@
+diamond
+emerald
+sapphire
+opal
+ruby
+topaz
+pearl
+amethyst
+aquamarine
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Inspire-Pack/.gitignore b/custom_nodes/ComfyUI-Inspire-Pack/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..d6d8790c581808ef5bf724af6208ae1479cb8ab0
--- /dev/null
+++ b/custom_nodes/ComfyUI-Inspire-Pack/.gitignore
@@ -0,0 +1,161 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ +resources/prompt-builder.yaml diff --git a/custom_nodes/ComfyUI-Inspire-Pack/LICENSE b/custom_nodes/ComfyUI-Inspire-Pack/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI-Inspire-Pack/README.md b/custom_nodes/ComfyUI-Inspire-Pack/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0c3cdf528978ec4e797685def8ebe0d862b3f434
--- /dev/null
+++ b/custom_nodes/ComfyUI-Inspire-Pack/README.md
@@ -0,0 +1,121 @@
+# ComfyUI-Inspire-Pack
+This repository offers various extension nodes for ComfyUI. The nodes here have a different character from those in the ComfyUI Impact Pack, which has simply grown too large...
+
+## Notice:
+* V0.48 optimized the wildcard node. This update requires Impact Pack V4.39.2 or later.
+* V0.13.2 isn't compatible with the old ControlNet Auxiliary Preprocessors. If you use `MediaPipeFaceMeshDetectorProvider`, update to the latest version (Sep. 17th).
+* WARN: If you used versions **0.12 to 0.12.2** without a GlobalSeed node, your workflow's seed may have been erased. Please update immediately.
+
+## Nodes
+* Lora Block Weight - Nodes that provide functionality related to Lora block weights.
+  * This provides similar functionality to [sd-webui-lora-block-weight](https://github.com/hako-mikan/sd-webui-lora-block-weight).
+  * `Lora Loader (Block Weight)`: When loading a Lora, the block weight vector is applied.
+    * In the block vector, you can use numbers, R, A, a, B, and b (see the illustration below).
+    * R is determined sequentially based on a random seed, while A and B represent the values of the A and B parameters, respectively. a and b are half of the values of A and B, respectively.
+  * `XY Input: Lora Block Weight`: A node for the [Efficiency Nodes](https://github.com/LucianoCirino/efficiency-nodes-comfyui)' XY Plot that lets you plot over Lora block weights.
+    * You must ensure that the X and Y connections are made, and that dependencies are connected to the XY Plot.
+    * Note: To use this feature, update `Efficiency Nodes` to a version released after September 3rd.
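+
+  For illustration only, here is a hypothetical block vector mixing plain numbers with the R/A/B tokens described above (the actual vector length and block layout depend on the model type and follow sd-webui-lora-block-weight's conventions; these values are made up):
+
+  ```
+  1,1,1,1,0.5,0.5,0,0,R,A,a,B,b,1,1,1,1
+  ```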
+
+* SEGS Support nodes - Nodes that support ApplyControlNet (SEGS) from the Impact Pack.
+  * `OpenPose Preprocessor Provider (SEGS)`: Applies the OpenPose preprocessor so that the OpenPose ControlNet can be used in SEGS.
+    * You need to install [ControlNet Auxiliary Preprocessors](https://github.com/Fannovel16/comfyui_controlnet_aux) to use this.
+  * `Canny Preprocessor Provider (SEGS)`: Applies the Canny preprocessor so that the Canny ControlNet can be used in SEGS.
+  * `DW Preprocessor Provider (SEGS)`, `MiDaS Depth Map Preprocessor Provider (SEGS)`, `LeReS Depth Map Preprocessor Provider (SEGS)`,
+    `MediaPipe FaceMesh Preprocessor Provider (SEGS)`, `HED Preprocessor Provider (SEGS)`, `Fake Scribble Preprocessor (SEGS)`,
+    `AnimeLineArt Preprocessor Provider (SEGS)`, `Manga2Anime LineArt Preprocessor Provider (SEGS)`, `LineArt Preprocessor Provider (SEGS)`,
+    `Color Preprocessor Provider (SEGS)`, `Inpaint Preprocessor Provider (SEGS)`, `Tile Preprocessor Provider (SEGS)`
+  * `MediaPipeFaceMeshDetectorProvider`: Provides `BBOX_DETECTOR` and `SEGM_DETECTOR` that can be used with the Impact Pack's Detector, using the `MediaPipe-FaceMesh Preprocessor` of ControlNet Auxiliary Preprocessors.
+
+* A1111 Compatibility support - These nodes assist in exactly replicating A1111's generation results in ComfyUI.
+  * `KSampler (Inspire)`: ComfyUI generates random noise on the CPU, while A1111 uses the GPU. This is one of the three factors that significantly affect reproducing A1111's results in ComfyUI, and it can be addressed using `KSampler (Inspire)`.
+    * Other point #1: Make sure you haven't forgotten the 'embedding:' prefix for embeddings used in the prompt, e.g. 'embedding:easynegative'.
+    * Other point #2: ComfyUI and A1111 interpret prompt weighting differently. To align them, you need to use [BlenderNeko/Advanced CLIP Text Encode](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb).
+  * `KSamplerAdvanced (Inspire)`: The Inspire Pack version of `KSampler (Advanced)`.
+  * Common Parameters
+    * `batch_seed_mode` determines how seeds are applied to batch latents:
+      * `comfy`: Applies the noise to the whole batch of latents at once. This helps prevent duplicate images caused by seed reuse.
+      * `incremental`: As in A1111, the seed is incremented and noise is applied sequentially for each image in the batch. This makes straightforward reproduction from the seed alone possible.
+    * `variation_strength`: Within a batch, the variation strength starts from the set `variation_strength` and increases by `xxx`.
+    * `variation_seed` and `variation_strength` - The initial noise generated from the seed is blended toward the noise of `variation_seed` according to `variation_strength`. If `variation_strength` is 0, the result depends only on the seed; if it is 1.0, it is determined solely by `variation_seed`.
+      * These parameters are useful when you want to keep the composition of an image generated by a seed while introducing slight changes (see the sketch below).
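+
+  A minimal sketch of the variation-blending idea (illustration only; it assumes torch-style noise generation, and the pack's actual implementation may blend differently, e.g. via spherical interpolation):
+
+  ```python
+  import torch
+
+  def blended_noise(seed, variation_seed, variation_strength, shape):
+      # Noise from the base seed and from the variation seed.
+      base = torch.randn(shape, generator=torch.Generator().manual_seed(seed))
+      vari = torch.randn(shape, generator=torch.Generator().manual_seed(variation_seed))
+      # Linear blend: 0.0 -> purely the base seed, 1.0 -> purely the variation seed.
+      mixed = (1.0 - variation_strength) * base + variation_strength * vari
+      # Rescale so the blended noise keeps roughly unit variance for the sampler.
+      return mixed / mixed.std()
+  ```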
+ * `variation_seed` and `variation_strength` - The initial noise generated by the seed is blended toward the noise of `variation_seed` by `variation_strength`. If `variation_strength` is 0, the result relies only on the influence of the seed, and if `variation_strength` is 1.0, it is influenced solely by `variation_seed`. (A minimal sketch of this blending appears at the end of this README.) + * These parameters are used when you want to maintain the composition of an image generated by the seed but wish to introduce slight changes. + +* Prompt Support - These are nodes for supporting prompt processing. + * `Load Prompts From Dir (Inspire)`: Sequentially reads prompt files from the specified directory. The output it returns is ZIPPED_PROMPT. + * Specify the directories located under `ComfyUI-Inspire-Pack/prompts/` + * One prompt file can contain multiple prompts separated by `---`. + * e.g. `prompts/example` + * `Load Prompts From File (Inspire)`: Sequentially reads prompts from the specified file. The output it returns is ZIPPED_PROMPT. + * Specify the file located under `ComfyUI-Inspire-Pack/prompts/` + * e.g. `prompts/example/prompt2.txt` + * `Unzip Prompt (Inspire)`: Separates ZIPPED_PROMPT into `positive`, `negative`, and name components. + * `positive` and `negative` represent text prompts, while `name` represents the name of the prompt. When loaded from a file using `Load Prompts From File (Inspire)`, the name corresponds to the file name. + * `Zip Prompt (Inspire)`: Creates ZIPPED_PROMPT from positive, negative, and name_opt. + * If name_opt is omitted, it is treated as an empty name. + * `Prompt Extractor (Inspire)`: This node reads prompt information from the image's metadata. Since it retrieves all the text, you need to directly specify the prompts to be used for `positive` and `negative` as indicated in the info. + * `Global Seed (Inspire)`: This node controls the global seed without a separate connection line. It only controls widgets named 'seed' or 'noise_seed'. Additionally, if 'control_before_generate' is checked, it controls the seed before executing the prompt. + * Seeds that have been converted into inputs are excluded from the target. If you want to control such a seed separately, convert it into an input and control it on its own. + * `Bind [ImageList, PromptList] (Inspire)`: Binds an image list and a zipped prompt list to export `image`, `positive`, `negative`, and `prompt_label` in a list format. If there are more prompts than images, the excess prompts are ignored, and if there are not enough, the remainder is filled with default input based on the images. + * `Wildcard Encode (Inspire)`: The combination node of [ImpactWildcardEncode](https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/ImpactWildcard.md) and BlenderNeko's [CLIP Text Encode (Advanced)](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb). + * To use this node, you need both the [Impact Pack](https://github.com/ltdrdata/ComfyUI-Impact-Pack) and the [Advanced CLIP Text Encode](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb) extensions. + * This node is identical to `ImpactWildcardEncode`, but it encodes using `CLIP Text Encode (Advanced)` instead of ComfyUI's default CLIP Text Encode. + * Requirement: Impact Pack V4.18.6 or above + * `Prompt Builder (Inspire)`: A convenience node that allows you to easily assemble prompts by selecting categories and presets. To modify the presets, edit the `ComfyUI-InspirePack/resources/prompt-builder.yaml` file.
+ * `Seed Explorer (Inspire)`: This node helps explore seeds by allowing you to adjust the variation seed gradually in a prompt-like form. + * This feature is designed for taking a seed that you like, adding slight variations, and then modifying further from there while exploring. + * In the `seed_prompt`, the first seed is considered the initial seed; its reflection rate is omitted and always defaults to 1.0. + * Entries are separated by commas, and from the second seed onward each entry should follow the format `seed:strength`. + * Pressing the "Add to prompt" button appends `additional_seed:additional_strength` to the prompt. + +* Regional Nodes - These nodes simplify the application of prompts by region. + * Regional Sampler - These nodes assist in easy utilization of the regional sampler in the `Impact Pack`. + * `Regional Prompt Simple (Inspire)`: This node takes `mask` and `basic_pipe` as inputs and simplifies the creation of `REGIONAL_PROMPTS`. + * `Regional Prompt By Color Mask (Inspire)`: Similar to `Regional Prompt Simple (Inspire)`, but this node accepts a color mask image as input and defines the region from the specified color value, instead of receiving the mask directly. + * The color value can only be in the form of a hex code like #FFFF00 or a decimal number. + * Regional Conditioning - These nodes simplify the use of `Conditioning (Set Mask)`. + * `Regional Conditioning Simple (Inspire)` + * `Regional Conditioning By Color Mask (Inspire)` + * Regional IPAdapter - These nodes facilitate convenient use of the attn_mask feature of the `ComfyUI IPAdapter Plus` custom nodes. + * To use these nodes, you need to install the [ComfyUI IPAdapter Plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension. + * `Regional IPAdapter Mask (Inspire)`, `Regional IPAdapter By Color Mask (Inspire)` + * `Regional IPAdapter Encoded Mask (Inspire)`, `Regional IPAdapter Encoded By Color Mask (Inspire)`: accept `embeds` instead of `image` + * Regional Seed Explorer - These nodes restrict the variation through a seed prompt, applying it only to the masked areas. + * `Regional Seed Explorer By Mask (Inspire)` + * `Regional Seed Explorer By Color Mask (Inspire)` + +* Image Util + * `Load Image Batch From Dir (Inspire)`: This is almost the same as `LoadImagesFromDirectory` of [ComfyUI-Advanced-Controlnet](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet), just a modified version. Note that this node forcibly normalizes the size of each loaded image to match the size of the first image, even if they are not the same size, in order to create a batch image. + * `Load Image List From Dir (Inspire)`: This is almost the same as `Load Image Batch From Dir (Inspire)`. However, this node loads data in a list format, not as a batch, so it returns images at their original size without normalizing the size. + * `Load Image (Inspire)`: This node is similar to LoadImage, but the image itself is stored in the workflow, making it easier to reproduce image generation on other computers. + * `Change Image Batch Size (Inspire)`: Changes the batch size of an image batch. + * `simple`: if `batch_size` is larger than the batch size of the input image, the last frame is duplicated. If it is smaller, the batch is simply cropped. + +* KSampler Progress - In this KSampler, the latents generated over the course of sampling are collected as a latent batch.
By using the `Video Combine` node from [ComfyUI-VideoHelperSuite](https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite), you can create a video from the progress. + +* Backend Cache - Nodes for storing arbitrary data from the backend in a cache and sharing it across multiple workflows. (See the cache-structure sketch at the end of this README.) + * `Cache Backend Data (Inspire)`: Stores any backend data in the cache under a string key. Tags are for quick reference. + * `Retrieve Backend Data (Inspire)`: Retrieves cached backend data using a string key. + * `Remove Backend Data (Inspire)`: Removes cached backend data. + * Deletion in this node only removes the entry from the cache managed by Inspire; if the data is still in use elsewhere, it won't be completely removed from memory. + * `signal_opt` is used to control the order of execution for this node; it will still run without a `signal_opt` input. + * When using '*' as the key, it clears all data. + * `Show Cached Info (Inspire)`: Displays information about cached data. + * `Cache Backend Data [NumberKey] (Inspire)`, `Retrieve Backend Data [NumberKey] (Inspire)`, `Remove Backend Data [NumberKey] (Inspire)`: These nodes are provided for convenience in automation, allowing the use of numbers as keys. + * `Cache Backend Data List (Inspire)`, `Cache Backend Data List [NumberKey] (Inspire)`: These nodes allow list input for the backend cache and cache the combined list; when retrieving such data through them, the output is in the form of a list. Conversely, nodes like `Cache Backend Data [NumberKey] (Inspire)` that do not accept list input will, if given a list input, cache each element redundantly and overwrite existing data, so a unique key is needed for each element to prevent this. + +* Util - Utilities + * `Float Range (Inspire)`: Creates a float list that increases by `step` from `start` to `stop`. At most `limit` values are created, and when `ensure_end` is enabled, the last value of the list becomes `stop`. + * `ToIPAdapterPipe (Inspire)`, `FromIPAdapterPipe (Inspire)`: These nodes assist in conveniently bundling the ipadapter_model, clip_vision, and model required for applying IPAdapter. + * `List Counter (Inspire)`: As each item of a list passes through this node, it increments a counter by one and outputs the integer value. + + +## Credits + +ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI. + +hako-mikan/[sd-webui-lora-block-weight](https://github.com/hako-mikan/sd-webui-lora-block-weight) - The original idea for LoraBlockWeight came from here, and it is based on the syntax of this extension. + +jags111/[efficiency-nodes-comfyui](https://github.com/jags111/ComfyUI-Jags-workflows) - The `XY Input` provided by the Inspire Pack supports the `XY Plot` of this node. + +Fannovel16/[comfyui_controlnet_aux](https://github.com/Fannovel16/comfyui_controlnet_aux) - The wrapper for the controlnet preprocessors in the Inspire Pack depends on these nodes. + +Kosinkadink/[ComfyUI-Advanced-Controlnet](https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet) - The `Load Images From Dir (Inspire)` code came from here.
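## Examples

The snippets below are illustrative sketches only, not nodes or APIs of this pack; any helper names are hypothetical.

How `variation_seed` / `variation_strength` blend noise (mirroring the linear interpolation performed in `inspire/libs/utils.py`):

```python
import torch

def blend_variation_noise(seed, variation_seed, variation_strength, shape):
    # Hypothetical helper: generate both noises on the CPU, then linearly
    # interpolate them, as apply_variation() does in inspire/libs/utils.py.
    base = torch.randn(shape, generator=torch.manual_seed(seed))
    variation = torch.randn(shape, generator=torch.manual_seed(variation_seed))
    # 0.0 -> only the base seed's noise; 1.0 -> only the variation noise
    return (1 - variation_strength) * base + variation_strength * variation

noise = blend_variation_noise(seed=42, variation_seed=7,
                              variation_strength=0.2, shape=(1, 4, 64, 64))
```

How the backend cache stores entries (matching the structure used in `inspire/backend_support.py`): each key maps to `(tag, (is_list, data))`, and `'*'` is reserved for clearing the whole cache.

```python
# Hypothetical payloads; in practice these are models, latents, conditioning, etc.
payload_a, payload_b = "payload-a", "payload-b"

cache = {}
cache["girl latent 3"] = ("my tag", (False, payload_a))           # single data
cache["latent list"] = ("batch", (True, [payload_a, payload_b]))  # list data
is_list, data = cache["girl latent 3"][1]  # retrieval unwraps the is_list flag
```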
diff --git a/custom_nodes/ComfyUI-Inspire-Pack/__init__.py b/custom_nodes/ComfyUI-Inspire-Pack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5011c451d947fc676f1c8ad576eee26537559b20 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/__init__.py @@ -0,0 +1,35 @@ +""" +@author: Dr.Lt.Data +@title: Inspire Pack +@nickname: Inspire Pack +@description: This extension provides various nodes to support Lora Block Weight and the Impact Pack. +""" + +import importlib + +print(f"### Loading: ComfyUI-Inspire-Pack (V0.48.2)") + +node_list = [ + "lora_block_weight", + "segs_support", + "a1111_compat", + "prompt_support", + "inspire_server", + "image_util", + "regional_nodes", + "sampler_nodes", + "backend_support", + "list_nodes", +] + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + +for module_name in node_list: + imported_module = importlib.import_module(".inspire.{}".format(module_name), __name__) + + NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS} + NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS} + +WEB_DIRECTORY = "./js" +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] diff --git a/custom_nodes/ComfyUI-Inspire-Pack/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95332c367fad92f8581b6d86ee06402e113f73a Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/a1111_compat.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/a1111_compat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9779bed2ced86d93f6c587e61bf151aa64e25c86 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/a1111_compat.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/backend_support.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/backend_support.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44b31f21ee9ab5935472085a06fa4a2778a9c062 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/backend_support.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/image_util.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/image_util.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2682ac754a2474a3c87d75ff8ae456c4b666245 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/image_util.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/inspire_server.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/inspire_server.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcc61e3bb3229db6957835d00b5b164ec23ef68e Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/inspire_server.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/list_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/list_nodes.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e9f25c6cdb3abb80dbc649d13150ff9bf4507c7d Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/list_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/lora_block_weight.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/lora_block_weight.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..420235c89b987417ac658b00e65ddbd6a17c5558 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/lora_block_weight.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/prompt_support.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/prompt_support.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94abf5c9a47e0bce22d23627d248dd83fbc60070 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/prompt_support.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/regional_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/regional_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d3867bd28b40e5a03cb01f9efea0bcf3ab70b9c Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/regional_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/sampler_nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/sampler_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d5dd4718841c3df1b5b384dadadce6b4f156b9 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/sampler_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/segs_support.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/segs_support.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..182a3563c59fc1860242f0734d71f5bfc7bb8395 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/__pycache__/segs_support.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/a1111_compat.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/a1111_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..2a731682729c71ab71a619ce2ef7ca35d48cc200 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/a1111_compat.py @@ -0,0 +1,134 @@ +import comfy +import torch +import numpy as np +import latent_preview +from .libs import utils + + +def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, + noise_mode="CPU", disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, + incremental_seed_mode="comfy", variation_seed=None, variation_strength=None, noise=None): + device = comfy.model_management.get_torch_device() + noise_device = "cpu" if noise_mode == "CPU" else device + latent_image = latent["samples"] + + if noise is None: + if disable_noise: + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device=noise_device) + else: + batch_inds = latent["batch_index"] if "batch_index" in latent else None + noise = utils.prepare_noise(latent_image, seed, batch_inds, noise_device, incremental_seed_mode, variation_seed=variation_seed, 
variation_strength=variation_strength) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + preview_format = "JPEG" + if preview_format not in ["JPEG", "PNG"]: + preview_format = "JPEG" + + previewer = latent_preview.get_previewer(device, model.model.latent_format) + + pbar = comfy.utils.ProgressBar(steps) + def callback(step, x0, x, total_steps): + preview_bytes = None + if previewer: + preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) + pbar.update_absolute(step + 1, total_steps, preview_bytes) + + samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, + force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed) + out = latent.copy() + out["samples"] = samples + return (out, ) + + +class KSampler_inspire: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "noise_mode": (["GPU(=A1111)", "CPU"],), + "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],), + "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "sample" + + CATEGORY = "InspirePack/a1111_compat" + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, noise_mode, batch_seed_mode="comfy", variation_seed=None, variation_strength=None): + return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, noise_mode, incremental_seed_mode=batch_seed_mode, variation_seed=variation_seed, variation_strength=variation_strength) + + +class KSamplerAdvanced_inspire: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "noise_mode": (["GPU(=A1111)", "CPU"],), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],), + "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "variation_strength": 
("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": + { + "noise_opt": ("NOISE",), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "sample" + + CATEGORY = "InspirePack/a1111_compat" + + def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, noise_mode, return_with_leftover_noise, denoise=1.0, batch_seed_mode="comfy", variation_seed=None, variation_strength=None, noise_opt=None): + force_full_denoise = True + + if return_with_leftover_noise: + force_full_denoise = False + + disable_noise = False + + if not add_noise: + disable_noise = True + + return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, + force_full_denoise=force_full_denoise, noise_mode=noise_mode, incremental_seed_mode=batch_seed_mode, + variation_seed=variation_seed, variation_strength=variation_strength, noise=noise_opt) + + +NODE_CLASS_MAPPINGS = { + "KSampler //Inspire": KSampler_inspire, + "KSamplerAdvanced //Inspire": KSamplerAdvanced_inspire, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "KSampler //Inspire": "KSampler (inspire)", + "KSamplerAdvanced //Inspire": "KSamplerAdvanced (inspire)" +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/backend_support.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/backend_support.py new file mode 100644 index 0000000000000000000000000000000000000000..5386e0e5b23cdaabaf5b2c8465bbc2da7da937e3 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/backend_support.py @@ -0,0 +1,286 @@ +from .libs.utils import any_typ +from server import PromptServer + +cache = {} + + +class CacheBackendData: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 'model a', 'chunli lora', 'girl latent 3', ...)"}), + "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}), + "data": (any_typ,), + } + } + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("data opt",) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, key, tag, data): + global cache + + if key == '*': + print(f"[Inspire Pack] CacheBackendData: '*' is reserved key. Cannot use that key") + + cache[key] = (tag, (False, data)) + return (data,) + + +class CacheBackendDataNumberKey: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}), + "data": (any_typ,), + } + } + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("data opt",) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, key, tag, data): + global cache + cache[key] = (tag, (False, data)) + return (data,) + + +class CacheBackendDataList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 
'model a', 'chunli lora', 'girl latent 3', ...)"}), + "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}), + "data": (any_typ,), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("data opt",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, key, tag, data): + global cache + + # INPUT_IS_LIST means key/tag arrive as lists, so check the first element + if key[0] == '*': + print(f"[Inspire Pack] CacheBackendDataList: '*' is reserved key. Cannot use that key") + return (data,) + + cache[key[0]] = (tag[0], (True, data)) + return (data,) + + +class CacheBackendDataNumberKeyList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}), + "data": (any_typ,), + } + } + + INPUT_IS_LIST = True + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("data opt",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, key, tag, data): + global cache + cache[key[0]] = (tag[0], (True, data)) + return (data,) + + +class RetrieveBackendData: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 'model a', 'chunli lora', 'girl latent 3', ...)"}), + } + } + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("data",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + def doit(self, key): + global cache + + is_list, data = cache[key][1] + + if is_list: + return (data,) + else: + return ([data],) + + +class RetrieveBackendDataNumberKey(RetrieveBackendData): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + +class RemoveBackendData: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("STRING", {"multiline": False, "placeholder": "Input data key ('*' = clear all)"}), + }, + "optional": { + "signal_opt": (any_typ,), + } + } + + RETURN_TYPES = (any_typ,) + RETURN_NAMES = ("signal",) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, key, signal_opt=None): + global cache + + if key == '*': + cache = {} + elif key in cache: + del cache[key] + else: + print(f"[Inspire Pack] RemoveBackendData: invalid data key {key}") + + return (signal_opt,) + + +class RemoveBackendDataNumberKey(RemoveBackendData): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "signal_opt": (any_typ,), + } + } + + def doit(self, key, signal_opt=None): + global cache + + if key in cache: + del cache[key] + else: + print(f"[Inspire Pack] RemoveBackendDataNumberKey: invalid data key {key}") + + return (signal_opt,) + + +class ShowCachedInfo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "cache_info": ("STRING", {"multiline": True}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = () + + FUNCTION = "doit" + + CATEGORY = "InspirePack/Backend" + + OUTPUT_NODE = True + + def doit(self, cache_info, unique_id): + global cache + + text1 = "---- [String Key Caches] ----\n" + text2 = "---- [Number Key Caches] ----\n" + for k, v in cache.items(): + if v[0] == '': + tag = 'N/A(tag)' + else: + tag = v[0] + + if isinstance(k, str): + text1 += f'{k}: {tag}\n' + else: +
text2 += f'{k}: {tag}\n' + + text = text1 + "\n" + text2 + PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": unique_id, "widget_name": "cache_info", "type": "text", "data": text}) + + return () + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + +NODE_CLASS_MAPPINGS = { + "CacheBackendData //Inspire": CacheBackendData, + "CacheBackendDataNumberKey //Inspire": CacheBackendDataNumberKey, + "CacheBackendDataList //Inspire": CacheBackendDataList, + "CacheBackendDataNumberKeyList //Inspire": CacheBackendDataNumberKeyList, + "RetrieveBackendData //Inspire": RetrieveBackendData, + "RetrieveBackendDataNumberKey //Inspire": RetrieveBackendDataNumberKey, + "RemoveBackendData //Inspire": RemoveBackendData, + "RemoveBackendDataNumberKey //Inspire": RemoveBackendDataNumberKey, + "ShowCachedInfo //Inspire": ShowCachedInfo, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "CacheBackendData //Inspire": "Cache Backend Data (Inspire)", + "CacheBackendDataNumberKey //Inspire": "Cache Backend Data [NumberKey] (Inspire)", + "CacheBackendDataList //Inspire": "Cache Backend Data List (Inspire)", + "CacheBackendDataNumberKeyList //Inspire": "Cache Backend Data List [NumberKey] (Inspire)", + "RetrieveBackendData //Inspire": "Retrieve Backend Data (Inspire)", + "RetrieveBackendDataNumberKey //Inspire": "Retrieve Backend Data [NumberKey] (Inspire)", + "RemoveBackendData //Inspire": "Remove Backend Data (Inspire)", + "RemoveBackendDataNumberKey //Inspire": "Remove Backend Data [NumberKey] (Inspire)", + "ShowCachedInfo //Inspire": "Show Cached Info (Inspire)", +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/image_util.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/image_util.py new file mode 100644 index 0000000000000000000000000000000000000000..82c39e22ce120e331963a84262d955984c541783 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/image_util.py @@ -0,0 +1,224 @@ +import os +from PIL import Image +from PIL import ImageOps +import numpy as np +import torch +import comfy +import folder_paths +import base64 +from io import BytesIO + +class LoadImagesFromDirBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT") + FUNCTION = "load_images" + + CATEGORY = "image" + + def load_images(self, directory: str, image_load_cap: int = 0, start_index: int = 0): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + dir_files = os.listdir(directory) + if len(dir_files) == 0: + raise FileNotFoundError(f"No files in directory '{directory}'.") + + # Filter files by extension + valid_extensions = ['.jpg', '.jpeg', '.png', '.webp'] + dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)] + + dir_files = sorted(dir_files) + dir_files = [os.path.join(directory, x) for x in dir_files] + + # start at start_index + dir_files = dir_files[start_index:] + + images = [] + masks = [] + + limit_images = False + if image_load_cap > 0: + limit_images = True + image_count = 0 + + for image_path in dir_files: + # skip subdirectories + if os.path.isdir(image_path): + continue + if limit_images and image_count >= image_load_cap: + break + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + images.append(image) + masks.append(mask) + image_count += 1 + + # return masks and count as well, so the outputs match RETURN_TYPES + if len(images) == 1: + return (images[0], masks[0].unsqueeze(0), 1) + elif len(images) > 1: + image1 = images[0] + mask1 = masks[0].unsqueeze(0) + for image2, mask2 in zip(images[1:], masks[1:]): + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1) + image1 = torch.cat((image1, image2), dim=0) + if mask1.shape[1:] != mask2.shape: + mask2 = torch.nn.functional.interpolate(mask2[None, None], size=(mask1.shape[1], mask1.shape[2]), mode="bilinear").squeeze(0) + else: + mask2 = mask2.unsqueeze(0) + mask1 = torch.cat((mask1, mask2), dim=0) + return (image1, mask1, len(images))
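# NOTE (illustrative, not part of the original node code): both directory
# loaders return images as float32 tensors in [0, 1] shaped [B, H, W, C],
# and an alpha channel becomes a mask where 1.0 marks transparent pixels
# (hence the `1. - alpha` above). A hypothetical standalone use, assuming
# a directory of PNG files:
#
#   loader = LoadImagesFromDirBatch()
#   images, masks, count = loader.load_images("/path/to/pngs", image_load_cap=8)
#   print(images.shape)  # e.g. torch.Size([8, 512, 512, 3])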
class LoadImagesFromDirList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "directory": ("STRING", {"default": ""}), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + OUTPUT_IS_LIST = (True, True) + + FUNCTION = "load_images" + + CATEGORY = "image" + + def load_images(self, directory: str, image_load_cap: int = 0, start_index: int = 0): + if not os.path.isdir(directory): + raise FileNotFoundError(f"Directory '{directory}' cannot be found.") + dir_files = os.listdir(directory) + if len(dir_files) == 0: + raise FileNotFoundError(f"No files in directory '{directory}'.") + + # Filter files by extension + valid_extensions = ['.jpg', '.jpeg', '.png', '.webp'] + dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)] + + dir_files = sorted(dir_files) + dir_files = [os.path.join(directory, x) for x in dir_files] + + # start at start_index + dir_files = dir_files[start_index:] + + images = [] + masks = [] + + limit_images = False + if image_load_cap > 0: + limit_images = True + image_count = 0 + + for image_path in dir_files: + # skip subdirectories + if os.path.isdir(image_path): + continue + if limit_images and image_count >= image_load_cap: + break + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + images.append(image) + masks.append(mask) + image_count += 1 + + return images, masks + + +class LoadImageInspire: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": (sorted(files) + ["#DATA"], {"image_upload": True}), + "image_data": ("STRING", {"multiline": False}), + } + } + + CATEGORY = "InspirePack/image" + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "load_image" + + def load_image(self, image, image_data): + image_data = base64.b64decode(image_data.split(",")[1]) + i = Image.open(BytesIO(image_data)) + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1.
- torch.from_numpy(mask) + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + return (image, mask.unsqueeze(0)) + + +class ChangeImageBatchSize: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": ("IMAGE",), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}), + "mode": (["simple"],) + } + } + + CATEGORY = "InspirePack/image" + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "load_image" + + def load_image(self, image, batch_size, mode): + if mode == "simple": + if len(image) < batch_size: + last_frame = image[-1].unsqueeze(0).expand(batch_size - len(image), -1, -1, -1) + image = torch.concat((image, last_frame), dim=0) + else: + image = image[:batch_size, :, :, :] + return (image,) + else: + print(f"[WARN] ChangeImageBatchSize: Unknown mode `{mode}` - ignored") + return (image, ) + + +NODE_CLASS_MAPPINGS = { + "LoadImagesFromDir //Inspire": LoadImagesFromDirBatch, + "LoadImageListFromDir //Inspire": LoadImagesFromDirList, + "LoadImage //Inspire": LoadImageInspire, + "ChangeImageBatchSize //Inspire": ChangeImageBatchSize, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "LoadImagesFromDir //Inspire": "Load Image Batch From Dir (Inspire)", + "LoadImageListFromDir //Inspire": "Load Image List From Dir (Inspire)", + "ChangeImageBatchSize //Inspire": "Change Image Batch Size (Inspire)" +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/inspire_server.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/inspire_server.py new file mode 100644 index 0000000000000000000000000000000000000000..b6b931414b87be9bd0d43d091274a5e9a44cb94c --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/inspire_server.py @@ -0,0 +1,232 @@ +import random + +import nodes +import server +from enum import Enum +from . 
import prompt_support +from aiohttp import web + + +@server.PromptServer.instance.routes.get("/inspire/prompt_builder") +def prompt_builder(request): + result = {"presets": []} + + if "category" in request.rel_url.query: + category = request.rel_url.query["category"] + if category in prompt_support.prompt_builder_preset: + result['presets'] = prompt_support.prompt_builder_preset[category] + + return web.json_response(result) + + +class SGmode(Enum): + FIX = 1 + INCR = 2 + DECR = 3 + RAND = 4 + + +class SeedGenerator: + def __init__(self, base_value, action): + self.base_value = base_value + + if action == "fixed" or action == "increment" or action == "decrement" or action == "randomize": + self.action = SGmode.FIX + elif action == 'increment for each node': + self.action = SGmode.INCR + elif action == 'decrement for each node': + self.action = SGmode.DECR + elif action == 'randomize for each node': + self.action = SGmode.RAND + + def next(self): + seed = self.base_value + + if self.action == SGmode.INCR: + self.base_value += 1 + if self.base_value > 1125899906842624: + self.base_value = 0 + elif self.action == SGmode.DECR: + self.base_value -= 1 + if self.base_value < 0: + self.base_value = 1125899906842624 + elif self.action == SGmode.RAND: + self.base_value = random.randint(0, 1125899906842624) + + return seed + + +def control_seed(v): + action = v['inputs']['action'] + value = v['inputs']['value'] + + if action == 'increment' or action == 'increment for each node': + value += 1 + if value > 1125899906842624: + value = 0 + elif action == 'decrement' or action == 'decrement for each node': + value -= 1 + if value < 0: + value = 1125899906842624 + elif action == 'randomize' or action == 'randomize for each node': + value = random.randint(0, 1125899906842624) + + v['inputs']['value'] = value + + return value + + +def prompt_seed_update(json_data): + try: + seed_widget_map = json_data['extra_data']['extra_pnginfo']['workflow']['seed_widgets'] + except: + return None + + value = None + mode = None + node = None + action = None + + for k, v in json_data['prompt'].items(): + if 'class_type' not in v: + continue + + cls = v['class_type'] + if cls == 'GlobalSeed //Inspire': + mode = v['inputs']['mode'] + action = v['inputs']['action'] + value = v['inputs']['value'] + node = k, v + + # control before generate + if mode is not None and mode: + value = control_seed(node[1]) + + if value is not None: + seed_generator = SeedGenerator(value, action) + + for k, v in json_data['prompt'].items(): + for k2, v2 in v['inputs'].items(): + if isinstance(v2, str) and '$GlobalSeed.value$' in v2: + v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value)) + + if k not in seed_widget_map: + continue + + if 'seed' in v['inputs']: + if isinstance(v['inputs']['seed'], int): + v['inputs']['seed'] = seed_generator.next() + + if 'noise_seed' in v['inputs']: + if isinstance(v['inputs']['noise_seed'], int): + v['inputs']['noise_seed'] = seed_generator.next() + + for k2, v2 in v['inputs'].items(): + if isinstance(v2, str) and '$GlobalSeed.value$' in v2: + v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value)) + + # control after generate + if mode is not None and not mode: + control_seed(node[1]) + + return value is not None + + +def workflow_seed_update(json_data): + nodes = json_data['extra_data']['extra_pnginfo']['workflow']['nodes'] + seed_widget_map =
json_data['extra_data']['extra_pnginfo']['workflow']['seed_widgets'] + prompt = json_data['prompt'] + + updated_seed_map = {} + value = None + for node in nodes: + node_id = str(node['id']) + if node_id in prompt: + if node['type'] == 'GlobalSeed //Inspire': + value = prompt[node_id]['inputs']['value'] + node['widgets_values'][0] = value + elif node_id in seed_widget_map: + widget_idx = seed_widget_map[node_id] + + if 'noise_seed' in prompt[node_id]['inputs']: + seed = prompt[node_id]['inputs']['noise_seed'] + else: + seed = prompt[node_id]['inputs']['seed'] + + node['widgets_values'][widget_idx] = seed + updated_seed_map[node_id] = seed + + server.PromptServer.instance.send_sync("inspire-global-seed", {"id": node_id, "value": value, "seed_map": updated_seed_map}) + + +def workflow_loadimage_update(json_data): + prompt = json_data['prompt'] + + for v in prompt.values(): + if 'class_type' in v and v['class_type'] == 'LoadImage //Inspire': + v['inputs']['image'] = "#DATA" + + +def populate_wildcards(json_data): + prompt = json_data['prompt'] + + if 'ImpactWildcardProcessor' in nodes.NODE_CLASS_MAPPINGS: + if not hasattr(nodes.NODE_CLASS_MAPPINGS['ImpactWildcardProcessor'], 'process'): + print(f"[Inspire Pack] Your Impact Pack is outdated. Please update to the latest version.") + return + + wildcard_process = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardProcessor'].process + updated_widget_values = {} + for k, v in prompt.items(): + if 'class_type' in v and v['class_type'] == 'WildcardEncode //Inspire': + inputs = v['inputs'] + if inputs['mode'] and isinstance(inputs['populated_text'], str): + if isinstance(inputs['seed'], list): + try: + input_node = prompt[inputs['seed'][0]] + if input_node['class_type'] == 'ImpactInt': + input_seed = int(input_node['inputs']['value']) + if not isinstance(input_seed, int): + continue + else: + print( + f"[Impact Pack] Only ImpactInt and Primitive Node are allowed as the seed for '{v['class_type']}'. It will be ignored. 
") + continue + except: + continue + else: + input_seed = int(inputs['seed']) + + inputs['populated_text'] = wildcard_process(text=inputs['wildcard_text'], seed=input_seed) + inputs['mode'] = False + + server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "populated_text", "type": "text", "data": inputs['populated_text']}) + updated_widget_values[k] = inputs['populated_text'] + + if 'extra_data' in json_data and 'extra_pnginfo' in json_data['extra_data']: + for node in json_data['extra_data']['extra_pnginfo']['workflow']['nodes']: + key = str(node['id']) + if key in updated_widget_values: + node['widgets_values'][3] = updated_widget_values[key] + node['widgets_values'][4] = False + + +def onprompt(json_data): + prompt_support.list_counter_map = {} + + is_changed = prompt_seed_update(json_data) + if is_changed: + workflow_seed_update(json_data) + + workflow_loadimage_update(json_data) + populate_wildcards(json_data) + + return json_data + + +server.PromptServer.instance.add_on_prompt_handler(onprompt) + + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/__pycache__/utils.cpython-311.pyc b/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d67a5a57c2929eab46d95844d7a270f2773bea03 Binary files /dev/null and b/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/utils.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..84600e0612796d0f7982f8570ea3b00592623637 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/libs/utils.py @@ -0,0 +1,160 @@ +from PIL import Image, ImageDraw, ImageFilter +import torch +import numpy as np +from . import utils + + +def apply_variation_noise(latent_image, noise_device, variation_seed, variation_strength, mask=None): + latent_size = latent_image.size() + latent_size_1batch = [1, latent_size[1], latent_size[2], latent_size[3]] + + if noise_device == "cpu": + variation_generator = torch.manual_seed(variation_seed) + else: + torch.cuda.manual_seed(variation_seed) + variation_generator = None + + variation_latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout, + generator=variation_generator, device=noise_device) + + variation_noise = variation_latent.expand(latent_image.size()[0], -1, -1, -1) + + if variation_strength == 0: + return latent_image + elif mask is None: + result = (1 - variation_strength) * latent_image + variation_strength * variation_noise + else: + # this seems precision is not enough when variation_strength is 0.0 + result = (mask == 1).float() * ((1 - variation_strength) * latent_image + variation_strength * variation_noise * mask) + (mask == 0).float() * latent_image + + return result + + +def prepare_noise(latent_image, seed, noise_inds=None, noise_device="cpu", incremental_seed_mode="comfy", variation_seed=None, variation_strength=None): + """ + creates random noise given a latent image and a seed. 
+ optional arg skip can be used to skip and discard x number of noise generations for a given seed + """ + + latent_size = latent_image.size() + latent_size_1batch = [1, latent_size[1], latent_size[2], latent_size[3]] + + if variation_strength is not None and variation_strength > 0 or incremental_seed_mode.startswith("variation str inc"): + if noise_device == "cpu": + variation_generator = torch.manual_seed(variation_seed) + else: + torch.cuda.manual_seed(variation_seed) + variation_generator = None + + variation_latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout, + generator=variation_generator, device=noise_device) + else: + variation_latent = None + + def apply_variation(input_latent, strength_up=None): + if variation_latent is None: + return input_latent + else: + strength = variation_strength + + if strength_up is not None: + strength += strength_up + + variation_noise = variation_latent.expand(input_latent.size()[0], -1, -1, -1) + result = (1 - strength) * input_latent + strength * variation_noise + return result + + # method: incremental seed batch noise + if noise_inds is None and incremental_seed_mode == "incremental": + batch_cnt = latent_size[0] + + latents = None + for i in range(batch_cnt): + if noise_device == "cpu": + generator = torch.manual_seed(seed+i) + else: + torch.cuda.manual_seed(seed+i) + generator = None + + latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout, + generator=generator, device=noise_device) + + latent = apply_variation(latent) + + if latents is None: + latents = latent + else: + latents = torch.cat((latents, latent), dim=0) + + return latents + + # method: incremental variation batch noise + elif noise_inds is None and incremental_seed_mode.startswith("variation str inc"): + batch_cnt = latent_size[0] + + latents = None + for i in range(batch_cnt): + if noise_device == "cpu": + generator = torch.manual_seed(seed) + else: + torch.cuda.manual_seed(seed) + generator = None + + latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout, + generator=generator, device=noise_device) + + step = float(incremental_seed_mode[18:]) + latent = apply_variation(latent, step*i) + + if latents is None: + latents = latent + else: + latents = torch.cat((latents, latent), dim=0) + + return latents + + # method: comfy batch noise + if noise_device == "cpu": + generator = torch.manual_seed(seed) + else: + torch.cuda.manual_seed(seed) + generator = None + + if noise_inds is None: + latents = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, + generator=generator, device=noise_device) + latents = apply_variation(latents) + return latents + + unique_inds, inverse = np.unique(noise_inds, return_inverse=True) + noises = [] + for i in range(unique_inds[-1] + 1): + noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, + generator=generator, device=noise_device) + if i in unique_inds: + noises.append(noise) + noises = [noises[i] for i in inverse] + noises = torch.cat(noises, axis=0) + return noises + + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + +def empty_pil_tensor(w=64, h=64): + image = Image.new("RGB", (w, h)) + draw = ImageDraw.Draw(image) + draw.rectangle((0, 0, w-1, h-1), fill=(0, 0, 0)) + return pil2tensor(image) + + +def empty_latent(): + return torch.zeros([1, 4, 8, 8]) + +# wildcard trick is 
taken from pythongossss's +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + +any_typ = AnyType("*") diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/list_nodes.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/list_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c22952e05cd9665748717e55a4a596048ad4b888 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/list_nodes.py @@ -0,0 +1,48 @@ +class FloatRange: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "start": ("FLOAT", {"default": 0.0, "min": -100.0, "max": 100.0, 'step': 0.000000001}), + "stop": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, 'step': 0.000000001}), + "step": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 100.0, 'step': 0.000000001}), + "limit": ("INT", {"default": 100, "min": 2, "max": 4096, "step": 1}), + "ensure_end": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + } + } + + RETURN_TYPES = ("FLOAT",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/util" + + def doit(self, start, stop, step, limit, ensure_end): + if start >= stop or step == 0: + return ([start], ) + + res = [] + x = start + last = x + while x <= stop and limit > 0: + res.append(x) + last = x + limit -= 1 + x += step + + if ensure_end and last != stop: + if len(res) >= limit: + res.pop() + + res.append(stop) + + return (res, ) + + +NODE_CLASS_MAPPINGS = { + "FloatRange //Inspire": FloatRange, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "FloatRange //Inspire": "Float Range (Inspire)" +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/lora_block_weight.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/lora_block_weight.py new file mode 100644 index 0000000000000000000000000000000000000000..ad3a48034e215b01126862482cd99d13bd35f51c --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/lora_block_weight.py @@ -0,0 +1,639 @@ +import folder_paths +import comfy.utils +import comfy.lora +import os +import torch +import numpy as np +import nodes +import re + +from server import PromptServer +from .libs import utils + + +def is_numeric_string(input_str): + return re.match(r'^-?\d+(\.\d+)?$', input_str) is not None + + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + +def load_lbw_preset(filename): + path = os.path.join(os.path.dirname(__file__), "..", "resources", filename) + path = os.path.abspath(path) + preset_list = [] + + if os.path.exists(path): + with open(path, 'r') as file: + for line in file: + preset_list.append(line.strip()) + + return preset_list + else: + return [] + + +class LoraLoaderBlockWeight: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + preset = ["Preset"] # 20 + preset += load_lbw_preset("lbw-preset.txt") + preset += load_lbw_preset("lbw-preset.custom.txt") + preset = [name for name in preset if not name.startswith('@')] + + lora_names = folder_paths.get_filename_list("loras") + lora_dirs = [os.path.dirname(name) for name in lora_names] + lora_dirs = ["All"] + list(set(lora_dirs)) + + return {"required": {"model": ("MODEL",), + "clip": ("CLIP", ), + "category_filter": (lora_dirs,), + "lora_name": (lora_names, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "inverse": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + 
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "A": ("FLOAT", {"default": 4.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "B": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "preset": (preset,), + "block_vector": ("STRING", {"multiline": True, "placeholder": "block weight vectors", "default": "1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1", "pysssss.autocomplete": False}), + "bypass": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + } + } + + RETURN_TYPES = ("MODEL", "CLIP", "STRING") + RETURN_NAMES = ("model", "clip", "populated_vector") + FUNCTION = "doit" + + CATEGORY = "InspirePack/LoraBlockWeight" + + @staticmethod + def validate(vectors): + if len(vectors) < 12: + return False + + for x in vectors: + if x in ['R', 'r', 'U', 'u', 'A', 'a', 'B', 'b'] or is_numeric_string(x): + continue + else: + subvectors = x.strip().split(' ') + for y in subvectors: + y = y.strip() + if y not in ['R', 'r', 'U', 'u', 'A', 'a', 'B', 'b'] and not is_numeric_string(y): + return False + + return True + + @staticmethod + def convert_vector_value(A, B, vector_value): + def simple_vector(x): + if x in ['U', 'u']: + ratio = np.random.uniform(-1.5, 1.5) + ratio = round(ratio, 2) + elif x in ['R', 'r']: + ratio = np.random.uniform(0, 3.0) + ratio = round(ratio, 2) + elif x == 'A': + ratio = A + elif x == 'a': + ratio = A/2 + elif x == 'B': + ratio = B + elif x == 'b': + ratio = B/2 + elif is_numeric_string(x): + ratio = float(x) + else: + ratio = None + + return ratio + + v = simple_vector(vector_value) + if v is not None: + ratios = [v] + else: + ratios = [simple_vector(x) for x in vector_value.split(" ")] + + return ratios + + @staticmethod + def norm_value(value): # make to int if 1.0 or 0.0 + if value == 1: + return 1 + elif value == 0: + return 0 + else: + return value + + @staticmethod + def load_lora_for_models(model, clip, lora, strength_model, strength_clip, inverse, seed, A, B, block_vector): + key_map = comfy.lora.model_lora_keys_unet(model.model) + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + loaded = comfy.lora.load_lora(lora, key_map) + + block_vector = block_vector.split(":") + if len(block_vector) > 1: + block_vector = block_vector[1] + else: + block_vector = block_vector[0] + + vector = block_vector.split(",") + vector_i = 1 + + if not LoraLoaderBlockWeight.validate(vector): + preset_dict = load_preset_dict() + if len(vector) > 0 and vector[0].strip() in preset_dict: + vector = preset_dict[vector[0].strip()].split(",") + else: + raise ValueError(f"[LoraLoaderBlockWeight] invalid block_vector '{block_vector}'") + + last_k_unet_num = None + new_modelpatcher = model.clone() + populated_ratio = strength_model + + def parse_unet_num(s): + if s[1] == '.': + return int(s[0]) + else: + return int(s) + + # sort: input, middle, output, others + input_blocks = [] + middle_blocks = [] + output_blocks = [] + others = [] + for k, v in loaded.items(): + k_unet = k[len("diffusion_model."):] + + if k_unet.startswith("input_blocks."): + k_unet_num = k_unet[len("input_blocks."):len("input_blocks.")+2] + input_blocks.append((k, v, parse_unet_num(k_unet_num), k_unet)) + elif k_unet.startswith("middle_block."): + k_unet_num = k_unet[len("middle_block."):len("middle_block.")+2] + middle_blocks.append((k, v, parse_unet_num(k_unet_num), k_unet)) + elif k_unet.startswith("output_blocks."): + k_unet_num = k_unet[len("output_blocks."):len("output_blocks.")+2] + output_blocks.append((k, v, parse_unet_num(k_unet_num), k_unet)) 
+ else: + others.append((k, v, k_unet)) + + input_blocks = sorted(input_blocks, key=lambda x: x[2]) + middle_blocks = sorted(middle_blocks, key=lambda x: x[2]) + output_blocks = sorted(output_blocks, key=lambda x: x[2]) + + # prepare patch + np.random.seed(seed % (2**31)) + populated_vector_list = [] + ratios = [] + for k, v, k_unet_num, k_unet in (input_blocks + middle_blocks + output_blocks): + if last_k_unet_num != k_unet_num and len(vector) > vector_i: + ratios = LoraLoaderBlockWeight.convert_vector_value(A, B, vector[vector_i].strip()) + ratio = ratios.pop(0) + + if inverse: + populated_ratio = 1 - ratio + else: + populated_ratio = ratio + + populated_vector_list.append(LoraLoaderBlockWeight.norm_value(populated_ratio)) + + vector_i += 1 + else: + if len(ratios) > 0: + ratio = ratios.pop(0) + + if inverse: + populated_ratio = 1 - ratio + else: + populated_ratio = ratio + + last_k_unet_num = k_unet_num + + new_modelpatcher.add_patches({k: v}, strength_model * populated_ratio) + # if inverse: + # print(f"\t{k_unet} -> inv({ratio}) ") + # else: + # print(f"\t{k_unet} -> ({ratio}) ") + + # prepare base patch + ratios = LoraLoaderBlockWeight.convert_vector_value(A, B, vector[0].strip()) + ratio = ratios.pop(0) + + if inverse: + populated_ratio = 1 - ratio + + populated_vector_list.insert(0, LoraLoaderBlockWeight.norm_value(populated_ratio)) + + for k, v, k_unet in others: + new_modelpatcher.add_patches({k: v}, strength_model * populated_ratio) + # if inverse: + # print(f"\t{k_unet} -> inv({ratio}) ") + # else: + # print(f"\t{k_unet} -> ({ratio}) ") + + new_clip = clip.clone() + new_clip.add_patches(loaded, strength_clip) + populated_vector = ','.join(map(str, populated_vector_list)) + return (new_modelpatcher, new_clip, populated_vector) + + def doit(self, model, clip, lora_name, strength_model, strength_clip, inverse, seed, A, B, preset, block_vector, bypass=False, category_filter=None): + if strength_model == 0 and strength_clip == 0 or bypass: + return (model, clip, "") + + lora_path = folder_paths.get_full_path("loras", lora_name) + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + temp = self.loaded_lora + self.loaded_lora = None + del temp + + if lora is None: + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_lora, clip_lora, populated_vector = LoraLoaderBlockWeight.load_lora_for_models(model, clip, lora, strength_model, strength_clip, inverse, seed, A, B, block_vector) + return (model_lora, clip_lora, populated_vector) + + +class XY_Capsule_LoraBlockWeight: + def __init__(self, x, y, target_vector, label, storage, params): + self.x = x + self.y = y + self.target_vector = target_vector + self.reference_vector = None + self.label = label + self.storage = storage + self.another_capsule = None + self.params = params + + def set_reference_vector(self, vector): + self.reference_vector = vector + + def set_x_capsule(self, capsule): + self.another_capsule = capsule + + def set_result(self, image, latent): + if self.another_capsule is not None: + print(f"XY_Capsule_LoraBlockWeight: ({self.another_capsule.x, self.y}) is processed.") + self.storage[(self.another_capsule.x, self.y)] = image + else: + print(f"XY_Capsule_LoraBlockWeight: ({self.x, self.y}) is processed.") + + def patch_model(self, model, clip): + lora_name, strength_model, strength_clip, inverse, block_vectors, seed, A, B, heatmap_palette, heatmap_alpha, heatmap_strength, xyplot_mode = 
self.params + + try: + if self.y == 0: + target_vector = self.another_capsule.target_vector if self.another_capsule else self.target_vector + model, clip, _ = LoraLoaderBlockWeight().doit(model, clip, lora_name, strength_model, strength_clip, inverse, + seed, A, B, "", target_vector) + elif self.y == 1: + reference_vector = self.another_capsule.reference_vector if self.another_capsule else self.reference_vector + model, clip, _ = LoraLoaderBlockWeight().doit(model, clip, lora_name, strength_model, strength_clip, inverse, + seed, A, B, "", reference_vector) + except: + self.storage[(self.another_capsule.x, self.y)] = "fail" + pass + + return model, clip + + def pre_define_model(self, model, clip, vae): + if self.y < 2: + model, clip = self.patch_model(model, clip) + + return model, clip, vae + + def get_result(self, model, clip, vae): + _, _, _, _, _, _, _, _, heatmap_palette, heatmap_alpha, heatmap_strength, xyplot_mode = self.params + + if self.y < 2: + return None + + if self.y == 2: + # diff + weighted_image = self.storage[(self.another_capsule.x, 0)] + reference_image = self.storage[(self.another_capsule.x, 1)] + + if weighted_image == "fail" or reference_image == "fail": + image = "fail" + else: + image = torch.abs(weighted_image - reference_image) + self.storage[(self.another_capsule.x, self.y)] = image + elif self.y == 3: + import matplotlib.cm as cm + # heatmap + image = self.storage[(self.another_capsule.x, 0)] + + if image == "fail": + image = utils.empty_pil_tensor(8,8) + latent = utils.empty_latent() + return (image, latent) + else: + image = image.clone() + + diff_image = torch.abs(self.storage[(self.another_capsule.x, 2)]) + + heatmap = torch.sum(diff_image, dim=3, keepdim=True) + + min_val = torch.min(heatmap) + max_val = torch.max(heatmap) + heatmap = (heatmap - min_val) / (max_val - min_val) + heatmap *= heatmap_strength + + # viridis / magma / plasma / inferno / cividis + if heatmap_palette == "magma": + colormap = cm.magma + elif heatmap_palette == "plasma": + colormap = cm.plasma + elif heatmap_palette == "inferno": + colormap = cm.inferno + elif heatmap_palette == "cividis": + colormap = cm.cividis + else: + # default: viridis + colormap = cm.viridis + + heatmap = torch.from_numpy(colormap(heatmap.squeeze())).unsqueeze(0) + heatmap = heatmap[..., :3] + + image = heatmap_alpha * heatmap + (1 - heatmap_alpha) * image + + latent = nodes.VAEEncode().encode(vae, image)[0] + return (image, latent) + + def getLabel(self): + return self.label + + +def load_preset_dict(): + preset = ["Preset"] # 20 + preset += load_lbw_preset("lbw-preset.txt") + preset += load_lbw_preset("lbw-preset.custom.txt") + + dict = {} + for x in preset: + if not x.startswith('@'): + item = x.split(':') + if len(item) > 1: + dict[item[0]] = item[1] + + return dict + + +class XYInput_LoraBlockWeight: + @staticmethod + def resolve_vector_string(vector_string, preset_dict): + vector_string = vector_string.strip() + + if vector_string in preset_dict: + return vector_string, preset_dict[vector_string] + + vector_infos = vector_string.split(':') + + if len(vector_infos) > 1: + return vector_infos[0], vector_infos[1] + elif len(vector_infos) > 0: + return vector_infos[0], vector_infos[0] + else: + return None, None + + @classmethod + def INPUT_TYPES(cls): + preset = ["Preset"] # 20 + preset += load_lbw_preset("lbw-preset.txt") + preset += load_lbw_preset("lbw-preset.custom.txt") + + default_vectors = 
"SD-NONE/SD-ALL\nSD-ALL/SD-ALL\nSD-INS/SD-ALL\nSD-IND/SD-ALL\nSD-INALL/SD-ALL\nSD-MIDD/SD-ALL\nSD-MIDD0.2/SD-ALL\nSD-MIDD0.8/SD-ALL\nSD-MOUT/SD-ALL\nSD-OUTD/SD-ALL\nSD-OUTS/SD-ALL\nSD-OUTALL/SD-ALL" + + lora_names = folder_paths.get_filename_list("loras") + lora_dirs = [os.path.dirname(name) for name in lora_names] + lora_dirs = ["All"] + list(set(lora_dirs)) + + return {"required": { + "category_filter": (lora_dirs, ), + "lora_name": (lora_names, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "inverse": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "A": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "B": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "preset": (preset,), + "block_vectors": ("STRING", {"multiline": True, "default": default_vectors, "placeholder": "{target vector}/{reference vector}", "pysssss.autocomplete": False}), + "heatmap_palette": (["viridis", "magma", "plasma", "inferno", "cividis"], ), + "heatmap_alpha": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + "heatmap_strength": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 10.0, "step": 0.01}), + "xyplot_mode": (["Simple", "Diff", "Diff+Heatmap"],), + }} + + RETURN_TYPES = ("XY", "XY") + RETURN_NAMES = ("X (vectors)", "Y (effect_compares)") + + FUNCTION = "doit" + CATEGORY = "InspirePack/LoraBlockWeight" + + def doit(self, lora_name, strength_model, strength_clip, inverse, seed, A, B, preset, block_vectors, heatmap_palette, heatmap_alpha, heatmap_strength, xyplot_mode, category_filter=None): + xy_type = "XY_Capsule" + + preset_dict = load_preset_dict() + common_params = lora_name, strength_model, strength_clip, inverse, block_vectors, seed, A, B, heatmap_palette, heatmap_alpha, heatmap_strength, xyplot_mode + + storage = {} + x_values = [] + x_idx = 0 + for block_vector in block_vectors.split("\n"): + if block_vector == "": + continue + + item = block_vector.split('/') + + if len(item) > 0: + target_vector = item[0].strip() + ref_vector = item[1].strip() if len(item) > 1 else '' + + x_item = None + label, block_vector = XYInput_LoraBlockWeight.resolve_vector_string(target_vector, preset_dict) + _, ref_block_vector = XYInput_LoraBlockWeight.resolve_vector_string(ref_vector, preset_dict) + if label is not None: + x_item = XY_Capsule_LoraBlockWeight(x_idx, 0, block_vector, label, storage, common_params) + x_idx += 1 + + if x_item is not None and ref_block_vector is not None: + x_item.set_reference_vector(ref_block_vector) + + if x_item is not None: + x_values.append(x_item) + + if xyplot_mode == "Simple": + y_values = [XY_Capsule_LoraBlockWeight(0, 0, '', 'target', storage, common_params)] + elif xyplot_mode == "Diff": + y_values = [XY_Capsule_LoraBlockWeight(0, 0, '', 'target', storage, common_params), + XY_Capsule_LoraBlockWeight(0, 1, '', 'reference', storage, common_params), + XY_Capsule_LoraBlockWeight(0, 2, '', 'diff', storage, common_params)] + else: + y_values = [XY_Capsule_LoraBlockWeight(0, 0, '', 'target', storage, common_params), + XY_Capsule_LoraBlockWeight(0, 1, '', 'reference', storage, common_params), + XY_Capsule_LoraBlockWeight(0, 2, '', 'diff', storage, common_params), + XY_Capsule_LoraBlockWeight(0, 3, '', 'heatmap', storage, common_params)] + + return ((xy_type, x_values), (xy_type, y_values), ) + + 
+class LoraBlockInfo: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", ), + "clip": ("CLIP", ), + "lora_name": (folder_paths.get_filename_list("loras"), ), + "block_info": ("STRING", {"multiline": True}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + CATEGORY = "InspirePack/LoraBlockWeight" + + OUTPUT_NODE = True + + RETURN_TYPES = () + FUNCTION = "doit" + + @staticmethod + def extract_info(model, clip, lora): + key_map = comfy.lora.model_lora_keys_unet(model.model) + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + loaded = comfy.lora.load_lora(lora, key_map) + + def parse_unet_num(s): + if s[1] == '.': + return int(s[0]) + else: + return int(s) + + input_block_count = set() + input_blocks = [] + input_blocks_map = {} + + middle_block_count = set() + middle_blocks = [] + middle_blocks_map = {} + + output_block_count = set() + output_blocks = [] + output_blocks_map = {} + + text_block_count = set() + text_blocks = [] + text_blocks_map = {} + + others = [] + for k, v in loaded.items(): + k_unet = k[len("diffusion_model."):] + + if k_unet.startswith("input_blocks."): + k_unet_num = k_unet[len("input_blocks."):len("input_blocks.")+2] + k_unet_int = parse_unet_num(k_unet_num) + + input_block_count.add(k_unet_int) + input_blocks.append(k_unet) + if k_unet_int in input_blocks_map: + input_blocks_map[k_unet_int].append(k_unet) + else: + input_blocks_map[k_unet_int] = [k_unet] + + elif k_unet.startswith("middle_block."): + k_unet_num = k_unet[len("middle_block."):len("middle_block.")+2] + k_unet_int = parse_unet_num(k_unet_num) + + middle_block_count.add(k_unet_int) + middle_blocks.append(k_unet) + if k_unet_int in middle_blocks_map: + middle_blocks_map[k_unet_int].append(k_unet) + else: + middle_blocks_map[k_unet_int] = [k_unet] + + elif k_unet.startswith("output_blocks."): + k_unet_num = k_unet[len("output_blocks."):len("output_blocks.")+2] + k_unet_int = parse_unet_num(k_unet_num) + + output_block_count.add(k_unet_int) + output_blocks.append(k_unet) + if k_unet_int in output_blocks_map: + output_blocks_map[k_unet_int].append(k_unet) + else: + output_blocks_map[k_unet_int] = [k_unet] + + elif k_unet.startswith("_model.encoder.layers."): + k_unet_num = k_unet[len("_model.encoder.layers."):len("_model.encoder.layers.")+2] + k_unet_int = parse_unet_num(k_unet_num) + + text_block_count.add(k_unet_int) + text_blocks.append(k_unet) + if k_unet_int in text_blocks_map: + text_blocks_map[k_unet_int].append(k_unet) + else: + text_blocks_map[k_unet_int] = [k_unet] + + else: + others.append(k_unet) + + text = "" + + input_blocks = sorted(input_blocks) + middle_blocks = sorted(middle_blocks) + output_blocks = sorted(output_blocks) + others = sorted(others) + + text += f"\n-------[Input blocks] ({len(input_block_count)}, Subs={len(input_blocks)})-------\n" + input_keys = sorted(input_blocks_map.keys()) + for x in input_keys: + text += f" IN{x}: {len(input_blocks_map[x])}\n" + + text += f"\n-------[Middle blocks] ({len(middle_block_count)}, Subs={len(middle_blocks)})-------\n" + middle_keys = sorted(middle_blocks_map.keys()) + for x in middle_keys: + text += f" MID{x}: {len(middle_blocks_map[x])}\n" + + text += f"\n-------[Output blocks] ({len(output_block_count)}, Subs={len(output_blocks)})-------\n" + output_keys = sorted(output_blocks_map.keys()) + for x in output_keys: + text += f" OUT{x}: {len(output_blocks_map[x])}\n" + + text += f"\n-------[Text blocks] ({len(text_block_count)}, Subs={len(text_blocks)})-------\n" + text_keys = 
sorted(text_blocks_map.keys()) + for x in text_keys: + text += f" CLIP{x}: {len(text_blocks_map[x])}\n" + + text += f"\n-------[Base blocks] ({len(others)})-------\n" + for x in others: + text += f" {x}\n" + + return text + + def doit(self, model, clip, lora_name, block_info, unique_id): + lora_path = folder_paths.get_full_path("loras", lora_name) + + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + text = LoraBlockInfo.extract_info(model, clip, lora) + + PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": unique_id, "widget_name": "block_info", "type": "text", "data": text}) + return {} + + +NODE_CLASS_MAPPINGS = { + "XY Input: Lora Block Weight //Inspire": XYInput_LoraBlockWeight, + "LoraLoaderBlockWeight //Inspire": LoraLoaderBlockWeight, + "LoraBlockInfo //Inspire": LoraBlockInfo, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "XY Input: Lora Block Weight //Inspire": "XY Input: Lora Block Weight", + "LoraLoaderBlockWeight //Inspire": "Lora Loader (Block Weight)", + "LoraBlockInfo //Inspire": "Lora Block Info", +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/prompt_support.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/prompt_support.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0de08668b4d1ec619c2ae93413e3220b99dc4b --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/prompt_support.py @@ -0,0 +1,530 @@ +import os +import re +import json +import sys +import shutil +import yaml + +from PIL import Image +import nodes +import torch + +import folder_paths +import comfy +import traceback + +from server import PromptServer +from .libs import utils + +prompt_builder_preset = {} + + +resource_path = os.path.join(os.path.dirname(__file__), "..", "resources") +resource_path = os.path.abspath(resource_path) + +prompts_path = os.path.join(os.path.dirname(__file__), "..", "prompts") +prompts_path = os.path.abspath(prompts_path) + +try: + pb_yaml_path = os.path.join(resource_path, 'prompt-builder.yaml') + pb_yaml_path_example = os.path.join(resource_path, 'prompt-builder.yaml.example') + + if not os.path.exists(pb_yaml_path): + shutil.copy(pb_yaml_path_example, pb_yaml_path) + + with open(pb_yaml_path, 'r', encoding="utf-8") as f: + prompt_builder_preset = yaml.load(f, Loader=yaml.FullLoader) +except Exception as e: + print(f"[Inspire Pack] Failed to load 'prompt-builder.yaml'") + + +class LoadPromptsFromDir: + @classmethod + def INPUT_TYPES(cls): + global prompts_path + try: + prompt_dirs = [d for d in os.listdir(prompts_path) if os.path.isdir(os.path.join(prompts_path, d))] + except Exception: + prompt_dirs = [] + + return {"required": {"prompt_dir": (prompt_dirs,)}} + + RETURN_TYPES = ("ZIPPED_PROMPT",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/prompt" + + def doit(self, prompt_dir): + global prompts_path + prompt_dir = os.path.join(prompts_path, prompt_dir) + files = [f for f in os.listdir(prompt_dir) if f.endswith(".txt")] + files.sort() + + prompts = [] + for file_name in files: + print(f"file_name: {file_name}") + try: + with open(os.path.join(prompt_dir, file_name), "r", encoding="utf-8") as file: + prompt_data = file.read() + prompt_list = re.split(r'\n\s*-+\s*\n', prompt_data) + + for prompt in prompt_list: + pattern = r"positive:(.*?)(?:\n*|$)negative:(.*)" + matches = re.search(pattern, prompt, re.DOTALL) + + if matches: + positive_text = matches.group(1).strip() + negative_text = matches.group(2).strip() + result_tuple = (positive_text, negative_text, file_name) + 
prompts.append(result_tuple) + else: + print(f"[WARN] LoadPromptsFromDir: invalid prompt format in '{file_name}'") + except Exception as e: + print(f"[ERROR] LoadPromptsFromDir: an error occurred while processing '{file_name}': {str(e)}") + + return (prompts, ) + + +class LoadPromptsFromFile: + @classmethod + def INPUT_TYPES(cls): + global prompts_path + try: + prompt_files = [] + for root, dirs, files in os.walk(prompts_path): + for file in files: + if file.endswith(".txt"): + file_path = os.path.join(root, file) + rel_path = os.path.relpath(file_path, prompts_path) + prompt_files.append(rel_path) + except Exception: + prompt_files = [] + + return {"required": {"prompt_file": (prompt_files,)}} + + RETURN_TYPES = ("ZIPPED_PROMPT",) + OUTPUT_IS_LIST = (True,) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/prompt" + + def doit(self, prompt_file): + prompt_path = os.path.join(prompts_path, prompt_file) + + prompts = [] + try: + with open(prompt_path, "r", encoding="utf-8") as file: + prompt_data = file.read() + prompt_list = re.split(r'\n\s*-+\s*\n', prompt_data) + + pattern = r"positive:(.*?)(?:\n*|$)negative:(.*)" + + for prompt in prompt_list: + matches = re.search(pattern, prompt, re.DOTALL) + + if matches: + positive_text = matches.group(1).strip() + negative_text = matches.group(2).strip() + result_tuple = (positive_text, negative_text, prompt_file) + prompts.append(result_tuple) + else: + print(f"[WARN] LoadPromptsFromFile: invalid prompt format in '{prompt_file}'") + except Exception as e: + print(f"[ERROR] LoadPromptsFromFile: an error occurred while processing '{prompt_file}': {str(e)}") + + return (prompts, ) + + +class UnzipPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": {"zipped_prompt": ("ZIPPED_PROMPT",), }} + + RETURN_TYPES = ("STRING", "STRING", "STRING") + RETURN_NAMES = ("positive", "negative", "name") + + FUNCTION = "doit" + + CATEGORY = "InspirePack/prompt" + + def doit(self, zipped_prompt): + return zipped_prompt + + +class ZipPrompt: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "positive": ("STRING", {"forceInput": True, "multiline": True}), + "negative": ("STRING", {"forceInput": True, "multiline": True}), + }, + "optional": { + "name_opt": ("STRING", {"forceInput": True, "multiline": False}) + } + } + + RETURN_TYPES = ("ZIPPED_PROMPT",) + + FUNCTION = "doit" + + CATEGORY = "InspirePack/prompt" + + def doit(self, positive, negative, name_opt=""): + return ((positive, negative, name_opt), ) + + +prompt_blacklist = set([ + 'filename_prefix' +]) + +class PromptExtractor: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": { + "image": (sorted(files), {"image_upload": True}), + "positive_id": ("STRING", {}), + "negative_id": ("STRING", {}), + "info": ("STRING", {"multiline": True}) + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + CATEGORY = "InspirePack/prompt" + + RETURN_TYPES = ("STRING", "STRING") + RETURN_NAMES = ("positive", "negative") + FUNCTION = "doit" + + OUTPUT_NODE = True + + def doit(self, image, positive_id, negative_id, info, unique_id): + image_path = folder_paths.get_annotated_filepath(image) + info = Image.open(image_path).info + + positive = "" + negative = "" + text = "" + prompt_dicts = {} + node_inputs = {} + + def get_node_inputs(x): + if x in node_inputs: + return node_inputs[x] + else: + node_inputs[x] = None + + obj = nodes.NODE_CLASS_MAPPINGS.get(x, None) + 
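+                # If the class is known, memoize its INPUT_TYPES below; otherwise the
+                # None cached above short-circuits future lookups for this class_type.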
if obj is not None:
+                    input_types = obj.INPUT_TYPES()
+                    node_inputs[x] = input_types
+                    return input_types
+                else:
+                    return None
+
+        if isinstance(info, dict) and 'workflow' in info:
+            prompt = json.loads(info['prompt'])
+            for k, v in prompt.items():
+                input_types = get_node_inputs(v['class_type'])
+                if input_types is not None:
+                    inputs = input_types['required'].copy()
+                    if 'optional' in input_types:
+                        inputs.update(input_types['optional'])
+
+                    for name, value in inputs.items():
+                        if name in prompt_blacklist:
+                            continue
+
+                        if value[0] == 'STRING' and name in v['inputs']:
+                            prompt_dicts[f"{k}.{name.strip()}"] = (v['class_type'], v['inputs'][name])
+
+            for k, v in prompt_dicts.items():
+                text += f"{k} [{v[0]}] ==> {v[1]}\n"
+
+            # keep only the widget text (second tuple element), since the outputs are STRINGs
+            positive = prompt_dicts.get(positive_id.strip(), ("", ""))[1]
+            negative = prompt_dicts.get(negative_id.strip(), ("", ""))[1]
+        else:
+            text = "There is no prompt information within the image."
+
+        PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": unique_id, "widget_name": "info", "type": "text", "data": text})
+        return (positive, negative)
+
+
+class GlobalSeed:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "value": ("INT", {"default": 0, "min": 0, "max": 1125899906842624}),
+                "mode": ("BOOLEAN", {"default": True, "label_on": "control_before_generate", "label_off": "control_after_generate"}),
+                "action": (["fixed", "increment", "decrement", "randomize",
+                            "increment for each node", "decrement for each node", "randomize for each node"], ),
+                "last_seed": ("STRING", {"default": ""}),
+            }
+        }
+
+    RETURN_TYPES = ()
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Prompt"
+
+    OUTPUT_NODE = True
+
+    def doit(self, **kwargs):
+        return {}
+
+
+class BindImageListPromptList:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "images": ("IMAGE",),
+                "zipped_prompts": ("ZIPPED_PROMPT",),
+                "default_positive": ("STRING", {"multiline": True, "placeholder": "default positive"}),
+                "default_negative": ("STRING", {"multiline": True, "placeholder": "default negative"}),
+            }
+        }
+
+    INPUT_IS_LIST = True
+
+    RETURN_TYPES = ("IMAGE", "STRING", "STRING", "STRING")
+    RETURN_NAMES = ("image", "positive", "negative", "prompt_label")
+
+    # one flag per output, so every output is emitted element-wise as a list
+    OUTPUT_IS_LIST = (True, True, True, True)
+
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Prompt"
+
+    def doit(self, images, zipped_prompts, default_positive, default_negative):
+        positives = []
+        negatives = []
+        prompt_labels = []
+
+        if len(images) < len(zipped_prompts):
+            zipped_prompts = zipped_prompts[:len(images)]
+
+        elif len(images) > len(zipped_prompts):
+            lack = len(images) - len(zipped_prompts)
+            default_prompt = (default_positive[0], default_negative[0], "default")
+            zipped_prompts = zipped_prompts[:]
+            for i in range(lack):
+                zipped_prompts.append(default_prompt)
+
+        for prompt in zipped_prompts:
+            a, b, c = prompt
+            positives.append(a)
+            negatives.append(b)
+            prompt_labels.append(c)
+
+        return (images, positives, negatives, prompt_labels)
+
+
+class BNK_EncoderWrapper:
+    def __init__(self, token_normalization, weight_interpretation):
+        self.token_normalization = token_normalization
+        self.weight_interpretation = weight_interpretation
+
+    def encode(self, clip, text):
+        if 'BNK_CLIPTextEncodeAdvanced' not in nodes.NODE_CLASS_MAPPINGS:
+            raise Exception("[ERROR] To use WildcardEncodeInspire, you need to install 'Advanced CLIP Text Encode'")
+        return nodes.NODE_CLASS_MAPPINGS['BNK_CLIPTextEncodeAdvanced']().encode(clip, text, self.token_normalization, self.weight_interpretation)
+
+
+class WildcardEncodeInspire:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "model": ("MODEL",),
+                    "clip": ("CLIP",),
+                    "token_normalization": (["none", "mean", "length", "length+mean"], ),
+                    "weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"], {'default': 'comfy++'}),
+                    "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False, 'placeholder': 'Wildcard Prompt (User Input)'}),
+                    "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False, 'placeholder': 'Populated Prompt (Will be generated automatically)'}),
+                    "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}),
+                    "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ),
+                    "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                },
+        }
+
+    CATEGORY = "InspirePack/Prompt"
+
+    RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING")
+    RETURN_NAMES = ("model", "clip", "conditioning", "populated_text")
+    FUNCTION = "doit"
+
+    def doit(self, *args, **kwargs):
+        populated = kwargs['populated_text']
+
+        clip_encoder = BNK_EncoderWrapper(kwargs['token_normalization'], kwargs['weight_interpretation'])
+
+        if 'ImpactWildcardEncode' not in nodes.NODE_CLASS_MAPPINGS:
+            raise Exception("[ERROR] To use WildcardEncodeInspire, you need to install 'Impact Pack'")
+
+        model, clip, conditioning = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardEncode'].process_with_loras(wildcard_opt=populated, model=kwargs['model'], clip=kwargs['clip'], clip_encoder=clip_encoder)
+        return (model, clip, conditioning, populated)
+
+
+class PromptBuilder:
+    @classmethod
+    def INPUT_TYPES(s):
+        global prompt_builder_preset
+
+        presets = ["#PRESET"]
+        return {"required": {
+                    "category": (list(prompt_builder_preset.keys()), ),
+                    "preset": (presets, ),
+                    "text": ("STRING", {"multiline": True}),
+                },
+        }
+
+    RETURN_TYPES = ("STRING", )
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Prompt"
+
+    def doit(self, category, preset, text):
+        return (text,)
+
+
+class SeedExplorer:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "latent": ("LATENT",),
+                "seed_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}),
+                "enable_additional": ("BOOLEAN", {"default": True, "label_on": "true", "label_off": "false"}),
+                "additional_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                "additional_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "noise_mode": (["GPU(=A1111)", "CPU"],),
+                "initial_batch_seed_mode": (["incremental", "comfy"],),
+            }
+        }
+
+    RETURN_TYPES = ("NOISE",)
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Prompt"
+
+    @staticmethod
+    def apply_variation(start_noise, seed_items, noise_device, mask=None):
+        noise = start_noise
+        for x in seed_items:
+            if isinstance(x, str):
+                item = x.split(':')
+            else:
+                item = x
+
+            if len(item) == 2:
+                try:
+                    variation_seed = int(item[0])
+                    variation_strength = float(item[1])
+
+                    noise = utils.apply_variation_noise(noise, noise_device, variation_seed, variation_strength, mask=mask)
+                except Exception:
+                    print(f"[ERROR] IGNORED: SeedExplorer failed to process '{x}'")
+                    traceback.print_exc()
+        return noise
+
+    def doit(self, latent, seed_prompt, enable_additional, additional_seed, additional_strength, noise_mode,
+             initial_batch_seed_mode):
+        latent_image = latent["samples"]
+        device = 
comfy.model_management.get_torch_device() + noise_device = "cpu" if noise_mode == "CPU" else device + + seed_prompt = seed_prompt.replace("\n", "") + items = seed_prompt.strip().split(",") + + if items == ['']: + items = [] + + if enable_additional: + items.append((additional_seed, additional_strength)) + + try: + hd = items[0] + tl = items[1:] + + if isinstance(hd, tuple): + hd_seed = int(hd[0]) + else: + hd_seed = int(hd) + + noise = utils.prepare_noise(latent_image, hd_seed, None, noise_device, initial_batch_seed_mode) + noise = noise.to(device) + noise = SeedExplorer.apply_variation(noise, tl, noise_device) + noise = noise.cpu() + + return (noise,) + + except Exception: + print(f"[ERROR] IGNORED: SeedExplorer failed") + traceback.print_exc() + + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, + device=noise_device) + return (noise,) + + +list_counter_map = {} + + +class ListCounter: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "signal": (utils.any_typ,), + "base_value": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "hidden": {"unique_id": "UNIQUE_ID"}, + } + + RETURN_TYPES = ("INT",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Util" + + def doit(self, signal, base_value, unique_id): + if unique_id not in list_counter_map: + count = 0 + else: + count = list_counter_map[unique_id] + + list_counter_map[unique_id] = count + 1 + + return (count + base_value, ) + + +NODE_CLASS_MAPPINGS = { + "LoadPromptsFromDir //Inspire": LoadPromptsFromDir, + "LoadPromptsFromFile //Inspire": LoadPromptsFromFile, + "UnzipPrompt //Inspire": UnzipPrompt, + "ZipPrompt //Inspire": ZipPrompt, + "PromptExtractor //Inspire": PromptExtractor, + "GlobalSeed //Inspire": GlobalSeed, + "BindImageListPromptList //Inspire": BindImageListPromptList, + "WildcardEncode //Inspire": WildcardEncodeInspire, + "PromptBuilder //Inspire": PromptBuilder, + "SeedExplorer //Inspire": SeedExplorer, + "ListCounter //Inspire": ListCounter, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "LoadPromptsFromDir //Inspire": "Load Prompts From Dir (Inspire)", + "LoadPromptsFromFile //Inspire": "Load Prompts From File (Inspire)", + "UnzipPrompt //Inspire": "Unzip Prompt (Inspire)", + "ZipPrompt //Inspire": "Zip Prompt (Inspire)", + "PromptExtractor //Inspire": "Prompt Extractor (Inspire)", + "GlobalSeed //Inspire": "Global Seed (Inspire)", + "BindImageListPromptList //Inspire": "Bind [ImageList, PromptList] (Inspire)", + "WildcardEncode //Inspire": "Wildcard Encode (Inspire)", + "PromptBuilder //Inspire": "Prompt Builder (Inspire)", + "SeedExplorer //Inspire": "Seed Explorer (Inspire)", + "ListCounter //Inspire": "List Counter (Inspire)" +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/regional_nodes.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/regional_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..faf5a8f19ae8530463fd4fa396bef9d54ac51e74 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/regional_nodes.py @@ -0,0 +1,487 @@ +import traceback + +import comfy +import nodes +import numpy as np +import torch +import torch.nn.functional as F +from . 
import prompt_support + +class RegionalPromptSimple: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "basic_pipe": ("BASIC_PIPE",), + "mask": ("MASK",), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "wildcard_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "wildcard prompt"}), + "controlnet_in_pipe": ("BOOLEAN", {"default": False, "label_on": "Keep", "label_off": "Override"}), + }, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", ) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, basic_pipe, mask, cfg, sampler_name, scheduler, wildcard_prompt, controlnet_in_pipe=False): + if 'RegionalPrompt' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use RegionalPromptSimple, you need to install 'ComfyUI-Impact-Pack'") + + model, clip, vae, positive, negative = basic_pipe + + iwe = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardEncode']() + kap = nodes.NODE_CLASS_MAPPINGS['KSamplerAdvancedProvider']() + rp = nodes.NODE_CLASS_MAPPINGS['RegionalPrompt']() + + if wildcard_prompt != "": + model, clip, new_positive, _ = iwe.doit(model=model, clip=clip, populated_text=wildcard_prompt) + + if controlnet_in_pipe: + prev_cnet = None + for t in positive: + if 'control' in t[1] and 'control_apply_to_uncond' in t[1]: + prev_cnet = t[1]['control'], t[1]['control_apply_to_uncond'] + break + + if prev_cnet is not None: + for t in new_positive: + t[1]['control'] = prev_cnet[0] + t[1]['control_apply_to_uncond'] = prev_cnet[1] + + else: + new_positive = positive + + basic_pipe = model, clip, vae, new_positive, negative + + sampler = kap.doit(cfg, sampler_name, scheduler, basic_pipe)[0] + regional_prompts = rp.doit(mask, sampler)[0] + + return (regional_prompts, ) + + +def color_to_mask(color_mask, mask_color): + try: + if mask_color.startswith("#"): + selected = int(mask_color[1:], 16) + else: + selected = int(mask_color, 10) + except Exception: + raise Exception(f"[ERROR] Invalid mask_color value. 
mask_color should be a color value for RGB") + + temp = (torch.clamp(color_mask, 0, 1.0) * 255.0).round().to(torch.int) + temp = torch.bitwise_left_shift(temp[:, :, :, 0], 16) + torch.bitwise_left_shift(temp[:, :, :, 1], 8) + temp[:, :, :, 2] + mask = torch.where(temp == selected, 1.0, 0.0) + return mask + + +class RegionalPromptColorMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "basic_pipe": ("BASIC_PIPE",), + "color_mask": ("IMAGE",), + "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS,), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS,), + "wildcard_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "wildcard prompt"}), + "controlnet_in_pipe": ("BOOLEAN", {"default": False, "label_on": "Keep", "label_off": "Override"}), + }, + } + + RETURN_TYPES = ("REGIONAL_PROMPTS", "MASK") + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, basic_pipe, color_mask, mask_color, cfg, sampler_name, scheduler, wildcard_prompt, controlnet_in_pipe=False): + mask = color_to_mask(color_mask, mask_color) + rp = RegionalPromptSimple().doit(basic_pipe, mask, cfg, sampler_name, scheduler, wildcard_prompt, controlnet_in_pipe)[0] + return (rp, mask) + + +class RegionalConditioningSimple: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "clip": ("CLIP", ), + "mask": ("MASK",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + "prompt": ("STRING", {"multiline": True, "placeholder": "prompt"}), + }, + } + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, clip, mask, strength, set_cond_area, prompt): + conditioning = nodes.CLIPTextEncode().encode(clip, prompt)[0] + conditioning = nodes.ConditioningSetMask().append(conditioning, mask, set_cond_area, strength)[0] + return (conditioning, ) + + +class RegionalConditioningColorMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "clip": ("CLIP", ), + "color_mask": ("IMAGE",), + "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + "prompt": ("STRING", {"multiline": True, "placeholder": "prompt"}), + }, + } + + RETURN_TYPES = ("CONDITIONING", "MASK") + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, clip, color_mask, mask_color, strength, set_cond_area, prompt): + mask = color_to_mask(color_mask, mask_color) + + conditioning = nodes.CLIPTextEncode().encode(clip, prompt)[0] + conditioning = nodes.ConditioningSetMask().append(conditioning, mask, set_cond_area, strength)[0] + return (conditioning, mask) + + +class ToIPAdapterPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "ipadapter": ("IPADAPTER", ), + "clip_vision": ("CLIP_VISION",), + "model": ("MODEL", ), + } + } + + RETURN_TYPES = ("IPADAPTER_PIPE",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Util" + + def doit(self, ipadapter, clip_vision, model): + pipe = ipadapter, clip_vision, model + + return (pipe,) + + +class FromIPAdapterPipe: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "ipadapter_pipe": ("IPADAPTER_PIPE", ), + } + } + + RETURN_TYPES = ("IPADAPTER", "CLIP_VISION", "MODEL") + FUNCTION = "doit" + + 
CATEGORY = "InspirePack/Util" + + def doit(self, ipadapter_pipe): + return ipadapter_pipe + + +class IPAdapterConditioning: + def __init__(self, mask, weight, weight_type, noise=None, image=None, embeds=None): + self.mask = mask + self.image = image + self.embeds = embeds + self.weight = weight + self.noise = noise + self.weight_type = weight_type + + def doit(self, ipadapter, clip_vision, model): + if 'IPAdapterApply' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use Regional IPAdapter, you need to install 'ComfyUI_IPAdapter_plus'") + + obj = nodes.NODE_CLASS_MAPPINGS['IPAdapterApply'] + + if self.image is None: + clip_vision = None + + model = obj().apply_ipadapter(ipadapter, model, self.weight, clip_vision=clip_vision, image=self.image, + embeds=self.embeds, weight_type=self.weight_type, noise=self.noise, + attn_mask=self.mask)[0] + + return model + + +class RegionalIPAdapterMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + + "image": ("IMAGE",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "noise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "weight_type": (["original", "linear", "channel penalty"],), + }, + } + + RETURN_TYPES = ("REGIONAL_IPADAPTER", ) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, mask, image, weight, noise, weight_type): + cond = IPAdapterConditioning(mask, weight, weight_type, noise=noise, image=image) + return (cond, ) + + +class RegionalIPAdapterColorMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "color_mask": ("IMAGE",), + "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}), + + "image": ("IMAGE",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "noise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "weight_type": (["original", "linear", "channel penalty"], ), + }, + } + + RETURN_TYPES = ("REGIONAL_IPADAPTER", "MASK") + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, color_mask, mask_color, image, weight, noise, weight_type): + mask = color_to_mask(color_mask, mask_color) + cond = IPAdapterConditioning(mask, weight, weight_type, noise=noise, image=image) + return (cond, mask) + + +class RegionalIPAdapterEncodedMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + + "embeds": ("embeds",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "weight_type": (["original", "linear", "channel penalty"],), + }, + } + + RETURN_TYPES = ("REGIONAL_IPADAPTER", ) + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, mask, embeds, weight, weight_type): + cond = IPAdapterConditioning(mask, weight, weight_type, embeds=embeds) + return (cond, ) + + +class RegionalIPAdapterEncodedColorMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "color_mask": ("IMAGE",), + "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}), + + "embeds": ("EMBEDS",), + "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}), + "weight_type": (["original", "linear", "channel penalty"],), + }, + } + + RETURN_TYPES = ("REGIONAL_IPADAPTER", "MASK") + FUNCTION = "doit" + + CATEGORY = "InspirePack/Regional" + + def doit(self, color_mask, mask_color, embeds, weight, weight_type): + mask = color_to_mask(color_mask, mask_color) + cond = IPAdapterConditioning(mask, weight, weight_type, embeds=embeds) + 
+        return (cond, mask)
+
+
+class ApplyRegionalIPAdapters:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "ipadapter_pipe": ("IPADAPTER_PIPE",),
+                    "regional_ipadapter1": ("REGIONAL_IPADAPTER", ),
+                },
+        }
+
+    RETURN_TYPES = ("MODEL", )
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Regional"
+
+    def doit(self, **kwargs):
+        ipadapter_pipe = kwargs['ipadapter_pipe']
+        ipadapter, clip_vision, model = ipadapter_pipe
+
+        del kwargs['ipadapter_pipe']
+
+        # every remaining kwarg is a REGIONAL_IPADAPTER conditioning; apply them in order
+        for k, v in kwargs.items():
+            model = v.doit(ipadapter, clip_vision, model)
+
+        return (model, )
+
+
+class RegionalSeedExplorerMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "mask": ("MASK",),
+
+                "noise": ("NOISE",),
+                "seed_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}),
+                "enable_additional": ("BOOLEAN", {"default": True, "label_on": "true", "label_off": "false"}),
+                "additional_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                "additional_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "noise_mode": (["GPU(=A1111)", "CPU"],),
+            },
+        }
+
+    RETURN_TYPES = ("NOISE",)
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Regional"
+
+    def doit(self, mask, noise, seed_prompt, enable_additional, additional_seed, additional_strength, noise_mode):
+        device = comfy.model_management.get_torch_device()
+        noise_device = "cpu" if noise_mode == "CPU" else device
+
+        noise = noise.to(device)
+        mask = mask.to(device)
+
+        if len(mask.shape) == 2:
+            mask = mask.unsqueeze(0)
+
+        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noise.shape[2], noise.shape[3]), mode="bilinear").squeeze(0)
+
+        try:
+            seed_prompt = seed_prompt.replace("\n", "")
+            items = seed_prompt.strip().split(",")
+
+            if items == ['']:
+                items = []
+
+            if enable_additional:
+                items.append((additional_seed, additional_strength))
+
+            noise = prompt_support.SeedExplorer.apply_variation(noise, items, noise_device, mask)
+        except Exception:
+            print("[ERROR] IGNORED: RegionalSeedExplorerMask failed.")
+            traceback.print_exc()
+
+        noise = noise.cpu()
+        mask = mask.cpu()
+        return (noise,)
+
+
+class RegionalSeedExplorerColorMask:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "color_mask": ("IMAGE",),
+                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
+
+                "noise": ("NOISE",),
+                "seed_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}),
+                "enable_additional": ("BOOLEAN", {"default": True, "label_on": "true", "label_off": "false"}),
+                "additional_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                "additional_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "noise_mode": (["GPU(=A1111)", "CPU"],),
+            },
+        }
+
+    RETURN_TYPES = ("NOISE", "MASK")
+    FUNCTION = "doit"
+
+    CATEGORY = "InspirePack/Regional"
+
+    def doit(self, color_mask, mask_color, noise, seed_prompt, enable_additional, additional_seed, additional_strength, noise_mode):
+        device = comfy.model_management.get_torch_device()
+        noise_device = "cpu" if noise_mode == "CPU" else device
+
+        color_mask = color_mask.to(device)
+        noise = noise.to(device)
+
+        mask = color_to_mask(color_mask, mask_color)
+        original_mask = mask
+        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noise.shape[2], noise.shape[3]), mode="bilinear").squeeze(0)
+
+        mask = mask.to(device)
+
+        try:
+            seed_prompt = seed_prompt.replace("\n", "")
+            items = seed_prompt.strip().split(",")
+
+            if items == ['']:
+                items = []
+
+            if enable_additional:
+                items.append((additional_seed, additional_strength))
+
+            noise = prompt_support.SeedExplorer.apply_variation(noise, items, noise_device, mask)
+        except Exception:
+            print("[ERROR] IGNORED: RegionalSeedExplorerColorMask failed.")
+            traceback.print_exc()
+
+        color_mask = color_mask.cpu()
+        noise = noise.cpu()
+        original_mask = original_mask.cpu()
+        return (noise, original_mask)
+
+
+NODE_CLASS_MAPPINGS = {
+    "RegionalPromptSimple //Inspire": RegionalPromptSimple,
+    "RegionalPromptColorMask //Inspire": RegionalPromptColorMask,
+    "RegionalConditioningSimple //Inspire": RegionalConditioningSimple,
+    "RegionalConditioningColorMask //Inspire": RegionalConditioningColorMask,
+    "RegionalIPAdapterMask //Inspire": RegionalIPAdapterMask,
+    "RegionalIPAdapterColorMask //Inspire": RegionalIPAdapterColorMask,
+    "RegionalIPAdapterEncodedMask //Inspire": RegionalIPAdapterEncodedMask,
+    "RegionalIPAdapterEncodedColorMask //Inspire": RegionalIPAdapterEncodedColorMask,
+    "RegionalSeedExplorerMask //Inspire": RegionalSeedExplorerMask,
+    "RegionalSeedExplorerColorMask //Inspire": RegionalSeedExplorerColorMask,
+    "ToIPAdapterPipe //Inspire": ToIPAdapterPipe,
+    "FromIPAdapterPipe //Inspire": FromIPAdapterPipe,
+    "ApplyRegionalIPAdapters //Inspire": ApplyRegionalIPAdapters,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "RegionalPromptSimple //Inspire": "Regional Prompt Simple (Inspire)",
+    "RegionalPromptColorMask //Inspire": "Regional Prompt By Color Mask (Inspire)",
+    "RegionalConditioningSimple //Inspire": "Regional Conditioning Simple (Inspire)",
+    "RegionalConditioningColorMask //Inspire": "Regional Conditioning By Color Mask (Inspire)",
+    "RegionalIPAdapterMask //Inspire": "Regional IPAdapter Mask (Inspire)",
+    "RegionalIPAdapterColorMask //Inspire": "Regional IPAdapter By Color Mask (Inspire)",
+    "RegionalIPAdapterEncodedMask //Inspire": "Regional IPAdapter Encoded Mask (Inspire)",
+    "RegionalIPAdapterEncodedColorMask //Inspire": "Regional IPAdapter Encoded By Color Mask (Inspire)",
+    "RegionalSeedExplorerMask //Inspire": "Regional Seed Explorer By Mask (Inspire)",
+    "RegionalSeedExplorerColorMask //Inspire": "Regional Seed Explorer By Color Mask (Inspire)",
+    "ToIPAdapterPipe //Inspire": "ToIPAdapterPipe (Inspire)",
+    "FromIPAdapterPipe //Inspire": "FromIPAdapterPipe (Inspire)",
+    "ApplyRegionalIPAdapters //Inspire": "Apply Regional IPAdapters (Inspire)"
+}
diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/sampler_nodes.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/sampler_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..74387e9851d4d92488ce0a775a0ff06de538b520
--- /dev/null
+++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/sampler_nodes.py
@@ -0,0 +1,123 @@
+import torch
+from . 
import a1111_compat +import comfy + +class KSampler_progress(a1111_compat.KSampler_inspire): + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "noise_mode": (["GPU(=A1111)", "CPU"],), + "interval": ("INT", {"default": 1, "min": 1, "max": 10000}), + "omit_start_latent": ("BOOLEAN", {"default": True, "label_on": "True", "label_off": "False"}), + } + } + + CATEGORY = "InspirePack/analysis" + + RETURN_TYPES = ("LATENT", "LATENT") + RETURN_NAMES = ("latent", "progress_latent") + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, noise_mode, interval, omit_start_latent): + adv_steps = int(steps / denoise) + + sampler = a1111_compat.KSamplerAdvanced_inspire() + + if omit_start_latent: + result = [] + else: + result = [latent_image['samples']] + + for i in range(0, adv_steps+1): + add_noise = i == 0 + return_with_leftover_noise = i != adv_steps + latent_image = sampler.sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, i, i+1, noise_mode, return_with_leftover_noise)[0] + if i % interval == 0 or i == adv_steps: + result.append(latent_image['samples']) + + if len(result) > 0: + result = torch.cat(result) + result = {'samples': result} + else: + result = latent_image + + return (latent_image, result) + + +class KSamplerAdvanced_progress(a1111_compat.KSamplerAdvanced_inspire): + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "noise_mode": (["GPU(=A1111)", "CPU"],), + "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + "interval": ("INT", {"default": 1, "min": 1, "max": 10000}), + "omit_start_latent": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}), + }, + "optional": {"prev_progress_latent_opt": ("LATENT",), } + } + + FUNCTION = "sample" + + CATEGORY = "InspirePack/analysis" + + RETURN_TYPES = ("LATENT", "LATENT") + RETURN_NAMES = ("latent", "progress_latent") + + def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, noise_mode, return_with_leftover_noise, interval, omit_start_latent, prev_progress_latent_opt=None): + sampler = a1111_compat.KSamplerAdvanced_inspire() + + if omit_start_latent: + result = [] + 
else: + result = [latent_image['samples']] + + for i in range(start_at_step, end_at_step+1): + cur_add_noise = i == start_at_step and add_noise + cur_return_with_leftover_noise = i != steps or return_with_leftover_noise + latent_image = sampler.sample(model, cur_add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, i, i+1, noise_mode, cur_return_with_leftover_noise)[0] + print(f"{i}, {i+1}") + if i % interval == 0 or i == steps: + result.append(latent_image['samples']) + + if len(result) > 0: + result = torch.cat(result) + result = {'samples': result} + else: + result = latent_image + + if prev_progress_latent_opt is not None: + result['samples'] = torch.cat((prev_progress_latent_opt['samples'], result['samples']), dim=0) + + return (latent_image, result) + + +NODE_CLASS_MAPPINGS = { + "KSamplerProgress //Inspire": KSampler_progress, + "KSamplerAdvancedProgress //Inspire": KSamplerAdvanced_progress, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "KSamplerProgress //Inspire": "KSampler Progress (Inspire)", + "KSamplerAdvancedProgress //Inspire": "KSampler Advanced Progress (Inspire)", +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/inspire/segs_support.py b/custom_nodes/ComfyUI-Inspire-Pack/inspire/segs_support.py new file mode 100644 index 0000000000000000000000000000000000000000..ecbf04df0e0b1f9de1f0e921814716c3eb6a24b5 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/inspire/segs_support.py @@ -0,0 +1,556 @@ +import nodes +import numpy as np +import torch + +def normalize_size_base_64(w, h): + short_side = min(w, h) + remainder = short_side % 64 + return short_side - remainder + (64 if remainder > 0 else 0) + + +class MediaPipeFaceMeshDetector: + def __init__(self, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil, max_faces, is_segm): + self.face = face + self.mouth = mouth + self.left_eyebrow = left_eyebrow + self.left_eye = left_eye + self.left_pupil = left_pupil + self.right_eyebrow = right_eyebrow + self.right_eye = right_eye + self.right_pupil = right_pupil + self.is_segm = is_segm + self.max_faces = max_faces + self.override_bbox_by_segm = True + + def detect(self, image, threshold, dilation, crop_factor, drop_size=1, crop_min_size=None, detailer_hook=None): + if 'MediaPipe-FaceMeshPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use MediaPipeFaceMeshDetector, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + if 'MediaPipeFaceMeshToSEGS' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use MediaPipeFaceMeshDetector, you need to install 'ComfyUI-Impact-Pack'") + + pre_obj = nodes.NODE_CLASS_MAPPINGS['MediaPipe-FaceMeshPreprocessor'] + seg_obj = nodes.NODE_CLASS_MAPPINGS['MediaPipeFaceMeshToSEGS'] + + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + facemesh_image = pre_obj().detect(image, self.max_faces, threshold, resolution=resolution)[0] + segs = seg_obj().doit(facemesh_image, crop_factor, not self.is_segm, crop_min_size, drop_size, dilation, + self.face, self.mouth, self.left_eyebrow, self.left_eye, self.left_pupil, + self.right_eyebrow, self.right_eye, self.right_pupil)[0] + + return segs + + def setAux(self, x): + pass + + +class MediaPipe_FaceMesh_Preprocessor_wrapper: + def __init__(self, max_faces, min_confidence, upscale_factor=1.0): + self.max_faces = max_faces + self.min_confidence = min_confidence + self.upscale_factor = upscale_factor + + def apply(self, image, mask=None): + if 
'MediaPipe-FaceMeshPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use MediaPipeFaceMeshDetector, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + if self.upscale_factor != 1.0: + image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0] + + obj = nodes.NODE_CLASS_MAPPINGS['MediaPipe-FaceMeshPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.detect(image, self.max_faces, self.min_confidence, resolution=resolution)[0] + + +class AnimeLineArt_Preprocessor_wrapper: + def apply(self, image, mask=None): + if 'AnimeLineArtPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use AnimeLineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['AnimeLineArtPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, resolution=resolution)[0] + + +class Manga2Anime_LineArt_Preprocessor_wrapper: + def apply(self, image, mask=None): + if 'Manga2Anime_LineArt_Preprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use Manga2Anime_LineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['Manga2Anime_LineArt_Preprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, resolution=resolution)[0] + + +class Color_Preprocessor_wrapper: + def apply(self, image, mask=None): + if 'ColorPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use Color_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['ColorPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, resolution=resolution)[0] + + +class InpaintPreprocessor_wrapper: + def apply(self, image, mask=None): + if 'InpaintPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use InpaintPreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['InpaintPreprocessor']() + if mask is None: + mask = torch.ones((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu").unsqueeze(0) + + return obj.preprocess(image, mask)[0] + + +class TilePreprocessor_wrapper: + def __init__(self, pyrUp_iters): + self.pyrUp_iters = pyrUp_iters + + def apply(self, image, mask=None): + if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use TilePreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, self.pyrUp_iters, resolution=resolution)[0] + + +class LineArt_Preprocessor_wrapper: + def __init__(self, coarse): + self.coarse = coarse + + def apply(self, image, mask=None): + if 'LineArtPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use LineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + coarse = 'enable' if self.coarse else 'disable' + + obj = nodes.NODE_CLASS_MAPPINGS['LineArtPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return 
obj.execute(image, resolution=resolution, coarse=coarse)[0] + + +class OpenPose_Preprocessor_wrapper: + def __init__(self, detect_hand, detect_body, detect_face, upscale_factor=1.0): + self.detect_hand = detect_hand + self.detect_body = detect_body + self.detect_face = detect_face + self.upscale_factor = upscale_factor + + def apply(self, image, mask=None): + if 'OpenposePreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use OpenPose_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + detect_hand = 'enable' if self.detect_hand else 'disable' + detect_body = 'enable' if self.detect_body else 'disable' + detect_face = 'enable' if self.detect_face else 'disable' + + if self.upscale_factor != 1.0: + image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0] + + obj = nodes.NODE_CLASS_MAPPINGS['OpenposePreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.estimate_pose(image, detect_hand, detect_body, detect_face, resolution=resolution)['result'][0] + + +class DWPreprocessor_wrapper: + def __init__(self, detect_hand, detect_body, detect_face, upscale_factor=1.0): + self.detect_hand = detect_hand + self.detect_body = detect_body + self.detect_face = detect_face + self.upscale_factor = upscale_factor + + def apply(self, image, mask=None): + if 'DWPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use DWPreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + detect_hand = 'enable' if self.detect_hand else 'disable' + detect_body = 'enable' if self.detect_body else 'disable' + detect_face = 'enable' if self.detect_face else 'disable' + + if self.upscale_factor != 1.0: + image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0] + + obj = nodes.NODE_CLASS_MAPPINGS['DWPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.estimate_pose(image, detect_hand, detect_body, detect_face, resolution=resolution)['result'][0] + + +class LeReS_DepthMap_Preprocessor_wrapper: + def __init__(self, rm_nearest, rm_background, boost): + self.rm_nearest = rm_nearest + self.rm_background = rm_background + self.boost = boost + + def apply(self, image, mask=None): + if 'LeReS-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use LeReS_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + boost = 'enable' if self.boost else 'disable' + + obj = nodes.NODE_CLASS_MAPPINGS['LeReS-DepthMapPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, self.rm_nearest, self.rm_background, boost=boost, resolution=resolution)[0] + + +class MiDaS_DepthMap_Preprocessor_wrapper: + def __init__(self, a, bg_threshold): + self.a = a + self.bg_threshold = bg_threshold + + def apply(self, image, mask=None): + if 'MiDaS-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use MiDaS_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['MiDaS-DepthMapPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, self.a, self.bg_threshold, resolution=resolution)[0] + + +class Zoe_DepthMap_Preprocessor_wrapper: + def apply(self, image, mask=None): + if 
'Zoe-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use Zoe_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS['Zoe-DepthMapPreprocessor']() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, resolution=resolution)[0] + + +class HED_Preprocessor_wrapper: + def __init__(self, safe, nodename): + self.safe = safe + self.nodename = nodename + + def apply(self, image, mask=None): + if self.nodename not in nodes.NODE_CLASS_MAPPINGS: + raise Exception(f"[ERROR] To use {self.nodename}_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'") + + obj = nodes.NODE_CLASS_MAPPINGS[self.nodename]() + resolution = normalize_size_base_64(image.shape[2], image.shape[1]) + return obj.execute(image, resolution=resolution, safe="enable" if self.safe else "disable")[0] + + +class Canny_Preprocessor_wrapper: + def __init__(self, low_threshold, high_threshold): + self.low_threshold = low_threshold + self.high_threshold = high_threshold + + def apply(self, image, mask=None): + obj = nodes.NODE_CLASS_MAPPINGS['Canny']() + return obj.detect_edge(image, self.low_threshold, self.high_threshold)[0] + + +class OpenPose_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detect_hand": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "detect_body": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "detect_face": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, detect_hand, detect_body, detect_face, resolution_upscale_by): + obj = OpenPose_Preprocessor_wrapper(detect_hand, detect_body, detect_face, upscale_factor=resolution_upscale_by) + return (obj, ) + + +class DWPreprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "detect_hand": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "detect_body": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "detect_face": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}), + "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, detect_hand, detect_body, detect_face, resolution_upscale_by): + obj = DWPreprocessor_wrapper(detect_hand, detect_body, detect_face, upscale_factor=resolution_upscale_by) + return (obj, ) + + +class LeReS_DepthMap_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "rm_nearest": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}), + "rm_background": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}) + }, + "optional": { + "boost": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, rm_nearest, rm_background, boost=False): + obj = LeReS_DepthMap_Preprocessor_wrapper(rm_nearest, rm_background, 
boost) + return (obj, ) + + +class MiDaS_DepthMap_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "a": ("FLOAT", {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.05}), + "bg_threshold": ("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}) + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, a, bg_threshold): + obj = MiDaS_DepthMap_Preprocessor_wrapper(a, bg_threshold) + return (obj, ) + + +class Zoe_DepthMap_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { "required": {} } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self): + obj = Zoe_DepthMap_Preprocessor_wrapper() + return (obj, ) + + +class Canny_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "low_threshold": ("FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}), + "high_threshold": ("FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01}) + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, low_threshold, high_threshold): + obj = Canny_Preprocessor_wrapper(low_threshold, high_threshold) + return (obj, ) + + +class HEDPreprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "safe": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}) + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, safe): + obj = HED_Preprocessor_wrapper(safe, "HEDPreprocessor") + return (obj, ) + + +class FakeScribblePreprocessor_Provider_for_SEGS(HEDPreprocessor_Provider_for_SEGS): + def doit(self, safe): + obj = HED_Preprocessor_wrapper(safe, "FakeScribblePreprocessor") + return (obj, ) + + +class MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "max_faces": ("INT", {"default": 10, "min": 1, "max": 50, "step": 1}), + "min_confidence": ("FLOAT", {"default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01}), + "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}), + } + } + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, max_faces, min_confidence, resolution_upscale_by): + obj = MediaPipe_FaceMesh_Preprocessor_wrapper(max_faces, min_confidence, upscale_factor=resolution_upscale_by) + return (obj, ) + + +class MediaPipeFaceMeshDetectorProvider: + @classmethod + def INPUT_TYPES(s): + bool_true_widget = ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}) + bool_false_widget = ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}) + return {"required": { + "max_faces": ("INT", {"default": 10, "min": 1, "max": 50, "step": 1}), + "face": bool_true_widget, + "mouth": bool_false_widget, + "left_eyebrow": bool_false_widget, + "left_eye": bool_false_widget, + "left_pupil": bool_false_widget, + "right_eyebrow": bool_false_widget, + "right_eye": bool_false_widget, + "right_pupil": bool_false_widget, + }} + + RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR") + FUNCTION = "doit" + + CATEGORY = "InspirePack/Detector" + + def doit(self, max_faces, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, 
right_eye, right_pupil): + bbox_detector = MediaPipeFaceMeshDetector(face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil, max_faces, is_segm=False) + segm_detector = MediaPipeFaceMeshDetector(face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil, max_faces, is_segm=True) + + return (bbox_detector, segm_detector) + + +class AnimeLineArt_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self): + obj = AnimeLineArt_Preprocessor_wrapper() + return (obj, ) + + +class Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self): + obj = Manga2Anime_LineArt_Preprocessor_wrapper() + return (obj, ) + + +class LineArt_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coarse": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), + }} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, coarse): + obj = LineArt_Preprocessor_wrapper(coarse) + return (obj, ) + + +class Color_Preprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self): + obj = Color_Preprocessor_wrapper() + return (obj, ) + + +class InpaintPreprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {}} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self): + obj = InpaintPreprocessor_wrapper() + return (obj, ) + + +class TilePreprocessor_Provider_for_SEGS: + @classmethod + def INPUT_TYPES(s): + return {"required": {'pyrUp_iters': ("INT", {"default": 3, "min": 1, "max": 10, "step": 1})}} + RETURN_TYPES = ("SEGS_PREPROCESSOR",) + FUNCTION = "doit" + + CATEGORY = "InspirePack/SEGS/ControlNet" + + def doit(self, pyrUp_iters): + obj = TilePreprocessor_wrapper(pyrUp_iters) + return (obj, ) + + +NODE_CLASS_MAPPINGS = { + "OpenPose_Preprocessor_Provider_for_SEGS //Inspire": OpenPose_Preprocessor_Provider_for_SEGS, + "DWPreprocessor_Provider_for_SEGS //Inspire": DWPreprocessor_Provider_for_SEGS, + "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": MiDaS_DepthMap_Preprocessor_Provider_for_SEGS, + "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": LeReS_DepthMap_Preprocessor_Provider_for_SEGS, + # "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": Zoe_DepthMap_Preprocessor_Provider_for_SEGS, + "Canny_Preprocessor_Provider_for_SEGS //Inspire": Canny_Preprocessor_Provider_for_SEGS, + "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire": MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS, + "HEDPreprocessor_Provider_for_SEGS //Inspire": HEDPreprocessor_Provider_for_SEGS, + "FakeScribblePreprocessor_Provider_for_SEGS //Inspire": FakeScribblePreprocessor_Provider_for_SEGS, + "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire": AnimeLineArt_Preprocessor_Provider_for_SEGS, + "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire": Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS, + 
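# editorial note (illustrative, not part of the pack): each provider class registered in
+    # this mapping returns, from doit(), a wrapper whose apply(image, mask=None) produces the
+    # ControlNet hint image for SEGS detailing, e.g. (hypothetical call):
+    #   provider = Canny_Preprocessor_Provider_for_SEGS().doit(0.4, 0.8)[0]
+    #   hint = provider.apply(image)  # IMAGE tensor shaped [B, H, W, C]
+ 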
"LineArt_Preprocessor_Provider_for_SEGS //Inspire": LineArt_Preprocessor_Provider_for_SEGS, + "Color_Preprocessor_Provider_for_SEGS //Inspire": Color_Preprocessor_Provider_for_SEGS, + "InpaintPreprocessor_Provider_for_SEGS //Inspire": InpaintPreprocessor_Provider_for_SEGS, + "TilePreprocessor_Provider_for_SEGS //Inspire": TilePreprocessor_Provider_for_SEGS, + "MediaPipeFaceMeshDetectorProvider //Inspire": MediaPipeFaceMeshDetectorProvider, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "OpenPose_Preprocessor_Provider_for_SEGS //Inspire": "OpenPose Preprocessor Provider (SEGS)", + "DWPreprocessor_Provider_for_SEGS //Inspire": "DWPreprocessor Provider (SEGS)", + "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "MiDaS Depth Map Preprocessor Provider (SEGS)", + "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "LeReS Depth Map Preprocessor Provider (SEGS)", + # "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "Zoe Depth Map Preprocessor Provider (SEGS)", + "Canny_Preprocessor_Provider_for_SEGS //Inspire": "Canny Preprocessor Provider (SEGS)", + "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire": "MediaPipe FaceMesh Preprocessor Provider (SEGS)", + "HEDPreprocessor_Provider_for_SEGS //Inspire": "HED Preprocessor Provider (SEGS)", + "FakeScribblePreprocessor_Provider_for_SEGS //Inspire": "Fake Scribble Preprocessor Provider (SEGS)", + "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire": "AnimeLineArt Preprocessor Provider (SEGS)", + "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire": "Manga2Anime LineArt Preprocessor Provider (SEGS)", + "LineArt_Preprocessor_Provider_for_SEGS //Inspire": "LineArt Preprocessor Provider (SEGS)", + "Color_Preprocessor_Provider_for_SEGS //Inspire": "Color Preprocessor Provider (SEGS)", + "InpaintPreprocessor_Provider_for_SEGS //Inspire": "Inpaint Preprocessor Provider (SEGS)", + "TilePreprocessor_Provider_for_SEGS //Inspire": "Tile Preprocessor Provider (SEGS)", + "MediaPipeFaceMeshDetectorProvider //Inspire": "MediaPipeFaceMesh Detector Provider", +} diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/common.js b/custom_nodes/ComfyUI-Inspire-Pack/js/common.js new file mode 100644 index 0000000000000000000000000000000000000000..7537fe11db8c1477184f18b6a67a0acb8f98d2d7 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/common.js @@ -0,0 +1,16 @@ +import { api } from "../../scripts/api.js"; + +function nodeFeedbackHandler(event) { + let nodes = app.graph._nodes_by_id; + let node = nodes[event.detail.node_id]; + if(node) { + if(event.detail.type == "text") { + const w = node.widgets.find((w) => event.detail.widget_name === w.name); + if(w) { + w.value = event.detail.data; + } + } + } +} + +api.addEventListener("inspire-node-feedback", nodeFeedbackHandler); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/image_util.js b/custom_nodes/ComfyUI-Inspire-Pack/js/image_util.js new file mode 100644 index 0000000000000000000000000000000000000000..4b856acc6e9213f11d48453d60c2b41a722d7fef --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/image_util.js @@ -0,0 +1,71 @@ +import { ComfyApp, app } from "../../scripts/app.js"; + +function load_image(str) { + let base64String = canvas.toDataURL('image/png'); + let img = new Image(); + img.src = base64String; +} + +app.registerExtension({ + name: "Comfy.Inspire.img", + + nodeCreated(node, app) { + if(node.comfyClass == "LoadImage //Inspire") { + let w = node.widgets.find(obj => obj.name === 'image_data'); + + Object.defineProperty(w, 'value', { + set(v) 
{ + if(v != '[IMAGE DATA]') + w._value = v; + }, + get() { + const stackTrace = new Error().stack; + if(!stackTrace.includes('draw') && !stackTrace.includes('graphToPrompt') && stackTrace.includes('app.js')) { + return "[IMAGE DATA]"; + } + else { + return w._value; + } + } + }); + + let set_img_act = (v) => { + node._img = v; + var canvas = document.createElement('canvas'); + canvas.width = v[0].width; + canvas.height = v[0].height; + + var context = canvas.getContext('2d'); + context.drawImage(v[0], 0, 0, v[0].width, v[0].height); + + var base64Image = canvas.toDataURL('image/png'); + w.value = base64Image; + }; + + Object.defineProperty(node, 'imgs', { + set(v) { + if (!v[0].complete) { + let orig_onload = v[0].onload; + v[0].onload = function(v2) { + if(orig_onload) + orig_onload(); + set_img_act(v); + }; + } + else { + set_img_act(v); + } + }, + get() { + if(this._img == undefined && w.value != '') { + this._img = [new Image()]; + if(w.value && w.value != '[IMAGE DATA]') + this._img[0].src = w.value; + } + + return this._img; + } + }); + } + } +}) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/lora_block_weight.js b/custom_nodes/ComfyUI-Inspire-Pack/js/lora_block_weight.js new file mode 100644 index 0000000000000000000000000000000000000000..4dc6cb4635b95e1aecbb89282117f2cf48bb6890 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/lora_block_weight.js @@ -0,0 +1,161 @@ +import { ComfyApp, app } from "../../scripts/app.js"; + +app.registerExtension({ + name: "Comfy.Inspire.LBW", + + nodeCreated(node, app) { + if(node.comfyClass == "LoraLoaderBlockWeight //Inspire") { + // category filter + const lora_names_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'lora_name')]; + var full_lora_list = lora_names_widget.options.values; + const category_filter_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'category_filter')]; + + Object.defineProperty(lora_names_widget.options, "values", { + set: (x) => { + full_lora_list = x; + }, + get: () => { + if(category_filter_widget.value == 'All') + return full_lora_list; + + let l = full_lora_list.filter(x => x.startsWith(category_filter_widget.value)); + return l; + } + }); + + // vector selector + let preset_i = 9; + let vector_i = 10; + node._value = "Preset"; + + Object.defineProperty(node.widgets[preset_i], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Preset") { + node.widgets[vector_i].value = value.split(':')[1]; + if(node.widgets_values) { + node.widgets_values[vector_i] = node.widgets[preset_i].value; + } + } + } + + node._value = value; + }, + get: () => { + return node._value; + } + }); + } + + if(node.comfyClass == "XY Input: Lora Block Weight //Inspire") { + // category filter + const lora_names_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'lora_name')]; + var full_lora_list = lora_names_widget.options.values; + const category_filter_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'category_filter')]; + + Object.defineProperty(lora_names_widget.options, "values", { + set: (x) => { + full_lora_list = x; + }, + get: () => { + if(category_filter_widget.value == 'All') + return full_lora_list; + + let l = full_lora_list.filter(x => x.startsWith(category_filter_widget.value)); + return l; + } + }); + + // vector selector + let preset_i = 9; + let vector_i = 10; + node._value = "Preset"; + Object.defineProperty(node.widgets[preset_i], "value", { + 
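// editorial note: this property interception is the pack's recurring pattern for reacting
+                // to combo selections; the Error().stack check in the setter below fires the expansion
+                // only when the value is set via litegraph's inner_value_change (an actual user pick),
+                // not during redraws or serialization.
+ 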
set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Preset") { + if(!value.startsWith('@') && node.widgets[vector_i].value != "") + node.widgets[vector_i].value += "\n"; + if(value.startsWith('@')) { + let spec = value.split(':')[1]; + var n; + var sub_n = null; + var block = null; + + if(isNaN(spec)) { + let sub_spec = spec.split(','); + + if(sub_spec.length != 3) { + node.widgets_values[vector_i] = '!! SPEC ERROR !!'; + node._value = ''; + return; + } + + n = parseInt(sub_spec[0].trim()); + sub_n = parseInt(sub_spec[1].trim()); + block = parseInt(sub_spec[2].trim()); + } + else { + n = parseInt(spec.trim()); + } + + node.widgets[vector_i].value = ""; + if(sub_n == null) { + for(let i=1; i<=n; i++) { + var temp = ""; + for(let j=1; j<=n; j++) { + if(temp!='') + temp += ','; + if(j==i) + temp += 'A'; + else + temp += '0'; + } + + node.widgets[vector_i].value += `B${i}:${temp}\n`; + } + } + else { + for(let i=1; i<=sub_n; i++) { + var temp = ""; + for(let j=1; j<=n; j++) { + if(temp!='') + temp += ','; + + if(block!=j) + temp += '0'; + else { + temp += ' '; + for(let k=1; k<=sub_n; k++) { + if(k==i) + temp += 'A '; + else + temp += '0 '; + } + } + } + + node.widgets[vector_i].value += `B${block}.SUB${i}:${temp}\n`; + } + } + } + else { + node.widgets[vector_i].value += `${value}/${value.split(':')[0]}`; + } + if(node.widgets_values) { + node.widgets_values[vector_i] = node.widgets[preset_i].value; + } + } + } + + node._value = value; + }, + get: () => { + return node._value; + } + }); + } + } +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/prompt.js b/custom_nodes/ComfyUI-Inspire-Pack/js/prompt.js new file mode 100644 index 0000000000000000000000000000000000000000..1db204018159e1f3e8431331b5c0ef15be6213c9 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/prompt.js @@ -0,0 +1,168 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; + +let get_wildcards_list; +try { + const ImpactPack = await import("../ComfyUI-Impact-Pack/impact-pack.js"); + get_wildcards_list = ImpactPack.get_wildcards_list; +} catch (error) { +} + +// fallback +if(!get_wildcards_list) { + get_wildcards_list = () => { return ["Impact Pack isn't installed or is outdated."]; } +} + +let pb_cache = {}; + +async function get_prompt_builder_items(category) { + if(pb_cache[category]) + return pb_cache[category]; + else { + let res = await api.fetchApi(`/inspire/prompt_builder?category=${category}`); + let data = await res.json(); + pb_cache[category] = data.presets; + return data.presets; + } +} + + +app.registerExtension({ + name: "Comfy.Inspire.Prompts", + + nodeCreated(node, app) { + if(node.comfyClass == "WildcardEncode //Inspire") { + const wildcard_text_widget_index = node.widgets.findIndex((w) => w.name == 'wildcard_text'); + const populated_text_widget_index = node.widgets.findIndex((w) => w.name == 'populated_text'); + const mode_widget_index = node.widgets.findIndex((w) => w.name == 'mode'); + + const wildcard_text_widget = node.widgets[wildcard_text_widget_index]; + const populated_text_widget = node.widgets[populated_text_widget_index]; + + // lora selector, wildcard selector + let combo_id = 5; + + Object.defineProperty(node.widgets[combo_id], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Select the LoRA to add to the text") { + let lora_name = value; + if 
(lora_name.endsWith('.safetensors')) { + lora_name = lora_name.slice(0, -12); + } + + wildcard_text_widget.value += `<lora:${lora_name}>`; + } + } + }, + get: () => { return "Select the LoRA to add to the text"; } + }); +
+ Object.defineProperty(node.widgets[combo_id+1], "value", { + set: (value) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(value != "Select the Wildcard to add to the text") { + if(wildcard_text_widget.value != '') + wildcard_text_widget.value += ', '; + + wildcard_text_widget.value += value; + } + } + }, + get: () => { return "Select the Wildcard to add to the text"; } + }); +
+ Object.defineProperty(node.widgets[combo_id+1].options, "values", { + set: (x) => {}, + get: () => { + return get_wildcards_list(); + } + }); +
+ // Prevent validation errors in every situation. + node.widgets[combo_id].serializeValue = () => { return "Select the LoRA to add to the text"; } + node.widgets[combo_id+1].serializeValue = () => { return "Select the Wildcard to add to the text"; } +
+ // wildcard populating + populated_text_widget.inputEl.disabled = true; + const mode_widget = node.widgets[mode_widget_index]; +
+ // mode combo + Object.defineProperty(mode_widget, "value", { + set: (value) => { + node._mode_value = value == true || value == "Populate"; + populated_text_widget.inputEl.disabled = value == true || value == "Populate"; + }, + get: () => { + if(node._mode_value != undefined) + return node._mode_value; + else + return true; + } + }); + }
+ else if(node.comfyClass == "PromptBuilder //Inspire") { + const preset_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'preset')]; + const category_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'category')]; +
+ Object.defineProperty(preset_widget.options, "values", { + set: (x) => {}, + get: () => { + get_prompt_builder_items(category_widget.value); + if(pb_cache[category_widget.value] == undefined) { + return ["#PRESET"]; + } + return pb_cache[category_widget.value]; + } + }); +
+ Object.defineProperty(preset_widget, "value", { + set: (x) => { + const stackTrace = new Error().stack; + if(stackTrace.includes('inner_value_change')) { + if(node.widgets[2].value) { + node.widgets[2].value += ', '; + } +
+ const y = x.split(':'); + if(y.length == 2) + node.widgets[2].value += y[1].trim(); + else + node.widgets[2].value += x.trim(); +
+ if(node.widgets_values) { + node.widgets_values[2] = node.widgets[2].value; + } + } + }, + get: () => { return '#PRESET'; } + }); +
+ preset_widget.serializeValue = (workflowNode, widgetIndex) => { return "#PRESET"; }; + }
+ else if(node.comfyClass == "SeedExplorer //Inspire" + || node.comfyClass == "RegionalSeedExplorerMask //Inspire" + || node.comfyClass == "RegionalSeedExplorerColorMask //Inspire") { + const prompt_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'seed_prompt')]; + const seed_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'additional_seed')]; + const strength_widget = node.widgets[node.widgets.findIndex(obj => obj.name === 'additional_strength')]; +
+ let allow_init_seed = node.comfyClass == "SeedExplorer //Inspire"; +
+ node.addWidget("button", "Add to prompt", null, () => { + if(!prompt_widget.value?.trim() && allow_init_seed) { + prompt_widget.value = ''+seed_widget.value; + } + else { + if(prompt_widget.value?.trim()) + prompt_widget.value += ', '; +
+ prompt_widget.value += `${seed_widget.value}:${strength_widget.value.toFixed(2)}`; + seed_widget.value += 1; + } + }); 
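+ // editorial sketch (illustrative numbers): with additional_seed=1000 and
+ // additional_strength=0.8, repeated clicks grow seed_prompt like so:
+ //   click 1 -> "1000"                        (bare initial seed; SeedExplorer only)
+ //   click 2 -> "1000, 1000:0.80"             (appends seed:strength, then increments the seed)
+ //   click 3 -> "1000, 1000:0.80, 1001:0.80"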
+ } + } +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/regional.js b/custom_nodes/ComfyUI-Inspire-Pack/js/regional.js new file mode 100644 index 0000000000000000000000000000000000000000..22162dc4224e1933aae6d4296a8def94789496f5 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/regional.js @@ -0,0 +1,64 @@ +import { ComfyApp, app } from "../../scripts/app.js"; +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { api } from "../../scripts/api.js"; + +app.registerExtension({ + name: "Comfy.Inspire.Regional", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.name === 'ApplyRegionalIPAdapters //Inspire') { + var input_name = "input"; + var base_slot = 0; + + switch(nodeData.name) { + case 'ApplyRegionalIPAdapters //Inspire': + input_name = "regional_ipadapter"; + base_slot = 1; + break; + } + + const onConnectionsChange = nodeType.prototype.onConnectionsChange; + nodeType.prototype.onConnectionsChange = function (type, index, connected, link_info) { + if(!link_info || type == 2) + return; + + if(this.inputs[0].type == '*'){ + const node = app.graph.getNodeById(link_info.origin_id); + let origin_type = node.outputs[link_info.origin_slot].type; + + if(origin_type == '*') { + this.disconnectInput(link_info.target_slot); + return; + } + + for(let i in this.inputs) { + let input_i = this.inputs[i]; + if(input_i.name != 'select' && input_i.name != 'sel_mode') + input_i.type = origin_type; + } + } + + if (!connected && (this.inputs.length > base_slot+1)) { + const stackTrace = new Error().stack; + + if( + !stackTrace.includes('LGraphNode.prototype.connect') && // for touch device + !stackTrace.includes('LGraphNode.connect') && // for mouse device + !stackTrace.includes('loadGraphData')) { + this.removeInput(index); + } + } + + let slot_i = 1; + for (let i = base_slot; i < this.inputs.length; i++) { + let input_i = this.inputs[i]; + input_i.name = `${input_name}${slot_i}` + slot_i++; + } + + let last_slot = this.inputs[this.inputs.length - 1]; + if (last_slot.link != undefined) { + this.addInput(`${input_name}${slot_i}`, this.inputs[base_slot].type); + } + } + } + }}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/js/seed.js b/custom_nodes/ComfyUI-Inspire-Pack/js/seed.js new file mode 100644 index 0000000000000000000000000000000000000000..8233c068351776b34bd64d74250370fb90c62e9f --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/js/seed.js @@ -0,0 +1,47 @@ +import { api } from "../../scripts/api.js"; + +function globalSeedHandler(event) { + let nodes = app.graph._nodes_by_id; + + for(let i in nodes) { + let node = nodes[i]; + + if(node.type == 'GlobalSeed //Inspire') { + if(node.widgets) { + const w = node.widgets.find((w) => w.name == 'value'); + const last_w = node.widgets.find((w) => w.name == 'last_seed'); + last_w.value = w.value; + w.value = event.detail.value; + } + } + else + if(node.widgets) { + const w = node.widgets.find((w) => (w.name == 'seed' || w.name == 'noise_seed') && w.type == 'number'); + if(w && event.detail.seed_map[node.id] != undefined) { + w.value = event.detail.seed_map[node.id]; + } + } + } +} + +api.addEventListener("inspire-global-seed", globalSeedHandler); + + +const original_queuePrompt = api.queuePrompt; +async function queuePrompt_with_seed(number, { output, workflow }) { + workflow.seed_widgets = {}; + + for(let i in app.graph._nodes_by_id) { + let widgets = app.graph._nodes_by_id[i].widgets; + if(widgets) { + for(let j in widgets) { + if((widgets[j].name == 
'seed' || widgets[j].name == 'noise_seed') && widgets[j].type != 'converted-widget') + workflow.seed_widgets[i] = parseInt(j); + } + } + } + + return await original_queuePrompt.call(api, number, { output, workflow }); +} + +api.queuePrompt = queuePrompt_with_seed; \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt1.txt b/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt1.txt new file mode 100644 index 0000000000000000000000000000000000000000..0eed7fb5c4822baded231df6222ae848e9f47ca5 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt1.txt @@ -0,0 +1,2 @@ +positive:beautiful scenery nature glass bottle landscape, , purple galaxy bottle, +negative:text, watermark \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt2.txt b/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt2.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ec73eb3d245b80d2e9b323c62d450015e2d956d --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/prompts/example/prompt2.txt @@ -0,0 +1,12 @@ +positive:1girl is walking through street, +raincoat, yellow umbrella + +negative:text, watermark +----------------- +positive:museum, people are looking paintings, abstract + +negative:text, watermark +---- +positive:battle ground of space ships + +negative:text, watermark \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/prompts/put_prompts_dirs_here b/custom_nodes/ComfyUI-Inspire-Pack/prompts/put_prompts_dirs_here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-Inspire-Pack/requirements.txt b/custom_nodes/ComfyUI-Inspire-Pack/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b43f7e68658e87f033212c358b00c4d36075458 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/requirements.txt @@ -0,0 +1 @@ +matplotlib \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.custom.txt.example b/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.custom.txt.example new file mode 100644 index 0000000000000000000000000000000000000000..be038224da4f9330ecf4880f2820082ec8597492 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.custom.txt.example @@ -0,0 +1,20 @@ +SD-BODY:1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1 +SD-BODY0.5:1,1,1,1,1,1,0.2,1,0.2,0,0,0.8,1,1,1,1,1 +SD-FACE:1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0 +SD-FACE0.5:1,0,0,0,0,0,0,0,0.8,1,1,0.2,0,0,0,0,0 +SD-FACE0.2:1,0,0,0,0,0,0,0,0.2,0.6,0.8,0.2,0,0,0,0,0 +SD-HAND:1,0,1,1,0.2,0,0,0,0,0,0,0,0,0,0,0,0 +SD-CLOTHING:1,1,1,1,1,0,0.2,0,0.8,1,1,0.2,0,0,0,0,0 +SD-POSE:1,0,0,0,0,0,0.2,1,1,1,0,0,0,0,0,0,0 +SD-PALETTE:1,0,0,0,0,0,0,0,0,0,0,0.8,1,1,1,1,1 +SD-KEEPCHAR:1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,0,0 +SD-KEEPBG:1,1,1,1,1,1,0.2,1,0.2,0,0,0.8,1,1,1,0,0 +SD-REDUCEFIT:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1 +SD-LyCOBODY:1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1 +SD-LyCOBODY0.5:1,1,1,1,1,1,1,1,1,0.2,0.2,0.5,1,1,0,0,0,0.2,0,0,0.8,1,1,1,1 +SD-LyCOFACE:1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0 +SD-LyCOFACE0.5:1,0,0,0,0,0,0,0,0,0,0,0,0,0,0.2,0.5,0.8,1,1,1,0.2,0,0,0,0,0 +SD-LyCOCLOTH:1,1,1,1,1,1,1,1,0,0.2,0.2,0.2,0,0,0,0,0.5,0.8,1,1,0.2,0,0,0,0,0 +SD-LyCOPOSE:1,0,0,0,0,0,0,0,0,0.2,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0 +SD-LyCOKEEPBG:1,1,1,1,1,1,1,1,1,0.2,0.4,0.8,1,1,0.8,0.4,0.2,0.2,0,0,0.8,1,1,1,0,0 
+SD-LyCOPALETTE:1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.8,1,1,1,1,1 \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.txt b/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.txt new file mode 100644 index 0000000000000000000000000000000000000000..d53b91246672c9baf3e95c03c1f761e0bd64ad34 --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/resources/lbw-preset.txt @@ -0,0 +1,52 @@ +SD-NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +SD-ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +SD-INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0 +SD-IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0 +SD-INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0 +SD-MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0 +SD-MIDD0.2:1,0,0,0,0,0,0.2,0.4,0.4,0.2,0,0,0,0,0,0,0 +SD-MIDD0.8:1,0,0,0,0,0.5,0.8,0.8,0.4,0,0,0,0,0,0,0,0 +SD-MOUT:1,0,0,0,0,0,1,1,1,1,1,1,1,1,0.5,0,0 +SD-OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0 +SD-OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1 +SD-OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1 +SD-ROUT:1,1,1,1,1,1,1,1,R,R,R,R,R,R,R,R,R +SD-AOUT:A,1,1,1,1,1,1,1,1,1,1,1,A,A,A,A,A +SD-AB:A,B,B,B,B,B,B,B,B,B,B,B,A,A,A,A,A +SD-ALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5 +SD-LyC-NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +SD-LyC-ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +SD-LyC-INALL:1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +SD-LyC-MIDALL:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0 +SD-LyC-OUTALL:1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1 +SDXL-NONE:0,0,0,0,0,0,0,0,0,0,0,0 +SDXL-ALL:1,1,1,1,1,1,1,1,1,1,1,1 +SDXL-INALL:1,1,1,1,1,0,0,0,0,0,0,0 +SDXL-MIDALL:1,0,0,0,0,1,0,0,0,0,0,0 +SDXL-OUTALL:1,0,0,0,0,0,1,1,1,1,1,1 +SDXL-LyC-NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +SDXL-LyC-ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1 +SDXL-LyC-INALL:1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0 +SDXL-LyC-MIDALL:1,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0 +SDXL-LyC-OUTALL:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1 +@SD-FULL-TEST:17 +@SD-BLOCK1-TEST:17,12,1 +@SD-BLOCK2-TEST:17,12,2 +@SD-BLOCK3-TEST:17,12,3 +@SD-BLOCK4-TEST:17,12,4 +@SD-BLOCK5-TEST:17,12,5 +@SD-BLOCK6-TEST:17,12,6 +@SD-BLOCK7-TEST:17,12,7 +@SD-BLOCK8-TEST:17,12,8 +@SD-BLOCK9-TEST:17,12,9 +@SD-BLOCK10-TEST:17,12,10 +@SD-BLOCK11-TEST:17,12,11 +@SD-BLOCK12-TEST:17,12,12 +@SD-BLOCK13-TEST:17,12,13 +@SD-BLOCK14-TEST:17,12,14 +@SD-BLOCK15-TEST:17,12,15 +@SD-BLOCK16-TEST:17,12,16 +@SD-BLOCK17-TEST:17,12,17 +@SD-LyC-FULL-TEST:27 +@SDXL-FULL-TEST:12 +@SDXL-LyC-FULL-TEST:21 \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml b/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f69af658c2dc67e1524f79940c7537766257adfb --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml @@ -0,0 +1,1048 @@ +#Category: + #Input + +Angle of View: + - Atmospheric Perspective + - Blurry Foreground + - Close-Up + - Cowboy Shot + - Cut-In + - Dutch Angle + - First-Person View + - Fisheye + - From Above + - From Behind + - From Below + - From Outside + - From Side + - Hatching (Texture) + - Multiple Views + - Panorama + - Perspective + - Pov + - Rotated + - Sideways + - Three Sided View + - Upside-Down + - Vanishing Point + - Wide Shot + +Artists: + - Abanindranath Tagore + - Abdur Rahman Chughtai + - Abu'L Hasan + - Adolf Wölfli + - Agnes Denes + - Agnes Martin + - Agnolo Di Cosimo (Bronzino) + - Ai Weiwei + - Albert Bierstadt + - Albert Gleizes + - 
Albert Pinkham Ryder + - Alberto Burri + - Alberto Giacometti + - Albrecht Dürer + - Alexander Calder + - Alexander Gerasimov + - Alexander Kanoldt + - Alexander Rodchenko + - Alfred Wallis + - Alice Neel + - Aloïse Corbaz + - Alphonse Mucha + - Ambrogio Lorenzetti + - Amedeo Modigliani + - Amrita Sher-Gil + - André Derain + - Andrei Rublev + - Andy Goldsworthy + - Andy Warhol + - Angelica Kauffman + - Anna Banana + - Ansel Adams + - Anselm Kiefer + - Anthony Van Dyck + - Antoni Gaudi + - Antonio Canova + - Apelles + - Arkady Plastov + - Artemisia Gentileschi + - Artists Of Hagia Sophia + - Asger Jorn + - Asher B. Durand + - Asit Kumar Haldar + - Aubrey Beardsley + - Audrey Flack + - Auguste Rodin + - Augustus Pugin + - Banksy + - Barbara Kruger + - Barberini Faun + - Bart Van Der Leck + - Baseera Khan + - Becky Lloyd + - Ben Shahn + - Benedetto Antelami + - Bichitr + - Bill Traylor + - Bridget Riley + - Bruce Nauman + - Camille Claudel + - Camille Pissarro + - Caravaggio + - Carel Willink + - Carl Andre + - Carlo Carrà + - Caspar David Friedrich + - Charles Demuth + - Charles Sheeler + - Chiho Aoshima + - Childe Hassam + - Chuck Close + - Cimabue + - Cindy Sherman + - Claude Monet + - Clementine Hunter + - Clyfford Still + - Constant Nieuwenhuys + - Cornelis Norbertus Gysbrechts + - Cory Arcangel + - Dante Gabriel Rossetti + - David Hockney + - Diego Rivera + - Diego Velázquez + - Donald Judd + - Doris Salcedo + - Duccio Di Buoninsegna + - Dying Gaul + - Edgar Degas + - Edouard Manet + - Édouard Vuillard + - Edvard Munch + - Edward Burne-Jones + - Edward Hopper + - Edward Ruscha + - Egon Schiele + - El Greco + - El Lissitzky + - Ellsworth Kelly + - Émile-Jacques Ruhlmann + - Ernst Ludwig Kirchner + - Erté (Romain De Tirtoff) + - Eugene Delacroix + - Eva Hesse + - Evert Collier + - Fernand Léger + - Filippo Tommaso Marinetti + - Francesco Mazzola Parmigianino + - Francis Bacon + - Francisco De Zurbaran + - Francisco Goya + - Francois Boucher + - Frank Stella + - Frans Hals + - Frantisek Kupka + - Frederic Edwin Church + - Frida Kahlo + - Genesis P-Orridge + - George Ault + - George Bellows + - George Grosz + - George Inness + - George Luks + - George Tooker + - Georges Braque + - Georges Seurat + - Georgia O'Keeffe + - Gerrit Rietveld + - Giacomo Balla + - Gian Lorenzo Bernini + - Giorgio De Chirico + - Giorgio Morandi + - Giotto Di Bondone + - Giovanni Battista Tiepolo + - Govardhan + - Grandma Moses (Anna Mary Robertson Moses) + - Grant Wood + - Gustav Klimt + - Gustave Courbet + - Gustave Moreau + - Hannah Höch + - Hans Holbein The Younger + - Helen Frankenthaler + - Henri Cartier-Bresson + - Henri Matisse + - Henri Rousseau + - Henri-Edmond Cross + - Henry Darger + - Hieronymus Bosch + - Hishikawa Moronobu + - Howard Finster + - Invader (Space Invader) + - Ismail Gulgee + - Ivan Generalić + - J.M.W. Turner + - Jackson Pollock + - Jacob Jordaens + - Jacopo Comin (Tintoretto) + - Jacques-Louis David + - James Abbott Mcneill Whistler + - James Wyatt + - Jamini Roy + - Jan Brueghel The Elder + - Jan Vermeer + - Jean Arp + - Jean Auguste Dominique Ingres + - Jean Boulogne (Giambologna) + - Jean Dubuffet + - Jean Metzinger + - Jean Tinguely + - Jean-Antoine Watteau + - Jean-François Millet + - Jean-Honoré Fragonard + - Jean-Michel Basquiat + - Jeff Koons + - Jenny Saville + - Jesús Rafael Soto + - Joan Miró + - Johann Sebastian Bach + - Johannes Vermeer + - John Baldessari + - John Everett Millais + - John F. 
Peto + - John Mclaughlin + - John Singer Sargent + - John Sloan + - John William Waterhouse + - Josef Albers + - Joseph Kosuth + - Joseph Mallord William Turner + - Juan Gris + - Judy Chicago + - Kara Walker + - Karel Appel + - Karl Benjamin + - Katsushika Hokusai + - Kazimir Malevich + - Keith Haring + - Kitagawa Utamaro + - Lady Pink (Sandra Fabara) + - Laurence Gartel + - Lee Krasner + - Leonardo Da Vinci + - Lex Horn + - Louis Comfort Tiffany + - Louise Bourgeois + - Louise Nevelson + - Lucian Freud + - Maestro Esiguo + - Man Ray + - Marc Chagall + - Marcel Duchamp + - Mark Rothko + - Mary Cassatt + - Master Of The Franciscan Crucifixes + - Mastro Guglielmo + - Maurice De Vlaminck + - Maurice Denis + - Max Beckmann + - Max Ernst + - Michael Janis + - Michelangelo + - Morris Louis + - Mr. (Masaru Shichinohe) + - Nandalal Bose + - Naum Gabo + - Nicholas Of Verdun + - Nicola Pisano + - Odilon Redon + - Olafur Eliasson + - Olga Rozanova + - Oskar Schlemmer + - Otto Dix + - Pablo Picasso + - Paul Cezanne + - Paul Gauguin + - Paul Klee + - Paul Sérusier + - Paul Signac + - Peter Paul Rubens + - Pierre Alechinsky + - Pierre Bonnard + - Pierre-Auguste Renoir + - Piet Mondrian + - Pieter Bruegel The Elder + - Pietro Lorenzetti + - Raffaello Sanzio (Raphael) + - Raja Ravi Varma + - Ralph Albert Blakelock + - Ralph Goings + - Raphael (Raffaello Sanzio) + - Ray Johnson + - Refik Anadol + - Rembrandt Harmenszoon Van Rijn + - René Lalique + - Rene Magritte + - Richard Anuszkiewicz + - Richard Estes + - Richard Hamilton + - Richard Serra + - Robert Delaunay + - Robert Henri + - Robert Smithson + - Robert Watts + - Roy Lichtenstein + - Ryoji Ikeda + - Sadequain + - Saint Luke + - Salvador Dali + - Sandro Botticelli + - Sergei Gerasimov + - Shepard Fairey + - Simone Martini + - Sir Charles Barry + - Sir Lawrence Alma-Tadema + - Sol Lewitt + - Sonia Delaunay + - Takashi Murakami + - Tamara De Lempicka + - Theo Van Doesburg + - Théodore Géricault + - Theodore The Studite + - Thomas Cole + - Thomas Eakins + - Thomas Gainsborough + - Thomas Hart Benton + - Tintoretto (Jacopo Comin) + - Titian (Tiziano Vecellio) + - Tony Da + - Umberto Boccioni + - Ustad Mansur + - Utagawa Hiroshige + - Vasily Kandinsky + - Vera Mukhina + - Victor Vasarely + - Vincent Van Gogh + - Vladimir Tatlin + - Wassily Kandinsky + - White On White + - Willem De Kooning + - William Blake + - William Holman Hunt + - William Merritt Chase + - William Michael Harnett + - Winged Victory Of Samothrace + - Yayoi Kusama + - Yoshitomo Nara + - Yves Klein + - Zaha Hadid + +Character Types: + - Acolyte + - Acrobat + - Apothecary + - Artificer + - Artisan + - Barbarian + - Bard + - Bardic Sage + - Blood Hunter + - Bounty Hunter + - Cavalier + - Champion + - Charlatan + - City Watch + - Cleric + - Clown + - Druid + - Duelist + - Elemental Shaman + - Executioner + - Exorcist + - Explorer + - Fey-Touched + - Fighter + - Geomancer + - Gladiator + - Guild Artisan + - Hermit + - Knight + - Monk + - Mystic + - Noble + - Outlander + - Paladin + - Pirate + - Ranger + - Rogue + - Runescribe + - Sage + - Sailor + - Shaman + - Soldier + - Sorcerer + - Urchin + - Warden + - Warlock + - Warlord + - Wizard + +Colors: + - Almond Color + - Amber + - Apricot Orange + - Ash Gray + - Beige + - Black + - Blue + - Brick Red + - Bronze + - Brown + - Caramel Color + - Carnation Pink + - Cerulean + - Charcoal Grey + - Chocolate Color + - Cinnamon Brown + - Coral Orange + - Cream Color + - Creamy White + - Cyan + - Denim Blue + - Emerald Green + - Forest Green + - Fuchsia 
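+  # editorial note: PromptBuilder inserts a selected entry verbatim; entries written as
+  # "label:text" have only the part after ':' inserted (see the preset handling in js/prompt.js).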
+ - Gold + - Goldfish Orange + - Grass Green + - Green + - Honey Color + - Indigo + - Ivory Color + - Jade Green + - Lavender + - Lemon Yellow + - Lilac Purple + - Lime Green + - Magenta + - Mahogany + - Marigold + - Maroon + - Mauve + - Midnight Blue + - Mint Green + - Mocha Brown + - Navy Blue + - Olive Green + - Orange + - Orchid Pink + - Pansy Color + - Paprika Red + - Peach Color + - Peachy Pink + - Pearl Color + - Periwinkle + - Pine Color + - Pink + - Plum Purple + - Pomegranate Color + - Purple + - Raspberry Color + - Red + - Rose + - Ruby Red + - Rust Brown + - Salmon Pink + - Sand Color + - Sapphire Blue + - Seafoam Green + - Sienna + - Silver + - Sky Blue + - Slate Gray + - Steel Blue + - Steel Gray + - Tan Brown + - Tangerine Orange + - Teak + - Teal + - Teal Green + - Terracotta Orange + - Topaz Yellow + - Turquoise + - Vanilla Color + - Violet + - White + - Wine Red + - Yellow + +Composition: + - Beauty Shot + - Bird'S Eye View + - Black And White Portrait + - Candid Shot + - Double Exposure Portrait + - Environmental Portrait + - Extreme Close-Up + - Framed Portrait + - Frontal + - Full Shot + - Group Shot + - Headshot + - High Angle + - High-Key Portrait + - Infrared Portrait + - Low Angle + - Macro + - Medium Close-Up + - Medium Shot + - Micro + - Motion Blur Portrait + - Multiple Exposure Portrait + - Narrative Portrait + - Outdoor Portrait + - Over-The-Shoulder + - Profile + - Reflection Portrait + - Selective Color Portrait + - Sepia Tone Portrait + - Shadow Portrait + - Silhouette + - Split Tone Portrait + - Studio Portrait + - Surreal Portrait + - Three-Quarter View + - Two-Shot + - Worm'S Eye View + +Composition Form: + - Afterimage + - Border + - Bust Chart + - Character Chart + - Chart + - Collage + - Column Lineup + - Cropped + - Diagram + - Fading Border + - Fake Scrollbar + - Feet Out Of Frame + - Framed + - Head Out Of Frame + - Isometric + - Letterboxed + - Lineup + - Mosaic Art + - Move Chart + - Negative Space + - Omake + - Out Of Frame + - Outside Border + - Partially Underwater Shot + - Photomosaic + - Pillarboxed + - Polar Opposites + - Projected Inset + - Reference Sheet + - Relationship Graph + - Rotational Symmetry + - Rounded Corners + - Seating Chart + - Social Media Composition + - Stats + - Symmetry + - Tachi-E + - Trim Marks + - Viewfinder + - Zoom Layer + +Lighting: + - Artificial Indoor Lighting + - Back Lighting + - Bright And Sunny Lighting + - Bright Festive Holiday Lights Lighting + - Bright Stadium Lights Lighting + - Broad Lighting + - Butterfly Lighting + - Candlelit Dinner Lighting + - Candlelit Scene Lighting + - Celestial Sky Illumination Lighting + - City Lights Lighting + - Clamshell Lighting + - Classic Chandelier Lighting + - Classic Gas Lamp Lighting + - Classic Oil Lamp Lighting + - Cool And Blue Lighting + - Cozy Book Reading Nook Lighting + - Creepy Haunted House Lighting + - Dappled Sunlight Through Leaves Lighting + - Dazzling Disco Lights Lighting + - Dim And Cozy Lighting + - Dramatic High Contrast Lighting + - Dramatic Opera House Lighting + - Dramatic Spotlight Lighting + - Dreamlike Fairy Circle Lights Lighting + - Dreamy Moonlit Ocean Lighting + - Eerie Moonlit Graveyard Lighting + - Eerie Torchlight Lighting + - Electric Disco Dance Floor Lighting + - Elegant Crystal Lighting + - Enchanted Garden Lanterns Lighting + - Enchanting Bioluminescence Lighting + - Enveloping Fog Lights Lighting + - Ethereal Moonlight Lighting + - Ethereal Starlit Sky Lighting + - Exotic Tiki Torches Lighting + - Firelight Flicker Lighting + - 
Floating Chinese Lanterns Lighting + - Fluorescent Office Lighting + - Futuristic Laser Lights Lighting + - Futuristic Neon Glow Lighting + - Futuristic Spacecraft Lighting + - Glimmering Disco Ball Lighting + - Glimmering Water Reflections Lighting + - Glistening Icicle Lights Lighting + - Glistening Winter Frost Lighting + - Glittering Cityscape Lighting + - Glowing Carnival Rides Lighting + - Glowing Fireflies Lighting + - Glowing Jack-O'-Lanterns Lighting + - Glowing Lighthouse Beacon Lighting + - Glowing Neon Lighting + - Golden Hour Lighting + - Golden Hour Light Lighting + - Hard Shadows Lighting + - Harsh Overhead Lighting + - Haunting Moonlight Lighting + - High Key Lighting + - Industrial Warehouse Lighting + - Intense Firelight Lighting + - Intense Lightning Storm Lighting + - Intense Police Searchlight Lighting + - Intimate Candlelit Bath Lighting + - Light Painting Lighting + - Loop Lighting + - Low Key Lighting + - Luminous Jellyfish Aquarium Lighting + - Magical Enchanted Forest Lighting + - Majestic Lighting + - Modern Led Strips Lighting + - Moody And Mysterious Lighting + - Moonlit Forest Lighting + - Natural Sunlight Lighting + - Neon Glow Lighting + - Night Photography Lighting + - Nostalgic Retro Diner Glow Lighting + - Peaceful Starry Campfire Lighting + - Product Lighting + - Radiant Angelic Lighting + - Radiant Stadium Floodlights Lighting + - Radiant Streetlamps Lighting + - Rembrandt Lighting + - Reminiscent Film Lighting + - Retro Diner Neon Signs Lighting + - Retro Neon Diner Lights Lighting + - Rim Lighting + - Romantic Candlelight Lighting + - Rustic Barn Lighting + - Rustic Campfire Lighting + - Rustic Fireplace Lighting + - Rustic Lanterns Lighting + - Sci-Fi Futuristic Lighting + - Sci-Fi Holographic Projections Lighting + - Serene Zen Garden Illumination Lighting + - Shadow Play Lighting + - Shimmering Lake Reflections Lighting + - Short Lighting + - Side Lighting + - Silhouette Lighting + - Silky Water Effect (Long Exposure) Lighting + - Sinister Back Alley Lighting + - Soft And Diffused Lighting + - Soft And Warm Lighting + - Soft Candlelit Meditation Lighting + - Soft Illumination Lighting + - Soft Pastel Glow Lighting + - Soft Shadows Lighting + - Split Lighting + - Spooky Halloween Lights Lighting + - Starry Night Lighting + - Still Life Lighting + - Street Lamp Lighting + - Studio Portrait Lighting + - Subdued Desk Lamp Lighting + - Subdued Nightlight Lighting + - Subtle Ambient Glow Lighting + - Sunrise At The Mountains Lighting + - Sunrise Over The Ocean Lighting + - Sunset Silhouette Lighting + - Twinkling Christmas Stars Lighting + - Twinkling Fairy Lights Lighting + - Under The Christmas Tree Lights Lighting + - Under The Streetlights Lighting + - Underwater Illumination Lighting + - Vibrant Stage Lighting + - Vintage Film Noir Lighting + - Vivid Art Gallery Spotlights Lighting + - Warm Fireplace Embers Lighting + - Warm Sunset Glow Lighting + - Whimsical Fairy Tale Lighting + +Negative: + - Blurry, Text, Watermark, Signature, Frame + - Disfigure Body, Disfigured Torso, Disfigured Face, Disfigured Eyes, Disfigured Pupils, Disfigured Arms, Disfigured Hands, Disfigured Fingers, Disfigured Legs, Disfigured Toes + +Picture Effect: + - Anaglyph + - Blending + - Bloom + - Blurry + - Chromatic Aberration + - Chromatic Aberration Abuse + - Cinematic Lighting + - Depth Of Field + - Dithering + - Drop Shadow + - Film Grain + - Fujicolor + - Glowing Light + - God Rays + - Halftone + - Image Fill + - Jpeg Artifacts + - Motion Blur + - Motion Lines + - 
Multiple Monochrome + - Optical Illusion + - Ray Tracing + - Reflection Light + - Scanlines + - Sparkle + - Speed Lines + - Stereogram + - Vignetting + +Picture Quality: + - 1080P + - 16K + - 4K + - 8K + - Accurate + - Anatomically Correct + - Award Winning + - Best Quality + - HD + - High Quality + - Highres + - Masterpiece + - Retina + - Super Detail + - Textured Skin + - UHD + +Setting: + - Advanced Alien Civilization Setting + - Age Of Exploration Setting + - Ai Uprising Setting + - Ai-Powered Society Setting + - Alien Invasion Setting + - Alternate Dimensions Setting + - Alternate History Setting + - Alternate Identity Setting + - Alternate Realities Collide Setting + - Ancient Civilization Setting + - Ancient Curse Setting + - Ancient Gods Setting + - Ancient Prophecy Setting + - Apocalyptic Earth Setting + - Arctic Setting + - Artificial Intelligence Setting + - Astral Plane Setting + - Biopunk Setting + - City Of Tomorrow Setting + - City Under Siege Setting + - Cosmic Horror Setting + - Cyber Warfare Setting + - Cybernetic Augmentation Setting + - Cyberpunk Setting + - Desert Setting + - Detective Noir Setting + - Dinosaur Era Setting + - Dream Within A Dream Setting + - Dream World Setting + - Dystopian Setting + - Espionage Setting + - Fairytale Setting + - Fantasy Setting + - Forgotten Technology Setting + - Future Dystopia Setting + - Future War Setting + - Futuristic Setting + - Futuristic Sports Setting + - Galactic Conflict Setting + - Galactic Empire Setting + - Genetic Engineering Setting + - Genetic Mutation Setting + - Haunted Mansion Setting + - Hidden Alien Species Setting + - Hidden Sanctuary Setting + - Historical Setting + - Holographic Reality Setting + - Interdimensional Travel Setting + - Jungle Setting + - Lost Civilization Setting + - Lost In Space Setting + - Lost In Time Setting + - Lunar Colony Setting + - Magical School Setting + - Mechanical World Setting + - Medieval Setting + - Mind Upload Setting + - Mythical Artifacts Setting + - Mythical Beasts Setting + - Mythological Setting + - Nature Reclaiming Civilization Setting + - Navigating The Multiverse Setting + - Ocean Exploration Setting + - Parallel Histories Setting + - Parallel Universe Setting + - Pirate Adventure Setting + - Planetary Exploration Setting + - Post-Apocalyptic Setting + - Robot Uprising Setting + - Robotic Society Setting + - Robotic Utopia Setting + - Sci-Fi Setting + - Space Setting + - Space Anomaly Setting + - Space Colonization Setting + - Space Opera Setting + - Space Rebellion Setting + - Space Travelers Setting + - Space Western Setting + - Steampunk Setting + - Superhero Setting + - Supernatural Mystery Setting + - Surreal Setting + - Time Crime Setting + - Time Loop Setting + - Time Paradox Setting + - Time Travel Setting + - Transhumanism Setting + - Underwater Setting + - Urban Setting + - Utopian Society Setting + - Victorian Setting + - Virtual Conspiracy Setting + - Virtual Paradise Setting + - Virtual Reality Setting + - Wild Animal Kingdom Setting + - Wild West Setting + +Shot: + - 135Mm + - 35Mm + - 360 View + - 85Mm + - Bokeh + - Canon + - Caustics + - Diffraction Spikes + - Emphasis Lines + - Eye-Level Shot + - F/1.2 + - F/1.8 + - F/16 + - F/2.8 + - F/4.0 + - Foreshortening + - Fujifilm + - Hasselblad + - Lens Flare + - Macro Photo + - Nikon + - Overexposure + - Satellite Image + - Sony Fe + - Sony Fe Gm + - Ultra-Wide Angle + - Wide-Angle + +Style: + - Abstract Art Style + - Abstract Expressionism Style + - Abstract Figurative Style + - Abstract Landscapes Style 
+ - Abstract Portraiture Style + - Abstract Still Life Style + - Abstractionism Style + - Acrylic Painting Style + - Action Painting Style + - American Impressionism Style + - American Propaganda Poster Style + - American Scene Painting Style + - Analytical Cubism Style + - Anime Style + - Architectural Design Style + - Art Brut Style + - Art Deco Style + - Art Nouveau Style + - Ashcan School Style + - Assemblage Art Style + - Augmented Reality Art Style + - Baroque Style + - Batik Style + - Bauhaus Style + - Bengal School Of Art Style + - Blind Box Toy Style + - Body Painting Style + - Botanical Art Style + - Bronze Casting Style + - By Alfons Mucha Style + - Byzantine Art Style + - Carl Larsson Style + - Ceramic Pottery Style + - Charcoal Drawing Style + - Chiaroscuro Style + - Classicism Style + - Cobra Style + - Color Field Painting Style + - Colored Pencil Drawing Style + - Conceptual Art Style + - Constructivism Style + - Contemporary Style + - Contemporary Art Style + - Cubism Style + - Cubist Futurism Style + - Dada Style + - Dadaism Style + - De Stijl Style + - Digital Art Style + - Digital Sculpture Style + - Dutch Golden Age Style + - Embroidery Style + - En Plein Air Style + - Environmental Art Style + - Environmental Sculpture Style + - Etching Style + - Expressionism Style + - Fashion Design Style + - Fauvism Style + - Fiber Art Style + - Figurative Art Style + - Film And Video Art Style + - Flemish Baroque Style + - Folk Art Style + - Found Object Art Style + - Futurism Style + - Genre Painting Style + - Ghibli-Like Colors Style + - Glassblowing Style + - Gothic Style + - Gothic Art Style + - Gothic Revival Style + - Gouache Painting Style + - Graffiti Art Style + - Graphite Drawing Style + - Hard-Edge Painting Style + - Hellenistic Art Style + - High Detail Style + - Hindu Art Style + - Hudson River School Style + - Hyperrealism Style + - Impressionism Style + - Industrial Design Style + - Installation Art Style + - Interactive Art Style + - Interior Architecture Style + - Interior Design Style + - Islamic Art Style + - Jewelry Design Style + - Kinetic Art Style + - Land Art Style + - Landscapes Style + - Les Nabis Style + - Light Art Style + - Linocut Style + - Lithography Style + - Luminism Style + - Magic Realism Style + - Mail Art Style + - Mannerism Style + - Medieval Art Style + - Metalworking Style + - Metaphysical Art Style + - Minimalism Style + - Mixed Media Style + - Modern Style + - Monet Style + - Monotype Style + - Mosaic Style + - Mughal Art Style + - Naive Art Style + - Neoclassicism Style + - Neo-Expressionism Style + - Neo-Impressionism Style + - New Objectivity Style + - Northern Renaissance Style + - Oil Painting Style + - Op Art Style + - Orphism Style + - Pastel Painting Style + - Pen And Ink Drawing Style + - Performance Art Style + - Performance Painting Style + - Photography Style + - Photorealism Style + - Pixar Style + - Pointillism Style + - Pop Art Style + - Portraiture Style + - Post-Impressionism Style + - Post-Minimalism Style + - Postmodernism Style + - Precisionism Style + - Pre-Raphaelite Brotherhood Style + - Pre-Rephaëlite Painting Style + - Primitivism Style + - Printmaking Style + - Proto-Renaissance Style + - Raised Fist Style + - Realism Style + - Renaissance Style + - Renaissance Art Style + - Rococo Style + - Romanticism Style + - Screen Printing Style + - Sculpture Style + - Seascape Style + - Sgraffito Style + - Sienese School Style + - Social Realism Style + - Socialist Realism Style + - Southern Renaissance Style + - Stained 
Glass Style + - Still Life Style + - Stone Carving Style + - Street Art Style + - Superflat Style + - Suprematism Style + - Surrealism Style + - Symbolism Style + - Synthetic Cubism Style + - Tapestry Style + - Textile Design Style + - Textile Sculpture Style + - Tonalism Style + - Trompe-L'Oeil Style + - Ukiyo-E Style + - Verism Style + - Victorian Art Style + - Video Art Style + - Virtual Reality Art Style + - Watercolor Style + - Weaving Style + - Wildlife Art Style + - Wood Carving Style + - Woodcut Style diff --git a/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml.example b/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml.example new file mode 100644 index 0000000000000000000000000000000000000000..f69af658c2dc67e1524f79940c7537766257adfb --- /dev/null +++ b/custom_nodes/ComfyUI-Inspire-Pack/resources/prompt-builder.yaml.example @@ -0,0 +1,1048 @@ +#Category: + #Input + +Angle of View: + - Atmospheric Perspective + - Blurry Foreground + - Close-Up + - Cowboy Shot + - Cut-In + - Dutch Angle + - First-Person View + - Fisheye + - From Above + - From Behind + - From Below + - From Outside + - From Side + - Hatching (Texture) + - Multiple Views + - Panorama + - Perspective + - Pov + - Rotated + - Sideways + - Three Sided View + - Upside-Down + - Vanishing Point + - Wide Shot + +Artists: + - Abanindranath Tagore + - Abdur Rahman Chughtai + - Abu'L Hasan + - Adolf Wölfli + - Agnes Denes + - Agnes Martin + - Agnolo Di Cosimo (Bronzino) + - Ai Weiwei + - Albert Bierstadt + - Albert Gleizes + - Albert Pinkham Ryder + - Alberto Burri + - Alberto Giacometti + - Albrecht Dürer + - Alexander Calder + - Alexander Gerasimov + - Alexander Kanoldt + - Alexander Rodchenko + - Alfred Wallis + - Alice Neel + - Aloïse Corbaz + - Alphonse Mucha + - Ambrogio Lorenzetti + - Amedeo Modigliani + - Amrita Sher-Gil + - André Derain + - Andrei Rublev + - Andy Goldsworthy + - Andy Warhol + - Angelica Kauffman + - Anna Banana + - Ansel Adams + - Anselm Kiefer + - Anthony Van Dyck + - Antoni Gaudi + - Antonio Canova + - Apelles + - Arkady Plastov + - Artemisia Gentileschi + - Artists Of Hagia Sophia + - Asger Jorn + - Asher B. 
Durand + - Asit Kumar Haldar + - Aubrey Beardsley + - Audrey Flack + - Auguste Rodin + - Augustus Pugin + - Banksy + - Barbara Kruger + - Barberini Faun + - Bart Van Der Leck + - Baseera Khan + - Becky Lloyd + - Ben Shahn + - Benedetto Antelami + - Bichitr + - Bill Traylor + - Bridget Riley + - Bruce Nauman + - Camille Claudel + - Camille Pissarro + - Caravaggio + - Carel Willink + - Carl Andre + - Carlo Carrà + - Caspar David Friedrich + - Charles Demuth + - Charles Sheeler + - Chiho Aoshima + - Childe Hassam + - Chuck Close + - Cimabue + - Cindy Sherman + - Claude Monet + - Clementine Hunter + - Clyfford Still + - Constant Nieuwenhuys + - Cornelis Norbertus Gysbrechts + - Cory Arcangel + - Dante Gabriel Rossetti + - David Hockney + - Diego Rivera + - Diego Velázquez + - Donald Judd + - Doris Salcedo + - Duccio Di Buoninsegna + - Dying Gaul + - Edgar Degas + - Edouard Manet + - Édouard Vuillard + - Edvard Munch + - Edward Burne-Jones + - Edward Hopper + - Edward Ruscha + - Egon Schiele + - El Greco + - El Lissitzky + - Ellsworth Kelly + - Émile-Jacques Ruhlmann + - Ernst Ludwig Kirchner + - Erté (Romain De Tirtoff) + - Eugene Delacroix + - Eva Hesse + - Evert Collier + - Fernand Léger + - Filippo Tommaso Marinetti + - Francesco Mazzola Parmigianino + - Francis Bacon + - Francisco De Zurbaran + - Francisco Goya + - Francois Boucher + - Frank Stella + - Frans Hals + - Frantisek Kupka + - Frederic Edwin Church + - Frida Kahlo + - Genesis P-Orridge + - George Ault + - George Bellows + - George Grosz + - George Inness + - George Luks + - George Tooker + - Georges Braque + - Georges Seurat + - Georgia O'Keeffe + - Gerrit Rietveld + - Giacomo Balla + - Gian Lorenzo Bernini + - Giorgio De Chirico + - Giorgio Morandi + - Giotto Di Bondone + - Giovanni Battista Tiepolo + - Govardhan + - Grandma Moses (Anna Mary Robertson Moses) + - Grant Wood + - Gustav Klimt + - Gustave Courbet + - Gustave Moreau + - Hannah Höch + - Hans Holbein The Younger + - Helen Frankenthaler + - Henri Cartier-Bresson + - Henri Matisse + - Henri Rousseau + - Henri-Edmond Cross + - Henry Darger + - Hieronymus Bosch + - Hishikawa Moronobu + - Howard Finster + - Invader (Space Invader) + - Ismail Gulgee + - Ivan Generalić + - J.M.W. Turner + - Jackson Pollock + - Jacob Jordaens + - Jacopo Comin (Tintoretto) + - Jacques-Louis David + - James Abbott Mcneill Whistler + - James Wyatt + - Jamini Roy + - Jan Brueghel The Elder + - Jan Vermeer + - Jean Arp + - Jean Auguste Dominique Ingres + - Jean Boulogne (Giambologna) + - Jean Dubuffet + - Jean Metzinger + - Jean Tinguely + - Jean-Antoine Watteau + - Jean-François Millet + - Jean-Honoré Fragonard + - Jean-Michel Basquiat + - Jeff Koons + - Jenny Saville + - Jesús Rafael Soto + - Joan Miró + - Johann Sebastian Bach + - Johannes Vermeer + - John Baldessari + - John Everett Millais + - John F. 
Peto + - John Mclaughlin + - John Singer Sargent + - John Sloan + - John William Waterhouse + - Josef Albers + - Joseph Kosuth + - Joseph Mallord William Turner + - Juan Gris + - Judy Chicago + - Kara Walker + - Karel Appel + - Karl Benjamin + - Katsushika Hokusai + - Kazimir Malevich + - Keith Haring + - Kitagawa Utamaro + - Lady Pink (Sandra Fabara) + - Laurence Gartel + - Lee Krasner + - Leonardo Da Vinci + - Lex Horn + - Louis Comfort Tiffany + - Louise Bourgeois + - Louise Nevelson + - Lucian Freud + - Maestro Esiguo + - Man Ray + - Marc Chagall + - Marcel Duchamp + - Mark Rothko + - Mary Cassatt + - Master Of The Franciscan Crucifixes + - Mastro Guglielmo + - Maurice De Vlaminck + - Maurice Denis + - Max Beckmann + - Max Ernst + - Michael Janis + - Michelangelo + - Morris Louis + - Mr. (Masaru Shichinohe) + - Nandalal Bose + - Naum Gabo + - Nicholas Of Verdun + - Nicola Pisano + - Odilon Redon + - Olafur Eliasson + - Olga Rozanova + - Oskar Schlemmer + - Otto Dix + - Pablo Picasso + - Paul Cezanne + - Paul Gauguin + - Paul Klee + - Paul Sérusier + - Paul Signac + - Peter Paul Rubens + - Pierre Alechinsky + - Pierre Bonnard + - Pierre-Auguste Renoir + - Piet Mondrian + - Pieter Bruegel The Elder + - Pietro Lorenzetti + - Raffaello Sanzio (Raphael) + - Raja Ravi Varma + - Ralph Albert Blakelock + - Ralph Goings + - Raphael (Raffaello Sanzio) + - Ray Johnson + - Refik Anadol + - Rembrandt Harmenszoon Van Rijn + - René Lalique + - Rene Magritte + - Richard Anuszkiewicz + - Richard Estes + - Richard Hamilton + - Richard Serra + - Robert Delaunay + - Robert Henri + - Robert Smithson + - Robert Watts + - Roy Lichtenstein + - Ryoji Ikeda + - Sadequain + - Saint Luke + - Salvador Dali + - Sandro Botticelli + - Sergei Gerasimov + - Shepard Fairey + - Simone Martini + - Sir Charles Barry + - Sir Lawrence Alma-Tadema + - Sol Lewitt + - Sonia Delaunay + - Takashi Murakami + - Tamara De Lempicka + - Theo Van Doesburg + - Théodore Géricault + - Theodore The Studite + - Thomas Cole + - Thomas Eakins + - Thomas Gainsborough + - Thomas Hart Benton + - Tintoretto (Jacopo Comin) + - Titian (Tiziano Vecellio) + - Tony Da + - Umberto Boccioni + - Ustad Mansur + - Utagawa Hiroshige + - Vasily Kandinsky + - Vera Mukhina + - Victor Vasarely + - Vincent Van Gogh + - Vladimir Tatlin + - Wassily Kandinsky + - White On White + - Willem De Kooning + - William Blake + - William Holman Hunt + - William Merritt Chase + - William Michael Harnett + - Winged Victory Of Samothrace + - Yayoi Kusama + - Yoshitomo Nara + - Yves Klein + - Zaha Hadid + +Character Types: + - Acolyte + - Acrobat + - Apothecary + - Artificer + - Artisan + - Barbarian + - Bard + - Bardic Sage + - Blood Hunter + - Bounty Hunter + - Cavalier + - Champion + - Charlatan + - City Watch + - Cleric + - Clown + - Druid + - Duelist + - Elemental Shaman + - Executioner + - Exorcist + - Explorer + - Fey-Touched + - Fighter + - Geomancer + - Gladiator + - Guild Artisan + - Hermit + - Knight + - Monk + - Mystic + - Noble + - Outlander + - Paladin + - Pirate + - Ranger + - Rogue + - Runescribe + - Sage + - Sailor + - Shaman + - Soldier + - Sorcerer + - Urchin + - Warden + - Warlock + - Warlord + - Wizard + +Colors: + - Almond Color + - Amber + - Apricot Orange + - Ash Gray + - Beige + - Black + - Blue + - Brick Red + - Bronze + - Brown + - Caramel Color + - Carnation Pink + - Cerulean + - Charcoal Grey + - Chocolate Color + - Cinnamon Brown + - Coral Orange + - Cream Color + - Creamy White + - Cyan + - Denim Blue + - Emerald Green + - Forest Green + - Fuchsia 
+ - Gold + - Goldfish Orange + - Grass Green + - Green + - Honey Color + - Indigo + - Ivory Color + - Jade Green + - Lavender + - Lemon Yellow + - Lilac Purple + - Lime Green + - Magenta + - Mahogany + - Marigold + - Maroon + - Mauve + - Midnight Blue + - Mint Green + - Mocha Brown + - Navy Blue + - Olive Green + - Orange + - Orchid Pink + - Pansy Color + - Paprika Red + - Peach Color + - Peachy Pink + - Pearl Color + - Periwinkle + - Pine Color + - Pink + - Plum Purple + - Pomegranate Color + - Purple + - Raspberry Color + - Red + - Rose + - Ruby Red + - Rust Brown + - Salmon Pink + - Sand Color + - Sapphire Blue + - Seafoam Green + - Sienna + - Silver + - Sky Blue + - Slate Gray + - Steel Blue + - Steel Gray + - Tan Brown + - Tangerine Orange + - Teak + - Teal + - Teal Green + - Terracotta Orange + - Topaz Yellow + - Turquoise + - Vanilla Color + - Violet + - White + - Wine Red + - Yellow + +Composition: + - Beauty Shot + - Bird'S Eye View + - Black And White Portrait + - Candid Shot + - Double Exposure Portrait + - Environmental Portrait + - Extreme Close-Up + - Framed Portrait + - Frontal + - Full Shot + - Group Shot + - Headshot + - High Angle + - High-Key Portrait + - Infrared Portrait + - Low Angle + - Macro + - Medium Close-Up + - Medium Shot + - Micro + - Motion Blur Portrait + - Multiple Exposure Portrait + - Narrative Portrait + - Outdoor Portrait + - Over-The-Shoulder + - Profile + - Reflection Portrait + - Selective Color Portrait + - Sepia Tone Portrait + - Shadow Portrait + - Silhouette + - Split Tone Portrait + - Studio Portrait + - Surreal Portrait + - Three-Quarter View + - Two-Shot + - Worm'S Eye View + +Composition Form: + - Afterimage + - Border + - Bust Chart + - Character Chart + - Chart + - Collage + - Column Lineup + - Cropped + - Diagram + - Fading Border + - Fake Scrollbar + - Feet Out Of Frame + - Framed + - Head Out Of Frame + - Isometric + - Letterboxed + - Lineup + - Mosaic Art + - Move Chart + - Negative Space + - Omake + - Out Of Frame + - Outside Border + - Partially Underwater Shot + - Photomosaic + - Pillarboxed + - Polar Opposites + - Projected Inset + - Reference Sheet + - Relationship Graph + - Rotational Symmetry + - Rounded Corners + - Seating Chart + - Social Media Composition + - Stats + - Symmetry + - Tachi-E + - Trim Marks + - Viewfinder + - Zoom Layer + +Lighting: + - Artificial Indoor Lighting + - Back Lighting + - Bright And Sunny Lighting + - Bright Festive Holiday Lights Lighting + - Bright Stadium Lights Lighting + - Broad Lighting + - Butterfly Lighting + - Candlelit Dinner Lighting + - Candlelit Scene Lighting + - Celestial Sky Illumination Lighting + - City Lights Lighting + - Clamshell Lighting + - Classic Chandelier Lighting + - Classic Gas Lamp Lighting + - Classic Oil Lamp Lighting + - Cool And Blue Lighting + - Cozy Book Reading Nook Lighting + - Creepy Haunted House Lighting + - Dappled Sunlight Through Leaves Lighting + - Dazzling Disco Lights Lighting + - Dim And Cozy Lighting + - Dramatic High Contrast Lighting + - Dramatic Opera House Lighting + - Dramatic Spotlight Lighting + - Dreamlike Fairy Circle Lights Lighting + - Dreamy Moonlit Ocean Lighting + - Eerie Moonlit Graveyard Lighting + - Eerie Torchlight Lighting + - Electric Disco Dance Floor Lighting + - Elegant Crystal Lighting + - Enchanted Garden Lanterns Lighting + - Enchanting Bioluminescence Lighting + - Enveloping Fog Lights Lighting + - Ethereal Moonlight Lighting + - Ethereal Starlit Sky Lighting + - Exotic Tiki Torches Lighting + - Firelight Flicker Lighting + - 
Floating Chinese Lanterns Lighting + - Fluorescent Office Lighting + - Futuristic Laser Lights Lighting + - Futuristic Neon Glow Lighting + - Futuristic Spacecraft Lighting + - Glimmering Disco Ball Lighting + - Glimmering Water Reflections Lighting + - Glistening Icicle Lights Lighting + - Glistening Winter Frost Lighting + - Glittering Cityscape Lighting + - Glowing Carnival Rides Lighting + - Glowing Fireflies Lighting + - Glowing Jack-O'-Lanterns Lighting + - Glowing Lighthouse Beacon Lighting + - Glowing Neon Lighting + - Golden Hour Lighting + - Golden Hour Light Lighting + - Hard Shadows Lighting + - Harsh Overhead Lighting + - Haunting Moonlight Lighting + - High Key Lighting + - Industrial Warehouse Lighting + - Intense Firelight Lighting + - Intense Lightning Storm Lighting + - Intense Police Searchlight Lighting + - Intimate Candlelit Bath Lighting + - Light Painting Lighting + - Loop Lighting + - Low Key Lighting + - Luminous Jellyfish Aquarium Lighting + - Magical Enchanted Forest Lighting + - Majestic Lighting + - Modern Led Strips Lighting + - Moody And Mysterious Lighting + - Moonlit Forest Lighting + - Natural Sunlight Lighting + - Neon Glow Lighting + - Night Photography Lighting + - Nostalgic Retro Diner Glow Lighting + - Peaceful Starry Campfire Lighting + - Product Lighting + - Radiant Angelic Lighting + - Radiant Stadium Floodlights Lighting + - Radiant Streetlamps Lighting + - Rembrandt Lighting + - Reminiscent Film Lighting + - Retro Diner Neon Signs Lighting + - Retro Neon Diner Lights Lighting + - Rim Lighting + - Romantic Candlelight Lighting + - Rustic Barn Lighting + - Rustic Campfire Lighting + - Rustic Fireplace Lighting + - Rustic Lanterns Lighting + - Sci-Fi Futuristic Lighting + - Sci-Fi Holographic Projections Lighting + - Serene Zen Garden Illumination Lighting + - Shadow Play Lighting + - Shimmering Lake Reflections Lighting + - Short Lighting + - Side Lighting + - Silhouette Lighting + - Silky Water Effect (Long Exposure) Lighting + - Sinister Back Alley Lighting + - Soft And Diffused Lighting + - Soft And Warm Lighting + - Soft Candlelit Meditation Lighting + - Soft Illumination Lighting + - Soft Pastel Glow Lighting + - Soft Shadows Lighting + - Split Lighting + - Spooky Halloween Lights Lighting + - Starry Night Lighting + - Still Life Lighting + - Street Lamp Lighting + - Studio Portrait Lighting + - Subdued Desk Lamp Lighting + - Subdued Nightlight Lighting + - Subtle Ambient Glow Lighting + - Sunrise At The Mountains Lighting + - Sunrise Over The Ocean Lighting + - Sunset Silhouette Lighting + - Twinkling Christmas Stars Lighting + - Twinkling Fairy Lights Lighting + - Under The Christmas Tree Lights Lighting + - Under The Streetlights Lighting + - Underwater Illumination Lighting + - Vibrant Stage Lighting + - Vintage Film Noir Lighting + - Vivid Art Gallery Spotlights Lighting + - Warm Fireplace Embers Lighting + - Warm Sunset Glow Lighting + - Whimsical Fairy Tale Lighting + +Negative: + - Blurry, Text, Watermark, Signature, Frame + - Disfigure Body, Disfigured Torso, Disfigured Face, Disfigured Eyes, Disfigured Pupils, Disfigured Arms, Disfigured Hands, Disfigured Fingers, Disfigured Legs, Disfigured Toes + +Picture Effect: + - Anaglyph + - Blending + - Bloom + - Blurry + - Chromatic Aberration + - Chromatic Aberration Abuse + - Cinematic Lighting + - Depth Of Field + - Dithering + - Drop Shadow + - Film Grain + - Fujicolor + - Glowing Light + - God Rays + - Halftone + - Image Fill + - Jpeg Artifacts + - Motion Blur + - Motion Lines + - 
Multiple Monochrome + - Optical Illusion + - Ray Tracing + - Reflection Light + - Scanlines + - Sparkle + - Speed Lines + - Stereogram + - Vignetting + +Picture Quality: + - 1080P + - 16K + - 4K + - 8K + - Accurate + - Anatomically Correct + - Award Winning + - Best Quality + - HD + - High Quality + - Highres + - Masterpiece + - Retina + - Super Detail + - Textured Skin + - UHD + +Setting: + - Advanced Alien Civilization Setting + - Age Of Exploration Setting + - Ai Uprising Setting + - Ai-Powered Society Setting + - Alien Invasion Setting + - Alternate Dimensions Setting + - Alternate History Setting + - Alternate Identity Setting + - Alternate Realities Collide Setting + - Ancient Civilization Setting + - Ancient Curse Setting + - Ancient Gods Setting + - Ancient Prophecy Setting + - Apocalyptic Earth Setting + - Arctic Setting + - Artificial Intelligence Setting + - Astral Plane Setting + - Biopunk Setting + - City Of Tomorrow Setting + - City Under Siege Setting + - Cosmic Horror Setting + - Cyber Warfare Setting + - Cybernetic Augmentation Setting + - Cyberpunk Setting + - Desert Setting + - Detective Noir Setting + - Dinosaur Era Setting + - Dream Within A Dream Setting + - Dream World Setting + - Dystopian Setting + - Espionage Setting + - Fairytale Setting + - Fantasy Setting + - Forgotten Technology Setting + - Future Dystopia Setting + - Future War Setting + - Futuristic Setting + - Futuristic Sports Setting + - Galactic Conflict Setting + - Galactic Empire Setting + - Genetic Engineering Setting + - Genetic Mutation Setting + - Haunted Mansion Setting + - Hidden Alien Species Setting + - Hidden Sanctuary Setting + - Historical Setting + - Holographic Reality Setting + - Interdimensional Travel Setting + - Jungle Setting + - Lost Civilization Setting + - Lost In Space Setting + - Lost In Time Setting + - Lunar Colony Setting + - Magical School Setting + - Mechanical World Setting + - Medieval Setting + - Mind Upload Setting + - Mythical Artifacts Setting + - Mythical Beasts Setting + - Mythological Setting + - Nature Reclaiming Civilization Setting + - Navigating The Multiverse Setting + - Ocean Exploration Setting + - Parallel Histories Setting + - Parallel Universe Setting + - Pirate Adventure Setting + - Planetary Exploration Setting + - Post-Apocalyptic Setting + - Robot Uprising Setting + - Robotic Society Setting + - Robotic Utopia Setting + - Sci-Fi Setting + - Space Setting + - Space Anomaly Setting + - Space Colonization Setting + - Space Opera Setting + - Space Rebellion Setting + - Space Travelers Setting + - Space Western Setting + - Steampunk Setting + - Superhero Setting + - Supernatural Mystery Setting + - Surreal Setting + - Time Crime Setting + - Time Loop Setting + - Time Paradox Setting + - Time Travel Setting + - Transhumanism Setting + - Underwater Setting + - Urban Setting + - Utopian Society Setting + - Victorian Setting + - Virtual Conspiracy Setting + - Virtual Paradise Setting + - Virtual Reality Setting + - Wild Animal Kingdom Setting + - Wild West Setting + +Shot: + - 135Mm + - 35Mm + - 360 View + - 85Mm + - Bokeh + - Canon + - Caustics + - Diffraction Spikes + - Emphasis Lines + - Eye-Level Shot + - F/1.2 + - F/1.8 + - F/16 + - F/2.8 + - F/4.0 + - Foreshortening + - Fujifilm + - Hasselblad + - Lens Flare + - Macro Photo + - Nikon + - Overexposure + - Satellite Image + - Sony Fe + - Sony Fe Gm + - Ultra-Wide Angle + - Wide-Angle + +Style: + - Abstract Art Style + - Abstract Expressionism Style + - Abstract Figurative Style + - Abstract Landscapes Style 
+ - Abstract Portraiture Style + - Abstract Still Life Style + - Abstractionism Style + - Acrylic Painting Style + - Action Painting Style + - American Impressionism Style + - American Propaganda Poster Style + - American Scene Painting Style + - Analytical Cubism Style + - Anime Style + - Architectural Design Style + - Art Brut Style + - Art Deco Style + - Art Nouveau Style + - Ashcan School Style + - Assemblage Art Style + - Augmented Reality Art Style + - Baroque Style + - Batik Style + - Bauhaus Style + - Bengal School Of Art Style + - Blind Box Toy Style + - Body Painting Style + - Botanical Art Style + - Bronze Casting Style + - By Alfons Mucha Style + - Byzantine Art Style + - Carl Larsson Style + - Ceramic Pottery Style + - Charcoal Drawing Style + - Chiaroscuro Style + - Classicism Style + - Cobra Style + - Color Field Painting Style + - Colored Pencil Drawing Style + - Conceptual Art Style + - Constructivism Style + - Contemporary Style + - Contemporary Art Style + - Cubism Style + - Cubist Futurism Style + - Dada Style + - Dadaism Style + - De Stijl Style + - Digital Art Style + - Digital Sculpture Style + - Dutch Golden Age Style + - Embroidery Style + - En Plein Air Style + - Environmental Art Style + - Environmental Sculpture Style + - Etching Style + - Expressionism Style + - Fashion Design Style + - Fauvism Style + - Fiber Art Style + - Figurative Art Style + - Film And Video Art Style + - Flemish Baroque Style + - Folk Art Style + - Found Object Art Style + - Futurism Style + - Genre Painting Style + - Ghibli-Like Colors Style + - Glassblowing Style + - Gothic Style + - Gothic Art Style + - Gothic Revival Style + - Gouache Painting Style + - Graffiti Art Style + - Graphite Drawing Style + - Hard-Edge Painting Style + - Hellenistic Art Style + - High Detail Style + - Hindu Art Style + - Hudson River School Style + - Hyperrealism Style + - Impressionism Style + - Industrial Design Style + - Installation Art Style + - Interactive Art Style + - Interior Architecture Style + - Interior Design Style + - Islamic Art Style + - Jewelry Design Style + - Kinetic Art Style + - Land Art Style + - Landscapes Style + - Les Nabis Style + - Light Art Style + - Linocut Style + - Lithography Style + - Luminism Style + - Magic Realism Style + - Mail Art Style + - Mannerism Style + - Medieval Art Style + - Metalworking Style + - Metaphysical Art Style + - Minimalism Style + - Mixed Media Style + - Modern Style + - Monet Style + - Monotype Style + - Mosaic Style + - Mughal Art Style + - Naive Art Style + - Neoclassicism Style + - Neo-Expressionism Style + - Neo-Impressionism Style + - New Objectivity Style + - Northern Renaissance Style + - Oil Painting Style + - Op Art Style + - Orphism Style + - Pastel Painting Style + - Pen And Ink Drawing Style + - Performance Art Style + - Performance Painting Style + - Photography Style + - Photorealism Style + - Pixar Style + - Pointillism Style + - Pop Art Style + - Portraiture Style + - Post-Impressionism Style + - Post-Minimalism Style + - Postmodernism Style + - Precisionism Style + - Pre-Raphaelite Brotherhood Style + - Pre-Rephaëlite Painting Style + - Primitivism Style + - Printmaking Style + - Proto-Renaissance Style + - Raised Fist Style + - Realism Style + - Renaissance Style + - Renaissance Art Style + - Rococo Style + - Romanticism Style + - Screen Printing Style + - Sculpture Style + - Seascape Style + - Sgraffito Style + - Sienese School Style + - Social Realism Style + - Socialist Realism Style + - Southern Renaissance Style + - Stained 
Glass Style + - Still Life Style + - Stone Carving Style + - Street Art Style + - Superflat Style + - Suprematism Style + - Surrealism Style + - Symbolism Style + - Synthetic Cubism Style + - Tapestry Style + - Textile Design Style + - Textile Sculpture Style + - Tonalism Style + - Trompe-L'Oeil Style + - Ukiyo-E Style + - Verism Style + - Victorian Art Style + - Video Art Style + - Virtual Reality Art Style + - Watercolor Style + - Weaving Style + - Wildlife Art Style + - Wood Carving Style + - Woodcut Style diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/.gitignore b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ed8ebf583f771da9150c35db3955987b7d757904 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/.gitignore @@ -0,0 +1 @@ +__pycache__ \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/README.md b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9f82363dc5556cd43adb810198ff5f73422bf627 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/README.md @@ -0,0 +1,59 @@ +# ComfyUI-Lora-Auto-Trigger-Words + +This project is a fork of https://github.com/Extraltodeus/LoadLoraWithTags +The aim of these custom nodes is to give _easy_ access to the tags used to trigger a lora. +This project is compatible with Stacked Loras from https://github.com/LucianoCirino/efficiency-nodes-comfyui/releases +I talk about **lora**, but everything works with **lycoris** too. + +## Install +Some of this project's nodes depend on https://github.com/pythongosssss/ComfyUI-Custom-Scripts : +- LoraLoaderAdvanced +- LoraLoaderStackedAdvanced +Both have vanilla equivalents that work without it. + +Overall, Custom-Scripts is recommended so that the content of the tag lists can be inspected with its `showText` node. + +## Features +### Main nodes +#### Vanilla vs Advanced +Vanilla refers to nodes that have neither a lora preview in the menu nor the lora list, but the features provided are the same. +![image](./images/main.png) +#### Nodes +- LoraLoader (Vanilla or Advanced) +- LoraLoaderStacked (Vanilla or Advanced). The stacked lora input is optional. +These nodes load a lora either the normal way or the efficiency-nodes way. +Both loaders have two custom outputs: +- civitai_tags_list: a python list of the tags related to this lora on civitai +- meta_tags_list: a python list of the training tags embedded in the lora itself (if any) +These outputs can then be filtered by two other nodes: +- TagsFormater: helper that shows the available tags and their indexes +- TagsSelector: filters tags and applies a weight to them. + - TagsSelector takes four parameters. First, the selector (see Filtering below) + - The weight, applied as `(tag:weight)` + - The boolean `ensure_comma`, to properly insert a comma when a prefix or suffix is added. + - The boolean `append_loraname_if_empty`, which adds the name of the lora to the output lists if they are empty. +#### Filtering
The format is simple: it works like Python list indexing, but it can select multiple indexes or ranges of indexes separated by commas (see the sketch just after this file). +`Ex: 0, 3, 5:8, -8:` +- Select a specific list of indexes: `0, 2, 3, 15`... +- Select ranges of indexes: `2:5, 10:15`... +- Select a range from the beginning to a specific index: `:5` +- Select a range from a specific index to the end: `5:` +- You can use negative indexes, like `-1` to select the last tag +- By default `:` selects everything + +#### Example of normal workflow +![image](./images/loaderAdvanced.png) + +#### Example of Stacked workflow +![image](./images/loaderStacked.png) + +#### Chaining Selectors and Stacked +Tag selectors can be chained to select different tags with different weights `(tags1:0.8), tag2, (tag3:1.1)`. +Lora Stacks can also be chained together to load multiple loras into the efficiency loaders. +![image](./images/stackingLoras.png) + +### Side nodes I made and kept here +- FusionText: takes two text inputs and joins them together +- Randomizer: takes two text+lorastack couples and randomly returns one of them +- TextInputBasic: just a text input with two additional inputs for text chaining
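To make the Filtering rules above concrete, here is a minimal, self-contained sketch of the selection semantics. This is not the pack's code (its actual `parse_selector` implementation appears in `utils.py` further down in this diff), and the tag list used here is invented for illustration:

```python
# Minimal sketch of the TagsSelector index syntax, e.g. "0, 3, 5:8, -8:".
# Assumption: tags is a plain list of strings; duplicate picks are collapsed.

def select(selector: str, tags: list[str]) -> str:
    picked = {}  # index -> tag, keeps first-seen order of indexes
    for part in selector.split(","):
        part = part.strip()
        if not part:
            continue
        if ":" not in part:  # single index, e.g. "3" or "-1"
            i = int(part)
            if -len(tags) <= i < len(tags):
                picked[i % len(tags)] = tags[i]
            continue
        lo, hi = part.split(":")  # range, e.g. "5:8", ":5", "-8:"
        start = int(lo) if lo else 0
        end = int(hi) if hi else len(tags)
        start += len(tags) if start < 0 else 0
        end += len(tags) if end < 0 else 0
        for i in range(max(start, 0), min(end, len(tags))):
            picked[i] = tags[i]
    return ", ".join(picked.values())

tags = ["trigger_word", "1girl", "smile", "outdoors"]  # hypothetical tags
print(select(":", tags))      # -> trigger_word, 1girl, smile, outdoors
print(select("0, 2:", tags))  # -> trigger_word, smile, outdoors
print(select("-1", tags))     # -> outdoors
```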
diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__init__.py b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe1e2af00d22727b5dd015c0ee698bd15e21df75 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__init__.py @@ -0,0 +1,8 @@ +#from .nodes_autotrigger import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as na_NCM, na_NDNM +#from .nodes_utils import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS as nu_NCM, nu_NDNM +from .nodes_autotrigger import NODE_CLASS_MAPPINGS as na_NCM +from .nodes_utils import NODE_CLASS_MAPPINGS as nu_NCM + +NODE_CLASS_MAPPINGS = dict(na_NCM, **nu_NCM) +#NODE_DISPLAY_NAME_MAPPINGS = dict(na_NDNM, **nu_NDNM) +__all__ = ["NODE_CLASS_MAPPINGS"]#, "NODE_DISPLAY_NAME_MAPPINGS"] diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..443791c646c12391c11f003f3ee3b21f7f78e3dc Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337c7a1cf83ca4e7c58fddd53177fde96a678253 Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-310.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae2f8e9cf1d4b9f1215824788ae4df109784ba20 Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-311.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3862e40c1169779b27fe62a2772f9b16a87b238 Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_autotrigger.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-310.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ce74dfc50dd666941a1bc193900f0da077e160a Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-311.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d33cc2e2a40988d4a4f351a49812d177db3a17bb Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/nodes_utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-310.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74e8fba3e653753af6458b733c5b6c507cf95dec Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-311.pyc b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82f2d39bb20a99350e0e124a49912b5cbac629d8 Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderAdvanced.png b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderAdvanced.png new file mode 100644 index 0000000000000000000000000000000000000000..13f983a8e13411fa1ff86894794889092f01349f Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderAdvanced.png differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderStacked.png b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderStacked.png new file mode 100644 index 0000000000000000000000000000000000000000..b10df80d721719ee964b7128b6db33648c6077cb Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/loaderStacked.png differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/main.png b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/main.png new file mode 100644 index 0000000000000000000000000000000000000000..215aaf31964db6ede1a35594586f3264eef9083f Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/main.png differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/stackingLoras.png b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/stackingLoras.png new file mode 100644 index 0000000000000000000000000000000000000000..f9b8c10dbf722ff471f4c825d6220682bdf363a8 Binary files /dev/null and b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/images/stackingLoras.png differ diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_autotrigger.py b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_autotrigger.py new file mode 100644 index 0000000000000000000000000000000000000000..f6d1772f07724389d31c85393b72835c1507f0c1 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_autotrigger.py @@ -0,0 +1,219 @@ +from comfy.sd import load_lora_for_models +from comfy.utils import load_torch_file +import folder_paths + +from .utils import * + +class LoraLoaderVanilla: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + LORA_LIST = sorted(folder_paths.get_filename_list("loras"), key=str.lower) + return { + "required": { + "model": ("MODEL",), + "clip": ("CLIP", ), + "lora_name":
(LORA_LIST, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), + "force_fetch": ("BOOLEAN", {"default": False}), + "append_loraname_if_empty": ("BOOLEAN", {"default": False}), + } + } + + RETURN_TYPES = ("MODEL", "CLIP", "LIST", "LIST") + RETURN_NAMES = ("MODEL", "CLIP", "civitai_tags_list", "meta_tags_list") + FUNCTION = "load_lora" + CATEGORY = "autotrigger" + + def load_lora(self, model, clip, lora_name, strength_model, strength_clip, force_fetch, append_loraname_if_empty): + meta_tags_list = sort_tags_by_frequency(get_metadata(lora_name, "loras")) + civitai_tags_list = load_and_save_tags(lora_name, force_fetch) + + meta_tags_list = append_lora_name_if_empty(meta_tags_list, lora_name, append_loraname_if_empty) + civitai_tags_list = append_lora_name_if_empty(civitai_tags_list, lora_name, append_loraname_if_empty) + + lora_path = folder_paths.get_full_path("loras", lora_name) + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + temp = self.loaded_lora + self.loaded_lora = None + del temp + + if lora is None: + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_lora, clip_lora = load_lora_for_models(model, clip, lora, strength_model, strength_clip) + + return (model_lora, clip_lora, civitai_tags_list, meta_tags_list) + +class LoraLoaderStackedVanilla: + @classmethod + def INPUT_TYPES(s): + LORA_LIST = folder_paths.get_filename_list("loras") + return { + "required": { + "lora_name": (LORA_LIST,), + "lora_weight": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "force_fetch": ("BOOLEAN", {"default": False}), + "append_loraname_if_empty": ("BOOLEAN", {"default": False}), + }, + "optional": { + "lora_stack": ("LORA_STACK", ), + } + } + + RETURN_TYPES = ("LIST", "LIST", "LORA_STACK",) + RETURN_NAMES = ("civitai_tags_list", "meta_tags_list", "LORA_STACK",) + FUNCTION = "set_stack" + #OUTPUT_NODE = False + CATEGORY = "autotrigger" + + def set_stack(self, lora_name, lora_weight, force_fetch, append_loraname_if_empty, lora_stack=None): + civitai_tags_list = load_and_save_tags(lora_name, force_fetch) + + meta_tags = get_metadata(lora_name, "loras") + meta_tags_list = sort_tags_by_frequency(meta_tags) + + civitai_tags_list = append_lora_name_if_empty(civitai_tags_list, lora_name, append_loraname_if_empty) + meta_tags_list = append_lora_name_if_empty(meta_tags_list, lora_name, append_loraname_if_empty) + + loras = [(lora_name,lora_weight,lora_weight,)] + if lora_stack is not None: + loras.extend(lora_stack) + + return (civitai_tags_list, meta_tags_list, loras) + +class LoraLoaderAdvanced: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + LORA_LIST = sorted(folder_paths.get_filename_list("loras"), key=str.lower) + populate_items(LORA_LIST, "loras") + return { + "required": { + "model": ("MODEL",), + "clip": ("CLIP", ), + "lora_name": (LORA_LIST, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}), + "force_fetch": ("BOOLEAN", {"default": False}), + "enable_preview": ("BOOLEAN", {"default": False}), + "append_loraname_if_empty": ("BOOLEAN", {"default": False}), + } + } + + RETURN_TYPES = ("MODEL", "CLIP", "LIST", "LIST") + RETURN_NAMES = ("MODEL", "CLIP", "civitai_tags_list", 
"meta_tags_list") + FUNCTION = "load_lora" + CATEGORY = "autotrigger" + + def load_lora(self, model, clip, lora_name, strength_model, strength_clip, force_fetch, enable_preview, append_loraname_if_empty): + meta_tags_list = sort_tags_by_frequency(get_metadata(lora_name["content"], "loras")) + civitai_tags_list = load_and_save_tags(lora_name["content"], force_fetch) + + civitai_tags_list = append_lora_name_if_empty(civitai_tags_list, lora_name["content"], append_loraname_if_empty) + meta_tags_list = append_lora_name_if_empty(meta_tags_list, lora_name["content"], append_loraname_if_empty) + + lora_path = folder_paths.get_full_path("loras", lora_name["content"]) + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + temp = self.loaded_lora + self.loaded_lora = None + del temp + + if lora is None: + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_lora, clip_lora = load_lora_for_models(model, clip, lora, strength_model, strength_clip) + if enable_preview: + _, preview = copy_preview_to_temp(lora_name["image"]) + if preview is not None: + preview_output = { + "filename": preview, + "subfolder": "lora_preview", + "type": "temp" + } + return {"ui": {"images": [preview_output]}, "result": (model_lora, clip_lora, civitai_tags_list, meta_tags_list)} + + + return (model_lora, clip_lora, civitai_tags_list, meta_tags_list) + +class LoraLoaderStackedAdvanced: + @classmethod + def INPUT_TYPES(s): + LORA_LIST = folder_paths.get_filename_list("loras") + populate_items(LORA_LIST, "loras") + return { + "required": { + "lora_name": (LORA_LIST,), + "lora_weight": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "force_fetch": ("BOOLEAN", {"default": False}), + "enable_preview": ("BOOLEAN", {"default": False}), + "append_loraname_if_empty": ("BOOLEAN", {"default": False}), + }, + "optional": { + "lora_stack": ("LORA_STACK", ), + } + } + + RETURN_TYPES = ("LIST", "LIST", "LORA_STACK",) + RETURN_NAMES = ("civitai_tags_list", "meta_tags_list", "LORA_STACK",) + FUNCTION = "set_stack" + #OUTPUT_NODE = False + CATEGORY = "autotrigger" + + def set_stack(self, lora_name, lora_weight, force_fetch, enable_preview, append_loraname_if_empty, lora_stack=None): + civitai_tags_list = load_and_save_tags(lora_name["content"], force_fetch) + + meta_tags = get_metadata(lora_name["content"], "loras") + meta_tags_list = sort_tags_by_frequency(meta_tags) + + civitai_tags_list = append_lora_name_if_empty(civitai_tags_list, lora_name["content"], append_loraname_if_empty) + meta_tags_list = append_lora_name_if_empty(meta_tags_list, lora_name["content"], append_loraname_if_empty) + + loras = [(lora_name["content"],lora_weight,lora_weight,)] + if lora_stack is not None: + loras.extend(lora_stack) + + if enable_preview: + _, preview = copy_preview_to_temp(lora_name["image"]) + if preview is not None: + preview_output = { + "filename": preview, + "subfolder": "lora_preview", + "type": "temp" + } + return {"ui": {"images": [preview_output]}, "result": (civitai_tags_list, meta_tags_list, loras)} + + return {"result": (civitai_tags_list, meta_tags_list, loras)} + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "LoraLoaderVanilla": LoraLoaderVanilla, + "LoraLoaderStackedVanilla": LoraLoaderStackedVanilla, + "LoraLoaderAdvanced": LoraLoaderAdvanced, + "LoraLoaderStackedAdvanced": LoraLoaderStackedAdvanced, 
+} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "LoraLoaderVanilla": "LoraLoaderVanilla", + "LoraLoaderStackedVanilla": "LoraLoaderStackedVanilla", + "LoraLoaderAdvanced": "LoraLoaderAdvanced", + "LoraLoaderStackedAdvanced": "LoraLoaderStackedAdvanced", +} diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_utils.py b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8bca891230c50e42e82f20e606c9dc5436bb63b0 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/nodes_utils.py @@ -0,0 +1,141 @@ +import random + +from .utils import * + +class FusionText: + @classmethod + def INPUT_TYPES(s): + return {"required": {"text_1": ("STRING", {"default": "", "forceInput": True}), "text_2": ("STRING", {"default": "", "forceInput": True})}} + RETURN_TYPES = ("STRING",) + FUNCTION = "combine" + CATEGORY = "autotrigger" + + def combine(self, text_1, text_2): + return (text_1 + text_2, ) + + +class Randomizer: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text_1":("STRING", {"forceInput": True}), + "lora_1":("LORA_STACK", ), + "text_2":("STRING", {"forceInput": True} ), + "lora_2":("LORA_STACK", ), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + RETURN_TYPES = ("STRING", "LORA_STACK") + RETURN_NAMES = ("text", "lora stack") + FUNCTION = "randomize" + + #OUTPUT_NODE = False + + CATEGORY = "autotrigger" + + def randomize(self, text_1, lora_1, text_2, lora_2, seed): + random.seed(seed) + if random.random() < .5: + return (text_1, lora_1) + return (text_2, lora_2) + +class TextInputBasic: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text":("STRING", {"default":"", "multiline":True}), + }, + "optional": { + "prefix":("STRING", {"default":"", "forceInput": True}), + "suffix":("STRING", {"default":"", "forceInput": True}), + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text", ) + FUNCTION = "get_text" + + #OUTPUT_NODE = False + + CATEGORY = "autotrigger" + + def get_text(self, text, prefix="", suffix=""): + return (prefix + text + suffix, ) + + +class TagsSelector: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tags_list": ("LIST", {"default": []}), + "selector": ("STRING", {"default": ":"}), + "weight": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "ensure_comma": ("BOOLEAN", {"default": True}) + }, + "optional": { + "prefix":("STRING", {"default":"", "forceInput": True}), + "suffix":("STRING", {"default":"", "forceInput": True}), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "select_tags" + CATEGORY = "autotrigger" + + def select_tags(self, tags_list, selector, weight, ensure_comma, prefix="", suffix=""): + if weight != 1.0: + tags_list = [f"({tag}:{weight})" for tag in tags_list] + output = parse_selector(selector, tags_list) + if ensure_comma: + stripped_prefix = prefix.strip() + stripped_suffix = suffix.strip() + if stripped_prefix != "" and not stripped_prefix.endswith(",") and output != "" and not output.startswith(","): + prefix = stripped_prefix + ", " + if output != "" and not output.endswith(",") and stripped_suffix != "" and not stripped_suffix.startswith(","): + suffix = ", " + stripped_suffix + return (prefix + output + suffix, ) +
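The `ensure_comma` handling in `select_tags` above only inserts a separator where one is actually missing. Here is a tiny stand-alone sketch of that joining rule, under the assumption of plain string inputs (the function name and example strings are illustrative, not the node API):

```python
# Sketch of TagsSelector's ensure_comma rule: add ", " between prefix/output
# and output/suffix only when neither side already brings a comma.

def join_with_commas(prefix: str, output: str, suffix: str) -> str:
    p, s = prefix.strip(), suffix.strip()
    if p and output and not p.endswith(",") and not output.startswith(","):
        prefix = p + ", "
    if s and output and not output.endswith(",") and not s.startswith(","):
        suffix = ", " + s
    return prefix + output + suffix

print(join_with_commas("masterpiece", "1girl, smile", "outdoors"))
# -> masterpiece, 1girl, smile, outdoors
```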
+class TagsFormater: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tags_list": ("LIST", {"default": []}), + }, + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "format_tags" + CATEGORY = "autotrigger" + + def format_tags(self, tags_list): + output = "" + i = 0 + for tag in tags_list: + output += f'{i} : "{tag}"\n' + i += 1 + + return (output,) + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "Randomizer": Randomizer, + "FusionText": FusionText, + "TextInputBasic": TextInputBasic, + "TagsSelector": TagsSelector, + "TagsFormater": TagsFormater, +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "Randomizer": "Randomizer", + "FusionText": "FusionText", + "TextInputBasic": "TextInputBasic", + "TagsSelector": "TagsSelector", + "TagsFormater": "TagsFormater", +} diff --git a/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/utils.py b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2120327bb5229e6a8b3d9b3dd1163d63150cffe0 --- /dev/null +++ b/custom_nodes/ComfyUI-Lora-Auto-Trigger-Words/utils.py @@ -0,0 +1,222 @@ +import folder_paths +import hashlib +import json +import os +import requests +import shutil + +def get_preview_path(name, type): + file_name = os.path.splitext(name)[0] + file_path = folder_paths.get_full_path(type, name) + + if file_path is None: + print(f"Unable to get path for {type} {name}") + return False, None  # (has_image, item_image) pair, so callers can unpack + + file_path_no_ext = os.path.splitext(file_path)[0] + item_image = None + for ext in ["png", "jpg", "jpeg", "preview.png"]: + has_image = os.path.isfile(file_path_no_ext + "." + ext) + if has_image: + item_image = f"{file_name}.{ext}" + break + + return has_image, item_image + + +def copy_preview_to_temp(file_name): + if file_name is None: + return None, None + base_name = os.path.basename(file_name) + lora_less = "/".join(file_name.split("/")[1:]) + + file_path = folder_paths.get_full_path("loras", lora_less) + + temp_path = folder_paths.get_temp_directory() + preview_path = os.path.join(temp_path, "lora_preview") + if not os.path.isdir(preview_path): + os.makedirs(preview_path) + preview_path = os.path.join(preview_path, base_name) + + + shutil.copyfile(file_path, preview_path) + return preview_path, base_name + +# add previews in selectors +def populate_items(names, type): + for idx, item_name in enumerate(names): + + has_image, item_image = get_preview_path(item_name, type) + + names[idx] = { + "content": item_name, + "image": f"{type}/{item_image}" if has_image else None, + "type": "loras", + } + names.sort(key=lambda i: i["content"].lower()) + + +def load_json_from_file(file_path): + try: + with open(file_path, 'r') as json_file: + data = json.load(json_file) + return data + except FileNotFoundError: + print(f"File not found: {file_path}") + return None + except json.JSONDecodeError: + print(f"Error decoding JSON in file: {file_path}") + return None + +def save_dict_to_json(data_dict, file_path): + try: + with open(file_path, 'w') as json_file: + json.dump(data_dict, json_file, indent=4) + print(f"Data saved to {file_path}") + except Exception as e: + print(f"Error saving JSON to file: {e}") + +def get_model_version_info(hash_value): + api_url = f"https://civitai.com/api/v1/model-versions/by-hash/{hash_value}" + response = requests.get(api_url) + + if response.status_code == 200: + return response.json() + else: + return None + +def calculate_sha256(file_path): + sha256_hash = hashlib.sha256() + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): +
sha256_hash.update(chunk) + return sha256_hash.hexdigest() + + +def load_and_save_tags(lora_name, force_fetch): + json_tags_path = "./loras_tags.json" + lora_tags = load_json_from_file(json_tags_path) + output_tags = lora_tags.get(lora_name, None) if lora_tags is not None else None + if output_tags is not None: + output_tags_list = output_tags + else: + output_tags_list = [] + + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_tags is None or force_fetch or output_tags is None: # query civitai only when there is no cached entry or a fetch is forced + print("[Lora-Auto-Trigger] calculating lora hash") + LORAsha256 = calculate_sha256(lora_path) + print("[Lora-Auto-Trigger] requesting info") + model_info = get_model_version_info(LORAsha256) + if model_info is not None: + if "trainedWords" in model_info: + print("[Lora-Auto-Trigger] tags found!") + if lora_tags is None: + lora_tags = {} + lora_tags[lora_name] = model_info["trainedWords"] + save_dict_to_json(lora_tags, json_tags_path) + output_tags_list = model_info["trainedWords"] + else: + print("[Lora-Auto-Trigger] No information found.") + if lora_tags is None: + lora_tags = {} + lora_tags[lora_name] = [] + save_dict_to_json(lora_tags, json_tags_path) + + return output_tags_list + +def show_list(list_input): + i = 0 + output = "" + for debug in list_input: + output += f"{i} : {debug}\n" + i += 1 + return output + +def get_metadata(filepath, type): + filepath = folder_paths.get_full_path(type, filepath) + with open(filepath, "rb") as file: + # https://github.com/huggingface/safetensors#format + # 8 bytes: N, an unsigned little-endian 64-bit integer, containing the size of the header + header_size = int.from_bytes(file.read(8), "little", signed=False) + + if header_size <= 0: + raise BufferError("Invalid header size") + + header = file.read(header_size) + if len(header) < header_size: + raise BufferError("Invalid header") + header_json = json.loads(header) + return header_json["__metadata__"] if "__metadata__" in header_json else None +
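The two `BufferError` guards in `get_metadata` above protect the safetensors layout the function relies on: an 8-byte little-endian length prefix followed by a JSON header. Here is a hedged, stand-alone sketch of reading that layout; the file path is a placeholder, not a file shipped with this pack:

```python
import json
import struct

def read_safetensors_metadata(path):
    """Return the __metadata__ block of a .safetensors file (or None),
    per https://github.com/huggingface/safetensors#format"""
    with open(path, "rb") as f:
        # first 8 bytes: unsigned little-endian 64-bit header length
        (header_size,) = struct.unpack("<Q", f.read(8))
        header = json.loads(f.read(header_size))
    return header.get("__metadata__")

# hypothetical path, for illustration only
print(read_safetensors_metadata("models/loras/example_lora.safetensors"))
```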
+# parse the __metadata__ json looking for trained tags +def sort_tags_by_frequency(meta_tags): + if meta_tags is None: + return [] + if "ss_tag_frequency" in meta_tags: + meta_tags = meta_tags["ss_tag_frequency"] + meta_tags = json.loads(meta_tags) + sorted_tags = {} + for _, dataset in meta_tags.items(): + for tag, count in dataset.items(): + tag = str(tag).strip() + if tag in sorted_tags: + sorted_tags[tag] = sorted_tags[tag] + count + else: + sorted_tags[tag] = count + # sort tags by training frequency, most frequent tags first + sorted_tags = dict(sorted(sorted_tags.items(), key=lambda item: item[1], reverse=True)) + return list(sorted_tags.keys()) + else: + return [] + +def parse_selector(selector, tags_list): + if len(tags_list) == 0: + return "" + range_index_list = selector.split(",") + output = {} + for range_index in range_index_list: + # single value + if range_index.count(":") == 0: + # remove empty values + if range_index.strip() == "": + continue + index = int(range_index) + # ignore out of bound indexes + if index >= len(tags_list) or index < -len(tags_list): + continue + output[index] = tags_list[index] + + # actual range + if range_index.count(":") == 1: + indexes = range_index.split(":") + # check empty + if indexes[0] == "": + start = 0 + else: + start = int(indexes[0]) + if indexes[1] == "": + end = len(tags_list) + else: + end = int(indexes[1]) + # check negative + if start < 0: + start = len(tags_list) + start + if end < 0: + end = len(tags_list) + end + # clamp start and end values within list boundaries + start, end = min(start, len(tags_list)), min(end, len(tags_list)) + start, end = max(start, 0), max(end, 0) + # merge all + for i in range(start, end): + output[i] = tags_list[i] + return ", ".join(list(output.values())) + +def append_lora_name_if_empty(tags_list, lora_path, enabled): + if not enabled or len(tags_list) > 0: + return tags_list + filename = os.path.splitext(lora_path)[0] + filename = os.path.basename(filename) + + tags_list.append(filename) + return tags_list \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/.gitignore b/custom_nodes/ComfyUI-Manager/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2670f776d54a575f6403c3f58647017e0c6fbe42 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/.gitignore @@ -0,0 +1,9 @@ +__pycache__/ +.idea/ +.vscode/ +.tmp +config.ini +snapshots/** +startup-scripts/** +.openart_key +matrix_auth \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/LICENSE.txt b/custom_nodes/ComfyUI-Manager/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things.
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. 
+ + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI-Manager/README.md b/custom_nodes/ComfyUI-Manager/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb87f28b177ff980b4463584030a4557914334da
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/README.md
@@ -0,0 +1,235 @@
+# ComfyUI Manager
+
+**ComfyUI-Manager** is an extension designed to enhance the usability of [ComfyUI](https://github.com/comfyanonymous/ComfyUI). It offers management functions to **install, remove, disable, and enable** various custom nodes of ComfyUI. Furthermore, this extension provides a hub feature and convenience functions to access a wide range of information within ComfyUI.
+
+![menu](misc/menu.jpg)
+
+## NOTICE
+* 🏆 Join us for the [ComfyUI Workflow Contest](https://contest.openart.ai/), hosted by OpenArt AI (11.27.2023 - 12.15.2023). Our esteemed judge panel includes Scott E. Detweiler, Olivio Sarikas, and MERJIC麦橘, among others. We're also thrilled to have the authors of ComfyUI Manager and AnimateDiff as our special guests!
+* If you wish to hide the "Share" button, click "Manager" and choose the "Share: None" option.
+* You can see complete node info on the [ComfyUI Nodes Info](https://ltdrdata.github.io/) page.
+* Versions prior to V0.22.2 can no longer detect missing nodes unless a local database is used. Please update ComfyUI-Manager to the latest version.
+
+## Installation
+
+### Installation[method1] (General installation method: ComfyUI-Manager only)
+
+To install ComfyUI-Manager in addition to an existing installation of ComfyUI, follow these steps:
+
+1. cd custom_nodes
+2. git clone https://github.com/ltdrdata/ComfyUI-Manager.git
+3. Restart ComfyUI
+
+
+### Installation[method2] (Installation for portable ComfyUI version: ComfyUI-Manager only)
+1. Install git
+- https://git-scm.com/download/win
+- standalone version
+- select option: use Windows' default console window
+2. Download [scripts/install-manager-for-portable-version.bat](https://github.com/ltdrdata/ComfyUI-Manager/raw/main/scripts/install-manager-for-portable-version.bat) into the installed `"ComfyUI_windows_portable"` directory
+3. Double-click the `install-manager-for-portable-version.bat` batch file
+
+![portable-install](misc/portable-install.png)
+
+
+### Installation[method3] (Installation for linux+venv: ComfyUI + ComfyUI-Manager)
+
+To install ComfyUI with ComfyUI-Manager on Linux using a venv environment, follow these steps:
+prerequisites: python-is-python3, python3-venv
+
+1. Download [scripts/install-comfyui-venv-linux.sh](https://github.com/ltdrdata/ComfyUI-Manager/raw/main/scripts/install-comfyui-venv-linux.sh) into an empty install directory
+- ComfyUI will be installed in a subdirectory of the specified directory, and the directory will contain the generated executable scripts.
+2. `chmod +x install-comfyui-venv-linux.sh`
+3. `./install-comfyui-venv-linux.sh`
+
+
+You can execute ComfyUI by running either `./run_gpu.sh` or `./run_cpu.sh`, depending on your system configuration.
+
+## Colab Notebook
+This repository provides Colab notebooks that allow you to install and use ComfyUI, including ComfyUI-Manager. To use ComfyUI, [click on this link](https://colab.research.google.com/github/ltdrdata/ComfyUI-Manager/blob/main/notebooks/comfyui_colab_with_manager.ipynb).
+* Support for installing ComfyUI
+* Support for basic installation of ComfyUI-Manager
+* Support for automatically installing dependencies of custom nodes upon restarting Colab notebooks.
+
+## Changes
+* **0.29** Add `Update all` feature
+* **0.25** Support DB channel
+  * You can directly modify the DB channel settings in the `config.ini` file.
+  * If you want to maintain a new DB channel, please modify `channels.list` and submit a PR.
+* **0.23** Support multiple selection
+* **0.18.1** `skip update check` feature added.
+  * This allows the dialog to open quickly in environments where update checks take a long time.
+* **0.17.1** Bug fix for the issue where enable/disable of the web extension was not working. Compatibility patch for StableSwarmUI.
+  * Requires latest version of ComfyUI (Revision: 1240)
+* **0.17** Support preview method setting feature.
+* **0.14** Support robust update.
+* **0.13** Support additional 'pip' section for install spec.
+* **0.12** Better installation support for Windows.
+* **0.9** Support keyword search in installer menu.
+* **V0.7.1** Bug fix for the issue where updates were not being applied on Windows.
+  * **For those who have been using version 0.6, please perform a manual git pull in the custom_nodes/ComfyUI-Manager directory.**
+* **V0.7** To address the issue of a slow list refresh, the fetch-update and update-check processes were separated.
+* **V0.6** Support extension installation for missing nodes.
+* **V0.5** Removed external git program dependencies.
+
+
+## How To Use
+
+1. Click the "Manager" button on the main menu
+
+   ![mainmenu](misc/main.jpg)
+
+
+2. If you click on 'Install Custom Nodes' or 'Install Models', an installer dialog will open.
+
+   ![menu](misc/menu.jpg)
+
+   * When the 'Use local DB' feature is enabled, the application will use the data stored locally on your device, rather than retrieving node/model information over the internet.
+
+   * The ```Fetch Updates``` menu retrieves update data for custom nodes locally. Actual updates are applied by clicking the ```Update``` button in the ```Install Custom Nodes``` menu.
+
+3. Click the 'Install' or 'Try Install' button.
+
+   ![node-install-dialog](misc/custom-nodes.jpg)
+
+   ![model-install-dialog](misc/models.png)
+
+   * Installed: This item is already installed.
+   * Install: Clicking this button will install the item.
+   * Try Install: This is a custom node whose installation information cannot be confirmed. Click the button to try installing it.
+
+   * If a red-background `Channel` indicator appears at the top, the current channel is not the default one. Because other channels hold a different amount of information than the default channel, many custom nodes may not appear while such a channel is selected.
+     * Channel settings have a broad impact, affecting not only the node list but also all functions like "Update all".
+   * Conflicted Nodes, shown with a yellow background, list the nodes in the respective extension that conflict with other extensions. These conflicts need to be addressed by the developers; users should be aware that, due to them, some nodes may not function correctly, and should choose what to install accordingly.
+
+4. If you set the `Badge:` item in the menu to `Badge: Nickname`, `Badge: Nickname (hide built-in)`, `Badge: #ID Nickname`, or `Badge: #ID Nickname (hide built-in)`, an information badge will be displayed on each node.
+   * When selecting (hide built-in), the 🦊 icon, which signifies built-in nodes, is hidden.
+   * Nodes without any indication on the badge are custom nodes that Manager cannot recognize.
+   * `Badge: Nickname` displays the nickname of custom nodes, while `Badge: #ID Nickname` also includes the internal ID of the node.
+
+   ![model-install-dialog](misc/nickname.jpg)
+
+
+5. Share
+   ![menu](misc/main.jpg) ![share](misc/share.jpg)
+
+   * You can share a workflow by clicking the Share button at the bottom of the main menu or by selecting Share Output from the Context Menu of an Image node.
+   * Currently, it supports sharing via [https://comfyworkflows.com/](https://comfyworkflows.com/) and [https://openart.ai](https://openart.ai/workflows/dev), as well as through the Matrix channel.
+
+   ![menu](misc/share-setting.jpg)
+
+   * Through the Share settings in the Manager menu, you can configure the behavior of the Share button in the Main menu or the Share Output button on the Context Menu.
+     * `None`: hides the button from the Main menu
+     * `All`: Show a dialog where the user can select a title for sharing.
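+
+These settings are persisted in `ComfyUI-Manager/config.ini`. As a rough illustration only (not an official API; the key names below simply mirror what the extension writes under its `[default]` section, and the exact value strings may differ between versions), they can also be adjusted outside the UI with Python's `configparser`:
+
+```python
+import configparser
+
+# Hypothetical path; adjust to where your ComfyUI-Manager is installed.
+config_path = "custom_nodes/ComfyUI-Manager/config.ini"
+
+config = configparser.ConfigParser()
+config.read(config_path)
+
+# Keys observed in the generated file: preview_method, badge_mode, git_exe,
+# channel_url, channel_url_list, share_option, bypass_ssl.
+config['default']['share_option'] = 'none'  # e.g. hide the Share button
+
+with open(config_path, 'w') as f:
+    config.write(f)
+```
+
+Note that Manager rewrites this file itself, so changes made while ComfyUI is running may be overwritten; it is safest to edit through the Manager menu, or while ComfyUI is stopped.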
+
+
+## Snapshot-Manager
+* When you press `Save snapshot` or use `Update All` in the `Manager Menu`, a snapshot of the current installation status is saved.
+  * Snapshot file dir: `ComfyUI-Manager/snapshots`
+  * You can rename the snapshot files.
+* Press the "Restore" button to revert to the installation status of the respective snapshot.
+  * However, for custom nodes not managed by Git, snapshot support is incomplete.
+* When you press `Restore`, it will take effect on the next ComfyUI startup.
+
+
+![model-install-dialog](misc/snapshot.jpg)
+
+## How to register your custom node into ComfyUI-Manager
+
+* Add an entry to `custom-node-list.json` located in the root of ComfyUI-Manager and submit a Pull Request.
+* NOTE: Before submitting the PR after making changes, please check `Use local DB` and ensure that the extension list loads without any issues in the `Install custom nodes` dialog. Occasionally, missing or extra commas can lead to JSON syntax errors.
+* The remaining JSON will be updated through scripts in the future, so you don't need to worry about it.
+
+## Custom node support guide
+
+* Currently, the system operates by cloning the git repository and sequentially installing the dependencies listed in requirements.txt using pip, followed by invoking the install.py script. In the future, we plan to discuss and determine the specifications for supporting custom nodes.
+
+* Please submit a pull request to update either the custom-node-list.json or model-list.json file.
+
+* The scanner currently provides a detection function for missing nodes, which is capable of detecting nodes described by the following two patterns.
+  * Alternatively, you can provide a `node_list.json` file manually.
+
+```
+NODE_CLASS_MAPPINGS = {
+    "ExecutionSwitch": ExecutionSwitch,
+    "ExecutionBlocker": ExecutionBlocker,
+    ...
+}
+
+NODE_CLASS_MAPPINGS.update({
+    "UniFormer-SemSegPreprocessor": Uniformer_SemSegPreprocessor,
+    "SemSegPreprocessor": Uniformer_SemSegPreprocessor,
+})
+```
+
+* When you write a docstring in the header of the .py file for the Node as follows, it will be used for managing the database in Manager.
+  * Currently, only the `nickname` is being used, but other parts will also be utilized in the future.
+  * The `nickname` will be the name displayed on the badge of the node.
+  * If there is no `nickname`, the title will be truncated to 20 characters and used in its place.
+```
+"""
+@author: Dr.Lt.Data
+@title: Impact Pack
+@nickname: Impact Pack
+@description: This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler.
+"""
+```
+
+
+* **Special purpose files** (optional)
+  * `node_list.json` - When your custom node's NODE_CLASS_MAPPINGS pattern is unconventional, this is used to manually provide a list of nodes for reference. ([example](https://github.com/melMass/comfy_mtb/raw/main/node_list.json))
+  * `requirements.txt` - When installing, these pip requirements will be installed automatically
+  * `install.py` - When installing, it is automatically called
+  * `uninstall.py` - When uninstalling, it is automatically called
+  * `disable.py` - When disabled, it is automatically called
+    * If your custom node sets up `.js` files, it is recommended to write this script to handle disabling them.
+  * `enable.py` - When enabled, it is automatically called
+  * **All scripts are executed from the root path of the corresponding custom node.**
+
+
+## Support of missing nodes installation
+
+![missing-menu](misc/missing-menu.png)
+
+* When you click on the ```Install Missing Custom Nodes``` button in the menu, it displays a list of extensions that provide the nodes used in the current workflow but not yet installed.
+
+![missing-list](misc/missing-list.png)
+
+
+## Troubleshooting
+* If your `git.exe` is installed in a location other than the system git, install ComfyUI-Manager and run ComfyUI, then specify the path (including the file name) under `git_exe = ` in the ComfyUI-Manager/config.ini file that is generated.
+* If updating ComfyUI-Manager itself fails, please go to the **ComfyUI-Manager** directory and execute the command `git update-ref refs/remotes/origin/main a361cc1 && git fetch --all && git pull`.
+  * Alternatively, download the update-fix.py script from [update-fix.py](https://github.com/ltdrdata/ComfyUI-Manager/raw/main/scripts/update-fix.py) and place it in the ComfyUI-Manager directory. Then, run it using your Python command.
+    For the portable version, use `..\..\..\python_embeded\python.exe update-fix.py`.
+* Nodes such as `PreviewTextNode` from `ComfyUI_Custom_Nodes_AlekPet` that are only supported as front-end nodes are currently not covered by missing-node detection.
+* Currently, `vid2vid` is not being updated, causing compatibility issues.
+
+
+## TODO: Unconventional form of custom node list
+
+* https://github.com/diontimmer/Sample-Diffusion-ComfyUI-Extension
+* https://github.com/senshilabs/NINJA-plugin
+* https://github.com/MockbaTheBorg/Nodes
+
+
+## Roadmap
+
+- [x] System displaying information about failed custom nodes import.
+- [x] Guide for missing nodes in ComfyUI vanilla nodes.
+- [x] Collision checking system for nodes with the same ID across extensions.
+- [ ] Auto migration for custom nodes with changed structures.
+- [ ] Version control feature for nodes.
+- [ ] List of currently used custom nodes.
+- [ ] Template sharing system.
+- [ ] 3rd party API system.
+
+
+# Disclaimer
+
+* This extension simply provides the convenience of installing custom nodes and does not guarantee their proper functioning.
+
+
+## Credit
+ComfyUI/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI.
+
+**And, for all ComfyUI custom node developers**
diff --git a/custom_nodes/ComfyUI-Manager/__init__.py b/custom_nodes/ComfyUI-Manager/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4e56b84f37b1059d0836ba0f4f9d0038515c1ec
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/__init__.py
@@ -0,0 +1,1856 @@
+import configparser
+import mimetypes
+import shutil
+import folder_paths
+import os
+import sys
+import platform
+import threading
+import datetime
+import re
+import locale
+import subprocess  # don't remove this
+from tqdm.auto import tqdm
+import concurrent
+import ssl
+from urllib.parse import urlparse
+import http.client
+import signal
+import nodes
+
+version = "V1.9"
+print(f"### Loading: ComfyUI-Manager ({version})")
+
+required_comfyui_revision = 1793
+
+def handle_stream(stream, prefix):
+    stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
+    for msg in stream:
+        if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
+            if msg.startswith('100%'):
+                print('\r' + msg, end="", file=sys.stderr)
+            else:
+                print('\r' + msg[:-1], end="", file=sys.stderr)
+        else:
+            if prefix == '[!]':
+                print(prefix, msg, end="", file=sys.stderr)
+            else:
+                print(prefix, msg, end="")
+
+
+def run_script(cmd, cwd='.'):
+    if len(cmd) > 0 and cmd[0].startswith("#"):
+        print(f"[ComfyUI-Manager] Unexpected behavior: `{cmd}`")
+        return 0
+
+    process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1)
+
+    stdout_thread = threading.Thread(target=handle_stream, args=(process.stdout, ""))
+    stderr_thread = threading.Thread(target=handle_stream, args=(process.stderr, "[!]"))
+
+    stdout_thread.start()
+    stderr_thread.start()
+
+    stdout_thread.join()
+    stderr_thread.join()
+
+    return process.wait()
+
+
+try:
+    import git
+except:
+    my_path = os.path.dirname(__file__)
+    requirements_path = os.path.join(my_path, "requirements.txt")
+
+    print(f"## ComfyUI-Manager: installing dependencies")
+
+    run_script([sys.executable, '-s', '-m', 'pip', 'install', '-r', requirements_path])
+
+    try:
+        import git
+    except:
+        print(f"## [ERROR] ComfyUI-Manager: Attempting to reinstall dependencies using an alternative method.")
+        run_script([sys.executable, '-s', '-m', 'pip', 'install', '--user', '-r', requirements_path])
+
+        try:
+            import git
+        except:
+            print(f"## [ERROR] ComfyUI-Manager: Failed to install the GitPython package in the correct Python environment. Please install it manually in the appropriate environment.
(You can seek help at https://app.element.io/#/room/%23comfyui_space%3Amatrix.org)") + + print(f"## ComfyUI-Manager: installing dependencies done.") + + +from git.remote import RemoteProgress + +sys.path.append('../..') + +from torchvision.datasets.utils import download_url + +comfy_ui_required_revision = 1240 +comfy_ui_revision = "Unknown" +comfy_ui_commit_date = "" + +comfy_path = os.path.dirname(folder_paths.__file__) +custom_nodes_path = os.path.join(comfy_path, 'custom_nodes') +js_path = os.path.join(comfy_path, "web", "extensions") + +comfyui_manager_path = os.path.dirname(__file__) +local_db_model = os.path.join(comfyui_manager_path, "model-list.json") +local_db_alter = os.path.join(comfyui_manager_path, "alter-list.json") +local_db_custom_node_list = os.path.join(comfyui_manager_path, "custom-node-list.json") +local_db_extension_node_mappings = os.path.join(comfyui_manager_path, "extension-node-map.json") +git_script_path = os.path.join(os.path.dirname(__file__), "git_helper.py") + +startup_script_path = os.path.join(comfyui_manager_path, "startup-scripts") +config_path = os.path.join(os.path.dirname(__file__), "config.ini") +cached_config = None + + +default_channels = 'default::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main,recent::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/node_db/new,' +with open(os.path.join(comfyui_manager_path, 'channels.list'), 'r') as file: + channels = file.read() + default_channels = channels.replace('\n', ',') + + +from comfy.cli_args import args +import latent_preview + + +def write_config(): + config = configparser.ConfigParser() + config['default'] = { + 'preview_method': get_current_preview_method(), + 'badge_mode': get_config()['badge_mode'], + 'git_exe': get_config()['git_exe'], + 'channel_url': get_config()['channel_url'], + 'channel_url_list': get_config()['channel_url_list'], + 'share_option': get_config()['share_option'], + 'bypass_ssl': get_config()['bypass_ssl'] + } + with open(config_path, 'w') as configfile: + config.write(configfile) + + +def read_config(): + try: + config = configparser.ConfigParser() + config.read(config_path) + default_conf = config['default'] + + channel_url_list_is_valid = True + if 'channel_url_list' in default_conf and default_conf['channel_url_list'] != '': + for item in default_conf['channel_url_list'].split(","): + if len(item.split("::")) != 2: + channel_url_list_is_valid = False + break + + if channel_url_list_is_valid: + ch_url_list = default_conf['channel_url_list'] + else: + print(f"[WARN] ComfyUI-Manager: channel_url_list is invalid format") + ch_url_list = '' + + return { + 'preview_method': default_conf['preview_method'] if 'preview_method' in default_conf else get_current_preview_method(), + 'badge_mode': default_conf['badge_mode'] if 'badge_mode' in default_conf else 'none', + 'git_exe': default_conf['git_exe'] if 'git_exe' in default_conf else '', + 'channel_url': default_conf['channel_url'] if 'channel_url' in default_conf else 'https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main', + 'channel_url_list': ch_url_list, + 'share_option': default_conf['share_option'] if 'share_option' in default_conf else 'all', + 'bypass_ssl': default_conf['bypass_ssl'] if 'bypass_ssl' in default_conf else False, + } + + except Exception: + return { + 'preview_method': get_current_preview_method(), + 'badge_mode': 'none', + 'git_exe': '', + 'channel_url': 'https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main', + 'channel_url_list': '', + 'share_option': 'all', 
+ 'bypass_ssl': False + } + + +def get_config(): + global cached_config + + if cached_config is None: + cached_config = read_config() + + return cached_config + + +def get_current_preview_method(): + if args.preview_method == latent_preview.LatentPreviewMethod.Auto: + return "auto" + elif args.preview_method == latent_preview.LatentPreviewMethod.Latent2RGB: + return "latent2rgb" + elif args.preview_method == latent_preview.LatentPreviewMethod.TAESD: + return "taesd" + else: + return "none" + + +def set_preview_method(method): + if method == 'auto': + args.preview_method = latent_preview.LatentPreviewMethod.Auto + elif method == 'latent2rgb': + args.preview_method = latent_preview.LatentPreviewMethod.Latent2RGB + elif method == 'taesd': + args.preview_method = latent_preview.LatentPreviewMethod.TAESD + else: + args.preview_method = latent_preview.LatentPreviewMethod.NoPreviews + + get_config()['preview_method'] = args.preview_method + + +def set_badge_mode(mode): + get_config()['badge_mode'] = mode + + +set_preview_method(get_config()['preview_method']) + + +def try_install_script(url, repo_path, install_cmd): + int_comfyui_revision = 0 + + if type(comfy_ui_revision) == int: + int_comfyui_revision = comfy_ui_revision + elif comfy_ui_revision.isdigit(): + int_comfyui_revision = int(comfy_ui_revision) + + if platform.system() == "Windows" and int_comfyui_revision >= comfy_ui_required_revision: + if not os.path.exists(startup_script_path): + os.makedirs(startup_script_path) + + script_path = os.path.join(startup_script_path, "install-scripts.txt") + with open(script_path, "a") as file: + obj = [repo_path] + install_cmd + file.write(f"{obj}\n") + + return True + else: + print(f"\n## ComfyUI-Manager: EXECUTE => {install_cmd}") + code = run_script(install_cmd, cwd=repo_path) + + if platform.system() == "Windows": + try: + if int(comfy_ui_revision) < comfy_ui_required_revision: + print("\n\n###################################################################") + print(f"[WARN] ComfyUI-Manager: Your ComfyUI version ({comfy_ui_revision}) is too old. Please update to the latest version.") + print(f"[WARN] The extension installation feature may not work properly in the current installed ComfyUI version on Windows environment.") + print("###################################################################\n\n") + except: + pass + + if code != 0: + if url is None: + url = os.path.dirname(repo_path) + print(f"install script failed: {url}") + return False + +def print_comfyui_version(): + global comfy_ui_revision + global comfy_ui_commit_date + global comfy_ui_hash + + try: + repo = git.Repo(os.path.dirname(folder_paths.__file__)) + + comfy_ui_revision = len(list(repo.iter_commits('HEAD'))) + current_branch = repo.active_branch.name + comfy_ui_hash = repo.head.commit.hexsha + + try: + if int(comfy_ui_revision) < comfy_ui_required_revision: + print(f"\n\n## [WARN] ComfyUI-Manager: Your ComfyUI version ({comfy_ui_revision}) is too old. Please update to the latest version. 
##\n\n") + except: + pass + + comfy_ui_commit_date = repo.head.commit.committed_datetime.date() + if current_branch == "master": + print(f"### ComfyUI Revision: {comfy_ui_revision} [{comfy_ui_hash[:8]}] | Released on '{comfy_ui_commit_date}'") + else: + print(f"### ComfyUI Revision: {comfy_ui_revision} on '{current_branch}' [{comfy_ui_hash[:8]}] | Released on '{comfy_ui_commit_date}'") + except: + print("### ComfyUI Revision: UNKNOWN (The currently installed ComfyUI is not a Git repository)") + + +print_comfyui_version() + + +# use subprocess to avoid file system lock by git (Windows) +def __win_check_git_update(path, do_fetch=False, do_update=False): + if do_fetch: + command = [sys.executable, git_script_path, "--fetch", path] + elif do_update: + command = [sys.executable, git_script_path, "--pull", path] + else: + command = [sys.executable, git_script_path, "--check", path] + + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, _ = process.communicate() + output = output.decode('utf-8').strip() + + if do_update: + if "CUSTOM NODE PULL: True" in output: + process.wait() + print(f"\rUpdated: {path}") + return True + elif "CUSTOM NODE PULL: None" in output: + process.wait() + return True + else: + print(f"\rUpdate error: {path}") + process.wait() + return False + else: + if "CUSTOM NODE CHECK: True" in output: + process.wait() + return True + elif "CUSTOM NODE CHECK: False" in output: + process.wait() + return False + else: + print(f"\rFetch error: {path}") + process.wait() + return False + + +def __win_check_git_pull(path): + command = [sys.executable, git_script_path, "--pull", path] + process = subprocess.Popen(command) + process.wait() + + +def switch_to_default_branch(repo): + show_result = repo.git.remote("show", "origin") + matches = re.search(r"\s*HEAD branch:\s*(.*)", show_result) + if matches: + default_branch = matches.group(1) + repo.git.checkout(default_branch) + + +def git_repo_has_updates(path, do_fetch=False, do_update=False): + if do_fetch: + print(f"\x1b[2K\rFetching: {path}", end='') + elif do_update: + print(f"\x1b[2K\rUpdating: {path}", end='') + + # Check if the path is a git repository + if not os.path.exists(os.path.join(path, '.git')): + raise ValueError('Not a git repository') + + if platform.system() == "Windows": + res = __win_check_git_update(path, do_fetch, do_update) + execute_install_script(None, path, lazy_mode=True) + return res + else: + # Fetch the latest commits from the remote repository + repo = git.Repo(path) + + remote_name = 'origin' + remote = repo.remote(name=remote_name) + + # Get the current commit hash + commit_hash = repo.head.commit.hexsha + + if do_fetch or do_update: + remote.fetch() + + if do_update: + if repo.head.is_detached: + switch_to_default_branch(repo) + + try: + remote.pull() + repo.git.submodule('update', '--init', '--recursive') + new_commit_hash = repo.head.commit.hexsha + + if commit_hash != new_commit_hash: + execute_install_script(None, path) + print(f"\x1b[2K\rUpdated: {path}") + return True + else: + return False + + except Exception as e: + print(f"\nUpdating failed: {path}\n{e}", file=sys.stderr) + + if repo.head.is_detached: + return True + + # Get commit hash of the remote branch + current_branch = repo.active_branch + branch_name = current_branch.name + + remote_commit_hash = repo.refs[f'{remote_name}/{branch_name}'].object.hexsha + + # Compare the commit hashes to determine if the local repository is behind the remote repository + if commit_hash != remote_commit_hash: + # 
Get the commit dates + commit_date = repo.head.commit.committed_datetime + remote_commit_date = repo.refs[f'{remote_name}/{branch_name}'].object.committed_datetime + + # Compare the commit dates to determine if the local repository is behind the remote repository + if commit_date < remote_commit_date: + return True + + return False + + +def git_pull(path): + # Check if the path is a git repository + if not os.path.exists(os.path.join(path, '.git')): + raise ValueError('Not a git repository') + + # Pull the latest changes from the remote repository + if platform.system() == "Windows": + return __win_check_git_pull(path) + else: + repo = git.Repo(path) + + print(f"path={path} / repo.is_dirty: {repo.is_dirty()}") + + if repo.is_dirty(): + repo.git.stash() + + if repo.head.is_detached: + switch_to_default_branch(repo) + + origin = repo.remote(name='origin') + origin.pull() + repo.git.submodule('update', '--init', '--recursive') + + repo.close() + + return True + + +async def get_data(uri): + print(f"FETCH DATA from: {uri}") + if uri.startswith("http"): + async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(verify_ssl=False)) as session: + async with session.get(uri) as resp: + json_text = await resp.text() + else: + with open(uri, "r", encoding="utf-8") as f: + json_text = f.read() + + json_obj = json.loads(json_text) + return json_obj + + +def setup_js(): + import nodes + js_dest_path = os.path.join(js_path, "comfyui-manager") + + if hasattr(nodes, "EXTENSION_WEB_DIRS"): + if os.path.exists(js_dest_path): + shutil.rmtree(js_dest_path) + else: + print(f"[WARN] ComfyUI-Manager: Your ComfyUI version is outdated. Please update to the latest version.") + # setup js + if not os.path.exists(js_dest_path): + os.makedirs(js_dest_path) + js_src_path = os.path.join(comfyui_manager_path, "js", "comfyui-manager.js") + + print(f"### ComfyUI-Manager: Copy .js from '{js_src_path}' to '{js_dest_path}'") + shutil.copy(js_src_path, js_dest_path) + + +setup_js() + + +def setup_environment(): + git_exe = get_config()['git_exe'] + + if git_exe != '': + git.Git().update_environment(GIT_PYTHON_GIT_EXECUTABLE=git_exe) + + +setup_environment() + + +# Expand Server api + +import server +from aiohttp import web +import aiohttp +import json +import zipfile +import urllib.request + + +def get_model_dir(data): + if data['save_path'] != 'default': + if '..' in data['save_path'] or data['save_path'].startswith('/'): + print(f"[WARN] '{data['save_path']}' is not allowed path. 
So it will be saved into 'models/etc'.") + base_model = "etc" + else: + if data['save_path'].startswith("custom_nodes"): + base_model = os.path.join(comfy_path, data['save_path']) + else: + base_model = os.path.join(folder_paths.models_dir, data['save_path']) + else: + model_type = data['type'] + if model_type == "checkpoints": + base_model = folder_paths.folder_names_and_paths["checkpoints"][0][0] + elif model_type == "unclip": + base_model = folder_paths.folder_names_and_paths["checkpoints"][0][0] + elif model_type == "VAE": + base_model = folder_paths.folder_names_and_paths["vae"][0][0] + elif model_type == "lora": + base_model = folder_paths.folder_names_and_paths["loras"][0][0] + elif model_type == "T2I-Adapter": + base_model = folder_paths.folder_names_and_paths["controlnet"][0][0] + elif model_type == "T2I-Style": + base_model = folder_paths.folder_names_and_paths["controlnet"][0][0] + elif model_type == "controlnet": + base_model = folder_paths.folder_names_and_paths["controlnet"][0][0] + elif model_type == "clip_vision": + base_model = folder_paths.folder_names_and_paths["clip_vision"][0][0] + elif model_type == "gligen": + base_model = folder_paths.folder_names_and_paths["gligen"][0][0] + elif model_type == "upscale": + base_model = folder_paths.folder_names_and_paths["upscale_models"][0][0] + elif model_type == "embeddings": + base_model = folder_paths.folder_names_and_paths["embeddings"][0][0] + else: + base_model = "etc" + + return base_model + + +def get_model_path(data): + base_model = get_model_dir(data) + return os.path.join(base_model, data['filename']) + + +def check_a_custom_node_installed(item, do_fetch=False, do_update_check=True, do_update=False): + item['installed'] = 'None' + + if item['install_type'] == 'git-clone' and len(item['files']) == 1: + url = item['files'][0] + + if url.endswith("/"): + url = url[:-1] + + dir_name = os.path.splitext(os.path.basename(url))[0].replace(".git", "") + dir_path = os.path.join(custom_nodes_path, dir_name) + if os.path.exists(dir_path): + try: + if do_update_check and git_repo_has_updates(dir_path, do_fetch, do_update): + item['installed'] = 'Update' + elif sys.__comfyui_manager_is_import_failed_extension(dir_name): + item['installed'] = 'Fail' + else: + item['installed'] = 'True' + except: + if sys.__comfyui_manager_is_import_failed_extension(dir_name): + item['installed'] = 'Fail' + else: + item['installed'] = 'True' + + elif os.path.exists(dir_path + ".disabled"): + item['installed'] = 'Disabled' + + else: + item['installed'] = 'False' + + elif item['install_type'] == 'copy' and len(item['files']) == 1: + dir_name = os.path.basename(item['files'][0]) + + if item['files'][0].endswith('.py'): + base_path = custom_nodes_path + elif 'js_path' in item: + base_path = os.path.join(js_path, item['js_path']) + else: + base_path = js_path + + file_path = os.path.join(base_path, dir_name) + if os.path.exists(file_path): + if sys.__comfyui_manager_is_import_failed_extension(dir_name): + item['installed'] = 'Fail' + else: + item['installed'] = 'True' + elif os.path.exists(file_path + ".disabled"): + item['installed'] = 'Disabled' + else: + item['installed'] = 'False' + + +def check_custom_nodes_installed(json_obj, do_fetch=False, do_update_check=True, do_update=False): + if do_fetch: + print("Start fetching...", end="") + elif do_update: + print("Start updating...", end="") + elif do_update_check: + print("Start update check...", end="") + + def process_custom_node(item): + check_a_custom_node_installed(item, do_fetch, do_update_check, 
do_update) + + with concurrent.futures.ThreadPoolExecutor(4) as executor: + for item in json_obj['custom_nodes']: + executor.submit(process_custom_node, item) + + if do_fetch: + print(f"\x1b[2K\rFetching done.") + elif do_update: + update_exists = any(item['installed'] == 'Update' for item in json_obj['custom_nodes']) + if update_exists: + print(f"\x1b[2K\rUpdate done.") + else: + print(f"\x1b[2K\rAll extensions are already up-to-date.") + elif do_update_check: + print(f"\x1b[2K\rUpdate check done.") + + +@server.PromptServer.instance.routes.get("/customnode/getmappings") +async def fetch_customnode_mappings(request): + if request.rel_url.query["mode"] == "local": + uri = local_db_extension_node_mappings + else: + uri = get_config()['channel_url'] + '/extension-node-map.json' + + json_obj = await get_data(uri) + + all_nodes = set() + patterns = [] + for k, x in json_obj.items(): + all_nodes.update(set(x[0])) + + if 'nodename_pattern' in x[1]: + patterns.append((x[1]['nodename_pattern'], x[0])) + + missing_nodes = set(nodes.NODE_CLASS_MAPPINGS.keys()) - all_nodes + + for x in missing_nodes: + for pat, item in patterns: + if re.match(pat, x): + item.append(x) + + return web.json_response(json_obj, content_type='application/json') + + +@server.PromptServer.instance.routes.get("/customnode/fetch_updates") +async def fetch_updates(request): + try: + if request.rel_url.query["mode"] == "local": + uri = local_db_custom_node_list + else: + uri = get_config()['channel_url'] + '/custom-node-list.json' + + json_obj = await get_data(uri) + check_custom_nodes_installed(json_obj, True) + + update_exists = any('custom_nodes' in json_obj and 'installed' in node and node['installed'] == 'Update' for node in + json_obj['custom_nodes']) + + if update_exists: + return web.Response(status=201) + + return web.Response(status=200) + except: + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/customnode/update_all") +async def update_all(request): + try: + save_snapshot_with_postfix('autosave') + + if request.rel_url.query["mode"] == "local": + uri = local_db_custom_node_list + else: + uri = get_config()['channel_url'] + '/custom-node-list.json' + + json_obj = await get_data(uri) + check_custom_nodes_installed(json_obj, do_update=True) + + update_exists = any(item['installed'] == 'Update' for item in json_obj['custom_nodes']) + + if update_exists: + return web.Response(status=201) + + return web.Response(status=200) + except: + return web.Response(status=400) + + +def convert_markdown_to_html(input_text): + pattern_a = re.compile(r'\[a/([^]]+)\]\(([^)]+)\)') + pattern_w = re.compile(r'\[w/([^]]+)\]') + pattern_i = re.compile(r'\[i/([^]]+)\]') + pattern_bold = re.compile(r'\*\*([^*]+)\*\*') + pattern_white = re.compile(r'%%([^*]+)%%') + + def replace_a(match): + return f"{match.group(1)}" + + def replace_w(match): + return f"

<p class='cm-warn-note'>{match.group(1)}</p>"
+
+    def replace_i(match):
+        return f"<p class='cm-info-note'>{match.group(1)}</p>"
+
+    def replace_bold(match):
+        return f"<B>{match.group(1)}</B>"
+
+    def replace_white(match):
+        return f"<font color='white'>{match.group(1)}</font>"
+
+    input_text = input_text.replace('\\[', '[').replace('\\]', ']').replace('<', '&lt;').replace('>', '&gt;')
+
+    result_text = re.sub(pattern_a, replace_a, input_text)
+    result_text = re.sub(pattern_w, replace_w, result_text)
+    result_text = re.sub(pattern_i, replace_i, result_text)
+    result_text = re.sub(pattern_bold, replace_bold, result_text)
+    result_text = re.sub(pattern_white, replace_white, result_text)
+
+    return result_text.replace("\n", "<BR>")
+
+
+def populate_markdown(x):
+    if 'description' in x:
+        x['description'] = convert_markdown_to_html(x['description'])
+
+    if 'title' in x:
+        x['title'] = x['title'].replace('<', '&lt;').replace('>', '&gt;')
+
+
+@server.PromptServer.instance.routes.get("/customnode/getlist")
+async def fetch_customnode_list(request):
+    if "skip_update" in request.rel_url.query and request.rel_url.query["skip_update"] == "true":
+        skip_update = True
+    else:
+        skip_update = False
+
+    if request.rel_url.query["mode"] == "local":
+        channel = 'local'
+        uri = local_db_custom_node_list
+    else:
+        channel = get_config()['channel_url']
+        uri = channel + '/custom-node-list.json'
+
+    json_obj = await get_data(uri)
+    check_custom_nodes_installed(json_obj, False, not skip_update)
+
+    for x in json_obj['custom_nodes']:
+        populate_markdown(x)
+
+    if channel != 'local':
+        channels = default_channels+","+get_config()['channel_url_list']
+        channels = channels.split(',')
+
+        found = 'custom'
+        for item in channels:
+            item_info = item.split('::')
+            if len(item_info) == 2 and item_info[1] == channel:
+                found = item_info[0]
+
+        channel = found
+
+    json_obj['channel'] = channel
+
+    return web.json_response(json_obj, content_type='application/json')
+
+
+@server.PromptServer.instance.routes.get("/alternatives/getlist")
+async def fetch_alternatives_list(request):
+    if "skip_update" in request.rel_url.query and request.rel_url.query["skip_update"] == "true":
+        skip_update = True
+    else:
+        skip_update = False
+
+    if request.rel_url.query["mode"] == "local":
+        uri1 = local_db_alter
+        uri2 = local_db_custom_node_list
+    else:
+        uri1 = get_config()['channel_url'] + '/alter-list.json'
+        uri2 = get_config()['channel_url'] + '/custom-node-list.json'
+
+    alter_json = await get_data(uri1)
+    custom_node_json = await get_data(uri2)
+
+    fileurl_to_custom_node = {}
+
+    for item in custom_node_json['custom_nodes']:
+        for fileurl in item['files']:
+            fileurl_to_custom_node[fileurl] = item
+
+    for item in alter_json['items']:
+        fileurl = item['id']
+        if fileurl in fileurl_to_custom_node:
+            custom_node = fileurl_to_custom_node[fileurl]
+            check_a_custom_node_installed(custom_node, not skip_update)
+
+            populate_markdown(item)
+            populate_markdown(custom_node)
+            item['custom_node'] = custom_node
+
+    return web.json_response(alter_json, content_type='application/json')
+
+
+def check_model_installed(json_obj):
+    def process_model(item):
+        model_path = get_model_path(item)
+        item['installed'] = 'None'
+
+        if model_path is not None:
+            if os.path.exists(model_path):
+                item['installed'] = 'True'
+            else:
+                item['installed'] = 'False'
+
+    with concurrent.futures.ThreadPoolExecutor(8) as executor:
+        for item in json_obj['models']:
+            executor.submit(process_model, item)
+
+
+@server.PromptServer.instance.routes.get("/externalmodel/getlist")
+async def fetch_externalmodel_list(request):
+    if request.rel_url.query["mode"] == "local":
+        uri = local_db_model
+    else:
+        uri = get_config()['channel_url'] + '/model-list.json'
+
+    json_obj = await get_data(uri)
+    check_model_installed(json_obj)
+
+    return web.json_response(json_obj, content_type='application/json')
+
+
+@server.PromptServer.instance.routes.get("/snapshot/getlist")
+async def get_snapshot_list(request):
+    snapshots_directory = os.path.join(os.path.dirname(__file__), 'snapshots')
+    items = [f[:-5] for f in os.listdir(snapshots_directory) if f.endswith('.json')]
+    items.sort(reverse=True)
+    return web.json_response({'items': items}, content_type='application/json')
+
+@server.PromptServer.instance.routes.get("/snapshot/remove")
+async def remove_snapshot(request):
+    try:
+        target = request.rel_url.query["target"]
+
+        path = os.path.join(os.path.dirname(__file__), 'snapshots', f"{target}.json")
+        if os.path.exists(path):
+            os.remove(path)
+
+        return web.Response(status=200)
+    except:
+        return web.Response(status=400)
+
+
+@server.PromptServer.instance.routes.get("/snapshot/restore")
+async def restore_snapshot(request):
+    try:
+        target = request.rel_url.query["target"]
+
+        path = os.path.join(os.path.dirname(__file__), 'snapshots', f"{target}.json")
+        if os.path.exists(path):
+            if not os.path.exists(startup_script_path):
+                os.makedirs(startup_script_path)
+
+            target_path = os.path.join(startup_script_path, "restore-snapshot.json")
+            shutil.copy(path, target_path)
+
+            print(f"Snapshot restore scheduled: `{target}`")
+            return web.Response(status=200)
+
+        print(f"Snapshot file not found: `{path}`")
+        return web.Response(status=400)
+    except:
+        return web.Response(status=400)
+
+
+def get_current_snapshot():
+    # Get ComfyUI hash
+    repo_path = os.path.dirname(folder_paths.__file__)
+
+    if not os.path.exists(os.path.join(repo_path, '.git')):
+        # this helper is not a route handler, so signal failure with an
+        # exception rather than an aiohttp response
+        raise RuntimeError("Snapshot failed: the installed ComfyUI does not have a Git repository.")
+
+    repo = git.Repo(repo_path)
+    comfyui_commit_hash = repo.head.commit.hexsha
+
+    git_custom_nodes = {}
+    file_custom_nodes = []
+
+    # Get custom nodes hash
+    for path in os.listdir(custom_nodes_path):
+        fullpath = os.path.join(custom_nodes_path, path)
+
+        if os.path.isdir(fullpath):
+            is_disabled = path.endswith(".disabled")
+
+            try:
+                git_dir = os.path.join(fullpath, '.git')
+
+                if not os.path.exists(git_dir):
+                    continue
+
+                repo = git.Repo(fullpath)
+                commit_hash = repo.head.commit.hexsha
+                url = repo.remotes.origin.url
+                git_custom_nodes[url] = {
+                    'hash': commit_hash,
+                    'disabled': is_disabled
+                }
+
+            except:
+                print(f"Failed to extract snapshots for the custom node '{path}'.")
+
+        elif path.endswith('.py') or path.endswith('.py.disabled'):
+            # also match '.py.disabled' so disabled single-file nodes are recorded
+            is_disabled = path.endswith(".py.disabled")
+            filename = os.path.basename(path)
+            item = {
+                'filename': filename,
+                'disabled': is_disabled
+            }
+
+            file_custom_nodes.append(item)
+
+    return {
+        'comfyui': comfyui_commit_hash,
+        'git_custom_nodes': git_custom_nodes,
+        'file_custom_nodes': file_custom_nodes,
+    }
+
+
+def save_snapshot_with_postfix(postfix):
+    now = datetime.datetime.now()
+
+    date_time_format = now.strftime("%Y-%m-%d_%H-%M-%S")
+    file_name = f"{date_time_format}_{postfix}"
+
+    path = os.path.join(os.path.dirname(__file__), 'snapshots', f"{file_name}.json")
+    with open(path, "w") as json_file:
+        json.dump(get_current_snapshot(), json_file, indent=4)
+
+
+@server.PromptServer.instance.routes.get("/snapshot/get_current")
+async def get_current_snapshot_api(request):
+    try:
+        return web.json_response(get_current_snapshot(), content_type='application/json')
+    except:
+        return web.Response(status=400)
+
+
+@server.PromptServer.instance.routes.get("/snapshot/save")
+async def save_snapshot(request):
+    try:
+        save_snapshot_with_postfix('snapshot')
+        return web.Response(status=200)
+    except:
+        return web.Response(status=400)
+
+
+def unzip_install(files):
+    temp_filename = 'manager-temp.zip'
+    for url in files:
+        if url.endswith("/"):
+            url = url[:-1]
+        try:
+            headers = {
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
+
+            req = urllib.request.Request(url, headers=headers)
+            response = 
urllib.request.urlopen(req) + data = response.read() + + with open(temp_filename, 'wb') as f: + f.write(data) + + with zipfile.ZipFile(temp_filename, 'r') as zip_ref: + zip_ref.extractall(custom_nodes_path) + + os.remove(temp_filename) + except Exception as e: + print(f"Install(unzip) error: {url} / {e}", file=sys.stderr) + return False + + print("Installation was successful.") + return True + + +def download_url_with_agent(url, save_path): + try: + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'} + + req = urllib.request.Request(url, headers=headers) + response = urllib.request.urlopen(req) + data = response.read() + + if not os.path.exists(os.path.dirname(save_path)): + os.makedirs(os.path.dirname(save_path)) + + with open(save_path, 'wb') as f: + f.write(data) + + except Exception as e: + print(f"Download error: {url} / {e}", file=sys.stderr) + return False + + print("Installation was successful.") + return True + + +def copy_install(files, js_path_name=None): + for url in files: + if url.endswith("/"): + url = url[:-1] + try: + if url.endswith(".py"): + download_url(url, custom_nodes_path) + else: + path = os.path.join(js_path, js_path_name) if js_path_name is not None else js_path + if not os.path.exists(path): + os.makedirs(path) + download_url(url, path) + + except Exception as e: + print(f"Install(copy) error: {url} / {e}", file=sys.stderr) + return False + + print("Installation was successful.") + return True + + +def copy_uninstall(files, js_path_name='.'): + for url in files: + if url.endswith("/"): + url = url[:-1] + dir_name = os.path.basename(url) + base_path = custom_nodes_path if url.endswith('.py') else os.path.join(js_path, js_path_name) + file_path = os.path.join(base_path, dir_name) + + try: + if os.path.exists(file_path): + os.remove(file_path) + elif os.path.exists(file_path + ".disabled"): + os.remove(file_path + ".disabled") + except Exception as e: + print(f"Uninstall(copy) error: {url} / {e}", file=sys.stderr) + return False + + print("Uninstallation was successful.") + return True + + +def copy_set_active(files, is_disable, js_path_name='.'): + if is_disable: + action_name = "Disable" + else: + action_name = "Enable" + + for url in files: + if url.endswith("/"): + url = url[:-1] + dir_name = os.path.basename(url) + base_path = custom_nodes_path if url.endswith('.py') else os.path.join(js_path, js_path_name) + file_path = os.path.join(base_path, dir_name) + + try: + if is_disable: + current_name = file_path + new_name = file_path + ".disabled" + else: + current_name = file_path + ".disabled" + new_name = file_path + + os.rename(current_name, new_name) + + except Exception as e: + print(f"{action_name}(copy) error: {url} / {e}", file=sys.stderr) + + return False + + print(f"{action_name} was successful.") + return True + + +def execute_install_script(url, repo_path, lazy_mode=False): + install_script_path = os.path.join(repo_path, "install.py") + requirements_path = os.path.join(repo_path, "requirements.txt") + + if lazy_mode: + install_cmd = ["#LAZY-INSTALL-SCRIPT", sys.executable] + try_install_script(url, repo_path, install_cmd) + else: + if os.path.exists(requirements_path): + print("Install: pip packages") + with open(requirements_path, "r") as requirements_file: + for line in requirements_file: + package_name = line.strip() + if package_name: + install_cmd = [sys.executable, "-m", "pip", "install", package_name] + if package_name.strip() != "": + 
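+                            # each non-empty line of requirements.txt is installed
+                            # individually, so one broken package spec does not
+                            # abort the remaining installs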
try_install_script(url, repo_path, install_cmd) + + if os.path.exists(install_script_path): + print(f"Install: install script") + install_cmd = [sys.executable, "install.py"] + try_install_script(url, repo_path, install_cmd) + + return True + + +class GitProgress(RemoteProgress): + def __init__(self): + super().__init__() + self.pbar = tqdm() + + def update(self, op_code, cur_count, max_count=None, message=''): + self.pbar.total = max_count + self.pbar.n = cur_count + self.pbar.pos = 0 + self.pbar.refresh() + +def is_valid_url(url): + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except ValueError: + return False + +def gitclone_install(files): + print(f"install: {files}") + for url in files: + if not is_valid_url(url): + print(f"Invalid git url: '{url}'") + return False + + if url.endswith("/"): + url = url[:-1] + try: + print(f"Download: git clone '{url}'") + repo_name = os.path.splitext(os.path.basename(url))[0] + repo_path = os.path.join(custom_nodes_path, repo_name) + + # Clone the repository from the remote URL + if platform.system() == 'Windows': + res = run_script([sys.executable, git_script_path, "--clone", custom_nodes_path, url]) + if res != 0: + return False + else: + repo = git.Repo.clone_from(url, repo_path, recursive=True, progress=GitProgress()) + repo.git.clear_cache() + repo.close() + + if not execute_install_script(url, repo_path): + return False + + except Exception as e: + print(f"Install(git-clone) error: {url} / {e}", file=sys.stderr) + return False + + print("Installation was successful.") + return True + + +import platform +import subprocess +import time + + +def rmtree(path): + retry_count = 3 + + while True: + try: + retry_count -= 1 + + if platform.system() == "Windows": + run_script(['attrib', '-R', path + '\\*', '/S']) + shutil.rmtree(path) + + return True + + except Exception as ex: + print(f"ex: {ex}") + time.sleep(3) + + if retry_count < 0: + raise ex + + print(f"Uninstall retry({retry_count})") + + +def gitclone_uninstall(files): + import shutil + import os + + print(f"uninstall: {files}") + for url in files: + if url.endswith("/"): + url = url[:-1] + try: + dir_name = os.path.splitext(os.path.basename(url))[0].replace(".git", "") + dir_path = os.path.join(custom_nodes_path, dir_name) + + # safety check + if dir_path == '/' or dir_path[1:] == ":/" or dir_path == '': + print(f"Uninstall(git-clone) error: invalid path '{dir_path}' for '{url}'") + return False + + install_script_path = os.path.join(dir_path, "uninstall.py") + disable_script_path = os.path.join(dir_path, "disable.py") + if os.path.exists(install_script_path): + uninstall_cmd = [sys.executable, "uninstall.py"] + code = run_script(uninstall_cmd, cwd=dir_path) + + if code != 0: + print(f"An error occurred during the execution of the uninstall.py script. Only the '{dir_path}' will be deleted.") + elif os.path.exists(disable_script_path): + disable_script = [sys.executable, "disable.py"] + code = run_script(disable_script, cwd=dir_path) + if code != 0: + print(f"An error occurred during the execution of the disable.py script. 
Only the '{dir_path}' will be deleted.")
+
+            if os.path.exists(dir_path):
+                rmtree(dir_path)
+            elif os.path.exists(dir_path + ".disabled"):
+                rmtree(dir_path + ".disabled")
+        except Exception as e:
+            print(f"Uninstall(git-clone) error: {url} / {e}", file=sys.stderr)
+            return False
+
+    print("Uninstallation was successful.")
+    return True
+
+
+def gitclone_set_active(files, is_disable):
+    import os
+
+    if is_disable:
+        action_name = "Disable"
+    else:
+        action_name = "Enable"
+
+    print(f"{action_name}: {files}")
+    for url in files:
+        if url.endswith("/"):
+            url = url[:-1]
+        try:
+            dir_name = os.path.splitext(os.path.basename(url))[0].replace(".git", "")
+            dir_path = os.path.join(custom_nodes_path, dir_name)
+
+            # safety check
+            if dir_path == '/' or dir_path[1:] == ":/" or dir_path == '':
+                print(f"{action_name}(git-clone) error: invalid path '{dir_path}' for '{url}'")
+                return False
+
+            if is_disable:
+                current_path = dir_path
+                new_path = dir_path + ".disabled"
+            else:
+                current_path = dir_path + ".disabled"
+                new_path = dir_path
+
+            os.rename(current_path, new_path)
+
+            if is_disable:
+                if os.path.exists(os.path.join(new_path, "disable.py")):
+                    disable_script = [sys.executable, "disable.py"]
+                    try_install_script(url, new_path, disable_script)
+            else:
+                if os.path.exists(os.path.join(new_path, "enable.py")):
+                    enable_script = [sys.executable, "enable.py"]
+                    try_install_script(url, new_path, enable_script)
+
+        except Exception as e:
+            print(f"{action_name}(git-clone) error: {url} / {e}", file=sys.stderr)
+            return False
+
+    print(f"{action_name} was successful.")
+    return True
+
+
+def gitclone_update(files):
+    import os
+
+    print(f"Update: {files}")
+    for url in files:
+        if url.endswith("/"):
+            url = url[:-1]
+        try:
+            repo_name = os.path.splitext(os.path.basename(url))[0].replace(".git", "")
+            repo_path = os.path.join(custom_nodes_path, repo_name)
+            git_pull(repo_path)
+
+            if not execute_install_script(url, repo_path, lazy_mode=True):
+                return False
+
+        except Exception as e:
+            print(f"Update(git-clone) error: {url} / {e}", file=sys.stderr)
+            return False
+
+    print("Update was successful.")
+    return True
+
+
+@server.PromptServer.instance.routes.post("/customnode/install")
+async def install_custom_node(request):
+    json_data = await request.json()
+
+    install_type = json_data['install_type']
+
+    print(f"Install custom node '{json_data['title']}'")
+
+    res = False
+
+    if len(json_data['files']) == 0:
+        return web.Response(status=400)
+
+    if install_type == "unzip":
+        res = unzip_install(json_data['files'])
+
+    elif install_type == "copy":
+        js_path_name = json_data['js_path'] if 'js_path' in json_data else '.'
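+        # a 'copy' install drops bare .py files into custom_nodes and any other
+        # assets into the web js directory selected by 'js_path' (see copy_install)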
+ res = copy_install(json_data['files'], js_path_name) + + elif install_type == "git-clone": + res = gitclone_install(json_data['files']) + + if 'pip' in json_data: + for pname in json_data['pip']: + install_cmd = [sys.executable, "-m", "pip", "install", pname] + try_install_script(json_data['files'][0], ".", install_cmd) + + if res: + print(f"After restarting ComfyUI, please refresh the browser.") + return web.json_response({}, content_type='application/json') + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/customnode/install/git_url") +async def install_custom_node_git_url(request): + res = False + if "url" in request.rel_url.query: + url = request.rel_url.query['url'] + res = gitclone_install([url]) + + if res: + print(f"After restarting ComfyUI, please refresh the browser.") + return web.Response(status=200) + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.post("/customnode/uninstall") +async def uninstall_custom_node(request): + json_data = await request.json() + + install_type = json_data['install_type'] + + print(f"Uninstall custom node '{json_data['title']}'") + + res = False + + if install_type == "copy": + js_path_name = json_data['js_path'] if 'js_path' in json_data else '.' + res = copy_uninstall(json_data['files'], js_path_name) + + elif install_type == "git-clone": + res = gitclone_uninstall(json_data['files']) + + if res: + print(f"After restarting ComfyUI, please refresh the browser.") + return web.json_response({}, content_type='application/json') + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.post("/customnode/update") +async def update_custom_node(request): + json_data = await request.json() + + install_type = json_data['install_type'] + + print(f"Update custom node '{json_data['title']}'") + + res = False + + if install_type == "git-clone": + res = gitclone_update(json_data['files']) + + if res: + print(f"After restarting ComfyUI, please refresh the browser.") + return web.json_response({}, content_type='application/json') + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/comfyui_manager/update_comfyui") +async def update_comfyui(request): + print(f"Update ComfyUI") + + try: + repo_path = os.path.dirname(folder_paths.__file__) + + if not os.path.exists(os.path.join(repo_path, '.git')): + print(f"ComfyUI update fail: The installed ComfyUI does not have a Git repository.") + return web.Response(status=400) + + # version check + repo = git.Repo(repo_path) + + if repo.head.is_detached: + switch_to_default_branch(repo) + + current_branch = repo.active_branch + branch_name = current_branch.name + + remote_name = 'origin' + remote = repo.remote(name=remote_name) + remote.fetch() + + commit_hash = repo.head.commit.hexsha + remote_commit_hash = repo.refs[f'{remote_name}/{branch_name}'].object.hexsha + + if commit_hash != remote_commit_hash: + git_pull(repo_path) + execute_install_script("ComfyUI", repo_path) + return web.Response(status=201) + else: + return web.Response(status=200) + except Exception as e: + print(f"ComfyUI update fail: {e}", file=sys.stderr) + pass + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.post("/customnode/toggle_active") +async def toggle_active(request): + json_data = await request.json() + + install_type = json_data['install_type'] + is_disabled = json_data['installed'] == "Disabled" + + print(f"Update custom node '{json_data['title']}'") + + res = False + + if install_type == "git-clone": + 
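+        # enabling/disabling renames the node directory to/from '<name>.disabled'
+        # and runs its enable.py/disable.py hook when present (see gitclone_set_active)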
res = gitclone_set_active(json_data['files'], not is_disabled) + elif install_type == "copy": + res = copy_set_active(json_data['files'], not is_disabled, json_data.get('js_path', None)) + + if res: + return web.json_response({}, content_type='application/json') + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.post("/model/install") +async def install_model(request): + json_data = await request.json() + + model_path = get_model_path(json_data) + + res = False + + try: + if model_path is not None: + print(f"Install model '{json_data['name']}' into '{model_path}'") + + if json_data['url'].startswith('https://github.com') or json_data['url'].startswith('https://huggingface.co'): + model_dir = get_model_dir(json_data) + download_url(json_data['url'], model_dir) + + return web.json_response({}, content_type='application/json') + else: + res = download_url_with_agent(json_data['url'], model_path) + else: + print(f"Model installation error: invalid model type - {json_data['type']}") + + if res: + return web.json_response({}, content_type='application/json') + except Exception as e: + print(f"[ERROR] {e}", file=sys.stderr) + pass + + return web.Response(status=400) + + +@server.PromptServer.instance.routes.get("/manager/preview_method") +async def preview_method(request): + if "value" in request.rel_url.query: + set_preview_method(request.rel_url.query['value']) + write_config() + else: + return web.Response(text=get_current_preview_method(), status=200) + + return web.Response(status=200) + + +@server.PromptServer.instance.routes.get("/manager/badge_mode") +async def badge_mode(request): + if "value" in request.rel_url.query: + set_badge_mode(request.rel_url.query['value']) + write_config() + else: + return web.Response(text=get_config()['badge_mode'], status=200) + + return web.Response(status=200) + + +@server.PromptServer.instance.routes.get("/manager/channel_url_list") +async def channel_url_list(request): + channels = default_channels+","+get_config()['channel_url_list'] + channels = channels.split(',') + + if "value" in request.rel_url.query: + for item in channels: + name_url = item.split("::") + if len(name_url) == 2 and name_url[0] == request.rel_url.query['value']: + get_config()['channel_url'] = name_url[1] + write_config() + break + else: + selected = 'custom' + selected_url = get_config()['channel_url'] + for item in channels: + item_info = item.split('::') + if len(item_info) == 2 and item_info[1] == selected_url: + selected = item_info[0] + + res = {'selected': selected, + 'list': channels} + return web.json_response(res, status=200) + + return web.Response(status=200) + + +@server.PromptServer.instance.routes.get("/manager/notice") +async def get_notice(request): + url = "github.com" + path = "/ltdrdata/ltdrdata.github.io/wiki/News" + + conn = http.client.HTTPSConnection(url) + conn.request("GET", path) + + response = conn.getresponse() + + try: + if response.status == 200: + html_content = response.read().decode('utf-8') + + pattern = re.compile(r'
<div class="markdown-body">([\s\S]*?)</div>')
+            match = pattern.search(html_content)
+
+            if match:
+                markdown_content = match.group(1)
+                markdown_content += f"<HR>ComfyUI: {comfy_ui_revision}[{comfy_ui_hash[:6]}]({comfy_ui_commit_date})"
+                # markdown_content += f"<BR>&nbsp; &nbsp; &nbsp; &nbsp; &nbsp;()"
+                markdown_content += f"<BR>Manager: {version}"
+
+                try:
+                    if required_comfyui_revision > int(comfy_ui_revision):
+                        markdown_content = f'<P style="text-align: center; color:red">Your ComfyUI is too OUTDATED!!!</P>
' + markdown_content + except: + pass + + return web.Response(text=markdown_content, status=200) + else: + return web.Response(text="Unable to retrieve Notice", status=200) + else: + return web.Response(text="Unable to retrieve Notice", status=200) + finally: + conn.close() + + +@server.PromptServer.instance.routes.get("/manager/reboot") +def restart(self): + try: + sys.stdout.close_log() + except Exception as e: + pass + + return os.execv(sys.executable, [sys.executable] + sys.argv) + + +@server.PromptServer.instance.routes.get("/manager/share_option") +async def share_option(request): + if "value" in request.rel_url.query: + get_config()['share_option'] = request.rel_url.query['value'] + write_config() + else: + return web.Response(text=get_config()['share_option'], status=200) + + return web.Response(status=200) + + +def get_openart_auth(): + if not os.path.exists(os.path.join(comfyui_manager_path, ".openart_key")): + return None + try: + with open(os.path.join(comfyui_manager_path, ".openart_key"), "r") as f: + openart_key = f.read().strip() + return openart_key if openart_key else None + except: + return None + + +def get_matrix_auth(): + if not os.path.exists(os.path.join(comfyui_manager_path, "matrix_auth")): + return None + try: + with open(os.path.join(comfyui_manager_path, "matrix_auth"), "r") as f: + matrix_auth = f.read() + homeserver, username, password = matrix_auth.strip().split("\n") + if not homeserver or not username or not password: + return None + return { + "homeserver": homeserver, + "username": username, + "password": password, + } + except: + return None + + +def get_comfyworkflows_auth(): + if not os.path.exists(os.path.join(comfyui_manager_path, "comfyworkflows_sharekey")): + return None + try: + with open(os.path.join(comfyui_manager_path, "comfyworkflows_sharekey"), "r") as f: + share_key = f.read() + if not share_key.strip(): + return None + return share_key + except: + return None + + +@server.PromptServer.instance.routes.get("/manager/get_openart_auth") +async def api_get_openart_auth(request): + # print("Getting stored Matrix credentials...") + openart_key = get_openart_auth() + if not openart_key: + return web.Response(status=404) + return web.json_response({"openart_key": openart_key}) + + +@server.PromptServer.instance.routes.post("/manager/set_openart_auth") +async def api_set_openart_auth(request): + json_data = await request.json() + openart_key = json_data['openart_key'] + with open(os.path.join(comfyui_manager_path, ".openart_key"), "w") as f: + f.write(openart_key) + return web.Response(status=200) + + +@server.PromptServer.instance.routes.get("/manager/get_matrix_auth") +async def api_get_matrix_auth(request): + # print("Getting stored Matrix credentials...") + matrix_auth = get_matrix_auth() + if not matrix_auth: + return web.Response(status=404) + return web.json_response(matrix_auth) + + +@server.PromptServer.instance.routes.get("/manager/get_comfyworkflows_auth") +async def api_get_comfyworkflows_auth(request): + # Check if the user has provided Matrix credentials in a file called 'matrix_accesstoken' + # in the same directory as the ComfyUI base folder + # print("Getting stored Comfyworkflows.com auth...") + comfyworkflows_auth = get_comfyworkflows_auth() + if not comfyworkflows_auth: + return web.Response(status=404) + return web.json_response({"comfyworkflows_sharekey" : comfyworkflows_auth}) + + +def set_matrix_auth(json_data): + homeserver = json_data['homeserver'] + username = json_data['username'] + password = json_data['password'] + 
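+    # the credentials are persisted as three newline-separated lines and read
+    # back in the same homeserver/username/password order by get_matrix_auth()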
with open(os.path.join(comfyui_manager_path, "matrix_auth"), "w") as f: + f.write("\n".join([homeserver, username, password])) + + +def set_comfyworkflows_auth(comfyworkflows_sharekey): + with open(os.path.join(comfyui_manager_path, "comfyworkflows_sharekey"), "w") as f: + f.write(comfyworkflows_sharekey) + + +def has_provided_matrix_auth(matrix_auth): + return matrix_auth['homeserver'].strip() and matrix_auth['username'].strip() and matrix_auth['password'].strip() + + +def has_provided_comfyworkflows_auth(comfyworkflows_sharekey): + return comfyworkflows_sharekey.strip() + + +@server.PromptServer.instance.routes.post("/manager/share") +async def share_art(request): + # get json data + json_data = await request.json() + + matrix_auth = json_data['matrix_auth'] + comfyworkflows_sharekey = json_data['cw_auth']['cw_sharekey'] + + set_matrix_auth(matrix_auth) + set_comfyworkflows_auth(comfyworkflows_sharekey) + + share_destinations = json_data['share_destinations'] + credits = json_data['credits'] + title = json_data['title'] + description = json_data['description'] + is_nsfw = json_data['is_nsfw'] + prompt = json_data['prompt'] + potential_outputs = json_data['potential_outputs'] + selected_output_index = json_data['selected_output_index'] + + try: + output_to_share = potential_outputs[int(selected_output_index)] + except: + # for now, pick the first output + output_to_share = potential_outputs[0] + + assert output_to_share['type'] in ('image', 'output') + output_dir = folder_paths.get_output_directory() + + if output_to_share['type'] == 'image': + asset_filename = output_to_share['image']['filename'] + asset_subfolder = output_to_share['image']['subfolder'] + + if output_to_share['image']['type'] == 'temp': + output_dir = folder_paths.get_temp_directory() + else: + asset_filename = output_to_share['output']['filename'] + asset_subfolder = output_to_share['output']['subfolder'] + + if asset_subfolder: + asset_filepath = os.path.join(output_dir, asset_subfolder, asset_filename) + else: + asset_filepath = os.path.join(output_dir, asset_filename) + + # get the mime type of the asset + assetFileType = mimetypes.guess_type(asset_filepath)[0] + + if "comfyworkflows" in share_destinations: + share_website_host = "https://comfyworkflows.com" + share_endpoint = f"{share_website_host}/api" + + # get presigned urls + async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(verify_ssl=False)) as session: + async with session.post( + f"{share_endpoint}/get_presigned_urls", + json={ + "assetFileName": asset_filename, + "assetFileType": assetFileType, + "workflowJsonFileName" : 'workflow.json', + "workflowJsonFileType" : 'application/json', + + }, + ) as resp: + assert resp.status == 200 + presigned_urls_json = await resp.json() + assetFilePresignedUrl = presigned_urls_json["assetFilePresignedUrl"] + assetFileKey = presigned_urls_json["assetFileKey"] + workflowJsonFilePresignedUrl = presigned_urls_json["workflowJsonFilePresignedUrl"] + workflowJsonFileKey = presigned_urls_json["workflowJsonFileKey"] + + # upload asset + async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(verify_ssl=False)) as session: + async with session.put(assetFilePresignedUrl, data=open(asset_filepath, "rb")) as resp: + assert resp.status == 200 + + # upload workflow json + async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(verify_ssl=False)) as session: + async with session.put(workflowJsonFilePresignedUrl, 
data=json.dumps(prompt['workflow']).encode('utf-8')) as resp: + assert resp.status == 200 + + # make a POST request to /api/upload_workflow with form data key values + async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(verify_ssl=False)) as session: + form = aiohttp.FormData() + if comfyworkflows_sharekey: + form.add_field("shareKey", comfyworkflows_sharekey) + form.add_field("source", "comfyui_manager") + form.add_field("assetFileKey", assetFileKey) + form.add_field("assetFileType", assetFileType) + form.add_field("workflowJsonFileKey", workflowJsonFileKey) + form.add_field("sharedWorkflowWorkflowJsonString", json.dumps(prompt['workflow'])) + form.add_field("sharedWorkflowPromptJsonString", json.dumps(prompt['output'])) + form.add_field("shareWorkflowCredits", credits) + form.add_field("shareWorkflowTitle", title) + form.add_field("shareWorkflowDescription", description) + form.add_field("shareWorkflowIsNSFW", str(is_nsfw).lower()) + + async with session.post( + f"{share_endpoint}/upload_workflow", + data=form, + ) as resp: + assert resp.status == 200 + upload_workflow_json = await resp.json() + workflowId = upload_workflow_json["workflowId"] + + # check if the user has provided Matrix credentials + if "matrix" in share_destinations: + comfyui_share_room_id = '!LGYSoacpJPhIfBqVfb:matrix.org' + filename = os.path.basename(asset_filepath) + content_type = assetFileType + + try: + from matrix_client.api import MatrixHttpApi + from matrix_client.client import MatrixClient + + homeserver = 'matrix.org' + if matrix_auth: + homeserver = matrix_auth.get('homeserver', 'matrix.org') + homeserver = homeserver.replace("http://", "https://") + if not homeserver.startswith("https://"): + homeserver = "https://" + homeserver + + client = MatrixClient(homeserver) + try: + token = client.login(username=matrix_auth['username'], password=matrix_auth['password']) + if not token: + return web.json_response({"error" : "Invalid Matrix credentials."}, content_type='application/json', status=400) + except: + return web.json_response({"error" : "Invalid Matrix credentials."}, content_type='application/json', status=400) + + matrix = MatrixHttpApi(homeserver, token=token) + with open(asset_filepath, 'rb') as f: + mxc_url = matrix.media_upload(f.read(), content_type, filename=filename)['content_uri'] + + workflow_json_mxc_url = matrix.media_upload(prompt['workflow'], 'application/json', filename='workflow.json')['content_uri'] + + text_content = "" + if title: + text_content += f"{title}\n" + if description: + text_content += f"{description}\n" + if credits: + text_content += f"\ncredits: {credits}\n" + response = matrix.send_message(comfyui_share_room_id, text_content) + response = matrix.send_content(comfyui_share_room_id, mxc_url, filename, 'm.image') + response = matrix.send_content(comfyui_share_room_id, workflow_json_mxc_url, 'workflow.json', 'm.file') + except: + import traceback + traceback.print_exc() + return web.json_response({"error" : "An error occurred when sharing your art to Matrix."}, content_type='application/json', status=500) + + return web.json_response({ + "comfyworkflows" : { + "url" : None if "comfyworkflows" not in share_destinations else f"{share_website_host}/workflows/{workflowId}", + }, + "matrix" : { + "success" : None if "matrix" not in share_destinations else True + } + }, content_type='application/json', status=200) + +WEB_DIRECTORY = "js" +NODE_CLASS_MAPPINGS = {} +__all__ = ['NODE_CLASS_MAPPINGS'] + diff --git 
a/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96e4caac25185fd1f5da69a3ea8ca9e04a0bb590 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9653ecebb9d547e6ba90d8b6a3de654d12f38e9 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-310.pyc b/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd4bdf2d293f9a4753956099e3998824fa544e52 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-311.pyc b/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab958df1e5b686c4cb2aa684a720316dee78abe3 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/__pycache__/prestartup_script.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Manager/alter-list.json b/custom_nodes/ComfyUI-Manager/alter-list.json new file mode 100644 index 0000000000000000000000000000000000000000..6c844ac700311292ac66337072928a5303c530f2 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/alter-list.json @@ -0,0 +1,189 @@ +{ + "items": [ + { + "id":"https://github.com/Fannovel16/comfyui_controlnet_aux", + "tags":"controlnet", + "description": "This extension provides preprocessor nodes for using controlnet." + }, + { + "id":"https://github.com/comfyanonymous/ComfyUI_experiments", + "tags":"Dynamic Thresholding, DT, CFG, controlnet, reference only", + "description": "This experimental nodes contains a 'Reference Only' node and a 'ModelSamplerTonemapNoiseTest' node corresponding to the 'Dynamic Threshold'." + }, + { + "id":"https://github.com/ltdrdata/ComfyUI-Impact-Pack", + "tags":"ddetailer, adetailer, ddsd, DD, loopback scaler, prompt, wildcard, dynamic prompt", + "description": "To implement the feature of automatically detecting faces and enhancing details, various detection nodes and detailers provided by the Impact Pack can be applied. Similarly to Loopback Scaler, it also provides various custom workflows that can apply Ksampler while gradually scaling up." + }, + { + "id":"https://github.com/ltdrdata/ComfyUI-Inspire-Pack", + "tags":"lora block weight, effective block analyzer, lbw, variation seed", + "description": "The Inspire Pack provides the functionality of Lora Block Weight, Variation Seed." + }, + { + "id":"https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py", + "tags":"ddsd", + "description": "This extension provides a feature that generates segment masks on an image using a text prompt. When used in conjunction with Impact Pack, it enables applications such as DDSD." + }, + { + "id":"https://github.com/BadCafeCode/masquerade-nodes-comfyui", + "tags":"ddetailer", + "description": "This extension provides a way to recognize and enhance masks for faces similar to Impact Pack." 
+ }, + { + "id":"https://github.com/BlenderNeko/ComfyUI_Cutoff", + "tags":"cutoff", + "description": "By using this extension, prompts like 'blue hair' can be prevented from interfering with other prompts by blocking the attribute 'blue' from being used in prompts other than 'hair'." + }, + { + "id":"https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb", + "tags":"prompt, weight", + "description": "There are differences in the processing methods of prompts, such as weighting and scheduling, between A1111 and ComfyUI. With this extension, various settings can be used to implement prompt processing methods similar to A1111. As this feature is also integrated into ComfyUI Cutoff, please download the Cutoff extension if you plan to use it in conjunction with Cutoff." + }, + { + "id":"https://github.com/shiimizu/ComfyUI_smZNodes", + "tags":"prompt, weight", + "description": "There are differences in the processing methods of prompts, such as weighting and scheduling, between A1111 and ComfyUI. This extension helps to reproduce the same embedding as A1111." + }, + { + "id":"https://github.com/BlenderNeko/ComfyUI_Noise", + "tags":"img2img alt, random", + "description": "The extension provides an unsampler that reverses the sampling process, allowing for a function similar to img2img alt to be implemented. Furthermore, ComfyUI uses CPU's Random instead of GPU's Random for better reproducibility compared to A1111. This extension provides the ability to use GPU's Random for Latent Noise. However, since GPU's Random may vary depending on the GPU model, reproducibility on different devices cannot be guaranteed." + }, + { + "id":"https://github.com/BlenderNeko/ComfyUI_SeeCoder", + "tags":"seecoder, prompt-free-diffusion", + "description": "The extension provides seecoder feature." + }, + { + "id":"https://github.com/lilly1987/ComfyUI_node_Lilly", + "tags":"prompt, wildcard", + "description": "This extension provides features such as a wildcard function that randomly selects prompts belonging to a category and the ability to directly load lora from prompts." + }, + { + "id":"https://github.com/Davemane42/ComfyUI_Dave_CustomNode", + "tags":"latent couple", + "description": "ComfyUI already provides the ability to composite latents by default. However, this extension makes it more convenient to use by visualizing the composite area." + }, + { + "id":"https://github.com/LEv145/images-grid-comfy-plugin", + "tags":"X/Y Plot", + "description": "This tool provides a viewer node that allows for checking multiple outputs in a grid, similar to the X/Y Plot extension." + }, + { + "id":"https://github.com/pythongosssss/ComfyUI-WD14-Tagger", + "tags":"deepbooru, clip interrogation", + "description": "This extension generates clip text by taking an image as input and using the Deepbooru model." + }, + { + "id":"https://github.com/szhublox/ambw_comfyui", + "tags":"supermerger", + "description": "This node takes two models, merges individual blocks together at various ratios, and automatically rates each merge, keeping the ratio with the highest score. " + }, + { + "id":"https://github.com/ssitu/ComfyUI_UltimateSDUpscale", + "tags":"upscaler, Ultimate SD Upscale", + "description": "ComfyUI nodes for the Ultimate Stable Diffusion Upscale script by Coyote-A. Uses the same script used in the A1111 extension to hopefully replicate images generated using the A1111 webui." 
+ }, + { + "id":"https://github.com/dawangraoming/ComfyUI_ksampler_gpu/raw/main/ksampler_gpu.py", + "tags":"random, noise", + "description": "A1111 provides KSampler that uses GPU-based random noise. This extension offers KSampler utilizing GPU-based random noise." + }, + { + "id":"https://github.com/space-nuko/nui-suite", + "tags":"prompt, dynamic prompt", + "description": "This extension provides nodes with the functionality of dynamic prompts." + }, + { + "id":"https://github.com/melMass/comfy_mtb", + "tags":"roop", + "description": "This extension provides bunch of nodes including roop" + }, + { + "id":"https://github.com/ssitu/ComfyUI_roop", + "tags":"roop", + "description": "This extension provides nodes for the roop A1111 webui script." + }, + { + "id":"https://github.com/asagi4/comfyui-prompt-control", + "tags":"prompt, prompt editing", + "description": "This extension provides the ability to use prompts like \n\n**a [large::0.1] [cat|dog:0.05] [::0.5] [in a park:in space:0.4]**\n\n" + }, + { + "id":"https://github.com/adieyal/comfyui-dynamicprompts", + "tags":"prompt, dynamic prompt", + "description": "This extension is a port of sd-dynamic-prompt to ComfyUI." + }, + { + "id":"https://github.com/kwaroran/abg-comfyui", + "tags":"abg, background remover", + "description": "A Anime Background Remover node for comfyui, based on this hf space, works same as AGB extention in automatic1111." + }, + { + "id":"https://github.com/Gourieff/comfyui-reactor-node", + "tags":"reactor, sd-webui-roop-nsfw", + "description": "This is a ported version of ComfyUI for the sd-webui-roop-nsfw extension." + }, + { + "id":"https://github.com/laksjdjf/attention-couple-ComfyUI", + "tags":"regional prompt, latent couple, prompt", + "description": "This custom nodes provide a functionality similar to regional prompts, offering couple features at the attention level." + }, + { + "id":"https://github.com/FizzleDorf/ComfyUI_FizzNodes", + "tags":"deforum", + "description": "This custom nodes provide functionality that assists in animation creation, similar to deforum." + }, + { + "id":"https://github.com/seanlynch/comfyui-optical-flow", + "tags":"deforum, vid2vid", + "description": "This custom nodes provide functionality that assists in animation creation, similar to deforum." + }, + { + "id":"https://github.com/ssitu/ComfyUI_fabric", + "tags":"fabric", + "description": "Similar to sd-webui-fabric, this custom nodes provide the functionality of [a/FABRIC](https://github.com/sd-fabric/fabric)." + }, + { + "id":"https://github.com/Zuellni/ComfyUI-ExLlama", + "tags":"ExLlama, prompt, language model", + "description": "Similar to text-generation-webui, this custom nodes provide the functionality of [a/exllama](https://github.com/turboderp/exllama)." + }, + { + "id":"https://github.com/spinagon/ComfyUI-seamless-tiling", + "tags":"tiling", + "description": "ComfyUI node for generating seamless textures Replicates 'Tiling' option from A1111" + }, + { + "id":"https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI", + "tags":"cd-tuner, negpip", + "description": "This extension is a port of the [a/sd-webui-cd-tuner](https://github.com/hako-mikan/sd-webui-cd-tuner)(a.k.a. CD(color/Detail) Tuner )and [a/sd-webui-negpip](https://github.com/hako-mikan/sd-webui-negpip)(a.k.a. NegPiP) extensions of A1111 to ComfyUI." 
+ }, + { + "id":"https://github.com/mcmonkeyprojects/sd-dynamic-thresholding", + "tags":"DT, dynamic thresholding", + "description": "This custom node is a port of the Dynamic Thresholding extension from A1111 to make it available for use in ComfyUI." + }, + { + "id":"https://github.com/hhhzzyang/Comfyui_Lama", + "tags":"lama, inpainting anything", + "description": "This extension provides custom nodes developed based on [a/LaMa](https://github.com/advimman/lama) and [a/Inpainting anything](https://github.com/geekyutao/Inpaint-Anything)." + }, + { + "id":"https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor", + "tags":"lama", + "description": "This extension provides custom nodes for [a/LaMa](https://github.com/advimman/lama) functionality." + }, + { + "id":"https://github.com/Haoming02/comfyui-diffusion-cg", + "tags":"diffusion-cg", + "description": "This extension provides custom nodes for [a/SD Webui Diffusion Color Grading](https://github.com/Haoming02/sd-webui-diffusion-cg) functionality." + }, + { + "id":"https://github.com/asagi4/ComfyUI-CADS", + "tags":"diffusion-cg", + "description": "This extension provides custom nodes for [a/sd-webui-cads](https://github.com/v0xie/sd-webui-cads) functionality." + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/channels.list b/custom_nodes/ComfyUI-Manager/channels.list new file mode 100644 index 0000000000000000000000000000000000000000..b00b66ca25da160672c88466c5ea3220a612c1ca --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/channels.list @@ -0,0 +1,5 @@ +default::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main +recent::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/node_db/new +legacy::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/node_db/legacy +forked::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/node_db/forked +dev::https://raw.githubusercontent.com/ltdrdata/ComfyUI-Manager/main/node_db/dev \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/custom-node-list.json b/custom_nodes/ComfyUI-Manager/custom-node-list.json new file mode 100644 index 0000000000000000000000000000000000000000..af1946aa6717eeba2255eac7cdb87abab0ca266c --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/custom-node-list.json @@ -0,0 +1,3465 @@ +{ + "custom_nodes": [ + { + "author": "Dr.Lt.Data", + "title": "ComfyUI-Manager", + "reference": "https://github.com/ltdrdata/ComfyUI-Manager", + "files": [ + "https://github.com/ltdrdata/ComfyUI-Manager" + ], + "install_type": "git-clone", + "description": "ComfyUI-Manager itself is also a custom node." + }, + { + "author": "Dr.Lt.Data", + "title": "ComfyUI Impact Pack", + "reference": "https://github.com/ltdrdata/ComfyUI-Impact-Pack", + "files": [ + "https://github.com/ltdrdata/ComfyUI-Impact-Pack" + ], + "pip": ["ultralytics"], + "install_type": "git-clone", + "description": "This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler.\n[w/NOTE:'Segs & Mask' has been renamed to 'ImpactSegsAndMask.' 
Please replace the node with the new name.]" + }, + { + "author": "Dr.Lt.Data", + "title": "ComfyUI Inspire Pack", + "reference": "https://github.com/ltdrdata/ComfyUI-Inspire-Pack", + "nodename_pattern": "Inspire$", + "files": [ + "https://github.com/ltdrdata/ComfyUI-Inspire-Pack" + ], + "install_type": "git-clone", + "description": "This extension provides various nodes to support Lora Block Weight and the Impact Pack. Provides many easily applicable regional features and applications for Variation Seed." + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments" + ], + "install_type": "git-clone", + "description": "Nodes: ModelSamplerTonemapNoiseTest, TonemapNoiseWithRescaleCFG, ReferenceOnlySimple, RescaleClassifierFreeGuidanceTest, ModelMergeBlockNumber, ModelMergeSDXL, ModelMergeSDXLTransformers, ModelMergeSDXLDetailedTransformers.[w/NOTE: This is a consolidation of the previously separate custom nodes. Please delete the sampler_tonemap.py, sampler_rescalecfg.py, advanced_model_merging.py, sdxl_model_merging.py, and reference_only.py files installed in custom_nodes before.]" + }, + { + "author": "Stability-AI", + "title": "stability-ComfyUI-nodes", + "reference": "https://github.com/Stability-AI/stability-ComfyUI-nodes", + "files": [ + "https://github.com/Stability-AI/stability-ComfyUI-nodes" + ], + "install_type": "git-clone", + "description": "Nodes: ColorBlend, ControlLoraSave, GetImageSize. NOTE: Control-LoRA recolor example uses these nodes." + }, + { + "author": "Fannovel16", + "title": "ComfyUI's ControlNet Auxiliary Preprocessors", + "reference": "https://github.com/Fannovel16/comfyui_controlnet_aux", + "files": [ + "https://github.com/Fannovel16/comfyui_controlnet_aux" + ], + "install_type": "git-clone", + "description": "This is a rework of comfyui_controlnet_preprocessors based on ControlNet auxiliary models by 🤗. I think the old repo isn't good enough to maintain. All old workflow will still be work with this repo but the version option won't do anything. Almost all v1 preprocessors are replaced by v1.1 except those doesn't appear in v1.1. [w/NOTE: Please refrain from using the controlnet preprocessor alongside this installation, as it may lead to conflicts and prevent proper recognition.]" + }, + { + "author": "Fannovel16", + "title": "ComfyUI Frame Interpolation", + "reference": "https://github.com/Fannovel16/ComfyUI-Frame-Interpolation", + "files": [ + "https://github.com/Fannovel16/ComfyUI-Frame-Interpolation" + ], + "install_type": "git-clone", + "description": "Nodes: KSampler Gradually Adding More Denoise (efficient)" + }, + { + "author": "Fannovel16", + "title": "ComfyUI Loopchain", + "reference": "https://github.com/Fannovel16/ComfyUI-Loopchain", + "files": [ + "https://github.com/Fannovel16/ComfyUI-Loopchain" + ], + "install_type": "git-clone", + "description": "A collection of nodes which can be useful for animation in ComfyUI. The main focus of this extension is implementing a mechanism called loopchain. A loopchain in this case is the chain of nodes only executed repeatly in the workflow. If a node chain contains a loop node from this extension, it will become a loop chain." 
+ }, + { + "author": "Fannovel16", + "title": "ComfyUI MotionDiff", + "reference": "https://github.com/Fannovel16/ComfyUI-MotionDiff", + "files": [ + "https://github.com/Fannovel16/ComfyUI-MotionDiff" + ], + "install_type": "git-clone", + "description": "Implementation of MDM, MotionDiffuse and ReMoDiffuse into ComfyUI." + }, + { + "author": "Fannovel16", + "title": "ComfyUI-Video-Matting", + "reference": "https://github.com/Fannovel16/ComfyUI-Video-Matting", + "files": [ + "https://github.com/Fannovel16/ComfyUI-Video-Matting" + ], + "install_type": "git-clone", + "description": "A minimalistic implementation of [a/Robust Video Matting (RVM)](https://github.com/PeterL1n/RobustVideoMatting/) in ComfyUI" + }, + { + "author": "biegert", + "title": "CLIPSeg", + "reference": "https://github.com/biegert/ComfyUI-CLIPSeg", + "files": [ + "https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py" + ], + "install_type": "copy", + "description": "The CLIPSeg node generates a binary mask for a given input image and text prompt." + }, + { + "author": "BlenderNeko", + "title": "ComfyUI Cutoff", + "reference": "https://github.com/BlenderNeko/ComfyUI_Cutoff", + "files": [ + "https://github.com/BlenderNeko/ComfyUI_Cutoff" + ], + "install_type": "git-clone", + "description": "These custom nodes provides features that allow for better control over the effects of the text prompt." + }, + { + "author": "BlenderNeko", + "title": "Advanced CLIP Text Encode", + "reference": "https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb", + "files": [ + "https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb" + ], + "install_type": "git-clone", + "description": "Advanced CLIP Text Encode (if you need A1111 like prompt. you need this. But Cutoff node includes this feature, already.)" + }, + { + "author": "BlenderNeko", + "title": "ComfyUI Noise", + "reference": "https://github.com/BlenderNeko/ComfyUI_Noise", + "files": [ + "https://github.com/BlenderNeko/ComfyUI_Noise" + ], + "install_type": "git-clone", + "description": "This extension contains 6 nodes for ComfyUI that allows for more control and flexibility over the noise." + }, + { + "author": "BlenderNeko", + "title": "Tiled sampling for ComfyUI", + "reference": "https://github.com/BlenderNeko/ComfyUI_TiledKSampler", + "files": [ + "https://github.com/BlenderNeko/ComfyUI_TiledKSampler" + ], + "install_type": "git-clone", + "description": "This extension contains a tiled sampler for ComfyUI. It allows for denoising larger images by splitting it up into smaller tiles and denoising these. It tries to minimize any seams for showing up in the end result by gradually denoising all tiles one step at the time and randomizing tile positions for every step." + }, + { + "author": "BlenderNeko", + "title": "SeeCoder [WIP]", + "reference": "https://github.com/BlenderNeko/ComfyUI_SeeCoder", + "files": [ + "https://github.com/BlenderNeko/ComfyUI_SeeCoder" + ], + "install_type": "git-clone", + "description": "It provides the capability to generate CLIP from an image input, unlike unCLIP, which works in all models. 
+    },
+    {
+        "author": "jags111",
+        "title": "Efficiency Nodes for ComfyUI Version 2.0+",
+        "reference": "https://github.com/jags111/efficiency-nodes-comfyui",
+        "files": [
+            "https://github.com/jags111/efficiency-nodes-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "A collection of ComfyUI custom nodes to help streamline workflows and reduce total node count.[w/NOTE: This node was originally created by LucianoCirino, but the [a/original repository](https://github.com/LucianoCirino/efficiency-nodes-comfyui) is no longer maintained and has been forked by a new maintainer. To use the forked version, you should uninstall the original version and **REINSTALL** this one.]"
+    },
+    {
+        "author": "jags111",
+        "title": "ComfyUI_Jags_VectorMagic",
+        "reference": "https://github.com/jags111/ComfyUI_Jags_VectorMagic",
+        "files": [
+            "https://github.com/jags111/ComfyUI_Jags_VectorMagic"
+        ],
+        "install_type": "git-clone",
+        "description": "A collection of nodes to explore vector and image manipulation."
+    },
+    {
+        "author": "jags111",
+        "title": "ComfyUI_Jags_Audiotools",
+        "reference": "https://github.com/jags111/ComfyUI_Jags_Audiotools",
+        "files": [
+            "https://github.com/jags111/ComfyUI_Jags_Audiotools"
+        ],
+        "install_type": "git-clone",
+        "description": "A collection of amazing audio tools for working with audio and sound files in ComfyUI."
+    },
+    {
+        "author": "Derfuu",
+        "title": "Derfuu_ComfyUI_ModdedNodes",
+        "reference": "https://github.com/Derfuu/Derfuu_ComfyUI_ModdedNodes",
+        "files": [
+            "https://github.com/Derfuu/Derfuu_ComfyUI_ModdedNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Automate calculations depending on image sizes, or anything else you want."
+    },
+    {
+        "author": "paulo-coronado",
+        "title": "comfy_clip_blip_node",
+        "reference": "https://github.com/paulo-coronado/comfy_clip_blip_node",
+        "files": [
+            "https://github.com/paulo-coronado/comfy_clip_blip_node"
+        ],
+        "install_type": "git-clone",
+        "apt_dependency": [
+            "rustc",
+            "cargo"
+        ],
+        "description": "CLIPTextEncodeBLIP: This custom node provides a CLIP Encoder that is capable of receiving images as input."
+    },
+    {
+        "author": "Davemane42",
+        "title": "Visual Area Conditioning / Latent composition",
+        "reference": "https://github.com/Davemane42/ComfyUI_Dave_CustomNode",
+        "files": [
+            "https://github.com/Davemane42/ComfyUI_Dave_CustomNode"
+        ],
+        "install_type": "git-clone",
+        "description": "This tool provides custom nodes that allow visualization and configuration of area conditioning and latent composites."
+    },
+    {
+        "author": "WASasquatch",
+        "title": "WAS Node Suite",
+        "reference": "https://github.com/WASasquatch/was-node-suite-comfyui",
+        "pip": ["numba"],
+        "files": [
+            "https://github.com/WASasquatch/was-node-suite-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "A node suite for ComfyUI with many new nodes, such as image processing, text processing, and more."
+    },
+    {
+        "author": "WASasquatch",
+        "title": "ComfyUI Preset Merger",
+        "reference": "https://github.com/WASasquatch/ComfyUI_Preset_Merger",
+        "files": [
+            "https://github.com/WASasquatch/ComfyUI_Preset_Merger"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: ModelMergeByPreset. Merge checkpoint models by preset."
+    },
+    {
+        "author": "WASasquatch",
+        "title": "PPF_Noise_ComfyUI",
+        "reference": "https://github.com/WASasquatch/PPF_Noise_ComfyUI",
+        "files": [
+            "https://github.com/WASasquatch/PPF_Noise_ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: WAS_PFN_Latent. Perlin Power Fractal Noisey Latents"
+    },
+    {
+        "author": "WASasquatch",
+        "title": "Power Noise Suite for ComfyUI",
+        "reference": "https://github.com/WASasquatch/PowerNoiseSuite",
+        "files": [
+            "https://github.com/WASasquatch/PowerNoiseSuite"
+        ],
+        "install_type": "git-clone",
+        "description": "Power Noise Suite contains nodes centered around latent noise input and diffusion, as well as latent adjustments."
+    },
+    {
+        "author": "WASasquatch",
+        "title": "FreeU_Advanced",
+        "reference": "https://github.com/WASasquatch/FreeU_Advanced",
+        "files": [
+            "https://github.com/WASasquatch/FreeU_Advanced"
+        ],
+        "install_type": "git-clone",
+        "description": "This custom node provides advanced settings for FreeU."
+    },
+    {
+        "author": "WASasquatch",
+        "title": "ASTERR",
+        "reference": "https://github.com/WASasquatch/ASTERR",
+        "files": [
+            "https://github.com/WASasquatch/ASTERR"
+        ],
+        "install_type": "git-clone",
+        "description": "Abstract Syntax Trees Evaluated Restricted Run (ASTERR) is a Python script executor for ComfyUI. [w/Warning: ASTERR runs Python code from a web interface! It is highly recommended to run this in a closed-off environment, as it has potential security risks.]"
+    },
+    {
+        "author": "WASasquatch",
+        "title": "WAS_Extras",
+        "reference": "https://github.com/WASasquatch/WAS_Extras",
+        "files": [
+            "https://github.com/WASasquatch/WAS_Extras"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Conditioning (Blend), Inpainting VAE Encode (WAS), VividSharpen. Experimental nodes, or other random extra helper nodes."
+    },
+    {
+        "author": "omar92",
+        "title": "Quality of life Suit:V2",
+        "reference": "https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92",
+        "files": [
+            "https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92"
+        ],
+        "install_type": "git-clone",
+        "description": "OpenAI suite, String suite, Latent Tools, Image Tools: These custom nodes provide expanded functionality for image, string, and latent processing, as well as the ability to interface with models such as ChatGPT/DALL-E 2."
+    },
+    {
+        "author": "lilly1987",
+        "title": "simple wildcard for ComfyUI",
+        "reference": "https://github.com/lilly1987/ComfyUI_node_Lilly",
+        "files": [
+            "https://github.com/lilly1987/ComfyUI_node_Lilly"
+        ],
+        "install_type": "git-clone",
+        "description": "These custom nodes provide a feature to insert arbitrary inputs through wildcards in the prompt. Additionally, this tool provides features that help simplify workflows, such as VAELoaderDecoder and SimplerSample."
+    },
+    {
+        "author": "sylym",
+        "title": "Vid2vid",
+        "reference": "https://github.com/sylym/comfy_vid2vid",
+        "files": [
+            "https://github.com/sylym/comfy_vid2vid"
+        ],
+        "install_type": "git-clone",
+        "description": "A node suite for ComfyUI that allows you to load an image sequence and generate a new image sequence with a different style or content."
+    },
+    {
+        "author": "EllangoK",
+        "title": "ComfyUI-post-processing-nodes",
+        "reference": "https://github.com/EllangoK/ComfyUI-post-processing-nodes",
+        "files": [
+            "https://github.com/EllangoK/ComfyUI-post-processing-nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "A collection of post-processing nodes for ComfyUI; simply download this repo and drag it into your custom_nodes folder."
+    },
+    {
+        "author": "LEv145",
+        "title": "ImagesGrid",
+        "reference": "https://github.com/LEv145/images-grid-comfy-plugin",
+        "files": [
+            "https://github.com/LEv145/images-grid-comfy-plugin"
+        ],
+        "install_type": "git-clone",
+        "description": "This tool provides a viewer node that allows for checking multiple outputs in a grid, similar to the X/Y Plot extension."
+    },
+    {
+        "author": "diontimmer",
+        "title": "ComfyUI-Vextra-Nodes",
+        "reference": "https://github.com/diontimmer/ComfyUI-Vextra-Nodes",
+        "files": [
+            "https://github.com/diontimmer/ComfyUI-Vextra-Nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Pixel Sort, Swap Color Mode, Solid Color, Glitch This, Add Text To Image, Play Sound, Prettify Prompt, Generate Noise, Flatten Colors"
+    },
+    {
+        "author": "hnmr293",
+        "title": "ComfyUI-nodes-hnmr",
+        "reference": "https://github.com/hnmr293/ComfyUI-nodes-hnmr",
+        "files": [
+            "https://github.com/hnmr293/ComfyUI-nodes-hnmr"
+        ],
+        "install_type": "git-clone",
+        "description": "Provides various custom nodes for Latent, Sampling, Model, Loader, Image, and Text."
+    },
+    {
+        "author": "BadCafeCode",
+        "title": "Masquerade Nodes",
+        "reference": "https://github.com/BadCafeCode/masquerade-nodes-comfyui",
+        "files": [
+            "https://github.com/BadCafeCode/masquerade-nodes-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "This is a node pack for ComfyUI, primarily dealing with masks."
+    },
+    {
+        "author": "guoyk93",
+        "title": "y.k.'s ComfyUI node suite",
+        "reference": "https://github.com/guoyk93/yk-node-suite-comfyui",
+        "files": [
+            "https://github.com/guoyk93/yk-node-suite-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: YKImagePadForOutpaint, YKMaskToImage"
+    },
+    {
+        "author": "Jcd1230",
+        "title": "Rembg Background Removal Node for ComfyUI",
+        "reference": "https://github.com/Jcd1230/rembg-comfyui-node",
+        "files": [
+            "https://github.com/Jcd1230/rembg-comfyui-node"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Image Remove Background (rembg)"
+    },
+    {
+        "author": "YinBailiang",
+        "title": "MergeBlockWeighted_fo_ComfyUI",
+        "reference": "https://github.com/YinBailiang/MergeBlockWeighted_fo_ComfyUI",
+        "files": [
+            "https://github.com/YinBailiang/MergeBlockWeighted_fo_ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: MergeBlockWeighted"
+    },
+    {
+        "author": "trojblue",
+        "title": "trNodes",
+        "reference": "https://github.com/trojblue/trNodes",
+        "files": [
+            "https://github.com/trojblue/trNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: image_layering, color_correction, model_router"
+    },
+    {
+        "author": "szhublox",
+        "title": "Auto-MBW",
+        "reference": "https://github.com/szhublox/ambw_comfyui",
+        "files": [
+            "https://github.com/szhublox/ambw_comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Auto-MBW for ComfyUI, loosely based on sdweb-auto-MBW. Nodes: auto merge block weighted"
+    },
+    {
+        "author": "city96",
+        "title": "ComfyUI_NetDist",
+        "reference": "https://github.com/city96/ComfyUI_NetDist",
+        "files": [
+            "https://github.com/city96/ComfyUI_NetDist"
+        ],
+        "install_type": "git-clone",
+        "description": "Run ComfyUI workflows on multiple local GPUs/networked machines. Nodes: Remote images, Local Remote control"
+    },
+    {
+        "author": "city96",
+        "title": "Latent-Interposer",
+        "reference": "https://github.com/city96/SD-Latent-Interposer",
+        "files": [
+            "https://github.com/city96/SD-Latent-Interposer"
+        ],
+        "install_type": "git-clone",
+        "description": "Custom node to convert latents between SDXL and SD v1.5 directly, without the VAE decoding/encoding step."
+    },
+    {
+        "author": "city96",
+        "title": "SD-Advanced-Noise",
+        "reference": "https://github.com/city96/SD-Advanced-Noise",
+        "files": [
+            "https://github.com/city96/SD-Advanced-Noise"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: LatentGaussianNoise, MathEncode. An experimental custom node that generates latent noise directly by utilizing the linear characteristics of the latent space."
+    },
+    {
+        "author": "city96",
+        "title": "SD-Latent-Upscaler",
+        "reference": "https://github.com/city96/SD-Latent-Upscaler",
+        "files": [
+            "https://github.com/city96/SD-Latent-Upscaler"
+        ],
+        "pip": ["huggingface-hub"],
+        "install_type": "git-clone",
+        "description": "Upscaling stable diffusion latents using a small neural network."
+    },
+    {
+        "author": "city96",
+        "title": "ComfyUI_DiT [WIP]",
+        "reference": "https://github.com/city96/ComfyUI_DiT",
+        "files": [
+            "https://github.com/city96/ComfyUI_DiT"
+        ],
+        "pip": ["huggingface-hub"],
+        "install_type": "git-clone",
+        "description": "Testbed for [a/DiT (Scalable Diffusion Models with Transformers)](https://github.com/facebookresearch/DiT). [w/None of this code is stable; expect breaking changes if for some reason you want to use this.]"
+    },
+    {
+        "author": "city96",
+        "title": "ComfyUI_ColorMod",
+        "reference": "https://github.com/city96/ComfyUI_ColorMod",
+        "files": [
+            "https://github.com/city96/ComfyUI_ColorMod"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension currently has two sets of nodes - one set for editing the contrast/color of images and another set for saving images as 16-bit PNG files."
+    },
+    {
+        "author": "city96",
+        "title": "Extra Models for ComfyUI",
+        "reference": "https://github.com/city96/ComfyUI_ExtraModels",
+        "files": [
+            "https://github.com/city96/ComfyUI_ExtraModels"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension aims to add support for various random image diffusion models to ComfyUI."
+    },
+    {
+        "author": "Kaharos94",
+        "title": "ComfyUI-Saveaswebp",
+        "reference": "https://github.com/Kaharos94/ComfyUI-Saveaswebp",
+        "files": [
+            "https://github.com/Kaharos94/ComfyUI-Saveaswebp"
+        ],
+        "install_type": "git-clone",
+        "description": "Save a picture as a WebP file in ComfyUI, with workflow loading support."
+    },
+    {
+        "author": "SLAPaper",
+        "title": "ComfyUI-Image-Selector",
+        "reference": "https://github.com/SLAPaper/ComfyUI-Image-Selector",
+        "files": [
+            "https://github.com/SLAPaper/ComfyUI-Image-Selector"
+        ],
+        "install_type": "git-clone",
+        "description": "A custom node for ComfyUI that can select one or more images from a batch."
+    },
+    {
+        "author": "flyingshutter",
+        "title": "As_ComfyUI_CustomNodes",
+        "reference": "https://github.com/flyingshutter/As_ComfyUI_CustomNodes",
+        "files": [
+            "https://github.com/flyingshutter/As_ComfyUI_CustomNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Manipulation nodes for Image, Latent"
+    },
+    {
+        "author": "Zuellni",
+        "title": "Zuellni/ComfyUI-Custom-Nodes",
+        "reference": "https://github.com/Zuellni/ComfyUI-Custom-Nodes",
+        "files": [
+            "https://github.com/Zuellni/ComfyUI-Custom-Nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: DeepFloyd, Filter, Select, Save, Decode, Encode, Repeat, Noise"
+    },
+    {
+        "author": "Zuellni",
+        "title": "ComfyUI-ExLlama",
+        "reference": "https://github.com/Zuellni/ComfyUI-ExLlama",
+        "files": [
+            "https://github.com/Zuellni/ComfyUI-ExLlama"
+        ],
+        "pip": ["sentencepiece", "https://github.com/jllllll/exllama/releases/download/0.0.17/exllama-0.0.17+cu118-cp310-cp310-win_amd64.whl"],
+        "install_type": "git-clone",
+        "description": "Nodes: ExLlama Loader, ExLlama Generator.\nUsed to load 4-bit GPTQ Llama/2 models. You can find a lot of them over at [a/https://huggingface.co/TheBloke](https://huggingface.co/TheBloke)[w/NOTE: You need to manually install a pip package that suits your system. For example, if your system is 'Python 3.10 + Windows + CUDA 11.8', then you need to install 'exllama-0.0.17+cu118-cp310-cp310-win_amd64.whl'. Available package files are [a/here](https://github.com/jllllll/exllama/releases)]"
+    },
+    {
+        "author": "Zuellni",
+        "title": "ComfyUI PickScore Nodes",
+        "reference": "https://github.com/Zuellni/ComfyUI-PickScore-Nodes",
+        "files": [
+            "https://github.com/Zuellni/ComfyUI-PickScore-Nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Image scoring nodes for ComfyUI using PickScore with a batch of images to predict which ones fit a given prompt the best."
+    },
+    {
+        "author": "AlekPet",
+        "title": "AlekPet/ComfyUI_Custom_Nodes_AlekPet",
+        "reference": "https://github.com/AlekPet/ComfyUI_Custom_Nodes_AlekPet",
+        "files": [
+            "https://github.com/AlekPet/ComfyUI_Custom_Nodes_AlekPet"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: PoseNode, TranslateCLIPTextEncodeNode"
+    },
+    {
+        "author": "pythongosssss",
+        "title": "ComfyUI WD 1.4 Tagger",
+        "reference": "https://github.com/pythongosssss/ComfyUI-WD14-Tagger",
+        "files": [
+            "https://github.com/pythongosssss/ComfyUI-WD14-Tagger"
+        ],
+        "install_type": "git-clone",
+        "description": "A ComfyUI extension allowing the interrogation of booru tags from images."
+    },
+    {
+        "author": "pythongosssss",
+        "title": "pythongosssss/ComfyUI-Custom-Scripts",
+        "reference": "https://github.com/pythongosssss/ComfyUI-Custom-Scripts",
+        "files": [
+            "https://github.com/pythongosssss/ComfyUI-Custom-Scripts"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension provides: Auto Arrange Graph, Workflow SVG, Favicon Status, Image Feed, Latent Upscale By, Lock Nodes & Groups, Lora Subfolders, Preset Text, Show Text, Touch Support, Link Render Mode, Locking, Node Finder, Quick Nodes, Show Image On Menu, Workflow Management, Custom Widget Default Values"
+    },
+    {
+        "author": "strimmlarn",
+        "title": "ComfyUI_Strimmlarns_aesthetic_score",
+        "reference": "https://github.com/strimmlarn/ComfyUI_Strimmlarns_aesthetic_score",
+        "js_path": "strimmlarn",
+        "files": [
+            "https://github.com/strimmlarn/ComfyUI_Strimmlarns_aesthetic_score"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: CalculateAestheticScore, LoadAesteticModel, AesthetlcScoreSorter, ScoreToNumber"
+    },
+    {
+        "author": "tinyterra",
+        "title": "tinyterraNodes",
+        "reference": "https://github.com/tinyterra/ComfyUI_tinyterraNodes",
+        "files": [
+            "https://github.com/TinyTerra/ComfyUI_tinyterraNodes"
+        ],
+        "install_type": "git-clone",
+        "nodename_pattern": "^ttN ",
+        "description": "This extension offers various pipe nodes, a fullscreen image viewer based on node history, dynamic widgets, interface customization, and more."
+    },
+    {
+        "author": "Jordach",
+        "title": "comfy-plasma",
+        "reference": "https://github.com/Jordach/comfy-plasma",
+        "files": [
+            "https://github.com/Jordach/comfy-plasma"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Plasma Noise, Random Noise, Greyscale Noise, Pink Noise, Brown Noise, Plasma KSampler"
+    },
+    {
+        "author": "bvhari",
+        "title": "ImageProcessing",
+        "reference": "https://github.com/bvhari/ComfyUI_ImageProcessing",
+        "files": [
+            "https://github.com/bvhari/ComfyUI_ImageProcessing"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI custom nodes to apply various image processing techniques."
+    },
+    {
+        "author": "bvhari",
+        "title": "LatentToRGB",
+        "reference": "https://github.com/bvhari/ComfyUI_LatentToRGB",
+        "files": [
+            "https://github.com/bvhari/ComfyUI_LatentToRGB"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI custom node to convert latent to RGB."
+    },
+    {
+        "author": "bvhari",
+        "title": "ComfyUI_PerpNeg [WIP]",
+        "reference": "https://github.com/bvhari/ComfyUI_PerpNeg",
+        "files": [
+            "https://github.com/bvhari/ComfyUI_PerpNeg"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: KSampler (Advanced + Perp-Neg). Implementation of [a/Perp-Neg](https://perp-neg.github.io/).\nIncludes Tonemap and CFG Rescale options. [w/WARNING: Experimental code, might have incompatibilities and edge cases.]"
+    },
+    {
+        "author": "bvhari",
+        "title": "ComfyUI_PerpWeight",
+        "reference": "https://github.com/bvhari/ComfyUI_PerpWeight",
+        "files": [
+            "https://github.com/bvhari/ComfyUI_PerpWeight"
+        ],
+        "install_type": "git-clone",
+        "description": "A novel weighting scheme for token vectors from CLIP. Allows a wider range of values for the weight. Inspired by Perp-Neg."
+    },
+    {
+        "author": "ssitu",
+        "title": "UltimateSDUpscale",
+        "reference": "https://github.com/ssitu/ComfyUI_UltimateSDUpscale",
+        "files": [
+            "https://github.com/ssitu/ComfyUI_UltimateSDUpscale"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI nodes for the Ultimate Stable Diffusion Upscale script by Coyote-A."
+    },
+    {
+        "author": "ssitu",
+        "title": "NestedNodeBuilder",
+        "reference": "https://github.com/ssitu/ComfyUI_NestedNodeBuilder",
+        "files": [
+            "https://github.com/ssitu/ComfyUI_NestedNodeBuilder"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension provides the ability to combine multiple nodes into a single node."
+    },
+    {
+        "author": "ssitu",
+        "title": "Restart Sampling",
+        "reference": "https://github.com/ssitu/ComfyUI_restart_sampling",
+        "files": [
+            "https://github.com/ssitu/ComfyUI_restart_sampling"
+        ],
+        "install_type": "git-clone",
+        "description": "Unofficial ComfyUI nodes for restart sampling based on the paper 'Restart Sampling for Improving Generative Processes' ([a/paper](https://arxiv.org/abs/2306.14878), [a/repo](https://github.com/Newbeeer/diffusion_restart_sampling))"
+    },
+    {
+        "author": "ssitu",
+        "title": "ComfyUI roop",
+        "reference": "https://github.com/ssitu/ComfyUI_roop",
+        "files": [
+            "https://github.com/ssitu/ComfyUI_roop"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI nodes for the roop A1111 webui script."
+    },
+    {
+        "author": "ssitu",
+        "title": "ComfyUI fabric",
+        "reference": "https://github.com/ssitu/ComfyUI_fabric",
+        "files": [
+            "https://github.com/ssitu/ComfyUI_fabric"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI nodes based on the paper [a/FABRIC: Personalizing Diffusion Models with Iterative Feedback](https://arxiv.org/abs/2307.10159) (Feedback via Attention-Based Reference Image Conditioning)"
+    },
+    {
+        "author": "space-nuko",
+        "title": "Disco Diffusion",
+        "reference": "https://github.com/space-nuko/ComfyUI-Disco-Diffusion",
+        "files": [
+            "https://github.com/space-nuko/ComfyUI-Disco-Diffusion"
+        ],
+        "install_type": "git-clone",
+        "description": "Modularized version of Disco Diffusion for use with ComfyUI."
+    },
+    {
+        "author": "space-nuko",
+        "title": "OpenPose Editor",
+        "reference": "https://github.com/space-nuko/ComfyUI-OpenPose-Editor",
+        "files": [
+            "https://github.com/space-nuko/ComfyUI-OpenPose-Editor"
+        ],
+        "install_type": "git-clone",
+        "description": "A port of the openpose-editor extension for stable-diffusion-webui. NOTE: Requires [a/this ComfyUI patch](https://github.com/comfyanonymous/ComfyUI/pull/711) to work correctly"
+    },
+    {
+        "author": "space-nuko",
+        "title": "nui suite",
+        "reference": "https://github.com/space-nuko/nui-suite",
+        "files": [
+            "https://github.com/space-nuko/nui-suite"
+        ],
+        "install_type": "git-clone",
+        "description": "NODES: Dynamic Prompts Text Encode, Feeling Lucky Text Encode, Output String"
+    },
+    {
+        "author": "Nourepide",
+        "title": "Allor Plugin",
+        "reference": "https://github.com/Nourepide/ComfyUI-Allor",
+        "files": [
+            "https://github.com/Nourepide/ComfyUI-Allor"
+        ],
+        "install_type": "git-clone",
+        "description": "Allor is a plugin for ComfyUI with an emphasis on transparency and performance.\n[w/NOTE: If you do not disable the default node override feature in the settings, the built-in ImageScale and ImageScaleBy nodes will be disabled. (ref: [a/Configuration](https://github.com/Nourepide/ComfyUI-Allor#configuration))]"
+    },
+    {
+        "author": "melMass",
+        "title": "MTB Nodes",
+        "reference": "https://github.com/melMass/comfy_mtb",
+        "files": [
+            "https://github.com/melMass/comfy_mtb"
+        ],
+        "nodename_pattern": "\\(mtb\\)$",
+        "install_type": "git-clone",
+        "description": "NODES: Face Swap, Film Interpolation, Latent Lerp, Int To Number, Bounding Box, Crop, Uncrop, ImageBlur, Denoise, ImageCompare, RGB to HSV, HSV to RGB, Color Correct, Modulo, Deglaze Image, Smart Step, ..."
+    },
+    {
+        "author": "xXAdonesXx",
+        "title": "NodeGPT",
+        "reference": "https://github.com/xXAdonesXx/NodeGPT",
+        "files": [
+            "https://github.com/xXAdonesXx/NodeGPT"
+        ],
+        "install_type": "git-clone",
+        "description": "Implementation of AutoGen inside ComfyUI. This repository is under development, and not everything is functioning correctly yet."
+    },
+    {
+        "author": "Suzie1",
+        "title": "ComfyUI_Comfyroll_CustomNodes",
+        "reference": "https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes",
+        "files": [
+            "https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Custom nodes for SDXL and SD1.5 including Multi-ControlNet, LoRA, Aspect Ratio, Process Switches, and many more nodes. NOTE: The maintainer has changed from RockOfFire to Suzie1."
+    },
+    {
+        "author": "bmad4ever",
+        "title": "ComfyUI-Bmad-DirtyUndoRedo",
+        "reference": "https://github.com/bmad4ever/ComfyUI-Bmad-DirtyUndoRedo",
+        "files": [
+            "https://github.com/bmad4ever/ComfyUI-Bmad-DirtyUndoRedo"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI extension that adds undo (and redo) functionality."
+    },
+    {
+        "author": "bmad4ever",
+        "title": "Bmad Nodes",
+        "reference": "https://github.com/bmad4ever/comfyui_bmad_nodes",
+        "files": [
+            "https://github.com/bmad4ever/comfyui_bmad_nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "This custom node offers the following functionalities: API support for setting up API requests, computer vision primarily for masking or collages, and general utility to streamline workflow setup or implement essential missing features."
+    },
+    {
+        "author": "bmad4ever",
+        "title": "comfyui_ab_sampler",
+        "reference": "https://github.com/bmad4ever/comfyui_ab_samplercustom",
+        "files": [
+            "https://github.com/bmad4ever/comfyui_ab_samplercustom"
+        ],
+        "install_type": "git-clone",
+        "description": "Experimental sampler node. Sampling alternates between A and B inputs until only one remains, starting with A. B steps run over a 2x2 grid, where 3/4 of the grid are copies of the original input latent. When the optional mask is used, the region outside the defined ROI is copied from the original latent at the end of every step."
+    },
+    {
+        "author": "FizzleDorf",
+        "title": "FizzNodes",
+        "reference": "https://github.com/FizzleDorf/ComfyUI_FizzNodes",
+        "files": [
+            "https://github.com/FizzleDorf/ComfyUI_FizzNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Scheduled prompts, scheduled float/int values and wave function nodes for animations and utility. Compatible with [a/framesync](https://www.framesync.xyz/) and [a/keyframe-string-generator](https://www.chigozie.co.uk/keyframe-string-generator/) for audio-synced animations in ComfyUI."
+    },
+    {
+        "author": "FizzleDorf",
+        "title": "ComfyUI-AIT",
+        "reference": "https://github.com/FizzleDorf/ComfyUI-AIT",
+        "files": [
+            "https://github.com/FizzleDorf/ComfyUI-AIT"
+        ],
+        "install_type": "git-clone",
+        "description": "A ComfyUI implementation of Meta's [a/AITemplate](https://github.com/facebookincubator/AITemplate) repo for faster inference using cpp/cuda. This new repo is behind the old version but is a much more stable foundation to keep AIT online. Please be patient as the repo will eventually include the same features as before.\nNOTE: You can find the old AIT extension in the legacy channel."
+    },
+    {
+        "author": "filipemeneses",
+        "title": "Pixelization",
+        "reference": "https://github.com/filipemeneses/comfy_pixelization",
+        "files": [
+            "https://github.com/filipemeneses/comfy_pixelization"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI node that pixelizes images."
+    },
+    {
+        "author": "shiimizu",
+        "title": "smZNodes",
+        "reference": "https://github.com/shiimizu/ComfyUI_smZNodes",
+        "files": [
+            "https://github.com/shiimizu/ComfyUI_smZNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "NODES: CLIP Text Encode++. Achieve identical embeddings from stable-diffusion-webui for ComfyUI."
+    },
+    {
+        "author": "ZaneA",
+        "title": "ImageReward",
+        "reference": "https://github.com/ZaneA/ComfyUI-ImageReward",
+        "files": [
+            "https://github.com/ZaneA/ComfyUI-ImageReward"
+        ],
+        "install_type": "git-clone",
+        "description": "NODES: ImageRewardLoader, ImageRewardScore"
+    },
+    {
+        "author": "SeargeDP",
+        "title": "SeargeSDXL",
+        "reference": "https://github.com/SeargeDP/SeargeSDXL",
+        "files": [
+            "https://github.com/SeargeDP/SeargeSDXL"
+        ],
+        "install_type": "git-clone",
+        "description": "Custom nodes for easier use of SDXL in ComfyUI, including an img2img workflow that utilizes both the base and refiner checkpoints."
+    },
+    {
+        "author": "cubiq",
+        "title": "Simple Math",
+        "reference": "https://github.com/cubiq/ComfyUI_SimpleMath",
+        "files": [
+            "https://github.com/cubiq/ComfyUI_SimpleMath"
+        ],
+        "install_type": "git-clone",
+        "description": "Custom node for ComfyUI to perform simple math operations."
+    },
+    {
+        "author": "cubiq",
+        "title": "ComfyUI_IPAdapter_plus",
+        "reference": "https://github.com/cubiq/ComfyUI_IPAdapter_plus",
+        "files": [
+            "https://github.com/cubiq/ComfyUI_IPAdapter_plus"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI reference implementation for IPAdapter models. The code is mostly taken from the original IPAdapter repository and laksjdjf's implementation; all credit goes to them. I just made the extension closer to the ComfyUI philosophy."
+    },
+    {
+        "author": "shockz0rz",
+        "title": "InterpolateEverything",
+        "reference": "https://github.com/shockz0rz/ComfyUI_InterpolateEverything",
+        "files": [
+            "https://github.com/shockz0rz/ComfyUI_InterpolateEverything"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Interpolate Poses, Interpolate Lineart, ... Custom nodes for interpolating between, well, everything in the Stable Diffusion ComfyUI."
+    },
+    {
+        "author": "yolanother",
+        "title": "Comfy UI Prompt Agent",
+        "reference": "https://github.com/yolanother/DTAIComfyPromptAgent",
+        "files": [
+            "https://github.com/yolanother/DTAIComfyPromptAgent"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Prompt Agent, Prompt Agent (String). This script provides a prompt agent node for the Comfy UI stable diffusion client."
+    },
+    {
+        "author": "yolanother",
+        "title": "Image to Text Node",
+        "reference": "https://github.com/yolanother/DTAIImageToTextNode",
+        "files": [
+            "https://github.com/yolanother/DTAIImageToTextNode"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Image URL to Text, Image to Text."
+    },
+    {
+        "author": "yolanother",
+        "title": "Comfy UI Online Loaders",
+        "reference": "https://github.com/yolanother/DTAIComfyLoaders",
+        "files": [
+            "https://github.com/yolanother/DTAIComfyLoaders"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Submit Image (Parameters), Submit Image. A collection of loaders that use a shared common online data source rather than relying on the files to be present locally."
+    },
+    {
+        "author": "yolanother",
+        "title": "Comfy AI DoubTech.ai Image Submission Node",
+        "reference": "https://github.com/yolanother/DTAIComfyImageSubmit",
+        "files": [
+            "https://github.com/yolanother/DTAIComfyImageSubmit"
+        ],
+        "install_type": "git-clone",
+        "description": "A ComfyAI submit node to upload images to DoubTech.ai"
+    },
+    {
+        "author": "yolanother",
+        "title": "Comfy UI QR Codes",
+        "reference": "https://github.com/yolanother/DTAIComfyQRCodes",
+        "files": [
+            "https://github.com/yolanother/DTAIComfyQRCodes"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension introduces QR code nodes for the Comfy UI stable diffusion client. NOTE: The ComfyUI qrcode extension is required."
+    },
+    {
+        "author": "yolanother",
+        "title": "Variables for Comfy UI",
+        "reference": "https://github.com/yolanother/DTAIComfyVariables",
+        "files": [
+            "https://github.com/yolanother/DTAIComfyVariables"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: String, Int, Float, Short String, CLIP Text Encode (With Variables), String Format, Short String Format. This extension introduces quality-of-life improvements by providing variable nodes and shared global variables."
+    },
+    {
+        "author": "sipherxyz",
+        "title": "comfyui-art-venture",
+        "reference": "https://github.com/sipherxyz/comfyui-art-venture",
+        "files": [
+            "https://github.com/sipherxyz/comfyui-art-venture"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: ImagesConcat, LoadImageFromUrl, AV_UploadImage"
+    },
+    {
+        "author": "SOELexicon",
+        "title": "LexMSDBNodes",
+        "reference": "https://github.com/SOELexicon/ComfyUI-LexMSDBNodes",
+        "files": [
+            "https://github.com/SOELexicon/ComfyUI-LexMSDBNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: MSSqlTableNode, MSSqlSelectNode. This extension provides custom nodes to interact with MSSQL."
+    },
+    {
+        "author": "pants007",
+        "title": "pants",
+        "reference": "https://github.com/pants007/comfy-pants",
+        "files": [
+            "https://github.com/pants007/comfy-pants"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Make Square Node, Interrogate Node, TextEncodeAIO"
+    },
+    {
+        "author": "evanspearman",
+        "title": "ComfyMath",
+        "reference": "https://github.com/evanspearman/ComfyMath",
+        "files": [
+            "https://github.com/evanspearman/ComfyMath"
+        ],
+        "install_type": "git-clone",
+        "description": "Provides Math Nodes for ComfyUI. Boolean Logic, Integer Arithmetic, Floating Point Arithmetic and Functions, Vec2, Vec3, and Vec4 Arithmetic and Functions"
+    },
+    {
+        "author": "civitai",
+        "title": "comfy-nodes",
+        "reference": "https://github.com/civitai/comfy-nodes",
+        "files": [
+            "https://github.com/civitai/comfy-nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: CivitAI_Loaders. Load checkpoints and LoRA models directly from the CivitAI API."
+    },
+    {
+        "author": "andersxa",
+        "title": "CLIP Directional Prompt Attention",
+        "reference": "https://github.com/andersxa/comfyui-PromptAttention",
+        "files": [
+            "https://github.com/andersxa/comfyui-PromptAttention"
+        ],
+        "pip": ["scikit-learn", "matplotlib"],
+        "install_type": "git-clone",
+        "description": "Nodes: CLIP Directional Prompt Attention Encode. Directional prompt attention tries to solve the problem of contextual words (or parts of the prompt) having an effect on much later or irrelevant parts of the prompt."
+    },
+    {
+        "author": "ArtVentureX",
+        "title": "AnimateDiff",
+        "reference": "https://github.com/ArtVentureX/comfyui-animatediff",
+        "pip": ["flash_attn"],
+        "files": [
+            "https://github.com/ArtVentureX/comfyui-animatediff"
+        ],
+        "install_type": "git-clone",
+        "description": "AnimateDiff integration for ComfyUI, adapted from sd-webui-animatediff.\n[w/You only need to download one of [a/mm_sd_v14.ckpt](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v14.ckpt) | [a/mm_sd_v15.ckpt](https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15.ckpt). Put the model weights under %%ComfyUI/custom_nodes/comfyui-animatediff/models%%. DO NOT change the model filename.]"
+    },
+    {
+        "author": "twri",
+        "title": "SDXL Prompt Styler",
+        "reference": "https://github.com/twri/sdxl_prompt_styler",
+        "files": [
+            "https://github.com/twri/sdxl_prompt_styler"
+        ],
+        "install_type": "git-clone",
+        "description": "SDXL Prompt Styler is a node that enables you to style prompts based on predefined templates stored in a JSON file."
+    },
+    {
+        "author": "wolfden",
+        "title": "SDXL Prompt Styler (customized version by wolfden)",
+        "reference": "https://github.com/wolfden/ComfyUi_PromptStylers",
+        "files": [
+            "https://github.com/wolfden/ComfyUi_PromptStylers"
+        ],
+        "install_type": "git-clone",
+        "description": "These custom nodes provide a variety of customized prompt stylers based on [a/twri/SDXL Prompt Styler](https://github.com/twri/sdxl_prompt_styler)."
+    },
+    {
+        "author": "wolfden",
+        "title": "ComfyUi_String_Function_Tree",
+        "reference": "https://github.com/wolfden/ComfyUi_String_Function_Tree",
+        "files": [
+            "https://github.com/wolfden/ComfyUi_String_Function_Tree"
+        ],
+        "install_type": "git-clone",
+        "description": "This custom node provides the capability to manipulate multiple string inputs."
+    },
+    {
+        "author": "daxthin",
+        "title": "DZ-FaceDetailer",
+        "reference": "https://github.com/daxthin/DZ-FaceDetailer",
+        "files": [
+            "https://github.com/daxthin/DZ-FaceDetailer"
+        ],
+        "install_type": "git-clone",
+        "description": "Face Detailer is a custom node for the 'ComfyUI' framework inspired by the !After Detailer extension from auto1111. It allows you to detect faces using Mediapipe and YOLOv8n to create masks for the detected faces."
+    },
+    {
+        "author": "asagi4",
+        "title": "ComfyUI prompt control",
+        "reference": "https://github.com/asagi4/comfyui-prompt-control",
+        "files": [
+            "https://github.com/asagi4/comfyui-prompt-control"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes for convenient prompt editing. The aim is to make basic generations in ComfyUI completely prompt-controllable."
+    },
+    {
+        "author": "asagi4",
+        "title": "ComfyUI-CADS",
+        "reference": "https://github.com/asagi4/ComfyUI-CADS",
+        "files": [
+            "https://github.com/asagi4/ComfyUI-CADS"
+        ],
+        "install_type": "git-clone",
+        "description": "Attempts to implement [a/CADS](https://arxiv.org/abs/2310.17347) for ComfyUI. Credit also to the [a/A1111 implementation](https://github.com/v0xie/sd-webui-cads/tree/main) that I used as a reference."
+    },
+    {
+        "author": "asagi4",
+        "title": "asagi4/comfyui-utility-nodes",
+        "reference": "https://github.com/asagi4/comfyui-utility-nodes",
+        "files": [
+            "https://github.com/asagi4/comfyui-utility-nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: MUJinjaRender, MUSimpleWildcard"
+    },
+    {
+        "author": "jamesWalker55",
+        "title": "ComfyUI - P2LDGAN Node",
+        "reference": "https://github.com/jamesWalker55/comfyui-p2ldgan",
+        "files": [
+            "https://github.com/jamesWalker55/comfyui-p2ldgan"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: P2LDGAN. This integrates P2LDGAN into ComfyUI. P2LDGAN extracts lineart from input images.\n[w/To use this extension, you need to download the [a/p2ldgan model](https://drive.google.com/file/d/1To4V_Btc3QhCLBWZ0PdSNgC1cbm3isHP) and save it in the %%ComfyUI/custom_nodes/comfyui-p2ldgan/checkpoints%% directory.]"
+    },
+    {
+        "author": "jamesWalker55",
+        "title": "Various ComfyUI Nodes by Type",
+        "reference": "https://github.com/jamesWalker55/comfyui-various",
+        "files": [
+            "https://github.com/jamesWalker55/comfyui-various"
+        ],
+        "nodename_pattern": "^JW",
+        "install_type": "git-clone",
+        "description": "Nodes: JWInteger, JWFloat, JWString, JWImageLoadRGB, JWImageResize, ..."
+    },
+    {
+        "author": "adieyal",
+        "title": "DynamicPrompts Custom Nodes",
+        "reference": "https://github.com/adieyal/comfyui-dynamicprompts",
+        "files": [
+            "https://github.com/adieyal/comfyui-dynamicprompts"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Random Prompts, Combinatorial Prompts, I'm Feeling Lucky, Magic Prompt, Jinja2 Templates. ComfyUI-DynamicPrompts is a custom nodes library that integrates into your existing ComfyUI library. It provides nodes that enable the use of Dynamic Prompts in your ComfyUI."
+    },
+    {
+        "author": "mihaiiancu",
+        "title": "mihaiiancu/Inpaint",
+        "reference": "https://github.com/mihaiiancu/ComfyUI_Inpaint",
+        "files": [
+            "https://github.com/mihaiiancu/ComfyUI_Inpaint"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: InpaintMediapipe. This node provides a simple interface to inpaint."
+    },
+    {
+        "author": "kwaroran",
+        "title": "abg-comfyui",
+        "reference": "https://github.com/kwaroran/abg-comfyui",
+        "files": [
+            "https://github.com/kwaroran/abg-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Remove Image Background (abg). An Anime Background Remover node for ComfyUI, based on the corresponding Hugging Face space; works the same as the ABG extension in automatic1111."
+    },
+    {
+        "author": "bash-j",
+        "title": "Mikey Nodes",
+        "reference": "https://github.com/bash-j/mikey_nodes",
+        "files": [
+            "https://github.com/bash-j/mikey_nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Prompt With Style, Prompt With SDXL, Resize Image for SDXL, Save Image With Prompt Data, HaldCLUT, Empty Latent Ratio Select/Custom SDXL"
+    },
+    {
+        "author": "failfa.st",
+        "title": "failfast-comfyui-extensions",
+        "reference": "https://github.com/failfa-st/failfast-comfyui-extensions",
+        "files": [
+            "https://github.com/failfa-st/failfast-comfyui-extensions"
+        ],
+        "install_type": "git-clone",
+        "description": "Node color customization, custom colors, dot reroutes, link rendering options, straight lines, group freezing, node pinning, automated arrangement of nodes, copy image"
+    },
+    {
+        "author": "Pfaeff",
+        "title": "pfaeff-comfyui",
+        "reference": "https://github.com/Pfaeff/pfaeff-comfyui",
+        "files": [
+            "https://github.com/Pfaeff/pfaeff-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: AstropulsePixelDetector, BackgroundRemover, ImagePadForBetterOutpaint, InpaintingPipelineLoader, Inpainting, ..."
+    },
+    {
+        "author": "wallish77",
+        "title": "wlsh_nodes",
+        "reference": "https://github.com/wallish77/wlsh_nodes",
+        "files": [
+            "https://github.com/wallish77/wlsh_nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Checkpoint Loader with Name, Save Prompt Info, Outpaint to Image, CLIP Positive-Negative, SDXL Quick Empty Latent, Empty Latent by Ratio, Time String, SDXL Steps, SDXL Resolutions ..."
+    },
+    {
+        "author": "Kosinkadink",
+        "title": "ComfyUI-Advanced-ControlNet",
+        "reference": "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet",
+        "files": [
+            "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: ControlNetLoaderAdvanced, DiffControlNetLoaderAdvanced, ScaledSoftControlNetWeights, SoftControlNetWeights, CustomControlNetWeights, SoftT2IAdapterWeights, CustomT2IAdapterWeights"
+    },
+    {
+        "author": "Kosinkadink",
+        "title": "AnimateDiff Evolved",
+        "reference": "https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved",
+        "files": [
+            "https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved"
+        ],
+        "install_type": "git-clone",
+        "description": "A forked repository that actively maintains [a/AnimateDiff](https://github.com/ArtVentureX/comfyui-animatediff), created by ArtVentureX.\n\nImproved AnimateDiff integration for ComfyUI, adapted from sd-webui-animatediff.\n[w/Download one or more motion models from [a/Original Models](https://huggingface.co/guoyww/animatediff/tree/main) | [a/Finetuned Models](https://huggingface.co/manshoety/AD_Stabilized_Motion/tree/main). See the README for additional model links and usage. Put the model weights under %%ComfyUI/custom_nodes/ComfyUI-AnimateDiff-Evolved/models%%. You are free to rename the models, but keeping the original names will ease use when sharing your workflow.]"
+    },
+    {
+        "author": "Kosinkadink",
+        "title": "ComfyUI-VideoHelperSuite",
+        "reference": "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite",
+        "files": [
+            "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: VHS_VideoCombine. Nodes related to video workflows."
+    },
+    {
+        "author": "Gourieff",
+        "title": "ReActor Node for ComfyUI",
+        "reference": "https://github.com/Gourieff/comfyui-reactor-node",
+        "files": [
+            "https://github.com/Gourieff/comfyui-reactor-node"
+        ],
+        "install_type": "git-clone",
+        "description": "The fast and simple 'roop-like' face swap extension node for ComfyUI, based on the ReActor (ex Roop-GE) SD-WebUI face swap extension."
+    },
+    {
+        "author": "imb101",
+        "title": "FaceSwap",
+        "reference": "https://github.com/imb101/ComfyUI-FaceSwap",
+        "files": [
+            "https://github.com/imb101/ComfyUI-FaceSwap"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: FaceSwapNode. A very basic custom node to enable face swapping in ComfyUI. (roop)"
+    },
+    {
+        "author": "Chaoses-Ib",
+        "title": "ComfyUI_Ib_CustomNodes",
+        "reference": "https://github.com/Chaoses-Ib/ComfyUI_Ib_CustomNodes",
+        "files": [
+            "https://github.com/Chaoses-Ib/ComfyUI_Ib_CustomNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: LoadImageFromPath. Load Image From Path loads the image directly from the given source path, instead of from ComfyUI's input folder."
+    },
+    {
+        "author": "AIrjen",
+        "title": "One Button Prompt",
+        "reference": "https://github.com/AIrjen/OneButtonPrompt",
+        "files": [
+            "https://github.com/AIrjen/OneButtonPrompt"
+        ],
+        "install_type": "git-clone",
+        "description": "One Button Prompt has a prompt generation node for beginners who have problems writing a good prompt, or advanced users who want to get inspired. It generates an entire prompt from scratch. It is random, but controlled. You simply load up the script and press generate, and let it surprise you."
+    },
+    {
+        "author": "coreyryanhanson",
+        "title": "ComfyQR",
+        "reference": "https://github.com/coreyryanhanson/ComfyQR",
+        "files": [
+            "https://github.com/coreyryanhanson/ComfyQR"
+        ],
+        "install_type": "git-clone",
+        "description": "QR generation within ComfyUI. Contains nodes suitable for workflows from generating basic QR images to techniques with advanced QR masking."
+    },
+    {
+        "author": "coreyryanhanson",
+        "title": "ComfyQR-scanning-nodes",
+        "reference": "https://github.com/coreyryanhanson/ComfyQR-scanning-nodes",
+        "files": [
+            "https://github.com/coreyryanhanson/ComfyQR-scanning-nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "A set of ComfyUI nodes to quickly test generated QR codes for scannability. A companion project to ComfyQR."
+    },
+    {
+        "author": "dimtoneff",
+        "title": "ComfyUI PixelArt Detector",
+        "reference": "https://github.com/dimtoneff/ComfyUI-PixelArt-Detector",
+        "files": [
+            "https://github.com/dimtoneff/ComfyUI-PixelArt-Detector"
+        ],
+        "install_type": "git-clone",
+        "description": "This node manipulates the pixel art image in ways that it should look pixel perfect (downscales, changes palette, upscales etc.)."
+    },
+    {
+        "author": "dimtoneff",
+        "title": "Eagle PNGInfo",
+        "reference": "https://github.com/hylarucoder/ComfyUI-Eagle-PNGInfo",
+        "files": [
+            "https://github.com/hylarucoder/ComfyUI-Eagle-PNGInfo"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: EagleImageNode"
+    },
+    {
+        "author": "theUpsider",
+        "title": "Styles CSV Loader Extension for ComfyUI",
+        "reference": "https://github.com/theUpsider/ComfyUI-Styles_CSV_Loader",
+        "files": [
+            "https://github.com/theUpsider/ComfyUI-Styles_CSV_Loader"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension allows users to load styles from a CSV file, primarily for migration purposes from the automatic1111 Stable Diffusion web UI."
+    },
+    {
+        "author": "M1kep",
+        "title": "Comfy_KepListStuff",
+        "reference": "https://github.com/M1kep/Comfy_KepListStuff",
+        "files": [
+            "https://github.com/M1kep/Comfy_KepListStuff"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Range(Step), Range(Num Steps), List Length, Image Overlay, Stack Images, Empty Images, Join Image Lists, Join Float Lists. This extension provides various list manipulation nodes."
+    },
+    {
+        "author": "M1kep",
+        "title": "ComfyLiterals",
+        "reference": "https://github.com/M1kep/ComfyLiterals",
+        "files": [
+            "https://github.com/M1kep/ComfyLiterals"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Int, Float, String, Operation, Checkpoint"
+    },
+    {
+        "author": "M1kep",
+        "title": "KepPromptLang",
+        "reference": "https://github.com/M1kep/KepPromptLang",
+        "files": [
+            "https://github.com/M1kep/KepPromptLang"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Build Gif, Special CLIP Loader. It offers various manipulation capabilities for the internal operations of the prompt."
+    },
+    {
+        "author": "M1kep",
+        "title": "Comfy_KepMatteAnything",
+        "reference": "https://github.com/M1kep/Comfy_KepMatteAnything",
+        "files": [
+            "https://github.com/M1kep/Comfy_KepMatteAnything"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension provides a custom node that allows the use of [a/Matte Anything](https://github.com/hustvl/Matte-Anything) in ComfyUI."
+    },
+    {
+        "author": "M1kep",
+        "title": "Comfy_KepKitchenSink",
+        "reference": "https://github.com/M1kep/Comfy_KepKitchenSink",
+        "files": [
+            "https://github.com/M1kep/Comfy_KepKitchenSink"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: KepRotateImage"
+    },
+    {
+        "author": "M1kep",
+        "title": "ComfyUI-OtherVAEs",
+        "reference": "https://github.com/M1kep/ComfyUI-OtherVAEs",
+        "files": [
+            "https://github.com/M1kep/ComfyUI-OtherVAEs"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: TAESD VAE Decode"
+    },
+    {
+        "author": "M1kep",
+        "title": "ComfyUI-KepOpenAI",
+        "reference": "https://github.com/M1kep/ComfyUI-KepOpenAI",
+        "files": [
+            "https://github.com/M1kep/ComfyUI-KepOpenAI"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI-KepOpenAI is a user-friendly node that serves as an interface to the GPT-4 with Vision (GPT-4V) API. This integration facilitates the processing of images coupled with text prompts, leveraging the capabilities of the OpenAI API to generate text completions that are contextually relevant to the provided inputs."
+    },
+    {
+        "author": "uarefans",
+        "title": "ComfyUI-Fans",
+        "reference": "https://github.com/uarefans/ComfyUI-Fans",
+        "files": [
+            "https://github.com/uarefans/ComfyUI-Fans"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Fans Styler (max 10 styles), Fans Text Concat (up to 10 texts)."
+    },
+    {
+        "author": "NicholasMcCarthy",
+        "title": "ComfyUI_TravelSuite",
+        "reference": "https://github.com/NicholasMcCarthy/ComfyUI_TravelSuite",
+        "files": [
+            "https://github.com/NicholasMcCarthy/ComfyUI_TravelSuite"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI custom nodes to apply various latent travel techniques."
+    },
+    {
+        "author": "ManglerFTW",
+        "title": "ComfyI2I",
+        "reference": "https://github.com/ManglerFTW/ComfyI2I",
+        "files": [
+            "https://github.com/ManglerFTW/ComfyI2I"
+        ],
+        "install_type": "git-clone",
+        "description": "A set of custom nodes to perform image-to-image functions in ComfyUI."
+    },
+    {
+        "author": "theUpsider",
+        "title": "ComfyUI-Logic",
+        "reference": "https://github.com/theUpsider/ComfyUI-Logic",
+        "files": [
+            "https://github.com/theUpsider/ComfyUI-Logic"
+        ],
+        "install_type": "git-clone",
+        "description": "An extension to ComfyUI that introduces logic nodes and conditional rendering capabilities."
+    },
+    {
+        "author": "mpiquero7164",
+        "title": "SaveImgPrompt",
+        "reference": "https://github.com/mpiquero7164/ComfyUI-SaveImgPrompt",
+        "files": [
+            "https://github.com/mpiquero7164/ComfyUI-SaveImgPrompt"
+        ],
+        "install_type": "git-clone",
+        "description": "Save a PNG or JPEG in ComfyUI, with the option to save the prompt/workflow in a text or JSON file for each image, plus workflow loading."
+    },
+    {
+        "author": "m-sokes",
+        "title": "ComfyUI Sokes Nodes",
+        "reference": "https://github.com/m-sokes/ComfyUI-Sokes-Nodes",
+        "files": [
+            "https://github.com/m-sokes/ComfyUI-Sokes-Nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Empty Latent Randomizer (9 Inputs)"
+    },
+    {
+        "author": "Extraltodeus",
+        "title": "noise latent perlinpinpin",
+        "reference": "https://github.com/Extraltodeus/noise_latent_perlinpinpin",
+        "files": [
+            "https://github.com/Extraltodeus/noise_latent_perlinpinpin"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: NoisyLatentPerlin. This allows you to create latents filled with Perlin-based noise that can actually be used by the samplers."
+    },
+    {
+        "author": "JPS",
+        "title": "JPS Custom Nodes for ComfyUI",
+        "reference": "https://github.com/JPS-GER/ComfyUI_JPS-Nodes",
+        "files": [
+            "https://github.com/JPS-GER/ComfyUI_JPS-Nodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: SDXL - Resolutions, SDXL - Basic Settings, SDXL - Additional Settings, Math - Resolution Multiply, Math - Largest Integer, Switch - TXT2IMG & IMG2IMG"
+    },
+    {
+        "author": "hustille",
+        "title": "hus' utils for ComfyUI",
+        "reference": "https://github.com/hustille/ComfyUI_hus_utils",
+        "files": [
+            "https://github.com/hustille/ComfyUI_hus_utils"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI nodes primarily for seed and filename generation"
+    },
+    {
+        "author": "hustille",
+        "title": "ComfyUI_Fooocus_KSampler",
+        "reference": "https://github.com/hustille/ComfyUI_Fooocus_KSampler",
+        "files": [
+            "https://github.com/hustille/ComfyUI_Fooocus_KSampler"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: KSampler With Refiner (Fooocus). The KSampler from [a/Fooocus](https://github.com/lllyasviel/Fooocus) as a ComfyUI node. [w/NOTE: This patches basic ComfyUI behaviour - don't use together with other samplers. Or perhaps do? Other samplers might profit from those changes ... ymmv.]"
+    },
+    {
+        "author": "badjeff",
+        "title": "LoRA Tag Loader for ComfyUI",
+        "reference": "https://github.com/badjeff/comfyui_lora_tag_loader",
+        "files": [
+            "https://github.com/badjeff/comfyui_lora_tag_loader"
+        ],
+        "install_type": "git-clone",
+        "description": "A ComfyUI custom node to read LoRA tag(s) from text and load them into the checkpoint model."
+    },
+    {
+        "author": "rgthree",
+        "title": "rgthree's ComfyUi Nodes",
+        "reference": "https://github.com/rgthree/rgthree-comfy",
+        "files": [
+            "https://github.com/rgthree/rgthree-comfy"
+        ],
+        "nodename_pattern": " \\(rgthree\\)$",
+        "install_type": "git-clone",
+        "description": "Nodes: Seed, Reroute, Context, Lora Loader Stack, Context Switch, Fast Muter. These custom nodes help organize the building of complex workflows."
+    },
+    {
+        "author": "AIGODLIKE",
+        "title": "AIGODLIKE-COMFYUI-TRANSLATION",
+        "reference": "https://github.com/AIGODLIKE/AIGODLIKE-COMFYUI-TRANSLATION",
+        "files": [
+            "https://github.com/AIGODLIKE/AIGODLIKE-COMFYUI-TRANSLATION"
+        ],
+        "install_type": "git-clone",
+        "description": "It provides language settings. (Contributions from users of various languages are needed to support each language.)"
+    },
+    {
+        "author": "syllebra",
+        "title": "BilboX's ComfyUI Custom Nodes",
+        "reference": "https://github.com/syllebra/bilbox-comfyui",
+        "files": [
+            "https://github.com/syllebra/bilbox-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: BilboX's PromptGeek Photo Prompt. This provides a convenient way to compose photorealistic prompts in ComfyUI."
+    },
+    {
+        "author": "Girish Gopaul",
+        "title": "Save Image with Generation Metadata",
+        "reference": "https://github.com/giriss/comfy-image-saver",
+        "files": [
+            "https://github.com/giriss/comfy-image-saver"
+        ],
+        "install_type": "git-clone",
+        "description": "All the tools you need to save images with their generation metadata on ComfyUI. Compatible with Civitai & Prompthero geninfo auto-detection. Works with png, jpeg and webp."
+    },
+    {
+        "author": "shingo1228",
+        "title": "ComfyUI-send-Eagle(slim)",
+        "reference": "https://github.com/shingo1228/ComfyUI-send-eagle-slim",
+        "files": [
+            "https://github.com/shingo1228/ComfyUI-send-eagle-slim"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Send Webp Image to Eagle. This is an extension node for ComfyUI that allows you to send generated images in webp format to Eagle. It is a re-implementation of the Eagle linkage functions of the previous ComfyUI-send-Eagle node, focusing on the functions required for this node."
+    },
+    {
+        "author": "shingo1228",
+        "title": "ComfyUI-SDXL-EmptyLatentImage",
+        "reference": "https://github.com/shingo1228/ComfyUI-SDXL-EmptyLatentImage",
+        "files": [
+            "https://github.com/shingo1228/ComfyUI-SDXL-EmptyLatentImage"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: SDXL Empty Latent Image. An extension node for ComfyUI that allows you to select a resolution from the pre-defined JSON files and output a latent image."
+    },
+    {
+        "author": "laksjdjf",
+        "title": "IPAdapter-ComfyUI",
+        "reference": "https://github.com/laksjdjf/IPAdapter-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/IPAdapter-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Load IPAdapter. This custom node provides a loader for the IP-Adapter model.[w/NOTE: To use this extension node, you need to download the [a/ip-adapter_sd15.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin) file and place it in the %%**custom_nodes/IPAdapter-ComfyUI/models**%% directory. Additionally, you need to download the 'Clip vision model' from the 'Install models' menu as well.]"
+    },
+    {
+        "author": "laksjdjf",
+        "title": "pfg-ComfyUI",
+        "reference": "https://github.com/laksjdjf/pfg-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/pfg-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "ComfyUI version of https://github.com/laksjdjf/pfg-webui. (To use this extension, you need to download the required model file from **Install Models**)"
+    },
+    {
+        "author": "laksjdjf",
+        "title": "attention-couple-ComfyUI",
+        "reference": "https://github.com/laksjdjf/attention-couple-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/attention-couple-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Attention couple. This is a custom node that manipulates region-specific prompts. While vanilla ComfyUI employs an area specification method based on latent couples, this node divides regions using attention layers within UNet."
+    },
+    {
+        "author": "laksjdjf",
+        "title": "cd-tuner_negpip-ComfyUI",
+        "reference": "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Apply CDTuner, Apply Negapip. This extension provides the [a/CD(Color/Detail) Tuner](https://github.com/hako-mikan/sd-webui-cd-tuner) and the [a/Negative Prompt in the Prompt](https://github.com/hako-mikan/sd-webui-negpip) features."
+    },
+    {
+        "author": "laksjdjf",
+        "title": "LoRA-Merger-ComfyUI",
+        "reference": "https://github.com/laksjdjf/LoRA-Merger-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/LoRA-Merger-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Load LoRA Weight Only, Load LoRA from Weight, Merge LoRA, Save LoRA. This extension provides nodes for merging LoRA."
+    },
+    {
+        "author": "laksjdjf",
+        "title": "LCMSampler-ComfyUI",
+        "reference": "https://github.com/laksjdjf/LCMSampler-ComfyUI",
+        "files": [
+            "https://github.com/laksjdjf/LCMSampler-ComfyUI"
+        ],
+        "install_type": "git-clone",
+        "description": "This extension node is intended for the use of LCM conversion for SSD-1B-anime. It does not guarantee operation with the original LCM (as it cannot load weights in the current version). To take advantage of fast generation with LCM, a node for using TAESD as a decoder is also provided. This is inspired by ComfyUI-OtherVAEs."
+    },
+    {
+        "author": "alsritter",
+        "title": "asymmetric-tiling-comfyui",
+        "reference": "https://github.com/alsritter/asymmetric-tiling-comfyui",
+        "files": [
+            "https://github.com/alsritter/asymmetric-tiling-comfyui"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: Asymmetric_Tiling_KSampler."
+    },
+    {
+        "author": "meap158",
+        "title": "GPU temperature protection",
+        "reference": "https://github.com/meap158/ComfyUI-GPU-temperature-protection",
+        "files": [
+            "https://github.com/meap158/ComfyUI-GPU-temperature-protection"
+        ],
+        "install_type": "git-clone",
+        "description": "Pause image generation when GPU temperature exceeds threshold."
+    },
+    {
+        "author": "meap158",
+        "title": "ComfyUI-Prompt-Expansion",
+        "reference": "https://github.com/meap158/ComfyUI-Prompt-Expansion",
+        "files": [
+            "https://github.com/meap158/ComfyUI-Prompt-Expansion"
+        ],
+        "install_type": "git-clone",
+        "description": "Dynamic prompt expansion, powered by GPT-2 locally on your device."
+    },
+    {
+        "author": "TeaCrab",
+        "title": "ComfyUI-TeaNodes",
+        "reference": "https://github.com/TeaCrab/ComfyUI-TeaNodes",
+        "files": [
+            "https://github.com/TeaCrab/ComfyUI-TeaNodes"
+        ],
+        "install_type": "git-clone",
+        "description": "Nodes: TC_EqualizeCLAHE, TC_SizeApproximation, TC_ImageResize, TC_ImageScale, TC_ColorFill."
+ },
+ {
+ "author": "nagolinc",
+ "title": "ComfyUI_FastVAEDecorder_SDXL",
+ "reference": "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL",
+ "files": [
+ "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL"
+ ],
+ "install_type": "git-clone",
+ "description": "Based on: [a/Birch-san/diffusers-play/approx_vae](https://github.com/Birch-san/diffusers-play/tree/main/approx_vae). This ComfyUI node allows you to quickly preview SDXL 1.0 latents."
+ },
+ {
+ "author": "bradsec",
+ "title": "ResolutionSelector for ComfyUI",
+ "reference": "https://github.com/bradsec/ComfyUI_ResolutionSelector",
+ "files": [
+ "https://github.com/bradsec/ComfyUI_ResolutionSelector"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:ResolutionSelector"
+ },
+ {
+ "author": "kohya-ss",
+ "title": "ControlNet-LLLite-ComfyUI",
+ "reference": "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI",
+ "files": [
+ "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: LLLiteLoader"
+ },
+ {
+ "author": "jjkramhoeft",
+ "title": "ComfyUI-Jjk-Nodes",
+ "reference": "https://github.com/jjkramhoeft/ComfyUI-Jjk-Nodes",
+ "files": [
+ "https://github.com/jjkramhoeft/ComfyUI-Jjk-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: SDXLRecommendedImageSize, JjkText, JjkShowText, JjkConcat. A set of custom nodes for ComfyUI, focused on text and parameter utility."
+ },
+ {
+ "author": "dagthomas",
+ "title": "SDXL Auto Prompter",
+ "reference": "https://github.com/dagthomas/comfyui_dagthomas",
+ "files": [
+ "https://github.com/dagthomas/comfyui_dagthomas"
+ ],
+ "install_type": "git-clone",
+ "description": "Easy prompting for generation of endless random art pieces and photographs!"
+ },
+ {
+ "author": "marhensa",
+ "title": "Recommended Resolution Calculator",
+ "reference": "https://github.com/marhensa/sdxl-recommended-res-calc",
+ "files": [
+ "https://github.com/marhensa/sdxl-recommended-res-calc"
+ ],
+ "install_type": "git-clone",
+ "description": "Input your desired final output resolution and it will automatically set the recommended initial SDXL ratio/size and the Upscale Factor needed to reach that final resolution; there is also an option for a 2x/4x reverse Upscale Factor. All of this avoids starting from a bad/arbitrary initial ratio/resolution."
+ },
+ {
+ "author": "Nuked",
+ "title": "ComfyUI-N-Nodes",
+ "reference": "https://github.com/Nuked88/ComfyUI-N-Nodes",
+ "files": [
+ "https://github.com/Nuked88/ComfyUI-N-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "A suite of custom nodes for ComfyUI that includes GPT text-prompt generation, LoadVideo, SaveVideo, LoadFramesFromFolder and FrameInterpolator."
+ },
+ {
+ "author": "Extraltodeus",
+ "title": "LoadLoraWithTags",
+ "reference": "https://github.com/Extraltodeus/LoadLoraWithTags",
+ "files": [
+ "https://github.com/Extraltodeus/LoadLoraWithTags"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:LoadLoraWithTags. Save/load trigger words for loras from a JSON file and auto-fetch them on Civitai if they are missing."
+ },
+ {
+ "author": "richinsley",
+ "title": "Comfy-LFO",
+ "reference": "https://github.com/richinsley/Comfy-LFO",
+ "files": [
+ "https://github.com/richinsley/Comfy-LFO"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:LFO_Triangle, LFO_Sine, SawtoothNode, SquareNode, PulseNode. ComfyUI custom nodes to create Low Frequency Oscillators."
+ },
+ {
+ "author": "Beinsezii",
+ "title": "bsz-cui-extras",
+ "reference": "https://github.com/Beinsezii/bsz-cui-extras",
+ "files": [
+ "https://github.com/Beinsezii/bsz-cui-extras"
+ ],
+ "install_type": "git-clone",
+ "description": "This contains all-in-one 'principled' nodes for T2I, I2I, refining, and scaling. Additionally, it has many tools for directly manipulating the color of latents, high res fix math, and scripted image post-processing."
+ },
+ {
+ "author": "youyegit",
+ "title": "tdxh_node_comfyui",
+ "reference": "https://github.com/youyegit/tdxh_node_comfyui",
+ "files": [
+ "https://github.com/youyegit/tdxh_node_comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:TdxhImageToSize, TdxhImageToSizeAdvanced, TdxhLoraLoader, TdxhIntInput, TdxhFloatInput, TdxhStringInput. Some nodes for Stable Diffusion in ComfyUI. They can conveniently reduce the number of nodes needed to do the same things."
+ },
+ {
+ "author": "Sxela",
+ "title": "ComfyWarp",
+ "reference": "https://github.com/Sxela/ComfyWarp",
+ "files": [
+ "https://github.com/Sxela/ComfyWarp"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:LoadFrameSequence, LoadFrame"
+ },
+ {
+ "author": "skfoo",
+ "title": "ComfyUI-Coziness",
+ "reference": "https://github.com/skfoo/ComfyUI-Coziness",
+ "files": [
+ "https://github.com/skfoo/ComfyUI-Coziness"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:MultiLora Loader, Lora Text Extractor. Provides nodes to assist in loading loras through text."
+ },
+ {
+ "author": "YOUR-WORST-TACO",
+ "title": "ComfyUI-TacoNodes",
+ "reference": "https://github.com/YOUR-WORST-TACO/ComfyUI-TacoNodes",
+ "files": [
+ "https://github.com/YOUR-WORST-TACO/ComfyUI-TacoNodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:TacoLatent, TacoAnimatedLoader, TacoImg2ImgAnimatedLoader, TacoGifMaker."
+ },
+ {
+ "author": "Lerc",
+ "title": "Canvas Tab",
+ "reference": "https://github.com/Lerc/canvas_tab",
+ "files": [
+ "https://github.com/Lerc/canvas_tab"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension provides a full page image editor with mask support. There are two nodes, one to receive images from the editor and one to send images to the editor."
+ },
+ {
+ "author": "Ttl",
+ "title": "ComfyUI Neural network latent upscale custom node",
+ "reference": "https://github.com/Ttl/ComfyUi_NNLatentUpscale",
+ "files": [
+ "https://github.com/Ttl/ComfyUi_NNLatentUpscale"
+ ],
+ "install_type": "git-clone",
+ "description": "A custom ComfyUI node designed for rapid latent upscaling using a compact neural network, eliminating the need for VAE-based decoding and encoding."
+ },
+ {
+ "author": "spro",
+ "title": "Latent Mirror node for ComfyUI",
+ "reference": "https://github.com/spro/comfyui-mirror",
+ "files": [
+ "https://github.com/spro/comfyui-mirror"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Latent Mirror. Node to mirror a latent along the Y (vertical / left to right) or X (horizontal / top to bottom) axis."
+ },
+ {
+ "author": "Tropfchen",
+ "title": "Embedding Picker",
+ "reference": "https://github.com/Tropfchen/ComfyUI-Embedding_Picker",
+ "files": [
+ "https://github.com/Tropfchen/ComfyUI-Embedding_Picker"
+ ],
+ "install_type": "git-clone",
+ "description": "Tired of forgetting and misspelling the often weird names of embeddings you use? Or perhaps you use only one, because you forgot you have tens of them installed?"
+ },
+ {
+ "author": "Acly",
+ "title": "ComfyUI Nodes for External Tooling",
+ "reference": "https://github.com/Acly/comfyui-tooling-nodes",
+ "files": [
+ "https://github.com/Acly/comfyui-tooling-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Load Image (Base64), Load Mask (Base64), Send Image (WebSocket), Crop Image, Apply Mask to Image. Provides nodes geared towards using ComfyUI as a backend for external tools."
+ },
+ {
+ "author": "picturesonpictures",
+ "title": "comfy_PoP",
+ "reference": "https://github.com/picturesonpictures/comfy_PoP",
+ "files": ["https://github.com/picturesonpictures/comfy_PoP"],
+ "install_type": "git-clone",
+ "description": "A collection of custom nodes for ComfyUI. Includes a quick canny edge detection node with unconventional settings, simple LoRA stack nodes for workflow efficiency, and a customizable aspect ratio node."
+ },
+ {
+ "author": "Dream Project",
+ "title": "Dream Project Animation Nodes",
+ "reference": "https://github.com/alt-key-project/comfyui-dream-project",
+ "files": [
+ "https://github.com/alt-key-project/comfyui-dream-project"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension offers various nodes that are useful for Deforum-like animations in ComfyUI."
+ },
+ {
+ "author": "Dream Project",
+ "title": "Dream Video Batches",
+ "reference": "https://github.com/alt-key-project/comfyui-dream-video-batches",
+ "files": [
+ "https://github.com/alt-key-project/comfyui-dream-video-batches"
+ ],
+ "install_type": "git-clone",
+ "description": "Provides utilities for batch-based video generation workflows (such as AnimateDiff and Stable Video Diffusion)."
+ },
+ {
+ "author": "seanlynch",
+ "title": "ComfyUI Optical Flow",
+ "reference": "https://github.com/seanlynch/comfyui-optical-flow",
+ "files": [
+ "https://github.com/seanlynch/comfyui-optical-flow"
+ ],
+ "install_type": "git-clone",
+ "description": "This package contains three nodes to help you compute optical flow between pairs of images, usually adjacent frames in a video, visualize the flow, and apply the flow to another image of the same dimensions. Most of the code is from Deforum, so this is released under the same license (MIT)."
+ },
+ {
+ "author": "ealkanat",
+ "title": "ComfyUI Easy Padding",
+ "reference": "https://github.com/ealkanat/comfyui_easy_padding",
+ "files": [
+ "https://github.com/ealkanat/comfyui_easy_padding"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI Easy Padding is a simple custom node that helps you add padding to images in ComfyUI."
+ },
+ {
+ "author": "ArtBot2023",
+ "title": "Character Face Swap",
+ "reference": "https://github.com/ArtBot2023/CharacterFaceSwap",
+ "files": [
+ "https://github.com/ArtBot2023/CharacterFaceSwap"
+ ],
+ "install_type": "git-clone",
+ "description": "Character face swap with LoRA and embeddings."
+ },
+ {
+ "author": "mav-rik",
+ "title": "Facerestore CF (Code Former)",
+ "reference": "https://github.com/mav-rik/facerestore_cf",
+ "files": [
+ "https://github.com/mav-rik/facerestore_cf"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a copy of [a/facerestore custom node](https://civitai.com/models/24690/comfyui-facerestore-node) with a bit of a change to support the CodeFormer Fidelity parameter. These ComfyUI nodes can be used to restore faces in images similar to the face restore option in AUTOMATIC1111 webui.\nNOTE: To use this node, you need to download the face restoration model and face detection model from the 'Install models' menu."
+ },
+ {
+ "author": "braintacles",
+ "title": "braintacles-nodes",
+ "reference": "https://github.com/braintacles/braintacles-comfyui-nodes",
+ "files": [
+ "https://github.com/braintacles/braintacles-comfyui-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: CLIPTextEncodeSDXL-Multi-IO, CLIPTextEncodeSDXL-Pipe, Empty Latent Image from Aspect-Ratio, Random Find and Replace."
+ },
+ {
+ "author": "hayden-fr",
+ "title": "ComfyUI-Model-Manager",
+ "reference": "https://github.com/hayden-fr/ComfyUI-Model-Manager",
+ "files": [
+ "https://github.com/hayden-fr/ComfyUI-Model-Manager"
+ ],
+ "install_type": "git-clone",
+ "description": "Manage models: browsing, download and delete."
+ },
+ {
+ "author": "hayden-fr",
+ "title": "ComfyUI-Image-Browsing",
+ "reference": "https://github.com/hayden-fr/ComfyUI-Image-Browsing",
+ "files": [
+ "https://github.com/hayden-fr/ComfyUI-Image-Browsing"
+ ],
+ "install_type": "git-clone",
+ "description": "Image Browsing: browsing, download and delete."
+ },
+ {
+ "author": "ali1234",
+ "title": "comfyui-job-iterator",
+ "reference": "https://github.com/ali1234/comfyui-job-iterator",
+ "files": [
+ "https://github.com/ali1234/comfyui-job-iterator"
+ ],
+ "install_type": "git-clone",
+ "description": "Implements iteration over sequences within a single workflow run. [w/NOTE: This node replaces the execution of ComfyUI for iterative processing functionality.]"
+ },
+ {
+ "author": "jmkl",
+ "title": "ComfyUI Ricing",
+ "reference": "https://github.com/jmkl/ComfyUI-ricing",
+ "files": [
+ "https://github.com/jmkl/ComfyUI-ricing"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI custom user.css and some script stuff, mainly for the web interface."
+ },
+ {
+ "author": "budihartono",
+ "title": "Otonx's Custom Nodes",
+ "reference": "https://github.com/budihartono/comfyui_otonx_nodes",
+ "files": [
+ "https://github.com/budihartono/comfyui_otonx_nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: OTX Multiple Values, OTX KSampler Feeder. This extension provides custom nodes for ComfyUI created for personal projects. Made available for reference. Nodes may be updated or changed intermittently or not at all. Review & test before use."
+ },
+ {
+ "author": "ramyma",
+ "title": "A8R8 ComfyUI Nodes",
+ "reference": "https://github.com/ramyma/A8R8_ComfyUI_nodes",
+ "files": [
+ "https://github.com/ramyma/A8R8_ComfyUI_nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Base64Image Input Node, Base64Image Output Node. Supporting nodes for integrating [a/A8R8](https://github.com/ramyma/a8r8) with ComfyUI."
+ },
+ {
+ "author": "spinagon",
+ "title": "Seamless tiling Node for ComfyUI",
+ "reference": "https://github.com/spinagon/ComfyUI-seamless-tiling",
+ "files": [
+ "https://github.com/spinagon/ComfyUI-seamless-tiling"
+ ],
+ "install_type": "git-clone",
+ "description": "Node for generating almost seamless textures, based on the similar setting from A1111."
+ },
+ {
+ "author": "BiffMunky",
+ "title": "Endless 🌊✨ Nodes",
+ "reference": "https://github.com/tusharbhutt/Endless-Nodes",
+ "files": [
+ "https://github.com/tusharbhutt/Endless-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "A small set of nodes I created for various numerical and text inputs. Features an image saver with the ability to save JSON to a separate folder, parameter collection nodes, two aesthetic scoring models, switches for text and numbers, and conversion of strings to numbers and vice versa."
+ },
+ {
+ "author": "spacepxl",
+ "title": "ComfyUI-HQ-Image-Save",
+ "reference": "https://github.com/spacepxl/ComfyUI-HQ-Image-Save",
+ "files": [
+ "https://github.com/spacepxl/ComfyUI-HQ-Image-Save"
+ ],
+ "install_type": "git-clone",
+ "description": "Adds Image Save nodes for TIFF 16 bit and EXR 32 bit formats. Probably only useful if you're applying a LUT or other color corrections, and care about preserving as much color accuracy as possible."
+ },
+ {
+ "author": "PTA",
+ "title": "auto nodes layout",
+ "reference": "https://github.com/phineas-pta/comfyui-auto-nodes-layout",
+ "files": [
+ "https://github.com/phineas-pta/comfyui-auto-nodes-layout"
+ ],
+ "install_type": "git-clone",
+ "description": "A ComfyUI extension that applies a better node-layout algorithm to a ComfyUI workflow (mostly for visualization purposes)."
+ },
+ {
+ "author": "receyuki",
+ "title": "comfyui-prompt-reader-node",
+ "reference": "https://github.com/receyuki/comfyui-prompt-reader-node",
+ "files": [
+ "https://github.com/receyuki/comfyui-prompt-reader-node"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI node version of the SD Prompt Reader."
+ },
+ {
+ "author": "rklaffehn",
+ "title": "rk-comfy-nodes",
+ "reference": "https://github.com/rklaffehn/rk-comfy-nodes",
+ "files": [
+ "https://github.com/rklaffehn/rk-comfy-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: RK_CivitAIMetaChecker, RK_CivitAIAddHashes."
+ },
+ {
+ "author": "cubiq",
+ "title": "ComfyUI Essentials",
+ "reference": "https://github.com/cubiq/ComfyUI_essentials",
+ "files": [
+ "https://github.com/cubiq/ComfyUI_essentials"
+ ],
+ "install_type": "git-clone",
+ "description": "Essential nodes that are weirdly missing from ComfyUI core. With few exceptions they are new features and not commodities. I hope this will be just a temporary repository until the nodes get included into ComfyUI."
+ },
+ {
+ "author": "Clybius",
+ "title": "ComfyUI-Latent-Modifiers",
+ "reference": "https://github.com/Clybius/ComfyUI-Latent-Modifiers",
+ "files": [
+ "https://github.com/Clybius/ComfyUI-Latent-Modifiers"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Latent Diffusion Mega Modifier. ComfyUI nodes which modify the latent during the diffusion process. (Sharpness, Tonemap, Rescale, Extra Noise)"
+ },
+ {
+ "author": "mcmonkeyprojects",
+ "title": "Stable Diffusion Dynamic Thresholding (CFG Scale Fix)",
+ "reference": "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding",
+ "files": [
+ "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding"
+ ],
+ "install_type": "git-clone",
+ "description": "Extension for StableSwarmUI, ComfyUI, and AUTOMATIC1111 Stable Diffusion WebUI that enables a way to use higher CFG Scales without color issues. This works by clamping latents between steps."
+ },
+ {
+ "author": "Tropfchen",
+ "title": "YARS: Yet Another Resolution Selector",
+ "reference": "https://github.com/Tropfchen/ComfyUI-yaResolutionSelector",
+ "files": [
+ "https://github.com/Tropfchen/ComfyUI-yaResolutionSelector"
+ ],
+ "install_type": "git-clone",
+ "description": "A slightly different Resolution Selector node, allowing you to freely change the base resolution and aspect ratio, with options to maintain the pixel count or use the base resolution as the highest or lowest dimension."
+ },
+ {
+ "author": "chrisgoringe",
+ "title": "Variation seeds",
+ "reference": "https://github.com/chrisgoringe/cg-noise",
+ "files": [
+ "https://github.com/chrisgoringe/cg-noise"
+ ],
+ "install_type": "git-clone",
+ "description": "Adds KSampler custom nodes with variation seed and variation strength."
+ },
+ {
+ "author": "chrisgoringe",
+ "title": "Image chooser",
+ "reference": "https://github.com/chrisgoringe/cg-image-picker",
+ "files": [
+ "https://github.com/chrisgoringe/cg-image-picker"
+ ],
+ "install_type": "git-clone",
+ "description": "A custom node that pauses the flow while you choose which image (or latent) to pass on to the rest of the workflow."
+ },
+ {
+ "author": "chrisgoringe",
+ "title": "Use Everywhere (UE Nodes)",
+ "reference": "https://github.com/chrisgoringe/cg-use-everywhere",
+ "files": [
+ "https://github.com/chrisgoringe/cg-use-everywhere"
+ ],
+ "install_type": "git-clone",
+ "nodename_pattern": "(^(Prompts|Anything) Everywhere|Simple String)",
+ "description": "A set of nodes that allow data to be 'broadcast' to some or all unconnected inputs. Greatly reduces link spaghetti."
+ },
+ {
+ "author": "chrisgoringe",
+ "title": "Prompt Info",
+ "reference": "https://github.com/chrisgoringe/cg-prompt-info",
+ "files": [
+ "https://github.com/chrisgoringe/cg-prompt-info"
+ ],
+ "install_type": "git-clone",
+ "description": "Prompt Info"
+ },
+ {
+ "author": "TGu-97",
+ "title": "TGu Utilities",
+ "reference": "https://github.com/TGu-97/ComfyUI-TGu-utils",
+ "files": [
+ "https://github.com/TGu-97/ComfyUI-TGu-utils"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: MPN Switch, MPN Reroute, PN Switch. This is a set of custom nodes for ComfyUI that mainly focuses on control switches."
+ },
+ {
+ "author": "seanlynch",
+ "title": "SRL's nodes",
+ "reference": "https://github.com/seanlynch/srl-nodes",
+ "files": [
+ "https://github.com/seanlynch/srl-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: SRL Conditional Interrupt, SRL Format String, SRL Eval, SRL Filter Image List. This is a collection of nodes I find useful. Note that at least one module allows execution of arbitrary code. Do not use any of these nodes on a system that allows untrusted users to control workflows or inputs.[w/WARNING: The custom nodes in this extension are vulnerable to **security risks** because they allow the execution of arbitrary code through the workflow]"
+ },
+ {
+ "author": "alpertunga-bile",
+ "title": "prompt-generator",
+ "reference": "https://github.com/alpertunga-bile/prompt-generator-comfyui",
+ "files": [
+ "https://github.com/alpertunga-bile/prompt-generator-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "Custom AI prompt generator node for ComfyUI."
+ },
+ {
+ "author": "mlinmg",
+ "title": "LaMa Preprocessor [WIP]",
+ "reference": "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor",
+ "files": [
+ "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor"
+ ],
+ "install_type": "git-clone",
+ "description": "A LaMa preprocessor for ComfyUI. This preprocessor finally enables users to generate coherent inpainting and outpainting prompt-free. It gives the best results on landscapes, not so much on drawings/animation."
+ },
+ {
+ "author": "azazeal04",
+ "title": "ComfyUI-Styles",
+ "reference": "https://github.com/azazeal04/ComfyUI-Styles",
+ "files": [
+ "https://github.com/azazeal04/ComfyUI-Styles"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Anime_Styler, Fantasy_Styler, Gothic_Styler, Line_Art_Styler, Movie_Poster_Styler, Punk_Styler, Travel_Poster_Styler. This extension offers 8 art style nodes, each of which includes approximately 50 individual style variations."
+ },
+ {
+ "author": "kijai",
+ "title": "KJNodes for ComfyUI",
+ "reference": "https://github.com/kijai/ComfyUI-KJNodes",
+ "files": [
+ "https://github.com/kijai/ComfyUI-KJNodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Various quality-of-life nodes for ComfyUI, mostly just visual stuff to improve usability."
+ },
+ {
+ "author": "hhhzzyang",
+ "title": "Comfyui-Lama",
+ "reference": "https://github.com/hhhzzyang/Comfyui_Lama",
+ "files": [
+ "https://github.com/hhhzzyang/Comfyui_Lama"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: LamaaModelLoad, LamaApply, YamlConfigLoader. A custom node for removing/inpainting anything from a picture via mask inpainting.[w/WARN:This extension includes the entire model, which can result in a very long initial installation time, and there may be some compatibility issues with older dependencies and ComfyUI.]"
+ },
+ {
+ "author": "thedyze",
+ "title": "Save Image Extended for ComfyUI",
+ "reference": "https://github.com/thedyze/save-image-extended-comfyui",
+ "files": [
+ "https://github.com/thedyze/save-image-extended-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "Customize the information saved in file and folder names. Use the values of sampler parameters as part of file or folder names. Save your positive & negative prompt as entries in a JSON (text) file, in each folder."
+ },
+ {
+ "author": "SOELexicon",
+ "title": "ComfyUI-LexTools",
+ "reference": "https://github.com/SOELexicon/ComfyUI-LexTools",
+ "files": [
+ "https://github.com/SOELexicon/ComfyUI-LexTools"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI-LexTools is a Python-based image processing and analysis toolkit that uses machine learning models for semantic image segmentation, image scoring, and image captioning."
+ },
+ {
+ "author": "mikkel",
+ "title": "ComfyUI - Text Overlay Plugin",
+ "reference": "https://github.com/mikkel/ComfyUI-text-overlay",
+ "files": [
+ "https://github.com/mikkel/ComfyUI-text-overlay"
+ ],
+ "install_type": "git-clone",
+ "description": "The ComfyUI Text Overlay Plugin provides functionalities for superimposing text on images. Users can select different font types, set text size, choose color, and adjust the text's position on the image."
+ },
+ {
+ "author": "avatechai",
+ "title": "avatar-graph-comfyui",
+ "reference": "https://github.com/avatechai/avatar-graph-comfyui",
+ "files": [
+ "https://github.com/avatechai/avatar-graph-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "A custom nodes module for creating real-time interactive avatars powered by the blender bpy mesh api + Avatech Shape Flow runtime."
+ },
+ {
+ "author": "TRI3D-LC",
+ "title": "tri3d-comfyui-nodes",
+ "reference": "https://github.com/TRI3D-LC/tri3d-comfyui-nodes",
+ "files": [
+ "https://github.com/TRI3D-LC/tri3d-comfyui-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: tri3d-extract-hand, tri3d-fuzzification, tri3d-position-hands, tri3d-atr-parse."
+ },
+ {
+ "author": "storyicon",
+ "title": "segment anything",
+ "reference": "https://github.com/storyicon/comfyui_segment_anything",
+ "files": [
+ "https://github.com/storyicon/comfyui_segment_anything"
+ ],
+ "install_type": "git-clone",
+ "description": "Based on GroundingDino and SAM, this uses semantic strings to segment any element in an image. The ComfyUI version of sd-webui-segment-anything."
+ },
+ {
+ "author": "a1lazydog",
+ "title": "ComfyUI-AudioScheduler",
+ "reference": "https://github.com/a1lazydog/ComfyUI-AudioScheduler",
+ "files": [
+ "https://github.com/a1lazydog/ComfyUI-AudioScheduler"
+ ],
+ "install_type": "git-clone",
+ "description": "Load mp3 files and use the audio nodes to power animations and prompt scheduling. Use with FizzNodes."
+ },
+ {
+ "author": "whatbirdisthat",
+ "title": "cyberdolphin",
+ "reference": "https://github.com/whatbirdisthat/cyberdolphin",
+ "files": [
+ "https://github.com/whatbirdisthat/cyberdolphin"
+ ],
+ "install_type": "git-clone",
+ "description": "Cyberdolphin suite of ComfyUI nodes for wiring things up."
+ },
+ {
+ "author": "chrish-slingshot",
+ "title": "CrasH Utils",
+ "reference": "https://github.com/chrish-slingshot/CrasHUtils",
+ "files": [
+ "https://github.com/chrish-slingshot/CrasHUtils"
+ ],
+ "install_type": "git-clone",
+ "description": "A mixture of effects and quality of life nodes. Nodes: ImageGlitcher (gives an image a cool glitchy effect), ColorStylizer (highlights a single color in an image), QueryLocalLLM (queries a local LLM API through oobabooga), SDXLResolution (resolution picker for the standard SDXL resolutions, the complete list), SDXLResolutionSplit (splits the SDXL resolution into width and height)."
+ },
+ {
+ "author": "spinagon",
+ "title": "ComfyUI-seam-carving",
+ "reference": "https://github.com/spinagon/ComfyUI-seam-carving",
+ "files": [
+ "https://github.com/spinagon/ComfyUI-seam-carving"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Image Resize (seam carving). Seam carving (image resize) for ComfyUI. Based on [a/https://github.com/li-plus/seam-carving](https://github.com/li-plus/seam-carving). With the seam carving algorithm, the image can be intelligently resized while keeping the important contents undistorted. The carving process can be further guided, so that an object can be removed from the image without apparent artifacts."
+ },
+ {
+ "author": "YMC",
+ "title": "ymc-node-suite-comfyui",
+ "reference": "https://github.com/YMC-GitHub/ymc-node-suite-comfyui",
+ "files": [
+ "https://github.com/YMC-GitHub/ymc-node-suite-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "ymc's nodes for ComfyUI. This extension is composed of nodes that provide various utility features such as text, region, and I/O."
+ },
+ {
+ "author": "chibiace",
+ "title": "ComfyUI-Chibi-Nodes",
+ "reference": "https://github.com/chibiace/ComfyUI-Chibi-Nodes",
+ "files": [
+ "https://github.com/chibiace/ComfyUI-Chibi-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Loader, Prompts, ImageTool, Wildcards, LoadEmbedding, ConditionText, SaveImages, ..."
+ },
+ {
+ "author": "DigitalIO",
+ "title": "ComfyUI-stable-wildcards",
+ "reference": "https://github.com/DigitalIO/ComfyUI-stable-wildcards",
+ "files": [
+ "https://github.com/DigitalIO/ComfyUI-stable-wildcards"
+ ],
+ "install_type": "git-clone",
+ "description": "Wildcard implementation that can be reproduced with workflows."
+ },
+ {
+ "author": "THtianhao",
+ "title": "ComfyUI-Portrait-Maker",
+ "reference": "https://github.com/THtianhao/ComfyUI-Portrait-Maker",
+ "files": [
+ "https://github.com/THtianhao/ComfyUI-Portrait-Maker"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:RetainFace, FaceFusion, RatioMerge2Image, MaskMerge2Image, ReplaceBoxImg, ExpandMaskBox, FaceSkin, SkinRetouching, PortraitEnhancement, ..."
+ },
+ {
+ "author": "THtianhao",
+ "title": "ComfyUI-FaceChain",
+ "reference": "https://github.com/THtianhao/ComfyUI-FaceChain",
+ "files": [
+ "https://github.com/THtianhao/ComfyUI-FaceChain"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:FC_LoraMerge."
+ },
+ {
+ "author": "zer0TF",
+ "title": "Cute Comfy",
+ "reference": "https://github.com/zer0TF/cute-comfy",
+ "files": [
+ "https://github.com/zer0TF/cute-comfy"
+ ],
+ "install_type": "git-clone",
+ "description": "Adds a configurable folder watcher that auto-converts Comfy metadata into a Civitai-friendly format for automatic resource tagging when you upload images. Oh, and it makes your UI awesome, too. 💜"
+ },
+ {
+ "author": "chflame163",
+ "title": "ComfyUI_MSSpeech_TTS",
+ "reference": "https://github.com/chflame163/ComfyUI_MSSpeech_TTS",
+ "files": [
+ "https://github.com/chflame163/ComfyUI_MSSpeech_TTS"
+ ],
+ "install_type": "git-clone",
+ "description": "A text-to-speech plugin used under ComfyUI. It utilizes the Microsoft Speech TTS interface to convert text content into MP3 format audio files."
+ },
+ {
+ "author": "drustan-hawk",
+ "title": "primitive-types",
+ "reference": "https://github.com/drustan-hawk/primitive-types",
+ "files": [
+ "https://github.com/drustan-hawk/primitive-types"
+ ],
+ "install_type": "git-clone",
+ "description": "Custom nodes providing primitive data types (such as integers, floats, and strings) for ComfyUI."
+ },
+ {
+ "author": "shadowcz007",
+ "title": "comfyui-mixlab-nodes [WIP]",
+ "reference": "https://github.com/shadowcz007/comfyui-mixlab-nodes",
+ "files": [
+ "https://github.com/shadowcz007/comfyui-mixlab-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: RandomPrompt, TransparentImage, LoadImageFromPath, Splitting a long image into sections, FaceToMask, AreaToMask, ImagesCrop, ImageCropByAlpha, FeatheredMask, SplitLongMask, EnhanceImage, CLIPSeg, Consistency Decoder Loader, Consistency Decoder Decode, ..."
+ },
+ {
+ "author": "ostris",
+ "title": "Ostris Nodes ComfyUI",
+ "reference": "https://github.com/ostris/ostris_nodes_comfyui",
+ "files": [
+ "https://github.com/ostris/ostris_nodes_comfyui"
+ ],
+ "install_type": "git-clone",
+ "nodename_pattern": "- Ostris$",
+ "description": "This is a collection of custom nodes for ComfyUI that I made for some QOL. I will be adding much more advanced ones in the future once I get more familiar with the API."
+ },
+ {
+ "author": "0xbitches",
+ "title": "Latent Consistency Model for ComfyUI",
+ "reference": "https://github.com/0xbitches/ComfyUI-LCM",
+ "files": [
+ "https://github.com/0xbitches/ComfyUI-LCM"
+ ],
+ "install_type": "git-clone",
+ "description": "This custom node implements a Latent Consistency Model (LCM) sampler in ComfyUI."
+ },
+ {
+ "author": "aszc-dev",
+ "title": "Core ML Suite for ComfyUI",
+ "reference": "https://github.com/aszc-dev/ComfyUI-CoreMLSuite",
+ "files": [
+ "https://github.com/aszc-dev/ComfyUI-CoreMLSuite"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension contains a set of custom nodes for ComfyUI that allow you to use Core ML models in your ComfyUI workflows. The models can be obtained from the repository, or you can convert your own models using coremltools. The main motivation behind using Core ML models in ComfyUI is to allow you to utilize the ANE (Apple Neural Engine) on Apple Silicon (M1/M2) machines to improve performance."
+ },
+ {
+ "author": "taabata",
+ "title": "Syrian Falcon Nodes",
+ "reference": "https://github.com/taabata/Comfy_Syrian_Falcon_Nodes",
+ "files": [
+ "https://github.com/taabata/Comfy_Syrian_Falcon_Nodes/raw/main/SyrianFalconNodes.py"
+ ],
+ "install_type": "copy",
+ "description": "Nodes:Prompt editing, Word as Image"
+ },
+ {
+ "author": "taabata",
+ "title": "LCM_Inpaint-Outpaint_Comfy",
+ "reference": "https://github.com/taabata/LCM_Inpaint-Outpaint_Comfy",
+ "files": [
+ "https://github.com/taabata/LCM_Inpaint-Outpaint_Comfy"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI custom nodes for inpainting/outpainting using the new latent consistency model (LCM)"
+ },
+ {
+ "author": "noxinias",
+ "title": "ComfyUI_NoxinNodes",
+ "reference": "https://github.com/noxinias/ComfyUI_NoxinNodes",
+ "files": [
+ "https://github.com/noxinias/ComfyUI_NoxinNodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Noxin Complete Chime, Noxin Scaled Resolutions, Load from Noxin Prompt Library, Save to Noxin Prompt Library"
+ },
+ {
+ "author": "apesplat",
+ "title": "ezXY scripts and nodes",
+ "reference": "https://github.com/GMapeSplat/ComfyUI_ezXY",
+ "files": [
+ "https://github.com/GMapeSplat/ComfyUI_ezXY"
+ ],
+ "install_type": "git-clone",
+ "description": "Extensions/Patches: Enables linking float and integer inputs and outputs. Values are automatically cast to the correct type and clamped to the correct range. Works with both builtin and custom nodes.[w/NOTE: This repo patches ComfyUI's validate_inputs and map_node_over_list functions while running. May break depending on your version of ComfyUI. Can be deactivated in config.yaml.] Nodes: A collection of nodes for facilitating the generation of XY plots. Capable of plotting changes over most primitive values."
+ },
+ {
+ "author": "kinfolk0117",
+ "title": "SimpleTiles",
+ "reference": "https://github.com/kinfolk0117/ComfyUI_SimpleTiles",
+ "files": [
+ "https://github.com/kinfolk0117/ComfyUI_SimpleTiles"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:TileSplit, TileMerge."
+ },
+ {
+ "author": "kinfolk0117",
+ "title": "ComfyUI_GradientDeepShrink",
+ "reference": "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink",
+ "files": [
+ "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:GradientPatchModelAddDownscale (Kohya Deep Shrink)."
+ },
+ {
+ "author": "kinfolk0117",
+ "title": "TiledIPAdapter",
+ "reference": "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter",
+ "files": [
+ "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter"
+ ],
+ "install_type": "git-clone",
+ "description": "Proof of concept showing how to use IPAdapter to control tiled upscaling. NOTE: You need to have 'ComfyUI_IPAdapter_plus' installed."
+ },
+ {
+ "author": "Fictiverse",
+ "title": "ComfyUI Fictiverse Nodes",
+ "reference": "https://github.com/Fictiverse/ComfyUI_Fictiverse",
+ "files": [
+ "https://github.com/Fictiverse/ComfyUI_Fictiverse"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Color correction."
+ },
+ {
+ "author": "idrirap",
+ "title": "ComfyUI-Lora-Auto-Trigger-Words",
+ "reference": "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words",
+ "files": [
+ "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words"
+ ],
+ "install_type": "git-clone",
+ "description": "This project is a fork of [a/https://github.com/Extraltodeus/LoadLoraWithTags](https://github.com/Extraltodeus/LoadLoraWithTags). The aim of these custom nodes is to provide easy access to the tags used to trigger a lora."
+ },
+ {
+ "author": "aianimation55",
+ "title": "Comfy UI FatLabels",
+ "reference": "https://github.com/aianimation55/ComfyUI-FatLabels",
+ "files": [
+ "https://github.com/aianimation55/ComfyUI-FatLabels"
+ ],
+ "install_type": "git-clone",
+ "description": "A super simple custom node for ComfyUI that generates text with a font size option. Useful for bigger labelling of nodes, helpful for wider screen captures or tutorials. Plus, you can of course use the text within your generations."
+ },
+ {
+ "author": "noEmbryo",
+ "title": "noEmbryo nodes",
+ "reference": "https://github.com/noembryo/ComfyUI-noEmbryo",
+ "files": [
+ "https://github.com/noembryo/ComfyUI-noEmbryo"
+ ],
+ "install_type": "git-clone",
+ "description": "PromptTermList (1-6) are nodes that help with the creation of prompts inside ComfyUI. Resolution Scale outputs image dimensions using a scale factor. Regex Text Chopper outputs the chopped parts of a text using RegEx."
+ },
+ {
+ "author": "mikkel",
+ "title": "ComfyUI - Mask Bounding Box",
+ "reference": "https://github.com/mikkel/comfyui-mask-boundingbox",
+ "files": [
+ "https://github.com/mikkel/comfyui-mask-boundingbox"
+ ],
+ "install_type": "git-clone",
+ "description": "The ComfyUI Mask Bounding Box Plugin provides functionalities for selecting a specific size mask from an image. Can be combined with CLIPSeg to replace any aspect of an SDXL image with an SD1.5 output."
+ },
+ {
+ "author": "ParmanBabra",
+ "title": "ComfyUI-Malefish-Custom-Scripts",
+ "reference": "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts",
+ "files": [
+ "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Multi Lora Loader, Random (Prompt), Combine (Prompt), CSV Prompts Loader"
+ },
+ {
+ "author": "IAmMatan.com",
+ "title": "ComfyUI Serving toolkit",
+ "reference": "https://github.com/matan1905/ComfyUI-Serving-Toolkit",
+ "files": [
+ "https://github.com/matan1905/ComfyUI-Serving-Toolkit"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension adds nodes that allow you to easily serve your workflow (for example, using a Discord bot)."
+ },
+ {
+ "author": "PCMonsterx",
+ "title": "ComfyUI-CSV-Loader",
+ "reference": "https://github.com/PCMonsterx/ComfyUI-CSV-Loader",
+ "files": [
+ "https://github.com/PCMonsterx/ComfyUI-CSV-Loader"
+ ],
+ "install_type": "git-clone",
+ "description": "CSV Loader for prompt building within the ComfyUI interface. Allows access to positive/negative prompts associated with a name. Selections are pulled from CSV files."
+ },
+ {
+ "author": "Trung0246",
+ "title": "ComfyUI-0246",
+ "reference": "https://github.com/Trung0246/ComfyUI-0246",
+ "files": [
+ "https://github.com/Trung0246/ComfyUI-0246"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Highway, Junction. Assorted nodes I made to solve my struggles with ComfyUI. They have varying quality."
+ },
+ {
+ "author": "fexli",
+ "title": "fexli-util-node-comfyui",
+ "reference": "https://github.com/fexli/fexli-util-node-comfyui",
+ "files": [
+ "https://github.com/fexli/fexli-util-node-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:FEImagePadForOutpaint, FEColorOut, FEColor2Image, FERandomizedColor2Image"
+ },
+ {
+ "author": "AbyssYuan0",
+ "title": "ComfyUI_BadgerTools",
+ "reference": "https://github.com/AbyssYuan0/ComfyUI_BadgerTools",
+ "files": [
+ "https://github.com/AbyssYuan0/ComfyUI_BadgerTools"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:ImageOverlap-badger, FloatToInt-badger, IntToString-badger, FloatToString-badger, ImageNormalization-badger, ImageScaleToSide-badger, NovelToFizz-badger."
+ },
+ {
+ "author": "palant",
+ "title": "Image Resize for ComfyUI",
+ "reference": "https://github.com/palant/image-resize-comfyui",
+ "files": [
+ "https://github.com/palant/image-resize-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "This custom node provides various tools for resizing images. The goal is resizing without distorting proportions, yet without having to perform any calculations with the size of the original image. If a mask is present, it is resized and modified along with the image."
+ },
+ {
+ "author": "palant",
+ "title": "Integrated Nodes for ComfyUI",
+ "reference": "https://github.com/palant/integrated-nodes-comfyui",
+ "files": [
+ "https://github.com/palant/integrated-nodes-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "This tool will turn entire workflows or parts of them into single integrated nodes. In a way, it is similar to the Node Templates functionality but hides the inner structure. This is useful if all you want is to reuse and quickly configure a bunch of nodes without caring how they are interconnected."
+ },
+ {
+ "author": "palant",
+ "title": "Extended Save Image for ComfyUI",
+ "reference": "https://github.com/palant/extended-saveimage-comfyui",
+ "files": [
+ "https://github.com/palant/extended-saveimage-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "This custom node is largely identical to the usual Save Image but allows saving images also in JPEG and WEBP formats, the latter with both lossless and lossy compression. Metadata is embedded in the images as usual, and the resulting images can be used to load a workflow."
+ },
+ {
+ "author": "whmc76",
+ "title": "ComfyUI-Openpose-Editor-Plus",
+ "reference": "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus",
+ "files": [
+ "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Openpose Editor Plus"
+ },
+ {
+ "author": "martijnat",
+ "title": "comfyui-previewlatent",
+ "reference": "https://github.com/martijnat/comfyui-previewlatent",
+ "files": [
+ "https://github.com/martijnat/comfyui-previewlatent"
+ ],
+ "install_type": "git-clone",
+ "description": "A ComfyUI plugin for previewing latents without VAE decoding. Useful for showing intermediate results, and can be used as a faster 'preview image' if you don't want to use VAE decode."
+ },
+ {
+ "author": "peteromallet",
+ "title": "ComfyUI-Creative-Interpolation [Beta]",
+ "reference": "https://github.com/peteromallet/ComfyUI-Creative-Interpolation",
+ "files": [
+ "https://github.com/peteromallet/ComfyUI-Creative-Interpolation"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a ComfyUI node for batch creative interpolation. The goal is to allow you to input a batch of images and provide a range of simple settings to control how they are interpolated."
+ },
+ {
+ "author": "gemell1",
+ "title": "ComfyUI_GMIC",
+ "reference": "https://github.com/gemell1/ComfyUI_GMIC",
+ "files": [
+ "https://github.com/gemell1/ComfyUI_GMIC"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:GMIC Image Processing."
+ },
+ {
+ "author": "LonicaMewinsky",
+ "title": "ComfyBreakAnim",
+ "reference": "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame",
+ "files": [
+ "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:BreakFrames, GetKeyFrames, MakeGrid."
+ },
+ {
+ "author": "TheBarret",
+ "title": "ZSuite",
+ "reference": "https://github.com/TheBarret/ZSuite",
+ "files": [
+ "https://github.com/TheBarret/ZSuite"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Prompter, RF Noise, SeedMod."
+ },
+ {
+ "author": "romeobuilderotti",
+ "title": "ComfyUI PNG Metadata",
+ "reference": "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata",
+ "files": [
+ "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata"
+ ],
+ "install_type": "git-clone",
+ "description": "Add custom Metadata fields to your saved PNG files."
+ },
+ {
+ "author": "ka-puna",
+ "title": "comfyui-yanc",
+ "reference": "https://github.com/ka-puna/comfyui-yanc",
+ "files": [
+ "https://github.com/ka-puna/comfyui-yanc"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Concatenate Strings, Format Datetime String, Integer Caster, Multiline String, Truncate String. Yet Another Node Collection, a repository of simple nodes for ComfyUI. This repository eases the addition or removal of custom nodes to itself."
+ },
+ {
+ "author": "amorano",
+ "title": "Jovimetrix Composition Nodes",
+ "reference": "https://github.com/Amorano/Jovimetrix",
+ "files": [
+ "https://github.com/Amorano/Jovimetrix"
+ ],
+ "nodename_pattern": " \\(jov\\)$",
+ "install_type": "git-clone",
+ "description": "Compose like Substance Designer. Webcams, Media Streams (in/out), Tick animation, Color correction, Geometry manipulation, Pixel shader, Polygonal shape generator, Remap image geometry and color. Heavily inspired by the WAS and MTB Node Suites."
+ },
+ {
+ "author": "Umikaze-job",
+ "title": "select_folder_path_easy",
+ "reference": "https://github.com/Umikaze-job/select_folder_path_easy",
+ "files": [
+ "https://github.com/Umikaze-job/select_folder_path_easy"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension simply connects the nodes and sets the output path of the generated images to a manageable path."
+ },
+ {
+ "author": "Niutonian",
+ "title": "ComfyUi-NoodleWebcam",
+ "reference": "https://github.com/Niutonian/ComfyUi-NoodleWebcam",
+ "files": [
+ "https://github.com/Niutonian/ComfyUi-NoodleWebcam"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Noodle webcam is a node that records frames and sends them to your favourite node."
+ },
+ {
+ "author": "Feidorian",
+ "title": "feidorian-ComfyNodes",
+ "reference": "https://github.com/Feidorian/feidorian-ComfyNodes",
+ "nodename_pattern": "^Feidorian_",
+ "files": [
+ "https://github.com/Feidorian/feidorian-ComfyNodes"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension provides various custom nodes: literals, loaders, logic, output, switches."
+ },
+ {
+ "author": "wutipong",
+ "title": "ComfyUI-TextUtils",
+ "reference": "https://github.com/wutipong/ComfyUI-TextUtils",
+ "files": [
+ "https://github.com/wutipong/ComfyUI-TextUtils"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Create N-Token String"
+ },
+ {
+ "author": "natto-maki",
+ "title": "ComfyUI-NegiTools",
+ "reference": "https://github.com/natto-maki/ComfyUI-NegiTools",
+ "files": [
+ "https://github.com/natto-maki/ComfyUI-NegiTools"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:OpenAI DALLe3, OpenAI Translate to English, String Function, Seed Generator"
+ },
+ {
+ "author": "LonicaMewinsky",
+ "title": "ComfyUI-RawSaver",
+ "reference": "https://github.com/LonicaMewinsky/ComfyUI-RawSaver",
+ "files": [
+ "https://github.com/LonicaMewinsky/ComfyUI-RawSaver"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:SaveTifImage. ComfyUI custom node for saving images as uint16 tif files."
+ },
+ {
+ "author": "jojkaart",
+ "title": "ComfyUI-sampler-lcm-alternative",
+ "reference": "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative",
+ "files": [
+ "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:LCMScheduler, SamplerLCMAlternative, SamplerLCMCycle. ComfyUI custom sampler nodes that add new, improved LCM sampler functions."
+ },
+ {
+ "author": "GTSuya-Studio",
+ "title": "ComfyUI-GTSuya-Nodes",
+ "reference": "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes",
+ "files": [
+ "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI-GTSuya-Nodes is a ComfyUI extension designed to add wildcard support to ComfyUI. Wildcards allow you to use __name__ syntax in your prompt to get a random line from a file named name.txt in a wildcards directory."
+ },
+ {
+ "author": "oyvindg",
+ "title": "ComfyUI-TrollSuite",
+ "reference": "https://github.com/oyvindg/ComfyUI-TrollSuite",
+ "files": [
+ "https://github.com/oyvindg/ComfyUI-TrollSuite"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: BinaryImageMask, ImagePadding, LoadLastCreatedImage, RandomMask, TransparentImage."
+ },
+ {
+ "author": "drago87",
+ "title": "ComfyUI_Dragos_Nodes",
+ "reference": "https://github.com/drago87/ComfyUI_Dragos_Nodes",
+ "files": [
+ "https://github.com/drago87/ComfyUI_Dragos_Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:File Padding, Image Info, VAE Loader With Name"
+ },
+ {
+ "author": "ansonkao",
+ "title": "comfyui-geometry",
+ "reference": "https://github.com/ansonkao/comfyui-geometry",
+ "files": [
+ "https://github.com/ansonkao/comfyui-geometry"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Mask to Centroid, Mask to Eigenvector. 
A small collection of custom nodes for use with ComfyUI, for geometry calculations."
+ },
+ {
+ "author": "bronkula",
+ "title": "comfyui-fitsize",
+ "reference": "https://github.com/bronkula/comfyui-fitsize",
+ "files": [
+ "https://github.com/bronkula/comfyui-fitsize"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Fit Size From Int/Image/Resize, Load Image And Resize To Fit, Pick Image From Batch/List, Crop Image Into Even Pieces, Image Region To Mask... A simple set of nodes for making an image fit within a bounding box."
+ },
+ {
+ "author": "kijai",
+ "title": "ComfyUI-SVD",
+ "reference": "https://github.com/kijai/ComfyUI-SVD",
+ "files": [
+ "https://github.com/kijai/ComfyUI-SVD"
+ ],
+ "install_type": "git-clone",
+ "description": "Preliminary use of SVD in ComfyUI.\nNOTE: Quick implementation, unstable. See details in the repository."
+ },
+ {
+ "author": "toyxyz",
+ "title": "ComfyUI_toyxyz_test_nodes",
+ "reference": "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes",
+ "files": [
+ "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "This node was created to send a webcam feed to ComfyUI in real time. It is recommended for use with LCM."
+ },
+ {
+ "author": "thecooltechguy",
+ "title": "ComfyUI Stable Video Diffusion",
+ "reference": "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion",
+ "files": [
+ "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion"
+ ],
+ "install_type": "git-clone",
+ "description": "Easily use Stable Video Diffusion inside ComfyUI!"
+ },
+ {
+ "author": "Danand",
+ "title": "ComfyUI-ComfyCouple",
+ "reference": "https://github.com/Danand/ComfyUI-ComfyCouple",
+ "files": [
+ "https://github.com/Danand/ComfyUI-ComfyCouple"
+ ],
+ "install_type": "git-clone",
+ "description": "Simple custom node which helps generate images of actual couples."
+ },
+ {
+ "author": "42lux",
+ "title": "ComfyUI-safety-checker",
+ "reference": "https://github.com/42lux/ComfyUI-safety-checker",
+ "files": [
+ "https://github.com/42lux/ComfyUI-safety-checker"
+ ],
+ "install_type": "git-clone",
+ "description": "An NSFW/Safety Checker Node for ComfyUI."
+ },
+ {
+ "author": "sergekatzmann",
+ "title": "ComfyUI_Nimbus-Pack",
+ "reference": "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack",
+ "files": [
+ "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Image Square Adapter Node, Image Resize And Crop Node"
+ },
+ {
+ "author": "komojini",
+ "title": "ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes",
+ "reference": "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes",
+ "files": [
+ "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:XL DreamBooth LoRA, S3 Bucket LoRA"
+ },
+ {
+ "author": "ZHO-ZHO-ZHO",
+ "title": "ComfyUI-Text_Image-Composite",
+ "reference": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite",
+ "files": [
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Text_Image_Zho, Text_Image_Multiline_Zho, RGB_Image_Zho, AlphaChanelAddByMask, ImageComposite_Zho, ..."
+ },
+ {
+ "author": "kenjiqq",
+ "title": "qq-nodes-comfyui",
+ "reference": "https://github.com/kenjiqq/qq-nodes-comfyui",
+ "files": [
+ "https://github.com/kenjiqq/qq-nodes-comfyui"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Any List, Image Accumulator Start, Image Accumulator End, Load Lines From Text File, XY Grid Helper, Slice List, Axis To String/Int/Float/Model, ..."
+ },
+ {
+ "author": "80sVectorz",
+ "title": "ComfyUI-Static-Primitives",
+ "reference": "https://github.com/80sVectorz/ComfyUI-Static-Primitives",
+ "files": [
+ "https://github.com/80sVectorz/ComfyUI-Static-Primitives"
+ ],
+ "install_type": "git-clone",
+ "description": "Adds Static Primitives to ComfyUI. Mostly to work with reroute nodes."
+ },
+ {
+ "author": "AbdullahAlfaraj",
+ "title": "Comfy-Photoshop-SD",
+ "reference": "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD",
+ "files": [
+ "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: load Image with metadata, get config data, load image from base64 string, Load Loras From Prompt, Generate Latent Noise, Combine Two Latents Into Batch, General Purpose Controlnet Unit, ControlNet Script, Content Mask Latent, Auto-Photoshop-SD Seed, Expand and Blur the Mask"
+ },
+ {
+ "author": "zhuanqianfish",
+ "title": "EasyCaptureNode for ComfyUI",
+ "reference": "https://github.com/zhuanqianfish/ComfyUI-EasyNode",
+ "files": [
+ "https://github.com/zhuanqianfish/ComfyUI-EasyNode"
+ ],
+ "install_type": "git-clone",
+ "description": "Captures window content from other programs; an easy way to combine with LCM for real-time painting."
+ },
+ {
+ "author": "discopixel-studio",
+ "title": "ComfyUI Discopixel Nodes",
+ "reference": "https://github.com/discopixel-studio/comfyui-discopixel",
+ "files": [
+ "https://github.com/discopixel-studio/comfyui-discopixel"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:TransformTemplateOntoFaceMask, ..."
+ },
+ {
+ "author": "zcfrank1st",
+ "title": "ComfyUI Yolov8",
+ "reference": "https://github.com/zcfrank1st/Comfyui-Yolov8",
+ "files": [
+ "https://github.com/zcfrank1st/Comfyui-Yolov8"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Yolov8Detection, Yolov8Segmentation. Dead simple YOLOv8 ComfyUI plugin."
+ },
+ {
+ "author": "SoftMeng",
+ "title": "ComfyUI_Mexx_Styler",
+ "reference": "https://github.com/SoftMeng/ComfyUI_Mexx_Styler",
+ "files": [
+ "https://github.com/SoftMeng/ComfyUI_Mexx_Styler"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: ComfyUI Mexx Styler, ComfyUI Mexx Styler Advanced"
+ },
+ {
+ "author": "SoftMeng",
+ "title": "ComfyUI_Mexx_Poster",
+ "reference": "https://github.com/SoftMeng/ComfyUI_Mexx_Poster",
+ "files": [
+ "https://github.com/SoftMeng/ComfyUI_Mexx_Poster"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: ComfyUI_Mexx_Poster"
+ },
+ {
+ "author": "wmatson",
+ "title": "easy-comfy-nodes",
+ "reference": "https://github.com/wmatson/easy-comfy-nodes",
+ "files": [
+ "https://github.com/wmatson/easy-comfy-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: HTTP POST, Empty Dict, Assoc Str, Assoc Dict, Assoc Img, Load Img From URL (EZ), Load Img Batch From URLs (EZ), Video Combine + upload (EZ), ..."
+ },
+ {
+ "author": "DrJKL",
+ "title": "ComfyUI-Anchors",
+ "reference": "https://github.com/DrJKL/ComfyUI-Anchors",
+ "files": [
+ "https://github.com/DrJKL/ComfyUI-Anchors"
+ ],
+ "install_type": "git-clone",
+ "description": "A ComfyUI extension to add spatial anchors/waypoints to better navigate large workflows."
+ },
+ {
+ "author": "vanillacode314",
+ "title": "Simple Wildcard",
+ "reference": "https://github.com/vanillacode314/SimpleWildcardsComfyUI",
+ "files": ["https://github.com/vanillacode314/SimpleWildcardsComfyUI"],
+ "install_type": "git-clone",
+ "pip": ["pipe"],
+ "description": "A simple wildcard node for ComfyUI. Can also be used as a style prompt node."
+ },
+ {
+ "author": "WebDev9000",
+ "title": "WebDev9000-Nodes",
+ "reference": "https://github.com/WebDev9000/WebDev9000-Nodes",
+ "files": [
+ "https://github.com/WebDev9000/WebDev9000-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Ignore Braces, Settings Switch."
+ },
+ {
+ "author": "Scholar01",
+ "title": "SComfyUI-Keyframe",
+ "reference": "https://github.com/Scholar01/ComfyUI-Keyframe",
+ "files": [
+ "https://github.com/Scholar01/ComfyUI-Keyframe"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Keyframe Part, Keyframe Interpolation Part, Keyframe Apply."
+ },
+ {
+ "author": "Haoming02",
+ "title": "ComfyUI Diffusion Color Grading",
+ "reference": "https://github.com/Haoming02/comfyui-diffusion-cg",
+ "files": [
+ "https://github.com/Haoming02/comfyui-diffusion-cg"
+ ],
+ "install_type": "git-clone",
+ "description": "This is the ComfyUI port of the joint research between me and TimothyAlexisVass. For more information, check out the original [a/Extension](https://github.com/Haoming02/sd-webui-diffusion-cg) for Automatic1111."
+ },
+ {
+ "author": "bedovyy",
+ "title": "ComfyUI_NAIDGenerator",
+ "reference": "https://github.com/bedovyy/ComfyUI_NAIDGenerator",
+ "files": [
+ "https://github.com/bedovyy/ComfyUI_NAIDGenerator"
+ ],
+ "install_type": "git-clone",
+ "description": "This extension helps generate images through NAI."
+ },
+ {
+ "author": "Off-Live",
+ "title": "ComfyUI-off-suite",
+ "reference": "https://github.com/Off-Live/ComfyUI-off-suite",
+ "files": [
+ "https://github.com/Off-Live/ComfyUI-off-suite"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Image Crop Fit, OFF SEGS to Image, Crop Center with SEGS, Watermarking, GW Number Formatting Node."
+ },
+ {
+ "author": "ningxiaoxiao",
+ "title": "comfyui-NDI",
+ "reference": "https://github.com/ningxiaoxiao/comfyui-NDI",
+ "files": [
+ "https://github.com/ningxiaoxiao/comfyui-NDI"
+ ],
+ "pip": ["ndi-python"],
+ "install_type": "git-clone",
+ "description": "Real-time input/output node for ComfyUI via NDI. Leveraging the powerful linking capabilities of NDI, you can access NDI video stream frames and send images generated by the model to NDI video streams."
+ },
+ {
+ "author": "subtleGradient",
+ "title": "Touchpad two-finger gesture support for macOS",
+ "reference": "https://github.com/subtleGradient/TinkerBot-tech-for-ComfyUI-Touchpad",
+ "files": [
+ "https://github.com/subtleGradient/TinkerBot-tech-for-ComfyUI-Touchpad"
+ ],
+ "install_type": "git-clone",
+ "description": "Two-finger scrolling (vertical and horizontal) to pan the canvas. Two-finger pinch to zoom in and out. Command-scroll up and down to zoom in and out. Fixes [comfyanonymous/ComfyUI#2059](https://github.com/comfyanonymous/ComfyUI/issues/2059)."
+ }, + { + "author": "zcfrank1st", + "title": "comfyui_visual_anagram", + "reference": "https://github.com/zcfrank1st/comfyui_visual_anagrams", + "files": [ + "https://github.com/zcfrank1st/comfyui_visual_anagrams" + ], + "install_type": "git-clone", + "description": "Nodes:visual_anagrams_sample, visual_anagrams_animate" + }, + { + "author": "Electrofried", + "title": "OpenAINode", + "reference": "https://github.com/Electrofried/ComfyUI-OpenAINode", + "files": [ + "https://github.com/Electrofried/ComfyUI-OpenAINode" + ], + "install_type": "git-clone", + "description": "A simply node for hooking in to openAI API based servers via comfyUI" + }, + { + "author": "AustinMroz", + "title": "SpliceTools", + "reference": "https://github.com/AustinMroz/ComfyUI-SpliceTools", + "files": [ + "https://github.com/AustinMroz/ComfyUI-SpliceTools" + ], + "install_type": "git-clone", + "description": "Experimental utility nodes with a focus on manipulation of noised latents" + }, + { + "author": "11cafe", + "title": "ComfyUI Workspace Manager - Comfyspace", + "reference": "https://github.com/11cafe/comfyui-workspace-manager", + "files": [ + "https://github.com/11cafe/comfyui-workspace-manager" + ], + "install_type": "git-clone", + "description": "A ComfyUI custom node for project management to centralize the management of all your workflows in one place. Seamlessly switch between workflows, create and update them within a single workspace, like Google Docs." + }, + { + "author": "thecooltechguy", + "title": "ComfyUI-MagicAnimate", + "reference": "https://github.com/thecooltechguy/ComfyUI-MagicAnimate", + "files": [ + "https://github.com/thecooltechguy/ComfyUI-MagicAnimate" + ], + "install_type": "git-clone", + "description": "Easily use Magic Animate within ComfyUI!" + }, + { + "author": "knuknX", + "title": "ComfyUI-Image-Tools", + "reference": "https://github.com/knuknX/ComfyUI-Image-Tools", + "files": [ + "https://github.com/knuknX/ComfyUI-Image-Tools" + ], + "install_type": "git-clone", + "description": "Nodes:BatchImageResizeProcessor, SingleImagePathLoader, SingleImageUrlLoader" + }, + { + "author": "jtrue", + "title": "ComfyUI-JaRue", + "reference": "https://github.com/jtrue/ComfyUI-JaRue", + "files": [ + "https://github.com/jtrue/ComfyUI-JaRue" + ], + "install_type": "git-clone", + "description": "A collection of nodes powering a tensor oracle on a home network with automation" + }, + { + "author": "filliptm", + "title": "ComfyUI_Fill-Nodes", + "reference": "https://github.com/filliptm/ComfyUI_Fill-Nodes", + "files": [ + "https://github.com/filliptm/ComfyUI_Fill-Nodes" + ], + "install_type": "git-clone", + "description": "Nodes:FL Image Randomizer. The start of a pack that I will continue to build out to fill the gaps of nodes and functionality that I feel is missing in comfyUI" + }, + { + "author": "Ser-Hilary", + "title": "SDXL_sizing", + "reference": "https://github.com/Ser-Hilary/SDXL_sizing", + "files": [ + "https://github.com/Ser-Hilary/SDXL_sizing/raw/main/conditioning_sizing_for_SDXL.py" + ], + "install_type": "copy", + "description": "Nodes:sizing_node. Size calculation node related to image size in prompts supported by SDXL." + }, + { + "author": "ailex000", + "title": "Image Gallery", + "reference": "https://github.com/ailex000/ComfyUI-Extensions", + "js_path": "image-gallery", + "files": [ + "https://github.com/ailex000/ComfyUI-Extensions/raw/main/image-gallery/imageGallery.js" + ], + "install_type": "copy", + "description": "Custom javascript extensions for better UX for ComfyUI. 
Supported nodes: PreviewImage, SaveImage. Double-click an image to open it." + }, + { + "author": "rock-land", + "title": "graphNavigator", + "reference": "https://github.com/rock-land/graphNavigator", + "js_path": "graphNavigator", + "files": [ + "https://github.com/rock-land/graphNavigator/raw/main/graphNavigator/graphNavigator.js" + ], + "install_type": "copy", + "description": "ComfyUI Web Extension for saving views and navigating graphs." + }, + { + "author": "diffus3", + "title": "diffus3/ComfyUI-extensions", + "reference": "https://github.com/diffus3/ComfyUI-extensions", + "js_path": "diffus3", + "files": [ + "https://github.com/diffus3/ComfyUI-extensions/raw/main/multiReroute/multireroute.js", + "https://github.com/diffus3/ComfyUI-extensions/raw/main/setget/setget.js" + ], + "install_type": "copy", + "description": "Extensions: subgraph, setget, multiReroute" + }, + { + "author": "m957ymj75urz", + "title": "m957ymj75urz/ComfyUI-Custom-Nodes", + "reference": "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes", + "js_path": "m957ymj75urz", + "files": [ + "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes/raw/main/clip-text-encode-split/clip_text_encode_split.py", + "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes/raw/main/colors/colors.js" + ], + "install_type": "copy", + "description": "Nodes: RawText, RawTextCLIPEncode, RawTextCombine, RawTextReplace, Extension: m957ymj75urz.colors" + }, + { + "author": "Bikecicle", + "title": "Waveform Extensions", + "reference": "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions", + "files": [ + "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions/raw/main/EXT_AudioManipulation.py", + "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions/raw/main/EXT_VariationUtils.py" + ], + "install_type": "copy", + "description": "Some additional audio utilities for use on top of the Sample Diffusion ComfyUI Extension" + }, + { + "author": "dawangraoming", + "title": "KSampler GPU", + "reference": "https://github.com/dawangraoming/ComfyUI_ksampler_gpu", + "files": [ + "https://github.com/dawangraoming/ComfyUI_ksampler_gpu/raw/main/ksampler_gpu.py" + ], + "install_type": "copy", + "description": "Provides a KSampler based on GPU random noise" + }, + { + "author": "fitCorder", + "title": "fcSuite", + "reference": "https://github.com/fitCorder/fcSuite", + "files": [ + "https://github.com/fitCorder/fcSuite/raw/main/fcSuite.py" + ], + "install_type": "copy", + "description": "fcFloatMatic is a custom module that, when configured correctly, increments through the lines, generating your loras at different strengths. The config is loaded from the JSON file." + }, + { + "author": "lrzjason", + "title": "ComfyUIJasonNode", + "reference": "https://github.com/lrzjason/ComfyUIJasonNode", + "files": [ + "https://github.com/lrzjason/ComfyUIJasonNode/raw/main/SDXLMixSampler.py", + "https://github.com/lrzjason/ComfyUIJasonNode/raw/main/LatentByRatio.py" + ], + "install_type": "copy", + "description": "Nodes:SDXLMixSampler, LatentByRatio" + }, + { + "author": "lordgasmic", + "title": "Wildcards", + "reference": "https://github.com/lordgasmic/ComfyUI-Wildcards", + "files": [ + "https://github.com/lordgasmic/ComfyUI-Wildcards/raw/master/wildcards.py" + ], + "install_type": "copy", + "description": "Nodes:CLIPTextEncodeWithWildcards. A wildcard node that operates based on the seed."
+ }, + { + "author": "throttlekitty", + "title": "SDXLCustomAspectRatio", + "reference": "https://github.com/throttlekitty/SDXLCustomAspectRatio", + "files": [ + "https://raw.githubusercontent.com/throttlekitty/SDXLCustomAspectRatio/main/SDXLAspectRatio.py" + ], + "install_type": "copy", + "description": "A quick and easy ComfyUI custom node for setting SDXL-friendly aspect ratios." + }, + { + "author": "s1dlx", + "title": "comfy_meh", + "reference": "https://github.com/s1dlx/comfy_meh", + "files": [ + "https://github.com/s1dlx/comfy_meh/raw/main/meh.py" + ], + "install_type": "copy", + "description": "Advanced merging methods." + }, + { + "author": "tudal", + "title": "Hakkun-ComfyUI-nodes", + "reference": "https://github.com/tudal/Hakkun-ComfyUI-nodes", + "files": [ + "https://github.com/tudal/Hakkun-ComfyUI-nodes/raw/main/hakkun_nodes.py" + ], + "install_type": "copy", + "description": "Nodes: Prompt parser. ComfyUI extra nodes. Mostly prompt parsing." + }, + { + "author": "SadaleNet", + "title": "ComfyUI A1111-like Prompt Custom Node Solution", + "reference": "https://github.com/SadaleNet/CLIPTextEncodeA1111-ComfyUI", + "files": [ + "https://github.com/SadaleNet/CLIPTextEncodeA1111-ComfyUI/raw/master/custom_nodes/clip_text_encoder_a1111.py" + ], + "install_type": "copy", + "description": "Nodes: CLIPTextEncodeA1111, RerouteTextForCLIPTextEncodeA1111." + }, + { + "author": "wsippel", + "title": "SDXLResolutionPresets", + "reference": "https://github.com/wsippel/comfyui_ws", + "files": [ + "https://github.com/wsippel/comfyui_ws/raw/main/sdxl_utility.py" + ], + "install_type": "copy", + "description": "Nodes: SDXLResolutionPresets. Easy access to the officially supported resolutions, in both horizontal and vertical formats: 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640" + }, + { + "author": "nicolai256", + "title": "comfyUI_Nodes_nicolai256", + "reference": "https://github.com/nicolai256/comfyUI_Nodes_nicolai256", + "files": [ + "https://github.com/nicolai256/comfyUI_Nodes_nicolai256/raw/main/yugioh-presets.py" + ], + "install_type": "copy", + "description": "Nodes: yugioh_Presets. by Nicolai256 inspired by throttlekitty SDXLAspectRatio" + }, + { + "author": "Onierous", + "title": "QRNG_Node_ComfyUI", + "reference": "https://github.com/Onierous/QRNG_Node_ComfyUI", + "files": [ + "https://github.com/Onierous/QRNG_Node_ComfyUI/raw/main/qrng_node.py" + ], + "install_type": "copy", + "description": "Nodes: QRNG Node CSV. A node that takes in an array of random numbers from the ANU QRNG API and stores them locally for generating quantum random number noise_seeds in ComfyUI" + }, + { + "author": "ntdviet", + "title": "ntdviet/comfyui-ext", + "reference": "https://github.com/ntdviet/comfyui-ext", + "files": [ + "https://github.com/ntdviet/comfyui-ext/raw/main/custom_nodes/gcLatentTunnel/gcLatentTunnel.py" + ], + "install_type": "copy", + "description": "Nodes:LatentGarbageCollector. This ComfyUI custom node flushes the GPU cache and empty cuda interprocess memory. It's helpfull for low memory environment such as the free Google Colab, especially when the workflow VAE decode latents of the size above 1500x1500." + }, + { + "author": "alkemann", + "title": "alkemann nodes", + "reference": "https://gist.github.com/alkemann/7361b8eb966f29c8238fd323409efb68", + "files": [ + "https://gist.github.com/alkemann/7361b8eb966f29c8238fd323409efb68/raw/f9605be0b38d38d3e3a2988f89248ff557010076/alkemann.py" + ], + "install_type": "copy", + "description": "Nodes:Int to Text, Seed With Text, Save A1 Image." 
+ }, + { + "author": "catscandrive", + "title": "Image loader with subfolders", + "reference": "https://github.com/catscandrive/comfyui-imagesubfolders", + "files": [ + "https://github.com/catscandrive/comfyui-imagesubfolders/raw/main/loadImageWithSubfolders.py" + ], + "install_type": "copy", + "description": "Adds an Image Loader node that also shows images in subfolders of the default input directory" + }, + { + "author": "Smuzzies", + "title": "Chatbox Overlay node for ComfyUI", + "reference": "https://github.com/Smuzzies/comfyui_chatbox_overlay", + "files": [ + "https://github.com/Smuzzies/comfyui_chatbox_overlay/raw/main/chatbox_overlay.py" + ], + "install_type": "copy", + "description": "Nodes: Chatbox Overlay. Custom node for ComfyUI to add a text box over a processed image before save node." + }, + { + "author": "CaptainGrock", + "title": "ComfyUIInvisibleWatermark", + "reference": "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark", + "files": [ + "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark/raw/main/Invisible%20Watermark.py" + ], + "install_type": "copy", + "description": "Nodes:Apply Invisible Watermark, Extract Watermark. Adds up to 12 characters encoded into an image that can be extracted." + }, + { + "author": "fearnworks", + "title": "Fearnworks Custom Nodes", + "reference": "https://github.com/fearnworks/ComfyUI_FearnworksNodes", + "files": [ + "https://github.com/fearnworks/ComfyUI_FearnworksNodes/raw/main/fw_nodes.py" + ], + "install_type": "copy", + "description": "A collection of ComfyUI nodes. These nodes are tailored for specific tasks, such as counting files in directories and sorting text segments based on token counts. Currently this is only tested on SDXL 1.0 models. An additional swich is needed to hand 1.x" + }, + { + "author": "theally", + "title": "TheAlly's Custom Nodes", + "reference": "https://civitai.com/models/19625?modelVersionId=23296", + "files": [ + "https://civitai.com/api/download/models/25114", + "https://civitai.com/api/download/models/24679", + "https://civitai.com/api/download/models/24154", + "https://civitai.com/api/download/models/23884", + "https://civitai.com/api/download/models/23649", + "https://civitai.com/api/download/models/23467", + "https://civitai.com/api/download/models/23296" + ], + "install_type": "unzip", + "description": "Custom nodes for ComfyUI by TheAlly." + }, + { + "author": "xss", + "title": "Custom Nodes by xss", + "reference": "https://civitai.com/models/24869/comfyui-custom-nodes-by-xss", + "files": [ + "https://civitai.com/api/download/models/32717", + "https://civitai.com/api/download/models/47776", + "https://civitai.com/api/download/models/29772", + "https://civitai.com/api/download/models/31618", + "https://civitai.com/api/download/models/31591", + "https://civitai.com/api/download/models/29773", + "https://civitai.com/api/download/models/29774", + "https://civitai.com/api/download/models/29755", + "https://civitai.com/api/download/models/29750" + ], + "install_type": "unzip", + "description": "Various image processing nodes." + }, + { + "author": "aimingfail", + "title": "Image2Halftone Node for ComfyUI", + "reference": "https://civitai.com/models/143293/image2halftone-node-for-comfyui", + "files": [ + "https://civitai.com/api/download/models/158997" + ], + "install_type": "unzip", + "description": "This is a node to convert an image into a CMYK Halftone dot image." 
+ } + ] +} diff --git a/custom_nodes/ComfyUI-Manager/extension-node-map.json b/custom_nodes/ComfyUI-Manager/extension-node-map.json new file mode 100644 index 0000000000000000000000000000000000000000..8ae906e8dedadbb163850187623efb229562539d --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/extension-node-map.json @@ -0,0 +1,5495 @@ +{ + "https://gist.github.com/alkemann/7361b8eb966f29c8238fd323409efb68/raw/f9605be0b38d38d3e3a2988f89248ff557010076/alkemann.py": [ + [ + "Int to Text", + "Save A1 Image", + "Seed With Text" + ], + { + "title_aux": "alkemann nodes" + } + ], + "https://github.com/0xbitches/ComfyUI-LCM": [ + [ + "LCM_Sampler", + "LCM_Sampler_Advanced", + "LCM_img2img_Sampler", + "LCM_img2img_Sampler_Advanced" + ], + { + "title_aux": "Latent Consistency Model for ComfyUI" + } + ], + "https://github.com/42lux/ComfyUI-safety-checker": [ + [ + "Safety Checker" + ], + { + "title_aux": "ComfyUI-safety-checker" + } + ], + "https://github.com/80sVectorz/ComfyUI-Static-Primitives": [ + [ + "FloatStaticPrimitive", + "IntStaticPrimitive", + "StringMlStaticPrimitive", + "StringStaticPrimitive" + ], + { + "title_aux": "ComfyUI-Static-Primitives" + } + ], + "https://github.com/AIrjen/OneButtonPrompt": [ + [ + "CreatePromptVariant", + "OneButtonPrompt", + "SavePromptToFile" + ], + { + "title_aux": "One Button Prompt" + } + ], + "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD": [ + [ + "APS_LatentBatch", + "APS_Seed", + "ContentMaskLatent", + "ControlNetScript", + "ControlnetUnit", + "GaussianLatentImage", + "GetConfig", + "LoadImageBase64", + "LoadImageWithMetaData", + "LoadLorasFromPrompt", + "MaskExpansion" + ], + { + "title_aux": "Comfy-Photoshop-SD" + } + ], + "https://github.com/AbyssYuan0/ComfyUI_BadgerTools": [ + [ + "FloatToInt-badger", + "FloatToString-badger", + "ImageNormalization-badger", + "ImageOverlap-badger", + "ImageScaleToSide-badger", + "IntToString-badger", + "StringToFizz-badger", + "TextListToString-badger" + ], + { + "title_aux": "ComfyUI_BadgerTools" + } + ], + "https://github.com/Acly/comfyui-tooling-nodes": [ + [ + "ETN_ApplyMaskToImage", + "ETN_CropImage", + "ETN_LoadImageBase64", + "ETN_LoadMaskBase64", + "ETN_SendImageWebSocket" + ], + { + "title_aux": "ComfyUI Nodes for External Tooling" + } + ], + "https://github.com/Amorano/Jovimetrix": [ + [], + { + "author": "amorano", + "description": "Procedural & Compositing. 
Includes a Webcam node.", + "nodename_pattern": " \\(jov\\)$", + "title": "Jovimetrix Composition Pack", + "title_aux": "Jovimetrix Composition Nodes" + } + ], + "https://github.com/ArtBot2023/CharacterFaceSwap": [ + [ + "Color Blend", + "Crop Face", + "Exclude Facial Feature", + "Generation Parameter Input", + "Generation Parameter Output", + "Image Full BBox", + "Load BiseNet", + "Load RetinaFace", + "Mask Contour", + "Segment Face", + "Uncrop Face" + ], + { + "title_aux": "Character Face Swap" + } + ], + "https://github.com/ArtVentureX/comfyui-animatediff": [ + [ + "AnimateDiffCombine", + "AnimateDiffLoraLoader", + "AnimateDiffModuleLoader", + "AnimateDiffSampler", + "AnimateDiffSlidingWindowOptions", + "ImageSizeAndBatchSize", + "LoadVideo" + ], + { + "title_aux": "AnimateDiff" + } + ], + "https://github.com/AustinMroz/ComfyUI-SpliceTools": [ + [ + "LogSigmas", + "SpliceDenoised", + "SpliceLatents", + "TemporalSplice" + ], + { + "title_aux": "SpliceTools" + } + ], + "https://github.com/BadCafeCode/masquerade-nodes-comfyui": [ + [ + "Blur", + "Change Channel Count", + "Combine Masks", + "Constant Mask", + "Convert Color Space", + "Create QR Code", + "Create Rect Mask", + "Cut By Mask", + "Get Image Size", + "Image To Mask", + "Make Image Batch", + "Mask By Text", + "Mask Morphology", + "Mask To Region", + "MasqueradeIncrementer", + "Mix Color By Mask", + "Mix Images By Mask", + "Paste By Mask", + "Prune By Mask", + "Separate Mask Components", + "Unary Image Op", + "Unary Mask Op" + ], + { + "title_aux": "Masquerade Nodes" + } + ], + "https://github.com/Beinsezii/bsz-cui-extras": [ + [ + "BSZAbsoluteHires", + "BSZAspectHires", + "BSZColoredLatentImageXL", + "BSZCombinedHires", + "BSZHueChromaXL", + "BSZInjectionKSampler", + "BSZLatentDebug", + "BSZLatentFill", + "BSZLatentGradient", + "BSZLatentHSVAImage", + "BSZLatentOffsetXL", + "BSZLatentRGBAImage", + "BSZLatentbuster", + "BSZPixelbuster", + "BSZPixelbusterHelp", + "BSZPrincipledConditioning", + "BSZPrincipledSampler", + "BSZPrincipledScale", + "BSZStrangeResample" + ], + { + "title_aux": "bsz-cui-extras" + } + ], + "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions/raw/main/EXT_AudioManipulation.py": [ + [ + "BatchJoinAudio", + "CutAudio", + "DuplicateAudio", + "JoinAudio", + "ResampleAudio", + "ReverseAudio", + "StretchAudio" + ], + { + "title_aux": "Waveform Extensions" + } + ], + "https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb": [ + [ + "BNK_AddCLIPSDXLParams", + "BNK_AddCLIPSDXLRParams", + "BNK_CLIPTextEncodeAdvanced", + "BNK_CLIPTextEncodeSDXLAdvanced" + ], + { + "title_aux": "Advanced CLIP Text Encode" + } + ], + "https://github.com/BlenderNeko/ComfyUI_Cutoff": [ + [ + "BNK_CutoffBasePrompt", + "BNK_CutoffRegionsToConditioning", + "BNK_CutoffRegionsToConditioning_ADV", + "BNK_CutoffSetRegions" + ], + { + "title_aux": "ComfyUI Cutoff" + } + ], + "https://github.com/BlenderNeko/ComfyUI_Noise": [ + [ + "BNK_DuplicateBatchIndex", + "BNK_GetSigma", + "BNK_InjectNoise", + "BNK_NoisyLatentImage", + "BNK_SlerpLatent", + "BNK_Unsampler" + ], + { + "title_aux": "ComfyUI Noise" + } + ], + "https://github.com/BlenderNeko/ComfyUI_SeeCoder": [ + [ + "ConcatConditioning", + "SEECoderImageEncode" + ], + { + "title_aux": "SeeCoder [WIP]" + } + ], + "https://github.com/BlenderNeko/ComfyUI_TiledKSampler": [ + [ + "BNK_TiledKSampler", + "BNK_TiledKSamplerAdvanced" + ], + { + "title_aux": "Tiled sampling for ComfyUI" + } + ], + "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark/raw/main/Invisible%20Watermark.py": [ + [ + "Apply 
Invisible Watermark", + "Extract Watermark" + ], + { + "title_aux": "ComfyUIInvisibleWatermark" + } + ], + "https://github.com/Chaoses-Ib/ComfyUI_Ib_CustomNodes": [ + [ + "LoadImageFromPath" + ], + { + "title_aux": "ComfyUI_Ib_CustomNodes" + } + ], + "https://github.com/Clybius/ComfyUI-Latent-Modifiers": [ + [ + "Latent Diffusion Mega Modifier" + ], + { + "title_aux": "ComfyUI-Latent-Modifiers" + } + ], + "https://github.com/Danand/ComfyUI-ComfyCouple": [ + [ + "Attention couple", + "Comfy Couple" + ], + { + "author": "Rei D.", + "description": "If you want to draw two different characters together without blending their features, so you could try to check out this custom node.", + "nickname": "Danand", + "title": "Comfy Couple", + "title_aux": "ComfyUI-ComfyCouple" + } + ], + "https://github.com/Davemane42/ComfyUI_Dave_CustomNode": [ + [ + "ABGRemover", + "ConditioningStretch", + "ConditioningUpscale", + "MultiAreaConditioning", + "MultiLatentComposite" + ], + { + "title_aux": "Visual Area Conditioning / Latent composition" + } + ], + "https://github.com/Derfuu/Derfuu_ComfyUI_ModdedNodes": [ + [ + "ABSNode_DF", + "Absolute value", + "Ceil", + "CeilNode_DF", + "Conditioning area scale by ratio", + "ConditioningSetArea with tuples", + "ConditioningSetAreaEXT_DF", + "ConditioningSetArea_DF", + "CosNode_DF", + "Cosines", + "Divide", + "DivideNode_DF", + "EmptyLatentImage_DF", + "Float", + "Float debug print", + "Float2Tuple_DF", + "FloatDebugPrint_DF", + "FloatNode_DF", + "Floor", + "FloorNode_DF", + "Get image size", + "Get latent size", + "GetImageSize_DF", + "GetLatentSize_DF", + "Image scale by ratio", + "Image scale to side", + "ImageScale_Ratio_DF", + "ImageScale_Side_DF", + "Int debug print", + "Int to float", + "Int to tuple", + "Int2Float_DF", + "IntDebugPrint_DF", + "Integer", + "IntegerNode_DF", + "Latent Scale by ratio", + "Latent Scale to side", + "LatentComposite with tuples", + "LatentScale_Ratio_DF", + "LatentScale_Side_DF", + "MultilineStringNode_DF", + "Multiply", + "MultiplyNode_DF", + "PowNode_DF", + "Power", + "Random", + "RandomFloat_DF", + "SinNode_DF", + "Sinus", + "SqrtNode_DF", + "Square root", + "String debug print", + "StringNode_DF", + "Subtract", + "SubtractNode_DF", + "Sum", + "SumNode_DF", + "TanNode_DF", + "Tangent", + "Text", + "Text box", + "Tuple", + "Tuple debug print", + "Tuple multiply", + "Tuple swap", + "Tuple to floats", + "Tuple to ints", + "Tuple2Float_DF", + "TupleDebugPrint_DF", + "TupleNode_DF" + ], + { + "title_aux": "Derfuu_ComfyUI_ModdedNodes" + } + ], + "https://github.com/Electrofried/ComfyUI-OpenAINode": [ + [ + "OpenAINode" + ], + { + "title_aux": "OpenAINode" + } + ], + "https://github.com/EllangoK/ComfyUI-post-processing-nodes": [ + [ + "ArithmeticBlend", + "AsciiArt", + "Blend", + "Blur", + "CannyEdgeMask", + "ChromaticAberration", + "ColorCorrect", + "ColorTint", + "Dissolve", + "Dither", + "DodgeAndBurn", + "FilmGrain", + "Glow", + "HSVThresholdMask", + "KMeansQuantize", + "KuwaharaBlur", + "Parabolize", + "PencilSketch", + "PixelSort", + "Pixelize", + "Quantize", + "Sharpen", + "SineWave", + "Solarize", + "Vignette" + ], + { + "title_aux": "ComfyUI-post-processing-nodes" + } + ], + "https://github.com/Extraltodeus/LoadLoraWithTags": [ + [ + "LoraLoaderTagsQuery" + ], + { + "title_aux": "LoadLoraWithTags" + } + ], + "https://github.com/Extraltodeus/noise_latent_perlinpinpin": [ + [ + "NoisyLatentPerlin" + ], + { + "title_aux": "noise latent perlinpinpin" + } + ], + "https://github.com/Fannovel16/ComfyUI-Frame-Interpolation": [ + [ + 
"AMT VFI", + "CAIN VFI", + "EISAI VFI", + "FILM VFI", + "FLAVR VFI", + "GMFSS Fortuna VFI", + "IFRNet VFI", + "IFUnet VFI", + "KSampler Gradually Adding More Denoise (efficient)", + "M2M VFI", + "Make Interpolation State List", + "RIFE VFI", + "STMFNet VFI", + "Sepconv VFI" + ], + { + "title_aux": "ComfyUI Frame Interpolation" + } + ], + "https://github.com/Fannovel16/ComfyUI-Loopchain": [ + [ + "EmptyLatentImageLoop", + "FolderToImageStorage", + "ImageStorageExportLoop", + "ImageStorageImport", + "ImageStorageReset", + "LatentStorageExportLoop", + "LatentStorageImport", + "LatentStorageReset" + ], + { + "title_aux": "ComfyUI Loopchain" + } + ], + "https://github.com/Fannovel16/ComfyUI-MotionDiff": [ + [ + "EmptyMotionData", + "ExportSMPLTo3DSoftware", + "MotionCLIPTextEncode", + "MotionDataVisualizer", + "MotionDiffLoader", + "MotionDiffSimpleSampler", + "RenderSMPLMesh", + "SMPLLoader", + "SaveSMPL", + "SmplifyMotionData" + ], + { + "title_aux": "ComfyUI MotionDiff" + } + ], + "https://github.com/Fannovel16/ComfyUI-Video-Matting": [ + [ + "Robust Video Matting" + ], + { + "title_aux": "ComfyUI-Video-Matting" + } + ], + "https://github.com/Fannovel16/comfyui_controlnet_aux": [ + [ + "AIO_Preprocessor", + "AnimalPosePreprocessor", + "AnimeFace_SemSegPreprocessor", + "AnimeLineArtPreprocessor", + "BAE-NormalMapPreprocessor", + "BinaryPreprocessor", + "CannyEdgePreprocessor", + "ColorPreprocessor", + "DWPreprocessor", + "DensePosePreprocessor", + "FakeScribblePreprocessor", + "HEDPreprocessor", + "HintImageEnchance", + "ImageGenResolutionFromImage", + "ImageGenResolutionFromLatent", + "InpaintPreprocessor", + "LeReS-DepthMapPreprocessor", + "LineArtPreprocessor", + "M-LSDPreprocessor", + "Manga2Anime_LineArt_Preprocessor", + "MediaPipe-FaceMeshPreprocessor", + "MiDaS-DepthMapPreprocessor", + "MiDaS-NormalMapPreprocessor", + "OneFormer-ADE20K-SemSegPreprocessor", + "OneFormer-COCO-SemSegPreprocessor", + "OpenposePreprocessor", + "PiDiNetPreprocessor", + "PixelPerfectResolution", + "SAMPreprocessor", + "ScribblePreprocessor", + "Scribble_XDoG_Preprocessor", + "SemSegPreprocessor", + "ShufflePreprocessor", + "TilePreprocessor", + "UniFormer-SemSegPreprocessor", + "Zoe-DepthMapPreprocessor" + ], + { + "author": "tstandley", + "title_aux": "ComfyUI's ControlNet Auxiliary Preprocessors" + } + ], + "https://github.com/Feidorian/feidorian-ComfyNodes": [ + [], + { + "nodename_pattern": "^Feidorian_", + "title_aux": "feidorian-ComfyNodes" + } + ], + "https://github.com/Fictiverse/ComfyUI_Fictiverse": [ + [ + "Add Noise to Image with Mask", + "Color correction", + "Displace Image with Depth", + "Displace Images with Mask", + "Zoom Image with Depth" + ], + { + "title_aux": "ComfyUI Fictiverse Nodes" + } + ], + "https://github.com/FizzleDorf/ComfyUI-AIT": [ + [ + "AIT_Unet_Loader", + "AIT_VAE_Encode_Loader" + ], + { + "title_aux": "ComfyUI-AIT" + } + ], + "https://github.com/FizzleDorf/ComfyUI_FizzNodes": [ + [ + "AbsCosWave", + "AbsSinWave", + "BatchGLIGENSchedule", + "BatchPromptSchedule", + "BatchPromptScheduleEncodeSDXL", + "BatchPromptScheduleLatentInput", + "BatchPromptScheduleNodeFlowEnd", + "BatchPromptScheduleSDXLLatentInput", + "BatchStringSchedule", + "BatchValueSchedule", + "BatchValueScheduleLatentInput", + "CalculateFrameOffset", + "ConcatStringSingle", + "CosWave", + "FizzFrame", + "FizzFrameConcatenate", + "Init FizzFrame", + "InvCosWave", + "InvSinWave", + "Lerp", + "PromptSchedule", + "PromptScheduleEncodeSDXL", + "PromptScheduleNodeFlow", + "PromptScheduleNodeFlowEnd", + 
"SawtoothWave", + "SinWave", + "SquareWave", + "StringConcatenate", + "StringSchedule", + "TriangleWave", + "ValueSchedule", + "convertKeyframeKeysToBatchKeys" + ], + { + "title_aux": "FizzNodes" + } + ], + "https://github.com/GMapeSplat/ComfyUI_ezXY": [ + [ + "ConcatenateString", + "ItemFromDropdown", + "IterationDriver", + "JoinImages", + "LineToConsole", + "NumberFromList", + "NumbersToList", + "PlotImages", + "StringFromList", + "StringToLabel", + "StringsToList", + "ezMath", + "ezXY_AssemblePlot", + "ezXY_Driver" + ], + { + "title_aux": "ezXY scripts and nodes" + } + ], + "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes": [ + [ + "Danbooru (ID)", + "Danbooru (Random)", + "Replace Strings", + "Simple Wildcards", + "Simple Wildcards (Dir.)", + "Wildcards Nodes" + ], + { + "title_aux": "ComfyUI-GTSuya-Nodes" + } + ], + "https://github.com/Gourieff/comfyui-reactor-node": [ + [ + "ReActorFaceSwap", + "ReActorLoadFaceModel", + "ReActorSaveFaceModel" + ], + { + "title_aux": "ReActor Node for ComfyUI" + } + ], + "https://github.com/Haoming02/comfyui-diffusion-cg": [ + [ + "Hook Recenter", + "Hook Recenter XL", + "Normalization", + "NormalizationXL", + "Tensor Debug", + "Unhook Recenter" + ], + { + "title_aux": "ComfyUI Diffusion Color Grading" + } + ], + "https://github.com/JPS-GER/ComfyUI_JPS-Nodes": [ + [ + "Conditioning Switch (JPS)", + "ControlNet Switch (JPS)", + "Crop Image Square (JPS)", + "Crop Image TargetSize (JPS)", + "Disable Enable Switch (JPS)", + "Enable Disable Switch (JPS)", + "Generation Settings (JPS)", + "Generation Settings Pipe (JPS)", + "Generation TXT IMG Settings (JPS)", + "Get Date Time String (JPS)", + "Get Image Size (JPS)", + "IP Adapter Settings (JPS)", + "IP Adapter Settings Pipe (JPS)", + "Image Switch (JPS)", + "Images Masks MultiPipe (JPS)", + "Integer Switch (JPS)", + "Largest Int (JPS)", + "Latent Switch (JPS)", + "Lora Loader (JPS)", + "Mask Switch (JPS)", + "Model Switch (JPS)", + "Multiply Float Float (JPS)", + "Multiply Int Float (JPS)", + "Multiply Int Int (JPS)", + "Resolution Multiply (JPS)", + "Revision Settings (JPS)", + "Revision Settings Pipe (JPS)", + "SDXL Basic Settings (JPS)", + "SDXL Basic Settings Pipe (JPS)", + "SDXL Fundamentals MultiPipe (JPS)", + "SDXL Prompt Handling (JPS)", + "SDXL Prompt Handling Plus (JPS)", + "SDXL Prompt Styler (JPS)", + "SDXL Recommended Resolution Calc (JPS)", + "SDXL Resolutions (JPS)", + "Sampler Scheduler Settings (JPS)", + "Substract Int Int (JPS)", + "Text Concatenate (JPS)", + "VAE Switch (JPS)" + ], + { + "author": "JPS", + "description": "Various nodes to handle SDXL Resolutions, SDXL Basic Settings, IP Adapter Settings, Revision Settings, SDXL Prompt Styler, Crop Image to Square, Crop Image to Target Size, Get Date-Time String, Resolution Multiply, Largest Integer, 5-to-1 Switches for Integer, Images, Latents, Conditioning, Model, VAE, ControlNet", + "nickname": "JPS Custom Nodes", + "title": "JPS Custom Nodes for ComfyUI", + "title_aux": "JPS Custom Nodes for ComfyUI" + } + ], + "https://github.com/Jcd1230/rembg-comfyui-node": [ + [ + "Image Remove Background (rembg)" + ], + { + "title_aux": "Rembg Background Removal Node for ComfyUI" + } + ], + "https://github.com/Jordach/comfy-plasma": [ + [ + "JDC_AutoContrast", + "JDC_BlendImages", + "JDC_BrownNoise", + "JDC_Contrast", + "JDC_EqualizeGrey", + "JDC_GaussianBlur", + "JDC_GreyNoise", + "JDC_Greyscale", + "JDC_ImageLoader", + "JDC_ImageLoaderMeta", + "JDC_PinkNoise", + "JDC_Plasma", + "JDC_PlasmaSampler", + "JDC_PowerImage", + "JDC_RandNoise", + 
"JDC_ResizeFactor" + ], + { + "title_aux": "comfy-plasma" + } + ], + "https://github.com/Kaharos94/ComfyUI-Saveaswebp": [ + [ + "Save_as_webp" + ], + { + "title_aux": "ComfyUI-Saveaswebp" + } + ], + "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet": [ + [ + "ACN_AdvancedControlNetApply", + "ACN_DefaultUniversalWeights", + "ControlNetLoaderAdvanced", + "CustomControlNetWeights", + "CustomT2IAdapterWeights", + "DiffControlNetLoaderAdvanced", + "LatentKeyframe", + "LatentKeyframeBatchedGroup", + "LatentKeyframeGroup", + "LatentKeyframeTiming", + "LoadImagesFromDirectory", + "ScaledSoftControlNetWeights", + "ScaledSoftMaskedUniversalWeights", + "SoftControlNetWeights", + "SoftT2IAdapterWeights", + "TimestepKeyframe" + ], + { + "title_aux": "ComfyUI-Advanced-ControlNet" + } + ], + "https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved": [ + [ + "ADE_AnimateDiffCombine", + "ADE_AnimateDiffLoRALoader", + "ADE_AnimateDiffLoaderV1Advanced", + "ADE_AnimateDiffLoaderWithContext", + "ADE_AnimateDiffModelSettings", + "ADE_AnimateDiffModelSettingsAdvancedAttnStrengths", + "ADE_AnimateDiffModelSettingsSimple", + "ADE_AnimateDiffModelSettings_Release", + "ADE_AnimateDiffUniformContextOptions", + "ADE_AnimateDiffUniformContextOptionsExperimental", + "ADE_AnimateDiffUnload", + "ADE_EmptyLatentImageLarge", + "AnimateDiffLoaderV1", + "CheckpointLoaderSimpleWithNoiseSelect" + ], + { + "title_aux": "AnimateDiff Evolved" + } + ], + "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite": [ + [ + "VHS_DuplicateImages", + "VHS_DuplicateLatents", + "VHS_GetImageCount", + "VHS_GetLatentCount", + "VHS_LoadImages", + "VHS_LoadImagesPath", + "VHS_LoadVideo", + "VHS_LoadVideoPath", + "VHS_MergeImages", + "VHS_MergeLatents", + "VHS_SelectEveryNthImage", + "VHS_SelectEveryNthLatent", + "VHS_SplitImages", + "VHS_SplitLatents", + "VHS_VideoCombine" + ], + { + "title_aux": "ComfyUI-VideoHelperSuite" + } + ], + "https://github.com/LEv145/images-grid-comfy-plugin": [ + [ + "GridAnnotation", + "ImageCombine", + "ImagesGridByColumns", + "ImagesGridByRows", + "LatentCombine" + ], + { + "title_aux": "ImagesGrid" + } + ], + "https://github.com/Lerc/canvas_tab": [ + [ + "Canvas_Tab", + "Send_To_Editor" + ], + { + "author": "Lerc", + "description": "This extension provides a full page image editor with mask support. 
There are two nodes, one to receive images from the editor and one to send images to the editor.", + "nickname": "Canvas Tab", + "title": "Canvas Tab", + "title_aux": "Canvas Tab" + } + ], + "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame": [ + [ + "BreakFrames", + "BreakGrid", + "GetKeyFrames", + "MakeGrid", + "RandomImageFromDir" + ], + { + "title_aux": "ComfyBreakAnim" + } + ], + "https://github.com/LonicaMewinsky/ComfyUI-RawSaver": [ + [ + "SaveTifImage" + ], + { + "title_aux": "ComfyUI-RawSaver" + } + ], + "https://github.com/M1kep/ComfyLiterals": [ + [ + "Checkpoint", + "Float", + "Int", + "KepStringLiteral", + "Lora", + "Operation", + "String" + ], + { + "title_aux": "ComfyLiterals" + } + ], + "https://github.com/M1kep/ComfyUI-KepOpenAI": [ + [ + "KepOpenAI_ImageWithPrompt" + ], + { + "title_aux": "ComfyUI-KepOpenAI" + } + ], + "https://github.com/M1kep/ComfyUI-OtherVAEs": [ + [ + "OtherVAE_Taesd" + ], + { + "title_aux": "ComfyUI-OtherVAEs" + } + ], + "https://github.com/M1kep/Comfy_KepKitchenSink": [ + [ + "KepRotateImage" + ], + { + "title_aux": "Comfy_KepKitchenSink" + } + ], + "https://github.com/M1kep/Comfy_KepListStuff": [ + [ + "Empty Images", + "Image Overlay", + "ImageListLoader", + "Join Float Lists", + "Join Image Lists", + "KepStringList", + "KepStringListFromNewline", + "Kep_JoinListAny", + "Kep_RepeatList", + "Kep_ReverseList", + "Kep_VariableImageBuilder", + "List Length", + "Range(Num Steps) - Float", + "Range(Num Steps) - Int", + "Range(Step) - Float", + "Range(Step) - Int", + "Stack Images", + "XYAny", + "XYImage" + ], + { + "title_aux": "Comfy_KepListStuff" + } + ], + "https://github.com/M1kep/Comfy_KepMatteAnything": [ + [ + "MatteAnything_DinoBoxes", + "MatteAnything_GenerateVITMatte", + "MatteAnything_InitSamPredictor", + "MatteAnything_LoadDINO", + "MatteAnything_LoadVITMatteModel", + "MatteAnything_SAMLoader", + "MatteAnything_SAMMaskFromBoxes", + "MatteAnything_ToTrimap" + ], + { + "title_aux": "Comfy_KepMatteAnything" + } + ], + "https://github.com/M1kep/KepPromptLang": [ + [ + "Build Gif", + "Special CLIP Loader" + ], + { + "title_aux": "KepPromptLang" + } + ], + "https://github.com/ManglerFTW/ComfyI2I": [ + [ + "Color Transfer", + "Combine and Paste", + "Inpaint Segments", + "Mask Ops" + ], + { + "author": "ManglerFTW", + "title": "ComfyI2I", + "title_aux": "ComfyI2I" + } + ], + "https://github.com/NicholasMcCarthy/ComfyUI_TravelSuite": [ + [ + "LatentTravel" + ], + { + "title_aux": "ComfyUI_TravelSuite" + } + ], + "https://github.com/Niutonian/ComfyUi-NoodleWebcam": [ + [ + "WebcamNode" + ], + { + "title_aux": "ComfyUi-NoodleWebcam" + } + ], + "https://github.com/Nourepide/ComfyUI-Allor": [ + [ + "AlphaChanelAdd", + "AlphaChanelAddByMask", + "AlphaChanelAsMask", + "AlphaChanelRemove", + "AlphaChanelRestore", + "ClipClamp", + "ClipVisionClamp", + "ClipVisionOutputClamp", + "ConditioningClamp", + "ControlNetClamp", + "GligenClamp", + "ImageBatchCopy", + "ImageBatchFork", + "ImageBatchGet", + "ImageBatchJoin", + "ImageBatchPermute", + "ImageBatchRemove", + "ImageClamp", + "ImageCompositeAbsolute", + "ImageCompositeAbsoluteByContainer", + "ImageCompositeRelative", + "ImageCompositeRelativeByContainer", + "ImageContainer", + "ImageContainerInheritanceAdd", + "ImageContainerInheritanceMax", + "ImageContainerInheritanceScale", + "ImageContainerInheritanceSum", + "ImageDrawArc", + "ImageDrawArcByContainer", + "ImageDrawChord", + "ImageDrawChordByContainer", + "ImageDrawEllipse", + "ImageDrawEllipseByContainer", + "ImageDrawLine", + 
"ImageDrawLineByContainer", + "ImageDrawPieslice", + "ImageDrawPiesliceByContainer", + "ImageDrawPolygon", + "ImageDrawRectangle", + "ImageDrawRectangleByContainer", + "ImageDrawRectangleRounded", + "ImageDrawRectangleRoundedByContainer", + "ImageEffectsAdjustment", + "ImageEffectsGrayscale", + "ImageEffectsLensBokeh", + "ImageEffectsLensChromaticAberration", + "ImageEffectsLensOpticAxis", + "ImageEffectsLensVignette", + "ImageEffectsLensZoomBurst", + "ImageEffectsNegative", + "ImageEffectsSepia", + "ImageFilterBilateralBlur", + "ImageFilterBlur", + "ImageFilterBoxBlur", + "ImageFilterContour", + "ImageFilterDetail", + "ImageFilterEdgeEnhance", + "ImageFilterEdgeEnhanceMore", + "ImageFilterEmboss", + "ImageFilterFindEdges", + "ImageFilterGaussianBlur", + "ImageFilterGaussianBlurAdvanced", + "ImageFilterMax", + "ImageFilterMedianBlur", + "ImageFilterMin", + "ImageFilterMode", + "ImageFilterRank", + "ImageFilterSharpen", + "ImageFilterSmooth", + "ImageFilterSmoothMore", + "ImageFilterStackBlur", + "ImageNoiseBeta", + "ImageNoiseBinomial", + "ImageNoiseBytes", + "ImageNoiseGaussian", + "ImageSegmentation", + "ImageSegmentationCustom", + "ImageSegmentationCustomAdvanced", + "ImageText", + "ImageTextMultiline", + "ImageTextMultilineOutlined", + "ImageTextOutlined", + "ImageTransformCropAbsolute", + "ImageTransformCropCorners", + "ImageTransformCropRelative", + "ImageTransformPaddingAbsolute", + "ImageTransformPaddingRelative", + "ImageTransformResizeAbsolute", + "ImageTransformResizeClip", + "ImageTransformResizeRelative", + "ImageTransformRotate", + "ImageTransformTranspose", + "LatentClamp", + "MaskClamp", + "ModelClamp", + "StyleModelClamp", + "UpscaleModelClamp", + "VaeClamp" + ], + { + "title_aux": "Allor Plugin" + } + ], + "https://github.com/Nuked88/ComfyUI-N-Nodes": [ + [ + "DynamicPrompt", + "Float Variable", + "FrameInterpolator", + "GPT Loader Simple", + "GPTSampler", + "Integer Variable", + "LoadFramesFromFolder", + "LoadVideo", + "SaveVideo", + "SetMetadataForSaveVideo", + "String Variable" + ], + { + "title_aux": "ComfyUI-N-Nodes" + } + ], + "https://github.com/Off-Live/ComfyUI-off-suite": [ + [ + "Cached Image Load From URL", + "Crop Center wigh SEGS", + "Crop Center with SEGS", + "GW Number Formatting", + "Image Crop Fit", + "Image Resize Fit", + "OFF SEGS to Image", + "Watermarking" + ], + { + "title_aux": "ComfyUI-off-suite" + } + ], + "https://github.com/Onierous/QRNG_Node_ComfyUI/raw/main/qrng_node.py": [ + [ + "QRNG_Node_CSV" + ], + { + "title_aux": "QRNG_Node_ComfyUI" + } + ], + "https://github.com/PCMonsterx/ComfyUI-CSV-Loader": [ + [ + "Load Artists CSV", + "Load Artmovements CSV", + "Load Characters CSV", + "Load Colors CSV", + "Load Composition CSV", + "Load Lighting CSV", + "Load Negative CSV", + "Load Positive CSV", + "Load Settings CSV", + "Load Styles CSV" + ], + { + "title_aux": "ComfyUI-CSV-Loader" + } + ], + "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts": [ + [ + "CSVPromptsLoader", + "CombinePrompt", + "MultiLoraLoader", + "RandomPrompt" + ], + { + "title_aux": "ComfyUI-Malefish-Custom-Scripts" + } + ], + "https://github.com/Pfaeff/pfaeff-comfyui": [ + [ + "AstropulsePixelDetector", + "BackgroundRemover", + "ImagePadForBetterOutpaint", + "Inpainting", + "InpaintingPipelineLoader" + ], + { + "title_aux": "pfaeff-comfyui" + } + ], + "https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes": [ + [ + "CR 3D Camera Drone", + "CR 3D Camera Static", + "CR 3D Polygon", + "CR 3D Solids", + "CR Add Annotation", + "CR Alternate Latents", + "CR Apply 
Annotations", + "CR Apply ControlNet", + "CR Apply LoRA Stack", + "CR Apply Model Merge", + "CR Apply Multi Upscale", + "CR Apply Multi-ControlNet", + "CR Arabic Text RTL", + "CR Aspect Ratio", + "CR Aspect Ratio SDXL", + "CR Batch Process Switch", + "CR Central Schedule", + "CR Check Job Complete", + "CR Checker Pattern", + "CR Clip Input Switch", + "CR Color Bars", + "CR Color Gradient", + "CR Color Panel", + "CR Color Tint", + "CR Combine Schedules", + "CR Comic Panel Templates", + "CR Comic Panel Templates (Advanced)", + "CR Comic Panel Templates Advanced", + "CR Composite Text", + "CR Conditioning Input Switch", + "CR Conditioning Mixer", + "CR Continuous Rotation", + "CR Continuous Track", + "CR Continuous Zoom", + "CR ControlNet Input Switch", + "CR Current Frame", + "CR Cycle Images", + "CR Cycle Images Simple", + "CR Cycle LoRAs", + "CR Cycle Models", + "CR Cycle Styles", + "CR Cycle Text", + "CR Cycle Text Simple", + "CR Debatch Frames", + "CR Draw Perspective Text", + "CR Draw Text", + "CR Encode Scheduled Prompts", + "CR Float To Integer", + "CR Float To String", + "CR Gradient Float", + "CR Gradient Integer", + "CR Halftone Filter", + "CR Halftone Grid", + "CR Hires Fix Process Switch", + "CR Image Border", + "CR Image Grid Panel", + "CR Image Input Switch", + "CR Image Input Switch (4 way)", + "CR Image List", + "CR Image List Simple", + "CR Image Output", + "CR Image Panel", + "CR Image Pipe Edit", + "CR Image Pipe In", + "CR Image Pipe Out", + "CR Image Size", + "CR Image Transition", + "CR Image XY Panel", + "CR Img2Img Process Switch", + "CR Increment Float", + "CR Increment Integer", + "CR Index", + "CR Index Increment", + "CR Index Multiply", + "CR Index Reset", + "CR Input Text List", + "CR Integer Multiple", + "CR Integer To String", + "CR Interpolate Latents", + "CR Interpolate Prompt Weights", + "CR Interpolate Rotation", + "CR Interpolate Track", + "CR Interpolate Zoom", + "CR Job Current Frame", + "CR Job List", + "CR Job Scheduler", + "CR Keyframe List", + "CR Latent Batch Size", + "CR Latent Input Switch", + "CR LoRA List", + "CR LoRA Stack", + "CR Load Animation Frames", + "CR Load Flow Frames", + "CR Load LoRA", + "CR Load Prompt Style", + "CR Load Schedule From File", + "CR Load Scheduled ControlNets", + "CR Load Scheduled LoRAs", + "CR Load Scheduled Models", + "CR Load Workflow", + "CR Load XY Annotation From File", + "CR Mask Text", + "CR Model Input Switch", + "CR Model List", + "CR Model Merge Stack", + "CR Module Input", + "CR Module Output", + "CR Module Pipe Loader", + "CR Multi Upscale Stack", + "CR Multi-ControlNet Stack", + "CR Multi-Panel Meme Template", + "CR Output Flow Frames", + "CR Output Schedule To File", + "CR Overlay Text", + "CR Overlay Transparent Image", + "CR Page Layout", + "CR Pipe Switch", + "CR Polygons", + "CR Popular Meme Templates", + "CR Prompt List", + "CR Prompt List Keyframes", + "CR Prompt Scheduler", + "CR Prompt Text", + "CR Prompt Weight Scheduler", + "CR Radial Gradient", + "CR Random Hex Color", + "CR Random RGB", + "CR SD1.5 Aspect Ratio", + "CR SDXL Aspect Ratio", + "CR SDXL Base Prompt Encoder", + "CR SDXL Prompt Mix Presets", + "CR SDXL Style Text", + "CR Schedule Camera Movements", + "CR Schedule ControlNets", + "CR Schedule Input Switch", + "CR Schedule Styles", + "CR Schedule To ScheduleList", + "CR Seed", + "CR Seed to Int", + "CR Select Model", + "CR Simple Annotations", + "CR Simple Image Watermark", + "CR Simple Meme Template", + "CR Simple Prompt List", + "CR Simple Prompt List Keyframes", + "CR Simple 
Prompt Scheduler", + "CR Simple Schedule", + "CR Simple Text Panel", + "CR Simple Text Scheduler", + "CR Simple Text Watermark", + "CR Simple Value Scheduler", + "CR Spawn Workflow Instance", + "CR Split String", + "CR Starburst Colors", + "CR Starburst Lines", + "CR String To Combo", + "CR String To Number", + "CR Strobe Images", + "CR Style Bars", + "CR Style List", + "CR Switch Model and CLIP", + "CR Text Input Switch", + "CR Text Input Switch (4 way)", + "CR Text List", + "CR Text List Cross Join", + "CR Text List Simple", + "CR Text List To String", + "CR Text Scheduler", + "CR Trigger", + "CR Upscale Image", + "CR VAE Input Switch", + "CR Value", + "CR Value Scheduler", + "CR XY From Folder", + "CR XY Grid", + "CR XY Index", + "CR XY Interpolate", + "CR XY List", + "CR XY Save Grid Image", + "CR XYZ Index", + "CR XYZ Interpolate", + "CR XYZ List" + ], + { + "author": "RockOfFire", + "description": "Custom nodes for SDXL and SD1.5 including Multi-ControlNet, LoRA, Aspect Ratio, Process Switches, and many more nodes.", + "nickname": "Comfyroll Custom Nodes", + "title": "Comfyroll Custom Nodes", + "title_aux": "ComfyUI_Comfyroll_CustomNodes" + } + ], + "https://github.com/SLAPaper/ComfyUI-Image-Selector": [ + [ + "ImageDuplicator", + "ImageSelector", + "LatentDuplicator", + "LatentSelector" + ], + { + "title_aux": "ComfyUI-Image-Selector" + } + ], + "https://github.com/SOELexicon/ComfyUI-LexMSDBNodes": [ + [ + "MSSqlSelectNode", + "MSSqlTableNode" + ], + { + "title_aux": "LexMSDBNodes" + } + ], + "https://github.com/SOELexicon/ComfyUI-LexTools": [ + [ + "AgeClassifierNode", + "ArtOrHumanClassifierNode", + "DocumentClassificationNode", + "FoodCategoryClassifierNode", + "ImageAspectPadNode", + "ImageCaptioning", + "ImageFilterByFloatScoreNode", + "ImageFilterByIntScoreNode", + "ImageQualityScoreNode", + "ImageRankingNode", + "ImageScaleToMin", + "MD5ImageHashNode", + "SamplerPropertiesNode", + "ScoreConverterNode", + "SeedIncrementerNode", + "SegformerNode", + "SegformerNodeMasks", + "SegformerNodeMergeSegments", + "StepCfgIncrementNode" + ], + { + "title_aux": "ComfyUI-LexTools" + } + ], + "https://github.com/SadaleNet/CLIPTextEncodeA1111-ComfyUI/raw/master/custom_nodes/clip_text_encoder_a1111.py": [ + [ + "CLIPTextEncodeA1111", + "RerouteTextForCLIPTextEncodeA1111" + ], + { + "title_aux": "ComfyUI A1111-like Prompt Custom Node Solution" + } + ], + "https://github.com/Scholar01/ComfyUI-Keyframe": [ + [ + "KeyframeApply", + "KeyframeInterpolationPart", + "KeyframePart" + ], + { + "title_aux": "SComfyUI-Keyframe" + } + ], + "https://github.com/SeargeDP/SeargeSDXL": [ + [ + "SeargeAdvancedParameters", + "SeargeCheckpointLoader", + "SeargeConditionMixing", + "SeargeConditioningMuxer2", + "SeargeConditioningMuxer5", + "SeargeConditioningParameters", + "SeargeControlnetAdapterV2", + "SeargeControlnetModels", + "SeargeCustomAfterUpscaling", + "SeargeCustomAfterVaeDecode", + "SeargeCustomPromptMode", + "SeargeDebugPrinter", + "SeargeEnablerInputs", + "SeargeFloatConstant", + "SeargeFloatMath", + "SeargeFloatPair", + "SeargeFreeU", + "SeargeGenerated1", + "SeargeGenerationParameters", + "SeargeHighResolution", + "SeargeImage2ImageAndInpainting", + "SeargeImageAdapterV2", + "SeargeImageSave", + "SeargeImageSaving", + "SeargeInput1", + "SeargeInput2", + "SeargeInput3", + "SeargeInput4", + "SeargeInput5", + "SeargeInput6", + "SeargeInput7", + "SeargeIntegerConstant", + "SeargeIntegerMath", + "SeargeIntegerPair", + "SeargeIntegerScaler", + "SeargeLatentMuxer3", + "SeargeLoraLoader", + "SeargeLoras", 
+ "SeargeMagicBox", + "SeargeModelSelector", + "SeargeOperatingMode", + "SeargeOutput1", + "SeargeOutput2", + "SeargeOutput3", + "SeargeOutput4", + "SeargeOutput5", + "SeargeOutput6", + "SeargeOutput7", + "SeargeParameterProcessor", + "SeargePipelineStart", + "SeargePipelineTerminator", + "SeargePreviewImage", + "SeargePromptAdapterV2", + "SeargePromptCombiner", + "SeargePromptStyles", + "SeargePromptText", + "SeargeSDXLBasePromptEncoder", + "SeargeSDXLImage2ImageSampler", + "SeargeSDXLImage2ImageSampler2", + "SeargeSDXLPromptEncoder", + "SeargeSDXLRefinerPromptEncoder", + "SeargeSDXLSampler", + "SeargeSDXLSampler2", + "SeargeSDXLSamplerV3", + "SeargeSamplerAdvanced", + "SeargeSamplerInputs", + "SeargeSaveFolderInputs", + "SeargeSeparator", + "SeargeStylePreprocessor", + "SeargeTextInputV2", + "SeargeUpscaleModelLoader", + "SeargeUpscaleModels", + "SeargeVAELoader" + ], + { + "title_aux": "SeargeSDXL" + } + ], + "https://github.com/Ser-Hilary/SDXL_sizing/raw/main/conditioning_sizing_for_SDXL.py": [ + [ + "get_aspect_from_image", + "get_aspect_from_ints", + "sizing_node", + "sizing_node_basic", + "sizing_node_unparsed" + ], + { + "title_aux": "SDXL_sizing" + } + ], + "https://github.com/Smuzzies/comfyui_chatbox_overlay/raw/main/chatbox_overlay.py": [ + [ + "Chatbox Overlay" + ], + { + "title_aux": "Chatbox Overlay node for ComfyUI" + } + ], + "https://github.com/SoftMeng/ComfyUI_Mexx_Poster": [ + [ + "ComfyUI_Mexx_Poster" + ], + { + "title_aux": "ComfyUI_Mexx_Poster" + } + ], + "https://github.com/SoftMeng/ComfyUI_Mexx_Styler": [ + [ + "MexxSDXLPromptStyler", + "MexxSDXLPromptStylerAdvanced" + ], + { + "title_aux": "ComfyUI_Mexx_Styler" + } + ], + "https://github.com/Stability-AI/stability-ComfyUI-nodes": [ + [ + "ColorBlend", + "ControlLoraSave", + "GetImageSize" + ], + { + "title_aux": "stability-ComfyUI-nodes" + } + ], + "https://github.com/Sxela/ComfyWarp": [ + [ + "ExtractOpticalFlow", + "LoadFrame", + "LoadFrameFromDataset", + "LoadFrameFromFolder", + "LoadFramePairFromDataset", + "LoadFrameSequence", + "MakeFrameDataset", + "MixConsistencyMaps", + "OffsetNumber", + "ResizeToFit", + "SaveFrame", + "WarpFrame" + ], + { + "title_aux": "ComfyWarp" + } + ], + "https://github.com/TGu-97/ComfyUI-TGu-utils": [ + [ + "MPNReroute", + "MPNSwitch", + "PNSwitch" + ], + { + "title_aux": "TGu Utilities" + } + ], + "https://github.com/THtianhao/ComfyUI-FaceChain": [ + [ + "FCStyleLoraLoad", + "FC_CropAndPaste", + "FC_CropBottom", + "FC_CropFace", + "FC_CropMask", + "FC_FaceDetection", + "FC_FaceFusion", + "FC_MaskOP", + "FC_ReplaceImage", + "FC_Segment", + "FC_StyleLoraLoad" + ], + { + "title_aux": "ComfyUI-FaceChain" + } + ], + "https://github.com/THtianhao/ComfyUI-Portrait-Maker": [ + [ + "PM_BoxCropImage", + "PM_ColorTransfer", + "PM_ExpandMaskBox", + "PM_FaceFusion", + "PM_FaceShapMatch", + "PM_FaceSkin", + "PM_GetImageInfo", + "PM_ImageResizeTarget", + "PM_ImageScaleShort", + "PM_MakeUpTransfer", + "PM_MaskDilateErode", + "PM_MaskMerge2Image", + "PM_PortraitEnhancement", + "PM_RatioMerge2Image", + "PM_ReplaceBoxImg", + "PM_RetinaFace", + "PM_SkinRetouching", + "PM_SuperColorTransfer", + "PM_SuperMakeUpTransfer" + ], + { + "title_aux": "ComfyUI-Portrait-Maker" + } + ], + "https://github.com/TRI3D-LC/tri3d-comfyui-nodes": [ + [ + "tri3d-atr-parse", + "tri3d-atr-parse-batch", + "tri3d-dwpose", + "tri3d-extract-hand", + "tri3d-extract-parts-batch", + "tri3d-extract-parts-batch2", + "tri3d-extract-parts-mask-batch", + "tri3d-fuzzification", + "tri3d-interaction-canny", + "tri3d-pose-adaption", + 
"tri3d-pose-to-image", + "tri3d-position-hands", + "tri3d-position-parts-batch", + "tri3d-skin-feathered-padded-mask", + "tri3d-swap-pixels" + ], + { + "title_aux": "tri3d-comfyui-nodes" + } + ], + "https://github.com/TeaCrab/ComfyUI-TeaNodes": [ + [ + "TC_ColorFill", + "TC_EqualizeCLAHE", + "TC_ImageResize", + "TC_ImageScale", + "TC_MaskBG_DIS", + "TC_RandomColorFill", + "TC_SizeApproximation" + ], + { + "title_aux": "ComfyUI-TeaNodes" + } + ], + "https://github.com/TheBarret/ZSuite": [ + [ + "ZSuite: Prompter", + "ZSuite: RF Noise", + "ZSuite: SeedMod" + ], + { + "title_aux": "ZSuite" + } + ], + "https://github.com/TinyTerra/ComfyUI_tinyterraNodes": [ + [ + "ttN busIN", + "ttN busOUT", + "ttN compareInput", + "ttN concat", + "ttN debugInput", + "ttN float", + "ttN hiresfixScale", + "ttN imageOutput", + "ttN imageREMBG", + "ttN int", + "ttN multiModelMerge", + "ttN pipe2BASIC", + "ttN pipe2DETAILER", + "ttN pipeEDIT", + "ttN pipeEncodeConcat", + "ttN pipeIN", + "ttN pipeKSampler", + "ttN pipeKSamplerAdvanced", + "ttN pipeKSamplerSDXL", + "ttN pipeLoader", + "ttN pipeLoaderSDXL", + "ttN pipeLoraStack", + "ttN pipeOUT", + "ttN seed", + "ttN seedDebug", + "ttN text", + "ttN text3BOX_3WAYconcat", + "ttN text7BOX_concat", + "ttN textDebug", + "ttN xyPlot" + ], + { + "author": "tinyterra", + "description": "This extension offers various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more.", + "nickname": "ttNodes", + "nodename_pattern": "^ttN ", + "title": "tinyterraNodes", + "title_aux": "tinyterraNodes" + } + ], + "https://github.com/Tropfchen/ComfyUI-Embedding_Picker": [ + [ + "EmbeddingPicker" + ], + { + "title_aux": "Embedding Picker" + } + ], + "https://github.com/Tropfchen/ComfyUI-yaResolutionSelector": [ + [ + "YARS", + "YARSAdv" + ], + { + "title_aux": "YARS: Yet Another Resolution Selector" + } + ], + "https://github.com/Trung0246/ComfyUI-0246": [ + [ + "0246.Beautify", + "0246.Convert", + "0246.Count", + "0246.Highway", + "0246.Hold", + "0246.Junction", + "0246.JunctionBatch", + "0246.Loop", + "0246.Mimic", + "0246.RandomInt", + "0246.Stringify" + ], + { + "title_aux": "ComfyUI-0246" + } + ], + "https://github.com/Ttl/ComfyUi_NNLatentUpscale": [ + [ + "NNLatentUpscale" + ], + { + "title_aux": "ComfyUI Neural network latent upscale custom node" + } + ], + "https://github.com/Umikaze-job/select_folder_path_easy": [ + [ + "SelectFolderPathEasy" + ], + { + "title_aux": "select_folder_path_easy" + } + ], + "https://github.com/WASasquatch/ASTERR": [ + [ + "ASTERR", + "SaveASTERR" + ], + { + "title_aux": "ASTERR" + } + ], + "https://github.com/WASasquatch/ComfyUI_Preset_Merger": [ + [ + "Preset_Model_Merge" + ], + { + "title_aux": "ComfyUI Preset Merger" + } + ], + "https://github.com/WASasquatch/FreeU_Advanced": [ + [ + "FreeU (Advanced)" + ], + { + "title_aux": "FreeU_Advanced" + } + ], + "https://github.com/WASasquatch/PPF_Noise_ComfyUI": [ + [ + "Blend Latents (PPF Noise)", + "Cross-Hatch Power Fractal (PPF Noise)", + "Images as Latents (PPF Noise)", + "Perlin Power Fractal Latent (PPF Noise)" + ], + { + "title_aux": "PPF_Noise_ComfyUI" + } + ], + "https://github.com/WASasquatch/PowerNoiseSuite": [ + [ + "Blend Latents (PPF Noise)", + "Cross-Hatch Power Fractal (PPF Noise)", + "Cross-Hatch Power Fractal Settings (PPF Noise)", + "Images as Latents (PPF Noise)", + "Latent Adjustment (PPF Noise)", + "Latents to CPU (PPF Noise)", + "Linear Cross-Hatch Power Fractal (PPF Noise)", + "Perlin Power Fractal Latent (PPF Noise)", + "Perlin 
Power Fractal Settings (PPF Noise)", + "Power KSampler Advanced (PPF Noise)", + "Power-Law Noise (PPF Noise)" + ], + { + "title_aux": "Power Noise Suite for ComfyUI" + } + ], + "https://github.com/WASasquatch/WAS_Extras": [ + [ + "BLVAEEncode", + "CLIPTextEncodeList", + "CLIPTextEncodeSequence2", + "ConditioningBlend", + "DebugInput", + "KSamplerSeq", + "KSamplerSeq2", + "VAEEncodeForInpaint (WAS)", + "VividSharpen" + ], + { + "title_aux": "WAS_Extras" + } + ], + "https://github.com/WASasquatch/was-node-suite-comfyui": [ + [ + "BLIP Analyze Image", + "BLIP Model Loader", + "Blend Latents", + "Bounded Image Blend", + "Bounded Image Blend with Mask", + "Bounded Image Crop", + "Bounded Image Crop with Mask", + "Bus Node", + "CLIP Input Switch", + "CLIP Vision Input Switch", + "CLIPSeg Batch Masking", + "CLIPSeg Masking", + "CLIPSeg Model Loader", + "CLIPTextEncode (BlenderNeko Advanced + NSP)", + "CLIPTextEncode (NSP)", + "Cache Node", + "Checkpoint Loader", + "Checkpoint Loader (Simple)", + "Conditioning Input Switch", + "Constant Number", + "Control Net Model Input Switch", + "Convert Masks to Images", + "Create Grid Image", + "Create Grid Image from Batch", + "Create Morph Image", + "Create Morph Image from Path", + "Create Video from Path", + "Debug Number to Console", + "Dictionary to Console", + "Diffusers Hub Model Down-Loader", + "Diffusers Model Loader", + "Export API", + "Image Analyze", + "Image Aspect Ratio", + "Image Batch", + "Image Blank", + "Image Blend", + "Image Blend by Mask", + "Image Blending Mode", + "Image Bloom Filter", + "Image Bounds", + "Image Bounds to Console", + "Image Canny Filter", + "Image Chromatic Aberration", + "Image Color Palette", + "Image Crop Face", + "Image Crop Location", + "Image Crop Square Location", + "Image Displacement Warp", + "Image Dragan Photography Filter", + "Image Edge Detection Filter", + "Image Film Grain", + "Image Filter Adjustments", + "Image Flip", + "Image Generate Gradient", + "Image Gradient Map", + "Image High Pass Filter", + "Image History Loader", + "Image Input Switch", + "Image Levels Adjustment", + "Image Load", + "Image Lucy Sharpen", + "Image Median Filter", + "Image Mix RGB Channels", + "Image Monitor Effects Filter", + "Image Nova Filter", + "Image Padding", + "Image Paste Crop", + "Image Paste Crop by Location", + "Image Paste Face", + "Image Perlin Noise", + "Image Perlin Power Fractal", + "Image Pixelate", + "Image Power Noise", + "Image Rembg (Remove Background)", + "Image Remove Background (Alpha)", + "Image Remove Color", + "Image Resize", + "Image Rotate", + "Image Rotate Hue", + "Image SSAO (Ambient Occlusion)", + "Image SSDO (Direct Occlusion)", + "Image Save", + "Image Seamless Texture", + "Image Select Channel", + "Image Select Color", + "Image Shadows and Highlights", + "Image Size to Number", + "Image Stitch", + "Image Style Filter", + "Image Threshold", + "Image Tiled", + "Image Transpose", + "Image Voronoi Noise Filter", + "Image fDOF Filter", + "Image to Latent Mask", + "Image to Noise", + "Image to Seed", + "Images to Linear", + "Images to RGB", + "Inset Image Bounds", + "Integer place counter", + "KSampler (WAS)", + "KSampler Cycle", + "Latent Input Switch", + "Latent Noise Injection", + "Latent Size to Number", + "Latent Upscale by Factor (WAS)", + "Load Cache", + "Load Image Batch", + "Load Lora", + "Load Text File", + "Logic Boolean", + "Lora Input Switch", + "Lora Loader", + "Mask Arbitrary Region", + "Mask Batch", + "Mask Batch to Mask", + "Mask Ceiling Region", + "Mask Crop Dominant Region", + 
"Mask Crop Minority Region", + "Mask Crop Region", + "Mask Dilate Region", + "Mask Dominant Region", + "Mask Erode Region", + "Mask Fill Holes", + "Mask Floor Region", + "Mask Gaussian Region", + "Mask Invert", + "Mask Minority Region", + "Mask Paste Region", + "Mask Smooth Region", + "Mask Threshold Region", + "Masks Add", + "Masks Combine Batch", + "Masks Combine Regions", + "Masks Subtract", + "MiDaS Depth Approximation", + "MiDaS Mask Image", + "MiDaS Model Loader", + "Model Input Switch", + "Number Counter", + "Number Input Condition", + "Number Input Switch", + "Number Multiple Of", + "Number Operation", + "Number PI", + "Number to Float", + "Number to Int", + "Number to Seed", + "Number to String", + "Number to Text", + "Prompt Multiple Styles Selector", + "Prompt Styles Selector", + "Random Number", + "SAM Image Mask", + "SAM Model Loader", + "SAM Parameters", + "SAM Parameters Combine", + "Samples Passthrough (Stat System)", + "Save Text File", + "Seed", + "String to Text", + "Tensor Batch to Image", + "Text Add Token by Input", + "Text Add Tokens", + "Text Compare", + "Text Concatenate", + "Text Dictionary Update", + "Text File History Loader", + "Text Find and Replace", + "Text Find and Replace Input", + "Text Find and Replace by Dictionary", + "Text Input Switch", + "Text List", + "Text List Concatenate", + "Text Load Line From File", + "Text Multiline", + "Text Parse A1111 Embeddings", + "Text Parse Noodle Soup Prompts", + "Text Parse Tokens", + "Text Random Line", + "Text Random Prompt", + "Text Shuffle", + "Text String", + "Text String Truncate", + "Text to Conditioning", + "Text to Console", + "Text to Number", + "Text to String", + "True Random.org Number Generator", + "Upscale Model Loader", + "Upscale Model Switch", + "VAE Input Switch", + "Video Dump Frames", + "Write to GIF", + "Write to Video", + "unCLIP Checkpoint Loader" + ], + { + "title_aux": "WAS Node Suite" + } + ], + "https://github.com/WebDev9000/WebDev9000-Nodes": [ + [ + "IgnoreBraces", + "SettingsSwitch" + ], + { + "title_aux": "WebDev9000-Nodes" + } + ], + "https://github.com/YMC-GitHub/ymc-node-suite-comfyui": [ + [ + "Image Save", + "Save Text File", + "canvas-util-cal-size", + "conditioning-util-input-switch", + "cutoff-region-util", + "hks-util-cal-denoise-step", + "img-util-get-image-size", + "img-util-switch-input-image", + "io-util-file-list-get", + "io-util-file-list-get-text", + "number-util-random-num", + "pipe-util-to-basic-pipe", + "region-util-get-by-center-and-size", + "region-util-get-by-lt", + "region-util-get-crop-location-from-center-size-text", + "region-util-get-pad-out-location-by-size", + "text-preset-colors", + "text-util-join-text", + "text-util-loop-text", + "text-util-path-list", + "text-util-prompt-add-prompt", + "text-util-prompt-adv-dup", + "text-util-prompt-adv-search", + "text-util-prompt-del", + "text-util-prompt-dup", + "text-util-prompt-join", + "text-util-prompt-search", + "text-util-prompt-shuffle", + "text-util-prompt-std", + "text-util-prompt-unweight", + "text-util-random-text", + "text-util-search-text", + "text-util-show-text", + "text-util-switch-text", + "xyz-util-txt-to-int" + ], + { + "title_aux": "ymc-node-suite-comfyui" + } + ], + "https://github.com/YOUR-WORST-TACO/ComfyUI-TacoNodes": [ + [ + "Example", + "TacoAnimatedLoader", + "TacoGifMaker", + "TacoImg2ImgAnimatedLoader", + "TacoImg2ImgAnimatedProcessor", + "TacoLatent" + ], + { + "title_aux": "ComfyUI-TacoNodes" + } + ], + "https://github.com/YinBailiang/MergeBlockWeighted_fo_ComfyUI": [ + [ + 
"MergeBlockWeighted" + ], + { + "title_aux": "MergeBlockWeighted_fo_ComfyUI" + } + ], + "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite": [ + [ + "AlphaChanelAddByMask", + "ImageCompositeBy_BG_Zho", + "ImageCompositeBy_Zho", + "ImageComposite_BG_Zho", + "ImageComposite_Zho", + "RGB_Image_Zho", + "Text_Image_Frame_Zho", + "Text_Image_Multiline_Zho", + "Text_Image_Zho" + ], + { + "title_aux": "ComfyUI-Text_Image-Composite" + } + ], + "https://github.com/ZaneA/ComfyUI-ImageReward": [ + [ + "ImageRewardLoader", + "ImageRewardScore" + ], + { + "title_aux": "ImageReward" + } + ], + "https://github.com/Zuellni/ComfyUI-ExLlama": [ + [ + "ZuellniExLlamaGenerator", + "ZuellniExLlamaLoader", + "ZuellniTextPreview", + "ZuellniTextReplace" + ], + { + "title_aux": "ComfyUI-ExLlama" + } + ], + "https://github.com/Zuellni/ComfyUI-PickScore-Nodes": [ + [ + "ZuellniPickScoreImageProcessor", + "ZuellniPickScoreLoader", + "ZuellniPickScoreSelector", + "ZuellniPickScoreTextProcessor" + ], + { + "title_aux": "ComfyUI PickScore Nodes" + } + ], + "https://github.com/a1lazydog/ComfyUI-AudioScheduler": [ + [ + "AmplitudeToGraph", + "AmplitudeToNumber", + "AudioToAmplitudeGraph", + "AudioToFFTs", + "BatchAmplitudeSchedule", + "ClipAmplitude", + "GateNormalizedAmplitude", + "LoadAudio", + "NormalizeAmplitude", + "NormalizedAmplitudeDrivenString", + "NormalizedAmplitudeToGraph", + "NormalizedAmplitudeToNumber", + "TransientAmplitudeBasic" + ], + { + "title_aux": "ComfyUI-AudioScheduler" + } + ], + "https://github.com/adieyal/comfyui-dynamicprompts": [ + [ + "DPCombinatorialGenerator", + "DPFeelingLucky", + "DPJinja", + "DPMagicPrompt", + "DPOutput", + "DPRandomGenerator" + ], + { + "title_aux": "DynamicPrompts Custom Nodes" + } + ], + "https://github.com/aianimation55/ComfyUI-FatLabels": [ + [ + "FatLabels" + ], + { + "title_aux": "Comfy UI FatLabels" + } + ], + "https://github.com/alpertunga-bile/prompt-generator-comfyui": [ + [ + "Prompt Generator" + ], + { + "title_aux": "prompt-generator" + } + ], + "https://github.com/alsritter/asymmetric-tiling-comfyui": [ + [ + "Asymmetric_Tiling_KSampler" + ], + { + "title_aux": "asymmetric-tiling-comfyui" + } + ], + "https://github.com/alt-key-project/comfyui-dream-project": [ + [ + "Analyze Palette [Dream]", + "Beat Curve [Dream]", + "Big Float Switch [Dream]", + "Big Image Switch [Dream]", + "Big Int Switch [Dream]", + "Big Latent Switch [Dream]", + "Big Palette Switch [Dream]", + "Big Text Switch [Dream]", + "Boolean To Float [Dream]", + "Boolean To Int [Dream]", + "Build Prompt [Dream]", + "CSV Curve [Dream]", + "CSV Generator [Dream]", + "Calculation [Dream]", + "Common Frame Dimensions [Dream]", + "Compare Palettes [Dream]", + "FFMPEG Video Encoder [Dream]", + "File Count [Dream]", + "Finalize Prompt [Dream]", + "Float Input [Dream]", + "Float to Log Entry [Dream]", + "Frame Count Calculator [Dream]", + "Frame Counter (Directory) [Dream]", + "Frame Counter (Simple) [Dream]", + "Frame Counter Info [Dream]", + "Frame Counter Offset [Dream]", + "Frame Counter Time Offset [Dream]", + "Image Brightness Adjustment [Dream]", + "Image Color Shift [Dream]", + "Image Contrast Adjustment [Dream]", + "Image Motion [Dream]", + "Image Sequence Blend [Dream]", + "Image Sequence Loader [Dream]", + "Image Sequence Saver [Dream]", + "Image Sequence Tweening [Dream]", + "Int Input [Dream]", + "Int to Log Entry [Dream]", + "Laboratory [Dream]", + "Linear Curve [Dream]", + "Log Entry Joiner [Dream]", + "Log File [Dream]", + "Noise from Area Palettes [Dream]", + "Noise from 
Palette [Dream]", + "Palette Color Align [Dream]", + "Palette Color Shift [Dream]", + "Sample Image Area as Palette [Dream]", + "Sample Image as Palette [Dream]", + "Saw Curve [Dream]", + "Sine Curve [Dream]", + "Smooth Event Curve [Dream]", + "String Input [Dream]", + "String Tokenizer [Dream]", + "String to Log Entry [Dream]", + "Text Input [Dream]", + "Triangle Curve [Dream]", + "Triangle Event Curve [Dream]", + "WAV Curve [Dream]" + ], + { + "title_aux": "Dream Project Animation Nodes" + } + ], + "https://github.com/alt-key-project/comfyui-dream-video-batches": [ + [ + "Blended Transition [DVB]", + "Calculation [DVB]", + "Create Frame Set [DVB]", + "Divide [DVB]", + "Fade From Black [DVB]", + "Fade To Black [DVB]", + "Float Input [DVB]", + "For Each Done [DVB]", + "For Each Filename [DVB]", + "Frame Set Append [DVB]", + "Frame Set Frame Dimensions Scaled [DVB]", + "Frame Set Index Offset [DVB]", + "Frame Set Merger [DVB]", + "Frame Set Reindex [DVB]", + "Frame Set Repeat [DVB]", + "Frame Set Reverse [DVB]", + "Frame Set Split Beginning [DVB]", + "Frame Set Split End [DVB]", + "Frame Set Splitter [DVB]", + "Generate Inbetween Frames [DVB]", + "Int Input [DVB]", + "Linear Camera Pan [DVB]", + "Linear Camera Roll [DVB]", + "Linear Camera Zoom [DVB]", + "Load Image From Path [DVB]", + "Multiply [DVB]", + "Sine Camera Pan [DVB]", + "Sine Camera Roll [DVB]", + "Sine Camera Zoom [DVB]", + "String Input [DVB]", + "Text Input [DVB]", + "Trace Memory Allocation [DVB]", + "Unwrap Frame Set [DVB]" + ], + { + "title_aux": "Dream Video Batches" + } + ], + "https://github.com/andersxa/comfyui-PromptAttention": [ + [ + "CLIPAttentionMaskEncode" + ], + { + "title_aux": "CLIP Directional Prompt Attention" + } + ], + "https://github.com/asagi4/ComfyUI-CADS": [ + [ + "CADS" + ], + { + "title_aux": "ComfyUI-CADS" + } + ], + "https://github.com/asagi4/comfyui-prompt-control": [ + [ + "EditableCLIPEncode", + "FilterSchedule", + "LoRAScheduler", + "PCSplitSampling", + "PromptControlSimple", + "PromptToSchedule", + "ScheduleToCond", + "ScheduleToModel" + ], + { + "title_aux": "ComfyUI prompt control" + } + ], + "https://github.com/asagi4/comfyui-utility-nodes": [ + [ + "MUJinjaRender", + "MUSimpleWildcard" + ], + { + "title_aux": "asagi4/comfyui-utility-nodes" + } + ], + "https://github.com/aszc-dev/ComfyUI-CoreMLSuite": [ + [ + "Core ML Converter", + "Core ML LCM Converter", + "Core ML LoRA Loader", + "CoreMLModelAdapter", + "CoreMLSampler", + "CoreMLSamplerAdvanced", + "CoreMLUNetLoader" + ], + { + "title_aux": "Core ML Suite for ComfyUI" + } + ], + "https://github.com/avatechai/avatar-graph-comfyui": [ + [ + "ApplyMeshTransformAsShapeKey", + "B_ENUM", + "B_VECTOR3", + "B_VECTOR4", + "CreateShapeFlow", + "ExportBlendshapes", + "ExportGLTF", + "Image Alpha Mask Merge", + "ImageBridge", + "LoadImageWithAlpha", + "SAM MultiLayer", + "Save Image With Workflow" + ], + { + "author": "Avatech Limited", + "description": "Include nodes for sam + bpy operation, that allows workflow creations for generative 2d character rig.", + "nickname": "Avatar Graph", + "title": "Avatar Graph", + "title_aux": "avatar-graph-comfyui" + } + ], + "https://github.com/azazeal04/ComfyUI-Styles": [ + [ + "menus" + ], + { + "title_aux": "ComfyUI-Styles" + } + ], + "https://github.com/badjeff/comfyui_lora_tag_loader": [ + [ + "LoraTagLoader" + ], + { + "title_aux": "LoRA Tag Loader for ComfyUI" + } + ], + "https://github.com/bash-j/mikey_nodes": [ + [ + "AddMetaData", + "Batch Crop Image", + "Batch Crop Resize Inplace", + "Batch Load 
Images", + "Batch Resize Image for SDXL", + "Checkpoint Loader Simple Mikey", + "CinematicLook", + "Empty Latent Ratio Custom SDXL", + "Empty Latent Ratio Select SDXL", + "EvalFloats", + "FileNamePrefix", + "Float to String", + "HaldCLUT", + "Image Caption", + "ImageBorder", + "ImageOverlay", + "ImagePaste", + "Int to String", + "LoraSyntaxProcessor", + "Mikey Sampler", + "Mikey Sampler Base Only", + "Mikey Sampler Base Only Advanced", + "Mikey Sampler Tiled", + "Mikey Sampler Tiled Base Only", + "MikeySamplerTiledAdvanced", + "MikeySamplerTiledAdvancedBaseOnly", + "OobaPrompt", + "PresetRatioSelector", + "Prompt With SDXL", + "Prompt With Style", + "Prompt With Style V2", + "Prompt With Style V3", + "Range Float", + "Range Integer", + "Ratio Advanced", + "Resize Image for SDXL", + "Save Image If True", + "Save Image With Prompt Data", + "Save Images Mikey", + "Save Images No Display", + "SaveMetaData", + "SearchAndReplace", + "Seed String", + "Style Conditioner", + "Style Conditioner Base Only", + "Text2InputOr3rdOption", + "TextCombinations", + "TextCombinations3", + "TextPreserve", + "Upscale Tile Calculator", + "Wildcard Processor", + "WildcardAndLoraSyntaxProcessor", + "WildcardOobaPrompt" + ], + { + "title_aux": "Mikey Nodes" + } + ], + "https://github.com/bedovyy/ComfyUI_NAIDGenerator": [ + [ + "GenerateNAID", + "ImageToNAIMask", + "Img2ImgOptionNAID", + "InpaintingOptionNAID", + "ModelOptionNAID" + ], + { + "title_aux": "ComfyUI_NAIDGenerator" + } + ], + "https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py": [ + [ + "CLIPSeg", + "CombineSegMasks" + ], + { + "title_aux": "CLIPSeg" + } + ], + "https://github.com/bmad4ever/comfyui_ab_samplercustom": [ + [ + "AB SamplerCustom (experimental)" + ], + { + "title_aux": "comfyui_ab_sampler" + } + ], + "https://github.com/bmad4ever/comfyui_bmad_nodes": [ + [ + "AdaptiveThresholding", + "Add String To Many", + "AddAlpha", + "AdjustRect", + "AnyToAny", + "BoundingRect (contours)", + "BuildColorRangeAdvanced (hsv)", + "BuildColorRangeHSV (hsv)", + "CLAHE", + "CLIPEncodeMultiple", + "CLIPEncodeMultipleAdvanced", + "ChameleonMask", + "CheckpointLoader (dirty)", + "CheckpointLoaderSimple (dirty)", + "Color (RGB)", + "Color (hexadecimal)", + "Color Clip", + "Color Clip (advanced)", + "Color Clip ADE20k", + "ColorDictionary", + "ColorDictionary (custom)", + "Conditioning (combine multiple)", + "Conditioning (combine selective)", + "Conditioning Grid (cond)", + "Conditioning Grid (string)", + "Conditioning Grid (string) Advanced", + "Contour To Mask", + "Contours", + "ControlNetHadamard", + "ControlNetHadamard (manual)", + "ConvertImg", + "CopyMakeBorder", + "CreateRequestMetadata", + "DistanceTransform", + "Draw Contour(s)", + "EqualizeHistogram", + "FadeMaskEdges", + "Filter Contour", + "FindComplementaryColor", + "FindThreshold", + "FlatLatentsIntoSingleGrid", + "Framed Mask Grab Cut", + "Framed Mask Grab Cut 2", + "FromListGet1Color", + "FromListGet1Cond", + "FromListGet1Float", + "FromListGet1Image", + "FromListGet1Int", + "FromListGet1Latent", + "FromListGet1Model", + "FromListGet1String", + "FromListGetColors", + "FromListGetConds", + "FromListGetFloats", + "FromListGetImages", + "FromListGetInts", + "FromListGetLatents", + "FromListGetModels", + "FromListGetStrings", + "Get Contour from list", + "Get Models", + "Get Prompt", + "HypernetworkLoader (dirty)", + "ImageBatchToList", + "InRange (hsv)", + "InnerCylinder (remap)", + "Inpaint", + "Input/String to Int Array", + "KMeansColor", + "Load 64 Encoded Image", + 
"LoraLoader (dirty)", + "MaskGrid N KSamplers Advanced", + "Merge Latent Batch Gridwise", + "MonoMerge", + "MorphologicOperation", + "MorphologicSkeletoning", + "NaiveAutoKMeansColor", + "OtsuThreshold", + "OuterCylinder (remap)", + "RGB to HSV", + "Rect Grab Cut", + "Remap", + "Repeat Into Grid (image)", + "Repeat Into Grid (latent)", + "RequestInputs", + "SampleColorHSV", + "Save Image (api)", + "SeamlessClone", + "SeamlessClone (simple)", + "SetRequestStateToComplete", + "String", + "String to Float", + "String to Integer", + "ToColorList", + "ToCondList", + "ToFloatList", + "ToImageList", + "ToIntList", + "ToLatentList", + "ToModelList", + "ToStringList", + "UnGridify (image)", + "VAEEncodeBatch" + ], + { + "title_aux": "Bmad Nodes" + } + ], + "https://github.com/bradsec/ComfyUI_ResolutionSelector": [ + [ + "ResolutionSelector" + ], + { + "title_aux": "ResolutionSelector for ComfyUI" + } + ], + "https://github.com/braintacles/braintacles-comfyui-nodes": [ + [ + "CLIPTextEncodeSDXL-Multi-IO", + "CLIPTextEncodeSDXL-Pipe", + "Empty Latent Image from Aspect-Ratio", + "Random Find and Replace", + "VAE Decode Pipe", + "VAE Decode Tiled Pipe", + "VAE Encode Pipe", + "VAE Encode Tiled Pipe" + ], + { + "title_aux": "braintacles-nodes" + } + ], + "https://github.com/bronkula/comfyui-fitsize": [ + [ + "FS: Crop Image Into Even Pieces", + "FS: Fit Image And Resize", + "FS: Fit Size From Image", + "FS: Fit Size From Int", + "FS: Image Region To Mask", + "FS: Load Image And Resize To Fit", + "FS: Pick Image From Batch", + "FS: Pick Image From Batches", + "FS: Pick Image From List" + ], + { + "title_aux": "comfyui-fitsize" + } + ], + "https://github.com/budihartono/comfyui_otonx_nodes": [ + [ + "OTX Integer Multiple Inputs 4", + "OTX Integer Multiple Inputs 5", + "OTX Integer Multiple Inputs 6", + "OTX KSampler Feeder", + "OTX Versatile Multiple Inputs 4", + "OTX Versatile Multiple Inputs 5", + "OTX Versatile Multiple Inputs 6" + ], + { + "title_aux": "Otonx's Custom Nodes" + } + ], + "https://github.com/bvhari/ComfyUI_ImageProcessing": [ + [ + "BilateralFilter", + "Brightness", + "Gamma", + "Hue", + "Saturation", + "SigmoidCorrection", + "UnsharpMask" + ], + { + "title_aux": "ImageProcessing" + } + ], + "https://github.com/bvhari/ComfyUI_LatentToRGB": [ + [ + "LatentToRGB" + ], + { + "title_aux": "LatentToRGB" + } + ], + "https://github.com/bvhari/ComfyUI_PerpNeg": [ + [ + "KSamplerAdvancedPerpNeg" + ], + { + "title_aux": "ComfyUI_PerpNeg [WIP]" + } + ], + "https://github.com/bvhari/ComfyUI_PerpWeight": [ + [ + "CLIPTextEncodePerpWeight" + ], + { + "title_aux": "ComfyUI_PerpWeight" + } + ], + "https://github.com/catscandrive/comfyui-imagesubfolders/raw/main/loadImageWithSubfolders.py": [ + [ + "LoadImagewithSubfolders" + ], + { + "title_aux": "Image loader with subfolders" + } + ], + "https://github.com/chflame163/ComfyUI_MSSpeech_TTS": [ + [ + "MicorsoftSpeech_TTS", + "Play Sound" + ], + { + "title_aux": "ComfyUI_MSSpeech_TTS" + } + ], + "https://github.com/chibiace/ComfyUI-Chibi-Nodes": [ + [ + "ConditionText", + "ConditionTextMulti", + "ImageAddText", + "ImageSimpleResize", + "ImageSizeInfo", + "ImageTool", + "Int2String", + "LoadEmbedding", + "LoadImageExtended", + "Loader", + "Prompts", + "SaveImages", + "SeedGenerator", + "SimpleSampler", + "TextSplit", + "Textbox", + "Wildcards" + ], + { + "title_aux": "ComfyUI-Chibi-Nodes" + } + ], + "https://github.com/chrisgoringe/cg-image-picker": [ + [ + "Preview Chooser", + "Preview Chooser Fabric" + ], + { + "author": "chrisgoringe", + "description": 
"Custom nodes that preview images and pause the workflow to allow the user to select one or more to progress", + "nickname": "Image Chooser", + "title": "Image Chooser", + "title_aux": "Image chooser" + } + ], + "https://github.com/chrisgoringe/cg-noise": [ + [ + "Hijack", + "KSampler Advanced with Variations", + "KSampler with Variations", + "UnHijack" + ], + { + "title_aux": "Variation seeds" + } + ], + "https://github.com/chrisgoringe/cg-use-everywhere": [ + [ + "Seed Everywhere" + ], + { + "nodename_pattern": "(^(Prompts|Anything) Everywhere|Simple String)", + "title_aux": "Use Everywhere (UE Nodes)" + } + ], + "https://github.com/city96/ComfyUI_ColorMod": [ + [ + "ColorModEdges", + "ColorModPivot", + "LoadImageHighPrec", + "PreviewImageHighPrec", + "SaveImageHighPrec" + ], + { + "title_aux": "ComfyUI_ColorMod" + } + ], + "https://github.com/city96/ComfyUI_DiT": [ + [ + "DiTCheckpointLoader", + "DiTCheckpointLoaderSimple", + "DiTLabelCombine", + "DiTLabelSelect", + "DiTSampler" + ], + { + "title_aux": "ComfyUI_DiT [WIP]" + } + ], + "https://github.com/city96/ComfyUI_ExtraModels": [ + [ + "DiTCondLabelEmpty", + "DiTCondLabelSelect", + "DitCheckpointLoader", + "ExtraVAELoader", + "PixArtCheckpointLoader", + "PixArtDPMSampler", + "PixArtResolutionSelect", + "PixArtT5TextEncode", + "T5TextEncode", + "T5v11Loader" + ], + { + "title_aux": "Extra Models for ComfyUI" + } + ], + "https://github.com/city96/ComfyUI_NetDist": [ + [ + "FetchRemote", + "QueueRemote" + ], + { + "title_aux": "ComfyUI_NetDist" + } + ], + "https://github.com/city96/SD-Advanced-Noise": [ + [ + "LatentGaussianNoise", + "MathEncode" + ], + { + "title_aux": "SD-Advanced-Noise" + } + ], + "https://github.com/city96/SD-Latent-Interposer": [ + [ + "LatentInterposer" + ], + { + "title_aux": "Latent-Interposer" + } + ], + "https://github.com/city96/SD-Latent-Upscaler": [ + [ + "LatentUpscaler" + ], + { + "title_aux": "SD-Latent-Upscaler" + } + ], + "https://github.com/civitai/comfy-nodes": [ + [ + "CivitAI_Checkpoint_Loader", + "CivitAI_Lora_Loader" + ], + { + "title_aux": "comfy-nodes" + } + ], + "https://github.com/comfyanonymous/ComfyUI": [ + [ + "BasicScheduler", + "CLIPLoader", + "CLIPMergeSimple", + "CLIPSave", + "CLIPSetLastLayer", + "CLIPTextEncode", + "CLIPTextEncodeSDXL", + "CLIPTextEncodeSDXLRefiner", + "CLIPVisionEncode", + "CLIPVisionLoader", + "Canny", + "CheckpointLoader", + "CheckpointLoaderSimple", + "CheckpointSave", + "ConditioningAverage", + "ConditioningCombine", + "ConditioningConcat", + "ConditioningSetArea", + "ConditioningSetAreaPercentage", + "ConditioningSetMask", + "ConditioningSetTimestepRange", + "ConditioningZeroOut", + "ControlNetApply", + "ControlNetApplyAdvanced", + "ControlNetLoader", + "CropMask", + "DiffControlNetLoader", + "DiffusersLoader", + "DualCLIPLoader", + "EmptyImage", + "EmptyLatentImage", + "ExponentialScheduler", + "FeatherMask", + "FlipSigmas", + "FreeU", + "FreeU_V2", + "GLIGENLoader", + "GLIGENTextBoxApply", + "GrowMask", + "HyperTile", + "HypernetworkLoader", + "ImageBatch", + "ImageBlend", + "ImageBlur", + "ImageColorToMask", + "ImageCompositeMasked", + "ImageCrop", + "ImageInvert", + "ImageOnlyCheckpointLoader", + "ImagePadForOutpaint", + "ImageQuantize", + "ImageScale", + "ImageScaleBy", + "ImageScaleToTotalPixels", + "ImageSharpen", + "ImageToMask", + "ImageUpscaleWithModel", + "InvertMask", + "JoinImageWithAlpha", + "KSampler", + "KSamplerAdvanced", + "KSamplerSelect", + "KarrasScheduler", + "LatentAdd", + "LatentBlend", + "LatentComposite", + "LatentCompositeMasked", + 
"LatentCrop", + "LatentFlip", + "LatentFromBatch", + "LatentInterpolate", + "LatentMultiply", + "LatentRotate", + "LatentSubtract", + "LatentUpscale", + "LatentUpscaleBy", + "LoadImage", + "LoadImageMask", + "LoadLatent", + "LoraLoader", + "LoraLoaderModelOnly", + "MaskComposite", + "MaskToImage", + "ModelMergeAdd", + "ModelMergeBlocks", + "ModelMergeSimple", + "ModelMergeSubtract", + "ModelSamplingContinuousEDM", + "ModelSamplingDiscrete", + "PatchModelAddDownscale", + "PolyexponentialScheduler", + "PorterDuffImageComposite", + "PreviewImage", + "RebatchLatents", + "RepeatImageBatch", + "RepeatLatentBatch", + "RescaleCFG", + "SDTurboScheduler", + "SVD_img2vid_Conditioning", + "SamplerCustom", + "SamplerDPMPP_2M_SDE", + "SamplerDPMPP_SDE", + "SaveAnimatedPNG", + "SaveAnimatedWEBP", + "SaveImage", + "SaveLatent", + "SetLatentNoiseMask", + "SolidMask", + "SplitImageWithAlpha", + "SplitSigmas", + "StyleModelApply", + "StyleModelLoader", + "TomePatchModel", + "UNETLoader", + "UpscaleModelLoader", + "VAEDecode", + "VAEDecodeTiled", + "VAEEncode", + "VAEEncodeForInpaint", + "VAEEncodeTiled", + "VAELoader", + "VAESave", + "VPScheduler", + "VideoLinearCFGGuidance", + "unCLIPCheckpointLoader", + "unCLIPConditioning" + ], + { + "title_aux": "ComfyUI" + } + ], + "https://github.com/comfyanonymous/ComfyUI_experiments": [ + [ + "ModelMergeBlockNumber", + "ModelMergeSDXL", + "ModelMergeSDXLDetailedTransformers", + "ModelMergeSDXLTransformers", + "ModelSamplerTonemapNoiseTest", + "ReferenceOnlySimple", + "RescaleClassifierFreeGuidanceTest", + "TonemapNoiseWithRescaleCFG" + ], + { + "title_aux": "ComfyUI_experiments" + } + ], + "https://github.com/coreyryanhanson/ComfyQR": [ + [ + "comfy-qr-by-image-size", + "comfy-qr-by-module-size", + "comfy-qr-by-module-split", + "comfy-qr-mask_errors" + ], + { + "title_aux": "ComfyQR" + } + ], + "https://github.com/coreyryanhanson/ComfyQR-scanning-nodes": [ + [ + "comfy-qr-read", + "comfy-qr-validate" + ], + { + "title_aux": "ComfyQR-scanning-nodes" + } + ], + "https://github.com/cubiq/ComfyUI_IPAdapter_plus": [ + [ + "IPAdapterApply", + "IPAdapterApplyEncoded", + "IPAdapterBatchEmbeds", + "IPAdapterEncoder", + "IPAdapterLoadEmbeds", + "IPAdapterModelLoader", + "IPAdapterSaveEmbeds", + "PrepImageForClipVision" + ], + { + "title_aux": "ComfyUI_IPAdapter_plus" + } + ], + "https://github.com/cubiq/ComfyUI_SimpleMath": [ + [ + "SimpleMath", + "SimpleMathDebug" + ], + { + "title_aux": "Simple Math" + } + ], + "https://github.com/cubiq/ComfyUI_essentials": [ + [ + "ConsoleDebug+", + "GetImageSize+", + "ImageCASharpening+", + "ImageCrop+", + "ImageDesaturate+", + "ImageEnhanceDifference+", + "ImageExpandBatch+", + "ImageFlip+", + "ImagePosterize+", + "ImageResize+", + "MaskBatch+", + "MaskBlur+", + "MaskExpandBatch+", + "MaskFlip+", + "MaskPreview+", + "ModelCompile+", + "SimpleMath+", + "TransitionMask+" + ], + { + "title_aux": "ComfyUI Essentials" + } + ], + "https://github.com/dagthomas/comfyui_dagthomas": [ + [ + "CSL", + "CSVPromptGenerator", + "PromptGenerator" + ], + { + "title_aux": "SDXL Auto Prompter" + } + ], + "https://github.com/dawangraoming/ComfyUI_ksampler_gpu/raw/main/ksampler_gpu.py": [ + [ + "KSamplerAdvancedGPU", + "KSamplerGPU" + ], + { + "title_aux": "KSampler GPU" + } + ], + "https://github.com/daxthin/DZ-FaceDetailer": [ + [ + "DZ_Face_Detailer" + ], + { + "title_aux": "DZ-FaceDetailer" + } + ], + "https://github.com/dimtoneff/ComfyUI-PixelArt-Detector": [ + [ + "PixelArtAddDitherPattern", + "PixelArtDetectorConverter", + "PixelArtDetectorSave", + 
"PixelArtDetectorToImage", + "PixelArtLoadPalettes" + ], + { + "title_aux": "ComfyUI PixelArt Detector" + } + ], + "https://github.com/diontimmer/ComfyUI-Vextra-Nodes": [ + [ + "Add Text To Image", + "Apply Instagram Filter", + "Create Solid Color", + "Flatten Colors", + "Generate Noise Image", + "GlitchThis Effect", + "Hue Rotation", + "Load Picture Index", + "Pixel Sort", + "Play Sound At Execution", + "Prettify Prompt Using distilgpt2", + "Swap Color Mode" + ], + { + "title_aux": "ComfyUI-Vextra-Nodes" + } + ], + "https://github.com/drago87/ComfyUI_Dragos_Nodes": [ + [ + "file_padding", + "image_info", + "lora_loader", + "vae_loader" + ], + { + "title_aux": "ComfyUI_Dragos_Nodes" + } + ], + "https://github.com/drustan-hawk/primitive-types": [ + [ + "float", + "int", + "string", + "string_multiline" + ], + { + "title_aux": "primitive-types" + } + ], + "https://github.com/ealkanat/comfyui_easy_padding": [ + [ + "comfyui-easy-padding" + ], + { + "title_aux": "ComfyUI Easy Padding" + } + ], + "https://github.com/evanspearman/ComfyMath": [ + [ + "CM_BoolBinaryOperation", + "CM_BoolToInt", + "CM_BoolUnaryOperation", + "CM_BreakoutVec2", + "CM_BreakoutVec3", + "CM_BreakoutVec4", + "CM_ComposeVec2", + "CM_ComposeVec3", + "CM_ComposeVec4", + "CM_FloatBinaryCondition", + "CM_FloatBinaryOperation", + "CM_FloatToInt", + "CM_FloatToNumber", + "CM_FloatUnaryCondition", + "CM_FloatUnaryOperation", + "CM_IntBinaryCondition", + "CM_IntBinaryOperation", + "CM_IntToBool", + "CM_IntToFloat", + "CM_IntToNumber", + "CM_IntUnaryCondition", + "CM_IntUnaryOperation", + "CM_NearestSDXLResolution", + "CM_NumberBinaryCondition", + "CM_NumberBinaryOperation", + "CM_NumberToFloat", + "CM_NumberToInt", + "CM_NumberUnaryCondition", + "CM_NumberUnaryOperation", + "CM_SDXLResolution", + "CM_Vec2BinaryCondition", + "CM_Vec2BinaryOperation", + "CM_Vec2ScalarOperation", + "CM_Vec2ToScalarBinaryOperation", + "CM_Vec2ToScalarUnaryOperation", + "CM_Vec2UnaryCondition", + "CM_Vec2UnaryOperation", + "CM_Vec3BinaryCondition", + "CM_Vec3BinaryOperation", + "CM_Vec3ScalarOperation", + "CM_Vec3ToScalarBinaryOperation", + "CM_Vec3ToScalarUnaryOperation", + "CM_Vec3UnaryCondition", + "CM_Vec3UnaryOperation", + "CM_Vec4BinaryCondition", + "CM_Vec4BinaryOperation", + "CM_Vec4ScalarOperation", + "CM_Vec4ToScalarBinaryOperation", + "CM_Vec4ToScalarUnaryOperation", + "CM_Vec4UnaryCondition", + "CM_Vec4UnaryOperation" + ], + { + "title_aux": "ComfyMath" + } + ], + "https://github.com/fearnworks/ComfyUI_FearnworksNodes/raw/main/fw_nodes.py": [ + [ + "Count Files in Directory (FW)", + "Count Tokens (FW)", + "Token Count Ranker(FW)", + "Trim To Tokens (FW)" + ], + { + "title_aux": "Fearnworks Custom Nodes" + } + ], + "https://github.com/fexli/fexli-util-node-comfyui": [ + [ + "FEColor2Image", + "FEColorOut", + "FEImagePadForOutpaint", + "FERandomizedColor2Image" + ], + { + "title_aux": "fexli-util-node-comfyui" + } + ], + "https://github.com/filipemeneses/comfy_pixelization": [ + [ + "Pixelization" + ], + { + "title_aux": "Pixelization" + } + ], + "https://github.com/filliptm/ComfyUI_Fill-Nodes": [ + [ + "FL_ImageRandomizer" + ], + { + "title_aux": "ComfyUI_Fill-Nodes" + } + ], + "https://github.com/fitCorder/fcSuite/raw/main/fcSuite.py": [ + [ + "fcFloat", + "fcFloatMatic", + "fcInteger" + ], + { + "title_aux": "fcSuite" + } + ], + "https://github.com/flyingshutter/As_ComfyUI_CustomNodes": [ + [ + "BatchIndex_AS", + "CropImage_AS", + "ImageMixMasked_As", + "ImageToMask_AS", + "Increment_AS", + "Int2Any_AS", + "LatentAdd_AS", + 
"LatentMixMasked_As", + "LatentMix_AS", + "LatentToImages_AS", + "LoadLatent_AS", + "MapRange_AS", + "MaskToImage_AS", + "Math_AS", + "NoiseImage_AS", + "Number2Float_AS", + "Number2Int_AS", + "Number_AS", + "SaveLatent_AS", + "TextToImage_AS", + "TextWildcardList_AS" + ], + { + "title_aux": "As_ComfyUI_CustomNodes" + } + ], + "https://github.com/gemell1/ComfyUI_GMIC": [ + [ + "GmicCliWrapper" + ], + { + "title_aux": "ComfyUI_GMIC" + } + ], + "https://github.com/giriss/comfy-image-saver": [ + [ + "Cfg Literal", + "Checkpoint Selector", + "Int Literal", + "Sampler Selector", + "Save Image w/Metadata", + "Scheduler Selector", + "Seed Generator", + "String Literal", + "Width/Height Literal" + ], + { + "title_aux": "Save Image with Generation Metadata" + } + ], + "https://github.com/guoyk93/yk-node-suite-comfyui": [ + [ + "YKImagePadForOutpaint", + "YKMaskToImage" + ], + { + "title_aux": "y.k.'s ComfyUI node suite" + } + ], + "https://github.com/hhhzzyang/Comfyui_Lama": [ + [ + "LamaApply", + "LamaModelLoader", + "YamlConfigLoader" + ], + { + "title_aux": "Comfyui-Lama" + } + ], + "https://github.com/hnmr293/ComfyUI-nodes-hnmr": [ + [ + "CLIPIter", + "Dict2Model", + "GridImage", + "ImageBlend2", + "KSamplerOverrided", + "KSamplerSetting", + "KSamplerXYZ", + "LatentToHist", + "LatentToImage", + "ModelIter", + "RandomLatentImage", + "SaveStateDict", + "SaveText", + "StateDictLoader", + "StateDictMerger", + "StateDictMergerBlockWeighted", + "StateDictMergerBlockWeightedMulti", + "VAEDecodeBatched", + "VAEEncodeBatched", + "VAEIter" + ], + { + "title_aux": "ComfyUI-nodes-hnmr" + } + ], + "https://github.com/hustille/ComfyUI_Fooocus_KSampler": [ + [ + "KSampler With Refiner (Fooocus)" + ], + { + "title_aux": "ComfyUI_Fooocus_KSampler" + } + ], + "https://github.com/hustille/ComfyUI_hus_utils": [ + [ + "3way Prompt Styler", + "Batch State", + "Date Time Format", + "Debug Extra", + "Fetch widget value", + "Text Hash" + ], + { + "title_aux": "hus' utils for ComfyUI" + } + ], + "https://github.com/hylarucoder/ComfyUI-Eagle-PNGInfo": [ + [ + "EagleImageNode", + "SDXLPromptStyler", + "SDXLPromptStylerAdvanced", + "SDXLResolutionPresets" + ], + { + "title_aux": "Eagle PNGInfo" + } + ], + "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words": [ + [ + "FusionText", + "LoraLoaderAdvanced", + "LoraLoaderStackedAdvanced", + "LoraLoaderStackedVanilla", + "LoraLoaderVanilla", + "Randomizer", + "TagsFormater", + "TagsSelector", + "TextInputBasic" + ], + { + "title_aux": "ComfyUI-Lora-Auto-Trigger-Words" + } + ], + "https://github.com/imb101/ComfyUI-FaceSwap": [ + [ + "FaceSwapNode" + ], + { + "title_aux": "FaceSwap" + } + ], + "https://github.com/jags111/ComfyUI_Jags_Audiotools": [ + [ + "BatchJoinAudio", + "BatchToList", + "BitCrushAudioFX", + "BulkVariation", + "ChorusAudioFX", + "ClippingAudioFX", + "CompressorAudioFX", + "ConcatAudioList", + "ConvolutionAudioFX", + "CutAudio", + "DelayAudioFX", + "DistortionAudioFX", + "DuplicateAudio", + "GainAudioFX", + "GenerateAudioSample", + "GenerateAudioWave", + "GetAudioFromFolderIndex", + "GetSingle", + "GetStringByIndex", + "HighShelfFilter", + "HighpassFilter", + "ImageToSpectral", + "InvertAudioFX", + "JoinAudio", + "LadderFilter", + "LimiterAudioFX", + "ListToBatch", + "LoadAudioDir", + "LoadAudioFile", + "LoadAudioModel (DD)", + "LoadVST3", + "LowShelfFilter", + "LowpassFilter", + "MP3CompressorAudioFX", + "MixAudioTensors", + "NoiseGateAudioFX", + "OTTAudioFX", + "PeakFilter", + "PhaserEffectAudioFX", + "PitchShiftAudioFX", + "PlotSpectrogram", + 
"PreviewAudioFile", + "PreviewAudioTensor", + "ResampleAudio", + "ReverbAudioFX", + "ReverseAudio", + "SaveAudioTensor", + "SequenceVariation", + "SliceAudio", + "StretchAudio" + ], + { + "author": "jags111", + "description": "This extension offers various audio generation tools", + "nickname": "Audiotools", + "title": "Jags_Audiotools", + "title_aux": "ComfyUI_Jags_Audiotools" + } + ], + "https://github.com/jags111/ComfyUI_Jags_VectorMagic": [ + [ + "CircularVAEDecode", + "SVG", + "YoloSEGdetectionNode", + "YoloSegNode", + "color_drop", + "my unique name", + "xy_Tiling_KSampler" + ], + { + "author": "jags111", + "description": "This extension offers various vector manipulation and generation tools", + "nickname": "Jags_VectorMagic", + "title": "Jags_VectorMagic", + "title_aux": "ComfyUI_Jags_VectorMagic" + } + ], + "https://github.com/jags111/efficiency-nodes-comfyui": [ + [ + "AnimateDiff Script", + "Apply ControlNet Stack", + "Control Net Stacker", + "Eff. Loader SDXL", + "Efficient Loader", + "HighRes-Fix Script", + "Image Overlay", + "Join XY Inputs of Same Type", + "KSampler (Efficient)", + "KSampler Adv. (Efficient)", + "KSampler SDXL (Eff.)", + "LatentUpscaler", + "LoRA Stacker", + "Manual XY Entry Info", + "NNLatentUpscale", + "Noise Control Script", + "Pack SDXL Tuple", + "Tiled Upscaler Script", + "Unpack SDXL Tuple", + "XY Input: Add/Return Noise", + "XY Input: Aesthetic Score", + "XY Input: CFG Scale", + "XY Input: Checkpoint", + "XY Input: Clip Skip", + "XY Input: Control Net", + "XY Input: Control Net Plot", + "XY Input: Denoise", + "XY Input: LoRA", + "XY Input: LoRA Plot", + "XY Input: LoRA Stacks", + "XY Input: Manual XY Entry", + "XY Input: Prompt S/R", + "XY Input: Refiner On/Off", + "XY Input: Sampler/Scheduler", + "XY Input: Seeds++ Batch", + "XY Input: Steps", + "XY Input: VAE", + "XY Plot" + ], + { + "title_aux": "Efficiency Nodes for ComfyUI Version 2.0+" + } + ], + "https://github.com/jamesWalker55/comfyui-various": [ + [], + { + "nodename_pattern": "^JW", + "title_aux": "Various ComfyUI Nodes by Type" + } + ], + "https://github.com/jjkramhoeft/ComfyUI-Jjk-Nodes": [ + [ + "JjkConcat", + "JjkShowText", + "JjkText", + "SDXLRecommendedImageSize" + ], + { + "title_aux": "ComfyUI-Jjk-Nodes" + } + ], + "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative": [ + [ + "LCMScheduler", + "SamplerLCMAlternative", + "SamplerLCMCycle" + ], + { + "title_aux": "ComfyUI-sampler-lcm-alternative" + } + ], + "https://github.com/jtrue/ComfyUI-JaRue": [ + [ + "ConcatStringWithDelimiter_jru", + "ConcatString_jru", + "Float2Int_jru", + "Float2String_jru", + "ImageSizer_jru", + "Int2FloatMultiply_jru", + "Int2String_jru", + "String2Int_jru", + "YouTube2Prompt_jru" + ], + { + "title_aux": "ComfyUI-JaRue" + } + ], + "https://github.com/ka-puna/comfyui-yanc": [ + [ + "YANC.ConcatStrings", + "YANC.FormatDatetimeString", + "YANC.GetWidgetValueString", + "YANC.IntegerCaster", + "YANC.MultilineString", + "YANC.TruncateString" + ], + { + "title_aux": "comfyui-yanc" + } + ], + "https://github.com/kenjiqq/qq-nodes-comfyui": [ + [ + "Any List", + "Axis To Float", + "Axis To Int", + "Axis To Model", + "Axis To String", + "Image Accumulator End", + "Image Accumulator Start", + "Load Lines From Text File", + "Slice List", + "XY Grid Helper" + ], + { + "title_aux": "qq-nodes-comfyui" + } + ], + "https://github.com/kijai/ComfyUI-KJNodes": [ + [ + "AddLabel", + "BatchCLIPSeg", + "BatchCropFromMask", + "BatchCropFromMaskAdvanced", + "BatchUncrop", + "BatchUncropAdvanced", + "BboxToInt", + 
"ColorMatch", + "ColorToMask", + "ConditioningMultiCombine", + "ConditioningSetMaskAndCombine", + "ConditioningSetMaskAndCombine3", + "ConditioningSetMaskAndCombine4", + "ConditioningSetMaskAndCombine5", + "CreateAudioMask", + "CreateFadeMask", + "CreateFadeMaskAdvanced", + "CreateFluidMask", + "CreateGradientMask", + "CreateMagicMask", + "CreateShapeMask", + "CreateTextMask", + "CreateVoronoiMask", + "CrossFadeImages", + "DummyLatentOut", + "EmptyLatentImagePresets", + "FlipSigmasAdjusted", + "FloatConstant", + "GetImageRangeFromBatch", + "GrowMaskWithBlur", + "INTConstant", + "ImageBatchTestPattern", + "ImageConcanate", + "ImageGrabPIL", + "ImageGridComposite2x2", + "ImageGridComposite3x3", + "InjectNoiseToLatent", + "NormalizeLatent", + "OffsetMask", + "ReplaceImagesInBatch", + "ResizeMask", + "ReverseImageBatch", + "RoundMask", + "SaveImageWithAlpha", + "SomethingToString", + "SplitBboxes", + "VRAM_Debug", + "WidgetToString" + ], + { + "title_aux": "KJNodes for ComfyUI" + } + ], + "https://github.com/kijai/ComfyUI-SVD": [ + [ + "SVDimg2vid" + ], + { + "title_aux": "ComfyUI-SVD" + } + ], + "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink": [ + [ + "GradientPatchModelAddDownscale", + "GradientPatchModelAddDownscaleAdvanced" + ], + { + "title_aux": "ComfyUI_GradientDeepShrink" + } + ], + "https://github.com/kinfolk0117/ComfyUI_SimpleTiles": [ + [ + "TileCalc", + "TileMerge", + "TileSplit" + ], + { + "title_aux": "SimpleTiles" + } + ], + "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter": [ + [ + "TiledIPAdapter" + ], + { + "title_aux": "TiledIPAdapter" + } + ], + "https://github.com/knuknX/ComfyUI-Image-Tools": [ + [ + "ImageBatchSqueezeProcessor", + "ImageBgRemoveProcessor", + "ImageStandardResizeProcessor", + "SingleImagePathLoader", + "SingleImageUrlLoader" + ], + { + "title_aux": "ComfyUI-Image-Tools" + } + ], + "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI": [ + [ + "LLLiteLoader" + ], + { + "title_aux": "ControlNet-LLLite-ComfyUI" + } + ], + "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes": [ + [ + "S3 Bucket LoRA", + "S3Bucket_Load_LoRA", + "XL DreamBooth LoRA", + "XLDB_LoRA" + ], + { + "title_aux": "ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes" + } + ], + "https://github.com/kwaroran/abg-comfyui": [ + [ + "Remove Image Background (abg)" + ], + { + "title_aux": "abg-comfyui" + } + ], + "https://github.com/laksjdjf/IPAdapter-ComfyUI": [ + [ + "IPAdapter", + "ImageCrop" + ], + { + "title_aux": "IPAdapter-ComfyUI" + } + ], + "https://github.com/laksjdjf/LCMSampler-ComfyUI": [ + [ + "SamplerLCM", + "TAESDLoader" + ], + { + "title_aux": "LCMSampler-ComfyUI" + } + ], + "https://github.com/laksjdjf/LoRA-Merger-ComfyUI": [ + [ + "LoraLoaderFromWeight", + "LoraLoaderWeightOnly", + "LoraMerge", + "LoraSave" + ], + { + "title_aux": "LoRA-Merger-ComfyUI" + } + ], + "https://github.com/laksjdjf/attention-couple-ComfyUI": [ + [ + "Attention couple" + ], + { + "title_aux": "attention-couple-ComfyUI" + } + ], + "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI": [ + [ + "CDTuner", + "Negapip", + "Negpip" + ], + { + "title_aux": "cd-tuner_negpip-ComfyUI" + } + ], + "https://github.com/laksjdjf/pfg-ComfyUI": [ + [ + "PFG" + ], + { + "title_aux": "pfg-ComfyUI" + } + ], + "https://github.com/lilly1987/ComfyUI_node_Lilly": [ + [ + "CheckpointLoaderSimpleText", + "LoraLoaderText", + "LoraLoaderTextRandom", + "Random_Sampler", + "VAELoaderDecode" + ], + { + "title_aux": "simple wildcard for ComfyUI" + } + ], + 
"https://github.com/lordgasmic/ComfyUI-Wildcards/raw/master/wildcards.py": [ + [ + "CLIPTextEncodeWithWildcards" + ], + { + "title_aux": "Wildcards" + } + ], + "https://github.com/lrzjason/ComfyUIJasonNode/raw/main/SDXLMixSampler.py": [ + [ + "SDXLMixSampler" + ], + { + "title_aux": "ComfyUIJasonNode" + } + ], + "https://github.com/ltdrdata/ComfyUI-Impact-Pack": [ + [ + "AddMask", + "BasicPipeToDetailerPipe", + "BasicPipeToDetailerPipeSDXL", + "BboxDetectorCombined", + "BboxDetectorCombined_v2", + "BboxDetectorForEach", + "BboxDetectorSEGS", + "BitwiseAndMask", + "BitwiseAndMaskForEach", + "CLIPSegDetectorProvider", + "CfgScheduleHookProvider", + "CombineRegionalPrompts", + "CoreMLDetailerHookProvider", + "DenoiseScheduleHookProvider", + "DetailerForEach", + "DetailerForEachDebug", + "DetailerForEachDebugPipe", + "DetailerForEachPipe", + "DetailerPipeToBasicPipe", + "EditBasicPipe", + "EditDetailerPipe", + "EditDetailerPipeSDXL", + "EmptySegs", + "FaceDetailer", + "FaceDetailerPipe", + "FromBasicPipe", + "FromBasicPipe_v2", + "FromDetailerPipe", + "FromDetailerPipeSDXL", + "FromDetailerPipe_v2", + "ImageListToImageBatch", + "ImageMaskSwitch", + "ImageReceiver", + "ImageSender", + "ImpactAssembleSEGS", + "ImpactCombineConditionings", + "ImpactCompare", + "ImpactConditionalBranch", + "ImpactConditionalStopIteration", + "ImpactControlBridge", + "ImpactControlNetApplySEGS", + "ImpactDecomposeSEGS", + "ImpactDilateMask", + "ImpactDilate_Mask_SEG_ELT", + "ImpactDummyInput", + "ImpactEdit_SEG_ELT", + "ImpactFloat", + "ImpactFrom_SEG_ELT", + "ImpactHFTransformersClassifierProvider", + "ImpactImageBatchToImageList", + "ImpactImageInfo", + "ImpactInt", + "ImpactInversedSwitch", + "ImpactIsNotEmptySEGS", + "ImpactKSamplerAdvancedBasicPipe", + "ImpactKSamplerBasicPipe", + "ImpactLogger", + "ImpactMakeImageBatch", + "ImpactMakeImageList", + "ImpactMinMax", + "ImpactNeg", + "ImpactNodeSetMuteState", + "ImpactQueueTrigger", + "ImpactQueueTriggerCountdown", + "ImpactSEGSClassify", + "ImpactSEGSConcat", + "ImpactSEGSLabelFilter", + "ImpactSEGSOrderedFilter", + "ImpactSEGSPicker", + "ImpactSEGSRangeFilter", + "ImpactSEGSToMaskBatch", + "ImpactSEGSToMaskList", + "ImpactScaleBy_BBOX_SEG_ELT", + "ImpactSegsAndMask", + "ImpactSegsAndMaskForEach", + "ImpactSetWidgetValue", + "ImpactSimpleDetectorSEGS", + "ImpactSimpleDetectorSEGSPipe", + "ImpactSimpleDetectorSEGS_for_AD", + "ImpactSleep", + "ImpactStringSelector", + "ImpactSwitch", + "ImpactValueReceiver", + "ImpactValueSender", + "ImpactWildcardEncode", + "ImpactWildcardProcessor", + "IterativeImageUpscale", + "IterativeLatentUpscale", + "KSamplerAdvancedProvider", + "KSamplerProvider", + "LatentPixelScale", + "LatentReceiver", + "LatentSender", + "LatentSwitch", + "MMDetDetectorProvider", + "MMDetLoader", + "MaskDetailerPipe", + "MaskListToMaskBatch", + "MaskPainter", + "MaskToSEGS", + "MaskToSEGS_for_AnimateDiff", + "MasksToMaskList", + "MediaPipeFaceMeshToSEGS", + "NoiseInjectionDetailerHookProvider", + "NoiseInjectionHookProvider", + "ONNXDetectorProvider", + "ONNXDetectorSEGS", + "PixelKSampleHookCombine", + "PixelKSampleUpscalerProvider", + "PixelKSampleUpscalerProviderPipe", + "PixelTiledKSampleUpscalerProvider", + "PixelTiledKSampleUpscalerProviderPipe", + "PreviewBridge", + "ReencodeLatent", + "ReencodeLatentPipe", + "RegionalPrompt", + "RegionalSampler", + "RegionalSamplerAdvanced", + "RemoveNoiseMask", + "SAMDetectorCombined", + "SAMDetectorSegmented", + "SAMLoader", + "SEGSDetailer", + "SEGSDetailerForAnimateDiff", + "SEGSPaste", + "SEGSPreview", + 
"SEGSSwitch", + "SEGSToImageList", + "SegmDetectorCombined", + "SegmDetectorCombined_v2", + "SegmDetectorForEach", + "SegmDetectorSEGS", + "Segs Mask", + "Segs Mask ForEach", + "SegsMaskCombine", + "SegsToCombinedMask", + "SubtractMask", + "SubtractMaskForEach", + "TiledKSamplerProvider", + "ToBasicPipe", + "ToBinaryMask", + "ToDetailerPipe", + "ToDetailerPipeSDXL", + "TwoAdvancedSamplersForMask", + "TwoSamplersForMask", + "TwoSamplersForMaskUpscalerProvider", + "TwoSamplersForMaskUpscalerProviderPipe", + "UltralyticsDetectorProvider" + ], + { + "author": "Dr.Lt.Data", + "description": "This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler.", + "nickname": "Impact Pack", + "title": "Impact Pack", + "title_aux": "ComfyUI Impact Pack" + } + ], + "https://github.com/ltdrdata/ComfyUI-Inspire-Pack": [ + [ + "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire", + "ApplyRegionalIPAdapters //Inspire", + "BindImageListPromptList //Inspire", + "CacheBackendData //Inspire", + "CacheBackendDataList //Inspire", + "CacheBackendDataNumberKey //Inspire", + "CacheBackendDataNumberKeyList //Inspire", + "Canny_Preprocessor_Provider_for_SEGS //Inspire", + "ChangeImageBatchSize //Inspire", + "Color_Preprocessor_Provider_for_SEGS //Inspire", + "DWPreprocessor_Provider_for_SEGS //Inspire", + "FakeScribblePreprocessor_Provider_for_SEGS //Inspire", + "FloatRange //Inspire", + "FromIPAdapterPipe //Inspire", + "GlobalSeed //Inspire", + "HEDPreprocessor_Provider_for_SEGS //Inspire", + "InpaintPreprocessor_Provider_for_SEGS //Inspire", + "KSampler //Inspire", + "KSamplerAdvanced //Inspire", + "KSamplerAdvancedProgress //Inspire", + "KSamplerProgress //Inspire", + "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire", + "LineArt_Preprocessor_Provider_for_SEGS //Inspire", + "ListCounter //Inspire", + "LoadImage //Inspire", + "LoadImageListFromDir //Inspire", + "LoadImagesFromDir //Inspire", + "LoadPromptsFromDir //Inspire", + "LoadPromptsFromFile //Inspire", + "LoraBlockInfo //Inspire", + "LoraLoaderBlockWeight //Inspire", + "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire", + "MediaPipeFaceMeshDetectorProvider //Inspire", + "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire", + "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire", + "OpenPose_Preprocessor_Provider_for_SEGS //Inspire", + "PromptBuilder //Inspire", + "PromptExtractor //Inspire", + "RegionalConditioningColorMask //Inspire", + "RegionalConditioningSimple //Inspire", + "RegionalIPAdapterColorMask //Inspire", + "RegionalIPAdapterEncodedColorMask //Inspire", + "RegionalIPAdapterEncodedMask //Inspire", + "RegionalIPAdapterMask //Inspire", + "RegionalPromptColorMask //Inspire", + "RegionalPromptSimple //Inspire", + "RegionalSeedExplorerColorMask //Inspire", + "RegionalSeedExplorerMask //Inspire", + "RemoveBackendData //Inspire", + "RemoveBackendDataNumberKey //Inspire", + "RetrieveBackendData //Inspire", + "RetrieveBackendDataNumberKey //Inspire", + "SeedExplorer //Inspire", + "ShowCachedInfo //Inspire", + "TilePreprocessor_Provider_for_SEGS //Inspire", + "ToIPAdapterPipe //Inspire", + "UnzipPrompt //Inspire", + "WildcardEncode //Inspire", + "XY Input: Lora Block Weight //Inspire", + "ZipPrompt //Inspire", + "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire" + ], + { + "author": "Dr.Lt.Data", + "description": "This extension provides various nodes to support Lora Block Weight and the Impact Pack.", + 
"nickname": "Inspire Pack", + "nodename_pattern": "Inspire$", + "title": "Inspire Pack", + "title_aux": "ComfyUI Inspire Pack" + } + ], + "https://github.com/m-sokes/ComfyUI-Sokes-Nodes": [ + [ + "Custom Date Format | sokes \ud83e\uddac", + "Latent Switch x9 | sokes \ud83e\uddac" + ], + { + "title_aux": "ComfyUI Sokes Nodes" + } + ], + "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes/raw/main/clip-text-encode-split/clip_text_encode_split.py": [ + [ + "RawText", + "RawTextCombine", + "RawTextEncode", + "RawTextReplace" + ], + { + "title_aux": "m957ymj75urz/ComfyUI-Custom-Nodes" + } + ], + "https://github.com/marhensa/sdxl-recommended-res-calc": [ + [ + "RecommendedResCalc" + ], + { + "title_aux": "Recommended Resolution Calculator" + } + ], + "https://github.com/martijnat/comfyui-previewlatent": [ + [ + "PreviewLatent", + "PreviewLatentAdvanced" + ], + { + "title_aux": "comfyui-previewlatent" + } + ], + "https://github.com/matan1905/ComfyUI-Serving-Toolkit": [ + [ + "DiscordServing", + "ServingInputNumber", + "ServingInputText", + "ServingOutput", + "WebSocketServing" + ], + { + "title_aux": "ComfyUI Serving toolkit" + } + ], + "https://github.com/mav-rik/facerestore_cf": [ + [ + "CropFace", + "FaceRestoreCFWithModel", + "FaceRestoreModelLoader" + ], + { + "title_aux": "Facerestore CF (Code Former)" + } + ], + "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding": [ + [ + "DynamicThresholdingFull", + "DynamicThresholdingSimple" + ], + { + "title_aux": "Stable Diffusion Dynamic Thresholding (CFG Scale Fix)" + } + ], + "https://github.com/meap158/ComfyUI-GPU-temperature-protection": [ + [ + "GPUTemperatureProtection" + ], + { + "title_aux": "GPU temperature protection" + } + ], + "https://github.com/meap158/ComfyUI-Prompt-Expansion": [ + [ + "PromptExpansion" + ], + { + "title_aux": "ComfyUI-Prompt-Expansion" + } + ], + "https://github.com/melMass/comfy_mtb": [ + [ + "Animation Builder (mtb)", + "Any To String (mtb)", + "Batch Float (mtb)", + "Batch Float Assemble (mtb)", + "Batch Float Fill (mtb)", + "Batch Make (mtb)", + "Batch Merge (mtb)", + "Batch Shake (mtb)", + "Batch Shape (mtb)", + "Batch Transform (mtb)", + "Bbox (mtb)", + "Bbox From Mask (mtb)", + "Blur (mtb)", + "Color Correct (mtb)", + "Colored Image (mtb)", + "Concat Images (mtb)", + "Crop (mtb)", + "Debug (mtb)", + "Deep Bump (mtb)", + "Export With Ffmpeg (mtb)", + "Face Swap (mtb)", + "Film Interpolation (mtb)", + "Fit Number (mtb)", + "Float To Number (mtb)", + "Get Batch From History (mtb)", + "Image Compare (mtb)", + "Image Premultiply (mtb)", + "Image Remove Background Rembg (mtb)", + "Image Resize Factor (mtb)", + "Image Tile Offset (mtb)", + "Int To Bool (mtb)", + "Int To Number (mtb)", + "Interpolate Clip Sequential (mtb)", + "Latent Lerp (mtb)", + "Load Face Analysis Model (mtb)", + "Load Face Enhance Model (mtb)", + "Load Face Swap Model (mtb)", + "Load Film Model (mtb)", + "Load Image From Url (mtb)", + "Load Image Sequence (mtb)", + "Mask To Image (mtb)", + "Math Expression (mtb)", + "Model Patch Seamless (mtb)", + "Pick From Batch (mtb)", + "Qr Code (mtb)", + "Restore Face (mtb)", + "Save Gif (mtb)", + "Save Image Grid (mtb)", + "Save Image Sequence (mtb)", + "Save Tensors (mtb)", + "Sharpen (mtb)", + "Smart Step (mtb)", + "Stack Images (mtb)", + "String Replace (mtb)", + "Styles Loader (mtb)", + "Text To Image (mtb)", + "Transform Image (mtb)", + "Uncrop (mtb)", + "Unsplash Image (mtb)", + "Vae Decode (mtb)" + ], + { + "nodename_pattern": "\\(mtb\\)$", + "title_aux": "MTB Nodes" + } + ], + 
"https://github.com/mihaiiancu/ComfyUI_Inpaint": [ + [ + "InpaintMediapipe" + ], + { + "title_aux": "mihaiiancu/Inpaint" + } + ], + "https://github.com/mikkel/ComfyUI-text-overlay": [ + [ + "Image Text Overlay" + ], + { + "title_aux": "ComfyUI - Text Overlay Plugin" + } + ], + "https://github.com/mikkel/comfyui-mask-boundingbox": [ + [ + "Mask Bounding Box" + ], + { + "title_aux": "ComfyUI - Mask Bounding Box" + } + ], + "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor": [ + [ + "LaMaPreprocessor", + "lamaPreprocessor" + ], + { + "title_aux": "LaMa Preprocessor [WIP]" + } + ], + "https://github.com/mpiquero7164/ComfyUI-SaveImgPrompt": [ + [ + "Save IMG Prompt" + ], + { + "title_aux": "SaveImgPrompt" + } + ], + "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL": [ + [ + "FastLatentToImage" + ], + { + "title_aux": "ComfyUI_FastVAEDecorder_SDXL" + } + ], + "https://github.com/natto-maki/ComfyUI-NegiTools": [ + [ + "NegiTools_CompositeImages", + "NegiTools_ImageProperties", + "NegiTools_LatentProperties", + "NegiTools_NoiseImageGenerator", + "NegiTools_OpenAiDalle3", + "NegiTools_OpenAiTranslate", + "NegiTools_SeedGenerator", + "NegiTools_StringFunction" + ], + { + "title_aux": "ComfyUI-NegiTools" + } + ], + "https://github.com/nicolai256/comfyUI_Nodes_nicolai256/raw/main/yugioh-presets.py": [ + [ + "yugioh_Presets" + ], + { + "title_aux": "comfyUI_Nodes_nicolai256" + } + ], + "https://github.com/ningxiaoxiao/comfyui-NDI": [ + [ + "NDI_LoadImage", + "NDI_SendImage" + ], + { + "title_aux": "comfyui-NDI" + } + ], + "https://github.com/noembryo/ComfyUI-noEmbryo": [ + [ + "PromptTermList1", + "PromptTermList2", + "PromptTermList3", + "PromptTermList4", + "PromptTermList5", + "PromptTermList6" + ], + { + "author": "noEmbryo", + "description": "Some useful nodes for ComfyUI", + "nickname": "noEmbryo", + "title": "noEmbryo nodes for ComfyUI", + "title_aux": "noEmbryo nodes" + } + ], + "https://github.com/noxinias/ComfyUI_NoxinNodes": [ + [ + "NoxinChime", + "NoxinPromptLoad", + "NoxinPromptSave", + "NoxinScaledResolution", + "NoxinSimpleMath", + "NoxinSplitPrompt" + ], + { + "title_aux": "ComfyUI_NoxinNodes" + } + ], + "https://github.com/ntdviet/comfyui-ext/raw/main/custom_nodes/gcLatentTunnel/gcLatentTunnel.py": [ + [ + "gcLatentTunnel" + ], + { + "title_aux": "ntdviet/comfyui-ext" + } + ], + "https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92": [ + [ + "CLIPStringEncode _O", + "Chat completion _O", + "ChatGPT Simple _O", + "ChatGPT _O", + "ChatGPT compact _O", + "Chat_Completion _O", + "Chat_Message _O", + "Chat_Message_fromString _O", + "Concat Text _O", + "ConcatRandomNSP_O", + "Debug String _O", + "Debug Text _O", + "Debug Text route _O", + "Edit_image _O", + "Equation1param _O", + "Equation2params _O", + "GetImage_(Width&Height) _O", + "GetLatent_(Width&Height) _O", + "ImageScaleFactor _O", + "ImageScaleFactorSimple _O", + "LatentUpscaleFactor _O", + "LatentUpscaleFactorSimple _O", + "LatentUpscaleMultiply", + "Note _O", + "RandomNSP _O", + "Replace Text _O", + "String _O", + "Text _O", + "Text2Image _O", + "Trim Text _O", + "VAEDecodeParallel _O", + "combine_chat_messages _O", + "compine_chat_messages _O", + "concat Strings _O", + "create image _O", + "create_image _O", + "debug Completeion _O", + "debug messages_O", + "float _O", + "floatToInt _O", + "floatToText _O", + "int _O", + "intToFloat _O", + "load_openAI _O", + "replace String _O", + "replace String advanced _O", + "saveTextToFile _O", + "seed _O", + "selectLatentFromBatch _O", + "string2Image _O", + "trim String 
_O", + "variation_image _O" + ], + { + "title_aux": "Quality of life Suit:V2" + } + ], + "https://github.com/ostris/ostris_nodes_comfyui": [ + [ + "LLM Pipe Loader - Ostris", + "LLM Prompt Upsampling - Ostris", + "One Seed - Ostris", + "Text Box - Ostris" + ], + { + "nodename_pattern": "- Ostris$", + "title_aux": "Ostris Nodes ComfyUI" + } + ], + "https://github.com/oyvindg/ComfyUI-TrollSuite": [ + [ + "BinaryImageMask", + "ImagePadding", + "LoadLastImage", + "RandomMask", + "TransparentImage" + ], + { + "title_aux": "ComfyUI-TrollSuite" + } + ], + "https://github.com/palant/extended-saveimage-comfyui": [ + [ + "SaveImageExtended" + ], + { + "title_aux": "Extended Save Image for ComfyUI" + } + ], + "https://github.com/palant/image-resize-comfyui": [ + [ + "ImageResize" + ], + { + "title_aux": "Image Resize for ComfyUI" + } + ], + "https://github.com/pants007/comfy-pants": [ + [ + "CLIPTextEncodeAIO", + "Image Make Square" + ], + { + "title_aux": "pants" + } + ], + "https://github.com/paulo-coronado/comfy_clip_blip_node": [ + [ + "CLIPTextEncodeBLIP", + "CLIPTextEncodeBLIP-2", + "Example" + ], + { + "title_aux": "comfy_clip_blip_node" + } + ], + "https://github.com/peteromallet/ComfyUI-Creative-Interpolation": [ + [ + "BatchCreativeInterpolation" + ], + { + "title_aux": "ComfyUI-Creative-Interpolation [Beta]" + } + ], + "https://github.com/picturesonpictures/comfy_PoP": [ + [ + "AdaptiveCannyDetector_PoP", + "AnyAspectRatio", + "ConditioningMultiplier_PoP", + "ConditioningNormalizer_PoP", + "LoadImageResizer_PoP", + "LoraStackLoader10_PoP", + "LoraStackLoader_PoP", + "VAEDecoderPoP", + "VAEEncoderPoP" + ], + { + "title_aux": "comfy_PoP" + } + ], + "https://github.com/pythongosssss/ComfyUI-Custom-Scripts": [ + [ + "CheckpointLoader|pysssss", + "ConstrainImage|pysssss", + "LoadText|pysssss", + "LoraLoader|pysssss", + "MathExpression|pysssss", + "MultiPrimitive|pysssss", + "PlaySound|pysssss", + "Repeater|pysssss", + "ReroutePrimitive|pysssss", + "SaveText|pysssss", + "ShowText|pysssss", + "StringFunction|pysssss" + ], + { + "title_aux": "pythongosssss/ComfyUI-Custom-Scripts" + } + ], + "https://github.com/pythongosssss/ComfyUI-WD14-Tagger": [ + [ + "WD14Tagger|pysssss" + ], + { + "title_aux": "ComfyUI WD 1.4 Tagger" + } + ], + "https://github.com/ramyma/A8R8_ComfyUI_nodes": [ + [ + "Base64ImageInput", + "Base64ImageOutput" + ], + { + "title_aux": "A8R8 ComfyUI Nodes" + } + ], + "https://github.com/receyuki/comfyui-prompt-reader-node": [ + [ + "SDBatchLoader", + "SDParameterGenerator", + "SDPromptMerger", + "SDPromptReader", + "SDPromptSaver", + "SDTypeConverter" + ], + { + "author": "receyuki", + "description": "ComfyUI node version of the SD Prompt Reader", + "nickname": "SD Prompt Reader", + "title": "SD Prompt Reader", + "title_aux": "comfyui-prompt-reader-node" + } + ], + "https://github.com/rgthree/rgthree-comfy": [ + [], + { + "author": "rgthree", + "description": "A bunch of nodes I created that I also find useful.", + "nickname": "rgthree", + "nodename_pattern": " \\(rgthree\\)$", + "title": "Comfy Nodes", + "title_aux": "rgthree's ComfyUi Nodes" + } + ], + "https://github.com/richinsley/Comfy-LFO": [ + [ + "LFO_Pulse", + "LFO_Sawtooth", + "LFO_Sine", + "LFO_Square", + "LFO_Triangle" + ], + { + "title_aux": "Comfy-LFO" + } + ], + "https://github.com/rklaffehn/rk-comfy-nodes": [ + [ + "RK_CivitAIAddHashes", + "RK_CivitAIMetaChecker" + ], + { + "title_aux": "rk-comfy-nodes" + } + ], + "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata": [ + [ + "SetMetadataAll", + 
"SetMetadataString" + ], + { + "title_aux": "ComfyUI PNG Metadata" + } + ], + "https://github.com/s1dlx/comfy_meh/raw/main/meh.py": [ + [ + "MergingExecutionHelper" + ], + { + "title_aux": "comfy_meh" + } + ], + "https://github.com/seanlynch/comfyui-optical-flow": [ + [ + "Apply optical flow", + "Compute optical flow", + "Visualize optical flow" + ], + { + "title_aux": "ComfyUI Optical Flow" + } + ], + "https://github.com/seanlynch/srl-nodes": [ + [ + "SRL Conditional Interrrupt", + "SRL Eval", + "SRL Filter Image List", + "SRL Format String" + ], + { + "title_aux": "SRL's nodes" + } + ], + "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack": [ + [ + "ImageResizeAndCropNode", + "ImageSquareAdapterNode" + ], + { + "title_aux": "ComfyUI_Nimbus-Pack" + } + ], + "https://github.com/shadowcz007/comfyui-mixlab-nodes": [ + [ + "AreaToMask", + "CLIPSeg", + "CLIPSeg_", + "CharacterInText", + "ChatGPTOpenAI", + "CombineMasks_", + "CombineSegMasks", + "EditLayer", + "EmptyLayer", + "EnhanceImage", + "FaceToMask", + "FeatheredMask", + "FloatingVideo", + "ImageCropByAlpha", + "LoadImagesFromPath", + "MergeLayers", + "NewLayer", + "RandomPrompt", + "ScreenShare", + "ShowTextForGPT", + "SmoothMask", + "SplitLongMask", + "SvgImage", + "TextImage", + "TransparentImage", + "VAEDecodeConsistencyDecoder", + "VAELoaderConsistencyDecoder" + ], + { + "title_aux": "comfyui-mixlab-nodes [WIP]" + } + ], + "https://github.com/shiimizu/ComfyUI_smZNodes": [ + [ + "smZ CLIPTextEncode", + "smZ Settings" + ], + { + "title_aux": "smZNodes" + } + ], + "https://github.com/shingo1228/ComfyUI-SDXL-EmptyLatentImage": [ + [ + "SDXL Empty Latent Image" + ], + { + "title_aux": "ComfyUI-SDXL-EmptyLatentImage" + } + ], + "https://github.com/shingo1228/ComfyUI-send-eagle-slim": [ + [ + "Send Webp Image to Eagle" + ], + { + "title_aux": "ComfyUI-send-Eagle(slim)" + } + ], + "https://github.com/shockz0rz/ComfyUI_InterpolateEverything": [ + [ + "OpenposePreprocessorInterpolate" + ], + { + "title_aux": "InterpolateEverything" + } + ], + "https://github.com/sipherxyz/comfyui-art-venture": [ + [ + "AV_CheckpointMerge", + "AV_CheckpointModelsToParametersPipe", + "AV_CheckpointSave", + "AV_ControlNetEfficientLoader", + "AV_ControlNetEfficientLoaderAdvanced", + "AV_ControlNetEfficientStacker", + "AV_ControlNetEfficientStackerSimple", + "AV_ControlNetLoader", + "AV_ControlNetPreprocessor", + "AV_LoraListLoader", + "AV_LoraListStacker", + "AV_LoraLoader", + "AV_ParametersPipeToCheckpointModels", + "AV_ParametersPipeToPrompts", + "AV_PromptsToParametersPipe", + "AV_SAMLoader", + "AV_VAELoader", + "AspectRatioSelector", + "BLIPCaption", + "BLIPLoader", + "BooleanPrimitive", + "ColorBlend", + "ColorCorrect", + "DeepDanbooruCaption", + "DependenciesEdit", + "Fooocus_KSampler", + "Fooocus_KSamplerAdvanced", + "GetBoolFromJson", + "GetFloatFromJson", + "GetIntFromJson", + "GetObjectFromJson", + "GetSAMEmbedding", + "GetTextFromJson", + "ISNetLoader", + "ISNetSegment", + "ImageAlphaComposite", + "ImageApplyChannel", + "ImageExtractChannel", + "ImageGaussianBlur", + "ImageMuxer", + "ImageRepeat", + "ImageScaleDown", + "ImageScaleDownBy", + "ImageScaleDownToSize", + "ImageScaleToMegapixels", + "LaMaInpaint", + "LoadImageAsMaskFromUrl", + "LoadImageFromUrl", + "LoadJsonFromUrl", + "MergeModels", + "NumberScaler", + "OverlayInpaintedImage", + "OverlayInpaintedLatent", + "PrepareImageAndMaskForInpaint", + "QRCodeGenerator", + "RandomFloat", + "RandomInt", + "SAMEmbeddingToImage", + "SDXLAspectRatioSelector", + "SDXLPromptStyler", + "SeedSelector", + 
"StringToInt", + "StringToNumber" + ], + { + "title_aux": "comfyui-art-venture" + } + ], + "https://github.com/skfoo/ComfyUI-Coziness": [ + [ + "LoraTextExtractor-b1f83aa2", + "MultiLoraLoader-70bf3d77" + ], + { + "title_aux": "ComfyUI-Coziness" + } + ], + "https://github.com/space-nuko/ComfyUI-Disco-Diffusion": [ + [ + "DiscoDiffusion_DiscoDiffusion", + "DiscoDiffusion_DiscoDiffusionExtraSettings", + "DiscoDiffusion_GuidedDiffusionLoader", + "DiscoDiffusion_OpenAICLIPLoader" + ], + { + "title_aux": "Disco Diffusion" + } + ], + "https://github.com/space-nuko/ComfyUI-OpenPose-Editor": [ + [ + "Nui.OpenPoseEditor" + ], + { + "title_aux": "OpenPose Editor" + } + ], + "https://github.com/space-nuko/nui-suite": [ + [ + "Nui.DynamicPromptsTextGen", + "Nui.FeelingLuckyTextGen", + "Nui.OutputString" + ], + { + "title_aux": "nui suite" + } + ], + "https://github.com/spacepxl/ComfyUI-HQ-Image-Save": [ + [ + "LoadLatentEXR", + "SaveEXR", + "SaveLatentEXR", + "SaveTiff" + ], + { + "title_aux": "ComfyUI-HQ-Image-Save" + } + ], + "https://github.com/spinagon/ComfyUI-seam-carving": [ + [ + "SeamCarving" + ], + { + "title_aux": "ComfyUI-seam-carving" + } + ], + "https://github.com/spinagon/ComfyUI-seamless-tiling": [ + [ + "CircularVAEDecode", + "MakeCircularVAE", + "OffsetImage", + "SeamlessTile" + ], + { + "title_aux": "Seamless tiling Node for ComfyUI" + } + ], + "https://github.com/spro/comfyui-mirror": [ + [ + "LatentMirror" + ], + { + "title_aux": "Latent Mirror node for ComfyUI" + } + ], + "https://github.com/ssitu/ComfyUI_UltimateSDUpscale": [ + [ + "UltimateSDUpscale", + "UltimateSDUpscaleNoUpscale" + ], + { + "title_aux": "UltimateSDUpscale" + } + ], + "https://github.com/ssitu/ComfyUI_fabric": [ + [ + "FABRICPatchModel", + "FABRICPatchModelAdv", + "KSamplerAdvFABRICAdv", + "KSamplerFABRIC", + "KSamplerFABRICAdv", + "LatentBatch" + ], + { + "title_aux": "ComfyUI fabric" + } + ], + "https://github.com/ssitu/ComfyUI_restart_sampling": [ + [ + "KRestartSampler", + "KRestartSamplerAdv", + "KRestartSamplerSimple" + ], + { + "title_aux": "Restart Sampling" + } + ], + "https://github.com/ssitu/ComfyUI_roop": [ + [ + "RoopImproved", + "roop" + ], + { + "title_aux": "ComfyUI roop" + } + ], + "https://github.com/storyicon/comfyui_segment_anything": [ + [ + "GroundingDinoModelLoader (segment anything)", + "GroundingDinoSAMSegment (segment anything)", + "InvertMask (segment anything)", + "SAMModelLoader (segment anything)" + ], + { + "title_aux": "segment anything" + } + ], + "https://github.com/strimmlarn/ComfyUI_Strimmlarns_aesthetic_score": [ + [ + "AesthetlcScoreSorter", + "CalculateAestheticScore", + "LoadAesteticModel", + "ScoreToNumber" + ], + { + "title_aux": "ComfyUI_Strimmlarns_aesthetic_score" + } + ], + "https://github.com/syllebra/bilbox-comfyui": [ + [ + "BilboXLut", + "BilboXPhotoPrompt", + "BilboXVignette" + ], + { + "title_aux": "BilboX's ComfyUI Custom Nodes" + } + ], + "https://github.com/sylym/comfy_vid2vid": [ + [ + "CheckpointLoaderSimpleSequence", + "DdimInversionSequence", + "KSamplerSequence", + "LoadImageMaskSequence", + "LoadImageSequence", + "LoraLoaderSequence", + "SetLatentNoiseSequence", + "TrainUnetSequence", + "VAEEncodeForInpaintSequence" + ], + { + "title_aux": "Vid2vid" + } + ], + "https://github.com/szhublox/ambw_comfyui": [ + [ + "Auto Merge Block Weighted", + "CLIPMergeSimple", + "CheckpointSave", + "ModelMergeBlocks", + "ModelMergeSimple" + ], + { + "title_aux": "Auto-MBW" + } + ], + "https://github.com/taabata/Comfy_Syrian_Falcon_Nodes/raw/main/SyrianFalconNodes.py": 
[ + [ + "CompositeImage", + "KSamplerAlternate", + "KSamplerPromptEdit", + "KSamplerPromptEditAndAlternate", + "LoopBack", + "QRGenerate", + "WordAsImage" + ], + { + "title_aux": "Syrian Falcon Nodes" + } + ], + "https://github.com/taabata/LCM_Inpaint-Outpaint_Comfy": [ + [ + "FreeU_LCM", + "ImageOutputToComfyNodes", + "ImageShuffle", + "LCMGenerate", + "LCMGenerate_ReferenceOnly", + "LCMGenerate_SDTurbo", + "LCMGenerate_img2img", + "LCMGenerate_img2img_IPAdapter", + "LCMGenerate_img2img_controlnet", + "LCMGenerate_inpaintv2", + "LCMGenerate_inpaintv3", + "LCMLoader", + "LCMLoader_RefInpaint", + "LCMLoader_ReferenceOnly", + "LCMLoader_SDTurbo", + "LCMLoader_controlnet", + "LCMLoader_controlnet_inpaint", + "LCMLoader_img2img", + "LCMLoraLoader_inpaint", + "LCMLora_inpaint", + "LCMT2IAdapter", + "LCM_IPAdapter", + "LCM_IPAdapter_inpaint", + "LCM_outpaint_prep", + "LoadImageNode_LCM", + "OutpaintCanvasTool", + "SaveImage_LCM", + "stitch" + ], + { + "title_aux": "LCM_Inpaint-Outpaint_Comfy" + } + ], + "https://github.com/theUpsider/ComfyUI-Logic": [ + [ + "Compare", + "DebugPrint", + "If ANY execute A else B", + "Int", + "String" + ], + { + "title_aux": "ComfyUI-Logic" + } + ], + "https://github.com/theUpsider/ComfyUI-Styles_CSV_Loader": [ + [ + "Load Styles CSV" + ], + { + "title_aux": "Styles CSV Loader Extension for ComfyUI" + } + ], + "https://github.com/thecooltechguy/ComfyUI-MagicAnimate": [ + [ + "MagicAnimate", + "MagicAnimateModelLoader" + ], + { + "title_aux": "ComfyUI-MagicAnimate" + } + ], + "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion": [ + [ + "SVDDecoder", + "SVDModelLoader", + "SVDSampler", + "SVDSimpleImg2Vid" + ], + { + "title_aux": "ComfyUI Stable Video Diffusion" + } + ], + "https://github.com/thedyze/save-image-extended-comfyui": [ + [ + "SaveImageExtended" + ], + { + "title_aux": "Save Image Extended for ComfyUI" + } + ], + "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes": [ + [ + "CaptureWebcam", + "LoadWebcamImage", + "SaveImagetoPath" + ], + { + "title_aux": "ComfyUI_toyxyz_test_nodes" + } + ], + "https://github.com/trojblue/trNodes": [ + [ + "JpgConvertNode", + "trColorCorrection", + "trLayering", + "trRouter", + "trRouterLonger" + ], + { + "title_aux": "trNodes" + } + ], + "https://github.com/tudal/Hakkun-ComfyUI-nodes/raw/main/hakkun_nodes.py": [ + [ + "Any Converter", + "Calculate Upscale", + "Image Resize To Height", + "Image Resize To Width", + "Image size to string", + "Load Random Image", + "Load Text", + "Multi Text Merge", + "Prompt Parser", + "Random Line", + "Random Line 4" + ], + { + "nodename_pattern": "\\(mtb\\)$", + "title_aux": "Hakkun-ComfyUI-nodes" + } + ], + "https://github.com/tusharbhutt/Endless-Nodes": [ + [ + "ESS Aesthetic Scoring", + "ESS Aesthetic Scoring Auto", + "ESS Combo Parameterizer", + "ESS Combo Parameterizer & Prompts", + "ESS Eight Input Random", + "ESS Eight Input Text Switch", + "ESS Float to Integer", + "ESS Float to Number", + "ESS Float to String", + "ESS Float to X", + "ESS Global Envoy", + "ESS Image Reward", + "ESS Image Reward Auto", + "ESS Image Saver with JSON", + "ESS Integer to Float", + "ESS Integer to Number", + "ESS Integer to String", + "ESS Integer to X", + "ESS Number to Float", + "ESS Number to Integer", + "ESS Number to String", + "ESS Number to X", + "ESS Parameterizer", + "ESS Parameterizer & Prompts", + "ESS Six Float Output", + "ESS Six Input Random", + "ESS Six Input Text Switch", + "ESS Six Integer IO Switch", + "ESS Six Integer IO Widget", + "ESS String to Float", + "ESS String 
to Integer", + "ESS String to Num", + "ESS String to X", + "\u267e\ufe0f\ud83c\udf0a\u2728 Image Saver with JSON" + ], + { + "author": "BiffMunky", + "description": "A small set of nodes I created for various numerical and text inputs. Features image saver with ability to have JSON saved to separate folder, parameter collection nodes, two aesthetic scoring models, switches for text and numbers, and conversion of string to numeric and vice versa.", + "nickname": "\u267e\ufe0f\ud83c\udf0a\u2728", + "title": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes", + "title_aux": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes" + } + ], + "https://github.com/twri/sdxl_prompt_styler": [ + [ + "SDXLPromptStyler", + "SDXLPromptStylerAdvanced" + ], + { + "title_aux": "SDXL Prompt Styler" + } + ], + "https://github.com/uarefans/ComfyUI-Fans": [ + [ + "Fans Prompt Styler Negative", + "Fans Prompt Styler Positive", + "Fans Styler", + "Fans Text Concatenate" + ], + { + "title_aux": "ComfyUI-Fans" + } + ], + "https://github.com/vanillacode314/SimpleWildcardsComfyUI": [ + [ + "SimpleConcat", + "SimpleWildcard" + ], + { + "author": "VanillaCode314", + "description": "A simple wildcard node for ComfyUI. Can also be used a style prompt node.", + "nickname": "Simple Wildcard", + "title": "Simple Wildcard", + "title_aux": "Simple Wildcard" + } + ], + "https://github.com/wallish77/wlsh_nodes": [ + [ + "Alternating KSampler (WLSH)", + "Build Filename String (WLSH)", + "CLIP +/- w/Text Unified (WLSH)", + "CLIP Positive-Negative (WLSH)", + "CLIP Positive-Negative XL (WLSH)", + "CLIP Positive-Negative XL w/Text (WLSH)", + "CLIP Positive-Negative w/Text (WLSH)", + "Checkpoint Loader w/Name (WLSH)", + "Empty Latent by Pixels (WLSH)", + "Empty Latent by Ratio (WLSH)", + "Empty Latent by Size (WLSH)", + "Generate Border Mask (WLSH)", + "Grayscale Image (WLSH)", + "Image Load with Metadata (WLSH)", + "Image Save with Prompt (WLSH)", + "Image Save with Prompt File (WLSH)", + "Image Save with Prompt/Info (WLSH)", + "Image Save with Prompt/Info File (WLSH)", + "Image Scale By Factor (WLSH)", + "Image Scale by Shortside (WLSH)", + "KSamplerAdvanced (WLSH)", + "Multiply Integer (WLSH)", + "Outpaint to Image (WLSH)", + "Prompt Weight (WLSH)", + "Quick Resolution Multiply (WLSH)", + "Resolutions by Ratio (WLSH)", + "SDXL Quick Empty Latent (WLSH)", + "SDXL Quick Image Scale (WLSH)", + "SDXL Resolutions (WLSH)", + "SDXL Steps (WLSH)", + "Save Positive Prompt(WLSH)", + "Save Prompt (WLSH)", + "Save Prompt/Info (WLSH)", + "Seed and Int (WLSH)", + "Seed to Number (WLSH)", + "Simple Pattern Replace (WLSH)", + "Simple String Combine (WLSH)", + "Time String (WLSH)", + "Upscale by Factor with Model (WLSH)", + "VAE Encode for Inpaint w/Padding (WLSH)" + ], + { + "title_aux": "wlsh_nodes" + } + ], + "https://github.com/whatbirdisthat/cyberdolphin": [ + [ + "\ud83d\udc2c Gradio ChatInterface", + "\ud83d\udc2c OpenAI Advanced", + "\ud83d\udc2c OpenAI Compatible", + "\ud83d\udc2c OpenAI DALL\u00b7E", + "\ud83d\udc2c OpenAI Simple" + ], + { + "title_aux": "cyberdolphin" + } + ], + "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus": [ + [ + "CDL.OpenPoseEditorPlus" + ], + { + "title_aux": "ComfyUI-Openpose-Editor-Plus" + } + ], + "https://github.com/wmatson/easy-comfy-nodes": [ + [ + "EZAssocDictNode", + "EZAssocImgNode", + "EZAssocStrNode", + "EZEmptyDictNode", + "EZHttpPostNode", + "EZLoadImgBatchFromUrlsNode", + "EZLoadImgFromUrlNode", + "EZVideoCombiner" + ], + { + "title_aux": "easy-comfy-nodes" + } + ], + 
"https://github.com/wolfden/ComfyUi_PromptStylers": [ + [ + "SDXLPromptStylerAll", + "SDXLPromptStylerHorror", + "SDXLPromptStylerMisc", + "SDXLPromptStylerbyArtist", + "SDXLPromptStylerbyCamera", + "SDXLPromptStylerbyComposition", + "SDXLPromptStylerbyCyberpunkSurrealism", + "SDXLPromptStylerbyDepth", + "SDXLPromptStylerbyEnvironment", + "SDXLPromptStylerbyFantasySetting", + "SDXLPromptStylerbyFilter", + "SDXLPromptStylerbyFocus", + "SDXLPromptStylerbyImpressionism", + "SDXLPromptStylerbyLighting", + "SDXLPromptStylerbyMileHigh", + "SDXLPromptStylerbyMood", + "SDXLPromptStylerbyMythicalCreature", + "SDXLPromptStylerbyOriginal", + "SDXLPromptStylerbyQuantumRealism", + "SDXLPromptStylerbySteamPunkRealism", + "SDXLPromptStylerbySubject", + "SDXLPromptStylerbySurrealism", + "SDXLPromptStylerbyTheme", + "SDXLPromptStylerbyTimeofDay", + "SDXLPromptStylerbyWyvern", + "SDXLPromptbyCelticArt", + "SDXLPromptbyContemporaryNordicArt", + "SDXLPromptbyFashionArt", + "SDXLPromptbyGothicRevival", + "SDXLPromptbyIrishFolkArt", + "SDXLPromptbyRomanticNationalismArt", + "SDXLPromptbySportsArt", + "SDXLPromptbyStreetArt", + "SDXLPromptbyVikingArt", + "SDXLPromptbyWildlifeArt" + ], + { + "title_aux": "SDXL Prompt Styler (customized version by wolfden)" + } + ], + "https://github.com/wolfden/ComfyUi_String_Function_Tree": [ + [ + "StringFunction" + ], + { + "title_aux": "ComfyUi_String_Function_Tree" + } + ], + "https://github.com/wsippel/comfyui_ws/raw/main/sdxl_utility.py": [ + [ + "SDXLResolutionPresets" + ], + { + "title_aux": "SDXLResolutionPresets" + } + ], + "https://github.com/wutipong/ComfyUI-TextUtils": [ + [ + "Text Utils - Join N-Elements of String List", + "Text Utils - Join String List", + "Text Utils - Join Strings", + "Text Utils - Split String to List" + ], + { + "title_aux": "ComfyUI-TextUtils" + } + ], + "https://github.com/xXAdonesXx/NodeGPT": [ + [ + "AppendAgent", + "Assistant", + "Chat", + "ChatGPT", + "CombineInput", + "Conditioning", + "CostumeAgent_1", + "CostumeAgent_2", + "CostumeMaster_1", + "Critic", + "DisplayString", + "DisplayTextAsImage", + "EVAL", + "Engineer", + "Executor", + "GroupChat", + "Image_generation_Conditioning", + "LM_Studio", + "LoadAPIconfig", + "LoadTXT", + "MemGPT", + "Memory_Excel", + "Model_1", + "Ollama", + "Output2String", + "Planner", + "Scientist", + "TextCombine", + "TextGeneration", + "TextGenerator", + "TextInput", + "TextOutput", + "UserProxy", + "llama-cpp", + "llava", + "oobaboogaOpenAI" + ], + { + "title_aux": "NodeGPT" + } + ], + "https://github.com/yolanother/DTAIComfyImageSubmit": [ + [ + "DTSimpleSubmitImage", + "DTSubmitImage" + ], + { + "title_aux": "Comfy AI DoubTech.ai Image Sumission Node" + } + ], + "https://github.com/yolanother/DTAIComfyLoaders": [ + [ + "DTCLIPLoader", + "DTCLIPVisionLoader", + "DTCheckpointLoader", + "DTCheckpointLoaderSimple", + "DTControlNetLoader", + "DTDiffControlNetLoader", + "DTDiffusersLoader", + "DTGLIGENLoader", + "DTLoadImage", + "DTLoadImageMask", + "DTLoadLatent", + "DTLoraLoader", + "DTLorasLoader", + "DTStyleModelLoader", + "DTUpscaleModelLoader", + "DTVAELoader", + "DTunCLIPCheckpointLoader" + ], + { + "title_aux": "Comfy UI Online Loaders" + } + ], + "https://github.com/yolanother/DTAIComfyPromptAgent": [ + [ + "DTPromptAgent", + "DTPromptAgentString" + ], + { + "title_aux": "Comfy UI Prompt Agent" + } + ], + "https://github.com/yolanother/DTAIComfyQRCodes": [ + [ + "QRCode" + ], + { + "title_aux": "Comfy UI QR Codes" + } + ], + "https://github.com/yolanother/DTAIComfyVariables": [ + [ + 
"DTCLIPTextEncode", + "DTSingleLineStringVariable", + "DTSingleLineStringVariableNoClip", + "FloatVariable", + "IntVariable", + "StringFormat", + "StringFormatSingleLine", + "StringVariable" + ], + { + "title_aux": "Variables for Comfy UI" + } + ], + "https://github.com/yolanother/DTAIImageToTextNode": [ + [ + "DTAIImageToTextNode", + "DTAIImageUrlToTextNode" + ], + { + "title_aux": "Image to Text Node" + } + ], + "https://github.com/youyegit/tdxh_node_comfyui": [ + [ + "TdxhBoolNumber", + "TdxhClipVison", + "TdxhControlNetApply", + "TdxhControlNetProcessor", + "TdxhFloatInput", + "TdxhImageToSize", + "TdxhImageToSizeAdvanced", + "TdxhImg2ImgLatent", + "TdxhIntInput", + "TdxhLoraLoader", + "TdxhOnOrOff", + "TdxhReference", + "TdxhStringInput", + "TdxhStringInputTranslator" + ], + { + "title_aux": "tdxh_node_comfyui" + } + ], + "https://github.com/zcfrank1st/Comfyui-Yolov8": [ + [ + "Yolov8Detection", + "Yolov8Segmentation" + ], + { + "title_aux": "ComfyUI Yolov8" + } + ], + "https://github.com/zcfrank1st/comfyui_visual_anagrams": [ + [ + "VisualAnagramsAnimate", + "VisualAnagramsSample" + ], + { + "title_aux": "comfyui_visual_anagram" + } + ], + "https://github.com/zer0TF/cute-comfy": [ + [ + "Cute.Placeholder" + ], + { + "title_aux": "Cute Comfy" + } + ], + "https://github.com/zhuanqianfish/ComfyUI-EasyNode": [ + [ + "EasyCaptureNode", + "EasyVideoOutputNode", + "SendImageWebSocket" + ], + { + "title_aux": "EasyCaptureNode for ComfyUI" + } + ], + "https://raw.githubusercontent.com/throttlekitty/SDXLCustomAspectRatio/main/SDXLAspectRatio.py": [ + [ + "SDXLAspectRatio" + ], + { + "title_aux": "SDXLCustomAspectRatio" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/git_helper.py b/custom_nodes/ComfyUI-Manager/git_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..504b5e075be15ddee6aebef4e0f1aaf9f7193ddc --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/git_helper.py @@ -0,0 +1,306 @@ +import sys +import os +import git +import configparser +import re +import json +from torchvision.datasets.utils import download_url +from tqdm.auto import tqdm +from git.remote import RemoteProgress + +config_path = os.path.join(os.path.dirname(__file__), "config.ini") +nodelist_path = os.path.join(os.path.dirname(__file__), "custom-node-list.json") +working_directory = os.getcwd() + + +class GitProgress(RemoteProgress): + def __init__(self): + super().__init__() + self.pbar = tqdm(ascii=True) + + def update(self, op_code, cur_count, max_count=None, message=''): + self.pbar.total = max_count + self.pbar.n = cur_count + self.pbar.pos = 0 + self.pbar.refresh() + + +def gitclone(custom_nodes_path, url, target_hash=None): + repo_name = os.path.splitext(os.path.basename(url))[0] + repo_path = os.path.join(custom_nodes_path, repo_name) + + # Clone the repository from the remote URL + repo = git.Repo.clone_from(url, repo_path, recursive=True, progress=GitProgress()) + + if target_hash is not None: + print(f"CHECKOUT: {repo_name} [{target_hash}]") + repo.git.checkout(target_hash) + + repo.git.clear_cache() + repo.close() + + +def gitcheck(path, do_fetch=False): + try: + # Fetch the latest commits from the remote repository + repo = git.Repo(path) + + if repo.head.is_detached: + print("CUSTOM NODE CHECK: True") + return + + current_branch = repo.active_branch + branch_name = current_branch.name + + remote_name = 'origin' + remote = repo.remote(name=remote_name) + + if do_fetch: + remote.fetch() + + # Get the current commit hash and the commit hash of the remote 
branch + commit_hash = repo.head.commit.hexsha + remote_commit_hash = repo.refs[f'{remote_name}/{branch_name}'].object.hexsha + + # Compare the commit hashes to determine if the local repository is behind the remote repository + if commit_hash != remote_commit_hash: + # Get the commit dates + commit_date = repo.head.commit.committed_datetime + remote_commit_date = repo.refs[f'{remote_name}/{branch_name}'].object.committed_datetime + + # Compare the commit dates to determine if the local repository is behind the remote repository + if commit_date < remote_commit_date: + print("CUSTOM NODE CHECK: True") + else: + print("CUSTOM NODE CHECK: False") + except Exception as e: + print(e) + print("CUSTOM NODE CHECK: Error") + + +def switch_to_default_branch(repo): + show_result = repo.git.remote("show", "origin") + matches = re.search(r"\s*HEAD branch:\s*(.*)", show_result) + if matches: + default_branch = matches.group(1) + repo.git.checkout(default_branch) + + +def gitpull(path): + # Check if the path is a git repository + if not os.path.exists(os.path.join(path, '.git')): + raise ValueError('Not a git repository') + + # Pull the latest changes from the remote repository + repo = git.Repo(path) + if repo.is_dirty(): + repo.git.stash() + + commit_hash = repo.head.commit.hexsha + try: + if repo.head.is_detached: + switch_to_default_branch(repo) + + origin = repo.remote(name='origin') + origin.pull() + + repo.git.submodule('update', '--init', '--recursive') + new_commit_hash = repo.head.commit.hexsha + + if commit_hash != new_commit_hash: + print("CUSTOM NODE PULL: True") + else: + print("CUSTOM NODE PULL: None") + except Exception as e: + print(e) + print("CUSTOM NODE PULL: False") + + repo.close() + + +def checkout_comfyui_hash(target_hash): + repo_path = os.path.join(working_directory, '..') # ComfyUI dir + + repo = git.Repo(repo_path) + commit_hash = repo.head.commit.hexsha + + if commit_hash != target_hash: + try: + print(f"CHECKOUT: ComfyUI [{target_hash}]") + repo.git.checkout(target_hash) + except git.GitCommandError as e: + print(f"Error checking out the ComfyUI: {str(e)}") + + +def checkout_custom_node_hash(git_custom_node_infos): + repo_name_to_url = {} + + for url in git_custom_node_infos.keys(): + repo_name = url.split('/')[-1] + + if repo_name.endswith('.git'): + repo_name = repo_name[:-4] + + repo_name_to_url[repo_name] = url + + for path in os.listdir(working_directory): + if path.endswith("ComfyUI-Manager"): + continue + + fullpath = os.path.join(working_directory, path) + + if os.path.isdir(fullpath): + is_disabled = path.endswith(".disabled") + + try: + git_dir = os.path.join(fullpath, '.git') + if not os.path.exists(git_dir): + continue + + need_checkout = False + repo_name = os.path.basename(fullpath) + + if repo_name.endswith('.disabled'): + repo_name = repo_name[:-9] + + item = git_custom_node_infos[repo_name_to_url[repo_name]] + if item['disabled'] and is_disabled: + pass + elif item['disabled'] and not is_disabled: + # disable + print(f"DISABLE: {repo_name}") + new_path = fullpath + ".disabled" + os.rename(fullpath, new_path) + pass + elif not item['disabled'] and is_disabled: + # enable + print(f"ENABLE: {repo_name}") + new_path = fullpath[:-9] + os.rename(fullpath, new_path) + fullpath = new_path + need_checkout = True + else: + need_checkout = True + + if need_checkout: + repo = git.Repo(fullpath) + commit_hash = repo.head.commit.hexsha + + if commit_hash != item['hash']: + print(f"CHECKOUT: {repo_name} [{item['hash']}]") + repo.git.checkout(item['hash']) + except 
Exception: + print(f"Failed to restore snapshots for the custom node '{path}'") + + # clone missing + for k, v in git_custom_node_infos.items(): + if not v['disabled']: + repo_name = k.split('/')[-1] + if repo_name.endswith('.git'): + repo_name = repo_name[:-4] + + path = os.path.join(working_directory, repo_name) + if not os.path.exists(path): + print(f"CLONE: {path}") + gitclone(working_directory, k, v['hash']) + + +def invalidate_custom_node_file(file_custom_node_infos): + global nodelist_path + + enabled_set = set() + for item in file_custom_node_infos: + if not item['disabled']: + enabled_set.add(item['filename']) + + for path in os.listdir(working_directory): + fullpath = os.path.join(working_directory, path) + + if not os.path.isdir(fullpath) and fullpath.endswith('.py'): + if path not in enabled_set: + print(f"DISABLE: {path}") + new_path = fullpath+'.disabled' + os.rename(fullpath, new_path) + + elif not os.path.isdir(fullpath) and fullpath.endswith('.py.disabled'): + path = path[:-9] + if path in enabled_set: + print(f"ENABLE: {path}") + new_path = fullpath[:-9] + os.rename(fullpath, new_path) + + # download missing: just support for 'copy' style + py_to_url = {} + + with open(nodelist_path, 'r', encoding="UTF-8") as json_file: + info = json.load(json_file) + for item in info['custom_nodes']: + if item['install_type'] == 'copy': + for url in item['files']: + if url.endswith('.py'): + py = url.split('/')[-1] + py_to_url[py] = url + + for item in file_custom_node_infos: + filename = item['filename'] + if not item['disabled']: + target_path = os.path.join(working_directory, filename) + + if not os.path.exists(target_path) and filename in py_to_url: + url = py_to_url[filename] + print(f"DOWNLOAD: {filename}") + download_url(url, working_directory) + + +def apply_snapshot(target): + try: + path = os.path.join(os.path.dirname(__file__), 'snapshots', f"{target}") + if os.path.exists(path): + with open(path, 'r', encoding="UTF-8") as json_file: + info = json.load(json_file) + + comfyui_hash = info['comfyui'] + git_custom_node_infos = info['git_custom_nodes'] + file_custom_node_infos = info['file_custom_nodes'] + + checkout_comfyui_hash(comfyui_hash) + checkout_custom_node_hash(git_custom_node_infos) + invalidate_custom_node_file(file_custom_node_infos) + + print("APPLY SNAPSHOT: True") + return + + print(f"Snapshot file not found: `{path}`") + print("APPLY SNAPSHOT: False") + except Exception as e: + print(e) + print("APPLY SNAPSHOT: False") + + +def setup_environment(): + config = configparser.ConfigParser() + config.read(config_path) + if 'default' in config and 'git_exe' in config['default'] and config['default']['git_exe'] != '': + git.Git().update_environment(GIT_PYTHON_GIT_EXECUTABLE=config['default']['git_exe']) + + +setup_environment() + + +try: + if sys.argv[1] == "--clone": + gitclone(sys.argv[2], sys.argv[3]) + elif sys.argv[1] == "--check": + gitcheck(sys.argv[2], False) + elif sys.argv[1] == "--fetch": + gitcheck(sys.argv[2], True) + elif sys.argv[1] == "--pull": + gitpull(sys.argv[2]) + elif sys.argv[1] == "--apply-snapshot": + apply_snapshot(sys.argv[2]) + sys.exit(0) +except Exception as e: + print(e) + sys.exit(-1) + + diff --git a/custom_nodes/ComfyUI-Manager/js/a1111-alter-downloader.js b/custom_nodes/ComfyUI-Manager/js/a1111-alter-downloader.js new file mode 100644 index 0000000000000000000000000000000000000000..4f75ccb9c86f36134950ec76e6879b36fabcd19a --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/a1111-alter-downloader.js @@ -0,0 +1,567 @@ +import { app } from 
"../../scripts/app.js"; +import { api } from "../../scripts/api.js" +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { install_checked_custom_node, manager_instance, rebootAPI } from "./common.js"; + +async function getAlterList() { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + var skip_update = ""; + if(manager_instance.update_check_checkbox.checked) + skip_update = "&skip_update=true"; + + const response = await api.fetchApi(`/alternatives/getlist?mode=${mode}${skip_update}`); + + const data = await response.json(); + return data; +} + +export class AlternativesInstaller extends ComfyDialog { + static instance = null; + + install_buttons = []; + message_box = null; + data = null; + + clear() { + this.install_buttons = []; + this.message_box = null; + this.data = null; + } + + constructor(app, manager_dialog) { + super(); + this.manager_dialog = manager_dialog; + this.search_keyword = ''; + this.element = $el("div.comfy-modal", { parent: document.body }, []); + } + + startInstall(target) { + const self = AlternativesInstaller.instance; + + self.updateMessage(`
Installing '${target.title}'`); + } + + disableButtons() { + for(let i in this.install_buttons) { + this.install_buttons[i].disabled = true; + this.install_buttons[i].style.backgroundColor = 'gray'; + } + } + + apply_searchbox(data) { + let keyword = this.search_box.value.toLowerCase(); + for(let i in this.grid_rows) { + let data1 = this.grid_rows[i].data; + let data2 = data1.custom_node; + + if(!data2) + continue; + + let content = data1.tags.toLowerCase() + data1.description.toLowerCase() + data2.author.toLowerCase() + data2.description.toLowerCase() + data2.title.toLowerCase(); + + if(this.filter && this.filter != '*') { + if(this.filter != data2.installed) { + this.grid_rows[i].control.style.display = 'none'; + continue; + } + } + + if(keyword == "") + this.grid_rows[i].control.style.display = null; + else if(content.includes(keyword)) { + this.grid_rows[i].control.style.display = null; + } + else { + this.grid_rows[i].control.style.display = 'none'; + } + } + } + + async invalidateControl() { + this.clear(); + + // splash + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + const msg = $el('div', {id:'custom-message'}, + [$el('br'), + 'The custom node DB is currently being updated, and updates to custom nodes are being checked for.', + $el('br'), + 'NOTE: Update only checks for extensions that have been fetched.', + $el('br')]); + msg.style.height = '100px'; + msg.style.verticalAlign = 'middle'; + this.element.appendChild(msg); + + // invalidate + this.data = (await getAlterList()).items; + + this.element.removeChild(msg); + + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + this.createHeaderControls(); + await this.createGrid(); + this.apply_searchbox(this.data); + this.createBottomControls(); + } + + updateMessage(msg, btn_id) { + this.message_box.innerHTML = msg; + if(btn_id) { + const rebootButton = document.getElementById(btn_id); + const self = this; + rebootButton.onclick = function() { + if(rebootAPI()) { + self.close(); + self.manager_dialog.close(); + } + }; + } + } + + invalidate_checks(is_checked, install_state) { + if(is_checked) { + for(let i in this.grid_rows) { + let data = this.grid_rows[i].data; + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + + checkbox.disabled = data.custom_node.installed != install_state; + + if(checkbox.disabled) { + for(let j in buttons) { + buttons[j].style.display = 'none'; + } + } + else { + for(let j in buttons) { + buttons[j].style.display = null; + } + } + } + + this.checkbox_all.disabled = false; + } + else { + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + if(checkbox.check) + return; // do nothing + } + + // every checkbox is unchecked -> enable all checkbox + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + checkbox.disabled = false; + + for(let j in buttons) { + buttons[j].style.display = null; + } + } + + this.checkbox_all.checked = false; + this.checkbox_all.disabled = true; + } + } + + check_all(is_checked) { + if(is_checked) { + // lookup first checked item's state + let check_state = null; + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + if(checkbox.checked) { + check_state = this.grid_rows[i].data.custom_node.installed; + } + } + + if(check_state == null) + return; + + // check only same state items + for(let i in this.grid_rows) { + let checkbox = 
this.grid_rows[i].checkbox; + if(this.grid_rows[i].data.custom_node.installed == check_state) + checkbox.checked = true; + } + } + else { + // uncheck all + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + checkbox.checked = false; + checkbox.disabled = false; + + for(let j in buttons) { + buttons[j].style.display = null; + } + } + + this.checkbox_all.disabled = true; + } + } + + async createGrid() { + var grid = document.createElement('table'); + grid.setAttribute('id', 'alternatives-grid'); + + this.grid_rows = {}; + + let self = this; + + var thead = document.createElement('thead'); + var tbody = document.createElement('tbody'); + + var headerRow = document.createElement('tr'); + thead.style.position = "sticky"; + thead.style.top = "0px"; + thead.style.borderCollapse = "collapse"; + thead.style.tableLayout = "fixed"; + + var header0 = document.createElement('th'); + header0.style.width = "20px"; + this.checkbox_all = $el("input",{type:'checkbox', id:'check_all'},[]); + header0.appendChild(this.checkbox_all); + this.checkbox_all.checked = false; + this.checkbox_all.disabled = true; + this.checkbox_all.addEventListener('change', function() { self.check_all.call(self, self.checkbox_all.checked); }); + + var header1 = document.createElement('th'); + header1.innerHTML = '  ID  '; + header1.style.width = "20px"; + var header2 = document.createElement('th'); + header2.innerHTML = 'Tags'; + header2.style.width = "10%"; + var header3 = document.createElement('th'); + header3.innerHTML = 'Author'; + header3.style.width = "150px"; + var header4 = document.createElement('th'); + header4.innerHTML = 'Title'; + header4.style.width = "20%"; + var header5 = document.createElement('th'); + header5.innerHTML = 'Description'; + header5.style.width = "50%"; + var header6 = document.createElement('th'); + header6.innerHTML = 'Install'; + header6.style.width = "130px"; + + header1.style.position = "sticky"; + header1.style.top = "0px"; + header2.style.position = "sticky"; + header2.style.top = "0px"; + header3.style.position = "sticky"; + header3.style.top = "0px"; + header4.style.position = "sticky"; + header4.style.top = "0px"; + header5.style.position = "sticky"; + header5.style.top = "0px"; + + thead.appendChild(headerRow); + headerRow.appendChild(header0); + headerRow.appendChild(header1); + headerRow.appendChild(header2); + headerRow.appendChild(header3); + headerRow.appendChild(header4); + headerRow.appendChild(header5); + headerRow.appendChild(header6); + + headerRow.style.backgroundColor = "Black"; + headerRow.style.color = "White"; + headerRow.style.textAlign = "center"; + headerRow.style.width = "100%"; + headerRow.style.padding = "0"; + + grid.appendChild(thead); + grid.appendChild(tbody); + + if(this.data) + for (var i = 0; i < this.data.length; i++) { + const data = this.data[i]; + var dataRow = document.createElement('tr'); + + let data0 = document.createElement('td'); + let checkbox = $el("input",{type:'checkbox', id:`check_${i}`},[]); + data0.appendChild(checkbox); + checkbox.checked = false; + checkbox.addEventListener('change', function() { self.invalidate_checks.call(self, checkbox.checked, data.custom_node?.installed); }); + + var data1 = document.createElement('td'); + data1.style.textAlign = "center"; + data1.innerHTML = i+1; + var data2 = document.createElement('td'); + data2.innerHTML = ` ${data.tags}`; + var data3 = document.createElement('td'); + var data4 = document.createElement('td'); + if(data.custom_node) 
{ + data3.innerHTML = ` ${data.custom_node.author}`; + data4.innerHTML = ` ${data.custom_node.title}`; + } + else { + data3.innerHTML = ` Unknown`; + data4.innerHTML = ` Unknown`; + } + var data5 = document.createElement('td'); + data5.innerHTML = data.description; + var data6 = document.createElement('td'); + data6.style.textAlign = "center"; + + var installBtn = document.createElement('button'); + var installBtn2 = null; + var installBtn3 = null; + + if(data.custom_node) { + this.install_buttons.push(installBtn); + + switch(data.custom_node.installed) { + case 'Disabled': + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Enable'; + installBtn3.style.backgroundColor = 'blue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + installBtn.style.color = 'white'; + break; + case 'Update': + installBtn2 = document.createElement('button'); + installBtn2.innerHTML = 'Update'; + installBtn2.style.backgroundColor = 'blue'; + installBtn2.style.color = 'white'; + this.install_buttons.push(installBtn2); + + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Disable'; + installBtn3.style.backgroundColor = 'MediumSlateBlue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + installBtn.style.color = 'white'; + break; + case 'True': + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Disable'; + installBtn3.style.backgroundColor = 'MediumSlateBlue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + installBtn.style.color = 'white'; + break; + case 'False': + installBtn.innerHTML = 'Install'; + installBtn.style.backgroundColor = 'black'; + installBtn.style.color = 'white'; + break; + default: + installBtn.innerHTML = 'Try Install'; + installBtn.style.backgroundColor = 'Gray'; + installBtn.style.color = 'white'; + } + + let j = i; + if(installBtn2 != null) { + installBtn2.style.width = "120px"; + installBtn2.addEventListener('click', function() { + install_checked_custom_node(self.grid_rows, j, AlternativesInstaller.instance, 'update'); + }); + + data6.appendChild(installBtn2); + } + + if(installBtn3 != null) { + installBtn3.style.width = "120px"; + installBtn3.addEventListener('click', function() { + install_checked_custom_node(self.grid_rows, j, AlternativesInstaller.instance, 'toggle_active'); + }); + + data6.appendChild(installBtn3); + } + + + installBtn.style.width = "120px"; + installBtn.addEventListener('click', function() { + if(this.innerHTML == 'Uninstall') { + if (confirm(`Are you sure uninstall ${data.title}?`)) { + install_checked_custom_node(self.grid_rows, j, AlternativesInstaller.instance, 'uninstall'); + } + } + else { + install_checked_custom_node(self.grid_rows, j, AlternativesInstaller.instance, 'install'); + } + }); + + data6.appendChild(installBtn); + } + + dataRow.style.backgroundColor = "var(--bg-color)"; + dataRow.style.color = "var(--fg-color)"; + dataRow.style.textAlign = "left"; + + dataRow.appendChild(data0); + dataRow.appendChild(data1); + dataRow.appendChild(data2); + dataRow.appendChild(data3); + dataRow.appendChild(data4); + dataRow.appendChild(data5); + dataRow.appendChild(data6); + tbody.appendChild(dataRow); + + let buttons = []; + if(installBtn) { + 
buttons.push(installBtn); + } + if(installBtn2) { + buttons.push(installBtn2); + } + if(installBtn3) { + buttons.push(installBtn3); + } + + this.grid_rows[i] = {data:data, buttons:buttons, checkbox:checkbox, control:dataRow}; + } + + const panel = document.createElement('div'); + panel.style.width = "100%"; + panel.appendChild(grid); + + function handleResize() { + const parentHeight = self.element.clientHeight; + const gridHeight = parentHeight - 200; + + grid.style.height = gridHeight + "px"; + } + window.addEventListener("resize", handleResize); + + grid.style.position = "relative"; + grid.style.display = "inline-block"; + grid.style.width = "100%"; + grid.style.height = "100%"; + grid.style.overflowY = "scroll"; + this.element.style.height = "85%"; + this.element.style.width = "80%"; + this.element.appendChild(panel); + + handleResize(); + } + + createFilterCombo() { + let combo = document.createElement("select"); + + combo.style.cssFloat = "left"; + combo.style.fontSize = "14px"; + combo.style.padding = "4px"; + combo.style.background = "black"; + combo.style.marginLeft = "2px"; + combo.style.width = "199px"; + combo.id = `combo-manger-filter`; + combo.style.borderRadius = "15px"; + + let items = + [ + { value:'*', text:'Filter: all' }, + { value:'Disabled', text:'Filter: disabled' }, + { value:'Update', text:'Filter: update' }, + { value:'True', text:'Filter: installed' }, + { value:'False', text:'Filter: not-installed' }, + ]; + + items.forEach(item => { + const option = document.createElement("option"); + option.value = item.value; + option.text = item.text; + combo.appendChild(option); + }); + + let self = this; + combo.addEventListener('change', function(event) { + self.filter = event.target.value; + self.apply_searchbox(); + }); + + if(self.filter) { + combo.value = self.filter; + } + + return combo; + } + + createHeaderControls() { + let self = this; + this.search_box = $el('input.cm-search-filter', {type:'text', id:'manager-alternode-search-box', placeholder:'input search keyword', value:this.search_keyword}, []); + this.search_box.style.height = "25px"; + this.search_box.onkeydown = (event) => { + if (event.key === 'Enter') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + if (event.key === 'Escape') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + }; + + let search_button = document.createElement("button"); + search_button.className = "cm-small-button"; + search_button.innerHTML = "Search"; + search_button.onclick = () => { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + }; + search_button.style.display = "inline-block"; + + let filter_control = this.createFilterCombo(); + filter_control.style.display = "inline-block"; + + let cell = $el('td', {width:'100%'}, [filter_control, this.search_box, ' ', search_button]); + let search_control = $el('table', {width:'100%'}, + [ + $el('tr', {}, [cell]) + ] + ); + + cell.style.textAlign = "right"; + this.element.appendChild(search_control); + } + + async createBottomControls() { + var close_button = document.createElement("button"); + close_button.className = "cm-small-button"; + close_button.innerHTML = "Close"; + close_button.onclick = () => { this.close(); } + close_button.style.display = "inline-block"; + + this.message_box = $el('div', {id:'alternatives-installer-message'}, [$el('br'), '']); + this.message_box.style.height = '60px'; + this.message_box.style.verticalAlign = 'middle'; + + this.element.appendChild(this.message_box); + 
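// the status message box is appended before the Close button so that updateMessage() output appears above the dialog's bottom controls +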
this.element.appendChild(close_button); + } + + async show() { + try { + this.invalidateControl(); + this.element.style.display = "block"; + this.element.style.zIndex = 10001; + } + catch(exception) { + app.ui.dialog.show(`Failed to get alternatives list. / ${exception}`); + console.error(exception); + } + } +} diff --git a/custom_nodes/ComfyUI-Manager/js/comfyui-manager.js b/custom_nodes/ComfyUI-Manager/js/comfyui-manager.js new file mode 100644 index 0000000000000000000000000000000000000000..cce1c07329f7e3eefd4790289f8c822c2748cb5a --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/comfyui-manager.js @@ -0,0 +1,1029 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { ShareDialog, SUPPORTED_OUTPUT_NODE_TYPES, getPotentialOutputsAndOutputNodes, ShareDialogChooser, showOpenArtShareDialog, showShareDialog } from "./comfyui-share-common.js"; +import { OpenArtShareDialog } from "./comfyui-share-openart.js"; +import { CustomNodesInstaller } from "./custom-nodes-downloader.js"; +import { AlternativesInstaller } from "./a1111-alter-downloader.js"; +import { SnapshotManager } from "./snapshot.js"; +import { ModelInstaller } from "./model-downloader.js"; +import { manager_instance, setManagerInstance, install_via_git_url, rebootAPI } from "./common.js"; + +var docStyle = document.createElement('style'); +docStyle.innerHTML = ` +#cm-manager-dialog { + width: 1000px; + height: 410px; + box-sizing: content-box; + z-index: 10000; +} + +.cm-menu-container { + column-gap: 20px; + display: flex; + flex-wrap: wrap; + justify-content: center; + box-sizing: content-box; +} + +.cm-menu-column { + display: flex; + flex-direction: column; + flex: 1 1 auto; + width: 300px; + box-sizing: content-box; +} + +.cm-title { + background-color: black; + text-align: center; + height: 40px; + width: calc(100% - 10px); + font-weight: bold; + justify-content: center; + align-content: center; + vertical-align: middle; +} + +#cm-channel-badge { + color: white; + background-color: #AA0000; + width: 150px; + height: 23px; + font-size: 13px; + border-radius: 5px; + left: 5px; + top: 5px; + align-content: center; + justify-content: center; + text-align: center; + font-weight: bold; + float: left; + vertical-align: middle; + position: relative; +} + +.cm-notice-board { + width: 310px; + padding: 0px !important; + height: 190px; + overflow: auto; + color: var(--input-text); + border: 1px solid var(--descrip-text); + padding: 10px; + overflow-x: hidden; +} + +.cm-conflicted-nodes-text { + background-color: #CCCC55 !important; + color: #AA3333 !important; + font-size: 10px; + border-radius: 5px; + padding: 10px; +} + +.cm-warn-note { + background-color: #101010 !important; + color: #FF3800 !important; + font-size: 13px; + border-radius: 5px; + padding: 10px; + overflow-x: hidden; + overflow: auto; +} + +.cm-info-note { + background-color: #101010 !important; + color: #FF3800 !important; + font-size: 13px; + border-radius: 5px; + padding: 10px; + overflow-x: hidden; + overflow: auto; +} +`; + +document.head.appendChild(docStyle); + +var update_comfyui_button = null; +var fetch_updates_button = null; +var update_all_button = null; +var badge_mode = "none"; +let share_option = 'all'; + +// copied style from https://github.com/pythongosssss/ComfyUI-Custom-Scripts +const style = ` +#comfyworkflows-button { + width: 310px; + height: 27px; + padding: 0px !important; + position: relative; + overflow: hidden; +} +#cm-nodeinfo-button { + 
width: 310px; + height: 27px; + padding: 0px !important; + position: relative; + overflow: hidden; +} +#cm-manual-button { + width: 310px; + height: 27px; + position: relative; + overflow: hidden; +} + +.cm-button { + width: 310px; + height: 30px; + position: relative; + overflow: hidden; + font-size: 17px !important; +} + +.cm-small-button { + width: 120px; + height: 30px; + position: relative; + overflow: hidden; + box-sizing: border-box; + font-size: 17px !important; +} + +.cm-search-filter { + width: 200px; + height: 30px !important; + position: relative; + overflow: hidden; + box-sizing: border-box; +} + +#cm-close-button { + width: calc(100% - 65px); + bottom: 10px; + position: absolute; + overflow: hidden; +} + +.pysssss-workflow-arrow-2 { + position: absolute; + top: 0; + bottom: 0; + right: 0; + font-size: 12px; + display: flex; + align-items: center; + width: 24px; + justify-content: center; + background: rgba(255,255,255,0.1); + content: "▼"; +} +.pysssss-workflow-arrow-2:after { + content: "▼"; + } + .pysssss-workflow-arrow-2:hover { + filter: brightness(1.6); + background-color: var(--comfy-menu-bg); + } +.pysssss-workflow-popup-2 ~ .litecontextmenu { + transform: scale(1.3); +} +#comfyworkflows-button-menu { + z-index: 10000000000 !important; +} +#cm-manual-button-menu { + z-index: 10000000000 !important; +} +`; + + + +async function init_badge_mode() { + api.fetchApi('/manager/badge_mode') + .then(response => response.text()) + .then(data => { badge_mode = data; }) +} + +async function init_share_option() { + api.fetchApi('/manager/share_option') + .then(response => response.text()) + .then(data => { + share_option = data || 'all'; + }); +} + +async function init_notice(notice) { + api.fetchApi('/manager/notice') + .then(response => response.text()) + .then(data => { + notice.innerHTML = data; + }) +} + +await init_badge_mode(); +await init_share_option(); + + +async function fetchNicknames() { + const response1 = await api.fetchApi(`/customnode/getmappings?mode=local`); + const mappings = await response1.json(); + + let result = {}; + let nickname_patterns = []; + + for (let i in mappings) { + let item = mappings[i]; + var nickname; + if (item[1].title) { + nickname = item[1].title; + } + else { + nickname = item[1].title_aux; + } + + for (let j in item[0]) { + result[item[0][j]] = nickname; + } + + if(item[1].nodename_pattern) { + nickname_patterns.push([item[1].nodename_pattern, nickname]); + } + } + + return [result, nickname_patterns]; +} + +const [nicknames, nickname_patterns] = await fetchNicknames(); + + +async function updateComfyUI() { + let prev_text = update_comfyui_button.innerText; + update_comfyui_button.innerText = "Updating ComfyUI..."; + update_comfyui_button.disabled = true; + update_comfyui_button.style.backgroundColor = "gray"; + + try { + const response = await api.fetchApi('/comfyui_manager/update_comfyui'); + + if (response.status == 400) { + app.ui.dialog.show('Failed to update ComfyUI.'); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + + if (response.status == 201) { + app.ui.dialog.show('ComfyUI has been successfully updated.'); + app.ui.dialog.element.style.zIndex = 10010; + } + else { + app.ui.dialog.show('ComfyUI is already up to date with the latest version.'); + app.ui.dialog.element.style.zIndex = 10010; + } + + return true; + } + catch (exception) { + app.ui.dialog.show(`Failed to update ComfyUI / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + finally { + update_comfyui_button.disabled 
= false; + update_comfyui_button.innerText = prev_text; + update_comfyui_button.style.backgroundColor = ""; + } +} + +async function fetchUpdates(update_check_checkbox) { + let prev_text = fetch_updates_button.innerText; + fetch_updates_button.innerText = "Fetching updates..."; + fetch_updates_button.disabled = true; + fetch_updates_button.style.backgroundColor = "gray"; + + try { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + const response = await api.fetchApi(`/customnode/fetch_updates?mode=${mode}`); + + if (response.status != 200 && response.status != 201) { + app.ui.dialog.show('Failed to fetch updates.'); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + + if (response.status == 201) { + app.ui.dialog.show('There is an updated extension available.
<BR><BR>NOTE:<BR>Fetch Updates is not an update.<BR>Please update from "Install Custom Nodes".<BR><BR>
'); + app.ui.dialog.element.style.zIndex = 10010; + update_check_checkbox.checked = false; + } + else { + app.ui.dialog.show('All extensions are already up-to-date with the latest versions.'); + app.ui.dialog.element.style.zIndex = 10010; + } + + return true; + } + catch (exception) { + app.ui.dialog.show(`Failed to update custom nodes / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + finally { + fetch_updates_button.disabled = false; + fetch_updates_button.innerText = prev_text; + fetch_updates_button.style.backgroundColor = ""; + } +} + +async function updateAll(update_check_checkbox, manager_dialog) { + let prev_text = update_all_button.innerText; + update_all_button.innerText = "Updating all...(ComfyUI)"; + update_all_button.disabled = true; + update_all_button.style.backgroundColor = "gray"; + + try { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + update_all_button.innerText = "Updating all..."; + const response1 = await api.fetchApi('/comfyui_manager/update_comfyui'); + const response2 = await api.fetchApi(`/customnode/update_all?mode=${mode}`); + + if (response1.status != 200 && response2.status != 201) { + app.ui.dialog.show('Failed to update ComfyUI or several extensions.
<BR><BR>See terminal log.<BR>
'); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + if(response1.status == 201 || response2.status == 201) { + app.ui.dialog.show("ComfyUI and all extensions have been updated to the latest version.
<BR>To apply the updated custom node, please <button id='cm-reboot-button'>RESTART</button> ComfyUI. And refresh browser."); + + const rebootButton = document.getElementById('cm-reboot-button'); + rebootButton.onclick = function() { + if(rebootAPI()) { + manager_dialog.close(); + } + }; + + app.ui.dialog.element.style.zIndex = 10010; + } + else { + app.ui.dialog.show('ComfyUI and all extensions are already up-to-date with the latest versions.'); + app.ui.dialog.element.style.zIndex = 10010; + } + + return true; + } + catch (exception) { + app.ui.dialog.show(`Failed to update ComfyUI or several extensions / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + finally { + update_all_button.disabled = false; + update_all_button.innerText = prev_text; + update_all_button.style.backgroundColor = ""; + } +} + +function newDOMTokenList(initialTokens) { + const tmp = document.createElement(`div`); + + const classList = tmp.classList; + if (initialTokens) { + initialTokens.forEach(token => { + classList.add(token); + }); + } + + return classList; + } + +/** + * Check whether the node is a potential output node (img, gif or video output) + */ +const isOutputNode = (node) => { + return [ + "VHS_VideoCombine", + "PreviewImage", + "SaveImage", + "ADE_AnimateDiffCombine", + "SaveAnimatedWEBP", + ].includes(node.type); +} + +// ----------- +class ManagerMenuDialog extends ComfyDialog { + local_mode_checkbox = null; + + createControlsMid() { + let self = this; + + update_comfyui_button = + $el("button.cm-button", { + type: "button", + textContent: "Update ComfyUI", + onclick: + () => updateComfyUI() + }); + + fetch_updates_button = + $el("button.cm-button", { + type: "button", + textContent: "Fetch Updates", + onclick: + () => fetchUpdates(this.update_check_checkbox) + }); + + update_all_button = + $el("button.cm-button", { + type: "button", + textContent: "Update All", + onclick: + () => updateAll(this.update_check_checkbox, self) + }); + + const res = + [ + $el("button.cm-button", { + type: "button", + textContent: "Install Custom Nodes", + onclick: + () => { + if(!CustomNodesInstaller.instance) + CustomNodesInstaller.instance = new CustomNodesInstaller(app, self); + CustomNodesInstaller.instance.show(false); + } + }), + + $el("button.cm-button", { + type: "button", + textContent: "Install Missing Custom Nodes", + onclick: + () => { + if(!CustomNodesInstaller.instance) + CustomNodesInstaller.instance = new CustomNodesInstaller(app, self); + CustomNodesInstaller.instance.show(true); + } + }), + + $el("button.cm-button", { + type: "button", + textContent: "Install Models", + onclick: + () => { + if(!ModelInstaller.instance) + ModelInstaller.instance = new ModelInstaller(app, self); + ModelInstaller.instance.show(); + } + }), + + $el("br", {}, []), + update_all_button, + update_comfyui_button, + fetch_updates_button, + + $el("br", {}, []), + $el("button.cm-button", { + type: "button", + textContent: "Alternatives of A1111", + onclick: + () => { + if(!AlternativesInstaller.instance) + AlternativesInstaller.instance = new AlternativesInstaller(app, self); + AlternativesInstaller.instance.show(); + } + }) + ]; + + return res; + } + + createControlsLeft() { + let self = this; + + this.local_mode_checkbox = $el("input",{type:'checkbox', id:"use_local_db"},[]) + const checkbox_text = $el("label",{for: "use_local_db"},[" Use local DB"]) + checkbox_text.style.color = "var(--fg-color)"; + checkbox_text.style.cursor = "pointer"; + checkbox_text.style.marginRight = "10px"; + + this.update_check_checkbox = $el("input",{type:'checkbox', 
id:"skip_update_check"},[]) + const uc_checkbox_text = $el("label",{for:"skip_update_check"},[" Skip update check"]) + uc_checkbox_text.style.color = "var(--fg-color)"; + uc_checkbox_text.style.cursor = "pointer"; + this.update_check_checkbox.checked = true; + + // preview method + let preview_combo = document.createElement("select"); + preview_combo.style.cursor = "pointer"; + preview_combo.appendChild($el('option', { value: 'auto', text: 'Preview method: Auto' }, [])); + preview_combo.appendChild($el('option', { value: 'taesd', text: 'Preview method: TAESD (slow)' }, [])); + preview_combo.appendChild($el('option', { value: 'latent2rgb', text: 'Preview method: Latent2RGB (fast)' }, [])); + preview_combo.appendChild($el('option', { value: 'none', text: 'Preview method: None (very fast)' }, [])); + + api.fetchApi('/manager/preview_method') + .then(response => response.text()) + .then(data => { preview_combo.value = data; }) + + preview_combo.addEventListener('change', function (event) { + api.fetchApi(`/manager/preview_method?value=${event.target.value}`); + }); + + // nickname + let badge_combo = document.createElement("select"); + badge_combo.style.cursor = "pointer"; + badge_combo.appendChild($el('option', { value: 'none', text: 'Badge: None' }, [])); + badge_combo.appendChild($el('option', { value: 'nick', text: 'Badge: Nickname' }, [])); + badge_combo.appendChild($el('option', { value: 'nick_hide', text: 'Badge: Nickname (hide built-in)' }, [])); + badge_combo.appendChild($el('option', { value: 'id_nick', text: 'Badge: #ID Nickname' }, [])); + badge_combo.appendChild($el('option', { value: 'id_nick_hide', text: 'Badge: #ID Nickname (hide built-in)' }, [])); + + api.fetchApi('/manager/badge_mode') + .then(response => response.text()) + .then(data => { badge_combo.value = data; badge_mode = data; }); + + badge_combo.addEventListener('change', function (event) { + api.fetchApi(`/manager/badge_mode?value=${event.target.value}`); + badge_mode = event.target.value; + app.graph.setDirtyCanvas(true); + }); + + // channel + let channel_combo = document.createElement("select"); + channel_combo.style.cursor = "pointer"; + api.fetchApi('/manager/channel_url_list') + .then(response => response.json()) + .then(async data => { + try { + let urls = data.list; + for (let i in urls) { + if (urls[i] != '') { + let name_url = urls[i].split('::'); + channel_combo.appendChild($el('option', { value: name_url[0], text: `Channel: ${name_url[0]}` }, [])); + } + } + + channel_combo.addEventListener('change', function (event) { + api.fetchApi(`/manager/channel_url_list?value=${event.target.value}`); + }); + + channel_combo.value = data.selected; + } + catch (exception) { + + } + }); + + // share + let share_combo = document.createElement("select"); + share_combo.style.cursor = "pointer"; + const share_options = [ + ['none', 'None'], + ['openart', 'OpenArt AI'], + ['matrix', 'Matrix Server'], + ['comfyworkflows', 'ComfyWorkflows'], + ['all', 'All'], + ]; + for (const option of share_options) { + share_combo.appendChild($el('option', { value: option[0], text: `Share: ${option[1]}` }, [])); + } + + api.fetchApi('/manager/share_option') + .then(response => response.text()) + .then(data => { + share_combo.value = data || 'all'; + share_option = data || 'all'; + }); + + share_combo.addEventListener('change', function (event) { + const value = event.target.value; + share_option = value; + api.fetchApi(`/manager/share_option?value=${value}`); + const shareButton = document.getElementById("shareButton"); + if (value === 
'none') { + shareButton.style.display = "none"; + } else { + shareButton.style.display = "inline-block"; + } + }); + + return [ + $el("div", {}, [this.local_mode_checkbox, checkbox_text, this.update_check_checkbox, uc_checkbox_text]), + $el("br", {}, []), + preview_combo, + badge_combo, + channel_combo, + share_combo, + + $el("hr", {}, []), + $el("center", {}, ["!! EXPERIMENTAL !!"]), + $el("br", {}, []), + $el("button.cm-button", { + type: "button", + textContent: "Snapshot Manager", + onclick: + () => { + if(!SnapshotManager.instance) + SnapshotManager.instance = new SnapshotManager(app, self); + SnapshotManager.instance.show(); + } + }), + $el("button.cm-button", { + type: "button", + textContent: "Install via Git URL", + onclick: () => { + var url = prompt("Please enter the URL of the Git repository to install", ""); + + if (url !== null) { + install_via_git_url(url, self); + } + } + }), + ]; + } + + createControlsRight() { + const elts = [ + $el("button.cm-button", { + id: 'cm-manual-button', + type: "button", + textContent: "Community Manual", + onclick: () => { window.open("https://blenderneko.github.io/ComfyUI-docs/", "comfyui-community-manual"); } + }, [ + $el("div.pysssss-workflow-arrow-2", { + id: `cm-manual-button-arrow`, + onclick: (e) => { + e.preventDefault(); + e.stopPropagation(); + + LiteGraph.closeAllContextMenus(); + const menu = new LiteGraph.ContextMenu( + [ + { + title: "Comfy Custom Node How To", + callback: () => { window.open("https://github.com/chrisgoringe/Comfy-Custom-Node-How-To/wiki/aaa_index", "comfyui-community-manual1"); }, + }, + { + title: "ComfyUI Guide To Making Custom Nodes", + callback: () => { window.open("https://github.com/Suzie1/ComfyUI_Guide_To_Making_Custom_Nodes/wiki", "comfyui-community-manual2"); }, + }, + { + title: "ComfyUI Examples", + callback: () => { window.open("https://comfyanonymous.github.io/ComfyUI_examples", "comfyui-community-manual3"); }, + }, + { + title: "Close", + callback: () => { + this.close(); + }, + } + ], + { + event: e, + scale: 1.3, + }, + window + ); + // set the id so that we can override the context menu's z-index to be above the comfyui manager menu + menu.root.id = "cm-manual-button-menu"; + menu.root.classList.add("pysssss-workflow-popup-2"); + }, + }) + ]), + + $el("button", { + id: 'comfyworkflows-button', + type: "button", + textContent: "Workflow Gallery", + onclick: () => { window.open("https://comfyworkflows.com/", "comfyui-workflow-gallery"); } + }, [ + $el("div.pysssss-workflow-arrow-2", { + id: `comfyworkflows-button-arrow`, + onclick: (e) => { + e.preventDefault(); + e.stopPropagation(); + + LiteGraph.closeAllContextMenus(); + const menu = new LiteGraph.ContextMenu( + [ + { + title: "Share your art", + callback: () => { + this.close(); + if (!ShareDialog.instance) { + ShareDialog.instance = new ShareDialog(); + } + + app.graphToPrompt().then(prompt => { + // console.log({ prompt }) + return app.graph._nodes; + }).then(nodes => { + // console.log({ nodes }); + const { potential_outputs, potential_output_nodes } = getPotentialOutputsAndOutputNodes(nodes); + + if (potential_outputs.length === 0) { + if (potential_output_nodes.length === 0) { + // todo: add support for other output node types (animatediff combine, etc.) + const supported_nodes_string = SUPPORTED_OUTPUT_NODE_TYPES.join(", "); + alert(`No supported output node found (${supported_nodes_string}). To share this workflow, please add an output node to your graph and re-run your prompt.`); + } else { + alert("To share this, first run a prompt. 
Once it's done, click 'Share'."); + } + return; + } + + ShareDialog.instance.show({ potential_outputs, potential_output_nodes }); + }); + }, + }, + { + title: "Close", + callback: () => { + this.close(); + }, + } + ], + { + event: e, + scale: 1.3, + }, + window + ); + // set the id so that we can override the context menu's z-index to be above the comfyui manager menu + menu.root.id = "comfyworkflows-button-menu"; + menu.root.classList.add("pysssss-workflow-popup-2"); + }, + }) + ]), + + $el("button.cm-button", { + id: 'cm-nodeinfo-button', + type: "button", + textContent: "Nodes Info", + onclick: () => { window.open("https://ltdrdata.github.io/", "comfyui-node-info"); } + }), + $el("br", {}, []), + ]; + + var textarea = document.createElement("div"); + textarea.className = "cm-notice-board"; + elts.push(textarea); + + init_notice(textarea); + + return elts; + } + + constructor() { + super(); + + const close_button = $el("button", { id: "cm-close-button", type: "button", textContent: "Close", onclick: () => this.close() }); + + const content = + $el("div.comfy-modal-content", + [ + $el("tr.cm-title", {}, [ + $el("font", {size:6, color:"white"}, [`ComfyUI Manager Menu`])] + ), + $el("br", {}, []), + $el("div.cm-menu-container", + [ + $el("div.cm-menu-column", [...this.createControlsLeft()]), + $el("div.cm-menu-column", [...this.createControlsMid()]), + $el("div.cm-menu-column", [...this.createControlsRight()]) + ]), + + $el("br", {}, []), + close_button, + ] + ); + + content.style.width = '100%'; + content.style.height = '100%'; + + this.element = $el("div.comfy-modal", { id:'cm-manager-dialog', parent: document.body }, [ content ]); + } + + show() { + this.element.style.display = "block"; + } +} + + +app.registerExtension({ + name: "Comfy.ManagerMenu", + init() { + $el("style", { + textContent: style, + parent: document.head, + }); + }, + async setup() { + const menu = document.querySelector(".comfy-menu"); + const separator = document.createElement("hr"); + + separator.style.margin = "20px 0"; + separator.style.width = "100%"; + menu.append(separator); + + const managerButton = document.createElement("button"); + managerButton.textContent = "Manager"; + managerButton.onclick = () => { + if(!manager_instance) + setManagerInstance(new ManagerMenuDialog()); + manager_instance.show(); + } + menu.append(managerButton); + + + const shareButton = document.createElement("button"); + shareButton.id = "shareButton"; + shareButton.textContent = "Share"; + shareButton.onclick = () => { + if (share_option === 'openart') { + showOpenArtShareDialog(); + return; + } else if (share_option === 'matrix' || share_option === 'comfyworkflows') { + showShareDialog(share_option); + return; + } + + if(!ShareDialogChooser.instance) { + ShareDialogChooser.instance = new ShareDialogChooser(); + } + ShareDialogChooser.instance.show(); + } + // make the background color a gradient of blue to green + shareButton.style.background = "linear-gradient(90deg, #00C9FF 0%, #92FE9D 100%)"; + shareButton.style.color = "black"; + + // Load share option from local storage to determine whether to show + // the share button. + const shouldShowShareButton = share_option !== 'none'; + shareButton.style.display = shouldShowShareButton ? 
"inline-block" : "none"; + + menu.append(shareButton); + }, + + async beforeRegisterNodeDef(nodeType, nodeData, app) { + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + + if (!this.flags.collapsed && badge_mode != 'none' && nodeType.title_mode != LiteGraph.NO_TITLE) { + let text = ""; + if (badge_mode.startsWith('id_nick')) + text = `#${this.id} `; + + if (nicknames[nodeData.name.trim()]) { + let nick = nicknames[nodeData.name.trim()]; + + if (nick == 'ComfyUI') { + if(badge_mode.endsWith('hide')) { + nick = ""; + } + else { + nick = "🦊" + } + } + + if (nick.length > 25) { + text += nick.substring(0, 23) + ".."; + } + else { + text += nick; + } + } + + if (text != "") { + let fgColor = "white"; + let bgColor = "#0F1F0F"; + let visible = true; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor; + ctx.beginPath(); + ctx.roundRect(this.size[0] - sz.width - 12, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + ctx.fillStyle = fgColor; + ctx.fillText(text, this.size[0] - sz.width - 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + } + } + return r; + }; + + this._addExtraNodeContextMenu(nodeType, app); + }, + async nodeCreated(node, app) { + + }, + async loadedGraphNode(node, app) { + if (node.has_errors) { + const onDrawForeground = node.onDrawForeground; + node.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + + if (!this.flags.collapsed && badge_mode != 'none') { + let text = ""; + if (badge_mode.startsWith('id_nick')) + text = `#${this.id} `; + + if (nicknames[node.type.trim()]) { + let nick = nicknames[node.type.trim()]; + + if (nick == 'ComfyUI') { + if(badge_mode.endsWith('hide')) { + nick = ""; + } + else { + nick = "🦊" + } + } + + if (nick.length > 25) { + text += nick.substring(0, 23) + ".."; + } + else { + text += nick; + } + } + + if (text != "") { + let fgColor = "white"; + let bgColor = "#0F1F0F"; + let visible = true; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor; + ctx.beginPath(); + ctx.roundRect(this.size[0] - sz.width - 12, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + ctx.fillStyle = fgColor; + ctx.fillText(text, this.size[0] - sz.width - 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + + ctx.save(); + ctx.font = "bold 14px sans-serif"; + const sz2 = ctx.measureText(node.type); + ctx.fillStyle = 'white'; + ctx.fillText(node.type, this.size[0] / 2 - sz2.width / 2, this.size[1] / 2); + ctx.restore(); + } + } + + return r; + }; + } + }, + + _addExtraNodeContextMenu(node, app) { + const origGetExtraMenuOptions = node.prototype.getExtraMenuOptions; + node.prototype.getExtraMenuOptions = function (_, options) { + origGetExtraMenuOptions?.apply?.(this, arguments); + if (isOutputNode(node)) { + const { potential_outputs } = getPotentialOutputsAndOutputNodes([this]); + const hasOutput = potential_outputs.length > 0; + + // Check if the previous menu option is `null`. If it's not, + // then we need to add a `null` as a separator. 
+			if (options[options.length - 1] !== null) {
+				options.push(null);
+			}
+
+			options.push({
+				content: "🏞️ Share Output",
+				disabled: !hasOutput,
+				callback: (obj) => {
+					if (!ShareDialog.instance) {
+						ShareDialog.instance = new ShareDialog();
+					}
+					const shareButton = document.getElementById("shareButton");
+					if (shareButton) {
+						const currentNode = this;
+						if (!OpenArtShareDialog.instance) {
+							OpenArtShareDialog.instance = new OpenArtShareDialog();
+						}
+						OpenArtShareDialog.instance.selectedNodeId = currentNode.id;
+						if (!ShareDialog.instance) {
+							ShareDialog.instance = new ShareDialog(share_option);
+						}
+						ShareDialog.instance.selectedNodeId = currentNode.id;
+						shareButton.click();
+					}
+				}
+			}, null);
+		}
+	}
+},
+});
diff --git a/custom_nodes/ComfyUI-Manager/js/comfyui-share-common.js b/custom_nodes/ComfyUI-Manager/js/comfyui-share-common.js
new file mode 100644
index 0000000000000000000000000000000000000000..2072316468267bd52d543b6d710396c5aa15ab96
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/js/comfyui-share-common.js
@@ -0,0 +1,975 @@
+import { app } from "../../scripts/app.js";
+import { api } from "../../scripts/api.js"
+import { ComfyDialog, $el } from "../../scripts/ui.js";
+import { OpenArtShareDialog } from "./comfyui-share-openart.js";
+
+export const SUPPORTED_OUTPUT_NODE_TYPES = [
+	"PreviewImage",
+	"SaveImage",
+	"VHS_VideoCombine",
+	"ADE_AnimateDiffCombine",
+	"SaveAnimatedWEBP",
+]
+
+var docStyle = document.createElement('style');
+docStyle.innerHTML = `
+.cm-menu-container {
+  column-gap: 20px;
+  display: flex;
+  flex-wrap: wrap;
+  justify-content: center;
+}
+
+.cm-menu-column {
+  display: flex;
+  flex-direction: column;
+}
+
+.cm-title {
+  padding: 10px 10px 0 10px;
+  background-color: black;
+  text-align: center;
+  height: 45px;
+}
+`;
+document.head.appendChild(docStyle);
+
+export function getPotentialOutputsAndOutputNodes(nodes) {
+	const potential_outputs = [];
+	const potential_output_nodes = [];
+
+	// iterate over the array of nodes to find the ones that are marked as SaveImage
+	// TODO: Add support for AnimateDiffCombine, etc. nodes that save videos/gifs, etc.
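+	// The two arrays are built in lock-step: potential_outputs[i] was produced
+	// by potential_output_nodes[i]. Each output entry is either
+	//   { type: "image",  image: {filename, subfolder, type}, title, node_id }
+	// or
+	//   { type: "output", output: {filename, subfolder, value, format}, title, node_id },
+	// which is why the share dialogs branch on `type` when building preview URLs.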
+ for (let i = 0; i < nodes.length; i++) { + const node = nodes[i]; + if (!SUPPORTED_OUTPUT_NODE_TYPES.includes(node.type)) { + continue; + } + + if (node.type === "SaveImage") { + // check if node has an 'images' array property + if (node.hasOwnProperty("images") && Array.isArray(node.images)) { + // iterate over the images array and add each image to the potential_outputs array + for (let j = 0; j < node.images.length; j++) { + potential_output_nodes.push(node); + potential_outputs.push({ "type": "image", "image": node.images[j], "title": node.title, "node_id": node.id }); + } + } + } + else if (node.type === "PreviewImage") { + // check if node has an 'images' array property + if (node.hasOwnProperty("images") && Array.isArray(node.images)) { + // iterate over the images array and add each image to the potential_outputs array + for (let j = 0; j < node.images.length; j++) { + potential_output_nodes.push(node); + potential_outputs.push({ "type": "image", "image": node.images[j], "title": node.title, "node_id": node.id }); + } + } + } + else if (node.type === "VHS_VideoCombine") { + // check if node has a 'widgets' array property, with type 'image' + if (node.hasOwnProperty("widgets") && Array.isArray(node.widgets)) { + // iterate over the widgets array and add each image to the potential_outputs array + for (let j = 0; j < node.widgets.length; j++) { + if (node.widgets[j].type === "image") { + const widgetValue = node.widgets[j].value; + const parsedURLVals = parseURLPath(widgetValue); + + // ensure that the parsedURLVals have 'filename', 'subfolder', 'type', and 'format' properties + if (parsedURLVals.hasOwnProperty("filename") && parsedURLVals.hasOwnProperty("subfolder") && parsedURLVals.hasOwnProperty("type") && parsedURLVals.hasOwnProperty("format")) { + if (parsedURLVals.type !== "output") { + // TODO + } + potential_output_nodes.push(node); + potential_outputs.push({ "type": "output", 'title': node.title, "node_id": node.id , "output": { "filename": parsedURLVals.filename, "subfolder": parsedURLVals.subfolder, "value": widgetValue, "format": parsedURLVals.format } }); + } + } else if (node.widgets[j].type === "preview") { + const widgetValue = node.widgets[j].value; + const parsedURLVals = widgetValue.params; + + if(!parsedURLVals.format.startsWith('image')) { + // video isn't supported format + continue; + } + + // ensure that the parsedURLVals have 'filename', 'subfolder', 'type', and 'format' properties + if (parsedURLVals.hasOwnProperty("filename") && parsedURLVals.hasOwnProperty("subfolder") && parsedURLVals.hasOwnProperty("type") && parsedURLVals.hasOwnProperty("format")) { + if (parsedURLVals.type !== "output") { + // TODO + } + potential_output_nodes.push(node); + potential_outputs.push({ "type": "output", 'title': node.title, "node_id": node.id , "output": { "filename": parsedURLVals.filename, "subfolder": parsedURLVals.subfolder, "value": `/view?filename=${parsedURLVals.filename}&subfolder=${parsedURLVals.subfolder}&type=${parsedURLVals.type}&format=${parsedURLVals.format}`, "format": parsedURLVals.format } }); + } + } + } + } + } + else if (node.type === "ADE_AnimateDiffCombine") { + // check if node has a 'widgets' array property, with type 'image' + if (node.hasOwnProperty("widgets") && Array.isArray(node.widgets)) { + // iterate over the widgets array and add each image to the potential_outputs array + for (let j = 0; j < node.widgets.length; j++) { + if (node.widgets[j].type === "image") { + const widgetValue = node.widgets[j].value; + const parsedURLVals = 
parseURLPath(widgetValue); + // ensure that the parsedURLVals have 'filename', 'subfolder', 'type', and 'format' properties + if (parsedURLVals.hasOwnProperty("filename") && parsedURLVals.hasOwnProperty("subfolder") && parsedURLVals.hasOwnProperty("type") && parsedURLVals.hasOwnProperty("format")) { + if (parsedURLVals.type !== "output") { + // TODO + continue; + } + potential_output_nodes.push(node); + potential_outputs.push({ "type": "output", 'title': node.title, "output": { "filename": parsedURLVals.filename, "subfolder": parsedURLVals.subfolder, "type": parsedURLVals.type, "value": widgetValue, "format": parsedURLVals.format } }); + } + } + } + } + } + else if (node.type === "SaveAnimatedWEBP") { + // check if node has an 'images' array property + if (node.hasOwnProperty("images") && Array.isArray(node.images)) { + // iterate over the images array and add each image to the potential_outputs array + for (let j = 0; j < node.images.length; j++) { + potential_output_nodes.push(node); + potential_outputs.push({ "type": "image", "image": node.images[j], "title": node.title }); + } + } + } + } + + // Note: make sure that two arrays are the same length + return { potential_outputs, potential_output_nodes }; +} + + +export function parseURLPath(urlPath) { + // Extract the query string from the URL path + var queryString = urlPath.split('?')[1]; + + // Use the URLSearchParams API to parse the query string + var params = new URLSearchParams(queryString); + + // Create an object to store the parsed parameters + var parsedParams = {}; + + // Iterate over each parameter and add it to the object + for (var pair of params.entries()) { + parsedParams[pair[0]] = pair[1]; + } + + // Return the object with the parsed parameters + return parsedParams; +} + + +export const showOpenArtShareDialog = () => { + if (!OpenArtShareDialog.instance) { + OpenArtShareDialog.instance = new OpenArtShareDialog(); + } + + return app.graphToPrompt() + .then(prompt => { + // console.log({ prompt }) + return app.graph._nodes; + }) + .then(nodes => { + const { potential_outputs, potential_output_nodes } = getPotentialOutputsAndOutputNodes(nodes); + OpenArtShareDialog.instance.show({ potential_outputs, potential_output_nodes}); + }) +} + +export const showShareDialog = async (share_option) => { + if (!ShareDialog.instance) { + ShareDialog.instance = new ShareDialog(share_option); + } + return app.graphToPrompt() + .then(prompt => { + // console.log({ prompt }) + return app.graph._nodes; + }) + .then(nodes => { + // console.log({ nodes }); + const { potential_outputs, potential_output_nodes } = getPotentialOutputsAndOutputNodes(nodes); + if (potential_outputs.length === 0) { + if (potential_output_nodes.length === 0) { + // todo: add support for other output node types (animatediff combine, etc.) + const supported_nodes_string = SUPPORTED_OUTPUT_NODE_TYPES.join(", "); + alert(`No supported output node found (${supported_nodes_string}). To share this workflow, please add an output node to your graph and re-run your prompt.`); + } else { + alert("To share this, first run a prompt. Once it's done, click 'Share'.\n\nNOTE: Images of the Share target can only be selected in the PreviewImage, SaveImage, and VHS_VideoCombine nodes. 
In the case of VHS_VideoCombine, only the image/gif and image/webp formats are supported."); + } + return false; + } + ShareDialog.instance.show({ potential_outputs, potential_output_nodes, share_option }); + return true; + }); +} + +export class ShareDialogChooser extends ComfyDialog { + static instance = null; + constructor() { + super(); + this.element = $el("div.comfy-modal", { + parent: document.body, style: { + 'overflow-y': "auto", + } + }, + [$el("div.comfy-modal-content", + {}, + [...this.createButtons()]), + ]); + this.selectedNodeId = null; + } + createButtons() { + const buttons = [ + { + key: "openart", + textContent: "OpenArt AI", + website: "https://openart.ai/workflows/", + description: "Share ComfyUI workflows and art on OpenArt.ai", + onclick: () => { + showOpenArtShareDialog(); + this.close(); + } + }, + { + key: "matrix", + textContent: "Matrix Server", + website: "https://app.element.io/#/room/%23comfyui_space%3Amatrix.org", + description: "Share your art on the official ComfyUI matrix server", + onclick: async () => { + showShareDialog('matrix').then((suc) => { + suc && this.close(); + }) + } + }, + { + key: "comfyworkflows", + textContent: "ComfyWorkflows", + website: "https://comfyworkflows.com", + description: "Share ComfyUI art on comfyworkflows.com", + onclick: () => { + showShareDialog('comfyworkflows').then((suc) => { + suc && this.close(); + }) + } + }, + ]; + + function createShareButtonsWithDescriptions() { + // Responsive container + const container = $el("div", { + style: { + display: "flex", + 'flex-wrap': 'wrap', + 'justify-content': 'space-around', + 'padding': '20px', + } + }); + + buttons.forEach(b => { + const button = $el("button", { + type: "button", + textContent: b.textContent, + onclick: b.onclick, + style: { + 'width': '25%', + 'minWidth': '200px', + 'background-color': b.backgroundColor || '', + 'border-radius': '5px', + 'cursor': 'pointer', + 'padding': '5px 5px', + 'margin-bottom': '5px', + 'transition': 'background-color 0.3s', + } + }); + button.addEventListener('mouseover', () => { + button.style.backgroundColor = '#007BFF'; // Change color on hover + }); + button.addEventListener('mouseout', () => { + button.style.backgroundColor = b.backgroundColor || ''; + }); + + const description = $el("p", { + textContent: b.description, + style: { + 'text-align': 'left', + color: 'white', + 'font-size': '14px', + 'margin-bottom': '10px', + }, + }); + + const websiteLink = $el("a", { + textContent: "🌐 Website", + href: b.website, + target: "_blank", + style: { + color: 'white', + 'margin-left': '10px', + 'font-size': '12px', + 'text-decoration': 'none', + 'align-self': 'center', + }, + }); + + // Add highlight to the website link + websiteLink.addEventListener('mouseover', () => { + websiteLink.style.opacity = '0.7'; + }); + + websiteLink.addEventListener('mouseout', () => { + websiteLink.style.opacity = '1'; + }); + + const buttonLinkContainer = $el("div", { + style: { + display: 'flex', + 'align-items': 'center', + 'margin-bottom': '10px', + } + }, [button, websiteLink]); + + const column = $el("div", { + style: { + 'flex-basis': '100%', + 'margin': '10px', + 'padding': '20px', + 'border': '1px solid #ddd', + 'border-radius': '5px', + 'box-shadow': '0 2px 4px rgba(0, 0, 0, 0.1)', + } + }, [buttonLinkContainer, description]); + + container.appendChild(column); + }); + + return container; + } + + return [ + $el("p", { + textContent: 'Choose a platform to share your workflow', + style: { + 'text-align': 'center', + 'color': 'white', + 'font-size': 
'18px', + 'margin-bottom': '10px', + }, + } + ), + + $el("div.cm-menu-container", { + id: "comfyui-share-container" + }, [ + $el("div.cm-menu-column", [ + createShareButtonsWithDescriptions(), + $el("br", {}, []), + ]), + ]), + $el("div.cm-menu-container", { + id: "comfyui-share-container" + }, [ + $el("button", { + type: "button", + style: { + margin: "0 25px", + width: "100%", + }, + textContent: "Close", + onclick: () => { + this.close() + } + }), + $el("br", {}, []), + ]), + ]; + } + show() { + this.element.style.display = "block"; + } +} +export class ShareDialog extends ComfyDialog { + static instance = null; + static matrix_auth = { homeserver: "matrix.org", username: "", password: "" }; + static cw_sharekey = ""; + + constructor(share_option) { + super(); + this.share_option = share_option; + this.element = $el("div.comfy-modal", { + parent: document.body, style: { + 'overflow-y': "auto", + } + }, + [$el("div.comfy-modal-content", + {}, + [...this.createButtons()]), + ]); + this.selectedOutputIndex = 0; + } + + createButtons() { + this.radio_buttons = $el("div", { + id: "selectOutputImages", + }, []); + + this.is_nsfw_checkbox = $el("input", { type: 'checkbox', id: "is_nsfw" }, []) + const is_nsfw_checkbox_text = $el("label", { + }, [" Is this NSFW?"]) + this.is_nsfw_checkbox.style.color = "var(--fg-color)"; + this.is_nsfw_checkbox.checked = false; + + this.matrix_destination_checkbox = $el("input", { type: 'checkbox', id: "matrix_destination" }, []) + const matrix_destination_checkbox_text = $el("label", {}, [" ComfyUI Matrix server"]) + this.matrix_destination_checkbox.style.color = "var(--fg-color)"; + this.matrix_destination_checkbox.checked = this.share_option === 'matrix'; //true; + + this.comfyworkflows_destination_checkbox = $el("input", { type: 'checkbox', id: "comfyworkflows_destination" }, []) + const comfyworkflows_destination_checkbox_text = $el("label", {}, [" ComfyWorkflows.com"]) + this.comfyworkflows_destination_checkbox.style.color = "var(--fg-color)"; + this.comfyworkflows_destination_checkbox.checked = this.share_option !== 'matrix'; + + this.matrix_homeserver_input = $el("input", { type: 'text', id: "matrix_homeserver", placeholder: "matrix.org", value: ShareDialog.matrix_auth.homeserver || 'matrix.org' }, []); + this.matrix_username_input = $el("input", { type: 'text', placeholder: "Username", value: ShareDialog.matrix_auth.username || '' }, []); + this.matrix_password_input = $el("input", { type: 'password', placeholder: "Password", value: ShareDialog.matrix_auth.password || '' }, []); + + this.cw_sharekey_input = $el("input", { type: 'text', placeholder: "Share key (found on your profile page)", value: ShareDialog.cw_sharekey || '' }, []); + this.cw_sharekey_input.style.width = "100%"; + + this.credits_input = $el("input", { + type: "text", + placeholder: "This will be used to give credits", + required: false, + }, []); + + this.title_input = $el("input", { + type: "text", + placeholder: "ex: My awesome art", + required: false + }, []); + + this.description_input = $el("textarea", { + placeholder: "ex: Trying out a new workflow... 
", + required: false, + }, []); + + this.share_button = $el("button", { + type: "submit", + textContent: "Share", + style: { + backgroundColor: "blue" + } + }, []); + + this.final_message = $el("div", { + style: { + color: "white", + textAlign: "center", + // marginTop: "10px", + // backgroundColor: "black", + padding: "10px", + } + }, []); + + this.share_finalmessage_container = $el("div.cm-menu-container", { + id: "comfyui-share-finalmessage-container", + style: { + display: "none", + } + }, [ + $el("div.cm-menu-column", [ + this.final_message, + $el("button", { + type: "button", + textContent: "Close", + onclick: () => { + // Reset state + this.matrix_destination_checkbox.checked = this.share_option === 'matrix'; + this.comfyworkflows_destination_checkbox.checked = this.share_option !== 'matrix'; + this.share_button.textContent = "Share"; + this.share_button.style.display = "inline-block"; + this.final_message.innerHTML = ""; + this.final_message.style.color = "white"; + this.credits_input.value = ""; + this.title_input.value = ""; + this.description_input.value = ""; + this.is_nsfw_checkbox.checked = false; + this.selectedOutputIndex = 0; + + // hide the final message + this.share_finalmessage_container.style.display = "none"; + + // show the share container + this.share_container.style.display = "flex"; + + this.close() + } + }), + ]) + ]); + this.share_container = $el("div.cm-menu-container", { + id: "comfyui-share-container" + }, [ + $el("div.cm-menu-column", [ + $el("details", { + style: { + border: "1px solid #999", + padding: "5px", + borderRadius: "5px", + backgroundColor: "#222" + } + }, [ + $el("summary", { + style: { + color: "white", + cursor: "pointer", + } + }, [`Matrix account`]), + $el("div", { + style: { + display: "flex", + flexDirection: "row", + } + }, [ + $el("div", { + textContent: "Homeserver", + style: { + marginRight: "10px", + } + }, []), + this.matrix_homeserver_input, + ]), + + $el("div", { + style: { + display: "flex", + flexDirection: "row", + } + }, [ + $el("div", { + textContent: "Username", + style: { + marginRight: "10px", + } + }, []), + this.matrix_username_input, + ]), + + $el("div", { + style: { + display: "flex", + flexDirection: "row", + } + }, [ + $el("div", { + textContent: "Password", + style: { + marginRight: "10px", + } + }, []), + this.matrix_password_input, + ]), + + ]), + $el("details", { + style: { + border: "1px solid #999", + marginTop: "10px", + padding: "5px", + borderRadius: "5px", + backgroundColor: "#222" + } + }, [ + $el("summary", { + style: { + color: "white", + cursor: "pointer", + } + }, [`Comfyworkflows.com account`]), + $el("h4", { + textContent: "Share key (found on your profile page)", + }, []), + $el("p", { size: 3, color: "white" }, ["When provided, your art will be saved to your account."]), + this.cw_sharekey_input, + ]), + + $el("div", {}, [ + $el("p", { + size: 3, color: "white", style: { + color: 'white' + } + }, [`Select where to share your art:`]), + this.matrix_destination_checkbox, + matrix_destination_checkbox_text, + $el("br", {}, []), + this.comfyworkflows_destination_checkbox, + comfyworkflows_destination_checkbox_text, + ]), + + $el("h4", { + textContent: "Credits (optional)", + size: 3, + color: "white", + style: { + color: 'white' + } + }, []), + this.credits_input, + // $el("br", {}, []), + + $el("h4", { + textContent: "Title (optional)", + size: 3, + color: "white", + style: { + color: 'white' + } + }, []), + this.title_input, + // $el("br", {}, []), + + $el("h4", { + textContent: "Description 
(optional)", + size: 3, + color: "white", + style: { + color: 'white' + } + }, []), + this.description_input, + $el("br", {}, []), + + $el("div", {}, [this.is_nsfw_checkbox, is_nsfw_checkbox_text]), + // $el("br", {}, []), + + // this.final_message, + // $el("br", {}, []), + ]), + $el("div.cm-menu-column", [ + this.radio_buttons, + $el("br", {}, []), + + this.share_button, + + $el("button", { + type: "button", + textContent: "Close", + onclick: () => { + // Reset state + this.matrix_destination_checkbox.checked = this.share_option === 'matrix'; + this.comfyworkflows_destination_checkbox.checked = this.share_option !== 'matrix'; + this.share_button.textContent = "Share"; + this.share_button.style.display = "inline-block"; + this.final_message.innerHTML = ""; + this.final_message.style.color = "white"; + this.credits_input.value = ""; + this.title_input.value = ""; + this.description_input.value = ""; + this.is_nsfw_checkbox.checked = false; + this.selectedOutputIndex = 0; + + // hide the final message + this.share_finalmessage_container.style.display = "none"; + + // show the share container + this.share_container.style.display = "flex"; + + this.close() + } + }), + $el("br", {}, []), + ]), + ]); + + // get the user's existing matrix auth and share key + ShareDialog.matrix_auth = { homeserver: "matrix.org", username: "", password: "" }; + try { + api.fetchApi(`/manager/get_matrix_auth`) + .then(response => response.json()) + .then(data => { + ShareDialog.matrix_auth = data; + this.matrix_homeserver_input.value = ShareDialog.matrix_auth.homeserver; + this.matrix_username_input.value = ShareDialog.matrix_auth.username; + this.matrix_password_input.value = ShareDialog.matrix_auth.password; + }) + .catch(error => { + // console.log(error); + }); + } catch (error) { + // console.log(error); + } + + // get the user's existing comfyworkflows share key + ShareDialog.cw_sharekey = ""; + try { + // console.log("Fetching comfyworkflows share key") + api.fetchApi(`/manager/get_comfyworkflows_auth`) + .then(response => response.json()) + .then(data => { + ShareDialog.cw_sharekey = data.comfyworkflows_sharekey; + this.cw_sharekey_input.value = ShareDialog.cw_sharekey; + }) + .catch(error => { + // console.log(error); + }); + } catch (error) { + // console.log(error); + } + + this.share_button.onclick = async () => { + const prompt = await app.graphToPrompt(); + const nodes = app.graph._nodes; + + // console.log({ prompt, nodes }); + + const destinations = []; + if (this.matrix_destination_checkbox.checked) { + destinations.push("matrix"); + } + if (this.comfyworkflows_destination_checkbox.checked) { + destinations.push("comfyworkflows"); + } + + // if destinations includes matrix, make an api call to /manager/check_matrix to ensure that the user has configured their matrix settings + if (destinations.includes("matrix")) { + let definedMatrixAuth = !!this.matrix_homeserver_input.value && !!this.matrix_username_input.value && !!this.matrix_password_input.value; + if (!definedMatrixAuth) { + alert("Please set your Matrix account details."); + return; + } + } + + if (destinations.includes("comfyworkflows") && !this.cw_sharekey_input.value && !confirm("You have NOT set your ComfyWorkflows.com share key. Your art will NOT be connected to your account (it will be shared anonymously). 
Continue?")) { + return; + } + + const { potential_outputs, potential_output_nodes } = getPotentialOutputsAndOutputNodes(nodes); + + // console.log({ potential_outputs, potential_output_nodes }) + + if (potential_outputs.length === 0) { + if (potential_output_nodes.length === 0) { + // todo: add support for other output node types (animatediff combine, etc.) + const supported_nodes_string = SUPPORTED_OUTPUT_NODE_TYPES.join(", "); + alert(`No supported output node found (${supported_nodes_string}). To share this workflow, please add an output node to your graph and re-run your prompt.`); + } else { + alert("To share this, first run a prompt. Once it's done, click 'Share'.\n\nNOTE: Images of the Share target can only be selected in the PreviewImage, SaveImage, and VHS_VideoCombine nodes. In the case of VHS_VideoCombine, only the image/gif and image/webp formats are supported."); + } + this.selectedOutputIndex = 0; + this.close(); + return; + } + + // Change the text of the share button to "Sharing..." to indicate that the share process has started + this.share_button.textContent = "Sharing..."; + + const response = await api.fetchApi(`/manager/share`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + matrix_auth: { + homeserver: this.matrix_homeserver_input.value, + username: this.matrix_username_input.value, + password: this.matrix_password_input.value, + }, + cw_auth: { + cw_sharekey: this.cw_sharekey_input.value, + }, + share_destinations: destinations, + credits: this.credits_input.value, + title: this.title_input.value, + description: this.description_input.value, + is_nsfw: this.is_nsfw_checkbox.checked, + prompt, + potential_outputs, + selected_output_index: this.selectedOutputIndex, + // potential_output_nodes + }) + }); + + if (response.status != 200) { + try { + const response_json = await response.json(); + if (response_json.error) { + alert(response_json.error); + this.close(); + return; + } else { + alert("Failed to share your art. Please try again."); + this.close(); + return; + } + } catch (e) { + alert("Failed to share your art. Please try again."); + this.close(); + return; + } + } + + const response_json = await response.json(); + + if (response_json.comfyworkflows.url) { + this.final_message.innerHTML = "Your art has been shared: " + response_json.comfyworkflows.url + ""; + if (response_json.matrix.success) { + this.final_message.innerHTML += "
<BR>Your art has been shared in the ComfyUI Matrix server's #share channel!";
+				}
+			} else {
+				if (response_json.matrix.success) {
+					this.final_message.innerHTML = "Your art has been shared in the ComfyUI Matrix server's #share channel!";
+				}
+			}
+
+			this.final_message.style.color = "green";
+
+			// hide #comfyui-share-container and show #comfyui-share-finalmessage-container
+			this.share_container.style.display = "none";
+			this.share_finalmessage_container.style.display = "block";
+
+			// hide the share button
+			this.share_button.textContent = "Shared!";
+			this.share_button.style.display = "none";
+			// this.close();
+		}
+
+		const res =
+			[
+				$el("tr.td", { width: "100%" }, [
+					$el("font", { size: 6, color: "white" }, [`Share your art`]),
+				]),
+				$el("br", {}, []),
+
+				this.share_finalmessage_container,
+				this.share_container,
+			];
+
+		res[0].style.padding = "10px 10px 10px 10px";
+		res[0].style.backgroundColor = "black"; //"linear-gradient(90deg, #00C9FF 0%, #92FE9D 100%)";
+		res[0].style.textAlign = "center";
+		res[0].style.height = "45px";
+		return res;
+	}
+
+	show({potential_outputs, potential_output_nodes, share_option}) {
+		// Sort `potential_output_nodes` by node ID to make the order always
+		// consistent, but we should also keep `potential_outputs` in the same
+		// order as `potential_output_nodes`.
+		const potential_output_to_order = {};
+		potential_output_nodes.forEach((node, index) => {
+			if (node.id in potential_output_to_order) {
+				potential_output_to_order[node.id][1].push(potential_outputs[index]);
+			} else {
+				potential_output_to_order[node.id] = [node, [potential_outputs[index]]];
+			}
+		})
+		// Sort the object `potential_output_to_order` by key; the keys are
+		// node-ID strings, so compare them numerically.
+		const sorted_potential_output_to_order = Object.fromEntries(
+			Object.entries(potential_output_to_order).sort((a, b) => Number(a[0]) - Number(b[0]))
+		);
+		const sorted_potential_outputs = []
+		const sorted_potential_output_nodes = []
+		for (const [key, value] of Object.entries(sorted_potential_output_to_order)) {
+			sorted_potential_output_nodes.push(value[0]);
+			sorted_potential_outputs.push(...value[1]);
+		}
+		potential_output_nodes = sorted_potential_output_nodes;
+		potential_outputs = sorted_potential_outputs;
+
+		// console.log({ potential_outputs, potential_output_nodes })
+		this.radio_buttons.innerHTML = ""; // clear the radio buttons
+		let is_radio_button_checked = false; // only check the first radio button if multiple images from the same node
+		const new_radio_buttons = $el("div", {
+			id: "selectOutput-Options",
+			style: {
+				'overflow-y': 'scroll',
+				'max-height': '400px',
+			}
+		}, potential_outputs.map((output, index) => {
+			const {node_id} = output;
+			const radio_button = $el("input", { type: 'radio', name: "selectOutputImages", value: index, required: index === 0 }, [])
+			let radio_button_img;
+			if (output.type === "image" || output.type === "temp") {
+				radio_button_img = $el("img", { src: `/view?filename=${output.image.filename}&subfolder=${output.image.subfolder}&type=${output.image.type}`, style: { width: "auto", height: "100px" } }, []);
+			} else if (output.type === "output") {
+				radio_button_img = $el("img", { src: output.output.value, style: { width: "auto", height: "100px" } }, []);
+			} else {
+				// unsupported output type
+				// this should never happen
+				// TODO
+				radio_button_img = $el("img", { src: "", style: { width: "auto", height: "100px" } }, []);
+			}
+			const radio_button_text = $el("label", {
+				// style: {
+				//     color: 'white'
+				// }
+			}, [output.title])
+			radio_button.style.color = "var(--fg-color)";
+
+			//
Make the radio button checked if it's the selected node, + // otherwise make the first radio button checked. + if (this.selectedNodeId) { + if (this.selectedNodeId === node_id && !is_radio_button_checked) { + radio_button.checked = true; + is_radio_button_checked = true; + } + } else { + radio_button.checked = index === 0; + } + + if (radio_button.checked) { + this.selectedOutputIndex = index; + } + + radio_button.onchange = () => { + this.selectedOutputIndex = parseInt(radio_button.value); + }; + + return $el("div", { + style: { + display: "flex", + 'align-items': 'center', + 'justify-content': 'space-between', + 'margin-bottom': '10px', + } + }, [radio_button, radio_button_text, radio_button_img]); + })); + const header = $el("h3", { + textContent: "Select an image to share", + size: 3, + color: "white", + style: { + 'text-align': 'center', + color: 'white', + backgroundColor: 'black', + padding: '10px', + 'margin-top': '0px', + } + }, [ + $el("p", { + textContent: "Scroll to see all outputs", + size: 2, + color: "white", + style: { + 'text-align': 'center', + color: 'white', + 'margin-bottom': '5px', + 'font-style': 'italic', + 'font-size': '12px', + }, + }, []) + ]); + this.radio_buttons.appendChild(header); + // this.radio_buttons.appendChild(subheader); + this.radio_buttons.appendChild(new_radio_buttons); + this.element.style.display = "block"; + + share_option = share_option || this.share_option; + if (share_option === 'comfyworkflows') { + this.matrix_destination_checkbox.checked = false; + this.comfyworkflows_destination_checkbox.checked = true; + } else { + this.matrix_destination_checkbox.checked = true; + this.comfyworkflows_destination_checkbox.checked = false; + } + } +} diff --git a/custom_nodes/ComfyUI-Manager/js/comfyui-share-openart.js b/custom_nodes/ComfyUI-Manager/js/comfyui-share-openart.js new file mode 100644 index 0000000000000000000000000000000000000000..5a7f12739f2186503841c998c2d7b4c2dcbe02ec --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/comfyui-share-openart.js @@ -0,0 +1,747 @@ +import {app} from "../../scripts/app.js"; +import {api} from "../../scripts/api.js"; +import {ComfyDialog, $el} from "../../scripts/ui.js"; + +const LOCAL_STORAGE_KEY = "openart_comfy_workflow_key"; +const DEFAULT_HOMEPAGE_URL = "https://openart.ai/workflows/dev?developer=true"; +//const DEFAULT_HOMEPAGE_URL = "http://localhost:8080/workflows/dev?developer=true"; + +const API_ENDPOINT = "https://openart.ai/api"; +//const API_ENDPOINT = "http://localhost:8080/api"; + +const style = ` + .openart-share-dialog a { + color: #f8f8f8; + } + .openart-share-dialog a:hover { + color: #007bff; + } + .output_label { + border: 5px solid transparent; + } + .output_label:hover { + border: 5px solid #59E8C6; + } + .output_label.checked { + border: 5px solid #59E8C6; + } +`; + +// Shared component styles +const sectionStyle = { + marginBottom: 0, + padding: 0, + borderRadius: "8px", + boxShadow: "0 2px 4px rgba(0, 0, 0, 0.05)", + display: "flex", + flexDirection: "column", + justifyContent: "center", +}; + +export class OpenArtShareDialog extends ComfyDialog { + static instance = null; + + constructor() { + super(); + $el("style", { + textContent: style, + parent: document.head, + }); + this.element = $el( + "div.comfy-modal.openart-share-dialog", + { + parent: document.body, + style: { + "overflow-y": "auto", + }, + }, + [$el("div.comfy-modal-content", {}, [...this.createButtons()])] + ); + this.selectedOutputIndex = 0; + this.selectedNodeId = null; + this.uploadedImages = []; + this.selectedFile 
= null; + } + + async readKey() { + let key = "" + try { + key = await api.fetchApi(`/manager/get_openart_auth`) + .then(response => response.json()) + .then(data => { + return data.openart_key; + }) + .catch(error => { + // console.log(error); + }); + } catch (error) { + // console.log(error); + } + return key || ""; + } + + async saveKey(value) { + await api.fetchApi(`/manager/set_openart_auth`, { + method: 'POST', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({ + openart_key: value + }) + }); + } + + createButtons() { + const inputStyle = { + display: "block", + minWidth: "500px", + width: "100%", + padding: "10px", + margin: "10px 0", + borderRadius: "4px", + border: "1px solid #ddd", + boxSizing: "border-box", + }; + + const hyperLinkStyle = { + display: "block", + marginBottom: "15px", + fontWeight: "bold", + fontSize: "14px", + }; + + const labelStyle = { + color: "#f8f8f8", + display: "block", + margin: "10px 0 0 0", + fontWeight: "bold", + textDecoration: "none", + }; + + const buttonStyle = { + padding: "10px 80px", + margin: "10px 5px", + borderRadius: "4px", + border: "none", + cursor: "pointer", + color: "#fff", + backgroundColor: "#007bff", + }; + + // upload images input + this.uploadImagesInput = $el("input", { + type: "file", + multiple: false, + style: inputStyle, + accept: "image/*", + }); + + this.uploadImagesInput.addEventListener("change", async (e) => { + const file = e.target.files[0]; + if (!file) { + this.previewImage.src = ""; + this.previewImage.style.display = "none"; + return; + } + const reader = new FileReader(); + reader.onload = async (e) => { + const imgData = e.target.result; + this.previewImage.src = imgData; + this.previewImage.style.display = "block"; + this.selectedFile = null + // Once user uploads an image, we uncheck all radio buttons + this.radioButtons.forEach((ele) => { + ele.checked = false; + ele.parentElement.classList.remove("checked"); + }); + + // Add the opacity style toggle here to indicate that they only need + // to upload one image or choose one from the outputs. 
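+				// (Uploading a file dims the outputs grid; picking an output
+				// later does the reverse — see the radio_button.onchange
+				// handler further down.)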
+ this.outputsSection.style.opacity = 0.35; + this.uploadImagesInput.style.opacity = 1; + }; + reader.readAsDataURL(file); + }); + + // preview image + this.previewImage = $el("img", { + src: "", + style: { + width: "100%", + maxHeight: "100px", + objectFit: "contain", + display: "none", + marginTop: '10px', + }, + }); + + this.keyInput = $el("input", { + type: "password", + placeholder: "Copy & paste your API key", + style: inputStyle, + }); + this.NameInput = $el("input", { + type: "text", + placeholder: "Title (required)", + style: inputStyle, + }); + this.descriptionInput = $el("textarea", { + placeholder: "Description (optional)", + style: { + ...inputStyle, + minHeight: "100px", + }, + }); + + // Header Section + const headerSection = $el("h3", { + textContent: "Share your workflow to OpenArt", + size: 3, + color: "white", + style: { + 'text-align': 'center', + color: 'white', + margin: '0 0 10px 0', + } + }); + + // LinkSection + this.communityLink = $el("a", { + style: hyperLinkStyle, + href: DEFAULT_HOMEPAGE_URL, + target: "_blank" + }, ["👉 Check out thousands of workflows shared from the community"]) + this.getAPIKeyLink = $el("a", { + style: { + ...hyperLinkStyle, + color: "#59E8C6" + }, + href: DEFAULT_HOMEPAGE_URL, + target: "_blank" + }, ["👉 Get your API key here"]) + const linkSection = $el( + "div", + { + style: { + marginTop: "10px", + display: "flex", + flexDirection: "column", + }, + }, + [ + this.communityLink, + this.getAPIKeyLink, + ] + ); + + // Account Section + const accountSection = $el("div", {style: sectionStyle}, [ + $el("label", {style: labelStyle}, ["1️⃣ OpenArt API Key"]), + this.keyInput, + ]); + + // Output Upload Section + const outputUploadSection = $el("div", {style: sectionStyle}, [ + $el("label", { + style: { + ...labelStyle, + margin: "10px 0 0 0" + } + }, ["2️⃣ Image/Thumbnail (Required)"]), + this.previewImage, + this.uploadImagesInput, + ]); + + // Outputs Section + this.outputsSection = $el("div", { + id: "selectOutputs", + }, []); + + // Additional Inputs Section + const additionalInputsSection = $el("div", {style: sectionStyle}, [ + $el("label", {style: labelStyle}, ["3️⃣ Workflow Information"]), + this.NameInput, + this.descriptionInput, + ]); + + // OpenArt Contest Section + this.joinContestCheckbox = $el("input", { + type: 'checkbox', + id: "join_contest" + }, []) + this.joinContestDescription = $el("a", { + style: { + ...hyperLinkStyle, + display: 'inline-block', + color: "#59E8C6", + fontSize: '12px', + marginLeft: '10px', + marginBottom: 0, + }, + href: "https://contest.openart.ai/", + target: "_blank" + }, ["🏆 I'm participating in the OpenArt workflow contest"]) + this.joinContestLabel = $el("label", { + style: { + display: 'flex', + alignItems: 'center', + cursor: 'pointer', + } + }, [this.joinContestCheckbox, this.joinContestDescription]) + const contestSection = $el("div", {style: sectionStyle}, [ + this.joinContestLabel, + ]); + + // Message Section + this.message = $el( + "div", + { + style: { + color: "#ff3d00", + textAlign: "center", + padding: "10px", + fontSize: "20px", + }, + }, + [] + ); + + this.shareButton = $el("button", { + type: "submit", + textContent: "Share", + style: buttonStyle, + onclick: () => { + this.handleShareButtonClick(); + }, + }); + + // Share and Close Buttons + const buttonsSection = $el( + "div", + { + style: { + textAlign: "right", + marginTop: "20px", + display: "flex", + justifyContent: "space-between", + }, + }, + [ + $el("button", { + type: "button", + textContent: "Close", + style: { + 
...buttonStyle,
+						backgroundColor: undefined,
+					},
+					onclick: () => {
+						this.close();
+					},
+				}),
+				this.shareButton,
+			]
+		);
+
+		// Composing the full layout
+		const layout = [
+			headerSection,
+			linkSection,
+			accountSection,
+			outputUploadSection,
+			this.outputsSection,
+			additionalInputsSection,
+			contestSection,
+			this.message,
+			buttonsSection,
+		];
+
+		return layout;
+	}
+
+	async fetchApi(path, options, statusText) {
+		if (statusText) {
+			this.message.textContent = statusText;
+		}
+		const addSearchParams = (url, params = {}) =>
+			new URL(
+				`${url.origin}${url.pathname}?${new URLSearchParams([
+					...Array.from(url.searchParams.entries()),
+					...Object.entries(params),
+				])}`
+			);
+
+		const fullPath = addSearchParams(new URL(API_ENDPOINT + path), {
+			workflow_api_key: this.keyInput.value,
+		});
+
+		const response = await fetch(fullPath, options);
+
+		if (!response.ok) {
+			throw new Error(response.statusText);
+		}
+
+		if (statusText) {
+			this.message.textContent = "";
+		}
+		const data = await response.json();
+		return {
+			ok: response.ok,
+			statusText: response.statusText,
+			status: response.status,
+			data,
+		};
+	}
+
+	async uploadThumbnail(uploadFile) {
+		const form = new FormData();
+		form.append("file", uploadFile);
+		try {
+			const res = await this.fetchApi(
+				`/workflows/upload_thumbnail`,
+				{
+					method: "POST",
+					body: form,
+				},
+				"Uploading thumbnail..."
+			);
+
+			if (res.ok && res.data) {
+				const {image_url, width, height} = res.data;
+				this.uploadedImages.push({
+					url: image_url,
+					width,
+					height,
+				});
+			}
+		} catch (e) {
+			if (e?.response?.status === 413) {
+				throw new Error("File size is too large (max 20MB)");
+			} else {
+				throw new Error("Error uploading thumbnail: " + e.message);
+			}
+		}
+	}
+
+	async handleShareButtonClick() {
+		this.message.textContent = "";
+		await this.saveKey(this.keyInput.value);
+		try {
+			this.shareButton.disabled = true;
+			this.shareButton.textContent = "Sharing...";
+			await this.share();
+		} catch (e) {
+			alert(e.message);
+		}
+		this.shareButton.disabled = false;
+		this.shareButton.textContent = "Share";
+	}
+
+	async share() {
+		const prompt = await app.graphToPrompt();
+		const workflowJSON = prompt["workflow"];
+		const workflowAPIJSON = prompt["output"];
+		const form_values = {
+			name: this.NameInput.value,
+			description: this.descriptionInput.value,
+		};
+
+		if (!this.keyInput.value) {
+			throw new Error("API key is required");
+		}
+
+		if (!this.uploadImagesInput.files[0] && !this.selectedFile) {
+			throw new Error("Thumbnail is required");
+		}
+
+		if (!form_values.name) {
+			throw new Error("Title is required");
+		}
+
+		const current_snapshot = await api.fetchApi(`/snapshot/get_current`)
+			.then(response => response.json())
+			.catch(error => {
+				// console.log(error);
+			});
+
+
+		if (!this.uploadedImages.length) {
+			if (this.selectedFile) {
+				await this.uploadThumbnail(this.selectedFile);
+			} else {
+				for (const file of this.uploadImagesInput.files) {
+					try {
+						await this.uploadThumbnail(file);
+					} catch (e) {
+						this.uploadedImages = [];
+						throw new Error(e.message);
+					}
+				}
+
+				if (this.uploadImagesInput.files.length === 0) {
+					throw new Error("No thumbnail uploaded");
+				}
+			}
+		}
+
+		const join_contest = this.joinContestCheckbox.checked;
+
+		try {
+			const response = await this.fetchApi(
+				"/workflows/publish",
+				{
+					method: "POST",
+					headers: {"Content-Type": "application/json"},
+					body: JSON.stringify({
+						workflow_json: workflowJSON,
upload_images: this.uploadedImages,
+						form_values,
+						advanced_config: {
+							workflow_api_json: workflowAPIJSON,
+							snapshot: current_snapshot,
+						},
+						join_contest,
+					}),
+				},
+				"Uploading workflow..."
+			);
+
+			if (response.ok) {
+				const {workflow_id} = response.data;
+				if (workflow_id) {
+					const url = `https://openart.ai/workflows/-/-/${workflow_id}`;
+					this.message.innerHTML = `Workflow has been shared successfully. <a href="${url}" target="_blank">Click here to view it.</a>`;
+					this.previewImage.src = "";
+					this.previewImage.style.display = "none";
+					this.uploadedImages = [];
+					this.NameInput.value = "";
+					this.descriptionInput.value = "";
+					this.radioButtons.forEach((ele) => {
+						ele.checked = false;
+						ele.parentElement.classList.remove("checked");
+					});
+					this.selectedOutputIndex = 0;
+					this.selectedNodeId = null;
+					this.selectedFile = null;
+				}
+			}
+		} catch (e) {
+			throw new Error("Error sharing workflow: " + e.message);
+		}
+	}
+
+	async fetchImageBlob(url) {
+		const response = await fetch(url);
+		const blob = await response.blob();
+		return blob;
+	}
+
+	async show({potential_outputs, potential_output_nodes} = {}) {
+		// Sort `potential_output_nodes` by node ID to make the order always
+		// consistent, but we should also keep `potential_outputs` in the same
+		// order as `potential_output_nodes`.
+		const potential_output_to_order = {};
+		potential_output_nodes.forEach((node, index) => {
+			if (node.id in potential_output_to_order) {
+				potential_output_to_order[node.id][1].push(potential_outputs[index]);
+			} else {
+				potential_output_to_order[node.id] = [node, [potential_outputs[index]]];
+			}
+		})
+		// Sort the object `potential_output_to_order` by key; the keys are
+		// node-ID strings, so compare them numerically.
+		const sorted_potential_output_to_order = Object.fromEntries(
+			Object.entries(potential_output_to_order).sort((a, b) => Number(a[0]) - Number(b[0]))
+		);
+		const sorted_potential_outputs = []
+		const sorted_potential_output_nodes = []
+		for (const [key, value] of Object.entries(sorted_potential_output_to_order)) {
+			sorted_potential_output_nodes.push(value[0]);
+			sorted_potential_outputs.push(...value[1]);
+		}
+		potential_output_nodes = sorted_potential_output_nodes;
+		potential_outputs = sorted_potential_outputs;
+
+		this.message.innerHTML = "";
+		this.message.textContent = "";
+		this.element.style.display = "block";
+		this.previewImage.src = "";
+		this.previewImage.style.display = "none";
+		const key = await this.readKey();
+		this.keyInput.value = key;
+		this.uploadedImages = [];
+
+		// If `selectedNodeId` is provided, we will select the corresponding radio
+		// button for the node. In addition, we move the selected radio button to
+		// the top of the list.
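+		// (`selectedNodeId` is assigned by the "🏞️ Share Output" context-menu
+		// entry before the share button is clicked, so the dialog can pre-select
+		// that node's output instead of defaulting to the first one.)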
+		if (this.selectedNodeId) {
+			const index = potential_output_nodes.findIndex(node => node.id === this.selectedNodeId);
+			if (index >= 0) {
+				this.selectedOutputIndex = index;
+			}
+		}
+
+		this.radioButtons = [];
+		const new_radio_buttons = $el("div",
+			{
+				id: "selectOutput-Options",
+				style: {
+					'overflow-y': 'scroll',
+					'max-height': '200px',
+
+					'display': 'grid',
+					'grid-template-columns': 'repeat(auto-fit, minmax(100px, 1fr))',
+					'grid-template-rows': 'auto',
+					'grid-column-gap': '10px',
+					'grid-row-gap': '10px',
+					'margin-bottom': '10px',
+					'padding': '10px',
+					'border-radius': '8px',
+					'box-shadow': '0 2px 4px rgba(0, 0, 0, 0.05)',
+					'background-color': 'var(--bg-color)',
+				}
+			},
+			potential_outputs.map((output, index) => {
+				const {node_id} = output;
+				const radio_button = $el("input", {
+					type: 'radio',
+					name: "selectOutputImages",
+					value: index,
+					required: index === 0
+				}, [])
+				let radio_button_img;
+				let filename;
+				if (output.type === "image" || output.type === "temp") {
+					radio_button_img = $el("img", {
+						src: `/view?filename=${output.image.filename}&subfolder=${output.image.subfolder}&type=${output.image.type}`,
+						style: {
+							width: "100px",
+							height: "100px",
+							objectFit: "cover",
+							borderRadius: "5px"
+						}
+					}, []);
+					filename = output.image.filename
+				} else if (output.type === "output") {
+					radio_button_img = $el("img", {
+						src: output.output.value,
+						style: {
+							width: "auto",
+							height: "100px",
+							objectFit: "cover",
+							borderRadius: "5px"
+						}
+					}, []);
+					filename = output.output.filename
+				} else {
+					// unsupported output type
+					// this should never happen
+					// TODO
+					radio_button_img = $el("img", {
+						src: "",
+						style: {width: "auto", height: "100px"}
+					}, []);
+				}
+				const radio_button_text = $el("span", {
+					style: {
+						color: 'gray',
+						display: 'block',
+						fontSize: '12px',
+						overflowX: 'hidden',
+						textOverflow: 'ellipsis',
+						textWrap: 'nowrap',
+						maxWidth: '100px',
+					}
+				}, [output.title])
+				const node_id_chip = $el("span", {
+					style: {
+						color: '#FBFBFD',
+						display: 'block',
+						backgroundColor: 'rgba(0, 0, 0, 0.5)',
+						fontSize: '12px',
+						overflowX: 'hidden',
+						padding: '2px 3px',
+						textOverflow: 'ellipsis',
+						textWrap: 'nowrap',
+						maxWidth: '100px',
+						position: 'absolute',
+						top: '3px',
+						left: '3px',
+						borderRadius: '3px',
+					}
+				}, [`Node: ${node_id}`])
+				radio_button.style.color = "var(--fg-color)";
+				radio_button.checked = this.selectedOutputIndex === index;
+
+				radio_button.onchange = async () => {
+					this.selectedOutputIndex = parseInt(radio_button.value);
+
+					// Remove the "checked" class from all radio buttons
+					this.radioButtons.forEach((ele) => {
+						ele.parentElement.classList.remove("checked");
+					});
+					radio_button.parentElement.classList.add("checked");
+
+					this.fetchImageBlob(radio_button_img.src).then((blob) => {
+						const file = new File([blob], filename, {
+							type: blob.type,
+						});
+						this.previewImage.src = radio_button_img.src;
+						this.previewImage.style.display = "block";
+						this.selectedFile = file;
+					})
+
+					// Add the opacity style toggle here to indicate that they only need
+					// to upload one image or choose one from the outputs.
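+					// (Mirror of the toggle in the upload handler: selecting an
+					// output highlights the grid and dims the manual upload input.)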
+ this.outputsSection.style.opacity = 1; + this.uploadImagesInput.style.opacity = 0.35; + }; + + if (radio_button.checked) { + this.fetchImageBlob(radio_button_img.src).then((blob) => { + const file = new File([blob], filename, { + type: blob.type, + }); + this.previewImage.src = radio_button_img.src; + this.previewImage.style.display = "block"; + this.selectedFile = file; + }) + // Add the opacity style toggle here to indicate that they only need + // to upload one image or choose one from the outputs. + this.outputsSection.style.opacity = 1; + this.uploadImagesInput.style.opacity = 0.35; + } + + this.radioButtons.push(radio_button); + + return $el(`label.output_label${radio_button.checked ? '.checked' : ''}`, { + style: { + display: "flex", + flexDirection: "column", + alignItems: "center", + justifyContent: "center", + marginBottom: "10px", + cursor: "pointer", + position: 'relative', + } + }, [radio_button_img, radio_button_text, radio_button, node_id_chip]); + }) + ); + + const header = + $el("p", { + textContent: this.radioButtons.length === 0 ? "Queue Prompt to see the outputs" : "Or choose one from the outputs (scroll to see all)", + size: 2, + color: "white", + style: { + color: 'white', + margin: '0 0 5px 0', + fontSize: '12px', + }, + }, []) + this.outputsSection.innerHTML = ""; + this.outputsSection.appendChild(header); + this.outputsSection.appendChild(new_radio_buttons); + } +} diff --git a/custom_nodes/ComfyUI-Manager/js/common.js b/custom_nodes/ComfyUI-Manager/js/common.js new file mode 100644 index 0000000000000000000000000000000000000000..aa7c7b17a6f0109df8df6bf418490c8375e8a663 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/common.js @@ -0,0 +1,116 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" + +export function rebootAPI() { + if (confirm("Are you sure you'd like to reboot the server?")) { + try { + api.fetchApi("/manager/reboot"); + } + catch(exception) { + + } + return true; + } + + return false; +} + +export async function install_checked_custom_node(grid_rows, target_i, caller, mode) { + if(caller) { + let failed = ''; + + caller.disableButtons(); + + for(let i in grid_rows) { + if(!grid_rows[i].checkbox.checked && i != target_i) + continue; + + var target; + + if(grid_rows[i].data.custom_node) { + target = grid_rows[i].data.custom_node; + } + else { + target = grid_rows[i].data; + } + + caller.startInstall(target); + + try { + const response = await api.fetchApi(`/customnode/${mode}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(target) + }); + + if(response.status == 400) { + app.ui.dialog.show(`${mode} failed: ${target.title}`); + app.ui.dialog.element.style.zIndex = 10010; + continue; + } + + const status = await response.json(); + app.ui.dialog.close(); + target.installed = 'True'; + continue; + } + catch(exception) { + failed += `
${target.title}`; + } + } + + if(failed != '') { + app.ui.dialog.show(`${mode} failed: ${failed}`); + app.ui.dialog.element.style.zIndex = 10010; + } + + await caller.invalidateControl(); + caller.updateMessage("
To apply the installed/updated/disabled/enabled custom node, please restart ComfyUI and refresh the browser.", 'cm-reboot-button');
+	}
+};
+
+export var manager_instance = null;
+
+export function setManagerInstance(obj) {
+	manager_instance = obj;
+}
+
+function isValidURL(url) {
+	const pattern = /^(https?|ftp):\/\/[^\s/$.?#].[^\s]*$/;
+	return pattern.test(url);
+}
+
+export async function install_via_git_url(url, manager_dialog) {
+	if(!url) {
+		return;
+	}
+
+	if(!isValidURL(url)) {
+		app.ui.dialog.show(`Invalid Git url '${url}'`);
+		app.ui.dialog.element.style.zIndex = 10010;
+		return;
+	}
+
+	app.ui.dialog.show(`Wait...<BR><BR>Installing '${url}'`);
+	app.ui.dialog.element.style.zIndex = 10010;
+
+	const res = await api.fetchApi(`/customnode/install/git_url?url=${url}`);
+
+	if(res.status == 200) {
+		app.ui.dialog.show(`'${url}' is installed<BR>To apply the installed custom node, please <button id='cm-reboot-button'>RESTART</button> ComfyUI.`);
+
+		const rebootButton = document.getElementById('cm-reboot-button');
+		const self = this;
+		rebootButton.onclick = function() {
+			if(rebootAPI()) {
+				manager_dialog.close();
+			}
+		};
+
+		app.ui.dialog.element.style.zIndex = 10010;
+	}
+	else {
+		app.ui.dialog.show(`Failed to install '${url}'<BR>
See terminal log.`); + app.ui.dialog.element.style.zIndex = 10010; + } +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/js/custom-nodes-downloader.js b/custom_nodes/ComfyUI-Manager/js/custom-nodes-downloader.js new file mode 100644 index 0000000000000000000000000000000000000000..49e6ca78d15ae74fc909b17127c1035af1c9dc03 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/custom-nodes-downloader.js @@ -0,0 +1,755 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { install_checked_custom_node, manager_instance, rebootAPI } from "./common.js"; + +async function getCustomNodes() { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + var skip_update = ""; + if(manager_instance.update_check_checkbox.checked) + skip_update = "&skip_update=true"; + + const response = await api.fetchApi(`/customnode/getlist?mode=${mode}${skip_update}`); + + const data = await response.json(); + return data; +} + +async function getCustomnodeMappings() { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + const response = await api.fetchApi(`/customnode/getmappings?mode=${mode}`); + + const data = await response.json(); + return data; +} + +async function getConflictMappings() { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + const response = await api.fetchApi(`/customnode/getmappings?mode=${mode}`); + + const data = await response.json(); + + let node_to_extensions_map = {}; + + for(let k in data) { + for(let i in data[k][0]) { + let node = data[k][0][i]; + let l = node_to_extensions_map[node]; + if(!l) { + l = []; + node_to_extensions_map[node] = l; + } + l.push(k); + } + } + + let conflict_map = {}; + for(let node in node_to_extensions_map) { + if(node_to_extensions_map[node].length > 1) { + for(let i in node_to_extensions_map[node]) { + let extension = node_to_extensions_map[node][i]; + let l = conflict_map[extension]; + + if(!l) { + l = []; + conflict_map[extension] = l; + } + + for(let j in node_to_extensions_map[node]) { + let extension2 = node_to_extensions_map[node][j]; + if(extension != extension2) + l.push([node, extension2]); + } + } + } + } + + return conflict_map; +} + +async function getUnresolvedNodesInComponent() { + try { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + const response = await api.fetchApi(`/component/get_unresolved`); + + const data = await response.json(); + return data.nodes; + } + catch { + return []; + } +} + +export class CustomNodesInstaller extends ComfyDialog { + static instance = null; + + install_buttons = []; + message_box = null; + data = null; + + clear() { + this.install_buttons = []; + this.message_box = null; + this.data = null; + } + + constructor(app, manager_dialog) { + super(); + this.manager_dialog = manager_dialog; + this.search_keyword = ''; + this.element = $el("div.comfy-modal", { parent: document.body }, []); + } + + startInstall(target) { + const self = CustomNodesInstaller.instance; + + self.updateMessage(`
Installing '${target.title}'`); + } + + disableButtons() { + for(let i in this.install_buttons) { + this.install_buttons[i].disabled = true; + this.install_buttons[i].style.backgroundColor = 'gray'; + } + } + + apply_searchbox(data) { + let keyword = this.search_box.value.toLowerCase(); + for(let i in this.grid_rows) { + let data = this.grid_rows[i].data; + let content = data.author.toLowerCase() + data.description.toLowerCase() + data.title.toLowerCase() + data.reference.toLowerCase(); + + if(this.filter && this.filter != '*') { + if(this.filter != data.installed) { + this.grid_rows[i].control.style.display = 'none'; + continue; + } + } + + if(keyword == "") + this.grid_rows[i].control.style.display = null; + else if(content.includes(keyword)) { + this.grid_rows[i].control.style.display = null; + } + else { + this.grid_rows[i].control.style.display = 'none'; + } + } + } + + async filter_missing_node(data) { + const mappings = await getCustomnodeMappings(); + + + // build regex->url map + const regex_to_url = []; + for (let i in data) { + if(data[i]['nodename_pattern']) { + let item = {regex: new RegExp(data[i].nodename_pattern), url: data[i].files[0]}; + regex_to_url.push(item); + } + } + + // build name->url map + const name_to_url = {}; + for (const url in mappings) { + const names = mappings[url]; + for(const name in names[0]) { + name_to_url[names[0][name]] = url; + } + } + + const registered_nodes = new Set(); + for (let i in LiteGraph.registered_node_types) { + registered_nodes.add(LiteGraph.registered_node_types[i].type); + } + + const missing_nodes = new Set(); + const workflow = app.graph.serialize(); + const group_nodes = workflow.extra && workflow.extra.groupNodes ? workflow.extra.groupNodes : []; + let nodes = workflow.nodes; + + for (let i in group_nodes) { + let group_node = group_nodes[i]; + nodes = nodes.concat(group_node.nodes); + } + + for (let i in nodes) { + const node_type = nodes[i].type; + if(node_type.startsWith('workflow/')) + continue; + + if (!registered_nodes.has(node_type)) { + const url = name_to_url[node_type.trim()]; + if(url) + missing_nodes.add(url); + else { + for(let j in regex_to_url) { + if(regex_to_url[j].regex.test(node_type)) { + missing_nodes.add(regex_to_url[j].url); + } + } + } + } + } + + let unresolved_nodes = await getUnresolvedNodesInComponent(); + for (let i in unresolved_nodes) { + let node_type = unresolved_nodes[i]; + const url = name_to_url[node_type]; + if(url) + missing_nodes.add(url); + } + + return data.filter(node => node.files.some(file => missing_nodes.has(file))); + } + + async invalidateControl() { + this.clear(); + + // splash + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + const msg = $el('div', {id:'custom-message'}, + [$el('br'), + 'The custom node DB is currently being updated, and updates to custom nodes are being checked for.', + $el('br'), + 'NOTE: Update only checks for extensions that have been fetched.', + $el('br')]); + msg.style.height = '100px'; + msg.style.verticalAlign = 'middle'; + msg.style.color = "var(--fg-color)"; + + this.element.appendChild(msg); + + // invalidate + let data = await getCustomNodes(); + this.data = data.custom_nodes; + this.channel = data.channel; + + this.conflict_mappings = await getConflictMappings(); + + if(this.is_missing_node_mode) + this.data = await this.filter_missing_node(this.data); + + this.element.removeChild(msg); + + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + 
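+        // Rebuild the dialog UI from the freshly fetched data: search/filter header,
+        // the node grid, and finally the bottom message/close controls.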
this.createHeaderControls(); + await this.createGrid(); + this.apply_searchbox(this.data); + this.createBottomControls(); + } + + updateMessage(msg, btn_id) { + this.message_box.innerHTML = msg; + if(btn_id) { + const rebootButton = document.getElementById(btn_id); + const self = this; + rebootButton.onclick = function() { + if(rebootAPI()) { + self.close(); + self.manager_dialog.close(); + } + }; + } + } + + invalidate_checks(is_checked, install_state) { + if(is_checked) { + for(let i in this.grid_rows) { + let data = this.grid_rows[i].data; + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + + checkbox.disabled = data.installed != install_state; + + if(checkbox.disabled) { + for(let j in buttons) { + buttons[j].style.display = 'none'; + } + } + else { + for(let j in buttons) { + buttons[j].style.display = null; + } + } + } + + this.checkbox_all.disabled = false; + } + else { + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + if(checkbox.check) + return; // do nothing + } + + // every checkbox is unchecked -> enable all checkbox + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + checkbox.disabled = false; + + for(let j in buttons) { + buttons[j].style.display = null; + } + } + + this.checkbox_all.checked = false; + this.checkbox_all.disabled = true; + } + } + + check_all(is_checked) { + if(is_checked) { + // lookup first checked item's state + let check_state = null; + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + if(checkbox.checked) { + check_state = this.grid_rows[i].data.installed; + } + } + + if(check_state == null) + return; + + // check only same state items + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + if(this.grid_rows[i].data.installed == check_state) + checkbox.checked = true; + } + } + else { + // uncheck all + for(let i in this.grid_rows) { + let checkbox = this.grid_rows[i].checkbox; + let buttons = this.grid_rows[i].buttons; + checkbox.checked = false; + checkbox.disabled = false; + + for(let j in buttons) { + buttons[j].style.display = null; + } + } + + this.checkbox_all.disabled = true; + } + } + + async createGrid() { + var grid = document.createElement('table'); + grid.setAttribute('id', 'custom-nodes-grid'); + + this.grid_rows = {}; + + let self = this; + + var thead = document.createElement('thead'); + var tbody = document.createElement('tbody'); + + var headerRow = document.createElement('tr'); + thead.style.position = "sticky"; + thead.style.top = "0px"; + thead.style.borderCollapse = "collapse"; + thead.style.tableLayout = "fixed"; + + var header0 = document.createElement('th'); + header0.style.width = "20px"; + this.checkbox_all = $el("input",{type:'checkbox', id:'check_all'},[]); + header0.appendChild(this.checkbox_all); + this.checkbox_all.checked = false; + this.checkbox_all.disabled = true; + this.checkbox_all.addEventListener('change', function() { self.check_all.call(self, self.checkbox_all.checked); }); + + var header1 = document.createElement('th'); + header1.innerHTML = '  ID  '; + header1.style.width = "20px"; + var header2 = document.createElement('th'); + header2.innerHTML = 'Author'; + header2.style.width = "150px"; + var header3 = document.createElement('th'); + header3.innerHTML = 'Name'; + header3.style.width = "20%"; + var header4 = document.createElement('th'); + header4.innerHTML = 'Description'; + header4.style.width = "60%"; +// 
header4.classList.add('expandable-column'); + var header5 = document.createElement('th'); + header5.innerHTML = 'Install'; + header5.style.width = "130px"; + + header0.style.position = "sticky"; + header0.style.top = "0px"; + header1.style.position = "sticky"; + header1.style.top = "0px"; + header2.style.position = "sticky"; + header2.style.top = "0px"; + header3.style.position = "sticky"; + header3.style.top = "0px"; + header4.style.position = "sticky"; + header4.style.top = "0px"; + header5.style.position = "sticky"; + header5.style.top = "0px"; + + thead.appendChild(headerRow); + headerRow.appendChild(header0); + headerRow.appendChild(header1); + headerRow.appendChild(header2); + headerRow.appendChild(header3); + headerRow.appendChild(header4); + headerRow.appendChild(header5); + + headerRow.style.backgroundColor = "Black"; + headerRow.style.color = "White"; + headerRow.style.textAlign = "center"; + headerRow.style.width = "100%"; + headerRow.style.padding = "0"; + + grid.appendChild(thead); + grid.appendChild(tbody); + + if(this.data) + for (var i = 0; i < this.data.length; i++) { + const data = this.data[i]; + let dataRow = document.createElement('tr'); + + let data0 = document.createElement('td'); + let checkbox = $el("input",{type:'checkbox', id:`check_${i}`},[]); + data0.appendChild(checkbox); + checkbox.checked = false; + checkbox.addEventListener('change', function() { self.invalidate_checks.call(self, checkbox.checked, data.installed); }); + + var data1 = document.createElement('td'); + data1.style.textAlign = "center"; + data1.innerHTML = i+1; + + var data2 = document.createElement('td'); + data2.style.maxWidth = "100px"; + data2.className = "cm-node-author" + data2.textContent = ` ${data.author}`; + data2.style.whiteSpace = "nowrap"; + data2.style.overflow = "hidden"; + data2.style.textOverflow = "ellipsis"; + + var data3 = document.createElement('td'); + data3.style.maxWidth = "200px"; + data3.style.wordWrap = "break-word"; + data3.className = "cm-node-name" + data3.innerHTML = ` ${data.title}`; + if(data.installed == 'Fail') + data3.innerHTML = ' (IMPORT FAILED)' + data3.innerHTML; + + var data4 = document.createElement('td'); + data4.innerHTML = data.description; + data4.className = "cm-node-desc" + + let conflicts = this.conflict_mappings[data.files[0]]; + if(conflicts) { + let buf = '
<BR><BR>Conflicted Nodes:<BR>
'; + for(let k in conflicts) { + let node_name = conflicts[k][0]; + + let extension_name = conflicts[k][1].split('/').pop(); + if(extension_name.endsWith('/')) { + extension_name = extension_name.slice(0, -1); + } + if(node_name.endsWith('.git')) { + extension_name = extension_name.slice(0, -4); + } + + buf += `${node_name} [${extension_name}], `; + } + + if(buf.endsWith(', ')) { + buf = buf.slice(0, -2); + } + buf += "
<BR><BR>
"; + data4.innerHTML += buf; + } + + var data5 = document.createElement('td'); + data5.style.textAlign = "center"; + + var installBtn = document.createElement('button'); + installBtn.className = "cm-btn-install"; + var installBtn2 = null; + var installBtn3 = null; + + this.install_buttons.push(installBtn); + + switch(data.installed) { + case 'Disabled': + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Enable'; + installBtn3.className = "cm-btn-enable"; + installBtn3.style.backgroundColor = 'blue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + break; + case 'Update': + installBtn2 = document.createElement('button'); + installBtn2.innerHTML = 'Update'; + installBtn2.className = "cm-btn-update"; + installBtn2.style.backgroundColor = 'blue'; + installBtn2.style.color = 'white'; + this.install_buttons.push(installBtn2); + + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Disable'; + installBtn3.className = "cm-btn-disable"; + installBtn3.style.backgroundColor = 'MediumSlateBlue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + break; + case 'Fail': + case 'True': + installBtn3 = document.createElement('button'); + installBtn3.innerHTML = 'Disable'; + installBtn3.className = "cm-btn-disable"; + installBtn3.style.backgroundColor = 'MediumSlateBlue'; + installBtn3.style.color = 'white'; + this.install_buttons.push(installBtn3); + + installBtn.innerHTML = 'Uninstall'; + installBtn.style.backgroundColor = 'red'; + break; + case 'False': + installBtn.innerHTML = 'Install'; + installBtn.style.backgroundColor = 'black'; + installBtn.style.color = 'white'; + break; + default: + installBtn.innerHTML = `Try Install${data.installed}`; + installBtn.style.backgroundColor = 'Gray'; + installBtn.style.color = 'white'; + } + + let j = i; + if(installBtn2 != null) { + installBtn2.style.width = "120px"; + installBtn2.addEventListener('click', function() { + install_checked_custom_node(self.grid_rows, j, CustomNodesInstaller.instance, 'update'); + }); + + data5.appendChild(installBtn2); + } + + if(installBtn3 != null) { + installBtn3.style.width = "120px"; + installBtn3.addEventListener('click', function() { + install_checked_custom_node(self.grid_rows, j, CustomNodesInstaller.instance, 'toggle_active'); + }); + + data5.appendChild(installBtn3); + } + + installBtn.style.width = "120px"; + installBtn.addEventListener('click', function() { + if(this.innerHTML == 'Uninstall') { + if (confirm(`Are you sure uninstall ${data.title}?`)) { + install_checked_custom_node(self.grid_rows, j, CustomNodesInstaller.instance, 'uninstall'); + } + } + else { + install_checked_custom_node(self.grid_rows, j, CustomNodesInstaller.instance, 'install'); + } + }); + + data5.appendChild(installBtn); + + if(data.installed == 'Fail') + dataRow.style.backgroundColor = "#880000"; + else + dataRow.style.backgroundColor = "var(--bg-color)"; + dataRow.style.color = "var(--fg-color)"; + dataRow.style.textAlign = "left"; + + dataRow.appendChild(data0); + dataRow.appendChild(data1); + dataRow.appendChild(data2); + dataRow.appendChild(data3); + dataRow.appendChild(data4); + dataRow.appendChild(data5); + tbody.appendChild(dataRow); + + let buttons = []; + if(installBtn) { + buttons.push(installBtn); + } + if(installBtn2) { + buttons.push(installBtn2); + } + 
if(installBtn3) { + buttons.push(installBtn3); + } + + this.grid_rows[i] = {data:data, buttons:buttons, checkbox:checkbox, control:dataRow}; + } + + const panel = document.createElement('div'); + panel.style.width = "100%"; + panel.appendChild(grid); + + function handleResize() { + const parentHeight = self.element.clientHeight; + const gridHeight = parentHeight - 200; + + grid.style.height = gridHeight + "px"; + } + window.addEventListener("resize", handleResize); + + grid.style.position = "relative"; + grid.style.display = "inline-block"; + grid.style.width = "100%"; + grid.style.height = "100%"; + grid.style.overflowY = "scroll"; + this.element.style.height = "85%"; + this.element.style.width = "80%"; + this.element.appendChild(panel); + + handleResize(); + } + + createFilterCombo() { + let combo = document.createElement("select"); + + combo.style.cssFloat = "left"; + combo.style.fontSize = "14px"; + combo.style.padding = "4px"; + combo.style.background = "black"; + combo.style.marginLeft = "2px"; + combo.style.width = "199px"; + combo.id = `combo-manger-filter`; + combo.style.borderRadius = "15px"; + + let items = + [ + { value:'*', text:'Filter: all' }, + { value:'Disabled', text:'Filter: disabled' }, + { value:'Update', text:'Filter: update' }, + { value:'True', text:'Filter: installed' }, + { value:'False', text:'Filter: not-installed' }, + { value:'Fail', text:'Filter: import failed' }, + ]; + + items.forEach(item => { + const option = document.createElement("option"); + option.value = item.value; + option.text = item.text; + combo.appendChild(option); + }); + + let self = this; + combo.addEventListener('change', function(event) { + self.filter = event.target.value; + self.apply_searchbox(); + }); + + if(self.filter) { + combo.value = self.filter; + } + + return combo; + } + + createHeaderControls() { + let self = this; + this.search_box = $el('input.cm-search-filter', {type:'text', id:'manager-customnode-search-box', placeholder:'input search keyword', value:this.search_keyword}, []); + this.search_box.style.height = "25px"; + this.search_box.onkeydown = (event) => { + if (event.key === 'Enter') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + if (event.key === 'Escape') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + }; + + + let search_button = document.createElement("button"); + search_button.className = "cm-small-button"; + search_button.innerHTML = "Search"; + search_button.onclick = () => { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + }; + search_button.style.display = "inline-block"; + + let filter_control = this.createFilterCombo(); + filter_control.style.display = "inline-block"; + + let channel_badge = ''; + if(this.channel != 'default') { + channel_badge = $el('span', {id:'cm-channel-badge'}, [`Channel: ${this.channel}`]); + } + else { + + } + let cell = $el('td', {width:'100%'}, [filter_control, channel_badge, this.search_box, ' ', search_button]); + let search_control = $el('table', {width:'100%'}, + [ + $el('tr', {}, [cell]) + ] + ); + + cell.style.textAlign = "right"; + + this.element.appendChild(search_control); + } + + async createBottomControls() { + var close_button = document.createElement("button"); + close_button.className = "cm-small-button"; + close_button.innerHTML = "Close"; + close_button.onclick = () => { this.close(); } + close_button.style.display = "inline-block"; + + this.message_box = $el('div', {id:'custom-installer-message'}, [$el('br'), '']); + 
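+        // This element is the status line driven by updateMessage(); when a btn_id is given,
+        // updateMessage() looks that button up by id after injecting the message HTML.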
this.message_box.style.height = '60px'; + this.message_box.style.verticalAlign = 'middle'; + + this.element.appendChild(this.message_box); + this.element.appendChild(close_button); + } + + async show(is_missing_node_mode) { + this.is_missing_node_mode = is_missing_node_mode; + try { + this.invalidateControl(); + + this.element.style.display = "block"; + this.element.style.zIndex = 10001; + } + catch(exception) { + app.ui.dialog.show(`Failed to get custom node list. / ${exception}`); + } + } +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/js/model-downloader.js b/custom_nodes/ComfyUI-Manager/js/model-downloader.js new file mode 100644 index 0000000000000000000000000000000000000000..9616ae7557069b4aa197463ce46a06d59b976758 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/model-downloader.js @@ -0,0 +1,390 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { install_checked_custom_node, manager_instance, rebootAPI } from "./common.js"; + +async function install_model(target) { + if(ModelInstaller.instance) { + ModelInstaller.instance.startInstall(target); + + try { + const response = await api.fetchApi('/model/install', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(target) + }); + + const status = await response.json(); + app.ui.dialog.close(); + target.installed = 'True'; + return true; + } + catch(exception) { + app.ui.dialog.show(`Install failed: ${target.title} / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + finally { + await ModelInstaller.instance.invalidateControl(); + ModelInstaller.instance.updateMessage("
To apply the installed model, please click the 'Refresh' button on the main menu."); + } + } +} + +async function getModelList() { + var mode = "url"; + if(manager_instance.local_mode_checkbox.checked) + mode = "local"; + + const response = await api.fetchApi(`/externalmodel/getlist?mode=${mode}`); + + const data = await response.json(); + return data; +} + +export class ModelInstaller extends ComfyDialog { + static instance = null; + + install_buttons = []; + message_box = null; + data = null; + + clear() { + this.install_buttons = []; + this.message_box = null; + this.data = null; + } + + constructor(app, manager_dialog) { + super(); + this.manager_dialog = manager_dialog; + this.search_keyword = ''; + this.element = $el("div.comfy-modal", { parent: document.body }, []); + } + + createControls() { + return [ + $el("button.cm-small-button", { + type: "button", + textContent: "Close", + onclick: () => { this.close(); } + }) + ]; + } + + startInstall(target) { + const self = ModelInstaller.instance; + + self.updateMessage(`
Installing '${target.name}'`); + + for(let i in self.install_buttons) { + self.install_buttons[i].disabled = true; + self.install_buttons[i].style.backgroundColor = 'gray'; + } + } + + apply_searchbox(data) { + let keyword = this.search_box.value.toLowerCase(); + for(let i in this.grid_rows) { + let data = this.grid_rows[i].data; + let content = data.name.toLowerCase() + data.type.toLowerCase() + data.base.toLowerCase() + data.description.toLowerCase(); + + if(this.filter && this.filter != '*') { + if(this.filter != data.installed) { + this.grid_rows[i].control.style.display = 'none'; + continue; + } + } + + if(keyword == "") + this.grid_rows[i].control.style.display = null; + else if(content.includes(keyword)) { + this.grid_rows[i].control.style.display = null; + } + else { + this.grid_rows[i].control.style.display = 'none'; + } + } + } + + async invalidateControl() { + this.clear(); + this.data = (await getModelList()).models; + + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + await this.createHeaderControls(); + + if(this.search_keyword) { + this.search_box.value = this.search_keyword; + } + + await this.createGrid(); + await this.createBottomControls(); + + this.apply_searchbox(this.data); + } + + updateMessage(msg, btn_id) { + this.message_box.innerHTML = msg; + if(btn_id) { + const rebootButton = document.getElementById(btn_id); + const self = this; + rebootButton.onclick = function() { + if(rebootAPI()) { + self.close(); + self.manager_dialog.close(); + } + }; + } + } + + async createGrid(models_json) { + var grid = document.createElement('table'); + grid.setAttribute('id', 'external-models-grid'); + + var thead = document.createElement('thead'); + var tbody = document.createElement('tbody'); + + var headerRow = document.createElement('tr'); + thead.style.position = "sticky"; + thead.style.top = "0px"; + thead.style.borderCollapse = "collapse"; + thead.style.tableLayout = "fixed"; + + var header1 = document.createElement('th'); + header1.innerHTML = '  ID  '; + header1.style.width = "20px"; + var header2 = document.createElement('th'); + header2.innerHTML = 'Type'; + header2.style.width = "100px"; + var header3 = document.createElement('th'); + header3.innerHTML = 'Base'; + header3.style.width = "100px"; + var header4 = document.createElement('th'); + header4.innerHTML = 'Name'; + header4.style.width = "30%"; + var header5 = document.createElement('th'); + header5.innerHTML = 'Filename'; + header5.style.width = "20%"; + header5.style.tableLayout = "fixed"; + var header6 = document.createElement('th'); + header6.innerHTML = 'Description'; + header6.style.width = "50%"; + var header_down = document.createElement('th'); + header_down.innerHTML = 'Download'; + header_down.style.width = "50px"; + + thead.appendChild(headerRow); + headerRow.appendChild(header1); + headerRow.appendChild(header2); + headerRow.appendChild(header3); + headerRow.appendChild(header4); + headerRow.appendChild(header5); + headerRow.appendChild(header6); + headerRow.appendChild(header_down); + + headerRow.style.backgroundColor = "Black"; + headerRow.style.color = "White"; + headerRow.style.textAlign = "center"; + headerRow.style.width = "100%"; + headerRow.style.padding = "0"; + + grid.appendChild(thead); + grid.appendChild(tbody); + + this.grid_rows = {}; + + if(this.data) + for (var i = 0; i < this.data.length; i++) { + const data = this.data[i]; + var dataRow = document.createElement('tr'); + var data1 = document.createElement('td'); + data1.style.textAlign 
= "center"; + data1.innerHTML = i+1; + var data2 = document.createElement('td'); + data2.innerHTML = ` ${data.type}`; + var data3 = document.createElement('td'); + data3.innerHTML = ` ${data.base}`; + var data4 = document.createElement('td'); + data4.className = "cm-node-name"; + data4.innerHTML = ` ${data.name}`; + var data5 = document.createElement('td'); + data5.className = "cm-node-filename"; + data5.innerHTML = ` ${data.filename}`; + data5.style.wordBreak = "break-all"; + var data6 = document.createElement('td'); + data6.className = "cm-node-desc"; + data6.innerHTML = data.description; + data6.style.wordBreak = "break-all"; + var data_install = document.createElement('td'); + var installBtn = document.createElement('button'); + data_install.style.textAlign = "center"; + + installBtn.innerHTML = 'Install'; + this.install_buttons.push(installBtn); + + switch(data.installed) { + case 'True': + installBtn.innerHTML = 'Installed'; + installBtn.style.backgroundColor = 'green'; + installBtn.style.color = 'white'; + installBtn.disabled = true; + break; + default: + installBtn.innerHTML = 'Install'; + installBtn.style.backgroundColor = 'black'; + installBtn.style.color = 'white'; + break; + } + + installBtn.style.width = "100px"; + + installBtn.addEventListener('click', function() { + install_model(data); + }); + + data_install.appendChild(installBtn); + + dataRow.style.backgroundColor = "var(--bg-color)"; + dataRow.style.color = "var(--fg-color)"; + dataRow.style.textAlign = "left"; + + dataRow.appendChild(data1); + dataRow.appendChild(data2); + dataRow.appendChild(data3); + dataRow.appendChild(data4); + dataRow.appendChild(data5); + dataRow.appendChild(data6); + dataRow.appendChild(data_install); + tbody.appendChild(dataRow); + + this.grid_rows[i] = {data:data, control:dataRow}; + } + + let self = this; + const panel = document.createElement('div'); + panel.style.width = "100%"; + panel.appendChild(grid); + + function handleResize() { + const parentHeight = self.element.clientHeight; + const gridHeight = parentHeight - 200; + + grid.style.height = gridHeight + "px"; + } + window.addEventListener("resize", handleResize); + + grid.style.position = "relative"; + grid.style.display = "inline-block"; + grid.style.width = "100%"; + grid.style.height = "100%"; + grid.style.overflowY = "scroll"; + this.element.style.height = "85%"; + this.element.style.width = "80%"; + this.element.appendChild(panel); + + handleResize(); + } + + createFilterCombo() { + let combo = document.createElement("select"); + + combo.style.cssFloat = "left"; + combo.style.fontSize = "14px"; + combo.style.padding = "4px"; + combo.style.background = "black"; + combo.style.marginLeft = "2px"; + combo.style.width = "199px"; + combo.id = `combo-manger-filter`; + combo.style.borderRadius = "15px"; + + let items = + [ + { value:'*', text:'Filter: all' }, + { value:'True', text:'Filter: installed' }, + { value:'False', text:'Filter: not-installed' }, + ]; + + items.forEach(item => { + const option = document.createElement("option"); + option.value = item.value; + option.text = item.text; + combo.appendChild(option); + }); + + let self = this; + combo.addEventListener('change', function(event) { + self.filter = event.target.value; + self.apply_searchbox(); + }); + + return combo; + } + + createHeaderControls() { + let self = this; + this.search_box = $el('input.cm-search-filter', {type:'text', id:'manager-model-search-box', placeholder:'input search keyword', value:this.search_keyword}, []); + this.search_box.style.height = "25px"; + 
this.search_box.onkeydown = (event) => { + if (event.key === 'Enter') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + if (event.key === 'Escape') { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + } + }; + + let search_button = document.createElement("button"); + search_button.className = "cm-small-button"; + search_button.innerHTML = "Search"; + search_button.onclick = () => { + self.search_keyword = self.search_box.value; + self.apply_searchbox(); + }; + search_button.style.display = "inline-block"; + + let filter_control = this.createFilterCombo(); + filter_control.style.display = "inline-block"; + + let cell = $el('td', {width:'100%'}, [filter_control, this.search_box, ' ', search_button]); + let search_control = $el('table', {width:'100%'}, + [ + $el('tr', {}, [cell]) + ] + ); + + cell.style.textAlign = "right"; + this.element.appendChild(search_control); + } + + async createBottomControls() { + var close_button = document.createElement("button"); + close_button.className = "cm-small-button"; + close_button.innerHTML = "Close"; + close_button.onclick = () => { this.close(); } + close_button.style.display = "inline-block"; + + this.message_box = $el('div', {id:'custom-download-message'}, [$el('br'), '']); + this.message_box.style.height = '60px'; + this.message_box.style.verticalAlign = 'middle'; + + this.element.appendChild(this.message_box); + this.element.appendChild(close_button); + } + + async show() { + try { + this.invalidateControl(); + this.element.style.display = "block"; + this.element.style.zIndex = 10001; + } + catch(exception) { + app.ui.dialog.show(`Failed to get external model list. / ${exception}`); + } + } +} diff --git a/custom_nodes/ComfyUI-Manager/js/snapshot.js b/custom_nodes/ComfyUI-Manager/js/snapshot.js new file mode 100644 index 0000000000000000000000000000000000000000..e4a720eaa4610c3abf00c034a1c76d18dde84b4a --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/js/snapshot.js @@ -0,0 +1,292 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" +import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { manager_instance, rebootAPI } from "./common.js"; + + +async function restore_snapshot(target) { + if(SnapshotManager.instance) { + try { + const response = await api.fetchApi(`/snapshot/restore?target=${target}`, { cache: "no-store" }); + if(response.status == 400) { + app.ui.dialog.show(`Restore snapshot failed: ${target.title} / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + } + + app.ui.dialog.close(); + return true; + } + catch(exception) { + app.ui.dialog.show(`Restore snapshot failed: ${target.title} / ${exception}`); + app.ui.dialog.element.style.zIndex = 10010; + return false; + } + finally { + await SnapshotManager.instance.invalidateControl(); + SnapshotManager.instance.updateMessage("
To apply the snapshot, please restart ComfyUI and refresh the browser.", 'cm-reboot-button');
+        }
+    }
+}
+
+async function remove_snapshot(target) {
+    if(SnapshotManager.instance) {
+        try {
+            const response = await api.fetchApi(`/snapshot/remove?target=${target}`, { cache: "no-store" });
+            if(response.status == 400) {
+                app.ui.dialog.show(`Remove snapshot failed: ${target}`);
+                app.ui.dialog.element.style.zIndex = 10010;
+            }
+
+            app.ui.dialog.close();
+            return true;
+        }
+        catch(exception) {
+            app.ui.dialog.show(`Remove snapshot failed: ${target} / ${exception}`);
+            app.ui.dialog.element.style.zIndex = 10010;
+            return false;
+        }
+        finally {
+            await SnapshotManager.instance.invalidateControl();
+        }
+    }
+}
+
+async function save_current_snapshot() {
+    try {
+        const response = await api.fetchApi('/snapshot/save', { cache: "no-store" });
+        app.ui.dialog.close();
+        return true;
+    }
+    catch(exception) {
+        app.ui.dialog.show(`Backup snapshot failed: ${exception}`);
+        app.ui.dialog.element.style.zIndex = 10010;
+        return false;
+    }
+    finally {
+        await SnapshotManager.instance.invalidateControl();
+        SnapshotManager.instance.updateMessage("<BR>
Current snapshot saved."); + } +} + +async function getSnapshotList() { + const response = await api.fetchApi(`/snapshot/getlist`); + const data = await response.json(); + return data; +} + +export class SnapshotManager extends ComfyDialog { + static instance = null; + + restore_buttons = []; + message_box = null; + data = null; + + clear() { + this.restore_buttons = []; + this.message_box = null; + this.data = null; + } + + constructor(app, manager_dialog) { + super(); + this.manager_dialog = manager_dialog; + this.search_keyword = ''; + this.element = $el("div.comfy-modal", { parent: document.body }, []); + } + + async remove_item() { + caller.disableButtons(); + + await caller.invalidateControl(); + } + + createControls() { + return [ + $el("button.cm-small-button", { + type: "button", + textContent: "Close", + onclick: () => { this.close(); } + }) + ]; + } + + startRestore(target) { + const self = SnapshotManager.instance; + + self.updateMessage(`
Restore snapshot '${target.name}'`); + + for(let i in self.restore_buttons) { + self.restore_buttons[i].disabled = true; + self.restore_buttons[i].style.backgroundColor = 'gray'; + } + } + + async invalidateControl() { + this.clear(); + this.data = (await getSnapshotList()).items; + + while (this.element.children.length) { + this.element.removeChild(this.element.children[0]); + } + + await this.createGrid(); + await this.createBottomControls(); + } + + updateMessage(msg, btn_id) { + this.message_box.innerHTML = msg; + if(btn_id) { + const rebootButton = document.getElementById(btn_id); + const self = this; + rebootButton.onclick = function() { + if(rebootAPI()) { + self.close(); + self.manager_dialog.close(); + } + }; + } + } + + async createGrid(models_json) { + var grid = document.createElement('table'); + grid.setAttribute('id', 'snapshot-list-grid'); + + var thead = document.createElement('thead'); + var tbody = document.createElement('tbody'); + + var headerRow = document.createElement('tr'); + thead.style.position = "sticky"; + thead.style.top = "0px"; + thead.style.borderCollapse = "collapse"; + thead.style.tableLayout = "fixed"; + + var header1 = document.createElement('th'); + header1.innerHTML = '  ID  '; + header1.style.width = "20px"; + var header2 = document.createElement('th'); + header2.innerHTML = 'Datetime'; + header2.style.width = "100%"; + var header_button = document.createElement('th'); + header_button.innerHTML = 'Action'; + header_button.style.width = "100px"; + + thead.appendChild(headerRow); + headerRow.appendChild(header1); + headerRow.appendChild(header2); + headerRow.appendChild(header_button); + + headerRow.style.backgroundColor = "Black"; + headerRow.style.color = "White"; + headerRow.style.textAlign = "center"; + headerRow.style.width = "100%"; + headerRow.style.padding = "0"; + + grid.appendChild(thead); + grid.appendChild(tbody); + + this.grid_rows = {}; + + if(this.data) + for (var i = 0; i < this.data.length; i++) { + const data = this.data[i]; + var dataRow = document.createElement('tr'); + var data1 = document.createElement('td'); + data1.style.textAlign = "center"; + data1.innerHTML = i+1; + var data2 = document.createElement('td'); + data2.innerHTML = ` ${data}`; + var data_button = document.createElement('td'); + data_button.style.textAlign = "center"; + + var restoreBtn = document.createElement('button'); + restoreBtn.innerHTML = 'Restore'; + restoreBtn.style.width = "100px"; + restoreBtn.style.backgroundColor = 'blue'; + + restoreBtn.addEventListener('click', function() { + restore_snapshot(data); + }); + + var removeBtn = document.createElement('button'); + removeBtn.innerHTML = 'Remove'; + removeBtn.style.width = "100px"; + removeBtn.style.backgroundColor = 'red'; + + removeBtn.addEventListener('click', function() { + remove_snapshot(data); + }); + + data_button.appendChild(restoreBtn); + data_button.appendChild(removeBtn); + + dataRow.style.backgroundColor = "var(--bg-color)"; + dataRow.style.color = "var(--fg-color)"; + dataRow.style.textAlign = "left"; + + dataRow.appendChild(data1); + dataRow.appendChild(data2); + dataRow.appendChild(data_button); + tbody.appendChild(dataRow); + + this.grid_rows[i] = {data:data, control:dataRow}; + } + + let self = this; + const panel = document.createElement('div'); + panel.style.width = "100%"; + panel.appendChild(grid); + + function handleResize() { + const parentHeight = self.element.clientHeight; + const gridHeight = parentHeight - 200; + + grid.style.height = gridHeight + "px"; + } + 
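+        // Size the grid to the modal now and on every window resize. Note that a new
+        // listener is added on each invalidateControl() call and never removed, so
+        // resize handlers accumulate while the dialog is reused.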
window.addEventListener("resize", handleResize); + + grid.style.position = "relative"; + grid.style.display = "inline-block"; + grid.style.width = "100%"; + grid.style.height = "100%"; + grid.style.overflowY = "scroll"; + this.element.style.height = "85%"; + this.element.style.width = "80%"; + this.element.appendChild(panel); + + handleResize(); + } + + async createBottomControls() { + var close_button = document.createElement("button"); + close_button.className = "cm-small-button"; + close_button.innerHTML = "Close"; + close_button.onclick = () => { this.close(); } + close_button.style.display = "inline-block"; + + var save_button = document.createElement("button"); + save_button.className = "cm-small-button"; + save_button.innerHTML = "Save snapshot"; + save_button.onclick = () => { save_current_snapshot(); } + save_button.style.display = "inline-block"; + save_button.style.horizontalAlign = "right"; + + this.message_box = $el('div', {id:'custom-download-message'}, [$el('br'), '']); + this.message_box.style.height = '60px'; + this.message_box.style.verticalAlign = 'middle'; + + this.element.appendChild(this.message_box); + this.element.appendChild(close_button); + this.element.appendChild(save_button); + } + + async show() { + try { + this.invalidateControl(); + this.element.style.display = "block"; + this.element.style.zIndex = 10001; + } + catch(exception) { + app.ui.dialog.show(`Failed to get external model list. / ${exception}`); + } + } +} diff --git a/custom_nodes/ComfyUI-Manager/misc/custom-nodes.jpg b/custom_nodes/ComfyUI-Manager/misc/custom-nodes.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10482f1b7b55a617f3c9cabba55b318b5ab4d714 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/custom-nodes.jpg differ diff --git a/custom_nodes/ComfyUI-Manager/misc/main.jpg b/custom_nodes/ComfyUI-Manager/misc/main.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec31f76fcaf06cbd2b2b51d98f4d244bcd34fcb6 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/main.jpg differ diff --git a/custom_nodes/ComfyUI-Manager/misc/main.png b/custom_nodes/ComfyUI-Manager/misc/main.png new file mode 100644 index 0000000000000000000000000000000000000000..910da417bf2cbab3828a9beea5ef83ff3b86bbb5 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/main.png differ diff --git a/custom_nodes/ComfyUI-Manager/misc/menu.jpg b/custom_nodes/ComfyUI-Manager/misc/menu.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c650a9e3f749ae48d8bb81592769cdfaad4f082 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/menu.jpg differ diff --git a/custom_nodes/ComfyUI-Manager/misc/missing-list.png b/custom_nodes/ComfyUI-Manager/misc/missing-list.png new file mode 100644 index 0000000000000000000000000000000000000000..f1cc4fd2cf3681b48a43a6cc9eededa85ea9697e Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/missing-list.png differ diff --git a/custom_nodes/ComfyUI-Manager/misc/missing-menu.png b/custom_nodes/ComfyUI-Manager/misc/missing-menu.png new file mode 100644 index 0000000000000000000000000000000000000000..5e74744b1c46c079841aa5ae60a6c0f891aea5d1 Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/missing-menu.png differ diff --git a/custom_nodes/ComfyUI-Manager/misc/models.png b/custom_nodes/ComfyUI-Manager/misc/models.png new file mode 100644 index 0000000000000000000000000000000000000000..9e985fb498d528218ec0f3ada8508c2abd6a6926 Binary files /dev/null and 
b/custom_nodes/ComfyUI-Manager/misc/models.png differ
diff --git a/custom_nodes/ComfyUI-Manager/misc/nickname.jpg b/custom_nodes/ComfyUI-Manager/misc/nickname.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e3cfdcac5f0be077c5f90543b0c480b87b0f2a1d
Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/nickname.jpg differ
diff --git a/custom_nodes/ComfyUI-Manager/misc/portable-install.png b/custom_nodes/ComfyUI-Manager/misc/portable-install.png
new file mode 100644
index 0000000000000000000000000000000000000000..1771745132f0d541d17da70f5f131cef651cdada
Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/portable-install.png differ
diff --git a/custom_nodes/ComfyUI-Manager/misc/share-setting.jpg b/custom_nodes/ComfyUI-Manager/misc/share-setting.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0ceacf2cdccfe49f7b1a54e5c7aac83232df2504
Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/share-setting.jpg differ
diff --git a/custom_nodes/ComfyUI-Manager/misc/share.jpg b/custom_nodes/ComfyUI-Manager/misc/share.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..97c0ae7de58265351e8cd4dfbb9489fedc41abb5
Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/share.jpg differ
diff --git a/custom_nodes/ComfyUI-Manager/misc/snapshot.jpg b/custom_nodes/ComfyUI-Manager/misc/snapshot.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..33269564bbd2b286994c78a92bf09905d251907b
Binary files /dev/null and b/custom_nodes/ComfyUI-Manager/misc/snapshot.jpg differ
diff --git a/custom_nodes/ComfyUI-Manager/model-list.json b/custom_nodes/ComfyUI-Manager/model-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..f2e9ea4f4f70b37fe246f3f01c8a44921257419b
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/model-list.json
@@ -0,0 +1,1591 @@
+{
+    "models": [
+        {
+            "name": "TAESDXL Decoder",
+            "type": "TAESD",
+            "base": "SDXL",
+            "save_path": "vae_approx",
+            "description": "(SDXL Version) To view the preview in high quality while running samples in ComfyUI, you will need this model.",
+            "reference": "https://github.com/madebyollin/taesd",
+            "filename": "taesdxl_decoder.pth",
+            "url": "https://github.com/madebyollin/taesd/raw/main/taesdxl_decoder.pth"
+        },
+        {
+            "name": "TAESDXL Encoder",
+            "type": "TAESD",
+            "base": "SDXL",
+            "save_path": "vae_approx",
+            "description": "(SDXL Version) To view the preview in high quality while running samples in ComfyUI, you will need this model.",
+            "reference": "https://github.com/madebyollin/taesd",
+            "filename": "taesdxl_encoder.pth",
+            "url": "https://github.com/madebyollin/taesd/raw/main/taesdxl_encoder.pth"
+        },
+        {
+            "name": "TAESD Decoder",
+            "type": "TAESD",
+            "base": "SD1.x",
+            "save_path": "vae_approx",
+            "description": "To view the preview in high quality while running samples in ComfyUI, you will need this model.",
+            "reference": "https://github.com/madebyollin/taesd",
+            "filename": "taesd_decoder.pth",
+            "url": "https://github.com/madebyollin/taesd/raw/main/taesd_decoder.pth"
+        },
+        {
+            "name": "TAESD Encoder",
+            "type": "TAESD",
+            "base": "SD1.x",
+            "save_path": "vae_approx",
+            "description": "To view the preview in high quality while running samples in ComfyUI, you will need this model.",
+            "reference": "https://github.com/madebyollin/taesd",
+            "filename": "taesd_encoder.pth",
+            "url": "https://github.com/madebyollin/taesd/raw/main/taesd_encoder.pth"
+        },
+        {
+            "name": "RealESRGAN x2",
+            "type": "upscale",
+            "base": 
"upscale", + "save_path": "default", + "description": "RealESRGAN x2 upscaler model", + "reference": "https://huggingface.co/ai-forever/Real-ESRGAN", + "filename": "RealESRGAN_x2.pth", + "url": "https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth" + }, + { + "name": "RealESRGAN x4", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "RealESRGAN x4 upscaler model", + "reference": "https://huggingface.co/ai-forever/Real-ESRGAN", + "filename": "RealESRGAN_x4.pth", + "url": "https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth" + }, + { + "name": "ESRGAN x4", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "ESRGAN x4 upscaler model", + "reference": "https://huggingface.co/Afizi/ESRGAN_4x.pth", + "filename": "ESRGAN_4x.pth", + "url": "https://huggingface.co/Afizi/ESRGAN_4x.pth/resolve/main/ESRGAN_4x.pth" + }, + { + "name": "4x_foolhardy_Remacri", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "4x_foolhardy_Remacri upscaler model", + "reference": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri", + "filename": "4x_foolhardy_Remacri.pth", + "url": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth" + }, + { + "name": "4x-AnimeSharp", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "4x-AnimeSharp upscaler model", + "reference": "https://huggingface.co/konohashinobi4/4xAnimesharp", + "filename": "4x-AnimeSharp.pth", + "url": "https://huggingface.co/konohashinobi4/4xAnimesharp/resolve/main/4x-AnimeSharp.pth" + }, + { + "name": "4x-UltraSharp", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "4x-UltraSharp upscaler model", + "reference": "https://upscale.wiki/wiki/Model_Database", + "filename": "4x-UltraSharp.pth", + "url": "https://huggingface.co/datasets/Kizi-Art/Upscale/resolve/fa98e357882a23b8e7928957a39462fbfaee1af5/4x-UltraSharp.pth" + }, + { + "name": "4x_NMKD-Siax_200k", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "4x_NMKD-Siax_200k upscaler model", + "reference": "https://huggingface.co/gemasai/4x_NMKD-Siax_200k", + "filename": "4x_NMKD-Siax_200k.pth", + "url": "https://huggingface.co/gemasai/4x_NMKD-Siax_200k/resolve/main/4x_NMKD-Siax_200k.pth" + }, + { + "name": "8x_NMKD-Superscale_150000_G", + "type": "upscale", + "base": "upscale", + "save_path": "default", + "description": "8x_NMKD-Superscale_150000_G upscaler model", + "reference": "https://huggingface.co/uwg/upscaler", + "filename": "8x_NMKD-Superscale_150000_G.pth", + "url": "https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/8x_NMKD-Superscale_150000_G.pth" + }, + { + "name": "Inswapper-fp16 (face swap)", + "type": "insightface", + "base" : "inswapper", + "save_path": "insightface", + "description": "[264MB] Checkpoint of the insightface swapper model
<BR>(used by ComfyUI-FaceSwap, comfyui-reactor-node, CharacterFaceSwap,<BR>ComfyUI roop and comfy_mtb)",
+            "reference": "https://github.com/facefusion/facefusion-assets",
+            "filename": "inswapper_128_fp16.onnx",
+            "url": "https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx"
+        },
+        {
+            "name": "Inswapper (face swap)",
+            "type": "insightface",
+            "base": "inswapper",
+            "save_path": "insightface",
+            "description": "[529MB] Checkpoint of the insightface swapper model<BR>(used by ComfyUI-FaceSwap, comfyui-reactor-node, CharacterFaceSwap,<BR>
ComfyUI roop and comfy_mtb)", + "reference": "https://github.com/facefusion/facefusion-assets", + "filename": "inswapper_128.onnx", + "url": "https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx" + }, + { + "name": "Deepbump", + "type": "deepbump", + "base": "deepbump", + "save_path": "deepbump", + "description": "Checkpoint of the deepbump model to generate height and normal maps textures from an image (requires comfy_mtb)", + "reference": "https://github.com/HugoTini/DeepBump", + "filename": "deepbump256.onnx", + "url": "https://github.com/HugoTini/DeepBump/raw/master/deepbump256.onnx" + }, + { + "name": "GFPGAN 1.3", + "type": "face_restore", + "base": "face_restore", + "save_path": "face_restore", + "description": "Face restoration", + "reference": "https://github.com/TencentARC/GFPGAN", + "filename": "GFPGANv1.3.pth", + "url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth" + }, + { + "name": "GFPGAN 1.4", + "type": "face_restore", + "base": "face_restore", + "save_path": "face_restore", + "description": "Face restoration", + "reference": "https://github.com/TencentARC/GFPGAN", + "filename": "GFPGANv1.4.pth", + "url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" + }, + { + "name": "RestoreFormer", + "type": "face_restore", + "base": "face_restore", + "save_path": "face_restore", + "description": "Face restoration", + "reference": "https://github.com/TencentARC/GFPGAN", + "filename": "RestoreFormer.pth", + "url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth" + }, + { + "name": "Stable Video Diffusion Image-to-Video", + "type": "checkpoints", + "base": "SVD", + "save_path": "checkpoints/SVD", + "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.
NOTE: 14 frames @ 576x1024", + "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid", + "filename": "svd.safetensors", + "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid/resolve/main/svd.safetensors" + }, + { + "name": "Stable Video Diffusion Image-to-Video (XT)", + "type": "checkpoints", + "base": "SVD", + "save_path": "checkpoints/SVD", + "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.
NOTE: 25 frames @ 576x1024 ", + "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt", + "filename": "svd_xt.safetensors", + "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/resolve/main/svd_xt.safetensors" + }, + { + "name": "negative_hand Negative Embedding", + "type": "embeddings", + "base": "SD1.5", + "save_path": "default", + "description": "If you use this embedding with negatives, you can solve the issue of damaging your hands.", + "reference": "https://civitai.com/models/56519/negativehand-negative-embedding", + "filename": "negative_hand-neg.pt", + "url": "https://civitai.com/api/download/models/60938" + }, + { + "name": "bad_prompt Negative Embedding", + "type": "embeddings", + "base": "SD1.5", + "save_path": "default", + "description": "The idea behind this embedding was to somehow train the negative prompt as an embedding, thus unifying the basis of the negative prompt into one word or embedding.", + "reference": "https://civitai.com/models/55700/badprompt-negative-embedding", + "filename": "bad_prompt_version2-neg.pt", + "url": "https://civitai.com/api/download/models/60095" + }, + { + "name": "Deep Negative V1.75", + "type": "embeddings", + "base": "SD1.5", + "save_path": "default", + "description": "These embedding learn what disgusting compositions and color patterns are, including faulty human anatomy, offensive color schemes, upside-down spatial structures, and more. Placing it in the negative can go a long way to avoiding these things.", + "reference": "https://civitai.com/models/4629/deep-negative-v1x", + "filename": "ng_deepnegative_v1_75t.pt", + "url": "https://civitai.com/api/download/models/5637" + }, + { + "name": "EasyNegative", + "type": "embeddings", + "base": "SD1.5", + "save_path": "default", + "description": "This embedding should be used in your NEGATIVE prompt. 
Adjust the strength as desired (seems to scale well without any distortions), the strength required may vary based on positive and negative prompts.", + "reference": "https://civitai.com/models/7808/easynegative", + "filename": "easynegative.safetensors", + "url": "https://civitai.com/api/download/models/9208" + }, + { + "name": "SDXL-Turbo 1.0 (fp16)", + "type": "checkpoints", + "base": "SDXL", + "save_path": "checkpoints/SDXL-TURBO", + "description": "[6.9GB] SDXL-Turbo 1.0 fp16", + "reference": "https://huggingface.co/stabilityai/sdxl-turbo", + "filename": "sd_xl_turbo_1.0_fp16.safetensors", + "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0_fp16.safetensors" + }, + { + "name": "SDXL-Turbo 1.0", + "type": "checkpoints", + "base": "SDXL", + "save_path": "checkpoints/SDXL-TURBO", + "description": "[13.9GB] SDXL-Turbo 1.0", + "reference": "https://huggingface.co/stabilityai/sdxl-turbo", + "filename": "sd_xl_turbo_1.0.safetensors", + "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0.safetensors" + }, + { + "name": "sd_xl_base_1.0_0.9vae.safetensors", + "type": "checkpoints", + "base": "SDXL", + "save_path": "default", + "description": "Stable Diffusion XL base model (VAE 0.9)", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0", + "filename": "sd_xl_base_1.0_0.9vae.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors" + }, + { + "name": "sd_xl_base_1.0.safetensors", + "type": "checkpoints", + "base": "SDXL", + "save_path": "default", + "description": "Stable Diffusion XL base model", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0", + "filename": "sd_xl_base_1.0.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors" + }, + { + "name": "sd_xl_refiner_1.0_0.9vae.safetensors", + "type": "checkpoints", + "base": "SDXL", + "save_path": "default", + "description": "Stable Diffusion XL refiner model (VAE 0.9)", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0", + "filename": "sd_xl_refiner_1.0_0.9vae.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors" + }, + { + "name": "stable-diffusion-xl-refiner-1.0", + "type": "checkpoints", + "base": "SDXL", + "save_path": "default", + "description": "Stable Diffusion XL refiner model", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0", + "filename": "sd_xl_refiner_1.0.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors" + }, + { + "name": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1 (UNET/fp16)", + "type": "unet", + "base": "SDXL", + "save_path": "unet/xl-inpaint-0.1", + "description": "[5.14GB] Stable Diffusion XL inpainting model 0.1. 
You need UNETLoader instead of CheckpointLoader.", + "reference": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "filename": "diffusion_pytorch_model.fp16.safetensors", + "url": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors" + }, + { + "name": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1 (UNET)", + "type": "unet", + "base": "SDXL", + "save_path": "unet/xl-inpaint-0.1", + "description": "[10.3GB] Stable Diffusion XL inpainting model 0.1. You need UNETLoader instead of CheckpointLoader.", + "reference": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "filename": "diffusion_pytorch_model.safetensors", + "url": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1/resolve/main/unet/diffusion_pytorch_model.safetensors" + }, + { + "name": "sd_xl_offset_example-lora_1.0.safetensors", + "type": "lora", + "base": "SDXL", + "save_path": "default", + "description": "Stable Diffusion XL offset LoRA", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0", + "filename": "sd_xl_offset_example-lora_1.0.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" + }, + { + "name": "v1-5-pruned-emaonly.ckpt", + "type": "checkpoints", + "base": "SD1.5", + "save_path": "default", + "description": "Stable Diffusion 1.5 base model", + "reference": "https://huggingface.co/runwayml/stable-diffusion-v1-5", + "filename": "v1-5-pruned-emaonly.ckpt", + "url": "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt" + }, + { + "name": "v2-1_512-ema-pruned.safetensors", + "type": "checkpoints", + "base": "SD2", + "save_path": "default", + "description": "Stable Diffusion 2 base model (512)", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-2-1-base", + "filename": "v2-1_512-ema-pruned.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors" + }, + { + "name": "v2-1_768-ema-pruned.safetensors", + "type": "checkpoints", + "base": "SD2", + "save_path": "default", + "description": "Stable Diffusion 2 base model (768)", + "reference": "https://huggingface.co/stabilityai/stable-diffusion-2-1", + "filename": "v2-1_768-ema-pruned.safetensors", + "url": "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors" + }, + { + "name": "AbyssOrangeMix2 (hard)", + "type": "checkpoints", + "base": "SD1.5", + "save_path": "default", + "description": "AbyssOrangeMix2 - hard version (anime style)", + "reference": "https://huggingface.co/WarriorMama777/OrangeMixs", + "filename": "AbyssOrangeMix2_hard.safetensors", + "url": "https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors" + }, + { + "name": "AbyssOrangeMix3 A1", + "type": "checkpoints", + "base": "SD1.5", + "save_path": "default", + "description": "AbyssOrangeMix3 - A1 (anime style)", + "reference": "https://huggingface.co/WarriorMama777/OrangeMixs", + "filename": "AOM3A1_orangemixs.safetensors", + "url": "https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors" + }, + { + "name": "AbyssOrangeMix3 A3", + "type": "checkpoints", + "base": "SD1.5", + "save_path": "default", + "description": "AbyssOrangeMix - A3 (anime 
style)", + "reference": "https://huggingface.co/WarriorMama777/OrangeMixs", + "filename": "AOM3A3_orangemixs.safetensors", + "url": "https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors" + }, + { + "name": "Anything v3 (fp16; pruned)", + "type": "checkpoints", + "base": "SD1.5", + "save_path": "default", + "description": "Anything v3 (anime style)", + "reference": "https://huggingface.co/Linaqruf/anything-v3.0", + "filename": "anything-v3-fp16-pruned.safetensors", + "url": "https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors" + }, + { + "name": "Waifu Diffusion 1.5 Beta3 (fp16)", + "type": "checkpoints", + "base": "SD2.1", + "save_path": "default", + "description": "Waifu Diffusion 1.5 Beta3", + "reference": "https://huggingface.co/waifu-diffusion/wd-1-5-beta3", + "filename": "wd-illusion-fp16.safetensors", + "url": "https://huggingface.co/waifu-diffusion/wd-1-5-beta3/resolve/main/wd-illusion-fp16.safetensors" + }, + { + "name": "illuminatiDiffusionV1_v11 unCLIP model", + "type": "unclip", + "base": "SD2.1", + "save_path": "default", + "description": "Mix model (SD2.1 unCLIP + illuminatiDiffusionV1_v11)", + "reference": "https://huggingface.co/comfyanonymous/illuminatiDiffusionV1_v11_unCLIP", + "filename": "illuminatiDiffusionV1_v11-unclip-h-fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/illuminatiDiffusionV1_v11_unCLIP/resolve/main/illuminatiDiffusionV1_v11-unclip-h-fp16.safetensors" + }, + { + "name": "Waifu Diffusion 1.5 unCLIP model", + "type": "unclip", + "base": "SD2.1", + "save_path": "default", + "description": "Mix model (SD2.1 unCLIP + Waifu Diffusion 1.5)", + "reference": "https://huggingface.co/comfyanonymous/wd-1.5-beta2_unCLIP", + "filename": "wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/wd-1.5-beta2_unCLIP/resolve/main/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors" + }, + { + "name": "sdxl_vae.safetensors", + "type": "VAE", + "base": "SDXL VAE", + "save_path": "default", + "description": "SDXL-VAE", + "reference": "https://huggingface.co/stabilityai/sdxl-vae", + "filename": "sdxl_vae.safetensors", + "url": "https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors" + }, + { + "name": "vae-ft-mse-840000-ema-pruned", + "type": "VAE", + "base": "SD1.5 VAE", + "save_path": "default", + "description": "vae-ft-mse-840000-ema-pruned", + "reference": "https://huggingface.co/stabilityai/sd-vae-ft-mse-original", + "filename": "vae-ft-mse-840000-ema-pruned.safetensors", + "url": "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors" + }, + { + "name": "orangemix.vae", + "type": "VAE", + "base": "SD1.5 VAE", + "save_path": "default", + "description": "orangemix vae model", + "reference": "https://huggingface.co/WarriorMama777/OrangeMixs", + "filename": "orangemix.vae.pt", + "url": "https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt" + }, + { + "name": "kl-f8-anime2", + "type": "VAE", + "base": "SD2.1 VAE", + "save_path": "default", + "description": "kl-f8-anime2 vae model", + "reference": "https://huggingface.co/hakurei/waifu-diffusion-v1-4", + "filename": "kl-f8-anime2.ckpt", + "url": "https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt" + }, + { + "name": "LCM LoRA SD1.5", + "type": "lora", + "base": "SD1.5", + "save_path": "loras/lcm/SD1.5", + "description": 
"Latent Consistency LoRA for SD1.5", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5", + "filename": "pytorch_lora_weights.safetensors", + "url": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5/resolve/main/pytorch_lora_weights.safetensors" + }, + { + "name": "LCM LoRA SSD-1B", + "type": "lora", + "base": "SSD-1B", + "save_path": "loras/lcm/SSD-1B", + "description": "Latent Consistency LoRA for SSD-1B", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b", + "filename": "pytorch_lora_weights.safetensors", + "url": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b/resolve/main/pytorch_lora_weights.safetensors" + }, + { + "name": "LCM LoRA SDXL", + "type": "lora", + "base": "SSD-1B", + "save_path": "loras/lcm/SDXL", + "description": "Latent Consistency LoRA for SDXL", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdxl", + "filename": "pytorch_lora_weights.safetensors", + "url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/resolve/main/pytorch_lora_weights.safetensors" + }, + { + "name": "Theovercomer8's Contrast Fix (SD2.1)", + "type": "lora", + "base": "SD2.1", + "save_path": "default", + "description": "LORA: Theovercomer8's Contrast Fix (SD2.1)", + "reference": "https://civitai.com/models/8765/theovercomer8s-contrast-fix-sd15sd21-768", + "filename": "theovercomer8sContrastFix_sd21768.safetensors", + "url": "https://civitai.com/api/download/models/10350" + }, + { + "name": "Theovercomer8's Contrast Fix (SD1.5)", + "type": "lora", + "base": "SD1.5", + "save_path": "default", + "description": "LORA: Theovercomer8's Contrast Fix (SD1.5)", + "reference": "https://civitai.com/models/8765/theovercomer8s-contrast-fix-sd15sd21-768", + "filename": "theovercomer8sContrastFix_sd15.safetensors", + "url": "https://civitai.com/api/download/models/10638" + }, + { + "name": "T2I-Adapter (depth)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for depth", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_depth_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth" + }, + { + "name": "T2I-Adapter (seg)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for seg", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_seg_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth" + }, + { + "name": "T2I-Adapter (sketch)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for sketch", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_sketch_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth" + }, + { + "name": "T2I-Adapter (keypose)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for keypose", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_keypose_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth" + }, + { + "name": "T2I-Adapter (openpose)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for 
openpose", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_openpose_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth" + }, + { + "name": "T2I-Adapter (color)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for color", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_color_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth" + }, + { + "name": "T2I-Adapter (canny)", + "type": "T2I-Adapter", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter for canny", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_canny_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth" + }, + { + "name": "T2I-Style model", + "type": "T2I-Style", + "base": "SD1.5", + "save_path": "default", + "description": "ControlNet T2I-Adapter style model. Need to download CLIPVision model.", + "reference": "https://huggingface.co/TencentARC/T2I-Adapter", + "filename": "t2iadapter_style_sd14v1.pth", + "url": "https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth" + }, + { + "name": "CiaraRowles/TemporalNet2", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "TemporalNet was a ControlNet model designed to enhance the temporal consistency of generated outputs", + "reference": "https://huggingface.co/CiaraRowles/TemporalNet2", + "filename": "temporalnetversion2.ckpt", + "url": "https://huggingface.co/CiaraRowles/TemporalNet2/resolve/main/temporalnetversion2.ckpt" + }, + { + "name": "CiaraRowles/TemporalNet1XL (1.0)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "controlnet/TemporalNet1XL", + "description": "This is TemporalNet1XL, it is a re-train of the controlnet TemporalNet1 with Stable Diffusion XL.", + "reference": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0", + "filename": "diffusion_pytorch_model.safetensors", + "url": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0/resolve/main/diffusion_pytorch_model.safetensors" + }, + { + "name": "CLIPVision model (stabilityai/clip_vision_g)", + "type": "clip_vision", + "base": "SDXL", + "save_path": "clip_vision/SDXL", + "description": "[3.69GB] clip_g vision model", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "clip_vision_g.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/revision/clip_vision_g.safetensors" + }, + { + "name": "CLIPVision model (openai/clip-vit-large)", + "type": "clip_vision", + "base": "SD1.5", + "save_path": "clip_vision/SD1.5", + "description": "[1.7GB] CLIPVision model (needed for styles model)", + "reference": "https://huggingface.co/openai/clip-vit-large-patch14", + "filename": "pytorch_model.bin", + "url": "https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin" + }, + { + "name": "CLIPVision model (IP-Adapter)", + "type": "clip_vision", + "base": "SD1.5", + "save_path": "clip_vision/SD1.5", + "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "pytorch_model.bin", + "url": 
"https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin" + }, + { + "name": "CLIPVision model (IP-Adapter)", + "type": "clip_vision", + "base": "SDXL", + "save_path": "clip_vision/SDXL", + "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "pytorch_model.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin" + }, + { + "name": "stabilityai/control-lora-canny-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: canny rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-canny-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-canny-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-depth-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: depth rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-depth-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-depth-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-recolor-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: recolor rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-recolor-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-sketch-rank128-metadata.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: sketch rank128 metadata", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-sketch-rank128-metadata.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors" + }, + { + "name": "stabilityai/control-lora-canny-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: canny rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-canny-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-depth-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: depth rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-depth-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-recolor-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: recolor rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-recolor-rank256.safetensors", + "url": 
"https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-sketch-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: sketch rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-sketch-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors" + }, + + { + "name": "kohya-ss/ControlNet-LLLite: SDXL Canny Anime", + "type": "controlnet", + "base": "SDXL", + "save_path": "custom_nodes/ControlNet-LLLite-ComfyUI/models", + "description": "[46.2MB] An extremely compactly designed controlnet model (a.k.a. ControlNet-LLLite). Note: The model structure is highly experimental and may be subject to change in the future.", + "reference": "https://huggingface.co/kohya-ss/controlnet-lllite", + "filename": "controllllite_v01032064e_sdxl_canny_anime.safetensors", + "url": "https://huggingface.co/kohya-ss/controlnet-lllite/resolve/main/controllllite_v01032064e_sdxl_canny_anime.safetensors" + }, + + { + "name": "SDXL-controlnet: OpenPose (v2)", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet openpose model for SDXL", + "reference": "https://huggingface.co/thibaud/controlnet-openpose-sdxl-1.0", + "filename": "OpenPoseXL2.safetensors", + "url": "https://huggingface.co/thibaud/controlnet-openpose-sdxl-1.0/resolve/main/OpenPoseXL2.safetensors" + }, + { + "name": "controlnet-SargeZT/controlnet-sd-xl-1.0-softedge-dexined", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet softedge model for SDXL", + "reference": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-softedge-dexined", + "filename": "controlnet-sd-xl-1.0-softedge-dexined.safetensors", + "url": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-softedge-dexined/resolve/main/controlnet-sd-xl-1.0-softedge-dexined.safetensors" + }, + { + "name": "controlnet-SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet depth-zoe model for SDXL", + "reference": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe", + "filename": "depth-zoe-xl-v1.0-controlnet.safetensors", + "url": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe/resolve/main/depth-zoe-xl-v1.0-controlnet.safetensors" + }, + + { + "name": "ControlNet-v1-1 (ip2p; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (ip2p)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11e_sd15_ip2p_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (shuffle; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (shuffle)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11e_sd15_shuffle_fp16.safetensors", + "url": 
"https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (canny; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (canny)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_canny_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (depth; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (depth)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11f1p_sd15_depth_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (inpaint; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (inpaint)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_inpaint_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (lineart; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (lineart)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_lineart_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (mlsd; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (mlsd)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_mlsd_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (normalbae; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (normalbae)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_normalbae_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (openpose; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (openpose)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_openpose_fp16.safetensors", + "url": 
"https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (scribble; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (scribble)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_scribble_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (seg; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (seg)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_seg_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (softedge; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (softedge)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15_softedge_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (anime; fp16)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (anime)", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11p_sd15s2_lineart_anime_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (tile; fp16; v11u)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (tile) / v11u", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11u_sd15_tile_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors" + }, + { + "name": "ControlNet-v1-1 (tile; fp16; v11f1e)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "default", + "description": "Safetensors/FP16 versions of the new ControlNet-v1-1 checkpoints (tile) / v11f1e
You need this model for Tiled Resample", + "reference": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors", + "filename": "control_v11f1e_sd15_tile_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors" + }, + { + "name": "GLIGEN textbox (fp16; pruned)", + "type": "gligen", + "base": "SD1.5", + "save_path": "default", + "description": "GLIGEN textbox model", + "reference": "https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors", + "filename": "gligen_sd14_textbox_pruned_fp16.safetensors", + "url": "https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/resolve/main/gligen_sd14_textbox_pruned_fp16.safetensors" + }, + { + "name": "ViT-H SAM model", + "type": "sam", + "base": "SAM", + "save_path": "sams", + "description": "Segment Anything SAM model (ViT-H)", + "reference": "https://github.com/facebookresearch/segment-anything#model-checkpoints", + "filename": "sam_vit_h_4b8939.pth", + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth" + }, + { + "name": "ViT-L SAM model", + "type": "sam", + "base": "SAM", + "save_path": "sams", + "description": "Segment Anything SAM model (ViT-L)", + "reference": "https://github.com/facebookresearch/segment-anything#model-checkpoints", + "filename": "sam_vit_l_0b3195.pth", + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth" + }, + { + "name": "ViT-B SAM model", + "type": "sam", + "base": "SAM", + "save_path": "sams", + "description": "Segment Anything SAM model (ViT-B)", + "reference": "https://github.com/facebookresearch/segment-anything#model-checkpoints", + "filename": "sam_vit_b_01ec64.pth", + "url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth" + }, + { + "name": "seecoder v1.0", + "type": "seecoder", + "base": "SEECODER", + "save_path": "seecoders", + "description": "SeeCoder model", + "reference": "https://huggingface.co/shi-labs/prompt-free-diffusion/tree/main/pretrained/pfd/seecoder", + "filename": "seecoder-v1-0.safetensors", + "url": "https://huggingface.co/shi-labs/prompt-free-diffusion/resolve/main/pretrained/pfd/seecoder/seecoder-v1-0.safetensors" + }, + { + "name": "seecoder pa v1.0", + "type": "seecoder", + "base": "SEECODER", + "save_path": "seecoders", + "description": "SeeCoder model", + "reference": "https://huggingface.co/shi-labs/prompt-free-diffusion/tree/main/pretrained/pfd/seecoder", + "filename": "seecoder-pa-v1-0.safetensors", + "url": "https://huggingface.co/shi-labs/prompt-free-diffusion/resolve/main/pretrained/pfd/seecoder/seecoder-pa-v1-0.safetensors" + }, + { + "name": "seecoder anime v1.0", + "type": "seecoder", + "base": "SEECODER", + "save_path": "seecoders", + "description": "SeeCoder model", + "reference": "https://huggingface.co/shi-labs/prompt-free-diffusion/tree/main/pretrained/pfd/seecoder", + "filename": "seecoder-anime-v1-0.safetensors", + "url": "https://huggingface.co/shi-labs/prompt-free-diffusion/resolve/main/pretrained/pfd/seecoder/seecoder-anime-v1-0.safetensors" + }, + { + "name": "face_yolov8m (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "face_yolov8m.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt" + }, + { + "name":
"face_yolov8n (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "face_yolov8n.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n.pt" + }, + { + "name": "face_yolov8n_v2 (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "face_yolov8n_v2.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n_v2.pt" + }, + { + "name": "face_yolov8s (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "face_yolov8s.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8s.pt" + }, + { + "name": "hand_yolov8n (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "hand_yolov8n.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8n.pt" + }, + { + "name": "hand_yolov8s (bbox)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/bbox", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "hand_yolov8s.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt" + }, + { + "name": "person_yolov8m (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "person_yolov8m-seg.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8m-seg.pt" + }, + { + "name": "person_yolov8n (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "person_yolov8n-seg.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8n-seg.pt" + }, + { + "name": "person_yolov8s (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "person_yolov8s-seg.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8s-seg.pt" + }, + { + "name": "deepfashion2_yolov8s (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + 
"reference": "https://huggingface.co/Bingsu/adetailer/tree/main", + "filename": "deepfashion2_yolov8s-seg.pt", + "url": "https://huggingface.co/Bingsu/adetailer/resolve/main/deepfashion2_yolov8s-seg.pt" + }, + + { + "name": "face_yolov8m-seg_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "face_yolov8m-seg_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8m-seg_60.pt" + }, + { + "name": "face_yolov8n-seg2_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "face_yolov8n-seg2_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8n-seg2_60.pt" + }, + { + "name": "hair_yolov8n-seg_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "hair_yolov8n-seg_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/hair_yolov8n-seg_60.pt" + }, + { + "name": "skin_yolov8m-seg_400.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8m-seg_400.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8m-seg_400.pt" + }, + { + "name": "skin_yolov8n-seg_400.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8n-seg_400.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_400.pt" + }, + { + "name": "skin_yolov8n-seg_800.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8n-seg_800.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_800.pt" + }, + + { + "name": "animatediff/mmd_sd_v14.ckpt (comfyui-animatediff)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/comfyui-animatediff/models", + "description": "Pressing 'install' directly downloads the model from the ArtVentureX/AnimateDiff extension node. 
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v14.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v14.ckpt" + }, + { + "name": "animatediff/mm_sd_v15.ckpt (comfyui-animatediff)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/comfyui-animatediff/models", + "description": "Pressing 'install' directly downloads the model from the ArtVentureX/AnimateDiff extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15.ckpt" + }, + + { + "name": "animatediff/mm_sd_v14.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v14.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v14.ckpt" + }, + { + "name": "animatediff/mm_sd_v15.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15.ckpt" + }, + { + "name": "animatediff/mm_sd_v15_v2.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15_v2.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt" + }, + { + "name": "animatediff/mm_sdxl_v10_beta.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sdxl_v10_beta.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sdxl_v10_beta.ckpt" + }, + { + "name": "AD_Stabilized_Motion/mm-Stabilized_high.pth (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion", + "filename": "mm-Stabilized_high.pth", + "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_high.pth" + }, + { + "name": "AD_Stabilized_Motion/mm-Stabilized_mid.pth (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion", + "filename": "mm-Stabilized_mid.pth", + "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_mid.pth" + }, + { + "name": "CiaraRowles/temporaldiff-v1-animatediff.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/CiaraRowles/TemporalDiff", + "filename": "temporaldiff-v1-animatediff.ckpt", + "url": "https://huggingface.co/CiaraRowles/TemporalDiff/resolve/main/temporaldiff-v1-animatediff.ckpt" + }, + + { + "name": "animatediff/v2_lora_PanLeft.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_PanLeft.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanLeft.ckpt" + }, + { + "name": "animatediff/v2_lora_PanRight.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_PanRight.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanRight.ckpt" + }, + { + "name": "animatediff/v2_lora_RollingAnticlockwise.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_RollingAnticlockwise.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingAnticlockwise.ckpt" + }, + { + "name": "animatediff/v2_lora_RollingClockwise.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. 
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_RollingClockwise.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingClockwise.ckpt" + }, + { + "name": "animatediff/v2_lora_TiltDown.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_TiltDown.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltDown.ckpt" + }, + { + "name": "animatediff/v2_lora_TiltUp.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_TiltUp.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltUp.ckpt" + }, + { + "name": "animatediff/v2_lora_ZoomIn.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_ZoomIn.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomIn.ckpt" + }, + { + "name": "animatediff/v2_lora_ZoomOut.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_ZoomOut.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomOut.ckpt" + }, + { + "name": "ip-adapter_sd15.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin" + }, + { + "name": "ip-adapter_sd15_light.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Use this when text prompt is more important than reference images", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sd15_light.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15_light.bin" + }, + { + "name": "ip-adapter-plus_sd15.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin" + }, + { + "name": "ip-adapter-plus-face_sd15.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus-face_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus-face_sd15.bin" + }, + { + "name": "ip-adapter-full-face_sd15.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-full-face_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-full-face_sd15.bin" + }, + { + "name": "ip-adapter_sdxl.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sdxl.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.bin" + }, + { + "name": "wd15_ip_adapter_plus.bin
(IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models
extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "wd15_ip_adapter_plus.bin", + "url": "https://huggingface.co/furusu/IP-Adapter/resolve/main/wd15_ip_adapter_plus.bin" + }, + { + "name": "ip-adapter_sd15.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin" + }, + { + "name": "ip-adapter_sd15_light.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Use this when text prompt is more important than reference images", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sd15_light.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15_light.bin" + }, + { + "name": "ip-adapter-plus_sd15.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin" + }, + { + "name": "ip-adapter-plus-face_sd15.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus-face_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus-face_sd15.bin" + }, + { + "name": "ip-adapter-full-face_sd15.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-full-face_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-full-face_sd15.bin" + }, + { + "name": "ip-adapter_sdxl.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sdxl.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.bin" + }, + { + "name": "ip-adapter_sdxl_vit-h.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "This model requires the use of the SD1.5 encoder despite being for SDXL checkpoints", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sdxl_vit-h.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.bin" + }, + { + "name": "ip-adapter-plus_sdxl_vit-h.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "This model requires the use of the SD1.5 encoder despite being for SDXL checkpoints", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus_sdxl_vit-h.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin" + }, + { + "name": "ip-adapter-plus-face_sdxl_vit-h.bin
(ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "This model requires the use of the SD1.5 encoder despite being for SDXL checkpoints", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus-face_sdxl_vit-h.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus-face_sdxl_vit-h.bin" + }, + { + "name": "pfg-novel-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-novel-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-novel-n10.pt" + }, + { + "name": "pfg-wd14-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-wd14-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd14-n10.pt" + }, + { + "name": "pfg-wd15beta2-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-wd15beta2-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd15beta2-n10.pt" + }, + { + "name": "GFPGANv1.4.pth", + "type": "GFPGAN", + "base": "GFPGAN", + "save_path": "facerestore_models", + "description": "Face Restoration Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/TencentARC/GFPGAN/releases", + "filename": "GFPGANv1.4.pth", + "url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" + }, + { + "name": "codeformer.pth", + "type": "CodeFormer", + "base": "CodeFormer", + "save_path": "facerestore_models", + "description": "Face Restoration Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/sczhou/CodeFormer/releases", + "filename": "codeformer.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" + }, + { + "name": "detection_Resnet50_Final.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facerestore_models", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "detection_Resnet50_Final.pth", + "url": "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth" + }, + { + "name": "detection_mobilenet0.25_Final.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facerestore_models", + "description": "Face Detection Models. 
Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "detection_mobilenet0.25_Final.pth", + "url": "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth" + }, + { + "name": "yolov5l-face.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facedetection", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "yolov5l-face.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth" + }, + { + "name": "yolov5n-face.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facedetection", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "yolov5n-face.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth" + } + ] +} diff --git a/custom_nodes/ComfyUI-Manager/node_db/dev/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/dev/custom-node-list.json new file mode 100644 index 0000000000000000000000000000000000000000..a11499b24af6f7a4ad5a6f024d76c20edcd7de66 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/dev/custom-node-list.json @@ -0,0 +1,314 @@ +{ + "custom_nodes": [ + { + "author": "talesofai", + "title": "comfyui-supersave [WIP]", + "reference": "https://github.com/talesofai/comfyui-supersave", + "files": [ + "https://github.com/talesofai/comfyui-supersave" + ], + "install_type": "git-clone", + "description": "WIP" + }, + { + "author": "Sai-ComfyUI", + "title": "ComfyUI-MS-Nodes [WIP]", + "reference": "https://github.com/Sai-ComfyUI/ComfyUI-MS-Nodes", + "files": [ + "https://github.com/Sai-ComfyUI/ComfyUI-MS-Nodes" + ], + "install_type": "git-clone", + "description": "WIP" + }, + { + "author": "eigenpunk", + "title": "ComfyUI-audio", + "reference": "https://github.com/eigenpunk/ComfyUI-audio", + "files": [ + "https://github.com/eigenpunk/ComfyUI-audio" + ], + "install_type": "git-clone", + "description": "Generative audio tools for ComfyUI. Highly experimental; expect things to break." + }, + { + "author": "dmarx", + "title": "ComfyUI-Keyframed", + "reference": "https://github.com/dmarx/ComfyUI-Keyframed", + "files": [ + "https://github.com/dmarx/ComfyUI-Keyframed" + ], + "install_type": "git-clone", + "description": "ComfyUI nodes to facilitate value keyframing by providing an interface for using [a/keyframed](https://github.com/dmarx/keyframed) in ComfyUI workflows." + }, + { + "author": "glibsonoran", + "title": "Plush-for-ComfyUI", + "reference": "https://github.com/glibsonoran/Plush-for-ComfyUI", + "files": [ + "https://github.com/glibsonoran/Plush-for-ComfyUI" + ], + "install_type": "git-clone", + "description": "Nodes: Style Prompt, OAI Dall_e Image" + }, + { + "author": "crystian", + "title": "Crystian Node Suite [WIP]", + "reference": "https://github.com/crystian/ComfyUI-Crys-Plugins", + "files": [ + "https://github.com/crystian/ComfyUI-Crys-Plugins" + ], + "install_type": "git-clone", + "description": "Nodes: Debugger, CryConvert, Util, ..." 
+ }, + { + "author": "Jaxkr", + "title": "comfyui-terminal-command [UNSAFE]", + "reference": "https://github.com/Jaxkr/comfyui-terminal-command", + "files": [ + "https://github.com/Jaxkr/comfyui-terminal-command" + ], + "install_type": "git-clone", + "description": "Nodes: Run Terminal Command. [w/This node is an unsafe node that includes the capability to execute terminal commands.]" + }, + { + "author": "BlueDangerX", + "title": "ComfyUI-BDXNodes [WIP]", + "reference": "https://github.com/BlueDangerX/ComfyUI-BDXNodes", + "files": [ + "https://github.com/BlueDangerX/ComfyUI-BDXNodes" + ], + "install_type": "git-clone", + "description": "Nodes: Node Jumper. Various quality of life testing nodes" + }, + { + "author": "ilovejohnwhite", + "title": "TatToolkit", + "reference": "https://github.com/ilovejohnwhite/UncleBillyGoncho", + "files": [ + "https://github.com/ilovejohnwhite/UncleBillyGoncho" + ], + "install_type": "git-clone", + "description": "Nodes:UWU TTK Preprocessor, Pixel Perfect Resolution, Generation Resolution From Image, Generation Resolution From Latent, Enchance And Resize Hint Images, ..." + }, + { + "author": "ilovejohnwhite", + "title": "TatToolkit", + "reference": "https://github.com/ilovejohnwhite/TatToolkit", + "files": [ + "https://github.com/ilovejohnwhite/TatToolkit" + ], + "install_type": "git-clone", + "description": "Nodes:Dip It, Rip It. This extension provides several image processing nodes." + }, + { + "author": "IvanZhd", + "title": "comfyui-codeformer [WIP]", + "reference": "https://github.com/IvanZhd/comfyui-codeformer", + "files": [ + "https://github.com/IvanZhd/comfyui-codeformer" + ], + "install_type": "git-clone", + "description": "Nodes:Image Inverter" + }, + { + "author": "hinablue", + "title": "ComfyUI 3D Pose Editor", + "reference": "https://github.com/hinablue/ComfyUI_3dPoseEditor", + "files": [ + "https://github.com/hinablue/ComfyUI_3dPoseEditor" + ], + "install_type": "git-clone", + "description": "Nodes:3D Pose Editor" + }, + { + "author": "alt-key-project", + "title": "Dream Project Video Batches [WIP]", + "reference": "https://github.com/alt-key-project/comfyui-dream-video-batches", + "files": [ + "https://github.com/alt-key-project/comfyui-dream-video-batches" + ], + "install_type": "git-clone", + "description": "NOTE: This is currently work in progress. Expect nodes to break (or be broken) until 1.0 release." + }, + { + "author": "oyvindg", + "title": "ComfyUI-TrollSuite", + "reference": "https://github.com/oyvindg/ComfyUI-TrollSuite", + "files": [ + "https://github.com/oyvindg/ComfyUI-TrollSuite" + ], + "install_type": "git-clone", + "description": "Nodes: BinaryImageMask, ImagePadding, LoadLastCreatedImage, RandomMask, TransparentImage." + }, + { + "author": "romeobuilderotti", + "title": "ComfyUI-EZ-Pipes", + "reference": "https://github.com/romeobuilderotti/ComfyUI-EZ-Pipes", + "files": [ + "https://github.com/romeobuilderotti/ComfyUI-EZ-Pipes" + ], + "install_type": "git-clone", + "description": "ComfyUI-EZ-Pipes is a set of custom pipe nodes for ComfyUI. It provides a set of Input/Edit/Output nodes for each pipe type." + }, + { + "author": "baldsam", + "title": "ComfyUI_baldsam", + "reference": "https://github.com/baldsam/ComfyUI_baldsam", + "files": [ + "https://github.com/baldsam/ComfyUI_baldsam" + ], + "install_type": "git-clone", + "description": "Nodes:Universal Aspect Ratio." 
+ }, + { + "author": "wormley", + "title": "comfyui-wormley-nodes", + "reference": "https://github.com/wormley/comfyui-wormley-nodes", + "files": [ + "https://github.com/wormley/comfyui-wormley-nodes" + ], + "install_type": "git-clone", + "description": "Nodes: CheckpointVAELoaderSimpleText, CheckpointVAESelectorText, LoRA_Tag_To_Stack" + }, + { + "author": "dnl13", + "title": "ComfyUI-dnl13-seg", + "reference": "https://github.com/dnl13/ComfyUI-dnl13-seg", + "files": [ + "https://github.com/dnl13/ComfyUI-dnl13-seg" + ], + "install_type": "git-clone", + "description": "After discovering @storyicon's implementation of Segment Anything, I realized its potential as a powerful tool for ComfyUI if implemented correctly. I delved into the SAM and Dino models. The following is my own adaptation of sam_hq for ComfyUI." + }, + { + "author": "phineas-pta", + "title": "comfy-trt-test [WIP]", + "reference": "https://github.com/phineas-pta/comfy-trt-test", + "files": [ + "https://github.com/phineas-pta/comfy-trt-test" + ], + "install_type": "git-clone", + "description": "Test project for ComfyUI TensorRT Support.\nNOT WORKING YET.\nNot automatic yet; do not use ComfyUI-Manager to install!\nNot beginner-friendly yet; still intended for technical users.\nNOTE: The reason for registration in the Manager is for guidance, and for detailed installation instructions, please visit the repository." + }, + { + "author": "Brandelan", + "title": "ComfyUI_bd_customNodes", + "reference": "https://github.com/Brandelan/ComfyUI_bd_customNodes", + "files": [ + "https://github.com/Brandelan/ComfyUI_bd_customNodes" + ], + "install_type": "git-clone", + "description": "Nodes: BD Random Range, BD Settings, BD Sequencer." + }, + { + "author": "Jordach", + "title": "comfy-consistency-vae", + "reference": "https://github.com/Jordach/comfy-consistency-vae", + "files": [ + "https://github.com/Jordach/comfy-consistency-vae" + ], + "install_type": "git-clone", + "description": "Nodes: Comfy_ConsistencyVAE" + }, + { + "author": "gameltb", + "title": "ComfyUI_stable_fast", + "reference": "https://github.com/gameltb/ComfyUI_stable_fast", + "files": [ + "https://github.com/gameltb/ComfyUI_stable_fast" + ], + "install_type": "git-clone", + "description": "Nodes:ApplyStableFastUnet. Experimental usage of stable-fast." + }, + { + "author": "jn-jairo", + "title": "jn_node_suite_comfyui [WIP]", + "reference": "https://github.com/jn-jairo/jn_node_suite_comfyui", + "files": [ + "https://github.com/jn-jairo/jn_node_suite_comfyui" + ], + "install_type": "git-clone", + "description": "Image manipulation nodes, Temperature control nodes, Tiling nodes, Primitive and operation nodes, ..." + }, + { + "author": "PluMaZero", + "title": "ComfyUI-SpaceFlower", + "reference": "https://github.com/PluMaZero/ComfyUI-SpaceFlower", + "files": [ + "https://github.com/PluMaZero/ComfyUI-SpaceFlower" + ], + "install_type": "git-clone", + "description": "Nodes: SpaceFlower_Prompt." + }, + { + "author": "laksjdjf", + "title": "ssd-1b-comfyui", + "reference": "https://github.com/laksjdjf/ssd-1b-comfyui", + "files": [ + "https://github.com/laksjdjf/ssd-1b-comfyui" + ], + "install_type": "git-clone", + "description": "Experimental node for SSD-1B. This node is not needed for the latest ComfyUI." 
+ }, + { + "author": "flowtyone", + "title": "comfyui-flowty-lcm", + "reference": "https://github.com/flowtyone/comfyui-flowty-lcm", + "files": [ + "https://github.com/flowtyone/comfyui-flowty-lcm" + ], + "install_type": "git-clone", + "description": "This is an early ComfyUI testing node for LCM, adapted from [a/https://github.com/0xbitches/sd-webui-lcm](https://github.com/0xbitches/sd-webui-lcm). Unfortunately, it uses the diffusers backend rather than Comfy's model loading mechanism, but the intention here is just to be able to execute LCM inside Comfy.\nNOTE: 0xbitches's 'Latent Consistency Model for ComfyUI' is the original implementation." + }, + { + "author": "doucx", + "title": "ComfyUI_WcpD_Utility_Kit", + "reference": "https://github.com/doucx/ComfyUI_WcpD_Utility_Kit", + "files": [ + "https://github.com/doucx/ComfyUI_WcpD_Utility_Kit" + ], + "install_type": "git-clone", + "description": "Nodes: MergeStrings, ExecStrAsCode, RandnLatentImage. [w/NOTE: This extension includes the ability to execute code as a string in nodes. Be cautious during installation, as it can pose a security risk.]" + }, + { + "author": "AbyssYuan0", + "title": "ComfyUI_BadgerTools", + "reference": "https://github.com/AbyssYuan0/ComfyUI_BadgerTools", + "files": [ + "https://github.com/AbyssYuan0/ComfyUI_BadgerTools" + ], + "install_type": "git-clone", + "description": "Nodes: ImageOverlap-badger, FloatToInt-badger, IntToString-badger, FloatToString-badger." + }, + { + "author": "WSJUSA", + "title": "pre-comfyui-stablsr", + "reference": "https://github.com/WSJUSA/Comfyui-StableSR", + "files": [ + "https://github.com/WSJUSA/Comfyui-StableSR" + ], + "install_type": "git-clone", + "description": "This is a development repository for debugging the migration of StableSR to ComfyUI" + }, + { + "author": "Fannovel16", + "title": "ComfyUI MotionDiff", + "reference": "https://github.com/Fannovel16/ComfyUI-MotionDiff", + "files": [ + "https://github.com/Fannovel16/ComfyUI-MotionDiff" + ], + "install_type": "git-clone", + "description": "N/A" + }, + { + "author": "Dr.Lt.Data", + "title": "ComfyUI-Workflow-Component [WIP]", + "reference": "https://github.com/ltdrdata/ComfyUI-Workflow-Component", + "files": [ + "https://github.com/ltdrdata/ComfyUI-Workflow-Component" + ], + "install_type": "git-clone", + "description": "This extension provides the capability to use ComfyUI Workflow as a component and the ability to use the Image Refiner functionality based on components. NOTE: This is an experimental extension feature with no consideration for backward compatibility and can be highly unstable." 
+ } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/dev/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/dev/extension-node-map.json new file mode 100644 index 0000000000000000000000000000000000000000..6e1628d43c3b861b036efede471bc1f7bd3e892a --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/dev/extension-node-map.json @@ -0,0 +1,11 @@ +{ + "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor": [ + [ + "LaMaPreprocessor", + "lamaPreprocessor" + ], + { + "title_aux": "ComfyUI LaMA Preprocessor [WIP]" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/dev/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/dev/model-list.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/dev/model-list.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/dev/scan.sh b/custom_nodes/ComfyUI-Manager/node_db/dev/scan.sh new file mode 100644 index 0000000000000000000000000000000000000000..5d8d8c48b6e3f48dc1491738c1226f574909c05d --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/dev/scan.sh @@ -0,0 +1,4 @@ +#!/bin/bash +source ../../../../venv/bin/activate +rm .tmp/*.py > /dev/null +python ../../scanner.py diff --git a/custom_nodes/ComfyUI-Manager/node_db/forked/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/forked/custom-node-list.json new file mode 100644 index 0000000000000000000000000000000000000000..403f1e7d7b5f0626f597ff27fa92a7099c5a2c18 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/forked/custom-node-list.json @@ -0,0 +1,14 @@ +{ + "custom_nodes": [ + { + "author": "hustille", + "title": "ComfyUI_Fooocus_KSampler (harrr1 version)", + "reference": "https://github.com/harrr1/ComfyUI_Fooocus_KSampler", + "files": [ + "https://github.com/harrr1/ComfyUI_Fooocus_KSampler" + ], + "install_type": "git-clone", + "description": "A fork branch providing a hotfix for the currently incompatible Fooocus node.

Nodes: KSampler With Refiner (Fooocus). The KSampler from Fooocus as a ComfyUI node

NOTE: This patches basic ComfyUI behaviour; don't use it together with other samplers. Or perhaps do? Other samplers might profit from those changes ... ymmv.

" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/forked/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/forked/extension-node-map.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/forked/extension-node-map.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/forked/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/forked/model-list.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/forked/model-list.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/forked/scan.sh b/custom_nodes/ComfyUI-Manager/node_db/forked/scan.sh new file mode 100644 index 0000000000000000000000000000000000000000..5d8d8c48b6e3f48dc1491738c1226f574909c05d --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/forked/scan.sh @@ -0,0 +1,4 @@ +#!/bin/bash +source ../../../../venv/bin/activate +rm .tmp/*.py > /dev/null +python ../../scanner.py diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/alter-list.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/alter-list.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/alter-list.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/custom-node-list.json new file mode 100644 index 0000000000000000000000000000000000000000..4f366cbb1cf3881af41daa4224ccc1e41f155356 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/custom-node-list.json @@ -0,0 +1,155 @@ +{ + "custom_nodes": [ + { + "author": "RockOfFire", + "title": "CR Animation Nodes", + "reference": "https://github.com/RockOfFire/CR_Animation_Nodes", + "files": [ + "https://github.com/RockOfFire/CR_Animation_Nodes" + ], + "install_type": "git-clone", + "description": "A comprehensive suite of nodes to enhance your animations. These nodes include some features similar to Deforum, and also some new ideas.
NOTE: This node has been merged into Comfyroll Custom Nodes." + }, + { + "author": "tkoenig89", + "title": "Load Image with metadata", + "reference": "https://github.com/tkoenig89/ComfyUI_Load_Image_With_Metadata", + "files": [ + "https://github.com/tkoenig89/ComfyUI_Load_Image_With_Metadata" + ], + "install_type": "git-clone", + "description": "A custom node for ComfyUI to read generation data from images (prompt, seed, size...). This could be used when upscaling generated images to use the original prompt and seed." + }, + { + "author": "LucianoCirino", + "title": "Efficiency Nodes for ComfyUI [LEGACY]", + "reference": "https://github.com/LucianoCirino/efficiency-nodes-comfyui", + "files": [ + "https://github.com/LucianoCirino/efficiency-nodes-comfyui" + ], + "install_type": "git-clone", + "description": "A collection of ComfyUI custom nodes to help streamline workflows and reduce total node count.
NOTE: This is the original repository, but it is no longer maintained. Please use the forked version by jags." + }, + { + "author": "GeLi1989", + "title": "roop nodes for ComfyUI", + "reference": "https://github.com/GeLi1989/GK-beifen-ComfyUI_roop", + "files": [ + "https://github.com/GeLi1989/GK-beifen-ComfyUI_roop" + ], + "install_type": "git-clone", + "description": "ComfyUI nodes for the roop A1111 webui script. NOTE: You need to download a model to use this node. NOTE: This has been removed." + }, + { + "author": "ProDALOR", + "title": "comfyui_u2net", + "reference": "https://github.com/ProDALOR/comfyui_u2net", + "files": [ + "https://github.com/ProDALOR/comfyui_u2net" + ], + "install_type": "git-clone", + "description": "Nodes: Load U2Net model, U2Net segmentation, To mask, Segmentation to mask, U2NetBaseNormalization, U2NetMaxNormalization. NOTE: This has been removed." + }, + { + "author": "FizzleDorf", + "title": "AIT", + "reference": "https://github.com/FizzleDorf/AIT", + "files": [ + "https://github.com/FizzleDorf/AIT" + ], + "install_type": "git-clone", + "description": "Nodes: Load AITemplate, Load AITemplate (ControlNet), VAE Decode (AITemplate), VAE Encode (AITemplate), VAE Encode (AITemplate, Inpaint). Experimental usage of AITemplate. NOTE: This is a deprecated extension. Use ComfyUI-AIT instead." + }, + { + "author": "chenbaiyujason", + "title": "sc-node-comfyui", + "reference": "https://github.com/chenbaiyujason/sc-node-comfyui", + "files": [ + "https://github.com/chenbaiyujason/sc-node-comfyui" + ], + "install_type": "git-clone", + "description": "Nodes for GPT interaction and text manipulation" + }, + { + "author": "asd417", + "title": "CheckpointTomeLoader", + "reference": "https://github.com/asd417/tomeSD_for_Comfy", + "files": [ + "https://github.com/ltdrdata/ComfyUI-tomeSD-installer" + ], + "install_type": "git-clone", + "description": "tomeSD (https://github.com/dbolya/tomesd) applied to the ComfyUI Stable Diffusion UI via a custom node. Note: In vanilla ComfyUI, the TomePatchModel node is provided as a built-in feature." + }, + { + "author": "gamert", + "title": "ComfyUI_tagger", + "reference": "https://github.com/gamert/ComfyUI_tagger", + "pip": ["gradio"], + "files": [ + "https://github.com/gamert/ComfyUI_tagger" + ], + "install_type": "git-clone", + "description": "Nodes: CLIPTextEncodeTaggerDD, ImageTaggerDD.

WARNING: Installing the current version is causing an issue where ComfyUI fails to start.

" + }, + { + "author": "Fannovel16", + "title": "ControlNet Preprocessors", + "reference": "https://github.com/Fannovel16/comfy_controlnet_preprocessors", + "files": [ + "https://github.com/Fannovel16/comfy_controlnet_preprocessors" + ], + "install_type": "git-clone", + "description": "ControlNet Preprocessors. (To use this extension, you need to download the required model file from Install Models)

NOTE: Please uninstall this custom node and instead install 'ComfyUI's ControlNet Auxiliary Preprocessors' from the default channel.
To use nodes belonging to controlnet v1 such as Canny_Edge_Preprocessor, MIDAS_Depth_Map_Preprocessor, Uniformer_SemSegPreprocessor, etc., you need to copy the config.yaml.example file to config.yaml and change skip_v1: True to skip_v1: False.

" + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments/sampler_tonemap", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sampler_tonemap.py" + ], + "install_type": "copy", + "description": "ModelSamplerTonemapNoiseTest a node that makes the sampler use a simple tonemapping algorithm to tonemap the noise. It will let you use higher CFG without breaking the image. To using higher CFG lower the multiplier value. Similar to Dynamic Thresholding extension of A1111. " + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments/sampler_rescalecfg", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sampler_rescalecfg.py" + ], + "install_type": "copy", + "description": "RescaleClassifierFreeGuidance improves the problem of images being degraded by high CFG.To using higher CFG lower the multiplier value. Similar to Dynamic Thresholding extension of A1111. (reference paper)

It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.

" + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments/advanced_model_merging", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/advanced_model_merging.py" + ], + "install_type": "copy", + "description": "This provides a detailed model merge feature based on block weight. ModelMergeBlock, in vanilla ComfyUI, allows for adjusting the ratios of input/middle/output layers, but this node provides ratio adjustments for all blocks within each layer.

It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.

" + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments/sdxl_model_merging", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sdxl_model_merging.py" + ], + "install_type": "copy", + "description": "These nodes provide the capability to merge SDXL base models.

It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.

" + }, + { + "author": "comfyanonymous", + "title": "ComfyUI_experiments/reference_only", + "reference": "https://github.com/comfyanonymous/ComfyUI_experiments", + "files": [ + "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/reference_only.py" + ], + "install_type": "copy", + "description": "This node provides functionality corresponding to Reference only in Controlnet.

It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.

" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json new file mode 100644 index 0000000000000000000000000000000000000000..072c3bb5e8bd05b6f14f6df25386dc1e1010a137 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json @@ -0,0 +1,4 @@ +{ + "items": [ + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json new file mode 100644 index 0000000000000000000000000000000000000000..3bda1d94eca007508d06672f2c1d5f8cfa4a1511 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json @@ -0,0 +1,876 @@ +{ + "custom_nodes": [ + { + "author": "jtrue", + "title": "ComfyUI-JaRue", + "reference": "https://github.com/jtrue/ComfyUI-JaRue", + "files": [ + "https://github.com/jtrue/ComfyUI-JaRue" + ], + "install_type": "git-clone", + "description": "A collection of nodes powering a tensor oracle on a home network with automation" + }, + { + "author": "filliptm", + "title": "ComfyUI_Fill-Nodes", + "reference": "https://github.com/filliptm/ComfyUI_Fill-Nodes", + "files": [ + "https://github.com/filliptm/ComfyUI_Fill-Nodes" + ], + "install_type": "git-clone", + "description": "Nodes:FL Image Randomizer. The start of a pack that I will continue to build out to fill the gaps of nodes and functionality that I feel is missing in comfyUI" + }, + { + "author": "thecooltechguy", + "title": "ComfyUI-MagicAnimate", + "reference": "https://github.com/thecooltechguy/ComfyUI-MagicAnimate", + "files": [ + "https://github.com/thecooltechguy/ComfyUI-MagicAnimate" + ], + "install_type": "git-clone", + "description": "Easily use Magic Animate within ComfyUI!" + }, + { + "author": "knuknX", + "title": "ComfyUI-Image-Tools", + "reference": "https://github.com/knuknX/ComfyUI-Image-Tools", + "files": [ + "https://github.com/knuknX/ComfyUI-Image-Tools" + ], + "install_type": "git-clone", + "description": "Nodes:BatchImageResizeProcessor, SingleImagePathLoader, SingleImageUrlLoader" + }, + { + "author": "11cafe", + "title": "ComfyUI Workspace Manager - Comfyspace", + "reference": "https://github.com/11cafe/comfyui-workspace-manager", + "files": [ + "https://github.com/11cafe/comfyui-workspace-manager" + ], + "install_type": "git-clone", + "description": "A ComfyUI custom node for project management to centralize the management of all your workflows in one place. Seamlessly switch between workflows, create and update them within a single workspace, like Google Docs." 
+ }, + { + "author": "AustinMroz", + "title": "SpliceTools", + "reference": "https://github.com/AustinMroz/ComfyUI-SpliceTools", + "files": [ + "https://github.com/AustinMroz/ComfyUI-SpliceTools" + ], + "install_type": "git-clone", + "description": "Experimental utility nodes with a focus on manipulation of noised latents" + }, + { + "author": "asagi4", + "title": "asagi4/comfyui-utility-nodes", + "reference": "https://github.com/asagi4/comfyui-utility-nodes", + "files": [ + "https://github.com/asagi4/comfyui-utility-nodes" + ], + "install_type": "git-clone", + "description": "Nodes:MUJinjaRender, MUSimpleWildcard" + }, + { + "author": "asagi4", + "title": "ComfyUI-CADS", + "reference": "https://github.com/asagi4/ComfyUI-CADS", + "files": [ + "https://github.com/asagi4/ComfyUI-CADS" + ], + "install_type": "git-clone", + "description": "Attempts to implement [a/CADS](https://arxiv.org/abs/2310.17347) for ComfyUI. Credit also to the [a/A1111 implementation](https://github.com/v0xie/sd-webui-cads/tree/main) that I used as a reference." + }, + { + "author": "Electrofried", + "title": "OpenAINode", + "reference": "https://github.com/Electrofried/ComfyUI-OpenAINode", + "files": [ + "https://github.com/Electrofried/ComfyUI-OpenAINode" + ], + "install_type": "git-clone", + "description": "A simple node for hooking into OpenAI API-based servers via ComfyUI" + }, + { + "author": "zcfrank1st", + "title": "comfyui_visual_anagram", + "reference": "https://github.com/zcfrank1st/comfyui_visual_anagrams", + "files": [ + "https://github.com/zcfrank1st/comfyui_visual_anagrams" + ], + "install_type": "git-clone", + "description": "Nodes:visual_anagrams_sample, visual_anagrams_animate" + }, + { + "author": "subtleGradient", + "title": "Touchpad two-finger gesture support for macOS", + "reference": "https://github.com/subtleGradient/TinkerBot-tech-for-ComfyUI-Touchpad", + "files": [ + "https://github.com/subtleGradient/TinkerBot-tech-for-ComfyUI-Touchpad" + ], + "install_type": "git-clone", + "description": "Two-finger scrolling (vertical and horizontal) to pan the canvas. Two-finger pinch to zoom in and out. Command-scroll up and down to zoom in and out. Fixes [a/comfyanonymous/ComfyUI#2059](https://github.com/comfyanonymous/ComfyUI/issues/2059)." + }, + { + "author": "SoftMeng", + "title": "ComfyUI_Mexx_Poster", + "reference": "https://github.com/SoftMeng/ComfyUI_Mexx_Poster", + "files": [ + "https://github.com/SoftMeng/ComfyUI_Mexx_Poster" + ], + "install_type": "git-clone", + "description": "Nodes: ComfyUI_Mexx_Poster" + }, + { + "author": "ningxiaoxiao", + "title": "comfyui-NDI", + "reference": "https://github.com/ningxiaoxiao/comfyui-NDI", + "files": [ + "https://github.com/ningxiaoxiao/comfyui-NDI" + ], + "pip": ["ndi-python"], + "install_type": "git-clone", + "description": "Real-time input/output node for ComfyUI by NDI. Leveraging the powerful linking capabilities of NDI, you can access NDI video stream frames and send images generated by the model to NDI video streams." 
+ }, + { + "author": "Fannovel16", + "title": "ComfyUI-Video-Matting", + "reference": "https://github.com/Fannovel16/ComfyUI-Video-Matting", + "files": [ + "https://github.com/Fannovel16/ComfyUI-Video-Matting" + ], + "install_type": "git-clone", + "description": "A minimalistic implementation of [a/Robust Video Matting (RVM)](https://github.com/PeterL1n/RobustVideoMatting/) in ComfyUI" + }, + { + "author": "Dream Project", + "title": "Dream Video Batches", + "reference": "https://github.com/alt-key-project/comfyui-dream-video-batches", + "files": [ + "https://github.com/alt-key-project/comfyui-dream-video-batches" + ], + "install_type": "git-clone", + "description": "Provides utilities for batch-based video generation workflows (such as AnimateDiff and Stable Video Diffusion)." + }, + { + "author": "bedovyy", + "title": "ComfyUI_NAIDGenerator", + "reference": "https://github.com/bedovyy/ComfyUI_NAIDGenerator", + "files": [ + "https://github.com/bedovyy/ComfyUI_NAIDGenerator" + ], + "install_type": "git-clone", + "description": "This extension helps generate images through NAI." + }, + { + "author": "jags111", + "title": "ComfyUI_Jags_Audiotools", + "reference": "https://github.com/jags111/ComfyUI_Jags_Audiotools", + "files": [ + "https://github.com/jags111/ComfyUI_Jags_Audiotools" + ], + "install_type": "git-clone", + "description": "A collection of amazing audio tools for working with audio and sound files in ComfyUI" + }, + { + "author": "Haoming02", + "title": "ComfyUI Diffusion Color Grading", + "reference": "https://github.com/Haoming02/comfyui-diffusion-cg", + "files": [ + "https://github.com/Haoming02/comfyui-diffusion-cg" + ], + "install_type": "git-clone", + "description": "This is the ComfyUI port of the joint research between me and TimothyAlexisVass. For more information, check out the original [a/Extension](https://github.com/Haoming02/sd-webui-diffusion-cg) for Automatic1111." + }, + { + "author": "Scholar01", + "title": "SComfyUI-Keyframe", + "reference": "https://github.com/Scholar01/ComfyUI-Keyframe", + "files": [ + "https://github.com/Scholar01/ComfyUI-Keyframe" + ], + "install_type": "git-clone", + "description": "Nodes:Keyframe Part, Keyframe Interpolation Part, Keyframe Apply." + }, + { + "author": "WebDev9000", + "title": "WebDev9000-Nodes", + "reference": "https://github.com/WebDev9000/WebDev9000-Nodes", + "files": [ + "https://github.com/WebDev9000/WebDev9000-Nodes" + ], + "install_type": "git-clone", + "description": "Nodes:Ignore Braces, Settings Switch." + }, + { + "author": "vanillacode314", + "title": "Simple Wildcard", + "reference": "https://github.com/vanillacode314/SimpleWildcardsComfyUI", + "files": ["https://github.com/vanillacode314/SimpleWildcardsComfyUI"], + "install_type": "git-clone", + "pip": ["pipe"], + "description": "A simple wildcard node for ComfyUI. Can also be used as a style prompt node." + }, + { + "author": "DrJKL", + "title": "ComfyUI-Anchors", + "reference": "https://github.com/DrJKL/ComfyUI-Anchors", + "files": [ + "https://github.com/DrJKL/ComfyUI-Anchors" + ], + "install_type": "git-clone", + "description": "A ComfyUI extension to add spatial anchors/waypoints to better navigate large workflows." 
+ }, + { + "author": "wmatson", + "title": "easy-comfy-nodes", + "reference": "https://github.com/wmatson/easy-comfy-nodes", + "files": [ + "https://github.com/wmatson/easy-comfy-nodes" + ], + "install_type": "git-clone", + "description": "Nodes: HTTP POST, Empty Dict, Assoc Str, Assoc Dict, Assoc Img, Load Img From URL (EZ), Load Img Batch From URLs (EZ), Video Combine + upload (EZ), ..." + }, + { + "author": "SoftMeng", + "title": "ComfyUI_Mexx_Styler", + "reference": "https://github.com/SoftMeng/ComfyUI_Mexx_Styler", + "files": [ + "https://github.com/SoftMeng/ComfyUI_Mexx_Styler" + ], + "install_type": "git-clone", + "description": "Nodes: ComfyUI Mexx Styler, ComfyUI Mexx Styler Advanced" + }, + { + "author": "zcfrank1st", + "title": "ComfyUI Yolov8", + "reference": "https://github.com/zcfrank1st/Comfyui-Yolov8", + "files": [ + "https://github.com/zcfrank1st/Comfyui-Yolov8" + ], + "install_type": "git-clone", + "description": "Nodes: Yolov8Detection, Yolov8Segmentation. Dead simple yolov8 ComfyUI plugin" + }, + { + "author": "discopixel-studio", + "title": "ComfyUI Discopixel Nodes", + "reference": "https://github.com/discopixel-studio/comfyui-discopixel", + "files": [ + "https://github.com/discopixel-studio/comfyui-discopixel" + ], + "install_type": "git-clone", + "description": "Nodes:TransformTemplateOntoFaceMask, ..." + }, + { + "author": "zhuanqianfish", + "title": "EasyCaptureNode for ComfyUI", + "reference": "https://github.com/zhuanqianfish/ComfyUI-EasyNode", + "files": [ + "https://github.com/zhuanqianfish/ComfyUI-EasyNode" + ], + "install_type": "git-clone", + "description": "Captures window content from other programs; an easy way to combine with LCM for real-time painting" + }, + { + "author": "AbdullahAlfaraj", + "title": "Comfy-Photoshop-SD", + "reference": "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD", + "files": [ + "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD" + ], + "install_type": "git-clone", + "description": "Nodes: load Image with metadata, get config data, load image from base64 string, Load Loras From Prompt, Generate Latent Noise, Combine Two Latents Into Batch, General Purpose Controlnet Unit, ControlNet Script, Content Mask Latent, Auto-Photoshop-SD Seed, Expand and Blur the Mask" + }, + { + "author": "80sVectorz", + "title": "ComfyUI-Static-Primitives", + "reference": "https://github.com/80sVectorz/ComfyUI-Static-Primitives", + "files": [ + "https://github.com/80sVectorz/ComfyUI-Static-Primitives" + ], + "install_type": "git-clone", + "description": "Adds Static Primitives to ComfyUI. Mostly to work with reroute nodes" + }, + { + "author": "kenjiqq", + "title": "qq-nodes-comfyui", + "reference": "https://github.com/kenjiqq/qq-nodes-comfyui", + "files": [ + "https://github.com/kenjiqq/qq-nodes-comfyui" + ], + "install_type": "git-clone", + "description": "Nodes:Any List, Image Accumulator Start, Image Accumulator End, Load Lines From Text File, XY Grid Helper, Slice List, Axis To String/Int/Float/Model, ..." + }, + { + "author": "fearnworks", + "title": "Fearnworks Custom Nodes", + "reference": "https://github.com/fearnworks/ComfyUI_FearnworksNodes", + "files": [ + "https://github.com/fearnworks/ComfyUI_FearnworksNodes/raw/main/fw_nodes.py" + ], + "install_type": "copy", + "description": "A collection of ComfyUI nodes. These nodes are tailored for specific tasks, such as counting files in directories and sorting text segments based on token counts. Currently this is only tested on SDXL 1.0 models. 
An additional switch is needed to handle 1.x" + }, + { + "author": "hayden-fr", + "title": "ComfyUI-Image-Browsing", + "reference": "https://github.com/hayden-fr/ComfyUI-Image-Browsing", + "files": [ + "https://github.com/hayden-fr/ComfyUI-Image-Browsing" + ], + "install_type": "git-clone", + "description": "Image Browsing: browsing, download and delete." + }, + { + "author": "kinfolk0117", + "title": "TiledIPAdapter", + "reference": "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter", + "files": [ + "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter" + ], + "install_type": "git-clone", + "description": "Proof of concept on how to use IPAdapter to control tiled upscaling. NOTE: You need to have 'ComfyUI_IPAdapter_plus' installed." + }, + { + "author": "komojini", + "title": "ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes", + "reference": "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes", + "files": [ + "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes" + ], + "install_type": "git-clone", + "description": "Nodes:XL DreamBooth LoRA, S3 Bucket LoRA" + }, + { + "author": "42lux", + "title": "ComfyUI-safety-checker", + "reference": "https://github.com/42lux/ComfyUI-safety-checker", + "files": [ + "https://github.com/42lux/ComfyUI-safety-checker" + ], + "install_type": "git-clone", + "description": "An NSFW/Safety Checker Node for ComfyUI." + }, + { + "author": "ZHO-ZHO-ZHO", + "title": "ComfyUI-Text_Image-Composite", + "reference": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite", + "files": [ + "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite" + ], + "install_type": "git-clone", + "description": "Nodes:Text_Image_Zho, Text_Image_Multiline_Zho, RGB_Image_Zho, AlphaChanelAddByMask, ImageComposite_Zho, ..." + }, + { + "author": "sergekatzmann", + "title": "ComfyUI_Nimbus-Pack", + "reference": "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack", + "files": [ + "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack" + ], + "install_type": "git-clone", + "description": "Nodes:Image Square Adapter Node, Image Resize And Crop Node" + }, + { + "author": "Danand", + "title": "ComfyUI-ComfyCouple", + "reference": "https://github.com/Danand/ComfyUI-ComfyCouple", + "files": [ + "https://github.com/Danand/ComfyUI-ComfyCouple" + ], + "install_type": "git-clone", + "description": "Simple custom node which helps to generate images of actual couples." + }, + { + "author": "thecooltechguy", + "title": "ComfyUI Stable Video Diffusion", + "reference": "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion", + "files": [ + "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion" + ], + "install_type": "git-clone", + "description": "Easily use Stable Video Diffusion inside ComfyUI!" + }, + { + "author": "toyxyz", + "title": "ComfyUI_toyxyz_test_nodes", + "reference": "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes", + "files": [ + "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes" + ], + "install_type": "git-clone", + "description": "This node was created to send a webcam to ComfyUI in real time. This node is recommended for use with LCM." + }, + { + "author": "kijai", + "title": "ComfyUI-SVD", + "reference": "https://github.com/kijai/ComfyUI-SVD", + "files": [ + "https://github.com/kijai/ComfyUI-SVD" + ], + "install_type": "git-clone", + "description": "Preliminary use of SVD in ComfyUI.\nNOTE: Quick Implementation, Unstable. See details in the repository." 
+ }, + { + "author": "bronkula", + "title": "comfyui-fitsize", + "reference": "https://github.com/bronkula/comfyui-fitsize", + "files": [ + "https://github.com/bronkula/comfyui-fitsize" + ], + "install_type": "git-clone", + "description": "Nodes:Fit Size From Int/Image/Resize, Load Image And Resize To Fit, Pick Image From Batch/List, Crop Image Into Even Pieces, Image Region To Mask... A simple set of nodes for making an image fit within a bounding box" + }, + { + "author": "drago87", + "title": "ComfyUI_Dragos_Nodes", + "reference": "https://github.com/drago87/ComfyUI_Dragos_Nodes", + "files": [ + "https://github.com/drago87/ComfyUI_Dragos_Nodes" + ], + "install_type": "git-clone", + "description": "Nodes:File Padding, Image Info, VAE Loader With Name" + }, + { + "author": "ansonkao", + "title": "comfyui-geometry", + "reference": "https://github.com/ansonkao/comfyui-geometry", + "files": [ + "https://github.com/ansonkao/comfyui-geometry" + ], + "install_type": "git-clone", + "description": "Nodes: Mask to Centroid, Mask to Eigenvector. A small collection of custom nodes for use with ComfyUI, for geometry calculations" + }, + { + "author": "GTSuya-Studio", + "title": "ComfyUI-GTSuya-Nodes", + "reference": "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes", + "files": [ + "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes" + ], + "install_type": "git-clone", + "description": "ComfyUI-GTSuya-Nodes is a ComfyUI extension designed to add wildcard support to ComfyUI. Wildcards allow you to use __name__ syntax in your prompt to get a random line from a file named name.txt in a wildcards directory." + }, + { + "author": "jojkaart", + "title": "ComfyUI-sampler-lcm-alternative", + "reference": "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative", + "files": [ + "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative" + ], + "install_type": "git-clone", + "description": "Nodes:LCMScheduler, SamplerLCMAlternative, SamplerLCMCycle. ComfyUI custom sampler nodes that add new, improved LCM sampler functions" + }, + { + "author": "LonicaMewinsky", + "title": "ComfyUI-RawSaver", + "reference": "https://github.com/LonicaMewinsky/ComfyUI-RawSaver", + "files": [ + "https://github.com/LonicaMewinsky/ComfyUI-RawSaver" + ], + "install_type": "git-clone", + "description": "Nodes:SaveTifImage. ComfyUI custom node for saving an image as a uint16 TIF file." + }, + { + "author": "natto-maki", + "title": "ComfyUI-NegiTools", + "reference": "https://github.com/natto-maki/ComfyUI-NegiTools", + "files": [ + "https://github.com/natto-maki/ComfyUI-NegiTools" + ], + "install_type": "git-clone", + "description": "Nodes:OpenAI DALLe3, OpenAI Translate to English, String Function, Seed Generator" + }, + { + "author": "wutipong", + "title": "ComfyUI-TextUtils", + "reference": "https://github.com/wutipong/ComfyUI-TextUtils", + "files": [ + "https://github.com/wutipong/ComfyUI-TextUtils" + ], + "install_type": "git-clone", + "description": "Nodes:Create N-Token String" + }, + { + "author": "Feidorian", + "title": "feidorian-ComfyNodes", + "reference": "https://github.com/Feidorian/feidorian-ComfyNodes", + "nodename_pattern": "^Feidorian_", + "files": [ + "https://github.com/Feidorian/feidorian-ComfyNodes" + ], + "install_type": "git-clone", + "description": "This extension provides various custom nodes. 
literals, loaders, logic, output, switches" + }, + { + "author": "kinfolk0117", + "title": "ComfyUI_GradientDeepShrink", + "reference": "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink", + "files": [ + "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink" + ], + "install_type": "git-clone", + "description": "Nodes:GradientPatchModelAddDownscale (Kohya Deep Shrink)." + }, + { + "author": "Niutonian", + "title": "ComfyUi-NoodleWebcam", + "reference": "https://github.com/Niutonian/ComfyUi-NoodleWebcam", + "files": [ + "https://github.com/Niutonian/ComfyUi-NoodleWebcam" + ], + "install_type": "git-clone", + "description": "Nodes:Noodle webcam is a node that records frames and sends them to your favourite node." + }, + { + "author": "Umikaze-job", + "title": "select_folder_path_easy", + "reference": "https://github.com/Umikaze-job/select_folder_path_easy", + "files": [ + "https://github.com/Umikaze-job/select_folder_path_easy" + ], + "install_type": "git-clone", + "description": "This extension simply connects the nodes and specifies the output path of the generated images to a manageable path." + }, + { + "author": "amorano", + "title": "Jovimetrix Composition Nodes", + "reference": "https://github.com/Amorano/Jovimetrix", + "files": [ + "https://github.com/Amorano/Jovimetrix" + ], + "nodename_pattern": " \\(jov\\)$", + "install_type": "git-clone", + "description": "Compose like Substance Designer. Webcams, Media Streams (in/out), Tick animation, Color correction, Geometry manipulation, Pixel shader, Polygonal shape generator, Remap image geometry and color. Heavily inspired by the WAS and MTB Node Suites" + }, + { + "author": "romeobuilderotti", + "title": "ComfyUI PNG Metadata", + "reference": "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata", + "files": [ + "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata" + ], + "install_type": "git-clone", + "description": "Add custom Metadata fields to your saved PNG files." + }, + { + "author": "ka-puna", + "title": "comfyui-yanc", + "reference": "https://github.com/ka-puna/comfyui-yanc", + "files": [ + "https://github.com/ka-puna/comfyui-yanc" + ], + "install_type": "git-clone", + "description": "NOTE: Concatenate Strings, Format Datetime String, Integer Caster, Multiline String, Truncate String. Yet Another Node Collection, a repository of simple nodes for ComfyUI. This repository eases the addition or removal of custom nodes to itself." + }, + { + "author": "TheBarret", + "title": "ZSuite", + "reference": "https://github.com/TheBarret/ZSuite", + "files": [ + "https://github.com/TheBarret/ZSuite" + ], + "install_type": "git-clone", + "description": "Nodes:Prompter, RF Noise, SeedMod." + }, + { + "author": "palant", + "title": "Extended Save Image for ComfyUI", + "reference": "https://github.com/palant/extended-saveimage-comfyui", + "files": [ + "https://github.com/palant/extended-saveimage-comfyui" + ], + "install_type": "git-clone", + "description": "This custom node is largely identical to the usual Save Image but allows saving images also in JPEG and WEBP formats, the latter with both lossless and lossy compression. Metadata is embedded in the images as usual, and the resulting images can be used to load a workflow." + }, + { + "author": "LonicaMewinsky", + "title": "ComfyBreakAnim", + "reference": "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame", + "files": [ + "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame" + ], + "install_type": "git-clone", + "description": "Nodes:BreakFrames, GetKeyFrames, MakeGrid." 
+ }, + { + "author": "gemell1", + "title": "ComfyUI_GMIC", + "reference": "https://github.com/gemell1/ComfyUI_GMIC", + "files": [ + "https://github.com/gemell1/ComfyUI_GMIC" + ], + "install_type": "git-clone", + "description": "Nodes:GMIC Image Processing." + }, + { + "author": "peteromallet", + "title": "ComfyUI-Creative-Interpolation [Beta]", + "reference": "https://github.com/peteromallet/ComfyUI-Creative-Interpolation", + "files": [ + "https://github.com/peteromallet/ComfyUI-Creative-Interpolation" + ], + "install_type": "git-clone", + "description": "This is a ComfyUI node for batch creative interpolation. The goal is to allow you to input a batch of images, and to provide a range of simple settings to control how the images are interpolated between." + }, + { + "author": "martijnat", + "title": "comfyui-previewlatent", + "reference": "https://github.com/martijnat/comfyui-previewlatent", + "files": [ + "https://github.com/martijnat/comfyui-previewlatent" + ], + "install_type": "git-clone", + "description": "A ComfyUI plugin for previewing latents without VAE decoding. Useful for showing intermediate results, and can be used as a faster 'preview image' if you don't want to use VAE decode." + }, + { + "author": "whmc76", + "title": "ComfyUI-Openpose-Editor-Plus", + "reference": "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus", + "files": [ + "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus" + ], + "install_type": "git-clone", + "description": "Nodes:Openpose Editor Plus" + }, + { + "author": "Off-Live", + "title": "ComfyUI-off-suite", + "reference": "https://github.com/Off-Live/ComfyUI-off-suite", + "files": [ + "https://github.com/Off-Live/ComfyUI-off-suite" + ], + "install_type": "git-clone", + "description": "Nodes:Image Crop Fit, OFF SEGS to Image, Crop Center wigh SEGS, Watermarking, GW Number Formatting Node." + }, + { + "author": "laksjdjf", + "title": "LCMSampler-ComfyUI", + "reference": "https://github.com/laksjdjf/LCMSampler-ComfyUI", + "files": [ + "https://github.com/laksjdjf/LCMSampler-ComfyUI" + ], + "install_type": "git-clone", + "description": "This extension node is intended for the use of LCM conversion for SSD-1B-anime. It does not guarantee operation with the original LCM (as it cannot load weights in the current version). To take advantage of fast generation with LCM, a node for using TAESD as a decoder is also provided. This is inspired by ComfyUI-OtherVAEs." + }, + { + "author": "palant", + "title": "Integrated Nodes for ComfyUI", + "reference": "https://github.com/palant/integrated-nodes-comfyui", + "files": [ + "https://github.com/palant/integrated-nodes-comfyui" + ], + "install_type": "git-clone", + "description": "This tool will turn entire workflows or parts of them into single integrated nodes. In a way, it is similar to the Node Templates functionality but hides the inner structure. This is useful if all you want is to reuse and quickly configure a bunch of nodes without caring how they are interconnected." + }, + { + "author": "palant", + "title": "Image Resize for ComfyUI", + "reference": "https://github.com/palant/image-resize-comfyui", + "files": [ + "https://github.com/palant/image-resize-comfyui" + ], + "install_type": "git-clone", + "description": "This custom node provides various tools for resizing images. The goal is resizing without distorting proportions, yet without having to perform any calculations with the size of the original image. If a mask is present, it is resized and modified along with the image." 
+ }, + { + "author": "M1kep", + "title": "ComfyUI-KepOpenAI", + "reference": "https://github.com/M1kep/ComfyUI-KepOpenAI", + "files": [ + "https://github.com/M1kep/ComfyUI-KepOpenAI" + ], + "install_type": "git-clone", + "description": "ComfyUI-KepOpenAI is a user-friendly node that serves as an interface to the GPT-4 with Vision (GPT-4V) API. This integration facilitates the processing of images coupled with text prompts, leveraging the capabilities of the OpenAI API to generate text completions that are contextually relevant to the provided inputs." + }, + { + "author": "bmad4ever", + "title": "comfyui_ab_sampler", + "reference": "https://github.com/bmad4ever/comfyui_ab_samplercustom", + "files": [ + "https://github.com/bmad4ever/comfyui_ab_samplercustom" + ], + "install_type": "git-clone", + "description": "Experimental sampler node. Sampling alternates between A and B inputs until only one remains, starting with A. B steps run over a 2x2 grid, where three quarters of the grid are copies of the original input latent. When the optional mask is used, the region outside the defined ROI is copied from the original latent at the end of every step." + }, + { + "author": "AbyssYuan0", + "title": "ComfyUI_BadgerTools", + "reference": "https://github.com/AbyssYuan0/ComfyUI_BadgerTools", + "files": [ + "https://github.com/AbyssYuan0/ComfyUI_BadgerTools" + ], + "install_type": "git-clone", + "description": "Nodes:ImageOverlap-badger, FloatToInt-badger, IntToString-badger, FloatToString-badger, ImageNormalization-badger, ImageScaleToSide-badger, NovelToFizz-badger." + }, + { + "author": "fexli", + "title": "fexli-util-node-comfyui", + "reference": "https://github.com/fexli/fexli-util-node-comfyui", + "files": [ + "https://github.com/fexli/fexli-util-node-comfyui" + ], + "install_type": "git-clone", + "description": "Nodes:FEImagePadForOutpaint, FEColorOut, FEColor2Image, FERandomizedColor2Image" + }, + { + "author": "nagolinc", + "title": "ComfyUI_FastVAEDecorder_SDXL", + "reference": "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL", + "files": [ + "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL" + ], + "install_type": "git-clone", + "description": "Based off of: [a/Birch-san/diffusers-play/approx_vae](https://github.com/Birch-san/diffusers-play/tree/main/approx_vae). This ComfyUI node allows you to quickly preview SDXL 1.0 latents." + }, + { + "author": "jags111", + "title": "ComfyUI_Jags_VectorMagic", + "reference": "https://github.com/jags111/ComfyUI_Jags_VectorMagic", + "files": [ + "https://github.com/jags111/ComfyUI_Jags_VectorMagic" + ], + "install_type": "git-clone", + "description": "A collection of nodes to explore vector and image manipulation" + }, + { + "author": "Trung0246", + "title": "ComfyUI-0246", + "reference": "https://github.com/Trung0246/ComfyUI-0246", + "files": [ + "https://github.com/Trung0246/ComfyUI-0246" + ], + "install_type": "git-clone", + "description": "Nodes: Highway, Junction. Random nodes for ComfyUI I made to solve my struggle with ComfyUI. They have varying quality." + }, + { + "author": "PCMonsterx", + "title": "ComfyUI-CSV-Loader", + "reference": "https://github.com/PCMonsterx/ComfyUI-CSV-Loader", + "files": [ + "https://github.com/PCMonsterx/ComfyUI-CSV-Loader" + ], + "install_type": "git-clone", + "description": "CSV Loader for prompt building within the ComfyUI interface. Allows access to positive/negative prompts associated with a name. Selections are pulled from CSV files." 
+ }, + { + "author": "IAmMatan.com", + "title": "ComfyUI Serving toolkit", + "reference": "https://github.com/matan1905/ComfyUI-Serving-Toolkit", + "files": [ + "https://github.com/matan1905/ComfyUI-Serving-Toolkit" + ], + "install_type": "git-clone", + "description": "This extension adds nodes that allow you to easily serve your workflow (for example, using a Discord bot)" + }, + { + "author": "ParmanBabra", + "title": "ComfyUI-Malefish-Custom-Scripts", + "reference": "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts", + "files": [ + "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts" + ], + "install_type": "git-clone", + "description": "Nodes:Multi Lora Loader, Random (Prompt), Combine (Prompt), CSV Prompts Loader" + }, + { + "author": "mikkel", + "title": "ComfyUI - Mask Bounding Box", + "reference": "https://github.com/mikkel/comfyui-mask-boundingbox", + "files": [ + "https://github.com/mikkel/comfyui-mask-boundingbox" + ], + "install_type": "git-clone", + "description": "The ComfyUI Mask Bounding Box Plugin provides functionalities for selecting a specific size mask from an image. Can be combined with ClipSEG to replace any aspect of an SDXL image with an SD1.5 output." + }, + { + "author": "THtianhao", + "title": "ComfyUI-FaceChain", + "reference": "https://github.com/THtianhao/ComfyUI-FaceChain", + "files": [ + "https://github.com/THtianhao/ComfyUI-FaceChain" + ], + "install_type": "git-clone", + "description": "Nodes:FC_LoraMerge." + }, + { + "author": "noEmbryo", + "title": "noEmbryo nodes", + "reference": "https://github.com/noembryo/ComfyUI-noEmbryo", + "files": [ + "https://github.com/noembryo/ComfyUI-noEmbryo" + ], + "install_type": "git-clone", + "description": "PromptTermList (1-6) are nodes that help with the creation of prompts inside ComfyUI. Resolution Scale outputs image dimensions using a scale factor. Regex Text Chopper outputs the chopped parts of a text using RegEx." + }, + { + "author": "aianimation55", + "title": "Comfy UI FatLabels", + "reference": "https://github.com/aianimation55/ComfyUI-FatLabels", + "files": [ + "https://github.com/aianimation55/ComfyUI-FatLabels" + ], + "install_type": "git-clone", + "description": "It's a super simple custom node for ComfyUI to generate text, with a font size option. Useful for bigger labelling of nodes, helpful for wider screen captures or tutorials. Plus you can of course use the text within your generations." + }, + { + "author": "idrirap", + "title": "ComfyUI-Lora-Auto-Trigger-Words", + "reference": "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words", + "files": [ + "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words" + ], + "install_type": "git-clone", + "description": "This project is a fork of [a/https://github.com/Extraltodeus/LoadLoraWithTags](https://github.com/Extraltodeus/LoadLoraWithTags). The aim of these custom nodes is to get easy access to the tags used to trigger a LoRA." + }, + { + "author": "jags111", + "title": "Efficiency Nodes for ComfyUI Version 2.0+", + "reference": "https://github.com/jags111/efficiency-nodes-comfyui", + "files": [ + "https://github.com/jags111/efficiency-nodes-comfyui" + ], + "install_type": "git-clone", + "description": "A collection of ComfyUI custom nodes to help streamline workflows and reduce total node count.[w/NOTE: This node was originally created by LucianoCirino, but the [a/original repository](https://github.com/LucianoCirino/efficiency-nodes-comfyui) is no longer maintained and has been forked by a new maintainer. 
To use the forked version, you should uninstall the original version and **REINSTALL** this one.]" + }, + { + "author": "M1kep", + "title": "ComfyUI-OtherVAEs", + "reference": "https://github.com/M1kep/ComfyUI-OtherVAEs", + "files": [ + "https://github.com/M1kep/ComfyUI-OtherVAEs" + ], + "install_type": "git-clone", + "description": "Nodes: TAESD VAE Decode" + }, + { + "author": "Fictiverse", + "title": "ComfyUI Fictiverse Nodes", + "reference": "https://github.com/Fictiverse/ComfyUI_Fictiverse", + "files": [ + "https://github.com/Fictiverse/ComfyUI_Fictiverse" + ], + "install_type": "git-clone", + "description": "Nodes:Color correction." + }, + { + "author": "kinfolk0117", + "title": "SimpleTiles", + "reference": "https://github.com/kinfolk0117/ComfyUI_SimpleTiles", + "files": [ + "https://github.com/kinfolk0117/ComfyUI_SimpleTiles" + ], + "install_type": "git-clone", + "description": "Nodes:TileSplit, TileMerge." + }, + { + "author": "CaptainGrock", + "title": "ComfyUIInvisibleWatermark", + "reference": "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark", + "files": [ + "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark/raw/main/Invisible%20Watermark.py" + ], + "install_type": "copy", + "description": "Nodes:Apply Invisible Watermark, Extract Watermark. Encodes up to 12 characters into an image, from which they can later be extracted." + } + ] +} diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json new file mode 100644 index 0000000000000000000000000000000000000000..8ae906e8dedadbb163850187623efb229562539d --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json @@ -0,0 +1,5495 @@ +{ + "https://gist.github.com/alkemann/7361b8eb966f29c8238fd323409efb68/raw/f9605be0b38d38d3e3a2988f89248ff557010076/alkemann.py": [ + [ + "Int to Text", + "Save A1 Image", + "Seed With Text" + ], + { + "title_aux": "alkemann nodes" + } + ], + "https://github.com/0xbitches/ComfyUI-LCM": [ + [ + "LCM_Sampler", + "LCM_Sampler_Advanced", + "LCM_img2img_Sampler", + "LCM_img2img_Sampler_Advanced" + ], + { + "title_aux": "Latent Consistency Model for ComfyUI" + } + ], + "https://github.com/42lux/ComfyUI-safety-checker": [ + [ + "Safety Checker" + ], + { + "title_aux": "ComfyUI-safety-checker" + } + ], + "https://github.com/80sVectorz/ComfyUI-Static-Primitives": [ + [ + "FloatStaticPrimitive", + "IntStaticPrimitive", + "StringMlStaticPrimitive", + "StringStaticPrimitive" + ], + { + "title_aux": "ComfyUI-Static-Primitives" + } + ], + "https://github.com/AIrjen/OneButtonPrompt": [ + [ + "CreatePromptVariant", + "OneButtonPrompt", + "SavePromptToFile" + ], + { + "title_aux": "One Button Prompt" + } + ], + "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD": [ + [ + "APS_LatentBatch", + "APS_Seed", + "ContentMaskLatent", + "ControlNetScript", + "ControlnetUnit", + "GaussianLatentImage", + "GetConfig", + "LoadImageBase64", + "LoadImageWithMetaData", + "LoadLorasFromPrompt", + "MaskExpansion" + ], + { + "title_aux": "Comfy-Photoshop-SD" + } + ], + "https://github.com/AbyssYuan0/ComfyUI_BadgerTools": [ + [ + "FloatToInt-badger", + "FloatToString-badger", + "ImageNormalization-badger", + "ImageOverlap-badger", + "ImageScaleToSide-badger", + "IntToString-badger", + "StringToFizz-badger", + "TextListToString-badger" + ], + { + "title_aux": "ComfyUI_BadgerTools" + } + ], + "https://github.com/Acly/comfyui-tooling-nodes": [ + [ + "ETN_ApplyMaskToImage", + "ETN_CropImage", + "ETN_LoadImageBase64",
"ETN_LoadMaskBase64", + "ETN_SendImageWebSocket" + ], + { + "title_aux": "ComfyUI Nodes for External Tooling" + } + ], + "https://github.com/Amorano/Jovimetrix": [ + [], + { + "author": "amorano", + "description": "Procedural & Compositing. Includes a Webcam node.", + "nodename_pattern": " \\(jov\\)$", + "title": "Jovimetrix Composition Pack", + "title_aux": "Jovimetrix Composition Nodes" + } + ], + "https://github.com/ArtBot2023/CharacterFaceSwap": [ + [ + "Color Blend", + "Crop Face", + "Exclude Facial Feature", + "Generation Parameter Input", + "Generation Parameter Output", + "Image Full BBox", + "Load BiseNet", + "Load RetinaFace", + "Mask Contour", + "Segment Face", + "Uncrop Face" + ], + { + "title_aux": "Character Face Swap" + } + ], + "https://github.com/ArtVentureX/comfyui-animatediff": [ + [ + "AnimateDiffCombine", + "AnimateDiffLoraLoader", + "AnimateDiffModuleLoader", + "AnimateDiffSampler", + "AnimateDiffSlidingWindowOptions", + "ImageSizeAndBatchSize", + "LoadVideo" + ], + { + "title_aux": "AnimateDiff" + } + ], + "https://github.com/AustinMroz/ComfyUI-SpliceTools": [ + [ + "LogSigmas", + "SpliceDenoised", + "SpliceLatents", + "TemporalSplice" + ], + { + "title_aux": "SpliceTools" + } + ], + "https://github.com/BadCafeCode/masquerade-nodes-comfyui": [ + [ + "Blur", + "Change Channel Count", + "Combine Masks", + "Constant Mask", + "Convert Color Space", + "Create QR Code", + "Create Rect Mask", + "Cut By Mask", + "Get Image Size", + "Image To Mask", + "Make Image Batch", + "Mask By Text", + "Mask Morphology", + "Mask To Region", + "MasqueradeIncrementer", + "Mix Color By Mask", + "Mix Images By Mask", + "Paste By Mask", + "Prune By Mask", + "Separate Mask Components", + "Unary Image Op", + "Unary Mask Op" + ], + { + "title_aux": "Masquerade Nodes" + } + ], + "https://github.com/Beinsezii/bsz-cui-extras": [ + [ + "BSZAbsoluteHires", + "BSZAspectHires", + "BSZColoredLatentImageXL", + "BSZCombinedHires", + "BSZHueChromaXL", + "BSZInjectionKSampler", + "BSZLatentDebug", + "BSZLatentFill", + "BSZLatentGradient", + "BSZLatentHSVAImage", + "BSZLatentOffsetXL", + "BSZLatentRGBAImage", + "BSZLatentbuster", + "BSZPixelbuster", + "BSZPixelbusterHelp", + "BSZPrincipledConditioning", + "BSZPrincipledSampler", + "BSZPrincipledScale", + "BSZStrangeResample" + ], + { + "title_aux": "bsz-cui-extras" + } + ], + "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions/raw/main/EXT_AudioManipulation.py": [ + [ + "BatchJoinAudio", + "CutAudio", + "DuplicateAudio", + "JoinAudio", + "ResampleAudio", + "ReverseAudio", + "StretchAudio" + ], + { + "title_aux": "Waveform Extensions" + } + ], + "https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb": [ + [ + "BNK_AddCLIPSDXLParams", + "BNK_AddCLIPSDXLRParams", + "BNK_CLIPTextEncodeAdvanced", + "BNK_CLIPTextEncodeSDXLAdvanced" + ], + { + "title_aux": "Advanced CLIP Text Encode" + } + ], + "https://github.com/BlenderNeko/ComfyUI_Cutoff": [ + [ + "BNK_CutoffBasePrompt", + "BNK_CutoffRegionsToConditioning", + "BNK_CutoffRegionsToConditioning_ADV", + "BNK_CutoffSetRegions" + ], + { + "title_aux": "ComfyUI Cutoff" + } + ], + "https://github.com/BlenderNeko/ComfyUI_Noise": [ + [ + "BNK_DuplicateBatchIndex", + "BNK_GetSigma", + "BNK_InjectNoise", + "BNK_NoisyLatentImage", + "BNK_SlerpLatent", + "BNK_Unsampler" + ], + { + "title_aux": "ComfyUI Noise" + } + ], + "https://github.com/BlenderNeko/ComfyUI_SeeCoder": [ + [ + "ConcatConditioning", + "SEECoderImageEncode" + ], + { + "title_aux": "SeeCoder [WIP]" + } + ], + 
"https://github.com/BlenderNeko/ComfyUI_TiledKSampler": [ + [ + "BNK_TiledKSampler", + "BNK_TiledKSamplerAdvanced" + ], + { + "title_aux": "Tiled sampling for ComfyUI" + } + ], + "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark/raw/main/Invisible%20Watermark.py": [ + [ + "Apply Invisible Watermark", + "Extract Watermark" + ], + { + "title_aux": "ComfyUIInvisibleWatermark" + } + ], + "https://github.com/Chaoses-Ib/ComfyUI_Ib_CustomNodes": [ + [ + "LoadImageFromPath" + ], + { + "title_aux": "ComfyUI_Ib_CustomNodes" + } + ], + "https://github.com/Clybius/ComfyUI-Latent-Modifiers": [ + [ + "Latent Diffusion Mega Modifier" + ], + { + "title_aux": "ComfyUI-Latent-Modifiers" + } + ], + "https://github.com/Danand/ComfyUI-ComfyCouple": [ + [ + "Attention couple", + "Comfy Couple" + ], + { + "author": "Rei D.", + "description": "If you want to draw two different characters together without blending their features, so you could try to check out this custom node.", + "nickname": "Danand", + "title": "Comfy Couple", + "title_aux": "ComfyUI-ComfyCouple" + } + ], + "https://github.com/Davemane42/ComfyUI_Dave_CustomNode": [ + [ + "ABGRemover", + "ConditioningStretch", + "ConditioningUpscale", + "MultiAreaConditioning", + "MultiLatentComposite" + ], + { + "title_aux": "Visual Area Conditioning / Latent composition" + } + ], + "https://github.com/Derfuu/Derfuu_ComfyUI_ModdedNodes": [ + [ + "ABSNode_DF", + "Absolute value", + "Ceil", + "CeilNode_DF", + "Conditioning area scale by ratio", + "ConditioningSetArea with tuples", + "ConditioningSetAreaEXT_DF", + "ConditioningSetArea_DF", + "CosNode_DF", + "Cosines", + "Divide", + "DivideNode_DF", + "EmptyLatentImage_DF", + "Float", + "Float debug print", + "Float2Tuple_DF", + "FloatDebugPrint_DF", + "FloatNode_DF", + "Floor", + "FloorNode_DF", + "Get image size", + "Get latent size", + "GetImageSize_DF", + "GetLatentSize_DF", + "Image scale by ratio", + "Image scale to side", + "ImageScale_Ratio_DF", + "ImageScale_Side_DF", + "Int debug print", + "Int to float", + "Int to tuple", + "Int2Float_DF", + "IntDebugPrint_DF", + "Integer", + "IntegerNode_DF", + "Latent Scale by ratio", + "Latent Scale to side", + "LatentComposite with tuples", + "LatentScale_Ratio_DF", + "LatentScale_Side_DF", + "MultilineStringNode_DF", + "Multiply", + "MultiplyNode_DF", + "PowNode_DF", + "Power", + "Random", + "RandomFloat_DF", + "SinNode_DF", + "Sinus", + "SqrtNode_DF", + "Square root", + "String debug print", + "StringNode_DF", + "Subtract", + "SubtractNode_DF", + "Sum", + "SumNode_DF", + "TanNode_DF", + "Tangent", + "Text", + "Text box", + "Tuple", + "Tuple debug print", + "Tuple multiply", + "Tuple swap", + "Tuple to floats", + "Tuple to ints", + "Tuple2Float_DF", + "TupleDebugPrint_DF", + "TupleNode_DF" + ], + { + "title_aux": "Derfuu_ComfyUI_ModdedNodes" + } + ], + "https://github.com/Electrofried/ComfyUI-OpenAINode": [ + [ + "OpenAINode" + ], + { + "title_aux": "OpenAINode" + } + ], + "https://github.com/EllangoK/ComfyUI-post-processing-nodes": [ + [ + "ArithmeticBlend", + "AsciiArt", + "Blend", + "Blur", + "CannyEdgeMask", + "ChromaticAberration", + "ColorCorrect", + "ColorTint", + "Dissolve", + "Dither", + "DodgeAndBurn", + "FilmGrain", + "Glow", + "HSVThresholdMask", + "KMeansQuantize", + "KuwaharaBlur", + "Parabolize", + "PencilSketch", + "PixelSort", + "Pixelize", + "Quantize", + "Sharpen", + "SineWave", + "Solarize", + "Vignette" + ], + { + "title_aux": "ComfyUI-post-processing-nodes" + } + ], + "https://github.com/Extraltodeus/LoadLoraWithTags": [ + [ + 
"LoraLoaderTagsQuery" + ], + { + "title_aux": "LoadLoraWithTags" + } + ], + "https://github.com/Extraltodeus/noise_latent_perlinpinpin": [ + [ + "NoisyLatentPerlin" + ], + { + "title_aux": "noise latent perlinpinpin" + } + ], + "https://github.com/Fannovel16/ComfyUI-Frame-Interpolation": [ + [ + "AMT VFI", + "CAIN VFI", + "EISAI VFI", + "FILM VFI", + "FLAVR VFI", + "GMFSS Fortuna VFI", + "IFRNet VFI", + "IFUnet VFI", + "KSampler Gradually Adding More Denoise (efficient)", + "M2M VFI", + "Make Interpolation State List", + "RIFE VFI", + "STMFNet VFI", + "Sepconv VFI" + ], + { + "title_aux": "ComfyUI Frame Interpolation" + } + ], + "https://github.com/Fannovel16/ComfyUI-Loopchain": [ + [ + "EmptyLatentImageLoop", + "FolderToImageStorage", + "ImageStorageExportLoop", + "ImageStorageImport", + "ImageStorageReset", + "LatentStorageExportLoop", + "LatentStorageImport", + "LatentStorageReset" + ], + { + "title_aux": "ComfyUI Loopchain" + } + ], + "https://github.com/Fannovel16/ComfyUI-MotionDiff": [ + [ + "EmptyMotionData", + "ExportSMPLTo3DSoftware", + "MotionCLIPTextEncode", + "MotionDataVisualizer", + "MotionDiffLoader", + "MotionDiffSimpleSampler", + "RenderSMPLMesh", + "SMPLLoader", + "SaveSMPL", + "SmplifyMotionData" + ], + { + "title_aux": "ComfyUI MotionDiff" + } + ], + "https://github.com/Fannovel16/ComfyUI-Video-Matting": [ + [ + "Robust Video Matting" + ], + { + "title_aux": "ComfyUI-Video-Matting" + } + ], + "https://github.com/Fannovel16/comfyui_controlnet_aux": [ + [ + "AIO_Preprocessor", + "AnimalPosePreprocessor", + "AnimeFace_SemSegPreprocessor", + "AnimeLineArtPreprocessor", + "BAE-NormalMapPreprocessor", + "BinaryPreprocessor", + "CannyEdgePreprocessor", + "ColorPreprocessor", + "DWPreprocessor", + "DensePosePreprocessor", + "FakeScribblePreprocessor", + "HEDPreprocessor", + "HintImageEnchance", + "ImageGenResolutionFromImage", + "ImageGenResolutionFromLatent", + "InpaintPreprocessor", + "LeReS-DepthMapPreprocessor", + "LineArtPreprocessor", + "M-LSDPreprocessor", + "Manga2Anime_LineArt_Preprocessor", + "MediaPipe-FaceMeshPreprocessor", + "MiDaS-DepthMapPreprocessor", + "MiDaS-NormalMapPreprocessor", + "OneFormer-ADE20K-SemSegPreprocessor", + "OneFormer-COCO-SemSegPreprocessor", + "OpenposePreprocessor", + "PiDiNetPreprocessor", + "PixelPerfectResolution", + "SAMPreprocessor", + "ScribblePreprocessor", + "Scribble_XDoG_Preprocessor", + "SemSegPreprocessor", + "ShufflePreprocessor", + "TilePreprocessor", + "UniFormer-SemSegPreprocessor", + "Zoe-DepthMapPreprocessor" + ], + { + "author": "tstandley", + "title_aux": "ComfyUI's ControlNet Auxiliary Preprocessors" + } + ], + "https://github.com/Feidorian/feidorian-ComfyNodes": [ + [], + { + "nodename_pattern": "^Feidorian_", + "title_aux": "feidorian-ComfyNodes" + } + ], + "https://github.com/Fictiverse/ComfyUI_Fictiverse": [ + [ + "Add Noise to Image with Mask", + "Color correction", + "Displace Image with Depth", + "Displace Images with Mask", + "Zoom Image with Depth" + ], + { + "title_aux": "ComfyUI Fictiverse Nodes" + } + ], + "https://github.com/FizzleDorf/ComfyUI-AIT": [ + [ + "AIT_Unet_Loader", + "AIT_VAE_Encode_Loader" + ], + { + "title_aux": "ComfyUI-AIT" + } + ], + "https://github.com/FizzleDorf/ComfyUI_FizzNodes": [ + [ + "AbsCosWave", + "AbsSinWave", + "BatchGLIGENSchedule", + "BatchPromptSchedule", + "BatchPromptScheduleEncodeSDXL", + "BatchPromptScheduleLatentInput", + "BatchPromptScheduleNodeFlowEnd", + "BatchPromptScheduleSDXLLatentInput", + "BatchStringSchedule", + "BatchValueSchedule", + 
"BatchValueScheduleLatentInput", + "CalculateFrameOffset", + "ConcatStringSingle", + "CosWave", + "FizzFrame", + "FizzFrameConcatenate", + "Init FizzFrame", + "InvCosWave", + "InvSinWave", + "Lerp", + "PromptSchedule", + "PromptScheduleEncodeSDXL", + "PromptScheduleNodeFlow", + "PromptScheduleNodeFlowEnd", + "SawtoothWave", + "SinWave", + "SquareWave", + "StringConcatenate", + "StringSchedule", + "TriangleWave", + "ValueSchedule", + "convertKeyframeKeysToBatchKeys" + ], + { + "title_aux": "FizzNodes" + } + ], + "https://github.com/GMapeSplat/ComfyUI_ezXY": [ + [ + "ConcatenateString", + "ItemFromDropdown", + "IterationDriver", + "JoinImages", + "LineToConsole", + "NumberFromList", + "NumbersToList", + "PlotImages", + "StringFromList", + "StringToLabel", + "StringsToList", + "ezMath", + "ezXY_AssemblePlot", + "ezXY_Driver" + ], + { + "title_aux": "ezXY scripts and nodes" + } + ], + "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes": [ + [ + "Danbooru (ID)", + "Danbooru (Random)", + "Replace Strings", + "Simple Wildcards", + "Simple Wildcards (Dir.)", + "Wildcards Nodes" + ], + { + "title_aux": "ComfyUI-GTSuya-Nodes" + } + ], + "https://github.com/Gourieff/comfyui-reactor-node": [ + [ + "ReActorFaceSwap", + "ReActorLoadFaceModel", + "ReActorSaveFaceModel" + ], + { + "title_aux": "ReActor Node for ComfyUI" + } + ], + "https://github.com/Haoming02/comfyui-diffusion-cg": [ + [ + "Hook Recenter", + "Hook Recenter XL", + "Normalization", + "NormalizationXL", + "Tensor Debug", + "Unhook Recenter" + ], + { + "title_aux": "ComfyUI Diffusion Color Grading" + } + ], + "https://github.com/JPS-GER/ComfyUI_JPS-Nodes": [ + [ + "Conditioning Switch (JPS)", + "ControlNet Switch (JPS)", + "Crop Image Square (JPS)", + "Crop Image TargetSize (JPS)", + "Disable Enable Switch (JPS)", + "Enable Disable Switch (JPS)", + "Generation Settings (JPS)", + "Generation Settings Pipe (JPS)", + "Generation TXT IMG Settings (JPS)", + "Get Date Time String (JPS)", + "Get Image Size (JPS)", + "IP Adapter Settings (JPS)", + "IP Adapter Settings Pipe (JPS)", + "Image Switch (JPS)", + "Images Masks MultiPipe (JPS)", + "Integer Switch (JPS)", + "Largest Int (JPS)", + "Latent Switch (JPS)", + "Lora Loader (JPS)", + "Mask Switch (JPS)", + "Model Switch (JPS)", + "Multiply Float Float (JPS)", + "Multiply Int Float (JPS)", + "Multiply Int Int (JPS)", + "Resolution Multiply (JPS)", + "Revision Settings (JPS)", + "Revision Settings Pipe (JPS)", + "SDXL Basic Settings (JPS)", + "SDXL Basic Settings Pipe (JPS)", + "SDXL Fundamentals MultiPipe (JPS)", + "SDXL Prompt Handling (JPS)", + "SDXL Prompt Handling Plus (JPS)", + "SDXL Prompt Styler (JPS)", + "SDXL Recommended Resolution Calc (JPS)", + "SDXL Resolutions (JPS)", + "Sampler Scheduler Settings (JPS)", + "Substract Int Int (JPS)", + "Text Concatenate (JPS)", + "VAE Switch (JPS)" + ], + { + "author": "JPS", + "description": "Various nodes to handle SDXL Resolutions, SDXL Basic Settings, IP Adapter Settings, Revision Settings, SDXL Prompt Styler, Crop Image to Square, Crop Image to Target Size, Get Date-Time String, Resolution Multiply, Largest Integer, 5-to-1 Switches for Integer, Images, Latents, Conditioning, Model, VAE, ControlNet", + "nickname": "JPS Custom Nodes", + "title": "JPS Custom Nodes for ComfyUI", + "title_aux": "JPS Custom Nodes for ComfyUI" + } + ], + "https://github.com/Jcd1230/rembg-comfyui-node": [ + [ + "Image Remove Background (rembg)" + ], + { + "title_aux": "Rembg Background Removal Node for ComfyUI" + } + ], + "https://github.com/Jordach/comfy-plasma": [ + 
[ + "JDC_AutoContrast", + "JDC_BlendImages", + "JDC_BrownNoise", + "JDC_Contrast", + "JDC_EqualizeGrey", + "JDC_GaussianBlur", + "JDC_GreyNoise", + "JDC_Greyscale", + "JDC_ImageLoader", + "JDC_ImageLoaderMeta", + "JDC_PinkNoise", + "JDC_Plasma", + "JDC_PlasmaSampler", + "JDC_PowerImage", + "JDC_RandNoise", + "JDC_ResizeFactor" + ], + { + "title_aux": "comfy-plasma" + } + ], + "https://github.com/Kaharos94/ComfyUI-Saveaswebp": [ + [ + "Save_as_webp" + ], + { + "title_aux": "ComfyUI-Saveaswebp" + } + ], + "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet": [ + [ + "ACN_AdvancedControlNetApply", + "ACN_DefaultUniversalWeights", + "ControlNetLoaderAdvanced", + "CustomControlNetWeights", + "CustomT2IAdapterWeights", + "DiffControlNetLoaderAdvanced", + "LatentKeyframe", + "LatentKeyframeBatchedGroup", + "LatentKeyframeGroup", + "LatentKeyframeTiming", + "LoadImagesFromDirectory", + "ScaledSoftControlNetWeights", + "ScaledSoftMaskedUniversalWeights", + "SoftControlNetWeights", + "SoftT2IAdapterWeights", + "TimestepKeyframe" + ], + { + "title_aux": "ComfyUI-Advanced-ControlNet" + } + ], + "https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved": [ + [ + "ADE_AnimateDiffCombine", + "ADE_AnimateDiffLoRALoader", + "ADE_AnimateDiffLoaderV1Advanced", + "ADE_AnimateDiffLoaderWithContext", + "ADE_AnimateDiffModelSettings", + "ADE_AnimateDiffModelSettingsAdvancedAttnStrengths", + "ADE_AnimateDiffModelSettingsSimple", + "ADE_AnimateDiffModelSettings_Release", + "ADE_AnimateDiffUniformContextOptions", + "ADE_AnimateDiffUniformContextOptionsExperimental", + "ADE_AnimateDiffUnload", + "ADE_EmptyLatentImageLarge", + "AnimateDiffLoaderV1", + "CheckpointLoaderSimpleWithNoiseSelect" + ], + { + "title_aux": "AnimateDiff Evolved" + } + ], + "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite": [ + [ + "VHS_DuplicateImages", + "VHS_DuplicateLatents", + "VHS_GetImageCount", + "VHS_GetLatentCount", + "VHS_LoadImages", + "VHS_LoadImagesPath", + "VHS_LoadVideo", + "VHS_LoadVideoPath", + "VHS_MergeImages", + "VHS_MergeLatents", + "VHS_SelectEveryNthImage", + "VHS_SelectEveryNthLatent", + "VHS_SplitImages", + "VHS_SplitLatents", + "VHS_VideoCombine" + ], + { + "title_aux": "ComfyUI-VideoHelperSuite" + } + ], + "https://github.com/LEv145/images-grid-comfy-plugin": [ + [ + "GridAnnotation", + "ImageCombine", + "ImagesGridByColumns", + "ImagesGridByRows", + "LatentCombine" + ], + { + "title_aux": "ImagesGrid" + } + ], + "https://github.com/Lerc/canvas_tab": [ + [ + "Canvas_Tab", + "Send_To_Editor" + ], + { + "author": "Lerc", + "description": "This extension provides a full page image editor with mask support. 
There are two nodes, one to receive images from the editor and one to send images to the editor.", + "nickname": "Canvas Tab", + "title": "Canvas Tab", + "title_aux": "Canvas Tab" + } + ], + "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame": [ + [ + "BreakFrames", + "BreakGrid", + "GetKeyFrames", + "MakeGrid", + "RandomImageFromDir" + ], + { + "title_aux": "ComfyBreakAnim" + } + ], + "https://github.com/LonicaMewinsky/ComfyUI-RawSaver": [ + [ + "SaveTifImage" + ], + { + "title_aux": "ComfyUI-RawSaver" + } + ], + "https://github.com/M1kep/ComfyLiterals": [ + [ + "Checkpoint", + "Float", + "Int", + "KepStringLiteral", + "Lora", + "Operation", + "String" + ], + { + "title_aux": "ComfyLiterals" + } + ], + "https://github.com/M1kep/ComfyUI-KepOpenAI": [ + [ + "KepOpenAI_ImageWithPrompt" + ], + { + "title_aux": "ComfyUI-KepOpenAI" + } + ], + "https://github.com/M1kep/ComfyUI-OtherVAEs": [ + [ + "OtherVAE_Taesd" + ], + { + "title_aux": "ComfyUI-OtherVAEs" + } + ], + "https://github.com/M1kep/Comfy_KepKitchenSink": [ + [ + "KepRotateImage" + ], + { + "title_aux": "Comfy_KepKitchenSink" + } + ], + "https://github.com/M1kep/Comfy_KepListStuff": [ + [ + "Empty Images", + "Image Overlay", + "ImageListLoader", + "Join Float Lists", + "Join Image Lists", + "KepStringList", + "KepStringListFromNewline", + "Kep_JoinListAny", + "Kep_RepeatList", + "Kep_ReverseList", + "Kep_VariableImageBuilder", + "List Length", + "Range(Num Steps) - Float", + "Range(Num Steps) - Int", + "Range(Step) - Float", + "Range(Step) - Int", + "Stack Images", + "XYAny", + "XYImage" + ], + { + "title_aux": "Comfy_KepListStuff" + } + ], + "https://github.com/M1kep/Comfy_KepMatteAnything": [ + [ + "MatteAnything_DinoBoxes", + "MatteAnything_GenerateVITMatte", + "MatteAnything_InitSamPredictor", + "MatteAnything_LoadDINO", + "MatteAnything_LoadVITMatteModel", + "MatteAnything_SAMLoader", + "MatteAnything_SAMMaskFromBoxes", + "MatteAnything_ToTrimap" + ], + { + "title_aux": "Comfy_KepMatteAnything" + } + ], + "https://github.com/M1kep/KepPromptLang": [ + [ + "Build Gif", + "Special CLIP Loader" + ], + { + "title_aux": "KepPromptLang" + } + ], + "https://github.com/ManglerFTW/ComfyI2I": [ + [ + "Color Transfer", + "Combine and Paste", + "Inpaint Segments", + "Mask Ops" + ], + { + "author": "ManglerFTW", + "title": "ComfyI2I", + "title_aux": "ComfyI2I" + } + ], + "https://github.com/NicholasMcCarthy/ComfyUI_TravelSuite": [ + [ + "LatentTravel" + ], + { + "title_aux": "ComfyUI_TravelSuite" + } + ], + "https://github.com/Niutonian/ComfyUi-NoodleWebcam": [ + [ + "WebcamNode" + ], + { + "title_aux": "ComfyUi-NoodleWebcam" + } + ], + "https://github.com/Nourepide/ComfyUI-Allor": [ + [ + "AlphaChanelAdd", + "AlphaChanelAddByMask", + "AlphaChanelAsMask", + "AlphaChanelRemove", + "AlphaChanelRestore", + "ClipClamp", + "ClipVisionClamp", + "ClipVisionOutputClamp", + "ConditioningClamp", + "ControlNetClamp", + "GligenClamp", + "ImageBatchCopy", + "ImageBatchFork", + "ImageBatchGet", + "ImageBatchJoin", + "ImageBatchPermute", + "ImageBatchRemove", + "ImageClamp", + "ImageCompositeAbsolute", + "ImageCompositeAbsoluteByContainer", + "ImageCompositeRelative", + "ImageCompositeRelativeByContainer", + "ImageContainer", + "ImageContainerInheritanceAdd", + "ImageContainerInheritanceMax", + "ImageContainerInheritanceScale", + "ImageContainerInheritanceSum", + "ImageDrawArc", + "ImageDrawArcByContainer", + "ImageDrawChord", + "ImageDrawChordByContainer", + "ImageDrawEllipse", + "ImageDrawEllipseByContainer", + "ImageDrawLine", + 
"ImageDrawLineByContainer", + "ImageDrawPieslice", + "ImageDrawPiesliceByContainer", + "ImageDrawPolygon", + "ImageDrawRectangle", + "ImageDrawRectangleByContainer", + "ImageDrawRectangleRounded", + "ImageDrawRectangleRoundedByContainer", + "ImageEffectsAdjustment", + "ImageEffectsGrayscale", + "ImageEffectsLensBokeh", + "ImageEffectsLensChromaticAberration", + "ImageEffectsLensOpticAxis", + "ImageEffectsLensVignette", + "ImageEffectsLensZoomBurst", + "ImageEffectsNegative", + "ImageEffectsSepia", + "ImageFilterBilateralBlur", + "ImageFilterBlur", + "ImageFilterBoxBlur", + "ImageFilterContour", + "ImageFilterDetail", + "ImageFilterEdgeEnhance", + "ImageFilterEdgeEnhanceMore", + "ImageFilterEmboss", + "ImageFilterFindEdges", + "ImageFilterGaussianBlur", + "ImageFilterGaussianBlurAdvanced", + "ImageFilterMax", + "ImageFilterMedianBlur", + "ImageFilterMin", + "ImageFilterMode", + "ImageFilterRank", + "ImageFilterSharpen", + "ImageFilterSmooth", + "ImageFilterSmoothMore", + "ImageFilterStackBlur", + "ImageNoiseBeta", + "ImageNoiseBinomial", + "ImageNoiseBytes", + "ImageNoiseGaussian", + "ImageSegmentation", + "ImageSegmentationCustom", + "ImageSegmentationCustomAdvanced", + "ImageText", + "ImageTextMultiline", + "ImageTextMultilineOutlined", + "ImageTextOutlined", + "ImageTransformCropAbsolute", + "ImageTransformCropCorners", + "ImageTransformCropRelative", + "ImageTransformPaddingAbsolute", + "ImageTransformPaddingRelative", + "ImageTransformResizeAbsolute", + "ImageTransformResizeClip", + "ImageTransformResizeRelative", + "ImageTransformRotate", + "ImageTransformTranspose", + "LatentClamp", + "MaskClamp", + "ModelClamp", + "StyleModelClamp", + "UpscaleModelClamp", + "VaeClamp" + ], + { + "title_aux": "Allor Plugin" + } + ], + "https://github.com/Nuked88/ComfyUI-N-Nodes": [ + [ + "DynamicPrompt", + "Float Variable", + "FrameInterpolator", + "GPT Loader Simple", + "GPTSampler", + "Integer Variable", + "LoadFramesFromFolder", + "LoadVideo", + "SaveVideo", + "SetMetadataForSaveVideo", + "String Variable" + ], + { + "title_aux": "ComfyUI-N-Nodes" + } + ], + "https://github.com/Off-Live/ComfyUI-off-suite": [ + [ + "Cached Image Load From URL", + "Crop Center wigh SEGS", + "Crop Center with SEGS", + "GW Number Formatting", + "Image Crop Fit", + "Image Resize Fit", + "OFF SEGS to Image", + "Watermarking" + ], + { + "title_aux": "ComfyUI-off-suite" + } + ], + "https://github.com/Onierous/QRNG_Node_ComfyUI/raw/main/qrng_node.py": [ + [ + "QRNG_Node_CSV" + ], + { + "title_aux": "QRNG_Node_ComfyUI" + } + ], + "https://github.com/PCMonsterx/ComfyUI-CSV-Loader": [ + [ + "Load Artists CSV", + "Load Artmovements CSV", + "Load Characters CSV", + "Load Colors CSV", + "Load Composition CSV", + "Load Lighting CSV", + "Load Negative CSV", + "Load Positive CSV", + "Load Settings CSV", + "Load Styles CSV" + ], + { + "title_aux": "ComfyUI-CSV-Loader" + } + ], + "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts": [ + [ + "CSVPromptsLoader", + "CombinePrompt", + "MultiLoraLoader", + "RandomPrompt" + ], + { + "title_aux": "ComfyUI-Malefish-Custom-Scripts" + } + ], + "https://github.com/Pfaeff/pfaeff-comfyui": [ + [ + "AstropulsePixelDetector", + "BackgroundRemover", + "ImagePadForBetterOutpaint", + "Inpainting", + "InpaintingPipelineLoader" + ], + { + "title_aux": "pfaeff-comfyui" + } + ], + "https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes": [ + [ + "CR 3D Camera Drone", + "CR 3D Camera Static", + "CR 3D Polygon", + "CR 3D Solids", + "CR Add Annotation", + "CR Alternate Latents", + "CR Apply 
Annotations", + "CR Apply ControlNet", + "CR Apply LoRA Stack", + "CR Apply Model Merge", + "CR Apply Multi Upscale", + "CR Apply Multi-ControlNet", + "CR Arabic Text RTL", + "CR Aspect Ratio", + "CR Aspect Ratio SDXL", + "CR Batch Process Switch", + "CR Central Schedule", + "CR Check Job Complete", + "CR Checker Pattern", + "CR Clip Input Switch", + "CR Color Bars", + "CR Color Gradient", + "CR Color Panel", + "CR Color Tint", + "CR Combine Schedules", + "CR Comic Panel Templates", + "CR Comic Panel Templates (Advanced)", + "CR Comic Panel Templates Advanced", + "CR Composite Text", + "CR Conditioning Input Switch", + "CR Conditioning Mixer", + "CR Continuous Rotation", + "CR Continuous Track", + "CR Continuous Zoom", + "CR ControlNet Input Switch", + "CR Current Frame", + "CR Cycle Images", + "CR Cycle Images Simple", + "CR Cycle LoRAs", + "CR Cycle Models", + "CR Cycle Styles", + "CR Cycle Text", + "CR Cycle Text Simple", + "CR Debatch Frames", + "CR Draw Perspective Text", + "CR Draw Text", + "CR Encode Scheduled Prompts", + "CR Float To Integer", + "CR Float To String", + "CR Gradient Float", + "CR Gradient Integer", + "CR Halftone Filter", + "CR Halftone Grid", + "CR Hires Fix Process Switch", + "CR Image Border", + "CR Image Grid Panel", + "CR Image Input Switch", + "CR Image Input Switch (4 way)", + "CR Image List", + "CR Image List Simple", + "CR Image Output", + "CR Image Panel", + "CR Image Pipe Edit", + "CR Image Pipe In", + "CR Image Pipe Out", + "CR Image Size", + "CR Image Transition", + "CR Image XY Panel", + "CR Img2Img Process Switch", + "CR Increment Float", + "CR Increment Integer", + "CR Index", + "CR Index Increment", + "CR Index Multiply", + "CR Index Reset", + "CR Input Text List", + "CR Integer Multiple", + "CR Integer To String", + "CR Interpolate Latents", + "CR Interpolate Prompt Weights", + "CR Interpolate Rotation", + "CR Interpolate Track", + "CR Interpolate Zoom", + "CR Job Current Frame", + "CR Job List", + "CR Job Scheduler", + "CR Keyframe List", + "CR Latent Batch Size", + "CR Latent Input Switch", + "CR LoRA List", + "CR LoRA Stack", + "CR Load Animation Frames", + "CR Load Flow Frames", + "CR Load LoRA", + "CR Load Prompt Style", + "CR Load Schedule From File", + "CR Load Scheduled ControlNets", + "CR Load Scheduled LoRAs", + "CR Load Scheduled Models", + "CR Load Workflow", + "CR Load XY Annotation From File", + "CR Mask Text", + "CR Model Input Switch", + "CR Model List", + "CR Model Merge Stack", + "CR Module Input", + "CR Module Output", + "CR Module Pipe Loader", + "CR Multi Upscale Stack", + "CR Multi-ControlNet Stack", + "CR Multi-Panel Meme Template", + "CR Output Flow Frames", + "CR Output Schedule To File", + "CR Overlay Text", + "CR Overlay Transparent Image", + "CR Page Layout", + "CR Pipe Switch", + "CR Polygons", + "CR Popular Meme Templates", + "CR Prompt List", + "CR Prompt List Keyframes", + "CR Prompt Scheduler", + "CR Prompt Text", + "CR Prompt Weight Scheduler", + "CR Radial Gradient", + "CR Random Hex Color", + "CR Random RGB", + "CR SD1.5 Aspect Ratio", + "CR SDXL Aspect Ratio", + "CR SDXL Base Prompt Encoder", + "CR SDXL Prompt Mix Presets", + "CR SDXL Style Text", + "CR Schedule Camera Movements", + "CR Schedule ControlNets", + "CR Schedule Input Switch", + "CR Schedule Styles", + "CR Schedule To ScheduleList", + "CR Seed", + "CR Seed to Int", + "CR Select Model", + "CR Simple Annotations", + "CR Simple Image Watermark", + "CR Simple Meme Template", + "CR Simple Prompt List", + "CR Simple Prompt List Keyframes", + "CR Simple 
Prompt Scheduler", + "CR Simple Schedule", + "CR Simple Text Panel", + "CR Simple Text Scheduler", + "CR Simple Text Watermark", + "CR Simple Value Scheduler", + "CR Spawn Workflow Instance", + "CR Split String", + "CR Starburst Colors", + "CR Starburst Lines", + "CR String To Combo", + "CR String To Number", + "CR Strobe Images", + "CR Style Bars", + "CR Style List", + "CR Switch Model and CLIP", + "CR Text Input Switch", + "CR Text Input Switch (4 way)", + "CR Text List", + "CR Text List Cross Join", + "CR Text List Simple", + "CR Text List To String", + "CR Text Scheduler", + "CR Trigger", + "CR Upscale Image", + "CR VAE Input Switch", + "CR Value", + "CR Value Scheduler", + "CR XY From Folder", + "CR XY Grid", + "CR XY Index", + "CR XY Interpolate", + "CR XY List", + "CR XY Save Grid Image", + "CR XYZ Index", + "CR XYZ Interpolate", + "CR XYZ List" + ], + { + "author": "RockOfFire", + "description": "Custom nodes for SDXL and SD1.5 including Multi-ControlNet, LoRA, Aspect Ratio, Process Switches, and many more nodes.", + "nickname": "Comfyroll Custom Nodes", + "title": "Comfyroll Custom Nodes", + "title_aux": "ComfyUI_Comfyroll_CustomNodes" + } + ], + "https://github.com/SLAPaper/ComfyUI-Image-Selector": [ + [ + "ImageDuplicator", + "ImageSelector", + "LatentDuplicator", + "LatentSelector" + ], + { + "title_aux": "ComfyUI-Image-Selector" + } + ], + "https://github.com/SOELexicon/ComfyUI-LexMSDBNodes": [ + [ + "MSSqlSelectNode", + "MSSqlTableNode" + ], + { + "title_aux": "LexMSDBNodes" + } + ], + "https://github.com/SOELexicon/ComfyUI-LexTools": [ + [ + "AgeClassifierNode", + "ArtOrHumanClassifierNode", + "DocumentClassificationNode", + "FoodCategoryClassifierNode", + "ImageAspectPadNode", + "ImageCaptioning", + "ImageFilterByFloatScoreNode", + "ImageFilterByIntScoreNode", + "ImageQualityScoreNode", + "ImageRankingNode", + "ImageScaleToMin", + "MD5ImageHashNode", + "SamplerPropertiesNode", + "ScoreConverterNode", + "SeedIncrementerNode", + "SegformerNode", + "SegformerNodeMasks", + "SegformerNodeMergeSegments", + "StepCfgIncrementNode" + ], + { + "title_aux": "ComfyUI-LexTools" + } + ], + "https://github.com/SadaleNet/CLIPTextEncodeA1111-ComfyUI/raw/master/custom_nodes/clip_text_encoder_a1111.py": [ + [ + "CLIPTextEncodeA1111", + "RerouteTextForCLIPTextEncodeA1111" + ], + { + "title_aux": "ComfyUI A1111-like Prompt Custom Node Solution" + } + ], + "https://github.com/Scholar01/ComfyUI-Keyframe": [ + [ + "KeyframeApply", + "KeyframeInterpolationPart", + "KeyframePart" + ], + { + "title_aux": "SComfyUI-Keyframe" + } + ], + "https://github.com/SeargeDP/SeargeSDXL": [ + [ + "SeargeAdvancedParameters", + "SeargeCheckpointLoader", + "SeargeConditionMixing", + "SeargeConditioningMuxer2", + "SeargeConditioningMuxer5", + "SeargeConditioningParameters", + "SeargeControlnetAdapterV2", + "SeargeControlnetModels", + "SeargeCustomAfterUpscaling", + "SeargeCustomAfterVaeDecode", + "SeargeCustomPromptMode", + "SeargeDebugPrinter", + "SeargeEnablerInputs", + "SeargeFloatConstant", + "SeargeFloatMath", + "SeargeFloatPair", + "SeargeFreeU", + "SeargeGenerated1", + "SeargeGenerationParameters", + "SeargeHighResolution", + "SeargeImage2ImageAndInpainting", + "SeargeImageAdapterV2", + "SeargeImageSave", + "SeargeImageSaving", + "SeargeInput1", + "SeargeInput2", + "SeargeInput3", + "SeargeInput4", + "SeargeInput5", + "SeargeInput6", + "SeargeInput7", + "SeargeIntegerConstant", + "SeargeIntegerMath", + "SeargeIntegerPair", + "SeargeIntegerScaler", + "SeargeLatentMuxer3", + "SeargeLoraLoader", + "SeargeLoras", 
+ "SeargeMagicBox", + "SeargeModelSelector", + "SeargeOperatingMode", + "SeargeOutput1", + "SeargeOutput2", + "SeargeOutput3", + "SeargeOutput4", + "SeargeOutput5", + "SeargeOutput6", + "SeargeOutput7", + "SeargeParameterProcessor", + "SeargePipelineStart", + "SeargePipelineTerminator", + "SeargePreviewImage", + "SeargePromptAdapterV2", + "SeargePromptCombiner", + "SeargePromptStyles", + "SeargePromptText", + "SeargeSDXLBasePromptEncoder", + "SeargeSDXLImage2ImageSampler", + "SeargeSDXLImage2ImageSampler2", + "SeargeSDXLPromptEncoder", + "SeargeSDXLRefinerPromptEncoder", + "SeargeSDXLSampler", + "SeargeSDXLSampler2", + "SeargeSDXLSamplerV3", + "SeargeSamplerAdvanced", + "SeargeSamplerInputs", + "SeargeSaveFolderInputs", + "SeargeSeparator", + "SeargeStylePreprocessor", + "SeargeTextInputV2", + "SeargeUpscaleModelLoader", + "SeargeUpscaleModels", + "SeargeVAELoader" + ], + { + "title_aux": "SeargeSDXL" + } + ], + "https://github.com/Ser-Hilary/SDXL_sizing/raw/main/conditioning_sizing_for_SDXL.py": [ + [ + "get_aspect_from_image", + "get_aspect_from_ints", + "sizing_node", + "sizing_node_basic", + "sizing_node_unparsed" + ], + { + "title_aux": "SDXL_sizing" + } + ], + "https://github.com/Smuzzies/comfyui_chatbox_overlay/raw/main/chatbox_overlay.py": [ + [ + "Chatbox Overlay" + ], + { + "title_aux": "Chatbox Overlay node for ComfyUI" + } + ], + "https://github.com/SoftMeng/ComfyUI_Mexx_Poster": [ + [ + "ComfyUI_Mexx_Poster" + ], + { + "title_aux": "ComfyUI_Mexx_Poster" + } + ], + "https://github.com/SoftMeng/ComfyUI_Mexx_Styler": [ + [ + "MexxSDXLPromptStyler", + "MexxSDXLPromptStylerAdvanced" + ], + { + "title_aux": "ComfyUI_Mexx_Styler" + } + ], + "https://github.com/Stability-AI/stability-ComfyUI-nodes": [ + [ + "ColorBlend", + "ControlLoraSave", + "GetImageSize" + ], + { + "title_aux": "stability-ComfyUI-nodes" + } + ], + "https://github.com/Sxela/ComfyWarp": [ + [ + "ExtractOpticalFlow", + "LoadFrame", + "LoadFrameFromDataset", + "LoadFrameFromFolder", + "LoadFramePairFromDataset", + "LoadFrameSequence", + "MakeFrameDataset", + "MixConsistencyMaps", + "OffsetNumber", + "ResizeToFit", + "SaveFrame", + "WarpFrame" + ], + { + "title_aux": "ComfyWarp" + } + ], + "https://github.com/TGu-97/ComfyUI-TGu-utils": [ + [ + "MPNReroute", + "MPNSwitch", + "PNSwitch" + ], + { + "title_aux": "TGu Utilities" + } + ], + "https://github.com/THtianhao/ComfyUI-FaceChain": [ + [ + "FCStyleLoraLoad", + "FC_CropAndPaste", + "FC_CropBottom", + "FC_CropFace", + "FC_CropMask", + "FC_FaceDetection", + "FC_FaceFusion", + "FC_MaskOP", + "FC_ReplaceImage", + "FC_Segment", + "FC_StyleLoraLoad" + ], + { + "title_aux": "ComfyUI-FaceChain" + } + ], + "https://github.com/THtianhao/ComfyUI-Portrait-Maker": [ + [ + "PM_BoxCropImage", + "PM_ColorTransfer", + "PM_ExpandMaskBox", + "PM_FaceFusion", + "PM_FaceShapMatch", + "PM_FaceSkin", + "PM_GetImageInfo", + "PM_ImageResizeTarget", + "PM_ImageScaleShort", + "PM_MakeUpTransfer", + "PM_MaskDilateErode", + "PM_MaskMerge2Image", + "PM_PortraitEnhancement", + "PM_RatioMerge2Image", + "PM_ReplaceBoxImg", + "PM_RetinaFace", + "PM_SkinRetouching", + "PM_SuperColorTransfer", + "PM_SuperMakeUpTransfer" + ], + { + "title_aux": "ComfyUI-Portrait-Maker" + } + ], + "https://github.com/TRI3D-LC/tri3d-comfyui-nodes": [ + [ + "tri3d-atr-parse", + "tri3d-atr-parse-batch", + "tri3d-dwpose", + "tri3d-extract-hand", + "tri3d-extract-parts-batch", + "tri3d-extract-parts-batch2", + "tri3d-extract-parts-mask-batch", + "tri3d-fuzzification", + "tri3d-interaction-canny", + "tri3d-pose-adaption", + 
"tri3d-pose-to-image", + "tri3d-position-hands", + "tri3d-position-parts-batch", + "tri3d-skin-feathered-padded-mask", + "tri3d-swap-pixels" + ], + { + "title_aux": "tri3d-comfyui-nodes" + } + ], + "https://github.com/TeaCrab/ComfyUI-TeaNodes": [ + [ + "TC_ColorFill", + "TC_EqualizeCLAHE", + "TC_ImageResize", + "TC_ImageScale", + "TC_MaskBG_DIS", + "TC_RandomColorFill", + "TC_SizeApproximation" + ], + { + "title_aux": "ComfyUI-TeaNodes" + } + ], + "https://github.com/TheBarret/ZSuite": [ + [ + "ZSuite: Prompter", + "ZSuite: RF Noise", + "ZSuite: SeedMod" + ], + { + "title_aux": "ZSuite" + } + ], + "https://github.com/TinyTerra/ComfyUI_tinyterraNodes": [ + [ + "ttN busIN", + "ttN busOUT", + "ttN compareInput", + "ttN concat", + "ttN debugInput", + "ttN float", + "ttN hiresfixScale", + "ttN imageOutput", + "ttN imageREMBG", + "ttN int", + "ttN multiModelMerge", + "ttN pipe2BASIC", + "ttN pipe2DETAILER", + "ttN pipeEDIT", + "ttN pipeEncodeConcat", + "ttN pipeIN", + "ttN pipeKSampler", + "ttN pipeKSamplerAdvanced", + "ttN pipeKSamplerSDXL", + "ttN pipeLoader", + "ttN pipeLoaderSDXL", + "ttN pipeLoraStack", + "ttN pipeOUT", + "ttN seed", + "ttN seedDebug", + "ttN text", + "ttN text3BOX_3WAYconcat", + "ttN text7BOX_concat", + "ttN textDebug", + "ttN xyPlot" + ], + { + "author": "tinyterra", + "description": "This extension offers various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more.", + "nickname": "ttNodes", + "nodename_pattern": "^ttN ", + "title": "tinyterraNodes", + "title_aux": "tinyterraNodes" + } + ], + "https://github.com/Tropfchen/ComfyUI-Embedding_Picker": [ + [ + "EmbeddingPicker" + ], + { + "title_aux": "Embedding Picker" + } + ], + "https://github.com/Tropfchen/ComfyUI-yaResolutionSelector": [ + [ + "YARS", + "YARSAdv" + ], + { + "title_aux": "YARS: Yet Another Resolution Selector" + } + ], + "https://github.com/Trung0246/ComfyUI-0246": [ + [ + "0246.Beautify", + "0246.Convert", + "0246.Count", + "0246.Highway", + "0246.Hold", + "0246.Junction", + "0246.JunctionBatch", + "0246.Loop", + "0246.Mimic", + "0246.RandomInt", + "0246.Stringify" + ], + { + "title_aux": "ComfyUI-0246" + } + ], + "https://github.com/Ttl/ComfyUi_NNLatentUpscale": [ + [ + "NNLatentUpscale" + ], + { + "title_aux": "ComfyUI Neural network latent upscale custom node" + } + ], + "https://github.com/Umikaze-job/select_folder_path_easy": [ + [ + "SelectFolderPathEasy" + ], + { + "title_aux": "select_folder_path_easy" + } + ], + "https://github.com/WASasquatch/ASTERR": [ + [ + "ASTERR", + "SaveASTERR" + ], + { + "title_aux": "ASTERR" + } + ], + "https://github.com/WASasquatch/ComfyUI_Preset_Merger": [ + [ + "Preset_Model_Merge" + ], + { + "title_aux": "ComfyUI Preset Merger" + } + ], + "https://github.com/WASasquatch/FreeU_Advanced": [ + [ + "FreeU (Advanced)" + ], + { + "title_aux": "FreeU_Advanced" + } + ], + "https://github.com/WASasquatch/PPF_Noise_ComfyUI": [ + [ + "Blend Latents (PPF Noise)", + "Cross-Hatch Power Fractal (PPF Noise)", + "Images as Latents (PPF Noise)", + "Perlin Power Fractal Latent (PPF Noise)" + ], + { + "title_aux": "PPF_Noise_ComfyUI" + } + ], + "https://github.com/WASasquatch/PowerNoiseSuite": [ + [ + "Blend Latents (PPF Noise)", + "Cross-Hatch Power Fractal (PPF Noise)", + "Cross-Hatch Power Fractal Settings (PPF Noise)", + "Images as Latents (PPF Noise)", + "Latent Adjustment (PPF Noise)", + "Latents to CPU (PPF Noise)", + "Linear Cross-Hatch Power Fractal (PPF Noise)", + "Perlin Power Fractal Latent (PPF Noise)", + "Perlin 
Power Fractal Settings (PPF Noise)", + "Power KSampler Advanced (PPF Noise)", + "Power-Law Noise (PPF Noise)" + ], + { + "title_aux": "Power Noise Suite for ComfyUI" + } + ], + "https://github.com/WASasquatch/WAS_Extras": [ + [ + "BLVAEEncode", + "CLIPTextEncodeList", + "CLIPTextEncodeSequence2", + "ConditioningBlend", + "DebugInput", + "KSamplerSeq", + "KSamplerSeq2", + "VAEEncodeForInpaint (WAS)", + "VividSharpen" + ], + { + "title_aux": "WAS_Extras" + } + ], + "https://github.com/WASasquatch/was-node-suite-comfyui": [ + [ + "BLIP Analyze Image", + "BLIP Model Loader", + "Blend Latents", + "Bounded Image Blend", + "Bounded Image Blend with Mask", + "Bounded Image Crop", + "Bounded Image Crop with Mask", + "Bus Node", + "CLIP Input Switch", + "CLIP Vision Input Switch", + "CLIPSeg Batch Masking", + "CLIPSeg Masking", + "CLIPSeg Model Loader", + "CLIPTextEncode (BlenderNeko Advanced + NSP)", + "CLIPTextEncode (NSP)", + "Cache Node", + "Checkpoint Loader", + "Checkpoint Loader (Simple)", + "Conditioning Input Switch", + "Constant Number", + "Control Net Model Input Switch", + "Convert Masks to Images", + "Create Grid Image", + "Create Grid Image from Batch", + "Create Morph Image", + "Create Morph Image from Path", + "Create Video from Path", + "Debug Number to Console", + "Dictionary to Console", + "Diffusers Hub Model Down-Loader", + "Diffusers Model Loader", + "Export API", + "Image Analyze", + "Image Aspect Ratio", + "Image Batch", + "Image Blank", + "Image Blend", + "Image Blend by Mask", + "Image Blending Mode", + "Image Bloom Filter", + "Image Bounds", + "Image Bounds to Console", + "Image Canny Filter", + "Image Chromatic Aberration", + "Image Color Palette", + "Image Crop Face", + "Image Crop Location", + "Image Crop Square Location", + "Image Displacement Warp", + "Image Dragan Photography Filter", + "Image Edge Detection Filter", + "Image Film Grain", + "Image Filter Adjustments", + "Image Flip", + "Image Generate Gradient", + "Image Gradient Map", + "Image High Pass Filter", + "Image History Loader", + "Image Input Switch", + "Image Levels Adjustment", + "Image Load", + "Image Lucy Sharpen", + "Image Median Filter", + "Image Mix RGB Channels", + "Image Monitor Effects Filter", + "Image Nova Filter", + "Image Padding", + "Image Paste Crop", + "Image Paste Crop by Location", + "Image Paste Face", + "Image Perlin Noise", + "Image Perlin Power Fractal", + "Image Pixelate", + "Image Power Noise", + "Image Rembg (Remove Background)", + "Image Remove Background (Alpha)", + "Image Remove Color", + "Image Resize", + "Image Rotate", + "Image Rotate Hue", + "Image SSAO (Ambient Occlusion)", + "Image SSDO (Direct Occlusion)", + "Image Save", + "Image Seamless Texture", + "Image Select Channel", + "Image Select Color", + "Image Shadows and Highlights", + "Image Size to Number", + "Image Stitch", + "Image Style Filter", + "Image Threshold", + "Image Tiled", + "Image Transpose", + "Image Voronoi Noise Filter", + "Image fDOF Filter", + "Image to Latent Mask", + "Image to Noise", + "Image to Seed", + "Images to Linear", + "Images to RGB", + "Inset Image Bounds", + "Integer place counter", + "KSampler (WAS)", + "KSampler Cycle", + "Latent Input Switch", + "Latent Noise Injection", + "Latent Size to Number", + "Latent Upscale by Factor (WAS)", + "Load Cache", + "Load Image Batch", + "Load Lora", + "Load Text File", + "Logic Boolean", + "Lora Input Switch", + "Lora Loader", + "Mask Arbitrary Region", + "Mask Batch", + "Mask Batch to Mask", + "Mask Ceiling Region", + "Mask Crop Dominant Region", + 
"Mask Crop Minority Region", + "Mask Crop Region", + "Mask Dilate Region", + "Mask Dominant Region", + "Mask Erode Region", + "Mask Fill Holes", + "Mask Floor Region", + "Mask Gaussian Region", + "Mask Invert", + "Mask Minority Region", + "Mask Paste Region", + "Mask Smooth Region", + "Mask Threshold Region", + "Masks Add", + "Masks Combine Batch", + "Masks Combine Regions", + "Masks Subtract", + "MiDaS Depth Approximation", + "MiDaS Mask Image", + "MiDaS Model Loader", + "Model Input Switch", + "Number Counter", + "Number Input Condition", + "Number Input Switch", + "Number Multiple Of", + "Number Operation", + "Number PI", + "Number to Float", + "Number to Int", + "Number to Seed", + "Number to String", + "Number to Text", + "Prompt Multiple Styles Selector", + "Prompt Styles Selector", + "Random Number", + "SAM Image Mask", + "SAM Model Loader", + "SAM Parameters", + "SAM Parameters Combine", + "Samples Passthrough (Stat System)", + "Save Text File", + "Seed", + "String to Text", + "Tensor Batch to Image", + "Text Add Token by Input", + "Text Add Tokens", + "Text Compare", + "Text Concatenate", + "Text Dictionary Update", + "Text File History Loader", + "Text Find and Replace", + "Text Find and Replace Input", + "Text Find and Replace by Dictionary", + "Text Input Switch", + "Text List", + "Text List Concatenate", + "Text Load Line From File", + "Text Multiline", + "Text Parse A1111 Embeddings", + "Text Parse Noodle Soup Prompts", + "Text Parse Tokens", + "Text Random Line", + "Text Random Prompt", + "Text Shuffle", + "Text String", + "Text String Truncate", + "Text to Conditioning", + "Text to Console", + "Text to Number", + "Text to String", + "True Random.org Number Generator", + "Upscale Model Loader", + "Upscale Model Switch", + "VAE Input Switch", + "Video Dump Frames", + "Write to GIF", + "Write to Video", + "unCLIP Checkpoint Loader" + ], + { + "title_aux": "WAS Node Suite" + } + ], + "https://github.com/WebDev9000/WebDev9000-Nodes": [ + [ + "IgnoreBraces", + "SettingsSwitch" + ], + { + "title_aux": "WebDev9000-Nodes" + } + ], + "https://github.com/YMC-GitHub/ymc-node-suite-comfyui": [ + [ + "Image Save", + "Save Text File", + "canvas-util-cal-size", + "conditioning-util-input-switch", + "cutoff-region-util", + "hks-util-cal-denoise-step", + "img-util-get-image-size", + "img-util-switch-input-image", + "io-util-file-list-get", + "io-util-file-list-get-text", + "number-util-random-num", + "pipe-util-to-basic-pipe", + "region-util-get-by-center-and-size", + "region-util-get-by-lt", + "region-util-get-crop-location-from-center-size-text", + "region-util-get-pad-out-location-by-size", + "text-preset-colors", + "text-util-join-text", + "text-util-loop-text", + "text-util-path-list", + "text-util-prompt-add-prompt", + "text-util-prompt-adv-dup", + "text-util-prompt-adv-search", + "text-util-prompt-del", + "text-util-prompt-dup", + "text-util-prompt-join", + "text-util-prompt-search", + "text-util-prompt-shuffle", + "text-util-prompt-std", + "text-util-prompt-unweight", + "text-util-random-text", + "text-util-search-text", + "text-util-show-text", + "text-util-switch-text", + "xyz-util-txt-to-int" + ], + { + "title_aux": "ymc-node-suite-comfyui" + } + ], + "https://github.com/YOUR-WORST-TACO/ComfyUI-TacoNodes": [ + [ + "Example", + "TacoAnimatedLoader", + "TacoGifMaker", + "TacoImg2ImgAnimatedLoader", + "TacoImg2ImgAnimatedProcessor", + "TacoLatent" + ], + { + "title_aux": "ComfyUI-TacoNodes" + } + ], + "https://github.com/YinBailiang/MergeBlockWeighted_fo_ComfyUI": [ + [ + 
"MergeBlockWeighted" + ], + { + "title_aux": "MergeBlockWeighted_fo_ComfyUI" + } + ], + "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite": [ + [ + "AlphaChanelAddByMask", + "ImageCompositeBy_BG_Zho", + "ImageCompositeBy_Zho", + "ImageComposite_BG_Zho", + "ImageComposite_Zho", + "RGB_Image_Zho", + "Text_Image_Frame_Zho", + "Text_Image_Multiline_Zho", + "Text_Image_Zho" + ], + { + "title_aux": "ComfyUI-Text_Image-Composite" + } + ], + "https://github.com/ZaneA/ComfyUI-ImageReward": [ + [ + "ImageRewardLoader", + "ImageRewardScore" + ], + { + "title_aux": "ImageReward" + } + ], + "https://github.com/Zuellni/ComfyUI-ExLlama": [ + [ + "ZuellniExLlamaGenerator", + "ZuellniExLlamaLoader", + "ZuellniTextPreview", + "ZuellniTextReplace" + ], + { + "title_aux": "ComfyUI-ExLlama" + } + ], + "https://github.com/Zuellni/ComfyUI-PickScore-Nodes": [ + [ + "ZuellniPickScoreImageProcessor", + "ZuellniPickScoreLoader", + "ZuellniPickScoreSelector", + "ZuellniPickScoreTextProcessor" + ], + { + "title_aux": "ComfyUI PickScore Nodes" + } + ], + "https://github.com/a1lazydog/ComfyUI-AudioScheduler": [ + [ + "AmplitudeToGraph", + "AmplitudeToNumber", + "AudioToAmplitudeGraph", + "AudioToFFTs", + "BatchAmplitudeSchedule", + "ClipAmplitude", + "GateNormalizedAmplitude", + "LoadAudio", + "NormalizeAmplitude", + "NormalizedAmplitudeDrivenString", + "NormalizedAmplitudeToGraph", + "NormalizedAmplitudeToNumber", + "TransientAmplitudeBasic" + ], + { + "title_aux": "ComfyUI-AudioScheduler" + } + ], + "https://github.com/adieyal/comfyui-dynamicprompts": [ + [ + "DPCombinatorialGenerator", + "DPFeelingLucky", + "DPJinja", + "DPMagicPrompt", + "DPOutput", + "DPRandomGenerator" + ], + { + "title_aux": "DynamicPrompts Custom Nodes" + } + ], + "https://github.com/aianimation55/ComfyUI-FatLabels": [ + [ + "FatLabels" + ], + { + "title_aux": "Comfy UI FatLabels" + } + ], + "https://github.com/alpertunga-bile/prompt-generator-comfyui": [ + [ + "Prompt Generator" + ], + { + "title_aux": "prompt-generator" + } + ], + "https://github.com/alsritter/asymmetric-tiling-comfyui": [ + [ + "Asymmetric_Tiling_KSampler" + ], + { + "title_aux": "asymmetric-tiling-comfyui" + } + ], + "https://github.com/alt-key-project/comfyui-dream-project": [ + [ + "Analyze Palette [Dream]", + "Beat Curve [Dream]", + "Big Float Switch [Dream]", + "Big Image Switch [Dream]", + "Big Int Switch [Dream]", + "Big Latent Switch [Dream]", + "Big Palette Switch [Dream]", + "Big Text Switch [Dream]", + "Boolean To Float [Dream]", + "Boolean To Int [Dream]", + "Build Prompt [Dream]", + "CSV Curve [Dream]", + "CSV Generator [Dream]", + "Calculation [Dream]", + "Common Frame Dimensions [Dream]", + "Compare Palettes [Dream]", + "FFMPEG Video Encoder [Dream]", + "File Count [Dream]", + "Finalize Prompt [Dream]", + "Float Input [Dream]", + "Float to Log Entry [Dream]", + "Frame Count Calculator [Dream]", + "Frame Counter (Directory) [Dream]", + "Frame Counter (Simple) [Dream]", + "Frame Counter Info [Dream]", + "Frame Counter Offset [Dream]", + "Frame Counter Time Offset [Dream]", + "Image Brightness Adjustment [Dream]", + "Image Color Shift [Dream]", + "Image Contrast Adjustment [Dream]", + "Image Motion [Dream]", + "Image Sequence Blend [Dream]", + "Image Sequence Loader [Dream]", + "Image Sequence Saver [Dream]", + "Image Sequence Tweening [Dream]", + "Int Input [Dream]", + "Int to Log Entry [Dream]", + "Laboratory [Dream]", + "Linear Curve [Dream]", + "Log Entry Joiner [Dream]", + "Log File [Dream]", + "Noise from Area Palettes [Dream]", + "Noise from 
Palette [Dream]", + "Palette Color Align [Dream]", + "Palette Color Shift [Dream]", + "Sample Image Area as Palette [Dream]", + "Sample Image as Palette [Dream]", + "Saw Curve [Dream]", + "Sine Curve [Dream]", + "Smooth Event Curve [Dream]", + "String Input [Dream]", + "String Tokenizer [Dream]", + "String to Log Entry [Dream]", + "Text Input [Dream]", + "Triangle Curve [Dream]", + "Triangle Event Curve [Dream]", + "WAV Curve [Dream]" + ], + { + "title_aux": "Dream Project Animation Nodes" + } + ], + "https://github.com/alt-key-project/comfyui-dream-video-batches": [ + [ + "Blended Transition [DVB]", + "Calculation [DVB]", + "Create Frame Set [DVB]", + "Divide [DVB]", + "Fade From Black [DVB]", + "Fade To Black [DVB]", + "Float Input [DVB]", + "For Each Done [DVB]", + "For Each Filename [DVB]", + "Frame Set Append [DVB]", + "Frame Set Frame Dimensions Scaled [DVB]", + "Frame Set Index Offset [DVB]", + "Frame Set Merger [DVB]", + "Frame Set Reindex [DVB]", + "Frame Set Repeat [DVB]", + "Frame Set Reverse [DVB]", + "Frame Set Split Beginning [DVB]", + "Frame Set Split End [DVB]", + "Frame Set Splitter [DVB]", + "Generate Inbetween Frames [DVB]", + "Int Input [DVB]", + "Linear Camera Pan [DVB]", + "Linear Camera Roll [DVB]", + "Linear Camera Zoom [DVB]", + "Load Image From Path [DVB]", + "Multiply [DVB]", + "Sine Camera Pan [DVB]", + "Sine Camera Roll [DVB]", + "Sine Camera Zoom [DVB]", + "String Input [DVB]", + "Text Input [DVB]", + "Trace Memory Allocation [DVB]", + "Unwrap Frame Set [DVB]" + ], + { + "title_aux": "Dream Video Batches" + } + ], + "https://github.com/andersxa/comfyui-PromptAttention": [ + [ + "CLIPAttentionMaskEncode" + ], + { + "title_aux": "CLIP Directional Prompt Attention" + } + ], + "https://github.com/asagi4/ComfyUI-CADS": [ + [ + "CADS" + ], + { + "title_aux": "ComfyUI-CADS" + } + ], + "https://github.com/asagi4/comfyui-prompt-control": [ + [ + "EditableCLIPEncode", + "FilterSchedule", + "LoRAScheduler", + "PCSplitSampling", + "PromptControlSimple", + "PromptToSchedule", + "ScheduleToCond", + "ScheduleToModel" + ], + { + "title_aux": "ComfyUI prompt control" + } + ], + "https://github.com/asagi4/comfyui-utility-nodes": [ + [ + "MUJinjaRender", + "MUSimpleWildcard" + ], + { + "title_aux": "asagi4/comfyui-utility-nodes" + } + ], + "https://github.com/aszc-dev/ComfyUI-CoreMLSuite": [ + [ + "Core ML Converter", + "Core ML LCM Converter", + "Core ML LoRA Loader", + "CoreMLModelAdapter", + "CoreMLSampler", + "CoreMLSamplerAdvanced", + "CoreMLUNetLoader" + ], + { + "title_aux": "Core ML Suite for ComfyUI" + } + ], + "https://github.com/avatechai/avatar-graph-comfyui": [ + [ + "ApplyMeshTransformAsShapeKey", + "B_ENUM", + "B_VECTOR3", + "B_VECTOR4", + "CreateShapeFlow", + "ExportBlendshapes", + "ExportGLTF", + "Image Alpha Mask Merge", + "ImageBridge", + "LoadImageWithAlpha", + "SAM MultiLayer", + "Save Image With Workflow" + ], + { + "author": "Avatech Limited", + "description": "Include nodes for sam + bpy operation, that allows workflow creations for generative 2d character rig.", + "nickname": "Avatar Graph", + "title": "Avatar Graph", + "title_aux": "avatar-graph-comfyui" + } + ], + "https://github.com/azazeal04/ComfyUI-Styles": [ + [ + "menus" + ], + { + "title_aux": "ComfyUI-Styles" + } + ], + "https://github.com/badjeff/comfyui_lora_tag_loader": [ + [ + "LoraTagLoader" + ], + { + "title_aux": "LoRA Tag Loader for ComfyUI" + } + ], + "https://github.com/bash-j/mikey_nodes": [ + [ + "AddMetaData", + "Batch Crop Image", + "Batch Crop Resize Inplace", + "Batch Load 
Images", + "Batch Resize Image for SDXL", + "Checkpoint Loader Simple Mikey", + "CinematicLook", + "Empty Latent Ratio Custom SDXL", + "Empty Latent Ratio Select SDXL", + "EvalFloats", + "FileNamePrefix", + "Float to String", + "HaldCLUT", + "Image Caption", + "ImageBorder", + "ImageOverlay", + "ImagePaste", + "Int to String", + "LoraSyntaxProcessor", + "Mikey Sampler", + "Mikey Sampler Base Only", + "Mikey Sampler Base Only Advanced", + "Mikey Sampler Tiled", + "Mikey Sampler Tiled Base Only", + "MikeySamplerTiledAdvanced", + "MikeySamplerTiledAdvancedBaseOnly", + "OobaPrompt", + "PresetRatioSelector", + "Prompt With SDXL", + "Prompt With Style", + "Prompt With Style V2", + "Prompt With Style V3", + "Range Float", + "Range Integer", + "Ratio Advanced", + "Resize Image for SDXL", + "Save Image If True", + "Save Image With Prompt Data", + "Save Images Mikey", + "Save Images No Display", + "SaveMetaData", + "SearchAndReplace", + "Seed String", + "Style Conditioner", + "Style Conditioner Base Only", + "Text2InputOr3rdOption", + "TextCombinations", + "TextCombinations3", + "TextPreserve", + "Upscale Tile Calculator", + "Wildcard Processor", + "WildcardAndLoraSyntaxProcessor", + "WildcardOobaPrompt" + ], + { + "title_aux": "Mikey Nodes" + } + ], + "https://github.com/bedovyy/ComfyUI_NAIDGenerator": [ + [ + "GenerateNAID", + "ImageToNAIMask", + "Img2ImgOptionNAID", + "InpaintingOptionNAID", + "ModelOptionNAID" + ], + { + "title_aux": "ComfyUI_NAIDGenerator" + } + ], + "https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py": [ + [ + "CLIPSeg", + "CombineSegMasks" + ], + { + "title_aux": "CLIPSeg" + } + ], + "https://github.com/bmad4ever/comfyui_ab_samplercustom": [ + [ + "AB SamplerCustom (experimental)" + ], + { + "title_aux": "comfyui_ab_sampler" + } + ], + "https://github.com/bmad4ever/comfyui_bmad_nodes": [ + [ + "AdaptiveThresholding", + "Add String To Many", + "AddAlpha", + "AdjustRect", + "AnyToAny", + "BoundingRect (contours)", + "BuildColorRangeAdvanced (hsv)", + "BuildColorRangeHSV (hsv)", + "CLAHE", + "CLIPEncodeMultiple", + "CLIPEncodeMultipleAdvanced", + "ChameleonMask", + "CheckpointLoader (dirty)", + "CheckpointLoaderSimple (dirty)", + "Color (RGB)", + "Color (hexadecimal)", + "Color Clip", + "Color Clip (advanced)", + "Color Clip ADE20k", + "ColorDictionary", + "ColorDictionary (custom)", + "Conditioning (combine multiple)", + "Conditioning (combine selective)", + "Conditioning Grid (cond)", + "Conditioning Grid (string)", + "Conditioning Grid (string) Advanced", + "Contour To Mask", + "Contours", + "ControlNetHadamard", + "ControlNetHadamard (manual)", + "ConvertImg", + "CopyMakeBorder", + "CreateRequestMetadata", + "DistanceTransform", + "Draw Contour(s)", + "EqualizeHistogram", + "FadeMaskEdges", + "Filter Contour", + "FindComplementaryColor", + "FindThreshold", + "FlatLatentsIntoSingleGrid", + "Framed Mask Grab Cut", + "Framed Mask Grab Cut 2", + "FromListGet1Color", + "FromListGet1Cond", + "FromListGet1Float", + "FromListGet1Image", + "FromListGet1Int", + "FromListGet1Latent", + "FromListGet1Model", + "FromListGet1String", + "FromListGetColors", + "FromListGetConds", + "FromListGetFloats", + "FromListGetImages", + "FromListGetInts", + "FromListGetLatents", + "FromListGetModels", + "FromListGetStrings", + "Get Contour from list", + "Get Models", + "Get Prompt", + "HypernetworkLoader (dirty)", + "ImageBatchToList", + "InRange (hsv)", + "InnerCylinder (remap)", + "Inpaint", + "Input/String to Int Array", + "KMeansColor", + "Load 64 Encoded Image", + 
"LoraLoader (dirty)", + "MaskGrid N KSamplers Advanced", + "Merge Latent Batch Gridwise", + "MonoMerge", + "MorphologicOperation", + "MorphologicSkeletoning", + "NaiveAutoKMeansColor", + "OtsuThreshold", + "OuterCylinder (remap)", + "RGB to HSV", + "Rect Grab Cut", + "Remap", + "Repeat Into Grid (image)", + "Repeat Into Grid (latent)", + "RequestInputs", + "SampleColorHSV", + "Save Image (api)", + "SeamlessClone", + "SeamlessClone (simple)", + "SetRequestStateToComplete", + "String", + "String to Float", + "String to Integer", + "ToColorList", + "ToCondList", + "ToFloatList", + "ToImageList", + "ToIntList", + "ToLatentList", + "ToModelList", + "ToStringList", + "UnGridify (image)", + "VAEEncodeBatch" + ], + { + "title_aux": "Bmad Nodes" + } + ], + "https://github.com/bradsec/ComfyUI_ResolutionSelector": [ + [ + "ResolutionSelector" + ], + { + "title_aux": "ResolutionSelector for ComfyUI" + } + ], + "https://github.com/braintacles/braintacles-comfyui-nodes": [ + [ + "CLIPTextEncodeSDXL-Multi-IO", + "CLIPTextEncodeSDXL-Pipe", + "Empty Latent Image from Aspect-Ratio", + "Random Find and Replace", + "VAE Decode Pipe", + "VAE Decode Tiled Pipe", + "VAE Encode Pipe", + "VAE Encode Tiled Pipe" + ], + { + "title_aux": "braintacles-nodes" + } + ], + "https://github.com/bronkula/comfyui-fitsize": [ + [ + "FS: Crop Image Into Even Pieces", + "FS: Fit Image And Resize", + "FS: Fit Size From Image", + "FS: Fit Size From Int", + "FS: Image Region To Mask", + "FS: Load Image And Resize To Fit", + "FS: Pick Image From Batch", + "FS: Pick Image From Batches", + "FS: Pick Image From List" + ], + { + "title_aux": "comfyui-fitsize" + } + ], + "https://github.com/budihartono/comfyui_otonx_nodes": [ + [ + "OTX Integer Multiple Inputs 4", + "OTX Integer Multiple Inputs 5", + "OTX Integer Multiple Inputs 6", + "OTX KSampler Feeder", + "OTX Versatile Multiple Inputs 4", + "OTX Versatile Multiple Inputs 5", + "OTX Versatile Multiple Inputs 6" + ], + { + "title_aux": "Otonx's Custom Nodes" + } + ], + "https://github.com/bvhari/ComfyUI_ImageProcessing": [ + [ + "BilateralFilter", + "Brightness", + "Gamma", + "Hue", + "Saturation", + "SigmoidCorrection", + "UnsharpMask" + ], + { + "title_aux": "ImageProcessing" + } + ], + "https://github.com/bvhari/ComfyUI_LatentToRGB": [ + [ + "LatentToRGB" + ], + { + "title_aux": "LatentToRGB" + } + ], + "https://github.com/bvhari/ComfyUI_PerpNeg": [ + [ + "KSamplerAdvancedPerpNeg" + ], + { + "title_aux": "ComfyUI_PerpNeg [WIP]" + } + ], + "https://github.com/bvhari/ComfyUI_PerpWeight": [ + [ + "CLIPTextEncodePerpWeight" + ], + { + "title_aux": "ComfyUI_PerpWeight" + } + ], + "https://github.com/catscandrive/comfyui-imagesubfolders/raw/main/loadImageWithSubfolders.py": [ + [ + "LoadImagewithSubfolders" + ], + { + "title_aux": "Image loader with subfolders" + } + ], + "https://github.com/chflame163/ComfyUI_MSSpeech_TTS": [ + [ + "MicorsoftSpeech_TTS", + "Play Sound" + ], + { + "title_aux": "ComfyUI_MSSpeech_TTS" + } + ], + "https://github.com/chibiace/ComfyUI-Chibi-Nodes": [ + [ + "ConditionText", + "ConditionTextMulti", + "ImageAddText", + "ImageSimpleResize", + "ImageSizeInfo", + "ImageTool", + "Int2String", + "LoadEmbedding", + "LoadImageExtended", + "Loader", + "Prompts", + "SaveImages", + "SeedGenerator", + "SimpleSampler", + "TextSplit", + "Textbox", + "Wildcards" + ], + { + "title_aux": "ComfyUI-Chibi-Nodes" + } + ], + "https://github.com/chrisgoringe/cg-image-picker": [ + [ + "Preview Chooser", + "Preview Chooser Fabric" + ], + { + "author": "chrisgoringe", + "description": 
"Custom nodes that preview images and pause the workflow to allow the user to select one or more to progress", + "nickname": "Image Chooser", + "title": "Image Chooser", + "title_aux": "Image chooser" + } + ], + "https://github.com/chrisgoringe/cg-noise": [ + [ + "Hijack", + "KSampler Advanced with Variations", + "KSampler with Variations", + "UnHijack" + ], + { + "title_aux": "Variation seeds" + } + ], + "https://github.com/chrisgoringe/cg-use-everywhere": [ + [ + "Seed Everywhere" + ], + { + "nodename_pattern": "(^(Prompts|Anything) Everywhere|Simple String)", + "title_aux": "Use Everywhere (UE Nodes)" + } + ], + "https://github.com/city96/ComfyUI_ColorMod": [ + [ + "ColorModEdges", + "ColorModPivot", + "LoadImageHighPrec", + "PreviewImageHighPrec", + "SaveImageHighPrec" + ], + { + "title_aux": "ComfyUI_ColorMod" + } + ], + "https://github.com/city96/ComfyUI_DiT": [ + [ + "DiTCheckpointLoader", + "DiTCheckpointLoaderSimple", + "DiTLabelCombine", + "DiTLabelSelect", + "DiTSampler" + ], + { + "title_aux": "ComfyUI_DiT [WIP]" + } + ], + "https://github.com/city96/ComfyUI_ExtraModels": [ + [ + "DiTCondLabelEmpty", + "DiTCondLabelSelect", + "DitCheckpointLoader", + "ExtraVAELoader", + "PixArtCheckpointLoader", + "PixArtDPMSampler", + "PixArtResolutionSelect", + "PixArtT5TextEncode", + "T5TextEncode", + "T5v11Loader" + ], + { + "title_aux": "Extra Models for ComfyUI" + } + ], + "https://github.com/city96/ComfyUI_NetDist": [ + [ + "FetchRemote", + "QueueRemote" + ], + { + "title_aux": "ComfyUI_NetDist" + } + ], + "https://github.com/city96/SD-Advanced-Noise": [ + [ + "LatentGaussianNoise", + "MathEncode" + ], + { + "title_aux": "SD-Advanced-Noise" + } + ], + "https://github.com/city96/SD-Latent-Interposer": [ + [ + "LatentInterposer" + ], + { + "title_aux": "Latent-Interposer" + } + ], + "https://github.com/city96/SD-Latent-Upscaler": [ + [ + "LatentUpscaler" + ], + { + "title_aux": "SD-Latent-Upscaler" + } + ], + "https://github.com/civitai/comfy-nodes": [ + [ + "CivitAI_Checkpoint_Loader", + "CivitAI_Lora_Loader" + ], + { + "title_aux": "comfy-nodes" + } + ], + "https://github.com/comfyanonymous/ComfyUI": [ + [ + "BasicScheduler", + "CLIPLoader", + "CLIPMergeSimple", + "CLIPSave", + "CLIPSetLastLayer", + "CLIPTextEncode", + "CLIPTextEncodeSDXL", + "CLIPTextEncodeSDXLRefiner", + "CLIPVisionEncode", + "CLIPVisionLoader", + "Canny", + "CheckpointLoader", + "CheckpointLoaderSimple", + "CheckpointSave", + "ConditioningAverage", + "ConditioningCombine", + "ConditioningConcat", + "ConditioningSetArea", + "ConditioningSetAreaPercentage", + "ConditioningSetMask", + "ConditioningSetTimestepRange", + "ConditioningZeroOut", + "ControlNetApply", + "ControlNetApplyAdvanced", + "ControlNetLoader", + "CropMask", + "DiffControlNetLoader", + "DiffusersLoader", + "DualCLIPLoader", + "EmptyImage", + "EmptyLatentImage", + "ExponentialScheduler", + "FeatherMask", + "FlipSigmas", + "FreeU", + "FreeU_V2", + "GLIGENLoader", + "GLIGENTextBoxApply", + "GrowMask", + "HyperTile", + "HypernetworkLoader", + "ImageBatch", + "ImageBlend", + "ImageBlur", + "ImageColorToMask", + "ImageCompositeMasked", + "ImageCrop", + "ImageInvert", + "ImageOnlyCheckpointLoader", + "ImagePadForOutpaint", + "ImageQuantize", + "ImageScale", + "ImageScaleBy", + "ImageScaleToTotalPixels", + "ImageSharpen", + "ImageToMask", + "ImageUpscaleWithModel", + "InvertMask", + "JoinImageWithAlpha", + "KSampler", + "KSamplerAdvanced", + "KSamplerSelect", + "KarrasScheduler", + "LatentAdd", + "LatentBlend", + "LatentComposite", + "LatentCompositeMasked", + 
"LatentCrop", + "LatentFlip", + "LatentFromBatch", + "LatentInterpolate", + "LatentMultiply", + "LatentRotate", + "LatentSubtract", + "LatentUpscale", + "LatentUpscaleBy", + "LoadImage", + "LoadImageMask", + "LoadLatent", + "LoraLoader", + "LoraLoaderModelOnly", + "MaskComposite", + "MaskToImage", + "ModelMergeAdd", + "ModelMergeBlocks", + "ModelMergeSimple", + "ModelMergeSubtract", + "ModelSamplingContinuousEDM", + "ModelSamplingDiscrete", + "PatchModelAddDownscale", + "PolyexponentialScheduler", + "PorterDuffImageComposite", + "PreviewImage", + "RebatchLatents", + "RepeatImageBatch", + "RepeatLatentBatch", + "RescaleCFG", + "SDTurboScheduler", + "SVD_img2vid_Conditioning", + "SamplerCustom", + "SamplerDPMPP_2M_SDE", + "SamplerDPMPP_SDE", + "SaveAnimatedPNG", + "SaveAnimatedWEBP", + "SaveImage", + "SaveLatent", + "SetLatentNoiseMask", + "SolidMask", + "SplitImageWithAlpha", + "SplitSigmas", + "StyleModelApply", + "StyleModelLoader", + "TomePatchModel", + "UNETLoader", + "UpscaleModelLoader", + "VAEDecode", + "VAEDecodeTiled", + "VAEEncode", + "VAEEncodeForInpaint", + "VAEEncodeTiled", + "VAELoader", + "VAESave", + "VPScheduler", + "VideoLinearCFGGuidance", + "unCLIPCheckpointLoader", + "unCLIPConditioning" + ], + { + "title_aux": "ComfyUI" + } + ], + "https://github.com/comfyanonymous/ComfyUI_experiments": [ + [ + "ModelMergeBlockNumber", + "ModelMergeSDXL", + "ModelMergeSDXLDetailedTransformers", + "ModelMergeSDXLTransformers", + "ModelSamplerTonemapNoiseTest", + "ReferenceOnlySimple", + "RescaleClassifierFreeGuidanceTest", + "TonemapNoiseWithRescaleCFG" + ], + { + "title_aux": "ComfyUI_experiments" + } + ], + "https://github.com/coreyryanhanson/ComfyQR": [ + [ + "comfy-qr-by-image-size", + "comfy-qr-by-module-size", + "comfy-qr-by-module-split", + "comfy-qr-mask_errors" + ], + { + "title_aux": "ComfyQR" + } + ], + "https://github.com/coreyryanhanson/ComfyQR-scanning-nodes": [ + [ + "comfy-qr-read", + "comfy-qr-validate" + ], + { + "title_aux": "ComfyQR-scanning-nodes" + } + ], + "https://github.com/cubiq/ComfyUI_IPAdapter_plus": [ + [ + "IPAdapterApply", + "IPAdapterApplyEncoded", + "IPAdapterBatchEmbeds", + "IPAdapterEncoder", + "IPAdapterLoadEmbeds", + "IPAdapterModelLoader", + "IPAdapterSaveEmbeds", + "PrepImageForClipVision" + ], + { + "title_aux": "ComfyUI_IPAdapter_plus" + } + ], + "https://github.com/cubiq/ComfyUI_SimpleMath": [ + [ + "SimpleMath", + "SimpleMathDebug" + ], + { + "title_aux": "Simple Math" + } + ], + "https://github.com/cubiq/ComfyUI_essentials": [ + [ + "ConsoleDebug+", + "GetImageSize+", + "ImageCASharpening+", + "ImageCrop+", + "ImageDesaturate+", + "ImageEnhanceDifference+", + "ImageExpandBatch+", + "ImageFlip+", + "ImagePosterize+", + "ImageResize+", + "MaskBatch+", + "MaskBlur+", + "MaskExpandBatch+", + "MaskFlip+", + "MaskPreview+", + "ModelCompile+", + "SimpleMath+", + "TransitionMask+" + ], + { + "title_aux": "ComfyUI Essentials" + } + ], + "https://github.com/dagthomas/comfyui_dagthomas": [ + [ + "CSL", + "CSVPromptGenerator", + "PromptGenerator" + ], + { + "title_aux": "SDXL Auto Prompter" + } + ], + "https://github.com/dawangraoming/ComfyUI_ksampler_gpu/raw/main/ksampler_gpu.py": [ + [ + "KSamplerAdvancedGPU", + "KSamplerGPU" + ], + { + "title_aux": "KSampler GPU" + } + ], + "https://github.com/daxthin/DZ-FaceDetailer": [ + [ + "DZ_Face_Detailer" + ], + { + "title_aux": "DZ-FaceDetailer" + } + ], + "https://github.com/dimtoneff/ComfyUI-PixelArt-Detector": [ + [ + "PixelArtAddDitherPattern", + "PixelArtDetectorConverter", + "PixelArtDetectorSave", + 
"PixelArtDetectorToImage", + "PixelArtLoadPalettes" + ], + { + "title_aux": "ComfyUI PixelArt Detector" + } + ], + "https://github.com/diontimmer/ComfyUI-Vextra-Nodes": [ + [ + "Add Text To Image", + "Apply Instagram Filter", + "Create Solid Color", + "Flatten Colors", + "Generate Noise Image", + "GlitchThis Effect", + "Hue Rotation", + "Load Picture Index", + "Pixel Sort", + "Play Sound At Execution", + "Prettify Prompt Using distilgpt2", + "Swap Color Mode" + ], + { + "title_aux": "ComfyUI-Vextra-Nodes" + } + ], + "https://github.com/drago87/ComfyUI_Dragos_Nodes": [ + [ + "file_padding", + "image_info", + "lora_loader", + "vae_loader" + ], + { + "title_aux": "ComfyUI_Dragos_Nodes" + } + ], + "https://github.com/drustan-hawk/primitive-types": [ + [ + "float", + "int", + "string", + "string_multiline" + ], + { + "title_aux": "primitive-types" + } + ], + "https://github.com/ealkanat/comfyui_easy_padding": [ + [ + "comfyui-easy-padding" + ], + { + "title_aux": "ComfyUI Easy Padding" + } + ], + "https://github.com/evanspearman/ComfyMath": [ + [ + "CM_BoolBinaryOperation", + "CM_BoolToInt", + "CM_BoolUnaryOperation", + "CM_BreakoutVec2", + "CM_BreakoutVec3", + "CM_BreakoutVec4", + "CM_ComposeVec2", + "CM_ComposeVec3", + "CM_ComposeVec4", + "CM_FloatBinaryCondition", + "CM_FloatBinaryOperation", + "CM_FloatToInt", + "CM_FloatToNumber", + "CM_FloatUnaryCondition", + "CM_FloatUnaryOperation", + "CM_IntBinaryCondition", + "CM_IntBinaryOperation", + "CM_IntToBool", + "CM_IntToFloat", + "CM_IntToNumber", + "CM_IntUnaryCondition", + "CM_IntUnaryOperation", + "CM_NearestSDXLResolution", + "CM_NumberBinaryCondition", + "CM_NumberBinaryOperation", + "CM_NumberToFloat", + "CM_NumberToInt", + "CM_NumberUnaryCondition", + "CM_NumberUnaryOperation", + "CM_SDXLResolution", + "CM_Vec2BinaryCondition", + "CM_Vec2BinaryOperation", + "CM_Vec2ScalarOperation", + "CM_Vec2ToScalarBinaryOperation", + "CM_Vec2ToScalarUnaryOperation", + "CM_Vec2UnaryCondition", + "CM_Vec2UnaryOperation", + "CM_Vec3BinaryCondition", + "CM_Vec3BinaryOperation", + "CM_Vec3ScalarOperation", + "CM_Vec3ToScalarBinaryOperation", + "CM_Vec3ToScalarUnaryOperation", + "CM_Vec3UnaryCondition", + "CM_Vec3UnaryOperation", + "CM_Vec4BinaryCondition", + "CM_Vec4BinaryOperation", + "CM_Vec4ScalarOperation", + "CM_Vec4ToScalarBinaryOperation", + "CM_Vec4ToScalarUnaryOperation", + "CM_Vec4UnaryCondition", + "CM_Vec4UnaryOperation" + ], + { + "title_aux": "ComfyMath" + } + ], + "https://github.com/fearnworks/ComfyUI_FearnworksNodes/raw/main/fw_nodes.py": [ + [ + "Count Files in Directory (FW)", + "Count Tokens (FW)", + "Token Count Ranker(FW)", + "Trim To Tokens (FW)" + ], + { + "title_aux": "Fearnworks Custom Nodes" + } + ], + "https://github.com/fexli/fexli-util-node-comfyui": [ + [ + "FEColor2Image", + "FEColorOut", + "FEImagePadForOutpaint", + "FERandomizedColor2Image" + ], + { + "title_aux": "fexli-util-node-comfyui" + } + ], + "https://github.com/filipemeneses/comfy_pixelization": [ + [ + "Pixelization" + ], + { + "title_aux": "Pixelization" + } + ], + "https://github.com/filliptm/ComfyUI_Fill-Nodes": [ + [ + "FL_ImageRandomizer" + ], + { + "title_aux": "ComfyUI_Fill-Nodes" + } + ], + "https://github.com/fitCorder/fcSuite/raw/main/fcSuite.py": [ + [ + "fcFloat", + "fcFloatMatic", + "fcInteger" + ], + { + "title_aux": "fcSuite" + } + ], + "https://github.com/flyingshutter/As_ComfyUI_CustomNodes": [ + [ + "BatchIndex_AS", + "CropImage_AS", + "ImageMixMasked_As", + "ImageToMask_AS", + "Increment_AS", + "Int2Any_AS", + "LatentAdd_AS", + 
"LatentMixMasked_As", + "LatentMix_AS", + "LatentToImages_AS", + "LoadLatent_AS", + "MapRange_AS", + "MaskToImage_AS", + "Math_AS", + "NoiseImage_AS", + "Number2Float_AS", + "Number2Int_AS", + "Number_AS", + "SaveLatent_AS", + "TextToImage_AS", + "TextWildcardList_AS" + ], + { + "title_aux": "As_ComfyUI_CustomNodes" + } + ], + "https://github.com/gemell1/ComfyUI_GMIC": [ + [ + "GmicCliWrapper" + ], + { + "title_aux": "ComfyUI_GMIC" + } + ], + "https://github.com/giriss/comfy-image-saver": [ + [ + "Cfg Literal", + "Checkpoint Selector", + "Int Literal", + "Sampler Selector", + "Save Image w/Metadata", + "Scheduler Selector", + "Seed Generator", + "String Literal", + "Width/Height Literal" + ], + { + "title_aux": "Save Image with Generation Metadata" + } + ], + "https://github.com/guoyk93/yk-node-suite-comfyui": [ + [ + "YKImagePadForOutpaint", + "YKMaskToImage" + ], + { + "title_aux": "y.k.'s ComfyUI node suite" + } + ], + "https://github.com/hhhzzyang/Comfyui_Lama": [ + [ + "LamaApply", + "LamaModelLoader", + "YamlConfigLoader" + ], + { + "title_aux": "Comfyui-Lama" + } + ], + "https://github.com/hnmr293/ComfyUI-nodes-hnmr": [ + [ + "CLIPIter", + "Dict2Model", + "GridImage", + "ImageBlend2", + "KSamplerOverrided", + "KSamplerSetting", + "KSamplerXYZ", + "LatentToHist", + "LatentToImage", + "ModelIter", + "RandomLatentImage", + "SaveStateDict", + "SaveText", + "StateDictLoader", + "StateDictMerger", + "StateDictMergerBlockWeighted", + "StateDictMergerBlockWeightedMulti", + "VAEDecodeBatched", + "VAEEncodeBatched", + "VAEIter" + ], + { + "title_aux": "ComfyUI-nodes-hnmr" + } + ], + "https://github.com/hustille/ComfyUI_Fooocus_KSampler": [ + [ + "KSampler With Refiner (Fooocus)" + ], + { + "title_aux": "ComfyUI_Fooocus_KSampler" + } + ], + "https://github.com/hustille/ComfyUI_hus_utils": [ + [ + "3way Prompt Styler", + "Batch State", + "Date Time Format", + "Debug Extra", + "Fetch widget value", + "Text Hash" + ], + { + "title_aux": "hus' utils for ComfyUI" + } + ], + "https://github.com/hylarucoder/ComfyUI-Eagle-PNGInfo": [ + [ + "EagleImageNode", + "SDXLPromptStyler", + "SDXLPromptStylerAdvanced", + "SDXLResolutionPresets" + ], + { + "title_aux": "Eagle PNGInfo" + } + ], + "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words": [ + [ + "FusionText", + "LoraLoaderAdvanced", + "LoraLoaderStackedAdvanced", + "LoraLoaderStackedVanilla", + "LoraLoaderVanilla", + "Randomizer", + "TagsFormater", + "TagsSelector", + "TextInputBasic" + ], + { + "title_aux": "ComfyUI-Lora-Auto-Trigger-Words" + } + ], + "https://github.com/imb101/ComfyUI-FaceSwap": [ + [ + "FaceSwapNode" + ], + { + "title_aux": "FaceSwap" + } + ], + "https://github.com/jags111/ComfyUI_Jags_Audiotools": [ + [ + "BatchJoinAudio", + "BatchToList", + "BitCrushAudioFX", + "BulkVariation", + "ChorusAudioFX", + "ClippingAudioFX", + "CompressorAudioFX", + "ConcatAudioList", + "ConvolutionAudioFX", + "CutAudio", + "DelayAudioFX", + "DistortionAudioFX", + "DuplicateAudio", + "GainAudioFX", + "GenerateAudioSample", + "GenerateAudioWave", + "GetAudioFromFolderIndex", + "GetSingle", + "GetStringByIndex", + "HighShelfFilter", + "HighpassFilter", + "ImageToSpectral", + "InvertAudioFX", + "JoinAudio", + "LadderFilter", + "LimiterAudioFX", + "ListToBatch", + "LoadAudioDir", + "LoadAudioFile", + "LoadAudioModel (DD)", + "LoadVST3", + "LowShelfFilter", + "LowpassFilter", + "MP3CompressorAudioFX", + "MixAudioTensors", + "NoiseGateAudioFX", + "OTTAudioFX", + "PeakFilter", + "PhaserEffectAudioFX", + "PitchShiftAudioFX", + "PlotSpectrogram", + 
"PreviewAudioFile", + "PreviewAudioTensor", + "ResampleAudio", + "ReverbAudioFX", + "ReverseAudio", + "SaveAudioTensor", + "SequenceVariation", + "SliceAudio", + "StretchAudio" + ], + { + "author": "jags111", + "description": "This extension offers various audio generation tools", + "nickname": "Audiotools", + "title": "Jags_Audiotools", + "title_aux": "ComfyUI_Jags_Audiotools" + } + ], + "https://github.com/jags111/ComfyUI_Jags_VectorMagic": [ + [ + "CircularVAEDecode", + "SVG", + "YoloSEGdetectionNode", + "YoloSegNode", + "color_drop", + "my unique name", + "xy_Tiling_KSampler" + ], + { + "author": "jags111", + "description": "This extension offers various vector manipulation and generation tools", + "nickname": "Jags_VectorMagic", + "title": "Jags_VectorMagic", + "title_aux": "ComfyUI_Jags_VectorMagic" + } + ], + "https://github.com/jags111/efficiency-nodes-comfyui": [ + [ + "AnimateDiff Script", + "Apply ControlNet Stack", + "Control Net Stacker", + "Eff. Loader SDXL", + "Efficient Loader", + "HighRes-Fix Script", + "Image Overlay", + "Join XY Inputs of Same Type", + "KSampler (Efficient)", + "KSampler Adv. (Efficient)", + "KSampler SDXL (Eff.)", + "LatentUpscaler", + "LoRA Stacker", + "Manual XY Entry Info", + "NNLatentUpscale", + "Noise Control Script", + "Pack SDXL Tuple", + "Tiled Upscaler Script", + "Unpack SDXL Tuple", + "XY Input: Add/Return Noise", + "XY Input: Aesthetic Score", + "XY Input: CFG Scale", + "XY Input: Checkpoint", + "XY Input: Clip Skip", + "XY Input: Control Net", + "XY Input: Control Net Plot", + "XY Input: Denoise", + "XY Input: LoRA", + "XY Input: LoRA Plot", + "XY Input: LoRA Stacks", + "XY Input: Manual XY Entry", + "XY Input: Prompt S/R", + "XY Input: Refiner On/Off", + "XY Input: Sampler/Scheduler", + "XY Input: Seeds++ Batch", + "XY Input: Steps", + "XY Input: VAE", + "XY Plot" + ], + { + "title_aux": "Efficiency Nodes for ComfyUI Version 2.0+" + } + ], + "https://github.com/jamesWalker55/comfyui-various": [ + [], + { + "nodename_pattern": "^JW", + "title_aux": "Various ComfyUI Nodes by Type" + } + ], + "https://github.com/jjkramhoeft/ComfyUI-Jjk-Nodes": [ + [ + "JjkConcat", + "JjkShowText", + "JjkText", + "SDXLRecommendedImageSize" + ], + { + "title_aux": "ComfyUI-Jjk-Nodes" + } + ], + "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative": [ + [ + "LCMScheduler", + "SamplerLCMAlternative", + "SamplerLCMCycle" + ], + { + "title_aux": "ComfyUI-sampler-lcm-alternative" + } + ], + "https://github.com/jtrue/ComfyUI-JaRue": [ + [ + "ConcatStringWithDelimiter_jru", + "ConcatString_jru", + "Float2Int_jru", + "Float2String_jru", + "ImageSizer_jru", + "Int2FloatMultiply_jru", + "Int2String_jru", + "String2Int_jru", + "YouTube2Prompt_jru" + ], + { + "title_aux": "ComfyUI-JaRue" + } + ], + "https://github.com/ka-puna/comfyui-yanc": [ + [ + "YANC.ConcatStrings", + "YANC.FormatDatetimeString", + "YANC.GetWidgetValueString", + "YANC.IntegerCaster", + "YANC.MultilineString", + "YANC.TruncateString" + ], + { + "title_aux": "comfyui-yanc" + } + ], + "https://github.com/kenjiqq/qq-nodes-comfyui": [ + [ + "Any List", + "Axis To Float", + "Axis To Int", + "Axis To Model", + "Axis To String", + "Image Accumulator End", + "Image Accumulator Start", + "Load Lines From Text File", + "Slice List", + "XY Grid Helper" + ], + { + "title_aux": "qq-nodes-comfyui" + } + ], + "https://github.com/kijai/ComfyUI-KJNodes": [ + [ + "AddLabel", + "BatchCLIPSeg", + "BatchCropFromMask", + "BatchCropFromMaskAdvanced", + "BatchUncrop", + "BatchUncropAdvanced", + "BboxToInt", + 
"ColorMatch", + "ColorToMask", + "ConditioningMultiCombine", + "ConditioningSetMaskAndCombine", + "ConditioningSetMaskAndCombine3", + "ConditioningSetMaskAndCombine4", + "ConditioningSetMaskAndCombine5", + "CreateAudioMask", + "CreateFadeMask", + "CreateFadeMaskAdvanced", + "CreateFluidMask", + "CreateGradientMask", + "CreateMagicMask", + "CreateShapeMask", + "CreateTextMask", + "CreateVoronoiMask", + "CrossFadeImages", + "DummyLatentOut", + "EmptyLatentImagePresets", + "FlipSigmasAdjusted", + "FloatConstant", + "GetImageRangeFromBatch", + "GrowMaskWithBlur", + "INTConstant", + "ImageBatchTestPattern", + "ImageConcanate", + "ImageGrabPIL", + "ImageGridComposite2x2", + "ImageGridComposite3x3", + "InjectNoiseToLatent", + "NormalizeLatent", + "OffsetMask", + "ReplaceImagesInBatch", + "ResizeMask", + "ReverseImageBatch", + "RoundMask", + "SaveImageWithAlpha", + "SomethingToString", + "SplitBboxes", + "VRAM_Debug", + "WidgetToString" + ], + { + "title_aux": "KJNodes for ComfyUI" + } + ], + "https://github.com/kijai/ComfyUI-SVD": [ + [ + "SVDimg2vid" + ], + { + "title_aux": "ComfyUI-SVD" + } + ], + "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink": [ + [ + "GradientPatchModelAddDownscale", + "GradientPatchModelAddDownscaleAdvanced" + ], + { + "title_aux": "ComfyUI_GradientDeepShrink" + } + ], + "https://github.com/kinfolk0117/ComfyUI_SimpleTiles": [ + [ + "TileCalc", + "TileMerge", + "TileSplit" + ], + { + "title_aux": "SimpleTiles" + } + ], + "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter": [ + [ + "TiledIPAdapter" + ], + { + "title_aux": "TiledIPAdapter" + } + ], + "https://github.com/knuknX/ComfyUI-Image-Tools": [ + [ + "ImageBatchSqueezeProcessor", + "ImageBgRemoveProcessor", + "ImageStandardResizeProcessor", + "SingleImagePathLoader", + "SingleImageUrlLoader" + ], + { + "title_aux": "ComfyUI-Image-Tools" + } + ], + "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI": [ + [ + "LLLiteLoader" + ], + { + "title_aux": "ControlNet-LLLite-ComfyUI" + } + ], + "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes": [ + [ + "S3 Bucket LoRA", + "S3Bucket_Load_LoRA", + "XL DreamBooth LoRA", + "XLDB_LoRA" + ], + { + "title_aux": "ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes" + } + ], + "https://github.com/kwaroran/abg-comfyui": [ + [ + "Remove Image Background (abg)" + ], + { + "title_aux": "abg-comfyui" + } + ], + "https://github.com/laksjdjf/IPAdapter-ComfyUI": [ + [ + "IPAdapter", + "ImageCrop" + ], + { + "title_aux": "IPAdapter-ComfyUI" + } + ], + "https://github.com/laksjdjf/LCMSampler-ComfyUI": [ + [ + "SamplerLCM", + "TAESDLoader" + ], + { + "title_aux": "LCMSampler-ComfyUI" + } + ], + "https://github.com/laksjdjf/LoRA-Merger-ComfyUI": [ + [ + "LoraLoaderFromWeight", + "LoraLoaderWeightOnly", + "LoraMerge", + "LoraSave" + ], + { + "title_aux": "LoRA-Merger-ComfyUI" + } + ], + "https://github.com/laksjdjf/attention-couple-ComfyUI": [ + [ + "Attention couple" + ], + { + "title_aux": "attention-couple-ComfyUI" + } + ], + "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI": [ + [ + "CDTuner", + "Negapip", + "Negpip" + ], + { + "title_aux": "cd-tuner_negpip-ComfyUI" + } + ], + "https://github.com/laksjdjf/pfg-ComfyUI": [ + [ + "PFG" + ], + { + "title_aux": "pfg-ComfyUI" + } + ], + "https://github.com/lilly1987/ComfyUI_node_Lilly": [ + [ + "CheckpointLoaderSimpleText", + "LoraLoaderText", + "LoraLoaderTextRandom", + "Random_Sampler", + "VAELoaderDecode" + ], + { + "title_aux": "simple wildcard for ComfyUI" + } + ], + 
"https://github.com/lordgasmic/ComfyUI-Wildcards/raw/master/wildcards.py": [ + [ + "CLIPTextEncodeWithWildcards" + ], + { + "title_aux": "Wildcards" + } + ], + "https://github.com/lrzjason/ComfyUIJasonNode/raw/main/SDXLMixSampler.py": [ + [ + "SDXLMixSampler" + ], + { + "title_aux": "ComfyUIJasonNode" + } + ], + "https://github.com/ltdrdata/ComfyUI-Impact-Pack": [ + [ + "AddMask", + "BasicPipeToDetailerPipe", + "BasicPipeToDetailerPipeSDXL", + "BboxDetectorCombined", + "BboxDetectorCombined_v2", + "BboxDetectorForEach", + "BboxDetectorSEGS", + "BitwiseAndMask", + "BitwiseAndMaskForEach", + "CLIPSegDetectorProvider", + "CfgScheduleHookProvider", + "CombineRegionalPrompts", + "CoreMLDetailerHookProvider", + "DenoiseScheduleHookProvider", + "DetailerForEach", + "DetailerForEachDebug", + "DetailerForEachDebugPipe", + "DetailerForEachPipe", + "DetailerPipeToBasicPipe", + "EditBasicPipe", + "EditDetailerPipe", + "EditDetailerPipeSDXL", + "EmptySegs", + "FaceDetailer", + "FaceDetailerPipe", + "FromBasicPipe", + "FromBasicPipe_v2", + "FromDetailerPipe", + "FromDetailerPipeSDXL", + "FromDetailerPipe_v2", + "ImageListToImageBatch", + "ImageMaskSwitch", + "ImageReceiver", + "ImageSender", + "ImpactAssembleSEGS", + "ImpactCombineConditionings", + "ImpactCompare", + "ImpactConditionalBranch", + "ImpactConditionalStopIteration", + "ImpactControlBridge", + "ImpactControlNetApplySEGS", + "ImpactDecomposeSEGS", + "ImpactDilateMask", + "ImpactDilate_Mask_SEG_ELT", + "ImpactDummyInput", + "ImpactEdit_SEG_ELT", + "ImpactFloat", + "ImpactFrom_SEG_ELT", + "ImpactHFTransformersClassifierProvider", + "ImpactImageBatchToImageList", + "ImpactImageInfo", + "ImpactInt", + "ImpactInversedSwitch", + "ImpactIsNotEmptySEGS", + "ImpactKSamplerAdvancedBasicPipe", + "ImpactKSamplerBasicPipe", + "ImpactLogger", + "ImpactMakeImageBatch", + "ImpactMakeImageList", + "ImpactMinMax", + "ImpactNeg", + "ImpactNodeSetMuteState", + "ImpactQueueTrigger", + "ImpactQueueTriggerCountdown", + "ImpactSEGSClassify", + "ImpactSEGSConcat", + "ImpactSEGSLabelFilter", + "ImpactSEGSOrderedFilter", + "ImpactSEGSPicker", + "ImpactSEGSRangeFilter", + "ImpactSEGSToMaskBatch", + "ImpactSEGSToMaskList", + "ImpactScaleBy_BBOX_SEG_ELT", + "ImpactSegsAndMask", + "ImpactSegsAndMaskForEach", + "ImpactSetWidgetValue", + "ImpactSimpleDetectorSEGS", + "ImpactSimpleDetectorSEGSPipe", + "ImpactSimpleDetectorSEGS_for_AD", + "ImpactSleep", + "ImpactStringSelector", + "ImpactSwitch", + "ImpactValueReceiver", + "ImpactValueSender", + "ImpactWildcardEncode", + "ImpactWildcardProcessor", + "IterativeImageUpscale", + "IterativeLatentUpscale", + "KSamplerAdvancedProvider", + "KSamplerProvider", + "LatentPixelScale", + "LatentReceiver", + "LatentSender", + "LatentSwitch", + "MMDetDetectorProvider", + "MMDetLoader", + "MaskDetailerPipe", + "MaskListToMaskBatch", + "MaskPainter", + "MaskToSEGS", + "MaskToSEGS_for_AnimateDiff", + "MasksToMaskList", + "MediaPipeFaceMeshToSEGS", + "NoiseInjectionDetailerHookProvider", + "NoiseInjectionHookProvider", + "ONNXDetectorProvider", + "ONNXDetectorSEGS", + "PixelKSampleHookCombine", + "PixelKSampleUpscalerProvider", + "PixelKSampleUpscalerProviderPipe", + "PixelTiledKSampleUpscalerProvider", + "PixelTiledKSampleUpscalerProviderPipe", + "PreviewBridge", + "ReencodeLatent", + "ReencodeLatentPipe", + "RegionalPrompt", + "RegionalSampler", + "RegionalSamplerAdvanced", + "RemoveNoiseMask", + "SAMDetectorCombined", + "SAMDetectorSegmented", + "SAMLoader", + "SEGSDetailer", + "SEGSDetailerForAnimateDiff", + "SEGSPaste", + "SEGSPreview", + 
"SEGSSwitch", + "SEGSToImageList", + "SegmDetectorCombined", + "SegmDetectorCombined_v2", + "SegmDetectorForEach", + "SegmDetectorSEGS", + "Segs Mask", + "Segs Mask ForEach", + "SegsMaskCombine", + "SegsToCombinedMask", + "SubtractMask", + "SubtractMaskForEach", + "TiledKSamplerProvider", + "ToBasicPipe", + "ToBinaryMask", + "ToDetailerPipe", + "ToDetailerPipeSDXL", + "TwoAdvancedSamplersForMask", + "TwoSamplersForMask", + "TwoSamplersForMaskUpscalerProvider", + "TwoSamplersForMaskUpscalerProviderPipe", + "UltralyticsDetectorProvider" + ], + { + "author": "Dr.Lt.Data", + "description": "This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler.", + "nickname": "Impact Pack", + "title": "Impact Pack", + "title_aux": "ComfyUI Impact Pack" + } + ], + "https://github.com/ltdrdata/ComfyUI-Inspire-Pack": [ + [ + "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire", + "ApplyRegionalIPAdapters //Inspire", + "BindImageListPromptList //Inspire", + "CacheBackendData //Inspire", + "CacheBackendDataList //Inspire", + "CacheBackendDataNumberKey //Inspire", + "CacheBackendDataNumberKeyList //Inspire", + "Canny_Preprocessor_Provider_for_SEGS //Inspire", + "ChangeImageBatchSize //Inspire", + "Color_Preprocessor_Provider_for_SEGS //Inspire", + "DWPreprocessor_Provider_for_SEGS //Inspire", + "FakeScribblePreprocessor_Provider_for_SEGS //Inspire", + "FloatRange //Inspire", + "FromIPAdapterPipe //Inspire", + "GlobalSeed //Inspire", + "HEDPreprocessor_Provider_for_SEGS //Inspire", + "InpaintPreprocessor_Provider_for_SEGS //Inspire", + "KSampler //Inspire", + "KSamplerAdvanced //Inspire", + "KSamplerAdvancedProgress //Inspire", + "KSamplerProgress //Inspire", + "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire", + "LineArt_Preprocessor_Provider_for_SEGS //Inspire", + "ListCounter //Inspire", + "LoadImage //Inspire", + "LoadImageListFromDir //Inspire", + "LoadImagesFromDir //Inspire", + "LoadPromptsFromDir //Inspire", + "LoadPromptsFromFile //Inspire", + "LoraBlockInfo //Inspire", + "LoraLoaderBlockWeight //Inspire", + "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire", + "MediaPipeFaceMeshDetectorProvider //Inspire", + "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire", + "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire", + "OpenPose_Preprocessor_Provider_for_SEGS //Inspire", + "PromptBuilder //Inspire", + "PromptExtractor //Inspire", + "RegionalConditioningColorMask //Inspire", + "RegionalConditioningSimple //Inspire", + "RegionalIPAdapterColorMask //Inspire", + "RegionalIPAdapterEncodedColorMask //Inspire", + "RegionalIPAdapterEncodedMask //Inspire", + "RegionalIPAdapterMask //Inspire", + "RegionalPromptColorMask //Inspire", + "RegionalPromptSimple //Inspire", + "RegionalSeedExplorerColorMask //Inspire", + "RegionalSeedExplorerMask //Inspire", + "RemoveBackendData //Inspire", + "RemoveBackendDataNumberKey //Inspire", + "RetrieveBackendData //Inspire", + "RetrieveBackendDataNumberKey //Inspire", + "SeedExplorer //Inspire", + "ShowCachedInfo //Inspire", + "TilePreprocessor_Provider_for_SEGS //Inspire", + "ToIPAdapterPipe //Inspire", + "UnzipPrompt //Inspire", + "WildcardEncode //Inspire", + "XY Input: Lora Block Weight //Inspire", + "ZipPrompt //Inspire", + "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire" + ], + { + "author": "Dr.Lt.Data", + "description": "This extension provides various nodes to support Lora Block Weight and the Impact Pack.", + 
"nickname": "Inspire Pack", + "nodename_pattern": "Inspire$", + "title": "Inspire Pack", + "title_aux": "ComfyUI Inspire Pack" + } + ], + "https://github.com/m-sokes/ComfyUI-Sokes-Nodes": [ + [ + "Custom Date Format | sokes \ud83e\uddac", + "Latent Switch x9 | sokes \ud83e\uddac" + ], + { + "title_aux": "ComfyUI Sokes Nodes" + } + ], + "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes/raw/main/clip-text-encode-split/clip_text_encode_split.py": [ + [ + "RawText", + "RawTextCombine", + "RawTextEncode", + "RawTextReplace" + ], + { + "title_aux": "m957ymj75urz/ComfyUI-Custom-Nodes" + } + ], + "https://github.com/marhensa/sdxl-recommended-res-calc": [ + [ + "RecommendedResCalc" + ], + { + "title_aux": "Recommended Resolution Calculator" + } + ], + "https://github.com/martijnat/comfyui-previewlatent": [ + [ + "PreviewLatent", + "PreviewLatentAdvanced" + ], + { + "title_aux": "comfyui-previewlatent" + } + ], + "https://github.com/matan1905/ComfyUI-Serving-Toolkit": [ + [ + "DiscordServing", + "ServingInputNumber", + "ServingInputText", + "ServingOutput", + "WebSocketServing" + ], + { + "title_aux": "ComfyUI Serving toolkit" + } + ], + "https://github.com/mav-rik/facerestore_cf": [ + [ + "CropFace", + "FaceRestoreCFWithModel", + "FaceRestoreModelLoader" + ], + { + "title_aux": "Facerestore CF (Code Former)" + } + ], + "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding": [ + [ + "DynamicThresholdingFull", + "DynamicThresholdingSimple" + ], + { + "title_aux": "Stable Diffusion Dynamic Thresholding (CFG Scale Fix)" + } + ], + "https://github.com/meap158/ComfyUI-GPU-temperature-protection": [ + [ + "GPUTemperatureProtection" + ], + { + "title_aux": "GPU temperature protection" + } + ], + "https://github.com/meap158/ComfyUI-Prompt-Expansion": [ + [ + "PromptExpansion" + ], + { + "title_aux": "ComfyUI-Prompt-Expansion" + } + ], + "https://github.com/melMass/comfy_mtb": [ + [ + "Animation Builder (mtb)", + "Any To String (mtb)", + "Batch Float (mtb)", + "Batch Float Assemble (mtb)", + "Batch Float Fill (mtb)", + "Batch Make (mtb)", + "Batch Merge (mtb)", + "Batch Shake (mtb)", + "Batch Shape (mtb)", + "Batch Transform (mtb)", + "Bbox (mtb)", + "Bbox From Mask (mtb)", + "Blur (mtb)", + "Color Correct (mtb)", + "Colored Image (mtb)", + "Concat Images (mtb)", + "Crop (mtb)", + "Debug (mtb)", + "Deep Bump (mtb)", + "Export With Ffmpeg (mtb)", + "Face Swap (mtb)", + "Film Interpolation (mtb)", + "Fit Number (mtb)", + "Float To Number (mtb)", + "Get Batch From History (mtb)", + "Image Compare (mtb)", + "Image Premultiply (mtb)", + "Image Remove Background Rembg (mtb)", + "Image Resize Factor (mtb)", + "Image Tile Offset (mtb)", + "Int To Bool (mtb)", + "Int To Number (mtb)", + "Interpolate Clip Sequential (mtb)", + "Latent Lerp (mtb)", + "Load Face Analysis Model (mtb)", + "Load Face Enhance Model (mtb)", + "Load Face Swap Model (mtb)", + "Load Film Model (mtb)", + "Load Image From Url (mtb)", + "Load Image Sequence (mtb)", + "Mask To Image (mtb)", + "Math Expression (mtb)", + "Model Patch Seamless (mtb)", + "Pick From Batch (mtb)", + "Qr Code (mtb)", + "Restore Face (mtb)", + "Save Gif (mtb)", + "Save Image Grid (mtb)", + "Save Image Sequence (mtb)", + "Save Tensors (mtb)", + "Sharpen (mtb)", + "Smart Step (mtb)", + "Stack Images (mtb)", + "String Replace (mtb)", + "Styles Loader (mtb)", + "Text To Image (mtb)", + "Transform Image (mtb)", + "Uncrop (mtb)", + "Unsplash Image (mtb)", + "Vae Decode (mtb)" + ], + { + "nodename_pattern": "\\(mtb\\)$", + "title_aux": "MTB Nodes" + } + ], + 
"https://github.com/mihaiiancu/ComfyUI_Inpaint": [ + [ + "InpaintMediapipe" + ], + { + "title_aux": "mihaiiancu/Inpaint" + } + ], + "https://github.com/mikkel/ComfyUI-text-overlay": [ + [ + "Image Text Overlay" + ], + { + "title_aux": "ComfyUI - Text Overlay Plugin" + } + ], + "https://github.com/mikkel/comfyui-mask-boundingbox": [ + [ + "Mask Bounding Box" + ], + { + "title_aux": "ComfyUI - Mask Bounding Box" + } + ], + "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor": [ + [ + "LaMaPreprocessor", + "lamaPreprocessor" + ], + { + "title_aux": "LaMa Preprocessor [WIP]" + } + ], + "https://github.com/mpiquero7164/ComfyUI-SaveImgPrompt": [ + [ + "Save IMG Prompt" + ], + { + "title_aux": "SaveImgPrompt" + } + ], + "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL": [ + [ + "FastLatentToImage" + ], + { + "title_aux": "ComfyUI_FastVAEDecorder_SDXL" + } + ], + "https://github.com/natto-maki/ComfyUI-NegiTools": [ + [ + "NegiTools_CompositeImages", + "NegiTools_ImageProperties", + "NegiTools_LatentProperties", + "NegiTools_NoiseImageGenerator", + "NegiTools_OpenAiDalle3", + "NegiTools_OpenAiTranslate", + "NegiTools_SeedGenerator", + "NegiTools_StringFunction" + ], + { + "title_aux": "ComfyUI-NegiTools" + } + ], + "https://github.com/nicolai256/comfyUI_Nodes_nicolai256/raw/main/yugioh-presets.py": [ + [ + "yugioh_Presets" + ], + { + "title_aux": "comfyUI_Nodes_nicolai256" + } + ], + "https://github.com/ningxiaoxiao/comfyui-NDI": [ + [ + "NDI_LoadImage", + "NDI_SendImage" + ], + { + "title_aux": "comfyui-NDI" + } + ], + "https://github.com/noembryo/ComfyUI-noEmbryo": [ + [ + "PromptTermList1", + "PromptTermList2", + "PromptTermList3", + "PromptTermList4", + "PromptTermList5", + "PromptTermList6" + ], + { + "author": "noEmbryo", + "description": "Some useful nodes for ComfyUI", + "nickname": "noEmbryo", + "title": "noEmbryo nodes for ComfyUI", + "title_aux": "noEmbryo nodes" + } + ], + "https://github.com/noxinias/ComfyUI_NoxinNodes": [ + [ + "NoxinChime", + "NoxinPromptLoad", + "NoxinPromptSave", + "NoxinScaledResolution", + "NoxinSimpleMath", + "NoxinSplitPrompt" + ], + { + "title_aux": "ComfyUI_NoxinNodes" + } + ], + "https://github.com/ntdviet/comfyui-ext/raw/main/custom_nodes/gcLatentTunnel/gcLatentTunnel.py": [ + [ + "gcLatentTunnel" + ], + { + "title_aux": "ntdviet/comfyui-ext" + } + ], + "https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92": [ + [ + "CLIPStringEncode _O", + "Chat completion _O", + "ChatGPT Simple _O", + "ChatGPT _O", + "ChatGPT compact _O", + "Chat_Completion _O", + "Chat_Message _O", + "Chat_Message_fromString _O", + "Concat Text _O", + "ConcatRandomNSP_O", + "Debug String _O", + "Debug Text _O", + "Debug Text route _O", + "Edit_image _O", + "Equation1param _O", + "Equation2params _O", + "GetImage_(Width&Height) _O", + "GetLatent_(Width&Height) _O", + "ImageScaleFactor _O", + "ImageScaleFactorSimple _O", + "LatentUpscaleFactor _O", + "LatentUpscaleFactorSimple _O", + "LatentUpscaleMultiply", + "Note _O", + "RandomNSP _O", + "Replace Text _O", + "String _O", + "Text _O", + "Text2Image _O", + "Trim Text _O", + "VAEDecodeParallel _O", + "combine_chat_messages _O", + "compine_chat_messages _O", + "concat Strings _O", + "create image _O", + "create_image _O", + "debug Completeion _O", + "debug messages_O", + "float _O", + "floatToInt _O", + "floatToText _O", + "int _O", + "intToFloat _O", + "load_openAI _O", + "replace String _O", + "replace String advanced _O", + "saveTextToFile _O", + "seed _O", + "selectLatentFromBatch _O", + "string2Image _O", + "trim String 
_O", + "variation_image _O" + ], + { + "title_aux": "Quality of life Suit:V2" + } + ], + "https://github.com/ostris/ostris_nodes_comfyui": [ + [ + "LLM Pipe Loader - Ostris", + "LLM Prompt Upsampling - Ostris", + "One Seed - Ostris", + "Text Box - Ostris" + ], + { + "nodename_pattern": "- Ostris$", + "title_aux": "Ostris Nodes ComfyUI" + } + ], + "https://github.com/oyvindg/ComfyUI-TrollSuite": [ + [ + "BinaryImageMask", + "ImagePadding", + "LoadLastImage", + "RandomMask", + "TransparentImage" + ], + { + "title_aux": "ComfyUI-TrollSuite" + } + ], + "https://github.com/palant/extended-saveimage-comfyui": [ + [ + "SaveImageExtended" + ], + { + "title_aux": "Extended Save Image for ComfyUI" + } + ], + "https://github.com/palant/image-resize-comfyui": [ + [ + "ImageResize" + ], + { + "title_aux": "Image Resize for ComfyUI" + } + ], + "https://github.com/pants007/comfy-pants": [ + [ + "CLIPTextEncodeAIO", + "Image Make Square" + ], + { + "title_aux": "pants" + } + ], + "https://github.com/paulo-coronado/comfy_clip_blip_node": [ + [ + "CLIPTextEncodeBLIP", + "CLIPTextEncodeBLIP-2", + "Example" + ], + { + "title_aux": "comfy_clip_blip_node" + } + ], + "https://github.com/peteromallet/ComfyUI-Creative-Interpolation": [ + [ + "BatchCreativeInterpolation" + ], + { + "title_aux": "ComfyUI-Creative-Interpolation [Beta]" + } + ], + "https://github.com/picturesonpictures/comfy_PoP": [ + [ + "AdaptiveCannyDetector_PoP", + "AnyAspectRatio", + "ConditioningMultiplier_PoP", + "ConditioningNormalizer_PoP", + "LoadImageResizer_PoP", + "LoraStackLoader10_PoP", + "LoraStackLoader_PoP", + "VAEDecoderPoP", + "VAEEncoderPoP" + ], + { + "title_aux": "comfy_PoP" + } + ], + "https://github.com/pythongosssss/ComfyUI-Custom-Scripts": [ + [ + "CheckpointLoader|pysssss", + "ConstrainImage|pysssss", + "LoadText|pysssss", + "LoraLoader|pysssss", + "MathExpression|pysssss", + "MultiPrimitive|pysssss", + "PlaySound|pysssss", + "Repeater|pysssss", + "ReroutePrimitive|pysssss", + "SaveText|pysssss", + "ShowText|pysssss", + "StringFunction|pysssss" + ], + { + "title_aux": "pythongosssss/ComfyUI-Custom-Scripts" + } + ], + "https://github.com/pythongosssss/ComfyUI-WD14-Tagger": [ + [ + "WD14Tagger|pysssss" + ], + { + "title_aux": "ComfyUI WD 1.4 Tagger" + } + ], + "https://github.com/ramyma/A8R8_ComfyUI_nodes": [ + [ + "Base64ImageInput", + "Base64ImageOutput" + ], + { + "title_aux": "A8R8 ComfyUI Nodes" + } + ], + "https://github.com/receyuki/comfyui-prompt-reader-node": [ + [ + "SDBatchLoader", + "SDParameterGenerator", + "SDPromptMerger", + "SDPromptReader", + "SDPromptSaver", + "SDTypeConverter" + ], + { + "author": "receyuki", + "description": "ComfyUI node version of the SD Prompt Reader", + "nickname": "SD Prompt Reader", + "title": "SD Prompt Reader", + "title_aux": "comfyui-prompt-reader-node" + } + ], + "https://github.com/rgthree/rgthree-comfy": [ + [], + { + "author": "rgthree", + "description": "A bunch of nodes I created that I also find useful.", + "nickname": "rgthree", + "nodename_pattern": " \\(rgthree\\)$", + "title": "Comfy Nodes", + "title_aux": "rgthree's ComfyUi Nodes" + } + ], + "https://github.com/richinsley/Comfy-LFO": [ + [ + "LFO_Pulse", + "LFO_Sawtooth", + "LFO_Sine", + "LFO_Square", + "LFO_Triangle" + ], + { + "title_aux": "Comfy-LFO" + } + ], + "https://github.com/rklaffehn/rk-comfy-nodes": [ + [ + "RK_CivitAIAddHashes", + "RK_CivitAIMetaChecker" + ], + { + "title_aux": "rk-comfy-nodes" + } + ], + "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata": [ + [ + "SetMetadataAll", + 
"SetMetadataString" + ], + { + "title_aux": "ComfyUI PNG Metadata" + } + ], + "https://github.com/s1dlx/comfy_meh/raw/main/meh.py": [ + [ + "MergingExecutionHelper" + ], + { + "title_aux": "comfy_meh" + } + ], + "https://github.com/seanlynch/comfyui-optical-flow": [ + [ + "Apply optical flow", + "Compute optical flow", + "Visualize optical flow" + ], + { + "title_aux": "ComfyUI Optical Flow" + } + ], + "https://github.com/seanlynch/srl-nodes": [ + [ + "SRL Conditional Interrrupt", + "SRL Eval", + "SRL Filter Image List", + "SRL Format String" + ], + { + "title_aux": "SRL's nodes" + } + ], + "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack": [ + [ + "ImageResizeAndCropNode", + "ImageSquareAdapterNode" + ], + { + "title_aux": "ComfyUI_Nimbus-Pack" + } + ], + "https://github.com/shadowcz007/comfyui-mixlab-nodes": [ + [ + "AreaToMask", + "CLIPSeg", + "CLIPSeg_", + "CharacterInText", + "ChatGPTOpenAI", + "CombineMasks_", + "CombineSegMasks", + "EditLayer", + "EmptyLayer", + "EnhanceImage", + "FaceToMask", + "FeatheredMask", + "FloatingVideo", + "ImageCropByAlpha", + "LoadImagesFromPath", + "MergeLayers", + "NewLayer", + "RandomPrompt", + "ScreenShare", + "ShowTextForGPT", + "SmoothMask", + "SplitLongMask", + "SvgImage", + "TextImage", + "TransparentImage", + "VAEDecodeConsistencyDecoder", + "VAELoaderConsistencyDecoder" + ], + { + "title_aux": "comfyui-mixlab-nodes [WIP]" + } + ], + "https://github.com/shiimizu/ComfyUI_smZNodes": [ + [ + "smZ CLIPTextEncode", + "smZ Settings" + ], + { + "title_aux": "smZNodes" + } + ], + "https://github.com/shingo1228/ComfyUI-SDXL-EmptyLatentImage": [ + [ + "SDXL Empty Latent Image" + ], + { + "title_aux": "ComfyUI-SDXL-EmptyLatentImage" + } + ], + "https://github.com/shingo1228/ComfyUI-send-eagle-slim": [ + [ + "Send Webp Image to Eagle" + ], + { + "title_aux": "ComfyUI-send-Eagle(slim)" + } + ], + "https://github.com/shockz0rz/ComfyUI_InterpolateEverything": [ + [ + "OpenposePreprocessorInterpolate" + ], + { + "title_aux": "InterpolateEverything" + } + ], + "https://github.com/sipherxyz/comfyui-art-venture": [ + [ + "AV_CheckpointMerge", + "AV_CheckpointModelsToParametersPipe", + "AV_CheckpointSave", + "AV_ControlNetEfficientLoader", + "AV_ControlNetEfficientLoaderAdvanced", + "AV_ControlNetEfficientStacker", + "AV_ControlNetEfficientStackerSimple", + "AV_ControlNetLoader", + "AV_ControlNetPreprocessor", + "AV_LoraListLoader", + "AV_LoraListStacker", + "AV_LoraLoader", + "AV_ParametersPipeToCheckpointModels", + "AV_ParametersPipeToPrompts", + "AV_PromptsToParametersPipe", + "AV_SAMLoader", + "AV_VAELoader", + "AspectRatioSelector", + "BLIPCaption", + "BLIPLoader", + "BooleanPrimitive", + "ColorBlend", + "ColorCorrect", + "DeepDanbooruCaption", + "DependenciesEdit", + "Fooocus_KSampler", + "Fooocus_KSamplerAdvanced", + "GetBoolFromJson", + "GetFloatFromJson", + "GetIntFromJson", + "GetObjectFromJson", + "GetSAMEmbedding", + "GetTextFromJson", + "ISNetLoader", + "ISNetSegment", + "ImageAlphaComposite", + "ImageApplyChannel", + "ImageExtractChannel", + "ImageGaussianBlur", + "ImageMuxer", + "ImageRepeat", + "ImageScaleDown", + "ImageScaleDownBy", + "ImageScaleDownToSize", + "ImageScaleToMegapixels", + "LaMaInpaint", + "LoadImageAsMaskFromUrl", + "LoadImageFromUrl", + "LoadJsonFromUrl", + "MergeModels", + "NumberScaler", + "OverlayInpaintedImage", + "OverlayInpaintedLatent", + "PrepareImageAndMaskForInpaint", + "QRCodeGenerator", + "RandomFloat", + "RandomInt", + "SAMEmbeddingToImage", + "SDXLAspectRatioSelector", + "SDXLPromptStyler", + "SeedSelector", + 
"StringToInt", + "StringToNumber" + ], + { + "title_aux": "comfyui-art-venture" + } + ], + "https://github.com/skfoo/ComfyUI-Coziness": [ + [ + "LoraTextExtractor-b1f83aa2", + "MultiLoraLoader-70bf3d77" + ], + { + "title_aux": "ComfyUI-Coziness" + } + ], + "https://github.com/space-nuko/ComfyUI-Disco-Diffusion": [ + [ + "DiscoDiffusion_DiscoDiffusion", + "DiscoDiffusion_DiscoDiffusionExtraSettings", + "DiscoDiffusion_GuidedDiffusionLoader", + "DiscoDiffusion_OpenAICLIPLoader" + ], + { + "title_aux": "Disco Diffusion" + } + ], + "https://github.com/space-nuko/ComfyUI-OpenPose-Editor": [ + [ + "Nui.OpenPoseEditor" + ], + { + "title_aux": "OpenPose Editor" + } + ], + "https://github.com/space-nuko/nui-suite": [ + [ + "Nui.DynamicPromptsTextGen", + "Nui.FeelingLuckyTextGen", + "Nui.OutputString" + ], + { + "title_aux": "nui suite" + } + ], + "https://github.com/spacepxl/ComfyUI-HQ-Image-Save": [ + [ + "LoadLatentEXR", + "SaveEXR", + "SaveLatentEXR", + "SaveTiff" + ], + { + "title_aux": "ComfyUI-HQ-Image-Save" + } + ], + "https://github.com/spinagon/ComfyUI-seam-carving": [ + [ + "SeamCarving" + ], + { + "title_aux": "ComfyUI-seam-carving" + } + ], + "https://github.com/spinagon/ComfyUI-seamless-tiling": [ + [ + "CircularVAEDecode", + "MakeCircularVAE", + "OffsetImage", + "SeamlessTile" + ], + { + "title_aux": "Seamless tiling Node for ComfyUI" + } + ], + "https://github.com/spro/comfyui-mirror": [ + [ + "LatentMirror" + ], + { + "title_aux": "Latent Mirror node for ComfyUI" + } + ], + "https://github.com/ssitu/ComfyUI_UltimateSDUpscale": [ + [ + "UltimateSDUpscale", + "UltimateSDUpscaleNoUpscale" + ], + { + "title_aux": "UltimateSDUpscale" + } + ], + "https://github.com/ssitu/ComfyUI_fabric": [ + [ + "FABRICPatchModel", + "FABRICPatchModelAdv", + "KSamplerAdvFABRICAdv", + "KSamplerFABRIC", + "KSamplerFABRICAdv", + "LatentBatch" + ], + { + "title_aux": "ComfyUI fabric" + } + ], + "https://github.com/ssitu/ComfyUI_restart_sampling": [ + [ + "KRestartSampler", + "KRestartSamplerAdv", + "KRestartSamplerSimple" + ], + { + "title_aux": "Restart Sampling" + } + ], + "https://github.com/ssitu/ComfyUI_roop": [ + [ + "RoopImproved", + "roop" + ], + { + "title_aux": "ComfyUI roop" + } + ], + "https://github.com/storyicon/comfyui_segment_anything": [ + [ + "GroundingDinoModelLoader (segment anything)", + "GroundingDinoSAMSegment (segment anything)", + "InvertMask (segment anything)", + "SAMModelLoader (segment anything)" + ], + { + "title_aux": "segment anything" + } + ], + "https://github.com/strimmlarn/ComfyUI_Strimmlarns_aesthetic_score": [ + [ + "AesthetlcScoreSorter", + "CalculateAestheticScore", + "LoadAesteticModel", + "ScoreToNumber" + ], + { + "title_aux": "ComfyUI_Strimmlarns_aesthetic_score" + } + ], + "https://github.com/syllebra/bilbox-comfyui": [ + [ + "BilboXLut", + "BilboXPhotoPrompt", + "BilboXVignette" + ], + { + "title_aux": "BilboX's ComfyUI Custom Nodes" + } + ], + "https://github.com/sylym/comfy_vid2vid": [ + [ + "CheckpointLoaderSimpleSequence", + "DdimInversionSequence", + "KSamplerSequence", + "LoadImageMaskSequence", + "LoadImageSequence", + "LoraLoaderSequence", + "SetLatentNoiseSequence", + "TrainUnetSequence", + "VAEEncodeForInpaintSequence" + ], + { + "title_aux": "Vid2vid" + } + ], + "https://github.com/szhublox/ambw_comfyui": [ + [ + "Auto Merge Block Weighted", + "CLIPMergeSimple", + "CheckpointSave", + "ModelMergeBlocks", + "ModelMergeSimple" + ], + { + "title_aux": "Auto-MBW" + } + ], + "https://github.com/taabata/Comfy_Syrian_Falcon_Nodes/raw/main/SyrianFalconNodes.py": 
[ + [ + "CompositeImage", + "KSamplerAlternate", + "KSamplerPromptEdit", + "KSamplerPromptEditAndAlternate", + "LoopBack", + "QRGenerate", + "WordAsImage" + ], + { + "title_aux": "Syrian Falcon Nodes" + } + ], + "https://github.com/taabata/LCM_Inpaint-Outpaint_Comfy": [ + [ + "FreeU_LCM", + "ImageOutputToComfyNodes", + "ImageShuffle", + "LCMGenerate", + "LCMGenerate_ReferenceOnly", + "LCMGenerate_SDTurbo", + "LCMGenerate_img2img", + "LCMGenerate_img2img_IPAdapter", + "LCMGenerate_img2img_controlnet", + "LCMGenerate_inpaintv2", + "LCMGenerate_inpaintv3", + "LCMLoader", + "LCMLoader_RefInpaint", + "LCMLoader_ReferenceOnly", + "LCMLoader_SDTurbo", + "LCMLoader_controlnet", + "LCMLoader_controlnet_inpaint", + "LCMLoader_img2img", + "LCMLoraLoader_inpaint", + "LCMLora_inpaint", + "LCMT2IAdapter", + "LCM_IPAdapter", + "LCM_IPAdapter_inpaint", + "LCM_outpaint_prep", + "LoadImageNode_LCM", + "OutpaintCanvasTool", + "SaveImage_LCM", + "stitch" + ], + { + "title_aux": "LCM_Inpaint-Outpaint_Comfy" + } + ], + "https://github.com/theUpsider/ComfyUI-Logic": [ + [ + "Compare", + "DebugPrint", + "If ANY execute A else B", + "Int", + "String" + ], + { + "title_aux": "ComfyUI-Logic" + } + ], + "https://github.com/theUpsider/ComfyUI-Styles_CSV_Loader": [ + [ + "Load Styles CSV" + ], + { + "title_aux": "Styles CSV Loader Extension for ComfyUI" + } + ], + "https://github.com/thecooltechguy/ComfyUI-MagicAnimate": [ + [ + "MagicAnimate", + "MagicAnimateModelLoader" + ], + { + "title_aux": "ComfyUI-MagicAnimate" + } + ], + "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion": [ + [ + "SVDDecoder", + "SVDModelLoader", + "SVDSampler", + "SVDSimpleImg2Vid" + ], + { + "title_aux": "ComfyUI Stable Video Diffusion" + } + ], + "https://github.com/thedyze/save-image-extended-comfyui": [ + [ + "SaveImageExtended" + ], + { + "title_aux": "Save Image Extended for ComfyUI" + } + ], + "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes": [ + [ + "CaptureWebcam", + "LoadWebcamImage", + "SaveImagetoPath" + ], + { + "title_aux": "ComfyUI_toyxyz_test_nodes" + } + ], + "https://github.com/trojblue/trNodes": [ + [ + "JpgConvertNode", + "trColorCorrection", + "trLayering", + "trRouter", + "trRouterLonger" + ], + { + "title_aux": "trNodes" + } + ], + "https://github.com/tudal/Hakkun-ComfyUI-nodes/raw/main/hakkun_nodes.py": [ + [ + "Any Converter", + "Calculate Upscale", + "Image Resize To Height", + "Image Resize To Width", + "Image size to string", + "Load Random Image", + "Load Text", + "Multi Text Merge", + "Prompt Parser", + "Random Line", + "Random Line 4" + ], + { + "nodename_pattern": "\\(mtb\\)$", + "title_aux": "Hakkun-ComfyUI-nodes" + } + ], + "https://github.com/tusharbhutt/Endless-Nodes": [ + [ + "ESS Aesthetic Scoring", + "ESS Aesthetic Scoring Auto", + "ESS Combo Parameterizer", + "ESS Combo Parameterizer & Prompts", + "ESS Eight Input Random", + "ESS Eight Input Text Switch", + "ESS Float to Integer", + "ESS Float to Number", + "ESS Float to String", + "ESS Float to X", + "ESS Global Envoy", + "ESS Image Reward", + "ESS Image Reward Auto", + "ESS Image Saver with JSON", + "ESS Integer to Float", + "ESS Integer to Number", + "ESS Integer to String", + "ESS Integer to X", + "ESS Number to Float", + "ESS Number to Integer", + "ESS Number to String", + "ESS Number to X", + "ESS Parameterizer", + "ESS Parameterizer & Prompts", + "ESS Six Float Output", + "ESS Six Input Random", + "ESS Six Input Text Switch", + "ESS Six Integer IO Switch", + "ESS Six Integer IO Widget", + "ESS String to Float", + "ESS String 
to Integer", + "ESS String to Num", + "ESS String to X", + "\u267e\ufe0f\ud83c\udf0a\u2728 Image Saver with JSON" + ], + { + "author": "BiffMunky", + "description": "A small set of nodes I created for various numerical and text inputs. Features image saver with ability to have JSON saved to separate folder, parameter collection nodes, two aesthetic scoring models, switches for text and numbers, and conversion of string to numeric and vice versa.", + "nickname": "\u267e\ufe0f\ud83c\udf0a\u2728", + "title": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes", + "title_aux": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes" + } + ], + "https://github.com/twri/sdxl_prompt_styler": [ + [ + "SDXLPromptStyler", + "SDXLPromptStylerAdvanced" + ], + { + "title_aux": "SDXL Prompt Styler" + } + ], + "https://github.com/uarefans/ComfyUI-Fans": [ + [ + "Fans Prompt Styler Negative", + "Fans Prompt Styler Positive", + "Fans Styler", + "Fans Text Concatenate" + ], + { + "title_aux": "ComfyUI-Fans" + } + ], + "https://github.com/vanillacode314/SimpleWildcardsComfyUI": [ + [ + "SimpleConcat", + "SimpleWildcard" + ], + { + "author": "VanillaCode314", + "description": "A simple wildcard node for ComfyUI. Can also be used a style prompt node.", + "nickname": "Simple Wildcard", + "title": "Simple Wildcard", + "title_aux": "Simple Wildcard" + } + ], + "https://github.com/wallish77/wlsh_nodes": [ + [ + "Alternating KSampler (WLSH)", + "Build Filename String (WLSH)", + "CLIP +/- w/Text Unified (WLSH)", + "CLIP Positive-Negative (WLSH)", + "CLIP Positive-Negative XL (WLSH)", + "CLIP Positive-Negative XL w/Text (WLSH)", + "CLIP Positive-Negative w/Text (WLSH)", + "Checkpoint Loader w/Name (WLSH)", + "Empty Latent by Pixels (WLSH)", + "Empty Latent by Ratio (WLSH)", + "Empty Latent by Size (WLSH)", + "Generate Border Mask (WLSH)", + "Grayscale Image (WLSH)", + "Image Load with Metadata (WLSH)", + "Image Save with Prompt (WLSH)", + "Image Save with Prompt File (WLSH)", + "Image Save with Prompt/Info (WLSH)", + "Image Save with Prompt/Info File (WLSH)", + "Image Scale By Factor (WLSH)", + "Image Scale by Shortside (WLSH)", + "KSamplerAdvanced (WLSH)", + "Multiply Integer (WLSH)", + "Outpaint to Image (WLSH)", + "Prompt Weight (WLSH)", + "Quick Resolution Multiply (WLSH)", + "Resolutions by Ratio (WLSH)", + "SDXL Quick Empty Latent (WLSH)", + "SDXL Quick Image Scale (WLSH)", + "SDXL Resolutions (WLSH)", + "SDXL Steps (WLSH)", + "Save Positive Prompt(WLSH)", + "Save Prompt (WLSH)", + "Save Prompt/Info (WLSH)", + "Seed and Int (WLSH)", + "Seed to Number (WLSH)", + "Simple Pattern Replace (WLSH)", + "Simple String Combine (WLSH)", + "Time String (WLSH)", + "Upscale by Factor with Model (WLSH)", + "VAE Encode for Inpaint w/Padding (WLSH)" + ], + { + "title_aux": "wlsh_nodes" + } + ], + "https://github.com/whatbirdisthat/cyberdolphin": [ + [ + "\ud83d\udc2c Gradio ChatInterface", + "\ud83d\udc2c OpenAI Advanced", + "\ud83d\udc2c OpenAI Compatible", + "\ud83d\udc2c OpenAI DALL\u00b7E", + "\ud83d\udc2c OpenAI Simple" + ], + { + "title_aux": "cyberdolphin" + } + ], + "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus": [ + [ + "CDL.OpenPoseEditorPlus" + ], + { + "title_aux": "ComfyUI-Openpose-Editor-Plus" + } + ], + "https://github.com/wmatson/easy-comfy-nodes": [ + [ + "EZAssocDictNode", + "EZAssocImgNode", + "EZAssocStrNode", + "EZEmptyDictNode", + "EZHttpPostNode", + "EZLoadImgBatchFromUrlsNode", + "EZLoadImgFromUrlNode", + "EZVideoCombiner" + ], + { + "title_aux": "easy-comfy-nodes" + } + ], + 
"https://github.com/wolfden/ComfyUi_PromptStylers": [ + [ + "SDXLPromptStylerAll", + "SDXLPromptStylerHorror", + "SDXLPromptStylerMisc", + "SDXLPromptStylerbyArtist", + "SDXLPromptStylerbyCamera", + "SDXLPromptStylerbyComposition", + "SDXLPromptStylerbyCyberpunkSurrealism", + "SDXLPromptStylerbyDepth", + "SDXLPromptStylerbyEnvironment", + "SDXLPromptStylerbyFantasySetting", + "SDXLPromptStylerbyFilter", + "SDXLPromptStylerbyFocus", + "SDXLPromptStylerbyImpressionism", + "SDXLPromptStylerbyLighting", + "SDXLPromptStylerbyMileHigh", + "SDXLPromptStylerbyMood", + "SDXLPromptStylerbyMythicalCreature", + "SDXLPromptStylerbyOriginal", + "SDXLPromptStylerbyQuantumRealism", + "SDXLPromptStylerbySteamPunkRealism", + "SDXLPromptStylerbySubject", + "SDXLPromptStylerbySurrealism", + "SDXLPromptStylerbyTheme", + "SDXLPromptStylerbyTimeofDay", + "SDXLPromptStylerbyWyvern", + "SDXLPromptbyCelticArt", + "SDXLPromptbyContemporaryNordicArt", + "SDXLPromptbyFashionArt", + "SDXLPromptbyGothicRevival", + "SDXLPromptbyIrishFolkArt", + "SDXLPromptbyRomanticNationalismArt", + "SDXLPromptbySportsArt", + "SDXLPromptbyStreetArt", + "SDXLPromptbyVikingArt", + "SDXLPromptbyWildlifeArt" + ], + { + "title_aux": "SDXL Prompt Styler (customized version by wolfden)" + } + ], + "https://github.com/wolfden/ComfyUi_String_Function_Tree": [ + [ + "StringFunction" + ], + { + "title_aux": "ComfyUi_String_Function_Tree" + } + ], + "https://github.com/wsippel/comfyui_ws/raw/main/sdxl_utility.py": [ + [ + "SDXLResolutionPresets" + ], + { + "title_aux": "SDXLResolutionPresets" + } + ], + "https://github.com/wutipong/ComfyUI-TextUtils": [ + [ + "Text Utils - Join N-Elements of String List", + "Text Utils - Join String List", + "Text Utils - Join Strings", + "Text Utils - Split String to List" + ], + { + "title_aux": "ComfyUI-TextUtils" + } + ], + "https://github.com/xXAdonesXx/NodeGPT": [ + [ + "AppendAgent", + "Assistant", + "Chat", + "ChatGPT", + "CombineInput", + "Conditioning", + "CostumeAgent_1", + "CostumeAgent_2", + "CostumeMaster_1", + "Critic", + "DisplayString", + "DisplayTextAsImage", + "EVAL", + "Engineer", + "Executor", + "GroupChat", + "Image_generation_Conditioning", + "LM_Studio", + "LoadAPIconfig", + "LoadTXT", + "MemGPT", + "Memory_Excel", + "Model_1", + "Ollama", + "Output2String", + "Planner", + "Scientist", + "TextCombine", + "TextGeneration", + "TextGenerator", + "TextInput", + "TextOutput", + "UserProxy", + "llama-cpp", + "llava", + "oobaboogaOpenAI" + ], + { + "title_aux": "NodeGPT" + } + ], + "https://github.com/yolanother/DTAIComfyImageSubmit": [ + [ + "DTSimpleSubmitImage", + "DTSubmitImage" + ], + { + "title_aux": "Comfy AI DoubTech.ai Image Sumission Node" + } + ], + "https://github.com/yolanother/DTAIComfyLoaders": [ + [ + "DTCLIPLoader", + "DTCLIPVisionLoader", + "DTCheckpointLoader", + "DTCheckpointLoaderSimple", + "DTControlNetLoader", + "DTDiffControlNetLoader", + "DTDiffusersLoader", + "DTGLIGENLoader", + "DTLoadImage", + "DTLoadImageMask", + "DTLoadLatent", + "DTLoraLoader", + "DTLorasLoader", + "DTStyleModelLoader", + "DTUpscaleModelLoader", + "DTVAELoader", + "DTunCLIPCheckpointLoader" + ], + { + "title_aux": "Comfy UI Online Loaders" + } + ], + "https://github.com/yolanother/DTAIComfyPromptAgent": [ + [ + "DTPromptAgent", + "DTPromptAgentString" + ], + { + "title_aux": "Comfy UI Prompt Agent" + } + ], + "https://github.com/yolanother/DTAIComfyQRCodes": [ + [ + "QRCode" + ], + { + "title_aux": "Comfy UI QR Codes" + } + ], + "https://github.com/yolanother/DTAIComfyVariables": [ + [ + 
"DTCLIPTextEncode", + "DTSingleLineStringVariable", + "DTSingleLineStringVariableNoClip", + "FloatVariable", + "IntVariable", + "StringFormat", + "StringFormatSingleLine", + "StringVariable" + ], + { + "title_aux": "Variables for Comfy UI" + } + ], + "https://github.com/yolanother/DTAIImageToTextNode": [ + [ + "DTAIImageToTextNode", + "DTAIImageUrlToTextNode" + ], + { + "title_aux": "Image to Text Node" + } + ], + "https://github.com/youyegit/tdxh_node_comfyui": [ + [ + "TdxhBoolNumber", + "TdxhClipVison", + "TdxhControlNetApply", + "TdxhControlNetProcessor", + "TdxhFloatInput", + "TdxhImageToSize", + "TdxhImageToSizeAdvanced", + "TdxhImg2ImgLatent", + "TdxhIntInput", + "TdxhLoraLoader", + "TdxhOnOrOff", + "TdxhReference", + "TdxhStringInput", + "TdxhStringInputTranslator" + ], + { + "title_aux": "tdxh_node_comfyui" + } + ], + "https://github.com/zcfrank1st/Comfyui-Yolov8": [ + [ + "Yolov8Detection", + "Yolov8Segmentation" + ], + { + "title_aux": "ComfyUI Yolov8" + } + ], + "https://github.com/zcfrank1st/comfyui_visual_anagrams": [ + [ + "VisualAnagramsAnimate", + "VisualAnagramsSample" + ], + { + "title_aux": "comfyui_visual_anagram" + } + ], + "https://github.com/zer0TF/cute-comfy": [ + [ + "Cute.Placeholder" + ], + { + "title_aux": "Cute Comfy" + } + ], + "https://github.com/zhuanqianfish/ComfyUI-EasyNode": [ + [ + "EasyCaptureNode", + "EasyVideoOutputNode", + "SendImageWebSocket" + ], + { + "title_aux": "EasyCaptureNode for ComfyUI" + } + ], + "https://raw.githubusercontent.com/throttlekitty/SDXLCustomAspectRatio/main/SDXLAspectRatio.py": [ + [ + "SDXLAspectRatio" + ], + { + "title_aux": "SDXLCustomAspectRatio" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json new file mode 100644 index 0000000000000000000000000000000000000000..8868e76d536dc93d5e18270127db206d80dee68e --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json @@ -0,0 +1,671 @@ +{ + "models": [ + { + "name": "SDXL-Turbo 1.0 (fp16)", + "type": "checkpoints", + "base": "SDXL", + "save_path": "checkpoints/SDXL-TURBO", + "description": "[6.9GB] SDXL-Turbo 1.0 fp16", + "reference": "https://huggingface.co/stabilityai/sdxl-turbo", + "filename": "sd_xl_turbo_1.0_fp16.safetensors", + "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0_fp16.safetensors" + }, + { + "name": "SDXL-Turbo 1.0", + "type": "checkpoints", + "base": "SDXL", + "save_path": "checkpoints/SDXL-TURBO", + "description": "[13.9GB] SDXL-Turbo 1.0", + "reference": "https://huggingface.co/stabilityai/sdxl-turbo", + "filename": "sd_xl_turbo_1.0.safetensors", + "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0.safetensors" + }, + { + "name": "Stable Video Diffusion Image-to-Video", + "type": "checkpoints", + "base": "SVD", + "save_path": "checkpoints/SVD", + "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.
NOTE: 14 frames @ 576x1024", + "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid", + "filename": "svd.safetensors", + "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid/resolve/main/svd.safetensors" + }, + { + "name": "Stable Video Diffusion Image-to-Video (XT)", + "type": "checkpoints", + "base": "SVD", + "save_path": "checkpoints/SVD", + "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.
NOTE: 25 frames @ 576x1024 ", + "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt", + "filename": "svd_xt.safetensors", + "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/resolve/main/svd_xt.safetensors" + }, + + { + "name": "animatediff/mm_sdxl_v10_beta.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sdxl_v10_beta.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sdxl_v10_beta.ckpt" + }, + { + "name": "animatediff/v2_lora_PanLeft.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_PanLeft.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanLeft.ckpt" + }, + { + "name": "animatediff/v2_lora_PanRight.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_PanRight.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanRight.ckpt" + }, + { + "name": "animatediff/v2_lora_RollingAnticlockwise.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_RollingAnticlockwise.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingAnticlockwise.ckpt" + }, + { + "name": "animatediff/v2_lora_RollingClockwise.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_RollingClockwise.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingClockwise.ckpt" + }, + { + "name": "animatediff/v2_lora_TiltDown.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. 
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_TiltDown.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltDown.ckpt" + }, + { + "name": "animatediff/v2_lora_TiltUp.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_TiltUp.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltUp.ckpt" + }, + { + "name": "animatediff/v2_lora_ZoomIn.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_ZoomIn.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomIn.ckpt" + }, + { + "name": "animatediff/v2_lora_ZoomOut.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "motion lora", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/motion_lora", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "v2_lora_ZoomOut.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomOut.ckpt" + }, + + { + "name": "CiaraRowles/TemporalNet1XL (1.0)", + "type": "controlnet", + "base": "SD1.5", + "save_path": "controlnet/TemporalNet1XL", + "description": "This is TemporalNet1XL, it is a re-train of the controlnet TemporalNet1 with Stable Diffusion XL.", + "reference": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0", + "filename": "diffusion_pytorch_model.safetensors", + "url": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0/resolve/main/diffusion_pytorch_model.safetensors" + }, + + { + "name": "LCM LoRA SD1.5", + "type": "lora", + "base": "SD1.5", + "save_path": "loras/lcm/SD1.5", + "description": "Latent Consistency LoRA for SD1.5", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5", + "filename": "pytorch_lora_weights.safetensors", + "url": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5/resolve/main/pytorch_lora_weights.safetensors" + }, + { + "name": "LCM LoRA SSD-1B", + "type": "lora", + "base": "SSD-1B", + "save_path": "loras/lcm/SSD-1B", + "description": "Latent Consistency LoRA for SSD-1B", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b", + "filename": "pytorch_lora_weights.safetensors", + "url": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b/resolve/main/pytorch_lora_weights.safetensors" + }, + { + "name": "LCM LoRA SDXL", + "type": "lora", + "base": "SSD-1B", + "save_path": "loras/lcm/SDXL", + "description": "Latent Consistency LoRA for SDXL", + "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdxl", + "filename": "pytorch_lora_weights.safetensors", + 
"url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/resolve/main/pytorch_lora_weights.safetensors" + }, + + { + "name": "face_yolov8m-seg_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "face_yolov8m-seg_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8m-seg_60.pt" + }, + { + "name": "face_yolov8n-seg2_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "face_yolov8n-seg2_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8n-seg2_60.pt" + }, + { + "name": "hair_yolov8n-seg_60.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "hair_yolov8n-seg_60.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/hair_yolov8n-seg_60.pt" + }, + { + "name": "skin_yolov8m-seg_400.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8m-seg_400.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8m-seg_400.pt" + }, + { + "name": "skin_yolov8n-seg_400.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8n-seg_400.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_400.pt" + }, + { + "name": "skin_yolov8n-seg_800.pt (segm)", + "type": "Ultralytics", + "base": "Ultralytics", + "save_path": "ultralytics/segm", + "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.", + "reference": "https://github.com/hben35096/assets/releases/tag/yolo8", + "filename": "skin_yolov8n-seg_800.pt", + "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_800.pt" + }, + + { + "name": "ip-adapter-plus_sdxl_vit-h.bin
(install to ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus_sdxl_vit-h.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin" + }, + { + "name": "ip-adapter_sdxl_vit-h.bin
(install to ComfyUI_IPAdapter_plus)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/ComfyUI_IPAdapter_plus/models", + "description": "Pressing 'install' directly downloads the model from the ComfyUI_IPAdapter_plus/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sdxl_vit-h.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.bin" + }, + { + "name": "CiaraRowles/temporaldiff-v1-animatediff.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/CiaraRowles/TemporalDiff", + "filename": "temporaldiff-v1-animatediff.ckpt", + "url": "https://huggingface.co/CiaraRowles/TemporalDiff/resolve/main/temporaldiff-v1-animatediff.ckpt" + }, + { + "name": "animatediff/mm_sd_v15_v2.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15_v2.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt" + }, + { + "name": "AD_Stabilized_Motion/mm-Stabilized_high.pth (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion", + "filename": "mm-Stabilized_high.pth", + "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_high.pth" + }, + { + "name": "AD_Stabilized_Motion/mm-Stabilized_mid.pth (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion", + "filename": "mm-Stabilized_mid.pth", + "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_mid.pth" + }, + + { + "name": "GFPGANv1.4.pth", + "type": "GFPGAN", + "base": "GFPGAN", + "save_path": "facerestore_models", + "description": "Face Restoration Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/TencentARC/GFPGAN/releases", + "filename": "GFPGANv1.4.pth", + "url": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/GFPGANv1.4.pth" + }, + { + "name": "codeformer.pth", + "type": "CodeFormer", + "base": "CodeFormer", + "save_path": "facerestore_models", + "description": "Face Restoration Models. 
Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/sczhou/CodeFormer/releases", + "filename": "codeformer.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" + }, + { + "name": "detection_Resnet50_Final.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facerestore_models", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "detection_Resnet50_Final.pth", + "url": "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth" + }, + { + "name": "detection_mobilenet0.25_Final.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facerestore_models", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "detection_mobilenet0.25_Final.pth", + "url": "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth" + }, + { + "name": "yolov5l-face.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facedetection", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "yolov5l-face.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth" + }, + { + "name": "yolov5n-face.pth", + "type": "facexlib", + "base": "facexlib", + "save_path": "facedetection", + "description": "Face Detection Models. Download the model required for using the 'Facerestore CF (Code Former)' custom node.", + "reference": "https://github.com/xinntao/facexlib", + "filename": "yolov5n-face.pth", + "url": "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth" + }, + { + "name": "ip-adapter-plus_sd15.bin
(install to IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin" + }, + { + "name": "ip-adapter-plus-face_sd15.bin
(install to IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter-plus-face_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus-face_sd15.bin" + }, + + { + "name": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1 (UNET/fp16)", + "type": "unet", + "base": "SDXL", + "save_path": "unet/xl-inpaint-0.1", + "description": "[5.14GB] Stable Diffusion XL inpainting model 0.1. You need UNETLoader instead of CheckpointLoader.", + "reference": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "filename": "diffusion_pytorch_model.fp16.safetensors", + "url": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors" + }, + { + "name": "diffusers/stable-diffusion-xl-1.0-inpainting-0.1 (UNET)", + "type": "unet", + "base": "SDXL", + "save_path": "unet/xl-inpaint-0.1", + "description": "[10.3GB] Stable Diffusion XL inpainting model 0.1. You need UNETLoader instead of CheckpointLoader.", + "reference": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1", + "filename": "diffusion_pytorch_model.safetensors", + "url": "https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1/resolve/main/unet/diffusion_pytorch_model.safetensors" + }, + { + "name": "wd15_ip_adapter_plus.bin", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models extension node. 
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "wd15_ip_adapter_plus.bin", + "url": "https://huggingface.co/furusu/IP-Adapter/resolve/main/wd15_ip_adapter_plus.bin" + }, + + { + "name": "Inswapper (face swap)", + "type": "insightface", + "base" : "inswapper", + "save_path": "insightface", + "description": "Checkpoint of the insightface swapper model (used by Comfy-Roop and comfy_mtb)", + "reference": "https://huggingface.co/deepinsight/inswapper/", + "filename": "inswapper_128.onnx", + "url": "https://huggingface.co/deepinsight/inswapper/resolve/main/inswapper_128.onnx" + }, + + { + "name": "CLIPVision model (stabilityai/clip_vision_g)", + "type": "clip_vision", + "base": "SDXL", + "save_path": "clip_vision/SDXL", + "description": "[3.69GB] clip_g vision model", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "clip_vision_g.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/revision/clip_vision_g.safetensors" + }, + + { + "name": "CLIPVision model (IP-Adapter)", + "type": "clip_vision", + "base": "SD1.5", + "save_path": "clip_vision/SD1.5", + "description": "[2.5GB] CLIPVision model (needed for IP-Adapter)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "pytorch_model.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin" + }, + { + "name": "CLIPVision model (IP-Adapter)", + "type": "clip_vision", + "base": "SDXL", + "save_path": "clip_vision/SDXL", + "description": "[3.69GB] CLIPVision model (needed for IP-Adapter)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "pytorch_model.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/pytorch_model.bin" + }, + + { + "name": "stabilityai/control-lora-canny-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: canny rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-canny-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-canny-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-depth-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: depth rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-depth-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-depth-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-recolor-rank128.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: recolor rank128", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-recolor-rank128.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-recolor-rank128.safetensors" + }, + { + "name": "stabilityai/control-lora-sketch-rank128-metadata.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: sketch rank128 metadata", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-sketch-rank128-metadata.safetensors", + "url": 
"https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank128/control-lora-sketch-rank128-metadata.safetensors" + }, + + { + "name": "stabilityai/control-lora-canny-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: canny rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-canny-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-depth-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: depth rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-depth-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-recolor-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: recolor rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-recolor-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors" + }, + { + "name": "stabilityai/control-lora-sketch-rank256.safetensors", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "Control-LoRA: sketch rank256", + "reference": "https://huggingface.co/stabilityai/control-lora", + "filename": "control-lora-sketch-rank256.safetensors", + "url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors" + }, + + { + "name": "kohya-ss/ControlNet-LLLite: SDXL Canny Anime", + "type": "controlnet", + "base": "SDXL", + "save_path": "custom_nodes/ControlNet-LLLite-ComfyUI/models", + "description": "[46.2MB] An extremely compactly designed controlnet model (a.k.a. ControlNet-LLLite). 
Note: The model structure is highly experimental and may be subject to change in the future.", + "reference": "https://huggingface.co/kohya-ss/controlnet-lllite", + "filename": "controllllite_v01032064e_sdxl_canny_anime.safetensors", + "url": "https://huggingface.co/kohya-ss/controlnet-lllite/resolve/main/controllllite_v01032064e_sdxl_canny_anime.safetensors" + }, + + { + "name": "SDXL-controlnet: OpenPose (v2)", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet openpose model for SDXL", + "reference": "https://huggingface.co/thibaud/controlnet-openpose-sdxl-1.0", + "filename": "OpenPoseXL2.safetensors", + "url": "https://huggingface.co/thibaud/controlnet-openpose-sdxl-1.0/resolve/main/OpenPoseXL2.safetensors" + }, + { + "name": "controlnet-SargeZT/controlnet-sd-xl-1.0-softedge-dexined", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet softedge model for SDXL", + "reference": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-softedge-dexined", + "filename": "controlnet-sd-xl-1.0-softedge-dexined.safetensors", + "url": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-softedge-dexined/resolve/main/controlnet-sd-xl-1.0-softedge-dexined.safetensors" + }, + { + "name": "controlnet-SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe", + "type": "controlnet", + "base": "SDXL", + "save_path": "default", + "description": "ControlNet depth-zoe model for SDXL", + "reference": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe", + "filename": "depth-zoe-xl-v1.0-controlnet.safetensors", + "url": "https://huggingface.co/SargeZT/controlnet-sd-xl-1.0-depth-16bit-zoe/resolve/main/depth-zoe-xl-v1.0-controlnet.safetensors" + }, + + { + "name": "animatediff/mmd_sd_v14.ckpt (comfyui-animatediff)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/comfyui-animatediff/models", + "description": "Pressing 'install' directly downloads the model from the ArtVentureX/AnimateDiff extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v14.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v14.ckpt" + }, + { + "name": "animatediff/mm_sd_v15.ckpt (comfyui-animatediff)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/comfyui-animatediff/models", + "description": "Pressing 'install' directly downloads the model from the ArtVentureX/AnimateDiff extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15.ckpt" + }, + + { + "name": "animatediff/mmd_sd_v14.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. 
(Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v14.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v14.ckpt" + }, + { + "name": "animatediff/mm_sd_v15.ckpt (ComfyUI-AnimateDiff-Evolved)", + "type": "animatediff", + "base": "SD1.x", + "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models", + "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/guoyww/animatediff", + "filename": "mm_sd_v15.ckpt", + "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15.ckpt" + }, + + { + "name": "ip-adapter_sdxl.bin
(install to IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SDXL", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sdxl.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.bin" + }, + { + "name": "ip-adapter_sd15.bin
(install to IPAdapter-ComfyUI)", + "type": "IP-Adapter", + "base": "SD1.5", + "save_path": "custom_nodes/IPAdapter-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the IPAdapter-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/h94/IP-Adapter", + "filename": "ip-adapter_sd15.bin", + "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin" + }, + { + "name": "pfg-novel-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-novel-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-novel-n10.pt" + }, + { + "name": "pfg-wd14-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-wd14-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd14-n10.pt" + }, + { + "name": "pfg-wd15beta2-n10.pt", + "type": "PFG", + "base": "SD1.5", + "save_path": "custom_nodes/pfg-ComfyUI/models", + "description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)", + "reference": "https://huggingface.co/furusu/PFG", + "filename": "pfg-wd15beta2-n10.pt", + "url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd15beta2-n10.pt" + } + ] +} diff --git a/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb b/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..d4e2665666d9dc5e56075cc8c9fde9435059550b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb @@ -0,0 +1,350 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "aaaaaaaaaa" + }, + "source": [ + "Git clone the repo and install the requirements. (ignore the pip errors about protobuf)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bbbbbbbbbb" + }, + "outputs": [], + "source": [ + "# #@title Environment Setup\n", + "\n", + "from pathlib import Path\n", + "\n", + "OPTIONS = {}\n", + "\n", + "USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n", + "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n", + "USE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n", + "INSTALL_CUSTOM_NODES_DEPENDENCIES = True #@param {type:\"boolean\"}\n", + "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n", + "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n", + "OPTIONS['USE_COMFYUI_MANAGER'] = USE_COMFYUI_MANAGER\n", + "OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES'] = INSTALL_CUSTOM_NODES_DEPENDENCIES\n", + "\n", + "current_dir = !pwd\n", + "WORKSPACE = f\"{current_dir[0]}/ComfyUI\"\n", + "\n", + "if OPTIONS['USE_GOOGLE_DRIVE']:\n", + " !echo \"Mounting Google Drive...\"\n", + " %cd /\n", + "\n", + " from google.colab import drive\n", + " drive.mount('/content/drive')\n", + "\n", + " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n", + " %cd /content/drive/MyDrive\n", + "\n", + "![ ! 
-d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI\n", + "%cd $WORKSPACE\n", + "\n", + "if OPTIONS['UPDATE_COMFY_UI']:\n", + " !echo -= Updating ComfyUI =-\n", + " !git pull\n", + "\n", + "!echo -= Install dependencies =-\n", + "#Remove cu121 as it causes issues in Colab.\n", + "#!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117\n", + "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117\n", + "\n", + "if OPTIONS['USE_COMFYUI_MANAGER']:\n", + " %cd custom_nodes\n", + " ![ ! -d ComfyUI-Manager ] && echo -= Initial setup ComfyUI-Manager =- && git clone https://github.com/ltdrdata/ComfyUI-Manager\n", + " %cd ComfyUI-Manager\n", + " !git pull\n", + "\n", + "%cd $WORKSPACE\n", + "\n", + "if OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES']:\n", + " !pwd\n", + " !echo -= Install custom nodes dependencies =-\n", + " ![ -f \"custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py\" ] && python \"custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py\"\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cccccccccc" + }, + "source": [ + "Download some models/checkpoints/vae or custom comfyui nodes (uncomment the commands for the ones you want)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dddddddddd" + }, + "outputs": [], + "source": [ + "# Checkpoints\n", + "\n", + "### SDXL\n", + "### I recommend these workflow examples: https://comfyanonymous.github.io/ComfyUI_examples/sdxl/\n", + "\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -P ./models/checkpoints/\n", + "\n", + "# SDXL ReVision\n", + "#!wget -c https://huggingface.co/comfyanonymous/clip_vision_g/resolve/main/clip_vision_g.safetensors -P ./models/clip_vision/\n", + "\n", + "# SD1.5\n", + "!wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P ./models/checkpoints/\n", + "\n", + "# SD2\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -P ./models/checkpoints/\n", + "\n", + "# Some SD1.5 anime style\n", + "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P ./models/checkpoints/\n", + "\n", + "# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n", + "#!wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta3/resolve/main/wd-illusion-fp16.safetensors 
-P ./models/checkpoints/\n", + "\n", + "\n", + "# unCLIP models\n", + "#!wget -c https://huggingface.co/comfyanonymous/illuminatiDiffusionV1_v11_unCLIP/resolve/main/illuminatiDiffusionV1_v11-unclip-h-fp16.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/comfyanonymous/wd-1.5-beta2_unCLIP/resolve/main/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors -P ./models/checkpoints/\n", + "\n", + "\n", + "# VAE\n", + "!wget -c https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors -P ./models/vae/\n", + "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt -P ./models/vae/\n", + "#!wget -c https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt -P ./models/vae/\n", + "\n", + "\n", + "# Loras\n", + "#!wget -c https://civitai.com/api/download/models/10350 -O ./models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n", + "#!wget -c https://civitai.com/api/download/models/10638 -O ./models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors -P ./models/loras/ #SDXL offset noise lora\n", + "\n", + "\n", + "# T2I-Adapter\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P ./models/controlnet/\n", + "\n", + "# T2I Styles Model\n", + "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P ./models/style_models/\n", + "\n", + "# CLIPVision model (needed for styles model)\n", + "#!wget -c https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin -O ./models/clip_vision/clip_vit14.bin\n", + "\n", + "\n", + "# ControlNet\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors 
-P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n", + "\n", + "# ControlNet SDXL\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors -P ./models/controlnet/\n", + "\n", + "# Controlnet Preprocessor nodes by Fannovel16\n", + "#!cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n", + "\n", + "\n", + "# GLIGEN\n", + "#!wget -c https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/resolve/main/gligen_sd14_textbox_pruned_fp16.safetensors -P ./models/gligen/\n", + "\n", + "\n", + "# ESRGAN upscale model\n", + "#!wget -c https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./models/upscale_models/\n", + "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P ./models/upscale_models/\n", + "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P ./models/upscale_models/\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kkkkkkkkkkkkkkk" + }, + "source": [ + "### Run ComfyUI with cloudflared (Recommended Way)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jjjjjjjjjjjjjj" + }, + "outputs": [], + "source": [ + "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n", + "!dpkg -i cloudflared-linux-amd64.deb\n", + "\n", + 
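"# Poll the local port until ComfyUI's web server answers, then start a\n",
+ "# cloudflared tunnel against it and print the trycloudflare.com URL that\n",
+ "# cloudflared reports on stderr.\n",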
"import subprocess\n", + "import threading\n", + "import time\n", + "import socket\n", + "import urllib.request\n", + "\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n", + "\n", + " p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", + " for line in p.stderr:\n", + " l = line.decode()\n", + " if \"trycloudflare.com \" in l:\n", + " print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n", + " #print(l, end='')\n", + "\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kkkkkkkkkkkkkk" + }, + "source": [ + "### Run ComfyUI with localtunnel\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jjjjjjjjjjjjj" + }, + "outputs": [], + "source": [ + "!npm install -g localtunnel\n", + "\n", + "import subprocess\n", + "import threading\n", + "import time\n", + "import socket\n", + "import urllib.request\n", + "\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\\n\")\n", + "\n", + " print(\"The password/enpoint ip for localtunnel is:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n", + " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n", + " for line in p.stdout:\n", + " print(line.decode(), end='')\n", + "\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gggggggggg" + }, + "source": [ + "### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n", + "\n", + "You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n", + "\n", + "If you want to open it in another window use the link.\n", + "\n", + "Note that some UI features like live image previews won't work because the colab iframe blocks websockets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hhhhhhhhhh" + }, + "outputs": [], + "source": [ + "import threading\n", + "import time\n", + "import socket\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " from google.colab import output\n", + " output.serve_kernel_port_as_iframe(port, height=1024)\n", + " print(\"to open it in a window you can open this link here:\")\n", + " output.serve_kernel_port_as_window(port)\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/custom_nodes/ComfyUI-Manager/prestartup_script.py b/custom_nodes/ComfyUI-Manager/prestartup_script.py new file mode 100644 index 0000000000000000000000000000000000000000..9dfc013832f86fc6b7067be5bf10a2c69ae0c1be --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/prestartup_script.py @@ -0,0 +1,398 @@ +import datetime +import os +import subprocess +import sys +import atexit +import threading +import re +import locale + + +message_collapses = [] +import_failed_extensions = set() + + +def register_message_collapse(f): + global message_collapses + message_collapses.append(f) + + +def is_import_failed_extension(x): + global import_failed_extensions + return x in import_failed_extensions + + +sys.__comfyui_manager_register_message_collapse = register_message_collapse +sys.__comfyui_manager_is_import_failed_extension = is_import_failed_extension + +comfyui_manager_path = os.path.dirname(__file__) +custom_nodes_path = os.path.abspath(os.path.join(comfyui_manager_path, "..")) +startup_script_path = os.path.join(comfyui_manager_path, "startup-scripts") +restore_snapshot_path = os.path.join(startup_script_path, "restore-snapshot.json") +git_script_path = os.path.join(comfyui_manager_path, "git_helper.py") + + +def handle_stream(stream, prefix): + stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') + for msg in stream: + if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg): + if msg.startswith('100%'): + print('\r' + msg, end="", file=sys.stderr), + else: + print('\r' + msg[:-1], end="", file=sys.stderr), + else: + if prefix == '[!]': + print(prefix, msg, end="", file=sys.stderr) + else: + print(prefix, msg, end="") + + +def process_wrap(cmd_str, cwd_path, handler=None): + process = subprocess.Popen(cmd_str, cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1) + + if handler is None: + handler = handle_stream + + stdout_thread = threading.Thread(target=handler, args=(process.stdout, "")) + stderr_thread = threading.Thread(target=handler, args=(process.stderr, "[!]")) + + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return process.wait() + + +try: + if '--port' in sys.argv: + port_index = sys.argv.index('--port') + if port_index + 1 < len(sys.argv): + port = int(sys.argv[port_index + 1]) + postfix = f"_{port}" + else: + postfix = "" + + # Logger setup + if 
os.path.exists(f"comfyui{postfix}.log"): + if os.path.exists(f"comfyui{postfix}.prev.log"): + if os.path.exists(f"comfyui{postfix}.prev2.log"): + os.remove(f"comfyui{postfix}.prev2.log") + os.rename(f"comfyui{postfix}.prev.log", f"comfyui{postfix}.prev2.log") + os.rename(f"comfyui{postfix}.log", f"comfyui{postfix}.prev.log") + + original_stdout = sys.stdout + original_stderr = sys.stderr + + pat_tqdm = r'\d+%.*\[(.*?)\]' + pat_import_fail = r'seconds \(IMPORT FAILED\):' + pat_custom_node = r'[/\\]custom_nodes[/\\](.*)$' + + is_start_mode = True + is_import_fail_mode = False + + log_file = open(f"comfyui{postfix}.log", "w", encoding="utf-8") + log_lock = threading.Lock() + + class ComfyUIManagerLogger: + def __init__(self, is_stdout): + self.is_stdout = is_stdout + self.encoding = "utf-8" + + def fileno(self): + try: + if self.is_stdout: + return original_stdout.fileno() + else: + return original_stderr.fileno() + except AttributeError: + # Handle error + raise ValueError("The object does not have a fileno method") + + def write(self, message): + global is_start_mode + global is_import_fail_mode + + if any(f(message) for f in message_collapses): + return + + if is_start_mode: + if is_import_fail_mode: + match = re.search(pat_custom_node, message) + if match: + import_failed_extensions.add(match.group(1)) + is_import_fail_mode = False + else: + match = re.search(pat_import_fail, message) + if match: + is_import_fail_mode = True + else: + is_import_fail_mode = False + + if 'Starting server' in message: + is_start_mode = False + + if not self.is_stdout: + match = re.search(pat_tqdm, message) + if match: + message = re.sub(r'([#|])\d', r'\1▌', message) + message = re.sub('#', '█', message) + if '100%' in message: + self.sync_write(message) + else: + original_stderr.write(message) + original_stderr.flush() + else: + self.sync_write(message) + else: + self.sync_write(message) + + def sync_write(self, message): + with log_lock: + log_file.write(message) + log_file.flush() + + if self.is_stdout: + original_stdout.write(message) + original_stdout.flush() + else: + original_stderr.write(message) + original_stderr.flush() + + def flush(self): + log_file.flush() + if self.is_stdout: + original_stdout.flush() + else: + original_stderr.flush() + + def close(self): + self.flush() + pass + + def reconfigure(self, *args, **kwargs): + pass + + # You can close through sys.stderr.close_log() + def close_log(self): + sys.stderr = original_stderr + sys.stdout = original_stdout + log_file.close() + + def close_log(): + sys.stderr = original_stderr + sys.stdout = original_stdout + log_file.close() + + sys.stdout = ComfyUIManagerLogger(True) + sys.stderr = ComfyUIManagerLogger(False) + + atexit.register(close_log) +except Exception as e: + print(f"[ComfyUI-Manager] Logging failed: {e}") + + +print("** ComfyUI start up time:", datetime.datetime.now()) + + +def check_bypass_ssl(): + try: + import configparser + import ssl + config_path = os.path.join(os.path.dirname(__file__), "config.ini") + config = configparser.ConfigParser() + config.read(config_path) + default_conf = config['default'] + + if 'bypass_ssl' in default_conf and default_conf['bypass_ssl'].lower() == 'true': + print(f"[ComfyUI-Manager] WARN: Unsafe - SSL verification bypass option is Enabled. (see ComfyUI-Manager/config.ini)") + ssl._create_default_https_context = ssl._create_unverified_context # SSL certificate error fix. 
+ except Exception: + pass + + +check_bypass_ssl() + + +# Perform install +processed_install = set() +script_list_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "startup-scripts", "install-scripts.txt") +pip_list = None + + +def get_installed_packages(): + global pip_list + + if pip_list is None: + try: + result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True) + pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()]) + except subprocess.CalledProcessError as e: + print(f"[ComfyUI-Manager] Failed to retrieve the information of installed pip packages.") + return set() + + return pip_list + + +def is_installed(name): + name = name.strip() + + if name.startswith('#'): + return True + + pattern = r'([^<>!=]+)([<>!=]=?)' + match = re.search(pattern, name) + + if match: + name = match.group(1) + + return name.lower() in get_installed_packages() + + +if os.path.exists(restore_snapshot_path): + try: + import json + + cloned_repos = [] + + def msg_capture(stream, prefix): + stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace') + for msg in stream: + if msg.startswith("CLONE: "): + cloned_repos.append(msg[7:]) + if prefix == '[!]': + print(prefix, msg, end="", file=sys.stderr) + else: + print(prefix, msg, end="") + + elif prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg): + if msg.startswith('100%'): + print('\r' + msg, end="", file=sys.stderr), + else: + print('\r'+msg[:-1], end="", file=sys.stderr), + else: + if prefix == '[!]': + print(prefix, msg, end="", file=sys.stderr) + else: + print(prefix, msg, end="") + + print(f"[ComfyUI-Manager] Restore snapshot.") + cmd_str = [sys.executable, git_script_path, '--apply-snapshot', restore_snapshot_path] + exit_code = process_wrap(cmd_str, custom_nodes_path, handler=msg_capture) + + with open(restore_snapshot_path, 'r', encoding="UTF-8") as json_file: + info = json.load(json_file) + for url in cloned_repos: + try: + repository_name = url.split("/")[-1].strip() + repo_path = os.path.join(custom_nodes_path, repository_name) + repo_path = os.path.abspath(repo_path) + + requirements_path = os.path.join(repo_path, 'requirements.txt') + install_script_path = os.path.join(repo_path, 'install.py') + + this_exit_code = 0 + + if os.path.exists(requirements_path): + with open(requirements_path, 'r', encoding="UTF-8") as file: + for line in file: + package_name = line.strip() + if package_name and not is_installed(package_name): + install_cmd = [sys.executable, "-m", "pip", "install", package_name] + this_exit_code += process_wrap(install_cmd, repo_path) + + if os.path.exists(install_script_path) and f'{repo_path}/install.py' not in processed_install: + processed_install.add(f'{repo_path}/install.py') + install_cmd = [sys.executable, install_script_path] + print(f">>> {install_cmd} / {repo_path}") + this_exit_code += process_wrap(install_cmd, repo_path) + + if this_exit_code != 0: + print(f"[ComfyUI-Manager] Restoring '{repository_name}' is failed.") + + except Exception as e: + print(e) + print(f"[ComfyUI-Manager] Restoring '{repository_name}' is failed.") + + if exit_code != 0: + print(f"[ComfyUI-Manager] Restore snapshot failed.") + else: + print(f"[ComfyUI-Manager] Restore snapshot done.") + + except Exception as e: + print(e) + print(f"[ComfyUI-Manager] Restore snapshot failed.") + + os.remove(restore_snapshot_path) + + +def execute_lazy_install_script(repo_path, executable): + global processed_install + + 
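    # Lazy install for a node pack cloned during startup: pip-install each
+    # requirements.txt entry that is not already present, then run the pack's
+    # install.py at most once per run (tracked via processed_install).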
install_script_path = os.path.join(repo_path, "install.py") + requirements_path = os.path.join(repo_path, "requirements.txt") + + if os.path.exists(requirements_path): + print(f"Install: pip packages for '{repo_path}'") + with open(requirements_path, "r") as requirements_file: + for line in requirements_file: + package_name = line.strip() + if package_name and not is_installed(package_name): + install_cmd = [executable, "-m", "pip", "install", package_name] + process_wrap(install_cmd, repo_path) + + if os.path.exists(install_script_path) and f'{repo_path}/install.py' not in processed_install: + processed_install.add(f'{repo_path}/install.py') + print(f"Install: install script for '{repo_path}'") + install_cmd = [executable, "install.py"] + process_wrap(install_cmd, repo_path) + + +# Check if script_list_path exists +if os.path.exists(script_list_path): + print("\n#######################################################################") + print("[ComfyUI-Manager] Starting dependency installation/(de)activation for the extension\n") + + executed = set() + # Read each line from the file and convert it to a list using eval + with open(script_list_path, 'r', encoding="UTF-8") as file: + for line in file: + if line in executed: + continue + + executed.add(line) + + try: + script = eval(line) + + if script[1].startswith('#'): + if script[1] == "#LAZY-INSTALL-SCRIPT": + execute_lazy_install_script(script[0], script[2]) + + elif os.path.exists(script[0]): + if 'pip' in script[1:] and 'install' in script[1:] and is_installed(script[-1]): + continue + + print(f"\n## ComfyUI-Manager: EXECUTE => {script[1:]}") + print(f"\n## Execute install/(de)activation script for '{script[0]}'") + + exit_code = process_wrap(script[1:], script[0]) + + if exit_code != 0: + print(f"install/(de)activation script failed: {script[0]}") + else: + print(f"\n## ComfyUI-Manager: CANCELED => {script[1:]}") + + except Exception as e: + print(f"[ERROR] Failed to execute install/(de)activation script: {line} / {e}") + + # Remove the script_list_path file + if os.path.exists(script_list_path): + os.remove(script_list_path) + + print("\n[ComfyUI-Manager] Startup script completed.") + print("#######################################################################\n") + +del processed_install +del pip_list \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/requirements.txt b/custom_nodes/ComfyUI-Manager/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..3467b3e51c131ba4bbf1db1ae8317aa45cc3ef53 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/requirements.txt @@ -0,0 +1,2 @@ +GitPython +matrix-client==0.4.0 \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/scan.sh b/custom_nodes/ComfyUI-Manager/scan.sh new file mode 100644 index 0000000000000000000000000000000000000000..a169cd488a476f95d8b134914f42c7e578a5397b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scan.sh @@ -0,0 +1,5 @@ +#!/bin/bash +source ../../venv/bin/activate +rm .tmp/*.py > /dev/null +python scanner.py +cp extension-node-map.json node_db/new/. 
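For context on what `scanner.py` (next diff) is pattern-matching: ComfyUI node packs conventionally register nodes through a module-level `NODE_CLASS_MAPPINGS` object plus an optional `@author:`/`@title:`-style metadata header, and the scanner's regexes cover the common declaration styles. A minimal sketch of such a pack (all names here are hypothetical, for illustration only):
```python
"""
@author: example-author
@title: Example Pack
@description: toy pack illustrating the declaration styles scanner.py detects
"""

class ExampleNode:
    pass

NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}          # dict-literal form
NODE_CLASS_MAPPINGS["ExampleAlias"] = ExampleNode           # subscript-assignment form
NODE_CLASS_MAPPINGS.update({"ExampleNodeV2": ExampleNode})  # .update({...}) form
```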
diff --git a/custom_nodes/ComfyUI-Manager/scanner.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9304191f133e83eb6f6b94a971733dff28017d --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scanner.py @@ -0,0 +1,296 @@
+import re
+import os
+import json
+from git import Repo
+from torchvision.datasets.utils import download_url
+import concurrent.futures
+
+builtin_nodes = set()
+
+
+def scan_in_file(filename, is_builtin=False):
+    global builtin_nodes
+
+    try:
+        with open(filename, encoding='utf-8') as file:
+            code = file.read()
+    except UnicodeDecodeError:
+        with open(filename, encoding='cp949') as file:
+            code = file.read()
+
+    pattern = r"_CLASS_MAPPINGS\s*=\s*{([^}]*)}"
+    regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
+
+    nodes = set()
+    class_dict = {}
+
+    pattern2 = r'^[^=]*_CLASS_MAPPINGS\["(.*?)"\]'
+    keys = re.findall(pattern2, code)
+    for key in keys:
+        nodes.add(key.strip())
+
+    pattern3 = r'^[^=]*_CLASS_MAPPINGS\[\'(.*?)\'\]'
+    keys = re.findall(pattern3, code)
+    for key in keys:
+        nodes.add(key.strip())
+
+    matches = regex.findall(code)
+    for match in matches:
+        dict_text = match
+
+        key_value_pairs = re.findall(r"\"([^\"]*)\"\s*:\s*([^,\n]*)", dict_text)
+        for key, value in key_value_pairs:
+            class_dict[key.strip()] = value.strip()
+
+        key_value_pairs = re.findall(r"'([^']*)'\s*:\s*([^,\n]*)", dict_text)
+        for key, value in key_value_pairs:
+            class_dict[key.strip()] = value.strip()
+
+        for key, value in class_dict.items():
+            nodes.add(key.strip())
+
+    update_pattern = r"_CLASS_MAPPINGS.update\s*\({([^}]*)}\)"
+    update_match = re.search(update_pattern, code)
+    if update_match:
+        update_dict_text = update_match.group(1)
+        update_key_value_pairs = re.findall(r"\"([^\"]*)\"\s*:\s*([^,\n]*)", update_dict_text)
+        for key, value in update_key_value_pairs:
+            class_dict[key.strip()] = value.strip()
+            nodes.add(key.strip())
+
+    metadata = {}
+    lines = code.strip().split('\n')
+    for line in lines:
+        if line.startswith('@'):
+            if line.startswith("@author:") or line.startswith("@title:") or line.startswith("@nickname:") or line.startswith("@description:"):
+                key, value = line[1:].strip().split(':', 1)
+                metadata[key.strip()] = value.strip()
+
+    if is_builtin:
+        builtin_nodes |= set(nodes)
+    else:
+        for x in builtin_nodes:
+            if x in nodes:
+                nodes.remove(x)
+
+    return nodes, metadata
+
+
+def get_py_file_paths(dirname):
+    file_paths = []
+
+    for root, dirs, files in os.walk(dirname):
+        if ".git" in root or "__pycache__" in root:
+            continue
+
+        for file in files:
+            if file.endswith(".py"):
+                file_path = os.path.join(root, file)
+                file_paths.append(file_path)
+
+    return file_paths
+
+
+def get_nodes(target_dir):
+    py_files = []
+    directories = []
+
+    for item in os.listdir(target_dir):
+        if ".git" in item or "__pycache__" in item:
+            continue
+
+        path = os.path.abspath(os.path.join(target_dir, item))
+
+        if os.path.isfile(path) and item.endswith(".py"):
+            py_files.append(path)
+        elif os.path.isdir(path):
+            directories.append(path)
+
+    return py_files, directories
+
+
+def get_git_urls_from_json(json_file):
+    with open(json_file, encoding='utf-8') as file:
+        data = json.load(file)
+
+    custom_nodes = data.get('custom_nodes', [])
+    git_clone_files = []
+    for node in custom_nodes:
+        if node.get('install_type') == 'git-clone':
+            files = node.get('files', [])
+            if files:
+                git_clone_files.append((files[0], node.get('title'), node.get('nodename_pattern')))
+
+    git_clone_files.append(("https://github.com/comfyanonymous/ComfyUI", "ComfyUI",
None))
+
+    return git_clone_files
+
+
+def get_py_urls_from_json(json_file):
+    with open(json_file, encoding='utf-8') as file:
+        data = json.load(file)
+
+    custom_nodes = data.get('custom_nodes', [])
+    py_files = []
+    for node in custom_nodes:
+        if node.get('install_type') == 'copy':
+            files = node.get('files', [])
+            if files:
+                py_files.append((files[0], node.get('title'), node.get('nodename_pattern')))
+
+    return py_files
+
+
+def clone_or_pull_git_repository(git_url):
+    repo_name = git_url.split("/")[-1].split(".")[0]
+    repo_dir = os.path.join(os.getcwd(), ".tmp", repo_name)
+
+    if os.path.exists(repo_dir):
+        try:
+            repo = Repo(repo_dir)
+            origin = repo.remote(name="origin")
+            origin.pull(rebase=True)
+            repo.git.submodule('update', '--init', '--recursive')
+            print(f"Pulling {repo_name}...")
+        except Exception as e:
+            print(f"Pulling {repo_name} failed: {e}")
+    else:
+        try:
+            Repo.clone_from(git_url, repo_dir, recursive=True)
+            print(f"Cloning {repo_name}...")
+        except Exception as e:
+            print(f"Cloning {repo_name} failed: {e}")
+
+
+def update_custom_nodes():
+    tmp_dir = os.path.join(os.getcwd(), ".tmp")
+    if not os.path.exists(tmp_dir):
+        os.makedirs(tmp_dir)
+
+    node_info = {}
+
+    git_url_titles = get_git_urls_from_json('custom-node-list.json')
+
+    def process_git_url_title(url, title, node_pattern):
+        name = os.path.basename(url)
+        if name.endswith(".git"):
+            name = name[:-4]
+
+        node_info[name] = (url, title, node_pattern)
+        clone_or_pull_git_repository(url)
+
+    with concurrent.futures.ThreadPoolExecutor(10) as executor:
+        for url, title, node_pattern in git_url_titles:
+            executor.submit(process_git_url_title, url, title, node_pattern)
+
+    py_url_titles_and_pattern = get_py_urls_from_json('custom-node-list.json')
+
+    def download_and_store_info(url_title_and_pattern):
+        url, title, node_pattern = url_title_and_pattern
+        name = os.path.basename(url)
+        if name.endswith(".py"):
+            node_info[name] = (url, title, node_pattern)
+
+        try:
+            download_url(url, ".tmp")
+        except Exception:
+            print(f"[ERROR] Cannot download '{url}'")
+
+    with concurrent.futures.ThreadPoolExecutor(10) as executor:
+        executor.map(download_and_store_info, py_url_titles_and_pattern)
+
+    return node_info
+
+
+def gen_json(node_info):
+    # scan from .py file
+    node_files, node_dirs = get_nodes(".tmp")
+
+    comfyui_path = os.path.abspath(os.path.join('.tmp', "ComfyUI"))
+    node_dirs.remove(comfyui_path)
+    node_dirs = [comfyui_path] + node_dirs
+
+    data = {}
+    for dirname in node_dirs:
+        py_files = get_py_file_paths(dirname)
+        metadata = {}
+
+        nodes = set()
+        for py in py_files:
+            nodes_in_file, metadata_in_file = scan_in_file(py, dirname == comfyui_path)
+            nodes.update(nodes_in_file)
+            metadata.update(metadata_in_file)
+
+        dirname = os.path.basename(dirname)
+
+        if len(nodes) > 0 or (dirname in node_info and node_info[dirname][2] is not None):
+            nodes = list(nodes)
+            nodes.sort()
+
+            if dirname in node_info:
+                git_url, title, node_pattern = node_info[dirname]
+                metadata['title_aux'] = title
+                if node_pattern is not None:
+                    metadata['nodename_pattern'] = node_pattern
+                data[git_url] = (nodes, metadata)
+            else:
+                print(f"WARN: {dirname} is removed from custom-node-list.json")
+
+    for file in node_files:
+        nodes, metadata = scan_in_file(file)
+        file = os.path.basename(file)
+
+        if len(nodes) > 0 or (file in node_info and node_info[file][2] is not None):
+            nodes = list(nodes)
+            nodes.sort()
+
+            if file in node_info:
+                url, title, node_pattern = node_info[file]
+                metadata['title_aux'] = title
+                if node_pattern is not None:
+                    metadata['nodename_pattern'] = node_pattern
+                data[url] = (nodes, metadata)
+            else:
+                print(f"Missing info: {file}")
+
+    # scan from node_list.json file
+    extensions = [name for name in os.listdir('.tmp') if os.path.isdir(os.path.join('.tmp', name))]
+
+    for extension in extensions:
+        node_list_json_path = os.path.join('.tmp', extension, 'node_list.json')
+        if os.path.exists(node_list_json_path):
+            git_url, title, node_pattern = node_info[extension]
+
+            with open(node_list_json_path, 'r', encoding='utf-8') as f:
+                node_list_json = json.load(f)
+
+            metadata_in_url = {}
+            if git_url not in data:
+                nodes = set()
+            else:
+                nodes_in_url, metadata_in_url = data[git_url]
+                nodes = set(nodes_in_url)
+
+            for x, desc in node_list_json.items():
+                nodes.add(x.strip())
+
+            metadata_in_url['title_aux'] = title
+            if node_pattern is not None:
+                metadata_in_url['nodename_pattern'] = node_pattern
+            nodes = list(nodes)
+            nodes.sort()
+            data[git_url] = (nodes, metadata_in_url)
+
+    json_path = "extension-node-map.json"
+    with open(json_path, "w", encoding='utf-8') as file:
+        json.dump(data, file, indent=4, sort_keys=True)
+
+
+print("### ComfyUI Manager Node Scanner ###")
+
+print("\n# Updating extensions\n")
+updated_node_info = update_custom_nodes()
+
+print("\n# Generating 'extension-node-map.json'\n")
+gen_json(updated_node_info) diff --git a/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py b/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..d5a70ed6dd92ba90e8084e07fbb9097fe3096ea5 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py @@ -0,0 +1,39 @@
+import os
+import subprocess
+
+
+def get_enabled_subdirectories_with_files(base_directory):
+    subdirs_with_files = []
+    for subdir in os.listdir(base_directory):
+        try:
+            full_path = os.path.join(base_directory, subdir)
+            if os.path.isdir(full_path) and not subdir.endswith(".disabled") and not subdir.startswith('.') and subdir != '__pycache__':
+                requirements_file = os.path.join(full_path, "requirements.txt")
+                install_script = os.path.join(full_path, "install.py")
+
+                if os.path.exists(requirements_file) or os.path.exists(install_script):
+                    print(f"## Install dependencies for '{subdir}'")
+                    subdirs_with_files.append((full_path, requirements_file, install_script))
+        except Exception as e:
+            print(f"Exception during dependency install for '{subdir}':\n{e}")
+
+    return subdirs_with_files
+
+
+def install_requirements(requirements_file_path):
+    if os.path.exists(requirements_file_path):
+        subprocess.run(["pip", "install", "-r", requirements_file_path])
+
+
+def run_install_script(install_script_path):
+    if os.path.exists(install_script_path):
+        subprocess.run(["python", install_script_path])
+
+
+custom_nodes_directory = "custom_nodes"
+subdirs_with_files = get_enabled_subdirectories_with_files(custom_nodes_directory)
+
+
+for subdir, requirements_file, install_script in subdirs_with_files:
+    install_requirements(requirements_file)
+    run_install_script(install_script) diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh new file mode 100644 index 0000000000000000000000000000000000000000..be473dc66f8eeb36c48d409945eb5ae83a030171 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh @@ -0,0 +1,21 @@ +git clone
https://github.com/ltdrdata/ComfyUI-Manager
+cd ..
+python -m venv venv
+source venv/bin/activate
+python -m pip install -r requirements.txt
+python -m pip install -r custom_nodes/ComfyUI-Manager/requirements.txt
+python -m pip install torchvision
+cd ..
+echo "#!/bin/bash" > run_gpu.sh
+echo "cd ComfyUI" >> run_gpu.sh
+echo "source venv/bin/activate" >> run_gpu.sh
+echo "python main.py --preview-method auto" >> run_gpu.sh
+chmod +x run_gpu.sh
+
+echo "#!/bin/bash" > run_cpu.sh
+echo "cd ComfyUI" >> run_cpu.sh
+echo "source venv/bin/activate" >> run_cpu.sh
+echo "python main.py --preview-method auto --cpu" >> run_cpu.sh
+chmod +x run_cpu.sh diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat new file mode 100644 index 0000000000000000000000000000000000000000..6bb0e8364b5170530c2a85341ad754764c6788ae --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat @@ -0,0 +1,17 @@
+git clone https://github.com/comfyanonymous/ComfyUI
+cd ComfyUI/custom_nodes
+git clone https://github.com/ltdrdata/ComfyUI-Manager
+cd ..
+python -m venv venv
+call venv\Scripts\activate
+python -m pip install -r requirements.txt
+python -m pip install -r custom_nodes/ComfyUI-Manager/requirements.txt
+python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers
+cd ..
+echo cd ComfyUI > run_gpu.bat
+echo call venv\Scripts\activate >> run_gpu.bat
+echo python main.py >> run_gpu.bat
+
+echo cd ComfyUI > run_cpu.bat
+echo call venv\Scripts\activate >> run_cpu.bat
+echo python main.py --cpu >> run_cpu.bat diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat b/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat new file mode 100644 index 0000000000000000000000000000000000000000..7b067dfd770d197ccd68e760087536552223f260 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat @@ -0,0 +1,2 @@
+.\python_embeded\python.exe -s -m pip install gitpython
+.\python_embeded\python.exe -c "import git; git.Repo.clone_from('https://github.com/ltdrdata/ComfyUI-Manager', './ComfyUI/custom_nodes/ComfyUI-Manager')" diff --git a/custom_nodes/ComfyUI-Manager/scripts/update-fix.py b/custom_nodes/ComfyUI-Manager/scripts/update-fix.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ac10074607544d0b9cdaf4372e43c7f62bb8d0 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/scripts/update-fix.py @@ -0,0 +1,12 @@
+import git
+
+commit_hash = "a361cc1"
+
+repo = git.Repo('.')
+
+if repo.is_dirty():
+    repo.git.stash()
+
+repo.git.update_ref("refs/remotes/origin/main", commit_hash)
+repo.remotes.origin.fetch()
+repo.git.pull("origin", "main") diff --git a/custom_nodes/ComfyUI-Manager/snapshots/2023-11-21_14-29-27_autosave.json b/custom_nodes/ComfyUI-Manager/snapshots/2023-11-21_14-29-27_autosave.json new file mode 100644 index 0000000000000000000000000000000000000000..88a9f255fc3146228757e259a07497cf29bfc169 --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/snapshots/2023-11-21_14-29-27_autosave.json @@ -0,0 +1,70 @@
+{
+    "comfyui": "2dd5b4dd78fc0a30f3d5baa0b99a6b10f002d917",
+    "git_custom_nodes": {
+        "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI": {
+            "hash": "c2aad505427d2d8ae3bc3c8650e1f407fac7cfec",
+            "disabled": false
+        },
"https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet": { + "hash": "19ea71ad886677b5fa19a2c4bbdc879ff04078db", + "disabled": false + }, + "https://github.com/pythongosssss/ComfyUI-Custom-Scripts": { + "hash": "27555d4f71bb4e24b87571f89eab2b4a06677bb6", + "disabled": false + }, + "https://github.com/alt-key-project/comfyui-dream-project": { + "hash": "25cbdff39c5b74c9055c876532316d4926a64948", + "disabled": false + }, + "https://github.com/ltdrdata/ComfyUI-Impact-Pack": { + "hash": "dffa779e11420f8ca57180897e15fcaab439ebca", + "disabled": false + }, + "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words": { + "hash": "05de5959e17c22352fa0218a6ae9b952f2901faa", + "disabled": false + }, + "https://github.com/ltdrdata/ComfyUI-Manager.git": { + "hash": "7529774f0bc0f2af36db69b50d0aace4e3f93054", + "disabled": false + }, + "https://github.com/pythongosssss/ComfyUI-WD14-Tagger": { + "hash": "b8a63949bc58df940aa306befd5f84534b80459d", + "disabled": false + }, + "https://github.com/Fannovel16/comfyui_controlnet_aux": { + "hash": "e0a5be0890b3a6230f1f3edb0b15ba949039757f", + "disabled": false + }, + "https://github.com/comfyanonymous/ComfyUI_experiments": { + "hash": "934dba9d206e4738e0dac26a09b51f1dffcb4e44", + "disabled": false + }, + "https://github.com/lilly1987/ComfyUI_node_Lilly": { + "hash": "87d6fd0c35cd34159639a22300c7ac8e7eef9886", + "disabled": false + }, + "https://github.com/BlenderNeko/ComfyUI_TiledKSampler": { + "hash": "6d7604c9b28f06a6337bc83d555825e362cece7a", + "disabled": false + }, + "https://github.com/WASasquatch/FreeU_Advanced": { + "hash": "120c23a3f48618aaf9478076552261b5f111cf4b", + "disabled": false + }, + "https://github.com/laksjdjf/LCMSampler-ComfyUI": { + "hash": "c2ab561f8ae7598b2313e7f3f6c91b106851b193", + "disabled": false + }, + "https://github.com/city96/SD-Latent-Upscaler": { + "hash": "82b7b817a34ad9021137e1f08de287440f48eb03", + "disabled": false + }, + "https://github.com/WASasquatch/was-node-suite-comfyui": { + "hash": "a55684c738a4a547f39bbb83658c8163a1cf4f5d", + "disabled": false + } + }, + "file_custom_nodes": [] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/snapshots/2023-12-08_21-22-44_autosave.json b/custom_nodes/ComfyUI-Manager/snapshots/2023-12-08_21-22-44_autosave.json new file mode 100644 index 0000000000000000000000000000000000000000..82ebc8268f232041e655ac5dee4f4fe338e14c5b --- /dev/null +++ b/custom_nodes/ComfyUI-Manager/snapshots/2023-12-08_21-22-44_autosave.json @@ -0,0 +1,86 @@ +{ + "comfyui": "a4ec54a40d978c4249dc6a7e2d5133657d1fd109", + "git_custom_nodes": { + "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI": { + "hash": "c2aad505427d2d8ae3bc3c8650e1f407fac7cfec", + "disabled": false + }, + "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet": { + "hash": "b5e77ecc3f8cd274f13996bf05816c601d90006f", + "disabled": false + }, + "https://github.com/pythongosssss/ComfyUI-Custom-Scripts": { + "hash": "27555d4f71bb4e24b87571f89eab2b4a06677bb6", + "disabled": false + }, + "https://github.com/alt-key-project/comfyui-dream-project": { + "hash": "25cbdff39c5b74c9055c876532316d4926a64948", + "disabled": false + }, + "https://github.com/ltdrdata/ComfyUI-Impact-Pack": { + "hash": "2111b2fedf05d8285cb88bc0367e523e077c34da", + "disabled": false + }, + "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words": { + "hash": "b63862f2e3004be7cd255b1d86349500a555ee1d", + "disabled": false + }, + "https://github.com/ltdrdata/ComfyUI-Manager.git": { + "hash": "1d21359a5aade09956113aa58be2812bd5427d80", + 
"disabled": false + }, + "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative.git": { + "hash": "0fc84c7d72d57763c39efa8b47eaa9aa83e2758e", + "disabled": false + }, + "https://github.com/matan1905/ComfyUI-Serving-Toolkit": { + "hash": "4eae42c65e19d42252de2e2441e15a53bc9e1369", + "disabled": false + }, + "https://github.com/pythongosssss/ComfyUI-WD14-Tagger": { + "hash": "b8a63949bc58df940aa306befd5f84534b80459d", + "disabled": false + }, + "https://github.com/Fannovel16/comfyui_controlnet_aux": { + "hash": "e2fc116be0ccc4016dcd81224d61a9c9176de223", + "disabled": false + }, + "https://github.com/comfyanonymous/ComfyUI_experiments": { + "hash": "934dba9d206e4738e0dac26a09b51f1dffcb4e44", + "disabled": false + }, + "https://github.com/lilly1987/ComfyUI_node_Lilly": { + "hash": "87d6fd0c35cd34159639a22300c7ac8e7eef9886", + "disabled": false + }, + "https://github.com/shiimizu/ComfyUI_smZNodes": { + "hash": "b1defa02d4d160e77c3d69d83212afd76de42e43", + "disabled": false + }, + "https://github.com/BlenderNeko/ComfyUI_TiledKSampler": { + "hash": "25d1cd2145268b617b12e7a98f28aeb5f3752bbc", + "disabled": false + }, + "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI": { + "hash": "103a4457d44fe0d3be586c6d6b32ec5ccdbd0793", + "disabled": false + }, + "https://github.com/WASasquatch/FreeU_Advanced": { + "hash": "120c23a3f48618aaf9478076552261b5f111cf4b", + "disabled": false + }, + "https://github.com/laksjdjf/IPAdapter-ComfyUI": { + "hash": "c39015c384cd433d38b4bbd3de276682ab74e0fc", + "disabled": false + }, + "https://github.com/city96/SD-Latent-Upscaler": { + "hash": "82b7b817a34ad9021137e1f08de287440f48eb03", + "disabled": false + }, + "https://github.com/WASasquatch/was-node-suite-comfyui": { + "hash": "4e53775e650a7e2d2d1d73056bb914d7edc57f69", + "disabled": false + } + }, + "file_custom_nodes": [] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Manager/snapshots/the_snapshot_files_are_located_here b/custom_nodes/ComfyUI-Manager/snapshots/the_snapshot_files_are_located_here new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/.gitignore b/custom_nodes/ComfyUI-Serving-Toolkit/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..763624ebe547350200a2b9d538bdff0b90536f61 --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/.gitignore @@ -0,0 +1 @@ +__pycache__/* \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/README.md b/custom_nodes/ComfyUI-Serving-Toolkit/README.md new file mode 100644 index 0000000000000000000000000000000000000000..df8ef30dca5cad8f9ede4a2edf482be58cd32f10 --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/README.md @@ -0,0 +1,107 @@ +# ComfyUI Serving Toolkit +Welcome to the ComfyUI Serving Toolkit, a powerful tool for serving image generation workflows in Discord and other platforms (soon). +This toolkit is designed to simplify the process of serving your ComfyUI workflow, making image generation bots easier than ever before. +You can serve on discord, or on websockets. 
Check out https://comfypixels.com for online serving.
+
+
+## Features
+* Supports both images and videos (when in batch mode, such as AnimateDiff - if you return more than one image it will create a video)
+* Add arguments with default values, then allow your users to use them
+* Serve from your own computer; the workflow is not embedded in the images, so your secrets are safe
+
+## Installation
+[Use ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager)
+or put all the files inside custom_nodes and run:
+```
+..\..\..\python_embeded\python.exe -s -m pip install -r requirements.txt
+```
+
+## The simplest configuration
+Here is a simple workflow that will receive a !generate \ and respond with an image
+![image](https://github.com/matan1905/ComfyUI-Serving-Toolkit/assets/24731932/e193be18-7b83-4f44-b119-21230f0b9a16)
+
+You can copy the workflow json:
+[discordserv.json](https://github.com/matan1905/ComfyUI-Serving-Toolkit/files/13248566/discordserv.json)
+
+
+
+## Running
+After setting up your workflow, in order for the serving to always be up, you need to enable auto queue; here is an image to help you do that:
+
+![image](https://github.com/matan1905/ComfyUI-Serving-Toolkit/assets/24731932/d8f7b486-725d-4934-b72d-1a042b5f355a)
+
+#### This will require you to keep your ComfyUI and computer running. If you want to host your workflow, you can use [vast.ai](https://cloud.vast.ai/?ref_id=93071)
+
+
+## Nodes
+**DiscordServing**
+
+This node is an essential part of the serving: once the prompt is queued it will wait for a single message, process it and optionally return the image.
+Note that in order for it to work for all messages you have to enable `Auto Queue` (details above in the Running section)
+
+Inputs:
+* discord_token - [here is how you get one](https://www.writebots.com/discord-bot-token/), make sure to enable the message content intent
+* command_name - the command used to generate, without the '!'. Defaults to generate (so you would have to do !generate \ --your_argument1 \)
+
+Outputs:
+* Serving Config - A basic reference for this serving, used by the other nodes of this toolkit to get arguments and return images.
+
+**WebsocketServing**
+
+This will connect to a websocket and wait for JSON of {_requestId, prompt, arguments} and will return JSON of {_requestId, base64_img}
+You can see an example ws server over at examples/websocket.js
+
+Inputs:
+* websocket_url - the URL of the websocket to connect to; if you use the example it will be ws://localhost:8080
+
+Outputs:
+* Serving Config - A basic reference for this serving, used by the other nodes of this toolkit to get arguments and return images.
+
+
+
+**ServingInputText**
+
+Allows you to grab a text argument from the request
+
+Discord example:
+
+When a user types: !generate 4k epic realism portrait --negative drawing
+you could set argument=negative and then receive the value of "drawing" inside the output text.
+
+
+Inputs:
+* serving_config - a config made by a serving node
+* argument - the argument name; the prompt itself will be inside the "prompt" argument. When using Discord serving, you can access attachment URLs using 'attachment_url_0' (and 'attachment_url_1', etc.). You can then use nodes like WAS Image Load to download these images
+* default - the default value of this argument
+
+Outputs:
+* text - the value of the argument
+
+
+
+
+**ServingInputNumber**
+
+Similar to ServingInputText, this one is for numbers.
It is important to set the minimum, maximum and step to the right values in order to avoid errors (for example, a width that isn't divisible by 16).
+Inputs that are not in ServingInputText:
+* max_value - the maximum value of this argument
+* min_value - the minimum value of this argument
+* step - the steps of this value (setting this to 1 will ensure only whole numbers, 0.5 will allow jumps of half, etc.)
+
+**ServingOutput**
+
+Allows you to return an image/video back to the request
+Inputs:
+* image - the generated image. Note that if this is more than one image (for example in the case of batches or animatediff frames) it will return a video
+* duration - in the case of a video, the time in milliseconds each frame should be shown; if you have an FPS value you can use 1000/FPS to calculate the duration
+
+
+
+
+
+
+
+
+
+
+ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/__init__.py b/custom_nodes/ComfyUI-Serving-Toolkit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..261c1c1403d665fa8ad9cb5fc7a717a880f3e484 --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/__init__.py @@ -0,0 +1,6 @@
+import __main__
+
+from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88370f83ebc8454f23e7e093661e1d26d5d12bca Binary files /dev/null and b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/discord_client.cpython-311.pyc b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/discord_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..006573d6538a6c173687c0de5bc463cb496fb05b Binary files /dev/null and b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/discord_client.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39fb3d4e7ee5a31c02b645f242f49416b0d1180b Binary files /dev/null and b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/utils.cpython-311.pyc b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dc49011a6e4174423868dbdc1d704f353786443 Binary files /dev/null and b/custom_nodes/ComfyUI-Serving-Toolkit/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/discord_client.py b/custom_nodes/ComfyUI-Serving-Toolkit/discord_client.py new file mode 100644 index 0000000000000000000000000000000000000000..bf82bfc95ed2b18486d0fc4a51b478830b1bc95d --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/discord_client.py @@ -0,0 +1,16 @@
+import discord
+from discord.ext import commands
+
+
+intents = discord.Intents.default()
+intents.message_content = True
+discord_client = commands.Bot(command_prefix='!', intents=intents)
+
+
+
+
+# Event handler for when the bot is ready
+@discord_client.event
+async def on_ready():
+    print(f'Logged in as {discord_client.user.name}.
Ready to take requests!')
+ diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/examples/websocket.js b/custom_nodes/ComfyUI-Serving-Toolkit/examples/websocket.js new file mode 100644 index 0000000000000000000000000000000000000000..92a8105a20f0e33ba5f24f4bb105ee132f17b09e --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/examples/websocket.js @@ -0,0 +1,55 @@
+import WebSocket, { WebSocketServer } from 'ws';
+import fs from 'fs';
+
+
+const wss = new WebSocketServer({
+  port: 8080,
+});
+wss.on("connection", (ws) => {
+  console.log("WebSocket client connected");
+
+  // Event listener for receiving messages from the client
+  ws.on("message", (message) => {
+    try {
+      const parsed = JSON.parse(message)
+      saveBase64Image(parsed.base64_img, parsed._requestId + '_image.webp');
+    } catch (e) {
+      console.log("Error occurred when getting a message", e)
+    }
+  });
+
+  // Event listener for the WebSocket connection closing
+  ws.on("close", () => {
+    console.log("WebSocket client disconnected");
+  });
+});
+
+function saveBase64Image(base64String, filePath) {
+  const binaryData = Buffer.from(base64String, 'base64');
+
+  fs.writeFile(filePath, binaryData, 'binary', (err) => {
+    if (err) {
+      console.error('Error saving the image:', err);
+    } else {
+      console.log('Image saved successfully:', filePath);
+    }
+  });
+}
+console.log("Listening on 8080")
+
+function sendMessage(message) {
+  wss.clients.forEach((client) => {
+    console.log("Messaging Everyone a hi")
+
+    if (client.readyState === WebSocket.OPEN) {
+      client.send(message);
+    }
+  });
+}
+
+
+let i = 0
+setInterval(() => sendMessage(JSON.stringify({
+  _requestId: ++i,
+  prompt: "Cow"
+})), 5000) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/nodes.py b/custom_nodes/ComfyUI-Serving-Toolkit/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..bdb7a94e6c3cdcabd3e4489ec9c8e9c6594fbe5d --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/nodes.py @@ -0,0 +1,297 @@
+import time
+import threading
+from .discord_client import discord_client
+from collections import deque
+from .utils import parse_command_string, tensorToImageConversion
+import discord
+import asyncio
+import websocket
+import json
+import base64
+
+
+
+class ServingOutput:
+    def __init__(self):
+        # start listening to api/discord
+        # when something happens, pass it to the serving manager with the details
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "serving_config": ("SERVING_CONFIG",),
+                "image": ("IMAGE",),
+                "frame_duration": ("INT", {"default": 30, "min": 1, "step": 1, "max": 9999999}),
+            },
+        }
+
+    RETURN_TYPES = ()
+    # RETURN_NAMES = ("image_output_name",)
+
+    FUNCTION = "out"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "Serving-Toolkit"
+
+    def out(self, image, serving_config, frame_duration):
+        serving_config["serve_image_function"](image, frame_duration)
+        return {}
+
+
+class ServingInputText:
+    def __init__(self):
+        # start listening to api/discord
+        # when something happens, pass it to the serving manager with the details
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "serving_config": ("SERVING_CONFIG",),
+                "argument": ("STRING", {
+                    "multiline": False,
+                    "default": "prompt"
+                }),
+                "default": ("STRING", {
+                    "multiline": True,
+                    "default": ""
+                }),
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("text",)
+
+    FUNCTION = "out"
+
+    CATEGORY = "Serving-Toolkit"
+
+    def out(self, serving_config, argument, default):
+        if argument not in serving_config:
+            return
(default,)
+        return (serving_config[argument],)
+
+class ServingInputNumber:
+    def __init__(self):
+        # start listening to api/discord
+        # when something happens, pass it to the serving manager with the details
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "serving_config": ("SERVING_CONFIG",),
+                "argument": ("STRING", {
+                    "multiline": False,
+                    "default": "number"
+                }),
+                "default": ("FLOAT", {"default": 0.0, "min": -999999.0, "max": 999999.0, "step": 0.0001}),
+                "min_value": ("FLOAT", {"default": -999999.0, "min": -999999.0, "max": 999999.0, "step": 0.0001}),
+                "max_value": ("FLOAT", {"default": 999999.0, "min": -999999.0, "max": 999999.0, "step": 0.0001}),
+                "step": ("FLOAT", {"default": 0.1, "min": -999999.0, "max": 999999.0, "step": 0.0001}),
+            }
+        }
+
+    RETURN_TYPES = ("FLOAT", "INT")
+
+    FUNCTION = "out"
+
+    CATEGORY = "Serving-Toolkit"
+
+    def out(self, serving_config, argument, default, min_value, max_value, step):
+        val = default
+        # lstrip('-') so negative numbers are accepted as well
+        if argument in serving_config and serving_config[argument].lstrip('-').replace('.', '', 1).isdigit():
+            val = serving_config[argument]
+        valFloat = min(max(float(val), min_value), max_value) // step * step
+        valInt = round(valFloat)
+        return (valFloat, valInt)
+
+
+class DiscordServing:
+    discord_running = False
+    def __init__(self):
+        self.registered_command = False
+        self.data_ready = threading.Event()
+        self.data = deque()
+        self.discord_token = None
+
+    def discord_runner(self):
+        discord_client.run(self.discord_token)
+
+    def get_data(self):
+        if not self.data:
+            self.data_ready.wait()
+        data = self.data.popleft()
+        self.data_ready.clear()
+        return data
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "discord_token": ("STRING", {
+                    "multiline": True,
+                    "default": ""
+                }),
+                "command_name": ("STRING", {
+                    "multiline": False,
+                    "default": "generate"
+                })
+            }
+        }
+
+    RETURN_TYPES = ("SERVING_CONFIG",)
+    RETURN_NAMES = ("Serving config",)
+
+    FUNCTION = "serve"
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+    # OUTPUT_NODE = False
+
+    CATEGORY = "Serving-Toolkit"
+
+    def serve(self, command_name, discord_token):
+        if not DiscordServing.discord_running:
+            self.discord_token = discord_token
+            run_discord = threading.Thread(target=self.discord_runner)
+            run_discord.start()
+            print("Client running")
+            DiscordServing.discord_running = True
+        if not self.registered_command:
+            self.registered_command = True
+            @discord_client.command(name=command_name)
+            async def execute(ctx):
+                parsed_data = parse_command_string(ctx.message.content, command_name)
+                def serve_image_function(image, frame_duration):
+                    image_file = tensorToImageConversion(image, frame_duration)
+                    asyncio.run_coroutine_threadsafe(ctx.reply(file=discord.File(image_file, filename='image.webp')), discord_client.loop)
+                parsed_data["serve_image_function"] = serve_image_function
+                parsed_data.update({f"attachment_url_{i}": attachment.url for i, attachment in enumerate(ctx.message.attachments)}) # populate all attachment urls
+                self.data.append(parsed_data)
+                self.data_ready.set()
+
+        data = self.get_data()
+
+        return (data,)
+
+class WebSocketServing:
+    def __init__(self):
+        self.data_ready = threading.Event()
+        self.data = deque()
+        self.ws_running = False
+        self.websocket_url = None
+        self.ws = None
+
+    def on_message(self, ws, message):
+        try:
+            parsed = json.loads(message)
+            self.data.append(parsed)
+            self.data_ready.set()
+        except Exception as e:
+            print("Error parsing JSON", e)
+
+
+    def on_close(self, ws, close_status_code=None, close_msg=None):
+        print("WS Client
closed!") + + def on_error(self,ws,error): + print("WS Client error: ", error) + # Try to reconnect + time.sleep(1) + self.ws_runner() + + def ws_runner(self): + print("Starting WS Client...") + self.ws = websocket.WebSocketApp( self.websocket_url, + + on_message=self.on_message, on_close= self.on_close, on_error=self.on_error) + while True: + try: + self.ws.run_forever(reconnect=1, + ping_interval=10, + ping_timeout=5,) + except Exception as e: + print("WS Client error: ", e) + time.sleep(5) + continue + def get_data(self): + if not self.data: + self.data_ready.wait() + data = self.data.popleft() + self.data_ready.clear() + return data + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "websocket_url": ("STRING", { + "multiline": False, + "default": "" + }) + } + } + + RETURN_TYPES = ("SERVING_CONFIG",) + RETURN_NAMES = ("Serving config",) + + FUNCTION = "serve" + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + # OUTPUT_NODE = False + + CATEGORY = "Serving-Toolkit" + + def serve(self, websocket_url): + if not self.ws_running: + self.websocket_url = websocket_url + threading.Thread(target=self.ws_runner).start() + print("WS Client running") + self.ws_running = True + + data = self.get_data() + def serve_image_function(image, frame_duration): + image_file = tensorToImageConversion(image, frame_duration) + base64_img = base64.b64encode(image_file.read()).decode('utf-8') + response= { + "base64_img":base64_img, + "_requestId":data["_requestId"] # It's assumed that it will exist. + } + self.ws.send(json.dumps(response)) + data["serve_image_function"] = serve_image_function + + return (data,) + + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "ServingOutput": ServingOutput, + "ServingInputText": ServingInputText, + "ServingInputNumber": ServingInputNumber, + "DiscordServing": DiscordServing, + "WebSocketServing": WebSocketServing +} + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "ServingOutput": "Serving Output", + "DiscordServing": "Discord Serving", + "WebSocketServing": "WebSocket Serving", + "ServingInputText": "Serving Input Text", + "ServingInputNumber": "Serving Input Number", +} + + +# input - simply a push diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/requirements.txt b/custom_nodes/ComfyUI-Serving-Toolkit/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cdd0bc95d7b932daaae1718cb85c36afbe468cb4 --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/requirements.txt @@ -0,0 +1,3 @@ +discord.py +websocket-client +rel \ No newline at end of file diff --git a/custom_nodes/ComfyUI-Serving-Toolkit/utils.py b/custom_nodes/ComfyUI-Serving-Toolkit/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eedf58cc1b4a6f4a7ccaea6783123200d1675408 --- /dev/null +++ b/custom_nodes/ComfyUI-Serving-Toolkit/utils.py @@ -0,0 +1,40 @@ +def parse_command_string(command_string, command_name): + textAndArgs = command_string[1+ len(command_name):].strip().split('--') + result = {} + text = textAndArgs[0].strip() + args = textAndArgs[1:] + print(args) + # The first element is the "freeText" part, remove any leading or trailing whitespace. 
+ result["prompt"] = text.strip() + + for arg in args: + parts = arg.split() + if len(parts) > 1: + # Extract the argument name and value + arg_name = parts[0].strip() + arg_value = ' '.join(parts[1:]).strip() + result[arg_name] = arg_value + + + return result + +import io +from PIL import Image, ImageSequence +import numpy as np +def tensorToImageConversion(images, duration): + # Create a list to store each image as a frame + frames = [] + + for img_tensor in images: + i = 255. * img_tensor.cpu().numpy() + img_pil = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + frames.append(img_pil) + + # Create a GIF from the list of frames + img_byte_array = io.BytesIO() + frames[0].save(img_byte_array, save_all=True, append_images=frames[1:], format='WEBP', duration=duration, loop=0, quality=100, lossless=True) + + img_byte_array.seek(0) + return img_byte_array + + diff --git a/custom_nodes/ComfyUI-WD14-Tagger/.gitignore b/custom_nodes/ComfyUI-WD14-Tagger/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..10a149b2025c679b581013f18ed39fb03778247a --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/.gitignore @@ -0,0 +1,2 @@ +models +__pycache__ \ No newline at end of file diff --git a/custom_nodes/ComfyUI-WD14-Tagger/README.md b/custom_nodes/ComfyUI-WD14-Tagger/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e153e43460219372937499da7a1c5a19daf31d7d --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/README.md @@ -0,0 +1,49 @@ +# ComfyUI WD 1.4 Tagger + +A [ComfyUI](https://github.com/comfyanonymous/ComfyUI) extension allowing the interrogation of booru tags from images. + +Based on [SmilingWolf/wd-v1-4-tags](https://huggingface.co/spaces/SmilingWolf/wd-v1-4-tags) and [toriato/stable-diffusion-webui-wd14-tagger](https://github.com/toriato/stable-diffusion-webui-wd14-tagger) +All models created by [SmilingWolf](https://huggingface.co/SmilingWolf) + +## Installation +1. `git clone https://github.com/pythongosssss/ComfyUI-WD14-Tagger` into the `custom_nodes` folder + - e.g. `custom_nodes\ComfyUI-WD14-Tagger` +2. Open a Command Prompt/Terminal/etc +3. Change to the `custom_nodes\ComfyUI-WD14-Tagger` folder you just created + - e.g. `cd C:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-WD14-Tagger` or wherever you have it installed +4. Install python packages + - **Windows Standalone installation** (embedded python): + `../../../python_embeded/python.exe -s -m pip install -r requirements.txt` + - **Manual/non-Windows installation** + `pip install -r requirement.txt` + +## Usage +Add the node via `image` -> `WD14Tagger|pysssss` +![image](https://github.com/pythongosssss/ComfyUI-WD14-Tagger/assets/125205205/ee6756ae-73f6-4e9f-a3da-eb87a056eb87) +Models are automatically downloaded at runtime if missing. +![image](https://github.com/pythongosssss/ComfyUI-WD14-Tagger/assets/125205205/cc09ae71-1a38-44da-afec-90f470a4b47d) +Supports tagging and outputting multiple batched inputs. +- **model**: The interrogation model to use. You can try them out here [WaifuDiffusion v1.4 Tags](https://huggingface.co/spaces/SmilingWolf/wd-v1-4-tags). The newest model (as of writing) is `MOAT` and the most popular is `ConvNextV2`. 
+- **threshold**: The score for the tag to be considered valid
+- **character_threshold**: The score for the character tag to be considered valid
+- **exclude_tags**: A comma-separated list of tags that should not be included in the results
+
+Quick interrogation of images is also available on any node that is displaying an image, e.g. a `LoadImage`, `SaveImage`, `PreviewImage` node.
+Simply right click on the node (or if displaying multiple images, on the image you want to interrogate) and select `WD14 Tagger` from the menu
+![image](https://github.com/pythongosssss/ComfyUI-WD14-Tagger/assets/125205205/11733899-6163-49f6-a22b-8dd86d910de6)
+
+Settings used for this are in the `settings` section of `pysssss.json`.
+
+### Offline Use
+The simplest way is to use it online: interrogate an image once and the model will be downloaded and cached. However, if you want to download the models manually:
+- Create a `models` folder (in the same folder as `wd14tagger.py`)
+- Use the URLs for models from the list in `pysssss.json`
+- Download `model.onnx` and name it with the model name e.g. `wd-v1-4-convnext-tagger-v2.onnx`
+- Download `selected_tags.csv` and name it with the model name e.g. `wd-v1-4-convnext-tagger-v2.csv`
+
+## Requirements
+`onnxruntime` (recommended, interrogation is still fast on CPU, included in requirements.txt)
+or `onnxruntime-gpu` (allows use of the GPU; many people have issues with this, so if you try it I can't provide support)
+
+## Changelog
+- 2023-05-14 - Moved to own repo, added model downloading, support for multiple inputs diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__init__.py b/custom_nodes/ComfyUI-WD14-Tagger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efe56a97a58e1e8841fc6a3eb2a71a872284c6ce --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/__init__.py @@ -0,0 +1,5 @@
+from .pysssss import init
+
+if init(check_imports=["onnxruntime"]):
+    from .wd14tagger import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+    __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0fa5f2f24a4ef860477b9d536ebfb20e6c8e23d Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9ce5a736466ab79b6298e5cff897fa44c1201ae Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-310.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5da29ec6e9dd77d501db350339d0deb82b7b8bc7 Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-311.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d37188d6c4d06892026bf891ff5d45399a24992 Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/pysssss.cpython-311.pyc differ diff --git
a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-310.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb220da57de6f661ae45e50b743242ee26325403 Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-311.pyc b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfcd2543a999f3e298976b089d362dd52d828cd5 Binary files /dev/null and b/custom_nodes/ComfyUI-WD14-Tagger/__pycache__/wd14tagger.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI-WD14-Tagger/js/wd14tagger.js b/custom_nodes/ComfyUI-WD14-Tagger/js/wd14tagger.js new file mode 100644 index 0000000000000000000000000000000000000000..346951b2138da8306cda71b5efb5570c07c3d117 --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/js/wd14tagger.js @@ -0,0 +1,141 @@ +import { app } from "/scripts/app.js"; +import { ComfyWidgets } from "/scripts/widgets.js"; +import { api } from "/scripts/api.js"; + +class Pysssss { + constructor() { + if (!window.__pysssss__) { + window.__pysssss__ = Symbol("__pysssss__"); + } + this.symbol = window.__pysssss__; + } + + getState(node) { + return node[this.symbol] || {}; + } + + setState(node, state) { + node[this.symbol] = state; + app.canvas.setDirty(true); + } + + addStatusTagHandler(nodeType) { + if (nodeType[this.symbol]?.statusTagHandler) { + return; + } + if (!nodeType[this.symbol]) { + nodeType[this.symbol] = {}; + } + nodeType[this.symbol] = { + statusTagHandler: true, + }; + + api.addEventListener("pysssss/update_status", ({ detail }) => { + let { node, progress, text } = detail; + const n = app.graph.getNodeById(+(node || app.runningNodeId)); + if (!n) return; + const state = this.getState(n); + state.status = Object.assign(state.status || {}, { progress: text ? 
progress : null, text: text || null }); + this.setState(n, state); + }); + + const self = this; + const onDrawForeground = nodeType.prototype.onDrawForeground; + nodeType.prototype.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + const state = self.getState(this); + if (!state?.status?.text) { + return r; + } + + const { fgColor, bgColor, text, progress, progressColor } = { ...state.status }; + + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(text); + ctx.fillStyle = bgColor || "dodgerblue"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + if (progress) { + ctx.fillStyle = progressColor || "green"; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, (sz.width + 12) * progress, 20, 5); + ctx.fill(); + } + + ctx.fillStyle = fgColor || "#fff"; + ctx.fillText(text, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + return r; + }; + } +} + +const pysssss = new Pysssss(); + +app.registerExtension({ + name: "pysssss.Wd14Tagger", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + pysssss.addStatusTagHandler(nodeType); + + if (nodeData.name === "WD14Tagger|pysssss") { + const onExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function (message) { + const r = onExecuted?.apply?.(this, arguments); + + const pos = this.widgets.findIndex((w) => w.name === "tags"); + if (pos !== -1) { + for (let i = pos; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = pos; + } + + for (const list of message.tags) { + const w = ComfyWidgets["STRING"](this, "tags", ["STRING", { multiline: true }], app).widget; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.6; + w.value = list; + } + + this.onResize?.(this.size); + + return r; + }; + } else { + const getExtraMenuOptions = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function (_, options) { + const r = getExtraMenuOptions?.apply?.(this, arguments); + let img; + if (this.imageIndex != null) { + // An image is selected so select that + img = this.imgs[this.imageIndex]; + } else if (this.overIndex != null) { + // No image is selected but one is hovered + img = this.imgs[this.overIndex]; + } + if (img) { + let pos = options.findIndex((o) => o.content === "Save Image"); + if (pos === -1) { + pos = 0; + } else { + pos++; + } + options.splice(pos, 0, { + content: "WD14 Tagger", + callback: async () => { + let src = img.src; + src = src.replace("/view?", `/pysssss/wd14tagger/tag?node=${this.id}&clientId=${api.clientId}&`); + const res = await (await fetch(src)).json(); + alert(res); + }, + }); + } + + return r; + }; + } + }, +}); diff --git a/custom_nodes/ComfyUI-WD14-Tagger/pysssss.json b/custom_nodes/ComfyUI-WD14-Tagger/pysssss.json new file mode 100644 index 0000000000000000000000000000000000000000..e785dc3707c0504d680369d7a24dcfe7653df966 --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/pysssss.json @@ -0,0 +1,19 @@ +{ + "name": "WD14Tagger", + "logging": false, + "settings": { + "model": "wd-v1-4-moat-tagger-v2", + "threshold": 0.35, + "character_threshold": 0.85, + "exclude_tags": "" + }, + "models": [ + { + "wd-v1-4-moat-tagger-v2": "https://huggingface.co/SmilingWolf/wd-v1-4-moat-tagger-v2", + "wd-v1-4-convnextv2-tagger-v2": "https://huggingface.co/SmilingWolf/wd-v1-4-convnextv2-tagger-v2", + "wd-v1-4-convnext-tagger-v2": 
"https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger-v2", + "wd-v1-4-convnext-tagger": "https://huggingface.co/SmilingWolf/wd-v1-4-convnext-tagger", + "wd-v1-4-vit-tagger-v2": "https://huggingface.co/SmilingWolf/wd-v1-4-vit-tagger-v2" + } + ] +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-WD14-Tagger/pysssss.py b/custom_nodes/ComfyUI-WD14-Tagger/pysssss.py new file mode 100644 index 0000000000000000000000000000000000000000..5886a74aa550d0e273580f77102c813a2073064d --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/pysssss.py @@ -0,0 +1,202 @@ +import asyncio +import os +import json +import shutil +import inspect +import aiohttp +from server import PromptServer +from tqdm import tqdm + +config = None + + +def is_logging_enabled(): + config = get_extension_config() + if "logging" not in config: + return False + return config["logging"] + + +def log(message, type=None, always=False): + if not always and not is_logging_enabled(): + return + + if type is not None: + message = f"[{type}] {message}" + + name = get_extension_config()["name"] + + print(f"(pysssss:{name}) {message}") + + +def get_ext_dir(subpath=None, mkdir=False): + dir = os.path.dirname(__file__) + if subpath is not None: + dir = os.path.join(dir, subpath) + + dir = os.path.abspath(dir) + + if mkdir and not os.path.exists(dir): + os.makedirs(dir) + return dir + + +def get_comfy_dir(subpath=None): + dir = os.path.dirname(inspect.getfile(PromptServer)) + if subpath is not None: + dir = os.path.join(dir, subpath) + + dir = os.path.abspath(dir) + + return dir + + +def get_web_ext_dir(): + config = get_extension_config() + name = config["name"] + dir = get_comfy_dir("web/extensions/pysssss") + if not os.path.exists(dir): + os.makedirs(dir) + dir += "/" + name + return dir + + +def get_extension_config(reload=False): + global config + if reload == False and config is not None: + return config + + config_path = get_ext_dir("pysssss.json") + if not os.path.exists(config_path): + log("Missing pysssss.json, this extension may not work correctly. 
Please reinstall the extension.", + type="ERROR", always=True) + print(f"Extension path: {get_ext_dir()}") + return {"name": "Unknown", "version": -1} + with open(config_path, "r") as f: + config = json.loads(f.read()) + return config + + +def link_js(src, dst): + try: + os.symlink(src, dst) + return True + except: + return False + + +def install_js(): + src_dir = get_ext_dir("js") + if not os.path.exists(src_dir): + log("No JS") + return + + dst_dir = get_web_ext_dir() + + if os.path.exists(dst_dir): + if os.path.islink(dst_dir): + log("JS already linked") + return + elif link_js(src_dir, dst_dir): + log("JS linked") + return + + log("Copying JS files") + shutil.copytree(src_dir, dst_dir, dirs_exist_ok=True) + + +def init(check_imports): + log("Init") + + if check_imports is not None: + import importlib.util + for imp in check_imports: + spec = importlib.util.find_spec(imp) + if spec is None: + log(f"{imp} is required, please check requirements are installed.", type="ERROR", always=True) + return False + + install_js() + return True + + +async def download_to_file(url, destination, update_callback, is_ext_subpath=True, session=None): + close_session = False + if session is None: + close_session = True + loop = None + try: + loop = asyncio.get_event_loop() + except: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + session = aiohttp.ClientSession(loop=loop) + if is_ext_subpath: + destination = get_ext_dir(destination) + try: + async with session.get(url) as response: + size = int(response.headers.get('content-length', 0)) or None + + with tqdm( + unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1], total=size, + ) as progressbar: + with open(destination, mode='wb') as f: + perc = 0 + async for chunk in response.content.iter_chunked(2048): + f.write(chunk) + progressbar.update(len(chunk)) + if update_callback is not None and progressbar.total is not None and progressbar.total != 0: + last = perc + perc = round(progressbar.n / progressbar.total, 2) + if perc != last: + last = perc + await update_callback(perc) + finally: + if close_session and session is not None: + await session.close() + + +def wait_for_async(async_fn, loop=None): + res = [] + + async def run_async(): + r = await async_fn() + res.append(r) + + if loop is None: + try: + loop = asyncio.get_event_loop() + except: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + loop.run_until_complete(run_async()) + + return res[0] + +def update_node_status(client_id, node, text, progress=None): + if client_id is None: + client_id = PromptServer.instance.client_id + + if client_id is None: + return + + PromptServer.instance.send_sync("pysssss/update_status", { + "node": node, + "progress": progress, + "text": text + }, client_id) + +async def update_node_status_async(client_id, node, text, progress=None): + if client_id is None: + client_id = PromptServer.instance.client_id + + if client_id is None: + return + + await PromptServer.instance.send("pysssss/update_status", { + "node": node, + "progress": progress, + "text": text + }, client_id) diff --git a/custom_nodes/ComfyUI-WD14-Tagger/requirements.txt b/custom_nodes/ComfyUI-WD14-Tagger/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..51decf87adad3e5697ac7a7e325a119339eab6b9 --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/requirements.txt @@ -0,0 +1 @@ +onnxruntime \ No newline at end of file diff --git a/custom_nodes/ComfyUI-WD14-Tagger/wd14tagger.py b/custom_nodes/ComfyUI-WD14-Tagger/wd14tagger.py 
new file mode 100644 index 0000000000000000000000000000000000000000..045b6f832ca75d81a0d6ddf686b8683e048ce3ab --- /dev/null +++ b/custom_nodes/ComfyUI-WD14-Tagger/wd14tagger.py @@ -0,0 +1,179 @@ +# https://huggingface.co/spaces/SmilingWolf/wd-v1-4-tags + +import comfy.utils +import asyncio +import aiohttp +import numpy as np +import csv +import os +import sys +import onnxruntime as ort +from onnxruntime import InferenceSession +from PIL import Image +from server import PromptServer +from aiohttp import web +from .pysssss import get_ext_dir, get_comfy_dir, download_to_file, update_node_status, wait_for_async, get_extension_config +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) + + +config = get_extension_config() + +defaults = { + "model": "wd-v1-4-moat-tagger-v2", + "threshold": 0.35, + "character_threshold": 0.85, + "exclude_tags": "" +} +defaults.update(config.get("settings", {})) + +models_dir = get_ext_dir("models", mkdir=True) +all_models = ("wd-v1-4-moat-tagger-v2", + "wd-v1-4-convnext-tagger-v2", "wd-v1-4-convnext-tagger", + "wd-v1-4-convnextv2-tagger-v2", "wd-v1-4-vit-tagger-v2") + + +def get_installed_models(): + return filter(lambda x: x.endswith(".onnx"), os.listdir(models_dir)) + + +async def tag(image, model_name, threshold=0.35, character_threshold=0.85, exclude_tags="", client_id=None, node=None): + if model_name.endswith(".onnx"): + model_name = model_name[0:-5] + installed = list(get_installed_models()) + if not any(model_name + ".onnx" in s for s in installed): + await download_model(model_name, client_id, node) + + name = os.path.join(models_dir, model_name + ".onnx") + model = InferenceSession(name, providers=ort.get_available_providers()) + + input = model.get_inputs()[0] + height = input.shape[1] + + # Reduce to max size and pad with white + ratio = float(height)/max(image.size) + new_size = tuple([int(x*ratio) for x in image.size]) + image = image.resize(new_size, Image.LANCZOS) + square = Image.new("RGB", (height, height), (255, 255, 255)) + square.paste(image, ((height-new_size[0])//2, (height-new_size[1])//2)) + + image = np.array(square).astype(np.float32) + image = image[:, :, ::-1] # RGB -> BGR + image = np.expand_dims(image, 0) + + # Read all tags from csv and locate start of each category + tags = [] + general_index = None + character_index = None + with open(os.path.join(models_dir, model_name + ".csv")) as f: + reader = csv.reader(f) + next(reader) + for row in reader: + if general_index is None and row[2] == "0": + general_index = reader.line_num - 2 + elif character_index is None and row[2] == "4": + character_index = reader.line_num - 2 + tags.append(row[1]) + + label_name = model.get_outputs()[0].name + probs = model.run([label_name], {input.name: image})[0] + + result = list(zip(tags, probs[0])) + + # rating = max(result[:general_index], key=lambda x: x[1]) + general = [item for item in result[general_index:character_index] if item[1] > threshold] + character = [item for item in result[character_index:] if item[1] > character_threshold] + + all = character + general + remove = [s.strip() for s in exclude_tags.lower().split(",")] + all = [tag for tag in all if tag[0] not in remove] + + res = ", ".join((item[0].replace("(", "\\(").replace(")", "\\)") for item in all)) + + print(res) + return res + + +async def download_model(model, client_id, node): + url = f"https://huggingface.co/SmilingWolf/{model}/resolve/main/" + async with aiohttp.ClientSession(loop=asyncio.get_event_loop()) as session: + async def 
update_callback(perc): + nonlocal client_id + message = "" + if perc < 100: + message = f"Downloading {model}" + update_node_status(client_id, node, message, perc) + + await download_to_file( + f"{url}model.onnx", os.path.join("models",f"{model}.onnx"), update_callback, session=session) + await download_to_file( + f"{url}selected_tags.csv", os.path.join("models",f"{model}.csv"), update_callback, session=session) + + update_node_status(client_id, node, None) + + return web.Response(status=200) + + +@PromptServer.instance.routes.get("/pysssss/wd14tagger/tag") +async def get_tags(request): + if "filename" not in request.rel_url.query: + return web.Response(status=404) + + type = request.query.get("type", "output") + if type not in ["output", "input", "temp"]: + return web.Response(status=400) + + target_dir = get_comfy_dir(type) + image_path = os.path.abspath(os.path.join( + target_dir, request.query.get("subfolder", ""), request.query["filename"])) + c = os.path.commonpath((image_path, target_dir)) + if os.path.commonpath((image_path, target_dir)) != target_dir: + return web.Response(status=403) + + if not os.path.isfile(image_path): + return web.Response(status=404) + + image = Image.open(image_path) + + models = get_installed_models() + model = next(models, defaults["model"]) + + return web.json_response(await tag(image, model, client_id=request.rel_url.query.get("clientId", ""), node=request.rel_url.query.get("node", ""))) + + +class WD14Tagger: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "model": (all_models, ), + "threshold": ("FLOAT", {"default": defaults["threshold"], "min": 0.0, "max": 1, "step": 0.05}), + "character_threshold": ("FLOAT", {"default": defaults["character_threshold"], "min": 0.0, "max": 1, "step": 0.05}), + "exclude_tags": ("STRING", {"default": defaults["exclude_tags"]}), + }} + + RETURN_TYPES = ("STRING",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "tag" + OUTPUT_NODE = True + + CATEGORY = "image" + + def tag(self, image, model, threshold, character_threshold, exclude_tags=""): + tensor = image*255 + tensor = np.array(tensor, dtype=np.uint8) + + pbar = comfy.utils.ProgressBar(tensor.shape[0]) + tags = [] + for i in range(tensor.shape[0]): + image = Image.fromarray(tensor[i]) + tags.append(wait_for_async(lambda: tag(image, model, threshold, character_threshold, exclude_tags))) + pbar.update(1) + return {"ui": {"tags": tags}, "result": (tags,)} + + +NODE_CLASS_MAPPINGS = { + "WD14Tagger|pysssss": WD14Tagger, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "WD14Tagger|pysssss": "WD14 Tagger 🐍", +} diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/.gitignore b/custom_nodes/ComfyUI-sampler-lcm-alternative/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609 --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/LICENSE b/custom_nodes/ComfyUI-sampler-lcm-alternative/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/README.md b/custom_nodes/ComfyUI-sampler-lcm-alternative/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ffeba323f218e44d41d8afc936f351bd34654948 --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/README.md @@ -0,0 +1,23 @@
+# ComfyUI-sampler-lcm-alternative
+ComfyUI Custom Sampler nodes that add new, improved LCM sampler functions.
+
+This custom node repository adds three new nodes to ComfyUI's Custom Sampler category: SamplerLCMAlternative, SamplerLCMCycle and LCMScheduler (included just to save a few clicks, as you could also use the BasicScheduler and choose sgm_uniform).
+Just clone it into your custom_nodes folder and you can start using it as soon as you restart ComfyUI.
+
+SamplerLCMAlternative has three extra parameters.
+- `euler_steps`, which tells the sampler to use Euler sampling for the first n steps (a negative n uses Euler for all but the last |n| steps).
+- `ancestral`: if you give this a value above 0.0, the Euler steps get some fresh randomness injected each step. The value controls how much.
+- `noise_mult`, which scales the noise re-injected on Euler steps when `ancestral` is 0.0 (the default of 1.0 leaves behaviour unchanged).
+
+With default parameters, this sampler acts exactly like the original LCM sampler from ComfyUI. When you start tuning, I recommend starting by setting `euler_steps` to half of the total step count this sampler will be handling. Going higher will increase details/sharpness, and going lower will decrease both.
+
+SamplerLCMCycle has three extra parameters. This sampler repeats a cycle of Euler and LCM sampling steps until inference is done.
+If you're doing txt2img with LCM and feel like LCM is giving boring or artificial-looking images, give this sampler a try.
+- `euler_steps`, sets the number of Euler steps per cycle
+- `lcm_steps`, sets the number of LCM steps per cycle
+- `ancestral`, same as with SamplerLCMAlternative
+
+The default settings should work fine. I recommend using at least 6 steps to allow for 2 full cycles; that said, this sampler seems to really benefit from extra steps.
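+For intuition, here is a small standalone sketch (not part of the node pack; written for this README) of how both samplers decide, per step, whether to take an Euler or an LCM step. It mirrors the `loop_control` logic in `sampler_lcm_alt.py`, adding an explicit guard for the leftover == 0 case (the node code instead relies on the over-long `cycle[-0:]` tail being ignored):
+
+```python
+# Per-step schedule: True = an Euler step (noise is re-derived from the
+# model's own prediction), False = an LCM step (fresh random noise).
+
+def alt_schedule(steps: int, euler_steps: int) -> list[bool]:
+    # SamplerLCMAlternative: Euler for the first n steps. Python's modulo
+    # maps a negative n to steps - |n|, i.e. Euler for all but the last |n|.
+    euler_limit = euler_steps % steps
+    return [True] * euler_limit + [False] * (steps - euler_limit)
+
+def cycle_schedule(steps: int, euler_steps: int, lcm_steps: int) -> list[bool]:
+    # SamplerLCMCycle: repeat (euler_steps Euler, lcm_steps LCM) cycles,
+    # topping up any leftover steps from the tail of one more cycle.
+    cycle = [True] * euler_steps + [False] * lcm_steps
+    repeats, leftover = divmod(steps, len(cycle))
+    return cycle * repeats + (cycle[-leftover:] if leftover else [])
+
+print(alt_schedule(8, 4))       # [True] * 4 + [False] * 4
+print(alt_schedule(8, -2))      # Euler for all but the last 2 steps
+print(cycle_schedule(6, 1, 2))  # two full cycles: T F F  T F F
+```
+
+On an Euler step, `ancestral` blends fresh noise with the noise the model just removed, as `sqrt(ancestral) * fresh + sqrt(1 - ancestral) * removed`; that blend is the "fresh randomness" mentioned above.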
+
+Here's an example workflow showing how to use SamplerLCMCycle:
+![SamplerLCMCycle example](SamplerLCMCycle-example.png)
+
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/SamplerLCMCycle-example.png b/custom_nodes/ComfyUI-sampler-lcm-alternative/SamplerLCMCycle-example.png new file mode 100644 index 0000000000000000000000000000000000000000..4523b3f9ac70b5618ec8137886232f9a5e03a553 --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/SamplerLCMCycle-example.png @@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6956e88128295cdb8ecf440e4558171f88beb796961f4bd9eadbf24a1996aea6
+size 1794813
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/__init__.py b/custom_nodes/ComfyUI-sampler-lcm-alternative/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a23196efd7da5a58f92ef2e3919cbe02ece52190 --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/__init__.py @@ -0,0 +1,3 @@
+from .sampler_lcm_alt import NODE_CLASS_MAPPINGS
+
+__all__ = ['NODE_CLASS_MAPPINGS']
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4160c78fc7ba7d635f19ec665df4b78d2834de8 Binary files /dev/null and b/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/__init__.cpython-311.pyc differ
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/sampler_lcm_alt.cpython-311.pyc b/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/sampler_lcm_alt.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f7056c0b885bf58cc5146f7bd6f1eb3a731c3d4 Binary files /dev/null and b/custom_nodes/ComfyUI-sampler-lcm-alternative/__pycache__/sampler_lcm_alt.cpython-311.pyc differ
diff --git a/custom_nodes/ComfyUI-sampler-lcm-alternative/sampler_lcm_alt.py b/custom_nodes/ComfyUI-sampler-lcm-alternative/sampler_lcm_alt.py new file mode 100644 index 0000000000000000000000000000000000000000..b9221189e3b27d016a3e11f8bad6d1d56ab84dde --- /dev/null +++ b/custom_nodes/ComfyUI-sampler-lcm-alternative/sampler_lcm_alt.py @@ -0,0 +1,120 @@
+import comfy.samplers
+from comfy.k_diffusion.sampling import default_noise_sampler
+from tqdm.auto import trange, tqdm
+from itertools import product
+import torch
+
+@torch.no_grad()
+def sample_lcm_alt(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, euler_steps=0, ancestral=0.0, noise_mult = 1.0):
+    extra_args = {} if extra_args is None else extra_args
+    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+    steps = len(sigmas)-1
+    euler_limit = euler_steps%steps
+    loop_control = [True] * euler_limit + [False] * (steps - euler_limit)
+    return sample_lcm_backbone(model, x, sigmas, extra_args, callback, disable, noise_sampler, loop_control, ancestral, noise_mult)
+
+@torch.no_grad()
+def sample_lcm_cycle(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, euler_steps = 1, lcm_steps = 1, tweak_sigmas = False, ancestral=0.0):
+    extra_args = {} if extra_args is None else extra_args
+    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+    steps = len(sigmas) - 2
+    cycle_length = euler_steps + lcm_steps
+    repeats = steps // (cycle_length)
+    leftover = steps % (cycle_length)
+    cycle = [True] * euler_steps + [False] * lcm_steps
+    loop_control = cycle * repeats + cycle[-leftover:] #+ [False]
+    if tweak_sigmas:
+        index_map = torch.tensor([i + j * repeats for i,j in product(range(repeats),range(cycle_length))]
+                                  + list(range(cycle_length*repeats,len(sigmas)))).to(sigmas.device)
+        sigmas = torch.index_select(sigmas, 0, index_map)
+    return sample_lcm_backbone(model, x, sigmas, extra_args, callback, disable, noise_sampler, loop_control, ancestral)
+
+@torch.no_grad()
+def sample_lcm_backbone(model, x, sigmas, extra_args, callback, disable, noise_sampler, loop_control, ancestral, noise_mult = 1.0):
+    s_in = x.new_ones([x.shape[0]])
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+        if sigmas[i + 1] > 0:
+            if loop_control[i]:
+                if ancestral < 1.0:
+                    removed_noise = (x - denoised) / sigmas[i]
+                if ancestral > 0.0:
+                    noise = noise_sampler(sigmas[i], sigmas[i + 1])
+                    if ancestral < 1.0:
+                        noise = (ancestral**0.5) * noise + ((1.0 - ancestral)**0.5) * removed_noise
+                elif ancestral == 0.0:
+                    noise = removed_noise*noise_mult
+            else:
+                noise = noise_sampler(sigmas[i], sigmas[i + 1])
+        else:
+            noise = None
+        x = denoised
+        if noise is not None:
+            x += sigmas[i + 1] * noise
+    return x
+
+class LCMScheduler:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"model": ("MODEL",),
+                     "steps": ("INT", {"default": 8, "min": 1, "max": 10000}),
+                     }
+                }
+    RETURN_TYPES = ("SIGMAS",)
+    CATEGORY = "sampling/custom_sampling/schedulers"
+
+    FUNCTION = "get_sigmas"
+
+    def get_sigmas(self, model, steps):
+        sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, "sgm_uniform", steps).cpu()
+        return (sigmas, )
+
+class SamplerLCMAlternative:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"euler_steps": ("INT", {"default": 0, "min": -10000, "max": 10000}),
+                     "ancestral": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01, "round": True}),
+                     "noise_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.001, "round": True}),
+                     }
+                }
+    RETURN_TYPES = ("SAMPLER",)
+    CATEGORY = "sampling/custom_sampling/samplers"
+
+    FUNCTION = "get_sampler"
+
+    def get_sampler(self, euler_steps, ancestral, noise_mult):
+        sampler = comfy.samplers.KSAMPLER(sample_lcm_alt, extra_options={"euler_steps": euler_steps, "noise_mult": noise_mult, "ancestral": ancestral})
+        return (sampler, )
+
+class SamplerLCMCycle:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"euler_steps": ("INT", {"default": 1, "min": 1, "max": 50}),
+                     "lcm_steps": ("INT", {"default": 2, "min": 1, "max": 50}),
+                     "tweak_sigmas": ("BOOLEAN", {"default": False}),
+                     "ancestral": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01, "round": False}),
+                     }
+                }
+    RETURN_TYPES = ("SAMPLER",)
+    CATEGORY = "sampling/custom_sampling/samplers"
+
+    FUNCTION = "get_sampler"
+
+    def get_sampler(self, euler_steps, lcm_steps, tweak_sigmas, ancestral):
+        sampler = comfy.samplers.KSAMPLER(sample_lcm_cycle, extra_options={"euler_steps": euler_steps, "lcm_steps": lcm_steps, "tweak_sigmas": tweak_sigmas, "ancestral": ancestral})
+        return (sampler, )
+
+
+NODE_CLASS_MAPPINGS = {
+    "LCMScheduler": LCMScheduler,
+    "SamplerLCMAlternative": SamplerLCMAlternative,
+    "SamplerLCMCycle": SamplerLCMCycle,
+}
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Animation_Nodes.md b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Animation_Nodes.md new
file mode 100644 index 0000000000000000000000000000000000000000..aa1495fba22d6decd42ae0829ce6c609e921950a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Animation_Nodes.md @@ -0,0 +1,203 @@
+# CR Animation Nodes
+A comprehensive suite of nodes to enhance your animations.
+
+These nodes include some features similar to Deforum, and also some new ideas.
+
+If you would like to contribute to this project with suggestions or feedback, please DM me on the AI Revolution Discord, or add issues or feature requests here on GitHub.
+
+## CivitAI Post
+[ComfyUI - CR Animation Nodes](https://civitai.com/models/137333/comfyui-cr-animation-nodes)
+
+## Example Videos
+These short YouTube videos were made by [AI Music Experiment](https://www.youtube.com/channel/UCypaKOXWzzTxDvr3jWlfD6g) and made use of early versions of the nodes.
+
+[Cat Morph](https://www.youtube.com/shorts/kiSO-8i4RZ4)
+
+[Wolf Girl](https://www.youtube.com/shorts/bDWL5GIbmvs)
+
+## Demo Workflows
+Demo workflows are available on CivitAI. These are designed to demonstrate how the nodes function; they are not full animation workflows. Full template workflows will be published when the project nears completion.
+
+[ComfyUI - CR Animation Nodes - Demo Workflows](https://civitai.com/models/138947/comfyui-cr-animation-nodes-demo-workflows)
+
+![Cyclers_04](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/0d704325-7055-40ae-9a45-00a9a2db86e7)
+
+## Recommended Downloads
+The following node packs are recommended for building workflows using these nodes:
+
+[Comfyroll Custom Nodes](https://civitai.com/models/87609/comfyroll-custom-nodes-for-comfyui)
+
+[Fizz Nodes](https://github.com/FizzleDorf/ComfyUI_FizzNodes)
+
+- This is needed for the Prompt Scheduler
+
+[MTB](https://github.com/melMass/comfy_mtb)
+
+- This is needed for the Animation Builder and several other nodes
+
+## Compatibility
+These nodes are designed to work with both Fizz Nodes and MTB Nodes. We are also looking at testing with Loopchain.
+
+# List of Nodes
+These are the first 20 nodes. There are currently 35 nodes in development, and the plan is to release them in multiple drops during September.
+
+Feedback on the new nodes is welcome.
+
+## Scheduling Nodes
+
+CR Schedule Prompts SD1.5
+
+CR Schedule Prompts SDXL
+
+CR Simple Value Scheduler
+
+CR Simple Text Scheduler
+
+CR Simple Scheduler
+
+CR Central Scheduling Table
+
+CR Value Scheduler
+
+CR Text Scheduler
+
+CR Load Scheduled Models
+
+## Prompt Keyframe Nodes
+CR Simple Prompt List
+
+CR Simple Prompt List Keyframes
+
+CR Prompt List
+
+CR Prompt List Keyframes
+
+CR Keyframe List
+
+CR Prompt Text
+
+## List Nodes
+CR Model List
+
+CR LoRA List
+
+CR Text List
+
+CR Text List Simple
+
+CR Image List
+
+CR Image List Simple
+
+## Gradient and Increment Nodes
+CR Gradient Float
+
+CR Gradient Integer
+
+CR Increment Float
+
+CR Increment Integer
+
+## Cycler Nodes
+CR Cycle Models
+
+CR Cycle LoRAs
+
+CR Cycle Text
+
+CR Cycle Text Simple
+
+CR Cycle Images
+
+CR Cycle Images Simple
+
+## Index Nodes
+CR Index Reset
+
+CR Index Increment
+
+CR Index Multiply
+
+## Latent Nodes
+CR Interpolate Latents
+
+## IO Nodes
+CR Load Animation Frames
+
+CR Output Schedule To File
+
+CR Load Schedule From File
+
+CR Load Flow Frames
+
+CR Output Flow Frames
+
+## Utility Nodes
+CR Debatch Frames
+
+CR Text List To String
+
+CR Current Frame
+
+# Overview of New Nodes
+Please see this WIP CivitAI article for a guide to the new nodes.
+
+[CR Animation Nodes Guide](https://civitai.com/articles/2001/comfyui-guide-to-cr-animation-nodes)
+
+# Troubleshooting
+A troubleshooting article will be added soon.
+
+# Node Images
+**Model Scheduling**
+
+![scheduling table](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/4dade499-6311-4282-8632-6ee691a3167e)
+
+![simple schedule](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/b73ba216-b3c1-42f1-b506-361645d9889a)
+
+![scheduled models loras2](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/fe70bb27-f1c5-4352-8f5f-e0ba8bb5abe4)
+
+**Prompt Keyframe Nodes**
+
+![prompt list](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/e40a907e-268a-4b73-9c78-f2f1a15dc37d)
+
+![animation stack](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/eba4ff74-0223-4021-aed7-c8da63ece300)
+
+![Keyframe List](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/89b48df3-3c28-436e-84b2-e45952f8f873)
+
+**Cycler Nodes**
+
+![Cyclers_05](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/961629d8-231b-4153-92fc-81b05b563d6b)
+
+**Text Cycler Nodes**
+
+![text cyclers](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/4ba9f44d-3b67-4297-922a-38547289ffc8)
+
+**Image Cycler Nodes**
+
+![image cycler nodes](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/3bdfc5be-ab40-45a0-b3f0-8d7bad207c18)
+
+**Interpolation Nodes**
+
+![Interpolations_02](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/6d97ab56-d393-42ce-a5c3-280b3572ceea)
+
+**IO Nodes**
+
+![load anim frames](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/082a657e-26da-4d86-ac6a-e48f80ded288)
+
+![IO](https://github.com/RockOfFire/CR_Animation_Nodes/assets/42118269/5a84364e-4df8-4888-85aa-e145046175e4)
+
+# Credits
+
+WASasquatch https://github.com/WASasquatch/was-node-suite-comfyui
+
+melMass https://github.com/melMass/comfy_mtb
+
+FizzleDorf https://github.com/FizzleDorf/ComfyUI_FizzNodes
+
+SeargeDP https://github.com/SeargeDP/SeargeSDXL
+
+ltdrdata https://github.com/ltdrdata/ComfyUI-Impact-Pack
+
+LucianoCirino https://github.com/LucianoCirino/efficiency-nodes-comfyui
+
+sylym https://github.com/sylym/comfy_vid2vid
diff --git
a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Patch_Notes.md b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Patch_Notes.md new file mode 100644 index 0000000000000000000000000000000000000000..313659bdb052d6a5e7f2e5313fb7e6b363a40672 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/Patch_Notes.md @@ -0,0 +1,151 @@ +# 🧩 Comfyroll Custom Nodes - Patch Notes + +## PR59 Dec 6, 2023 + +__Changed Nodes__ + + CR Aspect Ratio + + - added prescale_factor + +## PR55 Dec 2, 2023 + +__Other Changes__ + + - removed node images + - fixes to CR Page Layout + +## PR54 Dec 2, 2023 + +__Other Changes__ + + - added show-help outputs on animation nodes with links to wiki + +## PR51, PR52 Dec 2, 2023 + +__Added Nodes__ + + CR Random RGB + +## PR50 Dec 1, 2023 + +__Other Changes__ + + - added show-help outputs with links to wiki + +## PR48 Nov 30, 2023 + +__Other Changes__ + + - disabled CR Load Prompt Style + - renamed classes on logic nodes + - increased max sizes on Aspect Ratio nodes + +## PR45 Nov 29, 2023 + +__Added Nodes__ + + CR Random Hex Color + +__Changed Nodes__ + + CR Color Tint + + - added custom color + + CR Simple Text Panel + + - added outline text + +__Other Changes__ + + - added demo workflows + +## PR44 Nov 28, 2023 + +__Changed Nodes__ + + CR Select Model + + - added ckpt_name output + +__Other Changes__ + + - added new Patch Notes page + +## PR40 Nov 27, 2023 + +__Added Nodes__ + + CR Select Model + + - allows selection of a model from one of 5 preset models + +__Changed Nodes__ + + CR Simple Text Watermark + + - added batch support + - added custom font hex color + + CR Aspect Ratio + + - changed descriptions in aspect_ratios for issue 24 + + CR Upscale Image + + - fixed issue with batched images + +__Other Changes__ + + - changed preset RGB for brown to 160, 85, 15 + + +## PR39 Nov 26, 2023 + +__Changed Nodes__ + + CR Halftone Filter + + - changed handling for RGBA inputs + + +## PR38 Nov 26, 2023 + + __Added Nodes__ + + CR Aspect Ratio + + - combines aspect ratio options for both SD1.5 and SDXL + - includes empty_latent output + +__Changed Nodes__ + + CR Halftone Filter + + - added resolution options + - modified antialias_scale parameters + + CR SDXL Aspect Ratio + + - added empty_latent output + + CR SD1.5 Aspect Ratio + + - added empty_latent output + + +## PR37 Nov 19, 2023 + +__Added Nodes__ + + CR Simple Text Watermark + + - adds a text watermark to an image + +__Other Changes__ + + - merged CR Animation Nodes into Comfyroll Custom Nodes + - added CR Animation Nodes demo workflows + - added reduce_opacity function in graphics_functions + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/README.md b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..81e8332acf66dde414b6d1e693d8a00054a47721 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/README.md @@ -0,0 +1,223 @@ +# 🧩 Comfyroll Custom Nodes for SDXL and SD1.5 + +Co-authored by Suzie1 and RockOfFire + +These nodes can be used in any ComfyUI workflow. + +# Installation + +1. cd custom_nodes +2. git clone https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes.git +3.
Restart ComfyUI + +You can also install the nodes using the following methods: +* install using [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) +* download from [CivitAI](https://civitai.com/models/87609/comfyroll-custom-nodes-for-comfyui) + +# Patch Notes + +https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/main/Patch_Notes.md + + +# Wiki + +https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki + +# List of Custom Nodes + +__🔳 Aspect Ratio__ +* CR SDXL Aspect Ratio +* CR SD1.5 Aspect Ratio +* CR Aspect Ratio (new 27/11/2023) + +__🌟 SDXL__ +* CR SDXL Prompt Mix Presets +* CR SDXL Style Text +* CR SDXL Base Prompt Encoder + +__💊 LoRA__ +* CR Load LoRA +* CR LoRA Stack +* CR Apply LoRA Stack + +__🕹️ ControlNet__ +* CR Apply ControlNet +* CR Multi-ControlNet Stack +* CR Apply Multi-ControlNet Stack + +__🔂 Process__ +* CR Img2Img Process Switch +* CR Hires Fix Process Switch +* CR Batch Process Switch + +__👓 Graphics - Filter__ +* CR Color Tint +* CR Halftone Filter + +__🌈 Graphics - Pattern__ +* CR Halftone Grid +* CR Color Bars +* CR Style Bars +* CR Checker Pattern +* CR Polygons +* CR Color Gradient +* CR Radial Gradient +* CR Starburst Lines +* CR Starburst Colors +* CR Simple Binary Pattern (new 8/12/2023) +* CR Binary Pattern (new 8/12/2023) + +__🔤 Graphics - Text__ +* CR Overlay Text +* CR Draw Text +* CR Mask Text +* CR Composite Text + +__👽 Graphics - Template__ +* CR Simple Meme Template +* CR Simple Banner +* CR Comic Panel Templates + +__🌁 Graphics - Layout__ +* CR Image Panel +* CR Page Layout +* CR Image Grid Panel +* CR Image Border +* CR Color Panel +* CR Simple Text Panel +* CR Overlay Transparent Image + +__✈️ Module__ +* CR Module Pipe Loader +* CR Module Input +* CR Module Output + +__🛩️ Pipe__ +* CR Image Pipe In +* CR Image Pipe Edit +* CR Image Pipe Out +* CR Pipe Switch + +__⛏️ Model Merge__ +* CR Model Merge Stack +* CR Apply Model Merge + +__🔍 Upscale__ +* CR Multi Upscale Stack +* CR Upscale Image +* CR Apply Multi Upscale + +__📉 XY Grid__ +* CR XY List +* CR XY Interpolate +* CR XY Index +* CR XY From Folder +* CR XY Save Grid Image +* CR Image Output + +__🔢 Index__ +* CR Index +* CR Index Increment +* CR Index Multiply +* CR Index Reset +* CR Trigger + +__🔧 Conversion__ +* CR String To Number +* CR String To Combo +* CR Float To String +* CR Float To Integer +* CR Integer To String +* CR Text List To String +* CR Seed to Int + +__🔀 Logic__ +* CR Image Input Switch +* CR Image Input Switch (4 way) +* CR Latent Input Switch +* CR Conditioning Input Switch +* CR Clip Input Switch +* CR Model Input Switch +* CR ControlNet Input Switch +* CR VAE Input Switch +* CR Text Input Switch +* CR Text Input Switch (4 way) +* CR Switch Model and CLIP + +__🎲 Random__ +* CR Random Hex Color +* CR Random RGB +* CR Random Multiline Values (new 8/12/2023) +* CR Random RGB Gradient (new 8/12/2023) + +__📦 Other__ +* CR Latent Batch Size +* CR Prompt Text +* CR Split String +* CR Integer Multiple +* CR Seed +* CR Value +* CR Conditioning Mixer (new 27/11/2023) +* CR Select Model (new 27/11/2023) + +__Deleted Nodes__ +* CR Aspect Ratio SDXL replaced by CR SDXL Aspect Ratio +* CR SDXL Prompt Mixer replaced by CR SDXL Prompt Mix Presets + +# CR Animation Nodes + +CR Animation Nodes are now included in the Comfyroll Custom Nodes pack.
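+ +The cycler nodes in that set all share the same frame arithmetic: the item list is repeated `loops` times, and the active item advances every `frame_interval` frames. A condensed sketch of that index calculation (simplified from `animation_nodes/cyclers.py`; the standalone function name here is illustrative, not part of the pack): + +```python +def cycle_index(current_frame, frame_interval, items, loops=1): + # Repeat the list, then step through it one item per interval + params = items * loops + return (current_frame // frame_interval) % len(params) +```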
+ +[Animation Nodes](https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/suzie_dev/Animation_Nodes.md) + +# Multi-ControlNet methodology + +The method used in CR Apply Multi-ControlNet is to chain the conditioning so that the output from the first ControlNet becomes the input to the second. + +For an example of this method, see this link: + +https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#mixing-controlnets + +# Multi-ControlNet compatibility with Efficiency nodes + +![Custom Nodes](/images/Efficiency_Compability.JPG) + +CR LoRA Stack and CR Multi-ControlNet Stack are both compatible with the Efficient Loader node in the Efficiency nodes pack by LucianoCirino. + +CR Apply Multi-ControlNet Stack can accept inputs from the Control Net Stacker node in the Efficiency nodes (see diagram in Node Images below). + +# SDXL Prompt Mix Presets + +Preset mappings can be found in this CivitAI article: + +https://civitai.com/articles/1835 + +# Comfyroll Workflow Templates + +The nodes were originally made for use in the Comfyroll Template Workflows. + +[Comfyroll Template Workflows](https://civitai.com/models/59806/comfyroll-template-workflows) + +[Comfyroll Pro Templates](https://civitai.com/models/85619/comfyroll-pro-template) + +[Comfyroll SDXL Workflow Templates](https://civitai.com/models/118005/comfyroll-sdxl-workflow-templates) + +[SDXL Workflow for ComfyUI with Multi-ControlNet](https://civitai.com/models/129858/sdxl-workflow-for-comfyui-with-multi-controlnet) + +[SDXL and SD1.5 Model Merge Templates for ComfyUI](https://civitai.com/models/123125/sdxl-and-sd15-model-merge-templates-for-comfyui) + +# Credits + +comfyanonymous/[ComfyUI](https://github.com/comfyanonymous/ComfyUI) - A powerful and modular stable diffusion GUI. + +WASasquatch/[was-node-suite-comfyui](https://github.com/WASasquatch/was-node-suite-comfyui) - A powerful set of custom node extensions for ComfyUI. + +TinyTerra/[ComfyUI_tinyterraNodes](https://github.com/TinyTerra/ComfyUI_tinyterraNodes) - A selection of nodes for Stable Diffusion ComfyUI + +hnmr293/[ComfyUI-nodes-hnmr](https://github.com/hnmr293/ComfyUI-nodes-hnmr) - ComfyUI custom nodes - merge, grid (aka xyz-plot) and others + +SeargeDP/[SeargeSDXL](https://github.com/SeargeDP) - ComfyUI custom nodes - Prompt nodes and Conditioning nodes + +LucianoCirino/[efficiency-nodes-comfyui](https://github.com/LucianoCirino/efficiency-nodes-comfyui) - A collection of ComfyUI custom nodes. + +SLAPaper/[ComfyUI-Image-Selector](https://github.com/SLAPaper/ComfyUI-Image-Selector) - Select one or more images from a batch diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__init__.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6fdff9cd7f696e71975c878733a97ccafea5af0 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__init__.py @@ -0,0 +1,410 @@ +""" +@author: RockOfFire +@title: Comfyroll Custom Nodes +@nickname: Comfyroll Custom Nodes +@description: Custom nodes for SDXL and SD1.5 including Multi-ControlNet, LoRA, Aspect Ratio, Process Switches, and many more nodes.
+""" + +from .nodes.nodes import * +from .nodes.legacy_nodes import * +from .nodes.lora import * +from .nodes.controlnet import * +from .nodes.pipe import * +from .nodes.sdxl import * +from .nodes.logic import * +from .nodes.model_merge import * +from .nodes.upscale import * +from .nodes.xygrid import * +from .nodes.index import * +from .nodes.conversion import * +from .nodes.matplot import * +from .nodes.pil_text import * +from .nodes.pil_layout import * +from .nodes.pil_filter import * +from .nodes.pil_template import * +from .nodes.pil_pattern import * +from .nodes.nodes_random import * + +from .animation_nodes.interpolation import * +from .animation_nodes.io import * +from .animation_nodes.prompt import * +from .animation_nodes.schedulers import * +from .animation_nodes.schedules import * +from .animation_nodes.lists import * +from .animation_nodes.utils import * +from .animation_nodes.cyclers import * + +LIVE_NODE_CLASS_MAPPINGS = { + ### Misc Nodes + "CR Image Output": CR_ImageOutput, + "CR Integer Multiple": CR_IntegerMultipleOf, + "CR Latent Batch Size": CR_LatentBatchSize, + "CR Seed": CR_Seed, + "CR Prompt Text":CR_PromptText, + "CR Split String":CR_SplitString, + "CR Value": CR_Value, + "CR Conditioning Mixer":CR_ConditioningMixer, + "CR Select Model": CR_SelectModel, + ### Aspect Ratio Nodes + "CR SD1.5 Aspect Ratio":CR_AspectRatioSD15, + "CR SDXL Aspect Ratio":CR_SDXLAspectRatio, + "CR Aspect Ratio": CR_AspectRatio, + ### Legacy Nodes + "CR Image Size": CR_ImageSize, + "CR Aspect Ratio SDXL": CR_AspectRatio_SDXL, + ### ControlNet Nodes + "CR Apply ControlNet": CR_ApplyControlNet, + "CR Multi-ControlNet Stack": CR_ControlNetStack, + "CR Apply Multi-ControlNet": CR_ApplyControlNetStack, + ### LoRA Nodes + "CR Load LoRA": CR_LoraLoader, + "CR LoRA Stack": CR_LoRAStack, + "CR Apply LoRA Stack": CR_ApplyLoRAStack, + ### Model Merge Nodes + "CR Apply Model Merge": CR_ApplyModelMerge, + "CR Model Merge Stack": CR_ModelMergeStack, + ### Pipe Nodes + "CR Module Pipe Loader": CR_ModulePipeLoader, + "CR Module Input": CR_ModuleInput, + "CR Module Output": CR_ModuleOutput, + "CR Image Pipe In": CR_ImagePipeIn, + "CR Image Pipe Edit": CR_ImagePipeEdit, + "CR Image Pipe Out": CR_ImagePipeOut, + "CR Pipe Switch": CR_InputSwitchPipe, + ### SDXL Nodes + "CR SDXL Prompt Mix Presets": CR_PromptMixPresets, + "CR SDXL Style Text": CR_SDXLStyleText, + "CR SDXL Base Prompt Encoder": CR_SDXLBasePromptEncoder, + ### Upscale Nodes + "CR Multi Upscale Stack": CR_MultiUpscaleStack, + "CR Upscale Image": CR_UpscaleImage, + "CR Apply Multi Upscale": CR_ApplyMultiUpscale, + ### XY Grid Nodes + "CR XY List": CR_XYList, + "CR XY Interpolate": CR_XYInterpolate, + "CR XY Index": CR_XYIndex, + "CR XY From Folder": CR_XYFromFolder, + "CR XY Save Grid Image": CR_XYSaveGridImage, + ### Graphics Pattern + "CR Halftone Grid": CR_HalftoneGrid, + "CR Color Bars": CR_ColorBars, + "CR Style Bars": CR_StyleBars, + "CR Checker Pattern": CR_CheckerPattern, + "CR Polygons": CR_Polygons, + "CR Color Gradient": CR_ColorGradient, + "CR Radial Gradient": CR_RadialGradient, + "CR Starburst Lines": CR_StarburstLines, + "CR Starburst Colors": CR_StarburstColors, + "CR Simple Binary Pattern": CR_BinaryPatternSimple, + "CR Binary Pattern": CR_BinaryPattern, + ### Graphics Text + "CR Overlay Text": CR_OverlayText, + "CR Draw Text": CR_DrawText, + "CR Mask Text": CR_MaskText, + "CR Composite Text": CR_CompositeText, + #"CR Arabic Text RTL": CR_ArabicTextRTL, + "CR Simple Text Watermark": CR_SimpleTextWatermark, + #"CR System TrueType 
Font": CR_SystemTrueTypeFont, + #"CR Display Font": CR_DisplayFont, + ### Graphics Filter + "CR Halftone Filter": CR_HalftoneFilter, + "CR Color Tint": CR_ColorTint, + ### Graphics Layout + "CR Page Layout": CR_PageLayout, + "CR Image Panel": CR_ImagePanel, + "CR Image Grid Panel": CR_ImageGridPanel, + "CR Image Border": CR_ImageBorder, + "CR Simple Text Panel": CR_SimpleTextPanel, + "CR Color Panel": CR_ColorPanel, + "CR Overlay Transparent Image": CR_OverlayTransparentImage, + #"CR Simple Titles": CR_SimpleTitles, + ### Graphics Template + "CR Simple Meme Template": CR_SimpleMemeTemplate, + "CR Simple Banner": CR_SimpleBanner, + "CR Comic Panel Templates": CR_ComicPanelTemplates, + ### Utils Logic Nodes + "CR Image Input Switch": CR_ImageInputSwitch, + "CR Image Input Switch (4 way)": CR_ImageInputSwitch4way, + "CR Latent Input Switch": CR_LatentInputSwitch, + "CR Conditioning Input Switch": CR_ConditioningInputSwitch, + "CR Clip Input Switch": CR_ClipInputSwitch, + "CR Model Input Switch": CR_ModelInputSwitch, + "CR ControlNet Input Switch": CR_ControlNetInputSwitch, + "CR VAE Input Switch": CR_VAEInputSwitch, + "CR Text Input Switch": CR_TextInputSwitch, + "CR Text Input Switch (4 way)": CR_TextInputSwitch4way, + "CR Switch Model and CLIP": CR_ModelAndCLIPInputSwitch, + ### Utils Process Nodes + "CR Batch Process Switch": CR_BatchProcessSwitch, + "CR Img2Img Process Switch": CR_Img2ImgProcessSwitch, + "CR Hires Fix Process Switch": CR_HiResFixProcessSwitch, + ### Utils Index Nodes + "CR Index": CR_Index, + "CR Index Increment": CR_IncrementIndex, + "CR Index Multiply": CR_MultiplyIndex, + "CR Index Reset": CR_IndexReset, + "CR Trigger": CR_Trigger, + ### Utils Conversion Nodes + "CR String To Number": CR_StringToNumber, + "CR String To Combo": CR_StringToCombo, + "CR Float To String": CR_FloatToString, + "CR Float To Integer": CR_FloatToInteger, + "CR Integer To String": CR_IntegerToString, + "CR Text List To String": CR_TextListToString, + "CR Seed to Int": CR_SeedToInt, + ### Utils Random Nodes + "CR Random Hex Color": CR_RandomHexColor, + "CR Random RGB": CR_RandomRGB, + "CR Random Multiline Values": CR_RandomMultilineValues, + "CR Random RGB Gradient": CR_RandomRGBGradient, + #------------------------------------------------------ + ### Animation Nodes + # Schedules + "CR Simple Schedule": CR_SimpleSchedule, + "CR Central Schedule": CR_CentralSchedule, + "CR Combine Schedules": CR_CombineSchedules, + "CR Output Schedule To File": CR_OutputScheduleToFile, + "CR Load Schedule From File": CR_LoadScheduleFromFile, + "CR Schedule Input Switch": Comfyroll_ScheduleInputSwitch, + # Schedulers + "CR Simple Value Scheduler": CR_SimpleValueScheduler, + "CR Simple Text Scheduler": CR_SimpleTextScheduler, + "CR Value Scheduler": CR_ValueScheduler, + "CR Text Scheduler": CR_TextScheduler, + "CR Load Scheduled Models": CR_LoadScheduledModels, + "CR Load Scheduled LoRAs": CR_LoadScheduledLoRAs, + "CR Prompt Scheduler": CR_PromptScheduler, + "CR Simple Prompt Scheduler": CR_SimplePromptScheduler, + # Prompt + "CR Prompt List": CR_PromptList, + "CR Prompt List Keyframes": CR_PromptListKeyframes, + "CR Simple Prompt List": CR_SimplePromptList, + "CR Simple Prompt List Keyframes": CR_SimplePromptListKeyframes, + "CR Keyframe List": CR_KeyframeList, + "CR Prompt Text": CR_PromptText, + #"CR Load Prompt Style": CR_LoadPromptStyle, + "CR Encode Scheduled Prompts": CR_EncodeScheduledPrompts, + # Interpolation + "CR Gradient Float": CR_GradientFloat, + "CR Gradient Integer": CR_GradientInteger, + "CR 
Increment Float": CR_IncrementFloat, + "CR Increment Integer": CR_IncrementInteger, + "CR Interpolate Latents": CR_InterpolateLatents, + # Lists + "CR Model List": CR_ModelList, + "CR LoRA List": CR_LoRAList, + "CR Text List": CR_TextList, + "CR Text List Simple": CR_TextListSimple, + "CR Image List": CR_ImageList, + "CR Image List Simple": CR_ImageListSimple, + # Cyclers + "CR Cycle Models": CR_CycleModels, + "CR Cycle LoRAs": CR_CycleLoRAs, + "CR Cycle Text": CR_CycleText, + "CR Cycle Text Simple": CR_CycleTextSimple, + "CR Cycle Images": CR_CycleImages, + "CR Cycle Images Simple": CR_CycleImagesSimple, + # Utils + "CR Debatch Frames": CR_DebatchFrames, + "CR Current Frame": CR_CurrentFrame, + "CR Input Text List": CR_InputTextList, + # IO + "CR Load Animation Frames": CR_LoadAnimationFrames, + "CR Load Flow Frames": CR_LoadFlowFrames, + "CR Output Flow Frames": CR_OutputFlowFrames, +} + +LIVE_NODE_DISPLAY_NAME_MAPPINGS = { + ### Misc Nodes + "CR Image Output": "💾 CR Image Output", + "CR Integer Multiple": "⚙️ CR Integer Multiple", + "CR Latent Batch Size": "⚙️ CR Latent Batch Size", + "CR Seed": "🌱 CR Seed", + "CR Prompt Text": "📝 CR Prompt Text", + "CR Split String": "⚙️ CR Split String", + "CR Value": "⚙️ CR Value", + "CR Conditioning Mixer": "⚙️ CR Conditioning Mixer", + "CR Select Model": "🔮 CR Select Model", + ### Aspect Ratio Nodes + "CR SD1.5 Aspect Ratio": "🔳 CR SD1.5 Aspect Ratio", + "CR SDXL Aspect Ratio": "🔳 CR SDXL Aspect Ratio", + "CR Aspect Ratio": "🔳 CR Aspect Ratio", + ### Legacy Nodes + "CR Image Size": "CR Image Size (Legacy)", + "CR Aspect Ratio SDXL": "CR Aspect Ratio SDXL (Legacy)", + ### ControlNet Nodes + "CR Apply ControlNet": "🕹️ CR Apply ControlNet", + "CR Multi-ControlNet Stack": "🕹️ CR Multi-ControlNet Stack", + "CR Apply Multi-ControlNet": "🕹️ CR Apply Multi-ControlNet", + ### LoRA Nodes + "CR Load LoRA": "💊 CR Load LoRA", + "CR LoRA Stack": "💊 CR LoRA Stack", + "CR Apply LoRA Stack": "💊 CR Apply LoRA Stack", + ### Model Merge Nodes + "CR Apply Model Merge": "⛏️ CR Apply Model Merge", + "CR Model Merge Stack": "⛏️ CR Model Merge Stack", + ### Pipe Nodes + "CR Module Pipe Loader": "✈️ CR Module Pipe Loader", + "CR Module Input": "✈️ CR Module Input", + "CR Module Output": "✈️ CR Module Output", + "CR Image Pipe In": "🛩 CR Image Pipe In", + "CR Image Pipe Edit": "🛩️ CR Image Pipe Edit", + "CR Image Pipe Out": "🛩️ CR Image Pipe Out", + "CR Pipe Switch": "🔀️ CR Pipe Switch", + ### SDXL Nodes + "CR SDXL Prompt Mix Presets": "🌟 CR SDXL Prompt Mix Presets", + "CR SDXL Style Text": "🌟 CR SDXL Style Text", + "CR SDXL Base Prompt Encoder": "🌟 CR SDXL Base Prompt Encoder", + ### Upscale Nodes + "CR Multi Upscale Stack": "🔍 CR Multi Upscale Stack", + "CR Upscale Image": "🔍 CR Upscale Image", + "CR Apply Multi Upscale": "🔍 CR Apply Multi Upscale", + ### XY Grid Nodes + "CR XY List": "📉 CR XY List", + "CR XY Interpolate": "📉 CR XY Interpolate", + "CR XY Index": "📉 CR XY Index", + "CR XY From Folder": "📉 CR XY From Folder", + "CR XY Save Grid Image": "📉 CR XY Save Grid Image", + ### Graphics Pattern + "CR Halftone Grid" : "🟫 CR Halftone Grid", + "CR Color Bars" : "🟫 CR Color Bars", + "CR Style Bars" : "🟪 CR Style Bars", + "CR Checker Pattern": "🟦 CR Checker Pattern", + "CR Polygons": "🟩 CR Polygons", + "CR Color Gradient": "🟨 CR Color Gradient", + "CR Radial Gradient": "🟨 CR Radial Gradient", + "CR Starburst Lines": "🟧 CR Starburst Lines", + "CR Starburst Colors": "🟥 CR Starburst Colors", + "CR Simple Binary Pattern": "🟥 CR Simple Binary Pattern", + "CR Binary Pattern": "🟥 
CR Binary Pattern", + ### Graphics Text + "CR Overlay Text": "🔤 CR Overlay Text", + "CR Draw Text": "🔤️ CR Draw Text", + "CR Mask Text": "🔤️ CR Mask Text", + "CR Composite Text": "🔤️ CR Composite Text", + #"CR Arabic Text RTL": "🔤️ CR Arabic Text RTL", + "CR Simple Text Watermark": "🔤️ CR Simple Text Watermark", + ### Graphics Filter + "CR Halftone Filter": "🎨 Halftone Filter", + "CR Color Tint": "🎨 CR Color Tint", + ### Graphics Layout + "CR Image Panel": "🌁 CR Image Panel", + "CR Image Grid Panel": "🌁 CR Image Grid Panel", + "CR Simple Text Panel": "🌁 CR Simple Text Panel", + "CR Color Panel": "🌁 CR Color Panel", + "CR Page Layout": "🌁 CR Page Layout", + "CR Image Border": "🌁 CR Image Border", + "CR Overlay Transparent Image": "🌁 CR Overlay Transparent Image", + #"CR Simple Titles": "🌁 CR Simple Titles", + ### Graphics Template + "CR Simple Meme Template": "👽 CR Simple Meme Template", + "CR Simple Banner": "👽 CR Simple Banner", + "CR Comic Panel Templates": "👽 CR Comic Panel Templates", + ### Utils Logic Nodes + "CR Image Input Switch": "🔀 CR Image Input Switch", + "CR Image Input Switch (4 way)": "🔀 CR Image Input Switch (4 way)", + "CR Latent Input Switch": "🔀 CR Latent Input Switch", + "CR Conditioning Input Switch": "🔀 CR Conditioning Input Switch", + "CR Clip Input Switch": "🔀 CR Clip Input Switch", + "CR Model Input Switch": "🔀 CR Model Input Switch", + "CR ControlNet Input Switch": "🔀 CR ControlNet Input Switch", + "CR VAE Input Switch": "🔀 CR VAE Input Switch", + "CR Text Input Switch": "🔀 CR Text Input Switch", + "CR Text Input Switch (4 way)": "🔀 CR Text Input Switch (4 way)", + "CR Switch Model and CLIP": "🔀 CR Switch Model and CLIP", + ### Utils Process Nodes + "CR Batch Process Switch": "🔂 CR Batch Process Switch", + "CR Img2Img Process Switch": "🔂 CR Img2Img Process Switch", + "CR Hires Fix Process Switch": "🔂 CR Hires Fix Process Switch", + ### Utils Index Nodes + "CR Index":"🔢 CR Index", + "CR Index Increment": "🔢 CR Index Increment", + "CR Index Multiply": "🔢 CR Index Multiply", + "CR Index Reset": "🔢 CR Index Reset", + "CR Trigger": "🔢 CR Trigger", + ### Utils Conversion Nodes + "CR String To Number": "🔧 CR String To Number", + "CR String To Combo": "🔧 CR String To Combo", + "CR Float To String": "🔧 CR Float To String", + "CR Float To Integer": "🔧 CR Float To Integer", + "CR Integer To String": "🔧 CR Integer To String", + "CR Text List To String": "🔧 CR Text List To String", + "CR Seed to Int": "🔧 CR Seed to Int", + ### Utils Random Nodes + "CR Random Hex Color": "🎲 CR Random Hex Color", + "CR Random RGB": "🎲 CR Random RGB", + "CR Random Multiline Values": "🎲 CR Random Multiline Values", + "CR Random RGB Gradient": "🎲 CR Random RGB Gradient", + #------------------------------------------------------ + ### Animation Nodes + # Schedules + "CR Simple Schedule": "📋 CR Simple Schedule", + "CR Central Schedule": "📋 CR Central Schedule", + "CR Combine Schedules": "📋 CR Combine Schedules", + "CR Output Schedule To File": "📋 CR Output Schedule To File", + "CR Load Schedule From File": "📋 CR Load Schedule From File", + "CR Schedule Input Switch": "📋 CR Schedule Input Switch", + # Schedulers + "CR Simple Value Scheduler": "📑 CR Simple Value Scheduler", + "CR Simple Text Scheduler": "📑 CR Simple Text Scheduler", + "CR Value Scheduler": "📑 CR Value Scheduler", + "CR Text Scheduler": "📑 CR Text Scheduler", + "CR Load Scheduled Models": "📑 CR Load Scheduled Models", + "CR Load Scheduled LoRAs": "📑 CR Load Scheduled LoRAs", + "CR Prompt Scheduler": "📑 CR Prompt Scheduler", + "CR Simple 
Prompt Scheduler": "📑 CR Simple Prompt Scheduler", + # Prompt + "CR Prompt List": "📝 CR Prompt List", + "CR Prompt List Keyframes": "📝 CR Prompt List Keyframes", + "CR Simple Prompt List": "📝 CR Simple Prompt List", + "CR Simple Prompt List Keyframes": "📝 CR Simple Prompt List Keyframes", + "CR Keyframe List": "📝 CR Keyframe List", + "CR Prompt Text": "📝 CR Prompt Text", + #"CR Load Prompt Style": "📝 CR Load Prompt Style", + "CR Encode Scheduled Prompts": "📝 CR Encode Scheduled Prompts", + # Interpolation + "CR Gradient Float": "🔢 CR Gradient Float", + "CR Gradient Integer": "🔢 CR Gradient Integer", + "CR Increment Float": "🔢 CR Increment Float", + "CR Increment Integer": "🔢 CR Increment Integer", + "CR Interpolate Latents": "🔢 CR Interpolate Latents", + # Lists + "CR Model List": "📃 CR Model List", + "CR LoRA List": "📃 CR LoRA List", + "CR Text List": "📃 CR Text List", + "CR Text List Simple": "📃 CR Text List Simple", + "CR Image List": "📃 CR Image List", + "CR Image List Simple": "📃 CR Image List Simple", + "CR Input Text List": "📃 CR Input Text List", + # Cyclers + "CR Cycle Models": "♻️ CR Cycle Models", + "CR Cycle LoRAs": "♻️ CR Cycle LoRAs", + "CR Cycle Text": "♻️ CR Cycle Text", + "CR Cycle Text Simple": "♻️ CR Cycle Text Simple", + "CR Cycle Images": "♻️ CR Cycle Images", + "CR Cycle Images Simple": "♻️ CR Cycle Images Simple", + # Utils + "CR Debatch Frames": "🛠️ CR Debatch Frames", + "CR Current Frame": "🛠️ CR Current Frame", + # IO + "CR Load Animation Frames": "⌨️ CR Load Animation Frames", + "CR Load Flow Frames": "⌨️ CR Load Flow Frames", + "CR Output Flow Frames": "⌨️ CR Output Flow Frames", +} + +INCLUDE_DEV_NODES = False + +try: + from .dev_node_mappings import DEV_NODE_CLASS_MAPPINGS, DEV_NODE_DISPLAY_NAME_MAPPINGS + if INCLUDE_DEV_NODES: + NODE_CLASS_MAPPINGS = {**DEV_NODE_CLASS_MAPPINGS, **LIVE_NODE_CLASS_MAPPINGS} + NODE_DISPLAY_NAME_MAPPINGS = {**DEV_NODE_DISPLAY_NAME_MAPPINGS, **LIVE_NODE_DISPLAY_NAME_MAPPINGS} + print("\033[34mComfyroll Custom Nodes: \033[92mDev Nodes Loaded\033[0m") + else: + NODE_CLASS_MAPPINGS = LIVE_NODE_CLASS_MAPPINGS + NODE_DISPLAY_NAME_MAPPINGS = LIVE_NODE_DISPLAY_NAME_MAPPINGS +except ImportError: + NODE_CLASS_MAPPINGS = LIVE_NODE_CLASS_MAPPINGS + NODE_DISPLAY_NAME_MAPPINGS = LIVE_NODE_DISPLAY_NAME_MAPPINGS + +print("\033[34mComfyroll Custom Nodes: \033[92mLoaded\033[0m") diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6425bb903c2bfa0a7409a4d90cfbf0531c432b4 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/categories.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/categories.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d209c8f37b3edf555b218458b87c623a3dd3980f Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/categories.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/config.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a40bb884c7f5414d2b27754f83d6943ea63cb01 Binary files /dev/null and 
b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/config.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/dev_node_mappings.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/dev_node_mappings.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e3ac8b291d54cf72d53afdc09770367049ce7a7 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/__pycache__/dev_node_mappings.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/cyclers.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/cyclers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb306a89b7b142211c4ed072c003898dc2c88681 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/cyclers.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/functions.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/functions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22f9624baa833b68f2d61941d2570ba9c774041e Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/functions.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/interpolation.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/interpolation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c83cb7f649a517b80360879146aa8600623fbad9 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/interpolation.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/io.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/io.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f40778925e44820b182d5060f98c1fd2dd0970fe Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/io.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/json_functions.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/json_functions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0271f807454cdb8055e01ce6b67d8ceaf3ebe67 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/json_functions.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/lists.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/lists.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7afc4c31bfbe8dfb10760951ce2d1edef50a60e9 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/lists.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/prompt.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/prompt.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72415aaf890cb740aa081085f71ab246610afc92 Binary files /dev/null and 
b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/prompt.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedulers.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedulers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a42a5e99ad8016a5c31023d08b1804e96e8d2b0 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedulers.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedules.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedules.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34c39f6f210b9ecccf01d7d42f7c49d2805b0396 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/schedules.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/utils.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceaecf3b5f8ad85590345efa58cdc3c16cd64b19 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/cyclers.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/cyclers.py new file mode 100644 index 0000000000000000000000000000000000000000..62e58549cc47cc5e34833d41f72e7bb0026d48d6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/cyclers.py @@ -0,0 +1,377 @@ +#---------------------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import comfy.sd +import torch +import os +import sys +import folder_paths +import random +from PIL import Image, ImageEnhance +import numpy as np +import io +from ..categories import icons + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) +#---------------------------------------------------------------------------------------------------------------------# +# FUNCTIONS +#---------------------------------------------------------------------------------------------------------------------# +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) +#---------------------------------------------------------------------------------------------------------------------# +# NODES +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleModels: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Off", "Sequential"] + + return {"required": {"mode": (modes,), + "model": ("MODEL",), + "clip": ("CLIP",), + "model_list": ("MODEL_LIST",), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + 
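+ # current_frame is normally driven per frame by a counter node such as CR Current Frame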
}, + } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "show_help", ) + FUNCTION = "cycle_models" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle_models(self, mode, model, clip, model_list, frame_interval, loops, current_frame,): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-models" + + # Initialize the list + model_params = list() + + # Extend model_params with the model_list items + if model_list: + for _ in range(loops): + model_params.extend(model_list) + #print(f"[Debug] CR Cycle Models:{model_params}") + + if mode == "Off": + # No checkpoint is loaded in this mode, so return None in the VAE slot to match RETURN_TYPES + return (model, clip, None, show_help, ) + + elif mode == "Sequential": + if current_frame == 0: + return (model, clip, None, show_help, ) + else: + # Calculate the index of the current model based on the current_frame and frame_interval + current_model_index = (current_frame // frame_interval) % len(model_params) + #print(f"[Debug] CR Cycle Models:{current_model_index}") + + # Get the parameters of the current model + current_model_params = model_params[current_model_index] + model_alias, ckpt_name = current_model_params + print(f"[Info] CR Cycle Models: Current model is {ckpt_name}") + + # Load the current model + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, + embedding_directory=folder_paths.get_folder_paths("embeddings")) + # load_checkpoint_guess_config returns (model, clip, vae, ...), so unpack it to match RETURN_TYPES + return (out[0], out[1], out[2], show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleLoRAs: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Off", "Sequential"] + + return {"required": {"mode": (modes,), + "model": ("MODEL",), + "clip": ("CLIP",), + "lora_list": ("LORA_LIST",), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("MODEL", "CLIP", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "show_help", ) + FUNCTION = "cycle" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle(self, mode, model, clip, lora_list, frame_interval, loops, current_frame): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-loras" + + # Initialize the list + lora_params = list() + + # Extend lora_params with lora_list items + if lora_list: + for _ in range(loops): + lora_params.extend(lora_list) + #print(f"[Debug] CR Cycle LoRAs:{lora_params}") + else: + return (model, clip, show_help, ) + + if mode == "Sequential": + # Calculate the index of the current LoRA based on the current_frame and frame_interval + current_lora_index = (current_frame // frame_interval) % len(lora_params) + #print(f"[Debug] CR Cycle LoRAs:{current_lora_index}") + + # Get the parameters of the current LoRA + current_lora_params = lora_params[current_lora_index] + lora_alias, lora_name, model_strength, clip_strength = current_lora_params + + # Load the current LoRA + lora_path = folder_paths.get_full_path("loras", lora_name) + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + print(f"[Info] CR_CycleLoRAs: Current LoRA is {lora_name}") + + # Apply the current LoRA to the model and clip + model_lora, clip_lora = comfy.sd.load_lora_for_models( + model, clip, lora, model_strength,
clip_strength) + return (model_lora, clip_lora, show_help, ) + else: + return (model, clip, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleText: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Sequential"] + + return {"required": {"mode": (modes,), + "text_list": ("TEXT_LIST",), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "cycle_text" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle_text(self, mode, text_list, frame_interval, loops, current_frame,): + + # Initialize the list + text_params = list() + + # Extend text_params with text_list items + if text_list: + for _ in range(loops): + text_params.extend(text_list) + #print(f"[Debug] CR Cycle Text:{text_params}") + + if mode == "Sequential": + # Calculate the index of the current text string based on the current_frame and frame_interval + current_text_index = (current_frame // frame_interval) % len(text_params) + #print(f"[Debug] CR Cycle Text:{current_text_index}") + + # Get the parameters of the current text + current_text_params = text_params[current_text_index] + print(f"[Debug] CR Cycle Text:{current_text_params}") + text_alias, current_text_item = current_text_params + #print(f"[Debug] CR Cycle Text:{current_text_item}") + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-text" + + return (current_text_item, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleTextSimple: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Sequential"] + + return {"required": {"mode": (modes,), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + "optional": {"text_1": ("STRING", {"multiline": False, "default": ""}), + "text_2": ("STRING", {"multiline": False, "default": ""}), + "text_3": ("STRING", {"multiline": False, "default": ""}), + "text_4": ("STRING", {"multiline": False, "default": ""}), + "text_5": ("STRING", {"multiline": False, "default": ""}), + "text_list_simple": ("TEXT_LIST_SIMPLE",), + }, + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "cycle_text" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle_text(self, mode, frame_interval, loops, current_frame, + text_1, text_2, text_3, text_4, text_5, + text_list_simple=None ): + + # Initialize the list + text_params = list() + + text_list = list() + if text_1 != "": + text_list.append(text_1) + if text_2 != "": + text_list.append(text_2) + if text_3 != "": + text_list.append(text_3) + if text_4 != "": + text_list.append(text_4) + if text_5 != "": + text_list.append(text_5) + + # Extend text_params with text items + for _ in range(loops): + if text_list_simple: + text_params.extend(text_list_simple) + text_params.extend(text_list) + #print(f"[Debug] CR Cycle Text:{len(text_params)}") + #print(f"[Debug] CR Cycle Text:{text_params}") + + if mode == "Sequential": + # Calculate the index of the 
current text string based on the current_frame and frame_interval + current_text_index = (current_frame // frame_interval) % len(text_params) + #print(f"[Debug] CR Cycle Text:{current_text_index}") + + # Get the parameters of the current text + current_text_item = text_params[current_text_index] + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-text-simple" + + return (current_text_item, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleImages: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Sequential"] + + return {"required": {"mode": (modes,), + "image_list": ("IMAGE_LIST",), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "cycle" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle(self, mode, image_list, frame_interval, loops, current_frame,): + + # Initialize the list + image_params = list() + + # Extend image_params with image_list items + if image_list: + for _ in range(loops): + image_params.extend(image_list) + + if mode == "Sequential": + # Calculate the index of the current image based on the current_frame and frame_interval + current_image_index = (current_frame // frame_interval) % len(image_params) + print(f"[Debug] CR Cycle Image:{current_image_index}") + + # Get the parameters of the current image + current_image_params = image_params[current_image_index] + image_alias, current_image_item = current_image_params + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-images" + + return (current_image_item, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CycleImagesSimple: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Sequential"] + + return {"required": {"mode": (modes,), + "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}), + "loops": ("INT", {"default": 1, "min": 1, "max": 1000}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}) + }, + "optional": {"image_1": ("IMAGE",), + "image_2": ("IMAGE",), + "image_3": ("IMAGE",), + "image_4": ("IMAGE",), + "image_5": ("IMAGE",), + "image_list_simple": ("IMAGE_LIST_SIMPLE",) + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "cycle_image" + CATEGORY = icons.get("Comfyroll/Animation/Cyclers") + + def cycle_image(self, mode, frame_interval, loops, current_frame, + image_1=None, image_2=None, image_3=None, image_4=None, image_5=None, + image_list_simple=None ): + + # Initialize the list + image_params = list() + + image_list = list() + if image_1 is not None: + image_list.append(image_1) + if image_2 is not None: + image_list.append(image_2) + if image_3 is not None: + image_list.append(image_3) + if image_4 is not None: + image_list.append(image_4) + if image_5 is not None: + image_list.append(image_5) + + # Extend image_params with image items + for _ in range(loops): + if image_list_simple: + image_params.extend(image_list_simple) + image_params.extend(image_list) + + if mode == "Sequential": + #
Calculate the index of the current image based on the current_frame and frame_interval + current_image_index = (current_frame // frame_interval) % len(image_params) + print(f"[Debug] CR Cycle Images Simple:{current_image_index}") + + # Get the parameters of the current image + current_image_item = image_params[current_image_index] + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Cycler-Nodes#cr-cycle-images-simple" + return (current_image_item, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 6 nodes +''' +NODE_CLASS_MAPPINGS = { + ### Cyclers + "CR Cycle Models":CR_CycleModels, + "CR Cycle LoRAs":CR_CycleLoRAs, + "CR Cycle Images":CR_CycleImages, + "CR Cycle Images Simple":CR_CycleImagesSimple, + "CR Cycle Text":CR_CycleText, + "CR Cycle Text Simple":CR_CycleTextSimple, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/functions.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..890fb851fc0dcbd29e043c273dd518ade8164905 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/functions.py @@ -0,0 +1,105 @@ +#-----------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#-----------------------------------------------------------------------------------------------------------# + +#-----------------------------------------------------------------------------------------------------------# +# FUNCTIONS +#-----------------------------------------------------------------------------------------------------------# + +def keyframe_scheduler(schedule, schedule_alias, current_frame): + + # Initialise + schedule_lines = list() + previous_params = "" + + # Loop through the schedule to find lines with matching schedule_alias + for item in schedule: + alias = item[0] + if alias == schedule_alias: + schedule_lines.extend([(item)]) + + # Loop through the filtered lines + for i, item in enumerate(schedule_lines): + # Get alias and schedule line + alias, line = item + + # Skip empty lines + if not line.strip(): + print(f"[Warning] Skipped blank line at line {i}") + continue + + # Get parameters from the tuples + frame_str, params = line.split(',', 1) + frame = int(frame_str) + + # Strip spaces at start of params + params = params.lstrip() + + # Return the params + if frame < current_frame: + previous_params = params + continue + if frame == current_frame: + previous_params = params + else: + params = previous_params + return params + + # Continue using the final params after the last schedule line has been evaluated + return previous_params + +def prompt_scheduler(schedule, schedule_alias, current_frame): + + # Initialise + schedule_lines = list() + previous_prompt = "" + previous_keyframe = 0 + + #print(schedule, schedule_alias, current_frame) + + # Loop through the schedule to find lines with matching schedule_alias + for item in schedule: + alias = item[0] + if alias == schedule_alias: + schedule_lines.extend([(item)]) + + # Loop through the filtered lines + for i,
item in enumerate(schedule_lines): + # Get alias and schedule line + alias, line = item + + # Get parameters from the tuples + frame_str, prompt = line.split(',', 1) + frame_str = frame_str.strip('\"') + frame = int(frame_str) + + # Strip leading spaces and quotes + prompt = prompt.lstrip() + prompt = prompt.replace('"', '') + + # Return the parameters + if frame < current_frame: + previous_prompt = prompt + previous_keyframe = frame + #print(f"[Debug] frame < current_frame, frame {frame}, current frame {current_frame}") + #print(f"[Debug] frame < current_frame, prompt {prompt}") + continue + if frame == current_frame: + next_prompt = prompt + next_keyframe = frame + previous_prompt = prompt + previous_keyframe = frame + #print(f"[Debug] frame = current_frame, frame {frame}, current frame {current_frame}, next keyframe {next_keyframe}") + #print(f"[Debug] frame = current_frame, prompt {prompt}") + else: + next_prompt = prompt + next_keyframe = frame + prompt = previous_prompt + #print(f"[Debug] frame > current_frame, frame {frame}, current frame {current_frame}, next keyframe {next_keyframe}") + #print(f"[Debug] frame > current_frame, next prompt {next_prompt}") + + return prompt, next_prompt, previous_keyframe, next_keyframe + + # Continue using the final params after the last schedule line has been evaluated + return previous_prompt, previous_prompt, previous_keyframe, previous_keyframe \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/interpolation.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..f328e4c014a18868ee3fa70ac00ab9dd4105036d --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/interpolation.py @@ -0,0 +1,225 @@ +#---------------------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import torch +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# NODES +#---------------------------------------------------------------------------------------------------------------------# +class CR_GradientInteger: + + @classmethod + def INPUT_TYPES(s): + gradient_profiles = ["Lerp"] + + return {"required": {"start_value": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "end_value": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "gradient_profile": (gradient_profiles,) + }, + } + + RETURN_TYPES = ("INT", "STRING", ) + RETURN_NAMES = ("INT", "show_help", ) + FUNCTION = "gradient" + CATEGORY = icons.get("Comfyroll/Animation/Interpolate") + + def gradient(self, start_value, end_value, start_frame, frame_duration, current_frame, gradient_profile): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Interpolation-Nodes#cr-gradient-integer" + + if current_frame < start_frame: + 
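# Hold the start value until start_frame is reached +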
return (start_value, show_help, ) + + if current_frame > start_frame + frame_duration: + return (end_value, show_help, ) + + step = (end_value - start_value) / frame_duration + + current_step = current_frame - start_frame + + int_out = start_value + int(current_step * step) + + return (int_out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_GradientFloat: + + @classmethod + def INPUT_TYPES(s): + gradient_profiles = ["Lerp"] + + return {"required": {"start_value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "end_value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "gradient_profile": (gradient_profiles,) + }, + } + + RETURN_TYPES = ("FLOAT", "STRING", ) + RETURN_NAMES = ("FLOAT", "show_help", ) + FUNCTION = "gradient" + CATEGORY = icons.get("Comfyroll/Animation/Interpolate") + + def gradient(self, start_value, end_value, start_frame, frame_duration, current_frame, gradient_profile): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Interpolation-Nodes#cr-gradient-float" + + if current_frame < start_frame: + return (start_value, show_help, ) + + if current_frame > start_frame + frame_duration: + return (end_value, show_help, ) + + step = (end_value - start_value) / frame_duration + + current_step = current_frame - start_frame + + float_out = start_value + current_step * step + + return (float_out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_IncrementFloat: + + @classmethod + def INPUT_TYPES(s): + + return {"required": {"start_value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.001,}), + "step": ("FLOAT", {"default": 0.1, "min": -9999.0, "max": 9999.0, "step": 0.001,}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.00,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("FLOAT", "STRING", ) + RETURN_NAMES = ("FLOAT", "show_help", ) + OUTPUT_NODE = True + FUNCTION = "increment" + CATEGORY = icons.get("Comfyroll/Animation/Interpolate") + + def increment(self, start_value, step, start_frame, frame_duration, current_frame): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Interpolation-Nodes#cr-increment-float" + + #print(f"current frame {current_frame}") + if current_frame < start_frame: + return (start_value, show_help, ) + + current_value = start_value + (current_frame - start_frame) * step + if current_frame <= start_frame + frame_duration: + current_value += step + #print(f" 1: + print("Warning: Conditioning from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + + cond_from = conditioning_from[0][0] + pooled_output_from = conditioning_from[0][1].get("pooled_output", None) + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from) + t0 = cond_from[:,:t1.shape[1]] + if t0.shape[1] < 
t1.shape[1]: + t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1) + + tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength)) + t_to = conditioning_to[i][1].copy() + if pooled_output_from is not None and pooled_output_to is not None: + t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength)) + elif pooled_output_from is not None: + t_to["pooled_output"] = pooled_output_from + + n = [tw, t_to] + out.append(n) + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Prompt-Nodes#cr-encode-scheduled-prompts" + return (out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 7 nodes +''' +NODE_CLASS_MAPPINGS = { + "CR Prompt List":CR_PromptList, + "CR Prompt List Keyframes":CR_PromptListKeyframes, + "CR Simple Prompt List":CR_SimplePromptList, + "CR Simple Prompt List Keyframes":CR_SimplePromptListKeyframes, + "CR Keyframe List":CR_KeyframeList, + #"CR Load Prompt Style":CR_LoadPromptStyle, + "CR Encode Scheduled Prompts":CR_EncodeScheduledPrompts, +} +''' diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedulers.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..fdbb03221ab2e23bb5f663962d9bb09f2ee36fb6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedulers.py @@ -0,0 +1,523 @@ +#-----------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#-----------------------------------------------------------------------------------------------------------# + +import comfy.sd +import os +import sys +import folder_paths +from nodes import LoraLoader +from .functions import keyframe_scheduler, prompt_scheduler +from ..categories import icons + +#-----------------------------------------------------------------------------------------------------------# +# NODES +#-----------------------------------------------------------------------------------------------------------# +# Schedulers +#-----------------------------------------------------------------------------------------------------------# +class CR_ValueScheduler: + + @classmethod + def INPUT_TYPES(s): + modes = ["Default Value", "Schedule"] + return {"required": {"mode": (modes,), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_value": ("FLOAT", {"default": 1.0, "min": -9999.0, "max": 9999.0, "step": 0.01,}), + "schedule_format": (["CR", "Deforum"],), + }, + "optional": {"schedule": ("SCHEDULE",), + } + } + + RETURN_TYPES = ("INT", "FLOAT", "STRING", ) + RETURN_NAMES = ("INT", "FLOAT", "show_help", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, current_frame, schedule_alias, default_value, schedule_format, schedule=None): + show_help = 
"https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-value-scheduler" + + if mode == "Default Value": + print(f"[Info] CR Value Scheduler: Scheduler {schedule_alias} is disabled") + int_out, float_out = int(default_value), float(default_value) + return (int_out, float_out, show_help, ) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for frame 0 + if params == "": + if current_frame == 0: + print(f"[Warning] CR Value Scheduler. No frame 0 found in schedule. Starting with default value at frame 0") + int_out, float_out = int(default_value), float(default_value) + else: + # Try the params + try: + value = float(params) + int_out, float_out = int(value), float(value) + except ValueError: + print(f"[Warning] CR Value Scheduler. Invalid params: {params}") + return() + return (int_out, float_out, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_TextScheduler: + + @classmethod + def INPUT_TYPES(s): + modes = ["Default Text", "Schedule"] + return {"required": {"mode": (modes,), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_text": ("STRING", {"multiline": False, "default": "default text"}), + "schedule_format": (["CR", "Deforum"],), + }, + "optional": {"schedule": ("SCHEDULE",), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, current_frame, schedule_alias, default_text, schedule_format, schedule=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-text-scheduler" + + if mode == "Default Text": + print(f"[Info] CR Text Scheduler: Scheduler {schedule_alias} is disabled") + text_out = default_text + return (text_out, show_help, ) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for frame 0 + if params == "": + if current_frame == 0: + print(f"[Warning] CR Text Scheduler. No frame 0 found in schedule. Starting with default value at frame 0") + text_out = default_value, + else: + # Try the params + try: + text_out = params + except ValueError: + print(f"[Warning] CR Text Scheduler. 
Invalid params: {params}") + return() + return (text_out, show_help, ) + + +#-----------------------------------------------------------------------------------------------------------# +class CR_PromptScheduler: + + @classmethod + def INPUT_TYPES(s): + modes = ["Default Prompt", "Keyframe List", "Schedule"] + return {"required": {"mode": (modes,), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "default_prompt": ("STRING", {"multiline": False, "default": "default prompt"}), + "schedule_format": (["CR", "Deforum"],), + #"pingpong_keyframes": (["No", "Yes"],), + "interpolate_prompt": (["Yes", "No"],), + }, + "optional": {"schedule": ("SCHEDULE",), + "schedule_alias": ("STRING", {"default prompt": "", "multiline": False}), + "keyframe_list": ("STRING", {"multiline": True, "default": "keyframe list"}), + "prepend_text": ("STRING", {"multiline": True, "default": "prepend text"}), + "append_text": ("STRING", {"multiline": True, "default": "append text"}), + } + } + + RETURN_TYPES = ("STRING", "STRING", "FLOAT", "STRING", ) + RETURN_NAMES = ("current_prompt", "next_prompt", "weight", "show_help", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, prepend_text, append_text, current_frame, schedule_alias, default_prompt, schedule_format, interpolate_prompt, keyframe_list="", schedule=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-prompt-scheduler" + + schedule_lines = list() + + if mode == "Default Prompt": + print(f"[Info] CR Prompt Scheduler: Scheduler {schedule_alias} is disabled") + return (default_prompt, default_prompt, 1.0, show_help, ) + + if mode == "Keyframe List": + if keyframe_list == "": + print(f"[Error] CR Prompt Scheduler: No keyframe list found.") + return () + else: + lines = keyframe_list.split('\n') + for line in lines: + # If deforum, convert to CR format + if schedule_format == "Deforum": + line = line.replace(":", ",") + line = line.rstrip(',') + line = line.lstrip() + # Strip empty lines + if not line.strip(): + print(f"[Warning] CR Simple Prompt Scheduler. Skipped blank line at line {i}") + continue + schedule_lines.extend([(schedule_alias, line)]) + schedule = schedule_lines + + if mode == "Schedule": + if schedule is None: + print(f"[Error] CR Prompt Scheduler: No schedule found.") + return () + # If deforum, convert to CR format + if schedule_format == "Deforum": + for item in schedule: + alias, line = item + line = line.replace(":", ",") + line = line.rstrip(',') + schedule_lines.extend([(schedule_alias, line)]) + schedule = schedule_lines + + current_prompt, next_prompt, current_keyframe, next_keyframe = prompt_scheduler(schedule, schedule_alias, current_frame) + + if current_prompt == "": + print(f"[Warning] CR Simple Prompt Scheduler. No prompt found for frame. Schedules should start at frame 0.") + else: + try: + current_prompt_out = prepend_text + ", " + str(current_prompt) + ", " + append_text + next_prompt_out = prepend_text + ", " + str(next_prompt) + ", " + append_text + from_index = int(current_keyframe) + to_index = int(next_keyframe) + except ValueError: + print(f"[Warning] CR Simple Text Scheduler. 
Invalid keyframe at frame {current_frame}") + + if from_index == to_index or interpolate_prompt == "No": + weight_out = 1.0 + else: + weight_out = (to_index - current_frame) / (to_index - from_index) + + #if pingpong_keyframes == "Yes": + # temp = current_prompt_out + # current_prompt_out = next_prompt_out + # next_prompt_out = temp + # weight_out = 1 - weight_out + + return (current_prompt_out, next_prompt_out, weight_out, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_SimplePromptScheduler: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"keyframe_list": ("STRING", {"multiline": True, "default": "frame_number, text"}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "keyframe_format": (["CR", "Deforum"],), + }, + } + + RETURN_TYPES = ("STRING", "STRING", "FLOAT", "STRING", ) + RETURN_NAMES = ("current_prompt", "next_prompt", "weight", "show_help", ) + FUNCTION = "simple_schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def simple_schedule(self, keyframe_list, keyframe_format, current_frame): + + keyframes = list() + + if keyframe_list == "": + print(f"[Error] CR Simple Prompt Scheduler. No lines in keyframe list") + return () + lines = keyframe_list.split('\n') + for line in lines: + # If deforum, convert to CR format + if keyframe_format == "Deforum": + line = line.replace(":", ",") + line = line.rstrip(',') + if not line.strip(): + print(f"[Warning] CR Simple Prompt Scheduler. Skipped blank line at line {i}") + continue + keyframes.extend([("SIMPLE", line)]) + + #print(f"[Debug] CR Simple Prompt Scheduler. Calling function") + current_prompt, next_prompt, current_keyframe, next_keyframe = prompt_scheduler(keyframes, "SIMPLE", current_frame) + + if current_prompt == "": + print(f"[Warning] CR Simple Prompt Scheduler. No prompt found for frame. Simple schedules must start at frame 0.") + else: + try: + current_prompt_out = str(current_prompt) + next_prompt_out = str(next_prompt) + from_index = int(current_keyframe) + to_index = int(next_keyframe) + except ValueError: + print(f"[Warning] CR Simple Text Scheduler. Invalid keyframe at frame {current_frame}") + + if from_index == to_index: + weight_out = 1.0 + else: + weight_out = (to_index - current_frame) / (to_index - from_index) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-simple-prompt-scheduler" + + return(current_prompt_out, next_prompt_out, weight_out, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_SimpleValueScheduler: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"schedule": ("STRING", {"multiline": True, "default": "frame_number, value"}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", "STRING", ) + RETURN_NAMES = ("INT", "FLOAT", "show_help", ) + FUNCTION = "simple_schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def simple_schedule(self, schedule, current_frame): + + schedule_lines = list() + + if schedule == "": + print(f"[Warning] CR Simple Value Scheduler. 
No lines in schedule") + return () + + lines = schedule.split('\n') + for line in lines: + schedule_lines.extend([("SIMPLE", line)]) + + params = keyframe_scheduler(schedule_lines, "SIMPLE", current_frame) + + if params == "": + print(f"[Warning] CR Simple Value Scheduler. No schedule found for frame. Simple schedules must start at frame 0.") + else: + try: + int_out = int(params.split('.')[0]) #rounds down + float_out = float(params) + except ValueError: + print(f"[Warning] CR Simple Value Scheduler. Invalid params {params} at frame {current_frame}") + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-simple-value-scheduler" + + return (int_out, float_out, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_SimpleTextScheduler: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"schedule": ("STRING", {"multiline": True, "default": "frame_number, text"}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "simple_schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def simple_schedule(self, schedule, current_frame): + + schedule_lines = list() + + if schedule == "": + print(f"[Warning] CR Simple Text Scheduler. No lines in schedule") + return () + + lines = schedule.split('\n') + for line in lines: + schedule_lines.extend([("SIMPLE", line)]) + + params = keyframe_scheduler(schedule_lines, "SIMPLE", current_frame) + + if params == "": + print(f"[Warning] CR Simple Text Scheduler. No schedule found for frame. Simple schedules must start at frame 0.") + else: + try: + text_out = str(params) + except ValueError: + print(f"[Warning] CR Simple Text Scheduler. Invalid params {params} at frame {current_frame}") + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-simple-text-scheduler" + + return(text_out, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_LoadScheduledModels: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Load default Model", "Schedule"] + + return {"required": {"mode": (modes,), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_model": (folder_paths.get_filename_list("checkpoints"), ), + "schedule_format": (["CR", "Deforum"],) + }, + "optional": {"model_list": ("MODEL_LIST",), + "schedule": ("SCHEDULE",) + }, + } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "show_help", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, current_frame, schedule_alias, default_model, schedule_format, model_list=None, schedule=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-load-scheduled-models" + + #model_name = "" + + # Load default Model mode + if mode == "Load default Model": + ckpt_path = folder_paths.get_full_path("checkpoints", default_model) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + print(f"[Debug] CR Load Scheduled Models. 
Loading default model.") + return (out[:3], show_help, ) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for a frame + if params == "": + print(f"[Warning] CR Load Scheduled Models. No model specified in schedule for frame {current_frame}. Using default model.") + ckpt_path = folder_paths.get_full_path("checkpoints", default_model) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return (out[:3], show_help, ) + else: + # Try the params + try: + model_alias = str(params) + except ValueError: + print(f"[Warning] CR Load Scheduled Models. Invalid params: {params}") + return() + + # Iterate through the model list to get the model name + for ckpt_alias, ckpt_name in model_list: + if ckpt_alias == model_alias: + model_name = ckpt_name + break # Exit the loop early once a match is found, ignores any duplicate matches + + # Check if a matching model has been found + if model_name == "": + print(f"[Info] CR Load Scheduled Models. No model alias match found for {model_alias}. Frame {current_frame} will produce an error.") + return() + else: + print(f"[Info] CR Load Scheduled Models. Model alias {model_alias} matched to {model_name}") + + # Load the new model + ckpt_path = folder_paths.get_full_path("checkpoints", model_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + print(f"[Info] CR Load Scheduled Models. Loading new checkpoint model {model_name}") + return (out[:3], show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_LoadScheduledLoRAs: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Off", "Load default LoRA", "Schedule"] + + return {"required": {"mode": (modes,), + "model": ("MODEL",), + "clip": ("CLIP", ), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_lora": (folder_paths.get_filename_list("loras"), ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "schedule_format": (["CR", "Deforum"],) + }, + "optional": {"lora_list": ("LORA_LIST",), + "schedule": ("SCHEDULE",) + }, + } + + RETURN_TYPES = ("MODEL", "CLIP", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "show_help", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, model, clip, current_frame, schedule_alias, default_lora, strength_model, strength_clip, schedule_format, lora_list=None, schedule=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Scheduler-Nodes#cr-load-scheduled-loras" + #lora_name = "" + + # Off mode + if mode == "Off": + print(f"[Info] CR Load Scheduled LoRAs. Disabled.") + return (model, clip, show_help, ) + + # Load Default LoRA mode + if mode == "Load default LoRA": + if default_lora == None: + return (model, clip, show_help, ) + if strength_model == 0 and strength_clip == 0: + return (model, clip, show_help, ) + model, clip = LoraLoader().load_lora(model, clip, default_lora, strength_model, strength_clip) + print(f"[Info] CR Load Scheduled LoRAs. 
Loading default LoRA {lora_name}.") + return (model, clip, show_help, ) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for a frame + if params == "": + print(f"[Warning] CR Load Scheduled LoRAs. No LoRA specified in schedule for frame {current_frame}. Using default lora.") + if default_lora != None: + model, clip = LoraLoader().load_lora(model, clip, default_lora, strength_model, strength_clip) + return (model, clip, show_help, ) + else: + # Unpack the parameters + parts = params.split(',') + if len(parts) == 3: + s_lora_alias = parts[0].strip() + s_strength_model = float(parts[1].strip()) + s_strength_clip = float(parts[1].strip()) + else: + print(f"[Warning] CR Simple Value Scheduler. Skipped invalid line: {line}") + return() + + # Iterate through the LoRA list to get the LoRA name + for l_lora_alias, l_lora_name, l_strength_model, l_strength_clip in lora_list: + print(l_lora_alias, l_lora_name, l_strength_model, l_strength_clip) + if l_lora_alias == s_lora_alias: + print(f"[Info] CR Load Scheduled LoRAs. LoRA alias match found for {s_lora_alias}") + lora_name = l_lora_name + break # Exit the loop early once a match is found, ignores any duplicate matches + + # Check if a matching LoRA has been found + if lora_name == "": + print(f"[Info] CR Load Scheduled LoRAs. No LoRA alias match found for {s_lora_alias}. Frame {current_frame}.") + return() + else: + print(f"[Info] CR Load Scheduled LoRAs. LoRA {lora_name}") + + # Load the new LoRA + model, clip = LoraLoader().load_lora(model, clip, lora_name, s_strength_model, s_strength_clip) + print(f"[Debug] CR Load Scheduled LoRAs. Loading new LoRA {lora_name}") + return (model, clip, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +# MAPPINGS +#-----------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 11 nodes +''' +NODE_CLASS_MAPPINGS = { + ### Schedulers + "CR Simple Value Scheduler":CR_SimpleValueScheduler, + "CR Simple Text Scheduler":CR_SimpleTextScheduler, + "CR Simple Prompt Scheduler":CR_SimplePromptScheduler, + "CR Load Scheduled Models":CR_LoadScheduledModels, + "CR Load Scheduled LoRAs":CR_LoadScheduledLoRAs, + "CR Load Scheduled ControlNets":CR_LoadScheduledControlNets, + "CR Value Scheduler":CR_ValueScheduler, + "CR Text Scheduler":CR_TextScheduler, + "CR Prompt Scheduler":CR_PromptScheduler, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedules.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedules.py new file mode 100644 index 0000000000000000000000000000000000000000..85566d288ead36d771be4e5bd803ac6b9e6378cf --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/schedules.py @@ -0,0 +1,308 @@ +#-----------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#-----------------------------------------------------------------------------------------------------------# + +import comfy.sd +import os +import sys +import folder_paths +from nodes import LoraLoader +from .functions import keyframe_scheduler, prompt_scheduler +from ..categories import icons + 
+#-----------------------------------------------------------------------------------------------------------# +# Schedules +#-----------------------------------------------------------------------------------------------------------# +class CR_SimpleSchedule: + + @classmethod + def INPUT_TYPES(s): + schedule_types = ["Value", "Text", "Prompt", "Prompt Weight", "Model", "LoRA", "ControlNet", "Style", "Upscale", "Camera", "Job"] + return {"required": {"schedule": ("STRING", + {"multiline": True, "default": "frame_number, item_alias, [attr_value1, attr_value2]"} + ), + "schedule_type": (schedule_types,), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "schedule_format": (["CR", "Deforum"],), + }, + } + + RETURN_TYPES = ("SCHEDULE", "STRING", ) + RETURN_NAMES = ("SCHEDULE", "show_help", ) + FUNCTION = "send_schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def send_schedule(self, schedule, schedule_type, schedule_alias, schedule_format): + + schedule_lines = list() + + # Extend the list for each line in the schedule + if schedule != "" and schedule_alias != "": + lines = schedule.split('\n') + for line in lines: + # Skip empty lines + if not line.strip(): + print(f"[Warning] CR Simple Schedule. Skipped blank line: {line}") + continue + + schedule_lines.extend([(schedule_alias, line)]) + #print(f"[Debug] CR Simple Schedule: {schedule_lines}") + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Schedule-Nodes#cr-simple-schedule" + + return (schedule_lines, show_help, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_CombineSchedules: + + @classmethod + def INPUT_TYPES(cls): + return {"required": { + }, + "optional":{ + "schedule_1": ("SCHEDULE",), + "schedule_2": ("SCHEDULE",), + "schedule_3": ("SCHEDULE",), + "schedule_4": ("SCHEDULE",), + }, + } + + RETURN_TYPES = ("SCHEDULE", "STRING", ) + RETURN_NAMES = ("SCHEDULE", "show_text", ) + FUNCTION = "combine" + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def combine(self, schedule_1=None, schedule_2=None, schedule_3=None, schedule_4=None): + + # Initialise the list + schedules = list() + schedule_text = list() + + # Extend the list for each schedule in connected stacks + if schedule_1 is not None: + schedules.extend([l for l in schedule_1]), + schedule_text.extend(schedule_1), + + if schedule_2 is not None: + schedules.extend([l for l in schedule_2]), + schedule_text.extend(schedule_2), + + if schedule_3 is not None: + schedules.extend([l for l in schedule_3]), + schedule_text.extend(schedule_3), + + if schedule_4 is not None: + schedules.extend([l for l in schedule_4]), + schedule_text.extend(schedule_4), + + print(f"[Debug] CR Combine Schedules: {schedules}") + + show_text = "".join(str(schedule_text)) + + return (schedules, show_text, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_CentralSchedule: + + @classmethod + def INPUT_TYPES(cls): + schedule_types = ["Value", "Text", "Prompt", "Prompt Weight", "Model", "LoRA", "ControlNet", "Style", "Upscale", "Camera", "Job"] + return {"required": { + "schedule_1": ("STRING", {"multiline": True, "default": "schedule"}), + "schedule_type1": (schedule_types,), + "schedule_alias1": ("STRING", {"multiline": False, "default": ""}), + "schedule_2": ("STRING", {"multiline": True, "default": "schedule"}), + "schedule_type2": (schedule_types,), + "schedule_alias2": ("STRING", 
{"multiline": False, "default": ""}), + "schedule_3": ("STRING", {"multiline": True, "default": "schedule"}), + "schedule_type3": (schedule_types,), + "schedule_alias3": ("STRING", {"multiline": False, "default": ""}), + "schedule_format": (["CR", "Deforum"],), + }, + "optional": {"schedule": ("SCHEDULE",) + }, + } + + RETURN_TYPES = ("SCHEDULE", "STRING", ) + RETURN_NAMES = ("SCHEDULE", "show_text", ) + FUNCTION = "build_schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def build_schedule(self, schedule_1, schedule_type1, schedule_alias1, schedule_2, schedule_type2, schedule_alias2, schedule_3, schedule_type3, schedule_alias3, schedule_format, schedule=None): + + # schedule_type and schedule_format are not used in the function + + # Initialise the list + schedules = list() + schedule_text = list() + + # Extend the list for each schedule in linked stacks + if schedule is not None: + schedules.extend([l for l in schedule]) + schedule_text.extend([l for l in schedule]), + + # Extend the list for each schedule in the stack + if schedule_1 != "" and schedule_alias1 != "": + lines = schedule_1.split('\n') + for line in lines: + schedules.extend([(schedule_alias1, line)]), + schedule_text.extend([(schedule_alias1 + "," + schedule_1 + "\n")]), + + if schedule_2 != "" and schedule_alias2 != "": + lines = schedule_2.split('\n') + for line in lines: + schedules.extend([(schedule_alias2, line)]), + schedule_text.extend([(schedule_alias2 + "," + schedule_2 + "\n")]), + + if schedule_3 != "" and schedule_alias3 != "": + lines = schedule_3.split('\n') + for line in lines: + schedules.extend([(schedule_alias3, line)]), + schedule_text.extend([(schedule_alias3 + "," + schedule_3 + "\n")]), + + #print(f"[Debug] CR Schedule List: {schedules}") + + show_text = "".join(schedule_text) + + return (schedules, show_text, ) + +#-----------------------------------------------------------------------------------------------------------# +class Comfyroll_ScheduleInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "schedule1": ("SCHEDULE",), + "schedule2": ("SCHEDULE",) + } + } + + RETURN_TYPES = ("SCHEDULE", "STRING", ) + RETURN_NAMES = ("SCHEDULE", "show_help", ) + OUTPUT_NODE = True + FUNCTION = "switch" + + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def switch(self, Input, schedule1, schedule2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Schedule-Nodes#cr-schedule-input-switch" + if Input == 1: + return (schedule1, show_help, ) + else: + return (schedule2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_OutputScheduleToFile: + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "output_file_path": ("STRING", {"multiline": False, "default": ""}), + "file_name": ("STRING", {"multiline": False, "default": ""}), + "file_extension": (["txt", "csv"],), + "schedule": ("SCHEDULE",), + } + } + + RETURN_TYPES = () + OUTPUT_NODE = True + FUNCTION = "csvoutput" + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def csvoutput(self, output_file_path, file_name, schedule, file_extension): + filepath = output_file_path + "\\" + file_name + "." + file_extension + + index = 2 + + if(output_file_path == "" or file_name == ""): + print(f"[Warning] CR Output Schedule To File. No file details found. 
No file output.") + return () + + while os.path.exists(filepath): + if os.path.exists(filepath): + filepath = output_file_path + "\\" + file_name + str(index) + "." + file_extension + + index = index + 1 + else: + break + + print(f"[Info] CR_Output Schedule To File: Saving to {filepath}") + + if file_extension == "csv": + with open(filepath, "w", newline="") as csv_file: + csv_writer = csv.writer(csv_file) + csv_writer.writerows(schedule) + else: + with open(filepath, "w", newline="") as text_writer: + for line in schedule: + str_item = f'{line[0]},"{line[1]}"\n' + text_writer.write(str_item) + + + return () + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LoadScheduleFromFile: + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "input_file_path": ("STRING", {"multiline": False, "default": ""}), + "file_name": ("STRING", {"multiline": False, "default": ""}), + "file_extension": (["txt", "csv"],), + } + } + + RETURN_TYPES = ("SCHEDULE", "STRING", ) + RETURN_NAMES = ("SCHEDULE", "show_text", ) + FUNCTION = "csvinput" + CATEGORY = icons.get("Comfyroll/Animation/Schedule") + + def csvinput(self, input_file_path, file_name, file_extension): + filepath = input_file_path + "\\" + file_name + "." + file_extension + print(f"CR_Load Schedule From File: Loading {filepath}") + + lists = [] + + if file_extension == "csv": + with open(filepath, "r") as csv_file: + reader = csv.reader(csv_file) + + for row in reader: + lists.append(row) + + else: + with open(filepath, "r") as txt_file: + for row in txt_file: + parts = row.strip().split(",", 1) + + if len(parts) >= 2: + second_part = parts[1].strip('"') + lists.append([parts[0], second_part]) + + print(lists) + return(lists,str(lists),) + +#-----------------------------------------------------------------------------------------------------------# +# MAPPINGS +#-----------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 11 nodes +''' +NODE_CLASS_MAPPINGS = { + ### Schedules + "CR Simple Schedule":CR_SimpleSchedule, + "CR Combine Schedules":CR_CombineSchedules, + "CR Central Schedule":CR_CentralSchedule, + "CR Schedule To ScheduleList":CR_ScheduleToScheduleList, + "CR Schedule Input Switch": Comfyroll_ScheduleInputSwitch, + "CR Output Schedule To File":CR_OutputScheduleToFile, + "CR Load Schedule From File":CR_LoadScheduleFromFile, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/utils.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..694066b43a09f8c6c0f0c10aeddcd304cbbcc91a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/animation_nodes/utils.py @@ -0,0 +1,61 @@ +#---------------------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# + +class CR_DebatchFrames: + # cloned from ltdrdata Image Batch To Image List node + + @classmethod + def 
INPUT_TYPES(s): + return {"required": { "frames": ("IMAGE",), } } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("debatched_frames",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "debatch" + CATEGORY = icons.get("Comfyroll/Animation/Utils") + + def debatch(self, frames): + images = [frames[i:i + 1, ...] for i in range(frames.shape[0])] + return (images, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CurrentFrame: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "index": ("INT", {"default": 1, "min": -10000, "max": 10000}), + "print_to_console": (["Yes","No"],), + } + } + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("index",) + FUNCTION = "to_console" + CATEGORY = icons.get("Comfyroll/Animation/Utils") + + def to_console(self, index, print_to_console): + if print_to_console == "Yes": + print(f"[Info] CR Current Frame:{index}") + + return (index, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 8 nodes +''' +NODE_CLASS_MAPPINGS = { + # Utils + "CR Debatch Frames":CR_DebatchFrames, + "CR Current Frame":CR_CurrentFrame, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/categories.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/categories.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ce562570d093e122b0b6c8b4ecbcd6e4c6ebbd --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/categories.py @@ -0,0 +1,37 @@ +icons = { + "Comfyroll/Upscale": "🧩 Comfyroll/🔍 Upscale", + "Comfyroll/Model Merge": "🧩 Comfyroll/⛏️ Model Merge", + "Comfyroll/Utils/Logic": "🧩 Comfyroll/🛠️ Utils/🔀 Logic", + "Comfyroll/Utils/Process": "🧩 Comfyroll/🛠️ Utils/🔂 Process", + "Comfyroll/Utils/Index": "🧩 Comfyroll/🛠️ Utils/🔢 Index", + "Comfyroll/Utils/Conversion": "🧩 Comfyroll/🛠️ Utils/🔧 Conversion", + "Comfyroll/Utils/Random": "🧩 Comfyroll/🛠️ Utils/🎲 Random", + "Comfyroll/LoRA": "🧩 Comfyroll/💊 LoRA", + "Comfyroll/ControlNet": "🧩 Comfyroll/🕹️ ControlNet", + "Comfyroll/XY Grid": "🧩 Comfyroll/📉 XY Grid", + "Comfyroll/SDXL": "🧩 Comfyroll/🌟 SDXL", + "Comfyroll/Aspect Ratio": "🧩 Comfyroll/🔳 Aspect Ratio", + "Comfyroll/Pipe/Module": "🧩 Comfyroll/🎷 Pipe/✈️ Module", + "Comfyroll/Pipe/Image": "🧩 Comfyroll/🎷 Pipe/🛩️ Image", + "Comfyroll/Pipe": "🧩 Comfyroll/🎷 Pipe", + "Comfyroll/Graphics/Text": "🧩 Comfyroll/👾 Graphics/🔤 Text", + "Comfyroll/Graphics/Layout": "🧩 Comfyroll/👾 Graphics/🌁 Layout", + "Comfyroll/Graphics/Template": "🧩 Comfyroll/👾 Graphics/👽 Template", + "Comfyroll/Graphics/Filter": "🧩 Comfyroll/👾 Graphics/🎨 Filter", + "Comfyroll/Graphics/Pattern": "🧩 Comfyroll/👾 Graphics/🌈 Pattern", + "Comfyroll/Graphics/3D": "🧩 Comfyroll/👾 Graphics/3D", + "Comfyroll/Graphics/Utilty": "🧩 Comfyroll/👾 Graphics/🔧 Utility", + "Comfyroll/Workflow": "🧩 Comfyroll/Workflow", + "Comfyroll/Other": "🧩 Comfyroll/📦 Other", + "Comfyroll/Other/Legacy": "🧩 Comfyroll/📦 Other/💀 Legacy", + "Comfyroll/Animation/Camera": "🧩 Comfyroll/🎥 Animation/🎦 Camera", + "Comfyroll/Animation/Utils": "🧩 Comfyroll/🎥 Animation/🛠️ Utils", + "Comfyroll/Animation/Schedule": "🧩 Comfyroll/🎥 Animation/📋 Schedule", + "Comfyroll/Animation/Schedulers": "🧩 Comfyroll/🎥 Animation/📑 Schedulers", + "Comfyroll/Animation/Prompt": "🧩 Comfyroll/🎥 Animation/📝 Prompt", + 
"Comfyroll/Animation/List": "🧩 Comfyroll/🎥 Animation/📃 List", + "Comfyroll/Animation/Cyclers": "🧩 Comfyroll/🎥 Animation/♻️ Cyclers", + "Comfyroll/Animation/Interpolate": "🧩 Comfyroll/🎥 Animation/🔢 Interpolate", + "Comfyroll/Animation/IO": "🧩 Comfyroll/🎥 Animation/⌨️ IO", + "Comfyroll/Animation/Other": "🧩 Comfyroll/🎥 Animation/📦 Other", +} diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/config.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/config.py new file mode 100644 index 0000000000000000000000000000000000000000..dfb45566341dc925b82e7d83d4272ac5a69eba5f --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/config.py @@ -0,0 +1,37 @@ +color_mapping = { + "white": (255, 255, 255), + "black": (0, 0, 0), + "red": (255, 0, 0), + "green": (0, 255, 0), + "blue": (0, 0, 255), + "yellow": (255, 255, 0), + "cyan": (0, 255, 255), + "magenta": (255, 0, 255), + "orange": (255, 165, 0), + "purple": (128, 0, 128), + "pink": (255, 192, 203), + "brown": (160, 85, 15), + "gray": (128, 128, 128), + "lightgray": (211, 211, 211), + "darkgray": (169, 169, 169), + "olive": (128, 128, 0), + "lime": (0, 128, 0), + "teal": (0, 128, 128), + "navy": (0, 0, 128), + "maroon": (128, 0, 0), + "fuchsia": (255, 0, 128), + "aqua": (0, 255, 128), + "silver": (192, 192, 192), + "gold": (255, 215, 0), + "turquoise": (64, 224, 208), + "lavender": (230, 230, 250), + "violet": (238, 130, 238), + "coral": (255, 127, 80), + "indigo": (75, 0, 130), +} + +COLORS = ["custom", "white", "black", "red", "green", "blue", "yellow", + "cyan", "magenta", "orange", "purple", "pink", "brown", "gray", + "lightgray", "darkgray", "olive", "lime", "teal", "navy", "maroon", + "fuchsia", "aqua", "silver", "gold", "turquoise", "lavender", + "violet", "coral", "indigo"] \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_node_mappings.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_node_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..4df9289563a65db27b8511efeb036115574c5ef4 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_node_mappings.py @@ -0,0 +1,99 @@ +from .animation_nodes.prompt import * + +from .dev_nodes.dev_nodes import * +from .dev_nodes.graphics_dev_nodes import * +from .dev_nodes.dev_pil_3D import * +from .dev_nodes.dev_workflow import * +from .dev_nodes.animation_dev_nodes import * +from .dev_nodes.dev_schedulers import * +from .dev_nodes.dev_xygrid import * + +DEV_NODE_CLASS_MAPPINGS = { + ### XY Dev Nodes + "CR XYZ List": CR_XYZList, + "CR XYZ Interpolate": CR_XYZInterpolate, + "CR XYZ Index": CR_XYZIndex, + ### Graphics Dev Nodes + "CR Multi-Panel Meme Template": CR_MultiPanelMemeTemplate, + "CR Popular Meme Templates": CR_PopularMemeTemplates, + "CR Draw Perspective Text": CR_DrawPerspectiveText, + "CR Simple Annotations": CR_SimpleAnnotations, + "CR Apply Annotations": CR_ApplyAnnotations, + "CR Add Annotation": CR_AddAnnotation, + "CR 3D Polygon": CR_3DPolygon, + "CR 3D Solids": CR_3DSolids, + "CR Draw OBJ": CR_DrawOBJ, + "CR Simple Image Watermark": CR_SimpleImageWatermark, + "CR Comic Panel Templates (Advanced)": CR_ComicPanelTemplatesAdvanced, + ### Workflow Dev Nodes + "CR Job List": CR_JobList, + "CR Job Scheduler": CR_JobScheduler, + "CR Check Job Complete": CR_CheckJobComplete, + "CR Spawn Workflow Instance": CR_SpawnWorkflowInstance, + "CR Job Current Frame": CR_JobCurrentFrame, + "CR Load Workflow": CR_LoadWorkflow, + ### Animation Dev Nodes + "CR Prompt Weight Scheduler": CR_PromptWeightScheduler, + "CR Load 
Scheduled ControlNets": CR_LoadScheduledControlNets, + "CR Interpolate Prompt Weights": CR_InterpolatePromptWeights, + "CR Text List Cross Join": CR_TextListCrossJoin, + "CR Schedule Camera Movements": CR_ScheduleCameraMovements, + "CR Schedule Styles": CR_ScheduleStyles, + "CR Style List": CR_StyleList, + "CR Cycle Styles": CR_CycleStyles, + "CR Image Transition": CR_ImageTransition, + "CR Strobe Images": CR_StrobeImages, + "CR Alternate Latents": CR_AlternateLatents, + "CR 3D Camera Drone": CR_DroneCamera3D, + "CR 3D Camera Static": CR_StaticCamera3D, + "CR Interpolate Zoom": CR_InterpolateZoom, + "CR Interpolate Rotation": CR_InterpolateRotation, + "CR Interpolate Track": CR_InterpolateTrack, + "CR Continuous Zoom": CR_ContinuousZoom, + "CR Continuous Rotation": CR_ContinuousRotation, + "CR Continuous Track": CR_ContinuousTrack, +} + +DEV_NODE_DISPLAY_NAME_MAPPINGS = { + # Dev Nodes + "CR XYZ List": "CR XYZ List (Dev)", + "CR XYZ Interpolate": "CR XYZ Interpolate (Dev)", + "CR XYZ Index": "CR XYZ Index (Dev)", + "CR Multi-Panel Meme Template": "CR Multi-Panel Meme Template (Dev)", + "CR Popular Meme Templates": "CR Popular Meme Templates (Dev)", + "CR Draw Perspective Text": "CR Draw Perspective Text (Dev)", + "CR Simple Annotations": "CR Simple Annotations (Dev)", + "CR Apply Annotations": "CR Apply Annotations (Prototype)", + "CR Add Annotation": "CR Add Annotation (Prototype)", + "CR 3D Polygon": "CR 3D Polygon (Dev)", + "CR 3D Solids": "CR 3D Solids (Dev)", + "CR Draw OBJ": "CR Draw OBJ", + "CR Simple Image Watermark": "CR Simple Image Watermark (Dev)", + "CR Comic Panel Templates Advanced": "👽 Comic Panel Templates (Advanced)", + "CR Job List": "CR Job List (Prototype)", + "CR Job Scheduler": "CR Job Scheduler (Prototype)", + "CR Check Job Complete": "CR Check Job Complete (Prototype)", + "CR Spawn Workflow Instance": "CR Spawn Workflow Instance (Prototype)", + "CR Job Current Frame": "CR Job Current Frame (Prototype)", + "CR Load Workflow": "CR Load Workflow (Prototype)", + ### Animation Dev Nodes + "CR Prompt Weight Scheduler": "CR Prompt Weight Scheduler (Dev)", + "CR Load Scheduled ControlNets": "CR Load Scheduled ControlNets (Dev)", + "CR Interpolate Prompt Weights": "CR Interpolate Prompt Weights (Dev)", + "CR Text List Cross Join": "CR Text List Cross Join (Dev)", + "CR Schedule Camera Movements": "CR Schedule Camera Movements (Prototype)", + "CR Schedule Styles": "CR Schedule Styles (Prototype)", + "CR Style List": "CR Style List (Prototype)", + "CR Cycle Styles": "CR Cycle Styles (Prototype)", + "CR Image Transition": "CR Image Transition (Prototype)", + "CR Strobe Images": "CR Strobe Images (Prototype)", + "CR Alternate Latents": "CR Alternate Latents (Prototype)", + "CR 3D Camera Drone": "CR 3D Camera Drone (Prototype)", + "CR 3D Camera Static": "CR 3D Camera Static (Prototype)", + "CR Interpolate Zoom": "CR Interpolate Zoom (Prototype)", + "CR Interpolate Rotation": "CR Interpolate Rotation (Prototype)", + "CR Interpolate Track": "CR Interpolate Track (Prototype)", + "CR Continuous Zoom": "CR Continuous Zoom (Prototype)", + "CR Continuous Rotation": "CR Continuous Rotation (Prototype)", + "CR Continuous Track": "CR Continuous Track (Prototype)", +} diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_nodes.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..317c75858a65856dc94688d829e555ab16bd1e11 Binary files 
/dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_pil_3D.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_pil_3D.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7013d02a2b8ef9051bba08b8bf6eea185a121575 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/dev_pil_3D.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/graphics_dev_nodes.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/graphics_dev_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dfac61a950c5ce83d96e129bf8550fb0c7ce38b Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/__pycache__/graphics_dev_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/animation_dev_nodes.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/animation_dev_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..1222feccf186b663a8546e2a96339f431365d05f --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/animation_dev_nodes.py @@ -0,0 +1,759 @@ +#---------------------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +from PIL import Image +import torch +import numpy as np +import folder_paths +from math import sqrt, ceil +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# Interpolation Nodes +#---------------------------------------------------------------------------------------------------------------------# +class CR_InterpolatePromptWeights: + + @classmethod + def INPUT_TYPES(cls): + + #interpolation_methods = ["lerp", "slerp"] + interpolation_methods = ["lerp"] + + return { + "required": { + "weight1": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "weight2": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "method": (interpolation_methods,), + }, + #"optional": {"schedule": ("SCHEDULE",), + #} + + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("weight",) + FUNCTION = "interpolate" + + CATEGORY = icons.get("Comfyroll/Animation/Interpolate") + + def interpolate(self, weight1, weight2, method): + + weight = 0.5 + return (weight,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImageTransition: + @classmethod + def INPUT_TYPES(s): + transition_types = ["Morph", "Dissolve", "Cross-Fade", "Jump Cut", + "Swipe-Left", "Swipe-Right", "Fade to Black"] + + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "transition_type": (transition_types,), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "start_keyframe": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "end_keyframe": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = 
("IMAGE", ) + FUNCTION = "get_image" + CATEGORY = icons.get("Comfyroll/Animation/Other") + + def get_image(self, image1, image2, transition_type, current_frame, start_keyframe, end_keyframe): + + image_out = image1 + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_AlternateLatents: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent1": ("LATENT",), + "latent2": ("LATENT",), + "frame_interval": ("INT", {"default": 1, "min": 1, "max": 999}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "InputLatents" + CATEGORY = icons.get("Comfyroll/Animation/Other") + + def InputLatents(self, latent1, latent2, frame_interval, current_frame): + + if current_frame == 0: + return latent1 # Start with the first latent + + frame_mod = (current_frame // frame_interval) % 2 + + if frame_mod == 0: + return latent1 + else: + return latent2 + +#---------------------------------------------------------------------------------------------------------------------# +class CR_StrobeImages: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "animationframes1": ("IMAGE",), + "animation frames2": ("IMAGE",), + "frame_interval": ("INT", {"default": 1, "min": 1, "max": 999}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "strobe" + CATEGORY = icons.get("Comfyroll/Animation/Other") + + def strobe(self, image1, image2, frame_interval, current_frame): + + if current_frame == 0: + return image1 # Start with the first image + + frame_mod = (current_frame // frame_interval) % 2 + + if frame_mod == 0: + return image1 + else: + return image2 + +#---------------------------------------------------------------------------------------------------------------------# +# Camera Nodes +#---------------------------------------------------------------------------------------------------------------------# +# add more camera types and motions, including +# hand held, steadycam +# crane/boom camera +# dolly/track +# pov +# static camera +#---------------------------------------------------------------------------------------------------------------------# + +class CR_StaticCamera3D: + @classmethod + def INPUT_TYPES(s): + rotation_types = ["Tilt Up", "Tilt Down", "Pan Left", "Pan Right"] + + return {"required": { + "image": ("IMAGE",), + "x_position": ("FLOAT", {"default": 0.0}), + "y_position": ("FLOAT", {"default": 0.0}), + "pan_rotation": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1}), + "tilt_rotation": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1}), + "zoom_factor": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "static3d" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def static3d(self, image, x_position, y_position, pan_rotation, tilt_rotation, zoom_factor, start_frame, current_frame, frame_duration): + + if current_frame < start_frame: + 
return (image,) + + if current_frame >= start_frame + frame_duration: + return (image,) + + if rotation_type == "Tilt Up": + rotation_angle = start_angle - (tilt_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Tilt Down": + rotation_angle = start_angle + (tilt_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Pan Left": + rotation_angle = start_angle - (pan_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Pan Right": + rotation_angle = start_angle + (pan_rotation * ((current_frame - start_frame) / frame_duration)) + else: + rotation_angle = start_angle + + # Apply rotation to the image using your rotation logic here + # You should apply the rotation angle to transform the image accordingly + + # image_out = apply_rotation(image, rotation_angle) # Replace this with your actual rotation logic + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_DroneCamera3D: + @classmethod + def INPUT_TYPES(cls): + return {"required": { + "image": ("IMAGE",), + "x_position": ("FLOAT", {"default": 0.0}), + "y_position": ("FLOAT", {"default": 0.0}), + "yaw_angle": ("FLOAT", {"default": 0.0}), + "pitch_angle": ("FLOAT", {"default": 0.0}), + "roll_angle": ("FLOAT", {"default": 0.0}), + "zoom_factor": ("FLOAT", {"default": 0.0}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + } + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "drone3d" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def drone3d(self, x_position, y_position, z_position, yaw_angle, pitch_angle, + roll_angle, zoom_factor, start_frame, current_frame, frame_duration): + """ + Create an animation of aerial camera movements. + + Args: + x_position (float): The camera's x-coordinate in 3D space. + y_position (float): The camera's y-coordinate in 3D space. + z_position (float): The camera's z-coordinate in 3D space. + yaw_angle (float): The camera's rotation around the vertical axis (yaw). + pitch_angle (float): The camera's rotation around the lateral axis (pitch). + roll_angle (float): The camera's rotation around the longitudinal axis (roll). + fov (float): The camera's field of view angle. + zoom_factor (float): The zoom factor affecting FOV or object size. + frame_rate (int): The number of frames per second for the animation. + animation_duration (float): The total duration of the animation in seconds. + image_width (int): The width of the output image in pixels. + image_height (int): The height of the output image in pixels. + + Returns: + List[PIL.Image.Image]: A list of image. 
+ """ + # Your animation generation logic here + # Combine camera movements, FOV changes, and zoom effects + # Generate image based on the provided parameters + + #animation_frames = [] # List to store image + + # Implement your animation generation logic here + image_out = image + + return image_out + +#---------------------------------------------------------------------------------------------------------------------# +class CR_InterpolateZoom: + @classmethod + def INPUT_TYPES(s): + zoom_types = ["Zoom In", "Zoom Out",] + return {"required": { + "image": ("IMAGE",), + "zoom_type": (zoom_types,), + "start_factor": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "step_factor": ("INT", {"default": 1.0, "min": -9999.0, "max": 9999.0, "step": 1.0,}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "zoom" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def zoom(self, image, zoom_type, current_frame, zoom_factor): + + if current_frame < start_frame: + return (image,) + + if current_frame >= start_frame + frame_duration: + return (image,) + + if zoom_type == "Zoom In": + zoom_factor = 1.0 + (zoom_amount - 1.0) * ((current_frame - start_frame) / frame_duration) + elif zoom_type == "Zoom Out": + zoom_factor = 1.0 - (zoom_amount - 1.0) * ((current_frame - start_frame) / frame_duration) + else: + zoom_factor = 1.0 + + # Apply zoom to the image using your zoom logic here + # You should apply the zoom factor to resize the image accordingly + + # image_out = apply_zoom(image, zoom_factor) # Replace this with your actual zoom logic + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +class CR_InterpolateRotation: + @classmethod + def INPUT_TYPES(s): + rotation_types = ["Tilt Up", "Tilt Down", "Pan Left", "Pan Right"] + + return {"required": { + "image": ("IMAGE",), + "rotation_type": (rotation_types,), + "pan_rotation": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1}), + "tilt_rotation": ("FLOAT", {"default": 0.0, "min": -10.0, "max": 10.0, "step": 0.1}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "rotate" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def rotate(self, image, rotation_type, current_frame, rotation_angle): + + if current_frame < start_frame: + return (image,) + + if current_frame >= start_frame + frame_duration: + return (image,) + + if rotation_type == "Tilt Up": + rotation_angle = start_angle - (tilt_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Tilt Down": + rotation_angle = start_angle + (tilt_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Pan Left": + rotation_angle = start_angle - (pan_rotation * ((current_frame - start_frame) / frame_duration)) + elif rotation_type == "Pan Right": + rotation_angle = start_angle + 
(pan_rotation * ((current_frame - start_frame) / frame_duration)) + else: + rotation_angle = start_angle + + # Apply rotation to the image using your rotation logic here + # You should apply the rotation angle to transform the image accordingly + + # image_out = apply_rotation(image, rotation_angle) # Replace this with your actual rotation logic + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +class CR_InterpolateTrack: + @classmethod + def INPUT_TYPES(s): + track_types = ["Track Left", "Track Left", "Track Up", "Track Down"] + + return {"required": { + "image": ("IMAGE",), + "track_type": (track_types,), + "track_speed": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "start_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "frame_duration": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "track" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def track(self, image, rotation_type, current_frame, rotation_angle): + + if current_frame < start_frame: + return (image,) + + if current_frame >= start_frame + frame_duration: + return (image,) + + if track_type == "Track Left": + track_distance = track_speed * ((current_frame - start_frame) / frame_duration) + elif track_type == "Track Right": + track_distance = track_speed * ((current_frame - start_frame) / frame_duration) + elif track_type == "Track Up": + track_distance = track_speed * ((current_frame - start_frame) / frame_duration) + elif track_type == "Track Down": + track_distance = track_speed * ((current_frame - start_frame) / frame_duration) + else: + track_distance = 0.0 + + # Apply tracking motion to the image using your tracking logic here + # You should apply the track_distance to translate the image accordingly + + # image_out = apply_tracking(image, track_distance) # Replace this with your actual tracking logic + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ContinuousZoom: + @classmethod + def INPUT_TYPES(s): + zoom_types = ["Zoom In", "Zoom Out",] + + return {"required": { + "image": ("IMAGE",), + "zoom_type": (zoom_types,), + "zoom_factor": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "zoom" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def zoom(self, image, zoom_type, current_frame, zoom_factor): + """ + Apply continuous zoom to the input image based on the zoom type and factor. + + Args: + image (PIL.Image.Image): The input image. + zoom_type (str): Either "Zoom In" or "Zoom Out" to specify the zoom direction. + zoom_factor (float): The factor by which to zoom the image. + + Returns: + PIL.Image.Image: The zoomed image. 
+ """ + width, height = image.size + + if zoom_type == "Zoom In": + new_width = int(width / zoom_factor) + new_height = int(height / zoom_factor) + else: + new_width = int(width * zoom_factor) + new_height = int(height * zoom_factor) + + # Perform image resizing using the specified resampling method (LANCZOS) + image_out = image.resize((new_width, new_height), resample=Image.LANCZOS) + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ContinuousRotation: + @classmethod + def INPUT_TYPES(s): + rotation_types = ["Tilt Up", "Tilt Down", "Pan Left", "Pan Right"] + return {"required": { + "image": ("IMAGE",), + "rotation_type": (rotation_types,), + "rotation_angle": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "get_image" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def get_image(self, image, rotation_type, current_frame, rotation_angle): + + """ + Apply continuous camera rotation to the input image. + + Args: + image (PIL.Image.Image): The input image. + rotation_type (str): One of "Tilt Up", "Tilt Down", "Pan Left", or "Pan Right" to specify the rotation type. + rotation_angle (float): The angle by which to rotate the camera. + + Returns: + PIL.Image.Image: The rotated image. + """ + if rotation_type == "Tilt Up": + rotated_image = image.rotate(-rotation_angle, resample=Image.LANCZOS, expand=True) + elif rotation_type == "Tilt Down": + rotated_image = image.rotate(rotation_angle, resample=Image.LANCZOS, expand=True) + elif rotation_type == "Pan Left": + rotated_image = image.rotate(rotation_angle, resample=Image.LANCZOS, expand=True) + elif rotation_type == "Pan Right": + rotated_image = image.rotate(-rotation_angle, resample=Image.LANCZOS, expand=True) + else: + rotated_image = image # No rotation + + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ContinuousTrack: + @classmethod + def INPUT_TYPES(s): + track_types = ["Track Left", "Track Left", "Track Up", "Track Down"] + return {"required": { + "image": ("IMAGE",), + "track_type": (track_types,), + "track_speed": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "get_image" + CATEGORY = icons.get("Comfyroll/Animation/Camera") + + def get_image(self, image, rotation_type, current_frame, rotation_angle): + """ + Apply continuous tracking to the input image based on the tracking type and speed. + + Args: + image (PIL.Image.Image): The input image. + track_type (str): One of "Track Left", "Track Right", "Track Up", or "Track Down" to specify tracking direction. + track_speed (float): The speed of the tracking operation. + + Returns: + PIL.Image.Image: The tracked image. 
+ """ + # Placeholder for tracking logic based on the provided track_type and track_speed + # Replace with actual tracking implementation + + # In this example, we're just returning the original image + image_out = image + + return (image_out,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_TextListCrossJoin: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "text_list_simple1": ("TEXT_LIST_SIMPLE", {"default": 1, "min": 0, "max": 10000}), + "text_list_simple2": ("TEXT_LIST_SIMPLE", {"default": 1, "min": 0, "max": 10000}), + } + } + + RETURN_TYPES = ("TEXT_LIST_SIMPLE", ) + RETURN_NAMES = ("TEXT_LIST_SIMPLE", ) + FUNCTION = "cross_join" + CATEGORY = icons.get("Comfyroll/Animation/List") + + def cross_join(self, current_frame, max_frames): + + lines = list() + + for line1 in list1: + for line2 in list2: + concat_line = line1 + ',' + line2 + lines.append(concat_line) + + list = list1 + return (lines, ) + +#---------------------------------------------------------------------------------------------------------------------# +# Load From List +#---------------------------------------------------------------------------------------------------------------------# +class CR_LoadModelFromList: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"model_list": ("MODEL_LIST",), + "model_ID": ("STRING", {"default": "", "multiline": False}), + }, + } + + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + RETURN_NAMES = ("MODEL", "CLIP", "VAE") + FUNCTION = "loadmodel" + CATEGORY = icons.get("Comfyroll/Animation/List") + + def loadmodel(self, model_list, model_ID,): + + print(model_list) + #get modelname from ID + #try GPT for this + model_name = "SD1_5\CounterfeitV25_25.safetensors" + ckpt_path = folder_paths.get_full_path("checkpoints", model_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, + embedding_directory=folder_paths.get_folder_paths("embeddings")) + return out + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LoadLoRAFromList: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL",), + "clip": ("CLIP",), + "lora_list": ("LORA_LIST",), + "lora_ID": ("STRING", {"default": "", "multiline": False}), + }, + } + + RETURN_TYPES = ("MODEL", "CLIP", ) + RETURN_NAMES = ("MODEL", "CLIP", ) + FUNCTION = "loadlora" + CATEGORY = icons.get("Comfyroll/Animation/List") + + def loadlora(self, model, clip, lora_ID, lora_list,): + + print(lora_list) + #if lora_list == None: + + #pick lora by ID + #load lora + + return (model, clip,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LoadStyleFromList: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"style_list": ("STYLE_LIST",), + "style_ID": ("STRING", {"default": "", "multiline": False}), + }, + } + + RETURN_TYPES = ("STYLE", ) + RETURN_NAMES = ("STYLE", ) + FUNCTION = "loadstyle" + CATEGORY = icons.get("Comfyroll/Animation/List") + + def loadstyle(self, style_ID, style_list,): + + print(style_list) + #if style_list == None: + + #pick style by ID + #load style + + return (style_ID,) + +#--------------------------------------------------------------------------------------------------------------------# +class CR_StyleList: + + @classmethod + def INPUT_TYPES(cls): + + #current_directory = 
+#--------------------------------------------------------------------------------------------------------------------#
+class CR_StyleList:
+
+    @classmethod
+    def INPUT_TYPES(cls):
+
+        #current_directory = os.path.dirname(os.path.realpath(__file__))
+        #self.json_data, style_files = load_styles_from_directory(current_directory)
+
+        style_files = ["None"] + folder_paths.get_filename_list("loras")
+
+        return {"required": {
+                    "style_name1": (style_files,),
+                    "alias1": ("STRING", {"multiline": False, "default": ""}),
+                    "style_name2": (style_files,),
+                    "alias2": ("STRING", {"multiline": False, "default": ""}),
+                    "style_name3": (style_files,),
+                    "alias3": ("STRING", {"multiline": False, "default": ""}),
+                    "style_name4": (style_files,),
+                    "alias4": ("STRING", {"multiline": False, "default": ""}),
+                    "style_name5": (style_files,),
+                    "alias5": ("STRING", {"multiline": False, "default": ""}),
+                },
+                "optional": {"style_list": ("STYLE_LIST",)
+                },
+        }
+
+    RETURN_TYPES = ("STYLE_LIST", "STRING", )
+    RETURN_NAMES = ("STYLE_LIST", "show_text", )
+    FUNCTION = "style_list"
+    CATEGORY = icons.get("Comfyroll/Animation/List")
+
+    def style_list(self, style_name1, alias1, style_name2, alias2, style_name3, alias3, style_name4, alias4,
+                   style_name5, alias5, style_list=None):
+
+        # Initialise the list
+        styles = list()
+
+        # Extend the list for each style in the stack
+        if style_list is not None:
+            styles.extend([l for l in style_list])
+
+        if style_name1 != "None":
+            styles.extend([(alias1 + "," + style_name1 + "\n")])
+
+        if style_name2 != "None":
+            styles.extend([(alias2 + "," + style_name2 + "\n")])
+
+        if style_name3 != "None":
+            styles.extend([(alias3 + "," + style_name3 + "\n")])
+
+        if style_name4 != "None":
+            styles.extend([(alias4 + "," + style_name4 + "\n")])
+
+        if style_name5 != "None":
+            styles.extend([(alias5 + "," + style_name5 + "\n")])
+
+        #print(f"[TEST] CR Style List: {styles}")
+
+        show_text = "".join(styles)
+
+        return (styles, show_text, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_CycleStyles:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        cycle_methods = ["Sequential", "Random"]
+
+        return {"required": {"switch": (["Off", "On"],),
+                             "style_list": ("STYLE_LIST",),
+                             "frame_interval": ("INT", {"default": 30, "min": 0, "max": 999, "step": 1,}),
+                             "loops": ("INT", {"default": 1, "min": 1, "max": 1000}),
+                             "cycle_method": (cycle_methods,),
+                             "current_frame": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1,}),
+                },
+        }
+
+    RETURN_TYPES = ("STYLE", )
+    FUNCTION = "cycle"
+    CATEGORY = icons.get("Comfyroll/Animation/Other")
+
+    def cycle(self, switch, style_list, frame_interval, loops, cycle_method, current_frame,):
+
+        if switch == "Off":
+            return (None, )
+
+        print(style_list)
+        # TODO: loop through the style names and pick one for the current frame
+        # (see the cycling sketch below); a fixed ID is returned as a placeholder
+        style_ID = 1
+
+        return (style_ID, )
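+#---------------------------------------------------------------------------------------------------------------------#
+# Sketch of the cycling logic that CR_CycleStyles still stubs out, assuming style_list holds
+# one entry per style. Sequential mode steps through the list every frame_interval frames;
+# Random mode seeds on the interval index so a given frame always picks the same style,
+# which keeps re-runs of the workflow deterministic. Illustrative helper, not a released node.
+def pick_cycled_item(item_list, frame_interval, cycle_method, current_frame):
+    import random
+    interval_index = current_frame // max(frame_interval, 1)
+    if cycle_method == "Sequential":
+        return item_list[interval_index % len(item_list)]
+    else:  # "Random"
+        rng = random.Random(interval_index)  # seeded so the choice is stable per interval
+        return rng.choice(item_list)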
Track":CR_ContinuousTrack, + # + "CR Text List Cross Join":CR_TextListCrossJoin, + "CR Style List":CR_StyleList, + # + "CR Load Model From List":CR_LoadModelFromList, + "CR Load LoRA From List":CR_LoadLoRAFromList, + "CR Load Style From List":CR_LoadStyleFromList, + "CR Load Image From List":CR_LoadImageFromList, + "CR Load Text From List":CR_LoadTextFromList, + # + "CR Cycle Styles":CR_CycleStyles, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_nodes.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..fc135471694e029eb2200e122c8373d4a9cee93e --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_nodes.py @@ -0,0 +1,18 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_pil_3D.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_pil_3D.py new file mode 100644 index 0000000000000000000000000000000000000000..016af2fe32a499c44d5ec0184ddd623a5ade9e3c --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_pil_3D.py @@ -0,0 +1,282 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import numpy as np +import math +import torch +import os +from PIL import Image, ImageDraw +from ..categories import icons +from ..config import color_mapping, COLORS +from pywavefront import Wavefront + +#---------------------------------------------------------------------------------------------------------------------# + +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
+
+def align_text(align_txt, img_center_x, img_center_y, img_width, img_height, pos_x, pos_y, txt_width, txt_height, txt_padding):
+    if align_txt == "center":
+        txt_center_x = img_center_x + pos_x - txt_width / 2
+        txt_center_y = img_center_y + pos_y - txt_height / 2
+    elif align_txt == "top left":
+        txt_center_x = pos_x + txt_padding
+        txt_center_y = pos_y + txt_padding
+    elif align_txt == "top right":
+        txt_center_x = img_width + pos_x - txt_width - txt_padding
+        txt_center_y = pos_y + txt_padding
+    elif align_txt == "top center":
+        txt_center_x = img_width/2 + pos_x - txt_width/2 - txt_padding
+        txt_center_y = pos_y + txt_padding
+    elif align_txt == "bottom left":
+        txt_center_x = pos_x + txt_padding
+        txt_center_y = img_height + pos_y - txt_height - txt_padding
+    elif align_txt == "bottom right":
+        txt_center_x = img_width + pos_x - txt_width - txt_padding
+        txt_center_y = img_height + pos_y - txt_height - txt_padding
+    elif align_txt == "bottom center":
+        txt_center_x = img_width/2 + pos_x - txt_width/2 - txt_padding
+        txt_center_y = img_height + pos_y - txt_height - txt_padding
+    else:
+        # Fall back to center so the return values are always bound
+        txt_center_x = img_center_x + pos_x - txt_width / 2
+        txt_center_y = img_center_y + pos_y - txt_height / 2
+    return (txt_center_x, txt_center_y, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_3DPolygon:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        shapes = ["cube","tetrahedron"]
+
+        return {"required": {
+                    "shape": (shapes,),
+                    "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "image_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "radius": ("INT", {"default": 100, "min": 2, "max": 2048}),
+                    "distance": ("INT", {"default": 200, "min": 2, "max": 2048}),
+                    "rotation_angle": ("FLOAT", {"default": 0, "min": 0, "max": 3600, "step": 0.5}),
+                },
+        }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "draw_cube"
+    CATEGORY = icons.get("Comfyroll/Graphics/3D")
+
+    def draw_cube(self, shape, image_width, image_height, radius, distance, rotation_angle=45):
+        # Create a blank canvas; PIL expects size as (width, height)
+        size = (image_width, image_height)
+        image = Image.new("RGB", size)
+        draw = ImageDraw.Draw(image)
+
+        if shape == "cube":
+            vertices = [
+                (-radius, -radius, -radius),
+                (radius, -radius, -radius),
+                (radius, radius, -radius),
+                (-radius, radius, -radius),
+                (-radius, -radius, radius),
+                (radius, -radius, radius),
+                (radius, radius, radius),
+                (-radius, radius, radius)
+            ]
+            edges = [
+                (0, 1), (1, 2), (2, 3), (3, 0),
+                (4, 5), (5, 6), (6, 7), (7, 4),
+                (0, 4), (1, 5), (2, 6), (3, 7)
+            ]
+        elif shape == "tetrahedron":
+            vertices = [
+                (0, radius, 0),
+                (radius, -radius, -radius),
+                (-radius, -radius, -radius),
+                (0, -radius, radius)
+            ]
+            edges = [
+                (0, 1), (0, 2), (0, 3),
+                (1, 2), (2, 3), (3, 1)
+            ]
+
+        # Function to project 3D points to 2D
+        def project_point(point):
+            x, y, z = point
+            x_2d = x * distance / (z + distance) + size[0] / 2
+            y_2d = y * distance / (z + distance) + size[1] / 2
+            return x_2d, y_2d
+
+        # Rotate the shape about the y axis
+        rotated_vertices = []
+        angle = math.radians(rotation_angle)
+        cos_a = math.cos(angle)
+        sin_a = math.sin(angle)
+        for vertex in vertices:
+            x, y, z = vertex
+            new_x = x * cos_a - z * sin_a
+            new_z = x * sin_a + z * cos_a
+            rotated_vertices.append((new_x, y, new_z))
+
+        # Project and draw the rotated shape
+        for edge in edges:
+            start_point = project_point(rotated_vertices[edge[0]])
+            end_point = project_point(rotated_vertices[edge[1]])
+            draw.line([start_point, end_point], fill=(255, 255, 255))
+
+        # Convert the PIL image to a PyTorch tensor
+        tensor_image = pil2tensor(image)
+
+        return (tensor_image,)
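+#---------------------------------------------------------------------------------------------------------------------#
+# A note on project_point() above: it is a simple pinhole projection. A point at depth z is
+# scaled by distance / (z + distance) and recentred on the canvas. Worked example with
+# distance=200: a vertex at (100, 0, 200) lands at 100 * 200 / 400 + width/2 = 50 + width/2,
+# twice as far from the centre as a vertex at the same x but z=600 (100 * 200 / 800 = 25).
+# Points at z <= -distance would divide by zero or flip sign; the fixed shapes here never
+# reach that depth.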
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_3DSolids:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "image_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "radius": ("INT", {"default": 100, "min": 2, "max": 2048}),
+                    "height": ("INT", {"default": 100, "min": 2, "max": 2048}),
+                    "distance": ("INT", {"default": 200, "min": 2, "max": 2048}),
+                    "rotation_angle": ("FLOAT", {"default": 0, "min": 0, "max": 3600, "step": 0.5}),
+                },
+        }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "draw"
+    CATEGORY = icons.get("Comfyroll/Graphics/3D")
+
+    def draw(self, image_width, image_height, radius, height, distance, rotation_angle=45):
+
+        # Create a blank canvas; PIL expects size as (width, height)
+        size = (image_width, image_height)
+        image = Image.new("RGB", size)
+        draw = ImageDraw.Draw(image)
+
+        # Define the cone's vertices: apex, base centre, then the base circle
+        vertices = [
+            (0, height / 2, 0),
+            (0, -height / 2, 0)
+        ]
+
+        num_points = 20 # Number of points to approximate the circular base
+        base_points = [
+            (radius * math.cos(2 * math.pi * i / num_points), -height / 2, radius * math.sin(2 * math.pi * i / num_points))
+            for i in range(num_points)
+        ]
+        vertices = vertices + base_points
+
+        # Define the cone's edges as pairs of vertex indices
+        edges = []
+        for i in range(num_points):
+            edges.append((0, i + 2))
+            edges.append((1, i + 2))
+            edges.append((i + 2, (i + 3) if i < num_points - 1 else 2))
+
+        # Function to project 3D points to 2D
+        def project_point(point):
+            x, y, z = point
+            x_2d = x * distance / (z + distance) + size[0] / 2
+            y_2d = y * distance / (z + distance) + size[1] / 2
+            return x_2d, y_2d
+
+        # Rotate the cone about the y axis
+        rotated_vertices = []
+        angle = math.radians(rotation_angle)
+        cos_a = math.cos(angle)
+        sin_a = math.sin(angle)
+        for vertex in vertices:
+            x, y, z = vertex
+            new_x = x * cos_a - z * sin_a
+            new_z = x * sin_a + z * cos_a
+            rotated_vertices.append((new_x, y, new_z))
+
+        # Project and draw the rotated cone's faces with different colors
+        colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] # Example colors
+        for i in range(num_points):
+            vertices_indices = [0, i + 2, (i + 3) if i < num_points - 1 else 2]
+            face_vertices = [project_point(rotated_vertices[idx]) for idx in vertices_indices]
+            fill_color = colors[i % 3] # Cycle through colors for each face
+            draw.polygon(face_vertices, fill=fill_color)
+
+        # Draw the edges
+        for edge in edges:
+            start_point = project_point(rotated_vertices[edge[0]])
+            end_point = project_point(rotated_vertices[edge[1]])
+            draw.line([start_point, end_point], fill=(0, 0, 0))
+
+        # Convert the PIL image to a PyTorch tensor
+        tensor_image = pil2tensor(image)
+
+        return (tensor_image,)
"line_color": (COLORS[1:],), + }, + } + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "draw_wireframe" + CATEGORY = icons.get("Comfyroll/Graphics/3D") + + def draw_wireframe(self, obj_name, image_width=800, image_height=800, line_color="black"): + + # Load the OBJ file + obj_file = "obj\\" + str(obj_name) + resolved_obj_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), obj_file) + scene = Wavefront(resolved_obj_path) + + # Create a blank image + img = Image.new("RGB", (image_width, image_height), (0, 0, 0)) + draw = ImageDraw.Draw(img) + + for name, material in scene.materials.items(): + for face in material.mesh.faces: + vertices = [scene.vertices[i] for i in face] + + # Draw lines between vertices to create wireframe + for i in range(len(vertices)): + x1, y1, z1 = vertices[i] + x2, y2, z2 = vertices[(i + 1) % len(vertices)] + + # Scale and translate vertices to fit the image + x1 = int((x1 + 1) * image_width / 2) + y1 = int((1 - y1) * image_height / 2) + x2 = int((x2 + 1) * image_width / 2) + y2 = int((1 - y2) * image_height / 2) + + draw.line([(x1, y1), (x2, y2)], fill=line_color) + + # Convert the PIL image to a PyTorch tensor + tensor_image = pil2tensor(img) + + return (tensor_image,) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR 3D Polygon":CR_3DPolygon, + "CR 3D Solids":CR_3DSolids, + "CR Draw OBJ":CR_DrawOBJ, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_schedulers.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..85d77f771f33592a33cb38c81d08f45d2fb93316 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_schedulers.py @@ -0,0 +1,223 @@ +#-----------------------------------------------------------------------------------------------------------# +# CR Animation Nodes by RockOfFire and Akatsuzi https://github.com/RockOfFire/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#-----------------------------------------------------------------------------------------------------------# + +import comfy.sd +import os +import sys +import folder_paths +from nodes import LoraLoader +from ..animation_nodes.functions import keyframe_scheduler, prompt_scheduler +from ..categories import icons + +#-----------------------------------------------------------------------------------------------------------# +# NODES +#-----------------------------------------------------------------------------------------------------------# +# Schedulers +#-----------------------------------------------------------------------------------------------------------# +class CR_PromptWeightScheduler: + + @classmethod + def INPUT_TYPES(s): + modes = ["Default Value", "Schedule"] + return {"required": {"mode": (modes,), + "current_prompt": ("STRING", {"multiline": False, "default": "prepend text"}), + "next_prompt": ("STRING", {"multiline": False, "default": "append text"}), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_text": ("STRING", {"default": "default prompt", "multiline": False}), + 
"schedule_format": (["CR", "Deforum"],), + }, + "optional": {"schedule": ("SCHEDULE",), + } + } + + RETURN_TYPES = ("STRING", "STRING", "FLOAT", ) + RETURN_NAMES = ("current_prompt", "next_prompt", "weight", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, current_prompt, next_prompt, current_frame, schedule_alias, default_value, schedule_format, schedule=None): + + if mode == "Default Value": + print(f"[Info] CR Prompt Weight Scheduler: Scheduler {schedule_alias} is disabled") + text_out = default_value + return (text_out,) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for frame 0 + if params == "": + if current_frame == 0: + print(f"[Warning] CR Prompt Weight Scheduler. No frame 0 found in schedule. Starting with default value at frame 0") + text_out = default_value, + else: + # Try the params + try: + weight = float(params) + except ValueError: + print(f"[Warning] CR Prompt Weight Scheduler. Invalid params: {params}") + return() + + # Insert prepend and append text + current_prompt_out = default_text + next_prompt_out = default_text + + return (current_prompt_out, next_prompt_out, weight_out, ) + +#-----------------------------------------------------------------------------------------------------------# +class CR_LoadScheduledControlNets: + + @classmethod + def INPUT_TYPES(s): + + modes = ["Off", "Load default ControlNet", "Schedule"] + + return {"required": {"mode": (modes,), + "conditioning": ("CONDITIONING", ), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "default_controlnet": (folder_paths.get_filename_list("loras"), ), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "schedule_format": (["CR", "Deforum"],) + }, + "optional": {"controlnet_list": ("CONTROLNET_LIST",), + "schedule": ("SCHEDULE",) + }, + } + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, mode, conditioning, current_frame, schedule_alias, default_controlnet, strength, schedule_format, controlnet_list=None, schedule=None): + + controlnet_name = "" + + # Off mode + if mode == "Off": + print(f"[Info] CR Load Scheduled ControlNets. Disabled.") + return (conditioning,) + + # Load Default ControlNet mode + if mode == "Load default ControlNet": + if default_controlnet == None: + return (conditioning,) + if strength_model == 0 and strength_clip == 0: + return (conditioning,) + model, clip = ControlNetLoader().load_controlnet(control_net_name) + print(f"[Info] CR Load Scheduled ControlNets. Loading default ControlNet {controlnet_name}.") + return (conditioning,) + + # Get params + params = keyframe_scheduler(schedule, schedule_alias, current_frame) + + # Handle case where there is no schedule line for a frame + if params == "": + print(f"[Warning] CR Load Scheduled ControlNets. No ControlNet specified in schedule for frame {current_frame}. 
Using default controlnet.") + if default_controlnet != None: + conditioning = LoraLoader().load_controlnet(model, clip, default_controlnet, strength_model, strength_clip) + return (conditioning,) + else: + # Unpack the parameters + parts = params.split(',') + if len(parts) == 3: + s_controlnet_alias = parts[0].strip() + s_strength_model = float(parts[1].strip()) + s_strength_clip = float(parts[1].strip()) + else: + print(f"[Warning] CR Load Scheduled ControlNets. Skipped invalid line: {line}") + return() + + # Iterate through the LoRA list to get the LoRA name + for l_controlnet_alias, l_controlnet_name, l_strength_model, l_strength_clip in controlnet_list: + print(l_controlnet_alias, l_controlnet_name, l_strength_model, l_strength_clip) + if l_controlnet_alias == s_controlnet_alias: + print(f"[Info] CR Load Scheduled ControlNets. LoRA alias match found for {s_controlnet_alias}") + controlnet_name = l_controlnet_name + break # Exit the loop early once a match is found, ignores any duplicate matches + + # Check if a matching LoRA has been found + if controlnet_name == "": + print(f"[Info] CR Load Scheduled ControlNets. No ControlNet alias match found for {s_controlnet_alias}. Frame {current_frame}.") + return() + else: + print(f"[Info] CR Load Scheduled ControlNets. controlnet_name {controlnet_name}") + # Load the new LoRA + model, clip = LoraLoader().load_controlnet(model, clip, controlnet_name, s_strength_model, s_strength_clip) + print(f"[Debug] CR Load Scheduled ControlNets. Loading new controlnet {controlnet_name}") + return (conditioning,) + +#-----------------------------------------------------------------------------------------------------------# +class CR_ScheduleCameraMovements: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"image": ("IMAGE",), + "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "max_frames": ("INT", {"default": 1, "min": 1, "max": 10000}), + "schedule": ("SCHEDULE",), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "schedule_format": (["CR", "Deforum"],), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("IMAGE", ) + FUNCTION = "get_image" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def get_image(self, image, camera_schedule, current_frame, max_frames, schedule_format, ): + + image_out = image1 + + return (image_out,) + +#-----------------------------------------------------------------------------------------------------------# +class CR_ScheduleStyles: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "max_frames": ("INT", {"default": 120, "min": 1, "max": 10000}), + "schedule": ("SCHEDULE",), + "schedule_alias": ("STRING", {"default": "", "multiline": False}), + "schedule_format": (["CR", "Deforum"],), + }, + "optional": {"style_list": ("STYLE_LIST",), + } + } + + RETURN_TYPES = ("STYLE", ) + RETURN_NAMES = ("STYLE", ) + FUNCTION = "schedule" + CATEGORY = icons.get("Comfyroll/Animation/Schedulers") + + def schedule(self, current_frame, max_frames, schedule, schedule_alias, schedule_format, style_list=None): + + #loop through tuple list in schedule + #expand tuples + #do something + #return output + + return (None,) + +#-----------------------------------------------------------------------------------------------------------# +# MAPPINGS +#-----------------------------------------------------------------------------------------------------------# +# For 
+#-----------------------------------------------------------------------------------------------------------#
+# MAPPINGS
+#-----------------------------------------------------------------------------------------------------------#
+# For reference only, actual mappings are in __init__.py
+# 6 nodes
+'''
+NODE_CLASS_MAPPINGS = {
+    # Schedulers
+    "CR Simple Prompt Scheduler":CR_SimplePromptScheduler,
+    "CR Load Scheduled ControlNets":CR_LoadScheduledControlNets,
+    "CR Prompt Weight Scheduler":CR_PromptWeightScheduler,
+    "CR Schedule Camera Movements":CR_ScheduleCameraMovements,
+    "CR Schedule Styles":CR_ScheduleStyles,
+    "CR Schedule ControlNets":CR_ScheduleControlNets,
+}
+'''
+
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_workflow.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1d4e3f4e9034d4849a55c35ab63d56e63a85daa
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_workflow.py
@@ -0,0 +1,173 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# CR Animation Nodes by RockOfFire and Akatsuzi     https://github.com/RockOfFire/CR-Animation-Nodes
+# for ComfyUI                                       https://github.com/comfyanonymous/ComfyUI
+#---------------------------------------------------------------------------------------------------------------------#
+
+from ..categories import icons
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_JobList:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        job_types = ["Input", "Batch Process", "Output"]
+        return {"required":{
+                    "job_desc1": ("STRING", {"default": "job description", "multiline": True}),
+                    "job_type1": (job_types,),
+                    "job_alias1": ("STRING", {"default": "", "multiline": False}),
+                    "job_desc2": ("STRING", {"default": "job description", "multiline": True}),
+                    "job_type2": (job_types,),
+                    "job_alias2": ("STRING", {"default": "", "multiline": False}),
+                    "job_desc3": ("STRING", {"default": "job description", "multiline": True}),
+                    "job_type3": (job_types,),
+                    "job_alias3": ("STRING", {"default": "", "multiline": False}),
+                },
+                "optional": {"job": ("JOB",),
+                }
+        }
+
+    RETURN_TYPES = ("JOB", )
+    RETURN_NAMES = ("JOB", )
+    FUNCTION = "increment"
+    CATEGORY = icons.get("Comfyroll/Workflow")
+
+    def increment(self, job_desc1, job_type1, job_alias1, job_desc2, job_type2, job_alias2, job_desc3, job_type3, job_alias3, job=None):
+        # TODO: extend any existing job list with the three job entries defined above;
+        # an empty list is returned as a placeholder
+        job = list()
+        return (job, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_JobScheduler:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        status = ["Asleep", "Awake"]
+
+        return {"required":{
+                    "schedule": ("SCHEDULE", ),
+                    "index": ("INT", {"default": 1, "min": -10000, "max": 10000}),
+                    "schedule_alias": ("STRING", {"default": "", "multiline": False}),
+                    "status": (status,),
+                }
+        }
+
+    RETURN_TYPES = ("JOB", "STRING", )
+    RETURN_NAMES = ("JOB", "log", )
+    FUNCTION = "listen"
+    CATEGORY = icons.get("Comfyroll/Workflow")
+
+    def listen(self, index, schedule, schedule_alias, status):
+        # Placeholder: no job is produced yet and the log is empty
+        job = None
+        log = ""
+        return (job, log, )
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_JobCurrentFrame:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required":{
+                    "index": ("INT", {"default": 1, "min": -10000, "max": 10000}),
+                    "max_frames": ("INT", {"default": 1, "min": 0, "max": 10000}),
+                    "print_to_console": (["Yes", "No"],),
+                }
+        }
+
+    RETURN_TYPES = ("INT", "INT",)
+    RETURN_NAMES = ("current_frame", "max_frames",)
+    FUNCTION = "to_console"
+    CATEGORY = icons.get("Comfyroll/Workflow")
+
+    def to_console(self, index, max_frames, print_to_console):
+        if print_to_console == "Yes":
+            print(f"[Info] CR Current Frame:{index}")
+        current_frame = index
+
+        return (current_frame, max_frames, )
icons.get("Comfyroll/Workflow") + + def to_console(self, index, max_frames, print_to_console): + if print_to_console == "Yes": + print(f"[Info] CR Current Frame:{index}") + current_frame = index + + return (current_frame, max_frames, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CheckJobComplete: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "current_frame": ("INT", {"default": 1, "min": 0, "max": 10000}), + "max_frames": ("INT", {"default": 1, "min": 0, "max": 10000}), + } + } + + RETURN_TYPES = ("BOOL", ) + RETURN_NAMES = ("BOOL", ) + FUNCTION = "reset" + CATEGORY = icons.get("Comfyroll/Workflow") + + def reset(self, current_frame, max_frames): + + return (BOOL) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SpawnWorkflowInstance: + + @classmethod + def INPUT_TYPES(s): + + #mode = ["API"] + + return {"required":{ + #"mode": (mode,), + "job": ("JOB", ), + #"job_alias": ("STRING", {"default": "", "multiline": False}), + "workflow_path": ("STRING", {"multiline": False, "default": ""}), + "workflow_name": ("STRING", {"multiline": False, "default": ""}), + } + } + + RETURN_TYPES = () + RETURN_NAMES = () + OUTPUT_NODE = True + FUNCTION = "spawn" + CATEGORY = icons.get("Comfyroll/Workflow") + + def spawn(self, job, workflow_path, workflow_name): + + return () + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LoadWorkflow: + + @classmethod + def INPUT_TYPES(s): + + return {"required":{ + "workflow_path": ("STRING", {"multiline": False, "default": ""}), + "workflow_name": ("STRING", {"multiline": False, "default": ""}), + } + } + + RETURN_TYPES = ("WORKFLOW", ) + RETURN_NAMES = ("WORKFLOW", ) + FUNCTION = "workflow" + CATEGORY = icons.get("Comfyroll/Workflow") + + def spawn(self, mode, job, schedule, workflow): + workflow = "" + return (workflow, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 3 nodes +''' +NODE_CLASS_MAPPINGS = { + # Jobs + "CR Job List": CR_JobList, + "CR Job Scheduler": CR_JobScheduler, + "CR Job Current Frame": CR_JobCurrentFrame, + "CR Check Job Complete": CR_CheckJobComplete, + "CR Spawn Workflow Instance": CR_SpawnWorkflowInstance, + "CR Load Workflow": CR_LoadWorkflow, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_xygrid.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_xygrid.py new file mode 100644 index 0000000000000000000000000000000000000000..1c611868fd51acc25c0def0c6d582f156cf53d4d --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/dev_xygrid.py @@ -0,0 +1,398 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Nodes by RockOfFire and Akatsuzi https://github.com/RockOfFire/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import os +import folder_paths +from PIL import Image, ImageFont +import torch +import numpy as np +import re 
+def tensor_to_pillow(image: t.Any) -> Image.Image:
+    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+def pillow_to_tensor(image: Image.Image) -> t.Any:
+    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+def find_highest_numeric_value(directory, filename_prefix):
+    highest_value = -1 # Initialize with a value lower than possible numeric values
+
+    # Iterate through all files in the directory
+    for filename in os.listdir(directory):
+        if filename.startswith(filename_prefix):
+            try:
+                # Extract numeric part of the filename
+                numeric_part = filename[len(filename_prefix):]
+                numeric_str = re.search(r'\d+', numeric_part).group()
+                numeric_value = int(numeric_str)
+                # Check if the current numeric value is higher than the highest found so far
+                if numeric_value > highest_value:
+                    highest_value = numeric_value
+            except (ValueError, AttributeError):
+                # Skip files with no numeric part (re.search returns None) or an invalid integer
+                continue
+
+    return highest_value
+
+#---------------------------------------------------------------------------------------------------------------------#
+# NODES
+#---------------------------------------------------------------------------------------------------------------------#
+# These nodes are based on https://github.com/LEv145/images-grid-comfy-plugin
+#---------------------------------------------------------------------------------------------------------------------
+class CR_XYZList:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":{
+                    "index": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1,}),
+                    "list1": ("STRING", {"multiline": True, "default": "x"}),
+                    "x_prepend": ("STRING", {"multiline": False, "default": ""}),
+                    "x_append": ("STRING", {"multiline": False, "default": ""}),
+                    "x_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                    "list2": ("STRING", {"multiline": True, "default": "y"}),
+                    "y_prepend": ("STRING", {"multiline": False, "default": ""}),
+                    "y_append": ("STRING", {"multiline": False, "default": ""}),
+                    "y_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                    "list3": ("STRING", {"multiline": True, "default": "z"}),
+                    "z_prepend": ("STRING", {"multiline": False, "default": ""}),
+                    "z_append": ("STRING", {"multiline": False, "default": ""}),
+                    "z_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                }
+        }
+
+    RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "BOOLEAN",)
+    RETURN_NAMES = ("X", "Y", "Z", "x_annotation", "y_annotation", "z_annotation", "trigger",)
+    FUNCTION = "cross_join"
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def cross_join(self, list1, list2, list3,
+                   x_prepend, x_append, x_annotation_prepend,
+                   y_prepend, y_append, y_annotation_prepend,
+                   z_prepend, z_append, z_annotation_prepend, index):
+
+        # Index values for all XY nodes start from 1
+        index -= 1
+        trigger = False
+
+        # Split on commas that sit outside double-quoted sections
+        listx = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', list1)
+        listy = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', list2)
+        listz = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', list3)
+
+        listx = [item.strip() for item in listx]
+        listy = [item.strip() for item in listy]
+        listz = [item.strip() for item in listz]
+
+        lenx = len(listx)
+        leny = len(listy)
+        lenz = len(listz)
+
+        sheet_size = lenx * leny
+        grid_size = lenx * leny * lenz
+
+        x = index % lenx
+        y = int(index / lenx) % leny
+        z = int(index / sheet_size)
+
+        x_out = x_prepend + listx[x] + x_append
+        y_out = y_prepend + listy[y] + y_append
+        z_out = z_prepend + listz[z] + z_append
+
+        x_ann_out = ""
+        y_ann_out = ""
+        z_ann_out = ""
+
+        # On the last cell, emit the full annotation strings for the grid
+        if index + 1 == grid_size:
+            x_ann_out = "".join([x_annotation_prepend + item + ";" for item in listx])
+            y_ann_out = "".join([y_annotation_prepend + item + ";" for item in listy])
+            z_ann_out = "".join([z_annotation_prepend + item + ";" for item in listz])
+            trigger = True
+
+        return (x_out, y_out, z_out, x_ann_out, y_ann_out, z_ann_out, trigger, )
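+#---------------------------------------------------------------------------------------------------------------------#
+# Worked example of the index maths above (illustrative): with lenx=3, leny=2, lenz=2 the grid
+# has 12 cells. A 1-based index of 8 becomes index=7 after the "index -= 1" line, giving
+#   x = 7 % 3 = 1,   y = (7 // 3) % 2 = 2 % 2 = 0,   z = 7 // 6 = 1
+# i.e. the second column, first row, on the second z sheet. CR_XYZInterpolate and CR_XYZIndex
+# below reuse exactly the same decomposition.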
+#---------------------------------------------------------------------------------------------------------------------
+class CR_XYZInterpolate:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        gradient_profiles = ["Lerp"]
+
+        return {"required": {"x_columns": ("INT", {"default": 5, "min": 0, "max": 9999, "step": 1,}),
+                             "x_start_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "x_step": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "x_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                             "y_rows": ("INT", {"default": 5, "min": 0, "max": 9999, "step": 1,}),
+                             "y_start_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "y_step": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "y_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                             "z_sheets": ("INT", {"default": 5, "min": 0, "max": 9999, "step": 1,}),
+                             "z_start_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "z_step": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}),
+                             "z_annotation_prepend": ("STRING", {"multiline": False, "default": ""}),
+                             "index": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1,}),
+                             "gradient_profile": (gradient_profiles,)
+                }
+        }
+
+    RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "STRING", "STRING", "STRING", "BOOLEAN", )
+    RETURN_NAMES = ("X", "Y", "Z", "x_annotation", "y_annotation", "z_annotation", "trigger", )
+    FUNCTION = "gradient"
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def gradient(self, x_columns, x_start_value, x_step, x_annotation_prepend,
+                 y_rows, y_start_value, y_step, y_annotation_prepend,
+                 z_sheets, z_start_value, z_step, z_annotation_prepend,
+                 index, gradient_profile):
+
+        # Index values for all XY nodes start from 1
+        index -= 1
+        trigger = False
+        sheet_size = x_columns * y_rows
+        grid_size = x_columns * y_rows * z_sheets
+
+        x = index % x_columns
+        y = int(index / x_columns) % y_rows
+        z = int(index / sheet_size)
+
+        x_float_out = round(x_start_value + x * x_step, 3)
+        y_float_out = round(y_start_value + y * y_step, 3)
+        z_float_out = round(z_start_value + z * z_step, 3)
+
+        x_ann_out = ""
+        y_ann_out = ""
+        z_ann_out = ""
+
+        # On the last cell, build the full annotation strings for all three axes
+        if index + 1 == grid_size:
+            for i in range(0, x_columns):
+                x_ann_out = x_ann_out + x_annotation_prepend + str(round(x_start_value + i * x_step, 3)) + "; "
+            for j in range(0, y_rows):
+                y_ann_out = y_ann_out + y_annotation_prepend + str(round(y_start_value + j * y_step, 3)) + "; "
+            for k in range(0, z_sheets):
+                z_ann_out = z_ann_out + z_annotation_prepend + str(round(z_start_value + k * z_step, 3)) + "; "
+
+            # Drop the trailing space after the final semicolon
+            x_ann_out = x_ann_out[:-1]
+            y_ann_out = y_ann_out[:-1]
+            z_ann_out = z_ann_out[:-1]
+            print(x_ann_out, y_ann_out, z_ann_out)
+            trigger = True
+
+        return (x_float_out, y_float_out, z_float_out, x_ann_out, y_ann_out, z_ann_out, trigger)
+#---------------------------------------------------------------------------------------------------------------------
+class CR_XYZIndex:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        gradient_profiles = ["Lerp"]
+
+        return {"required": {"x_columns": ("INT", {"default": 5, "min": 0, "max": 9999, "step": 1,}),
+                             "y_rows": ("INT", {"default": 5, "min": 0, "max": 9999, "step": 1,}),
+                             "z_sheets": ("INT", {"default": 1, "min": 0, "max": 9999, "step": 1,}),
+                             "index": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1,}),
+                }
+        }
+
+    RETURN_TYPES = ("INT", "INT", "INT",)
+    RETURN_NAMES = ("x", "y", "z",)
+    FUNCTION = "index"
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def index(self, x_columns, y_rows, z_sheets, index):
+
+        # Index values for all XY nodes start from 1
+        index -= 1
+        sheet_size = x_columns * y_rows
+
+        x = index % x_columns
+        y = int(index / x_columns) % y_rows
+        z = int(index / sheet_size)
+        #print(x, y, z)
+
+        return (x, y, z)
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_LoadXYAnnotationFromFile:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "input_file_path": ("STRING", {"multiline": False, "default": ""}),
+                    "file_name": ("STRING", {"multiline": False, "default": ""}),
+                    "file_extension": (["txt", "csv"],),
+                }
+        }
+
+    RETURN_TYPES = ("GRID_ANNOTATION", "STRING", )
+    RETURN_NAMES = ("GRID_ANNOTATION", "show_text", )
+    FUNCTION = "load"
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def load(self, input_file_path, file_name, file_extension):
+        # Build the path portably instead of concatenating with "\\"
+        filepath = os.path.join(input_file_path, file_name + "." + file_extension)
+        print(f"CR Load XY Annotation From File: Loading {filepath}")
+
+        lists = []
+
+        if file_extension == "csv":
+            with open(filepath, "r") as csv_file:
+                reader = csv.reader(csv_file)
+
+                for row in reader:
+                    lists.append(row)
+
+        else:
+            with open(filepath, "r") as txt_file:
+                for row in txt_file:
+                    # Split on the first comma only, so quoted values may contain commas
+                    parts = row.strip().split(",", 1)
+
+                    if len(parts) >= 2:
+                        second_part = parts[1].strip('"')
+                        lists.append([parts[0], second_part])
+
+        print(lists)
+        return (lists, str(lists), )
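+#---------------------------------------------------------------------------------------------------------------------#
+# Example of the annotation file layout this loader can parse (illustrative data, not shipped
+# with the pack). Each row is "label,values"; in the txt branch only the first comma is split
+# on, so a quoted value part may itself contain commas:
+#
+#   x_annotation,"cfg 4; cfg 7; cfg 10"
+#   y_annotation,"steps 10; steps 20"
+#
+# which loads as [['x_annotation', 'cfg 4; cfg 7; cfg 10'], ['y_annotation', 'steps 10; steps 20']].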
+#---------------------------------------------------------------------------------------------------------------------
+class CR_XYGrid:
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "images": ("IMAGE",),
+                    "gap": ("INT", {"default": 0, "min": 0}),
+                    "max_columns": ("INT", {"default": 1, "min": 1, "max": 10000}),
+                },
+                "optional": {
+                    "annotation": ("GRID_ANNOTATION",),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "create_image"
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def create_image(self, images, gap, max_columns, annotation=None):
+
+        pillow_images = [tensor_to_pillow(i) for i in images]
+        pillow_grid = create_images_grid_by_columns(
+            images=pillow_images,
+            gap=gap,
+            annotation=annotation,
+            max_columns=max_columns,
+        )
+        tensor_grid = pillow_to_tensor(pillow_grid)
+
+        return (tensor_grid,)
+
+#---------------------------------------------------------------------------------------------------------------------
+class CR_XYSaveGridImage:
+# originally based on SaveImageSequence by mtb
+
+    def __init__(self):
+        self.type = "output"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+
+        output_dir = folder_paths.output_directory
+        output_folders = [name for name in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, name))]
+
+        return {
+            "required": {"mode": (["Save", "Preview"],),
+                         "output_folder": (sorted(output_folders), ),
+                         "image": ("IMAGE", ),
+                         "filename_prefix": ("STRING", {"default": "CR"}),
+                         "file_format": (["webp", "jpg", "png", "tif"],),
+            },
+            "optional": {"output_path": ("STRING", {"default": '', "multiline": False}),
+                         "trigger": ("BOOLEAN", {"default": False},),
+            }
+        }
+
+    RETURN_TYPES = ()
+    FUNCTION = "save_image"
+    OUTPUT_NODE = True
+    CATEGORY = icons.get("Comfyroll/XY Grid")
+
+    def save_image(self, mode, output_folder, image, file_format, output_path='', filename_prefix="CR", trigger=False):
+
+        if not trigger:
+            return ()
+
+        output_dir = folder_paths.get_output_directory()
+        out_folder = os.path.join(output_dir, output_folder)
+
+        # Set the output path
+        if output_path != '':
+            if not os.path.exists(output_path):
+                print(f"[Warning] CR Save XY Grid Image: The input_path `{output_path}` does not exist")
+                return ()
+            out_path = output_path
+        else:
+            out_path = out_folder
+
+        if mode == "Preview":
+            out_path = folder_paths.temp_directory
+
+        print(f"[Info] CR Save XY Grid Image: Output path is `{out_path}`")
+
+        # Set the counter
+        counter = find_highest_numeric_value(out_path, filename_prefix) + 1
+        #print(f"[Debug] counter {counter}")
+
+        # Output image
+        output_image = image[0].cpu().numpy()
+        img = Image.fromarray(np.clip(output_image * 255.0, 0, 255).astype(np.uint8))
+
+        output_filename = f"{filename_prefix}_{counter:05}"
+        img_params = {'png': {'compress_level': 4},
+                      'webp': {'method': 6, 'lossless': False, 'quality': 80},
+                      'jpg': {'format': 'JPEG'},
+                      'tif': {'format': 'TIFF'}
+                     }
+        self.type = "output" if mode == "Save" else 'temp'
+
+        resolved_image_path = os.path.join(out_path, f"{output_filename}.{file_format}")
+        img.save(resolved_image_path, **img_params[file_format])
+        print(f"[Info] CR Save XY Grid Image: Saved to {output_filename}.{file_format}")
+        out_filename = f"{output_filename}.{file_format}"
+        preview = {"ui": {"images": [{"filename": out_filename, "subfolder": out_path, "type": self.type,}]}}
+
+        return preview
f"{output_filename}.{file_format}") + img.save(resolved_image_path, **img_params[file_format]) + print(f"[Info] CR Save XY Grid Image: Saved to {output_filename}.{file_format}") + out_filename = f"{output_filename}.{file_format}" + preview = {"ui": {"images": [{"filename": out_filename,"subfolder": out_path,"type": self.type,}]}} + + return preview + +#--------------------------------------------------------------------------------------------------------------------- +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 0 nodes released +''' +NODE_CLASS_MAPPINGS = { + # XY Grid + "CR XYZ List":CR_XYZList, + "CR XYZ Index":CR_XYZIndex, + "CR XYZ Interpolate":CR_XYZInterpolate, + "CR Load XY Annotation From File":CR_LoadXYAnnotationFromFile, + "CR XY Grid":CR_XYGrid, +} +''' + + + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/graphics_dev_nodes.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/graphics_dev_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d9e04f48655d6c1979abdb3afdb97207548c5e --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/dev_nodes/graphics_dev_nodes.py @@ -0,0 +1,675 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/RockOfFire/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import numpy as np +import torch +import os +from PIL import Image, ImageDraw, ImageOps, ImageFont +from ..categories import icons +from ..config import color_mapping, COLORS +from ..nodes.graphics_functions import (hex_to_rgb, + get_color_values, + text_panel, + combine_images, + apply_outline_and_border, + get_font_size, + draw_text_on_image, + crop_and_resize_image) + +font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") +file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + +#try: +# import Markdown +#except ImportError: +# import pip +# pip.main(['install', 'Markdown']) + +#---------------------------------------------------------------------------------------------------------------------# + +ALIGN_OPTIONS = ["top", "center", "bottom"] +ROTATE_OPTIONS = ["text center", "image center"] +JUSTIFY_OPTIONS = ["left", "center", "right"] +PERSPECTIVE_OPTIONS = ["top", "bottom", "left", "right"] + +#---------------------------------------------------------------------------------------------------------------------# + +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_MultiPanelMemeTemplate:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+        file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+        templates = ["vertical - 2 image + 2 text",
+                     "vertical - 3 image + 3 text",
+                     "vertical - 4 image + 4 text",
+                     "horizontal - 2 image + 2 text",
+                     "horizontal - text bar + 2 image",
+                     "text bar + 1 image with overlay text",
+                     "text bar + 4 image",
+                     "text bar + 4 image with overlay text"]
+        colors = COLORS[1:]
+
+        return {"required": {
+                    "template": (templates,),
+                    "image_1": ("IMAGE",),
+                    "text_1": ("STRING", {"multiline": True, "default": "text_1"}),
+                    "text_2": ("STRING", {"multiline": True, "default": "text_2"}),
+                    "text_3": ("STRING", {"multiline": True, "default": "text_3"}),
+                    "text_4": ("STRING", {"multiline": True, "default": "text_4"}),
+                    "font_name": (file_list,),
+                    "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}),
+                    "font_color": (colors,),
+                    "bar_color": (colors,),
+                    "reverse_panels": (["No", "Yes"],),
+                },
+                "optional": {
+                    "image_2": ("IMAGE",),
+                    "image_3": ("IMAGE",),
+                    "image_4": ("IMAGE",),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "draw_text"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def draw_text(self, template, image_1, text_1, text_2, text_3, text_4,
+                  font_name, font_size, font_color, bar_color, reverse_panels, image_2=None, image_3=None, image_4=None):
+
+        show_help = "example help text"
+
+        # Placeholder: template composition is not implemented yet,
+        # so the first input image tensor is returned unchanged
+        return (image_1, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_PopularMemeTemplates:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        templates = ["Expanding brain",
+                     "My honest reaction",
+                     "The GF I want",
+                     "Who would win?",
+                     "I have 4 sides",
+                     "This is Fine",
+                     "Is This a Pigeon?",
+                     "Drake hotline bling"]
+        colors = COLORS[1:]
+
+        return {"required": {
+                    "meme": (templates,),
+                    "image_1": ("IMAGE",),
+                    "text_1": ("STRING", {"multiline": True, "default": "text_1"}),
+                    "text_2": ("STRING", {"multiline": True, "default": "text_2"}),
+                    "text_3": ("STRING", {"multiline": True, "default": "text_3"}),
+                    "text_4": ("STRING", {"multiline": True, "default": "text_4"}),
+                    "font_name": (file_list,),
+                    "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}),
+                    "font_color": (colors,),
+                },
+                "optional": {
+                    "image_2": ("IMAGE",),
+                    "image_3": ("IMAGE",),
+                    "image_4": ("IMAGE",),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "draw_text"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def draw_text(self, meme, image_1, text_1, text_2, text_3, text_4,
+                  font_name, font_size, font_color, image_2=None, image_3=None, image_4=None):
+
+        show_help = "example help text"
+
+        # Placeholder: meme composition is not implemented yet,
+        # so the first input image tensor is returned unchanged
+        return (image_1, show_help, )
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_DrawPerspectiveText:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "image_height": ("INT", {"default": 512, "min": 64, "max": 2048}),
+                    "text": ("STRING", {"multiline": True, "default": "text"}),
+                    "font_name": (file_list,),
+                    "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}),
+                    "font_color": (COLORS,),
+                    "background_color": (COLORS,),
+                    "align": (ALIGN_OPTIONS,),
+                    "justify": (JUSTIFY_OPTIONS,),
+                    "margins": ("INT", {"default": 0, "min": -1024, "max": 1024}),
+                    "line_spacing": ("INT", {"default": 0, "min": -1024, "max": 1024}),
+                    "position_x": ("INT", {"default": 0, "min": -4096, "max": 4096}),
+                    "position_y": ("INT", {"default": 0, "min": -4096, "max": 4096}),
+                    "perspective_factor": ("FLOAT", {"default": 0.00, "min": 0.00, "max": 1.00, "step": 0.01}),
+                    "perspective_direction": (PERSPECTIVE_OPTIONS,),
+                },
+                "optional": {
+                    "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "draw_text"
+    CATEGORY = icons.get("Comfyroll/Graphics/Text")
+
+    def draw_text(self, image_width, image_height, text,
+                  font_name, font_size, font_color, background_color,
+                  margins, line_spacing,
+                  position_x, position_y,
+                  align, justify,
+                  perspective_factor, perspective_direction,
+                  font_color_hex='#000000', bg_color_hex='#000000'):
+
+        # Get RGB values for the text and background colors
+        text_color = get_color_values(font_color, font_color_hex, color_mapping)
+        bg_color = get_color_values(background_color, bg_color_hex, color_mapping)
+
+        # Create PIL images for the text and background layers and text mask
+        size = (image_width, image_height)
+        text_image = Image.new('RGB', size, text_color)
+        back_image = Image.new('RGB', size, bg_color)
+        text_mask = Image.new('L', back_image.size)
+
+        # Draw the text on the text mask
+        text_mask = draw_masked_text_v2(text_mask, text, font_name, font_size,
+                                        margins, line_spacing,
+                                        position_x, position_y,
+                                        align, justify,
+                                        perspective_factor, perspective_direction)
+
+        # Composite the text image onto the background image using the text mask
+        image_out = Image.composite(text_image, back_image, text_mask)
+
+        show_help = "example help text"
+
+        # Convert the PIL image back to a torch tensor
+        return (pil2tensor(image_out), show_help, )
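+#---------------------------------------------------------------------------------------------------------------------#
+# draw_masked_text_v2() is assumed to handle the perspective warp of the mask internally.
+# If you need to build such a warp yourself, PIL's Image.transform(..., Image.PERSPECTIVE)
+# takes 8 coefficients that map each output pixel back to a source position; they can be
+# solved from four source/target corner pairs with the standard least-squares recipe below
+# (this file already imports numpy as np). Illustrative helper, not a released node.
+def find_perspective_coeffs(source_corners, target_corners):
+    matrix = []
+    for (x, y), (X, Y) in zip(target_corners, source_corners):
+        matrix.append([x, y, 1, 0, 0, 0, -X * x, -X * y])
+        matrix.append([0, 0, 0, x, y, 1, -Y * x, -Y * y])
+    A = np.array(matrix, dtype=np.float64)
+    b = np.array(source_corners, dtype=np.float64).reshape(8)
+    coeffs, *_ = np.linalg.lstsq(A, b, rcond=None)  # exact solve for the 8x8 system
+    return coeffs.tolist()
+
+# Example: squeeze the top edge inward by perspective_factor for a "top" perspective:
+#   w, h = text_mask.size
+#   shift = perspective_factor * w / 2
+#   coeffs = find_perspective_coeffs(
+#       [(0, 0), (w, 0), (w, h), (0, h)],               # source corners (full rectangle)
+#       [(shift, 0), (w - shift, 0), (w, h), (0, h)])   # target corners (narrowed top)
+#   warped_mask = text_mask.transform((w, h), Image.PERSPECTIVE, coeffs, Image.BICUBIC)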
icons.get("Comfyroll/Graphics/Text") + + def make_meme(self, image, + text_top, text_bottom, + font_name, max_font_size, + font_color, bar_color, bar_options, bar_scaling_factor, + font_color_hex='#000000', + bar_color_hex='#000000'): + + text_color = get_color_values(font_color, font_color_hex, color_mapping) + bar_color = get_color_values(bar_color, bar_color_hex, color_mapping) + + # Convert tensor images + image_3d = image[0, :, :, :] + + # Calculate the height factor + if bar_options == "top": + height_factor = 1 + bar_scaling_factor + elif bar_options == "bottom": + height_factor = 1 + bar_scaling_factor + elif bar_options == "top and bottom": + height_factor = 1 + 2 * bar_scaling_factor + else: + height_factor = 1.0 + + # Create PIL images for the image and text bars + back_image = tensor2pil(image_3d) + size = back_image.width, int(back_image.height * height_factor) + result_image = Image.new("RGB", size) + + # Define font settings + font_file = "fonts\\" + str(font_name) + resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file) + + # Create the drawing context + draw = ImageDraw.Draw(result_image) + + # Create two color bars at the top and bottom + bar_width = back_image.width + bar_height = back_image.height // 5 ### add parameter for this in adv node + top_bar = Image.new("RGB", (bar_width, bar_height), bar_color) + bottom_bar = Image.new("RGB", (bar_width, bar_height), bar_color) + + # Composite the result image onto the input image + if bar_options == "top" or bar_options == "top and bottom": + image_out = result_image.paste(back_image, (0, bar_height)) + else: + image_out = result_image.paste(back_image, (0, 0)) + + # Get the font size and draw the text + if bar_options == "top" or bar_options == "top and bottom": + result_image.paste(top_bar, (0, 0)) + font_top = get_font_size(draw, text_top, bar_width, bar_height, resolved_font_path, max_font_size) + draw_text_on_image(draw, 0, bar_width, bar_height, text_top, font_top, text_color, "No") + + if bar_options == "bottom" or bar_options == "top and bottom": + result_image.paste(bottom_bar, (0, (result_image.height - bar_height))) + font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size) + if bar_options == "bottom": + y_position = back_image.height + else: + y_position = bar_height + back_image.height + draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, "No") + + # Overlay text on image + if bar_options == "bottom" and text_top > "": + font_top = get_font_size(draw, text_top, bar_width, bar_height, resolved_font_path, max_font_size) + draw_text_on_image(draw, 0, bar_width, bar_height, text_top, font_top, text_color, "No") + + if (bar_options == "top" or bar_options == "none") and text_bottom > "": + font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size) + y_position = back_image.height + draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, "No") + + if bar_options == "none" and text_bottom > "": + font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size) + y_position = back_image.height - bar_height + draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, "No") + + show_help = "example help text" + + image_out = np.array(result_image).astype(np.float32) / 255.0 + image_out = 
+
+        return (image_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ApplyAnnotations:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "image": ("IMAGE", ),
+                    "annotation_stack": ("ANNOTATION_STACK", ),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "apply_annotations"
+    CATEGORY = icons.get("Comfyroll/Graphics/Text")
+
+    def apply_annotations(self, image, annotation_stack):
+
+        show_help = "example help text"
+
+        # Annotation rendering is stubbed in this excerpt; the input image is passed through unchanged
+        image_out = image
+
+        return (image_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_AddAnnotation:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+        file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+
+        return {"required": {
+                    "text": ("STRING", {"multiline": True, "default": "text_top"}),
+                    "font_name": (file_list,),
+                    "font_size": ("INT", {"default": 100, "min": 20, "max": 150}),
+                    "font_color": (COLORS,),
+                    "position_x": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "position_y": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "justify": (JUSTIFY_OPTIONS,),
+                },
+                "optional": {
+                    "annotation_stack": ("ANNOTATION_STACK",),
+                    "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("ANNOTATION_STACK", "STRING", )
+    RETURN_NAMES = ("ANNOTATION_STACK", "show_help", )
+    FUNCTION = "add_annotation"
+    CATEGORY = icons.get("Comfyroll/Graphics/Text")
+
+    def add_annotation(self, text,
+                       font_name, font_size, font_color,
+                       position_x, position_y, justify,
+                       annotation_stack=None, font_color_hex='#000000'):
+
+        show_help = "example help text"
+
+        # Append this annotation's settings to the stack as a plain tuple
+        if annotation_stack is None:
+            annotation_stack = []
+        annotation_stack = list(annotation_stack)
+        annotation_stack.append((text, font_name, font_size, font_color, position_x, position_y, justify, font_color_hex))
+
+        return (annotation_stack, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_SimpleImageWatermark:
+
+    @classmethod
+    def INPUT_TYPES(cls):
+
+        ALIGN_OPTIONS = ["center", "top left", "top center", "top right", "bottom left", "bottom center", "bottom right"]
+
+        return {"required": {
+                    "image": ("IMAGE",),
+                    "watermark_image": ("IMAGE",),
+                    "watermark_scale": ("FLOAT", {"default": 1, "min": 0.1, "max": 5.00, "step": 0.01}),
+                    "opacity": ("FLOAT", {"default": 0.30, "min": 0.00, "max": 1.00, "step": 0.01}),
+                    "align": (ALIGN_OPTIONS,),
+                    "x_margin": ("INT", {"default": 20, "min": -1024, "max": 1024}),
+                    "y_margin": ("INT", {"default": 20, "min": -1024, "max": 1024}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "overlay_image"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def overlay_image(self, image, watermark_image, watermark_scale, opacity, align, x_margin, y_margin):
+
+        # Convert the tensors to PIL images
+        image = tensor2pil(image)
+        watermark_image = tensor2pil(watermark_image)
+
+        # Open images using Pillow
+        image = image.convert("RGBA")
+        watermark = watermark_image.convert("RGBA")
+
+        # Scale the watermark by watermark_scale (a multiplier on the watermark's own size)
+        new_size = (max(1, int(watermark.width * watermark_scale)), max(1, int(watermark.height * watermark_scale)))
+        watermark = watermark.resize(new_size)
+
+        # Create a transparent layer for the watermark
+        watermark_layer = Image.new("RGBA", image.size, (0, 0, 0, 0))
+
+        # Calculate the position to place the watermark based on the alignment
+        if align == 'center':
+            watermark_pos = ((image.width - watermark.width) // 2, (image.height - watermark.height) // 2)
+        elif align == 'top left':
+            watermark_pos = (x_margin, y_margin)
+        elif align == 'top center':
+            watermark_pos = ((image.width - watermark.width) // 2, y_margin)
+        elif align == 'top right':
+            watermark_pos = (image.width - watermark.width - x_margin, y_margin)
+        elif align == 'bottom left':
+            watermark_pos = (x_margin, image.height - watermark.height - y_margin)
+        elif align == 'bottom center':
+            watermark_pos = ((image.width - watermark.width) // 2, image.height - watermark.height - y_margin)
+        elif align == 'bottom right':
+            watermark_pos = (image.width - watermark.width - x_margin, image.height - watermark.height - y_margin)
+
+        # Paste the watermark onto the transparent layer, using its own alpha as the mask
+        watermark_layer.paste(watermark, watermark_pos, watermark)
+
+        # Adjust the opacity of the watermark layer if needed
+        if opacity != 1:
+            watermark_layer = reduce_opacity(watermark_layer, opacity)
+
+        # Composite the watermark layer on top of the original image
+        image_out = Image.composite(watermark_layer, image, watermark_layer).convert("RGB")
+
+        # Convert the PIL image back to a torch tensor
+        return (pil2tensor(image_out), )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ComicPanelTemplatesAdvanced:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        directions = ["left to right", "right to left"]
+
+        templates = ["custom",
+                     "G22", "G33",
+                     "H2", "H3",
+                     "H12", "H13",
+                     "H21", "H23",
+                     "H31", "H32",
+                     "V2", "V3",
+                     "V12", "V13",
+                     "V21", "V23",
+                     "V31", "V32"]
+
+        return {"required": {
+                    "page_width": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "page_height": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "template": (templates,),
+                    "reading_direction": (directions,),
+                    "border_thickness": ("INT", {"default": 5, "min": 0, "max": 1024}),
+                    "outline_thickness": ("INT", {"default": 2, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS,),
+                    "panel_color": (COLORS,),
+                    "background_color": (COLORS,),
+                },
+                "optional": {
+                    "images1": ("IMAGE",),
+                    "images2": ("IMAGE",),
+                    "images3": ("IMAGE",),
+                    "images4": ("IMAGE",),
+                    "custom_panel_layout": ("STRING", {"multiline": False, "default": "H123"}),
+                    "outline_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "panel_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "layout"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def layout(self, page_width, page_height, template, reading_direction,
+               border_thickness, outline_thickness,
+               outline_color, panel_color, background_color,
+               images1=None, images2=None, images3=None, images4=None, custom_panel_layout='H123',
+               outline_color_hex='#000000', panel_color_hex='#000000', bg_color_hex='#000000'):
+
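+        # Template codes, as parsed below: "GRC" is a grid of R rows by C columns
+        # (e.g. "G22"); "H" plus one digit per row gives each row's panel count
+        # (e.g. "H123" is three rows of 1, 2 and 3 panels); "V" plus one digit per
+        # column gives each column's panel count (e.g. "V32").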
+        panels = []
+        k = 0
+        batches = 0
+
+        # Default the per-input image counts so the layout loops below are safe
+        # when an optional image input is not connected
+        len_images1 = len_images2 = len_images3 = len_images4 = 0
+
+        # Convert tensor images to PIL
+        if images1 is not None:
+            images1 = [tensor2pil(image) for image in images1]
+            len_images1 = len(images1)
+            batches += 1
+
+        if images2 is not None:
+            images2 = [tensor2pil(image) for image in images2]
+            len_images2 = len(images2)
+            batches += 1
+
+        if images3 is not None:
+            images3 = [tensor2pil(image) for image in images3]
+            len_images3 = len(images3)
+            batches += 1
+
+        if images4 is not None:
+            images4 = [tensor2pil(image) for image in images4]
+            len_images4 = len(images4)
+            batches += 1
+
+        # Get RGB values for the outline, panel and background colors
+        outline_color = get_color_values(outline_color, outline_color_hex, color_mapping)
+        panel_color = get_color_values(panel_color, panel_color_hex, color_mapping)
+        bg_color = get_color_values(background_color, bg_color_hex, color_mapping)
+
+        # Create page and apply bg color
+        size = (page_width - (2 * border_thickness), page_height - (2 * border_thickness))
+        page = Image.new('RGB', size, bg_color)
+        draw = ImageDraw.Draw(page)
+
+        if template == "custom":
+            template = custom_panel_layout
+
+        # Calculate panel positions and add to bg image
+        first_char = template[0]
+        if first_char == "G":
+            rows = int(template[1])
+            columns = int(template[2])
+            panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+            panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+            # Batch loop (multi-batch support not yet implemented)
+            #for b in range(batches):
+            # Row loop
+            for i in range(rows):
+                # Column loop
+                for j in range(columns):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images1, i, j, k, len_images1, reading_direction)
+                    k += 1
+
+        elif first_char == "H":
+            rows = len(template) - 1
+            panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+            # Batch loop (multi-batch support not yet implemented)
+            #for b in range(batches):
+            # Row loop
+            for i in range(rows):
+                columns = int(template[i+1])
+                panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+                # Column loop
+                for j in range(columns):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images1, i, j, k, len_images1, reading_direction)
+                    k += 1
+
+        elif first_char == "V":
+            columns = len(template) - 1
+            panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+            # Batch loop (multi-batch support not yet implemented)
+            #for b in range(batches):
+            # Column loop
+            for j in range(columns):
+                rows = int(template[j+1])
+                panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+                # Row loop
+                for i in range(rows):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images1, i, j, k, len_images1, reading_direction)
+                    k += 1
+
+        # Add a border to the page
+        if border_thickness > 0:
+            page = ImageOps.expand(page, border_thickness, bg_color)
+
+        show_help = "example help text"
+
+        return (pil2tensor(page), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+'''
+class CR_ASCIIPattern:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "image": ("IMAGE",),
+                }
+        }
+
+    RETURN_TYPES = ("STRING", "STRING", )
+    
RETURN_NAMES = ("multiline_text", "show_help", ) + FUNCTION = "draw_pattern" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw_pattern(self, image): + + pixel_ascii_map = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$" + + im = image + x = list(im.getdata()) + for pixel in iter(x): + x = sum(pixel) // 3 # integer division + x = (x * len(pixel_ascii_map)) // 255 # rescaling + ascii_val = pixel_ascii_map[x] + + text_out = ascii_val + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-ascii-pattern" + + # Convert the PIL image back to a torch tensor + return (text_out, show_help, ) +''' +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Multi-Panel Meme Template": CR_MultiPanelMemeTemplate, + "CR Popular Meme Templates": CR_PopularMemeTemplates, + "CR Draw Perspective Text": CR_DrawPerspectiveText, + "CR Simple Annotations": CR_SimpleAnnotations, + "CR Apply Annotations": CR_ApplyAnnotations, + "CR Add Annotation": CR_AddAnnotation, + "CR Simple Image Watermark": CR_SimpleImageWatermark, + "CR Comic Panel Templates Advanced": CR_ComicPanelTemplatesAdvanced, + "CR ASCII Pattern": CR_ASCIIPattern, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/AlumniSansCollegiateOne-Regular.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/AlumniSansCollegiateOne-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..047451fa81fd6faaa8365ef45ddab3459b7ad655 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/AlumniSansCollegiateOne-Regular.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/NotoSansArabic-Regular.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/NotoSansArabic-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..79359c460b13e94945a8700227dabdc790091c0e Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/NotoSansArabic-Regular.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Oswald-Bold.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Oswald-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..b9c6e3767357f4bf5016a21dc0b6015d08be3c99 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Oswald-Bold.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/PixelifySans-Bold.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/PixelifySans-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..4d8aeb670dc228fe48a6e90366bae9f0d6930f4c Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/PixelifySans-Bold.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Quicksand-Bold.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Quicksand-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..07d5127c04b17a9a62121d12aeb00b701e920500 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Quicksand-Bold.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Roboto-Regular.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Roboto-Regular.ttf new file 
mode 100644 index 0000000000000000000000000000000000000000..3033308a695ab4efa60441c23f2c18aaa94c568f Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/Roboto-Regular.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/YoungSerif-Regular.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/YoungSerif-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..f454fbedd4c8b00833d17fffa1f89acb5149b02c Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/YoungSerif-Regular.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/comic.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/comic.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2d8e9ca9ce1216331f7d665090eb5678bacdd614 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/comic.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/impact.ttf b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/impact.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2675688cd0e23bec6295d5d218b8be276705468c Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/fonts/impact.ttf differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/CR Animation drop 10.JPG b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/CR Animation drop 10.JPG new file mode 100644 index 0000000000000000000000000000000000000000..fd7be15ec99c2a01d0dc3efce5a7de30ec36391c Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/CR Animation drop 10.JPG differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/Gradient Nodes 2.JPG b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/Gradient Nodes 2.JPG new file mode 100644 index 0000000000000000000000000000000000000000..fc0da59302128adf6020cc8a6bd841de7ae33a4e Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/images/Gradient Nodes 2.JPG differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/controlnet.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/controlnet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac3b373aa0c8ddd85856cb6f9aed998472729590 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/controlnet.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/conversion.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/conversion.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4023b237f2f173550216175302293c0cd318dfdd Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/conversion.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/graphics_functions.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/graphics_functions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6721ca7fdb98fc564b05c03e73701328a7e9bc17 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/graphics_functions.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/index.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/index.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..caa1245c2406fec9b47cec8c65edf33a17f010ca Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/index.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/legacy_nodes.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/legacy_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b67cf015734942d8be8a751ced532b60421c10a Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/legacy_nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/logic.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/logic.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b80abea3dc9ebe024dcd426ced5f36eac1a3cb1b Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/logic.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/lora.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/lora.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69f5ac6fee3fd99aa59fb8a183d39d61f0ebcc72 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/lora.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/matplot.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/matplot.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46d04c63e7c6c14dfb925066b2ab602cfc6a16c0 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/matplot.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/model_merge.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/model_merge.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def69b99d3089eecf774ab39912427cd7dfe6346 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/model_merge.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aece1cef55881e2d9d1d196e865567e7548c2970 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes_random.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes_random.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0fa751d6fb84bc1dea853da0ef025cbe1454457 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/nodes_random.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_filter.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_filter.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df86996ff5af051215192ac34f03bbf9e67ffd8f Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_filter.cpython-311.pyc 
differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_layout.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_layout.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..766e4d348a5236c6b1110b88a641ac3ee5ea5ed9 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_layout.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_pattern.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_pattern.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23a142ae2452d08d3f8f78b123399e2af56e8926 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_pattern.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_template.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_template.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae5f6a782295b4160d2c8560c9af1aaf393a8c7f Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_template.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_text.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_text.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..722798ee2dbeba9bb80e8b5cccce788e9b3c5af6 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pil_text.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pipe.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pipe.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b720015d1adb7f48592bc340fdae0d5e1ff162d Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/pipe.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/sdxl.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/sdxl.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac31dfc99feee5dd693d605c639e3cdeeabd335f Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/sdxl.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e809b3fa817f308b464dc3d1a01edd93afc9c13 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale_functions.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale_functions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55bc5fcd9c1fe0b427061c6a5bfee4c5482173d1 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/upscale_functions.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid.cpython-311.pyc 
b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b35bd93d4ed2b6578b3f389eda1a913c9c9ba3d Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid_functions.cpython-311.pyc b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid_functions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97a8868b268a422fca4bbeb548c359d549a12310 Binary files /dev/null and b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/__pycache__/xygrid_functions.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/controlnet.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e0bf86bf39c3b931572362042805ea0268853914 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/controlnet.py @@ -0,0 +1,174 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes # +# for ComfyUI https://github.com/comfyanonymous/ComfyUI # +#---------------------------------------------------------------------------------------------------------------------# + +import os +import sys +import comfy.controlnet +import comfy.sd +import folder_paths +from nodes import ControlNetApplyAdvanced +from ..categories import icons + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) + +#---------------------------------------------------------------------------------------------------------------------# +# This node will apply any type of ControlNet. +class CR_ApplyControlNet: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + "control_net": ("CONTROL_NET", ), + "image": ("IMAGE", ), + "switch": ([ + "On", + "Off"],), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}) + }} + RETURN_TYPES = ("CONDITIONING", "STRING", ) + RETURN_NAMES = ("CONDITIONING", "show_help", ) + FUNCTION = "apply_controlnet" + + CATEGORY = icons.get("Comfyroll/ControlNet") + + def apply_controlnet(self, conditioning, control_net, image, switch, strength): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/ControlNet-Nodes#cr-apply-controlnet" + if strength == 0 or switch == "Off": + return (conditioning, show_help, ) + + c = [] + control_hint = image.movedim(-1,1) + for t in conditioning: + n = [t[0], t[1].copy()] + c_net = control_net.copy().set_cond_hint(control_hint, strength) + if 'control' in t[1]: + c_net.set_previous_controlnet(t[1]['control']) + n[1]['control'] = c_net + c.append(n) + return (c, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# This node is a stack of controlnets each with their own switch. 
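+# Each entry in a CONTROL_NET_STACK is a 5-tuple:
+# (controlnet, image, strength, start_percent, end_percent).
+# A rough sketch of building the same structure by hand (the checkpoint filename
+# and image variable below are placeholders, not part of this package):
+#
+#   stack = []
+#   cn_path = folder_paths.get_full_path("controlnet", "my_controlnet.safetensors")
+#   cn = comfy.controlnet.load_controlnet(cn_path)
+#   stack.append((cn, hint_image, 1.0, 0.0, 1.0))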
+class CR_ControlNetStack: + + controlnets = ["None"] + folder_paths.get_filename_list("controlnet") + + @classmethod + def INPUT_TYPES(cls): + #controlnets = ["None"] + return {"required": { + }, + "optional": { + "switch_1": (["Off","On"],), + "controlnet_1": (cls.controlnets,), + "controlnet_strength_1": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "start_percent_1": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent_1": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + # + "switch_2": (["Off","On"],), + "controlnet_2": (cls.controlnets,), + "controlnet_strength_2": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "start_percent_2": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent_2": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + # + "switch_3": (["Off","On"],), + "controlnet_3": (cls.controlnets,), + "controlnet_strength_3": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "start_percent_3": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent_3": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "image_1": ("IMAGE",), + "image_2": ("IMAGE",), + "image_3": ("IMAGE",), + "controlnet_stack": ("CONTROL_NET_STACK",) + }, + } + + RETURN_TYPES = ("CONTROL_NET_STACK", "STRING", ) + RETURN_NAMES = ("CONTROLNET_STACK", "show_help", ) + FUNCTION = "controlnet_stacker" + CATEGORY = icons.get("Comfyroll/ControlNet") + + def controlnet_stacker(self, switch_1, controlnet_1, controlnet_strength_1, start_percent_1, end_percent_1, + switch_2, controlnet_2, controlnet_strength_2, start_percent_2, end_percent_2, + switch_3, controlnet_3, controlnet_strength_3, start_percent_3, end_percent_3, + image_1=None, image_2=None, image_3=None, controlnet_stack=None): + + # Initialise the list + controlnet_list= [] + + if controlnet_stack is not None: + controlnet_list.extend([l for l in controlnet_stack if l[0] != "None"]) + + if controlnet_1 != "None" and switch_1 == "On" and image_1 is not None: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_1) + controlnet_1 = comfy.controlnet.load_controlnet(controlnet_path) + controlnet_list.extend([(controlnet_1, image_1, controlnet_strength_1, start_percent_1, end_percent_1)]), + + if controlnet_2 != "None" and switch_2 == "On" and image_2 is not None: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_2) + controlnet_2 = comfy.controlnet.load_controlnet(controlnet_path) + controlnet_list.extend([(controlnet_2, image_2, controlnet_strength_2, start_percent_2, end_percent_2)]), + + if controlnet_3 != "None" and switch_3 == "On" and image_3 is not None: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_3) + controlnet_3 = comfy.controlnet.load_controlnet(controlnet_path) + controlnet_list.extend([(controlnet_3, image_3, controlnet_strength_3, start_percent_3, end_percent_3)]), + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/ControlNet-Nodes#cr-multi-controlnet-stack" + + return (controlnet_list, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# This applies the ControlNet stack. 
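+# A CONTROL_NET_STACK built by the node above plugs straight into this one; a
+# minimal sketch of the call (positive/negative/stack are placeholder variables):
+#
+#   pos, neg, _ = CR_ApplyControlNetStack().apply_controlnet_stack(positive, negative, "On", stack)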
+class CR_ApplyControlNetStack: + @classmethod + def INPUT_TYPES(s): + return {"required": {"base_positive": ("CONDITIONING", ), + "base_negative": ("CONDITIONING",), + "switch": (["Off","On"],), + "controlnet_stack": ("CONTROL_NET_STACK", ), + } + } + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "STRING", ) + RETURN_NAMES = ("base_pos", "base_neg", "show_help", ) + FUNCTION = "apply_controlnet_stack" + CATEGORY = icons.get("Comfyroll/ControlNet") + + def apply_controlnet_stack(self, base_positive, base_negative, switch, controlnet_stack=None,): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/ControlNet-Nodes#cr-apply-multi-controlnet-stack" + + if switch == "Off": + return (base_positive, base_negative, show_help, ) + + if controlnet_stack is not None: + for controlnet_tuple in controlnet_stack: + controlnet_name, image, strength, start_percent, end_percent = controlnet_tuple + + if type(controlnet_name) == str: + controlnet_path = folder_paths.get_full_path("controlnet", controlnet_name) + controlnet = comfy.sd.load_controlnet(controlnet_path) + else: + controlnet = controlnet_name + + controlnet_conditioning = ControlNetApplyAdvanced().apply_controlnet(base_positive, base_negative, + controlnet, image, strength, + start_percent, end_percent) + + base_positive, base_negative = controlnet_conditioning[0], controlnet_conditioning[1] + + return (base_positive, base_negative, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Apply ControlNet": CR_ApplyControlNet, + "CR Multi-ControlNet Stack":CR_ControlNetStack, + "CR Apply Multi-ControlNet":CR_ApplyControlNetStack, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/conversion.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..1a997bfb34f1cc2c1907ad70579e3755c1c12f9a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/conversion.py @@ -0,0 +1,183 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + +any = AnyType("*") + +#---------------------------------------------------------------------------------------------------------------------# +class CR_StringToNumber: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"text": ("STRING", {"multiline": False, "default": "text"}), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", "STRING", ) + RETURN_NAMES = ("INT", "FLOAT", "show_help", ) + FUNCTION = "convert" + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def convert(self, text): + + # Check if number + if text.replace('.','',1).isdigit(): + float_out = float(text) + int_out = int(float_out) + else: + print(f"[Error] CR String To Number. 
Not a number.") + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-string-to-number" + return (int_out, float_out, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_TextListToString: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text_list": ("STRING", {"forceInput": True}), + }, + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "joinlist" + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def joinlist(self, text_list): + + string_out = " ".join(text_list) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-text-list-to-string" + + return (string_out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# based on Repeater node by pythongosssss +class CR_StringToCombo: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": ("STRING", {"multiline": False, "default": "", "forceInput": True}), + }, + } + + RETURN_TYPES = (any, "STRING", ) + RETURN_NAMES = ("any", "show_help", ) + FUNCTION = "convert" + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def convert(self, text): + + text_list = list() + + if text != "": + values = text.split(',') + text_list = values[0] + print(text_list) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-string-to-combo" + + return (text_list, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# Cloned from Mikey Nodes +class CR_IntegerToString: + @classmethod + def INPUT_TYPES(s): + return {"required": {"int_": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("STRING","STRING", ) + RETURN_NAMES = ("STRING","show_help", ) + FUNCTION = 'convert' + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def convert(self, int_): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-integer-to-string" + return (f'{int_}', show_help, ) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# Cloned from Mikey Nodes +class CR_FloatToString: + @classmethod + def INPUT_TYPES(s): + return {"required": {"float_": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000000.0}), + } + } + + RETURN_TYPES = ('STRING', "STRING", ) + RETURN_NAMES = ('STRING', "show_help", ) + FUNCTION = 'convert' + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def convert(self, float_): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-float-to-string" + return (f'{float_}', show_help, ) + +#--------------------------------------------------------------------------------------------------------------------- +class CR_FloatToInteger: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"_float": ("FLOAT", {"default": 0.0})}} + + RETURN_TYPES = ("INT", "STRING", ) + RETURN_NAMES = ("INT", "show_help", ) + FUNCTION = "convert" + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def convert(self, _float): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-float-to-integer" + return 
(int(_float), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# This node is used to convert type Seed to type INT +class CR_SeedToInt: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "seed": ("SEED", ), + } + } + + RETURN_TYPES = ("INT", "STRING", ) + RETURN_NAMES = ("INT", "show_help", ) + FUNCTION = "seed_to_int" + CATEGORY = icons.get("Comfyroll/Utils/Conversion") + + def seed_to_int(self, seed): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Conversion-Nodes#cr-seed-to-int" + return (seed.get('seed'), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 10 nodes published +''' +NODE_CLASS_MAPPINGS = { + "CR String To Number":CR_StringToNumber, + "CR String To Combo":CR_StringToCombo, + "CR Float To String":CR_FloatToString, + "CR Float To Integer":CR_FloatToInteger, + "CR Integer To String":CR_IntegerToString, + "CR Text List To String":CR_TextListToString, + "CR Seed to Int": CR_SeedToInt, +} +''' + + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/graphics_functions.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/graphics_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..bcbffd02037a2999eeab04caae087360541dc818 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/graphics_functions.py @@ -0,0 +1,445 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import os +import random +from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance +from ..config import color_mapping + +font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") +file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + +def align_text(align, img_height, text_height, text_pos_y, margins): + if align == "center": + text_plot_y = img_height / 2 - text_height / 2 + text_pos_y + elif align == "top": + text_plot_y = text_pos_y + margins + elif align == "bottom": + text_plot_y = img_height - text_height + text_pos_y - margins + return text_plot_y + + +def justify_text(justify, img_width, line_width, margins): + if justify == "left": + text_plot_x = 0 + margins + elif justify == "right": + text_plot_x = img_width - line_width - margins + elif justify == "center": + text_plot_x = img_width/2 - line_width/2 + return text_plot_x + + +def get_text_size(draw, text, font): + bbox = draw.textbbox((0, 0), text, font=font) + + # Calculate the text width and height + text_width = bbox[2] - bbox[0] + text_height = bbox[3] - bbox[1] + return text_width, text_height + + +def draw_masked_text(text_mask, text, + font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options): 
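+    # text_mask is a greyscale ("L" mode) PIL image that the text is drawn into with fill=255;
+    # align/justify are resolved by the align_text()/justify_text() helpers above, and
+    # rotation_options ("text center" or "image center") picks the pivot for the final rotate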
+ + # Create the drawing context + draw = ImageDraw.Draw(text_mask) + + # Define font settings + font_folder = "fonts" + font_file = os.path.join(font_folder, font_name) + resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file) + font = ImageFont.truetype(str(resolved_font_path), size=font_size) + + # Split the input text into lines + text_lines = text.split('\n') + + # Calculate the size of the text plus padding for the tallest line + max_text_width = 0 + max_text_height = 0 + + for line in text_lines: + # Calculate the width and height of the current line + line_width, line_height = get_text_size(draw, line, font) + + line_height = line_height + line_spacing + max_text_width = max(max_text_width, line_width) + max_text_height = max(max_text_height, line_height) + + # Get the image width and height + image_width, image_height = text_mask.size + image_center_x = image_width / 2 + image_center_y = image_height / 2 + + text_pos_y = position_y + sum_text_plot_y = 0 + text_height = max_text_height * len(text_lines) + + for line in text_lines: + # Calculate the width of the current line + line_width, _ = get_text_size(draw, line, font) + + # Get the text x and y positions for each line + text_plot_x = position_x + justify_text(justify, image_width, line_width, margins) + text_plot_y = align_text(align, image_height, text_height, text_pos_y, margins) + + # Add the current line to the text mask + draw.text((text_plot_x, text_plot_y), line, fill=255, font=font) + + text_pos_y += max_text_height # Move down for the next line + sum_text_plot_y += text_plot_y # Sum the y positions + + # Calculate centers for rotation + text_center_x = text_plot_x + max_text_width / 2 + text_center_y = sum_text_plot_y / len(text_lines) + + if rotation_options == "text center": + rotated_text_mask = text_mask.rotate(rotation_angle, center=(text_center_x, text_center_y)) + elif rotation_options == "image center": + rotated_text_mask = text_mask.rotate(rotation_angle, center=(image_center_x, image_center_y)) + + return rotated_text_mask + +def draw_text_on_image(draw, y_position, bar_width, bar_height, text, font, text_color, font_outline): + + # Calculate the width and height of the text + text_width, text_height = get_text_size(draw, text, font) + + if font_outline == "thin": + outline_thickness = text_height // 40 + elif font_outline == "thick": + outline_thickness = text_height // 20 + elif font_outline == "extra thick": + outline_thickness = text_height // 10 + + outline_color = (0, 0, 0) + + text_lines = text.split('\n') + + if len(text_lines) == 1: + x = (bar_width - text_width) // 2 + y = y_position + (bar_height - text_height) // 2 - (bar_height * 0.10) + if font_outline == "none": + draw.text((x, y), text, fill=text_color, font=font) + else: + draw.text((x, y), text, fill=text_color, font=font, stroke_width=outline_thickness, stroke_fill='black') + elif len(text_lines) > 1: + # Calculate the width and height of the text + text_width, text_height = get_text_size(draw, text_lines[0], font) + + x = (bar_width - text_width) // 2 + y = y_position + (bar_height - text_height * 2) // 2 - (bar_height * 0.15) + if font_outline == "none": + draw.text((x, y), text_lines[0], fill=text_color, font=font) + else: + draw.text((x, y), text_lines[0], fill=text_color, font=font, stroke_width=outline_thickness, stroke_fill='black') + + # Calculate the width and height of the text + text_width, text_height = get_text_size(draw, text_lines[1], font) + + x = (bar_width - text_width) 
// 2 + y = y_position + (bar_height - text_height * 2) // 2 + text_height - (bar_height * 0.00) + if font_outline == "none": + draw.text((x, y), text_lines[1], fill=text_color, font=font) + else: + draw.text((x, y), text_lines[1], fill=text_color, font=font, stroke_width=outline_thickness, stroke_fill='black') + + +def get_font_size(draw, text, max_width, max_height, font_path, max_font_size): + + # Adjust the max-width to allow for start and end padding + max_width = max_width * 0.9 + + # Start with the maximum font size + font_size = max_font_size + font = ImageFont.truetype(str(font_path), size=font_size) + + # Get the first two lines + text_lines = text.split('\n')[:2] + + if len(text_lines) == 2: + font_size = min(max_height//2, max_font_size) + font = ImageFont.truetype(str(font_path), size=font_size) + + # Calculate max text width and height with the current font + max_text_width = 0 + longest_line = text_lines[0] + for line in text_lines: + # Calculate the width and height of the current line + line_width, line_height = get_text_size(draw, line, font) + + if line_width > max_text_width: + longest_line = line + max_text_width = max(max_text_width, line_width) + + # Calculate the width and height of the text + text_width, text_height = get_text_size(draw, text, font) + + # Decrease the font size until it fits within the bounds + while max_text_width > max_width or text_height > 0.88 * max_height / len(text_lines): + font_size -= 1 + font = ImageFont.truetype(str(font_path), size=font_size) + max_text_width, text_height = get_text_size(draw, longest_line, font) + + return font + + +def hex_to_rgb(hex_color): + hex_color = hex_color.lstrip('#') # Remove the '#' character, if present + r = int(hex_color[0:2], 16) + g = int(hex_color[2:4], 16) + b = int(hex_color[4:6], 16) + return (r, g, b) + + +def text_panel(image_width, image_height, text, + font_name, font_size, font_color, + font_outline_thickness, font_outline_color, + background_color, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options): + + """ + Create an image with text overlaid on a background. + + Returns: + PIL.Image.Image: Image with text overlaid on the background. 
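+
+    Rotation and outlining are delegated to the draw_text() helper defined below.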
+ """ + + # Create PIL images for the text and background layers and text mask + size = (image_width, image_height) + panel = Image.new('RGB', size, background_color) + + # Draw the text on the text mask + image_out = draw_text(panel, text, + font_name, font_size, font_color, + font_outline_thickness, font_outline_color, + background_color, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options) + + return image_out + + +def draw_text(panel, text, + font_name, font_size, font_color, + font_outline_thickness, font_outline_color, + bg_color, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options): + + # Create the drawing context + draw = ImageDraw.Draw(panel) + + # Define font settings + font_folder = "fonts" + font_file = os.path.join(font_folder, font_name) + resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file) + font = ImageFont.truetype(str(resolved_font_path), size=font_size) + + # Split the input text into lines + text_lines = text.split('\n') + + # Calculate the size of the text plus padding for the tallest line + max_text_width = 0 + max_text_height = 0 + + for line in text_lines: + # Calculate the width and height of the current line + line_width, line_height = get_text_size(draw, line, font) + + line_height = line_height + line_spacing + max_text_width = max(max_text_width, line_width) + max_text_height = max(max_text_height, line_height) + + # Get the image center + image_center_x = panel.width / 2 + image_center_y = panel.height / 2 + + text_pos_y = position_y + sum_text_plot_y = 0 + text_height = max_text_height * len(text_lines) + + for line in text_lines: + # Calculate the width and height of the current line + line_width, line_height = get_text_size(draw, line, font) + + # Get the text x and y positions for each line + text_plot_x = position_x + justify_text(justify, panel.width, line_width, margins) + text_plot_y = align_text(align, panel.height, text_height, text_pos_y, margins) + + # Add the current line to the text mask + draw.text((text_plot_x, text_plot_y), line, fill=font_color, font=font, stroke_width=font_outline_thickness, stroke_fill=font_outline_color) + + text_pos_y += max_text_height # Move down for the next line + sum_text_plot_y += text_plot_y # Sum the y positions + + text_center_x = text_plot_x + max_text_width / 2 + text_center_y = sum_text_plot_y / len(text_lines) + + if rotation_options == "text center": + rotated_panel = panel.rotate(rotation_angle, center=(text_center_x, text_center_y), resample=Image.BILINEAR) + elif rotation_options == "image center": + rotated_panel = panel.rotate(rotation_angle, center=(image_center_x, image_center_y), resample=Image.BILINEAR) + + return rotated_panel + + +def combine_images(images, layout_direction='horizontal'): + """ + Combine a list of PIL Image objects either horizontally or vertically. + + Args: + images (list of PIL.Image.Image): List of PIL Image objects to combine. + layout_direction (str): 'horizontal' for horizontal layout, 'vertical' for vertical layout. + + Returns: + PIL.Image.Image: Combined image. 
+ """ + + if layout_direction == 'horizontal': + combined_width = sum(image.width for image in images) + combined_height = max(image.height for image in images) + else: + combined_width = max(image.width for image in images) + combined_height = sum(image.height for image in images) + + combined_image = Image.new('RGB', (combined_width, combined_height)) + + x_offset = 0 + y_offset = 0 # Initialize y_offset for vertical layout + for image in images: + combined_image.paste(image, (x_offset, y_offset)) + if layout_direction == 'horizontal': + x_offset += image.width + else: + y_offset += image.height + + return combined_image + + +def apply_outline_and_border(images, outline_thickness, outline_color, border_thickness, border_color): + for i, image in enumerate(images): + # Apply the outline + if outline_thickness > 0: + image = ImageOps.expand(image, outline_thickness, fill=outline_color) + + # Apply the border + if border_thickness > 0: + image = ImageOps.expand(image, border_thickness, fill=border_color) + + images[i] = image + + return images + + +def get_color_values(color, color_hex, color_mapping): + + #Get RGB values for the text and background colors. + + if color == "custom": + color_rgb = hex_to_rgb(color_hex) + else: + color_rgb = color_mapping.get(color, (0, 0, 0)) # Default to black if the color is not found + + return color_rgb + + +def hex_to_rgb(hex_color): + hex_color = hex_color.lstrip('#') # Remove the '#' character, if present + r = int(hex_color[0:2], 16) + g = int(hex_color[2:4], 16) + b = int(hex_color[4:6], 16) + return (r, g, b) + + +def crop_and_resize_image(image, target_width, target_height): + width, height = image.size + aspect_ratio = width / height + target_aspect_ratio = target_width / target_height + + if aspect_ratio > target_aspect_ratio: + # Crop the image's width to match the target aspect ratio + crop_width = int(height * target_aspect_ratio) + crop_height = height + left = (width - crop_width) // 2 + top = 0 + else: + # Crop the image's height to match the target aspect ratio + crop_height = int(width / target_aspect_ratio) + crop_width = width + left = 0 + top = (height - crop_height) // 2 + + # Perform the center cropping + cropped_image = image.crop((left, top, left + crop_width, top + crop_height)) + + return cropped_image + + +def create_and_paste_panel(page, border_thickness, outline_thickness, + panel_width, panel_height, page_width, + panel_color, bg_color, outline_color, + images, i, j, k, len_images, reading_direction): + panel = Image.new("RGB", (panel_width, panel_height), panel_color) + if k < len_images: + img = images[k] + image = crop_and_resize_image(img, panel_width, panel_height) + image.thumbnail((panel_width, panel_height), Image.Resampling.LANCZOS) + panel.paste(image, (0, 0)) + panel = ImageOps.expand(panel, border=outline_thickness, fill=outline_color) + panel = ImageOps.expand(panel, border=border_thickness, fill=bg_color) + new_panel_width, new_panel_height = panel.size + if reading_direction == "right to left": + page.paste(panel, (page_width - (j + 1) * new_panel_width, i * new_panel_height)) + else: + page.paste(panel, (j * new_panel_width, i * new_panel_height)) + + +def reduce_opacity(img, opacity): + """Returns an image with reduced opacity.""" + assert opacity >= 0 and opacity <= 1 + if img.mode != 'RGBA': + img = img.convert('RGBA') + else: + img = img.copy() + alpha = img.split()[3] + alpha = ImageEnhance.Brightness(alpha).enhance(opacity) + img.putalpha(alpha) + return img + + +def random_hex_color(): + # Generate 
three random values for RGB
+    r = random.randint(0, 255)
+    g = random.randint(0, 255)
+    b = random.randint(0, 255)
+
+    # Convert RGB to hex format
+    hex_color = "#{:02x}{:02x}{:02x}".format(r, g, b)
+
+    return hex_color
+
+
+def random_rgb():
+    # Generate three random values for RGB
+    r = random.randint(0, 255)
+    g = random.randint(0, 255)
+    b = random.randint(0, 255)
+
+    # Format RGB as a string in the format "128,128,128"
+    rgb_string = "{},{},{}".format(r, g, b)
+
+    return rgb_string
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/index.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..f26f84684c5c4ce7c359a9a5e583b4c0dca3ad66
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/index.py
@@ -0,0 +1,129 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# Comfyroll Nodes by RockOfFire and Akatsuzi   https://github.com/Suzie1/CR-Animation-Nodes
+# for ComfyUI                                  https://github.com/comfyanonymous/ComfyUI
+#---------------------------------------------------------------------------------------------------------------------#
+
+from ..categories import icons
+
+#---------------------------------------------------------------------------------------------------------------------
+class CR_Trigger:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"index": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1}),
+                             "trigger_value": ("INT", {"default": 1, "min": 0, "max": 10000}),
+                            },
+               }
+
+    RETURN_TYPES = ("INT", "BOOLEAN", "STRING", )
+    RETURN_NAMES = ("index", "trigger", "show_help", )
+    FUNCTION = "trigger"
+    CATEGORY = icons.get("Comfyroll/Utils/Index")
+
+    def trigger(self, index, trigger_value):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Index-Nodes#cr-trigger"
+        return (index, index == trigger_value, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------
+class CR_Index:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"index": ("INT", {"default": 1, "min": 0, "max": 10000}),
+                             "print_to_console": (["Yes", "No"],),
+                            },
+               }
+
+    RETURN_TYPES = ("INT", "STRING", )
+    RETURN_NAMES = ("INT", "show_help", )
+    FUNCTION = "index"
+    CATEGORY = icons.get("Comfyroll/Utils/Index")
+
+    def index(self, index, print_to_console):
+
+        if print_to_console == "Yes":
+            print(f"[Info] CR Index: {index}")
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Index-Nodes#cr-index"
+        return (index, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------
+class CR_IncrementIndex:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "index": ("INT", {"default": 1, "min": -10000, "max": 10000}),
+                    "interval": ("INT", {"default": 1, "min": -10000, "max": 10000}),
+                }
+        }
+
+    RETURN_TYPES = ("INT", "INT", "STRING", )
+    RETURN_NAMES = ("index", "interval", "show_help", )
+    FUNCTION = "increment"
+    CATEGORY = icons.get("Comfyroll/Utils/Index")
+
+    def increment(self, index, interval):
+        index += interval
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Index-Nodes#cr-index-increment"
+        # Return the interval as well, so the outputs match RETURN_TYPES/RETURN_NAMES above
+        return (index, interval, show_help, )
+
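+#---------------------------------------------------------------------------------------------------------------------
+# Hedged usage sketch (illustration only, not a registered node): outside ComfyUI these classes are plain
+# Python, so the index logic can be exercised directly with arbitrary example values.
+def _index_nodes_smoke_test():
+    index, triggered, _help = CR_Trigger().trigger(index=3, trigger_value=3)
+    assert triggered is True
+    index, interval, _help = CR_IncrementIndex().increment(index=index, interval=2)
+    assert (index, interval) == (5, 2)
+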
+#---------------------------------------------------------------------------------------------------------------------# +class CR_MultiplyIndex: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "index": ("INT", {"default": 1, "min": 0, "max": 10000}), + "factor": ("INT", {"default": 1, "min": 0, "max": 10000}), + } + } + + + RETURN_TYPES = ("INT", "INT", "STRING", ) + RETURN_NAMES = ("index", "factor", "show_help", ) + FUNCTION = "multiply" + CATEGORY = icons.get("Comfyroll/Utils/Index") + + def multiply(self, index, factor): + index = index * factor + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Index-Nodes#cr-index-multiply" + return (index, factor, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_IndexReset: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "index": ("INT", {"default": 1, "min": 0, "max": 10000}), + "reset_to": ("INT", {"default": 1, "min": 0, "max": 10000}), + } + } + + + RETURN_TYPES = ("INT", "INT", "STRING", ) + RETURN_NAMES = ("index", "reset_to", "show_help", ) + FUNCTION = "reset" + CATEGORY = icons.get("Comfyroll/Utils/Index") + + def reset(self, index, reset_to): + index = reset_to + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Index-Nodes#cr-index-reset" + return (index, reset_to, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + # Index + "CR Index":CR_Index, + "CR Index Increment":CR_IncrementIndex, + "CR Index Multiply":CR_MultiplyIndex, + "CR Index Reset":CR_IndexReset, + "CR Trigger":CR_Trigger, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/legacy_nodes.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/legacy_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6fa3ff5d76bb4d22f5c390372040fcf3a46cc6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/legacy_nodes.py @@ -0,0 +1,93 @@ +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes # +# for ComfyUI https://github.com/comfyanonymous/ComfyUI # +#---------------------------------------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +class CR_ImageSize: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "width": ("INT", {"default": 512, "min": 64, "max": 2048}), + "height": ("INT", {"default": 512, "min": 64, "max": 2048}), + "upscale_factor": ("FLOAT", {"default": 1, "min": 1, "max": 2000}) + } + } + RETURN_TYPES = ("INT", "INT", "FLOAT", "STRING", ) + RETURN_NAMES = ("Width", "Height", "upscale_factor", "show_help", ) + FUNCTION = "ImageSize" + CATEGORY = icons.get("Comfyroll/Other/Legacy") + + def ImageSize(self, width, height, upscale_factor): + 
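+        # Pass-through node: the inputs are returned unchanged (with a wiki link appended), so one
+        # node can feed width/height and the upscale factor to several consumers.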
show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Legacy-Nodes#cr-image-size" + return(width, height, upscale_factor, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +class CR_AspectRatio_SDXL: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "width": ("INT", {"default": 1024, "min": 64, "max": 2048}), + "height": ("INT", {"default": 1024, "min": 64, "max": 2048}), + "aspect_ratio": (["custom", "1:1 square 1024x1024", "3:4 portrait 896x1152", "5:8 portrait 832x1216", "9:16 portrait 768x1344", "9:21 portrait 640x1536", "4:3 landscape 1152x896", "3:2 landscape 1216x832", "16:9 landscape 1344x768", "21:9 landscape 1536x640"],), + "swap_dimensions": (["Off", "On"],), + "upscale_factor1": ("FLOAT", {"default": 1, "min": 1, "max": 2000}), + "upscale_factor2": ("FLOAT", {"default": 1, "min": 1, "max": 2000}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}) + } + } + RETURN_TYPES = ("INT", "INT", "FLOAT", "FLOAT", "INT", "STRING", ) + RETURN_NAMES = ("INT", "INT", "FLOAT", "FLOAT", "INT", "show_help", ) + #RETURN_NAMES = ("Width", "Height") + FUNCTION = "Aspect_Ratio" + + CATEGORY = icons.get("Comfyroll/Other/Legacy") + + def Aspect_Ratio(self, width, height, aspect_ratio, swap_dimensions, upscale_factor1, upscale_factor2, batch_size): + if aspect_ratio == "1:1 square 1024x1024": + width, height = 1024, 1024 + elif aspect_ratio == "3:4 portrait 896x1152": + width, height = 896, 1152 + elif aspect_ratio == "5:8 portrait 832x1216": + width, height = 832, 1216 + elif aspect_ratio == "9:16 portrait 768x1344": + width, height = 768, 1344 + elif aspect_ratio == "9:21 portrait 640x1536": + width, height = 640, 1536 + elif aspect_ratio == "4:3 landscape 1152x896": + width, height = 1152, 896 + elif aspect_ratio == "3:2 landscape 1216x832": + width, height = 1216, 832 + elif aspect_ratio == "16:9 landscape 1344x768": + width, height = 1344, 768 + elif aspect_ratio == "21:9 landscape 1536x640": + width, height = 1536, 640 + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Legacy-Nodes#cr-aspect-ratio-sdxl" + + if swap_dimensions == "On": + return(height, width, upscale_factor1, upscale_factor2, batch_size,show_help,) + else: + return(width, height, upscale_factor1, upscale_factor2, batch_size,show_help,) + +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Image Size": CR_ImageSize, + "CR Aspect Ratio SDXL": CR_AspectRatio_SDXL, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/logic.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..b805b5fabc684df5f1e81acf69b27c4169b059d3 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/logic.py @@ -0,0 +1,436 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI 
https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# Logic Switches +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImageInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "image1": ("IMAGE",), + "image2": ("IMAGE",) + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, image1, image2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-image-input-switch" + if Input == 1: + return (image1, show_help, ) + else: + return (image2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LatentInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "latent1": ("LATENT",), + "latent2": ("LATENT",) + } + } + + RETURN_TYPES = ("LATENT", "STRING", ) + RETURN_NAMES = ("LATENT", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, latent1, latent2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-latent-input-switch" + if Input == 1: + return (latent1, show_help, ) + else: + return (latent2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ConditioningInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "conditioning1": ("CONDITIONING",), + "conditioning2": ("CONDITIONING",) + } + } + + RETURN_TYPES = ("CONDITIONING", "STRING", ) + RETURN_NAMES = ("CONDITIONING", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, conditioning1, conditioning2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-conditioning-input-switch" + if Input == 1: + return (conditioning1, show_help, ) + else: + return (conditioning2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ClipInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "clip1": ("CLIP",), + "clip2": ("CLIP",) + } + } + + RETURN_TYPES = ("CLIP", "STRING", ) + RETURN_NAMES = ("CLIP", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, clip1, clip2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-clip-input-switch" + if Input == 1: + return (clip1, show_help, ) + else: + return (clip2, show_help, ) + 
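+#---------------------------------------------------------------------------------------------------------------------#
+# Hedged usage sketch (illustration only, not a registered node): each two-way switch above shares the same
+# routing logic, so it can be checked with plain placeholder values instead of real IMAGE/LATENT objects.
+def _two_way_switch_demo():
+    picked, _help = CR_ImageInputSwitch().switch(Input=2, image1="image-A", image2="image-B")
+    return picked  # -> "image-B"
+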
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ModelInputSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": ("INT", {"default": 1, "min": 1, "max": 2}),
+                "model1": ("MODEL",),
+                "model2": ("MODEL",)
+            }
+        }
+
+    RETURN_TYPES = ("MODEL", "STRING", )
+    RETURN_NAMES = ("MODEL", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Logic")
+
+    def switch(self, Input, model1, model2):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-model-input-switch"
+        if Input == 1:
+            return (model1, show_help, )
+        else:
+            return (model2, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+# This is an input switch for ControlNet. Pick an input and that ControlNet will be the one used in the workflow.
+class CR_ControlNetInputSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": ("INT", {"default": 1, "min": 1, "max": 2}),
+                "control_net1": ("CONTROL_NET",),
+                "control_net2": ("CONTROL_NET",)
+            }
+        }
+
+    RETURN_TYPES = ("CONTROL_NET", "STRING", )
+    RETURN_NAMES = ("CONTROL_NET", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Logic")
+
+    def switch(self, Input, control_net1, control_net2):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-controlnet-input-switch"
+        if Input == 1:
+            return (control_net1, show_help, )
+        else:
+            return (control_net2, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+# This is an input switch for text. Pick an input and that text will be the one used in the workflow.
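+# Note: both text inputs are required, so ComfyUI will generally still evaluate both upstream branches;
+# the switch only selects which result is passed downstream.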
+class CR_TextInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "text1": ("STRING", {"forceInput": True}), + "text2": ("STRING", {"forceInput": True}), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, text1, text2,): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-text-input-switch" + if Input == 1: + return (text1, show_help, ) + else: + return (text2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_VAEInputSwitch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "VAE1": ("VAE", {"forceInput": True}), + "VAE2": ("VAE", {"forceInput": True}), + } + } + + RETURN_TYPES = ("VAE", "STRING", ) + RETURN_NAMES = ("VAE", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, VAE1, VAE2,): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-vae-input-switch" + if Input == 1: + return (VAE1, show_help, ) + else: + return (VAE2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImageInputSwitch4way: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 4}), + "image1": ("IMAGE",), + }, + "optional": { + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, image1, image2=None, image3=None, image4=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-text-input-switch-4-way" + if Input == 1: + return (image1, show_help, ) + elif Input == 2: + return (image2, show_help, ) + elif Input == 3: + return (image3, show_help, ) + else: + return (image4, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_TextInputSwitch4way: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 4}), + "text1": ("STRING", {"forceInput": True}), + }, + "optional": { + "text2": ("STRING", {"forceInput": True}), + "text3": ("STRING", {"forceInput": True}), + "text4": ("STRING", {"forceInput": True}), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("STRING", "show_help", ) + FUNCTION = "switch" + CATEGORY = icons.get("Comfyroll/Utils/Logic") + + def switch(self, Input, text1, text2=None, text3=None, text4=None): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-text-input-switch-4-way" + if Input == 1: + return (text1, show_help, ) + elif Input == 2: + return (text2, show_help, ) + elif Input == 3: + return (text3, show_help, ) + else: + return (text4, show_help, ) + 
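+#---------------------------------------------------------------------------------------------------------------------#
+# Hedged usage sketch (illustration only, not a registered node): the 4-way switches route one of up to four
+# inputs; unconnected optional inputs arrive as None, so an unwired selection returns None rather than raising.
+def _four_way_switch_demo():
+    text, _help = CR_TextInputSwitch4way().switch(Input=3, text1="a", text2="b", text3="c", text4="d")
+    return text  # -> "c"
+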
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ModelAndCLIPInputSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": ("INT", {"default": 1, "min": 1, "max": 2}),
+                "model1": ("MODEL",),
+                "clip1": ("CLIP",),
+                "model2": ("MODEL",),
+                "clip2": ("CLIP",)
+            }
+        }
+
+    RETURN_TYPES = ("MODEL", "CLIP", "STRING", )
+    RETURN_NAMES = ("MODEL", "CLIP", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Logic")
+
+    def switch(self, Input, clip1, clip2, model1, model2):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Logic-Nodes#cr-switch-model-and-clip"
+        if Input == 1:
+            return (model1, clip1, show_help, )
+        else:
+            return (model2, clip2, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+# Process switches
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_Img2ImgProcessSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": (["txt2img", "img2img"],),
+                "txt2img": ("LATENT",),
+                "img2img": ("LATENT",)
+            }
+        }
+
+    RETURN_TYPES = ("LATENT", "STRING", )
+    RETURN_NAMES = ("LATENT", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Process")
+
+    def switch(self, Input, txt2img, img2img):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Process-Nodes#cr-img2img-process-switch"
+        if Input == "txt2img":
+            return (txt2img, show_help, )
+        else:
+            return (img2img, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_HiResFixProcessSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": (["latent_upscale", "image_upscale"],),
+                "latent_upscale": ("LATENT",),
+                "image_upscale": ("LATENT",)
+            }
+        }
+
+    RETURN_TYPES = ("LATENT", "STRING", )
+    RETURN_NAMES = ("LATENT", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Process")
+
+    def switch(self, Input, latent_upscale, image_upscale):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Process-Nodes#cr-hires-fix-process-switch"
+        if Input == "latent_upscale":
+            return (latent_upscale, show_help, )
+        else:
+            return (image_upscale, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_BatchProcessSwitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "Input": (["image", "image batch"],),
+                "image": ("IMAGE", ),
+                "image_batch": ("IMAGE", )
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("IMAGE", "show_help", )
+    FUNCTION = "switch"
+    CATEGORY = icons.get("Comfyroll/Utils/Process")
+
+    def switch(self, Input, image, image_batch):
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Process-Nodes#cr-batch-process-switch"
+        if Input == "image":
+            return (image, show_help, )
+        else:
+            return (image_batch, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+# MAPPINGS
+#---------------------------------------------------------------------------------------------------------------------#
+# For reference only, actual mappings are in __init__.py
+'''
+NODE_CLASS_MAPPINGS = {
+    # Logic switches
+    "CR Image Input Switch": CR_ImageInputSwitch,
+    "CR Latent Input Switch": CR_LatentInputSwitch,
+    "CR Conditioning Input Switch": CR_ConditioningInputSwitch,
+    "CR Clip Input Switch": CR_ClipInputSwitch,
+    "CR Model Input Switch": CR_ModelInputSwitch,
+    "CR ControlNet Input Switch": CR_ControlNetInputSwitch,
+    "CR Text Input Switch": CR_TextInputSwitch,
+    "CR VAE Input Switch": CR_VAEInputSwitch,
+    "CR Switch Model and CLIP": CR_ModelAndCLIPInputSwitch,
+    # 4-way switches
+    "CR Image Input Switch (4 way)": CR_ImageInputSwitch4way,
+    "CR Text Input Switch (4 way)": CR_TextInputSwitch4way,
+    # Process switches
+    "CR Img2Img Process Switch": CR_Img2ImgProcessSwitch,
+    "CR Hires Fix Process Switch": CR_HiResFixProcessSwitch,
+    "CR Batch Process Switch": CR_BatchProcessSwitch,
+}
+'''
+
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/lora.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0612eafd68e44e89e5914bc7ca20c6d9c1dc1a7
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/lora.py
@@ -0,0 +1,170 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# Comfyroll Custom Nodes by RockOfFire and Akatsuzi   https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes          #
+# for ComfyUI                                         https://github.com/comfyanonymous/ComfyUI                        #
+#---------------------------------------------------------------------------------------------------------------------#
+
+import os
+import sys
+import comfy.sd
+import comfy.utils
+import folder_paths
+from ..categories import icons
+
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
+
+#---------------------------------------------------------------------------------------------------------------------#
+# LoRA Nodes
+#---------------------------------------------------------------------------------------------------------------------#
+# This is a load lora node with an added switch to turn it on or off. On will add the lora and off will skip the node.
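+# It also caches the most recently loaded LoRA file (self.loaded_lora), so re-queueing a prompt with the
+# same lora_name does not re-read the file from disk.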
+class CR_LoraLoader: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + file_list = folder_paths.get_filename_list("loras") + file_list.insert(0, "None") + return {"required": { "model": ("MODEL",), + "clip": ("CLIP", ), + "switch": (["On","Off"],), + "lora_name": (file_list, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL", "CLIP", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "show_help", ) + FUNCTION = "load_lora" + CATEGORY = icons.get("Comfyroll/LoRA") + + def load_lora(self, model, clip, switch, lora_name, strength_model, strength_clip): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/LoRA-Nodes#cr-load-lora" + if strength_model == 0 and strength_clip == 0: + return (model, clip, show_help, ) + + if switch == "Off" or lora_name == "None": + return (model, clip, show_help, ) + + lora_path = folder_paths.get_full_path("loras", lora_name) + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + del self.loaded_lora + + if lora is None: + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) + return (model_lora, clip_lora, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# Based on Efficiency Nodes +# This is a lora stack where a single node has 3 different loras each with their own switch +class CR_LoRAStack: + + @classmethod + def INPUT_TYPES(cls): + + loras = ["None"] + folder_paths.get_filename_list("loras") + + return {"required": { + "switch_1": (["Off","On"],), + "lora_name_1": (loras,), + "model_weight_1": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "clip_weight_1": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "switch_2": (["Off","On"],), + "lora_name_2": (loras,), + "model_weight_2": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "clip_weight_2": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "switch_3": (["Off","On"],), + "lora_name_3": (loras,), + "model_weight_3": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "clip_weight_3": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }, + "optional": {"lora_stack": ("LORA_STACK",) + }, + } + + RETURN_TYPES = ("LORA_STACK", "STRING", ) + RETURN_NAMES = ("LORA_STACK", "show_help", ) + FUNCTION = "lora_stacker" + CATEGORY = icons.get("Comfyroll/LoRA") + + def lora_stacker(self, lora_name_1, model_weight_1, clip_weight_1, switch_1, lora_name_2, model_weight_2, clip_weight_2, switch_2, lora_name_3, model_weight_3, clip_weight_3, switch_3, lora_stack=None): + + # Initialise the list + lora_list=list() + + if lora_stack is not None: + lora_list.extend([l for l in lora_stack if l[0] != "None"]) + + if lora_name_1 != "None" and switch_1 == "On": + lora_list.extend([(lora_name_1, model_weight_1, clip_weight_1)]), + + if lora_name_2 != "None" and switch_2 == "On": + lora_list.extend([(lora_name_2, model_weight_2, clip_weight_2)]), + + if lora_name_3 != "None" and switch_3 == "On": + lora_list.extend([(lora_name_3, model_weight_3, clip_weight_3)]), + + 
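+        # At this point lora_list holds a (lora_name, model_weight, clip_weight) tuple for every active slot,
+        # with any entries from an incoming LORA_STACK preserved at the front.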
show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/LoRA-Nodes#cr-lora-stack" + + return (lora_list, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# This applies the lora stack. +class CR_ApplyLoRAStack: + + @classmethod + def INPUT_TYPES(cls): + return {"required": {"model": ("MODEL",), + "clip": ("CLIP", ), + "lora_stack": ("LORA_STACK", ), + } + } + + RETURN_TYPES = ("MODEL", "CLIP", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "show_help", ) + FUNCTION = "apply_lora_stack" + CATEGORY = icons.get("Comfyroll/LoRA") + + def apply_lora_stack(self, model, clip, lora_stack=None,): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/LoRA-Nodes#cr-apply-lora-stack" + + # Initialise the list + lora_params = list() + + # Extend lora_params with lora-stack items + if lora_stack: + lora_params.extend(lora_stack) + else: + return (model, clip, show_help,) + + # Initialise the model and clip + model_lora = model + clip_lora = clip + + # Loop through the list + for tup in lora_params: + lora_name, strength_model, strength_clip = tup + + lora_path = folder_paths.get_full_path("loras", lora_name) + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + + model_lora, clip_lora = comfy.sd.load_lora_for_models(model_lora, clip_lora, lora, strength_model, strength_clip) + + return (model_lora, clip_lora, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Load LoRA": CR_LoraLoader, + "CR LoRA Stack":CR_LoRAStack, + "CR Apply LoRA Stack":CR_ApplyLoRAStack, +} +''' diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/matplot.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/matplot.py new file mode 100644 index 0000000000000000000000000000000000000000..42b793460ed184b51ef4fbb9cbc07626d8269a3d --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/matplot.py @@ -0,0 +1,848 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import torch +import numpy as np +import os +import sys +import io +import folder_paths +from PIL import Image +from ..categories import icons + +try: + import matplotlib.pyplot as plt +except ImportError: + import pip + pip.main(['install', 'matplotlib']) + import matplotlib.pyplot as plt + +from matplotlib.patches import RegularPolygon + +#---------------------------------------------------------------------------------------------------------------------# + +#icons = { +# "Comfyroll/Graphics/Pattern": "🧩 Comfyroll/👾 Graphics/🌈 Pattern", +#} + +# Dictionary to map color names to RGB values +color_mapping = { + "white": (255, 255, 255), + "black": (0, 0, 0), + "red": (255, 0, 0), + "green": (0, 255, 0), + "blue": (0, 0, 255), + "yellow": (255, 255, 0), + "cyan": (0, 255, 255), + "magenta": (255, 0, 255), + "orange": (255, 165, 0), + "purple": (128, 0, 128), 
+ "pink": (255, 192, 203), + "brown": (165, 42, 42), + "gray": (128, 128, 128), + "lightgray": (211, 211, 211), + "darkgray": (169, 169, 169), + "olive": (128, 128, 0), + "lime": (0, 128, 0), + "teal": (0, 128, 128), + "navy": (0, 0, 128), + "maroon": (128, 0, 0), + "fuchsia": (255, 0, 128), + "aqua": (0, 255, 128), + "silver": (192, 192, 192), + "gold": (255, 215, 0), + "turquoise": (64, 224, 208), + "lavender": (230, 230, 250), + "violet": (238, 130, 238), + "coral": (255, 127, 80), + "indigo": (75, 0, 130), +} + +COLORS = ["custom", "white", "black", "red", "green", "blue", "yellow", + "cyan", "magenta", "orange", "purple", "pink", "brown", "gray", + "lightgray", "darkgray", "olive", "lime", "teal", "navy", "maroon", + "fuchsia", "aqua", "silver", "gold", "turquoise", "lavender", + "violet", "coral", "indigo"] + +STYLES = ["Accent","afmhot","autumn","binary","Blues","bone","BrBG","brg", + "BuGn","BuPu","bwr","cividis","CMRmap","cool","coolwarm","copper","cubehelix","Dark2","flag", + "gist_earth","gist_gray","gist_heat","gist_rainbow","gist_stern","gist_yarg","GnBu","gnuplot","gnuplot2","gray","Greens", + "Greys","hot","hsv","inferno","jet","magma","nipy_spectral","ocean","Oranges","OrRd", + "Paired","Pastel1","Pastel2","pink","PiYG","plasma","PRGn","prism","PuBu","PuBuGn", + "PuOr","PuRd","Purples","rainbow","RdBu","RdGy","RdPu","RdYlBu","RdYlGn","Reds","seismic", + "Set1","Set2","Set3","Spectral","spring","summer","tab10","tab20","tab20b","tab20c","terrain", + "turbo","twilight","twilight_shifted","viridis","winter","Wistia","YlGn","YlGnBu","YlOrBr","YlOrRd"] + +#---------------------------------------------------------------------------------------------------------------------# + +def rgb_to_hex(rgb): + r, g, b = rgb + return "#{:02X}{:02X}{:02X}".format(r, g, b) + +def hex_to_rgb(hex_color): + hex_color = hex_color.lstrip('#') # Remove the '#' character, if present + r = int(hex_color[0:2], 16) + g = int(hex_color[2:4], 16) + b = int(hex_color[4:6], 16) + return (r, g, b) + +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_HalftoneGrid: + @classmethod + def INPUT_TYPES(s): + + return {"required": { + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "dot_style": (STYLES,), + "reverse_dot_style": (["No", "Yes"],), + "dot_frequency": ("INT", {"default": 50, "min": 1, "max":200, "step": 1}), + "background_color": (COLORS,), + "x_pos": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": .01}), + "y_pos": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": .01}), + }, + "optional": { + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}) + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "halftone" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + + def halftone(self, width, height, dot_style, reverse_dot_style, dot_frequency, background_color, x_pos, y_pos, bg_color_hex='#000000'): + + if background_color == "custom": + bgc = bg_color_hex + else: + bgc = background_color + + reverse = "" + + if reverse_dot_style == "Yes": + reverse = "_r" + + fig, ax = plt.subplots(figsize=(width/100,height/100)) + + dotsx = np.linspace(0, 1, dot_frequency) + dotsy = np.linspace(0, 1, dot_frequency) + + X, Y = np.meshgrid(dotsx, dotsy) + + dist = np.sqrt((X - x_pos)**2 + (Y - y_pos)**2) + + fig.patch.set_facecolor(bgc) + ax.scatter(X, Y, c=dist, cmap=dot_style+reverse) + + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(tight=True) + + img_buf = io.BytesIO() + plt.savefig(img_buf, format='png') + img = Image.open(img_buf) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-halftone-grid" + + return(pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ColorBars: + @classmethod + def INPUT_TYPES(s): + + modes = ["2-color"] + + return {"required": { + "mode": (modes,), + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "color_1": (COLORS,), + "color_2": (COLORS,), + "orientation": (["vertical", "horizontal", "diagonal", "alt_diagonal"],), #added 135 angle for diagonals + "bar_frequency": ("INT", {"default": 5, "min": 1, "max":200, "step": 1}), + "offset": ("FLOAT", {"default": 0, "min": 0, "max":20, "step": 0.05}), + }, + "optional": { + "color1_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "color2_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, mode, width, height, color_1, color_2, + orientation, bar_frequency, offset=0, + color1_hex='#000000', color2_hex='#000000'): + + # Get RGB values + if color_1 == "custom": + color1_rgb = hex_to_rgb(color1_hex) + else: + color1_rgb = color_mapping.get(color_1, (255, 255, 255)) # Default to white if the color is not found + + if color_2 == "custom": + color2_rgb = hex_to_rgb(color2_hex) + else: + color2_rgb = color_mapping.get(color_2, (0, 0, 0)) # Default to black if the color 
is not found + + canvas = np.zeros((height, width, 3), dtype=np.uint8) + + bar_width = width / bar_frequency + bar_height = height / bar_frequency + offset_pixels = int(offset * max(width, height)) + + if orientation == "vertical": + for j in range(height): + for i in range(width): + if ((i + offset_pixels) // bar_width) % 2 == 0: # Check for even index + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + elif orientation == "horizontal": + for j in range(height): + for i in range(width): + if ((j + offset_pixels) // bar_height) % 2 == 0: # Check for even index + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + elif orientation == "diagonal": + # Calculate the bar width based on a 45 degree angle + bar_width = int(bar_height / np.tan(np.pi / 4)) * 2 + for j in range(height): + for i in range(width): + # Calculate which diagonal bar the pixel belongs to with the offset + bar_number = (i + j + offset_pixels) // bar_width + if bar_number % 2 == 0: # Check for even bar number + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + elif orientation == "alt_diagonal": + bar_width = int(bar_height / np.tan(np.pi / 4)) * 2 + for j in range(height): + for i in range(width): + # Calculate which diagonal bar the pixel belongs to with the offset + bar_number = (i - j + width + offset_pixels) // bar_width + if bar_number % 2 == 0: # Check for even bar number + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + + fig, ax = plt.subplots(figsize=(width/100, height/100)) + + ax.imshow(canvas) + + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(tight=True) + + img_buf = io.BytesIO() + plt.savefig(img_buf, format='png') + img = Image.open(img_buf) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-color-bars" + + return (pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_StyleBars: + @classmethod + def INPUT_TYPES(s): + + modes = ["color bars", "sin wave", "gradient bars"] + + return {"required": { + "mode": (modes,), + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "bar_style": (STYLES,), + "orientation": (["vertical", "horizontal", ],), + "bar_frequency": ("INT", {"default": 5, "min": 1, "max":200, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, mode, width, height, bar_style, orientation, bar_frequency): + + # Create a horizontal or vertical bar depending on the orientation + if orientation == "vertical": + x = np.linspace(0, 1, width) + y = np.zeros((height, width)) + elif orientation == "horizontal": + x = np.zeros((height, width)) + y = np.linspace(0, 1, height) + + # Create a grid of colors for the bar + X, Y = np.meshgrid(x, y) + + if mode == "color bars": + bar_width = 1 / bar_frequency + if orientation == "vertical": + colors = (X // bar_width) % 2 + elif orientation == "horizontal": + colors = (Y // bar_width) % 2 + elif mode == "sin wave": + if orientation == "vertical": + colors = np.sin(2 * np.pi * bar_frequency * X) + elif orientation == "horizontal": + colors = np.sin(2 * np.pi * bar_frequency * Y) + elif mode == "gradient bars": + if orientation == "vertical": + colors = (X * bar_frequency * 2) % 2 + elif orientation == "horizontal": + 
colors = (Y * bar_frequency * 2) % 2 + + fig, ax = plt.subplots(figsize=(width/100, height/100)) + + ax.imshow(colors, cmap=bar_style, aspect='auto') + + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(tight=True) + + img_buf = io.BytesIO() + plt.savefig(img_buf, format='png') + img = Image.open(img_buf) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-style-bars" + + return (pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ColorGradient: + @classmethod + def INPUT_TYPES(s): + + return {"required": { + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "start_color": (COLORS,), + "end_color": (COLORS,), + "gradient_distance": ("FLOAT", {"default": 1, "min": 0, "max": 2, "step": 0.05}), + "linear_transition": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.05}), + "orientation": (["vertical", "horizontal", ],), + }, + "optional": { + "start_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "end_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, width, height, start_color, end_color, orientation, + linear_transition=0.5, gradient_distance=1, + start_color_hex='#000000', end_color_hex='#000000'): # Default to .5 if the value is not found + + # Get RGB values + if start_color == "custom": + color1_rgb = hex_to_rgb(start_color_hex) + else: + color1_rgb = color_mapping.get(start_color, (255, 255, 255)) # Default to white if the color is not found + + if end_color == "custom": + color2_rgb = hex_to_rgb(end_color_hex) + else: + color2_rgb = color_mapping.get(end_color, (0, 0, 0)) # Default to black if the color is not found + + # Create a blank canvas + canvas = np.zeros((height, width, 3), dtype=np.uint8) + transition_pixel = int(linear_transition * (width if orientation == 'horizontal' else height)) #getting center point for gradient + + def get_gradient_value(pos, length, linear_transition, gradient_distance): #getting the distance we use to apply gradient + # Calculate the start and end of the transition + transition_length = length * gradient_distance + transition_start = linear_transition * length - transition_length / 2 + transition_end = linear_transition * length + transition_length / 2 + + # Return the gradient value based on position + if pos < transition_start: + return 0 + elif pos > transition_end: + return 1 + else: + return (pos - transition_start) / transition_length + + if orientation == 'horizontal': + # Define the x-values for interpolation + x = [0, width * linear_transition - 0.5 * width * gradient_distance, width * linear_transition + 0.5 * width * gradient_distance, width] + # Define the y-values for interpolation (t-values) + y = [0, 0, 1, 1] + # Interpolate + t_values = np.interp(np.arange(width), x, y) + for i, t in enumerate(t_values): + interpolated_color = [int(c1 * (1 - t) + c2 * t) for c1, c2 in zip(color1_rgb, color2_rgb)] + canvas[:, i] = interpolated_color + + elif orientation == 'vertical': + # Define the x-values for interpolation + x = [0, height * linear_transition - 0.5 * height * gradient_distance, height * linear_transition + 0.5 * height * gradient_distance, height] + # 
Define the y-values for interpolation (t-values)
+            y = [0, 0, 1, 1]
+            # Interpolate
+            t_values = np.interp(np.arange(height), x, y)
+            for j, t in enumerate(t_values):
+                interpolated_color = [int(c1 * (1 - t) + c2 * t) for c1, c2 in zip(color1_rgb, color2_rgb)]
+                canvas[j, :] = interpolated_color
+
+        fig, ax = plt.subplots(figsize=(width / 100, height / 100))
+
+        ax.imshow(canvas)
+        plt.axis('off')
+        plt.tight_layout(pad=0, w_pad=0, h_pad=0)
+        plt.autoscale(tight=True)
+
+        img_buf = io.BytesIO()
+        plt.savefig(img_buf, format='png')
+        img = Image.open(img_buf)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-color-gradient"
+
+        return (pil2tensor(img), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_RadialGradient:
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "width": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "height": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "start_color": (COLORS,),
+                    "end_color": (COLORS,),
+                    "gradient_distance": ("FLOAT", {"default": 1, "min": 0, "max": 2, "step": 0.05}),
+                    "radial_center_x": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.05}),
+                    "radial_center_y": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.05}),
+                },
+                "optional": {
+                    "start_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "end_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("IMAGE", "show_help", )
+    FUNCTION = "draw"
+    CATEGORY = icons.get("Comfyroll/Graphics/Pattern")
+
+    def draw(self, width, height, start_color, end_color,
+             radial_center_x=0.5, radial_center_y=0.5, gradient_distance=1,
+             start_color_hex='#000000', end_color_hex='#000000'):  # Default to .5 if the value is not found
+
+        # Get RGB values
+        if start_color == "custom":
+            color1_rgb = hex_to_rgb(start_color_hex)
+        else:
+            color1_rgb = color_mapping.get(start_color, (255, 255, 255))  # Default to white if the color is not found
+
+        if end_color == "custom":
+            color2_rgb = hex_to_rgb(end_color_hex)
+        else:
+            color2_rgb = color_mapping.get(end_color, (0, 0, 0))  # Default to black if the color is not found
+
+        # Create a blank canvas
+        canvas = np.zeros((height, width, 3), dtype=np.uint8)
+
+        center_x = int(radial_center_x * width)
+        center_y = int(radial_center_y * height)
+        # Computation for max_distance
+        max_distance = (np.sqrt(max(center_x, width - center_x)**2 + max(center_y, height - center_y)**2)) * gradient_distance
+
+        for i in range(width):
+            for j in range(height):
+                distance_to_center = np.sqrt((i - center_x) ** 2 + (j - center_y) ** 2)
+                t = distance_to_center / max_distance
+                # Ensure t is between 0 and 1
+                t = max(0, min(t, 1))
+                interpolated_color = [int(c1 * (1 - t) + c2 * t) for c1, c2 in zip(color1_rgb, color2_rgb)]
+                canvas[j, i] = interpolated_color
+
+        fig, ax = plt.subplots(figsize=(width / 100, height / 100))
+
+        ax.imshow(canvas)
+        plt.axis('off')
+        plt.tight_layout(pad=0, w_pad=0, h_pad=0)
+        plt.autoscale(tight=True)
+
+        img_buf = io.BytesIO()
+        plt.savefig(img_buf, format='png')
+        img = Image.open(img_buf)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-radial-gradiant"
+
+        return (pil2tensor(img), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_CheckerPattern:
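+    # Draws a two-color checker by integer-dividing pixel coordinates into grid cells; "stepped" mode
+    # compares the cell indices modulo `step` rather than alternating every cell.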
+ + @classmethod + def INPUT_TYPES(s): + + modes = ["regular", "stepped"] + + return {"required": { + "mode": (modes,), + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "color_1": (COLORS,), + "color_2": (COLORS,), + "grid_frequency": ("INT", {"default": 8, "min": 1, "max": 200, "step": 1}), + "step": ("INT", {"default": 2, "min": 2, "max": 200, "step": 1}), + }, + "optional": { + "color1_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "color2_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, mode, width, height, color_1, color_2, + grid_frequency, step, + color1_hex='#000000', color2_hex='#000000'): + + # Get RGB values + if color_1 == "custom": + color1_rgb = hex_to_rgb(color1_hex) + else: + color1_rgb = color_mapping.get(color_1, (255, 255, 255)) # Default to white if the color is not found + + if color_2 == "custom": + color2_rgb = hex_to_rgb(color2_hex) + else: + color2_rgb = color_mapping.get(color_2, (0, 0, 0)) # Default to black if the color is not found + + # Create a blank canvas + canvas = np.zeros((height, width, 3), dtype=np.uint8) + + grid_size = width / grid_frequency + + for i in range(width): + for j in range(height): + + if mode == "regular": + if (i // grid_size) % 2 == (j // grid_size) % 2: + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + elif mode == "stepped": + if (i // grid_size) % step != (j // grid_size) % step: + canvas[j, i] = color1_rgb + else: + canvas[j, i] = color2_rgb + + fig, ax = plt.subplots(figsize=(width/100, height/100)) + + ax.imshow(canvas) + + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(tight=True) + + img_buf = io.BytesIO() + plt.savefig(img_buf, format='png') + img = Image.open(img_buf) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-checker-pattern" + + return (pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_Polygons: + + @classmethod + def INPUT_TYPES(s): + + modes = ["hexagons", "triangles"] + + return {"required": { + "mode": (modes,), + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "rows": ("INT", {"default": 5, "min": 1, "max": 512}), + "columns": ("INT", {"default": 5, "min": 1, "max": 512}), + "face_color": (COLORS,), + "background_color": (COLORS,), + "line_color": (COLORS,), + "line_width": ("INT", {"default": 2, "min": 0, "max": 512}), + }, + "optional": { + "face_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "line_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, mode, width, height, rows, columns, + face_color, background_color, line_color, line_width, + face_color_hex='#000000', bg_color_hex='#000000', line_color_hex='#000000'): + + # Get RGB values + if face_color == "custom": + face_color = face_color_hex + + if line_color == "custom": + line_color = line_color_hex + + 
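+        # matplotlib accepts named colors and "#rrggbb" hex strings alike, so the custom values can be
+        # passed straight through without converting to RGB tuples.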
if background_color == "custom": + background_color = bg_color_hex + + fig, ax = plt.subplots(figsize=(width/100, height/100)) + fig.set_facecolor(background_color) + plt.xlim(0, width/100) + plt.ylim(0, height/100) + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(False) + + # Get polygon shape + if mode == "hexagons": + vertices = 6 + elif mode == "triangles": + vertices = 3 + + # Define the height and width of a hexagon + cell_width = (width/100) / columns + + cell_height = (width/height) * np.sqrt(3) * (height/100) / (2 * columns) + + for row in range(rows + 2): + for col in range(columns + 2): + x = col * cell_width + y = row * cell_height + + # Shift every other row + if row % 2 == 1: + x += cell_width / 2 + + # Create a hexagon as a polygon patch + hexagon = RegularPolygon((x, y), numVertices=vertices, radius=cell_width/1.732, edgecolor=line_color, linewidth=line_width, facecolor=face_color) + ax.add_patch(hexagon) + + img_buf = io.BytesIO() + plt.savefig(img_buf, format='png') + img = Image.open(img_buf) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-polygons" + + return (pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_StarburstLines: + + @classmethod + def INPUT_TYPES(s): + + return {"required": { + "width": ("INT", {"default": 512, "min": 64, "max": 4096}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096}), + "num_lines": ("INT", {"default": 6, "min": 1, "max": 500}), + "line_length": ("FLOAT", {"default": 5, "min": 0, "max": 100, "step": 0.1}), + "line_width": ("INT", {"default": 5, "min": 1, "max": 512}), + "line_color": (COLORS,), + "background_color": (COLORS,), + "center_x": ("INT", {"default": 0, "min": 0, "max": 1024}), + "center_y": ("INT", {"default": 0, "min": 0, "max": 1024}), + "rotation": ("FLOAT", {"default": 0, "min": 0, "max": 720}), + }, + "optional": { + "line_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "draw" + CATEGORY = icons.get("Comfyroll/Graphics/Pattern") + + def draw(self, width, height, num_lines, line_length, line_width, line_color, background_color, + center_x, center_y, rotation=0, + line_color_hex='#000000', bg_color_hex='#000000'): + + if line_color == "custom": + line_color = line_color_hex + else: + line_color = line_color + + if background_color == "custom": + bgc = bg_color_hex + else: + bgc = background_color + + # Define the angle for the spokes in the starburst + angle = 360 / num_lines + + # Set up the plot + fig, ax = plt.subplots(figsize=(width/100,height/100)) + plt.xlim(-width/100, width/100) + plt.ylim(-height/100, height/100) + plt.axis('off') + plt.tight_layout(pad=0, w_pad=0, h_pad=0) + plt.autoscale(False) + + # Coordinates of the central point + center_x = center_x/100 + center_y = center_y/100 + + # Draw the starburst lines + for i in range(num_lines): + # Calculate the endpoint of each line + x_unrotated = center_x + line_length * np.cos(np.radians(i * angle)) + y_unrotated = center_y + line_length * np.sin(np.radians(i * angle)) + + # Apply rotation transformation + x = center_x + x_unrotated * np.cos(np.radians(rotation)) - y_unrotated * np.sin(np.radians(rotation)) + y = center_y + x_unrotated * np.sin(np.radians(rotation)) + 
y_unrotated * np.cos(np.radians(rotation))
+
+            # Plot the line
+            fig.patch.set_facecolor(bgc)
+            ax.plot([center_x, x], [center_y, y], color=line_color, linewidth=line_width)
+
+        img_buf = io.BytesIO()
+        plt.savefig(img_buf, format='png')
+        img = Image.open(img_buf)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-starburst-lines"
+
+        return (pil2tensor(img), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_StarburstColors:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "width": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "height": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "num_triangles": ("INT", {"default": 6, "min": 1, "max": 512}),
+                    "color_1": (COLORS,),
+                    "color_2": (COLORS,),
+                    "center_x": ("INT", {"default": 0, "min": 0, "max": 512}),
+                    "center_y": ("INT", {"default": 0, "min": 0, "max": 512}),
+                    "rotation": ("FLOAT", {"default": 0, "min": 0, "max": 720}),
+                    "bbox_factor": ("FLOAT", {"default": 2, "min": 0, "max": 2, "step": .01}),
+                },
+                "optional": {
+                    "color1_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "color2_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("IMAGE", "show_help", )
+    FUNCTION = "draw"
+    CATEGORY = icons.get("Comfyroll/Graphics/Pattern")
+
+    def draw(self, width, height, num_triangles, color_1, color_2,
+             center_x, center_y, bbox_factor, rotation=0,
+             color1_hex='#000000', color2_hex='#000000'):
+
+        # Resolve the custom triangle colors (matplotlib accepts hex strings directly)
+        if color_1 == "custom":
+            color_1 = color1_hex
+
+        if color_2 == "custom":
+            color_2 = color2_hex
+
+        # Set up the plot
+        x = width/100
+        y = height/100
+
+        fig, ax = plt.subplots(figsize=(x, y))
+        plt.xlim(-x/2, x/2)
+        plt.ylim(-y/2, y/2)
+
+        plt.axis('off')
+        plt.tight_layout(pad=0, w_pad=0, h_pad=0)
+        plt.autoscale(False)
+
+        # Set the size of the starburst bounding box in x and y dimensions
+        box_width = bbox_factor * x
+        box_height = bbox_factor * y
+
+        # Initialize a color list for alternating colors
+        colors = [color_1, color_2]
+
+        tri = num_triangles
+
+        # Draw the starburst triangles with alternating colors and square pattern
+        for i in range(tri):
+            # Calculate the endpoints of the triangle with varying length
+            x1 = center_x/100
+            y1 = center_y/100
+            x2_unrotated = (box_width / 2) * np.cos(np.radians(i * 360 / tri))
+            y2_unrotated = (box_height / 2) * np.sin(np.radians(i * 360 / tri))
+            x3_unrotated = (box_width / 2) * np.cos(np.radians((i + 1) * 360 / tri))
+            y3_unrotated = (box_height / 2) * np.sin(np.radians((i + 1) * 360 / tri))
+
+            # Apply rotation transform
+            x2 = x2_unrotated * np.cos(np.radians(rotation)) - y2_unrotated * np.sin(np.radians(rotation))
+            y2 = x2_unrotated * np.sin(np.radians(rotation)) + y2_unrotated * np.cos(np.radians(rotation))
+            x3 = x3_unrotated * np.cos(np.radians(rotation)) - y3_unrotated * np.sin(np.radians(rotation))
+            y3 = x3_unrotated * np.sin(np.radians(rotation)) + y3_unrotated * np.cos(np.radians(rotation))
+
+            # Plot the triangle with alternating colors
+            ax.fill([x1, x2, x3, x1], [y1, y2, y3, y1], color=colors[i % 2])
+
+        img_buf = io.BytesIO()
+        plt.savefig(img_buf, format='png')
+        img = Image.open(img_buf)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pattern-Nodes#cr-starburst-colors"
+
return (pil2tensor(img), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Color Bars": CR_ColorBars, + "CR Style Bars": CR_StyleBars, + "CR Checker Pattern": CR_CheckerPattern, + "CR Polygons":CR_Polygons, + "CR Halftone Grid": CR_HalftoneGrid, + "CR Color Gradient": CR_ColorGradient, + "CR Radial Gradient": CR_RadialGradient, + "CR Overlay Text": CR_OverlayText, + "CR Starburst Lines": CR_StarburstLines, + "CR Starburst Colors": CR_StarburstColors, +} +''' diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/model_merge.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/model_merge.py new file mode 100644 index 0000000000000000000000000000000000000000..585bdbbdff43754dfe45f82c39fcd34729f98059 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/model_merge.py @@ -0,0 +1,176 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import comfy.sd +import comfy.model_management +import folder_paths +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# Model Merge Nodes +#---------------------------------------------------------------------------------------------------------------------# +class CR_ModelMergeStack: + + @classmethod + def INPUT_TYPES(cls): + + checkpoint_files = ["None"] + folder_paths.get_filename_list("checkpoints") + + return {"required": {"switch_1": (["Off","On"],), + "ckpt_name1": (checkpoint_files,), + "model_ratio1": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + "clip_ratio1": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + # + "switch_2": (["Off","On"],), + "ckpt_name2": (checkpoint_files,), + "model_ratio2": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + "clip_ratio2": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + # + "switch_3": (["Off","On"],), + "ckpt_name3": (checkpoint_files,), + "model_ratio3": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + "clip_ratio3": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}), + }, + "optional":{ + "model_stack": ("MODEL_STACK",), + }, + } + + RETURN_TYPES = ("MODEL_STACK", "STRING", ) + RETURN_NAMES = ("MODEL_STACK", "show_help", ) + FUNCTION = "list_checkpoints" + CATEGORY = icons.get("Comfyroll/Model Merge") + + def list_checkpoints(self, switch_1, ckpt_name1, model_ratio1, clip_ratio1, switch_2, ckpt_name2, model_ratio2, clip_ratio2, switch_3, ckpt_name3, model_ratio3, clip_ratio3, model_stack=None): + + # Initialise the list + model_list = list() + + if model_stack is not None: + model_list.extend([l for l in model_stack if l[0] != "None"]) + + if ckpt_name1 != "None" and switch_1 == "On": + model_list.extend([(ckpt_name1, model_ratio1, clip_ratio1)]), + + if ckpt_name2 != "None" and 
switch_2 == "On": + model_list.extend([(ckpt_name2, model_ratio2, clip_ratio2)]), + + if ckpt_name3 != "None" and switch_3 == "On": + model_list.extend([(ckpt_name3, model_ratio3, clip_ratio3)]), + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Model-Merge-Nodes#cr-model-stack" + + return (model_list, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ApplyModelMerge: + + @classmethod + def INPUT_TYPES(s): + + merge_methods = ["Recursive", "Weighted"] + + return {"required": {"model_stack": ("MODEL_STACK",), + "merge_method": (merge_methods,), + "normalise_ratios": (["Yes","No"],), + "weight_factor":("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MODEL", "CLIP", "STRING", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "model_mix_info", "show_help", ) + FUNCTION = "merge" + CATEGORY = icons.get("Comfyroll/Model Merge") + + def merge(self, model_stack, merge_method, normalise_ratios, weight_factor): + + # Initialise + sum_clip_ratio = 0 + sum_model_ratio = 0 + model_mix_info = str("Merge Info:\n") + + # If no models + if len(model_stack) == 0: + print(f"[Warning] Apply Model Merge: No active models selected in the model merge stack") + return() + + # If only one model + if len(model_stack) == 1: + print(f"[Warning] Apply Model Merge: Only one active model found in the model merge stack. At least 2 models are normally needed for merging. The active model will be output.") + model_name, model_ratio, clip_ratio = model_stack[0] + ckpt_path = folder_paths.get_full_path("checkpoints", model_name) + return comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + # Calculate ratio sums for normalisation + for i, model_tuple in enumerate(model_stack): + model_name, model_ratio, clip_ratio = model_tuple + sum_model_ratio += model_ratio + sum_clip_ratio += clip_ratio + + # Do recursive merge loops + model_mix_info = model_mix_info + "Ratios are applied using the Recursive method\n\n" + + # Loop through the models and compile the merged model + for i, model_tuple in enumerate(model_stack): + model_name, model_ratio, clip_ratio = model_tuple + ckpt_path = folder_paths.get_full_path("checkpoints", model_name) + merge_model = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + print(f"Apply Model Merge: Model Name {model_name}, Model Ratio {model_ratio}, CLIP Ratio {clip_ratio}") + + if sum_model_ratio != 1 and normalise_ratios == "Yes": + print(f"[Warning] Apply Model Merge: Sum of model ratios != 1. Ratios will be normalised") + # Normalise the ratios + model_ratio = round(model_ratio / sum_model_ratio, 2) + clip_ratio = round(clip_ratio / sum_clip_ratio, 2) + + # Weighted merge method + if merge_method == "Weighted": + if i == 1: + # Reassign extra weight to the second model + model_ratio = 1 - weight_factor + (weight_factor * model_ratio) + clip_ratio = 1 - weight_factor + (weight_factor * clip_ratio) + + #Clone the first model + if i == 0: + model1 = merge_model[0].clone() + clip1 = merge_model[1].clone() + + model_mix_info = model_mix_info + "Base Model Name: " + model_name + else: + # Merge next model + # Comfy merge logic is flipped for stacked nodes. 
This is because the first model is effectively model1 and all subsequent models are model2. + model2 = merge_model[0].clone() + kp = model2.get_key_patches("diffusion_model.") + for k in kp: + #model1.add_patches({k: kp[k]}, 1.0 - model_ratio, model_ratio) #original logic + model1.add_patches({k: kp[k]}, model_ratio, 1.0 - model_ratio) #flipped logic + # Merge next clip + clip2 = merge_model[1].clone() + kp = clip2.get_key_patches() + for k in kp: + if k.endswith(".position_ids") or k.endswith(".logit_scale"): + continue + #clip1.add_patches({k: kp[k]}, 1.0 - clip_ratio, clip_ratio) #original logic + clip1.add_patches({k: kp[k]}, clip_ratio, 1.0 - clip_ratio) #flipped logic + + # Update model info + model_mix_info = model_mix_info + "\nModel Name: " + model_name + "\nModel Ratio: " + str(model_ratio) + "\nCLIP Ratio: " + str(clip_ratio) + "\n" + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Model-Merge-Nodes#cr-apply-model-merge" + + return (model1, clip1, model_mix_info, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Apply Model Merge": CR_ApplyModelMerge, + "CR Model Merge Stack": CR_ModelMergeStack, +} +''' + + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..b47e5b838fdc6efa8b0ed610309257944f4dc0bd --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes.py @@ -0,0 +1,623 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import torch +import numpy as np +import os +import sys +import io +import comfy.sd +from PIL import Image +from PIL.PngImagePlugin import PngInfo +import json +import folder_paths +import typing as tg +import random +from .graphics_functions import random_hex_color, random_rgb +from ..categories import icons + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) + +#---------------------------------------------------------------------------------------------------------------------# +# Aspect Ratio Nodes +#---------------------------------------------------------------------------------------------------------------------# +class CR_AspectRatioSD15: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + aspect_ratios = ["custom", + "1:1 square 512x512", + "1:1 square 1024x1024", + "2:3 portrait 512x768", + "3:4 portrait 512x682", + "3:2 landscape 768x512", + "4:3 landscape 682x512", + "16:9 cinema 910x512", + "1.85:1 cinema 952x512", + "2:1 cinema 1024x512", + "2.39:1 anamorphic 1224x512"] + + return { + "required": { + "width": ("INT", {"default": 512, "min": 64, "max": 8192}), + "height": ("INT", {"default": 512, "min": 64, "max": 8192}), + "aspect_ratio": (aspect_ratios,), + "swap_dimensions": (["Off", "On"],), + "upscale_factor": ("FLOAT", {"default": 
1.0, "min": 0.1, "max": 100.0, "step":0.1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}) + } + } + RETURN_TYPES = ("INT", "INT", "FLOAT", "INT", "LATENT", "STRING", ) + RETURN_NAMES = ("width", "height", "upscale_factor", "batch_size", "empty_latent", "show_help", ) + FUNCTION = "Aspect_Ratio" + CATEGORY = icons.get("Comfyroll/Aspect Ratio") + + def Aspect_Ratio(self, width, height, aspect_ratio, swap_dimensions, upscale_factor, batch_size): + if aspect_ratio == "2:3 portrait 512x768": + width, height = 512, 768 + elif aspect_ratio == "3:2 landscape 768x512": + width, height = 768, 512 + elif aspect_ratio == "1:1 square 512x512": + width, height = 512, 512 + elif aspect_ratio == "1:1 square 1024x1024": + width, height = 1024, 1024 + elif aspect_ratio == "16:9 cinema 910x512": + width, height = 910, 512 + elif aspect_ratio == "3:4 portrait 512x682": + width, height = 512, 682 + elif aspect_ratio == "4:3 landscape 682x512": + width, height = 682, 512 + elif aspect_ratio == "1.85:1 cinema 952x512": + width, height = 952, 512 + elif aspect_ratio == "2:1 cinema 1024x512": + width, height = 1024, 512 + elif aspect_ratio == "2.39:1 anamorphic 1224x512": + width, height = 1224, 512 + + if swap_dimensions == "On": + width, height = height, width + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Aspect-Ratio-Nodes#cr-sd15-aspect-ratio" + + return(width, height, upscale_factor, batch_size, {"samples":latent}, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SDXLAspectRatio: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + aspect_ratios = ["custom", + "1:1 square 1024x1024", + "3:4 portrait 896x1152", + "5:8 portrait 832x1216", + "9:16 portrait 768x1344", + "9:21 portrait 640x1536", + "4:3 landscape 1152x896", + "3:2 landscape 1216x832", + "16:9 landscape 1344x768", + "21:9 landscape 1536x640"] + + return { + "required": { + "width": ("INT", {"default": 1024, "min": 64, "max": 8192}), + "height": ("INT", {"default": 1024, "min": 64, "max": 8192}), + "aspect_ratio": (aspect_ratios,), + "swap_dimensions": (["Off", "On"],), + "upscale_factor": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 100.0, "step":0.1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}) + } + } + RETURN_TYPES = ("INT", "INT", "FLOAT", "INT", "LATENT", "STRING", ) + RETURN_NAMES = ("width", "height", "upscale_factor", "batch_size", "empty_latent", "show_help", ) + FUNCTION = "Aspect_Ratio" + CATEGORY = icons.get("Comfyroll/Aspect Ratio") + + def Aspect_Ratio(self, width, height, aspect_ratio, swap_dimensions, upscale_factor, batch_size): + if aspect_ratio == "1:1 square 1024x1024": + width, height = 1024, 1024 + elif aspect_ratio == "3:4 portrait 896x1152": + width, height = 896, 1152 + elif aspect_ratio == "5:8 portrait 832x1216": + width, height = 832, 1216 + elif aspect_ratio == "9:16 portrait 768x1344": + width, height = 768, 1344 + elif aspect_ratio == "9:21 portrait 640x1536": + width, height = 640, 1536 + elif aspect_ratio == "4:3 landscape 1152x896": + width, height = 1152, 896 + elif aspect_ratio == "3:2 landscape 1216x832": + width, height = 1216, 832 + elif aspect_ratio == "16:9 landscape 1344x768": + width, height = 1344, 768 + elif aspect_ratio == "21:9 landscape 1536x640": + width, height = 1536, 640 + + if swap_dimensions == "On": + width, height = height, width + + latent = 
torch.zeros([batch_size, 4, height // 8, width // 8]) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Aspect-Ratio-Nodes#cr-sdxl-aspect-ratio" + + return(width, height, upscale_factor, batch_size, {"samples":latent}, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_AspectRatio: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + aspect_ratios = ["custom", + "SD1.5 - 1:1 square 512x512", + "SD1.5 - 2:3 portrait 512x768", + "SD1.5 - 3:4 portrait 512x682", + "SD1.5 - 3:2 landscape 768x512", + "SD1.5 - 4:3 landscape 682x512", + "SD1.5 - 16:9 cinema 910x512", + "SD1.5 - 1.85:1 cinema 952x512", + "SD1.5 - 2:1 cinema 1024x512", + "SDXL - 1:1 square 1024x1024", + "SDXL - 3:4 portrait 896x1152", + "SDXL - 5:8 portrait 832x1216", + "SDXL - 9:16 portrait 768x1344", + "SDXL - 9:21 portrait 640x1536", + "SDXL - 4:3 landscape 1152x896", + "SDXL - 3:2 landscape 1216x832", + "SDXL - 16:9 landscape 1344x768", + "SDXL - 21:9 landscape 1536x640"] + + return { + "required": { + "width": ("INT", {"default": 1024, "min": 64, "max": 8192}), + "height": ("INT", {"default": 1024, "min": 64, "max": 8192}), + "aspect_ratio": (aspect_ratios,), + "swap_dimensions": (["Off", "On"],), + "upscale_factor": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 100.0, "step":0.1}), + "prescale_factor": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 100.0, "step":0.1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}) + } + } + RETURN_TYPES = ("INT", "INT", "FLOAT", "FLOAT", "INT", "LATENT", "STRING", ) + RETURN_NAMES = ("width", "height", "upscale_factor", "prescale_factor", "batch_size", "empty_latent", "show_help", ) + FUNCTION = "Aspect_Ratio" + CATEGORY = icons.get("Comfyroll/Aspect Ratio") + + def Aspect_Ratio(self, width, height, aspect_ratio, swap_dimensions, upscale_factor, prescale_factor, batch_size): + + # SD1.5 + if aspect_ratio == "SD1.5 - 1:1 square 512x512": + width, height = 512, 512 + elif aspect_ratio == "SD1.5 - 2:3 portrait 512x768": + width, height = 512, 768 + elif aspect_ratio == "SD1.5 - 16:9 cinema 910x512": + width, height = 910, 512 + elif aspect_ratio == "SD1.5 - 3:4 portrait 512x682": + width, height = 512, 682 + elif aspect_ratio == "SD1.5 - 3:2 landscape 768x512": + width, height = 768, 512 + elif aspect_ratio == "SD1.5 - 4:3 landscape 682x512": + width, height = 682, 512 + elif aspect_ratio == "SD1.5 - 1.85:1 cinema 952x512": + width, height = 952, 512 + elif aspect_ratio == "SD1.5 - 2:1 cinema 1024x512": + width, height = 1024, 512 + elif aspect_ratio == "SD1.5 - 2.39:1 anamorphic 1224x512": + width, height = 1224, 512 + # SDXL + if aspect_ratio == "SDXL - 1:1 square 1024x1024": + width, height = 1024, 1024 + elif aspect_ratio == "SDXL - 3:4 portrait 896x1152": + width, height = 896, 1152 + elif aspect_ratio == "SDXL - 5:8 portrait 832x1216": + width, height = 832, 1216 + elif aspect_ratio == "SDXL - 9:16 portrait 768x1344": + width, height = 768, 1344 + elif aspect_ratio == "SDXL - 9:21 portrait 640x1536": + width, height = 640, 1536 + elif aspect_ratio == "SDXL - 4:3 landscape 1152x896": + width, height = 1152, 896 + elif aspect_ratio == "SDXL - 3:2 landscape 1216x832": + width, height = 1216, 832 + elif aspect_ratio == "SDXL - 16:9 landscape 1344x768": + width, height = 1344, 768 + elif aspect_ratio == "SDXL - 21:9 landscape 1536x640": + width, height = 1536, 640 + + if swap_dimensions == "On": + width, height = height, width + + width = 
int(width*prescale_factor) + height = int(height*prescale_factor) + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Aspect-Ratio-Nodes#cr-aspect-ratio" + + return(width, height, upscale_factor, prescale_factor, batch_size, {"samples":latent}, show_help, ) +#---------------------------------------------------------------------------------------------------------------------# +# Other Nodes +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImageOutput: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "output_type": (["Preview", "Save"],), + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + "optional": { + "trigger": ("BOOLEAN", {"default": False},),} + } + + RETURN_TYPES = ("BOOLEAN", ) + RETURN_NAMES = ("trigger", ) + FUNCTION = "save_images" + OUTPUT_NODE = True + CATEGORY = icons.get("Comfyroll/Other") + + def save_images(self, images, filename_prefix="ComfyUI", trigger = False, output_type = "Preview", prompt=None, extra_pnginfo=None): + def map_filename(filename): + prefix_len = len(os.path.basename(filename_prefix)) + prefix = filename[:prefix_len + 1] + try: + digits = int(filename[prefix_len + 1:].split('_')[0]) + except: + digits = 0 + return (digits, prefix) + + def compute_vars(input): + input = input.replace("%width%", str(images[0].shape[1])) + input = input.replace("%height%", str(images[0].shape[0])) + return input + + if output_type == "Save": + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + elif output_type == "Preview": + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + + filename_prefix = compute_vars(filename_prefix) + + subfolder = os.path.dirname(os.path.normpath(filename_prefix)) + filename = os.path.basename(os.path.normpath(filename_prefix)) + + full_output_folder = os.path.join(self.output_dir, subfolder) + + if os.path.commonpath((self.output_dir, os.path.abspath(full_output_folder))) != self.output_dir: + return {} + + try: + counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1 + except ValueError: + counter = 1 + except FileNotFoundError: + os.makedirs(full_output_folder, exist_ok=True) + counter = 1 + + results = list() + for image in images: + i = 255. 
* image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + file = f"{filename}_{counter:05}_.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-image-output" + + return { "ui": { "images": results }, "result": (trigger,) } + + +#---------------------------------------------------------------------------------------------------------------------# +class CR_IntegerMultipleOf: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "integer": ("INT", {"default": 1, "min": -18446744073709551615, "max": 18446744073709551615}), + "multiple": ("FLOAT", {"default": 8, "min": 1, "max": 18446744073709551615}), + } + } + + RETURN_TYPES =("INT", "STRING", ) + RETURN_NAMES =("INT", "show_help", ) + FUNCTION = "int_multiple_of" + CATEGORY = icons.get("Comfyroll/Other") + + def int_multiple_of(self, integer, multiple=8): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-integer-multiple" + if multiple == 0: + return (int(integer), show_help, ) + integer = integer * multiple + return (int(integer), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_Seed: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})}} + + RETURN_TYPES = ("INT", "STRING", ) + RETURN_NAMES = ("seed", "show_help", ) + FUNCTION = "seedint" + OUTPUT_NODE = True + CATEGORY = icons.get("Comfyroll/Other") + + @staticmethod + def seedint(seed): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-seed" + return (seed, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_LatentBatchSize: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": {"latent": ("LATENT", ), + "batch_size": ("INT", {"default": 2, "min": 1, "max": 16, "step": 1}), + } + } + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "batchsize" + CATEGORY = icons.get("Comfyroll/Other") + + def batchsize(self, latent: tg.Sequence[tg.Mapping[tg.Text, torch.Tensor]], batch_size: int): + samples = latent['samples'] + shape = samples.shape + + sample_list = [samples] + [ + torch.clone(samples) for _ in range(batch_size - 1) + ] + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-latent-batch-size" + + return ({ + 'samples': torch.cat(sample_list), + }, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_PromptText: + @classmethod + def INPUT_TYPES(s): + return {"required": {"prompt": ("STRING", {"default": "prompt", "multiline": True})}} + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("prompt", "show_help", ) + FUNCTION = "get_value" + CATEGORY = icons.get("Comfyroll/Other") + + def get_value(self, prompt): + show_help = 
"https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-prompt-text" + return (prompt, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SplitString: + + @classmethod + def INPUT_TYPES(s): + + return {"required": {"text": ("STRING", {"multiline": False, "default": "text"}), + "delimiter": ("STRING", {"multiline": False, "default": ","}), + } + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", ) + RETURN_NAMES = ("string_1", "string_2", "string_3", "string_4", "show_help", ) + FUNCTION = "split" + CATEGORY = icons.get("Comfyroll/Other") + + def split(self, text, delimiter): + + # Split the text string + parts = text.split(delimiter) + strings = [part.strip() for part in parts[:4]] + string_1, string_2, string_3, string_4 = strings + [""] * (4 - len(strings)) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-split-string" + + return (string_1, string_2, string_3, string_4, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_Value: + + @classmethod + def INPUT_TYPES(s): + return {"required": {"value": ("FLOAT", {"default": 1.0,},)}} + + RETURN_TYPES = ("FLOAT", "INT", "STRING", ) + RETURN_NAMES = ("FLOAT", "INT", "show_help", ) + CATEGORY = icons.get("Comfyroll/Other") + FUNCTION = "get_value" + + def get_value(self, value): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-value" + return (float(value), int(value), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ConditioningMixer: + + @classmethod + def INPUT_TYPES(s): + + mix_methods = ["Combine", "Average", "Concatenate"] + + return {"required": + {"conditioning_1": ("CONDITIONING", ), + "conditioning_2": ("CONDITIONING", ), + "mix_method": (mix_methods, ), + "average_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("CONDITIONING", "STRING", ) + RETURN_NAMES = ("CONDITIONING", "show_help", ) + FUNCTION = "conditioning" + CATEGORY = icons.get("Comfyroll/Other") + + def conditioning(self, mix_method, conditioning_1, conditioning_2, average_strength): + + conditioning_from = conditioning_1 + conditioning_to = conditioning_2 + conditioning_to_strength = average_strength + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-conditioning-mixer" + + if mix_method == "Combine": + return (conditioning_1 + conditioning_2, show_help, ) + + if mix_method == "Average": + + out = [] + + if len(conditioning_from) > 1: + print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + + cond_from = conditioning_from[0][0] + pooled_output_from = conditioning_from[0][1].get("pooled_output", None) + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from) + t0 = cond_from[:,:t1.shape[1]] + if t0.shape[1] < t1.shape[1]: + t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1) + + tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength)) + t_to = conditioning_to[i][1].copy() + if pooled_output_from is not None 
and pooled_output_to is not None: + t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength)) + elif pooled_output_from is not None: + t_to["pooled_output"] = pooled_output_from + + n = [tw, t_to] + out.append(n) + return (out, show_help, ) + + if mix_method == "Concatenate": + + out = [] + + if len(conditioning_from) > 1: + print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + + cond_from = conditioning_from[0][0] + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + tw = torch.cat((t1, cond_from),1) + n = [tw, conditioning_to[i][1].copy()] + out.append(n) + return (out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SelectModel: + + @classmethod + def INPUT_TYPES(cls): + + checkpoint_files = ["None"] + folder_paths.get_filename_list("checkpoints") + + return {"required": {"ckpt_name1": (checkpoint_files,), + "ckpt_name2": (checkpoint_files,), + "ckpt_name3": (checkpoint_files,), + "ckpt_name4": (checkpoint_files,), + "ckpt_name5": (checkpoint_files,), + "select_model": ("INT", {"default": 1, "min": 1, "max": 5}), + } + } + + + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "STRING", "STRING", ) + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "ckpt_name", "show_help", ) + FUNCTION = "select_model" + CATEGORY = icons.get("Comfyroll/Other") + + def select_model(self, ckpt_name1, ckpt_name2, ckpt_name3, ckpt_name4, ckpt_name5, select_model): + + # Initialise the list + model_list = list() + + if select_model == 1: + model_name = ckpt_name1 + elif select_model == 2: + model_name = ckpt_name2 + elif select_model == 3: + model_name = ckpt_name3 + elif select_model == 4: + model_name = ckpt_name4 + elif select_model == 5: + model_name = ckpt_name5 + + if model_name == "None": + print(f"CR Select Model: No model selected") + return() + + ckpt_path = folder_paths.get_full_path("checkpoints", model_name) + model, clip, vae, clipvision = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, + embedding_directory=folder_paths.get_folder_paths("embeddings")) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-select-model" + + return (model, clip, vae, model_name, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + ### Aspect ratio + "CR SD1.5 Aspect Ratio": CR_AspectRatioSD15, + "CR SDXL Aspect Ratio":CR_SDXLAspectRatio, + "CR Aspect Ratio": CR_AspectRatio, + ### Other + "CR Image Output": CR_ImageOutput, + "CR Integer Multiple": CR_IntegerMultipleOf, + "CR Latent Batch Size":CR_LatentBatchSize + "CR Seed":CR_Seed, + "CR Prompt Text":CR_PromptText, + "CR Split String":CR_SplitString, + "CR Value": CR_Value, + "CR Conditioning Mixer":CR_ConditioningMixer, + "CR Select Model": CR_SelectModel, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes_random.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes_random.py new file mode 100644 index 0000000000000000000000000000000000000000..630818ab7be129b69f36a203ebec700e8f3ff65c 
--- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/nodes_random.py @@ -0,0 +1,160 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import random +import string +from .graphics_functions import random_hex_color, random_rgb +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# Random values +#---------------------------------------------------------------------------------------------------------------------# +class CR_RandomHexColor: + + @classmethod + def INPUT_TYPES(cls): + + return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),}} + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", ) + RETURN_NAMES = ("hex_color1", "hex_color2", "hex_color3", "hex_color4", "show_help", ) + FUNCTION = "get_colors" + CATEGORY = icons.get("Comfyroll/Utils/Random") + + def get_colors(self, seed): + + # Set the seed + random.seed(seed) + + hex_color1 = random_hex_color() + hex_color2 = random_hex_color() + hex_color3 = random_hex_color() + hex_color4 = random_hex_color() + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-random-hex-color" + + return (hex_color1, hex_color2, hex_color3, hex_color4, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_RandomRGB: + + @classmethod + def INPUT_TYPES(cls): + + return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),}} + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", ) + RETURN_NAMES = ("rgb_1", "rgb_2", "rgb_3", "rgb_4", "show_help", ) + FUNCTION = "get_colors" + CATEGORY = icons.get("Comfyroll/Utils/Random") + + def get_colors(self, seed): + + # Set the seed + random.seed(seed) + + rgb_1 = random_rgb() + rgb_2 = random_rgb() + rgb_3 = random_rgb() + rgb_4 = random_rgb() + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-random-rgb" + + return (rgb_1, rgb_2, rgb_3, rgb_4, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_RandomMultilineValues: + + @classmethod + def INPUT_TYPES(cls): + + types = ["binary", "decimal", "hexadecimal", "alphabetic", "alphanumeric"] + + return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "value_type": (types,), + "rows": ("INT", {"default": 5, "min": 1, "max": 2048}), + "string_length": ("INT", {"default": 5, "min": 1, "max": 2048}), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("multiline_text", "show_help", ) + FUNCTION = "generate" + CATEGORY = icons.get("Comfyroll/Utils/Random") + + def generate(self, value_type, rows, string_length, seed): + + # Set the seed + random.seed(seed) + + if value_type == "binary": + choice_str = '01' + elif value_type == "decimal": + choice_str = '0123456789' + elif value_type == "hexadecimal": + choice_str = '0123456789abcdef' + elif value_type == "alphabetic": + choice_str = 
string.ascii_letters + elif value_type == "alphanumeric": + choice_str = string.ascii_letters + string.digits + + multiline_text = '\n'.join([''.join(random.choice(choice_str) for _ in range(string_length)) for _ in range(rows)]) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-random-multiline-values" + + return (multiline_text, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# + +class CR_RandomRGBGradient: + + @classmethod + def INPUT_TYPES(cls): + + return {"required": {"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "rows": ("INT", {"default": 5, "min": 1, "max": 2048}), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("multiline_text", "show_help", ) + FUNCTION = "generate" + CATEGORY = icons.get("Comfyroll/Utils/Random") + + def generate(self, rows, seed): + + # Set the seed + random.seed(seed) + + temp = 0 + multiline_text = "" + + for i in range(1, rows + 1): + print(temp) + if temp <= 99 - rows + i: + upper_bound = min(99, temp + (99 - temp) // (rows - i + 1)) + current_value = random.randint(temp, upper_bound) + multiline_text += f'{current_value}:{random.randint(0, 255)},{random.randint(0, 255)},{random.randint(0, 255)}\n' + print(multiline_text) + temp = current_value + 1 + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Other-Nodes#cr-random-RGB-gradient" + + return (multiline_text, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + # Random + "CR Random Hex Color": CR_RandomHexColor, + "CR Random RGB": CR_RandomRGB, + "CR Random Multiline Values": CR_RandomMultilineValues, + "CR Random RGB Gradient": CR_RandomRGBGradient, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_filter.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_filter.py new file mode 100644 index 0000000000000000000000000000000000000000..b428640e8c818537dd3e64307943180c7ab5fd86 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_filter.py @@ -0,0 +1,299 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# +import torch +import numpy as np +from PIL import Image, ImageDraw, ImageStat, ImageFilter +from .graphics_functions import get_color_values +from ..config import color_mapping, COLORS +from ..categories import icons + +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +#---------------------------------------------------------------------------------------------------------------------# +# Based on Color Tint node by hnmr293 +class CR_ColorTint: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + #tints = COLORS.append('sepia') + ''' + tints = ["custom", "white", "black", "sepia", "red", "green", "blue", + "cyan", "magenta", "yellow", "purple", "orange", "warm", + "cool", "lime", "navy", "vintage", "rose", "teal", + "maroon", "peach", "lavender", "olive"] + ''' + + return { + "required": {"image": ("IMAGE",), + "strength": ("FLOAT", {"default": 1.0,"min": 0.1,"max": 1.0,"step": 0.1}), + "mode": (COLORS,), + }, + "optional": {"tint_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),} + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "color_tint" + CATEGORY = icons.get("Comfyroll/Graphics/Filter") + + def color_tint(self, image: torch.Tensor, strength, mode: str="sepia", tint_color_hex='#000000'): + + if strength == 0: + return (image,) + + # Get RGB values for the tint color + tint_color = get_color_values(mode, tint_color_hex, color_mapping) + color_rgb = tuple([value / 255 for value in tint_color]) + + sepia_weights = torch.tensor([0.2989, 0.5870, 0.1140]).view(1, 1, 1, 3).to(image.device) + + mode_filters = { + "custom": torch.tensor([color_rgb[0], color_rgb[1], color_rgb[2]]), + "white": torch.tensor([1, 1, 1]), + "black": torch.tensor([0, 0, 0]), + "sepia": torch.tensor([1.0, 0.8, 0.6]), + "red": torch.tensor([1.0, 0.6, 0.6]), + "green": torch.tensor([0.6, 1.0, 0.6]), + "blue": torch.tensor([0.6, 0.8, 1.0]), + "cyan": torch.tensor([0.6, 1.0, 1.0]), + "magenta": torch.tensor([1.0, 0.6, 1.0]), + "yellow": torch.tensor([1.0, 1.0, 0.6]), + "purple": torch.tensor([0.8, 0.6, 1.0]), + "orange": torch.tensor([1.0, 0.7, 0.3]), + "warm": torch.tensor([1.0, 0.9, 0.7]), + "cool": torch.tensor([0.7, 0.9, 1.0]), + "lime": torch.tensor([0.7, 1.0, 0.3]), + "navy": torch.tensor([0.3, 0.4, 0.7]), + "vintage": torch.tensor([0.9, 0.85, 0.7]), + "rose": torch.tensor([1.0, 0.8, 0.9]), + "teal": torch.tensor([0.3, 0.8, 0.8]), + "maroon": torch.tensor([0.7, 0.3, 0.5]), + "peach": torch.tensor([1.0, 0.8, 0.6]), + "lavender": torch.tensor([0.8, 0.6, 1.0]), + "olive": torch.tensor([0.6, 0.7, 0.4]), + } + + scale_filter = mode_filters[mode].view(1, 1, 1, 3).to(image.device) + + grayscale = torch.sum(image * sepia_weights, dim=-1, keepdim=True) + tinted = grayscale * scale_filter + + result = tinted * strength + image * (1 - strength) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Filter-Nodes#cr-color-tint" + + return (result, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_HalftoneFilter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + + shapes = ["ellipse", "rectangle"] + rez = ["normal", "hi-res (2x output size)"] + + return { + "required": { + "image": ("IMAGE",), + "dot_size": ("INT", {"default": 5, "min": 1, "max": 30, "step": 1}), + "dot_shape": (shapes, {"default": "ellipse"}), + #"scale": ("INT", {"default": 1, "min": 1, "max": 8, "step": 1}), + "resolution": (rez, {"default": "normal"}), + "angle_c": ("INT", {"default": 75, "min": 0, "max": 360, "step": 1}), + 
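+                    # The angle_c/m/y/k defaults (75, 45, 15, 0) stagger the
+                    # channel screens in the manner of conventional CMYK print
+                    # halftoning; keeping the screen angles well separated stops
+                    # the overlaid dot grids from producing visible moire.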
"angle_m": ("INT", {"default": 45, "min": 0, "max": 360, "step": 1}), + "angle_y": ("INT", {"default": 15, "min": 0, "max": 360, "step": 1}), + "angle_k": ("INT", {"default": 0, "min": 0, "max": 360, "step": 1}), + "greyscale": ("BOOLEAN", {"default": True}), + "antialias": ("BOOLEAN", {"default": True}), + "antialias_scale": ("INT", {"default": 2, "min": 1, "max": 4, "step": 1}), + "border_blending": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "halftone_effect" + CATEGORY = icons.get("Comfyroll/Graphics/Filter") + + def tensor_to_pil(self, tensor): + if tensor.ndim == 4 and tensor.shape[0] == 1: # Check for batch dimension + tensor = tensor.squeeze(0) # Remove batch dimension + if tensor.dtype == torch.float32: # Check for float tensors + tensor = tensor.mul(255).byte() # Convert to range [0, 255] and change to byte type + elif tensor.dtype != torch.uint8: # If not float and not uint8, conversion is needed + tensor = tensor.byte() # Convert to byte type + + numpy_image = tensor.cpu().numpy() + + # Determine the correct mode based on the number of channels + if tensor.ndim == 3: + if tensor.shape[2] == 1: + mode = 'L' # Grayscale + elif tensor.shape[2] == 3: + mode = 'RGB' # RGB + elif tensor.shape[2] == 4: + mode = 'RGBA' # RGBA + else: + raise ValueError(f"Unsupported channel number: {tensor.shape[2]}") + else: + raise ValueError(f"Unexpected tensor shape: {tensor.shape}") + + pil_image = Image.fromarray(numpy_image, mode) + return pil_image + + def pil_to_tensor(self, pil_image): + numpy_image = np.array(pil_image) + tensor = torch.from_numpy(numpy_image).float().div(255) # Convert to range [0, 1] + tensor = tensor.unsqueeze(0) # Add batch dimension + return tensor + + def halftone_effect(self, image, dot_size, dot_shape, resolution, angle_c, angle_m, angle_y, angle_k, greyscale, antialias, border_blending, antialias_scale): + + sample = dot_size + shape = dot_shape + + # Map resolution to scale + resolution_to_scale = { + "normal": 1, + "hi-res (2x output size)": 2, + } + scale = resolution_to_scale.get(resolution, 1) # Default to 1 if resolution is not recognized + + # If the input is a PyTorch tensor, convert to PIL Image + if isinstance(image, torch.Tensor): + image = self.tensor_to_pil(image) + + # Ensure the image is a PIL Image + if not isinstance(image, Image.Image): + raise TypeError("The provided image is neither a PIL Image nor a PyTorch tensor.") + + pil_image = image # Now we are sure pil_image is defined + + # Convert to greyscale or CMYK + if greyscale: + pil_image = pil_image.convert("L") + channel_images = [pil_image] + angles = [angle_k] + else: + pil_image = pil_image.convert("CMYK") + channel_images = list(pil_image.split()) + angles = [angle_c, angle_m, angle_y, angle_k] + + # Apply the halftone effect using PIL + halftone_images = self._halftone_pil(pil_image, channel_images, sample, scale, angles, antialias, border_blending, antialias_scale, shape) + + # Merge channels and convert to RGB + if greyscale: + new_image = halftone_images[0].convert("RGB") # Convert the greyscale image to RGB + else: + new_image = Image.merge("CMYK", halftone_images).convert("RGB") + + result_tensor = self.pil_to_tensor(new_image) + + # Debug print to check the final tensor shape + print("Final tensor shape:", result_tensor.shape) + + return (result_tensor, show_help, ) + + def _halftone_pil(self, im, cmyk, sample, scale, angles, antialias, border_blending, antialias_scale, shape): + # If we're 
antialiasing, we'll multiply the size of the image by this + # scale while drawing, and then scale it back down again afterwards. + antialias_res = antialias_scale if antialias else 1 + scale = scale * antialias_res + + dots = [] + + for channel_index, (channel, angle) in enumerate(zip(cmyk, angles)): + channel = channel.rotate(angle, expand=1) + size = channel.size[0] * scale, channel.size[1] * scale + half_tone = Image.new("L", size) + draw = ImageDraw.Draw(half_tone) + + # Cycle through one sample point at a time, drawing a circle for + # each one: + for x in range(0, channel.size[0], sample): + for y in range(0, channel.size[1], sample): + + # Adjust the sampling near the borders for non-square angles + if border_blending and angle % 90 != 0 and (x < sample or y < sample or x > channel.size[0] - sample or y > channel.size[1] - sample): + # Get a weighted average of the neighboring pixels + neighboring_pixels = channel.crop((max(x - 1, 0), max(y - 1, 0), min(x + 2, channel.size[0]), min(y + 2, channel.size[1]))) + pixels = list(neighboring_pixels.getdata()) + weights = [0.5 if i in [0, len(pixels)-1] else 1 for i in range(len(pixels))] + weighted_mean = sum(p * w for p, w in zip(pixels, weights)) / sum(weights) + mean = weighted_mean + else: + # Area we sample to get the level: + box = channel.crop((x, y, x + sample, y + sample)) + # The average level for that box (0-255): + mean = ImageStat.Stat(box).mean[0] + + # The diameter or side length of the shape to draw based on the mean (0-1): + size = (mean / 255) ** 0.5 + + # Size of the box we'll draw the circle in: + box_size = sample * scale + + # Diameter or side length of shape we'll draw: + draw_size = size * box_size + + # Position of top-left of box we'll draw the circle in: + box_x, box_y = (x * scale), (y * scale) + + # Positioned of top-left and bottom-right of circle: + x1 = box_x + ((box_size - draw_size) / 2) + y1 = box_y + ((box_size - draw_size) / 2) + x2 = x1 + draw_size + y2 = y1 + draw_size + + # Draw the shape based on the variable passed + draw_method = getattr(draw, shape, None) + if draw_method: + draw_method([(x1, y1), (x2, y2)], fill=255) + + half_tone = half_tone.rotate(-angle, expand=1) + width_half, height_half = half_tone.size + + # Top-left and bottom-right of the image to crop to: + xx1 = (width_half - im.size[0] * scale) / 2 + yy1 = (height_half - im.size[1] * scale) / 2 + xx2 = xx1 + im.size[0] * scale + yy2 = yy1 + im.size[1] * scale + + half_tone = half_tone.crop((xx1, yy1, xx2, yy2)) + + if antialias: + # Scale it back down to antialias the image. 
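+                # (Supersampling: scale was multiplied by antialias_scale before
+                # drawing, so the dots exist at antialias_scale times the target
+                # resolution here; the LANCZOS resize below averages them back
+                # down and smooths the shape edges. E.g. with antialias_scale=2
+                # each channel is drawn at 2x size and then halved.)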
+ w = int((xx2 - xx1) / antialias_scale) + h = int((yy2 - yy1) / antialias_scale) + half_tone = half_tone.resize((w, h), resample=Image.LANCZOS) + + dots.append(half_tone) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Filter-Nodes#cr-halftone-filter" + + return (dots, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Halftone Filter": "CR HalftoneFilter", + "CR Color Tint": CR_ColorTint, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_layout.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_layout.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb86564b43b10d43fa914eb8887f74d343201b6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_layout.py @@ -0,0 +1,606 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import numpy as np +import torch +import os +from PIL import Image, ImageDraw, ImageOps, ImageFont +from ..categories import icons +from ..config import color_mapping, COLORS +from .graphics_functions import (hex_to_rgb, + get_color_values, + text_panel, + combine_images, + apply_outline_and_border, + get_font_size, + draw_text_on_image) + +#try: +# import Markdown +#except ImportError: +# import pip +# pip.main(['install', 'Markdown']) + +#---------------------------------------------------------------------------------------------------------------------# + +ALIGN_OPTIONS = ["top", "center", "bottom"] +ROTATE_OPTIONS = ["text center", "image center"] +JUSTIFY_OPTIONS = ["left", "center", "right"] +PERSPECTIVE_OPTIONS = ["top", "bottom", "left", "right"] + +#---------------------------------------------------------------------------------------------------------------------# + +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_PageLayout: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + layout_options = ["header", "footer", "header and footer", "no header or footer"] + + return {"required": { + "layout_options": (layout_options,), + "image_panel": ("IMAGE",), + "header_height": ("INT", {"default": 0, "min": 0, "max": 1024}), + "header_text": ("STRING", {"multiline": True, "default": "text"}), + "header_align": (JUSTIFY_OPTIONS, ), + "footer_height": ("INT", {"default": 0, "min": 0, "max": 1024}), + "footer_text": ("STRING", {"multiline": True, "default": "text"}), + "footer_align": (JUSTIFY_OPTIONS, ), + "font_name": (file_list,), + "font_color": (COLORS,), + "header_font_size": ("INT", {"default": 150, "min": 0, "max": 1024}), + "footer_font_size": ("INT", {"default": 50, "min": 0, "max": 1024}), + "border_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}), + "border_color": (COLORS,), + "background_color": (COLORS,), + }, + "optional": { + "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "border_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("image", "show_help", ) + FUNCTION = "layout" + CATEGORY = icons.get("Comfyroll/Graphics/Layout") + + def layout(self, layout_options, image_panel, + border_thickness, border_color, background_color, + header_height, header_text, header_align, + footer_height, footer_text, footer_align, + font_name, font_color, + header_font_size, footer_font_size, + font_color_hex='#000000', border_color_hex='#000000', bg_color_hex='#000000'): + + # Get RGB values for the text and background colors + font_color = get_color_values(font_color, font_color_hex, color_mapping) + border_color = get_color_values(border_color, border_color_hex, color_mapping) + bg_color = get_color_values(background_color, bg_color_hex, color_mapping) + + main_panel = tensor2pil(image_panel) + + # Get image width and height + image_width = main_panel.width + image_height = main_panel.height + + # Set defaults + margins = 50 + line_spacing = 0 + position_x = 0 + position_y = 0 + align = "center" + rotation_angle = 0 + rotation_options = "image center" + font_outline_thickness = 0 + font_outline_color = "black" + + images = [] + + ### Create text panels and add to images array + if layout_options == "header" or layout_options == "header and footer": + header_panel = text_panel(image_width, header_height, header_text, + font_name, header_font_size, font_color, + font_outline_thickness, font_outline_color, + bg_color, + margins, line_spacing, + position_x, position_y, + align, header_align, + rotation_angle, rotation_options) + images.append(header_panel) + + images.append(main_panel) + + if layout_options == "footer" or layout_options == "header and footer": + footer_panel = text_panel(image_width, footer_height, footer_text, + font_name, footer_font_size, font_color, + font_outline_thickness, font_outline_color, + 
bg_color, + margins, line_spacing, + position_x, position_y, + align, footer_align, + rotation_angle, rotation_options) + images.append(footer_panel) + + combined_image = combine_images(images, 'vertical') + + # Add a border to the combined image + if border_thickness > 0: + combined_image = ImageOps.expand(combined_image, border_thickness, border_color) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-page-layout" + + return (pil2tensor(combined_image), show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SimpleTitles: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + layout_options = ["header", "footer", "header and footer", "no header or footer"] + + return {"required": { + "image": ("IMAGE",), + "header_text": ("STRING", {"multiline": True, "default": "text"}), + "header_height": ("INT", {"default": 0, "min": 0, "max": 1024}), + "header_font_size": ("INT", {"default": 150, "min": 0, "max": 1024}), + "header_align": (JUSTIFY_OPTIONS, ), + "footer_text": ("STRING", {"multiline": True, "default": "text"}), + "footer_height": ("INT", {"default": 0, "min": 0, "max": 1024}), + "footer_font_size": ("INT", {"default": 50, "min": 0, "max": 1024}), + "footer_align": (JUSTIFY_OPTIONS, ), + "font_name": (file_list,), + "font_color": (COLORS,), + "background_color": (COLORS,), + }, + "optional": { + "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("image", "show_help", ) + FUNCTION = "layout" + CATEGORY = icons.get("Comfyroll/Graphics/Layout") + + def layout(self, image, + header_height, header_text, header_align, header_font_size, + footer_height, footer_text, footer_align, footer_font_size, + font_name, font_color, background_color, + font_color_hex='#000000', bg_color_hex='#000000',): + + # Get RGB values for the text and background colors + font_color = get_color_values(font_color, font_color_hex, color_mapping) + bg_color = get_color_values(background_color, bg_color_hex, color_mapping) + + main_panel = tensor2pil(image) + + # Get image width and height + image_width = main_panel.width + image_height = main_panel.height + + # Set defaults + margins = 50 + line_spacing = 0 + position_x = 0 + position_y = 0 + align = "center" + rotation_angle = 0 + rotation_options = "image center" + font_outline_thickness = 0 + font_outline_color = "black" + + images = [] + + ### Create text panels and add to images array + if header_height >0: + header_panel = text_panel(image_width, header_height, header_text, + font_name, header_font_size, font_color, + font_outline_thickness, font_outline_color, + bg_color, + margins, line_spacing, + position_x, position_y, + align, header_align, + rotation_angle, rotation_options) + images.append(header_panel) + + images.append(main_panel) + + if footer_height >0: + footer_panel = text_panel(image_width, footer_height, footer_text, + font_name, footer_font_size, font_color, + font_outline_thickness, font_outline_color, + bg_color, + margins, line_spacing, + position_x, position_y, + align, footer_align, + rotation_angle, rotation_options) + 
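+            # combine_images below stacks the collected panels top-to-bottom,
+            # so the final order is header (if present), main image, footer.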
images.append(footer_panel)
+
+        combined_image = combine_images(images, 'vertical')
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-simple-titles"
+
+        return (pil2tensor(combined_image), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ImagePanel:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        directions = ["horizontal", "vertical"]
+
+        return {"required": {
+                    "image_1": ("IMAGE",),
+                    "border_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "border_color": (COLORS,),
+                    "outline_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS[1:],),
+                    "layout_direction": (directions,),
+                },
+                "optional": {
+                    "image_2": ("IMAGE",),
+                    "image_3": ("IMAGE",),
+                    "image_4": ("IMAGE",),
+                    "border_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_panel"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def make_panel(self, image_1,
+                   border_thickness, border_color,
+                   outline_thickness, outline_color,
+                   layout_direction, image_2=None, image_3=None, image_4=None,
+                   border_color_hex='#000000'):
+
+        border_color = get_color_values(border_color, border_color_hex, color_mapping)
+
+        # Convert the tensor images to PIL images
+        images = []
+        #image_1 = image_1[0, :, :, :]
+        images.append(tensor2pil(image_1))
+        if image_2 is not None:
+            #image_2 = image_2[0, :, :, :]
+            images.append(tensor2pil(image_2))
+        if image_3 is not None:
+            #image_3 = image_3[0, :, :, :]
+            images.append(tensor2pil(image_3))
+        if image_4 is not None:
+            #image_4 = image_4[0, :, :, :]
+            images.append(tensor2pil(image_4))
+
+        # Apply borders and outlines to each image
+        images = apply_outline_and_border(images, outline_thickness, outline_color, border_thickness, border_color)
+
+        combined_image = combine_images(images, layout_direction)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-image-panel"
+
+        return (pil2tensor(combined_image), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ImageGridPanel:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "images": ("IMAGE",),
+                    "border_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "border_color": (COLORS,),
+                    "outline_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS[1:],),
+                    "max_columns": ("INT", {"default": 5, "min": 1, "max": 256}),
+                },
+                "optional": {
+                    "border_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_panel"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def make_panel(self, images,
+                   border_thickness, border_color,
+                   outline_thickness, outline_color,
+                   max_columns, border_color_hex='#000000'):
+
+        border_color = get_color_values(border_color, border_color_hex, color_mapping)
+
+        # Convert the tensor batch to a list of PIL images
+        images = [tensor2pil(image) for image in images]
+
+        # Apply borders and outlines to each image
+        images = apply_outline_and_border(images, outline_thickness, outline_color, border_thickness, border_color)
+
+        # Calculate dimensions for the grid (ceiling division on the row count)
+        num_images = len(images)
+        num_rows = (num_images - 1) // max_columns + 1
+        combined_width = max(image.width for image in images) * min(max_columns, num_images)
+        combined_height = max(image.height for image in images) * num_rows
+
+        combined_image = Image.new('RGB', (combined_width, combined_height))
+
+        # Paste the images row by row; the offsets assume equally sized images
+        x_offset, y_offset = 0, 0
+        for image in images:
+            combined_image.paste(image, (x_offset, y_offset))
+            x_offset += image.width
+            if x_offset >= max_columns * image.width:
+                x_offset = 0
+                y_offset += image.height
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-image-grid-panel"
+
+        return (pil2tensor(combined_image), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ImageBorder:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "image": ("IMAGE",),
+                    "top_thickness": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "bottom_thickness": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "left_thickness": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "right_thickness": ("INT", {"default": 0, "min": 0, "max": 4096}),
+                    "border_color": (COLORS,),
+                    "outline_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS[1:],),
+                },
+                "optional": {
+                    "border_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_panel"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def make_panel(self, image,
+                   top_thickness, bottom_thickness,
+                   left_thickness, right_thickness, border_color,
+                   outline_thickness, outline_color,
+                   border_color_hex='#000000'):
+
+        images = []
+
+        border_color = get_color_values(border_color, border_color_hex, color_mapping)
+
+        for img in image:
+            img = tensor2pil(img)
+
+            # Apply the outline
+            if outline_thickness > 0:
+                img = ImageOps.expand(img, outline_thickness, fill=outline_color)
+
+            # Apply the borders
+            if left_thickness > 0 or right_thickness > 0 or top_thickness > 0 or bottom_thickness > 0:
+                img = ImageOps.expand(img, (left_thickness, top_thickness, right_thickness, bottom_thickness), fill=border_color)
+
+            images.append(pil2tensor(img))
+
+        images = torch.cat(images, dim=0)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-image-border"
+
+        return (images, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ColorPanel:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "panel_width": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "panel_height": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "fill_color": (COLORS,),
+                },
+                "optional": {
+                    "fill_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_panel"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def make_panel(self, panel_width, panel_height,
+                   fill_color, fill_color_hex='#000000'):
+
+        fill_color = get_color_values(fill_color, fill_color_hex, color_mapping)
+
+        size = (panel_width, panel_height)
+        panel = Image.new('RGB', size, fill_color)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-color-panel"
+
+        return (pil2tensor(panel), show_help, )
+
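+#---------------------------------------------------------------------------------------------------------------------#
+# Editor's note (illustrative sketch, not part of the original file): the layout
+# nodes above exchange images as ComfyUI IMAGE tensors of shape
+# [batch, height, width, channels] with values in 0..1, so their outputs can be
+# chained directly. A worked example of the ceiling-division grid math in
+# CR_ImageGridPanel, assuming a hypothetical batch of 7 images and max_columns=3:
+#
+#   num_rows = (7 - 1) // 3 + 1   # = 3
+#
+# Row 1 holds images 0-2, row 2 holds images 3-5, row 3 holds image 6, and the
+# combined canvas is min(3, 7) image-widths wide by 3 image-heights tall.
+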
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_SimpleTextPanel:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+        file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+
+        return {"required": {
+                    "panel_width": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "panel_height": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "text": ("STRING", {"multiline": True, "default": "text"}),
+                    "font_name": (file_list,),
+                    "font_color": (COLORS,),
+                    "font_size": ("INT", {"default": 100, "min": 0, "max": 1024}),
+                    "font_outline_thickness": ("INT", {"default": 0, "min": 0, "max": 50}),
+                    "font_outline_color": (COLORS,),
+                    "background_color": (COLORS,),
+                    "align": (ALIGN_OPTIONS, ),
+                    "justify": (JUSTIFY_OPTIONS, ),
+                },
+                "optional": {
+                    "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "font_outline_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "layout"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def layout(self, panel_width, panel_height,
+               text, align, justify,
+               font_name, font_color, font_size,
+               font_outline_thickness, font_outline_color,
+               background_color,
+               font_color_hex='#000000', font_outline_color_hex='#000000', bg_color_hex='#000000'):
+
+        # Get RGB values for the text, outline, and background colors
+        font_color = get_color_values(font_color, font_color_hex, color_mapping)
+        outline_color = get_color_values(font_outline_color, font_outline_color_hex, color_mapping)
+        bg_color = get_color_values(background_color, bg_color_hex, color_mapping)
+
+        # Set defaults
+        margins = 50
+        line_spacing = 0
+        position_x = 0
+        position_y = 0
+        rotation_angle = 0
+        rotation_options = "image center"
+
+        # Create the text panel
+        panel = text_panel(panel_width, panel_height, text,
+                           font_name, font_size, font_color,
+                           font_outline_thickness, outline_color,
+                           bg_color,
+                           margins, line_spacing,
+                           position_x, position_y,
+                           align, justify,
+                           rotation_angle, rotation_options)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Layout-Nodes#cr-simple-text-panel"
+
+        return (pil2tensor(panel), show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_OverlayTransparentImage:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "back_image": ("IMAGE",),
+                    "overlay_image": ("IMAGE",),
+                    "transparency": ("FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.1}),
+                    "offset_x": ("INT", {"default": 0, "min": -4096, "max": 4096}),
+                    "offset_y": ("INT", {"default": 0, "min": -4096, "max": 4096}),
+                    "rotation_angle": ("FLOAT", {"default": 0.0, "min": -360.0, "max": 360.0, "step": 0.1}),
+                    "overlay_scale_factor": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 100.0, "step": 0.1}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", )
+    FUNCTION = "overlay_image"
+    CATEGORY = icons.get("Comfyroll/Graphics/Layout")
+
+    def overlay_image(self, back_image, overlay_image,
+                      transparency, offset_x, offset_y, rotation_angle, overlay_scale_factor=1.0):
+
+        """
+        Overlay an image onto another image with transparency, rotation, and scaling.
+
+        Args:
+            back_image (torch.Tensor): Background image tensor.
+            overlay_image (torch.Tensor): Overlay image tensor.
+            transparency (float): Transparency level for the overlay image (0.0 to 1.0).
+            offset_x (int): X offset relative to the center of the back image.
+            offset_y (int): Y offset relative to the center of the back image.
+            rotation_angle (float): Rotation angle in degrees.
+            overlay_scale_factor (float): Scaling factor for the overlay image.
+
+        Returns:
+            torch.Tensor: Resulting image tensor.
+        """
+
+        # Convert the tensor images to PIL images
+        #back_image = back_image[0, :, :, :]
+        #overlay_image = overlay_image[0, :, :, :]
+        back_image = tensor2pil(back_image)
+        overlay_image = tensor2pil(overlay_image)
+
+        # Apply transparency to the overlay image
+        overlay_image.putalpha(int(255 * (1 - transparency)))
+
+        # Rotate the overlay image
+        overlay_image = overlay_image.rotate(rotation_angle, expand=True)
+
+        # Scale the overlay image (Image.ANTIALIAS was removed in Pillow 10, use LANCZOS)
+        overlay_width, overlay_height = overlay_image.size
+        new_size = (int(overlay_width * overlay_scale_factor), int(overlay_height * overlay_scale_factor))
+        overlay_image = overlay_image.resize(new_size, Image.LANCZOS)
+
+        # Calculate the position of the overlay relative to the center of the background image
+        center_x = back_image.width // 2
+        center_y = back_image.height // 2
+        position_x = center_x - overlay_image.width // 2 + offset_x
+        position_y = center_y - overlay_image.height // 2 + offset_y
+
+        # Paste the overlay onto the background, using its alpha channel as the mask
+        back_image.paste(overlay_image, (position_x, position_y), overlay_image)
+
+        # Convert the PIL image back to a torch tensor
+        return pil2tensor(back_image),
+
+#---------------------------------------------------------------------------------------------------------------------#
+# MAPPINGS
+#---------------------------------------------------------------------------------------------------------------------#
+# For reference only, actual mappings are in __init__.py
+'''
+NODE_CLASS_MAPPINGS = {
+    "CR Page Layout": CR_PageLayout,
+    "CR Image Grid Panel": CR_ImageGridPanel,
+    "CR Image XY Panel": CR_ImageXYPanel,
+    "CR Image Border": CR_ImageBorder,
+    "CR Color Panel": CR_ColorPanel,
+    "CR Simple Text Panel": CR_SimpleTextPanel,
+    "CR Overlay Transparent Image": CR_OverlayTransparentImage,
+    "CR Simple Titles": CR_SimpleTitles,
+}
+'''
+
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_pattern.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_pattern.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d7c64ea5095cc31ec5eadb5ac438a5e939d662
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_pattern.py
@@ -0,0 +1,158 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# Comfyroll Custom Nodes by RockOfFire and Akatsuzi    https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes
+# for ComfyUI                                          https://github.com/comfyanonymous/ComfyUI
+#---------------------------------------------------------------------------------------------------------------------#
+
+import numpy as np
+import torch
+import random
+import os
+from PIL import Image, ImageDraw
+from .graphics_functions import get_color_values
+from ..categories import icons
+from ..config import color_mapping, COLORS
+
+def pil2tensor(image):
+    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
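+#---------------------------------------------------------------------------------------------------------------------#
+# Editor's sketch (illustrative, not part of the original file): how the pattern
+# nodes below parse their multiline "binary_pattern" input. A hypothetical
+# 3x2 pattern:
+#
+#   binary_pattern = "101\n010"
+#   rows = binary_pattern.strip().split('\n')                   # ['101', '010']
+#   grid = [[int(bit) for bit in row.strip()] for row in rows]  # [[1, 0, 1], [0, 1, 0]]
+#
+# With width=512 and height=512 this gives squares of 512//3 x 512//2 pixels.
+# The square size is derived from the first row, and non-binary characters raise
+# ValueError in int(), so patterns should be rectangular and contain only 0s and 1s.
+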
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_BinaryPatternSimple:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "binary_pattern": ("STRING", {"multiline": True, "default": "10101"}),
+                    "width": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "height": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("IMAGE", "show_help", )
+    FUNCTION = "draw_pattern"
+    CATEGORY = icons.get("Comfyroll/Graphics/Pattern")
+
+    def draw_pattern(self, binary_pattern, width, height):
+
+        # Convert the multiline binary pattern to a 2D list
+        rows = binary_pattern.strip().split('\n')
+        grid = [[int(bit) for bit in row.strip()] for row in rows]
+
+        # Calculate the size of each square
+        square_width = width // len(rows[0])
+        square_height = height // len(rows)
+
+        # Create a new image
+        image = Image.new("RGB", (width, height), color='black')
+        draw = ImageDraw.Draw(image)
+
+        # Draw the grid based on the binary pattern
+        for row_index, row in enumerate(grid):
+            for col_index, bit in enumerate(row):
+                x1 = col_index * square_width
+                y1 = row_index * square_height
+                x2 = x1 + square_width
+                y2 = y1 + square_height
+
+                # Draw a black square if the bit is 1, else a white square
+                color = 'black' if bit == 1 else 'white'
+                draw.rectangle([x1, y1, x2, y2], fill=color, outline="black")
+
+        image_out = pil2tensor(image)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-simple-binary-pattern"
+
+        return (image_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_BinaryPattern:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        return {"required": {
+                    "binary_pattern": ("STRING", {"multiline": True, "default": "10101"}),
+                    "width": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "height": ("INT", {"default": 512, "min": 64, "max": 4096}),
+                    "background_color": (COLORS,),
+                    "color_0": (COLORS,),
+                    "color_1": (COLORS,),
+                    "outline_thickness": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS,),
+                    "jitter_distance": ("INT", {"default": 0, "min": 0, "max": 1024}),
+                },
+                "optional": {
+                    "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "color0_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "color1_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "outline_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("IMAGE", "show_help", )
+    FUNCTION = "draw_pattern"
+    CATEGORY = icons.get("Comfyroll/Graphics/Pattern")
+
+    def draw_pattern(self, binary_pattern, width, height,
+                     background_color, outline_color,
+                     color_0="white", color_1="black", outline_thickness=0,
+                     color0_hex='#000000', color1_hex='#000000',
+                     bg_color_hex='#000000', outline_color_hex='#000000',
+                     jitter_distance=0):
+
+        # Get RGB values
+        color0 = get_color_values(color_0, color0_hex, color_mapping)
+        color1 = get_color_values(color_1, color1_hex, color_mapping)
+        bg_color = get_color_values(background_color, bg_color_hex, color_mapping)
+        outline_color = get_color_values(outline_color, outline_color_hex, color_mapping)
+
+        # Convert the multiline binary pattern to a 2D list
+        rows = binary_pattern.strip().split('\n')
+        grid = [[int(bit) for bit in row.strip()] for row in rows]
+
+        # Calculate the size of each square
+        square_width = width // len(rows[0])
+        square_height = height // len(rows)
+
+        # Create a new image
+        image = Image.new("RGB", (width, height), color=bg_color)
+        draw = ImageDraw.Draw(image)
+
+        x_jitter = 0
+        y_jitter = 0
+
+        # Draw the grid based on the binary pattern
+        for row_index, row in enumerate(grid):
+            for col_index, bit in enumerate(row):
+                if jitter_distance != 0:
+                    x_jitter = random.uniform(0, jitter_distance)
+                    y_jitter = random.uniform(0, jitter_distance)
+                x1 = col_index * square_width + x_jitter
+                y1 = row_index * square_height + y_jitter
+                x2 = x1 + square_width + x_jitter
+                y2 = y1 + square_height + y_jitter
+
+                # Draw a color_1 square if the bit is 1, else a color_0 square
+                color = color1 if bit == 1 else color0
+                draw.rectangle([x1, y1, x2, y2], fill=color, outline=outline_color, width=outline_thickness)
+
+        image_out = pil2tensor(image)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-binary-pattern"
+
+        return (image_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+# MAPPINGS
+#---------------------------------------------------------------------------------------------------------------------#
+# For reference only, actual mappings are in __init__.py
+'''
+NODE_CLASS_MAPPINGS = {
+    "CR Simple Binary Pattern": CR_BinaryPatternSimple,
+    "CR Binary Pattern": CR_BinaryPattern,
+}
+'''
+
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_template.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..c668622ee845673b3382d72a80bd91cb29e146e6
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_template.py
@@ -0,0 +1,412 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# Comfyroll Custom Nodes by RockOfFire and Akatsuzi    https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes
+# for ComfyUI                                          https://github.com/comfyanonymous/ComfyUI
+#---------------------------------------------------------------------------------------------------------------------#
+
+import numpy as np
+import torch
+import os
+from PIL import Image, ImageDraw, ImageOps, ImageFont
+from ..categories import icons
+from ..config import color_mapping, COLORS
+from .graphics_functions import (hex_to_rgb,
+                                 get_color_values,
+                                 get_font_size,
+                                 draw_text_on_image,
+                                 crop_and_resize_image,
+                                 create_and_paste_panel)
+
+font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+
+#---------------------------------------------------------------------------------------------------------------------#
+
+ALIGN_OPTIONS = ["top", "center", "bottom"]
+ROTATE_OPTIONS = ["text center", "image center"]
+JUSTIFY_OPTIONS = ["left", "center", "right"]
+PERSPECTIVE_OPTIONS = ["top", "bottom", "left", "right"]
+
+#---------------------------------------------------------------------------------------------------------------------#
+
+def tensor2pil(image):
+    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+def pil2tensor(image):
+    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_SimpleMemeTemplate:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+        file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+        bar_opts = ["no bars", "top", "bottom", "top and bottom"]
+        simple_meme_presets = ["custom",
+                               "One Does Not Simply ... MEME IN COMFY",
+                               "This is fine.",
+                               "Good Morning ... No Such Thing!"]
+
+        return {"required": {
+                    "image": ("IMAGE",),
+                    "preset": (simple_meme_presets,),
+                    "text_top": ("STRING", {"multiline": True, "default": "text_top"}),
+                    "text_bottom": ("STRING", {"multiline": True, "default": "text_bottom"}),
+                    "font_name": (file_list,),
+                    "max_font_size": ("INT", {"default": 150, "min": 20, "max": 2048}),
+                    "font_color": (COLORS,),
+                    "font_outline": (["none", "thin", "thick", "extra thick"],),
+                    "bar_color": (COLORS,),
+                    "bar_options": (bar_opts,),
+                },
+                "optional": {
+                    "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "bar_color_hex": ("STRING", {"multiline": False, "default": "#000000"})
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_meme"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def make_meme(self, image, preset,
+                  text_top, text_bottom,
+                  font_name, max_font_size, font_color, font_outline,
+                  bar_color, bar_options,
+                  font_color_hex='#000000', bar_color_hex='#000000'):
+
+        # Get RGB values for the text and bar colors
+        text_color = get_color_values(font_color, font_color_hex, color_mapping)
+        bar_color = get_color_values(bar_color, bar_color_hex, color_mapping)
+
+        total_images = []
+
+        for img in image:
+
+            # Calculate the height factor for the added bars
+            if bar_options in ("top", "bottom"):
+                height_factor = 1.2
+            elif bar_options == "top and bottom":
+                height_factor = 1.4
+            else:
+                height_factor = 1.0
+
+            if preset == "One Does Not Simply ... MEME IN COMFY":
+                text_top = "One Does Not Simply"
+                text_bottom = "MEME IN COMFY"
+            if preset == "This is fine.":
+                text_top = "This is fine."
+                text_bottom = ""
+            if preset == "Good Morning ... No Such Thing!":
+                text_top = "Good Morning"
+                text_bottom = "\"No Such Thing!\""
+
+            # Create PIL images for the image and text bars
+            back_image = tensor2pil(img)
+            size = back_image.width, int(back_image.height * height_factor)
+            result_image = Image.new("RGB", size)
+
+            # Define font settings
+            font_file = os.path.join("fonts", font_name)
+            resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file)
+
+            # Create the drawing context
+            draw = ImageDraw.Draw(result_image)
+
+            # Create two color bars for the top and bottom
+            bar_width = back_image.width
+            bar_height = back_image.height // 5 ### add parameter for this in adv node
+            top_bar = Image.new("RGB", (bar_width, bar_height), bar_color)
+            bottom_bar = Image.new("RGB", (bar_width, bar_height), bar_color)
+
+            # Composite the input image onto the result image (paste() works in place)
+            if bar_options == "top" or bar_options == "top and bottom":
+                result_image.paste(back_image, (0, bar_height))
+            else:
+                result_image.paste(back_image, (0, 0))
+
+            # Get the font size and draw the text on the bars
+            if bar_options == "top" or bar_options == "top and bottom":
+                result_image.paste(top_bar, (0, 0))
+                font_top = get_font_size(draw, text_top, bar_width, bar_height, resolved_font_path, max_font_size)
+                draw_text_on_image(draw, 0, bar_width, bar_height, text_top, font_top, text_color, font_outline)
+
+            if bar_options == "bottom" or bar_options == "top and bottom":
+                result_image.paste(bottom_bar, (0, (result_image.height - bar_height)))
+                font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size)
+                if bar_options == "bottom":
+                    y_position = back_image.height
+                else:
+                    y_position = bar_height + back_image.height
+                draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, font_outline)
+
+            # Overlay text directly on the image where no bar was drawn for it
+            if bar_options == "bottom" and text_top != "":
+                font_top = get_font_size(draw, text_top, bar_width, bar_height, resolved_font_path, max_font_size)
+                draw_text_on_image(draw, 0, bar_width, bar_height, text_top, font_top, text_color, font_outline)
+
+            if bar_options == "top" and text_bottom != "":
+                font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size)
+                y_position = back_image.height
+                draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, font_outline)
+
+            if bar_options == "no bars" and text_bottom != "":
+                font_bottom = get_font_size(draw, text_bottom, bar_width, bar_height, resolved_font_path, max_font_size)
+                y_position = back_image.height - bar_height
+                draw_text_on_image(draw, y_position, bar_width, bar_height, text_bottom, font_bottom, text_color, font_outline)
+
+            if bar_options == "no bars" and text_top != "":
+                font_top = get_font_size(draw, text_top, bar_width, bar_height, resolved_font_path, max_font_size)
+                draw_text_on_image(draw, 0, bar_width, bar_height, text_top, font_top, text_color, font_outline)
+
+            # Convert to tensor
+            out_image = np.array(result_image.convert("RGB")).astype(np.float32) / 255.0
+            out_image = torch.from_numpy(out_image).unsqueeze(0)
+            total_images.append(out_image)
+
+        # Batch the images
+        images_out = torch.cat(total_images, 0)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Template-Nodes#cr-simple-meme-template"
+
+        return (images_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_SimpleBanner:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts")
+        file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")]
+
+        return {"required": {
+                    "image": ("IMAGE",),
+                    #"image_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}),
+                    "banner_text": ("STRING", {"multiline": True, "default": "text"}),
+                    "font_name": (file_list,),
+                    "max_font_size": ("INT", {"default": 150, "min": 20, "max": 2048}),
+                    "font_color": (COLORS,),
+                    "outline_thickness": ("INT", {"default": 0, "min": 0, "max": 500}),
+                    "outline_color": (COLORS,),
+                    #"text_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}),
+                    #"drop_shadow_angle": ("INT", {"default": 0, "min": 0, "max": 500}),
+                    #"drop_shadow_offset": ("INT", {"default": 0, "min": 0, "max": 500}),
+                    #"drop_shadow_color": (COLORS,),
+                    #"drop_shadow_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}),
+                    #"wrap_text": (["true", "false"],),
+                    "margin_size": ("INT", {"default": 0, "min": 0, "max": 500}),
+                },
+                "optional": {
+                    "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "outline_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "make_banner"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def make_banner(self, image, banner_text,
+                    font_name, max_font_size, font_color,
+                    outline_thickness, outline_color, margin_size,
+                    font_color_hex='#000000', outline_color_hex='#000000'):
+
+        # Get RGB values for the text and outline colors
+        text_color = get_color_values(font_color, font_color_hex, color_mapping)
+        outline_color = get_color_values(outline_color, outline_color_hex, color_mapping)
+
+        total_images = []
+
+        for img in image:
+
+            # Create a PIL image for the banner
+            back_image = tensor2pil(img).convert("RGBA")
+
+            # Define font settings
+            font_file = os.path.join("fonts", font_name)
+            resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file)
+
+            # Create the drawing context
+            draw = ImageDraw.Draw(back_image)
+
+            # Area available to the text, inside the margins
+            area_width = back_image.width - (margin_size * 2)
+            area_height = back_image.height - (margin_size * 2)
+
+            # Get the font size and draw the text centered on the image
+            font = get_font_size(draw, banner_text, area_width, area_height, resolved_font_path, max_font_size)
+
+            x = back_image.width // 2
+            y = back_image.height // 2
+
+            if outline_thickness > 0:
+                draw.text((x, y), banner_text, fill=text_color, font=font, anchor='mm', stroke_width=outline_thickness, stroke_fill=outline_color)
+            else:
+                draw.text((x, y), banner_text, fill=text_color, font=font, anchor='mm')
+
+            # Convert to tensor
+            out_image = np.array(back_image.convert("RGB")).astype(np.float32) / 255.0
+            out_image = torch.from_numpy(out_image).unsqueeze(0)
+            total_images.append(out_image)
+
+        # Batch the images
+        images_out = torch.cat(total_images, 0)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Template-Nodes#cr-simple-banner"
+
+        return (images_out, show_help, )
+
+#---------------------------------------------------------------------------------------------------------------------#
+class CR_ComicPanelTemplates:
+
+    @classmethod
+    def INPUT_TYPES(s):
+
+        directions = ["left to right", "right to left"]
+
+        # Template codes: G<rows><columns> is a regular grid,
+        # H<c1><c2>... is a set of rows with c1, c2, ... panels per row,
+        # V<r1><r2>... is a set of columns with r1, r2, ... panels per column
+        templates = ["custom",
+                     "G22", "G33",
+                     "H2", "H3",
+                     "H12", "H13",
+                     "H21", "H23",
+                     "H31", "H32",
+                     "V2", "V3",
+                     "V12", "V13",
+                     "V21", "V23",
+                     "V31", "V32"]
+
+        return {"required": {
+                    "page_width": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "page_height": ("INT", {"default": 512, "min": 8, "max": 4096}),
+                    "template": (templates,),
+                    "reading_direction": (directions,),
+                    "border_thickness": ("INT", {"default": 5, "min": 0, "max": 1024}),
+                    "outline_thickness": ("INT", {"default": 2, "min": 0, "max": 1024}),
+                    "outline_color": (COLORS,),
+                    "panel_color": (COLORS,),
+                    "background_color": (COLORS,),
+                },
+                "optional": {
+                    "images": ("IMAGE",),
+                    "custom_panel_layout": ("STRING", {"multiline": False, "default": "H123"}),
+                    "outline_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "panel_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                    "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}),
+                }
+        }
+
+    RETURN_TYPES = ("IMAGE", "STRING", )
+    RETURN_NAMES = ("image", "show_help", )
+    FUNCTION = "layout"
+    CATEGORY = icons.get("Comfyroll/Graphics/Template")
+
+    def layout(self, page_width, page_height, template, reading_direction,
+               border_thickness, outline_thickness,
+               outline_color, panel_color, background_color,
+               images=None, custom_panel_layout='H123',
+               outline_color_hex='#000000', panel_color_hex='#000000', bg_color_hex='#000000'):
+
+        panels = []
+        k = 0
+        len_images = 0
+
+        # Convert tensor images to PIL
+        if images is not None:
+            images = [tensor2pil(image) for image in images]
+            len_images = len(images)
+
+        # Get RGB values for the outline, panel, and background colors
+        outline_color = get_color_values(outline_color, outline_color_hex, color_mapping)
+        panel_color = get_color_values(panel_color, panel_color_hex, color_mapping)
+        bg_color = get_color_values(background_color, bg_color_hex, color_mapping)
+
+        # Create the page and apply the background color
+        size = (page_width - (2 * border_thickness), page_height - (2 * border_thickness))
+        page = Image.new('RGB', size, bg_color)
+        draw = ImageDraw.Draw(page)
+
+        if template == "custom":
+            template = custom_panel_layout
+
+        # Calculate panel positions and add them to the page
+        first_char = template[0]
+        if first_char == "G":
+            rows = int(template[1])
+            columns = int(template[2])
+            panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+            panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+            # Row loop
+            for i in range(rows):
+                # Column loop
+                for j in range(columns):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images, i, j, k, len_images, reading_direction)
+                    k += 1
+
+        elif first_char == "H":
+            rows = len(template) - 1
+            panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+            for i in range(rows):
+                columns = int(template[i+1])
+                panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+                for j in range(columns):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images, i, j, k, len_images, reading_direction)
+                    k += 1
+
+        elif first_char == "V":
+            columns = len(template) - 1
+            panel_width = (page.width - (2 * columns * (border_thickness + outline_thickness))) // columns
+            for j in range(columns):
+                rows = int(template[j+1])
+                panel_height = (page.height - (2 * rows * (border_thickness + outline_thickness))) // rows
+                for i in range(rows):
+                    # Draw the panel
+                    create_and_paste_panel(page, border_thickness, outline_thickness,
+                                           panel_width, panel_height, page.width,
+                                           panel_color, bg_color, outline_color,
+                                           images, i, j, k, len_images, reading_direction)
+                    k += 1
+
+        # Add a border to the page
+        if border_thickness > 0:
+            page = ImageOps.expand(page, border_thickness, bg_color)
+
+        show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Template-Nodes#cr-comic-panel-templates"
+
+        return (pil2tensor(page), show_help, )
+
+
+#---------------------------------------------------------------------------------------------------------------------#
+# MAPPINGS
+#---------------------------------------------------------------------------------------------------------------------#
+# For reference only, actual mappings are in __init__.py
+'''
+NODE_CLASS_MAPPINGS = {
+    "CR Simple Meme Template": CR_SimpleMemeTemplate,
+    "CR Simple Banner": CR_SimpleBanner,
+    "CR Comic Panel Templates": CR_ComicPanelTemplates,
+}
+'''
+
diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_text.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef2fab495b5df9ccbad09c0ef687a774474b1bef
--- /dev/null
+++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pil_text.py
@@ -0,0 +1,556 @@
+#---------------------------------------------------------------------------------------------------------------------#
+# Comfyroll Custom Nodes by RockOfFire and Akatsuzi    https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes
+# for ComfyUI                                          https://github.com/comfyanonymous/ComfyUI
+#---------------------------------------------------------------------------------------------------------------------#
+
+import numpy as np
+import torch
+import os
+from PIL import Image, ImageDraw, ImageOps, ImageFont
+from ..categories import icons
+from ..config import color_mapping, COLORS
+from .graphics_functions import (draw_masked_text,
+                                 hex_to_rgb,
+                                 draw_text_on_image,
+                                 get_font_size,
+                                 get_text_size,
+                                 get_color_values,
+                                 reduce_opacity)
+
+'''
+try:
+    from bidi.algorithm import get_display
+except ImportError:
+    import subprocess
+    subprocess.check_call(['python', '-m', 'pip', 'install', 'python_bidi'])
+
+try:
+    import arabic_reshaper
+except ImportError:
+    import subprocess
+    subprocess.check_call(['python', '-m', 'pip', 'install', 'arabic_reshaper'])
+'''
+
+def get_offset_for_true_mm(text, draw, font):
+    anchor_bbox = draw.textbbox((0, 0), text, font=font, anchor='lt')
+    anchor_center = (anchor_bbox[0] + anchor_bbox[2]) // 2, (anchor_bbox[1] + anchor_bbox[3]) // 2
+    mask_bbox = font.getmask(text).getbbox()
+    mask_center = (mask_bbox[0] + mask_bbox[2]) // 2, (mask_bbox[1] + mask_bbox[3]) // 2
+    return anchor_center[0] - mask_center[0], anchor_center[1] - mask_center[1]
+
+#---------------------------------------------------------------------------------------------------------------------#
+
+ALIGN_OPTIONS = ["center", "top", "bottom"]
+ROTATE_OPTIONS = ["text center", "image center"] +JUSTIFY_OPTIONS = ["center", "left", "right"] +PERSPECTIVE_OPTIONS = ["top", "bottom", "left", "right"] + +#---------------------------------------------------------------------------------------------------------------------# + +def tensor2pil(image): + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_OverlayText: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + return {"required": { + "image": ("IMAGE",), + "text": ("STRING", {"multiline": True, "default": "text"}), + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + "font_color": (COLORS,), + "align": (ALIGN_OPTIONS,), + "justify": (JUSTIFY_OPTIONS,), + "margins": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "line_spacing": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "position_x": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "position_y": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "rotation_angle": ("FLOAT", {"default": 0.0, "min": -360.0, "max": 360.0, "step": 0.1}), + "rotation_options": (ROTATE_OPTIONS,), + }, + "optional": {"font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}) + } + } + + RETURN_TYPES = ("IMAGE", "STRING",) + RETURN_NAMES = ("IMAGE", "show_help",) + FUNCTION = "overlay_text" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def overlay_text(self, image, text, font_name, font_size, font_color, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options, + font_color_hex='#000000'): + + # Get RGB values for the text color + text_color = get_color_values(font_color, font_color_hex, color_mapping) + + # Convert tensor images + image_3d = image[0, :, :, :] + + # Create PIL images for the text and background layers and text mask + back_image = tensor2pil(image_3d) + text_image = Image.new('RGB', back_image.size, text_color) + text_mask = Image.new('L', back_image.size) + + # Draw the text on the text mask + rotated_text_mask = draw_masked_text(text_mask, text, font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options) + + # Composite the text image onto the background image using the rotated text mask + image_out = Image.composite(text_image, back_image, rotated_text_mask) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-overlay-text" + + # Convert the PIL image back to a torch tensor + return (pil2tensor(image_out), show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_DrawText: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + return {"required": { + "image_width": ("INT", {"default": 512, "min": 64, "max": 2048}), + 
"image_height": ("INT", {"default": 512, "min": 64, "max": 2048}), + "text": ("STRING", {"multiline": True, "default": "text"}), + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + "font_color": (COLORS,), + "background_color": (COLORS,), + "align": (ALIGN_OPTIONS,), + "justify": (JUSTIFY_OPTIONS,), + "margins": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "line_spacing": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "position_x": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "position_y": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "rotation_angle": ("FLOAT", {"default": 0.0, "min": -360.0, "max": 360.0, "step": 0.1}), + "rotation_options": (ROTATE_OPTIONS,), + }, + "optional": { + "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + "bg_color_hex": ("STRING", {"multiline": False, "default": "#000000"}) + } + } + + RETURN_TYPES = ("IMAGE", "STRING",) + RETURN_NAMES = ("IMAGE", "show_help",) + FUNCTION = "draw_text" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def draw_text(self, image_width, image_height, text, + font_name, font_size, font_color, + background_color, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options, + font_color_hex='#000000', bg_color_hex='#000000'): + + # Get RGB values for the text and background colors + text_color = get_color_values(font_color, font_color_hex, color_mapping) + bg_color = get_color_values(background_color, bg_color_hex, color_mapping) + + # Create PIL images for the text and background layers and text mask + size = (image_width, image_height) + text_image = Image.new('RGB', size, text_color) + back_image = Image.new('RGB', size, bg_color) + text_mask = Image.new('L', back_image.size) + + # Draw the text on the text mask + rotated_text_mask = draw_masked_text(text_mask, text, font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options) + + # Composite the text image onto the background image using the rotated text mask + image_out = Image.composite(text_image, back_image, rotated_text_mask) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-draw-text" + + # Convert the PIL image back to a torch tensor + return (pil2tensor(image_out), show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_MaskText: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + return {"required": { + "image": ("IMAGE",), + "text": ("STRING", {"multiline": True, "default": "text"}), + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + "background_color": (COLORS,), + "align": (ALIGN_OPTIONS,), + "justify": (JUSTIFY_OPTIONS,), + "margins": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "line_spacing": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "position_x": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "position_y": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "rotation_angle": ("FLOAT", {"default": 0.0, "min": -360.0, "max": 360.0, "step": 0.1}), + "rotation_options": (ROTATE_OPTIONS,), + }, + "optional": { + "bg_color_hex": ("STRING", 
{"multiline": False, "default": "#000000"}) + } + } + + RETURN_TYPES = ("IMAGE", "STRING",) + RETURN_NAMES = ("IMAGE", "show_help",) + FUNCTION = "mask_text" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def mask_text(self, image, text, font_name, font_size, + margins, line_spacing, + position_x, position_y, background_color, + align, justify, + rotation_angle, rotation_options, + bg_color_hex='#000000'): + + # Get RGB values for the background color + bg_color = get_color_values(background_color, bg_color_hex, color_mapping) + + # Convert tensor images + image_3d = image[0, :, :, :] + + # Create PIL images for the text and background layers and text mask + text_image = tensor2pil(image_3d) + text_mask = Image.new('L', text_image.size) + background_image = Image.new('RGB', text_mask.size, bg_color) + + # Draw the text on the text mask + rotated_text_mask = draw_masked_text(text_mask, text, font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options) + + # Invert the text mask (so the text is white and the background is black) + text_mask = ImageOps.invert(rotated_text_mask) + + # Composite the text image onto the background image using the inverted text mask + image_out = Image.composite(background_image, text_image, text_mask) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-mask-text" + + # Convert the PIL image back to a torch tensor + return (pil2tensor(image_out), show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_CompositeText: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + return {"required": { + "image_text": ("IMAGE",), + "image_background": ("IMAGE",), + "text": ("STRING", {"multiline": True, "default": "text"}), + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + "align": (ALIGN_OPTIONS,), + "justify": (JUSTIFY_OPTIONS,), + "margins": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "line_spacing": ("INT", {"default": 0, "min": -1024, "max": 1024}), + "position_x": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "position_y": ("INT", {"default": 0, "min": -4096, "max": 4096}), + "rotation_angle": ("FLOAT", {"default": 0.0, "min": -360.0, "max": 360.0, "step": 0.1}), + "rotation_options": (ROTATE_OPTIONS,), + } + } + + RETURN_TYPES = ("IMAGE", "STRING",) + RETURN_NAMES = ("IMAGE", "show_help",) + FUNCTION = "composite_text" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def composite_text(self, image_text, image_background, text, + font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options): + + # Convert tensor images + image_text_3d = image_text[0, :, :, :] + image_back_3d = image_background[0, :, :, :] + + # Create PIL images for the text and background layers and text mask + text_image = tensor2pil(image_text_3d) + back_image = tensor2pil(image_back_3d) + text_mask = Image.new('L', back_image.size) + + # Draw the text on the text mask + rotated_text_mask = draw_masked_text(text_mask, text, font_name, font_size, + margins, line_spacing, + position_x, position_y, + align, justify, + rotation_angle, rotation_options) + + # 
Composite the text image onto the background image using the rotated text mask + image_out = Image.composite(text_image, back_image, rotated_text_mask) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-composite-text" + + # Convert the PIL image back to a torch tensor + return (pil2tensor(image_out), show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ArabicTextRTL: + + @classmethod + def INPUT_TYPES(s): + + return {"required": { + "arabic_text": ("STRING", {"multiline": True, "default": "شمس"}), + } + } + + RETURN_TYPES = ("STRING", "STRING", ) + RETURN_NAMES = ("arabic_text_rtl", "show help", ) + FUNCTION = "adjust_arabic_to_rtl" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def adjust_arabic_to_rtl(self, arabic_text): + """ + Adjust Arabic text to read from right to left (RTL). + + Args: + arabic_text (str): The Arabic text to be adjusted. + + Returns: + str: The adjusted Arabic text in RTL format. + """ + + arabic_text_reshaped = arabic_reshaper.reshape(arabic_text) + rtl_text = get_display(arabic_text_reshaped) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-arabic-text-rtl" + + return (rtl_text, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SimpleTextWatermark: + + @classmethod + def INPUT_TYPES(s): + + font_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts") + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + ALIGN_OPTIONS = ["center", "top left", "top center", "top right", "bottom left", "bottom center", "bottom right"] + + return {"required": { + "image": ("IMAGE",), + "text": ("STRING", {"multiline": False, "default": "@ your name"}), + "align": (ALIGN_OPTIONS,), + "opacity": ("FLOAT", {"default": 0.30, "min": 0.00, "max": 1.00, "step": 0.01}), + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + "font_color": (COLORS,), + "x_margin": ("INT", {"default": 20, "min": -1024, "max": 1024}), + "y_margin": ("INT", {"default": 20, "min": -1024, "max": 1024}), + }, + "optional": { + "font_color_hex": ("STRING", {"multiline": False, "default": "#000000"}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "overlay_text" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def overlay_text(self, image, text, align, + font_name, font_size, font_color, + opacity, x_margin, y_margin, font_color_hex='#000000'): + + # Get RGB values for the text color + text_color = get_color_values(font_color, font_color_hex, color_mapping) + + total_images = [] + + for img in image: + + # Create PIL images for the background layer + img = tensor2pil(img) + + textlayer = Image.new("RGBA", img.size) + draw = ImageDraw.Draw(textlayer) + + # Load the font + font_file = "fonts\\" + str(font_name) + resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), font_file) + font = ImageFont.truetype(str(resolved_font_path), size=font_size) + + # Get the size of the text + textsize = get_text_size(draw, text, font) + + # Calculate the position to place the text based on the alignment + if align == 'center': + textpos = [(img.size[0] - textsize[0]) // 2, (img.size[1] - textsize[1]) // 2] + elif 
align == 'top left': + textpos = [x_margin, y_margin] + elif align == 'top center': + textpos = [(img.size[0] - textsize[0]) // 2, y_margin] + elif align == 'top right': + textpos = [img.size[0] - textsize[0] - x_margin, y_margin] + elif align == 'bottom left': + textpos = [x_margin, img.size[1] - textsize[1] - y_margin] + elif align == 'bottom center': + textpos = [(img.size[0] - textsize[0]) // 2, img.size[1] - textsize[1] - y_margin] + elif align == 'bottom right': + textpos = [img.size[0] - textsize[0] - x_margin, img.size[1] - textsize[1] - y_margin] + + # Draw the text on the text layer + draw.text(textpos, text, font=font, fill=text_color) + + # Adjust the opacity of the text layer if needed + if opacity != 1: + textlayer = reduce_opacity(textlayer, opacity) + + # Composite the text layer on top of the original image + out_image = Image.composite(textlayer, img, textlayer) + + # convert to tensor + out_image = np.array(out_image.convert("RGB")).astype(np.float32) / 255.0 + out_image = torch.from_numpy(out_image).unsqueeze(0) + total_images.append(out_image) + + images_out = torch.cat(total_images, 0) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-simple-text-watermark" + + # Convert the PIL image back to a torch tensor + return (images_out, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SystemTrueTypeFont: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + + system_root = os.environ.get('SystemRoot') + font_dir = os.path.join(system_root, 'Fonts') + file_list = [f for f in os.listdir(font_dir) if os.path.isfile(os.path.join(font_dir, f)) and f.lower().endswith(".ttf")] + + return {"required": { + "font_name": (file_list,), + "font_size": ("INT", {"default": 50, "min": 1, "max": 1024}), + } + } + + RETURN_TYPES = ("FONT", "IMAGE", "STRING",) + RETURN_NAMES = ("FONT", "preview", "show_help",) + FUNCTION = "truetype_font" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def truetype_font(self, font_name, font_size): + + # Construct the path to the Fonts directory + system_root = os.environ.get('SystemRoot') + fonts_directory = os.path.join(system_root, 'Fonts') + resolved_font_path = os.path.join(fonts_directory, font_name) + font_out = ImageFont.truetype(str(resolved_font_path), size=font_size) + + # Create a blank image with a white background + image = Image.new('RGB', (300, 100), 'white') + draw = ImageDraw.Draw(image) + + x = image.width // 2 + y = image.height // 2 + + text = "abcdefghij" + + # Draw the text on the image + draw.text((x, y), text, font=font_out, fill='black', anchor='mm') + + preview = pil2tensor(image) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-system-truetype-font" + + return (font_out, preview, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_DisplayFont: + + @classmethod + def INPUT_TYPES(s): + + return {"required": { + "font": ("FONT",), + "text": ("STRING", {"multiline": False, "default": "abcdefghij"}), + } + } + + RETURN_TYPES = ("IMAGE", "show_help", ) + OUTPUT_NODE = True + FUNCTION = "draw_font" + CATEGORY = icons.get("Comfyroll/Graphics/Text") + + def draw_font(self, font, text): + # Create a blank image with a white background + image = Image.new('RGB', (300, 100), 'white') + draw = ImageDraw.Draw(image) + + # Calculate the position to center 
the text + x = image.width // 2 + y = image.height // 2 + + # Draw the text on the image + draw.text((x, y), text, font=font, fill='black', anchor='mm') + + # Convert the PIL image back to a torch tensor + image_out = pil2tensor(image) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Text-Nodes#cr-display-font" + + return (image_out, show_help,) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR Overlay Text":CR_OverlayText, + "CR Draw Text":CR_DrawText, + "CR Mask Text":CR_MaskText, + "CR Composite Text":CR_CompositeText, + "CR Draw Perspective Text":CR_DrawPerspectiveText, + "CR Arabic Text RTL": CR_ArabicTextRTL, + "CR Simple Text Watermark": CR_SimpleTextWatermark, + "CR System TrueType Font": CR_SystemTrueTypeFont, + "CR Display Font": CR_DisplayFont, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pipe.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..0636d9152c0a8f03d2c9077967f08651b861fe52 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/pipe.py @@ -0,0 +1,266 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Pipe Nodes by Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# +# based on Tiny Terra nodes +#---------------------------------------------------------------------------------------------------------------------# + +from ..categories import icons + +#---------------------------------------------------------------------------------------------------------------------# +# MODULE NODES +#---------------------------------------------------------------------------------------------------------------------# + +class CR_ModulePipeLoader: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + #"model": ("MODEL",), + }, + "optional": { + "model": ("MODEL",), + "pos": ("CONDITIONING",), + "neg": ("CONDITIONING",), + "latent": ("LATENT",), + "vae": ("VAE",), + "clip": ("CLIP",), + "controlnet": ("CONTROL_NET",), + "image": ("IMAGE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}) + }, + } + + RETURN_TYPES = ("PIPE_LINE", "STRING", ) + RETURN_NAMES = ("pipe", "show_help", ) + FUNCTION = "flush" + CATEGORY = icons.get("Comfyroll/Pipe/Module") + + def flush(self, model=0, pos=0, neg=0, latent=0, vae=0, clip=0, controlnet=0, image=0, seed=0): + pipe_line = (model, pos, neg, latent, vae, clip, controlnet, image, seed) + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-module-pipe-loader" + return (pipe_line, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ModuleInput: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": {"pipe": ("PIPE_LINE",)}, + } + + RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", 
"VAE", "CLIP", "CONTROL_NET", "IMAGE", "INT", "STRING", ) + RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "controlnet", "image", "seed", "show_help", ) + FUNCTION = "flush" + + CATEGORY = icons.get("Comfyroll/Pipe/Module") + + def flush(self, pipe): + model, pos, neg, latent, vae, clip, controlnet, image, seed = pipe + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-module-input" + return (pipe, model, pos, neg, latent, vae, clip, controlnet, image, seed, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ModuleOutput: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": {"pipe": ("PIPE_LINE",)}, + "optional": { + "model": ("MODEL",), + "pos": ("CONDITIONING",), + "neg": ("CONDITIONING",), + "latent": ("LATENT",), + "vae": ("VAE",), + "clip": ("CLIP",), + "controlnet": ("CONTROL_NET",), + "image": ("IMAGE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}) + }, + } + + RETURN_TYPES = ("PIPE_LINE", "STRING", ) + RETURN_NAMES = ("pipe", "show_help", ) + FUNCTION = "flush" + CATEGORY = icons.get("Comfyroll/Pipe/Module") + + def flush(self, pipe, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, controlnet=None, image=None, seed=None): + new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed = pipe + + if model is not None: + new_model = model + + if pos is not None: + new_pos = pos + + if neg is not None: + new_neg = neg + + if latent is not None: + new_latent = latent + + if vae is not None: + new_vae = vae + + if clip is not None: + new_clip = clip + + if controlnet is not None: + new_controlnet = controlnet + + if image is not None: + new_image = image + + if seed is not None: + new_seed = seed + + pipe = new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_controlnet, new_image, new_seed + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-module-output" + return (pipe, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# PIPE NODES +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImagePipeIn: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + #"model": ("MODEL",), + }, + "optional": { + "image": ("IMAGE",), + "width": ("INT", {"default": 512, "min": 64, "max": 2048}), + "height": ("INT", {"default": 512, "min": 64, "max": 2048}), + "upscale_factor": ("FLOAT", {"default": 1, "min": 1, "max": 2000}) + }, + } + + RETURN_TYPES = ("PIPE_LINE", "STRING", ) + RETURN_NAMES = ("pipe", "show_help", ) + FUNCTION = "flush" + CATEGORY = icons.get("Comfyroll/Pipe/Image") + + def flush(self, image=0, width=0, height=0, upscale_factor=0): + pipe_line = (image, width, height, upscale_factor) + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-image-pipe-in" + return (pipe_line, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImagePipeEdit: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return {"required": {"pipe": ("PIPE_LINE",)}, + "optional": { + "image": ("IMAGE",), + "width": ("INT", {"default": 512, "min": 64, 
"max": 2048}), + "height": ("INT", {"default": 512, "min": 64, "max": 2048}), + "upscale_factor": ("FLOAT", {"default": 1, "min": 1, "max": 2000}) + }, + } + + RETURN_TYPES = ("PIPE_LINE", "STRING", ) + RETURN_NAMES = ("pipe", "show_help", ) + FUNCTION = "flush" + CATEGORY = icons.get("Comfyroll/Pipe/Image") + + def flush(self, pipe, image=None, width=None, height=None, upscale_factor=None): + new_image, new_width, new_height, new_upscale_factor = pipe + + if image is not None: + new_image = image + + if width is not None: + new_width = width + + if height is not None: + new_height = height + + if upscale_factor is not None: + new_upscale_factor = upscale_factor + + pipe = new_image, new_width, new_height, new_upscale_factor + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-image-pipe-edit" + return (pipe, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_ImagePipeOut: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": {"pipe": ("PIPE_LINE",)}, + } + + RETURN_TYPES = ("PIPE_LINE", "IMAGE", "INT", "INT", "FLOAT", "STRING", ) + RETURN_NAMES = ("pipe", "image", "width", "height", "upscale_factor", "show_help", ) + FUNCTION = "flush" + CATEGORY = icons.get("Comfyroll/Pipe/Image") + + def flush(self, pipe): + #if switch == "Off": + #return (pipe, ) + #else: + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-image-pipe-out" + image, width, height, upscale_factor = pipe + return (pipe, image, width, height, upscale_factor, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_InputSwitchPipe: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "Input": ("INT", {"default": 1, "min": 1, "max": 2}), + "pipe1": ("PIPE_LINE",), + "pipe2": ("PIPE_LINE",) + } + } + + RETURN_TYPES = ("PIPE_LINE", "STRING", ) + RETURN_NAMES = ("PIPE_LINE", "show_help", ) + OUTPUT_NODE = True + FUNCTION = "InputSwitchPipe" + CATEGORY = icons.get("Comfyroll/Pipe") + + def InputSwitchPipe(self, Input, pipe1, pipe2): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Pipe-Nodes#cr-pipe-switch" + if Input == 1: + return (pipe1, show_help, ) + else: + return (pipe2, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS_2 = { + "CR Module Pipe Loader":CR_ModulePipeLoader, + "CR Module Input":CR_ModuleInput, + "CR Module Output":CR_ModuleOutput, + "CR Image Pipe In":CR_ImagePipeIn, + "CR Image Pipe Edit":CR_ImagePipeEdit, + "CR Image Pipe Out":CR_ImagePipeOut, + "CR Pipe Switch":CR_InputSwitchPipe, +} +''' diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/sdxl.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..7db12a08061f74069e897abe08abe0230b5fe6f5 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/sdxl.py @@ -0,0 +1,206 @@ +#---------------------------------------------------------------------------------------------------------------------# +# 
Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import torch +import numpy as np +from PIL import Image, ImageEnhance +import os +import sys +import folder_paths +from nodes import MAX_RESOLUTION, ControlNetApply +from ..categories import icons + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) + +#---------------------------------------------------------------------------------------------------------------------# +# SDXL Nodes +#---------------------------------------------------------------------------------------------------------------------# +class CR_PromptMixPresets: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required":{ + }, + "optional":{ + "prompt_positive": ("STRING", {"multiline": True, "default": "prompt_pos"}), + "prompt_negative": ("STRING", {"multiline": True, "default": "prompt_neg"}), + "style_positive": ("STRING", {"multiline": True, "default": "style_pos"}), + "style_negative": ("STRING", {"multiline": True, "default": "style_neg"}), + "preset": (["default with no style text", "default with style text", + "style boost 1", "style boost 2", "style text to refiner"],), + }, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", ) + RETURN_NAMES = ("pos_g", "pos_l", "pos_r", "neg_g", "neg_l", "neg_r", "show_help", ) + FUNCTION = "mixer" + CATEGORY = icons.get("Comfyroll/SDXL") + + def mixer(self, prompt_positive, prompt_negative, style_positive, style_negative, preset): + if preset == "default with no style text": + pos_g = prompt_positive + pos_l = prompt_positive + pos_r = prompt_positive + neg_g = prompt_negative + neg_l = prompt_negative + neg_r = prompt_negative + elif preset == "default with style text": + pos_g = prompt_positive + style_positive + pos_l = prompt_positive + style_positive + pos_r = prompt_positive + style_positive + neg_g = prompt_negative + style_negative + neg_l = prompt_negative + style_negative + neg_r = prompt_negative + style_negative + elif preset == "style boost 1": + pos_g = prompt_positive + pos_l = style_positive + pos_r = prompt_positive + neg_g = prompt_negative + neg_l = style_negative + neg_r = prompt_negative + elif preset == "style boost 2": + pos_g = style_positive + pos_l = prompt_positive + pos_r = style_positive + neg_g = style_negative + neg_l = prompt_negative + neg_r = style_negative + elif preset == "style text to refiner": + pos_g = prompt_positive + pos_l = prompt_positive + pos_r = style_positive + neg_g = prompt_negative + neg_l = prompt_negative + neg_r = style_negative + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/SDXL-Nodes#cr-sdxl-prompt-mix-presets" + return (pos_g, pos_l, pos_r, neg_g, neg_l, neg_r, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SDXLStyleText: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "positive_style": ("STRING", {"default": "POS_STYLE", "multiline": True}), + "negative_style": ("STRING", {"default": "NEG_STYLE", "multiline": True}), + }, + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", ) + RETURN_NAMES = ("positive_prompt_text_l", "negative_prompt_text_l" , "show_help", ) + FUNCTION = "get_value" 
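+ # ComfyUI node contract (descriptive comment, not part of the upstream source): FUNCTION names + # the method ComfyUI calls when the node executes, and CATEGORY sets where the node appears in + # the add-node menu; this node simply passes its two style strings through for wiring into the + # SDXL l-prompt inputs.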
+ CATEGORY = icons.get("Comfyroll/SDXL") + + def get_value(self, positive_style, negative_style): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/SDXL-Nodes#cr-sdxl-style-text" + return (positive_style, negative_style, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_SDXLBasePromptEncoder: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "base_clip": ("CLIP", ), + "pos_g": ("STRING", {"multiline": True, "default": "POS_G"}), + "pos_l": ("STRING", {"multiline": True, "default": "POS_L"}), + "neg_g": ("STRING", {"multiline": True, "default": "NEG_G"}), + "neg_l": ("STRING", {"multiline": True, "default": "NEG_L"}), + "preset": (["preset A", "preset B", "preset C"],), + "base_width": ("INT", {"default": 4096.0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + "base_height": ("INT", {"default": 4096.0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + "target_width": ("INT", {"default": 4096.0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + "target_height": ("INT", {"default": 4096.0, "min": 0, "max": MAX_RESOLUTION, "step": 64}), + }, + } + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "STRING", ) + RETURN_NAMES = ("base_positive", "base_negative", "show_help", ) + FUNCTION = "encode" + CATEGORY = icons.get("Comfyroll/SDXL") + + def encode(self, base_clip, pos_g, pos_l, neg_g, neg_l, base_width, base_height, crop_w, crop_h, target_width, target_height, preset,): + empty = base_clip.tokenize("") + + # positive prompt + tokens1 = base_clip.tokenize(pos_g) + tokens1["l"] = base_clip.tokenize(pos_l)["l"] + + if len(tokens1["l"]) != len(tokens1["g"]): + while len(tokens1["l"]) < len(tokens1["g"]): + tokens1["l"] += empty["l"] + while len(tokens1["l"]) > len(tokens1["g"]): + tokens1["g"] += empty["g"] + + cond1, pooled1 = base_clip.encode_from_tokens(tokens1, return_pooled=True) + res1 = [[cond1, {"pooled_output": pooled1, "width": base_width, "height": base_height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]] + + # negative prompt + tokens2 = base_clip.tokenize(neg_g) + tokens2["l"] = base_clip.tokenize(neg_l)["l"] + + if len(tokens2["l"]) != len(tokens2["g"]): + while len(tokens2["l"]) < len(tokens2["g"]): + tokens2["l"] += empty["l"] + while len(tokens2["l"]) > len(tokens2["g"]): + tokens2["g"] += empty["g"] + + cond2, pooled2 = base_clip.encode_from_tokens(tokens2, return_pooled=True) + res2 = [[cond2, {"pooled_output": pooled2, "width": base_width, "height": base_height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]] + + # positive style (both the g and l token streams come from pos_l) + tokens2 = base_clip.tokenize(pos_l) + tokens2["l"] = base_clip.tokenize(pos_l)["l"] + + if len(tokens2["l"]) != len(tokens2["g"]): + while len(tokens2["l"]) < len(tokens2["g"]): + tokens2["l"] += empty["l"] + while len(tokens2["l"]) > len(tokens2["g"]): + tokens2["g"] += empty["g"] + + cond2, pooled2 = base_clip.encode_from_tokens(tokens2, return_pooled=True) + res3 = [[cond2, {"pooled_output": pooled2, "width": base_width, "height": base_height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]] + + # negative style + tokens2 = base_clip.tokenize(neg_l) + tokens2["l"] = base_clip.tokenize(neg_l)["l"]
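+ # CLIP-G and CLIP-L tokenize into fixed-size chunks, so a long prompt can yield more chunks + # on one stream than the other; the padding below appends empty-prompt chunks to the shorter + # stream so encode_from_tokens always sees g/l token lists of equal length.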
+ + if len(tokens2["l"]) != len(tokens2["g"]): + while len(tokens2["l"]) < len(tokens2["g"]): + tokens2["l"] += empty["l"] + while len(tokens2["l"]) > len(tokens2["g"]): + tokens2["g"] += empty["g"] + + cond2, pooled2 = base_clip.encode_from_tokens(tokens2, return_pooled=True) + res4 = [[cond2, {"pooled_output": pooled2, "width": base_width, "height": base_height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]] + + if preset == "preset A": + base_positive = res1 + base_negative = res2 + elif preset == "preset B": + base_positive = res3 + base_negative = res4 + elif preset == "preset C": + base_positive = res1 + res3 + base_negative = res2 + res4 + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/SDXL-Nodes#cr-sdxl-base-prompt-encoder" + return (base_positive, base_negative, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +''' +NODE_CLASS_MAPPINGS = { + "CR SDXL Style Text":CR_SDXLStyleText, + "CR SDXL Base Prompt Encoder":CR_SDXLBasePromptEncoder, + "CR SDXL Prompt Mix Presets":CR_PromptMixPresets, +} +''' + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..4ea2dcec87ef2076979ae26623e1246d7c62b344 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale.py @@ -0,0 +1,211 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +import torch +import numpy as np +import folder_paths +from PIL import Image +from ..categories import icons +from .upscale_functions import load_model, upscale_with_model, apply_resize_image + +#MAX_RESOLUTION=8192 + +# PIL to Tensor +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +# Tensor to PIL +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +#--------------------------------------------------------------------------------------------------------------------- +# NODES +#--------------------------------------------------------------------------------------------------------------------- +# These nodes are based on WAS nodes Image Resize and the Comfy Extras upscale with model nodes +#--------------------------------------------------------------------------------------------------------------------- +class CR_UpscaleImage: + + @classmethod + def INPUT_TYPES(s): + + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": + {"image": ("IMAGE",), + "upscale_model": (folder_paths.get_filename_list("upscale_models"), ), + "mode": (["rescale", "resize"],), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 16.0, "step": 0.01}), + "resize_width": ("INT", {"default": 1024, "min": 1, "max": 48000, "step": 1}), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "upscale" + CATEGORY = icons.get("Comfyroll/Upscale") + + def upscale(self, image, upscale_model, rounding_modulus=8, loops=1, mode="rescale", supersample='true', resampling_method="lanczos", rescale_factor=2, resize_width=1024): + + # Load upscale model + up_model = load_model(upscale_model) + + # Upscale with model + up_image = upscale_with_model(up_model, image) + + for img in image: + pil_img = tensor2pil(img) + original_width, original_height = pil_img.size + + for img in up_image: + # Get new size + pil_img = tensor2pil(img) + upscaled_width, upscaled_height = pil_img.size + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Upscale-Nodes#cr-upscale-image" + + # Return if no rescale needed + if upscaled_width == original_width and rescale_factor == 1: + return (up_image, show_help) + + # Image resize + scaled_images = [] + + for img in up_image: + scaled_images.append(pil2tensor(apply_resize_image(tensor2pil(img), original_width, original_height, rounding_modulus, mode, supersample, rescale_factor, resize_width, resampling_method))) + images_out = torch.cat(scaled_images, dim=0) + + return (images_out, show_help, ) + +#--------------------------------------------------------------------------------------------------------------------- +class CR_MultiUpscaleStack: + + @classmethod + def INPUT_TYPES(s): + + mix_methods = ["Combine", "Average", "Concatenate"] + up_models = ["None"] + folder_paths.get_filename_list("upscale_models") + + return {"required": + { + "switch_1": (["On","Off"],), + "upscale_model_1": (up_models, ), + "rescale_factor_1": ("FLOAT", {"default": 2, "min": 0.01, "max": 16.0, "step": 0.01}), + "switch_2": (["On","Off"],), + "upscale_model_2": (up_models, ), + "rescale_factor_2": ("FLOAT", {"default": 2, "min": 0.01, "max": 16.0, "step": 0.01}), + "switch_3": (["On","Off"],), + "upscale_model_3": (up_models, ), + "rescale_factor_3": ("FLOAT", {"default": 2, "min": 0.01, "max": 16.0, "step": 0.01}), + }, + "optional": {"upscale_stack": ("UPSCALE_STACK",), + } + } + + RETURN_TYPES = ("UPSCALE_STACK", "STRING", ) + RETURN_NAMES = ("UPSCALE_STACK", "show_help", ) + FUNCTION = "stack" + CATEGORY = icons.get("Comfyroll/Upscale") + + def stack(self, switch_1, upscale_model_1, rescale_factor_1, switch_2, 
upscale_model_2, rescale_factor_2, switch_3, upscale_model_3, rescale_factor_3, upscale_stack=None): + + # Initialise the list + upscale_list=list() + + if upscale_stack is not None: + upscale_list.extend([l for l in upscale_stack if l[0] != "None"]) + + if upscale_model_1 != "None" and switch_1 == "On": + upscale_list.extend([(upscale_model_1, rescale_factor_1)]), + + if upscale_model_2 != "None" and switch_2 == "On": + upscale_list.extend([(upscale_model_2, rescale_factor_2)]), + + if upscale_model_3 != "None" and switch_3 == "On": + upscale_list.extend([(upscale_model_3, rescale_factor_3)]), + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Upscale-Nodes#cr-multi-upscale-stack" + return (upscale_list, show_help, ) + +#--------------------------------------------------------------------------------------------------------------------- +class CR_ApplyMultiUpscale: + + @classmethod + def INPUT_TYPES(s): + + resampling_methods = ["lanczos", "nearest", "bilinear", "bicubic"] + + return {"required": {"image": ("IMAGE",), + "resampling_method": (resampling_methods,), + "supersample": (["true", "false"],), + "rounding_modulus": ("INT", {"default": 8, "min": 8, "max": 1024, "step": 8}), + "upscale_stack": ("UPSCALE_STACK",), + } + } + + RETURN_TYPES = ("IMAGE", "STRING", ) + RETURN_NAMES = ("IMAGE", "show_help", ) + FUNCTION = "apply" + CATEGORY = icons.get("Comfyroll/Upscale") + + def apply(self, image, resampling_method, supersample, rounding_modulus, upscale_stack): + + # Get original size + pil_img = tensor2pil(image) + original_width, original_height = pil_img.size + + # Extend params with upscale-stack items + params = list() + params.extend(upscale_stack) + + # Loop through the list + for tup in params: + upscale_model, rescale_factor = tup + print(f"[Info] CR Apply Multi Upscale: Applying {upscale_model} and rescaling by factor {rescale_factor}") + # Load upscale model + up_model = load_model(upscale_model) + + # Upscale with model + up_image = upscale_with_model(up_model, image) + + # Get new size + pil_img = tensor2pil(up_image) + upscaled_width, upscaled_height = pil_img.size + + # Return if no rescale needed + if upscaled_width == original_width and rescale_factor == 1: + image = up_image + else: + # Image resize + scaled_images = [] + mode = "rescale" + resize_width = 1024 + + for img in up_image: + scaled_images.append(pil2tensor(apply_resize_image(tensor2pil(img), original_width, original_height, rounding_modulus, mode, supersample, rescale_factor, resize_width, resampling_method))) + image = torch.cat(scaled_images, dim=0) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/Upscale-Nodes#cr-apply-multi-upscale" + + return (image, show_help, ) + +#--------------------------------------------------------------------------------------------------------------------- +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 0 nodes released +''' +NODE_CLASS_MAPPINGS = { + # Conditioning + "CR Multi Upscale Stack":CR_MultiUpscaleStack, + "CR Upscale Image":CR_UpscaleImage, + "CR Apply Multi Upscale":CR_ApplyMultiUpscale, +} +''' + + + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale_functions.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..644dd76f024bbaca74d779648e1ea24ecf9074a6 --- 
/dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/upscale_functions.py @@ -0,0 +1,87 @@ +#-----------------------------------------------------------------------------------------------------------# +# Comfyroll Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#-----------------------------------------------------------------------------------------------------------# + +#-----------------------------------------------------------------------------------------------------------# +# UPSCALE FUNCTIONS +#-----------------------------------------------------------------------------------------------------------# +# These functions are based on WAS nodes Image Resize and the Comfy Extras upscale with model nodes +#-----------------------------------------------------------------------------------------------------------# + +import torch +#import os +from comfy_extras.chainner_models import model_loading +from comfy import model_management +import numpy as np +import comfy.utils +import folder_paths +from PIL import Image + +# PIL to Tensor +def pil2tensor(image): + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +# Tensor to PIL +def tensor2pil(image): + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def load_model(model_name): + model_path = folder_paths.get_full_path("upscale_models", model_name) + sd = comfy.utils.load_torch_file(model_path, safe_load=True) + if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd: + sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""}) + out = model_loading.load_state_dict(sd).eval() + return out + +def upscale_with_model(upscale_model, image): + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = image.movedim(-1,-3).to(device) + free_memory = model_management.get_free_memory(device) + + tile = 512 + overlap = 32 + + oom = True + while oom: + try: + steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap) + pbar = comfy.utils.ProgressBar(steps) + s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar) + oom = False + except model_management.OOM_EXCEPTION as e: + tile //= 2 + if tile < 128: + raise e + + upscale_model.cpu() + s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0) + return s + +def apply_resize_image(image: Image.Image, original_width, original_height, rounding_modulus, mode='scale', supersample='true', factor: int = 2, width: int = 1024, height: int = 1024, resample='bicubic'): + + # Calculate the new width and height based on the given mode and parameters + if mode == 'rescale': + new_width, new_height = int(original_width * factor), int(original_height * factor) + else: + m = rounding_modulus + original_ratio = original_height / original_width + height = int(width * original_ratio) + + new_width = width if width % m == 0 else width + (m - width % m) + new_height = height if height % m == 0 else height + (m - height % m) + + # Define a dictionary of resampling filters + resample_filters = {'nearest': 0, 'bilinear': 2, 'bicubic': 3, 'lanczos': 1} + + # Apply supersample + if supersample == 'true': + image = image.resize((new_width * 8, new_height * 8), resample=Image.Resampling(resample_filters[resample])) + + # Resize the image using the 
given resampling filter + resized_image = image.resize((new_width, new_height), resample=Image.Resampling(resample_filters[resample])) + + return resized_image + + diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid.py new file mode 100644 index 0000000000000000000000000000000000000000..2abdf94e4c5ad7549f4bb22743e358f00fa381c6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid.py @@ -0,0 +1,376 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/CR-Animation-Nodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# +# based on https://github.com/LEv145/images-grid-comfy-plugin +#---------------------------------------------------------------------------------------------------------------------# + +import os +import folder_paths +from PIL import Image, ImageFont +import torch +import numpy as np +import re +from pathlib import Path +import typing as t +from dataclasses import dataclass +from .xygrid_functions import create_images_grid_by_columns, Annotation +from ..categories import icons + +def tensor_to_pillow(image: t.Any) -> Image.Image: + return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + +def pillow_to_tensor(image: Image.Image) -> t.Any: + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + +def find_highest_numeric_value(directory, filename_prefix): + highest_value = -1 # Initialize with a value lower than possible numeric values + + # Iterate through all files in the directory + for filename in os.listdir(directory): + if filename.startswith(filename_prefix): + try: + # Extract numeric part of the filename + numeric_part = filename[len(filename_prefix):] + numeric_str = re.search(r'\d+', numeric_part).group() + numeric_value = int(numeric_str) + # Check if the current numeric value is higher than the highest found so far + if numeric_value > highest_value: + highest_value = int(numeric_value) + except ValueError: + # If the numeric part is not a valid integer, ignore the file + continue + + return highest_value + +#---------------------------------------------------------------------------------------------------------------------# +class CR_XYList: + + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "index": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "list1": ("STRING", {"multiline": True, "default": "x"}), #"forceInput": True}), + "x_prepend": ("STRING", {"multiline": False, "default": ""}), + "x_append": ("STRING", {"multiline": False, "default": ""}), + "x_annotation_prepend": ("STRING", {"multiline": False, "default": ""}), + "list2": ("STRING", {"multiline": True, "default": "y"}), + "y_prepend": ("STRING", {"multiline": False, "default": ""}), + "y_append": ("STRING", {"multiline": False, "default": ""}), + "y_annotation_prepend": ("STRING", {"multiline": False, "default": ""}), + } + } + + RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "BOOLEAN", "STRING", ) + RETURN_NAMES = ("X", "Y", "x_annotation", "y_annotation", "trigger", "show_help", ) + FUNCTION = "cross_join" + CATEGORY = icons.get("Comfyroll/XY Grid") + + def cross_join(self, list1, list2, x_prepend, x_append, 
x_annotation_prepend, + y_prepend, y_append, y_annotation_prepend, index): + + # Index values for all XY nodes start from 1 + index -=1 + + trigger = False + + #listx = list1.split(",") + #listy = list2.split(",") + listx = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', list1) + listy = re.split(r',(?=(?:[^"]*"[^"]*")*[^"]*$)', list2) + + listx = [item.strip() for item in listx] + listy = [item.strip() for item in listy] + + lenx = len(listx) + leny = len(listy) + + grid_size = lenx * leny + + x = index % lenx + y = int(index / lenx) + + x_out = x_prepend + listx[x] + x_append + y_out = y_prepend + listy[y] + y_append + + x_ann_out = "" + y_ann_out = "" + + if index + 1 == grid_size: + x_ann_out = [x_annotation_prepend + item + ";" for item in listx] + y_ann_out = [y_annotation_prepend + item + ";" for item in listy] + x_ann_out = "".join([str(item) for item in x_ann_out]) + y_ann_out = "".join([str(item) for item in y_ann_out]) + trigger = True + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/XY-Grid-Nodes#cr-xy-list" + + return (x_out, y_out, x_ann_out, y_ann_out, trigger, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_XYInterpolate: + + @classmethod + def INPUT_TYPES(s): + gradient_profiles = ["Lerp"] + + return {"required": {"x_columns":("INT", {"default": 5.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "x_start_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "x_step": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "x_annotation_prepend": ("STRING", {"multiline": False, "default": ""}), + "y_rows":("INT", {"default": 5.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "y_start_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "y_step": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01,}), + "y_annotation_prepend": ("STRING", {"multiline": False, "default": ""}), + "index": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "gradient_profile": (gradient_profiles,) + } + } + + RETURN_TYPES = ("FLOAT", "FLOAT", "STRING", "STRING", "BOOLEAN", "STRING", ) + RETURN_NAMES = ("X", "Y", "x_annotation", "y_annotation", "trigger", "show_help", ) + FUNCTION = "gradient" + CATEGORY = icons.get("Comfyroll/XY Grid") + + def gradient(self, x_columns, x_start_value, x_step, x_annotation_prepend, + y_rows, y_start_value, y_step, y_annotation_prepend, + index, gradient_profile): + + # Index values for all XY nodes start from 1 + index -=1 + trigger = False + grid_size = x_columns * y_rows + + x = index % x_columns + y = int(index / x_columns) + + x_float_out = round(x_start_value + x * x_step, 3) + y_float_out = round(y_start_value + y * y_step, 3) + + x_ann_out = "" + y_ann_out = "" + + if index + 1 == grid_size: + for i in range(0, x_columns): + x = index % x_columns + x_float_out = x_start_value + i * x_step + x_float_out = round(x_float_out, 3) + x_ann_out = x_ann_out + x_annotation_prepend + str(x_float_out) + "; " + for j in range(0, y_rows): + y = int(index / x_columns) + y_float_out = y_start_value + j * y_step + y_float_out = round(y_float_out, 3) + y_ann_out = y_ann_out + y_annotation_prepend + str(y_float_out) + "; " + + x_ann_out = x_ann_out[:-1] + y_ann_out = y_ann_out[:-1] + print(x_ann_out,y_ann_out) + trigger = True + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/XY-Grid-Nodes#cr-xy-interpolate" + + return 
(x_float_out, y_float_out, x_ann_out, y_ann_out, trigger, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_XYIndex: + + @classmethod + def INPUT_TYPES(s): + gradient_profiles = ["Lerp"] + + return {"required": {"x_columns":("INT", {"default": 5.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "y_rows":("INT", {"default": 5.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + "index": ("INT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0,}), + } + } + + RETURN_TYPES = ("INT", "INT", "STRING", ) + RETURN_NAMES = ("x", "y", "show_help", ) + FUNCTION = "index" + CATEGORY = icons.get("Comfyroll/XY Grid") + + def index(self, x_columns, y_rows, index): + + # Index values for all XY nodes start from 1 + index -=1 + + x = index % x_columns + y = int(index / x_columns) + + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/XY-Grid-Nodes#cr-xy-index" + + return (x, y, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_XYFromFolder: + + @classmethod + def INPUT_TYPES(cls) -> dict[str, t.Any]: + + input_dir = folder_paths.output_directory + image_folder = [name for name in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir,name))] + + return {"required": + {"image_folder": (sorted(image_folder), ), + "start_index": ("INT", {"default": 1, "min": 0, "max": 10000}), + "end_index": ("INT", {"default": 1, "min": 1, "max": 10000}), + "max_columns": ("INT", {"default": 1, "min": 1, "max": 10000}), + "x_annotation": ("STRING", {"multiline": True}), + "y_annotation": ("STRING", {"multiline": True}), + "font_size": ("INT", {"default": 50, "min": 1}), + "gap": ("INT", {"default": 0, "min": 0}), + }, + "optional": { + "trigger": ("BOOLEAN", {"default": False},), + } + } + + RETURN_TYPES = ("IMAGE", "BOOLEAN", "STRING", ) + RETURN_NAMES = ("IMAGE", "trigger", "show_help", ) + FUNCTION = "load_images" + CATEGORY = icons.get("Comfyroll/XY Grid") + + def load_images(self, image_folder, start_index, end_index, max_columns, x_annotation, y_annotation, font_size, gap, trigger=False): + show_help = "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/wiki/XY-Grid-Nodes#cr-xy-from-folder" + + if trigger == False: + return((), False, show_help, ) + + input_dir = folder_paths.output_directory + image_path = os.path.join(input_dir, image_folder) + file_list = sorted(os.listdir(image_path), key=lambda s: sum(((s, int(n)) for s, n in re.findall(r'(\D+)(\d+)', 'a%s0' % s)), ())) + + sample_frames = [] + pillow_images = [] + + if len(file_list) < end_index: + end_index = len(file_list) + + for num in range(start_index, end_index + 1): + i = Image.open(os.path.join(image_path, file_list[num - 1])) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + image = image.squeeze() + sample_frames.append(image) + + # pass the font file name as a separate path component so the path also resolves on non-Windows systems + resolved_font_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "fonts", "Roboto-Regular.ttf") + font = ImageFont.truetype(str(resolved_font_path), size=font_size) + + start_x_ann = (start_index % max_columns) - 1 + start_y_ann = int(start_index / max_columns) + + column_list = x_annotation.split(";")[start_x_ann:] + row_list = y_annotation.split(";")[start_y_ann:] + + column_list = [item.strip() for item in column_list] + row_list = [item.strip() for item in row_list] + + annotation =
Annotation(column_texts=column_list, row_texts=row_list, font=font) + images = torch.stack(sample_frames) + + pillow_images = [tensor_to_pillow(i) for i in images] + pillow_grid = create_images_grid_by_columns( + images=pillow_images, + gap=gap, + annotation=annotation, + max_columns=max_columns, + ) + tensor_grid = pillow_to_tensor(pillow_grid) + + return (tensor_grid, trigger, show_help, ) + +#---------------------------------------------------------------------------------------------------------------------# +class CR_XYSaveGridImage: +# originally based on SaveImageSequence by mtb + + def __init__(self): + self.type = "output" + + @classmethod + def INPUT_TYPES(cls): + + output_dir = folder_paths.output_directory + output_folders = [name for name in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir,name))] + + return { + "required": {"mode": (["Save", "Preview"],), + "output_folder": (sorted(output_folders), ), + "image": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "CR"}), + "file_format": (["webp", "jpg", "png", "tif"],), + }, + "optional": {"output_path": ("STRING", {"default": '', "multiline": False}), + "trigger": ("BOOLEAN", {"default": False},), + } + } + + RETURN_TYPES = () + FUNCTION = "save_image" + OUTPUT_NODE = True + CATEGORY = icons.get("Comfyroll/XY Grid") + + def save_image(self, mode, output_folder, image, file_format, output_path='', filename_prefix="CR", trigger=False): + + if trigger == False: + return () + + output_dir = folder_paths.get_output_directory() + out_folder = os.path.join(output_dir, output_folder) + + # Set the output path + if output_path != '': + if not os.path.exists(output_path): + print(f"[Warning] CR Save XY Grid Image: The input_path `{output_path}` does not exist") + return ("",) + out_path = output_path + else: + out_path = os.path.join(output_dir, out_folder) + + if mode == "Preview": + out_path = folder_paths.temp_directory + + print(f"[Info] CR Save XY Grid Image: Output path is `{out_path}`") + + # Set the counter + counter = find_highest_numeric_value(out_path, filename_prefix) + 1 + #print(f"[Debug] counter {counter}") + + # Output image + output_image = image[0].cpu().numpy() + img = Image.fromarray(np.clip(output_image * 255.0, 0, 255).astype(np.uint8)) + + output_filename = f"{filename_prefix}_{counter:05}" + img_params = {'png': {'compress_level': 4}, + 'webp': {'method': 6, 'lossless': False, 'quality': 80}, + 'jpg': {'format': 'JPEG'}, + 'tif': {'format': 'TIFF'} + } + self.type = "output" if mode == "Save" else 'temp' + + resolved_image_path = os.path.join(out_path, f"{output_filename}.{file_format}") + img.save(resolved_image_path, **img_params[file_format]) + print(f"[Info] CR Save XY Grid Image: Saved to {output_filename}.{file_format}") + out_filename = f"{output_filename}.{file_format}" + preview = {"ui": {"images": [{"filename": out_filename,"subfolder": out_path,"type": self.type,}]}} + + return preview + +#---------------------------------------------------------------------------------------------------------------------# +# MAPPINGS +#---------------------------------------------------------------------------------------------------------------------# +# For reference only, actual mappings are in __init__.py +# 0 nodes released +''' +NODE_CLASS_MAPPINGS = { + # XY Grid + "CR XY List":CR_XYList, + "CR XY Index":CR_XYIndex, + "CR XY Interpolate":CR_XYInterpolate, + "CR XY From Folder":CR_XYFromFolder, + "CR XY Save Grid Image":CR_XYSaveGridImage, +} +''' + + + diff --git 
a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid_functions.py b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..0510a99bcfa332efc7845e2dfd99b13b224e7742 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/nodes/xygrid_functions.py @@ -0,0 +1,218 @@ +#---------------------------------------------------------------------------------------------------------------------# +# Comfyroll Custom Nodes by RockOfFire and Akatsuzi https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes +# for ComfyUI https://github.com/comfyanonymous/ComfyUI +#---------------------------------------------------------------------------------------------------------------------# + +# these functions are a straight clone from +# https://github.com/LEv145/images-grid-comfy-plugin + +import typing as t +from dataclasses import dataclass +from contextlib import suppress + +from PIL import Image, ImageDraw, ImageFont + + +WIDEST_LETTER = "W" + + +@dataclass +class Annotation(): + column_texts: list[str] + row_texts: list[str] + font: ImageFont.FreeTypeFont + + +def create_images_grid_by_columns( + images: list[Image.Image], + gap: int, + max_columns: int, + annotation: t.Optional[Annotation] = None, +) -> Image.Image: + max_rows = (len(images) + max_columns - 1) // max_columns + return _create_images_grid(images, gap, max_columns, max_rows, annotation) + + +def create_images_grid_by_rows( + images: list[Image.Image], + gap: int, + max_rows: int, + annotation: t.Optional[Annotation] = None, +) -> Image.Image: + max_columns = (len(images) + max_rows - 1) // max_rows + return _create_images_grid(images, gap, max_columns, max_rows, annotation) + + +@dataclass +class _GridInfo(): + image: Image.Image + gap: int + one_image_size: tuple[int, int] + + +def _create_images_grid( + images: list[Image.Image], + gap: int, + max_columns: int, + max_rows: int, + annotation: t.Optional[Annotation], +) -> Image.Image: + size = images[0].size + grid_width = size[0] * max_columns + (max_columns - 1) * gap + grid_height = size[1] * max_rows + (max_rows - 1) * gap + + grid_image = Image.new("RGB", (grid_width, grid_height), color="white") + + _arrange_images_on_grid(grid_image, images=images, size=size, max_columns=max_columns, gap=gap) + + if annotation is None: + return grid_image + return _create_grid_annotation( + grid_info=_GridInfo( + image=grid_image, + gap=gap, + one_image_size=size, + ), + column_texts=annotation.column_texts, + row_texts=annotation.row_texts, + font=annotation.font, + ) + + +def _arrange_images_on_grid( + grid_image: Image.Image, + /, + images: list[Image.Image], + size: tuple[int, int], + max_columns: int, + gap: int, +): + for i, image in enumerate(images): + x = (i % max_columns) * (size[0] + gap) + y = (i // max_columns) * (size[1] + gap) + + grid_image.paste(image, (x, y)) + + +def _create_grid_annotation( + grid_info: _GridInfo, + column_texts: list[str], + row_texts: list[str], + font: ImageFont.FreeTypeFont, +) -> Image.Image: + if not column_texts and not row_texts: + raise ValueError("Column text and row text is empty") + + grid = grid_info.image + left_padding = 0 + top_padding = 0 + + if row_texts: + left_padding = int( + max( + font.getlength(splitted_text) + for raw_text in row_texts + for splitted_text in raw_text.split("\n") + ) + + font.getlength(WIDEST_LETTER)*2 + ) + if column_texts: + top_padding = int(font.size * 2) + + image = Image.new( + "RGB", + (grid.size[0] + left_padding, 
grid.size[1] + top_padding), + color="white", + ) + draw = ImageDraw.Draw(image) + # https://github.com/python-pillow/Pillow/blob/9.5.x/docs/reference/ImageDraw.rst + draw.font = font # type: ignore + + _paste_image_to_lower_left_corner(image, grid) + if column_texts: + _draw_column_text( + draw=draw, + texts=column_texts, + grid_info=grid_info, + left_padding=left_padding, + top_padding=top_padding, + ) + if row_texts: + _draw_row_text( + draw=draw, + texts=row_texts, + grid_info=grid_info, + left_padding=left_padding, + top_padding=top_padding, + ) + + return image + + +def _draw_column_text( + draw: ImageDraw.ImageDraw, + texts: list[str], + grid_info: _GridInfo, + left_padding: int, + top_padding: int, +) -> None: + i = 0 + x0 = left_padding + y0 = 0 + x1 = left_padding + grid_info.one_image_size[0] + y1 = top_padding + while x0 != grid_info.image.size[0] + left_padding + grid_info.gap: + i = _draw_text_by_xy((x0, y0, x1, y1), i, draw=draw, texts=texts) + x0 += grid_info.one_image_size[0] + grid_info.gap + x1 += grid_info.one_image_size[0] + grid_info.gap + + +def _draw_row_text( + draw: ImageDraw.ImageDraw, + texts: list[str], + grid_info: _GridInfo, + left_padding: int, + top_padding: int, +) -> None: + i = 0 + x0 = 0 + y0 = top_padding + x1 = left_padding + y1 = top_padding + grid_info.one_image_size[1] + while y0 != grid_info.image.size[1] + top_padding + grid_info.gap: + i = _draw_text_by_xy((x0, y0, x1, y1), i, draw=draw, texts=texts) + y0 += grid_info.one_image_size[1] + grid_info.gap + y1 += grid_info.one_image_size[1] + grid_info.gap + + +def _draw_text_by_xy( + xy: tuple[int, int, int, int], + index: int, + *, + draw: ImageDraw.ImageDraw, + texts: list[str], +) -> int: + with suppress(IndexError): + _draw_center_text(draw, xy, texts[index]) + return index + 1 + + +def _draw_center_text( + draw: ImageDraw.ImageDraw, + xy: tuple[int, int, int, int], + text: str, + fill: t.Any = "black", +) -> None: + _, _, *text_size = draw.textbbox((0, 0), text) + draw.multiline_text( + ( + (xy[2] - text_size[0] + xy[0]) / 2, + (xy[3] - text_size[1] + xy[1]) / 2, + ), + text, + fill=fill, + ) + + +# Note: despite the name, this pastes "image" into the lower-right corner of "base", +# which leaves exactly left_padding and top_padding free for the annotation labels. +def _paste_image_to_lower_left_corner(base: Image.Image, image: Image.Image) -> None: + base.paste(image, (base.size[0] - image.size[0], base.size[1] - image.size[1])) diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/obj/tetrahedron.obj b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/obj/tetrahedron.obj new file mode 100644 index 0000000000000000000000000000000000000000..e15c62dfc0d7b86be17b560e996d054ba51c655b --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/obj/tetrahedron.obj @@ -0,0 +1,14 @@ +# tetrahedron.obj created by hand.
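+# Geometry note: three unit-length edges meet at the corner vertex (1.00, 1.00, 1.00), +# and the four faces are wound so their normals point outward; presumably a hand-made +# test asset for nodes that load meshes.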
+# + +g tetrahedron + +v 1.00 1.00 1.00 +v 2.00 1.00 1.00 +v 1.00 2.00 1.00 +v 1.00 1.00 2.00 + +f 1 3 2 +f 1 4 3 +f 1 2 4 +f 2 3 4 \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/styles/place style json files here.txt b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/styles/place style json files here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A1_PromptKeyframeScheduling_Demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A1_PromptKeyframeScheduling_Demo_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..304bf5c0035d19c2c303390cbff3c5081fa38e8a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A1_PromptKeyframeScheduling_Demo_v01.json @@ -0,0 +1,1759 @@ +{ + "last_node_id": 619, + "last_link_id": 1123, + "nodes": [ + { + "id": 249, + "type": "CheckpointLoaderSimple", + "pos": [ + 1463.9153190458471, + -676.6040770005651 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 902 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 1091 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 509, + "type": "Note", + "pos": [ + 1076.5295277914042, + -267.27814460234464 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 591, + "type": "CLIPTextEncode", + "pos": [ + 2190.728895078124, + -138.77898584960937 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 1045 + }, + { + "name": "text", + "type": "STRING", + "link": 1044, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1087 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ] + }, + { + "id": 250, + "type": "VAELoader", + "pos": [ + 1463.9153190458471, + -526.6040770005652 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 837 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 528, + "type": "Reroute", + "pos": [ + 1900, + -500 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1091 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 985, + 1045 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + 
"horizontal": false + } + }, + { + "id": 500, + "type": "Reroute", + "pos": [ + 1900, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 902 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 899 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 602, + "type": "PreviewImage", + "pos": [ + 3500, + -280 + ], + "size": { + "0": 510, + "1": 530 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1083 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 491, + "type": "Reroute", + "pos": [ + 1900, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 837 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1101 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 607, + "type": "Reroute", + "pos": [ + 2510, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1101 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 609, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 254.34497335156243 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1119, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "anime line-art, 1girl, long black hair, 2D, illustration" + ] + }, + { + "id": 608, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 124.34497335156253 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1118, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "anime line-art, 1girl, long black hair, 2D, illustration" + ] + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 2510, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 899 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 614 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 600, + "type": "VAEDecode", + "pos": [ + 3310, + -250 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 1080 + }, + { + "name": "vae", + "type": "VAE", + "link": 1082 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1083 + ], + "shape": 3, + 
"slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 601, + "type": "Reroute", + "pos": [ + 3170, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1100 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1082 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 563, + "type": "CR Encode Scheduled Prompts", + "pos": [ + 2190.728895078124, + -298.7789858496094 + ], + "size": { + "0": 290, + "1": 94 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 985 + }, + { + "name": "current_prompt", + "type": "STRING", + "link": 1115, + "widget": { + "name": "current_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "next_prompt", + "type": "STRING", + "link": 1116, + "widget": { + "name": "next_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "weight", + "type": "FLOAT", + "link": 1117, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 0, + "min": -9999, + "max": 9999, + "step": 0.1 + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1106 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Encode Scheduled Prompts" + }, + "widgets_values": [ + "", + "", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 610, + "type": "CR Apply ControlNet", + "pos": [ + 2570, + -300 + ], + "size": { + "0": 250, + "1": 122 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 1106 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 1112 + }, + { + "name": "image", + "type": "IMAGE", + "link": 1110 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Apply ControlNet" + }, + "widgets_values": [ + "On", + 0.7000000000000001 + ] + }, + { + "id": 605, + "type": "LoadImage", + "pos": [ + 2470, + -1070 + ], + "size": { + "0": 320, + "1": 310 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1110 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "depth_leres-0070.png", + "image" + ] + }, + { + "id": 613, + "type": "ControlNetLoader", + "pos": [ + 2470, + -700 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 1112 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "t2iadapter_zoedepth_sd15v1.pth" + ] + }, + { + "id": 612, + "type": "EmptyLatentImage", + "pos": [ + 2570, + -130 + ], + "size": { + "0": 250, + "1": 120 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 768, + 1 + 
] + }, + { + "id": 537, + "type": "CR Prompt Text", + "pos": [ + 1690, + 60 + ], + "size": { + "0": 310, + "1": 90 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 1044 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "embedding:EasyNegative, " + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 490, + "type": "CR Current Frame", + "pos": [ + 1390, + -230 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": { + "collapsed": true + }, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 836, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 1089, + 1114 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 252, + "type": "KSampler", + "pos": [ + 2918.4079541276733, + -278.0029971578494 + ], + "size": { + "0": 290, + "1": 550 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 614 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 1107 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 1087 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 1111 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1080 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 722889772155925, + "fixed", + 20, + 10, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 617, + "type": "CR Simple Prompt List", + "pos": [ + 601.8719232562495, + 37.05121388925782 + ], + "size": { + "0": 468.5999755859375, + "1": 276 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "SIMPLE_PROMPT_LIST", + "type": "SIMPLE_PROMPT_LIST", + "links": [ + 1121 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List" + }, + "widgets_values": [ + "1girl, long grey hair", + "1girl, long blue hair", + "1girl, long red hair", + "1girl, long black hair", + "1girl, long pink hair" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 614, + "type": "CR Prompt Scheduler", + "pos": [ + 1670, + -280 + ], + "size": { + "0": 350, + "1": 286 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 1114, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + }, + { + "name": "keyframe_list", + "type": "STRING", + "link": 1122, + "widget": { + "name": "keyframe_list", + "config": [ + "STRING", + { + "multiline": true, + "default": "keyframe list" + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": [ + 1115, + 1118 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "next_prompt", + "type": "STRING", + "links": [ + 1116, + 1119 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "weight", + "type": 
"FLOAT", + "links": [ + 1117, + 1120 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Scheduler" + }, + "widgets_values": [ + "Keyframe List", + 0, + "default text", + "Deforum", + "Yes", + "P1", + "anime lineart", + "anime line-art", + "2D, illustration" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 618, + "type": "CR Simple Prompt List Keyframes", + "pos": [ + 1131.8719232562507, + 37.05121388925782 + ], + "size": { + "0": 405.5999755859375, + "1": 178 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": 1121 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": [ + 1122, + 1123 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List Keyframes" + }, + "widgets_values": [ + 3, + 1, + "Default", + "Default", + "Default", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 619, + "type": "ShowText|pysssss", + "pos": [ + 1201.8719232562507, + 287.051213889258 + ], + "size": [ + 332.17811035156274, + 133.2184790039064 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1123, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "\"0\": \"1girl, long grey hair\",\n \"3\": \"1girl, long blue hair\",\n \"6\": \"1girl, long red hair\",\n \"9\": \"1girl, long black hair\",\n \"12\": \"1girl, long pink hair\"" + ] + }, + { + "id": 581, + "type": "ShowText|pysssss", + "pos": [ + 2420, + 390 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1006, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "9" + ] + }, + { + "id": 582, + "type": "CR Integer To String", + "pos": [ + 2210, + 420 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 1089, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1006 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 585, + "type": "ShowText|pysssss", + "pos": [ + 2430, + 530 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1010, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1.0" + ] + }, + { + "id": 587, + "type": "CR Float To String", + "pos": [ + 2210, + 560 
+ ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 1120, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1010 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 508, + "type": "PrimitiveNode", + "pos": [ + 1076.5295277914042, + -417.2781446023464 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 836 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 614, + 387, + 0, + 252, + 0, + "MODEL" + ], + [ + 836, + 508, + 0, + 490, + 0, + "INT" + ], + [ + 837, + 250, + 0, + 491, + 0, + "*" + ], + [ + 899, + 500, + 0, + 387, + 0, + "*" + ], + [ + 902, + 249, + 0, + 500, + 0, + "*" + ], + [ + 985, + 528, + 0, + 563, + 0, + "CLIP" + ], + [ + 1006, + 582, + 0, + 581, + 0, + "STRING" + ], + [ + 1010, + 587, + 0, + 585, + 0, + "STRING" + ], + [ + 1044, + 537, + 0, + 591, + 1, + "STRING" + ], + [ + 1045, + 528, + 0, + 591, + 0, + "CLIP" + ], + [ + 1080, + 252, + 0, + 600, + 0, + "LATENT" + ], + [ + 1082, + 601, + 0, + 600, + 1, + "VAE" + ], + [ + 1083, + 600, + 0, + 602, + 0, + "IMAGE" + ], + [ + 1087, + 591, + 0, + 252, + 2, + "CONDITIONING" + ], + [ + 1089, + 490, + 0, + 582, + 0, + "INT" + ], + [ + 1091, + 249, + 1, + 528, + 0, + "*" + ], + [ + 1100, + 607, + 0, + 601, + 0, + "*" + ], + [ + 1101, + 491, + 0, + 607, + 0, + "*" + ], + [ + 1106, + 563, + 0, + 610, + 0, + "CONDITIONING" + ], + [ + 1107, + 610, + 0, + 252, + 1, + "CONDITIONING" + ], + [ + 1110, + 605, + 0, + 610, + 2, + "IMAGE" + ], + [ + 1111, + 612, + 0, + 252, + 3, + "LATENT" + ], + [ + 1112, + 613, + 0, + 610, + 1, + "CONTROL_NET" + ], + [ + 1114, + 490, + 0, + 614, + 1, + "INT" + ], + [ + 1115, + 614, + 0, + 563, + 1, + "STRING" + ], + [ + 1116, + 614, + 1, + 563, + 2, + "STRING" + ], + [ + 1117, + 614, + 2, + 563, + 3, + "FLOAT" + ], + [ + 1118, + 614, + 0, + 608, + 0, + "STRING" + ], + [ + 1119, + 614, + 1, + 609, + 0, + "STRING" + ], + [ + 1120, + 614, + 2, + 587, + 0, + "FLOAT" + ], + [ + 1121, + 617, + 0, + 618, + 0, + "SIMPLE_PROMPT_LIST" + ], + [ + 1122, + 618, + 0, + 614, + 2, + "STRING" + ], + [ + 1123, + 618, + 0, + 619, + 0, + "STRING" + ] + ], + "groups": [ + { + "title": "Model", + "bounding": [ + 1437, + -777, + 383, + 344 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Sampling", + "bounding": [ + 2876, + -390, + 369, + 693 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Load Frames", + "bounding": [ + 1043, + -523, + 284, + 416 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Conditioning", + "bounding": [ + 2162, + -387, + 354, + 365 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt", + "bounding": [ + 1617, + -388, + 434, + 582 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Show Values", + "bounding": [ + 2159, + 26, + 515, + 605 + ], + "color": "#3f789e", + 
"font_size": 24, + "locked": false + }, + { + "title": "Prompt Keyframes", + "bounding": [ + 562, + -57, + 1013, + 506 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A2_PromptKeyframeNodes_Demo_v02b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A2_PromptKeyframeNodes_Demo_v02b.json new file mode 100644 index 0000000000000000000000000000000000000000..2e8996247076f5987ed79e0cda8a248c2701e7cc --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A2_PromptKeyframeNodes_Demo_v02b.json @@ -0,0 +1,1704 @@ +{ + "last_node_id": 87, + "last_link_id": 99, + "nodes": [ + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1480, + 410 + ], + "size": { + "0": 320, + "1": 110 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 1480, + 90 + ], + "size": { + "0": 315, + "1": 238 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "batch_size", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "2:3 portrait 512x768", + "Off", + 1, + 1 + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1870, + 730 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 53, + "type": "CLIPTextEncode", + "pos": [ + 1060, + 760 + ], + "size": { + "0": 260, + "1": 54 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 71 + }, + { + "name": "text", + "type": "STRING", + "link": 57, + "widget": { + "name": "text", + "config": [ + "STRING", + 
{ + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 66 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative.pt" + ] + }, + { + "id": 52, + "type": "CR Prompt Text", + "pos": [ + 460, + 600 + ], + "size": { + "0": 370, + "1": 110 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 55 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Pre Text", + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "anime, illustration, line-art, 2.5D," + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 54, + "type": "CR Prompt Text", + "pos": [ + 460, + 770 + ], + "size": { + "0": 370, + "1": 110 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 57 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Negative Prompt", + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "embedding:EasyNegative.pt,\nnsfw" + ], + "color": "#571a1a", + "bgcolor": "#6b2e2e" + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 2250, + 350 + ], + "size": { + "0": 380, + "1": 290 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 74, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 49, + "type": "PromptSchedule", + "pos": [ + 950, + 410 + ], + "size": { + "0": 400, + "1": 280 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 67 + }, + { + "name": "current_frame", + "type": "INT", + "link": 69, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + }, + { + "name": "text", + "type": "STRING", + "link": 54, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "pre_text", + "type": "STRING", + "link": 55, + "widget": { + "name": "pre_text", + "config": [ + "STRING", + { + "multiline": false + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Prompt Scheduler", + "properties": { + "Node name for S&R": "PromptSchedule" + }, + "widgets_values": [ + "\"0\": \"1girl, solo, long grey hair, grey eyes, black sweater, (smiling:`(0.5+0.5*sin(t/12))`)\",\n\"24\": \"1girl, solo, long grey hair, grey eyes, black sweater, (smiling:`(0.5+0.5*sin(t/max_f))`)\"", + 12, + 0, + "", + "", + 0, + 0, + 0, + 0 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 65, + "type": "Note", + "pos": [ + 1120, + 240 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The prompt scheduler assembles the prompt based on the current frame" + ], + "color": 
"#233", + "bgcolor": "#355" + }, + { + "id": 63, + "type": "Note", + "pos": [ + -520, + 370 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 5, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press Reset in the Animation Builder and then press the Queue button, Do not use queue prompt in the ComfyUI menu." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 66, + "type": "Note", + "pos": [ + 50, + 210 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence. The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 460, + 240 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 67, + 71 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ayonimix_V4VAEBaked.safetensors" + ] + }, + { + "id": 67, + "type": "Note", + "pos": [ + 50, + 80 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 8, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 68 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 65 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 66 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 64, + "type": "Note", + "pos": [ + 548.9214027773999, + 1557.2026116672523 + ], + "size": { + "0": 220, + "1": 140 + }, + "flags": {}, + "order": 9, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "There are three methods of creating prompt keyframes ranging from simple to advanced.\n\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 77, + "type": "Note", + "pos": [ + -460, + 2600 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 10, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "This method allows detailed prompt control at the keyframe level" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + -270, + 360 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": 
"count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 12, + 1, + 1, + 12, + 1, + "frame: 0 / 11", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 78, + "type": "CR Keyframe List", + "pos": [ + -181.07859722259994, + 2597.2026116672523 + ], + "size": [ + 570, + 210 + ], + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": [ + 89 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Keyframe List" + }, + "widgets_values": [ + "\"0\": \"1girl, solo, long grey hair, (smiling:`(0.5+0.5*sin(t/12))`)\",\n\"72\": \"1girl, solo, short red hair, (smiling:`(0.5+0.5*sin(t/max_f))`)\"", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 56, + "type": "CR Text Input Switch (4 way)", + "pos": [ + 548.9214027773999, + 1377.2026116672523 + ], + "size": { + "0": 240, + "1": 120 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "text1", + "type": "STRING", + "link": 98 + }, + { + "name": "text2", + "type": "STRING", + "link": 97 + }, + { + "name": "text3", + "type": "STRING", + "link": 96 + }, + { + "name": "text4", + "type": "STRING", + "link": 89, + "slot_index": 3 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 54 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Input Switch (4 way)" + }, + "widgets_values": [ + 2, + "", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 79, + "type": "CR Cycle Text Simple", + "pos": [ + -914.7703639000006, + 1433.9683054179686 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 99, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 93 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text Simple" + }, + "widgets_values": [ + "Sequential", + 3, + 2, + 0, + "1girl, solo, long grey hair", + "1girl, solo, long blue hair", + "1girl, solo, long red hair", + "1girl, solo, long black hair", + "1girl, solo, long pink hair" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 81, + "type": "CR Prompt List", + "pos": [ + -914.7703639000006, + 1793.9683054179686 + ], + "size": { + "0": 400, + "1": 684.0000610351562 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "prompt_list", + "type": "PROMPT_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "PROMPT_LIST", + "type": "PROMPT_LIST", + "links": [ + 91 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt List" + }, + "widgets_values": [ + 3, + 2, + "1girl, solo, long grey hair", + "Default", + "Default", + "Default", + "1girl, solo, long blue hair", + "Default", + "Default", + "Default", + "1girl, solo, long red hair", + "Default", + "Default", + "Default", + "1girl, solo, long black hair", + "Default", + "Default", + "Default", + "1girl, solo, long pink 
hair", + "Default", + "Default", + "Default" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 82, + "type": "CR Prompt List", + "pos": [ + -464.7703638999999, + 1793.9683054179686 + ], + "size": { + "0": 400, + "1": 684.0000610351562 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "prompt_list", + "type": "PROMPT_LIST", + "link": 91 + } + ], + "outputs": [ + { + "name": "PROMPT_LIST", + "type": "PROMPT_LIST", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt List" + }, + "widgets_values": [ + 3, + 1, + "", + "Default", + "Default", + "Default", + "", + "Default", + "Default", + "Default", + "", + "Default", + "Default", + "Default", + "", + "Default", + "Default", + "Default", + "", + "Default", + "Default", + "Default" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 83, + "type": "CR Simple Prompt List", + "pos": [ + -454.77036389999995, + 1033.9683054179686 + ], + "size": { + "0": 400, + "1": 276.00006103515625 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": 92 + } + ], + "outputs": [ + { + "name": "SIMPLE_PROMPT_LIST", + "type": "SIMPLE_PROMPT_LIST", + "links": [ + 94 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List" + }, + "widgets_values": [ + "", + "", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 85, + "type": "CR Simple Prompt List", + "pos": [ + -464.7703638999999, + 1413.9683054179686 + ], + "size": { + "0": 400, + "1": 280 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": null + }, + { + "name": "prompt_1", + "type": "STRING", + "link": 93, + "widget": { + "name": "prompt_1", + "config": [ + "STRING", + { + "multiline": true, + "default": "prompt" + } + ] + } + } + ], + "outputs": [ + { + "name": "SIMPLE_PROMPT_LIST", + "type": "SIMPLE_PROMPT_LIST", + "links": [ + 95 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List" + }, + "widgets_values": [ + "prompt", + "", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 84, + "type": "CR Simple Prompt List", + "pos": [ + -910, + 1030 + ], + "size": { + "0": 400, + "1": 276.00006103515625 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "SIMPLE_PROMPT_LIST", + "type": "SIMPLE_PROMPT_LIST", + "links": [ + 92 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List" + }, + "widgets_values": [ + "1girl, solo, long grey hair", + "1girl, solo, long blue hair", + "1girl, solo, long red hair", + "1girl, solo, long black hair", + "1girl, solo, long pink hair" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 80, + "type": "CR Prompt List Keyframes", + "pos": [ + -4.770363900000342, + 1793.9683054179686 + ], + "size": { + "0": 330, + "1": 60 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "prompt_list", + "type": "PROMPT_LIST", + "link": 90 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": [ + 96 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt List Keyframes" + }, + 
"widgets_values": [ + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 87, + "type": "CR Simple Prompt List Keyframes", + "pos": [ + -4.770363900000342, + 1413.9683054179686 + ], + "size": { + "0": 405.5999755859375, + "1": 178 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": 95 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": [ + 97 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List Keyframes" + }, + "widgets_values": [ + 3, + 1, + "Default", + "Default", + "Default", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 86, + "type": "CR Simple Prompt List Keyframes", + "pos": [ + -4.770363900000342, + 1033.9683054179686 + ], + "size": { + "0": 405.5999755859375, + "1": 178 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": 94 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": [ + 98 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List Keyframes" + }, + "widgets_values": [ + 3, + 1, + "Default", + "Default", + "Default", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 76, + "type": "Note", + "pos": [ + -1240, + 1440 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 15, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Cycling text adds another level of prompt control" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 50, + 360 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": false + }, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 23, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 69, + 74, + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 23, + 24, + 0, + 25, + 0, + "INT" + ], + [ + 25, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 54, + 56, + 0, + 49, + 2, + "STRING" + ], + [ + 55, + 52, + 0, + 49, + 3, + "STRING" + ], + [ + 57, + 54, + 0, + 53, + 1, + "STRING" + ], + [ + 65, + 49, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 66, + 53, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 67, + 47, + 1, + 49, + 0, + "CLIP" + ], + [ + 68, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 69, + 25, + 0, + 49, + 1, + "INT" + ], + [ + 71, + 47, + 1, + 53, + 0, + "CLIP" + ], + [ + 74, + 25, + 0, + 26, + 1, + "INT" + ], + [ + 89, + 78, + 0, + 56, + 3, + "STRING" + ], + [ + 90, + 82, + 0, + 80, + 0, + "PROMPT_LIST" + ], + [ + 91, + 81, + 0, + 82, + 0, + "PROMPT_LIST" + ], + [ + 92, + 84, + 0, + 83, + 0, + "SIMPLE_PROMPT_LIST" + ], + [ + 93, + 79, + 0, + 85, + 1, + "STRING" + ], + [ + 94, + 83, + 0, + 86, + 0, + "SIMPLE_PROMPT_LIST" + ], + [ + 95, + 85, + 0, + 87, + 0, + 
"SIMPLE_PROMPT_LIST" + ], + [ + 96, + 80, + 0, + 56, + 2, + "STRING" + ], + [ + 97, + 87, + 0, + 56, + 1, + "STRING" + ], + [ + 98, + 86, + 0, + 56, + 0, + "STRING" + ], + [ + 99, + 25, + 0, + 79, + 1, + "INT" + ] + ], + "groups": [ + { + "title": "Prompt Keyframes", + "bounding": [ + -972, + 924, + 1799, + 1930 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "1 - Simple", + "bounding": [ + -1183, + 1001, + 186, + 80 + ], + "color": "#b58b2a", + "locked": false + }, + { + "title": "2 - Intermediate", + "bounding": [ + -1208, + 1768, + 188, + 80 + ], + "color": "#b58b2a", + "locked": false + }, + { + "title": "3 - Advanced", + "bounding": [ + -1189, + 2550, + 189, + 80 + ], + "color": "#b58b2a", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A3_SimplePromptScheduling_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A3_SimplePromptScheduling_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..0f0e8642224cf6d4396cadc200a1495bcf1da662 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A3_SimplePromptScheduling_Demo_v01b.json @@ -0,0 +1,1560 @@ +{ + "last_node_id": 613, + "last_link_id": 1112, + "nodes": [ + { + "id": 249, + "type": "CheckpointLoaderSimple", + "pos": [ + 1463.9153190458471, + -676.6040770005651 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 902 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 1091 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 509, + "type": "Note", + "pos": [ + 1106.950216410155, + -129.61091906250033 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 591, + "type": "CLIPTextEncode", + "pos": [ + 2190.728895078124, + -138.77898584960937 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 1045 + }, + { + "name": "text", + "type": "STRING", + "link": 1044, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1087 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ] + }, + { + "id": 250, + "type": "VAELoader", + "pos": [ + 1463.9153190458471, + -526.6040770005652 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 837 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + 
"id": 528, + "type": "Reroute", + "pos": [ + 1900, + -500 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1091 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 985, + 1045 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 500, + "type": "Reroute", + "pos": [ + 1900, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 902 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 899 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 585, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 524.3449733515627 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1010, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0.4" + ] + }, + { + "id": 587, + "type": "CR Float To String", + "pos": [ + 2210.721570859376, + 554.3449733515627 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 1097, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1010 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 602, + "type": "PreviewImage", + "pos": [ + 3500, + -280 + ], + "size": { + "0": 510, + "1": 530 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1083 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 491, + "type": "Reroute", + "pos": [ + 1900, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 837 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1101 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 607, + "type": "Reroute", + "pos": [ + 2510, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1101 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 490, + "type": "CR Current Frame", + "pos": [ + 1400, + -250 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": { + "collapsed": true + }, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 836, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 1089, + 1096 + ], + "shape": 
3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 581, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 384.34497335156266 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1006, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "13" + ] + }, + { + "id": 604, + "type": "CR Simple Prompt Scheduler", + "pos": [ + 1660, + -280 + ], + "size": { + "0": 340, + "1": 190 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 1096, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": [ + 1093, + 1103 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "next_prompt", + "type": "STRING", + "links": [ + 1094, + 1104 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "weight", + "type": "FLOAT", + "links": [ + 1095, + 1097 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt Scheduler" + }, + "widgets_values": [ + "\"0\": \"1girl, long grey hair\",\n\"5\": \"1girl, long blue hair\",\n\"10\": \"1girl, long red hair\",\n\"15\": \"1girl, long black hair\"", + 0, + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 609, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 254.34497335156243 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1104, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1girl, long black hair" + ] + }, + { + "id": 608, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 124.34497335156253 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1103, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1girl, long red hair" + ] + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 2510, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 899 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 614 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 600, + "type": "VAEDecode", + "pos": [ + 3310, + -250 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 0, + 
"inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 1080 + }, + { + "name": "vae", + "type": "VAE", + "link": 1082 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1083 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 601, + "type": "Reroute", + "pos": [ + 3170, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1100 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1082 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 563, + "type": "CR Encode Scheduled Prompts", + "pos": [ + 2190.728895078124, + -298.7789858496094 + ], + "size": { + "0": 290, + "1": 94 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 985 + }, + { + "name": "current_prompt", + "type": "STRING", + "link": 1093, + "widget": { + "name": "current_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "next_prompt", + "type": "STRING", + "link": 1094, + "widget": { + "name": "next_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "weight", + "type": "FLOAT", + "link": 1095, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 0, + "min": -9999, + "max": 9999, + "step": 0.1 + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1106 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Encode Scheduled Prompts" + }, + "widgets_values": [ + "", + "", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 610, + "type": "CR Apply ControlNet", + "pos": [ + 2570, + -300 + ], + "size": { + "0": 250, + "1": 122 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 1106 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 1112 + }, + { + "name": "image", + "type": "IMAGE", + "link": 1110 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Apply ControlNet" + }, + "widgets_values": [ + "On", + 0.7000000000000001 + ] + }, + { + "id": 605, + "type": "LoadImage", + "pos": [ + 2470, + -1070 + ], + "size": { + "0": 320, + "1": 310 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1110 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "depth_leres-0070.png", + "image" + ] + }, + { + "id": 613, + "type": "ControlNetLoader", + "pos": [ + 2470, + -700 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 1112 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "t2iadapter_zoedepth_sd15v1.pth" + ] + }, + { + "id": 537, + "type": "CR Prompt Text", + "pos": [ + 1690, + -20 + ], + "size": { + "0": 310, + "1": 90 + }, + "flags": {}, + "order": 5, + "mode": 0, 
+ "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 1044 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "embedding:EasyNegative, " + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 612, + "type": "EmptyLatentImage", + "pos": [ + 2570, + -130 + ], + "size": { + "0": 250, + "1": 120 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 768, + 1 + ] + }, + { + "id": 582, + "type": "CR Integer To String", + "pos": [ + 2210, + 410 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 1089, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1006 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 252, + "type": "KSampler", + "pos": [ + 2918.4079541276733, + -278.0029971578494 + ], + "size": { + "0": 290, + "1": 550 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 614 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 1107 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 1087 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 1111 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1080 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 722889772155925, + "fixed", + 20, + 10, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 508, + "type": "PrimitiveNode", + "pos": [ + 1106.950216410155, + -279.6109190625014 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 836 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 614, + 387, + 0, + 252, + 0, + "MODEL" + ], + [ + 836, + 508, + 0, + 490, + 0, + "INT" + ], + [ + 837, + 250, + 0, + 491, + 0, + "*" + ], + [ + 899, + 500, + 0, + 387, + 0, + "*" + ], + [ + 902, + 249, + 0, + 500, + 0, + "*" + ], + [ + 985, + 528, + 0, + 563, + 0, + "CLIP" + ], + [ + 1006, + 582, + 0, + 581, + 0, + "STRING" + ], + [ + 1010, + 587, + 0, + 585, + 0, + "STRING" + ], + [ + 1044, + 537, + 0, + 591, + 1, + "STRING" + ], + [ + 1045, + 528, + 0, + 591, + 0, + "CLIP" + ], + [ + 1080, + 252, + 0, + 600, + 0, + "LATENT" + ], + [ + 1082, + 601, + 0, + 600, + 1, + "VAE" + ], + [ + 1083, + 600, + 0, + 602, + 0, + "IMAGE" + ], + [ + 1087, + 591, + 0, + 252, + 2, + "CONDITIONING" + ], + [ + 1089, + 490, + 0, + 582, + 0, + "INT" + ], + [ + 1091, + 249, + 1, + 528, + 0, + "*" + ], + [ + 1093, + 604, + 0, + 563, + 1, + "STRING" + ], + [ + 1094, + 604, + 1, + 563, + 2, + "STRING" + ], + [ + 1095, + 604, + 2, + 563, + 3, + "FLOAT" + ], + [ + 1096, + 490, + 0, + 604, + 0, + 
"INT" + ], + [ + 1097, + 604, + 2, + 587, + 0, + "FLOAT" + ], + [ + 1100, + 607, + 0, + 601, + 0, + "*" + ], + [ + 1101, + 491, + 0, + 607, + 0, + "*" + ], + [ + 1103, + 604, + 0, + 608, + 0, + "STRING" + ], + [ + 1104, + 604, + 1, + 609, + 0, + "STRING" + ], + [ + 1106, + 563, + 0, + 610, + 0, + "CONDITIONING" + ], + [ + 1107, + 610, + 0, + 252, + 1, + "CONDITIONING" + ], + [ + 1110, + 605, + 0, + 610, + 2, + "IMAGE" + ], + [ + 1111, + 612, + 0, + 252, + 3, + "LATENT" + ], + [ + 1112, + 613, + 0, + 610, + 1, + "CONTROL_NET" + ] + ], + "groups": [ + { + "title": "Model", + "bounding": [ + 1437, + -777, + 383, + 344 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Sampling", + "bounding": [ + 2876, + -390, + 369, + 693 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Load Frames", + "bounding": [ + 1074, + -385, + 284, + 416 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Conditioning", + "bounding": [ + 2162, + -387, + 354, + 365 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt", + "bounding": [ + 1617, + -388, + 425, + 501 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Show Values", + "bounding": [ + 2159, + 26, + 515, + 605 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A4_PromptScheduling_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A4_PromptScheduling_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..7a114ab8d2470b4c7438f157c3109549ca717289 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A4_PromptScheduling_Demo_v01a.json @@ -0,0 +1,1571 @@ +{ + "last_node_id": 616, + "last_link_id": 1120, + "nodes": [ + { + "id": 249, + "type": "CheckpointLoaderSimple", + "pos": [ + 1463.9153190458471, + -676.6040770005651 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 902 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 1091 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 509, + "type": "Note", + "pos": [ + 1076.5295277914042, + -267.27814460234464 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 591, + "type": "CLIPTextEncode", + "pos": [ + 2190.728895078124, + -138.77898584960937 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 1045 + }, + { + "name": "text", + "type": "STRING", + "link": 1044, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": 
"CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1087 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ] + }, + { + "id": 250, + "type": "VAELoader", + "pos": [ + 1463.9153190458471, + -526.6040770005652 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 837 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 528, + "type": "Reroute", + "pos": [ + 1900, + -500 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1091 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 985, + 1045 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 500, + "type": "Reroute", + "pos": [ + 1900, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 902 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 899 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 585, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 524.3449733515627 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1010, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1.0" + ] + }, + { + "id": 602, + "type": "PreviewImage", + "pos": [ + 3500, + -280 + ], + "size": { + "0": 510, + "1": 530 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1083 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 491, + "type": "Reroute", + "pos": [ + 1900, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 837 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1101 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 607, + "type": "Reroute", + "pos": [ + 2510, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1101 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 581, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 384.34497335156266 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1006, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": 
{ + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "10" + ] + }, + { + "id": 609, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 254.34497335156243 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1119, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "anime lineart, 1girl, solo, short red hair, 2D, illustration" + ] + }, + { + "id": 608, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 124.34497335156253 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1118, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "anime lineart, 1girl, solo, short red hair, 2D, illustration" + ] + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 2510, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 899 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 614 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 600, + "type": "VAEDecode", + "pos": [ + 3310, + -250 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 1080 + }, + { + "name": "vae", + "type": "VAE", + "link": 1082 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1083 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 601, + "type": "Reroute", + "pos": [ + 3170, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1100 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1082 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 563, + "type": "CR Encode Scheduled Prompts", + "pos": [ + 2190.728895078124, + -298.7789858496094 + ], + "size": { + "0": 290, + "1": 94 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 985 + }, + { + "name": "current_prompt", + "type": "STRING", + "link": 1115, + "widget": { + "name": "current_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "next_prompt", + "type": "STRING", + "link": 1116, + "widget": { + "name": "next_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "weight", + "type": "FLOAT", + "link": 1117, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 0, + "min": -9999, + "max": 9999, + "step": 0.1 + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1106 + ], + "shape": 3, + 
"slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Encode Scheduled Prompts" + }, + "widgets_values": [ + "", + "", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 610, + "type": "CR Apply ControlNet", + "pos": [ + 2570, + -300 + ], + "size": { + "0": 250, + "1": 122 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 1106 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 1112 + }, + { + "name": "image", + "type": "IMAGE", + "link": 1110 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Apply ControlNet" + }, + "widgets_values": [ + "On", + 0.7000000000000001 + ] + }, + { + "id": 605, + "type": "LoadImage", + "pos": [ + 2470, + -1070 + ], + "size": { + "0": 320, + "1": 310 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1110 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "depth_leres-0070.png", + "image" + ] + }, + { + "id": 613, + "type": "ControlNetLoader", + "pos": [ + 2470, + -700 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 1112 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "t2iadapter_zoedepth_sd15v1.pth" + ] + }, + { + "id": 612, + "type": "EmptyLatentImage", + "pos": [ + 2570, + -130 + ], + "size": { + "0": 250, + "1": 120 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 768, + 1 + ] + }, + { + "id": 582, + "type": "CR Integer To String", + "pos": [ + 2210, + 410 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 1089, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1006 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 490, + "type": "CR Current Frame", + "pos": [ + 1390, + -230 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": { + "collapsed": true + }, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 836, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 1089, + 1114 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 537, + "type": "CR Prompt Text", + "pos": [ + 1700, + 170 + ], + "size": { + "0": 310, + "1": 90 + }, + "flags": 
{}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 1044 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "embedding:EasyNegative, " + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 252, + "type": "KSampler", + "pos": [ + 2918.4079541276733, + -278.0029971578494 + ], + "size": { + "0": 290, + "1": 550 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 614 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 1107 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 1087 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 1111 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1080 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 722889772155925, + "fixed", + 20, + 10, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 587, + "type": "CR Float To String", + "pos": [ + 2210, + 550 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 1120, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1010 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 614, + "type": "CR Prompt Scheduler", + "pos": [ + 1670, + -280 + ], + "size": { + "0": 350, + "1": 390 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 1114, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": [ + 1115, + 1118 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "next_prompt", + "type": "STRING", + "links": [ + 1116, + 1119 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "weight", + "type": "FLOAT", + "links": [ + 1117, + 1120 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Scheduler" + }, + "widgets_values": [ + "Keyframe List", + 0, + "default text", + "Deforum", + "Yes", + "P1", + "\"0\": \"1girl, solo, long grey hair\",\n\"5\": \"1girl, solo, long blue hair\",\n\"10\": \"1girl, solo, short red hair\",\n\"15\": \"1girl, solo, short black hair\"", + "anime lineart", + "2D, illustration" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 508, + "type": "PrimitiveNode", + "pos": [ + 1076.5295277914042, + -417.2781446023464 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 836 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 614, + 387, + 0, + 252, + 0, + "MODEL" + ], + [ + 836, + 508, + 0, + 490, + 0, + "INT" + ], + [ + 837, + 250, + 
0, + 491, + 0, + "*" + ], + [ + 899, + 500, + 0, + 387, + 0, + "*" + ], + [ + 902, + 249, + 0, + 500, + 0, + "*" + ], + [ + 985, + 528, + 0, + 563, + 0, + "CLIP" + ], + [ + 1006, + 582, + 0, + 581, + 0, + "STRING" + ], + [ + 1010, + 587, + 0, + 585, + 0, + "STRING" + ], + [ + 1044, + 537, + 0, + 591, + 1, + "STRING" + ], + [ + 1045, + 528, + 0, + 591, + 0, + "CLIP" + ], + [ + 1080, + 252, + 0, + 600, + 0, + "LATENT" + ], + [ + 1082, + 601, + 0, + 600, + 1, + "VAE" + ], + [ + 1083, + 600, + 0, + 602, + 0, + "IMAGE" + ], + [ + 1087, + 591, + 0, + 252, + 2, + "CONDITIONING" + ], + [ + 1089, + 490, + 0, + 582, + 0, + "INT" + ], + [ + 1091, + 249, + 1, + 528, + 0, + "*" + ], + [ + 1100, + 607, + 0, + 601, + 0, + "*" + ], + [ + 1101, + 491, + 0, + 607, + 0, + "*" + ], + [ + 1106, + 563, + 0, + 610, + 0, + "CONDITIONING" + ], + [ + 1107, + 610, + 0, + 252, + 1, + "CONDITIONING" + ], + [ + 1110, + 605, + 0, + 610, + 2, + "IMAGE" + ], + [ + 1111, + 612, + 0, + 252, + 3, + "LATENT" + ], + [ + 1112, + 613, + 0, + 610, + 1, + "CONTROL_NET" + ], + [ + 1114, + 490, + 0, + 614, + 1, + "INT" + ], + [ + 1115, + 614, + 0, + 563, + 1, + "STRING" + ], + [ + 1116, + 614, + 1, + 563, + 2, + "STRING" + ], + [ + 1117, + 614, + 2, + 563, + 3, + "FLOAT" + ], + [ + 1118, + 614, + 0, + 608, + 0, + "STRING" + ], + [ + 1119, + 614, + 1, + 609, + 0, + "STRING" + ], + [ + 1120, + 614, + 2, + 587, + 0, + "FLOAT" + ] + ], + "groups": [ + { + "title": "Model", + "bounding": [ + 1437, + -777, + 383, + 344 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Sampling", + "bounding": [ + 2876, + -390, + 369, + 693 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Load Frames", + "bounding": [ + 1043, + -523, + 284, + 416 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Conditioning", + "bounding": [ + 2162, + -387, + 354, + 365 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt", + "bounding": [ + 1617, + -388, + 434, + 697 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Show Values", + "bounding": [ + 2159, + 26, + 515, + 605 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A5_CentralPromptScheduling_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A5_CentralPromptScheduling_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..453e3e843a3c0e35980d85a213d7a712a7bb499e --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_A5_CentralPromptScheduling_Demo_v01a.json @@ -0,0 +1,1650 @@ +{ + "last_node_id": 616, + "last_link_id": 1120, + "nodes": [ + { + "id": 249, + "type": "CheckpointLoaderSimple", + "pos": [ + 1463.9153190458471, + -676.6040770005651 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 902 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 1091 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + 
"SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 509, + "type": "Note", + "pos": [ + 1076.5295277914042, + -267.27814460234464 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 591, + "type": "CLIPTextEncode", + "pos": [ + 2190.728895078124, + -138.77898584960937 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 1045 + }, + { + "name": "text", + "type": "STRING", + "link": 1044, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1087 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ] + }, + { + "id": 250, + "type": "VAELoader", + "pos": [ + 1463.9153190458471, + -526.6040770005652 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 837 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 528, + "type": "Reroute", + "pos": [ + 1900, + -500 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1091 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 985, + 1045 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 500, + "type": "Reroute", + "pos": [ + 1900, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 902 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 899 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 585, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 524.3449733515627 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1010, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0.4" + ] + }, + { + "id": 602, + "type": "PreviewImage", + "pos": [ + 3500, + -280 + ], + "size": { + "0": 510, + "1": 530 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1083 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 491, + "type": "Reroute", + "pos": [ + 1900, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 837 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1101 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": 
true, + "horizontal": false + } + }, + { + "id": 607, + "type": "Reroute", + "pos": [ + 2510, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1101 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 581, + "type": "ShowText|pysssss", + "pos": [ + 2420.721570859376, + 384.34497335156266 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1006, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "5" + ] + }, + { + "id": 609, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 254.34497335156243 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1119, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1girl, long black hair" + ] + }, + { + "id": 608, + "type": "ShowText|pysssss", + "pos": [ + 2310.721570859376, + 124.34497335156253 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 1118, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1girl, long red hair" + ] + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 2510, + -560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 899 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 614 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 600, + "type": "VAEDecode", + "pos": [ + 3310, + -250 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 1080 + }, + { + "name": "vae", + "type": "VAE", + "link": 1082 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1083 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 601, + "type": "Reroute", + "pos": [ + 3170, + -450 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 1100 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 1082 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 563, + "type": "CR Encode Scheduled Prompts", + "pos": [ + 2190.728895078124, + -298.7789858496094 
+ ], + "size": { + "0": 290, + "1": 94 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 985 + }, + { + "name": "current_prompt", + "type": "STRING", + "link": 1115, + "widget": { + "name": "current_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "next_prompt", + "type": "STRING", + "link": 1116, + "widget": { + "name": "next_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "weight", + "type": "FLOAT", + "link": 1117, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 0, + "min": -9999, + "max": 9999, + "step": 0.1 + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1106 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Encode Scheduled Prompts" + }, + "widgets_values": [ + "", + "", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 610, + "type": "CR Apply ControlNet", + "pos": [ + 2570, + -300 + ], + "size": { + "0": 250, + "1": 122 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 1106 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 1112 + }, + { + "name": "image", + "type": "IMAGE", + "link": 1110 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 1107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Apply ControlNet" + }, + "widgets_values": [ + "On", + 0.7000000000000001 + ] + }, + { + "id": 605, + "type": "LoadImage", + "pos": [ + 2470, + -1070 + ], + "size": { + "0": 320, + "1": 310 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1110 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "depth_leres-0070.png", + "image" + ] + }, + { + "id": 613, + "type": "ControlNetLoader", + "pos": [ + 2470, + -700 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 1112 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "t2iadapter_zoedepth_sd15v1.pth" + ] + }, + { + "id": 612, + "type": "EmptyLatentImage", + "pos": [ + 2570, + -130 + ], + "size": { + "0": 250, + "1": 120 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1111 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 768, + 1 + ] + }, + { + "id": 582, + "type": "CR Integer To String", + "pos": [ + 2210, + 410 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 1089, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1006 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" 
+ }, + "widgets_values": [ + 0 + ] + }, + { + "id": 537, + "type": "CR Prompt Text", + "pos": [ + 1690, + 60 + ], + "size": { + "0": 310, + "1": 90 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 1044 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "embedding:EasyNegative, " + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 490, + "type": "CR Current Frame", + "pos": [ + 1390, + -230 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": { + "collapsed": true + }, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 836, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 1089, + 1114 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 615, + "type": "CR Central Schedule", + "pos": [ + 1040, + -30 + ], + "size": { + "0": 410, + "1": 550 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 1113 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "\"0\": \"1girl, long grey hair\",\n\"5\": \"1girl, long blue hair\",\n\"10\": \"1girl, long red hair\",\n\"15\": \"1girl, long black hair\"", + "Prompt", + "P1", + "schedule", + "Value", + "", + "schedule", + "Value", + "", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 252, + "type": "KSampler", + "pos": [ + 2918.4079541276733, + -278.0029971578494 + ], + "size": { + "0": 290, + "1": 550 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 614 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 1107 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 1087 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 1111 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 1080 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 722889772155925, + "fixed", + 20, + 10, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 614, + "type": "CR Prompt Scheduler", + "pos": [ + 1670, + -280 + ], + "size": { + "0": 350, + "1": 286 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 1113 + }, + { + "name": "current_frame", + "type": "INT", + "link": 1114, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + }, + { + "name": "keyframe_list", + "type": "STRING", + "link": null, + "widget": { + "name": "keyframe_list", + "config": [ + "STRING", + { + "multiline": true, + "default": "keyframe list" + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": [ + 1115, + 1118 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": 
"next_prompt", + "type": "STRING", + "links": [ + 1116, + 1119 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "weight", + "type": "FLOAT", + "links": [ + 1117, + 1120 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "default text", + "Deforum", + "Yes", + "P1", + "anime lineart", + "anime line-art", + "2D, illustration" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 587, + "type": "CR Float To String", + "pos": [ + 2210, + 550 + ], + "size": { + "0": 320, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 1120, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 1010 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 508, + "type": "PrimitiveNode", + "pos": [ + 1076.5295277914042, + -417.2781446023464 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 836 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 614, + 387, + 0, + 252, + 0, + "MODEL" + ], + [ + 836, + 508, + 0, + 490, + 0, + "INT" + ], + [ + 837, + 250, + 0, + 491, + 0, + "*" + ], + [ + 899, + 500, + 0, + 387, + 0, + "*" + ], + [ + 902, + 249, + 0, + 500, + 0, + "*" + ], + [ + 985, + 528, + 0, + 563, + 0, + "CLIP" + ], + [ + 1006, + 582, + 0, + 581, + 0, + "STRING" + ], + [ + 1010, + 587, + 0, + 585, + 0, + "STRING" + ], + [ + 1044, + 537, + 0, + 591, + 1, + "STRING" + ], + [ + 1045, + 528, + 0, + 591, + 0, + "CLIP" + ], + [ + 1080, + 252, + 0, + 600, + 0, + "LATENT" + ], + [ + 1082, + 601, + 0, + 600, + 1, + "VAE" + ], + [ + 1083, + 600, + 0, + 602, + 0, + "IMAGE" + ], + [ + 1087, + 591, + 0, + 252, + 2, + "CONDITIONING" + ], + [ + 1089, + 490, + 0, + 582, + 0, + "INT" + ], + [ + 1091, + 249, + 1, + 528, + 0, + "*" + ], + [ + 1100, + 607, + 0, + 601, + 0, + "*" + ], + [ + 1101, + 491, + 0, + 607, + 0, + "*" + ], + [ + 1106, + 563, + 0, + 610, + 0, + "CONDITIONING" + ], + [ + 1107, + 610, + 0, + 252, + 1, + "CONDITIONING" + ], + [ + 1110, + 605, + 0, + 610, + 2, + "IMAGE" + ], + [ + 1111, + 612, + 0, + 252, + 3, + "LATENT" + ], + [ + 1112, + 613, + 0, + 610, + 1, + "CONTROL_NET" + ], + [ + 1113, + 615, + 0, + 614, + 0, + "SCHEDULE" + ], + [ + 1114, + 490, + 0, + 614, + 1, + "INT" + ], + [ + 1115, + 614, + 0, + 563, + 1, + "STRING" + ], + [ + 1116, + 614, + 1, + 563, + 2, + "STRING" + ], + [ + 1117, + 614, + 2, + 563, + 3, + "FLOAT" + ], + [ + 1118, + 614, + 0, + 608, + 0, + "STRING" + ], + [ + 1119, + 614, + 1, + 609, + 0, + "STRING" + ], + [ + 1120, + 614, + 2, + 587, + 0, + "FLOAT" + ] + ], + "groups": [ + { + "title": "Model", + "bounding": [ + 1437, + -777, + 383, + 344 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Sampling", + "bounding": [ + 2876, + -390, + 369, + 693 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Load Frames", + "bounding": [ + 1043, + -523, + 284, + 416 + ], + 
"color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Conditioning", + "bounding": [ + 2162, + -387, + 354, + 365 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt", + "bounding": [ + 1617, + -388, + 434, + 582 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Show Values", + "bounding": [ + 2159, + 26, + 515, + 605 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B1_CentralSchedule_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B1_CentralSchedule_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..2b69311422dfbb1de01743d85fb7b10fd3fbdb31 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B1_CentralSchedule_Demo_v01b.json @@ -0,0 +1,879 @@ +{ + "last_node_id": 128, + "last_link_id": 146, + "nodes": [ + { + "id": 59, + "type": "Note", + "pos": [ + 928.237579825001, + 372.36553706933677 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 0, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 928.237579825001, + 682.3655370693361 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 928.237579825001, + 772.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "15" + ] + }, + { + "id": 114, + "type": "Note", + "pos": [ + 368.2375798250001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 170 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n\nIn normal workflows the Primitive node can replace the Animation Builder" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 79, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 622.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 144 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "Airbrushing" + ] + }, 
+ { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 482.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 99 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1024" + ] + }, + { + "id": 83, + "type": "CR Integer To String", + "pos": [ + 1648.2375798250027, + 392.36553706933677 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 145, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 928.237579825001, + 242.3655370693365 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 86, + "type": "Note", + "pos": [ + 650, + -10 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "As a general rule schedules should always have a line for frame 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 120, + "type": "Note", + "pos": [ + 1260, + 240 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Always convert the current_frame widget to input" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 91, + "type": "Reroute", + "pos": [ + 990, + 20 + ], + "size": [ + 107.2, + 26 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 138 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 139, + 140 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 928.237579825001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 134, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 91, + 141, + 142 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 122, + "type": "CR Central Schedule", + "pos": [ + -160, + -460 + ], + "size": { + "0": 360, + "1": 510 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 137 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + 
"type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "schedule", + "LoRA", + "", + "schedule", + "Upscale", + "", + "schedule", + "Model", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 124, + "type": "CR Text Scheduler", + "pos": [ + 1258.2375798250025, + 622.3655370693361 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 140 + }, + { + "name": "current_frame", + "type": "INT", + "link": 142, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 144 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "T1", + "default text", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 123, + "type": "CR Value Scheduler", + "pos": [ + 1258.2375798250025, + 392.36553706933677 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 139 + }, + { + "name": "current_frame", + "type": "INT", + "link": 141, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 145 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Value Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "V1", + 1024, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 113, + "type": "PrimitiveNode", + "pos": [ + 618.2375798249993, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 134 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + }, + { + "id": 121, + "type": "CR Central Schedule", + "pos": [ + 240, + -460 + ], + "size": { + "0": 360, + "1": 510 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 137 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 138, + 146 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024", + "Value", + "V1", + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "schedule", + "Camera", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 128, + "type": "CR Output Schedule To File", + "pos": [ + 780, + -460 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 146 + } + ], + "properties": { + "Node name for S&R": "CR Output Schedule To File" + }, + 
"widgets_values": [ + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 99, + 83, + 0, + 84, + 0, + "STRING" + ], + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 134, + 113, + 0, + 25, + 0, + "INT" + ], + [ + 137, + 122, + 0, + 121, + 0, + "SCHEDULE" + ], + [ + 138, + 121, + 0, + 91, + 0, + "*" + ], + [ + 139, + 91, + 0, + 123, + 0, + "SCHEDULE" + ], + [ + 140, + 91, + 0, + 124, + 0, + "SCHEDULE" + ], + [ + 141, + 25, + 0, + 123, + 1, + "INT" + ], + [ + 142, + 25, + 0, + 124, + 1, + "INT" + ], + [ + 144, + 124, + 0, + 79, + 0, + "STRING" + ], + [ + 145, + 123, + 0, + 83, + 0, + "INT" + ], + [ + 146, + 121, + 0, + 128, + 0, + "SCHEDULE" + ] + ], + "groups": [ + { + "title": "Central Schedule", + "bounding": [ + -205, + -562, + 1381, + 656 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Schedulers", + "bounding": [ + 323, + 136, + 1580, + 765 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B2_LoadScheduleFromFile_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B2_LoadScheduleFromFile_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..f10768be74e69c65ae1edc0911866cb1c56b96f3 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B2_LoadScheduleFromFile_Demo_v01a.json @@ -0,0 +1,705 @@ +{ + "last_node_id": 133, + "last_link_id": 154, + "nodes": [ + { + "id": 59, + "type": "Note", + "pos": [ + 928.237579825001, + 372.36553706933677 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 0, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 928.237579825001, + 682.3655370693361 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 928.237579825001, + 772.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "5" + ] + }, + { + "id": 114, + "type": "Note", + "pos": [ + 368.2375798250001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 170 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n\nIn normal workflows 
the Primitive node can replace the Animation Builder" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 79, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 622.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 144 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "Animation" + ] + }, + { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 482.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 99 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "896" + ] + }, + { + "id": 83, + "type": "CR Integer To String", + "pos": [ + 1648.2375798250027, + 392.36553706933677 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 145, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 928.237579825001, + 242.3655370693365 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 120, + "type": "Note", + "pos": [ + 1260, + 240 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Always convert the current_frame widget to input" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 928.237579825001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 134, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 91, + 141, + 142 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 5, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 124, + "type": "CR Text Scheduler", + "pos": [ + 1258.2375798250025, + 622.3655370693361 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 153 + }, + { + "name": "current_frame", + "type": "INT", + "link": 142, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 
144 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "T1", + "default text", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 123, + "type": "CR Value Scheduler", + "pos": [ + 1258.2375798250025, + 392.36553706933677 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 152 + }, + { + "name": "current_frame", + "type": "INT", + "link": 141, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 145 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Value Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "V1", + 1024, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 130, + "type": "Note", + "pos": [ + 360, + 250 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Load scheduling data from a file" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 113, + "type": "PrimitiveNode", + "pos": [ + 618.2375798249993, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 134 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 6, + "increment" + ] + }, + { + "id": 132, + "type": "CR Load Schedule From File", + "pos": [ + 620, + 350 + ], + "size": [ + 260, + 130 + ], + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 152, + 153 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Load Schedule From File" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Schedules", + "TestSchedule2", + "csv" + ] + } + ], + "links": [ + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 99, + 83, + 0, + 84, + 0, + "STRING" + ], + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 134, + 113, + 0, + 25, + 0, + "INT" + ], + [ + 141, + 25, + 0, + 123, + 1, + "INT" + ], + [ + 142, + 25, + 0, + 124, + 1, + "INT" + ], + [ + 144, + 124, + 0, + 79, + 0, + "STRING" + ], + [ + 145, + 123, + 0, + 83, + 0, + "INT" + ], + [ + 152, + 132, + 0, + 123, + 0, + "SCHEDULE" + ], + [ + 153, + 132, + 0, + 124, + 0, + "SCHEDULE" + ] + ], + "groups": [ + { + "title": "Schedulers", + "bounding": [ + 323, + 136, + 1580, + 765 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B3_OutputScheduleToFile_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B3_OutputScheduleToFile_Demo_v01a.json new file mode 100644 index 
0000000000000000000000000000000000000000..2987eda2bef941cdede696e2cc4bdf7875540cbb --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B3_OutputScheduleToFile_Demo_v01a.json @@ -0,0 +1,193 @@ +{ + "last_node_id": 128, + "last_link_id": 146, + "nodes": [ + { + "id": 122, + "type": "CR Central Schedule", + "pos": [ + -160, + -460 + ], + "size": { + "0": 360, + "1": 510 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 137 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "schedule", + "LoRA", + "", + "schedule", + "Upscale", + "", + "schedule", + "Model", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 121, + "type": "CR Central Schedule", + "pos": [ + 240, + -460 + ], + "size": { + "0": 360, + "1": 510 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 137 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 146 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024", + "Value", + "V1", + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "schedule", + "Camera", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 128, + "type": "CR Output Schedule To File", + "pos": [ + 780, + -460 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 146 + } + ], + "properties": { + "Node name for S&R": "CR Output Schedule To File" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Schedules", + "TestSchedule", + "csv" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 137, + 122, + 0, + 121, + 0, + "SCHEDULE" + ], + [ + 146, + 121, + 0, + 128, + 0, + "SCHEDULE" + ] + ], + "groups": [ + { + "title": "Central Schedule", + "bounding": [ + -205, + -562, + 1381, + 656 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B4_CombineSchedules_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B4_CombineSchedules_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..371a29308f8e69022bfbfe1012d9ac3e3450c643 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_B4_CombineSchedules_Demo_v01a.json @@ -0,0 +1,1102 @@ +{ + "last_node_id": 135, + "last_link_id": 164, + "nodes": [ + { + "id": 59, + "type": "Note", + "pos": [ + 928.237579825001, + 372.36553706933677 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 0, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame 
index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 928.237579825001, + 682.3655370693361 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 928.237579825001, + 772.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "4" + ] + }, + { + "id": 114, + "type": "Note", + "pos": [ + 368.2375798250001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 170 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n\nIn normal workflows the Primitive node can replace the Animation Builder" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 79, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 622.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 144 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "2D Game Art" + ] + }, + { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1648.2375798250027, + 482.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 99 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "896" + ] + }, + { + "id": 83, + "type": "CR Integer To String", + "pos": [ + 1648.2375798250027, + 392.36553706933677 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 145, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 928.237579825001, + 242.3655370693365 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": 
"#233", + "bgcolor": "#355" + }, + { + "id": 120, + "type": "Note", + "pos": [ + 1260, + 240 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Always convert the current_frame widget to input" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 928.237579825001, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 134, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 91, + 141, + 142 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 124, + "type": "CR Text Scheduler", + "pos": [ + 1258.2375798250025, + 622.3655370693361 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 140 + }, + { + "name": "current_frame", + "type": "INT", + "link": 142, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 144 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "T1", + "default text", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 123, + "type": "CR Value Scheduler", + "pos": [ + 1258.2375798250025, + 392.36553706933677 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 139 + }, + { + "name": "current_frame", + "type": "INT", + "link": 141, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 145 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Value Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "V1", + 1024, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 128, + "type": "CR Output Schedule To File", + "pos": [ + 732.5905286035164, + -717.3633016601559 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 159 + } + ], + "properties": { + "Node name for S&R": "CR Output Schedule To File" + }, + "widgets_values": [ + "", + "", + "txt" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 91, + "type": "Reroute", + "pos": [ + 980, + 30 + ], + "size": [ + 107.2, + 26 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 158 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 139, + 140 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 131, + "type": 
"CR Combine Schedules", + "pos": [ + 280, + -510 + ], + "size": { + "0": 254.40000915527344, + "1": 106 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "schedule_1", + "type": "SCHEDULE", + "link": 163 + }, + { + "name": "schedule_2", + "type": "SCHEDULE", + "link": 161 + }, + { + "name": "schedule_3", + "type": "SCHEDULE", + "link": 162 + }, + { + "name": "schedule_4", + "type": "SCHEDULE", + "link": 164 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 158, + 159 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [ + 157 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Combine Schedules" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 132, + "type": "CR Simple Schedule", + "pos": [ + -207.40947139648455, + -137.36330166015622 + ], + "size": { + "0": 360, + "1": 200 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 162 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024", + "Value", + "V1", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 133, + "type": "CR Combine Schedules", + "pos": [ + 280, + -670 + ], + "size": { + "0": 254.40000915527344, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "schedule_1", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_2", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_3", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_4", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 163 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Combine Schedules" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 122, + "type": "CR Central Schedule", + "pos": [ + -207.40947139648455, + -717.3633016601559 + ], + "size": { + "0": 360, + "1": 510 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 161 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "schedule", + "Upscale", + "", + "schedule", + "Model", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 134, + "type": "Note", + "pos": [ + 660, + -510 + ], + "size": [ + 230, + 160 + ], + "flags": {}, + "order": 8, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Four different methods of combining schedules are shown\n\n1. Combine from Central Schedules\n2. Combine from Simple Schedules\n3. Combine from combination nodes\n4. 
Combine from File\n\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 135, + "type": "CR Load Schedule From File", + "pos": [ + 240, + -70 + ], + "size": { + "0": 315, + "1": 126 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 164 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Schedule From File" + }, + "widgets_values": [ + "", + "", + "txt" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 113, + "type": "PrimitiveNode", + "pos": [ + 618.2375798249993, + 572.3655370693361 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 134 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + }, + { + "id": 130, + "type": "ShowText|pysssss", + "pos": [ + 670, + -260 + ], + "size": { + "0": 310, + "1": 130 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 157, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "[('T1', '0, Art Nouveau'), ('T1', '2, Antarctica'), ('T1', '4, 2D Game Art'), ('T1', '5, Animation'), ('T1', '8, Airbrushing'), ('V1', '0, 512'), ('V1', '2, 640'), ('V1', '3, 768'), ('V1', '4, 896'), ('V1', '8, 1024'), ['V1', '0, 512'], ['V1', '2, 640'], ['V1', '3, 768'], ['V1', '4, 896'], ['V1', '8, 1024'], ['T1', '0, Art Nouveau'], ['T1', '2, Antarctica'], ['T1', '4, 2D Game Art'], ['T1', '5, Animation'], ['T1', '8, Airbrushing']]" + ] + } + ], + "links": [ + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 99, + 83, + 0, + 84, + 0, + "STRING" + ], + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 134, + 113, + 0, + 25, + 0, + "INT" + ], + [ + 139, + 91, + 0, + 123, + 0, + "SCHEDULE" + ], + [ + 140, + 91, + 0, + 124, + 0, + "SCHEDULE" + ], + [ + 141, + 25, + 0, + 123, + 1, + "INT" + ], + [ + 142, + 25, + 0, + 124, + 1, + "INT" + ], + [ + 144, + 124, + 0, + 79, + 0, + "STRING" + ], + [ + 145, + 123, + 0, + 83, + 0, + "INT" + ], + [ + 157, + 131, + 1, + 130, + 0, + "STRING" + ], + [ + 158, + 131, + 0, + 91, + 0, + "*" + ], + [ + 159, + 131, + 0, + 128, + 0, + "SCHEDULE" + ], + [ + 161, + 122, + 0, + 131, + 1, + "SCHEDULE" + ], + [ + 162, + 132, + 0, + 131, + 2, + "SCHEDULE" + ], + [ + 163, + 133, + 0, + 131, + 0, + "SCHEDULE" + ], + [ + 164, + 135, + 0, + 131, + 3, + "SCHEDULE" + ] + ], + "groups": [ + { + "title": "Central Schedule", + "bounding": [ + -253, + -819, + 1389, + 919 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Schedulers", + "bounding": [ + 323, + 136, + 1580, + 765 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C1_SimpleValueScheduler_Demo_v01b.json 
b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C1_SimpleValueScheduler_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..5c722e888873aee17dcc4b3309459297cb5d28b9 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C1_SimpleValueScheduler_Demo_v01b.json @@ -0,0 +1,1258 @@ +{ + "last_node_id": 90, + "last_link_id": 103, + "nodes": [ + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 660 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + }, + { + "name": "denoise", + "type": "FLOAT", + "link": 93, + "widget": { + "name": "denoise", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0, + "max": 1, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1860, + 710 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 59, + "type": "Note", + "pos": [ + 660, + 20 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 630, + 590 + ], + "size": { + "0": 380, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 660, + -100 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, 
+ { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1580, + 420 + ], + "size": { + "0": 210, + "1": 74 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 72, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 660, + 330 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 660, + 420 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 830 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 79, + "type": "ShowText|pysssss", + "pos": [ + 1210, + 400 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 97 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1.0" + ] + }, + { + "id": 72, + "type": "CR Simple Value Scheduler", + "pos": [ + 950, + 310 + ], + "size": { + "0": 220, + "1": 150 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 87, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 93, + 96 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": 
{ + "Node name for S&R": "CR Simple Value Scheduler" + }, + "widgets_values": [ + "0, 0.7\n2, 0.8\n4, 0.9\n6, 0.95\n8, 1.0", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 81, + "type": "CR Float To String", + "pos": [ + 1210, + 280 + ], + "size": { + "0": 210, + "1": 60 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 96, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 97 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1210, + 90 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 99 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1024" + ] + }, + { + "id": 83, + "type": "CR Integer To String", + "pos": [ + 1210, + 10 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 98, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 660, + 220 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 102, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 87, + 91, + 100 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 82, + "type": "CR Simple Value Scheduler", + "pos": [ + 950, + 30 + ], + "size": { + "0": 220, + "1": 150 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 100, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 98, + 101 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Simple Value Scheduler" + }, + "widgets_values": [ + "0, 512\n2, 640\n4, 768\n6, 896\n8, 1024", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 1480, + 110 + ], + "size": { + "0": 320, + "1": 240 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 101, + "widget": { + "name": "width", + "config": [ + "INT", + { + 
"default": 512, + "min": 64, + "max": 2048 + } + ] + } + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 72 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "custom", + "Off", + 1, + 1 + ] + }, + { + "id": 86, + "type": "Note", + "pos": [ + 950, + -100 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Simple value schedulers must have a line for frame 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 87, + "type": "Note", + "pos": [ + 1540, + -110 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 5, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The first value scheduler is increasing the width in CR SD1.5 Aspect Ratio\n\nThe second value scheduler is increasing the denoise in the KSampler" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 88, + "type": "PrimitiveNode", + "pos": [ + 360, + 220 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 102 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 1, + "increment" + ] + }, + { + "id": 89, + "type": "Note", + "pos": [ + 100, + 220 + ], + "size": { + "0": 210, + "1": 170 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n\nIn normal workflows the Primitive node can replace the Animation Builder" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 103 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 90, + "type": "PreviewImage", + "pos": [ + 2250, + 580 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 103 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 72, + 14, + 3, + 15, + 2, + "INT" + ], + [ + 87, + 
25, + 0, + 72, + 0, + "INT" + ], + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 93, + 72, + 1, + 13, + 4, + "FLOAT" + ], + [ + 96, + 72, + 1, + 81, + 0, + "FLOAT" + ], + [ + 97, + 81, + 0, + 79, + 0, + "STRING" + ], + [ + 98, + 82, + 0, + 83, + 0, + "INT" + ], + [ + 99, + 83, + 0, + 84, + 0, + "STRING" + ], + [ + 100, + 25, + 0, + 82, + 0, + "INT" + ], + [ + 101, + 82, + 0, + 14, + 0, + "INT" + ], + [ + 102, + 88, + 0, + 25, + 0, + "INT" + ], + [ + 103, + 16, + 0, + 90, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C2_ValueScheduler_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C2_ValueScheduler_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..2ccfa8a732d5d3bb83cd41d13463562fec9a11c7 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C2_ValueScheduler_Demo_v01a.json @@ -0,0 +1,1355 @@ +{ + "last_node_id": 123, + "last_link_id": 141, + "nodes": [ + { + "id": 17, + "type": "VAELoader", + "pos": [ + 2540, + 510 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 2590, + 380 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 133 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 60, + "type": "Note", + "pos": [ + -80, + 320 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press Reset in the Animation Builder and then press the Queue button. Do not use Queue Prompt in the ComfyUI menu."
+ ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 59, + "type": "Note", + "pos": [ + 510, + 30 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 2260, + 220 + ], + "size": { + "0": 210, + "1": 74 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 72, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 510, + 340 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 510, + 430 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0" + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 510, + -90 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 2160, + 380 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 2160, + -90 + ], + "size": 
{ + "0": 320, + "1": 240 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 140, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 2048 + } + ] + } + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 72 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "custom", + "Off", + 1, + 1 + ] + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 2900, + 210 + ], + "size": { + "0": 550, + "1": 730 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 133, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 56, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1880, + 520 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 1880, + 680 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 83, + "type": "CR Integer To String", + "pos": [ + 1870, + 20 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 134, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 99 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 87, + "type": "Note", + "pos": [ + 2160, + -250 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The Value Scheduler is increasing the width in CR SD1.5 Aspect Ratio\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 1420, + 380 + ], + "size": { + "0": 380, + "1": 
100 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 91, + "type": "Reroute", + "pos": [ + 990, + -100 + ], + "size": [ + 107.2, + 26 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 135 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 138 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 119, + "type": "CR Central Schedule", + "pos": [ + 10, + -450 + ], + "size": { + "0": 390, + "1": 520 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 135 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024", + "Value", + "V1", + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "schedule", + "Model", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1870, + 110 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 99 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "512" + ] + }, + { + "id": 118, + "type": "CR Value Scheduler", + "pos": [ + 1460, + 0 + ], + "size": { + "0": 320, + "1": 170 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 139 + }, + { + "name": "current_frame", + "type": "INT", + "link": 141, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 134, + 140 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Value Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "V1", + 512, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 86, + "type": "Note", + "pos": [ + 850, + 410 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Schedules should always have a line for frame 0\n\nIf frame 0 is missing the default value will be used" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 114, + "type": "Note", + "pos": [ + 1180, + -220 + ], + "size": { + "0": 210, + "1": 140 + }, + "flags": {}, + "order": 8, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "You can define 
either local or central schedules\n\nThis workflow shows both. You can switch between the two.\n\nThis switch would not normally be needed." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 120, + "type": "CR Simple Schedule", + "pos": [ + 840, + 150 + ], + "size": { + "0": 290, + "1": 200 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 137 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024", + "Value", + "V1", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 510, + 230 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 23, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 56, + 91, + 141 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + 180, + 230 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 10, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 10, + 1, + 1, + 11, + 1, + "frame: 1 / 9", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 121, + "type": "CR Schedule Input Switch", + "pos": [ + 1170, + 0 + ], + "size": { + "0": 240, + "1": 80 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "schedule1", + "type": "SCHEDULE", + "link": 138 + }, + { + "name": "schedule2", + "type": "SCHEDULE", + "link": 137 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 139 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Schedule Input Switch" + }, + "widgets_values": [ + 1 + ] + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 23, + 24, + 0, + 25, + 0, + "INT" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 56, + 25, + 0, + 26, + 1, + "INT" + ], + [ + 72, + 14, + 3, + 15, + 2, + "INT" + ], + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 99, + 83, + 0, + 84, + 0, + "STRING" + ], + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 133, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 134, + 118, + 0, + 83, + 0, + "INT" + 
], + [ + 135, + 119, + 0, + 91, + 0, + "*" + ], + [ + 137, + 120, + 0, + 121, + 1, + "SCHEDULE" + ], + [ + 138, + 91, + 0, + 121, + 0, + "SCHEDULE" + ], + [ + 139, + 121, + 0, + 118, + 0, + "SCHEDULE" + ], + [ + 140, + 118, + 0, + 14, + 0, + "INT" + ], + [ + 141, + 25, + 0, + 118, + 1, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C3_SimpleTextScheduler_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C3_SimpleTextScheduler_Demo_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..1ba4bee895b265e38dd6facf057d4645840ba547 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C3_SimpleTextScheduler_Demo_v01a.json @@ -0,0 +1,829 @@ +{ + "last_node_id": 91, + "last_link_id": 107, + "nodes": [ + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": [ + 320, + 470 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1860, + 710 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 59, + "type": "Note", + "pos": [ + 660, + 20 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 630, + 590 + ], + "size": { + "0": 380, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 68, + "type": "Note", + "pos": [ + 660, + -100 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 660, + 330 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 10, + "mode": 0, + 
"inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 660, + 420 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "9" + ] + }, + { + "id": 89, + "type": "Note", + "pos": [ + 100, + 220 + ], + "size": { + "0": 210, + "1": 170 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The primitive node increments the current_frame on each batch\n\nReset the value to 0 before each run\n\nIn normal workflows the Primitive node can replace the Animation Builder" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 103 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 90, + "type": "PreviewImage", + "pos": [ + 2250, + 580 + ], + "size": { + "0": 210, + "1": 250 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 103 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 660, + 220 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 102, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 91, + 105 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 9, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 91, + "type": "CR Simple Text Scheduler", + "pos": [ + 940, + 220 + ], + "size": { + "0": 230, + "1": 150 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 105, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 106, + 107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Text Scheduler" + }, + "widgets_values": [ + "0, 1girl, long grey hair\n2, 1girl, long blue hair\n4, 1girl, long red hair,\n6, 1girl, long black hair\n8, 1girl, long pink hair", + 0 + ] + }, + { + "id": 84, + "type": "ShowText|pysssss", + "pos": [ + 1210, + 
220 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 106 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "1girl, long pink hair" + ] + }, + { + "id": 86, + "type": "Note", + "pos": [ + 940, + 80 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 5, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Simple value schedulers must have a line for frame 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 660 + ], + "size": { + "0": 210, + "1": 60 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + }, + { + "name": "text", + "type": "STRING", + "link": 107, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 770 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 88, + "type": "PrimitiveNode", + "pos": [ + 360, + 220 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 102 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 10, + "increment" + ] + }, + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1120, + 940 + ], + "size": [ + 210, + 110 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 102, + 88, + 0, + 25, + 0, + "INT" + ], + [ + 103, + 16, + 0, + 90, + 0, + "IMAGE" + ], + [ + 105, + 25, + 0, + 91, + 0, + "INT" + ], + [ + 106, + 91, + 0, + 84, + 0, + "STRING" + ], + [ + 107, + 91, + 0, + 11, + 1, + "STRING" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git 
a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C4_TextScheduler_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C4_TextScheduler_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..f8a8c5e0d73a0b7258f83ec8fe560ce7580b8432 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C4_TextScheduler_Demo_v01b.json @@ -0,0 +1,2921 @@ +{ + "last_node_id": 135, + "last_link_id": 159, + "nodes": [ + { + "id": 17, + "type": "VAELoader", + "pos": [ + 3010, + 500 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 3060, + 370 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 133 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 59, + "type": "Note", + "pos": [ + 810, + 40 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 2730, + 210 + ], + "size": { + "0": 210, + "1": 74 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 72, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 78, + "type": "CR Integer To String", + "pos": [ + 810, + 350 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "int_", + "type": "INT", + "link": 91, + "widget": { + "name": "int_", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Integer To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 77, + "type": "ShowText|pysssss", + "pos": [ + 810, + 440 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": 
"text", + "type": "STRING", + "link": 90 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "5" + ] + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 2630, + 370 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 3370, + 200 + ], + "size": { + "0": 550, + "1": 730 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 133, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 56, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 2630, + -100 + ], + "size": { + "0": 320, + "1": 240 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 72 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "2:3 portrait 512x768", + "Off", + 1, + 1 + ] + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 2350, + 510 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + }, + { + "name": "text", + "type": "STRING", + "link": 155, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 130, + "type": "MileHighStyler", + "pos": [ + 1590, + 760 + ], + "size": { + "0": 400, + "1": 210 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "style", + "type": "no style,2D Game Art,3D Animation,3D Game Art,3D Modeling,3D Printing Art,3D Printing in Art,AR Art Variant_Uncategorized,Aardman_Uncategorized,Abandoned Asylum_Uncategorized,Aboriginal Dot Painting,Abstract Expressionism,Abstract Painting,Abstract 
Photography,Abstract Sculpture,Absurdist Theater,Academic Art,Acrylic Painting,Action Films,Addams Family_Portraiture_Horror,Adrian Ghenie,Adventure,Adventure Films,Aerial Dance,Aerial Photography,African Beadwork,African Beadwork Art,African Cuisine,African Mask Art,African Mask Making,Agnes Martin,Ai Weiwei_1,Ai Weiwei_2,Air Art,Airbrushing,Albrecht Durer,Album Cover Art,Alchemist's Study_Uncategorized,Amazon Rainforest,American Cuisine,American Traditional_Retro_Tattoo Art,Amsterdam,Amsterdam cityscape,Analytical Cubism,Ancient Maya_Uncategorized,Andy Warhol,Anger Art,Animated Corpse_Animation,Animated Films,Animation,Anish Kapoor,Ankama_Animation,Anselm Kiefer,Antarctica,Appropriation (1)_Culture,Après-Ski_Uncategorized,Arachnid Swarm_Uncategorized,Architectural Design,Architectural Photography,Argentinian Art,Art Activism,Art Collaborations with Musicians,Art Collaborations with Writers,Art Conservation,Art Criticism,Art Curation,Art Deco,Art Deco Architecture,Art Deco Architecture_Architecture,Art Deco Design,Art Education,Art Education for Adults,Art Education for Children,Art Education for Remote Areas,Art Education for Special Needs,Art Gallery Management,Art Games,Art Historical Writing,Art History,Art History Research,Art Informatics,Art Informel (1)_Uncategorized,Art Inspired by Ancient Civilizations,Art Inspired by the Digital Age,Art Inspired by the Renaissance,Art Inspired by the Roaring Twenties,Art Inspired by the Victorian Era,Art Installations,Art Journalism,Art Marketing,Art Nouveau,Art Nouveau Architecture,Art Nouveau Design,Art Nouveau Poster_Uncategorized,Art Nouveau Variant_Uncategorized,Art Restoration,Art Sales and Auctions,Art Therapy,Art Therapy for Adults,Art Therapy for Children,Art Workshop Facilitation,Art and AI Collaboration,Art and Architecture Collaboration,Art and Cultural Heritage Preservation,Art and Environmental Sustainability,Art and Literature Collaboration,Art and Medical Collaboration,Art and Mental Health,Art and Music Collaboration,Art and Science Collaboration,Art and Social Justice Projects,Art and Technology Collaboration,Art and Urban Development,Art for Agricultural Industry,Art for Agricultural Sector,Art for Airports,Art for Animal Welfare Organizations,Art for Anniversaries,Art for Aquariums,Art for Architectural Visualization,Art for Asian Cultures,Art for Augmented Reality Experiences,Art for Automotive Design,Art for Automotive Industry,Art for Aviation Industry,Art for Baby Showers,Art for Birthdays,Art for Botanical Gardens,Art for Cafes and Restaurants,Art for Charity Fundraisers,Art for Children,Art for Children's Hospitals,Art for Climate Change Initiatives,Art for Construction Industry,Art for Corporate Spaces,Art for Cruise Ships,Art for Culinary Presentation,Art for E-Commerce Platforms,Art for Educational Institutions,Art for Educational Technology,Art for Elderly,Art for Emergency Services,Art for Energy Industry,Art for Entertainment Industry,Art for Environmental Activism,Art for Environmental Campaigns,Art for Factories and Workshops,Art for Fashion Industry,Art for Festivals and Events,Art for Financial Institutions,Art for Financial Sector,Art for Fitness Centers,Art for Funerals,Art for Gender Equality,Art for Government Entities,Art for Graduations,Art for Health Care Facilities,Art for Home Decor,Art for Hospitality Industry,Art for Hotels,Art for Human Anatomy Studies,Art for Human Rights Campaigns,Art for Indigenous Cultures,Art for LGBTQ+ Celebrations,Art for Libraries,Art for Marine Industry,Art for Maritime 
Industry,Art for Medical Illustrations,Art for Military and Defense Sector,Art for Military and Veterans,Art for Mobile Apps,Art for Museums,Art for Music Videos,Art for National Holidays,Art for Nautical Navigation,Art for Non-Profit Organizations,Art for Office Spaces,Art for Outdoor Advertising,Art for Packaging Design,Art for Pet Products,Art for Pharmaceutical Industry,Art for Political Campaigns,Art for Prisons,Art for Public Transportation,Art for Real Estate Marketing,Art for Religious Celebrations,Art for Religious Institutions,Art for Renewable Energy Sector,Art for Retail Spaces,Art for Retirement Parties,Art for Robotics,Art for Schools and Colleges,Art for Science Centers,Art for Scientific Exploration,Art for Security and Defense,Art for Seniors,Art for Shopping Malls,Art for Smart City Projects,Art for Social Media Platforms,Art for Social Networking Sites,Art for Spa and Wellness Centers,Art for Space Exploration,Art for Space Industry,Art for Spaceships and Aerospace,Art for Sports Industry,Art for Sports Venues,Art for Technical Manuals,Art for Teenagers,Art for Teens,Art for Television Shows,Art for Theme Parks,Art for Toddlers,Art for Train Stations,Art for Underwater Exploration,Art for Video Game Development,Art for Virtual Assistants and AI,Art for Virtual Events,Art for Virtual Reality Experiences,Art for Wearable Technology,Art for Wearables,Art for Web Platforms,Art for Weddings,Art for Zoos,Art in Public Transportation,Art with Light and Projection,Art with Metalwork,Art with Organic Materials,Art with Recycled Materials,Artist's Books,Artware Variant_Sci-Fi_Graffiti_Digital Media,Aspen,Assemblage Art,Astrophotography,Athens,Athleisure Fashion,Atlantis,Augmented Reality (AR) Art,Augmented Reality Art,Australian Aboriginal Art,Autobiography,Automotive Design,Autumn Art,Avant-Garde Fashion,Aztec Calendar_Uncategorized,Back Alley Rogue_Uncategorized,Ballet Dance,Ballet_Uncategorized,Ballroom Dance,Bangkok,Banksy_1,Banksy_2,Barbara Kruger,Barcelona,Baroque,Baroque Architecture,Baroque Art,Baroque Music,Bas-Relief Sculpture_Sculpture,Basket Weaving,Basket Weaving Art,Battle_Uncategorized,Bauhaus,Bauhaus (1)_Architecture,Bauhaus Architecture,Bauhaus Design,Bauhaus Design_Uncategorized,Beachwear Fashion,Beijing,Belly Dance,Berlin,Bharatanatyam Dance,Bikini Bottom,Bio Art,Bio Art_Nature,Biographical Films,Biographical Literature,Biography,Biomorphic Architecture,Black Hole,Black Velvet Painting_Portraiture,Black and White Photography,Blacklight Poster_Uncategorized,Blockbuster Films,Bloodthirsty Vampire_Horror,Bluegrass Music,Blueprint_Uncategorized,Blues Music,Blues Music Illustration,Body Art,Body Art Performance,Body Painting,Bohemian Fashion,Bomber Jacket_Retro,Bookbinding,Botanical Illustration,Boudoir Photography_Photography,Brazil,Brazilian Art,Brazilian Cuisine,Brazilian Graffiti Art,Breakdance,Bridal Fashion,Brightwater Variant_Nature,British Art,Bronze Sculpture,Bruce Nauman,Bruges,Brutalism,Brutalist Architecture,Budapest cityscape,Cabinet of Curiosities_Occult,Cai Guo-Qiang,Cake Decorating,Canada,Candid Portrait Photography,Caravaggio,Caribbean Carnival Art,Caribbean Cuisine,Caricature,Carnival Freakshow_Retro,Caspar David Friedrich,Cassette Bedroom_Retro,Cassette Collage_Sci-Fi_Surrealism_Retro,Cassette Futurism_Retro,Cassette Graphics_Retro_Surrealism,Cassette J-Card_Retro,Cassette Wall_Retro,Casual Fashion,Caveopolis Variant_Lifestyle,Cecily Brown,Celtic Knotwork Art,Celtic Mythology Art,Cemetery Statue_Uncategorized,Central African Art,Central 
American_Uncategorized,Ceramic Art,Ceramic Design,Ceramic Sculpture,Ceramics,Chalk Art,Charcoal Drawing,Charles Ray,Chicago,Children's Fashion,Children's Theater,Chilean Art,Chinese Architecture,Chinese Art,Chinese Calligraphy,Chinese Cuisine,Chinese Ink Painting,Chinese Jade Carving,Chinese Landscape Painting,Chinese Mythology Art,Chinese Paper Cutting,Chinese Scroll Painting,Chris Ofili,Cindy Sherman_1,Cindy Sherman_2,Cinematography,Cinque Terre,Circuit Bending_Uncategorized,Circus Arts,Circus Performer_Retro,Classic Western,Classical Architecture,Classical Art,Classical Music,Classical Music Illustration,Classical Realism,Classical Realism_Portraiture,Classical Theater,Claude Monet,Clockwork City Variant_Architecture_Location,Collaborative Art Projects,Collage,Collage Art,Colombian Art,Colonial Architecture,Colosseum,Combine Painting_Sci-Fi_Still Life,Comedy,Comedy Literature,Commercial Photography,Community Mural Projects,Computer art,Concept Art for Movies,Concept Art for Video Games,Conceptual Art,Concert Poster Design,Conjoined Twins_Uncategorized,Constructivism,Constructivism Art,Contemporary Ballet,Contemporary Dance,Copenhagen,Copenhagen cityscape,Corporate Identity Design,Cosplay Design,Cottagecore Fashion_Fashion,Country Music,Country Music Graphics,Crawler Mimicry_Uncategorized,Creepy Children_Portraiture,Creepy Porcelain Doll_Fashion_Portraiture,Crime Films,Critical Realism_Uncategorized,Cross-Disciplinary Art,Crucifixion_Uncategorized,Crystal Caverns Variant_Architecture,Cuban Art,Cuban Cuisine,Cubism,Cubism Art,Cult Films,Cyberpunk,Cyberpunk Fantasy Art,Dadaism,Dadaism Art,Damien Hirst_1,Damien Hirst_2,Dan Flavin,Dance Choreography,Dance Performance Art,Dark Carnival_Gothic,Dark Fantasy Art,Data Art Variant_Uncategorized,Data Art_Uncategorized,Data Visualization Art,Day of the Dead_Uncategorized,De Stijl_Uncategorized,Death Masque_Uncategorized,Deconstructivist Architecture,Demonic Clown_Uncategorized,Demonic Portal_Horror,Demonic Possession_Uncategorized,Demoscene_Animation,Desaturated_Uncategorized,Die Brücke_Graffiti,Diego Velazquez,Dieselpunk_Retro,Digital Animation,Digital Art,Digital Art_Digital Media,Digital Drawing Tablets,Digital Illustration,Digital Painting,Digital Sculpture,Digital Storytelling,Diorama_Uncategorized,Disco Music,Disney Animation_Animation,Disrespectful Grave Robber_Uncategorized,Documentary Films,Documentary Photography,Drama,Drama Films,Dubai,Dublin,Dublin cityscape,Dunder Mifflin,Dutch Art,Dwarvendom Variant_Uncategorized,Dwarvenholm Variant_Uncategorized,Earth Art,East African Art,Eco Art,Eco-Art,Ed Ruscha,Edgar Degas,Edinburgh cityscape,Editorial Design,Edvard Munch,Edward Hopper,Egyptian Hieroglyphs_Uncategorized,Egyptian Mythology Art,Egyptian Wall Art,Egyptology_Uncategorized,El Anatsui,Electronic Music,Electronic Music Visuals,Elegant_Erotic_Photography,Elfheim Variant_Architecture_Fantasy,Elven City Variant_Architecture_Location,Embroidery,Emerging_Artist,Engraving,Environmental Art,Environmental Design,Ephemeral Art,Etching,Eugene Delacroix,Exhibition Design,Exoplanet,Exorcism_Uncategorized,Experimental Art,Experimental Films,Experimental Music Video,Experimental Photography,Experimental Theater,Expressionism,Expressionist Architecture,Expressionist painting,Fairy Tale Art,Fantasy,Fantasy Films,Fantasy Literature,Farce,Fashion Design,Fashion Illustration,Fashion Photography,Fast Fashion,Fauvism,Fauvism Art,Ferocious Werewolf_Uncategorized,Festival Fashion,Fiction,Figurative Expressionism_Uncategorized,Figurine 
Shelf_Fantasy_Sculpture,Filipino Art,Film Direction,Film Editing,Film Noir,Fine Art Photography,Fine_Art_Black_and_White_Photography,Fire Art,Flamenco Dance,Folk Art Variant_Folk Art,Folk Art_Folk Art,Folk Dance,Folk Music,Folk Music Art,Food Art,Food Photography,Formal Fashion,Fortune Teller_Occult,Fortune Telling_Occult,France,Francisco Goya,Frankfurt cityscape,French Art,French Cuisine,French Impressionism,Fresco Painting Technique,Frida Kahlo,Funk Music,Furniture Design,Futurism,Futurist Architecture,GAYZ_Portraiture,Gabriel Orozco,Galactic_Sci-Fi,Game Design,Generative Art,Genetic Art_Uncategorized,Geometric Abstraction,Geometric abstract painting,Georg Baselitz,Georgia O'Keeffe,Gerhard Richter_1,Gerhard Richter_2,German Art,Ghibli_Surrealism,Ghoul City Variant_Architecture_Location,Giant Robot_Sci-Fi_Retro_Architecture,Glamorous Portrait_Fashion_Portraiture,Glamorous_Erotic_Photography,Glasgow cityscape,Glass Sculpture,Glassblowing,Glazing Technique in Painting,Glenn Ligon,Glitch Art_Uncategorized,Glitchcore_Digital Media,Gongfu Tea_Uncategorized,Gospel Music,Goth Boudoir_Gothic,Gotham City,Gothic Architecture,Gothic Architecture_Architecture_Gothic,Gothic Fashion,Gothic Literature,Gothic Monster_Architecture_Gothic,Gothic Revival Architecture,Gothic Revival Architecture_Architecture_Gothic,Graffiti Art,Graffiti Style_Graffiti,Grand Canyon,Grant Wood,Graphic Design,Graveyard Mist_Horror,Great Barrier Reef,Great Wall of China,Greek Art,Greek Classical Sculpture,Greek Mythology Art,Greek Pottery Art,Greendale,Gritty_Voyeuristic_Photography,Grotesque Gargoyle_Uncategorized,Grunge Flyer_Uncategorized,Gustav Klimt,Gutai_Sci-Fi_Event,H.P. Lovecraft Cover_Horror,Hackersville Variant_Architecture,Hallstatt,Hard-edge Painting_Uncategorized,Hate Crime_Uncategorized,Haunted Carnival_Horror,Haunted Portrait_Portraiture_Horror,Haute Couture,Haute Couture Fashion,Haute Cuisine,Hawkins,Headless Horseman_Uncategorized,Heavy Metal,Henri Matisse,Hieronymus Bosch,High Fantasy,High Fantasy Art,Hip-Hop Album Art,Hip-Hop Dance,Hip-Hop Fashion,Hip-Hop Music,Historical Fiction,Hogwarts,Hong Kong,Hong Kong cityscape,Horror,Horror Films,Horror Movie Poster_Horror_Gothic,Hyperrealism_Uncategorized,Ice Sculpture,Illustration Design,Illustration for Children's Books,Impressionism,Impressionism Art,Impressionist Landscape Painting,Impressionist Portrait Painting,Improvisational Theater,Inca Mythology Art,Indian Art,Indian Cuisine,Indian Miniature Painting,Indian Mythology Art,Indie Films,Indie Music Art,Indigenous Australian Art,Indigenous Painting,Indigenous Pottery,Indonesian Art,Industrial Architecture,Industrial Design,Information Art_Uncategorized,Ink Drawing,Insectoid Mutant_Portraiture,Installation Art,Interaction Design,Interactive Art,Interactive Art Installations,Interactive artwork,Interior Design,Internet Art_Sci-Fi_Digital Media,Intimate_Naturist_Photography,Intuitive Art_Uncategorized,Irish Art,Irish Dance,Islamic Architecture,Islamic Art,Islamic Calligraphy,Islamic Geometric Art,Islamic Geometric Patterns,Island Luau_Uncategorized_Location,Istanbul,Istanbul cityscape,Italian Art,Italian Cuisine,Italian Renaissance Art,J.M.W. 
Turner,Jackson Pollock,Jakarta cityscape,Japan,Japanese Architecture,Japanese Art,Japanese Cuisine,Japanese Mythology Art,Jazz Dance,Jazz Music,Jazz Poster Art,Jean-Honore Fragonard,Jeff Koons,Jenny Holzer,Jerusalem,Jewelry Design,Johannes Vermeer,John Baldessari,Joyful Art,Julie Mehretu,Kabuki Theater,Kara Walker,Kathak Dance,Katsushika Hokusai,Kawaii Character_Uncategorized,Kawaii Fashion_Fashion,Kehinde Wiley,Kerry James Marshall,Kiki Smith,Kinetic Art,Kinetic Sculpture,Kintsugi (Japanese Gold Repair),Kitsch Movement_Uncategorized,Knitting,Korean Art,Korean Celadon Ceramics,Korean Celadon Pottery,Korean Cuisine,Kuala Lumpur,Kyoto,Kyoto cityscape,LAIKA_Animation,Land Art,Land Art (1)_Fantasy_Nature_Sculpture_Landscape,Landscape Architecture,Landscape Design,Landscape Photography,Laser Grid_Uncategorized,Later European abstraction (1)_Uncategorized,Leonardo da Vinci,Lettrist artwork,Leviathan Variant_Architecture,Light Art,Line Dance,Lisbon cityscape,Lithography,Living Burial_Uncategorized,London,Los Angeles,Lost Vegas Variant_Architecture,Lounge Singer_Retro,Lovecraftian Horror_Horror,Low Fantasy,Lowbrow Art Variant_Surrealism_Culture,Lowbrow Art_Culture,Luau Fire Dancer_Fashion,Luchador_Uncategorized,Luxury Fashion,Lynching_Uncategorized,Lyrical abstract painting,Macabre Memento Mori_Horror_Horror & Dark_Still Life,Machinima Variant_Uncategorized,Machu Picchu,Macro Photography,Mad Scientist Machinery_Uncategorized,Madhubani Painting,Madhubani Painting (Indian Folk Art),Mage City Variant_Architecture_Fantasy_Location,Magic Realist painting,Mall Goth_Portraiture_Gothic,Mannerism,Mannerist Architecture,Maori Wood Carving,Mardi Gras_Uncategorized,Marina Abramović,Mark Bradford,Mark Grotjahn,Martin Puryear,Masked Killer_Uncategorized,Masked Stalker_Uncategorized,Maurizio Cattelan,Maximalism,Mecca,Mech City Variant_Sci-Fi_Architecture_Location,Mech City_Sci-Fi_Architecture_Location,Mech City__Location,Media Art,Medical Oddities_Uncategorized,Mediterranean Cuisine,Melancholy Art,Melodrama,Melting Skull_Uncategorized,Memento Mori_Horror_Horror & Dark,Memoir,Menacing Scarecrow_Uncategorized,Menswear Fashion,Mesoamerican Mythology Art,Mesopotamian Mythology Art,Metabolist Architecture,Metal Music,Metal Music Artwork,Metalwork,Metropolis,Mexican Art,Mexican Cuisine,Mexican Muralism,Mexican Skull Art_Uncategorized,Miami,Michelangelo,Middle Eastern Cuisine,Middle-earth,Midgard Variant_Architecture,Milky Way Galaxy,Mime,Mime City Variant_Architecture_Location,Minimalism,Minimalist Web Design,Mixed Media Art,Mixer_Animation,Modern Architecture,Modern Dance,Modernist Architecture,Mona Hatoum,Monoprinting Technique,Mosaic,Mosaic Art,Motion Design,Motion Graphics Design,Mount Everest,Mount Olympus,Movie Storyboard_Uncategorized,Mughal Miniature Painting,Mumbai,Mummy Portrait_Portraiture,Munich cityscape,Music Video Direction,Musica Variant_Architecture_Culture,Musical Films,Musical Theater,Mutated Beast_Uncategorized,My Little Pony_Uncategorized,Mystery,Mystery Literature,Mythic Fantasy Art,Nantucket,Native American Art,Native American Basketry,Native American Mythology Art,Native American Pottery,Naturalism in Literature,Nature Landscape Photography,Nature Photography,Nautical_Retro,Naïve Art (1)_Uncategorized,Nebula,Neo Pop_Pop Culture_Culture,Neo Rauch,Neo-Dada_Uncategorized,Neo-Expressionism_Uncategorized,Neo-Gothic Architecture,Neo-Noir,Neo-Pop (1)_Pop Culture_Culture,Neo-primitivism (1)_Still Life,Neoclassical Architecture,Neoclassicism,Neon Lighting_Uncategorized,Neon Racer_Sci-Fi,Neon 
Tokyo_Retro,Neoplasticism,Neotokyo Variant_Sci-Fi_Architecture,Neue Sachlichkeit Variant_Portraiture,Neue Wilde (1)_Uncategorized,New Caelum Variant_Architecture,New Caelum_Architecture,New Media Art_Digital Media,New Orleans,New Perpendicular art_Uncategorized,New Simplicity_Architecture,New York City,New York cityscape,Niagara Falls,Nicole Eisenman,Night Photography,Nightmare Beast_Uncategorized,Non-Fiction,Nordic Viking Art,Norse Mythology Art,North African Art,Norwegian romantic nationalism_Nature_Landscape,Nouveau Circus_Uncategorized,Nova Alexandria Variant_Architecture_Culture,Occult Ritual_Occult,Occult Sacrifice_Occult,Oil Painting,Olafur Eliasson,Ominous Fog_Uncategorized,Ominous Warning_Uncategorized,Op Art,Op Art_Uncategorized,Opera,Opera Music,Opera Music Illustration,Osaka cityscape,Outsider Art_Uncategorized,Pablo Picasso,Package Design,Pandora,Paper Cutting,Paper Mache Art,Parametric Architecture,Paris,Participatory Art,Patchwork Creature_Uncategorized,Paul Cezanne,Performance Art,Performance Sculpture,Peruvian Art,Petra,Photography,Photojournalism,Photorealism,Photorealistic painting,Physical Theater,Pinup_Retro,Pixel Art,Pizza Making,Plague Mass Grave_Uncategorized,Plein Air Painting,Plotter Art Variant_Uncategorized,Plotter Art_Uncategorized,Plus-Size Fashion,Poetry,Pointillism,Pointillism Art,Pole Dance,Polynesian Mythology Art,Polynesian Tattoo Art,Pop Art,Pop Music,Pop Music Branding,Pop Surrealism_Nature_Surrealism_Landscape_Still Life,Pop art style,Porcelain Art,Portrait Photography,Portuguese Art,Post-Impressionism,Postmodern Architecture,Pottery,Prague,Prague cityscape,Prairie Dress_Retro_Fashion,Pre-Raphaelite_Uncategorized,Preppy Fashion,Printmaking,Prismatic_Uncategorized,Projection Mapping Art,Propaganda Art_Retro,Propaganda Poster_Uncategorized,Prose Literature,Provocative_Surreal_Photography,Pseudorealism_Uncategorized,Psychedelic Concert Posters,Psychedelic Pop Art_Surrealism,Public Art Installations,Public Installations,Public Sculptures,Punk Fashion,Punk Music,Punk Poster_Uncategorized,Puppetry,Pyramids of Giza,Quahog,Quilting,Quilting Art,Quito cityscape,R&B Music,Rachel Whiteread,Radical Realism (1)_Still Life,Rangoli (Indian Floor Art),Rap Music Graphics,Raphael,Rashid Johnson,Rat Infestation_Uncategorized,Rat King_Uncategorized,Realism Art,Realism in Literature,Realistic Fiction,Reanimated Corpse_Animation,Recycled Art,Reggae Music,Reggae Music Design,Rembrandt,Remodernism Variant_Uncategorized,Remodernism_Architecture,Renaissance,Renaissance Architecture,Renaissance Art,Rene Magritte,Responsive Web Design,Richard Serra,Richard Tuttle,Rio de Janeiro,Rio de Janeiro cityscape,Robert Gober,Robotics Art,Rock Album Art,Rock Music,Rococo,Rococo Architecture,Rococo Art,Rococo Interior_Uncategorized,Roman Mosaic Art,Roman Mythology Art,Romance,Romance Literature,Romanesque Architecture,Romantic Comedy,Romantic Films,Romanticism,Romanticism Art,Romanticism in Literature,Rome,Rural Photography,Russia,Russian Art,Russian Icon Painting,Sahara Desert,Salem,Salsa Dance,Salsa Music,Salvador Dali,Samurai_Uncategorized,Sanctuary Variant_Uncategorized,Sand Sculpture,Sandro Botticelli,Sarah Sze,Satanic_Horror_Occult,Satire,Satire Literature,Scandinavian Architecture,Scandinavian Art,Scandinavian Design,Scarecrow_Horror,Scary Pumpkin_Uncategorized,Scary Stories at Campfire_Horror_Horror & Dark,Scary Stories_Horror,Sci-Fi Films,Science Fiction,Scientific Illustration_Retro,Screen Printing,Screwball Comedy,Sculpture,Self-taught Art (1)_Fantasy,Seoul,Serial 
Killer_Horror,Set Design for Theater,Shadow City Variant_Architecture_Occult_Gothic_Location,Shadow City_Architecture_Occult_Gothic_Location,Shadow City_Horror_Occult_Horror & Dark_Gothic_Location,Shanghai,Shangri-La Variant_Uncategorized,Shepard Fairey,Shirakawa-go,Shirin Neshat,Sideshow Poster_Retro,Silent Films,Singapore,Sinister Crone_Uncategorized,Sinister Laboratory_Horror_Occult_Still Life,Sinister Ritual_Uncategorized,Situationist International Variant_Uncategorized,Situationist International_Uncategorized,Skateboarding Fashion,Skeleton Dance_Animation,Skeleton Dance_Horror_Horror & Dark_Animation,Slavic Mythology Art,Slow Fashion,Smothering Earth_Fantasy,Social Realism painting,Sonnet,Soul Music,Sound Art,Sound Design,Sound Sculpture,South African Art,South American Textile Art,Southern Gothic_Gothic,Southwest Kachina Dolls,Spaghetti Western,Spanish Art,Spanish Cuisine,Spider Queen_Uncategorized,Sports Card_Photography_Portraiture,Sports Photography,Spring Art,Springfield,St Ives School Variant_Nature_Landscape,St Ives School_Nature_Landscape,Stained Glass Art,Stained Glass_Uncategorized,Stand-Up Comedy,Stars Hollow,Steampunk,Steampunk City Variant_Architecture_Location,Steampunk Fantasy Art,Steampunk Fashion,Steampunk Portrait_Fantasy_Portraiture,Steampunk_Fantasy_Fashion,Steamtown Variant_Architecture_Retro,Steeltown Variant_Architecture,Stockholm cityscape,Stone Sculpture,Stop Motion_Animation,Streamer Bike_Retro,Street Art,Street Art Performance,Street Art and Graffiti,Street Photography,Street Theater,Streetwear,Streetwear Fashion,Stuckism Variant_Uncategorized,Stuckism_Uncategorized,Studio Ghibli_Fantasy_Surrealism,Studio Portrait Photography,Sub Anaheim Variant_Fantasy_Location,Sub Annapolis Variant_Sculpture_Location,Sub Atlanta Variant_Uncategorized_Location,Sub Baton Rouge Variant_Culture_Location,Sub Baton Rouge_Culture_Location,Sub Baton Rouge__Location,Sub Berkeley Variant_Retro_Location,Sub Boise Variant_Uncategorized_Location,Sub Boise_Uncategorized_Location,Sub Boise__Location,Sub Bozeman Variant_Architecture_Location,Sub Carlsbad Variant_Architecture_Culture_Location,Sub Carson City Variant_Architecture_Location,Sub Casper Variant_Uncategorized_Location,Sub Cheyenne Variant_Uncategorized_Location,Sub Columbia Variant_Architecture_Culture_Location,Sub Concord Variant_Uncategorized_Location,Sub Costa Mesa Variant_Culture_Location,Sub Denver Variant_Uncategorized_Location,Sub Des Moines Variant_Architecture_Location,Sub Dover Variant_Uncategorized_Location,Sub Downey Variant_Sci-Fi_Location,Sub El Monte Variant_Sci-Fi_Location,Sub Fontana Variant_Culture_Location,Sub Frankfort Variant_Uncategorized_Location,Sub Fresno Variant_Architecture_Nature_Landscape_Location,Sub Garden Grove Variant_Architecture_Location,Sub Glendale Variant_Uncategorized_Location,Sub Indianapolis Variant_Uncategorized_Location,Sub Inglewood Variant_Sci-Fi_Pop Culture_Culture_Location,Sub Irvine Variant_Uncategorized_Location,Sub Jackson Variant_Folk Art_Location,Sub Jefferson City Variant_Architecture_Folk Art_Location,Sub Juneau Variant_Architecture_Location,Sub Lancaster Variant_Sci-Fi_Retro_Location,Sub Montgomery Variant_Uncategorized_Location,Sub Montpelier Variant_Sculpture_Location,Sub Moreno Valley Variant_Uncategorized_Location,Sub Oakland Variant_Sci-Fi_Culture_Location,Sub Ontario Variant_Uncategorized_Location,Sub Orange Variant_Retro_Location,Sub Oxnard Variant_Uncategorized_Location,Sub Oxnard_Uncategorized_Location,Sub Oxnard__Location,Sub Palmdale Variant_Sci-Fi_Location,Sub 
Pasadena Variant_Uncategorized_Location,Sub Pierre Variant_Uncategorized_Location,Sub Pomona Variant_Retro_Location,Sub Providence Variant_Uncategorized_Location,Sub Rancho Cucamonga Variant_Architecture_Lifestyle_Location,Sub Richmond Variant_Architecture_Location,Sub Roseville Variant_Architecture_Location,Sub Salem Variant_Sci-Fi_Culture_Location,Sub Santa Ana Variant_Sci-Fi_Culture_Location,Sub Santa Clarita Variant_Uncategorized_Location,Sub Santa Rosa Variant_Sci-Fi_Nature_Location,Sub Santa Rosa_Sci-Fi_Nature_Location,Sub Santa Rosa__Location,Sub Simi Valley Variant_Pop Culture_Culture_Retro_Location,Sub Spokane Variant_Architecture_Location,Sub Tacoma Variant_Architecture_Culture_Retro_Location,Sub Temecula Variant_Lifestyle_Location,Sub Thousand Oaks Variant_Uncategorized_Location,Sub Topeka Variant_Architecture_Folk Art_Location,Sub Torrance Variant_Sci-Fi_Location,Sub Victorville Variant_Uncategorized_Location,Sumi-e Painting,Summer Art,Summer Fashion,Surf Wood Sign_Retro,Surrealism,Surrealism Art,Surrealist Painting,Surrealist Sculpture,Sushi Making,Sustainable Architecture,Sustainable Art Variant_Uncategorized,Sustainable Art_Uncategorized,Sustainable Fashion,Swing Dance,Sydney,Symbolism Art,Synthetic Cubism,Taj Mahal,Takashi Murakami,Talavera Pottery,Tamara de Lempicka,Tango Dance,Tap Dance,Tarot Cards_Occult,Tarot_Occult,Tatooine,Tattoo Print_Retro_Tattoo Art,Tech City Variant_Architecture_Nature_Location,Techno Music Visuals,Technotopia Variant_Architecture_Nature,Temporary Art Installations,Terrarium Bottle_Still Life,Terrarium_Uncategorized,Teslapunk_Portraiture,Textile Art,Textile Design,Textile Sculpture,Thai Art,Thai Cuisine,Thomas Gainsborough,Thriller,Thriller Films,Thriller Literature,Tibetan Thangka Painting,Tiki Bar_Uncategorized,Tiki Cocktail_Uncategorized,Tiki Idol_Uncategorized,Tiki Mug_Retro,Tiki Outdoor Shower_Uncategorized,Tiki Totem_Sculpture,Titian,Toei_Retro_Animation,Tokyo,Tokyo cityscape,Torture Chamber_Uncategorized,Torture Device_Horror_Horror & Dark,Tortured Prisoner_Uncategorized,Tortured Soul_Uncategorized,Toy Design,Traditional Animation,Traditional Dance,Traditional Japanese Architecture,Traditional Pottery,Tragedy,Tragedy Literature,Tranquil Art,Transavantgarde Variant_Uncategorized,Transavantgarde_Uncategorized,Transgressive Art Variant_Uncategorized,Transgressive Art_Uncategorized,Travel Photography,Tropical Bathroom_Uncategorized,Tropical Cocktail_Uncategorized,Tropical Hotel_Uncategorized,Tropical Luau_Uncategorized,Twin Peaks,Typography Design,UPA_Comics_Animation,Ukiyo-e (Japanese Woodblock Printing),Ukiyo-e Art,Undead Gluttony_Architecture,Undead Portrait_Portraiture,Undefined_Emerging_Artist,Under Albany Variant_Architecture_Surrealism_Location,Under Bakersfield Variant_Uncategorized_Location,Under Berlin Variant_Retro_Surrealism_Location,Under Berlin_Retro_Surrealism_Location,Under Berlin__Location,Under Bismarck Variant_Uncategorized_Location,Under Charleston Variant_Architecture_Location,Under Chicago Variant_Architecture_Portraiture_Culture_Retro_Location,Under Eugene Variant_Folk Art_Location,Under Fargo Variant_Architecture_Location,Under Hartford Variant_Architecture_Location,Under Honolulu Variant_Architecture_Location,Under Istanbul Variant_Architecture_Location,Under Jackson Variant_Folk Art_Location,Under Juneau Variant_Architecture_Location,Under London Variant_Architecture_Location,Under Montreal Variant_Architecture_Location,Under Nashville Variant_Uncategorized_Location,Under Oklahoma City 
Variant_Architecture_Location,Under Omaha Variant_Culture_Location,Under Paris Variant_Uncategorized_Location,Under Sacramento Variant_Uncategorized_Location,Under Santa Fe Variant_Uncategorized_Location,Under St. Paul Variant_Architecture_Location,Under Tallahassee Variant_Sci-Fi_Retro_Architecture_Location,Under Trenton Variant_Uncategorized_Location,Underground Anchorage Variant_Architecture_Location,Underground Austin Variant_Uncategorized_Location,Underground Chula Vista Variant_Uncategorized_Location,Underground Columbus Variant_Retro_Location,Underground Concord Variant_Culture_Location,Underground Helena Variant_Architecture_Location,Underground Huntington Beach Variant_Architecture_Culture_Location,Underground Lansing Variant_Culture_Location,Underground Lincoln Variant_Uncategorized_Location,Underground Little Rock Variant_Uncategorized_Location,Underground Portland Variant_Sci-Fi_Location,Underground Riverside Variant_Culture_Location,Underground Rome Variant_Architecture_Location,Underground Salt Lake City Variant_Architecture_Location,Underground San Jose Variant_Uncategorized_Location,Underground Seattle Variant_Uncategorized_Location,Underground Springfield Variant_Folk Art_Location,Underground Wichita Variant_Folk Art_Location,Underwater Photography,Urban Fantasy Art,Urban Landscape Photography,Urban Photography,Urban Sculpture,User-Centered Design,Utrecht cityscape,VR Art Variant_Uncategorized,Vacuous Grimace_Uncategorized,Valhalla,Valve,Vampire_Portraiture_Horror,Vaporgram_Retro,Vaporwave City_Sci-Fi_Dystopia_Architecture_Location,Vaporwave Graphics_Retro_Surrealism_Graphic Design,Vaporwave Retro_Sci-Fi_Retro,Vaporwave Sunset_Uncategorized,Vaporwave_Architecture_Retro,Vatican City,Vector Portrait_Portraiture,Venezuelan Art,Venice,Verbatim Theater,Victorian Architecture,Victorian Fashion,Victorian Laboratory_Occult_Still Life,Video Art,Video Art_Uncategorized,Video Games Variant_Games,Video Games_Games_Culture,Video Mapping,Vienna,Vienna cityscape,Vietnamese Art,Vietnamese Cuisine,Vija Celmins,Vincent Van Gogh,Vintage Baseball_Retro_Photography,Vintage Fashion,Vintage Halloween Costume_Retro,Vintage Halloween Mask_Retro,Vintage Halloween_Retro,Vintage Robot Toy_Sci-Fi_Retro,Vintage Tattoo Flash_Retro_Tattoo Art,Vintage Tattoo Print_Retro_Tattoo Art,Vintage Travel Poster_Retro_Nature_Landscape,Virtual Art Variant_Uncategorized,Virtual Art_Sci-Fi,Virtual Reality (VR) Art,Virtual Reality Art,Visionary Art (1)_Uncategorized,Visual Effects (VFX) Design,Vogue Cover_Photography_Fashion,Volcano Lair_Uncategorized,Voodoo Altar_Occult,Voodoo Ceremony_Occult,Voodoo Doll_Retro_Occult,Voodoo Queen_Portraiture_Occult,Voodoo Shop_Occult,Voodoo_Occult,Vorticism_Uncategorized,Wallace and Gromit,Waltz Dance,War Films,Wassily Kandinsky,Water Art,Watercolor Painting,Weaving,Web Design,Wedding Fashion,Wedding Photography,Wellington cityscape,West African Art,Westeros,Wildlife Photography,William Kentridge,Winter Art,Winter Fashion,Wolfgang Tillmans,Womenswear Fashion,Wonderland,Wood Carving,Woodblock Art_Nature,Woodblock Print_Uncategorized,Woodblock Printing,Woodcut,Workwear Fashion,World Music,Xiamen cityscape,Xilam_Comics_Animation,Yayoi Kusama,Yellowstone National Park,Yokohama cityscape,Zion Variant_Culture,Zurich cityscape,_Uncategorized,ads-advertising_Uncategorized,ads-automotive_Uncategorized,ads-corporate_Uncategorized,ads-fashion editorial_Fashion,ads-food photography_Photography,ads-luxury_Uncategorized,ads-real estate_Photography,ads-retail_Uncategorized,artstyle-abstract 
expressionism_Uncategorized,artstyle-abstract_Uncategorized,artstyle-art deco_Uncategorized,artstyle-art nouveau_Nature,artstyle-constructivist_Uncategorized,artstyle-cubist_Uncategorized,artstyle-expressionist_Uncategorized,artstyle-graffiti_Architecture_Graffiti,artstyle-hyperrealism_Photography,artstyle-impressionist_Uncategorized,artstyle-pointillism_Uncategorized,artstyle-pop art_Culture,artstyle-psychedelic_Surrealism,artstyle-renaissance_Uncategorized,artstyle-steampunk_Uncategorized,artstyle-surrealist_Surrealism,artstyle-typography_Uncategorized,artstyle-watercolor_Uncategorized,carpint_Gothic,citz_Sci-Fi_Architecture,coolio_Portraiture,enhance_Uncategorized,futuristic-biomechanical cyberpunk_Sci-Fi_Dystopia,futuristic-biomechanical_Sci-Fi,futuristic-cybernetic robot_Sci-Fi,futuristic-cybernetic_Sci-Fi,futuristic-cyberpunk cityscape_Sci-Fi_Architecture,futuristic-futuristic_Sci-Fi,futuristic-retro cyberpunk_Sci-Fi_Retro,futuristic-retro futurism_Sci-Fi_Retro,futuristic-sci-fi_Sci-Fi,futuristic-vaporwave_Sci-Fi_Retro,game-bubble bobble_Fantasy,game-cyberpunk game_Sci-Fi_Dystopia_Games_Digital Media,game-fighting game_Games,game-gta_Uncategorized,game-mario_Fantasy_Comics,game-minecraft_Still Life,game-pokemon_Fantasy,game-retro arcade_Retro_Games,game-retro game_Retro,game-rpg fantasy game_Fantasy_Games,game-strategy game_Games,game-streetfighter_Uncategorized,game-zelda_Fantasy,getting there_Portraiture,girlz_Fashion_Horror_Horror & Dark_Gothic,gotit jinx_Tattoo Art,greatz_Portraiture,gsssggg_Portraiture,hoop_Portraiture,jinx_Tattoo Art,jinxed_Portraiture,kjkjkjj_Digital Media_Still Life_Comics,kool_Portraiture,misc-architectural_Uncategorized,misc-disco_Retro,misc-dreamscape_Fantasy_Surrealism,misc-dystopian_Dystopia,misc-fairy tale_Fantasy,misc-gothic_Gothic,misc-grunge_Retro,misc-horror_Horror,misc-horror_Horror_Horror & Dark,misc-kawaii_Uncategorized,misc-lovecraftian_Surrealism_Horror,misc-macabre_Gothic,misc-manga_Uncategorized,misc-metropolis_Sci-Fi_Architecture,misc-minimalist_Uncategorized,misc-monochrome_Uncategorized,misc-nautical_Uncategorized,misc-space_Sci-Fi,misc-stained glass_Uncategorized,misc-techwear fashion_Sci-Fi_Fashion_Architecture,misc-tribal_Uncategorized,misc-zentangle_Uncategorized,mkkk_Portraiture_Digital Media_Animation,papercraft-collage_Uncategorized,papercraft-flat papercut_Uncategorized,papercraft-kirigami_Uncategorized,papercraft-paper mache_Uncategorized,papercraft-paper quilling_Uncategorized,papercraft-papercut collage_Uncategorized,papercraft-papercut shadow box_Uncategorized,papercraft-stacked papercut_Uncategorized,papercraft-thick layered papercut_Uncategorized,photo-alien_Sci-Fi_Photography,photo-film noir_Photography,photo-hdr_Photography,photo-long exposure_Photography_Surrealism,photo-neon noir_Photography,photo-silhouette_Photography,photo-tilt-shift_Photography,sai-3d-model_Uncategorized,sai-analog film_Retro_Photography,sai-anime_Uncategorized,sai-cinematic_Uncategorized,sai-comic book_Uncategorized,sai-craft clay_Sculpture,sai-digital art_Digital Media,sai-fantasy art_Fantasy_Surrealism,sai-isometric_Uncategorized,sai-line art_Uncategorized,sai-lowpoly_Uncategorized,sai-neonpunk_Uncategorized,sai-origami_Uncategorized,sai-photographic_Photography,sai-pixel art_Uncategorized,sai-texture_Uncategorized,stfhgff_Photography", + "link": 154, + "widget": { + "name": "style", + "config": [ + [ + "no style", + "2D Game Art", + "3D Animation", + "3D Game Art", + "3D Modeling", + "3D Printing Art", + "3D Printing in Art", + "AR Art 
Variant_Uncategorized", + "Aardman_Uncategorized", + "Abandoned Asylum_Uncategorized", + "Aboriginal Dot Painting", + "Abstract Expressionism", + "Abstract Painting", + "Abstract Photography", + "Abstract Sculpture", + "Absurdist Theater", + "Academic Art", + "Acrylic Painting", + "Action Films", + "Addams Family_Portraiture_Horror", + "Adrian Ghenie", + "Adventure", + "Adventure Films", + "Aerial Dance", + "Aerial Photography", + "African Beadwork", + "African Beadwork Art", + "African Cuisine", + "African Mask Art", + "African Mask Making", + "Agnes Martin", + "Ai Weiwei_1", + "Ai Weiwei_2", + "Air Art", + "Airbrushing", + "Albrecht Durer", + "Album Cover Art", + "Alchemist's Study_Uncategorized", + "Amazon Rainforest", + "American Cuisine", + "American Traditional_Retro_Tattoo Art", + "Amsterdam", + "Amsterdam cityscape", + "Analytical Cubism", + "Ancient Maya_Uncategorized", + "Andy Warhol", + "Anger Art", + "Animated Corpse_Animation", + "Animated Films", + "Animation", + "Anish Kapoor", + "Ankama_Animation", + "Anselm Kiefer", + "Antarctica", + "Appropriation (1)_Culture", + "Après-Ski_Uncategorized", + "Arachnid Swarm_Uncategorized", + "Architectural Design", + "Architectural Photography", + "Argentinian Art", + "Art Activism", + "Art Collaborations with Musicians", + "Art Collaborations with Writers", + "Art Conservation", + "Art Criticism", + "Art Curation", + "Art Deco", + "Art Deco Architecture", + "Art Deco Architecture_Architecture", + "Art Deco Design", + "Art Education", + "Art Education for Adults", + "Art Education for Children", + "Art Education for Remote Areas", + "Art Education for Special Needs", + "Art Gallery Management", + "Art Games", + "Art Historical Writing", + "Art History", + "Art History Research", + "Art Informatics", + "Art Informel (1)_Uncategorized", + "Art Inspired by Ancient Civilizations", + "Art Inspired by the Digital Age", + "Art Inspired by the Renaissance", + "Art Inspired by the Roaring Twenties", + "Art Inspired by the Victorian Era", + "Art Installations", + "Art Journalism", + "Art Marketing", + "Art Nouveau", + "Art Nouveau Architecture", + "Art Nouveau Design", + "Art Nouveau Poster_Uncategorized", + "Art Nouveau Variant_Uncategorized", + "Art Restoration", + "Art Sales and Auctions", + "Art Therapy", + "Art Therapy for Adults", + "Art Therapy for Children", + "Art Workshop Facilitation", + "Art and AI Collaboration", + "Art and Architecture Collaboration", + "Art and Cultural Heritage Preservation", + "Art and Environmental Sustainability", + "Art and Literature Collaboration", + "Art and Medical Collaboration", + "Art and Mental Health", + "Art and Music Collaboration", + "Art and Science Collaboration", + "Art and Social Justice Projects", + "Art and Technology Collaboration", + "Art and Urban Development", + "Art for Agricultural Industry", + "Art for Agricultural Sector", + "Art for Airports", + "Art for Animal Welfare Organizations", + "Art for Anniversaries", + "Art for Aquariums", + "Art for Architectural Visualization", + "Art for Asian Cultures", + "Art for Augmented Reality Experiences", + "Art for Automotive Design", + "Art for Automotive Industry", + "Art for Aviation Industry", + "Art for Baby Showers", + "Art for Birthdays", + "Art for Botanical Gardens", + "Art for Cafes and Restaurants", + "Art for Charity Fundraisers", + "Art for Children", + "Art for Children's Hospitals", + "Art for Climate Change Initiatives", + "Art for Construction Industry", + "Art for Corporate Spaces", + "Art for Cruise Ships", + "Art for Culinary 
Presentation", + "Art for E-Commerce Platforms", + "Art for Educational Institutions", + "Art for Educational Technology", + "Art for Elderly", + "Art for Emergency Services", + "Art for Energy Industry", + "Art for Entertainment Industry", + "Art for Environmental Activism", + "Art for Environmental Campaigns", + "Art for Factories and Workshops", + "Art for Fashion Industry", + "Art for Festivals and Events", + "Art for Financial Institutions", + "Art for Financial Sector", + "Art for Fitness Centers", + "Art for Funerals", + "Art for Gender Equality", + "Art for Government Entities", + "Art for Graduations", + "Art for Health Care Facilities", + "Art for Home Decor", + "Art for Hospitality Industry", + "Art for Hotels", + "Art for Human Anatomy Studies", + "Art for Human Rights Campaigns", + "Art for Indigenous Cultures", + "Art for LGBTQ+ Celebrations", + "Art for Libraries", + "Art for Marine Industry", + "Art for Maritime Industry", + "Art for Medical Illustrations", + "Art for Military and Defense Sector", + "Art for Military and Veterans", + "Art for Mobile Apps", + "Art for Museums", + "Art for Music Videos", + "Art for National Holidays", + "Art for Nautical Navigation", + "Art for Non-Profit Organizations", + "Art for Office Spaces", + "Art for Outdoor Advertising", + "Art for Packaging Design", + "Art for Pet Products", + "Art for Pharmaceutical Industry", + "Art for Political Campaigns", + "Art for Prisons", + "Art for Public Transportation", + "Art for Real Estate Marketing", + "Art for Religious Celebrations", + "Art for Religious Institutions", + "Art for Renewable Energy Sector", + "Art for Retail Spaces", + "Art for Retirement Parties", + "Art for Robotics", + "Art for Schools and Colleges", + "Art for Science Centers", + "Art for Scientific Exploration", + "Art for Security and Defense", + "Art for Seniors", + "Art for Shopping Malls", + "Art for Smart City Projects", + "Art for Social Media Platforms", + "Art for Social Networking Sites", + "Art for Spa and Wellness Centers", + "Art for Space Exploration", + "Art for Space Industry", + "Art for Spaceships and Aerospace", + "Art for Sports Industry", + "Art for Sports Venues", + "Art for Technical Manuals", + "Art for Teenagers", + "Art for Teens", + "Art for Television Shows", + "Art for Theme Parks", + "Art for Toddlers", + "Art for Train Stations", + "Art for Underwater Exploration", + "Art for Video Game Development", + "Art for Virtual Assistants and AI", + "Art for Virtual Events", + "Art for Virtual Reality Experiences", + "Art for Wearable Technology", + "Art for Wearables", + "Art for Web Platforms", + "Art for Weddings", + "Art for Zoos", + "Art in Public Transportation", + "Art with Light and Projection", + "Art with Metalwork", + "Art with Organic Materials", + "Art with Recycled Materials", + "Artist's Books", + "Artware Variant_Sci-Fi_Graffiti_Digital Media", + "Aspen", + "Assemblage Art", + "Astrophotography", + "Athens", + "Athleisure Fashion", + "Atlantis", + "Augmented Reality (AR) Art", + "Augmented Reality Art", + "Australian Aboriginal Art", + "Autobiography", + "Automotive Design", + "Autumn Art", + "Avant-Garde Fashion", + "Aztec Calendar_Uncategorized", + "Back Alley Rogue_Uncategorized", + "Ballet Dance", + "Ballet_Uncategorized", + "Ballroom Dance", + "Bangkok", + "Banksy_1", + "Banksy_2", + "Barbara Kruger", + "Barcelona", + "Baroque", + "Baroque Architecture", + "Baroque Art", + "Baroque Music", + "Bas-Relief Sculpture_Sculpture", + "Basket Weaving", + "Basket Weaving Art", + 
"Battle_Uncategorized", + "Bauhaus", + "Bauhaus (1)_Architecture", + "Bauhaus Architecture", + "Bauhaus Design", + "Bauhaus Design_Uncategorized", + "Beachwear Fashion", + "Beijing", + "Belly Dance", + "Berlin", + "Bharatanatyam Dance", + "Bikini Bottom", + "Bio Art", + "Bio Art_Nature", + "Biographical Films", + "Biographical Literature", + "Biography", + "Biomorphic Architecture", + "Black Hole", + "Black Velvet Painting_Portraiture", + "Black and White Photography", + "Blacklight Poster_Uncategorized", + "Blockbuster Films", + "Bloodthirsty Vampire_Horror", + "Bluegrass Music", + "Blueprint_Uncategorized", + "Blues Music", + "Blues Music Illustration", + "Body Art", + "Body Art Performance", + "Body Painting", + "Bohemian Fashion", + "Bomber Jacket_Retro", + "Bookbinding", + "Botanical Illustration", + "Boudoir Photography_Photography", + "Brazil", + "Brazilian Art", + "Brazilian Cuisine", + "Brazilian Graffiti Art", + "Breakdance", + "Bridal Fashion", + "Brightwater Variant_Nature", + "British Art", + "Bronze Sculpture", + "Bruce Nauman", + "Bruges", + "Brutalism", + "Brutalist Architecture", + "Budapest cityscape", + "Cabinet of Curiosities_Occult", + "Cai Guo-Qiang", + "Cake Decorating", + "Canada", + "Candid Portrait Photography", + "Caravaggio", + "Caribbean Carnival Art", + "Caribbean Cuisine", + "Caricature", + "Carnival Freakshow_Retro", + "Caspar David Friedrich", + "Cassette Bedroom_Retro", + "Cassette Collage_Sci-Fi_Surrealism_Retro", + "Cassette Futurism_Retro", + "Cassette Graphics_Retro_Surrealism", + "Cassette J-Card_Retro", + "Cassette Wall_Retro", + "Casual Fashion", + "Caveopolis Variant_Lifestyle", + "Cecily Brown", + "Celtic Knotwork Art", + "Celtic Mythology Art", + "Cemetery Statue_Uncategorized", + "Central African Art", + "Central American_Uncategorized", + "Ceramic Art", + "Ceramic Design", + "Ceramic Sculpture", + "Ceramics", + "Chalk Art", + "Charcoal Drawing", + "Charles Ray", + "Chicago", + "Children's Fashion", + "Children's Theater", + "Chilean Art", + "Chinese Architecture", + "Chinese Art", + "Chinese Calligraphy", + "Chinese Cuisine", + "Chinese Ink Painting", + "Chinese Jade Carving", + "Chinese Landscape Painting", + "Chinese Mythology Art", + "Chinese Paper Cutting", + "Chinese Scroll Painting", + "Chris Ofili", + "Cindy Sherman_1", + "Cindy Sherman_2", + "Cinematography", + "Cinque Terre", + "Circuit Bending_Uncategorized", + "Circus Arts", + "Circus Performer_Retro", + "Classic Western", + "Classical Architecture", + "Classical Art", + "Classical Music", + "Classical Music Illustration", + "Classical Realism", + "Classical Realism_Portraiture", + "Classical Theater", + "Claude Monet", + "Clockwork City Variant_Architecture_Location", + "Collaborative Art Projects", + "Collage", + "Collage Art", + "Colombian Art", + "Colonial Architecture", + "Colosseum", + "Combine Painting_Sci-Fi_Still Life", + "Comedy", + "Comedy Literature", + "Commercial Photography", + "Community Mural Projects", + "Computer art", + "Concept Art for Movies", + "Concept Art for Video Games", + "Conceptual Art", + "Concert Poster Design", + "Conjoined Twins_Uncategorized", + "Constructivism", + "Constructivism Art", + "Contemporary Ballet", + "Contemporary Dance", + "Copenhagen", + "Copenhagen cityscape", + "Corporate Identity Design", + "Cosplay Design", + "Cottagecore Fashion_Fashion", + "Country Music", + "Country Music Graphics", + "Crawler Mimicry_Uncategorized", + "Creepy Children_Portraiture", + "Creepy Porcelain Doll_Fashion_Portraiture", + "Crime Films", + "Critical 
Realism_Uncategorized", + "Cross-Disciplinary Art", + "Crucifixion_Uncategorized", + "Crystal Caverns Variant_Architecture", + "Cuban Art", + "Cuban Cuisine", + "Cubism", + "Cubism Art", + "Cult Films", + "Cyberpunk", + "Cyberpunk Fantasy Art", + "Dadaism", + "Dadaism Art", + "Damien Hirst_1", + "Damien Hirst_2", + "Dan Flavin", + "Dance Choreography", + "Dance Performance Art", + "Dark Carnival_Gothic", + "Dark Fantasy Art", + "Data Art Variant_Uncategorized", + "Data Art_Uncategorized", + "Data Visualization Art", + "Day of the Dead_Uncategorized", + "De Stijl_Uncategorized", + "Death Masque_Uncategorized", + "Deconstructivist Architecture", + "Demonic Clown_Uncategorized", + "Demonic Portal_Horror", + "Demonic Possession_Uncategorized", + "Demoscene_Animation", + "Desaturated_Uncategorized", + "Die Brücke_Graffiti", + "Diego Velazquez", + "Dieselpunk_Retro", + "Digital Animation", + "Digital Art", + "Digital Art_Digital Media", + "Digital Drawing Tablets", + "Digital Illustration", + "Digital Painting", + "Digital Sculpture", + "Digital Storytelling", + "Diorama_Uncategorized", + "Disco Music", + "Disney Animation_Animation", + "Disrespectful Grave Robber_Uncategorized", + "Documentary Films", + "Documentary Photography", + "Drama", + "Drama Films", + "Dubai", + "Dublin", + "Dublin cityscape", + "Dunder Mifflin", + "Dutch Art", + "Dwarvendom Variant_Uncategorized", + "Dwarvenholm Variant_Uncategorized", + "Earth Art", + "East African Art", + "Eco Art", + "Eco-Art", + "Ed Ruscha", + "Edgar Degas", + "Edinburgh cityscape", + "Editorial Design", + "Edvard Munch", + "Edward Hopper", + "Egyptian Hieroglyphs_Uncategorized", + "Egyptian Mythology Art", + "Egyptian Wall Art", + "Egyptology_Uncategorized", + "El Anatsui", + "Electronic Music", + "Electronic Music Visuals", + "Elegant_Erotic_Photography", + "Elfheim Variant_Architecture_Fantasy", + "Elven City Variant_Architecture_Location", + "Embroidery", + "Emerging_Artist", + "Engraving", + "Environmental Art", + "Environmental Design", + "Ephemeral Art", + "Etching", + "Eugene Delacroix", + "Exhibition Design", + "Exoplanet", + "Exorcism_Uncategorized", + "Experimental Art", + "Experimental Films", + "Experimental Music Video", + "Experimental Photography", + "Experimental Theater", + "Expressionism", + "Expressionist Architecture", + "Expressionist painting", + "Fairy Tale Art", + "Fantasy", + "Fantasy Films", + "Fantasy Literature", + "Farce", + "Fashion Design", + "Fashion Illustration", + "Fashion Photography", + "Fast Fashion", + "Fauvism", + "Fauvism Art", + "Ferocious Werewolf_Uncategorized", + "Festival Fashion", + "Fiction", + "Figurative Expressionism_Uncategorized", + "Figurine Shelf_Fantasy_Sculpture", + "Filipino Art", + "Film Direction", + "Film Editing", + "Film Noir", + "Fine Art Photography", + "Fine_Art_Black_and_White_Photography", + "Fire Art", + "Flamenco Dance", + "Folk Art Variant_Folk Art", + "Folk Art_Folk Art", + "Folk Dance", + "Folk Music", + "Folk Music Art", + "Food Art", + "Food Photography", + "Formal Fashion", + "Fortune Teller_Occult", + "Fortune Telling_Occult", + "France", + "Francisco Goya", + "Frankfurt cityscape", + "French Art", + "French Cuisine", + "French Impressionism", + "Fresco Painting Technique", + "Frida Kahlo", + "Funk Music", + "Furniture Design", + "Futurism", + "Futurist Architecture", + "GAYZ_Portraiture", + "Gabriel Orozco", + "Galactic_Sci-Fi", + "Game Design", + "Generative Art", + "Genetic Art_Uncategorized", + "Geometric Abstraction", + "Geometric abstract painting", + "Georg 
Baselitz", + "Georgia O'Keeffe", + "Gerhard Richter_1", + "Gerhard Richter_2", + "German Art", + "Ghibli_Surrealism", + "Ghoul City Variant_Architecture_Location", + "Giant Robot_Sci-Fi_Retro_Architecture", + "Glamorous Portrait_Fashion_Portraiture", + "Glamorous_Erotic_Photography", + "Glasgow cityscape", + "Glass Sculpture", + "Glassblowing", + "Glazing Technique in Painting", + "Glenn Ligon", + "Glitch Art_Uncategorized", + "Glitchcore_Digital Media", + "Gongfu Tea_Uncategorized", + "Gospel Music", + "Goth Boudoir_Gothic", + "Gotham City", + "Gothic Architecture", + "Gothic Architecture_Architecture_Gothic", + "Gothic Fashion", + "Gothic Literature", + "Gothic Monster_Architecture_Gothic", + "Gothic Revival Architecture", + "Gothic Revival Architecture_Architecture_Gothic", + "Graffiti Art", + "Graffiti Style_Graffiti", + "Grand Canyon", + "Grant Wood", + "Graphic Design", + "Graveyard Mist_Horror", + "Great Barrier Reef", + "Great Wall of China", + "Greek Art", + "Greek Classical Sculpture", + "Greek Mythology Art", + "Greek Pottery Art", + "Greendale", + "Gritty_Voyeuristic_Photography", + "Grotesque Gargoyle_Uncategorized", + "Grunge Flyer_Uncategorized", + "Gustav Klimt", + "Gutai_Sci-Fi_Event", + "H.P. Lovecraft Cover_Horror", + "Hackersville Variant_Architecture", + "Hallstatt", + "Hard-edge Painting_Uncategorized", + "Hate Crime_Uncategorized", + "Haunted Carnival_Horror", + "Haunted Portrait_Portraiture_Horror", + "Haute Couture", + "Haute Couture Fashion", + "Haute Cuisine", + "Hawkins", + "Headless Horseman_Uncategorized", + "Heavy Metal", + "Henri Matisse", + "Hieronymus Bosch", + "High Fantasy", + "High Fantasy Art", + "Hip-Hop Album Art", + "Hip-Hop Dance", + "Hip-Hop Fashion", + "Hip-Hop Music", + "Historical Fiction", + "Hogwarts", + "Hong Kong", + "Hong Kong cityscape", + "Horror", + "Horror Films", + "Horror Movie Poster_Horror_Gothic", + "Hyperrealism_Uncategorized", + "Ice Sculpture", + "Illustration Design", + "Illustration for Children's Books", + "Impressionism", + "Impressionism Art", + "Impressionist Landscape Painting", + "Impressionist Portrait Painting", + "Improvisational Theater", + "Inca Mythology Art", + "Indian Art", + "Indian Cuisine", + "Indian Miniature Painting", + "Indian Mythology Art", + "Indie Films", + "Indie Music Art", + "Indigenous Australian Art", + "Indigenous Painting", + "Indigenous Pottery", + "Indonesian Art", + "Industrial Architecture", + "Industrial Design", + "Information Art_Uncategorized", + "Ink Drawing", + "Insectoid Mutant_Portraiture", + "Installation Art", + "Interaction Design", + "Interactive Art", + "Interactive Art Installations", + "Interactive artwork", + "Interior Design", + "Internet Art_Sci-Fi_Digital Media", + "Intimate_Naturist_Photography", + "Intuitive Art_Uncategorized", + "Irish Art", + "Irish Dance", + "Islamic Architecture", + "Islamic Art", + "Islamic Calligraphy", + "Islamic Geometric Art", + "Islamic Geometric Patterns", + "Island Luau_Uncategorized_Location", + "Istanbul", + "Istanbul cityscape", + "Italian Art", + "Italian Cuisine", + "Italian Renaissance Art", + "J.M.W. 
Turner", + "Jackson Pollock", + "Jakarta cityscape", + "Japan", + "Japanese Architecture", + "Japanese Art", + "Japanese Cuisine", + "Japanese Mythology Art", + "Jazz Dance", + "Jazz Music", + "Jazz Poster Art", + "Jean-Honore Fragonard", + "Jeff Koons", + "Jenny Holzer", + "Jerusalem", + "Jewelry Design", + "Johannes Vermeer", + "John Baldessari", + "Joyful Art", + "Julie Mehretu", + "Kabuki Theater", + "Kara Walker", + "Kathak Dance", + "Katsushika Hokusai", + "Kawaii Character_Uncategorized", + "Kawaii Fashion_Fashion", + "Kehinde Wiley", + "Kerry James Marshall", + "Kiki Smith", + "Kinetic Art", + "Kinetic Sculpture", + "Kintsugi (Japanese Gold Repair)", + "Kitsch Movement_Uncategorized", + "Knitting", + "Korean Art", + "Korean Celadon Ceramics", + "Korean Celadon Pottery", + "Korean Cuisine", + "Kuala Lumpur", + "Kyoto", + "Kyoto cityscape", + "LAIKA_Animation", + "Land Art", + "Land Art (1)_Fantasy_Nature_Sculpture_Landscape", + "Landscape Architecture", + "Landscape Design", + "Landscape Photography", + "Laser Grid_Uncategorized", + "Later European abstraction (1)_Uncategorized", + "Leonardo da Vinci", + "Lettrist artwork", + "Leviathan Variant_Architecture", + "Light Art", + "Line Dance", + "Lisbon cityscape", + "Lithography", + "Living Burial_Uncategorized", + "London", + "Los Angeles", + "Lost Vegas Variant_Architecture", + "Lounge Singer_Retro", + "Lovecraftian Horror_Horror", + "Low Fantasy", + "Lowbrow Art Variant_Surrealism_Culture", + "Lowbrow Art_Culture", + "Luau Fire Dancer_Fashion", + "Luchador_Uncategorized", + "Luxury Fashion", + "Lynching_Uncategorized", + "Lyrical abstract painting", + "Macabre Memento Mori_Horror_Horror & Dark_Still Life", + "Machinima Variant_Uncategorized", + "Machu Picchu", + "Macro Photography", + "Mad Scientist Machinery_Uncategorized", + "Madhubani Painting", + "Madhubani Painting (Indian Folk Art)", + "Mage City Variant_Architecture_Fantasy_Location", + "Magic Realist painting", + "Mall Goth_Portraiture_Gothic", + "Mannerism", + "Mannerist Architecture", + "Maori Wood Carving", + "Mardi Gras_Uncategorized", + "Marina Abramović", + "Mark Bradford", + "Mark Grotjahn", + "Martin Puryear", + "Masked Killer_Uncategorized", + "Masked Stalker_Uncategorized", + "Maurizio Cattelan", + "Maximalism", + "Mecca", + "Mech City Variant_Sci-Fi_Architecture_Location", + "Mech City_Sci-Fi_Architecture_Location", + "Mech City__Location", + "Media Art", + "Medical Oddities_Uncategorized", + "Mediterranean Cuisine", + "Melancholy Art", + "Melodrama", + "Melting Skull_Uncategorized", + "Memento Mori_Horror_Horror & Dark", + "Memoir", + "Menacing Scarecrow_Uncategorized", + "Menswear Fashion", + "Mesoamerican Mythology Art", + "Mesopotamian Mythology Art", + "Metabolist Architecture", + "Metal Music", + "Metal Music Artwork", + "Metalwork", + "Metropolis", + "Mexican Art", + "Mexican Cuisine", + "Mexican Muralism", + "Mexican Skull Art_Uncategorized", + "Miami", + "Michelangelo", + "Middle Eastern Cuisine", + "Middle-earth", + "Midgard Variant_Architecture", + "Milky Way Galaxy", + "Mime", + "Mime City Variant_Architecture_Location", + "Minimalism", + "Minimalist Web Design", + "Mixed Media Art", + "Mixer_Animation", + "Modern Architecture", + "Modern Dance", + "Modernist Architecture", + "Mona Hatoum", + "Monoprinting Technique", + "Mosaic", + "Mosaic Art", + "Motion Design", + "Motion Graphics Design", + "Mount Everest", + "Mount Olympus", + "Movie Storyboard_Uncategorized", + "Mughal Miniature Painting", + "Mumbai", + "Mummy Portrait_Portraiture", + "Munich 
cityscape", + "Music Video Direction", + "Musica Variant_Architecture_Culture", + "Musical Films", + "Musical Theater", + "Mutated Beast_Uncategorized", + "My Little Pony_Uncategorized", + "Mystery", + "Mystery Literature", + "Mythic Fantasy Art", + "Nantucket", + "Native American Art", + "Native American Basketry", + "Native American Mythology Art", + "Native American Pottery", + "Naturalism in Literature", + "Nature Landscape Photography", + "Nature Photography", + "Nautical_Retro", + "Naïve Art (1)_Uncategorized", + "Nebula", + "Neo Pop_Pop Culture_Culture", + "Neo Rauch", + "Neo-Dada_Uncategorized", + "Neo-Expressionism_Uncategorized", + "Neo-Gothic Architecture", + "Neo-Noir", + "Neo-Pop (1)_Pop Culture_Culture", + "Neo-primitivism (1)_Still Life", + "Neoclassical Architecture", + "Neoclassicism", + "Neon Lighting_Uncategorized", + "Neon Racer_Sci-Fi", + "Neon Tokyo_Retro", + "Neoplasticism", + "Neotokyo Variant_Sci-Fi_Architecture", + "Neue Sachlichkeit Variant_Portraiture", + "Neue Wilde (1)_Uncategorized", + "New Caelum Variant_Architecture", + "New Caelum_Architecture", + "New Media Art_Digital Media", + "New Orleans", + "New Perpendicular art_Uncategorized", + "New Simplicity_Architecture", + "New York City", + "New York cityscape", + "Niagara Falls", + "Nicole Eisenman", + "Night Photography", + "Nightmare Beast_Uncategorized", + "Non-Fiction", + "Nordic Viking Art", + "Norse Mythology Art", + "North African Art", + "Norwegian romantic nationalism_Nature_Landscape", + "Nouveau Circus_Uncategorized", + "Nova Alexandria Variant_Architecture_Culture", + "Occult Ritual_Occult", + "Occult Sacrifice_Occult", + "Oil Painting", + "Olafur Eliasson", + "Ominous Fog_Uncategorized", + "Ominous Warning_Uncategorized", + "Op Art", + "Op Art_Uncategorized", + "Opera", + "Opera Music", + "Opera Music Illustration", + "Osaka cityscape", + "Outsider Art_Uncategorized", + "Pablo Picasso", + "Package Design", + "Pandora", + "Paper Cutting", + "Paper Mache Art", + "Parametric Architecture", + "Paris", + "Participatory Art", + "Patchwork Creature_Uncategorized", + "Paul Cezanne", + "Performance Art", + "Performance Sculpture", + "Peruvian Art", + "Petra", + "Photography", + "Photojournalism", + "Photorealism", + "Photorealistic painting", + "Physical Theater", + "Pinup_Retro", + "Pixel Art", + "Pizza Making", + "Plague Mass Grave_Uncategorized", + "Plein Air Painting", + "Plotter Art Variant_Uncategorized", + "Plotter Art_Uncategorized", + "Plus-Size Fashion", + "Poetry", + "Pointillism", + "Pointillism Art", + "Pole Dance", + "Polynesian Mythology Art", + "Polynesian Tattoo Art", + "Pop Art", + "Pop Music", + "Pop Music Branding", + "Pop Surrealism_Nature_Surrealism_Landscape_Still Life", + "Pop art style", + "Porcelain Art", + "Portrait Photography", + "Portuguese Art", + "Post-Impressionism", + "Postmodern Architecture", + "Pottery", + "Prague", + "Prague cityscape", + "Prairie Dress_Retro_Fashion", + "Pre-Raphaelite_Uncategorized", + "Preppy Fashion", + "Printmaking", + "Prismatic_Uncategorized", + "Projection Mapping Art", + "Propaganda Art_Retro", + "Propaganda Poster_Uncategorized", + "Prose Literature", + "Provocative_Surreal_Photography", + "Pseudorealism_Uncategorized", + "Psychedelic Concert Posters", + "Psychedelic Pop Art_Surrealism", + "Public Art Installations", + "Public Installations", + "Public Sculptures", + "Punk Fashion", + "Punk Music", + "Punk Poster_Uncategorized", + "Puppetry", + "Pyramids of Giza", + "Quahog", + "Quilting", + "Quilting Art", + "Quito cityscape", + "R&B 
Music", + "Rachel Whiteread", + "Radical Realism (1)_Still Life", + "Rangoli (Indian Floor Art)", + "Rap Music Graphics", + "Raphael", + "Rashid Johnson", + "Rat Infestation_Uncategorized", + "Rat King_Uncategorized", + "Realism Art", + "Realism in Literature", + "Realistic Fiction", + "Reanimated Corpse_Animation", + "Recycled Art", + "Reggae Music", + "Reggae Music Design", + "Rembrandt", + "Remodernism Variant_Uncategorized", + "Remodernism_Architecture", + "Renaissance", + "Renaissance Architecture", + "Renaissance Art", + "Rene Magritte", + "Responsive Web Design", + "Richard Serra", + "Richard Tuttle", + "Rio de Janeiro", + "Rio de Janeiro cityscape", + "Robert Gober", + "Robotics Art", + "Rock Album Art", + "Rock Music", + "Rococo", + "Rococo Architecture", + "Rococo Art", + "Rococo Interior_Uncategorized", + "Roman Mosaic Art", + "Roman Mythology Art", + "Romance", + "Romance Literature", + "Romanesque Architecture", + "Romantic Comedy", + "Romantic Films", + "Romanticism", + "Romanticism Art", + "Romanticism in Literature", + "Rome", + "Rural Photography", + "Russia", + "Russian Art", + "Russian Icon Painting", + "Sahara Desert", + "Salem", + "Salsa Dance", + "Salsa Music", + "Salvador Dali", + "Samurai_Uncategorized", + "Sanctuary Variant_Uncategorized", + "Sand Sculpture", + "Sandro Botticelli", + "Sarah Sze", + "Satanic_Horror_Occult", + "Satire", + "Satire Literature", + "Scandinavian Architecture", + "Scandinavian Art", + "Scandinavian Design", + "Scarecrow_Horror", + "Scary Pumpkin_Uncategorized", + "Scary Stories at Campfire_Horror_Horror & Dark", + "Scary Stories_Horror", + "Sci-Fi Films", + "Science Fiction", + "Scientific Illustration_Retro", + "Screen Printing", + "Screwball Comedy", + "Sculpture", + "Self-taught Art (1)_Fantasy", + "Seoul", + "Serial Killer_Horror", + "Set Design for Theater", + "Shadow City Variant_Architecture_Occult_Gothic_Location", + "Shadow City_Architecture_Occult_Gothic_Location", + "Shadow City_Horror_Occult_Horror & Dark_Gothic_Location", + "Shanghai", + "Shangri-La Variant_Uncategorized", + "Shepard Fairey", + "Shirakawa-go", + "Shirin Neshat", + "Sideshow Poster_Retro", + "Silent Films", + "Singapore", + "Sinister Crone_Uncategorized", + "Sinister Laboratory_Horror_Occult_Still Life", + "Sinister Ritual_Uncategorized", + "Situationist International Variant_Uncategorized", + "Situationist International_Uncategorized", + "Skateboarding Fashion", + "Skeleton Dance_Animation", + "Skeleton Dance_Horror_Horror & Dark_Animation", + "Slavic Mythology Art", + "Slow Fashion", + "Smothering Earth_Fantasy", + "Social Realism painting", + "Sonnet", + "Soul Music", + "Sound Art", + "Sound Design", + "Sound Sculpture", + "South African Art", + "South American Textile Art", + "Southern Gothic_Gothic", + "Southwest Kachina Dolls", + "Spaghetti Western", + "Spanish Art", + "Spanish Cuisine", + "Spider Queen_Uncategorized", + "Sports Card_Photography_Portraiture", + "Sports Photography", + "Spring Art", + "Springfield", + "St Ives School Variant_Nature_Landscape", + "St Ives School_Nature_Landscape", + "Stained Glass Art", + "Stained Glass_Uncategorized", + "Stand-Up Comedy", + "Stars Hollow", + "Steampunk", + "Steampunk City Variant_Architecture_Location", + "Steampunk Fantasy Art", + "Steampunk Fashion", + "Steampunk Portrait_Fantasy_Portraiture", + "Steampunk_Fantasy_Fashion", + "Steamtown Variant_Architecture_Retro", + "Steeltown Variant_Architecture", + "Stockholm cityscape", + "Stone Sculpture", + "Stop Motion_Animation", + "Streamer Bike_Retro", + 
"Street Art", + "Street Art Performance", + "Street Art and Graffiti", + "Street Photography", + "Street Theater", + "Streetwear", + "Streetwear Fashion", + "Stuckism Variant_Uncategorized", + "Stuckism_Uncategorized", + "Studio Ghibli_Fantasy_Surrealism", + "Studio Portrait Photography", + "Sub Anaheim Variant_Fantasy_Location", + "Sub Annapolis Variant_Sculpture_Location", + "Sub Atlanta Variant_Uncategorized_Location", + "Sub Baton Rouge Variant_Culture_Location", + "Sub Baton Rouge_Culture_Location", + "Sub Baton Rouge__Location", + "Sub Berkeley Variant_Retro_Location", + "Sub Boise Variant_Uncategorized_Location", + "Sub Boise_Uncategorized_Location", + "Sub Boise__Location", + "Sub Bozeman Variant_Architecture_Location", + "Sub Carlsbad Variant_Architecture_Culture_Location", + "Sub Carson City Variant_Architecture_Location", + "Sub Casper Variant_Uncategorized_Location", + "Sub Cheyenne Variant_Uncategorized_Location", + "Sub Columbia Variant_Architecture_Culture_Location", + "Sub Concord Variant_Uncategorized_Location", + "Sub Costa Mesa Variant_Culture_Location", + "Sub Denver Variant_Uncategorized_Location", + "Sub Des Moines Variant_Architecture_Location", + "Sub Dover Variant_Uncategorized_Location", + "Sub Downey Variant_Sci-Fi_Location", + "Sub El Monte Variant_Sci-Fi_Location", + "Sub Fontana Variant_Culture_Location", + "Sub Frankfort Variant_Uncategorized_Location", + "Sub Fresno Variant_Architecture_Nature_Landscape_Location", + "Sub Garden Grove Variant_Architecture_Location", + "Sub Glendale Variant_Uncategorized_Location", + "Sub Indianapolis Variant_Uncategorized_Location", + "Sub Inglewood Variant_Sci-Fi_Pop Culture_Culture_Location", + "Sub Irvine Variant_Uncategorized_Location", + "Sub Jackson Variant_Folk Art_Location", + "Sub Jefferson City Variant_Architecture_Folk Art_Location", + "Sub Juneau Variant_Architecture_Location", + "Sub Lancaster Variant_Sci-Fi_Retro_Location", + "Sub Montgomery Variant_Uncategorized_Location", + "Sub Montpelier Variant_Sculpture_Location", + "Sub Moreno Valley Variant_Uncategorized_Location", + "Sub Oakland Variant_Sci-Fi_Culture_Location", + "Sub Ontario Variant_Uncategorized_Location", + "Sub Orange Variant_Retro_Location", + "Sub Oxnard Variant_Uncategorized_Location", + "Sub Oxnard_Uncategorized_Location", + "Sub Oxnard__Location", + "Sub Palmdale Variant_Sci-Fi_Location", + "Sub Pasadena Variant_Uncategorized_Location", + "Sub Pierre Variant_Uncategorized_Location", + "Sub Pomona Variant_Retro_Location", + "Sub Providence Variant_Uncategorized_Location", + "Sub Rancho Cucamonga Variant_Architecture_Lifestyle_Location", + "Sub Richmond Variant_Architecture_Location", + "Sub Roseville Variant_Architecture_Location", + "Sub Salem Variant_Sci-Fi_Culture_Location", + "Sub Santa Ana Variant_Sci-Fi_Culture_Location", + "Sub Santa Clarita Variant_Uncategorized_Location", + "Sub Santa Rosa Variant_Sci-Fi_Nature_Location", + "Sub Santa Rosa_Sci-Fi_Nature_Location", + "Sub Santa Rosa__Location", + "Sub Simi Valley Variant_Pop Culture_Culture_Retro_Location", + "Sub Spokane Variant_Architecture_Location", + "Sub Tacoma Variant_Architecture_Culture_Retro_Location", + "Sub Temecula Variant_Lifestyle_Location", + "Sub Thousand Oaks Variant_Uncategorized_Location", + "Sub Topeka Variant_Architecture_Folk Art_Location", + "Sub Torrance Variant_Sci-Fi_Location", + "Sub Victorville Variant_Uncategorized_Location", + "Sumi-e Painting", + "Summer Art", + "Summer Fashion", + "Surf Wood Sign_Retro", + "Surrealism", + "Surrealism Art", + "Surrealist 
Painting", + "Surrealist Sculpture", + "Sushi Making", + "Sustainable Architecture", + "Sustainable Art Variant_Uncategorized", + "Sustainable Art_Uncategorized", + "Sustainable Fashion", + "Swing Dance", + "Sydney", + "Symbolism Art", + "Synthetic Cubism", + "Taj Mahal", + "Takashi Murakami", + "Talavera Pottery", + "Tamara de Lempicka", + "Tango Dance", + "Tap Dance", + "Tarot Cards_Occult", + "Tarot_Occult", + "Tatooine", + "Tattoo Print_Retro_Tattoo Art", + "Tech City Variant_Architecture_Nature_Location", + "Techno Music Visuals", + "Technotopia Variant_Architecture_Nature", + "Temporary Art Installations", + "Terrarium Bottle_Still Life", + "Terrarium_Uncategorized", + "Teslapunk_Portraiture", + "Textile Art", + "Textile Design", + "Textile Sculpture", + "Thai Art", + "Thai Cuisine", + "Thomas Gainsborough", + "Thriller", + "Thriller Films", + "Thriller Literature", + "Tibetan Thangka Painting", + "Tiki Bar_Uncategorized", + "Tiki Cocktail_Uncategorized", + "Tiki Idol_Uncategorized", + "Tiki Mug_Retro", + "Tiki Outdoor Shower_Uncategorized", + "Tiki Totem_Sculpture", + "Titian", + "Toei_Retro_Animation", + "Tokyo", + "Tokyo cityscape", + "Torture Chamber_Uncategorized", + "Torture Device_Horror_Horror & Dark", + "Tortured Prisoner_Uncategorized", + "Tortured Soul_Uncategorized", + "Toy Design", + "Traditional Animation", + "Traditional Dance", + "Traditional Japanese Architecture", + "Traditional Pottery", + "Tragedy", + "Tragedy Literature", + "Tranquil Art", + "Transavantgarde Variant_Uncategorized", + "Transavantgarde_Uncategorized", + "Transgressive Art Variant_Uncategorized", + "Transgressive Art_Uncategorized", + "Travel Photography", + "Tropical Bathroom_Uncategorized", + "Tropical Cocktail_Uncategorized", + "Tropical Hotel_Uncategorized", + "Tropical Luau_Uncategorized", + "Twin Peaks", + "Typography Design", + "UPA_Comics_Animation", + "Ukiyo-e (Japanese Woodblock Printing)", + "Ukiyo-e Art", + "Undead Gluttony_Architecture", + "Undead Portrait_Portraiture", + "Undefined_Emerging_Artist", + "Under Albany Variant_Architecture_Surrealism_Location", + "Under Bakersfield Variant_Uncategorized_Location", + "Under Berlin Variant_Retro_Surrealism_Location", + "Under Berlin_Retro_Surrealism_Location", + "Under Berlin__Location", + "Under Bismarck Variant_Uncategorized_Location", + "Under Charleston Variant_Architecture_Location", + "Under Chicago Variant_Architecture_Portraiture_Culture_Retro_Location", + "Under Eugene Variant_Folk Art_Location", + "Under Fargo Variant_Architecture_Location", + "Under Hartford Variant_Architecture_Location", + "Under Honolulu Variant_Architecture_Location", + "Under Istanbul Variant_Architecture_Location", + "Under Jackson Variant_Folk Art_Location", + "Under Juneau Variant_Architecture_Location", + "Under London Variant_Architecture_Location", + "Under Montreal Variant_Architecture_Location", + "Under Nashville Variant_Uncategorized_Location", + "Under Oklahoma City Variant_Architecture_Location", + "Under Omaha Variant_Culture_Location", + "Under Paris Variant_Uncategorized_Location", + "Under Sacramento Variant_Uncategorized_Location", + "Under Santa Fe Variant_Uncategorized_Location", + "Under St. 
Paul Variant_Architecture_Location", + "Under Tallahassee Variant_Sci-Fi_Retro_Architecture_Location", + "Under Trenton Variant_Uncategorized_Location", + "Underground Anchorage Variant_Architecture_Location", + "Underground Austin Variant_Uncategorized_Location", + "Underground Chula Vista Variant_Uncategorized_Location", + "Underground Columbus Variant_Retro_Location", + "Underground Concord Variant_Culture_Location", + "Underground Helena Variant_Architecture_Location", + "Underground Huntington Beach Variant_Architecture_Culture_Location", + "Underground Lansing Variant_Culture_Location", + "Underground Lincoln Variant_Uncategorized_Location", + "Underground Little Rock Variant_Uncategorized_Location", + "Underground Portland Variant_Sci-Fi_Location", + "Underground Riverside Variant_Culture_Location", + "Underground Rome Variant_Architecture_Location", + "Underground Salt Lake City Variant_Architecture_Location", + "Underground San Jose Variant_Uncategorized_Location", + "Underground Seattle Variant_Uncategorized_Location", + "Underground Springfield Variant_Folk Art_Location", + "Underground Wichita Variant_Folk Art_Location", + "Underwater Photography", + "Urban Fantasy Art", + "Urban Landscape Photography", + "Urban Photography", + "Urban Sculpture", + "User-Centered Design", + "Utrecht cityscape", + "VR Art Variant_Uncategorized", + "Vacuous Grimace_Uncategorized", + "Valhalla", + "Valve", + "Vampire_Portraiture_Horror", + "Vaporgram_Retro", + "Vaporwave City_Sci-Fi_Dystopia_Architecture_Location", + "Vaporwave Graphics_Retro_Surrealism_Graphic Design", + "Vaporwave Retro_Sci-Fi_Retro", + "Vaporwave Sunset_Uncategorized", + "Vaporwave_Architecture_Retro", + "Vatican City", + "Vector Portrait_Portraiture", + "Venezuelan Art", + "Venice", + "Verbatim Theater", + "Victorian Architecture", + "Victorian Fashion", + "Victorian Laboratory_Occult_Still Life", + "Video Art", + "Video Art_Uncategorized", + "Video Games Variant_Games", + "Video Games_Games_Culture", + "Video Mapping", + "Vienna", + "Vienna cityscape", + "Vietnamese Art", + "Vietnamese Cuisine", + "Vija Celmins", + "Vincent Van Gogh", + "Vintage Baseball_Retro_Photography", + "Vintage Fashion", + "Vintage Halloween Costume_Retro", + "Vintage Halloween Mask_Retro", + "Vintage Halloween_Retro", + "Vintage Robot Toy_Sci-Fi_Retro", + "Vintage Tattoo Flash_Retro_Tattoo Art", + "Vintage Tattoo Print_Retro_Tattoo Art", + "Vintage Travel Poster_Retro_Nature_Landscape", + "Virtual Art Variant_Uncategorized", + "Virtual Art_Sci-Fi", + "Virtual Reality (VR) Art", + "Virtual Reality Art", + "Visionary Art (1)_Uncategorized", + "Visual Effects (VFX) Design", + "Vogue Cover_Photography_Fashion", + "Volcano Lair_Uncategorized", + "Voodoo Altar_Occult", + "Voodoo Ceremony_Occult", + "Voodoo Doll_Retro_Occult", + "Voodoo Queen_Portraiture_Occult", + "Voodoo Shop_Occult", + "Voodoo_Occult", + "Vorticism_Uncategorized", + "Wallace and Gromit", + "Waltz Dance", + "War Films", + "Wassily Kandinsky", + "Water Art", + "Watercolor Painting", + "Weaving", + "Web Design", + "Wedding Fashion", + "Wedding Photography", + "Wellington cityscape", + "West African Art", + "Westeros", + "Wildlife Photography", + "William Kentridge", + "Winter Art", + "Winter Fashion", + "Wolfgang Tillmans", + "Womenswear Fashion", + "Wonderland", + "Wood Carving", + "Woodblock Art_Nature", + "Woodblock Print_Uncategorized", + "Woodblock Printing", + "Woodcut", + "Workwear Fashion", + "World Music", + "Xiamen cityscape", + "Xilam_Comics_Animation", + "Yayoi Kusama", + 
"Yellowstone National Park", + "Yokohama cityscape", + "Zion Variant_Culture", + "Zurich cityscape", + "_Uncategorized", + "ads-advertising_Uncategorized", + "ads-automotive_Uncategorized", + "ads-corporate_Uncategorized", + "ads-fashion editorial_Fashion", + "ads-food photography_Photography", + "ads-luxury_Uncategorized", + "ads-real estate_Photography", + "ads-retail_Uncategorized", + "artstyle-abstract expressionism_Uncategorized", + "artstyle-abstract_Uncategorized", + "artstyle-art deco_Uncategorized", + "artstyle-art nouveau_Nature", + "artstyle-constructivist_Uncategorized", + "artstyle-cubist_Uncategorized", + "artstyle-expressionist_Uncategorized", + "artstyle-graffiti_Architecture_Graffiti", + "artstyle-hyperrealism_Photography", + "artstyle-impressionist_Uncategorized", + "artstyle-pointillism_Uncategorized", + "artstyle-pop art_Culture", + "artstyle-psychedelic_Surrealism", + "artstyle-renaissance_Uncategorized", + "artstyle-steampunk_Uncategorized", + "artstyle-surrealist_Surrealism", + "artstyle-typography_Uncategorized", + "artstyle-watercolor_Uncategorized", + "carpint_Gothic", + "citz_Sci-Fi_Architecture", + "coolio_Portraiture", + "enhance_Uncategorized", + "futuristic-biomechanical cyberpunk_Sci-Fi_Dystopia", + "futuristic-biomechanical_Sci-Fi", + "futuristic-cybernetic robot_Sci-Fi", + "futuristic-cybernetic_Sci-Fi", + "futuristic-cyberpunk cityscape_Sci-Fi_Architecture", + "futuristic-futuristic_Sci-Fi", + "futuristic-retro cyberpunk_Sci-Fi_Retro", + "futuristic-retro futurism_Sci-Fi_Retro", + "futuristic-sci-fi_Sci-Fi", + "futuristic-vaporwave_Sci-Fi_Retro", + "game-bubble bobble_Fantasy", + "game-cyberpunk game_Sci-Fi_Dystopia_Games_Digital Media", + "game-fighting game_Games", + "game-gta_Uncategorized", + "game-mario_Fantasy_Comics", + "game-minecraft_Still Life", + "game-pokemon_Fantasy", + "game-retro arcade_Retro_Games", + "game-retro game_Retro", + "game-rpg fantasy game_Fantasy_Games", + "game-strategy game_Games", + "game-streetfighter_Uncategorized", + "game-zelda_Fantasy", + "getting there_Portraiture", + "girlz_Fashion_Horror_Horror & Dark_Gothic", + "gotit jinx_Tattoo Art", + "greatz_Portraiture", + "gsssggg_Portraiture", + "hoop_Portraiture", + "jinx_Tattoo Art", + "jinxed_Portraiture", + "kjkjkjj_Digital Media_Still Life_Comics", + "kool_Portraiture", + "misc-architectural_Uncategorized", + "misc-disco_Retro", + "misc-dreamscape_Fantasy_Surrealism", + "misc-dystopian_Dystopia", + "misc-fairy tale_Fantasy", + "misc-gothic_Gothic", + "misc-grunge_Retro", + "misc-horror_Horror", + "misc-horror_Horror_Horror & Dark", + "misc-kawaii_Uncategorized", + "misc-lovecraftian_Surrealism_Horror", + "misc-macabre_Gothic", + "misc-manga_Uncategorized", + "misc-metropolis_Sci-Fi_Architecture", + "misc-minimalist_Uncategorized", + "misc-monochrome_Uncategorized", + "misc-nautical_Uncategorized", + "misc-space_Sci-Fi", + "misc-stained glass_Uncategorized", + "misc-techwear fashion_Sci-Fi_Fashion_Architecture", + "misc-tribal_Uncategorized", + "misc-zentangle_Uncategorized", + "mkkk_Portraiture_Digital Media_Animation", + "papercraft-collage_Uncategorized", + "papercraft-flat papercut_Uncategorized", + "papercraft-kirigami_Uncategorized", + "papercraft-paper mache_Uncategorized", + "papercraft-paper quilling_Uncategorized", + "papercraft-papercut collage_Uncategorized", + "papercraft-papercut shadow box_Uncategorized", + "papercraft-stacked papercut_Uncategorized", + "papercraft-thick layered papercut_Uncategorized", + "photo-alien_Sci-Fi_Photography", + "photo-film 
noir_Photography", + "photo-hdr_Photography", + "photo-long exposure_Photography_Surrealism", + "photo-neon noir_Photography", + "photo-silhouette_Photography", + "photo-tilt-shift_Photography", + "sai-3d-model_Uncategorized", + "sai-analog film_Retro_Photography", + "sai-anime_Uncategorized", + "sai-cinematic_Uncategorized", + "sai-comic book_Uncategorized", + "sai-craft clay_Sculpture", + "sai-digital art_Digital Media", + "sai-fantasy art_Fantasy_Surrealism", + "sai-isometric_Uncategorized", + "sai-line art_Uncategorized", + "sai-lowpoly_Uncategorized", + "sai-neonpunk_Uncategorized", + "sai-origami_Uncategorized", + "sai-photographic_Photography", + "sai-pixel art_Uncategorized", + "sai-texture_Uncategorized", + "stfhgff_Photography" + ] + ] + } + } + ], + "outputs": [ + { + "name": "positive_prompt_text_g", + "type": "STRING", + "links": [ + 153, + 155 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative_prompt_text_g", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "MileHighStyler" + }, + "widgets_values": [ + "1girl, period costume", + "", + "2D Game Art", + "No" + ] + }, + { + "id": 129, + "type": "ShowText|pysssss", + "pos": [ + 2050, + 830 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 153 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "Animation 1girl, period costume, Creating art with Animation, often for moving images, storytelling, or dynamic visuals." + ] + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 810, + 240 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 23, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 56, + 91, + 159 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 2350, + 670 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 1890, + 370 + ], + "size": { + "0": 380, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 126, + "type": "ShowText|pysssss", + "pos": [ + 
1590, + 610 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 157 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "Animation" + ] + }, + { + "id": 87, + "type": "Note", + "pos": [ + 2360, + 840 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The Text Scheduler changes the style applied in the prompt\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 122, + "type": "CR Central Schedule", + "pos": [ + 210, + -350 + ], + "size": { + "0": 340, + "1": 490 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 149 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "0, 512\n2, 640\n3, 768\n4, 896\n8, 1024\n", + "Value", + "V1", + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "schedule", + "Model", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 68, + "type": "Note", + "pos": [ + 810, + -90 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 60, + "type": "Note", + "pos": [ + 230, + 310 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press Reset in the Animation Builder and then press the Queue button. Do not use Queue Prompt in the ComfyUI menu."
+ ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 124, + "type": "CR Simple Schedule", + "pos": [ + 540, + 710 + ], + "size": { + "0": 290, + "1": 200 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 150 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Schedule" + }, + "widgets_values": [ + "0, Art Nouveau\n2, Antarctica\n4, 2D Game Art\n5, Animation\n8, Airbrushing", + "Text", + "T1", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 86, + "type": "Note", + "pos": [ + 290, + 710 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 9, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Schedules should have a line for frame 0\n\nIf frame 0 is missing the default value will be used" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 128, + "type": "CR String To Combo", + "pos": [ + 1280, + 890 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 158 + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": [ + 154 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR String To Combo" + }, + "widgets_values": [ + "" + ] + }, + { + "id": 135, + "type": "CR Text Scheduler", + "pos": [ + 1180, + 650 + ], + "size": { + "0": 320, + "1": 170 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 156 + }, + { + "name": "current_frame", + "type": "INT", + "link": 159, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 157, + 158 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Scheduler" + }, + "widgets_values": [ + "Schedule", + 0, + "T1", + "default text", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 125, + "type": "CR Schedule Input Switch", + "pos": [ + 910, + 650 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "schedule1", + "type": "SCHEDULE", + "link": 149 + }, + { + "name": "schedule2", + "type": "SCHEDULE", + "link": 150 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 156 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Schedule Input Switch" + }, + "widgets_values": [ + 2 + ] + }, + { + "id": 114, + "type": "Note", + "pos": [ + 910, + 790 + ], + "size": { + "0": 210, + "1": 140 + }, + "flags": {}, + "order": 10, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "You can define either local or central schedules\n\nThis workflow shows both. You can switch between the two.\n\nThis switch would not normally be needed." 
+ ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + 480, + 240 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 10, + 1, + 1, + 10, + 1, + "frame: 0 / 9", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 23, + 24, + 0, + 25, + 0, + "INT" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 56, + 25, + 0, + 26, + 1, + "INT" + ], + [ + 72, + 14, + 3, + 15, + 2, + "INT" + ], + [ + 90, + 78, + 0, + 77, + 0, + "STRING" + ], + [ + 91, + 25, + 0, + 78, + 0, + "INT" + ], + [ + 123, + 98, + 0, + 91, + 0, + "*" + ], + [ + 133, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 149, + 122, + 0, + 125, + 0, + "SCHEDULE" + ], + [ + 150, + 124, + 0, + 125, + 1, + "SCHEDULE" + ], + [ + 153, + 130, + 0, + 129, + 0, + "STRING" + ], + [ + 154, + 128, + 0, + 130, + 0, + "no style,2D Game Art,3D Animation,3D Game Art,3D Modeling,3D Printing Art,3D Printing in Art,AR Art Variant_Uncategorized,Aardman_Uncategorized,Abandoned Asylum_Uncategorized,Aboriginal Dot Painting,Abstract Expressionism,Abstract Painting,Abstract Photography,Abstract Sculpture,Absurdist Theater,Academic Art,Acrylic Painting,Action Films,Addams Family_Portraiture_Horror,Adrian Ghenie,Adventure,Adventure Films,Aerial Dance,Aerial Photography,African Beadwork,African Beadwork Art,African Cuisine,African Mask Art,African Mask Making,Agnes Martin,Ai Weiwei_1,Ai Weiwei_2,Air Art,Airbrushing,Albrecht Durer,Album Cover Art,Alchemist's Study_Uncategorized,Amazon Rainforest,American Cuisine,American Traditional_Retro_Tattoo Art,Amsterdam,Amsterdam cityscape,Analytical Cubism,Ancient Maya_Uncategorized,Andy Warhol,Anger Art,Animated Corpse_Animation,Animated Films,Animation,Anish Kapoor,Ankama_Animation,Anselm Kiefer,Antarctica,Appropriation (1)_Culture,Après-Ski_Uncategorized,Arachnid Swarm_Uncategorized,Architectural Design,Architectural Photography,Argentinian Art,Art Activism,Art Collaborations with Musicians,Art Collaborations with Writers,Art Conservation,Art Criticism,Art Curation,Art Deco,Art Deco Architecture,Art Deco Architecture_Architecture,Art Deco Design,Art Education,Art Education for Adults,Art Education for Children,Art Education for Remote Areas,Art Education for Special Needs,Art Gallery Management,Art Games,Art Historical Writing,Art History,Art History Research,Art Informatics,Art Informel (1)_Uncategorized,Art Inspired by Ancient Civilizations,Art Inspired by the Digital Age,Art Inspired by the Renaissance,Art Inspired by the Roaring Twenties,Art Inspired by the Victorian Era,Art Installations,Art 
Journalism,Art Marketing,Art Nouveau,Art Nouveau Architecture,Art Nouveau Design,Art Nouveau Poster_Uncategorized,Art Nouveau Variant_Uncategorized,Art Restoration,Art Sales and Auctions,Art Therapy,Art Therapy for Adults,Art Therapy for Children,Art Workshop Facilitation,Art and AI Collaboration,Art and Architecture Collaboration,Art and Cultural Heritage Preservation,Art and Environmental Sustainability,Art and Literature Collaboration,Art and Medical Collaboration,Art and Mental Health,Art and Music Collaboration,Art and Science Collaboration,Art and Social Justice Projects,Art and Technology Collaboration,Art and Urban Development,Art for Agricultural Industry,Art for Agricultural Sector,Art for Airports,Art for Animal Welfare Organizations,Art for Anniversaries,Art for Aquariums,Art for Architectural Visualization,Art for Asian Cultures,Art for Augmented Reality Experiences,Art for Automotive Design,Art for Automotive Industry,Art for Aviation Industry,Art for Baby Showers,Art for Birthdays,Art for Botanical Gardens,Art for Cafes and Restaurants,Art for Charity Fundraisers,Art for Children,Art for Children's Hospitals,Art for Climate Change Initiatives,Art for Construction Industry,Art for Corporate Spaces,Art for Cruise Ships,Art for Culinary Presentation,Art for E-Commerce Platforms,Art for Educational Institutions,Art for Educational Technology,Art for Elderly,Art for Emergency Services,Art for Energy Industry,Art for Entertainment Industry,Art for Environmental Activism,Art for Environmental Campaigns,Art for Factories and Workshops,Art for Fashion Industry,Art for Festivals and Events,Art for Financial Institutions,Art for Financial Sector,Art for Fitness Centers,Art for Funerals,Art for Gender Equality,Art for Government Entities,Art for Graduations,Art for Health Care Facilities,Art for Home Decor,Art for Hospitality Industry,Art for Hotels,Art for Human Anatomy Studies,Art for Human Rights Campaigns,Art for Indigenous Cultures,Art for LGBTQ+ Celebrations,Art for Libraries,Art for Marine Industry,Art for Maritime Industry,Art for Medical Illustrations,Art for Military and Defense Sector,Art for Military and Veterans,Art for Mobile Apps,Art for Museums,Art for Music Videos,Art for National Holidays,Art for Nautical Navigation,Art for Non-Profit Organizations,Art for Office Spaces,Art for Outdoor Advertising,Art for Packaging Design,Art for Pet Products,Art for Pharmaceutical Industry,Art for Political Campaigns,Art for Prisons,Art for Public Transportation,Art for Real Estate Marketing,Art for Religious Celebrations,Art for Religious Institutions,Art for Renewable Energy Sector,Art for Retail Spaces,Art for Retirement Parties,Art for Robotics,Art for Schools and Colleges,Art for Science Centers,Art for Scientific Exploration,Art for Security and Defense,Art for Seniors,Art for Shopping Malls,Art for Smart City Projects,Art for Social Media Platforms,Art for Social Networking Sites,Art for Spa and Wellness Centers,Art for Space Exploration,Art for Space Industry,Art for Spaceships and Aerospace,Art for Sports Industry,Art for Sports Venues,Art for Technical Manuals,Art for Teenagers,Art for Teens,Art for Television Shows,Art for Theme Parks,Art for Toddlers,Art for Train Stations,Art for Underwater Exploration,Art for Video Game Development,Art for Virtual Assistants and AI,Art for Virtual Events,Art for Virtual Reality Experiences,Art for Wearable Technology,Art for Wearables,Art for Web Platforms,Art for Weddings,Art for Zoos,Art in Public Transportation,Art with Light and 
Projection,Art with Metalwork,Art with Organic Materials,Art with Recycled Materials,Artist's Books,Artware Variant_Sci-Fi_Graffiti_Digital Media,Aspen,Assemblage Art,Astrophotography,Athens,Athleisure Fashion,Atlantis,Augmented Reality (AR) Art,Augmented Reality Art,Australian Aboriginal Art,Autobiography,Automotive Design,Autumn Art,Avant-Garde Fashion,Aztec Calendar_Uncategorized,Back Alley Rogue_Uncategorized,Ballet Dance,Ballet_Uncategorized,Ballroom Dance,Bangkok,Banksy_1,Banksy_2,Barbara Kruger,Barcelona,Baroque,Baroque Architecture,Baroque Art,Baroque Music,Bas-Relief Sculpture_Sculpture,Basket Weaving,Basket Weaving Art,Battle_Uncategorized,Bauhaus,Bauhaus (1)_Architecture,Bauhaus Architecture,Bauhaus Design,Bauhaus Design_Uncategorized,Beachwear Fashion,Beijing,Belly Dance,Berlin,Bharatanatyam Dance,Bikini Bottom,Bio Art,Bio Art_Nature,Biographical Films,Biographical Literature,Biography,Biomorphic Architecture,Black Hole,Black Velvet Painting_Portraiture,Black and White Photography,Blacklight Poster_Uncategorized,Blockbuster Films,Bloodthirsty Vampire_Horror,Bluegrass Music,Blueprint_Uncategorized,Blues Music,Blues Music Illustration,Body Art,Body Art Performance,Body Painting,Bohemian Fashion,Bomber Jacket_Retro,Bookbinding,Botanical Illustration,Boudoir Photography_Photography,Brazil,Brazilian Art,Brazilian Cuisine,Brazilian Graffiti Art,Breakdance,Bridal Fashion,Brightwater Variant_Nature,British Art,Bronze Sculpture,Bruce Nauman,Bruges,Brutalism,Brutalist Architecture,Budapest cityscape,Cabinet of Curiosities_Occult,Cai Guo-Qiang,Cake Decorating,Canada,Candid Portrait Photography,Caravaggio,Caribbean Carnival Art,Caribbean Cuisine,Caricature,Carnival Freakshow_Retro,Caspar David Friedrich,Cassette Bedroom_Retro,Cassette Collage_Sci-Fi_Surrealism_Retro,Cassette Futurism_Retro,Cassette Graphics_Retro_Surrealism,Cassette J-Card_Retro,Cassette Wall_Retro,Casual Fashion,Caveopolis Variant_Lifestyle,Cecily Brown,Celtic Knotwork Art,Celtic Mythology Art,Cemetery Statue_Uncategorized,Central African Art,Central American_Uncategorized,Ceramic Art,Ceramic Design,Ceramic Sculpture,Ceramics,Chalk Art,Charcoal Drawing,Charles Ray,Chicago,Children's Fashion,Children's Theater,Chilean Art,Chinese Architecture,Chinese Art,Chinese Calligraphy,Chinese Cuisine,Chinese Ink Painting,Chinese Jade Carving,Chinese Landscape Painting,Chinese Mythology Art,Chinese Paper Cutting,Chinese Scroll Painting,Chris Ofili,Cindy Sherman_1,Cindy Sherman_2,Cinematography,Cinque Terre,Circuit Bending_Uncategorized,Circus Arts,Circus Performer_Retro,Classic Western,Classical Architecture,Classical Art,Classical Music,Classical Music Illustration,Classical Realism,Classical Realism_Portraiture,Classical Theater,Claude Monet,Clockwork City Variant_Architecture_Location,Collaborative Art Projects,Collage,Collage Art,Colombian Art,Colonial Architecture,Colosseum,Combine Painting_Sci-Fi_Still Life,Comedy,Comedy Literature,Commercial Photography,Community Mural Projects,Computer art,Concept Art for Movies,Concept Art for Video Games,Conceptual Art,Concert Poster Design,Conjoined Twins_Uncategorized,Constructivism,Constructivism Art,Contemporary Ballet,Contemporary Dance,Copenhagen,Copenhagen cityscape,Corporate Identity Design,Cosplay Design,Cottagecore Fashion_Fashion,Country Music,Country Music Graphics,Crawler Mimicry_Uncategorized,Creepy Children_Portraiture,Creepy Porcelain Doll_Fashion_Portraiture,Crime Films,Critical Realism_Uncategorized,Cross-Disciplinary Art,Crucifixion_Uncategorized,Crystal Caverns 
Variant_Architecture,Cuban Art,Cuban Cuisine,Cubism,Cubism Art,Cult Films,Cyberpunk,Cyberpunk Fantasy Art,Dadaism,Dadaism Art,Damien Hirst_1,Damien Hirst_2,Dan Flavin,Dance Choreography,Dance Performance Art,Dark Carnival_Gothic,Dark Fantasy Art,Data Art Variant_Uncategorized,Data Art_Uncategorized,Data Visualization Art,Day of the Dead_Uncategorized,De Stijl_Uncategorized,Death Masque_Uncategorized,Deconstructivist Architecture,Demonic Clown_Uncategorized,Demonic Portal_Horror,Demonic Possession_Uncategorized,Demoscene_Animation,Desaturated_Uncategorized,Die Brücke_Graffiti,Diego Velazquez,Dieselpunk_Retro,Digital Animation,Digital Art,Digital Art_Digital Media,Digital Drawing Tablets,Digital Illustration,Digital Painting,Digital Sculpture,Digital Storytelling,Diorama_Uncategorized,Disco Music,Disney Animation_Animation,Disrespectful Grave Robber_Uncategorized,Documentary Films,Documentary Photography,Drama,Drama Films,Dubai,Dublin,Dublin cityscape,Dunder Mifflin,Dutch Art,Dwarvendom Variant_Uncategorized,Dwarvenholm Variant_Uncategorized,Earth Art,East African Art,Eco Art,Eco-Art,Ed Ruscha,Edgar Degas,Edinburgh cityscape,Editorial Design,Edvard Munch,Edward Hopper,Egyptian Hieroglyphs_Uncategorized,Egyptian Mythology Art,Egyptian Wall Art,Egyptology_Uncategorized,El Anatsui,Electronic Music,Electronic Music Visuals,Elegant_Erotic_Photography,Elfheim Variant_Architecture_Fantasy,Elven City Variant_Architecture_Location,Embroidery,Emerging_Artist,Engraving,Environmental Art,Environmental Design,Ephemeral Art,Etching,Eugene Delacroix,Exhibition Design,Exoplanet,Exorcism_Uncategorized,Experimental Art,Experimental Films,Experimental Music Video,Experimental Photography,Experimental Theater,Expressionism,Expressionist Architecture,Expressionist painting,Fairy Tale Art,Fantasy,Fantasy Films,Fantasy Literature,Farce,Fashion Design,Fashion Illustration,Fashion Photography,Fast Fashion,Fauvism,Fauvism Art,Ferocious Werewolf_Uncategorized,Festival Fashion,Fiction,Figurative Expressionism_Uncategorized,Figurine Shelf_Fantasy_Sculpture,Filipino Art,Film Direction,Film Editing,Film Noir,Fine Art Photography,Fine_Art_Black_and_White_Photography,Fire Art,Flamenco Dance,Folk Art Variant_Folk Art,Folk Art_Folk Art,Folk Dance,Folk Music,Folk Music Art,Food Art,Food Photography,Formal Fashion,Fortune Teller_Occult,Fortune Telling_Occult,France,Francisco Goya,Frankfurt cityscape,French Art,French Cuisine,French Impressionism,Fresco Painting Technique,Frida Kahlo,Funk Music,Furniture Design,Futurism,Futurist Architecture,GAYZ_Portraiture,Gabriel Orozco,Galactic_Sci-Fi,Game Design,Generative Art,Genetic Art_Uncategorized,Geometric Abstraction,Geometric abstract painting,Georg Baselitz,Georgia O'Keeffe,Gerhard Richter_1,Gerhard Richter_2,German Art,Ghibli_Surrealism,Ghoul City Variant_Architecture_Location,Giant Robot_Sci-Fi_Retro_Architecture,Glamorous Portrait_Fashion_Portraiture,Glamorous_Erotic_Photography,Glasgow cityscape,Glass Sculpture,Glassblowing,Glazing Technique in Painting,Glenn Ligon,Glitch Art_Uncategorized,Glitchcore_Digital Media,Gongfu Tea_Uncategorized,Gospel Music,Goth Boudoir_Gothic,Gotham City,Gothic Architecture,Gothic Architecture_Architecture_Gothic,Gothic Fashion,Gothic Literature,Gothic Monster_Architecture_Gothic,Gothic Revival Architecture,Gothic Revival Architecture_Architecture_Gothic,Graffiti Art,Graffiti Style_Graffiti,Grand Canyon,Grant Wood,Graphic Design,Graveyard Mist_Horror,Great Barrier Reef,Great Wall of China,Greek Art,Greek Classical Sculpture,Greek Mythology Art,Greek 
Pottery Art,Greendale,Gritty_Voyeuristic_Photography,Grotesque Gargoyle_Uncategorized,Grunge Flyer_Uncategorized,Gustav Klimt,Gutai_Sci-Fi_Event,H.P. Lovecraft Cover_Horror,Hackersville Variant_Architecture,Hallstatt,Hard-edge Painting_Uncategorized,Hate Crime_Uncategorized,Haunted Carnival_Horror,Haunted Portrait_Portraiture_Horror,Haute Couture,Haute Couture Fashion,Haute Cuisine,Hawkins,Headless Horseman_Uncategorized,Heavy Metal,Henri Matisse,Hieronymus Bosch,High Fantasy,High Fantasy Art,Hip-Hop Album Art,Hip-Hop Dance,Hip-Hop Fashion,Hip-Hop Music,Historical Fiction,Hogwarts,Hong Kong,Hong Kong cityscape,Horror,Horror Films,Horror Movie Poster_Horror_Gothic,Hyperrealism_Uncategorized,Ice Sculpture,Illustration Design,Illustration for Children's Books,Impressionism,Impressionism Art,Impressionist Landscape Painting,Impressionist Portrait Painting,Improvisational Theater,Inca Mythology Art,Indian Art,Indian Cuisine,Indian Miniature Painting,Indian Mythology Art,Indie Films,Indie Music Art,Indigenous Australian Art,Indigenous Painting,Indigenous Pottery,Indonesian Art,Industrial Architecture,Industrial Design,Information Art_Uncategorized,Ink Drawing,Insectoid Mutant_Portraiture,Installation Art,Interaction Design,Interactive Art,Interactive Art Installations,Interactive artwork,Interior Design,Internet Art_Sci-Fi_Digital Media,Intimate_Naturist_Photography,Intuitive Art_Uncategorized,Irish Art,Irish Dance,Islamic Architecture,Islamic Art,Islamic Calligraphy,Islamic Geometric Art,Islamic Geometric Patterns,Island Luau_Uncategorized_Location,Istanbul,Istanbul cityscape,Italian Art,Italian Cuisine,Italian Renaissance Art,J.M.W. Turner,Jackson Pollock,Jakarta cityscape,Japan,Japanese Architecture,Japanese Art,Japanese Cuisine,Japanese Mythology Art,Jazz Dance,Jazz Music,Jazz Poster Art,Jean-Honore Fragonard,Jeff Koons,Jenny Holzer,Jerusalem,Jewelry Design,Johannes Vermeer,John Baldessari,Joyful Art,Julie Mehretu,Kabuki Theater,Kara Walker,Kathak Dance,Katsushika Hokusai,Kawaii Character_Uncategorized,Kawaii Fashion_Fashion,Kehinde Wiley,Kerry James Marshall,Kiki Smith,Kinetic Art,Kinetic Sculpture,Kintsugi (Japanese Gold Repair),Kitsch Movement_Uncategorized,Knitting,Korean Art,Korean Celadon Ceramics,Korean Celadon Pottery,Korean Cuisine,Kuala Lumpur,Kyoto,Kyoto cityscape,LAIKA_Animation,Land Art,Land Art (1)_Fantasy_Nature_Sculpture_Landscape,Landscape Architecture,Landscape Design,Landscape Photography,Laser Grid_Uncategorized,Later European abstraction (1)_Uncategorized,Leonardo da Vinci,Lettrist artwork,Leviathan Variant_Architecture,Light Art,Line Dance,Lisbon cityscape,Lithography,Living Burial_Uncategorized,London,Los Angeles,Lost Vegas Variant_Architecture,Lounge Singer_Retro,Lovecraftian Horror_Horror,Low Fantasy,Lowbrow Art Variant_Surrealism_Culture,Lowbrow Art_Culture,Luau Fire Dancer_Fashion,Luchador_Uncategorized,Luxury Fashion,Lynching_Uncategorized,Lyrical abstract painting,Macabre Memento Mori_Horror_Horror & Dark_Still Life,Machinima Variant_Uncategorized,Machu Picchu,Macro Photography,Mad Scientist Machinery_Uncategorized,Madhubani Painting,Madhubani Painting (Indian Folk Art),Mage City Variant_Architecture_Fantasy_Location,Magic Realist painting,Mall Goth_Portraiture_Gothic,Mannerism,Mannerist Architecture,Maori Wood Carving,Mardi Gras_Uncategorized,Marina Abramović,Mark Bradford,Mark Grotjahn,Martin Puryear,Masked Killer_Uncategorized,Masked Stalker_Uncategorized,Maurizio Cattelan,Maximalism,Mecca,Mech City Variant_Sci-Fi_Architecture_Location,Mech 
City_Sci-Fi_Architecture_Location,Mech City__Location,Media Art,Medical Oddities_Uncategorized,Mediterranean Cuisine,Melancholy Art,Melodrama,Melting Skull_Uncategorized,Memento Mori_Horror_Horror & Dark,Memoir,Menacing Scarecrow_Uncategorized,Menswear Fashion,Mesoamerican Mythology Art,Mesopotamian Mythology Art,Metabolist Architecture,Metal Music,Metal Music Artwork,Metalwork,Metropolis,Mexican Art,Mexican Cuisine,Mexican Muralism,Mexican Skull Art_Uncategorized,Miami,Michelangelo,Middle Eastern Cuisine,Middle-earth,Midgard Variant_Architecture,Milky Way Galaxy,Mime,Mime City Variant_Architecture_Location,Minimalism,Minimalist Web Design,Mixed Media Art,Mixer_Animation,Modern Architecture,Modern Dance,Modernist Architecture,Mona Hatoum,Monoprinting Technique,Mosaic,Mosaic Art,Motion Design,Motion Graphics Design,Mount Everest,Mount Olympus,Movie Storyboard_Uncategorized,Mughal Miniature Painting,Mumbai,Mummy Portrait_Portraiture,Munich cityscape,Music Video Direction,Musica Variant_Architecture_Culture,Musical Films,Musical Theater,Mutated Beast_Uncategorized,My Little Pony_Uncategorized,Mystery,Mystery Literature,Mythic Fantasy Art,Nantucket,Native American Art,Native American Basketry,Native American Mythology Art,Native American Pottery,Naturalism in Literature,Nature Landscape Photography,Nature Photography,Nautical_Retro,Naïve Art (1)_Uncategorized,Nebula,Neo Pop_Pop Culture_Culture,Neo Rauch,Neo-Dada_Uncategorized,Neo-Expressionism_Uncategorized,Neo-Gothic Architecture,Neo-Noir,Neo-Pop (1)_Pop Culture_Culture,Neo-primitivism (1)_Still Life,Neoclassical Architecture,Neoclassicism,Neon Lighting_Uncategorized,Neon Racer_Sci-Fi,Neon Tokyo_Retro,Neoplasticism,Neotokyo Variant_Sci-Fi_Architecture,Neue Sachlichkeit Variant_Portraiture,Neue Wilde (1)_Uncategorized,New Caelum Variant_Architecture,New Caelum_Architecture,New Media Art_Digital Media,New Orleans,New Perpendicular art_Uncategorized,New Simplicity_Architecture,New York City,New York cityscape,Niagara Falls,Nicole Eisenman,Night Photography,Nightmare Beast_Uncategorized,Non-Fiction,Nordic Viking Art,Norse Mythology Art,North African Art,Norwegian romantic nationalism_Nature_Landscape,Nouveau Circus_Uncategorized,Nova Alexandria Variant_Architecture_Culture,Occult Ritual_Occult,Occult Sacrifice_Occult,Oil Painting,Olafur Eliasson,Ominous Fog_Uncategorized,Ominous Warning_Uncategorized,Op Art,Op Art_Uncategorized,Opera,Opera Music,Opera Music Illustration,Osaka cityscape,Outsider Art_Uncategorized,Pablo Picasso,Package Design,Pandora,Paper Cutting,Paper Mache Art,Parametric Architecture,Paris,Participatory Art,Patchwork Creature_Uncategorized,Paul Cezanne,Performance Art,Performance Sculpture,Peruvian Art,Petra,Photography,Photojournalism,Photorealism,Photorealistic painting,Physical Theater,Pinup_Retro,Pixel Art,Pizza Making,Plague Mass Grave_Uncategorized,Plein Air Painting,Plotter Art Variant_Uncategorized,Plotter Art_Uncategorized,Plus-Size Fashion,Poetry,Pointillism,Pointillism Art,Pole Dance,Polynesian Mythology Art,Polynesian Tattoo Art,Pop Art,Pop Music,Pop Music Branding,Pop Surrealism_Nature_Surrealism_Landscape_Still Life,Pop art style,Porcelain Art,Portrait Photography,Portuguese Art,Post-Impressionism,Postmodern Architecture,Pottery,Prague,Prague cityscape,Prairie Dress_Retro_Fashion,Pre-Raphaelite_Uncategorized,Preppy Fashion,Printmaking,Prismatic_Uncategorized,Projection Mapping Art,Propaganda Art_Retro,Propaganda Poster_Uncategorized,Prose 
Literature,Provocative_Surreal_Photography,Pseudorealism_Uncategorized,Psychedelic Concert Posters,Psychedelic Pop Art_Surrealism,Public Art Installations,Public Installations,Public Sculptures,Punk Fashion,Punk Music,Punk Poster_Uncategorized,Puppetry,Pyramids of Giza,Quahog,Quilting,Quilting Art,Quito cityscape,R&B Music,Rachel Whiteread,Radical Realism (1)_Still Life,Rangoli (Indian Floor Art),Rap Music Graphics,Raphael,Rashid Johnson,Rat Infestation_Uncategorized,Rat King_Uncategorized,Realism Art,Realism in Literature,Realistic Fiction,Reanimated Corpse_Animation,Recycled Art,Reggae Music,Reggae Music Design,Rembrandt,Remodernism Variant_Uncategorized,Remodernism_Architecture,Renaissance,Renaissance Architecture,Renaissance Art,Rene Magritte,Responsive Web Design,Richard Serra,Richard Tuttle,Rio de Janeiro,Rio de Janeiro cityscape,Robert Gober,Robotics Art,Rock Album Art,Rock Music,Rococo,Rococo Architecture,Rococo Art,Rococo Interior_Uncategorized,Roman Mosaic Art,Roman Mythology Art,Romance,Romance Literature,Romanesque Architecture,Romantic Comedy,Romantic Films,Romanticism,Romanticism Art,Romanticism in Literature,Rome,Rural Photography,Russia,Russian Art,Russian Icon Painting,Sahara Desert,Salem,Salsa Dance,Salsa Music,Salvador Dali,Samurai_Uncategorized,Sanctuary Variant_Uncategorized,Sand Sculpture,Sandro Botticelli,Sarah Sze,Satanic_Horror_Occult,Satire,Satire Literature,Scandinavian Architecture,Scandinavian Art,Scandinavian Design,Scarecrow_Horror,Scary Pumpkin_Uncategorized,Scary Stories at Campfire_Horror_Horror & Dark,Scary Stories_Horror,Sci-Fi Films,Science Fiction,Scientific Illustration_Retro,Screen Printing,Screwball Comedy,Sculpture,Self-taught Art (1)_Fantasy,Seoul,Serial Killer_Horror,Set Design for Theater,Shadow City Variant_Architecture_Occult_Gothic_Location,Shadow City_Architecture_Occult_Gothic_Location,Shadow City_Horror_Occult_Horror & Dark_Gothic_Location,Shanghai,Shangri-La Variant_Uncategorized,Shepard Fairey,Shirakawa-go,Shirin Neshat,Sideshow Poster_Retro,Silent Films,Singapore,Sinister Crone_Uncategorized,Sinister Laboratory_Horror_Occult_Still Life,Sinister Ritual_Uncategorized,Situationist International Variant_Uncategorized,Situationist International_Uncategorized,Skateboarding Fashion,Skeleton Dance_Animation,Skeleton Dance_Horror_Horror & Dark_Animation,Slavic Mythology Art,Slow Fashion,Smothering Earth_Fantasy,Social Realism painting,Sonnet,Soul Music,Sound Art,Sound Design,Sound Sculpture,South African Art,South American Textile Art,Southern Gothic_Gothic,Southwest Kachina Dolls,Spaghetti Western,Spanish Art,Spanish Cuisine,Spider Queen_Uncategorized,Sports Card_Photography_Portraiture,Sports Photography,Spring Art,Springfield,St Ives School Variant_Nature_Landscape,St Ives School_Nature_Landscape,Stained Glass Art,Stained Glass_Uncategorized,Stand-Up Comedy,Stars Hollow,Steampunk,Steampunk City Variant_Architecture_Location,Steampunk Fantasy Art,Steampunk Fashion,Steampunk Portrait_Fantasy_Portraiture,Steampunk_Fantasy_Fashion,Steamtown Variant_Architecture_Retro,Steeltown Variant_Architecture,Stockholm cityscape,Stone Sculpture,Stop Motion_Animation,Streamer Bike_Retro,Street Art,Street Art Performance,Street Art and Graffiti,Street Photography,Street Theater,Streetwear,Streetwear Fashion,Stuckism Variant_Uncategorized,Stuckism_Uncategorized,Studio Ghibli_Fantasy_Surrealism,Studio Portrait Photography,Sub Anaheim Variant_Fantasy_Location,Sub Annapolis Variant_Sculpture_Location,Sub Atlanta Variant_Uncategorized_Location,Sub Baton Rouge 
Variant_Culture_Location,Sub Baton Rouge_Culture_Location,Sub Baton Rouge__Location,Sub Berkeley Variant_Retro_Location,Sub Boise Variant_Uncategorized_Location,Sub Boise_Uncategorized_Location,Sub Boise__Location,Sub Bozeman Variant_Architecture_Location,Sub Carlsbad Variant_Architecture_Culture_Location,Sub Carson City Variant_Architecture_Location,Sub Casper Variant_Uncategorized_Location,Sub Cheyenne Variant_Uncategorized_Location,Sub Columbia Variant_Architecture_Culture_Location,Sub Concord Variant_Uncategorized_Location,Sub Costa Mesa Variant_Culture_Location,Sub Denver Variant_Uncategorized_Location,Sub Des Moines Variant_Architecture_Location,Sub Dover Variant_Uncategorized_Location,Sub Downey Variant_Sci-Fi_Location,Sub El Monte Variant_Sci-Fi_Location,Sub Fontana Variant_Culture_Location,Sub Frankfort Variant_Uncategorized_Location,Sub Fresno Variant_Architecture_Nature_Landscape_Location,Sub Garden Grove Variant_Architecture_Location,Sub Glendale Variant_Uncategorized_Location,Sub Indianapolis Variant_Uncategorized_Location,Sub Inglewood Variant_Sci-Fi_Pop Culture_Culture_Location,Sub Irvine Variant_Uncategorized_Location,Sub Jackson Variant_Folk Art_Location,Sub Jefferson City Variant_Architecture_Folk Art_Location,Sub Juneau Variant_Architecture_Location,Sub Lancaster Variant_Sci-Fi_Retro_Location,Sub Montgomery Variant_Uncategorized_Location,Sub Montpelier Variant_Sculpture_Location,Sub Moreno Valley Variant_Uncategorized_Location,Sub Oakland Variant_Sci-Fi_Culture_Location,Sub Ontario Variant_Uncategorized_Location,Sub Orange Variant_Retro_Location,Sub Oxnard Variant_Uncategorized_Location,Sub Oxnard_Uncategorized_Location,Sub Oxnard__Location,Sub Palmdale Variant_Sci-Fi_Location,Sub Pasadena Variant_Uncategorized_Location,Sub Pierre Variant_Uncategorized_Location,Sub Pomona Variant_Retro_Location,Sub Providence Variant_Uncategorized_Location,Sub Rancho Cucamonga Variant_Architecture_Lifestyle_Location,Sub Richmond Variant_Architecture_Location,Sub Roseville Variant_Architecture_Location,Sub Salem Variant_Sci-Fi_Culture_Location,Sub Santa Ana Variant_Sci-Fi_Culture_Location,Sub Santa Clarita Variant_Uncategorized_Location,Sub Santa Rosa Variant_Sci-Fi_Nature_Location,Sub Santa Rosa_Sci-Fi_Nature_Location,Sub Santa Rosa__Location,Sub Simi Valley Variant_Pop Culture_Culture_Retro_Location,Sub Spokane Variant_Architecture_Location,Sub Tacoma Variant_Architecture_Culture_Retro_Location,Sub Temecula Variant_Lifestyle_Location,Sub Thousand Oaks Variant_Uncategorized_Location,Sub Topeka Variant_Architecture_Folk Art_Location,Sub Torrance Variant_Sci-Fi_Location,Sub Victorville Variant_Uncategorized_Location,Sumi-e Painting,Summer Art,Summer Fashion,Surf Wood Sign_Retro,Surrealism,Surrealism Art,Surrealist Painting,Surrealist Sculpture,Sushi Making,Sustainable Architecture,Sustainable Art Variant_Uncategorized,Sustainable Art_Uncategorized,Sustainable Fashion,Swing Dance,Sydney,Symbolism Art,Synthetic Cubism,Taj Mahal,Takashi Murakami,Talavera Pottery,Tamara de Lempicka,Tango Dance,Tap Dance,Tarot Cards_Occult,Tarot_Occult,Tatooine,Tattoo Print_Retro_Tattoo Art,Tech City Variant_Architecture_Nature_Location,Techno Music Visuals,Technotopia Variant_Architecture_Nature,Temporary Art Installations,Terrarium Bottle_Still Life,Terrarium_Uncategorized,Teslapunk_Portraiture,Textile Art,Textile Design,Textile Sculpture,Thai Art,Thai Cuisine,Thomas Gainsborough,Thriller,Thriller Films,Thriller Literature,Tibetan Thangka Painting,Tiki Bar_Uncategorized,Tiki Cocktail_Uncategorized,Tiki 
Idol_Uncategorized,Tiki Mug_Retro,Tiki Outdoor Shower_Uncategorized,Tiki Totem_Sculpture,Titian,Toei_Retro_Animation,Tokyo,Tokyo cityscape,Torture Chamber_Uncategorized,Torture Device_Horror_Horror & Dark,Tortured Prisoner_Uncategorized,Tortured Soul_Uncategorized,Toy Design,Traditional Animation,Traditional Dance,Traditional Japanese Architecture,Traditional Pottery,Tragedy,Tragedy Literature,Tranquil Art,Transavantgarde Variant_Uncategorized,Transavantgarde_Uncategorized,Transgressive Art Variant_Uncategorized,Transgressive Art_Uncategorized,Travel Photography,Tropical Bathroom_Uncategorized,Tropical Cocktail_Uncategorized,Tropical Hotel_Uncategorized,Tropical Luau_Uncategorized,Twin Peaks,Typography Design,UPA_Comics_Animation,Ukiyo-e (Japanese Woodblock Printing),Ukiyo-e Art,Undead Gluttony_Architecture,Undead Portrait_Portraiture,Undefined_Emerging_Artist,Under Albany Variant_Architecture_Surrealism_Location,Under Bakersfield Variant_Uncategorized_Location,Under Berlin Variant_Retro_Surrealism_Location,Under Berlin_Retro_Surrealism_Location,Under Berlin__Location,Under Bismarck Variant_Uncategorized_Location,Under Charleston Variant_Architecture_Location,Under Chicago Variant_Architecture_Portraiture_Culture_Retro_Location,Under Eugene Variant_Folk Art_Location,Under Fargo Variant_Architecture_Location,Under Hartford Variant_Architecture_Location,Under Honolulu Variant_Architecture_Location,Under Istanbul Variant_Architecture_Location,Under Jackson Variant_Folk Art_Location,Under Juneau Variant_Architecture_Location,Under London Variant_Architecture_Location,Under Montreal Variant_Architecture_Location,Under Nashville Variant_Uncategorized_Location,Under Oklahoma City Variant_Architecture_Location,Under Omaha Variant_Culture_Location,Under Paris Variant_Uncategorized_Location,Under Sacramento Variant_Uncategorized_Location,Under Santa Fe Variant_Uncategorized_Location,Under St. 
Paul Variant_Architecture_Location,Under Tallahassee Variant_Sci-Fi_Retro_Architecture_Location,Under Trenton Variant_Uncategorized_Location,Underground Anchorage Variant_Architecture_Location,Underground Austin Variant_Uncategorized_Location,Underground Chula Vista Variant_Uncategorized_Location,Underground Columbus Variant_Retro_Location,Underground Concord Variant_Culture_Location,Underground Helena Variant_Architecture_Location,Underground Huntington Beach Variant_Architecture_Culture_Location,Underground Lansing Variant_Culture_Location,Underground Lincoln Variant_Uncategorized_Location,Underground Little Rock Variant_Uncategorized_Location,Underground Portland Variant_Sci-Fi_Location,Underground Riverside Variant_Culture_Location,Underground Rome Variant_Architecture_Location,Underground Salt Lake City Variant_Architecture_Location,Underground San Jose Variant_Uncategorized_Location,Underground Seattle Variant_Uncategorized_Location,Underground Springfield Variant_Folk Art_Location,Underground Wichita Variant_Folk Art_Location,Underwater Photography,Urban Fantasy Art,Urban Landscape Photography,Urban Photography,Urban Sculpture,User-Centered Design,Utrecht cityscape,VR Art Variant_Uncategorized,Vacuous Grimace_Uncategorized,Valhalla,Valve,Vampire_Portraiture_Horror,Vaporgram_Retro,Vaporwave City_Sci-Fi_Dystopia_Architecture_Location,Vaporwave Graphics_Retro_Surrealism_Graphic Design,Vaporwave Retro_Sci-Fi_Retro,Vaporwave Sunset_Uncategorized,Vaporwave_Architecture_Retro,Vatican City,Vector Portrait_Portraiture,Venezuelan Art,Venice,Verbatim Theater,Victorian Architecture,Victorian Fashion,Victorian Laboratory_Occult_Still Life,Video Art,Video Art_Uncategorized,Video Games Variant_Games,Video Games_Games_Culture,Video Mapping,Vienna,Vienna cityscape,Vietnamese Art,Vietnamese Cuisine,Vija Celmins,Vincent Van Gogh,Vintage Baseball_Retro_Photography,Vintage Fashion,Vintage Halloween Costume_Retro,Vintage Halloween Mask_Retro,Vintage Halloween_Retro,Vintage Robot Toy_Sci-Fi_Retro,Vintage Tattoo Flash_Retro_Tattoo Art,Vintage Tattoo Print_Retro_Tattoo Art,Vintage Travel Poster_Retro_Nature_Landscape,Virtual Art Variant_Uncategorized,Virtual Art_Sci-Fi,Virtual Reality (VR) Art,Virtual Reality Art,Visionary Art (1)_Uncategorized,Visual Effects (VFX) Design,Vogue Cover_Photography_Fashion,Volcano Lair_Uncategorized,Voodoo Altar_Occult,Voodoo Ceremony_Occult,Voodoo Doll_Retro_Occult,Voodoo Queen_Portraiture_Occult,Voodoo Shop_Occult,Voodoo_Occult,Vorticism_Uncategorized,Wallace and Gromit,Waltz Dance,War Films,Wassily Kandinsky,Water Art,Watercolor Painting,Weaving,Web Design,Wedding Fashion,Wedding Photography,Wellington cityscape,West African Art,Westeros,Wildlife Photography,William Kentridge,Winter Art,Winter Fashion,Wolfgang Tillmans,Womenswear Fashion,Wonderland,Wood Carving,Woodblock Art_Nature,Woodblock Print_Uncategorized,Woodblock Printing,Woodcut,Workwear Fashion,World Music,Xiamen cityscape,Xilam_Comics_Animation,Yayoi Kusama,Yellowstone National Park,Yokohama cityscape,Zion Variant_Culture,Zurich cityscape,_Uncategorized,ads-advertising_Uncategorized,ads-automotive_Uncategorized,ads-corporate_Uncategorized,ads-fashion editorial_Fashion,ads-food photography_Photography,ads-luxury_Uncategorized,ads-real estate_Photography,ads-retail_Uncategorized,artstyle-abstract expressionism_Uncategorized,artstyle-abstract_Uncategorized,artstyle-art deco_Uncategorized,artstyle-art 
nouveau_Nature,artstyle-constructivist_Uncategorized,artstyle-cubist_Uncategorized,artstyle-expressionist_Uncategorized,artstyle-graffiti_Architecture_Graffiti,artstyle-hyperrealism_Photography,artstyle-impressionist_Uncategorized,artstyle-pointillism_Uncategorized,artstyle-pop art_Culture,artstyle-psychedelic_Surrealism,artstyle-renaissance_Uncategorized,artstyle-steampunk_Uncategorized,artstyle-surrealist_Surrealism,artstyle-typography_Uncategorized,artstyle-watercolor_Uncategorized,carpint_Gothic,citz_Sci-Fi_Architecture,coolio_Portraiture,enhance_Uncategorized,futuristic-biomechanical cyberpunk_Sci-Fi_Dystopia,futuristic-biomechanical_Sci-Fi,futuristic-cybernetic robot_Sci-Fi,futuristic-cybernetic_Sci-Fi,futuristic-cyberpunk cityscape_Sci-Fi_Architecture,futuristic-futuristic_Sci-Fi,futuristic-retro cyberpunk_Sci-Fi_Retro,futuristic-retro futurism_Sci-Fi_Retro,futuristic-sci-fi_Sci-Fi,futuristic-vaporwave_Sci-Fi_Retro,game-bubble bobble_Fantasy,game-cyberpunk game_Sci-Fi_Dystopia_Games_Digital Media,game-fighting game_Games,game-gta_Uncategorized,game-mario_Fantasy_Comics,game-minecraft_Still Life,game-pokemon_Fantasy,game-retro arcade_Retro_Games,game-retro game_Retro,game-rpg fantasy game_Fantasy_Games,game-strategy game_Games,game-streetfighter_Uncategorized,game-zelda_Fantasy,getting there_Portraiture,girlz_Fashion_Horror_Horror & Dark_Gothic,gotit jinx_Tattoo Art,greatz_Portraiture,gsssggg_Portraiture,hoop_Portraiture,jinx_Tattoo Art,jinxed_Portraiture,kjkjkjj_Digital Media_Still Life_Comics,kool_Portraiture,misc-architectural_Uncategorized,misc-disco_Retro,misc-dreamscape_Fantasy_Surrealism,misc-dystopian_Dystopia,misc-fairy tale_Fantasy,misc-gothic_Gothic,misc-grunge_Retro,misc-horror_Horror,misc-horror_Horror_Horror & Dark,misc-kawaii_Uncategorized,misc-lovecraftian_Surrealism_Horror,misc-macabre_Gothic,misc-manga_Uncategorized,misc-metropolis_Sci-Fi_Architecture,misc-minimalist_Uncategorized,misc-monochrome_Uncategorized,misc-nautical_Uncategorized,misc-space_Sci-Fi,misc-stained glass_Uncategorized,misc-techwear fashion_Sci-Fi_Fashion_Architecture,misc-tribal_Uncategorized,misc-zentangle_Uncategorized,mkkk_Portraiture_Digital Media_Animation,papercraft-collage_Uncategorized,papercraft-flat papercut_Uncategorized,papercraft-kirigami_Uncategorized,papercraft-paper mache_Uncategorized,papercraft-paper quilling_Uncategorized,papercraft-papercut collage_Uncategorized,papercraft-papercut shadow box_Uncategorized,papercraft-stacked papercut_Uncategorized,papercraft-thick layered papercut_Uncategorized,photo-alien_Sci-Fi_Photography,photo-film noir_Photography,photo-hdr_Photography,photo-long exposure_Photography_Surrealism,photo-neon noir_Photography,photo-silhouette_Photography,photo-tilt-shift_Photography,sai-3d-model_Uncategorized,sai-analog film_Retro_Photography,sai-anime_Uncategorized,sai-cinematic_Uncategorized,sai-comic book_Uncategorized,sai-craft clay_Sculpture,sai-digital art_Digital Media,sai-fantasy art_Fantasy_Surrealism,sai-isometric_Uncategorized,sai-line art_Uncategorized,sai-lowpoly_Uncategorized,sai-neonpunk_Uncategorized,sai-origami_Uncategorized,sai-photographic_Photography,sai-pixel art_Uncategorized,sai-texture_Uncategorized,stfhgff_Photography" + ], + [ + 155, + 130, + 0, + 11, + 1, + "STRING" + ], + [ + 156, + 125, + 0, + 135, + 0, + "SCHEDULE" + ], + [ + 157, + 135, + 0, + 126, + 0, + "STRING" + ], + [ + 158, + 135, + 0, + 128, + 0, + "STRING" + ], + [ + 159, + 25, + 0, + 135, + 1, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 
0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C5_SimpleLoadScheduledModels_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C5_SimpleLoadScheduledModels_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..07c5dbc518176b05a310c5b2b8daa854fe9cdfb8 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C5_SimpleLoadScheduledModels_Demo_v01b.json @@ -0,0 +1,658 @@ +{ + "last_node_id": 16, + "last_link_id": 24, + "nodes": [ + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 1240, + 680 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 20 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 1240, + 840 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 21 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative, \nnsfw" + ] + }, + { + "id": 8, + "type": "KSampler", + "pos": [ + 1580, + 600 + ], + "size": [ + 320, + 470 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 19 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 24 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 485968570890081, + "randomize", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 10, + "type": "VAELoader", + "pos": [ + 2000, + 450 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 9, + "type": "VAEDecode", + "pos": [ + 1990, + 600 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 10 + }, + { + "name": "vae", + "type": "VAE", + "link": 11 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 11, + "type": "PreviewImage", + "pos": [ + 2270, + 600 + ], + "size": [ + 210, + 250 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 12 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 1, + "type": "CR Model List", + "pos": [ + 640, + 240 + ], + "size": { + "0": 
460, + "1": 294 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_LIST", + "type": "MODEL_LIST", + "links": [ + 18 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Model List" + }, + "widgets_values": [ + "SD1_5\\dalcefoV3Anime_dalcefoV3Anime.safetensors", + "DAL", + "SD1_5\\CounterfeitV25_25.safetensors", + "COU", + "SD1_5\\epicrealism_newEra.safetensors", + "EPI", + "SD1_5\\aZovyaPhotoreal_v2.safetensors", + "ZOV", + "None", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 4, + "type": "CR Current Frame", + "pos": [ + 320, + 640 + ], + "size": { + "0": 240, + "1": 80 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 4, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 22 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 15, + "type": "CR Simple Schedule", + "pos": [ + 330, + 330 + ], + "size": { + "0": 250, + "1": 200 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Schedule" + }, + "widgets_values": [ + "0, ZOV\n3, COU\n6, DAL\n9, EPI", + "Model", + "M1", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 14, + "type": "CR Load Scheduled Models", + "pos": [ + 640, + 600 + ], + "size": { + "0": 460, + "1": 200 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": 18 + }, + { + "name": "schedule", + "type": "SCHEDULE", + "link": 23 + }, + { + "name": "current_frame", + "type": "INT", + "link": 22, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 20, + 21 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Scheduled Models" + }, + "widgets_values": [ + "Schedule", + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors", + "M1", + "SD1_5\\Comfyroll_v1_fp16_pruned.safetensors", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 16, + "type": "EmptyLatentImage", + "pos": [ + 1580, + 430 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 5, + "type": "PrimitiveNode", + "pos": [ + 50, + 640 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "INT", + 
"type": "INT", + "links": [ + 4 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 4, + 5, + 0, + 4, + 0, + "INT" + ], + [ + 8, + 6, + 0, + 8, + 1, + "CONDITIONING" + ], + [ + 9, + 7, + 0, + 8, + 2, + "CONDITIONING" + ], + [ + 10, + 8, + 0, + 9, + 0, + "LATENT" + ], + [ + 11, + 10, + 0, + 9, + 1, + "VAE" + ], + [ + 12, + 9, + 0, + 11, + 0, + "IMAGE" + ], + [ + 18, + 1, + 0, + 14, + 0, + "MODEL_LIST" + ], + [ + 19, + 14, + 0, + 8, + 0, + "MODEL" + ], + [ + 20, + 14, + 1, + 6, + 0, + "CLIP" + ], + [ + 21, + 14, + 1, + 7, + 0, + "CLIP" + ], + [ + 22, + 4, + 0, + 14, + 2, + "INT" + ], + [ + 23, + 15, + 0, + 14, + 1, + "SCHEDULE" + ], + [ + 24, + 16, + 0, + 8, + 3, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C6_LoadScheduledModelsLoRAs_Demo_v0.1a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C6_LoadScheduledModelsLoRAs_Demo_v0.1a.json new file mode 100644 index 0000000000000000000000000000000000000000..5020c834903f36e82fc68e7b8542a90d09283471 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_C6_LoadScheduledModelsLoRAs_Demo_v0.1a.json @@ -0,0 +1,866 @@ +{ + "last_node_id": 26, + "last_link_id": 53, + "nodes": [ + { + "id": 10, + "type": "VAELoader", + "pos": [ + 2380, + 460 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 16, + "type": "EmptyLatentImage", + "pos": [ + 1990, + 430 + ], + "size": [ + 320, + 110 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 4, + "type": "CR Current Frame", + "pos": [ + 320, + 660 + ], + "size": { + "0": 240, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 4, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 20, + 43 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 13, + "type": "CR Central Schedule", + "pos": [ + 60, + 40 + ], + "size": { + "0": 400, + "1": 530 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 18, + 47 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": 
[ + "0, AYO\n3, COU\n6, ZOV\n9, EPI", + "Model", + "M1", + "0, MAY, 1.0, 1.0\n4, HIL, 1.0, 1.0\n8, LIL, 1.0, 1.0", + "LoRA", + "L1", + "schedule", + "Text", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 11, + "type": "PreviewImage", + "pos": [ + 2650, + 620 + ], + "size": { + "0": 210, + "1": 250 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 12 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 9, + "type": "VAEDecode", + "pos": [ + 2370, + 620 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 10 + }, + { + "name": "vae", + "type": "VAE", + "link": 11 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 17, + "type": "CLIPTextEncode", + "pos": [ + 1630, + 690 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 48 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 1640, + 850 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 49 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative, \nnsfw" + ] + }, + { + "id": 14, + "type": "CR Load Scheduled Models", + "pos": [ + 640, + 620 + ], + "size": { + "0": 460, + "1": 170 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": 19 + }, + { + "name": "schedule", + "type": "SCHEDULE", + "link": 18 + }, + { + "name": "current_frame", + "type": "INT", + "link": 20, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Scheduled Models" + }, + "widgets_values": [ + "Schedule", + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors", + "M1", + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 8, + "type": "KSampler", + "pos": [ + 1990, + 620 + ], + "size": [ + 320, + 470 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 50 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 28 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 26 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + 
"links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1018201769555609, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 21, + "type": "CR LoRA List", + "pos": [ + 1190, + 210 + ], + "size": { + "0": 315, + "1": 342 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "lora_list", + "type": "lora_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_LIST", + "type": "LORA_LIST", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR LoRA List" + }, + "widgets_values": [ + "SD1_5\\character_pokemon_hilda_v3.safetensors", + "HIL", + 1, + 1, + "SD1_5\\character_pokemon_lillie_v5.safetensors", + "LIL", + 1, + 1, + "SD1_5\\character_pokemon_may_v6.safetensors", + "MAY", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 23, + "type": "CR Load Scheduled LoRAs", + "pos": [ + 1190, + 620 + ], + "size": { + "0": 320, + "1": 260 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "clip", + "type": "CLIP", + "link": 44 + }, + { + "name": "lora_list", + "type": "LORA_LIST", + "link": 46 + }, + { + "name": "schedule", + "type": "SCHEDULE", + "link": 47 + }, + { + "name": "current_frame", + "type": "INT", + "link": 43, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 48, + 49 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Load Scheduled LoRAs" + }, + "widgets_values": [ + "Schedule", + 0, + "L1", + "SD1_5\\ArknightsSuzuran_20.safetensors", + 1, + 1, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 1, + "type": "CR Model List", + "pos": [ + 640, + 280 + ], + "size": { + "0": 460, + "1": 294 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_LIST", + "type": "MODEL_LIST", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Model List" + }, + "widgets_values": [ + "SD1_5\\ayonimix_V4VAEBaked.safetensors", + "AYO", + "SD1_5\\CounterfeitV25_25.safetensors", + "COU", + "SD1_5\\Comfyroll_v1_fp16_pruned.safetensors", + "EPI", + "SD1_5\\cocotifamix_v20This.safetensors", + "ZOV", + "None", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 5, + "type": "PrimitiveNode", + "pos": [ + 50, + 660 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 4 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 4, + 5, + 0, + 4, + 0, + "INT" + ], + [ + 9, + 7, + 0, + 8, + 2, + "CONDITIONING" + ], + [ + 10, + 8, + 0, + 9, + 0, + "LATENT" + ], + [ + 11, + 
10, + 0, + 9, + 1, + "VAE" + ], + [ + 12, + 9, + 0, + 11, + 0, + "IMAGE" + ], + [ + 18, + 13, + 0, + 14, + 1, + "SCHEDULE" + ], + [ + 19, + 1, + 0, + 14, + 0, + "MODEL_LIST" + ], + [ + 20, + 4, + 0, + 14, + 2, + "INT" + ], + [ + 26, + 16, + 0, + 8, + 3, + "LATENT" + ], + [ + 28, + 17, + 0, + 8, + 1, + "CONDITIONING" + ], + [ + 43, + 4, + 0, + 23, + 4, + "INT" + ], + [ + 44, + 14, + 1, + 23, + 1, + "CLIP" + ], + [ + 46, + 21, + 0, + 23, + 2, + "LORA_LIST" + ], + [ + 47, + 13, + 0, + 23, + 3, + "SCHEDULE" + ], + [ + 48, + 23, + 1, + 17, + 0, + "CLIP" + ], + [ + 49, + 23, + 1, + 7, + 0, + "CLIP" + ], + [ + 50, + 23, + 0, + 8, + 0, + "MODEL" + ], + [ + 53, + 14, + 0, + 23, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D1_CyclerNodes_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D1_CyclerNodes_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..17ff2d7aca664328200b991d130427be96f80654 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D1_CyclerNodes_Demo_v01b.json @@ -0,0 +1,1287 @@ +{ + "last_node_id": 56, + "last_link_id": 56, + "nodes": [ + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1480, + 410 + ], + "size": [ + 320, + 110 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 56, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": [ + 320, + 470 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 32 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 305779246565633, + "randomize", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 660 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 30 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ 
+ 1120, + 820 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 31 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative, \nnsfw" + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1870, + 730 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 35, + "type": "CR LoRA List", + "pos": [ + 630, + 170 + ], + "size": { + "0": 300, + "1": 342 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "lora_list", + "type": "lora_LIST", + "link": 39 + } + ], + "outputs": [ + { + "name": "LORA_LIST", + "type": "LORA_LIST", + "links": [ + 38 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR LoRA List" + }, + "widgets_values": [ + "SD1_5\\add_detail.safetensors", + "ADD", + 1, + 1, + "SD1_5\\Cyberpunk-000010.safetensors", + "CYB", + 1, + 1, + "None", + "", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 28, + "type": "CR Cycle LoRAs", + "pos": [ + 630, + 580 + ], + "size": { + "0": 310, + "1": 190 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 48 + }, + { + "name": "clip", + "type": "CLIP", + "link": 49 + }, + { + "name": "lora_list", + "type": "LORA_LIST", + "link": 38 + }, + { + "name": "current_frame", + "type": "INT", + "link": 54, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 30, + 31 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Cycle LoRAs" + }, + "widgets_values": [ + "Sequential", + 3, + 1, + "Sequential" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 48, + "type": "CR Cycle Models", + "pos": [ + 200, + 580 + ], + "size": { + "0": 320, + "1": 190 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 45 + }, + { + "name": "clip", + "type": "CLIP", + "link": 46 + }, + { + "name": "model_list", + "type": "MODEL_LIST", + "link": 47 + }, + { + "name": "current_frame", + "type": "INT", + "link": 53, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + 
"step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Models" + }, + "widgets_values": [ + "Sequential", + 2, + 2, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + -230, + 620 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ayonimix_V4VAEBaked.safetensors" + ] + }, + { + "id": 50, + "type": "Note", + "pos": [ + 370, + 840 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The cycler nodes process each model or LoRA in the lists in sequence based on the keyframe interval and number of loops." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 52, + "type": "Note", + "pos": [ + 600, + 840 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Aiases are used by scheduler nodes, they are not used by cycler nodes." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 5, + "type": "CR Model List", + "pos": [ + 210, + 220 + ], + "size": { + "0": 315, + "1": 294 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": 35 + } + ], + "outputs": [ + { + "name": "MODEL_LIST", + "type": "MODEL_LIST", + "links": [ + 47 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Model List" + }, + "widgets_values": [ + "SD1_5\\dalcefoV3Anime_dalcefoV3Anime.safetensors", + "DAL", + "None", + "", + "SD1_5\\epicrealism_newEra.safetensors", + "EPI", + "None", + "", + "None", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 49, + "type": "Note", + "pos": [ + -780, + 370 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press Reset in the Animation Builder and then press the Queue button, Do not use queue prompt in the ComfyUI menu." 
+ ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 1480, + 100 + ], + "size": { + "0": 315, + "1": 238 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 56 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "2:3 portrait 512x768", + "Off", + 1, + 1 + ] + }, + { + "id": 54, + "type": "CR Current Frame", + "pos": [ + -200, + 350 + ], + "size": { + "0": 240, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 52, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 53, + 54, + 55 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 55, + "type": "Note", + "pos": [ + -200, + 190 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 56, + "type": "Note", + "pos": [ + -190, + 70 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 2280, + 330 + ], + "size": { + "0": 380, + "1": 290 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 55, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 29, + "type": "CR Model List", + "pos": [ + 210, + -120 + ], + "size": { + "0": 315, + "1": 294 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_LIST", + "type": "MODEL_LIST", + "links": [ + 35 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Model List" + }, + "widgets_values": [ + "SD1_5\\aZovyaPhotoreal_v2.safetensors", + "ZOV", + "SD1_5\\CounterfeitV25_25.safetensors", + "COU", + "None", + "", + "None", + "", + "None", + "" + ], + "color": "#222", + 
"bgcolor": "#000" + }, + { + "id": 39, + "type": "CR LoRA List", + "pos": [ + 630, + -210 + ], + "size": { + "0": 290, + "1": 342 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "lora_list", + "type": "lora_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_LIST", + "type": "LORA_LIST", + "links": [ + 39 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR LoRA List" + }, + "widgets_values": [ + "None", + "", + 1, + 1, + "SD1_5\\ArknightsSuzuran_20.safetensors", + "SUZ", + 1, + 1, + "None", + "", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + -530, + 350 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 10, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 12, + 1, + 1, + 12, + 1, + "frame: 0 / 11", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 25, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 30, + 28, + 1, + 11, + 0, + "CLIP" + ], + [ + 31, + 28, + 1, + 12, + 0, + "CLIP" + ], + [ + 32, + 28, + 0, + 13, + 0, + "MODEL" + ], + [ + 35, + 29, + 0, + 5, + 0, + "MODEL_LIST" + ], + [ + 38, + 35, + 0, + 28, + 2, + "LORA_LIST" + ], + [ + 39, + 39, + 0, + 35, + 0, + "lora_LIST" + ], + [ + 45, + 47, + 0, + 48, + 0, + "MODEL" + ], + [ + 46, + 47, + 1, + 48, + 1, + "CLIP" + ], + [ + 47, + 5, + 0, + 48, + 2, + "MODEL_LIST" + ], + [ + 48, + 48, + 0, + 28, + 0, + "MODEL" + ], + [ + 49, + 48, + 1, + 28, + 1, + "CLIP" + ], + [ + 52, + 24, + 0, + 54, + 0, + "INT" + ], + [ + 53, + 54, + 0, + 48, + 3, + "INT" + ], + [ + 54, + 54, + 0, + 28, + 3, + "INT" + ], + [ + 55, + 54, + 0, + 26, + 1, + "INT" + ], + [ + 56, + 14, + 3, + 15, + 2, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D2_TextCycler_Demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D2_TextCycler_Demo_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..29e9205029c4a3f616948c4fb1c1c2322c07a3fc --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D2_TextCycler_Demo_v01.json @@ -0,0 +1,1459 @@ +{ + "last_node_id": 133, + "last_link_id": 174, + "nodes": [ + { + "id": 52, + "type": "Note", + "pos": [ + -189.97908726170903, + 139.1117772231442 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 0, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press 
Reset in the Animation Builder and then press the Queue button. Do not use Queue Prompt in the ComfyUI menu." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 600.0209127382922, + 329.1117772231446 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 78 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 83, + 84 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "title": "Load Initial Checkpoint", + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\dreamshaper_7.safetensors" + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1960.020912738287, + 229.11177722314451 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 85 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1910.0209127382873, + 349.11177722314466 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 116, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 780.0209127382919, + -150.88822277685603 + ], + "size": { + "0": 320, + "1": 240 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 145 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 146 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 147 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "2:3 portrait 512x768", + "Off", + 1, + 1 + ] + }, + { + "id": 115, + "type": "EmptyLatentImage", + "pos": [ + 1140.0209127382889, + -140.88822277685603 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 145, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 146, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 147, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 144 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 26, +
"type": "Save Image Sequence (mtb)", + "pos": [ + 2330.0209127382855, + 109.111777223144 + ], + "size": { + "0": 380, + "1": 290 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 168, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 117, + "type": "Reroute", + "pos": [ + -199.58456613570323, + 782.2872045326562 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 167 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 150, + 169, + 170 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 124, + "type": "CR Cycle Text Simple", + "pos": [ + 53.128190699999955, + 1478.0512829 + ], + "size": { + "0": 310, + "1": 250 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 169, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 160 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text Simple" + }, + "widgets_values": [ + "Sequential", + 1, + 1, + "rainbow", + "castle", + "tropical island", + "mountain covered in snow", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 111, + "type": "CR Prompt Text", + "pos": [ + 713.1281906999992, + 968.0512829 + ], + "size": { + "0": 320, + "1": 90 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 137 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "in background" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 110, + "type": "CR Prompt Text", + "pos": [ + 713.1281906999992, + 838.0512829 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 140 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "1girl with " + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 93, + "type": "CR Text List Simple", + "pos": [ + 53.128190699999955, + 1158.0512829 + ], + "size": { + "0": 310, + "1": 154 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + } + ], + "outputs": [ + { + "name": "TEXT_LIST_SIMPLE", + "type": "TEXT_LIST_SIMPLE", + "links": [ + 113 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text List Simple" + }, + "widgets_values": [ + "tropical island", + "mountain covered in snow", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + 
"id": 91, + "type": "CR Cycle Text Simple", + "pos": [ + 433.1281907000002, + 1158.0512829 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": 113 + }, + { + "name": "current_frame", + "type": "INT", + "link": 150, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 172 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text Simple" + }, + "widgets_values": [ + "Sequential", + 3, + 2, + "rainbow", + "castle", + "house", + "village", + "mine", + "shop" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 125, + "type": "CR Cycle Text Simple", + "pos": [ + 433.1281907000002, + 1478.0512829 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 170, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + }, + { + "name": "text_1", + "type": "STRING", + "link": 160, + "widget": { + "name": "text_1", + "config": [ + "STRING", + { + "multiline": false, + "default": "" + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 173 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text Simple" + }, + "widgets_values": [ + "Sequential", + 3, + 2, + "rainbow", + "castle", + "house", + "village", + "mine", + "shop" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 130, + "type": "CR Text Input Switch", + "pos": [ + 833.1281906999992, + 1158.0512829 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "text1", + "type": "STRING", + "link": 172 + }, + { + "name": "text2", + "type": "STRING", + "link": 173 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 174 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Text Input Switch" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + 50.020912738290995, + 139.1117772231442 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 10, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 166 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3, + "slot_index": 1 + }, + { + "name": "count", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 12, + 1, + 1, + 9, + 1, + "frame: 0 / 8", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 127, + "type": "CR Current Frame", + "pos": [ + 340.0209127382915, + 139.1117772231442 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 166, + "widget": { + "name": "index", + 
"config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 167, + 168 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "max_frames", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 131, + "type": "Note", + "pos": [ + 833.1281906999992, + 1288.0512829 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 11, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Switch betten the two scenarios" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 56, + "type": "Note", + "pos": [ + 370.0209127382916, + -40.88822277685599 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence. The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 133, + "type": "Note", + "pos": [ + 2330.0209127382855, + -70.88822277685595 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Use the GIF Writer workflow to compile the output images into a GIF" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 75, + "type": "KSampler", + "pos": [ + 1470.0209127382889, + 149.1117772231443 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 78 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 81 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 82 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 144 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 85 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 927131295014992, + "fixed", + 20, + 10, + "dpmpp_3m_sde_gpu", + "karras", + 0.7 + ] + }, + { + "id": 109, + "type": "Text Concatenate", + "pos": [ + 1113.1281907000011, + 898.0512829 + ], + "size": { + "0": 315, + "1": 118 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "text_a", + "type": "STRING", + "link": 140 + }, + { + "name": "text_b", + "type": "STRING", + "link": 174 + }, + { + "name": "text_c", + "type": "STRING", + "link": 137 + }, + { + "name": "text_d", + "type": "STRING", + "link": null + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 130 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Text Concatenate" + }, + "widgets_values": [ + "false" + ] + }, + { + "id": 76, + "type": "CLIPTextEncode", + "pos": [ + 1130, + 330 + ], + "size": { + "0": 230, + "1": 90 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 84 + }, + { + "name": "text", + "type": "STRING", + "link": 130, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 81 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name 
for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "kaleidoscope, colorful, vivid, crystals, centered, radial symmetry" + ] + }, + { + "id": 77, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 480 + ], + "size": { + "0": 240, + "1": 76 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 83 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 82 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 129, + "type": "Note", + "pos": [ + -210, + 1160 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The text list can be extended with additional Text List nodes" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 128, + "type": "Note", + "pos": [ + -220, + 1480 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Text cyclers can be chained together to create complex sequences" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 132, + "type": "Note", + "pos": [ + 420, + 890 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 12, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "In the demo, the cyclers are being use to generate variable prompts" + ], + "color": "#233", + "bgcolor": "#355" + } + ], + "links": [ + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 25, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 78, + 47, + 0, + 75, + 0, + "MODEL" + ], + [ + 81, + 76, + 0, + 75, + 1, + "CONDITIONING" + ], + [ + 82, + 77, + 0, + 75, + 2, + "CONDITIONING" + ], + [ + 83, + 47, + 1, + 77, + 0, + "CLIP" + ], + [ + 84, + 47, + 1, + 76, + 0, + "CLIP" + ], + [ + 85, + 75, + 0, + 16, + 0, + "LATENT" + ], + [ + 113, + 93, + 0, + 91, + 0, + "TEXT_LIST_SIMPLE" + ], + [ + 130, + 109, + 0, + 76, + 1, + "STRING" + ], + [ + 137, + 111, + 0, + 109, + 2, + "STRING" + ], + [ + 140, + 110, + 0, + 109, + 0, + "STRING" + ], + [ + 144, + 115, + 0, + 75, + 3, + "LATENT" + ], + [ + 145, + 116, + 0, + 115, + 0, + "INT" + ], + [ + 146, + 116, + 1, + 115, + 1, + "INT" + ], + [ + 147, + 116, + 3, + 115, + 2, + "INT" + ], + [ + 150, + 117, + 0, + 91, + 1, + "INT" + ], + [ + 160, + 124, + 0, + 125, + 2, + "STRING" + ], + [ + 166, + 24, + 0, + 127, + 0, + "INT" + ], + [ + 167, + 127, + 0, + 117, + 0, + "*" + ], + [ + 168, + 127, + 0, + 26, + 1, + "INT" + ], + [ + 169, + 117, + 0, + 124, + 1, + "INT" + ], + [ + 170, + 117, + 0, + 125, + 1, + "INT" + ], + [ + 172, + 91, + 0, + 130, + 0, + "STRING" + ], + [ + 173, + 125, + 0, + 130, + 1, + "STRING" + ], + [ + 174, + 130, + 0, + 109, + 1, + "STRING" + ] + ], + "groups": [ + { + "title": "Cycle Text", + "bounding": [ + -253, + 707, + 1763, + 1077 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Simple Animation Flow (for SD1.5)", + "bounding": [ + -257, + -263, + 3059, + 933 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D3_ImageCycler_Demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D3_ImageCycler_Demo_v01.json new file mode 100644 index 
0000000000000000000000000000000000000000..8ce69def1965b2d09216fb6c14a3706549426ab2 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_D3_ImageCycler_Demo_v01.json @@ -0,0 +1,569 @@ +{ + "last_node_id": 30, + "last_link_id": 55, + "nodes": [ + { + "id": 2, + "type": "CR Cycle Images Simple", + "pos": [ + 600, + 590 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": null + }, + { + "name": "image_2", + "type": "IMAGE", + "link": null + }, + { + "name": "image_3", + "type": "IMAGE", + "link": null + }, + { + "name": "image_4", + "type": "IMAGE", + "link": null + }, + { + "name": "image_5", + "type": "IMAGE", + "link": null + }, + { + "name": "image_list_simple", + "type": "IMAGE_LIST_SIMPLE", + "link": 34 + }, + { + "name": "current_frame", + "type": "INT", + "link": 48, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Images Simple" + }, + "widgets_values": [ + "Sequential", + 1, + 9, + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 1, + "type": "PreviewImage", + "pos": [ + 1020, + 590 + ], + "size": { + "0": 210, + "1": 250 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 45 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 17, + "type": "CR Image List Simple", + "pos": [ + 220, + 600 + ], + "size": { + "0": 300, + "1": 130 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 46 + }, + { + "name": "image_2", + "type": "IMAGE", + "link": 52 + }, + { + "name": "image_3", + "type": "IMAGE", + "link": 54 + }, + { + "name": "image_4", + "type": "IMAGE", + "link": 55 + }, + { + "name": "image_5", + "type": "IMAGE", + "link": null + }, + { + "name": "image_list_simple", + "type": "IMAGE_LIST_SIMPLE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE_LIST_SIMPLE", + "type": "IMAGE_LIST_SIMPLE", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Image List Simple" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 25, + "type": "Note", + "pos": [ + -170, + 240 + ], + "size": [ + 210, + 90 + ], + "flags": {}, + "order": 0, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Reset to 0 after each test run\n\nSet the batch count in Queue Prompt to the number of frames you want to process " + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 30, + "type": "Note", + "pos": [ + 1270, + 590 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The images will cycle in the preview" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 26, + "type": "LoadImage", + "pos": [ + -50, + 580 + ], + "size": [ + 210, + 310 + ], + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, +
"widgets_values": [ + "SDXL10__00008_ (2).png", + "image" + ] + }, + { + "id": 28, + "type": "LoadImage", + "pos": [ + -290, + 950 + ], + "size": [ + 210, + 310 + ], + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 54 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00029_.png", + "image" + ] + }, + { + "id": 29, + "type": "LoadImage", + "pos": [ + -50, + 950 + ], + "size": [ + 210, + 310 + ], + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 55 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00014_.png", + "image" + ] + }, + { + "id": 22, + "type": "LoadImage", + "pos": [ + -290, + 580 + ], + "size": [ + 210, + 310 + ], + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00017_ (2) (3).png", + "image" + ] + }, + { + "id": 27, + "type": "Note", + "pos": [ + -540, + 590 + ], + "size": { + "0": 210, + "1": 90 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Drag some images into these Load Image Nodes" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 23, + "type": "CR Current Frame", + "pos": [ + 170, + 400 + ], + "size": [ + 320, + 80 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 51, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 24, + "type": "PrimitiveNode", + "pos": [ + -170, + 400 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 51 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 34, + 17, + 0, + 2, + 5, + "IMAGE_LIST_SIMPLE" + ], + [ + 45, + 2, + 0, + 1, + 0, + "IMAGE" + ], + [ + 46, + 22, + 0, + 17, + 0, + "IMAGE" + ], + [ + 48, + 23, + 0, + 2, + 6, + "INT" + ], + [ + 51, + 24, + 0, + 23, + 0, + "INT" + ], + [ + 52, + 26, + 0, + 17, + 1, + "IMAGE" + ], + [ + 54, + 28, + 0, + 17, + 2, + "IMAGE" + ], + [ + 55, + 29, + 0, + 17, + 3, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E1_GradientNodes_Demo_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E1_GradientNodes_Demo_v01a.json new file mode 
100644 index 0000000000000000000000000000000000000000..d57cc9469ebdbbccc88921e722306207da07f35d --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E1_GradientNodes_Demo_v01a.json @@ -0,0 +1,965 @@ +{ + "last_node_id": 56, + "last_link_id": 60, + "nodes": [ + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1480, + 410 + ], + "size": { + "0": 320, + "1": 110 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 660 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 820 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative, " + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + }, + { + "name": "cfg", + "type": "FLOAT", + "link": 57, + "widget": { + "name": "cfg", + "config": [ + "FLOAT", + { + "default": 8, + "min": 0, + "max": 100 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 1480, + 90 + ], + "size": { + "0": 320, + "1": 240 + }, + 
"flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 58, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 2048 + } + ] + } + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "batch_size", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "custom", + "Off", + 1, + 1 + ] + }, + { + "id": 49, + "type": "CR Gradient Float", + "pos": [ + 1030, + 360 + ], + "size": { + "0": 320, + "1": 180 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 60, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 57 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Gradient Float" + }, + "widgets_values": [ + 10, + 20, + 3, + 8, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 2250, + 330 + ], + "size": { + "0": 380, + "1": 290 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 56, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1860, + 710 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 580, + 540 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "title": "Load Initial Checkpoint", + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 630, + 290 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 23, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] 
+ } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 56, + 59, + 60 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ] + }, + { + "id": 51, + "type": "CR Gradient Integer", + "pos": [ + 1030, + 90 + ], + "size": { + "0": 320, + "1": 180 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 59, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 58 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Gradient Integer" + }, + "widgets_values": [ + 512, + 1024, + 3, + 8, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 54, + "type": "Note", + "pos": [ + 1270, + -70 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The float gradient is changing the cfg with each frame, starting from the third frame" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 52, + "type": "Note", + "pos": [ + 70, + 330 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "To run this workflow, first press Reset in the Animation Builder and then press the Queue button. Do not use Queue Prompt in the ComfyUI menu." + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 53, + "type": "Note", + "pos": [ + 1030, + -70 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "In this example the integer gradient changes the width of the image with each frame, starting from the third frame" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 55, + "type": "Note", + "pos": [ + 1510, + -70 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 5, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Integer and float gradients can be attached to any widget with the same data type" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 56, + "type": "Note", + "pos": [ + 630, + 120 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence. 
The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 24, + "type": "Animation Builder (mtb)", + "pos": [ + 310, + 330 + ], + "size": { + "0": 210, + "1": 320 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "frame", + "type": "INT", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "0-1 (scaled)", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "loop_ended", + "type": "BOOLEAN", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Animation Builder (mtb)" + }, + "widgets_values": [ + 12, + 1, + 1, + 12, + 1, + "frame: 0 / 11", + "Done 😎!", + "reset", + "queue" + ], + "color": "#223", + "bgcolor": "#335" + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 23, + 24, + 0, + 25, + 0, + "INT" + ], + [ + 25, + 16, + 0, + 26, + 0, + "IMAGE" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 56, + 25, + 0, + 26, + 1, + "INT" + ], + [ + 57, + 49, + 0, + 13, + 4, + "FLOAT" + ], + [ + 58, + 51, + 0, + 14, + 0, + "INT" + ], + [ + 59, + 25, + 0, + 51, + 0, + "INT" + ], + [ + 60, + 25, + 0, + 49, + 0, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E2_IncrementNodes_Demo_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E2_IncrementNodes_Demo_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..3e45389b1aca5242882117b07fdaefdadbd449a8 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_E2_IncrementNodes_Demo_v01b.json @@ -0,0 +1,1136 @@ +{ + "last_node_id": 69, + "last_link_id": 81, + "nodes": [ + { + "id": 15, + "type": "EmptyLatentImage", + "pos": [ + 1480, + 410 + ], + "size": { + "0": 320, + "1": 110 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 11, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 12, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 72, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 11, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 660 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": 
"clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "1girl, period costume" + ] + }, + { + "id": 12, + "type": "CLIPTextEncode", + "pos": [ + 1120, + 820 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 55 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "embedding:EasyNegative,\nnsfw" + ] + }, + { + "id": 13, + "type": "KSampler", + "pos": [ + 1480, + 580 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 53 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 13 + }, + { + "name": "denoise", + "type": "FLOAT", + "link": 65, + "widget": { + "name": "denoise", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0, + "max": 1, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32603574575332, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 17, + "type": "VAELoader", + "pos": [ + 1860, + 710 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 16, + "type": "VAEDecode", + "pos": [ + 1910, + 580 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 14 + }, + { + "name": "vae", + "type": "VAE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 64, + "type": "Note", + "pos": [ + 1480, + -70 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 1, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Integer and float increment nodes can be attached to any widget with the same data type" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 58, + "type": "CR Increment Float", + "pos": [ + 1920, + 70 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 71, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 78 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Increment Float" + }, + "widgets_values": [ + 2, + 0.25, + 2, + 8, + 0 + ], + "color": 
"#222", + "bgcolor": "#000" + }, + { + "id": 26, + "type": "Save Image Sequence (mtb)", + "pos": [ + 2700, + 200 + ], + "size": { + "0": 550, + "1": 730 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 69, + "slot_index": 0 + }, + { + "name": "current_frame", + "type": "INT", + "link": 56, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + }, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Save Image Sequence (mtb)" + }, + "widgets_values": [ + "F:\\ComfyUI\\ComfyUI_windows_portable\\ComfyUI\\output\\Test\\", + 5 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 56, + "type": "ImageScaleBy", + "pos": [ + 2310, + 300 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 68 + }, + { + "name": "scale_by", + "type": "FLOAT", + "link": 78, + "widget": { + "name": "scale_by", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0.01, + "max": 8, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 69 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScaleBy" + }, + "widgets_values": [ + "nearest-exact", + 1 + ] + }, + { + "id": 66, + "type": "CR Increment Integer", + "pos": [ + 1070, + 20 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 80, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 79 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Increment Integer" + }, + "widgets_values": [ + 768, + 16, + 0, + 8, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 25, + "type": "CR Current Frame", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 58 + }, + "flags": { + "collapsed": false + }, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 81, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 56, + 63, + 71, + 80 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 0, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 14, + "type": "CR SD1.5 Aspect Ratio", + "pos": [ + 1480, + 100 + ], + "size": { + "0": 320, + "1": 240 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "height", + "type": "INT", + "link": 79, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 2048 + } + ] + } + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 72 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": 
"CR SD1.5 Aspect Ratio" + }, + "widgets_values": [ + 512, + 512, + "custom", + "Off", + 1, + 1 + ] + }, + { + "id": 59, + "type": "Note", + "pos": [ + 650, + 50 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 2, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "The CR Current Frame node prints the current frame index to console so that you can see which frame is currently being processed" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 68, + "type": "Note", + "pos": [ + 650, + -70 + ], + "size": { + "0": 210, + "1": 70 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Frames are processed in sequence starting from frame index 0" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 67, + "type": "Note", + "pos": [ + 1080, + -150 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 4, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "This integer increment is changing the height of the image with each frame starting from the first frame" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 62, + "type": "Note", + "pos": [ + 1920, + -80 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 5, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "This float increment is changing the upscale factor with each frame with each frame, starting from the second frame" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 52, + "type": "CR Increment Float", + "pos": [ + 1070, + 360 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 63, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 65 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Increment Float" + }, + "widgets_values": [ + 0.75, + 0.04, + 2, + 8, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 63, + "type": "Note", + "pos": [ + 820, + 360 + ], + "size": { + "0": 210, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "This float increment is changing the denoise with each frame with each frame, starting from the second frame" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 47, + "type": "CheckpointLoaderSimple", + "pos": [ + 630, + 590 + ], + "size": { + "0": 380, + "1": 100 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54, + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors" + ] + }, + { + "id": 69, + "type": "PrimitiveNode", + "pos": [ + 360, + 250 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 81 + ], + "slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + 
] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "increment" + ] + } + ], + "links": [ + [ + 8, + 11, + 0, + 13, + 1, + "CONDITIONING" + ], + [ + 9, + 12, + 0, + 13, + 2, + "CONDITIONING" + ], + [ + 11, + 14, + 0, + 15, + 0, + "INT" + ], + [ + 12, + 14, + 1, + 15, + 1, + "INT" + ], + [ + 13, + 15, + 0, + 13, + 3, + "LATENT" + ], + [ + 14, + 13, + 0, + 16, + 0, + "LATENT" + ], + [ + 15, + 17, + 0, + 16, + 1, + "VAE" + ], + [ + 53, + 47, + 0, + 13, + 0, + "MODEL" + ], + [ + 54, + 47, + 1, + 11, + 0, + "CLIP" + ], + [ + 55, + 47, + 1, + 12, + 0, + "CLIP" + ], + [ + 56, + 25, + 0, + 26, + 1, + "INT" + ], + [ + 63, + 25, + 0, + 52, + 0, + "INT" + ], + [ + 65, + 52, + 0, + 13, + 4, + "FLOAT" + ], + [ + 68, + 16, + 0, + 56, + 0, + "IMAGE" + ], + [ + 69, + 56, + 0, + 26, + 0, + "IMAGE" + ], + [ + 71, + 25, + 0, + 58, + 0, + "INT" + ], + [ + 72, + 14, + 3, + 15, + 2, + "INT" + ], + [ + 78, + 58, + 0, + 56, + 1, + "FLOAT" + ], + [ + 79, + 66, + 0, + 14, + 0, + "INT" + ], + [ + 80, + 25, + 0, + 66, + 0, + "INT" + ], + [ + 81, + 69, + 0, + 25, + 0, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_F1_IONodes_Demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_F1_IONodes_Demo_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..b246a0221231706f9968aeae8825f9756053252e --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/CR_Animation_F1_IONodes_Demo_v01.json @@ -0,0 +1,489 @@ +{ + "last_node_id": 40, + "last_link_id": 66, + "nodes": [ + { + "id": 38, + "type": "VAEEncode", + "pos": [ + 1380, + 590 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 60 + }, + { + "name": "vae", + "type": "VAE", + "link": 64 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 59 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 37, + "type": "CR Interpolate Latents", + "pos": [ + 1580, + 590 + ], + "size": { + "0": 250, + "1": 102 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "latent1", + "type": "LATENT", + "link": 59 + }, + { + "name": "latent2", + "type": "LATENT", + "link": 62 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 55 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Interpolate Latents" + }, + "widgets_values": [ + 0.5, + "lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 31, + "type": "CR Load Flow Frames", + "pos": [ + 1000, + 480 + ], + "size": { + "0": 300, + "1": 194 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 44, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_image", + "type": "IMAGE", + "links": [ + 57, + 60 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "previous_image", + "type": "IMAGE", + "links": [ + 66 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "current_frame", + "type": "INT", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": 
"CR Load Flow Frames" + }, + "widgets_values": [ + "TikTok_frames", + "Index", + 0, + 0, + "", + "*.png" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 39, + "type": "VAEEncode", + "pos": [ + 1380, + 650 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 66 + }, + { + "name": "vae", + "type": "VAE", + "link": 63 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 62 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 35, + "type": "VAEDecode", + "pos": [ + 1910, + 620 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 55 + }, + { + "name": "vae", + "type": "VAE", + "link": 65 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 56 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 34, + "type": "CR Output Flow Frames", + "pos": [ + 2110, + 480 + ], + "size": { + "0": 320, + "1": 380 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "current_image", + "type": "IMAGE", + "link": 57 + }, + { + "name": "interpolated_img", + "type": "IMAGE", + "link": 56 + }, + { + "name": "current_frame", + "type": "INT", + "link": 53, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + } + } + ], + "properties": { + "Node name for S&R": "CR Output Flow Frames" + }, + "widgets_values": [ + "Video", + "CR", + 0, + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 40, + "type": "VAELoader", + "pos": [ + 990, + 750 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 63, + 64, + 65 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 2, + "type": "PrimitiveNode", + "pos": [ + 730, + 480 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 44 + ], + "slot_index": 0, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 1, + "increment" + ] + } + ], + "links": [ + [ + 44, + 2, + 0, + 31, + 0, + "INT" + ], + [ + 53, + 31, + 2, + 34, + 2, + "INT" + ], + [ + 55, + 37, + 0, + 35, + 0, + "LATENT" + ], + [ + 56, + 35, + 0, + 34, + 1, + "IMAGE" + ], + [ + 57, + 31, + 0, + 34, + 0, + "IMAGE" + ], + [ + 59, + 38, + 0, + 37, + 0, + "LATENT" + ], + [ + 60, + 31, + 0, + 38, + 0, + "IMAGE" + ], + [ + 62, + 39, + 0, + 37, + 1, + "LATENT" + ], + [ + 63, + 40, + 0, + 39, + 1, + "VAE" + ], + [ + 64, + 40, + 0, + 38, + 1, + "VAE" + ], + [ + 65, + 40, + 0, + 35, + 1, + "VAE" + ], + [ + 66, + 31, + 1, + 39, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/Node Schema/CR_Animation_Nodes_Status_drop10a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/Node 
Schema/CR_Animation_Nodes_Status_drop10a.json new file mode 100644 index 0000000000000000000000000000000000000000..9508b1d5deeb38ab8755aae7484262efc2268c1e --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Animation/Node Schema/CR_Animation_Nodes_Status_drop10a.json @@ -0,0 +1,3310 @@ +{ + "last_node_id": 274, + "last_link_id": 234, + "nodes": [ + { + "id": 90, + "type": "CR Debatch Frames", + "pos": [ + -924.2428318353343, + 575.3365003734204 + ], + "size": { + "0": 292.20001220703125, + "1": 26 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "frames", + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "debatched_frames", + "type": "IMAGE", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "CR Debatch Frames" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 91, + "type": "CR Text List To String", + "pos": [ + -924.031854638067, + 477.3582341136594 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "text_list", + "type": "STRING", + "link": null + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Text List To String" + }, + "widgets_values": [ + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 83, + "type": "CR Increment Float", + "pos": [ + 1931.619638004882, + 1113.8662124388768 + ], + "size": { + "0": 330, + "1": 140 + }, + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 159, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Increment Float" + }, + "widgets_values": [ + 1, + 0.1, + 0, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 84, + "type": "CR Increment Integer", + "pos": [ + 2311.619638004884, + 1113.8662124388768 + ], + "size": { + "0": 320, + "1": 140 + }, + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 158, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Increment Integer" + }, + "widgets_values": [ + 1, + 1, + 0, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 65, + "type": "CR Gradient Integer", + "pos": [ + 2311.619638004884, + 863.8662124388808 + ], + "size": { + "0": 330, + "1": 170 + }, + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 157, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Gradient Integer" + }, + "widgets_values": [ + 0, + 0, + 1, + 0, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 64, + "type": "CR Gradient Float", + "pos": [ + 1931.080418403318, + 863.8662124388808 + ], + "size": 
{ + "0": 330, + "1": 170 + }, + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 156, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Gradient Float" + }, + "widgets_values": [ + 0, + 0, + 1, + 0, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 186, + "type": "CR Cycle LoRAs", + "pos": [ + 717.7845361871587, + 1542.5758248663394 + ], + "size": { + "0": 320, + "1": 190 + }, + "flags": {}, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 129 + }, + { + "name": "clip", + "type": "CLIP", + "link": 130 + }, + { + "name": "lora_list", + "type": "LORA_LIST", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 161, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle LoRAs" + }, + "widgets_values": [ + "Sequential", + 30, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 185, + "type": "CR Cycle Models", + "pos": [ + 337.7845361871574, + 1542.5758248663394 + ], + "size": { + "0": 320, + "1": 190 + }, + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "model_list", + "type": "MODEL_LIST", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 160, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 129 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 130 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Models" + }, + "widgets_values": [ + "Sequential", + 30, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 171, + "type": "CR Cycle Images", + "pos": [ + 1094.548181208022, + 1543.7795145350904 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "image_list", + "type": "image_LIST", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 162, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Images" + }, + "widgets_values": [ + "Off", + 30, + 1, + "Sequential" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 187, + "type": "CR Model List", + "pos": [ + 1245.5496187605943, + -56.18786894116704 + ], + "size": { + "0": 320, + "1": 294 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "model_list", + 
"type": "MODEL_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_LIST", + "type": "MODEL_LIST", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Model List" + }, + "widgets_values": [ + "None", + "", + "None", + "", + "None", + "", + "None", + "", + "None", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 188, + "type": "CR LoRA List", + "pos": [ + 1245.5496187605943, + 293.81213105883563 + ], + "size": { + "0": 320, + "1": 342 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "lora_list", + "type": "lora_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_LIST", + "type": "LORA_LIST", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR LoRA List" + }, + "widgets_values": [ + "None", + "", + "", + "", + "None", + "", + "", + "", + "None", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 190, + "type": "CR Text List", + "pos": [ + 1615.549618760595, + -56.18786894116704 + ], + "size": { + "0": 290, + "1": 294 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "text_list", + "type": "text_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "TEXT_LIST", + "type": "TEXT_LIST", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Text List" + }, + "widgets_values": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 189, + "type": "CR Text List Simple", + "pos": [ + 1615.549618760595, + 293.81213105883563 + ], + "size": { + "0": 300, + "1": 154 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + } + ], + "outputs": [ + { + "name": "TEXT_LIST_SIMPLE", + "type": "TEXT_LIST_SIMPLE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Text List Simple" + }, + "widgets_values": [ + "", + "", + "", + "", + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 192, + "type": "CR Image List Simple", + "pos": [ + 1955.5496187605954, + 293.81213105883563 + ], + "size": { + "0": 310, + "1": 130 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": null + }, + { + "name": "image_2", + "type": "IMAGE", + "link": null + }, + { + "name": "image_3", + "type": "IMAGE", + "link": null + }, + { + "name": "image_4", + "type": "IMAGE", + "link": null + }, + { + "name": "image_5", + "type": "IMAGE", + "link": null + }, + { + "name": "image_list_simple", + "type": "IMAGE_LIST_SIMPLE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE_LIST_SIMPLE", + "type": "IMAGE_LIST_SIMPLE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Image List Simple" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 184, + "type": "CR Image List", + "pos": [ + 1955.5496187605954, + -56.18786894116704 + ], + "size": { + "0": 310, + "1": 294 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image_list", + "type": "image_LIST", + "link": null + }, + { + "name": "image_2", + "type": "IMAGE", + "link": null + }, + { + "name": "image_3", 
+ "type": "IMAGE", + "link": null + }, + { + "name": "image_4", + "type": "IMAGE", + "link": null + }, + { + "name": "image_5", + "type": "IMAGE", + "link": null + }, + { + "name": "image_list", + "type": "image_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE_LIST", + "type": "IMAGE_LIST", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Image List" + }, + "widgets_values": [ + "None", + "", + "None", + "", + "None" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 194, + "type": "CR Cycle Images Simple", + "pos": [ + 1094.548181208022, + 1763.7795145350904 + ], + "size": { + "0": 320, + "1": 230 + }, + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": null + }, + { + "name": "image_2", + "type": "IMAGE", + "link": null + }, + { + "name": "image_3", + "type": "IMAGE", + "link": null + }, + { + "name": "image_4", + "type": "IMAGE", + "link": null + }, + { + "name": "image_5", + "type": "IMAGE", + "link": null + }, + { + "name": "image_list_simple", + "type": "IMAGE_LIST_SIMPLE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 165, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Images Simple" + }, + "widgets_values": [ + "Sequential", + 30, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 53, + "type": "CR Interpolate Latents", + "pos": [ + 2397.0391363635654, + 535.6252973667436 + ], + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "latent1", + "type": "LATENT", + "link": null + }, + { + "name": "latent2", + "type": "LATENT", + "link": null + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Interpolate Latents" + }, + "widgets_values": [ + 0.5, + "lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 10, + "type": "CR Index Reset", + "pos": [ + -59.91286945504303, + 55.54442351082017 + ], + "size": { + "0": 240, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": null, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "reset_to", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Index Reset" + }, + "widgets_values": [ + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 11, + "type": "CR Index Multiply", + "pos": [ + -59.91286945504303, + 215.5444235108203 + ], + "size": { + "0": 240, + "1": 90 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": null, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "factor", + "type": "INT", + "links": null, + 
"shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Index Multiply" + }, + "widgets_values": [ + 107, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 231, + "type": "CR Input Text List", + "pos": [ + 1616.7185560109374, + 507.5897292398438 + ], + "size": { + "0": 300, + "1": 120 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "TEXT_LIST_SIMPLE", + "type": "TEXT_LIST_SIMPLE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Input Text List" + }, + "widgets_values": [ + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 239, + "type": "CR Simple Prompt List Keyframes", + "pos": [ + -651.0531598311474, + 871.1768746588539 + ], + "size": { + "0": 420, + "1": 180 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": 213 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List Keyframes" + }, + "widgets_values": [ + 30, + 1, + "Default", + "Default", + "Default", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 240, + "type": "CR Simple Prompt List", + "pos": [ + -1171.0531598311436, + 871.1768746588539 + ], + "size": { + "0": 468.5999755859375, + "1": 276.0000305175781 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "simple_prompt_list", + "type": "SIMPLE_PROMPT_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "SIMPLE_PROMPT_LIST", + "type": "SIMPLE_PROMPT_LIST", + "links": [ + 213 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt List" + }, + "widgets_values": [ + "prompt", + "prompt", + "prompt", + "prompt", + "prompt" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 243, + "type": "CR Prompt List", + "pos": [ + -1171.0531598311436, + 1221.1768746588546 + ], + "size": { + "0": 470, + "1": 684 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "prompt_list", + "type": "PROMPT_LIST", + "link": null + } + ], + "outputs": [ + { + "name": "PROMPT_LIST", + "type": "PROMPT_LIST", + "links": [ + 214 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Prompt List" + }, + "widgets_values": [ + 30, + 1, + "prompt", + "Default", + "Default", + "Default", + "prompt", + "Default", + "Default", + "Default", + "prompt", + "Default", + "Default", + "Default", + "prompt", + "Default", + "Default", + "Default", + "prompt", + "Default", + "Default", + "Default" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 244, + "type": "CR Prompt List Keyframes", + "pos": [ + -631.0531598311474, + 1221.1768746588546 + ], + "size": { + "0": 317.4000244140625, + "1": 58 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "prompt_list", + "type": "PROMPT_LIST", + "link": 214 + } + ], + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Prompt List Keyframes" + }, + "widgets_values": [ + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 250, + "type": "CR Simple Schedule", + "pos": [ + 331.5431064101562, + -114.01670582187359 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 14, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + 
"links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Schedule" + }, + "widgets_values": [ + "frame_number, item_alias, [attr_value1, attr_value2]", + "Value", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 251, + "type": "CR Load Scheduled Models", + "pos": [ + 335.13422812533, + 932.7062050850162 + ], + "size": { + "0": 320, + "1": 190 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "model_list", + "type": "MODEL_LIST", + "link": null + }, + { + "name": "schedule", + "type": "SCHEDULE", + "link": 219 + }, + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": null, + "shape": 3 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Scheduled Models" + }, + "widgets_values": [ + "Load Default Model", + "SD1_5\\7th_anime_v3_A.safetensors", + 0, + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 249, + "type": "CR Central Schedule", + "pos": [ + 338.4568325546874, + 150.6133610892579 + ], + "size": { + "0": 390, + "1": 480 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 220 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Central Schedule" + }, + "widgets_values": [ + "schedule", + "Value", + "", + "schedule", + "Value", + "", + "schedule", + "Value", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 252, + "type": "CR Load Scheduled LoRAs", + "pos": [ + 705.1342281253314, + 932.7062050850162 + ], + "size": { + "0": 320, + "1": 258 + }, + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": null + }, + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "schedule", + "type": "SCHEDULE", + "link": 221 + }, + { + "name": "lora_list", + "type": "LORA_LIST", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": null, + "shape": 3 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Scheduled LoRAs" + }, + "widgets_values": [ + "Off", + 0, + 120, + "", + "CR", + 1, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 209, + "type": "CR String To Combo", + "pos": [ + -922.7771217524553, + 659.7320834823254 + ], + "size": { + "0": 210, + "1": 34 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": null + } + ], + "outputs": [ + { + "name": "*", + "type": "*", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR String To Combo" + }, + "widgets_values": [ + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 
159, + "type": "CR Cycle Text", + "pos": [ + 1471.5431674453125, + 1535.1876997187492 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "text_list", + "type": "STYLE_LIST", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 163, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text" + }, + "widgets_values": [ + "Off", + 30, + 1, + "Sequential" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 191, + "type": "CR Cycle Text Simple", + "pos": [ + 1471.5431674453125, + 1755.1876997187492 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "text_list_simple", + "type": "TEXT_LIST_SIMPLE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": 166, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Cycle Text Simple" + }, + "widgets_values": [ + "", + "", + "", + "", + "", + "Sequential", + 30, + 1, + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 203, + "type": "CR Simple Value Scheduler", + "pos": [ + 1934.4145575758114, + 1539.07570596151 + ], + "size": { + "0": 380, + "1": 190 + }, + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 228, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Value Scheduler" + }, + "widgets_values": [ + "frame_number, value", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 207, + "type": "CR Simple Text Scheduler", + "pos": [ + 1934.4145575758114, + 1799.07570596151 + ], + "size": { + "0": 380, + "1": 200 + }, + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 229, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Text Scheduler" + }, + "widgets_values": [ + "frame_number, value", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 7, + "type": "CR Index Increment", + "pos": [ + -59.47699937965249, + 358.4312166894532 + ], + "size": { + "0": 240, + "1": 90 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": null, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "interval", + "type": "INT", + "links": null, + "shape": 3 + } + ], + 
"properties": { + "Node name for S&R": "CR Index Increment" + }, + "widgets_values": [ + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 268, + "type": "CR Output Flow Frames", + "pos": [ + -516.1425520625003, + 531.9835967714845 + ], + "size": { + "0": 320, + "1": 146 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "current_img", + "type": "IMAGE", + "link": null + }, + { + "name": "interpolated_img", + "type": "IMAGE", + "link": null + }, + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999999 + } + ] + } + } + ], + "properties": { + "Node name for S&R": "CR Output Flow Frames" + }, + "widgets_values": [ + "1.5 template tests", + "Sequence", + 0, + "" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 266, + "type": "CR Load Flow Frames", + "pos": [ + -516.1425520625003, + 281.9835967714843 + ], + "size": { + "0": 320, + "1": 194 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_img", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "previous_img", + "type": "IMAGE", + "links": null, + "shape": 3 + }, + { + "name": "current_frame", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Flow Frames" + }, + "widgets_values": [ + "FlowBack", + "Index", + 0, + 0, + "", + "*.png" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 97, + "type": "Reroute", + "pos": [ + 335.13422812533, + 842.7062050850162 + ], + "size": [ + 107.2, + 26 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 220 + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": [ + 219, + 221, + 223, + 224, + 230 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 245, + "type": "CR Keyframe List", + "pos": [ + -631.0531598311474, + 1421.1768746588546 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 20, + "mode": 0, + "outputs": [ + { + "name": "keyframe_list", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "keyframe_format", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Keyframe List" + }, + "widgets_values": [ + "keyframes", + "Deforum" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 271, + "type": "CR Prompt Scheduler", + "pos": [ + 1080, + 930 + ], + "size": { + "0": 330, + "1": 340 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 230 + }, + { + "name": "current_frame", + "type": "INT", + "link": 234, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "next_prompt", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "weight", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Scheduler" + }, + "widgets_values": [ + 
"Default Prompt", + 0, + "default prompt", + "CR", + "", + "keyframe list", + "prepend text", + "append text", + "append text" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 256, + "type": "CR Value Scheduler", + "pos": [ + 1460, + 930 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 223 + }, + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Value Scheduler" + }, + "widgets_values": [ + "Default Value", + "", + "", + "", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 257, + "type": "CR Text Scheduler", + "pos": [ + 1460, + 1140 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": 224 + }, + { + "name": "current_frame", + "type": "INT", + "link": null, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Text Scheduler" + }, + "widgets_values": [ + "Default Text", + 0, + "", + "default text", + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 260, + "type": "Reroute", + "pos": [ + 1730, + 1380 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 227 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 228, + 229, + 231 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 87, + "type": "Reroute", + "pos": [ + 300, + 1380 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 167 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 160, + 161, + 162, + 163, + 165, + 166 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 269, + "type": "CR Simple Prompt Scheduler", + "pos": [ + 2362.0920513924993, + 1537.8720122265615 + ], + "size": { + "0": 370, + "1": 200 + }, + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 231, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "current_prompt", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "next_prompt", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "weight", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Prompt Scheduler" + }, + "widgets_values": [ + "frame_number, text", + 0, + "CR" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 85, + "type": "Reroute", + "pos": [ + 1890, + 700 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 39, + "mode": 0, + 
"inputs": [ + { + "name": "", + "type": "*", + "link": 189 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 156, + 157, + 158, + 159 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 95, + "type": "CR Load Animation Frames", + "pos": [ + -516.1425520625003, + 61.9835967714843 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "frames", + "type": "IMAGE", + "links": [], + "shape": 3, + "slot_index": 0 + }, + { + "name": "masks", + "type": "MASK", + "links": null, + "shape": 3 + }, + { + "name": "current_frame", + "type": "INT", + "links": [], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Load Animation Frames" + }, + "widgets_values": [ + "FlowBack", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 201, + "type": "CR Current Frame", + "pos": [ + -57.17248667578124, + 589.9743383583987 + ], + "size": { + "0": 250, + "1": 80 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": null, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 146 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 1, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 86, + "type": "Reroute", + "pos": [ + 510, + 700 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 146 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 167, + 189, + 227, + 234 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 92, + "type": "CR Prompt Text", + "pos": [ + -113.23711794591394, + 866.3292853335626 + ], + "size": { + "0": 310, + "1": 110 + }, + "flags": {}, + "order": 23, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Prompt Text" + }, + "widgets_values": [ + "prompt" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 248, + "type": "CR Output Schedule To File", + "pos": [ + 790, + 520 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "schedule", + "type": "SCHEDULE", + "link": null + } + ], + "properties": { + "Node name for S&R": "CR Output Schedule To File" + }, + "widgets_values": [ + "", + "", + "txt" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 263, + "type": "CR Load Schedule From File", + "pos": [ + 790, + 330 + ], + "size": { + "0": 315, + "1": 126 + }, + "flags": {}, + "order": 26, + "mode": 0, + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Schedule From File" + }, + "widgets_values": [ + "", + "", + "csv" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 264, + "type": "CR Combine Schedules", + "pos": [ + 800, + -112.30761668222642 + ], + "size": { + "0": 280, + "1": 90 + }, + "flags": {}, + "order": 27, + "mode": 0, + 
"inputs": [ + { + "name": "schedule_1", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_2", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_3", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule_4", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": null, + "shape": 3 + }, + { + "name": "show_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Combine Schedules" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 258, + "type": "CR Schedule Input Switch", + "pos": [ + 800, + 30 + ], + "size": { + "0": 280, + "1": 80 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "schedule1", + "type": "SCHEDULE", + "link": null + }, + { + "name": "schedule2", + "type": "SCHEDULE", + "link": null + } + ], + "outputs": [ + { + "name": "SCHEDULE", + "type": "SCHEDULE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Schedule Input Switch" + }, + "widgets_values": [ + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 273, + "type": "CR Encode Scheduled Prompts", + "pos": [ + -110, + 1220 + ], + "size": [ + 310, + 120 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": null + }, + { + "name": "current_prompt", + "type": "STRING", + "link": null, + "widget": { + "name": "current_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "next_prompt", + "type": "STRING", + "link": null, + "widget": { + "name": "next_prompt", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + }, + { + "name": "weight", + "type": "FLOAT", + "link": null, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 0, + "min": -9999, + "max": 9999, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Encode Scheduled Prompts" + }, + "widgets_values": [ + "", + "", + 0 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 274, + "type": "CR Load Prompt Style", + "pos": [ + -110, + 1040 + ], + "size": [ + 310, + 120 + ], + "flags": {}, + "order": 29, + "mode": 0, + "outputs": [ + { + "name": "prepend_text", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "append_text", + "type": "STRING", + "links": null, + "shape": 3 + }, + { + "name": "negative_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Load Prompt Style" + }, + "widgets_values": [ + "None", + "json" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 129, + 185, + 0, + 186, + 0, + "MODEL" + ], + [ + 130, + 185, + 1, + 186, + 1, + "CLIP" + ], + [ + 146, + 201, + 0, + 86, + 0, + "*" + ], + [ + 156, + 85, + 0, + 64, + 0, + "INT" + ], + [ + 157, + 85, + 0, + 65, + 0, + "INT" + ], + [ + 158, + 85, + 0, + 84, + 0, + "INT" + ], + [ + 159, + 85, + 0, + 83, + 0, + "INT" + ], + [ + 160, + 87, + 0, + 185, + 3, + "INT" + ], + [ + 161, + 87, + 0, + 186, + 3, + "INT" + ], + [ + 162, + 87, + 0, + 171, + 1, + "INT" + ], + [ + 163, + 87, + 0, + 159, + 1, + "INT" + ], + [ + 165, + 87, + 0, + 194, + 6, + "INT" + ], + [ + 166, + 87, + 0, + 191, + 1, + "INT" + ], + [ + 167, + 86, + 0, + 87, + 0, + "*" + ], + [ + 189, + 86, + 0, + 85, + 0, + "*" + ], + [ + 213, + 240, + 0, + 
239, + 0, + "SIMPLE_PROMPT_LIST" + ], + [ + 214, + 243, + 0, + 244, + 0, + "PROMPT_LIST" + ], + [ + 219, + 97, + 0, + 251, + 1, + "SCHEDULE" + ], + [ + 220, + 249, + 0, + 97, + 0, + "*" + ], + [ + 221, + 97, + 0, + 252, + 2, + "SCHEDULE" + ], + [ + 223, + 97, + 0, + 256, + 0, + "SCHEDULE" + ], + [ + 224, + 97, + 0, + 257, + 0, + "SCHEDULE" + ], + [ + 227, + 86, + 0, + 260, + 0, + "*" + ], + [ + 228, + 260, + 0, + 203, + 0, + "INT" + ], + [ + 229, + 260, + 0, + 207, + 0, + "INT" + ], + [ + 230, + 97, + 0, + 271, + 0, + "SCHEDULE" + ], + [ + 231, + 260, + 0, + 269, + 0, + "INT" + ], + [ + 234, + 86, + 0, + 271, + 1, + "INT" + ] + ], + "groups": [ + { + "title": "Schedulers", + "bounding": [ + 291, + 766, + 1535, + 576 + ], + "color": "#8AA", + "font_size": 24, + "locked": false + }, + { + "title": "Index", + "bounding": [ + -109, + -40, + 342, + 763 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Interpolations", + "bounding": [ + 1886, + 766, + 799, + 523 + ], + "color": "#8AA", + "font_size": 24, + "locked": false + }, + { + "title": "Cyclers", + "bounding": [ + 290, + 1441, + 1540, + 598 + ], + "color": "#8AA", + "font_size": 24, + "locked": false + }, + { + "title": "Lists", + "bounding": [ + 1190, + -158, + 1122, + 827 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt Keyframes", + "bounding": [ + -1219, + 768, + 1031, + 1181 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + }, + { + "title": "Schedules", + "bounding": [ + 290, + -224, + 858, + 893 + ], + "color": "#a1309b", + "font_size": 24, + "locked": false + }, + { + "title": "Utils", + "bounding": [ + -951, + 383, + 355, + 342 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "IO", + "bounding": [ + -556, + -41, + 409, + 765 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Interpolation", + "bounding": [ + 2363, + 433, + 383, + 234 + ], + "color": "#3f789e", + "font_size": 24, + "locked": false + }, + { + "title": "Simple Schedulers", + "bounding": [ + 1890, + 1438, + 878, + 600 + ], + "color": "#8AA", + "font_size": 24, + "locked": false + }, + { + "title": "Prompt", + "bounding": [ + -146, + 769, + 378, + 603 + ], + "color": "#8A8", + "font_size": 24, + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Aspect Ratio/CR_Aspect_Ratio_demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Aspect Ratio/CR_Aspect_Ratio_demo_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..43ed3bed9c2d5fe309feb6111b2583eeaae522b6 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Aspect Ratio/CR_Aspect_Ratio_demo_v01.json @@ -0,0 +1,394 @@ +{ + "last_node_id": 7, + "last_link_id": 9, + "nodes": [ + { + "id": 2, + "type": "VAEDecode", + "pos": [ + 1300, + 560 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 1 + }, + { + "name": "vae", + "type": "VAE", + "link": 3 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 5, + "type": "VAEDecode", + "pos": [ + 1300, + 290 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": 
[ + { + "name": "samples", + "type": "LATENT", + "link": 5 + }, + { + "name": "vae", + "type": "VAE", + "link": 6 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 7, + "type": "EmptyLatentImage", + "pos": [ + 1010, + 290 + ], + "size": [ + 240, + 110 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 7, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 8, + "widget": { + "name": "height" + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 9, + "widget": { + "name": "batch_size" + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 6, + "type": "PreviewImage", + "pos": [ + 1560, + 290 + ], + "size": [ + 260, + 200 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 4 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 3, + "type": "PreviewImage", + "pos": [ + 1560, + 560 + ], + "size": [ + 260, + 200 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 2 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 1, + "type": "CR Aspect Ratio", + "pos": [ + 580, + 410 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "prescale_factor", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 4 + }, + { + "name": "empty_latent", + "type": "LATENT", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 5 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Aspect Ratio" + }, + "widgets_values": [ + 1024, + 1024, + "SDXL - 3:4 portrait 896x1152", + "Off", + 1, + 1, + 2 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 4, + "type": "VAELoader", + "pos": [ + 580, + 260 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 3, + 6 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae_fixed.safetensors" + ] + } + ], + "links": [ + [ + 1, + 1, + 5, + 2, + 0, + "LATENT" + ], + [ + 2, + 2, + 0, + 3, + 0, + "IMAGE" + ], + [ + 3, + 4, + 0, + 2, + 1, + "VAE" + ], + [ + 4, + 5, + 0, + 6, + 0, + "IMAGE" + ], + [ + 5, + 7, + 0, + 5, + 0, + "LATENT" + ], + [ + 6, + 4, + 0, + 5, + 1, + "VAE" + ], + [ + 7, + 1, + 0, + 7, + 0, + "INT" + ], + [ + 8, + 1, + 1, + 7, + 1, + "INT" + ], + [ + 9, + 1, + 4, + 7, + 2, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git 
a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Gradient_Nodes_v1.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Gradient_Nodes_v1.json new file mode 100644 index 0000000000000000000000000000000000000000..f8856b35b0e3a752817bf625363f5e0af79cc7de --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Gradient_Nodes_v1.json @@ -0,0 +1,1586 @@ +{ + "last_node_id": 75, + "last_link_id": 136, + "nodes": [ + { + "id": 40, + "type": "PreviewImage", + "pos": [ + 1700, + 400 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 117 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 2480, + 90 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 120 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 57, + "type": "PreviewImage", + "pos": [ + 2480, + 400 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 127 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 38, + "type": "PreviewImage", + "pos": [ + 2480, + 710 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 116 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 44, + "type": "PreviewImage", + "pos": [ + 2480, + 1330 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 121 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 59, + "type": "PreviewImage", + "pos": [ + 2480, + 1020 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 128 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 34, + "type": "CR Style Bars", + "pos": [ + 2110, + 710 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 116 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "sin wave", + 512, + 512, + "plasma", + "vertical", + 3 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 58, + "type": "CR Style Bars", + "pos": [ + 2110, + 1020 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 128 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "sin wave", + 512, + 512, + "ocean", + "horizontal", + 3 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 61, + "type": "PreviewImage", + "pos": [ + 2480, + 1650 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 129 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 
43, + "type": "CR Style Bars", + "pos": [ + 2110, + 1330 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 121 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "gradient bars", + 512, + 512, + "Pastel1", + "vertical", + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 60, + "type": "CR Style Bars", + "pos": [ + 2110, + 1650 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 129 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "gradient bars", + 512, + 512, + "magma", + "horizontal", + 2 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 56, + "type": "CR Style Bars", + "pos": [ + 2110, + 400 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 127 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "color bars", + 512, + 512, + "cubehelix", + "horizontal", + 5 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 41, + "type": "CR Style Bars", + "pos": [ + 2110, + 90 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 120 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Style Bars" + }, + "widgets_values": [ + "color bars", + 512, + 512, + "Accent", + "vertical", + 4 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 900, + 90 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 119 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 50, + "type": "PreviewImage", + "pos": [ + 900, + 420 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 123 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 52, + "type": "PreviewImage", + "pos": [ + 900, + 760 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 124 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 72, + "type": "PreviewImage", + "pos": [ + 900, + 1100 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 134 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 68, + "type": "PreviewImage", + "pos": [ + 900, + 1490 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 132 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 64, + "type": "PreviewImage", + "pos": [ + 120, + 530 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + 
"name": "images", + "type": "IMAGE", + "link": 130 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 48, + "type": "PreviewImage", + "pos": [ + 1700, + 1660 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 136 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 39, + "type": "PreviewImage", + "pos": [ + 1700, + 940 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 118 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 73, + "type": "CR Starburst Colors", + "pos": [ + -250, + 1310 + ], + "size": { + "0": 315, + "1": 298 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 135 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Starburst Colors" + }, + "widgets_values": [ + 512, + 512, + 14, + "fuchsia", + "lightgray", + 0, + 0, + 15, + 2, + "#0011AA", + "#007711" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 62, + "type": "CR Starburst Lines", + "pos": [ + -250, + 530 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 130 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Starburst Lines" + }, + "widgets_values": [ + 511, + 512, + 6, + 256, + 5, + "orange", + "green", + 0, + 0, + 30, + "#00FF33", + "#0033AA" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 49, + "type": "CR Color Bars", + "pos": [ + 530, + 420 + ], + "size": { + "0": 315, + "1": 274 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 123 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Bars" + }, + "widgets_values": [ + "2-color", + 512, + 512, + "black", + "red", + "horizontal", + 3, + 0, + "#000000", + "#000000" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 69, + "type": "PreviewImage", + "pos": [ + 900, + 1880 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 133 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 65, + "type": "PreviewImage", + "pos": [ + 120, + 930 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 131 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 74, + "type": "PreviewImage", + "pos": [ + 120, + 1310 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 135 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 2, + "type": "PreviewImage", + "pos": [ + 120, + 90 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 115 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 46, + "type": "PreviewImage", + "pos": [ + 1700, + 90 + ], + "size": { + "0": 
320, + "1": 250 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 122 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 54, + "type": "PreviewImage", + "pos": [ + 1700, + 1270 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 126 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 63, + "type": "CR Starburst Colors", + "pos": [ + -250, + 930 + ], + "size": { + "0": 310, + "1": 300 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 131 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Starburst Colors" + }, + "widgets_values": [ + 512, + 512, + 14, + "navy", + "yellow", + 0, + 0, + 15, + 1, + "#440000", + "#003377" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 32, + "type": "CR Halftone Grid", + "pos": [ + -240, + 90 + ], + "size": { + "0": 310, + "1": 250 + }, + "flags": {}, + "order": 10, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 115 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Halftone Grid" + }, + "widgets_values": [ + 512, + 512, + "plasma", + "No", + 20, + "black", + 0, + 0, + "#001133" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 33, + "type": "CR Color Bars", + "pos": [ + 530, + 90 + ], + "size": { + "0": 315, + "1": 274 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 119 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Bars" + }, + "widgets_values": [ + "2-color", + 512, + 512, + "yellow", + "pink", + "vertical", + 5, + 0, + "#220077", + "#880000" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 51, + "type": "CR Color Bars", + "pos": [ + 530, + 760 + ], + "size": { + "0": 315, + "1": 274 + }, + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 124 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Bars" + }, + "widgets_values": [ + "2-color", + 512, + 512, + "orange", + "white", + "alt_diagonal", + 10, + 0, + "#00BB00", + "#0000CC" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 71, + "type": "CR Color Bars", + "pos": [ + 530, + 1100 + ], + "size": { + "0": 315, + "1": 274 + }, + "flags": {}, + "order": 13, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 134 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Bars" + }, + "widgets_values": [ + "2-color", + 512, + 512, + "navy", + "black", + "diagonal", + 10, + 0, + "#550000", + "#007700" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 66, + "type": "CR Polygons", + "pos": [ + 530, + 1490 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 14, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 132 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Polygons" + }, + "widgets_values": [ + "hexagons", + 512, + 512, + 5, + 5, + "yellow", + "white", + "black", + 5, + "#55BB00", + "#335500", + "#003344" + ], + "color": "#222", + "bgcolor": 
"#000" + }, + { + "id": 67, + "type": "CR Polygons", + "pos": [ + 530, + 1880 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 15, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 133 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Polygons" + }, + "widgets_values": [ + "triangles", + 512, + 512, + 5, + 5, + "maroon", + "blue", + "lavender", + 9, + "#770099", + "#770000", + "#006699" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 75, + "type": "CR Radial Gradient", + "pos": [ + 1310, + 1660 + ], + "size": { + "0": 315, + "1": 274 + }, + "flags": {}, + "order": 16, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 136 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Radial Gradient" + }, + "widgets_values": [ + 512, + 512, + "orange", + "purple", + 1, + 0.5, + 0.5, + "#6600FF", + "#004400" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 53, + "type": "CR Color Gradient", + "pos": [ + 1310, + 1270 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 17, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 126 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Gradient" + }, + "widgets_values": [ + 512, + 512, + "green", + "red", + 0.75, + 0.25, + "horizontal", + "#770000", + "#008800" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 36, + "type": "CR Color Gradient", + "pos": [ + 1310, + 940 + ], + "size": { + "0": 310, + "1": 250 + }, + "flags": {}, + "order": 18, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 118 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Color Gradient" + }, + "widgets_values": [ + 512, + 512, + "yellow", + "blue", + 0.5, + 0.5, + "vertical", + "#5555000", + "#004444" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 35, + "type": "CR Checker Pattern", + "pos": [ + 1310, + 400 + ], + "size": { + "0": 315, + "1": 250 + }, + "flags": {}, + "order": 19, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 117 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Checker Pattern" + }, + "widgets_values": [ + "stepped", + 512, + 512, + "aqua", + "fuchsia", + 8, + 3, + "#004400", + "#000099" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 45, + "type": "CR Checker Pattern", + "pos": [ + 1310, + 90 + ], + "size": { + "0": 315, + "1": 250 + }, + "flags": {}, + "order": 20, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 122 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Checker Pattern" + }, + "widgets_values": [ + "regular", + 512, + 512, + "black", + "white", + 8, + 2, + "#0044BB", + "#007788" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 115, + 32, + 0, + 2, + 0, + "IMAGE" + ], + [ + 116, + 34, + 0, + 38, + 0, + "IMAGE" + ], + [ + 117, + 35, + 0, + 40, + 0, + "IMAGE" + ], + [ + 118, + 36, + 0, + 39, + 0, + "IMAGE" + ], + [ + 119, + 33, + 0, + 37, + 0, + "IMAGE" + ], + [ + 120, + 41, + 0, + 42, + 0, + "IMAGE" + ], + [ + 121, + 43, + 0, + 44, + 0, + "IMAGE" + ], + [ + 122, + 45, + 0, + 46, + 0, + "IMAGE" + ], + [ + 123, + 49, + 0, + 50, + 0, + "IMAGE" + ], + [ + 124, + 51, + 0, + 52, + 0, + 
"IMAGE" + ], + [ + 126, + 53, + 0, + 54, + 0, + "IMAGE" + ], + [ + 127, + 56, + 0, + 57, + 0, + "IMAGE" + ], + [ + 128, + 58, + 0, + 59, + 0, + "IMAGE" + ], + [ + 129, + 60, + 0, + 61, + 0, + "IMAGE" + ], + [ + 130, + 62, + 0, + 64, + 0, + "IMAGE" + ], + [ + 131, + 63, + 0, + 65, + 0, + "IMAGE" + ], + [ + 132, + 66, + 0, + 68, + 0, + "IMAGE" + ], + [ + 133, + 67, + 0, + 69, + 0, + "IMAGE" + ], + [ + 134, + 71, + 0, + 72, + 0, + "IMAGE" + ], + [ + 135, + 73, + 0, + 74, + 0, + "IMAGE" + ], + [ + 136, + 75, + 0, + 48, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Starburst_Lines_demo_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Starburst_Lines_demo_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..508d5de0c255c6baa31a8fd54df81e4eb1d96823 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Pattern/CR_Starburst_Lines_demo_v01.json @@ -0,0 +1,198 @@ +{ + "last_node_id": 9, + "last_link_id": 7, + "nodes": [ + { + "id": 2, + "type": "PreviewImage", + "pos": [ + 1190, + 620 + ], + "size": { + "0": 320, + "1": 250 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 1 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 5, + "type": "PrimitiveNode", + "pos": [ + 270, + 620 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 6 + ], + "slot_index": 0, + "widget": { + "name": "a" + } + } + ], + "properties": {}, + "widgets_values": [ + 180.5, + "increment" + ] + }, + { + "id": 9, + "type": "JWFloatMul", + "pos": [ + 540, + 620 + ], + "size": [ + 210, + 70 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "FLOAT", + "link": 6, + "widget": { + "name": "a" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "JWFloatMul" + }, + "widgets_values": [ + 180.5, + 20 + ] + }, + { + "id": 1, + "type": "CR Starburst Lines", + "pos": [ + 820, + 620 + ], + "size": [ + 320, + 340 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "rotation", + "type": "FLOAT", + "link": 7, + "widget": { + "name": "rotation" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Starburst Lines" + }, + "widgets_values": [ + 511, + 512, + 6, + 2, + 50, + "blue", + "yellow", + 0, + 0, + 60, + "#00FF33", + "#0033AA" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 1, + 1, + 0, + 2, + 0, + "IMAGE" + ], + [ + 6, + 5, + 0, + 9, + 0, + "FLOAT" + ], + [ + 7, + 9, + 0, + 1, + 0, + "FLOAT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo1_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo1_v01.json new file mode 100644 index 
0000000000000000000000000000000000000000..2a6e64d26c2a8b3b3a50561e7382b37522890186 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo1_v01.json @@ -0,0 +1,149 @@ +{ + "last_node_id": 60, + "last_link_id": 65, + "nodes": [ + { + "id": 9, + "type": "LoadImage", + "pos": [ + 140, + 0 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 22 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL_00006_.png", + "image" + ] + }, + { + "id": 35, + "type": "SaveImage", + "pos": [ + 150, + 760 + ], + "size": { + "0": 320, + "1": 270 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "properties": {}, + "widgets_values": [ + "CR" + ] + }, + { + "id": 21, + "type": "CR Simple Meme Template", + "pos": [ + 130, + 390 + ], + "size": { + "0": 400, + "1": 314 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 22 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "custom", + "One Does Not Simply", + "MEME IN COMFY", + "impact.ttf", + 150, + "white", + "thick", + "white", + "no bars" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 22, + 9, + 0, + 21, + 0, + "IMAGE" + ], + [ + 37, + 21, + 0, + 35, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo2_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo2_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..8f539d00beba16dedfabb81f7227558ff893ff0f --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo2_v01.json @@ -0,0 +1,606 @@ +{ + "last_node_id": 59, + "last_link_id": 58, + "nodes": [ + { + "id": 35, + "type": "SaveImage", + "pos": [ + 130, + 770 + ], + "size": { + "0": 320, + "1": 270 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "properties": {}, + "widgets_values": [ + "CR" + ] + }, + { + "id": 49, + "type": "SaveImage", + "pos": [ + 580, + 770 + ], + "size": { + "0": 320, + "1": 270 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 51 + } + ], + "properties": {}, + "widgets_values": [ + "CR" + ] + }, + { + "id": 52, + "type": "SaveImage", + "pos": [ + 1030, + 770 + ], + "size": { + "0": 320, + "1": 270 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 53 + } + ], + "properties": {}, + "widgets_values": [ + "CR" + ] + }, + { + "id": 47, + "type": "LoadImage", + "pos": [ + 590, + 0 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": 
"IMAGE", + "type": "IMAGE", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00108_ (1).png", + "image" + ] + }, + { + "id": 55, + "type": "SaveImage", + "pos": [ + 1480, + 770 + ], + "size": { + "0": 320, + "1": 270 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 55 + } + ], + "properties": {}, + "widgets_values": [ + "CR" + ] + }, + { + "id": 53, + "type": "LoadImage", + "pos": [ + 1490, + 0 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 54 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00108_ (1).png", + "image" + ] + }, + { + "id": 50, + "type": "LoadImage", + "pos": [ + 1040, + 0 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00108_ (1).png", + "image" + ] + }, + { + "id": 59, + "type": "ShowText|pysssss", + "pos": [ + 1920, + 410 + ], + "size": { + "0": 430, + "1": 170 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 58, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "Help:\n \n The two text entry boxes are for the top and bottom text.\n these can be added either on a color bar or as an overlay.\n Both top and bottom text are optional.\n \n Only the first two lines will be used for top and bottom text.\n If you enter more than two lines any additional lines will be ignored.\n \n If you enter both top and bottom text and select a single bar (top or bottom),\n then one of texts will be ouput as overlay text.\n \n If you enter both top and bottom text and select no bars,\n then both texts will be ouput as overlay text." 
+ ] + }, + { + "id": 9, + "type": "LoadImage", + "pos": [ + 140, + 0 + ], + "size": [ + 320, + 310 + ], + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 22 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00108_ (1).png", + "image" + ] + }, + { + "id": 21, + "type": "CR Simple Meme Template", + "pos": [ + 130, + 390 + ], + "size": { + "0": 400, + "1": 314 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 22 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "custom", + "Make A Meme Make A Meme Make A Meme\nMake A Meme Make A Meme Make A Meme", + "impact.ttf", + "impact.ttf", + 150, + "white", + "thick", + "black", + "no bars" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 48, + "type": "CR Simple Meme Template", + "pos": [ + 580, + 390 + ], + "size": { + "0": 400, + "1": 314 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 50 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 51 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "custom", + "Make A Meme", + "impact.ttf", + "impact.ttf", + 150, + "white", + "thick", + "black", + "top" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 51, + "type": "CR Simple Meme Template", + "pos": [ + 1030, + 390 + ], + "size": { + "0": 400, + "1": 314 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 52 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "custom", + "Make A Meme", + "impact.ttf", + "impact.ttf", + 150, + "white", + "thick", + "black", + "bottom" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 54, + "type": "CR Simple Meme Template", + "pos": [ + 1480, + 390 + ], + "size": { + "0": 400, + "1": 314 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 54 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 55 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": [ + 58 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "custom", + "Make A Meme", + "impact.ttf", + "impact.ttf", + 150, + "white", + "thick", + "black", + "top and bottom" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 22, + 9, + 0, + 21, + 0, + "IMAGE" + ], + [ + 37, + 21, + 0, + 35, + 0, + "IMAGE" + ], + [ + 50, + 47, + 0, + 48, + 0, + "IMAGE" + ], + [ + 51, + 48, + 0, + 49, + 0, + 
"IMAGE" + ], + [ + 52, + 50, + 0, + 51, + 0, + "IMAGE" + ], + [ + 53, + 51, + 0, + 52, + 0, + "IMAGE" + ], + [ + 54, + 53, + 0, + 54, + 0, + "IMAGE" + ], + [ + 55, + 54, + 0, + 55, + 0, + "IMAGE" + ], + [ + 58, + 54, + 1, + 59, + 0, + "STRING" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo3_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo3_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..1f25343283e84eb494ef7745ccfb4b1269c7930a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Graphics/Template/CR_Simple_Meme_Template_Demo3_v01.json @@ -0,0 +1,302 @@ +{ + "last_node_id": 18, + "last_link_id": 27, + "nodes": [ + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 790, + 30 + ], + "size": { + "0": 270, + "1": 360 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 23 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 17, + "type": "CR Simple Meme Template", + "pos": [ + 310, + 30 + ], + "size": { + "0": 400, + "1": 362 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 24 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "show_help", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CR Simple Meme Template" + }, + "widgets_values": [ + "One Does Not Simply ... MEME IN COMFY", + "text_top", + "text_bottom", + "impact.ttf", + 150, + "white", + "none", + "black", + "top and bottom", + "#000000", + "#000000" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 18, + "type": "ImpactMakeImageBatch", + "pos": [ + 40, + 30 + ], + "size": [ + 180, + 90 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 25 + }, + { + "name": "image2", + "type": "IMAGE", + "link": 26 + }, + { + "name": "image3", + "type": "IMAGE", + "link": 27 + }, + { + "name": "image4", + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImpactMakeImageBatch" + } + }, + { + "id": 4, + "type": "LoadImage", + "pos": [ + -750, + 10 + ], + "size": { + "0": 210, + "1": 360 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00007_.png", + "image" + ] + }, + { + "id": 9, + "type": "LoadImage", + "pos": [ + -500, + 10 + ], + "size": [ + 210, + 360 + ], + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00008_.png", + "image" + ] + }, + { + "id": 13, + "type": "LoadImage", + "pos": [ + -250, + 10 + 
], + "size": [ + 210, + 360 + ], + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "SDXL10__00009_ (1).png", + "image" + ] + } + ], + "links": [ + [ + 23, + 17, + 0, + 16, + 0, + "IMAGE" + ], + [ + 24, + 18, + 0, + 17, + 0, + "IMAGE" + ], + [ + 25, + 4, + 0, + 18, + 0, + "IMAGE" + ], + [ + 26, + 9, + 0, + 18, + 1, + "IMAGE" + ], + [ + 27, + 13, + 0, + 18, + 2, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelGradientMerge_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelGradientMerge_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..2cb756e80f834242825c1e40760001738b9729d1 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelGradientMerge_v01b.json @@ -0,0 +1,3072 @@ +{ + "last_node_id": 243, + "last_link_id": 588, + "nodes": [ + { + "id": 125, + "type": "UpscaleModelLoader", + "pos": [ + 2271.464720934434, + -42.51139164477244 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "UPSCALE_MODEL", + "type": "UPSCALE_MODEL", + "links": [ + 363 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "UpscaleModelLoader" + }, + "widgets_values": [ + "RealESRGAN_x2.pth" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 177, + "type": "Note", + "pos": [ + -507.1336667459445, + 828.9509770334472 + ], + "size": { + "0": 315.0569152832031, + "1": 64.73121643066406 + }, + "flags": {}, + "order": 1, + "mode": 0, + "title": "Save Model", + "properties": { + "text": "" + }, + "widgets_values": [ + "To unhide this node, right click then Mode > Always\n\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 174, + "type": "VAELoader", + "pos": [ + 68.82123096767066, + 238.46804881065552 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 503 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 205, + "type": "CheckpointLoaderSimple", + "pos": [ + 630, + -50 + ], + "size": { + "0": 458, + "1": 132 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 538 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 531 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null + } + ], + "title": "XL Refiner Model", + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL\\sd_xl_refiner_1.0.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 206, + "type": "SeargePromptText", + "pos": [ + 671.3846854124997, + 386.538436474219 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 532, + 533, + 534 + ], + "shape": 3, + 
"slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SeargePromptText" + }, + "widgets_values": [ + "A vibrant 20-year-old woman with a free-spirited style, strolling gracefully through a picturesque meadow adorned with a myriad of colorful flowers. She radiates the essence of summer, her flowing dress and long, sun-kissed hair adding to the natural beauty of the scene" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 207, + "type": "SeargePromptText", + "pos": [ + 671.3846854124997, + 666.5384364742179 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 535, + 536, + 537 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SeargePromptText" + }, + "widgets_values": [ + "" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 208, + "type": "SeargeSamplerInputs", + "pos": [ + 1641.0815801410135, + 62.855455210877295 + ], + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "sampler_name", + "type": "SAMPLER_NAME", + "links": [ + 528 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "scheduler", + "type": "SCHEDULER_NAME", + "links": [ + 529 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SeargeSamplerInputs" + }, + "widgets_values": [ + "dpmpp_2m", + "karras" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 160, + "type": "ModelMergeSimple", + "pos": [ + -1318.4105971698255, + 476.88168464943556 + ], + "size": { + "0": 230, + "1": 80 + }, + "flags": { + "collapsed": false + }, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "model1", + "type": "MODEL", + "link": 575 + }, + { + "name": "model2", + "type": "MODEL", + "link": 574 + }, + { + "name": "ratio", + "type": "FLOAT", + "link": 566, + "widget": { + "name": "ratio", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0, + "max": 1, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 464 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Model Merge", + "properties": { + "Node name for S&R": "ModelMergeSimple" + }, + "widgets_values": [ + 0.5 + ] + }, + { + "id": 172, + "type": "CR LoRA Stack", + "pos": [ + -972.1116192382817, + 468.55508251673456 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 461 + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 462 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL LoRA Stack 2", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 183, + "type": "Reroute", + "pos": [ + 1430, + 100 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 489 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 547 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 188, + "type": "Reroute", + "pos": [ + 510, + 170 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 503 + } + 
], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 504 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 209, + "type": "Reroute", + "pos": [ + 1430, + 250 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 538 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 524 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 95, + "type": "EmptyLatentImage", + "pos": [ + 530, + -160 + ], + "size": { + "0": 210, + "1": 74 + }, + "flags": { + "collapsed": true + }, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 540, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 541, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 542, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 549 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 161, + "type": "CLIPMergeSimple", + "pos": [ + -1320.2519182689223, + 609.9827772677869 + ], + "size": { + "0": 230, + "1": 80 + }, + "flags": { + "collapsed": false + }, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "clip1", + "type": "CLIP", + "link": 555 + }, + { + "name": "clip2", + "type": "CLIP", + "link": 556 + }, + { + "name": "ratio", + "type": "FLOAT", + "link": 567, + "widget": { + "name": "ratio", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0, + "max": 1, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 463 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "CLIP Merge", + "properties": { + "Node name for S&R": "CLIPMergeSimple" + }, + "widgets_values": [ + 0.5 + ] + }, + { + "id": 212, + "type": "Reroute", + "pos": [ + 1150, + -190 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 549 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 550 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 211, + "type": "Reroute", + "pos": [ + 1430, + 140 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 550 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 546 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 185, + "type": "Reroute", + "pos": [ + -220, + 410 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 494 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 551 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 167, + "type": 
"CheckpointSave", + "pos": [ + -505.1336667459445, + 656.9509770334475 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 40, + "mode": 2, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 468 + }, + { + "name": "clip", + "type": "CLIP", + "link": 467 + }, + { + "name": "vae", + "type": "VAE", + "link": 498 + } + ], + "title": "Checkpoint Save", + "properties": { + "Node name for S&R": "CheckpointSave" + }, + "widgets_values": [ + "checkpoints/MyModel" + ] + }, + { + "id": 184, + "type": "Reroute", + "pos": [ + -220, + 470 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 495 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 552 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 197, + "type": "Reroute", + "pos": [ + 480, + 410 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 551 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 543 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 196, + "type": "Reroute", + "pos": [ + 480, + 470 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 552 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 530 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 189, + "type": "Reroute", + "pos": [ + 1430, + 210 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 543 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 521 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 204, + "type": "SeargeSDXLPromptEncoder", + "pos": [ + 1230, + 390 + ], + "size": { + "0": 311.32244873046875, + "1": 415.3662414550781 + }, + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "base_clip", + "type": "CLIP", + "link": 530 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 531 + }, + { + "name": "pos_g", + "type": "STRING", + "link": 532, + "widget": { + "name": "pos_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_G" + } + ] + } + }, + { + "name": "pos_l", + "type": "STRING", + "link": 533, + "widget": { + "name": "pos_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_L" + } + ] + } + }, + { + "name": "pos_r", + "type": "STRING", + "link": 534, + "widget": { + "name": "pos_r", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_R" + } + ] + } + }, + { + "name": "neg_g", + "type": "STRING", + "link": 535, + "widget": { + "name": "neg_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_G" + } + ] + } + }, + { + "name": "neg_l", + "type": "STRING", + "link": 536, + "widget": { + "name": "neg_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_L" + } + ] + } + }, + { + "name": "neg_r", + "type": "STRING", + "link": 537, + "widget": { + "name": "neg_r", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_R" + } + ] + } + } + ], + "outputs": [ + { + "name": "base_positive", + "type": "CONDITIONING", + "links": [ + 522 + ], + "shape": 3, + 
"slot_index": 0 + }, + { + "name": "base_negative", + "type": "CONDITIONING", + "links": [ + 523 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "links": [ + 525 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "links": [ + 526 + ], + "shape": 3, + "slot_index": 3 + } + ], + "title": "XL Prompt Encoder", + "properties": { + "Node name for S&R": "SeargeSDXLPromptEncoder" + }, + "widgets_values": [ + "POS_G", + "POS_L", + "POS_R", + "NEG_G", + "NEG_L", + "NEG_R", + 4096, + 4096, + 0, + 0, + 4096, + 4096, + 6, + 2.5, + 2048, + 2048 + ] + }, + { + "id": 118, + "type": "VAEDecode", + "pos": [ + 2030, + 200 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 548 + }, + { + "name": "vae", + "type": "VAE", + "link": 504 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 341, + 364 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 123, + "type": "PreviewImage", + "pos": [ + 2240, + 210 + ], + "size": { + "0": 520, + "1": 800 + }, + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 341 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 137, + "type": "ImageUpscaleWithModel", + "pos": [ + 2271.464720934434, + 67.48860835522774 + ], + "size": { + "0": 241.79998779296875, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "upscale_model", + "type": "UPSCALE_MODEL", + "link": 363 + }, + { + "name": "image", + "type": "IMAGE", + "link": 364 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 423 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageUpscaleWithModel" + } + }, + { + "id": 157, + "type": "Image Levels Adjustment", + "pos": [ + 2621.464720934434, + -42.51139164477244 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": { + "collapsed": false + }, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 423 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 424 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Levels Adjustment" + }, + "widgets_values": [ + 0, + 127.5, + 255 + ] + }, + { + "id": 135, + "type": "SaveImage", + "pos": [ + 2810, + 180 + ], + "size": { + "0": 520, + "1": 830 + }, + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 424, + "slot_index": 0 + } + ], + "properties": {}, + "widgets_values": [ + "Merge/Merge" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 173, + "type": "CR Apply LoRA Stack", + "pos": [ + -550, + 420 + ], + "size": { + "0": 210, + "1": 66 + }, + "flags": { + "collapsed": false + }, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 464 + }, + { + "name": "clip", + "type": "CLIP", + "link": 463 + }, + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 462 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 468, + 494 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 467, + 495 + ], + "shape": 3, + "slot_index": 1 + } + ], + "title": 
"Apply LoRA Stack", + "properties": { + "Node name for S&R": "CR Apply LoRA Stack" + } + }, + { + "id": 210, + "type": "CR SDXL Aspect Ratio", + "pos": [ + 70, + -190 + ], + "size": { + "0": 315, + "1": 238 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 540 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 541 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 542 + ], + "shape": 3, + "slot_index": 3 + } + ], + "title": "SDXL Aspect Ratio", + "properties": { + "Node name for S&R": "CR SDXL Aspect Ratio" + }, + "widgets_values": [ + 1024, + 1024, + "3:4 portrait 896x1152", + "Off", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 181, + "type": "CR Seed", + "pos": [ + 68.82123096767066, + 98.46804881065545 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "seed", + "type": "INT", + "links": [ + 489 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Seed", + "properties": { + "Node name for S&R": "CR Seed" + }, + "widgets_values": [ + 0, + "fixed" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 219, + "type": "CR Gradient Float", + "pos": [ + -1340, + -80 + ], + "size": { + "0": 250, + "1": 154 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 564, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 567, + 568 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "CLIP Gradient", + "properties": { + "Node name for S&R": "CR Gradient Float" + }, + "widgets_values": [ + 1, + 0, + 0, + 10, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 225, + "type": "CR Float To String", + "pos": [ + -1640, + 130 + ], + "size": { + "0": 220, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 571, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { + "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 570 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 224, + "type": "ShowText|pysssss", + "pos": [ + -1650, + 200 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 570, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0.7" + ] + }, + { + "id": 222, + "type": "CR Float To String", + "pos": [ + -1330, + 130 + ], + "size": { + "0": 220, + "1": 60 + }, + "flags": { + "collapsed": true + }, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "float_", + "type": "FLOAT", + "link": 568, + "widget": { + "name": "float_", + "config": [ + "FLOAT", + { 
+ "default": 0, + "min": 0, + "max": 1000000 + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 569 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Float To String" + }, + "widgets_values": [ + 0 + ] + }, + { + "id": 218, + "type": "CR Current Frame", + "pos": [ + -1330, + -160 + ], + "size": { + "0": 250, + "1": 80 + }, + "flags": { + "collapsed": true + }, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "index", + "type": "INT", + "link": 563, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "index", + "type": "INT", + "links": [ + 564, + 565 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Current Frame" + }, + "widgets_values": [ + 3, + "Yes" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 221, + "type": "ShowText|pysssss", + "pos": [ + -1330, + 200 + ], + "size": { + "0": 220, + "1": 80 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 569, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "forceInput": true + } + ] + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "0.7" + ] + }, + { + "id": 186, + "type": "VAELoader", + "pos": [ + -540, + 260 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 498 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae_fixed.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 162, + "type": "CLIPSetLastLayer", + "pos": [ + -1660, + 660 + ], + "size": { + "0": 220, + "1": 60 + }, + "flags": { + "pinned": false, + "collapsed": false + }, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 577 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 556 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPSetLastLayer" + }, + "widgets_values": [ + -1 + ] + }, + { + "id": 2, + "type": "CLIPSetLastLayer", + "pos": [ + -1660, + 550 + ], + "size": { + "0": 220, + "1": 60 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 576 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 555 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPSetLastLayer" + }, + "widgets_values": [ + -1 + ] + }, + { + "id": 73, + "type": "Note", + "pos": [ + -2520, + -280 + ], + "size": { + "0": 530, + "1": 150 + }, + "flags": {}, + "order": 10, + "mode": 0, + "title": "Workbook Details", + "properties": { + "text": "" + }, + "widgets_values": [ + "Workflow\nhttps://civitai.com/models/123125\n\nRequires CR Animation Nodes\nhttps://civitai.com/models/137333/comfyui-cr-animation-nodes\n\nSetember 2023\nAkatsuzi\n\n\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 217, + "type": "PrimitiveNode", + "pos": [ + -1650, + -230 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 563 + ], + 
"slot_index": 0, + "widget": { + "name": "index", + "config": [ + "INT", + { + "default": 1, + "min": -10000, + "max": 10000 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 4, + "increment" + ] + }, + { + "id": 229, + "type": "CR Apply Model Merge", + "pos": [ + -2120, + 70 + ], + "size": { + "0": 330, + "1": 146 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 587 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 575 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 576 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "model_mix_info", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "title": "Apply Model Merge 1", + "properties": { + "Node name for S&R": "CR Apply Model Merge" + }, + "widgets_values": [ + "Recursive", + "Yes", + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 171, + "type": "CR LoRA Stack", + "pos": [ + -971.3108651981408, + 96.30713404293414 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 461 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL LoRA Stack 1", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 226, + "type": "CR Apply Model Merge", + "pos": [ + -2120, + 450 + ], + "size": { + "0": 330, + "1": 146 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 588 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 574 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 577 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "model_mix_info", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "title": "Apply Model Merge 2", + "properties": { + "Node name for S&R": "CR Apply Model Merge" + }, + "widgets_values": [ + "Recursive", + "Yes", + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 220, + "type": "CR Gradient Float", + "pos": [ + -1670, + -80 + ], + "size": { + "0": 260, + "1": 154 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "current_frame", + "type": "INT", + "link": 565, + "widget": { + "name": "current_frame", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 9999, + "step": 1 + } + ] + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 566, + 571 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Model Gradient", + "properties": { + "Node name for S&R": "CR Gradient Float" + }, + "widgets_values": [ + 1, + 0, + 0, + 10, + 0, + "Lerp" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 242, + "type": "Note", + "pos": [ + -1340, + 790 + ], + "size": { + "0": 220, + "1": 70 + }, + "flags": {}, + "order": 13, + "mode": 0, + "title": "Merging", + "properties": { + "text": "" + }, + "widgets_values": [ + "The output from the two stack merges are merged here\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 243, + "type": "Note", + "pos": [ + -2010, + 660 + ], + "size": { + "0": 220, + "1": 
70 + }, + "flags": {}, + "order": 14, + "mode": 0, + "title": "Merging", + "properties": { + "text": "" + }, + "widgets_values": [ + "The two model merge stacks are each merged here\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 241, + "type": "Note", + "pos": [ + -1010, + -290 + ], + "size": { + "0": 270, + "1": 150 + }, + "flags": {}, + "order": 15, + "mode": 0, + "title": "Gradients", + "properties": { + "text": "" + }, + "widgets_values": [ + "The gradients will provide 10 merges stepping between the merged models outputs from stacks 1 and 2\n\nSet the Batch Count in Queue Prompt to 10\n\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 239, + "type": "CR Model Merge Stack", + "pos": [ + -2480, + 70 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 587 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL Model Merge Stack 1", + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SDXL\\rundiffusionXL_beta.safetensors", + 0.5, + 0.5, + "On", + "SDXL\\nijiDiffusionXlBase1_v10.safetensors", + 0.5, + 0.5, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 240, + "type": "CR Model Merge Stack", + "pos": [ + -2480, + 450 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 588 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL Model Merge Stack 2", + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SDXL\\dreamshaperXL10_alpha2Xl10.safetensors", + 0.5, + 0.5, + "On", + "SDXL\\copaxRealisticXLSDXL1_v2.safetensors", + 0.5, + 0.5, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 203, + "type": "SeargeSDXLSampler2", + "pos": [ + 1641.0815801410135, + 232.8554552108752 + ], + "size": { + "0": 320, + "1": 620 + }, + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "base_model", + "type": "MODEL", + "link": 521 + }, + { + "name": "base_positive", + "type": "CONDITIONING", + "link": 522 + }, + { + "name": "base_negative", + "type": "CONDITIONING", + "link": 523 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 524 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 525 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 526 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 546 + }, + { + "name": "sampler_name", + "type": "SAMPLER_NAME", + "link": 528 + }, + { + "name": "scheduler", + "type": "SCHEDULER_NAME", + "link": 529 + }, + { + "name": "noise_seed", + "type": "INT", + "link": 547, + "widget": { + "name": "noise_seed", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 548 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "SDXL Mix Sampler", + "properties": { + "Node name for S&R": "SeargeSDXLSampler2" + }, + "widgets_values": [ + 602234572132077, + "randomize", + 24, + 8, + 1, + 1, + 3, + 1, + 1 + ], + "color": "#222", + 
"bgcolor": "#000" + } + ], + "links": [ + [ + 341, + 118, + 0, + 123, + 0, + "IMAGE" + ], + [ + 363, + 125, + 0, + 137, + 0, + "UPSCALE_MODEL" + ], + [ + 364, + 118, + 0, + 137, + 1, + "IMAGE" + ], + [ + 423, + 137, + 0, + 157, + 0, + "IMAGE" + ], + [ + 424, + 157, + 0, + 135, + 0, + "IMAGE" + ], + [ + 461, + 171, + 0, + 172, + 0, + "LORA_STACK" + ], + [ + 462, + 172, + 0, + 173, + 2, + "LORA_STACK" + ], + [ + 463, + 161, + 0, + 173, + 1, + "CLIP" + ], + [ + 464, + 160, + 0, + 173, + 0, + "MODEL" + ], + [ + 467, + 173, + 1, + 167, + 1, + "CLIP" + ], + [ + 468, + 173, + 0, + 167, + 0, + "MODEL" + ], + [ + 489, + 181, + 0, + 183, + 0, + "*" + ], + [ + 494, + 173, + 0, + 185, + 0, + "*" + ], + [ + 495, + 173, + 1, + 184, + 0, + "*" + ], + [ + 498, + 186, + 0, + 167, + 2, + "VAE" + ], + [ + 503, + 174, + 0, + 188, + 0, + "*" + ], + [ + 504, + 188, + 0, + 118, + 1, + "VAE" + ], + [ + 521, + 189, + 0, + 203, + 0, + "MODEL" + ], + [ + 522, + 204, + 0, + 203, + 1, + "CONDITIONING" + ], + [ + 523, + 204, + 1, + 203, + 2, + "CONDITIONING" + ], + [ + 524, + 209, + 0, + 203, + 3, + "MODEL" + ], + [ + 525, + 204, + 2, + 203, + 4, + "CONDITIONING" + ], + [ + 526, + 204, + 3, + 203, + 5, + "CONDITIONING" + ], + [ + 528, + 208, + 0, + 203, + 7, + "SAMPLER_NAME" + ], + [ + 529, + 208, + 1, + 203, + 8, + "SCHEDULER_NAME" + ], + [ + 530, + 196, + 0, + 204, + 0, + "CLIP" + ], + [ + 531, + 205, + 1, + 204, + 1, + "CLIP" + ], + [ + 532, + 206, + 0, + 204, + 2, + "STRING" + ], + [ + 533, + 206, + 0, + 204, + 3, + "STRING" + ], + [ + 534, + 206, + 0, + 204, + 4, + "STRING" + ], + [ + 535, + 207, + 0, + 204, + 5, + "STRING" + ], + [ + 536, + 207, + 0, + 204, + 6, + "STRING" + ], + [ + 537, + 207, + 0, + 204, + 7, + "STRING" + ], + [ + 538, + 205, + 0, + 209, + 0, + "*" + ], + [ + 540, + 210, + 0, + 95, + 0, + "INT" + ], + [ + 541, + 210, + 1, + 95, + 1, + "INT" + ], + [ + 542, + 210, + 3, + 95, + 2, + "INT" + ], + [ + 543, + 197, + 0, + 189, + 0, + "*" + ], + [ + 546, + 211, + 0, + 203, + 6, + "LATENT" + ], + [ + 547, + 183, + 0, + 203, + 9, + "INT" + ], + [ + 548, + 203, + 0, + 118, + 0, + "LATENT" + ], + [ + 549, + 95, + 0, + 212, + 0, + "*" + ], + [ + 550, + 212, + 0, + 211, + 0, + "*" + ], + [ + 551, + 185, + 0, + 197, + 0, + "*" + ], + [ + 552, + 184, + 0, + 196, + 0, + "*" + ], + [ + 555, + 2, + 0, + 161, + 0, + "CLIP" + ], + [ + 556, + 162, + 0, + 161, + 1, + "CLIP" + ], + [ + 563, + 217, + 0, + 218, + 0, + "INT" + ], + [ + 564, + 218, + 0, + 219, + 0, + "INT" + ], + [ + 565, + 218, + 0, + 220, + 0, + "INT" + ], + [ + 566, + 220, + 0, + 160, + 2, + "FLOAT" + ], + [ + 567, + 219, + 0, + 161, + 2, + "FLOAT" + ], + [ + 568, + 219, + 0, + 222, + 0, + "FLOAT" + ], + [ + 569, + 222, + 0, + 221, + 0, + "STRING" + ], + [ + 570, + 225, + 0, + 224, + 0, + "STRING" + ], + [ + 571, + 220, + 0, + 225, + 0, + "FLOAT" + ], + [ + 574, + 226, + 0, + 160, + 1, + "MODEL" + ], + [ + 575, + 229, + 0, + 160, + 0, + "MODEL" + ], + [ + 576, + 229, + 1, + 2, + 0, + "CLIP" + ], + [ + 577, + 226, + 1, + 162, + 0, + "CLIP" + ], + [ + 587, + 239, + 0, + 229, + 0, + "MODEL_STACK" + ], + [ + 588, + 240, + 0, + 226, + 0, + "MODEL_STACK" + ] + ], + "groups": [ + { + "title": "Prompt", + "bounding": [ + 626, + 264, + 482, + 678 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "XL Models", + "bounding": [ + -2524, + -25, + 772, + 835 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "XL LoRAs", + "bounding": [ + -1002, + -6, + 383, + 847 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": 
"Sampling", + "bounding": [ + 1608, + -32, + 386, + 968 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Upscale and Levels", + "bounding": [ + 2238, + -130, + 731, + 233 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "SDXL Model Gradient Merge with Model Merge Stacks", + "bounding": [ + -2563, + -385, + 2447, + 1430 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Merge Models", + "bounding": [ + -1346, + 394, + 287, + 335 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Setup", + "bounding": [ + 33, + -282, + 397, + 629 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Save Model", + "bounding": [ + -535, + 569, + 376, + 363 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Model Preview", + "bounding": [ + -12, + -381, + 3380, + 1429 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Gradients", + "bounding": [ + -1693, + -318, + 635, + 675 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelMerge_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelMerge_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..3638d84001200cbeef39e5c9e1cd1e270fd65a56 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SDXL_MultiModelMerge_v01b.json @@ -0,0 +1,2222 @@ +{ + "last_node_id": 245, + "last_link_id": 596, + "nodes": [ + { + "id": 125, + "type": "UpscaleModelLoader", + "pos": [ + 2269.567273365684, + -98.5369286336395 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "UPSCALE_MODEL", + "type": "UPSCALE_MODEL", + "links": [ + 363 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "UpscaleModelLoader" + }, + "widgets_values": [ + "RealESRGAN_x2.pth" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 174, + "type": "VAELoader", + "pos": [ + 68.82123096767066, + 238.46804881065552 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 503 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 205, + "type": "CheckpointLoaderSimple", + "pos": [ + 630, + -50 + ], + "size": { + "0": 458, + "1": 132 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 538 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 531 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null + } + ], + "title": "XL Refiner Model", + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "SDXL\\sd_xl_refiner_1.0.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 208, + "type": "SeargeSamplerInputs", + "pos": [ + 1641.0815801410135, + 62.855455210877295 + ], + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "sampler_name", + "type": "SAMPLER_NAME", + "links": [ + 528 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "scheduler", + "type": 
"SCHEDULER_NAME", + "links": [ + 529 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SeargeSamplerInputs" + }, + "widgets_values": [ + "dpmpp_2m", + "karras" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 172, + "type": "CR LoRA Stack", + "pos": [ + -972.1116192382817, + 468.55508251673456 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 461 + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 462 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL LoRA Stack 2", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 183, + "type": "Reroute", + "pos": [ + 1430, + 100 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 489 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 547 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 188, + "type": "Reroute", + "pos": [ + 510, + 170 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 503 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 504 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 209, + "type": "Reroute", + "pos": [ + 1430, + 250 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 538 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 524 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 95, + "type": "EmptyLatentImage", + "pos": [ + 530, + -160 + ], + "size": { + "0": 210, + "1": 74 + }, + "flags": { + "collapsed": true + }, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 540, + "widget": { + "name": "width", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "height", + "type": "INT", + "link": 541, + "widget": { + "name": "height", + "config": [ + "INT", + { + "default": 512, + "min": 64, + "max": 8192, + "step": 8 + } + ] + } + }, + { + "name": "batch_size", + "type": "INT", + "link": 542, + "widget": { + "name": "batch_size", + "config": [ + "INT", + { + "default": 1, + "min": 1, + "max": 64 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 549 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 212, + "type": "Reroute", + "pos": [ + 1150, + -190 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 549 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 550 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 211, + "type": "Reroute", + "pos": [ + 1430, + 140 + ], + "size": [ + 90.4, + 26 + ], + 
"flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 550 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 546 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 185, + "type": "Reroute", + "pos": [ + -220, + 410 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 494 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 551 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 167, + "type": "CheckpointSave", + "pos": [ + -505.1336667459445, + 656.9509770334475 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 26, + "mode": 2, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 468 + }, + { + "name": "clip", + "type": "CLIP", + "link": 467 + }, + { + "name": "vae", + "type": "VAE", + "link": 498 + } + ], + "title": "Checkpoint Save", + "properties": { + "Node name for S&R": "CheckpointSave" + }, + "widgets_values": [ + "checkpoints/MyModel" + ] + }, + { + "id": 197, + "type": "Reroute", + "pos": [ + 480, + 410 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 551 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 543 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 196, + "type": "Reroute", + "pos": [ + 480, + 470 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 596 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 530 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 189, + "type": "Reroute", + "pos": [ + 1430, + 210 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 543 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 521 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 204, + "type": "SeargeSDXLPromptEncoder", + "pos": [ + 1230, + 390 + ], + "size": { + "0": 311.32244873046875, + "1": 415.3662414550781 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "base_clip", + "type": "CLIP", + "link": 530 + }, + { + "name": "refiner_clip", + "type": "CLIP", + "link": 531 + }, + { + "name": "pos_g", + "type": "STRING", + "link": 532, + "widget": { + "name": "pos_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_G" + } + ] + } + }, + { + "name": "pos_l", + "type": "STRING", + "link": 533, + "widget": { + "name": "pos_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_L" + } + ] + } + }, + { + "name": "pos_r", + "type": "STRING", + "link": 534, + "widget": { + "name": "pos_r", + "config": [ + "STRING", + { + "multiline": true, + "default": "POS_R" + } + ] + } + }, + { + "name": "neg_g", + "type": "STRING", + "link": 535, + "widget": { + "name": "neg_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_G" + } + ] + } + }, + { + "name": "neg_l", + "type": "STRING", + "link": 536, + "widget": { + "name": "neg_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_L" 
+ } + ] + } + }, + { + "name": "neg_r", + "type": "STRING", + "link": 537, + "widget": { + "name": "neg_r", + "config": [ + "STRING", + { + "multiline": true, + "default": "NEG_R" + } + ] + } + } + ], + "outputs": [ + { + "name": "base_positive", + "type": "CONDITIONING", + "links": [ + 522 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "base_negative", + "type": "CONDITIONING", + "links": [ + 523 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "links": [ + 525 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "links": [ + 526 + ], + "shape": 3, + "slot_index": 3 + } + ], + "title": "XL Prompt Encoder", + "properties": { + "Node name for S&R": "SeargeSDXLPromptEncoder" + }, + "widgets_values": [ + "POS_G", + "POS_L", + "POS_R", + "NEG_G", + "NEG_L", + "NEG_R", + 4096, + 4096, + 0, + 0, + 4096, + 4096, + 6, + 2.5, + 2048, + 2048 + ] + }, + { + "id": 203, + "type": "SeargeSDXLSampler2", + "pos": [ + 1641.0815801410135, + 232.8554552108752 + ], + "size": { + "0": 320, + "1": 620 + }, + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "base_model", + "type": "MODEL", + "link": 521 + }, + { + "name": "base_positive", + "type": "CONDITIONING", + "link": 522 + }, + { + "name": "base_negative", + "type": "CONDITIONING", + "link": 523 + }, + { + "name": "refiner_model", + "type": "MODEL", + "link": 524 + }, + { + "name": "refiner_positive", + "type": "CONDITIONING", + "link": 525 + }, + { + "name": "refiner_negative", + "type": "CONDITIONING", + "link": 526 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 546 + }, + { + "name": "sampler_name", + "type": "SAMPLER_NAME", + "link": 528 + }, + { + "name": "scheduler", + "type": "SCHEDULER_NAME", + "link": 529 + }, + { + "name": "noise_seed", + "type": "INT", + "link": 547, + "widget": { + "name": "noise_seed", + "config": [ + "INT", + { + "default": 0, + "min": 0, + "max": 18446744073709552000 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 548 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "SDXL Mix Sampler", + "properties": { + "Node name for S&R": "SeargeSDXLSampler2" + }, + "widgets_values": [ + 68907621190797, + "randomize", + 20, + 7, + 0.8, + 1, + 0, + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 118, + "type": "VAEDecode", + "pos": [ + 2030, + 200 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 548 + }, + { + "name": "vae", + "type": "VAE", + "link": 504 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 364 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 137, + "type": "ImageUpscaleWithModel", + "pos": [ + 2269.567273365684, + 11.463071366360666 + ], + "size": { + "0": 241.79998779296875, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "upscale_model", + "type": "UPSCALE_MODEL", + "link": 363 + }, + { + "name": "image", + "type": "IMAGE", + "link": 364 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 423 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageUpscaleWithModel" + } + }, + { + "id": 157, + "type": "Image Levels Adjustment", + "pos": [ + 2619.567273365684, 
+ -98.5369286336395 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": { + "collapsed": false + }, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 423 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 424 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Levels Adjustment" + }, + "widgets_values": [ + 0, + 127.5, + 255 + ] + }, + { + "id": 173, + "type": "CR Apply LoRA Stack", + "pos": [ + -550, + 420 + ], + "size": { + "0": 210, + "1": 66 + }, + "flags": { + "collapsed": false + }, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 593 + }, + { + "name": "clip", + "type": "CLIP", + "link": 592 + }, + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 462 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 468, + 494 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 467, + 495 + ], + "shape": 3, + "slot_index": 1 + } + ], + "title": "Apply LoRA Stack", + "properties": { + "Node name for S&R": "CR Apply LoRA Stack" + } + }, + { + "id": 181, + "type": "CR Seed", + "pos": [ + 68.82123096767066, + 98.46804881065545 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "seed", + "type": "INT", + "links": [ + 489 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Seed", + "properties": { + "Node name for S&R": "CR Seed" + }, + "widgets_values": [ + 0, + "fixed" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 186, + "type": "VAELoader", + "pos": [ + -540, + 260 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 498 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae_fixed.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 171, + "type": "CR LoRA Stack", + "pos": [ + -971.3108651981408, + 96.30713404293414 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 461 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL LoRA Stack 1", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 184, + "type": "Reroute", + "pos": [ + -220, + 470 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 495 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 596 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 210, + "type": "CR SDXL Aspect Ratio", + "pos": [ + 70, + -190 + ], + "size": { + "0": 315, + "1": 238 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 540 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 541 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "upscale_factor", + "type": "FLOAT", + "links": [], + 
"shape": 3, + "slot_index": 2 + }, + { + "name": "batch_size", + "type": "INT", + "links": [ + 542 + ], + "shape": 3, + "slot_index": 3 + } + ], + "title": "SDXL Aspect Ratio", + "properties": { + "Node name for S&R": "CR SDXL Aspect Ratio" + }, + "widgets_values": [ + 1024, + 1024, + "3:4 portrait 896x1152", + "Off", + 2, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 162, + "type": "CLIPSetLastLayer", + "pos": [ + -1320, + 498.4568778542175 + ], + "size": { + "0": 220, + "1": 60 + }, + "flags": { + "pinned": false, + "collapsed": false + }, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 577 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 592 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPSetLastLayer" + }, + "widgets_values": [ + -1 + ] + }, + { + "id": 226, + "type": "CR Apply Model Merge", + "pos": [ + -1700, + 418.4568778542175 + ], + "size": { + "0": 330, + "1": 146 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 594 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 593 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 577 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "model_mix_info", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "title": "Apply Model Merge", + "properties": { + "Node name for S&R": "CR Apply Model Merge" + }, + "widgets_values": [ + "Recursive", + "Yes", + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 243, + "type": "Note", + "pos": [ + -1590, + 618.4568778542175 + ], + "size": { + "0": 220, + "1": 70 + }, + "flags": {}, + "order": 8, + "mode": 0, + "title": "Merging", + "properties": { + "text": "" + }, + "widgets_values": [ + "The model merge stacks are each merged here\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 73, + "type": "Note", + "pos": [ + -2120, + -210 + ], + "size": { + "0": 530, + "1": 150 + }, + "flags": {}, + "order": 9, + "mode": 0, + "title": "Workbook Details", + "properties": { + "text": "" + }, + "widgets_values": [ + "Workflow\nhttps://civitai.com/models/145275\n\nSetember 2023\nAkatsuzi\n\n\n" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 240, + "type": "CR Model Merge Stack", + "pos": [ + -2080, + 100 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 595 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL Model Merge Stack", + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SDXL\\4Guofeng4_v10Beta.safetensors", + 1, + 1, + "On", + "SDXL\\xl6HEPHAISTOSSD10XLSFW_v21BakedVAEFP16Fix.safetensors", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 245, + "type": "CR Model Merge Stack", + "pos": [ + -2080, + 480 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 595 + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 594 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "XL Model Merge Stack", + "properties": { + "Node 
name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SDXL\\rundiffusionXL_beta.safetensors", + 1, + 1, + "On", + "SDXL\\copaxRealisticXLSDXL1_v2.safetensors", + 1, + 1, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 177, + "type": "Note", + "pos": [ + -507.1336667459445, + 828.9509770334472 + ], + "size": { + "0": 230, + "1": 60 + }, + "flags": {}, + "order": 11, + "mode": 0, + "title": "Save Model", + "properties": { + "text": "" + }, + "widgets_values": [ + "To unhide this node, right click then Mode > Always\n\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 207, + "type": "SeargePromptText", + "pos": [ + 670, + 670 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 535, + 536, + 537 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Prompt text input", + "properties": { + "Node name for S&R": "SeargePromptText" + }, + "widgets_values": [ + "" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 206, + "type": "SeargePromptText", + "pos": [ + 670, + 390 + ], + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 13, + "mode": 0, + "outputs": [ + { + "name": "prompt", + "type": "STRING", + "links": [ + 532, + 533, + 534 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "Prompt text input", + "properties": { + "Node name for S&R": "SeargePromptText" + }, + "widgets_values": [ + "A beautiful young woman staring into the abyss of infinity" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 135, + "type": "SaveImage", + "pos": [ + 2240, + 140 + ], + "size": { + "0": 720, + "1": 860 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 424, + "slot_index": 0 + } + ], + "properties": {}, + "widgets_values": [ + "Merge/Merge" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 363, + 125, + 0, + 137, + 0, + "UPSCALE_MODEL" + ], + [ + 364, + 118, + 0, + 137, + 1, + "IMAGE" + ], + [ + 423, + 137, + 0, + 157, + 0, + "IMAGE" + ], + [ + 424, + 157, + 0, + 135, + 0, + "IMAGE" + ], + [ + 461, + 171, + 0, + 172, + 0, + "LORA_STACK" + ], + [ + 462, + 172, + 0, + 173, + 2, + "LORA_STACK" + ], + [ + 467, + 173, + 1, + 167, + 1, + "CLIP" + ], + [ + 468, + 173, + 0, + 167, + 0, + "MODEL" + ], + [ + 489, + 181, + 0, + 183, + 0, + "*" + ], + [ + 494, + 173, + 0, + 185, + 0, + "*" + ], + [ + 495, + 173, + 1, + 184, + 0, + "*" + ], + [ + 498, + 186, + 0, + 167, + 2, + "VAE" + ], + [ + 503, + 174, + 0, + 188, + 0, + "*" + ], + [ + 504, + 188, + 0, + 118, + 1, + "VAE" + ], + [ + 521, + 189, + 0, + 203, + 0, + "MODEL" + ], + [ + 522, + 204, + 0, + 203, + 1, + "CONDITIONING" + ], + [ + 523, + 204, + 1, + 203, + 2, + "CONDITIONING" + ], + [ + 524, + 209, + 0, + 203, + 3, + "MODEL" + ], + [ + 525, + 204, + 2, + 203, + 4, + "CONDITIONING" + ], + [ + 526, + 204, + 3, + 203, + 5, + "CONDITIONING" + ], + [ + 528, + 208, + 0, + 203, + 7, + "SAMPLER_NAME" + ], + [ + 529, + 208, + 1, + 203, + 8, + "SCHEDULER_NAME" + ], + [ + 530, + 196, + 0, + 204, + 0, + "CLIP" + ], + [ + 531, + 205, + 1, + 204, + 1, + "CLIP" + ], + [ + 532, + 206, + 0, + 204, + 2, + "STRING" + ], + [ + 533, + 206, + 0, + 204, + 3, + "STRING" + ], + [ + 534, + 206, + 0, + 204, + 4, + "STRING" + ], + [ + 535, + 207, + 0, + 204, + 5, + "STRING" + ], + [ + 536, + 207, + 0, + 204, + 6, + "STRING" + ], + [ + 537, + 207, + 0, + 204, + 7, + "STRING" + ], 
+ [ + 538, + 205, + 0, + 209, + 0, + "*" + ], + [ + 540, + 210, + 0, + 95, + 0, + "INT" + ], + [ + 541, + 210, + 1, + 95, + 1, + "INT" + ], + [ + 542, + 210, + 3, + 95, + 2, + "INT" + ], + [ + 543, + 197, + 0, + 189, + 0, + "*" + ], + [ + 546, + 211, + 0, + 203, + 6, + "LATENT" + ], + [ + 547, + 183, + 0, + 203, + 9, + "INT" + ], + [ + 548, + 203, + 0, + 118, + 0, + "LATENT" + ], + [ + 549, + 95, + 0, + 212, + 0, + "*" + ], + [ + 550, + 212, + 0, + 211, + 0, + "*" + ], + [ + 551, + 185, + 0, + 197, + 0, + "*" + ], + [ + 577, + 226, + 1, + 162, + 0, + "CLIP" + ], + [ + 592, + 162, + 0, + 173, + 1, + "CLIP" + ], + [ + 593, + 226, + 0, + 173, + 0, + "MODEL" + ], + [ + 594, + 245, + 0, + 226, + 0, + "MODEL_STACK" + ], + [ + 595, + 240, + 0, + 245, + 0, + "MODEL_STACK" + ], + [ + 596, + 184, + 0, + 196, + 0, + "*" + ] + ], + "groups": [ + { + "title": "Prompt", + "bounding": [ + 626, + 264, + 482, + 678 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "XL Model Merge", + "bounding": [ + -2125, + -5, + 1064, + 849 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "XL LoRAs", + "bounding": [ + -1002, + -6, + 383, + 847 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Sampling", + "bounding": [ + 1608, + -32, + 386, + 968 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Upscale and Levels", + "bounding": [ + 2236, + -186, + 731, + 233 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "SDXL Model Merge with Model Merge Stacks", + "bounding": [ + -2168, + -383, + 2061, + 1432 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Setup", + "bounding": [ + 33, + -282, + 397, + 629 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Save Model", + "bounding": [ + -535, + 569, + 376, + 363 + ], + "color": "#3f789e", + "locked": false + }, + { + "title": "Model Preview", + "bounding": [ + -12, + -381, + 3033, + 1432 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SimpleMultiModelMerge_v01a.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SimpleMultiModelMerge_v01a.json new file mode 100644 index 0000000000000000000000000000000000000000..4114228c4541f8a4575b51cbdf92c1a2cd7ecc4a --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_SimpleMultiModelMerge_v01a.json @@ -0,0 +1,562 @@ +{ + "last_node_id": 38, + "last_link_id": 53, + "nodes": [ + { + "id": 10, + "type": "CheckpointSave", + "pos": [ + 1240, + 720 + ], + "size": { + "0": 310, + "1": 100 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 31 + }, + { + "name": "clip", + "type": "CLIP", + "link": 32 + }, + { + "name": "vae", + "type": "VAE", + "link": 13 + } + ], + "title": "Checkpoint Save", + "properties": { + "Node name for S&R": "CheckpointSave" + }, + "widgets_values": [ + "MergeModels/Merge" + ] + }, + { + "id": 21, + "type": "CR Apply LoRA Stack", + "pos": [ + 940, + 680 + ], + "size": { + "0": 210, + "1": 66 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 49 + }, + { + "name": "clip", + "type": "CLIP", + "link": 48 + }, + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 28 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 31 + ], + "shape": 3, + 
"slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 1 + } + ], + "title": "Apply LoRA Stack", + "properties": { + "Node name for S&R": "CR Apply LoRA Stack" + }, + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 25, + "type": "Save Text File", + "pos": [ + 1240, + 870 + ], + "size": { + "0": 310, + "1": 130 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 51 + } + ], + "properties": { + "Node name for S&R": "Save Text File" + }, + "widgets_values": [ + "./ComfyUI/output/MergeModels/[time(%Y-%m-%d)]", + "MergeInfo", + "_", + 4, + 4 + ] + }, + { + "id": 20, + "type": "CR LoRA Stack", + "pos": [ + 1050, + 250 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": 27 + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "LoRA Stack", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "None", + 1, + 1, + "Off", + "None", + 1, + 1, + "Off", + "SD1_5\\ArknightsDusk_10.safetensors", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 31, + "type": "CR Apply Model Merge", + "pos": [ + 540, + 680 + ], + "size": { + "0": 330, + "1": 146 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 52 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "model_mix_info", + "type": "STRING", + "links": [ + 51 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Apply Model Merge" + }, + "widgets_values": [ + "Recursive", + "Yes", + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 34, + "type": "Note", + "pos": [ + -100, + 650 + ], + "size": { + "0": 270, + "1": 150 + }, + "flags": {}, + "order": 3, + "mode": 0, + "title": "Gradients", + "properties": { + "text": "" + }, + "widgets_values": [ + "Model and CLIP ratios should add up to 1.0.\n\nIf normalise_ratios is on Yes, then the total ratios may be more or less than 1. 
The model merge will automatically normalise the ratios.\n\n" + ], + "color": "#233", + "bgcolor": "#355" + }, + { + "id": 32, + "type": "CR Model Merge Stack", + "pos": [ + -100, + 260 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 53 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SD1_5\\ComfyrollAnime_v1_fp16_pruned.safetensors", + 0.33, + 0.33, + "On", + "SD1_5\\7th_anime_v3_A.safetensors", + 0.33, + 0.33, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 33, + "type": "CR Model Merge Stack", + "pos": [ + 250, + 260 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 53 + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SD1_5\\mixProV4_v4.safetensors", + 0.33, + 0.33, + "Off", + "None", + 1, + 1, + "Off", + "SD1_5\\mixproyuki77mi_v10.safetensors", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 19, + "type": "CR LoRA Stack", + "pos": [ + 700, + 250 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "lora_stack", + "type": "LORA_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "LORA_STACK", + "type": "LORA_STACK", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "LoRA Stack", + "properties": { + "Node name for S&R": "CR LoRA Stack" + }, + "widgets_values": [ + "Off", + "SD1_5\\add_detail.safetensors", + 0.2, + 0.2, + "Off", + "None", + 1, + 1, + "Off", + "SD1_5\\ArknightsNian_20.safetensors", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 12, + "type": "VAELoader", + "pos": [ + 730, + 890 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 13, + 12, + 0, + 10, + 2, + "VAE" + ], + [ + 27, + 19, + 0, + 20, + 0, + "LORA_STACK" + ], + [ + 28, + 20, + 0, + 21, + 2, + "LORA_STACK" + ], + [ + 31, + 21, + 0, + 10, + 0, + "MODEL" + ], + [ + 32, + 21, + 1, + 10, + 1, + "CLIP" + ], + [ + 48, + 31, + 1, + 21, + 1, + "CLIP" + ], + [ + 49, + 31, + 0, + 21, + 0, + "MODEL" + ], + [ + 51, + 31, + 2, + 25, + 0, + "STRING" + ], + [ + 52, + 33, + 0, + 31, + 0, + "MODEL_STACK" + ], + [ + 53, + 32, + 0, + 33, + 0, + "MODEL_STACK" + ] + ], + "groups": [ + { + "title": "Stacked Model Merge Template (SDXL and 1.5)", + "bounding": [ + -158, + 154, + 1762, + 883 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_UltraSimpleMultiModelMerge_v01b.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model 
Merge/CR_UltraSimpleMultiModelMerge_v01b.json new file mode 100644 index 0000000000000000000000000000000000000000..5b673b5b6b9b4c30a3d78f7630fce0bc1b39c521 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Model Merge/CR_UltraSimpleMultiModelMerge_v01b.json @@ -0,0 +1,240 @@ +{ + "last_node_id": 39, + "last_link_id": 55, + "nodes": [ + { + "id": 12, + "type": "VAELoader", + "pos": [ + 320, + 598.3380063476562 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae.safetensors" + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 10, + "type": "CheckpointSave", + "pos": [ + 770, + 478.33800634765623 + ], + "size": { + "0": 310, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 54 + }, + { + "name": "clip", + "type": "CLIP", + "link": 55 + }, + { + "name": "vae", + "type": "VAE", + "link": 13 + } + ], + "title": "Checkpoint Save", + "properties": { + "Node name for S&R": "CheckpointSave" + }, + "widgets_values": [ + "MergeModels/Merge" + ] + }, + { + "id": 33, + "type": "CR Model Merge Stack", + "pos": [ + -90, + 258.33800634765623 + ], + "size": { + "0": 315, + "1": 322 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL_STACK", + "type": "MODEL_STACK", + "links": [ + 52 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Model Merge Stack" + }, + "widgets_values": [ + "On", + "SDXL\\mbbxlUltimate_v10RC.safetensors", + 0.5, + 0.5, + "On", + "SDXL\\dreamshaperXL10_alpha2Xl10.safetensors", + 0.5, + 0.5, + "Off", + "None", + 1, + 1 + ], + "color": "#222", + "bgcolor": "#000" + }, + { + "id": 31, + "type": "CR Apply Model Merge", + "pos": [ + 310, + 398.33800634765623 + ], + "size": { + "0": 330, + "1": 146 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "model_stack", + "type": "MODEL_STACK", + "link": 52 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 54 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 55 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "model_mix_info", + "type": "STRING", + "links": [], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CR Apply Model Merge" + }, + "widgets_values": [ + "Recursive", + "Yes", + 1 + ], + "color": "#222", + "bgcolor": "#000" + } + ], + "links": [ + [ + 13, + 12, + 0, + 10, + 2, + "VAE" + ], + [ + 52, + 33, + 0, + 31, + 0, + "MODEL_STACK" + ], + [ + 54, + 31, + 0, + 10, + 0, + "MODEL" + ], + [ + 55, + 31, + 1, + 10, + 1, + "CLIP" + ] + ], + "groups": [ + { + "title": "Ultra-Simple Model Merge Template (SDXL and 1.5)", + "bounding": [ + -158, + 152, + 1271, + 554 + ], + "color": "#3f789e", + "locked": false + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo1_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo1_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..732808ab9a670c33c4e847cd463e7b54bd48b878 --- 
/dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo1_v01.json @@ -0,0 +1,195 @@ +{ + "last_node_id": 25, + "last_link_id": 28, + "nodes": [ + { + "id": 3, + "type": "PreviewImage", + "pos": [ + 1330, + 330 + ], + "size": { + "0": 290, + "1": 330 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 26 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 5, + "type": "CR Upscale Image", + "pos": [ + 550, + 330 + ], + "size": { + "0": 310, + "1": 202 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 28 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Upscale Image" + }, + "widgets_values": [ + "1x_PixelSharpen_v2_strong.pth", + "rescale", + 1, + 1024, + "lanczos", + "true", + 8 + ] + }, + { + "id": 21, + "type": "CR Upscale Image", + "pos": [ + 940, + 330 + ], + "size": { + "0": 310, + "1": 202 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 25 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Upscale Image" + }, + "widgets_values": [ + "4x-UltraSharp.pth", + "rescale", + 2, + 1024, + "lanczos", + "true", + 8 + ] + }, + { + "id": 25, + "type": "Load Image Batch", + "pos": [ + 160, + 330 + ], + "size": { + "0": 315, + "1": 222 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "filename_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Load Image Batch" + }, + "widgets_values": [ + "incremental_image", + 0, + "Batch 001", + "E:\\Comfy Projects\\SDXL\\17 - Metal Cat\\plastic", + "*", + "false", + "true" + ] + } + ], + "links": [ + [ + 25, + 5, + 0, + 21, + 0, + "IMAGE" + ], + [ + 26, + 21, + 0, + 3, + 0, + "IMAGE" + ], + [ + 28, + 25, + 0, + 5, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo2_v01.json b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo2_v01.json new file mode 100644 index 0000000000000000000000000000000000000000..3a481466784047d054c9d82709026d831418d072 --- /dev/null +++ b/custom_nodes/ComfyUI_Comfyroll_CustomNodes/workflows/Upscale/Comfyroll_Upscale_Demo2_v01.json @@ -0,0 +1,198 @@ +{ + "last_node_id": 18, + "last_link_id": 22, + "nodes": [ + { + "id": 9, + "type": "CR Multi Upscale Stack", + "pos": [ + -140, + -260 + ], + "size": { + "0": 390, + "1": 250 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "upscale_stack", + "type": "UPSCALE_STACK", + "link": null + } + ], + "outputs": [ + { + "name": "UPSCALE_STACK", + "type": "UPSCALE_STACK", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Multi Upscale Stack" + }, + "widgets_values": [ + "Off", + "1x_ArtClarity.pth", + 1, + "On", + "1x_PixelSharpen_v2.pth", + 1, + "On", + "4x-UltraSharp.pth", + 2 + ] + }, + { + "id": 18, + "type": "Load Image Batch", + 
"pos": [ + -130, + 70 + ], + "size": { + "0": 315, + "1": 222 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "filename_text", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Load Image Batch" + }, + "widgets_values": [ + "incremental_image", + 0, + "Batch 001", + "E:\\Comfy Projects\\SDXL\\17 - Metal Cat\\plastic", + "*", + "false", + "true" + ] + }, + { + "id": 10, + "type": "CR Apply Multi Upscale", + "pos": [ + 320, + 70 + ], + "size": { + "0": 270, + "1": 126 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 21 + }, + { + "name": "upscale_stack", + "type": "UPSCALE_STACK", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CR Apply Multi Upscale" + }, + "widgets_values": [ + "lanczos", + "true", + 8 + ] + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 660, + -270 + ], + "size": [ + 390, + 560 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 10 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + } + ], + "links": [ + [ + 8, + 9, + 0, + 10, + 1, + "UPSCALE_STACK" + ], + [ + 10, + 10, + 0, + 8, + 0, + "IMAGE" + ], + [ + 21, + 18, + 0, + 10, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/.gitignore b/custom_nodes/ComfyUI_IPAdapter_plus/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..42b4d89400b633d0dac7e2de4d7ec88b32e6fa0d --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/.gitignore @@ -0,0 +1,3 @@ +/__pycache__/ +/models/*.bin +/models/*.safetensors diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapterPlus.py b/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapterPlus.py new file mode 100644 index 0000000000000000000000000000000000000000..782a4125d1494d401b20c5f21098c55147f28e84 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapterPlus.py @@ -0,0 +1,765 @@ +import torch +import contextlib +import os +import math + +import comfy.utils +import comfy.model_management +from comfy.clip_vision import clip_preprocess +from comfy.ldm.modules.attention import optimized_attention +import folder_paths + +from torch import nn +from PIL import Image +import torch.nn.functional as F +import torchvision.transforms as TT + +from .resampler import Resampler + +# set the models directory backward compatible +GLOBAL_MODELS_DIR = os.path.join(folder_paths.models_dir, "ipadapter") +MODELS_DIR = GLOBAL_MODELS_DIR if os.path.isdir(GLOBAL_MODELS_DIR) else os.path.join(os.path.dirname(os.path.realpath(__file__)), "models") +if "ipadapter" not in folder_paths.folder_names_and_paths: + folder_paths.folder_names_and_paths["ipadapter"] = ([MODELS_DIR], folder_paths.supported_pt_extensions) +else: + folder_paths.folder_names_and_paths["ipadapter"][1].update(folder_paths.supported_pt_extensions) + +class MLPProjModel(torch.nn.Module): + """SD model with image prompt""" + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024): + super().__init__() + + self.proj = torch.nn.Sequential( + torch.nn.Linear(clip_embeddings_dim, clip_embeddings_dim), + torch.nn.GELU(), + 
torch.nn.Linear(clip_embeddings_dim, cross_attention_dim), + torch.nn.LayerNorm(cross_attention_dim) + ) + + def forward(self, image_embeds): + clip_extra_context_tokens = self.proj(image_embeds) + return clip_extra_context_tokens + +class ImageProjModel(nn.Module): + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4): + super().__init__() + + self.cross_attention_dim = cross_attention_dim + self.clip_extra_context_tokens = clip_extra_context_tokens + self.proj = nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) + self.norm = nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds): + embeds = image_embeds + clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim) + clip_extra_context_tokens = self.norm(clip_extra_context_tokens) + return clip_extra_context_tokens + +class To_KV(nn.Module): + def __init__(self, state_dict): + super().__init__() + + self.to_kvs = nn.ModuleDict() + for key, value in state_dict.items(): + self.to_kvs[key.replace(".weight", "").replace(".", "_")] = nn.Linear(value.shape[1], value.shape[0], bias=False) + self.to_kvs[key.replace(".weight", "").replace(".", "_")].weight.data = value + +def set_model_patch_replace(model, patch_kwargs, key): + to = model.model_options["transformer_options"] + if "patches_replace" not in to: + to["patches_replace"] = {} + if "attn2" not in to["patches_replace"]: + to["patches_replace"]["attn2"] = {} + if key not in to["patches_replace"]["attn2"]: + patch = CrossAttentionPatch(**patch_kwargs) + to["patches_replace"]["attn2"][key] = patch + else: + to["patches_replace"]["attn2"][key].set_new_condition(**patch_kwargs) + +def image_add_noise(image, noise): + image = image.permute([0,3,1,2]) + torch.manual_seed(0) # use a fixed random for reproducible results + transforms = TT.Compose([ + TT.CenterCrop(min(image.shape[2], image.shape[3])), + TT.Resize((224, 224), interpolation=TT.InterpolationMode.BICUBIC, antialias=True), + TT.ElasticTransform(alpha=75.0, sigma=noise*3.5), # shuffle the image + TT.RandomVerticalFlip(p=1.0), # flip the image to change the geometry even more + TT.RandomHorizontalFlip(p=1.0), + ]) + image = transforms(image.cpu()) + image = image.permute([0,2,3,1]) + image = image + ((0.25*(1-noise)+0.05) * torch.randn_like(image) ) # add further random noise + return image + +def zeroed_hidden_states(clip_vision, batch_size): + image = torch.zeros([batch_size, 224, 224, 3]) + comfy.model_management.load_model_gpu(clip_vision.patcher) + pixel_values = clip_preprocess(image.to(clip_vision.load_device)) + + if clip_vision.dtype != torch.float32: + precision_scope = torch.autocast + else: + precision_scope = lambda a, b: contextlib.nullcontext(a) + + with precision_scope(comfy.model_management.get_autocast_device(clip_vision.load_device), torch.float32): + outputs = clip_vision.model(pixel_values, intermediate_output=-2) + + # we only need the penultimate hidden states + outputs = outputs[1].to(comfy.model_management.intermediate_device()) + + return outputs + +def min_(tensor_list): + # return the element-wise min of the tensor list. + x = torch.stack(tensor_list) + mn = x.min(axis=0)[0] + return torch.clamp(mn, min=0) + +def max_(tensor_list): + # return the element-wise max of the tensor list. 
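+ # e.g. max_([torch.tensor([0.2, 0.9]), torch.tensor([0.5, 1.3])]) -> tensor([0.5, 1.0]);
+ # anything above 1 is clamped back into the valid [0, 1] image range.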
+ x = torch.stack(tensor_list) + mx = x.max(axis=0)[0] + return torch.clamp(mx, max=1) + +# From https://github.com/Jamy-L/Pytorch-Contrast-Adaptive-Sharpening/ +def contrast_adaptive_sharpening(image, amount): + img = F.pad(image, pad=(1, 1, 1, 1)).cpu() + + a = img[..., :-2, :-2] + b = img[..., :-2, 1:-1] + c = img[..., :-2, 2:] + d = img[..., 1:-1, :-2] + e = img[..., 1:-1, 1:-1] + f = img[..., 1:-1, 2:] + g = img[..., 2:, :-2] + h = img[..., 2:, 1:-1] + i = img[..., 2:, 2:] + + # Computing contrast + cross = (b, d, e, f, h) + mn = min_(cross) + mx = max_(cross) + + diag = (a, c, g, i) + mn2 = min_(diag) + mx2 = max_(diag) + mx = mx + mx2 + mn = mn + mn2 + + # Computing local weight + inv_mx = torch.reciprocal(mx) + amp = inv_mx * torch.minimum(mn, (2 - mx)) + + # scaling + amp = torch.sqrt(amp) + w = - amp * (amount * (1/5 - 1/8) + 1/8) + div = torch.reciprocal(1 + 4*w) + + output = ((b + d + f + h)*w + e) * div + output = output.clamp(0, 1) + output = torch.nan_to_num(output) + + return (output) + +class IPAdapter(nn.Module): + def __init__(self, ipadapter_model, cross_attention_dim=1024, output_cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4, is_sdxl=False, is_plus=False, is_full=False): + super().__init__() + + self.clip_embeddings_dim = clip_embeddings_dim + self.cross_attention_dim = cross_attention_dim + self.output_cross_attention_dim = output_cross_attention_dim + self.clip_extra_context_tokens = clip_extra_context_tokens + self.is_sdxl = is_sdxl + self.is_full = is_full + + self.image_proj_model = self.init_proj() if not is_plus else self.init_proj_plus() + self.image_proj_model.load_state_dict(ipadapter_model["image_proj"]) + self.ip_layers = To_KV(ipadapter_model["ip_adapter"]) + + def init_proj(self): + image_proj_model = ImageProjModel( + cross_attention_dim=self.cross_attention_dim, + clip_embeddings_dim=self.clip_embeddings_dim, + clip_extra_context_tokens=self.clip_extra_context_tokens + ) + return image_proj_model + + def init_proj_plus(self): + if self.is_full: + image_proj_model = MLPProjModel( + cross_attention_dim=self.cross_attention_dim, + clip_embeddings_dim=self.clip_embeddings_dim + ) + else: + image_proj_model = Resampler( + dim=self.cross_attention_dim, + depth=4, + dim_head=64, + heads=20 if self.is_sdxl else 12, + num_queries=self.clip_extra_context_tokens, + embedding_dim=self.clip_embeddings_dim, + output_dim=self.output_cross_attention_dim, + ff_mult=4 + ) + return image_proj_model + + @torch.inference_mode() + def get_image_embeds(self, clip_embed, clip_embed_zeroed): + image_prompt_embeds = self.image_proj_model(clip_embed) + uncond_image_prompt_embeds = self.image_proj_model(clip_embed_zeroed) + return image_prompt_embeds, uncond_image_prompt_embeds + +class CrossAttentionPatch: + # forward for patching + def __init__(self, weight, ipadapter, device, dtype, number, cond, uncond, weight_type, mask=None, sigma_start=0.0, sigma_end=1.0, unfold_batch=False): + self.weights = [weight] + self.ipadapters = [ipadapter] + self.conds = [cond] + self.unconds = [uncond] + self.device = 'cuda' if 'cuda' in device.type else 'cpu' + self.dtype = dtype if 'cuda' in self.device else torch.bfloat16 + self.number = number + self.weight_type = [weight_type] + self.masks = [mask] + self.sigma_start = [sigma_start] + self.sigma_end = [sigma_end] + self.unfold_batch = [unfold_batch] + + self.k_key = str(self.number*2+1) + "_to_k_ip" + self.v_key = str(self.number*2+1) + "_to_v_ip" + + def set_new_condition(self, weight, ipadapter, device, 
dtype, number, cond, uncond, weight_type, mask=None, sigma_start=0.0, sigma_end=1.0, unfold_batch=False): + self.weights.append(weight) + self.ipadapters.append(ipadapter) + self.conds.append(cond) + self.unconds.append(uncond) + self.masks.append(mask) + self.device = 'cuda' if 'cuda' in device.type else 'cpu' + self.dtype = dtype if 'cuda' in self.device else torch.bfloat16 + self.weight_type.append(weight_type) + self.sigma_start.append(sigma_start) + self.sigma_end.append(sigma_end) + self.unfold_batch.append(unfold_batch) + + def __call__(self, n, context_attn2, value_attn2, extra_options): + org_dtype = n.dtype + cond_or_uncond = extra_options["cond_or_uncond"] + sigma = extra_options["sigmas"][0].item() if 'sigmas' in extra_options else 999999999.9 + + # extra options for AnimateDiff + ad_params = extra_options['ad_params'] if "ad_params" in extra_options else None + + with torch.autocast(device_type=self.device, dtype=self.dtype): + q = n + k = context_attn2 + v = value_attn2 + b = q.shape[0] + qs = q.shape[1] + batch_prompt = b // len(cond_or_uncond) + out = optimized_attention(q, k, v, extra_options["n_heads"]) + _, _, lh, lw = extra_options["original_shape"] + + for weight, cond, uncond, ipadapter, mask, weight_type, sigma_start, sigma_end, unfold_batch in zip(self.weights, self.conds, self.unconds, self.ipadapters, self.masks, self.weight_type, self.sigma_start, self.sigma_end, self.unfold_batch): + if sigma > sigma_start or sigma < sigma_end: + continue + + if unfold_batch and cond.shape[0] > 1: + # Check AnimateDiff context window + if ad_params is not None and ad_params["sub_idxs"] is not None: + # if images length matches or exceeds full_length get sub_idx images + if cond.shape[0] >= ad_params["full_length"]: + cond = torch.Tensor(cond[ad_params["sub_idxs"]]) + uncond = torch.Tensor(uncond[ad_params["sub_idxs"]]) + # otherwise, need to do more to get proper sub_idxs masks + else: + # check if images length matches full_length - if not, make it match + if cond.shape[0] < ad_params["full_length"]: + cond = torch.cat((cond, cond[-1:].repeat((ad_params["full_length"]-cond.shape[0], 1, 1))), dim=0) + uncond = torch.cat((uncond, uncond[-1:].repeat((ad_params["full_length"]-uncond.shape[0], 1, 1))), dim=0) + # if we have too many remove the excess (should not happen, but just in case) + if cond.shape[0] > ad_params["full_length"]: + cond = cond[:ad_params["full_length"]] + uncond = uncond[:ad_params["full_length"]] + cond = cond[ad_params["sub_idxs"]] + uncond = uncond[ad_params["sub_idxs"]] + + # if we don't have enough reference images repeat the last one until we reach the right size + if cond.shape[0] < batch_prompt: + cond = torch.cat((cond, cond[-1:].repeat((batch_prompt-cond.shape[0], 1, 1))), dim=0) + uncond = torch.cat((uncond, uncond[-1:].repeat((batch_prompt-uncond.shape[0], 1, 1))), dim=0) + # if we have too many remove the exceeding + elif cond.shape[0] > batch_prompt: + cond = cond[:batch_prompt] + uncond = uncond[:batch_prompt] + + k_cond = ipadapter.ip_layers.to_kvs[self.k_key](cond) + k_uncond = ipadapter.ip_layers.to_kvs[self.k_key](uncond) + v_cond = ipadapter.ip_layers.to_kvs[self.v_key](cond) + v_uncond = ipadapter.ip_layers.to_kvs[self.v_key](uncond) + else: + k_cond = ipadapter.ip_layers.to_kvs[self.k_key](cond).repeat(batch_prompt, 1, 1) + k_uncond = ipadapter.ip_layers.to_kvs[self.k_key](uncond).repeat(batch_prompt, 1, 1) + v_cond = ipadapter.ip_layers.to_kvs[self.v_key](cond).repeat(batch_prompt, 1, 1) + v_uncond = 
ipadapter.ip_layers.to_kvs[self.v_key](uncond).repeat(batch_prompt, 1, 1) + + if weight_type.startswith("linear"): + ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0) * weight + ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0) * weight + else: + ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0) + ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0) + + if weight_type.startswith("channel"): + # code by Lvmin Zhang at Stanford University as also seen on Fooocus IPAdapter implementation + # please read licensing notes https://github.com/lllyasviel/Fooocus/blob/main/fooocus_extras/ip_adapter.py#L225 + ip_v_mean = torch.mean(ip_v, dim=1, keepdim=True) + ip_v_offset = ip_v - ip_v_mean + _, _, C = ip_k.shape + channel_penalty = float(C) / 1280.0 + W = weight * channel_penalty + ip_k = ip_k * W + ip_v = ip_v_offset + ip_v_mean * W + + out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"]) + if weight_type.startswith("original"): + out_ip = out_ip * weight + + if mask is not None: + # TODO: needs checking + mask_h = max(1, round(lh / math.sqrt(lh * lw / qs))) + mask_w = qs // mask_h + + # check if using AnimateDiff and sliding context window + if (mask.shape[0] > 1 and ad_params is not None and ad_params["sub_idxs"] is not None): + # if mask length matches or exceeds full_length, just get sub_idx masks, resize, and continue + if mask.shape[0] >= ad_params["full_length"]: + mask_downsample = torch.Tensor(mask[ad_params["sub_idxs"]]) + mask_downsample = F.interpolate(mask_downsample.unsqueeze(1), size=(mask_h, mask_w), mode="bicubic").squeeze(1) + # otherwise, need to do more to get proper sub_idxs masks + else: + # resize to needed attention size (to save on memory) + mask_downsample = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bicubic").squeeze(1) + # check if mask length matches full_length - if not, make it match + if mask_downsample.shape[0] < ad_params["full_length"]: + mask_downsample = torch.cat((mask_downsample, mask_downsample[-1:].repeat((ad_params["full_length"]-mask_downsample.shape[0], 1, 1))), dim=0) + # if we have too many remove the excess (should not happen, but just in case) + if mask_downsample.shape[0] > ad_params["full_length"]: + mask_downsample = mask_downsample[:ad_params["full_length"]] + # now, select sub_idxs masks + mask_downsample = mask_downsample[ad_params["sub_idxs"]] + # otherwise, perform usual mask interpolation + else: + mask_downsample = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bicubic").squeeze(1) + + # if we don't have enough masks repeat the last one until we reach the right size + if mask_downsample.shape[0] < batch_prompt: + mask_downsample = torch.cat((mask_downsample, mask_downsample[-1:, :, :].repeat((batch_prompt-mask_downsample.shape[0], 1, 1))), dim=0) + # if we have too many remove the exceeding + elif mask_downsample.shape[0] > batch_prompt: + mask_downsample = mask_downsample[:batch_prompt, :, :] + + # repeat the masks + mask_downsample = mask_downsample.repeat(len(cond_or_uncond), 1, 1) + mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1, 1).repeat(1, 1, out.shape[2]) + + out_ip = out_ip * mask_downsample + + out = out + out_ip + + return out.to(dtype=org_dtype) + +class IPAdapterModelLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "ipadapter_file": (folder_paths.get_filename_list("ipadapter"), )}} + + RETURN_TYPES = ("IPADAPTER",) + FUNCTION = "load_ipadapter_model" 
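+ # ComfyUI calls the method named by FUNCTION and expects a tuple matching RETURN_TYPES.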
+ + CATEGORY = "ipadapter" + + def load_ipadapter_model(self, ipadapter_file): + ckpt_path = folder_paths.get_full_path("ipadapter", ipadapter_file) + + model = comfy.utils.load_torch_file(ckpt_path, safe_load=True) + + if ckpt_path.lower().endswith(".safetensors"): + st_model = {"image_proj": {}, "ip_adapter": {}} + for key in model.keys(): + if key.startswith("image_proj."): + st_model["image_proj"][key.replace("image_proj.", "")] = model[key] + elif key.startswith("ip_adapter."): + st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key] + model = st_model + + if not "ip_adapter" in model.keys() or not model["ip_adapter"]: + raise Exception("invalid IPAdapter model {}".format(ckpt_path)) + + return (model,) + +class IPAdapterApply: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "ipadapter": ("IPADAPTER", ), + "clip_vision": ("CLIP_VISION",), + "image": ("IMAGE",), + "model": ("MODEL", ), + "weight": ("FLOAT", { "default": 1.0, "min": -1, "max": 3, "step": 0.05 }), + "noise": ("FLOAT", { "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01 }), + "weight_type": (["original", "linear", "channel penalty"], ), + "start_at": ("FLOAT", { "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001 }), + "end_at": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001 }), + "unfold_batch": ("BOOLEAN", { "default": False }), + }, + "optional": { + "attn_mask": ("MASK",), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "apply_ipadapter" + CATEGORY = "ipadapter" + + def apply_ipadapter(self, ipadapter, model, weight, clip_vision=None, image=None, weight_type="original", noise=None, embeds=None, attn_mask=None, start_at=0.0, end_at=1.0, unfold_batch=False): + self.dtype = model.model.diffusion_model.dtype + self.device = comfy.model_management.get_torch_device() + self.weight = weight + self.is_full = "proj.0.weight" in ipadapter["image_proj"] + self.is_plus = self.is_full or "latents" in ipadapter["image_proj"] + + output_cross_attention_dim = ipadapter["ip_adapter"]["1.to_k_ip.weight"].shape[1] + self.is_sdxl = output_cross_attention_dim == 2048 + cross_attention_dim = 1280 if self.is_plus and self.is_sdxl else output_cross_attention_dim + clip_extra_context_tokens = 16 if self.is_plus else 4 + + if embeds is not None: + embeds = torch.unbind(embeds) + clip_embed = embeds[0].cpu() + clip_embed_zeroed = embeds[1].cpu() + else: + if image.shape[1] != image.shape[2]: + print("\033[33mINFO: the IPAdapter reference image is not a square, CLIPImageProcessor will resize and crop it at the center. 
If the main focus of the picture is not in the middle the result might not be what you are expecting.\033[0m") + + clip_embed = clip_vision.encode_image(image) + neg_image = image_add_noise(image, noise) if noise > 0 else None + + if self.is_plus: + clip_embed = clip_embed.penultimate_hidden_states + if noise > 0: + clip_embed_zeroed = clip_vision.encode_image(neg_image).penultimate_hidden_states + else: + clip_embed_zeroed = zeroed_hidden_states(clip_vision, image.shape[0]) + else: + clip_embed = clip_embed.image_embeds + if noise > 0: + clip_embed_zeroed = clip_vision.encode_image(neg_image).image_embeds + else: + clip_embed_zeroed = torch.zeros_like(clip_embed) + + clip_embeddings_dim = clip_embed.shape[-1] + + self.ipadapter = IPAdapter( + ipadapter, + cross_attention_dim=cross_attention_dim, + output_cross_attention_dim=output_cross_attention_dim, + clip_embeddings_dim=clip_embeddings_dim, + clip_extra_context_tokens=clip_extra_context_tokens, + is_sdxl=self.is_sdxl, + is_plus=self.is_plus, + is_full=self.is_full, + ) + + self.ipadapter.to(self.device, dtype=self.dtype) + + image_prompt_embeds, uncond_image_prompt_embeds = self.ipadapter.get_image_embeds(clip_embed.to(self.device, self.dtype), clip_embed_zeroed.to(self.device, self.dtype)) + image_prompt_embeds = image_prompt_embeds.to(self.device, dtype=self.dtype) + uncond_image_prompt_embeds = uncond_image_prompt_embeds.to(self.device, dtype=self.dtype) + + work_model = model.clone() + + if attn_mask is not None: + attn_mask = attn_mask.to(self.device) + + sigma_start = model.model.model_sampling.percent_to_sigma(start_at) + sigma_end = model.model.model_sampling.percent_to_sigma(end_at) + + patch_kwargs = { + "number": 0, + "weight": self.weight, + "ipadapter": self.ipadapter, + "device": self.device, + "dtype": self.dtype, + "cond": image_prompt_embeds, + "uncond": uncond_image_prompt_embeds, + "weight_type": weight_type, + "mask": attn_mask, + "sigma_start": sigma_start, + "sigma_end": sigma_end, + "unfold_batch": unfold_batch, + } + + if not self.is_sdxl: + for id in [1,2,4,5,7,8]: # id of input_blocks that have cross attention + set_model_patch_replace(work_model, patch_kwargs, ("input", id)) + patch_kwargs["number"] += 1 + for id in [3,4,5,6,7,8,9,10,11]: # id of output_blocks that have cross attention + set_model_patch_replace(work_model, patch_kwargs, ("output", id)) + patch_kwargs["number"] += 1 + set_model_patch_replace(work_model, patch_kwargs, ("middle", 0)) + else: + for id in [4,5,7,8]: # id of input_blocks that have cross attention + block_indices = range(2) if id in [4, 5] else range(10) # transformer_depth + for index in block_indices: + set_model_patch_replace(work_model, patch_kwargs, ("input", id, index)) + patch_kwargs["number"] += 1 + for id in range(6): # id of output_blocks that have cross attention + block_indices = range(2) if id in [3, 4, 5] else range(10) # transformer_depth + for index in block_indices: + set_model_patch_replace(work_model, patch_kwargs, ("output", id, index)) + patch_kwargs["number"] += 1 + for index in range(10): + set_model_patch_replace(work_model, patch_kwargs, ("middle", 0, index)) + patch_kwargs["number"] += 1 + + return (work_model, ) + +class PrepImageForClipVision: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "interpolation": (["LANCZOS", "BICUBIC", "HAMMING", "BILINEAR", "BOX", "NEAREST"],), + "crop_position": (["top", "bottom", "left", "right", "center", "pad"],), + "sharpening": ("FLOAT", {"default": 0.0, "min": 0, "max": 1, "step": 
0.05}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "prep_image" + + CATEGORY = "ipadapter" + + def prep_image(self, image, interpolation="LANCZOS", crop_position="center", sharpening=0.0): + _, oh, ow, _ = image.shape + output = image.permute([0,3,1,2]) + + if "pad" in crop_position: + target_length = max(oh, ow) + pad_l = (target_length - ow) // 2 + pad_r = (target_length - ow) - pad_l + pad_t = (target_length - oh) // 2 + pad_b = (target_length - oh) - pad_t + output = F.pad(output, (pad_l, pad_r, pad_t, pad_b), value=0, mode="constant") + else: + crop_size = min(oh, ow) + x = (ow-crop_size) // 2 + y = (oh-crop_size) // 2 + if "top" in crop_position: + y = 0 + elif "bottom" in crop_position: + y = oh-crop_size + elif "left" in crop_position: + x = 0 + elif "right" in crop_position: + x = ow-crop_size + + x2 = x+crop_size + y2 = y+crop_size + + # crop + output = output[:, :, y:y2, x:x2] + + # resize (apparently PIL resize is better than torchvision interpolate) + imgs = [] + for i in range(output.shape[0]): + img = TT.ToPILImage()(output[i]) + img = img.resize((224,224), resample=Image.Resampling[interpolation]) + imgs.append(TT.ToTensor()(img)) + output = torch.stack(imgs, dim=0) + + if sharpening > 0: + output = contrast_adaptive_sharpening(output, sharpening) + + output = output.permute([0,2,3,1]) + + return (output,) + +class IPAdapterEncoder: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip_vision": ("CLIP_VISION",), + "image_1": ("IMAGE",), + "ipadapter_plus": ("BOOLEAN", { "default": False }), + "noise": ("FLOAT", { "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01 }), + "weight_1": ("FLOAT", { "default": 1.0, "min": 0, "max": 1.0, "step": 0.01 }), + }, + "optional": { + "image_2": ("IMAGE",), + "image_3": ("IMAGE",), + "image_4": ("IMAGE",), + "weight_2": ("FLOAT", { "default": 1.0, "min": 0, "max": 1.0, "step": 0.01 }), + "weight_3": ("FLOAT", { "default": 1.0, "min": 0, "max": 1.0, "step": 0.01 }), + "weight_4": ("FLOAT", { "default": 1.0, "min": 0, "max": 1.0, "step": 0.01 }), + } + } + + RETURN_TYPES = ("EMBEDS",) + FUNCTION = "preprocess" + CATEGORY = "ipadapter" + + def preprocess(self, clip_vision, image_1, ipadapter_plus, noise, weight_1, image_2=None, image_3=None, image_4=None, weight_2=1.0, weight_3=1.0, weight_4=1.0): + weight_1 *= (0.1 + (weight_1 - 0.1)) + weight_1 = 1.19e-05 if weight_1 <= 1.19e-05 else weight_1 + weight_2 *= (0.1 + (weight_2 - 0.1)) + weight_2 = 1.19e-05 if weight_2 <= 1.19e-05 else weight_2 + weight_3 *= (0.1 + (weight_3 - 0.1)) + weight_3 = 1.19e-05 if weight_3 <= 1.19e-05 else weight_3 + weight_4 *= (0.1 + (weight_4 - 0.1)) + weight_4 = 1.19e-05 if weight_4 <= 1.19e-05 else weight_4 + + image = image_1 + weight = [weight_1]*image_1.shape[0] + + if image_2 is not None: + if image_1.shape[1:] != image_2.shape[1:]: + image_2 = comfy.utils.common_upscale(image_2.movedim(-1,1), image.shape[2], image.shape[1], "bilinear", "center").movedim(1,-1) + image = torch.cat((image, image_2), dim=0) + weight += [weight_2]*image_2.shape[0] + if image_3 is not None: + if image.shape[1:] != image_3.shape[1:]: + image_3 = comfy.utils.common_upscale(image_3.movedim(-1,1), image.shape[2], image.shape[1], "bilinear", "center").movedim(1,-1) + image = torch.cat((image, image_3), dim=0) + weight += [weight_3]*image_3.shape[0] + if image_4 is not None: + if image.shape[1:] != image_4.shape[1:]: + image_4 = comfy.utils.common_upscale(image_4.movedim(-1,1), image.shape[2], image.shape[1], "bilinear", "center").movedim(1,-1) + image = 
torch.cat((image, image_4), dim=0) + weight += [weight_4]*image_4.shape[0] + + clip_embed = clip_vision.encode_image(image) + neg_image = image_add_noise(image, noise) if noise > 0 else None + + if ipadapter_plus: + clip_embed = clip_embed.penultimate_hidden_states + if noise > 0: + clip_embed_zeroed = clip_vision.encode_image(neg_image).penultimate_hidden_states + else: + clip_embed_zeroed = zeroed_hidden_states(clip_vision, image.shape[0]) + else: + clip_embed = clip_embed.image_embeds + if noise > 0: + clip_embed_zeroed = clip_vision.encode_image(neg_image).image_embeds + else: + clip_embed_zeroed = torch.zeros_like(clip_embed) + + if any(e != 1.0 for e in weight): + weight = torch.tensor(weight).unsqueeze(-1) if not ipadapter_plus else torch.tensor(weight).unsqueeze(-1).unsqueeze(-1) + clip_embed = clip_embed * weight + + output = torch.stack((clip_embed, clip_embed_zeroed)) + + return( output, ) + +class IPAdapterApplyEncoded(IPAdapterApply): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "ipadapter": ("IPADAPTER", ), + "embeds": ("EMBEDS",), + "model": ("MODEL", ), + "weight": ("FLOAT", { "default": 1.0, "min": -1, "max": 3, "step": 0.05 }), + "weight_type": (["original", "linear", "channel penalty"], ), + "start_at": ("FLOAT", { "default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001 }), + "end_at": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001 }), + "unfold_batch": ("BOOLEAN", { "default": False }), + }, + "optional": { + "attn_mask": ("MASK",), + } + } + +class IPAdapterSaveEmbeds: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "embeds": ("EMBEDS",), + "filename_prefix": ("STRING", {"default": "embeds/IPAdapter"}) + }, + } + + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + CATEGORY = "ipadapter" + + def save(self, embeds, filename_prefix): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + file = f"{filename}_{counter:05}_.ipadpt" + file = os.path.join(full_output_folder, file) + + torch.save(embeds, file) + return (None, ) + + +class IPAdapterLoadEmbeds: + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [os.path.relpath(os.path.join(root, file), input_dir) for root, dirs, files in os.walk(input_dir) for file in files if file.endswith('.ipadpt')] + return {"required": {"embeds": [sorted(files), ]}, } + + RETURN_TYPES = ("EMBEDS", ) + FUNCTION = "load" + CATEGORY = "ipadapter" + + def load(self, embeds): + path = folder_paths.get_annotated_filepath(embeds) + output = torch.load(path).cpu() + + return (output, ) + + +class IPAdapterBatchEmbeds: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "embed1": ("EMBEDS",), + "embed2": ("EMBEDS",), + }} + + RETURN_TYPES = ("EMBEDS",) + FUNCTION = "batch" + CATEGORY = "ipadapter" + + def batch(self, embed1, embed2): + output = torch.cat((embed1, embed2), dim=1) + return (output, ) + +NODE_CLASS_MAPPINGS = { + "IPAdapterModelLoader": IPAdapterModelLoader, + "IPAdapterApply": IPAdapterApply, + "IPAdapterApplyEncoded": IPAdapterApplyEncoded, + "PrepImageForClipVision": PrepImageForClipVision, + "IPAdapterEncoder": IPAdapterEncoder, + "IPAdapterSaveEmbeds": IPAdapterSaveEmbeds, + "IPAdapterLoadEmbeds": IPAdapterLoadEmbeds, + "IPAdapterBatchEmbeds": IPAdapterBatchEmbeds, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "IPAdapterModelLoader": "Load IPAdapter Model", + 
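# display titles shown in ComfyUI's node menu; keys must match NODE_CLASS_MAPPINGS above +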
"IPAdapterApply": "Apply IPAdapter", + "IPAdapterApplyEncoded": "Apply IPAdapter from Encoded", + "PrepImageForClipVision": "Prepare Image For Clip Vision", + "IPAdapterEncoder": "Encode IPAdapter Image", + "IPAdapterSaveEmbeds": "Save IPAdapter Embeds", + "IPAdapterLoadEmbeds": "Load IPAdapter Embeds", + "IPAdapterBatchEmbeds": "IPAdapter Batch Embeds", +} diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapter_workflow.json b/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapter_workflow.json new file mode 100644 index 0000000000000000000000000000000000000000..6f66f283f84f51fa820ea5b732dbe68b4371f704 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/IPAdapter_workflow.json @@ -0,0 +1,606 @@ +{ + "last_node_id": 12, + "last_link_id": 13, + "nodes": [ + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful renaissance girl, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": 
"IMAGE", + "type": "IMAGE", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 25, + 6, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "v1-5-pruned-emaonly.safetensors" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 651, + -57 + ], + "size": { + "0": 210, + "1": 258 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 3 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 1, + 0, + "original", + 0, + 1, + false + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter_sd15.safetensors" + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 3, + 6, + 0, + 5, + 2, + "IMAGE" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 8, + 7, + 0, + 9, + 1, + "CONDITIONING" + 
], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/LICENSE b/custom_nodes/ComfyUI_IPAdapter_plus/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/README.md b/custom_nodes/ComfyUI_IPAdapter_plus/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..864171cf7e12d08ca32faf4dd423c4bcff951d3a
--- /dev/null
+++ b/custom_nodes/ComfyUI_IPAdapter_plus/README.md
@@ -0,0 +1,209 @@
+# ComfyUI IPAdapter plus
+[ComfyUI](https://github.com/comfyanonymous/ComfyUI) reference implementation for [IPAdapter](https://github.com/tencent-ailab/IP-Adapter/) models.
+
+An IPAdapter implementation that follows the ComfyUI way of doing things. The code is memory efficient, fast, and shouldn't break with Comfy updates.
+
+## Important updates
+
+**2023/12/05**: Added `batch embeds` node. This lets you encode images in batches and merge them together into an `IPAdapter Apply Encoded` node. Useful mostly for animations because the CLIP vision encoder takes a lot of VRAM. My suggestion is to split the animation into batches of about 120 frames.
+
+**2023/11/29**: Added `unfold_batch` option to send the reference images sequentially to a latent batch. Useful for animations.
+
+**2023/11/26**: Added [timestepping](#timestepping). You may need to delete the old nodes and recreate them. **Important:** For this to work you need to update ComfyUI to the latest version.
+
+**2023/11/24**: Support for multiple attention masks.
+
+**2023/11/23**: Small but important update: the new default location for the IPAdapter models is `ComfyUI/models/ipadapter`. **No panic**: the legacy `ComfyUI/custom_nodes/ComfyUI_IPAdapter_plus/models` location still works and nothing will break.
+
+**2023/11/08**: Added [attention masking](#attention-masking).
+
+**2023/11/07**: Added three ways to apply the weight. [See below](#weight-types) for more info. **This might break things!** Please let me know if you are having issues. When loading an old workflow, try to reload the page a couple of times or delete the `IPAdapter Apply` node and insert a new one.
+
+**2023/11/02**: Added compatibility with the new models in safetensors format (available on [huggingface](https://huggingface.co/h94/IP-Adapter)).
+
+**2023/10/12**: Added image weighting in the `IPAdapterEncoder` node. This update is somewhat breaking; if you use the `IPAdapterEncoder` and `PrepImageForClipVision` nodes you need to remove them from your workflow, refresh, and recreate them. In the examples you'll find a [workflow](examples/IPAdapter_weighted.json) for weighted images.
+
+*(previous updates removed for better readability)*
+
+## What is it?
+
+The IPAdapter models are very powerful for image-to-image conditioning. Given a reference image you can do variations augmented by text prompts, ControlNets, and masks. Think of it as a 1-image LoRA.
+
+## Example workflow
+
+![IPAdapter Example workflow](./ipadapter_workflow.png)
+
+## Video Introduction
+
+**:nerd_face: [Basic usage video](https://youtu.be/7m9ZZFU3HWo)**
+
+**:rocket: [Advanced features video](https://www.youtube.com/watch?v=mJQ62ly7jrg)**
+
+**:japanese_goblin: [Attention Masking video](https://www.youtube.com/watch?v=vqG1VXKteQg)**
+
+**:movie_camera: [Animation Features video](https://www.youtube.com/watch?v=ddYbhv3WgWw)**
+
+## Installation
+
+Download or git clone this repository inside the `ComfyUI/custom_nodes/` directory.
+
+The pre-trained models are available on [huggingface](https://huggingface.co/h94/IP-Adapter); download them and place them in the `ComfyUI/models/ipadapter` directory (create it if not present). You can also use any custom location by setting an `ipadapter` entry in the `extra_model_paths.yaml` file (see the example below).
+
+Note: the legacy `ComfyUI/custom_nodes/ComfyUI_IPAdapter_plus/models` location is still supported; it is ignored only when the global directory is present.
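+
+For reference, here's a minimal sketch of what such an entry could look like; the section name and base path are hypothetical placeholders, adapt them to your own setup:
+
+```yaml
+# hypothetical extra_model_paths.yaml entry -- adjust names and base_path to your setup
+my_model_storage:
+    base_path: /some/other/location/
+    ipadapter: models/ipadapter/
+```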
+
+For SD1.5 you need:
+
+- [ip-adapter_sd15.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.bin)
+- [ip-adapter_sd15_light.bin](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter_sd15_light.safetensors), use this when the text prompt is more important than the reference images
+- [ip-adapter-plus_sd15.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.bin)
+- [ip-adapter-plus-face_sd15.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus-face_sd15.bin)
+- [ip-adapter-full-face_sd15.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-full-face_sd15.bin)
+- [ip-adapter_sd15_vit-G.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15_vit-G.bin), this model requires the ViT-bigG image encoder (the SDXL one below)
+
+For SDXL you need:
+- [ip-adapter_sdxl.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.bin)
+- [ip-adapter_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.bin) **This model requires the use of the SD1.5 encoder despite being for SDXL checkpoints**
+- [ip-adapter-plus_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.bin) Same as above, use the SD1.5 encoder
+- [ip-adapter-plus-face_sdxl_vit-h.bin](https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus-face_sdxl_vit-h.bin) As always, use the SD1.5 encoder
+
+Please note that the models are now also available in safetensors format; you can find them on [huggingface](https://huggingface.co/h94/IP-Adapter).
+
+Additionally, you need the image encoders, to be placed in the `ComfyUI/models/clip_vision/` directory:
+
+- [SD 1.5 model](https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/model.safetensors) (use this also for all models ending with **_vit-h**)
+- [SDXL model](https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/image_encoder/model.safetensors)
+
+You can rename them to something easier to remember or put them into a sub-directory.
+
+**Note:** the image encoders are actually [ViT-H](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K) and [ViT-bigG](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) (used only for one SDXL model). You probably already have them.
+
+## How to
+
+There's a basic workflow included in this repo and a few examples in the [examples](./examples/) directory. Usually it's a good idea to lower the `weight` to `0.8` or below.
+
+The `noise` parameter is an experimental exploitation of the IPAdapter models. You can set it as low as `0.01` for an arguably better result.
+
+**More info about the noise option**
+
+noise examples
+
+Basically the IPAdapter sends two pictures for the conditioning: one is the reference, the other --that you don't see-- is an empty image that could be considered a negative conditioning.
+
+What I'm doing is sending a very noisy image instead of an empty one. The `noise` parameter determines the amount of noise that is added. A value of `0.01` adds a lot of noise (more noise == less impact because the model doesn't get it); a value of `1.0` removes most of the noise so the generated image gets conditioned more.
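+
+To make the trick concrete, here's a minimal sketch of the described behavior; the function name and the blending are illustrative, not the node's actual implementation:
+
+```python
+import torch
+
+def make_negative_image(reference: torch.Tensor, noise: float) -> torch.Tensor:
+    """Build the hidden 'negative' image: mostly random noise at low values
+    of `noise`, fading towards a plain empty image as `noise` approaches 1.0."""
+    random_image = torch.rand_like(reference)  # pure noise, same shape as the reference
+    empty_image = torch.zeros_like(reference)  # the plain empty image
+    # noise=0.01 -> almost entirely random; noise=1.0 -> almost entirely empty
+    return (1.0 - noise) * random_image + noise * empty_image
+```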
+
+### Preparing the reference image
+
+The reference image needs to be encoded by the CLIP vision model. The encoder resizes the image to 224×224 **and crops it to the center!** It's not an IPAdapter thing, it's how CLIP vision works. This means that if you use a portrait or landscape image and the main subject (e.g. the face of a character) is not in the middle, you'll likely get undesired results. Use square pictures as reference for more predictable results.
+
+I've added a `PrepImageForClipVision` node that does all the required operations for you. You just have to select the crop position (top/left/center/etc.) and a sharpening amount if you want.
+
+In the image below you can see the difference between prepped and not prepped images.
+
+prepped images
+
+### KSampler configuration suggestions
+
+The IPAdapter generally requires a few more `steps` than usual; if the result is underwhelming, try to add 10+ steps. The model tends to burn the images a little. If needed, lower the CFG scale.
+
+The `noise` option generally grants better results; experiment with it.
+
+### IPAdapter + ControlNet
+
+The model is very effective when paired with a ControlNet. In the example below I experimented with Canny. [The workflow](./examples/IPAdapter_Canny.json) is in the examples directory.
+
+canny controlnet
+
+### IPAdapter Face
+
+IPAdapter offers an interesting model for a kind of "face swap" effect. [The workflow is provided](./examples/IPAdapter_face.json). Set a close-up face as the reference image and then input your text prompt as always. The generated character should have the face of the reference. It also works with img2img given a high denoise.
+
+face swap
+
+**Note:** there's a new `full-face` model available that's arguably better.
+
+### Masking (Inpainting)
+
+The most effective way to apply the IPAdapter to a region is by an [inpainting workflow](./examples/IPAdapter_inpaint.json). Remember to use a checkpoint made specifically for inpainting, otherwise it won't work. Even if you are inpainting a face, I find that the *IPAdapter-Plus* model (not the *face* one) works best.
+
+inpainting
+
+### Image Batches
+
+It is possible to pass multiple images for the conditioning with the `Batch Images` node. An [example workflow](./examples/IPAdapter_batch_images.json) is provided; in the picture below you can see the result of conditioning with one and with two images.
+
+batch images
+
+It seems to be effective with 2-3 images; beyond that it tends to *blur* the information too much.
+
+### Image Weighting
+
+When sending multiple images you can increase/decrease the weight of each image by using the `IPAdapterEncoder` node. The workflow ([included in the examples](examples/IPAdapter_weighted.json)) looks like this:
+
+image weighting
+
+The node accepts 4 images, but remember that you can send batches of images to each slot.
+
+### Weight types
+
+You can choose how the IPAdapter weight is applied to the image embeds; a toy sketch comparing the first two options follows the list. Options are:
+
+- **original**: The weight is applied to the aggregated tensors. The weight works predictably for values greater and lower than 1.
+- **linear**: The weight is applied to the individual tensors before aggregating them. Compared to `original` the influence is weaker when the weight is <1 and stronger when >1. **Note:** at weight `1` the two methods are equivalent.
+- **channel penalty**: This method is a modified version of Lvmin Zhang's (Fooocus). Results are sometimes sharper. It also works very well when the weight is >1. Still experimental, it may change in the future.
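+
+Here's the toy sketch of how the first two options might differ; the attention function and tensor layout are simplified stand-ins consistent with the descriptions above, not the extension's actual code:
+
+```python
+import torch
+import torch.nn.functional as F
+
+def toy_ip_attention(query: torch.Tensor, image_embeds: torch.Tensor) -> torch.Tensor:
+    # simplified stand-in for the cross-attention that aggregates the image embeds
+    scores = query @ image_embeds.transpose(-1, -2) / image_embeds.shape[-1] ** 0.5
+    return F.softmax(scores, dim=-1) @ image_embeds
+
+def apply_ip_weight(query: torch.Tensor, image_embeds: torch.Tensor,
+                    weight: float, weight_type: str = "original") -> torch.Tensor:
+    if weight_type == "linear":
+        # "linear": weight the individual embed tensors *before* aggregation
+        return toy_ip_attention(query, image_embeds * weight)
+    # "original": aggregate first, then weight the combined result
+    return toy_ip_attention(query, image_embeds) * weight
+```
+
+Because the aggregation is non-linear in the embeds, scaling the input and scaling the output only coincide at weight `1`, which is why the two methods are equivalent there and drift apart for other values.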
+
+The image below shows the difference (zoom in).
+
+weight types
+
+In the examples directory you can find [a workflow](examples/IPAdapter_weight_types.json) that lets you easily compare the three methods.
+
+**Note:** I'm still not sure whether all methods will stay. `Linear` seems the most sensible but I wanted to keep `original` for backward compatibility. `channel penalty` has a weird non-commercial clause but it's still part of GNU GPLv3 software (i.e. there's a licensing clash), so I'm trying to understand how to deal with that.
+
+### Attention masking
+
+It's possible to add a mask to define the area where the IPAdapter will be applied. Everything outside the mask will ignore the reference images and will only listen to the text prompt.
+
+It is suggested to use a mask of the same size as the final generated image.
+
+In the picture below I use two reference images, one masked on the left and the other on the right. The image is generated only with the IPAdapter and one KSampler (without in/outpainting or area conditioning).
+
+masking
+
+It is also possible to send a batch of masks that will be applied to a batch of latents, one per frame. The sizes should match, but if needed some normalization will be performed to avoid errors. This feature also supports (experimentally) AnimateDiff, including context sliding.
+
+In the examples directory you'll find a couple of masking workflows: [simple](examples/IPAdapter_mask.json) and [two masks](examples/IPAdapter_2_masks.json).
+
+### Timestepping
+
+In the `Apply IPAdapter` node you can set a start and an end point. The IPAdapter will be applied exclusively in that timeframe of the generation. This is a very powerful tool to modulate the intensity of the IPAdapter models.
+
+timestepping
+
+## Troubleshooting
+
+Please check the [troubleshooting](https://github.com/cubiq/ComfyUI_IPAdapter_plus/issues/108) thread before posting a new issue.
+
+## Diffusers version
+
+If you are interested, I've also implemented the same features for [Huggingface Diffusers](https://github.com/cubiq/Diffusers_IPAdapter).
+
+## Credits
+
+- [IPAdapter](https://github.com/tencent-ailab/IP-Adapter/)
+- [ComfyUI](https://github.com/comfyanonymous/ComfyUI)
+- [laksjdjf](https://github.com/laksjdjf/IPAdapter-ComfyUI/)
+- [fooocus](https://github.com/lllyasviel/Fooocus/blob/main/fooocus_extras/ip_adapter.py)
+
+## IPAdapter in the wild
+
+Let me know if you spot the IPAdapter in the wild, or tag @latentvision in the video description!
+
+- For German speakers there are interesting YouTube tutorials on [A Latent Place](https://www.youtube.com/watch?v=rAWn_0YOBU0).
+- In Chinese: [Introversify](https://www.youtube.com/watch?v=xl8f3oxZgY8)
+- [Scott Detweiler](https://www.youtube.com/watch?v=xzGdynQDzsM) covered this extension.
diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/__init__.py b/custom_nodes/ComfyUI_IPAdapter_plus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cdd2511fd1d90cf6d3048b631ffb0d080fb11e4a --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/__init__.py @@ -0,0 +1,3 @@ +from .IPAdapterPlus import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/IPAdapterPlus.cpython-311.pyc b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/IPAdapterPlus.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cf67f825edd99c482fa8c99ed04f810155c02ad Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/IPAdapterPlus.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea82c51f9f2550020f723984998cd46ba50e9d72 Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/resampler.cpython-311.pyc b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/resampler.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..442bf5363ad187fe1c2b09266ef632c08d780afc Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/__pycache__/resampler.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_2_masks.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_2_masks.json new file mode 100644 index 0000000000000000000000000000000000000000..1e8105ff5bacb2f0fe54db19ea94081d8669fb01 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_2_masks.json @@ -0,0 +1,1202 @@ +{ + "last_node_id": 52, + "last_link_id": 92, + "nodes": [ + { + "id": 5, + "type": "CLIPTextEncode", + "pos": [ + 550, + 510 + ], + "size": { + "0": 400, + "1": 160 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 3 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, malformed, illustration, video game, rendering, naked, cleavage, horror, zombie" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "VAEDecode", + "pos": [ + 1430, + 300 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 7, + "type": "VAELoader", + "pos": [ + 1160, + 600 + ], + "size": { + "0": 240, + "1": 60 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 2, + "type": "CheckpointLoaderSimple", + "pos": [ + 210, + 410 + ], + "size": { 
+ "0": 290, + "1": 100 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 88 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/dreamshaper_8.safetensors" + ] + }, + { + "id": 3, + "type": "EmptyLatentImage", + "pos": [ + 740, + 710 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 34, + "type": "SolidMask", + "pos": [ + -570, + -50 + ], + "size": { + "0": 210, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 51, + 57 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SolidMask" + }, + "widgets_values": [ + 0, + 768, + 512 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 32, + "type": "SolidMask", + "pos": [ + -570, + 100 + ], + "size": { + "0": 210, + "1": 106 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 71 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SolidMask" + }, + "widgets_values": [ + 1, + 384, + 512 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 4, + "type": "CLIPTextEncode", + "pos": [ + 550, + 310 + ], + "size": { + "0": 400, + "1": 160 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "two girl friends laughing" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 1450, + 400 + ], + "size": { + "0": 675.9465942382812, + "1": 480.1444091796875 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 50, + "type": "IPAdapterApply", + "pos": [ + 1030, + -220 + ], + "size": { + "0": 210, + "1": 190 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 79 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 80 + }, + { + "name": "image", + "type": "IMAGE", + "link": 92 + }, + { + "name": "model", + "type": "MODEL", + "link": 90 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": 83 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 84 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.7000000000000001, + 0.5, + "channel penalty" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 52, + "type": "PrepImageForClipVision", + "pos": [ + 760, + -380 + ], + "size": { + "0": 243.60000610351562, + "1": 110 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": 
"image", + "type": "IMAGE", + "link": 91 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 92 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "center", + 0 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 11, + "type": "CLIPVisionLoader", + "pos": [ + 50, + -350 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 80, + 86 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 12, + "type": "LoadImage", + "pos": [ + -270, + -420 + ], + "size": { + "0": 230, + "1": 320 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "woman.png", + "image" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 13, + "type": "PrepImageForClipVision", + "pos": [ + 20, + -200 + ], + "size": { + "0": 243.60000610351562, + "1": 110 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 32 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 87 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "center", + 0 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 47, + "type": "FeatherMask", + "pos": [ + -310, + 100 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 71 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 72 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "FeatherMask" + }, + "widgets_values": [ + 0, + 0, + 150, + 0 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 33, + "type": "MaskComposite", + "pos": [ + -50, + 40 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "destination", + "type": "MASK", + "link": 51 + }, + { + "name": "source", + "type": "MASK", + "link": 72 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 89 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskComposite" + }, + "widgets_values": [ + 0, + 0, + "add" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 38, + "type": "MaskComposite", + "pos": [ + 780, + -70 + ], + "size": { + "0": 210, + "1": 126 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "destination", + "type": "MASK", + "link": 57 + }, + { + "name": "source", + "type": "MASK", + "link": 76 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 83 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskComposite" + }, + "widgets_values": [ + 384, + 0, + "add" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 37, + "type": 
"SolidMask", + "pos": [ + 280, + 100 + ], + "size": { + "0": 210, + "1": 106 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 75 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SolidMask" + }, + "widgets_values": [ + 1, + 384, + 512 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 48, + "type": "FeatherMask", + "pos": [ + 530, + 90 + ], + "size": { + "0": 210, + "1": 130 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 75 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 76 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "FeatherMask" + }, + "widgets_values": [ + 150, + 0, + 0, + 0 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 51, + "type": "IPAdapterApply", + "pos": [ + 468, + -169 + ], + "size": { + "0": 210, + "1": 190 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 85 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 86 + }, + { + "name": "image", + "type": "IMAGE", + "link": 87 + }, + { + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": 89 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.7000000000000001, + 0.5, + "channel penalty" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 27, + "type": "LoadImage", + "pos": [ + 480, + -600 + ], + "size": { + "0": 240, + "1": 330 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 91 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 10, + "type": "IPAdapterModelLoader", + "pos": [ + 50, + -460 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 79, + 85 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus_sd15.safetensors" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 1, + "type": "KSampler", + "pos": [ + 1160, + 300 + ], + "size": { + "0": 240, + "1": 262 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 84 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 2 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 3 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 4 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 37, + "fixed", + 30, + 5, + "dpmpp_2m", + "karras", + 1 + ] + } + ], + "links": [ + [ + 2, + 4, + 0, + 1, + 1, + "CONDITIONING" + ], + [ + 3, + 5, + 0, + 1, + 2, + "CONDITIONING" + ], + [ + 4, + 3, + 0, + 1, + 3, + "LATENT" + ], + [ + 5, + 2, + 1, + 
4, + 0, + "CLIP" + ], + [ + 6, + 2, + 1, + 5, + 0, + "CLIP" + ], + [ + 7, + 1, + 0, + 6, + 0, + "LATENT" + ], + [ + 8, + 7, + 0, + 6, + 1, + "VAE" + ], + [ + 9, + 6, + 0, + 8, + 0, + "IMAGE" + ], + [ + 32, + 12, + 0, + 13, + 0, + "IMAGE" + ], + [ + 51, + 34, + 0, + 33, + 0, + "MASK" + ], + [ + 57, + 34, + 0, + 38, + 0, + "MASK" + ], + [ + 71, + 32, + 0, + 47, + 0, + "MASK" + ], + [ + 72, + 47, + 0, + 33, + 1, + "MASK" + ], + [ + 75, + 37, + 0, + 48, + 0, + "MASK" + ], + [ + 76, + 48, + 0, + 38, + 1, + "MASK" + ], + [ + 79, + 10, + 0, + 50, + 0, + "IPADAPTER" + ], + [ + 80, + 11, + 0, + 50, + 1, + "CLIP_VISION" + ], + [ + 83, + 38, + 0, + 50, + 4, + "MASK" + ], + [ + 84, + 50, + 0, + 1, + 0, + "MODEL" + ], + [ + 85, + 10, + 0, + 51, + 0, + "IPADAPTER" + ], + [ + 86, + 11, + 0, + 51, + 1, + "CLIP_VISION" + ], + [ + 87, + 13, + 0, + 51, + 2, + "IMAGE" + ], + [ + 88, + 2, + 0, + 51, + 3, + "MODEL" + ], + [ + 89, + 33, + 0, + 51, + 4, + "MASK" + ], + [ + 90, + 51, + 0, + 50, + 3, + "MODEL" + ], + [ + 91, + 27, + 0, + 52, + 0, + "IMAGE" + ], + [ + 92, + 52, + 0, + 50, + 2, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_Canny.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_Canny.json new file mode 100644 index 0000000000000000000000000000000000000000..d91bd2b9d0880687b7cc620de79c31959f97a9c7 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_Canny.json @@ -0,0 +1,832 @@ +{ + "last_node_id": 17, + "last_link_id": 19, + "nodes": [ + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 16 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful renaissance girl with a necklace, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + 
}, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 14, + "type": "LoadImage", + "pos": [ + 50, + 860 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "portrait.jpg", + "image" + ] + }, + { + "id": 15, + "type": "ControlNetLoader", + "pos": [ + 190, + 750 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "control_v11p_sd15_canny_fp16.safetensors" + ] + }, + { + "id": 17, + "type": "PreviewImage", + "pos": [ + 579.412728881836, + 903.3208389282227 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 19 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 13, + "type": "Canny", + "pos": [ + 290, + 860 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 14 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 18, + 19 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Canny" + }, + "widgets_values": [ + 0.1, + 0.5 + ] + }, + { + "id": 16, + "type": "ControlNetApply", + "pos": [ + 540, + 750 + ], + "size": { + "0": 317.4000244140625, + "1": 98 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 16 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 15 + }, + { + "name": "image", + "type": "IMAGE", + "link": 18 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 17 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetApply" + }, + "widgets_values": [ + 0.8 + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + 
}, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 17 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 27, + "fixed", + 30, + 6, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus_sd15.safetensors" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/realisticVisionV51_v51VAE.safetensors" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 656, + -51 + ], + "size": { + "0": 210, + "1": 258 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 3 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.8, + 0, + "original", + 0, + 1, + false + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 3, + 6, + 0, + 5, + 2, + "IMAGE" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 14, + 14, + 0, + 13, + 0, + "IMAGE" + ], + [ + 15, + 15, + 0, + 16, + 1, + "CONTROL_NET" + ], + [ + 16, + 7, + 0, + 16, + 0, + "CONDITIONING" + ], + [ + 17, + 16, + 0, 
+ 9, + 1, + "CONDITIONING" + ], + [ + 18, + 13, + 0, + 16, + 2, + "IMAGE" + ], + [ + 19, + 13, + 0, + 17, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_batch_images.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_batch_images.json new file mode 100644 index 0000000000000000000000000000000000000000..16bd704a38665ba7f52f03e3e4497ed4629b09a9 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_batch_images.json @@ -0,0 +1,693 @@ +{ + "last_node_id": 14, + "last_link_id": 23, + "nodes": [ + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter_sd15.bin" + ] + }, + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful renaissance girl, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 13, + "mode": 0, + 
"inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 650, + 60 + ], + "size": { + "0": 210, + "1": 142 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 23 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 1, + 0 + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 25, + 6, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 13, + "type": "LoadImage", + "pos": [ + 40, + 420 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 14 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "portrait.jpg", + "image" + ] + }, + { + "id": 14, + "type": "ImageBatch", + "pos": [ + 290, + 430 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 21 + }, + { + "name": "image2", + "type": "IMAGE", + "link": 14 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageBatch" + } + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 
+ ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/absolutereality_v181.safetensors" + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 8, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 14, + 13, + 0, + 14, + 1, + "IMAGE" + ], + [ + 21, + 6, + 0, + 14, + 0, + "IMAGE" + ], + [ + 23, + 14, + 0, + 5, + 2, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_face.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_face.json new file mode 100644 index 0000000000000000000000000000000000000000..54731fbffef3d99509ad966e05b84e626852f3aa --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_face.json @@ -0,0 +1,597 @@ +{ + "last_node_id": 18, + "last_link_id": 28, + "nodes": [ + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "realisticVisionV51_v51VAE.safetensors" + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 
60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus-face_sd15.bin" + ] + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "closeup photo of a guy flexing muscles at the gym" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 14, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "einstein.jpg", + "image" + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 650, + 60 + ], + "size": { + "0": 210, + "1": 142 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 28 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.6000000000000001, + 0.3 + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 25 + }, + { + 
"name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 26 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 32, + "fixed", + 35, + 6, + "ddim", + "ddim_uniform", + 1 + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 25, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 26, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 28, + 14, + 0, + 5, + 2, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_inpaint.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_inpaint.json new file mode 100644 index 0000000000000000000000000000000000000000..0e41cc6e0381c77fc6d8419f79c7da9450633cfe --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_inpaint.json @@ -0,0 +1,679 @@ +{ + "last_node_id": 23, + "last_link_id": 37, + "nodes": [ + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "closeup photo of a renaissance astronaut " + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 14, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + 
"name": "IMAGE", + "type": "IMAGE", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 650, + 60 + ], + "size": { + "0": 210, + "1": 142 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 28 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.8, + 0 + ] + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 290, + 430 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12, + 30 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 25 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 32 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 34, + "fixed", + 35, + 6, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus_sd15.bin" + ] + }, + { + "id": 20, + "type": "VAEEncodeForInpaint", + "pos": [ + 650, + 590 + ], + "size": { + "0": 230, + "1": 100 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 29 + }, + { + "name": "vae", + "type": "VAE", + "link": 30 + }, + { + "name": "mask", + "type": "MASK", + "link": 37 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncodeForInpaint" + }, + "widgets_values": [ + 12 + ] + }, + { + "id": 19, + "type": "LoadImage", + "pos": [ + 370, + 600 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 29 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-8127362.png [input]", + 
"image" + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/absolutereality_v181INPAINTING.safetensors" + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 25, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 28, + 14, + 0, + 5, + 2, + "IMAGE" + ], + [ + 29, + 19, + 0, + 20, + 0, + "IMAGE" + ], + [ + 30, + 2, + 0, + 20, + 1, + "VAE" + ], + [ + 32, + 20, + 0, + 9, + 3, + "LATENT" + ], + [ + 37, + 19, + 1, + 20, + 2, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_mask.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_mask.json new file mode 100644 index 0000000000000000000000000000000000000000..b82d79be4dbe0058aa577dca87bd57d74290aad7 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_mask.json @@ -0,0 +1,665 @@ +{ + "last_node_id": 51, + "last_link_id": 96, + "nodes": [ + { + "id": 5, + "type": "CLIPTextEncode", + "pos": [ + 550, + 510 + ], + "size": { + "0": 400, + "1": 160 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 3 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, malformed, video game, rendering, naked, cleavage, horror, zombie, text, watermark" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "VAEDecode", + "pos": [ + 1360, + 270 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 7, + "type": "VAELoader", + "pos": [ + 1090, + 570 + ], + "size": { + "0": 240, + "1": 60 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "VAE", + 
"type": "VAE", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 2, + "type": "CheckpointLoaderSimple", + "pos": [ + 210, + 410 + ], + "size": { + "0": 290, + "1": 100 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 88 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/dreamshaper_8.safetensors" + ] + }, + { + "id": 11, + "type": "CLIPVisionLoader", + "pos": [ + 490, + 20 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 86 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 13, + "type": "PrepImageForClipVision", + "pos": [ + 540, + 130 + ], + "size": { + "0": 243.60000610351562, + "1": 110 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 32 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 87 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "center", + 0 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 4, + "type": "CLIPTextEncode", + "pos": [ + 550, + 310 + ], + "size": { + "0": 400, + "1": 160 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "photography of a warrior woman in a cherry blossom forest, high quality, highly detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 3, + "type": "EmptyLatentImage", + "pos": [ + 740, + 720 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 1, + "type": "KSampler", + "pos": [ + 1090, + 270 + ], + "size": { + "0": 240, + "1": 262 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 93 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 2 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 3 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 4 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 40, + "fixed", + 30, + 5, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 10, + "type": "IPAdapterModelLoader", + "pos": [ + 490, + -80 + ], + "size": { + "0": 300, + "1": 
60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 85 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus_sd15.bin" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 12, + "type": "LoadImage", + "pos": [ + 230, + -80 + ], + "size": [ + 230, + 320 + ], + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": [ + 96 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "clipspace/clipspace-mask-790059.png [input]", + "image" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 51, + "type": "IPAdapterApply", + "pos": [ + 830, + 40 + ], + "size": [ + 210, + 190 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 85 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 86 + }, + { + "name": "image", + "type": "IMAGE", + "link": 87 + }, + { + "name": "model", + "type": "MODEL", + "link": 88 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": 96 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 93 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.7000000000000001, + 0, + "linear" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 1365, + 369 + ], + "size": [ + 675.9466064453127, + 480.1444152832032 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + } + ], + "links": [ + [ + 2, + 4, + 0, + 1, + 1, + "CONDITIONING" + ], + [ + 3, + 5, + 0, + 1, + 2, + "CONDITIONING" + ], + [ + 4, + 3, + 0, + 1, + 3, + "LATENT" + ], + [ + 5, + 2, + 1, + 4, + 0, + "CLIP" + ], + [ + 6, + 2, + 1, + 5, + 0, + "CLIP" + ], + [ + 7, + 1, + 0, + 6, + 0, + "LATENT" + ], + [ + 8, + 7, + 0, + 6, + 1, + "VAE" + ], + [ + 9, + 6, + 0, + 8, + 0, + "IMAGE" + ], + [ + 32, + 12, + 0, + 13, + 0, + "IMAGE" + ], + [ + 85, + 10, + 0, + 51, + 0, + "IPADAPTER" + ], + [ + 86, + 11, + 0, + 51, + 1, + "CLIP_VISION" + ], + [ + 87, + 13, + 0, + 51, + 2, + "IMAGE" + ], + [ + 88, + 2, + 0, + 51, + 3, + "MODEL" + ], + [ + 93, + 51, + 0, + 1, + 0, + "MODEL" + ], + [ + 96, + 12, + 1, + 51, + 4, + "MASK" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_prepped.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_prepped.json new file mode 100644 index 0000000000000000000000000000000000000000..85d5ad7fef628c2de4f286befce661c285cb5101 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_prepped.json @@ -0,0 +1,791 @@ +{ + "last_node_id": 29, + "last_link_id": 64, + "nodes": [ + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], 
+ "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 30, + 5, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 650, + 60 + ], + "size": { + "0": 210, + "1": 142 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 45 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 1, + 0 + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + 
"Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter_sdxl_vit-h.bin" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sdxl/sd_xl_base_1.0.safetensors" + ] + }, + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae.safetensors" + ] + }, + { + "id": 21, + "type": "LoadImage", + "pos": [ + -10, + 440 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 61 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "woman.png", + "image" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + -10, + 70 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 63 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "girl_sitting.png", + "image" + ] + }, + { + "id": 24, + "type": "ImageBatch", + "pos": [ + 380, + 420 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 64 + }, + { + "name": "image2", + "type": "IMAGE", + "link": 62 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageBatch" + } + }, + { + "id": 28, + "type": "PrepImageForClipVision", + "pos": [ + 350, + 510 + ], + "size": [ + 240, + 110 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 61 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 62 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "top", + 0 + ] + }, + { + "id": 29, + "type": "PrepImageForClipVision", + "pos": [ + 350, + 660 + ], + "size": [ + 240, + 110 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "image", + 
"type": "IMAGE", + "link": 63 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 64 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "top", + 0 + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 8, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 45, + 24, + 0, + 5, + 2, + "IMAGE" + ], + [ + 61, + 21, + 0, + 28, + 0, + "IMAGE" + ], + [ + 62, + 28, + 0, + 24, + 1, + "IMAGE" + ], + [ + 63, + 6, + 0, + 29, + 0, + "IMAGE" + ], + [ + 64, + 29, + 0, + 24, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_sdxl_vit-h.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_sdxl_vit-h.json new file mode 100644 index 0000000000000000000000000000000000000000..dc20bf47c7d746d6687ca0faa9a4ff94c68d78f5 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_sdxl_vit-h.json @@ -0,0 +1,606 @@ +{ + "last_node_id": 12, + "last_link_id": 13, + "nodes": [ + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 650, + 420 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 650, + 250 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful renaissance girl, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1300, + 170 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1300, + 270 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + 40, + 60 + ], + "size": { + "0": 220, + "1": 320 + 
}, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 940, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "sdxl_vae.safetensors" + ] + }, + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 650, + 590 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 930, + 170 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 7 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 25, + 5, + "ddim", + "ddim_uniform", + 1 + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 290, + 170 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 5, + "type": "IPAdapterApply", + "pos": [ + 652, + -55 + ], + "size": { + "0": 210, + "1": 258 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 1 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 3 + }, + { + "name": "model", + "type": "MODEL", + "link": 4 + }, + { + "name": "attn_mask", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.8, + 0, + "original", + 0, + 1, + false + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 290, + 60 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 1 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter_sdxl_vit-h.safetensors" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 290, + 280 + ], + "size": { + "0": 300, + "1": 100 + }, 
+ "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 4 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sdxl/sd_xl_base_1.0_0.9vae.safetensors" + ] + } + ], + "links": [ + [ + 1, + 3, + 0, + 5, + 0, + "IPADAPTER" + ], + [ + 2, + 4, + 0, + 5, + 1, + "CLIP_VISION" + ], + [ + 3, + 6, + 0, + 5, + 2, + "IMAGE" + ], + [ + 4, + 1, + 0, + 5, + 3, + "MODEL" + ], + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 7, + 5, + 0, + 9, + 0, + "MODEL" + ], + [ + 8, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weight_types.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weight_types.json new file mode 100644 index 0000000000000000000000000000000000000000..5b9d099f718d59389e34c9fb8060ec3cdb2b4a36 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weight_types.json @@ -0,0 +1,1380 @@ +{ + "last_node_id": 57, + "last_link_id": 154, + "nodes": [ + { + "id": 18, + "type": "CLIPVisionLoader", + "pos": [ + 260, + -470 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 89, + 94, + 98 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 360, + 120 + ], + "size": { + "0": 380, + "1": 160 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 72 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6, + 102, + 111, + 129 + ], + "slot_index": 0 + } + ], + "title": "CLIP Text Encode (Negative)", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, distorted, malformed, gore, naked, bare skin, tattoo" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 11, + "type": "VAELoader", + "pos": [ + 440, + 480 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12, + 104, + 106, + 133 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -184, + 113 + ], + "size": { + "0": 340, + "1": 100 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 91, + 96, + 100, + 131 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 71, + 72 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [], 
+ "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd15/deliberate_v3.safetensors" + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 520, + 330 + ], + "size": { + "0": 220, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2, + 103, + 112, + 130 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 17, + "type": "IPAdapterModelLoader", + "pos": [ + 260, + -570 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 88, + 93, + 97 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter-plus_sd15.bin" + ] + }, + { + "id": 51, + "type": "PreviewImage", + "pos": [ + 960, + 10 + ], + "size": { + "0": 527.208984375, + "1": 576.05859375 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 134 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 2730, + 10 + ], + "size": { + "0": 532.08154296875, + "1": 578.00732421875 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 107 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 43, + "type": "KSampler", + "pos": [ + 2970, + -290 + ], + "size": { + "0": 290, + "1": 262 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 115 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 151 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 111 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 112 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 117 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 292, + "fixed", + 40, + 5.5, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 41, + "type": "VAEDecode", + "pos": [ + 2810, + -90 + ], + "size": { + "0": 140, + "1": 60 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 117 + }, + { + "name": "vae", + "type": "VAE", + "link": 106 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 107 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 2370, + -300 + ], + "size": { + "0": 300, + "1": 262 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 92 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 149 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 292, + "fixed", + 40, + 5.5, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 2210, + -100 + 
], + "size": { + "0": 140, + "1": 60 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 56 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 38, + "type": "KSampler", + "pos": [ + 1770, + -310 + ], + "size": { + "0": 290, + "1": 262 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 108 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 150 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 102 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 103 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 116 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 292, + "fixed", + 40, + 5.5, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 39, + "type": "VAEDecode", + "pos": [ + 1610, + -110 + ], + "size": { + "0": 140, + "1": 60 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 116 + }, + { + "name": "vae", + "type": "VAE", + "link": 104 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 105 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 50, + "type": "VAEDecode", + "pos": [ + 1000, + -110 + ], + "size": { + "0": 140, + "1": 60 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 135 + }, + { + "name": "vae", + "type": "VAE", + "link": 133 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 134 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 360, + -90 + ], + "size": { + "0": 380, + "1": 170 + }, + "flags": { + "collapsed": false + }, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 71 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 149, + 150, + 151, + 152 + ], + "slot_index": 0 + } + ], + "title": "CLIP Text Encode (Positive)", + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "portrait illustration of a warrior woman in full armor, in a dungeon\n\nhighly detailed, dramatic lighting, 4k" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 20, + "type": "PrepImageForClipVision", + "pos": [ + 320, + -350 + ], + "size": { + "0": 243.60000610351562, + "1": 110 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 148 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 144, + 146, + 147 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PrepImageForClipVision" + }, + "widgets_values": [ + "LANCZOS", + "center", + 0 + ] + }, + { + "id": 49, + "type": "KSampler", + "pos": [ + 1160, + -310 + ], + "size": { + "0": 300, + "1": 262 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 131 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 152 + }, + { + "name": "negative", + 
"type": "CONDITIONING", + "link": 129 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 130 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 135 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 292, + "fixed", + 46, + 5.5, + "dpmpp_2m", + "karras", + 1 + ] + }, + { + "id": 40, + "type": "PreviewImage", + "pos": [ + 1540, + 10 + ], + "size": { + "0": 520, + "1": 570 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 105 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 29, + "type": "PreviewImage", + "pos": [ + 2130, + 10 + ], + "size": { + "0": 520, + "1": 570 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 56 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 36, + "type": "IPAdapterApply", + "pos": [ + 1520, + -320 + ], + "size": { + "0": 234.41876220703125, + "1": 166 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 93 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 94 + }, + { + "name": "image", + "type": "IMAGE", + "link": 146 + }, + { + "name": "model", + "type": "MODEL", + "link": 96 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 108 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.25, + 0.3, + "linear" + ] + }, + { + "id": 35, + "type": "IPAdapterApply", + "pos": [ + 2100, + -310 + ], + "size": { + "0": 246.50965881347656, + "1": 166 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 88 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 89 + }, + { + "name": "image", + "type": "IMAGE", + "link": 144 + }, + { + "name": "model", + "type": "MODEL", + "link": 91 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 92 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.25, + 0.3, + "original" + ] + }, + { + "id": 37, + "type": "IPAdapterApply", + "pos": [ + 2710, + -300 + ], + "size": { + "0": 239.32774353027344, + "1": 166 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 97 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 98 + }, + { + "name": "image", + "type": "IMAGE", + "link": 147 + }, + { + "name": "model", + "type": "MODEL", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 115 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApply" + }, + "widgets_values": [ + 0.25, + 0.3, + "channel penalty" + ] + }, + { + "id": 19, + "type": "LoadImage", + "pos": [ + 78, + -351 + ], + "size": { + "0": 210, + "1": 280 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 148 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "woman.png", + "image" + ] + } + ], + "links": [ + [ + 2, + 
5, + 0, + 3, + 3, + "LATENT" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 12, + 11, + 0, + 8, + 1, + "VAE" + ], + [ + 56, + 8, + 0, + 29, + 0, + "IMAGE" + ], + [ + 71, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 72, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 88, + 17, + 0, + 35, + 0, + "IPADAPTER" + ], + [ + 89, + 18, + 0, + 35, + 1, + "CLIP_VISION" + ], + [ + 91, + 4, + 0, + 35, + 3, + "MODEL" + ], + [ + 92, + 35, + 0, + 3, + 0, + "MODEL" + ], + [ + 93, + 17, + 0, + 36, + 0, + "IPADAPTER" + ], + [ + 94, + 18, + 0, + 36, + 1, + "CLIP_VISION" + ], + [ + 96, + 4, + 0, + 36, + 3, + "MODEL" + ], + [ + 97, + 17, + 0, + 37, + 0, + "IPADAPTER" + ], + [ + 98, + 18, + 0, + 37, + 1, + "CLIP_VISION" + ], + [ + 100, + 4, + 0, + 37, + 3, + "MODEL" + ], + [ + 102, + 7, + 0, + 38, + 2, + "CONDITIONING" + ], + [ + 103, + 5, + 0, + 38, + 3, + "LATENT" + ], + [ + 104, + 11, + 0, + 39, + 1, + "VAE" + ], + [ + 105, + 39, + 0, + 40, + 0, + "IMAGE" + ], + [ + 106, + 11, + 0, + 41, + 1, + "VAE" + ], + [ + 107, + 41, + 0, + 42, + 0, + "IMAGE" + ], + [ + 108, + 36, + 0, + 38, + 0, + "MODEL" + ], + [ + 111, + 7, + 0, + 43, + 2, + "CONDITIONING" + ], + [ + 112, + 5, + 0, + 43, + 3, + "LATENT" + ], + [ + 115, + 37, + 0, + 43, + 0, + "MODEL" + ], + [ + 116, + 38, + 0, + 39, + 0, + "LATENT" + ], + [ + 117, + 43, + 0, + 41, + 0, + "LATENT" + ], + [ + 129, + 7, + 0, + 49, + 2, + "CONDITIONING" + ], + [ + 130, + 5, + 0, + 49, + 3, + "LATENT" + ], + [ + 131, + 4, + 0, + 49, + 0, + "MODEL" + ], + [ + 133, + 11, + 0, + 50, + 1, + "VAE" + ], + [ + 134, + 50, + 0, + 51, + 0, + "IMAGE" + ], + [ + 135, + 49, + 0, + 50, + 0, + "LATENT" + ], + [ + 144, + 20, + 0, + 35, + 2, + "IMAGE" + ], + [ + 146, + 20, + 0, + 36, + 2, + "IMAGE" + ], + [ + 147, + 20, + 0, + 37, + 2, + "IMAGE" + ], + [ + 148, + 19, + 0, + 20, + 0, + "IMAGE" + ], + [ + 149, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 150, + 6, + 0, + 38, + 1, + "CONDITIONING" + ], + [ + 151, + 6, + 0, + 43, + 1, + "CONDITIONING" + ], + [ + 152, + 6, + 0, + 49, + 1, + "CONDITIONING" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weighted.json b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weighted.json new file mode 100644 index 0000000000000000000000000000000000000000..b55ba3e1d464daa57577f88e31dd16ab5980cdf5 --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/examples/IPAdapter_weighted.json @@ -0,0 +1,710 @@ +{ + "last_node_id": 15, + "last_link_id": 22, + "nodes": [ + { + "id": 10, + "type": "EmptyLatentImage", + "pos": [ + 820, + 520 + ], + "size": { + "0": 210, + "1": 110 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 1 + ] + }, + { + "id": 8, + "type": "CLIPTextEncode", + "pos": [ + 820, + 350 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 6 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "blurry, horror" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 7, + "type": 
"CLIPTextEncode", + "pos": [ + 820, + 180 + ], + "size": { + "0": 210, + "1": 120 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 8 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful renaissance girl, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 11, + "type": "VAEDecode", + "pos": [ + 1470, + 120 + ], + "size": { + "0": 140, + "1": 50 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 11 + }, + { + "name": "vae", + "type": "VAE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 12, + "type": "SaveImage", + "pos": [ + 1470, + 220 + ], + "size": { + "0": 400, + "1": 450 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": {}, + "widgets_values": [ + "IPAdapter" + ] + }, + { + "id": 4, + "type": "CLIPVisionLoader", + "pos": [ + 120, + -280 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 16 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "IPAdapter_image_encoder_sd15.safetensors" + ] + }, + { + "id": 15, + "type": "LoadImage", + "pos": [ + -40, + -170 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "cwf_portrait.jpg", + "image" + ] + }, + { + "id": 1, + "type": "CheckpointLoaderSimple", + "pos": [ + 410, + 210 + ], + "size": { + "0": 300, + "1": 100 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5, + 6 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "v1-5-pruned-emaonly.safetensors" + ] + }, + { + "id": 6, + "type": "LoadImage", + "pos": [ + 200, + -170 + ], + "size": { + "0": 220, + "1": 320 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 20 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 3, + "type": "IPAdapterModelLoader", + "pos": [ + 450, + -210 + ], + "size": [ + 260, + 60 + ], + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "IPADAPTER", + "type": "IPADAPTER", + "links": [ + 17 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for 
S&R": "IPAdapterModelLoader" + }, + "widgets_values": [ + "ip-adapter_sd15.bin" + ] + }, + { + "id": 13, + "type": "IPAdapterApplyEncoded", + "pos": [ + 790, + 30 + ], + "size": [ + 240, + 100 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "ipadapter", + "type": "IPADAPTER", + "link": 17 + }, + { + "name": "embeds", + "type": "EMBEDS", + "link": 18 + }, + { + "name": "model", + "type": "MODEL", + "link": 19 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 22 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterApplyEncoded" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 2, + "type": "VAELoader", + "pos": [ + 1110, + 460 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "vae-ft-mse-840000-ema-pruned.safetensors" + ] + }, + { + "id": 14, + "type": "IPAdapterEncoder", + "pos": [ + 500, + -100 + ], + "size": [ + 210, + 260 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 16 + }, + { + "name": "image_1", + "type": "IMAGE", + "link": 21 + }, + { + "name": "image_2", + "type": "IMAGE", + "link": 20 + }, + { + "name": "image_3", + "type": "IMAGE", + "link": null + }, + { + "name": "image_4", + "type": "IMAGE", + "link": null + } + ], + "outputs": [ + { + "name": "EMBEDS", + "type": "EMBEDS", + "links": [ + 18 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "IPAdapterEncoder" + }, + "widgets_values": [ + false, + 0.31, + 0.38, + 1, + 1, + 1 + ] + }, + { + "id": 9, + "type": "KSampler", + "pos": [ + 1100, + 150 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 22 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 8 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 9 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 10 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 0, + "fixed", + 35, + 5, + "ddim", + "ddim_uniform", + 1 + ] + } + ], + "links": [ + [ + 5, + 1, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 1, + 1, + 8, + 0, + "CLIP" + ], + [ + 8, + 7, + 0, + 9, + 1, + "CONDITIONING" + ], + [ + 9, + 8, + 0, + 9, + 2, + "CONDITIONING" + ], + [ + 10, + 10, + 0, + 9, + 3, + "LATENT" + ], + [ + 11, + 9, + 0, + 11, + 0, + "LATENT" + ], + [ + 12, + 2, + 0, + 11, + 1, + "VAE" + ], + [ + 13, + 11, + 0, + 12, + 0, + "IMAGE" + ], + [ + 16, + 4, + 0, + 14, + 0, + "CLIP_VISION" + ], + [ + 17, + 3, + 0, + 13, + 0, + "IPADAPTER" + ], + [ + 18, + 14, + 0, + 13, + 1, + "EMBEDS" + ], + [ + 19, + 1, + 0, + 13, + 2, + "MODEL" + ], + [ + 20, + 6, + 0, + 14, + 2, + "IMAGE" + ], + [ + 21, + 15, + 0, + 14, + 1, + "IMAGE" + ], + [ + 22, + 13, + 0, + 9, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/batch_images.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/batch_images.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..fffae3e09d2a62c916968154b6649277a9c0d8c5 Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/batch_images.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/canny_controlnet.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/canny_controlnet.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fd1ef5c82e1b1577e4e5498e756e901fdc100cd Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/canny_controlnet.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/face_swap.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/face_swap.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d638483f1ceaffe561183cc543a38bfc4edc89aa Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/face_swap.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/image_weighting.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/image_weighting.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94c27ec111a4ab59f4fe7f8b8ec995a7b7f2358c Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/image_weighting.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/inpainting.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/inpainting.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8ab43725aaa0318c1ad957013d2e57419b47f0b Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/inpainting.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/masking.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/masking.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd45bae2537a5cd5254c59693ba9cf75e7270ca0 Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/masking.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/noise_example.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/noise_example.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c73dc93f8128cbc418040c49d66102cd22ae637a Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/noise_example.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/prep_images.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/prep_images.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8aed2821cbd38b3976b37b356f22baaa86104f4f Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/prep_images.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/timestepping.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/timestepping.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3878778b9d77527a57b068dd117dbfa223e55766 Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/timestepping.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/examples/weight_types.jpg b/custom_nodes/ComfyUI_IPAdapter_plus/examples/weight_types.jpg new file mode 100644 index 0000000000000000000000000000000000000000..228cd7b596e482587a4cf397b8f43df05d203e5a Binary files /dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/examples/weight_types.jpg differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/ipadapter_workflow.png b/custom_nodes/ComfyUI_IPAdapter_plus/ipadapter_workflow.png new file mode 100644 index 0000000000000000000000000000000000000000..f061ca1a72f94664ef6ced3e354a1c557fabb2bc Binary files 
/dev/null and b/custom_nodes/ComfyUI_IPAdapter_plus/ipadapter_workflow.png differ diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/models/ip-adapter_sd15_plus.pth b/custom_nodes/ComfyUI_IPAdapter_plus/models/ip-adapter_sd15_plus.pth new file mode 100644 index 0000000000000000000000000000000000000000..b4fcc4637147d978238cfa837de3478c533882cb --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/models/ip-adapter_sd15_plus.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0db87557825ac2f8888ef06ec64c85f175e8ba5467baa2722ade3b4a9feb9f5 +size 158030471 diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/models/put_ipadapter_models_here.txt b/custom_nodes/ComfyUI_IPAdapter_plus/models/put_ipadapter_models_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI_IPAdapter_plus/resampler.py b/custom_nodes/ComfyUI_IPAdapter_plus/resampler.py new file mode 100644 index 0000000000000000000000000000000000000000..4521c8c3e6f17caf4547c3dd84118da760e5179f --- /dev/null +++ b/custom_nodes/ComfyUI_IPAdapter_plus/resampler.py @@ -0,0 +1,121 @@ +# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py +import math + +import torch +import torch.nn as nn + + +# FFN +def FeedForward(dim, mult=4): + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +def reshape_tensor(x, heads): + bs, length, width = x.shape + #(bs, length, width) --> (bs, length, n_heads, dim_per_head) + x = x.view(bs, length, heads, -1) + # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) + x = x.transpose(1, 2) + # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) + x = x.reshape(bs, heads, length, -1) + return x + + +class PerceiverAttention(nn.Module): + def __init__(self, *, dim, dim_head=64, heads=8): + super().__init__() + self.scale = dim_head**-0.5 + self.dim_head = dim_head + self.heads = heads + inner_dim = dim_head * heads + + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + + def forward(self, x, latents): + """ + Args: + x (torch.Tensor): image features + shape (b, n1, D) + latent (torch.Tensor): latent features + shape (b, n2, D) + """ + x = self.norm1(x) + latents = self.norm2(latents) + + b, l, _ = latents.shape + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + + q = reshape_tensor(q, self.heads) + k = reshape_tensor(k, self.heads) + v = reshape_tensor(v, self.heads) + + # attention + scale = 1 / math.sqrt(math.sqrt(self.dim_head)) + weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards + weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) + out = weight @ v + + out = out.permute(0, 2, 1, 3).reshape(b, l, -1) + + return self.to_out(out) + + +class Resampler(nn.Module): + def __init__( + self, + dim=1024, + depth=8, + dim_head=64, + heads=16, + num_queries=8, + embedding_dim=768, + output_dim=1024, + ff_mult=4, + ): + super().__init__() + + self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) + + self.proj_in = nn.Linear(embedding_dim, dim) + + self.proj_out = 
nn.Linear(dim, output_dim) + self.norm_out = nn.LayerNorm(output_dim) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + def forward(self, x): + + latents = self.latents.repeat(x.size(0), 1, 1) + + x = self.proj_in(x) + + for attn, ff in self.layers: + latents = attn(x, latents) + latents + latents = ff(latents) + latents + + latents = self.proj_out(latents) + return self.norm_out(latents) \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Noise/LICENSE b/custom_nodes/ComfyUI_Noise/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI_Noise/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_Noise/README.md b/custom_nodes/ComfyUI_Noise/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7402929546b9cc995e7346f96f5ee5724b5ab9e6
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/README.md
@@ -0,0 +1,88 @@
+# ComfyUI Noise
+
+This repo contains 6 nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) that allow more control and flexibility over the noise. This enables, for example, workflows that produce small variations on a generation, or that find the noise accompanying a given input image and prompt.
+
+## Nodes
+
+### Noisy Latent Image:
+This node lets you generate noise. You can find it under `latent>noise`, and it has the following settings:
+- **source**: where to generate the noise; currently supports GPU and CPU.
+- **seed**: the noise seed.
+- **width**: image width.
+- **height**: image height.
+- **batch_size**: batch size.
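+
+As a rough illustration of what this node produces (a minimal sketch, not the node's actual implementation; the 4-channel, 1/8-resolution latent shape is an assumption about SD1.5-style models):
+
+```python
+import torch
+
+def noisy_latent_image(seed, width, height, batch_size, source="cpu"):
+    # Standard-normal noise in latent space; SD1.5-style latents are
+    # (batch, 4, height/8, width/8) -- an assumption about the model.
+    device = torch.device(source)
+    generator = torch.Generator(device=device).manual_seed(seed)
+    return torch.randn(
+        (batch_size, 4, height // 8, width // 8),
+        generator=generator, device=device,
+    )
+```
+
+Note that CPU and GPU generators produce different sequences for the same seed, which is one reason the node exposes **source**.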
+
+This node lets you duplicate a certain sample in the batch; this can be used to duplicate e.g. encoded images, but also noise generated from the node listed above. You can find this node under `latent` and it has the following settings:
+- **latents**: the latents.
+- **batch_index**: which sample in the latents to duplicate.
+- **batch_size**: the new batch size (i.e. how many times to duplicate the sample).
+
+### Slerp Latents:
+This node lets you mix two latents together. Both input latents must share the same dimensions, or the node will ignore the mix factor and output the top slot instead. When it comes to other things attached to the latents, such as masks, only those of the top slot are passed on. You can find this node under `latent` and it comes with the following inputs:
+- **latents1**: first batch of latents.
+- **latents2**: second batch of latents. This input is optional.
+- **mask**: determines where in the latents to slerp. This input is optional.
+- **factor**: how much of the second batch of latents should be slerped into the first.
+
+### Get Sigma:
+This node can be used to calculate the amount of noise a sampler expects when it starts denoising. You can find this node under `latent>noise` and it comes with the following inputs and settings:
+- **model**: the model for which to calculate the sigma.
+- **sampler_name**: the name of the sampler for which to calculate the sigma.
+- **scheduler**: the type of schedule used in the sampler.
+- **steps**: the total number of steps in the schedule.
+- **start_at_step**: the start step of the sampler, i.e. how much noise it expects in the input image.
+- **end_at_step**: the current end step of the previous sampler, i.e. how much noise is already in the image.
+
+Most of the time you'd simply keep `start_at_step` at zero and `end_at_step` at `steps`. But if you want to re-inject some noise between two samplers, e.g. one sampler that denoises from 0 to 15 and a second that denoises from 10 to 20, you'd use a `start_at_step` of 10 and an `end_at_step` of 15, so that the image we get at step 15 can be noised back down to step 10 and the second sampler can bring it to 20 (see the numeric sketch under Examples below). Take note that the advanced KSampler has settings for `add_noise` and `return_with_leftover_noise`, both of which we want disabled when working with these nodes.
+
+### Inject Noise:
+This node lets you actually inject noise into an image latent; you can find this node under `latent>noise` and it comes with the following inputs:
+- **latents**: the latents to inject the noise into.
+- **noise**: the noise. This input is optional.
+- **mask**: determines where to inject noise. This input is optional.
+- **strength**: the strength of the noise. Note that we can use the node above to calculate an appropriate strength value for us.
+
+### Unsampler:
+This node does the reverse of a sampler: it calculates the noise that would generate the image, given the model and the prompt. You can find this node under `sampling` and it takes the following inputs and settings:
+- **model**: the model to target.
+- **steps**: number of steps to noise.
+- **end_step**: the step to travel back to.
+- **cfg**: classifier-free guidance scale.
+- **sampler_name**: the name of the sampling technique to use.
+- **scheduler**: the type of schedule to use.
+- **normalize**: whether to normalize the noise before output. Useful when passing it on to an Inject Noise node, which expects normalized noise.
+- **positive**: positive prompt.
+- **negative**: negative prompt.
+- **latent_image**: the image to renoise.
+
+When trying to reconstruct the target image as faithfully as possible, this works best when both the unsampler and the sampler use a cfg scale close to 1.0 and a similar number of steps. But it is fun and worthwhile to play around with these settings to get a better intuition for the results. This node lets you do similar things to what the A1111 [img2img alternative](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#img2img-alternative-test) script does.
+
+## Examples
+
+Here are some examples that show how to use the nodes above. Workflows for these examples can be found in the `example_workflow` folder.
+
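+Before the visual examples, here is a small numeric sketch of the sigma arithmetic described under `Get Sigma` above. The schedule values below are made up for illustration only, and the real node additionally divides by the model's latent scale factor:
+
+```python
+# Hypothetical 4-step sigma schedule: sigmas[i] is the noise level at step i.
+sigmas = [14.6, 7.1, 2.9, 0.8, 0.0]  # invented numbers, not a real schedule
+
+# Full strength: start_at_step=0 and end_at_step=steps,
+# i.e. the amount of noise a fresh sampler expects.
+full = sigmas[0] - sigmas[4]          # 14.6
+
+# Re-noising an image that sits at step 3 back down to step 2
+# (start_at_step=2, end_at_step=3):
+renoise = sigmas[2] - sigmas[3]       # 2.9 - 0.8 = 2.1
+print(full, renoise)
+```
+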
+
+### Generating variations
+
+![screenshot of a workflow that demos generating small variations to a given seed](https://github.com/BlenderNeko/ComfyUI_noise/blob/master/examples/example_variation.png)
+
+To create small variations of a given generation we can do the following: we generate the noise of the seed that we're interested in with a `Noisy Latent Image` node, then create an entire batch of these with a `Duplicate Batch Index` node. Note that if we were doing this for img2img we could use this same node to duplicate the image latents. Next we generate some more noise, but this time a batch of noise rather than a single sample. We then slerp this newly created noise into the other one with a `Slerp Latents` node. To figure out the required strength for injecting this noise we use a `Get Sigma` node. And finally we inject the slerped noise into a batch of empty latents with an `Inject Noise` node. Take note that we use an advanced KSampler with the `add_noise` setting disabled.
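+
+The same idea at the tensor level, as a rough Python sketch (the shapes, seeds, and sigma value below are assumptions for illustration; the actual nodes also handle masks, devices, and the model's schedule for you):
+
+```python
+import torch
+
+def slerp(val, low, high):
+    # Spherical interpolation per batch sample, same idea as the Slerp Latents node.
+    low_f = low.reshape(low.shape[0], -1)
+    high_f = high.reshape(high.shape[0], -1)
+    low_n = low_f / low_f.norm(dim=1, keepdim=True)
+    high_n = high_f / high_f.norm(dim=1, keepdim=True)
+    omega = torch.acos((low_n * high_n).sum(1)).unsqueeze(1)
+    so = torch.sin(omega)
+    res = torch.sin((1.0 - val) * omega) / so * low_f + torch.sin(val * omega) / so * high_f
+    return res.reshape(low.shape)
+
+torch.manual_seed(0)
+base = torch.randn(1, 4, 64, 64).repeat(4, 1, 1, 1)  # the seed we like, duplicated into a batch
+torch.manual_seed(1)
+fresh = torch.randn(4, 4, 64, 64)                    # a fresh batch of noise
+
+mixed = slerp(0.05, base, fresh)                     # slerp 5% of the fresh noise in
+sigma = 14.6                                         # stand-in for what Get Sigma would report
+latents = torch.zeros(4, 4, 64, 64) + mixed * sigma  # inject into empty latents
+```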
+ +
+ +"unsampling" + + +![screenshot of a workflow that demos generating small variations to a given seed](https://github.com/BlenderNeko/ComfyUI_noise/blob/master/examples/example_unsample.png) + +To get the noise that recreates a certain image, we first load an image. Then we use the `Unsampler` node with a low cfg value. To check if this is working we then take the resulting noise and feed it back into an advanced ksampler with the `add_noise` setting disabled, and a cfg of 1.0. + +
+ diff --git a/custom_nodes/ComfyUI_Noise/__init__.py b/custom_nodes/ComfyUI_Noise/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d721463be66961a2f388b3a756760d167ea5d510 --- /dev/null +++ b/custom_nodes/ComfyUI_Noise/__init__.py @@ -0,0 +1,3 @@ +from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Noise/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_Noise/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47f6c9adc1aa6c245862d8a98a173ca00e8df104 Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Noise/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI_Noise/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b441cb996a749b8b2ad95d685d3916e2e07ada Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json b/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json new file mode 100644 index 0000000000000000000000000000000000000000..86ebae968a66c3450636d45465de50d9a628e6ce --- /dev/null +++ b/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json @@ -0,0 +1,698 @@ +{ + "last_node_id": 27, + "last_link_id": 66, + "nodes": [ + { + "id": 23, + "type": "Reroute", + "pos": [ + 228, + 840 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 50 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 51, + 52 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 24, + "type": "Reroute", + "pos": [ + 400, + 740 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 53 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 54 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 970, + 640 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 44 + }, + { + "name": "vae", + "type": "VAE", + "link": 52 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 1280, + 681 + ], + "size": { + "0": 367.50909423828125, + "1": 383.8414306640625 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -64, + 642 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 56 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + 
"widgets_values": [ + "text, watermark" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -68, + 432 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 59 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," + ] + }, + { + "id": 19, + "type": "LoadImage", + "pos": [ + -124, + 906 + ], + "size": { + "0": 434.40911865234375, + "1": 440.44140625 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 34 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "example.png", + "image" + ] + }, + { + "id": 12, + "type": "KSamplerAdvanced", + "pos": [ + 950, + 740 + ], + "size": { + "0": 315, + "1": 334 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 54 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 61 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 58 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 66 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 44 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvanced" + }, + "widgets_values": [ + "disable", + 0, + "fixed", + 25, + 1, + "dpmpp_2m", + "karras", + 0, + 25, + "disable" + ] + }, + { + "id": 26, + "type": "Reroute", + "pos": [ + 450, + 670 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 59 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 61, + 62 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 25, + "type": "Reroute", + "pos": [ + 430, + 700 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 56 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 58, + 63 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 20, + "type": "VAEEncode", + "pos": [ + 354, + 894 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 34 + }, + { + "name": "vae", + "type": "VAE", + "link": 51 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 64 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -635, + 661 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53, + 65 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 50 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": 
"CheckpointLoaderSimple" + }, + "widgets_values": [ + "v1-5-pruned-emaonly.safetensors" + ] + }, + { + "id": 27, + "type": "BNK_Unsampler", + "pos": [ + 608, + 857 + ], + "size": { + "0": 315, + "1": 214 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 65 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 62 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 63 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 64 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 66 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_Unsampler" + }, + "widgets_values": [ + 25, + 0, + 1, + "dpmpp_2m", + "karras" + ] + } + ], + "links": [ + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 34, + 19, + 0, + 20, + 0, + "IMAGE" + ], + [ + 44, + 12, + 0, + 8, + 0, + "LATENT" + ], + [ + 50, + 4, + 2, + 23, + 0, + "*" + ], + [ + 51, + 23, + 0, + 20, + 1, + "VAE" + ], + [ + 52, + 23, + 0, + 8, + 1, + "VAE" + ], + [ + 53, + 4, + 0, + 24, + 0, + "*" + ], + [ + 54, + 24, + 0, + 12, + 0, + "MODEL" + ], + [ + 56, + 7, + 0, + 25, + 0, + "*" + ], + [ + 58, + 25, + 0, + 12, + 2, + "CONDITIONING" + ], + [ + 59, + 6, + 0, + 26, + 0, + "*" + ], + [ + 61, + 26, + 0, + 12, + 1, + "CONDITIONING" + ], + [ + 62, + 26, + 0, + 27, + 1, + "CONDITIONING" + ], + [ + 63, + 25, + 0, + 27, + 2, + "CONDITIONING" + ], + [ + 64, + 20, + 0, + 27, + 3, + "LATENT" + ], + [ + 65, + 4, + 0, + 27, + 0, + "MODEL" + ], + [ + 66, + 27, + 0, + 12, + 3, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json b/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json new file mode 100644 index 0000000000000000000000000000000000000000..a9a75e41d34ecaeeb3a2d7f19f1c117a9ff4103d --- /dev/null +++ b/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json @@ -0,0 +1,868 @@ +{ + "last_node_id": 39, + "last_link_id": 84, + "nodes": [ + { + "id": 26, + "type": "Reroute", + "pos": [ + 450, + 670 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 59 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 61 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 25, + "type": "Reroute", + "pos": [ + 430, + 700 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 56 + } + ], + "outputs": [ + { + "name": "", + "type": "CONDITIONING", + "links": [ + 58 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 24, + "type": "Reroute", + "pos": [ + 400, + 740 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 53 + } + ], + "outputs": [ + { + "name": "", + "type": "MODEL", + "links": [ + 54 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -64, + 642 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 7, + 
"mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 56 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark" + ] + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -68, + 432 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 59 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," + ] + }, + { + "id": 12, + "type": "KSamplerAdvanced", + "pos": [ + 835, + 887 + ], + "size": { + "0": 315, + "1": 334 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 54 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 61 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 58 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 84 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 44 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSamplerAdvanced" + }, + "widgets_values": [ + "disable", + 0, + "fixed", + 25, + 8, + "dpmpp_2m", + "karras", + 0, + 25, + "disable" + ] + }, + { + "id": 23, + "type": "Reroute", + "pos": [ + -230, + 1632 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 50 + } + ], + "outputs": [ + { + "name": "", + "type": "VAE", + "links": [ + 52 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + } + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1183, + 1133 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 44 + }, + { + "name": "vae", + "type": "VAE", + "link": 52 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 771, + 1259 + ], + "size": { + "0": 494.55535888671875, + "1": 524.3897705078125 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -635, + 661 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 53, + 74 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 50 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "v1-5-pruned-emaonly.safetensors" + ] + }, + { + "id": 34, + "type": "BNK_NoisyLatentImage", + "pos": [ + -216, + 980 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": 
"LATENT", + "type": "LATENT", + "links": [ + 75 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_NoisyLatentImage" + }, + "widgets_values": [ + "CPU", + 0, + "fixed", + 512, + 512, + 1 + ] + }, + { + "id": 35, + "type": "BNK_NoisyLatentImage", + "pos": [ + -217, + 1197 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 77 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_NoisyLatentImage" + }, + "widgets_values": [ + "CPU", + 1, + "fixed", + 512, + 512, + 4 + ] + }, + { + "id": 37, + "type": "BNK_DuplicateBatchIndex", + "pos": [ + 134, + 1012 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "latents", + "type": "LATENT", + "link": 75 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 76 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_DuplicateBatchIndex" + }, + "widgets_values": [ + 0, + 4 + ] + }, + { + "id": 38, + "type": "BNK_SlerpLatent", + "pos": [ + 137, + 1144 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "latents1", + "type": "LATENT", + "link": 76 + }, + { + "name": "latents2", + "type": "LATENT", + "link": 77 + }, + { + "name": "mask", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 81 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_SlerpLatent" + }, + "widgets_values": [ + 0.05 + ] + }, + { + "id": 39, + "type": "BNK_InjectNoise", + "pos": [ + 476, + 1131 + ], + "size": [ + 315, + 98 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "latents", + "type": "LATENT", + "link": 82 + }, + { + "name": "noise", + "type": "LATENT", + "link": 81 + }, + { + "name": "mask", + "type": "MASK", + "link": null + }, + { + "name": "strength", + "type": "FLOAT", + "link": 80, + "widget": { + "name": "strength", + "config": [ + "FLOAT", + { + "default": 1, + "min": 0, + "max": 20, + "step": 0.01 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 84 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_InjectNoise" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 33, + "type": "EmptyLatentImage", + "pos": [ + 474, + 985 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 82 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 512, + 512, + 4 + ] + }, + { + "id": 36, + "type": "BNK_GetSigma", + "pos": [ + -221, + 1420 + ], + "size": { + "0": 315, + "1": 154 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 74 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 80 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BNK_GetSigma" + }, + "widgets_values": [ + "dpmpp_2m", + "karras", + 25, + 0, + 25 + ] + } + ], + "links": [ + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 44, + 12, + 0, + 8, + 0, + "LATENT" + ], + [ + 50, + 4, + 2, + 23, + 0, + "*" + ], + [ + 52, + 23, + 0, + 8, + 1, + 
"VAE" + ], + [ + 53, + 4, + 0, + 24, + 0, + "*" + ], + [ + 54, + 24, + 0, + 12, + 0, + "MODEL" + ], + [ + 56, + 7, + 0, + 25, + 0, + "*" + ], + [ + 58, + 25, + 0, + 12, + 2, + "CONDITIONING" + ], + [ + 59, + 6, + 0, + 26, + 0, + "*" + ], + [ + 61, + 26, + 0, + 12, + 1, + "CONDITIONING" + ], + [ + 74, + 4, + 0, + 36, + 0, + "MODEL" + ], + [ + 75, + 34, + 0, + 37, + 0, + "LATENT" + ], + [ + 76, + 37, + 0, + 38, + 0, + "LATENT" + ], + [ + 77, + 35, + 0, + 38, + 1, + "LATENT" + ], + [ + 80, + 36, + 0, + 39, + 3, + "FLOAT" + ], + [ + 81, + 38, + 0, + 39, + 1, + "LATENT" + ], + [ + 82, + 33, + 0, + 39, + 0, + "LATENT" + ], + [ + 84, + 39, + 0, + 12, + 3, + "LATENT" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_Noise/examples/example_unsample.png b/custom_nodes/ComfyUI_Noise/examples/example_unsample.png new file mode 100644 index 0000000000000000000000000000000000000000..6296c1d5490484cb7d183ca4974689d23b2bd695 Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/examples/example_unsample.png differ diff --git a/custom_nodes/ComfyUI_Noise/examples/example_variation.png b/custom_nodes/ComfyUI_Noise/examples/example_variation.png new file mode 100644 index 0000000000000000000000000000000000000000..44d9a3f5424d9d8db31c090ba031385058cff69b Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/examples/example_variation.png differ diff --git a/custom_nodes/ComfyUI_Noise/nodes.py b/custom_nodes/ComfyUI_Noise/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2314df4daeebba82dd00d58c5a694cf4548c58 --- /dev/null +++ b/custom_nodes/ComfyUI_Noise/nodes.py @@ -0,0 +1,265 @@ +import torch + +import os +import sys + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) + +import comfy.model_management +import comfy.sample + +MAX_RESOLUTION=8192 + +def prepare_mask(mask, shape): + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear") + mask = mask.expand((-1,shape[1],-1,-1)) + if mask.shape[0] < shape[0]: + mask = mask.repeat((shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]] + return mask + +class NoisyLatentImage: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "source":(["CPU", "GPU"], ), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + }} + RETURN_TYPES = ("LATENT",) + FUNCTION = "create_noisy_latents" + + CATEGORY = "latent/noise" + + def create_noisy_latents(self, source, seed, width, height, batch_size): + torch.manual_seed(seed) + if source == "CPU": + device = "cpu" + else: + device = comfy.model_management.get_torch_device() + noise = torch.randn((batch_size, 4, height // 8, width // 8), dtype=torch.float32, device=device).cpu() + return ({"samples":noise}, ) + +class DuplicateBatchIndex: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latents":("LATENT",), + "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "duplicate_index" + + CATEGORY = "latent" + + def duplicate_index(self, latents, batch_index, batch_size): + s = latents.copy() + batch_index = 
min(s["samples"].shape[0] - 1, batch_index)
+        target = s["samples"][batch_index:batch_index + 1].clone()
+        target = target.repeat((batch_size,1,1,1))
+        s["samples"] = target
+        return (s,)
+
+# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475
+def slerp(val, low, high):
+    dims = low.shape
+
+    # flatten to batches
+    low = low.reshape(dims[0], -1)
+    high = high.reshape(dims[0], -1)
+
+    low_norm = low/torch.norm(low, dim=1, keepdim=True)
+    high_norm = high/torch.norm(high, dim=1, keepdim=True)
+
+    # zero out NaNs in case we divided by a zero norm
+    low_norm[low_norm != low_norm] = 0.0
+    high_norm[high_norm != high_norm] = 0.0
+
+    omega = torch.acos((low_norm*high_norm).sum(1))
+    so = torch.sin(omega)
+    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+    return res.reshape(dims)
+
+class LatentSlerp:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "latents1":("LATENT",),
+                "factor": ("FLOAT", {"default": .5, "min": 0.0, "max": 1.0, "step": 0.01}),
+            },
+            "optional" :{
+                "latents2":("LATENT",),
+                "mask": ("MASK", ),
+            }}
+
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "slerp_latents"
+
+    CATEGORY = "latent"
+
+    def slerp_latents(self, latents1, factor, latents2=None, mask=None):
+        s = latents1.copy()
+        if latents2 is None:
+            return (s,)
+        if latents1["samples"].shape != latents2["samples"].shape:
+            print("warning: shapes in LatentSlerp are not the same, ignoring")
+            return (s,)
+        slerped = slerp(factor, latents1["samples"].clone(), latents2["samples"].clone())
+        if mask is not None:
+            mask = prepare_mask(mask, slerped.shape)
+            slerped = mask * slerped + (1-mask) * latents1["samples"]
+        s["samples"] = slerped
+        return (s,)
+
+class GetSigma:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "model": ("MODEL",),
+            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+            "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+            "steps": ("INT", {"default": 10000, "min": 0, "max": 10000}),
+            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+            "end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}),
+            }}
+
+    RETURN_TYPES = ("FLOAT",)
+    FUNCTION = "calc_sigma"
+
+    CATEGORY = "latent/noise"
+
+    def calc_sigma(self, model, sampler_name, scheduler, steps, start_at_step, end_at_step):
+        device = comfy.model_management.get_torch_device()
+        end_at_step = min(steps, end_at_step)
+        start_at_step = min(start_at_step, end_at_step)
+        comfy.model_management.load_model_gpu(model)
+        real_model = model.model
+        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+        sigmas = sampler.sigmas
+        # the required noise strength is the sigma difference between the two steps,
+        # rescaled into latent space
+        sigma = sigmas[start_at_step] - sigmas[end_at_step]
+        sigma /= model.model.latent_format.scale_factor
+        return (sigma.cpu().numpy(),)
+
+class InjectNoise:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "latents":("LATENT",),
+            "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
+            },
+            "optional":{
+                "noise": ("LATENT",),
+                "mask": ("MASK", ),
+            }}
+
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "inject_noise"
+
+    CATEGORY = "latent/noise"
+
+    def inject_noise(self, latents, strength, noise=None, mask=None):
+        s = latents.copy()
+        if noise is None:
+            return (s,)
+        if latents["samples"].shape != noise["samples"].shape:
+            print("warning: shapes in InjectNoise are not the same, ignoring")
+            return (s,)
+        noised = s["samples"].clone() + noise["samples"].clone() * strength
+        if mask is not None:
+            mask = prepare_mask(mask, noised.shape)
+            noised = mask * noised + (1-mask) * latents["samples"]
+        s["samples"] = noised
+        return (s,)
+
+class Unsampler:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"model": ("MODEL",),
+                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                    "end_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+                    "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
+                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                    "normalize": (["disable", "enable"], ),
+                    "positive": ("CONDITIONING", ),
+                    "negative": ("CONDITIONING", ),
+                    "latent_image": ("LATENT", ),
+                    }}
+
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "unsampler"
+
+    CATEGORY = "sampling"
+
+    def unsampler(self, model, cfg, sampler_name, steps, end_at_step, scheduler, normalize, positive, negative, latent_image):
+        normalize = normalize == "enable"
+        device = comfy.model_management.get_torch_device()
+        latent = latent_image
+        latent_image = latent["samples"]
+
+        # convert "the step to travel back to" into a step count on the reversed schedule
+        end_at_step = min(end_at_step, steps-1)
+        end_at_step = steps - end_at_step
+
+        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+        noise_mask = None
+        if "noise_mask" in latent:
+            noise_mask = comfy.sample.prepare_mask(latent["noise_mask"], noise, device)
+
+        real_model = model.model
+
+        noise = noise.to(device)
+        latent_image = latent_image.to(device)
+
+        positive = comfy.sample.convert_cond(positive)
+        negative = comfy.sample.convert_cond(negative)
+
+        models, inference_memory = comfy.sample.get_additional_models(positive, negative, model.model_dtype())
+
+        comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise.shape) + inference_memory)
+
+        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+
+        # run the schedule in reverse (low noise -> high noise), with a small offset
+        sigmas = sampler.sigmas.flip(0) + 0.0001
+
+        pbar = comfy.utils.ProgressBar(steps)
+        def callback(step, x0, x, total_steps):
+            pbar.update_absolute(step + 1, total_steps)
+
+        samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, force_full_denoise=False, denoise_mask=noise_mask, sigmas=sigmas, start_step=0, last_step=end_at_step, callback=callback)
+        if normalize:
+            # technically doesn't normalize, because unsampling is not guaranteed
+            # to end at a std given by the schedule
+            samples -= samples.mean()
+            samples /= samples.std()
+        samples = samples.cpu()
+
+        comfy.sample.cleanup_additional_models(models)
+
+        out = latent.copy()
+        out["samples"] = samples
+        return (out, )
+
+NODE_CLASS_MAPPINGS = {
+    "BNK_NoisyLatentImage": NoisyLatentImage,
+    #"BNK_DuplicateBatchIndex": DuplicateBatchIndex,
+    "BNK_SlerpLatent": LatentSlerp,
+    "BNK_GetSigma": GetSigma,
+    "BNK_InjectNoise": InjectNoise,
+    "BNK_Unsampler": Unsampler,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "BNK_NoisyLatentImage": "Noisy Latent Image",
+    #"BNK_DuplicateBatchIndex": "Duplicate Batch Index",
+    "BNK_SlerpLatent": "Slerp Latents",
+    "BNK_GetSigma": "Get Sigma",
+    "BNK_InjectNoise": "Inject Noise",
+    "BNK_Unsampler": "Unsampler",
+}
diff --git a/custom_nodes/ComfyUI_TiledKSampler/LICENSE b/custom_nodes/ComfyUI_TiledKSampler/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ 
b/custom_nodes/ComfyUI_TiledKSampler/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. 
+ + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. 
+ + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_TiledKSampler/README.md b/custom_nodes/ComfyUI_TiledKSampler/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a1553387b497d39a1f7ee8f0b8cf45bd2f375352
--- /dev/null
+++ b/custom_nodes/ComfyUI_TiledKSampler/README.md
@@ -0,0 +1,66 @@
+# Tiled sampling for ComfyUI
+
+![panorama of the ocean, sailboats and large moody clouds](https://github.com/BlenderNeko/ComfyUI_TiledKSampler/blob/master/examples/ComfyUI_02010_.png)
+
+This repo contains a tiled sampler for [ComfyUI](https://github.com/comfyanonymous/ComfyUI). It allows for denoising larger images by splitting them up into smaller tiles and denoising these individually. It tries to minimize seams from showing up in the end result by gradually denoising all tiles one step at a time and randomizing tile positions for every step.
+
+### settings
+
+The tiled samplers come with some additional settings to further control their behavior:
+
+- **tile_width**: the width of the tiles.
+- **tile_height**: the height of the tiles.
+- **tiling_strategy**: which tiling strategy to use.
+
+## Tiling strategies
+
+### random:
+The random tiling strategy aims to reduce the presence of seams as much as possible by slowly denoising the entire image step by step, randomizing the tile positions for each step. It does this by alternating between horizontal and vertical brick patterns, randomly offsetting the pattern each time. As the number of steps grows to infinity, the strength of the seams shrinks to zero. Although this random offset eliminates seams, it comes at the cost of additional overhead per step and makes this strategy incompatible with uni samplers (e.g. UniPC). A short coordinate sketch follows the example images below.
+
+*Visual explanation:*
+
+![gif showing off the random brick tiling](https://github.com/BlenderNeko/ComfyUI_TiledKSampler/blob/master/examples/tiled_random.gif)
+ +
+*Example seamless image:*
+
+This tiling strategy is exceptionally good at hiding seams, even when starting from complete noise: repetitions are visible, but seams are not.
+
+![example image generated with the random brick tiling](https://github.com/BlenderNeko/ComfyUI_TiledKSampler/blob/master/examples/ComfyUI_02006_.png)
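+
+To make the brick pattern concrete, here is a minimal sketch of how one pass of jittered tile coordinates can be computed; it mirrors the `calc_coords` helper in this repo's `tiling.py`, and the sizes in the example are purely illustrative:
+
+```python
+import numpy as np
+
+def calc_coords(latent_size, tile_size, jitter):
+    # tiles needed to cover the latent once the grid is shifted by `jitter`
+    n = int((latent_size + jitter - 1) // tile_size + 1)
+    edges = [int(np.clip(tile_size * c - jitter, 0, latent_size)) for c in range(n + 1)]
+    # (start, length) pairs; tiles at the border may end up cropped
+    return [(c1, c2 - c1) for c1, c2 in zip(edges, edges[1:])]
+
+# a row of 96 latent pixels covered by 64-wide tiles, shifted by a jitter of 20
+print(calc_coords(96, 64, 20))  # [(0, 44), (44, 52)]
+```
+
+Every step draws a fresh random jitter and alternates between row-major and column-major brick layouts, which is where the per-step overhead of this strategy comes from.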
+
+### random strict:
+
+One downside of random is that it can unfavorably crop border tiles; random strict uses masking to ensure no border tiles have to be cropped. This tiling strategy does not play nicely with SDE samplers.
+
+### padded:
+
+The padded tiling strategy tries to reduce seams by giving each tile more context of its surroundings through padding. It does this by further dividing each tile into 9 smaller tiles, which are denoised in such a way that a tile is always surrounded by static context during denoising. This strategy is more prone to seams, but because the tile locations are static, it is compatible with uni samplers and has no overhead between steps. However, the padding means that up to 4 times as many tiles have to be denoised; a short sketch of the four passes follows the visual explanation below.
+
+*Visual explanation:*
+
+![gif showing off padded tiling](https://github.com/BlenderNeko/ComfyUI_TiledKSampler/blob/master/examples/tiled_padding.gif)
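+
+As a rough sketch of where the extra tile count comes from: the padded strategy denoises four static grids per image, each shifted by half a tile, so every seam of one grid falls inside the interior of another. This mirrors the pass construction in `get_tiles_and_masks_padded` in `tiling.py`; the helper name `padded_passes` and the sizes below are illustrative only (sizes are in latent pixels, i.e. image pixels / 8):
+
+```python
+import numpy as np
+
+def padded_passes(latent_h, latent_w, tile_h, tile_w):
+    # base grid plus copies shifted half a tile down, right, and both
+    h      = np.arange(0, latent_h, tile_h)
+    h_half = np.arange(tile_h // 2, latent_h - tile_h // 2, tile_h)
+    w      = np.arange(0, latent_w, tile_w)
+    w_half = np.arange(tile_w // 2, latent_w - tile_w // 2, tile_w)
+    return [(h, w), (h_half, w), (h, w_half), (h_half, w_half)]
+
+# a 128x128 latent with 64x64 tiles: 4 + 2 + 2 + 1 = 9 tiles instead of 4
+for hs, ws in padded_passes(128, 128, 64, 64):
+    print(len(hs) * len(ws), "tiles")
+```
+
+Within each pass, masking keeps only the center region of every interior tile (border tiles also keep the regions facing the image edge), which is what keeps every denoised region surrounded by static context.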
+ +### simple + +The simple tiling strategy divides the image into a static grid of tiles and denoises these one by one. + +### roadmap: + + - [x] latent masks + - [x] image wide control nets + - [x] T2I adaptors + - [ ] tile wide control nets and T2I adaptors (e.g. style models) + - [x] area conditioning + - [x] area mask conditioning + - [x] GLIGEN diff --git a/custom_nodes/ComfyUI_TiledKSampler/__init__.py b/custom_nodes/ComfyUI_TiledKSampler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d721463be66961a2f388b3a756760d167ea5d510 --- /dev/null +++ b/custom_nodes/ComfyUI_TiledKSampler/__init__.py @@ -0,0 +1,3 @@ +from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] \ No newline at end of file diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd66c24fc47dc02fdd1d21df917a983a86ba5091 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97de7c835d1c20cd190dbebc3d6b9c96a297b206 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-310.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2149ce089478522f194d7de30fb183a656a036a Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5292bbfa63a3452aea581fb35dd1434fdb826772 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-310.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6091a1b7be9051bcb3d91a182f5a869d089e369c Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-311.pyc b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebe7bd48f71c748d5659eeb30b8a15fc64adf451 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/__pycache__/tiling.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02006_.png b/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02006_.png new file mode 100644 index 0000000000000000000000000000000000000000..19c28f15e9cbd65d7d1abfee69362b9bad3a5375 --- /dev/null +++ b/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02006_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:343e7b747e6af0b766ce2b40fdb1d8191fe70be150ab11ac1a0ab0bcb65f10a7 +size 7490127 diff --git 
a/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02010_.png b/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02010_.png new file mode 100644 index 0000000000000000000000000000000000000000..bb3843f89c5777d4b5c6be4d249c8fc8a37c4e47 --- /dev/null +++ b/custom_nodes/ComfyUI_TiledKSampler/examples/ComfyUI_02010_.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a845e0a9fd437ec675b1249a8858d14c63e768e174f705d3c64af04553b28ba +size 4286483 diff --git a/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_padding.gif b/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_padding.gif new file mode 100644 index 0000000000000000000000000000000000000000..5aa15a4be21f29f17cf901227f6f1aec44804208 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_padding.gif differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_random.gif b/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_random.gif new file mode 100644 index 0000000000000000000000000000000000000000..c370287efd396991fdf036694ec3d520a6444e83 Binary files /dev/null and b/custom_nodes/ComfyUI_TiledKSampler/examples/tiled_random.gif differ diff --git a/custom_nodes/ComfyUI_TiledKSampler/nodes.py b/custom_nodes/ComfyUI_TiledKSampler/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..0c13a81101b1b69e4a0789a58d7a4252919dc1e9 --- /dev/null +++ b/custom_nodes/ComfyUI_TiledKSampler/nodes.py @@ -0,0 +1,359 @@ +import sys +import os +import itertools +import numpy as np + +from tqdm.auto import tqdm + +import torch + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) +import comfy.sd +import comfy.controlnet +import comfy.model_management +import comfy.sample +from . import tiling +import latent_preview + +MAX_RESOLUTION=8192 + +def recursion_to_list(obj, attr): + current = obj + yield current + while True: + current = getattr(current, attr, None) + if current is not None: + yield current + else: + return + +def copy_cond(cond): + return [[c1,c2.copy()] for c1,c2 in cond] + +def slice_cond(tile_h, tile_h_len, tile_w, tile_w_len, cond, area): + tile_h_end = tile_h + tile_h_len + tile_w_end = tile_w + tile_w_len + coords = area[0] #h_len, w_len, h, w, + mask = area[1] + if coords is not None: + h_len, w_len, h, w = coords + h_end = h + h_len + w_end = w + w_len + if h < tile_h_end and h_end > tile_h and w < tile_w_end and w_end > tile_w: + new_h = max(0, h - tile_h) + new_w = max(0, w - tile_w) + new_h_end = min(tile_h_end, h_end - tile_h) + new_w_end = min(tile_w_end, w_end - tile_w) + cond[1]['area'] = (new_h_end - new_h, new_w_end - new_w, new_h, new_w) + else: + return (cond, True) + if mask is not None: + new_mask = tiling.get_slice(mask, tile_h,tile_h_len,tile_w,tile_w_len) + if new_mask.sum().cpu() == 0.0 and 'mask' in cond[1]: + return (cond, True) + else: + cond[1]['mask'] = new_mask + return (cond, False) + +def slice_gligen(tile_h, tile_h_len, tile_w, tile_w_len, cond, gligen): + tile_h_end = tile_h + tile_h_len + tile_w_end = tile_w + tile_w_len + if gligen is None: + return + gligen_type = gligen[0] + gligen_model = gligen[1] + gligen_areas = gligen[2] + + gligen_areas_new = [] + for emb, h_len, w_len, h, w in gligen_areas: + h_end = h + h_len + w_end = w + w_len + if h < tile_h_end and h_end > tile_h and w < tile_w_end and w_end > tile_w: + new_h = max(0, h - tile_h) + new_w = max(0, w - tile_w) + new_h_end = min(tile_h_end, h_end - tile_h) + new_w_end = min(tile_w_end, w_end - tile_w) + 
gligen_areas_new.append((emb, new_h_end - new_h, new_w_end - new_w, new_h, new_w)) + + if len(gligen_areas_new) == 0: + del cond['gligen'] + else: + cond['gligen'] = (gligen_type, gligen_model, gligen_areas_new) + +def slice_cnet(h, h_len, w, w_len, model:comfy.controlnet.ControlBase, img): + if img is None: + img = model.cond_hint_original + model.cond_hint = tiling.get_slice(img, h*8, h_len*8, w*8, w_len*8).to(model.control_model.dtype).to(model.device) + +def slices_T2I(h, h_len, w, w_len, model:comfy.controlnet.ControlBase, img): + model.control_input = None + if img is None: + img = model.cond_hint_original + model.cond_hint = tiling.get_slice(img, h*8, h_len*8, w*8, w_len*8).float().to(model.device) + +# TODO: refactor some of the mess + +from PIL import Image + +def sample_common(model, add_noise, noise_seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0, preview=False): + end_at_step = min(end_at_step, steps) + device = comfy.model_management.get_torch_device() + samples = latent_image["samples"] + noise_mask = latent_image["noise_mask"] if "noise_mask" in latent_image else None + force_full_denoise = return_with_leftover_noise == "enable" + if add_noise == "disable": + noise = torch.zeros(samples.size(), dtype=samples.dtype, layout=samples.layout, device="cpu") + else: + skip = latent_image["batch_index"] if "batch_index" in latent_image else None + noise = comfy.sample.prepare_noise(samples, noise_seed, skip) + + if noise_mask is not None: + noise_mask = comfy.sample.prepare_mask(noise_mask, noise.shape, device='cpu') + + shape = samples.shape + samples = samples.clone() + + tile_width = min(shape[-1] * 8, tile_width) + tile_height = min(shape[2] * 8, tile_height) + + real_model = None + positive_copy = comfy.sample.convert_cond(positive) + negative_copy = comfy.sample.convert_cond(negative) + modelPatches, inference_memory = comfy.sample.get_additional_models(positive_copy, negative_copy, model.model_dtype()) + + comfy.model_management.load_models_gpu([model] + modelPatches, model.memory_required(noise.shape) + inference_memory) + real_model = model.model + + sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) + + if tiling_strategy != 'padded': + if noise_mask is not None: + samples += sampler.sigmas[start_at_step].cpu() * noise_mask * model.model.process_latent_out(noise) + else: + samples += sampler.sigmas[start_at_step].cpu() * model.model.process_latent_out(noise) + + # cnets + cnets = [c['control'] for (_, c) in positive + negative if 'control' in c] + # unroll recursion + cnets = list(set([x for m in cnets for x in recursion_to_list(m, "previous_controlnet")])) + # filter down to only cnets + cnets = [x for x in cnets if isinstance(x, comfy.controlnet.ControlNet)] + cnet_imgs = [ + torch.nn.functional.interpolate(m.cond_hint_original, (shape[-2] * 8, shape[-1] * 8), mode='nearest-exact').to('cpu') + if m.cond_hint_original.shape[-2] != shape[-2] * 8 or m.cond_hint_original.shape[-1] != shape[-1] * 8 else None + for m in cnets] + + # T2I + T2Is = [c['control'] for (_, c) in positive + negative if 'control' in c] + # unroll recursion + T2Is = [x for m in T2Is for x in recursion_to_list(m, "previous_controlnet")] + # filter down to only T2I + T2Is = [x for x in T2Is if isinstance(x, comfy.controlnet.T2IAdapter)] + T2I_imgs = [ 
+ torch.nn.functional.interpolate(m.cond_hint_original, (shape[-2] * 8, shape[-1] * 8), mode='nearest-exact').to('cpu') + if m.cond_hint_original.shape[-2] != shape[-2] * 8 or m.cond_hint_original.shape[-1] != shape[-1] * 8 or (m.channels_in == 1 and m.cond_hint_original.shape[1] != 1) else None + for m in T2Is + ] + T2I_imgs = [ + torch.mean(img, 1, keepdim=True) if img is not None and m.channels_in == 1 and m.cond_hint_original.shape[1] else img + for m, img in zip(T2Is, T2I_imgs) + ] + + #cond area and mask + spatial_conds_pos = [ + (c[1]['area'] if 'area' in c[1] else None, + comfy.sample.prepare_mask(c[1]['mask'], shape, device) if 'mask' in c[1] else None) + for c in positive + ] + spatial_conds_neg = [ + (c[1]['area'] if 'area' in c[1] else None, + comfy.sample.prepare_mask(c[1]['mask'], shape, device) if 'mask' in c[1] else None) + for c in negative + ] + + #gligen + gligen_pos = [ + c[1]['gligen'] if 'gligen' in c[1] else None + for c in positive + ] + gligen_neg = [ + c[1]['gligen'] if 'gligen' in c[1] else None + for c in negative + ] + + gen = torch.manual_seed(noise_seed) + if tiling_strategy == 'random' or tiling_strategy == 'random strict': + tiles = tiling.get_tiles_and_masks_rgrid(end_at_step - start_at_step, samples.shape, tile_height, tile_width, gen) + elif tiling_strategy == 'padded': + tiles = tiling.get_tiles_and_masks_padded(end_at_step - start_at_step, samples.shape, tile_height, tile_width) + else: + tiles = tiling.get_tiles_and_masks_simple(end_at_step - start_at_step, samples.shape, tile_height, tile_width) + + total_steps = sum([num_steps for img_pass in tiles for steps_list in img_pass for _,_,_,_,num_steps,_ in steps_list]) + current_step = [0] + + preview_format = "JPEG" + if preview_format not in ["JPEG", "PNG"]: + preview_format = "JPEG" + previewer = None + if preview: + previewer = latent_preview.get_previewer(device, model.model.latent_format) + + + with tqdm(total=total_steps) as pbar_tqdm: + pbar = comfy.utils.ProgressBar(total_steps) + + def callback(step, x0, x, total_steps): + current_step[0] += 1 + preview_bytes = None + if previewer: + preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) + pbar.update_absolute(current_step[0], preview=preview_bytes) + pbar_tqdm.update(1) + + if tiling_strategy == "random strict": + samples_next = samples.clone() + for img_pass in tiles: + for i in range(len(img_pass)): + for tile_h, tile_h_len, tile_w, tile_w_len, tile_steps, tile_mask in img_pass[i]: + tiled_mask = None + if noise_mask is not None: + tiled_mask = tiling.get_slice(noise_mask, tile_h, tile_h_len, tile_w, tile_w_len).to(device) + if tile_mask is not None: + if tiled_mask is not None: + tiled_mask *= tile_mask.to(device) + else: + tiled_mask = tile_mask.to(device) + + if tiling_strategy == 'padded' or tiling_strategy == 'random strict': + tile_h, tile_h_len, tile_w, tile_w_len, tiled_mask = tiling.mask_at_boundary( tile_h, tile_h_len, tile_w, tile_w_len, + tile_height, tile_width, samples.shape[-2], samples.shape[-1], + tiled_mask, device) + + + if tiled_mask is not None and tiled_mask.sum().cpu() == 0.0: + continue + + tiled_latent = tiling.get_slice(samples, tile_h, tile_h_len, tile_w, tile_w_len).to(device) + + if tiling_strategy == 'padded': + tiled_noise = tiling.get_slice(noise, tile_h, tile_h_len, tile_w, tile_w_len).to(device) + else: + if tiled_mask is None or noise_mask is None: + tiled_noise = torch.zeros_like(tiled_latent) + else: + tiled_noise = tiling.get_slice(noise, tile_h, tile_h_len, tile_w, 
tile_w_len).to(device) * (1 - tiled_mask) + + #TODO: all other condition based stuff like area sets and GLIGEN should also happen here + + #cnets + for m, img in zip(cnets, cnet_imgs): + slice_cnet(tile_h, tile_h_len, tile_w, tile_w_len, m, img) + + #T2I + for m, img in zip(T2Is, T2I_imgs): + slices_T2I(tile_h, tile_h_len, tile_w, tile_w_len, m, img) + + pos = [c.copy() for c in positive_copy]#copy_cond(positive_copy) + neg = [c.copy() for c in negative_copy]#copy_cond(negative_copy) + + #cond areas + pos = [slice_cond(tile_h, tile_h_len, tile_w, tile_w_len, c, area) for c, area in zip(pos, spatial_conds_pos)] + pos = [c for c, ignore in pos if not ignore] + neg = [slice_cond(tile_h, tile_h_len, tile_w, tile_w_len, c, area) for c, area in zip(neg, spatial_conds_neg)] + neg = [c for c, ignore in neg if not ignore] + + #gligen + for cond, gligen in zip(pos, gligen_pos): + slice_gligen(tile_h, tile_h_len, tile_w, tile_w_len, cond, gligen) + for cond, gligen in zip(neg, gligen_neg): + slice_gligen(tile_h, tile_h_len, tile_w, tile_w_len, cond, gligen) + + tile_result = sampler.sample(tiled_noise, pos, neg, cfg=cfg, latent_image=tiled_latent, start_step=start_at_step + i * tile_steps, last_step=start_at_step + i*tile_steps + tile_steps, force_full_denoise=force_full_denoise and i+1 == end_at_step - start_at_step, denoise_mask=tiled_mask, callback=callback, disable_pbar=True, seed=noise_seed) + tile_result = tile_result.cpu() + if tiled_mask is not None: + tiled_mask = tiled_mask.cpu() + if tiling_strategy == "random strict": + tiling.set_slice(samples_next, tile_result, tile_h, tile_h_len, tile_w, tile_w_len, tiled_mask) + else: + tiling.set_slice(samples, tile_result, tile_h, tile_h_len, tile_w, tile_w_len, tiled_mask) + if tiling_strategy == "random strict": + samples = samples_next.clone() + + + comfy.sample.cleanup_additional_models(modelPatches) + + out = latent_image.copy() + out["samples"] = samples.cpu() + return (out, ) + +class TiledKSampler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "tile_width": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "random strict", "padded", 'simple'], ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "sample" + + CATEGORY = "sampling" + + def sample(self, model, seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise): + steps_total = int(steps / denoise) + return sample_common(model, 'enable', seed, tile_width, tile_height, tiling_strategy, steps_total, cfg, sampler_name, scheduler, positive, negative, latent_image, steps_total-steps, steps_total, 'disable', denoise=1.0, preview=True) + +class TiledKSamplerAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "add_noise": (["enable", "disable"], ), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), 
+ "tile_width": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}), + "tile_height": ("INT", {"default": 512, "min": 256, "max": MAX_RESOLUTION, "step": 64}), + "tiling_strategy": (["random", "random strict", "padded", 'simple'], ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + "return_with_leftover_noise": (["disable", "enable"], ), + "preview": (["disable", "enable"], ), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "sample" + + CATEGORY = "sampling" + + def sample(self, model, add_noise, noise_seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, preview, denoise=1.0): + return sample_common(model, add_noise, noise_seed, tile_width, tile_height, tiling_strategy, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0, preview= preview == 'enable') + + + +NODE_CLASS_MAPPINGS = { + "BNK_TiledKSamplerAdvanced": TiledKSamplerAdvanced, + "BNK_TiledKSampler": TiledKSampler, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "BNK_TiledKSamplerAdvanced": "TiledK Sampler (Advanced)", + "BNK_TiledKSampler": "Tiled KSampler", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_TiledKSampler/tiling.py b/custom_nodes/ComfyUI_TiledKSampler/tiling.py new file mode 100644 index 0000000000000000000000000000000000000000..096f8baa1a1a639e3ed251e415a95aa14bcf8753 --- /dev/null +++ b/custom_nodes/ComfyUI_TiledKSampler/tiling.py @@ -0,0 +1,175 @@ +import torch +import itertools +import numpy as np + +def grouper(n, iterable): + it = iter(iterable) + while True: + chunk = list(itertools.islice(it, n)) + if not chunk: + return + yield chunk + +def create_batches(n, iterable): + groups = itertools.groupby(iterable, key= lambda x: (x[1], x[3])) + for _, x in groups: + for y in grouper(n, x): + yield y + + +def get_slice(tensor, h, h_len, w, w_len): + t = tensor.narrow(-2, h, h_len) + t = t.narrow(-1, w, w_len) + return t + +def set_slice(tensor1,tensor2, h, h_len, w, w_len, mask=None): + if mask is not None: + tensor1[:,:,h:h+h_len,w:w+w_len] = tensor1[:,:,h:h+h_len,w:w+w_len] * (1 - mask) + tensor2 * mask + else: + tensor1[:,:,h:h+h_len,w:w+w_len] = tensor2 + +def get_tiles_and_masks_simple(steps, latent_shape, tile_height, tile_width): + latent_size_h = latent_shape[-2] + latent_size_w = latent_shape[-1] + tile_size_h = int(tile_height // 8) + tile_size_w = int(tile_width // 8) + + h = np.arange(0,latent_size_h, tile_size_h) + w = np.arange(0,latent_size_w, tile_size_w) + + def create_tile(hs, ws, i, j): + h = int(hs[i]) + w = int(ws[j]) + h_len = min(tile_size_h, latent_size_h - h) + w_len = min(tile_size_w, latent_size_w - w) + return (h, h_len, w, w_len, steps, None) + + passes = [ + [[create_tile(h, w, i, j) for i in range(len(h)) for j in range(len(w))]], + ] + return passes + +def get_tiles_and_masks_padded(steps, latent_shape, tile_height, tile_width): + batch_size = latent_shape[0] + latent_size_h = latent_shape[-2] + latent_size_w = latent_shape[-1] + + 
tile_size_h = int(tile_height // 8)
+    # round tile sizes down to multiples of 4 latent pixels so the
+    # quarter-tile mask boundaries below are exact
+    tile_size_h = int((tile_size_h // 4) * 4)
+    tile_size_w = int(tile_width // 8)
+    tile_size_w = int((tile_size_w // 4) * 4)
+
+    #masks: a 3x3 grid of binary region masks (corners, edges, center)
+    mask_h = [0,tile_size_h // 4, tile_size_h - tile_size_h // 4, tile_size_h]
+    mask_w = [0,tile_size_w // 4, tile_size_w - tile_size_w // 4, tile_size_w]
+    masks = [[] for _ in range(3)]
+    for i in range(3):
+        for j in range(3):
+            mask = torch.zeros((batch_size,1,tile_size_h, tile_size_w), dtype=torch.float32, device='cpu')
+            mask[:,:,mask_h[i]:mask_h[i+1],mask_w[j]:mask_w[j+1]] = 1.0
+            masks[i].append(mask)
+
+    def create_mask(h_ind, w_ind, h_ind_max, w_ind_max, mask_h, mask_w, h_len, w_len):
+        # interior tiles keep only their center region; tiles on an image
+        # border additionally keep the edge/corner regions facing that border
+        mask = masks[1][1]
+        if not (h_ind == 0 or h_ind == h_ind_max or w_ind == 0 or w_ind == w_ind_max):
+            return get_slice(mask, 0, h_len, 0, w_len)
+        mask = mask.clone()
+        if h_ind == 0 and mask_h:
+            mask += masks[0][1]
+        if h_ind == h_ind_max and mask_h:
+            mask += masks[2][1]
+        if w_ind == 0 and mask_w:
+            mask += masks[1][0]
+        if w_ind == w_ind_max and mask_w:
+            mask += masks[1][2]
+        if h_ind == 0 and w_ind == 0 and mask_h and mask_w:
+            mask += masks[0][0]
+        if h_ind == 0 and w_ind == w_ind_max and mask_h and mask_w:
+            mask += masks[0][2]
+        if h_ind == h_ind_max and w_ind == 0 and mask_h and mask_w:
+            mask += masks[2][0]
+        if h_ind == h_ind_max and w_ind == w_ind_max and mask_h and mask_w:
+            mask += masks[2][2]
+        return get_slice(mask, 0, h_len, 0, w_len)
+
+    h = np.arange(0,latent_size_h, tile_size_h)
+    h_shift = np.arange(tile_size_h // 2, latent_size_h - tile_size_h // 2, tile_size_h)
+    w = np.arange(0,latent_size_w, tile_size_w)
+    w_shift = np.arange(tile_size_w // 2, latent_size_w - tile_size_w // 2, tile_size_w)
+
+
+    def create_tile(hs, ws, mask_h, mask_w, i, j):
+        h = int(hs[i])
+        w = int(ws[j])
+        h_len = min(tile_size_h, latent_size_h - h)
+        w_len = min(tile_size_w, latent_size_w - w)
+        mask = create_mask(i,j,len(hs)-1, len(ws)-1, mask_h, mask_w, h_len, w_len)
+        return (h, h_len, w, w_len, steps, mask)
+
+    passes = [
+        [[create_tile(h, w, True, True, i, j) for i in range(len(h)) for j in range(len(w))]],
+        [[create_tile(h_shift, w, False, True, i, j) for i in range(len(h_shift)) for j in range(len(w))]],
+        [[create_tile(h, w_shift, True, False, i, j) for i in range(len(h)) for j in range(len(w_shift))]],
+        [[create_tile(h_shift, w_shift, False, False, i,j) for i in range(len(h_shift)) for j in range(len(w_shift))]],
+    ]
+
+    return passes
+
+def mask_at_boundary(h, h_len, w, w_len, tile_size_h, tile_size_w, latent_size_h, latent_size_w, mask, device='cpu'):
+    tile_size_h = int(tile_size_h // 8)
+    tile_size_w = int(tile_size_w // 8)
+
+    if (h_len == tile_size_h or h_len == latent_size_h) and (w_len == tile_size_w or w_len == latent_size_w):
+        return h, h_len, w, w_len, mask
+    # grow cropped border tiles back to full size by shifting them into the
+    # latent, masking so only the originally cropped region is denoised
+    h_offset = min(0, latent_size_h - (h + tile_size_h))
+    w_offset = min(0, latent_size_w - (w + tile_size_w))
+    new_mask = torch.zeros((1,1,tile_size_h, tile_size_w), dtype=torch.float32, device=device)
+    new_mask[:,:,-h_offset:h_len if h_offset == 0 else tile_size_h, -w_offset:w_len if w_offset == 0 else tile_size_w] = 1.0 if mask is None else mask
+    return h + h_offset, tile_size_h, w + w_offset, tile_size_w, new_mask
+
+def get_tiles_and_masks_rgrid(steps, latent_shape, tile_height, tile_width, generator):
+
+    def calc_coords(latent_size, tile_size, jitter):
+        tile_coords = int((latent_size + jitter - 1) // tile_size + 1)
+        tile_coords = [np.clip(tile_size * c - jitter, 0, latent_size) for c in range(tile_coords + 1)]
+        tile_coords = [(c1, c2-c1) for
c1, c2 in zip(tile_coords, tile_coords[1:])] + return tile_coords + + #calc stuff + batch_size = latent_shape[0] + latent_size_h = latent_shape[-2] + latent_size_w = latent_shape[-1] + tile_size_h = int(tile_height // 8) + tile_size_w = int(tile_width // 8) + + tiles_all = [] + + for s in range(steps): + rands = torch.rand((2,), dtype=torch.float32, generator=generator, device='cpu').numpy() + + jitter_w1 = int(rands[0] * tile_size_w) + jitter_w2 = int(((rands[0] + .5) % 1.0) * tile_size_w) + jitter_h1 = int(rands[1] * tile_size_h) + jitter_h2 = int(((rands[1] + .5) % 1.0) * tile_size_h) + + #calc number of tiles + tiles_h = [ + calc_coords(latent_size_h, tile_size_h, jitter_h1), + calc_coords(latent_size_h, tile_size_h, jitter_h2) + ] + tiles_w = [ + calc_coords(latent_size_w, tile_size_w, jitter_w1), + calc_coords(latent_size_w, tile_size_w, jitter_w2) + ] + + tiles = [] + if s % 2 == 0: + for i, h in enumerate(tiles_h[0]): + for w in tiles_w[i%2]: + tiles.append((int(h[0]), int(h[1]), int(w[0]), int(w[1]), 1, None)) + else: + for i, w in enumerate(tiles_w[0]): + for h in tiles_h[i%2]: + tiles.append((int(h[0]), int(h[1]), int(w[0]), int(w[1]), 1, None)) + tiles_all.append(tiles) + return [tiles_all] \ No newline at end of file diff --git a/custom_nodes/ComfyUI_experiments/LICENSE b/custom_nodes/ComfyUI_experiments/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_experiments/README.md b/custom_nodes/ComfyUI_experiments/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..accace076c26b68b78b625ced597db595281b379
--- /dev/null
+++ b/custom_nodes/ComfyUI_experiments/README.md
@@ -0,0 +1,23 @@
+## Some experimental custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI)
+
+Copy the .py files to your custom_nodes directory to use them.
+
+They will show up in: custom_node_experiments/
+
+### sampler_tonemap.py
+contains ModelSamplerTonemapNoiseTest, a node that makes the sampler tonemap the noise prediction with a simple tonemapping algorithm. It lets you use a higher CFG without breaking the image. To use a higher CFG, lower the multiplier value.
+
+### sampler_rescalecfg.py
+contains an implementation of Rescale Classifier-Free Guidance from: https://arxiv.org/pdf/2305.08891.pdf
+
+### advanced_model_merging.py
+
+Node for merging models by block.
+
+### sdxl_model_merging.py
+
+Node for merging SDXL base models.
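+### what "merging by block" means
+
+Each of the merging nodes above exposes one ratio widget per UNet block prefix.
+As a rough sketch of the underlying idea (hypothetical `merge_by_block` and
+`ratios` names, not the actual comfy_extras implementation), the merge is a
+per-block linear interpolation of the two state dicts, where a ratio of 1.0
+keeps model1's weights, matching the node defaults:
+
+```
+def merge_by_block(sd1, sd2, ratios):
+    """Blend two state dicts; `ratios` maps a block prefix to the weight of sd1."""
+    merged = {}
+    for key, w1 in sd1.items():
+        # use the ratio of the first block prefix matching this parameter name
+        r = next((v for prefix, v in ratios.items() if key.startswith(prefix)), 1.0)
+        merged[key] = r * w1 + (1.0 - r) * sd2[key]
+    return merged
+
+# e.g. merge_by_block(sd1, sd2, {"input_blocks.0.": 0.5, "out.": 1.0})
+```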
+ +### reference_only.py + +Contains a node that implements the "reference only controlnet". An example workflow can be found in the workflows folder. diff --git a/custom_nodes/ComfyUI_experiments/__init__.py b/custom_nodes/ComfyUI_experiments/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c6273d12a0f26a8e03127fcd9f72ff626f3c4226 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/__init__.py @@ -0,0 +1,23 @@ +import importlib +import os + +node_list = [ #Add list of .py files containing nodes here + "advanced_model_merging", + "reference_only", + "sampler_rescalecfg", + "sampler_tonemap", + "sampler_tonemap_rescalecfg", + "sdxl_model_merging" +] + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + +for module_name in node_list: + imported_module = importlib.import_module(".{}".format(module_name), __name__) + + NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS} + if hasattr(imported_module, "NODE_DISPLAY_NAME_MAPPINGS"): + NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS} + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173fdfe00ea278fe9899fc7c7f260bb3d4c0c3b5 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9db01aa97823253d77d6f727851460b3f2f25f4d Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb301ffa959a7bd7136279fa17324f72664aae2a Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc512addc5db0e272d2ae9472c8ba7ac72f77d08 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/advanced_model_merging.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..510156161c8bcf0a1120aadc2011e21012223389 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6330e5c0a3c4b2bf4194bb8d8c0b4b273f9ec67 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/reference_only.cpython-311.pyc differ diff --git 
a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..686246fa9a88a4d2abe654b14a8afe740185fe60 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..174ecfa49ce84a769756f0ad12e7580991bf310e Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_rescalecfg.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c838567ab4827362eb628d77c19522eb447726d Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0dc33661ee0dcaad7b6b833d03c750ed72a57c7 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dde2579c050a5ae9fafe189306fef8501e3241b Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4caf1b20f545bb4abd9a9f4cd7fb5653a6c33c98 Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sampler_tonemap_rescalecfg.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-310.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0623b8a569e3e47e19fa926662dacf9c457776f Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-311.pyc b/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58db7fb43940b2e8d4ba2615f2c69cd54a8cea8a Binary files /dev/null and b/custom_nodes/ComfyUI_experiments/__pycache__/sdxl_model_merging.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_experiments/advanced_model_merging.py b/custom_nodes/ComfyUI_experiments/advanced_model_merging.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d068676fd4185b684a3643a0377e981d3c7397 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/advanced_model_merging.py @@ 
-0,0 +1,30 @@ +import comfy_extras.nodes_model_merging + +class ModelMergeBlockNumber(comfy_extras.nodes_model_merging.ModelMergeBlocks): + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["time_embed."] = argument + arg_dict["label_emb."] = argument + + for i in range(12): + arg_dict["input_blocks.{}.".format(i)] = argument + + for i in range(3): + arg_dict["middle_block.{}.".format(i)] = argument + + for i in range(12): + arg_dict["output_blocks.{}.".format(i)] = argument + + arg_dict["out."] = argument + + return {"required": arg_dict} + + +NODE_CLASS_MAPPINGS = { + "ModelMergeBlockNumber": ModelMergeBlockNumber, +} diff --git a/custom_nodes/ComfyUI_experiments/reference_only.py b/custom_nodes/ComfyUI_experiments/reference_only.py new file mode 100644 index 0000000000000000000000000000000000000000..ae824a4142169d6e55b3d7b6ca497ca4b0ea1d2e --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/reference_only.py @@ -0,0 +1,54 @@ +import torch + +class ReferenceOnlySimple: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "reference": ("LATENT",), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}) + }} + + RETURN_TYPES = ("MODEL", "LATENT") + FUNCTION = "reference_only" + + CATEGORY = "custom_node_experiments" + + def reference_only(self, model, reference, batch_size): + model_reference = model.clone() + size_latent = list(reference["samples"].shape) + size_latent[0] = batch_size + latent = {} + latent["samples"] = torch.zeros(size_latent) + + batch = latent["samples"].shape[0] + reference["samples"].shape[0] + def reference_apply(q, k, v, extra_options): + k = k.clone().repeat(1, 2, 1) + offset = 0 + if q.shape[0] > batch: + offset = batch + + for o in range(0, q.shape[0], batch): + for x in range(1, batch): + k[x + o, q.shape[1]:] = q[o,:] + + return q, k, k + + model_reference.set_model_attn1_patch(reference_apply) + out_latent = torch.cat((reference["samples"], latent["samples"])) + if "noise_mask" in latent: + mask = latent["noise_mask"] + else: + mask = torch.ones((64,64), dtype=torch.float32, device="cpu") + + if len(mask.shape) < 3: + mask = mask.unsqueeze(0) + if mask.shape[0] < latent["samples"].shape[0]: + print(latent["samples"].shape, mask.shape) + mask = mask.repeat(latent["samples"].shape[0], 1, 1) + + out_mask = torch.zeros((1,mask.shape[1],mask.shape[2]), dtype=torch.float32, device="cpu") + return (model_reference, {"samples": out_latent, "noise_mask": torch.cat((out_mask, mask))}) + +NODE_CLASS_MAPPINGS = { + "ReferenceOnlySimple": ReferenceOnlySimple, +} diff --git a/custom_nodes/ComfyUI_experiments/sampler_rescalecfg.py b/custom_nodes/ComfyUI_experiments/sampler_rescalecfg.py new file mode 100644 index 0000000000000000000000000000000000000000..d99959d6a8131c0faed9a6ea33974fa55728bafb --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/sampler_rescalecfg.py @@ -0,0 +1,38 @@ +import torch + + +class RescaleClassifierFreeGuidance: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "multiplier": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "custom_node_experiments" + + def patch(self, model, multiplier): + + def rescale_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + x_cfg = uncond + cond_scale * (cond - uncond) + ro_pos = 
torch.std(cond, dim=(1,2,3), keepdim=True) + ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True) + + x_rescaled = x_cfg * (ro_pos / ro_cfg) + x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg + + return x_final + + m = model.clone() + m.set_model_sampler_cfg_function(rescale_cfg) + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "RescaleClassifierFreeGuidanceTest": RescaleClassifierFreeGuidance, +} diff --git a/custom_nodes/ComfyUI_experiments/sampler_tonemap.py b/custom_nodes/ComfyUI_experiments/sampler_tonemap.py new file mode 100644 index 0000000000000000000000000000000000000000..14f732e2529d608c42da3250ddb7a24893a8abf8 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/sampler_tonemap.py @@ -0,0 +1,44 @@ +import torch + + +class ModelSamplerTonemapNoiseTest: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "custom_node_experiments" + + def patch(self, model, multiplier): + + def sampler_tonemap_reinhard(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + noise_pred = (cond - uncond) + noise_pred_vector_magnitude = (torch.linalg.vector_norm(noise_pred, dim=(1)) + 0.0000000001)[:,None] + noise_pred /= noise_pred_vector_magnitude + + mean = torch.mean(noise_pred_vector_magnitude, dim=(1,2,3), keepdim=True) + std = torch.std(noise_pred_vector_magnitude, dim=(1,2,3), keepdim=True) + + top = (std * 3 + mean) * multiplier + + #reinhard + noise_pred_vector_magnitude *= (1.0 / top) + new_magnitude = noise_pred_vector_magnitude / (noise_pred_vector_magnitude + 1.0) + new_magnitude *= top + + return uncond + noise_pred * new_magnitude * cond_scale + + m = model.clone() + m.set_model_sampler_cfg_function(sampler_tonemap_reinhard) + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "ModelSamplerTonemapNoiseTest": ModelSamplerTonemapNoiseTest, +} diff --git a/custom_nodes/ComfyUI_experiments/sampler_tonemap_rescalecfg.py b/custom_nodes/ComfyUI_experiments/sampler_tonemap_rescalecfg.py new file mode 100644 index 0000000000000000000000000000000000000000..3d3bf051a4155bbf0df1af814853a3c54502b8be --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/sampler_tonemap_rescalecfg.py @@ -0,0 +1,55 @@ +import torch + + +class TonemapNoiseWithRescaleCFG: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL",), + "tonemap_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}), + "rescale_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "custom_node_experiments" + + def patch(self, model, tonemap_multiplier, rescale_multiplier): + + def tonemap_noise_rescale_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + # Tonemap + noise_pred = (cond - uncond) + noise_pred_vector_magnitude = (torch.linalg.vector_norm(noise_pred, dim=(1)) + 0.0000000001)[:, None] + noise_pred /= noise_pred_vector_magnitude + + mean = torch.mean(noise_pred_vector_magnitude, dim=(1, 2, 3), keepdim=True) + std = torch.std(noise_pred_vector_magnitude, dim=(1, 2, 3), keepdim=True) + + top = (std * 3 + mean) * tonemap_multiplier + + # Reinhard + noise_pred_vector_magnitude *= (1.0 / top) + new_magnitude = noise_pred_vector_magnitude / (noise_pred_vector_magnitude + 1.0) + new_magnitude *= top + + # Rescale CFG + x_cfg = uncond + 
(noise_pred * new_magnitude * cond_scale) + ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True) + ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True) + + x_rescaled = x_cfg * (ro_pos / ro_cfg) + x_final = rescale_multiplier * x_rescaled + (1.0 - rescale_multiplier) * x_cfg + + return x_final + + m = model.clone() + m.set_model_sampler_cfg_function(tonemap_noise_rescale_cfg) + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "TonemapNoiseWithRescaleCFG": TonemapNoiseWithRescaleCFG, +} diff --git a/custom_nodes/ComfyUI_experiments/sdxl_model_merging.py b/custom_nodes/ComfyUI_experiments/sdxl_model_merging.py new file mode 100644 index 0000000000000000000000000000000000000000..e288f3cb4963379fbb02b08d5907988bd3810356 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/sdxl_model_merging.py @@ -0,0 +1,114 @@ +import comfy_extras.nodes_model_merging + +class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks): + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["time_embed."] = argument + arg_dict["label_emb."] = argument + + for i in range(9): + arg_dict["input_blocks.{}".format(i)] = argument + + for i in range(3): + arg_dict["middle_block.{}".format(i)] = argument + + for i in range(9): + arg_dict["output_blocks.{}".format(i)] = argument + + arg_dict["out."] = argument + + return {"required": arg_dict} + + +class ModelMergeSDXLTransformers(comfy_extras.nodes_model_merging.ModelMergeBlocks): + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["time_embed."] = argument + arg_dict["label_emb."] = argument + + transformers = {4: 2, 5:2, 7:10, 8:10} + + for i in range(9): + arg_dict["input_blocks.{}.0.".format(i)] = argument + if i in transformers: + arg_dict["input_blocks.{}.1.".format(i)] = argument + for j in range(transformers[i]): + arg_dict["input_blocks.{}.1.transformer_blocks.{}.".format(i, j)] = argument + + for i in range(3): + arg_dict["middle_block.{}.".format(i)] = argument + if i == 1: + for j in range(10): + arg_dict["middle_block.{}.transformer_blocks.{}.".format(i, j)] = argument + + transformers = {3:2, 4: 2, 5:2, 6:10, 7:10, 8:10} + for i in range(9): + arg_dict["output_blocks.{}.0.".format(i)] = argument + t = 8 - i + if t in transformers: + arg_dict["output_blocks.{}.1.".format(i)] = argument + for j in range(transformers[t]): + arg_dict["output_blocks.{}.1.transformer_blocks.{}.".format(i, j)] = argument + + arg_dict["out."] = argument + + return {"required": arg_dict} + +class ModelMergeSDXLDetailedTransformers(comfy_extras.nodes_model_merging.ModelMergeBlocks): + @classmethod + def INPUT_TYPES(s): + arg_dict = { "model1": ("MODEL",), + "model2": ("MODEL",)} + + argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) + + arg_dict["time_embed."] = argument + arg_dict["label_emb."] = argument + + transformers = {4: 2, 5:2, 7:10, 8:10} + transformers_args = ["norm1", "attn1.to_q", "attn1.to_k", "attn1.to_v", "attn1.to_out", "ff.net", "norm2", "attn2.to_q", "attn2.to_k", "attn2.to_v", "attn2.to_out", "norm3"] + + for i in range(9): + arg_dict["input_blocks.{}.0.".format(i)] = argument + if i in transformers: + arg_dict["input_blocks.{}.1.".format(i)] = argument + for j in range(transformers[i]): + for x in transformers_args: + 
arg_dict["input_blocks.{}.1.transformer_blocks.{}.{}".format(i, j, x)] = argument + + for i in range(3): + arg_dict["middle_block.{}.".format(i)] = argument + if i == 1: + for j in range(10): + for x in transformers_args: + arg_dict["middle_block.{}.transformer_blocks.{}.{}".format(i, j, x)] = argument + + transformers = {3:2, 4: 2, 5:2, 6:10, 7:10, 8:10} + for i in range(9): + arg_dict["output_blocks.{}.0.".format(i)] = argument + t = 8 - i + if t in transformers: + arg_dict["output_blocks.{}.1.".format(i)] = argument + for j in range(transformers[t]): + for x in transformers_args: + arg_dict["output_blocks.{}.1.transformer_blocks.{}.{}".format(i, j, x)] = argument + + arg_dict["out."] = argument + + return {"required": arg_dict} + +NODE_CLASS_MAPPINGS = { + "ModelMergeSDXL": ModelMergeSDXL, + "ModelMergeSDXLTransformers": ModelMergeSDXLTransformers, + "ModelMergeSDXLDetailedTransformers": ModelMergeSDXLDetailedTransformers, +} diff --git a/custom_nodes/ComfyUI_experiments/workflows/reference_only_simple.json b/custom_nodes/ComfyUI_experiments/workflows/reference_only_simple.json new file mode 100644 index 0000000000000000000000000000000000000000..952ce762fda365cc9d98b8204f4872dc22797244 --- /dev/null +++ b/custom_nodes/ComfyUI_experiments/workflows/reference_only_simple.json @@ -0,0 +1,552 @@ +{ + "last_node_id": 15, + "last_link_id": 37, + "nodes": [ + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 1209, + 188 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 233, + 117 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "crude drawing of girl" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 237, + 370 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark" + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 863, + 186 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 37 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 34 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 719286772344905, + "fixed", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 9, + "type": "SaveImage", + 
"pos": [ + 1548, + 180 + ], + "size": [ + 1454.6668601568254, + 548.2885143635223 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "refer/ComfyUI" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -563, + 510 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 32 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8, + 20 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd_xl_1.0.safetensors" + ] + }, + { + "id": 14, + "type": "ImageScale", + "pos": [ + -129, + 763 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 19 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 18 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 768, + 768, + "center" + ] + }, + { + "id": 13, + "type": "LoadImage", + "pos": [ + -483, + 777 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "example.png", + "image" + ] + }, + { + "id": 15, + "type": "ReferenceOnlySimple", + "pos": [ + 515, + 675 + ], + "size": { + "0": 315, + "1": 78 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 32, + "slot_index": 0 + }, + { + "name": "reference", + "type": "LATENT", + "link": 35 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ReferenceOnlySimple" + }, + "widgets_values": [ + 2 + ] + }, + { + "id": 12, + "type": "VAEEncode", + "pos": [ + 248, + 732 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 18, + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "link": 20, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 35 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + } + ], + "links": [ + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 18, + 14, + 0, + 12, + 0, + "IMAGE" + ], + [ + 19, + 13, + 0, + 14, + 0, + "IMAGE" + ], + [ + 20, + 4, + 2, + 12, + 1, + "VAE" + ], + [ + 32, + 4, + 0, + 15, + 0, + "MODEL" + ], + [ + 34, + 15, + 1, + 3, + 3, + "LATENT" + ], + [ + 35, + 12, + 0, + 15, + 1, 
+ "LATENT" + ], + [ + 37, + 15, + 0, + 3, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} diff --git a/custom_nodes/ComfyUI_node_Lilly/.gitattributes b/custom_nodes/ComfyUI_node_Lilly/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..dfe0770424b2a19faf507a501ebfc23be8f54e7b --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/custom_nodes/ComfyUI_node_Lilly/.gitignore b/custom_nodes/ComfyUI_node_Lilly/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..439474c5a0e05c38347464e690e94c07e7567387 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/.gitignore @@ -0,0 +1,3 @@ + +*.pyc +**/nppBackup/ diff --git a/custom_nodes/ComfyUI_node_Lilly/CLIPTextEncodeWildcards.py b/custom_nodes/ComfyUI_node_Lilly/CLIPTextEncodeWildcards.py new file mode 100644 index 0000000000000000000000000000000000000000..0616381deea5dd0eb97e7c7bbfc8f7c84c595180 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/CLIPTextEncodeWildcards.py @@ -0,0 +1,88 @@ +import os, glob, sys +import random +import re +import os +if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__': + from ConsoleColor import print, console + from wildcards import wildcards +else: + from .ConsoleColor import print, console + from .wildcards import wildcards +#print(__file__) +#print(os.path.basename(__file__)) + +#print("wildcards_ComfyUI") +#print(os.getcwd()) +#Wprint(f"CLIPTextEncodeWildcards __name__ {__name__}") + +class CLIPTextEncodeWildcards: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", {"multiline": True}), "clip": ("CLIP", ) + } + } + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "conditioning" + + def encode(self, clip, text): + print(f"[green]text : [/green]",text) + r=wildcards.run(text) + print(f"[green]result : [/green]",r) + return ([[clip.encode(r), {}]], ) + +class CLIPTextEncodeWildcards2: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", {"multiline": True}), "clip": ("CLIP", ), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + + } + } + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + + CATEGORY = "conditioning" + + def encode(self, seed, clip, text): + random.seed(seed) + print(f"[green]text : [/green]",text) + r=wildcards.run(text) + print(f"[green]result : [/green]",r) + return ([[clip.encode(r), {}]], ) + + + +class CLIPTextEncodeWildcards3: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "clip": ("CLIP", ), + "positive": ("STRING", {"multiline": True}), + "negative": ("STRING", {"multiline": True}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + + } + } + RETURN_TYPES = ("CONDITIONING","CONDITIONING") + FUNCTION = "encode" + + CATEGORY = "conditioning" + + def encode(self, seed, clip, positive, negative): + random.seed(seed) + print(f"[green]positive : [/green]",positive) + positive=wildcards.run(positive) + print(f"[green]result : [/green]",positive) + print(f"[green]negative : [/green]",negative) + negative=wildcards.run(negative) + print(f"[green]result : [/green]",negative) + return ([[clip.encode(positive), {}]], [[clip.encode(negative), {}]], ) + + diff --git a/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderRandom.py b/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderRandom.py new file mode 100644 
index 0000000000000000000000000000000000000000..ce4c5a009affbf42dc6b2554ae510fb98c396597 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderRandom.py @@ -0,0 +1,49 @@ +import os +import comfy.sd +from nodes import * +import random +import folder_paths + +cnt=0 +ckpt_name="" +ckpt_path="" + +class CheckpointLoaderRandom: + models_dir = os.path.join(os.getcwd(),"ComfyUI", "models") + ckpt_dir = os.path.join(models_dir, "checkpoints") + cnt=0 + ckpt_name="" + ckpt_path="" + + def __init__(self): + print(f"CheckpointLoaderRandom __init__") + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "max": ("INT", {"default": 10, "min": 0, "max": 0xffffffffffffffff}), + #"ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), ), + } + } + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders" + + def load_checkpoint(self, seed, max, output_vae=True, output_clip=True): + global cnt, ckpt_name, ckpt_path + print(f"cnt : { cnt}") + if ckpt_name=="" or cnt>=max : + cnt=0 + ckpt_names= folder_paths.get_filename_list("checkpoints") + #print(f"ckpt_names : { ckpt_names}") + ckpt_name=random.choice(ckpt_names) + print(f"ckpt_name : { ckpt_name}") + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + cnt+=1 + return out + diff --git a/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderSimpleText.py b/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderSimpleText.py new file mode 100644 index 0000000000000000000000000000000000000000..3bdf13e441eaa6a4e7eb9290498497e23b59b183 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/CheckpointLoaderSimpleText.py @@ -0,0 +1,53 @@ +import os +import comfy.sd +from nodes import * +from folder_paths import * +import random +import os +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console, ccolor + from mypath import * +else: + from .ConsoleColor import print, console, ccolor + from .mypath import * + +#print(__file__) +#print(os.path.basename(__file__)) + +class CheckpointLoaderSimpleText: + @classmethod + def INPUT_TYPES(s): + t_checkpoints=folder_paths.get_filename_list("checkpoints") + #print(f"checkpoints count : {len(t_checkpoints)}", Colors.BGREEN) + return { + "required": { + "ckpt_name": ( + "STRING", { + "multiline": False, + "default": random.choice(t_checkpoints) + } + ), + } + } + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders" + + def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): + print(f"[{ccolor}]ckpt_name : [/{ccolor}]", ckpt_name) + ckpt_path=folder_paths.get_full_path("checkpoints", ckpt_name) + if ckpt_path is None: + ckpt_path=getFullPath(ckpt_name,"checkpoints") + print(f"[{ccolor}]ckpt_path : [/{ccolor}]", ckpt_path) + try: + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return out + except Exception as e: + console.print_exception() + return + + +#NODE_CLASS_MAPPINGS = { +# "CheckpointLoaderSimpleText": CheckpointLoaderSimpleText, +#} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/ConsoleColor.py 
b/custom_nodes/ComfyUI_node_Lilly/ConsoleColor.py new file mode 100644 index 0000000000000000000000000000000000000000..cce625d3d7d76bb058ddada39f4fda90776899d3 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/ConsoleColor.py @@ -0,0 +1,61 @@ +import os, sys +import sys +import subprocess +import pkg_resources + +required = {'rich'} +installed = {pkg.key for pkg in pkg_resources.working_set} +missing = required - installed + +if missing: + python = sys.executable + subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL) + +from rich.console import Console +from rich.theme import Theme +#console=Console(style="reset") +custom_theme = Theme({ + "repr.path": "bright_blue", + "progress.percentage": "bright_blue", + "markdown.block_quote": "bright_blue", + "iso8601.time": "bright_blue" +}) +console = Console(theme=custom_theme) +print=console.log +ccolor="bright_yellow" +""" +print("test", style="bold white on blue") +print("test", style="bold green") +print("test", style="bold CYAN") +""" +""" +import os +if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__': + from ConsoleColor import print, console +else: + from .ConsoleColor import print, console +print(__file__) +print(os.path.basename(__file__)) +""" + +""" + +print( + { + 'test1':'tset', + 'test2':'tset', + } +) +print("test", style="bold white on blue") +""" + +""" +print(__file__) +print(os.path.basename(__file__)) +try: + Exception_test() +except Exception: + #console.print_exception(show_locals=True) + console.print_exception() + +""" \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/LoraLoaderText.py b/custom_nodes/ComfyUI_node_Lilly/LoraLoaderText.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6851208d6ac5ebd174d640bee820cc3e7f9b8a --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/LoraLoaderText.py @@ -0,0 +1,78 @@ +import os +import comfy.sd +from nodes import * +import folder_paths + +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console, ccolor + from mypath import * +else: + from .ConsoleColor import print, console, ccolor + from .mypath import * + +class LoraLoaderText: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "clip": ("CLIP", ), + "lora_name": ("STRING", { + "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node + "default": (folder_paths.get_filename_list("loras"), ) + }), + #"lora_name": (folder_paths.get_filename_list("loras"), ), + "strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + } + } + RETURN_TYPES = ("MODEL", "CLIP") + FUNCTION = "load_lora" + + CATEGORY = "loaders" + + def load_lora(self, model, clip, lora_name, strength_model, strength_clip): + + print(f"[{ccolor}]lora_name : [/{ccolor}]", lora_name) + if strength_model == 0 and strength_clip == 0: + print("[red]strength_model,strength_clip 0[/red] : ", lora_name) + return (model, clip) + + if lora_name is None or lora_name =="" : + print("[red]No lora_name[/red] : ", lora_name) + return (model, clip) + + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_path is None: + print("[yellow]No lora_path of lora_name [/yellow] : ", lora_name) + lora_path=getFullPath(lora_name,"loras") + if lora_path is None: + 
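+                # both lookups failed: folder_paths had no exact match and the
+                # wildcard-aware getFullPath helper from mypath (these loaders
+                # accept ? and * filename wildcards, per the README) found
+                # nothing either, so report it and return model/clip unchanged.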
print("[red]No lora_path of lora_name [/red] : ", lora_name) + return (model, clip) + + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + del self.loaded_lora + + if lora is None: + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + # ========================================= + + try: + model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) + return (model_lora, clip_lora) + except Exception as e: + console.print_exception() + return (model, clip) + +#NODE_CLASS_MAPPINGS = { +# "LoraLoaderText": LoraLoaderText, +#} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/LoraLoaderTextRandom.py b/custom_nodes/ComfyUI_node_Lilly/LoraLoaderTextRandom.py new file mode 100644 index 0000000000000000000000000000000000000000..1f72df3378c4cb72c93d26c469a8a0f0077a6cd9 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/LoraLoaderTextRandom.py @@ -0,0 +1,90 @@ +import os +import comfy.sd +from nodes import * +import folder_paths +import random +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console + from mypath import * +else: + from .ConsoleColor import print, console + from .mypath import * + +class LoraLoaderTextRandom: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "clip": ("CLIP", ), + "lora_name": ("STRING", { + "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node + "default": (folder_paths.get_filename_list("loras"), ) + }), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + #"lora_name": (folder_paths.get_filename_list("loras"), ), + "strength_model_min": ("FLOAT", {"default": 0.50, "min": 0.0, "max": 10.0, "step": 0.01}), + "strength_model_max": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "strength_clip_min": ("FLOAT", {"default": 0.50, "min": 0.0, "max": 10.0, "step": 0.01}), + "strength_clip_max": ("FLOAT", {"default": 1.50, "min": 0.0, "max": 10.0, "step": 0.01}), + } + } + RETURN_TYPES = ("MODEL", "CLIP") + FUNCTION = "load_lora" + + CATEGORY = "loaders" + + def load_lora(self, + model, + clip, + lora_name, + seed, + strength_model_min, + strength_model_max, + strength_clip_min, + strength_clip_max + ): + + strength_model=random.uniform(min(strength_model_min,strength_model_max),max(strength_model_min,strength_model_max)) + strength_clip=random.uniform(min(strength_clip_min,strength_clip_max),max(strength_clip_min,strength_clip_max)) + + print(f"[{ccolor}]lora_name : [/{ccolor}]", lora_name) + if strength_model == 0 and strength_clip == 0: + print("[red]strength_model,strength_clip 0[/red] : ", lora_name) + return (model, clip) + + if lora_name is None or lora_name =="" : + print("[red]No lora_name[/red] : ", lora_name) + return (model, clip) + + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_path is None: + #print("[red]No lora_path of lora_name [/red] : ", lora_name) + lora_path=getFullPath(lora_name,"loras") + if lora_path is None: + print("[red]No lora_path of lora_name [/red] : ", lora_name) + return (model, clip) + + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + del self.loaded_lora + + if lora is None: + lora = comfy.utils.load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + 
+        # =========================================
+
+        try:
+            model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
+            return (model_lora, clip_lora)
+        except Exception as e:
+            console.print_exception()
+            return (model, clip)
+
+#NODE_CLASS_MAPPINGS = {
+#    "LoraLoaderTextRandom": LoraLoaderTextRandom,
+#}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_node_Lilly/README.md b/custom_nodes/ComfyUI_node_Lilly/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..111f764c4b60095e4949589540aeb3ccb3df7c1d
--- /dev/null
+++ b/custom_nodes/ComfyUI_node_Lilly/README.md
@@ -0,0 +1,149 @@
+# ComfyUI-node-Lilly
+
+## install
+
+Go to ./custom_nodes and clone the git repo:
+
+```
+cd ./custom_nodes
+git clone https://github.com/kuriot/ComfyUI_node_Lilly.git
+```
+or download https://github.com/lilly1987/ComfyUI_node_Lilly/archive/refs/heads/main.zip and install it like this:
+
+![2023-03-22 05 54 43](https://user-images.githubusercontent.com/20321215/226738610-c042a51c-8e72-45de-b714-385eaac383af.png)
+
+
+## wildcards
+
+### ex - wildcard
+
+- form:
+a{__b__|{c|}|{__d__|e|}|f|}g____ __my__
+
+- to:
+aeg __quality_my__, __breasts__, { |__character_dress__|__dress_my__}, __shoulder__, {high heels,| } {choker,| } {,| } NSFW, __NSFW_my__, { |__style_my__,}
+
+```
+ex : {3$$a1|{b2|c3|}|d4|{-$$|f|g}|{-2$$h||i}|{1-$$j|k|}}/{$$l|m|}/{0$$n|}
+{1|2|3} -> 1 or 2 or 3
+{2$$a|b|c} -> a,b or b,c or c,a or b,b or ....
+{9$$a|b|c} -> {3$$a|b|c} (a count above the number of options is clamped)
+{1-2$$a|b|c} -> 1 to 2 random choices
+{-2$$a|b|c} -> {0-2$$a|b|c} 0-2
+{1-$$a|b|c} -> {1-3$$a|b|c} 1-max
+{-$$a|b|c} -> {0-3$$a|b|c} 0-max
+```
+
+### ex - wildcard text file use
+
+- ~/a/b.txt
+```
+1
+```
+- ~/b.txt
+```
+2
+```
+
+- __b__ to 1 or 2
+- __/b__ to 2
+- __/a/b__ to 1
+- __?b__ to 2
+- __*__ to 1 or 2
+
+### filename pattern matching
+- \* matches everything
+- ? matches any single character
+- \[seq\] matches any character in seq
+- \[!seq\] matches any character not in seq
+- reference: https://docs.python.org/3/library/fnmatch.html
+
+### run sample
+
+```
+python wildcards.py
+```
+
+### python sample
+
+```
+import os
+import wildcards as w
+
+# list of wildcard files to load
+w.card_path=os.path.dirname(__file__)+"\\wildcards\\**\\*.txt"
+
+# run
+print(w.run("a{__b__|{c|}|{__d__|e|}|f|}g____ __my__"))
+```
+
+
+
+### txt file (supports .txt files in different encodings)
+### txt file (supports .txt files in different encodings)
+
+From:
+```
+# comment
+a,b
+{b|c|__anotherfile__}
+__anotherfile__
+```
+Result:
+```
+a,b
+b
+c
+__anotherfile__
+```
+
+### reload card
+
+Call wildcards.card_load(),
+or pass True as the second argument:
+wildcards.run("{9$$-$$a|b|c}",True)
+
+## for ComfyUI
+
+
+
+### CLIPTextEncodeWildcards
+
+- CLIPTextEncodeWildcards : no seed
+- CLIPTextEncodeWildcards : seed
+
+![2023-03-20 02 13 50](https://user-images.githubusercontent.com/20321215/226194627-b560c9e1-5dfa-49d9-8503-939693a8b119.png)
+
+
+### SimpleSampler+modelVAE
+
+- includes wildcards
+
+![SimpleSampler+modelVAE](https://user-images.githubusercontent.com/20321215/229340970-19c5c0f7-6281-430d-87ce-c2e512ead277.png)
+
+
+### SimpleSampler
+
+- includes wildcards
+
+![SimpleSampler](https://user-images.githubusercontent.com/20321215/229341019-0cea9dd8-0b03-4f4a-8f49-aff068b58faf.png)
+
+
+### SimpleSamplerVAE
+
+- includes wildcards
+
+![SimpleSamplerVAE](https://user-images.githubusercontent.com/20321215/229341040-72d422d5-7904-41c3-a0e7-ac256ea40d0e.png)
+
+### VAELoaderText, LoraLoaderText, CheckpointLoaderSimpleText
+
+- support filename wildcards (? and *)
+
+![2023-04-13 23 07 29](https://user-images.githubusercontent.com/20321215/231785743-a77257b1-6932-4713-8b91-0614aeeb45e8.png)
+![2023-04-13 23 07 49](https://user-images.githubusercontent.com/20321215/231785748-b33b5d69-de00-4265-8fdb-405e61ab8758.png)
+
+
+### random_sampler_node.py
+
+![2023-03-18 20 53 37](https://user-images.githubusercontent.com/20321215/226104447-eadd1d15-437f-4a41-b989-511390236d13.png)
+
+### VAELoaderDecode.py
+
+![2023-03-18 20 52 27](https://user-images.githubusercontent.com/20321215/226104441-a13f49c6-c5be-4c70-b93e-f4ad984e9ff1.png)
diff --git a/custom_nodes/ComfyUI_node_Lilly/Random_Sampler.py b/custom_nodes/ComfyUI_node_Lilly/Random_Sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f6d1c35f25f18879c9b6e17bb6307a288f9c287
--- /dev/null
+++ b/custom_nodes/ComfyUI_node_Lilly/Random_Sampler.py
@@ -0,0 +1,104 @@
+import os
+import nodes
+import comfy.samplers
+import random
+from nodes import common_ksampler
+
+#wd = os.getcwd()
+#print("working directory is ", wd)
+#
+#filePath = __file__
+#print("This script file path is ", filePath)
+#
+#absFilePath = os.path.abspath(__file__)
+#print("This script absolute path is ", absFilePath)
+#
+#path, filename = os.path.split(absFilePath)
+#print("Script file path is {}, filename is {}".format(path, filename))
+
+
+class Random_Sampler:
+    def __init__(self):
+        print(f"Random_Sampler __init__")
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "positive": ("CONDITIONING", ),
+                "negative": ("CONDITIONING", ),
+                "LATENT": ("LATENT", ),
+                "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+                "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                #"Random": (["enable", "disable"],),
+                "steps_min": ("INT", {"default": 20, "min": 1,"max": 10000, "step": 1 }),
+                "steps_max": ("INT", {"default": 30, "min": 1,"max": 10000, "step": 1 }),
+                "cfg_min": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 100.0, "step": 0.5}),
+                "cfg_max": ("FLOAT", {"default": 9.0, "min": 0.0, "max": 100.0, "step": 0.5}),
+                "denoise_min": ("FLOAT", {"default": 0.50, "min": 0.01, "max": 1.0, "step": 0.01}),
+                "denoise_max": ("FLOAT", {"default": 1.00, "min": 0.01, "max": 1.0, "step": 0.01}),
+            },
+        }
+
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "test"
+
+    OUTPUT_NODE = False
+
+    CATEGORY = "sampling"
+
+    def test(self,
+             model,
+ positive, + negative, + LATENT, + sampler_name, + scheduler, + seed, + #Random, + steps_min, + steps_max, + cfg_min, + cfg_max, + denoise_min, + denoise_max, + ): + print(f""" + model : {model} ; + positive : {positive} ; + negative : {negative} ; + LATENT: {LATENT} ; + sampler_name : {sampler_name} ; + scheduler: {scheduler} ; + {seed} ; + + {steps_min} ; + {steps_max} ; + {cfg_min} ; + {cfg_max} ; + {denoise_min} ; + {denoise_max} ; + """) + #if Random == "enable": + # print(f"Random enable") + # return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) + return common_ksampler( + model, + seed, + random.randint( min(steps_min,steps_max), max(steps_min,steps_max) ), + random.randint( int(cfg_min*2) , int(cfg_max*2) ) / 2 , + sampler_name, + scheduler, + positive, + negative, + LATENT, + denoise=random.uniform(min(denoise_min,denoise_max),max(denoise_min,denoise_max)) + ) + #return (LATENT,) + +#NODE_CLASS_MAPPINGS = { +# "Random_Sampler": Random_Sampler +#} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/SaveImageSimple.py b/custom_nodes/ComfyUI_node_Lilly/SaveImageSimple.py new file mode 100644 index 0000000000000000000000000000000000000000..c8cecf661fdb2a5a6b924635ec2b0a3ac6d8e4c7 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/SaveImageSimple.py @@ -0,0 +1,75 @@ +from PIL import Image +from PIL.PngImagePlugin import PngInfo +import numpy as np +import json +import re + +import time + +import os +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console + from mypath import * +else: + from .ConsoleColor import print, console + from .mypath import * +#print(__file__) +#print(os.path.basename(__file__)) + + +class SaveImageSimple: + def __init__(self): + self.type = "output" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": ""}) + }, + "hidden": { + "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO" + }, + } + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "image" + + def save_images(self, images, filename_prefix="", prompt=None, extra_pnginfo=None): + + outputdir=os.path.join(mainfolder, "output") + #print("outputdir : " + outputdir , Colors.CYAN) + + #print("len(images) : " + str(len(images)) , Colors.CYAN) + filename_prefix=re.sub(r"[*]", "",filename_prefix) + filename_prefix+=time.strftime('_%Y%m%d_%H%M%S') + results = list() + cnt=1 + for image in images : + i = 255. 
* image.cpu().numpy()
+            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+            metadata = PngInfo()
+            if prompt is not None:
+                metadata.add_text("prompt", json.dumps(prompt))
+            if extra_pnginfo is not None:
+                #print("extra_pnginfo : " + json.dumps(extra_pnginfo) , Colors.CYAN)
+                for x in extra_pnginfo:
+                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+            if not os.path.exists(outputdir):
+                print("makedirs : " + outputdir )
+                os.makedirs(outputdir)
+            filename=filename_prefix+f"_{cnt:05}_.png"
+            filepath=os.path.join(outputdir, filename)
+            img.save(filepath, pnginfo=metadata, optimize=True)
+            results.append({
+                "filename": filename,  # the UI expects the basename, not the full path
+                "subfolder": "",  # files are written to the output root, so no subfolder
+                "type": self.type
+            })
+            cnt+=1
+
+        return { "ui": { "images": results } }
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_node_Lilly/SimpleSampler.py b/custom_nodes/ComfyUI_node_Lilly/SimpleSampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..54fd1758a4183f215a54e3895d9e8c2a231d5549
--- /dev/null
+++ b/custom_nodes/ComfyUI_node_Lilly/SimpleSampler.py
@@ -0,0 +1,187 @@
+import comfy.samplers
+import comfy.sd
+import comfy.utils
+
+#import comfy_extras.clip_vision
+
+import model_management
+import importlib
+
+import folder_paths
+import torch
+
+import os
+import sys
+import json
+import hashlib
+import copy
+import traceback
+
+
+from PIL import Image
+from nodes import common_ksampler
+from PIL.PngImagePlugin import PngInfo
+import numpy as np
+
+#print(f"SimpleSampler __name__ {__name__}")
+#print(f"SimpleSampler __file__ {os.path.splitext(os.path.basename(__file__))[0]}")
+
+import os
+
+
+if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__':
+    from ConsoleColor import print, console
+    from wildcards import wildcards
+else:
+    from .ConsoleColor import print, console
+    from .wildcards import wildcards
+#print(__file__)
+#print(os.path.basename(__file__))
+
+#----------------------------
+wildcardsOn=True
+# wildcards support check
+#wildcardsOn=False
+#try:
+#    wildcardsOn=True
+#    #wildcards.card_path=os.path.dirname(__file__)+"\\..\\wildcards\\**\\*.txt"
+#    print(f"import wildcards succ", style="bold GREEN" )
+#except:
+#    print(f"import wildcards fail", style="bold RED")
+#    wildcardsOn=False
+#    err_msg = traceback.format_exc()
+#    print(err_msg)
+
+
+def encode(clip, text):
+    if wildcardsOn:
+        text=wildcards.run(text)
+    return [[clip.encode(text), {}]]
+
+def generate(width, height, batch_size=1):
+    latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+    return {"samples":latent}
+    # RETURN_TYPES = ("LATENT",)
+
+def decode(vae, samples):
+    return vae.decode(samples["samples"])
+    # RETURN_TYPES = ("IMAGE",)
+
+def sample(
+        model, seed, steps, cfg, sampler_name, scheduler,
+        clip,
+        vae,
+        positive, negative,
+        #latent_image,
+        width, height, denoise=1.0, batch_size=1
+        ):
+
+    samples=common_ksampler(
+        model, seed, steps, cfg, sampler_name, scheduler,
+        #positive,
+        encode(clip, positive),
+        #negative,
+        encode(clip, negative),
+        #latent_image,
+        generate(width, height, batch_size=batch_size),  # honor the requested batch size instead of hardcoding 1
+        denoise=denoise)[0]
+
+    return (decode(vae,samples),)
+
+def load_vae(vae_name):
+    vae_path = folder_paths.get_full_path("vae", vae_name)
+    vae = comfy.sd.VAE(ckpt_path=vae_path)
+    return vae
+#----------------------------
+class SimpleSampler:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                {
+                 "model": ("MODEL",),
+                 #"positive": ("CONDITIONING", ),
+                 "clip": ("CLIP", ),
+                 "vae": ("VAE", ),
+                 "positive": ("STRING", {"multiline": True}),
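+                 # positive/negative are raw text; encode() expands wildcard
+                 # syntax via wildcards.run() before CLIP encoding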
#"negative": ("CONDITIONING", ), + "negative": ("STRING", {"multiline": True}), + "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + #"latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("IMAGE",) + #RETURN_TYPES = ("LATENT",) + FUNCTION = "simple" + + CATEGORY = "sampling" + + def simple(self, + model, seed, steps, cfg, sampler_name, scheduler, + clip, + vae, + positive, negative, + width, height, denoise=1.0, batch_size=1 + ): + + return sample( + model, seed, steps, cfg, sampler_name, scheduler, + clip, + vae, + positive, negative, + width, height, denoise, batch_size + ) + +#---------------------------- +class SimpleSamplerVAE: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "model": ("MODEL",), + #"positive": ("CONDITIONING", ), + "clip": ("CLIP", ), + #"vae": ("VAE", ), + "vae_name": (folder_paths.get_filename_list("vae"), ), + "positive": ("STRING", {"multiline": True}), + #"negative": ("CONDITIONING", ), + "negative": ("STRING", {"multiline": True}), + "width": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "height": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + #"latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("IMAGE",) + #RETURN_TYPES = ("LATENT",) + FUNCTION = "simple" + + CATEGORY = "sampling" + + def simple(self, + model, seed, steps, cfg, sampler_name, scheduler, + clip, + vae_name, + positive, negative, + width, height, denoise=1.0, batch_size=1 + ): + + return sample( + model, seed, steps, cfg, sampler_name, scheduler, + clip, + load_vae(vae_name), + positive, negative, + width, height, denoise, batch_size + ) + diff --git a/custom_nodes/ComfyUI_node_Lilly/TextWildcards.py b/custom_nodes/ComfyUI_node_Lilly/TextWildcards.py new file mode 100644 index 0000000000000000000000000000000000000000..1b153c293a40b9ac1159812a091a060afb81873f --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/TextWildcards.py @@ -0,0 +1,33 @@ +import os, glob, sys +import random +import re +import os +if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__': + from ConsoleColor import print, console + from wildcards import wildcards +else: + from .ConsoleColor import print, console + from .wildcards import wildcards + + +class TextWildcards: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", {"multiline": True}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + + } + } + RETURN_TYPES = ("STRING","ASCII") + FUNCTION = "encode" + + CATEGORY = "utils" + + def encode(self, seed, 
text): + random.seed(seed) + print(f"[green]text : [/green]",text) + r=wildcards.run(text) + print(f"[green]result : [/green]",r) + return (r, r) \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/VAELoaderDecode.py b/custom_nodes/ComfyUI_node_Lilly/VAELoaderDecode.py new file mode 100644 index 0000000000000000000000000000000000000000..736b7844609b72bdfbd8074da645e632e3e3b291 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/VAELoaderDecode.py @@ -0,0 +1,75 @@ +import os +import comfy.sd +from nodes import * +import folder_paths + + +# VAEDecode +# VAELoader +# VAELoaderDecode + +#wd = os.getcwd() +#print("working directory is ", wd) +# +#filePath = __file__ +#print("This script file path is ", filePath) +# +#absFilePath = os.path.abspath(__file__) +#print("This script absolute path is ", absFilePath) +# +#realFilePath = os.path.realpath(__file__) +#print("This script real path is ", realFilePath) +# +#path, filename = os.path.split(absFilePath) +#print("Script file path is {}, filename is {}".format(path, filename)) + +class VAELoaderDecode: + + def __init__(self, device="cpu"): + self.device = device + + #@classmethod + #def INPUT_TYPES(s): + # return {"required": { "samples": ("LATENT", ), "vae": ("VAE", )}} + # + #@classmethod + #def INPUT_TYPES(s): + # return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir), supported_pt_extensions), )}} + + @classmethod + def INPUT_TYPES(s): + + return { + "required": { + "samples": ("LATENT", ), + "vae_name": (folder_paths.get_filename_list("vae"), ) + } + } + + RETURN_TYPES = ("IMAGE",) + + FUNCTION = "test" + + CATEGORY = "latent" + + #TODO: scale factor? + #def load_vae(self, vae_name): + # vae_path = os.path.join(self.vae_dir, vae_name) + # vae = comfy.sd.VAE(ckpt_path=vae_path) + # return (vae,) + # + #def decode(self, vae, samples): + # return (vae.decode(samples["samples"]), ) + + def test(self, vae_name, samples): + + t=folder_paths.get_filename_list("vae") + print(f"VAELoaderDecode : {t}") + vae_path = folder_paths.get_full_path("vae", vae_name) + print(f"VAELoaderDecode : {vae_path}") + vae = comfy.sd.VAE(ckpt_path=vae_path) + return (vae.decode(samples["samples"]), ) + +#NODE_CLASS_MAPPINGS = { +# "VAELoaderDecode": VAELoaderDecode, +#} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/VAELoaderText.py b/custom_nodes/ComfyUI_node_Lilly/VAELoaderText.py new file mode 100644 index 0000000000000000000000000000000000000000..144f23e7996e877900ed2b30c51b3df750246fc9 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/VAELoaderText.py @@ -0,0 +1,38 @@ +import folder_paths +import comfy.sd +import os +from folder_paths import * +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console, ccolor + from mypath import * +else: + from .ConsoleColor import print, console, ccolor + from .mypath import * + +class VAELoaderText: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "vae_name": ("STRING", { + "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node + "default": random.choice(folder_paths.get_filename_list("vae")) + }), + }} + RETURN_TYPES = ("VAE",) + FUNCTION = "load_vae" + + CATEGORY = "loaders" + + #TODO: scale factor? 
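+    # vae_name is free text and may contain ? / * patterns; getFullPath()
+    # (from mypath) globs models/vae/** recursively and picks a random match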
+    def load_vae(self, vae_name):
+        print(f"[{ccolor}]vae_name : [/{ccolor}]", vae_name)
+        vae_path=getFullPath(vae_name,"vae")
+
+        try:
+            sd = comfy.utils.load_torch_file(vae_path)
+            vae = comfy.sd.VAE(sd=sd)
+            return (vae,)
+        except Exception as e:
+            console.print_exception()
+            raise  # re-raise so ComfyUI reports the failed load instead of receiving None
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_node_Lilly/__init__.py b/custom_nodes/ComfyUI_node_Lilly/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c3108808ec430a6a247c7ee6d15923286c82a89
--- /dev/null
+++ b/custom_nodes/ComfyUI_node_Lilly/__init__.py
@@ -0,0 +1,102 @@
+import sys
+import os
+
+
+print(f"__name__ : {__name__}")
+print(f"__file__ : {__file__}")
+print(f"os.path.basename(__file__) : {os.path.basename(__file__)}")
+print(f"os.path.splitext(os.path.basename(__file__))[0] : {os.path.splitext(os.path.basename(__file__))[0]}")
+#print(f"os.path.basename(__file__) : {os.path.basename('/ComfyUI_windows_portable/ComfyUI/custom_nodes/ComfyUI_node_Lilly/__init__.py')}")
+#print(f"os.path.splitext(os.path.basename(__file__))[0] : {os.path.splitext(os.path.basename('/ComfyUI_windows_portable/ComfyUI/custom_nodes/ComfyUI_node_Lilly/__init__.py'))[0]}")
+
+wd = os.getcwd()
+print("working directory : ", wd)
+
+if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__':
+    from ConsoleColor import print, console
+    #md="custom_nodes.ComfyUI_node_Lilly."
+else:
+    from .ConsoleColor import print, console
+    #md="custom_nodes.ComfyUI_node_Lilly."
+md="custom_nodes.ComfyUI_node_Lilly."
+#print(__file__)
+#print(os.path.basename(__file__))
+
+#print(f"sys.modules : {sys.modules}")
+
+#filePath = __file__
+#print("This script file path is ", filePath)
+#
+#absFilePath = os.path.abspath(__file__)
+#print("This script absolute path is ", absFilePath)
+#
+#realFilePath = os.path.realpath(__file__)
+#print("This script real path is ", realFilePath)
+#
+#path, filename = os.path.split(absFilePath)
+#print("Script file path is {}, filename is {}".format(path, filename))
+
+
+#
+#nm=os.path.abspath(__name__)
+#print("abspath __name__ : ", nm)
+#
+#md=nm.replace(wd+"\\","")
+#print("import name", md)
+"""
+"""
+#if md in sys.modules:
+#    print(f"{md!r} already in sys.modules")
+#else:
+#    print(f"{md!r} not in sys.modules")
+
+#import importlib
+#import ComfyUI_node_Lilly
+#from custom_nodes.ComfyUI_node_Lilly import eval(f"{name}")
+#print(dir(ComfyUI_node_Lilly))
+#print(dir(ComfyUI_node_Lilly.ComfyUI_node_Lilly))
+
+#print(__name__ == md)
+#print(__name__ != md)
+#print(__name__ == "ComfyUI_node_Lilly")
+#print(__name__ != "ComfyUI_node_Lilly")
+if __name__ == "ComfyUI_node_Lilly" :
+    NODE_CLASS_MAPPINGS = {
+    }
+
+    def add(name,clist=None):
+        #print(f"Load : {name}")
+        try:
+            #pkg = importlib.import_module(f"{md}{name}")
+            #eval(f"{md}{name}")
+            exec(f"import {md}{name}")
+            if clist is None:
+                NODE_CLASS_MAPPINGS[name]=eval(f"{md}{name}.{name}")
+            elif type(clist) is str:
+                NODE_CLASS_MAPPINGS[clist]=eval(f"{md}{name}.{clist}")
+            elif type(clist) is list:
+                for c in clist:
+                    NODE_CLASS_MAPPINGS[c]=eval(f"{md}{name}.{c}")
+
+            print(f"Load ok : {name}", style="bold green")
+        except Exception:
+            console.print_exception()
+
+    console.rule(f" init start ", style="bold green")
+
+    add("CheckpointLoaderRandom")
+    add("CheckpointLoaderSimpleText")
+    add("CLIPTextEncodeWildcards",["CLIPTextEncodeWildcards","CLIPTextEncodeWildcards2","CLIPTextEncodeWildcards3"])
+    add("LoraLoaderText")
+ add("LoraLoaderTextRandom") + add("Random_Sampler") + add("VAELoaderDecode") + add("VAELoaderText") + add("SimpleSampler",["SimpleSampler","SimpleSamplerVAE"]) + add("SaveImageSimple") + add("TextWildcards") + #add("test") + + console.rule(" init end ", style="bold green") \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e04ccc22a11f52dde250dca953a869469e7affa7 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54386bec542f9115fdf3cf3a0d2ed71969c2def8 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CLIPTextEncodeWildcards.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..226d6efc81a28ff4cf60d57639301856e84fd187 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe2ba89c3d9209d848af7c44c68726e68e4d49e7 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderRandom.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bb7261ca6301593f04f1be079aa029027453a74 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e72933f4defa8a53d9a4321fa21a7e14e18e4ac2 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/CheckpointLoaderSimpleText.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a124577066fc7ec2d4792e38bd15c5cc065ea99 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f935b78e4e2050592525b26d6deeec3f6bb39e32 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/ConsoleColor.cpython-311.pyc differ 
diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a56dcb8176fa338e79e25934729d3d1b290b6c4 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70e210615392f45d7b8e055c035c5f796d9b3088 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderText.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20a9c4cef36d0294cc8f8ebf78421ab18c73c406 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7d4134d1774e5e3a41938306b712f8075290af1 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/LoraLoaderTextRandom.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17892a4a78e8d6f4714ceee5eb6616ec6577fc72 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cff518b962edea21d1265f7d26861b9438c90aa6 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/Random_Sampler.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c50517248801c2ec49b251ebe53d1593024b320c Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae81a1007882bc4a5a24237684d559db42d9273a Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SaveImageSimple.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c14fcad17d93fbb4678591b2de5a8e9cb3e6d281 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-310.pyc differ diff --git 
a/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98399ac85f145541d86138f72db15a0963fe70d9 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/SimpleSampler.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e410b5528dde8e42907c9f2f7af90e78aa51b2c Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..575b5186342305a5f5866493148b381d56177292 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/TextWildcards.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5b288d65e2d7e2cbc4814595271a0269becf162 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1790485587727ee171f41249eafb226fe1d369a Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderDecode.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef8695fa90e3d9bbbec6377593cb6c9193dce04b Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c2ffe2213e45a9239d8a0799cc0bcdf6a249ca0 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/VAELoaderText.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85308036df68f3b5b544a4c8797ade99019eb523 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c6275c1b63f6f97ec759de661459a0cc4526178 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-310.pyc 
b/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6ac614df83661d7681e696970d437a332fd8052 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da264e6b5f5c3822a1392695096e46d26442ddb9 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/mypath.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-310.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..918016f3d14158bcd072c50230e728f26e652443 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-310.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-311.pyc b/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..663f24c43f07c15d92fdb7b86e4741fbdb5e00c1 Binary files /dev/null and b/custom_nodes/ComfyUI_node_Lilly/__pycache__/wildcards.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_node_Lilly/mypath.py b/custom_nodes/ComfyUI_node_Lilly/mypath.py new file mode 100644 index 0000000000000000000000000000000000000000..354ba8aab1e9604e3e185e54347f50c73be08ed7 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/mypath.py @@ -0,0 +1,112 @@ +import sys +import json +import ast +import os, glob +import random +from folder_paths import * +if __name__ == os.path.splitext(os.path.basename(__file__))[0] : + from ConsoleColor import print, console +else: + from .ConsoleColor import print, console + +""" +import psutil +for proc in psutil.process_iter(): + ps_name = proc.name() + if ps_name == 'python3': + cmdline = proc.cmdline() + print(cmdline) +""" + +""" +print() +for key, value in os.environ.items(): + print('{}: {}'.format(key, value)) +print() +""" + +py_name=os.path.basename(__file__) +#print("os.path.basename(__file__) : ",py_name, style="bold CYAN") + +absFilePath = os.path.abspath(__file__) +#print("os.path.abspath(__file__) : " , absFilePath , style="bold CYAN") + +realFilePath = os.path.realpath(__file__) +#print("os.path.abspath(__file__) : " + realFilePath , style="bold CYAN") + +normpath=os.path.normpath(__file__) +#print("os.path.normpath(__file__) : " + normpath , style="bold CYAN") + +subfolder = os.path.dirname(normpath) +#print("os.path.dirname(normpath) : " + subfolder , style="bold CYAN") + +filename = os.path.basename(normpath) +#print("os.path.basename(normpath) : " + filename , style="bold CYAN") + +mainFile = os.path.abspath(sys.modules['__main__'].__file__) +#print("os.path.abspath(sys.modules\['__main__'].__file__) : " + mainFile ,style="bold CYAN") +mainfolder = os.path.dirname(mainFile) +#print("os.path.dirname(mainFile) : " + mainfolder , style="bold CYAN") + +def check_name(kind,name,supported_extensions): + for ext in supported_extensions: + if name.lower().endswith(ext): + path = folder_paths.get_full_path(kind, name) + if path is not None: + return path + + for ext in supported_extensions: + path = folder_paths.get_full_path(kind, name+ext) + if path is not None: + return path + +def check_name_ckpt(name): + return 
check_name("checkpoints",name,supported_ckpt_extensions) + +def check_name_pt(kind,name): + return check_name(kind,name,supported_pt_extensions) + +def name_split_choice(name): + return random.choice(name.split('|')) + +#---------------------- + +def filenameget(v_path): + t_path=os.path.join(os.path.dirname(__file__),v_path) + print(t_path) + fullpaths=glob.glob(t_path, recursive=True) + print(fullpaths) + fullpath=random.choice(fullpaths) + name=os.path.basename(fullpath) + #r_path=[os.path.basename(fullpath) for fullpath in fullpaths] + return (name,fullpath) + +# "test","vae",["pt","safetensors"] +def getFullPath(p,k,el=["safetensors","ckpt","pt"]): + if os.path.isabs(p): + path=p + else: + path=os.path.join(models_dir,k,"**",p) + #print(f"path : ", path) + t=False + for e in el: + if p.endswith('.'+e): + t=True + break + if t: + files=glob.glob(path, recursive=True) + else: + for e in el: + t=path+"."+e + #print(f"t : ", t) + files=glob.glob(t, recursive=True) + if len(files): + break + result=None + #print(f"files : ", files) + if len(files): + result=random.choice(files) + print(f"result : ", result) + else: + print("[red]No file in path[/red] : ", path) + return result \ No newline at end of file diff --git a/custom_nodes/ComfyUI_node_Lilly/wildcards.py b/custom_nodes/ComfyUI_node_Lilly/wildcards.py new file mode 100644 index 0000000000000000000000000000000000000000..00ab845f30bae21616d3b5ce39092335ae405fc9 --- /dev/null +++ b/custom_nodes/ComfyUI_node_Lilly/wildcards.py @@ -0,0 +1,223 @@ +import os +import glob +import sys +import random +import re +import fnmatch + +if __name__ == os.path.splitext(os.path.basename(__file__))[0] or __name__ =='__main__': + from ConsoleColor import print, console, ccolor +else: + from .ConsoleColor import print, console , ccolor +#print(__file__) +#print(os.path.basename(__file__)) +#print(os.getcwd()) + +import subprocess +import pkg_resources + +required = {'chardet'} +installed = {pkg.key for pkg in pkg_resources.working_set} +missing = required - installed + +if missing: + python = sys.executable + subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL) + +import chardet + + +# ============================================================ +class wildcards: + + # 가져올 파일 목록 + card_path = os.path.join(os.path.dirname(__file__), "..", "..", "wildcards", "**", "*.txt") + #card_path=f"{os.getcwd()}\\wildcards\\**\\*.txt" + print(f"wildcards card_path : ", card_path , style="bold CYAN") + + # 정규식 + #resub = re.compile(r"(\{)([^\{\}]*)(\})") + #resub = re.compile(r"(\{)(((\d+)|(\d+)?-(\d+)?)?\$\$((.*)?\$\$)?)?([^\{\}]*)(\})") + resub = re.compile(r"(\{)(((\d+)|(\d+)?-(\d+)?)?\$\$(([^\{\}]*?)\$\$)?)?([^\{\}]*)(\})") + recard = re.compile(r"(__)(.*?)(__)") + + # 카드 목록 + is_card_Load = False + cards = {} + seperator=", " + loop_max=50 + + # | 로 입력된것중 하나 가져오기 + def sub(match): + #print(f"sub : {(match.groups())}") + try: + #m=match.group(2) + seperator=wildcards.seperator + s=match.group(3) + m=match.group(9).split("|") + p=match.group(8) + if p: + seperator=p + + if s is None: + return random.choice(m) + c=len(m) + n=int(match.group(4)) if match.group(4) else None + if n: + + r=seperator.join(random.sample(m,min(n,c))) + #print(f"n : {n} ; {r}") + return r + + n1=match.group(5) + n2=match.group(6) + + if n1 or n2: + a=min(int(n1 if n1 else c), int(n2 if n2 else c),c) + b=min(max(int(n1 if n1 else 0), int(n2 if n2 else 0)),c) + #print(f"ab : {a} ; {b}") + r=seperator.join( + random.sample( + m, + random.randint( + a,b + 
+    # pick one of the |-separated options
+    def sub(match):
+        #print(f"sub : {(match.groups())}")
+        try:
+            #m=match.group(2)
+            seperator=wildcards.seperator
+            s=match.group(3)
+            m=match.group(9).split("|")
+            p=match.group(8)
+            if p:
+                seperator=p
+
+            if s is None:
+                return random.choice(m)
+            c=len(m)
+            n=int(match.group(4)) if match.group(4) else None
+            if n:
+                r=seperator.join(random.sample(m,min(n,c)))
+                #print(f"n : {n} ; {r}")
+                return r
+
+            n1=match.group(5)
+            n2=match.group(6)
+
+            if n1 or n2:
+                # a missing low bound means 0, a missing high bound means "all options"
+                lo=int(n1) if n1 else 0
+                hi=int(n2) if n2 else c
+                a=min(lo,hi,c)
+                b=min(max(lo,hi),c)
+                #print(f"ab : {a} ; {b}")
+                r=seperator.join(
+                    random.sample(
+                        m,
+                        random.randint(
+                            a,b
+                        )
+                    )
+                )
+            else:
+                r=seperator.join(
+                    random.sample(
+                        m,
+                        random.randint(
+                            0,c
+                        )
+                    )
+                )
+            #print(f"12 : {r}")
+            return r
+
+        except Exception as e:
+            console.print_exception()
+            return ""
+
+
+    # repeatedly resolve |-options until the text stops changing
+    def sub_loop(text):
+        bak=text
+        for i in range(1, wildcards.loop_max):
+            tmp=wildcards.resub.sub(wildcards.sub, bak)
+            #print(f"tmp : {tmp}")
+            if bak==tmp :
+                return tmp
+            bak=tmp
+        return bak
+
+    # pick a line from a matching card file
+    def card(match):
+        #print(f"card in : {match.group(2)}")
+        lst=fnmatch.filter(wildcards.cards, match.group(2))
+        if len(lst)>0:
+            #print(f"card lst : {lst}")
+            cd=random.choice(lst)
+            #print(f"card get : {cd}")
+            r=random.choice(wildcards.cards[cd])
+        else :
+            r= match.group(2)
+        #print(f"card out : {r}")
+        return r
+
+
+    # repeatedly resolve cards; also handles the | forms
+    def card_loop(text):
+        bak=text
+        for i in range(1, wildcards.loop_max):
+            tmp=wildcards.recard.sub(wildcards.card, bak)
+            #print(f"card l : {bak}")
+            if bak==tmp :
+                tmp=wildcards.sub_loop(tmp)
+
+            if bak==tmp :
+                #print(f"card le : {bak}")
+                return tmp
+            bak=tmp
+        #print(f"card le : {bak}")
+        return bak
+
+    # read the card files
+    def card_load():
+        #cards=wildcards.cards
+        card_path=wildcards.card_path
+        cards = {}
+        #print(f"path : {path}")
+        files=glob.glob(card_path, recursive=True)
+        #print(f"files : {files}")
+
+        for file in files:
+            basenameAll = os.path.basename(file)
+            basename = os.path.relpath(file, os.path.dirname(__file__)).replace("\\", "/").replace("../../wildcards/", "")
+            #print(f"basenameAll : {basenameAll}")
+            #print(f"basename : {basename}")
+            file_nameAll = os.path.splitext(basenameAll)[0]
+            file_name = "/"+os.path.splitext(basename)[0]
+            #print(f"file_nameAll : {file_nameAll}")
+            #print(f"file_name : {file_name}")
+            if not file_nameAll in cards:
+                cards[file_nameAll]=[]
+            if not file_name in cards:
+                cards[file_name]=[]
+            #print(f"file_name : {file_name}")
+            with open(file, "rb") as f:
+                raw_data = f.read()
+                encoding = chardet.detect(raw_data)["encoding"]
+            with open(file, "r", encoding=encoding) as f:
+                lines = f.readlines()
+                for line in lines:
+                    line=line.strip()
+                    # skip comments and blank lines
+                    if line.startswith("#") or len(line)==0:
+                        continue
+                    cards[file_nameAll]+=[line]
+                    cards[file_name]+=[line]
+                    #print(f"line : {line}")
+        wildcards.cards=cards
+        print(f"[cyan]cards keys : [/cyan]", len(wildcards.cards))
+        #print(f"cards : {cards.keys()}")
+        wildcards.is_card_Load=True
+
+    # entry point
+    def run(text,load=False):
+        if text is None or not isinstance(text, str):
+            print("[red]text is not str : [/red]",text)
+            return None
+        if not wildcards.is_card_Load or load:
+            wildcards.card_load()
+
+        #print(f"text : {text}")
+        result=wildcards.card_loop(text)
+        #print(f"result : {result}")
+        return result
+
+    # ============================================================
+
+#m = p.sub(sub, test)
+#print(m)
+#print(__name__)
+#if __name__ == '__main__' :
+    # for testing
+#test="{3$$a1|{b2|c3|}|d4|{-$$|f|g}|{-2$$h||i}|{1-$$j|k|}}/{$$l|m|}/{0$$n|}/{9$$-$$a|b|c}/{9$$ {and|or} $$a|b|c}"
+#print("[green]wildcards test : [/green]",wildcards.run(test),style="reset")
+#print("wildcards test : "+wildcards.run("{9$$a|b}"))
+#print("[green]wildcards test : [/green]",wildcards.run("__my__"))
+#print("wildcards test : "+wildcards.run("{9$$-$$a|b|c}"))
+#print("wildcards test : "+wildcards.run("{9$$ {and|or} $$a|b|c}"))
+#print("wildcards test : "+wildcards.run("{{slender,|} {nature,|} {curvy,|} {thin,|} {narrow,|} {slim,|}
{mini,|} {little,|}| {|very }{-$$ $$thin|slender|narrow|slim|little|skinny|mini} body, }")) +print("wildcards test : "+wildcards.run("__aest__")) +print("wildcards test : "+wildcards.run("__*test__")) +print("wildcards test : "+wildcards.run("__?est__")) +print("wildcards test : "+wildcards.run("__test__")) +print("wildcards test : "+wildcards.run("__/test__")) +print("wildcards test : "+wildcards.run("__/0/test__")) diff --git a/custom_nodes/ComfyUI_smZNodes/.gitignore b/custom_nodes/ComfyUI_smZNodes/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c295ed41d7fc8bcc7c27b67c767927af4565e459 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/.gitignore @@ -0,0 +1,165 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +backup* +**/.DS_Store +**/.venv +**/.vscode \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/LICENSE b/custom_nodes/ComfyUI_smZNodes/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/custom_nodes/ComfyUI_smZNodes/README.md b/custom_nodes/ComfyUI_smZNodes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3ad3500c9a2539ea112c9fd4f9d58dd272ac63b3 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/README.md @@ -0,0 +1,147 @@ + +# smZNodes +A selection of custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI). + +1. [CLIP Text Encode++](#clip-text-encode) +2. [Settings](#settings) + +## CLIP Text Encode++ + +

+*Clip Text Encode++ – Default settings on stable-diffusion-webui*
+
+CLIP Text Encode++ can generate embeddings for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) that are identical to those produced by [stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui).
+
+This means you can reproduce the same images generated from `stable-diffusion-webui` on `ComfyUI`.
+
+Simple prompts generate _identical_ images. More complex prompts that make heavy use of attention/emphasis/weighting may generate images with slight differences due to how `ComfyUI` denoises images. In that case, you can enable the option to use another denoiser with the Settings node.
+
+### Features
+
+- [Prompt editing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing)
+  - [Alternating words](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alternating-words)
+- Weight normalization
+- Usage of `BREAK` and `AND` keywords
+- Optional `embedding:` identifier
+
+### Installation
+
+Three methods are available for installation:
+
+1. Load via [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager)
+2. Clone the repository directly into the `custom_nodes` directory
+3. Download the project manually
+
+#### Load via ComfyUI Manager
+
+*Install via ComfyUI Manager*
+
+#### Clone Repository
+
+```shell
+cd path/to/your/ComfyUI/custom_nodes
+git clone https://github.com/shiimizu/ComfyUI_smZNodes.git
+```
+
+#### Download Manually
+
+1. Download the project archive from [here](https://github.com/shiimizu/ComfyUI_smZNodes/archive/refs/heads/main.zip).
+2. Extract the downloaded zip file.
+3. Move the extracted files to `path/to/your/ComfyUI/custom_nodes`.
+4. Restart ComfyUI.
+
+The folder structure should resemble: `path/to/your/ComfyUI/custom_nodes/ComfyUI_smZNodes`.
+
+### Update
+
+To update the extension, use [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) or pull the latest changes from the repository:
+
+```shell
+cd path/to/your/ComfyUI/custom_nodes/ComfyUI_smZNodes
+git pull
+```
+
+### Comparisons
+These images can be dragged into ComfyUI to load their workflows. Each image was generated with the [Silicon29](https://huggingface.co/Xynon/SD-Silicon) checkpoint (SD 1.5) using 18 steps of the Heun sampler.
+
+|stable-diffusion-webui|A1111 parser|Comfy parser|
+|:---:|:---:|:---:|
+| ![00008-0-cinematic wide shot of the ocean, beach, (palmtrees_1 5), at sunset, milkyway](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/719457d8-96fc-495e-aabc-48c4fe4d648d) | ![A1111 parser comparison 1](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/c7e0d3cd-ae22-4a6a-bc21-a2b6e10f9652) | ![Comfy parser comparison 1](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/21415ca1-57f9-454a-8e63-19b04832a38c) |
+| ![00007-0-a photo of an astronaut riding a horse on mars, ((palmtrees_1 2) on water)](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/9ad8b569-8c6d-4a09-bf36-288d81ce4cf9) | ![A1111 parser comparison 2](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/6986be92-b210-4fdd-8667-7004d6cd628c) | ![Comfy parser comparison 2](https://github.com/shiimizu/ComfyUI_smZNodes/assets/54494639/c0d918bb-32df-4aaa-ae85-def22c2d7d07) |
+
+Image slider links:
+- https://imgsli.com/MTkxMjE0
+- https://imgsli.com/MTkxMjEy
+
+### Options
+
+|Name|Description|
+| --- | --- |
+| `parser` | The parser used to parse prompts into tokens, which are then transformed (encoded) into embeddings. Taken from [`automatic`](https://github.com/vladmandic/automatic/discussions/99#discussioncomment-5931014). |
+| `mean_normalization` | Whether to take the mean of your prompt weights. It's `true` by default on `stable-diffusion-webui`.<br>This follows the `stable-diffusion-webui` implementation (which notes that it's probably not the correct way to take the mean). |
+| `multi_conditioning` | Usually set to `true` for your positive prompt and `false` for your negative prompt.<br>For each prompt, the list is obtained by splitting the prompt using the `AND` separator.<br>See [Compositional Visual Generation with Composable Diffusion Models](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/). |
+| `use_old_emphasis_implementation` | Use the old emphasis implementation. Can be useful to reproduce old seeds. |
+
+> [!IMPORTANT]
+> You can right click the node to show/hide some of the widgets, e.g. the `with_SDXL` option.
+
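+The idea behind `mean_normalization` is small enough to sketch. The snippet below is an illustrative Python sketch of the technique, not this extension's actual code (the hypothetical helper `apply_emphasis` is made up for illustration): after scaling token embeddings by their weights, the result is rescaled so its overall mean matches the mean before weighting.
+
+```python
+import torch
+
+def apply_emphasis(z: torch.Tensor, multipliers: torch.Tensor) -> torch.Tensor:
+    # z: token embeddings (batch, tokens, channels); multipliers: per-token weights
+    original_mean = z.mean()
+    z = z * multipliers.unsqueeze(-1).expand(z.shape)
+    # Rescale so the overall mean is unchanged by the weighting.
+    return z * (original_mean / z.mean())
+
+z = torch.randn(1, 77, 768)  # dummy batch of CLIP token embeddings
+weights = torch.ones(1, 77)
+weights[0, 5] = 1.5          # one emphasized token, e.g. (word:1.5)
+z = apply_emphasis(z, weights)
+```
+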
+
+| Parser | Description |
+| ----------------- | -------------------------------------------------------------------------------- |
+| `comfy` | The default way `ComfyUI` handles everything |
+| `comfy++` | Uses `ComfyUI`'s parser but encodes tokens the way `stable-diffusion-webui` does, allowing it to take the mean as they do. |
+| `A1111` | The default parser used in `stable-diffusion-webui` |
+| `full` | Same as `A1111` but whitespaces and newlines are stripped |
+| `compel` | Uses [`compel`](https://github.com/damian0815/compel) |
+| `fixed attention` | Prompt is untampered with |
+
+> **Note**
+> Every `parser` except `comfy` uses `stable-diffusion-webui`'s encoding pipeline.
+
+> **Warning**
+> LoRA syntax (`<lora:name:weight>`) is not supported.
+
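+For reference, a few illustrative examples of the `stable-diffusion-webui` prompt syntax these parsers accept:
+
+```text
+(masterpiece:1.2), a castle on a hill      # attention/emphasis weighting
+a [photo:painting:0.6] of a cat            # prompt editing: switch at 60% of steps
+a [cat|dog] hybrid                         # alternating words: swaps every step
+city street BREAK neon signs, rain         # BREAK starts a new chunk of tokens
+a red crown AND a blue jeweled crown       # AND combines multiple conditionings
+```
+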
+## Settings
+
+*Settings node workflow*
+
+The Settings node can be used to fine-tune results from CLIP Text Encode++. Some settings apply globally, some only during tokenization, and some only to the CFGDenoiser. The `RNG` setting applies globally.
+
+This node can change whenever it is updated, so you may have to recreate it to prevent issues. Hook it up before CLIP Text Encode++ nodes to apply any changes. Settings can be overridden by placing another Settings node further along the chain. Right click the node for the `Hide/show all descriptions` menu option.
+
+## Tips to get reproducible results on both UIs
+- Use the same seed, sampler settings, RNG (CPU or GPU), clip skip (CLIP Set Last Layer), etc.
+- Ancestral samplers may not be deterministic.
+- If you're using `DDIM` as your sampler, use the `ddim_uniform` scheduler.
+- There are different `unipc` configurations. Adjust accordingly on both UIs.
+
+### FAQs
+- How does this differ from [`ComfyUI_ADV_CLIP_emb`](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb)?
+  - In regards to `stable-diffusion-webui`:
+    - Mine parses prompts using their parser.
+    - Mine takes the mean exactly as they do. `ComfyUI_ADV_CLIP_emb` probably takes the correct mean, but the goal here is reproducible images.
+- Where can I learn more about how ComfyUI interprets weights?
+  - https://comfyanonymous.github.io/ComfyUI_examples/faq/
+  - https://blenderneko.github.io/ComfyUI-docs/Interface/Textprompts/
diff --git a/custom_nodes/ComfyUI_smZNodes/__init__.py b/custom_nodes/ComfyUI_smZNodes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffbaec5466557f412d94c87bbac926e91729e47b
--- /dev/null
+++ b/custom_nodes/ComfyUI_smZNodes/__init__.py
@@ -0,0 +1,170 @@
+from pathlib import Path
+import os
+import shutil
+import subprocess
+import importlib
+from functools import partial
+
+def install(module):
+    import sys
+    try:
+        print(f"\033[92m[smZNodes] \033[0;31m{module} is not installed. Attempting to install...\033[0m")
+        subprocess.check_call([sys.executable, "-m", "pip", "install", module])
+        reload()
+        print(f"\033[92m[smZNodes] {module} Installed!\033[0m")
+    except Exception:
+        print(f"\033[92m[smZNodes] \033[0;31mFailed to install {module}.\033[0m")
+
+# Reload modules after installation
+PRELOADED_MODULES = set()
+
+def init():
+    # Snapshot the modules loaded so far; reload() only touches modules
+    # imported after this point (local imports keep things neat, and
+    # sys/importlib are effectively ignored here too).
+    from sys import modules
+    global PRELOADED_MODULES
+    PRELOADED_MODULES = set(modules.values())
+
+def reload():
+    from sys import modules
+    import importlib
+    for module in set(modules.values()) - PRELOADED_MODULES:
+        try:
+            importlib.reload(module)
+        except Exception:
+            pass
+
+init()
+
+# compel =================
+if importlib.util.find_spec('compel') is None:
+    install("compel")
+
+# lark =================
+if importlib.util.find_spec('lark') is None:
+    install("lark")
+# ============================
+
+WEB_DIRECTORY = "./web"
+
+from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
+
+# ==== web ======
+cwd_path = Path(__file__).parent
+comfy_path = cwd_path.parent.parent
+
+def setup_web_extension():
+    # On older ComfyUI versions without EXTENSION_WEB_DIRS support,
+    # copy the bundled JS into ComfyUI's web/extensions folder.
+    import nodes
+    web_extension_path = os.path.join(comfy_path, "web", "extensions", "smZNodes")
+
+    if os.path.exists(web_extension_path):
+        shutil.rmtree(web_extension_path)
+    if not hasattr(nodes, "EXTENSION_WEB_DIRS"):
+        # print(f"[smZNodes]: Your ComfyUI version is outdated.
Please update to the latest version.") + # setup js + if not os.path.exists(web_extension_path): + os.makedirs(web_extension_path) + + js_src_path = os.path.join(cwd_path, "web/js", "smZdynamicWidgets.js") + shutil.copy(js_src_path, web_extension_path) + +setup_web_extension() + +# ============== + +# add_sample_dpmpp_2m_alt, inject_code, opts as smZ_opts +from .smZNodes import add_sample_dpmpp_2m_alt, inject_code, CFGNoisePredictor + +add_sample_dpmpp_2m_alt() + +# ============== +# Hijack sampling + +payload = [{ + "target_line": 'extra_args["denoise_mask"] = denoise_mask', + "code_to_insert": """ + if (any([_p[1].get('from_smZ', False) for _p in positive]) or + any([_p[1].get('from_smZ', False) for _p in negative])): + from ComfyUI_smZNodes.modules.shared import opts as smZ_opts + if not smZ_opts.sgm_noise_multiplier: max_denoise = False +""" +}, +{ + "target_line": 'positive = positive[:]', + "code_to_insert": """ + if hasattr(self, 'model_denoise'): self.model_denoise.step = start_step if start_step != None else 0 +""" +}, +] + +import comfy +if not hasattr(comfy.samplers, 'Sampler'): + print(f"[smZNodes]: Your ComfyUI version is outdated. Please update to the latest version.") + comfy.samplers.KSampler.sample = inject_code(comfy.samplers.KSampler.sample, payload) +else: + _KSampler_sample = comfy.samplers.KSampler.sample + _Sampler = comfy.samplers.Sampler + _max_denoise = comfy.samplers.Sampler.max_denoise + _sample = comfy.samplers.sample + _wrap_model = comfy.samplers.wrap_model + + def get_value_from_args(args, kwargs, key_to_lookup, fn, idx=None): + arg_names = fn.__code__.co_varnames[:fn.__code__.co_argcount] + value = None + if key_to_lookup in kwargs: + value = kwargs[key_to_lookup] + else: + try: + # Get its position in the formal parameters list and retrieve from args + index = arg_names.index(key_to_lookup) + value = args[index] if index < len(args) else None + except Exception as err: + if idx is not None and idx < len(args): + value = args[idx] + return value + + def KSampler_sample(*args, **kwargs): + start_step = get_value_from_args(args, kwargs, 'start_step', _KSampler_sample) + if start_step is not None: + args[0].model.start_step = start_step + return _KSampler_sample(*args, **kwargs) + + def sample(*args, **kwargs): + model = get_value_from_args(args, kwargs, 'model', _sample, 0) + positive = get_value_from_args(args, kwargs, 'positive', _sample, 2) + negative = get_value_from_args(args, kwargs, 'negative', _sample, 3) + get_p1 = lambda x: x[1] if type(x) is list else x + model.from_smZ = (any([get_p1(_p).get('from_smZ', False) for _p in positive]) or + any([get_p1(_p).get('from_smZ', False) for _p in negative])) + return _sample(*args, **kwargs) + + class Sampler(_Sampler): + def max_denoise(self, model_wrap, sigmas): + model = model_wrap.inner_model + if hasattr(model, 'inner_model'): + model = model.inner_model + if getattr(model, 'start_step', None) is not None: + model_wrap.inner_model.step = int(model.start_step) + del model.start_step + if model.from_smZ: + from .modules.shared import opts + if opts.sgm_noise_multiplier: + return _max_denoise(self, model_wrap, sigmas) + else: + return False + else: + return _max_denoise(self, model_wrap, sigmas) + + comfy.samplers.Sampler.max_denoise = Sampler.max_denoise + comfy.samplers.KSampler.sample = KSampler_sample + comfy.samplers.sample = sample +comfy.samplers.CFGNoisePredictor = CFGNoisePredictor + +if hasattr(comfy.model_management, 'unet_dtype'): + comfy.model_management.unet_dtype_orig = 
comfy.model_management.unet_dtype + from .modules import devices + def unet_dtype(device=None, model_params=0): + dtype = comfy.model_management.unet_dtype_orig(device=device, model_params=model_params) + if model_params != 0: + devices.dtype_unet = dtype + return dtype + comfy.model_management.unet_dtype = unet_dtype diff --git a/custom_nodes/ComfyUI_smZNodes/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60c6a2e691f97897a9e09dd25d89dbc151b1f8ec Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/__pycache__/nodes.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0ee45b6a1eb710244441448b373cb482a769abf Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/__pycache__/smZNodes.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/__pycache__/smZNodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb3e82addc6485eb87d094017ab93bb3113406b9 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/__pycache__/smZNodes.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/devices.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/devices.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6b30d96b7e8affa24af7fab8e105199a4e3ec2 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/devices.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/errors.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/errors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f151180a4432dca0060ef76c96a703fee16630ad Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/errors.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/prompt_parser.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/prompt_parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b3ea42cf1456d0097e09b7ff97820c445a35bfa Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/prompt_parser.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/rng_philox.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/rng_philox.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b02b131d1bdc6eb77d66c24b9d0b7aa9f5d4c36b Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/rng_philox.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/script_callbacks.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/script_callbacks.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21a509c0a53d8b1c667871e67f9f7c13255f20fe Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/script_callbacks.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack.cpython-311.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..e9bcd2fe387f6d37c587d506efec1fc9361ff1ba Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_clip.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_clip.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..741acc1140cf4ec9f85acfb72c9ca3610e1f7e2d Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_clip.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_open_clip.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_open_clip.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e384c3cefc6b96f67cc9c329174e1cfe17948f3f Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_open_clip.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_optimizations.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_optimizations.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8951490eb6b7b41d300dc17ff21a6d6c9c09318 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_optimizations.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_unet.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_unet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bc4ebd04234699ad0d4fa54028c1f53a7537472 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_unet.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_utils.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8844c44782f12e22bed77dff61f91e085320c23 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_hijack_utils.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80acf9089072b2b102a43a5f8163d835b2941774 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/sd_samplers_cfg_denoiser.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/shared.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/shared.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60fe82c593e942ab05360121fc1fd450a62ab44f Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/__pycache__/shared.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/devices.py b/custom_nodes/ComfyUI_smZNodes/modules/devices.py new file mode 100644 index 0000000000000000000000000000000000000000..b93950d64849d5f4611e67c42b2fd21cd9ea0101 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/devices.py @@ -0,0 +1,82 @@ +import sys +import contextlib +import torch +from . import shared +from comfy import model_management + +if sys.platform == "darwin": + from . 
import mac_specific + + +def has_mps() -> bool: + if sys.platform != "darwin": + return False + else: + return mac_specific.has_mps + +cpu = torch.device("cpu") +device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None +dtype = torch.float16 +dtype_vae = torch.float16 +dtype_unet = torch.float16 +unet_needs_upcast = False + +def cond_cast_unet(input): + return input.to(dtype_unet) if unet_needs_upcast else input + + +def cond_cast_float(input): + return input.float() if unet_needs_upcast else input + + +def randn(seed, shape): + from modules.shared import opts + + torch.manual_seed(seed) + if opts.randn_source == "CPU" or device.type == 'mps': + return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) + + +def randn_without_seed(shape): + from modules.shared import opts + + if opts.randn_source == "CPU" or device.type == 'mps': + return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) + +def autocast(disable=False): + if disable: + return contextlib.nullcontext() + + if dtype == torch.float32 or model_management.get_torch_device() == torch.device("mps"): # or shared.cmd_opts.precision == "full": + return contextlib.nullcontext() + + # only cuda + autocast_device = model_management.get_autocast_device(model_management.get_torch_device()) + # autocast_device = "cuda" + return torch.autocast(autocast_device) + +def without_autocast(disable=False): + return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() + +class NansException(Exception): + pass + +def test_for_nans(x, where): + if shared.opts.disable_nan_check: + return + if not torch.all(torch.isnan(x)).item(): + return + if where == "unet": + message = "A tensor with all NaNs was produced in Unet." + if not shared.opts.no_half: + message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this." + elif where == "vae": + message = "A tensor with all NaNs was produced in VAE." + if not shared.opts.no_half and not shared.opts.no_half_vae: + message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this." + else: + message = "A tensor with all NaNs was produced." + message += " Use --disable-nan-check commandline argument to disable this check." + raise NansException(message) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/errors.py b/custom_nodes/ComfyUI_smZNodes/modules/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..23bc885d714817c17d3bd48b49ff64b830b29159 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/errors.py @@ -0,0 +1,85 @@ +import sys +import textwrap +import traceback + + +exception_records = [] + + +def record_exception(): + _, e, tb = sys.exc_info() + if e is None: + return + + if exception_records and exception_records[-1] == e: + return + + exception_records.append((e, tb)) + + if len(exception_records) > 5: + exception_records.pop(0) + + +def report(message: str, *, exc_info: bool = False) -> None: + """ + Print an error message to stderr, with optional traceback. 
+    """
+
+    record_exception()
+
+    for line in message.splitlines():
+        print("***", line, file=sys.stderr)
+    if exc_info:
+        print(textwrap.indent(traceback.format_exc(), "    "), file=sys.stderr)
+        print("---", file=sys.stderr)
+
+
+def print_error_explanation(message):
+    record_exception()
+
+    lines = message.strip().split("\n")
+    max_len = max([len(x) for x in lines])
+
+    print('=' * max_len, file=sys.stderr)
+    for line in lines:
+        print(line, file=sys.stderr)
+    print('=' * max_len, file=sys.stderr)
+
+
+def display(e: Exception, task, *, full_traceback=False):
+    record_exception()
+
+    print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
+    te = traceback.TracebackException.from_exception(e)
+    if full_traceback:
+        # include frames leading up to the try-catch block
+        te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
+    print(*te.format(), sep="", file=sys.stderr)
+
+    message = str(e)
+    if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
+        print_error_explanation("""
+The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file.
+See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+        """)
+
+
+already_displayed = {}
+
+
+def display_once(e: Exception, task):
+    record_exception()
+
+    if task in already_displayed:
+        return
+
+    display(e, task)
+
+    already_displayed[task] = 1
+
+
+def run(code, task):
+    try:
+        code()
+    except Exception as e:
+        # display() expects the exception first and the task label second.
+        display(e, task)
diff --git a/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/__pycache__/hypernetwork.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/__pycache__/hypernetwork.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e84a90987a0f0daf404843734640d6b6adaa382e
Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/__pycache__/hypernetwork.cpython-311.pyc differ
diff --git a/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/hypernetwork.py b/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/hypernetwork.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa15dbf91191517e81c18e718f82a0ef34cdcd5a
--- /dev/null
+++ b/custom_nodes/ComfyUI_smZNodes/modules/hypernetworks/hypernetwork.py
@@ -0,0 +1,23 @@
+from ..
import devices +def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None) + + if hypernetwork_layers is None: + return context_k, context_v + + if layer is not None: + layer.hyper_k = hypernetwork_layers[0] + layer.hyper_v = hypernetwork_layers[1] + + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) + return context_k, context_v + + +def apply_hypernetworks(hypernetworks, context, layer=None): + context_k = context + context_v = context + for hypernetwork in hypernetworks: + context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer) + + return context_k, context_v \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/modules/mac_specific.py b/custom_nodes/ComfyUI_smZNodes/modules/mac_specific.py new file mode 100644 index 0000000000000000000000000000000000000000..c0a9e9b01ab0eab81d7783b32151c2258f8afb23 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/mac_specific.py @@ -0,0 +1,85 @@ +import logging + +import torch +import platform +from .sd_hijack_utils import CondFunc +from packaging import version + +log = logging.getLogger(__name__) + + +# before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+, +# use check `getattr` and try it for compatibility. +# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availabilty, +# since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279 +def check_for_mps() -> bool: + if version.parse(torch.__version__) <= version.parse("2.0.1"): + if not getattr(torch, 'has_mps', False): + return False + try: + torch.zeros(1).to(torch.device("mps")) + return True + except Exception: + return False + else: + return torch.backends.mps.is_available() and torch.backends.mps.is_built() + + +has_mps = check_for_mps() + + +def torch_mps_gc() -> None: + try: + from .shared import state + if state.current_latent is not None: + log.debug("`current_latent` is set, skipping MPS garbage collection") + return + from torch.mps import empty_cache + empty_cache() + except Exception: + log.warning("MPS garbage collection failed", exc_info=True) + + +# MPS workaround for https://github.com/pytorch/pytorch/issues/89784 +def cumsum_fix(input, cumsum_func, *args, **kwargs): + if input.device.type == 'mps': + output_dtype = kwargs.get('dtype', input.dtype) + if output_dtype == torch.int64: + return cumsum_func(input.cpu(), *args, **kwargs).to(input.device) + elif output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16): + return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64) + return cumsum_func(input, *args, **kwargs) + +if has_mps: + # MPS fix for randn in torchsde + CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps') + + if platform.mac_ver()[0].startswith("13.2."): + # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk 
(https://github.com/explosion/curated-transformers/pull/124)
+        CondFunc('torch.nn.functional.linear',
+                 lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()),
+                 lambda _, input, weight, bias: input.numel() > 10485760)
+
+    if version.parse(torch.__version__) < version.parse("1.13"):
+        # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
+
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
+        CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
+                 lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
+        CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
+                 lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
+        CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
+    elif version.parse(torch.__version__) > version.parse("1.13.1"):
+        cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
+        cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
+        CondFunc('torch.cumsum', cumsum_fix_func, None)
+        CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
+        CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
+
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
+        CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
+
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
+        if platform.processor() == 'i386':
+            for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
+                CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
diff --git a/custom_nodes/ComfyUI_smZNodes/modules/prompt_parser.py b/custom_nodes/ComfyUI_smZNodes/modules/prompt_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e4c86c9bf18e71f6cb50a6a7705ca84c89f49e4
--- /dev/null
+++ b/custom_nodes/ComfyUI_smZNodes/modules/prompt_parser.py
@@ -0,0 +1,530 @@
+from __future__ import annotations
+
+import re
+from collections import namedtuple
+from typing import List
+import lark
+import torch
+from compel import Compel
+if __name__ == "__main__":
+    from shared import opts, log
+else:
+    from .shared import opts, log
+
+# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
+# will be represented with prompt_schedule like this (assuming steps=100):
+# [25, 'fantasy landscape with a mountain and an oak in
foreground shoddy'] +# [50, 'fantasy landscape with a lake and an oak in foreground in background shoddy'] +# [60, 'fantasy landscape with a lake and an oak in foreground in background masterful'] +# [75, 'fantasy landscape with a lake and an oak in background masterful'] +# [100, 'fantasy landscape with a lake and a christmas tree in background masterful'] + +round_bracket_multiplier = 1.1 +square_bracket_multiplier = 1.0 / 1.1 +ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"]) +schedule_parser = lark.Lark(r""" +!start: (prompt | /[][():]/+)* +prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)* +!emphasized: "(" prompt ")" + | "(" prompt ":" prompt ")" + | "[" prompt "]" +scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]" +alternate: "[" prompt ("|" [prompt])+ "]" +WHITESPACE: /\s+/ +plain: /([^\\\[\]():|]|\\.)+/ +%import common.SIGNED_NUMBER -> NUMBER +""") +re_clean = re.compile(r"^\W+", re.S) +re_whitespace = re.compile(r"\s+", re.S) + + +def get_learned_conditioning_prompt_schedules(prompts, base_steps, hires_steps=None, use_old_scheduling=False): + """ + >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0] + >>> g("test") + [[10, 'test']] + >>> g("a [b:3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [b: 3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [[[b]]:2]") + [[2, 'a '], [10, 'a [[b]]']] + >>> g("[(a:2):3]") + [[3, ''], [10, '(a:2)']] + >>> g("a [b : c : 1] d") + [[1, 'a b d'], [10, 'a c d']] + >>> g("a[b:[c:d:2]:1]e") + [[1, 'abe'], [2, 'ace'], [10, 'ade']] + >>> g("a [unbalanced") + [[10, 'a [unbalanced']] + >>> g("a [b:.5] c") + [[5, 'a c'], [10, 'a b c']] + >>> g("a [{b|d{:.5] c") # not handling this right now + [[5, 'a c'], [10, 'a {b|d{ c']] + >>> g("((a][:b:c [d:3]") + [[3, '((a][:b:c '], [10, '((a][:b:c d']] + >>> g("[a|(b:1.1)]") + [[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']] + >>> g("[fe|]male") + [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']] + >>> g("[fe|||]male") + [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']] + >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10, 10)[0] + >>> g("a [b:.5] c") + [[10, 'a b c']] + >>> g("a [b:1.5] c") + [[5, 'a c'], [10, 'a b c']] + """ + + if hires_steps is None or use_old_scheduling: + int_offset = 0 + flt_offset = 0 + steps = base_steps + else: + int_offset = base_steps + flt_offset = 1.0 + steps = hires_steps + + def collect_steps(steps, tree): + res = [steps] + + class CollectSteps(lark.Visitor): + def scheduled(self, tree): + s = tree.children[-2] + v = float(s) + if use_old_scheduling: + v = v*steps if v<1 else v + else: + if "." 
in s: + v = (v - flt_offset) * steps + else: + v = (v - int_offset) + tree.children[-2] = min(steps, int(v)) + if tree.children[-2] >= 1: + res.append(tree.children[-2]) + + def alternate(self, tree): + res.extend(range(1, steps+1)) + + CollectSteps().visit(tree) + return sorted(set(res)) + + def at_step(step, tree): + class AtStep(lark.Transformer): + def scheduled(self, args): + before, after, _, when, _ = args + yield before or () if step <= when else after + def alternate(self, args): + args = ["" if not arg else arg for arg in args] + yield args[(step - 1) % len(args)] + def start(self, args): + def flatten(x): + if isinstance(x, str): + yield x + else: + for gen in x: + yield from flatten(gen) + return ''.join(flatten(args)) + def plain(self, args): + yield args[0].value + def __default__(self, data, children, meta): + for child in children: + yield child + return AtStep().transform(tree) + + def get_schedule(prompt): + try: + tree = schedule_parser.parse(prompt) + except lark.exceptions.LarkError: + if 0: + import traceback + traceback.print_exc() + return [[steps, prompt]] + return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] + + promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)} + return [promptdict[prompt] for prompt in prompts] + + +class SdConditioning(list): + """ + A list with prompts for stable diffusion's conditioner model. + Can also specify width and height of created image - SDXL needs it. + """ + def __init__(self, prompts, is_negative_prompt=False, width=None, height=None, copy_from=None): + super().__init__() + self.extend(prompts) + + if copy_from is None: + copy_from = prompts + + self.is_negative_prompt = is_negative_prompt or getattr(copy_from, 'is_negative_prompt', False) + self.width = width or getattr(copy_from, 'width', None) + self.height = height or getattr(copy_from, 'height', None) + + + +def get_learned_conditioning(model, prompts: SdConditioning | list[str], steps, hires_steps=None, use_old_scheduling=False): + """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the condition (cond), + and the sampling step at which this condition is to be replaced by the next one.
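+ (A ScheduledPromptConditioning with end_at_step=N is applied for every sampling step up to and including N; after that, the next entry in the schedule takes over.)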
+ + Input: + (model, ['a red crown', 'a [blue:green:5] jeweled crown'], 20) + + Output: + [ + [ + ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0523, ..., -0.4901, -0.3066, 0.0674], ..., [ 0.3317, -0.5102, -0.4066, ..., 0.4119, -0.7647, -1.0160]], device='cuda:0')) + ], + [ + ScheduledPromptConditioning(end_at_step=5, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.0192, 0.3867, -0.4644, ..., 0.1135, -0.3696, -0.4625]], device='cuda:0')), + ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.7352, -0.4356, -0.7888, ..., 0.6994, -0.4312, -1.2593]], device='cuda:0')) + ] + ] + """ + res = [] + + prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps, hires_steps, use_old_scheduling) + cache = {} + first_pooled = None + for prompt, prompt_schedule in zip(prompts, prompt_schedules): + + cached = cache.get(prompt, None) + if cached is not None: + res.append(cached) + continue + + texts = SdConditioning([x[1] for x in prompt_schedule], copy_from=prompts) + # conds = model.get_learned_conditioning(texts) + conds = model.forward(texts) + if first_pooled == None: + # first_pooled = conds.pooled + if conds.pooled.shape[0] > 1: + first_pooled = conds.pooled[1:2] + else: + first_pooled = conds.pooled[0:1] + cond_schedule = [] + for i, (end_at_step, _) in enumerate(prompt_schedule): + if isinstance(conds, dict): + cond = {k: v[i] for k, v in conds.items()} + else: + cond = conds[i] + if i == 0: + cond.pooled = first_pooled + cond_schedule.append(ScheduledPromptConditioning(end_at_step, cond)) + + cache[prompt] = cond_schedule + res.append(cond_schedule) + + return res + + +re_AND = re.compile(r"\bAND\b") +re_weight = re.compile(r"^((?:\s|.)*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") + + +def get_multicond_prompt_list(prompts: SdConditioning | list[str]): + res_indexes = [] + + prompt_indexes = {} + prompt_flat_list = SdConditioning(prompts) + prompt_flat_list.clear() + + for prompt in prompts: + subprompts = re_AND.split(prompt) + + indexes = [] + for subprompt in subprompts: + match = re_weight.search(subprompt) + + text, weight = match.groups() if match is not None else (subprompt, 1.0) + + weight = float(weight) if weight is not None else 1.0 + + index = prompt_indexes.get(text, None) + if index is None: + index = len(prompt_flat_list) + prompt_flat_list.append(text) + prompt_indexes[text] = index + + indexes.append((index, weight)) + + res_indexes.append(indexes) + + return res_indexes, prompt_flat_list, prompt_indexes + + +class ComposableScheduledPromptConditioning: + def __init__(self, schedules, weight=1.0): + self.schedules: List[ScheduledPromptConditioning] = schedules + self.weight: float = weight + + +class MulticondLearnedConditioning: + def __init__(self, shape, batch): + self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS + self.batch: List[List[ComposableScheduledPromptConditioning]] = batch + + +def get_multicond_learned_conditioning(model, prompts, steps, hires_steps=None, use_old_scheduling=False) -> MulticondLearnedConditioning: + """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt. + For each prompt, the list is obtained by splitting the prompt using the AND separator. 
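+ + For example (illustrative, not a doctest): the prompt "a cat AND a dog :0.6" is split into the subprompts + "a cat" (weight 1.0) and "a dog" (weight 0.6), and each subprompt then gets its own schedule from get_learned_conditioning.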
+ + https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/ + """ + + res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts) + + learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps, hires_steps, use_old_scheduling) + + res = [] + for indexes in res_indexes: + res.append([ComposableScheduledPromptConditioning(learned_conditioning[i], weight) for i, weight in indexes]) + + return MulticondLearnedConditioning(shape=(len(prompts),), batch=res) + + +class DictWithShape(dict): + def __init__(self, x, shape): + super().__init__() + self.update(x) + + @property + def shape(self): + return self["crossattn"].shape + + +def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step): + param = c[0][0].cond + is_dict = isinstance(param, dict) + + if is_dict: + dict_cond = param + res = {k: torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for k, param in dict_cond.items()} + res = DictWithShape(res, (len(c),) + dict_cond['crossattn'].shape) + else: + res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) + + for i, cond_schedule in enumerate(c): + target_index = 0 + for current, entry in enumerate(cond_schedule): + if current_step <= entry.end_at_step: + target_index = current + break + + if is_dict: + for k, param in cond_schedule[target_index].cond.items(): + res[k][i] = param + else: + res[i] = cond_schedule[target_index].cond + + res.pooled = param.pooled + res.pooled.schedules = c + return res + + +def stack_conds(tensors): + # if prompts have wildly different lengths above the limit we'll get tensors of different shapes + # and won't be able to torch.stack them. So this fixes that. + token_count = max([x.shape[0] for x in tensors]) + for i in range(len(tensors)): + if tensors[i].shape[0] != token_count: + last_vector = tensors[i][-1:] + last_vector_repeated = last_vector.repeat([token_count - tensors[i].shape[0], 1]) + tensors[i] = torch.vstack([tensors[i], last_vector_repeated]) + + return torch.stack(tensors) + + + +def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): + param = c.batch[0][0].schedules[0].cond + + tensors = [] + conds_list = [] + + for composable_prompts in c.batch: + conds_for_batch = [] + + for composable_prompt in composable_prompts: + target_index = 0 + for current, entry in enumerate(composable_prompt.schedules): + if current_step <= entry.end_at_step: + target_index = current + break + + conds_for_batch.append((len(tensors), composable_prompt.weight)) + tensors.append(composable_prompt.schedules[target_index].cond) + + conds_list.append(conds_for_batch) + + if isinstance(tensors[0], dict): + keys = list(tensors[0].keys()) + stacked = {k: stack_conds([x[k] for x in tensors]) for k in keys} + stacked = DictWithShape(stacked, stacked['crossattn'].shape) + else: + stacked = stack_conds(tensors).to(device=param.device, dtype=param.dtype) + + stacked.pooled = param.pooled + stacked.pooled.schedules = c + return conds_list, stacked + + +re_attention = re.compile(r""" +\\\(| +\\\)| +\\\[| +\\]| +\\\\| +\\| +\(| +\[| +:\s*([+-]?[.\d]+)\s*\)| +\)| +]| +[^\\()\[\]:]+| +: +""", re.X) + +re_break = re.compile(r"\s*\bBREAK\b\s*", re.S) +re_attention_v1 = re_attention + +def parse_prompt_attention(text): + """ + Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
+ Accepted tokens are: + (abc) - increases attention to abc by a multiplier of 1.1 + (abc:3.12) - increases attention to abc by a multiplier of 3.12 + [abc] - decreases attention to abc by a multiplier of 1.1 + \( - literal character '(' + \[ - literal character '[' + \) - literal character ')' + \] - literal character ']' + \\ - literal character '\' + anything else - just text + + >>> parse_prompt_attention('normal text') + [['normal text', 1.0]] + >>> parse_prompt_attention('an (important) word') + [['an ', 1.0], ['important', 1.1], [' word', 1.0]] + >>> parse_prompt_attention('(unbalanced') + [['unbalanced', 1.1]] + >>> parse_prompt_attention('\(literal\]') + [['(literal]', 1.0]] + >>> parse_prompt_attention('(unnecessary)(parens)') + [['unnecessaryparens', 1.1]] + >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') + [['a ', 1.0], + ['house', 1.5730000000000004], + [' ', 1.1], + ['on', 1.0], + [' a ', 1.1], + ['hill', 0.55], + [', sun, ', 1.1], + ['sky', 1.4641000000000006], + ['.', 1.1]] + """ + + res = [] + round_brackets = [] + square_brackets = [] + + round_bracket_multiplier = 1.1 + square_bracket_multiplier = 1 / 1.1 + if opts.prompt_attention == 'Fixed attention': + res = [[text, 1.0]] + return res + elif opts.prompt_attention == 'Compel parser': + conjunction = Compel.parse_prompt_string(text) + if conjunction is None or conjunction.prompts is None or conjunction.prompts is None or len(conjunction.prompts[0].children) == 0: + return [["", 1.0]] + res = [] + for frag in conjunction.prompts[0].children: + res.append([frag.text, frag.weight]) + return res + elif opts.prompt_attention == 'A1111 parser': + re_attention = re_attention_v1 + whitespace = '' + else: + re_attention = re_attention_v1 + text = text.replace('\\n', ' ') + whitespace = ' ' + + def multiply_range(start_position, multiplier): + for p in range(start_position, len(res)): + res[p][1] *= multiplier + + for m in re_attention.finditer(text): + text = m.group(0) + weight = m.group(1) + + if text.startswith('\\'): + res.append([text[1:], 1.0]) + elif text == '(': + round_brackets.append(len(res)) + elif text == '[': + square_brackets.append(len(res)) + elif weight is not None and round_brackets: + multiply_range(round_brackets.pop(), float(weight)) + elif text == ')' and round_brackets: + multiply_range(round_brackets.pop(), round_bracket_multiplier) + elif text == ']' and square_brackets: + multiply_range(square_brackets.pop(), square_bracket_multiplier) + else: + parts = re.split(re_break, text) + for i, part in enumerate(parts): + if i > 0: + res.append(["BREAK", -1]) + if opts.prompt_attention == 'Full parser': + part = re_clean.sub("", part) + part = re_whitespace.sub(" ", part).strip() + if len(part) == 0: + continue + res.append([part, 1.0]) + + for pos in round_brackets: + multiply_range(pos, round_bracket_multiplier) + + for pos in square_brackets: + multiply_range(pos, square_bracket_multiplier) + + if len(res) == 0: + res = [["", 1.0]] + + # merge runs of identical weights + i = 0 + while i + 1 < len(res): + if res[i][1] == res[i + 1][1]: + res[i][0] += whitespace + res[i + 1][0] + res.pop(i + 1) + else: + i += 1 + + return res + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) + input_text = '[black] [[grey]] (white) ((gray)) ((orange:1.1) yellow) ((purple) and [dark] red:1.1) [mouse:0.2] [(cat:1.1):0.5]' + print(f'Prompt: {input_text}') + all_schedules = get_learned_conditioning_prompt_schedules([input_text], 100)[0] + 
print('Schedules', all_schedules) + for schedule in all_schedules: + print('Schedule', schedule[0]) + opts.prompt_attention = 'Fixed attention' + output_list = parse_prompt_attention(schedule[1]) + print(' Fixed:', output_list) + opts.prompt_attention = 'Compel parser' + output_list = parse_prompt_attention(schedule[1]) + print(' Compel:', output_list) + opts.prompt_attention = 'A1111 parser' + output_list = parse_prompt_attention(schedule[1]) + print(' A1111:', output_list) + opts.prompt_attention = 'Full parser' + output_list = parse_prompt_attention(schedule[1]) + print(' Full :', output_list) +else: + import torch # doctest faster diff --git a/custom_nodes/ComfyUI_smZNodes/modules/rng_philox.py b/custom_nodes/ComfyUI_smZNodes/modules/rng_philox.py new file mode 100644 index 0000000000000000000000000000000000000000..8897dc3ac54beec0eb43e315e2201ccd6613576e --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/rng_philox.py @@ -0,0 +1,102 @@ +"""RNG imitating torch cuda randn on CPU. You are welcome. + +Usage: + +``` +g = Generator(seed=0) +print(g.randn(shape=(3, 4))) +``` + +Expected output: +``` +[[-0.92466259 -0.42534415 -2.6438457 0.14518388] + [-0.12086647 -0.57972564 -0.62285122 -0.32838709] + [-1.07454231 -0.36314407 -1.67105067 2.26550497]] +``` +""" + +import numpy as np + +philox_m = [0xD2511F53, 0xCD9E8D57] +philox_w = [0x9E3779B9, 0xBB67AE85] + +two_pow32_inv = np.array([2.3283064e-10], dtype=np.float32) +two_pow32_inv_2pi = np.array([2.3283064e-10 * 6.2831855], dtype=np.float32) + + +def uint32(x): + """Converts (N,) np.uint64 array into (2, N) np.uint32 array.""" + return x.view(np.uint32).reshape(-1, 2).transpose(1, 0) + + +def philox4_round(counter, key): + """A single round of the Philox 4x32 random number generator.""" + + v1 = uint32(counter[0].astype(np.uint64) * philox_m[0]) + v2 = uint32(counter[2].astype(np.uint64) * philox_m[1]) + + counter[0] = v2[1] ^ counter[1] ^ key[0] + counter[1] = v2[0] + counter[2] = v1[1] ^ counter[3] ^ key[1] + counter[3] = v1[0] + + +def philox4_32(counter, key, rounds=10): + """Generates 32-bit random numbers using the Philox 4x32 random number generator. + + Parameters: + counter (numpy.ndarray): A 4xN array of 32-bit integers representing the counter values (offset into generation). + key (numpy.ndarray): A 2xN array of 32-bit integers representing the key values (seed). + rounds (int): The number of rounds to perform. + + Returns: + numpy.ndarray: A 4xN array of 32-bit integers containing the generated random numbers.
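+ + A minimal sketch of calling this directly (Generator.randn below is the intended entry point; the seed value 1234 is arbitrary): + + ``` + counter = np.zeros((4, 8), dtype=np.uint32) + counter[2] = np.arange(8, dtype=np.uint32) # one counter lane per random number + key = uint32(np.full(8, 1234, dtype=np.uint64)) # broadcast the seed to all lanes + words = philox4_32(counter, key) # 4x8 array of random uint32 words + ```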
+ """ + + for _ in range(rounds - 1): + philox4_round(counter, key) + + key[0] = key[0] + philox_w[0] + key[1] = key[1] + philox_w[1] + + philox4_round(counter, key) + return counter + + +def box_muller(x, y): + """Returns just the first out of two numbers generated by Box–Muller transform algorithm.""" + u = x * two_pow32_inv + two_pow32_inv / 2 + v = y * two_pow32_inv_2pi + two_pow32_inv_2pi / 2 + + s = np.sqrt(-2.0 * np.log(u)) + + r1 = s * np.sin(v) + return r1.astype(np.float32) + + +class Generator: + """RNG that produces same outputs as torch.randn(..., device='cuda') on CPU""" + + def __init__(self, seed): + self.seed = seed + self.offset = 0 + + def randn(self, shape): + """Generate a sequence of n standard normal random variables using the Philox 4x32 random number generator and the Box-Muller transform.""" + + n = 1 + for x in shape: + n *= x + + counter = np.zeros((4, n), dtype=np.uint32) + counter[0] = self.offset + counter[2] = np.arange(n, dtype=np.uint32) # up to 2^32 numbers can be generated - if you want more you'd need to spill into counter[3] + self.offset += 1 + + key = np.empty(n, dtype=np.uint64) + key.fill(self.seed) + key = uint32(key) + + g = philox4_32(counter, key) + + return box_muller(g[0], g[1]).reshape(shape) # discard g[2] and g[3] diff --git a/custom_nodes/ComfyUI_smZNodes/modules/script_callbacks.py b/custom_nodes/ComfyUI_smZNodes/modules/script_callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..fd9e3d635680645dfd6aaf50ce0804376b124656 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/script_callbacks.py @@ -0,0 +1,55 @@ + +import inspect +from . import errors +from collections import namedtuple + +def report_exception(c, job): + errors.report(f"Error executing callback {job} for {c.script}", exc_info=True) + +ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) +callback_map = dict( + callbacks_app_started=[], + callbacks_model_loaded=[], + callbacks_ui_tabs=[], + callbacks_ui_train_tabs=[], + callbacks_ui_settings=[], + callbacks_before_image_saved=[], + callbacks_image_saved=[], + callbacks_cfg_denoiser=[], + callbacks_cfg_denoised=[], + callbacks_cfg_after_cfg=[], + callbacks_before_component=[], + callbacks_after_component=[], + callbacks_image_grid=[], + callbacks_infotext_pasted=[], + callbacks_script_unloaded=[], + callbacks_before_ui=[], + callbacks_on_reload=[], + callbacks_list_optimizers=[], + callbacks_list_unets=[], +) + +def list_optimizers_callback(): + res = [] + + for c in callback_map['callbacks_list_optimizers']: + try: + c.callback(res) + except Exception: + report_exception(c, 'list_optimizers') + + return res + + +def on_list_optimizers(callback): + """register a function to be called when UI is making a list of cross attention optimization options. 
+ The function will be called with one argument, a list, and shall add objects of type modules.sd_hijack_optimizations.SdOptimization + to it.""" + + add_callback(callback_map['callbacks_list_optimizers'], callback) + +def add_callback(callbacks, fun): + stack = [x for x in inspect.stack() if x.filename != __file__] + filename = stack[0].filename if stack else 'unknown file' + + callbacks.append(ScriptCallback(filename, fun)) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack.py new file mode 100644 index 0000000000000000000000000000000000000000..9ccc687f4f739ab05d2051b4e53686122b242ee8 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack.py @@ -0,0 +1,251 @@ +import torch +import comfy +import comfy.sd1_clip +from torch.nn.functional import silu +from types import MethodType +from comfy.sd import CLIP +from comfy import ldm +import ldm.modules.diffusionmodules +import ldm.modules.diffusionmodules.model +import ldm.modules.diffusionmodules.openaimodel +import ldm.modules.attention +from . import devices, shared, sd_hijack_unet, sd_hijack_optimizations, script_callbacks, errors +from .textual_inversion import textual_inversion +from ..smZNodes import FrozenCLIPEmbedderWithCustomWordsCustom, FrozenOpenCLIPEmbedder2WithCustomWordsCustom, get_learned_conditioning +from functools import partial +if not hasattr(ldm.modules.diffusionmodules.model, "nonlinearity_orig"): + ldm.modules.diffusionmodules.model.nonlinearity_orig = ldm.modules.diffusionmodules.model.nonlinearity +if not hasattr(ldm.modules.diffusionmodules.openaimodel, "th_orig"): + ldm.modules.diffusionmodules.openaimodel.th_orig = ldm.modules.diffusionmodules.openaimodel.th + +ldm.modules.attention.CrossAttention.forward_orig = ldm.modules.attention.CrossAttention.forward +ldm.modules.diffusionmodules.model.AttnBlock.forward_orig = ldm.modules.diffusionmodules.model.AttnBlock.forward + +optimizers = [] +current_optimizer: sd_hijack_optimizations.SdOptimization = None +already_optimized = False # temp fix for displaying info since two cliptextencode's will run + +def list_optimizers(): + script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers) + new_optimizers = script_callbacks.list_optimizers_callback() + + new_optimizers = [x for x in new_optimizers if x.is_available()] + + new_optimizers = sorted(new_optimizers, key=lambda x: x.priority, reverse=True) + + optimizers.clear() + optimizers.extend(new_optimizers) + + +def apply_optimizations(option=None): + global already_optimized + if already_optimized: + display = False + list_optimizers() + global current_optimizer + + undo_optimizations() + + if len(optimizers) == 0: + # a script can access the model very early, and optimizations would not be filled by then + current_optimizer = None + return '' + + ldm.modules.diffusionmodules.model.nonlinearity = silu + ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th + + # sgm.modules.diffusionmodules.model.nonlinearity = silu + # sgm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th + + if current_optimizer is not None: + current_optimizer.undo() + current_optimizer = None + + selection = option or shared.opts.cross_attention_optimization + if selection == "Automatic" and len(optimizers) > 0: + matching_optimizer = next(iter([x for x in optimizers if x.cmd_opt and getattr(shared.cmd_opts, x.cmd_opt, False)]), optimizers[0]) + else: + matching_optimizer = next(iter([x for x in optimizers if x.cmd_opt == selection]), None) + 
if selection == "None": + matching_optimizer = None + elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention: + matching_optimizer = None + elif matching_optimizer is None: + matching_optimizer = optimizers[0] + + if matching_optimizer is not None: + if shared.opts.debug: + print(f"Applying attention optimization: {matching_optimizer.name}... ", end='') + matching_optimizer.apply() + already_optimized = True + if shared.opts.debug: + print("done.") + current_optimizer = matching_optimizer + return current_optimizer + else: + # if shared.opts.debug: + # print("Disabling attention optimization") + return '' + +def undo_optimizations(): + sd_hijack_optimizations.undo() + ldm.modules.diffusionmodules.model.nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity_orig + ldm.modules.diffusionmodules.openaimodel.th = ldm.modules.diffusionmodules.openaimodel.th_orig + +class StableDiffusionModelHijack: + fixes = None + comments = [] + layers = None + circular_enabled = False + clip = None + tokenizer = None + optimization_method = None + embedding_db = textual_inversion.EmbeddingDatabase() + + def apply_optimizations(self, option=None): + try: + self.optimization_method = apply_optimizations(option) + except Exception as e: + errors.display(e, "applying optimizations") + undo_optimizations() + + def hijack(self, m: comfy.sd1_clip.SD1ClipModel): + tokenizer_parent = m.tokenizer # SD1Tokenizer + # SDTokenizer + tokenizer_parent2 = getattr(tokenizer_parent, tokenizer_parent.clip) if hasattr(tokenizer_parent, 'clip') else tokenizer_parent + tokenizer = getattr(tokenizer_parent, tokenizer_parent.clip).tokenizer if hasattr(tokenizer_parent, 'clip') else tokenizer_parent.tokenizer + if hasattr(m, 'clip'): + m = getattr(m, m.clip) + model_embeddings = m.transformer.text_model.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) + model_embeddings.token_embedding.weight = model_embeddings.token_embedding.wrapped._parameters.get('weight').to(device=devices.device) + m.tokenizer_parent0 = tokenizer_parent + m.tokenizer_parent = tokenizer_parent2 + m.tokenizer = tokenizer + m = FrozenOpenCLIPEmbedder2WithCustomWordsCustom(m, self) if "SDXLClipG" in type(m).__name__ else FrozenCLIPEmbedderWithCustomWordsCustom(m, self) + m.clip_layer = getattr(m.wrapped, "clip_layer", None) + m.reset_clip_layer = getattr(m.wrapped, "reset_clip_layer", None) + m.transformer = getattr(m.wrapped, "transformer", None) + self.cond_stage_model = m + self.clip = m + + apply_weighted_forward(self.clip) + self.apply_optimizations() + + def undo_hijack(self, m): + try: + m = m.wrapped + model_embeddings = m.transformer.text_model.embeddings + if type(model_embeddings.token_embedding) == EmbeddingsWithFixes: + model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped + undo_optimizations() + undo_weighted_forward(m) + self.apply_circular(False) + # self.layers = None + self.clip = None + self.cond_stage_model = None + except Exception as err: + print(err) + + def apply_circular(self, enable): + if self.circular_enabled == enable: + return + + self.circular_enabled = enable + + for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]: + layer.padding_mode = 'circular' if enable else 'zeros' + + def clear_comments(self): + self.comments = [] + + def get_prompt_lengths(self, text): + if self.clip is None: + return 0, 0 + _, token_count = self.clip.process_texts([text]) + return token_count, 
self.clip.get_target_prompt_token_count(token_count) + +model_hijack = StableDiffusionModelHijack() + +def weighted_loss(sd_model, pred, target, mean=True): + #Calculate the loss normally, but without reducing it to the mean + loss = sd_model._old_get_loss(pred, target, mean=False) # pylint: disable=protected-access + + #Check if we have weights available + weight = getattr(sd_model, '_custom_loss_weight', None) + if weight is not None: + loss *= weight + + #Return the loss, as mean if specified + return loss.mean() if mean else loss + +def weighted_forward(sd_model, x, c, w, *args, **kwargs): + try: + #Temporarily append weights to a place accessible during loss calc + sd_model._custom_loss_weight = w # pylint: disable=protected-access + + #Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely + #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set + if not hasattr(sd_model, '_old_get_loss'): + sd_model._old_get_loss = sd_model.get_loss # pylint: disable=protected-access + sd_model.get_loss = MethodType(weighted_loss, sd_model) + + #Run the standard forward function, but with the patched 'get_loss' + return sd_model.forward(x, c, *args, **kwargs) + finally: + try: + #Delete temporary weights if appended + del sd_model._custom_loss_weight + except AttributeError: + pass + + #If we have an old loss function, reset the loss function to the original one + if hasattr(sd_model, '_old_get_loss'): + sd_model.get_loss = sd_model._old_get_loss # pylint: disable=protected-access + del sd_model._old_get_loss + +def apply_weighted_forward(sd_model): + #Add new function 'weighted_forward' that can be called to calc weighted loss + sd_model.weighted_forward = MethodType(weighted_forward, sd_model) + +def undo_weighted_forward(sd_model): + try: + del sd_model.weighted_forward + except AttributeError: + pass + + +class EmbeddingsWithFixes(torch.nn.Module): + def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'): + super().__init__() + self.wrapped = wrapped + self.embeddings = embeddings + self.textual_inversion_key = textual_inversion_key + + def forward(self, input_ids): + batch_fixes = self.embeddings.fixes + self.embeddings.fixes = None + + try: + inputs_embeds = self.wrapped(input_ids) + except Exception: + inputs_embeds = self.wrapped(input_ids.cpu()) + + if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0: + return inputs_embeds + + vecs = [] + for fixes, tensor in zip(batch_fixes, inputs_embeds): + for offset, embedding in fixes: + vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec + emb = devices.cond_cast_unet(vec) + if emb.device != tensor.device: + emb = emb.to(device=tensor.device) + emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0]) + try: + tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]) + except Exception as err: + print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", tensor.shape, emb.shape) + # raise err + vecs.append(tensor) + + return torch.stack(vecs) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..f95ceab492662d07e7b5d690c9dfb86e346df907 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip.py @@ -0,0 +1,314 @@ +from __future__ import annotations +import math +from collections import namedtuple +import torch +from .
import prompt_parser, devices, sd_hijack +from .shared import opts +from comfy.sd1_clip import SD1ClipModel +from comfy.sdxl_clip import SDXLClipModel + +class PromptChunk: + """ + This object contains token ids, weights (attention multipliers such as the 1.4 in (word:1.4)) and textual inversion embedding info for a chunk of prompt. + If a prompt is short, it is represented by one PromptChunk; otherwise, multiple are necessary. + Each PromptChunk contains exactly 77 tokens, including one start token and one end token, + so just 75 tokens from the prompt. + """ + def __init__(self): + self.tokens = [] + self.multipliers = [] + self.fixes = [] + + +PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) +"""An object of this type is a marker showing that textual inversion embedding's vectors have to be placed at offset in the prompt +chunk. Those objects are found in PromptChunk.fixes, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally +are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" + + +class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): + """A pytorch module that is a wrapper for the FrozenCLIPEmbedder module. It enhances FrozenCLIPEmbedder, making it possible to + have unlimited prompt length and assign weights to tokens in prompt. + """ + def __init__(self, wrapped: SD1ClipModel|SDXLClipModel, hijack): + super().__init__() + self.wrapped = wrapped + """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, + depending on model.""" + self.hijack: sd_hijack.StableDiffusionModelHijack = hijack + self.chunk_length = 75 + self.is_trainable = getattr(wrapped, 'is_trainable', False) + self.input_key = getattr(wrapped, 'input_key', 'txt') + self.legacy_ucg_val = None + + def empty_chunk(self): + """creates an empty PromptChunk and returns it""" + chunk = PromptChunk() + chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) + chunk.multipliers = [1.0] * (self.chunk_length + 2) + return chunk + + def get_target_prompt_token_count(self, token_count): + """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" + return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length + + def tokenize(self, texts): + """Converts a batch of texts into a batch of token ids""" + raise NotImplementedError + + def encode_with_transformers(self, tokens): + """ + converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; + All python lists with tokens are assumed to have the same length, usually 77. + if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on + the model - it can be 768 or 1024. + Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). + """ + raise NotImplementedError + + def encode_embedding_init_text(self, init_text, nvpt): + """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through + transformers. nvpt is used as a maximum length in tokens. If text produces fewer tokens than nvpt, only that many are returned.""" + raise NotImplementedError + + def tokenize_line(self, line): + """ + this transforms a single prompt into a list of PromptChunk objects - as many as needed to + represent the prompt.
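+ For example (illustrative): with chunk_length=75, a 100-token prompt becomes two PromptChunks - the second one + holding the remaining 25 tokens - and each chunk is padded and wrapped into the fixed 77-token layout + (id_start + 75 tokens + id_end) by next_chunk() below.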
+ Returns the list and the total number of tokens in the prompt. + """ + + if opts.enable_emphasis: + parsed = prompt_parser.parse_prompt_attention(line) + else: + parsed = [[line, 1.0]] + + tokenized = self.tokenize([text for text, _ in parsed]) + chunks = [] + chunk = PromptChunk() + token_count = 0 + last_comma = -1 + + def next_chunk(is_last=False): + """puts current chunk into the list of results and produces the next one - empty; + if is_last is true, tokens tokens at the end won't add to token_count""" + nonlocal token_count + nonlocal last_comma + nonlocal chunk + if is_last: + token_count += len(chunk.tokens) + else: + token_count += self.chunk_length + to_add = self.chunk_length - len(chunk.tokens) + if to_add > 0: + chunk.tokens += [self.id_end] * to_add + chunk.multipliers += [1.0] * to_add + chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end] + chunk.multipliers = [1.0] + chunk.multipliers + [1.0] + last_comma = -1 + chunks.append(chunk) + chunk = PromptChunk() + + for tokens, (text, weight) in zip(tokenized, parsed): + if text == 'BREAK' and weight == -1: + next_chunk() + continue + position = 0 + while position < len(tokens): + token = tokens[position] + if token == self.comma_token: + last_comma = len(chunk.tokens) + # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack + # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. + elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: + break_location = last_comma + 1 + reloc_tokens = chunk.tokens[break_location:] + reloc_mults = chunk.multipliers[break_location:] + chunk.tokens = chunk.tokens[:break_location] + chunk.multipliers = chunk.multipliers[:break_location] + next_chunk() + chunk.tokens = reloc_tokens + chunk.multipliers = reloc_mults + if len(chunk.tokens) == self.chunk_length: + next_chunk() + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position) + if embedding is None: + chunk.tokens.append(token) + chunk.multipliers.append(weight) + position += 1 + continue + emb_len = int(embedding.vectors) + if len(chunk.tokens) + emb_len > self.chunk_length: + next_chunk() + chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding)) + chunk.tokens += [0] * emb_len + chunk.multipliers += [weight] * emb_len + position += embedding_length_in_tokens + if chunk.tokens or not chunks: + next_chunk(is_last=True) + return chunks, token_count + + def process_texts(self, texts): + """ + Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum + length, in tokens, of all texts. + """ + token_count = 0 + cache = {} + batch_chunks = [] + for line in texts: + if line in cache: + chunks = cache[line] + else: + chunks, current_token_count = self.tokenize_line(line) + token_count = max(current_token_count, token_count) + cache[line] = chunks + batch_chunks.append(chunks) + return batch_chunks, token_count + + def forward(self, texts): + """ + Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. 
+ Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will + be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. + An example shape returned by this function can be: (2, 77, 768). + For SDXL, instead of returning one tensor as above, it returns a tuple of two: the second one, with shape (B, 1280), holds the pooled values. + Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element + is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" + """ + if opts.use_old_emphasis_implementation: + from . import sd_hijack_clip_old + ret = sd_hijack_clip_old.forward_old(self, texts) + return (ret, ret.pooled) if getattr(self.wrapped, 'return_pooled', False) else ret + + batch_chunks, _token_count = self.process_texts(texts) + used_embeddings = {} + chunk_count = max([len(x) for x in batch_chunks]) + + if opts.return_batch_chunks: + return (batch_chunks, chunk_count) + + to_pad_count = max(opts.max_chunk_count, chunk_count) - chunk_count + if to_pad_count > 0: + self.empty_batch_chunks, _ = self.process_texts([""]) + batch_chunks = [z + x * to_pad_count for z, x in zip(batch_chunks, self.empty_batch_chunks)] + chunk_count = max([len(x) for x in batch_chunks]) + + zs = [] + for i in range(chunk_count): + batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks] + tokens = [x.tokens for x in batch_chunk] + multipliers = [x.multipliers for x in batch_chunk] + self.hijack.fixes = [x.fixes for x in batch_chunk] + for fixes in self.hijack.fixes: + for _position, embedding in fixes: + used_embeddings[embedding.name] = embedding + z = self.process_tokens(tokens, multipliers) + zs.append(z) + if len(used_embeddings) > 0: + embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()]) + self.hijack.comments.append(f"Used embeddings: {embeddings_list}") + zst = torch.hstack(zs) + zst.pooled = zs[0].pooled + if getattr(self.wrapped, 'return_pooled', False): + return (zst, zst.pooled) + else: + return zst + + def process_tokens(self, remade_batch_tokens, batch_multipliers): + """ + sends one single prompt chunk to be encoded by transformers neural network. + remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually + there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. + Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier + corresponds to one token. + """ + try: + tokens = torch.asarray(remade_batch_tokens).to(devices.device) + + # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
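+ # (For SD1 the branch below is a no-op because id_pad == id_end; for SD2/OpenCLIP models everything after the + # first end-of-text token is overwritten with the dedicated padding token.)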
+ if self.id_end != self.id_pad: + for batch_pos in range(len(remade_batch_tokens)): + index = remade_batch_tokens[batch_pos].index(self.id_end) + tokens[batch_pos, index+1:tokens.shape[1]] = self.id_pad + + z = self.encode_with_transformers(tokens) + except ValueError: + # This is where Comfy tokens were fed in that has textual inversion embeddings in the list + # i.e tensors in the list along with tokens + z = self.encode_with_transformers(remade_batch_tokens) + pooled = getattr(z, 'pooled', None) + + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise + batch_multipliers = torch.asarray(batch_multipliers).to(devices.device) + if opts.prompt_mean_norm: + original_mean = z.mean() + z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) + new_mean = z.mean() + z = z * (original_mean / new_mean) + else: + z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) + if pooled is not None: + z.pooled = pooled + return z + + +class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + self.tokenizer = wrapped.tokenizer + vocab = self.tokenizer.get_vocab() + self.comma_token = vocab.get(',', None) + self.token_mults = {} + tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] + for text, ident in tokens_with_parens: + mult = 1.0 + for c in text: + if c == '[': + mult /= 1.1 + if c == ']': + mult *= 1.1 + if c == '(': + mult *= 1.1 + if c == ')': + mult /= 1.1 + if mult != 1.0: + self.token_mults[ident] = mult + self.id_start = self.wrapped.tokenizer.bos_token_id + self.id_end = self.wrapped.tokenizer.eos_token_id + self.id_pad = self.id_end + + def tokenize(self, texts): + tokenized = self.wrapped.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"] + return tokenized + + def encode_with_transformers(self, tokens): + outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) + + if opts.CLIP_stop_at_last_layers > 1: + z = outputs.hidden_states[-opts.CLIP_stop_at_last_layers] + z = self.wrapped.transformer.text_model.final_layer_norm(z) + else: + z = outputs.last_hidden_state + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + embedding_layer = self.wrapped.transformer.text_model.embeddings + ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] + embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0) + return embedded + +class FrozenCLIPEmbedderForSDXLWithCustomWords(FrozenCLIPEmbedderWithCustomWords): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + + def encode_with_transformers(self, tokens): + outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=self.wrapped.layer == "hidden") + + if self.wrapped.layer == "last": + z = outputs.last_hidden_state + else: + z = outputs.hidden_states[self.wrapped.layer_idx] + + return z \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip_old.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip_old.py new file mode 100644 index 0000000000000000000000000000000000000000..2f6520b99bb92e39c9010ee39449ce3308a7bf44 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_clip_old.py @@ 
-0,0 +1,87 @@ +from . import sd_hijack_clip +from . import shared + + +def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): + id_start = self.id_start + id_end = self.id_end + maxlen = self.wrapped.max_length # you get to stay at 77 + used_custom_terms = [] + remade_batch_tokens = [] + hijack_comments = [] + hijack_fixes = [] + token_count = 0 + + cache = {} + batch_tokens = self.tokenize(texts) + batch_multipliers = [] + for tokens in batch_tokens: + tuple_tokens = tuple(tokens) + + if tuple_tokens in cache: + remade_tokens, fixes, multipliers = cache[tuple_tokens] + else: + fixes = [] + remade_tokens = [] + multipliers = [] + mult = 1.0 + + i = 0 + while i < len(tokens): + token = tokens[i] + + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + + mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None + if mult_change is not None: + mult *= mult_change + i += 1 + elif embedding is None: + remade_tokens.append(token) + multipliers.append(mult) + i += 1 + else: + emb_len = int(embedding.vec.shape[0]) + fixes.append((len(remade_tokens), embedding)) + remade_tokens += [0] * emb_len + multipliers += [mult] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += embedding_length_in_tokens + + if len(remade_tokens) > maxlen - 2: + vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} + ovf = remade_tokens[maxlen - 2:] + overflowing_words = [vocab.get(int(x), "") for x in ovf] + overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) + hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") + + token_count = len(remade_tokens) + remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) + remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] + cache[tuple_tokens] = (remade_tokens, fixes, multipliers) + + multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) + multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] + + remade_batch_tokens.append(remade_tokens) + hijack_fixes.append(fixes) + batch_multipliers.append(multipliers) + return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count + + +def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): + batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, _token_count = process_text_old(self, texts) + + chunk_count = max([len(x) for x in remade_batch_tokens]) + + if shared.opts.return_batch_chunks: + return (remade_batch_tokens, chunk_count) + + self.hijack.comments += hijack_comments + + if len(used_custom_terms) > 0: + embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms) + self.hijack.comments.append(f"Used embeddings: {embedding_names}") + + self.hijack.fixes = hijack_fixes + return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_open_clip.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_open_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..9f47540bfbf1de2386f126141d4447405460c13d --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_open_clip.py @@ -0,0 +1,77 @@ +# import open_clip.tokenizer +import torch + +from . 
import sd_hijack_clip, devices +from .shared import opts + +# tokenizer = open_clip.tokenizer._tokenizer + + +class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + self.tokenizer = tokenizer = self.wrapped.tokenizer + + self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] + # self.id_start = tokenizer.encoder[""] + # self.id_end = tokenizer.encoder[""] + self.id_start = tokenizer.bos_token_id + self.id_end = tokenizer.eos_token_id + self.id_pad = 0 + + def tokenize(self, texts): + assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' + + tokenized = [self.tokenizer.encode(text) for text in texts] + + return tokenized + + def encode_with_transformers(self, tokens): + # set self.wrapped.layer_idx here according to opts.CLIP_stop_at_last_layers + z = self.wrapped.encode_with_transformer(tokens) + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + ids = self.tokenizer.encode(init_text) + ids = torch.asarray([ids], device=devices.device, dtype=torch.int) + embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0) + + return embedded + + +class FrozenOpenCLIPEmbedder2WithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + self.tokenizer = tokenizer = self.wrapped.tokenizer + + self.comma_token = [v for k, v in tokenizer.encoder.items() if k == ','][0] + # self.id_start = tokenizer.encoder[""] + # self.id_end = tokenizer.encoder[""] + self.id_start = tokenizer.bos_token_id + self.id_end = tokenizer.eos_token_id + self.id_pad = 0 + + def tokenize(self, texts): + assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' + + tokenized = [self.tokenizer.encode(text) for text in texts] + + return tokenized + + def encode_with_transformers(self, tokens): + d = self.wrapped.encode_with_transformer(tokens) + z = d[self.wrapped.layer] + + pooled = d.get("pooled") + if pooled is not None: + z.pooled = pooled + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + ids = self.tokenizer.encode(init_text) + ids = torch.asarray([ids], device=devices.device, dtype=torch.int) + embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0) + + return embedded diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_optimizations.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_optimizations.py new file mode 100644 index 0000000000000000000000000000000000000000..6157c874bc942725c21bd11a98fe722d0da5bcc6 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_optimizations.py @@ -0,0 +1,671 @@ +from __future__ import annotations +import math +import psutil + +import torch +from torch import einsum + +from comfy import ldm +from ldm.util import default +from einops import rearrange + +from . 
import shared, errors, devices +from ldm.modules import sub_quadratic_attention +from .hypernetworks import hypernetwork + +def apply_funcs(undo=False): + def _apply_funcs(class_name): + import ldm.modules.diffusionmodules.model + import ldm.modules.attention + module = ldm.modules.diffusionmodules.model if "Attn" in class_name else ldm.modules.attention + if not hasattr(module, class_name): return + m = getattr(module, class_name, object()) + if not hasattr(m, "forward_orig") and hasattr(m, "forward"): + setattr(m, "forward_orig", m.forward) + if undo and hasattr(m, "forward_orig"): + setattr(m, "forward", m.forward_orig) + cross_attention = ["CrossAttention", "MemoryEfficientCrossAttention", "CrossAttentionPytorch", "CrossAttentionBirchSan"] + attn_block = ["AttnBlock", "MemoryEfficientAttnBlock", "MemoryEfficientAttnBlockPytorch"] + for class_name in cross_attention+attn_block: + _apply_funcs(class_name) +apply_funcs() + +def apply_func(m, x, fn): + if hasattr(m, x): + setattr(getattr(m, x, object()), 'forward', fn) + + +class SdOptimization: + name: str = None + label: str | None = None + cmd_opt: str | None = None + priority: int = 0 + + def title(self): + if self.label is None: + return self.name + + return f"{self.name} - {self.label}" + + def is_available(self): + return True + + def apply(self): + pass + + def undo(self): + return undo() + + +def undo(): + apply_funcs(undo=True) + # ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_ldm.modules.attention.CrossAttention_forward + # sgm.modules.attention.ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_ldm.modules.attention.CrossAttention_forward + # sgm.modules.diffusionmodules.model.ldm.modules.diffusionmodules.model.AttnBlock.forward = sgm_diffusionmodules_model_AttnBlock_forward + + +class SdOptimizationXformers(SdOptimization): + name = "xformers" + cmd_opt = "xformers" + priority = 100 + + def is_available(self): + return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.cuda.is_available() and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)) + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', xformers_attention_forward) + apply_func(ldm.modules.attention, 'MemoryEfficientCrossAttention', xformers_attention_forward) + apply_func(ldm.modules.diffusionmodules.model, 'MemoryEfficientAttnBlock', xformers_attnblock_forward) + + +class SdOptimizationSdpNoMem(SdOptimization): + name = "sdp-no-mem" + label = "scaled dot product without memory efficient attention" + cmd_opt = "opt_sdp_no_mem_attention" + priority = 80 + + def is_available(self): + return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', scaled_dot_product_no_mem_attention_forward) + apply_func(ldm.modules.attention, 'CrossAttentionPytorch', scaled_dot_product_no_mem_attention_forward) + apply_func(ldm.modules.diffusionmodules.model, 'AttnBlock', sdp_no_mem_attnblock_forward) + apply_func(ldm.modules.diffusionmodules.model, 'MemoryEfficientAttnBlock', sdp_no_mem_attnblock_forward) + + +class SdOptimizationSdp(SdOptimizationSdpNoMem): + name = "sdp" + label = "scaled dot product" + cmd_opt = "opt_sdp_attention" + priority = 70 + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', scaled_dot_product_attention_forward) + apply_func(ldm.modules.attention, 'CrossAttentionPytorch', 
scaled_dot_product_attention_forward) + apply_func(ldm.modules.diffusionmodules.model, 'AttnBlock', sdp_attnblock_forward) + + +class SdOptimizationSubQuad(SdOptimization): + name = "sub-quadratic" + cmd_opt = "opt_sub_quad_attention" + priority = 10 + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', sub_quad_attention_forward) + apply_func(ldm.modules.attention, 'CrossAttentionBirchSan', sub_quad_attention_forward) + apply_func(ldm.modules.diffusionmodules.model, 'AttnBlock', sub_quad_attnblock_forward) + +class SdOptimizationV1(SdOptimization): + name = "V1" + label = "original v1" + cmd_opt = "opt_split_attention_v1" + priority = 10 + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', split_cross_attention_forward_v1) + apply_func(ldm.modules.attention, 'CrossAttentionPytorch', split_cross_attention_forward_v1) + + +class SdOptimizationInvokeAI(SdOptimization): + name = "InvokeAI" + cmd_opt = "opt_split_attention_invokeai" + + @property + def priority(self): + return 1000 if not torch.cuda.is_available() else 10 + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', split_cross_attention_forward_invokeAI) + apply_func(ldm.modules.attention, 'CrossAttentionPytorch', split_cross_attention_forward_invokeAI) + + +class SdOptimizationDoggettx(SdOptimization): + name = "Doggettx" + cmd_opt = "opt_split_attention" + priority = 90 + + def apply(self): + apply_func(ldm.modules.attention, 'CrossAttention', split_cross_attention_forward) + apply_func(ldm.modules.attention, 'CrossAttentionPytorch', split_cross_attention_forward) + apply_func(ldm.modules.diffusionmodules.model, 'AttnBlock', cross_attention_attnblock_forward) + + +def list_optimizers(res): + res.extend([ + SdOptimizationXformers(), + SdOptimizationSdpNoMem(), + SdOptimizationSdp(), + SdOptimizationSubQuad(), + SdOptimizationV1(), + SdOptimizationInvokeAI(), + SdOptimizationDoggettx(), + ]) + + +def get_available_vram(): + if shared.device.type == 'cuda': + stats = torch.cuda.memory_stats(shared.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + return mem_free_total + else: + return psutil.virtual_memory().available + + +# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion +def split_cross_attention_forward_v1(self, x, context=None, mask=None, **kwargs): + h = self.heads + + q_in = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k_in = self.to_k(context_k) + v_in = self.to_v(context_v) + del context, context_k, context_v, x + + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) + del q_in, k_in, v_in + + dtype = q.dtype + if shared.opts.upcast_attn: + q, k, v = q.float(), k.float(), v.float() + + with devices.without_autocast(disable=not shared.opts.upcast_attn): + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + for i in range(0, q.shape[0], 2): + end = i + 2 + s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) + s1 *= self.scale + + s2 = s1.softmax(dim=-1) + del s1 + + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) + del s2 + del q, k, v + + r1 = r1.to(dtype) + + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 + + return 
self.to_out(r2) + + +# taken from https://github.com/Doggettx/stable-diffusion and modified +def split_cross_attention_forward(self, x, context=None, mask=None, **kwargs): + h = self.heads + + q_in = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k_in = self.to_k(context_k) + v_in = self.to_v(context_v) + + dtype = q_in.dtype + if shared.opts.upcast_attn: + q_in, k_in, v_in = q_in.float(), k_in.float(), v_in if v_in.device.type == 'mps' else v_in.float() + + with devices.without_autocast(disable=not shared.opts.upcast_attn): + k_in = k_in * self.scale + + del context, x + + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) + del q_in, k_in, v_in + + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + + mem_free_total = get_available_vram() + + gb = 1024 ** 3 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() + modifier = 3 if q.element_size() == 2 else 2.5 + mem_required = tensor_size * modifier + steps = 1 + + if mem_required > mem_free_total: + steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) + # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " + # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") + + if steps > 64: + max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 + raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). ' + f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) + + s2 = s1.softmax(dim=-1, dtype=q.dtype) + del s1 + + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + + del q, k, v + + r1 = r1.to(dtype) + + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 + + return self.to_out(r2) + + +# -- Taken from https://github.com/invoke-ai/InvokeAI and modified -- +mem_total_gb = psutil.virtual_memory().total // (1 << 30) + + +def einsum_op_compvis(q, k, v): + s = einsum('b i d, b j d -> b i j', q, k) + s = s.softmax(dim=-1, dtype=s.dtype) + return einsum('b i j, b j d -> b i d', s, v) + + +def einsum_op_slice_0(q, k, v, slice_size): + r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + for i in range(0, q.shape[0], slice_size): + end = i + slice_size + r[i:end] = einsum_op_compvis(q[i:end], k[i:end], v[i:end]) + return r + + +def einsum_op_slice_1(q, k, v, slice_size): + r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + r[:, i:end] = einsum_op_compvis(q[:, i:end], k, v) + return r + + +def einsum_op_mps_v1(q, k, v): + if q.shape[0] * q.shape[1] <= 2**16: # (512x512) max q.shape[1]: 4096 + return einsum_op_compvis(q, k, v) + else: + slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) + if slice_size % 4096 == 0: + slice_size -= 1 + return einsum_op_slice_1(q, k, v, slice_size) + + +def einsum_op_mps_v2(q, k, v): + if mem_total_gb > 8 and q.shape[0] * q.shape[1] <= 2**16: + return einsum_op_compvis(q, k, v) + else: + return einsum_op_slice_0(q, k, v, 1) + + +def einsum_op_tensor_mem(q, k, v, max_tensor_mb): + size_mb = q.shape[0] * 
q.shape[1] * k.shape[1] * q.element_size() // (1 << 20) + if size_mb <= max_tensor_mb: + return einsum_op_compvis(q, k, v) + div = 1 << int((size_mb - 1) / max_tensor_mb).bit_length() + if div <= q.shape[0]: + return einsum_op_slice_0(q, k, v, q.shape[0] // div) + return einsum_op_slice_1(q, k, v, max(q.shape[1] // div, 1)) + + +def einsum_op_cuda(q, k, v): + stats = torch.cuda.memory_stats(q.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(q.device) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + # Divide factor of safety as there's copying and fragmentation + return einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20)) + + +def einsum_op(q, k, v): + if q.device.type == 'cuda': + return einsum_op_cuda(q, k, v) + + if q.device.type == 'mps': + if mem_total_gb >= 32 and q.shape[0] % 32 != 0 and q.shape[0] * q.shape[1] < 2**18: + return einsum_op_mps_v1(q, k, v) + return einsum_op_mps_v2(q, k, v) + + # Smaller slices are faster due to L2/L3/SLC caches. + # Tested on i7 with 8MB L3 cache. + return einsum_op_tensor_mem(q, k, v, 32) + + +def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None, **kwargs): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k = self.to_k(context_k) + v = self.to_v(context_v) + del context, context_k, context_v, x + + dtype = q.dtype + if shared.opts.upcast_attn: + q, k, v = q.float(), k.float(), v if v.device.type == 'mps' else v.float() + + with devices.without_autocast(disable=not shared.opts.upcast_attn): + k = k * self.scale + + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) + r = einsum_op(q, k, v) + r = r.to(dtype) + return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h)) + +# -- End of code from https://github.com/invoke-ai/InvokeAI -- + + +# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1 +# The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface +def sub_quad_attention_forward(self, x, context=None, mask=None, **kwargs): + assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor." 
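+    # The projections below fold the heads into the batch dimension
+    # ((b, n, h*d) -> (b*h, n, d)) before handing off to sub_quad_attention(),
+    # which chunks the softmax so peak memory stays sub-quadratic in token count.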
+ + h = self.heads + + q = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k = self.to_k(context_k) + v = self.to_v(context_v) + del context, context_k, context_v, x + + q = q.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + + if q.device.type == 'mps': + q, k, v = q.contiguous(), k.contiguous(), v.contiguous() + + dtype = q.dtype + if shared.opts.upcast_attn: + q, k = q.float(), k.float() + + x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + + x = x.to(dtype) + + x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2) + + out_proj, dropout = self.to_out + x = out_proj(x) + x = dropout(x) + + return x + + +def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold=None, use_checkpoint=True): + bytes_per_token = torch.finfo(q.dtype).bits//8 + batch_x_heads, q_tokens, _ = q.shape + _, k_tokens, _ = k.shape + qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens + + if chunk_threshold is None: + chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) + elif chunk_threshold == 0: + chunk_threshold_bytes = None + else: + chunk_threshold_bytes = int(0.01 * chunk_threshold * get_available_vram()) + + if kv_chunk_size_min is None and chunk_threshold_bytes is not None: + kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2])) + elif kv_chunk_size_min == 0: + kv_chunk_size_min = None + + if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: + # the big matmul fits into our memory limit; do everything in 1 chunk, + # i.e. 
send it down the unchunked fast-path + kv_chunk_size = k_tokens + + with devices.without_autocast(disable=q.dtype == v.dtype): + return sub_quadratic_attention.efficient_dot_product_attention( + q, + k, + v, + query_chunk_size=q_chunk_size, + kv_chunk_size=kv_chunk_size, + kv_chunk_size_min = kv_chunk_size_min, + use_checkpoint=use_checkpoint, + ) + + +def get_xformers_flash_attention_op(q, k, v): + if not shared.cmd_opts.xformers_flash_attention: + return None + + try: + flash_attention_op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp + fw, bw = flash_attention_op + if fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)): + return flash_attention_op + except Exception as e: + errors.display_once(e, "enabling flash attention") + + return None + + +def xformers_attention_forward(self, x, context=None, mask=None, **kwargs): + h = self.heads + q_in = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k_in = self.to_k(context_k) + v_in = self.to_v(context_v) + + q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in)) + del q_in, k_in, v_in + + dtype = q.dtype + if shared.opts.upcast_attn: + q, k, v = q.float(), k.float(), v.float() + + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v)) + + out = out.to(dtype) + + out = rearrange(out, 'b n h d -> b n (h d)', h=h) + return self.to_out(out) + + +# Based on Diffusers usage of scaled dot product attention from https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/src/diffusers/models/cross_attention.py +# The scaled_dot_product_attention_forward function contains parts of code under Apache-2.0 license listed under Scaled Dot Product Attention in the Licenses section of the web UI interface +def scaled_dot_product_attention_forward(self, x, context=None, mask=None, **kwargs): + batch_size, sequence_length, inner_dim = x.shape + if mask is not None: + mask = self.prepare_attention_mask(mask, sequence_length, batch_size) + mask = mask.view(batch_size, self.heads, -1, mask.shape[-1]) + + h = self.heads + q_in = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context) + k_in = self.to_k(context_k) + v_in = self.to_v(context_v) + + head_dim = inner_dim // h + q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) + + del q_in, k_in, v_in + + dtype = q.dtype + if shared.opts.upcast_attn: + q, k, v = q.float(), k.float(), v.float() + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = torch.nn.functional.scaled_dot_product_attention( + q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, h * head_dim) + hidden_states = hidden_states.to(dtype) + + # linear proj + hidden_states = self.to_out[0](hidden_states) + # dropout + hidden_states = self.to_out[1](hidden_states) + return hidden_states + + +def scaled_dot_product_no_mem_attention_forward(self, x, context=None, mask=None, **kwargs): + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False): + return scaled_dot_product_attention_forward(self, x, context, mask) + + +def 
cross_attention_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q1 = self.q(h_) + k1 = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q1.shape + + q2 = q1.reshape(b, c, h*w) + del q1 + + q = q2.permute(0, 2, 1) # b,hw,c + del q2 + + k = k1.reshape(b, c, h*w) # b,c,hw + del k1 + + h_ = torch.zeros_like(k, device=q.device) + + mem_free_total = get_available_vram() + + tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size() + mem_required = tensor_size * 2.5 + steps = 1 + + if mem_required > mem_free_total: + steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + + w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w2 = w1 * (int(c)**(-0.5)) + del w1 + w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype) + del w2 + + # attend to values + v1 = v.reshape(b, c, h*w) + w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + del w3 + + h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + del v1, w4 + + h2 = h_.reshape(b, c, h, w) + del h_ + + h3 = self.proj_out(h2) + del h2 + + h3 += x + + return h3 + + +def xformers_attnblock_forward(self, x): + try: + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + b, c, h, w = q.shape + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) + dtype = q.dtype + if shared.opts.upcast_attn: + q, k = q.float(), k.float() + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = xformers.ops.memory_efficient_attention(q, k, v, op=get_xformers_flash_attention_op(q, k, v)) + out = out.to(dtype) + out = rearrange(out, 'b (h w) c -> b c h w', h=h) + out = self.proj_out(out) + return x + out + except NotImplementedError: + return cross_attention_attnblock_forward(self, x) + + +def sdp_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + b, c, h, w = q.shape + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) + dtype = q.dtype + if shared.opts.upcast_attn: + q, k, v = q.float(), k.float(), v.float() + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False) + out = out.to(dtype) + out = rearrange(out, 'b (h w) c -> b c h w', h=h) + out = self.proj_out(out) + return x + out + + +def sdp_no_mem_attnblock_forward(self, x): + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False): + return sdp_attnblock_forward(self, x) + + +def sub_quad_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + b, c, h, w = q.shape + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + out = rearrange(out, 'b (h w) c -> b c h w', h=h) + out = self.proj_out(out) + return x + out diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_unet.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_unet.py new file mode 100644 index 
0000000000000000000000000000000000000000..1028d7f7a1bcff1e58a8ee81d51ec16c2bbe1ae2 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_unet.py @@ -0,0 +1,134 @@ +import torch +from packaging import version + +from . import devices +from .sd_hijack_utils import CondFunc +from torch.nn.functional import silu +import comfy +from comfy import ldm +import contextlib + +class TorchHijackForUnet: + """ + This is torch, but with cat that resizes tensors to appropriate dimensions if they do not match; + this makes it possible to create pictures with dimensions that are multiples of 8 rather than 64 + """ + + def __getattr__(self, item): + if item == 'cat': + return self.cat + + if hasattr(torch, item): + return getattr(torch, item) + + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") + + def cat(self, tensors, *args, **kwargs): + if len(tensors) == 2: + a, b = tensors + if a.shape[-2:] != b.shape[-2:]: + a = torch.nn.functional.interpolate(a, b.shape[-2:], mode="nearest") + + tensors = (a, b) + + return torch.cat(tensors, *args, **kwargs) + +th = TorchHijackForUnet() + +from . import sd_hijack_optimizations +from comfy.model_base import BaseModel +from functools import wraps +sdp_no_mem = sd_hijack_optimizations.SdOptimizationSdpNoMem() +BaseModel.apply_model_orig = BaseModel.apply_model + +# @contextmanager +class ApplyOptimizationsContext: + def __init__(self): + self.nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity + self.th = ldm.modules.diffusionmodules.openaimodel.th + ldm.modules.diffusionmodules.model.nonlinearity = silu + ldm.modules.diffusionmodules.openaimodel.th = th + sdp_no_mem.apply() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + ldm.modules.diffusionmodules.model.nonlinearity = self.nonlinearity + ldm.modules.diffusionmodules.openaimodel.th = self.th + sd_hijack_optimizations.undo() + + + +def ApplyOptimizationsContext3(func): + @wraps(func) + def wrapper(*args, **kwargs): + with ApplyOptimizationsContext(): + return func(*args, **kwargs) + return wrapper + +precision_scope_null = lambda a, dtype=None: contextlib.nullcontext(a) + +# def apply_model(orig_func, self, x_noisy, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}, *args, **kwargs): +def apply_model(orig_func, self, *args, **kwargs): + transformer_options = kwargs['transformer_options'] if 'transformer_options' in kwargs else {} + c_crossattn = kwargs['c_crossattn'] if 'c_crossattn' in kwargs else args[3] + x_noisy = kwargs['x_noisy'] if 'x_noisy' in kwargs else args[0] + if not transformer_options.get('from_smZ', False): + return self.apply_model_orig(*args, **kwargs) + + cond=c_crossattn + if isinstance(cond, dict): + for y in cond.keys(): + if isinstance(cond[y], list): + cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]] + else: + cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y] + + if x_noisy.dtype != torch.float32: + precision_scope = torch.autocast + else: + precision_scope = precision_scope_null + + with precision_scope(comfy.model_management.get_autocast_device(x_noisy.device), dtype=x_noisy.dtype): # , torch.float32): + # with devices.autocast(): + out = orig_func(self, *args, **kwargs).float() + return out + +class GELUHijack(torch.nn.GELU, torch.nn.Module): + def __init__(self, *args, **kwargs): + torch.nn.GELU.__init__(self, *args, **kwargs) + def forward(self, x): + if 
devices.unet_needs_upcast: + return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet) + else: + return torch.nn.GELU.forward(self, x) + +ddpm_edit_hijack = None +def hijack_ddpm_edit(): + global ddpm_edit_hijack + if not ddpm_edit_hijack: + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) + CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) + ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) + + +unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast +# CondFunc('comfy.model_base.BaseModel.apply_model', apply_model, unet_needs_upcast) +# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) +# CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) +# if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available(): +# CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast) +# CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast) +# try: +# CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU) +# except: +# CondFunc('comfy.t2i_adapter.adapter.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU) +first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16 +first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs) +# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) +# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) +# CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond) + +# CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast) +# CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_utils.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..179ebc78e6a3d16e7a4318b8644fee690b447d12 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_hijack_utils.py @@ -0,0 +1,28 @@ +import importlib + +class CondFunc: + def __new__(cls, 
orig_func, sub_func, cond_func): + self = super(CondFunc, cls).__new__(cls) + if isinstance(orig_func, str): + func_path = orig_func.split('.') + for i in range(len(func_path)-1, -1, -1): + try: + resolved_obj = importlib.import_module('.'.join(func_path[:i])) + break + except ImportError: + pass + for attr_name in func_path[i:-1]: + resolved_obj = getattr(resolved_obj, attr_name) + orig_func = getattr(resolved_obj, func_path[-1]) + setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs)) + self.__init__(orig_func, sub_func, cond_func) + return lambda *args, **kwargs: self(*args, **kwargs) + def __init__(self, orig_func, sub_func, cond_func): + self.__orig_func = orig_func + self.__sub_func = sub_func + self.__cond_func = cond_func + def __call__(self, *args, **kwargs): + if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): + return self.__sub_func(self.__orig_func, *args, **kwargs) + else: + return self.__orig_func(*args, **kwargs) diff --git a/custom_nodes/ComfyUI_smZNodes/modules/sd_samplers_cfg_denoiser.py b/custom_nodes/ComfyUI_smZNodes/modules/sd_samplers_cfg_denoiser.py new file mode 100644 index 0000000000000000000000000000000000000000..76e592c5d56daa1d4731d9197d174bf0e48a22b0 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/sd_samplers_cfg_denoiser.py @@ -0,0 +1,260 @@ +import torch +from . import devices +from . import prompt_parser +from . import shared +from comfy import model_management +def catenate_conds(conds): + if not isinstance(conds[0], dict): + return torch.cat(conds) + + return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()} + + +def subscript_cond(cond, a, b): + if not isinstance(cond, dict): + return cond[a:b] + + return {key: vec[a:b] for key, vec in cond.items()} + + +def pad_cond(tensor, repeats, empty): + if not isinstance(tensor, dict): + return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1)).to(device=tensor.device)], axis=1) + + tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty) + return tensor + + +class CFGDenoiser(torch.nn.Module): + """ + Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) + that can take a noisy picture and produce a noise-free picture using two guidances (prompts) + instead of one. Originally, the second prompt is just an empty string, but we use non-empty + negative prompt. 
+ """ + + def __init__(self, model): + super().__init__() + self.inner_model = model + self.model_wrap = None + self.mask = None + self.nmask = None + self.init_latent = None + self.steps = None + """number of steps as specified by user in UI""" + + self.total_steps = None + """expected number of calls to denoiser calculated from self.steps and specifics of the selected sampler""" + + self.step = 0 + self.image_cfg_scale = None + self.padded_cond_uncond = False + self.sampler = None + self.model_wrap = None + self.p = None + self.mask_before_denoising = False + import comfy + import inspect + apply_model_src = inspect.getsource(comfy.model_base.BaseModel.apply_model_orig) + self.c_crossattn_as_list = 'torch.cat(c_crossattn, 1)' in apply_model_src + + # @property + # def inner_model(self): + # raise NotImplementedError() + + def combine_denoised(self, x_out, conds_list, uncond, cond_scale): + denoised_uncond = x_out[-uncond.shape[0]:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) + + return denoised + + def combine_denoised_for_edit_model(self, x_out, cond_scale): + out_cond, out_img_cond, out_uncond = x_out.chunk(3) + denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) + + return denoised + + def get_pred_x0(self, x_in, x_out, sigma): + return x_out + + def update_inner_model(self): + self.model_wrap = None + + c, uc = self.p.get_conds() + self.sampler.sampler_extra_args['cond'] = c + self.sampler.sampler_extra_args['uncond'] = uc + + def make_condition_dict(self, x, d): + if x.c_adm is not None: + k = x.c_adm['key'] + d[k] = x.c_adm[k] + d['c_crossattn'] = d['c_crossattn'].to(device=x.device) + return d + + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): + model_management.throw_exception_if_processing_interrupted() + # if state.interrupted or state.skipped: + # raise sd_samplers_common.InterruptedException + + # if sd_samplers_common.apply_refiner(self): + # cond = self.sampler.sampler_extra_args['cond'] + # uncond = self.sampler.sampler_extra_args['uncond'] + + # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, + # so is_edit_model is set to False to support AND composition. 
+ # is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 + is_edit_model = False + + conds_list, tensor = cond + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + + if self.mask_before_denoising and self.mask is not None: + x = self.init_latent * self.mask + self.nmask * x + + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + if not hasattr(x, 'c_adm'): + x.c_adm = None + + # if shared.sd_model.model.conditioning_key == "crossattn-adm": + # image_uncond = torch.zeros_like(image_cond) + # make_condition_dict = lambda c_crossattn: {"c_crossattn": c_crossattn} # pylint: disable=C3001 + # else: + # image_uncond = image_cond + # if isinstance(uncond, dict): + # make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]} + # else: + # make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]} + + # unclip + # if shared.sd_model.model.conditioning_key == "crossattn-adm": + if False: + image_uncond = torch.zeros_like(image_cond) + if self.c_crossattn_as_list: + make_condition_dict = lambda c_crossattn: {"c_crossattn": [ctn.to(device=self.device) for ctn in c_crossattn] if type(c_crossattn) is list else [c_crossattn.to(device=self.device)], 'transformer_options': {'from_smZ': True}} # pylint: disable=C3001 + else: + make_condition_dict = lambda c_crossattn: {"c_crossattn": c_crossattn, 'transformer_options': {'from_smZ': True}} # pylint: disable=C3001 + else: + image_uncond = image_cond + if isinstance(uncond, dict): + make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": None, 'transformer_options': {'from_smZ': True}} + else: + if self.c_crossattn_as_list: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn if type(c_crossattn) is list else [c_crossattn], "c_concat": None, 'transformer_options': {'from_smZ': True}} + else: + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": None, 'transformer_options': {'from_smZ': True}} + + _make_condition_dict = make_condition_dict + make_condition_dict = lambda *a, **kwa: self.make_condition_dict(x, _make_condition_dict(*a, **kwa)) + + if not is_edit_model: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond]) + else: + x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) + image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)]) + + # denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond) + # cfg_denoiser_callback(denoiser_params) + # x_in = denoiser_params.x + # image_cond_in = denoiser_params.image_cond + # sigma_in = denoiser_params.sigma + # tensor = denoiser_params.text_cond + # uncond = denoiser_params.text_uncond + skip_uncond = False + + 
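# Layout reminder: x_in / sigma_in / image_cond_in were assembled above as
+        # [cond repeats..., uncond], so the uncond copies always occupy the trailing
+        # batch_size rows; the NGMS skip below simply drops that tail on odd steps.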
+        # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+        if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+            skip_uncond = True
+            x_in = x_in[:-batch_size]
+            sigma_in = sigma_in[:-batch_size]
+
+        self.padded_cond_uncond = False
+        if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+            empty = shared.sd_model.cond_stage_model_empty_prompt
+            num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+            if num_repeats < 0:
+                tensor = pad_cond(tensor, -num_repeats, empty)
+                self.padded_cond_uncond = True
+            elif num_repeats > 0:
+                uncond = pad_cond(uncond, num_repeats, empty)
+                self.padded_cond_uncond = True
+
+        if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+            if is_edit_model:
+                cond_in = catenate_conds([tensor, uncond, uncond])
+            elif skip_uncond:
+                cond_in = tensor
+            else:
+                cond_in = catenate_conds([tensor, uncond])
+
+            if shared.opts.batch_cond_uncond:
+                x_out = self.inner_model(x_in, sigma_in, **make_condition_dict(cond_in, image_cond_in))
+            else:
+                x_out = torch.zeros_like(x_in)
+                for batch_offset in range(0, x_out.shape[0], batch_size):
+                    a = batch_offset
+                    b = a + batch_size
+                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], **make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
+        else:
+            x_out = torch.zeros_like(x_in)
+            batch_size = batch_size*2 if shared.opts.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
+                a = batch_offset
+                b = min(a + batch_size, tensor.shape[0])
+
+                if not is_edit_model:
+                    c_crossattn = subscript_cond(tensor, a, b)
+                else:
+                    c_crossattn = torch.cat([tensor[a:b], uncond])
+
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], **make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+        if not skip_uncond:
+            x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], **make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+        denoised_image_indexes = [x[0][0] for x in conds_list]
+        if skip_uncond:
+            fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+            x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+
+        # denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+        # cfg_denoised_callback(denoised_params)
+
+        devices.test_for_nans(x_out, "unet")
+
+        if is_edit_model:
+            denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+        elif skip_uncond:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+        else:
+            denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+        if not self.mask_before_denoising and self.mask is not None:
+            denoised = self.init_latent * self.mask + self.nmask * denoised
+
+        # self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma)
+
+        # if opts.live_preview_content == "Prompt":
+        #     preview = self.sampler.last_latent
+        # elif opts.live_preview_content == "Negative prompt":
+        #     preview = self.get_pred_x0(x_in[-uncond.shape[0]:], x_out[-uncond.shape[0]:], sigma)
+        # else:
+        #     preview = self.get_pred_x0(torch.cat([x_in[i:i+1] for i in denoised_image_indexes]), torch.cat([denoised[i:i+1] for i in denoised_image_indexes]), sigma)
+
+        # sd_samplers_common.store_latent(preview)
+
+        #
after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) + # cfg_after_cfg_callback(after_cfg_callback_params) + # denoised = after_cfg_callback_params.x + + self.step += 1 + del x_out + return denoised diff --git a/custom_nodes/ComfyUI_smZNodes/modules/shared.py b/custom_nodes/ComfyUI_smZNodes/modules/shared.py new file mode 100644 index 0000000000000000000000000000000000000000..dbc066af3551838dffef33949534cd25da8e81d5 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/shared.py @@ -0,0 +1,101 @@ +from comfy.model_management import vram_state, VRAMState +import logging +import sys +from comfy.cli_args import args +from comfy import model_management +from . import devices + +log = logging.getLogger("sd") +options_templates = {} +loaded_hypernetworks = [] +xformers_available = model_management.XFORMERS_IS_AVAILABLE +device = devices.device + +class Options: + data = None + data_labels = options_templates + typemap = {int: float} + + def __init__(self): + self.data = {k: v.default for k, v in self.data_labels.items()} + + def __setattr__(self, key, value): # pylint: disable=inconsistent-return-statements + if self.data is not None: + if key in self.data or key in self.data_labels: + # if cmd_opts.freeze: + # log.warning(f'Settings are frozen: {key}') + # return + # if cmd_opts.hide_ui_dir_config and key in restricted_opts: + # log.warning(f'Settings key is restricted: {key}') + # return + self.data[key] = value + return + return super(Options, self).__setattr__(key, value) # pylint: disable=super-with-arguments + + def __getattr__(self, item): + if self.data is not None: + if item in self.data: + return self.data[item] + if item in self.data_labels: + return self.data_labels[item].default + return super(Options, self).__getattribute__(item) # pylint: disable=super-with-arguments + + +opts = Options() +opts.prompt_attention = 'A1111 parser' +opts.prompt_mean_norm = True +opts.comma_padding_backtrack = 20 +opts.CLIP_stop_at_last_layers = 1 +opts.enable_emphasis = True +opts.use_old_emphasis_implementation = False +opts.disable_nan_check = True +opts.pad_cond_uncond = False +opts.s_min_uncond = 0.0 +opts.upcast_sampling = True +opts.upcast_attn = not args.dont_upcast_attention +opts.textual_inversion_add_hashes_to_infotext = False +opts.encode_count = 0 +opts.max_chunk_count = 0 +opts.return_batch_chunks = False +opts.noise = None +opts.pad_with_repeats = True +opts.randn_source = "cpu" +opts.lora_functional = False +opts.use_old_scheduling = False +opts.eta_noise_seed_delta = 0 + +opts.use_CFGDenoiser = False +opts.sgm_noise_multiplier = True +opts.debug= False + + +opts.sdxl_crop_top = 0 +opts.sdxl_crop_left = 0 +opts.sdxl_refiner_low_aesthetic_score = 2.5 +opts.sdxl_refiner_high_aesthetic_score = 6.0 + +sd_model = Options() +sd_model.cond_stage_model = Options() + +cmd_opts = Options() + +opts.batch_cond_uncond = False +cmd_opts.lowvram = vram_state == VRAMState.LOW_VRAM +cmd_opts.medvram = vram_state == VRAMState.NORMAL_VRAM +should_batch_cond_uncond = lambda: opts.batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) +opts.batch_cond_uncond = should_batch_cond_uncond() + +cmd_opts.xformers = xformers_available +cmd_opts.force_enable_xformers = xformers_available + +opts.cross_attention_optimization = "None" +# opts.cross_attention_optimization = "opt_sdp_no_mem_attention" +# opts.cross_attention_optimization = "opt_sub_quad_attention" +cmd_opts.sub_quad_q_chunk_size = 512 +cmd_opts.sub_quad_kv_chunk_size = 512 
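+# Note: sub_quad_chunk_threshold is a percentage of free VRAM rather than a byte count;
+# sub_quad_attention() in sd_hijack_optimizations.py converts it with
+# int(0.01 * chunk_threshold * get_available_vram()).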
+cmd_opts.sub_quad_chunk_threshold = 80 +cmd_opts.token_merging_ratio = 0.0 +cmd_opts.token_merging_ratio_img2img = 0.0 +cmd_opts.token_merging_ratio_hr = 0.0 +cmd_opts.sd_vae_sliced_encode = False +cmd_opts.disable_opt_split_attention = False \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/__pycache__/textual_inversion.cpython-311.pyc b/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/__pycache__/textual_inversion.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c45509b23d775f44d97c7861a8c9ee49f542e7f0 Binary files /dev/null and b/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/__pycache__/textual_inversion.cpython-311.pyc differ diff --git a/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/textual_inversion.py b/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/textual_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..0e24afefa3b47f685bc780497a5136a3ee16730d --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/modules/textual_inversion/textual_inversion.py @@ -0,0 +1,85 @@ +import torch +from .. import shared + +class Embedding: + def __init__(self, vec, name, step=None): + self.vec = vec + self.name = name + self.step = step + self.shape = None + self.vectors = 0 + self.cached_checksum = None + self.sd_checkpoint = None + self.sd_checkpoint_name = None + self.optimizer_state_dict = None + self.filename = None + + self.shape = vec.shape[-1] + self.vectors = vec.shape[0] + + def save(self, filename): + embedding_data = { + "string_to_token": {"*": 265}, + "string_to_param": {"*": self.vec}, + "name": self.name, + "step": self.step, + "sd_checkpoint": self.sd_checkpoint, + "sd_checkpoint_name": self.sd_checkpoint_name, + } + + torch.save(embedding_data, filename) + + if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None: + optimizer_saved_dict = { + 'hash': self.checksum(), + 'optimizer_state_dict': self.optimizer_state_dict, + } + torch.save(optimizer_saved_dict, f"{filename}.optim") + + def checksum(self): + if self.cached_checksum is not None: + return self.cached_checksum + + def const_hash(a): + r = 0 + for v in a: + r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF + return r + + self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}' + return self.cached_checksum + +class EmbeddingDatabase: + def __init__(self): + self.ids_lookup = {} + self.word_embeddings = {} + self.skipped_embeddings = {} + self.expected_shape = -1 + self.embedding_dirs = {} + self.previously_displayed_embeddings = () + + def register_embedding(self, embedding, model): + self.word_embeddings[embedding.name] = embedding + + ids = model.tokenize([embedding.name])[0] + + first_id = ids[0] + if first_id not in self.ids_lookup: + self.ids_lookup[first_id] = [] + + self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True) + + return embedding + + def find_embedding_at_position(self, tokens, offset): + token = tokens[offset] + possible_matches = self.ids_lookup.get(token, None) + + if possible_matches is None: + return None, None + + for ids, embedding in possible_matches: + if tokens[offset:offset + len(ids)] == ids: + return embedding, len(ids) + + return None, None \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/nodes.py b/custom_nodes/ComfyUI_smZNodes/nodes.py new file mode 100644 index 
0000000000000000000000000000000000000000..9c488ccecd92f3b8d6d3943766867bde8b23c7c4 --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/nodes.py @@ -0,0 +1,196 @@ +import torch +import inspect +from pathlib import Path +from functools import partial +import os +from .modules import prompt_parser, devices, shared +from .modules.sd_hijack import model_hijack +from .smZNodes import run, prepare_noise +from nodes import MAX_RESOLUTION +import comfy.sd +import comfy.model_management +import comfy.samplers +import comfy.sample + +BOOLEAN = [False, True] +try: + cwd_path = Path(__file__).parent + comfy_path = cwd_path.parent.parent + widgets_path = os.path.join(comfy_path, "web", "scripts", "widgets.js") + with open(widgets_path, encoding='utf8') as f: + widgets_js = f.read() + if 'BOOLEAN(' in widgets_js: + BOOLEAN = "BOOLEAN" + del widgets_js +except Exception as err: + print("[smZNodes]:", err) + +class smZ_CLIPTextEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "text": ("STRING", {"multiline": True}), + "clip": ("CLIP", ), + "parser": (["comfy", "comfy++", "A1111", "full", "compel", "fixed attention"],{"default": "comfy"}), + # whether weights are normalized by taking the mean + "mean_normalization": (BOOLEAN, {"default": True}), + "multi_conditioning": (BOOLEAN, {"default": True}), + "use_old_emphasis_implementation": (BOOLEAN, {"default": False}), + "with_SDXL": (BOOLEAN, {"default": False}), + "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}), + "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}), + "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "text_g": ("STRING", {"multiline": True, "placeholder": "CLIP_G"}), + "text_l": ("STRING", {"multiline": True, "placeholder": "CLIP_L"}), + }, + "optional": { + "smZ_steps": ("INT", {"default": 1, "min": 1, "max": 0xffffffffffffffff}), + }, + } + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "encode" + CATEGORY = "conditioning" + + def encode(self, clip: comfy.sd.CLIP, text, parser, mean_normalization, + multi_conditioning, use_old_emphasis_implementation, + with_SDXL, ascore, width, height, crop_w, + crop_h, target_width, target_height, text_g, text_l, smZ_steps=1): + params = locals() + params['steps'] = params.pop('smZ_steps', smZ_steps) + from .modules.shared import opts + is_sdxl = "SDXL" in type(clip.cond_stage_model).__name__ + + should_use_fp16_signature = inspect.signature(comfy.model_management.should_use_fp16) + _p = should_use_fp16_signature.parameters + devices.device = shared.device = clip.patcher.load_device if hasattr(clip.patcher, 'load_device') else clip.device + if 'device' in _p and 'prioritize_performance' in _p: + should_use_fp16 = partial(comfy.model_management.should_use_fp16, device=devices.device, prioritize_performance=False) + elif 'device' in should_use_fp16_signature.parameters: + should_use_fp16 = partial(comfy.model_management.should_use_fp16, device=devices.device) + else: + should_use_fp16 = comfy.model_management.should_use_fp16 + dtype = torch.float16 if should_use_fp16() else torch.float32 + dtype_unet= dtype + devices.dtype = dtype + # devices.dtype_unet was hijacked so it will be the correct dtype by default + if 
devices.dtype_unet == torch.float16: + devices.dtype_unet = dtype_unet + devices.unet_needs_upcast = opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16 + devices.dtype_vae = comfy.model_management.vae_dtype() if hasattr(comfy.model_management, 'vae_dtype') else torch.float32 + + params.pop('self', None) + result = run(**params) + result[0][0][1]['params'] = {} + result[0][0][1]['params'].update(params) + if opts.pad_cond_uncond: + text=params['text'] + with_SDXL=params['with_SDXL'] + params['text'] = '' + params['with_SDXL'] = False + empty = run(**params)[0] + params['text'] = text + params['with_SDXL'] = with_SDXL + shared.sd_model.cond_stage_model_empty_prompt = empty[0][0] + return result + +# Hack: string type that is always equal in not equal comparisons +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + +# Our any instance wants to be a wildcard string +anytype = AnyType("*") + +class smZ_Settings: + @classmethod + def INPUT_TYPES(s): + from .modules.shared import opts + return {"required": { + "clip": ("CLIP", ), + }, + "optional": { + "extra": ("STRING", {"multiline": True, "default": '{"show":true}'}), + + "ㅤ"*1: ( "STRING", {"multiline": False, "default": "Stable Diffusion"}), + "info_comma_padding_backtrack": ("STRING", {"multiline": True, "default": "Prompt word wrap length limit\nin tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"}), + "Prompt word wrap length limit": ("INT", {"default": opts.comma_padding_backtrack, "min": 0, "max": 74, "step": 1}), + # "enable_emphasis": (BOOLEAN, {"default": opts.enable_emphasis}), + "info_RNG": ("STRING", {"multiline": True, "default": "Random number generator source.\nchanges seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"}), + "RNG": (["cpu", "gpu", "nv"],{"default": opts.randn_source}), + + "ㅤ"*2: ("STRING", {"multiline": False, "default": "Compute Settings"}), + "info_disable_nan_check": ("STRING", {"multiline": True, "default": "Disable NaN check in produced images/latent spaces. Only for CFGDenoiser."}), + "disable_nan_check": (BOOLEAN, {"default": opts.disable_nan_check}), + + "ㅤ"*3: ("STRING", {"multiline": False, "default": "Sampler parameters"}), + "info_eta_noise_seed_delta": ("STRING", {"multiline": True, "default": "Eta noise seed delta\ndoes not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"}), + "ENSD": ("INT", {"default": opts.eta_noise_seed_delta, "min": 0, "max": 0xffffffffffffffff, "step": 1}), + "info_sgm_noise_multiplier": ("STRING", {"multiline": True, "default": "SGM noise multiplier\nmatch initial noise to official SDXL implementation - only useful for reproducing images\nsee https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818"}), + "sgm_noise_multiplier": (BOOLEAN, {"default": opts.sgm_noise_multiplier}), + "info_upcast_sampling": ("STRING", {"multiline": True, "default": "upcast sampling.\nNo effect with --force-fp32. 
Usually produces similar results to --force-fp32 with better performance while using less memory."}),
+                "upcast_sampling": (BOOLEAN, {"default": opts.upcast_sampling}),
+
+                "ㅤ"*4: ("STRING", {"multiline": False, "default": "Optimizations"}),
+                "info_NGMS": ("STRING", {"multiline": True, "default": "Negative Guidance minimum sigma\nskip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster. Only for CFGDenoiser.\nsee https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177"}),
+                "NGMS": ("FLOAT", {"default": opts.s_min_uncond, "min": 0.0, "max": 4.0, "step": 0.01}),
+                "info_pad_cond_uncond": ("STRING", {"multiline": True, "default": "Pad prompt/negative prompt to be same length\nimproves performance when prompt and negative prompt have different lengths; changes seeds. Only for CFGDenoiser."}),
+                "pad_cond_uncond": (BOOLEAN, {"default": opts.pad_cond_uncond}),
+                "info_batch_cond_uncond": ("STRING", {"multiline": True, "default": "Batch cond/uncond\ndo both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed – enabled on SDXL models. Only for CFGDenoiser."}),
+                "batch_cond_uncond": (BOOLEAN, {"default": opts.batch_cond_uncond}),
+
+                "ㅤ"*5: ("STRING", {"multiline": False, "default": "Compatibility"}),
+                "info_use_prev_scheduling": ("STRING", {"multiline": True, "default": "Previous prompt editing timelines\nFor [red:green:N]; previous: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), otherwise it's an absolute number of steps"}),
+                "Use previous prompt editing timelines": (BOOLEAN, {"default": opts.use_old_scheduling}),
+
+                "ㅤ"*6: ("STRING", {"multiline": False, "default": "Experimental"}),
+                "info_use_CFGDenoiser": ("STRING", {"multiline": True, "default": "CFGDenoiser\nAn experimental option to use stable-diffusion-webui's denoiser.
It may not work as expected with inpainting/UnCLIP models or ComfyUI's Conditioning nodes, but it allows you to get identical images regardless of the prompt."}), + "Use CFGDenoiser": (BOOLEAN, {"default": opts.use_CFGDenoiser}), + "info_debug": ("STRING", {"multiline": True, "default": "Debugging messages in the console."}), + "Debug": (BOOLEAN, {"default": opts.debug, "label_on": "on", "label_off": "off"}), + }} + RETURN_TYPES = ("CLIP",) + OUTPUT_NODE = False + FUNCTION = "run" + CATEGORY = "advanced" + + def run(self, *args, **kwargs): + from .modules.shared import opts + device = comfy.model_management.get_torch_device() + + clip = kwargs.pop('clip', None) if 'clip' in kwargs else args[0] + kwargs['s_min_uncond'] = max(min(kwargs.pop('NGMS'), 4.0), 0) + kwargs['comma_padding_backtrack'] = kwargs.pop('Prompt word wrap length limit') + kwargs['comma_padding_backtrack'] = max(min(kwargs['comma_padding_backtrack'], 74), 0) + kwargs['use_old_scheduling']=kwargs.pop("Use previous prompt editing timelines") + kwargs['use_CFGDenoiser'] = kwargs.pop("Use CFGDenoiser") + kwargs['debug'] = kwargs.pop('Debug') + kwargs['randn_source'] = kwargs.pop('RNG') + kwargs['eta_noise_seed_delta'] = kwargs.pop('ENSD') + + [kwargs.pop(k, None) for k in [k for k in kwargs.keys() if 'info' in k or 'heading' in k or 'ㅤ' in k]] + for k,v in kwargs.items(): + setattr(opts, k, v) + + if not hasattr(comfy.sample, 'prepare_noise_orig'): + comfy.sample.prepare_noise_orig = comfy.sample.prepare_noise + if opts.randn_source == 'cpu': + device = torch.device("cpu") + _prepare_noise = partial(prepare_noise, device=device.type) + comfy.sample.prepare_noise = _prepare_noise + return (clip,) + +# A dictionary that contains all nodes you want to export with their names +# NOTE: names should be globally unique +NODE_CLASS_MAPPINGS = { + "smZ CLIPTextEncode": smZ_CLIPTextEncode, + "smZ Settings": smZ_Settings, +} +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "smZ CLIPTextEncode" : "CLIP Text Encode++", + "smZ Settings" : "Settings (smZ)", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI_smZNodes/smZNodes.py b/custom_nodes/ComfyUI_smZNodes/smZNodes.py new file mode 100644 index 0000000000000000000000000000000000000000..74263c1b15f382d9c1e6f36a76d01349c627792b --- /dev/null +++ b/custom_nodes/ComfyUI_smZNodes/smZNodes.py @@ -0,0 +1,1058 @@ +from __future__ import annotations +import comfy +import torch +from typing import List, Tuple +from functools import partial +from .modules import prompt_parser, shared, devices +from .modules.shared import opts +from .modules.sd_samplers_cfg_denoiser import CFGDenoiser +from .modules.sd_hijack_clip import FrozenCLIPEmbedderForSDXLWithCustomWords +from .modules.sd_hijack_open_clip import FrozenOpenCLIPEmbedder2WithCustomWords +from .modules.textual_inversion.textual_inversion import Embedding +import comfy.sdxl_clip +import comfy.sd1_clip +import comfy.sample +from comfy.sd1_clip import SD1Tokenizer, unescape_important, escape_important, token_weights, expand_directory_list +from nodes import CLIPTextEncode +from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution +from comfy import model_management +import inspect +from textwrap import dedent, indent +import functools +import tempfile +import importlib +import sys +import os +import re +import contextlib +import itertools +import binascii + +try: + from comfy_extras.nodes_clip_sdxl import CLIPTextEncodeSDXL, 
CLIPTextEncodeSDXLRefiner +except Exception as err: + print(f"[smZNodes]: Your ComfyUI version is outdated. Please update to the latest version. ({err})") + class CLIPTextEncodeSDXL(CLIPTextEncode): ... + class CLIPTextEncodeSDXLRefiner(CLIPTextEncode): ... + +def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + +class PopulateVars: + def populate_self_variables(self, from_): + super_attrs = vars(from_) + self_attrs = vars(self) + self_attrs.update(super_attrs) + +should_use_fp16_signature = inspect.signature(comfy.model_management.should_use_fp16) +class ClipTextEncoderCustom: + + def _forward(self: comfy.sd1_clip.SD1ClipModel, tokens): + def set_dtype_compat(dtype, newv = False): + dtype_num = lambda d : int(re.sub(r'.*?(\d+)', r'\1', repr(d))) + _p = should_use_fp16_signature.parameters + # newer versions of ComfyUI upcasts the transformer embeddings, which is technically correct + # when it's a newer version, we want to downcast it to torch.float16, so set newv=True + # newv = 'device' in _p and 'prioritize_performance' in _p # comment this to have default comfy behaviour + if dtype_num(dtype) >= 32: + newv = False + if not newv: return + dtype = devices.dtype if dtype != devices.dtype else dtype + # self.transformer.text_model.embeddings.position_embedding.to(dtype) + # self.transformer.text_model.embeddings.token_embedding.to(dtype) + inner_model = getattr(self.transformer, self.inner_name, None) + if inner_model is not None and hasattr(inner_model, "embeddings"): + inner_model.embeddings.to(dtype) + else: + self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(dtype)) + def reset_dtype_compat(): + # token_embedding_dtype = position_embedding_dtype = torch.float32 + # self.transformer.text_model.embeddings.token_embedding.to(token_embedding_dtype) + # self.transformer.text_model.embeddings.position_embedding.to(position_embedding_dtype) + inner_model = getattr(self.transformer, self.inner_name, None) + if inner_model is not None and hasattr(inner_model, "embeddings"): + inner_model.embeddings.to(torch.float32) + else: + self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(torch.float32)) + enable_compat = False + if enable_compat: set_dtype_compat(torch.float16, enable_compat) + + backup_embeds = self.transformer.get_input_embeddings() + device = backup_embeds.weight.device + tokens = self.set_up_textual_embeddings(tokens, backup_embeds) + tokens = torch.LongTensor(tokens).to(device) + + # dtype=backup_embeds.weight.dtype + if hasattr(self.transformer, 'dtype'): + dtype = self.transformer.dtype + else: + dtype = getattr(self.transformer, self.inner_name, self.transformer.text_model).final_layer_norm.weight.dtype + + if dtype != torch.float32: + precision_scope = torch.autocast + else: + precision_scope = lambda a, dtype=None: contextlib.nullcontext(a) + + with precision_scope(model_management.get_autocast_device(device), dtype=dtype if enable_compat else torch.float32): + attention_mask = None + if self.enable_attention_masks: + attention_mask = torch.zeros_like(tokens) + max_token = 
self.transformer.get_input_embeddings().weight.shape[0] - 1 + for x in range(attention_mask.shape[0]): + for y in range(attention_mask.shape[1]): + attention_mask[x, y] = 1 + if tokens[x, y] == max_token: + break + + outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) + self.transformer.set_input_embeddings(backup_embeds) + + if self.layer == "last": + z = outputs[0] + else: + z = outputs[1] + + if outputs[2] is not None: + pooled_output = outputs[2].float() + else: + pooled_output = None + + if enable_compat: reset_dtype_compat() + + if self.text_projection is not None and pooled_output is not None: + pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() + return z.float(), pooled_output + + def encode_with_transformers_comfy_(self, tokens: List[List[int]], return_pooled=False): + tokens_orig = tokens + try: + if isinstance(tokens, torch.Tensor): + tokens = tokens.tolist() + z, pooled = ClipTextEncoderCustom._forward(self.wrapped, tokens) # self.wrapped.encode(tokens) + except Exception as e: + z, pooled = ClipTextEncoderCustom._forward(self.wrapped, tokens_orig) + + # z = self.encode_with_transformers__(tokens_bak) + if z.device != devices.device: + z = z.to(device=devices.device) + # if z.dtype != devices.dtype: + # z = z.to(dtype=devices.dtype) + # if pooled.dtype != devices.dtype: + # pooled = pooled.to(dtype=devices.dtype) + z.pooled = pooled + return (z, pooled) if return_pooled else z + + def encode_with_transformers_comfy(self, tokens: List[List[int]], return_pooled=False) -> Tuple[torch.Tensor, torch.Tensor]: + ''' + This function is different from `clip.cond_stage_model.encode_token_weights()` + in that the tokens are `List[List[int]]`, not including the weights. + + Originally from `sd1_clip.py`: `encode()` -> `forward()` + ''' + tokens_orig = tokens + try: + if isinstance(tokens, torch.Tensor): + tokens = tokens.tolist() + z, pooled = self.wrapped(tokens) # self.wrapped.encode(tokens) + except Exception as e: + z, pooled = self.wrapped(tokens_orig) + + # z = self.encode_with_transformers__(tokens_bak) + if z.device != devices.device: + z = z.to(device=devices.device) + # if z.dtype != devices.dtype: + # z = z.to(dtype=devices.dtype) + # if pooled.dtype != devices.dtype: + # pooled = pooled.to(dtype=devices.dtype) + z.pooled = pooled + return (z, pooled) if return_pooled else z + +class FrozenOpenCLIPEmbedder2WithCustomWordsCustom(FrozenOpenCLIPEmbedder2WithCustomWords, ClipTextEncoderCustom, PopulateVars): + def __init__(self, wrapped: comfy.sdxl_clip.SDXLClipG, hijack): + self.populate_self_variables(wrapped.tokenizer_parent) + super().__init__(wrapped, hijack) + self.id_start = self.wrapped.tokenizer.bos_token_id + self.id_end = self.wrapped.tokenizer.eos_token_id + self.id_pad = 0 + # Below is safe to do since ComfyUI uses the same CLIP model + # for Open Clip instead of an actual Open Clip model? 
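+        # Build the A1111-style emphasis table: each '(' in a vocab token multiplies its
+        # weight by 1.1 and each '[' divides it by 1.1, compounding per character
+        # (e.g. the token '((' maps to 1.1 * 1.1 = 1.21).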
+ self.token_mults = {} + vocab = self.tokenizer.get_vocab() + self.comma_token = vocab.get(',', None) + tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] + for text, ident in tokens_with_parens: + mult = 1.0 + for c in text: + if c == '[': + mult /= 1.1 + if c == ']': + mult *= 1.1 + if c == '(': + mult *= 1.1 + if c == ')': + mult /= 1.1 + if mult != 1.0: + self.token_mults[ident] = mult + + def tokenize_line(self, line): + line = parse_and_register_embeddings(self, line) + return super().tokenize_line(line) + + def encode(self, tokens): + return self.encode_with_transformers(tokens, True) + + def encode_with_transformers(self, tokens, return_pooled=False): + return self.encode_with_transformers_comfy_(tokens, return_pooled) + + def encode_token_weights(self, tokens): + pass + + def tokenize(self, texts): + # assert not opts.use_old_emphasis_implementation, 'Old emphasis implementation not supported for Open Clip' + tokenized = [self.tokenizer(text)["input_ids"][1:-1] for text in texts] + return tokenized + + +class FrozenCLIPEmbedderWithCustomWordsCustom(FrozenCLIPEmbedderForSDXLWithCustomWords, ClipTextEncoderCustom, PopulateVars): + ''' + Custom class that also inherits a tokenizer to have the `_try_get_embedding()` method. + ''' + def __init__(self, wrapped: comfy.sd1_clip.SD1ClipModel, hijack): + self.populate_self_variables(wrapped.tokenizer_parent) # SD1Tokenizer + # self.embedding_identifier_tokenized = wrapped.tokenizer([self.embedding_identifier])["input_ids"][0][1:-1] + super().__init__(wrapped, hijack) + + def encode_token_weights(self, tokens): + pass + + def encode(self, tokens): + return self.encode_with_transformers(tokens, True) + + def encode_with_transformers(self, tokens, return_pooled=False): + return self.encode_with_transformers_comfy_(tokens, return_pooled) + + def tokenize_line(self, line): + line = parse_and_register_embeddings(self, line) + return super().tokenize_line(line) + + def tokenize(self, texts): + tokenized = [self.tokenizer(text)["input_ids"][1:-1] for text in texts] + return tokenized + +emb_re_ = r"(embedding:)?(?:({}[\w\.\-\!\$\/\\]+(\.safetensors|\.pt|\.bin)|(?(1)[\w\.\-\!\$\/\\]+|(?!)))(\.safetensors|\.pt|\.bin)?)(?::(\d+\.?\d*|\d*\.\d+))?" + +def tokenize_with_weights_custom(self, text:str, return_word_ids=False): + ''' + Takes a prompt and converts it to a list of (token, weight, word id) elements. + Tokens can both be integer tokens and pre computed CLIP tensors. + Word id values are unique per word and embedding, where the id 0 is reserved for non word tokens. 
+def tokenize_with_weights_custom(self, text:str, return_word_ids=False):
+    '''
+    Takes a prompt and converts it to a list of (token, weight, word id) elements.
+    Tokens can be both integer tokens and precomputed CLIP tensors.
+    Word id values are unique per word and embedding, where the id 0 is reserved for non-word tokens.
+    The returned list has dimensions NxM, where M is the input size of CLIP.
+    '''
+    if self.pad_with_end:
+        pad_token = self.end_token
+    else:
+        pad_token = 0
+
+    text = escape_important(text)
+    parsed_weights = token_weights(text, 1.0)
+
+    embs = get_valid_embeddings(self.embedding_directory) if self.embedding_directory is not None else []
+    embs_str = '|'.join(embs)
+    emb_re = emb_re_.format(embs_str + '|' if embs_str else '')
+    emb_re = re.compile(emb_re, flags=re.MULTILINE | re.UNICODE | re.IGNORECASE)
+
+    #tokenize words
+    tokens = []
+    for weighted_segment, weight in parsed_weights:
+        to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
+        to_tokenize = [x for x in to_tokenize if x != ""]
+        for word in to_tokenize:
+            matches = emb_re.finditer(word)
+            last_end = 0
+            leftovers = []
+            for match in matches:
+                start = match.start()
+                end = match.end()
+                if (fragment := word[last_end:start]):
+                    leftovers.append(fragment)
+                ext = match.group(4) or ''
+                embedding_sname = match.group(2) or ''
+                embedding_name = embedding_sname + ext
+                if embedding_name:
+                    embed, leftover = self._try_get_embedding(embedding_name)
+                    if embed is None:
+                        print(f"warning, embedding:{embedding_name} does not exist, ignoring")
+                    else:
+                        if opts.debug:
+                            print(f'[smZNodes] using embedding:{embedding_name}')
+                        if len(embed.shape) == 1:
+                            tokens.append([(embed, weight)])
+                        else:
+                            tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
+                last_end = end
+            if (fragment := word[last_end:]):
+                leftovers.append(fragment)
+            # Tokenize only the leftover text; tokenizing `word` here would
+            # re-encode any matched embedding name as plain text.
+            word_new = ''.join(leftovers)
+            tokens.append([(t, weight) for t in self.tokenizer(word_new)["input_ids"][self.tokens_start:-1]])
+
+    #reshape token array to CLIP input size
+    batched_tokens = []
+    batch = []
+    if self.start_token is not None:
+        batch.append((self.start_token, 1.0, 0))
+    batched_tokens.append(batch)
+    for i, t_group in enumerate(tokens):
+        #determine if we're going to try and keep the tokens in a single batch
+        is_large = len(t_group) >= self.max_word_length
+
+        while len(t_group) > 0:
+            if len(t_group) + len(batch) > self.max_length - 1:
+                remaining_length = self.max_length - len(batch) - 1
+                #break word in two and add end token
+                if is_large:
+                    batch.extend([(t, w, i + 1) for t, w in t_group[:remaining_length]])
+                    batch.append((self.end_token, 1.0, 0))
+                    t_group = t_group[remaining_length:]
+                #add end token and pad
+                else:
+                    batch.append((self.end_token, 1.0, 0))
+                    if self.pad_to_max_length:
+                        batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
+                #start new batch
+                batch = []
+                if self.start_token is not None:
+                    batch.append((self.start_token, 1.0, 0))
+                batched_tokens.append(batch)
+            else:
+                batch.extend([(t, w, i + 1) for t, w in t_group])
+                t_group = []
+
+    #fill last batch
+    batch.append((self.end_token, 1.0, 0))
+    if self.pad_to_max_length:
+        batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch)))
+
+    if not return_word_ids:
+        batched_tokens = [[(t, w) for t, w, _ in x] for x in batched_tokens]
+
+    return batched_tokens
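The reshape stage above is what turns the flat stream of weighted tokens into 77-wide rows for CLIP. Stripped of weights, word ids, and the long-word splitting logic, the packing works roughly like this (hypothetical helper; the default ids are SD1 CLIP's BOS/EOS tokens):

    def chunk_tokens(tok_ids, start=49406, end=49407, max_len=77):
        # Pack token ids into CLIP-sized rows: [start] + up to 75 ids + [end],
        # padding the last row out to max_len with the end token.
        rows, row = [], [start]
        for t in tok_ids:
            if len(row) == max_len - 1:  # leave room for the end token
                row.append(end)
                rows.append(row)
                row = [start]
            row.append(t)
        row.append(end)
        row += [end] * (max_len - len(row))
        rows.append(row)
        return rows

    rows = chunk_tokens(list(range(100)))
    print(len(rows), [len(r) for r in rows])  # 2 [77, 77]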
+def get_valid_embeddings(embedding_directory):
+    from builtins import any as b_any
+    exts = ['.safetensors', '.pt', '.bin']
+    if isinstance(embedding_directory, str):
+        embedding_directory = [embedding_directory]
+    embedding_directory = expand_directory_list(embedding_directory)
+    embs = []
+    for embd in embedding_directory:
+        for root, dirs, files in os.walk(embd, topdown=False):
+            for name in files:
+                if not b_any(x in os.path.splitext(name)[1] for x in exts): continue
+                n = os.path.basename(name)
+                for ext in exts: n = n.removesuffix(ext)
+                embs.append(re.escape(n))
+    # longest names first so shorter names can't shadow longer ones in the regex
+    embs.sort(key=len, reverse=True)
+    return embs
+
+def parse_and_register_embeddings(self: FrozenCLIPEmbedderWithCustomWordsCustom|FrozenOpenCLIPEmbedder2WithCustomWordsCustom, text: str, return_word_ids=False):
+    embedding_directory = self.wrapped.tokenizer_parent.embedding_directory
+    embs = get_valid_embeddings(embedding_directory)
+    embs_str = '|'.join(embs)
+    emb_re = emb_re_.format(embs_str + '|' if embs_str else '')
+    emb_re = re.compile(emb_re, flags=re.MULTILINE | re.UNICODE | re.IGNORECASE)
+    matches = emb_re.finditer(text)
+    for match in matches:
+        found = False
+        ext = match.group(4) or ''
+        embedding_sname = match.group(2) or ''
+        embedding_name = embedding_sname + ext
+        if embedding_name:
+            embed, _ = self.wrapped.tokenizer_parent._try_get_embedding(embedding_name)
+            if embed is not None:
+                found = True
+                if opts.debug:
+                    print(f'[smZNodes] using embedding:{embedding_name}')
+                if embed.device != devices.device:
+                    embed = embed.to(device=devices.device)
+                self.hijack.embedding_db.register_embedding(Embedding(embed, embedding_sname), self)
+        if not found:
+            print(f"warning, embedding:{embedding_name} does not exist, ignoring")
+    # strip the "embedding:" prefix and any extension/weight, keeping just the name
+    out = emb_re.sub(r"\2", text)
+    return out
+
+def expand(tensor1, tensor2):
+    def adjust_tensor_shape(tensor_small, tensor_big):
+        # Calculate the replication factor:
+        # -(-a // b) is the ceiling of a / b without importing math.ceil
+        replication_factor = -(-tensor_big.size(1) // tensor_small.size(1))
+
+        # Use repeat to extend tensor_small
+        tensor_small_extended = tensor_small.repeat(1, replication_factor, 1)
+
+        # Take the rows of the extended tensor_small to match tensor_big
+        tensor_small_matched = tensor_small_extended[:, :tensor_big.size(1), :]
+
+        return tensor_small_matched
+
+    # If the second dimensions differ, repeat the smaller tensor to match
+    if tensor1.size(1) != tensor2.size(1):
+        if tensor1.size(1) < tensor2.size(1):
+            tensor1 = adjust_tensor_shape(tensor1, tensor2)
+        else:
+            tensor2 = adjust_tensor_shape(tensor2, tensor1)
+    return (tensor1, tensor2)
+
+def reconstruct_schedules(schedules, step):
+    create_reconstruct_fn = lambda _cc: prompt_parser.reconstruct_multicond_batch if type(_cc).__name__ == "MulticondLearnedConditioning" else prompt_parser.reconstruct_cond_batch
+    reconstruct_fn = create_reconstruct_fn(schedules)
+    return reconstruct_fn(schedules, step)
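`reconstruct_schedules` defers to A1111's prompt_parser, which stores each scheduled prompt as a list of (end_step, cond) entries produced by `[from:to:when]` syntax and picks the entry active at a given step. A toy model of that lookup, illustrative only and not the actual prompt_parser data structures:

    # Hypothetical stand-in for a scheduled cond: (end_step, cond) pairs.
    schedules = [(5, "cond_A"), (20, "cond_B")]

    def cond_at_step(schedules, step):
        # The active cond is the first entry whose end_step covers the step.
        for end_step, cond in schedules:
            if step <= end_step:
                return cond
        return schedules[-1][1]

    print(cond_at_step(schedules, 3))   # cond_A
    print(cond_at_step(schedules, 12))  # cond_B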
+class ClipTokenWeightEncoder:
+    def encode_token_weights(self, token_weight_pairs, steps=0, current_step=0, multi=False):
+        schedules = token_weight_pairs
+        texts = token_weight_pairs
+        conds_list = [[(0, 1.0)]]
+        from .modules.sd_hijack import model_hijack
+        try:
+            model_hijack.hijack(self)
+            if isinstance(token_weight_pairs, list) and isinstance(token_weight_pairs[0], str):
+                if multi: schedules = prompt_parser.get_multicond_learned_conditioning(model_hijack.cond_stage_model, texts, steps, None, opts.use_old_scheduling)
+                else: schedules = prompt_parser.get_learned_conditioning(model_hijack.cond_stage_model, texts, steps, None, opts.use_old_scheduling)
+                cond = reconstruct_schedules(schedules, current_step)
+                if type(cond) is tuple:
+                    conds_list, cond = cond
+                pooled = cond.pooled.cpu()
+                cond = cond.cpu()
+                cond.pooled = pooled
+                cond.pooled.conds_list = conds_list
+                cond.pooled.schedules = schedules
+            else:
+                # comfy++
+                def encode_toks(_token_weight_pairs):
+                    zs = []
+                    first_pooled = None
+                    for batch_chunk in _token_weight_pairs:
+                        tokens = [x[0] for x in batch_chunk]
+                        multipliers = [x[1] for x in batch_chunk]
+                        z = model_hijack.cond_stage_model.process_tokens([tokens], [multipliers])
+                        if first_pooled is None:
+                            first_pooled = z.pooled
+                        zs.append(z)
+                    zcond = torch.hstack(zs)
+                    zcond.pooled = first_pooled
+                    return zcond
+                # non-sdxl will be something like: {"l": [[]]}
+                if isinstance(token_weight_pairs, dict):
+                    token_weight_pairs = next(iter(token_weight_pairs.values()))
+                cond = encode_toks(token_weight_pairs)
+                pooled = cond.pooled.cpu()
+                cond = cond.cpu()
+                cond.pooled = pooled
+                cond.pooled.conds_list = conds_list
+        finally:
+            model_hijack.undo_hijack(model_hijack.cond_stage_model)
+        return (cond, cond.pooled)
+
+class SD1ClipModel(ClipTokenWeightEncoder): ...
+
+class SDXLClipG(ClipTokenWeightEncoder): ...
+
+class SDXLClipModel(ClipTokenWeightEncoder):
+
+    def encode_token_weights(self: comfy.sdxl_clip.SDXLClipModel, token_weight_pairs, steps=0, current_step=0, multi=False):
+        token_weight_pairs_g = token_weight_pairs["g"]
+        token_weight_pairs_l = token_weight_pairs["l"]
+
+        self.clip_g.encode_token_weights_orig = self.clip_g.encode_token_weights
+        self.clip_l.encode_token_weights_orig = self.clip_l.encode_token_weights
+        self.clip_g.cond_stage_model = self.clip_g
+        self.clip_l.cond_stage_model = self.clip_l
+        self.clip_g.encode_token_weights = partial(SDXLClipG.encode_token_weights, self.clip_g)
+        self.clip_l.encode_token_weights = partial(SD1ClipModel.encode_token_weights, self.clip_l)
+        try:
+            g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g, steps, current_step, multi)
+            l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l, steps, current_step, multi)
+        finally:
+            self.clip_g.encode_token_weights = self.clip_g.encode_token_weights_orig
+            self.clip_l.encode_token_weights = self.clip_l.encode_token_weights_orig
+            self.clip_g.cond_stage_model = None
+            self.clip_l.cond_stage_model = None
+
+        if hasattr(g_pooled, 'schedules') and hasattr(l_pooled, 'schedules'):
+            g_pooled.schedules = {"g": g_pooled.schedules, "l": l_pooled.schedules}
+
+        # pad in both directions so whichever sequence is shorter gets repeated
+        g_out, l_out = expand(g_out, l_out)
+        l_out, g_out = expand(l_out, g_out)
+
+        return torch.cat([l_out, g_out], dim=-1), g_pooled
+
+class SDXLRefinerClipModel(ClipTokenWeightEncoder):
+
+    def encode_token_weights(self: comfy.sdxl_clip.SDXLClipModel, token_weight_pairs, steps=0, current_step=0, multi=False):
+        self.clip_g.encode_token_weights_orig = self.clip_g.encode_token_weights
+        self.clip_g.encode_token_weights = partial(SDXLClipG.encode_token_weights, self.clip_g)
+        token_weight_pairs_g = token_weight_pairs["g"]
+        try: g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g, steps, current_step, multi)
+        finally: self.clip_g.encode_token_weights = self.clip_g.encode_token_weights_orig
+        if hasattr(g_pooled, 'schedules'):
+            g_pooled.schedules = {"g": g_pooled.schedules}
+        return (g_out, g_pooled)
+
+def is_prompt_editing(schedules):
+    if schedules is None: return False
+    if not isinstance(schedules, dict):
+        schedules = {'g': schedules}
+    for k, v in schedules.items():
+        if isinstance(v, list):
+            if len(v[0]) != 1: return True
+        else:
+            if len(v.batch[0][0].schedules) != 1: return True
+    return False
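CLIP-L and CLIP-G can tokenize the same prompt into different numbers of 77-token chunks, which is why `expand` (defined earlier) is applied in both directions before the two embeddings are concatenated along the feature axis. A quick shape check, assuming `expand` is in scope:

    import torch

    cond_l = torch.zeros(1, 77, 768)    # one CLIP-L chunk
    cond_g = torch.zeros(1, 154, 1280)  # two CLIP-G chunks

    cond_l, cond_g = expand(cond_l, cond_g)
    # both now cover 154 positions, so torch.cat([l, g], dim=-1) lines up
    print(cond_l.shape, cond_g.shape)  # torch.Size([1, 154, 768]) torch.Size([1, 154, 1280])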
+# ===================================================================
+# RNG
+from .modules import rng_philox
+def randn_without_seed(x, generator=None, randn_source="cpu"):
+    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
+
+    Use either randn() or manual_seed() to initialize the generator."""
+    if randn_source == "nv":
+        return torch.asarray(generator.randn(x.size()), device=x.device)
+    else:
+        if generator is not None and generator.device.type == "cpu":
+            return torch.randn(x.size(), dtype=x.dtype, layout=x.layout, device=devices.cpu, generator=generator).to(device=x.device)
+        else:
+            return torch.randn(x.size(), dtype=x.dtype, layout=x.layout, device=x.device, generator=generator)
+
+class TorchHijack:
+    """This is here to replace torch.randn_like of k-diffusion.
+
+    k-diffusion has a random_sampler argument for most samplers, but not for all,
+    so this is needed to properly replace every use of torch.randn_like.
+
+    The replacement makes images generated in batches come out the same as
+    images generated individually."""
+
+    def __init__(self, generator, randn_source):
+        self.generator = generator
+        self.randn_source = randn_source
+
+    def __getattr__(self, item):
+        if item == 'randn_like':
+            return self.randn_like
+
+        if hasattr(torch, item):
+            return getattr(torch, item)
+
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+    def randn_like(self, x):
+        return randn_without_seed(x, generator=self.generator, randn_source=self.randn_source)
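The batch-versus-individual guarantee described in the docstring above comes down to seeding one generator per image rather than one per batch. A minimal sketch of that idea in plain torch (hypothetical helper, not the prepare_noise API below):

    import torch

    def batch_noise(shape, seeds, device='cpu'):
        # One generator per image, seeded individually, so a batch of n
        # images gets exactly the same noise as n single-image runs.
        noises = []
        for seed in seeds:
            g = torch.Generator(device=device).manual_seed(seed)
            noises.append(torch.randn(shape, generator=g, device=device))
        return torch.stack(noises)

    a = batch_noise((4, 64, 64), [7, 8])
    b = batch_noise((4, 64, 64), [7])
    assert torch.equal(a[0], b[0])  # first image matches the solo run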
+def prepare_noise(latent_image, seed, noise_inds=None, device='cpu'):
+    """
+    Create random noise for a latent image given a seed.
+    The optional noise_inds argument selects which noise tensors in the sequence to keep;
+    earlier ones for the same seed are generated and discarded, so batched runs match
+    individual runs.
+    """
+    from .modules.shared import opts
+    from comfy.sample import np
+    def get_generator(seed):
+        nonlocal device
+        nonlocal opts
+        _generator = torch.Generator(device=device)
+        generator = _generator.manual_seed(seed)
+        if opts.randn_source == 'nv':
+            generator = rng_philox.Generator(seed)
+        return generator
+    generator = generator_eta = get_generator(seed)
+
+    if opts.eta_noise_seed_delta > 0:
+        seed = min(int(seed + opts.eta_noise_seed_delta), int(0xffffffffffffffff))
+        generator_eta = get_generator(seed)
+
+    # hijack randn_like
+    import comfy.k_diffusion.sampling
+    comfy.k_diffusion.sampling.torch = TorchHijack(generator_eta, opts.randn_source)
+
+    if noise_inds is None:
+        shape = latent_image.size()
+        if opts.randn_source == 'nv':
+            return torch.asarray(generator.randn(shape), device=devices.cpu)
+        else:
+            return torch.randn(shape, dtype=latent_image.dtype, layout=latent_image.layout, device=device, generator=generator)
+
+    unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
+    noises = []
+    for i in range(unique_inds[-1]+1):
+        shape = [1] + list(latent_image.size())[1:]
+        if opts.randn_source == 'nv':
+            noise = torch.asarray(generator.randn(shape), device=devices.cpu)
+        else:
+            noise = torch.randn(shape, dtype=latent_image.dtype, layout=latent_image.layout, device=device, generator=generator)
+        if i in unique_inds:
+            noises.append(noise)
+    noises = [noises[i] for i in inverse]
+    noises = torch.cat(noises, axis=0)
+    return noises
+
+# ===========================================================
+
+def run(clip: comfy.sd.CLIP, text, parser, mean_normalization,
+        multi_conditioning, use_old_emphasis_implementation, with_SDXL,
+        ascore, width, height, crop_w, crop_h, target_width, target_height,
+        text_g, text_l, steps=1, step=0):
+    opts.prompt_mean_norm = mean_normalization
+    opts.use_old_emphasis_implementation = use_old_emphasis_implementation
+    opts.CLIP_stop_at_last_layers = abs(clip.layer_idx or 1)
+    is_sdxl = "SDXL" in type(clip.cond_stage_model).__name__
+    if is_sdxl:
+        # Prevents a tensor shape mismatch; this is what comfy does by default
+        opts.batch_cond_uncond = True
+
+    parser_d = {"full": "Full parser",
+                "compel": "Compel parser",
+                "A1111": "A1111 parser",
+                "fixed attention": "Fixed attention",
+                "comfy++": "Comfy++ parser",
+                }
+    opts.prompt_attention = parser_d.get(parser, "Comfy parser")
+
+    sdxl_params = {}
+    if with_SDXL and is_sdxl:
+        sdxl_params = {
+            "aesthetic_score": ascore, "width": width, "height": height,
+            "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width,
+            "target_height": target_height, "text_g": text_g, "text_l": text_l
+        }
+    pooled = {}
+    if hasattr(comfy.sd1_clip, 'SDTokenizer'):
+        SDTokenizer = comfy.sd1_clip.SDTokenizer
+    else:
+        SDTokenizer = comfy.sd1_clip.SD1Tokenizer
+    tokenize_with_weights_orig = SDTokenizer.tokenize_with_weights
+    if parser == "comfy":
+        SDTokenizer.tokenize_with_weights = tokenize_with_weights_custom
+        clip_model_type_name = type(clip.cond_stage_model).__name__
+        if with_SDXL and is_sdxl:
+            if clip_model_type_name == "SDXLClipModel":
+                out = CLIPTextEncodeSDXL().encode(clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l)
+                out[0][0][1]['aesthetic_score'] = sdxl_params['aesthetic_score']
+            elif clip_model_type_name == "SDXLRefinerClipModel":
+                out = CLIPTextEncodeSDXLRefiner().encode(clip, ascore, width, height, text)
+                for item in ['aesthetic_score', 'width', 'height', 'text_g', 'text_l']:
+                    sdxl_params.pop(item)
+                out[0][0][1].update(sdxl_params)
+            else:
+                raise NotImplementedError()
+        else:
+            out = CLIPTextEncode().encode(clip, text)
+        SDTokenizer.tokenize_with_weights = tokenize_with_weights_orig
+        return out
+    else:
+        texts = [text]
+        create_prompts = lambda txts: prompt_parser.SdConditioning(txts)
+        texts = create_prompts(texts)
+        if is_sdxl:
+            if with_SDXL:
+                texts = {"g": create_prompts([text_g]), "l": create_prompts([text_l])}
+            else:
+                texts = {"g": texts, "l": texts}
+
+        # clip_clone = clip.clone()
+        clip_clone = clip
+        clip_clone.cond_stage_model_orig = clip_clone.cond_stage_model
+        clip_clone.cond_stage_model.encode_token_weights_orig = clip_clone.cond_stage_model.encode_token_weights
+
+        def patch_cond_stage_model():
+            nonlocal clip_clone
+            from .smZNodes import SD1ClipModel, SDXLClipModel, SDXLRefinerClipModel
+            ctp = type(clip_clone.cond_stage_model)
+            clip_clone.cond_stage_model.tokenizer = clip_clone.tokenizer
+            if ctp is comfy.sdxl_clip.SDXLClipModel:
+                clip_clone.cond_stage_model.encode_token_weights = SDXLClipModel.encode_token_weights
+                clip_clone.cond_stage_model.clip_g.tokenizer = clip_clone.tokenizer.clip_g
+                clip_clone.cond_stage_model.clip_l.tokenizer = clip_clone.tokenizer.clip_l
+            elif ctp is comfy.sdxl_clip.SDXLRefinerClipModel:
+                clip_clone.cond_stage_model.encode_token_weights = SDXLRefinerClipModel.encode_token_weights
+                clip_clone.cond_stage_model.clip_g.tokenizer = clip_clone.tokenizer.clip_g
+            else:
+                clip_clone.cond_stage_model.encode_token_weights = SD1ClipModel.encode_token_weights
+
+        tokens = texts
+        if parser == "comfy++":
+            SDTokenizer.tokenize_with_weights = tokenize_with_weights_custom
+            tokens = clip_clone.tokenize(text)
+            SDTokenizer.tokenize_with_weights = tokenize_with_weights_orig
+        cond = pooled = None
+        patch_cond_stage_model()
+        try:
+            clip_clone.cond_stage_model.encode_token_weights = partial(clip_clone.cond_stage_model.encode_token_weights, clip_clone.cond_stage_model, steps=steps, current_step=step, multi=multi_conditioning)
+            cond, pooled = clip_clone.encode_from_tokens(tokens, True)
+        finally:
+            clip_clone.cond_stage_model = clip_clone.cond_stage_model_orig
+            clip_clone.cond_stage_model.encode_token_weights = clip_clone.cond_stage_model.encode_token_weights_orig
+
+        if opts.debug:
+            print('[smZNodes] using steps', steps)
+        gen_id = lambda: binascii.hexlify(os.urandom(1024))[64:72]
+        smz_id = gen_id()  # renamed from `id` to avoid shadowing the builtin
+        schedules = getattr(pooled, 'schedules', [[(0, 1.0)]])
+        pooled = {"pooled_output": pooled, "from_smZ": True, "smZid": smz_id, "conds_list": pooled.conds_list, **sdxl_params}
+        out = [[cond, pooled]]
+        if is_prompt_editing(schedules):
+            for x in range(1, steps):
+                if type(schedules) is not dict:
+                    cond = reconstruct_schedules(schedules, x)
+                    if type(cond) is tuple:
+                        conds_list, cond = cond
+                        pooled['conds_list'] = conds_list
+                    cond = cond.cpu()
+                elif type(schedules) is dict and len(schedules) == 1: # SDXLRefiner
+                    cond = reconstruct_schedules(next(iter(schedules.values())), x)
+                    if type(cond) is tuple:
+                        conds_list, cond = cond
+                        pooled['conds_list'] = conds_list
+                    cond = cond.cpu()
+                elif type(schedules) is dict:
+                    g_out = reconstruct_schedules(schedules['g'], x)
+                    if type(g_out) is tuple: _, g_out = g_out
+                    l_out = reconstruct_schedules(schedules['l'], x)
+                    if type(l_out) is tuple: _, l_out = l_out
+                    g_out, l_out = expand(g_out, l_out)
+                    l_out, g_out = expand(l_out, g_out)
+                    cond = torch.cat([l_out, g_out], dim=-1).cpu()
+                else:
+                    raise NotImplementedError
+                out = out + [[cond, pooled]]
+        out[0][1]['orig_len'] = len(out)
+        return (out,)
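The prompt handler below has to discover how many sampling steps each smZ CLIPTextEncode node will be used with, which it does by walking the submitted prompt graph. In ComfyUI's prompt JSON, a node input is either a direct value or a [source_node_id, output_index] link; resolving "steps" through such a link looks roughly like this simplified sketch of `get_steps` (toy graph, not real node definitions):

    # Minimal sketch of resolving a "steps" input through ComfyUI prompt links.
    graph = {
        "1": {"class_type": "PrimitiveNode", "inputs": {"value": 20}},
        "2": {"class_type": "KSampler",      "inputs": {"steps": ["1", 0]}},
    }

    def resolve_steps(graph, node_id):
        v = graph[node_id]["inputs"]["steps"]
        while isinstance(v, list):  # follow the link to the referenced node
            ref = graph[v[0]]["inputs"]
            v = ref.get("steps", next(iter(ref.values())))
        return int(v)

    print(resolve_steps(graph, "2"))  # 20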
+# ========================================================================
+
+from server import PromptServer
+def prompt_handler(json_data):
+    data = json_data['prompt']
+    def tmp():
+        nonlocal data
+        current_clip_id = None
+        def find_nearest_ksampler(clip_id):
+            """Find the nearest KSampler node that references the given CLIPTextEncode id."""
+            for ksampler_id, node in data.items():
+                if "Sampler" in node["class_type"] or "sampler" in node["class_type"]:
+                    # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node
+                    if check_link_to_clip(ksampler_id, clip_id):
+                        return get_steps(data, ksampler_id)
+            return None
+
+        def get_steps(graph, node_id):
+            node = graph.get(str(node_id), {})
+            steps_input_value = node.get("inputs", {}).get("steps", None)
+            if steps_input_value is None:
+                steps_input_value = node.get("inputs", {}).get("sigmas", None)
+
+            while True:
+                # Base case: it's a direct value
+                if isinstance(steps_input_value, (int, float, str)):
+                    return min(max(1, int(steps_input_value)), 10000)
+
+                # Loop case: it's a reference to another node
+                elif isinstance(steps_input_value, list):
+                    ref_node_id, ref_input_index = steps_input_value
+                    ref_node = graph.get(str(ref_node_id), {})
+                    steps_input_value = ref_node.get("inputs", {}).get("steps", None)
+                    if steps_input_value is None:
+                        keys = list(ref_node.get("inputs", {}).keys())
+                        ref_input_key = keys[ref_input_index % len(keys)]
+                        steps_input_value = ref_node.get("inputs", {}).get(ref_input_key)
+                else:
+                    return None
+
+        def check_link_to_clip(node_id, clip_id, visited=None):
+            """Check if a given node links directly or indirectly to a CLIPTextEncode node."""
+            if visited is None:
+                visited = set()
+
+            node = data[node_id]
+
+            if node_id in visited:
+                return False
+            visited.add(node_id)
+
+            for input_value in node["inputs"].values():
+                if isinstance(input_value, list) and input_value[0] == clip_id:
+                    return True
+                if isinstance(input_value, list) and check_link_to_clip(input_value[0], clip_id, visited):
+                    return True
+
+            return False
+
+        # Update each CLIPTextEncode node's steps with the steps from its nearest referencing KSampler node
+        for clip_id, node in data.items():
+            if node["class_type"] == "smZ CLIPTextEncode":
+                current_clip_id = clip_id
+                steps = find_nearest_ksampler(clip_id)
+                if steps is not None:
+                    node["inputs"]["smZ_steps"] = steps
+                    if opts.debug:
+                        print(f'[smZNodes] id: {current_clip_id} | steps: {steps}')
+    tmp()
+    return json_data
+
+if hasattr(PromptServer.instance, 'add_on_prompt_handler'):
+    PromptServer.instance.add_on_prompt_handler(prompt_handler)
+
+# ========================================================================
+def bounded_modulo(number, modulo_value):
+    # Despite the name, this clamps to modulo_value rather than wrapping around
+    return number if number < modulo_value else modulo_value
+
+def get_adm(c):
+    for y in ["adm_encoded", "c_adm", "y"]:
+        if y in c:
+            c_c_adm = c[y]
+            if y == "adm_encoded": y = "c_adm"
+            if type(c_c_adm) is not torch.Tensor: c_c_adm = c_c_adm.cond
+            return {y: c_c_adm, 'key': y}
+    return None
+getp = lambda x: x[1] if type(x) is list else x
+def calc_cond(c, current_step):
+    """Group by smZ conds that may do prompt-editing / regular conds / comfy conds."""
+    _cond = []
+    # Group by conds from smZ
+    fn = lambda x: getp(x).get("from_smZ", None) is not None
+    an_iterator = itertools.groupby(c, fn)
+    for key, group in an_iterator:
+        ls = list(group)
+        # Group by prompt-editing conds
+        fn2 = lambda x: getp(x).get("smZid", None)
+        an_iterator2 = itertools.groupby(ls, fn2)
+        for key2, group2 in an_iterator2:
+            ls2 = list(group2)
+            if key2 is not None:
+                orig_len = getp(ls2[0]).get('orig_len', 1)
+                i = bounded_modulo(current_step, orig_len - 1)
+                _cond = _cond + [ls2[i]]
+            else:
+                _cond = _cond + ls2
+    return _cond
+
+CFGNoisePredictorOrig = comfy.samplers.CFGNoisePredictor
+class CFGNoisePredictor(CFGNoisePredictorOrig):
+    def __init__(self, model):
+        super().__init__(model)
+        self.step = 0
+        self.inner_model2 = CFGDenoiser(model.apply_model)
+        self.s_min_uncond = opts.s_min_uncond
+        self.c_adm = None
+        self.init_cond = None
+        self.init_uncond = None
+        self.is_prompt_editing_u = False
+        self.is_prompt_editing_c = False
+
+    def apply_model(self, *args, **kwargs):
+        args = list(args)  # tuples don't support the item assignment done below
+        x = kwargs['x'] if 'x' in kwargs else args[0]
+        timestep = kwargs['timestep'] if 'timestep' in kwargs else args[1]
+        cond = kwargs['cond'] if 'cond' in kwargs else args[2]
+        uncond = kwargs['uncond'] if 'uncond' in kwargs else args[3]
+        cond_scale = kwargs['cond_scale'] if 'cond_scale' in kwargs else args[4]
+        model_options = kwargs['model_options'] if 'model_options' in kwargs else {}
+
+        cc = calc_cond(cond, self.step)
+        uu = calc_cond(uncond, self.step)
+        self.step += 1
+
+        if (any([getp(p).get('from_smZ', False) for p in cc]) or
+            any([getp(p).get('from_smZ', False) for p in uu])):
+            if model_options.get('transformer_options', None) is None:
+                model_options['transformer_options'] = {}
+            model_options['transformer_options']['from_smZ'] = True
+
+        if not opts.use_CFGDenoiser or not model_options['transformer_options'].get('from_smZ', False):
+            if 'cond' in kwargs: kwargs['cond'] = cc
+            else: args[2] = cc
+            if 'uncond' in kwargs: kwargs['uncond'] = uu
+            else: args[3] = uu
+            out = super().apply_model(*args, **kwargs)
+        else:
+            # Only supports one cond
+            for ix in range(len(cc)):
+                if getp(cc[ix]).get('from_smZ', False):
+                    cc = [cc[ix]]
+                    break
+            for ix in range(len(uu)):
+                if getp(uu[ix]).get('from_smZ', False):
+                    uu = [uu[ix]]
+                    break
+            c = getp(cc[0])
+            u = getp(uu[0])
+            _cc = cc[0][0] if type(cc[0]) is list else cc[0]['model_conds']['c_crossattn'].cond
+            _uu = uu[0][0] if type(uu[0]) is list else uu[0]['model_conds']['c_crossattn'].cond
+            conds_list = c.get('conds_list', [[(0, 1.0)]])
+            if 'model_conds' in c: c = c['model_conds']
+            if 'model_conds' in u: u = u['model_conds']
+            c_c_adm = get_adm(c)
+            if c_c_adm is not None:
+                u_c_adm = get_adm(u)
+                k = c_c_adm['key']
+                self.c_adm = {k: torch.cat([c_c_adm[k], u_c_adm[u_c_adm['key']]]).to(device=x.device), 'key': k}
+            # SDXL. Need to pad with repeats
+            _cc, _uu = expand(_cc, _uu)
+            _uu, _cc = expand(_uu, _cc)
+            x.c_adm = self.c_adm
+            image_cond = txt2img_image_conditioning(None, x)
+            out = self.inner_model2(x, timestep, cond=(conds_list, _cc), uncond=_uu, cond_scale=cond_scale, s_min_uncond=self.s_min_uncond, image_cond=image_cond)
+        return out
+
+def txt2img_image_conditioning(sd_model, x, width=None, height=None):
+    # Dummy zero conditioning for non-inpainting/non-unclip models: still takes
+    # a bit of memory, but needs no encoder call, and a 1x1 "image" suffices
+    # since nothing beyond its batch size is ever used.
+    return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
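`inject_code` below patches a function by editing its source text and importing the rewritten module. The core move, stripped of the tempfile/importlib machinery it uses so the patched source stays viewable in a debugger, is: get the source, splice a line in after a target line, and execute the result. A toy version with hypothetical function and line contents:

    import inspect
    from textwrap import dedent

    def greet():
        x = 1
        return x

    src = dedent(inspect.getsource(greet)).split("\n")
    i = next(n for n, l in enumerate(src) if "x = 1" in l)
    src.insert(i + 1, "    x += 41")  # indentation must match the target line
    ns = {}
    exec("\n".join(src), ns)
    print(ns["greet"]())  # 42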
+# =======================================================================================
+
+def inject_code(original_func, data):
+    # Get the source code of the original function
+    original_source = inspect.getsource(original_func)
+
+    # Split the source code into lines
+    lines = original_source.split("\n")
+
+    for item in data:
+        # Find the line number of the target line
+        target_line_number = None
+        for i, line in enumerate(lines):
+            if item['target_line'] in line:
+                target_line_number = i + 1
+
+                # Match the indentation of the line we're inserting after
+                indentation = ''
+                for char in line:
+                    if char == ' ':
+                        indentation += char
+                    else:
+                        break
+
+                # Indent the new code to match the original
+                code_to_insert = dedent(item['code_to_insert'])
+                code_to_insert = indent(code_to_insert, indentation)
+                break
+
+        if target_line_number is None:
+            raise FileNotFoundError(f"Could not find the target line to inject after: {item['target_line']}")
+
+        # Insert the code to be injected after the target line
+        lines.insert(target_line_number, code_to_insert)
+
+    # Recreate the modified source code
+    modified_source = "\n".join(lines)
+    modified_source = dedent(modified_source.strip("\n"))
+
+    # Write the modified source code to a temporary file so the
+    # source code remains viewable when debugging.
+    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.py') as temp_file:
+        temp_file.write(modified_source)
+        temp_file.flush()
+
+    MODULE_PATH = temp_file.name
+    MODULE_NAME = __name__.split('.')[0] + "_patch_modules"
+    spec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)
+    module = importlib.util.module_from_spec(spec)
+    sys.modules[spec.name] = module
+    spec.loader.exec_module(module)
+
+    # Pass global variables to the modified module
+    globals_dict = original_func.__globals__
+    for key, value in globals_dict.items():
+        setattr(module, key, value)
+    modified_module = module
+
+    # Retrieve the modified function from the module
+    modified_function = getattr(modified_module, original_func.__name__)
+
+    # If the original function was a method, bind it to the first argument (self)
+    if inspect.ismethod(original_func):
+        modified_function = modified_function.__get__(original_func.__self__, original_func.__class__)
+
+    # Update the metadata of the modified function to associate it with the original function
+    functools.update_wrapper(modified_function, original_func)
+
+    # Return the modified function
+    return modified_function
+
+# ========================================================================
+
+# DPM++ 2M alt
+
+from tqdm.auto import trange
+@torch.no_grad()
+def sample_dpmpp_2m_alt(model, x, sigmas, extra_args=None, callback=None, disable=None):
+    """DPM-Solver++(2M), alternate version."""
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+    sigma_fn = lambda t: t.neg().exp()
+    t_fn = lambda sigma: sigma.log().neg()
+    old_denoised = None
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+        t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
+        h = t_next - t
+        if old_denoised is None or sigmas[i + 1] == 0:
+            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
+        else:
+            h_last = t - t_fn(sigmas[i - 1])
+            r = h_last / h
+            denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
+            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
+        # the "alt" tweak: scale the stored history up slightly as sampling progresses
+        sigma_progress = i / len(sigmas)
+        adjustment_factor = 1 + (0.15 * (sigma_progress * sigma_progress))
+        old_denoised = denoised * adjustment_factor
+    return x
+
+
+def add_sample_dpmpp_2m_alt():
+    from comfy.samplers import KSampler, k_diffusion_sampling
+    if "dpmpp_2m_alt" not in KSampler.SAMPLERS:
+        try:
+            idx = KSampler.SAMPLERS.index("dpmpp_2m")
+            KSampler.SAMPLERS.insert(idx+1, "dpmpp_2m_alt")
+            setattr(k_diffusion_sampling, 'sample_dpmpp_2m_alt', sample_dpmpp_2m_alt)
+            import importlib
+            importlib.reload(k_diffusion_sampling)
+        except ValueError:
+            pass
diff --git a/custom_nodes/ComfyUI_smZNodes/web/js/smZdynamicWidgets.js b/custom_nodes/ComfyUI_smZNodes/web/js/smZdynamicWidgets.js
new file mode 100644
index 0000000000000000000000000000000000000000..2c01bb1d540e0eec8f55044d3f13fe30a598e11f
--- /dev/null
+++ b/custom_nodes/ComfyUI_smZNodes/web/js/smZdynamicWidgets.js
@@ -0,0 +1,335 @@
+import { app } from "/scripts/app.js";
+
+const ids1 = new Set(["smZ CLIPTextEncode"])
+const ids2 = new Set(["smZ Settings"])
+const widgets = ['mean_normalization', 'multi_conditioning', 'use_old_emphasis_implementation', 'with_SDXL']
+const widgets_sdxl = ['ascore',
'width', 'height', 'crop_w', 'crop_h', 'target_width', 'target_height', 'text_g', 'text_l']
+const getSetWidgets = new Set(['parser', 'with_SDXL'])
+
+let origProps = {};
+const HIDDEN_TAG = "smZhidden"
+
+const findWidgetByName = (node, name) => node.widgets.find((w) => w.name === name);
+const findWidgetsByName = (node, name) => node.widgets.filter((w) => w.name.endsWith(name));
+
+const doesInputWithNameExist = (node, name) => node.inputs ? node.inputs.some((input) => input.name === name) : false;
+
+// round up in increments of x, with an offset
+function round(number, increment = 10, offset = 0) {
+    return Math.ceil((number - offset) / increment ) * increment + offset;
+}
+
+function toggleWidget(node, widget, show = false, suffix = "") {
+    if (!widget || doesInputWithNameExist(node, widget.name)) return;
+    if (!origProps[widget.name]) {
+        origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize};
+    }
+    const origSize = node.size;
+
+    widget.type = show ? origProps[widget.name].origType : HIDDEN_TAG + suffix;
+    widget.computeSize = show ? origProps[widget.name].origComputeSize : () => [0, -3.3];
+
+    // arguments passed in (show, suffix) order to match the signature
+    widget.linkedWidgets?.forEach(w => toggleWidget(node, w, show, ":" + widget.name));
+
+    const height = show ? Math.max(node.computeSize()[1], origSize[1]) : node.size[1];
+    node.setSize([node.size[0], height]);
+    if (show)
+        delete widget.computedHeight;
+    else
+        widget.computedHeight = 0;
+}
+
+function widgetLogic(node, widget) {
+    const wname = widget.name
+    if (wname.endsWith("parser")) {
+        const in_comfy = widget.value.includes("comfy")
+        toggleMenuOption(node, ['multi_conditioning', wname], !in_comfy)
+        toggleMenuOption(node, ['mean_normalization', wname], widget.value !== "comfy")
+        if (in_comfy) {
+            toggleMenuOption(node, ['use_old_emphasis_implementation', wname], false)
+        }
+    } else if (wname.endsWith("with_SDXL")) {
+        toggleMenuOption(node, ['text', wname], !widget.value)
+
+        // Resize the node when the widget is set to false
+        if (!widget.value) {
+            // Prevents resizing on init/webpage reload
+            if(widget.init === false) {
+                // Resize when set to false
+                node.setSize([node.size[0], Math.max(100, round(node.size[1]/1.5))])
+            }
+        } else {
+            // When enabled, set init to false
+            widget.init = false
+        }
+
+        // Toggle the SDXL widgets along with the with_SDXL value
+        for (const w of widgets_sdxl) {
+            toggleMenuOption(node, [w, wname], widget.value)
+        }
+
+        // Keep showing the widget if it's enabled
+        if (widget.value && widget.type === HIDDEN_TAG) {
+            toggleMenuOption(node, [widget.name, wname], true)
+        }
+    }
+}
+
+function getGroupNodeConfig(node) {
+    let ls = []
+    let nodeData = node.constructor?.nodeData
+    if (nodeData) {
+        for(let sym of Object.getOwnPropertySymbols(nodeData) ) {
+            const o = nodeData[sym];
+            if (!o) continue;
+            ls.push(o)
+        }
+    }
+    return ls
+}
+
+function getSetters(node) {
+    if (node.widgets) {
+        let gncl = getGroupNodeConfig(node)
+        for (const w of node.widgets) {
+            for (const gsw of [...getSetWidgets]) {
+                if (!w.name.endsWith(gsw)) continue;
+                let shouldBreak = false
+                for (const gnc of gncl) {
+                    const nwmap = gnc.newToOldWidgetMap[w.name]
+                    if (nwmap && !(nwmap.node.type === [...ids1][0] && nwmap.inputName === gsw))
+                        shouldBreak = true
+                }
+                if (shouldBreak) break;
+                widgetLogic(node, w);
+                w._value = w.value
+
+                Object.defineProperty(w, 'value', {
+                    get() {
+                        return w._value;
+                    },
+                    set(newVal) {
+                        w._value = newVal
+                        widgetLogic(node, w);
+                    }
+                });
+
+                // Hide the SDXL widgets on init
+                // Doing it in nodeCreated fixes its toggling for some
reason + if (w.name.endsWith('with_SDXL')) { + toggleMenuOption(node, ['with_SDXL', w.name]) + w.init = true + + // Hide steps + toggleMenuOption(node, ['smZ_steps', w.name] , false) + } + } + } + } +} + +function toggleMenuOption(node, widget_arr, _show = null, perform_action = true) { + const gncl = getGroupNodeConfig(node) + const [widget_name, companion_widget_name] = Array.isArray(widget_arr) ? widget_arr : [widget_arr] + let nwname = widget_name + // Use companion_widget_name to get the correct widget with the new name + if (companion_widget_name) { + for (const gnc of gncl) { + const omap = Object.values(gnc.oldToNewWidgetMap).find(x => Object.values(x).find(z => z === companion_widget_name)) + const tmp2 = omap[widget_name] + if (tmp2) nwname = tmp2; + } + } + const widgets = companion_widget_name ? [findWidgetByName(node, nwname)] : findWidgetsByName(node, nwname) + for (const widget of widgets) + toggleMenuOption0(node, widget, _show, perform_action) +} + +function toggleMenuOption0(node, widget, _show = null, perform_action = true) { + if (!widget || doesInputWithNameExist(node, widget.name)) return; + if (!origProps[widget.name]) { + origProps[widget.name] = { origType: widget.type, origComputeSize: widget.computeSize}; + } + const show = (widget.type === origProps[widget.name].origType) + if (perform_action) { + toggleWidget(node, widget, _show !== null ? _show : !show) + node.setDirtyCanvas(true); + } +} + +function toggle_all_settings_desc_widgets(node, _show = null) { + let found_widgets = node.widgets.filter((w) => w.name.includes('info')); + let is_showing = _show !== null ? _show : null + found_widgets.forEach(w => { + toggleMenuOption(node, [w.name, w.name], _show) + is_showing = _show !== null ? _show : w.type === origProps[w.name].origType + }); + + let w = node.widgets.find((w) => w.name === 'extra'); + if (w) { + let value = null; + try { + value =JSON.parse(w.value); + } catch (error) { + // when node definitions change due to an update or some other error + value = {"show":true} + } + value.show = is_showing; + w.value = JSON.stringify(value); + } + + // Collapse the node if the widgets aren't showing + if (!is_showing) { + node.setSize([node.size[0], node.computeSize()[1]]) + } +} + +function create_custom_option(content, _callback) { + return { + content: content, + callback: () => _callback(), + } +}; + +app.registerExtension({ + name: "comfy.smZ.dynamicWidgets", + + /** + * Called when a node is created. Used to add menu options to nodes. + * @param node The node that was created. + * @param app The app. 
+ */ + nodeCreated(node) { + const nodeType = node.type || node.constructor?.type + let inGroupNode = false + let inGroupNode2 = false + let innerNodes = node.getInnerNodes?.() + if (innerNodes) { + for (const inode of innerNodes) { + const _nodeType = inode.type || inode.constructor?.type + if (ids1.has(_nodeType)) + inGroupNode = ids1.has(_nodeType) + if (inGroupNode) + ids1.add(nodeType) // GroupNode's type + if (ids2.has(_nodeType)) + inGroupNode2 = ids2.has(_nodeType) + if (inGroupNode2) + ids2.add(nodeType) // GroupNode's type + } + } + // let nodeData = node.constructor?.nodeData + // if (nodeData) { + // for(let sym of Object.getOwnPropertySymbols(nodeData) ) { + // const nds = nodeData[sym]; + // if (nds) { + // inGroupNode=true + // inGroupNode2=true + // break + // } + // } + // } + // ClipTextEncode++ node + if (ids1.has(nodeType) || inGroupNode) { + node.widgets.forEach(w => w._name = w.name) + + getSetters(node) + + // Reduce initial node size cause of SDXL widgets + // node.setSize([node.size[0], Math.max(node.size[1]/1.5, 220)]) + node.setSize([node.size[0], 220]) + } + // Settings node + if (ids2.has(nodeType) || inGroupNode2) { + node.serialize_widgets = true + + const onConfigure = node.onConfigure; + node.onConfigure = function(o) { + const r = onConfigure ? onConfigure.apply(this, arguments) : undefined; + const w = this.widgets.find(w => w.name === 'extra') + let value = null + try { + value = JSON.parse(w.value); + } catch (error) { + // when node definitions change due to an update or some other error + value = {"show":true} + } + toggle_all_settings_desc_widgets(this, value.show) + return r; + } + + // Styling. + node.widgets.forEach(function(w) { + w._name = w.name + if (w.name.includes('ㅤ')) { + w.heading = true + } else if (w.name.includes('info')) { + w.info = true + w.inputEl.disabled = true; + w.inputEl.readOnly = true; + w.inputEl.style.opacity = 0.75; + w.inputEl.style.alignContent = 'center'; + w.inputEl.style.textAlign = 'center'; + } + }) + // Hide `extra` widget + toggleMenuOption(node, 'extra', false) + } + // Add extra MenuOptions for + // ClipTextEncode++ and Settings node + if (ids1.has(nodeType) || inGroupNode || ids2.has(nodeType) || inGroupNode2) { + // Save the original options + const getExtraMenuOptions = node.getExtraMenuOptions; + + node.getExtraMenuOptions = function (_, options) { + // Call the original function for the default menu options + const r = getExtraMenuOptions ? 
getExtraMenuOptions.apply(this, arguments) : undefined;
+                let customOptions = []
+                node.setDirtyCanvas(true, true);
+
+                if (ids2.has(nodeType) || inGroupNode2) {
+                    const content_hide_show = "Hide/show all descriptions";
+                    customOptions.push(null) // separator
+                    customOptions.push(create_custom_option(content_hide_show, toggle_all_settings_desc_widgets.bind(this, node)))
+
+                    // Alternate way to clean up the MenuOptions
+                    const toHideWidgets = node.widgets.filter(w => w.name.includes('ㅤ') || w.name.includes('info') || w.name.includes('extra'))
+                    const wo = options.filter(o => o === null || (o && !toHideWidgets.some(w => o.content.includes(`Convert ${w.name} to input`))))
+                    options.splice(0, options.length, ...wo);
+                }
+
+                if (ids1.has(nodeType) || inGroupNode) {
+                    // Dynamic MenuOptions depending on the widgets
+                    const content_hide_show = "Hide/show ";
+                    const hiddenWidgets = node.widgets.filter(w => w.type === HIDDEN_TAG)
+                    // doesn't take GroupNode into account
+                    const with_SDXL = node.widgets.find(w => w.name === 'with_SDXL')
+                    const parser = node.widgets.find(w => w.name === 'parser')
+                    const in_comfy = parser.value.includes("comfy")
+                    let ws = widgets.map(widget_name => create_custom_option(content_hide_show + widget_name, toggleMenuOption.bind(this, node, widget_name)))
+                    ws = ws.filter((w) => (in_comfy && parser.value !== 'comfy' && w.content.includes('mean_normalization')) || (in_comfy && w.content.includes('with_SDXL')) || !in_comfy )
+                    customOptions.push(...ws)
+
+                    let wo = options.filter(o => o === null || (o && !hiddenWidgets.some(w => o.content.includes(`Convert ${w.name} to input`))))
+                    const width = node.widgets.find(w => w.name === 'width')
+                    const height = node.widgets.find(w => w.name === 'height')
+                    if (width && height) {
+                        const width_type = width.type.toLowerCase()
+                        const height_type = height.type.toLowerCase()
+                        if (!(width_type.includes('number') || width_type.includes('int') || width_type.includes('float') ||
+                            height_type.includes('number') || height_type.includes('int') || height_type.includes('float')))
+                            wo = wo.filter(o => o === null || (o && !o.content.includes('Swap width/height')))
+                    }
+                    options.splice(0, options.length, ...wo);
+                }
+                // insert just above the last entry rather than at the top
+                options.splice(options.length - 1, 0, ...customOptions)
+            }
+        }
+    }
+});
diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/LICENSE b/custom_nodes/ControlNet-LLLite-ComfyUI/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/custom_nodes/ControlNet-LLLite-ComfyUI/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/README.md b/custom_nodes/ControlNet-LLLite-ComfyUI/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..40b53e670ca79a1d368d36471e7d201902362d22
--- /dev/null
+++ b/custom_nodes/ControlNet-LLLite-ComfyUI/README.md
@@ -0,0 +1,67 @@
+# ControlNet-LLLite-ComfyUI
+
+日本語版ドキュメントは後半にあります。(The Japanese documentation is in the second half of this README.)
+
+This is a UI for inference of [ControlNet-LLLite](https://github.com/kohya-ss/sd-scripts/blob/sdxl/docs/train_lllite_README.md).
+
+ControlNet-LLLite is an experimental implementation, so there may be some problems.
+
+![image](https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI/assets/52813779/ef2ea8d6-121b-48ea-b4b0-a41601dcd6f2)
+
+## Installation
+
+1. Clone this repository into `custom_nodes`.
+2. Put ControlNet-LLLite models in `ControlNet-LLLite-ComfyUI/models`. You can download sample models from [here](https://huggingface.co/kohya-ss/controlnet-lllite/tree/main).
+
+## Usage
+
+Load the [sample workflow](lllite_workflow.json).
+
+You can specify the strength of the effect with `strength`. The default is 1.0; 0.0 disables the effect.
+
+You can apply the effect to only part of the diffusion steps with `steps`, `start_percent`, and `end_percent`. Set `steps` to the number of steps configured in the sampler, and set `start_percent` and `end_percent` to the start and end points as percentages from 0 to 100. For example, with `steps=30`, `start_percent=20`, and `end_percent=80`, the effect is applied from step 6 until step 24.
+
+(The node cannot see the total number of steps by itself, hence this design. Check the console output for the exact application range.)
+
+## Tips
+
++ If the generated image size differs from the control image size, resize the control image with `image/upscaling/UpscaleImage` as shown in the workflow.
+
++ You can create a Canny image from a normal image with the `image/preprocessors/Canny` node.
+
+## Acknowledgements
+
+This repository is based on [IPAdapter-ComfyUI](https://github.com/laksjdjf/IPAdapter-ComfyUI) by laksjdjf. Thanks to laksjdjf.
+
+
+# ControlNet-LLLite-ComfyUI:日本語版ドキュメント
+
+[ControlNet-LLLite](https://github.com/kohya-ss/sd-scripts/blob/sdxl/docs/train_lllite_README.md) の推論用のUIです。
+
+ControlNet-LLLiteがそもそもきわめて実験的な実装のため、問題がいろいろあるかもしれません。
+
+![image](https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI/assets/52813779/ef2ea8d6-121b-48ea-b4b0-a41601dcd6f2)
+
+## インストール方法
+
+1. `custom_nodes`にcloneします。
+2. 
`ControlNet-LLLite-ComfyUI/models` にモデルを入れます。サンプルは[こちら](https://huggingface.co/kohya-ss/controlnet-lllite/tree/main)からダウンロードできます。 + +## 使い方 + +[サンプルのワークフロー](lllite_workflow.json)を読み込んでください。 + +`strength`に効果の強さを指定できます。1.0でデフォルト、0.0で効果なしです。 + +`steps`と`start_percent`、`end_percent`で拡散ステップの一部にだけ効果を適用できます。`steps`にsamplerに指定したステップ数を指定し、`start_percent`と`end_percent`にそれぞれ開始と終了のステップを0から100で指定します。 + +(ノード内で全体のステップ数を確認できないためこのような仕様になっています。具体的な適用範囲はコンソール出力を確認してください。) + +## ヒント + ++ 生成画像サイズと制御用画像サイズが異なる場合はワークフローにあるように `image/upscaling/UpscaleImage` でリサイズしてください。 ++ 通常の画像からCanny画像を作るには `image/preprocessors/Canny` のノードが使えます。 + +## 謝辞 + +laksjdjf 氏の [IPAdapter-ComfyUI](https://github.com/laksjdjf/IPAdapter-ComfyUI) を参考にしています。laksjdjf 氏に感謝します。 diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/__init__.py b/custom_nodes/ControlNet-LLLite-ComfyUI/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea5b1ccd97a2b4cb186b6af0a3c9a357f732dd07 --- /dev/null +++ b/custom_nodes/ControlNet-LLLite-ComfyUI/__init__.py @@ -0,0 +1,2 @@ +from .node_control_net_lllite import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] \ No newline at end of file diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/__init__.cpython-311.pyc b/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce72c6a642066e274d342dee87c9be1bf31fe83c Binary files /dev/null and b/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/node_control_net_lllite.cpython-311.pyc b/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/node_control_net_lllite.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9742a297c8e415cc346f064fac7685d1362c9760 Binary files /dev/null and b/custom_nodes/ControlNet-LLLite-ComfyUI/__pycache__/node_control_net_lllite.cpython-311.pyc differ diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/lllite_workflow.json b/custom_nodes/ControlNet-LLLite-ComfyUI/lllite_workflow.json new file mode 100644 index 0000000000000000000000000000000000000000..eaf11acb3b958f5a9daf18ed1458a1760d3c5601 --- /dev/null +++ b/custom_nodes/ControlNet-LLLite-ComfyUI/lllite_workflow.json @@ -0,0 +1,717 @@ +{ + "last_node_id": 62, + "last_link_id": 108, + "nodes": [ + { + "id": 16, + "type": "PrimitiveNode", + "pos": [ + -90, + -40 + ], + "size": { + "0": 398, + "1": 140 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 55, + 57 + ], + "slot_index": 0, + "widget": { + "name": "text_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_G" + } + ] + } + } + ], + "title": "Negative Prompt", + "properties": {}, + "widgets_values": [ + "nsfw, bad face, lowres, low quality, worst quality, low effort, watermark, signature, ugly, poorly drawn " + ] + }, + { + "id": 32, + "type": "PrimitiveNode", + "pos": [ + -90, + -220 + ], + "size": { + "0": 398, + "1": 140 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 51, + 52 + ], + "widget": { + "name": "text_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_G" + } + ] + }, + "slot_index": 0 + } + ], + "title": "Positive Prompt", + "properties": {}, + "widgets_values": [ + "anime screen cap 1girl standing at classroom, looking 
at viewer, in school uniform, (solo), teen age, smile, long hair, upper body, trending on pixiv, 8k wallpaper, beautiful face" + ] + }, + { + "id": 26, + "type": "VAEDecode", + "pos": [ + 1203, + -216 + ], + "size": { + "0": 140, + "1": 46 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 45, + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "link": 46, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 48 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 30, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 407, + -219 + ], + "size": { + "0": 399.84454345703125, + "1": 262.6287841796875 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 49, + "slot_index": 0 + }, + { + "name": "text_g", + "type": "STRING", + "link": 51, + "widget": { + "name": "text_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_G" + } + ] + } + }, + { + "name": "text_l", + "type": "STRING", + "link": 52, + "widget": { + "name": "text_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_L" + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 50 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "anime screen cap 1girl standing at classroom, looking at viewer, in school uniform, (solo), teen age, smile, long hair, upper body, trending on pixiv, 8k wallpaper, beautiful face", + "anime screen cap 1girl standing at classroom, looking at viewer, in school uniform, (solo), teen age, smile, long hair, upper body, trending on pixiv, 8k wallpaper, beautiful face" + ] + }, + { + "id": 33, + "type": "CLIPTextEncodeSDXL", + "pos": [ + 405, + 92 + ], + "size": { + "0": 400, + "1": 270.0000305175781 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54, + "slot_index": 0 + }, + { + "name": "text_g", + "type": "STRING", + "link": 55, + "widget": { + "name": "text_g", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_G" + } + ] + } + }, + { + "name": "text_l", + "type": "STRING", + "link": 57, + "widget": { + "name": "text_l", + "config": [ + "STRING", + { + "multiline": true, + "default": "CLIP_L" + } + ] + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 56 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncodeSDXL" + }, + "widgets_values": [ + 1024, + 1024, + 0, + 0, + 1024, + 1024, + "nsfw, bad face, lowres, low quality, worst quality, low effort, watermark, signature, ugly, poorly drawn ", + "nsfw, bad face, lowres, low quality, worst quality, low effort, watermark, signature, ugly, poorly drawn " + ] + }, + { + "id": 19, + "type": "SaveImage", + "pos": [ + 863, + 321 + ], + "size": { + "0": 486.4142761230469, + "1": 454.6273498535156 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 48 + } + ], + "properties": {}, + "widgets_values": [ + "lllite_output" + ] + }, + { + "id": 43, + "type": "EmptyLatentImage", + "pos": [ + -18, + 165 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 2, 
+ "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 1024, + 1024, + 1 + ] + }, + { + "id": 17, + "type": "KSampler", + "pos": [ + 868, + -220 + ], + "size": { + "0": 315, + "1": 474 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 108 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 50 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 56 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 68 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1, + "fixed", + 36, + 7.5, + "ddim", + "normal", + 1 + ] + }, + { + "id": 59, + "type": "LoadImage", + "pos": [ + -395, + 380 + ], + "size": { + "0": 428.49603271484375, + "1": 442.3898620605469 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 104 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "canny1.png", + "image" + ] + }, + { + "id": 61, + "type": "ImageScale", + "pos": [ + 52, + 510 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 104 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 106 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "bilinear", + 1024, + 1024, + "disabled" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -541, + 35 + ], + "size": { + "0": 397, + "1": 98 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 107 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 49, + 54 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 46 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "sd_xl_base_1.0_0.9vae.safetensors" + ] + }, + { + "id": 62, + "type": "LLLiteLoader", + "pos": [ + 405, + 433 + ], + "size": { + "0": 396.923095703125, + "1": 174 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 107 + }, + { + "name": "cond_image", + "type": "IMAGE", + "link": 106 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 108 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LLLiteLoader" + }, + "widgets_values": [ + "controllllite_v01032064e_sdxl_canny_anime.safetensors", + 1, + 0, + 0, + 0 + ] + } + ], + "links": [ + [ + 45, + 17, + 0, + 26, + 0, + "LATENT" + ], + [ + 46, + 4, + 2, + 26, + 1, + "VAE" + ], + [ + 48, + 26, + 0, + 19, + 0, + "IMAGE" + ], + [ + 49, + 4, + 1, + 30, + 0, + "CLIP" + ], + [ + 50, + 30, + 0, + 17, + 1, + "CONDITIONING" + ], + [ + 51, + 32, + 0, + 30, + 1, + "STRING" + ], + [ + 52, + 32, + 0, + 30, + 2, + "STRING" + ], + [ + 54, + 4, + 1, + 33, + 0, + "CLIP" + ], + [ + 55, + 16, + 0, + 33, + 1, + 
"STRING" + ], + [ + 56, + 33, + 0, + 17, + 2, + "CONDITIONING" + ], + [ + 57, + 16, + 0, + 33, + 2, + "STRING" + ], + [ + 68, + 43, + 0, + 17, + 3, + "LATENT" + ], + [ + 104, + 59, + 0, + 61, + 0, + "IMAGE" + ], + [ + 106, + 61, + 0, + 62, + 1, + "IMAGE" + ], + [ + 107, + 4, + 0, + 62, + 0, + "MODEL" + ], + [ + 108, + 62, + 0, + 17, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/models/put_models_here.txt b/custom_nodes/ControlNet-LLLite-ComfyUI/models/put_models_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..a43ed46cd15151db4b75473b8bb7d1350fad85c4 --- /dev/null +++ b/custom_nodes/ControlNet-LLLite-ComfyUI/models/put_models_here.txt @@ -0,0 +1 @@ +( •̀ ω •́ )✧ diff --git a/custom_nodes/ControlNet-LLLite-ComfyUI/node_control_net_lllite.py b/custom_nodes/ControlNet-LLLite-ComfyUI/node_control_net_lllite.py new file mode 100644 index 0000000000000000000000000000000000000000..cb67f7df54cfa569c477e17f7b65f21b0f638923 --- /dev/null +++ b/custom_nodes/ControlNet-LLLite-ComfyUI/node_control_net_lllite.py @@ -0,0 +1,289 @@ +import math +import torch +import os + +import comfy + +CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) + + +def get_file_list(path): + return [file for file in os.listdir(path) if file != "put_models_here.txt"] + + +def extra_options_to_module_prefix(extra_options): + # extra_options = {'transformer_index': 2, 'block_index': 8, 'original_shape': [2, 4, 128, 128], 'block': ('input', 7), 'n_heads': 20, 'dim_head': 64} + + # block is: [('input', 4), ('input', 5), ('input', 7), ('input', 8), ('middle', 0), + # ('output', 0), ('output', 1), ('output', 2), ('output', 3), ('output', 4), ('output', 5)] + # transformer_index is: [0, 1, 2, 3, 4, 5, 6, 7, 8], for each block + # block_index is: 0-1 or 0-9, depends on the block + # input 7 and 8, middle has 10 blocks + + # make module name from extra_options + block = extra_options["block"] + block_index = extra_options["block_index"] + if block[0] == "input": + module_pfx = f"lllite_unet_input_blocks_{block[1]}_1_transformer_blocks_{block_index}" + elif block[0] == "middle": + module_pfx = f"lllite_unet_middle_block_1_transformer_blocks_{block_index}" + elif block[0] == "output": + module_pfx = f"lllite_unet_output_blocks_{block[1]}_1_transformer_blocks_{block_index}" + else: + raise Exception("invalid block name") + return module_pfx + + +def load_control_net_lllite_patch(path, cond_image, multiplier, num_steps, start_percent, end_percent): + # calculate start and end step + start_step = math.floor(num_steps * start_percent * 0.01) if start_percent > 0 else 0 + end_step = math.floor(num_steps * end_percent * 0.01) if end_percent > 0 else num_steps + + # load weights + ctrl_sd = comfy.utils.load_torch_file(path, safe_load=True) + + # split each weights for each module + module_weights = {} + for key, value in ctrl_sd.items(): + fragments = key.split(".") + module_name = fragments[0] + weight_name = ".".join(fragments[1:]) + + if module_name not in module_weights: + module_weights[module_name] = {} + module_weights[module_name][weight_name] = value + + # load each module + modules = {} + for module_name, weights in module_weights.items(): + # ここの自動判定を何とかしたい + if "conditioning1.4.weight" in weights: + depth = 3 + elif weights["conditioning1.2.weight"].shape[-1] == 4: + depth = 2 + else: + depth = 1 + + module = LLLiteModule( + name=module_name, + 
is_conv2d=weights["down.0.weight"].ndim == 4, + in_dim=weights["down.0.weight"].shape[1], + depth=depth, + cond_emb_dim=weights["conditioning1.0.weight"].shape[0] * 2, + mlp_dim=weights["down.0.weight"].shape[0], + multiplier=multiplier, + num_steps=num_steps, + start_step=start_step, + end_step=end_step, + ) + info = module.load_state_dict(weights) + modules[module_name] = module + if len(modules) == 1: + module.is_first = True + + print(f"loaded {path} successfully, {len(modules)} modules") + + # cond imageをセットする + cond_image = cond_image.permute(0, 3, 1, 2) # b,h,w,3 -> b,3,h,w + cond_image = cond_image * 2.0 - 1.0 # 0-1 -> -1-+1 + + for module in modules.values(): + module.set_cond_image(cond_image) + + class control_net_lllite_patch: + def __init__(self, modules): + self.modules = modules + + def __call__(self, q, k, v, extra_options): + module_pfx = extra_options_to_module_prefix(extra_options) + + is_attn1 = q.shape[-1] == k.shape[-1] # self attention + if is_attn1: + module_pfx = module_pfx + "_attn1" + else: + module_pfx = module_pfx + "_attn2" + + module_pfx_to_q = module_pfx + "_to_q" + module_pfx_to_k = module_pfx + "_to_k" + module_pfx_to_v = module_pfx + "_to_v" + + if module_pfx_to_q in self.modules: + q = q + self.modules[module_pfx_to_q](q) + if module_pfx_to_k in self.modules: + k = k + self.modules[module_pfx_to_k](k) + if module_pfx_to_v in self.modules: + v = v + self.modules[module_pfx_to_v](v) + + return q, k, v + + def to(self, device): + for d in self.modules.keys(): + self.modules[d] = self.modules[d].to(device) + return self + + return control_net_lllite_patch(modules) + + +class LLLiteModule(torch.nn.Module): + def __init__( + self, + name: str, + is_conv2d: bool, + in_dim: int, + depth: int, + cond_emb_dim: int, + mlp_dim: int, + multiplier: int, + num_steps: int, + start_step: int, + end_step: int, + ): + super().__init__() + self.name = name + self.is_conv2d = is_conv2d + self.multiplier = multiplier + self.num_steps = num_steps + self.start_step = start_step + self.end_step = end_step + self.is_first = False + + modules = [] + modules.append(torch.nn.Conv2d(3, cond_emb_dim // 2, kernel_size=4, stride=4, padding=0)) # to latent (from VAE) size*2 + if depth == 1: + modules.append(torch.nn.ReLU(inplace=True)) + modules.append(torch.nn.Conv2d(cond_emb_dim // 2, cond_emb_dim, kernel_size=2, stride=2, padding=0)) + elif depth == 2: + modules.append(torch.nn.ReLU(inplace=True)) + modules.append(torch.nn.Conv2d(cond_emb_dim // 2, cond_emb_dim, kernel_size=4, stride=4, padding=0)) + elif depth == 3: + # kernel size 8は大きすぎるので、4にする / kernel size 8 is too large, so set it to 4 + modules.append(torch.nn.ReLU(inplace=True)) + modules.append(torch.nn.Conv2d(cond_emb_dim // 2, cond_emb_dim // 2, kernel_size=4, stride=4, padding=0)) + modules.append(torch.nn.ReLU(inplace=True)) + modules.append(torch.nn.Conv2d(cond_emb_dim // 2, cond_emb_dim, kernel_size=2, stride=2, padding=0)) + + self.conditioning1 = torch.nn.Sequential(*modules) + + if self.is_conv2d: + self.down = torch.nn.Sequential( + torch.nn.Conv2d(in_dim, mlp_dim, kernel_size=1, stride=1, padding=0), + torch.nn.ReLU(inplace=True), + ) + self.mid = torch.nn.Sequential( + torch.nn.Conv2d(mlp_dim + cond_emb_dim, mlp_dim, kernel_size=1, stride=1, padding=0), + torch.nn.ReLU(inplace=True), + ) + self.up = torch.nn.Sequential( + torch.nn.Conv2d(mlp_dim, in_dim, kernel_size=1, stride=1, padding=0), + ) + else: + self.down = torch.nn.Sequential( + torch.nn.Linear(in_dim, mlp_dim), + torch.nn.ReLU(inplace=True), + ) + 
self.mid = torch.nn.Sequential( + torch.nn.Linear(mlp_dim + cond_emb_dim, mlp_dim), + torch.nn.ReLU(inplace=True), + ) + self.up = torch.nn.Sequential( + torch.nn.Linear(mlp_dim, in_dim), + ) + + self.depth = depth + self.cond_image = None + self.cond_emb = None + self.current_step = 0 + + # @torch.inference_mode() + def set_cond_image(self, cond_image): + # print("set_cond_image", self.name) + self.cond_image = cond_image + self.cond_emb = None + self.current_step = 0 + + def forward(self, x): + if self.num_steps > 0: + if self.current_step < self.start_step: + self.current_step += 1 + return torch.zeros_like(x) + elif self.current_step >= self.end_step: + if self.is_first and self.current_step == self.end_step: + print(f"end LLLite: step {self.current_step}") + self.current_step += 1 + if self.current_step >= self.num_steps: + self.current_step = 0 # reset + return torch.zeros_like(x) + else: + if self.is_first and self.current_step == self.start_step: + print(f"start LLLite: step {self.current_step}") + self.current_step += 1 + if self.current_step >= self.num_steps: + self.current_step = 0 # reset + + if self.cond_emb is None: + # print(f"cond_emb is None, {self.name}") + cx = self.conditioning1(self.cond_image.to(x.device, dtype=x.dtype)) + if not self.is_conv2d: + # reshape / b,c,h,w -> b,h*w,c + n, c, h, w = cx.shape + cx = cx.view(n, c, h * w).permute(0, 2, 1) + self.cond_emb = cx + + cx = self.cond_emb + # print(f"forward {self.name}, {cx.shape}, {x.shape}") + + # uncond/condでxはバッチサイズが2倍 + if x.shape[0] != cx.shape[0]: + if self.is_conv2d: + cx = cx.repeat(x.shape[0] // cx.shape[0], 1, 1, 1) + else: + # print("x.shape[0] != cx.shape[0]", x.shape[0], cx.shape[0]) + cx = cx.repeat(x.shape[0] // cx.shape[0], 1, 1) + + cx = torch.cat([cx, self.down(x)], dim=1 if self.is_conv2d else 2) + cx = self.mid(cx) + cx = self.up(cx) + return cx * self.multiplier + + +class LLLiteLoader: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "model_name": (get_file_list(os.path.join(CURRENT_DIR, "models")),), + "cond_image": ("IMAGE",), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "steps": ("INT", {"default": 0, "min": 0, "max": 200, "step": 1}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "end_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_lllite" + CATEGORY = "loaders" + + def load_lllite(self, model, model_name, cond_image, strength, steps, start_percent, end_percent): + # cond_image is b,h,w,3, 0-1 + + model_path = os.path.join(CURRENT_DIR, os.path.join(CURRENT_DIR, "models", model_name)) + + model_lllite = model.clone() + patch = load_control_net_lllite_patch(model_path, cond_image, strength, steps, start_percent, end_percent) + if patch is not None: + model_lllite.set_model_attn1_patch(patch) + model_lllite.set_model_attn2_patch(patch) + + return (model_lllite,) + + +NODE_CLASS_MAPPINGS = {"LLLiteLoader": LLLiteLoader} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LLLiteLoader": "Load LLLite", +} diff --git a/custom_nodes/FreeU_Advanced/.gitattributes b/custom_nodes/FreeU_Advanced/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..dfe0770424b2a19faf507a501ebfc23be8f54e7b --- /dev/null +++ b/custom_nodes/FreeU_Advanced/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git 
a/custom_nodes/FreeU_Advanced/.gitignore b/custom_nodes/FreeU_Advanced/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d9005f2cc7fc4e65f14ed5518276007c08cf2fd0 --- /dev/null +++ b/custom_nodes/FreeU_Advanced/.gitignore @@ -0,0 +1,152 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/
diff --git a/custom_nodes/FreeU_Advanced/LICENSE b/custom_nodes/FreeU_Advanced/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..302dbfa90d5585377679e8fe49dee870dc05e822
--- /dev/null
+++ b/custom_nodes/FreeU_Advanced/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Jordan Thompson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/custom_nodes/FreeU_Advanced/README.md b/custom_nodes/FreeU_Advanced/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..06caa2b7b5054f7ce169d3016a707ae9019ffd0a
--- /dev/null
+++ b/custom_nodes/FreeU_Advanced/README.md
@@ -0,0 +1,54 @@
+# FreeU Advanced Plus
+Let's say you and I grab dinner and a movie after lunch? 🌃📺😏
+
+![image](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/c1dc2ec9-e6a3-4d2d-bf81-697e5d5aabcb)
+
+### Example of default node settings applied across blocks.
+![default_block_examples](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/d01dea23-7ad6-4b89-ba43-70412afbd75f)
+![default_block_examples_2](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/489a9990-76f7-4f09-b95a-9d54f7a319db)
+![default_block_examples_3](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/3723f54b-4af8-4a09-9771-22db16328773)
+![default_block_examples_4](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/d193d3e1-0e3e-4bdd-bdda-c5a4dffa0112)
+![default_block_examples_5](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/a2612c22-160a-41c9-b189-b2201332eb78)
+![default_block_examples_6](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/171b0bad-1c39-420d-a30a-be11f053168a)
+![default_block_examples_7](https://github.com/WASasquatch/FreeU_Advanced/assets/1151589/32df5124-418d-418c-97ee-6b76d6bfcb6c)
+
+## Input Parameters
+
+- `model` (`MODEL`): Model to patch
+- `target_block` (`COMBO`): Which block to target; `input_block`, `middle_block`, `output_block`, or `all`
+- `multiscale_mode` (`COMBO`): A list of available multiscale modes:
+  - `["Default", "Bandpass", "Low-Pass", "High-Pass", "Pass-Through", "Gaussian-Blur", "Edge-Enhancement", "Sharpen", "Multi-Bandpass", "Multi-Low-Pass", "Multi-High-Pass", "Multi-Pass-Through", "Multi-Gaussian-Blur", "Multi-Edge-Enhancement", "Multi-Sharpen"]`
+- `multiscale_strength` (`FLOAT`, Default: 1.0, Range: [0.0, 1.0], Step: 0.001): Strength of scaling
+- `slice_b1` (`INT`, Default: 640, Range: [64, 1280], Step: 1): The size of the array slice for the b1 operation
+- `slice_b2` (`INT`, Default: 320, Range: [64, 640], Step: 1): The size of the array slice for the b2 operation
+- `b1` (`FLOAT`, Default: 1.1, Range: [0.0, 10.0], Step: 0.001): `b1` output multiplier
+- `b2` (`FLOAT`, Default: 1.2, Range: [0.0, 10.0], Step: 0.001): `b2` output multiplier
+- `s1` (`FLOAT`, Default: 0.9, Range: [0.0, 10.0], Step: 0.001): `s1` Fourier transform scale strength
+- `s2` (`FLOAT`, Default: 0.2, Range: [0.0, 10.0], Step: 0.001): `s2` Fourier transform scale strength (see the sketch at the end of this README for how these scales act in frequency space)
+
+### Optional Parameters
+
+- `b1_mode` (`COMBO`): Blending modes for the `b1` multiplied result.
+  - `['bislerp', 'colorize', 'cosine interp', 'cuberp', 'hslerp', 'inject', 'lerp', 'linear dodge']`
+- `b1_blend` (`FLOAT`, Default: 1.0, Range: [0.0, 100.0], Step: 0.001): Blending strength for `b1`.
+- `b2_mode` (`COMBO`): Blending modes for the `b2` multiplied result.
+  - `['bislerp', 'colorize', 'cosine interp', 'cuberp', 'hslerp', 'inject', 'lerp', 'linear dodge']`
+- `b2_blend` (`FLOAT`, Default: 1.0, Range: [0.0, 100.0], Step: 0.001): Blending strength for `b2`.
+- `threshold` (`INT`, Default: 1, Range: [1, 10], Step: 1): The exposed threshold value of the Fourier transform function.
+- `use_override_scales` (`COMBO`): `"true"` or `"false"`; whether to use `override_scales`
+- `override_scales` (`STRING`, Default: [Multiline String]): Override scales. Create custom scales and experiment with results.
+  - Each non-comment line is a `threshold, scale` pair; for example, `10, 1.5` reproduces the `multiscale_mode` effect `Sharpen`.
+  - You can use `#`, `//`, and `!` to comment out lines.
+
+### FreeU BibTex
+```
+@article{Si2023FreeU,
+  author  = {Chenyang Si and Ziqi Huang and Yuming Jiang and Ziwei Liu},
+  title   = {FreeU: Free Lunch in Diffusion U-Net},
+  journal = {arXiv},
+  year    = {2023},
+}
+```
+## :newspaper_roll: License
+
+Distributed under the MIT License. See `LICENSE` for more information.
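+
+## Fourier Scaling Sketch
+
+As a rough illustration of what the frequency-domain scaling above does (a minimal sketch under assumptions, not this node's exact code path; `fourier_scale` and its defaults are invented for the example), a scale factor is applied to a centered low-frequency box of a feature map's spectrum, while everything outside the box passes through unchanged:
+
+```
+import torch
+import torch.fft as fft
+
+def fourier_scale(x, threshold=1, scale=0.9):
+    # x: a (B, C, H, W) feature map
+    x_freq = fft.fftshift(fft.fftn(x.float(), dim=(-2, -1)), dim=(-2, -1))
+    B, C, H, W = x_freq.shape
+    mask = torch.ones((B, C, H, W), device=x.device)
+    crow, ccol = H // 2, W // 2
+    # scale the low frequencies inside a centered box of half-size `threshold`
+    mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
+    x_freq = x_freq * mask
+    return fft.ifftn(fft.ifftshift(x_freq, dim=(-2, -1)), dim=(-2, -1)).real.to(x.dtype)
+```
+
+Scales below 1.0 suppress the low frequencies (a high-pass-like effect, as with `s1`/`s2` values under 1.0), while scales above 1.0 emphasize them; each `threshold, scale` pair in `override_scales` contributes an additional mask of this same form.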
diff --git a/custom_nodes/FreeU_Advanced/__init__.py b/custom_nodes/FreeU_Advanced/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d721463be66961a2f388b3a756760d167ea5d510 --- /dev/null +++ b/custom_nodes/FreeU_Advanced/__init__.py @@ -0,0 +1,3 @@ +from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] \ No newline at end of file diff --git a/custom_nodes/FreeU_Advanced/__pycache__/__init__.cpython-311.pyc b/custom_nodes/FreeU_Advanced/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65e795f44a44b99960a89b54b1a4661ec01ede8c Binary files /dev/null and b/custom_nodes/FreeU_Advanced/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/FreeU_Advanced/__pycache__/nodes.cpython-311.pyc b/custom_nodes/FreeU_Advanced/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..336536a6a1e0cb1b859353a881042fc143e3c0c3 Binary files /dev/null and b/custom_nodes/FreeU_Advanced/__pycache__/nodes.cpython-311.pyc differ diff --git a/custom_nodes/FreeU_Advanced/nodes.py b/custom_nodes/FreeU_Advanced/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..11bcc9f9bb6a5bba60fc4685ddcc2c7cb1c594ca --- /dev/null +++ b/custom_nodes/FreeU_Advanced/nodes.py @@ -0,0 +1,375 @@ +#code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License) + +import torch +import torch as th +import torch.fft as fft +import torch.nn.functional as F +import math + +def normalize(latent, target_min=None, target_max=None): + """ + Normalize a tensor `latent` between `target_min` and `target_max`. + + Args: + latent (torch.Tensor): The input tensor to be normalized. + target_min (float, optional): The minimum value after normalization. + - When `None` min will be tensor min range value. + target_max (float, optional): The maximum value after normalization. + - When `None` max will be tensor max range value. + + Returns: + torch.Tensor: The normalized tensor + """ + min_val = latent.min() + max_val = latent.max() + + if target_min is None: + target_min = min_val + if target_max is None: + target_max = max_val + + normalized = (latent - min_val) / (max_val - min_val) + scaled = normalized * (target_max - target_min) + target_min + return scaled + +def hslerp(a, b, t): + """ + Perform Hybrid Spherical Linear Interpolation (HSLERP) between two tensors. + + This function combines two input tensors `a` and `b` using HSLERP, which is a specialized + interpolation method for smooth transitions between orientations or colors. + + Args: + a (tensor): The first input tensor. + b (tensor): The second input tensor. + t (float): The blending factor, a value between 0 and 1 that controls the interpolation. + + Returns: + tensor: The result of HSLERP interpolation between `a` and `b`. + + Note: + HSLERP provides smooth transitions between orientations or colors, particularly useful + in applications like image processing and 3D graphics. 
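+
+    Example (an illustrative shape check; the values are assumptions, not from the node's tests):
+        >>> a = torch.zeros(1, 4, 8, 8)
+        >>> b = torch.ones(1, 4, 8, 8)
+        >>> hslerp(a, b, 0.5).shape
+        torch.Size([1, 4, 8, 8])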
+    """
+    if a.shape != b.shape:
+        raise ValueError("Input tensors a and b must have the same shape.")
+
+    num_channels = a.size(1)
+
+    interpolation_tensor = torch.zeros(1, num_channels, 1, 1, device=a.device, dtype=a.dtype)
+    interpolation_tensor[0, 0, 0, 0] = 1.0
+
+    result = (1 - t) * a + t * b
+
+    if t < 0.5:
+        result += (torch.norm(b - a, dim=1, keepdim=True) / 6) * interpolation_tensor
+    else:
+        result -= (torch.norm(b - a, dim=1, keepdim=True) / 6) * interpolation_tensor
+
+    return result
+
+blending_modes = {
+    # Args:
+    #  - a (tensor): Latent input 1
+    #  - b (tensor): Latent input 2
+    #  - t (float): Blending factor
+
+    # Interpolates between tensors a and b using normalized linear interpolation.
+    'bislerp': lambda a, b, t: normalize((1 - t) * a + t * b),
+    # Transfers the color from `b` to `a` by `t` factor.
+    'colorize': lambda a, b, t: a + (b - a) * t,
+    # Interpolates between tensors a and b using cosine interpolation.
+    'cosine interp': lambda a, b, t: (a + b - (a - b) * torch.cos(t * torch.tensor(math.pi))) / 2,
+    # Interpolates between tensors a and b using cubic interpolation.
+    'cuberp': lambda a, b, t: a + (b - a) * (3 * t ** 2 - 2 * t ** 3),
+    # Interpolates between tensors a and b using normalized linear interpolation,
+    # with a twist when t is greater than or equal to 0.5.
+    'hslerp': hslerp,
+    # Adds tensor b to tensor a, scaled by t.
+    'inject': lambda a, b, t: a + b * t,
+    # Interpolates between tensors a and b using linear interpolation.
+    'lerp': lambda a, b, t: (1 - t) * a + t * b,
+    # Simulates a brightening effect by adding tensor b to tensor a, scaled by t.
+    'linear dodge': lambda a, b, t: normalize(a + b * t),
+}
+
+mscales = {
+    "Default": None,
+    "Bandpass": [
+        (5, 0.0),   # Low-pass filter
+        (15, 1.0),  # Pass-through filter (allows mid-range frequencies)
+        (25, 0.0),  # High-pass filter
+    ],
+    "Low-Pass": [
+        (10, 1.0),  # Allows low-frequency components, suppresses high-frequency components
+    ],
+    "High-Pass": [
+        (10, 0.0),  # Suppresses low-frequency components, allows high-frequency components
+    ],
+    "Pass-Through": [
+        (10, 1.0),  # Passes all frequencies unchanged, no filtering
+    ],
+    "Gaussian-Blur": [
+        (10, 0.5),  # Blurs the image by allowing a range of frequencies with a Gaussian shape
+    ],
+    "Edge-Enhancement": [
+        (10, 2.0),  # Enhances edges and high-frequency features while suppressing low-frequency details
+    ],
+    "Sharpen": [
+        (10, 1.5),  # Increases the sharpness of the image by emphasizing high-frequency components
+    ],
+    "Multi-Bandpass": [
+        [(5, 0.0), (15, 1.0), (25, 0.0)],  # Multi-scale bandpass filter
+    ],
+    "Multi-Low-Pass": [
+        [(5, 1.0), (10, 0.5), (15, 0.2)],  # Multi-scale low-pass filter
+    ],
+    "Multi-High-Pass": [
+        [(5, 0.0), (10, 0.5), (15, 0.8)],  # Multi-scale high-pass filter
+    ],
+    "Multi-Pass-Through": [
+        [(5, 1.0), (10, 1.0), (15, 1.0)],  # Pass-through at different scales
+    ],
+    "Multi-Gaussian-Blur": [
+        [(5, 0.5), (10, 0.8), (15, 0.2)],  # Multi-scale Gaussian blur
+    ],
+    "Multi-Edge-Enhancement": [
+        [(5, 1.2), (10, 1.5), (15, 2.0)],  # Multi-scale edge enhancement
+    ],
+    "Multi-Sharpen": [
+        [(5, 1.5), (10, 2.0), (15, 2.5)],  # Multi-scale sharpening
+    ],
+}
+
+# forward function from comfy.ldm.modules.diffusionmodules.openaimodel
+# Hopefully temporary replacement
+def __temp__forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs):
+    """
+    Apply the model to an input batch.
+    :param x: an [N x C x ...] Tensor of inputs.
+    :param timesteps: a 1-D batch of timesteps.
+ :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. + """ + transformer_options["original_shape"] = list(x.shape) + transformer_options["current_index"] = 0 + transformer_patches = transformer_options.get("patches", {}) + + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(self.dtype) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for id, module in enumerate(self.input_blocks): + transformer_options["block"] = ("input", id) + h = forward_timestep_embed(module, h, emb, context, transformer_options) + if control is not None and 'input' in control and len(control['input']) > 0: + ctrl = control['input'].pop() + if ctrl is not None: + h += ctrl + hs.append(h) + + hsp = hs + if "input_block_patch" in transformer_patches: + patch = transformer_patches["input_block_patch"] + for p in patch: + h, hsp = p(h, hsp, transformer_options) + del hsp + + transformer_options["block"] = ("middle", 0) + h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) + if control is not None and 'middle' in control and len(control['middle']) > 0: + ctrl = control['middle'].pop() + if ctrl is not None: + h += ctrl + + hsp = [h] + if "middle_block_patch" in transformer_patches: + patch = transformer_patches["middle_block_patch"] + for p in patch: + h, hsp = p(h, hsp, transformer_options) + del hsp + + for id, module in enumerate(self.output_blocks): + transformer_options["block"] = ("output", id) + hsp = hs.pop() + if control is not None and 'output' in control and len(control['output']) > 0: + ctrl = control['output'].pop() + if ctrl is not None: + hsp += ctrl + + if "output_block_patch" in transformer_patches: + patch = transformer_patches["output_block_patch"] + for p in patch: + h, hsp = p(h, hsp, transformer_options) + + h = th.cat([h, hsp], dim=1) + del hsp + if len(hs) > 0: + output_shape = hs[-1].shape + else: + output_shape = None + h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + +print("Patching UNetModel.forward") +import comfy.ldm.modules.diffusionmodules.openaimodel +from comfy.ldm.modules.diffusionmodules.openaimodel import forward_timestep_embed +from comfy.ldm.modules.diffusionmodules.util import timestep_embedding +comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = __temp__forward +if comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel.forward is __temp__forward: + print("UNetModel.forward has been successfully patched.") +else: + print("UNetModel.forward patching failed.") + +def Fourier_filter(x, threshold, scale, scales=None, strength=1.0): + # FFT + if isinstance(x, list): + x = x[0] + if isinstance(x, torch.Tensor): + x_freq = fft.fftn(x.float(), dim=(-2, -1)) + x_freq = fft.fftshift(x_freq, dim=(-2, -1)) + + B, C, H, W = x_freq.shape + mask = torch.ones((B, C, H, W), device=x.device) + + crow, ccol = H // 2, W // 2 + mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale + + if scales is not None: + if isinstance(scales[0], tuple): + # Single-scale mode + for scale_params in 
scales:
+                    if len(scale_params) == 2:
+                        scale_threshold, scale_value = scale_params
+                        scaled_scale_value = scale_value * strength
+                        scale_mask = torch.ones((B, C, H, W), device=x.device)
+                        scale_mask[..., crow - scale_threshold:crow + scale_threshold, ccol - scale_threshold:ccol + scale_threshold] = scaled_scale_value
+                        mask = mask + (scale_mask - mask) * strength
+            else:
+                # Multi-scale mode
+                for scale_params in scales:
+                    if isinstance(scale_params, list):
+                        for scale_tuple in scale_params:
+                            if len(scale_tuple) == 2:
+                                scale_threshold, scale_value = scale_tuple
+                                scaled_scale_value = scale_value * strength
+                                scale_mask = torch.ones((B, C, H, W), device=x.device)
+                                scale_mask[..., crow - scale_threshold:crow + scale_threshold, ccol - scale_threshold:ccol + scale_threshold] = scaled_scale_value
+                                mask = mask + (scale_mask - mask) * strength
+
+        x_freq = x_freq * mask
+
+        # IFFT
+        x_freq = fft.ifftshift(x_freq, dim=(-2, -1))
+        x_filtered = fft.ifftn(x_freq, dim=(-2, -1)).real
+
+        return x_filtered.to(x.dtype)
+
+    return x
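+
+# Illustrative sketch of the mask logic above (assumes the default, preset-free
+# path, i.e. scales=None): with threshold=1 and scale=0.9, only the 2x2 block
+# of frequencies at the centre of the shifted spectrum is damped to 0.9x;
+# everything else passes through unchanged.
+#
+#   x = torch.randn(1, 4, 64, 64)
+#   filtered = Fourier_filter(x, threshold=1, scale=0.9)
+#   assert filtered.shape == x.shape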
Using default scale.") + scales_list = None + + scales = mscales[multiscale_mode] if use_override_scales == "false" else scales_list + + print(f"FreeU Plate Portions: {slice_b1} over {slice_b2}") + print(f"FreeU Multi-Scales: {scales}") + + def block_patch(h, hsp, transformer_options): + if h.shape[1] == 1280: + h_t = h[:,:slice_b1] + h_r = h_t * b1 + h[:,:slice_b1] = blending_modes[b1_mode](h_t, h_r, b1_blend) + hsp = Fourier_filter(hsp, threshold=threshold, scale=s1, scales=scales, strength=multiscale_strength) + if h.shape[1] == 640: + h_t = h[:,:slice_b2] + h_r = h_t * b2 + h[:,:slice_b2] = blending_modes[b2_mode](h_t, h_r, b2_blend) + hsp = Fourier_filter(hsp, threshold=threshold, scale=s2, scales=scales, strength=multiscale_strength) + return h, hsp + + print(f"Patching {target_block}") + + m = model.clone() + if target_block == "all" or target_block == "output_block": + m.set_model_output_block_patch(block_patch) + if target_block == "all" or target_block == "input_block": + m.set_model_patch(block_patch, "input_block_patch") + if target_block == "all" or target_block == "middle_block": + m.set_model_patch(block_patch, "middle_block_patch") + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "FreeU (Advanced)": WAS_FreeU, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "FreeU (Advanced)": "FreeU (Advanced Plus)", +} diff --git a/custom_nodes/IPAdapter-ComfyUI/LICENSE b/custom_nodes/IPAdapter-ComfyUI/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/IPAdapter-ComfyUI/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/IPAdapter-ComfyUI/README.md b/custom_nodes/IPAdapter-ComfyUI/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..de1354e942fa93a662923a4fa0d18cdb109ce093
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/README.md
@@ -0,0 +1,49 @@
+> [!IMPORTANT]
+> **I decided to move my development to the better [cubiq's repository](https://github.com/cubiq/ComfyUI_IPAdapter_plus).**
+>
+> **This repository may not be available anymore due to future updates of ComfyUI.**
+
+
+
+# IPAdapter-ComfyUI
+[ComfyUI](https://github.com/comfyanonymous/ComfyUI) custom nodes for [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter).
+
+# Install
+
+1. Clone this repository into `custom_nodes`.
+2. Put an IP-Adapter model (e.g. [the SDv1.5 model](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter_sd15.bin)) into `IPAdapter-ComfyUI/models`.
+3. Put a CLIP vision model (e.g. [the SDv1.5 model](https://huggingface.co/h94/IP-Adapter/blob/main/models/image_encoder/pytorch_model.bin)) into `ComfyUI/models/clip_vision`.
+
+# Usage
+See `ip-adapter.json` for an example workflow.
+
+## Input
++ **model**: Connect the model. The order relative to LoRALoader and similar nodes makes no difference.
++ **image**: Connect the reference image.
++ **clip_vision**: Connect the output of `Load CLIP Vision`.
++ **mask**: Optional. Connecting a mask restricts the region where the adapter is applied. It must have the same resolution as the generated image.
++ **weight**: The strength of the effect.
++ **model_name**: The file name of the model to use.
++ **dtype**: If black images are generated, select `fp32`. Generation time barely changes, so it may be fine to leave this at `fp32` all the time.
+
+## Output
++ **MODEL**: Connect to KSampler or similar.
++ **CLIP_VISION_OUTPUT**: Usually you can ignore this. It can save redundant computation when using Revision and the like.
+
+## Multiple conditions
+By chaining the nodes naturally, you can feed in multiple images. Combined with masks, you can even split the conditioning between the left and right halves (see the mask example at the end of this README).
+![image](https://github.com/laksjdjf/IPAdapter-ComfyUI/assets/22386664/c2282aee-ab98-488d-936e-1787994e957f)
+The problem is that the background gets split as well ^^;
+
+# Hint
++ Input images are automatically made square by center cropping. To avoid this, either crop them beforehand or use `preprocess/furusu Image crop`, which offers `padding` (pad to a square) and `face_crop` (crop around the character's face). The [lbpcascade_animeface.xml](https://github.com/nagadomi/lbpcascade_animeface) file required by `face_crop` sometimes cannot be downloaded automatically; in that case, place it manually in the root of this repository.
+
+# Bug
++ ~~Apply ControlNet is buggy for some reason, so use Apply ControlNet (Advanced) instead.~~ Probably fixed now.
+
+# Models
++ official models: https://huggingface.co/h94/IP-Adapter
++ my models: https://huggingface.co/furusu/IP-Adapter
+
+# CITATION
+IP-Adapter: https://github.com/tencent-ailab/IP-Adapter
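+
+# Mask example
+A minimal sketch of the left/right split described above (illustrative; assumes ComfyUI-style masks, i.e. single-channel float tensors at the generation resolution):
+
+```python
+import torch
+
+# Complementary masks for a 640x896 (width x height) generation:
+# feed `left` to one IPAdapter node and `right` to the other.
+height, width = 896, 640
+left = torch.zeros(height, width)
+left[:, : width // 2] = 1.0  # apply on the left half
+right = 1.0 - left           # apply on the right half
+```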
diff --git a/custom_nodes/IPAdapter-ComfyUI/__init__.py b/custom_nodes/IPAdapter-ComfyUI/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6926c0fb40c0c8a537d32a43876bda2bef79167
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/__init__.py
@@ -0,0 +1,14 @@
+from .image_preprocessor import ImageCrop
+from .ip_adapter import IPAdapter
+
+NODE_CLASS_MAPPINGS = {
+    "IPAdapter": IPAdapter,
+    "ImageCrop": ImageCrop,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "IPAdapter": "Load IPAdapter",
+    "ImageCrop": "furusu Image Crop",
+}
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
\ No newline at end of file
diff --git a/custom_nodes/IPAdapter-ComfyUI/__pycache__/__init__.cpython-311.pyc b/custom_nodes/IPAdapter-ComfyUI/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fcc054baacdc178db1d36a1a92925308c6cb75c
Binary files /dev/null and b/custom_nodes/IPAdapter-ComfyUI/__pycache__/__init__.cpython-311.pyc differ
diff --git a/custom_nodes/IPAdapter-ComfyUI/__pycache__/image_preprocessor.cpython-311.pyc b/custom_nodes/IPAdapter-ComfyUI/__pycache__/image_preprocessor.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3049c8b68749733ca3854476f6bd2eae29d9513
Binary files /dev/null and b/custom_nodes/IPAdapter-ComfyUI/__pycache__/image_preprocessor.cpython-311.pyc differ
diff --git a/custom_nodes/IPAdapter-ComfyUI/__pycache__/ip_adapter.cpython-311.pyc b/custom_nodes/IPAdapter-ComfyUI/__pycache__/ip_adapter.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19bec6dd2bdcfcf04fc0f847005569f04f7f0b9c
Binary files /dev/null and b/custom_nodes/IPAdapter-ComfyUI/__pycache__/ip_adapter.cpython-311.pyc differ
diff --git a/custom_nodes/IPAdapter-ComfyUI/__pycache__/resampler.cpython-311.pyc b/custom_nodes/IPAdapter-ComfyUI/__pycache__/resampler.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84011c77c60bef9415f97b1f62a622842fd4a03f
Binary files /dev/null and b/custom_nodes/IPAdapter-ComfyUI/__pycache__/resampler.cpython-311.pyc differ
diff --git a/custom_nodes/IPAdapter-ComfyUI/image_preprocessor.py b/custom_nodes/IPAdapter-ComfyUI/image_preprocessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..31c509cf48e144119cb713d82f40767696c341a2
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/image_preprocessor.py
@@ -0,0 +1,92 @@
+import torch
+import torch.nn.functional as F
+import os
+import subprocess
+
+CV2_AVAILABLE = True
+try:
+    import cv2
+except ImportError:
+    print("OpenCV is not installed, so face cropping is not available.")
+    CV2_AVAILABLE = False
+
+CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
+DETECTOR_FILE = "lbpcascade_animeface.xml"
+
+if not os.path.exists(os.path.join(CURRENT_DIR, DETECTOR_FILE)):
+    print("Downloading anime face detector...")
+    try:
+        # Assumes `wget` is available on PATH; see the README for the manual fallback.
+        subprocess.run(["wget", "https://raw.githubusercontent.com/nagadomi/lbpcascade_animeface/master/lbpcascade_animeface.xml", "-P", CURRENT_DIR], check=True)
+    except Exception:
+        print(f"Failed to download lbpcascade_animeface.xml, so please download it into {CURRENT_DIR}.")
+        CV2_AVAILABLE = False
+
+CROP_MODES = ["padding", "face_crop", "none"] if CV2_AVAILABLE else ["padding", "none"]
+
+def image_to_numpy(image):
+    image = image.squeeze(0) * 255
+    return image.numpy().astype("uint8")
+
+def numpy_to_image(image):
+    image = torch.tensor(image).float() / 255
+    return image.unsqueeze(0)
+
+def pad_to_square(tensor):
+    tensor = tensor.squeeze(0).permute(2, 0, 1)
+    _, h, w = tensor.shape
+
+    target_length = max(h, w)
+
+    pad_l = (target_length - w) // 2
+    pad_r = (target_length - w) - pad_l
+
+    pad_t = (target_length - h) // 2
+    pad_b = (target_length - h) - pad_t
+
+    padded_tensor = F.pad(tensor, (pad_l, pad_r, pad_t, pad_b), mode="constant", value=0)
+
+    return padded_tensor.permute(1, 2, 0).unsqueeze(0)
+
+def face_crop(image):
+    image = image_to_numpy(image)
+    face_cascade = cv2.CascadeClassifier(os.path.join(CURRENT_DIR, DETECTOR_FILE))
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+
+    w, h = image.shape[1], image.shape[0]
+
+    target_length = min(w, h)
+    fx, fy, fw, fh = (0, 0, w, h) if len(faces) == 0 else faces[0]
+
+    # Offsets that centre the square crop on the detected face.
+    dx = (target_length - fw) // 2
+    dy = (target_length - fh) // 2
+
+    target_x = 0 if w < h else max(0, fx - dx)
+    target_y = 0 if w > h else max(0, fy - dy)
+
+    image = image[target_y:target_y+target_length, target_x:target_x+target_length]
+    image = numpy_to_image(image)
+
+    return image
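+
+# Illustrative usage sketch: both helpers take and return ComfyUI-style image
+# tensors of shape [1, H, W, C] with float values in [0, 1], e.g.
+#
+#   img = torch.rand(1, 480, 640, 3)
+#   square = pad_to_square(img)  # -> [1, 640, 640, 3], zero-padded
+#   face = face_crop(img)        # -> [1, 480, 480, 3] crop (centred on a face
+#                                #    if one is detected, corner crop otherwise)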
+
+class ImageCrop:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "image": ("IMAGE", ),
+                "mode": (CROP_MODES, ),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "preprocess"
+    CATEGORY = "image/preprocessors"
+
+    def preprocess(self, image, mode):
+        if mode == "padding":
+            image = pad_to_square(image)
+        elif mode == "face_crop":
+            image = face_crop(image)
+
+        return (image,)
\ No newline at end of file
diff --git a/custom_nodes/IPAdapter-ComfyUI/ip-adapter.json b/custom_nodes/IPAdapter-ComfyUI/ip-adapter.json
new file mode 100644
index 0000000000000000000000000000000000000000..1bd0a7595b80582ff661224cab1fc59ac724f3f6
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/ip-adapter.json
@@ -0,0 +1,523 @@
+{
+  "last_node_id": 33,
+  "last_link_id": 78,
+  "nodes": [
+    {
+      "id": 8,
+      "type": "VAEDecode",
+      "pos": [
+        1260,
+        190
+      ],
+      "size": {
+        "0": 210,
+        "1": 46
+      },
+      "flags": {},
+      "order": 8,
+
"mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 434, + 190 + ], + "size": { + "0": 407.8851318359375, + "1": 84.13611602783203 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 3 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "masterpiece best quality, 1girl" + ] + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 440, + 320 + ], + "size": { + "0": 421.13385009765625, + "1": 108.7844009399414 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "low quality,jpeg artifacts,signature,watermark,username,blurry,missing fingers,missing arms,Humpbacked,shadow\n" + ] + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + 32, + 188 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 75 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 3, + 5 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 8 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "Counterfeit-V3.0_fp16.safetensors" + ] + }, + { + "id": 11, + "type": "LoadImage", + "pos": [ + 30, + 491 + ], + "size": { + "0": 366.6781005859375, + "1": 583.0964965820312 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 76 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "segsegs (1).png", + "image" + ] + }, + { + "id": 13, + "type": "CLIPVisionLoader", + "pos": [ + 31, + 358 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "CLIP_VISION", + "type": "CLIP_VISION", + "links": [ + 74 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPVisionLoader" + }, + "widgets_values": [ + "pytorch_model.fp16.bin" + ] + }, + { + "id": 33, + "type": "IPAdapter", + "pos": [ + 446, + 483 + ], + "size": [ + 415.49357104492185, + 170.70820529174807 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 75 + }, + { + "name": "image", + "type": "IMAGE", + "link": 76 + }, + { + "name": "clip_vision", + "type": "CLIP_VISION", + "link": 74 + }, + { + "name": "mask", + "type": "MASK", + "link": null + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 78 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP_VISION_OUTPUT", + "type": "CLIP_VISION_OUTPUT", + "links": null, + "shape": 3 + } + 
], + "properties": { + "Node name for S&R": "IPAdapter" + }, + "widgets_values": [ + 1, + "ip-adapter-plus_sd15.bin", + "fp16" + ] + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 900, + 193 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 640, + 896, + 1 + ] + }, + { + "id": 9, + "type": "SaveImage", + "pos": [ + 1263, + 295 + ], + "size": [ + 686.0428685974125, + 987.6802014801028 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": {}, + "widgets_values": [ + "ComfyUI" + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 897, + 353 + ], + "size": { + "0": 310.4039306640625, + "1": 474 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 78 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 4624, + "increment", + 30, + 7, + "euler", + "normal", + 1 + ] + } + ], + "links": [ + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 3, + 4, + 1, + 6, + 0, + "CLIP" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 5, + 4, + 1, + 7, + 0, + "CLIP" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 8, + 4, + 2, + 8, + 1, + "VAE" + ], + [ + 9, + 8, + 0, + 9, + 0, + "IMAGE" + ], + [ + 74, + 13, + 0, + 33, + 2, + "CLIP_VISION" + ], + [ + 75, + 4, + 0, + 33, + 0, + "MODEL" + ], + [ + 76, + 11, + 0, + 33, + 1, + "IMAGE" + ], + [ + 78, + 33, + 0, + 3, + 0, + "MODEL" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/IPAdapter-ComfyUI/ip_adapter.py b/custom_nodes/IPAdapter-ComfyUI/ip_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..24602c602d3858793f0b208a4c06e6614b0c4767 --- /dev/null +++ b/custom_nodes/IPAdapter-ComfyUI/ip_adapter.py @@ -0,0 +1,319 @@ +import torch +import os +from .resampler import Resampler + +import contextlib +import comfy.model_management +from comfy.ldm.modules.attention import optimized_attention +from comfy.clip_vision import clip_preprocess + +CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) + +# attention_channels of input, output, middle +SD_V12_CHANNELS = [320] * 4 + [640] * 4 + [1280] * 4 + [1280] * 6 + [640] * 6 + [320] * 6 + [1280] * 2 +SD_XL_CHANNELS = [640] * 8 + [1280] * 40 + [1280] * 60 + [640] * 12 + [1280] * 20 + +def get_file_list(path): + return [f for f in os.listdir(path) if f.endswith('.bin') or f.endswith('.safetensors')] + +def set_model_patch_replace(model, patch_kwargs, key): + to = model.model_options["transformer_options"] + if "patches_replace" not in to: + to["patches_replace"] = {} + if "attn2" not in to["patches_replace"]: + to["patches_replace"]["attn2"] = {} + if key not in to["patches_replace"]["attn2"]: + patch = CrossAttentionPatch(**patch_kwargs) + to["patches_replace"]["attn2"][key] = patch + else: + 
to["patches_replace"]["attn2"][key].set_new_condition(**patch_kwargs) + +def load_ipadapter(ckpt_path): + model = comfy.utils.load_torch_file(ckpt_path, safe_load=True) + + if ckpt_path.lower().endswith(".safetensors"): + st_model = {"image_proj": {}, "ip_adapter": {}} + for key in model.keys(): + if key.startswith("image_proj."): + st_model["image_proj"][key.replace("image_proj.", "")] = model[key] + elif key.startswith("ip_adapter."): + st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key] + # sort keys + model = {"image_proj": st_model["image_proj"], "ip_adapter": {}} + sorted_keys = sorted(st_model["ip_adapter"].keys(), key=lambda x: int(x.split(".")[0])) + for key in sorted_keys: + model["ip_adapter"][key] = st_model["ip_adapter"][key] + st_model = None + + if not "ip_adapter" in model.keys() or not model["ip_adapter"]: + raise Exception("invalid IPAdapter model {}".format(ckpt_path)) + + return model + + +class ImageProjModel(torch.nn.Module): + """Projection Model""" + def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4): + super().__init__() + + self.cross_attention_dim = cross_attention_dim + self.clip_extra_context_tokens = clip_extra_context_tokens + self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim) + self.norm = torch.nn.LayerNorm(cross_attention_dim) + + def forward(self, image_embeds): + embeds = image_embeds + clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim) + clip_extra_context_tokens = self.norm(clip_extra_context_tokens) + return clip_extra_context_tokens + +# Cross Attention to_k, to_v for IPAdapter +class To_KV(torch.nn.Module): + def __init__(self, cross_attention_dim): + super().__init__() + + channels = SD_XL_CHANNELS if cross_attention_dim == 2048 else SD_V12_CHANNELS + self.to_kvs = torch.nn.ModuleList([torch.nn.Linear(cross_attention_dim, channel, bias=False) for channel in channels]) + + def load_state_dict(self, state_dict): + # input -> output -> middle + for i, key in enumerate(state_dict.keys()): + self.to_kvs[i].weight.data = state_dict[key] + +class IPAdapterModel(torch.nn.Module): + def __init__(self, state_dict, plus, cross_attention_dim=768, clip_embeddings_dim=1024, clip_extra_context_tokens=4, sdxl_plus=False): + super().__init__() + self.plus = plus + if self.plus: + self.image_proj_model = Resampler( + dim=1280 if sdxl_plus else cross_attention_dim, + depth=4, + dim_head=64, + heads=20 if sdxl_plus else 12, + num_queries=clip_extra_context_tokens, + embedding_dim=clip_embeddings_dim, + output_dim=cross_attention_dim, + ff_mult=4 + ) + else: + self.image_proj_model = ImageProjModel( + cross_attention_dim=cross_attention_dim, + clip_embeddings_dim=clip_embeddings_dim, + clip_extra_context_tokens=clip_extra_context_tokens + ) + + self.image_proj_model.load_state_dict(state_dict["image_proj"]) + self.ip_layers = To_KV(cross_attention_dim) + self.ip_layers.load_state_dict(state_dict["ip_adapter"]) + + @torch.inference_mode() + def get_image_embeds(self, cond, uncond): + image_prompt_embeds = self.image_proj_model(cond) + uncond_image_prompt_embeds = self.image_proj_model(uncond) + return image_prompt_embeds, uncond_image_prompt_embeds + + +class IPAdapter: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL", ), + "image": ("IMAGE", ), + "clip_vision": ("CLIP_VISION", ), + "weight": ("FLOAT", { + "default": 1, + "min": -1, #Minimum value + "max": 
3, #Maximum value
+                    "step": 0.05 #Slider's step
+                }),
+                "model_name": (get_file_list(os.path.join(CURRENT_DIR,"models")), ),
+                "dtype": (["fp16", "fp32"], ),
+            },
+            "optional": {
+                "mask": ("MASK",),
+            }
+        }
+
+    RETURN_TYPES = ("MODEL", "CLIP_VISION_OUTPUT")
+    FUNCTION = "adapter"
+    CATEGORY = "loaders"
+
+    def adapter(self, model, image, clip_vision, weight, model_name, dtype, mask=None):
+        device = comfy.model_management.get_torch_device()
+        self.dtype = torch.float32 if dtype == "fp32" or device.type == "mps" else torch.float16
+        self.weight = weight # ip_adapter scale
+
+        ip_state_dict = load_ipadapter(os.path.join(CURRENT_DIR, "models", model_name))
+        self.plus = "latents" in ip_state_dict["image_proj"]
+
+        # cross_attention_dim is equal to text_encoder output
+        self.cross_attention_dim = ip_state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]
+
+        self.sdxl = self.cross_attention_dim == 2048
+        self.sdxl_plus = self.sdxl and self.plus
+
+        # number of tokens of ip_adapter embedding
+        if self.plus:
+            self.clip_extra_context_tokens = ip_state_dict["image_proj"]["latents"].shape[1]
+        else:
+            self.clip_extra_context_tokens = ip_state_dict["image_proj"]["proj.weight"].shape[0] // self.cross_attention_dim
+
+        cond, uncond, outputs = self.clip_vision_encode(clip_vision, image, self.plus)
+        self.clip_embeddings_dim = cond.shape[-1]
+
+        self.ipadapter = IPAdapterModel(
+            ip_state_dict,
+            plus = self.plus,
+            cross_attention_dim = self.cross_attention_dim,
+            clip_embeddings_dim = self.clip_embeddings_dim,
+            clip_extra_context_tokens = self.clip_extra_context_tokens,
+            sdxl_plus = self.sdxl_plus
+        )
+
+        self.ipadapter.to(device, dtype=self.dtype)
+
+        self.image_emb, self.uncond_image_emb = self.ipadapter.get_image_embeds(cond.to(device, dtype=self.dtype), uncond.to(device, dtype=self.dtype))
+        self.image_emb = self.image_emb.to(device, dtype=self.dtype)
+        self.uncond_image_emb = self.uncond_image_emb.to(device, dtype=self.dtype)
+        # Not sure of batch size at this point.
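+        # (the actual cond/uncond pairing is only known per sampling call; it is
+        # resolved inside CrossAttentionPatch via extra_options["cond_or_uncond"])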
+ self.cond_uncond_image_emb = None + + new_model = model.clone() + + if mask is not None: + mask = mask.squeeze().to(device) + + ''' + patch_name of sdv1-2: ("input" or "output" or "middle", block_id) + patch_name of sdxl: ("input" or "output" or "middle", block_id, transformer_index) + ''' + patch_kwargs = { + "number": 0, + "weight": self.weight, + "ipadapter": self.ipadapter, + "dtype": self.dtype, + "cond": self.image_emb, + "uncond": self.uncond_image_emb, + "mask": mask + } + + if not self.sdxl: + for id in [1,2,4,5,7,8]: # id of input_blocks that have cross attention + set_model_patch_replace(new_model, patch_kwargs, ("input", id)) + patch_kwargs["number"] += 1 + for id in [3,4,5,6,7,8,9,10,11]: # id of output_blocks that have cross attention + set_model_patch_replace(new_model, patch_kwargs, ("output", id)) + patch_kwargs["number"] += 1 + set_model_patch_replace(new_model, patch_kwargs, ("middle", 0)) + else: + for id in [4,5,7,8]: # id of input_blocks that have cross attention + block_indices = range(2) if id in [4, 5] else range(10) # transformer_depth + for index in block_indices: + set_model_patch_replace(new_model, patch_kwargs, ("input", id, index)) + patch_kwargs["number"] += 1 + for id in range(6): # id of output_blocks that have cross attention + block_indices = range(2) if id in [3, 4, 5] else range(10) # transformer_depth + for index in block_indices: + set_model_patch_replace(new_model, patch_kwargs, ("output", id, index)) + patch_kwargs["number"] += 1 + for index in range(10): + set_model_patch_replace(new_model, patch_kwargs, ("middle", 0, index)) + patch_kwargs["number"] += 1 + + return (new_model, outputs) + + def clip_vision_encode(self, clip_vision, image, plus=False): + + inputs = clip_preprocess(image) + comfy.model_management.load_model_gpu(clip_vision.patcher) + pixel_values = inputs.to(clip_vision.load_device) + + if clip_vision.dtype != torch.float32: + precision_scope = torch.autocast + else: + precision_scope = lambda a, b: contextlib.nullcontext(a) + + with precision_scope(comfy.model_management.get_autocast_device(clip_vision.load_device), torch.float32): + outputs = clip_vision.model(pixel_values=pixel_values, output_hidden_states=True) + + if plus: + cond = outputs.hidden_states[-2] + with precision_scope(comfy.model_management.get_autocast_device(clip_vision.load_device), torch.float32): + uncond = clip_vision.model(torch.zeros_like(pixel_values), output_hidden_states=True).hidden_states[-2] + else: + cond = outputs.image_embeds + uncond = torch.zeros_like(cond) + for k in outputs: + t = outputs[k] + if k == "hidden_states": + outputs[k] = None + elif t is not None: + outputs[k] = t.cpu() + return cond, uncond, outputs + + +class CrossAttentionPatch: + # forward for patching + def __init__(self, weight, ipadapter, dtype, number, cond, uncond, mask=None): + self.weights = [weight] + self.ipadapters = [ipadapter] + self.conds = [cond] + self.unconds = [uncond] + self.dtype = dtype + self.number = number + self.masks = [mask] + + def set_new_condition(self, weight, ipadapter, cond, uncond, dtype, number, mask=None): + self.weights.append(weight) + self.ipadapters.append(ipadapter) + self.conds.append(cond) + self.unconds.append(uncond) + self.masks.append(mask) + self.dtype = dtype + + def __call__(self, n, context_attn2, value_attn2, extra_options): + org_dtype = n.dtype + cond_or_uncond = extra_options["cond_or_uncond"] + original_shape = (extra_options["original_shape"][2], extra_options["original_shape"][3]) + with torch.autocast("cuda", 
dtype=self.dtype):
+            q = n
+            k = context_attn2
+            v = value_attn2
+            b, _, _ = q.shape
+            batch_prompt = b // len(cond_or_uncond)
+            out = optimized_attention(q, k, v, extra_options["n_heads"])
+
+            for weight, cond, uncond, ipadapter, mask in zip(self.weights, self.conds, self.unconds, self.ipadapters, self.masks):
+                k_cond = ipadapter.ip_layers.to_kvs[self.number*2](cond).repeat(batch_prompt, 1, 1)
+                k_uncond = ipadapter.ip_layers.to_kvs[self.number*2](uncond).repeat(batch_prompt, 1, 1)
+                v_cond = ipadapter.ip_layers.to_kvs[self.number*2+1](cond).repeat(batch_prompt, 1, 1)
+                v_uncond = ipadapter.ip_layers.to_kvs[self.number*2+1](uncond).repeat(batch_prompt, 1, 1)
+
+                ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0)
+                ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0)
+
+                # Convert ip_k and ip_v to the same dtype as q
+                ip_k = ip_k.to(dtype=q.dtype)
+                ip_v = ip_v.to(dtype=q.dtype)
+
+                ip_out = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
+
+                if mask is not None:
+                    # suggestions for a better approach are welcome
+                    if original_shape[0] * original_shape[1] == q.shape[1]:
+                        down_sample_rate = 1
+                    elif (original_shape[0] // 2) * (original_shape[1] // 2) == q.shape[1]:
+                        down_sample_rate = 2
+                    elif (original_shape[0] // 4) * (original_shape[1] // 4) == q.shape[1]:
+                        down_sample_rate = 4
+                    else:
+                        down_sample_rate = 8
+                    mask_downsample = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0), size=(original_shape[0] // down_sample_rate, original_shape[1] // down_sample_rate), mode="nearest").squeeze(0)
+                    mask_downsample = mask_downsample.view(1, -1, 1).repeat(out.shape[0], 1, out.shape[2])
+                    ip_out = ip_out * mask_downsample
+
+                out = out + ip_out * weight
+
+        return out.to(dtype=org_dtype)
+
diff --git a/custom_nodes/IPAdapter-ComfyUI/memo.md b/custom_nodes/IPAdapter-ComfyUI/memo.md
new file mode 100644
index 0000000000000000000000000000000000000000..ae20b553890d0e8673caa5975c18353dd918d0a0
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/memo.md
@@ -0,0 +1,34 @@
+## Is this a "plus" model?
+Detected by the presence of `state_dict["image_proj"]["latents"]`.
+
+## Hidden-state dimension of the text encoder:
+Determined from the input dimension of the key projection.
+
+`cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]`
+
+## Is this SDXL?
+`sdxl = self.cross_attention_dim == 2048`
+
+## Number of IP-Adapter tokens
+For non-plus models, divide the output dimension of image_proj by cross_attention_dim.
+
+`clip_extra_context_tokens = state_dict["image_proj"]["proj.weight"].shape[0] // cross_attention_dim`
+
+For plus models, use the token count of the latents.
+
+`self.clip_extra_context_tokens = ip_state_dict["image_proj"]["latents"].shape[1]`
+
+## CLIP feature dimension
+Determined from the actual encoder output.
+
+`clip_embeddings_dim = cond.shape[-1]`
+
+## Remaining
+The Resampler settings for the plus models are still undecided...
+
+```
+depth=4
+dim_head=64
+heads=12
+ff_mult=4
+```
diff --git a/custom_nodes/IPAdapter-ComfyUI/models/ip-adapter_sd15.bin b/custom_nodes/IPAdapter-ComfyUI/models/ip-adapter_sd15.bin
new file mode 100644
index 0000000000000000000000000000000000000000..0fb1fd7d32243e0395bcfb03cd6cc2f223857bfb
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/models/ip-adapter_sd15.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68e1df30d760f280e578c302f1e73b37ea08654eff16a31153588047affe0058
+size 44642825
diff --git a/custom_nodes/IPAdapter-ComfyUI/models/put_models_here.txt b/custom_nodes/IPAdapter-ComfyUI/models/put_models_here.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c5aa96ecfe47827191bbf9507e8b68526241f80f
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/models/put_models_here.txt
@@ -0,0 +1 @@
+^q^q^
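As an aside on memo.md above: a minimal sketch (a hypothetical helper, not part of the node) that ties the same detection heuristics together, assuming `state_dict` is the dict returned by `load_ipadapter()`:

```python
# Sketch only: restates the model-detection checks from memo.md / ip_adapter.py.
def inspect_ipadapter(state_dict):
    plus = "latents" in state_dict["image_proj"]
    # key-projection input dim == text encoder hidden-state dim
    cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]
    sdxl = cross_attention_dim == 2048
    if plus:
        tokens = state_dict["image_proj"]["latents"].shape[1]
    else:
        tokens = state_dict["image_proj"]["proj.weight"].shape[0] // cross_attention_dim
    return {"plus": plus, "sdxl": sdxl,
            "cross_attention_dim": cross_attention_dim, "tokens": tokens}
```

diff --git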
a/custom_nodes/IPAdapter-ComfyUI/resampler.py b/custom_nodes/IPAdapter-ComfyUI/resampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..4521c8c3e6f17caf4547c3dd84118da760e5179f
--- /dev/null
+++ b/custom_nodes/IPAdapter-ComfyUI/resampler.py
@@ -0,0 +1,121 @@
+# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
+import math
+
+import torch
+import torch.nn as nn
+
+
+# FFN
+def FeedForward(dim, mult=4):
+    inner_dim = int(dim * mult)
+    return nn.Sequential(
+        nn.LayerNorm(dim),
+        nn.Linear(dim, inner_dim, bias=False),
+        nn.GELU(),
+        nn.Linear(inner_dim, dim, bias=False),
+    )
+
+
+def reshape_tensor(x, heads):
+    bs, length, width = x.shape
+    # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
+    x = x.view(bs, length, heads, -1)
+    # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
+    x = x.transpose(1, 2)
+    # stays (bs, n_heads, length, dim_per_head); this reshape is effectively a no-op
+    x = x.reshape(bs, heads, length, -1)
+    return x
+
+
+class PerceiverAttention(nn.Module):
+    def __init__(self, *, dim, dim_head=64, heads=8):
+        super().__init__()
+        self.scale = dim_head**-0.5
+        self.dim_head = dim_head
+        self.heads = heads
+        inner_dim = dim_head * heads
+
+        self.norm1 = nn.LayerNorm(dim)
+        self.norm2 = nn.LayerNorm(dim)
+
+        self.to_q = nn.Linear(dim, inner_dim, bias=False)
+        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
+        self.to_out = nn.Linear(inner_dim, dim, bias=False)
+
+
+    def forward(self, x, latents):
+        """
+        Args:
+            x (torch.Tensor): image features
+                shape (b, n1, D)
+            latents (torch.Tensor): latent features
+                shape (b, n2, D)
+        """
+        x = self.norm1(x)
+        latents = self.norm2(latents)
+
+        b, l, _ = latents.shape
+
+        q = self.to_q(latents)
+        kv_input = torch.cat((x, latents), dim=-2)
+        k, v = self.to_kv(kv_input).chunk(2, dim=-1)
+
+        q = reshape_tensor(q, self.heads)
+        k = reshape_tensor(k, self.heads)
+        v = reshape_tensor(v, self.heads)
+
+        # attention
+        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
+        weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
+        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
+        out = weight @ v
+
+        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
+
+        return self.to_out(out)
+
+
+class Resampler(nn.Module):
+    def __init__(
+        self,
+        dim=1024,
+        depth=8,
+        dim_head=64,
+        heads=16,
+        num_queries=8,
+        embedding_dim=768,
+        output_dim=1024,
+        ff_mult=4,
+    ):
+        super().__init__()
+
+        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
+
+        self.proj_in = nn.Linear(embedding_dim, dim)
+
+        self.proj_out = nn.Linear(dim, output_dim)
+        self.norm_out = nn.LayerNorm(output_dim)
+
+        self.layers = nn.ModuleList([])
+        for _ in range(depth):
+            self.layers.append(
+                nn.ModuleList(
+                    [
+                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+                        FeedForward(dim=dim, mult=ff_mult),
+                    ]
+                )
+            )
+
+    def forward(self, x):
+
+        latents = self.latents.repeat(x.size(0), 1, 1)
+
+        x = self.proj_in(x)
+
+        for attn, ff in self.layers:
+            latents = attn(x, latents) + latents
+            latents = ff(latents) + latents
+
+        latents = self.proj_out(latents)
+        return self.norm_out(latents)
\ No newline at end of file
diff --git a/custom_nodes/SD-Latent-Upscaler/.gitignore b/custom_nodes/SD-Latent-Upscaler/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..83bae750d0d71f352cadd85fefb08e70e806af46
--- /dev/null
+++ b/custom_nodes/SD-Latent-Upscaler/.gitignore
@@ -0,0 +1,175
@@ +raw/ +images/ +latent_*/ +vae/ +models/ +other/ +test.py +*.png +*.zip +*.npy +*.ckpt +*.safetensors + +# default github .gitignore follows + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/custom_nodes/SD-Latent-Upscaler/LICENSE b/custom_nodes/SD-Latent-Upscaler/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/custom_nodes/SD-Latent-Upscaler/README.md b/custom_nodes/SD-Latent-Upscaler/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3da81996a7575a4054d3288af65da1da73f6c421
--- /dev/null
+++ b/custom_nodes/SD-Latent-Upscaler/README.md
@@ -0,0 +1,63 @@
+# SD-Latent-Upscaler
+Upscaling stable diffusion latents using a small neural network.
+
+Very similar to my [latent interposer](https://github.com/city96/SD-Latent-Interposer/tree/main), this small model can be used to upscale latents in a way that doesn't ruin the image. I mostly explain some of the issues with upscaling latents in [this issue](https://github.com/city96/SD-Advanced-Noise/issues/1#issuecomment-1678193121). Think of this as an ESRGAN for latents, except severely undertrained.
+
+**Currently, SDXL has some minimal hue shift issues.** Because of course it does.
+
+## Installation
+
+### ComfyUI
+
+To install it, simply clone this repo to your custom_nodes folder using the following command: `git clone https://github.com/city96/SD-Latent-Upscaler custom_nodes/SD-Latent-Upscaler`.
+
+Alternatively, you can download the [comfy_latent_upscaler.py](https://github.com/city96/SD-Latent-Upscaler/blob/main/comfy_latent_upscaler.py) file to your ComfyUI/custom_nodes folder as well. You may need to install hfhub using the command `pip install huggingface-hub` inside your venv.
+
+If you need the model weights for something else, they are [hosted on HF](https://huggingface.co/city96/SD-Latent-Upscaler/tree/main) under the same Apache2 license as the rest of the repo.
+
+### Auto1111
+
+Currently not supported, but it should be possible to hook it in at the hires-fix stage.
+
+### Local models
+
+The node pulls the required files from huggingface hub by default. You can create a `models` folder and place the modules there if you have a flaky connection or prefer to use it completely offline; it will load them locally instead. The path should be: `ComfyUI/custom_nodes/SD-Latent-Upscaler/models`
+
+Alternatively, just clone the entire HF repo to it: `git clone https://huggingface.co/city96/SD-Latent-Upscaler custom_nodes/SD-Latent-Upscaler/models`
+
+### Usage/principle
+
+Usage is fairly simple. You can use it anywhere you would upscale a latent. If you need a higher scale factor (e.g. x4), simply chain two of the upscalers.
+
+![LATENT_UPSCALER_ANI](https://github.com/city96/SD-Latent-Upscaler/assets/125218114/dc187631-fd94-445e-9f20-a5741091bb0e)
+
+![LATENT_UPSCALER_V2](https://github.com/city96/SD-Latent-Upscaler/assets/125218114/16e7fcb3-74e5-476f-8d54-1eb4d6d4f78b)
+
+As part of a workflow - notice how the second stage works despite the low denoise of 0.2. The image remains relatively unchanged.
+
+![LATENT_UPSCALER_WF](https://github.com/city96/SD-Latent-Upscaler/assets/125218114/6ae1779d-42ec-413e-8e44-1b9b8a1e2663)
+
+## Training
+
+### Upscaler v2.0
+
+I decided to do some more research and change the network architecture altogether. This one is just a bunch of `Conv2d` layers with an `Upsample` at the beginning, similar to before except I reduced the kernel size/padding and instead added more layers.
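+For reference, the layer stack described above boils down to roughly the following (a condensed sketch of `upscaler.py` from this repo; `fac` is the scale factor):
+
+```python
+import torch.nn as nn
+
+# Condensed from upscaler.py: nearest-neighbour Upsample up front,
+# then a stack of small 3x3 Conv2d layers (depth=16 by default).
+def build_upscaler(fac, depth=16, size=64, chan=4):
+    layers = [
+        nn.Conv2d(chan, size, kernel_size=3, padding=1),
+        nn.ReLU(),
+        nn.Upsample(scale_factor=fac, mode="nearest"),
+        nn.ReLU(),
+    ]
+    for _ in range(depth):
+        layers += [nn.Conv2d(size, size, kernel_size=3, padding=1), nn.ReLU()]
+    layers.append(nn.Conv2d(size, chan, kernel_size=3, padding=1))
+    return nn.Sequential(*layers)
+```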
+ +Trained for 1M iterations on DIV2K + Flickr2K. I changed to AdamW + L1 loss (from SGD and MSE loss) and added a `OneCycleLR` scheduler. + +![loss](https://github.com/city96/SD-Latent-Upscaler/assets/125218114/ca361dfd-7148-4b1b-bbf2-59151f8992cc) + +### Upscaler v1.0 + +This version was still relatively undertrained. Mostly a proof-of-concept. + +Trained for 1M iterations on DIV2K + Flickr2K. + +
+**Loss graphs for v1.0 models**
+
+(Left is training loss, right is validation loss.)
+
+![loss](https://github.com/city96/SD-Latent-Upscaler/assets/125218114/edbc30b4-56b4-4b74-8c0b-3ab35916e963)
+
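+For reference, the AdamW + L1 + `OneCycleLR` setup mentioned above corresponds to roughly this (a condensed sketch of `train.py` from this repo; `model`, `lr`, `bs` and `steps` stand in for the script's model and CLI arguments):
+
+```python
+import torch
+
+# Condensed from train.py: L1 loss, AdamW, and a one-cycle LR schedule.
+# Learning rate and total step count are normalized by the batch size.
+def make_training_setup(model, lr, bs, steps):
+    criterion = torch.nn.L1Loss()
+    optimizer = torch.optim.AdamW(model.parameters(), lr=lr / bs)
+    scheduler = torch.optim.lr_scheduler.OneCycleLR(
+        optimizer,
+        total_steps=steps // bs,
+        max_lr=lr / bs,
+        pct_start=0.015,
+        final_div_factor=2500,
+    )
+    return criterion, optimizer, scheduler
+```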
diff --git a/custom_nodes/SD-Latent-Upscaler/__init__.py b/custom_nodes/SD-Latent-Upscaler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9229cd166e866150d6b12a1f350f2861575508d8 --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/__init__.py @@ -0,0 +1,8 @@ +# only import if running as a custom node +try: + import comfy.utils +except ImportError: + pass +else: + from .comfy_latent_upscaler import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS + __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS'] diff --git a/custom_nodes/SD-Latent-Upscaler/__pycache__/__init__.cpython-311.pyc b/custom_nodes/SD-Latent-Upscaler/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5352a322ea5667dac0ef281f21c4f6e7252ec87c Binary files /dev/null and b/custom_nodes/SD-Latent-Upscaler/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/SD-Latent-Upscaler/__pycache__/comfy_latent_upscaler.cpython-311.pyc b/custom_nodes/SD-Latent-Upscaler/__pycache__/comfy_latent_upscaler.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba7da5022876a47541d6069d514c04ee3e85d6a Binary files /dev/null and b/custom_nodes/SD-Latent-Upscaler/__pycache__/comfy_latent_upscaler.cpython-311.pyc differ diff --git a/custom_nodes/SD-Latent-Upscaler/comfy_latent_upscaler.py b/custom_nodes/SD-Latent-Upscaler/comfy_latent_upscaler.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a4fb40154c41c2b805f045e7613dfd8e417cfd --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/comfy_latent_upscaler.py @@ -0,0 +1,105 @@ +import os +import torch +import torch.nn as nn +from safetensors.torch import load_file +from huggingface_hub import hf_hub_download + + +class Upscaler(nn.Module): + """ + Basic NN layout, ported from: + https://github.com/city96/SD-Latent-Upscaler/blob/main/upscaler.py + """ + version = 2.1 # network revision + def head(self): + return [ + nn.Conv2d(self.chan, self.size, kernel_size=self.krn, padding=self.pad), + nn.ReLU(), + nn.Upsample(scale_factor=self.fac, mode="nearest"), + nn.ReLU(), + ] + def core(self): + layers = [] + for _ in range(self.depth): + layers += [ + nn.Conv2d(self.size, self.size, kernel_size=self.krn, padding=self.pad), + nn.ReLU(), + ] + return layers + def tail(self): + return [ + nn.Conv2d(self.size, self.chan, kernel_size=self.krn, padding=self.pad), + ] + + def __init__(self, fac, depth=16): + super().__init__() + self.size = 64 # Conv2d size + self.chan = 4 # in/out channels + self.depth = depth # no. 
of layers + self.fac = fac # scale factor + self.krn = 3 # kernel size + self.pad = 1 # padding + + self.sequential = nn.Sequential( + *self.head(), + *self.core(), + *self.tail(), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.sequential(x) + + +class LatentUpscaler: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "samples": ("LATENT", ), + "latent_ver": (["v1", "xl"],), + "scale_factor": (["1.25", "1.5", "2.0"],), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "upscale" + CATEGORY = "latent" + + def upscale(self, samples, latent_ver, scale_factor): + model = Upscaler(scale_factor) + filename = f"latent-upscaler-v{model.version}_SD{latent_ver}-x{scale_factor}.safetensors" + local = os.path.join( + os.path.join(os.path.dirname(os.path.realpath(__file__)),"models"), + filename + ) + + if os.path.isfile(local): + print("LatentUpscaler: Using local model") + weights = local + else: + print("LatentUpscaler: Using HF Hub model") + weights = str(hf_hub_download( + repo_id="city96/SD-Latent-Upscaler", + filename=filename) + ) + + model.load_state_dict(load_file(weights)) + lt = samples["samples"] + lt = model(lt) + del model + if "noise_mask" in samples.keys(): + # expand the noise mask to the same shape as the latent + mask = torch.nn.functional.interpolate(samples['noise_mask'], scale_factor=float(scale_factor), mode='bicubic') + return ({"samples": lt, "noise_mask": mask},) + return ({"samples": lt},) + +NODE_CLASS_MAPPINGS = { + "LatentUpscaler": LatentUpscaler, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "LatentUpscaler": "Latent Upscaler" +} diff --git a/custom_nodes/SD-Latent-Upscaler/log_loss.py b/custom_nodes/SD-Latent-Upscaler/log_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..a7d12eb87e13fcd8b6bc44cb38c2b923c3452745 --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/log_loss.py @@ -0,0 +1,48 @@ +import os +import math +import matplotlib.pyplot as plt + +files = [f"models/{x}" for x in os.listdir("models") if x.endswith(".csv")] +train_loss = {} +eval_loss = {} + +def process_lines(lines): + global train_loss + global eval_loss + name = fp.split("/")[1] + vals = [x.split(",") for x in lines] + train_loss[name] = ( + [int(x[0]) for x in vals], + [math.log(float(x[1])) for x in vals], + ) + if len(vals[0]) >= 3: + eval_loss[name] = ( + [int(x[0]) for x in vals], + [math.log(float(x[2])) for x in vals], + ) + +# https://stackoverflow.com/a/49357445 +def smooth(scalars, weight): + last = scalars[0] + smoothed = list() + for point in scalars: + smoothed_val = last * weight + (1 - weight) * point + smoothed.append(smoothed_val) + last = smoothed_val + return smoothed + +def plot(data, fname): + fig, ax = plt.subplots() + ax.grid() + for name, val in data.items(): + ax.plot(val[0], smooth(val[1], 0.9), label=name) + plt.legend(loc="upper right") + plt.savefig(fname, dpi=300, bbox_inches='tight') + +for fp in files: + with open(fp) as f: + lines = f.readlines() + process_lines(lines) + +plot(train_loss, "loss.png") +plot(eval_loss, "loss-eval.png") diff --git a/custom_nodes/SD-Latent-Upscaler/preprocess_latents.py b/custom_nodes/SD-Latent-Upscaler/preprocess_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..37d2bd55a9b7c81753a914d6681fccb7a2ffc52e --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/preprocess_latents.py @@ -0,0 +1,80 @@ +import os +import torch +import hashlib +import argparse +import numpy as np +from torchvision import transforms +from 
diffusers import AutoencoderKL
+from tqdm import tqdm
+from PIL import Image
+
+from vae import get_vae
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Preprocess images into latents")
+    parser.add_argument("-r", "--res", type=int, default=512, help="Source resolution")
+    parser.add_argument("-f", "--fac", type=float, default=1.5, help="Upscale factor")
+    parser.add_argument("-v", "--ver", choices=["v1","xl"], default="v1", help="SD version")
+    parser.add_argument('--vae', help="Path to VAE (Optional)")
+    parser.add_argument('--src', default="raw", help="Source folder with images")
+    return parser.parse_args()
+
+def encode(vae, img):
+    """image [PIL Image] -> latent [np array]"""
+    inp = transforms.ToTensor()(img).unsqueeze(0)
+    inp = inp.to("cuda") # move to GPU
+    latent = vae.encode(inp*2.0-1.0)
+    latent = latent.latent_dist.sample()
+    return latent.cpu().detach()
+
+def scale(path, res):
+    """Resize so the short side is `res`, then crop to the top-left corner;
+    returns None for images with a short side under 256px"""
+    img = Image.open(path)
+    img = img.convert('RGB')
+    target = (res, res)
+    if min(img.height, img.width) < 256:
+        return
+    if img.width > img.height:
+        target = (int(img.width/img.height*res), res)
+    elif img.height > img.width:
+        target = (res, int(img.height/img.width*res))
+    img = img.resize(target, Image.LANCZOS)
+    img = img.crop([0,0,res,res])
+    return img
+
+def process_folder(vae, src_dir, ver, res):
+    dst_dir = f"latents/{ver}_{res}px"
+    if not os.path.isdir(dst_dir):
+        os.mkdir(dst_dir)
+
+    for file in tqdm(os.listdir(src_dir)):
+        src = os.path.join(src_dir, file)
+        md5 = hashlib.md5(open(src,'rb').read()).hexdigest()
+        dst = os.path.join(dst_dir, f"{md5}.npy")
+        if os.path.isfile(dst):
+            continue
+        img = scale(src, res)
+        if img is None: # skip images that scale() rejected as too small
+            continue
+        latent = encode(vae, img)
+        np.save(dst, latent)
+
+def process_res(vae, src_dir, ver, res):
+    process_folder(vae, src_dir, ver, res)
+    # test image, optional
+    if os.path.isfile("test.png"):
+        if os.path.isfile(f"test_{ver}_{res}px.npy"):
+            return
+        img = scale("test.png", res)
+        if img is None:
+            return
+        latent = encode(vae, img)
+        np.save(f"test_{ver}_{res}px.npy", latent)
+    torch.cuda.empty_cache()
+
+if __name__ == "__main__":
+    if not os.path.isdir("latents"):
+        os.mkdir("latents")
+    args = parse_args()
+    vae = get_vae(args.ver, args.vae)
+    vae.to("cuda")
+    ## args
+    dst_res = int(args.res*args.fac)
+    process_res(vae, args.src, args.ver, args.res)
+    process_res(vae, args.src, args.ver, dst_res)
diff --git a/custom_nodes/SD-Latent-Upscaler/train.py b/custom_nodes/SD-Latent-Upscaler/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b3f331a6d480ba0e77f323f14887f2268f9108c
--- /dev/null
+++ b/custom_nodes/SD-Latent-Upscaler/train.py
@@ -0,0 +1,164 @@
+import os
+import torch
+import torch.nn as nn
+import numpy as np
+import argparse
+import random
+from PIL import Image
+from tqdm import tqdm
+from safetensors.torch import save_file, load_file
+from torch.utils.data import DataLoader, Dataset
+
+from upscaler import LatentUpscaler as Upscaler
+from vae import get_vae
+
+torch.backends.cudnn.benchmark = True
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Train latent upscaler model")
+    parser.add_argument("--steps", type=int, default=500000, help="No. 
of training steps") + parser.add_argument('--bs', type=int, default=4, help="Batch size") + parser.add_argument('--lr', default="5e-4", help="Learning rate") + parser.add_argument("-n", "--save_every_n", type=int, dest="save", default=50000, help="Save model/sample periodically") + parser.add_argument("-r", "--res", type=int, default=512, help="Source resolution") + parser.add_argument("-f", "--fac", type=float, default=1.5, help="Upscale factor") + parser.add_argument("-v", "--ver", choices=["v1","xl"], default="v1", help="SD version") + parser.add_argument('--vae', help="Path to VAE (Optional)") + parser.add_argument('--resume', help="Checkpoint to resume from") + args = parser.parse_args() + try: + float(args.lr) + except: + parser.error("--lr must be a valid float eg. 0.001 or 1e-3") + return args + +vae = None +def sample_decode(latent, filename, version): + global vae + if not vae: + vae = get_vae(version, fp16=True) + vae.to("cuda") + + latent = latent.half().to("cuda") + out = vae.decode(latent).sample + out = out.cpu().detach().numpy() + out = np.squeeze(out, 0) + out = out.transpose((1, 2, 0)) + out = np.clip(out, -1.0, 1.0) + out = (out+1)/2 * 255 + out = out.astype(np.uint8) + out = Image.fromarray(out) + out.save(filename) + +def eval_model(step, model, criterion, scheduler, src, dst): + with torch.no_grad(): + t_pred = model(src) + t_loss = criterion(t_pred, dst) + tqdm.write(f"{str(step):<10} {loss.data.item():.4e}|{t_loss.data.item():.4e} @ {float(scheduler.get_last_lr()[0]):.4e}") + log.write(f"{step},{loss.data.item()},{t_loss.data.item()},{float(scheduler.get_last_lr()[0])}\n") + log.flush() + +def save_model(step, model, ver, fac, src): + out = model(src) + output_name = f"./models/latent-upscaler_SD{ver}-x{fac}_e{round(step/1000)}k" + sample_decode(out, f"{output_name}.png", ver) + save_file(model.state_dict(), f"{output_name}.safetensors") + +class Latent: + def __init__(self, md5, ver, src_res, dst_res): + src = os.path.join(f"latents/{ver}_{src_res}px", f"{md5}.npy") + dst = os.path.join(f"latents/{ver}_{dst_res}px", f"{md5}.npy") + self.src = torch.from_numpy(np.load(src)).to("cuda") + self.dst = torch.from_numpy(np.load(dst)).to("cuda") + self.src = torch.squeeze(self.src, 0) + self.dst = torch.squeeze(self.dst, 0) + +class LatentDataset(Dataset): + def __init__(self, ver, src_res, dst_res): + print("Loading latents from disk") + self.latents = [] + for i in tqdm(os.listdir(f"latents/{ver}_{src_res}px")): + md5 = os.path.splitext(i)[0] + self.latents.append( + Latent(md5, ver, src_res, dst_res) + ) + + def __len__(self): + return len(self.latents) + + def __getitem__(self, index): + return ( + self.latents[index].src, + self.latents[index].dst, + ) + +if __name__ == "__main__": + args = parse_args() + target_dev = "cuda" + dst_res = int(args.res*args.fac) + + dataset = LatentDataset(args.ver, args.res, dst_res) + loader = DataLoader( + dataset, + batch_size=args.bs, + shuffle=True, + num_workers=0, + ) + + if not os.path.isdir("models"): os.mkdir("models") + log = open(f"models/latent-upscaler_SD{args.ver}-x{args.fac}.csv", "w") + + if os.path.isfile(f"test_{args.ver}_{args.res}px.npy") and os.path.isfile(f"test_{args.ver}_{dst_res}px.npy"): + eval_src = torch.from_numpy(np.load(f"test_{args.ver}_{args.res}px.npy")).to(target_dev) + eval_dst = torch.from_numpy(np.load(f"test_{args.ver}_{dst_res}px.npy")).to(target_dev) + else: + eval_src = torch.unsqueeze(dataset[0][0],0) + eval_dst = torch.unsqueeze(dataset[0][1],0) + + model = Upscaler(args.fac) + if 
args.resume: + model.load_state_dict(load_file(args.resume)) + model.to(target_dev) + + # criterion = torch.nn.MSELoss() + criterion = torch.nn.L1Loss() + + # optimizer = torch.optim.SGD(model.parameters(), lr=float(args.lr)/args.bs) + optimizer = torch.optim.AdamW(model.parameters(), lr=float(args.lr)/args.bs) + + scheduler = torch.optim.lr_scheduler.OneCycleLR( + optimizer, + total_steps=int(args.steps/args.bs), + max_lr=float(args.lr)/args.bs, + pct_start=0.015, + final_div_factor=2500, + ) + # scaler = torch.cuda.amp.GradScaler() + progress = tqdm(total=args.steps) + + while progress.n < args.steps: + for src, dst in loader: + with torch.cuda.amp.autocast(): + y_pred = model(src) # forward + loss = criterion(y_pred, dst) # loss + + # backward + optimizer.zero_grad() + loss.backward() + optimizer.step() + scheduler.step() + + # eval/save + progress.update(args.bs) + if progress.n % (1000 + 1000%args.bs) == 0: + eval_model(progress.n, model, criterion, scheduler, eval_src, eval_dst) + if progress.n % (args.save + args.save%args.bs) == 0: + save_model(progress.n, model, args.ver, args.fac, eval_src) + if progress.n >= args.steps: + break + progress.close() + + # save final output + eval_model(args.steps, model, criterion, scheduler, eval_src, eval_dst) + save_model(args.steps, model, args.ver, args.fac, eval_src) + log.close() diff --git a/custom_nodes/SD-Latent-Upscaler/upscaler.py b/custom_nodes/SD-Latent-Upscaler/upscaler.py new file mode 100644 index 0000000000000000000000000000000000000000..490e5af56d2b43727698ea11afe1a51d5774e3c6 --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/upscaler.py @@ -0,0 +1,42 @@ +import torch +import torch.nn as nn +import numpy as np + +class LatentUpscaler(nn.Module): + def head(self): + return [ + nn.Conv2d(self.chan, self.size, kernel_size=self.krn, padding=self.pad), + nn.ReLU(), + nn.Upsample(scale_factor=self.fac, mode="nearest"), + nn.ReLU(), + ] + def core(self): + layers = [] + for _ in range(self.depth): + layers += [ + nn.Conv2d(self.size, self.size, kernel_size=self.krn, padding=self.pad), + nn.ReLU(), + ] + return layers + def tail(self): + return [ + nn.Conv2d(self.size, self.chan, kernel_size=self.krn, padding=self.pad), + ] + + def __init__(self, fac, depth=16): + super().__init__() + self.size = 64 # Conv2d size + self.chan = 4 # in/out channels + self.depth = depth # no. of layers + self.fac = fac # scale factor + self.krn = 3 # kernel size + self.pad = 1 # padding + + self.sequential = nn.Sequential( + *self.head(), + *self.core(), + *self.tail(), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.sequential(x) diff --git a/custom_nodes/SD-Latent-Upscaler/vae.py b/custom_nodes/SD-Latent-Upscaler/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..1b15a1398920dffd1cae1584ec3e674c33bef54c --- /dev/null +++ b/custom_nodes/SD-Latent-Upscaler/vae.py @@ -0,0 +1,48 @@ +import torch +from diffusers import AutoencoderKL + +def get_vae(version, file_path=None, fp16=False): + """Load VAE from file or default hf repo. 
fp16 only works from hf"""
+    vae = None
+    dtype = torch.float16 if fp16 else torch.float32
+    if version == "v1" and file_path:
+        vae = AutoencoderKL.from_single_file(
+            file_path,
+            image_size=512,
+        )
+    elif version == "v1":
+        vae = AutoencoderKL.from_pretrained(
+            "runwayml/stable-diffusion-v1-5",
+            subfolder="vae",
+            torch_dtype=dtype,
+        )
+    elif version == "v2" and file_path:
+        vae = AutoencoderKL.from_single_file(
+            file_path,
+            image_size=768,
+        )
+    elif version == "v2":
+        vae = AutoencoderKL.from_pretrained(
+            "stabilityai/stable-diffusion-2-1",
+            subfolder="vae",
+            torch_dtype=dtype,
+        )
+    elif version == "xl" and file_path:
+        vae = AutoencoderKL.from_single_file(
+            file_path,
+            image_size=1024
+        )
+    elif version == "xl" and fp16:
+        vae = AutoencoderKL.from_pretrained(
+            "madebyollin/sdxl-vae-fp16-fix",
+            torch_dtype=torch.float16,
+        )
+    elif version == "xl":
+        vae = AutoencoderKL.from_pretrained(
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            subfolder="vae"
+        )
+    else:
+        input("Invalid VAE version. Press any key to exit")
+        exit(1)
+    return vae
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/README.md b/custom_nodes/cd-tuner_negpip-ComfyUI/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e828fef4b68f62fcac1cfa56ff043e46c3a72275
--- /dev/null
+++ b/custom_nodes/cd-tuner_negpip-ComfyUI/README.md
@@ -0,0 +1,14 @@
+# cd-tuner_negpip-ComfyUI
+This custom node ports the following two webUI extensions by [hako-mikan](https://github.com/hako-mikan) to ComfyUI.
+
++ https://github.com/hako-mikan/sd-webui-cd-tuner
+: adjusts color tone and the amount of detail; only part of its functionality is implemented for now...
++ https://github.com/hako-mikan/sd-webui-negpip
+: adds support for negative weights in prompts
+
+# Description
+Two nodes are added to the loader category. They are simple enough that detailed usage instructions should not be necessary.
+Specify cd-tuner's start and end in the 0-1000 range (they are not in step units for implementation reasons).
+
+# Acknowledgements
+Many thanks to [hako-mikan](https://github.com/hako-mikan), who devised both of the original implementations.
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/__init__.py b/custom_nodes/cd-tuner_negpip-ComfyUI/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..212f26f45719bff5d3bfd5e92ddee41753716830
--- /dev/null
+++ b/custom_nodes/cd-tuner_negpip-ComfyUI/__init__.py
@@ -0,0 +1,14 @@
+from .cd_tuner import CDTuner
+from .negpip import Negpip
+
+NODE_CLASS_MAPPINGS = {
+    "CDTuner": CDTuner,
+    "Negapip": Negpip,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "CDTuner": "Apply CDTuner",
+    "Negapip": "Apply Negapip",
+}
+
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
\ No newline at end of file
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/__init__.cpython-311.pyc b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01de3c6829a2d762492311a67676fb287b795fe6
Binary files /dev/null and b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/__init__.cpython-311.pyc differ
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/cd_tuner.cpython-311.pyc b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/cd_tuner.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06f4b86de074975ecc69dde266d7d4c474c64c25
Binary files /dev/null and b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/cd_tuner.cpython-311.pyc differ
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/negpip.cpython-311.pyc b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/negpip.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fcfe92fa9472187a77266dfc28729230cde5943
Binary files /dev/null and 
b/custom_nodes/cd-tuner_negpip-ComfyUI/__pycache__/negpip.cpython-311.pyc differ
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/cd_tuner.py b/custom_nodes/cd-tuner_negpip-ComfyUI/cd_tuner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b1805bca843c2cf916148b92402e41f730141ff
--- /dev/null
+++ b/custom_nodes/cd-tuner_negpip-ComfyUI/cd_tuner.py
@@ -0,0 +1,134 @@
+import torch
+
+
+class CDTuner:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL", ),
+                "detail_1": ("FLOAT", {
+                    "default": 0,
+                    "min": -10,
+                    "max": 10,
+                    "step": 0.1
+                }),
+                "detail_2": ("FLOAT", {
+                    "default": 0,
+                    "min": -10,
+                    "max": 10,
+                    "step": 0.1
+                }),
+                "contrast_1": ("FLOAT", {
+                    "default": 0,
+                    "min": -20,
+                    "max": 20,
+                    "step": 0.1
+                }),
+                "start": ("INT", {
+                    "default": 0,
+                    "min": 0,
+                    "max": 1000,
+                    "step": 1,
+                    "display": "number"
+                }),
+                "end": ("INT", {
+                    "default": 1000,
+                    "min": 0,
+                    "max": 1000,
+                    "step": 1,
+                    "display": "number"
+                }),
+            },
+        }
+
+    RETURN_TYPES = ("MODEL", )
+    FUNCTION = "apply"
+    CATEGORY = "loaders"
+
+    def apply(self, model, detail_1, detail_2, contrast_1, start, end):
+        '''
+        detail_1: increases detail by decreasing the weight and increasing the bias of the first Conv layer...?
+        detail_2: same, but for the GroupNorm in front of the last Conv layer
+        contrast_1: increases contrast by increasing channel 0 of the last Conv layer's bias...?
+        '''
+        new_model = model.clone()
+        ratios = fineman([detail_1, detail_2, contrast_1])
+        self.storedweights = {}
+        self.start = start
+        self.end = end
+
+        # patch that runs around the unet call
+        def apply_cdtuner(model_function, kwargs):
+            if kwargs["timestep"][0] < (1000 - self.end) or kwargs["timestep"][0] > (1000 - self.start):
+                return model_function(kwargs["input"], kwargs["timestep"], **kwargs["c"])
+            for i, name in enumerate(ADJUSTS):
+                # load the original weights
+                self.storedweights[name] = getset_nested_module_tensor(True, new_model, name).clone()
+                if 4 > i:
+                    new_weight = self.storedweights[name] * ratios[i]
+                else:
+                    device = self.storedweights[name].device
+                    dtype = self.storedweights[name].dtype
+                    new_weight = self.storedweights[name] + torch.tensor(ratios[i], device=device, dtype=dtype)
+                # overwrite the weights
+                getset_nested_module_tensor(False, new_model, name, new_tensor=new_weight)
+            retval = model_function(kwargs["input"], kwargs["timestep"], **kwargs["c"])
+
+            # restore the original weights
+            for name in ADJUSTS:
+                getset_nested_module_tensor(False, new_model, name, new_tensor=self.storedweights[name])
+
+            return retval
+
+        new_model.set_model_unet_function_wrapper(apply_cdtuner)
+
+        return (new_model, )
+
+
+def getset_nested_module_tensor(clone, model, tensor_path, new_tensor=None):
+    sdmodules = tensor_path.split('.')
+    target_module = model
+    last_attr = None
+
+    for module_name in sdmodules if clone else sdmodules[:-1]:
+        if module_name.isdigit():
+            target_module = target_module[int(module_name)]
+        else:
+            target_module = getattr(target_module, module_name)
+
+    if clone:
+        return target_module
+
+    last_attr = sdmodules[-1]
+    setattr(target_module, last_attr, torch.nn.Parameter(new_tensor))
+
+# why the name "fineman"?
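+# maps the three user-facing sliders onto five adjustments: multiplicative
+# scales for the weight/bias of the first conv (input_blocks.0.0) and of the
+# GroupNorm before the last conv (out.0), plus an additive offset on channel 0
+# of the last conv's bias (out.2) - see ADJUSTS below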
+def fineman(fine):
+    fine = [
+        1 - fine[0] * 0.01,
+        1 + fine[0] * 0.02,
+        1 - fine[1] * 0.01,
+        1 + fine[1] * 0.02,
+        [fine[2] * 0.02, 0, 0, 0]
+    ]
+    return fine
+
+
+ADJUSTS = [
+    "model.diffusion_model.input_blocks.0.0.weight",
+    "model.diffusion_model.input_blocks.0.0.bias",
+    "model.diffusion_model.out.0.weight",
+    "model.diffusion_model.out.0.bias",
+    "model.diffusion_model.out.2.bias",
+]
+
+NODE_CLASS_MAPPINGS = {
+    "CDTuner": CDTuner,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "CDTuner": "Apply CDTuner",
+}
+
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
diff --git a/custom_nodes/cd-tuner_negpip-ComfyUI/negpip.py b/custom_nodes/cd-tuner_negpip-ComfyUI/negpip.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9a440cf3f6824b3faf98feb6ec4e1abf102234c
--- /dev/null
+++ b/custom_nodes/cd-tuner_negpip-ComfyUI/negpip.py
@@ -0,0 +1,101 @@
+import torch
+import copy
+from comfy.sd1_clip import gen_empty_tokens
+
+class Negpip:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL", ),
+                "clip": ("CLIP", ),
+            },
+        }
+
+    RETURN_TYPES = ("MODEL", "CLIP")
+    FUNCTION = "apply"
+    CATEGORY = "loaders"
+
+    def apply(self, model, clip):
+        new_clip = copy.copy(clip)
+        if hasattr(new_clip.cond_stage_model, "clip_g"):
+            new_clip.cond_stage_model.clip_g.encode_token_weights = hook_clip_encode_token_weights(new_clip.cond_stage_model.clip_g)
+        if hasattr(new_clip.cond_stage_model, "clip_h"):
+            new_clip.cond_stage_model.clip_h.encode_token_weights = hook_clip_encode_token_weights(new_clip.cond_stage_model.clip_h)
+        if hasattr(new_clip.cond_stage_model, "clip_l"):
+            new_clip.cond_stage_model.clip_l.encode_token_weights = hook_clip_encode_token_weights(new_clip.cond_stage_model.clip_l)
+        new_model = model.clone()
+
+        def negpip_apply(q, k, v, extra_options):
+            new_k = k[:, 0::2]
+            new_v = v[:, 1::2]
+            return q, new_k, new_v
+
+        new_model.set_model_attn2_patch(negpip_apply)
+
+        return new_model, new_clip
+
+# forcibly compute k,v during the prompt-weighting calculation
+# k and v are split apart again in the attn2 patch
+# when a weight is negative, only v is negated
+def hook_clip_encode_token_weights(self):
+
+    def encode_token_weights(token_weight_pairs):
+        to_encode = list()
+        max_token_len = 0
+        has_weights = False
+        for x in token_weight_pairs:
+            tokens = list(map(lambda a: a[0], x))
+            max_token_len = max(len(tokens), max_token_len)
+            has_weights = has_weights or not all(map(lambda a: a[1] == 1.0, x))
+            to_encode.append(tokens)
+
+        sections = len(to_encode)
+        if has_weights or sections == 0:
+            to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len))
+
+        out, pooled = self.encode(to_encode)
+        if pooled is not None:
+            first_pooled = pooled[0:1].cpu()
+        else:
+            first_pooled = pooled
+
+        output = []
+        for k in range(0, sections):
+            zk = out[k:k+1].clone()
+            zv = out[k:k+1].clone()
+            if has_weights:
+                z_empty = out[-1]
+                for i in range(len(zk)):
+                    for j in range(len(zk[i])):
+                        weight = token_weight_pairs[k][j][1]
+                        if weight < 0:
+                            weight = -weight
+                            sign = -1
+                        else:
+                            sign = 1
+                        zk[i][j] = (zk[i][j] - z_empty[0][j]) * weight + z_empty[0][j]
+                        zv[i][j] = sign * ((zv[i][j] - z_empty[0][j]) * weight + z_empty[0][j])
+
+            z = torch.zeros_like(zk).repeat(1, 2, 1)
+            for i in range(zk.shape[1]): # brute-force for loop
+                z[:, 2*i, :] += zk[:, i, :]
+                z[:, 2*i+1, :] += zv[:, i, :]
+            output.append(z)
+
+        if (len(output) == 0):
+            return out[-1:].cpu(), first_pooled
+        return torch.cat(output, dim=-2).cpu(), first_pooled
+
+    return encode_token_weights
+
+
+NODE_CLASS_MAPPINGS = {
+    "Negpip": Negpip,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = { + "Negpip": "Apply Negpip", +} + +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"] diff --git a/custom_nodes/comfyui-dream-project/.gitignore b/custom_nodes/comfyui-dream-project/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..bda22c3d18b6b8287aa4a7f57d43d7cb38151242 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/.gitignore @@ -0,0 +1,3 @@ +.idea +__pycache__ +config.json \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/__init__.py b/custom_nodes/comfyui-dream-project/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a07a8e73018c2cb86576bdccafae0ece9f04970 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/__init__.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +from typing import Type + +from .base import * +from .colors import * +from .curves import * +from .image_processing import * +from .inputfields import * +from .loaders import * +from .noise import * +from .output import * +from .prompting import * +from .seq_processing import * +from .switches import * +from .utility import * +from .calculate import * +from .laboratory import * + +_NODE_CLASSES: List[Type] = [DreamSineWave, DreamLinear, DreamCSVCurve, DreamBeatCurve, DreamFrameDimensions, + DreamImageMotion, DreamNoiseFromPalette, DreamAnalyzePalette, DreamColorShift, + DreamDirectoryFileCount, DreamFrameCounterOffset, DreamDirectoryBackedFrameCounter, + DreamSimpleFrameCounter, DreamImageSequenceInputWithDefaultFallback, + DreamImageSequenceOutput, DreamCSVGenerator, DreamImageAreaSampler, + DreamVideoEncoder, DreamSequenceTweening, DreamSequenceBlend, DreamColorAlign, + DreamImageSampler, DreamNoiseFromAreaPalettes, + DreamInputString, DreamInputFloat, DreamInputInt, DreamInputText, DreamBigLatentSwitch, + DreamFrameCountCalculator, DreamBigImageSwitch, DreamBigTextSwitch, DreamBigFloatSwitch, + DreamBigIntSwitch, DreamBigPaletteSwitch, DreamWeightedPromptBuilder, DreamPromptFinalizer, + DreamFrameCounterInfo, DreamBoolToFloat, DreamBoolToInt, DreamSawWave, DreamTriangleWave, + DreamTriangleEvent, DreamSmoothEvent, DreamCalculation, DreamImageColorShift, + DreamComparePalette, DreamImageContrast, DreamImageBrightness, DreamLogFile, + DreamLaboratory, DreamStringToLog, DreamIntToLog, DreamFloatToLog, DreamJoinLog, + DreamStringTokenizer, DreamWavCurve, DreamFrameCounterTimeOffset] +_SIGNATURE_SUFFIX = " [Dream]" + +MANIFEST = { + "name": "Dream Project Animation", + "version": (5, 0, 0), + "author": "Dream Project", + "project": "https://github.com/alt-key-project/comfyui-dream-project", + "description": "Various utility nodes for creating animations with ComfyUI", +} + +NODE_CLASS_MAPPINGS = {} + +NODE_DISPLAY_NAME_MAPPINGS = {} + +config = DreamConfig() + + +def update_category(cls): + top = config.get("ui.top_category", "").strip().strip("/") + leaf_icon = "" + if top and "CATEGORY" in cls.__dict__: + cls.CATEGORY = top + "/" + cls.CATEGORY.lstrip("/") + if "CATEGORY" in cls.__dict__: + joined = [] + for partial in cls.CATEGORY.split("/"): + icon = config.get("ui.category_icons." 
+ partial, "") + if icon: + leaf_icon = icon + if config.get("ui.prepend_icon_to_category", False): + partial = icon.lstrip() + " " + partial + if config.get("ui.append_icon_to_category", False): + partial = partial + " " + icon.rstrip() + joined.append(partial) + cls.CATEGORY = "/".join(joined) + return leaf_icon + + +def update_display_name(cls, category_icon, display_name): + icon = cls.__dict__.get("ICON", category_icon) + if config.get("ui.prepend_icon_to_node", False): + display_name = icon.lstrip() + " " + display_name + if config.get("ui.append_icon_to_node", False): + display_name = display_name + " " + icon.rstrip() + return display_name + + +for cls in _NODE_CLASSES: + category_icon = update_category(cls) + clsname = cls.__name__ + if "NODE_NAME" in cls.__dict__: + node_name = cls.__dict__["NODE_NAME"] + _SIGNATURE_SUFFIX + NODE_CLASS_MAPPINGS[node_name] = cls + NODE_DISPLAY_NAME_MAPPINGS[node_name] = update_display_name(cls, category_icon, + cls.__dict__.get("DISPLAY_NAME", + cls.__dict__["NODE_NAME"])) + else: + raise Exception("Class {} is missing NODE_NAME!".format(str(cls))) + + +def update_node_index(): + node_list_path = os.path.join(os.path.dirname(__file__), "node_list.json") + with open(node_list_path) as f: + node_list = json.loads(f.read()) + updated = False + for nodename in NODE_CLASS_MAPPINGS.keys(): + if nodename not in node_list: + node_list[nodename] = "" + updated = True + if updated or True: + with open(node_list_path, "w") as f: + f.write(json.dumps(node_list, indent=2, sort_keys=True)) + + +update_node_index() diff --git a/custom_nodes/comfyui-dream-project/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24fa5a4fb413b624e4e97208c7e00a479243b1d8 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/base.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..192791a8a3fab7239870e107b3dfbdf0908c16e1 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/base.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/calculate.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/calculate.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..720eccd094370eb03c53a28c949b0390607fb19f Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/calculate.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/categories.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/categories.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74ab64721a69f6ae1a2c8b5fb9bbb32b6fcfa75c Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/categories.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/colors.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/colors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f2256e3b97edeb4f3aeb6c1343e1466c6cb3872 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/colors.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/curves.cpython-311.pyc 
b/custom_nodes/comfyui-dream-project/__pycache__/curves.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38bd2ecf6535e4582280352263525ffd527d73b5 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/curves.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/dreamlogger.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/dreamlogger.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8485278eeeb7dbc60b9e4ba5b7f9a7861346e121 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/dreamlogger.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/dreamtypes.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/dreamtypes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..839a843fd1bcf9ca00de08502366ac681fa4ed61 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/dreamtypes.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/embedded_config.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/embedded_config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d32b5b795f895c7ca2eb4cb90c9f6b5c5940c3a8 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/embedded_config.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/err.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/err.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df22add20eb8add683f13d4c6166b42b76c87cc6 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/err.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/image_processing.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/image_processing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f9e41b969231a5ecdefb4d4d33af0d1bd2e82ea Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/image_processing.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/inputfields.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/inputfields.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e82072fba2f0cfe2a67c5991b12cee2c850f648 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/inputfields.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/laboratory.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/laboratory.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0ceae26f94759c6ee5840fa811c4a042443713d Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/laboratory.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/loaders.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/loaders.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9de6f9bbf9c69828e819e197a6b486661591ead5 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/loaders.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/noise.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/noise.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..8bfe57688f63019446a9c3bffe95d7bb076f6e05 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/noise.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/output.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/output.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbe36bbcccf9c501b42d68145c4c339cc11dccb4 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/output.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/prompting.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/prompting.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7ebb29cf709c87868cab5d4d7ceef2b46cd2415 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/prompting.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/seq_processing.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/seq_processing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982f96aa3733c4881c76a6eb8cceafddcdf19470 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/seq_processing.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/shared.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/shared.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21394f2fd9eab26dbeb4d5dd500ff8e171455335 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/shared.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/switches.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/switches.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0997108333c5de3765fa26fb6692b87d84bd00a5 Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/switches.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/__pycache__/utility.cpython-311.pyc b/custom_nodes/comfyui-dream-project/__pycache__/utility.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebe2e23a3fd6e21eb6271dca1ce88be10032f5ce Binary files /dev/null and b/custom_nodes/comfyui-dream-project/__pycache__/utility.cpython-311.pyc differ diff --git a/custom_nodes/comfyui-dream-project/base.py b/custom_nodes/comfyui-dream-project/base.py new file mode 100644 index 0000000000000000000000000000000000000000..fa024498ecf61c31bb39403625b2b7fd447bfcf2 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/base.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +import glob + +from .categories import NodeCategories +from .shared import * +from .dreamtypes import * + + +class DreamFrameCounterInfo: + NODE_NAME = "Frame Counter Info" + ICON = "⚋" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = ("INT", "INT", "BOOLEAN", "BOOLEAN", "FLOAT", "FLOAT", "FLOAT", "FLOAT") + RETURN_NAMES = ("frames_completed", "total_frames", "first_frame", "last_frame", + "elapsed_seconds", "remaining_seconds", "total_seconds", "completion") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *v): + return ALWAYS_CHANGED_FLAG + + def result(self, frame_counter: FrameCounter): + return (frame_counter.current_frame, + 
frame_counter.total_frames, + frame_counter.is_first_frame, + frame_counter.is_final_frame, + frame_counter.current_time_in_seconds, + frame_counter.remaining_time_in_seconds, + frame_counter.total_time_in_seconds, + frame_counter.current_time_in_seconds / max(0.01, frame_counter.total_time_in_seconds)) + + +class DreamDirectoryFileCount: + NODE_NAME = "File Count" + ICON = "📂" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "directory_path": ("STRING", {"default": '', "multiline": False}), + "patterns": ("STRING", {"default": '*.jpg|*.png|*.jpeg', "multiline": False}), + }, + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("TOTAL",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *v): + return ALWAYS_CHANGED_FLAG + + def result(self, directory_path, patterns): + if not os.path.isdir(directory_path): + return (0,) + total = 0 + for pattern in patterns.split("|"): + files = list(glob.glob(pattern, root_dir=directory_path)) + total += len(files) + print("total " + str(total)) + return (total,) + + +class DreamFrameCounterOffset: + NODE_NAME = "Frame Counter Offset" + + ICON = "±" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "offset": ("INT", {"default": -1}), + }, + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = (FrameCounter.ID,) + RETURN_NAMES = ("frame_counter",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, frame_counter, offset): + return hashed_as_strings(frame_counter, offset) + + def result(self, frame_counter: FrameCounter, offset): + return (frame_counter.incremented(offset),) + +class DreamFrameCounterTimeOffset: + NODE_NAME = "Frame Counter Time Offset" + + ICON = "±" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "offset_seconds": ("FLOAT", {"default": 0.0}), + }, + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = (FrameCounter.ID,) + RETURN_NAMES = ("frame_counter",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, frame_counter, offset): + return hashed_as_strings(frame_counter, offset) + + def result(self, frame_counter: FrameCounter, offset_seconds): + offset = offset_seconds * frame_counter.frames_per_second + return (frame_counter.incremented(offset),) + + +class DreamSimpleFrameCounter: + NODE_NAME = "Frame Counter (Simple)" + ICON = "⚋" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "frame_index": ("INT", {"min": 0, "default": 0}), + "total_frames": ("INT", {"default": 100, "min": 1, "max": 24 * 3600 * 60}), + "frames_per_second": ("INT", {"min": 1, "default": 25}), + }, + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = (FrameCounter.ID,) + RETURN_NAMES = ("frame_counter",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def result(self, frame_index, total_frames, frames_per_second): + n = frame_index + return (FrameCounter(n, total_frames, frames_per_second),) + + +class DreamDirectoryBackedFrameCounter: + NODE_NAME = "Frame Counter (Directory)" + ICON = "⚋" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "directory_path": ("STRING", {"default": '', "multiline": False}), + "pattern": ("STRING", {"default": '*', "multiline": False}), + "indexing": (["numeric", "alphabetic order"],), + "total_frames": ("INT", {"default": 100, "min": 2, "max": 24 * 3600 * 60}), + "frames_per_second": ("INT", {"min": 1, "default": 30}), + }, + } + + CATEGORY 
= NodeCategories.ANIMATION + RETURN_TYPES = (FrameCounter.ID,) + RETURN_NAMES = ("frame_counter",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def result(self, directory_path, pattern, indexing, total_frames, frames_per_second): + results = list_images_in_directory(directory_path, pattern, indexing == "alphabetic order") + if not results: + return (FrameCounter(0, total_frames, frames_per_second),) + n = max(results.keys()) + 1 + return (FrameCounter(n, total_frames, frames_per_second),) + + +class DreamFrameCountCalculator: + NODE_NAME = "Frame Count Calculator" + ICON = "⌛" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "hours": ("INT", {"min": 0, "default": 0, "max": 23}), + "minutes": ("INT", {"min": 0, "default": 0, "max": 59}), + "seconds": ("INT", {"min": 0, "default": 10, "max": 59}), + "milliseconds": ("INT", {"min": 0, "default": 0, "max": 59}), + "frames_per_second": ("INT", {"min": 1, "default": 30}) + }, + } + + CATEGORY = NodeCategories.ANIMATION + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("TOTAL",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *v): + return ALWAYS_CHANGED_FLAG + + def result(self, hours, minutes, seconds, milliseconds, frames_per_second): + total_s = seconds + 0.001 * milliseconds + minutes * 60 + hours * 3600 + return (round(total_s * frames_per_second),) diff --git a/custom_nodes/comfyui-dream-project/calculate.py b/custom_nodes/comfyui-dream-project/calculate.py new file mode 100644 index 0000000000000000000000000000000000000000..47cd72525f9690a759352d5b00b3b1c474a80ab8 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/calculate.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +import math + +from evalidate import Expr, EvalException, base_eval_model + +from .categories import * +from .err import on_error +from .shared import hashed_as_strings + + +class DreamCalculation: + NODE_NAME = "Calculation" + ICON = "🖩" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "expression": ("STRING", {"default": "a + b + c - (r * s * t)", "multiline": True}) + }, + "optional": { + "a_int": ("INT", {"default": 0, "multiline": False}), + "b_int": ("INT", {"default": 0, "multiline": False}), + "c_int": ("INT", {"default": 0, "multiline": False}), + "r_float": ("FLOAT", {"default": 0.0, "multiline": False}), + "s_float": ("FLOAT", {"default": 0.0, "multiline": False}), + "t_float": ("FLOAT", {"default": 0.0, "multiline": False}) + } + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def _make_model(self): + funcs = self._make_functions() + m = base_eval_model.clone() + m.nodes.append('Mult') + m.nodes.append('Call') + for funname in funcs.keys(): + m.allowed_functions.append(funname) + return (m, funcs) + + def _make_functions(self): + return { + "round": round, + "float": float, + "int": int, + "abs": abs, + "min": min, + "max": max, + "tan": math.tan, + "tanh": math.tanh, + "sin": math.sin, + "sinh": math.sinh, + "cos": math.cos, + "cosh": math.cosh, + "pow": math.pow, + "sqrt": math.sqrt, + "ceil": math.ceil, + "floor": math.floor, + "pi": math.pi, + "log": math.log, + "log2": math.log2, + "acos": math.acos, + "asin": math.asin, + "acosh": math.acosh, + "asinh": math.asinh, + "atan": math.atan, + "atanh": math.atanh, + "exp": math.exp, + "fmod": math.fmod, + "factorial": math.factorial, + 
"dist": math.dist, + "atan2": math.atan2, + "log10": math.log10 + } + + def result(self, expression, **values): + model, funcs = self._make_model() + vars = funcs + for key in ("a_int", "b_int", "c_int", "r_float", "s_float", "t_float"): + nm = key.split("_")[0] + v = values.get(key, None) + if v is not None: + vars[nm] = v + try: + data = Expr(expression, model=model).eval(vars) + if isinstance(data, (int, float)): + return float(data), int(round(data)) + else: + return 0.0, 0 + except EvalException as e: + on_error(DreamCalculation, str(e)) diff --git a/custom_nodes/comfyui-dream-project/categories.py b/custom_nodes/comfyui-dream-project/categories.py new file mode 100644 index 0000000000000000000000000000000000000000..4299647857f987adbbcbe0ea6480231f2b8c4794 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/categories.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +class NodeCategories: + ANIMATION = "animation" + ANIMATION_POSTPROCESSING = ANIMATION + "/postprocessing" + ANIMATION_TRANSFORMS = ANIMATION + "/transforms" + ANIMATION_CURVES = "animation/curves" + CONDITIONING = "conditioning" + IMAGE_POSTPROCESSING = "image/postprocessing" + IMAGE_ANIMATION = "image/animation" + IMAGE_COLORS = "image/color" + IMAGE_GENERATE = "image/generate" + IMAGE = "image" + UTILS = "utils" + UTILS_SWITCHES = "utils/switches" \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/colors.py b/custom_nodes/comfyui-dream-project/colors.py new file mode 100644 index 0000000000000000000000000000000000000000..9628682e17cf558e8bc46764731abfc5dc3051a7 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/colors.py @@ -0,0 +1,390 @@ +# -*- coding: utf-8 -*- + +from .categories import NodeCategories +from .shared import * +from .dreamtypes import * + + +class DreamImageAreaSampler: + NODE_NAME = "Sample Image Area as Palette" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "samples": ("INT", {"default": 256, "min": 1, "max": 1024 * 4}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "area": (["top-left", "top-center", "top-right", + "center-left", "center", "center-right", + "bottom-left", "bottom-center", "bottom-right"],) + }, + } + + CATEGORY = NodeCategories.IMAGE_COLORS + RETURN_TYPES = (RGBPalette.ID,) + RETURN_NAMES = ("palette",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def _get_pixel_area(self, img: DreamImage, area): + w = img.width + h = img.height + wpart = round(w / 3) + hpart = round(h / 3) + x0 = 0 + x1 = wpart - 1 + x2 = wpart + x3 = wpart + wpart - 1 + x4 = wpart + wpart + x5 = w - 1 + y0 = 0 + y1 = hpart - 1 + y2 = hpart + y3 = hpart + hpart - 1 + y4 = hpart + hpart + y5 = h - 1 + if area == "center": + return (x2, y2, x3, y3) + elif area == "top-center": + return (x2, y0, x3, y1) + elif area == "bottom-center": + return (x2, y4, x3, y5) + elif area == "center-left": + return (x0, y2, x1, y3) + elif area == "top-left": + return (x0, y0, x1, y1) + elif area == "bottom-left": + return (x0, y4, x1, y5) + elif area == "center-right": + return (x4, y2, x5, y3) + elif area == "top-right": + return (x4, y0, x5, y1) + elif area == "bottom-right": + return (x4, y4, x5, y5) + + def result(self, image, samples, seed, area): + result = list() + r = random.Random() + r.seed(seed) + for data in image: + di = DreamImage(tensor_image=data) + area = self._get_pixel_area(di, area) + + pixels = list() + for i in range(samples): + x = r.randint(area[0], 
area[2])
+                y = r.randint(area[1], area[3])
+                pixels.append(di.get_pixel(x, y))
+            result.append(RGBPalette(colors=pixels))
+
+        return (tuple(result),)
+
+
+class DreamImageSampler:
+    NODE_NAME = "Sample Image as Palette"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "samples": ("INT", {"default": 1024, "min": 1, "max": 1024 * 4}),
+                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
+            },
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = (RGBPalette.ID,)
+    RETURN_NAMES = ("palette",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, image, samples, seed):
+        result = list()
+        r = random.Random()
+        r.seed(seed)
+        for data in image:
+            di = DreamImage(tensor_image=data)
+            pixels = list()
+            for i in range(samples):
+                x = r.randint(0, di.width - 1)
+                y = r.randint(0, di.height - 1)
+                pixels.append(di.get_pixel(x, y))
+            result.append(RGBPalette(colors=pixels))
+
+        return (tuple(result),)
+
+
+class DreamColorAlign:
+    NODE_NAME = "Palette Color Align"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": SharedTypes.palette | {
+                "target_align": (RGBPalette.ID,),
+                "alignment_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 10.0, "step": 0.1}),
+            }
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = (RGBPalette.ID,)
+    RETURN_NAMES = ("palette",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, palette: Tuple[RGBPalette], target_align: Tuple[RGBPalette], alignment_factor: float):
+        results = list()
+
+        def _limit(c):
+            return max(min(c, 255), 0)
+
+        for i in range(len(palette)):
+            p = palette[i]
+            t = target_align[i]
+            (_, _, r1, g1, b1) = p.analyze()
+            (_, _, r2, g2, b2) = t.analyze()
+
+            dr = (r2 - r1) * alignment_factor
+            dg = (g2 - g1) * alignment_factor
+            db = (b2 - b1) * alignment_factor
+            new_pixels = list()
+            for pixel in p:
+                r = _limit(round(pixel[0] + (255 * dr)))
+                g = _limit(round(pixel[1] + (255 * dg)))
+                b = _limit(round(pixel[2] + (255 * db)))
+                new_pixels.append((r, g, b))
+            results.append(RGBPalette(colors=new_pixels))
+        return (tuple(results),)
+
+
+class DreamColorShift:
+    NODE_NAME = "Palette Color Shift"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": SharedTypes.palette | {
+                "red_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}),
+                "green_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}),
+                "blue_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1}),
+                "fixed_brightness": (["yes", "no"],),
+            }
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = (RGBPalette.ID,)
+    RETURN_NAMES = ("palette",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, palette, red_multiplier, green_multiplier, blue_multiplier, fixed_brightness):
+        results = list()
+
+        def _limit(c):
+            return max(min(c, 255), 0)
+
+        for p in palette:
+            new_pixels = list()
+            for pixel in p:
+                s = pixel[0] + pixel[1] + pixel[2]
+                r = _limit(round(pixel[0] * red_multiplier))
+                g = _limit(round(pixel[1] * green_multiplier))
+                b = _limit(round(pixel[2] * blue_multiplier))
+                if fixed_brightness == "yes":
+                    brightness_factor = max(s, 1) / float(max(r + g + b, 1))
+                    r = _limit(round(r * brightness_factor))
+                    g = _limit(round(g * brightness_factor))
+                    b = _limit(round(b * brightness_factor))
+
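+                # with fixed_brightness the shifted channels are rescaled so
+                # their sum stays close to the original pixel's channel sum,
+                # shifting the hue without changing overall brightness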
new_pixels.append((r, g, b))
+            results.append(RGBPalette(colors=new_pixels))
+        return (tuple(results),)
+
+
+class DreamImageColorShift:
+    NODE_NAME = "Image Color Shift"
+    ICON = "🖼"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {"image": ("IMAGE",),
+                         "red_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0}),
+                         "green_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0}),
+                         "blue_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0}),
+                         },
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, image, red_multiplier, green_multiplier, blue_multiplier):
+        proc = DreamImageProcessor(inputs=image)
+
+        def recolor(im: DreamImage, *a, **args):
+            return (im.adjust_colors(red_multiplier, green_multiplier, blue_multiplier),)
+
+        return proc.process(recolor)
+
+
+class DreamImageBrightness:
+    NODE_NAME = "Image Brightness Adjustment"
+    ICON = "☼"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {"image": ("IMAGE",),
+                         "factor": ("FLOAT", {"default": 1.0, "min": 0.0}),
+                         },
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, image, factor):
+        proc = DreamImageProcessor(inputs=image)
+
+        def change(im: DreamImage, *a, **args):
+            return (im.change_brightness(factor),)
+
+        return proc.process(change)
+
+
+class DreamImageContrast:
+    NODE_NAME = "Image Contrast Adjustment"
+    ICON = "◐"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {"image": ("IMAGE",),
+                         "factor": ("FLOAT", {"default": 1.0, "min": 0.0}),
+                         },
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("image",)
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, image, factor):
+        proc = DreamImageProcessor(inputs=image)
+
+        def change(im: DreamImage, *a, **args):
+            return (im.change_contrast(factor),)
+
+        return proc.process(change)
+
+
+class DreamComparePalette:
+    NODE_NAME = "Compare Palettes"
+    ICON = "📊"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "a": (RGBPalette.ID,),
+                "b": (RGBPalette.ID,),
+            },
+        }
+
+    CATEGORY = NodeCategories.IMAGE_COLORS
+    RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "FLOAT", "FLOAT")
+    RETURN_NAMES = (
+        "brightness_multiplier", "contrast_multiplier", "red_multiplier", "green_multiplier", "blue_multiplier")
+    FUNCTION = "result"
+
+    @classmethod
+    def IS_CHANGED(cls, *values):
+        return ALWAYS_CHANGED_FLAG
+
+    def result(self, a, b):
+        MIN_VALUE = 1 / 255.0
+
+        brightness = list()
+        contrasts = list()
+        reds = list()
+        greens = list()
+        blues = list()
+
+        for i in range(min(len(a), len(b))):
+            (bright, ctr, red, green, blue) = a[i].analyze()
+            (bright2, ctr2, red2, green2, blue2) = b[i].analyze()
+            brightness.append(bright2 / max(MIN_VALUE, bright))
+            contrasts.append(ctr2 / max(MIN_VALUE, ctr))
+            reds.append(red2 / max(MIN_VALUE, red))
+            greens.append(green2 / max(MIN_VALUE, green))
+            blues.append(blue2 / max(MIN_VALUE, blue))
+
+        n = len(brightness)
+
+        return (sum(brightness) / n, sum(contrasts) / n, sum(reds) / n,
+                sum(greens) / n, sum(blues) / n)
+
+
+class DreamAnalyzePalette:
+    NODE_NAME = "Analyze Palette"
+    ICON = "📊"
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": 
SharedTypes.palette + , + } + + CATEGORY = NodeCategories.IMAGE_COLORS + RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT", "FLOAT", "FLOAT") + RETURN_NAMES = ("brightness", "contrast", "redness", "greenness", "blueness") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def result(self, palette): + f = 1.0 / len(palette) + (w, c, r, g, b) = (0, 0, 0, 0, 0) + for p in palette: + (brightness, contrast, red, green, blue) = p.analyze() + w += brightness + c += contrast + r += red + g += green + b += blue + + return w * f, c * f, r * f, g * f, b * f diff --git a/custom_nodes/comfyui-dream-project/config.json b/custom_nodes/comfyui-dream-project/config.json new file mode 100644 index 0000000000000000000000000000000000000000..c1829880dd7d86ac6ac6de9da4619b14fc7cf80f --- /dev/null +++ b/custom_nodes/comfyui-dream-project/config.json @@ -0,0 +1,54 @@ +{ + "ffmpeg": { + "file_extension": "mp4", + "path": "ffmpeg", + "arguments": [ + "-r", + "%FPS%", + "-f", + "concat", + "-safe", + "0", + "-vsync", + "cfr", + "-i", + "%FRAMES%", + "-c:v", + "libx264", + "-pix_fmt", + "yuv420p", + "%OUTPUT%" + ] + }, + "mpeg_coder": { + "encoding_threads": 4, + "bitrate_factor": 1.0, + "max_b_frame": 2, + "file_extension": "mp4", + "codec_name": "libx264" + }, + "encoding": { + "jpeg_quality": 95 + }, + "debug": false, + "ui": { + "top_category": "Dream", + "prepend_icon_to_category": true, + "append_icon_to_category": false, + "prepend_icon_to_node": true, + "append_icon_to_node": false, + "category_icons": { + "animation": "\ud83c\udfa5", + "postprocessing": "\u2699", + "transforms": "\ud83d\udd00", + "curves": "\ud83d\udcc8", + "color": "\ud83c\udfa8", + "generate": "\u26a1", + "utils": "\ud83d\udee0", + "image": "\ud83c\udf04", + "switches": "\u2b46", + "conditioning": "\u262f", + "Dream": "\u2728" + } + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/curves.py b/custom_nodes/comfyui-dream-project/curves.py new file mode 100644 index 0000000000000000000000000000000000000000..acedce17af25b34df034c1419853219a9a443eaa --- /dev/null +++ b/custom_nodes/comfyui-dream-project/curves.py @@ -0,0 +1,459 @@ +# -*- coding: utf-8 -*- +import csv +import functools +import math +import os + +from scipy.io.wavfile import read as wav_read + +from .categories import NodeCategories +from .shared import hashed_as_strings +from .dreamtypes import SharedTypes, FrameCounter + + +def _linear_value_calc(x, x_start, x_end, y_start, y_end): + if x <= x_start: + return y_start + if x >= x_end: + return y_end + dx = max(x_end - x_start, 0.0001) + n = (x - x_start) / dx + return (y_end - y_start) * n + y_start + + +def _curve_result(f: float): + return (f, int(round(f))) + + +class DreamSineWave: + NODE_NAME = "Sine Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "max_value": ("FLOAT", {"default": 1.0, "multiline": False}), + "min_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "periodicity_seconds": ("FLOAT", {"default": 10.0, "multiline": False, "min": 0.01}), + "phase": ("FLOAT", {"default": 0.0, "multiline": False, "min": -1, "max": 1}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, max_value, min_value, periodicity_seconds, phase): + x = 
frame_counter.current_time_in_seconds + a = (max_value - min_value) * 0.5 + c = phase + b = 2 * math.pi / periodicity_seconds + d = (max_value + min_value) / 2 + y = a * math.sin(b * (x + c)) + d + return _curve_result(y) + + +class DreamSawWave: + NODE_NAME = "Saw Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "max_value": ("FLOAT", {"default": 1.0, "multiline": False}), + "min_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "periodicity_seconds": ("FLOAT", {"default": 10.0, "multiline": False, "min": 0.01}), + "phase": ("FLOAT", {"default": 0.0, "multiline": False, "min": -1, "max": 1}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, max_value, min_value, periodicity_seconds, phase): + x = frame_counter.current_time_in_seconds + x = ((x + periodicity_seconds * phase) % periodicity_seconds) / periodicity_seconds + y = x * (max_value - min_value) + min_value + return _curve_result(y) + + +class DreamTriangleWave: + NODE_NAME = "Triangle Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "max_value": ("FLOAT", {"default": 1.0, "multiline": False}), + "min_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "periodicity_seconds": ("FLOAT", {"default": 10.0, "multiline": False, "min": 0.01}), + "phase": ("FLOAT", {"default": 0.0, "multiline": False, "min": -1, "max": 1}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, max_value, min_value, periodicity_seconds, phase): + x = frame_counter.current_time_in_seconds + x = ((x + periodicity_seconds * phase) % periodicity_seconds) / periodicity_seconds + if x <= 0.5: + x *= 2 + y = x * (max_value - min_value) + min_value + else: + x = (x - 0.5) * 2 + y = max_value - x * (max_value - min_value) + return _curve_result(y) + + +class WavData: + def __init__(self, sampling_rate: float, single_channel_samples, fps: float): + self._length_in_seconds = len(single_channel_samples) / sampling_rate + self._num_buckets = round(self._length_in_seconds * fps * 3) + self._bucket_size = len(single_channel_samples) / float(self._num_buckets) + self._buckets = list() + self._rate = sampling_rate + self._max_bucket_value = 0 + for i in range(self._num_buckets): + start_index = round(i * self._bucket_size) + end_index = round((i + 1) * self._bucket_size) - 1 + samples = list(map(lambda n: abs(n), single_channel_samples[start_index:end_index])) + bucket_total = sum(samples) + self._buckets.append(bucket_total) + self._max_bucket_value=max(bucket_total, self._max_bucket_value) + + for i in range(self._num_buckets): + self._buckets[i] = float(self._buckets[i]) / self._max_bucket_value + + def value_at_time(self, second: float) -> float: + if second < 0.0 or second > self._length_in_seconds: + return 0.0 + nsample = second * self._rate + nbucket = min(max(0, round(nsample / self._bucket_size)), self._num_buckets - 1) + return self._buckets[nbucket] + + +@functools.lru_cache(4) +def _wav_loader(filepath, fps): + sampling_rate, samples = wav_read(filepath) + single_channel = samples[:, 0] 
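+    # NOTE: only channel 0 is used; scipy's WAV reader returns a 1-D array
+    # for mono files, so this indexing assumes a multi-channel (e.g. stereo) input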
+ return WavData(sampling_rate, single_channel, fps) + + +class DreamWavCurve: + NODE_NAME = "WAV Curve" + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + ICON = "∿" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "wav_path": ("STRING", {"default": "audio.wav"}), + "scale": ("FLOAT", {"default": 1.0, "multiline": False}) + }, + } + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, wav_path, scale): + if not os.path.isfile(wav_path): + return (0.0, 0) + data = _wav_loader(wav_path, frame_counter.frames_per_second) + frame_counter.current_time_in_seconds + v = data.value_at_time(frame_counter.current_time_in_seconds) + return (v * scale, round(v * scale)) + + +class DreamTriangleEvent: + NODE_NAME = "Triangle Event Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "max_value": ("FLOAT", {"default": 1.0, "multiline": False}), + "min_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "width_seconds": ("FLOAT", {"default": 1.0, "multiline": False, "min": 0.1}), + "center_seconds": ("FLOAT", {"default": 10.0, "multiline": False, "min": 0.0}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, max_value, min_value, width_seconds, center_seconds): + x = frame_counter.current_time_in_seconds + start = center_seconds - width_seconds * 0.5 + end = center_seconds + width_seconds * 0.5 + if start <= x <= center_seconds: + y = _linear_value_calc(x, start, center_seconds, min_value, max_value) + elif center_seconds < x <= end: + y = _linear_value_calc(x, center_seconds, end, max_value, min_value) + else: + y = min_value + return _curve_result(y) + + +class DreamSmoothEvent: + NODE_NAME = "Smooth Event Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "max_value": ("FLOAT", {"default": 1.0, "multiline": False}), + "min_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "width_seconds": ("FLOAT", {"default": 1.0, "multiline": False, "min": 0.1}), + "center_seconds": ("FLOAT", {"default": 10.0, "multiline": False, "min": 0.0}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, frame_counter: FrameCounter, max_value, min_value, width_seconds, center_seconds): + x = frame_counter.current_time_in_seconds + start = center_seconds - width_seconds * 0.5 + end = center_seconds + width_seconds * 0.5 + if start <= x <= center_seconds: + y = _linear_value_calc(x, start, center_seconds, 0.0, 1.0) + elif center_seconds < x <= end: + y = _linear_value_calc(x, center_seconds, end, 1.0, 0.0) + else: + y = 0.0 + if y < 0.5: + y = ((y + y) * (y + y)) * 0.5 + else: + a = (y - 0.5) * 2 + y = math.pow(a, 0.25) * 0.5 + 0.5 + return _curve_result(y * (max_value - min_value) + min_value) + + +class DreamBeatCurve: + NODE_NAME = "Beat Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "bpm": ("FLOAT", {"default": 
100.0, "multiline": False}), + "time_offset": ("FLOAT", {"default": 0.0, "multiline": False}), + "measure_length": ("INT", {"default": 4, "min": 1}), + "low_value": ("FLOAT", {"default": 0.0}), + "high_value": ("FLOAT", {"default": 1.0}), + "invert": (["no", "yes"],), + "power": ("FLOAT", {"default": 2.0, "min": 0.25, "max": 4}), + "accent_1": ("INT", {"default": 1, "min": 1, "max": 24}), + }, + "optional": { + "accent_2": ("INT", {"default": 3, "min": 1, "max": 24}), + "accent_3": ("INT", {"default": 0}), + "accent_4": ("INT", {"default": 0}), + } + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def _get_value_for_accent(self, accent, measure_length, bpm, frame_counter: FrameCounter, frame_offset): + current_frame = frame_counter.current_frame + frame_offset + frames_per_minute = frame_counter.frames_per_second * 60.0 + frames_per_beat = frames_per_minute / bpm + frames_per_measure = frames_per_beat * measure_length + frame = (current_frame % frames_per_measure) + accent_start = (accent - 1) * frames_per_beat + accent_end = accent * frames_per_beat + if frame >= accent_start and frame < accent_end: + return 1.0 - ((frame - accent_start) / frames_per_beat) + return 0 + + def result(self, bpm, frame_counter: FrameCounter, measure_length, low_value, high_value, power, invert, + time_offset, **accents): + frame_offset = int(round(time_offset * frame_counter.frames_per_second)) + accents_set = set(filter(lambda v: v >= 1 and v <= measure_length, + map(lambda i: accents.get("accent_" + str(i), -1), range(30)))) + v = 0.0 + for a in accents_set: + v += math.pow(self._get_value_for_accent(a, measure_length, bpm, frame_counter, frame_offset), power) + if invert == "yes": + v = 1.0 - v + + r = low_value + v * (high_value - low_value) + return _curve_result(r) + + +class DreamLinear: + NODE_NAME = "Linear Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "initial_value": ("FLOAT", {"default": 0.0, "multiline": False}), + "final_value": ("FLOAT", {"default": 100.0, "multiline": False}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, initial_value, final_value, frame_counter: FrameCounter): + d = final_value - initial_value + v = initial_value + frame_counter.progress * d + return (v, int(round(v))) + + +def _is_as_float(s: str): + try: + float(s) + return True + except ValueError: + return False + + +class DreamCSVGenerator: + NODE_NAME = "CSV Generator" + ICON = "⌗" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "value": ("FLOAT", {"forceInput": True, "default": 0.0}), + "csvfile": ("STRING", {"default": "", "multiline": False}), + "csv_dialect": (csv.list_dialects(),) + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = () + RETURN_NAMES = () + FUNCTION = "write" + OUTPUT_NODE = True + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def write(self, csvfile, frame_counter: FrameCounter, value, csv_dialect): + if frame_counter.is_first_frame and csvfile: + with open(csvfile, 'w', newline='') as csvfile: + csvwriter = csv.writer(csvfile, dialect=csv_dialect) + 
csvwriter.writerow(['Frame', 'Value']) + csvwriter.writerow([frame_counter.current_frame, str(value)]) + else: + with open(csvfile, 'a', newline='') as csvfile: + csvwriter = csv.writer(csvfile, dialect=csv_dialect) + csvwriter.writerow([frame_counter.current_frame, str(value)]) + return () + + +class DreamCSVCurve: + NODE_NAME = "CSV Curve" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "csvfile": ("STRING", {"default": "", "multiline": False}), + "first_column_type": (["seconds", "frames"],), + "interpolate": (["true", "false"],), + "csv_dialect": (csv.list_dialects(),) + }, + } + + CATEGORY = NodeCategories.ANIMATION_CURVES + RETURN_TYPES = ("FLOAT", "INT") + RETURN_NAMES = ("FLOAT", "INT") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def _row_yield(self, file, csv_dialect): + prev_row = None + for row in csv.reader(file, dialect=csv_dialect): + if len(row) == 2 and _is_as_float(row[0]) and _is_as_float(row[1]): + row = list(map(float, row)) + yield (prev_row, row) + prev_row = row + if prev_row is not None: + yield (prev_row, None) + + def result(self, csvfile, frame_counter: FrameCounter, first_column_type, interpolate, csv_dialect): + interpolate = interpolate == "true" + + def _first_col_to_frame(v: float): + if first_column_type == "frames": + return round(v) + else: + return round(v * frame_counter.frames_per_second) + + with open(csvfile) as f: + for (prev, current) in self._row_yield(f, csv_dialect): + if prev is None and frame_counter.current_frame < _first_col_to_frame(current[0]): + # before first row + return (current[1], int(round(current[1]))) + if current is None: + # after last row + return (prev[1], int(round(prev[1]))) + if prev is not None and current is not None: + frame1 = _first_col_to_frame(prev[0]) + value1 = prev[1] + frame2 = _first_col_to_frame(current[0]) + value2 = current[1] + if frame1 <= frame_counter.current_frame and interpolate and frame2 > frame_counter.current_frame: + offset = (frame_counter.current_frame - frame1) / float(frame2 - frame1) + v = value1 * (1.0 - offset) + value2 * offset + return (v, int(round(v))) + elif frame1 <= frame_counter.current_frame and frame2 > frame_counter.current_frame: + return (value1, int(round(value1))) + return (0.0, 0) diff --git a/custom_nodes/comfyui-dream-project/disable.py b/custom_nodes/comfyui-dream-project/disable.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e651c52d074b9fc9c55f191b439e26f0b6673e --- /dev/null +++ b/custom_nodes/comfyui-dream-project/disable.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +def run_disable(): + pass + + +if __name__ == "__main__": + run_disable() diff --git a/custom_nodes/comfyui-dream-project/dreamlogger.py b/custom_nodes/comfyui-dream-project/dreamlogger.py new file mode 100644 index 0000000000000000000000000000000000000000..511cf461d0e692bd6034ed038ef357a5a9c0b35f --- /dev/null +++ b/custom_nodes/comfyui-dream-project/dreamlogger.py @@ -0,0 +1,18 @@ +class DreamLog: + def __init__(self, debug_active=False): + self._debug = debug_active + + def _print(self, text: str, *args, **kwargs): + if args or kwargs: + text = text.format(*args, **kwargs) + print("[DREAM] " + text) + + def error(self, text: str, *args, **kwargs): + self._print(text, *args, **kwargs) + + def info(self, text: str, *args, **kwargs): + self._print(text, *args, **kwargs) + + def debug(self, text: str, *args, **kwargs): + if self._debug: + self._print(text, *args, 
**kwargs) diff --git a/custom_nodes/comfyui-dream-project/dreamtypes.py b/custom_nodes/comfyui-dream-project/dreamtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..746275932b6aa024743cc0940b508b1a92c6de3b --- /dev/null +++ b/custom_nodes/comfyui-dream-project/dreamtypes.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- +import random +import time + +from typing import List, Dict, Tuple + +from .shared import DreamImage + + +class RGBPalette: + ID = "RGB_PALETTE" + + def __init__(self, colors: List[tuple[int, int, int]] = None, image: DreamImage = None): + self._colors = [] + + def _fix_tuple(t): + if len(t) < 3: + return (t[0], t[0], t[0]) + else: + return t + + if image: + for p, _, _ in image: + self._colors.append(_fix_tuple(p)) + if colors: + for c in colors: + self._colors.append(_fix_tuple(c)) + + def _calculate_channel_contrast(self, c): + hist = list(map(lambda _: 0, range(16))) + for pixel in self._colors: + hist[pixel[c] // 16] += 1 + s = 0 + max_possible = (15 - 0) * (len(self) // 2) * (len(self) // 2) + for i in range(16): + for j in range(i): + if i != j: + s += abs(i - j) * hist[i] * hist[j] + return s / max_possible + + def _calculate_combined_contrast(self): + s = 0 + for c in range(3): + s += self._calculate_channel_contrast(c) + return s / 3 + + def analyze(self): + total_red = 0 + total_blue = 0 + total_green = 0 + for pixel in self: + total_red += pixel[0] + total_green += pixel[1] + total_blue += pixel[2] + n = len(self._colors) + r = float(total_red) / (255 * n) + g = float(total_green) / (255 * n) + b = float(total_blue) / (255 * n) + return ((r + g + b) / 3.0, self._calculate_combined_contrast(), r, g, b) + + def __len__(self): + return len(self._colors) + + def __iter__(self): + return iter(self._colors) + + def random_iteration(self, seed=None): + s = seed if seed is not None else int(time.time() * 1000) + n = len(self._colors) - 1 + c = self._colors + + class _ColorIterator: + def __init__(self): + self._r = random.Random() + self._r.seed(s) + self._n = n + self._c = c + + def __next__(self): + return self._c[self._r.randint(0, self._n)] + + return _ColorIterator() + + +class PartialPrompt: + ID = "PARTIAL_PROMPT" + + def __init__(self): + self._data = {} + + def add(self, text: str, weight: float): + output = PartialPrompt() + output._data = dict(self._data) + for parts in text.split(","): + parts = parts.strip() + if " " in parts: + output._data["(" + parts + ")"] = weight + else: + output._data[parts] = weight + return output + + def is_empty(self): + return not self._data + + def abs_sum(self): + if not self._data: + return 0.0 + return sum(map(abs, self._data.values())) + + def abs_max(self): + if not self._data: + return 0.0 + return max(map(abs, self._data.values())) + + def scaled_by(self, f: float): + new_data = PartialPrompt() + new_data._data = dict(self._data) + for text, weight in new_data._data.items(): + new_data._data[text] = weight * f + return new_data + + def finalize(self, clamp: float): + items = self._data.items() + items = sorted(items, key=lambda pair: (pair[1], pair[0])) + pos = list() + neg = list() + for text, w in sorted(items, key=lambda pair: (-pair[1], pair[0])): + if w >= 0.0001: + pos.append("({}:{:.3f})".format(text, min(clamp, w))) + for text, w in sorted(items, key=lambda pair: (pair[1], pair[0])): + if w <= -0.0001: + neg.append("({}:{:.3f})".format(text, min(clamp, -w))) + return ", ".join(pos), ", ".join(neg) + + +class LogEntry: + ID = "LOG_ENTRY" + + @classmethod + def new(cls, text): + return 
LogEntry([(time.time(), text)]) + + def __init__(self, data: List[Tuple[float, str]] = None): + if data is None: + self._data = list() + else: + self._data = list(data) + + def add(self, text: str): + new_data = list(self._data) + new_data.append((time.time(), text)) + return LogEntry(new_data) + + def merge(self, log_entry): + new_data = list(self._data) + new_data.extend(log_entry._data) + return LogEntry(new_data) + + def get_filtered_entries(self, t: float): + for d in sorted(self._data): + if d[0] > t: + yield d + + +class FrameCounter: + ID = "FRAME_COUNTER" + + def __init__(self, current_frame=0, total_frames=1, frames_per_second=25.0): + self.current_frame = max(0, current_frame) + self.total_frames = max(total_frames, 1) + self.frames_per_second = float(max(1.0, frames_per_second)) + + def incremented(self, amount: int): + return FrameCounter(self.current_frame + amount, self.total_frames, self.frames_per_second) + + @property + def is_first_frame(self): + return self.current_frame == 0 + + @property + def is_final_frame(self): + return (self.current_frame + 1) == self.total_frames + + @property + def is_after_last_frame(self): + return self.current_frame >= self.total_frames + + @property + def current_time_in_seconds(self): + return float(self.current_frame) / self.frames_per_second + + @property + def total_time_in_seconds(self): + return float(self.total_frames) / self.frames_per_second + + @property + def remaining_time_in_seconds(self): + return self.total_time_in_seconds - self.current_time_in_seconds + + @property + def progress(self): + return float(self.current_frame) / (max(2, self.total_frames) - 1) + + +class AnimationSequence: + ID = "ANIMATION_SEQUENCE" + + def __init__(self, frame_counter: FrameCounter, frames: Dict[int, List[str]] = None): + self.frames = frames + self.fps = frame_counter.frames_per_second + self.frame_counter = frame_counter + if self.is_defined: + self.keys_in_order = sorted(frames.keys()) + self.num_batches = min(map(len, self.frames.values())) + else: + self.keys_in_order = [] + self.num_batches = 0 + + @property + def batches(self): + return range(self.num_batches) + + def get_image_files_of_batch(self, batch_num): + for key in self.keys_in_order: + yield self.frames[key][batch_num] + + @property + def is_defined(self): + if self.frames: + return True + else: + return False + + +class SharedTypes: + frame_counter = {"frame_counter": (FrameCounter.ID,)} + sequence = {"sequence": (AnimationSequence.ID,)} + palette = {"palette": (RGBPalette.ID,)} diff --git a/custom_nodes/comfyui-dream-project/embedded_config.py b/custom_nodes/comfyui-dream-project/embedded_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b795cdcc437ea376f7a14fe949fb94125d8851c7 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/embedded_config.py @@ -0,0 +1,41 @@ +EMBEDDED_CONFIGURATION = { + "ffmpeg": { + "file_extension": "mp4", + "path": "ffmpeg", + "arguments": ["-r", "%FPS%", "-f", "concat", "-safe", "0", "-vsync", + "cfr", "-i", "%FRAMES%", "-c:v", "libx264", "-pix_fmt", + "yuv420p", "%OUTPUT%"] + }, + "mpeg_coder": { + "encoding_threads": 4, + "bitrate_factor": 1.0, + "max_b_frame": 2, + "file_extension": "mp4", + "codec_name": "libx264" + }, + "encoding": { + "jpeg_quality": 95 + }, + "debug": False, + "ui": { + "top_category": "Dream", + "prepend_icon_to_category": True, + "append_icon_to_category": False, + "prepend_icon_to_node": True, + "append_icon_to_node": False, + "category_icons": { + "animation": "🎥", + "postprocessing": "⚙", 
+ "transforms": "🔀", + "curves": "📈", + "color": "🎨", + "generate": "⚡", + "utils": "🛠", + "image": "🌄", + "switches": "⭆", + "conditioning": "☯", + "Dream": "✨" + } + }, + +} diff --git a/custom_nodes/comfyui-dream-project/enable.py b/custom_nodes/comfyui-dream-project/enable.py new file mode 100644 index 0000000000000000000000000000000000000000..32c0cabcbb2cac55bc0650c74041a3606054792d --- /dev/null +++ b/custom_nodes/comfyui-dream-project/enable.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +def run_enable(): + pass + + +if __name__ == "__main__": + run_enable() diff --git a/custom_nodes/comfyui-dream-project/err.py b/custom_nodes/comfyui-dream-project/err.py new file mode 100644 index 0000000000000000000000000000000000000000..8d0e16fd1da58843d6540f9e7784b975af1408db --- /dev/null +++ b/custom_nodes/comfyui-dream-project/err.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +def _get_node_name(cls): + return cls.__dict__.get("NODE_NAME", str(cls)) + + +def on_error(node_cls: type, message: str): + msg = "Failure in [" + _get_node_name(node_cls) + "]:" + message + print(msg) + raise Exception(msg) diff --git a/custom_nodes/comfyui-dream-project/examples/area-sampled-noise.json b/custom_nodes/comfyui-dream-project/examples/area-sampled-noise.json new file mode 100644 index 0000000000000000000000000000000000000000..c75e02b646948e6147e778538d9a15149d53b3f9 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/examples/area-sampled-noise.json @@ -0,0 +1,1475 @@ +{ + "last_node_id": 32, + "last_link_id": 37, + "nodes": [ + { + "id": 1, + "type": "LoadImage", + "pos": [ + -290, + 250 + ], + "size": { + "0": 430, + "1": 530 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "forest (2).jpg", + "image" + ] + }, + { + "id": 4, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 440 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 1 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 18, + 32 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 6263246444646, + "randomize", + "center" + ] + }, + { + "id": 12, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 70 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 2 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 15, + 30 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 602439956783214, + "randomize", + "top-right" + ] + }, + { + "id": 5, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 250 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 3 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 17, + 31 + ], + "shape": 3, + "slot_index": 0 + 
} + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 347895810515905, + "randomize", + "center-left" + ] + }, + { + "id": 6, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 640 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 4 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 19, + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 442018658189454, + "randomize", + "center-right" + ] + }, + { + "id": 7, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 810 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 20, + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 369707362068911, + "randomize", + "bottom-left" + ] + }, + { + "id": 10, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 1010 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 6 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 21, + 35 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 495981514872635, + "randomize", + "bottom-center" + ] + }, + { + "id": 8, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + 1190 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 7 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 22, + 36 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 531491245299573, + "randomize", + "bottom-right" + ] + }, + { + "id": 9, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + -110 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 14, + 29 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 275748471798978, + "randomize", + "top-center" + ] + }, + { + "id": 11, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + 530, + -300 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 9, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 10, + 28 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 1078875074929860, + "randomize", + 
"top-left" + ] + }, + { + "id": 21, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 620 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 18 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 524929391212381, + "randomize" + ] + }, + { + "id": 17, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 80 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 15 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 78435709751137, + "randomize" + ] + }, + { + "id": 19, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 330 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 17 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 16 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 525418659561865, + "randomize" + ] + }, + { + "id": 23, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 870 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 19 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 635328469053725, + "randomize" + ] + }, + { + "id": 25, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 1150 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 20 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 383192376875704, + "randomize" + ] + }, + { + "id": 27, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 1440 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 21 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 1096999909838714, + "randomize" + ] + }, + { + "id": 29, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + 1690 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 22 + } + ], + "outputs": [ + { 
+ "name": "image", + "type": "IMAGE", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 945264383034375, + "randomize" + ] + }, + { + "id": 15, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + -210 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 14 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 385670812107820, + "randomize" + ] + }, + { + "id": 13, + "type": "Noise from Palette [Dream]", + "pos": [ + 1050, + -490 + ], + "size": { + "0": 315, + "1": 178 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 10 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Palette [Dream]" + }, + "widgets_values": [ + 256, + 256, + 0.3, + 0.5, + 869209746177441, + "randomize" + ] + }, + { + "id": 31, + "type": "Noise from Area Palettes [Dream]", + "pos": [ + 1810, + 140 + ], + "size": { + "0": 342.5999755859375, + "1": 362 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "top_left_palette", + "type": "RGB_PALETTE", + "link": 28 + }, + { + "name": "top_center_palette", + "type": "RGB_PALETTE", + "link": 29 + }, + { + "name": "top_right_palette", + "type": "RGB_PALETTE", + "link": 30 + }, + { + "name": "center_left_palette", + "type": "RGB_PALETTE", + "link": 31 + }, + { + "name": "center_palette", + "type": "RGB_PALETTE", + "link": 32 + }, + { + "name": "center_right_palette", + "type": "RGB_PALETTE", + "link": 33 + }, + { + "name": "bottom_left_palette", + "type": "RGB_PALETTE", + "link": 34 + }, + { + "name": "bottom_center_palette", + "type": "RGB_PALETTE", + "link": 35 + }, + { + "name": "bottom_right_palette", + "type": "RGB_PALETTE", + "link": 36 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Area Palettes [Dream]" + }, + "widgets_values": [ + 0.5, + 512, + 512, + 0.22727050781249997, + 0.5, + 336106403318857, + "randomize" + ] + }, + { + "id": 22, + "type": "PreviewImage", + "pos": [ + 1580, + 540 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 23 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 18, + "type": "PreviewImage", + "pos": [ + 1580, + 60 + ], + "size": [ + 140, + 190 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 20, + "type": "PreviewImage", + "pos": [ + 1580, + 310 + ], + "size": [ + 140, + 180 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 16 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 24, + "type": "PreviewImage", + "pos": [ + 1580, + 840 + ], + "size": [ + 140, + 
180 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 24 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 26, + "type": "PreviewImage", + "pos": [ + 1580, + 1070 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 28, + "type": "PreviewImage", + "pos": [ + 1580, + 1390 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 26 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 30, + "type": "PreviewImage", + "pos": [ + 1580, + 1690 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 27 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 16, + "type": "PreviewImage", + "pos": [ + 1580, + -240 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 12 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 14, + "type": "PreviewImage", + "pos": [ + 1580, + -540 + ], + "size": [ + 140, + 250 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 11 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 32, + "type": "PreviewImage", + "pos": [ + 1790, + 600 + ], + "size": [ + 460, + 510 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + } + ], + "links": [ + [ + 1, + 1, + 0, + 4, + 0, + "IMAGE" + ], + [ + 2, + 1, + 0, + 12, + 0, + "IMAGE" + ], + [ + 3, + 1, + 0, + 5, + 0, + "IMAGE" + ], + [ + 4, + 1, + 0, + 6, + 0, + "IMAGE" + ], + [ + 5, + 1, + 0, + 7, + 0, + "IMAGE" + ], + [ + 6, + 1, + 0, + 10, + 0, + "IMAGE" + ], + [ + 7, + 1, + 0, + 8, + 0, + "IMAGE" + ], + [ + 8, + 1, + 0, + 9, + 0, + "IMAGE" + ], + [ + 9, + 1, + 0, + 11, + 0, + "IMAGE" + ], + [ + 10, + 11, + 0, + 13, + 0, + "RGB_PALETTE" + ], + [ + 11, + 13, + 0, + 14, + 0, + "IMAGE" + ], + [ + 12, + 15, + 0, + 16, + 0, + "IMAGE" + ], + [ + 13, + 17, + 0, + 18, + 0, + "IMAGE" + ], + [ + 14, + 9, + 0, + 15, + 0, + "RGB_PALETTE" + ], + [ + 15, + 12, + 0, + 17, + 0, + "RGB_PALETTE" + ], + [ + 16, + 19, + 0, + 20, + 0, + "IMAGE" + ], + [ + 17, + 5, + 0, + 19, + 0, + "RGB_PALETTE" + ], + [ + 18, + 4, + 0, + 21, + 0, + "RGB_PALETTE" + ], + [ + 19, + 6, + 0, + 23, + 0, + "RGB_PALETTE" + ], + [ + 20, + 7, + 0, + 25, + 0, + "RGB_PALETTE" + ], + [ + 21, + 10, + 0, + 27, + 0, + "RGB_PALETTE" + ], + [ + 22, + 8, + 0, + 29, + 0, + "RGB_PALETTE" + ], + [ + 23, + 21, + 0, + 22, + 0, + "IMAGE" + ], + [ + 24, + 23, + 0, + 24, + 0, + "IMAGE" + ], + [ + 25, + 25, + 0, + 26, + 0, + "IMAGE" + ], + [ + 26, + 27, + 0, + 28, + 0, + "IMAGE" + ], + [ + 27, + 29, + 0, + 30, + 0, + "IMAGE" + ], + [ + 28, + 11, + 0, + 31, + 0, + "RGB_PALETTE" + ], + [ + 29, + 9, + 0, + 31, + 1, + "RGB_PALETTE" + ], + [ + 30, + 12, + 0, + 31, + 2, + "RGB_PALETTE" + ], + [ + 31, + 5, + 0, + 31, + 3, + "RGB_PALETTE" + ], + [ + 32, + 4, + 0, + 31, + 4, + "RGB_PALETTE" + ], + [ + 33, + 6, + 0, + 31, + 5, + "RGB_PALETTE" 
+ ], + [ + 34, + 7, + 0, + 31, + 6, + "RGB_PALETTE" + ], + [ + 35, + 10, + 0, + 31, + 7, + "RGB_PALETTE" + ], + [ + 36, + 8, + 0, + 31, + 8, + "RGB_PALETTE" + ], + [ + 37, + 31, + 0, + 32, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/examples/laboratory.json b/custom_nodes/comfyui-dream-project/examples/laboratory.json new file mode 100644 index 0000000000000000000000000000000000000000..19d2337c87d480a5693e9f79a679af95fb9a422d --- /dev/null +++ b/custom_nodes/comfyui-dream-project/examples/laboratory.json @@ -0,0 +1,3626 @@ +{ + "last_node_id": 80, + "last_link_id": 116, + "nodes": [ + { + "id": 14, + "type": "Build Prompt [Dream]", + "pos": [ + 1010, + -280 + ], + "size": [ + 245.1999969482422, + 108.295654296875 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 13 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 23, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 1 + } + ] + } + }, + { + "name": "added_prompt", + "type": "STRING", + "link": 84, + "widget": { + "name": "added_prompt", + "config": [ + "STRING", + { + "default": "", + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 75 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "rainbow", + 1 + ] + }, + { + "id": 13, + "type": "Build Prompt [Dream]", + "pos": [ + 730, + -280 + ], + "size": [ + 245.1999969482422, + 108.295654296875 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 12 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 22, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 1 + } + ] + } + }, + { + "name": "added_prompt", + "type": "STRING", + "link": 83, + "widget": { + "name": "added_prompt", + "config": [ + "STRING", + { + "default": "", + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "christmas", + 1 + ] + }, + { + "id": 51, + "type": "Reroute", + "pos": [ + 1469.1866925231607, + 562.588884695758 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 64, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 63 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 52, + "type": "Reroute", + "pos": [ + -428.86312922343996, + 564.5376609822581 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 65 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 64 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 10, + "type": "Build Prompt [Dream]", + "pos": [ + -173, + -284 + ], + "size": [ + 245.1999969482422, + 108.295654296875 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": null + 
}, + { + "name": "weight", + "type": "FLOAT", + "link": 19, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 1 + } + ] + } + }, + { + "name": "added_prompt", + "type": "STRING", + "link": 77, + "widget": { + "name": "added_prompt", + "config": [ + "STRING", + { + "default": "", + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 10 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "house", + 1 + ] + }, + { + "id": 61, + "type": "PrimitiveNode", + "pos": [ + 376, + 294 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 80 + ], + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + "slot_index": 0 + } + ], + "properties": {}, + "widgets_values": [ + 624, + "randomize" + ] + }, + { + "id": 12, + "type": "Build Prompt [Dream]", + "pos": [ + 430, + -280 + ], + "size": [ + 245.1999969482422, + 108.295654296875 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 11 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 21, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 1 + } + ] + } + }, + { + "name": "added_prompt", + "type": "STRING", + "link": 81, + "widget": { + "name": "added_prompt", + "config": [ + "STRING", + { + "default": "", + "multiline": true + } + ] + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 12 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "jungle", + 1 + ] + }, + { + "id": 11, + "type": "Build Prompt [Dream]", + "pos": [ + 140, + -286 + ], + "size": [ + 245.1999969482422, + 108.295654296875 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 10 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 20, + "widget": { + "name": "weight", + "config": [ + "FLOAT", + { + "default": 1 + } + ] + } + }, + { + "name": "added_prompt", + "type": "STRING", + "link": 79, + "widget": { + "name": "added_prompt", + "config": [ + "STRING", + { + "default": "", + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "horse", + 1 + ] + }, + { + "id": 60, + "type": "String Tokenizer [Dream]", + "pos": [ + 370, + 79 + ], + "size": { + "0": 325.8482971191406, + "1": 157.50692749023438 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "selected", + "type": "INT", + "link": 80, + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "token", + "type": "STRING", + "links": [ + 81 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Tokenizer [Dream]" + }, + "widgets_values": [ + "watercolor, anime, ink sketch, photo, oil painting, graffiti, glass mosaic, charcoal art, comic book art, impressionist, old photo, pixel art, ", + 
",", + 1360 + ] + }, + { + "id": 63, + "type": "PrimitiveNode", + "pos": [ + 755, + 286 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 82 + ], + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 1787, + "randomize" + ] + }, + { + "id": 62, + "type": "String Tokenizer [Dream]", + "pos": [ + 736, + 77 + ], + "size": { + "0": 325.8482971191406, + "1": 157.50692749023438 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "selected", + "type": "INT", + "link": 82, + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "token", + "type": "STRING", + "links": [ + 83 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Tokenizer [Dream]" + }, + "widgets_values": [ + "christmas, summer, winter, easter, halloween, superbowl, world cup, county fair, circus, market, celebration, birthday party, rave, horror movie, slapstick", + ",", + 207 + ] + }, + { + "id": 64, + "type": "String Tokenizer [Dream]", + "pos": [ + 1119, + 68 + ], + "size": { + "0": 325.8482971191406, + "1": 157.50692749023438 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "selected", + "type": "INT", + "link": 85, + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "token", + "type": "STRING", + "links": [ + 84 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Tokenizer [Dream]" + }, + "widgets_values": [ + "airplane, car, school, banana, rock star, president, preacher, monk, train, sailboat, monster truck, roller coaster, shopping mall, rock concert, ", + ",", + 0 + ] + }, + { + "id": 58, + "type": "String Tokenizer [Dream]", + "pos": [ + -12, + 87 + ], + "size": { + "0": 325.8482971191406, + "1": 157.50692749023438 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "selected", + "type": "INT", + "link": 78, + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "token", + "type": "STRING", + "links": [ + 79 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Tokenizer [Dream]" + }, + "widgets_values": [ + "horse, cat, dog, chicken, monkey, bird, elephant, pig, kitten, puppy, whale, goat, fish, tiger, teddybear, panda, rabbit", + ",", + 1886 + ] + }, + { + "id": 59, + "type": "PrimitiveNode", + "pos": [ + -6, + 286 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 3, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 78 + ], + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 1576, + "randomize" + ] + }, + { + "id": 55, + "type": "String Tokenizer [Dream]", + "pos": [ + -372, + 84 + ], + "size": [ + 325.84829956054546, + 157.5069268798817 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "selected", + "type": "INT", + "link": 76, + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + }, + 
"slot_index": 0 + } + ], + "outputs": [ + { + "name": "token", + "type": "STRING", + "links": [ + 77 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String Tokenizer [Dream]" + }, + "widgets_values": [ + "in a house, on a field, in a tree, in a shopping mall, in a car, on a river boat, in the forest, in the ocean, on a mountain", + ",", + 581 + ] + }, + { + "id": 57, + "type": "PrimitiveNode", + "pos": [ + -359, + 286 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 76 + ], + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 584, + "randomize" + ] + }, + { + "id": 69, + "type": "Reroute", + "pos": [ + 2710.2738751686875, + -143.5705977615644 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 89 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 90, + 91 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + 2887.9416937538945, + -120.19947392580968 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": { + "collapsed": true + }, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 91 + }, + { + "name": "text", + "type": "STRING", + "link": 32, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 4 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + 2885.9416937538945, + -80.19947392580966 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": { + "collapsed": true + }, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 90 + }, + { + "name": "text", + "type": "STRING", + "link": 33, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "multiline": true + } + ] + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 6 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 30, + "type": "Finalize Prompt [Dream]", + "pos": [ + 1291, + -275 + ], + "size": { + "0": 315, + "1": 126 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 75 + } + ], + "outputs": [ + { + "name": "positive", + "type": "STRING", + "links": [ + 30, + 94 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "STRING", + "links": [ + 31, + 95 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Finalize Prompt [Dream]" + }, + "widgets_values": [ + "raw", + 2, + 1 + ] + }, + { + "id": 73, + "type": "Log Entry Joiner [Dream]", + "pos": [ + 2205, + -949 + ], + "size": { + "0": 216.59999084472656, + "1": 86 + }, + "flags": {}, + 
"order": 51, + "mode": 0, + "inputs": [ + { + "name": "entry_0", + "type": "LOG_ENTRY", + "link": 97 + }, + { + "name": "entry_1", + "type": "LOG_ENTRY", + "link": 98 + }, + { + "name": "entry_2", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_3", + "type": "LOG_ENTRY", + "link": null + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 102 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Log Entry Joiner [Dream]" + } + }, + { + "id": 70, + "type": "Log Entry Joiner [Dream]", + "pos": [ + 4078, + -952 + ], + "size": { + "0": 216.59999084472656, + "1": 86 + }, + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "entry_0", + "type": "LOG_ENTRY", + "link": 102 + }, + { + "name": "entry_1", + "type": "LOG_ENTRY", + "link": 93 + }, + { + "name": "entry_2", + "type": "LOG_ENTRY", + "link": 103 + }, + { + "name": "entry_3", + "type": "LOG_ENTRY", + "link": null + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 104 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Log Entry Joiner [Dream]" + } + }, + { + "id": 77, + "type": "Reroute", + "pos": [ + -599.5009408776509, + -1151.2457658693525 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 107 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 108 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 78, + "type": "Reroute", + "pos": [ + 5088.998468177342, + -835.3322583924374 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 108 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 109 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 21, + "type": "Laboratory [Dream]", + "pos": [ + -224, + -946 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 41 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 20 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "Random value 455046", + 925180296886693, + "randomize", + "every frame", + -0.8, + 1.2, + "random uniform", + 0.1 + ] + }, + { + "id": 23, + "type": "Laboratory [Dream]", + "pos": [ + 353, + -955 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 45 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 22 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "Random value 455046", + 290988309040769, + "randomize", + "every frame", + -0.8, + 1.2, + "random uniform", + 0.1 + ] + }, + { + "id": 
22, + "type": "Laboratory [Dream]", + "pos": [ + 78, + -614 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 43 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 21 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "Random value 455046", + 268911751368403, + "randomize", + "every frame", + -0.8, + 1.2, + "random uniform", + 0.1 + ] + }, + { + "id": 24, + "type": "Laboratory [Dream]", + "pos": [ + 658, + -631 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 47 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "Random value 455046", + 350640887146024, + "randomize", + "every frame", + -0.8, + 1.2, + "random uniform", + 0.1 + ] + }, + { + "id": 37, + "type": "Frame Counter (Directory) [Dream]", + "pos": [ + -1147, + -975 + ], + "size": { + "0": 315, + "1": 154 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "directory_path", + "type": "STRING", + "link": 37, + "widget": { + "name": "directory_path", + "config": [ + "STRING", + { + "default": "", + "multiline": false + } + ] + } + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 38 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter (Directory) [Dream]" + }, + "widgets_values": [ + "", + "*", + "numeric", + 1000, + 30 + ] + }, + { + "id": 36, + "type": "String Input [Dream]", + "pos": [ + -1147, + -730 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 37, + 65, + 107 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Output Directory", + "properties": { + "Node name for S&R": "String Input [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI" + ] + }, + { + "id": 8, + "type": "VAEDecode", + "pos": [ + 3956.941693753893, + -38.199473925809635 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 113 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 34, + 60 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 5, + "type": "EmptyLatentImage", + "pos": [ + 2988.9416937538945, + 21.80052607419041 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 2 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 768, + 512, + 1 + ] + }, + { + "id": 53, + "type": 
"Reroute", + "pos": [ + 3480, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 67 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 68, + 100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 47, + "type": "Reroute", + "pos": [ + 2960, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 58 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 67, + 99 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 46, + "type": "Reroute", + "pos": [ + 2100, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 114 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 58 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 43, + "type": "Reroute", + "pos": [ + 780, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 48 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 114 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 42, + "type": "Reroute", + "pos": [ + 520, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 46 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 47, + 48 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 41, + "type": "Reroute", + "pos": [ + 200, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 44 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 45, + 46 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 40, + "type": "Reroute", + "pos": [ + -80, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 42 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 43, + 44 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 39, + "type": "Reroute", + "pos": [ + -420, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 40 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 41, + 42 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 38, + "type": "Reroute", + "pos": [ + -790, + -1080 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 38 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + 
"type": "FRAME_COUNTER", + "links": [ + 39, + 40 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 79, + "type": "Reroute", + "pos": [ + 4924.998468177342, + -785.3322583924377 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 110 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 111 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 20, + "type": "Laboratory [Dream]", + "pos": [ + -453, + -621 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 39 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "Random value 455046", + 1036088194386378, + "randomize", + "every frame", + -0.8, + 1.2, + "random uniform", + 0.1 + ] + }, + { + "id": 71, + "type": "String to Log Entry [Dream]", + "pos": [ + 1726, + -965 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 94, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "default": "" + } + ] + } + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 97 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String to Log Entry [Dream]" + }, + "widgets_values": [ + "", + "Positive prompt" + ] + }, + { + "id": 72, + "type": "String to Log Entry [Dream]", + "pos": [ + 1727, + -832 + ], + "size": [ + 315, + 82 + ], + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 95, + "widget": { + "name": "text", + "config": [ + "STRING", + { + "default": "" + } + ] + } + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 98 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String to Log Entry [Dream]" + }, + "widgets_values": [ + "", + "Negative prompt" + ] + }, + { + "id": 54, + "type": "Laboratory [Dream]", + "pos": [ + 2666.9416937538945, + -521.1994739258095 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 99 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [], + "shape": 3 + }, + { + "name": "INT", + "type": "INT", + "links": [ + 70 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 93 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "KSampler Steps", + 633069612491609, + "randomize", + "every frame", + 15, + 50, + "random bell", + 0.1 + ] + }, + { + "id": 3, + "type": "KSampler", + "pos": [ + 3518.941693753895, + -37.199473925809635 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + 
"link": 112 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 4 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 6 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 2 + }, + { + "name": "cfg", + "type": "FLOAT", + "link": 115, + "widget": { + "name": "cfg", + "config": [ + "FLOAT", + { + "default": 8, + "min": 0, + "max": 100 + } + ] + }, + "slot_index": 4 + }, + { + "name": "steps", + "type": "INT", + "link": 70, + "widget": { + "name": "steps", + "config": [ + "INT", + { + "default": 20, + "min": 1, + "max": 10000 + } + ] + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 496064975436282, + "randomize", + 20, + 8, + "euler", + "normal", + 1 + ] + }, + { + "id": 35, + "type": "Laboratory [Dream]", + "pos": [ + 3196.9416937538945, + -533.1994739258092 + ], + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 68 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 115 + ], + "shape": 3 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 103 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "Laboratory [Dream]" + }, + "widgets_values": [ + "KSampler Cfg", + 357990178944917, + "randomize", + "every frame", + 6, + 10, + "random uniform", + 0.1 + ] + }, + { + "id": 75, + "type": "Log Entry Joiner [Dream]", + "pos": [ + 4744.998468177342, + -648.3322583924376 + ], + "size": { + "0": 216.59999084472656, + "1": 86 + }, + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "entry_0", + "type": "LOG_ENTRY", + "link": 104 + }, + { + "name": "entry_1", + "type": "LOG_ENTRY", + "link": 105 + }, + { + "name": "entry_2", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_3", + "type": "LOG_ENTRY", + "link": null + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 106 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Log Entry Joiner [Dream]" + } + }, + { + "id": 76, + "type": "Log File [Dream]", + "pos": [ + 5097.998468177342, + -397.3322583924378 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 111 + }, + { + "name": "entry_0", + "type": "LOG_ENTRY", + "link": 106 + }, + { + "name": "entry_1", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_2", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_3", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_4", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_5", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_6", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_7", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "log_directory", + "type": "STRING", + "link": 109, + "widget": { + "name": "log_directory", + "config": [ + "STRING", + { + "default": "I:\\AI\\ComfyUI\\ComfyUI\\output" + } + ] + } + } + ], + "properties": { + "Node name for S&R": "Log File [Dream]" + }, + "widgets_values": [ + "I:\\AI\\ComfyUI\\ComfyUI\\output", + "dreamlog.txt", + true, + true, + true + ] + }, + { + "id": 48, + 
"type": "Image Sequence Saver [Dream]", + "pos": [ + 4466.998468177342, + -343.33225839243767 + ], + "size": { + "0": 315, + "1": 174 + }, + "flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 101 + }, + { + "name": "image", + "type": "IMAGE", + "link": 60, + "slot_index": 1 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 61, + "widget": { + "name": "directory_path", + "config": [ + "STRING", + { + "default": "I:\\AI\\ComfyUI\\ComfyUI\\output", + "multiline": false + } + ] + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": null, + "shape": 3 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 105 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Saver [Dream]" + }, + "widgets_values": [ + "I:\\AI\\ComfyUI\\ComfyUI\\output", + "frame", + 5, + "stop output", + "png with embedded workflow" + ] + }, + { + "id": 33, + "type": "PreviewImage", + "pos": [ + 4443.998468177342, + -65.33225839243688 + ], + "size": [ + 815.909842426252, + 588.7536164327751 + ], + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 34 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 31, + "type": "Reroute", + "pos": [ + 2356, + -285 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 30 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 32 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 32, + "type": "Reroute", + "pos": [ + 2360, + -246 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 31 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 33 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 67, + "type": "Reroute", + "pos": [ + 2490, + 674 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 87 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 89 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 50, + "type": "Reroute", + "pos": [ + 3374, + 571 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 63, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 62 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 66, + "type": "Reroute", + "pos": [ + 3358, + 638 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 86 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 112 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 68, + "type": "Reroute", + "pos": [ + 3720, + 712 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + 
"inputs": [ + { + "name": "", + "type": "*", + "link": 88 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 113 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 49, + "type": "Reroute", + "pos": [ + 4173, + 576 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 62, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 61 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 74, + "type": "Reroute", + "pos": [ + 4291, + -1063 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 100 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 101, + 110 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -1011, + 649 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 86 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 87 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 88 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "public\\main\\512-SD1.5\\anything-v3-fp16-pruned.safetensors" + ] + }, + { + "id": 65, + "type": "PrimitiveNode", + "pos": [ + 1133, + 269 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 85 + ], + "widget": { + "name": "selected", + "config": [ + "INT", + { + "default": 0, + "min": 0 + } + ] + } + } + ], + "properties": {}, + "widgets_values": [ + 0, + "randomize" + ] + } + ], + "links": [ + [ + 2, + 5, + 0, + 3, + 3, + "LATENT" + ], + [ + 4, + 6, + 0, + 3, + 1, + "CONDITIONING" + ], + [ + 6, + 7, + 0, + 3, + 2, + "CONDITIONING" + ], + [ + 7, + 3, + 0, + 8, + 0, + "LATENT" + ], + [ + 10, + 10, + 0, + 11, + 0, + "PARTIAL_PROMPT" + ], + [ + 11, + 11, + 0, + 12, + 0, + "PARTIAL_PROMPT" + ], + [ + 12, + 12, + 0, + 13, + 0, + "PARTIAL_PROMPT" + ], + [ + 13, + 13, + 0, + 14, + 0, + "PARTIAL_PROMPT" + ], + [ + 19, + 20, + 0, + 10, + 1, + "FLOAT" + ], + [ + 20, + 21, + 0, + 11, + 1, + "FLOAT" + ], + [ + 21, + 22, + 0, + 12, + 1, + "FLOAT" + ], + [ + 22, + 23, + 0, + 13, + 1, + "FLOAT" + ], + [ + 23, + 24, + 0, + 14, + 1, + "FLOAT" + ], + [ + 30, + 30, + 0, + 31, + 0, + "*" + ], + [ + 31, + 30, + 1, + 32, + 0, + "*" + ], + [ + 32, + 31, + 0, + 6, + 1, + "STRING" + ], + [ + 33, + 32, + 0, + 7, + 1, + "STRING" + ], + [ + 34, + 8, + 0, + 33, + 0, + "IMAGE" + ], + [ + 37, + 36, + 0, + 37, + 0, + "STRING" + ], + [ + 38, + 37, + 0, + 38, + 0, + "*" + ], + [ + 39, + 38, + 0, + 20, + 0, + "FRAME_COUNTER" + ], + [ + 40, + 38, + 0, + 39, + 0, + "*" + ], + [ + 41, + 39, + 0, + 21, + 0, + "FRAME_COUNTER" + ], + [ + 42, + 39, + 0, + 40, + 0, + "*" + ], + [ + 43, + 40, + 0, + 22, + 0, + "FRAME_COUNTER" + ], + [ + 44, + 40, + 0, + 41, + 0, + "*" + ], + [ + 45, + 41, + 0, + 23, + 0, + "FRAME_COUNTER" + ], + [ + 46, + 41, + 0, + 42, + 0, + "*" + ], + [ + 47, + 42, + 0, + 24, + 0, + "FRAME_COUNTER" + ], + [ + 48, + 42, + 0, + 43, + 0, + "*" + ], + [ 
+ 58, + 46, + 0, + 47, + 0, + "*" + ], + [ + 60, + 8, + 0, + 48, + 1, + "IMAGE" + ], + [ + 61, + 49, + 0, + 48, + 2, + "STRING" + ], + [ + 62, + 50, + 0, + 49, + 0, + "*" + ], + [ + 63, + 51, + 0, + 50, + 0, + "*" + ], + [ + 64, + 52, + 0, + 51, + 0, + "*" + ], + [ + 65, + 36, + 0, + 52, + 0, + "*" + ], + [ + 67, + 47, + 0, + 53, + 0, + "*" + ], + [ + 68, + 53, + 0, + 35, + 0, + "FRAME_COUNTER" + ], + [ + 70, + 54, + 1, + 3, + 5, + "INT" + ], + [ + 75, + 14, + 0, + 30, + 0, + "PARTIAL_PROMPT" + ], + [ + 76, + 57, + 0, + 55, + 0, + "INT" + ], + [ + 77, + 55, + 0, + 10, + 2, + "STRING" + ], + [ + 78, + 59, + 0, + 58, + 0, + "INT" + ], + [ + 79, + 58, + 0, + 11, + 2, + "STRING" + ], + [ + 80, + 61, + 0, + 60, + 0, + "INT" + ], + [ + 81, + 60, + 0, + 12, + 2, + "STRING" + ], + [ + 82, + 63, + 0, + 62, + 0, + "INT" + ], + [ + 83, + 62, + 0, + 13, + 2, + "STRING" + ], + [ + 84, + 64, + 0, + 14, + 2, + "STRING" + ], + [ + 85, + 65, + 0, + 64, + 0, + "INT" + ], + [ + 86, + 4, + 0, + 66, + 0, + "*" + ], + [ + 87, + 4, + 1, + 67, + 0, + "*" + ], + [ + 88, + 4, + 2, + 68, + 0, + "*" + ], + [ + 89, + 67, + 0, + 69, + 0, + "*" + ], + [ + 90, + 69, + 0, + 7, + 0, + "CLIP" + ], + [ + 91, + 69, + 0, + 6, + 0, + "CLIP" + ], + [ + 93, + 54, + 2, + 70, + 1, + "LOG_ENTRY" + ], + [ + 94, + 30, + 0, + 71, + 0, + "STRING" + ], + [ + 95, + 30, + 1, + 72, + 0, + "STRING" + ], + [ + 97, + 71, + 0, + 73, + 0, + "LOG_ENTRY" + ], + [ + 98, + 72, + 0, + 73, + 1, + "LOG_ENTRY" + ], + [ + 99, + 47, + 0, + 54, + 0, + "FRAME_COUNTER" + ], + [ + 100, + 53, + 0, + 74, + 0, + "*" + ], + [ + 101, + 74, + 0, + 48, + 0, + "FRAME_COUNTER" + ], + [ + 102, + 73, + 0, + 70, + 0, + "LOG_ENTRY" + ], + [ + 103, + 35, + 2, + 70, + 2, + "LOG_ENTRY" + ], + [ + 104, + 70, + 0, + 75, + 0, + "LOG_ENTRY" + ], + [ + 105, + 48, + 1, + 75, + 1, + "LOG_ENTRY" + ], + [ + 106, + 75, + 0, + 76, + 1, + "LOG_ENTRY" + ], + [ + 107, + 36, + 0, + 77, + 0, + "*" + ], + [ + 108, + 77, + 0, + 78, + 0, + "*" + ], + [ + 109, + 78, + 0, + 76, + 9, + "STRING" + ], + [ + 110, + 74, + 0, + 79, + 0, + "*" + ], + [ + 111, + 79, + 0, + 76, + 0, + "FRAME_COUNTER" + ], + [ + 112, + 66, + 0, + 3, + 0, + "MODEL" + ], + [ + 113, + 68, + 0, + 8, + 1, + "VAE" + ], + [ + 114, + 43, + 0, + 46, + 0, + "*" + ], + [ + 115, + 35, + 0, + 3, + 4, + "FLOAT" + ] + ], + "groups": [ + { + "title": "Random prompt", + "bounding": [ + -558, + -1027, + 2204, + 1424 + ], + "color": "#3f789e" + }, + { + "title": "KSampler", + "bounding": [ + 2576, + -695, + 1655, + 990 + ], + "color": "#3f789e" + }, + { + "title": "Output", + "bounding": [ + 4353, + -875, + 1182, + 1424 + ], + "color": "#3f789e" + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/examples/motion-workflow-example.json b/custom_nodes/comfyui-dream-project/examples/motion-workflow-example.json new file mode 100644 index 0000000000000000000000000000000000000000..1afa6c8a63e05f1dabecd0342129bc35aa4d90b4 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/examples/motion-workflow-example.json @@ -0,0 +1,4983 @@ +{ + "last_node_id": 423, + "last_link_id": 5467, + "nodes": [ + { + "id": 77, + "type": "PrimitiveNode", + "pos": [ + -2115.912856640875, + 447.2204328247986 + ], + "size": { + "0": 450, + "1": 190 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 2312 + ], + "widget": { + "name": "text" + } + } + ], + "title": "Negative", + "properties": {}, + "widgets_values": [ + 
"text, watermark, logo, letters, writing, frame, border, hands, frame, paper" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 78, + "type": "PrimitiveNode", + "pos": [ + -2119.0805839357795, + 220.45355818433413 + ], + "size": { + "0": 460, + "1": 190 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 2309 + ], + "slot_index": 0, + "widget": { + "name": "text" + } + } + ], + "title": "Positive", + "properties": {}, + "widgets_values": [ + "serene forest landscape, watercolor, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 227, + "type": "Common Frame Dimensions [Dream]", + "pos": [ + -2107.1983297494985, + 1324.7646814141895 + ], + "size": { + "0": 360, + "1": 240 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 5179 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 5073 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "final_width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "final_height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Common Frame Dimensions [Dream]" + }, + "widgets_values": [ + "512", + "1:1", + "wide", + "1", + 64, + "ceil" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 326, + "type": "Note", + "pos": [ + 41.927985591864186, + 320.26230796549316 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 3, + "mode": 0, + "title": "Note on curves", + "properties": { + "text": "" + }, + "widgets_values": [ + "Curves are deterministic. They transform the frame counter (including information such as framerate, frame index and total frame counter) into a single float value (and a rounded integer)." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 329, + "type": "Note", + "pos": [ + 2160.935341001233, + 880.498221189961 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 4, + "mode": 0, + "title": "Note on outpainting", + "properties": { + "text": "" + }, + "widgets_values": [ + "This is outpainting done using controlnet. This are other ways to do outpainting." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 330, + "type": "Note", + "pos": [ + 960.9353410012387, + 560.4982211899617 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "title": "Note on CN model", + "properties": { + "text": "" + }, + "widgets_values": [ + "You need an inpainting controlnet model here." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 332, + "type": "Note", + "pos": [ + 3743.4799277760635, + 440.536019679167 + ], + "size": { + "0": 590, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "title": "Note on output", + "properties": { + "text": "" + }, + "widgets_values": [ + "The sequence processing is triggered only after the last frame has been saved. We blend the frames slightly, introduce \"tweening\" frames and encode a mp4 video file. We choose to remove the images, so we could in theory continue generating to produce multiple different videos. The ffmpeg node will not overwrite the video file." 
+ ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 228, + "type": "Reroute", + "pos": [ + -766.5873422711185, + 863.8157107780454 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5179 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5074 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 229, + "type": "Reroute", + "pos": [ + -766.5873422711185, + 893.8157107780456 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5073 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5075 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 201, + "type": "Reroute", + "pos": [ + -1744.0427424682491, + 860.8396239607022 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 2977, + "pos": [ + 37.5, + 0 + ] + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 242, + "type": "Reroute", + "pos": [ + 1400.935341001236, + 560.4982211899617 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5376 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5110 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 241, + "type": "Reroute", + "pos": [ + 1400.935341001236, + 520.4982211899614 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5375 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5111 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 87, + "type": "Reroute", + "pos": [ + -784.3996392822262, + 672.4008465576175 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5369 + } + ], + "outputs": [ + { + "name": "", + "type": "FRAME_COUNTER", + "links": [ + 5080, + 5198 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 231, + "type": "Frame Counter Offset [Dream]", + "pos": [ + -674.3996392822264, + 742.4008465576175 + ], + "size": { + "0": 342.5999755859375, + "1": 58 + }, + "flags": {}, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5080 + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 5081, + 5082 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter Offset [Dream]" + }, + "widgets_values": [ + -1 + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 282, + "type": "Reroute", + "pos": [ + 87.92452081298865, + 684.8108026123044 + ], 
+ "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5198 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5199 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 285, + "type": "Reroute", + "pos": [ + 3081.5470204862327, + 362.2550892786084 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5204 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5205 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 58, + "type": "Reroute", + "pos": [ + 1640.9353410012352, + 840.4982211899608 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 81, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 333 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 5241 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 124, + "type": "ControlNetApplyAdvanced", + "pos": [ + 1680.9353410012357, + 560.4982211899617 + ], + "size": { + "0": 315, + "1": 166 + }, + "flags": {}, + "order": 82, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 5111 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5110 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 272 + }, + { + "name": "image", + "type": "IMAGE", + "link": 5296 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 5301 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 5302 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 1, + 0, + 1 + ] + }, + { + "id": 302, + "type": "VAEEncode", + "pos": [ + 1910.935341001228, + 780.4982211899609 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 84, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 5241 + }, + { + "name": "vae", + "type": "VAE", + "link": 5242 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5303 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 267, + "type": "KSampler", + "pos": [ + 2270.935341001233, + 570.4982211899617 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 85, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 5436 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 5301 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5302 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 5303 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5159, + 5308 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 582926500345395, + "randomize", + 25, + 9, + "euler_ancestral", + "normal", + 0.804544677734375 + ] + }, + { + "id": 268, + "type": "Reroute", + "pos": [ + 2620.935341001234, + 570.4982211899617 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + 
"order": 86, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5159 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5293 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 265, + "type": "PreviewImage", + "pos": [ + 3130.9159068821527, + 500.0091821650353 + ], + "size": { + "0": 250, + "1": 370 + }, + "flags": {}, + "order": 92, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5152 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 364, + "type": "Int Input [Dream]", + "pos": [ + -2097.1983297494985, + 1704.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5350 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Framerate", + "properties": { + "Node name for S&R": "Int Input [Dream]" + }, + "widgets_values": [ + 15 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 365, + "type": "Int Input [Dream]", + "pos": [ + -2097.1983297494985, + 1604.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5351 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Number of frames", + "properties": { + "Node name for S&R": "Int Input [Dream]" + }, + "widgets_values": [ + 30 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 362, + "type": "String Input [Dream]", + "pos": [ + -2097.1983297494985, + 1803.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5346 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Output Directory", + "properties": { + "Node name for S&R": "String Input [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 333, + "type": "Note", + "pos": [ + -2098.1983297494985, + 1907.7646814141895 + ], + "size": { + "0": 408.5453796386719, + "1": 74.01783752441406 + }, + "flags": {}, + "order": 10, + "mode": 0, + "title": "Note on settings", + "properties": { + "text": "" + }, + "widgets_values": [ + "Both framerate and total number of frames are important since they affect curves and sequence processing. Typically, queue an equal number of prompt executions as the total number of frames." 
+ ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 171, + "type": "VAEDecode", + "pos": [ + 3161.547020486233, + 412.2550892786084 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 90, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 5294 + }, + { + "name": "vae", + "type": "VAE", + "link": 5205 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5152, + 5354 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 367, + "type": "Reroute", + "pos": [ + 3403.212597957077, + 394.68859707729007 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 93, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5354 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5357 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 334, + "type": "Note", + "pos": [ + -1376.5813007363186, + 888.406571589691 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 11, + "mode": 0, + "title": "Note on settings", + "properties": { + "text": "" + }, + "widgets_values": [ + "This node creates progression based on the files in the output directory." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 230, + "type": "Frame Counter (Directory) [Dream]", + "pos": [ + -1374.2153335854396, + 697.9571941696488 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "total_frames", + "type": "INT", + "link": 5351, + "widget": { + "name": "total_frames" + }, + "slot_index": 0 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5347, + "widget": { + "name": "directory_path" + } + }, + { + "name": "frames_per_second", + "type": "INT", + "link": 5350, + "widget": { + "name": "frames_per_second" + } + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 5176 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter (Directory) [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric", + 500, + 15 + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 363, + "type": "Reroute", + "pos": [ + -1680, + 1838 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5346 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5347, + 5348, + 5358 + ], + "slot_index": 0 + } + ], + "title": "Test", + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 236, + "type": "LoadImage", + "pos": [ + -2106, + 679 + ], + "size": { + "0": 430, + "1": 340 + }, + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5313 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "after (3).jpg", + "image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 373, + "type": "Reroute", + "pos": [ + -1540, + 197 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5365 + } + ], + "outputs": 
[ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5366, + 5367 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 292, + "type": "Linear Curve [Dream]", + "pos": [ + -300.1999890136713, + 310.9999736785893 + ], + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5220 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5222 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Linear Curve [Dream]" + }, + "widgets_values": [ + -0.5, + 0.5 + ] + }, + { + "id": 291, + "type": "Sine Curve [Dream]", + "pos": [ + -300.1999890136713, + 465.99997367858884 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5221 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5223 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Sine Curve [Dream]" + }, + "widgets_values": [ + 0.3, + -0.3, + 2, + 0 + ] + }, + { + "id": 294, + "type": "Beat Curve [Dream]", + "pos": [ + -632.199989013671, + 294.9999736785893 + ], + "size": { + "0": 315, + "1": 318 + }, + "flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5224 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5225 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Beat Curve [Dream]" + }, + "widgets_values": [ + 100, + 0, + 4, + -0.3, + 1, + "no", + 2, + 1, + 3, + 0, + 0 + ] + }, + { + "id": 293, + "type": "Reroute", + "pos": [ + -806.1999890136706, + 358.9999736785893 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5368 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5220, + 5221, + 5224, + 5369 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -1256, + 9 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": { + "collapsed": true + }, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5367 + }, + { + "name": "text", + "type": "STRING", + "link": 2312, + "widget": { + "name": "text" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5372 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, logo, letters, writing, frame, border, hands, frame, paper" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -1250, + -43 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": { + "collapsed": true + }, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5366 + }, + { + "name": "text", + 
"type": "STRING", + "link": 2309, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5371 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "serene forest landscape, watercolor, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 375, + "type": "Reroute", + "pos": [ + 300, + -70 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5371 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5373 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 368, + "type": "Reroute", + "pos": [ + -1236, + 24 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5358 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5359 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 369, + "type": "Reroute", + "pos": [ + 334, + 26 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5359 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5360 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 274, + "type": "Reroute", + "pos": [ + -979, + 68 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5457 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5368, + 5370 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159", + "shape": 2 + }, + { + "id": 376, + "type": "Reroute", + "pos": [ + 301, + -21 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5372 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5374 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 386, + "type": "Reroute", + "pos": [ + -1491, + -190 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5392 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5389 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 382, + "type": "Reroute", + "pos": [ + -1462, + -149 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5381 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5382 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 186, + "type": "KSampler", + "pos": [ + 2772.704072973985, + 438.14907683566435 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 88, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": 
"MODEL", + "link": 5399 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 5387 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5386 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 5293 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5294 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 959601926573140, + "randomize", + 5, + 5, + "dpmpp_3m_sde", + "exponential", + 0.6 + ] + }, + { + "id": 284, + "type": "Reroute", + "pos": [ + 2745.704072973985, + 371.1490768356643 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5395 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5204 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 331, + "type": "Note", + "pos": [ + 2810.704072973985, + 953.1490768356643 + ], + "size": { + "0": 570, + "1": 60 + }, + "flags": {}, + "order": 13, + "mode": 0, + "title": "Note on full frame sampler", + "properties": { + "text": "" + }, + "widgets_values": [ + "This step is really mostly important if you zoom out as it reintroduces details." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 211, + "type": "ImageScale", + "pos": [ + 14, + 800 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 69, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5085 + }, + { + "name": "width", + "type": "INT", + "link": 5074, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 5075, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5088, + 5415 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 512, + 512, + "disabled" + ] + }, + { + "id": 119, + "type": "ControlNetLoader", + "pos": [ + 982.0796921393085, + 673.1125436264036 + ], + "size": { + "0": 400, + "1": 90 + }, + "flags": { + "collapsed": false + }, + "order": 14, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 272 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "SD1.5\\control_v11p_sd15_inpaint.pth" + ] + }, + { + "id": 325, + "type": "InpaintPreprocessor", + "pos": [ + 1164.0796921393064, + 841.1125436264033 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 78, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5295 + }, + { + "name": "mask", + "type": "MASK", + "link": 5430 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5296 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "InpaintPreprocessor" + } + }, + { + "id": 240, + "type": "Reroute", + "pos": [ + 1666.6531151244042, + 940.0343845569474 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5426 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5242 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + 
"bgcolor": "#533" + }, + { + "id": 150, + "type": "Reroute", + "pos": [ + 1202.0796921393064, + 909.1125436264033 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 76, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5289 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 333 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 403, + "type": "Reroute", + "pos": [ + 924.0796921393085, + 936.1125436264036 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5433 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5426 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 383, + "type": "Reroute", + "pos": [ + 602, + -138 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5382 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5393, + 5433 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 378, + "type": "Reroute", + "pos": [ + 1275, + -11 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5374, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5376, + 5380 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 377, + "type": "Reroute", + "pos": [ + 1383, + -68 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5373, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5375, + 5379 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 380, + "type": "Reroute", + "pos": [ + 2416, + -69 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5379 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5387 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 381, + "type": "Reroute", + "pos": [ + 2381, + -17 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5380 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5386 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 234, + "type": "Image Motion [Dream]", + "pos": [ + 385, + 751 + ], + "size": { + "0": 320, + "1": 360 + }, + "flags": {}, + "order": 71, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5088 + }, + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5199 + }, + { + "name": "noise", + "type": "IMAGE", + "link": null + }, + { + "name": 
"x_translation", + "type": "FLOAT", + "link": 5222, + "widget": { + "name": "x_translation" + } + }, + { + "name": "y_translation", + "type": "FLOAT", + "link": 5223, + "widget": { + "name": "y_translation" + } + }, + { + "name": "zoom", + "type": "FLOAT", + "link": 5225, + "widget": { + "name": "zoom" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 5439 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "mask1", + "type": "MASK", + "links": [ + 5438 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "mask2", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "mask3", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "Image Motion [Dream]" + }, + "widgets_values": [ + -1.1272729492187497, + 15, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "id": 279, + "type": "Reroute", + "pos": [ + 3598, + 525 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 70, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5385 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5195 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 407, + "type": "Reroute", + "pos": [ + 3608, + 569 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5441 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5442 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 287, + "type": "Image Sequence Blend [Dream]", + "pos": [ + 4100, + 560 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 97, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5440 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5210 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Blend [Dream]" + }, + "widgets_values": [ + 0.1, + 0.1, + 1 + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 233, + "type": "Image Sequence Saver [Dream]", + "pos": [ + 3760, + 560 + ], + "size": { + "0": 320, + "1": 170 + }, + "flags": {}, + "order": 95, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5195 + }, + { + "name": "image", + "type": "IMAGE", + "link": 5193 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5442, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5440 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Saver [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "frame", + 5, + "stop output", + "jpg" + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 389, + "type": "Reroute", + "pos": [ + 2480, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5402 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5399 + ], + 
"slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 393, + "type": "Reroute", + "pos": [ + 2070, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5437 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5402 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 406, + "type": "Reroute", + "pos": [ + 1840, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5434 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5436, + 5437 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 510, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5389 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5434 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 390, + "type": "Reroute", + "pos": [ + 1551, + -135 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5393 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5394 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 391, + "type": "Reroute", + "pos": [ + 2534, + -139 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5394 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5395 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 374, + "type": "Reroute", + "pos": [ + 538, + 77 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5370 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5378 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 370, + "type": "Reroute", + "pos": [ + 2405, + 32 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5360 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5397 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 379, + "type": "Reroute", + "pos": [ + 2345, + 78 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5378 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5383 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 392, + "type": "Reroute", + "pos": [ + 3374, + 37 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5397 + } + ], + "outputs": [ + { + "name": "STRING", + "type": 
"STRING", + "links": [ + 5441 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 384, + "type": "Reroute", + "pos": [ + 2837, + 79 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5383 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5384 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 385, + "type": "Reroute", + "pos": [ + 3293, + 80 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 68, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5384 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5385 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 232, + "type": "Image Sequence Loader [Dream]", + "pos": [ + -294.3996392822261, + 772.4008465576175 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5082 + }, + { + "name": "default_image", + "type": "IMAGE", + "link": 5444 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5348, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 5085 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "frame_name", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Loader [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric" + ] + }, + { + "id": 408, + "type": "Reroute", + "pos": [ + -777, + 820 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5456 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5444 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 405, + "type": "Reroute", + "pos": [ + 927.0796921393089, + 858.1125436264033 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 74, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5438 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 5430, + 5447 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 237, + "type": "Reroute", + "pos": [ + 926.0796921393085, + 791.1125436264036 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 73, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5439 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5289, + 5295, + 5449 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 327, + "type": "Reroute", + "pos": [ + 2550.935341001234, + 920.498221189961 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 87, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5308, + "pos": [ + 45.2, + 0 + ] + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5450 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + 
"horizontal": true + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -2102.608581739303, + 1125.9695878411585 + ], + "size": { + "0": 400, + "1": 100 + }, + "flags": { + "collapsed": false + }, + "order": 15, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5392 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 2977, + 5365 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 5381, + 5451 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "public\\main\\512-SD1.5\\Realistic_Vision_V5.0.safetensors" + ] + }, + { + "id": 415, + "type": "Reroute", + "pos": [ + -1416, + 2365 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5451 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5452 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 338, + "type": "Reroute", + "pos": [ + -1251, + 169 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5313 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5456 + ], + "slot_index": 0 + } + ], + "title": "Seed Image", + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 273, + "type": "Reroute", + "pos": [ + -1124.21533358544, + 636.9571941696493 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5176 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5457 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 328, + "type": "Note", + "pos": [ + -512, + 962 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 16, + "mode": 0, + "title": "Note on motion", + "properties": { + "text": "" + }, + "widgets_values": [ + "This group reads the previously rendered frame and performs a motion transformation on the image. The output of the image motion node is the transformed image and up to three masks to use for outpainting." 
+ ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 411, + "type": "MaskToImage", + "pos": [ + 1287, + 2084 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": { + "collapsed": true + }, + "order": 79, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 5447 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5448 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 416, + "type": "Reroute", + "pos": [ + 1658, + 2360 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5452 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5453 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 414, + "type": "VAEDecode", + "pos": [ + 1771, + 2095 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 89, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 5450 + }, + { + "name": "vae", + "type": "VAE", + "link": 5453 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5454 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 278, + "type": "Reroute", + "pos": [ + 3614, + 610 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 94, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5357 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5193, + 5460 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 420, + "type": "Reroute", + "pos": [ + 3634.1977835692924, + 1253.5845951163976 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 96, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5460, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5461 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 418, + "type": "PreviewImage", + "pos": [ + 1970, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 98, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5461 + } + ], + "title": "Saved Image", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 417, + "type": "PreviewImage", + "pos": [ + 1730, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 91, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5454 + } + ], + "title": "After Inpainting", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 413, + "type": "PreviewImage", + "pos": [ + 1510, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 77, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5449 + } + ], + "title": "Inpainting input", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 412, + "type": "PreviewImage", + "pos": [ + 1270, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 83, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5448 + } + ], + "title": "Inpainting mask", + "properties": { + "Node name for S&R": 
"PreviewImage" + } + }, + { + "id": 409, + "type": "PreviewImage", + "pos": [ + 1046, + 2072 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 80, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5459 + } + ], + "title": "Previous Frame", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 398, + "type": "Reroute", + "pos": [ + 288, + 1073 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 72, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5415, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5464 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 419, + "type": "Reroute", + "pos": [ + 459, + 2070 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 75, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5464 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5459 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 281, + "type": "Image Sequence Tweening [Dream]", + "pos": [ + 4440, + 560 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 99, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5210 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5467 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Tweening [Dream]" + }, + "widgets_values": [ + 2 + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 423, + "type": "FFMPEG Video Encoder [Dream]", + "pos": [ + 4785, + 557 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 100, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5467 + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FFMPEG Video Encoder [Dream]" + }, + "widgets_values": [ + "video", + 1, + true + ] + } + ], + "links": [ + [ + 272, + 119, + 0, + 124, + 2, + "CONTROL_NET" + ], + [ + 333, + 150, + 0, + 58, + 0, + "*" + ], + [ + 2309, + 78, + 0, + 6, + 1, + "STRING" + ], + [ + 2312, + 77, + 0, + 7, + 1, + "STRING" + ], + [ + 2977, + 4, + 1, + 201, + 0, + "*" + ], + [ + 5073, + 227, + 1, + 229, + 0, + "*" + ], + [ + 5074, + 228, + 0, + 211, + 1, + "INT" + ], + [ + 5075, + 229, + 0, + 211, + 2, + "INT" + ], + [ + 5080, + 87, + 0, + 231, + 0, + "FRAME_COUNTER" + ], + [ + 5081, + 231, + 0, + 82, + 0, + "FRAME_COUNTER" + ], + [ + 5082, + 231, + 0, + 232, + 0, + "FRAME_COUNTER" + ], + [ + 5085, + 232, + 0, + 211, + 0, + "IMAGE" + ], + [ + 5088, + 211, + 0, + 234, + 0, + "IMAGE" + ], + [ + 5110, + 242, + 0, + 124, + 1, + "CONDITIONING" + ], + [ + 5111, + 241, + 0, + 124, + 0, + "CONDITIONING" + ], + [ + 5152, + 171, + 0, + 265, + 0, + "IMAGE" + ], + [ + 5159, + 267, + 0, + 268, + 0, + "*" + ], + [ + 5176, + 230, + 0, + 273, + 0, + "*" + ], + [ + 5179, + 227, + 0, + 228, + 0, + "*" + ], + [ + 5193, + 278, + 0, + 233, + 1, + "IMAGE" + ], + [ + 5195, + 279, + 0, + 233, + 0, + "FRAME_COUNTER" + ], + [ + 5198, + 87, + 0, + 282, + 0, + "*" + ], + [ + 5199, + 282, + 0, + 234, + 1, + "FRAME_COUNTER" + ], + [ + 5204, + 284, + 0, + 285, + 0, + "*" + ], + [ + 5205, + 285, + 0, + 171, + 1, + "VAE" + ], + [ + 
5210, + 287, + 0, + 281, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 5220, + 293, + 0, + 292, + 0, + "FRAME_COUNTER" + ], + [ + 5221, + 293, + 0, + 291, + 0, + "FRAME_COUNTER" + ], + [ + 5222, + 292, + 0, + 234, + 3, + "FLOAT" + ], + [ + 5223, + 291, + 0, + 234, + 4, + "FLOAT" + ], + [ + 5224, + 293, + 0, + 294, + 0, + "FRAME_COUNTER" + ], + [ + 5225, + 294, + 0, + 234, + 5, + "FLOAT" + ], + [ + 5241, + 58, + 0, + 302, + 0, + "IMAGE" + ], + [ + 5242, + 240, + 0, + 302, + 1, + "VAE" + ], + [ + 5289, + 237, + 0, + 150, + 0, + "*" + ], + [ + 5293, + 268, + 0, + 186, + 3, + "LATENT" + ], + [ + 5294, + 186, + 0, + 171, + 0, + "LATENT" + ], + [ + 5295, + 237, + 0, + 325, + 0, + "IMAGE" + ], + [ + 5296, + 325, + 0, + 124, + 3, + "IMAGE" + ], + [ + 5301, + 124, + 0, + 267, + 1, + "CONDITIONING" + ], + [ + 5302, + 124, + 1, + 267, + 2, + "CONDITIONING" + ], + [ + 5303, + 302, + 0, + 267, + 3, + "LATENT" + ], + [ + 5308, + 267, + 0, + 327, + 0, + "*" + ], + [ + 5313, + 236, + 0, + 338, + 0, + "*" + ], + [ + 5346, + 362, + 0, + 363, + 0, + "*" + ], + [ + 5347, + 363, + 0, + 230, + 1, + "STRING" + ], + [ + 5348, + 363, + 0, + 232, + 2, + "STRING" + ], + [ + 5350, + 364, + 0, + 230, + 2, + "INT" + ], + [ + 5351, + 365, + 0, + 230, + 0, + "INT" + ], + [ + 5354, + 171, + 0, + 367, + 0, + "*" + ], + [ + 5357, + 367, + 0, + 278, + 0, + "*" + ], + [ + 5358, + 363, + 0, + 368, + 0, + "*" + ], + [ + 5359, + 368, + 0, + 369, + 0, + "*" + ], + [ + 5360, + 369, + 0, + 370, + 0, + "*" + ], + [ + 5365, + 4, + 1, + 373, + 0, + "*" + ], + [ + 5366, + 373, + 0, + 6, + 0, + "CLIP" + ], + [ + 5367, + 373, + 0, + 7, + 0, + "CLIP" + ], + [ + 5368, + 274, + 0, + 293, + 0, + "*" + ], + [ + 5369, + 293, + 0, + 87, + 0, + "*" + ], + [ + 5370, + 274, + 0, + 374, + 0, + "*" + ], + [ + 5371, + 6, + 0, + 375, + 0, + "*" + ], + [ + 5372, + 7, + 0, + 376, + 0, + "*" + ], + [ + 5373, + 375, + 0, + 377, + 0, + "*" + ], + [ + 5374, + 376, + 0, + 378, + 0, + "*" + ], + [ + 5375, + 377, + 0, + 241, + 0, + "*" + ], + [ + 5376, + 378, + 0, + 242, + 0, + "*" + ], + [ + 5378, + 374, + 0, + 379, + 0, + "*" + ], + [ + 5379, + 377, + 0, + 380, + 0, + "*" + ], + [ + 5380, + 378, + 0, + 381, + 0, + "*" + ], + [ + 5381, + 4, + 2, + 382, + 0, + "*" + ], + [ + 5382, + 382, + 0, + 383, + 0, + "*" + ], + [ + 5383, + 379, + 0, + 384, + 0, + "*" + ], + [ + 5384, + 384, + 0, + 385, + 0, + "*" + ], + [ + 5385, + 385, + 0, + 279, + 0, + "*" + ], + [ + 5386, + 381, + 0, + 186, + 2, + "CONDITIONING" + ], + [ + 5387, + 380, + 0, + 186, + 1, + "CONDITIONING" + ], + [ + 5389, + 386, + 0, + 387, + 0, + "*" + ], + [ + 5392, + 4, + 0, + 386, + 0, + "*" + ], + [ + 5393, + 383, + 0, + 390, + 0, + "*" + ], + [ + 5394, + 390, + 0, + 391, + 0, + "*" + ], + [ + 5395, + 391, + 0, + 284, + 0, + "*" + ], + [ + 5397, + 370, + 0, + 392, + 0, + "*" + ], + [ + 5399, + 389, + 0, + 186, + 0, + "MODEL" + ], + [ + 5402, + 393, + 0, + 389, + 0, + "*" + ], + [ + 5415, + 211, + 0, + 398, + 0, + "*" + ], + [ + 5426, + 403, + 0, + 240, + 0, + "*" + ], + [ + 5430, + 405, + 0, + 325, + 1, + "MASK" + ], + [ + 5433, + 383, + 0, + 403, + 0, + "*" + ], + [ + 5434, + 387, + 0, + 406, + 0, + "*" + ], + [ + 5436, + 406, + 0, + 267, + 0, + "MODEL" + ], + [ + 5437, + 406, + 0, + 393, + 0, + "*" + ], + [ + 5438, + 234, + 1, + 405, + 0, + "*" + ], + [ + 5439, + 234, + 0, + 237, + 0, + "*" + ], + [ + 5440, + 233, + 0, + 287, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 5441, + 392, + 0, + 407, + 0, + "*" + ], + [ + 5442, + 407, + 0, + 233, + 2, + "STRING" + ], + [ + 5444, + 408, + 0, + 232, + 1, + "IMAGE" + 
], + [ + 5447, + 405, + 0, + 411, + 0, + "MASK" + ], + [ + 5448, + 411, + 0, + 412, + 0, + "IMAGE" + ], + [ + 5449, + 237, + 0, + 413, + 0, + "IMAGE" + ], + [ + 5450, + 327, + 0, + 414, + 0, + "LATENT" + ], + [ + 5451, + 4, + 2, + 415, + 0, + "*" + ], + [ + 5452, + 415, + 0, + 416, + 0, + "*" + ], + [ + 5453, + 416, + 0, + 414, + 1, + "VAE" + ], + [ + 5454, + 414, + 0, + 417, + 0, + "IMAGE" + ], + [ + 5456, + 338, + 0, + 408, + 0, + "*" + ], + [ + 5457, + 273, + 0, + 274, + 0, + "*" + ], + [ + 5459, + 419, + 0, + 409, + 0, + "IMAGE" + ], + [ + 5460, + 278, + 0, + 420, + 0, + "*" + ], + [ + 5461, + 420, + 0, + 418, + 0, + "IMAGE" + ], + [ + 5464, + 398, + 0, + 419, + 0, + "*" + ], + [ + 5467, + 281, + 0, + 423, + 0, + "ANIMATION_SEQUENCE" + ] + ], + "groups": [ + { + "title": "Prompting", + "bounding": [ + -2129, + 148, + 474, + 885 + ], + "color": "#a1309b", + "font_size": 24 + }, + { + "title": "Settings", + "bounding": [ + -2125, + 1259, + 482, + 742 + ], + "color": "#b58b2a", + "font_size": 24 + }, + { + "title": "Inpainting/Outpainting", + "bounding": [ + 950, + 483, + 1711, + 528 + ], + "color": "#929054", + "font_size": 24 + }, + { + "title": "Prev Frame Move", + "bounding": [ + -736, + 630, + 1450, + 456 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Full frame sampler", + "bounding": [ + 2764, + 318, + 691, + 700 + ], + "color": "#88A", + "font_size": 24 + }, + { + "title": "Output", + "bounding": [ + 3653, + 373, + 1458, + 381 + ], + "color": "#b06634", + "font_size": 24 + }, + { + "title": "Animation Driver", + "bounding": [ + -1385, + 592, + 336, + 373 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Motion Control", + "bounding": [ + -738, + 235, + 1451, + 392 + ], + "color": "#ef75ff", + "font_size": 24 + }, + { + "title": "Model selection", + "bounding": [ + -2128, + 1042, + 476, + 204 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Previews", + "bounding": [ + -2124, + 2011, + 6473, + 329 + ], + "color": "#444", + "font_size": 24 + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/examples/motion-workflow-with-color-coherence.json b/custom_nodes/comfyui-dream-project/examples/motion-workflow-with-color-coherence.json new file mode 100644 index 0000000000000000000000000000000000000000..a6492346b17eb1cdb24b315569db1031fa96d96f --- /dev/null +++ b/custom_nodes/comfyui-dream-project/examples/motion-workflow-with-color-coherence.json @@ -0,0 +1,6230 @@ +{ + "last_node_id": 422, + "last_link_id": 5463, + "nodes": [ + { + "id": 77, + "type": "PrimitiveNode", + "pos": [ + -2115.912856640875, + 447.2204328247986 + ], + "size": { + "0": 450, + "1": 190 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 2312 + ], + "widget": { + "name": "text" + } + } + ], + "title": "Negative", + "properties": {}, + "widgets_values": [ + "text, watermark, logo, letters, writing, frame, border, hands, frame, paper" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 78, + "type": "PrimitiveNode", + "pos": [ + -2119.0805839357795, + 220.45355818433413 + ], + "size": { + "0": 460, + "1": 190 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 2309 + ], + "slot_index": 0, + "widget": { + "name": "text" + } + } + ], + "title": "Positive", + "properties": {}, + "widgets_values": [ + "serene forest landscape, watercolor, 
detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 227, + "type": "Common Frame Dimensions [Dream]", + "pos": [ + -2107.1983297494985, + 1324.7646814141895 + ], + "size": { + "0": 360, + "1": 240 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 5179 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 5073 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "final_width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "final_height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Common Frame Dimensions [Dream]" + }, + "widgets_values": [ + "512", + "1:1", + "wide", + "1", + 64, + "ceil" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 326, + "type": "Note", + "pos": [ + 41.927985591864186, + 320.26230796549316 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 3, + "mode": 0, + "title": "Note on curves", + "properties": { + "text": "" + }, + "widgets_values": [ + "Curves are deterministic. They transform the frame counter (including information such as framerate, frame index and total frame counter) into a single float value (and a rounded integer)." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 329, + "type": "Note", + "pos": [ + 2160.935341001233, + 880.498221189961 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 4, + "mode": 0, + "title": "Note on outpainting", + "properties": { + "text": "" + }, + "widgets_values": [ + "This is outpainting done using controlnet. This are other ways to do outpainting." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 330, + "type": "Note", + "pos": [ + 960.9353410012387, + 560.4982211899617 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "title": "Note on CN model", + "properties": { + "text": "" + }, + "widgets_values": [ + "You need an inpainting controlnet model here." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 332, + "type": "Note", + "pos": [ + 3743.4799277760635, + 440.536019679167 + ], + "size": { + "0": 590, + "1": 80 + }, + "flags": {}, + "order": 6, + "mode": 0, + "title": "Note on output", + "properties": { + "text": "" + }, + "widgets_values": [ + "The sequence processing is triggered only after the last frame has been saved. We blend the frames slightly, introduce \"tweening\" frames and encode a mp4 video file. We choose to remove the images, so we could in theory continue generating to produce multiple different videos. The ffmpeg node will not overwrite the video file." 
+ ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 228, + "type": "Reroute", + "pos": [ + -766.5873422711185, + 863.8157107780454 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5179 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5074 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 229, + "type": "Reroute", + "pos": [ + -766.5873422711185, + 893.8157107780456 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5073 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5075 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 201, + "type": "Reroute", + "pos": [ + -1744.0427424682491, + 860.8396239607022 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 2977, + "pos": [ + 37.5, + 0 + ] + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 345, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -139.11143154552485, + 1294.2580203476157 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5320 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5327 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 984424700736240, + "randomize", + "center-right" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 346, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -139.11143154552485, + 1654.2580203476143 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5321 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5331 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 393866830673316, + "randomize", + "bottom-center" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 242, + "type": "Reroute", + "pos": [ + 1400.935341001236, + 560.4982211899617 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5376 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5110 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 241, + "type": "Reroute", + "pos": [ + 1400.935341001236, + 520.4982211899614 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5375 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5111 + ], + "slot_index": 0 + } + ], + "properties": { + 
"showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 87, + "type": "Reroute", + "pos": [ + -784.3996392822262, + 672.4008465576175 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 68, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5369 + } + ], + "outputs": [ + { + "name": "", + "type": "FRAME_COUNTER", + "links": [ + 5080, + 5198 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 231, + "type": "Frame Counter Offset [Dream]", + "pos": [ + -674.3996392822264, + 742.4008465576175 + ], + "size": { + "0": 342.5999755859375, + "1": 58 + }, + "flags": {}, + "order": 72, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5080 + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 5081, + 5082 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter Offset [Dream]" + }, + "widgets_values": [ + -1 + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 282, + "type": "Reroute", + "pos": [ + 87.92452081298865, + 684.8108026123044 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 73, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5198 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5199 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 285, + "type": "Reroute", + "pos": [ + 3081.5470204862327, + 362.2550892786084 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 71, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5204 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5205 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 337, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -139.11143154552485, + 1824.258020347618 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 83, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5312 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5332 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 912873459964733, + "randomize", + "bottom-center" + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 341, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -139.11143154552485, + 1464.258020347614 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 86, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5317 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5326 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 257321647530942, + "randomize", + "center-right" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 350, + "type": "Palette Color Align [Dream]", + "pos": [ + 250.88856845447202, + 1814.2580203476173 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 88, + "mode": 0, 
+ "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 5332 + }, + { + "name": "target_align", + "type": "RGB_PALETTE", + "link": 5331 + }, + { + "name": "alignment_factor", + "type": "FLOAT", + "link": 5334, + "widget": { + "name": "alignment_factor" + } + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5340 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Palette Color Align [Dream]" + }, + "widgets_values": [ + 1.2000000000000002 + ], + "color": "#323", + "bgcolor": "#535" + }, + { + "id": 347, + "type": "Palette Color Align [Dream]", + "pos": [ + 260.88856845447134, + 1354.2580203476148 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 89, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 5322 + }, + { + "name": "target_align", + "type": "RGB_PALETTE", + "link": 5323 + }, + { + "name": "alignment_factor", + "type": "FLOAT", + "link": 5325, + "widget": { + "name": "alignment_factor" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5337 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Palette Color Align [Dream]" + }, + "widgets_values": [ + 1.2000000000000002 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 349, + "type": "Palette Color Align [Dream]", + "pos": [ + 250.88856845447202, + 1664.2580203476143 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 90, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 5330 + }, + { + "name": "target_align", + "type": "RGB_PALETTE", + "link": 5329 + }, + { + "name": "alignment_factor", + "type": "FLOAT", + "link": 5333, + "widget": { + "name": "alignment_factor" + } + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5339 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Palette Color Align [Dream]" + }, + "widgets_values": [ + 1.2000000000000002 + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 348, + "type": "Palette Color Align [Dream]", + "pos": [ + 260.88856845447134, + 1504.2580203476136 + ], + "size": { + "0": 320, + "1": 80 + }, + "flags": {}, + "order": 91, + "mode": 0, + "inputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "link": 5326 + }, + { + "name": "target_align", + "type": "RGB_PALETTE", + "link": 5327 + }, + { + "name": "alignment_factor", + "type": "FLOAT", + "link": 5328, + "widget": { + "name": "alignment_factor" + } + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5338 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Palette Color Align [Dream]" + }, + "widgets_values": [ + 1.2000000000000002 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 58, + "type": "Reroute", + "pos": [ + 1640.9353410012352, + 840.4982211899608 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 103, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 333 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 5241 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 124, + "type": "ControlNetApplyAdvanced", + "pos": [ + 1680.9353410012357, + 560.4982211899617 + ], + "size": { + "0": 315, + "1": 166 + 
}, + "flags": {}, + "order": 104, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 5111 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5110 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 272 + }, + { + "name": "image", + "type": "IMAGE", + "link": 5296 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 5301 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 5302 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 1, + 0, + 1 + ] + }, + { + "id": 302, + "type": "VAEEncode", + "pos": [ + 1910.935341001228, + 780.4982211899609 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 106, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 5241 + }, + { + "name": "vae", + "type": "VAE", + "link": 5242 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5303 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 267, + "type": "KSampler", + "pos": [ + 2270.935341001233, + 570.4982211899617 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 107, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 5436 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 5301 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5302 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 5303 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5159, + 5308 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 838611060522510, + "randomize", + 25, + 9, + "euler_ancestral", + "normal", + 0.804544677734375 + ] + }, + { + "id": 268, + "type": "Reroute", + "pos": [ + 2620.935341001234, + 570.4982211899617 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 108, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5159 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5293 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 265, + "type": "PreviewImage", + "pos": [ + 3130.9159068821527, + 500.0091821650353 + ], + "size": { + "0": 250, + "1": 370 + }, + "flags": {}, + "order": 114, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5152 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 364, + "type": "Int Input [Dream]", + "pos": [ + -2097.1983297494985, + 1704.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 7, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5350 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Framerate", + "properties": { + "Node name for S&R": "Int Input [Dream]" + }, + "widgets_values": [ + 15 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 365, + "type": "Int Input [Dream]", + "pos": [ + -2097.1983297494985, + 1604.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 8, + "mode": 0, + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 5351 + ], + "shape": 3, + "slot_index": 0 + } + ], + 
"title": "✍ Number of frames", + "properties": { + "Node name for S&R": "Int Input [Dream]" + }, + "widgets_values": [ + 30 + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 362, + "type": "String Input [Dream]", + "pos": [ + -2097.1983297494985, + 1803.7646814141895 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5346 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Output Directory", + "properties": { + "Node name for S&R": "String Input [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 333, + "type": "Note", + "pos": [ + -2098.1983297494985, + 1907.7646814141895 + ], + "size": { + "0": 408.5453796386719, + "1": 74.01783752441406 + }, + "flags": {}, + "order": 10, + "mode": 0, + "title": "Note on settings", + "properties": { + "text": "" + }, + "widgets_values": [ + "Both framerate and total number of frames are important since they affect curves and sequence processing. Typically, queue an equal number of prompt executions as the total number of frames." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 171, + "type": "VAEDecode", + "pos": [ + 3161.547020486233, + 412.2550892786084 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 112, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 5294 + }, + { + "name": "vae", + "type": "VAE", + "link": 5205 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5152, + 5354 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 367, + "type": "Reroute", + "pos": [ + 3403.212597957077, + 394.68859707729007 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 115, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5354 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5357 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 334, + "type": "Note", + "pos": [ + -1376.5813007363186, + 888.406571589691 + ], + "size": { + "0": 300, + "1": 60 + }, + "flags": {}, + "order": 11, + "mode": 0, + "title": "Note on settings", + "properties": { + "text": "" + }, + "widgets_values": [ + "This node creates progression based on the files in the output directory." 
+ ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 230, + "type": "Frame Counter (Directory) [Dream]", + "pos": [ + -1374.2153335854396, + 697.9571941696488 + ], + "size": { + "0": 320, + "1": 150 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "total_frames", + "type": "INT", + "link": 5351, + "widget": { + "name": "total_frames" + }, + "slot_index": 0 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5347, + "widget": { + "name": "directory_path" + } + }, + { + "name": "frames_per_second", + "type": "INT", + "link": 5350, + "widget": { + "name": "frames_per_second" + } + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 5176 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter (Directory) [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric", + 500, + 15 + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 363, + "type": "Reroute", + "pos": [ + -1680, + 1838 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5346 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5347, + 5348, + 5358 + ], + "slot_index": 0 + } + ], + "title": "Test", + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 373, + "type": "Reroute", + "pos": [ + -1540, + 197 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5365 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 5366, + 5367 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 292, + "type": "Linear Curve [Dream]", + "pos": [ + -300.1999890136713, + 310.9999736785893 + ], + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5220 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5222 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Linear Curve [Dream]" + }, + "widgets_values": [ + -0.5, + 0.5 + ] + }, + { + "id": 291, + "type": "Sine Curve [Dream]", + "pos": [ + -300.1999890136713, + 465.99997367858884 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5221 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5223 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Sine Curve [Dream]" + }, + "widgets_values": [ + 0.3, + -0.3, + 2, + 0 + ] + }, + { + "id": 294, + "type": "Beat Curve [Dream]", + "pos": [ + -632.199989013671, + 294.9999736785893 + ], + "size": { + "0": 315, + "1": 318 + }, + "flags": {}, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5224 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5225 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + 
"links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Beat Curve [Dream]" + }, + "widgets_values": [ + 100, + 0, + 4, + -0.3, + 1, + "no", + 2, + 1, + 3, + 0, + 0 + ] + }, + { + "id": 352, + "type": "PrimitiveNode", + "pos": [ + -804.1114315455187, + 1318.2580203476155 + ], + "size": { + "0": 210, + "1": 82 + }, + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 5325, + 5328, + 5333, + 5334 + ], + "slot_index": 0, + "widget": { + "name": "alignment_factor" + } + } + ], + "title": "Alignment Factor", + "properties": {}, + "widgets_values": [ + 1.2, + "fixed" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 293, + "type": "Reroute", + "pos": [ + -806.1999890136706, + 358.9999736785893 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5368 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5220, + 5221, + 5224, + 5369 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 344, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -581.1111678736465, + 1641.6580613937565 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5319 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5329 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 919440470904266, + "randomize", + "center-left" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 342, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -578.1111678736465, + 1820.6580613937606 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 85, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5316 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5330 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 528123820881935, + "randomize", + "center-left" + ], + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 7, + "type": "CLIPTextEncode", + "pos": [ + -1256, + 9 + ], + "size": { + "0": 425.27801513671875, + "1": 180.6060791015625 + }, + "flags": { + "collapsed": true + }, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5367 + }, + { + "name": "text", + "type": "STRING", + "link": 2312, + "widget": { + "name": "text" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5372 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "text, watermark, logo, letters, writing, frame, border, hands, frame, paper" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 6, + "type": "CLIPTextEncode", + "pos": [ + -1250, + -43 + ], + "size": { + "0": 422.84503173828125, + "1": 164.31304931640625 + }, + "flags": { + "collapsed": true + }, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 5366 + }, + { + "name": "text", + "type": "STRING", + "link": 2309, + "widget": { + "name": "text" + } 
+ } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5371 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "serene forest landscape, watercolor, detailed" + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 375, + "type": "Reroute", + "pos": [ + 300, + -70 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5371 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5373 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 368, + "type": "Reroute", + "pos": [ + -1236, + 24 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5358 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5359 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 369, + "type": "Reroute", + "pos": [ + 334, + 26 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5359 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5360 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 274, + "type": "Reroute", + "pos": [ + -979, + 68 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5457 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5368, + 5370 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159", + "shape": 2 + }, + { + "id": 376, + "type": "Reroute", + "pos": [ + 301, + -21 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5372 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5374 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 386, + "type": "Reroute", + "pos": [ + -1491, + -190 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5392 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5389 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 382, + "type": "Reroute", + "pos": [ + -1462, + -149 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5381 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5382 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 186, + "type": "KSampler", + "pos": [ + 2772.704072973985, + 438.14907683566435 + ], + "size": { + "0": 320, + "1": 470 + }, + "flags": {}, + "order": 110, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 5399 + }, + { + "name": "positive", + "type": 
"CONDITIONING", + "link": 5387 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 5386 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 5293 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5294 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 700759510514483, + "randomize", + 5, + 5, + "dpmpp_3m_sde", + "exponential", + 0.6 + ] + }, + { + "id": 284, + "type": "Reroute", + "pos": [ + 2745.704072973985, + 371.1490768356643 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5395 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5204 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 331, + "type": "Note", + "pos": [ + 2810.704072973985, + 953.1490768356643 + ], + "size": { + "0": 570, + "1": 60 + }, + "flags": {}, + "order": 13, + "mode": 0, + "title": "Note on full frame sampler", + "properties": { + "text": "" + }, + "widgets_values": [ + "This step is really mostly important if you zoom out as it reintroduces details." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 339, + "type": "Reroute", + "pos": [ + -753.0145320767159, + 1250.5620514226848 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5420 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5318, + 5319, + 5320, + 5321 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#8c8c8c", + "bgcolor": "#787878" + }, + { + "id": 336, + "type": "Reroute", + "pos": [ + -773.8466075929075, + 1518.6144670803217 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 81, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5421 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5312, + 5315, + 5316, + 5317 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 398, + "type": "Reroute", + "pos": [ + 288, + 1073 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 79, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5415, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5417 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 400, + "type": "Reroute", + "pos": [ + -878, + 1115 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5419, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5420 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 359, + "type": "Note", + "pos": [ + 624.9741736417784, + 1294.1377389138133 + ], + "size": { + "0": 250, + "1": 270 + }, + "flags": {}, + "order": 14, + "mode": 0, + "title": "Note on noise", + "properties": { + "text": "" + }, + "widgets_values": [ + "This section of the spaghetti samples the edges of the seed image and the last frame and produces noise that will slightly nudge the colors of the next frame 
towards the original seed image." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 402, + "type": "Reroute", + "pos": [ + 176, + 1049 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 95, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5423 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5424 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 211, + "type": "ImageScale", + "pos": [ + 14, + 800 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 77, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5085 + }, + { + "name": "width", + "type": "INT", + "link": 5074, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 5075, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5088, + 5415 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 512, + 512, + "disabled" + ] + }, + { + "id": 343, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -556.0258263582216, + 1293.1377389138133 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5318 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5323 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 502577689536295, + "randomize", + "top-center" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 340, + "type": "Sample Image Area as Palette [Dream]", + "pos": [ + -560.0258263582216, + 1462.1377389138133 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": {}, + "order": 84, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5315 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 5322 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image Area as Palette [Dream]" + }, + "widgets_values": [ + 256, + 1125637252645999, + "randomize", + "top-center" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 119, + "type": "ControlNetLoader", + "pos": [ + 982.0796921393085, + 673.1125436264036 + ], + "size": { + "0": 400, + "1": 90 + }, + "flags": { + "collapsed": false + }, + "order": 15, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 272 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "SD1.5\\control_v11p_sd15_inpaint.pth" + ] + }, + { + "id": 325, + "type": "InpaintPreprocessor", + "pos": [ + 1164.0796921393064, + 841.1125436264033 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 101, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5295 + }, + { + "name": "mask", + "type": "MASK", + "link": 5430 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5296 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "InpaintPreprocessor" + } + }, + { + "id": 240, + "type": "Reroute", + "pos": [ + 1666.6531151244042, + 940.0343845569474 + ], + 
"size": [ + 75, + 26 + ], + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5426 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5242 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 150, + "type": "Reroute", + "pos": [ + 1202.0796921393064, + 909.1125436264033 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 99, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5289 + } + ], + "outputs": [ + { + "name": "", + "type": "IMAGE", + "links": [ + 333 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": false, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 403, + "type": "Reroute", + "pos": [ + 924.0796921393085, + 936.1125436264036 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5433 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5426 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 383, + "type": "Reroute", + "pos": [ + 602, + -138 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5382 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5393, + 5433 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 378, + "type": "Reroute", + "pos": [ + 1275, + -11 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5374, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5376, + 5380 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 377, + "type": "Reroute", + "pos": [ + 1383, + -68 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5373, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5375, + 5379 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 380, + "type": "Reroute", + "pos": [ + 2416, + -69 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5379 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5387 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 381, + "type": "Reroute", + "pos": [ + 2381, + -17 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5380 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 5386 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 234, + "type": "Image Motion [Dream]", + "pos": [ + 
385, + 751 + ], + "size": { + "0": 320, + "1": 360 + }, + "flags": {}, + "order": 96, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 5088 + }, + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5199 + }, + { + "name": "noise", + "type": "IMAGE", + "link": 5424 + }, + { + "name": "x_translation", + "type": "FLOAT", + "link": 5222, + "widget": { + "name": "x_translation" + } + }, + { + "name": "y_translation", + "type": "FLOAT", + "link": 5223, + "widget": { + "name": "y_translation" + } + }, + { + "name": "zoom", + "type": "FLOAT", + "link": 5225, + "widget": { + "name": "zoom" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 5439 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "mask1", + "type": "MASK", + "links": [ + 5438 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "mask2", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 2 + }, + { + "name": "mask3", + "type": "MASK", + "links": [], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "Image Motion [Dream]" + }, + "widgets_values": [ + -1.1272729492187497, + 15, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "id": 279, + "type": "Reroute", + "pos": [ + 3598, + 525 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 78, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5385 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5195 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 407, + "type": "Reroute", + "pos": [ + 3608, + 569 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 70, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5441 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5442 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 287, + "type": "Image Sequence Blend [Dream]", + "pos": [ + 4100, + 560 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 119, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5440 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5210 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Blend [Dream]" + }, + "widgets_values": [ + 0.1, + 0.1, + 1 + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 233, + "type": "Image Sequence Saver [Dream]", + "pos": [ + 3760, + 560 + ], + "size": { + "0": 320, + "1": 170 + }, + "flags": {}, + "order": 117, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5195 + }, + { + "name": "image", + "type": "IMAGE", + "link": 5193 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5442, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5440 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Saver [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "frame", + 5, + "stop output", + "jpg" + ], + "color": 
"#332922", + "bgcolor": "#593930" + }, + { + "id": 389, + "type": "Reroute", + "pos": [ + 2480, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5402 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5399 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 393, + "type": "Reroute", + "pos": [ + 2070, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5437 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5402 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 406, + "type": "Reroute", + "pos": [ + 1840, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5434 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5436, + 5437 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 387, + "type": "Reroute", + "pos": [ + 510, + -180 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5389 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5434 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 390, + "type": "Reroute", + "pos": [ + 1551, + -135 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5393 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5394 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 391, + "type": "Reroute", + "pos": [ + 2534, + -139 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5394 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5395 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 374, + "type": "Reroute", + "pos": [ + 538, + 77 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5370 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5378 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 370, + "type": "Reroute", + "pos": [ + 2405, + 32 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5360 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5397 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 379, + "type": "Reroute", + "pos": [ + 2345, + 78 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 69, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5378 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5383 + ], + "slot_index": 0 + } + ], + 
"properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 392, + "type": "Reroute", + "pos": [ + 3374, + 37 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5397 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 5441 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 384, + "type": "Reroute", + "pos": [ + 2837, + 79 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 74, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5383 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5384 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 385, + "type": "Reroute", + "pos": [ + 3293, + 80 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 76, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5384 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5385 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 232, + "type": "Image Sequence Loader [Dream]", + "pos": [ + -294.3996392822261, + 772.4008465576175 + ], + "size": { + "0": 320, + "1": 130 + }, + "flags": {}, + "order": 75, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 5082 + }, + { + "name": "default_image", + "type": "IMAGE", + "link": 5444 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 5348, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 5085 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "frame_name", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Loader [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric" + ] + }, + { + "id": 408, + "type": "Reroute", + "pos": [ + -777, + 820 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5456 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5444 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 401, + "type": "Reroute", + "pos": [ + -44, + 1103 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 93, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5422 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5423 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 399, + "type": "Reroute", + "pos": [ + -933, + 1273 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 80, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5417, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5421, + 5458 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 354, + "type": "Noise from Area Palettes [Dream]", + "pos": [ + 634.9741736417784, + 1602.1377389138133 + ], + "size": { + "0": 342.5999755859375, + "1": 362 + }, + 
"flags": {}, + "order": 92, + "mode": 0, + "inputs": [ + { + "name": "top_left_palette", + "type": "RGB_PALETTE", + "link": null + }, + { + "name": "top_center_palette", + "type": "RGB_PALETTE", + "link": 5337 + }, + { + "name": "top_right_palette", + "type": "RGB_PALETTE", + "link": null + }, + { + "name": "center_left_palette", + "type": "RGB_PALETTE", + "link": 5339 + }, + { + "name": "center_palette", + "type": "RGB_PALETTE", + "link": null + }, + { + "name": "center_right_palette", + "type": "RGB_PALETTE", + "link": 5338 + }, + { + "name": "bottom_left_palette", + "type": "RGB_PALETTE", + "link": null + }, + { + "name": "bottom_center_palette", + "type": "RGB_PALETTE", + "link": 5340 + }, + { + "name": "bottom_right_palette", + "type": "RGB_PALETTE", + "link": null + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 5422, + 5446 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Noise from Area Palettes [Dream]" + }, + "widgets_values": [ + 0.18181762695312503, + 256, + 256, + 0.11818237304687501, + 0.5, + 855083604429355, + "randomize" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 405, + "type": "Reroute", + "pos": [ + 927.0796921393089, + 858.1125436264033 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 98, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5438 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 5430, + 5447 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 237, + "type": "Reroute", + "pos": [ + 926.0796921393085, + 791.1125436264036 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 97, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5439 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5289, + 5295, + 5449 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#223", + "bgcolor": "#335" + }, + { + "id": 327, + "type": "Reroute", + "pos": [ + 2550.935341001234, + 920.498221189961 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 109, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5308, + "pos": [ + 45.2, + 0 + ] + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 5450 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 4, + "type": "CheckpointLoaderSimple", + "pos": [ + -2102.608581739303, + 1125.9695878411585 + ], + "size": { + "0": 400, + "1": 100 + }, + "flags": { + "collapsed": false + }, + "order": 16, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 5392 + ], + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 2977, + 5365 + ], + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 5381, + 5451 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "public\\main\\512-SD1.5\\Realistic_Vision_V5.0.safetensors" + ] + }, + { + "id": 415, + "type": "Reroute", + "pos": [ + -1416, + 2365 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5451 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5452 + ], + "slot_index": 0 + } + ], + 
"properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 338, + "type": "Reroute", + "pos": [ + -1251, + 169 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5313 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5419, + 5456 + ], + "slot_index": 0 + } + ], + "title": "Seed Image", + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 273, + "type": "Reroute", + "pos": [ + -1124.21533358544, + 636.9571941696493 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5176 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 5457 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#2a363b", + "bgcolor": "#3f5159" + }, + { + "id": 328, + "type": "Note", + "pos": [ + -512, + 962 + ], + "size": { + "0": 370, + "1": 90 + }, + "flags": {}, + "order": 17, + "mode": 0, + "title": "Note on motion", + "properties": { + "text": "" + }, + "widgets_values": [ + "This group reads the previously rendered frame and performs a motion transformation on the image. The output of the image motion node is the transformed image and up to three masks to use for outpainting." + ], + "color": "#568479", + "bgcolor": "#427065" + }, + { + "id": 419, + "type": "Reroute", + "pos": [ + -785.8463153465063, + 2070.274048358372 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 82, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5458 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5459 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 411, + "type": "MaskToImage", + "pos": [ + 1287, + 2084 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": { + "collapsed": true + }, + "order": 102, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 5447 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5448 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskToImage" + } + }, + { + "id": 416, + "type": "Reroute", + "pos": [ + 1658, + 2360 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5452 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 5453 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 414, + "type": "VAEDecode", + "pos": [ + 1771, + 2095 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": { + "collapsed": true + }, + "order": 111, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 5450 + }, + { + "name": "vae", + "type": "VAE", + "link": 5453 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5454 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 278, + "type": "Reroute", + "pos": [ + 3614, + 610 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 116, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5357 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": 
"IMAGE", + "links": [ + 5193, + 5460 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 420, + "type": "Reroute", + "pos": [ + 3634.1977835692924, + 1253.5845951163976 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 118, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 5460, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5461 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 418, + "type": "PreviewImage", + "pos": [ + 1970, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 120, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5461 + } + ], + "title": "Saved Image", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 417, + "type": "PreviewImage", + "pos": [ + 1730, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 113, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5454 + } + ], + "title": "After Inpainting", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 413, + "type": "PreviewImage", + "pos": [ + 1510, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 100, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5449 + } + ], + "title": "Inpainting input", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 412, + "type": "PreviewImage", + "pos": [ + 1270, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 105, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5448 + } + ], + "title": "Inpainting mask", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 410, + "type": "PreviewImage", + "pos": [ + 1040, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 94, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5446 + } + ], + "title": "Noise", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 409, + "type": "PreviewImage", + "pos": [ + 800, + 2080 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 87, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 5459 + } + ], + "title": "Previous Frame", + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 281, + "type": "Image Sequence Tweening [Dream]", + "pos": [ + 4440, + 560 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 121, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5210 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 5463 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Tweening [Dream]" + }, + "widgets_values": [ + 2 + ], + "color": "#332922", + "bgcolor": "#593930" + }, + { + "id": 422, + "type": "FFMPEG Video Encoder [Dream]", + "pos": [ + 4781, + 562 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 122, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 5463 + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node 
name for S&R": "FFMPEG Video Encoder [Dream]" + }, + "widgets_values": [ + "video", + 1, + true + ] + }, + { + "id": 236, + "type": "LoadImage", + "pos": [ + -2106, + 679 + ], + "size": { + "0": 430, + "1": 340 + }, + "flags": {}, + "order": 18, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 5313 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "7159726-HSC00002-7 (1).jpg", + "image" + ], + "color": "#2a363b", + "bgcolor": "#3f5159" + } + ], + "links": [ + [ + 272, + 119, + 0, + 124, + 2, + "CONTROL_NET" + ], + [ + 333, + 150, + 0, + 58, + 0, + "*" + ], + [ + 2309, + 78, + 0, + 6, + 1, + "STRING" + ], + [ + 2312, + 77, + 0, + 7, + 1, + "STRING" + ], + [ + 2977, + 4, + 1, + 201, + 0, + "*" + ], + [ + 5073, + 227, + 1, + 229, + 0, + "*" + ], + [ + 5074, + 228, + 0, + 211, + 1, + "INT" + ], + [ + 5075, + 229, + 0, + 211, + 2, + "INT" + ], + [ + 5080, + 87, + 0, + 231, + 0, + "FRAME_COUNTER" + ], + [ + 5081, + 231, + 0, + 82, + 0, + "FRAME_COUNTER" + ], + [ + 5082, + 231, + 0, + 232, + 0, + "FRAME_COUNTER" + ], + [ + 5085, + 232, + 0, + 211, + 0, + "IMAGE" + ], + [ + 5088, + 211, + 0, + 234, + 0, + "IMAGE" + ], + [ + 5110, + 242, + 0, + 124, + 1, + "CONDITIONING" + ], + [ + 5111, + 241, + 0, + 124, + 0, + "CONDITIONING" + ], + [ + 5152, + 171, + 0, + 265, + 0, + "IMAGE" + ], + [ + 5159, + 267, + 0, + 268, + 0, + "*" + ], + [ + 5176, + 230, + 0, + 273, + 0, + "*" + ], + [ + 5179, + 227, + 0, + 228, + 0, + "*" + ], + [ + 5193, + 278, + 0, + 233, + 1, + "IMAGE" + ], + [ + 5195, + 279, + 0, + 233, + 0, + "FRAME_COUNTER" + ], + [ + 5198, + 87, + 0, + 282, + 0, + "*" + ], + [ + 5199, + 282, + 0, + 234, + 1, + "FRAME_COUNTER" + ], + [ + 5204, + 284, + 0, + 285, + 0, + "*" + ], + [ + 5205, + 285, + 0, + 171, + 1, + "VAE" + ], + [ + 5210, + 287, + 0, + 281, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 5220, + 293, + 0, + 292, + 0, + "FRAME_COUNTER" + ], + [ + 5221, + 293, + 0, + 291, + 0, + "FRAME_COUNTER" + ], + [ + 5222, + 292, + 0, + 234, + 3, + "FLOAT" + ], + [ + 5223, + 291, + 0, + 234, + 4, + "FLOAT" + ], + [ + 5224, + 293, + 0, + 294, + 0, + "FRAME_COUNTER" + ], + [ + 5225, + 294, + 0, + 234, + 5, + "FLOAT" + ], + [ + 5241, + 58, + 0, + 302, + 0, + "IMAGE" + ], + [ + 5242, + 240, + 0, + 302, + 1, + "VAE" + ], + [ + 5289, + 237, + 0, + 150, + 0, + "*" + ], + [ + 5293, + 268, + 0, + 186, + 3, + "LATENT" + ], + [ + 5294, + 186, + 0, + 171, + 0, + "LATENT" + ], + [ + 5295, + 237, + 0, + 325, + 0, + "IMAGE" + ], + [ + 5296, + 325, + 0, + 124, + 3, + "IMAGE" + ], + [ + 5301, + 124, + 0, + 267, + 1, + "CONDITIONING" + ], + [ + 5302, + 124, + 1, + 267, + 2, + "CONDITIONING" + ], + [ + 5303, + 302, + 0, + 267, + 3, + "LATENT" + ], + [ + 5308, + 267, + 0, + 327, + 0, + "*" + ], + [ + 5312, + 336, + 0, + 337, + 0, + "IMAGE" + ], + [ + 5313, + 236, + 0, + 338, + 0, + "*" + ], + [ + 5315, + 336, + 0, + 340, + 0, + "IMAGE" + ], + [ + 5316, + 336, + 0, + 342, + 0, + "IMAGE" + ], + [ + 5317, + 336, + 0, + 341, + 0, + "IMAGE" + ], + [ + 5318, + 339, + 0, + 343, + 0, + "IMAGE" + ], + [ + 5319, + 339, + 0, + 344, + 0, + "IMAGE" + ], + [ + 5320, + 339, + 0, + 345, + 0, + "IMAGE" + ], + [ + 5321, + 339, + 0, + 346, + 0, + "IMAGE" + ], + [ + 5322, + 340, + 0, + 347, + 0, + "RGB_PALETTE" + ], + [ + 5323, + 343, + 0, + 347, + 1, + "RGB_PALETTE" + ], + [ + 5325, + 352, + 0, + 347, + 2, + "FLOAT" + ], + [ + 5326, + 341, + 0, + 348, + 0, + 
"RGB_PALETTE" + ], + [ + 5327, + 345, + 0, + 348, + 1, + "RGB_PALETTE" + ], + [ + 5328, + 352, + 0, + 348, + 2, + "FLOAT" + ], + [ + 5329, + 344, + 0, + 349, + 1, + "RGB_PALETTE" + ], + [ + 5330, + 342, + 0, + 349, + 0, + "RGB_PALETTE" + ], + [ + 5331, + 346, + 0, + 350, + 1, + "RGB_PALETTE" + ], + [ + 5332, + 337, + 0, + 350, + 0, + "RGB_PALETTE" + ], + [ + 5333, + 352, + 0, + 349, + 2, + "FLOAT" + ], + [ + 5334, + 352, + 0, + 350, + 2, + "FLOAT" + ], + [ + 5337, + 347, + 0, + 354, + 1, + "RGB_PALETTE" + ], + [ + 5338, + 348, + 0, + 354, + 5, + "RGB_PALETTE" + ], + [ + 5339, + 349, + 0, + 354, + 3, + "RGB_PALETTE" + ], + [ + 5340, + 350, + 0, + 354, + 7, + "RGB_PALETTE" + ], + [ + 5346, + 362, + 0, + 363, + 0, + "*" + ], + [ + 5347, + 363, + 0, + 230, + 1, + "STRING" + ], + [ + 5348, + 363, + 0, + 232, + 2, + "STRING" + ], + [ + 5350, + 364, + 0, + 230, + 2, + "INT" + ], + [ + 5351, + 365, + 0, + 230, + 0, + "INT" + ], + [ + 5354, + 171, + 0, + 367, + 0, + "*" + ], + [ + 5357, + 367, + 0, + 278, + 0, + "*" + ], + [ + 5358, + 363, + 0, + 368, + 0, + "*" + ], + [ + 5359, + 368, + 0, + 369, + 0, + "*" + ], + [ + 5360, + 369, + 0, + 370, + 0, + "*" + ], + [ + 5365, + 4, + 1, + 373, + 0, + "*" + ], + [ + 5366, + 373, + 0, + 6, + 0, + "CLIP" + ], + [ + 5367, + 373, + 0, + 7, + 0, + "CLIP" + ], + [ + 5368, + 274, + 0, + 293, + 0, + "*" + ], + [ + 5369, + 293, + 0, + 87, + 0, + "*" + ], + [ + 5370, + 274, + 0, + 374, + 0, + "*" + ], + [ + 5371, + 6, + 0, + 375, + 0, + "*" + ], + [ + 5372, + 7, + 0, + 376, + 0, + "*" + ], + [ + 5373, + 375, + 0, + 377, + 0, + "*" + ], + [ + 5374, + 376, + 0, + 378, + 0, + "*" + ], + [ + 5375, + 377, + 0, + 241, + 0, + "*" + ], + [ + 5376, + 378, + 0, + 242, + 0, + "*" + ], + [ + 5378, + 374, + 0, + 379, + 0, + "*" + ], + [ + 5379, + 377, + 0, + 380, + 0, + "*" + ], + [ + 5380, + 378, + 0, + 381, + 0, + "*" + ], + [ + 5381, + 4, + 2, + 382, + 0, + "*" + ], + [ + 5382, + 382, + 0, + 383, + 0, + "*" + ], + [ + 5383, + 379, + 0, + 384, + 0, + "*" + ], + [ + 5384, + 384, + 0, + 385, + 0, + "*" + ], + [ + 5385, + 385, + 0, + 279, + 0, + "*" + ], + [ + 5386, + 381, + 0, + 186, + 2, + "CONDITIONING" + ], + [ + 5387, + 380, + 0, + 186, + 1, + "CONDITIONING" + ], + [ + 5389, + 386, + 0, + 387, + 0, + "*" + ], + [ + 5392, + 4, + 0, + 386, + 0, + "*" + ], + [ + 5393, + 383, + 0, + 390, + 0, + "*" + ], + [ + 5394, + 390, + 0, + 391, + 0, + "*" + ], + [ + 5395, + 391, + 0, + 284, + 0, + "*" + ], + [ + 5397, + 370, + 0, + 392, + 0, + "*" + ], + [ + 5399, + 389, + 0, + 186, + 0, + "MODEL" + ], + [ + 5402, + 393, + 0, + 389, + 0, + "*" + ], + [ + 5415, + 211, + 0, + 398, + 0, + "*" + ], + [ + 5417, + 398, + 0, + 399, + 0, + "*" + ], + [ + 5419, + 338, + 0, + 400, + 0, + "*" + ], + [ + 5420, + 400, + 0, + 339, + 0, + "*" + ], + [ + 5421, + 399, + 0, + 336, + 0, + "*" + ], + [ + 5422, + 354, + 0, + 401, + 0, + "*" + ], + [ + 5423, + 401, + 0, + 402, + 0, + "*" + ], + [ + 5424, + 402, + 0, + 234, + 2, + "IMAGE" + ], + [ + 5426, + 403, + 0, + 240, + 0, + "*" + ], + [ + 5430, + 405, + 0, + 325, + 1, + "MASK" + ], + [ + 5433, + 383, + 0, + 403, + 0, + "*" + ], + [ + 5434, + 387, + 0, + 406, + 0, + "*" + ], + [ + 5436, + 406, + 0, + 267, + 0, + "MODEL" + ], + [ + 5437, + 406, + 0, + 393, + 0, + "*" + ], + [ + 5438, + 234, + 1, + 405, + 0, + "*" + ], + [ + 5439, + 234, + 0, + 237, + 0, + "*" + ], + [ + 5440, + 233, + 0, + 287, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 5441, + 392, + 0, + 407, + 0, + "*" + ], + [ + 5442, + 407, + 0, + 233, + 2, + "STRING" + ], + [ + 5444, + 408, + 0, + 232, + 
1, + "IMAGE" + ], + [ + 5446, + 354, + 0, + 410, + 0, + "IMAGE" + ], + [ + 5447, + 405, + 0, + 411, + 0, + "MASK" + ], + [ + 5448, + 411, + 0, + 412, + 0, + "IMAGE" + ], + [ + 5449, + 237, + 0, + 413, + 0, + "IMAGE" + ], + [ + 5450, + 327, + 0, + 414, + 0, + "LATENT" + ], + [ + 5451, + 4, + 2, + 415, + 0, + "*" + ], + [ + 5452, + 415, + 0, + 416, + 0, + "*" + ], + [ + 5453, + 416, + 0, + 414, + 1, + "VAE" + ], + [ + 5454, + 414, + 0, + 417, + 0, + "IMAGE" + ], + [ + 5456, + 338, + 0, + 408, + 0, + "*" + ], + [ + 5457, + 273, + 0, + 274, + 0, + "*" + ], + [ + 5458, + 399, + 0, + 419, + 0, + "*" + ], + [ + 5459, + 419, + 0, + 409, + 0, + "IMAGE" + ], + [ + 5460, + 278, + 0, + 420, + 0, + "*" + ], + [ + 5461, + 420, + 0, + 418, + 0, + "IMAGE" + ], + [ + 5463, + 281, + 0, + 422, + 0, + "ANIMATION_SEQUENCE" + ] + ], + "groups": [ + { + "title": "Prompting", + "bounding": [ + -2129, + 148, + 474, + 885 + ], + "color": "#a1309b", + "font_size": 24 + }, + { + "title": "Settings", + "bounding": [ + -2125, + 1259, + 482, + 742 + ], + "color": "#b58b2a", + "font_size": 24 + }, + { + "title": "Inpainting/Outpainting", + "bounding": [ + 950, + 483, + 1711, + 528 + ], + "color": "#929054", + "font_size": 24 + }, + { + "title": "Prev Frame Move", + "bounding": [ + -736, + 630, + 1450, + 456 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Full frame sampler", + "bounding": [ + 2764, + 318, + 691, + 700 + ], + "color": "#88A", + "font_size": 24 + }, + { + "title": "Output", + "bounding": [ + 3653, + 373, + 1458, + 381 + ], + "color": "#b06634", + "font_size": 24 + }, + { + "title": "Animation Driver", + "bounding": [ + -1385, + 592, + 336, + 373 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Motion Control", + "bounding": [ + -738, + 235, + 1451, + 392 + ], + "color": "#ef75ff", + "font_size": 24 + }, + { + "title": "Model selection", + "bounding": [ + -2128, + 1042, + 476, + 204 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Noise", + "bounding": [ + -730, + 1213, + 1840, + 765 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Previews", + "bounding": [ + -2124, + 2011, + 6473, + 329 + ], + "color": "#444", + "font_size": 24 + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/examples/prompt-morphing.json b/custom_nodes/comfyui-dream-project/examples/prompt-morphing.json new file mode 100644 index 0000000000000000000000000000000000000000..ee3eca8f1efabb09db6205c227314f2653fba287 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/examples/prompt-morphing.json @@ -0,0 +1,7024 @@ +{ + "last_node_id": 193, + "last_link_id": 341, + "nodes": [ + { + "id": 62, + "type": "Reroute", + "pos": [ + -310, + -670 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 91 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 96 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 57, + "type": "Build Prompt [Dream]", + "pos": [ + 1290, + -310 + ], + "size": { + "0": 279.916748046875, + "1": 100 + }, + "flags": {}, + "order": 86, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 85 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 289, + "widget": { + "name": "weight" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "partial_prompt", 
+ "type": "PARTIAL_PROMPT", + "links": [ + 87 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "sunny, spring", + 1 + ] + }, + { + "id": 26, + "type": "Build Prompt [Dream]", + "pos": [ + 970, + -310 + ], + "size": { + "0": 279.916748046875, + "1": 100 + }, + "flags": {}, + "order": 83, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 37 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 286, + "widget": { + "name": "weight" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 85 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "snow, winter", + 1 + ] + }, + { + "id": 25, + "type": "Build Prompt [Dream]", + "pos": [ + 650, + -310 + ], + "size": { + "0": 279.916748046875, + "1": 100 + }, + "flags": {}, + "order": 76, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 36 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 283, + "widget": { + "name": "weight" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "autumn, rain", + 1 + ] + }, + { + "id": 85, + "type": "Reroute", + "pos": [ + 2234, + 424 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 133 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 179 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 84, + "type": "Reroute", + "pos": [ + 2132, + 391 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 132 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 176 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 19, + "type": "Frame Counter Offset [Dream]", + "pos": [ + 2762.9266748065575, + -73.37708387563696 + ], + "size": { + "0": 342.5999755859375, + "1": 58 + }, + "flags": { + "collapsed": true + }, + "order": 81, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 269 + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter Offset [Dream]" + }, + "widgets_values": [ + -1 + ] + }, + { + "id": 96, + "type": "Reroute", + "pos": [ + -128, + 457 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 156 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 335 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 78, + "type": "String Input [Dream]", + "pos": [ + -836, + 275 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 119, + 156 + ], + "shape": 3, + "slot_index": 0 + 
} + ], + "title": "✍ Output Directory", + "properties": { + "Node name for S&R": "String Input [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI" + ] + }, + { + "id": 124, + "type": "Reroute", + "pos": [ + 4551, + -297 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 70, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 214 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 215 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 103, + "type": "Sample Image as Palette [Dream]", + "pos": [ + 4600, + -219 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": { + "collapsed": false + }, + "order": 77, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 215 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 209 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image as Palette [Dream]" + }, + "widgets_values": [ + 1024, + 1023351604428941, + "randomize" + ] + }, + { + "id": 125, + "type": "Reroute", + "pos": [ + 4539, + 26 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 106, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 273 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 217, + 220 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 127, + "type": "Reroute", + "pos": [ + 4923, + -116 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 111, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 220 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 221 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 104, + "type": "Sample Image as Palette [Dream]", + "pos": [ + 4596, + 108 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": { + "collapsed": false + }, + "order": 110, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 217 + } + ], + "outputs": [ + { + "name": "palette", + "type": "RGB_PALETTE", + "links": [ + 208 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Sample Image as Palette [Dream]" + }, + "widgets_values": [ + 1024, + 557841854168100, + "randomize" + ] + }, + { + "id": 119, + "type": "Compare Palettes [Dream]", + "pos": [ + 4732, + -28 + ], + "size": { + "0": 292.20001220703125, + "1": 86 + }, + "flags": {}, + "order": 113, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "RGB_PALETTE", + "link": 208 + }, + { + "name": "b", + "type": "RGB_PALETTE", + "link": 209 + } + ], + "outputs": [ + { + "name": "brightness_multiplier", + "type": "FLOAT", + "links": [ + 211 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "contrast_multiplier", + "type": "FLOAT", + "links": [ + 210 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "red_multiplier", + "type": "FLOAT", + "links": null, + "shape": 3 + }, + { + "name": "green_multiplier", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Compare Palettes [Dream]" + } + }, + { + "id": 118, + "type": "Reroute", + "pos": [ + 5071, + 34 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 116, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 210 + } + ], + "outputs": [ + { + "name": 
"FLOAT", + "type": "FLOAT", + "links": [ + 227 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 129, + "type": "Calculation [Dream]", + "pos": [ + 5183, + 272 + ], + "size": { + "0": 301.4544372558594, + "1": 232 + }, + "flags": { + "collapsed": true + }, + "order": 119, + "mode": 0, + "inputs": [ + { + "name": "r_float", + "type": "FLOAT", + "link": 224, + "widget": { + "name": "r_float" + } + }, + { + "name": "s_float", + "type": "FLOAT", + "link": 223, + "widget": { + "name": "s_float" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 225 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Calculation [Dream]" + }, + "widgets_values": [ + "(r - 1)*s + 1", + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "id": 121, + "type": "Reroute", + "pos": [ + 5107, + 156 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 115, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 211 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 224 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 131, + "type": "Reroute", + "pos": [ + 5327, + 179 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 123, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 225 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 228 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 132, + "type": "Calculation [Dream]", + "pos": [ + 5182, + 114 + ], + "size": { + "0": 301.4544372558594, + "1": 232 + }, + "flags": { + "collapsed": true + }, + "order": 120, + "mode": 0, + "inputs": [ + { + "name": "r_float", + "type": "FLOAT", + "link": 227, + "widget": { + "name": "r_float" + } + }, + { + "name": "s_float", + "type": "FLOAT", + "link": 226, + "widget": { + "name": "s_float" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 229 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Calculation [Dream]" + }, + "widgets_values": [ + "(r - 1)*s + 1", + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "id": 133, + "type": "Reroute", + "pos": [ + 5068, + -49 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 124, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 229 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 230 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 122, + "type": "Image Contrast Adjustment [Dream]", + "pos": [ + 4949, + -295 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 125, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 221 + }, + { + "name": "factor", + "type": "FLOAT", + "link": 230, + "widget": { + "name": "factor" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 222 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Contrast Adjustment [Dream]" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 128, + "type": "Image Brightness Adjustment [Dream]", + "pos": [ + 5113, + -187 + 
], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 126, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 222 + }, + { + "name": "factor", + "type": "FLOAT", + "link": 228, + "widget": { + "name": "factor" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 235 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Brightness Adjustment [Dream]" + }, + "widgets_values": [ + 1 + ] + }, + { + "id": 89, + "type": "Reroute", + "pos": [ + 4714, + -643 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 65, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 143 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 231 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 50, + "type": "MiDaS-DepthMapPreprocessor", + "pos": [ + 3604.370798354893, + -255.81188496288598 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 63, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 247 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 74 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MiDaS-DepthMapPreprocessor" + }, + "widgets_values": [ + 5.237730900441297, + 0.16363647460937536, + 512 + ] + }, + { + "id": 147, + "type": "Reroute", + "pos": [ + 3806.370798354893, + -99.81188496288625 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 108, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 252 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 254 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 88, + "type": "Reroute", + "pos": [ + 3943, + -642 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 57, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 138 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 143 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 69, + "type": "Reroute", + "pos": [ + 3357, + -637 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 50, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 103 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 138 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 68, + "type": "Reroute", + "pos": [ + 1745, + -640 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 102 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 103 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 63, + "type": "Reroute", + "pos": [ + -230, + -640 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 92 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 102 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 43, + "type": "Reroute", + "pos": [ + -253, + -602 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + 
"order": 39, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 68 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 69, + 76 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 44, + "type": "Reroute", + "pos": [ + 498, + -605 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 51, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 69 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 71 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 82, + "type": "Reroute", + "pos": [ + 3402, + -603 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 80, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 130 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 155 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 80, + "type": "Reroute", + "pos": [ + 4208, + -610 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 84, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 155 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 123 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 93, + "type": "Reroute", + "pos": [ + 5686.218317189983, + -120.99836627792959 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 90, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 260, + "pos": [ + 74.6, + 0 + ] + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 152 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 79, + "type": "Reroute", + "pos": [ + 5324, + -607 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 87, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 123 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 260, + 261 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 134, + "type": "Reroute", + "pos": [ + 5798, + -637 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 72, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 231 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 232, + 263 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 149, + "type": "Reroute", + "pos": [ + 6305, + -638 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 79, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 263 + } + ], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 264 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 21, + "type": "VAEEncode", + "pos": [ + 5892.055568991249, + -318.8922317182515 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 127, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 235 + }, + { + "name": "vae", + "type": "VAE", + "link": 232 + } + ], + "outputs": [ + { + "name": "LATENT", 
+ "type": "LATENT", + "links": [ + 140 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + } + }, + { + "id": 64, + "type": "Reroute", + "pos": [ + 981, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 93 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 97 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 65, + "type": "Reroute", + "pos": [ + 1846, + -673 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 96 + } + ], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 98, + 99 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 66, + "type": "Reroute", + "pos": [ + 1845, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 49, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 97 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 100 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 92, + "type": "Reroute", + "pos": [ + 4454, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 71, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 296 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 233 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 135, + "type": "Reroute", + "pos": [ + 5910, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 78, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 233 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 234 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 61, + "type": "Reroute", + "pos": [ + -313, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 90 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 93 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 47, + "type": "Reroute", + "pos": [ + -249, + -565 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 70 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 72 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 49, + "type": "Reroute", + "pos": [ + 1875, + -579 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 72 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 117 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 77, + "type": "Reroute", + "pos": [ + 2697, + -568 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 48, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 117 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": 
[ + 161, + 246 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 100, + "type": "Reroute", + "pos": [ + 3421, + -561 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 54, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 161 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 162, + 213 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 123, + "type": "Reroute", + "pos": [ + 4412, + -572 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 62, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 213 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 214 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 90, + "type": "Reroute", + "pos": [ + 4750, + -520 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 121, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 145 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 147 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 91, + "type": "Reroute", + "pos": [ + 4749, + -480 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 122, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 146 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 148 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 151, + "type": "Image Sequence Blend [Dream]", + "pos": [ + 7811.282361863094, + -318.08540210493265 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 134, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 266 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 267 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Blend [Dream]" + }, + "widgets_values": [ + 0.2, + 0.2, + 3 + ] + }, + { + "id": 72, + "type": "Reroute", + "pos": [ + 4327, + -527 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 117, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 141 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 145 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 73, + "type": "Reroute", + "pos": [ + 4348, + -480 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 118, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 142 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 146 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 70, + "type": "Reroute", + "pos": [ + 2584, + -523 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 104, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 104 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 251 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 71, + "type": 
"Reroute", + "pos": [ + 2582, + -476 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 105, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 105 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 252, + 268 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 152, + "type": "Reroute", + "pos": [ + 3302, + -471 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 109, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 268 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": null + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 146, + "type": "Reroute", + "pos": [ + 3652, + -166 + ], + "size": [ + 140.8, + 26 + ], + "flags": {}, + "order": 107, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 251 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 253 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 112, + "type": "Reroute", + "pos": [ + 2905, + 428 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 179 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 180 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 111, + "type": "Reroute", + "pos": [ + 2678, + 389 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 176 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 177 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 126, + "type": "Reroute", + "pos": [ + 6090, + 460 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 53, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 218 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 219 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 155, + "type": "Reroute", + "pos": [ + 4438, + 429 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 102, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 272 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 273 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 143, + "type": "Reroute", + "pos": [ + 3555, + -427 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 55, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 246, + "pos": [ + 41, + 0 + ] + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 247 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": true + } + }, + { + "id": 52, + "type": "Reroute", + "pos": [ + -360, + -190 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 52, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 76 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 83, + 278 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, 
+ "horizontal": false + } + }, + { + "id": 24, + "type": "Build Prompt [Dream]", + "pos": [ + 310, + -310 + ], + "size": { + "0": 279.916748046875, + "1": 100 + }, + "flags": {}, + "order": 69, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 81, + "slot_index": 0 + }, + { + "name": "weight", + "type": "FLOAT", + "link": 279, + "widget": { + "name": "weight" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "summer, green grass", + 1 + ] + }, + { + "id": 140, + "type": "Reroute", + "pos": [ + 30, + 50 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 241, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 240, + 281 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 56, + "type": "Reroute", + "pos": [ + -50, + -190 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 59, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 83 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 112, + 282 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 139, + "type": "Reroute", + "pos": [ + 411, + 41 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 240, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 239, + 284 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 75, + "type": "Reroute", + "pos": [ + 330, + -190 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 67, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 112 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 114, + 285 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 138, + "type": "Reroute", + "pos": [ + 817, + 37 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 47, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 239, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 287 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 76, + "type": "Reroute", + "pos": [ + 750, + -190 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 74, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 114 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 288 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 130, + "type": "Float Input [Dream]", + "pos": [ + 4596, + 280 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 223, + 226 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Alignment factor", + "properties": { + "Node name for 
S&R": "Float Input [Dream]" + }, + "widgets_values": [ + 0.7 + ] + }, + { + "id": 7, + "type": "VAEDecode", + "pos": [ + 6531.266744446639, + -328.62463277942487 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 129, + "mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": 264, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9, + 292 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecode" + } + }, + { + "id": 163, + "type": "UpscaleModelLoader", + "pos": [ + 6835.001486999512, + -334.6799815673828 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "outputs": [ + { + "name": "UPSCALE_MODEL", + "type": "UPSCALE_MODEL", + "links": [ + 294 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "UpscaleModelLoader" + }, + "widgets_values": [ + "4x_foolhardy_Remacri.pth" + ] + }, + { + "id": 164, + "type": "Note", + "pos": [ + 6839, + -128 + ], + "size": { + "0": 355.8685302734375, + "1": 59.4520263671875 + }, + "flags": {}, + "order": 3, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Using 4x_foolhardy_Remacri for upscale.\nhttps://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 67, + "type": "Reroute", + "pos": [ + 3088, + -714 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 56, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 100 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 295 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 167, + "type": "Reroute", + "pos": [ + 3566, + -712 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 64, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 295 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 296 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 162, + "type": "ImageUpscaleWithModel", + "pos": [ + 6849.001486999512, + -225.6799815673828 + ], + "size": { + "0": 241.79998779296875, + "1": 46 + }, + "flags": {}, + "order": 131, + "mode": 0, + "inputs": [ + { + "name": "upscale_model", + "type": "UPSCALE_MODEL", + "link": 294, + "slot_index": 0 + }, + { + "name": "image", + "type": "IMAGE", + "link": 292 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 303 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageUpscaleWithModel" + } + }, + { + "id": 98, + "type": "Reroute", + "pos": [ + 7324, + 448 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 61, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 219 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 159 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 145, + "type": "ControlNetLoader", + "pos": [ + 3614, + -381 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 4, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 305 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + 
"SD1.5\\control_v11f1p_sd15_depth.pth" + ] + }, + { + "id": 87, + "type": "Reroute", + "pos": [ + 3153, + 390 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 43, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 177 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 136 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 86, + "type": "Reroute", + "pos": [ + 3157, + 427 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 44, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 180 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 137 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 171, + "type": "Reroute", + "pos": [ + -104, + 501 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 309 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 310 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 172, + "type": "Reroute", + "pos": [ + -102, + 538 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 308 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 311 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 173, + "type": "Reroute", + "pos": [ + 3989.896920186283, + 499.7709166573809 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 310 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 312 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 174, + "type": "Reroute", + "pos": [ + 4007, + 543 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 311 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 313 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 175, + "type": "Reroute", + "pos": [ + 7134, + 482 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 45, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 312 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 314 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 176, + "type": "Reroute", + "pos": [ + 7181, + 519 + ], + "size": [ + 75, + 26 + ], + "flags": {}, + "order": 46, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 313 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 315 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 83, + "type": "Common Frame Dimensions [Dream]", + "pos": [ + -830, + 387 + ], + "size": { + "0": 315, + "1": 238 + }, + "flags": {}, + "order": 5, + "mode": 0, + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 132 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 133 + ], + "shape": 3, + 
"slot_index": 1 + }, + { + "name": "final_width", + "type": "INT", + "links": [ + 309 + ], + "shape": 3, + "slot_index": 2 + }, + { + "name": "final_height", + "type": "INT", + "links": [ + 308 + ], + "shape": 3, + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "Common Frame Dimensions [Dream]" + }, + "widgets_values": [ + "1280", + "16:9", + "wide", + "2", + 64, + "ceil" + ] + }, + { + "id": 170, + "type": "ImageScale", + "pos": [ + 7168, + -225 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": { + "collapsed": true + }, + "order": 132, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 303 + }, + { + "name": "width", + "type": "INT", + "link": 314, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 315, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 304 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "nearest-exact", + 512, + 512, + "disabled" + ] + }, + { + "id": 18, + "type": "Image Sequence Loader [Dream]", + "pos": [ + 2974.4051691056047, + -29.907667959345808 + ], + "size": { + "0": 315, + "1": 126 + }, + "flags": {}, + "order": 85, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 25 + }, + { + "name": "default_image", + "type": "IMAGE", + "link": 162 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 257, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 187 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "frame_name", + "type": "STRING", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Loader [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric" + ] + }, + { + "id": 154, + "type": "Reroute", + "pos": [ + 3482, + 429 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 98, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 271 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 272, + 317 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 178, + "type": "LineArtPreprocessor", + "pos": [ + 3606, + 104 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 103, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 317 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 318 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LineArtPreprocessor" + }, + "widgets_values": [ + "disable", + 512 + ] + }, + { + "id": 177, + "type": "ControlNetLoader", + "pos": [ + 3593, + -2 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 6, + "mode": 0, + "outputs": [ + { + "name": "CONTROL_NET", + "type": "CONTROL_NET", + "links": [ + 316 + ], + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ControlNetLoader" + }, + "widgets_values": [ + "SD1.5\\control_v11p_sd15_lineart.pth" + ] + }, + { + "id": 34, + "type": "ImageScale", + "pos": [ + 3337.8065851212295, + -1.1077146512402969 + ], + "size": { + "0": 315, + "1": 130 + }, + "flags": { + "collapsed": true + }, + "order": 88, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 187 + }, + { + 
"name": "width", + "type": "INT", + "link": 136, + "widget": { + "name": "width" + }, + "slot_index": 1 + }, + { + "name": "height", + "type": "INT", + "link": 137, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 270 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageScale" + }, + "widgets_values": [ + "bicubic", + 512, + 512, + "disabled" + ] + }, + { + "id": 55, + "type": "Build Prompt [Dream]", + "pos": [ + 1, + -313 + ], + "size": { + "0": 297.6357116699219, + "1": 112.5521240234375 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 80 + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 81 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "mountain landscape, photo, professional, realistic", + 0.8 + ], + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 165, + "type": "Note", + "pos": [ + 3869, + 215 + ], + "size": { + "0": 442.48046875, + "1": 58 + }, + "flags": {}, + "order": 7, + "mode": 0, + "properties": { + "text": "" + }, + "widgets_values": [ + "Using control_v11f1p_sd15_depth.pth and control_v11f1p_sd15_lineart.pth\nhttps://huggingface.co/lllyasviel/ControlNet-v1-1/tree/main" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 157, + "type": "Triangle Event Curve [Dream]", + "pos": [ + -257, + -118 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 60, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 278 + }, + { + "name": "width_seconds", + "type": "FLOAT", + "link": 280, + "widget": { + "name": "width_seconds" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 279 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Triangle Event Curve [Dream]" + }, + "widgets_values": [ + 1, + 0, + 1, + 2 + ] + }, + { + "id": 158, + "type": "Triangle Event Curve [Dream]", + "pos": [ + 150, + -121 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 68, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 282 + }, + { + "name": "width_seconds", + "type": "FLOAT", + "link": 281, + "widget": { + "name": "width_seconds" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 283 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Triangle Event Curve [Dream]" + }, + "widgets_values": [ + 1, + 0, + 1, + 4 + ] + }, + { + "id": 159, + "type": "Triangle Event Curve [Dream]", + "pos": [ + 544, + -123 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 75, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 285 + }, + { + "name": "width_seconds", + "type": "FLOAT", + "link": 284, + "widget": { + "name": "width_seconds" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 286 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Triangle Event Curve [Dream]" + }, + 
"widgets_values": [ + 1, + 0, + 1, + 6 + ] + }, + { + "id": 160, + "type": "Triangle Event Curve [Dream]", + "pos": [ + 927, + -122 + ], + "size": { + "0": 315, + "1": 150 + }, + "flags": {}, + "order": 82, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 288 + }, + { + "name": "width_seconds", + "type": "FLOAT", + "link": 287, + "widget": { + "name": "width_seconds" + } + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 289 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Triangle Event Curve [Dream]" + }, + "widgets_values": [ + 1, + 0, + 1, + 8 + ] + }, + { + "id": 39, + "type": "Build Prompt [Dream]", + "pos": [ + -270, + -310 + ], + "size": { + "0": 245.1999969482422, + "1": 100 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": null + } + ], + "outputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "links": [ + 80 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Build Prompt [Dream]" + }, + "widgets_values": [ + "logo, text, watermark, signature, signed, frame, signed, pumpkin", + -0.8 + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 46, + "type": "ControlNetApplyAdvanced", + "pos": [ + 3974.370798354893, + -410.8118849628873 + ], + "size": { + "0": 315, + "1": 166 + }, + "flags": {}, + "order": 114, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 255 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 256 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 305, + "slot_index": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 74 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 141 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 142 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 0.8, + 0, + 1 + ] + }, + { + "id": 141, + "type": "Float Input [Dream]", + "pos": [ + -308, + 71 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 9, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 241, + 280 + ], + "shape": 3, + "slot_index": 0 + } + ], + "title": "✍ Event Width", + "properties": { + "Node name for S&R": "Float Input [Dream]" + }, + "widgets_values": [ + 7 + ] + }, + { + "id": 94, + "type": "Beat Curve [Dream]", + "pos": [ + 5588.415456647985, + -33.219014131323135 + ], + "size": { + "0": 315, + "1": 318 + }, + "flags": {}, + "order": 97, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 152 + }, + { + "name": "low_value", + "type": "FLOAT", + "link": 319, + "widget": { + "name": "low_value" + }, + "slot_index": 1 + }, + { + "name": "high_value", + "type": "FLOAT", + "link": 320, + "widget": { + "name": "high_value" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 153 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "INT", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Beat Curve [Dream]" + }, + "widgets_values": [ + 60, + 0, + 4, + 0.475, + 0.7, + "no", + 1, + 1, + 0, + 0, 
+ 0 + ] + }, + { + "id": 45, + "type": "LoadImage", + "pos": [ + -827, + -99 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 10, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 70 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "frame_00004.jpg", + "image" + ] + }, + { + "id": 5, + "type": "CheckpointLoaderSimple", + "pos": [ + -836, + -266 + ], + "size": { + "0": 315, + "1": 98 + }, + "flags": {}, + "order": 11, + "mode": 0, + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 90 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 91 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "VAE", + "type": "VAE", + "links": [ + 92 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "CheckpointLoaderSimple" + }, + "widgets_values": [ + "public\\main\\768-SD2.1\\landscapeRealistic_v20768BetterRender.safetensors" + ] + }, + { + "id": 6, + "type": "KSampler", + "pos": [ + 6117, + -417 + ], + "size": { + "0": 315, + "1": 262 + }, + "flags": {}, + "order": 128, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 234 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 147 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 148 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 140, + "slot_index": 3 + }, + { + "name": "denoise", + "type": "FLOAT", + "link": 153, + "widget": { + "name": "denoise" + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 7 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "KSampler" + }, + "widgets_values": [ + 1051311018316524, + "randomize", + 30, + 7.5, + "euler", + "normal", + 0.8290911865234379 + ] + }, + { + "id": 8, + "type": "PreviewImage", + "pos": [ + 6063, + -103 + ], + "size": { + "0": 672.37939453125, + "1": 436.3568115234375 + }, + "flags": {}, + "order": 130, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 148, + "type": "Reroute", + "pos": [ + 7052, + -617 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 91, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 261 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 262 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 17, + "type": "Frame Counter (Directory) [Dream]", + "pos": [ + -826, + -666 + ], + "size": { + "0": 315, + "1": 154 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "directory_path", + "type": "STRING", + "link": 119, + "widget": { + "name": "directory_path" + }, + "slot_index": 0 + }, + { + "name": "total_frames", + "type": "INT", + "link": 154, + "widget": { + "name": "total_frames" + } + } + ], + "outputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "links": [ + 68 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Counter (Directory) [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "*", + "numeric", + 300, + 15 + ] + }, + { + "id": 95, + "type": "Frame Count Calculator 
[Dream]", + "pos": [ + -1217, + -652 + ], + "size": { + "0": 315, + "1": 154 + }, + "flags": {}, + "order": 12, + "mode": 0, + "outputs": [ + { + "name": "TOTAL", + "type": "INT", + "links": [ + 154 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Frame Count Calculator [Dream]" + }, + "widgets_values": [ + 0, + 0, + 10, + 0, + 15 + ] + }, + { + "id": 22, + "type": "Image Sequence Saver [Dream]", + "pos": [ + 7448.356765125588, + -318.5521625401107 + ], + "size": { + "0": 315, + "1": 174 + }, + "flags": {}, + "order": 133, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 262 + }, + { + "name": "image", + "type": "IMAGE", + "link": 304 + }, + { + "name": "directory_path", + "type": "STRING", + "link": 159, + "widget": { + "name": "directory_path" + }, + "slot_index": 2 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 266 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Saver [Dream]" + }, + "widgets_values": [ + "I:\\AI\\output\\ComfyUI", + "frame", + 5, + "stop output", + "jpg" + ] + }, + { + "id": 59, + "type": "Reroute", + "pos": [ + 1986, + -317 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 93, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 88 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 94 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#232", + "bgcolor": "#353" + }, + { + "id": 60, + "type": "Reroute", + "pos": [ + 1988, + -272 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 95, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 89 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 95 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + }, + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 74, + "type": "Reroute", + "pos": [ + 2671, + -608 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 73, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 333 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 130, + 269 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 191, + "type": "Reroute", + "pos": [ + 2267, + -614 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 66, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 332 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 333 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 3, + "type": "CLIPTextEncode", + "pos": [ + 2179, + -346 + ], + "size": { + "0": 210, + "1": 54 + }, + "flags": {}, + "order": 99, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 98, + "slot_index": 0 + }, + { + "name": "text", + "type": "STRING", + "link": 94, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 104 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ], + 
"color": "#232", + "bgcolor": "#353" + }, + { + "id": 9, + "type": "CLIPTextEncode", + "pos": [ + 2176, + -243 + ], + "size": { + "0": 210, + "1": 54 + }, + "flags": {}, + "order": 100, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 99, + "slot_index": 0 + }, + { + "name": "text", + "type": "STRING", + "link": 95, + "widget": { + "name": "text" + }, + "slot_index": 1 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 105 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "" + ], + "color": "#322", + "bgcolor": "#533" + }, + { + "id": 97, + "type": "Reroute", + "pos": [ + 2710, + 460 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 337 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 218, + 257 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 153, + "type": "Reroute", + "pos": [ + 3463, + 179 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 92, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 270 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 271 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 181, + "type": "Reroute", + "pos": [ + 1193, + 576 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 322, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 321 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 183, + "type": "Reroute", + "pos": [ + 1089, + 613 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 324, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 323 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 180, + "type": "Reroute", + "pos": [ + 5491, + 588 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 323, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 320 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 179, + "type": "Reroute", + "pos": [ + 5384, + 560 + ], + "size": [ + 82, + 26 + ], + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 321, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 319 + ] + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 182, + "type": "Float Input [Dream]", + "pos": [ + -828, + 712 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 13, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 322 + ], + "shape": 3 + } + ], + "title": "✍ Low Denoise", + "properties": { + "Node name for S&R": "Float Input [Dream]" + }, + "widgets_values": [ + 0.55 + ] + }, + { + "id": 142, + "type": "ControlNetApplyAdvanced", + "pos": [ + 3986, + -3 + ], + "size": { + "0": 315, + 
"1": 166 + }, + "flags": {}, + "order": 112, + "mode": 0, + "inputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "link": 253 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 254 + }, + { + "name": "control_net", + "type": "CONTROL_NET", + "link": 316, + "slot_index": 2 + }, + { + "name": "image", + "type": "IMAGE", + "link": 318 + } + ], + "outputs": [ + { + "name": "positive", + "type": "CONDITIONING", + "links": [ + 255 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "CONDITIONING", + "links": [ + 256 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ControlNetApplyAdvanced" + }, + "widgets_values": [ + 0.75, + 0, + 1 + ] + }, + { + "id": 184, + "type": "Float Input [Dream]", + "pos": [ + -830, + 814 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 14, + "mode": 0, + "outputs": [ + { + "name": "FLOAT", + "type": "FLOAT", + "links": [ + 324 + ], + "shape": 3 + } + ], + "title": "✍ High Denoise", + "properties": { + "Node name for S&R": "Float Input [Dream]" + }, + "widgets_values": [ + 0.75 + ] + }, + { + "id": 38, + "type": "Finalize Prompt [Dream]", + "pos": [ + 1658, + -311 + ], + "size": { + "0": 315, + "1": 126 + }, + "flags": {}, + "order": 89, + "mode": 0, + "inputs": [ + { + "name": "partial_prompt", + "type": "PARTIAL_PROMPT", + "link": 87 + } + ], + "outputs": [ + { + "name": "positive", + "type": "STRING", + "links": [ + 88, + 338 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "negative", + "type": "STRING", + "links": [ + 89, + 339 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "Finalize Prompt [Dream]" + }, + "widgets_values": [ + "by_abs_max", + 1.5, + 1.25 + ] + }, + { + "id": 189, + "type": "String to Log Entry [Dream]", + "pos": [ + 1318, + -132 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 94, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 338, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 330 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String to Log Entry [Dream]" + }, + "widgets_values": [ + "", + "Positive prompt" + ] + }, + { + "id": 190, + "type": "String to Log Entry [Dream]", + "pos": [ + 1240, + 7 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 96, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 339, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": [ + 331 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "String to Log Entry [Dream]" + }, + "widgets_values": [ + "", + "Negative prompt" + ] + }, + { + "id": 188, + "type": "Log File [Dream]", + "pos": [ + 1676, + -142 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 101, + "mode": 0, + "inputs": [ + { + "name": "frame_counter", + "type": "FRAME_COUNTER", + "link": 340 + }, + { + "name": "entry_0", + "type": "LOG_ENTRY", + "link": 330 + }, + { + "name": "entry_1", + "type": "LOG_ENTRY", + "link": 331 + }, + { + "name": "entry_2", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_3", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_4", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_5", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": 
"entry_6", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "entry_7", + "type": "LOG_ENTRY", + "link": null + }, + { + "name": "log_directory", + "type": "STRING", + "link": 336, + "widget": { + "name": "log_directory" + } + } + ], + "properties": { + "Node name for S&R": "Log File [Dream]" + }, + "widgets_values": [ + "I:\\AI\\ComfyUI\\ComfyUI\\output", + "dreamlog.txt", + true, + true, + true + ] + }, + { + "id": 48, + "type": "Reroute", + "pos": [ + 1361, + -612 + ], + "size": [ + 149.2, + 26 + ], + "flags": {}, + "order": 58, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 71 + } + ], + "outputs": [ + { + "name": "FRAME_COUNTER", + "type": "FRAME_COUNTER", + "links": [ + 332, + 340 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 192, + "type": "Reroute", + "pos": [ + 1512, + 462 + ], + "size": [ + 90.4, + 26 + ], + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "", + "type": "*", + "link": 335 + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 336, + 337 + ], + "slot_index": 0 + } + ], + "properties": { + "showOutputText": true, + "horizontal": false + } + }, + { + "id": 37, + "type": "Image Sequence Tweening [Dream]", + "pos": [ + 8156.282361863093, + -319.08540210493265 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 135, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 267 + } + ], + "outputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "links": [ + 341 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Image Sequence Tweening [Dream]" + }, + "widgets_values": [ + 4 + ] + }, + { + "id": 193, + "type": "FFMPEG Video Encoder [Dream]", + "pos": [ + 8506, + -319 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 136, + "mode": 0, + "inputs": [ + { + "name": "sequence", + "type": "ANIMATION_SEQUENCE", + "link": 341 + } + ], + "outputs": [ + { + "name": "log_entry", + "type": "LOG_ENTRY", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "FFMPEG Video Encoder [Dream]" + }, + "widgets_values": [ + "video", + 1, + true + ] + } + ], + "links": [ + [ + 7, + 6, + 0, + 7, + 0, + "LATENT" + ], + [ + 9, + 7, + 0, + 8, + 0, + "IMAGE" + ], + [ + 25, + 19, + 0, + 18, + 0, + "FRAME_COUNTER" + ], + [ + 36, + 24, + 0, + 25, + 0, + "PARTIAL_PROMPT" + ], + [ + 37, + 25, + 0, + 26, + 0, + "PARTIAL_PROMPT" + ], + [ + 68, + 17, + 0, + 43, + 0, + "*" + ], + [ + 69, + 43, + 0, + 44, + 0, + "*" + ], + [ + 70, + 45, + 0, + 47, + 0, + "*" + ], + [ + 71, + 44, + 0, + 48, + 0, + "*" + ], + [ + 72, + 47, + 0, + 49, + 0, + "*" + ], + [ + 74, + 50, + 0, + 46, + 3, + "IMAGE" + ], + [ + 76, + 43, + 0, + 52, + 0, + "*" + ], + [ + 80, + 39, + 0, + 55, + 0, + "PARTIAL_PROMPT" + ], + [ + 81, + 55, + 0, + 24, + 0, + "PARTIAL_PROMPT" + ], + [ + 83, + 52, + 0, + 56, + 0, + "*" + ], + [ + 85, + 26, + 0, + 57, + 0, + "PARTIAL_PROMPT" + ], + [ + 87, + 57, + 0, + 38, + 0, + "PARTIAL_PROMPT" + ], + [ + 88, + 38, + 0, + 59, + 0, + "*" + ], + [ + 89, + 38, + 1, + 60, + 0, + "*" + ], + [ + 90, + 5, + 0, + 61, + 0, + "*" + ], + [ + 91, + 5, + 1, + 62, + 0, + "*" + ], + [ + 92, + 5, + 2, + 63, + 0, + "*" + ], + [ + 93, + 61, + 0, + 64, + 0, + "*" + ], + [ + 94, + 59, + 0, + 3, + 1, + "STRING" + ], + [ + 95, + 60, + 0, + 9, + 1, + "STRING" + ], + [ + 96, + 62, + 0, + 65, + 0, + "*" + ], + [ + 97, + 64, + 0, + 66, 
+ 0, + "*" + ], + [ + 98, + 65, + 0, + 3, + 0, + "CLIP" + ], + [ + 99, + 65, + 0, + 9, + 0, + "CLIP" + ], + [ + 100, + 66, + 0, + 67, + 0, + "*" + ], + [ + 102, + 63, + 0, + 68, + 0, + "*" + ], + [ + 103, + 68, + 0, + 69, + 0, + "*" + ], + [ + 104, + 3, + 0, + 70, + 0, + "*" + ], + [ + 105, + 9, + 0, + 71, + 0, + "*" + ], + [ + 112, + 56, + 0, + 75, + 0, + "*" + ], + [ + 114, + 75, + 0, + 76, + 0, + "*" + ], + [ + 117, + 49, + 0, + 77, + 0, + "*" + ], + [ + 119, + 78, + 0, + 17, + 0, + "STRING" + ], + [ + 123, + 80, + 0, + 79, + 0, + "*" + ], + [ + 130, + 74, + 0, + 82, + 0, + "*" + ], + [ + 132, + 83, + 0, + 84, + 0, + "*" + ], + [ + 133, + 83, + 1, + 85, + 0, + "*" + ], + [ + 136, + 87, + 0, + 34, + 1, + "INT" + ], + [ + 137, + 86, + 0, + 34, + 2, + "INT" + ], + [ + 138, + 69, + 0, + 88, + 0, + "*" + ], + [ + 140, + 21, + 0, + 6, + 3, + "LATENT" + ], + [ + 141, + 46, + 0, + 72, + 0, + "*" + ], + [ + 142, + 46, + 1, + 73, + 0, + "*" + ], + [ + 143, + 88, + 0, + 89, + 0, + "*" + ], + [ + 145, + 72, + 0, + 90, + 0, + "*" + ], + [ + 146, + 73, + 0, + 91, + 0, + "*" + ], + [ + 147, + 90, + 0, + 6, + 1, + "CONDITIONING" + ], + [ + 148, + 91, + 0, + 6, + 2, + "CONDITIONING" + ], + [ + 152, + 93, + 0, + 94, + 0, + "FRAME_COUNTER" + ], + [ + 153, + 94, + 0, + 6, + 4, + "FLOAT" + ], + [ + 154, + 95, + 0, + 17, + 1, + "INT" + ], + [ + 155, + 82, + 0, + 80, + 0, + "*" + ], + [ + 156, + 78, + 0, + 96, + 0, + "*" + ], + [ + 159, + 98, + 0, + 22, + 2, + "STRING" + ], + [ + 161, + 77, + 0, + 100, + 0, + "*" + ], + [ + 162, + 100, + 0, + 18, + 1, + "IMAGE" + ], + [ + 176, + 84, + 0, + 111, + 0, + "*" + ], + [ + 177, + 111, + 0, + 87, + 0, + "*" + ], + [ + 179, + 85, + 0, + 112, + 0, + "*" + ], + [ + 180, + 112, + 0, + 86, + 0, + "*" + ], + [ + 187, + 18, + 0, + 34, + 0, + "IMAGE" + ], + [ + 208, + 104, + 0, + 119, + 0, + "RGB_PALETTE" + ], + [ + 209, + 103, + 0, + 119, + 1, + "RGB_PALETTE" + ], + [ + 210, + 119, + 1, + 118, + 0, + "*" + ], + [ + 211, + 119, + 0, + 121, + 0, + "*" + ], + [ + 213, + 100, + 0, + 123, + 0, + "*" + ], + [ + 214, + 123, + 0, + 124, + 0, + "*" + ], + [ + 215, + 124, + 0, + 103, + 0, + "IMAGE" + ], + [ + 217, + 125, + 0, + 104, + 0, + "IMAGE" + ], + [ + 218, + 97, + 0, + 126, + 0, + "*" + ], + [ + 219, + 126, + 0, + 98, + 0, + "*" + ], + [ + 220, + 125, + 0, + 127, + 0, + "*" + ], + [ + 221, + 127, + 0, + 122, + 0, + "IMAGE" + ], + [ + 222, + 122, + 0, + 128, + 0, + "IMAGE" + ], + [ + 223, + 130, + 0, + 129, + 1, + "FLOAT" + ], + [ + 224, + 121, + 0, + 129, + 0, + "FLOAT" + ], + [ + 225, + 129, + 0, + 131, + 0, + "*" + ], + [ + 226, + 130, + 0, + 132, + 1, + "FLOAT" + ], + [ + 227, + 118, + 0, + 132, + 0, + "FLOAT" + ], + [ + 228, + 131, + 0, + 128, + 1, + "FLOAT" + ], + [ + 229, + 132, + 0, + 133, + 0, + "*" + ], + [ + 230, + 133, + 0, + 122, + 1, + "FLOAT" + ], + [ + 231, + 89, + 0, + 134, + 0, + "*" + ], + [ + 232, + 134, + 0, + 21, + 1, + "VAE" + ], + [ + 233, + 92, + 0, + 135, + 0, + "*" + ], + [ + 234, + 135, + 0, + 6, + 0, + "MODEL" + ], + [ + 235, + 128, + 0, + 21, + 0, + "IMAGE" + ], + [ + 239, + 139, + 0, + 138, + 0, + "*" + ], + [ + 240, + 140, + 0, + 139, + 0, + "*" + ], + [ + 241, + 141, + 0, + 140, + 0, + "*" + ], + [ + 246, + 77, + 0, + 143, + 0, + "*" + ], + [ + 247, + 143, + 0, + 50, + 0, + "IMAGE" + ], + [ + 251, + 70, + 0, + 146, + 0, + "*" + ], + [ + 252, + 71, + 0, + 147, + 0, + "*" + ], + [ + 253, + 146, + 0, + 142, + 0, + "CONDITIONING" + ], + [ + 254, + 147, + 0, + 142, + 1, + "CONDITIONING" + ], + [ + 255, + 142, + 0, + 46, + 0, + "CONDITIONING" + ], + [ 
+ 256, + 142, + 1, + 46, + 1, + "CONDITIONING" + ], + [ + 257, + 97, + 0, + 18, + 2, + "STRING" + ], + [ + 260, + 79, + 0, + 93, + 0, + "*" + ], + [ + 261, + 79, + 0, + 148, + 0, + "*" + ], + [ + 262, + 148, + 0, + 22, + 0, + "FRAME_COUNTER" + ], + [ + 263, + 134, + 0, + 149, + 0, + "*" + ], + [ + 264, + 149, + 0, + 7, + 1, + "VAE" + ], + [ + 266, + 22, + 0, + 151, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 267, + 151, + 0, + 37, + 0, + "ANIMATION_SEQUENCE" + ], + [ + 268, + 71, + 0, + 152, + 0, + "*" + ], + [ + 269, + 74, + 0, + 19, + 0, + "FRAME_COUNTER" + ], + [ + 270, + 34, + 0, + 153, + 0, + "*" + ], + [ + 271, + 153, + 0, + 154, + 0, + "*" + ], + [ + 272, + 154, + 0, + 155, + 0, + "*" + ], + [ + 273, + 155, + 0, + 125, + 0, + "*" + ], + [ + 278, + 52, + 0, + 157, + 0, + "FRAME_COUNTER" + ], + [ + 279, + 157, + 0, + 24, + 1, + "FLOAT" + ], + [ + 280, + 141, + 0, + 157, + 1, + "FLOAT" + ], + [ + 281, + 140, + 0, + 158, + 1, + "FLOAT" + ], + [ + 282, + 56, + 0, + 158, + 0, + "FRAME_COUNTER" + ], + [ + 283, + 158, + 0, + 25, + 1, + "FLOAT" + ], + [ + 284, + 139, + 0, + 159, + 1, + "FLOAT" + ], + [ + 285, + 75, + 0, + 159, + 0, + "FRAME_COUNTER" + ], + [ + 286, + 159, + 0, + 26, + 1, + "FLOAT" + ], + [ + 287, + 138, + 0, + 160, + 1, + "FLOAT" + ], + [ + 288, + 76, + 0, + 160, + 0, + "FRAME_COUNTER" + ], + [ + 289, + 160, + 0, + 57, + 1, + "FLOAT" + ], + [ + 292, + 7, + 0, + 162, + 1, + "IMAGE" + ], + [ + 294, + 163, + 0, + 162, + 0, + "UPSCALE_MODEL" + ], + [ + 295, + 67, + 0, + 167, + 0, + "*" + ], + [ + 296, + 167, + 0, + 92, + 0, + "*" + ], + [ + 303, + 162, + 0, + 170, + 0, + "IMAGE" + ], + [ + 304, + 170, + 0, + 22, + 1, + "IMAGE" + ], + [ + 305, + 145, + 0, + 46, + 2, + "CONTROL_NET" + ], + [ + 308, + 83, + 3, + 172, + 0, + "*" + ], + [ + 309, + 83, + 2, + 171, + 0, + "*" + ], + [ + 310, + 171, + 0, + 173, + 0, + "*" + ], + [ + 311, + 172, + 0, + 174, + 0, + "*" + ], + [ + 312, + 173, + 0, + 175, + 0, + "*" + ], + [ + 313, + 174, + 0, + 176, + 0, + "*" + ], + [ + 314, + 175, + 0, + 170, + 1, + "INT" + ], + [ + 315, + 176, + 0, + 170, + 2, + "INT" + ], + [ + 316, + 177, + 0, + 142, + 2, + "CONTROL_NET" + ], + [ + 317, + 154, + 0, + 178, + 0, + "IMAGE" + ], + [ + 318, + 178, + 0, + 142, + 3, + "IMAGE" + ], + [ + 319, + 179, + 0, + 94, + 1, + "FLOAT" + ], + [ + 320, + 180, + 0, + 94, + 2, + "FLOAT" + ], + [ + 321, + 181, + 0, + 179, + 0, + "*" + ], + [ + 322, + 182, + 0, + 181, + 0, + "*" + ], + [ + 323, + 183, + 0, + 180, + 0, + "*" + ], + [ + 324, + 184, + 0, + 183, + 0, + "*" + ], + [ + 330, + 189, + 0, + 188, + 1, + "LOG_ENTRY" + ], + [ + 331, + 190, + 0, + 188, + 2, + "LOG_ENTRY" + ], + [ + 332, + 48, + 0, + 191, + 0, + "*" + ], + [ + 333, + 191, + 0, + 74, + 0, + "*" + ], + [ + 335, + 96, + 0, + 192, + 0, + "*" + ], + [ + 336, + 192, + 0, + 188, + 9, + "STRING" + ], + [ + 337, + 192, + 0, + 97, + 0, + "*" + ], + [ + 338, + 38, + 0, + 189, + 0, + "STRING" + ], + [ + 339, + 38, + 1, + 190, + 0, + "STRING" + ], + [ + 340, + 48, + 0, + 188, + 0, + "FRAME_COUNTER" + ], + [ + 341, + 37, + 0, + 193, + 0, + "ANIMATION_SEQUENCE" + ] + ], + "groups": [ + { + "title": "Prompt Morphing", + "bounding": [ + -310, + -456, + 2356, + 646 + ], + "color": "#b58b2a", + "font_size": 24 + }, + { + "title": "ControlNet", + "bounding": [ + 3576, + -472, + 764, + 765 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Previous Frame", + "bounding": [ + 2804, + -188, + 748, + 385 + ], + "color": "#a1309b", + "font_size": 24 + }, + { + "title": "Save", + "bounding": [ + 7404, + -481, + 1434, + 
353 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Diffusion", + "bounding": [ + 5989, + -490, + 805, + 853 + ], + "color": "#88A", + "font_size": 24 + }, + { + "title": "Denoise curve", + "bounding": [ + 5532, + -169, + 396, + 501 + ], + "color": "#3f789e", + "font_size": 24 + }, + { + "title": "Align brightness/contrast", + "bounding": [ + 4583, + -378, + 860, + 767 + ], + "color": "#8AA", + "font_size": 24 + }, + { + "title": "Upscale", + "bounding": [ + 6810, + -454, + 496, + 459 + ], + "color": "#8A8", + "font_size": 24 + }, + { + "title": "Basic Settings", + "bounding": [ + -1247, + -793, + 788, + 1706 + ], + "color": "#88A", + "font_size": 24 + } + ], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/examples/test_colors.png b/custom_nodes/comfyui-dream-project/examples/test_colors.png new file mode 100644 index 0000000000000000000000000000000000000000..f9c63432cd96f0291a69fc34ead901e2e9c8a29e Binary files /dev/null and b/custom_nodes/comfyui-dream-project/examples/test_colors.png differ diff --git a/custom_nodes/comfyui-dream-project/image_processing.py b/custom_nodes/comfyui-dream-project/image_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..4e4da0325ff25b8cce6958c4520c41e5dc420a31 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/image_processing.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +import math + +import numpy +import torch +from PIL import Image, ImageDraw +from PIL.Image import Resampling + +from .categories import * +from .shared import ALWAYS_CHANGED_FLAG, convertTensorImageToPIL, DreamImageProcessor, \ + DreamImage, DreamMask +from .dreamtypes import SharedTypes, FrameCounter + + +class DreamImageMotion: + NODE_NAME = "Image Motion" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "zoom": ("FLOAT", {"default": 0.0, "min": -10, "max": 10, "step": 0.01}), + "mask_1_feather": ("INT", {"default": 0, "min": 0}), + "mask_1_overlap": ("INT", {"default": 0, "min": 0}), + "mask_2_feather": ("INT", {"default": 10, "min": 0}), + "mask_2_overlap": ("INT", {"default": 5, "min": 0}), + "mask_3_feather": ("INT", {"default": 15, "min": 0}), + "mask_3_overlap": ("INT", {"default": 5, "min": 0}), + "x_translation": ("FLOAT", {"default": 0.0, "min": -10, "max": 10, "step": 0.01}), + "y_translation": ("FLOAT", {"default": 0.0, "min": -10, "max": 10, "step": 0.01}), + } | SharedTypes.frame_counter, + "optional": { + "noise": ("IMAGE",), + "output_resize_width": ("INT", {"default": 0, "min": 0}), + "output_resize_height": ("INT", {"default": 0, "min": 0}) + } + } + + CATEGORY = NodeCategories.ANIMATION_TRANSFORMS + RETURN_TYPES = ("IMAGE", "MASK", "MASK", "MASK") + RETURN_NAMES = ("image", "mask1", "mask2", "mask3") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def _mk_PIL_image(self, size, color=None, mode="RGB") -> Image: + im = Image.new(mode=mode, size=size) + if color: + im.paste(color, (0, 0, size[0], size[1])) + return im + + def _convertPILToMask(self, image): + return torch.from_numpy(numpy.array(image.convert("L")).astype(numpy.float32) / 255.0) + + def _apply_feather(self, pil_image, area, feather): + feather = min((area[2] - area[0]) // 2 - 1, feather) + draw = ImageDraw.Draw(pil_image) + for i in range(1, feather + 1): + rect = [(area[0] + i - 1, area[1] + i - 1), (area[2] - i + 1, area[3] - i + 1)] + c = 255 - int(round(255.0 * (i / 
(feather + 1)))) + draw.rectangle(rect, fill=None, outline=(c, c, c)) + return pil_image + + def _make_mask(self, width, height, selection_area, feather, overlap): + complete_area = self._mk_PIL_image((width, height), "white") + draw = ImageDraw.Draw(complete_area) + (left, top, right, bottom) = selection_area + area = (left + overlap, top + overlap, right - overlap - 1, bottom - overlap - 1) + draw.rectangle(area, fill="black", width=0) + return self._apply_feather(complete_area, area, feather) + + def _make_resizer(self, output_resize_width, output_resize_height): + def bound(i): + return min(max(i, 1), 32767) + + if output_resize_height and output_resize_width: + return lambda img: img.resize((bound(output_resize_width), bound(output_resize_height)), Resampling.NEAREST) + else: + return lambda img: img + + def result(self, image: torch.Tensor, zoom, x_translation, y_translation, mask_1_feather, mask_1_overlap, + mask_2_feather, mask_2_overlap, mask_3_feather, mask_3_overlap, frame_counter: FrameCounter, + **other): + def _limit_range(f): + return max(-1.0, min(1.0, f)) + + def _motion(image: DreamImage, batch_counter, zoom, x_translation, y_translation, mask_1_overlap, + mask_2_overlap, + mask_3_overlap): + zoom = _limit_range(zoom / frame_counter.frames_per_second) + x_translation = _limit_range(x_translation / frame_counter.frames_per_second) + y_translation = _limit_range(y_translation / frame_counter.frames_per_second) + pil_image = image.pil_image + sz = self._make_resizer(other.get("output_resize_width", None), other.get("output_resize_height", None)) + noise = other.get("noise", None) + multiplier = math.pow(2, zoom) + resized_image = pil_image.resize((round(pil_image.width * multiplier), + round(pil_image.height * multiplier)), Resampling.BILINEAR) + + if noise is None: + base_image = self._mk_PIL_image(pil_image.size, "black") + else: + base_image = convertTensorImageToPIL(noise).resize(pil_image.size, Resampling.BILINEAR) + + selection_offset = (round(x_translation * pil_image.width), round(y_translation * pil_image.height)) + selection = ((pil_image.width - resized_image.width) // 2 + selection_offset[0], + (pil_image.height - resized_image.height) // 2 + selection_offset[1], + (pil_image.width - resized_image.width) // 2 + selection_offset[0] + resized_image.width, + (pil_image.height - resized_image.height) // 2 + selection_offset[1] + resized_image.height) + base_image.paste(resized_image, selection) + + mask_1_overlap = min(pil_image.width // 3, min(mask_1_overlap, pil_image.height // 3)) + mask_2_overlap = min(pil_image.width // 3, min(mask_2_overlap, pil_image.height // 3)) + mask_3_overlap = min(pil_image.width // 3, min(mask_3_overlap, pil_image.height // 3)) + mask1 = self._make_mask(pil_image.width, pil_image.height, selection, mask_1_feather, mask_1_overlap) + mask2 = self._make_mask(pil_image.width, pil_image.height, selection, mask_2_feather, mask_2_overlap) + mask3 = self._make_mask(pil_image.width, pil_image.height, selection, mask_3_feather, mask_3_overlap) + + return (DreamImage(pil_image=sz(base_image)), + DreamMask(pil_image=sz(mask1)), + DreamMask(pil_image=sz(mask2)), + DreamMask(pil_image=sz(mask3))) + + proc = DreamImageProcessor(image, + zoom=zoom, + x_translation=x_translation, + y_translation=y_translation, + mask_1_overlap=mask_1_overlap, + mask_2_overlap=mask_2_overlap, + mask_3_overlap=mask_3_overlap) + return proc.process(_motion) diff --git a/custom_nodes/comfyui-dream-project/inputfields.py b/custom_nodes/comfyui-dream-project/inputfields.py 
new file mode 100644 index 0000000000000000000000000000000000000000..50b3937eba568a71f2fd2e2cd737fcbbe07ebb29 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/inputfields.py @@ -0,0 +1,100 @@ +from .categories import * +from .shared import * + +class DreamInputText: + NODE_NAME = "Text Input" + ICON = "✍" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("STRING", {"default": "", "multiline": True}), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("STRING",) + FUNCTION = "noop" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def noop(self, value): + return (value,) + +class DreamInputString: + NODE_NAME = "String Input" + ICON = "✍" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("STRING", {"default": "", "multiline": False}), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("STRING",) + FUNCTION = "noop" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def noop(self, value): + return (value,) + + +class DreamInputFloat: + NODE_NAME = "Float Input" + ICON = "✍" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 0.0}), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("FLOAT",) + FUNCTION = "noop" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def noop(self, value): + return (value,) + + +class DreamInputInt: + NODE_NAME = "Int Input" + ICON = "✍" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("INT", {"default": 0}), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("INT",) + FUNCTION = "noop" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def noop(self, value): + return (value,) diff --git a/custom_nodes/comfyui-dream-project/install.py b/custom_nodes/comfyui-dream-project/install.py new file mode 100644 index 0000000000000000000000000000000000000000..28e6daa194d85857dbbccfc162340cdc7a93cd77 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/install.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +from .shared import DreamConfig + + +def setup_default_config(): + DreamConfig() + + +def run_install(): + setup_default_config() + + +if __name__ == "__main__": + run_install() diff --git a/custom_nodes/comfyui-dream-project/laboratory.py b/custom_nodes/comfyui-dream-project/laboratory.py new file mode 100644 index 0000000000000000000000000000000000000000..ebeae2c9120bd2627aaab8e8d0f591e7429f6dbb --- /dev/null +++ b/custom_nodes/comfyui-dream-project/laboratory.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- + +# -*- coding: utf-8 -*- + +import json + +from .categories import * +from .shared import ALWAYS_CHANGED_FLAG, DreamStateFile +from .dreamtypes import * + +_laboratory_state = DreamStateFile("laboratory") + + +class DreamLaboratory: + NODE_NAME = "Laboratory" + ICON = "🧪" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "key": ("STRING", {"default": "Random value " + str(random.randint(0, 1000000))}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "renew_policy": (["every frame", "first frame"],), + "min_value": ("FLOAT", {"default": 0.0}), + "max_value": ("FLOAT", {"default": 1.0}), + "mode": (["random uniform", "random bell", "ladder", 
"random walk"],), + }, + "optional": { + "step_size": ("FLOAT", {"default": 0.1}), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("FLOAT", "INT", LogEntry.ID) + RETURN_NAMES = ("FLOAT", "INT", "log_entry") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def _generate(self, seed, last_value, min_value, max_value, mode, step_size): + rnd = random.Random() + rnd.seed(seed) + + def jsonify(v: float): + return json.loads(json.dumps(v)) + + if mode == "random uniform": + return jsonify(self._mode_uniform(rnd, last_value, min_value, max_value, step_size)) + elif mode == "random bell": + return jsonify(self._mode_bell(rnd, last_value, min_value, max_value, step_size)) + elif mode == "ladder": + return jsonify(self._mode_ladder(rnd, last_value, min_value, max_value, step_size)) + else: + return jsonify(self._mode_walk(rnd, last_value, min_value, max_value, step_size)) + + def _mode_uniform(self, rnd: random.Random, last_value: float, min_value: float, max_value: float, step_size): + return rnd.random() * (max_value - min_value) + min_value + + def _mode_bell(self, rnd: random.Random, last_value: float, min_value: float, max_value: float, step_size): + s = 0.0 + for i in range(3): + s += rnd.random() * (max_value - min_value) + min_value + return s / 3.0 + + def _mode_ladder(self, rnd: random.Random, last_value: float, min_value: float, max_value: float, step_size): + if last_value is None: + last_value = min_value - step_size + next_value = last_value + step_size + if next_value > max_value: + d = abs(max_value - min_value) + next_value = (next_value - min_value) % d + min_value + return next_value + + def _mode_walk(self, rnd: random.Random, last_value: float, min_value: float, max_value: float, step_size): + if last_value is None: + last_value = (max_value - min_value) * 0.5 + if rnd.random() >= 0.5: + return min(max_value, last_value + step_size) + else: + return max(min_value, last_value - step_size) + + def result(self, key, frame_counter: FrameCounter, seed, renew_policy, min_value, max_value, mode, **values): + if min_value > max_value: + t = max_value + max_value = min_value + min_value = t + step_size = values.get("step_size", abs(max_value - min_value) * 0.1) + last_value = _laboratory_state.get_section("values").get(key, None) + + if (last_value is None) or (renew_policy == "every frame") or frame_counter.is_first_frame: + v = _laboratory_state.get_section("values") \ + .update(key, 0, lambda old: self._generate(seed, last_value, min_value, max_value, mode, step_size)) + return v, round(v), LogEntry.new( + "Laboratory generated new value for '{}': {} ({})".format(key, v, round(v))) + else: + return last_value, round(last_value), LogEntry.new("Laboratory reused value for '{}': {} ({})" + .format(key, last_value, round(last_value))) diff --git a/custom_nodes/comfyui-dream-project/license.txt b/custom_nodes/comfyui-dream-project/license.txt new file mode 100644 index 0000000000000000000000000000000000000000..604c43003ef2d5c157a53a4c86d46935220c32e4 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/license.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Morgan Johansson/Dream Project + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the 
Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/custom_nodes/comfyui-dream-project/loaders.py b/custom_nodes/comfyui-dream-project/loaders.py new file mode 100644 index 0000000000000000000000000000000000000000..d25dfe111275cde15938137c77c288a7c5cc0900 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/loaders.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +from .categories import NodeCategories +from .shared import ALWAYS_CHANGED_FLAG, list_images_in_directory, DreamImage +from .dreamtypes import SharedTypes, FrameCounter +import os + + +class DreamImageSequenceInputWithDefaultFallback: + NODE_NAME = "Image Sequence Loader" + ICON = "💾" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "directory_path": ("STRING", {"default": '', "multiline": False}), + "pattern": ("STRING", {"default": '*', "multiline": False}), + "indexing": (["numeric", "alphabetic order"],) + }, + "optional": { + "default_image": ("IMAGE", {"default": None}) + } + } + + CATEGORY = NodeCategories.IMAGE_ANIMATION + RETURN_TYPES = ("IMAGE","STRING") + RETURN_NAMES = ("image","frame_name") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def result(self, frame_counter: FrameCounter, directory_path, pattern, indexing, **other): + default_image = other.get("default_image", None) + entries = list_images_in_directory(directory_path, pattern, indexing == "alphabetic order") + entry = entries.get(frame_counter.current_frame, None) + if not entry: + return (default_image, "") + else: + image_names = [os.path.basename(file_path) for file_path in entry] + images = map(lambda f: DreamImage(file_path=f), entry) + return (DreamImage.join_to_tensor_data(images), image_names[0]) diff --git a/custom_nodes/comfyui-dream-project/node_list.json b/custom_nodes/comfyui-dream-project/node_list.json new file mode 100644 index 0000000000000000000000000000000000000000..1fa445b1471e8483181bf2852afa55c3b404e4bd --- /dev/null +++ b/custom_nodes/comfyui-dream-project/node_list.json @@ -0,0 +1,59 @@ +{ + "Analyze Palette [Dream]": "Output brightness, contrast, red, green and blue averages of a palette", + "Beat Curve [Dream]": "Beat pattern curve with impulses at specified beats of a measure", + "Big Float Switch [Dream]": "Switch for up to 10 inputs", + "Big Image Switch [Dream]": "Switch for up to 10 inputs", + "Big Int Switch [Dream]": "Switch for up to 10 inputs", + "Big Latent Switch [Dream]": "Switch for up to 10 inputs", + "Big Palette Switch [Dream]": "Switch for up to 10 inputs", + "Big Text Switch [Dream]": "Switch for up to 10 inputs", + "Boolean To Float [Dream]": "Converts a boolean value to two different float values", + "Boolean To Int [Dream]": "Converts a boolean value to two different int values", + "Build Prompt [Dream]": "Weighted text prompt builder utility", + 
"CSV Curve [Dream]": "CSV input curve where first column is frame or second and second column is value", + "CSV Generator [Dream]": "CSV output, mainly for debugging purposes", + "Calculation [Dream]": "Mathematical calculation node", + "Common Frame Dimensions [Dream]": "Utility for calculating good width/height based on common video dimensions", + "Compare Palettes [Dream]": "Analyses two palettes producing the factor for each color channel", + "FFMPEG Video Encoder [Dream]": "Post processing for animation sequences calling FFMPEG to generate video file", + "File Count [Dream]": "Finds the number of files in a directory matching specified patterns", + "Finalize Prompt [Dream]": "Used in conjunction with 'Build Prompt'", + "Float Input [Dream]": "Float input (until primitive routing issues are solved)", + "Float to Log Entry [Dream]": "Logging for float values", + "Frame Count Calculator [Dream]": "Simple utility to calculate number of frames based on duration and framerate", + "Frame Counter (Directory) [Dream]": "Directory backed frame counter, for output directories", + "Frame Counter (Simple) [Dream]": "Integer value used as frame counter", + "Frame Counter Info [Dream]": "Extracts information from the frame counter", + "Frame Counter Offset [Dream]": "Adds an offset to a frame counter", + "Frame Counter Time Offset [Dream]": "Adds an offset to a frame counter in seconds", + "Image Brightness Adjustment [Dream]": "Adjusts the brightness of an image by a factor", + "Image Color Shift [Dream]": "Adjust the colors (or brightness) of an image", + "Image Contrast Adjustment [Dream]": "Adjusts the contrast of an image by a factor", + "Image Motion [Dream]": "Node supporting zooming in/out and translating an image", + "Image Sequence Blend [Dream]": "Post processing for animation sequences blending frame for a smoother blurred effect", + "Image Sequence Loader [Dream]": "Loads a frame from a directory of images", + "Image Sequence Saver [Dream]": "Saves a frame to a directory", + "Image Sequence Tweening [Dream]": "Post processing for animation sequences generating blended in-between frames", + "Int Input [Dream]": "Integer input (until primitive routing issues are solved)", + "Int to Log Entry [Dream]": "Logging for int values", + "Laboratory [Dream]": "Super-charged number generator for experimenting with ComfyUI", + "Linear Curve [Dream]": "Linear interpolation between two value over the full animation", + "Log Entry Joiner [Dream]": "Merges multiple log entries (reduces noodling)", + "Log File [Dream]": "Logging node for output to file", + "Noise from Area Palettes [Dream]": "Generates noise based on the colors of up to nine different palettes", + "Noise from Palette [Dream]": "Generates noise based on the colors in a palette", + "Palette Color Align [Dream]": "Shifts the colors of one palette towards another target palette", + "Palette Color Shift [Dream]": "Multiplies the color values in a palette", + "Sample Image Area as Palette [Dream]": "Samples a palette from an image based on pre-defined areas", + "Sample Image as Palette [Dream]": "Randomly samples pixel values to build a palette from an image", + "Saw Curve [Dream]": "Saw wave curve", + "Sine Curve [Dream]": "Simple sine wave curve", + "Smooth Event Curve [Dream]": "Single event/peak curve with a slight bell-shape", + "String Input [Dream]": "String input (until primitive routing issues are solved)", + "String Tokenizer [Dream]": "Extract individual words or phrases from a text as tokens", + "String to Log Entry [Dream]": "Use 
any string as a log entry", + "Text Input [Dream]": "Multiline string input (until primitive routing issues are solved)", + "Triangle Curve [Dream]": "Triangle wave curve", + "Triangle Event Curve [Dream]": "Single event/peak curve with triangular shape", + "WAV Curve [Dream]": "WAV audio file as a curve" +} \ No newline at end of file diff --git a/custom_nodes/comfyui-dream-project/noise.py b/custom_nodes/comfyui-dream-project/noise.py new file mode 100644 index 0000000000000000000000000000000000000000..6e01ee38576407299cdcd04597f9a46a064d9f89 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/noise.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +import math + +from .categories import NodeCategories +from .shared import * +from .dreamtypes import * + + +def _generate_noise(image: DreamImage, color_function, rng: random.Random, block_size, blur_amount, + density) -> DreamImage: + w = block_size[0] + h = block_size[1] + blur_radius = round(max(image.width, image.height) * blur_amount * 0.25) + if w <= (image.width // 128) or h <= (image.height // 128): + return image + max_placements = round(density * (image.width * image.height)) + num = min(max_placements, round((image.width * image.height * 2) / (w * h))) + for i in range(num): + x = rng.randint(-w + 1, image.width - 1) + y = rng.randint(-h + 1, image.height - 1) + image.color_area(x, y, w, h, color_function(x + (w >> 1), y + (h >> 1))) + image = image.blur(blur_radius) + return _generate_noise(image, color_function, rng, (w >> 1, h >> 1), blur_amount, density) + + +class DreamNoiseFromPalette: + NODE_NAME = "Noise from Palette" + ICON = "🌫" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.palette | { + "width": ("INT", {"default": 512, "min": 1, "max": 8192}), + "height": ("INT", {"default": 512, "min": 1, "max": 8192}), + "blur_amount": ("FLOAT", {"default": 0.3, "min": 0, "max": 1.0, "step": 0.05}), + "density": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.025}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}) + }, + } + + CATEGORY = NodeCategories.IMAGE_GENERATE + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def result(self, palette: Tuple[RGBPalette], width, height, seed, blur_amount, density): + outputs = list() + rng = random.Random() + for p in palette: + seed += 1 + color_iterator = p.random_iteration(seed) + image = DreamImage(pil_image=Image.new("RGB", (width, height), color=next(color_iterator))) + image = _generate_noise(image, lambda x, y: next(color_iterator), rng, + (image.width >> 1, image.height >> 1), blur_amount, density) + outputs.append(image) + + return (DreamImage.join_to_tensor_data(outputs),) + + +class DreamNoiseFromAreaPalettes: + NODE_NAME = "Noise from Area Palettes" + + @classmethod + def INPUT_TYPES(cls): + return { + "optional": { + "top_left_palette": (RGBPalette.ID,), + "top_center_palette": (RGBPalette.ID,), + "top_right_palette": (RGBPalette.ID,), + "center_left_palette": (RGBPalette.ID,), + "center_palette": (RGBPalette.ID,), + "center_right_palette": (RGBPalette.ID,), + "bottom_left_palette": (RGBPalette.ID,), + "bottom_center_palette": (RGBPalette.ID,), + "bottom_right_palette": (RGBPalette.ID,), + }, + "required": { + "area_sharpness": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.05}), + "width": ("INT", {"default": 512, "min": 1, "max": 8192}), + "height": ("INT", {"default": 512, "min": 1, "max": 
8192}), + "blur_amount": ("FLOAT", {"default": 0.3, "min": 0, "max": 1.0, "step": 0.05}), + "density": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.025}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + CATEGORY = NodeCategories.IMAGE_GENERATE + ICON = "🌫" + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def _area_coordinates(self, width, height): + dx = width / 6 + dy = height / 6 + return { + "top_left_palette": (dx, dy), + "top_center_palette": (dx * 3, dy), + "top_right_palette": (dx * 5, dy), + "center_left_palette": (dx, dy * 3), + "center_palette": (dx * 3, dy * 3), + "center_right_palette": (dx * 5, dy * 3), + "bottom_left_palette": (dx * 1, dy * 5), + "bottom_center_palette": (dx * 3, dy * 5), + "bottom_right_palette": (dx * 5, dy * 5), + } + + def _pick_random_area(self, active_coordinates, x, y, rng, area_sharpness): + def _dst(x1, y1, x2, y2): + a = x1 - x2 + b = y1 - y2 + return math.sqrt(a * a + b * b) + + distances = list(map(lambda item: (item[0], _dst(item[1][0], item[1][1], x, y)), active_coordinates)) + areas_by_weight = list( + map(lambda item: (math.pow((1.0 / max(1, item[1])), 0.5 + 4.5 * area_sharpness), item[0]), distances)) + return pick_random_by_weight(areas_by_weight, rng) + + def _setup_initial_colors(self, image: DreamImage, color_func): + w = image.width + h = image.height + wpart = round(w / 3) + hpart = round(h / 3) + for i in range(3): + for j in range(3): + image.color_area(wpart * i, hpart * j, w, h, + color_func(wpart * i + w // 2, hpart * j + h // 2)) + + def result(self, width, height, seed, blur_amount, density, area_sharpness, **palettes): + outputs = list() + rng = random.Random() + coordinates = self._area_coordinates(width, height) + active_palettes = list(filter(lambda pair: pair[1] is not None and len(pair[1]) > 0, palettes.items())) + active_coordinates = list(map(lambda item: (item[0], coordinates[item[0]]), active_palettes)) + + n = max(list(map(len, palettes.values())) + [0]) + for b in range(n): + batch_palettes = dict(map(lambda item: (item[0], item[1][b].random_iteration(seed)), active_palettes)) + + def _color_func(x, y): + name = self._pick_random_area(active_coordinates, x, y, rng, area_sharpness) + rgb = batch_palettes[name] + return next(rgb) + + image = DreamImage(pil_image=Image.new("RGB", (width, height))) + self._setup_initial_colors(image, _color_func) + image = _generate_noise(image, _color_func, rng, (round(image.width / 3), round(image.height / 3)), + blur_amount, density) + outputs.append(image) + + if not outputs: + outputs.append(DreamImage(pil_image=Image.new("RGB", (width, height)))) + + return (DreamImage.join_to_tensor_data(outputs),) diff --git a/custom_nodes/comfyui-dream-project/output.py b/custom_nodes/comfyui-dream-project/output.py new file mode 100644 index 0000000000000000000000000000000000000000..af092c0e3612683c4e9398d73755857b578b1ef8 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/output.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +import json +import os + +import folder_paths as comfy_paths +from PIL.PngImagePlugin import PngInfo + +from .categories import NodeCategories +from .shared import hashed_as_strings, DreamImageProcessor, DreamImage, \ + list_images_in_directory, DreamConfig +from .dreamtypes import SharedTypes, FrameCounter, AnimationSequence, LogEntry + +CONFIG = DreamConfig() + + +def _save_png(pil_image, filepath, 
embed_info, prompt, extra_pnginfo): + info = PngInfo() + if extra_pnginfo is not None: + for item in extra_pnginfo: + info.add_text(item, json.dumps(extra_pnginfo[item])) + if prompt is not None: + info.add_text("prompt", json.dumps(prompt)) + if embed_info: + pil_image.save(filepath, pnginfo=info, optimize=True) + else: + pil_image.save(filepath, optimize=True) + + +def _save_jpg(pil_image, filepath, quality): + pil_image.save(filepath, quality=quality, optimize=True) + + +class DreamImageSequenceOutput: + NODE_NAME = "Image Sequence Saver" + ICON = "💾" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "image": ("IMAGE",), + "directory_path": ("STRING", {"default": comfy_paths.output_directory, "multiline": False}), + "prefix": ("STRING", {"default": 'frame', "multiline": False}), + "digits": ("INT", {"default": 5}), + "at_end": (["stop output", "raise error", "keep going"],), + "filetype": (['png with embedded workflow', "png", 'jpg'],), + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO" + }, + } + + CATEGORY = NodeCategories.IMAGE_ANIMATION + RETURN_TYPES = (AnimationSequence.ID, LogEntry.ID) + OUTPUT_NODE = True + RETURN_NAMES = ("sequence", "log_entry") + FUNCTION = "save" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def _get_new_filename(self, current_frame, prefix, digits, filetype): + return prefix + "_" + str(current_frame).zfill(digits) + "." + filetype.split(" ")[0] + + def _save_single_image(self, dream_image: DreamImage, batch_counter, frame_counter: FrameCounter, + directory_path, + prefix, digits, filetype, prompt, extra_pnginfo, at_end, logger): + + if at_end == "stop output" and frame_counter.is_after_last_frame: + logger("Reached end of animation - not saving output!") + return () + if at_end == "raise error" and frame_counter.is_after_last_frame: + logger("Reached end of animation - raising error to stop processing!") + raise Exception("Reached end of animation!") + filename = self._get_new_filename(frame_counter.current_frame, prefix, digits, filetype) + if batch_counter >= 0: + filepath = os.path.join(directory_path, "batch_" + (str(batch_counter).zfill(4)), filename) + else: + filepath = os.path.join(directory_path, filename) + save_dir = os.path.dirname(filepath) + if not os.path.isdir(save_dir): + os.makedirs(save_dir) + if filetype.startswith("png"): + dream_image.save_png(filepath, filetype == 'png with embedded workflow', prompt, extra_pnginfo) + elif filetype == "jpg": + dream_image.save_jpg(filepath, int(CONFIG.get("encoding.jpeg_quality", 95))) + logger("Saved {} in {}".format(filename, os.path.abspath(save_dir))) + return () + + def _generate_animation_sequence(self, filetype, directory_path, frame_counter): + if filetype.startswith("png"): + pattern = "*.png" + else: + pattern = "*.jpg" + frames = list_images_in_directory(directory_path, pattern, False) + return AnimationSequence(frame_counter, frames) + + def save(self, image, **args): + log_texts = list() + logger = lambda s: log_texts.append(s) + if not args.get("directory_path", ""): + args["directory_path"] = comfy_paths.output_directory + args["logger"] = logger + proc = DreamImageProcessor(image, **args) + proc.process(self._save_single_image) + frame_counter: FrameCounter = args["frame_counter"] + log_entry = LogEntry([]) + for text in log_texts: + log_entry = log_entry.add(text) + if frame_counter.is_final_frame: + return (self._generate_animation_sequence(args["filetype"], 
args["directory_path"], + frame_counter), log_entry) + else: + return (AnimationSequence(frame_counter), log_entry) diff --git a/custom_nodes/comfyui-dream-project/prompting.py b/custom_nodes/comfyui-dream-project/prompting.py new file mode 100644 index 0000000000000000000000000000000000000000..b725eaf37765584ddd666deb63c1d104114ecafe --- /dev/null +++ b/custom_nodes/comfyui-dream-project/prompting.py @@ -0,0 +1,69 @@ +from .categories import NodeCategories +from .shared import hashed_as_strings +from .dreamtypes import PartialPrompt + + +class DreamWeightedPromptBuilder: + NODE_NAME = "Build Prompt" + ICON = "⚖" + + @classmethod + def INPUT_TYPES(cls): + return { + "optional": { + "partial_prompt": (PartialPrompt.ID,) + }, + "required": { + "added_prompt": ("STRING", {"default": "", "multiline": True}), + "weight": ("FLOAT", {"default": 1.0}), + }, + } + + CATEGORY = NodeCategories.CONDITIONING + RETURN_TYPES = (PartialPrompt.ID,) + RETURN_NAMES = ("partial_prompt",) + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, added_prompt, weight, **args): + input = args.get("partial_prompt", PartialPrompt()) + p = input.add(added_prompt, weight) + return (p,) + + +class DreamPromptFinalizer: + NODE_NAME = "Finalize Prompt" + ICON = "🗫" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "partial_prompt": (PartialPrompt.ID,), + "adjustment": (["raw", "by_abs_max", "by_abs_sum"],), + "clamp": ("FLOAT", {"default": 2.0, "min": 0.1, "step": 0.1}), + "adjustment_reference": ("FLOAT", {"default": 1.0, "min": 0.1}), + }, + } + + CATEGORY = NodeCategories.CONDITIONING + RETURN_TYPES = ("STRING", "STRING") + RETURN_NAMES = ("positive", "negative") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(*values) + + def result(self, partial_prompt: PartialPrompt, adjustment, adjustment_reference, clamp): + if adjustment == "raw" or partial_prompt.is_empty(): + return partial_prompt.finalize(clamp) + elif adjustment == "by_abs_sum": + f = adjustment_reference / partial_prompt.abs_sum() + return partial_prompt.scaled_by(f).finalize(clamp) + else: + f = adjustment_reference / partial_prompt.abs_max() + return partial_prompt.scaled_by(f).finalize(clamp) diff --git a/custom_nodes/comfyui-dream-project/readme.md b/custom_nodes/comfyui-dream-project/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..67efe98d13a5f1ded9e99906d5b6d4b1b9ec2013 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/readme.md @@ -0,0 +1,336 @@ +# Dream Project Animation Nodes for ComfyUI + +This repository contains various nodes for supporting Deforum-style animation generation with ComfyUI. I created these +for my own use (producing videos for my "Alt Key Project" music - +[youtube channel](https://www.youtube.com/channel/UC4cKvJ4hia7zULxeCc-7OcQ)), but I think they should be generic enough +and useful to many ComfyUI users. + +I have demonstrated the use of these custom nodes in this [youtube video](https://youtu.be/pZ6Li3qF-Kk). + +# Notice! + +This custom node pack is currently not being updated. Stable Diffusion video generation is moving towards a different +workflow with AnimateDiff and Stable Video Diffusion. 
I decided not to try to update this node pack, but I am instead +creating a separate custom node pack here: + +[github](https://github.com/alt-key-project/comfyui-dream-video-batches) + +This new node pack will be getting my attention from now on (at least as long as stable diffusion video generation is done mostly +in batches). + +## Installation + +### Simple option + +You can install Dream Project Animation Nodes using the ComfyUI Manager. + +### Manual option + +Run within the (ComfyUI)/custom_nodes/ folder: + +* git clone https://github.com/alt-key-project/comfyui-dream-project.git +* cd comfyui-dream-project + +Then, if you are using the python embedded in ComfyUI: +* (ComfyUI)/python_embedded/python.exe -s -m pip install -r requirements.txt + +With your system-wide python: +* pip install -r requirements.txt + +Finally: +* Start ComfyUI. + +After startup, a configuration file 'config.json' should have been created in the 'comfyui-dream-project' directory. +Specifically, check that the path to ffmpeg works on your system (add the full path to the command if needed). + +## Upgrade + +When upgrading, it is good to re-run the pip install command as specified in the install section. This will install any +new dependencies. + +## Configuration + +### debug + +Setting this to true will enable some trace-level logging. + +### ffmpeg.file_extension + +Sets the output file extension and, with that, the container format used. + +### ffmpeg.path + +Path to the ffmpeg executable, or just the command if ffmpeg is in PATH. + +### ffmpeg.arguments + +The arguments sent to FFMPEG. A few of the values are provided by the node: + +* %FPS% the target framerate +* %FRAMES% a frame input file +* %OUTPUT% the output video file path + +### encoding.jpeg_quality + +Sets the encoding quality of jpeg images. + +### ui.top_category + +Sets the name of the top level category on the menu. Set to the empty string "" to remove the top level. If the top level +is removed, you may also want to disable the category icons to get nodes into existing category folders. + +### prepend_icon_to_category / append_icon_to_category + +Flags to add an icon before and/or after the category name at each level. + +### prepend_icon_to_node / append_icon_to_node + +Flags to add an icon before and/or after the node name. + +### ui.category_icons + +Each key defines a unicode symbol as an icon used for the specified category. + +### mpeg_coder.bitrate_factor + +This factor allows changing the bitrate to better fit the required quality and codec. A value of 1 is typically +suitable for H.265. + +### mpeg_coder.codec_name + +Codec names as specified by ffmpeg. Some common options include "libx264", "libx265" and "mpeg2video". + +### mpeg_coder.encoding_threads + +Increasing the number of encoding threads in mpegCoder will generally reduce the overall encoding time, but it will also +increase the load on the computer. + +### mpeg_coder.file_extension + +Sets the output file extension and, with that, the container format used. + +### mpeg_coder.max_b_frame + +Sets the max-b-frames parameter as specified in ffmpeg. + +## Concepts used + +These are some concepts used in the nodes: + +### Frame Counter + +The frame counter is an abstraction that keeps track of where we are in the animation - what frame is rendered +and how the current frame fits into the overall animation. + +### Curves + +A curve is simply a node that produces a value based on the frame counter (changing over time). + +### Palette + +A palette is a collection of color values (illustrated in the sketch below).
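+
+As a rough mental model of these concepts (a hypothetical sketch, not the actual node classes), a curve maps the
+frame counter's progress to a value, and a palette is just a list of RGB tuples:
+
+```python
+# Hypothetical sketch - names and structure do not match the real node code.
+def linear_curve(initial: float, final: float, progress: float) -> float:
+    # progress is 0.0 at the first frame and 1.0 at the last frame
+    return initial + (final - initial) * progress
+
+palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # a palette: a collection of colors
+
+total_frames = 100
+for frame in range(total_frames):
+    progress = frame / (total_frames - 1)
+    zoom = linear_curve(1.0, 1.25, progress)  # e.g. drive an Image Motion zoom factor
+    print(f"frame {frame}: zoom={zoom:.3f}")
+```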
+ +### Sequence + +A sequence is a full set of animation frames and a corresponding timeline for these frames. A sequence is +created by the 'Image Sequence Saver' node and it may be used to trigger post-processing tasks such as generating the +video file using ffmpeg. These nodes should be seen as a convenience and they are severely limited. Never put sequence +nodes in parallel - they will not work as intended! + +## The nodes +### Analyze Palette [Dream] +Output brightness, red, green and blue averages of a palette. Useful to control other processing. + +### Beat Curve [Dream] +Beat pattern curve with impulses at specified beats of a measure. + +### Big *** Switch [Dream] +Switch nodes for different types, with up to ten inputs. + +### Boolean To Float/Int [Dream] +Converts a boolean value to one of two different numeric values. + +### Build Prompt [Dream] (and Finalize Prompt [Dream]) +Weighted text prompt builder utility. Chain any number of these nodes and terminate with 'Finalize Prompt'. + +### Calculation [Dream] +Mathematical calculation node. Exposes most of the mathematical functions in the python +[math module](https://docs.python.org/3/library/math.html), mathematical operators as well as round, abs, int, +float, max and min. + +### Compare Palettes [Dream] +Analyses two palettes and produces the quotient for each individual channel (b/a) and brightness. + +### CSV Curve [Dream] +CSV input curve where the first column is the frame number (or seconds) and the second column is the value. + +### CSV Generator [Dream] +CSV output, mainly for debugging purposes. First column is the frame number and second is the value. +Recreates the file at frame 0 (removing any existing content in the file). + +### Common Frame Dimensions [Dream] +Utility for calculating good width/height based on common video dimensions. + +### Video Encoder (FFMPEG) [Dream] +Post-processing for animation sequences, calling FFMPEG to generate video files. + +### File Count [Dream] +Finds the number of files in a directory matching specified patterns. + +### Float/Int/String to Log Entry [Dream] +Logging for float/int/string values. + +### Frame Count Calculator [Dream] +Simple utility to calculate the number of frames based on time and framerate. + +### Frame Counter (Directory) [Dream] +Directory-backed frame counter, for output directories. + +### Frame Counter (Simple) [Dream] +Integer value used as a frame counter. Useful for testing or if an auto-incrementing primitive is used as a frame +counter. + +### Frame Counter Info [Dream] +Extracts information from the frame counter. + +### Frame Counter Offset [Dream] +Adds an offset (in frames) to a frame counter. + +### Frame Counter Time Offset [Dream] +Adds an offset in seconds to a frame counter. + +### Image Brightness Adjustment [Dream] +Adjusts the brightness of an image by a factor. + +### Image Color Shift [Dream] +Allows changing the colors of an image with a multiplier for each channel (RGB). + +### Image Contrast Adjustment [Dream] +Adjusts the contrast of an image by a factor. + +### Image Motion [Dream] +Node supporting zooming in/out and translating an image. + +### Image Sequence Blend [Dream] +Post-processing for animation sequences, blending frames for a smoother, blurred effect. + +### Image Sequence Loader [Dream] +Loads a frame from a directory of images. + +### Image Sequence Saver [Dream] +Saves a frame to a directory. + +### Image Sequence Tweening [Dream] +Post-processing for animation sequences, generating blended in-between frames (see the sketch below).
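+
+Conceptually, a multiplier of m replaces each source frame with m alpha-blended steps towards the next frame (the
+sequence framerate is scaled up to match). A minimal PIL sketch of that blending, for illustration only (not the
+actual node code):
+
+```python
+from PIL import Image
+
+def tween(frame_a: Image.Image, frame_b: Image.Image, multiplier: int):
+    # Yield `multiplier` frames blending from frame_a towards frame_b;
+    # the last yielded frame equals frame_b (alpha = 1.0).
+    for i in range(multiplier):
+        alpha = (i + 1) / multiplier
+        yield Image.blend(frame_a, frame_b, alpha)
+```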
+ +### Laboratory [Dream] +Super-charged number generator for experimenting with ComfyUI. + +### Log Entry Joiner [Dream] +Merges multiple log entries (reduces noodling). + +### Log File [Dream] +The text logging facility for the Dream Project Animation nodes. + +### Linear Curve [Dream] +Linear interpolation between two values over the full animation. + +### Noise from Area Palettes [Dream] +Generates noise based on the colors of up to nine different palettes, each connected to a position/area of the +image. Although the palettes are optional, at least one palette should be provided. + +### Noise from Palette [Dream] +Generates noise based on the colors in a palette. + +### Palette Color Align [Dream] +Shifts the colors of one palette towards another target palette. If the alignment factor +is 0.5, the result is nearly an average of the two palettes. At 0 no alignment is done and at 1 we get a close +alignment to the target. Above 1, the alignment overshoots. + +### Palette Color Shift [Dream] +Multiplies the color values in a palette to shift the color balance or brightness. + +### Sample Image Area as Palette [Dream] +Randomly samples a palette from an image based on pre-defined areas. The image is separated into nine rectangular areas +of equal size and each node may sample one of these. + +### Sample Image as Palette [Dream] +Randomly samples pixels from a source image to build a palette from it. + +### Saw Curve [Dream] +Saw wave curve. + +### Sine Curve [Dream] +Simple sine wave curve. + +### Smooth Event Curve [Dream] +Single event/peak curve with a slight bell shape. + +### String Tokenizer [Dream] +Splits a text into tokens by a separator and returns one of the tokens based on a given index. + +### Triangle Curve [Dream] +Triangle wave curve. + +### Triangle Event Curve [Dream] +Single event/peak curve with a triangular shape. + +### WAV Curve [Dream] +Use an uncompressed WAV audio file as a curve. + +### Other custom nodes + +Many of the nodes found in 'WAS Node Suite' are useful together with the Dream Project Animation nodes - I suggest you install those +custom nodes as well! + +## Examples + +### Image Motion with Curves + +This example should be a starting point for anyone wanting to build with the Dream Project Animation nodes. + +[motion-workflow-example](examples/motion-workflow-example.json) + +### Image Motion with Color Coherence + +Same as above but with added color coherence through palettes. + +[motion-workflow-with-color-coherence](examples/motion-workflow-with-color-coherence.json) + +### Area Sampled Noise + +This flow demonstrates sampling image areas into palettes and generating noise for these areas. + +[area-sampled-noise](examples/area-sampled-noise.json) + +### Prompt Morphing + +This flow demonstrates prompt building with weights based on curves, as well as brightness and contrast control. + +[prompt-morphing](examples/prompt-morphing.json) + +### Laboratory + +This flow demonstrates use of the Laboratory and Logging nodes. + +[laboratory](examples/laboratory.json) + +## Known issues + +### FFMPEG + +The call to FFMPEG currently in the default configuration (in config.json) does not seem to work for everyone.
The good +news is that you can change the arguments to whatever works for you - the node-supplied parameters (that probably all need to be in the call) +are: + +* -i %FRAMES% (the input file listing the frames) +* -r %FPS% (sets the frame rate) +* %OUTPUT% (the path to the video file) + +If possible, I will change the default configuration to one that more versions/builds of ffmpeg will accept. Do let me +know what arguments are causing issues for you! + +### Framerate is not always right with the mpegCoder encoding node + +The mpegCoder library will always use variable frame rate encoding if it is available in the output format. With most +outputs this means that your actual framerate will differ slightly from the requested one. diff --git a/custom_nodes/comfyui-dream-project/requirements.txt b/custom_nodes/comfyui-dream-project/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cad039a7a69429da7516c9ccb053df9ae72a5ba7 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/requirements.txt @@ -0,0 +1,6 @@ +imageio +pilgram +scipy +numpy>=1.18,<1.24 +torchvision +evalidate diff --git a/custom_nodes/comfyui-dream-project/seq_processing.py b/custom_nodes/comfyui-dream-project/seq_processing.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba1ad332d09e4f1cad1a6e5fa652e28714dc1e3 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/seq_processing.py @@ -0,0 +1,343 @@ +# -*- coding: utf-8 -*- +import os +import random +import shutil +import subprocess +import tempfile +from functools import lru_cache + +from PIL import Image as PilImage + +from .categories import NodeCategories +from .err import on_error +from .shared import DreamConfig +#from .shared import MpegEncoderUtility +from .dreamtypes import * + +CONFIG = DreamConfig() + + +@lru_cache(5) +def _load_image_cached(filename): + return PilImage.open(filename) + + +class TempFileSet: + def __init__(self): + self._files = dict() + + def add(self, temppath, finalpath): + self._files[temppath] = finalpath + + def remove(self): + for f in self._files.keys(): + os.unlink(f) + + def finalize(self): + for a, b in self._files.items(): + shutil.move(a, b) + self._files = dict() + + +class AnimationSeqProcessor: + def __init__(self, sequence: AnimationSequence): + self._sequence = sequence + self._input_cache = {} + self._inputs = {} + self._output_dirs = {} + for b in self._sequence.batches: + self._inputs[b] = list(self._sequence.get_image_files_of_batch(b)) + self._output_dirs[b] = os.path.dirname(os.path.abspath(self._inputs[b][0])) + self._ext = os.path.splitext(self._inputs[0][0])[1].lower() + self._length = len(self._inputs[0]) + + def _load_input(self, batch_id, index) -> DreamImage: + files = self._inputs[batch_id] + index = min(max(0, index), len(files) - 1) + filename = files[index] + return DreamImage(pil_image=_load_image_cached(filename)) + + def _process_single_batch(self, batch_id, indices, index_offsets: List[int], fun, output_dir) -> List[str]: + all_indices = list(indices) + last_index = max(all_indices) + workset = TempFileSet() + rnd = random.randint(0, 1000000) + result_files = list() + try: + for index in all_indices: + images = list(map(lambda offset: self._load_input(batch_id, index + offset), index_offsets)) + + result: Dict[int, DreamImage] = fun(index, last_index, images) + for (result_index, img) in result.items(): + filepath = os.path.join(output_dir, + "tmp_" + str(rnd) + "_" + (str(result_index).zfill(8)) + self._ext) + filepath_final = os.path.join(output_dir, "seq_" +
(str(result_index).zfill(8)) + self._ext) + if self._ext == ".png": + img.save_png(filepath) + else: + img.save_jpg(filepath, quality=CONFIG.get("encoding.jpeg_quality", 98)) + workset.add(filepath, filepath_final) + result_files.append(filepath_final) + # all done with batch - remove input files + for oldfile in self._inputs[batch_id]: + os.unlink(oldfile) + workset.finalize() + return result_files + finally: + workset.remove() + + def process(self, index_offsets: List[int], fun): + results = dict() + new_length = 0 + for batch_id in self._sequence.batches: + resulting_filenames = self._process_single_batch(batch_id, range(len(self._inputs[batch_id])), + index_offsets, fun, + self._output_dirs[batch_id]) + for (index, filename) in enumerate(resulting_filenames): + l = results.get(index, []) + l.append(filename) + results[index] = l + new_length = len(resulting_filenames) + new_fps = self._sequence.frame_counter.frames_per_second * (float(new_length) / self._length) + counter = FrameCounter(new_length - 1, new_length, new_fps) + return AnimationSequence(counter, results) + + +def _ffmpeg(config, filenames, fps, output): + fps = float(fps) + duration = 1.0 / fps + tmp = tempfile.NamedTemporaryFile(delete=False, mode="wb") + tempfilepath = tmp.name + try: + for filename in filenames: + filename = filename.replace("\\", "/") + tmp.write(f"file '{filename}'\n".encode()) + tmp.write(f"duration {duration}\n".encode()) + finally: + tmp.close() + + try: + cmd = [config.get("ffmpeg.path", "ffmpeg")] + cmd.extend(config.get("ffmpeg.arguments")) + replacements = {"%FPS%": str(fps), "%FRAMES%": tempfilepath, "%OUTPUT%": output} + + for (key, value) in replacements.items(): + cmd = list(map(lambda s: s.replace(key, value), cmd)) + + # Pass the argument list directly - combining an argument list with shell=True + # only behaves as intended on Windows and breaks the call on POSIX systems. + subprocess.check_output(cmd) + finally: + os.unlink(tempfilepath) + + +def _make_video_filename(name, file_ext): + (b, _) = os.path.splitext(name) + return b + "."
+ file_ext.strip(".") + +# +# class DreamVideoEncoderMpegCoder: +# NODE_NAME = "Video Encoder (mpegCoder)" +# ICON = "🎬" +# CATEGORY = NodeCategories.ANIMATION_POSTPROCESSING +# RETURN_TYPES = (LogEntry.ID,) +# RETURN_NAMES = ("log_entry",) +# OUTPUT_NODE = True +# FUNCTION = "encode" +# +# @classmethod +# def INPUT_TYPES(cls): +# return { +# "required": SharedTypes.sequence | { +# "name": ("STRING", {"default": 'video', "multiline": False}), +# "framerate_factor": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0}), +# "remove_images": ("BOOLEAN", {"default": True}) +# }, +# } +# +# def _find_free_filename(self, filename, defaultdir): +# if os.path.basename(filename) == filename: +# filename = os.path.join(defaultdir, filename) +# n = 1 +# tested = filename +# while os.path.exists(tested): +# n += 1 +# (b, ext) = os.path.splitext(filename) +# tested = b + "_" + str(n) + ext +# return tested +# +# def encode(self, sequence, name, framerate_factor, remove_images): +# if not sequence.is_defined: +# return (LogEntry([]),) +# config = DreamConfig() +# filename = _make_video_filename(name, config.get("mpeg_coder.file_extension", "mp4")) +# log_entry = LogEntry([]) +# for batch_num in sequence.batches: +# try: +# images = list(sequence.get_image_files_of_batch(batch_num)) +# filename = self._find_free_filename(filename, os.path.dirname(images[0])) +# first_image = DreamImage.from_file(images[0]) +# enc = MpegEncoderUtility(video_path=filename, +# bit_rate_factor=float(config.get("mpeg_coder.bitrate_factor", 1.0)), +# encoding_threads=int(config.get("mpeg_coder.encoding_threads", 4)), +# max_b_frame=int(config.get("mpeg_coder.max_b_frame", 2)), +# width=first_image.width, +# height=first_image.height, +# files=images, +# fps=sequence.fps * framerate_factor, +# codec_name=config.get("mpeg_coder.codec_name", "libx265")) +# enc.encode() +# log_entry = log_entry.add("Generated video '{}'".format(filename)) +# if remove_images: +# for imagepath in images: +# if os.path.isfile(imagepath): +# os.unlink(imagepath) +# except Exception as e: +# on_error(self.__class__, str(e)) +# return (log_entry,) +# + +class DreamVideoEncoder: + NODE_NAME = "FFMPEG Video Encoder" + DISPLAY_NAME = "Video Encoder (FFMPEG)" + ICON = "🎬" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.sequence | { + "name": ("STRING", {"default": 'video', "multiline": False}), + "framerate_factor": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0}), + "remove_images": ("BOOLEAN", {"default": True}) + }, + } + + CATEGORY = NodeCategories.ANIMATION_POSTPROCESSING + RETURN_TYPES = (LogEntry.ID,) + RETURN_NAMES = ("log_entry",) + OUTPUT_NODE = True + FUNCTION = "encode" + + @classmethod + def IS_CHANGED(cls, sequence: AnimationSequence, **kwargs): + return sequence.is_defined + + def _find_free_filename(self, filename, defaultdir): + if os.path.basename(filename) == filename: + filename = os.path.join(defaultdir, filename) + n = 1 + tested = filename + while os.path.exists(tested): + n += 1 + (b, ext) = os.path.splitext(filename) + tested = b + "_" + str(n) + ext + return tested + + def generate_video(self, files, fps, filename, config): + filename = self._find_free_filename(filename, os.path.dirname(files[0])) + _ffmpeg(config, files, fps, filename) + return filename + + def encode(self, sequence: AnimationSequence, name: str, remove_images, framerate_factor): + if not sequence.is_defined: + return (LogEntry([]),) + + config = DreamConfig() + filename = _make_video_filename(name, 
config.get("ffmpeg.file_extension", "mp4")) + log_entry = LogEntry([]) + for batch_num in sequence.batches: + try: + images = list(sequence.get_image_files_of_batch(batch_num)) + actual_filename = self.generate_video(images, sequence.fps * framerate_factor, filename, config) + + log_entry = log_entry.add("Generated video '{}'".format(actual_filename)) + if remove_images: + for imagepath in images: + if os.path.isfile(imagepath): + os.unlink(imagepath) + except Exception as e: + on_error(self.__class__, str(e)) + return (log_entry,) + + +class DreamSequenceTweening: + NODE_NAME = "Image Sequence Tweening" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.sequence | { + "multiplier": ("INT", {"default": 2, "min": 2, "max": 10}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_POSTPROCESSING + RETURN_TYPES = (AnimationSequence.ID,) + RETURN_NAMES = ("sequence",) + OUTPUT_NODE = False + FUNCTION = "process" + + @classmethod + def IS_CHANGED(cls, sequence: AnimationSequence, **kwargs): + return sequence.is_defined + + def process(self, sequence: AnimationSequence, multiplier): + if not sequence.is_defined: + return (sequence,) + + def _generate_extra_frames(input_index, last_index, images): + results = {} + if input_index == last_index: + # special case + for i in range(multiplier): + results[input_index * multiplier + i] = images[0] + return results + + # normal case + current_frame = images[0] + next_frame = images[1] + for i in range(multiplier): + alpha = float(i + 1) / multiplier + results[multiplier * input_index + i] = current_frame.blend(next_frame, 1.0 - alpha, alpha) + return results + + proc = AnimationSeqProcessor(sequence) + return (proc.process([0, 1], _generate_extra_frames),) + + +class DreamSequenceBlend: + NODE_NAME = "Image Sequence Blend" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.sequence | { + "fade_in": ("FLOAT", {"default": 0.1, "min": 0.01, "max": 0.5}), + "fade_out": ("FLOAT", {"default": 0.1, "min": 0.01, "max": 0.5}), + "iterations": ("INT", {"default": 1, "min": 1, "max": 10}), + }, + } + + CATEGORY = NodeCategories.ANIMATION_POSTPROCESSING + RETURN_TYPES = (AnimationSequence.ID,) + RETURN_NAMES = ("sequence",) + OUTPUT_NODE = False + FUNCTION = "process" + + @classmethod + def IS_CHANGED(cls, sequence: AnimationSequence, **kwargs): + return sequence.is_defined + + def process(self, sequence: AnimationSequence, fade_in, fade_out, iterations): + if not sequence.is_defined: + return (sequence,) + + current_sequence = sequence + for i in range(iterations): + proc = AnimationSeqProcessor(current_sequence) + + def _blur(index: int, last_index: int, images: List[DreamImage]): + pre_frame = images[0].blend(images[1], fade_in, 1.0) + post_frame = images[2].blend(images[1], fade_out, 1.0) + return {index: pre_frame.blend(post_frame)} + + current_sequence = proc.process([-1, 0, 1], _blur) + + return (current_sequence,) diff --git a/custom_nodes/comfyui-dream-project/shared.py b/custom_nodes/comfyui-dream-project/shared.py new file mode 100644 index 0000000000000000000000000000000000000000..24bde244e6208ff15bae931f9127dec674e2ee96 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/shared.py @@ -0,0 +1,418 @@ +# -*- coding: utf-8 -*- + +import hashlib +import json +import os +import random +import time + +import folder_paths as comfy_paths +import glob +import numpy +import torch +from PIL import Image, ImageFilter, ImageEnhance +from PIL.ImageDraw import ImageDraw +from PIL.PngImagePlugin import PngInfo +from 
typing import Dict, Tuple, List + +from .dreamlogger import DreamLog +from .embedded_config import EMBEDDED_CONFIGURATION + +NODE_FILE = os.path.abspath(__file__) +DREAM_NODES_SOURCE_ROOT = os.path.dirname(NODE_FILE) +TEMP_PATH = os.path.join(os.path.abspath(comfy_paths.temp_directory), "Dream_Anim") +ALWAYS_CHANGED_FLAG = float("NaN") + + +def convertTensorImageToPIL(tensor_image) -> Image: + return Image.fromarray(numpy.clip(255. * tensor_image.cpu().numpy().squeeze(), 0, 255).astype(numpy.uint8)) + + +def convertFromPILToTensorImage(pil_image): + return torch.from_numpy(numpy.array(pil_image).astype(numpy.float32) / 255.0).unsqueeze(0) + + +def _replace_pil_image(data): + if isinstance(data, Image.Image): + return DreamImage(pil_image=data) + else: + return data + + +_config_data = None + + +class DreamConfig: + FILEPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.json") + DEFAULT_CONFIG = EMBEDDED_CONFIGURATION + + def __init__(self): + global _config_data + if not os.path.isfile(DreamConfig.FILEPATH): + self._data = DreamConfig.DEFAULT_CONFIG + self._save() + if _config_data is None: + with open(DreamConfig.FILEPATH, encoding="utf-8") as f: + self._data = json.load(f) + if self._merge_with_defaults(self._data, DreamConfig.DEFAULT_CONFIG): + self._save() + _config_data = self._data + else: + self._data = _config_data + + def _save(self): + with open(DreamConfig.FILEPATH, "w", encoding="utf-8") as f: + json.dump(self._data, f, indent=2) + + def _merge_with_defaults(self, config: dict, default_config: dict) -> bool: + changed = False + for key in default_config.keys(): + if key not in config: + changed = True + config[key] = default_config[key] + elif isinstance(default_config[key], dict): + changed = changed or self._merge_with_defaults(config[key], default_config[key]) + return changed + + def get(self, key: str, default=None): + key = key.split(".") + d = self._data + for part in key: + d = d.get(part, {}) + if isinstance(d, dict) and not d: + return default + else: + return d + + +def get_logger(): + config = DreamConfig() + return DreamLog(config.get("debug", False)) + + +class DreamImageProcessor: + def __init__(self, inputs: torch.Tensor, **extra_args): + self._images_in_batch = [convertTensorImageToPIL(tensor) for tensor in inputs] + self._extra_args = extra_args + self.is_batch = len(self._images_in_batch) > 1 + + def process_PIL(self, fun): + def _wrap(dream_image): + pil_outputs = fun(dream_image.pil_image) + return list(map(_replace_pil_image, pil_outputs)) + + return self.process(_wrap) + + def process(self, fun): + output = [] + batch_counter = 0 if self.is_batch else -1 + for pil_image in self._images_in_batch: + exec_result = fun(DreamImage(pil_image=pil_image), batch_counter, **self._extra_args) + exec_result = list(map(_replace_pil_image, exec_result)) + if not output: + output = [list() for i in range(len(exec_result))] + for i in range(len(exec_result)): + output[i].append(exec_result[i].create_tensor_image()) + if batch_counter >= 0: + batch_counter += 1 + return tuple(map(lambda l: torch.cat(l, dim=0), output)) + + +def pick_random_by_weight(data: List[Tuple[float, object]], rng: random.Random): + total_weight = sum(map(lambda item: item[0], data)) + r = rng.random() + for (weight, obj) in data: + r -= weight / total_weight + if r <= 0: + return obj + return data[0][1] + + +class DreamImage: + @classmethod + def join_to_tensor_data(cls, images): + l = list(map(lambda i: i.create_tensor_image(), images)) + return torch.cat(l, dim=0) + + def 
__init__(self, tensor_image=None, pil_image=None, file_path=None, with_alpha=False): + if pil_image is not None: + self.pil_image = pil_image + elif tensor_image is not None: + self.pil_image = convertTensorImageToPIL(tensor_image) + else: + self.pil_image = Image.open(file_path) + if with_alpha and self.pil_image.mode != "RGBA": + self.pil_image = self.pil_image.convert("RGBA") + else: + if self.pil_image.mode not in ("RGB", "RGBA"): + self.pil_image = self.pil_image.convert("RGB") + self.width = self.pil_image.width + self.height = self.pil_image.height + self.size = self.pil_image.size + self._draw = ImageDraw(self.pil_image) + + def change_brightness(self, factor): + enhancer = ImageEnhance.Brightness(self.pil_image) + return DreamImage(pil_image=enhancer.enhance(factor)) + + def change_contrast(self, factor): + enhancer = ImageEnhance.Contrast(self.pil_image) + return DreamImage(pil_image=enhancer.enhance(factor)) + + def numpy_array(self): + return numpy.array(self.pil_image) + + def _renew(self, pil_image): + self.pil_image = pil_image + self._draw = ImageDraw(self.pil_image) + + def __iter__(self): + class _Pixels: + def __init__(self, image: DreamImage): + self.x = 0 + self.y = 0 + self._img = image + + def __next__(self) -> Tuple[Tuple[int, int, int, int], int, int]: + if self.x >= self._img.width: + self.y += 1 + self.x = 0 + if self.y >= self._img.height: + raise StopIteration + # capture the coordinates of the pixel being returned before advancing + (x, y) = (self.x, self.y) + p = self._img.get_pixel(x, y) + self.x += 1 + return (p, x, y) + + return _Pixels(self) + + def convert(self, mode="RGB"): + if self.pil_image.mode == mode: + return self + return DreamImage(pil_image=self.pil_image.convert(mode)) + + def create_tensor_image(self): + return convertFromPILToTensorImage(self.pil_image) + + def blend(self, other, weight_self: float = 0.5, weight_other: float = 0.5): + alpha = 1.0 - weight_self / (weight_other + weight_self) + return DreamImage(pil_image=Image.blend(self.pil_image, other.pil_image, alpha)) + + def color_area(self, x, y, w, h, col): + self._draw.rectangle((x, y, x + w - 1, y + h - 1), fill=col, outline=col) + + def blur(self, amount): + return DreamImage(pil_image=self.pil_image.filter(ImageFilter.GaussianBlur(amount))) + + def adjust_colors(self, red_factor=1.0, green_factor=1.0, blue_factor=1.0): + # each output channel is the corresponding input channel scaled by its factor + matrix = (red_factor, 0, 0, 0, + 0, green_factor, 0, 0, + 0, 0, blue_factor, 0) + return DreamImage(pil_image=self.pil_image.convert("RGB", matrix)) + + def get_pixel(self, x, y): + p = self.pil_image.getpixel((x, y)) + if len(p) == 4: + return p + else: + return (p[0], p[1], p[2], 255) + + def set_pixel(self, x, y, pixelvalue): + if len(pixelvalue) == 4: + self.pil_image.putpixel((x, y), pixelvalue) + else: + self.pil_image.putpixel((x, y), (pixelvalue[0], pixelvalue[1], pixelvalue[2], 255)) + + def save_png(self, filepath, embed_info=False, prompt=None, extra_pnginfo=None): + info = PngInfo() + if extra_pnginfo is not None: + for item in extra_pnginfo: + info.add_text(item, json.dumps(extra_pnginfo[item])) + if prompt is not None: + info.add_text("prompt", json.dumps(prompt)) + if embed_info: + self.pil_image.save(filepath, pnginfo=info, optimize=True) + else: + self.pil_image.save(filepath, optimize=True) + + def save_jpg(self, filepath, quality=98): + self.pil_image.save(filepath, quality=quality, optimize=True) + + @classmethod + def from_file(cls,
file_path): + return DreamImage(pil_image=Image.open(file_path)) + + +class DreamMask: + def __init__(self, tensor_image=None, pil_image=None): + if pil_image: + self.pil_image = pil_image + else: + self.pil_image = convertTensorImageToPIL(tensor_image) + if self.pil_image.mode != "L": + self.pil_image = self.pil_image.convert("L") + + def create_tensor_image(self): + return torch.from_numpy(numpy.array(self.pil_image).astype(numpy.float32) / 255.0) + + +def list_images_in_directory(directory_path: str, pattern: str, alphabetic_index: bool) -> Dict[int, List[str]]: + if not os.path.isdir(directory_path): + return {} + dirs_to_search = [directory_path] + if os.path.isdir(os.path.join(directory_path, "batch_0001")): + dirs_to_search = list() + for i in range(10000): + dirpath = os.path.join(directory_path, "batch_" + (str(i).zfill(4))) + if not os.path.isdir(dirpath): + break + else: + dirs_to_search.append(dirpath) + + def _num_from_filename(fn): + (text, _) = os.path.splitext(fn) + token = text.split("_")[-1] + if token.isdigit(): + return int(token) + else: + return -1 + + result = dict() + for search_path in dirs_to_search: + files = [] + for file_name in glob.glob(os.path.join(search_path, pattern), recursive=False): + if file_name.lower().endswith(('.jpeg', '.jpg', '.png', '.tiff', '.gif', '.bmp', '.webp')): + files.append(os.path.abspath(file_name)) + + if alphabetic_index: + files.sort() + for idx, item in enumerate(files): + lst = result.get(idx, []) + lst.append(item) + result[idx] = lst + else: + for filepath in files: + idx = _num_from_filename(os.path.basename(filepath)) + lst = result.get(idx, []) + lst.append(filepath) + result[idx] = lst + return result + + +class DreamStateStore: + def __init__(self, name, read_fun, write_fun): + self._read = read_fun + self._write = write_fun + self._name = name + + def _as_key(self, k): + return self._name + "_" + k + + def get(self, key, default): + v = self[key] + if v is None: + return default + else: + return v + + def update(self, key, default, f): + prev = self.get(key, default) + v = f(prev) + self[key] = v + return v + + def __getitem__(self, item): + return self._read(self._as_key(item)) + + def __setitem__(self, key, value): + return self._write(self._as_key(key), value) + + +class DreamStateFile: + def __init__(self, state_collection_name="state"): + self._filepath = os.path.join(TEMP_PATH, state_collection_name+".json") + self._dirname = os.path.dirname(self._filepath) + if not os.path.isdir(self._dirname): + os.makedirs(self._dirname) + if not os.path.isfile(self._filepath): + self._data = {} + else: + with open(self._filepath, encoding="utf-8") as f: + self._data = json.load(f) + + def get_section(self, name: str) -> DreamStateStore: + return DreamStateStore(name, self._read, self._write) + + def _read(self, key): + return self._data.get(key, None) + + def _write(self, key, value): + previous = self._data.get(key, None) + if value is None: + if key in self._data: + del self._data[key] + else: + self._data[key] = value + with open(self._filepath, "w", encoding="utf-8") as f: + json.dump(self._data, f) + return previous + + +def hashed_as_strings(*items): + tokens = "|".join(list(map(str, items))) + m = hashlib.sha256() + m.update(tokens.encode(encoding="utf-8")) + return m.digest().hex() +# +# +# class MpegEncoderUtility: +# def __init__(self, video_path: str, bit_rate_factor: float, width: int, height: int, files: List[str], +# fps: float, encoding_threads: int, codec_name, max_b_frame): +# import mpegCoder +# self._files = 
files +# self._logger = get_logger() +# self._enc = mpegCoder.MpegEncoder() +# bit_rate = self._calculate_bit_rate(width, height, fps, bit_rate_factor) +# self._logger.info("Bitrate " + str(bit_rate)) +# self._enc.setParameter( +# videoPath=video_path, codecName=codec_name, +# nthread=encoding_threads, bitRate=bit_rate, width=width, height=height, widthSrc=width, +# heightSrc=height, +# GOPSize=len(files), maxBframe=max_b_frame, frameRate=self._fps_to_tuple(fps)) +# +# def _calculate_bit_rate(self, width: int, height: int, fps: float, bit_rate_factor: float): +# bits_per_pixel_base = 0.5 +# return round(max(10, float(width * height * fps * bits_per_pixel_base * bit_rate_factor * 0.001))) +# +# def encode(self): +# if not self._enc.FFmpegSetup(): +# raise Exception("Failed to setup MPEG Encoder - check parameters!") +# try: +# t = time.time() +# +# for filepath in self._files: +# self._logger.debug("Encoding frame {}", filepath) +# image = DreamImage.from_file(filepath).convert("RGB") +# self._enc.EncodeFrame(image.numpy_array()) +# self._enc.FFmpegClose() +# self._logger.info("Completed video encoding of {n} frames in {t} seconds", n=len(self._files), +# t=round(time.time() - t)) +# finally: +# self._enc.clear() +# +# def _fps_to_tuple(self, fps: float): +# def _is_almost_int(f: float): +# return abs(f - int(f)) < 0.001 +# +# a = fps +# b = 1 +# while not _is_almost_int(a) and b < 100: +# a /= 10 +# b *= 10 +# a = round(a) +# b = round(b) +# self._logger.info("Video specified as {fps} fps - encoder framerate {a}/{b}", fps=fps, a=a, b=b) +# return (a, b) diff --git a/custom_nodes/comfyui-dream-project/switches.py b/custom_nodes/comfyui-dream-project/switches.py new file mode 100644 index 0000000000000000000000000000000000000000..d860f43b63d0f167b5319bf1889ef101c3f0b339 --- /dev/null +++ b/custom_nodes/comfyui-dream-project/switches.py @@ -0,0 +1,212 @@ +from .categories import NodeCategories +from .err import * +from .shared import ALWAYS_CHANGED_FLAG, hashed_as_strings +from .dreamtypes import RGBPalette + + +def _generate_switch_input(type: str): + d = dict() + for i in range(10): + d["input_" + str(i)] = (type,) + return { + "required": { + "select": ("INT", {"default": 0, "min": 0, "max": 9}), + "on_missing": (["previous", "next"],) + }, + "optional": d + } + + +def _do_pick(cls, select, on_missing, **args): + direction = 1 + if on_missing == "previous": + direction = -1 + if len(args) == 0: + on_error(cls, "No inputs provided!") + while args.get("input_" + str(select), None) is None: + select = (select + direction) % 10 + return args["input_" + str(select)], + + +class DreamBigImageSwitch: + _switch_type = "IMAGE" + NODE_NAME = "Big Image Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def pick(self, select, on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBigLatentSwitch: + _switch_type = "LATENT" + NODE_NAME = "Big Latent Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def pick(self, select,
on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBigTextSwitch: + _switch_type = "STRING" + NODE_NAME = "Big Text Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(values) + + def pick(self, select, on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBigPaletteSwitch: + _switch_type = RGBPalette.ID + NODE_NAME = "Big Palette Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return ALWAYS_CHANGED_FLAG + + def pick(self, select, on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBigFloatSwitch: + _switch_type = "FLOAT" + NODE_NAME = "Big Float Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(values) + + def pick(self, select, on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBigIntSwitch: + _switch_type = "INT" + NODE_NAME = "Big Int Switch" + ICON = "⭆" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = (_switch_type,) + RETURN_NAMES = ("selected",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return _generate_switch_input(cls._switch_type) + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(values) + + def pick(self, select, on_missing, **args): + return _do_pick(self.__class__, select, on_missing, **args) + + +class DreamBoolToFloat: + NODE_NAME = "Boolean To Float" + ICON = "⬖" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("result",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "boolean": ("BOOLEAN", {"default": False}), + "on_true": ("FLOAT", {"default": 1.0}), + "on_false": ("FLOAT", {"default": 0.0}) + } + } + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(values) + + def pick(self, boolean, on_true, on_false): + if boolean: + return (on_true,) + else: + return (on_false,) + + +class DreamBoolToInt: + NODE_NAME = "Boolean To Int" + ICON = "⬖" + CATEGORY = NodeCategories.UTILS_SWITCHES + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("result",) + FUNCTION = "pick" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "boolean": ("BOOLEAN", {"default": False}), + "on_true": ("INT", {"default": 1}), + "on_false": ("INT", {"default": 0}) + } + } + + @classmethod + def IS_CHANGED(cls, *values): + return hashed_as_strings(values) + + def pick(self, boolean, on_true, on_false): + if boolean: + return (on_true,) + else: + return (on_false,) diff --git a/custom_nodes/comfyui-dream-project/uninstall.py b/custom_nodes/comfyui-dream-project/uninstall.py new file mode 100644 index 0000000000000000000000000000000000000000..4cef258f451485e3de90af270dd7416b8e7919a7 --- /dev/null 
+++ b/custom_nodes/comfyui-dream-project/uninstall.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +def run_uninstall(): + pass + + +if __name__ == "__main__": + run_uninstall() diff --git a/custom_nodes/comfyui-dream-project/utility.py b/custom_nodes/comfyui-dream-project/utility.py new file mode 100644 index 0000000000000000000000000000000000000000..5a0e79a3c70d7b23c74315a786665737cbeaf42c --- /dev/null +++ b/custom_nodes/comfyui-dream-project/utility.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +import datetime +import math +import os + +import folder_paths as comfy_paths + +from .categories import NodeCategories +from .shared import hashed_as_strings, DreamStateFile +from .dreamtypes import LogEntry, SharedTypes, FrameCounter + +_logfile_state = DreamStateFile("logging") + + +class DreamJoinLog: + NODE_NAME = "Log Entry Joiner" + ICON = "🗎" + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = (LogEntry.ID,) + RETURN_NAMES = ("log_entry",) + FUNCTION = "convert" + + @classmethod + def INPUT_TYPES(cls): + return { + "optional": { + "entry_0": (LogEntry.ID,), + "entry_1": (LogEntry.ID,), + "entry_2": (LogEntry.ID,), + "entry_3": (LogEntry.ID,), + } + } + + def convert(self, **values): + entry = LogEntry([]) + for i in range(4): + txt = values.get("entry_" + str(i), None) + if txt: + entry = entry.merge(txt) + return (entry,) + + +class DreamFloatToLog: + NODE_NAME = "Float to Log Entry" + ICON = "🗎" + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = (LogEntry.ID,) + RETURN_NAMES = ("log_entry",) + FUNCTION = "convert" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 0}), + "label": ("STRING", {"default": ""}), + }, + } + + def convert(self, label, value): + return (LogEntry.new(label + ": " + str(value)),) + + +class DreamIntToLog: + NODE_NAME = "Int to Log Entry" + ICON = "🗎" + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = (LogEntry.ID,) + RETURN_NAMES = ("log_entry",) + FUNCTION = "convert" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("INT", {"default": 0}), + "label": ("STRING", {"default": ""}), + }, + } + + def convert(self, label, value): + return (LogEntry.new(label + ": " + str(value)),) + + +class DreamStringToLog: + NODE_NAME = "String to Log Entry" + ICON = "🗎" + OUTPUT_NODE = True + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = (LogEntry.ID,) + RETURN_NAMES = ("log_entry",) + FUNCTION = "convert" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": ("STRING", {"default": ""}), + }, + "optional": { + "label": ("STRING", {"default": ""}), + } + } + + def convert(self, text, **values): + label = values.get("label", "") + if label: + return (LogEntry.new(label + ": " + text),) + else: + return (LogEntry.new(text),) + + +class DreamStringTokenizer: + NODE_NAME = "String Tokenizer" + ICON = "🪙" + OUTPUT_NODE = True + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("token",) + FUNCTION = "exec" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": ("STRING", {"default": "", "multiline": True}), + "separator": ("STRING", {"default": ","}), + "selected": ("INT", {"default": 0, "min": 0}) + }, + } + + def exec(self, text: str, separator: str, selected: int): + if separator is None or separator == "": + separator = " " + parts = text.split(sep=separator) + return (parts[abs(selected) % len(parts)].strip(),) + + +class DreamLogFile: + NODE_NAME = "Log File" + ICON = "🗎" + OUTPUT_NODE = True + CATEGORY = NodeCategories.UTILS 
+ RETURN_TYPES = () + RETURN_NAMES = () + FUNCTION = "write" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": SharedTypes.frame_counter | { + "log_directory": ("STRING", {"default": comfy_paths.output_directory}), + "log_filename": ("STRING", {"default": "dreamlog.txt"}), + "stdout": ("BOOLEAN", {"default": True}), + "active": ("BOOLEAN", {"default": True}), + "clock_has_24_hours": ("BOOLEAN", {"default": True}), + }, + "optional": { + "entry_0": (LogEntry.ID,), + "entry_1": (LogEntry.ID,), + "entry_2": (LogEntry.ID,), + "entry_3": (LogEntry.ID,), + "entry_4": (LogEntry.ID,), + "entry_5": (LogEntry.ID,), + "entry_6": (LogEntry.ID,), + "entry_7": (LogEntry.ID,), + }, + } + + def _path_to_log_file(self, log_directory, logfile): + if os.path.isabs(logfile): + return os.path.normpath(os.path.abspath(logfile)) + elif os.path.isabs(log_directory): + return os.path.normpath(os.path.abspath(os.path.join(log_directory, logfile))) + elif log_directory: + return os.path.normpath(os.path.abspath(os.path.join(comfy_paths.output_directory, log_directory, logfile))) + else: + return os.path.normpath(os.path.abspath(os.path.join(comfy_paths.output_directory, logfile))) + + def _get_tm_format(self, clock_has_24_hours): + if clock_has_24_hours: + return "%a %H:%M:%S" + else: + return "%a %I:%M:%S %p" + + def write(self, frame_counter: FrameCounter, log_directory, log_filename, stdout, active, clock_has_24_hours, + **entries): + if not active: + return () + log_entry = None + for i in range(8): + e = entries.get("entry_" + str(i), None) + if e is not None: + if log_entry is None: + log_entry = e + else: + log_entry = log_entry.merge(e) + log_file_path = self._path_to_log_file(log_directory, log_filename) + ts = _logfile_state.get_section("timestamps").get(log_file_path, 0) + output_text = list() + last_t = 0 + for (t, text) in log_entry.get_filtered_entries(ts): + dt = datetime.datetime.fromtimestamp(t) + output_text.append("[frame {}/{} (~{}%), timestamp {}]\n{}".format(frame_counter.current_frame + 1, + frame_counter.total_frames, + round(frame_counter.progress * 100), + dt.strftime(self._get_tm_format( + clock_has_24_hours)), text.rstrip())) + output_text.append("---") + last_t = max(t, last_t) + output_text = "\n".join(output_text) + "\n" + if stdout: + print(output_text) + with open(log_file_path, "a", encoding="utf-8") as f: + f.write(output_text) + _logfile_state.get_section("timestamps").update(log_file_path, 0, lambda _: last_t) + return () + + +def _align_num(n: int, alignment: int, type: str): + if alignment <= 1: + return n + if type == "ceil": + return int(math.ceil(float(n) / alignment)) * alignment + elif type == "floor": + return int(math.floor(float(n) / alignment)) * alignment + else: + return int(round(float(n) / alignment)) * alignment + + +class DreamFrameDimensions: + NODE_NAME = "Common Frame Dimensions" + ICON = "⌗" + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "size": (["3840", "1920", "1440", "1280", "768", "720", "640", "512"],), + "aspect_ratio": (["16:9", "16:10", "4:3", "1:1", "5:4", "3:2", "21:9", "14:9"],), + "orientation": (["wide", "tall"],), + "divisor": (["8", "4", "2", "1"],), + "alignment": ("INT", {"default": 64, "min": 1, "max": 512}), + "alignment_type": (["ceil", "floor", "nearest"],), + }, + } + + CATEGORY = NodeCategories.UTILS + RETURN_TYPES = ("INT", "INT", "INT", "INT") + RETURN_NAMES = ("width", "height", "final_width", "final_height") + FUNCTION = "result" + + @classmethod + def IS_CHANGED(cls, *values): + return 
hashed_as_strings(*values) + + def result(self, size, aspect_ratio, orientation, divisor, alignment, alignment_type): + ratio = tuple(map(int, aspect_ratio.split(":"))) + final_width = int(size) + final_height = int(round((float(final_width) * ratio[1]) / ratio[0])) + width = _align_num(int(round(final_width / float(divisor))), alignment, alignment_type) + height = _align_num(int(round((float(width) * ratio[1]) / ratio[0])), alignment, alignment_type) + if orientation == "wide": + return (width, height, final_width, final_height) + else: + return (height, width, final_height, final_width) diff --git a/custom_nodes/comfyui_controlnet_aux/.gitignore b/custom_nodes/comfyui_controlnet_aux/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..cb63911f1d78422454a3bcf52660ac59cad088aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/.gitignore @@ -0,0 +1,182 @@ +# Initially taken from Github's Python gitignore file + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# tests and logs +tests/fixtures/cached_*_text.txt +logs/ +lightning_logs/ +lang_code_data/ +tests/outputs + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode +.vs +.vscode + +# Pycharm +.idea + +# TF code +tensorflow_code + +# Models +proc_data + +# examples +runs +/runs_old +/wandb +/examples/runs +/examples/**/*.args +/examples/rag/sweep + +# data +/data +serialization_dir + +# emacs +*.*~ +debug.env + +# vim +.*.swp + +#ctags +tags + +# pre-commit +.pre-commit* + +# .lock +*.lock + +# DS_Store (MacOS) +.DS_Store +# RL pipelines may produce mp4 outputs +*.mp4 + +# dependencies +/transformers + +# ruff +.ruff_cache + +wandb + +ckpts/ + +test.ipynb +config.yaml \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/LICENSE.txt b/custom_nodes/comfyui_controlnet_aux/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/custom_nodes/comfyui_controlnet_aux/README.md b/custom_nodes/comfyui_controlnet_aux/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..46bd9feb1e7da6f63e6c4dbca5ec693779bc57e1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/README.md
@@ -0,0 +1,233 @@
+# ComfyUI's ControlNet Auxiliary Preprocessors
+
+This is a rework of [comfyui_controlnet_preprocessors](https://github.com/Fannovel16/comfy_controlnet_preprocessors) based on [ControlNet auxiliary models by 🤗](https://github.com/patrickvonplaten/controlnet_aux). I don't think the old repo is good enough to keep maintaining.
+
+YOU NEED TO REMOVE `comfyui_controlnet_preprocessors` BEFORE USING THIS REPO. THESE TWO CONFLICT WITH EACH OTHER.
+
+All old workflows can still be used with the custom nodes in this repo, but the version option won't do anything. Almost all v1 preprocessors are replaced by v1.1, except those that don't appear in v1.1.
+
+You don't need to worry about the differences between v1 and v1.1.
+
+The code is copy-pasted from the respective folders in https://github.com/lllyasviel/ControlNet/tree/main/annotator and connected to [the 🤗 Hub](https://huggingface.co/lllyasviel/Annotators).
+
+All credit & copyright goes to https://github.com/lllyasviel.
+
+# Updates
+* `AIO Aux Preprocessor`, integrating all loadable aux preprocessors as dropdown options. Easy to copy, paste and get the preprocessor faster.
+* Added OpenPose-format JSON output from the OpenPose Preprocessor and DWPose Preprocessor. Check [here](#faces-and-poses).
+* Fixed wrong model path when downloading DWPose.
+* Made hint images less blurry.
+* Added the `resolution` option, plus `PixelPerfectResolution` and `HintImageEnchance` nodes (TODO: Documentation).
+* Added `RAFT Optical Flow Embedder` for TemporalNet2 (TODO: Workflow example).
+* Fixed opencv conflicts between this extension, [ReActor](https://github.com/Gourieff/comfyui-reactor-node) and Roop. Thanks `Gourieff` for [the solution](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/7#issuecomment-1734319075)!
+* RAFT is removed, as the code behind it doesn't match what the original code does
+* Changed `lineart`'s display name from `Normal Lineart` to `Realistic Lineart`. This change won't affect old workflows
+* Added support for `onnxruntime` to speed up DWPose (see the Q&A)
+* Fixed "TypeError: expected size to be one of int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], but got size with types [, ]" ([Issue](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/2), [PR](https://github.com/Fannovel16/comfyui_controlnet_aux/pull/71))
+* Fixed an ImageGenResolutionFromImage shape bug (https://github.com/Fannovel16/comfyui_controlnet_aux/pull/74)
+* Fixed LeRes and MiDaS's incompatibility with the MPS device
+* Fixed the DWPose onnxruntime session being checked multiple times: https://github.com/Fannovel16/comfyui_controlnet_aux/issues/89
+* Added `Anime Face Segmentor` (in `ControlNet Preprocessors/Semantic Segmentation`) for [ControlNet AnimeFaceSegmentV2](https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite#animefacesegmentv2).
Check [here](#anime-face-segmentor)
+* Changed download functions and fixed a [download error](https://github.com/Fannovel16/comfyui_controlnet_aux/issues/39): [PR](https://github.com/Fannovel16/comfyui_controlnet_aux/pull/96)
+* The DWPose onnxruntime session is now cached on the first use of the DWPose node instead of at ComfyUI startup
+* Added alternative YOLOX models for faster speed when using DWPose
+* Added alternative DWPose models
+* Implemented the preprocessor for [AnimalPose ControlNet](https://github.com/abehonest/ControlNet_AnimalPose/tree/main). Check [Animal Pose AP-10K](#animal-pose-ap-10k)
+* Added YOLO-NAS models, which are drop-in replacements for YOLOX
+* Fixed OpenPose Face/Hands no longer detecting: https://github.com/Fannovel16/comfyui_controlnet_aux/issues/54
+* Added a TorchScript implementation of DWPose and AnimalPose
+* Added a TorchScript implementation of DensePose from a [Colab notebook](https://colab.research.google.com/drive/16hcaaKs210ivpxjoyGNuvEXZD4eqOOSQ) which doesn't require detectron2. [Example](#densepose). P/s: currently doesn't work
+# Q&A:
+## Why don't some nodes appear after I install this repo?
+
+This repo has a new mechanism that skips any custom node which can't be imported. If you run into this, please open an issue on the [Issues tab](https://github.com/Fannovel16/comfyui_controlnet_aux/issues) with the log from the command line.
+
+## DWPose/AnimalPose only uses the CPU, so it's very slow. How can I make it use the GPU?
+There are two ways to speed up DWPose: using TorchScript checkpoints (.torchscript.pt) or ONNXRuntime checkpoints (.onnx). The TorchScript way is a little slower than ONNXRuntime but doesn't require any additional library, and it is still far faster than the CPU.
+
+A TorchScript bbox detector is compatible with an ONNX pose estimator and vice versa.
+### TorchScript
+Set `bbox_detector` and `pose_estimator` according to this picture. You can try other bbox detectors ending with `.torchscript.pt` to reduce bbox detection time if your input images are ideal.
+![](./example_torchscript.png)
+### ONNXRuntime
+If onnxruntime is installed successfully and the checkpoint used ends with `.onnx`, it will replace the default cv2 backend to take advantage of the GPU. Note that if you are using an NVidia card, this method currently only works on CUDA 11.8 (ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z) unless you compile onnxruntime yourself.
+
+1. Know your onnxruntime build:
+* * NVidia/AMD GPU: `onnxruntime-gpu`
+* * DirectML: `onnxruntime-directml`
+* * OpenVINO: `onnxruntime-openvino`
+
+Note that if this is your first time using ComfyUI, please test whether it can run on your device before doing the next steps.
+
+2. Add it to `requirements.txt`
+
+3. Run `install.bat` or the pip command mentioned in Installation
+
+![](./example_onnx.png)
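+If you're unsure whether the GPU build actually took effect, a quick sanity check (a minimal sketch; it only assumes `onnxruntime` is importable) is to print the execution providers your build exposes:
+```py
+import onnxruntime as ort
+
+# Providers are listed in priority order; seeing only "CPUExecutionProvider"
+# means a GPU-enabled build is not active.
+print(ort.get_available_providers())
+```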
+# Installation:
+## Using ComfyUI Manager (recommended):
+Install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) and follow the steps introduced there to install this repo.
+
+## Alternative:
+If you're running on Linux, or with a non-admin account on Windows, you'll want to ensure `/ComfyUI/custom_nodes` and `comfyui_controlnet_aux` have write permissions.
+
+There is now an **install.bat** you can run to install to the portable version, if detected. Otherwise it will default to system Python and assume you followed ComfyUI's manual installation steps.
+
+If you can't run **install.bat** (e.g. you are a Linux user), open the CMD/Shell and do the following:
+ - Navigate to your `/ComfyUI/custom_nodes/` folder
+ - Run `git clone https://github.com/Fannovel16/comfyui_controlnet_aux/`
+ - Navigate to your `comfyui_controlnet_aux` folder
+ - Portable/venv:
+   - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
+ - With system Python:
+   - Run `pip install -r requirements.txt`
+ - Start ComfyUI
+
+# Nodes
+Please note that this repo only supports preprocessors that make hint images (e.g. stickman, canny edge).
+All preprocessors except Inpaint are integrated into the `AIO Aux Preprocessor` node.
+This node lets you get a preprocessor quickly, but a preprocessor's own threshold parameters can't be set from it.
+You need to use its node directly to set thresholds.
+
+## Line Extractors
+* Binary Lines
+* Canny Edge
+* HED Lines
+* Realistic Lineart (formerly Normal Lineart)
+* Anime Lineart
+* Manga Lineart
+* M-LSD Lines
+* PiDiNet Lines
+* Scribble Lines
+* Scribble XDoG Lines
+
+## Normal and Depth Map
+* LeReS - Depth Map
+* MiDaS - Normal Map
+* MiDaS - Depth Map
+* BAE - Normal Map
+* Zoe - Depth Map
+
+## Faces and Poses
+* DWPose Pose Estimation
+* OpenPose Pose Estimation
+* MediaPipe Face Mesh
+* Animal Pose Estimation
+
+An array of [OpenPose-format JSON](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md#json-output-format) corresponding to each frame in an IMAGE batch can be retrieved from DWPose and OpenPose using `app.nodeOutputs` on the UI or the `/history` API endpoint. The JSON output from AnimalPose uses a format quite similar to OpenPose JSON:
+```
+[
+    {
+        "version": "ap10k",
+        "animals": [
+            [[x1, y1, 1], [x2, y2, 1],..., [x17, y17, 1]],
+            [[x1, y1, 1], [x2, y2, 1],..., [x17, y17, 1]],
+            ...
+        ],
+        "canvas_height": 512,
+        "canvas_width": 768
+    },
+    ...
+]
+```
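+As a rough illustration, here is a minimal sketch of walking that AnimalPose structure in Python (the `animal_pose.json` file name is an assumption for this example; in practice the JSON string comes from the `openpose_json` output, as the snippets below show):
+```py
+import json
+
+# Hypothetical file, for illustration only; in practice you'd parse the
+# `openpose_json` string retrieved from the node output or the /history API.
+with open("animal_pose.json") as f:
+    frames = json.load(f)
+
+for i, frame in enumerate(frames):
+    w, h = frame["canvas_width"], frame["canvas_height"]
+    for animal in frame["animals"]:
+        # Each animal is a list of 17 AP-10K keypoints: [x, y, confidence]
+        visible = [(x, y) for x, y, c in animal if c > 0]
+        print(f"frame {i}: {len(visible)}/17 keypoints visible on a {w}x{h} canvas")
+```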
+For extension developers (e.g. an Openpose editor):
+```js
+const poseNodes = app.graph._nodes.filter(node => ["OpenposePreprocessor", "DWPreprocessor", "AnimalPosePreprocessor"].includes(node.type))
+for (const poseNode of poseNodes) {
+    const openposeResults = JSON.parse(app.nodeOutputs[poseNode.id].openpose_json[0])
+    console.log(openposeResults) //An array containing Openpose JSON for each frame
+}
+```
+
+For API users:
+Javascript
+```js
+import fetch from "node-fetch" //Remember to add "type": "module" to "package.json"
+async function main() {
+    const promptId = '792c1905-ecfe-41f4-8114-83e6a4a09a9f' //Too lazy to POST /queue
+    let history = await fetch(`http://127.0.0.1:8188/history/${promptId}`).then(re => re.json())
+    history = history[promptId]
+    const nodeOutputs = Object.values(history.outputs).filter(output => output.openpose_json)
+    for (const nodeOutput of nodeOutputs) {
+        const openposeResults = JSON.parse(nodeOutput.openpose_json[0])
+        console.log(openposeResults) //An array containing Openpose JSON for each frame
+    }
+}
+main()
+```
+
+Python
+```py
+import json, urllib.request
+
+server_address = "127.0.0.1:8188"
+prompt_id = '' #Too lazy to POST /queue
+
+def get_history(prompt_id):
+    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
+        return json.loads(response.read())
+
+history = get_history(prompt_id)[prompt_id]
+for node_id in history['outputs']:
+    node_output = history['outputs'][node_id]
+    if 'openpose_json' in node_output:
+        print(json.loads(node_output['openpose_json'][0])) #A list containing Openpose JSON for each frame
+```
+## Semantic Segmentation
+* OneFormer ADE20K Segmentor
+* UniFormer Segmentor
+* OneFormer COCO Segmentor
+
+## T2IAdapter-only
+* Color Pallete
+* Content Shuffle
+
+# Examples
+> A picture is worth a thousand words
+
+Credit to https://huggingface.co/thibaud/controlnet-sd21. You can get the same kind of results from this repo's preprocessor nodes.
+## Line Extractors
+### Canny Edge
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_canny.png)
+### HED Lines
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_hed.png)
+### Realistic Lineart
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_lineart.png)
+### Scribble/Fake Scribble
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_scribble.png)
+
+## Normal and Depth Map
+### Depth (idk the preprocessor they use)
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_depth.png)
+### Zoe - Depth Map
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_zoedepth.png)
+### BAE - Normal Map
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_normalbae.png)
+
+## Faces and Poses
+### OpenPose
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_openpose.png)
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_openposev2.png)
+
+### Animal Pose (AP-10K)
+![](./example_animal_pose.png)
+
+### DensePose
+![](./example_densepose.png)
+
+## Semantic Segmentation
+### OneFormer ADE20K Segmentor
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_ade20k.png)
+
+### Anime Face Segmentor
+![](./example_anime_face_segmentor.png)
+
+## T2IAdapter-only
+### Color Pallete for T2I-Adapter
+![](https://huggingface.co/thibaud/controlnet-sd21/resolve/main/example_color.png)
+
+# Testing workflow
+https://github.com/Fannovel16/comfyui_controlnet_aux/blob/master/tests/test_cn_aux_full.json
+![](https://github.com/Fannovel16/comfyui_controlnet_aux/blob/master/tests/pose.png?raw=true)
diff --git a/custom_nodes/comfyui_controlnet_aux/__init__.py b/custom_nodes/comfyui_controlnet_aux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1370c96a091a76c98a1b1a7c7f87151324a652d2
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/__init__.py
@@ -0,0 +1,122 @@
+import sys, os
+from .utils import here, create_node_input_types
+from pathlib import Path
+import threading
+import traceback
+import warnings
+import importlib
+from .log import log, blue_text, cyan_text, get_summary, get_label
+from .hint_image_enchance import NODE_CLASS_MAPPINGS as HIE_NODE_CLASS_MAPPINGS
+from .hint_image_enchance import NODE_DISPLAY_NAME_MAPPINGS as HIE_NODE_DISPLAY_NAME_MAPPINGS
+#Ref: https://github.com/comfyanonymous/ComfyUI/blob/76d53c4622fc06372975ed2a43ad345935b8a551/nodes.py#L17
+sys.path.insert(0, str(Path(here, "src").resolve()))
+for pkg_name in ["controlnet_aux", "custom_mmpkg"]:
+    sys.path.append(str(Path(here, "src", pkg_name).resolve()))
+
+#Enable CPU fallback for ops not supported by MPS, like upsample_bicubic2d.out
+#https://github.com/pytorch/pytorch/issues/77764
+#https://github.com/Fannovel16/comfyui_controlnet_aux/issues/2#issuecomment-1763579485
+os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = '1'
+
+
+def load_nodes():
+    shorted_errors = []
+    full_error_messages = []
+    node_class_mappings = {}
+    node_display_name_mappings = {}
+
+    for filename in (here / "node_wrappers").iterdir():
+        module_name = filename.stem
+        try:
+            module = importlib.import_module(
+                f".node_wrappers.{module_name}", package=__package__
+            )
+            node_class_mappings.update(getattr(module, "NODE_CLASS_MAPPINGS"))
+            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS"):
+                node_display_name_mappings.update(getattr(module, "NODE_DISPLAY_NAME_MAPPINGS"))
+
+            log.debug(f"Imported {module_name} nodes")
+
+        except
AttributeError: + pass # wip nodes + except Exception: + error_message = traceback.format_exc() + full_error_messages.append(error_message) + error_message = error_message.splitlines()[-1] + shorted_errors.append( + f"Failed to import module {module_name} because {error_message}" + ) + + if len(shorted_errors) > 0: + full_err_log = '\n\n'.join(full_error_messages) + print(f"\n\nFull error log from comfyui_controlnet_aux: \n{full_err_log}\n\n") + log.info( + f"Some nodes failed to load:\n\t" + + "\n\t".join(shorted_errors) + + "\n\n" + + "Check that you properly installed the dependencies.\n" + + "If you think this is a bug, please report it on the github page (https://github.com/Fannovel16/comfyui_controlnet_aux/issues)" + ) + return node_class_mappings, node_display_name_mappings + +AUX_NODE_MAPPINGS, AUX_DISPLAY_NAME_MAPPINGS = load_nodes() + +AIO_NOT_SUPPORTED = ["InpaintPreprocessor"] +#For nodes not mapping image to image + +class AIO_Preprocessor: + @classmethod + def INPUT_TYPES(s): + auxs = list(AUX_NODE_MAPPINGS.keys()) + for name in AIO_NOT_SUPPORTED: + if name in auxs: auxs.remove(name) + + return create_node_input_types( + preprocessor=(auxs, {"default": "CannyEdgePreprocessor"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors" + + def execute(self, preprocessor, image, resolution=512): + aux_class = AUX_NODE_MAPPINGS[preprocessor] + input_types = aux_class.INPUT_TYPES() + input_types = { + **input_types["required"], + **(input_types["optional"] if "optional" in input_types else {}) + } + params = {} + for name, input_type in input_types.items(): + if name == "image": + params[name] = image + continue + + if name == "resolution": + params[name] = resolution + continue + + if len(input_type) == 2 and ("default" in input_type[1]): + params[name] = input_type[1]["default"] + continue + + default_values = { "INT": 0, "FLOAT": 0.0 } + if input_type[0] in default_values: + params[name] = default_values[input_type[0]] + + return getattr(aux_class(), aux_class.FUNCTION)(**params) + + +NODE_CLASS_MAPPINGS = { + **AUX_NODE_MAPPINGS, + "AIO_Preprocessor": AIO_Preprocessor, + **HIE_NODE_CLASS_MAPPINGS +} + +NODE_DISPLAY_NAME_MAPPINGS = { + **AUX_DISPLAY_NAME_MAPPINGS, + "AIO_Preprocessor": "AIO Aux Preprocessor", + **HIE_NODE_DISPLAY_NAME_MAPPINGS +} diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ea7e440a0a68ad070609ff4dd72a66ab02cdb46 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5eee383ec9550409263dcc945d4451302406d6f3 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ccaba1a1e64889975c8e2d17857526ffdd0345b Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-310.pyc differ diff --git 
a/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9051533c941cd9d35ffd124ff6d8ccc5efa13a31 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/hint_image_enchance.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53df5f6ab4eee3ead8b0da00a30d56ab2358b4c1 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26abfc3f79d349593215c173d1a98ecca365c3bf Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/log.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92d9e62689ef290fdd78b6f2c0f9ca3861dafb4a Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d79b17e4c03efb5bd3c9f22a21797e52d6db1196 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/lvminthin.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3770041f709845e2ec75ecf7490e3deadd4a1c2b Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9f49f5893696dee3c65d172ceb7e6dd114f7359 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/__pycache__/utils.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/ControlNetHED.pth b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/ControlNetHED.pth new file mode 100644 index 0000000000000000000000000000000000000000..e0edbff99b09b7241441fb1f9f25187e0f1ff5c9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/ControlNetHED.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ca93762ffd68a29fee1af9d495bf6aab80ae86f08905fb35472a083a4c7a8fa +size 29444406 diff --git a/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/body_pose_model.pth b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/body_pose_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..9acb77e68f31906a8875f1daef2f3f7ef94acb1e --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/body_pose_model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25a948c16078b0f08e236bda51a385d855ef4c153598947c28c0d47ed94bb746
+size 209267595
diff --git a/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/facenet.pth b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/facenet.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ccfac27ffec2f25eb02dad5f52512872eb3b53e1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/facenet.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8beb52e548624ffcc4aed12af7aee7dcbfaeea420c75609fee999fe7add79d43
+size 153718792
diff --git a/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/hand_pose_model.pth b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/hand_pose_model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f23ccf3413cc8ac8581a82338a3037bc10d573f0
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/ckpts/lllyasviel/Annotators/hand_pose_model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b76b00d1750901abd07b9f9d8c98cc3385b8fe834a26d4b4f0aad439e75fc600
+size 147341049
diff --git a/custom_nodes/comfyui_controlnet_aux/config.example.yaml b/custom_nodes/comfyui_controlnet_aux/config.example.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5128a63c040e1d85aac22c57153860727761ff1e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/config.example.yaml
@@ -0,0 +1,15 @@
+# This is an example config.yaml file; rename it to config.yaml if you want to use it
+# ###############################################################################################
+# You can also use absolute paths like: "/root/ComfyUI/custom_nodes/comfyui_controlnet_aux/ckpts" or "D:\\comfyui\\custom_nodes\\comfyui_controlnet_aux\\ckpts"
+annotator_ckpts_path: "./ckpts"
+# ###############################################################################################
+# If you have already downloaded ckpts via huggingface hub into the default cache path (e.g. ~/.cache/huggingface/hub), you can set this to True to use symlinks and save space
+USE_SYMLINKS: False
+# ###############################################################################################
+# EP_list is a list of execution providers for onnxruntime; if one of them is not available or not working well, you can delete that provider from here (config.yaml)
+# You can find all available providers here: https://onnxruntime.ai/docs/execution-providers
+# For example, if you have CUDA installed, you can set it to: ["CUDAExecutionProvider", "CPUExecutionProvider"]
+# An empty list, or keeping only ["CPUExecutionProvider"], means cv2.dnn.readNetFromONNX is used to load onnx models
+# If your onnx models can only run on the CPU or have other issues, we recommend using the pt models instead.
+# default value is ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"] +EP_list: ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/dev_interface.py b/custom_nodes/comfyui_controlnet_aux/dev_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..77413c867c0df9c0c1397061ac837b39bf98098d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/dev_interface.py @@ -0,0 +1,6 @@ +from pathlib import Path +from utils import here +import sys +sys.path.append(str(Path(here, "src"))) + +from controlnet_aux import * \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/example_animal_pose.png b/custom_nodes/comfyui_controlnet_aux/example_animal_pose.png new file mode 100644 index 0000000000000000000000000000000000000000..11443aff62ef27bfe924be12c7b23a666fa00ff4 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/example_animal_pose.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/example_anime_face_segmentor.png b/custom_nodes/comfyui_controlnet_aux/example_anime_face_segmentor.png new file mode 100644 index 0000000000000000000000000000000000000000..047d07b2fc65a1a10d72d489d8bd73fb586403a4 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/example_anime_face_segmentor.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/example_densepose.png b/custom_nodes/comfyui_controlnet_aux/example_densepose.png new file mode 100644 index 0000000000000000000000000000000000000000..1d971ea70922e273d2d19c669a43319cf6ef2e9c Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/example_densepose.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/example_onnx.png b/custom_nodes/comfyui_controlnet_aux/example_onnx.png new file mode 100644 index 0000000000000000000000000000000000000000..f3f9ad5a45e2ce33b03883446b55fb487c059a00 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/example_onnx.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/example_torchscript.png b/custom_nodes/comfyui_controlnet_aux/example_torchscript.png new file mode 100644 index 0000000000000000000000000000000000000000..0a685f9cea265c5bc2057f567da0d93614f8ce9a Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/example_torchscript.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/hint_image_enchance.py b/custom_nodes/comfyui_controlnet_aux/hint_image_enchance.py new file mode 100644 index 0000000000000000000000000000000000000000..cb2a06974d2e23e53218706248d6340ed99de61a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/hint_image_enchance.py @@ -0,0 +1,233 @@ +from .log import log +from .utils import ResizeMode, safe_numpy +import numpy as np +import torch +import cv2 +from .utils import get_unique_axis0 +from .lvminthin import nake_nms, lvmin_thin + +MAX_IMAGEGEN_RESOLUTION = 8192 #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L42 +RESIZE_MODES = [ResizeMode.RESIZE.value, ResizeMode.INNER_FIT.value, ResizeMode.OUTER_FIT.value] + +#Port from https://github.com/Mikubill/sd-webui-controlnet/blob/e67e017731aad05796b9615dc6eadce911298ea1/internal_controlnet/external_code.py#L89 +class PixelPerfectResolution: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_image": ("IMAGE", 
), + "image_gen_width": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}), + "image_gen_height": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}), + #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L854 + "resize_mode": (RESIZE_MODES, {"default": ResizeMode.RESIZE.value}) + } + } + + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("RESOLUTION (INT)", ) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors" + + def execute(self, original_image, image_gen_width, image_gen_height, resize_mode): + _, raw_H, raw_W, _ = original_image.shape + + k0 = float(image_gen_height) / float(raw_H) + k1 = float(image_gen_width) / float(raw_W) + + if resize_mode == ResizeMode.OUTER_FIT.value: + estimation = min(k0, k1) * float(min(raw_H, raw_W)) + else: + estimation = max(k0, k1) * float(min(raw_H, raw_W)) + + log.debug(f"Pixel Perfect Computation:") + log.debug(f"resize_mode = {resize_mode}") + log.debug(f"raw_H = {raw_H}") + log.debug(f"raw_W = {raw_W}") + log.debug(f"target_H = {image_gen_height}") + log.debug(f"target_W = {image_gen_width}") + log.debug(f"estimation = {estimation}") + + return (int(np.round(estimation)), ) + +class HintImageEnchance: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "hint_image": ("IMAGE", ), + "image_gen_width": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}), + "image_gen_height": ("INT", {"default": 512, "min": 64, "max": MAX_IMAGEGEN_RESOLUTION, "step": 8}), + #https://github.com/comfyanonymous/ComfyUI/blob/c910b4a01ca58b04e5d4ab4c747680b996ada02b/nodes.py#L854 + "resize_mode": (RESIZE_MODES, {"default": ResizeMode.RESIZE.value}) + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors" + def execute(self, hint_image, image_gen_width, image_gen_height, resize_mode): + outs = [] + for single_hint_image in hint_image: + np_hint_image = np.asarray(single_hint_image * 255., dtype=np.uint8) + + if resize_mode == ResizeMode.RESIZE.value: + np_hint_image = self.execute_resize(np_hint_image, image_gen_width, image_gen_height) + elif resize_mode == ResizeMode.OUTER_FIT.value: + np_hint_image = self.execute_outer_fit(np_hint_image, image_gen_width, image_gen_height) + else: + np_hint_image = self.execute_inner_fit(np_hint_image, image_gen_width, image_gen_height) + + outs.append(torch.from_numpy(np_hint_image.astype(np.float32) / 255.0)) + + return (torch.stack(outs, dim=0),) + + def execute_resize(self, detected_map, w, h): + detected_map = self.high_quality_resize(detected_map, (w, h)) + detected_map = safe_numpy(detected_map) + return detected_map + + def execute_outer_fit(self, detected_map, w, h): + old_h, old_w, _ = detected_map.shape + old_w = float(old_w) + old_h = float(old_h) + k0 = float(h) / old_h + k1 = float(w) / old_w + safeint = lambda x: int(np.round(x)) + k = min(k0, k1) + + borders = np.concatenate([detected_map[0, :, :], detected_map[-1, :, :], detected_map[:, 0, :], detected_map[:, -1, :]], axis=0) + high_quality_border_color = np.median(borders, axis=0).astype(detected_map.dtype) + if len(high_quality_border_color) == 4: + # Inpaint hijack + high_quality_border_color[3] = 255 + high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1]) + detected_map = self.high_quality_resize(detected_map, (safeint(old_w * k), safeint(old_h * k))) + new_h, new_w, _ = detected_map.shape + pad_h = max(0, (h - new_h) // 2) + pad_w = max(0, (w - 
new_w) // 2) + high_quality_background[pad_h:pad_h + new_h, pad_w:pad_w + new_w] = detected_map + detected_map = high_quality_background + detected_map = safe_numpy(detected_map) + return detected_map + + def execute_inner_fit(self, detected_map, w, h): + old_h, old_w, _ = detected_map.shape + old_w = float(old_w) + old_h = float(old_h) + k0 = float(h) / old_h + k1 = float(w) / old_w + safeint = lambda x: int(np.round(x)) + k = max(k0, k1) + + detected_map = self.high_quality_resize(detected_map, (safeint(old_w * k), safeint(old_h * k))) + new_h, new_w, _ = detected_map.shape + pad_h = max(0, (new_h - h) // 2) + pad_w = max(0, (new_w - w) // 2) + detected_map = detected_map[pad_h:pad_h+h, pad_w:pad_w+w] + detected_map = safe_numpy(detected_map) + return detected_map + + def high_quality_resize(self, x, size): + # Written by lvmin + # Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges + + inpaint_mask = None + if x.ndim == 3 and x.shape[2] == 4: + inpaint_mask = x[:, :, 3] + x = x[:, :, 0:3] + + if x.shape[0] != size[1] or x.shape[1] != size[0]: + new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1]) + new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1]) + unique_color_count = len(get_unique_axis0(x.reshape(-1, x.shape[2]))) + is_one_pixel_edge = False + is_binary = False + if unique_color_count == 2: + is_binary = np.min(x) < 16 and np.max(x) > 240 + if is_binary: + xc = x + xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) + xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1) + one_pixel_edge_count = np.where(xc < x)[0].shape[0] + all_edge_count = np.where(x > 127)[0].shape[0] + is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count + + if 2 < unique_color_count < 200: + interpolation = cv2.INTER_NEAREST + elif new_size_is_smaller: + interpolation = cv2.INTER_AREA + else: + interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. 
NEVER CHANGE THIS + + y = cv2.resize(x, size, interpolation=interpolation) + if inpaint_mask is not None: + inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation) + + if is_binary: + y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8) + if is_one_pixel_edge: + y = nake_nms(y) + _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + y = lvmin_thin(y, prunings=new_size_is_bigger) + else: + _, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + y = np.stack([y] * 3, axis=2) + else: + y = x + + if inpaint_mask is not None: + inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0 + inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8) + y = np.concatenate([y, inpaint_mask], axis=2) + + return y + + +class ImageGenResolutionFromLatent: + @classmethod + def INPUT_TYPES(s): + return { + "required": { "latent": ("LATENT", ) } + } + + RETURN_TYPES = ("INT", "INT") + RETURN_NAMES = ("IMAGE_GEN_WIDTH (INT)", "IMAGE_GEN_HEIGHT (INT)") + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors" + + def execute(self, latent): + _, _, H, W = latent["samples"].shape + return (W * 8, H * 8) + +class ImageGenResolutionFromImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { "image": ("IMAGE", ) } + } + + RETURN_TYPES = ("INT", "INT") + RETURN_NAMES = ("IMAGE_GEN_WIDTH (INT)", "IMAGE_GEN_HEIGHT (INT)") + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors" + + def execute(self, image): + _, H, W, _ = image.shape + return (W, H) + +NODE_CLASS_MAPPINGS = { + "PixelPerfectResolution": PixelPerfectResolution, + "ImageGenResolutionFromImage": ImageGenResolutionFromImage, + "ImageGenResolutionFromLatent": ImageGenResolutionFromLatent, + "HintImageEnchance": HintImageEnchance +} +NODE_DISPLAY_NAME_MAPPINGS = { + "PixelPerfectResolution": "Pixel Perfect Resolution", + "ImageGenResolutionFromImage": "Generation Resolution From Image", + "ImageGenResolutionFromLatent": "Generation Resolution From Latent", + "HintImageEnchance": "Enchance And Resize Hint Images" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/install.bat b/custom_nodes/comfyui_controlnet_aux/install.bat new file mode 100644 index 0000000000000000000000000000000000000000..c36a67448534a5febc2a83d4ebef4cfa49fa6deb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/install.bat @@ -0,0 +1,20 @@ +@echo off + +set "requirements_txt=%~dp0\requirements.txt" +set "python_exec=..\..\..\python_embeded\python.exe" + +echo Installing ComfyUI's ControlNet Auxiliary Preprocessors.. 
+
+if exist "%python_exec%" (
+    echo Installing with ComfyUI Portable
+    for /f "delims=" %%i in (%requirements_txt%) do (
+        %python_exec% -s -m pip install "%%i"
+    )
+) else (
+    echo Installing with system Python
+    for /f "delims=" %%i in (%requirements_txt%) do (
+        pip install "%%i"
+    )
+)
+
+pause
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/log.py b/custom_nodes/comfyui_controlnet_aux/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..2978c6dc770c78feddc2d0dece7c0b6a91ed23f0
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/log.py
@@ -0,0 +1,80 @@
+#Credit: https://github.com/melMass/comfy_mtb/blob/main/log.py
+import logging
+import re
+import os
+
+base_log_level = logging.INFO
+
+
+# Custom object that discards the output
+class NullWriter:
+    def write(self, text):
+        pass
+
+
+class Formatter(logging.Formatter):
+    grey = "\x1b[38;20m"
+    cyan = "\x1b[36;20m"
+    purple = "\x1b[35;20m"
+    yellow = "\x1b[33;20m"
+    red = "\x1b[31;20m"
+    bold_red = "\x1b[31;1m"
+    reset = "\x1b[0m"
+    # format = "%(asctime)s - [%(name)s] - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
+    format = "[%(name)s] | %(levelname)s -> %(message)s"
+
+    FORMATS = {
+        logging.DEBUG: purple + format + reset,
+        logging.INFO: cyan + format + reset,
+        logging.WARNING: yellow + format + reset,
+        logging.ERROR: red + format + reset,
+        logging.CRITICAL: bold_red + format + reset,
+    }
+
+    def format(self, record):
+        log_fmt = self.FORMATS.get(record.levelno)
+        formatter = logging.Formatter(log_fmt)
+        return formatter.format(record)
+
+
+def mklog(name, level=base_log_level):
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+
+    # Iterate over a copy, since removeHandler mutates the list
+    for handler in logger.handlers[:]:
+        logger.removeHandler(handler)
+
+    ch = logging.StreamHandler()
+    ch.setLevel(level)
+    ch.setFormatter(Formatter())
+    logger.addHandler(ch)
+
+    # Disable log propagation
+    logger.propagate = False
+
+    return logger
+
+
+# - The main app logger
+log = mklog(__package__, base_log_level)
+
+
+def log_user(arg):
+    print(f"\033[34mComfyUI ControlNet AUX:\033[0m {arg}")
+
+
+def get_summary(docstring):
+    return docstring.strip().split("\n\n", 1)[0]
+
+
+def blue_text(text):
+    return f"\033[94m{text}\033[0m"
+
+
+def cyan_text(text):
+    return f"\033[96m{text}\033[0m"
+
+
+def get_label(label):
+    words = re.findall(r"(?:^|[A-Z])[a-z]*", label)
+    return " ".join(words).strip()
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/lvminthin.py b/custom_nodes/comfyui_controlnet_aux/lvminthin.py
new file mode 100644
index 0000000000000000000000000000000000000000..eebe0fb6d4967d0f6c38c0117ce4775d46d0be08
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/lvminthin.py
@@ -0,0 +1,87 @@
+# High Quality Edge Thinning using Pure Python
+# Written by Lvmin Zhang
+# 2023 April
+# Stanford University
+# If you use this, please Cite "High Quality Edge Thinning using Pure Python", Lvmin Zhang, In Mikubill/sd-webui-controlnet.
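+#
+# How the thinning below works, in short: each 3x3 kernel is a hit-or-miss
+# pattern (1 = must be foreground, -1 = must be background, 0 = don't care),
+# applied in all four 90-degree rotations. Pixels matching a pattern are
+# erased, and the pass repeats (up to 32 times) until the skeleton stops
+# changing; the optional pruning kernels trim one-pixel spurs between passes.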
+ + +import cv2 +import numpy as np + + +lvmin_kernels_raw = [ + np.array([ + [-1, -1, -1], + [0, 1, 0], + [1, 1, 1] + ], dtype=np.int32), + np.array([ + [0, -1, -1], + [1, 1, -1], + [0, 1, 0] + ], dtype=np.int32) +] + +lvmin_kernels = [] +lvmin_kernels += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_kernels_raw] +lvmin_kernels += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_kernels_raw] +lvmin_kernels += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_kernels_raw] +lvmin_kernels += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_kernels_raw] + +lvmin_prunings_raw = [ + np.array([ + [-1, -1, -1], + [-1, 1, -1], + [0, 0, -1] + ], dtype=np.int32), + np.array([ + [-1, -1, -1], + [-1, 1, -1], + [-1, 0, 0] + ], dtype=np.int32) +] + +lvmin_prunings = [] +lvmin_prunings += [np.rot90(x, k=0, axes=(0, 1)) for x in lvmin_prunings_raw] +lvmin_prunings += [np.rot90(x, k=1, axes=(0, 1)) for x in lvmin_prunings_raw] +lvmin_prunings += [np.rot90(x, k=2, axes=(0, 1)) for x in lvmin_prunings_raw] +lvmin_prunings += [np.rot90(x, k=3, axes=(0, 1)) for x in lvmin_prunings_raw] + + +def remove_pattern(x, kernel): + objects = cv2.morphologyEx(x, cv2.MORPH_HITMISS, kernel) + objects = np.where(objects > 127) + x[objects] = 0 + return x, objects[0].shape[0] > 0 + + +def thin_one_time(x, kernels): + y = x + is_done = True + for k in kernels: + y, has_update = remove_pattern(y, k) + if has_update: + is_done = False + return y, is_done + + +def lvmin_thin(x, prunings=True): + y = x + for i in range(32): + y, is_done = thin_one_time(y, lvmin_kernels) + if is_done: + break + if prunings: + y, _ = thin_one_time(y, lvmin_prunings) + return y + + +def nake_nms(x): + f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) + f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) + f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) + f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) + y = np.zeros_like(x) + for f in [f1, f2, f3, f4]: + np.putmask(y, cv2.dilate(x, kernel=f) == x, x) + return y diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01a2f8a05c499aa8cdc08e146fdcb6b30704d2fb Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f4baae6f92594f3b1dcaec5f2e363ed0a9a062e Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/anime_face_segment.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54420ba6ec37f1dc7277329db29409bace4cb2dd Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-311.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..6a99c48ad5d3ae76547902c30fafa7eca8cd55e2 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/binary.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5289a3645b6ffcc9fe63f9d394abde211b04613e Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d71a78eac3c94631fd01b0a4f2b5e3444587c9e Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/canny.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df29b847473308652670ce64129d6cf22ae9125b Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f072cfce91d1d557dc825ef606ebb03cba38715 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/color.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/densepose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/densepose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c80ac7d5129bf89f939917d6af477d6f0138b857 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/densepose.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deef9aef48512dedd335ee665b9e580ede9a8981 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cafbd8ed8f4a5253920f7320042c4b0645cb8645 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/dwpose.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab454d42454d929e02ecd91324f6555177934c95 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-310.pyc differ diff --git 
a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90638a5cc29acf3a6be22b6d87a3118c5e6cbd42 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/hed.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a23c4d3a08f2734d683087474c43f89e46ad9e0c Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa5788495bf062aa4eb241603369892fce283513 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/inpaint.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a1c39de990a2130638c8223276b2e9eabe198e2 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50b42076e6bfdb49274e8f90f606d8a3e95a6be5 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/leres.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..223ef432f53ac7922d41979211de34703fc6ca9d Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99049bba3fab2fa7dbdfa38771d1cc8d5646c2db Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b9bd170e459ab533177ac01723f9f40c04a3db Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-311.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..eda64ed324319c2f940605d9f297e1c0c65aa218 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/lineart_anime.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecf9a42e79330d705b9d01edcd3806f37585147f Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ddd3dfa456a21e9ddcb3f666effdb5235e1305b Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/manga_line.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..301da8758070d3e83a4172af95bc6f3ed04e6711 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc21692ab5995f3cae9240b955e42370fd7b988f Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mediapipe_face.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd8688573204881a1637e4ced26f8e4b8dafe81a Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c08d00bfaeada986d42918af63ad43e0a3f494b2 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/midas.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb54b5e9701688f59e9afba445f5dc774811ecfd Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc824cc66064a1e471944272c563c60d5eb3b907 Binary files /dev/null and 
b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/mlsd.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd0aad0c19d7dcfffa7ce42db076e8985073dc8 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bb3091389594567212c5a0991fc34269ff68219 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/normalbae.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a56dcf5b2121d59f9d5f3e63a6ef789b76af12c Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db06aacc72f194d2deaba5de2352f553f574c2ea Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/oneformer.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8adf9bd72a228225100e7808d28070634d07d640 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f82ab1da06d2306bc449a55fc8c4bf88d7f284f Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/openpose.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d62c71fd9cdb0feb1037075ce95907a3b362631a Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e60203717e3e6d45666ef015d37f14f14f452ca6 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/pidinet.cpython-311.pyc differ diff --git 
a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81bd2a2d222410526e857a30d9d3f438200274c8 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b6250c03fd2702c08b4767cc9ea8309edea6aa0 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/scribble.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7046548afd24723174171855e7f4da90861180db Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fb769416741d58c8be7d63f2558354aaa517cf9 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/segment_anything.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7594789d80c9e9a30f6d5c4104af96e466d293b Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49d5b723b89b2725095ef07bfe6d2f3a89f12791 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/shuffle.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b7e9494a076e8d78522f1bbbfad625afaab48cf Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8aec8c3d703e8aea2c00fc12f8cbaccb46634ba9 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/tile.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-310.pyc 
b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee3fd574af82ba65338667ee32de2bb20cbf7743 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff01b13d97ebaff60af7482cc48f21ab9c6c95a3 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/uniformer.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..769c67e733545eced1f2dc13ecb035435994f488 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bcb71fcd36da38ac62927e9ada40a6e00e8fb89 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/node_wrappers/__pycache__/zoe.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/anime_face_segment.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/anime_face_segment.py new file mode 100644 index 0000000000000000000000000000000000000000..82056794d2440d08f9fcf2c131f9d0b58eb93293 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/anime_face_segment.py @@ -0,0 +1,48 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, ANIFACESEG_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management +import torch +from einops import rearrange + +class AnimeFace_SemSegPreprocessor: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",) + }, + "optional": { + #This preprocessor is only trained on 512x resolution + #https://github.com/siyeong0/Anime-Face-Segmentation/blob/main/predict.py#L25 + "remove_background_using_abg": ("BOOLEAN", {"default": True}), + "resolution": ("INT", {"default": 512, "min": 512, "max": 512, "step": 64}) + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + RETURN_NAMES = ("IMAGE", "ABG_CHARACTER_MASK (MASK)") + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Semantic Segmentation" + + def execute(self, image, remove_background_using_abg=True, resolution=512, **kwargs): + from controlnet_aux.anime_face_segment import AnimeFaceSegmentor + + model = AnimeFaceSegmentor.from_pretrained(ANIFACESEG_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + if remove_background_using_abg: + out_image_with_mask = common_annotator_call(model, image, resolution=resolution, remove_background=True) + out_image = out_image_with_mask[..., :3] + mask = out_image_with_mask[..., 3:] + mask = rearrange(mask, "n h w c -> n c h w") + else: + out_image = common_annotator_call(model, image, resolution=resolution, remove_background=False) + N, H, W, C = out_image.shape + mask = torch.ones(N, C, H, W) + del model + return (out_image, mask) + 
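# Aside - a hedged sketch, not part of the patch: the slice/rearrange in execute() above
# relies on ComfyUI's tensor layouts (IMAGE is channel-last (N, H, W, C) floats in [0, 1];
# the MASK output here is made channel-first). The 512x512 RGBA batch below is an
# illustrative stand-in for the annotator's output, not a value from the commit.

import torch
from einops import rearrange

out_image_with_mask = torch.rand(1, 512, 512, 4)  # hypothetical RGBA result, (N, H, W, C)
out_image = out_image_with_mask[..., :3]          # RGB channels -> IMAGE output
mask = out_image_with_mask[..., 3:]               # alpha channel, still (N, H, W, 1)
mask = rearrange(mask, "n h w c -> n c h w")      # channel-first, as the node returns it
assert out_image.shape == (1, 512, 512, 3)
assert mask.shape == (1, 1, 512, 512)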
+NODE_CLASS_MAPPINGS = { + "AnimeFace_SemSegPreprocessor": AnimeFace_SemSegPreprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "AnimeFace_SemSegPreprocessor": "Anime Face Segmentor" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/binary.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..9787081d802f6621c901c894dcfd2f93c04cebd7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/binary.py @@ -0,0 +1,28 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Binary_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + bin_threshold=("INT", {"default": 100, "min": 0, "max": 255, "step": 1}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, bin_threshold, resolution=512, **kwargs): + from controlnet_aux.binary import BinaryDetector + + return (common_annotator_call(BinaryDetector(), image, bin_threshold=bin_threshold, resolution=resolution), ) + + + +NODE_CLASS_MAPPINGS = { + "BinaryPreprocessor": Binary_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "BinaryPreprocessor": "Binary Lines" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/canny.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/canny.py new file mode 100644 index 0000000000000000000000000000000000000000..d76c723402dc1a200a47bdc84e396c1316e11252 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/canny.py @@ -0,0 +1,29 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Canny_Edge_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + low_threshold=("INT", {"default": 100, "min": 0, "max": 255, "step": 1}), + high_threshold=("INT", {"default": 200, "min": 0, "max": 255, "step": 1}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, low_threshold, high_threshold, resolution=512, **kwargs): + from controlnet_aux.canny import CannyDetector + + return (common_annotator_call(CannyDetector(), image, low_threshold=low_threshold, high_threshold=high_threshold, resolution=resolution), ) + + + +NODE_CLASS_MAPPINGS = { + "CannyEdgePreprocessor": Canny_Edge_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "CannyEdgePreprocessor": "Canny Edge" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/color.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/color.py new file mode 100644 index 0000000000000000000000000000000000000000..41aee91a1f6a9e5c6268bb8237b6ea595e8e0270 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/color.py @@ -0,0 +1,26 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Color_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/T2IAdapter-only" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.color import 
ColorDetector + + return (common_annotator_call(ColorDetector(), image, resolution=resolution), ) + + + +NODE_CLASS_MAPPINGS = { + "ColorPreprocessor": Color_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "ColorPreprocessor": "Color Palette" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/densepose.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/densepose.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b6bd153d604d05635cc38d0e823fd55d99c4cd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/densepose.py @@ -0,0 +1,31 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class DensePose_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + model=(["densepose_r50_fpn_dl.torchscript", "densepose_r101_fpn_dl.torchscript"], {"default": "densepose_r50_fpn_dl.torchscript"}), + cmap=(["Viridis (MagicAnimate)", "Parula (CivitAI)"], {"default": "Viridis (MagicAnimate)"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Faces and Poses" + + def execute(self, image, model, cmap, resolution=512): + from controlnet_aux.densepose import DenseposeDetector + return (common_annotator_call( + DenseposeDetector.from_pretrained("hr16/DensePose-TorchScript-with-hint-image", model).to(model_management.get_torch_device()), + image, + cmap="viridis" if "Viridis" in cmap else "parula", + resolution=resolution), ) + + +NODE_CLASS_MAPPINGS = { + "DensePosePreprocessor": DensePose_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "DensePosePreprocessor": "DensePose Estimation" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/dwpose.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/dwpose.py new file mode 100644 index 0000000000000000000000000000000000000000..39f0e4d49335f581928a23a4dde24ae15506a057 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/dwpose.py @@ -0,0 +1,155 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, DWPOSE_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management +import numpy as np +import warnings +from controlnet_aux.dwpose import DwposeDetector, AnimalposeDetector +import os +import json + +#Trigger startup caching for onnxruntime +GPU_PROVIDERS = ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider"] +def check_ort_gpu(): + try: + import onnxruntime as ort + for provider in GPU_PROVIDERS: + if provider in ort.get_available_providers(): + return True + return False + except: + return False + +if not os.environ.get("DWPOSE_ONNXRT_CHECKED"): + if check_ort_gpu(): + print("DWPose: Onnxruntime with acceleration providers detected") + else: + warnings.warn("DWPose: Onnxruntime not found or doesn't come with acceleration providers; falling back to OpenCV on CPU.
DWPose might run very slowly") + os.environ['AUX_ORT_PROVIDERS'] = '' + os.environ["DWPOSE_ONNXRT_CHECKED"] = '1' + +class DWPose_Preprocessor: + @classmethod + def INPUT_TYPES(s): + input_types = create_node_input_types( + detect_hand=(["enable", "disable"], {"default": "enable"}), + detect_body=(["enable", "disable"], {"default": "enable"}), + detect_face=(["enable", "disable"], {"default": "enable"}) + ) + input_types["optional"] = { + **input_types["optional"], + "bbox_detector": ( + ["yolox_l.torchscript.pt", "yolox_m.torchscript.pt", "yolox_s.torchscript.pt", "yolo_nas_l_fp16.onnx", "yolo_nas_m_fp16.onnx", "yolo_nas_s_fp16.onnx", "yolox_l.onnx", "yolox_m.onnx", "yolox_s.onnx"], + {"default": "yolox_l.onnx"} + ), + "pose_estimator": (["dw-ll_ucoco_384_bs5.torchscript.pt", "dw-ll_ucoco_384.onnx", "dw-ll_ucoco.onnx", "dw-mm_ucoco.onnx", "dw-ss_ucoco.onnx"], {"default": "dw-ll_ucoco_384.onnx"}) + } + return input_types + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "estimate_pose" + + CATEGORY = "ControlNet Preprocessors/Faces and Poses" + + def estimate_pose(self, image, detect_hand, detect_body, detect_face, resolution=512, bbox_detector="yolox_l.onnx", pose_estimator="dw-ll_ucoco_384.onnx", **kwargs): + if bbox_detector == "yolox_l.onnx": + yolo_repo = DWPOSE_MODEL_NAME + elif "yolox" in bbox_detector: + yolo_repo = "hr16/yolox-onnx" + elif "yolo_nas" in bbox_detector: + yolo_repo = "hr16/yolo-nas-fp16" + else: + raise NotImplementedError(f"Download mechanism for {bbox_detector}") + + if pose_estimator == "dw-ll_ucoco_384.onnx": + pose_repo = DWPOSE_MODEL_NAME + elif pose_estimator.endswith(".onnx"): + pose_repo = "hr16/UnJIT-DWPose" + elif pose_estimator.endswith(".torchscript.pt"): + pose_repo = "hr16/DWPose-TorchScript-BatchSize5" + else: + raise NotImplementedError(f"Download mechanism for {pose_estimator}") + + model = DwposeDetector.from_pretrained( + pose_repo, + yolo_repo, + cache_dir=annotator_ckpts_path, det_filename=bbox_detector, pose_filename=pose_estimator, + torchscript_device=model_management.get_torch_device() + ) + detect_hand = detect_hand == "enable" + detect_body = detect_body == "enable" + detect_face = detect_face == "enable" + self.openpose_dicts = [] + def func(image, **kwargs): + pose_img, openpose_dict = model(image, **kwargs) + self.openpose_dicts.append(openpose_dict) + return pose_img + + out = common_annotator_call(func, image, include_hand=detect_hand, include_face=detect_face, include_body=detect_body, image_and_json=True, resolution=resolution) + del model + return { + 'ui': { "openpose_json": [json.dumps(self.openpose_dicts, indent=4)] }, + "result": (out, ) + } + +class AnimalPose_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + bbox_detector = ( + ["yolox_l.torchscript.pt", "yolox_m.torchscript.pt", "yolox_s.torchscript.pt", "yolo_nas_l_fp16.onnx", "yolo_nas_m_fp16.onnx", "yolo_nas_s_fp16.onnx", "yolox_l.onnx", "yolox_m.onnx", "yolox_s.onnx"], + {"default": "yolox_l.onnx"} + ), + pose_estimator = (["rtmpose-m_ap10k_256_bs5.torchscript.pt", "rtmpose-m_ap10k_256.onnx"], {"default": "rtmpose-m_ap10k_256.onnx"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "estimate_pose" + + CATEGORY = "ControlNet Preprocessors/Faces and Poses" + + def estimate_pose(self, image, resolution=512, bbox_detector="yolox_l.onnx", pose_estimator="rtmpose-m_ap10k_256.onnx", **kwargs): + if bbox_detector == "yolox_l.onnx": + yolo_repo = DWPOSE_MODEL_NAME + elif "yolox" in bbox_detector: + yolo_repo = "hr16/yolox-onnx" + elif "yolo_nas" in 
bbox_detector: + yolo_repo = "hr16/yolo-nas-fp16" + else: + raise NotImplementedError(f"Download mechanism for {bbox_detector}") + + if pose_estimator == "dw-ll_ucoco_384.onnx": + pose_repo = DWPOSE_MODEL_NAME + elif pose_estimator.endswith(".onnx"): + pose_repo = "hr16/UnJIT-DWPose" + elif pose_estimator.endswith(".torchscript.pt"): + pose_repo = "hr16/DWPose-TorchScript-BatchSize5" + else: + raise NotImplementedError(f"Download mechanism for {pose_estimator}") + + model = AnimalposeDetector.from_pretrained( + pose_repo, + yolo_repo, + cache_dir=annotator_ckpts_path, det_filename=bbox_detector, pose_filename=pose_estimator, + torchscript_device=model_management.get_torch_device() + ) + + self.openpose_dicts = [] + def func(image, **kwargs): + pose_img, openpose_dict = model(image, **kwargs) + self.openpose_dicts.append(openpose_dict) + return pose_img + + out = common_annotator_call(func, image, image_and_json=True, resolution=resolution) + del model + return { + 'ui': { "openpose_json": [json.dumps(self.openpose_dicts, indent=4)] }, + "result": (out, ) + } + +NODE_CLASS_MAPPINGS = { + "DWPreprocessor": DWPose_Preprocessor, + "AnimalPosePreprocessor": AnimalPose_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "DWPreprocessor": "DWPose Estimation", + "AnimalPosePreprocessor": "Animal Pose Estimation (AP10K)" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/hed.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/hed.py new file mode 100644 index 0000000000000000000000000000000000000000..8e93e30664af1ab5547f06b89ab4da9b45c0b404 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/hed.py @@ -0,0 +1,51 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class HED_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + safe=(["enable", "disable"], {"default": "enable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.hed import HEDdetector + + model = HEDdetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, safe = kwargs["safe"] == "enable") + del model + return (out, ) + +class Fake_Scribble_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + safe=(["enable", "disable"], {"default": "enable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.hed import HEDdetector + + model = HEDdetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, scribble=True, safe=kwargs["safe"]=="enable") + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "HEDPreprocessor": HED_Preprocessor, + "FakeScribblePreprocessor": Fake_Scribble_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "HEDPreprocessor": "HED Lines", + "FakeScribblePreprocessor": "Fake Scribble Lines (aka scribble_hed)" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/inpaint.py 
b/custom_nodes/comfyui_controlnet_aux/node_wrappers/inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..2127b1dde4d436950c36ef990ef6aadaa7f72877 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/inpaint.py @@ -0,0 +1,24 @@ +import torch + +class InpaintPreprocessor: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), "mask": ("MASK",)}} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "preprocess" + + CATEGORY = "ControlNet Preprocessors/others" + + def preprocess(self, image, mask): + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(image.shape[1], image.shape[2]), mode="bilinear") + mask = mask.movedim(1,-1).expand((-1,-1,-1,3)) + image = image.clone() + image[mask > 0.5] = -1.0 # set as masked pixel + return (image,) + +NODE_CLASS_MAPPINGS = { + "InpaintPreprocessor": InpaintPreprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "InpaintPreprocessor": "Inpaint Preprocessor" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/leres.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/leres.py new file mode 100644 index 0000000000000000000000000000000000000000..9032f19329aa83e7f57f7a3097b0bd2766dba1ef --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/leres.py @@ -0,0 +1,31 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class LERES_Depth_Map_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + rm_nearest=("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}), + rm_background=("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}), + boost=(["enable", "disable"], {"default": "disable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Normal and Depth Map" + + def execute(self, image, rm_nearest, rm_background, resolution=512, **kwargs): + from controlnet_aux.leres import LeresDetector + + model = LeresDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, thr_a=rm_nearest, thr_b=rm_background, boost=kwargs["boost"] == "enable") + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "LeReS-DepthMapPreprocessor": LERES_Depth_Map_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "LeReS-DepthMapPreprocessor": "LeReS - Depth Map (enable boost for leres++)" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart.py new file mode 100644 index 0000000000000000000000000000000000000000..c00a46bee20eeb3cade762019c84b9f5ddca0b70 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart.py @@ -0,0 +1,29 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class LineArt_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + coarse=(["disable", "enable"], {"default": "disable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.lineart import LineartDetector + + model = 
LineartDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, coarse = kwargs["coarse"] == "enable") + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "LineArtPreprocessor": LineArt_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "LineArtPreprocessor": "Realistic Lineart" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart_anime.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart_anime.py new file mode 100644 index 0000000000000000000000000000000000000000..417853376bfbaa1c582aa2fd3ee69f36ecc382f9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/lineart_anime.py @@ -0,0 +1,27 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class AnimeLineArt_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.lineart_anime import LineartAnimeDetector + + model = LineartAnimeDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "AnimeLineArtPreprocessor": AnimeLineArt_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "AnimeLineArtPreprocessor": "Anime Lineart" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/manga_line.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/manga_line.py new file mode 100644 index 0000000000000000000000000000000000000000..b6a3fb4bdda75ff04fd7bf06b2dd97712f01b89f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/manga_line.py @@ -0,0 +1,27 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Manga2Anime_LineArt_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.manga_line import LineartMangaDetector + + model = LineartMangaDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "Manga2Anime_LineArt_Preprocessor": Manga2Anime_LineArt_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "Manga2Anime_LineArt_Preprocessor": "Manga Lineart (aka lineart_anime_denoise)" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/mediapipe_face.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/mediapipe_face.py new file mode 100644 index 0000000000000000000000000000000000000000..1015f724570bc2081939af584f2fe6b33d5d3c5c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/mediapipe_face.py @@ -0,0 +1,57 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, DWPOSE_MODEL_NAME, create_node_input_types +import 
comfy.model_management as model_management +import os, sys +import subprocess, threading + +#Ref: https://github.com/ltdrdata/ComfyUI-Manager/blob/284e90dc8296a2e1e4f14b4b2d10fba2f52f0e53/__init__.py#L14 +def handle_stream(stream, prefix): + for line in stream: + print(prefix, line, end="") + + +def run_script(cmd, cwd='.'): + process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1) + + stdout_thread = threading.Thread(target=handle_stream, args=(process.stdout, "")) + stderr_thread = threading.Thread(target=handle_stream, args=(process.stderr, "[!]")) + + stdout_thread.start() + stderr_thread.start() + + stdout_thread.join() + stderr_thread.join() + + return process.wait() + +class Media_Pipe_Face_Mesh_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + max_faces=("INT", {"default": 10, "min": 1, "max": 50, "step": 1}), #Which image has more than 50 detectable faces? + min_confidence=("FLOAT", {"default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "detect" + + CATEGORY = "ControlNet Preprocessors/Faces and Poses" + + def detect(self, image, max_faces, min_confidence, resolution=512): + try: + import mediapipe + except ImportError: + run_script([sys.executable, '-s', '-m', 'pip', 'install', 'mediapipe']) + run_script([sys.executable, '-s', '-m', 'pip', 'install', '--upgrade', 'protobuf']) + + #Ref: https://github.com/Fannovel16/comfy_controlnet_preprocessors/issues/70#issuecomment-1677967369 + from controlnet_aux.mediapipe_face import MediapipeFaceDetector + + return (common_annotator_call(MediapipeFaceDetector(), image, max_faces=max_faces, min_confidence=min_confidence, resolution=resolution), ) + +NODE_CLASS_MAPPINGS = { + "MediaPipe-FaceMeshPreprocessor": Media_Pipe_Face_Mesh_Preprocessor +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "MediaPipe-FaceMeshPreprocessor": "MediaPipe Face Mesh" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/midas.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/midas.py new file mode 100644 index 0000000000000000000000000000000000000000..49bc6b2f247c12bb2977ac58d834771e6138389a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/midas.py @@ -0,0 +1,57 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management +import numpy as np + +class MIDAS_Normal_Map_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + a = ("FLOAT", {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.05}), + bg_threshold = ("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Normal and Depth Map" + + def execute(self, image, a, bg_threshold, resolution=512, **kwargs): + from controlnet_aux.midas import MidasDetector + + model = MidasDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + #Dirty hack :)) + cb = lambda image, **kargs: model(image, **kargs)[1] + out = common_annotator_call(cb, image, resolution=resolution, a=a, bg_th=bg_threshold, depth_and_normal=True) + del model + return (out, ) + +class MIDAS_Depth_Map_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + a = ("FLOAT", {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, 
"step": 0.05}), + bg_threshold = ("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Normal and Depth Map" + + def execute(self, image, a, bg_threshold, resolution=512, **kwargs): + from controlnet_aux.midas import MidasDetector + + # Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_depth2image.py + model = MidasDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, a=a, bg_th=bg_threshold) + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "MiDaS-NormalMapPreprocessor": MIDAS_Normal_Map_Preprocessor, + "MiDaS-DepthMapPreprocessor": MIDAS_Depth_Map_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "MiDaS-NormalMapPreprocessor": "MiDaS - Normal Map", + "MiDaS-DepthMapPreprocessor": "MiDaS - Depth Map" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/mlsd.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/mlsd.py new file mode 100644 index 0000000000000000000000000000000000000000..8e6792d2aceccee751a65ecbd7885939762680e0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/mlsd.py @@ -0,0 +1,30 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management +import numpy as np + +class MLSD_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + score_threshold = ("FLOAT", {"default": 0.1, "min": 0.01, "max": 2.0, "step": 0.01}), + dist_threshold = ("FLOAT", {"default": 0.1, "min": 0.01, "max": 20.0, "step": 0.01}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, score_threshold, dist_threshold, resolution=512, **kwargs): + from controlnet_aux.mlsd import MLSDdetector + + model = MLSDdetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, thr_v=score_threshold, thr_d=dist_threshold) + return (out, ) + +NODE_CLASS_MAPPINGS = { + "M-LSDPreprocessor": MLSD_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "M-LSDPreprocessor": "M-LSD Lines" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/normalbae.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/normalbae.py new file mode 100644 index 0000000000000000000000000000000000000000..151a41bb9c1830502bfd48f5192a49cc322f8ebf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/normalbae.py @@ -0,0 +1,27 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class BAE_Normal_Map_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Normal and Depth Map" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.normalbae import NormalBaeDetector + + model = NormalBaeDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out,) + 
+NODE_CLASS_MAPPINGS = { + "BAE-NormalMapPreprocessor": BAE_Normal_Map_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "BAE-NormalMapPreprocessor": "BAE - Normal Map" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/oneformer.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..3fe349e0ce01ffd4f22302499e4577c7b006c2df --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/oneformer.py @@ -0,0 +1,50 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class OneFormer_COCO_SemSegPreprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "semantic_segmentate" + + CATEGORY = "ControlNet Preprocessors/Semantic Segmentation" + + def semantic_segmentate(self, image, resolution=512): + from controlnet_aux.oneformer import OneformerSegmentor + + model = OneformerSegmentor.from_pretrained(HF_MODEL_NAME, "150_16_swin_l_oneformer_coco_100ep.pth", cache_dir=annotator_ckpts_path) + model = model.to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out,) + +class OneFormer_ADE20K_SemSegPreprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "semantic_segmentate" + + CATEGORY = "ControlNet Preprocessors/Semantic Segmentation" + + def semantic_segmentate(self, image, resolution=512): + from controlnet_aux.oneformer import OneformerSegmentor + + model = OneformerSegmentor.from_pretrained(HF_MODEL_NAME, "250_16_swin_l_oneformer_ade20k_160k.pth", cache_dir=annotator_ckpts_path) + model = model.to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out,) + +NODE_CLASS_MAPPINGS = { + "OneFormer-COCO-SemSegPreprocessor": OneFormer_COCO_SemSegPreprocessor, + "OneFormer-ADE20K-SemSegPreprocessor": OneFormer_ADE20K_SemSegPreprocessor +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "OneFormer-COCO-SemSegPreprocessor": "OneFormer COCO Segmentor", + "OneFormer-ADE20K-SemSegPreprocessor": "OneFormer ADE20K Segmentor" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/openpose.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/openpose.py new file mode 100644 index 0000000000000000000000000000000000000000..ed3a648ba4b73a0234b4f5382e3b1aeeaa76a615 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/openpose.py @@ -0,0 +1,46 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, DWPOSE_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class OpenPose_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + detect_hand = (["enable", "disable"], {"default": "enable"}), + detect_body = (["enable", "disable"], {"default": "enable"}), + detect_face = (["enable", "disable"], {"default": "enable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "estimate_pose" + + CATEGORY = "ControlNet Preprocessors/Faces and Poses" + + def estimate_pose(self, image, detect_hand, detect_body, detect_face, resolution=512, **kwargs): + from controlnet_aux.open_pose import OpenposeDetector + + detect_hand = detect_hand == "enable" + detect_body = detect_body 
== "enable" + detect_face = detect_face == "enable" + + + self.openpose_json = None + model = OpenposeDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + + def cb(image, **kwargs): + result = model(image, **kwargs) + self.openpose_json = result[1] + return result[0] + + out = common_annotator_call(cb, image, include_hand=detect_hand, include_face=detect_face, include_body=detect_body, image_and_json=True, resolution=resolution) + del model + return { + 'ui': { "openpose_json": [self.openpose_json] }, + "result": (out, ) + } + +NODE_CLASS_MAPPINGS = { + "OpenposePreprocessor": OpenPose_Preprocessor, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "OpenposePreprocessor": "OpenPose Pose Recognition", +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/pidinet.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/pidinet.py new file mode 100644 index 0000000000000000000000000000000000000000..12b588275597ba0af93a6beb2b47dd1a81f8eb58 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/pidinet.py @@ -0,0 +1,29 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class PIDINET_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + safe=(["enable", "disable"], {"default": "enable"}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, safe, resolution=512, **kwargs): + from controlnet_aux.pidi import PidiNetDetector + + model = PidiNetDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution, safe = safe == "enable") + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "PiDiNetPreprocessor": PIDINET_Preprocessor, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "PiDiNetPreprocessor": "PiDiNet Lines" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/scribble.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/scribble.py new file mode 100644 index 0000000000000000000000000000000000000000..fa93539c8f69e6e5ba7c5e767456aa55520641ad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/scribble.py @@ -0,0 +1,45 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Scribble_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.scribble import ScribbleDetector + + model = ScribbleDetector() + return (common_annotator_call(model, image, resolution=resolution), ) + +class Scribble_XDoG_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + threshold = ("INT", {"default": 32, "min": 1, "max": 64, "step": 64}) + ) + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Line Extractors" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.scribble import ScribbleXDog_Detector + + model = ScribbleXDog_Detector() + return (common_annotator_call(model, image, 
resolution=resolution), ) + +NODE_CLASS_MAPPINGS = { + "ScribblePreprocessor": Scribble_Preprocessor, + "Scribble_XDoG_Preprocessor": Scribble_XDoG_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "ScribblePreprocessor": "Scribble Lines", + "Scribble_XDoG_Preprocessor": "Scribble XDoG Lines" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/segment_anything.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/segment_anything.py new file mode 100644 index 0000000000000000000000000000000000000000..cb61f2e4462f199702880b16d155c64bfa216afe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/segment_anything.py @@ -0,0 +1,27 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class SAM_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/others" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.sam import SamDetector + + mobile_sam = SamDetector.from_pretrained("dhkim2810/MobileSAM", model_type="vit_t", filename="mobile_sam.pt", cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(mobile_sam, image, resolution=resolution) + del mobile_sam + return (out, ) + +NODE_CLASS_MAPPINGS = { + "SAMPreprocessor": SAM_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "SAMPreprocessor": "SAM Segmentor" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/shuffle.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/shuffle.py new file mode 100644 index 0000000000000000000000000000000000000000..f66f25a14fed300c0121ee8342b1ec0438136b30 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/shuffle.py @@ -0,0 +1,24 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Shuffle_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + RETURN_TYPES = ("IMAGE",) + FUNCTION = "preprocess" + + CATEGORY = "ControlNet Preprocessors/T2IAdapter-only" + + def preprocess(self, image, resolution=512): + from controlnet_aux.shuffle import ContentShuffleDetector + + return (common_annotator_call(ContentShuffleDetector(), image, resolution=resolution), ) + +NODE_CLASS_MAPPINGS = { + "ShufflePreprocessor": Shuffle_Preprocessor +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ShufflePreprocessor": "Content Shuffle" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/tile.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/tile.py new file mode 100644 index 0000000000000000000000000000000000000000..01ae86ccc6211390634d2684fa263665ee56cb3d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/tile.py @@ -0,0 +1,29 @@ +from ..utils import common_annotator_call, create_node_input_types + + +class Tile_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types( + pyrUp_iters = ("INT", {"default": 3, "min": 1, "max": 10, "step": 1}) + ) + + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/others" + + def execute(self, image, pyrUp_iters, resolution=512, **kwargs): + from controlnet_aux.tile import TileDetector + + return 
(common_annotator_call(TileDetector(), image, pyrUp_iters=pyrUp_iters, resolution=resolution),) + + +NODE_CLASS_MAPPINGS = { + "TilePreprocessor": Tile_Preprocessor, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "TilePreprocessor": "Tile" +} diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/uniformer.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/uniformer.py new file mode 100644 index 0000000000000000000000000000000000000000..3e1d3c1de8e4216695c18f3d3121cfe7d72dfd1e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/uniformer.py @@ -0,0 +1,29 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Uniformer_SemSegPreprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "semantic_segmentate" + + CATEGORY = "ControlNet Preprocessors/Semantic Segmentation" + + def semantic_segmentate(self, image, resolution=512): + from controlnet_aux.uniformer import UniformerSegmentor + + model = UniformerSegmentor.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "UniFormer-SemSegPreprocessor": Uniformer_SemSegPreprocessor, + "SemSegPreprocessor": Uniformer_SemSegPreprocessor, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "UniFormer-SemSegPreprocessor": "UniFormer Segmentor", + "SemSegPreprocessor": "Semantic Segmentor (legacy, alias for UniFormer)", +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/node_wrappers/zoe.py b/custom_nodes/comfyui_controlnet_aux/node_wrappers/zoe.py new file mode 100644 index 0000000000000000000000000000000000000000..b8e18db00a2a387c5d80ec4e6708b5073cb12927 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/node_wrappers/zoe.py @@ -0,0 +1,27 @@ +from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types +import comfy.model_management as model_management + +class Zoe_Depth_Map_Preprocessor: + @classmethod + def INPUT_TYPES(s): + return create_node_input_types() + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + + CATEGORY = "ControlNet Preprocessors/Normal and Depth Map" + + def execute(self, image, resolution=512, **kwargs): + from controlnet_aux.zoe import ZoeDetector + + model = ZoeDetector.from_pretrained(HF_MODEL_NAME, cache_dir=annotator_ckpts_path).to(model_management.get_torch_device()) + out = common_annotator_call(model, image, resolution=resolution) + del model + return (out, ) + +NODE_CLASS_MAPPINGS = { + "Zoe-DepthMapPreprocessor": Zoe_Depth_Map_Preprocessor +} +NODE_DISPLAY_NAME_MAPPINGS = { + "Zoe-DepthMapPreprocessor": "Zoe - Depth Map" +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/requirements.txt b/custom_nodes/comfyui_controlnet_aux/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..98eab96783fc719a099a3b477f149c4f7263a008 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/requirements.txt @@ -0,0 +1,20 @@ +torch +importlib_metadata +huggingface_hub +scipy +opencv-python>=4.7.0.72 +filelock +numpy +Pillow +einops +torchvision +pyyaml +scikit-image +python-dateutil +mediapipe +svglib +fvcore +yapf +omegaconf +ftfy +addict \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/__init__.py 
b/custom_nodes/comfyui_controlnet_aux/src/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33e7a7f594ef441479257c788e4c0d6e08657fc8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/__init__.py @@ -0,0 +1 @@ +#Dummy file ensuring this package will be recognized \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33e7a7f594ef441479257c788e4c0d6e08657fc8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__init__.py @@ -0,0 +1 @@ +#Dummy file ensuring this package will be recognized \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02afcd848830bb0ef9af77a727196e4ebdf79151 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a07f4ad6236ba5440409cc22d48ad6a35640c63e Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99e8cf09aed18506155e8d47023105f4279857b8 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77ce0e8dc2353614ae72c70080d0086c9d54c453 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/__pycache__/util.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..736c4ba7f5a8c17422366f3a324b8f6c1460ef5c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/__init__.py @@ -0,0 +1,67 @@ +from .network import UNet +from .util import seg2img +import torch +import os +import cv2 +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download +from huggingface_hub import hf_hub_download +from PIL import Image +from einops import rearrange +from .anime_segmentation import AnimeSegmentation +import numpy as np + +class AnimeFaceSegmentor: + def __init__(self, model, seg_model): + self.model = model + self.seg_model = seg_model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path=None, filename=None, seg_filename=None, 
cache_dir=annotator_ckpts_path): + filename = filename or "UNet.pth" + seg_filename = seg_filename or "isnetis.ckpt" + model_path = custom_hf_download(pretrained_model_or_path, filename, subfolder="Annotators", cache_dir=cache_dir) + seg_model_path = custom_hf_download("skytnt/anime-seg", seg_filename, cache_dir=cache_dir) + + model = UNet() + ckpt = torch.load(model_path) + model.load_state_dict(ckpt) + model.eval() + + seg_model = AnimeSegmentation(seg_model_path) + seg_model.net.eval() + return cls(model, seg_model) + + def to(self, device): + self.model.to(device) + self.seg_model.net.to(device) + return self + + def __call__(self, input_image, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", remove_background=True, **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + device = next(iter(self.model.parameters())).device + + with torch.no_grad(): + if remove_background: + print(input_image.shape) + mask, input_image = self.seg_model(input_image, 0) #Don't resize image as it is resized + image_feed = torch.from_numpy(input_image).float().to(device) + image_feed = rearrange(image_feed, 'h w c -> 1 c h w') + image_feed = image_feed / 255 + seg = self.model(image_feed).squeeze(dim=0) + result = seg2img(seg.cpu().detach().numpy()) + + detected_map = HWC3(result) + detected_map = remove_pad(detected_map) + if remove_background: + mask = remove_pad(mask) + H, W, C = detected_map.shape + tmp = np.zeros([H, W, C + 1]) + tmp[:,:,:C] = detected_map + tmp[:,:,3:] = mask + detected_map = tmp + + if output_type == "pil": + detected_map = Image.fromarray(detected_map[..., :3]) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/anime_segmentation.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/anime_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..ac4ab9c0effe4a84b3a76fc1b2cffd373109a035 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/anime_segmentation.py @@ -0,0 +1,58 @@ +#https://github.com/SkyTNT/anime-segmentation/tree/main +#Only adapt isnet_is (https://huggingface.co/skytnt/anime-seg/blob/main/isnetis.ckpt) +import torch.nn as nn +import torch +from .isnet import ISNetDIS +import numpy as np +import cv2 +from comfy.model_management import get_torch_device +DEVICE = get_torch_device() + +class AnimeSegmentation: + def __init__(self, ckpt_path): + super(AnimeSegmentation).__init__() + sd = torch.load(ckpt_path, map_location="cpu") + self.net = ISNetDIS() + #gt_encoder isn't used during inference + self.net.load_state_dict({k.replace("net.", ''):v for k, v in sd.items() if k.startswith("net.")}) + self.net = self.net.to(DEVICE) + self.net.eval() + + def get_mask(self, input_img, s=640): + input_img = (input_img / 255).astype(np.float32) + if s == 0: + img_input = np.transpose(input_img, (2, 0, 1)) + img_input = img_input[np.newaxis, :] + tmpImg = torch.from_numpy(img_input).float().to(DEVICE) + with torch.no_grad(): + pred = self.net(tmpImg)[0][0].sigmoid() #https://github.com/SkyTNT/anime-segmentation/blob/main/train.py#L92C20-L92C47 + pred = pred.cpu().numpy()[0] + pred = np.transpose(pred, (1, 2, 0)) + #pred = pred[:, :, np.newaxis] + return pred + + h, w = h0, w0 = input_img.shape[:-1] + h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) + ph, pw = s 
- h, s - w + img_input = np.zeros([s, s, 3], dtype=np.float32) + img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(input_img, (w, h)) + img_input = np.transpose(img_input, (2, 0, 1)) + img_input = img_input[np.newaxis, :] + tmpImg = torch.from_numpy(img_input).float().to(DEVICE) + with torch.no_grad(): + pred = self.net(tmpImg)[0][0].sigmoid() #https://github.com/SkyTNT/anime-segmentation/blob/main/train.py#L92C20-L92C47 + pred = pred.cpu().numpy()[0] + pred = np.transpose(pred, (1, 2, 0)) + pred = pred[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] + #pred = cv2.resize(pred, (w0, h0))[:, :, np.newaxis] + pred = cv2.resize(pred, (w0, h0)) + return pred + + def __call__(self, np_img, img_size): + mask = self.get_mask(np_img, int(img_size)) + np_img = (mask * np_img + 255 * (1 - mask)).astype(np.uint8) + mask = (mask * 255).astype(np.uint8) + #np_img = np.concatenate([np_img, mask], axis=2, dtype=np.uint8) + #mask = mask.repeat(3, axis=2) + return mask, np_img + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/isnet.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/isnet.py new file mode 100644 index 0000000000000000000000000000000000000000..8a0a504ecadf426358c8accee62d3f35129b0a11 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/isnet.py @@ -0,0 +1,619 @@ +# Codes are borrowed from +# https://github.com/xuebinqin/DIS/blob/main/IS-Net/models/isnet.py + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import models + +bce_loss = nn.BCEWithLogitsLoss(reduction="mean") + + +def muti_loss_fusion(preds, target): + loss0 = 0.0 + loss = 0.0 + + for i in range(0, len(preds)): + if preds[i].shape[2] != target.shape[2] or preds[i].shape[3] != target.shape[3]: + tmp_target = F.interpolate( + target, size=preds[i].size()[2:], mode="bilinear", align_corners=True + ) + loss = loss + bce_loss(preds[i], tmp_target) + else: + loss = loss + bce_loss(preds[i], target) + if i == 0: + loss0 = loss + return loss0, loss + + +fea_loss = nn.MSELoss(reduction="mean") +kl_loss = nn.KLDivLoss(reduction="mean") +l1_loss = nn.L1Loss(reduction="mean") +smooth_l1_loss = nn.SmoothL1Loss(reduction="mean") + + +def muti_loss_fusion_kl(preds, target, dfs, fs, mode="MSE"): + loss0 = 0.0 + loss = 0.0 + + for i in range(0, len(preds)): + if preds[i].shape[2] != target.shape[2] or preds[i].shape[3] != target.shape[3]: + tmp_target = F.interpolate( + target, size=preds[i].size()[2:], mode="bilinear", align_corners=True + ) + loss = loss + bce_loss(preds[i], tmp_target) + else: + loss = loss + bce_loss(preds[i], target) + if i == 0: + loss0 = loss + + for i in range(0, len(dfs)): + df = dfs[i] + fs_i = fs[i] + if mode == "MSE": + loss = loss + fea_loss( + df, fs_i + ) ### add the mse loss of features as additional constraints + elif mode == "KL": + loss = loss + kl_loss(F.log_softmax(df, dim=1), F.softmax(fs_i, dim=1)) + elif mode == "MAE": + loss = loss + l1_loss(df, fs_i) + elif mode == "SmoothL1": + loss = loss + smooth_l1_loss(df, fs_i) + + return loss0, loss + + +class REBNCONV(nn.Module): + def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1): + super(REBNCONV, self).__init__() + + self.conv_s1 = nn.Conv2d( + in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride + ) + self.bn_s1 = nn.BatchNorm2d(out_ch) + self.relu_s1 = nn.ReLU(inplace=True) + + def forward(self, x): + hx = x + xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) + + return xout + 
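# Aside - an illustrative check, not part of the patch: REBNCONV pads by 1 * dirate while
# dilating by the same factor on a 3x3 kernel, so at stride 1 the spatial size is
# preserved for every dilation rate. The RSU blocks below lean on that invariant when
# concatenating encoder and decoder features. Assumes the REBNCONV class defined above.

import torch

for dirate in (1, 2, 4, 8):
    block = REBNCONV(in_ch=3, out_ch=8, dirate=dirate)
    y = block(torch.randn(1, 3, 64, 64))
    assert y.shape == (1, 8, 64, 64)  # padding == dilation keeps H and W fixed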
+ +## upsample tensor 'src' to have the same spatial size with tensor 'tar' +def _upsample_like(src, tar): + src = F.interpolate(src, size=tar.shape[2:], mode="bilinear", align_corners=False) + + return src + + +### RSU-7 ### +class RSU7(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512): + super(RSU7, self).__init__() + + self.in_ch = in_ch + self.mid_ch = mid_ch + self.out_ch = out_ch + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) ## 1 -> 1/2 + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + b, c, h, w = x.shape + + hx = x + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + hx = self.pool5(hx5) + + hx6 = self.rebnconv6(hx) + + hx7 = self.rebnconv7(hx6) + + hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1)) + hx6dup = _upsample_like(hx6d, hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-6 ### +class RSU6(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU6, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin 
= self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + + hx6 = self.rebnconv6(hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-5 ### +class RSU5(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU5, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + + hx5 = self.rebnconv5(hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4 ### +class RSU4(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4F ### +class RSU4F(nn.Module): + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4F, 
self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx2 = self.rebnconv2(hx1) + hx3 = self.rebnconv3(hx2) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1)) + hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1)) + + return hx1d + hxin + + +class myrebnconv(nn.Module): + def __init__( + self, + in_ch=3, + out_ch=1, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + groups=1, + ): + super(myrebnconv, self).__init__() + + self.conv = nn.Conv2d( + in_ch, + out_ch, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + self.bn = nn.BatchNorm2d(out_ch) + self.rl = nn.ReLU(inplace=True) + + def forward(self, x): + return self.rl(self.bn(self.conv(x))) + + +class ISNetGTEncoder(nn.Module): + def __init__(self, in_ch=1, out_ch=1): + super(ISNetGTEncoder, self).__init__() + + self.conv_in = myrebnconv( + in_ch, 16, 3, stride=2, padding=1 + ) # nn.Conv2d(in_ch,64,3,stride=2,padding=1) + + self.stage1 = RSU7(16, 16, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 16, 64) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(64, 32, 128) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(128, 32, 256) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(256, 64, 512) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 64, 512) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side3 = nn.Conv2d(128, out_ch, 3, padding=1) + self.side4 = nn.Conv2d(256, out_ch, 3, padding=1) + self.side5 = nn.Conv2d(512, out_ch, 3, padding=1) + self.side6 = nn.Conv2d(512, out_ch, 3, padding=1) + + @staticmethod + def compute_loss(args): + preds, targets = args + return muti_loss_fusion(preds, targets) + + def forward(self, x): + hx = x + + hxin = self.conv_in(hx) + # hx = self.pool_in(hxin) + + # stage 1 + hx1 = self.stage1(hxin) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + + # side output + d1 = self.side1(hx1) + d1 = _upsample_like(d1, x) + + d2 = self.side2(hx2) + d2 = _upsample_like(d2, x) + + d3 = self.side3(hx3) + d3 = _upsample_like(d3, x) + + d4 = self.side4(hx4) + d4 = _upsample_like(d4, x) + + d5 = self.side5(hx5) + d5 = _upsample_like(d5, x) + + d6 = self.side6(hx6) + d6 = _upsample_like(d6, x) + + # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1)) + + # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1, hx2, hx3, hx4, hx5, hx6] + return [d1, d2, d3, d4, d5, d6], [hx1, hx2, hx3, hx4, hx5, hx6] + + +class 
ISNetDIS(nn.Module): + def __init__(self, in_ch=3, out_ch=1): + super(ISNetDIS, self).__init__() + + self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1) + self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage1 = RSU7(64, 32, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 32, 128) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(128, 64, 256) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(256, 128, 512) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(512, 256, 512) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 256, 512) + + # decoder + self.stage5d = RSU4F(1024, 256, 512) + self.stage4d = RSU4(1024, 128, 256) + self.stage3d = RSU5(512, 64, 128) + self.stage2d = RSU6(256, 32, 64) + self.stage1d = RSU7(128, 16, 64) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side3 = nn.Conv2d(128, out_ch, 3, padding=1) + self.side4 = nn.Conv2d(256, out_ch, 3, padding=1) + self.side5 = nn.Conv2d(512, out_ch, 3, padding=1) + self.side6 = nn.Conv2d(512, out_ch, 3, padding=1) + + # self.outconv = nn.Conv2d(6*out_ch,out_ch,1) + + @staticmethod + def compute_loss_kl(preds, targets, dfs, fs, mode="MSE"): + return muti_loss_fusion_kl(preds, targets, dfs, fs, mode=mode) + + @staticmethod + def compute_loss(args): + if len(args) == 3: + ds, dfs, labels = args + return muti_loss_fusion(ds, labels) + else: + ds, dfs, labels, fs = args + return muti_loss_fusion_kl(ds, labels, dfs, fs, mode="MSE") + + def forward(self, x): + hx = x + + hxin = self.conv_in(hx) + hx = self.pool_in(hxin) + + # stage 1 + hx1 = self.stage1(hxin) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + hx6up = _upsample_like(hx6, hx5) + + # -------------------- decoder -------------------- + hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) + + # side output + d1 = self.side1(hx1d) + d1 = _upsample_like(d1, x) + + d2 = self.side2(hx2d) + d2 = _upsample_like(d2, x) + + d3 = self.side3(hx3d) + d3 = _upsample_like(d3, x) + + d4 = self.side4(hx4d) + d4 = _upsample_like(d4, x) + + d5 = self.side5(hx5d) + d5 = _upsample_like(d5, x) + + d6 = self.side6(hx6) + d6 = _upsample_like(d6, x) + + # d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1)) + + # return [torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6] + return [d1, d2, d3, d4, d5, d6], [hx1d, hx2d, hx3d, hx4d, hx5d, hx6] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/network.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/network.py new file mode 100644 index 
0000000000000000000000000000000000000000..2ab2a44e98dc67f9dbdb281c573470d35ede96be --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/network.py @@ -0,0 +1,98 @@ +#https://github.com/siyeong0/Anime-Face-Segmentation/blob/main/network.py +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision +from torchvision.models import MobileNet_V2_Weights + +class UNet(nn.Module): + def __init__(self): + super(UNet, self).__init__() + self.NUM_SEG_CLASSES = 7 # Background, hair, face, eye, mouth, skin, clothes + + mobilenet_v2 = torchvision.models.mobilenet_v2(weights=MobileNet_V2_Weights.IMAGENET1K_V1) + mob_blocks = mobilenet_v2.features + + # Encoder + self.en_block0 = nn.Sequential( # in_ch=3 out_ch=16 + mob_blocks[0], + mob_blocks[1] + ) + self.en_block1 = nn.Sequential( # in_ch=16 out_ch=24 + mob_blocks[2], + mob_blocks[3], + ) + self.en_block2 = nn.Sequential( # in_ch=24 out_ch=32 + mob_blocks[4], + mob_blocks[5], + mob_blocks[6], + ) + self.en_block3 = nn.Sequential( # in_ch=32 out_ch=96 + mob_blocks[7], + mob_blocks[8], + mob_blocks[9], + mob_blocks[10], + mob_blocks[11], + mob_blocks[12], + mob_blocks[13], + ) + self.en_block4 = nn.Sequential( # in_ch=96 out_ch=160 + mob_blocks[14], + mob_blocks[15], + mob_blocks[16], + ) + + # Decoder + self.de_block4 = nn.Sequential( # in_ch=160 out_ch=96 + nn.UpsamplingNearest2d(scale_factor=2), + nn.Conv2d(160, 96, kernel_size=3, padding=1), + nn.InstanceNorm2d(96), + nn.LeakyReLU(0.1), + nn.Dropout(p=0.2) + ) + self.de_block3 = nn.Sequential( # in_ch=96x2 out_ch=32 + nn.UpsamplingNearest2d(scale_factor=2), + nn.Conv2d(96*2, 32, kernel_size=3, padding=1), + nn.InstanceNorm2d(32), + nn.LeakyReLU(0.1), + nn.Dropout(p=0.2) + ) + self.de_block2 = nn.Sequential( # in_ch=32x2 out_ch=24 + nn.UpsamplingNearest2d(scale_factor=2), + nn.Conv2d(32*2, 24, kernel_size=3, padding=1), + nn.InstanceNorm2d(24), + nn.LeakyReLU(0.1), + nn.Dropout(p=0.2) + ) + self.de_block1 = nn.Sequential( # in_ch=24x2 out_ch=16 + nn.UpsamplingNearest2d(scale_factor=2), + nn.Conv2d(24*2, 16, kernel_size=3, padding=1), + nn.InstanceNorm2d(16), + nn.LeakyReLU(0.1), + nn.Dropout(p=0.2) + ) + + self.de_block0 = nn.Sequential( # in_ch=16x2 out_ch=7 + nn.UpsamplingNearest2d(scale_factor=2), + nn.Conv2d(16*2, self.NUM_SEG_CLASSES, kernel_size=3, padding=1), + nn.Softmax2d() + ) + + def forward(self, x): + e0 = self.en_block0(x) + e1 = self.en_block1(e0) + e2 = self.en_block2(e1) + e3 = self.en_block3(e2) + e4 = self.en_block4(e3) + + d4 = self.de_block4(e4) + c4 = torch.cat((d4,e3),1) + d3 = self.de_block3(c4) + c3 = torch.cat((d3,e2),1) + d2 = self.de_block2(c3) + c2 =torch.cat((d2,e1),1) + d1 = self.de_block1(c2) + c1 = torch.cat((d1,e0),1) + y = self.de_block0(c1) + + return y \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/util.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/util.py new file mode 100644 index 0000000000000000000000000000000000000000..f6f3d22543675f9b098b8bc57d244c9e437d0636 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/anime_face_segment/util.py @@ -0,0 +1,40 @@ +#https://github.com/siyeong0/Anime-Face-Segmentation/blob/main/util.py +#The color palette is changed according to https://github.com/Mikubill/sd-webui-controlnet/blob/91f67ddcc7bc47537a6285864abfc12590f46c3f/annotator/anime_face_segment/__init__.py +import cv2 as cv +import glob +import numpy as np +import os + +""" 
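+The block below is the legacy palette from the upstream Anime-Face-Segmentation
+repo, kept here for reference only; the active palette that follows is reordered
+to match the sd-webui-controlnet annotator linked in the header comment.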
+COLOR_BACKGROUND = (0,255,255) +COLOR_HAIR = (255,0,0) +COLOR_EYE = (0,0,255) +COLOR_MOUTH = (255,255,255) +COLOR_FACE = (0,255,0) +COLOR_SKIN = (255,255,0) +COLOR_CLOTHES = (255,0,255) +""" +COLOR_BACKGROUND = (255,255,0) +COLOR_HAIR = (0,0,255) +COLOR_EYE = (255,0,0) +COLOR_MOUTH = (255,255,255) +COLOR_FACE = (0,255,0) +COLOR_SKIN = (0,255,255) +COLOR_CLOTHES = (255,0,255) +PALETTE = [COLOR_BACKGROUND,COLOR_HAIR,COLOR_EYE,COLOR_MOUTH,COLOR_FACE,COLOR_SKIN,COLOR_CLOTHES] + +def img2seg(path): + src = cv.imread(path) + src = src.reshape(-1, 3) + seg_list = [] + for color in PALETTE: + seg_list.append(np.where(np.all(src==color, axis=1), 1.0, 0.0)) + dst = np.stack(seg_list,axis=1).reshape(512,512,7) + + return dst.astype(np.float32) + +def seg2img(src): + src = np.moveaxis(src,0,2) + dst = [[PALETTE[np.argmax(val)] for val in buf]for buf in src] + + return np.array(dst).astype(np.uint8) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/binary/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/binary/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e45cdd6c23e23f7785486f7c566ce1924eefc657 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/binary/__init__.py @@ -0,0 +1,38 @@ +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import HWC3, resize_image_with_pad + +class BinaryDetector: + def __call__(self, input_image=None, bin_threshold=0, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + if "img" in kwargs: + warnings.warn("img is deprecated, please use `input_image=...` instead.", DeprecationWarning) + input_image = kwargs.pop("img") + + if input_image is None: + raise ValueError("input_image must be defined.") + + if not isinstance(input_image, np.ndarray): + input_image = np.array(input_image, dtype=np.uint8) + output_type = output_type or "pil" + else: + output_type = output_type or "np" + + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + img_gray = cv2.cvtColor(detected_map, cv2.COLOR_RGB2GRAY) + if bin_threshold == 0 or bin_threshold == 255: + # Otsu's threshold + otsu_threshold, img_bin = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) + print("Otsu threshold:", otsu_threshold) + else: + _, img_bin = cv2.threshold(img_gray, bin_threshold, 255, cv2.THRESH_BINARY_INV) + + detected_map = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB) + detected_map = HWC3(remove_pad(255 - detected_map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/canny/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/canny/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..647bd1189b50344e4bd6a9cbe2352edd0be2846e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/canny/__init__.py @@ -0,0 +1,17 @@ +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import resize_image_with_pad, common_input_validate, HWC3 + +class CannyDetector: + def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = 
resize_image_with_pad(input_image, detect_resolution, upscale_method) + detected_map = cv2.Canny(detected_map, low_threshold, high_threshold) + detected_map = HWC3(remove_pad(detected_map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/color/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/color/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52f872339a4e90b902723f7ff12e6530ed135760 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/color/__init__.py @@ -0,0 +1,37 @@ +import cv2 +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import HWC3, safer_memory, common_input_validate + +def cv2_resize_shortest_edge(image, size): + h, w = image.shape[:2] + if h < w: + new_h = size + new_w = int(round(w / h * size)) + else: + new_w = size + new_h = int(round(h / w * size)) + resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA) + return resized_image + +def apply_color(img, res=512): + img = cv2_resize_shortest_edge(img, res) + h, w = img.shape[:2] + + input_img_color = cv2.resize(img, (w//64, h//64), interpolation=cv2.INTER_CUBIC) + input_img_color = cv2.resize(input_img_color, (w, h), interpolation=cv2.INTER_NEAREST) + return input_img_color + +#Color T2I like multiples-of-64, upscale methods are fixed. +class ColorDetector: + def __call__(self, input_image=None, detect_resolution=512, output_type=None, **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image = HWC3(input_image) + detected_map = HWC3(apply_color(input_image, detect_resolution)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/densepose/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/densepose/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7a723dd7a59af0549d5a9688f6b1bb70847470d9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/densepose/__init__.py @@ -0,0 +1,44 @@ +import functools +import os +import warnings + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from huggingface_hub import hf_hub_download +from PIL import Image + +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download + +class DenseposeDetector: + def __init__(self, model): + self.dense_pose_estimation = model + self.device = "cpu" + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + torchscript_model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + densepose = torch.jit.load(torchscript_model_path, map_location="cpu") + return cls(densepose) + + def to(self, device): + self.dense_pose_estimation.to(device) + self.device = device + return self + + def __call__(self, input_image, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", cmap="viridis", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + H, W = input_image.shape[:2] + input_image = 
rearrange(torch.from_numpy(input_image).to(self.device), 'h w c -> c h w') + detected_map = self.dense_pose_estimation(input_image)[-1 if cmap=="viridis" else -2] + if detected_map.all() == -1: + detected_map = np.zeros([H, W, 3], dtype=np.uint8) + else: + detected_map = detected_map.cpu().detach().numpy() + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + detected_map = remove_pad(HWC3(detected_map)) + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6f60b76d35fa1012809985780964a5068adce4fd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/LICENSE @@ -0,0 +1,108 @@ +OPENPOSE: MULTIPERSON KEYPOINT DETECTION +SOFTWARE LICENSE AGREEMENT +ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY + +BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT. IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE. + +This is a license agreement ("Agreement") between your academic institution or non-profit organization or self (called "Licensee" or "You" in this Agreement) and Carnegie Mellon University (called "Licensor" in this Agreement). All rights not specifically granted to you in this Agreement are reserved for Licensor. + +RESERVATION OF OWNERSHIP AND GRANT OF LICENSE: +Licensor retains exclusive ownership of any copy of the Software (as defined below) licensed under this Agreement and hereby grants to Licensee a personal, non-exclusive, +non-transferable license to use the Software for noncommercial research purposes, without the right to sublicense, pursuant to the terms and conditions of this Agreement. As used in this Agreement, the term "Software" means (i) the actual copy of all or any portion of code for program routines made accessible to Licensee by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it, and (ii) all or any derivatives and/or modifications created or made by You to any of the items specified in (i). + +CONFIDENTIALITY: Licensee acknowledges that the Software is proprietary to Licensor, and as such, Licensee agrees to receive all such materials in confidence and use the Software only in accordance with the terms of this Agreement. Licensee agrees to use reasonable effort to protect the Software from unauthorized use, reproduction, distribution, or publication. + +COPYRIGHT: The Software is owned by Licensor and is protected by United +States copyright laws and applicable international treaties and/or conventions. + +PERMITTED USES: The Software may be used for your own noncommercial internal research purposes. You understand and agree that Licensor is not obligated to implement any suggestions and/or feedback you might provide regarding the Software, but to the extent Licensor does so, you are not entitled to any compensation related thereto. 
+ +DERIVATIVES: You may create derivatives of or make modifications to the Software, however, You agree that all and any such derivatives and modifications will be owned by Licensor and become a part of the Software licensed to You under this Agreement. You may only use such derivatives and modifications for your own noncommercial internal research purposes, and you may not otherwise use, distribute or copy such derivatives and modifications in violation of this Agreement. + +BACKUPS: If Licensee is an organization, it may make that number of copies of the Software necessary for internal noncommercial use at a single site within its organization provided that all information appearing in or on the original labels, including the copyright and trademark notices are copied onto the labels of the copies. + +USES NOT PERMITTED: You may not distribute, copy or use the Software except as explicitly permitted herein. Licensee has not been granted any trademark license as part of this Agreement and may not use the name or mark “OpenPose", "Carnegie Mellon" or any renditions thereof without the prior written permission of Licensor. + +You may not sell, rent, lease, sublicense, lend, time-share or transfer, in whole or in part, or provide third parties access to prior or present versions (or any parts thereof) of the Software. + +ASSIGNMENT: You may not assign this Agreement or your rights hereunder without the prior written consent of Licensor. Any attempted assignment without such consent shall be null and void. + +TERM: The term of the license granted by this Agreement is from Licensee's acceptance of this Agreement by downloading the Software or by using the Software until terminated as provided below. + +The Agreement automatically terminates without notice if you fail to comply with any provision of this Agreement. Licensee may terminate this Agreement by ceasing using the Software. Upon any termination of this Agreement, Licensee will delete any and all copies of the Software. You agree that all provisions which operate to protect the proprietary rights of Licensor shall remain in force should breach occur and that the obligation of confidentiality described in this Agreement is binding in perpetuity and, as such, survives the term of the Agreement. + +FEE: Provided Licensee abides completely by the terms and conditions of this Agreement, there is no fee due to Licensor for Licensee's use of the Software in accordance with this Agreement. + +DISCLAIMER OF WARRANTIES: THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS. + +SUPPORT AND MAINTENANCE: No Software support or training by the Licensor is provided as part of this Agreement. + +EXCLUSIVE REMEDY AND LIMITATION OF LIABILITY: To the maximum extent permitted under applicable law, Licensor shall not be liable for direct, indirect, special, incidental, or consequential damages or lost profits related to Licensee's use of and/or inability to use the Software, even if Licensor is advised of the possibility of such damage. + +EXPORT REGULATION: Licensee agrees to comply with any and all applicable +U.S. export control laws, regulations, and/or other laws related to embargoes and sanction programs administered by the Office of Foreign Assets Control. 
+ +SEVERABILITY: If any provision(s) of this Agreement shall be held to be invalid, illegal, or unenforceable by a court or other tribunal of competent jurisdiction, the validity, legality and enforceability of the remaining provisions shall not in any way be affected or impaired thereby. + +NO IMPLIED WAIVERS: No failure or delay by Licensor in enforcing any right or remedy under this Agreement shall be construed as a waiver of any future or other exercise of such right or remedy by Licensor. + +GOVERNING LAW: This Agreement shall be construed and enforced in accordance with the laws of the Commonwealth of Pennsylvania without reference to conflict of laws principles. You consent to the personal jurisdiction of the courts of this County and waive their rights to venue outside of Allegheny County, Pennsylvania. + +ENTIRE AGREEMENT AND AMENDMENTS: This Agreement constitutes the sole and entire agreement between Licensee and Licensor as to the matter set forth herein and supersedes any previous agreements, understandings, and arrangements between the parties relating hereto. + + + +************************************************************************ + +THIRD-PARTY SOFTWARE NOTICES AND INFORMATION + +This project incorporates material from the project(s) listed below (collectively, "Third Party Code"). This Third Party Code is licensed to you under their original license terms set forth below. We reserves all other rights not expressly granted, whether by implication, estoppel or otherwise. + +1. Caffe, version 1.0.0, (https://github.com/BVLC/caffe/) + +COPYRIGHT + +All contributions by the University of California: +Copyright (c) 2014-2017 The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014-2017, the respective contributors +All rights reserved. + +Caffe uses a shared copyright model: each contributor holds copyright over +their contributions to Caffe. The project versioning records all such +contribution and copyright details. If a contributor wants to further mark +their specific copyright on a particular contribution, they should indicate +their copyright solely in the commit message of the change when it is +committed. + +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+CONTRIBUTION AGREEMENT
+
+By contributing to the BVLC/caffe repository through pull-request, comment,
+or otherwise, the contributor releases their content to the
+license and copyright terms herein.
+
+************END OF THIRD-PARTY SOFTWARE NOTICES AND INFORMATION**********
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e65a4b1fbf793aa68c983708561720a6ef05195d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__init__.py
@@ -0,0 +1,261 @@
+# Openpose
+# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
+# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
+# 3rd Edited by ControlNet
+# 4th Edited by ControlNet (added face and correct hands)
+# 5th Edited by ControlNet (improved JSON serialization/deserialization, and lots of bug fixes)
+# This preprocessor is licensed by CMU for non-commercial use only.
+
+import os
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+import json
+import torch
+import numpy as np
+from . import util
+from .body import Body, BodyResult, Keypoint
+from .hand import Hand
+from .face import Face
+from .types import PoseResult, HandResult, FaceResult
+from huggingface_hub import hf_hub_download
+from .wholebody import Wholebody
+import warnings
+from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download
+import cv2
+from PIL import Image
+from .animalpose import AnimalPoseImage
+
+from typing import Tuple, List, Callable, Union, Optional
+
+def draw_poses(poses: List[PoseResult], H, W, draw_body=True, draw_hand=True, draw_face=True):
+    """
+    Draw the detected poses on an empty canvas.
+
+    Args:
+        poses (List[PoseResult]): A list of PoseResult objects containing the detected poses.
+        H (int): The height of the canvas.
+        W (int): The width of the canvas.
+        draw_body (bool, optional): Whether to draw body keypoints. Defaults to True.
+        draw_hand (bool, optional): Whether to draw hand keypoints. Defaults to True.
+        draw_face (bool, optional): Whether to draw face keypoints. Defaults to True.
+
+    Returns:
+        numpy.ndarray: A 3D numpy array representing the canvas with the drawn poses.
+    """
+    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
+
+    for pose in poses:
+        if draw_body:
+            canvas = util.draw_bodypose(canvas, pose.body.keypoints)
+
+        if draw_hand:
+            canvas = util.draw_handpose(canvas, pose.left_hand)
+            canvas = util.draw_handpose(canvas, pose.right_hand)
+
+        if draw_face:
+            canvas = util.draw_facepose(canvas, pose.face)
+
+    return canvas
+
+
+def decode_json_as_poses(json_string: str, normalize_coords: bool = False) -> Tuple[List[PoseResult], int, int]:
+    """ Decode the json_string complying with the openpose JSON output format
+    to poses that controlnet recognizes.
+    https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md
+
+    Args:
+        json_string: The json string to decode.
+        normalize_coords: Whether to normalize coordinates of each keypoint by canvas height/width.
+                          `draw_pose` only accepts normalized keypoints. Set this param to True if
+                          the input coords are not normalized.
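+                          For reference, a minimal payload accepted here looks like
+                          {"canvas_height": 512, "canvas_width": 512,
+                           "people": [{"pose_keypoints_2d": [x0, y0, c0, x1, y1, c1, ...]}]},
+                          i.e. flattened (x, y, confidence) triplets; the hand and
+                          face keys parsed below are optional.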
+ + Returns: + poses + canvas_height + canvas_width + """ + pose_json = json.loads(json_string) + height = pose_json['canvas_height'] + width = pose_json['canvas_width'] + + def chunks(lst, n): + """Yield successive n-sized chunks from lst.""" + for i in range(0, len(lst), n): + yield lst[i:i + n] + + def decompress_keypoints(numbers: Optional[List[float]]) -> Optional[List[Optional[Keypoint]]]: + if not numbers: + return None + + assert len(numbers) % 3 == 0 + + def create_keypoint(x, y, c): + if c < 1.0: + return None + keypoint = Keypoint(x, y) + return keypoint + + return [ + create_keypoint(x, y, c) + for x, y, c in chunks(numbers, n=3) + ] + + return ( + [ + PoseResult( + body=BodyResult(keypoints=decompress_keypoints(pose.get('pose_keypoints_2d'))), + left_hand=decompress_keypoints(pose.get('hand_left_keypoints_2d')), + right_hand=decompress_keypoints(pose.get('hand_right_keypoints_2d')), + face=decompress_keypoints(pose.get('face_keypoints_2d')) + ) + for pose in pose_json['people'] + ], + height, + width, + ) + + +def encode_poses_as_dict(poses: List[PoseResult], canvas_height: int, canvas_width: int) -> str: + """ Encode the pose as a dict following openpose JSON output format: + https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md + """ + def compress_keypoints(keypoints: Union[List[Keypoint], None]) -> Union[List[float], None]: + if not keypoints: + return None + + return [ + value + for keypoint in keypoints + for value in ( + [float(keypoint.x), float(keypoint.y), 1.0] + if keypoint is not None + else [0.0, 0.0, 0.0] + ) + ] + + return { + 'people': [ + { + 'pose_keypoints_2d': compress_keypoints(pose.body.keypoints), + "face_keypoints_2d": compress_keypoints(pose.face), + "hand_left_keypoints_2d": compress_keypoints(pose.left_hand), + "hand_right_keypoints_2d":compress_keypoints(pose.right_hand), + } + for pose in poses + ], + 'canvas_height': canvas_height, + 'canvas_width': canvas_width, + } + +global_cached_dwpose = Wholebody() + +class DwposeDetector: + """ + A class for detecting human poses in images using the Dwpose model. + + Attributes: + model_dir (str): Path to the directory where the pose models are stored. 
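+
+    Typical use (a sketch; the repo id / path is a placeholder, the filenames
+    default to the ones wired up in from_pretrained below):
+        detector = DwposeDetector.from_pretrained("<hf_repo_or_local_dir>")
+        pose_image = detector(input_image, include_hand=True, include_face=True)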
+ """ + def __init__(self, dw_pose_estimation): + self.dw_pose_estimation = dw_pose_estimation + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, pretrained_det_model_or_path=None, det_filename=None, pose_filename=None, cache_dir=annotator_ckpts_path, torchscript_device="cuda"): + global global_cached_dwpose + pretrained_det_model_or_path = pretrained_det_model_or_path or pretrained_model_or_path + det_filename = det_filename or "yolox_l.onnx" + pose_filename = pose_filename or "dw-ll_ucoco_384.onnx" + det_model_path = custom_hf_download(pretrained_det_model_or_path, det_filename, cache_dir=cache_dir) + pose_model_path = custom_hf_download(pretrained_model_or_path, pose_filename, cache_dir=cache_dir) + + print(f"\nDWPose: Using {det_filename} for bbox detection and {pose_filename} for pose estimation") + if global_cached_dwpose.det is None or global_cached_dwpose.det_filename != det_filename: + t = Wholebody(det_model_path, None, torchscript_device=torchscript_device) + t.pose = global_cached_dwpose.pose + t.pose_filename = global_cached_dwpose.pose + global_cached_dwpose = t + + if global_cached_dwpose.pose is None or global_cached_dwpose.pose_filename != pose_filename: + t = Wholebody(None, pose_model_path, torchscript_device=torchscript_device) + t.det = global_cached_dwpose.det + t.det_filename = global_cached_dwpose.det_filename + global_cached_dwpose = t + return cls(global_cached_dwpose) + + def detect_poses(self, oriImg) -> List[PoseResult]: + with torch.no_grad(): + keypoints_info = self.dw_pose_estimation(oriImg.copy()) + return Wholebody.format_result(keypoints_info) + + def __call__(self, input_image, detect_resolution=512, include_body=True, include_hand=False, include_face=False, hand_and_face=None, output_type="pil", image_and_json=False, upscale_method="INTER_CUBIC", **kwargs): + if hand_and_face is not None: + warnings.warn("hand_and_face is deprecated. Use include_hand and include_face instead.", DeprecationWarning) + include_hand = hand_and_face + include_face = hand_and_face + + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + poses = self.detect_poses(input_image) + detected_map = remove_pad(input_image) + canvas = draw_poses(poses, detected_map.shape[0], detected_map.shape[1], draw_body=include_body, draw_hand=include_hand, draw_face=include_face) + + detected_map = HWC3(canvas) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + if image_and_json: + return (detected_map, encode_poses_as_dict(poses, detected_map.shape[0], detected_map.shape[1])) + + return detected_map + +global_cached_animalpose = AnimalPoseImage() +class AnimalposeDetector: + """ + A class for detecting animal poses in images using the RTMPose AP10k model. + + Attributes: + model_dir (str): Path to the directory where the pose models are stored. 
+ """ + def __init__(self, animal_pose_estimation): + self.animal_pose_estimation = animal_pose_estimation + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, pretrained_det_model_or_path=None, det_filename=None, pose_filename=None, cache_dir=annotator_ckpts_path, torchscript_device="cuda"): + global global_cached_animalpose + pretrained_det_model_or_path = pretrained_det_model_or_path or pretrained_model_or_path + det_filename = det_filename or "yolox_l.onnx" + pose_filename = pose_filename or "dw-ll_ucoco_384.onnx" + det_model_path = custom_hf_download(pretrained_det_model_or_path, det_filename, cache_dir=cache_dir) + pose_model_path = custom_hf_download(pretrained_model_or_path, pose_filename, cache_dir=cache_dir) + + print(f"\nAnimalPose: Using {det_filename} for bbox detection and {pose_filename} for pose estimation") + if global_cached_animalpose.det is None or global_cached_animalpose.det_filename != det_filename: + t = AnimalPoseImage(det_model_path, None, torchscript_device=torchscript_device) + t.pose = global_cached_animalpose.pose + t.pose_filename = global_cached_animalpose.pose + global_cached_animalpose = t + + if global_cached_animalpose.pose is None or global_cached_animalpose.pose_filename != pose_filename: + t = AnimalPoseImage(None, pose_model_path, torchscript_device=torchscript_device) + t.det = global_cached_animalpose.det + t.det_filename = global_cached_animalpose.det_filename + global_cached_animalpose = t + return cls(global_cached_animalpose) + + def __call__(self, input_image, detect_resolution=512, output_type="pil", image_and_json=False, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + detected_map, openpose_dict = self.animal_pose_estimation(input_image) + detected_map = remove_pad(detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + if image_and_json: + return (detected_map, openpose_dict) + + return detected_map \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-310.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7db1357c010925f540568e76f8e889ace4585e09 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e46572eb171019b910eee2ba7b945a1638794509 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/animalpose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/animalpose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbd9120b48c3066878d7ef53e69585ae02673565 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/__pycache__/animalpose.cpython-311.pyc differ diff --git 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/animalpose.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/animalpose.py
new file mode 100644
index 0000000000000000000000000000000000000000..50bd63e882ac66ae88e3bfbd4defd206570cd7c4
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/animalpose.py
@@ -0,0 +1,273 @@
+import numpy as np
+import cv2
+import os
+from .dw_onnx.cv_ox_det import inference_detector as inference_onnx_yolox
+from .dw_onnx.cv_ox_yolo_nas import inference_detector as inference_onnx_yolo_nas
+from .dw_onnx.cv_ox_pose import inference_pose as inference_onnx_pose
+
+from .dw_torchscript.jit_det import inference_detector as inference_jit_yolox
+from .dw_torchscript.jit_pose import inference_pose as inference_jit_pose
+from typing import List, Optional
+from .types import PoseResult, BodyResult, Keypoint
+from timeit import default_timer
+from controlnet_aux.dwpose.util import guess_onnx_input_shape_dtype, get_ort_providers, get_model_type, is_model_torchscript
+import json
+import torch
+import torch.utils.benchmark.utils.timer as torch_timer
+
+def drawBetweenKeypoints(pose_img, keypoints, indexes, color, scaleFactor):
+    ind0 = indexes[0] - 1
+    ind1 = indexes[1] - 1
+
+    point1 = (keypoints[ind0][0], keypoints[ind0][1])
+    point2 = (keypoints[ind1][0], keypoints[ind1][1])
+
+    thickness = int(5 // scaleFactor)
+
+    cv2.line(pose_img, (int(point1[0]), int(point1[1])), (int(point2[0]), int(point2[1])), color, thickness)
+
+
+def drawBetweenKeypointsList(pose_img, keypoints, keypointPairsList, colorsList, scaleFactor):
+    for ind, keypointPair in enumerate(keypointPairsList):
+        drawBetweenKeypoints(pose_img, keypoints, keypointPair, colorsList[ind], scaleFactor)
+
+def drawBetweenSetofKeypointLists(pose_img, keypoints_set, keypointPairsList, colorsList, scaleFactor):
+    for keypoints in keypoints_set:
+        drawBetweenKeypointsList(pose_img, keypoints, keypointPairsList, colorsList, scaleFactor)
+
size, blackBorder=True):
+    left, right, top, bottom = 0, 0, 0, 0
+
+    # pad x
+    if img.shape[1] < size[1]:
+        sidePadding = int((size[1] - img.shape[1]) // 2)
+        left = sidePadding
+        right = sidePadding
+
+        # pad extra on right if the padding needed is an odd number
+        if (size[1] - img.shape[1]) % 2 == 1:
+            right += 1
+
+    # pad y
+    if img.shape[0] < size[0]:
+        topBottomPadding = int((size[0] - img.shape[0]) // 2)
+        top = topBottomPadding
+        bottom = topBottomPadding
+
+        # pad extra on bottom if the padding needed is an odd number
+        if (size[0] - img.shape[0]) % 2 == 1:
+            bottom += 1
+
+    if blackBorder:
+        paddedImg = cv2.copyMakeBorder(src=img, top=top, bottom=bottom, left=left, right=right, borderType=cv2.BORDER_CONSTANT, value=(0,0,0))
+    else:
+        paddedImg = cv2.copyMakeBorder(src=img, top=top, bottom=bottom, left=left, right=right, borderType=cv2.BORDER_REPLICATE)
+
+    return paddedImg
+
+def smartCrop(img, size, center):
+    width = img.shape[1]
+    height = img.shape[0]
+    xSize = size[1]
+    ySize = size[0]
+    xCenter = center[0]
+    yCenter = center[1]
+
+    if img.shape[0] > size[0] or img.shape[1] > size[1]:
+        leftMargin = xCenter - xSize//2
+        rightMargin = xCenter + xSize//2
+        upMargin = yCenter - ySize//2
+        downMargin = yCenter + ySize//2
+
+        # shift the crop window back inside the frame
+        if(leftMargin < 0):
+            xCenter += (-leftMargin)
+        if(rightMargin > width):
+            xCenter -= (rightMargin - width)
+
+        if(upMargin < 0):
+            yCenter += (-upMargin)
+        if(downMargin > height):
+            yCenter -= (downMargin - height)
+
+        img = cv2.getRectSubPix(img, size, (xCenter, yCenter))
+
+    return img
+
+
+def calculateScaleFactor(img, size, poseSpanX, poseSpanY):
+    poseSpanX = max(poseSpanX, size[0])
+
+    scaleFactorX = 1
+    if poseSpanX > size[0]:
+        scaleFactorX = size[0] / poseSpanX
+
+    scaleFactorY = 1
+    if poseSpanY > size[1]:
+        scaleFactorY = size[1] / poseSpanY
+
+    scaleFactor = min(scaleFactorX, scaleFactorY)
+
+    return scaleFactor
+
+
+def scaleImg(img, size, poseSpanX, poseSpanY, scaleFactor):
+    scaledImg = cv2.resize(img, (0, 0), fx=scaleFactor, fy=scaleFactor)
+
+    return scaledImg, scaleFactor
+
+class AnimalPoseImage:
+    def __init__(self, det_model_path: Optional[str] = None, pose_model_path: Optional[str] = None, torchscript_device="cuda"):
+        self.det_filename = det_model_path and os.path.basename(det_model_path)
+        self.pose_filename = pose_model_path and os.path.basename(pose_model_path)
+        self.det, self.pose = None, None
+        # return type: None ort cv2 torchscript
+        self.det_model_type = get_model_type("AnimalPose", self.det_filename)
+        self.pose_model_type = get_model_type("AnimalPose", self.pose_filename)
+        # Always loads to CPU to avoid building OpenCV.
+        cv2_device = 'cpu'
+        cv2_backend = cv2.dnn.DNN_BACKEND_OPENCV if cv2_device == 'cpu' else cv2.dnn.DNN_BACKEND_CUDA
+        # You need to manually build OpenCV through cmake to work with your GPU. 
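+        # Editor's note: the backend/target pair above and below must stay consistent;
+        # DNN_BACKEND_CUDA / DNN_TARGET_CUDA only take effect when OpenCV was compiled
+        # with CUDA. A hedged sketch of probing for such a build (standard cv2 API,
+        # not part of the original code):
+        #   if hasattr(cv2, "cuda") and cv2.cuda.getCudaEnabledDeviceCount() > 0:
+        #       cv2_device = 'cuda'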
+        cv2_providers = cv2.dnn.DNN_TARGET_CPU if cv2_device == 'cpu' else cv2.dnn.DNN_TARGET_CUDA
+        ort_providers = get_ort_providers()
+
+        if self.det_model_type is None:
+            pass
+        elif self.det_model_type == "ort":
+            try:
+                import onnxruntime as ort
+                self.det = ort.InferenceSession(det_model_path, providers=ort_providers)
+            except Exception:
+                print(f"Failed to load onnxruntime with {ort_providers}.\nPlease change EP_list in the config.yaml and restart ComfyUI")
+                self.det = ort.InferenceSession(det_model_path, providers=["CPUExecutionProvider"])
+        elif self.det_model_type == "cv2":
+            try:
+                self.det = cv2.dnn.readNetFromONNX(det_model_path)
+                self.det.setPreferableBackend(cv2_backend)
+                self.det.setPreferableTarget(cv2_providers)
+            except Exception:
+                print("TopK operators may not work on your OpenCV, try using onnxruntime with CPUExecutionProvider")
+                try:
+                    import onnxruntime as ort
+                    self.det = ort.InferenceSession(det_model_path, providers=["CPUExecutionProvider"])
+                except Exception:
+                    print(f"Failed to load {det_model_path}, you can use other models instead")
+        else:
+            self.det = torch.jit.load(det_model_path)
+            self.det.to(torchscript_device)
+
+        if self.pose_model_type is None:
+            pass
+        elif self.pose_model_type == "ort":
+            try:
+                import onnxruntime as ort
+                self.pose = ort.InferenceSession(pose_model_path, providers=ort_providers)
+            except Exception:
+                print(f"Failed to load onnxruntime with {ort_providers}.\nPlease change EP_list in the config.yaml and restart ComfyUI")
+                self.pose = ort.InferenceSession(pose_model_path, providers=["CPUExecutionProvider"])
+        elif self.pose_model_type == "cv2":
+            self.pose = cv2.dnn.readNetFromONNX(pose_model_path)
+            self.pose.setPreferableBackend(cv2_backend)
+            self.pose.setPreferableTarget(cv2_providers)
+        else:
+            self.pose = torch.jit.load(pose_model_path)
+            self.pose.to(torchscript_device)
+
+        if self.pose_filename is not None:
+            self.pose_input_size, _ = guess_onnx_input_shape_dtype(self.pose_filename)
+
+    def __call__(self, oriImg) -> Tuple[np.ndarray, dict]:
+        # COCO classes 14-23 are the animal categories (bird ... giraffe)
+        detect_classes = list(range(14, 23 + 1)) #https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/coco.yaml
+
+        if is_model_torchscript(self.det):
+            det_start = torch_timer.timer()
+            det_result = inference_jit_yolox(self.det, oriImg, detect_classes=detect_classes)
+            print(f"AnimalPose: Bbox {((torch_timer.timer() - det_start) * 1000):.2f}ms")
+        else:
+            det_start = default_timer()
+            det_onnx_dtype = np.float32 if "yolox" in self.det_filename else np.uint8
+            if "yolox" in self.det_filename:
+                det_result = inference_onnx_yolox(self.det, oriImg, detect_classes=detect_classes, dtype=det_onnx_dtype)
+            else:
+                #FP16 and INT8 YOLO NAS accept uint8 input
+                det_result = inference_onnx_yolo_nas(self.det, oriImg, detect_classes=detect_classes, dtype=det_onnx_dtype)
+            print(f"AnimalPose: Bbox {((default_timer() - det_start) * 1000):.2f}ms")
+        if (det_result is None) or (det_result.shape[0] == 0):
+            openpose_dict = {
+                'version': 'ap10k',
+                'animals': [],
+                'canvas_height': oriImg.shape[0],
+                'canvas_width': oriImg.shape[1]
+            }
+            return np.zeros_like(oriImg), openpose_dict
+
+        if is_model_torchscript(self.pose):
+            pose_start = torch_timer.timer()
+            keypoint_sets, scores = inference_jit_pose(self.pose, det_result, oriImg, self.pose_input_size)
+            print(f"AnimalPose: Pose {((torch_timer.timer() - pose_start) * 1000):.2f}ms on {det_result.shape[0]} animals\n")
+        else:
+            pose_start = default_timer()
+            _, pose_onnx_dtype = guess_onnx_input_shape_dtype(self.pose_filename)
+            keypoint_sets, scores = 
inference_onnx_pose(self.pose, det_result, oriImg, self.pose_input_size, dtype=pose_onnx_dtype) + print(f"AnimalPose: Pose {((default_timer() - pose_start) * 1000):.2f}ms on {det_result.shape[0]} animals\n") + + animal_kps_scores = [] + pose_img = np.zeros((oriImg.shape[0], oriImg.shape[1], 3), dtype = np.uint8) + for (idx, keypoints) in enumerate(keypoint_sets): + # don't use keypoints that go outside the frame in calculations for the center + interorKeypoints = keypoints[((keypoints[:,0] > 0) & (keypoints[:,0] < oriImg.shape[1])) & ((keypoints[:,1] > 0) & (keypoints[:,1] < oriImg.shape[0]))] + + xVals = interorKeypoints[:,0] + yVals = interorKeypoints[:,1] + + minX = np.amin(xVals) + minY = np.amin(yVals) + maxX = np.amax(xVals) + maxY = np.amax(yVals) + + poseSpanX = maxX - minX + poseSpanY = maxY - minY + + # find mean center + + xSum = np.sum(xVals) + ySum = np.sum(yVals) + + xCenter = xSum // xVals.shape[0] + yCenter = ySum // yVals.shape[0] + center_of_keypoints = (xCenter,yCenter) + + # order of the keypoints for AP10k and a standardized list of colors for limbs + keypointPairsList = [(1,2), (2,3), (1,3), (3,4), (4,9), (9,10), (10,11), (4,6), (6,7), (7,8), (4,5), (5,15), (15,16), (16,17), (5,12), (12,13), (13,14)] + colorsList = [(255,255,255), (100,255,100), (150,255,255), (100,50,255), (50,150,200), (0,255,255), (0,150,0), (0,0,255), (0,0,150), (255,50,255), (255,0,255), (255,0,0), (150,0,0), (255,255,100), (0,150,0), (255,255,0), (150,150,150)] # 16 colors needed + + drawBetweenKeypointsList(pose_img, keypoints, keypointPairsList, colorsList, scaleFactor=1.0) + score = scores[idx, ..., None] + score[score > 1.0] = 1.0 + score[score < 0.0] = 0.0 + animal_kps_scores.append(np.concatenate((keypoints, score), axis=-1)) + + openpose_dict = { + 'version': 'ap10k', + 'animals': [keypoints.tolist() for keypoints in animal_kps_scores], + 'canvas_height': oriImg.shape[0], + 'canvas_width': oriImg.shape[1] + } + return pose_img, openpose_dict \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/body.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/body.py new file mode 100644 index 0000000000000000000000000000000000000000..32934f19eba4b7e762678fd1fcd6b2bd193811d6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/body.py @@ -0,0 +1,261 @@ +import cv2 +import numpy as np +import math +import time +from scipy.ndimage.filters import gaussian_filter +import matplotlib.pyplot as plt +import matplotlib +import torch +from torchvision import transforms +from typing import NamedTuple, List, Union + +from . 
import util
+from .model import bodypose_model
+from .types import Keypoint, BodyResult
+
+class Body(object):
+    def __init__(self, model_path):
+        self.model = bodypose_model()
+        # if torch.cuda.is_available():
+        #     self.model = self.model.cuda()
+        #     print('cuda')
+        model_dict = util.transfer(self.model, torch.load(model_path))
+        self.model.load_state_dict(model_dict)
+        self.model.eval()
+
+    def __call__(self, oriImg):
+        # scale_search = [0.5, 1.0, 1.5, 2.0]
+        scale_search = [0.5]
+        boxsize = 368
+        stride = 8
+        padValue = 128
+        thre1 = 0.1
+        thre2 = 0.05
+        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
+        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
+        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
+
+        for m in range(len(multiplier)):
+            scale = multiplier[m]
+            imageToTest = util.smart_resize_k(oriImg, fx=scale, fy=scale)
+            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+            im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+            im = np.ascontiguousarray(im)
+
+            data = torch.from_numpy(im).float()
+            if torch.cuda.is_available():
+                data = data.cuda()
+            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+            with torch.no_grad():
+                # cn_device is expected to be assigned on this instance by the caller before inference
+                data = data.to(self.cn_device)
+                Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
+                Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
+                Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
+
+            # extract outputs, resize, and remove padding
+            # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
+            heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
+            heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride)
+            heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            heatmap = util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1]))
+
+            # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
+            paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
+            paf = util.smart_resize_k(paf, fx=stride, fy=stride)
+            paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            paf = util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1]))
+
+            # average heatmaps and PAFs over all tested scales
+            heatmap_avg += heatmap / len(multiplier)
+            paf_avg += paf / len(multiplier)
+
+        all_peaks = []
+        peak_counter = 0
+
+        for part in range(18):
+            map_ori = heatmap_avg[:, :, part]
+            one_heatmap = gaussian_filter(map_ori, sigma=3)
+
+            map_left = np.zeros(one_heatmap.shape)
+            map_left[1:, :] = one_heatmap[:-1, :]
+            map_right = np.zeros(one_heatmap.shape)
+            map_right[:-1, :] = one_heatmap[1:, :]
+            map_up = np.zeros(one_heatmap.shape)
+            map_up[:, 1:] = one_heatmap[:, :-1]
+            map_down = np.zeros(one_heatmap.shape)
+            map_down[:, :-1] = one_heatmap[:, 1:]
+
+            peaks_binary = np.logical_and.reduce(
+                (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
+            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
+            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
+            peak_id = range(peak_counter, peak_counter + len(peaks))
+            peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
+
+            all_peaks.append(peaks_with_score_and_id)
+            peak_counter += len(peaks)
+
+        # find connection in the specified sequence, center 29 is in the 
position 15 + limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ + [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ + [1, 16], [16, 18], [3, 17], [6, 18]] + # the middle joints heatmap correpondence + mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \ + [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \ + [55, 56], [37, 38], [45, 46]] + + connection_all = [] + special_k = [] + mid_num = 10 + + for k in range(len(mapIdx)): + score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] + candA = all_peaks[limbSeq[k][0] - 1] + candB = all_peaks[limbSeq[k][1] - 1] + nA = len(candA) + nB = len(candB) + indexA, indexB = limbSeq[k] + if (nA != 0 and nB != 0): + connection_candidate = [] + for i in range(nA): + for j in range(nB): + vec = np.subtract(candB[j][:2], candA[i][:2]) + norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + norm = max(0.001, norm) + vec = np.divide(vec, norm) + + startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \ + np.linspace(candA[i][1], candB[j][1], num=mid_num))) + + vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \ + for I in range(len(startend))]) + vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \ + for I in range(len(startend))]) + + score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) + score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( + 0.5 * oriImg.shape[0] / norm - 1, 0) + criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) + criterion2 = score_with_dist_prior > 0 + if criterion1 and criterion2: + connection_candidate.append( + [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) + + connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) + connection = np.zeros((0, 5)) + for c in range(len(connection_candidate)): + i, j, s = connection_candidate[c][0:3] + if (i not in connection[:, 3] and j not in connection[:, 4]): + connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) + if (len(connection) >= min(nA, nB)): + break + + connection_all.append(connection) + else: + special_k.append(k) + connection_all.append([]) + + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + subset = -1 * np.ones((0, 20)) + candidate = np.array([item for sublist in all_peaks for item in sublist]) + + for k in range(len(mapIdx)): + if k not in special_k: + partAs = connection_all[k][:, 0] + partBs = connection_all[k][:, 1] + indexA, indexB = np.array(limbSeq[k]) - 1 + + for i in range(len(connection_all[k])): # = 1:size(temp,1) + found = 0 + subset_idx = [-1, -1] + for j in range(len(subset)): # 1:size(subset,1): + if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if subset[j][indexB] != partBs[i]: + subset[j][indexB] = partBs[i] + subset[j][-1] += 1 + subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] + elif found == 2: # if found 2 and disjoint, merge them + j1, j2 = subset_idx + membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2] + if len(np.nonzero(membership == 2)[0]) == 0: # merge + subset[j1][:-2] += (subset[j2][:-2] + 1) + subset[j1][-2:] += subset[j2][-2:] + subset[j1][-2] += 
connection_all[k][i][2] + subset = np.delete(subset, j2, 0) + else: # as like found == 1 + subset[j1][indexB] = partBs[i] + subset[j1][-1] += 1 + subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] + + # if find no partA in the subset, create a new subset + elif not found and k < 17: + row = -1 * np.ones(20) + row[indexA] = partAs[i] + row[indexB] = partBs[i] + row[-1] = 2 + row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] + subset = np.vstack([subset, row]) + # delete some rows of subset which has few parts occur + deleteIdx = [] + for i in range(len(subset)): + if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: + deleteIdx.append(i) + subset = np.delete(subset, deleteIdx, axis=0) + + # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts + # candidate: x, y, score, id + return candidate, subset + + @staticmethod + def format_body_result(candidate: np.ndarray, subset: np.ndarray) -> List[BodyResult]: + """ + Format the body results from the candidate and subset arrays into a list of BodyResult objects. + + Args: + candidate (np.ndarray): An array of candidates containing the x, y coordinates, score, and id + for each body part. + subset (np.ndarray): An array of subsets containing indices to the candidate array for each + person detected. The last two columns of each row hold the total score and total parts + of the person. + + Returns: + List[BodyResult]: A list of BodyResult objects, where each object represents a person with + detected keypoints, total score, and total parts. + """ + return [ + BodyResult( + keypoints=[ + Keypoint( + x=candidate[candidate_index][0], + y=candidate[candidate_index][1], + score=candidate[candidate_index][2], + id=candidate[candidate_index][3] + ) if candidate_index != -1 else None + for candidate_index in person[:18].astype(int) + ], + total_score=person[18], + total_parts=person[19] + ) + for person in subset + ] + + +if __name__ == "__main__": + body_estimation = Body('../model/body_pose_model.pth') + + test_image = '../images/ski.jpg' + oriImg = cv2.imread(test_image) # B,G,R order + candidate, subset = body_estimation(oriImg) + bodies = body_estimation.format_body_result(candidate, subset) + + canvas = oriImg + for body in bodies: + canvas = util.draw_bodypose(canvas, body) + + plt.imshow(canvas[:, :, [2, 1, 0]]) + plt.show() \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33e7a7f594ef441479257c788e4c0d6e08657fc8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__init__.py @@ -0,0 +1 @@ +#Dummy file ensuring this package will be recognized \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83dfca207cc854b0b0ae681e978343e7665b2091 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_det.cpython-311.pyc 
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_det.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c87047d944d49ced7cf2996340f92a4322808208 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_det.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_pose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_pose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252e8821b9ab476514d4c12bd7c623cca095f66a Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_pose.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_yolo_nas.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_yolo_nas.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4255b2c494f85762b5c8394a52da134fda3a8913 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/__pycache__/cv_ox_yolo_nas.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_det.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_det.py new file mode 100644 index 0000000000000000000000000000000000000000..0365234c2caef3b98fc01304ba5365da2115ba65 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_det.py @@ -0,0 +1,129 @@ +import cv2 +import numpy as np + +def nms(boxes, scores, nms_thr): + """Single class NMS implemented in Numpy.""" + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= nms_thr)[0] + order = order[inds + 1] + + return keep + +def multiclass_nms(boxes, scores, nms_thr, score_thr): + """Multiclass NMS implemented in Numpy. 
Class-aware version.""" + final_dets = [] + num_classes = scores.shape[1] + for cls_ind in range(num_classes): + cls_scores = scores[:, cls_ind] + valid_score_mask = cls_scores > score_thr + if valid_score_mask.sum() == 0: + continue + else: + valid_scores = cls_scores[valid_score_mask] + valid_boxes = boxes[valid_score_mask] + keep = nms(valid_boxes, valid_scores, nms_thr) + if len(keep) > 0: + cls_inds = np.ones((len(keep), 1)) * cls_ind + dets = np.concatenate( + [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1 + ) + final_dets.append(dets) + if len(final_dets) == 0: + return None + return np.concatenate(final_dets, 0) + +def demo_postprocess(outputs, img_size, p6=False): + grids = [] + expanded_strides = [] + strides = [8, 16, 32] if not p6 else [8, 16, 32, 64] + + hsizes = [img_size[0] // stride for stride in strides] + wsizes = [img_size[1] // stride for stride in strides] + + for hsize, wsize, stride in zip(hsizes, wsizes, strides): + xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) + grid = np.stack((xv, yv), 2).reshape(1, -1, 2) + grids.append(grid) + shape = grid.shape[:2] + expanded_strides.append(np.full((*shape, 1), stride)) + + grids = np.concatenate(grids, 1) + expanded_strides = np.concatenate(expanded_strides, 1) + outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides + outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides + + return outputs + +def preprocess(img, input_size, swap=(2, 0, 1)): + if len(img.shape) == 3: + padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114 + else: + padded_img = np.ones(input_size, dtype=np.uint8) * 114 + + r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1]) + resized_img = cv2.resize( + img, + (int(img.shape[1] * r), int(img.shape[0] * r)), + interpolation=cv2.INTER_LINEAR, + ).astype(np.uint8) + padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img + + padded_img = padded_img.transpose(swap) + padded_img = np.ascontiguousarray(padded_img, dtype=np.float32) + return padded_img, r + +def inference_detector(session, oriImg, detect_classes=[0], dtype=np.float32): + input_shape = (640,640) + img, ratio = preprocess(oriImg, input_shape) + + input = img[None, :, :, :] + input = input.astype(dtype) + if "InferenceSession" in type(session).__name__: + input_name = session.get_inputs()[0].name + output = session.run(None, {input_name: input}) + else: + outNames = session.getUnconnectedOutLayersNames() + session.setInput(input) + output = session.forward(outNames) + + predictions = demo_postprocess(output[0], input_shape)[0] + + boxes = predictions[:, :4] + scores = predictions[:, 4:5] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. 
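+    # Editor's note: YOLOX predicts boxes as (center_x, center_y, w, h) in the 640x640
+    # letterboxed frame; the four lines above convert them to (x1, y1, x2, y2) corners.
+    # Dividing by the preprocess ratio below maps corners back to original-image pixels,
+    # e.g. for a 1280x1280 source ratio = 0.5, so a corner at x = 320 becomes x = 640.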
+ boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1) + if dets is None: + return None + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + isscore = final_scores>0.3 + iscat = np.isin(final_cls_inds, detect_classes) + isbbox = [ i and j for (i, j) in zip(isscore, iscat)] + final_boxes = final_boxes[isbbox] + return final_boxes \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_pose.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..956c4bc715214bcc2e6228166032418294df46bc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_pose.py @@ -0,0 +1,363 @@ +from typing import List, Tuple + +import cv2 +import numpy as np + +def preprocess( + img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256) +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Do preprocessing for DWPose model inference. + + Args: + img (np.ndarray): Input image in shape. + input_size (tuple): Input image size in shape (w, h). + + Returns: + tuple: + - resized_img (np.ndarray): Preprocessed image. + - center (np.ndarray): Center of image. + - scale (np.ndarray): Scale of image. + """ + # get shape of image + img_shape = img.shape[:2] + out_img, out_center, out_scale = [], [], [] + if len(out_bbox) == 0: + out_bbox = [[0, 0, img_shape[1], img_shape[0]]] + for i in range(len(out_bbox)): + x0 = out_bbox[i][0] + y0 = out_bbox[i][1] + x1 = out_bbox[i][2] + y1 = out_bbox[i][3] + bbox = np.array([x0, y0, x1, y1]) + + # get center and scale + center, scale = bbox_xyxy2cs(bbox, padding=1.25) + + # do affine transformation + resized_img, scale = top_down_affine(input_size, scale, center, img) + + # normalize image + mean = np.array([123.675, 116.28, 103.53]) + std = np.array([58.395, 57.12, 57.375]) + resized_img = (resized_img - mean) / std + + out_img.append(resized_img) + out_center.append(center) + out_scale.append(scale) + + return out_img, out_center, out_scale + + +def inference(sess, img, dtype=np.float32): + """Inference DWPose model. Processing all image segments at once to take advantage of GPU's parallelism ability if onnxruntime is installed + + Args: + sess : ONNXRuntime session. + img : Input image in shape. + + Returns: + outputs : Output of DWPose model. + """ + all_out = [] + # build input + input = np.stack(img, axis=0).transpose(0, 3, 1, 2) + input = input.astype(dtype) + if "InferenceSession" in type(sess).__name__: + input_name = sess.get_inputs()[0].name + all_outputs = sess.run(None, {input_name: input}) + for batch_idx in range(len(all_outputs[0])): + outputs = [all_outputs[i][batch_idx:batch_idx+1,...] for i in range(len(all_outputs))] + all_out.append(outputs) + return all_out + + #OpenCV doesn't support batch processing sadly + for i in range(len(img)): + input = img[i].transpose(2, 0, 1) + input = input[None, :, :, :] + + outNames = sess.getUnconnectedOutLayersNames() + sess.setInput(input) + outputs = sess.forward(outNames) + all_out.append(outputs) + + return all_out + +def postprocess(outputs: List[np.ndarray], + model_input_size: Tuple[int, int], + center: Tuple[int, int], + scale: Tuple[int, int], + simcc_split_ratio: float = 2.0 + ) -> Tuple[np.ndarray, np.ndarray]: + """Postprocess for DWPose model output. + + Args: + outputs (np.ndarray): Output of RTMPose model. 
+ model_input_size (tuple): RTMPose model Input image size. + center (tuple): Center of bbox in shape (x, y). + scale (tuple): Scale of bbox in shape (w, h). + simcc_split_ratio (float): Split ratio of simcc. + + Returns: + tuple: + - keypoints (np.ndarray): Rescaled keypoints. + - scores (np.ndarray): Model predict scores. + """ + all_key = [] + all_score = [] + for i in range(len(outputs)): + # use simcc to decode + simcc_x, simcc_y = outputs[i] + keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio) + + # rescale keypoints + keypoints = keypoints / model_input_size * scale[i] + center[i] - scale[i] / 2 + all_key.append(keypoints[0]) + all_score.append(scores[0]) + + return np.array(all_key), np.array(all_score) + + +def bbox_xyxy2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (left, top, right, bottom) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + # get bbox center and scale + x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x1 + x2, y1 + y2]) * 0.5 + scale = np.hstack([x2 - x1, y2 - y1]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def _fix_aspect_ratio(bbox_scale: np.ndarray, + aspect_ratio: float) -> np.ndarray: + """Extend the scale to match the given aspect ratio. + + Args: + scale (np.ndarray): The image scale (w, h) in shape (2, ) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.ndarray: The reshaped image scale in (2, ) + """ + w, h = np.hsplit(bbox_scale, [1]) + bbox_scale = np.where(w > h * aspect_ratio, + np.hstack([w, w / aspect_ratio]), + np.hstack([h * aspect_ratio, h])) + return bbox_scale + + +def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: + """Rotate a point by an angle. + + Args: + pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) + angle_rad (float): rotation angle in radian + + Returns: + np.ndarray: Rotated point in shape (2, ) + """ + sn, cs = np.sin(angle_rad), np.cos(angle_rad) + rot_mat = np.array([[cs, -sn], [sn, cs]]) + return rot_mat @ pt + + +def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """To calculate the affine matrix, three pairs of points are required. This + function is used to get the 3rd point, given 2D points a & b. + + The 3rd point is defined by rotating vector `a - b` by 90 degrees + anticlockwise, using b as the rotation center. + + Args: + a (np.ndarray): The 1st point (x,y) in shape (2, ) + b (np.ndarray): The 2nd point (x,y) in shape (2, ) + + Returns: + np.ndarray: The 3rd point. + """ + direction = a - b + c = b + np.r_[-direction[1], direction[0]] + return c + + +def get_warp_matrix(center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], + shift: Tuple[float, float] = (0., 0.), + inv: bool = False) -> np.ndarray: + """Calculate the affine transformation matrix that can warp the bbox area + in the input image to the output size. + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). 
+ scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (np.ndarray[2, ] | list(2,)): Size of the + destination heatmaps. + shift (0-100%): Shift translation ratio wrt the width/height. + Default (0., 0.). + inv (bool): Option to inverse the affine transform direction. + (inv=False: src->dst or inv=True: dst->src) + + Returns: + np.ndarray: A 2x3 transformation matrix + """ + shift = np.array(shift) + src_w = scale[0] + dst_w = output_size[0] + dst_h = output_size[1] + + # compute transformation matrix + rot_rad = np.deg2rad(rot) + src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad) + dst_dir = np.array([0., dst_w * -0.5]) + + # get four corners of the src rectangle in the original image + src = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale * shift + src[1, :] = center + src_dir + scale * shift + src[2, :] = _get_3rd_point(src[0, :], src[1, :]) + + # get four corners of the dst rectangle in the input image + dst = np.zeros((3, 2), dtype=np.float32) + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return warp_mat + + +def top_down_affine(input_size: dict, bbox_scale: dict, bbox_center: dict, + img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get the bbox image as the model input by affine transform. + + Args: + input_size (dict): The input size of the model. + bbox_scale (dict): The bbox scale of the img. + bbox_center (dict): The bbox center of the img. + img (np.ndarray): The original image. + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: img after affine transform. + - np.ndarray[float32]: bbox scale after affine transform. + """ + w, h = input_size + warp_size = (int(w), int(h)) + + # reshape bbox to fixed aspect ratio + bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h) + + # get the affine matrix + center = bbox_center + scale = bbox_scale + rot = 0 + warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) + + # do affine transform + img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) + + return img, bbox_scale + + +def get_simcc_maximum(simcc_x: np.ndarray, + simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from simcc representations. + + Note: + instance number: N + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) + simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (N, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (N, K) + """ + N, K, Wx = simcc_x.shape + simcc_x = simcc_x.reshape(N * K, -1) + simcc_y = simcc_y.reshape(N * K, -1) + + # get maximum value locations + x_locs = np.argmax(simcc_x, axis=1) + y_locs = np.argmax(simcc_y, axis=1) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + max_val_x = np.amax(simcc_x, axis=1) + max_val_y = np.amax(simcc_y, axis=1) + + # get maximum value across x and y axis + mask = max_val_x > max_val_y + max_val_x[mask] = max_val_y[mask] + vals = max_val_x + locs[vals <= 0.] 
= -1 + + # reshape + locs = locs.reshape(N, K, 2) + vals = vals.reshape(N, K) + + return locs, vals + + +def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, + simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]: + """Modulate simcc distribution with Gaussian. + + Args: + simcc_x (np.ndarray[K, Wx]): model predicted simcc in x. + simcc_y (np.ndarray[K, Wy]): model predicted simcc in y. + simcc_split_ratio (int): The split ratio of simcc. + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2) + - np.ndarray[float32]: scores in shape (K,) or (n, K) + """ + keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) + keypoints /= simcc_split_ratio + + return keypoints, scores + + +def inference_pose(session, out_bbox, oriImg, model_input_size=(288, 384), dtype=np.float32): + resized_img, center, scale = preprocess(oriImg, out_bbox, model_input_size) + outputs = inference(session, resized_img, dtype) + keypoints, scores = postprocess(outputs, model_input_size, center, scale) + + return keypoints, scores \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_yolo_nas.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_yolo_nas.py new file mode 100644 index 0000000000000000000000000000000000000000..67ff249be283b11e0eb7d95ef7c0adc024c48285 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_onnx/cv_ox_yolo_nas.py @@ -0,0 +1,60 @@ +# Source: https://github.com/Hyuto/yolo-nas-onnx/tree/master/yolo-nas-py +# Inspired from: https://github.com/Deci-AI/super-gradients/blob/3.1.1/src/super_gradients/training/processing/processing.py + +import numpy as np +import cv2 + +def preprocess(img, input_size, swap=(2, 0, 1)): + if len(img.shape) == 3: + padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114 + else: + padded_img = np.ones(input_size, dtype=np.uint8) * 114 + + r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1]) + resized_img = cv2.resize( + img, + (int(img.shape[1] * r), int(img.shape[0] * r)), + interpolation=cv2.INTER_LINEAR, + ).astype(np.uint8) + padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img + + padded_img = padded_img.transpose(swap) + padded_img = np.ascontiguousarray(padded_img, dtype=np.float32) + return padded_img, r + +def inference_detector(session, oriImg, detect_classes=[0], dtype=np.uint8): + """ + This function is only compatible with onnx models exported from the new API with built-in NMS + ```py + from super_gradients.conversion.conversion_enums import ExportQuantizationMode + from super_gradients.common.object_names import Models + from super_gradients.training import models + + model = models.get(Models.YOLO_NAS_L, pretrained_weights="coco") + + export_result = model.export( + "yolo_nas/yolo_nas_l_fp16.onnx", + quantization_mode=ExportQuantizationMode.FP16, + device="cuda" + ) + ``` + """ + input_shape = (640,640) + img, ratio = preprocess(oriImg, input_shape) + input = img[None, :, :, :] + input = input.astype(dtype) + if "InferenceSession" in type(session).__name__: + input_name = session.get_inputs()[0].name + output = session.run(None, {input_name: input}) + else: + outNames = session.getUnconnectedOutLayersNames() + session.setInput(input) + output = session.forward(outNames) + num_preds, pred_boxes, pred_scores, pred_classes = output + num_preds = num_preds[0,0] + if num_preds == 0: + return None + idxs = 
np.where((np.isin(pred_classes[0, :num_preds], detect_classes)) & (pred_scores[0, :num_preds] > 0.3)) + if (len(idxs) == 0) or (idxs[0].size == 0): + return None + return pred_boxes[0, idxs].squeeze(axis=0) / ratio diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33e7a7f594ef441479257c788e4c0d6e08657fc8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__init__.py @@ -0,0 +1 @@ +#Dummy file ensuring this package will be recognized \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5b3a5714567df1650cfde3d080d53accb0e34e4 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_det.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_det.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..440518671756b7ea15c2601f11f8840c888e4cc9 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_det.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_pose.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_pose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2e2e10e48e79634d95fc7f80faa7df86c52cd5f Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/__pycache__/jit_pose.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_det.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_det.py new file mode 100644 index 0000000000000000000000000000000000000000..b220350ad241be59fdf42a71ff76f01d7bec26ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_det.py @@ -0,0 +1,125 @@ +import cv2 +import numpy as np +import torch + +def nms(boxes, scores, nms_thr): + """Single class NMS implemented in Numpy.""" + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= nms_thr)[0] + order = order[inds + 1] + + return keep + +def multiclass_nms(boxes, scores, nms_thr, score_thr): + """Multiclass NMS implemented in Numpy. 
Class-aware version.""" + final_dets = [] + num_classes = scores.shape[1] + for cls_ind in range(num_classes): + cls_scores = scores[:, cls_ind] + valid_score_mask = cls_scores > score_thr + if valid_score_mask.sum() == 0: + continue + else: + valid_scores = cls_scores[valid_score_mask] + valid_boxes = boxes[valid_score_mask] + keep = nms(valid_boxes, valid_scores, nms_thr) + if len(keep) > 0: + cls_inds = np.ones((len(keep), 1)) * cls_ind + dets = np.concatenate( + [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1 + ) + final_dets.append(dets) + if len(final_dets) == 0: + return None + return np.concatenate(final_dets, 0) + +def demo_postprocess(outputs, img_size, p6=False): + grids = [] + expanded_strides = [] + strides = [8, 16, 32] if not p6 else [8, 16, 32, 64] + + hsizes = [img_size[0] // stride for stride in strides] + wsizes = [img_size[1] // stride for stride in strides] + + for hsize, wsize, stride in zip(hsizes, wsizes, strides): + xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) + grid = np.stack((xv, yv), 2).reshape(1, -1, 2) + grids.append(grid) + shape = grid.shape[:2] + expanded_strides.append(np.full((*shape, 1), stride)) + + grids = np.concatenate(grids, 1) + expanded_strides = np.concatenate(expanded_strides, 1) + outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides + outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides + + return outputs + +def preprocess(img, input_size, swap=(2, 0, 1)): + if len(img.shape) == 3: + padded_img = np.ones((input_size[0], input_size[1], 3), dtype=np.uint8) * 114 + else: + padded_img = np.ones(input_size, dtype=np.uint8) * 114 + + r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1]) + resized_img = cv2.resize( + img, + (int(img.shape[1] * r), int(img.shape[0] * r)), + interpolation=cv2.INTER_LINEAR, + ).astype(np.uint8) + padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img + + padded_img = padded_img.transpose(swap) + padded_img = np.ascontiguousarray(padded_img, dtype=np.float32) + return padded_img, r + +def inference_detector(model, oriImg, detect_classes=[0]): + input_shape = (640,640) + img, ratio = preprocess(oriImg, input_shape) + + device, dtype = next(model.parameters()).device, next(model.parameters()).dtype + input = img[None, :, :, :] + input = torch.from_numpy(input).to(device).to(dtype) + + output = model(input).float().cpu().detach().numpy() + predictions = demo_postprocess(output[0], input_shape) + + boxes = predictions[:, :4] + scores = predictions[:, 4:5] * predictions[:, 5:] + + boxes_xyxy = np.ones_like(boxes) + boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2]/2. + boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3]/2. + boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2]/2. + boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3]/2. 
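+    # Editor's note: this mirrors the ONNX path in dw_onnx/cv_ox_det.py; only the forward
+    # pass differs (a TorchScript module run on its own device/dtype). boxes_xyxy holds
+    # corner coordinates in letterbox space until the rescale by the preprocess ratio below.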
+ boxes_xyxy /= ratio + dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1) + if dets is None: + return None + final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5] + isscore = final_scores>0.3 + iscat = np.isin(final_cls_inds, detect_classes) + isbbox = [ i and j for (i, j) in zip(isscore, iscat)] + final_boxes = final_boxes[isbbox] + return final_boxes \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_pose.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b015086cec0a81088cb72e24feed400610dc8f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/dw_torchscript/jit_pose.py @@ -0,0 +1,363 @@ +from typing import List, Tuple + +import cv2 +import numpy as np +import torch + +def preprocess( + img: np.ndarray, out_bbox, input_size: Tuple[int, int] = (192, 256) +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Do preprocessing for DWPose model inference. + + Args: + img (np.ndarray): Input image in shape. + input_size (tuple): Input image size in shape (w, h). + + Returns: + tuple: + - resized_img (np.ndarray): Preprocessed image. + - center (np.ndarray): Center of image. + - scale (np.ndarray): Scale of image. + """ + # get shape of image + img_shape = img.shape[:2] + out_img, out_center, out_scale = [], [], [] + if len(out_bbox) == 0: + out_bbox = [[0, 0, img_shape[1], img_shape[0]]] + for i in range(len(out_bbox)): + x0 = out_bbox[i][0] + y0 = out_bbox[i][1] + x1 = out_bbox[i][2] + y1 = out_bbox[i][3] + bbox = np.array([x0, y0, x1, y1]) + + # get center and scale + center, scale = bbox_xyxy2cs(bbox, padding=1.25) + + # do affine transformation + resized_img, scale = top_down_affine(input_size, scale, center, img) + + # normalize image + mean = np.array([123.675, 116.28, 103.53]) + std = np.array([58.395, 57.12, 57.375]) + resized_img = (resized_img - mean) / std + + out_img.append(resized_img) + out_center.append(center) + out_scale.append(scale) + + return out_img, out_center, out_scale + +def inference(model, img, bs=5): + """Inference DWPose model implemented in TorchScript. + + Args: + model : TorchScript Model. + img : Input image in shape. + + Returns: + outputs : Output of DWPose model. + """ + all_out = [] + # build input + orig_img_count = len(img) + #Pad zeros to fit batch size + for _ in range(bs - (orig_img_count % bs)): + img.append(np.zeros_like(img[0])) + input = np.stack(img, axis=0).transpose(0, 3, 1, 2) + device, dtype = next(model.parameters()).device, next(model.parameters()).dtype + input = torch.from_numpy(input).to(device).to(dtype) + + out1, out2 = [], [] + for i in range(input.shape[0] // bs): + curr_batch_output = model(input[i*bs:(i+1)*bs]) + out1.append(curr_batch_output[0].float()) + out2.append(curr_batch_output[1].float()) + out1, out2 = torch.cat(out1, dim=0)[:orig_img_count], torch.cat(out2, dim=0)[:orig_img_count] + out1, out2 = out1.float().cpu().detach().numpy(), out2.float().cpu().detach().numpy() + all_outputs = out1, out2 + + for batch_idx in range(len(all_outputs[0])): + outputs = [all_outputs[i][batch_idx:batch_idx+1,...] 
for i in range(len(all_outputs))] + all_out.append(outputs) + return all_out +def postprocess(outputs: List[np.ndarray], + model_input_size: Tuple[int, int], + center: Tuple[int, int], + scale: Tuple[int, int], + simcc_split_ratio: float = 2.0 + ) -> Tuple[np.ndarray, np.ndarray]: + """Postprocess for DWPose model output. + + Args: + outputs (np.ndarray): Output of RTMPose model. + model_input_size (tuple): RTMPose model Input image size. + center (tuple): Center of bbox in shape (x, y). + scale (tuple): Scale of bbox in shape (w, h). + simcc_split_ratio (float): Split ratio of simcc. + + Returns: + tuple: + - keypoints (np.ndarray): Rescaled keypoints. + - scores (np.ndarray): Model predict scores. + """ + all_key = [] + all_score = [] + for i in range(len(outputs)): + # use simcc to decode + simcc_x, simcc_y = outputs[i] + keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio) + + # rescale keypoints + keypoints = keypoints / model_input_size * scale[i] + center[i] - scale[i] / 2 + all_key.append(keypoints[0]) + all_score.append(scores[0]) + + return np.array(all_key), np.array(all_score) + + +def bbox_xyxy2cs(bbox: np.ndarray, + padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]: + """Transform the bbox format from (x,y,w,h) into (center, scale) + + Args: + bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted + as (left, top, right, bottom) + padding (float): BBox padding factor that will be multilied to scale. + Default: 1.0 + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or + (n, 2) + - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or + (n, 2) + """ + # convert single bbox from (4, ) to (1, 4) + dim = bbox.ndim + if dim == 1: + bbox = bbox[None, :] + + # get bbox center and scale + x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3]) + center = np.hstack([x1 + x2, y1 + y2]) * 0.5 + scale = np.hstack([x2 - x1, y2 - y1]) * padding + + if dim == 1: + center = center[0] + scale = scale[0] + + return center, scale + + +def _fix_aspect_ratio(bbox_scale: np.ndarray, + aspect_ratio: float) -> np.ndarray: + """Extend the scale to match the given aspect ratio. + + Args: + scale (np.ndarray): The image scale (w, h) in shape (2, ) + aspect_ratio (float): The ratio of ``w/h`` + + Returns: + np.ndarray: The reshaped image scale in (2, ) + """ + w, h = np.hsplit(bbox_scale, [1]) + bbox_scale = np.where(w > h * aspect_ratio, + np.hstack([w, w / aspect_ratio]), + np.hstack([h * aspect_ratio, h])) + return bbox_scale + + +def _rotate_point(pt: np.ndarray, angle_rad: float) -> np.ndarray: + """Rotate a point by an angle. + + Args: + pt (np.ndarray): 2D point coordinates (x, y) in shape (2, ) + angle_rad (float): rotation angle in radian + + Returns: + np.ndarray: Rotated point in shape (2, ) + """ + sn, cs = np.sin(angle_rad), np.cos(angle_rad) + rot_mat = np.array([[cs, -sn], [sn, cs]]) + return rot_mat @ pt + + +def _get_3rd_point(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """To calculate the affine matrix, three pairs of points are required. This + function is used to get the 3rd point, given 2D points a & b. + + The 3rd point is defined by rotating vector `a - b` by 90 degrees + anticlockwise, using b as the rotation center. + + Args: + a (np.ndarray): The 1st point (x,y) in shape (2, ) + b (np.ndarray): The 2nd point (x,y) in shape (2, ) + + Returns: + np.ndarray: The 3rd point. 
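+
+        Example (illustrative): a = (1, 0) and b = (0, 0) give c = (0, 1).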
+ """ + direction = a - b + c = b + np.r_[-direction[1], direction[0]] + return c + + +def get_warp_matrix(center: np.ndarray, + scale: np.ndarray, + rot: float, + output_size: Tuple[int, int], + shift: Tuple[float, float] = (0., 0.), + inv: bool = False) -> np.ndarray: + """Calculate the affine transformation matrix that can warp the bbox area + in the input image to the output size. + + Args: + center (np.ndarray[2, ]): Center of the bounding box (x, y). + scale (np.ndarray[2, ]): Scale of the bounding box + wrt [width, height]. + rot (float): Rotation angle (degree). + output_size (np.ndarray[2, ] | list(2,)): Size of the + destination heatmaps. + shift (0-100%): Shift translation ratio wrt the width/height. + Default (0., 0.). + inv (bool): Option to inverse the affine transform direction. + (inv=False: src->dst or inv=True: dst->src) + + Returns: + np.ndarray: A 2x3 transformation matrix + """ + shift = np.array(shift) + src_w = scale[0] + dst_w = output_size[0] + dst_h = output_size[1] + + # compute transformation matrix + rot_rad = np.deg2rad(rot) + src_dir = _rotate_point(np.array([0., src_w * -0.5]), rot_rad) + dst_dir = np.array([0., dst_w * -0.5]) + + # get four corners of the src rectangle in the original image + src = np.zeros((3, 2), dtype=np.float32) + src[0, :] = center + scale * shift + src[1, :] = center + src_dir + scale * shift + src[2, :] = _get_3rd_point(src[0, :], src[1, :]) + + # get four corners of the dst rectangle in the input image + dst = np.zeros((3, 2), dtype=np.float32) + dst[0, :] = [dst_w * 0.5, dst_h * 0.5] + dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir + dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :]) + + if inv: + warp_mat = cv2.getAffineTransform(np.float32(dst), np.float32(src)) + else: + warp_mat = cv2.getAffineTransform(np.float32(src), np.float32(dst)) + + return warp_mat + + +def top_down_affine(input_size: dict, bbox_scale: dict, bbox_center: dict, + img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get the bbox image as the model input by affine transform. + + Args: + input_size (dict): The input size of the model. + bbox_scale (dict): The bbox scale of the img. + bbox_center (dict): The bbox center of the img. + img (np.ndarray): The original image. + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: img after affine transform. + - np.ndarray[float32]: bbox scale after affine transform. + """ + w, h = input_size + warp_size = (int(w), int(h)) + + # reshape bbox to fixed aspect ratio + bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h) + + # get the affine matrix + center = bbox_center + scale = bbox_scale + rot = 0 + warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h)) + + # do affine transform + img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR) + + return img, bbox_scale + + +def get_simcc_maximum(simcc_x: np.ndarray, + simcc_y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """Get maximum response location and value from simcc representations. 
+ + Note: + instance number: N + num_keypoints: K + heatmap height: H + heatmap width: W + + Args: + simcc_x (np.ndarray): x-axis SimCC in shape (K, Wx) or (N, K, Wx) + simcc_y (np.ndarray): y-axis SimCC in shape (K, Wy) or (N, K, Wy) + + Returns: + tuple: + - locs (np.ndarray): locations of maximum heatmap responses in shape + (K, 2) or (N, K, 2) + - vals (np.ndarray): values of maximum heatmap responses in shape + (K,) or (N, K) + """ + N, K, Wx = simcc_x.shape + simcc_x = simcc_x.reshape(N * K, -1) + simcc_y = simcc_y.reshape(N * K, -1) + + # get maximum value locations + x_locs = np.argmax(simcc_x, axis=1) + y_locs = np.argmax(simcc_y, axis=1) + locs = np.stack((x_locs, y_locs), axis=-1).astype(np.float32) + max_val_x = np.amax(simcc_x, axis=1) + max_val_y = np.amax(simcc_y, axis=1) + + # get maximum value across x and y axis + mask = max_val_x > max_val_y + max_val_x[mask] = max_val_y[mask] + vals = max_val_x + locs[vals <= 0.] = -1 + + # reshape + locs = locs.reshape(N, K, 2) + vals = vals.reshape(N, K) + + return locs, vals + + +def decode(simcc_x: np.ndarray, simcc_y: np.ndarray, + simcc_split_ratio) -> Tuple[np.ndarray, np.ndarray]: + """Modulate simcc distribution with Gaussian. + + Args: + simcc_x (np.ndarray[K, Wx]): model predicted simcc in x. + simcc_y (np.ndarray[K, Wy]): model predicted simcc in y. + simcc_split_ratio (int): The split ratio of simcc. + + Returns: + tuple: A tuple containing center and scale. + - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2) + - np.ndarray[float32]: scores in shape (K,) or (n, K) + """ + keypoints, scores = get_simcc_maximum(simcc_x, simcc_y) + keypoints /= simcc_split_ratio + + return keypoints, scores + +def inference_pose(model, out_bbox, oriImg, model_input_size=(288, 384)): + resized_img, center, scale = preprocess(oriImg, out_bbox, model_input_size) + #outputs = inference(session, resized_img, dtype) + outputs = inference(model, resized_img) + + keypoints, scores = postprocess(outputs, model_input_size, center, scale) + + return keypoints, scores \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/face.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/face.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c46d77664aa9fa91c63785a1485a396f05cacc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/face.py @@ -0,0 +1,362 @@ +import logging +import numpy as np +from torchvision.transforms import ToTensor, ToPILImage +import torch +import torch.nn.functional as F +import cv2 + +from . import util +from torch.nn import Conv2d, Module, ReLU, MaxPool2d, init + + +class FaceNet(Module): + """Model the cascading heatmaps. 
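+    Six CPM-style stages: stage 1 and each refinement stage emit a 71-channel
+    heatmap, collected into the list returned by ``forward``.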
""" + def __init__(self): + super(FaceNet, self).__init__() + # cnn to make feature map + self.relu = ReLU() + self.max_pooling_2d = MaxPool2d(kernel_size=2, stride=2) + self.conv1_1 = Conv2d(in_channels=3, out_channels=64, + kernel_size=3, stride=1, padding=1) + self.conv1_2 = Conv2d( + in_channels=64, out_channels=64, kernel_size=3, stride=1, + padding=1) + self.conv2_1 = Conv2d( + in_channels=64, out_channels=128, kernel_size=3, stride=1, + padding=1) + self.conv2_2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=3, stride=1, + padding=1) + self.conv3_1 = Conv2d( + in_channels=128, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_2 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_3 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_4 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv4_1 = Conv2d( + in_channels=256, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_2 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_3 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_4 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_1 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_2 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_3_CPM = Conv2d( + in_channels=512, out_channels=128, kernel_size=3, stride=1, + padding=1) + + # stage1 + self.conv6_1_CPM = Conv2d( + in_channels=128, out_channels=512, kernel_size=1, stride=1, + padding=0) + self.conv6_2_CPM = Conv2d( + in_channels=512, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage2 + self.Mconv1_stage2 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage2 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage3 + self.Mconv1_stage3 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage3 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage4 + self.Mconv1_stage4 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage4 = Conv2d( + 
in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage4 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage5 + self.Mconv1_stage5 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage5 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage6 + self.Mconv1_stage6 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage6 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + for m in self.modules(): + if isinstance(m, Conv2d): + init.constant_(m.bias, 0) + + def forward(self, x): + """Return a list of heatmaps.""" + heatmaps = [] + + h = self.relu(self.conv1_1(x)) + h = self.relu(self.conv1_2(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv2_1(h)) + h = self.relu(self.conv2_2(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv3_1(h)) + h = self.relu(self.conv3_2(h)) + h = self.relu(self.conv3_3(h)) + h = self.relu(self.conv3_4(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv4_1(h)) + h = self.relu(self.conv4_2(h)) + h = self.relu(self.conv4_3(h)) + h = self.relu(self.conv4_4(h)) + h = self.relu(self.conv5_1(h)) + h = self.relu(self.conv5_2(h)) + h = self.relu(self.conv5_3_CPM(h)) + feature_map = h + + # stage1 + h = self.relu(self.conv6_1_CPM(h)) + h = self.conv6_2_CPM(h) + heatmaps.append(h) + + # stage2 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage2(h)) + h = self.relu(self.Mconv2_stage2(h)) + h = self.relu(self.Mconv3_stage2(h)) + h = self.relu(self.Mconv4_stage2(h)) + h = self.relu(self.Mconv5_stage2(h)) + h = self.relu(self.Mconv6_stage2(h)) + h = self.Mconv7_stage2(h) + heatmaps.append(h) + + # stage3 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage3(h)) + h = self.relu(self.Mconv2_stage3(h)) + h = self.relu(self.Mconv3_stage3(h)) + h = 
self.relu(self.Mconv4_stage3(h)) + h = self.relu(self.Mconv5_stage3(h)) + h = self.relu(self.Mconv6_stage3(h)) + h = self.Mconv7_stage3(h) + heatmaps.append(h) + + # stage4 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage4(h)) + h = self.relu(self.Mconv2_stage4(h)) + h = self.relu(self.Mconv3_stage4(h)) + h = self.relu(self.Mconv4_stage4(h)) + h = self.relu(self.Mconv5_stage4(h)) + h = self.relu(self.Mconv6_stage4(h)) + h = self.Mconv7_stage4(h) + heatmaps.append(h) + + # stage5 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage5(h)) + h = self.relu(self.Mconv2_stage5(h)) + h = self.relu(self.Mconv3_stage5(h)) + h = self.relu(self.Mconv4_stage5(h)) + h = self.relu(self.Mconv5_stage5(h)) + h = self.relu(self.Mconv6_stage5(h)) + h = self.Mconv7_stage5(h) + heatmaps.append(h) + + # stage6 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage6(h)) + h = self.relu(self.Mconv2_stage6(h)) + h = self.relu(self.Mconv3_stage6(h)) + h = self.relu(self.Mconv4_stage6(h)) + h = self.relu(self.Mconv5_stage6(h)) + h = self.relu(self.Mconv6_stage6(h)) + h = self.Mconv7_stage6(h) + heatmaps.append(h) + + return heatmaps + + +LOG = logging.getLogger(__name__) +TOTEN = ToTensor() +TOPIL = ToPILImage() + + +params = { + 'gaussian_sigma': 2.5, + 'inference_img_size': 736, # 368, 736, 1312 + 'heatmap_peak_thresh': 0.1, + 'crop_scale': 1.5, + 'line_indices': [ + [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], + [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13], + [13, 14], [14, 15], [15, 16], + [17, 18], [18, 19], [19, 20], [20, 21], + [22, 23], [23, 24], [24, 25], [25, 26], + [27, 28], [28, 29], [29, 30], + [31, 32], [32, 33], [33, 34], [34, 35], + [36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [41, 36], + [42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [47, 42], + [48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], + [54, 55], [55, 56], [56, 57], [57, 58], [58, 59], [59, 48], + [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66], + [66, 67], [67, 60] + ], +} + + +class Face(object): + """ + The OpenPose face landmark detector model. 
+ + Args: + inference_size: set the size of the inference image size, suggested: + 368, 736, 1312, default 736 + gaussian_sigma: blur the heatmaps, default 2.5 + heatmap_peak_thresh: return landmark if over threshold, default 0.1 + + """ + def __init__(self, face_model_path, + inference_size=None, + gaussian_sigma=None, + heatmap_peak_thresh=None): + self.inference_size = inference_size or params["inference_img_size"] + self.sigma = gaussian_sigma or params['gaussian_sigma'] + self.threshold = heatmap_peak_thresh or params["heatmap_peak_thresh"] + self.model = FaceNet() + self.model.load_state_dict(torch.load(face_model_path)) + # if torch.cuda.is_available(): + # self.model = self.model.cuda() + # print('cuda') + self.model.eval() + + def __call__(self, face_img): + H, W, C = face_img.shape + + w_size = 384 + x_data = torch.from_numpy(util.smart_resize(face_img, (w_size, w_size))).permute([2, 0, 1]) / 256.0 - 0.5 + + x_data = x_data.to(self.cn_device) + + with torch.no_grad(): + hs = self.model(x_data[None, ...]) + heatmaps = F.interpolate( + hs[-1], + (H, W), + mode='bilinear', align_corners=True).cpu().numpy()[0] + return heatmaps + + def compute_peaks_from_heatmaps(self, heatmaps): + all_peaks = [] + for part in range(heatmaps.shape[0]): + map_ori = heatmaps[part].copy() + binary = np.ascontiguousarray(map_ori > 0.05, dtype=np.uint8) + + if np.sum(binary) == 0: + continue + + positions = np.where(binary > 0.5) + intensities = map_ori[positions] + mi = np.argmax(intensities) + y, x = positions[0][mi], positions[1][mi] + all_peaks.append([x, y]) + + return np.array(all_peaks) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/hand.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/hand.py new file mode 100644 index 0000000000000000000000000000000000000000..74767def506c72612954fe3b79056d17a83b1e16 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/hand.py @@ -0,0 +1,94 @@ +import cv2 +import json +import numpy as np +import math +import time +from scipy.ndimage.filters import gaussian_filter +import matplotlib.pyplot as plt +import matplotlib +import torch +from skimage.measure import label + +from .model import handpose_model +from . 
import util + +class Hand(object): + def __init__(self, model_path): + self.model = handpose_model() + # if torch.cuda.is_available(): + # self.model = self.model.cuda() + # print('cuda') + model_dict = util.transfer(self.model, torch.load(model_path)) + self.model.load_state_dict(model_dict) + self.model.eval() + + def __call__(self, oriImgRaw): + scale_search = [0.5, 1.0, 1.5, 2.0] + # scale_search = [0.5] + boxsize = 368 + stride = 8 + padValue = 128 + thre = 0.05 + multiplier = [x * boxsize for x in scale_search] + + wsize = 128 + heatmap_avg = np.zeros((wsize, wsize, 22)) + + Hr, Wr, Cr = oriImgRaw.shape + + oriImg = cv2.GaussianBlur(oriImgRaw, (0, 0), 0.8) + + for m in range(len(multiplier)): + scale = multiplier[m] + imageToTest = util.smart_resize(oriImg, (scale, scale)) + + imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) + im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 + im = np.ascontiguousarray(im) + + data = torch.from_numpy(im).float() + if torch.cuda.is_available(): + data = data.cuda() + + with torch.no_grad(): + data = data.to(self.cn_device) + output = self.model(data).cpu().numpy() + + # extract outputs, resize, and remove padding + heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps + heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride) + heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + heatmap = util.smart_resize(heatmap, (wsize, wsize)) + + heatmap_avg += heatmap / len(multiplier) + + all_peaks = [] + for part in range(21): + map_ori = heatmap_avg[:, :, part] + one_heatmap = gaussian_filter(map_ori, sigma=3) + binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) + + if np.sum(binary) == 0: + all_peaks.append([0, 0]) + continue + label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) + max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 + label_img[label_img != max_index] = 0 + map_ori[label_img == 0] = 0 + + y, x = util.npmax(map_ori) + y = int(float(y) * float(Hr) / float(wsize)) + x = int(float(x) * float(Wr) / float(wsize)) + all_peaks.append([x, y]) + return np.array(all_peaks) + +if __name__ == "__main__": + hand_estimation = Hand('../model/hand_pose_model.pth') + + # test_image = '../images/hand.jpg' + test_image = '../images/hand.jpg' + oriImg = cv2.imread(test_image) # B,G,R order + peaks = hand_estimation(oriImg) + canvas = util.draw_handpose(oriImg, peaks, True) + cv2.imshow('', canvas) + cv2.waitKey(0) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/model.py new file mode 100644 index 0000000000000000000000000000000000000000..72dc79ad857933a7c108d21494d6395572b816e6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/model.py @@ -0,0 +1,218 @@ +import torch +from collections import OrderedDict + +import torch +import torch.nn as nn + +def make_layers(block, no_relu_layers): + layers = [] + for layer_name, v in block.items(): + if 'pool' in layer_name: + layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], + padding=v[2]) + layers.append((layer_name, layer)) + else: + conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], + kernel_size=v[2], stride=v[3], + padding=v[4]) + layers.append((layer_name, conv2d)) + if layer_name not in no_relu_layers: + 
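+            # A ReLU named 'relu_<layer_name>' is appended after every conv
+            # except the prediction heads listed in no_relu_layers, whose raw
+            # outputs serve as the stage's heatmap/PAF predictions.
+            # (The bodypose list below names 'Mconv7_stage6_L1' twice rather
+            # than '..._L2' -- kept as-is from the upstream pytorch-openpose
+            # code this file derives from.)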
layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) + + return nn.Sequential(OrderedDict(layers)) + +class bodypose_model(nn.Module): + def __init__(self): + super(bodypose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ + 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ + 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ + 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] + blocks = {} + block0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3_CPM', [512, 256, 3, 1, 1]), + ('conv4_4_CPM', [256, 128, 3, 1, 1]) + ]) + + + # Stage 1 + block1_1 = OrderedDict([ + ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), + ('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) + ]) + + block1_2 = OrderedDict([ + ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), + ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) + ]) + blocks['block1_1'] = block1_1 + blocks['block1_2'] = block1_2 + + self.model0 = make_layers(block0, no_relu_layers) + + # Stages 2 - 6 + for i in range(2, 7): + blocks['block%d_1' % i] = OrderedDict([ + ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) + ]) + + blocks['block%d_2' % i] = OrderedDict([ + ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_1 = blocks['block1_1'] + self.model2_1 = blocks['block2_1'] + self.model3_1 = blocks['block3_1'] + self.model4_1 = blocks['block4_1'] + self.model5_1 = blocks['block5_1'] + self.model6_1 = blocks['block6_1'] + + self.model1_2 = blocks['block1_2'] + self.model2_2 = blocks['block2_2'] + self.model3_2 = blocks['block3_2'] + self.model4_2 = blocks['block4_2'] + self.model5_2 = blocks['block5_2'] + self.model6_2 = blocks['block6_2'] + + + def forward(self, x): + + out1 = self.model0(x) + + out1_1 = self.model1_1(out1) + out1_2 = self.model1_2(out1) + out2 = torch.cat([out1_1, out1_2, out1], 1) + + out2_1 = self.model2_1(out2) + out2_2 = self.model2_2(out2) + out3 = torch.cat([out2_1, out2_2, out1], 1) + + out3_1 = self.model3_1(out3) + out3_2 = self.model3_2(out3) + out4 = torch.cat([out3_1, out3_2, out1], 1) + + out4_1 = self.model4_1(out4) + out4_2 = self.model4_2(out4) + out5 = torch.cat([out4_1, out4_2, 
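+                          # Each stage's input concatenates both branch outputs
+                          # with the shared backbone features: 38 (L1, part
+                          # affinity fields) + 19 (L2, keypoint heatmaps)
+                          # + 128 (out1) = 185 channels, matching the
+                          # Mconv1_stage*_L* in_channels above.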
out1], 1) + + out5_1 = self.model5_1(out5) + out5_2 = self.model5_2(out5) + out6 = torch.cat([out5_1, out5_2, out1], 1) + + out6_1 = self.model6_1(out6) + out6_2 = self.model6_2(out6) + + return out6_1, out6_2 + +class handpose_model(nn.Module): + def __init__(self): + super(handpose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ + 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] + # stage 1 + block1_0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3', [512, 512, 3, 1, 1]), + ('conv4_4', [512, 512, 3, 1, 1]), + ('conv5_1', [512, 512, 3, 1, 1]), + ('conv5_2', [512, 512, 3, 1, 1]), + ('conv5_3_CPM', [512, 128, 3, 1, 1]) + ]) + + block1_1 = OrderedDict([ + ('conv6_1_CPM', [128, 512, 1, 1, 0]), + ('conv6_2_CPM', [512, 22, 1, 1, 0]) + ]) + + blocks = {} + blocks['block1_0'] = block1_0 + blocks['block1_1'] = block1_1 + + # stage 2-6 + for i in range(2, 7): + blocks['block%d' % i] = OrderedDict([ + ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), + ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_0 = blocks['block1_0'] + self.model1_1 = blocks['block1_1'] + self.model2 = blocks['block2'] + self.model3 = blocks['block3'] + self.model4 = blocks['block4'] + self.model5 = blocks['block5'] + self.model6 = blocks['block6'] + + def forward(self, x): + out1_0 = self.model1_0(x) + out1_1 = self.model1_1(out1_0) + concat_stage2 = torch.cat([out1_1, out1_0], 1) + out_stage2 = self.model2(concat_stage2) + concat_stage3 = torch.cat([out_stage2, out1_0], 1) + out_stage3 = self.model3(concat_stage3) + concat_stage4 = torch.cat([out_stage3, out1_0], 1) + out_stage4 = self.model4(concat_stage4) + concat_stage5 = torch.cat([out_stage4, out1_0], 1) + out_stage5 = self.model5(concat_stage5) + concat_stage6 = torch.cat([out_stage5, out1_0], 1) + out_stage6 = self.model6(concat_stage6) + return out_stage6 + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/types.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/types.py new file mode 100644 index 0000000000000000000000000000000000000000..e521e65dcbe155dc8fe863c0a016184d829ec751 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/types.py @@ -0,0 +1,29 @@ +from typing import NamedTuple, List, Optional + +class Keypoint(NamedTuple): + x: float + y: float + score: float = 1.0 + id: int = -1 + + +class BodyResult(NamedTuple): + # Note: Using `Optional` instead of `|` operator as the ladder is a Python + # 3.10 feature. + # Annotator code should be Python 3.8 Compatible, as controlnet repo uses + # Python 3.8 environment. 
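+    # keypoints holds the 18 OpenPose-style body points in fixed order, with
+    # None for joints that were not detected; total_score sums per-keypoint
+    # confidences and total_parts counts keypoint slots, giving callers a
+    # rough measure of detection quality.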
+ # https://github.com/lllyasviel/ControlNet/blob/d3284fcd0972c510635a4f5abe2eeb71dc0de524/environment.yaml#L6 + keypoints: List[Optional[Keypoint]] + total_score: float = 0.0 + total_parts: int = 0 + + +HandResult = List[Keypoint] +FaceResult = List[Keypoint] + + +class PoseResult(NamedTuple): + body: BodyResult + left_hand: Optional[HandResult] + right_hand: Optional[HandResult] + face: Optional[FaceResult] diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/util.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/util.py new file mode 100644 index 0000000000000000000000000000000000000000..cce0dc28a0989af7ae1f04ca6f782d694e27a4bd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/util.py @@ -0,0 +1,457 @@ +import math +import numpy as np +import matplotlib +import cv2 +import os +from typing import List, Tuple, Union, Optional + +from .body import BodyResult, Keypoint + +eps = 0.01 + + +def smart_resize(x, s): + Ht, Wt = s + if x.ndim == 2: + Ho, Wo = x.shape + Co = 1 + else: + Ho, Wo, Co = x.shape + if Co == 3 or Co == 1: + k = float(Ht + Wt) / float(Ho + Wo) + return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) + else: + return np.stack([smart_resize(x[:, :, i], s) for i in range(Co)], axis=2) + + +def smart_resize_k(x, fx, fy): + if x.ndim == 2: + Ho, Wo = x.shape + Co = 1 + else: + Ho, Wo, Co = x.shape + Ht, Wt = Ho * fy, Wo * fx + if Co == 3 or Co == 1: + k = float(Ht + Wt) / float(Ho + Wo) + return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) + else: + return np.stack([smart_resize_k(x[:, :, i], fx, fy) for i in range(Co)], axis=2) + + +def padRightDownCorner(img, stride, padValue): + h = img.shape[0] + w = img.shape[1] + + pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img + pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) + img_padded = np.concatenate((pad_up, img_padded), axis=0) + pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) + img_padded = np.concatenate((pad_left, img_padded), axis=1) + pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) + img_padded = np.concatenate((img_padded, pad_down), axis=0) + pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) + img_padded = np.concatenate((img_padded, pad_right), axis=1) + + return img_padded, pad + + +def transfer(model, model_weights): + transfered_model_weights = {} + for weights_name in model.state_dict().keys(): + transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] + return transfered_model_weights + + +def is_normalized(keypoints: List[Optional[Keypoint]]) -> bool: + point_normalized = [ + 0 <= abs(k.x) <= 1 and 0 <= abs(k.y) <= 1 + for k in keypoints + if k is not None + ] + if not point_normalized: + return False + return all(point_normalized) + + +def draw_bodypose(canvas: np.ndarray, keypoints: List[Keypoint]) -> np.ndarray: + """ + Draw keypoints and limbs representing body pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the body pose. + keypoints (List[Keypoint]): A list of Keypoint objects representing the body keypoints to be drawn. 
+ + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn body pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + if not is_normalized(keypoints): + H, W = 1.0, 1.0 + else: + H, W, _ = canvas.shape + + stickwidth = 4 + + limbSeq = [ + [2, 3], [2, 6], [3, 4], [4, 5], + [6, 7], [7, 8], [2, 9], [9, 10], + [10, 11], [2, 12], [12, 13], [13, 14], + [2, 1], [1, 15], [15, 17], [1, 16], + [16, 18], + ] + + colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + + for (k1_index, k2_index), color in zip(limbSeq, colors): + keypoint1 = keypoints[k1_index - 1] + keypoint2 = keypoints[k2_index - 1] + + if keypoint1 is None or keypoint2 is None: + continue + + Y = np.array([keypoint1.x, keypoint2.x]) * float(W) + X = np.array([keypoint1.y, keypoint2.y]) * float(H) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(canvas, polygon, [int(float(c) * 0.6) for c in color]) + + for keypoint, color in zip(keypoints, colors): + if keypoint is None: + continue + + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + + return canvas + + +def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + if not keypoints: + return canvas + + if not is_normalized(keypoints): + H, W = 1.0, 1.0 + else: + H, W, _ = canvas.shape + + edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ + [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] + + for ie, (e1, e2) in enumerate(edges): + k1 = keypoints[e1] + k2 = keypoints[e2] + if k1 is None or k2 is None: + continue + + x1 = int(k1.x * W) + y1 = int(k1.y * H) + x2 = int(k2.x * W) + y2 = int(k2.y * H) + if x1 > eps and y1 > eps and x2 > eps and y2 > eps: + cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2) + + for keypoint in keypoints: + if keypoint is None: + continue + + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) + return canvas + + +def draw_facepose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: + """ + Draw keypoints representing face pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the face pose. 
+ keypoints (List[Keypoint]| None): A list of Keypoint objects representing the face keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn face pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + if not keypoints: + return canvas + + if not is_normalized(keypoints): + H, W = 1.0, 1.0 + else: + H, W, _ = canvas.shape + + for keypoint in keypoints: + if keypoint is None: + continue + + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1) + return canvas + + +# detect hand according to body pose keypoints +# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp +def handDetect(body: BodyResult, oriImg) -> List[Tuple[int, int, int, bool]]: + """ + Detect hands in the input body pose keypoints and calculate the bounding box for each hand. + + Args: + body (BodyResult): A BodyResult object containing the detected body pose keypoints. + oriImg (numpy.ndarray): A 3D numpy array representing the original input image. + + Returns: + List[Tuple[int, int, int, bool]]: A list of tuples, each containing the coordinates (x, y) of the top-left + corner of the bounding box, the width (height) of the bounding box, and + a boolean flag indicating whether the hand is a left hand (True) or a + right hand (False). + + Notes: + - The width and height of the bounding boxes are equal since the network requires squared input. + - The minimum bounding box size is 20 pixels. + """ + ratioWristElbow = 0.33 + detect_result = [] + image_height, image_width = oriImg.shape[0:2] + + keypoints = body.keypoints + # right hand: wrist 4, elbow 3, shoulder 2 + # left hand: wrist 7, elbow 6, shoulder 5 + left_shoulder = keypoints[5] + left_elbow = keypoints[6] + left_wrist = keypoints[7] + right_shoulder = keypoints[2] + right_elbow = keypoints[3] + right_wrist = keypoints[4] + + # if any of three not detected + has_left = all(keypoint is not None for keypoint in (left_shoulder, left_elbow, left_wrist)) + has_right = all(keypoint is not None for keypoint in (right_shoulder, right_elbow, right_wrist)) + if not (has_left or has_right): + return [] + + hands = [] + #left hand + if has_left: + hands.append([ + left_shoulder.x, left_shoulder.y, + left_elbow.x, left_elbow.y, + left_wrist.x, left_wrist.y, + True + ]) + # right hand + if has_right: + hands.append([ + right_shoulder.x, right_shoulder.y, + right_elbow.x, right_elbow.y, + right_wrist.x, right_wrist.y, + False + ]) + + for x1, y1, x2, y2, x3, y3, is_left in hands: + # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox + # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]); + # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]); + # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow); + # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder); + # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder); + x = x3 + ratioWristElbow * (x3 - x2) + y = y3 + ratioWristElbow * (y3 - y2) + distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) + distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) + 
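+        # In words: the box is centered on a point extrapolated one third of
+        # the elbow->wrist vector past the wrist, and its side length is
+        # 1.5x the larger of the wrist-elbow distance and 0.9x the
+        # elbow-shoulder distance. E.g. wrist-elbow = 100 px and
+        # elbow-shoulder = 120 px gives width = 1.5 * max(100, 108) = 162 px.
+        # Boxes narrower than 20 px are discarded further below (20 px is the
+        # minimum accepted size, as the docstring notes).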
width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) + # x-y refers to the center --> offset to topLeft point + # handRectangle.x -= handRectangle.width / 2.f; + # handRectangle.y -= handRectangle.height / 2.f; + x -= width / 2 + y -= width / 2 # width = height + # overflow the image + if x < 0: x = 0 + if y < 0: y = 0 + width1 = width + width2 = width + if x + width > image_width: width1 = image_width - x + if y + width > image_height: width2 = image_height - y + width = min(width1, width2) + # the max hand box value is 20 pixels + if width >= 20: + detect_result.append((int(x), int(y), int(width), is_left)) + + ''' + return value: [[x, y, w, True if left hand else False]]. + width=height since the network require squared input. + x, y is the coordinate of top left + ''' + return detect_result + + +# Written by Lvmin +def faceDetect(body: BodyResult, oriImg) -> Union[Tuple[int, int, int], None]: + """ + Detect the face in the input body pose keypoints and calculate the bounding box for the face. + + Args: + body (BodyResult): A BodyResult object containing the detected body pose keypoints. + oriImg (numpy.ndarray): A 3D numpy array representing the original input image. + + Returns: + Tuple[int, int, int] | None: A tuple containing the coordinates (x, y) of the top-left corner of the + bounding box and the width (height) of the bounding box, or None if the + face is not detected or the bounding box width is less than 20 pixels. + + Notes: + - The width and height of the bounding box are equal. + - The minimum bounding box size is 20 pixels. + """ + # left right eye ear 14 15 16 17 + image_height, image_width = oriImg.shape[0:2] + + keypoints = body.keypoints + head = keypoints[0] + left_eye = keypoints[14] + right_eye = keypoints[15] + left_ear = keypoints[16] + right_ear = keypoints[17] + + if head is None or all(keypoint is None for keypoint in (left_eye, right_eye, left_ear, right_ear)): + return None + + width = 0.0 + x0, y0 = head.x, head.y + + if left_eye is not None: + x1, y1 = left_eye.x, left_eye.y + d = max(abs(x0 - x1), abs(y0 - y1)) + width = max(width, d * 3.0) + + if right_eye is not None: + x1, y1 = right_eye.x, right_eye.y + d = max(abs(x0 - x1), abs(y0 - y1)) + width = max(width, d * 3.0) + + if left_ear is not None: + x1, y1 = left_ear.x, left_ear.y + d = max(abs(x0 - x1), abs(y0 - y1)) + width = max(width, d * 1.5) + + if right_ear is not None: + x1, y1 = right_ear.x, right_ear.y + d = max(abs(x0 - x1), abs(y0 - y1)) + width = max(width, d * 1.5) + + x, y = x0, y0 + + x -= width + y -= width + + if x < 0: + x = 0 + + if y < 0: + y = 0 + + width1 = width * 2 + width2 = width * 2 + + if x + width > image_width: + width1 = image_width - x + + if y + width > image_height: + width2 = image_height - y + + width = min(width1, width2) + + if width >= 20: + return int(x), int(y), int(width) + else: + return None + + +# get max index of 2d array +def npmax(array): + arrayindex = array.argmax(1) + arrayvalue = array.max(1) + i = arrayvalue.argmax() + j = arrayindex[i] + return i, j + +def guess_onnx_input_shape_dtype(filename): + dtype = np.float32 + if "fp16" in filename: + dtype = np.float16 + elif "int8" in filename: + dtype = np.uint8 + input_size = (640, 640) if "yolo" in filename else (192, 256) + if "384" in filename: + input_size = (288, 384) + elif "256" in filename: + input_size = (256, 256) + return input_size, dtype + +if os.getenv('AUX_ORT_PROVIDERS'): + ONNX_PROVIDERS = os.getenv('AUX_ORT_PROVIDERS').split(',') +else: + ONNX_PROVIDERS = 
["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"] +def get_ort_providers() -> List[str]: + providers = [] + try: + import onnxruntime as ort + for provider in ONNX_PROVIDERS: + if provider in ort.get_available_providers(): + providers.append(provider) + return providers + except: + return [] + +def is_model_torchscript(model) -> bool: + return bool(type(model).__name__ == "RecursiveScriptModule") + +def get_model_type(Nodesname, filename) -> str: + ort_providers = list(filter(lambda x : x != "CPUExecutionProvider", get_ort_providers())) + if filename is None: + return None + elif ("onnx" in filename) and ort_providers: + print(f"{Nodesname}: Caching ONNXRuntime session {filename}...") + return "ort" + elif ("onnx" in filename): + print(f"{Nodesname}: Caching OpenCV DNN module {filename} on cv2.DNN...") + return "cv2" + else: + print(f"{Nodesname}: Caching TorchScript module {filename} on ...") + return "torchscript" diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/wholebody.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/wholebody.py new file mode 100644 index 0000000000000000000000000000000000000000..83fbf720b2cf08a1b672f5c75793fa62009dd93e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/dwpose/wholebody.py @@ -0,0 +1,172 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 +import numpy as np + +from .dw_onnx.cv_ox_det import inference_detector as inference_onnx_yolox +from .dw_onnx.cv_ox_yolo_nas import inference_detector as inference_onnx_yolo_nas +from .dw_onnx.cv_ox_pose import inference_pose as inference_onnx_pose + +from .dw_torchscript.jit_det import inference_detector as inference_jit_yolox +from .dw_torchscript.jit_pose import inference_pose as inference_jit_pose + +from typing import List, Optional +from .types import PoseResult, BodyResult, Keypoint +from timeit import default_timer +import os +from controlnet_aux.dwpose.util import guess_onnx_input_shape_dtype, get_model_type, get_ort_providers, is_model_torchscript +import torch +import torch.utils.benchmark.utils.timer as torch_timer + +class Wholebody: + def __init__(self, det_model_path: Optional[str] = None, pose_model_path: Optional[str] = None, torchscript_device="cuda"): + self.det_filename = det_model_path and os.path.basename(det_model_path) + self.pose_filename = pose_model_path and os.path.basename(pose_model_path) + self.det, self.pose = None, None + # return type: None ort cv2 torchscript + self.det_model_type = get_model_type("DWPose",self.det_filename) + self.pose_model_type = get_model_type("DWPose",self.pose_filename) + # Always loads to CPU to avoid building OpenCV. + cv2_device = 'cpu' + cv2_backend = cv2.dnn.DNN_BACKEND_OPENCV if cv2_device == 'cpu' else cv2.dnn.DNN_BACKEND_CUDA + # You need to manually build OpenCV through cmake to work with your GPU. 
+        cv2_providers = cv2.dnn.DNN_TARGET_CPU if cv2_device == 'cpu' else cv2.dnn.DNN_TARGET_CUDA
+        ort_providers = get_ort_providers()
+
+        if self.det_model_type is None:
+            pass
+        elif self.det_model_type == "ort":
+            try:
+                import onnxruntime as ort
+                self.det = ort.InferenceSession(det_model_path, providers=ort_providers)
+            except Exception:
+                print(f"Failed to load onnxruntime with {ort_providers}.\nPlease change EP_list in the config.yaml and restart ComfyUI")
+                self.det = ort.InferenceSession(det_model_path, providers=["CPUExecutionProvider"])
+        elif self.det_model_type == "cv2":
+            try:
+                self.det = cv2.dnn.readNetFromONNX(det_model_path)
+                self.det.setPreferableBackend(cv2_backend)
+                self.det.setPreferableTarget(cv2_providers)
+            except Exception:
+                print("TopK operators may not work on your OpenCV, try using onnxruntime with CPUExecutionProvider")
+                try:
+                    import onnxruntime as ort
+                    self.det = ort.InferenceSession(det_model_path, providers=["CPUExecutionProvider"])
+                except Exception:
+                    print(f"Failed to load {det_model_path}, you can use other models instead")
+        else:
+            self.det = torch.jit.load(det_model_path)
+            self.det.to(torchscript_device)
+
+        if self.pose_model_type is None:
+            pass
+        elif self.pose_model_type == "ort":
+            try:
+                import onnxruntime as ort
+                self.pose = ort.InferenceSession(pose_model_path, providers=ort_providers)
+            except Exception:
+                print(f"Failed to load onnxruntime with {ort_providers}.\nPlease change EP_list in the config.yaml and restart ComfyUI")
+                self.pose = ort.InferenceSession(pose_model_path, providers=["CPUExecutionProvider"])
+        elif self.pose_model_type == "cv2":
+            self.pose = cv2.dnn.readNetFromONNX(pose_model_path)
+            self.pose.setPreferableBackend(cv2_backend)
+            self.pose.setPreferableTarget(cv2_providers)
+        else:
+            self.pose = torch.jit.load(pose_model_path)
+            self.pose.to(torchscript_device)
+
+        if self.pose_filename is not None:
+            self.pose_input_size, _ = guess_onnx_input_shape_dtype(self.pose_filename)
+
+    def __call__(self, oriImg) -> Optional[np.ndarray]:
+
+        if is_model_torchscript(self.det):
+            det_start = torch_timer.timer()
+            det_result = inference_jit_yolox(self.det, oriImg, detect_classes=[0])
+            print(f"DWPose: Bbox {((torch_timer.timer() - det_start) * 1000):.2f}ms")
+        else:
+            det_start = default_timer()
+            if "yolox" in self.det_filename:
+                det_result = inference_onnx_yolox(self.det, oriImg, detect_classes=[0], dtype=np.float32)
+            else:
+                # FP16 and INT8 YOLO-NAS accept uint8 input
+                det_result = inference_onnx_yolo_nas(self.det, oriImg, detect_classes=[0], dtype=np.uint8)
+            print(f"DWPose: Bbox {((default_timer() - det_start) * 1000):.2f}ms")
+        if (det_result is None) or (det_result.shape[0] == 0):
+            return None
+
+        if is_model_torchscript(self.pose):
+            pose_start = torch_timer.timer()
+            keypoints, scores = inference_jit_pose(self.pose, det_result, oriImg, self.pose_input_size)
+            print(f"DWPose: Pose {((torch_timer.timer() - pose_start) * 1000):.2f}ms on {det_result.shape[0]} people\n")
+        else:
+            pose_start = default_timer()
+            _, pose_onnx_dtype = guess_onnx_input_shape_dtype(self.pose_filename)
+            keypoints, scores = inference_onnx_pose(self.pose, det_result, oriImg, self.pose_input_size, dtype=pose_onnx_dtype)
+            print(f"DWPose: Pose {((default_timer() - pose_start) * 1000):.2f}ms on {det_result.shape[0]} people\n")
+
+        keypoints_info = np.concatenate(
+            (keypoints, scores[..., None]), axis=-1)
+        # compute neck joint
+        neck = np.mean(keypoints_info[:, [5, 6]], axis=1)
+        # neck score when visualizing pred
+        neck[:, 2:4] = np.logical_and(
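+            # COCO-style wholebody keypoints have no neck joint, so one is
+            # synthesized as the midpoint of the shoulders (indices 5 and 6)
+            # and marked valid only when both shoulder scores exceed 0.3; the
+            # index remap below then reorders the first 18 points from mmpose
+            # order to OpenPose order.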
keypoints_info[:, 5, 2:4] > 0.3, + keypoints_info[:, 6, 2:4] > 0.3).astype(int) + new_keypoints_info = np.insert( + keypoints_info, 17, neck, axis=1) + mmpose_idx = [ + 17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3 + ] + openpose_idx = [ + 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17 + ] + new_keypoints_info[:, openpose_idx] = \ + new_keypoints_info[:, mmpose_idx] + keypoints_info = new_keypoints_info + + return keypoints_info + + @staticmethod + def format_result(keypoints_info: Optional[np.ndarray]) -> List[PoseResult]: + def format_keypoint_part( + part: np.ndarray, + ) -> Optional[List[Optional[Keypoint]]]: + keypoints = [ + Keypoint(x, y, score, i) if score >= 0.3 else None + for i, (x, y, score) in enumerate(part) + ] + return ( + None if all(keypoint is None for keypoint in keypoints) else keypoints + ) + + def total_score(keypoints: Optional[List[Optional[Keypoint]]]) -> float: + return ( + sum(keypoint.score for keypoint in keypoints if keypoint is not None) + if keypoints is not None + else 0.0 + ) + + pose_results = [] + if keypoints_info is None: + return pose_results + + for instance in keypoints_info: + body_keypoints = format_keypoint_part(instance[:18]) or ([None] * 18) + left_hand = format_keypoint_part(instance[92:113]) + right_hand = format_keypoint_part(instance[113:134]) + face = format_keypoint_part(instance[24:92]) + + # Openpose face consists of 70 points in total, while DWPose only + # provides 68 points. Padding the last 2 points. + if face is not None: + # left eye + face.append(body_keypoints[14]) + # right eye + face.append(body_keypoints[15]) + + body = BodyResult( + body_keypoints, total_score(body_keypoints), len(body_keypoints) + ) + pose_results.append(PoseResult(body, left_hand, right_hand, face)) + + return pose_results \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d238ea572183f20030fe8c464d69889f6804c0dc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__init__.py @@ -0,0 +1,110 @@ +# This is an improved version and model of HED edge detection with Apache License, Version 2.0. +# Please use this implementation in your products +# This implementation may produce slightly different results from Saining Xie's official implementations, +# but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations. 
+# Different from official models and other implementations, this is an RGB-input model (rather than BGR) +# and in this way it works better for gradio's RGB protocol + +import os +import warnings + +import cv2 +import numpy as np +import torch +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, nms, resize_image_with_pad, safe_step, common_input_validate, annotator_ckpts_path, custom_hf_download + + +class DoubleConvBlock(torch.nn.Module): + def __init__(self, input_channel, output_channel, layer_number): + super().__init__() + self.convs = torch.nn.Sequential() + self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) + for i in range(1, layer_number): + self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) + self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0) + + def __call__(self, x, down_sampling=False): + h = x + if down_sampling: + h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2)) + for conv in self.convs: + h = conv(h) + h = torch.nn.functional.relu(h) + return h, self.projection(h) + + +class ControlNetHED_Apache2(torch.nn.Module): + def __init__(self): + super().__init__() + self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1))) + self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2) + self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2) + self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3) + self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3) + self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3) + + def __call__(self, x): + h = x - self.norm + h, projection1 = self.block1(h) + h, projection2 = self.block2(h, down_sampling=True) + h, projection3 = self.block3(h, down_sampling=True) + h, projection4 = self.block4(h, down_sampling=True) + h, projection5 = self.block5(h, down_sampling=True) + return projection1, projection2, projection3, projection4, projection5 + +class HEDdetector: + def __init__(self, netNetwork): + self.netNetwork = netNetwork + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "ControlNetHED.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + netNetwork = ControlNetHED_Apache2() + netNetwork.load_state_dict(torch.load(model_path, map_location='cpu')) + netNetwork.float().eval() + + return cls(netNetwork) + + def to(self, device): + self.netNetwork.to(device) + return self + + + def __call__(self, input_image, detect_resolution=512, safe=False, output_type="pil", scribble=False, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + assert input_image.ndim == 3 + H, W, C = input_image.shape + with torch.no_grad(): + device = next(iter(self.netNetwork.parameters())).device + image_hed = torch.from_numpy(input_image).float().to(device) + image_hed = rearrange(image_hed, 'h w c -> 1 c h w') + edges = self.netNetwork(image_hed) + edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in 
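+                 # The network returns five side-output projections at
+                 # successively halved resolutions; below, each is resized back
+                 # to (W, H), the five maps are averaged, and the mean is
+                 # squashed with a sigmoid, 1 / (1 + exp(-x)), into a soft edge
+                 # map in [0, 1].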
edges] + edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges] + edges = np.stack(edges, axis=2) + edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64))) + if safe: + edge = safe_step(edge) + edge = (edge * 255.0).clip(0, 255).astype(np.uint8) + + detected_map = edge + + if scribble: + detected_map = nms(detected_map, 127, 3.0) + detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0) + detected_map[detected_map > 4] = 255 + detected_map[detected_map < 255] = 0 + + detected_map = HWC3(remove_pad(detected_map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d53332a0b789e4819d8cc467fe56f1117843c37 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/hed/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9571b19968a7207929311eadb73e4a2be8760c40 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/__init__.py @@ -0,0 +1,95 @@ +import os + +import cv2 +import numpy as np +import torch +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .leres.depthmap import estimateboost, estimateleres +from .leres.multi_depth_model_woauxi import RelDepthModel +from .leres.net_tools import strip_prefix_if_present +from .pix2pix.models.pix2pix4depth_model import Pix2Pix4DepthModel +from .pix2pix.options.test_options import TestOptions + + +class LeresDetector: + def __init__(self, model, pix2pixmodel): + self.model = model + self.pix2pixmodel = pix2pixmodel + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, pix2pix_filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "res101.pth" + pix2pix_filename = pix2pix_filename or "latest_net_G.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + checkpoint = torch.load(model_path, map_location=torch.device('cpu')) + + model = RelDepthModel(backbone='resnext101') + model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."), strict=True) + del checkpoint + + pix2pix_model_path = custom_hf_download(pretrained_model_or_path, pix2pix_filename, cache_dir=cache_dir) + + opt = TestOptions().parse() + if not torch.cuda.is_available(): + opt.gpu_ids = [] # cpu mode + pix2pixmodel = Pix2Pix4DepthModel(opt) + pix2pixmodel.save_dir = os.path.dirname(pix2pix_model_path) + pix2pixmodel.load_networks('latest') + pix2pixmodel.eval() + + return cls(model, pix2pixmodel) + + def to(self, device): + self.model.to(device) + # TODO - refactor pix2pix implementation to support device migration + # self.pix2pixmodel.to(device) + return self + + def __call__(self, input_image, thr_a=0, thr_b=0, boost=False, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = 
resize_image_with_pad(input_image, detect_resolution, upscale_method) + + with torch.no_grad(): + if boost: + depth = estimateboost(detected_map, self.model, 0, self.pix2pixmodel, max(detected_map.shape[1], detected_map.shape[0])) + else: + depth = estimateleres(detected_map, self.model, detected_map.shape[1], detected_map.shape[0]) + + numbytes=2 + depth_min = depth.min() + depth_max = depth.max() + max_val = (2**(8*numbytes))-1 + + # check output before normalizing and mapping to 16 bit + if depth_max - depth_min > np.finfo("float").eps: + out = max_val * (depth - depth_min) / (depth_max - depth_min) + else: + out = np.zeros(depth.shape) + + # single channel, 16 bit image + depth_image = out.astype("uint16") + + # convert to uint8 + depth_image = cv2.convertScaleAbs(depth_image, alpha=(255.0/65535.0)) + + # remove near + if thr_a != 0: + thr_a = ((thr_a/100)*255) + depth_image = cv2.threshold(depth_image, thr_a, 255, cv2.THRESH_TOZERO)[1] + + # invert image + depth_image = cv2.bitwise_not(depth_image) + + # remove bg + if thr_b != 0: + thr_b = ((thr_b/100)*255) + depth_image = cv2.threshold(depth_image, thr_b, 255, cv2.THRESH_TOZERO)[1] + + detected_map = HWC3(remove_pad(depth_image)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e0f1d07d98d4e85e684734d058dfe2515d215405 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/LICENSE @@ -0,0 +1,23 @@ +https://github.com/thygate/stable-diffusion-webui-depthmap-script + +MIT License + +Copyright (c) 2023 Bob Thiry + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnet.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f12c9975c1aa05401269be3ca3dbaa56bde55581 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnet.py @@ -0,0 +1,199 @@ +import torch.nn as nn +import torch.nn as NN + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d + self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000): + self.inplanes = 64 + super(ResNet, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = NN.BatchNorm2d(64) #NN.BatchNorm2d + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + #self.avgpool = nn.AvgPool2d(7, 
stride=1) + #self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + features = [] + + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + features.append(x) + x = self.layer2(x) + features.append(x) + x = self.layer3(x) + features.append(x) + x = self.layer4(x) + features.append(x) + + return features + + +def resnet18(pretrained=True, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + return model + + +def resnet34(pretrained=True, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + return model + + +def resnet50(pretrained=True, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + + return model + + +def resnet101(pretrained=True, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + + return model + + +def resnet152(pretrained=True, **kwargs): + """Constructs a ResNet-152 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnext_torch.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnext_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..9af54fcc3e5b363935ef60c8aaf269110c0d6611 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/Resnext_torch.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python +# coding: utf-8 +import torch.nn as nn + +try: + from urllib import urlretrieve +except ImportError: + from urllib.request import urlretrieve + +__all__ = ['resnext101_32x8d'] + + +model_urls = { + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
+ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + #self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + #self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + features = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + features.append(x) + + x = self.layer2(x) + features.append(x) + + x = self.layer3(x) + features.append(x) + + x = self.layer4(x) + features.append(x) + + #x = self.avgpool(x) + #x = torch.flatten(x, 1) + #x = self.fc(x) + + return features + + def forward(self, x): + return self._forward_impl(x) + + + +def resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNet-152 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + return model + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/depthmap.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/depthmap.py new file mode 100644 index 0000000000000000000000000000000000000000..fc743bf4946b514a53f8d286a395e33c7b612582 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/depthmap.py @@ -0,0 +1,548 @@ +# Author: thygate +# https://github.com/thygate/stable-diffusion-webui-depthmap-script + +import gc +from operator import getitem + +import cv2 +import numpy as np +import skimage.measure +import torch +from torchvision.transforms import transforms + +from ...util import torch_gc + +whole_size_threshold = 1600 # R_max from the paper +pix2pixsize = 1024 + +def scale_torch(img): + """ + Scale the image and output it in torch.tensor. + :param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W] + :param scale: the scale factor. float + :return: img. 
[C, H, W]
+    """
+    if len(img.shape) == 2:
+        img = img[np.newaxis, :, :]
+    if img.shape[2] == 3:
+        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
+        img = transform(img.astype(np.float32))
+    else:
+        img = img.astype(np.float32)
+        img = torch.from_numpy(img)
+    return img
+
+def estimateleres(img, model, w, h):
+    device = next(iter(model.parameters())).device
+    # leres transform input
+    rgb_c = img[:, :, ::-1].copy()
+    A_resize = cv2.resize(rgb_c, (w, h))
+    img_torch = scale_torch(A_resize)[None, :, :, :]
+
+    # compute
+    with torch.no_grad():
+        img_torch = img_torch.to(device)
+        prediction = model.depth_model(img_torch)
+
+    prediction = prediction.squeeze().cpu().numpy()
+    prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+    return prediction
+
+def generatemask(size):
+    # Generates a Gaussian mask
+    mask = np.zeros(size, dtype=np.float32)
+    sigma = int(size[0]/16)
+    k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)
+    mask[int(0.15*size[0]):size[0] - int(0.15*size[0]), int(0.15*size[1]): size[1] - int(0.15*size[1])] = 1
+    mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)
+    mask = (mask - mask.min()) / (mask.max() - mask.min())
+    mask = mask.astype(np.float32)
+    return mask
+
+def resizewithpool(img, size):
+    i_size = img.shape[0]
+    n = int(np.floor(i_size/size))
+
+    out = skimage.measure.block_reduce(img, (n, n), np.max)
+    return out
+
+def rgb2gray(rgb):
+    # Converts rgb to gray
+    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
+
+def calculateprocessingres(img, basesize, confidence=0.1, scale_threshold=3, whole_size_threshold=3000):
+    # Returns the R_x resolution described in section 5 of the main paper.
+
+    # Parameters:
+    #    img: input rgb image
+    #    basesize: size of the dilation kernel, which is equal to the receptive field of the network.
+    #    confidence: value of x in R_x; allowed percentage of pixels that are not getting any contextual cue.
+    #    scale_threshold: maximum allowed upscaling on the input image; it has been set to 3.
+    #    whole_size_threshold: maximum allowed resolution. (R_max from section 6 of the main paper)
+
+    # Returns:
+    #    outputsize_scale*speed_scale: the computed R_x resolution
+    #    patch_scale: K parameter from section 6 of the paper
+
+    # speed scale parameter is to process every image in a smaller size to accelerate the R_x resolution search
+    speed_scale = 32
+    image_dim = int(min(img.shape[0:2]))
+
+    gray = rgb2gray(img)
+    grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
+    grad = cv2.resize(grad, (image_dim, image_dim), cv2.INTER_AREA)
+
+    # thresholding the gradient map to generate the edge-map as a proxy of the contextual cues
+    m = grad.min()
+    M = grad.max()
+    middle = m + (0.4 * (M - m))
+    grad[grad < middle] = 0
+    grad[grad >= middle] = 1
+
+    # dilation kernel with size of the receptive field
+    kernel = np.ones((int(basesize/speed_scale), int(basesize/speed_scale)), float)
+    # dilation kernel with size of a quarter of the receptive field, used to compute k
+    # as described in section 6 of the main paper
+    kernel2 = np.ones((int(basesize / (4*speed_scale)), int(basesize / (4*speed_scale))), float)
+
+    # Output resolution limit set by the whole_size_threshold and scale_threshold.
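+    # The search below walks candidate resolutions (capped by `threshold`) in
+    # steps of half the receptive field. At each size the edge map is dilated
+    # with the receptive-field-sized kernel; (1 - dilated).mean() is then the
+    # fraction of pixels left without any contextual cue. The last size at
+    # which this fraction stays below `confidence` is kept.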
+ threshold = min(whole_size_threshold, scale_threshold * max(img.shape[:2])) + + outputsize_scale = basesize / speed_scale + for p_size in range(int(basesize/speed_scale), int(threshold/speed_scale), int(basesize / (2*speed_scale))): + grad_resized = resizewithpool(grad, p_size) + grad_resized = cv2.resize(grad_resized, (p_size, p_size), cv2.INTER_NEAREST) + grad_resized[grad_resized >= 0.5] = 1 + grad_resized[grad_resized < 0.5] = 0 + + dilated = cv2.dilate(grad_resized, kernel, iterations=1) + meanvalue = (1-dilated).mean() + if meanvalue > confidence: + break + else: + outputsize_scale = p_size + + grad_region = cv2.dilate(grad_resized, kernel2, iterations=1) + patch_scale = grad_region.mean() + + return int(outputsize_scale*speed_scale), patch_scale + +# Generate a double-input depth estimation +def doubleestimate(img, size1, size2, pix2pixsize, model, net_type, pix2pixmodel): + # Generate the low resolution estimation + estimate1 = singleestimate(img, size1, model, net_type) + # Resize to the inference size of merge network. + estimate1 = cv2.resize(estimate1, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) + + # Generate the high resolution estimation + estimate2 = singleestimate(img, size2, model, net_type) + # Resize to the inference size of merge network. + estimate2 = cv2.resize(estimate2, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) + + # Inference on the merge model + pix2pixmodel.set_input(estimate1, estimate2) + pix2pixmodel.test() + visuals = pix2pixmodel.get_current_visuals() + prediction_mapped = visuals['fake_B'] + prediction_mapped = (prediction_mapped+1)/2 + prediction_mapped = (prediction_mapped - torch.min(prediction_mapped)) / ( + torch.max(prediction_mapped) - torch.min(prediction_mapped)) + prediction_mapped = prediction_mapped.squeeze().cpu().numpy() + + return prediction_mapped + +# Generate a single-input depth estimation +def singleestimate(img, msize, model, net_type): + # if net_type == 0: + return estimateleres(img, model, msize, msize) + # else: + # return estimatemidasBoost(img, model, msize, msize) + +def applyGridpatch(blsize, stride, img, box): + # Extract a simple grid patch. + counter1 = 0 + patch_bound_list = {} + for k in range(blsize, img.shape[1] - blsize, stride): + for j in range(blsize, img.shape[0] - blsize, stride): + patch_bound_list[str(counter1)] = {} + patchbounds = [j - blsize, k - blsize, j - blsize + 2 * blsize, k - blsize + 2 * blsize] + patch_bound = [box[0] + patchbounds[1], box[1] + patchbounds[0], patchbounds[3] - patchbounds[1], + patchbounds[2] - patchbounds[0]] + patch_bound_list[str(counter1)]['rect'] = patch_bound + patch_bound_list[str(counter1)]['size'] = patch_bound[2] + counter1 = counter1 + 1 + return patch_bound_list + +# Generating local patches to perform the local refinement described in section 6 of the main paper. +def generatepatchs(img, base_size): + + # Compute the gradients as a proxy of the contextual cues. + img_gray = rgb2gray(img) + whole_grad = np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)) +\ + np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3)) + + threshold = whole_grad[whole_grad > 0].mean() + whole_grad[whole_grad < threshold] = 0 + + # We use the integral image to speed-up the evaluation of the amount of gradients for each patch. 
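+    # An integral image stores at each position the sum of all pixels above and
+    # to the left, so the gradient mass of any rectangle can be read in O(1) as
+    # I[y2, x2] - I[y1, x2] - I[y2, x1] + I[y1, x1] (see getGF_fromintegral
+    # below); `gf` is the mean gradient density of the whole image and serves
+    # as the acceptance threshold for candidate patches.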
+ gf = whole_grad.sum()/len(whole_grad.reshape(-1)) + grad_integral_image = cv2.integral(whole_grad) + + # Variables are selected such that the initial patch size would be the receptive field size + # and the stride is set to 1/3 of the receptive field size. + blsize = int(round(base_size/2)) + stride = int(round(blsize*0.75)) + + # Get initial Grid + patch_bound_list = applyGridpatch(blsize, stride, img, [0, 0, 0, 0]) + + # Refine initial Grid of patches by discarding the flat (in terms of gradients of the rgb image) ones. Refine + # each patch size to ensure that there will be enough depth cues for the network to generate a consistent depth map. + print("Selecting patches ...") + patch_bound_list = adaptiveselection(grad_integral_image, patch_bound_list, gf) + + # Sort the patch list to make sure the merging operation will be done with the correct order: starting from biggest + # patch + patchset = sorted(patch_bound_list.items(), key=lambda x: getitem(x[1], 'size'), reverse=True) + return patchset + +def getGF_fromintegral(integralimage, rect): + # Computes the gradient density of a given patch from the gradient integral image. + x1 = rect[1] + x2 = rect[1]+rect[3] + y1 = rect[0] + y2 = rect[0]+rect[2] + value = integralimage[x2, y2]-integralimage[x1, y2]-integralimage[x2, y1]+integralimage[x1, y1] + return value + +# Adaptively select patches +def adaptiveselection(integral_grad, patch_bound_list, gf): + patchlist = {} + count = 0 + height, width = integral_grad.shape + + search_step = int(32/factor) + + # Go through all patches + for c in range(len(patch_bound_list)): + # Get patch + bbox = patch_bound_list[str(c)]['rect'] + + # Compute the amount of gradients present in the patch from the integral image. + cgf = getGF_fromintegral(integral_grad, bbox)/(bbox[2]*bbox[3]) + + # Check if patching is beneficial by comparing the gradient density of the patch to + # the gradient density of the whole image + if cgf >= gf: + bbox_test = bbox.copy() + patchlist[str(count)] = {} + + # Enlarge each patch until the gradient density of the patch is equal + # to the whole image gradient density + while True: + + bbox_test[0] = bbox_test[0] - int(search_step/2) + bbox_test[1] = bbox_test[1] - int(search_step/2) + + bbox_test[2] = bbox_test[2] + search_step + bbox_test[3] = bbox_test[3] + search_step + + # Check if we are still within the image + if bbox_test[0] < 0 or bbox_test[1] < 0 or bbox_test[1] + bbox_test[3] >= height \ + or bbox_test[0] + bbox_test[2] >= width: + break + + # Compare gradient density + cgf = getGF_fromintegral(integral_grad, bbox_test)/(bbox_test[2]*bbox_test[3]) + if cgf < gf: + break + bbox = bbox_test.copy() + + # Add patch to selected patches + patchlist[str(count)]['rect'] = bbox + patchlist[str(count)]['size'] = bbox[2] + count = count + 1 + + # Return selected patches + return patchlist + +def impatch(image, rect): + # Extract the given patch pixels from a given image. 
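+    # `rect` is [x, y, width, height] in image coordinates; numpy indexes rows
+    # first, hence the image[h1:h2, w1:w2] slice below.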
+ w1 = rect[0] + h1 = rect[1] + w2 = w1 + rect[2] + h2 = h1 + rect[3] + image_patch = image[h1:h2, w1:w2] + return image_patch + +class ImageandPatchs: + def __init__(self, root_dir, name, patchsinfo, rgb_image, scale=1): + self.root_dir = root_dir + self.patchsinfo = patchsinfo + self.name = name + self.patchs = patchsinfo + self.scale = scale + + self.rgb_image = cv2.resize(rgb_image, (round(rgb_image.shape[1]*scale), round(rgb_image.shape[0]*scale)), + interpolation=cv2.INTER_CUBIC) + + self.do_have_estimate = False + self.estimation_updated_image = None + self.estimation_base_image = None + + def __len__(self): + return len(self.patchs) + + def set_base_estimate(self, est): + self.estimation_base_image = est + if self.estimation_updated_image is not None: + self.do_have_estimate = True + + def set_updated_estimate(self, est): + self.estimation_updated_image = est + if self.estimation_base_image is not None: + self.do_have_estimate = True + + def __getitem__(self, index): + patch_id = int(self.patchs[index][0]) + rect = np.array(self.patchs[index][1]['rect']) + msize = self.patchs[index][1]['size'] + + ## applying scale to rect: + rect = np.round(rect * self.scale) + rect = rect.astype('int') + msize = round(msize * self.scale) + + patch_rgb = impatch(self.rgb_image, rect) + if self.do_have_estimate: + patch_whole_estimate_base = impatch(self.estimation_base_image, rect) + patch_whole_estimate_updated = impatch(self.estimation_updated_image, rect) + return {'patch_rgb': patch_rgb, 'patch_whole_estimate_base': patch_whole_estimate_base, + 'patch_whole_estimate_updated': patch_whole_estimate_updated, 'rect': rect, + 'size': msize, 'id': patch_id} + else: + return {'patch_rgb': patch_rgb, 'rect': rect, 'size': msize, 'id': patch_id} + + def print_options(self, opt): + """Print and save options + + It will print both current options and default values(if different). 
+        It will save options into a text file / [checkpoints_dir] / opt.txt
+        """
+        message = ''
+        message += '----------------- Options ---------------\n'
+        for k, v in sorted(vars(opt).items()):
+            comment = ''
+            default = self.parser.get_default(k)
+            if v != default:
+                comment = '\t[default: %s]' % str(default)
+            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
+        message += '----------------- End -------------------'
+        print(message)
+
+        # save to the disk
+        """
+        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
+        util.mkdirs(expr_dir)
+        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
+        with open(file_name, 'wt') as opt_file:
+            opt_file.write(message)
+            opt_file.write('\n')
+        """
+
+    def parse(self):
+        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
+        opt = self.gather_options()
+        opt.isTrain = self.isTrain   # train or test
+
+        # process opt.suffix
+        if opt.suffix:
+            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
+            opt.name = opt.name + suffix
+
+        #self.print_options(opt)
+
+        # set gpu ids
+        str_ids = opt.gpu_ids.split(',')
+        opt.gpu_ids = []
+        for str_id in str_ids:
+            id = int(str_id)
+            if id >= 0:
+                opt.gpu_ids.append(id)
+        #if len(opt.gpu_ids) > 0:
+        #    torch.cuda.set_device(opt.gpu_ids[0])
+
+        self.opt = opt
+        return self.opt
+
+    # NOTE: print_options() and parse() above reference self.parser, self.gather_options
+    # and self.isTrain, none of which ImageandPatchs defines; they appear to have been
+    # copied from the pix2pix BaseOptions class and are not called anywhere in this module.
+
+
+def estimateboost(img, model, model_type, pix2pixmodel, max_res=512, depthmap_script_boost_rmax=None):
+    global whole_size_threshold
+
+    # get settings
+    if depthmap_script_boost_rmax:
+        whole_size_threshold = depthmap_script_boost_rmax
+
+    if model_type == 0:  # leres
+        net_receptive_field_size = 448
+        patch_netsize = 2 * net_receptive_field_size
+    elif model_type == 1:  # dpt_beit_large_512
+        net_receptive_field_size = 512
+        patch_netsize = 2 * net_receptive_field_size
+    else:  # other midas
+        net_receptive_field_size = 384
+        patch_netsize = 2 * net_receptive_field_size
+
+    gc.collect()
+    torch_gc()
+
+    # Generate mask used to smoothly blend the local patch estimations to the base estimate.
+    # It is arbitrarily large to avoid artifacts during rescaling for each crop.
+    mask_org = generatemask((3000, 3000))
+    mask = mask_org.copy()
+
+    # Value x of R_x defined in section 5 of the main paper.
+    r_threshold_value = 0.2
+    #if R0:
+    #    r_threshold_value = 0
+
+    input_resolution = img.shape
+    scale_threshold = 3  # allows upscaling by a factor of up to 3
+
+    # Find the best input resolution R_x, using the resolution search described in
+    # section 5 (double estimation) of the main paper and section B of the supplementary material.
+    whole_image_optimal_size, patch_scale = calculateprocessingres(img, net_receptive_field_size, r_threshold_value, scale_threshold, whole_size_threshold)
+
+    # print('wholeImage being processed in :', whole_image_optimal_size)
+
+    # Generate the base estimate using the double estimation.
+    whole_estimate = doubleestimate(img, net_receptive_field_size, whole_image_optimal_size, pix2pixsize, model, model_type, pix2pixmodel)
+
+    # Compute the multiplier described in section 6 of the main paper to make sure our initial patch can select
+    # small high-density regions of the image.
+    global factor
+    factor = max(min(1, 4 * patch_scale * whole_image_optimal_size / whole_size_threshold), 0.2)
+    # print('Adjust factor is:', 1/factor)
+
+    # Check if Local boosting is beneficial.
+    if max_res < whole_image_optimal_size:
+        # print("No Local boosting.
Specified Max Res is smaller than R20, Returning doubleestimate result") + return cv2.resize(whole_estimate, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC) + + # Compute the default target resolution. + if img.shape[0] > img.shape[1]: + a = 2 * whole_image_optimal_size + b = round(2 * whole_image_optimal_size * img.shape[1] / img.shape[0]) + else: + a = round(2 * whole_image_optimal_size * img.shape[0] / img.shape[1]) + b = 2 * whole_image_optimal_size + b = int(round(b / factor)) + a = int(round(a / factor)) + + """ + # recompute a, b and saturate to max res. + if max(a,b) > max_res: + print('Default Res is higher than max-res: Reducing final resolution') + if img.shape[0] > img.shape[1]: + a = max_res + b = round(max_res * img.shape[1] / img.shape[0]) + else: + a = round(max_res * img.shape[0] / img.shape[1]) + b = max_res + b = int(b) + a = int(a) + """ + + img = cv2.resize(img, (b, a), interpolation=cv2.INTER_CUBIC) + + # Extract selected patches for local refinement + base_size = net_receptive_field_size * 2 + patchset = generatepatchs(img, base_size) + + # print('Target resolution: ', img.shape) + + # Computing a scale in case user prompted to generate the results as the same resolution of the input. + # Notice that our method output resolution is independent of the input resolution and this parameter will only + # enable a scaling operation during the local patch merge implementation to generate results with the same resolution + # as the input. + """ + if output_resolution == 1: + mergein_scale = input_resolution[0] / img.shape[0] + print('Dynamicly change merged-in resolution; scale:', mergein_scale) + else: + mergein_scale = 1 + """ + # always rescale to input res for now + mergein_scale = input_resolution[0] / img.shape[0] + + imageandpatchs = ImageandPatchs('', '', patchset, img, mergein_scale) + whole_estimate_resized = cv2.resize(whole_estimate, (round(img.shape[1]*mergein_scale), + round(img.shape[0]*mergein_scale)), interpolation=cv2.INTER_CUBIC) + imageandpatchs.set_base_estimate(whole_estimate_resized.copy()) + imageandpatchs.set_updated_estimate(whole_estimate_resized.copy()) + + print('Resulting depthmap resolution will be :', whole_estimate_resized.shape[:2]) + print('Patches to process: '+str(len(imageandpatchs))) + + # Enumerate through all patches, generate their estimations and refining the base estimate. + for patch_ind in range(len(imageandpatchs)): + + # Get patch information + patch = imageandpatchs[patch_ind] # patch object + patch_rgb = patch['patch_rgb'] # rgb patch + patch_whole_estimate_base = patch['patch_whole_estimate_base'] # corresponding patch from base + rect = patch['rect'] # patch size and location + patch_id = patch['id'] # patch ID + org_size = patch_whole_estimate_base.shape # the original size from the unscaled input + print('\t Processing patch', patch_ind, '/', len(imageandpatchs)-1, '|', rect) + + # We apply double estimation for patches. The high resolution value is fixed to twice the receptive + # field size of the network for patches to accelerate the process. 
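+        # The merge network output (in [-1, 1]) is rescaled to [0, 1] below, and
+        # a first-degree polynomial fit (np.polyfit, deg=1) then maps the merged
+        # patch onto the value range of the base estimate, so the patch can be
+        # blended in without introducing a global offset or scale mismatch.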
+ patch_estimation = doubleestimate(patch_rgb, net_receptive_field_size, patch_netsize, pix2pixsize, model, model_type, pix2pixmodel) + patch_estimation = cv2.resize(patch_estimation, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) + patch_whole_estimate_base = cv2.resize(patch_whole_estimate_base, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) + + # Merging the patch estimation into the base estimate using our merge network: + # We feed the patch estimation and the same region from the updated base estimate to the merge network + # to generate the target estimate for the corresponding region. + pix2pixmodel.set_input(patch_whole_estimate_base, patch_estimation) + + # Run merging network + pix2pixmodel.test() + visuals = pix2pixmodel.get_current_visuals() + + prediction_mapped = visuals['fake_B'] + prediction_mapped = (prediction_mapped+1)/2 + prediction_mapped = prediction_mapped.squeeze().cpu().numpy() + + mapped = prediction_mapped + + # We use a simple linear polynomial to make sure the result of the merge network would match the values of + # base estimate + p_coef = np.polyfit(mapped.reshape(-1), patch_whole_estimate_base.reshape(-1), deg=1) + merged = np.polyval(p_coef, mapped.reshape(-1)).reshape(mapped.shape) + + merged = cv2.resize(merged, (org_size[1],org_size[0]), interpolation=cv2.INTER_CUBIC) + + # Get patch size and location + w1 = rect[0] + h1 = rect[1] + w2 = w1 + rect[2] + h2 = h1 + rect[3] + + # To speed up the implementation, we only generate the Gaussian mask once with a sufficiently large size + # and resize it to our needed size while merging the patches. + if mask.shape != org_size: + mask = cv2.resize(mask_org, (org_size[1],org_size[0]), interpolation=cv2.INTER_LINEAR) + + tobemergedto = imageandpatchs.estimation_updated_image + + # Update the whole estimation: + # We use a simple Gaussian mask to blend the merged patch region with the base estimate to ensure seamless + # blending at the boundaries of the patch region. + tobemergedto[h1:h2, w1:w2] = np.multiply(tobemergedto[h1:h2, w1:w2], 1 - mask) + np.multiply(merged, mask) + imageandpatchs.set_updated_estimate(tobemergedto) + + # output + return cv2.resize(imageandpatchs.estimation_updated_image, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf35d7843e00be5d3c831d72b9ab5d64d130f93 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py @@ -0,0 +1,35 @@ +import torch +import torch.nn as nn + +from . import network_auxi as network +from .net_tools import get_func + + +class RelDepthModel(nn.Module): + def __init__(self, backbone='resnet50'): + super(RelDepthModel, self).__init__() + if backbone == 'resnet50': + encoder = 'resnet50_stride32' + elif backbone == 'resnext101': + encoder = 'resnext101_stride32x8d' + self.depth_model = DepthModel(encoder) + + def inference(self, rgb): + with torch.no_grad(): + input = rgb.to(self.depth_model.device) + depth = self.depth_model(input) + #pred_depth_out = depth - depth.min() + 0.01 + return depth #pred_depth_out + + +class DepthModel(nn.Module): + def __init__(self, encoder): + super(DepthModel, self).__init__() + backbone = network.__name__.split('.')[-1] + '.' 
+ encoder
+        self.encoder_modules = get_func(backbone)()
+        self.decoder_modules = network.Decoder()
+
+    def forward(self, x):
+        lateral_out = self.encoder_modules(x)
+        out_logit = self.decoder_modules(lateral_out)
+        return out_logit
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/net_tools.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/net_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f213315046e078bb861d65d3ef4a6fc446e945d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/net_tools.py
@@ -0,0 +1,54 @@
+import importlib
+import torch
+import os
+from collections import OrderedDict
+
+
+def get_func(func_name):
+    """Helper to return a function object by name. func_name must identify a
+    function in this module or the path to a function relative to the base
+    package ('controlnet_aux.leres.leres').
+    """
+    if func_name == '':
+        return None
+    try:
+        parts = func_name.split('.')
+        # Refers to a function in this module
+        if len(parts) == 1:
+            return globals()[parts[0]]
+        # Otherwise, assume we're referencing a module under 'controlnet_aux.leres.leres'
+        module_name = 'controlnet_aux.leres.leres.' + '.'.join(parts[:-1])
+        module = importlib.import_module(module_name)
+        return getattr(module, parts[-1])
+    except Exception:
+        print('Failed to find function: %s' % func_name)
+        raise
+
+def load_ckpt(args, depth_model, shift_model, focal_model):
+    """
+    Load checkpoint.
+    """
+    if os.path.isfile(args.load_ckpt):
+        print("loading checkpoint %s" % args.load_ckpt)
+        checkpoint = torch.load(args.load_ckpt)
+        if shift_model is not None:
+            shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'),
+                                        strict=True)
+        if focal_model is not None:
+            focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'),
+                                        strict=True)
+        depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."),
+                                    strict=True)
+        del checkpoint
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+
+def strip_prefix_if_present(state_dict, prefix):
+    keys = sorted(state_dict.keys())
+    if not all(key.startswith(prefix) for key in keys):
+        return state_dict
+    stripped_state_dict = OrderedDict()
+    for key, value in state_dict.items():
+        stripped_state_dict[key.replace(prefix, "")] = value
+    return stripped_state_dict
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/network_auxi.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/network_auxi.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bd87011a5339aca632d1a10b217c8737bdc794f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/leres/network_auxi.py
@@ -0,0 +1,417 @@
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+
+from .
import Resnet, Resnext_torch + + +def resnet50_stride32(): + return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2]) + +def resnext101_stride32x8d(): + return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2]) + + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + self.inchannels = [256, 512, 1024, 2048] + self.midchannels = [256, 256, 256, 512] + self.upfactors = [2,2,2,2] + self.outchannels = 1 + + self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3]) + self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True) + self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True) + + self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels = self.midchannels[2], upfactor=self.upfactors[2]) + self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels = self.midchannels[1], upfactor=self.upfactors[1]) + self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels = self.midchannels[0], upfactor=self.upfactors[0]) + + self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2) + self._init_params() + + def _init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): #NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, features): + x_32x = self.conv(features[3]) # 1/32 + x_32 = self.conv1(x_32x) + x_16 = self.upsample(x_32) # 1/16 + + x_8 = self.ffm2(features[2], x_16) # 1/8 + x_4 = self.ffm1(features[1], x_8) # 1/4 + x_2 = self.ffm0(features[0], x_4) # 1/2 + #----------------------------------------- + x = self.outconv(x_2) # original size + return x + +class DepthNet(nn.Module): + __factory = { + 18: Resnet.resnet18, + 34: Resnet.resnet34, + 50: Resnet.resnet50, + 101: Resnet.resnet101, + 152: Resnet.resnet152 + } + def __init__(self, + backbone='resnet', + depth=50, + upfactors=[2, 2, 2, 2]): + super(DepthNet, self).__init__() + self.backbone = backbone + self.depth = depth + self.pretrained = False + self.inchannels = [256, 512, 1024, 2048] + self.midchannels = [256, 256, 256, 512] + self.upfactors = upfactors + self.outchannels = 1 + + # Build model + if self.backbone == 'resnet': + if self.depth not in DepthNet.__factory: + raise KeyError("Unsupported depth:", self.depth) + self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained) + elif self.backbone == 'resnext101_32x8d': + self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained) + else: + self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained) + + def forward(self, x): + x = self.encoder(x) # 1/32, 1/16, 1/8, 1/4 + return x + + +class FTB(nn.Module): + def __init__(self, inchannels, midchannels=512): + super(FTB, self).__init__() + self.in1 = inchannels + self.mid = midchannels + self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1, + bias=True) + # NN.BatchNorm2d + self.conv_branch = 
nn.Sequential(nn.ReLU(inplace=True), \ + nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, + padding=1, stride=1, bias=True), \ + nn.BatchNorm2d(num_features=self.mid), \ + nn.ReLU(inplace=True), \ + nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, + padding=1, stride=1, bias=True)) + self.relu = nn.ReLU(inplace=True) + + self.init_params() + + def forward(self, x): + x = self.conv1(x) + x = x + self.conv_branch(x) + x = self.relu(x) + + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class ATA(nn.Module): + def __init__(self, inchannels, reduction=8): + super(ATA, self).__init__() + self.inchannels = inchannels + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction), + nn.ReLU(inplace=True), + nn.Linear(self.inchannels // reduction, self.inchannels), + nn.Sigmoid()) + self.init_params() + + def forward(self, low_x, high_x): + n, c, _, _ = low_x.size() + x = torch.cat([low_x, high_x], 1) + x = self.avg_pool(x) + x = x.view(n, -1) + x = self.fc(x).view(n, c, 1, 1) + x = low_x * x + high_x + + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + # init.normal(m.weight, std=0.01) + init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + # init.normal_(m.weight, std=0.01) + init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class FFM(nn.Module): + def __init__(self, inchannels, midchannels, outchannels, upfactor=2): + super(FFM, self).__init__() + self.inchannels = inchannels + self.midchannels = midchannels + self.outchannels = outchannels + self.upfactor = upfactor + + self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels) + # self.ata = ATA(inchannels = self.midchannels) + self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels) + + self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True) + + self.init_params() + + def forward(self, low_x, high_x): + x = self.ftb1(low_x) + x = x + high_x + x = self.ftb2(x) + x = self.upsample(x) + + return x + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) 
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):  # NN.BatchNorm2d
+                init.constant_(m.weight, 1)
+                init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                init.normal_(m.weight, std=0.01)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+
+
+class AO(nn.Module):
+    # Adaptive output module
+    def __init__(self, inchannels, outchannels, upfactor=2):
+        super(AO, self).__init__()
+        self.inchannels = inchannels
+        self.outchannels = outchannels
+        self.upfactor = upfactor
+
+        self.adapt_conv = nn.Sequential(
+            nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1,
+                      stride=1, bias=True), \
+            nn.BatchNorm2d(num_features=self.inchannels // 2), \
+            nn.ReLU(inplace=True), \
+            nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1,
+                      stride=1, bias=True), \
+            nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True))
+
+        self.init_params()
+
+    def forward(self, x):
+        x = self.adapt_conv(x)
+        return x
+
+    def init_params(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                # init.kaiming_normal_(m.weight, mode='fan_out')
+                init.normal_(m.weight, std=0.01)
+                # init.xavier_normal_(m.weight)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+            elif isinstance(m, nn.ConvTranspose2d):
+                # init.kaiming_normal_(m.weight, mode='fan_out')
+                init.normal_(m.weight, std=0.01)
+                # init.xavier_normal_(m.weight)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):  # NN.BatchNorm2d
+                init.constant_(m.weight, 1)
+                init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                init.normal_(m.weight, std=0.01)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+
+
+
+# ==============================================================================================================
+
+
+class ResidualConv(nn.Module):
+    def __init__(self, inchannels):
+        super(ResidualConv, self).__init__()
+        # NN.BatchNorm2d
+        # integer division below: nn.Conv2d channel counts must be ints
+        self.conv = nn.Sequential(
+            # nn.BatchNorm2d(num_features=inchannels),
+            nn.ReLU(inplace=False),
+            # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True),
+            # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True)
+            nn.Conv2d(in_channels=inchannels, out_channels=inchannels // 2, kernel_size=3, padding=1, stride=1,
+                      bias=False),
+            nn.BatchNorm2d(num_features=inchannels // 2),
+            nn.ReLU(inplace=False),
+            nn.Conv2d(in_channels=inchannels // 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1,
+                      bias=False)
+        )
+        self.init_params()
+
+    def forward(self, x):
+        x = self.conv(x) + x
+        return x
+
+    def init_params(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                # init.kaiming_normal_(m.weight, mode='fan_out')
+                init.normal_(m.weight, std=0.01)
+                # init.xavier_normal_(m.weight)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+            elif isinstance(m, nn.ConvTranspose2d):
+                # init.kaiming_normal_(m.weight, mode='fan_out')
+                init.normal_(m.weight, std=0.01)
+                # init.xavier_normal_(m.weight)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):  # NN.BatchNorm2d
+                init.constant_(m.weight, 1)
+                init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                init.normal_(m.weight, std=0.01)
+                if m.bias is not None:
+                    init.constant_(m.bias, 0)
+
+
+class FeatureFusion(nn.Module):
+    def __init__(self, inchannels, outchannels):
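+        # Fuses a low-level lateral feature with the high-level decoder feature:
+        # the lateral path goes through one ResidualConv, is added to `highfeat`,
+        # and the sum passes another ResidualConv plus a stride-2 transposed
+        # convolution for 2x upsampling (see forward below).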
super(FeatureFusion, self).__init__() + self.conv = ResidualConv(inchannels=inchannels) + # NN.BatchNorm2d + self.up = nn.Sequential(ResidualConv(inchannels=inchannels), + nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3, + stride=2, padding=1, output_padding=1), + nn.BatchNorm2d(num_features=outchannels), + nn.ReLU(inplace=True)) + + def forward(self, lowfeat, highfeat): + return self.up(highfeat + self.conv(lowfeat)) + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.ConvTranspose2d): + # init.kaiming_normal_(m.weight, mode='fan_out') + init.normal_(m.weight, std=0.01) + # init.xavier_normal_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.01) + if m.bias is not None: + init.constant_(m.bias, 0) + + +class SenceUnderstand(nn.Module): + def __init__(self, channels): + super(SenceUnderstand, self).__init__() + self.channels = channels + self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1), + nn.ReLU(inplace=True)) + self.pool = nn.AdaptiveAvgPool2d(8) + self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels), + nn.ReLU(inplace=True)) + self.conv2 = nn.Sequential( + nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0), + nn.ReLU(inplace=True)) + self.initial_params() + + def forward(self, x): + n, c, h, w = x.size() + x = self.conv1(x) + x = self.pool(x) + x = x.view(n, -1) + x = self.fc(x) + x = x.view(n, self.channels, 1, 1) + x = self.conv2(x) + x = x.repeat(1, 1, h, w) + return x + + def initial_params(self, dev=0.01): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + # print torch.sum(m.weight) + m.weight.data.normal_(0, dev) + if m.bias is not None: + m.bias.data.fill_(0) + elif isinstance(m, nn.ConvTranspose2d): + # print torch.sum(m.weight) + m.weight.data.normal_(0, dev) + if m.bias is not None: + m.bias.data.fill_(0) + elif isinstance(m, nn.Linear): + m.weight.data.normal_(0, dev) + + +if __name__ == '__main__': + net = DepthNet(depth=50, pretrained=True) + print(net) + inputs = torch.ones(4,3,128,128) + out = net(inputs) + print(out.size()) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..38b1a24fd389a138b930dcf1ee606ef97a0186c8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/LICENSE @@ -0,0 +1,19 @@ +https://github.com/compphoto/BoostingMonocularDepth + +Copyright 2021, Seyed Mahdi Hosseini Miangoleh, Sebastian Dille, Computational Photography Laboratory. All rights reserved. + +This software is for academic use only. A redistribution of this +software, with or without modifications, has to be for academic +use only, while giving the appropriate credit to the original +authors of the software. The methods implemented as a part of +this software may be covered under patents or patent applications. 
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..301c966fca7a375c359b7ee7d455e23ee82ebb64
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/__init__.py
@@ -0,0 +1,67 @@
+"""This package contains modules related to objective functions, optimizations, and network architectures.
+
+To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+You need to implement the following five functions:
+    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+    -- <set_input>: unpack data from dataset and apply preprocessing.
+    -- <forward>: produce intermediate results.
+    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
+    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+
+In the function <__init__>, you need to define four lists:
+    -- self.loss_names (str list): specify the training losses that you want to plot and save.
+    -- self.model_names (str list): define networks used in our training.
+    -- self.visual_names (str list): specify the images that you want to display and save.
+    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+Now you can use the model class by specifying flag '--model dummy'.
+See our template model class 'template_model.py' for more details.
+"""
+
+import importlib
+from .base_model import BaseModel
+
+
+def find_model_using_name(model_name):
+    """Import the module "models/[model_name]_model.py".
+
+    In the file, the class called [ModelName]Model() will
+    be instantiated. It has to be a subclass of BaseModel,
+    and it is case-insensitive.
+    """
+    model_filename = "controlnet_aux.leres.pix2pix.models."
+ model_name + "_model"
+    modellib = importlib.import_module(model_filename)
+    model = None
+    target_model_name = model_name.replace('_', '') + 'model'
+    for name, cls in modellib.__dict__.items():
+        if name.lower() == target_model_name.lower() \
+           and issubclass(cls, BaseModel):
+            model = cls
+
+    if model is None:
+        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+        exit(0)
+
+    return model
+
+
+def get_option_setter(model_name):
+    """Return the static method <modify_commandline_options> of the model class."""
+    model_class = find_model_using_name(model_name)
+    return model_class.modify_commandline_options
+
+
+def create_model(opt):
+    """Create a model given the option.
+
+    This function wraps the class CustomDatasetDataLoader.
+    This is the main interface between this package and 'train.py'/'test.py'
+
+    Example:
+        >>> from models import create_model
+        >>> model = create_model(opt)
+    """
+    model = find_model_using_name(opt.model)
+    instance = model(opt)
+    print("model [%s] was created" % type(instance).__name__)
+    return instance
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..66ec298f77cf769e39da38d1107e0b6dc38d519d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model.py
@@ -0,0 +1,244 @@
+import gc
+import os
+from abc import ABC, abstractmethod
+from collections import OrderedDict
+
+import torch
+
+from ....util import torch_gc
+from . import networks
+
+
+class BaseModel(ABC):
+    """This class is an abstract base class (ABC) for models.
+    To create a subclass, you need to implement the following five functions:
+        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+        -- <set_input>: unpack data from dataset and apply preprocessing.
+        -- <forward>: produce intermediate results.
+        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+    """
+
+    def __init__(self, opt):
+        """Initialize the BaseModel class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+        When creating your custom class, you need to implement your own initialization.
+        In this function, you should first call <BaseModel.__init__(self, opt)>.
+        Then, you need to define four lists:
+            -- self.loss_names (str list): specify the training losses that you want to plot and save.
+            -- self.model_names (str list): define networks used in our training.
+            -- self.visual_names (str list): specify the images that you want to display and save.
+            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+        """
+        self.opt = opt
+        self.gpu_ids = opt.gpu_ids
+        self.isTrain = opt.isTrain
+        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
+        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
+        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
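+            # cudnn.benchmark lets cuDNN autotune convolution kernels per input
+            # shape; it only pays off when input sizes stay fixed, which is why
+            # it is skipped for the variable-size 'scale_width' preprocessing.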
+            torch.backends.cudnn.benchmark = True
+        self.loss_names = []
+        self.model_names = []
+        self.visual_names = []
+        self.optimizers = []
+        self.image_paths = []
+        self.metric = 0  # used for learning rate policy 'plateau'
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        """Add new model-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser          -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        return parser
+
+    @abstractmethod
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): includes the data itself and its metadata information.
+        """
+        pass
+
+    @abstractmethod
+    def forward(self):
+        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+        pass
+
+    @abstractmethod
+    def optimize_parameters(self):
+        """Calculate losses, gradients, and update network weights; called in every training iteration"""
+        pass
+
+    def setup(self, opt):
+        """Load and print networks; create schedulers
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        if self.isTrain:
+            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
+        if not self.isTrain or opt.continue_train:
+            load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
+            self.load_networks(load_suffix)
+        self.print_networks(opt.verbose)
+
+    def eval(self):
+        """Make models eval mode during test time"""
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                net.eval()
+
+    def test(self):
+        """Forward function used in test time.
+
+        This function wraps <forward> in no_grad() so we don't save intermediate steps for backprop
+        It also calls <compute_visuals> to produce additional visualization results
+        """
+        with torch.no_grad():
+            self.forward()
+            self.compute_visuals()
+
+    def compute_visuals(self):
+        """Calculate additional output images for visdom and HTML visualization"""
+        pass
+
+    def get_image_paths(self):
+        """Return image paths that are used to load current data"""
+        return self.image_paths
+
+    def update_learning_rate(self):
+        """Update learning rates for all the networks; called at the end of every epoch"""
+        old_lr = self.optimizers[0].param_groups[0]['lr']
+        for scheduler in self.schedulers:
+            if self.opt.lr_policy == 'plateau':
+                scheduler.step(self.metric)
+            else:
+                scheduler.step()
+
+        lr = self.optimizers[0].param_groups[0]['lr']
+        print('learning rate %.7f -> %.7f' % (old_lr, lr))
+
+    def get_current_visuals(self):
+        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
+        visual_ret = OrderedDict()
+        for name in self.visual_names:
+            if isinstance(name, str):
+                visual_ret[name] = getattr(self, name)
+        return visual_ret
+
+    def get_current_losses(self):
+        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
+        errors_ret = OrderedDict()
+        for name in self.loss_names:
+            if isinstance(name, str):
+                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
+        return errors_ret
+
+    def save_networks(self, epoch):
+        """Save all the networks to the disk.
+ + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + for name in self.model_names: + if isinstance(name, str): + save_filename = '%s_net_%s.pth' % (epoch, name) + save_path = os.path.join(self.save_dir, save_filename) + net = getattr(self, 'net' + name) + + if len(self.gpu_ids) > 0 and torch.cuda.is_available(): + torch.save(net.module.cpu().state_dict(), save_path) + net.cuda(self.gpu_ids[0]) + else: + torch.save(net.cpu().state_dict(), save_path) + + def unload_network(self, name): + """Unload network and gc. + """ + if isinstance(name, str): + net = getattr(self, 'net' + name) + del net + gc.collect() + torch_gc() + return None + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + def load_networks(self, epoch): + """Load all the networks from the disk. + + Parameters: + epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) + """ + for name in self.model_names: + if isinstance(name, str): + load_filename = '%s_net_%s.pth' % (epoch, name) + load_path = os.path.join(self.save_dir, load_filename) + net = getattr(self, 'net' + name) + if isinstance(net, torch.nn.DataParallel): + net = net.module + # print('Loading depth boost model from %s' % load_path) + # if you are using PyTorch newer than 0.4 (e.g., built from + # GitHub source), you can remove str() on self.device + state_dict = torch.load(load_path, map_location=str(self.device)) + if hasattr(state_dict, '_metadata'): + del state_dict._metadata + + # patch InstanceNorm checkpoints prior to 0.4 + for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop + self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) + net.load_state_dict(state_dict) + + def print_networks(self, verbose): + """Print the total number of parameters in the network and (if verbose) network architecture + + Parameters: + verbose (bool) -- if verbose: print the network architecture + """ + print('---------- Networks initialized -------------') + for name in self.model_names: + if isinstance(name, str): + net = getattr(self, 'net' + name) + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + if verbose: + print(net) + print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) + print('-----------------------------------------------') + + def set_requires_grad(self, nets, requires_grad=False): + """Set requires_grad=False for all the networks to avoid unnecessary computations + Parameters: + nets (network list) -- a list of networks + requires_grad (bool) -- whether the networks require gradients or not + """ + if not isinstance(nets, list): + nets = [nets] + for net in nets: + if net is not None: + for param in net.parameters(): + param.requires_grad = requires_grad diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py new file mode 100644 index 0000000000000000000000000000000000000000..1709accdf0b048b3793dfd1f58d1b06c35f7b907 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py @@ -0,0 +1,58 @@ +import os +import torch + +class BaseModelHG(): + def name(self): + return 'BaseModel' + + def initialize(self, opt): + self.opt = opt + self.gpu_ids = opt.gpu_ids + self.isTrain = opt.isTrain + self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor + self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) + + def set_input(self, input): + self.input = input + + def forward(self): + pass + + # used in test time, no backprop + def test(self): + pass + + def get_image_paths(self): + pass + + def optimize_parameters(self): + pass + + def get_current_visuals(self): + return self.input + + def get_current_errors(self): + return {} + + def save(self, label): + pass + + # helper saving function that can be used by subclasses + def save_network(self, network, network_label, epoch_label, gpu_ids): + save_filename = '_%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(self.save_dir, save_filename) + torch.save(network.cpu().state_dict(), save_path) + if len(gpu_ids) and torch.cuda.is_available(): + network.cuda(gpu_ids[0]) + + # helper loading function that can be used by subclasses + def load_network(self, network, network_label, epoch_label): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(self.save_dir, save_filename) + print(save_path) + model = torch.load(save_path) + return model + # network.load_state_dict(torch.load(save_path)) + + def update_learning_rate(self): + pass diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/networks.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..0cf912b2973721a02deefd042af621e732bad59f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/networks.py @@ -0,0 +1,623 @@ +import torch +import torch.nn as nn +from torch.nn import init +import functools +from torch.optim import lr_scheduler + + +############################################################################### +# Helper Functions +############################################################################### + + +class Identity(nn.Module): + def forward(self, x): + return x + + +def get_norm_layer(norm_type='instance'): + """Return a normalization layer + + Parameters: + norm_type (str) -- the name of the normalization layer: batch | instance | none + + For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev). + For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
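The dispatch body for get_norm_layer follows just below; as a quick, hedged illustration (only torch and functools assumed), whatever branch is taken, the returned object is always something the generator code can call with a channel count:

```python
import functools
import torch.nn as nn

# What get_norm_layer('instance') effectively returns:
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
print(norm_layer(64))  # InstanceNorm2d(64, ..., affine=False, track_running_stats=False)

# The 'none' branch instead returns a function that ignores its argument and
# yields Identity(), so callers can uniformly write norm_layer(num_channels).
```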
+ """ + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + elif norm_type == 'none': + def norm_layer(x): return Identity() + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + + +def get_scheduler(optimizer, opt): + """Return a learning rate scheduler + + Parameters: + optimizer -- the optimizer of the network + opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.  + opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine + + For 'linear', we keep the same learning rate for the first epochs + and linearly decay the rate to zero over the next epochs. + For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers. + See https://pytorch.org/docs/stable/optim.html for more details. + """ + if opt.lr_policy == 'linear': + def lambda_rule(epoch): + lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1) + return lr_l + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) + elif opt.lr_policy == 'step': + scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) + elif opt.lr_policy == 'plateau': + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) + elif opt.lr_policy == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) + else: + return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) + return scheduler + + +def init_weights(net, init_type='normal', init_gain=0.02): + """Initialize network weights. + + Parameters: + net (network) -- network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + + We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might + work better for some applications. Feel free to try yourself. + """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + # print('initialize network with %s' % init_type) + net.apply(init_func) # apply the initialization function + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): + """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. 
initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs + init_weights(net, init_type, init_gain=init_gain) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]): + """Create a generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 + norm (str) -- the name of normalization layers used in the network: batch | instance | none + use_dropout (bool) -- if use dropout layers. + init_type (str) -- the name of our initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a generator + + Our current implementation provides two types of generators: + U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images) + The original U-Net paper: https://arxiv.org/abs/1505.04597 + + Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks) + Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations. + We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style). + + + The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
+ """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) + elif netG == 'resnet_12blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=12) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_672': + net = UnetGenerator(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_960': + net = UnetGenerator(input_nc, output_nc, 6, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_1024': + net = UnetGenerator(input_nc, output_nc, 10, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]): + """Create a discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the first conv layer + netD (str) -- the architecture's name: basic | n_layers | pixel + n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers' + norm (str) -- the type of normalization layers used in the network. + init_type (str) -- the name of the initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a discriminator + + Our current implementation provides three types of discriminators: + [basic]: 'PatchGAN' classifier described in the original pix2pix paper. + It can classify whether 70×70 overlapping patches are real or fake. + Such a patch-level discriminator architecture has fewer parameters + than a full-image discriminator and can work on arbitrarily-sized images + in a fully convolutional fashion. + + [n_layers]: With this mode, you can specify the number of conv layers in the discriminator + with the parameter (default=3 as used in [basic] (PatchGAN).) + + [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not. + It encourages greater color diversity but has no effect on spatial statistics. + + The discriminator has been initialized by . It uses Leakly RELU for non-linearity. 
+ """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netD == 'basic': # default PatchGAN classifier + net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer) + elif netD == 'n_layers': # more options + net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer) + elif netD == 'pixel': # classify if each pixel is real or fake + net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer) + else: + raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD) + return init_net(net, init_type, init_gain, gpu_ids) + + +############################################################################## +# Classes +############################################################################## +class GANLoss(nn.Module): + """Define different GAN objectives. + + The GANLoss class abstracts away the need to create the target label tensor + that has the same size as the input. + """ + + def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): + """ Initialize the GANLoss class. + + Parameters: + gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp. + target_real_label (bool) - - label for a real image + target_fake_label (bool) - - label of a fake image + + Note: Do not use sigmoid as the last layer of Discriminator. + LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss. + """ + super(GANLoss, self).__init__() + self.register_buffer('real_label', torch.tensor(target_real_label)) + self.register_buffer('fake_label', torch.tensor(target_fake_label)) + self.gan_mode = gan_mode + if gan_mode == 'lsgan': + self.loss = nn.MSELoss() + elif gan_mode == 'vanilla': + self.loss = nn.BCEWithLogitsLoss() + elif gan_mode in ['wgangp']: + self.loss = None + else: + raise NotImplementedError('gan mode %s not implemented' % gan_mode) + + def get_target_tensor(self, prediction, target_is_real): + """Create label tensors with the same size as the input. + + Parameters: + prediction (tensor) - - tpyically the prediction from a discriminator + target_is_real (bool) - - if the ground truth label is for real images or fake images + + Returns: + A label tensor filled with ground truth label, and with the size of the input + """ + + if target_is_real: + target_tensor = self.real_label + else: + target_tensor = self.fake_label + return target_tensor.expand_as(prediction) + + def __call__(self, prediction, target_is_real): + """Calculate loss given Discriminator's output and grount truth labels. + + Parameters: + prediction (tensor) - - tpyically the prediction output from a discriminator + target_is_real (bool) - - if the ground truth label is for real images or fake images + + Returns: + the calculated loss. 
+ """ + if self.gan_mode in ['lsgan', 'vanilla']: + target_tensor = self.get_target_tensor(prediction, target_is_real) + loss = self.loss(prediction, target_tensor) + elif self.gan_mode == 'wgangp': + if target_is_real: + loss = -prediction.mean() + else: + loss = prediction.mean() + return loss + + +def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): + """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 + + Arguments: + netD (network) -- discriminator network + real_data (tensor array) -- real images + fake_data (tensor array) -- generated images from the generator + device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') + type (str) -- if we mix real and fake data or not [real | fake | mixed]. + constant (float) -- the constant used in formula ( ||gradient||_2 - constant)^2 + lambda_gp (float) -- weight for this loss + + Returns the gradient penalty loss + """ + if lambda_gp > 0.0: + if type == 'real': # either use real images, fake images, or a linear interpolation of two. + interpolatesv = real_data + elif type == 'fake': + interpolatesv = fake_data + elif type == 'mixed': + alpha = torch.rand(real_data.shape[0], 1, device=device) + alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) + interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) + else: + raise NotImplementedError('{} not implemented'.format(type)) + interpolatesv.requires_grad_(True) + disc_interpolates = netD(interpolatesv) + gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, + grad_outputs=torch.ones(disc_interpolates.size()).to(device), + create_graph=True, retain_graph=True, only_inputs=True) + gradients = gradients[0].view(real_data.size(0), -1) # flat the data + gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps + return gradient_penalty, gradients + else: + return 0.0, None + + +class ResnetGenerator(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
+ + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. + """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. 
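The recursion described above stacks one stride-2 convolution per level, so the input's height and width must be divisible by 2**num_downs; a hedged sketch with the 'unet_256' configuration (num_downs=8, default BatchNorm), runnable on CPU:

```python
import torch

net = UnetGenerator(input_nc=3, output_nc=3, num_downs=8)  # the 'unet_256' variant
x = torch.randn(1, 3, 256, 256)     # 256 / 2**8 == 1 pixel at the bottleneck
with torch.no_grad():
    print(net(x).shape)             # torch.Size([1, 3, 256, 256])
```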
+ + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. + """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class NLayerDiscriminator(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + self.model = nn.Sequential(*sequence) + + def forward(self, input): + """Standard forward.""" + return self.model(input) + + +class 
PixelDiscriminator(nn.Module): + """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" + + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): + """Construct a 1x1 PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + """ + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + """Standard forward.""" + return self.net(input) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py new file mode 100644 index 0000000000000000000000000000000000000000..89e89652feb96314973a050c5a2477b474630abb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py @@ -0,0 +1,155 @@ +import torch +from .base_model import BaseModel +from . import networks + + +class Pix2Pix4DepthModel(BaseModel): + """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data. + + The model training requires '--dataset_mode aligned' dataset. + By default, it uses a '--netG unet256' U-Net generator, + a '--netD basic' discriminator (PatchGAN), + and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper). + + pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf + """ + @staticmethod + def modify_commandline_options(parser, is_train=True): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + + For pix2pix, we do not use image buffer + The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1 + By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets. + """ + # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/) + parser.set_defaults(input_nc=2,output_nc=1,norm='none', netG='unet_1024', dataset_mode='depthmerge') + if is_train: + parser.set_defaults(pool_size=0, gan_mode='vanilla',) + parser.add_argument('--lambda_L1', type=float, default=1000, help='weight for L1 loss') + return parser + + def __init__(self, opt): + """Initialize the pix2pix class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseModel.__init__(self, opt) + # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses> + + self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'] + # self.loss_names = ['G_L1'] + + # specify the images you want to save/display.
The training/test scripts will call <BaseModel.get_current_visuals> + if self.isTrain: + self.visual_names = ['outer','inner', 'fake_B', 'real_B'] + else: + self.visual_names = ['fake_B'] + + # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks> + if self.isTrain: + self.model_names = ['G','D'] + else: # during test time, only load G + self.model_names = ['G'] + + # define networks (both generator and discriminator) + self.netG = networks.define_G(opt.input_nc, opt.output_nc, 64, 'unet_1024', 'none', + False, 'normal', 0.02, self.gpu_ids) + + if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc + self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, + opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) + + if self.isTrain: + # define loss functions + self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) + self.criterionL1 = torch.nn.L1Loss() + # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>. + self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=1e-4, betas=(opt.beta1, 0.999)) + self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=2e-06, betas=(opt.beta1, 0.999)) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D) + + def set_input_train(self, input): + self.outer = input['data_outer'].to(self.device) + self.outer = torch.nn.functional.interpolate(self.outer,(1024,1024),mode='bilinear',align_corners=False) + + self.inner = input['data_inner'].to(self.device) + self.inner = torch.nn.functional.interpolate(self.inner,(1024,1024),mode='bilinear',align_corners=False) + + self.image_paths = input['image_path'] + + if self.isTrain: + self.gtfake = input['data_gtfake'].to(self.device) + self.gtfake = torch.nn.functional.interpolate(self.gtfake, (1024, 1024), mode='bilinear', align_corners=False) + self.real_B = self.gtfake + + self.real_A = torch.cat((self.outer, self.inner), 1) + + def set_input(self, outer, inner): + inner = torch.from_numpy(inner).unsqueeze(0).unsqueeze(0) + outer = torch.from_numpy(outer).unsqueeze(0).unsqueeze(0) + + inner = (inner - torch.min(inner))/(torch.max(inner)-torch.min(inner)) + outer = (outer - torch.min(outer))/(torch.max(outer)-torch.min(outer)) + + inner = self.normalize(inner) + outer = self.normalize(outer) + + self.real_A = torch.cat((outer, inner), 1).to(self.device) + + + def normalize(self, input): + input = input * 2 + input = input - 1 + return input + + def forward(self): + """Run forward pass; called by both functions <optimize_parameters> and <test>.""" + self.fake_B = self.netG(self.real_A) # G(A) + + def backward_D(self): + """Calculate GAN loss for the discriminator""" + # Fake; stop backprop to the generator by detaching fake_B + fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator + pred_fake = self.netD(fake_AB.detach()) + self.loss_D_fake = self.criterionGAN(pred_fake, False) + # Real + real_AB = torch.cat((self.real_A, self.real_B), 1) + pred_real = self.netD(real_AB) + self.loss_D_real = self.criterionGAN(pred_real, True) + # combine loss and calculate gradients + self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 + self.loss_D.backward() + + def backward_G(self): + """Calculate GAN and L1 loss for the generator""" + # First, G(A) should fake the discriminator + fake_AB = torch.cat((self.real_A, self.fake_B), 1) + pred_fake =
self.netD(fake_AB) + self.loss_G_GAN = self.criterionGAN(pred_fake, True) + # Second, G(A) = B + self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1 + # combine loss and calculate gradients + self.loss_G = self.loss_G_L1 + self.loss_G_GAN + self.loss_G.backward() + + def optimize_parameters(self): + self.forward() # compute fake images: G(A) + # update D + self.set_requires_grad(self.netD, True) # enable backprop for D + self.optimizer_D.zero_grad() # set D's gradients to zero + self.backward_D() # calculate gradients for D + self.optimizer_D.step() # update D's weights + # update G + self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G + self.optimizer_G.zero_grad() # set G's gradients to zero + self.backward_G() # calculate gradients for G + self.optimizer_G.step() # update G's weights \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7eedebe54aa70169fd25951b3034d819e396c90 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/__init__.py @@ -0,0 +1 @@ +"""This options package includes option modules: training options, test options, and basic options (used in both training and test).""" diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/base_options.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/base_options.py new file mode 100644 index 0000000000000000000000000000000000000000..533a1e88a7e8494223f6994e6861c93667754f83 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/base_options.py @@ -0,0 +1,156 @@ +import argparse +import os +from ...pix2pix.util import util +# import torch +from ...pix2pix import models +# import pix2pix.data +import numpy as np + +class BaseOptions(): + """This class defines options used during both training and test time. + + It also implements several helper functions such as parsing, printing, and saving the options. + It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class. + """ + + def __init__(self): + """Reset the class; indicates the class hasn't been initialized""" + self.initialized = False + + def initialize(self, parser): + """Define the common options that are used in both training and test.""" + # basic parameters + parser.add_argument('--dataroot', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') + parser.add_argument('--name', type=str, default='void', help='mahdi_unet_new, scaled_unet') + parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') + parser.add_argument('--checkpoints_dir', type=str, default='./pix2pix/checkpoints', help='models are saved here') + # model parameters + parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use.
[cycle_gan | pix2pix | test | colorization]') + parser.add_argument('--input_nc', type=int, default=2, help='# of input image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale') + parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') + parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') + parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]') + parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') + # dataset parameters + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--load_size', type=int, default=672, help='scale images to this size') + parser.add_argument('--crop_size', type=int, default=672, help='then crop to this size') + parser.add_argument('--max_dataset_size', type=int, default=10000, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') + # additional parameters + parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') + parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') + parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') + parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') + + parser.add_argument('--data_dir', type=str, required=False, + help='input files directory images can be .png .jpg .tiff') + parser.add_argument('--output_dir', type=str, required=False, + help='result dir. result depth will be png. vides are JMPG as avi') + parser.add_argument('--savecrops', type=int, required=False) + parser.add_argument('--savewholeest', type=int, required=False) + parser.add_argument('--output_resolution', type=int, required=False, + help='0 for no restriction 1 for resize to input size') + parser.add_argument('--net_receptive_field_size', type=int, required=False) + parser.add_argument('--pix2pixsize', type=int, required=False) + parser.add_argument('--generatevideo', type=int, required=False) + parser.add_argument('--depthNet', type=int, required=False, help='0: midas 1:strurturedRL') + parser.add_argument('--R0', action='store_true') + parser.add_argument('--R20', action='store_true') + parser.add_argument('--Final', action='store_true') + parser.add_argument('--colorize_results', action='store_true') + parser.add_argument('--max_res', type=float, default=np.inf) + + self.initialized = True + return parser + + def gather_options(self): + """Initialize our parser with basic options(only once). + Add additional model-specific and dataset-specific options. + These options are defined in the function + in model and dataset classes. + """ + if not self.initialized: # check if it has been initialized + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = self.initialize(parser) + + # get the basic options + opt, _ = parser.parse_known_args() + + # modify model-related parser options + model_name = opt.model + model_option_setter = models.get_option_setter(model_name) + parser = model_option_setter(parser, self.isTrain) + opt, _ = parser.parse_known_args() # parse again with new defaults + + # modify dataset-related parser options + # dataset_name = opt.dataset_mode + # dataset_option_setter = pix2pix.data.get_option_setter(dataset_name) + # parser = dataset_option_setter(parser, self.isTrain) + + # save and return the parser + self.parser = parser + #return parser.parse_args() #EVIL + return opt + + def print_options(self, opt): + """Print and save options + + It will print both current options and default values(if different). 
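The remaining methods handle pretty-printing and post-processing of the parsed Namespace. As a hedged sketch of the intended end-to-end flow, using TestOptions (defined later in this diff) and forcing CPU so the snippet runs anywhere:

```python
import sys

# Simulate an empty command line apart from forcing CPU (gpu_ids defaults to '0').
sys.argv = ['demo', '--gpu_ids', '-1']

opt = TestOptions().parse()     # gather_options() + suffix handling + gpu_ids parsing
print(opt.model, opt.isTrain)   # 'pix2pix4depth' False  (TestOptions overrides the defaults)
print(opt.gpu_ids)              # []  ('-1' is filtered out, so everything stays on CPU)
```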
+ It will save options into a text file / [checkpoints_dir] / opt.txt + """ + message = '' + message += '----------------- Options ---------------\n' + for k, v in sorted(vars(opt).items()): + comment = '' + default = self.parser.get_default(k) + if v != default: + comment = '\t[default: %s]' % str(default) + message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) + message += '----------------- End -------------------' + print(message) + + # save to the disk + expr_dir = os.path.join(opt.checkpoints_dir, opt.name) + util.mkdirs(expr_dir) + file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) + with open(file_name, 'wt') as opt_file: + opt_file.write(message) + opt_file.write('\n') + + def parse(self): + """Parse our options, create checkpoints directory suffix, and set up gpu device.""" + opt = self.gather_options() + opt.isTrain = self.isTrain # train or test + + # process opt.suffix + if opt.suffix: + suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' + opt.name = opt.name + suffix + + #self.print_options(opt) + + # set gpu ids + str_ids = opt.gpu_ids.split(',') + opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + opt.gpu_ids.append(id) + #if len(opt.gpu_ids) > 0: + # torch.cuda.set_device(opt.gpu_ids[0]) + + self.opt = opt + return self.opt diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/test_options.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/test_options.py new file mode 100644 index 0000000000000000000000000000000000000000..a3424b5e3b66d6813f74c8cecad691d7488d121c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/options/test_options.py @@ -0,0 +1,22 @@ +from .base_options import BaseOptions + + +class TestOptions(BaseOptions): + """This class includes test options. + + It also includes shared options defined in BaseOptions. + """ + + def initialize(self, parser): + parser = BaseOptions.initialize(self, parser) # define shared options + parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') + parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + # Dropout and Batchnorm has different behavioir during training and test. 
+ parser.add_argument('--eval', action='store_true', help='use eval mode during test time.') + parser.add_argument('--num_test', type=int, default=50, help='how many test images to run') + # rewrite devalue values + parser.set_defaults(model='pix2pix4depth') + # To avoid cropping, the load_size should be the same as crop_size + parser.set_defaults(load_size=parser.get_default('crop_size')) + self.isTrain = False + return parser diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae36f63d8859ec0c60dcbfe67c4ac324e751ddf7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/__init__.py @@ -0,0 +1 @@ +"""This package includes a miscellaneous collection of useful helper functions.""" diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/util.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8a7aceaa00681cb76675df7866bf8db58c8d2caf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/leres/pix2pix/util/util.py @@ -0,0 +1,105 @@ +"""This module contains simple helper functions """ +from __future__ import print_function +import torch +import numpy as np +from PIL import Image +import os + + +def tensor2im(input_image, imtype=np.uint16): + """"Converts a Tensor array into a numpy image array. + + Parameters: + input_image (tensor) -- the input image tensor array + imtype (type) -- the desired type of the converted numpy array + """ + if not isinstance(input_image, np.ndarray): + if isinstance(input_image, torch.Tensor): # get the data from a variable + image_tensor = input_image.data + else: + return input_image + image_numpy = torch.squeeze(image_tensor).cpu().numpy() # convert it into a numpy array + image_numpy = (image_numpy + 1) / 2.0 * (2**16-1) # + else: # if it is a numpy array, do nothing + image_numpy = input_image + return image_numpy.astype(imtype) + + +def diagnose_network(net, name='network'): + """Calculate and print the mean of average absolute(gradients) + + Parameters: + net (torch network) -- Torch network + name (str) -- the name of the network + """ + mean = 0.0 + count = 0 + for param in net.parameters(): + if param.grad is not None: + mean += torch.mean(torch.abs(param.grad.data)) + count += 1 + if count > 0: + mean = mean / count + print(name) + print(mean) + + +def save_image(image_numpy, image_path, aspect_ratio=1.0): + """Save a numpy image to the disk + + Parameters: + image_numpy (numpy array) -- input numpy array + image_path (str) -- the path of the image + """ + image_pil = Image.fromarray(image_numpy) + + image_pil = image_pil.convert('I;16') + + # image_pil = Image.fromarray(image_numpy) + # h, w, _ = image_numpy.shape + # + # if aspect_ratio > 1.0: + # image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) + # if aspect_ratio < 1.0: + # image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) + + image_pil.save(image_path) + + +def print_numpy(x, val=True, shp=False): + """Print the mean, min, max, median, std, and size of a numpy array + + Parameters: + val (bool) -- if print the values of the numpy array + shp (bool) -- if print the shape of the numpy array + """ + x = x.astype(np.float64) + if shp: + print('shape,', x.shape) + if 
val: + x = x.flatten() + print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( + np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) + + +def mkdirs(paths): + """create empty directories if they don't exist + + Parameters: + paths (str list) -- a list of directory paths + """ + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + + +def mkdir(path): + """create a single empty directory if it didn't exist + + Parameters: + path (str) -- a single directory path + """ + if not os.path.exists(path): + os.makedirs(path) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..16a9d56a3d4c15e4f34ac5426459c58487b01520 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Caroline Chan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..863c8372f7a3cccf189fcf468e24a25b2c6024e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart/__init__.py @@ -0,0 +1,142 @@ +import os +import warnings + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download + +norm_layer = nn.InstanceNorm2d + + +class ResidualBlock(nn.Module): + def __init__(self, in_features): + super(ResidualBlock, self).__init__() + + conv_block = [ nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + norm_layer(in_features), + nn.ReLU(inplace=True), + nn.ReflectionPad2d(1), + nn.Conv2d(in_features, in_features, 3), + norm_layer(in_features) + ] + + self.conv_block = nn.Sequential(*conv_block) + + def forward(self, x): + return x + self.conv_block(x) + + +class Generator(nn.Module): + def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True): + super(Generator, self).__init__() + + # Initial convolution block + model0 = [ nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, 64, 7), + norm_layer(64), + nn.ReLU(inplace=True) ] + self.model0 = nn.Sequential(*model0) + + # Downsampling + model1 = [] + in_features = 64 + out_features = in_features*2 + for _ in range(2): + model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), + norm_layer(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features*2 + self.model1 = nn.Sequential(*model1) + + model2 = [] + # Residual blocks + for _ in range(n_residual_blocks): + model2 += [ResidualBlock(in_features)] + self.model2 = nn.Sequential(*model2) + + # Upsampling + model3 = [] + out_features = in_features//2 + for _ in range(2): + model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), + norm_layer(out_features), + nn.ReLU(inplace=True) ] + in_features = out_features + out_features = in_features//2 + self.model3 = nn.Sequential(*model3) + + # Output layer + model4 = [ nn.ReflectionPad2d(3), + nn.Conv2d(64, output_nc, 7)] + if sigmoid: + model4 += [nn.Sigmoid()] + + self.model4 = nn.Sequential(*model4) + + def forward(self, x, cond=None): + out = self.model0(x) + out = self.model1(out) + out = self.model2(out) + out = self.model3(out) + out = self.model4(out) + + return out + + +class LineartDetector: + def __init__(self, model, coarse_model): + self.model = model + self.model_coarse = coarse_model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, coarse_filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "sk_model.pth" + coarse_filename = coarse_filename or "sk_model2.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + coarse_model_path = custom_hf_download(pretrained_model_or_path, coarse_filename, cache_dir=cache_dir) + + model = Generator(3, 1, 3) + model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))) + model.eval() + + coarse_model = Generator(3, 1, 3) + coarse_model.load_state_dict(torch.load(coarse_model_path, map_location=torch.device('cpu'))) + coarse_model.eval() + + return cls(model, 
coarse_model) + + def to(self, device): + self.model.to(device) + self.model_coarse.to(device) + return self + + def __call__(self, input_image, coarse=False, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + device = next(iter(self.model.parameters())).device + model = self.model_coarse if coarse else self.model + assert detected_map.ndim == 3 + with torch.no_grad(): + image = torch.from_numpy(detected_map).float().to(device) + image = image / 255.0 + image = rearrange(image, 'h w c -> 1 c h w') + line = model(image)[0][0] + + line = line.cpu().numpy() + line = (line * 255.0).clip(0, 255).astype(np.uint8) + + detected_map = HWC3(line) + detected_map = remove_pad(255 - detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..16a9d56a3d4c15e4f34ac5426459c58487b01520 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Caroline Chan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
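A hedged usage sketch for the `LineartDetector` added above. The Hugging Face repo id and image paths are assumptions for illustration; the checkpoint filenames (`sk_model.pth`, `sk_model2.pth`) are the defaults from `from_pretrained`:

```python
# Hypothetical usage of LineartDetector; repo id and image paths are assumptions.
from PIL import Image
from controlnet_aux.lineart import LineartDetector

detector = LineartDetector.from_pretrained("lllyasviel/Annotators").to("cuda")

image = Image.open("input.png").convert("RGB")
fine = detector(image, coarse=False, detect_resolution=512)  # fine model (sk_model.pth)
coarse = detector(image, coarse=True)                        # coarse model (sk_model2.pth)
fine.save("lineart.png")
```

Both calls return a PIL image by default (`output_type="pil"`); the `255 - detected_map` step inverts the result so lines come out white on a black canvas, as ControlNet lineart models expect.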
\ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6df7bfadb28e49d97445fd6a24d40e66dc501ba --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/lineart_anime/__init__.py @@ -0,0 +1,167 @@ +import functools +import os +import warnings + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from huggingface_hub import hf_hub_download +from PIL import Image + +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. + """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for _ in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
+ """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class LineartAnimeDetector: + def __init__(self, model): + self.model = model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "netG.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) + net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False) + ckpt = torch.load(model_path) + for key in list(ckpt.keys()): + if 'module.' 
in key:
+                ckpt[key.replace('module.', '')] = ckpt[key]
+                del ckpt[key]
+        net.load_state_dict(ckpt)
+        net.eval()
+
+        return cls(net)
+
+    def to(self, device):
+        self.model.to(device)
+        return self
+
+    def __call__(self, input_image, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs):
+        input_image, output_type = common_input_validate(input_image, output_type, **kwargs)
+        input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method)
+
+        H, W, C = input_image.shape
+        Hn = 256 * int(np.ceil(float(H) / 256.0))
+        Wn = 256 * int(np.ceil(float(W) / 256.0))
+        input_image = cv2.resize(input_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC)
+
+        device = next(iter(self.model.parameters())).device
+        with torch.no_grad():
+            image_feed = torch.from_numpy(input_image).float().to(device)
+            image_feed = image_feed / 127.5 - 1.0
+            image_feed = rearrange(image_feed, 'h w c -> 1 c h w')
+
+            line = self.model(image_feed)[0, 0] * 127.5 + 127.5
+            line = line.cpu().numpy()
+            line = line.clip(0, 255).astype(np.uint8)
+
+        # A1111 uses INTER_AREA for downscaling, so that is likely the best choice here
+        detected_map = cv2.resize(HWC3(line), (W, H), interpolation=cv2.INTER_AREA)
+        detected_map = remove_pad(255 - detected_map)
+
+        if output_type == "pil":
+            detected_map = Image.fromarray(detected_map)
+
+        return detected_map
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..bdca75a54d05781782e3d939401e93161cdd88f7
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Miaomiao Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
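Similarly, a usage sketch for the `LineartAnimeDetector` defined above (the repo id and input path are assumptions; `netG.pth` is the default checkpoint name in `from_pretrained`):

```python
# Hypothetical usage of LineartAnimeDetector; repo id and paths are assumptions.
from PIL import Image
from controlnet_aux.lineart_anime import LineartAnimeDetector

detector = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators").to("cpu")
image = Image.open("anime_frame.png").convert("RGB")
# The detector pads the input up to a multiple of 256 before the UNet forward
# pass, then resizes back down with INTER_AREA.
result = detector(image, detect_resolution=512, output_type="pil")
result.save("lineart_anime.png")
```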
\ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51331e2c32bf8d84322cc53e06e48bd81fa9d08c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/__init__.py @@ -0,0 +1,63 @@ +# MangaLineExtraction_PyTorch +# https://github.com/ljsabc/MangaLineExtraction_PyTorch + +#NOTE: This preprocessor is designed to work with lineart_anime ControlNet so the result will be white lines on black canvas + +import torch +import numpy as np +import os +import cv2 +from einops import rearrange +from .model_torch import res_skip +from PIL import Image +import warnings + +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, annotator_ckpts_path, custom_hf_download + +class LineartMangaDetector: + def __init__(self, model): + self.model = model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path=None, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "erika.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + net = res_skip() + ckpt = torch.load(model_path) + for key in list(ckpt.keys()): + if 'module.' in key: + ckpt[key.replace('module.', '')] = ckpt[key] + del ckpt[key] + net.load_state_dict(ckpt) + net.eval() + return cls(net) + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, input_image, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, 256 * int(np.ceil(float(detect_resolution) / 256.0)), upscale_method) + device = next(iter(self.model.parameters())).device + + img = cv2.cvtColor(detected_map, cv2.COLOR_RGB2GRAY) + with torch.no_grad(): + image_feed = torch.from_numpy(img).float().to(device) + image_feed = rearrange(image_feed, 'h w -> 1 1 h w') + + line = self.model(image_feed) + line = line.cpu().numpy()[0,0,:,:] + line[line > 255] = 255 + line[line < 0] = 0 + + line = line.astype(np.uint8) + + detected_map = HWC3(line) + detected_map = remove_pad(255 - detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/model_torch.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/model_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..de5828ccc486d74490b8da710d644651067bd5f3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/manga_line/model_torch.py @@ -0,0 +1,196 @@ +import torch.nn as nn +import numpy as np + +#torch.set_printoptions(precision=10) + + +class _bn_relu_conv(nn.Module): + def __init__(self, in_filters, nb_filters, fw, fh, subsample=1): + super(_bn_relu_conv, self).__init__() + self.model = nn.Sequential( + nn.BatchNorm2d(in_filters, eps=1e-3), + nn.LeakyReLU(0.2), + nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2), padding_mode='zeros') + ) + + def forward(self, x): + return self.model(x) + + # the following are for debugs + print("****", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape) + for i,layer in 
enumerate(self.model): + if i != 2: + x = layer(x) + else: + x = layer(x) + #x = nn.functional.pad(x, (1, 1, 1, 1), mode='constant', value=0) + print("____", np.max(x.cpu().numpy()), np.min(x.cpu().numpy()), np.mean(x.cpu().numpy()), np.std(x.cpu().numpy()), x.shape) + print(x[0]) + return x + + +class _u_bn_relu_conv(nn.Module): + def __init__(self, in_filters, nb_filters, fw, fh, subsample=1): + super(_u_bn_relu_conv, self).__init__() + self.model = nn.Sequential( + nn.BatchNorm2d(in_filters, eps=1e-3), + nn.LeakyReLU(0.2), + nn.Conv2d(in_filters, nb_filters, (fw, fh), stride=subsample, padding=(fw//2, fh//2)), + nn.Upsample(scale_factor=2, mode='nearest') + ) + + def forward(self, x): + return self.model(x) + + + +class _shortcut(nn.Module): + def __init__(self, in_filters, nb_filters, subsample=1): + super(_shortcut, self).__init__() + self.process = False + self.model = None + if in_filters != nb_filters or subsample != 1: + self.process = True + self.model = nn.Sequential( + nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample) + ) + + def forward(self, x, y): + #print(x.size(), y.size(), self.process) + if self.process: + y0 = self.model(x) + #print("merge+", torch.max(y0+y), torch.min(y0+y),torch.mean(y0+y), torch.std(y0+y), y0.shape) + return y0 + y + else: + #print("merge", torch.max(x+y), torch.min(x+y),torch.mean(x+y), torch.std(x+y), y.shape) + return x + y + +class _u_shortcut(nn.Module): + def __init__(self, in_filters, nb_filters, subsample): + super(_u_shortcut, self).__init__() + self.process = False + self.model = None + if in_filters != nb_filters: + self.process = True + self.model = nn.Sequential( + nn.Conv2d(in_filters, nb_filters, (1, 1), stride=subsample, padding_mode='zeros'), + nn.Upsample(scale_factor=2, mode='nearest') + ) + + def forward(self, x, y): + if self.process: + return self.model(x) + y + else: + return x + y + + +class basic_block(nn.Module): + def __init__(self, in_filters, nb_filters, init_subsample=1): + super(basic_block, self).__init__() + self.conv1 = _bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample) + self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3) + self.shortcut = _shortcut(in_filters, nb_filters, subsample=init_subsample) + + def forward(self, x): + x1 = self.conv1(x) + x2 = self.residual(x1) + return self.shortcut(x, x2) + +class _u_basic_block(nn.Module): + def __init__(self, in_filters, nb_filters, init_subsample=1): + super(_u_basic_block, self).__init__() + self.conv1 = _u_bn_relu_conv(in_filters, nb_filters, 3, 3, subsample=init_subsample) + self.residual = _bn_relu_conv(nb_filters, nb_filters, 3, 3) + self.shortcut = _u_shortcut(in_filters, nb_filters, subsample=init_subsample) + + def forward(self, x): + y = self.residual(self.conv1(x)) + return self.shortcut(x, y) + + +class _residual_block(nn.Module): + def __init__(self, in_filters, nb_filters, repetitions, is_first_layer=False): + super(_residual_block, self).__init__() + layers = [] + for i in range(repetitions): + init_subsample = 1 + if i == repetitions - 1 and not is_first_layer: + init_subsample = 2 + if i == 0: + l = basic_block(in_filters=in_filters, nb_filters=nb_filters, init_subsample=init_subsample) + else: + l = basic_block(in_filters=nb_filters, nb_filters=nb_filters, init_subsample=init_subsample) + layers.append(l) + + self.model = nn.Sequential(*layers) + + def forward(self, x): + return self.model(x) + + +class _upsampling_residual_block(nn.Module): + def __init__(self, in_filters, nb_filters, repetitions): + 
super(_upsampling_residual_block, self).__init__() + layers = [] + for i in range(repetitions): + l = None + if i == 0: + l = _u_basic_block(in_filters=in_filters, nb_filters=nb_filters)#(input) + else: + l = basic_block(in_filters=nb_filters, nb_filters=nb_filters)#(input) + layers.append(l) + + self.model = nn.Sequential(*layers) + + def forward(self, x): + return self.model(x) + + +class res_skip(nn.Module): + + def __init__(self): + super(res_skip, self).__init__() + self.block0 = _residual_block(in_filters=1, nb_filters=24, repetitions=2, is_first_layer=True)#(input) + self.block1 = _residual_block(in_filters=24, nb_filters=48, repetitions=3)#(block0) + self.block2 = _residual_block(in_filters=48, nb_filters=96, repetitions=5)#(block1) + self.block3 = _residual_block(in_filters=96, nb_filters=192, repetitions=7)#(block2) + self.block4 = _residual_block(in_filters=192, nb_filters=384, repetitions=12)#(block3) + + self.block5 = _upsampling_residual_block(in_filters=384, nb_filters=192, repetitions=7)#(block4) + self.res1 = _shortcut(in_filters=192, nb_filters=192)#(block3, block5, subsample=(1,1)) + + self.block6 = _upsampling_residual_block(in_filters=192, nb_filters=96, repetitions=5)#(res1) + self.res2 = _shortcut(in_filters=96, nb_filters=96)#(block2, block6, subsample=(1,1)) + + self.block7 = _upsampling_residual_block(in_filters=96, nb_filters=48, repetitions=3)#(res2) + self.res3 = _shortcut(in_filters=48, nb_filters=48)#(block1, block7, subsample=(1,1)) + + self.block8 = _upsampling_residual_block(in_filters=48, nb_filters=24, repetitions=2)#(res3) + self.res4 = _shortcut(in_filters=24, nb_filters=24)#(block0,block8, subsample=(1,1)) + + self.block9 = _residual_block(in_filters=24, nb_filters=16, repetitions=2, is_first_layer=True)#(res4) + self.conv15 = _bn_relu_conv(in_filters=16, nb_filters=1, fh=1, fw=1, subsample=1)#(block7) + + def forward(self, x): + x0 = self.block0(x) + x1 = self.block1(x0) + x2 = self.block2(x1) + x3 = self.block3(x2) + x4 = self.block4(x3) + + x5 = self.block5(x4) + res1 = self.res1(x3, x5) + + x6 = self.block6(res1) + res2 = self.res2(x2, x6) + + x7 = self.block7(res2) + res3 = self.res3(x1, x7) + + x8 = self.block8(res3) + res4 = self.res4(x0, x8) + + x9 = self.block9(res4) + y = self.conv15(x9) + + return y \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f911daa9e729fb8541b701a3afc3f7b300a2989e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/__init__.py @@ -0,0 +1,31 @@ +import warnings +from typing import Union + +import cv2 +import numpy as np +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad +from .mediapipe_face_common import generate_annotation + + +class MediapipeFaceDetector: + def __call__(self, + input_image: Union[np.ndarray, Image.Image] = None, + max_faces: int = 1, + min_confidence: float = 0.5, + output_type: str = "pil", + detect_resolution: int = 512, + image_resolution: int = 512, + upscale_method="INTER_CUBIC", + **kwargs): + + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + detected_map = generate_annotation(detected_map, max_faces, min_confidence) + detected_map = 
remove_pad(HWC3(detected_map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/mediapipe_face_common.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/mediapipe_face_common.py new file mode 100644 index 0000000000000000000000000000000000000000..32eeaf7455df2dd9efa5976def5e617b08757598 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mediapipe_face/mediapipe_face_common.py @@ -0,0 +1,156 @@ +from typing import Mapping +import warnings + +import mediapipe as mp +import numpy + +if mp: + mp_drawing = mp.solutions.drawing_utils + mp_drawing_styles = mp.solutions.drawing_styles + mp_face_detection = mp.solutions.face_detection # Only for counting faces. + mp_face_mesh = mp.solutions.face_mesh + mp_face_connections = mp.solutions.face_mesh_connections.FACEMESH_TESSELATION + mp_hand_connections = mp.solutions.hands_connections.HAND_CONNECTIONS + mp_body_connections = mp.solutions.pose_connections.POSE_CONNECTIONS + + DrawingSpec = mp.solutions.drawing_styles.DrawingSpec + PoseLandmark = mp.solutions.drawing_styles.PoseLandmark + + min_face_size_pixels: int = 64 + f_thick = 2 + f_rad = 1 + right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad) + right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad) + right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad) + left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad) + left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad) + left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad) + mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad) + head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad) + + # mp_face_mesh.FACEMESH_CONTOURS has all the items we care about. + face_connection_spec = {} + for edge in mp_face_mesh.FACEMESH_FACE_OVAL: + face_connection_spec[edge] = head_draw + for edge in mp_face_mesh.FACEMESH_LEFT_EYE: + face_connection_spec[edge] = left_eye_draw + for edge in mp_face_mesh.FACEMESH_LEFT_EYEBROW: + face_connection_spec[edge] = left_eyebrow_draw + # for edge in mp_face_mesh.FACEMESH_LEFT_IRIS: + # face_connection_spec[edge] = left_iris_draw + for edge in mp_face_mesh.FACEMESH_RIGHT_EYE: + face_connection_spec[edge] = right_eye_draw + for edge in mp_face_mesh.FACEMESH_RIGHT_EYEBROW: + face_connection_spec[edge] = right_eyebrow_draw + # for edge in mp_face_mesh.FACEMESH_RIGHT_IRIS: + # face_connection_spec[edge] = right_iris_draw + for edge in mp_face_mesh.FACEMESH_LIPS: + face_connection_spec[edge] = mouth_draw + iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw} + + +def draw_pupils(image, landmark_list, drawing_spec, halfwidth: int = 2): + """We have a custom function to draw the pupils because the mp.draw_landmarks method requires a parameter for all + landmarks. 
Until our PR is merged into mediapipe, we need this separate method.""" + if len(image.shape) != 3: + raise ValueError("Input image must be H,W,C.") + image_rows, image_cols, image_channels = image.shape + if image_channels != 3: # BGR channels + raise ValueError('Input image must contain three channel bgr data.') + for idx, landmark in enumerate(landmark_list.landmark): + if ( + (landmark.HasField('visibility') and landmark.visibility < 0.9) or + (landmark.HasField('presence') and landmark.presence < 0.5) + ): + continue + if landmark.x >= 1.0 or landmark.x < 0 or landmark.y >= 1.0 or landmark.y < 0: + continue + image_x = int(image_cols*landmark.x) + image_y = int(image_rows*landmark.y) + draw_color = None + if isinstance(drawing_spec, Mapping): + if drawing_spec.get(idx) is None: + continue + else: + draw_color = drawing_spec[idx].color + elif isinstance(drawing_spec, DrawingSpec): + draw_color = drawing_spec.color + image[image_y-halfwidth:image_y+halfwidth, image_x-halfwidth:image_x+halfwidth, :] = draw_color + + +def reverse_channels(image): + """Given a numpy array in RGB form, convert to BGR. Will also convert from BGR to RGB.""" + # im[:,:,::-1] is a neat hack to convert BGR to RGB by reversing the indexing order. + # im[:,:,::[2,1,0]] would also work but makes a copy of the data. + return image[:, :, ::-1] + + +def generate_annotation( + img_rgb, + max_faces: int, + min_confidence: float +): + """ + Find up to 'max_faces' inside the provided input image. + If min_face_size_pixels is provided and nonzero it will be used to filter faces that occupy less than this many + pixels in the image. + """ + with mp_face_mesh.FaceMesh( + static_image_mode=True, + max_num_faces=max_faces, + refine_landmarks=True, + min_detection_confidence=min_confidence, + ) as facemesh: + img_height, img_width, img_channels = img_rgb.shape + assert(img_channels == 3) + + results = facemesh.process(img_rgb).multi_face_landmarks + + if results is None: + print("No faces detected in controlnet image for Mediapipe face annotator.") + return numpy.zeros_like(img_rgb) + + # Filter faces that are too small + filtered_landmarks = [] + for lm in results: + landmarks = lm.landmark + face_rect = [ + landmarks[0].x, + landmarks[0].y, + landmarks[0].x, + landmarks[0].y, + ] # Left, up, right, down. + for i in range(len(landmarks)): + face_rect[0] = min(face_rect[0], landmarks[i].x) + face_rect[1] = min(face_rect[1], landmarks[i].y) + face_rect[2] = max(face_rect[2], landmarks[i].x) + face_rect[3] = max(face_rect[3], landmarks[i].y) + if min_face_size_pixels > 0: + face_width = abs(face_rect[2] - face_rect[0]) + face_height = abs(face_rect[3] - face_rect[1]) + face_width_pixels = face_width * img_width + face_height_pixels = face_height * img_height + face_size = min(face_width_pixels, face_height_pixels) + if face_size >= min_face_size_pixels: + filtered_landmarks.append(lm) + else: + filtered_landmarks.append(lm) + + # Annotations are drawn in BGR for some reason, but we don't need to flip a zero-filled image at the start. + empty = numpy.zeros_like(img_rgb) + + # Draw detected faces: + for face_landmarks in filtered_landmarks: + mp_drawing.draw_landmarks( + empty, + face_landmarks, + connections=face_connection_spec.keys(), + landmark_drawing_spec=None, + connection_drawing_spec=face_connection_spec + ) + draw_pupils(empty, face_landmarks, iris_landmark_spec, 2) + + # Flip BGR back to RGB. 
+ empty = reverse_channels(empty).copy() + + return empty \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..277b5c11be103f028a8d10985139f1da10c2f08e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..51600f19253080787487f40e19e8b7d5379a781f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/__init__.py @@ -0,0 +1,76 @@ +import os + +import cv2 +import numpy as np +import torch +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .api import MiDaSInference + + +class MidasDetector: + def __init__(self, model): + self.model = model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, model_type="dpt_hybrid", filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "dpt_hybrid-midas-501f0c75.pt" + subfolder = "annotator/ckpts" if pretrained_model_or_path == "lllyasviel/ControlNet" else '' + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir, subfolder=subfolder) + model = MiDaSInference(model_type=model_type, model_path=model_path) + return cls(model) + + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1, depth_and_normal=False, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + device = next(iter(self.model.parameters())).device + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + image_depth = detected_map + with torch.no_grad(): + image_depth = torch.from_numpy(image_depth).float() + image_depth = image_depth.to(device) + image_depth = image_depth / 127.5 - 1.0 + image_depth = rearrange(image_depth, 'h w c -> 1 c h w') + depth = 
self.model(image_depth)[0] + + depth_pt = depth.clone() + depth_pt -= torch.min(depth_pt) + depth_pt /= torch.max(depth_pt) + depth_pt = depth_pt.cpu().numpy() + depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8) + + if depth_and_normal: + depth_np = depth.cpu().numpy() + x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3) + y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3) + z = np.ones_like(x) * a + x[depth_pt < bg_th] = 0 + y[depth_pt < bg_th] = 0 + normal = np.stack([x, y, z], axis=2) + normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5 + normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)[:, :, ::-1] + + depth_image = HWC3(depth_image) + if depth_and_normal: + normal_image = HWC3(normal_image) + + + depth_image = remove_pad(depth_image) + if depth_and_normal: + normal_image = remove_pad(normal_image) + + if output_type == "pil": + depth_image = Image.fromarray(depth_image) + if depth_and_normal: + normal_image = Image.fromarray(normal_image) + + if depth_and_normal: + return depth_image, normal_image + else: + return depth_image diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/api.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/api.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7475008e9b9643ed762dbb6c122bcd655794d1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/api.py @@ -0,0 +1,169 @@ +# based on https://github.com/isl-org/MiDaS + +import cv2 +import os +import torch +import torch.nn as nn +from torchvision.transforms import Compose + +from custom_midas_repo.midas.dpt_depth import DPTDepthModel +from custom_midas_repo.midas.midas_net import MidasNet +from custom_midas_repo.midas.midas_net_custom import MidasNet_small +from custom_midas_repo.midas.transforms import Resize, NormalizeImage, PrepareForNet +from controlnet_aux.util import annotator_ckpts_path + + +ISL_PATHS = { + "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"), + "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"), + "midas_v21": "", + "midas_v21_small": "", +} + +remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt" + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def load_midas_transform(model_type): + # https://github.com/isl-org/MiDaS/blob/master/run.py + # load transform only + if model_type == "dpt_large": # DPT-Large + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_hybrid": # DPT-Hybrid + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "midas_v21": + net_w, net_h = 384, 384 + resize_mode = "upper_bound" + normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + elif model_type == "midas_v21_small": + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + else: + assert False, f"model_type '{model_type}' not implemented, use: --model_type large" + + transform = Compose( + [ + Resize( + net_w, + net_h, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method=resize_mode, + 
image_interpolation_method=cv2.INTER_CUBIC, + ), + normalization, + PrepareForNet(), + ] + ) + + return transform + + +def load_model(model_type, model_path=None): + # https://github.com/isl-org/MiDaS/blob/master/run.py + # load network + model_path = model_path or ISL_PATHS[model_type] + if model_type == "dpt_large": # DPT-Large + model = DPTDepthModel( + path=model_path, + backbone="vitl16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_hybrid": # DPT-Hybrid + if not os.path.exists(model_path): + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) + + model = DPTDepthModel( + path=model_path, + backbone="vitb_rn50_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "midas_v21": + model = MidasNet(model_path, non_negative=True) + net_w, net_h = 384, 384 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + elif model_type == "midas_v21_small": + model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, + non_negative=True, blocks={'expand': True}) + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + else: + print(f"model_type '{model_type}' not implemented, use: --model_type large") + assert False + + transform = Compose( + [ + Resize( + net_w, + net_h, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method=resize_mode, + image_interpolation_method=cv2.INTER_CUBIC, + ), + normalization, + PrepareForNet(), + ] + ) + + return model.eval(), transform + + +class MiDaSInference(nn.Module): + MODEL_TYPES_TORCH_HUB = [ + "DPT_Large", + "DPT_Hybrid", + "MiDaS_small" + ] + MODEL_TYPES_ISL = [ + "dpt_large", + "dpt_hybrid", + "midas_v21", + "midas_v21_small", + ] + + def __init__(self, model_type, model_path): + super().__init__() + assert (model_type in self.MODEL_TYPES_ISL) + model, _ = load_model(model_type, model_path) + self.model = model + self.model.train = disabled_train + + def forward(self, x): + with torch.no_grad(): + prediction = self.model(x) + return prediction + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/utils.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9d3b5b66370fa98da9e067ba53ead848ea9a59 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/midas/utils.py @@ -0,0 +1,189 @@ +"""Utils for monoDepth.""" +import sys +import re +import numpy as np +import cv2 +import torch + + +def read_pfm(path): + """Read pfm file. 
+
+    Args:
+        path (str): path to file
+
+    Returns:
+        tuple: (data, scale)
+    """
+    with open(path, "rb") as file:
+
+        color = None
+        width = None
+        height = None
+        scale = None
+        endian = None
+
+        header = file.readline().rstrip()
+        if header.decode("ascii") == "PF":
+            color = True
+        elif header.decode("ascii") == "Pf":
+            color = False
+        else:
+            raise Exception("Not a PFM file: " + path)
+
+        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
+        if dim_match:
+            width, height = list(map(int, dim_match.groups()))
+        else:
+            raise Exception("Malformed PFM header.")
+
+        scale = float(file.readline().decode("ascii").rstrip())
+        if scale < 0:
+            # little-endian
+            endian = "<"
+            scale = -scale
+        else:
+            # big-endian
+            endian = ">"
+
+        data = np.fromfile(file, endian + "f")
+        shape = (height, width, 3) if color else (height, width)
+
+        data = np.reshape(data, shape)
+        data = np.flipud(data)
+
+        return data, scale
+
+
+def write_pfm(path, image, scale=1):
+    """Write pfm file.
+
+    Args:
+        path (str): path to file
+        image (array): data
+        scale (int, optional): Scale. Defaults to 1.
+    """
+
+    with open(path, "wb") as file:
+        color = None
+
+        if image.dtype.name != "float32":
+            raise Exception("Image dtype must be float32.")
+
+        image = np.flipud(image)
+
+        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
+            color = True
+        elif (
+            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
+        ):  # greyscale
+            color = False
+        else:
+            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
+
+        # encode the whole header line, not just the greyscale branch
+        file.write(("PF\n" if color else "Pf\n").encode())
+        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
+
+        endian = image.dtype.byteorder
+
+        if endian == "<" or (endian == "=" and sys.byteorder == "little"):
+            scale = -scale
+
+        file.write("%f\n".encode() % scale)
+
+        image.tofile(file)
+
+
+def read_image(path):
+    """Read image and output RGB image (0-1).
+
+    Args:
+        path (str): path to file
+
+    Returns:
+        array: RGB image (0-1)
+    """
+    img = cv2.imread(path)
+
+    if img.ndim == 2:
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
+
+    return img
+
+
+def resize_image(img):
+    """Resize image and make it fit for network.
+
+    Args:
+        img (array): image
+
+    Returns:
+        tensor: data ready for network
+    """
+    height_orig = img.shape[0]
+    width_orig = img.shape[1]
+
+    if width_orig > height_orig:
+        scale = width_orig / 384
+    else:
+        scale = height_orig / 384
+
+    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
+    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
+
+    img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
+
+    img_resized = (
+        torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
+    )
+    img_resized = img_resized.unsqueeze(0)
+
+    return img_resized
+
+
+def resize_depth(depth, width, height):
+    """Resize depth map and bring to CPU (numpy).
+
+    Args:
+        depth (tensor): depth
+        width (int): image width
+        height (int): image height
+
+    Returns:
+        array: processed depth
+    """
+    depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
+
+    depth_resized = cv2.resize(
+        depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
+    )
+
+    return depth_resized
+
+def write_depth(path, depth, bits=1):
+    """Write depth map to pfm and png file.
+
+    Args:
+        path (str): filepath without extension
+        depth (array): depth
+    """
+    write_pfm(path + ".pfm", depth.astype(np.float32))
+
+    depth_min = depth.min()
+    depth_max = depth.max()
+
+    max_val = (2**(8*bits))-1
+
+    if depth_max - depth_min > np.finfo("float").eps:
+        out = max_val * (depth - depth_min) / (depth_max - depth_min)
+    else:
+        out = np.zeros(depth.shape, dtype=depth.dtype)
+
+    if bits == 1:
+        cv2.imwrite(path + ".png", out.astype("uint8"))
+    elif bits == 2:
+        cv2.imwrite(path + ".png", out.astype("uint16"))
+
+    return
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d855c6db44b4e873eedd750d34fa2eaf22e22363
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021-present NAVER Corp. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2b63ca78931940dcc67fc9ac0297edfcf2ca52 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/__init__.py @@ -0,0 +1,52 @@ +import os +import warnings + +import cv2 +import numpy as np +import torch +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .models.mbv2_mlsd_large import MobileV2_MLSD_Large +from .utils import pred_lines + + +class MLSDdetector: + def __init__(self, model): + self.model = model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "mlsd_large_512_fp32.pth" + subfolder = "annotator/ckpts" if pretrained_model_or_path == "lllyasviel/ControlNet" else '' + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir, subfolder=subfolder) + model = MobileV2_MLSD_Large() + model.load_state_dict(torch.load(model_path), strict=True) + model.eval() + + return cls(model) + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, input_image, thr_v=0.1, thr_d=0.1, detect_resolution=512, output_type="pil", upscale_method="INTER_AREA", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + img = detected_map + img_output = np.zeros_like(img) + try: + with torch.no_grad(): + lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d) + for line in lines: + x_start, y_start, x_end, y_end = [int(val) for val in line] + cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1) + except Exception as e: + pass + + detected_map = remove_pad(HWC3(img_output[:, :, 0])) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/__init__.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_large.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_large.py new file mode 100644 index 0000000000000000000000000000000000000000..5b9799e7573ca41549b3c3b13ac47b906b369603 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_large.py @@ -0,0 +1,292 @@ +import os +import sys +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo +from torch.nn import functional as F + + +class BlockTypeA(nn.Module): + def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True): + super(BlockTypeA, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c2, out_c2, kernel_size=1), + nn.BatchNorm2d(out_c2), + nn.ReLU(inplace=True) + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c1, out_c1, kernel_size=1), + nn.BatchNorm2d(out_c1), + nn.ReLU(inplace=True) + ) + self.upscale = upscale + + def forward(self, a, b): + b = self.conv1(b) + a = self.conv2(a) + if self.upscale: + b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True) + return torch.cat((a, b), dim=1) + + +class BlockTypeB(nn.Module): + def __init__(self, in_c, out_c): + super(BlockTypeB, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), + nn.BatchNorm2d(in_c), + nn.ReLU() + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c, out_c, kernel_size=3, padding=1), + nn.BatchNorm2d(out_c), + nn.ReLU() + ) + + def forward(self, x): + x = self.conv1(x) + x + x = self.conv2(x) + return x + +class BlockTypeC(nn.Module): + def __init__(self, in_c, out_c): + super(BlockTypeC, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5), + nn.BatchNorm2d(in_c), + nn.ReLU() + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), + nn.BatchNorm2d(in_c), + nn.ReLU() + ) + self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + return x + +def _make_divisible(v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + :param v: + :param divisor: + :param min_value: + :return: + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNReLU(nn.Sequential): + def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): + self.channel_pad = out_planes - in_planes + self.stride = stride + #padding = (kernel_size - 1) // 2 + + # TFLite uses slightly different padding than PyTorch + if stride == 2: + padding = 0 + else: + padding = (kernel_size - 1) // 2 + + super(ConvBNReLU, self).__init__( + nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), + nn.BatchNorm2d(out_planes), + nn.ReLU6(inplace=True) + ) + self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride) + + + def forward(self, x): + # TFLite uses different padding + if self.stride == 2: + x = F.pad(x, (0, 1, 0, 1), "constant", 0) + #print(x.shape) + + for module in self: + if not isinstance(module, nn.MaxPool2d): + x = module(x) + return x + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = int(round(inp * expand_ratio)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers = [] + if expand_ratio != 1: + # pw + layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) + layers.extend([ + # dw + ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, pretrained=True): + """ + MobileNet V2 main class + Args: + num_classes (int): Number of classes + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + block: Module specifying inverted residual building block for mobilenet + """ + super(MobileNetV2, self).__init__() + + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + width_mult = 1.0 + round_nearest = 8 + + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + #[6, 160, 3, 2], + #[6, 320, 1, 1], + ] + + # only check the first element, assuming user knows t,c,n,s are required + if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: + raise ValueError("inverted_residual_setting should be non-empty " + "or a 4-element list, got {}".format(inverted_residual_setting)) + + # building first layer + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) + features = [ConvBNReLU(4, input_channel, stride=2)] + # building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + output_channel = _make_divisible(c * width_mult, round_nearest) + for i in range(n): + stride = s if i == 0 else 1 + features.append(block(input_channel, output_channel, stride, expand_ratio=t)) + input_channel = output_channel + + self.features = nn.Sequential(*features) + self.fpn_selected = [1, 3, 6, 10, 13] + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + 
if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + if pretrained: + self._load_pretrained_model() + + def _forward_impl(self, x): + # This exists since TorchScript doesn't support inheritance, so the superclass method + # (this one) needs to have a name other than `forward` that can be accessed in a subclass + fpn_features = [] + for i, f in enumerate(self.features): + if i > self.fpn_selected[-1]: + break + x = f(x) + if i in self.fpn_selected: + fpn_features.append(x) + + c1, c2, c3, c4, c5 = fpn_features + return c1, c2, c3, c4, c5 + + + def forward(self, x): + return self._forward_impl(x) + + def _load_pretrained_model(self): + pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth') + model_dict = {} + state_dict = self.state_dict() + for k, v in pretrain_dict.items(): + if k in state_dict: + model_dict[k] = v + state_dict.update(model_dict) + self.load_state_dict(state_dict) + + +class MobileV2_MLSD_Large(nn.Module): + def __init__(self): + super(MobileV2_MLSD_Large, self).__init__() + + self.backbone = MobileNetV2(pretrained=False) + ## A, B + self.block15 = BlockTypeA(in_c1= 64, in_c2= 96, + out_c1= 64, out_c2=64, + upscale=False) + self.block16 = BlockTypeB(128, 64) + + ## A, B + self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64, + out_c1= 64, out_c2= 64) + self.block18 = BlockTypeB(128, 64) + + ## A, B + self.block19 = BlockTypeA(in_c1=24, in_c2=64, + out_c1=64, out_c2=64) + self.block20 = BlockTypeB(128, 64) + + ## A, B, C + self.block21 = BlockTypeA(in_c1=16, in_c2=64, + out_c1=64, out_c2=64) + self.block22 = BlockTypeB(128, 64) + + self.block23 = BlockTypeC(64, 16) + + def forward(self, x): + c1, c2, c3, c4, c5 = self.backbone(x) + + x = self.block15(c4, c5) + x = self.block16(x) + + x = self.block17(c3, x) + x = self.block18(x) + + x = self.block19(c2, x) + x = self.block20(x) + + x = self.block21(c1, x) + x = self.block22(x) + x = self.block23(x) + x = x[:, 7:, :, :] + + return x \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_tiny.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_tiny.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ed633f2cc23ea1829a627fdb879ab39f641f83 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/models/mbv2_mlsd_tiny.py @@ -0,0 +1,275 @@ +import os +import sys +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo +from torch.nn import functional as F + + +class BlockTypeA(nn.Module): + def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True): + super(BlockTypeA, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c2, out_c2, kernel_size=1), + nn.BatchNorm2d(out_c2), + nn.ReLU(inplace=True) + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c1, out_c1, kernel_size=1), + nn.BatchNorm2d(out_c1), + nn.ReLU(inplace=True) + ) + self.upscale = upscale + + def forward(self, a, b): + b = self.conv1(b) + a = self.conv2(a) + b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True) + return torch.cat((a, b), dim=1) + + +class BlockTypeB(nn.Module): + def __init__(self, in_c, out_c): + super(BlockTypeB, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), + nn.BatchNorm2d(in_c), + 
nn.ReLU() + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c, out_c, kernel_size=3, padding=1), + nn.BatchNorm2d(out_c), + nn.ReLU() + ) + + def forward(self, x): + x = self.conv1(x) + x + x = self.conv2(x) + return x + +class BlockTypeC(nn.Module): + def __init__(self, in_c, out_c): + super(BlockTypeC, self).__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5), + nn.BatchNorm2d(in_c), + nn.ReLU() + ) + self.conv2 = nn.Sequential( + nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), + nn.BatchNorm2d(in_c), + nn.ReLU() + ) + self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + return x + +def _make_divisible(v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + :param v: + :param divisor: + :param min_value: + :return: + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNReLU(nn.Sequential): + def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): + self.channel_pad = out_planes - in_planes + self.stride = stride + #padding = (kernel_size - 1) // 2 + + # TFLite uses slightly different padding than PyTorch + if stride == 2: + padding = 0 + else: + padding = (kernel_size - 1) // 2 + + super(ConvBNReLU, self).__init__( + nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), + nn.BatchNorm2d(out_planes), + nn.ReLU6(inplace=True) + ) + self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride) + + + def forward(self, x): + # TFLite uses different padding + if self.stride == 2: + x = F.pad(x, (0, 1, 0, 1), "constant", 0) + #print(x.shape) + + for module in self: + if not isinstance(module, nn.MaxPool2d): + x = module(x) + return x + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = int(round(inp * expand_ratio)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers = [] + if expand_ratio != 1: + # pw + layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) + layers.extend([ + # dw + ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, pretrained=True): + """ + MobileNet V2 main class + Args: + num_classes (int): Number of classes + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + block: Module specifying inverted residual building block for mobilenet + """ + super(MobileNetV2, self).__init__() + + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + width_mult = 1.0 + 
round_nearest = 8 + + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + #[6, 96, 3, 1], + #[6, 160, 3, 2], + #[6, 320, 1, 1], + ] + + # only check the first element, assuming user knows t,c,n,s are required + if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: + raise ValueError("inverted_residual_setting should be non-empty " + "or a 4-element list, got {}".format(inverted_residual_setting)) + + # building first layer + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) + features = [ConvBNReLU(4, input_channel, stride=2)] + # building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + output_channel = _make_divisible(c * width_mult, round_nearest) + for i in range(n): + stride = s if i == 0 else 1 + features.append(block(input_channel, output_channel, stride, expand_ratio=t)) + input_channel = output_channel + self.features = nn.Sequential(*features) + + self.fpn_selected = [3, 6, 10] + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + #if pretrained: + # self._load_pretrained_model() + + def _forward_impl(self, x): + # This exists since TorchScript doesn't support inheritance, so the superclass method + # (this one) needs to have a name other than `forward` that can be accessed in a subclass + fpn_features = [] + for i, f in enumerate(self.features): + if i > self.fpn_selected[-1]: + break + x = f(x) + if i in self.fpn_selected: + fpn_features.append(x) + + c2, c3, c4 = fpn_features + return c2, c3, c4 + + + def forward(self, x): + return self._forward_impl(x) + + def _load_pretrained_model(self): + pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth') + model_dict = {} + state_dict = self.state_dict() + for k, v in pretrain_dict.items(): + if k in state_dict: + model_dict[k] = v + state_dict.update(model_dict) + self.load_state_dict(state_dict) + + +class MobileV2_MLSD_Tiny(nn.Module): + def __init__(self): + super(MobileV2_MLSD_Tiny, self).__init__() + + self.backbone = MobileNetV2(pretrained=True) + + self.block12 = BlockTypeA(in_c1= 32, in_c2= 64, + out_c1= 64, out_c2=64) + self.block13 = BlockTypeB(128, 64) + + self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64, + out_c1= 32, out_c2= 32) + self.block15 = BlockTypeB(64, 64) + + self.block16 = BlockTypeC(64, 16) + + def forward(self, x): + c2, c3, c4 = self.backbone(x) + + x = self.block12(c3, c4) + x = self.block13(x) + x = self.block14(c2, x) + x = self.block15(x) + x = self.block16(x) + x = x[:, 7:, :, :] + #print(x.shape) + x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True) + + return x \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/utils.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..28071cbf129a2bedb21a44f95d565aef7974e583 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/mlsd/utils.py @@ -0,0 +1,584 @@ +''' +modified by lihaoweicv +pytorch 
version +''' + +''' +M-LSD +Copyright 2021-present NAVER Corp. +Apache License v2.0 +''' + +import os +import numpy as np +import cv2 +import torch +from torch.nn import functional as F + + +def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5): + ''' + tpMap: + center: tpMap[1, 0, :, :] + displacement: tpMap[1, 1:5, :, :] + ''' + b, c, h, w = tpMap.shape + assert b==1, 'only support bsize==1' + displacement = tpMap[:, 1:5, :, :][0] + center = tpMap[:, 0, :, :] + heat = torch.sigmoid(center) + hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2) + keep = (hmax == heat).float() + heat = heat * keep + heat = heat.reshape(-1, ) + + scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True) + yy = torch.floor_divide(indices, w).unsqueeze(-1) + xx = torch.fmod(indices, w).unsqueeze(-1) + ptss = torch.cat((yy, xx),dim=-1) + + ptss = ptss.detach().cpu().numpy() + scores = scores.detach().cpu().numpy() + displacement = displacement.detach().cpu().numpy() + displacement = displacement.transpose((1,2,0)) + return ptss, scores, displacement + + +def pred_lines(image, model, + input_shape=[512, 512], + score_thr=0.10, + dist_thr=20.0): + h, w, _ = image.shape + + device = next(iter(model.parameters())).device + h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]] + + resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA), + np.ones([input_shape[0], input_shape[1], 1])], axis=-1) + + resized_image = resized_image.transpose((2,0,1)) + batch_image = np.expand_dims(resized_image, axis=0).astype('float32') + batch_image = (batch_image / 127.5) - 1.0 + + batch_image = torch.from_numpy(batch_image).float() + batch_image = batch_image.to(device) + outputs = model(batch_image) + pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3) + start = vmap[:, :, :2] + end = vmap[:, :, 2:] + dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1)) + + segments_list = [] + for center, score in zip(pts, pts_score): + y, x = center + distance = dist_map[y, x] + if score > score_thr and distance > dist_thr: + disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :] + x_start = x + disp_x_start + y_start = y + disp_y_start + x_end = x + disp_x_end + y_end = y + disp_y_end + segments_list.append([x_start, y_start, x_end, y_end]) + + lines = 2 * np.array(segments_list) # 256 > 512 + lines[:, 0] = lines[:, 0] * w_ratio + lines[:, 1] = lines[:, 1] * h_ratio + lines[:, 2] = lines[:, 2] * w_ratio + lines[:, 3] = lines[:, 3] * h_ratio + + return lines + + +def pred_squares(image, + model, + input_shape=[512, 512], + params={'score': 0.06, + 'outside_ratio': 0.28, + 'inside_ratio': 0.45, + 'w_overlap': 0.0, + 'w_degree': 1.95, + 'w_length': 0.0, + 'w_area': 1.86, + 'w_center': 0.14}): + ''' + shape = [height, width] + ''' + h, w, _ = image.shape + original_shape = [h, w] + device = next(iter(model.parameters())).device + + resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA), + np.ones([input_shape[0], input_shape[1], 1])], axis=-1) + resized_image = resized_image.transpose((2, 0, 1)) + batch_image = np.expand_dims(resized_image, axis=0).astype('float32') + batch_image = (batch_image / 127.5) - 1.0 + + batch_image = torch.from_numpy(batch_image).float().to(device) + outputs = model(batch_image) + + pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3) + start = vmap[:, :, :2] # (x, y) + end = vmap[:, :, 2:] # (x, y) + dist_map = 
np.sqrt(np.sum((start - end) ** 2, axis=-1)) + + junc_list = [] + segments_list = [] + for junc, score in zip(pts, pts_score): + y, x = junc + distance = dist_map[y, x] + if score > params['score'] and distance > 20.0: + junc_list.append([x, y]) + disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :] + d_arrow = 1.0 + x_start = x + d_arrow * disp_x_start + y_start = y + d_arrow * disp_y_start + x_end = x + d_arrow * disp_x_end + y_end = y + d_arrow * disp_y_end + segments_list.append([x_start, y_start, x_end, y_end]) + + segments = np.array(segments_list) + + ####### post processing for squares + # 1. get unique lines + point = np.array([[0, 0]]) + point = point[0] + start = segments[:, :2] + end = segments[:, 2:] + diff = start - end + a = diff[:, 1] + b = -diff[:, 0] + c = a * start[:, 0] + b * start[:, 1] + + d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10) + theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi + theta[theta < 0.0] += 180 + hough = np.concatenate([d[:, None], theta[:, None]], axis=-1) + + d_quant = 1 + theta_quant = 2 + hough[:, 0] //= d_quant + hough[:, 1] //= theta_quant + _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True) + + acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32') + idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1 + yx_indices = hough[indices, :].astype('int32') + acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts + idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices + + acc_map_np = acc_map + # acc_map = acc_map[None, :, :, None] + # + # ### fast suppression using tensorflow op + # acc_map = tf.constant(acc_map, dtype=tf.float32) + # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map) + # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32) + # flatten_acc_map = tf.reshape(acc_map, [1, -1]) + # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts)) + # _, h, w, _ = acc_map.shape + # y = tf.expand_dims(topk_indices // w, axis=-1) + # x = tf.expand_dims(topk_indices % w, axis=-1) + # yx = tf.concat([y, x], axis=-1) + + ### fast suppression using pytorch op + acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0) + _,_, h, w = acc_map.shape + max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2) + acc_map = acc_map * ( (acc_map == max_acc_map).float() ) + flatten_acc_map = acc_map.reshape([-1, ]) + + scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True) + yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1) + xx = torch.fmod(indices, w).unsqueeze(-1) + yx = torch.cat((yy, xx), dim=-1) + + yx = yx.detach().cpu().numpy() + + topk_values = scores.detach().cpu().numpy() + indices = idx_map[yx[:, 0], yx[:, 1]] + basis = 5 // 2 + + merged_segments = [] + for yx_pt, max_indice, value in zip(yx, indices, topk_values): + y, x = yx_pt + if max_indice == -1 or value == 0: + continue + segment_list = [] + for y_offset in range(-basis, basis + 1): + for x_offset in range(-basis, basis + 1): + indice = idx_map[y + y_offset, x + x_offset] + cnt = int(acc_map_np[y + y_offset, x + x_offset]) + if indice != -1: + segment_list.append(segments[indice]) + if cnt > 1: + check_cnt = 1 + current_hough = hough[indice] + for new_indice, new_hough in enumerate(hough): + if (current_hough == new_hough).all() and indice != new_indice: + segment_list.append(segments[new_indice]) + check_cnt += 1 + 
if check_cnt == cnt: + break + group_segments = np.array(segment_list).reshape([-1, 2]) + sorted_group_segments = np.sort(group_segments, axis=0) + x_min, y_min = sorted_group_segments[0, :] + x_max, y_max = sorted_group_segments[-1, :] + + deg = theta[max_indice] + if deg >= 90: + merged_segments.append([x_min, y_max, x_max, y_min]) + else: + merged_segments.append([x_min, y_min, x_max, y_max]) + + # 2. get intersections + new_segments = np.array(merged_segments) # (x1, y1, x2, y2) + start = new_segments[:, :2] # (x1, y1) + end = new_segments[:, 2:] # (x2, y2) + new_centers = (start + end) / 2.0 + diff = start - end + dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1)) + + # ax + by = c + a = diff[:, 1] + b = -diff[:, 0] + c = a * start[:, 0] + b * start[:, 1] + pre_det = a[:, None] * b[None, :] + det = pre_det - np.transpose(pre_det) + + pre_inter_y = a[:, None] * c[None, :] + inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10) + pre_inter_x = c[:, None] * b[None, :] + inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10) + inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32') + + # 3. get corner information + # 3.1 get distance + ''' + dist_segments: + | dist(0), dist(1), dist(2), ...| + dist_inter_to_segment1: + | dist(inter,0), dist(inter,0), dist(inter,0), ... | + | dist(inter,1), dist(inter,1), dist(inter,1), ... | + ... + dist_inter_to_semgnet2: + | dist(inter,0), dist(inter,1), dist(inter,2), ... | + | dist(inter,0), dist(inter,1), dist(inter,2), ... | + ... + ''' + + dist_inter_to_segment1_start = np.sqrt( + np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] + dist_inter_to_segment1_end = np.sqrt( + np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] + dist_inter_to_segment2_start = np.sqrt( + np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] + dist_inter_to_segment2_end = np.sqrt( + np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1] + + # sort ascending + dist_inter_to_segment1 = np.sort( + np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1), + axis=-1) # [n_batch, n_batch, 2] + dist_inter_to_segment2 = np.sort( + np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1), + axis=-1) # [n_batch, n_batch, 2] + + # 3.2 get degree + inter_to_start = new_centers[:, None, :] - inter_pts + deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi + deg_inter_to_start[deg_inter_to_start < 0.0] += 360 + inter_to_end = new_centers[None, :, :] - inter_pts + deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi + deg_inter_to_end[deg_inter_to_end < 0.0] += 360 + + ''' + B -- G + | | + C -- R + B : blue / G: green / C: cyan / R: red + + 0 -- 1 + | | + 3 -- 2 + ''' + # rename variables + deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end + # sort deg ascending + deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1) + + deg_diff_map = np.abs(deg1_map - deg2_map) + # we only consider the smallest degree of intersect + deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180] + + # define available degree range + deg_range = [60, 120] + + corner_dict = {corner_info: [] for corner_info in range(4)} + inter_points = [] + for i in range(inter_pts.shape[0]): + 
for j in range(i + 1, inter_pts.shape[1]): + # i, j > line index, always i < j + x, y = inter_pts[i, j, :] + deg1, deg2 = deg_sort[i, j, :] + deg_diff = deg_diff_map[i, j] + + check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1] + + outside_ratio = params['outside_ratio'] # over ratio >>> drop it! + inside_ratio = params['inside_ratio'] # over ratio >>> drop it! + check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \ + dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \ + (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \ + dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \ + ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \ + dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \ + (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \ + dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio)) + + if check_degree and check_distance: + corner_info = None + + if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \ + (deg2 >= 315 and deg1 >= 45 and deg1 <= 120): + corner_info, color_info = 0, 'blue' + elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225): + corner_info, color_info = 1, 'green' + elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315): + corner_info, color_info = 2, 'black' + elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \ + (deg2 >= 315 and deg1 >= 225 and deg1 <= 315): + corner_info, color_info = 3, 'cyan' + else: + corner_info, color_info = 4, 'red' # we don't use it + continue + + corner_dict[corner_info].append([x, y, i, j]) + inter_points.append([x, y]) + + square_list = [] + connect_list = [] + segments_list = [] + for corner0 in corner_dict[0]: + for corner1 in corner_dict[1]: + connect01 = False + for corner0_line in corner0[2:]: + if corner0_line in corner1[2:]: + connect01 = True + break + if connect01: + for corner2 in corner_dict[2]: + connect12 = False + for corner1_line in corner1[2:]: + if corner1_line in corner2[2:]: + connect12 = True + break + if connect12: + for corner3 in corner_dict[3]: + connect23 = False + for corner2_line in corner2[2:]: + if corner2_line in corner3[2:]: + connect23 = True + break + if connect23: + for corner3_line in corner3[2:]: + if corner3_line in corner0[2:]: + # SQUARE!!! + ''' + 0 -- 1 + | | + 3 -- 2 + square_list: + order: 0 > 1 > 2 > 3 + | x0, y0, x1, y1, x2, y2, x3, y3 | + | x0, y0, x1, y1, x2, y2, x3, y3 | + ... + connect_list: + order: 01 > 12 > 23 > 30 + | line_idx01, line_idx12, line_idx23, line_idx30 | + | line_idx01, line_idx12, line_idx23, line_idx30 | + ... + segments_list: + order: 0 > 1 > 2 > 3 + | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j | + | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j | + ... 
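+                                        (each line_idx above comes from a corner
+                                        entry [x, y, i, j]: i and j are the indices
+                                        of the two merged segments whose
+                                        intersection forms that corner)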
+ ''' + square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2]) + connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line]) + segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:]) + + def check_outside_inside(segments_info, connect_idx): + # return 'outside or inside', min distance, cover_param, peri_param + if connect_idx == segments_info[0]: + check_dist_mat = dist_inter_to_segment1 + else: + check_dist_mat = dist_inter_to_segment2 + + i, j = segments_info + min_dist, max_dist = check_dist_mat[i, j, :] + connect_dist = dist_segments[connect_idx] + if max_dist > connect_dist: + return 'outside', min_dist, 0, 1 + else: + return 'inside', min_dist, -1, -1 + + top_square = None + + try: + map_size = input_shape[0] / 2 + squares = np.array(square_list).reshape([-1, 4, 2]) + score_array = [] + connect_array = np.array(connect_list) + segments_array = np.array(segments_list).reshape([-1, 4, 2]) + + # get degree of corners: + squares_rollup = np.roll(squares, 1, axis=1) + squares_rolldown = np.roll(squares, -1, axis=1) + vec1 = squares_rollup - squares + normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10) + vec2 = squares_rolldown - squares + normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10) + inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4] + squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4] + + # get square score + overlap_scores = [] + degree_scores = [] + length_scores = [] + + for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree): + ''' + 0 -- 1 + | | + 3 -- 2 + + # segments: [4, 2] + # connects: [4] + ''' + + ###################################### OVERLAP SCORES + cover = 0 + perimeter = 0 + # check 0 > 1 > 2 > 3 + square_length = [] + + for start_idx in range(4): + end_idx = (start_idx + 1) % 4 + + connect_idx = connects[start_idx] # segment idx of segment01 + start_segments = segments[start_idx] + end_segments = segments[end_idx] + + start_point = square[start_idx] + end_point = square[end_idx] + + # check whether outside or inside + start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments, + connect_idx) + end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx) + + cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min + perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min + + square_length.append( + dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min) + + overlap_scores.append(cover / perimeter) + ###################################### + ###################################### DEGREE SCORES + ''' + deg0 vs deg2 + deg1 vs deg3 + ''' + deg0, deg1, deg2, deg3 = degree + deg_ratio1 = deg0 / deg2 + if deg_ratio1 > 1.0: + deg_ratio1 = 1 / deg_ratio1 + deg_ratio2 = deg1 / deg3 + if deg_ratio2 > 1.0: + deg_ratio2 = 1 / deg_ratio2 + degree_scores.append((deg_ratio1 + deg_ratio2) / 2) + ###################################### + ###################################### LENGTH SCORES + ''' + len0 vs len2 + len1 vs len3 + ''' + len0, len1, len2, len3 = square_length + len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0 + len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1 + length_scores.append((len_ratio1 + len_ratio2) / 2) + + 
###################################### + + overlap_scores = np.array(overlap_scores) + overlap_scores /= np.max(overlap_scores) + + degree_scores = np.array(degree_scores) + # degree_scores /= np.max(degree_scores) + + length_scores = np.array(length_scores) + + ###################################### AREA SCORES + area_scores = np.reshape(squares, [-1, 4, 2]) + area_x = area_scores[:, :, 0] + area_y = area_scores[:, :, 1] + correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0] + area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1) + area_scores = 0.5 * np.abs(area_scores + correction) + area_scores /= (map_size * map_size) # np.max(area_scores) + ###################################### + + ###################################### CENTER SCORES + centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2] + # squares: [n, 4, 2] + square_centers = np.mean(squares, axis=1) # [n, 2] + center2center = np.sqrt(np.sum((centers - square_centers) ** 2)) + center_scores = center2center / (map_size / np.sqrt(2.0)) + + ''' + score_w = [overlap, degree, area, center, length] + ''' + score_w = [0.0, 1.0, 10.0, 0.5, 1.0] + score_array = params['w_overlap'] * overlap_scores \ + + params['w_degree'] * degree_scores \ + + params['w_area'] * area_scores \ + - params['w_center'] * center_scores \ + + params['w_length'] * length_scores + + best_square = [] + + sorted_idx = np.argsort(score_array)[::-1] + score_array = score_array[sorted_idx] + squares = squares[sorted_idx] + + except Exception as e: + pass + + '''return list + merged_lines, squares, scores + ''' + + try: + new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1] + new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0] + new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1] + new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0] + except: + new_segments = [] + + try: + squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1] + squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0] + except: + squares = [] + score_array = [] + + try: + inter_points = np.array(inter_points) + inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1] + inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0] + except: + inter_points = [] + + return new_segments, squares, score_array, inter_points diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..16a9d56a3d4c15e4f34ac5426459c58487b01520 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Caroline Chan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c03f81f6ff9ae42fb9e5a8495bf45338acb51b1b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/__init__.py @@ -0,0 +1,85 @@ +import os +import types +import warnings + +import cv2 +import numpy as np +import torch +import torchvision.transforms as transforms +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .nets.NNET import NNET + + +# load model +def load_checkpoint(fpath, model): + ckpt = torch.load(fpath, map_location='cpu')['model'] + + load_dict = {} + for k, v in ckpt.items(): + if k.startswith('module.'): + k_ = k.replace('module.', '') + load_dict[k_] = v + else: + load_dict[k] = v + + model.load_state_dict(load_dict) + return model + +class NormalBaeDetector: + def __init__(self, model): + self.model = model + self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "scannet.pt" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + args = types.SimpleNamespace() + args.mode = 'client' + args.architecture = 'BN' + args.pretrained = 'scannet' + args.sampling_ratio = 0.4 + args.importance_ratio = 0.7 + model = NNET(args) + model = load_checkpoint(model_path, model) + model.eval() + + return cls(model) + + def to(self, device): + self.model.to(device) + return self + + + def __call__(self, input_image, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + device = next(iter(self.model.parameters())).device + image_normal = detected_map + with torch.no_grad(): + image_normal = torch.from_numpy(image_normal).float().to(device) + image_normal = image_normal / 255.0 + image_normal = rearrange(image_normal, 'h w c -> 1 c h w') + image_normal = self.norm(image_normal) + + normal = self.model(image_normal) + normal = normal[0][-1][:, :3] + # d = torch.sum(normal ** 2.0, dim=1, keepdim=True) ** 0.5 + # d = torch.maximum(d, torch.ones_like(d) * 1e-5) + # normal /= d + normal = ((normal + 1) * 0.5).clip(0, 1) + + normal = rearrange(normal[0], 'c h w -> h w c').cpu().numpy() + normal_image = (normal * 255.0).clip(0, 255).astype(np.uint8) + + detected_map = remove_pad(HWC3(normal_image)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map + \ No newline at end of file diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/NNET.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/NNET.py new file mode 100644 index 0000000000000000000000000000000000000000..3ddbc50c3ac18aa4b7f16779fe3c0133981ecc7a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/NNET.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .submodules.encoder import Encoder +from .submodules.decoder import Decoder + + +class NNET(nn.Module): + def __init__(self, args): + super(NNET, self).__init__() + self.encoder = Encoder() + self.decoder = Decoder(args) + + def get_1x_lr_params(self): # lr/10 learning rate + return self.encoder.parameters() + + def get_10x_lr_params(self): # lr learning rate + return self.decoder.parameters() + + def forward(self, img, **kwargs): + return self.decoder(self.encoder(img), **kwargs) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/baseline.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/baseline.py new file mode 100644 index 0000000000000000000000000000000000000000..602d0fbdac1acc9ede9bc1f2e10a5df78831ce9d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/baseline.py @@ -0,0 +1,85 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .submodules.submodules import UpSampleBN, norm_normalize + + +# This is the baseline encoder-decoder we used in the ablation study +class NNET(nn.Module): + def __init__(self, args=None): + super(NNET, self).__init__() + self.encoder = Encoder() + self.decoder = Decoder(num_classes=4) + + def forward(self, x, **kwargs): + out = self.decoder(self.encoder(x), **kwargs) + + # Bilinearly upsample the output to match the input resolution + up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False) + + # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa) + up_out = norm_normalize(up_out) + return up_out + + def get_1x_lr_params(self): # lr/10 learning rate + return self.encoder.parameters() + + def get_10x_lr_params(self): # lr learning rate + modules = [self.decoder] + for m in modules: + yield from m.parameters() + + +# Encoder +class Encoder(nn.Module): + def __init__(self): + super(Encoder, self).__init__() + + basemodel_name = 'tf_efficientnet_b5_ap' + basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) + + # Remove last layer + basemodel.global_pool = nn.Identity() + basemodel.classifier = nn.Identity() + + self.original_model = basemodel + + def forward(self, x): + features = [x] + for k, v in self.original_model._modules.items(): + if (k == 'blocks'): + for ki, vi in v._modules.items(): + features.append(vi(features[-1])) + else: + features.append(v(features[-1])) + return features + + +# Decoder (no pixel-wise MLP, no uncertainty-guided sampling) +class Decoder(nn.Module): + def __init__(self, num_classes=4): + super(Decoder, self).__init__() + self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0) + self.up1 = 
UpSampleBN(skip_input=2048 + 176, output_features=1024)
+        self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
+        self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
+        self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
+        self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1)
+
+    def forward(self, features):
+        x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]
+        x_d0 = self.conv2(x_block4)
+        x_d1 = self.up1(x_d0, x_block3)
+        x_d2 = self.up2(x_d1, x_block2)
+        x_d3 = self.up3(x_d2, x_block1)
+        x_d4 = self.up4(x_d3, x_block0)
+        out = self.conv3(x_d4)
+        return out
+
+
+if __name__ == '__main__':
+    model = NNET()  # smoke-test the baseline encoder-decoder defined in this file
+    x = torch.rand(2, 3, 480, 640)
+    out = model(x)
+    print(out.shape)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/decoder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..993203d1792311f1c492091eaea3c1ac9088187f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/decoder.py
@@ -0,0 +1,202 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from .submodules import UpSampleBN, UpSampleGN, norm_normalize, sample_points
+
+
+class Decoder(nn.Module):
+    def __init__(self, args):
+        super(Decoder, self).__init__()
+
+        # hyper-parameter for sampling
+        self.sampling_ratio = args.sampling_ratio
+        self.importance_ratio = args.importance_ratio
+
+        # feature-map
+        self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
+        if args.architecture == 'BN':
+            self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
+            self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
+            self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
+            self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
+
+        elif args.architecture == 'GN':
+            self.up1 = UpSampleGN(skip_input=2048 + 176, output_features=1024)
+            self.up2 = UpSampleGN(skip_input=1024 + 64, output_features=512)
+            self.up3 = UpSampleGN(skip_input=512 + 40, output_features=256)
+            self.up4 = UpSampleGN(skip_input=256 + 24, output_features=128)
+
+        else:
+            raise Exception('invalid architecture')
+
+        # produces 1/8 res output
+        self.out_conv_res8 = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
+
+        # produces 1/4 res output
+        self.out_conv_res4 = nn.Sequential(
+            nn.Conv1d(512 + 4, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 4, kernel_size=1),
+        )
+
+        # produces 1/2 res output
+        self.out_conv_res2 = nn.Sequential(
+            nn.Conv1d(256 + 4, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 4, kernel_size=1),
+        )
+
+        # produces 1/1 res output
+        self.out_conv_res1 = nn.Sequential(
+            nn.Conv1d(128 + 4, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
+            nn.Conv1d(128, 4,
kernel_size=1), + ) + + def forward(self, features, gt_norm_mask=None, mode='test'): + x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11] + + # generate feature-map + + x_d0 = self.conv2(x_block4) # x_d0 : [2, 2048, 15, 20] 1/32 res + x_d1 = self.up1(x_d0, x_block3) # x_d1 : [2, 1024, 30, 40] 1/16 res + x_d2 = self.up2(x_d1, x_block2) # x_d2 : [2, 512, 60, 80] 1/8 res + x_d3 = self.up3(x_d2, x_block1) # x_d3: [2, 256, 120, 160] 1/4 res + x_d4 = self.up4(x_d3, x_block0) # x_d4: [2, 128, 240, 320] 1/2 res + + # 1/8 res output + out_res8 = self.out_conv_res8(x_d2) # out_res8: [2, 4, 60, 80] 1/8 res output + out_res8 = norm_normalize(out_res8) # out_res8: [2, 4, 60, 80] 1/8 res output + + ################################################################################################################ + # out_res4 + ################################################################################################################ + + if mode == 'train': + # upsampling ... out_res8: [2, 4, 60, 80] -> out_res8_res4: [2, 4, 120, 160] + out_res8_res4 = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True) + B, _, H, W = out_res8_res4.shape + + # samples: [B, 1, N, 2] + point_coords_res4, rows_int, cols_int = sample_points(out_res8_res4.detach(), gt_norm_mask, + sampling_ratio=self.sampling_ratio, + beta=self.importance_ratio) + + # output (needed for evaluation / visualization) + out_res4 = out_res8_res4 + + # grid_sample feature-map + feat_res4 = F.grid_sample(x_d2, point_coords_res4, mode='bilinear', align_corners=True) # (B, 512, 1, N) + init_pred = F.grid_sample(out_res8, point_coords_res4, mode='bilinear', align_corners=True) # (B, 4, 1, N) + feat_res4 = torch.cat([feat_res4, init_pred], dim=1) # (B, 512+4, 1, N) + + # prediction (needed to compute loss) + samples_pred_res4 = self.out_conv_res4(feat_res4[:, :, 0, :]) # (B, 4, N) + samples_pred_res4 = norm_normalize(samples_pred_res4) # (B, 4, N) - normalized + + for i in range(B): + out_res4[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res4[i, :, :] + + else: + # grid_sample feature-map + feat_map = F.interpolate(x_d2, scale_factor=2, mode='bilinear', align_corners=True) + init_pred = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True) + feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 512+4, H, W) + B, _, H, W = feat_map.shape + + # try all pixels + out_res4 = self.out_conv_res4(feat_map.view(B, 512 + 4, -1)) # (B, 4, N) + out_res4 = norm_normalize(out_res4) # (B, 4, N) - normalized + out_res4 = out_res4.view(B, 4, H, W) + samples_pred_res4 = point_coords_res4 = None + + ################################################################################################################ + # out_res2 + ################################################################################################################ + + if mode == 'train': + + # upsampling ... 
out_res4: [2, 4, 120, 160] -> out_res4_res2: [2, 4, 240, 320] + out_res4_res2 = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True) + B, _, H, W = out_res4_res2.shape + + # samples: [B, 1, N, 2] + point_coords_res2, rows_int, cols_int = sample_points(out_res4_res2.detach(), gt_norm_mask, + sampling_ratio=self.sampling_ratio, + beta=self.importance_ratio) + + # output (needed for evaluation / visualization) + out_res2 = out_res4_res2 + + # grid_sample feature-map + feat_res2 = F.grid_sample(x_d3, point_coords_res2, mode='bilinear', align_corners=True) # (B, 256, 1, N) + init_pred = F.grid_sample(out_res4, point_coords_res2, mode='bilinear', align_corners=True) # (B, 4, 1, N) + feat_res2 = torch.cat([feat_res2, init_pred], dim=1) # (B, 256+4, 1, N) + + # prediction (needed to compute loss) + samples_pred_res2 = self.out_conv_res2(feat_res2[:, :, 0, :]) # (B, 4, N) + samples_pred_res2 = norm_normalize(samples_pred_res2) # (B, 4, N) - normalized + + for i in range(B): + out_res2[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res2[i, :, :] + + else: + # grid_sample feature-map + feat_map = F.interpolate(x_d3, scale_factor=2, mode='bilinear', align_corners=True) + init_pred = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True) + feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 512+4, H, W) + B, _, H, W = feat_map.shape + + out_res2 = self.out_conv_res2(feat_map.view(B, 256 + 4, -1)) # (B, 4, N) + out_res2 = norm_normalize(out_res2) # (B, 4, N) - normalized + out_res2 = out_res2.view(B, 4, H, W) + samples_pred_res2 = point_coords_res2 = None + + ################################################################################################################ + # out_res1 + ################################################################################################################ + + if mode == 'train': + # upsampling ... 
out_res4: [2, 4, 120, 160] -> out_res4_res2: [2, 4, 240, 320] + out_res2_res1 = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True) + B, _, H, W = out_res2_res1.shape + + # samples: [B, 1, N, 2] + point_coords_res1, rows_int, cols_int = sample_points(out_res2_res1.detach(), gt_norm_mask, + sampling_ratio=self.sampling_ratio, + beta=self.importance_ratio) + + # output (needed for evaluation / visualization) + out_res1 = out_res2_res1 + + # grid_sample feature-map + feat_res1 = F.grid_sample(x_d4, point_coords_res1, mode='bilinear', align_corners=True) # (B, 128, 1, N) + init_pred = F.grid_sample(out_res2, point_coords_res1, mode='bilinear', align_corners=True) # (B, 4, 1, N) + feat_res1 = torch.cat([feat_res1, init_pred], dim=1) # (B, 128+4, 1, N) + + # prediction (needed to compute loss) + samples_pred_res1 = self.out_conv_res1(feat_res1[:, :, 0, :]) # (B, 4, N) + samples_pred_res1 = norm_normalize(samples_pred_res1) # (B, 4, N) - normalized + + for i in range(B): + out_res1[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res1[i, :, :] + + else: + # grid_sample feature-map + feat_map = F.interpolate(x_d4, scale_factor=2, mode='bilinear', align_corners=True) + init_pred = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True) + feat_map = torch.cat([feat_map, init_pred], dim=1) # (B, 512+4, H, W) + B, _, H, W = feat_map.shape + + out_res1 = self.out_conv_res1(feat_map.view(B, 128 + 4, -1)) # (B, 4, N) + out_res1 = norm_normalize(out_res1) # (B, 4, N) - normalized + out_res1 = out_res1.view(B, 4, H, W) + samples_pred_res1 = point_coords_res1 = None + + return [out_res8, out_res4, out_res2, out_res1], \ + [out_res8, samples_pred_res4, samples_pred_res2, samples_pred_res1], \ + [None, point_coords_res4, point_coords_res2, point_coords_res1] + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/.gitignore b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f04e5fff91094d9b9c662bba977d762bf71516ac --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/.gitignore @@ -0,0 +1,109 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# pytorch stuff +*.pth +*.onnx +*.pb + +trained_models/ +.fuse_hidden* diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/BENCHMARK.md b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/BENCHMARK.md new file mode 100644 index 0000000000000000000000000000000000000000..6ead7171ce5a5bbd2702f6b5c825dc9808ba5658 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/BENCHMARK.md @@ -0,0 +1,555 @@ +# Model Performance Benchmarks + +All benchmarks run as per: + +``` +python onnx_export.py --model mobilenetv3_100 ./mobilenetv3_100.onnx +python onnx_optimize.py ./mobilenetv3_100.onnx --output mobilenetv3_100-opt.onnx +python onnx_to_caffe.py ./mobilenetv3_100.onnx --c2-prefix mobilenetv3 +python onnx_to_caffe.py ./mobilenetv3_100-opt.onnx --c2-prefix mobilenetv3-opt +python caffe2_benchmark.py --c2-init ./mobilenetv3.init.pb --c2-predict ./mobilenetv3.predict.pb +python caffe2_benchmark.py --c2-init ./mobilenetv3-opt.init.pb --c2-predict ./mobilenetv3-opt.predict.pb +``` + +## EfficientNet-B0 + +### Unoptimized +``` +Main run finished. Milliseconds per iter: 49.2862. Iters per second: 20.2897 +Time per operator type: + 29.7378 ms. 60.5145%. Conv + 12.1785 ms. 24.7824%. Sigmoid + 3.62811 ms. 7.38297%. SpatialBN + 2.98444 ms. 6.07314%. Mul + 0.326902 ms. 0.665225%. AveragePool + 0.197317 ms. 0.401528%. FC + 0.0852877 ms. 0.173555%. Add + 0.0032607 ms. 0.00663532%. Squeeze + 49.1416 ms in Total +FLOP per operator type: + 0.76907 GFLOP. 95.2696%. Conv + 0.0269508 GFLOP. 3.33857%. SpatialBN + 0.00846444 GFLOP. 1.04855%. Mul + 0.002561 GFLOP. 0.317248%. FC + 0.000210112 GFLOP. 0.0260279%. Add + 0.807256 GFLOP in Total +Feature Memory Read per operator type: + 58.5253 MB. 43.0891%. Mul + 43.2015 MB. 31.807%. Conv + 27.2869 MB. 20.0899%. SpatialBN + 5.12912 MB. 3.77631%. FC + 1.6809 MB. 1.23756%. Add + 135.824 MB in Total +Feature Memory Written per operator type: + 33.8578 MB. 38.1965%. Mul + 26.9881 MB. 30.4465%. Conv + 26.9508 MB. 30.4044%. SpatialBN + 0.840448 MB. 0.948147%. Add + 0.004 MB. 0.00451258%. FC + 88.6412 MB in Total +Parameter Memory per operator type: + 15.8248 MB. 74.9391%. Conv + 5.124 MB. 24.265%. FC + 0.168064 MB. 0.795877%. SpatialBN + 0 MB. 0%. Add + 0 MB. 0%. Mul + 21.1168 MB in Total +``` +### Optimized +``` +Main run finished. Milliseconds per iter: 46.0838. Iters per second: 21.6996 +Time per operator type: + 29.776 ms. 65.002%. Conv + 12.2803 ms. 26.8084%. Sigmoid + 3.15073 ms. 6.87815%. Mul + 0.328651 ms. 0.717456%. AveragePool + 0.186237 ms. 0.406563%. FC + 0.0832429 ms. 0.181722%. 
Add + 0.0026184 ms. 0.00571606%. Squeeze + 45.8078 ms in Total +FLOP per operator type: + 0.76907 GFLOP. 98.5601%. Conv + 0.00846444 GFLOP. 1.08476%. Mul + 0.002561 GFLOP. 0.328205%. FC + 0.000210112 GFLOP. 0.0269269%. Add + 0.780305 GFLOP in Total +Feature Memory Read per operator type: + 58.5253 MB. 53.8803%. Mul + 43.2855 MB. 39.8501%. Conv + 5.12912 MB. 4.72204%. FC + 1.6809 MB. 1.54749%. Add + 108.621 MB in Total +Feature Memory Written per operator type: + 33.8578 MB. 54.8834%. Mul + 26.9881 MB. 43.7477%. Conv + 0.840448 MB. 1.36237%. Add + 0.004 MB. 0.00648399%. FC + 61.6904 MB in Total +Parameter Memory per operator type: + 15.8248 MB. 75.5403%. Conv + 5.124 MB. 24.4597%. FC + 0 MB. 0%. Add + 0 MB. 0%. Mul + 20.9488 MB in Total +``` + +## EfficientNet-B1 +### Optimized +``` +Main run finished. Milliseconds per iter: 71.8102. Iters per second: 13.9256 +Time per operator type: + 45.7915 ms. 66.3206%. Conv + 17.8718 ms. 25.8841%. Sigmoid + 4.44132 ms. 6.43244%. Mul + 0.51001 ms. 0.738658%. AveragePool + 0.233283 ms. 0.337868%. Add + 0.194986 ms. 0.282402%. FC + 0.00268255 ms. 0.00388519%. Squeeze + 69.0456 ms in Total +FLOP per operator type: + 1.37105 GFLOP. 98.7673%. Conv + 0.0138759 GFLOP. 0.99959%. Mul + 0.002561 GFLOP. 0.184489%. FC + 0.000674432 GFLOP. 0.0485847%. Add + 1.38816 GFLOP in Total +Feature Memory Read per operator type: + 94.624 MB. 54.0789%. Mul + 69.8255 MB. 39.9062%. Conv + 5.39546 MB. 3.08357%. Add + 5.12912 MB. 2.93136%. FC + 174.974 MB in Total +Feature Memory Written per operator type: + 55.5035 MB. 54.555%. Mul + 43.5333 MB. 42.7894%. Conv + 2.69773 MB. 2.65163%. Add + 0.004 MB. 0.00393165%. FC + 101.739 MB in Total +Parameter Memory per operator type: + 25.7479 MB. 83.4024%. Conv + 5.124 MB. 16.5976%. FC + 0 MB. 0%. Add + 0 MB. 0%. Mul + 30.8719 MB in Total +``` + +## EfficientNet-B2 +### Optimized +``` +Main run finished. Milliseconds per iter: 92.28. Iters per second: 10.8366 +Time per operator type: + 61.4627 ms. 67.5845%. Conv + 22.7458 ms. 25.0113%. Sigmoid + 5.59931 ms. 6.15701%. Mul + 0.642567 ms. 0.706568%. AveragePool + 0.272795 ms. 0.299965%. Add + 0.216178 ms. 0.237709%. FC + 0.00268895 ms. 0.00295677%. Squeeze + 90.942 ms in Total +FLOP per operator type: + 1.98431 GFLOP. 98.9343%. Conv + 0.0177039 GFLOP. 0.882686%. Mul + 0.002817 GFLOP. 0.140451%. FC + 0.000853984 GFLOP. 0.0425782%. Add + 2.00568 GFLOP in Total +Feature Memory Read per operator type: + 120.609 MB. 54.9637%. Mul + 86.3512 MB. 39.3519%. Conv + 6.83187 MB. 3.11341%. Add + 5.64163 MB. 2.571%. FC + 219.433 MB in Total +Feature Memory Written per operator type: + 70.8155 MB. 54.6573%. Mul + 55.3273 MB. 42.7031%. Conv + 3.41594 MB. 2.63651%. Add + 0.004 MB. 0.00308731%. FC + 129.563 MB in Total +Parameter Memory per operator type: + 30.4721 MB. 84.3913%. Conv + 5.636 MB. 15.6087%. FC + 0 MB. 0%. Add + 0 MB. 0%. Mul + 36.1081 MB in Total +``` + +## MixNet-M +### Optimized +``` +Main run finished. Milliseconds per iter: 63.1122. Iters per second: 15.8448 +Time per operator type: + 48.1139 ms. 75.2052%. Conv + 7.1341 ms. 11.1511%. Sigmoid + 2.63706 ms. 4.12189%. SpatialBN + 1.73186 ms. 2.70701%. Mul + 1.38707 ms. 2.16809%. Split + 1.29322 ms. 2.02139%. Concat + 1.00093 ms. 1.56452%. Relu + 0.235309 ms. 0.367803%. Add + 0.221579 ms. 0.346343%. FC + 0.219315 ms. 0.342803%. AveragePool + 0.00250145 ms. 0.00390993%. Squeeze + 63.9768 ms in Total +FLOP per operator type: + 0.675273 GFLOP. 95.5827%. Conv + 0.0221072 GFLOP. 3.12921%. SpatialBN + 0.00538445 GFLOP. 0.762152%. Mul + 0.003073 GFLOP. 
0.434973%. FC + 0.000642488 GFLOP. 0.0909421%. Add + 0 GFLOP. 0%. Concat + 0 GFLOP. 0%. Relu + 0.70648 GFLOP in Total +Feature Memory Read per operator type: + 46.8424 MB. 30.502%. Conv + 36.8626 MB. 24.0036%. Mul + 22.3152 MB. 14.5309%. SpatialBN + 22.1074 MB. 14.3955%. Concat + 14.1496 MB. 9.21372%. Relu + 6.15414 MB. 4.00735%. FC + 5.1399 MB. 3.34692%. Add + 153.571 MB in Total +Feature Memory Written per operator type: + 32.7672 MB. 28.4331%. Conv + 22.1072 MB. 19.1831%. Concat + 22.1072 MB. 19.1831%. SpatialBN + 21.5378 MB. 18.689%. Mul + 14.1496 MB. 12.2781%. Relu + 2.56995 MB. 2.23003%. Add + 0.004 MB. 0.00347092%. FC + 115.243 MB in Total +Parameter Memory per operator type: + 13.7059 MB. 68.674%. Conv + 6.148 MB. 30.8049%. FC + 0.104 MB. 0.521097%. SpatialBN + 0 MB. 0%. Add + 0 MB. 0%. Concat + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 19.9579 MB in Total +``` + +## TF MobileNet-V3 Large 1.0 + +### Optimized +``` +Main run finished. Milliseconds per iter: 22.0495. Iters per second: 45.3525 +Time per operator type: + 17.437 ms. 80.0087%. Conv + 1.27662 ms. 5.8577%. Add + 1.12759 ms. 5.17387%. Div + 0.701155 ms. 3.21721%. Mul + 0.562654 ms. 2.58171%. Relu + 0.431144 ms. 1.97828%. Clip + 0.156902 ms. 0.719936%. FC + 0.0996858 ms. 0.457402%. AveragePool + 0.00112455 ms. 0.00515993%. Flatten + 21.7939 ms in Total +FLOP per operator type: + 0.43062 GFLOP. 98.1484%. Conv + 0.002561 GFLOP. 0.583713%. FC + 0.00210867 GFLOP. 0.480616%. Mul + 0.00193868 GFLOP. 0.441871%. Add + 0.00151532 GFLOP. 0.345377%. Div + 0 GFLOP. 0%. Relu + 0.438743 GFLOP in Total +Feature Memory Read per operator type: + 34.7967 MB. 43.9391%. Conv + 14.496 MB. 18.3046%. Mul + 9.44828 MB. 11.9307%. Add + 9.26157 MB. 11.6949%. Relu + 6.0614 MB. 7.65395%. Div + 5.12912 MB. 6.47673%. FC + 79.193 MB in Total +Feature Memory Written per operator type: + 17.6247 MB. 35.8656%. Conv + 9.26157 MB. 18.847%. Relu + 8.43469 MB. 17.1643%. Mul + 7.75472 MB. 15.7806%. Add + 6.06128 MB. 12.3345%. Div + 0.004 MB. 0.00813985%. FC + 49.1409 MB in Total +Parameter Memory per operator type: + 16.6851 MB. 76.5052%. Conv + 5.124 MB. 23.4948%. FC + 0 MB. 0%. Add + 0 MB. 0%. Div + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 21.8091 MB in Total +``` + +## MobileNet-V3 (RW) + +### Unoptimized +``` +Main run finished. Milliseconds per iter: 24.8316. Iters per second: 40.2712 +Time per operator type: + 15.9266 ms. 69.2624%. Conv + 2.36551 ms. 10.2873%. SpatialBN + 1.39102 ms. 6.04936%. Add + 1.30327 ms. 5.66773%. Div + 0.737014 ms. 3.20517%. Mul + 0.639697 ms. 2.78195%. Relu + 0.375681 ms. 1.63378%. Clip + 0.153126 ms. 0.665921%. FC + 0.0993787 ms. 0.432184%. AveragePool + 0.0032632 ms. 0.0141912%. Squeeze + 22.9946 ms in Total +FLOP per operator type: + 0.430616 GFLOP. 94.4041%. Conv + 0.0175992 GFLOP. 3.85829%. SpatialBN + 0.002561 GFLOP. 0.561449%. FC + 0.00210961 GFLOP. 0.46249%. Mul + 0.00173891 GFLOP. 0.381223%. Add + 0.00151626 GFLOP. 0.33241%. Div + 0 GFLOP. 0%. Relu + 0.456141 GFLOP in Total +Feature Memory Read per operator type: + 34.7354 MB. 36.4363%. Conv + 17.7944 MB. 18.6658%. SpatialBN + 14.5035 MB. 15.2137%. Mul + 9.25778 MB. 9.71113%. Relu + 7.84641 MB. 8.23064%. Add + 6.06516 MB. 6.36216%. Div + 5.12912 MB. 5.38029%. FC + 95.3317 MB in Total +Feature Memory Written per operator type: + 17.6246 MB. 26.7264%. Conv + 17.5992 MB. 26.6878%. SpatialBN + 9.25778 MB. 14.0387%. Relu + 8.43843 MB. 12.7962%. Mul + 6.95565 MB. 10.5477%. Add + 6.06502 MB. 9.19713%. Div + 0.004 MB. 0.00606568%. 
FC + 65.9447 MB in Total +Parameter Memory per operator type: + 16.6778 MB. 76.1564%. Conv + 5.124 MB. 23.3979%. FC + 0.0976 MB. 0.445674%. SpatialBN + 0 MB. 0%. Add + 0 MB. 0%. Div + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 21.8994 MB in Total + +``` +### Optimized + +``` +Main run finished. Milliseconds per iter: 22.0981. Iters per second: 45.2527 +Time per operator type: + 17.146 ms. 78.8965%. Conv + 1.38453 ms. 6.37084%. Add + 1.30991 ms. 6.02749%. Div + 0.685417 ms. 3.15391%. Mul + 0.532589 ms. 2.45068%. Relu + 0.418263 ms. 1.92461%. Clip + 0.15128 ms. 0.696106%. FC + 0.102065 ms. 0.469648%. AveragePool + 0.0022143 ms. 0.010189%. Squeeze + 21.7323 ms in Total +FLOP per operator type: + 0.430616 GFLOP. 98.1927%. Conv + 0.002561 GFLOP. 0.583981%. FC + 0.00210961 GFLOP. 0.481051%. Mul + 0.00173891 GFLOP. 0.396522%. Add + 0.00151626 GFLOP. 0.34575%. Div + 0 GFLOP. 0%. Relu + 0.438542 GFLOP in Total +Feature Memory Read per operator type: + 34.7842 MB. 44.833%. Conv + 14.5035 MB. 18.6934%. Mul + 9.25778 MB. 11.9323%. Relu + 7.84641 MB. 10.1132%. Add + 6.06516 MB. 7.81733%. Div + 5.12912 MB. 6.61087%. FC + 77.5861 MB in Total +Feature Memory Written per operator type: + 17.6246 MB. 36.4556%. Conv + 9.25778 MB. 19.1492%. Relu + 8.43843 MB. 17.4544%. Mul + 6.95565 MB. 14.3874%. Add + 6.06502 MB. 12.5452%. Div + 0.004 MB. 0.00827378%. FC + 48.3455 MB in Total +Parameter Memory per operator type: + 16.6778 MB. 76.4973%. Conv + 5.124 MB. 23.5027%. FC + 0 MB. 0%. Add + 0 MB. 0%. Div + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 21.8018 MB in Total + +``` + +## MnasNet-A1 + +### Unoptimized +``` +Main run finished. Milliseconds per iter: 30.0892. Iters per second: 33.2345 +Time per operator type: + 24.4656 ms. 79.0905%. Conv + 4.14958 ms. 13.4144%. SpatialBN + 1.60598 ms. 5.19169%. Relu + 0.295219 ms. 0.95436%. Mul + 0.187609 ms. 0.606486%. FC + 0.120556 ms. 0.389724%. AveragePool + 0.09036 ms. 0.292109%. Add + 0.015727 ms. 0.050841%. Sigmoid + 0.00306205 ms. 0.00989875%. Squeeze + 30.9337 ms in Total +FLOP per operator type: + 0.620598 GFLOP. 95.6434%. Conv + 0.0248873 GFLOP. 3.8355%. SpatialBN + 0.002561 GFLOP. 0.394688%. FC + 0.000597408 GFLOP. 0.0920695%. Mul + 0.000222656 GFLOP. 0.0343146%. Add + 0 GFLOP. 0%. Relu + 0.648867 GFLOP in Total +Feature Memory Read per operator type: + 35.5457 MB. 38.4109%. Conv + 25.1552 MB. 27.1829%. SpatialBN + 22.5235 MB. 24.339%. Relu + 5.12912 MB. 5.54256%. FC + 2.40586 MB. 2.59978%. Mul + 1.78125 MB. 1.92483%. Add + 92.5406 MB in Total +Feature Memory Written per operator type: + 24.9042 MB. 32.9424%. Conv + 24.8873 MB. 32.92%. SpatialBN + 22.5235 MB. 29.7932%. Relu + 2.38963 MB. 3.16092%. Mul + 0.890624 MB. 1.17809%. Add + 0.004 MB. 0.00529106%. FC + 75.5993 MB in Total +Parameter Memory per operator type: + 10.2732 MB. 66.1459%. Conv + 5.124 MB. 32.9917%. FC + 0.133952 MB. 0.86247%. SpatialBN + 0 MB. 0%. Add + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 15.5312 MB in Total +``` + +### Optimized +``` +Main run finished. Milliseconds per iter: 24.2367. Iters per second: 41.2597 +Time per operator type: + 22.0547 ms. 91.1375%. Conv + 1.49096 ms. 6.16116%. Relu + 0.253417 ms. 1.0472%. Mul + 0.18506 ms. 0.76473%. FC + 0.112942 ms. 0.466717%. AveragePool + 0.086769 ms. 0.358559%. Add + 0.0127889 ms. 0.0528479%. Sigmoid + 0.0027346 ms. 0.0113003%. Squeeze + 24.1994 ms in Total +FLOP per operator type: + 0.620598 GFLOP. 99.4581%. Conv + 0.002561 GFLOP. 0.41043%. FC + 0.000597408 GFLOP. 0.0957417%. Mul + 0.000222656 GFLOP. 0.0356832%. Add + 0 GFLOP. 0%. 
Relu + 0.623979 GFLOP in Total +Feature Memory Read per operator type: + 35.6127 MB. 52.7968%. Conv + 22.5235 MB. 33.3917%. Relu + 5.12912 MB. 7.60406%. FC + 2.40586 MB. 3.56675%. Mul + 1.78125 MB. 2.64075%. Add + 67.4524 MB in Total +Feature Memory Written per operator type: + 24.9042 MB. 49.1092%. Conv + 22.5235 MB. 44.4145%. Relu + 2.38963 MB. 4.71216%. Mul + 0.890624 MB. 1.75624%. Add + 0.004 MB. 0.00788768%. FC + 50.712 MB in Total +Parameter Memory per operator type: + 10.2732 MB. 66.7213%. Conv + 5.124 MB. 33.2787%. FC + 0 MB. 0%. Add + 0 MB. 0%. Mul + 0 MB. 0%. Relu + 15.3972 MB in Total +``` +## MnasNet-B1 + +### Unoptimized +``` +Main run finished. Milliseconds per iter: 28.3109. Iters per second: 35.322 +Time per operator type: + 29.1121 ms. 83.3081%. Conv + 4.14959 ms. 11.8746%. SpatialBN + 1.35823 ms. 3.88675%. Relu + 0.186188 ms. 0.532802%. FC + 0.116244 ms. 0.332647%. Add + 0.018641 ms. 0.0533437%. AveragePool + 0.0040904 ms. 0.0117052%. Squeeze + 34.9451 ms in Total +FLOP per operator type: + 0.626272 GFLOP. 96.2088%. Conv + 0.0218266 GFLOP. 3.35303%. SpatialBN + 0.002561 GFLOP. 0.393424%. FC + 0.000291648 GFLOP. 0.0448034%. Add + 0 GFLOP. 0%. Relu + 0.650951 GFLOP in Total +Feature Memory Read per operator type: + 34.4354 MB. 41.3788%. Conv + 22.1299 MB. 26.5921%. SpatialBN + 19.1923 MB. 23.0622%. Relu + 5.12912 MB. 6.16333%. FC + 2.33318 MB. 2.80364%. Add + 83.2199 MB in Total +Feature Memory Written per operator type: + 21.8266 MB. 34.0955%. Conv + 21.8266 MB. 34.0955%. SpatialBN + 19.1923 MB. 29.9805%. Relu + 1.16659 MB. 1.82234%. Add + 0.004 MB. 0.00624844%. FC + 64.016 MB in Total +Parameter Memory per operator type: + 12.2576 MB. 69.9104%. Conv + 5.124 MB. 29.2245%. FC + 0.15168 MB. 0.865099%. SpatialBN + 0 MB. 0%. Add + 0 MB. 0%. Relu + 17.5332 MB in Total +``` + +### Optimized +``` +Main run finished. Milliseconds per iter: 26.6364. Iters per second: 37.5426 +Time per operator type: + 24.9888 ms. 94.0962%. Conv + 1.26147 ms. 4.75011%. Relu + 0.176234 ms. 0.663619%. FC + 0.113309 ms. 0.426672%. Add + 0.0138708 ms. 0.0522311%. AveragePool + 0.00295685 ms. 0.0111341%. Squeeze + 26.5566 ms in Total +FLOP per operator type: + 0.626272 GFLOP. 99.5466%. Conv + 0.002561 GFLOP. 0.407074%. FC + 0.000291648 GFLOP. 0.0463578%. Add + 0 GFLOP. 0%. Relu + 0.629124 GFLOP in Total +Feature Memory Read per operator type: + 34.5112 MB. 56.4224%. Conv + 19.1923 MB. 31.3775%. Relu + 5.12912 MB. 8.3856%. FC + 2.33318 MB. 3.81452%. Add + 61.1658 MB in Total +Feature Memory Written per operator type: + 21.8266 MB. 51.7346%. Conv + 19.1923 MB. 45.4908%. Relu + 1.16659 MB. 2.76513%. Add + 0.004 MB. 0.00948104%. FC + 42.1895 MB in Total +Parameter Memory per operator type: + 12.2576 MB. 70.5205%. Conv + 5.124 MB. 29.4795%. FC + 0 MB. 0%. Add + 0 MB. 0%. Relu + 17.3816 MB in Total +``` diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..80e7d15508202f3262a50db27f5198460d7f509f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Ross Wightman + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/README.md b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..463368280d6a5015060eb73d20fe6512f8e04c50
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/README.md
@@ -0,0 +1,323 @@
+# (Generic) EfficientNets for PyTorch
+
+A 'generic' implementation of EfficientNet, MixNet, MobileNetV3, etc. that covers most of the compute/parameter efficient architectures derived from the MobileNet V1/V2 block sequence, including those found via automated neural architecture search.
+
+All models are implemented by the GenEfficientNet or MobileNetV3 classes, with string-based architecture definitions to configure the block layouts (idea from [here](https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py)).
+
+## What's New
+
+### Aug 19, 2020
+* Add updated PyTorch trained EfficientNet-B3 weights trained by myself with `timm` (82.1 top-1)
+* Add PyTorch trained EfficientNet-Lite0 contributed by [@hal-314](https://github.com/hal-314) (75.5 top-1)
+* Update ONNX and Caffe2 export / utility scripts to work with latest PyTorch / ONNX
+* ONNX runtime based validation script added
+* Activations (mostly) brought in sync with `timm` equivalents
+
+
+### April 5, 2020
+* Add some newly trained MobileNet-V2 models trained with the latest h-params and rand augment. They compare quite favourably to EfficientNet-Lite:
+  * 3.5M param MobileNet-V2 100 @ 73%
+  * 4.5M param MobileNet-V2 110d @ 75%
+  * 6.1M param MobileNet-V2 140 @ 76.5%
+  * 5.8M param MobileNet-V2 120d @ 77.3%
+
+### March 23, 2020
+ * Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
+ * Add PyTorch trained MobileNet-V3 Large weights with 75.77% top-1
+ * IMPORTANT CHANGE (if training from scratch) - weight init changed to better match the Tensorflow impl; set `fix_group_fanout=False` in `initialize_weight_goog` for the old behavior
+
+### Feb 12, 2020
+ * Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
+ * Port new EfficientNet-B8 (RandAugment) weights from TF TPU; these are different from the B8 AdvProp weights and use a different input normalization.
+ * Add RandAugment PyTorch trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin)
+
+### Jan 22, 2020
+ * Update weights for EfficientNet B0, B2, B3 and MixNet-XL with the latest RandAugment trained weights. Trained with https://github.com/rwightman/pytorch-image-models
+ * Fix torchscript compatibility for PyTorch 1.4, add torchscript support for MixedConv2d using ModuleDict
+ * Test models, torchscript, onnx export with PyTorch 1.4 -- no issues
+
+### Nov 22, 2019
+ * New top-1 high! Ported official TF EfficientNet AdvProp (https://arxiv.org/abs/1911.09665) weights and B8 model spec. Created a new set of `ap` models since they use a different preprocessing (Inception mean/std) from the original EfficientNet base/AA/RA weights.
+
+### Nov 15, 2019
+ * Ported official TF MobileNet-V3 float32 large/small/minimalistic weights
+ * Modifications to MobileNet-V3 model and components to support some additional config needed for differences between the TF MobileNet-V3 models and mine
+
+### Oct 30, 2019
+ * Many of the models will now work with torch.jit.script, MixNet being the biggest exception
+ * Improved interface for enabling torchscript or ONNX export compatible modes (via config)
+ * Add JIT-scripted Swish/Mish activations in addition to the memory-efficient autograd.fn versions
+ * Activation factory to select the best version of an activation by name, or override one globally
+ * Add pretrained checkpoint load helper that handles input conv and classifier changes
+
+### Oct 27, 2019
+ * Add CondConv EfficientNet variants ported from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
+ * Add RandAug weights for TF EfficientNet B5 and B7 from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+ * Bring over MixNet-XL model and depth scaling algo from my pytorch-image-models code base
+ * Switch activations and global pooling to modules
+ * Add memory-efficient Swish/Mish impl
+ * Add as_sequential() method to all models and allow as an argument in entrypoint fns
+ * Move MobileNetV3 into its own file since it has a different head
+ * Remove ChamNet, MobileNet V2/V1 since they will likely never be used here
+
+## Models
+
+Implemented models include:
+ * EfficientNet NoisyStudent (B0-B7, L2) (https://arxiv.org/abs/1911.04252)
+ * EfficientNet AdvProp (B0-B8) (https://arxiv.org/abs/1911.09665)
+ * EfficientNet (B0-B8) (https://arxiv.org/abs/1905.11946)
+ * EfficientNet-EdgeTPU (S, M, L) (https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html)
+ * EfficientNet-CondConv (https://arxiv.org/abs/1904.04971)
+ * EfficientNet-Lite (https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
+ * MixNet (https://arxiv.org/abs/1907.09595)
+ * MNASNet B1, A1 (Squeeze-Excite), and Small (https://arxiv.org/abs/1807.11626)
+ * MobileNet-V3 (https://arxiv.org/abs/1905.02244)
+ * FBNet-C (https://arxiv.org/abs/1812.03443)
+ * Single-Path NAS (https://arxiv.org/abs/1904.02877)
+
+I originally implemented and trained some of these models with the code [here](https://github.com/rwightman/pytorch-image-models); this repository contains just the GenEfficientNet models, validation, and the associated ONNX/Caffe2 export code.
+
+## Pretrained
+
+I've managed to train several of the models to accuracies close to or above the originating papers and official impl. My training code is here: https://github.com/rwightman/pytorch-image-models
+
+
+|Model | Prec@1 (Err) | Prec@5 (Err) | Param#(M) | MAdds(M) | Image Scaling | Resolution | Crop |
+|---|---|---|---|---|---|---|---|
+| efficientnet_b3 | 82.240 (17.760) | 96.116 (3.884) | 12.23 | TBD | bicubic | 320 | 1.0 |
+| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | TBD | bicubic | 300 | 0.904 |
+| mixnet_xl | 81.074 (18.926) | 95.282 (4.718) | 11.90 | TBD | bicubic | 256 | 1.0 |
+| efficientnet_b2 | 80.612 (19.388) | 95.318 (4.682) | 9.1 | TBD | bicubic | 288 | 1.0 |
+| mixnet_xl | 80.476 (19.524) | 94.936 (5.064) | 11.90 | TBD | bicubic | 224 | 0.875 |
+| efficientnet_b2 | 80.288 (19.712) | 95.166 (4.834) | 9.1 | 1003 | bicubic | 260 | 0.890 |
+| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | TBD | bicubic | 224 | 0.875 |
+| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.8 | 694 | bicubic | 240 | 0.882 |
+| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | TBD | bicubic | 224 | 0.875 |
+| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.3 | 390 | bicubic | 224 | 0.875 |
+| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | TBD | bicubic | 224 | 0.875 |
+| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | 353 | bicubic | 224 | 0.875 |
+| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | TBD | bicubic | 224 | 0.875 |
+| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | TBD | bicubic | 224 | 0.875 |
+| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | TBD | bicubic | 224 | 0.875 |
+| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | 219 | bicubic | 224 | 0.875 |
+| efficientnet_lite0 | 75.472 (24.528) | 92.520 (7.480) | 4.65 | TBD | bicubic | 224 | 0.875 |
+| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.9 | 312 | bicubic | 224 | 0.875 |
+| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | 385 | bilinear | 224 | 0.875 |
+| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | TBD | bicubic | 224 | 0.875 |
+| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.4 | 315 | bicubic | 224 | 0.875 |
+| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.4 | TBD | bilinear | 224 | 0.875 |
+| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | TBD | bicubic | 224 | 0.875 |
+
+
+More pretrained models to come...
+
+
+## Ported Weights
+
+The weights ported from Tensorflow checkpoints for the EfficientNet models closely match the Tensorflow accuracy once a SAME convolution padding equivalent is added and the same crop factors, image scaling, etc. (see table) are used via the cmd line args.
+
+**IMPORTANT:**
+* Tensorflow ported weights for EfficientNet AdvProp (AP), EfficientNet EdgeTPU, EfficientNet-CondConv, EfficientNet-Lite, and MobileNet-V3 models use Inception style (0.5, 0.5, 0.5) for mean and std; see the preprocessing sketch below.
+* Enabling the Tensorflow preprocessing pipeline with `--tf-preprocessing` at validation time will improve scores by 0.1-0.5%, bringing them very close to the original TF impl.
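+
+As a rough sketch of the eval-time preprocessing described above (the helper `build_eval_transform` is illustrative and not part of this repo; `validate.py` resolves all of this from the model config), assuming torchvision is installed:
+
+```
+import math
+
+from PIL import Image
+from torchvision import transforms
+
+IMAGENET_MEAN, IMAGENET_STD = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
+INCEPTION_MEAN = INCEPTION_STD = (0.5, 0.5, 0.5)  # AP / EdgeTPU / CondConv / Lite / MobileNet-V3 ports
+
+
+def build_eval_transform(img_size=224, crop_pct=0.875, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+    # resize so that a center crop of img_size covers crop_pct of the resized image
+    scale_size = int(math.floor(img_size / crop_pct))
+    return transforms.Compose([
+        transforms.Resize(scale_size, Image.BICUBIC),
+        transforms.CenterCrop(img_size),
+        transforms.ToTensor(),
+        transforms.Normalize(mean, std),
+    ])
+
+
+# e.g. matching the tf_efficientnet_b8_ap validation cmd below: Inception mean/std, 672px, crop 0.954
+tfm = build_eval_transform(img_size=672, crop_pct=0.954, mean=INCEPTION_MEAN, std=INCEPTION_STD)
+```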
+
+To run validation for tf_efficientnet_b5:
+`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --crop-pct 0.934 --interpolation bicubic`
+
+To run validation w/ TF preprocessing for tf_efficientnet_b5:
+`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --tf-preprocessing`
+
+To run validation for a model with Inception preprocessing, i.e. EfficientNet-B8 AdvProp:
+`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b8_ap -b 48 --num-gpu 2 --img-size 672 --crop-pct 0.954 --mean 0.5 --std 0.5`
+
+|Model | Prec@1 (Err) | Prec@5 (Err) | Param # | Image Scaling | Image Size | Crop |
+|---|---|---|---|---|---|---|
+| tf_efficientnet_l2_ns *tfp | 88.352 (11.648) | 98.652 (1.348) | 480 | bicubic | 800 | N/A |
+| tf_efficientnet_l2_ns | TBD | TBD | 480 | bicubic | 800 | 0.961 |
+| tf_efficientnet_l2_ns_475 | 88.234 (11.766) | 98.546 (1.454) | 480 | bicubic | 475 | 0.936 |
+| tf_efficientnet_l2_ns_475 *tfp | 88.172 (11.828) | 98.566 (1.434) | 480 | bicubic | 475 | N/A |
+| tf_efficientnet_b7_ns *tfp | 86.844 (13.156) | 98.084 (1.916) | 66.35 | bicubic | 600 | N/A |
+| tf_efficientnet_b7_ns | 86.840 (13.160) | 98.094 (1.906) | 66.35 | bicubic | 600 | N/A |
+| tf_efficientnet_b6_ns | 86.452 (13.548) | 97.882 (2.118) | 43.04 | bicubic | 528 | N/A |
+| tf_efficientnet_b6_ns *tfp | 86.444 (13.556) | 97.880 (2.120) | 43.04 | bicubic | 528 | N/A |
+| tf_efficientnet_b5_ns *tfp | 86.064 (13.936) | 97.746 (2.254) | 30.39 | bicubic | 456 | N/A |
+| tf_efficientnet_b5_ns | 86.088 (13.912) | 97.752 (2.248) | 30.39 | bicubic | 456 | N/A |
+| tf_efficientnet_b8_ap *tfp | 85.436 (14.564) | 97.272 (2.728) | 87.4 | bicubic | 672 | N/A |
+| tf_efficientnet_b8 *tfp | 85.384 (14.616) | 97.394 (2.606) | 87.4 | bicubic | 672 | N/A |
+| tf_efficientnet_b8 | 85.370 (14.630) | 97.390 (2.610) | 87.4 | bicubic | 672 | 0.954 |
+| tf_efficientnet_b8_ap | 85.368 (14.632) | 97.294 (2.706) | 87.4 | bicubic | 672 | 0.954 |
+| tf_efficientnet_b4_ns *tfp | 85.298 (14.702) | 97.504 (2.496) | 19.34 | bicubic | 380 | N/A |
+| tf_efficientnet_b4_ns | 85.162 (14.838) | 97.470 (2.530) | 19.34 | bicubic | 380 | 0.922 |
+| tf_efficientnet_b7_ap *tfp | 85.154 (14.846) | 97.244 (2.756) | 66.35 | bicubic | 600 | N/A |
+| tf_efficientnet_b7_ap | 85.118 (14.882) | 97.252 (2.748) | 66.35 | bicubic | 600 | 0.949 |
+| tf_efficientnet_b7 *tfp | 84.940 (15.060) | 97.214 (2.786) | 66.35 | bicubic | 600 | N/A |
+| tf_efficientnet_b7 | 84.932 (15.068) | 97.208 (2.792) | 66.35 | bicubic | 600 | 0.949 |
+| tf_efficientnet_b6_ap | 84.786 (15.214) | 97.138 (2.862) | 43.04 | bicubic | 528 | 0.942 |
+| tf_efficientnet_b6_ap *tfp | 84.760 (15.240) | 97.124 (2.876) | 43.04 | bicubic | 528 | N/A |
+| tf_efficientnet_b5_ap *tfp | 84.276 (15.724) | 96.932 (3.068) | 30.39 | bicubic | 456 | N/A |
+| tf_efficientnet_b5_ap | 84.254 (15.746) | 96.976 (3.024) | 30.39 | bicubic | 456 | 0.934 |
+| tf_efficientnet_b6 *tfp | 84.140 (15.860) | 96.852 (3.148) | 43.04 | bicubic | 528 | N/A |
+| tf_efficientnet_b6 | 84.110 (15.890) | 96.886 (3.114) | 43.04 | bicubic | 528 | 0.942 |
+| tf_efficientnet_b3_ns *tfp | 84.054 (15.946) | 96.918 (3.082) | 12.23 | bicubic | 300 | N/A |
+| tf_efficientnet_b3_ns | 84.048 (15.952) | 96.910 (3.090) | 12.23 | bicubic | 300 | 0.904 |
+| tf_efficientnet_b5 *tfp | 83.822 (16.178) | 96.756 (3.244) | 30.39 | bicubic | 456 | N/A |
+| tf_efficientnet_b5 | 83.812 (16.188) | 96.748 (3.252) | 30.39 | bicubic | 456 | 0.934 |
+| tf_efficientnet_b4_ap *tfp | 83.278 (16.722) | 96.376 (3.624) | 19.34 | bicubic | 380 | N/A |
+| tf_efficientnet_b4_ap | 83.248 (16.752) | 96.388 (3.612) | 19.34 | bicubic | 380 | 0.922 |
+| tf_efficientnet_b4 | 83.022 (16.978) | 96.300 (3.700) | 19.34 | bicubic | 380 | 0.922 |
+| tf_efficientnet_b4 *tfp | 82.948 (17.052) | 96.308 (3.692) | 19.34 | bicubic | 380 | N/A |
+| tf_efficientnet_b2_ns *tfp | 82.436 (17.564) | 96.268 (3.732) | 9.11 | bicubic | 260 | N/A |
+| tf_efficientnet_b2_ns | 82.380 (17.620) | 96.248 (3.752) | 9.11 | bicubic | 260 | 0.89 |
+| tf_efficientnet_b3_ap *tfp | 81.882 (18.118) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
+| tf_efficientnet_b3_ap | 81.828 (18.172) | 95.624 (4.376) | 12.23 | bicubic | 300 | 0.904 |
+| tf_efficientnet_b3 | 81.636 (18.364) | 95.718 (4.282) | 12.23 | bicubic | 300 | 0.904 |
+| tf_efficientnet_b3 *tfp | 81.576 (18.424) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
+| tf_efficientnet_lite4 | 81.528 (18.472) | 95.668 (4.332) | 13.00 | bilinear | 380 | 0.92 |
+| tf_efficientnet_b1_ns *tfp | 81.514 (18.486) | 95.776 (4.224) | 7.79 | bicubic | 240 | N/A |
+| tf_efficientnet_lite4 *tfp | 81.502 (18.498) | 95.676 (4.324) | 13.00 | bilinear | 380 | N/A |
+| tf_efficientnet_b1_ns | 81.388 (18.612) | 95.738 (4.262) | 7.79 | bicubic | 240 | 0.88 |
+| tf_efficientnet_el | 80.534 (19.466) | 95.190 (4.810) | 10.59 | bicubic | 300 | 0.904 |
+| tf_efficientnet_el *tfp | 80.476 (19.524) | 95.200 (4.800) | 10.59 | bicubic | 300 | N/A |
+| tf_efficientnet_b2_ap *tfp | 80.420 (19.580) | 95.040 (4.960) | 9.11 | bicubic | 260 | N/A |
+| tf_efficientnet_b2_ap | 80.306 (19.694) | 95.028 (4.972) | 9.11 | bicubic | 260 | 0.890 |
+| tf_efficientnet_b2 *tfp | 80.188 (19.812) | 94.974 (5.026) | 9.11 | bicubic | 260 | N/A |
+| tf_efficientnet_b2 | 80.086 (19.914) | 94.908 (5.092) | 9.11 | bicubic | 260 | 0.890 |
+| tf_efficientnet_lite3 | 79.812 (20.188) | 94.914 (5.086) | 8.20 | bilinear | 300 | 0.904 |
+| tf_efficientnet_lite3 *tfp | 79.734 (20.266) | 94.838 (5.162) | 8.20 | bilinear | 300 | N/A |
+| tf_efficientnet_b1_ap *tfp | 79.532 (20.468) | 94.378 (5.622) | 7.79 | bicubic | 240 | N/A |
+| tf_efficientnet_cc_b1_8e *tfp | 79.464 (20.536) | 94.492 (5.508) | 39.7 | bicubic | 240 | 0.88 |
+| tf_efficientnet_cc_b1_8e | 79.298 (20.702) | 94.364 (5.636) | 39.7 | bicubic | 240 | 0.88 |
+| tf_efficientnet_b1_ap | 79.278 (20.722) | 94.308 (5.692) | 7.79 | bicubic | 240 | 0.88 |
+| tf_efficientnet_b1 *tfp | 79.172 (20.828) | 94.450 (5.550) | 7.79 | bicubic | 240 | N/A |
+| tf_efficientnet_em *tfp | 78.958 (21.042) | 94.458 (5.542) | 6.90 | bicubic | 240 | N/A |
+| tf_efficientnet_b0_ns *tfp | 78.806 (21.194) | 94.496 (5.504) | 5.29 | bicubic | 224 | N/A |
+| tf_mixnet_l *tfp | 78.846 (21.154) | 94.212 (5.788) | 7.33 | bilinear | 224 | N/A |
+| tf_efficientnet_b1 | 78.826 (21.174) | 94.198 (5.802) | 7.79 | bicubic | 240 | 0.88 |
+| tf_mixnet_l | 78.770 (21.230) | 94.004 (5.996) | 7.33 | bicubic | 224 | 0.875 |
+| tf_efficientnet_em | 78.742 (21.258) | 94.332 (5.668) | 6.90 | bicubic | 240 | 0.875 |
+| tf_efficientnet_b0_ns | 78.658 (21.342) | 94.376 (5.624) | 5.29 | bicubic | 224 | 0.875 |
+| tf_efficientnet_cc_b0_8e *tfp | 78.314 (21.686) | 93.790 (6.210) | 24.0 | bicubic | 224 | 0.875 |
+| tf_efficientnet_cc_b0_8e | 77.908 (22.092) | 93.656 (6.344) | 24.0 | bicubic | 224 | 0.875 |
+| tf_efficientnet_cc_b0_4e *tfp | 77.746 (22.254) | 93.552 (6.448) | 13.3 | bicubic | 224 | 0.875 |
+| tf_efficientnet_cc_b0_4e | 77.304 (22.696) | 93.332 (6.668) | 13.3 | bicubic | 224 | 0.875 |
+| tf_efficientnet_es *tfp | 77.616 (22.384) | 93.750 (6.250) | 5.44 | bicubic | 224 | N/A |
+| tf_efficientnet_lite2 *tfp | 77.544 (22.456) | 93.800 (6.200) | 6.09 | bilinear | 260 | N/A |
+| tf_efficientnet_lite2 | 77.460 (22.540) | 93.746 (6.254) | 6.09 | bicubic | 260 | 0.89 |
+| tf_efficientnet_b0_ap *tfp | 77.514 (22.486) | 93.576 (6.424) | 5.29 | bicubic | 224 | N/A |
+| tf_efficientnet_es | 77.264 (22.736) | 93.600 (6.400) | 5.44 | bicubic | 224 | N/A |
+| tf_efficientnet_b0 *tfp | 77.258 (22.742) | 93.478 (6.522) | 5.29 | bicubic | 224 | N/A |
+| tf_efficientnet_b0_ap | 77.084 (22.916) | 93.254 (6.746) | 5.29 | bicubic | 224 | 0.875 |
+| tf_mixnet_m *tfp | 77.072 (22.928) | 93.368 (6.632) | 5.01 | bilinear | 224 | N/A |
+| tf_mixnet_m | 76.950 (23.050) | 93.156 (6.844) | 5.01 | bicubic | 224 | 0.875 |
+| tf_efficientnet_b0 | 76.848 (23.152) | 93.228 (6.772) | 5.29 | bicubic | 224 | 0.875 |
+| tf_efficientnet_lite1 *tfp | 76.764 (23.236) | 93.326 (6.674) | 5.42 | bilinear | 240 | N/A |
+| tf_efficientnet_lite1 | 76.638 (23.362) | 93.232 (6.768) | 5.42 | bicubic | 240 | 0.882 |
+| tf_mixnet_s *tfp | 75.800 (24.200) | 92.788 (7.212) | 4.13 | bilinear | 224 | N/A |
+| tf_mobilenetv3_large_100 *tfp | 75.768 (24.232) | 92.710 (7.290) | 5.48 | bilinear | 224 | N/A |
+| tf_mixnet_s | 75.648 (24.352) | 92.636 (7.364) | 4.13 | bicubic | 224 | 0.875 |
+| tf_mobilenetv3_large_100 | 75.516 (24.484) | 92.600 (7.400) | 5.48 | bilinear | 224 | 0.875 |
+| tf_efficientnet_lite0 *tfp | 75.074 (24.926) | 92.314 (7.686) | 4.65 | bilinear | 224 | N/A |
+| tf_efficientnet_lite0 | 74.842 (25.158) | 92.170 (7.830) | 4.65 | bicubic | 224 | 0.875 |
+| tf_mobilenetv3_large_075 *tfp | 73.730 (26.270) | 91.616 (8.384) | 3.99 | bilinear | 224 | N/A |
+| tf_mobilenetv3_large_075 | 73.442 (26.558) | 91.352 (8.648) | 3.99 | bilinear | 224 | 0.875 |
+| tf_mobilenetv3_large_minimal_100 *tfp | 72.678 (27.322) | 90.860 (9.140) | 3.92 | bilinear | 224 | N/A |
+| tf_mobilenetv3_large_minimal_100 | 72.244 (27.756) | 90.636 (9.364) | 3.92 | bilinear | 224 | 0.875 |
+| tf_mobilenetv3_small_100 *tfp | 67.918 (32.082) | 87.958 (12.042) | 2.54 | bilinear | 224 | N/A |
+| tf_mobilenetv3_small_100 | 67.918 (32.082) | 87.662 (12.338) | 2.54 | bilinear | 224 | 0.875 |
+| tf_mobilenetv3_small_075 *tfp | 66.142 (33.858) | 86.498 (13.502) | 2.04 | bilinear | 224 | N/A |
+| tf_mobilenetv3_small_075 | 65.718 (34.282) | 86.136 (13.864) | 2.04 | bilinear | 224 | 0.875 |
+| tf_mobilenetv3_small_minimal_100 *tfp | 63.378 (36.622) | 84.802 (15.198) | 2.04 | bilinear | 224 | N/A |
+| tf_mobilenetv3_small_minimal_100 | 62.898 (37.102) | 84.230 (15.770) | 2.04 | bilinear | 224 | 0.875 |
+
+
+*tfp models validated with `tf-preprocessing` pipeline
+
+Google tf and tflite weights ported from the official Tensorflow repositories:
+* https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
+* https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
+* https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
+
+## Usage
+
+### Environment
+
+All development and testing have been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.6.x, 3.7.x, and 3.8.x.
+
+Users have reported that a Python 3 Anaconda install in Windows works. I have not verified this myself.
+
+PyTorch versions 1.4, 1.5, and 1.6 have been tested with this code.
+
+I've tried to keep the dependencies minimal; the setup is as per the PyTorch default install instructions for Conda:
+```
+conda create -n torch-env
+conda activate torch-env
+conda install -c pytorch pytorch torchvision cudatoolkit=10.2
+```
+
+### PyTorch Hub
+
+Models can be accessed via the PyTorch Hub API:
+
+```
+>>> torch.hub.list('rwightman/gen-efficientnet-pytorch')
+['efficientnet_b0', ...]
+>>> model = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0', pretrained=True)
+>>> model.eval()
+>>> output = model(torch.randn(1,3,224,224))
+```
+
+### Pip
+This package can be installed via pip.
+
+Install (after conda env/install):
+```
+pip install geffnet
+```
+
+Eval use:
+```
+>>> import geffnet
+>>> m = geffnet.create_model('mobilenetv3_large_100', pretrained=True)
+>>> m.eval()
+```
+
+Train use:
+```
+>>> import geffnet
+>>> # models can also be created by using the entrypoint directly
+>>> m = geffnet.efficientnet_b2(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2)
+>>> m.train()
+```
+
+Create in an nn.Sequential container, for fast.ai, etc.:
+```
+>>> import geffnet
+>>> m = geffnet.mixnet_l(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2, as_sequential=True)
+```
+
+### Exporting
+
+Scripts are included to:
+* export models to ONNX (`onnx_export.py`)
+* optimize the ONNX graph (`onnx_optimize.py` or `onnx_validate.py` w/ `--onnx-output-opt` arg)
+* validate with ONNX runtime (`onnx_validate.py`)
+* convert ONNX model to Caffe2 (`onnx_to_caffe.py`)
+* validate in Caffe2 (`caffe2_validate.py`)
+* benchmark in Caffe2 w/ FLOPs, parameters output (`caffe2_benchmark.py`)
+
+As an example, to export the MobileNet-V3 pretrained model and then run an Imagenet validation:
+```
+python onnx_export.py --model mobilenetv3_large_100 ./mobilenetv3_100.onnx
+python onnx_validate.py /imagenet/validation/ --onnx-input ./mobilenetv3_100.onnx
+```
+
+These scripts were tested and working as of PyTorch 1.6 and ONNX 1.7 w/ ONNX runtime 1.4. Caffe2 compatible export now requires additional args mentioned in the export script (not needed in earlier versions).
+
+#### Export Notes
+1. The TF ported weights with the 'SAME' conv padding activated cannot be exported to ONNX unless the `_EXPORTABLE` flag in `config.py` is set to True. Use `config.set_exportable(True)` as in the `onnx_export.py` script; a minimal sketch is shown below.
+2. TF ported models with 'SAME' padding will have the padding fixed at export time to the resolution used for export. Even though dynamic padding is supported in opset >= 11, I can't get it working.
+3. The ONNX optimize facility doesn't work reliably in PyTorch 1.6 / ONNX 1.7. Fortunately, the onnxruntime based inference is working very well now and includes on-the-fly optimization.
+4. ONNX / Caffe2 export/import frequently breaks with different PyTorch and ONNX version releases. Please check their respective issue trackers before filing issues here.
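+
+For note 1, a minimal export sketch (the input/output names, model choice, and opset below are illustrative assumptions; `onnx_export.py` is the canonical script):
+
+```
+import torch
+import geffnet
+
+# enable export-friendly activations / padding before creating the model (note 1)
+geffnet.set_exportable(True)
+model = geffnet.create_model('tf_efficientnet_b0', pretrained=True)
+model.eval()
+
+# 'SAME'-padded TF ports bake the padding for this resolution at export time (note 2)
+dummy = torch.randn(1, 3, 224, 224)
+torch.onnx.export(model, dummy, 'tf_efficientnet_b0.onnx',
+                  input_names=['input0'], output_names=['output0'],
+                  opset_version=10)
+```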
+ + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_benchmark.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..93f28a1e63d9f7287ca02997c7991fe66dd0aeb9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_benchmark.py @@ -0,0 +1,65 @@ +""" Caffe2 validation script + +This script runs Caffe2 benchmark on exported ONNX model. +It is a useful tool for reporting model FLOPS. + +Copyright 2020 Ross Wightman +""" +import argparse +from caffe2.python import core, workspace, model_helper +from caffe2.proto import caffe2_pb2 + + +parser = argparse.ArgumentParser(description='Caffe2 Model Benchmark') +parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME', + help='caffe2 model pb name prefix') +parser.add_argument('--c2-init', default='', type=str, metavar='PATH', + help='caffe2 model init .pb') +parser.add_argument('--c2-predict', default='', type=str, metavar='PATH', + help='caffe2 model predict .pb') +parser.add_argument('-b', '--batch-size', default=1, type=int, + metavar='N', help='mini-batch size (default: 1)') +parser.add_argument('--img-size', default=224, type=int, + metavar='N', help='Input image dimension, uses model default if empty') + + +def main(): + args = parser.parse_args() + args.gpu_id = 0 + if args.c2_prefix: + args.c2_init = args.c2_prefix + '.init.pb' + args.c2_predict = args.c2_prefix + '.predict.pb' + + model = model_helper.ModelHelper(name="le_net", init_params=False) + + # Bring in the init net from init_net.pb + init_net_proto = caffe2_pb2.NetDef() + with open(args.c2_init, "rb") as f: + init_net_proto.ParseFromString(f.read()) + model.param_init_net = core.Net(init_net_proto) + + # bring in the predict net from predict_net.pb + predict_net_proto = caffe2_pb2.NetDef() + with open(args.c2_predict, "rb") as f: + predict_net_proto.ParseFromString(f.read()) + model.net = core.Net(predict_net_proto) + + # CUDA performance not impressive + #device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id) + #model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) + #model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) + + input_blob = model.net.external_inputs[0] + model.param_init_net.GaussianFill( + [], + input_blob.GetUnscopedName(), + shape=(args.batch_size, 3, args.img_size, args.img_size), + mean=0.0, + std=1.0) + workspace.RunNetOnce(model.param_init_net) + workspace.CreateNet(model.net, overwrite=True) + workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True) + + +if __name__ == '__main__': + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_validate.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_validate.py new file mode 100644 index 0000000000000000000000000000000000000000..7cfaab38c095663fe32e4addbdf06b57bcb53614 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_validate.py @@ -0,0 +1,138 @@ +""" Caffe2 validation script + +This script is created to verify exported ONNX models running in Caffe2 +It utilizes the same PyTorch dataloader/processing pipeline for a +fair comparison against the originals. + +Copyright 2020 Ross Wightman +""" +import argparse +import numpy as np +from caffe2.python import core, workspace, model_helper +from caffe2.proto import caffe2_pb2 +from data import create_loader, resolve_data_config, Dataset +from utils import AverageMeter +import time + +parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME', + help='caffe2 model pb name prefix') +parser.add_argument('--c2-init', default='', type=str, metavar='PATH', + help='caffe2 model init .pb') +parser.add_argument('--c2-predict', default='', type=str, metavar='PATH', + help='caffe2 model predict .pb') +parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', + help='number of data loading workers (default: 2)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', + help='Override default crop pct of 0.875') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', + help='use tensorflow mnasnet preporcessing') +parser.add_argument('--print-freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') + + +def main(): + args = parser.parse_args() + args.gpu_id = 0 + if args.c2_prefix: + args.c2_init = args.c2_prefix + '.init.pb' + args.c2_predict = args.c2_prefix + '.predict.pb' + + model = model_helper.ModelHelper(name="validation_net", init_params=False) + + # Bring in the init net from init_net.pb + init_net_proto = caffe2_pb2.NetDef() + with open(args.c2_init, "rb") as f: + init_net_proto.ParseFromString(f.read()) + model.param_init_net = core.Net(init_net_proto) + + # bring in the predict net from predict_net.pb + predict_net_proto = caffe2_pb2.NetDef() + with open(args.c2_predict, "rb") as f: + predict_net_proto.ParseFromString(f.read()) + model.net = core.Net(predict_net_proto) + + data_config = resolve_data_config(None, args) + loader = create_loader( + Dataset(args.data, load_bytes=args.tf_preprocessing), + input_size=data_config['input_size'], + batch_size=args.batch_size, + use_prefetcher=False, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + crop_pct=data_config['crop_pct'], + tensorflow_preprocessing=args.tf_preprocessing) + + # this is so obvious, wonderful interface + input_blob = model.net.external_inputs[0] + output_blob = model.net.external_outputs[0] + + if True: + device_opts = 
None + else: + # CUDA is crashing, no idea why, awesome error message, give it a try for kicks + device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id) + model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) + model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) + + model.param_init_net.GaussianFill( + [], input_blob.GetUnscopedName(), + shape=(1,) + data_config['input_size'], mean=0.0, std=1.0) + workspace.RunNetOnce(model.param_init_net) + workspace.CreateNet(model.net, overwrite=True) + + batch_time = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + end = time.time() + for i, (input, target) in enumerate(loader): + # run the net and return prediction + caffe2_in = input.data.numpy() + workspace.FeedBlob(input_blob, caffe2_in, device_opts) + workspace.RunNet(model.net, num_iter=1) + output = workspace.FetchBlob(output_blob) + + # measure accuracy and record loss + prec1, prec5 = accuracy_np(output.data, target.numpy()) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + print('Test: [{0}/{1}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, + ms_avg=100 * batch_time.avg / input.size(0), top1=top1, top5=top5)) + + print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format( + top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg)) + + +def accuracy_np(output, target): + max_indices = np.argsort(output, axis=1)[:, ::-1] + top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean() + top1 = 100 * np.equal(max_indices[:, 0], target).mean() + return top1, top5 + + +if __name__ == '__main__': + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2e441a5838d1e972823b9668ac8d459445f6f6ce --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/__init__.py @@ -0,0 +1,5 @@ +from .gen_efficientnet import * +from .mobilenetv3 import * +from .model_factory import create_model +from .config import is_exportable, is_scriptable, set_exportable, set_scriptable +from .activations import * \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..813421a743ffc33b8eb53ebf62dd4a03d831b654 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/__init__.py @@ -0,0 +1,137 @@ +from geffnet import config +from geffnet.activations.activations_me import * +from geffnet.activations.activations_jit import * +from geffnet.activations.activations import * +import torch + +_has_silu = 'silu' in 
dir(torch.nn.functional) + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=mish, + relu=F.relu, + relu6=F.relu6, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=hard_sigmoid, + hard_swish=hard_swish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=mish_jit, +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=mish_me, + hard_swish=hard_swish_me, + hard_sigmoid_jit=hard_sigmoid_me, +) + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=HardSigmoid, + hard_swish=HardSwish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=MishJit, +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=MishMe, + hard_swish=HardSwishMe, + hard_sigmoid=HardSigmoidMe +) + +_OVERRIDE_FN = dict() +_OVERRIDE_LAYER = dict() + + +def add_override_act_fn(name, fn): + global _OVERRIDE_FN + _OVERRIDE_FN[name] = fn + + +def update_override_act_fn(overrides): + assert isinstance(overrides, dict) + global _OVERRIDE_FN + _OVERRIDE_FN.update(overrides) + + +def clear_override_act_fn(): + global _OVERRIDE_FN + _OVERRIDE_FN = dict() + + +def add_override_act_layer(name, fn): + _OVERRIDE_LAYER[name] = fn + + +def update_override_act_layer(overrides): + assert isinstance(overrides, dict) + global _OVERRIDE_LAYER + _OVERRIDE_LAYER.update(overrides) + + +def clear_override_act_layer(): + global _OVERRIDE_LAYER + _OVERRIDE_LAYER = dict() + + +def get_act_fn(name='relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if name in _OVERRIDE_FN: + return _OVERRIDE_FN[name] + use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit()) + if use_me and name in _ACT_FN_ME: + # If not exporting or scripting the model, first look for a memory optimized version + # activation with custom autograd, then fallback to jit scripted, then a Python or Torch builtin + return _ACT_FN_ME[name] + if config.is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return swish + use_jit = not (config.is_exportable() or config.is_no_jit()) + # NOTE: export tracing should work with jit scripted components, but I keep running into issues + if use_jit and name in _ACT_FN_JIT: # jit scripted models should be okay for export/scripting + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name='relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
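+
+    An illustrative usage sketch:
+
+        >>> act_layer = get_act_layer('hard_swish')
+        >>> act = act_layer(inplace=True)  # HardSwishMe / HardSwish / etc., depending on config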
+ """ + if name in _OVERRIDE_LAYER: + return _OVERRIDE_LAYER[name] + use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit()) + if use_me and name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if config.is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return Swish + use_jit = not (config.is_exportable() or config.is_no_jit()) + # NOTE: export tracing should work with jit scripted components, but I keep running into issues + if use_jit and name in _ACT_FN_JIT: # jit scripted models should be okay for export/scripting + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..bdea692d1397673b2513d898c33edbcb37d94240 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations.py @@ -0,0 +1,102 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3) + and also as Swish (https://arxiv.org/abs/1710.05941). + + TODO Rename to SiLU with addition to PyTorch + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return mish(x, self.inplace) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) 
+ return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. + + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_jit.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..7176b05e779787528a47f20d55d64d4a0f219360 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_jit.py @@ -0,0 +1,79 @@ +""" Activations (jit) + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + +__all__ = ['swish_jit', 'SwishJit', 'mish_jit', 'MishJit', + 'hard_sigmoid_jit', 'HardSigmoidJit', 'hard_swish_jit', 'HardSwishJit'] + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3) + and also as Swish (https://arxiv.org/abs/1710.05941). + + TODO Rename to SiLU with addition to PyTorch + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
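+ +# NOTE: newer PyTorch releases also ship F.hardsigmoid / F.hardswish with the same clamp-based +# semantics as the jit variants above; these are kept for compatibility with older torch.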
+ + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_me.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_me.py new file mode 100644 index 0000000000000000000000000000000000000000..e91df5a50fdbe40bc386e2541a4fda743ad95e9a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_me.py @@ -0,0 +1,174 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model; please use either +the JIT or basic versions of the activations. + +Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +__all__ = ['swish_me', 'SwishMe', 'mish_me', 'MishMe', + 'hard_sigmoid_me', 'HardSigmoidMe', 'hard_swish_me', 'HardSwishMe'] + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Paszke + https://twitter.com/jeremyphoward/status/1188251041835315200 + + Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3) + and also as Swish (https://arxiv.org/abs/1710.05941). + + TODO Rename to SiLU with addition to PyTorch + """ + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.)
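+ +# The backward below is the analytic derivative of the clamped forward above: +# d/dx hard_sigmoid(x) = 1/6 for x in [-3, 3] and 0 elsewhere.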
+ + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/config.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/config.py new file mode 100644 index 0000000000000000000000000000000000000000..27d5307fd9ee0246f1e35f41520f17385d23f1dd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/config.py @@ -0,0 +1,123 @@ +""" Global layer config state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', 'layer_config_kwargs', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. 
+_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False + + +def layer_config_kwargs(kwargs): + """ Consume config kwargs and return contextmgr obj """ + return set_layer_config( + scriptable=kwargs.pop('scriptable', None), + exportable=kwargs.pop('exportable', None), + no_jit=kwargs.pop('no_jit', None)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/conv2d_layers.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/conv2d_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..d8467460c4b36e54c83ce2dcd3ebe91d3432cad2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/conv2d_layers.py @@ -0,0 +1,304 @@ +""" Conv2D w/ SAME padding, CondConv, MixedConv + +A collection of conv layers and padding helpers needed by EfficientNet, MixNet, and +MobileNetV3 models that maintain weight compatibility with original Tensorflow models. 
+ +Copyright 2020 Ross Wightman +""" +import collections.abc +import math +from functools import partial +from itertools import repeat +from typing import Tuple, Optional + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .config import * + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +_single = _ntuple(1) +_pair = _ntuple(2) +_triple = _ntuple(3) +_quadruple = _ntuple(4) + + +def _is_static_pad(kernel_size, stride=1, dilation=1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +def _get_padding(kernel_size, stride=1, dilation=1, **_): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +def _calc_same_pad(i: int, k: int, s: int, d: int): + return max((-(i // -s) - 1) * s + (k - 1) * d + 1 - i, 0) + + +def _same_pad_arg(input_size, kernel_size, stride, dilation): + ih, iw = input_size + kh, kw = kernel_size + pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0]) + pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1]) + return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + ih, iw = x.size()[-2:] + kh, kw = weight.size()[-2:] + pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0]) + pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1]) + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + # pylint: disable=unused-argument + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class Conv2dSameExport(nn.Conv2d): + """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions + + NOTE: This does not currently work with torch.jit.script + """ + + # pylint: disable=unused-argument + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSameExport, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + self.pad = None + self.pad_input_size = (0, 0) + + def forward(self, x): + input_size = x.size()[-2:] + if self.pad is None: + pad_arg = _same_pad_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) + self.pad = nn.ZeroPad2d(pad_arg) + self.pad_input_size = input_size + + if self.pad is not None: + x = self.pad(x) + return F.conv2d( + x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def get_padding_value(padding, kernel_size, **kwargs): + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = 
padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if _is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = _get_padding(kernel_size, **kwargs) + else: + # dynamic padding + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = _get_padding(kernel_size, **kwargs) + return padding, dynamic + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + if is_exportable(): + assert not is_scriptable() + return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) + else: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = out_ch if depthwise else 1 + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [conv(x_split[i]) for i, conv in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditional Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + 
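# TF 'same' padding can depend on the input size, so it cannot always be baked into the conv; + # record whether forward() must instead pad dynamically (via conv2d_same). +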
self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = _pair(padding_val) + self.dilation = _pair(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out + + +def select_conv2d(in_chs, out_chs, kernel_size, **kwargs): + assert 'groups' not in kwargs # only use 'depthwise' bool arg + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
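+ # e.g. (illustrative) select_conv2d(16, 32, [3, 5], depthwise=True) builds a MixedConv2d with + # one 3x3 and one 5x5 depthwise group over the split channels.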
+ m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + return m diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/efficientnet_builder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/efficientnet_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..95dd63d400e70d70664c5a433a2772363f865e61 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/efficientnet_builder.py @@ -0,0 +1,683 @@ +""" EfficientNet / MobileNetV3 Blocks and Builder + +Copyright 2020 Ross Wightman +""" +import re +from copy import deepcopy + +from .conv2d_layers import * +from geffnet.activations import * + +__all__ = ['get_bn_args_tf', 'resolve_bn_args', 'resolve_se_args', 'resolve_act_layer', 'make_divisible', + 'round_channels', 'drop_connect', 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', + 'InvertedResidual', 'CondConvResidual', 'EdgeResidual', 'EfficientNetBuilder', 'decode_arch_def', + 'initialize_weight_default', 'initialize_weight_goog', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT' +] + +# Defaults used for Google/Tensorflow training of mobile networks w/ RMSprop as per +# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (w/ .999 in search space) for paper +# +# PyTorch defaults are momentum = .1, eps = 1e-5 +# +BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) + + +def get_bn_args_tf(): + return _BN_ARGS_TF.copy() + + +def resolve_bn_args(kwargs): + bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +_SE_ARGS_DEFAULT = dict( + gate_fn=sigmoid, + act_layer=None, # None == use containing block's activation layer + reduce_mid=False, + divisor=1) + + +def resolve_se_args(kwargs, in_chs, act_layer=None): + se_kwargs = kwargs.copy() if kwargs is not None else {} + # fill in args that aren't specified with the defaults + for k, v in _SE_ARGS_DEFAULT.items(): + se_kwargs.setdefault(k, v) + # some models, like MobileNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch + if not se_kwargs.pop('reduce_mid'): + se_kwargs['reduced_base_chs'] = in_chs + # act_layer override, if it remains None, the containing block's act_layer will be used + if se_kwargs['act_layer'] is None: + assert act_layer is not None + se_kwargs['act_layer'] = act_layer + return se_kwargs + + +def resolve_act_layer(kwargs, default='relu'): + act_layer = kwargs.pop('act_layer', default) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + return act_layer + + +def make_divisible(v: int, divisor: int = 8, min_value=None): + min_value = min_value or divisor + new_v = max(min_value, int(v +
divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: # ensure round down does not go down by more than 10%. + new_v += divisor + return new_v + + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + channels *= multiplier + return make_divisible(channels, divisor, channel_min) + + +def drop_connect(inputs, training: bool = False, drop_connect_rate: float = 0.): + """Apply drop connect.""" + if not training: + return inputs + + keep_prob = 1 - drop_connect_rate + random_tensor = keep_prob + torch.rand( + (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device) + random_tensor.floor_() # binarize + output = inputs.div(keep_prob) * random_tensor + return output + + +class SqueezeExcite(nn.Module): + + def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1): + super(SqueezeExcite, self).__init__() + reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) + self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) + self.act1 = act_layer(inplace=True) + self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) + self.gate_fn = gate_fn + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + x = x * self.gate_fn(x_se) + return x + + +class ConvBnAct(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, + stride=1, pad_type='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(ConvBnAct, self).__init__() + assert stride in [1, 2] + norm_kwargs = norm_kwargs or {} + self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type) + self.bn1 = norm_layer(out_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + return x + + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion + factor of 1.0. This is an alternative to having an IR with optional first pw conv.
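+ Layout: dw conv -> BN -> act -> optional SE -> pw conv -> BN (-> act only if pw_act).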
+ """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, + pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + assert stride in [1, 2] + norm_kwargs = norm_kwargs or {} + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.drop_connect_rate = drop_connect_rate + + self.conv_dw = select_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if se_ratio is not None and se_ratio > 0.: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = nn.Identity() + + self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) if pw_act else nn.Identity() + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + conv_kwargs=None, drop_connect_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs: int = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_connect_rate = drop_connect_rate + + # Point-wise expansion + self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = select_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if se_ratio is not None and se_ratio > 0.: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = nn.Identity() # for jit.script compat + + # Point-wise linear projection + self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted 
residual block w/ CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + num_experts=0, drop_connect_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs, + norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs, + drop_connect_rate=drop_connect_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + residual = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + return x + + +class EdgeResidual(nn.Module): + """ EdgeTPU Residual block with expansion convolution followed by pointwise-linear w/ stride""" + + def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0, + stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.): + super(EdgeResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + mid_chs = make_divisible(fake_in_chs * exp_ratio) if fake_in_chs > 0 else make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_connect_rate = drop_connect_rate + + # Expansion convolution + self.conv_exp = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if se_ratio is not None and se_ratio > 0.: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = nn.Identity() + + # Point-wise linear projection + self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, stride=stride, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + + def forward(self, x): + residual = x + + # Expansion convolution + x = self.conv_exp(x) + x = self.bn1(x) + x = self.act1(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn2(x) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + + return x + + +class EfficientNetBuilder: + """ Build Trunk Blocks for Efficient/Mobile Networks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and +
https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_connect_rate = drop_connect_rate + + # updated during build + self.in_chs = None + self.block_idx = 0 + self.block_count = 0 + + def _round_channels(self, chs): + return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba): + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input filters for EdgeTPU + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count + ba['se_kwargs'] = self.se_kwargs + if ba.get('num_experts', 0) > 0: + block = CondConvResidual(**ba) + else: + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count + ba['se_kwargs'] = self.se_kwargs + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count + ba['se_kwargs'] = self.se_kwargs + block = EdgeResidual(**ba) + elif bt == 'cn': + block = ConvBnAct(**ba) + else: + assert False, 'Unknown block type (%s) while building model.'
% bt + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def _make_stack(self, stack_args): + blocks = [] + # each stack (stage) contains a list of block arguments + for i, ba in enumerate(stack_args): + if i >= 1: + # only the first block in any stack can have a stride > 1 + ba['stride'] = 1 + block = self._make_block(ba) + blocks.append(block) + self.block_idx += 1 # incr global idx (across all stacks) + return nn.Sequential(*blocks) + + def __call__(self, in_chs, block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + self.in_chs = in_chs + self.block_count = sum([len(x) for x in block_args]) + self.block_idx = 0 + blocks = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stack_idx, stack in enumerate(block_args): + assert isinstance(stack, list) + stack = self._make_stack(stack) + blocks.append(stack) + return blocks + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str): + """ Decode block definition string + + Gets a list of block args (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, + er = EdgeResidual, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments.
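+ For example (illustrative), 'ir_r2_k3_s2_e6_c64_se0.25' decodes to InvertedResidual args + with a 3x3 dw kernel, stride 2, expansion ratio 6, 64 output channels, SE ratio 0.25, + and a repeat count of 2.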
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + fake_in_chs=fake_in_chs, + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. 
This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + + +def initialize_weight_goog(m, n='', fix_group_fanout=True): + # weight init as per Tensorflow Official impl + # https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def initialize_weight_default(m, n=''): + if isinstance(m, CondConv2d): + init_fn = get_condconv_initializer(partial( + nn.init.kaiming_normal_, mode='fan_out', nonlinearity='relu'), m.num_experts, m.weight_shape) + init_fn(m.weight) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + 
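# unlike initialize_weight_goog above, there is no routing_fn special case here +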
nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear') diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/gen_efficientnet.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/gen_efficientnet.py new file mode 100644 index 0000000000000000000000000000000000000000..cd170d4cc5bed6ca82b61539902b470d3320c691 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/gen_efficientnet.py @@ -0,0 +1,1450 @@ +""" Generic Efficient Networks + +A generic MobileNet class with building blocks to support a variety of models: + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* EfficientNet-Lite + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* And likely more... + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F + +from .config import layer_config_kwargs, is_scriptable +from .conv2d_layers import select_conv2d +from .helpers import load_pretrained +from .efficientnet_builder import * + +__all__ = ['GenEfficientNet', 'mnasnet_050', 'mnasnet_075', 'mnasnet_100', 'mnasnet_b1', 'mnasnet_140', + 'semnasnet_050', 'semnasnet_075', 'semnasnet_100', 'mnasnet_a1', 'semnasnet_140', 'mnasnet_small', + 'mobilenetv2_100', 'mobilenetv2_140', 'mobilenetv2_110d', 'mobilenetv2_120d', + 'fbnetc_100', 'spnasnet_100', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', + 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8', + 'efficientnet_l2', 'efficientnet_es', 'efficientnet_em', 'efficientnet_el', + 'efficientnet_cc_b0_4e', 'efficientnet_cc_b0_8e', 'efficientnet_cc_b1_8e', + 'efficientnet_lite0', 'efficientnet_lite1', 'efficientnet_lite2', 'efficientnet_lite3', 'efficientnet_lite4', + 'tf_efficientnet_b0', 'tf_efficientnet_b1', 'tf_efficientnet_b2', 'tf_efficientnet_b3', + 'tf_efficientnet_b4', 'tf_efficientnet_b5', 'tf_efficientnet_b6', 'tf_efficientnet_b7', 'tf_efficientnet_b8', + 'tf_efficientnet_b0_ap', 'tf_efficientnet_b1_ap', 'tf_efficientnet_b2_ap', 'tf_efficientnet_b3_ap', + 'tf_efficientnet_b4_ap', 'tf_efficientnet_b5_ap', 'tf_efficientnet_b6_ap', 'tf_efficientnet_b7_ap', + 'tf_efficientnet_b8_ap', 'tf_efficientnet_b0_ns', 'tf_efficientnet_b1_ns', 'tf_efficientnet_b2_ns', + 'tf_efficientnet_b3_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5_ns', 'tf_efficientnet_b6_ns', + 'tf_efficientnet_b7_ns', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', + 'tf_efficientnet_es', 
'tf_efficientnet_em', 'tf_efficientnet_el', + 'tf_efficientnet_cc_b0_4e', 'tf_efficientnet_cc_b0_8e', 'tf_efficientnet_cc_b1_8e', + 'tf_efficientnet_lite0', 'tf_efficientnet_lite1', 'tf_efficientnet_lite2', 'tf_efficientnet_lite3', + 'tf_efficientnet_lite4', + 'mixnet_s', 'mixnet_m', 'mixnet_l', 'mixnet_xl', 'tf_mixnet_s', 'tf_mixnet_m', 'tf_mixnet_l'] + + +model_urls = { + 'mnasnet_050': None, + 'mnasnet_075': None, + 'mnasnet_100': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', + 'mnasnet_140': None, + 'mnasnet_small': None, + + 'semnasnet_050': None, + 'semnasnet_075': None, + 'semnasnet_100': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', + 'semnasnet_140': None, + + 'mobilenetv2_100': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', + 'mobilenetv2_110d': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', + 'mobilenetv2_120d': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', + 'mobilenetv2_140': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', + + 'fbnetc_100': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + 'spnasnet_100': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + + 'efficientnet_b0': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', + 'efficientnet_b1': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + 'efficientnet_b2': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + 'efficientnet_b3': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + 'efficientnet_b4': None, + 'efficientnet_b5': None, + 'efficientnet_b6': None, + 'efficientnet_b7': None, + 'efficientnet_b8': None, + 'efficientnet_l2': None, + + 'efficientnet_es': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', + 'efficientnet_em': None, + 'efficientnet_el': None, + + 'efficientnet_cc_b0_4e': None, + 'efficientnet_cc_b0_8e': None, + 'efficientnet_cc_b1_8e': None, + + 'efficientnet_lite0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', + 'efficientnet_lite1': None, + 'efficientnet_lite2': None, + 'efficientnet_lite3': None, + 'efficientnet_lite4': None, + + 'tf_efficientnet_b0': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + 'tf_efficientnet_b1': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + 'tf_efficientnet_b2': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + 'tf_efficientnet_b3': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + 'tf_efficientnet_b4': + 
'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + 'tf_efficientnet_b5': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + 'tf_efficientnet_b6': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + 'tf_efficientnet_b7': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + 'tf_efficientnet_b8': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + + 'tf_efficientnet_b0_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + 'tf_efficientnet_b1_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + 'tf_efficientnet_b2_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + 'tf_efficientnet_b3_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + 'tf_efficientnet_b4_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + 'tf_efficientnet_b5_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + 'tf_efficientnet_b6_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + 'tf_efficientnet_b7_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + 'tf_efficientnet_b8_ap': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + + 'tf_efficientnet_b0_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + 'tf_efficientnet_b1_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + 'tf_efficientnet_b2_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + 'tf_efficientnet_b3_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + 'tf_efficientnet_b4_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + 'tf_efficientnet_b5_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + 'tf_efficientnet_b6_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + 'tf_efficientnet_b7_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + 'tf_efficientnet_l2_ns_475': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + 'tf_efficientnet_l2_ns': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + + 'tf_efficientnet_es': + 
'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + 'tf_efficientnet_em': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + 'tf_efficientnet_el': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + + 'tf_efficientnet_cc_b0_4e': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + 'tf_efficientnet_cc_b0_8e': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + 'tf_efficientnet_cc_b1_8e': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + + 'tf_efficientnet_lite0': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + 'tf_efficientnet_lite1': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + 'tf_efficientnet_lite2': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + 'tf_efficientnet_lite3': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + 'tf_efficientnet_lite4': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + + 'mixnet_s': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', + 'mixnet_m': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', + 'mixnet_l': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', + 'mixnet_xl': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', + + 'tf_mixnet_s': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', + 'tf_mixnet_m': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', + 'tf_mixnet_l': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', +} + + +class GenEfficientNet(nn.Module): + """ Generic EfficientNets + + An implementation of mobile optimized networks that covers: + * EfficientNet (B0-B8, L2, CondConv, EdgeTPU) + * MixNet (Small, Medium, and Large, XL) + * MNASNet A1, B1, and small + * FBNet C + * Single-Path NAS Pixel1 + """ + + def __init__(self, block_args, num_classes=1000, in_chans=3, num_features=1280, stem_size=32, fix_stem=False, + channel_multiplier=1.0, channel_divisor=8, channel_min=None, + pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_connect_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + weight_init='goog'): + super(GenEfficientNet, self).__init__() + self.drop_rate = drop_rate + + if not fix_stem: + stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min) + self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + in_chs = stem_size + + builder = EfficientNetBuilder( + channel_multiplier, 
channel_divisor, channel_min, + pad_type, act_layer, se_kwargs, norm_layer, norm_kwargs, drop_connect_rate) + self.blocks = nn.Sequential(*builder(in_chs, block_args)) + in_chs = builder.in_chs + + self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type) + self.bn2 = norm_layer(num_features, **norm_kwargs) + self.act2 = act_layer(inplace=True) + self.global_pool = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(num_features, num_classes) + + for n, m in self.named_modules(): + if weight_init == 'goog': + initialize_weight_goog(m, n) + else: + initialize_weight_default(m, n) + + def features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([ + self.conv_head, self.bn2, self.act2, + self.global_pool, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def forward(self, x): + x = self.features(x) + x = self.global_pool(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +def _create_model(model_kwargs, variant, pretrained=False): + as_sequential = model_kwargs.pop('as_sequential', False) + model = GenEfficientNet(**model_kwargs) + if pretrained: + load_pretrained(model, model_urls[variant]) + if as_sequential: + model = model.as_sequential() + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
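+
+    Example (a minimal sketch; _create_model only consults model_urls when
+    pretrained=True, so any variant string works for a randomly initialized
+    model)::
+
+        import torch
+        model = _gen_mnasnet_b1('mnasnet_100', channel_multiplier=1.0)
+        out = model(torch.randn(1, 3, 224, 224))  # logits, shape [1, 1000]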
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + fix_stem=fix_stem_head, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=nn.ReLU6, + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but 
is not 100% clear + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + 
act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an efficientnet-condconv model.""" + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + channel_multiplier=channel_multiplier, + act_layer=nn.ReLU6, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. 
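+
+    Block strings such as 'ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw' are consumed by
+    decode_arch_def: ds/ir/er/cn select the block type, r=repeats, k=kernel
+    size(s) ('.'-separated for mixed kernels), s=stride, e=expansion ratio,
+    c=output channels, se=squeeze-excite ratio, 'noskip' drops the residual,
+    and a trailing n<act> token ('nre' relu, 'nsw' swish) overrides the stage
+    activation; the a/p tokens appear to split the expansion/projection 1x1
+    convs into kernel groups the same way (a reading of the builder, not
+    upstream documentation).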
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'relu'), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. 
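+    This is a thin alias that forwards directly to mnasnet_100().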
""" + return mnasnet_100(pretrained, **kwargs) + + +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train set drop_rate=0.2, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train set drop_rate=0.2, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b2', 
channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train set drop_rate=0.4, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train set drop_rate=0.4, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train set drop_rate=0.5, drop_connect_rate=0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2. """ + # NOTE for train, drop_rate should be 0.5 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. 
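+    Shares EfficientNet-B3's 1.2/1.4 channel/depth scaling, but is built from
+    the ReLU edge-residual stage layout of _gen_efficientnet_edge; its head
+    width works out to round_channels(1280, 1.2, 8) = 1536 features.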
""" + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 AutoAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 AutoAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 AutoAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 AutoAug. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 AutoAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 RandAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 AutoAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 RandAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 RandAug. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. 
Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant + Paper: Adversarial Examples Improve Image Recognition (https://arxiv.org/abs/1911.09665) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. 
Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. 
Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant + Paper: Self-training with Noisy Student improves ImageNet classification (https://arxiv.org/abs/1911.04252) + """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + # NOTE for train set drop_rate=0.2 + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + # NOTE for train set drop_rate=0.25 + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + # NOTE for train set drop_rate=0.25 + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + # NOTE for train set drop_rate=0.25, drop_connect_rate=0.2 + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + # NOTE for train set drop_rate=0.3, drop_connect_rate=0.2 + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/helpers.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..3f83a07d690c7ad681c777c19b1e7a5bb95da007 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/helpers.py @@ -0,0 +1,71 @@ +""" Checkpoint loading / state_dict helpers +Copyright 2020 Ross Wightman +""" +import torch +import os +from collections import OrderedDict +try: + from torch.hub import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url + + +def load_checkpoint(model, checkpoint_path): + if checkpoint_path and os.path.isfile(checkpoint_path): + print("=> Loading checkpoint '{}'".format(checkpoint_path)) + checkpoint = torch.load(checkpoint_path) + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + if k.startswith('module'): + name = k[7:] # remove `module.` + else: + name = k + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + else: + model.load_state_dict(checkpoint) + print("=> Loaded checkpoint '{}'".format(checkpoint_path)) + else: + print("=> Error: No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_pretrained(model, url, filter_fn=None, strict=True): + if not url: + print("=> Warning: Pretrained model URL is empty, using random initialization.") + return + + state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu') + + input_conv = 'conv_stem' + classifier = 'classifier' + in_chans = getattr(model, input_conv).weight.shape[1] + num_classes = getattr(model, classifier).weight.shape[0] + + input_conv_weight = input_conv + '.weight' + pretrained_in_chans = state_dict[input_conv_weight].shape[1] + if in_chans != pretrained_in_chans: + if in_chans == 1: + print('=> Converting pretrained input conv {} from {} to 1 channel'.format( + input_conv_weight, pretrained_in_chans)) + conv1_weight = state_dict[input_conv_weight] + state_dict[input_conv_weight] = conv1_weight.sum(dim=1, keepdim=True) + else: + print('=> Discarding pretrained input conv {} since input channel count != {}'.format( + input_conv_weight, pretrained_in_chans)) + del state_dict[input_conv_weight] + strict = False + + classifier_weight = classifier + '.weight' + pretrained_num_classes = state_dict[classifier_weight].shape[0] + if num_classes != pretrained_num_classes: + print('=> Discarding pretrained classifier since num_classes != {}'.format(pretrained_num_classes)) + del state_dict[classifier_weight] + del state_dict[classifier + '.bias'] + strict = False + + if filter_fn is not None: + state_dict = filter_fn(state_dict) + + model.load_state_dict(state_dict, strict=strict) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/mobilenetv3.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/mobilenetv3.py new file mode 100644 index 
0000000000000000000000000000000000000000..b5966c28f7207e98ee50745b1bc8f3663c650f9d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/mobilenetv3.py
@@ -0,0 +1,364 @@
+""" MobileNet-V3
+
+A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl.
+
+Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .activations import get_act_fn, get_act_layer, HardSwish
+from .config import layer_config_kwargs
+from .conv2d_layers import select_conv2d
+from .helpers import load_pretrained
+from .efficientnet_builder import *
+
+__all__ = ['mobilenetv3_rw', 'mobilenetv3_large_075', 'mobilenetv3_large_100', 'mobilenetv3_large_minimal_100',
+           'mobilenetv3_small_075', 'mobilenetv3_small_100', 'mobilenetv3_small_minimal_100',
+           'tf_mobilenetv3_large_075', 'tf_mobilenetv3_large_100', 'tf_mobilenetv3_large_minimal_100',
+           'tf_mobilenetv3_small_075', 'tf_mobilenetv3_small_100', 'tf_mobilenetv3_small_minimal_100']
+
+model_urls = {
+    'mobilenetv3_rw':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
+    'mobilenetv3_large_075': None,
+    'mobilenetv3_large_100':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth',
+    'mobilenetv3_large_minimal_100': None,
+    'mobilenetv3_small_075': None,
+    'mobilenetv3_small_100': None,
+    'mobilenetv3_small_minimal_100': None,
+    'tf_mobilenetv3_large_075':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
+    'tf_mobilenetv3_large_100':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
+    'tf_mobilenetv3_large_minimal_100':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
+    'tf_mobilenetv3_small_075':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
+    'tf_mobilenetv3_small_100':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
+    'tf_mobilenetv3_small_minimal_100':
+        'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
+}
+
+
+class MobileNetV3(nn.Module):
+    """ MobileNet-V3
+
+    This model utilizes the MobileNet-v3 specific 'efficient head', where global pooling is done before the
+    head convolution without a final batch-norm layer before the classifier.
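+
+    Compared with GenEfficientNet (conv_head -> bn2 -> act2, then pooling), the
+    features() below run global_pool first and then a 1x1 conv_head with no
+    batch norm after it, so the head convolution only ever sees a 1x1 feature map.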
+
+    Paper: https://arxiv.org/abs/1905.02244
+    """
+
+    def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True,
+                 channel_multiplier=1.0, pad_type='', act_layer=HardSwish, drop_rate=0., drop_connect_rate=0.,
+                 se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'):
+        super(MobileNetV3, self).__init__()
+        self.drop_rate = drop_rate
+
+        stem_size = round_channels(stem_size, channel_multiplier)
+        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
+        self.bn1 = nn.BatchNorm2d(stem_size, **norm_kwargs)
+        self.act1 = act_layer(inplace=True)
+        in_chs = stem_size
+
+        builder = EfficientNetBuilder(
+            channel_multiplier, pad_type=pad_type, act_layer=act_layer, se_kwargs=se_kwargs,
+            norm_layer=norm_layer, norm_kwargs=norm_kwargs, drop_connect_rate=drop_connect_rate)
+        self.blocks = nn.Sequential(*builder(in_chs, block_args))
+        in_chs = builder.in_chs
+
+        self.global_pool = nn.AdaptiveAvgPool2d(1)
+        self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type, bias=head_bias)
+        self.act2 = act_layer(inplace=True)
+        self.classifier = nn.Linear(num_features, num_classes)
+
+        for m in self.modules():
+            if weight_init == 'goog':
+                initialize_weight_goog(m)
+            else:
+                initialize_weight_default(m)
+
+    def as_sequential(self):
+        layers = [self.conv_stem, self.bn1, self.act1]
+        layers.extend(self.blocks)
+        layers.extend([
+            self.global_pool, self.conv_head, self.act2,
+            nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
+        return nn.Sequential(*layers)
+
+    def features(self, x):
+        x = self.conv_stem(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+        x = self.blocks(x)
+        x = self.global_pool(x)
+        x = self.conv_head(x)
+        x = self.act2(x)
+        return x
+
+    def forward(self, x):
+        x = self.features(x)
+        x = x.flatten(1)
+        if self.drop_rate > 0.:
+            x = F.dropout(x, p=self.drop_rate, training=self.training)
+        return self.classifier(x)
+
+
+def _create_model(model_kwargs, variant, pretrained=False):
+    as_sequential = model_kwargs.pop('as_sequential', False)
+    model = MobileNetV3(**model_kwargs)
+    if pretrained and model_urls[variant]:
+        load_pretrained(model, model_urls[variant])
+    if as_sequential:
+        model = model.as_sequential()
+    return model
+
+
+def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """Creates a MobileNet-V3 model (RW variant).
+
+    Paper: https://arxiv.org/abs/1905.02244
+
+    This was my first attempt at reproducing the MobileNet-V3 from paper alone. It came close to the
+    eventual Tensorflow reference impl but has a few differences:
+    1. This model has no bias on the head convolution
+    2. This model forces no residual (noskip) on the first DWS block, this is different than MnasNet
+    3. This model always uses ReLU for the SE activation layer, other models in the family inherit their act layer
+       from their parent block
+    4. This model does not enforce divisible by 8 limitation on the SE reduction channel count
+
+    Overall the changes are fairly minor and result in a very small parameter count difference and no
+    top-1/5 accuracy difference of note.
+
+    Args:
+      channel_multiplier: multiplier to number of channels per layer.
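+
+    Example (a minimal sketch; the mobilenetv3_rw() wrapper below additionally
+    sets bn_eps to BN_EPS_TF_DEFAULT when loading the pretrained weights)::
+
+        model = _gen_mobilenet_v3_rw('mobilenetv3_rw', channel_multiplier=1.0)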
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, # one of my mistakes + channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 large/small/minimal models. + + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v3.py + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = 'relu' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = 'hard_swish' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = 'relu' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = 'hard_swish' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + with layer_config_kwargs(kwargs): + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=16, + 
channel_multiplier=channel_multiplier, + act_layer=resolve_act_layer(kwargs, act_layer), + se_kwargs=dict( + act_layer=get_act_layer('relu'), gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True, divisor=8), + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, variant, pretrained) + return model + + +def mobilenetv3_rw(pretrained=False, **kwargs): + """ MobileNet-V3 RW + Attn: See note in gen function for this variant. + """ + # NOTE for train set drop_rate=0.2 + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 Large 0.75""" + # NOTE for train set drop_rate=0.2 + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 Large 1.0 """ + # NOTE for train set drop_rate=0.2 + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 Large (Minimalistic) 1.0 """ + # NOTE for train set drop_rate=0.2 + model = _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 Small 0.75 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 Small 1.0 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 Small (Minimalistic) 1.0 """ + model = _gen_mobilenet_v3('mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 Large 0.75. Tensorflow compat variant. """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 Large 1.0. Tensorflow compat variant. """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 Large Minimalistic 1.0. Tensorflow compat variant. """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 Small 0.75. Tensorflow compat variant. """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 Small 1.0. 
Tensorflow compat variant.""" + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 Small Minimalistic 1.0. Tensorflow compat variant. """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..4d46ea8baedaf3d787826eb3bb314b4230514647 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py @@ -0,0 +1,27 @@ +from .config import set_layer_config +from .helpers import load_checkpoint + +from .gen_efficientnet import * +from .mobilenetv3 import * + + +def create_model( + model_name='mnasnet_100', + pretrained=None, + num_classes=1000, + in_chans=3, + checkpoint_path='', + **kwargs): + + model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs) + + if model_name in globals(): + create_fn = globals()[model_name] + model = create_fn(**model_kwargs) + else: + raise RuntimeError('Unknown model (%s)' % model_name) + + if checkpoint_path and not pretrained: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/version.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/version.py new file mode 100644 index 0000000000000000000000000000000000000000..a6221b3de7b1490c5e712e8b5fcc94c3d9d04295 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/version.py @@ -0,0 +1 @@ +__version__ = '1.0.2' diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/hubconf.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/hubconf.py new file mode 100644 index 0000000000000000000000000000000000000000..45b17b99bbeba34596569e6e50f6e8a2ebc45c54 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/hubconf.py @@ -0,0 +1,84 @@ +dependencies = ['torch', 'math'] + +from geffnet import efficientnet_b0 +from geffnet import efficientnet_b1 +from geffnet import efficientnet_b2 +from geffnet import efficientnet_b3 + +from geffnet import efficientnet_es + +from geffnet import efficientnet_lite0 + +from geffnet import mixnet_s +from geffnet import mixnet_m +from geffnet import mixnet_l +from geffnet import mixnet_xl + +from geffnet import mobilenetv2_100 +from geffnet import mobilenetv2_110d +from geffnet import mobilenetv2_120d +from geffnet import mobilenetv2_140 + +from geffnet import mobilenetv3_large_100 +from geffnet import mobilenetv3_rw +from geffnet import mnasnet_a1 +from geffnet import mnasnet_b1 +from geffnet import fbnetc_100 +from geffnet import spnasnet_100 + +from geffnet import tf_efficientnet_b0 +from 
geffnet import tf_efficientnet_b1 +from geffnet import tf_efficientnet_b2 +from geffnet import tf_efficientnet_b3 +from geffnet import tf_efficientnet_b4 +from geffnet import tf_efficientnet_b5 +from geffnet import tf_efficientnet_b6 +from geffnet import tf_efficientnet_b7 +from geffnet import tf_efficientnet_b8 + +from geffnet import tf_efficientnet_b0_ap +from geffnet import tf_efficientnet_b1_ap +from geffnet import tf_efficientnet_b2_ap +from geffnet import tf_efficientnet_b3_ap +from geffnet import tf_efficientnet_b4_ap +from geffnet import tf_efficientnet_b5_ap +from geffnet import tf_efficientnet_b6_ap +from geffnet import tf_efficientnet_b7_ap +from geffnet import tf_efficientnet_b8_ap + +from geffnet import tf_efficientnet_b0_ns +from geffnet import tf_efficientnet_b1_ns +from geffnet import tf_efficientnet_b2_ns +from geffnet import tf_efficientnet_b3_ns +from geffnet import tf_efficientnet_b4_ns +from geffnet import tf_efficientnet_b5_ns +from geffnet import tf_efficientnet_b6_ns +from geffnet import tf_efficientnet_b7_ns +from geffnet import tf_efficientnet_l2_ns_475 +from geffnet import tf_efficientnet_l2_ns + +from geffnet import tf_efficientnet_es +from geffnet import tf_efficientnet_em +from geffnet import tf_efficientnet_el + +from geffnet import tf_efficientnet_cc_b0_4e +from geffnet import tf_efficientnet_cc_b0_8e +from geffnet import tf_efficientnet_cc_b1_8e + +from geffnet import tf_efficientnet_lite0 +from geffnet import tf_efficientnet_lite1 +from geffnet import tf_efficientnet_lite2 +from geffnet import tf_efficientnet_lite3 +from geffnet import tf_efficientnet_lite4 + +from geffnet import tf_mixnet_s +from geffnet import tf_mixnet_m +from geffnet import tf_mixnet_l + +from geffnet import tf_mobilenetv3_large_075 +from geffnet import tf_mobilenetv3_large_100 +from geffnet import tf_mobilenetv3_large_minimal_100 +from geffnet import tf_mobilenetv3_small_075 +from geffnet import tf_mobilenetv3_small_100 +from geffnet import tf_mobilenetv3_small_minimal_100 + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_export.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_export.py new file mode 100644 index 0000000000000000000000000000000000000000..7a5162ce214830df501bdb81edb66c095122f69d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_export.py @@ -0,0 +1,120 @@ +""" ONNX export script + +Export PyTorch models as ONNX graphs. + +This export script originally started as an adaptation of code snippets found at +https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html + +The default parameters work with PyTorch 1.6 and ONNX 1.7 and produce an optimal ONNX graph +for hosting in the ONNX runtime (see onnx_validate.py). To export an ONNX model compatible +with caffe2 (see caffe2_benchmark.py and caffe2_validate.py), the --keep-init and --aten-fallback +flags are currently required. + +Older versions of PyTorch/ONNX (tested PyTorch 1.4, ONNX 1.5) do not need extra flags for +caffe2 compatibility, but they produce a model that isn't as fast when run on the ONNX runtime. + +Most new releases of PyTorch and ONNX cause some sort of breakage in the export / usage of ONNX models. +Please do your research and search the ONNX and PyTorch issue trackers before asking me. Thanks.
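
The entrypoints collected in hubconf.py above are loadable through torch.hub. A hedged usage sketch, mirroring the pretrained=False, source='local' pattern that encoder.py uses further down; the checkout path here is a placeholder, not a path from this repo:

```python
import torch

# 'path/to/efficientnet_repo' is a placeholder for a local checkout containing
# the hubconf.py above; source='local' skips any GitHub download.
model = torch.hub.load('path/to/efficientnet_repo', 'tf_efficientnet_b0',
                       pretrained=False, source='local')
model.eval()

with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])
```
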
+ +Copyright 2020 Ross Wightman +""" +import argparse +import torch +import numpy as np + +import onnx +import geffnet + +parser = argparse.ArgumentParser(description='PyTorch to ONNX export') +parser.add_argument('output', metavar='ONNX_FILE', + help='output model filename') +parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100', + help='model architecture (default: mobilenetv3_large_100)') +parser.add_argument('--opset', type=int, default=10, + help='ONNX opset to use (default: 10)') +parser.add_argument('--keep-init', action='store_true', default=False, + help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.') +parser.add_argument('--aten-fallback', action='store_true', default=False, + help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.') +parser.add_argument('--dynamic-size', action='store_true', default=False, + help='Export model with dynamic width/height. Not recommended for "tf" models with SAME padding.') +parser.add_argument('-b', '--batch-size', default=1, type=int, + metavar='N', help='mini-batch size (default: 1)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of dataset') +parser.add_argument('--num-classes', type=int, default=1000, + help='Number of classes in dataset') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to checkpoint (default: none)') + + +def main(): + args = parser.parse_args() + + args.pretrained = True + if args.checkpoint: + args.pretrained = False + + print("==> Creating PyTorch {} model".format(args.model)) + # NOTE exportable=True flag disables autofn/jit scripted activations and uses Conv2dSameExport layers + # for models using SAME padding + model = geffnet.create_model( + args.model, + num_classes=args.num_classes, + in_chans=3, + pretrained=args.pretrained, + checkpoint_path=args.checkpoint, + exportable=True) + + model.eval() + + example_input = torch.randn((args.batch_size, 3, args.img_size or 224, args.img_size or 224), requires_grad=True) + + # Run model once before export trace, sets padding for models with Conv2dSameExport. This means + # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for + # the input img_size specified in this script. + # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to + # issues in the tracing of the dynamic padding or errors attempting to export the model after jit + # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX version...
+ model(example_input) + + print("==> Exporting model to ONNX format at '{}'".format(args.output)) + input_names = ["input0"] + output_names = ["output0"] + dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} + if args.dynamic_size: + dynamic_axes['input0'][2] = 'height' + dynamic_axes['input0'][3] = 'width' + if args.aten_fallback: + export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK + else: + export_type = torch.onnx.OperatorExportTypes.ONNX + + torch_out = torch.onnx._export( + model, example_input, args.output, export_params=True, verbose=True, input_names=input_names, + output_names=output_names, keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes, + opset_version=args.opset, operator_export_type=export_type) + + print("==> Loading and checking exported model from '{}'".format(args.output)) + onnx_model = onnx.load(args.output) + onnx.checker.check_model(onnx_model) # assuming throw on error + print("==> Passed") + + if args.keep_init and args.aten_fallback: + import caffe2.python.onnx.backend as onnx_caffe2 + # Caffe2 loading only works properly in newer PyTorch/ONNX combos when + # keep_initializers_as_inputs and aten_fallback are set to True. + print("==> Loading model into Caffe2 backend and comparing forward pass.") + caffe2_backend = onnx_caffe2.prepare(onnx_model) + B = {onnx_model.graph.input[0].name: example_input.data.numpy()} + c2_out = caffe2_backend.run(B)[0] + np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5) + print("==> Passed") + + +if __name__ == '__main__': + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_optimize.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_optimize.py new file mode 100644 index 0000000000000000000000000000000000000000..ee20bbf9f0f9473370489512eb96ca0b570b5388 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_optimize.py @@ -0,0 +1,84 @@ +""" ONNX optimization script + +Run ONNX models through the optimizer to prune unneeded nodes, fuse batchnorm layers into conv, etc. + +NOTE: This isn't working consistently in recent PyTorch/ONNX combos (i.e. PyTorch 1.6 and ONNX 1.7), +it seems time to switch to using the onnxruntime online optimizer (which can also be saved for offline use).
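
The onnxruntime alternative this note points to is the same mechanism onnx_validate.py configures below. A minimal sketch, with placeholder file names:

```python
import onnxruntime

# Let onnxruntime optimize the graph at session creation and persist the
# result for offline reuse; 'model.onnx' / 'model.opt.onnx' are placeholders.
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.optimized_model_filepath = 'model.opt.onnx'
session = onnxruntime.InferenceSession('model.onnx', sess_options)
```
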
+ +Copyright 2020 Ross Wightman +""" +import argparse +import warnings + +import onnx +from onnx import optimizer + + +parser = argparse.ArgumentParser(description="Optimize ONNX model") + +parser.add_argument("model", help="The ONNX model") +parser.add_argument("--output", required=True, help="The optimized model output filename") + + +def traverse_graph(graph, prefix=''): + content = [] + indent = prefix + ' ' + graphs = [] + num_nodes = 0 + for node in graph.node: + pn, gs = onnx.helper.printable_node(node, indent, subgraphs=True) + assert isinstance(gs, list) + content.append(pn) + graphs.extend(gs) + num_nodes += 1 + for g in graphs: + g_count, g_str = traverse_graph(g) + content.append('\n' + g_str) + num_nodes += g_count + return num_nodes, '\n'.join(content) + + +def main(): + args = parser.parse_args() + onnx_model = onnx.load(args.model) + num_original_nodes, original_graph_str = traverse_graph(onnx_model.graph) + + # Optimizer passes to perform + passes = [ + #'eliminate_deadend', + 'eliminate_identity', + 'eliminate_nop_dropout', + 'eliminate_nop_pad', + 'eliminate_nop_transpose', + 'eliminate_unused_initializer', + 'extract_constant_to_initializer', + 'fuse_add_bias_into_conv', + 'fuse_bn_into_conv', + 'fuse_consecutive_concats', + 'fuse_consecutive_reduce_unsqueeze', + 'fuse_consecutive_squeezes', + 'fuse_consecutive_transposes', + #'fuse_matmul_add_bias_into_gemm', + 'fuse_pad_into_conv', + #'fuse_transpose_into_gemm', + #'lift_lexical_references', + ] + + # Apply the optimization on the original serialized model + # WARNING I've had issues with optimizer in recent versions of PyTorch / ONNX causing + # 'duplicate definition of name' errors, see: https://github.com/onnx/onnx/issues/2401 + # It may be better to rely on onnxruntime optimizations, see onnx_validate.py script. + warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX. " + "Try onnxruntime optimization if this doesn't work.") + optimized_model = optimizer.optimize(onnx_model, passes) + + num_optimized_nodes, optimized_graph_str = traverse_graph(optimized_model.graph) + print('==> The model after optimization:\n{}\n'.format(optimized_graph_str)) + print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes)) + + # Save the ONNX model + onnx.save(optimized_model, args.output) + + +if __name__ == "__main__": + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_to_caffe.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_to_caffe.py new file mode 100644 index 0000000000000000000000000000000000000000..44399aafababcdf6b84147a0613eb0909730db4b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_to_caffe.py @@ -0,0 +1,27 @@ +import argparse + +import onnx +from caffe2.python.onnx.backend import Caffe2Backend + + +parser = argparse.ArgumentParser(description="Convert ONNX to Caffe2") + +parser.add_argument("model", help="The ONNX model") +parser.add_argument("--c2-prefix", required=True, + help="The output file prefix for the caffe2 model init and predict file. 
") + + +def main(): + args = parser.parse_args() + onnx_model = onnx.load(args.model) + caffe2_init, caffe2_predict = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) + caffe2_init_str = caffe2_init.SerializeToString() + with open(args.c2_prefix + '.init.pb', "wb") as f: + f.write(caffe2_init_str) + caffe2_predict_str = caffe2_predict.SerializeToString() + with open(args.c2_prefix + '.predict.pb', "wb") as f: + f.write(caffe2_predict_str) + + +if __name__ == "__main__": + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_validate.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_validate.py new file mode 100644 index 0000000000000000000000000000000000000000..ab3e4fb141b6ef660dcc5b447fd9f368a2ea19a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_validate.py @@ -0,0 +1,112 @@ +""" ONNX-runtime validation script + +This script was created to verify accuracy and performance of exported ONNX +models running with the onnxruntime. It utilizes the PyTorch dataloader/processing +pipeline for a fair comparison against the originals. + +Copyright 2020 Ross Wightman +""" +import argparse +import numpy as np +import onnxruntime +from data import create_loader, resolve_data_config, Dataset +from utils import AverageMeter +import time + +parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--onnx-input', default='', type=str, metavar='PATH', + help='path to onnx model/weights file') +parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH', + help='path to output optimized onnx graph') +parser.add_argument('--profile', action='store_true', default=False, + help='Enable profiler output.') +parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', + help='number of data loading workers (default: 2)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', + help='Override default crop pct of 0.875') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', + help='use tensorflow mnasnet preporcessing') +parser.add_argument('--print-freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') + + +def main(): + args = parser.parse_args() + args.gpu_id = 0 + + # Set graph optimization level + sess_options = onnxruntime.SessionOptions() + sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + if args.profile: + sess_options.enable_profiling = True + if args.onnx_output_opt: + sess_options.optimized_model_filepath = args.onnx_output_opt + + session = onnxruntime.InferenceSession(args.onnx_input, 
sess_options) + + data_config = resolve_data_config(None, args) + loader = create_loader( + Dataset(args.data, load_bytes=args.tf_preprocessing), + input_size=data_config['input_size'], + batch_size=args.batch_size, + use_prefetcher=False, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + crop_pct=data_config['crop_pct'], + tensorflow_preprocessing=args.tf_preprocessing) + + input_name = session.get_inputs()[0].name + + batch_time = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + end = time.time() + for i, (input, target) in enumerate(loader): + # run the net and return prediction + output = session.run([], {input_name: input.data.numpy()}) + output = output[0] + + # measure accuracy and record loss + prec1, prec5 = accuracy_np(output, target.numpy()) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + print('Test: [{0}/{1}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, + ms_avg=1000 * batch_time.avg / input.size(0), top1=top1, top5=top5)) + + print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format( + top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg)) + + +def accuracy_np(output, target): + max_indices = np.argsort(output, axis=1)[:, ::-1] + top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean() + top1 = 100 * np.equal(max_indices[:, 0], target).mean() + return top1, top5 + + +if __name__ == '__main__': + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/requirements.txt b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac3ffc13bae15f9b11f7cbe3705760056ecd7f13 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/requirements.txt @@ -0,0 +1,2 @@ +torch>=1.2.0 +torchvision>=0.4.0 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/setup.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..023e4c30f98164595964423e3a83eefaf7ffdad6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/setup.py @@ -0,0 +1,47 @@ +""" Setup +""" +from setuptools import setup, find_packages +from codecs import open +from os import path + +here = path.abspath(path.dirname(__file__)) + +# Get the long description from the README file +with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + +exec(open('geffnet/version.py').read()) +setup( + name='geffnet', + version=__version__, + description='(Generic) EfficientNets for PyTorch', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/rwightman/gen-efficientnet-pytorch', + author='Ross Wightman', + 
author_email='hello@rwightman.com', + classifiers=[ + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + + # Note that this is a string of words separated by whitespace, not a list. + keywords='pytorch pretrained models efficientnet mixnet mobilenetv3 mnasnet', + packages=find_packages(exclude=['data']), + install_requires=['torch >= 1.4', 'torchvision'], + python_requires='>=3.6', +) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/utils.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d327e8bd8120c5cd09ae6c15c3991ccbe27f6c1f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/utils.py @@ -0,0 +1,52 @@ +import os + + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/validate.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/validate.py new file mode 100644 index 0000000000000000000000000000000000000000..5fd44fbb3165ef81ef81251b6299f6aaa80bf2c2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/validate.py @@ -0,0 +1,166 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import time +import torch +import torch.nn as nn +import torch.nn.parallel +from contextlib import suppress + +import geffnet +from data import Dataset, create_loader, resolve_data_config +from utils import accuracy, AverageMeter + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 
'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +torch.backends.cudnn.benchmark = True + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--model', '-m', metavar='MODEL', default='spnasnet_100', + help='model architecture (default: spnasnet_100)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of dataset') +parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', + help='Override default crop pct of 0.875') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--num-classes', type=int, default=1000, + help='Number of classes in dataset') +parser.add_argument('--print-freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model to torchscript for inference') +parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUs to use') +parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', + help='use tensorflow mnasnet preprocessing') +parser.add_argument('--no-cuda', dest='no_cuda', action='store_true', + help='run validation on the CPU even if CUDA is available') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--amp', action='store_true', default=False, + help='Use native Torch AMP mixed precision.') + + +def main(): + args = parser.parse_args() + + if not args.checkpoint and not args.pretrained: + args.pretrained = True + + amp_autocast = suppress # do nothing + if args.amp: + if not has_native_amp: + print("Native Torch AMP is not available (requires torch >= 1.6), using FP32.") + else: + amp_autocast = torch.cuda.amp.autocast + + # create model + model = geffnet.create_model( + args.model, + num_classes=args.num_classes, + in_chans=3, + pretrained=args.pretrained, + checkpoint_path=args.checkpoint, + scriptable=args.torchscript) + + if args.channels_last: + model = model.to(memory_format=torch.channels_last) + + if args.torchscript: + torch.jit.optimized_execution(True) + model = torch.jit.script(model) + + print('Model %s created, param count: %d' % + (args.model, sum([m.numel() for m in model.parameters()]))) + + data_config = resolve_data_config(model, args) + + criterion = nn.CrossEntropyLoss() + + if not args.no_cuda: + if args.num_gpu > 1: + model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() + else: + model = model.cuda() + criterion = criterion.cuda() + + loader = create_loader( 
+ Dataset(args.data, load_bytes=args.tf_preprocessing), + input_size=data_config['input_size'], + batch_size=args.batch_size, + use_prefetcher=not args.no_cuda, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + crop_pct=data_config['crop_pct'], + tensorflow_preprocessing=args.tf_preprocessing) + + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + model.eval() + end = time.time() + with torch.no_grad(): + for i, (input, target) in enumerate(loader): + if not args.no_cuda: + target = target.cuda() + input = input.cuda() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + # compute output + with amp_autocast(): + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + print('Test: [{0}/{1}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \t' + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + i, len(loader), batch_time=batch_time, + rate_avg=input.size(0) / batch_time.avg, + loss=losses, top1=top1, top5=top5)) + + print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format( + top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg)) + + +if __name__ == '__main__': + main() diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/encoder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..7f7149ca3c0cf2b6e019105af7e645cfbb3eda11 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/encoder.py @@ -0,0 +1,34 @@ +import os +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Encoder(nn.Module): + def __init__(self): + super(Encoder, self).__init__() + + basemodel_name = 'tf_efficientnet_b5_ap' + print('Loading base model ({})...'.format(basemodel_name), end='') + repo_path = os.path.join(os.path.dirname(__file__), 'efficientnet_repo') + basemodel = torch.hub.load(repo_path, basemodel_name, pretrained=False, source='local') + print('Done.') + + # Remove last two layers + print('Removing last two layers (global_pool & classifier).') + basemodel.global_pool = nn.Identity() + basemodel.classifier = nn.Identity() + + self.original_model = basemodel + + def forward(self, x): + features = [x] + for k, v in self.original_model._modules.items(): + if (k == 'blocks'): + for ki, vi in v._modules.items(): + features.append(vi(features[-1])) + else: + features.append(v(features[-1])) + return features + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/submodules.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/submodules.py new file mode 100644 index 0000000000000000000000000000000000000000..409733351bd6ab5d191c800aff1bc05bfa4cb6f8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/normalbae/nets/submodules/submodules.py @@ 
-0,0 +1,140 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +######################################################################################################################## + + +# Upsample + BatchNorm +class UpSampleBN(nn.Module): + def __init__(self, skip_input, output_features): + super(UpSampleBN, self).__init__() + + self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(output_features), + nn.LeakyReLU(), + nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(output_features), + nn.LeakyReLU()) + + def forward(self, x, concat_with): + up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True) + f = torch.cat([up_x, concat_with], dim=1) + return self._net(f) + + +# Upsample + GroupNorm + Weight Standardization +class UpSampleGN(nn.Module): + def __init__(self, skip_input, output_features): + super(UpSampleGN, self).__init__() + + self._net = nn.Sequential(Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1), + nn.GroupNorm(8, output_features), + nn.LeakyReLU(), + Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1), + nn.GroupNorm(8, output_features), + nn.LeakyReLU()) + + def forward(self, x, concat_with): + up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True) + f = torch.cat([up_x, concat_with], dim=1) + return self._net(f) + + +# Conv2d with weight standardization +class Conv2d(nn.Conv2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, + padding, dilation, groups, bias) + + def forward(self, x): + weight = self.weight + weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, + keepdim=True).mean(dim=3, keepdim=True) + weight = weight - weight_mean + std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5 + weight = weight / std.expand_as(weight) + return F.conv2d(x, weight, self.bias, self.stride, + self.padding, self.dilation, self.groups) + + +# normalize +def norm_normalize(norm_out): + min_kappa = 0.01 + norm_x, norm_y, norm_z, kappa = torch.split(norm_out, 1, dim=1) + norm = torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10 + kappa = F.elu(kappa) + 1.0 + min_kappa + final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm, kappa], dim=1) + return final_out + + +# uncertainty-guided sampling (only used during training) +@torch.no_grad() +def sample_points(init_normal, gt_norm_mask, sampling_ratio, beta): + device = init_normal.device + B, _, H, W = init_normal.shape + N = int(sampling_ratio * H * W) + beta = beta + + # uncertainty map + uncertainty_map = -1 * init_normal[:, 3, :, :] # B, H, W + + # gt_invalid_mask (B, H, W) + if gt_norm_mask is not None: + gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest') + gt_invalid_mask = gt_invalid_mask[:, 0, :, :] < 0.5 + uncertainty_map[gt_invalid_mask] = -1e4 + + # (B, H*W) + _, idx = uncertainty_map.view(B, -1).sort(1, descending=True) + + # importance sampling + if int(beta * N) > 0: + importance = idx[:, :int(beta * N)] # B, beta*N + + # remaining + remaining = idx[:, int(beta * N):] # B, H*W - beta*N + + # coverage + num_coverage = N - int(beta * N) + + if num_coverage <= 0: + samples = importance + else: + 
coverage_list = [] + for i in range(B): + idx_c = torch.randperm(remaining.size()[1]) # shuffles "H*W - beta*N" + coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) # 1, N-beta*N + coverage = torch.cat(coverage_list, dim=0) # B, N-beta*N + samples = torch.cat((importance, coverage), dim=1) # B, N + + else: + # remaining + remaining = idx[:, :] # B, H*W + + # coverage + num_coverage = N + + coverage_list = [] + for i in range(B): + idx_c = torch.randperm(remaining.size()[1]) # shuffles "H*W - beta*N" + coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) # 1, N-beta*N + coverage = torch.cat(coverage_list, dim=0) # B, N-beta*N + samples = coverage + + # point coordinates + rows_int = samples // W # 0 for first row, H-1 for last row + rows_float = rows_int / float(H-1) # 0 to 1.0 + rows_float = (rows_float * 2.0) - 1.0 # -1.0 to 1.0 + + cols_int = samples % W # 0 for first column, W-1 for last column + cols_float = cols_int / float(W-1) # 0 to 1.0 + cols_float = (cols_float * 2.0) - 1.0 # -1.0 to 1.0 + + point_coords = torch.zeros(B, 1, N, 2) + point_coords[:, 0, :, 0] = cols_float # x coord + point_coords[:, 0, :, 1] = rows_float # y coord + point_coords = point_coords.to(device) + return point_coords, rows_int, cols_int \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebed08adabe6f0bf1d253c67b5c641e07ce9a175 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/__init__.py @@ -0,0 +1,49 @@ +import os +from .api import make_detectron2_model, semantic_run +from pathlib import Path +import warnings +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +import numpy as np +import cv2 +from PIL import Image + +DEFAULT_CONFIGS = { + "coco": { + "name": "150_16_swin_l_oneformer_coco_100ep.pth", + "config": os.path.join(os.path.dirname(__file__), 'configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml') + }, + "ade20k": { + "name": "250_16_swin_l_oneformer_ade20k_160k.pth", + "config": os.path.join(os.path.dirname(__file__), 'configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml') + } +} +class OneformerSegmentor: + def __init__(self, model, metadata): + self.model = model + self.metadata = metadata + + def to(self, device): + self.model.model.to(device) + return self + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path, config_path = None): + filename = filename or "250_16_swin_l_oneformer_ade20k_160k.pth" + config_path = config_path or DEFAULT_CONFIGS["ade20k" if "ade20k" in filename else "coco"]["config"] + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + model, metadata = make_detectron2_model(config_path, model_path) + + return cls(model, metadata) + + def __call__(self, input_image=None, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + detected_map = semantic_run(input_image, self.model, self.metadata) + detected_map = remove_pad(HWC3(detected_map)) + + if output_type == "pil": + detected_map = 
Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/api.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/api.py new file mode 100644 index 0000000000000000000000000000000000000000..3502e9404416867ade0063f00bfc44955435a75c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/api.py @@ -0,0 +1,39 @@ +import os +os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" + +import torch + +from custom_detectron2.config import get_cfg +from custom_detectron2.projects.deeplab import add_deeplab_config +from custom_detectron2.data import MetadataCatalog + +from custom_oneformer import ( + add_oneformer_config, + add_common_config, + add_swin_config, + add_dinat_config, +) + +from custom_oneformer.demo.defaults import DefaultPredictor +from custom_oneformer.demo.visualizer import Visualizer, ColorMode + + +def make_detectron2_model(config_path, ckpt_path): + cfg = get_cfg() + add_deeplab_config(cfg) + add_common_config(cfg) + add_swin_config(cfg) + add_oneformer_config(cfg) + add_dinat_config(cfg) + cfg.merge_from_file(config_path) + cfg.MODEL.WEIGHTS = ckpt_path + cfg.freeze() + metadata = MetadataCatalog.get(cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused") + return DefaultPredictor(cfg), metadata + + +def semantic_run(img, predictor, metadata): + predictions = predictor(img[:, :, ::-1], "semantic") # Predictor of OneFormer must use BGR image !!! + visualizer_map = Visualizer(img, is_img=False, metadata=metadata, instance_mode=ColorMode.IMAGE) + out_map = visualizer_map.draw_sem_seg(predictions["sem_seg"].argmax(dim=0).cpu(), alpha=1, is_text=False).get_image() + return out_map \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..31eab45b878433fc844a13dbdd54f97c936d9b89 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml @@ -0,0 +1,68 @@ +MODEL: + BACKBONE: + FREEZE_AT: 0 + NAME: "build_resnet_backbone" + WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + RESNETS: + DEPTH: 50 + STEM_TYPE: "basic" # not used + STEM_OUT_CHANNELS: 64 + STRIDE_IN_1X1: False + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + # NORM: "SyncBN" + RES5_MULTI_GRID: [1, 1, 1] # not used +DATASETS: + TRAIN: ("ade20k_panoptic_train",) + TEST_PANOPTIC: ("ade20k_panoptic_val",) + TEST_INSTANCE: ("ade20k_instance_val",) + TEST_SEMANTIC: ("ade20k_sem_seg_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.0001 + MAX_ITER: 160000 + WARMUP_FACTOR: 1.0 + WARMUP_ITERS: 0 + WEIGHT_DECAY: 0.05 + OPTIMIZER: "ADAMW" + LR_SCHEDULER_NAME: "WarmupPolyLR" + BACKBONE_MULTIPLIER: 0.1 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "full_model" + CLIP_VALUE: 0.01 + NORM_TYPE: 2.0 + AMP: + ENABLED: True +INPUT: + MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 512) for x in range(5, 21)]"] + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 512 + MAX_SIZE_TRAIN: 2048 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (512, 512) + SINGLE_CATEGORY_MAX_AREA: 1.0 + COLOR_AUG_SSD: True + SIZE_DIVISIBILITY: 
512 # used in dataset mapper + FORMAT: "RGB" + DATASET_MAPPER_NAME: "oneformer_unified" + MAX_SEQ_LEN: 77 + TASK_SEQ_LEN: 77 + TASK_PROB: + SEMANTIC: 0.33 + INSTANCE: 0.66 +TEST: + EVAL_PERIOD: 5000 + AUG: + ENABLED: False + MIN_SIZES: [256, 384, 512, 640, 768, 896] + MAX_SIZE: 3584 + FLIP: True +DATALOADER: + FILTER_EMPTY_ANNOTATIONS: True + NUM_WORKERS: 4 +VERSION: 2 \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..770ffc81907f8d7c7520e079b1c46060707254b8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml @@ -0,0 +1,58 @@ +_BASE_: Base-ADE20K-UnifiedSegmentation.yaml +MODEL: + META_ARCHITECTURE: "OneFormer" + SEM_SEG_HEAD: + NAME: "OneFormerHead" + IGNORE_VALUE: 255 + NUM_CLASSES: 150 + LOSS_WEIGHT: 1.0 + CONVS_DIM: 256 + MASK_DIM: 256 + NORM: "GN" + # pixel decoder + PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder" + IN_FEATURES: ["res2", "res3", "res4", "res5"] + DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"] + COMMON_STRIDE: 4 + TRANSFORMER_ENC_LAYERS: 6 + ONE_FORMER: + TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder" + TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder" + DEEP_SUPERVISION: True + NO_OBJECT_WEIGHT: 0.1 + CLASS_WEIGHT: 2.0 + MASK_WEIGHT: 5.0 + DICE_WEIGHT: 5.0 + CONTRASTIVE_WEIGHT: 0.5 + CONTRASTIVE_TEMPERATURE: 0.07 + HIDDEN_DIM: 256 + NUM_OBJECT_QUERIES: 150 + USE_TASK_NORM: True + NHEADS: 8 + DROPOUT: 0.1 + DIM_FEEDFORWARD: 2048 + ENC_LAYERS: 0 + PRE_NORM: False + ENFORCE_INPUT_PROJ: False + SIZE_DIVISIBILITY: 32 + CLASS_DEC_LAYERS: 2 + DEC_LAYERS: 10 # 9 decoder layers, add one for the loss on learnable query + TRAIN_NUM_POINTS: 12544 + OVERSAMPLE_RATIO: 3.0 + IMPORTANCE_SAMPLE_RATIO: 0.75 + TEXT_ENCODER: + WIDTH: 256 + CONTEXT_LENGTH: 77 + NUM_LAYERS: 6 + VOCAB_SIZE: 49408 + PROJ_NUM_LAYERS: 2 + N_CTX: 16 + TEST: + SEMANTIC_ON: True + INSTANCE_ON: True + PANOPTIC_ON: True + OVERLAP_THRESHOLD: 0.8 + OBJECT_MASK_THRESHOLD: 0.8 + TASK: "panoptic" +TEST: + DETECTIONS_PER_IMAGE: 150 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69c44ade144e4504077c0fe04fa8bb3491a679ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml @@ -0,0 +1,40 @@ +_BASE_: oneformer_R50_bs16_160k.yaml +MODEL: + BACKBONE: + NAME: "D2SwinTransformer" + SWIN: + EMBED_DIM: 192 + DEPTHS: [2, 2, 18, 2] + NUM_HEADS: [6, 12, 24, 48] + WINDOW_SIZE: 12 + APE: False + DROP_PATH_RATE: 0.3 + PATCH_NORM: True + PRETRAIN_IMG_SIZE: 384 + WEIGHTS: "swin_large_patch4_window12_384_22k.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + ONE_FORMER: + NUM_OBJECT_QUERIES: 250 +INPUT: + MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 21)]"] + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 640 + MAX_SIZE_TRAIN: 2560 + MAX_SIZE_TEST: 2560 + CROP: + ENABLED: True + TYPE: 
"absolute" + SIZE: (640, 640) + SINGLE_CATEGORY_MAX_AREA: 1.0 + COLOR_AUG_SSD: True + SIZE_DIVISIBILITY: 640 # used in dataset mapper + FORMAT: "RGB" +TEST: + DETECTIONS_PER_IMAGE: 250 + EVAL_PERIOD: 5000 + AUG: + ENABLED: False + MIN_SIZES: [320, 480, 640, 800, 960, 1120] + MAX_SIZE: 4480 + FLIP: True diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ccd24f348f9bc7d60dcdc4b74d887708e57cb8a8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml @@ -0,0 +1,54 @@ +MODEL: + BACKBONE: + FREEZE_AT: 0 + NAME: "build_resnet_backbone" + WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + RESNETS: + DEPTH: 50 + STEM_TYPE: "basic" # not used + STEM_OUT_CHANNELS: 64 + STRIDE_IN_1X1: False + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + # NORM: "SyncBN" + RES5_MULTI_GRID: [1, 1, 1] # not used +DATASETS: + TRAIN: ("coco_2017_train_panoptic_with_sem_seg",) + TEST_PANOPTIC: ("coco_2017_val_panoptic_with_sem_seg",) # to evaluate instance and semantic performance as well + TEST_INSTANCE: ("coco_2017_val",) + TEST_SEMANTIC: ("coco_2017_val_panoptic_with_sem_seg",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.0001 + STEPS: (327778, 355092) + MAX_ITER: 368750 + WARMUP_FACTOR: 1.0 + WARMUP_ITERS: 10 + WEIGHT_DECAY: 0.05 + OPTIMIZER: "ADAMW" + BACKBONE_MULTIPLIER: 0.1 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "full_model" + CLIP_VALUE: 0.01 + NORM_TYPE: 2.0 + AMP: + ENABLED: True +INPUT: + IMAGE_SIZE: 1024 + MIN_SCALE: 0.1 + MAX_SCALE: 2.0 + FORMAT: "RGB" + DATASET_MAPPER_NAME: "coco_unified_lsj" + MAX_SEQ_LEN: 77 + TASK_SEQ_LEN: 77 + TASK_PROB: + SEMANTIC: 0.33 + INSTANCE: 0.66 +TEST: + EVAL_PERIOD: 5000 +DATALOADER: + FILTER_EMPTY_ANNOTATIONS: True + NUM_WORKERS: 4 +VERSION: 2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f768c8fa8b5e4fc1121e65e050053e0d8870cd73 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml @@ -0,0 +1,59 @@ +_BASE_: Base-COCO-UnifiedSegmentation.yaml +MODEL: + META_ARCHITECTURE: "OneFormer" + SEM_SEG_HEAD: + NAME: "OneFormerHead" + IGNORE_VALUE: 255 + NUM_CLASSES: 133 + LOSS_WEIGHT: 1.0 + CONVS_DIM: 256 + MASK_DIM: 256 + NORM: "GN" + # pixel decoder + PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder" + IN_FEATURES: ["res2", "res3", "res4", "res5"] + DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"] + COMMON_STRIDE: 4 + TRANSFORMER_ENC_LAYERS: 6 + ONE_FORMER: + TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder" + TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder" + DEEP_SUPERVISION: True + NO_OBJECT_WEIGHT: 0.1 + CLASS_WEIGHT: 2.0 + MASK_WEIGHT: 5.0 + DICE_WEIGHT: 5.0 + CONTRASTIVE_WEIGHT: 0.5 + CONTRASTIVE_TEMPERATURE: 0.07 + HIDDEN_DIM: 256 + NUM_OBJECT_QUERIES: 150 + USE_TASK_NORM: True + NHEADS: 8 + DROPOUT: 0.1 + DIM_FEEDFORWARD: 2048 + ENC_LAYERS: 0 + PRE_NORM: False + ENFORCE_INPUT_PROJ: 
False + SIZE_DIVISIBILITY: 32 + CLASS_DEC_LAYERS: 2 + DEC_LAYERS: 10 # 9 decoder layers, add one for the loss on learnable query + TRAIN_NUM_POINTS: 12544 + OVERSAMPLE_RATIO: 3.0 + IMPORTANCE_SAMPLE_RATIO: 0.75 + TEXT_ENCODER: + WIDTH: 256 + CONTEXT_LENGTH: 77 + NUM_LAYERS: 6 + VOCAB_SIZE: 49408 + PROJ_NUM_LAYERS: 2 + N_CTX: 16 + TEST: + SEMANTIC_ON: True + INSTANCE_ON: True + PANOPTIC_ON: True + DETECTION_ON: False + OVERLAP_THRESHOLD: 0.8 + OBJECT_MASK_THRESHOLD: 0.8 + TASK: "panoptic" +TEST: + DETECTIONS_PER_IMAGE: 150 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..faae655317c52d90b9f756417f8b1a1adcbe78f2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml @@ -0,0 +1,25 @@ +_BASE_: oneformer_R50_bs16_50ep.yaml +MODEL: + BACKBONE: + NAME: "D2SwinTransformer" + SWIN: + EMBED_DIM: 192 + DEPTHS: [2, 2, 18, 2] + NUM_HEADS: [6, 12, 24, 48] + WINDOW_SIZE: 12 + APE: False + DROP_PATH_RATE: 0.3 + PATCH_NORM: True + PRETRAIN_IMG_SIZE: 384 + WEIGHTS: "swin_large_patch4_window12_384_22k.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + ONE_FORMER: + NUM_OBJECT_QUERIES: 150 +SOLVER: + STEPS: (655556, 735184) + MAX_ITER: 737500 + AMP: + ENABLED: False +TEST: + DETECTIONS_PER_IMAGE: 150 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6f60b76d35fa1012809985780964a5068adce4fd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/LICENSE @@ -0,0 +1,108 @@ +OPENPOSE: MULTIPERSON KEYPOINT DETECTION +SOFTWARE LICENSE AGREEMENT +ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY + +BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT. IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE. + +This is a license agreement ("Agreement") between your academic institution or non-profit organization or self (called "Licensee" or "You" in this Agreement) and Carnegie Mellon University (called "Licensor" in this Agreement). All rights not specifically granted to you in this Agreement are reserved for Licensor. + +RESERVATION OF OWNERSHIP AND GRANT OF LICENSE: +Licensor retains exclusive ownership of any copy of the Software (as defined below) licensed under this Agreement and hereby grants to Licensee a personal, non-exclusive, +non-transferable license to use the Software for noncommercial research purposes, without the right to sublicense, pursuant to the terms and conditions of this Agreement. 
As used in this Agreement, the term "Software" means (i) the actual copy of all or any portion of code for program routines made accessible to Licensee by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it, and (ii) all or any derivatives and/or modifications created or made by You to any of the items specified in (i). + +CONFIDENTIALITY: Licensee acknowledges that the Software is proprietary to Licensor, and as such, Licensee agrees to receive all such materials in confidence and use the Software only in accordance with the terms of this Agreement. Licensee agrees to use reasonable effort to protect the Software from unauthorized use, reproduction, distribution, or publication. + +COPYRIGHT: The Software is owned by Licensor and is protected by United +States copyright laws and applicable international treaties and/or conventions. + +PERMITTED USES: The Software may be used for your own noncommercial internal research purposes. You understand and agree that Licensor is not obligated to implement any suggestions and/or feedback you might provide regarding the Software, but to the extent Licensor does so, you are not entitled to any compensation related thereto. + +DERIVATIVES: You may create derivatives of or make modifications to the Software, however, You agree that all and any such derivatives and modifications will be owned by Licensor and become a part of the Software licensed to You under this Agreement. You may only use such derivatives and modifications for your own noncommercial internal research purposes, and you may not otherwise use, distribute or copy such derivatives and modifications in violation of this Agreement. + +BACKUPS: If Licensee is an organization, it may make that number of copies of the Software necessary for internal noncommercial use at a single site within its organization provided that all information appearing in or on the original labels, including the copyright and trademark notices are copied onto the labels of the copies. + +USES NOT PERMITTED: You may not distribute, copy or use the Software except as explicitly permitted herein. Licensee has not been granted any trademark license as part of this Agreement and may not use the name or mark “OpenPose", "Carnegie Mellon" or any renditions thereof without the prior written permission of Licensor. + +You may not sell, rent, lease, sublicense, lend, time-share or transfer, in whole or in part, or provide third parties access to prior or present versions (or any parts thereof) of the Software. + +ASSIGNMENT: You may not assign this Agreement or your rights hereunder without the prior written consent of Licensor. Any attempted assignment without such consent shall be null and void. + +TERM: The term of the license granted by this Agreement is from Licensee's acceptance of this Agreement by downloading the Software or by using the Software until terminated as provided below. + +The Agreement automatically terminates without notice if you fail to comply with any provision of this Agreement. Licensee may terminate this Agreement by ceasing using the Software. Upon any termination of this Agreement, Licensee will delete any and all copies of the Software. 
You agree that all provisions which operate to protect the proprietary rights of Licensor shall remain in force should breach occur and that the obligation of confidentiality described in this Agreement is binding in perpetuity and, as such, survives the term of the Agreement. + +FEE: Provided Licensee abides completely by the terms and conditions of this Agreement, there is no fee due to Licensor for Licensee's use of the Software in accordance with this Agreement. + +DISCLAIMER OF WARRANTIES: THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS. + +SUPPORT AND MAINTENANCE: No Software support or training by the Licensor is provided as part of this Agreement. + +EXCLUSIVE REMEDY AND LIMITATION OF LIABILITY: To the maximum extent permitted under applicable law, Licensor shall not be liable for direct, indirect, special, incidental, or consequential damages or lost profits related to Licensee's use of and/or inability to use the Software, even if Licensor is advised of the possibility of such damage. + +EXPORT REGULATION: Licensee agrees to comply with any and all applicable +U.S. export control laws, regulations, and/or other laws related to embargoes and sanction programs administered by the Office of Foreign Assets Control. + +SEVERABILITY: If any provision(s) of this Agreement shall be held to be invalid, illegal, or unenforceable by a court or other tribunal of competent jurisdiction, the validity, legality and enforceability of the remaining provisions shall not in any way be affected or impaired thereby. + +NO IMPLIED WAIVERS: No failure or delay by Licensor in enforcing any right or remedy under this Agreement shall be construed as a waiver of any future or other exercise of such right or remedy by Licensor. + +GOVERNING LAW: This Agreement shall be construed and enforced in accordance with the laws of the Commonwealth of Pennsylvania without reference to conflict of laws principles. You consent to the personal jurisdiction of the courts of this County and waive their rights to venue outside of Allegheny County, Pennsylvania. + +ENTIRE AGREEMENT AND AMENDMENTS: This Agreement constitutes the sole and entire agreement between Licensee and Licensor as to the matter set forth herein and supersedes any previous agreements, understandings, and arrangements between the parties relating hereto. + + + +************************************************************************ + +THIRD-PARTY SOFTWARE NOTICES AND INFORMATION + +This project incorporates material from the project(s) listed below (collectively, "Third Party Code"). This Third Party Code is licensed to you under their original license terms set forth below. We reserves all other rights not expressly granted, whether by implication, estoppel or otherwise. + +1. Caffe, version 1.0.0, (https://github.com/BVLC/caffe/) + +COPYRIGHT + +All contributions by the University of California: +Copyright (c) 2014-2017 The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014-2017, the respective contributors +All rights reserved. + +Caffe uses a shared copyright model: each contributor holds copyright over +their contributions to Caffe. The project versioning records all such +contribution and copyright details. 
+If a contributor wants to further mark
+their specific copyright on a particular contribution, they should indicate
+their copyright solely in the commit message of the change when it is
+committed.
+
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+CONTRIBUTION AGREEMENT
+
+By contributing to the BVLC/caffe repository through pull-request, comment,
+or otherwise, the contributor releases their content to the
+license and copyright terms herein.
+
+************END OF THIRD-PARTY SOFTWARE NOTICES AND INFORMATION**********
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..029794f1dd345effce074bfba6ec3684baa9a912
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__init__.py
@@ -0,0 +1,245 @@
+# Openpose
+# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
+# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
+# 3rd Edited by ControlNet
+# 4th Edited by ControlNet (added face and correct hands)
+# 5th Edited by ControlNet (improved JSON serialization/deserialization, and lots of bug fixes)
+# This preprocessor is licensed by CMU for non-commercial use only.
+
+
+import os
+
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+import json
+import warnings
+from typing import Callable, List, NamedTuple, Tuple, Union
+
+import cv2
+import numpy as np
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download
+from . import util
+from .body import Body, BodyResult, Keypoint
+from .face import Face
+from .hand import Hand
+
+HandResult = List[Keypoint]
+FaceResult = List[Keypoint]
+
+class PoseResult(NamedTuple):
+    body: BodyResult
+    left_hand: Union[HandResult, None]
+    right_hand: Union[HandResult, None]
+    face: Union[FaceResult, None]
+
+def draw_poses(poses: List[PoseResult], H, W, draw_body=True, draw_hand=True, draw_face=True):
+    """
+    Draw the detected poses on an empty canvas.
+
+    Args:
+        poses (List[PoseResult]): A list of PoseResult objects containing the detected poses.
+        H (int): The height of the canvas.
+        W (int): The width of the canvas.
+        draw_body (bool, optional): Whether to draw body keypoints. Defaults to True.
+        draw_hand (bool, optional): Whether to draw hand keypoints. Defaults to True.
+        draw_face (bool, optional): Whether to draw face keypoints. Defaults to True.
+
+    Returns:
+        numpy.ndarray: A 3D numpy array representing the canvas with the drawn poses.
+    """
+    canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8)
+
+    for pose in poses:
+        if draw_body:
+            canvas = util.draw_bodypose(canvas, pose.body.keypoints)
+
+        if draw_hand:
+            canvas = util.draw_handpose(canvas, pose.left_hand)
+            canvas = util.draw_handpose(canvas, pose.right_hand)
+
+        if draw_face:
+            canvas = util.draw_facepose(canvas, pose.face)
+
+    return canvas
+
+def encode_poses_as_json(poses: List[PoseResult], canvas_height: int, canvas_width: int) -> str:
+    """ Encode the pose as a JSON string following openpose JSON output format:
+    https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/02_output.md
+    """
+    def compress_keypoints(keypoints: Union[List[Keypoint], None]) -> Union[List[float], None]:
+        if not keypoints:
+            return None
+
+        return [
+            value
+            for keypoint in keypoints
+            for value in (
+                [float(keypoint.x), float(keypoint.y), 1.0]
+                if keypoint is not None
+                else [0.0, 0.0, 0.0]
+            )
+        ]
+
+    return json.dumps({
+        'people': [
+            {
+                'pose_keypoints_2d': compress_keypoints(pose.body.keypoints),
+                "face_keypoints_2d": compress_keypoints(pose.face),
+                "hand_left_keypoints_2d": compress_keypoints(pose.left_hand),
+                "hand_right_keypoints_2d": compress_keypoints(pose.right_hand),
+            }
+            for pose in poses
+        ],
+        'canvas_height': canvas_height,
+        'canvas_width': canvas_width,
+    }, indent=4)
+
+class OpenposeDetector:
+    """
+    A class for detecting human poses in images using the Openpose model.
+
+    Attributes:
+        body_estimation (Body): The body pose estimation model.
+        hand_estimation (Hand, optional): The hand pose estimation model.
+        face_estimation (Face, optional): The face landmark estimation model.
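+
+    Example (illustrative sketch; assumes the public "lllyasviel/Annotators"
+    checkpoint repo is reachable and ``img`` is a placeholder for an HxWx3
+    uint8 RGB numpy array):
+        >>> detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
+        >>> pose_map = detector(img, include_hand=True, include_face=True)
+        >>> pose_map, pose_json = detector(img, image_and_json=True)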
+ """ + def __init__(self, body_estimation, hand_estimation=None, face_estimation=None): + self.body_estimation = body_estimation + self.hand_estimation = hand_estimation + self.face_estimation = face_estimation + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, hand_filename=None, face_filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "body_pose_model.pth" + hand_filename = hand_filename or "hand_pose_model.pth" + face_filename = face_filename or "facenet.pth" + + if pretrained_model_or_path == "lllyasviel/ControlNet": + subfolder = "annotator/ckpts" + face_pretrained_model_or_path = "lllyasviel/Annotators" + + else: + subfolder = '' + face_pretrained_model_or_path = pretrained_model_or_path + + body_model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir, subfolder=subfolder) + hand_model_path = custom_hf_download(pretrained_model_or_path, hand_filename, cache_dir=cache_dir, subfolder=subfolder) + face_model_path = custom_hf_download(face_pretrained_model_or_path, face_filename, cache_dir=cache_dir, subfolder=subfolder) + + body_estimation = Body(body_model_path) + hand_estimation = Hand(hand_model_path) + face_estimation = Face(face_model_path) + + return cls(body_estimation, hand_estimation, face_estimation) + + def to(self, device): + self.body_estimation.to(device) + self.hand_estimation.to(device) + self.face_estimation.to(device) + return self + + def detect_hands(self, body: BodyResult, oriImg) -> Tuple[Union[HandResult, None], Union[HandResult, None]]: + left_hand = None + right_hand = None + H, W, _ = oriImg.shape + for x, y, w, is_left in util.handDetect(body, oriImg): + peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :]).astype(np.float32) + if peaks.ndim == 2 and peaks.shape[1] == 2: + peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W) + peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H) + + hand_result = [ + Keypoint(x=peak[0], y=peak[1]) + for peak in peaks + ] + + if is_left: + left_hand = hand_result + else: + right_hand = hand_result + + return left_hand, right_hand + + def detect_face(self, body: BodyResult, oriImg) -> Union[FaceResult, None]: + face = util.faceDetect(body, oriImg) + if face is None: + return None + + x, y, w = face + H, W, _ = oriImg.shape + heatmaps = self.face_estimation(oriImg[y:y+w, x:x+w, :]) + peaks = self.face_estimation.compute_peaks_from_heatmaps(heatmaps).astype(np.float32) + if peaks.ndim == 2 and peaks.shape[1] == 2: + peaks[:, 0] = np.where(peaks[:, 0] < 1e-6, -1, peaks[:, 0] + x) / float(W) + peaks[:, 1] = np.where(peaks[:, 1] < 1e-6, -1, peaks[:, 1] + y) / float(H) + return [ + Keypoint(x=peak[0], y=peak[1]) + for peak in peaks + ] + + return None + + def detect_poses(self, oriImg, include_hand=False, include_face=False) -> List[PoseResult]: + """ + Detect poses in the given image. + Args: + oriImg (numpy.ndarray): The input image for pose detection. + include_hand (bool, optional): Whether to include hand detection. Defaults to False. + include_face (bool, optional): Whether to include face detection. Defaults to False. + + Returns: + List[PoseResult]: A list of PoseResult objects containing the detected poses. 
+ """ + oriImg = oriImg[:, :, ::-1].copy() + H, W, C = oriImg.shape + with torch.no_grad(): + candidate, subset = self.body_estimation(oriImg) + bodies = self.body_estimation.format_body_result(candidate, subset) + + results = [] + for body in bodies: + left_hand, right_hand, face = (None,) * 3 + if include_hand: + left_hand, right_hand = self.detect_hands(body, oriImg) + if include_face: + face = self.detect_face(body, oriImg) + + results.append(PoseResult(BodyResult( + keypoints=[ + Keypoint( + x=keypoint.x / float(W), + y=keypoint.y / float(H) + ) if keypoint is not None else None + for keypoint in body.keypoints + ], + total_score=body.total_score, + total_parts=body.total_parts + ), left_hand, right_hand, face)) + + return results + + def __call__(self, input_image, detect_resolution=512, include_body=True, include_hand=False, include_face=False, hand_and_face=None, output_type="pil", image_and_json=False, upscale_method="INTER_CUBIC", **kwargs): + if hand_and_face is not None: + warnings.warn("hand_and_face is deprecated. Use include_hand and include_face instead.", DeprecationWarning) + include_hand = hand_and_face + include_face = hand_and_face + + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + poses = self.detect_poses(detected_map, include_hand=include_hand, include_face=include_face) + detected_map = remove_pad(detected_map) + canvas = draw_poses(poses, detected_map.shape[0], detected_map.shape[1], draw_body=include_body, draw_hand=include_hand, draw_face=include_face) + + detected_map = HWC3(canvas) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + if image_and_json: + return (detected_map, encode_poses_as_json(poses, detected_map.shape[0], detected_map.shape[1])) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0cf82283c3e6a78bf0ccc915a2dc0f8b6c4cda6 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/body.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/body.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7d6bdb4b1b7eab68636fb0478dd2826a042b017 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/body.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/face.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/face.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dedc981ae268292ea978e70cf9f79dfb3b0597b0 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/face.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/hand.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/hand.cpython-311.pyc new file mode 100644 index 
index 0000000000000000000000000000000000000000..692b5113eecac65495ccaea537b9169d3731639f
Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/hand.cpython-311.pyc differ
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/model.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/model.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0aed4d41af6c7995079794dba4d4c8006dfff20d
Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/model.cpython-311.pyc differ
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/util.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/util.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3af24dc5a5a551643a44bfabe66ee57f787e29a2
Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/__pycache__/util.cpython-311.pyc differ
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/body.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/body.py
new file mode 100644
index 0000000000000000000000000000000000000000..96f9299ad4aa735988668dc5b720ac5a08972848
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/body.py
@@ -0,0 +1,277 @@
+import math
+from typing import List, NamedTuple, Union
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from scipy.ndimage.filters import gaussian_filter
+
+from . import util
+from .model import bodypose_model
+
+
+class Keypoint(NamedTuple):
+    x: float
+    y: float
+    score: float = 1.0
+    id: int = -1
+
+
+class BodyResult(NamedTuple):
+    # Note: Using `Union` instead of the `|` operator, as the latter is a Python
+    # 3.10 feature.
+    # Annotator code should be Python 3.8 compatible, as the ControlNet repo uses
+    # a Python 3.8 environment.
+    # https://github.com/lllyasviel/ControlNet/blob/d3284fcd0972c510635a4f5abe2eeb71dc0de524/environment.yaml#L6
+    keypoints: List[Union[Keypoint, None]]
+    total_score: float
+    total_parts: int
+
+
+class Body(object):
+    def __init__(self, model_path):
+        self.model = bodypose_model()
+        model_dict = util.transfer(self.model, torch.load(model_path))
+        self.model.load_state_dict(model_dict)
+        self.model.eval()
+
+    def to(self, device):
+        self.model.to(device)
+        return self
+
+    def __call__(self, oriImg):
+        device = next(iter(self.model.parameters())).device
+        # scale_search = [0.5, 1.0, 1.5, 2.0]
+        scale_search = [0.5]
+        boxsize = 368
+        stride = 8
+        padValue = 128
+        thre1 = 0.1
+        thre2 = 0.05
+        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
+        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
+        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
+
+        for m in range(len(multiplier)):
+            scale = multiplier[m]
+            imageToTest = util.smart_resize_k(oriImg, fx=scale, fy=scale)
+            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+            im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+            im = np.ascontiguousarray(im)
+
+            data = torch.from_numpy(im).float()
+            data = data.to(device)
+            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+            with torch.no_grad():
+                Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
+            Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
+            Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
+
+            # extract outputs, resize, and remove padding
+            # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0))  # output 1 is heatmaps
+            heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))  # output 1 is heatmaps
+            heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride)
+            heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            heatmap = util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1]))
+
+            # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0))  # output 0 is PAFs
+            paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))  # output 0 is PAFs
+            paf = util.smart_resize_k(paf, fx=stride, fy=stride)
+            paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            paf = util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1]))
+
+            # accumulate the scale-averaged heatmaps and PAFs
+            heatmap_avg += heatmap / len(multiplier)
+            paf_avg += paf / len(multiplier)
+
+        all_peaks = []
+        peak_counter = 0
+
+        for part in range(18):
+            map_ori = heatmap_avg[:, :, part]
+            one_heatmap = gaussian_filter(map_ori, sigma=3)
+
+            map_left = np.zeros(one_heatmap.shape)
+            map_left[1:, :] = one_heatmap[:-1, :]
+            map_right = np.zeros(one_heatmap.shape)
+            map_right[:-1, :] = one_heatmap[1:, :]
+            map_up = np.zeros(one_heatmap.shape)
+            map_up[:, 1:] = one_heatmap[:, :-1]
+            map_down = np.zeros(one_heatmap.shape)
+            map_down[:, :-1] = one_heatmap[:, 1:]
+
+            peaks_binary = np.logical_and.reduce(
+                (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
+            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
+            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
+            peak_id = range(peak_counter, peak_counter + len(peaks))
+            peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
+
+            all_peaks.append(peaks_with_score_and_id)
+            peak_counter += len(peaks)
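+
+        # The matching step below pairs part peaks into limbs using the Part
+        # Affinity Fields: for each candidate limb the PAF channels are sampled
+        # at mid_num points along the segment joining the two peaks, and the
+        # connection is scored by the mean dot product between the sampled
+        # vectors and the segment's unit vector (with a penalty for segments
+        # longer than half the image height). Candidates are then accepted
+        # greedily by score, keeping at most min(nA, nB) connections per limb.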
+
+        # find connection in the specified sequence, center 29 is in the position 15
+        limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
+                   [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
+                   [1, 16], [16, 18], [3, 17], [6, 18]]
+        # the middle joints heatmap correspondence
+        mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
+                  [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
+                  [55, 56], [37, 38], [45, 46]]
+
+        connection_all = []
+        special_k = []
+        mid_num = 10
+
+        for k in range(len(mapIdx)):
+            score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
+            candA = all_peaks[limbSeq[k][0] - 1]
+            candB = all_peaks[limbSeq[k][1] - 1]
+            nA = len(candA)
+            nB = len(candB)
+            indexA, indexB = limbSeq[k]
+            if (nA != 0 and nB != 0):
+                connection_candidate = []
+                for i in range(nA):
+                    for j in range(nB):
+                        vec = np.subtract(candB[j][:2], candA[i][:2])
+                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
+                        norm = max(0.001, norm)
+                        vec = np.divide(vec, norm)
+
+                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
+                                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))
+
+                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
+                                          for I in range(len(startend))])
+                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
+                                          for I in range(len(startend))])
+
+                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
+                        score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
+                            0.5 * oriImg.shape[0] / norm - 1, 0)
+                        criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
+                        criterion2 = score_with_dist_prior > 0
+                        if criterion1 and criterion2:
+                            connection_candidate.append(
+                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
+
+                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
+                connection = np.zeros((0, 5))
+                for c in range(len(connection_candidate)):
+                    i, j, s = connection_candidate[c][0:3]
+                    if (i not in connection[:, 3] and j not in connection[:, 4]):
+                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
+                        if (len(connection) >= min(nA, nB)):
+                            break
+
+                connection_all.append(connection)
+            else:
+                special_k.append(k)
+                connection_all.append([])
+
+        # last number in each row is the total parts number of that person
+        # the second last number in each row is the score of the overall configuration
+        subset = -1 * np.ones((0, 20))
+        candidate = np.array([item for sublist in all_peaks for item in sublist])
+
+        for k in range(len(mapIdx)):
+            if k not in special_k:
+                partAs = connection_all[k][:, 0]
+                partBs = connection_all[k][:, 1]
+                indexA, indexB = np.array(limbSeq[k]) - 1
+
+                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
+                    found = 0
+                    subset_idx = [-1, -1]
+                    for j in range(len(subset)):  # 1:size(subset,1):
+                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
+                            subset_idx[found] = j
+                            found += 1
+
+                    if found == 1:
+                        j = subset_idx[0]
+                        if subset[j][indexB] != partBs[i]:
+                            subset[j][indexB] = partBs[i]
+                            subset[j][-1] += 1
+                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+                    elif found == 2:  # if found 2 and disjoint, merge them
+                        j1, j2 = subset_idx
+                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
+                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
+                            subset[j1][:-2] += (subset[j2][:-2] + 1)
+                            subset[j1][-2:] += subset[j2][-2:]
+                            subset[j1][-2] += connection_all[k][i][2]
+                            subset = np.delete(subset, j2, 0)
+                        else:  # as like found == 1
+                            subset[j1][indexB] = partBs[i]
+                            subset[j1][-1] += 1
+                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+
+                    # if find no partA in the subset, create a new subset
+                    elif not found and k < 17:
+                        row = -1 * np.ones(20)
+                        row[indexA] = partAs[i]
+                        row[indexB] = partBs[i]
+                        row[-1] = 2
+                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
+                        subset = np.vstack([subset, row])
+        # delete some rows of subset which has few parts occur
+        deleteIdx = []
+        for i in range(len(subset)):
+            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
+                deleteIdx.append(i)
+        subset = np.delete(subset, deleteIdx, axis=0)
+
+        # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
+        # candidate: x, y, score, id
+        return candidate, subset
+
+    @staticmethod
+    def format_body_result(candidate: np.ndarray, subset: np.ndarray) -> List[BodyResult]:
+        """
+        Format the body results from the candidate and subset arrays into a list of BodyResult objects.
+
+        Args:
+            candidate (np.ndarray): An array of candidates containing the x, y coordinates, score, and id
+                for each body part.
+            subset (np.ndarray): An array of subsets containing indices to the candidate array for each
+                person detected. The last two columns of each row hold the total score and total parts
+                of the person.
+
+        Returns:
+            List[BodyResult]: A list of BodyResult objects, where each object represents a person with
+                detected keypoints, total score, and total parts.
+        """
+        return [
+            BodyResult(
+                keypoints=[
+                    Keypoint(
+                        x=candidate[candidate_index][0],
+                        y=candidate[candidate_index][1],
+                        score=candidate[candidate_index][2],
+                        id=candidate[candidate_index][3]
+                    ) if candidate_index != -1 else None
+                    for candidate_index in person[:18].astype(int)
+                ],
+                total_score=person[18],
+                total_parts=person[19]
+            )
+            for person in subset
+        ]
+
+
+if __name__ == "__main__":
+    body_estimation = Body('../model/body_pose_model.pth')
+
+    test_image = '../images/ski.jpg'
+    oriImg = cv2.imread(test_image)  # B,G,R order
+    candidate, subset = body_estimation(oriImg)
+    bodies = body_estimation.format_body_result(candidate, subset)
+
+    canvas = oriImg
+    H, W = oriImg.shape[:2]
+    for body in bodies:
+        # draw_bodypose expects keypoints normalized to [0, 1]
+        canvas = util.draw_bodypose(canvas, [
+            Keypoint(x=kp.x / W, y=kp.y / H, score=kp.score, id=kp.id) if kp is not None else None
+            for kp in body.keypoints
+        ])
+
+    plt.imshow(canvas[:, :, [2, 1, 0]])
+    plt.show()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/face.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/face.py
new file mode 100644
index 0000000000000000000000000000000000000000..41c7799af10b1f834369464862d41d8f967128c6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/face.py
@@ -0,0 +1,364 @@
+import logging
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch.nn import Conv2d, MaxPool2d, Module, ReLU, init
+from torchvision.transforms import ToPILImage, ToTensor
+
+from . import util
+
+
+class FaceNet(Module):
+    """Model the cascading heatmaps.
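+
+    Sketch of the architecture defined below: a VGG-style backbone (three 2x2
+    max-pools, so an output stride of 8) feeds six cascaded prediction stages;
+    each stage after the first sees the previous stage's output concatenated
+    with the shared 128-channel feature map. For an input of shape (N, 3, H, W),
+    forward() returns a list of six heatmap tensors of shape (N, 71, H // 8, W // 8).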
""" + def __init__(self): + super(FaceNet, self).__init__() + # cnn to make feature map + self.relu = ReLU() + self.max_pooling_2d = MaxPool2d(kernel_size=2, stride=2) + self.conv1_1 = Conv2d(in_channels=3, out_channels=64, + kernel_size=3, stride=1, padding=1) + self.conv1_2 = Conv2d( + in_channels=64, out_channels=64, kernel_size=3, stride=1, + padding=1) + self.conv2_1 = Conv2d( + in_channels=64, out_channels=128, kernel_size=3, stride=1, + padding=1) + self.conv2_2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=3, stride=1, + padding=1) + self.conv3_1 = Conv2d( + in_channels=128, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_2 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_3 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv3_4 = Conv2d( + in_channels=256, out_channels=256, kernel_size=3, stride=1, + padding=1) + self.conv4_1 = Conv2d( + in_channels=256, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_2 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_3 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv4_4 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_1 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_2 = Conv2d( + in_channels=512, out_channels=512, kernel_size=3, stride=1, + padding=1) + self.conv5_3_CPM = Conv2d( + in_channels=512, out_channels=128, kernel_size=3, stride=1, + padding=1) + + # stage1 + self.conv6_1_CPM = Conv2d( + in_channels=128, out_channels=512, kernel_size=1, stride=1, + padding=0) + self.conv6_2_CPM = Conv2d( + in_channels=512, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage2 + self.Mconv1_stage2 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage2 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage2 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage3 + self.Mconv1_stage3 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage3 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage3 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage4 + self.Mconv1_stage4 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage4 = Conv2d( + 
in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage4 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage4 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage5 + self.Mconv1_stage5 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage5 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage5 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + # stage6 + self.Mconv1_stage6 = Conv2d( + in_channels=199, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv2_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv3_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv4_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv5_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=7, stride=1, + padding=3) + self.Mconv6_stage6 = Conv2d( + in_channels=128, out_channels=128, kernel_size=1, stride=1, + padding=0) + self.Mconv7_stage6 = Conv2d( + in_channels=128, out_channels=71, kernel_size=1, stride=1, + padding=0) + + for m in self.modules(): + if isinstance(m, Conv2d): + init.constant_(m.bias, 0) + + def forward(self, x): + """Return a list of heatmaps.""" + heatmaps = [] + + h = self.relu(self.conv1_1(x)) + h = self.relu(self.conv1_2(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv2_1(h)) + h = self.relu(self.conv2_2(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv3_1(h)) + h = self.relu(self.conv3_2(h)) + h = self.relu(self.conv3_3(h)) + h = self.relu(self.conv3_4(h)) + h = self.max_pooling_2d(h) + h = self.relu(self.conv4_1(h)) + h = self.relu(self.conv4_2(h)) + h = self.relu(self.conv4_3(h)) + h = self.relu(self.conv4_4(h)) + h = self.relu(self.conv5_1(h)) + h = self.relu(self.conv5_2(h)) + h = self.relu(self.conv5_3_CPM(h)) + feature_map = h + + # stage1 + h = self.relu(self.conv6_1_CPM(h)) + h = self.conv6_2_CPM(h) + heatmaps.append(h) + + # stage2 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage2(h)) + h = self.relu(self.Mconv2_stage2(h)) + h = self.relu(self.Mconv3_stage2(h)) + h = self.relu(self.Mconv4_stage2(h)) + h = self.relu(self.Mconv5_stage2(h)) + h = self.relu(self.Mconv6_stage2(h)) + h = self.Mconv7_stage2(h) + heatmaps.append(h) + + # stage3 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage3(h)) + h = self.relu(self.Mconv2_stage3(h)) + h = self.relu(self.Mconv3_stage3(h)) + h = 
self.relu(self.Mconv4_stage3(h)) + h = self.relu(self.Mconv5_stage3(h)) + h = self.relu(self.Mconv6_stage3(h)) + h = self.Mconv7_stage3(h) + heatmaps.append(h) + + # stage4 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage4(h)) + h = self.relu(self.Mconv2_stage4(h)) + h = self.relu(self.Mconv3_stage4(h)) + h = self.relu(self.Mconv4_stage4(h)) + h = self.relu(self.Mconv5_stage4(h)) + h = self.relu(self.Mconv6_stage4(h)) + h = self.Mconv7_stage4(h) + heatmaps.append(h) + + # stage5 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage5(h)) + h = self.relu(self.Mconv2_stage5(h)) + h = self.relu(self.Mconv3_stage5(h)) + h = self.relu(self.Mconv4_stage5(h)) + h = self.relu(self.Mconv5_stage5(h)) + h = self.relu(self.Mconv6_stage5(h)) + h = self.Mconv7_stage5(h) + heatmaps.append(h) + + # stage6 + h = torch.cat([h, feature_map], dim=1) # channel concat + h = self.relu(self.Mconv1_stage6(h)) + h = self.relu(self.Mconv2_stage6(h)) + h = self.relu(self.Mconv3_stage6(h)) + h = self.relu(self.Mconv4_stage6(h)) + h = self.relu(self.Mconv5_stage6(h)) + h = self.relu(self.Mconv6_stage6(h)) + h = self.Mconv7_stage6(h) + heatmaps.append(h) + + return heatmaps + + +LOG = logging.getLogger(__name__) +TOTEN = ToTensor() +TOPIL = ToPILImage() + + +params = { + 'gaussian_sigma': 2.5, + 'inference_img_size': 736, # 368, 736, 1312 + 'heatmap_peak_thresh': 0.1, + 'crop_scale': 1.5, + 'line_indices': [ + [0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], + [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13], + [13, 14], [14, 15], [15, 16], + [17, 18], [18, 19], [19, 20], [20, 21], + [22, 23], [23, 24], [24, 25], [25, 26], + [27, 28], [28, 29], [29, 30], + [31, 32], [32, 33], [33, 34], [34, 35], + [36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [41, 36], + [42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [47, 42], + [48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], + [54, 55], [55, 56], [56, 57], [57, 58], [58, 59], [59, 48], + [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66], + [66, 67], [67, 60] + ], +} + + +class Face(object): + """ + The OpenPose face landmark detector model. 
+ + Args: + inference_size: set the size of the inference image size, suggested: + 368, 736, 1312, default 736 + gaussian_sigma: blur the heatmaps, default 2.5 + heatmap_peak_thresh: return landmark if over threshold, default 0.1 + + """ + def __init__(self, face_model_path, + inference_size=None, + gaussian_sigma=None, + heatmap_peak_thresh=None): + self.inference_size = inference_size or params["inference_img_size"] + self.sigma = gaussian_sigma or params['gaussian_sigma'] + self.threshold = heatmap_peak_thresh or params["heatmap_peak_thresh"] + self.model = FaceNet() + self.model.load_state_dict(torch.load(face_model_path)) + self.model.eval() + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, face_img): + device = next(iter(self.model.parameters())).device + H, W, C = face_img.shape + + w_size = 384 + x_data = torch.from_numpy(util.smart_resize(face_img, (w_size, w_size))).permute([2, 0, 1]) / 256.0 - 0.5 + + x_data = x_data.to(device) + + with torch.no_grad(): + hs = self.model(x_data[None, ...]) + heatmaps = F.interpolate( + hs[-1], + (H, W), + mode='bilinear', align_corners=True).cpu().numpy()[0] + return heatmaps + + def compute_peaks_from_heatmaps(self, heatmaps): + all_peaks = [] + for part in range(heatmaps.shape[0]): + map_ori = heatmaps[part].copy() + binary = np.ascontiguousarray(map_ori > 0.05, dtype=np.uint8) + + if np.sum(binary) == 0: + continue + + positions = np.where(binary > 0.5) + intensities = map_ori[positions] + mi = np.argmax(intensities) + y, x = positions[0][mi], positions[1][mi] + all_peaks.append([x, y]) + + return np.array(all_peaks) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/hand.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/hand.py new file mode 100644 index 0000000000000000000000000000000000000000..1387c4238c8c3856bb9622edb9b4c883e26c1d59 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/hand.py @@ -0,0 +1,90 @@ +import cv2 +import numpy as np +import torch +from scipy.ndimage.filters import gaussian_filter +from skimage.measure import label + +from . 
import util +from .model import handpose_model + + +class Hand(object): + def __init__(self, model_path): + self.model = handpose_model() + model_dict = util.transfer(self.model, torch.load(model_path)) + self.model.load_state_dict(model_dict) + self.model.eval() + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, oriImgRaw): + device = next(iter(self.model.parameters())).device + scale_search = [0.5, 1.0, 1.5, 2.0] + # scale_search = [0.5] + boxsize = 368 + stride = 8 + padValue = 128 + thre = 0.05 + multiplier = [x * boxsize for x in scale_search] + + wsize = 128 + heatmap_avg = np.zeros((wsize, wsize, 22)) + + Hr, Wr, Cr = oriImgRaw.shape + + oriImg = cv2.GaussianBlur(oriImgRaw, (0, 0), 0.8) + + for m in range(len(multiplier)): + scale = multiplier[m] + imageToTest = util.smart_resize(oriImg, (scale, scale)) + + imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) + im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 + im = np.ascontiguousarray(im) + + data = torch.from_numpy(im).float() + data = data.to(device) + + with torch.no_grad(): + output = self.model(data).cpu().numpy() + + # extract outputs, resize, and remove padding + heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps + heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride) + heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] + heatmap = util.smart_resize(heatmap, (wsize, wsize)) + + heatmap_avg += heatmap / len(multiplier) + + all_peaks = [] + for part in range(21): + map_ori = heatmap_avg[:, :, part] + one_heatmap = gaussian_filter(map_ori, sigma=3) + binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) + + if np.sum(binary) == 0: + all_peaks.append([0, 0]) + continue + label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) + max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 + label_img[label_img != max_index] = 0 + map_ori[label_img == 0] = 0 + + y, x = util.npmax(map_ori) + y = int(float(y) * float(Hr) / float(wsize)) + x = int(float(x) * float(Wr) / float(wsize)) + all_peaks.append([x, y]) + return np.array(all_peaks) + +if __name__ == "__main__": + hand_estimation = Hand('../model/hand_pose_model.pth') + + # test_image = '../images/hand.jpg' + test_image = '../images/hand.jpg' + oriImg = cv2.imread(test_image) # B,G,R order + peaks = hand_estimation(oriImg) + canvas = util.draw_handpose(oriImg, peaks, True) + cv2.imshow('', canvas) + cv2.waitKey(0) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/model.py new file mode 100644 index 0000000000000000000000000000000000000000..6c3d47268986f8018b2c75307a7725d364b175fe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/model.py @@ -0,0 +1,217 @@ +import torch +from collections import OrderedDict + +import torch +import torch.nn as nn + +def make_layers(block, no_relu_layers): + layers = [] + for layer_name, v in block.items(): + if 'pool' in layer_name: + layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], + padding=v[2]) + layers.append((layer_name, layer)) + else: + conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], + kernel_size=v[2], stride=v[3], + padding=v[4]) + layers.append((layer_name, conv2d)) + if layer_name not in 
no_relu_layers: + layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) + + return nn.Sequential(OrderedDict(layers)) + +class bodypose_model(nn.Module): + def __init__(self): + super(bodypose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ + 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ + 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ + 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] + blocks = {} + block0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3_CPM', [512, 256, 3, 1, 1]), + ('conv4_4_CPM', [256, 128, 3, 1, 1]) + ]) + + + # Stage 1 + block1_1 = OrderedDict([ + ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), + ('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) + ]) + + block1_2 = OrderedDict([ + ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), + ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), + ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) + ]) + blocks['block1_1'] = block1_1 + blocks['block1_2'] = block1_2 + + self.model0 = make_layers(block0, no_relu_layers) + + # Stages 2 - 6 + for i in range(2, 7): + blocks['block%d_1' % i] = OrderedDict([ + ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) + ]) + + blocks['block%d_2' % i] = OrderedDict([ + ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), + ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_1 = blocks['block1_1'] + self.model2_1 = blocks['block2_1'] + self.model3_1 = blocks['block3_1'] + self.model4_1 = blocks['block4_1'] + self.model5_1 = blocks['block5_1'] + self.model6_1 = blocks['block6_1'] + + self.model1_2 = blocks['block1_2'] + self.model2_2 = blocks['block2_2'] + self.model3_2 = blocks['block3_2'] + self.model4_2 = blocks['block4_2'] + self.model5_2 = blocks['block5_2'] + self.model6_2 = blocks['block6_2'] + + + def forward(self, x): + + out1 = self.model0(x) + + out1_1 = self.model1_1(out1) + out1_2 = self.model1_2(out1) + out2 = torch.cat([out1_1, out1_2, out1], 1) + + out2_1 = self.model2_1(out2) + out2_2 = self.model2_2(out2) + out3 = torch.cat([out2_1, out2_2, out1], 1) + + out3_1 = self.model3_1(out3) + out3_2 = self.model3_2(out3) + out4 = torch.cat([out3_1, out3_2, out1], 1) + + out4_1 = self.model4_1(out4) + out4_2 = self.model4_2(out4) + out5 = 
torch.cat([out4_1, out4_2, out1], 1) + + out5_1 = self.model5_1(out5) + out5_2 = self.model5_2(out5) + out6 = torch.cat([out5_1, out5_2, out1], 1) + + out6_1 = self.model6_1(out6) + out6_2 = self.model6_2(out6) + + return out6_1, out6_2 + +class handpose_model(nn.Module): + def __init__(self): + super(handpose_model, self).__init__() + + # these layers have no relu layer + no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ + 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] + # stage 1 + block1_0 = OrderedDict([ + ('conv1_1', [3, 64, 3, 1, 1]), + ('conv1_2', [64, 64, 3, 1, 1]), + ('pool1_stage1', [2, 2, 0]), + ('conv2_1', [64, 128, 3, 1, 1]), + ('conv2_2', [128, 128, 3, 1, 1]), + ('pool2_stage1', [2, 2, 0]), + ('conv3_1', [128, 256, 3, 1, 1]), + ('conv3_2', [256, 256, 3, 1, 1]), + ('conv3_3', [256, 256, 3, 1, 1]), + ('conv3_4', [256, 256, 3, 1, 1]), + ('pool3_stage1', [2, 2, 0]), + ('conv4_1', [256, 512, 3, 1, 1]), + ('conv4_2', [512, 512, 3, 1, 1]), + ('conv4_3', [512, 512, 3, 1, 1]), + ('conv4_4', [512, 512, 3, 1, 1]), + ('conv5_1', [512, 512, 3, 1, 1]), + ('conv5_2', [512, 512, 3, 1, 1]), + ('conv5_3_CPM', [512, 128, 3, 1, 1]) + ]) + + block1_1 = OrderedDict([ + ('conv6_1_CPM', [128, 512, 1, 1, 0]), + ('conv6_2_CPM', [512, 22, 1, 1, 0]) + ]) + + blocks = {} + blocks['block1_0'] = block1_0 + blocks['block1_1'] = block1_1 + + # stage 2-6 + for i in range(2, 7): + blocks['block%d' % i] = OrderedDict([ + ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), + ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), + ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), + ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) + ]) + + for k in blocks.keys(): + blocks[k] = make_layers(blocks[k], no_relu_layers) + + self.model1_0 = blocks['block1_0'] + self.model1_1 = blocks['block1_1'] + self.model2 = blocks['block2'] + self.model3 = blocks['block3'] + self.model4 = blocks['block4'] + self.model5 = blocks['block5'] + self.model6 = blocks['block6'] + + def forward(self, x): + out1_0 = self.model1_0(x) + out1_1 = self.model1_1(out1_0) + concat_stage2 = torch.cat([out1_1, out1_0], 1) + out_stage2 = self.model2(concat_stage2) + concat_stage3 = torch.cat([out_stage2, out1_0], 1) + out_stage3 = self.model3(concat_stage3) + concat_stage4 = torch.cat([out_stage3, out1_0], 1) + out_stage4 = self.model4(concat_stage4) + concat_stage5 = torch.cat([out_stage4, out1_0], 1) + out_stage5 = self.model5(concat_stage5) + concat_stage6 = torch.cat([out_stage5, out1_0], 1) + out_stage6 = self.model6(concat_stage6) + return out_stage6 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/util.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/util.py new file mode 100644 index 0000000000000000000000000000000000000000..a0851ca409863dcee4bf731a47b472992569dd68 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/open_pose/util.py @@ -0,0 +1,383 @@ +import math +import numpy as np +import matplotlib +import cv2 +from typing import List, Tuple, Union + +from .body import BodyResult, Keypoint + +eps = 0.01 + + +def smart_resize(x, s): + Ht, Wt = s + if x.ndim == 2: + Ho, Wo = x.shape + Co = 1 + else: + Ho, Wo, Co = x.shape + if Co == 3 or Co == 1: + k = float(Ht + Wt) / float(Ho + Wo) + return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) + else: + return np.stack([smart_resize(x[:, :, 
i], s) for i in range(Co)], axis=2) + + +def smart_resize_k(x, fx, fy): + if x.ndim == 2: + Ho, Wo = x.shape + Co = 1 + else: + Ho, Wo, Co = x.shape + Ht, Wt = Ho * fy, Wo * fx + if Co == 3 or Co == 1: + k = float(Ht + Wt) / float(Ho + Wo) + return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) + else: + return np.stack([smart_resize_k(x[:, :, i], fx, fy) for i in range(Co)], axis=2) + + +def padRightDownCorner(img, stride, padValue): + h = img.shape[0] + w = img.shape[1] + + pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img + pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) + img_padded = np.concatenate((pad_up, img_padded), axis=0) + pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) + img_padded = np.concatenate((pad_left, img_padded), axis=1) + pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) + img_padded = np.concatenate((img_padded, pad_down), axis=0) + pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) + img_padded = np.concatenate((img_padded, pad_right), axis=1) + + return img_padded, pad + + +def transfer(model, model_weights): + transfered_model_weights = {} + for weights_name in model.state_dict().keys(): + transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] + return transfered_model_weights + + +def draw_bodypose(canvas: np.ndarray, keypoints: List[Keypoint]) -> np.ndarray: + """ + Draw keypoints and limbs representing body pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the body pose. + keypoints (List[Keypoint]): A list of Keypoint objects representing the body keypoints to be drawn. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn body pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. 
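+
+        Example (illustrative sketch; ``pose`` stands for a PoseResult with
+        normalized keypoints):
+            >>> canvas = np.zeros((512, 512, 3), dtype=np.uint8)
+            >>> canvas = draw_bodypose(canvas, pose.body.keypoints)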
+ """ + H, W, C = canvas.shape + stickwidth = 4 + + limbSeq = [ + [2, 3], [2, 6], [3, 4], [4, 5], + [6, 7], [7, 8], [2, 9], [9, 10], + [10, 11], [2, 12], [12, 13], [13, 14], + [2, 1], [1, 15], [15, 17], [1, 16], + [16, 18], + ] + + colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ + [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ + [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] + + for (k1_index, k2_index), color in zip(limbSeq, colors): + keypoint1 = keypoints[k1_index - 1] + keypoint2 = keypoints[k2_index - 1] + + if keypoint1 is None or keypoint2 is None: + continue + + Y = np.array([keypoint1.x, keypoint2.x]) * float(W) + X = np.array([keypoint1.y, keypoint2.y]) * float(H) + mX = np.mean(X) + mY = np.mean(Y) + length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 + angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) + polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) + cv2.fillConvexPoly(canvas, polygon, [int(float(c) * 0.6) for c in color]) + + for keypoint, color in zip(keypoints, colors): + if keypoint is None: + continue + + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) + + return canvas + + +def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: + """ + Draw keypoints and connections representing hand pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the hand pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the hand keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn hand pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. + """ + if not keypoints: + return canvas + + H, W, C = canvas.shape + + edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ + [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] + + for ie, (e1, e2) in enumerate(edges): + k1 = keypoints[e1] + k2 = keypoints[e2] + if k1 is None or k2 is None: + continue + + x1 = int(k1.x * W) + y1 = int(k1.y * H) + x2 = int(k2.x * W) + y2 = int(k2.y * H) + if x1 > eps and y1 > eps and x2 > eps and y2 > eps: + cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2) + + for keypoint in keypoints: + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) + return canvas + + +def draw_facepose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: + """ + Draw keypoints representing face pose on a given canvas. + + Args: + canvas (np.ndarray): A 3D numpy array representing the canvas (image) on which to draw the face pose. + keypoints (List[Keypoint]| None): A list of Keypoint objects representing the face keypoints to be drawn + or None if no keypoints are present. + + Returns: + np.ndarray: A 3D numpy array representing the modified canvas with the drawn face pose. + + Note: + The function expects the x and y coordinates of the keypoints to be normalized between 0 and 1. 
+ """ + if not keypoints: + return canvas + + H, W, C = canvas.shape + for keypoint in keypoints: + x, y = keypoint.x, keypoint.y + x = int(x * W) + y = int(y * H) + if x > eps and y > eps: + cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1) + return canvas + + +# detect hand according to body pose keypoints +# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp +def handDetect(body: BodyResult, oriImg) -> List[Tuple[int, int, int, bool]]: + """ + Detect hands in the input body pose keypoints and calculate the bounding box for each hand. + + Args: + body (BodyResult): A BodyResult object containing the detected body pose keypoints. + oriImg (numpy.ndarray): A 3D numpy array representing the original input image. + + Returns: + List[Tuple[int, int, int, bool]]: A list of tuples, each containing the coordinates (x, y) of the top-left + corner of the bounding box, the width (height) of the bounding box, and + a boolean flag indicating whether the hand is a left hand (True) or a + right hand (False). + + Notes: + - The width and height of the bounding boxes are equal since the network requires squared input. + - The minimum bounding box size is 20 pixels. + """ + ratioWristElbow = 0.33 + detect_result = [] + image_height, image_width = oriImg.shape[0:2] + + keypoints = body.keypoints + # right hand: wrist 4, elbow 3, shoulder 2 + # left hand: wrist 7, elbow 6, shoulder 5 + left_shoulder = keypoints[5] + left_elbow = keypoints[6] + left_wrist = keypoints[7] + right_shoulder = keypoints[2] + right_elbow = keypoints[3] + right_wrist = keypoints[4] + + # if any of three not detected + has_left = all(keypoint is not None for keypoint in (left_shoulder, left_elbow, left_wrist)) + has_right = all(keypoint is not None for keypoint in (right_shoulder, right_elbow, right_wrist)) + if not (has_left or has_right): + return [] + + hands = [] + #left hand + if has_left: + hands.append([ + left_shoulder.x, left_shoulder.y, + left_elbow.x, left_elbow.y, + left_wrist.x, left_wrist.y, + True + ]) + # right hand + if has_right: + hands.append([ + right_shoulder.x, right_shoulder.y, + right_elbow.x, right_elbow.y, + right_wrist.x, right_wrist.y, + False + ]) + + for x1, y1, x2, y2, x3, y3, is_left in hands: + # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox + # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]); + # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]); + # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow); + # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder); + # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder); + x = x3 + ratioWristElbow * (x3 - x2) + y = y3 + ratioWristElbow * (y3 - y2) + distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) + distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) + width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) + # x-y refers to the center --> offset to topLeft point + # handRectangle.x -= handRectangle.width / 2.f; + # handRectangle.y -= handRectangle.height / 2.f; + x -= width / 2 + y -= width / 2 # width = height + # overflow the image + if x < 0: x = 0 + if y < 0: y = 0 + width1 = width + width2 = width + if x + width > image_width: width1 = image_width - x + if y + width > image_height: 
width2 = image_height - y
+        width = min(width1, width2)
+        # discard boxes below the 20 pixel minimum size
+        if width >= 20:
+            detect_result.append((int(x), int(y), int(width), is_left))
+
+    '''
+    return value: [[x, y, w, True if left hand else False]].
+    width=height since the network requires square input.
+    x, y is the coordinate of the top left corner
+    '''
+    return detect_result
+
+
+# Written by Lvmin
+def faceDetect(body: BodyResult, oriImg) -> Union[Tuple[int, int, int], None]:
+    """
+    Detect the face in the input body pose keypoints and calculate the bounding box for the face.
+
+    Args:
+        body (BodyResult): A BodyResult object containing the detected body pose keypoints.
+        oriImg (numpy.ndarray): A 3D numpy array representing the original input image.
+
+    Returns:
+        Tuple[int, int, int] | None: A tuple containing the coordinates (x, y) of the top-left corner of the
+                                     bounding box and the width (height) of the bounding box, or None if the
+                                     face is not detected or the bounding box width is less than 20 pixels.
+
+    Notes:
+        - The width and height of the bounding box are equal.
+        - The minimum bounding box size is 20 pixels.
+    """
+    # left right eye ear 14 15 16 17
+    image_height, image_width = oriImg.shape[0:2]
+
+    keypoints = body.keypoints
+    head = keypoints[0]
+    left_eye = keypoints[14]
+    right_eye = keypoints[15]
+    left_ear = keypoints[16]
+    right_ear = keypoints[17]
+
+    if head is None or all(keypoint is None for keypoint in (left_eye, right_eye, left_ear, right_ear)):
+        return None
+
+    width = 0.0
+    x0, y0 = head.x, head.y
+
+    if left_eye is not None:
+        x1, y1 = left_eye.x, left_eye.y
+        d = max(abs(x0 - x1), abs(y0 - y1))
+        width = max(width, d * 3.0)
+
+    if right_eye is not None:
+        x1, y1 = right_eye.x, right_eye.y
+        d = max(abs(x0 - x1), abs(y0 - y1))
+        width = max(width, d * 3.0)
+
+    if left_ear is not None:
+        x1, y1 = left_ear.x, left_ear.y
+        d = max(abs(x0 - x1), abs(y0 - y1))
+        width = max(width, d * 1.5)
+
+    if right_ear is not None:
+        x1, y1 = right_ear.x, right_ear.y
+        d = max(abs(x0 - x1), abs(y0 - y1))
+        width = max(width, d * 1.5)
+
+    x, y = x0, y0
+
+    x -= width
+    y -= width
+
+    if x < 0:
+        x = 0
+
+    if y < 0:
+        y = 0
+
+    width1 = width * 2
+    width2 = width * 2
+
+    if x + width > image_width:
+        width1 = image_width - x
+
+    if y + width > image_height:
+        width2 = image_height - y
+
+    width = min(width1, width2)
+
+    if width >= 20:
+        return int(x), int(y), int(width)
+    else:
+        return None
+
+
+# get the (row, column) index of the maximum value of a 2d array
+def npmax(array):
+    arrayindex = array.argmax(1)
+    arrayvalue = array.max(1)
+    i = arrayvalue.argmax()
+    j = arrayindex[i]
+    return i, j
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..913b6cf92c19d37b6ee4f7bc99c65f655e7f840c
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/LICENSE
@@ -0,0 +1,21 @@
+It is just for research purpose, and commercial use should be contacted with authors first.
+ +Copyright (c) 2021 Zhuo Su + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6286358567c42ce282b99d8294877219c438842c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/__init__.py @@ -0,0 +1,65 @@ +import os +import warnings + +import cv2 +import numpy as np +import torch +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, nms, resize_image_with_pad, safe_step,common_input_validate, annotator_ckpts_path, custom_hf_download +from .model import pidinet + + +class PidiNetDetector: + def __init__(self, netNetwork): + self.netNetwork = netNetwork + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "table5_pidinet.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + netNetwork = pidinet() + netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(model_path)['state_dict'].items()}) + netNetwork.eval() + + return cls(netNetwork) + + def to(self, device): + self.netNetwork.to(device) + return self + + def __call__(self, input_image, detect_resolution=512, safe=False, output_type="pil", scribble=False, apply_filter=False, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + detected_map, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + device = next(iter(self.netNetwork.parameters())).device + + detected_map = detected_map[:, :, ::-1].copy() + with torch.no_grad(): + image_pidi = torch.from_numpy(detected_map).float().to(device) + image_pidi = image_pidi / 255.0 + image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w') + edge = self.netNetwork(image_pidi)[-1] + edge = edge.cpu().numpy() + if apply_filter: + edge = edge > 0.5 + if safe: + edge = safe_step(edge) + edge = (edge * 255.0).clip(0, 255).astype(np.uint8) + + detected_map = edge[0, 0] + + if scribble: + detected_map = nms(detected_map, 127, 3.0) + detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0) + detected_map[detected_map > 4] = 255 + detected_map[detected_map < 255] = 0 + + detected_map = HWC3(remove_pad(detected_map)) + + if output_type == "pil": + detected_map 
= Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/model.py new file mode 100644 index 0000000000000000000000000000000000000000..16595b35a4f75a6d2b0e832e24b6e11706d77326 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/pidi/model.py @@ -0,0 +1,681 @@ +""" +Author: Zhuo Su, Wenzhe Liu +Date: Feb 18, 2021 +""" + +import math + +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. + + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + else: + return _totensor(imgs, bgr2rgb, float32) + +nets = { + 'baseline': { + 'layer0': 'cv', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'cv', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'cv', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'cv', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'c-v15': { + 'layer0': 'cd', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'cv', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'cv', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'cv', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'a-v15': { + 'layer0': 'ad', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'cv', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'cv', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'cv', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'r-v15': { + 'layer0': 'rd', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'cv', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'cv', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'cv', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'cvvv4': { + 'layer0': 'cd', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'cd', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'cd', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'cd', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'avvv4': { + 'layer0': 'ad', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'ad', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'ad', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'ad', + 'layer13': 'cv', + 'layer14': 'cv', + 'layer15': 'cv', + }, + 'rvvv4': { + 'layer0': 'rd', + 'layer1': 'cv', + 'layer2': 'cv', + 'layer3': 'cv', + 'layer4': 'rd', + 'layer5': 'cv', + 'layer6': 'cv', + 'layer7': 'cv', + 'layer8': 'rd', + 'layer9': 'cv', + 'layer10': 'cv', + 'layer11': 'cv', + 'layer12': 'rd', + 'layer13': 'cv', + 'layer14': 
'cv', + 'layer15': 'cv', + }, + 'cccv4': { + 'layer0': 'cd', + 'layer1': 'cd', + 'layer2': 'cd', + 'layer3': 'cv', + 'layer4': 'cd', + 'layer5': 'cd', + 'layer6': 'cd', + 'layer7': 'cv', + 'layer8': 'cd', + 'layer9': 'cd', + 'layer10': 'cd', + 'layer11': 'cv', + 'layer12': 'cd', + 'layer13': 'cd', + 'layer14': 'cd', + 'layer15': 'cv', + }, + 'aaav4': { + 'layer0': 'ad', + 'layer1': 'ad', + 'layer2': 'ad', + 'layer3': 'cv', + 'layer4': 'ad', + 'layer5': 'ad', + 'layer6': 'ad', + 'layer7': 'cv', + 'layer8': 'ad', + 'layer9': 'ad', + 'layer10': 'ad', + 'layer11': 'cv', + 'layer12': 'ad', + 'layer13': 'ad', + 'layer14': 'ad', + 'layer15': 'cv', + }, + 'rrrv4': { + 'layer0': 'rd', + 'layer1': 'rd', + 'layer2': 'rd', + 'layer3': 'cv', + 'layer4': 'rd', + 'layer5': 'rd', + 'layer6': 'rd', + 'layer7': 'cv', + 'layer8': 'rd', + 'layer9': 'rd', + 'layer10': 'rd', + 'layer11': 'cv', + 'layer12': 'rd', + 'layer13': 'rd', + 'layer14': 'rd', + 'layer15': 'cv', + }, + 'c16': { + 'layer0': 'cd', + 'layer1': 'cd', + 'layer2': 'cd', + 'layer3': 'cd', + 'layer4': 'cd', + 'layer5': 'cd', + 'layer6': 'cd', + 'layer7': 'cd', + 'layer8': 'cd', + 'layer9': 'cd', + 'layer10': 'cd', + 'layer11': 'cd', + 'layer12': 'cd', + 'layer13': 'cd', + 'layer14': 'cd', + 'layer15': 'cd', + }, + 'a16': { + 'layer0': 'ad', + 'layer1': 'ad', + 'layer2': 'ad', + 'layer3': 'ad', + 'layer4': 'ad', + 'layer5': 'ad', + 'layer6': 'ad', + 'layer7': 'ad', + 'layer8': 'ad', + 'layer9': 'ad', + 'layer10': 'ad', + 'layer11': 'ad', + 'layer12': 'ad', + 'layer13': 'ad', + 'layer14': 'ad', + 'layer15': 'ad', + }, + 'r16': { + 'layer0': 'rd', + 'layer1': 'rd', + 'layer2': 'rd', + 'layer3': 'rd', + 'layer4': 'rd', + 'layer5': 'rd', + 'layer6': 'rd', + 'layer7': 'rd', + 'layer8': 'rd', + 'layer9': 'rd', + 'layer10': 'rd', + 'layer11': 'rd', + 'layer12': 'rd', + 'layer13': 'rd', + 'layer14': 'rd', + 'layer15': 'rd', + }, + 'carv4': { + 'layer0': 'cd', + 'layer1': 'ad', + 'layer2': 'rd', + 'layer3': 'cv', + 'layer4': 'cd', + 'layer5': 'ad', + 'layer6': 'rd', + 'layer7': 'cv', + 'layer8': 'cd', + 'layer9': 'ad', + 'layer10': 'rd', + 'layer11': 'cv', + 'layer12': 'cd', + 'layer13': 'ad', + 'layer14': 'rd', + 'layer15': 'cv', + }, + } + +def createConvFunc(op_type): + assert op_type in ['cv', 'cd', 'ad', 'rd'], 'unknown op type: %s' % str(op_type) + if op_type == 'cv': + return F.conv2d + + if op_type == 'cd': + def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): + assert dilation in [1, 2], 'dilation for cd_conv should be in 1 or 2' + assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for cd_conv should be 3x3' + assert padding == dilation, 'padding for cd_conv set wrong' + + weights_c = weights.sum(dim=[2, 3], keepdim=True) + yc = F.conv2d(x, weights_c, stride=stride, padding=0, groups=groups) + y = F.conv2d(x, weights, bias, stride=stride, padding=padding, dilation=dilation, groups=groups) + return y - yc + return func + elif op_type == 'ad': + def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): + assert dilation in [1, 2], 'dilation for ad_conv should be in 1 or 2' + assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for ad_conv should be 3x3' + assert padding == dilation, 'padding for ad_conv set wrong' + + shape = weights.shape + weights = weights.view(shape[0], shape[1], -1) + weights_conv = (weights - weights[:, :, [3, 0, 1, 6, 4, 2, 7, 8, 5]]).view(shape) # clock-wise + y = F.conv2d(x, weights_conv, bias, stride=stride, padding=padding, dilation=dilation, 
groups=groups) + return y + return func + elif op_type == 'rd': + def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): + assert dilation in [1, 2], 'dilation for rd_conv should be in 1 or 2' + assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for rd_conv should be 3x3' + padding = 2 * dilation + + shape = weights.shape + if weights.is_cuda: + buffer = torch.cuda.FloatTensor(shape[0], shape[1], 5 * 5).fill_(0) + else: + buffer = torch.zeros(shape[0], shape[1], 5 * 5).to(weights.device) + weights = weights.view(shape[0], shape[1], -1) + buffer[:, :, [0, 2, 4, 10, 14, 20, 22, 24]] = weights[:, :, 1:] + buffer[:, :, [6, 7, 8, 11, 13, 16, 17, 18]] = -weights[:, :, 1:] + buffer[:, :, 12] = 0 + buffer = buffer.view(shape[0], shape[1], 5, 5) + y = F.conv2d(x, buffer, bias, stride=stride, padding=padding, dilation=dilation, groups=groups) + return y + return func + else: + print('impossible to be here unless you force that') + return None + +class Conv2d(nn.Module): + def __init__(self, pdc, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False): + super(Conv2d, self).__init__() + if in_channels % groups != 0: + raise ValueError('in_channels must be divisible by groups') + if out_channels % groups != 0: + raise ValueError('out_channels must be divisible by groups') + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, kernel_size, kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.reset_parameters() + self.pdc = pdc + + def reset_parameters(self): + nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + nn.init.uniform_(self.bias, -bound, bound) + + def forward(self, input): + + return self.pdc(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + +class CSAM(nn.Module): + """ + Compact Spatial Attention Module + """ + def __init__(self, channels): + super(CSAM, self).__init__() + + mid_channels = 4 + self.relu1 = nn.ReLU() + self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(mid_channels, 1, kernel_size=3, padding=1, bias=False) + self.sigmoid = nn.Sigmoid() + nn.init.constant_(self.conv1.bias, 0) + + def forward(self, x): + y = self.relu1(x) + y = self.conv1(y) + y = self.conv2(y) + y = self.sigmoid(y) + + return x * y + +class CDCM(nn.Module): + """ + Compact Dilation Convolution based Module + """ + def __init__(self, in_channels, out_channels): + super(CDCM, self).__init__() + + self.relu1 = nn.ReLU() + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0) + self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=5, padding=5, bias=False) + self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=7, padding=7, bias=False) + self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=9, padding=9, bias=False) + self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=11, padding=11, bias=False) + nn.init.constant_(self.conv1.bias, 0) + + def forward(self, x): + x = self.relu1(x) + x = self.conv1(x) + x1 = 
self.conv2_1(x)
+        x2 = self.conv2_2(x)
+        x3 = self.conv2_3(x)
+        x4 = self.conv2_4(x)
+        return x1 + x2 + x3 + x4
+
+
+class MapReduce(nn.Module):
+    """
+    Reduce feature maps into a single edge map
+    """
+    def __init__(self, channels):
+        super(MapReduce, self).__init__()
+        self.conv = nn.Conv2d(channels, 1, kernel_size=1, padding=0)
+        nn.init.constant_(self.conv.bias, 0)
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class PDCBlock(nn.Module):
+    def __init__(self, pdc, inplane, ouplane, stride=1):
+        super(PDCBlock, self).__init__()
+        self.stride = stride
+
+        if self.stride > 1:
+            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+            self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0)
+        self.conv1 = Conv2d(pdc, inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
+        self.relu2 = nn.ReLU()
+        self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False)
+
+    def forward(self, x):
+        if self.stride > 1:
+            x = self.pool(x)
+        y = self.conv1(x)
+        y = self.relu2(y)
+        y = self.conv2(y)
+        if self.stride > 1:
+            x = self.shortcut(x)
+        y = y + x
+        return y
+
+class PDCBlock_converted(nn.Module):
+    """
+    CPDC, APDC can be converted to vanilla 3x3 convolution
+    RPDC can be converted to vanilla 5x5 convolution
+    """
+    def __init__(self, pdc, inplane, ouplane, stride=1):
+        super(PDCBlock_converted, self).__init__()
+        self.stride = stride
+
+        if self.stride > 1:
+            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+            self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0)
+        if pdc == 'rd':
+            self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding=2, groups=inplane, bias=False)
+        else:
+            self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False)
+        self.relu2 = nn.ReLU()
+        self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False)
+
+    def forward(self, x):
+        if self.stride > 1:
+            x = self.pool(x)
+        y = self.conv1(x)
+        y = self.relu2(y)
+        y = self.conv2(y)
+        if self.stride > 1:
+            x = self.shortcut(x)
+        y = y + x
+        return y
+
+class PiDiNet(nn.Module):
+    def __init__(self, inplane, pdcs, dil=None, sa=False, convert=False):
+        super(PiDiNet, self).__init__()
+        self.sa = sa
+        if dil is not None:
+            assert isinstance(dil, int), 'dil should be an int'
+        self.dil = dil
+
+        self.fuseplanes = []
+
+        self.inplane = inplane
+        if convert:
+            if pdcs[0] == 'rd':
+                init_kernel_size = 5
+                init_padding = 2
+            else:
+                init_kernel_size = 3
+                init_padding = 1
+            self.init_block = nn.Conv2d(3, self.inplane,
+                                        kernel_size=init_kernel_size, padding=init_padding, bias=False)
+            block_class = PDCBlock_converted
+        else:
+            self.init_block = Conv2d(pdcs[0], 3, self.inplane, kernel_size=3, padding=1)
+            block_class = PDCBlock
+
+        self.block1_1 = block_class(pdcs[1], self.inplane, self.inplane)
+        self.block1_2 = block_class(pdcs[2], self.inplane, self.inplane)
+        self.block1_3 = block_class(pdcs[3], self.inplane, self.inplane)
+        self.fuseplanes.append(self.inplane)  # C
+
+        inplane = self.inplane
+        self.inplane = self.inplane * 2
+        self.block2_1 = block_class(pdcs[4], inplane, self.inplane, stride=2)
+        self.block2_2 = block_class(pdcs[5], self.inplane, self.inplane)
+        self.block2_3 = block_class(pdcs[6], self.inplane, self.inplane)
+        self.block2_4 = block_class(pdcs[7], self.inplane, self.inplane)
+        self.fuseplanes.append(self.inplane)  # 2C
+
+        inplane = self.inplane
+        self.inplane = self.inplane * 2
+        self.block3_1 = block_class(pdcs[8], inplane, self.inplane, stride=2)
+        self.block3_2 =
block_class(pdcs[9], self.inplane, self.inplane) + self.block3_3 = block_class(pdcs[10], self.inplane, self.inplane) + self.block3_4 = block_class(pdcs[11], self.inplane, self.inplane) + self.fuseplanes.append(self.inplane) # 4C + + self.block4_1 = block_class(pdcs[12], self.inplane, self.inplane, stride=2) + self.block4_2 = block_class(pdcs[13], self.inplane, self.inplane) + self.block4_3 = block_class(pdcs[14], self.inplane, self.inplane) + self.block4_4 = block_class(pdcs[15], self.inplane, self.inplane) + self.fuseplanes.append(self.inplane) # 4C + + self.conv_reduces = nn.ModuleList() + if self.sa and self.dil is not None: + self.attentions = nn.ModuleList() + self.dilations = nn.ModuleList() + for i in range(4): + self.dilations.append(CDCM(self.fuseplanes[i], self.dil)) + self.attentions.append(CSAM(self.dil)) + self.conv_reduces.append(MapReduce(self.dil)) + elif self.sa: + self.attentions = nn.ModuleList() + for i in range(4): + self.attentions.append(CSAM(self.fuseplanes[i])) + self.conv_reduces.append(MapReduce(self.fuseplanes[i])) + elif self.dil is not None: + self.dilations = nn.ModuleList() + for i in range(4): + self.dilations.append(CDCM(self.fuseplanes[i], self.dil)) + self.conv_reduces.append(MapReduce(self.dil)) + else: + for i in range(4): + self.conv_reduces.append(MapReduce(self.fuseplanes[i])) + + self.classifier = nn.Conv2d(4, 1, kernel_size=1) # has bias + nn.init.constant_(self.classifier.weight, 0.25) + nn.init.constant_(self.classifier.bias, 0) + + # print('initialization done') + + def get_weights(self): + conv_weights = [] + bn_weights = [] + relu_weights = [] + for pname, p in self.named_parameters(): + if 'bn' in pname: + bn_weights.append(p) + elif 'relu' in pname: + relu_weights.append(p) + else: + conv_weights.append(p) + + return conv_weights, bn_weights, relu_weights + + def forward(self, x): + H, W = x.size()[2:] + + x = self.init_block(x) + + x1 = self.block1_1(x) + x1 = self.block1_2(x1) + x1 = self.block1_3(x1) + + x2 = self.block2_1(x1) + x2 = self.block2_2(x2) + x2 = self.block2_3(x2) + x2 = self.block2_4(x2) + + x3 = self.block3_1(x2) + x3 = self.block3_2(x3) + x3 = self.block3_3(x3) + x3 = self.block3_4(x3) + + x4 = self.block4_1(x3) + x4 = self.block4_2(x4) + x4 = self.block4_3(x4) + x4 = self.block4_4(x4) + + x_fuses = [] + if self.sa and self.dil is not None: + for i, xi in enumerate([x1, x2, x3, x4]): + x_fuses.append(self.attentions[i](self.dilations[i](xi))) + elif self.sa: + for i, xi in enumerate([x1, x2, x3, x4]): + x_fuses.append(self.attentions[i](xi)) + elif self.dil is not None: + for i, xi in enumerate([x1, x2, x3, x4]): + x_fuses.append(self.dilations[i](xi)) + else: + x_fuses = [x1, x2, x3, x4] + + e1 = self.conv_reduces[0](x_fuses[0]) + e1 = F.interpolate(e1, (H, W), mode="bilinear", align_corners=False) + + e2 = self.conv_reduces[1](x_fuses[1]) + e2 = F.interpolate(e2, (H, W), mode="bilinear", align_corners=False) + + e3 = self.conv_reduces[2](x_fuses[2]) + e3 = F.interpolate(e3, (H, W), mode="bilinear", align_corners=False) + + e4 = self.conv_reduces[3](x_fuses[3]) + e4 = F.interpolate(e4, (H, W), mode="bilinear", align_corners=False) + + outputs = [e1, e2, e3, e4] + + output = self.classifier(torch.cat(outputs, dim=1)) + #if not self.training: + # return torch.sigmoid(output) + + outputs.append(output) + outputs = [torch.sigmoid(r) for r in outputs] + return outputs + +def config_model(model): + model_options = list(nets.keys()) + assert model in model_options, \ + 'unrecognized model, please choose from %s' % 
str(model_options) + + # print(str(nets[model])) + + pdcs = [] + for i in range(16): + layer_name = 'layer%d' % i + op = nets[model][layer_name] + pdcs.append(createConvFunc(op)) + + return pdcs + +def pidinet(): + pdcs = config_model('carv4') + dil = 24 #if args.dil else None + return PiDiNet(60, pdcs, dil=dil, sa=True) + + +if __name__ == '__main__': + model = pidinet() + ckp = torch.load('table5_pidinet.pth')['state_dict'] + model.load_state_dict({k.replace('module.',''):v for k, v in ckp.items()}) + im = cv2.imread('examples/test_my/cat_v4.png') + im = img2tensor(im).unsqueeze(0)/255. + res = model(im)[-1] + res = res>0.5 + res = res.float() + res = (res[0,0].cpu().data.numpy()*255.).astype(np.uint8) + print(res.shape) + cv2.imwrite('edge.png', res) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/processor.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/processor.py new file mode 100644 index 0000000000000000000000000000000000000000..3d960c91c903c6bb10a002e38cd07415fff29a80 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/processor.py @@ -0,0 +1,147 @@ +""" +This file contains a Processor that can be used to process images with controlnet aux processors +""" +import io +import logging +from typing import Dict, Optional, Union + +from PIL import Image + +from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector, + LeresDetector, LineartAnimeDetector, + LineartDetector, MediapipeFaceDetector, + MidasDetector, MLSDdetector, NormalBaeDetector, + OpenposeDetector, PidiNetDetector, ZoeDetector, TileDetector) + +LOGGER = logging.getLogger(__name__) + + +MODELS = { + # checkpoint models + 'scribble_hed': {'class': HEDdetector, 'checkpoint': True}, + 'softedge_hed': {'class': HEDdetector, 'checkpoint': True}, + 'scribble_hedsafe': {'class': HEDdetector, 'checkpoint': True}, + 'softedge_hedsafe': {'class': HEDdetector, 'checkpoint': True}, + 'depth_midas': {'class': MidasDetector, 'checkpoint': True}, + 'mlsd': {'class': MLSDdetector, 'checkpoint': True}, + 'openpose': {'class': OpenposeDetector, 'checkpoint': True}, + 'openpose_face': {'class': OpenposeDetector, 'checkpoint': True}, + 'openpose_faceonly': {'class': OpenposeDetector, 'checkpoint': True}, + 'openpose_full': {'class': OpenposeDetector, 'checkpoint': True}, + 'openpose_hand': {'class': OpenposeDetector, 'checkpoint': True}, + 'scribble_pidinet': {'class': PidiNetDetector, 'checkpoint': True}, + 'softedge_pidinet': {'class': PidiNetDetector, 'checkpoint': True}, + 'scribble_pidsafe': {'class': PidiNetDetector, 'checkpoint': True}, + 'softedge_pidsafe': {'class': PidiNetDetector, 'checkpoint': True}, + 'normal_bae': {'class': NormalBaeDetector, 'checkpoint': True}, + 'lineart_coarse': {'class': LineartDetector, 'checkpoint': True}, + 'lineart_realistic': {'class': LineartDetector, 'checkpoint': True}, + 'lineart_anime': {'class': LineartAnimeDetector, 'checkpoint': True}, + 'depth_zoe': {'class': ZoeDetector, 'checkpoint': True}, + 'depth_leres': {'class': LeresDetector, 'checkpoint': True}, + 'depth_leres++': {'class': LeresDetector, 'checkpoint': True}, + # instantiate + 'shuffle': {'class': ContentShuffleDetector, 'checkpoint': False}, + 'mediapipe_face': {'class': MediapipeFaceDetector, 'checkpoint': False}, + 'canny': {'class': CannyDetector, 'checkpoint': False}, + 'tile': {'class': TileDetector, 'checkpoint': False}, +} + + +MODEL_PARAMS = { + 'scribble_hed': {'scribble': True}, + 'softedge_hed': {'scribble': False}, + 'scribble_hedsafe': 
{'scribble': True, 'safe': True},
+    'softedge_hedsafe': {'scribble': False, 'safe': True},
+    'depth_midas': {},
+    'mlsd': {},
+    'openpose': {'include_body': True, 'include_hand': False, 'include_face': False},
+    'openpose_face': {'include_body': True, 'include_hand': False, 'include_face': True},
+    'openpose_faceonly': {'include_body': False, 'include_hand': False, 'include_face': True},
+    'openpose_full': {'include_body': True, 'include_hand': True, 'include_face': True},
+    'openpose_hand': {'include_body': False, 'include_hand': True, 'include_face': False},
+    'scribble_pidinet': {'safe': False, 'scribble': True},
+    'softedge_pidinet': {'safe': False, 'scribble': False},
+    'scribble_pidsafe': {'safe': True, 'scribble': True},
+    'softedge_pidsafe': {'safe': True, 'scribble': False},
+    'normal_bae': {},
+    'lineart_realistic': {'coarse': False},
+    'lineart_coarse': {'coarse': True},
+    'lineart_anime': {},
+    'canny': {},
+    'shuffle': {},
+    'depth_zoe': {},
+    'depth_leres': {'boost': False},
+    'depth_leres++': {'boost': True},
+    'mediapipe_face': {},
+    'tile': {},
+}
+
+CHOICES = f"Choices for the processor are {list(MODELS.keys())}"
+
+
+class Processor:
+    def __init__(self, processor_id: str, params: Optional[Dict] = None) -> None:
+        """Processor that can be used to process images with controlnet aux processors
+
+        Args:
+            processor_id (str): processor name; must be one of the keys of MODELS
+                                (e.g. 'canny', 'depth_midas', 'openpose_full',
+                                'scribble_hed'), see CHOICES for the full list
+            params (Optional[Dict]): parameters for the processor
+        """
+        LOGGER.info("Loading %s", processor_id)
+
+        if processor_id not in MODELS:
+            raise ValueError(f"{processor_id} is not a valid processor id. Please make sure to choose one of {', '.join(MODELS.keys())}")
+
+        self.processor_id = processor_id
+        self.processor = self.load_processor(self.processor_id)
+
+        # load a copy of the default params so user overrides don't mutate MODEL_PARAMS
+        self.params = dict(MODEL_PARAMS[self.processor_id])
+        # update with user params
+        if params:
+            self.params.update(params)
+
+    def load_processor(self, processor_id: str):
+        """Load a controlnet aux processor
+
+        Args:
+            processor_id (str): processor name
+
+        Returns:
+            the instantiated controlnet aux detector
+        """
+        processor = MODELS[processor_id]['class']
+
+        # check if the processor is a checkpoint model
+        if MODELS[processor_id]['checkpoint']:
+            processor = processor.from_pretrained("lllyasviel/Annotators")
+        else:
+            processor = processor()
+        return processor
+
+    def __call__(self, image: Union[Image.Image, bytes],
+                 to_pil: bool = True) -> Union[Image.Image, bytes]:
+        """processes an image with a controlnet aux processor
+
+        Args:
+            image (Union[Image.Image, bytes]): input image in bytes or PIL Image
+            to_pil (bool): whether to return a PIL Image (True) or JPEG bytes (False)
+
+        Returns:
+            Union[Image.Image, bytes]: processed image as a PIL Image or JPEG bytes
+        """
+        # check if bytes or PIL Image
+        if isinstance(image, bytes):
+            image = Image.open(io.BytesIO(image)).convert("RGB")
+
+        processed_image = self.processor(image, **self.params)
+
+        if to_pil:
+            return processed_image
+        else:
+            output_bytes = io.BytesIO()
+            processed_image.save(output_bytes, format='JPEG')
+            return output_bytes.getvalue()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..191a2185d40f5bc43996dd79d190dbeca22e8205
--- /dev/null
+++
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/__init__.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import os +import warnings +from typing import Union + +import cv2 +import numpy as np +import torch +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .automatic_mask_generator import SamAutomaticMaskGenerator +from .build_sam import sam_model_registry + + +class SamDetector: + def __init__(self, mask_generator: SamAutomaticMaskGenerator): + self.mask_generator = mask_generator + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, model_type="vit_h", filename="sam_vit_h_4b8939.pth", subfolder=None, cache_dir=annotator_ckpts_path): + """ + Possible model_type : vit_h, vit_l, vit_b, vit_t + download weights from https://github.com/facebookresearch/segment-anything + """ + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + sam = sam_model_registry[model_type](checkpoint=model_path) + mask_generator = SamAutomaticMaskGenerator(sam) + + return cls(mask_generator) + + def to(self, device): + model = self.mask_generator.predictor.model.to(device) + model.train(False) #Update attention_bias in https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/src/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py#L251 + self.mask_generator = SamAutomaticMaskGenerator(model) + return self + + + def show_anns(self, anns): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + h, w = anns[0]['segmentation'].shape + final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB") + for ann in sorted_anns: + m = ann['segmentation'] + img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8) + for i in range(3): + img[:,:,i] = np.random.randint(255, dtype=np.uint8) + final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m*255))) + + return np.array(final_img, dtype=np.uint8) + + def __call__(self, input_image: Union[np.ndarray, Image.Image]=None, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs) -> Image.Image: + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + # Generate Masks + masks = self.mask_generator.generate(input_image) + # Create map + map = self.show_anns(masks) + + detected_map = HWC3(remove_pad(map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/automatic_mask_generator.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/automatic_mask_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ac3589d81890aca612023c85b46c0c8176195d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/automatic_mask_generator.py @@ -0,0 +1,372 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
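+
+# Usage sketch (illustrative, not part of the upstream source; the checkpoint
+# filename is the standard ViT-H weight name and must be downloaded separately):
+#
+#     from .build_sam import sam_model_registry
+#     sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
+#     generator = SamAutomaticMaskGenerator(sam)
+#     masks = generator.generate(image)  # image: HWC uint8 np.ndarray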
+ +import numpy as np +import torch +from torchvision.ops.boxes import batched_nms, box_area # type: ignore + +from typing import Any, Dict, List, Optional, Tuple + +from .modeling import Sam +from .predictor import SamPredictor +from .utils.amg import ( + MaskData, + area_from_rle, + batch_iterator, + batched_mask_to_box, + box_xyxy_to_xywh, + build_all_layer_point_grids, + calculate_stability_score, + coco_encode_rle, + generate_crop_boxes, + is_box_near_crop_edge, + mask_to_rle_pytorch, + remove_small_regions, + rle_to_mask, + uncrop_boxes_xyxy, + uncrop_masks, + uncrop_points, +) + + +class SamAutomaticMaskGenerator: + def __init__( + self, + model: Sam, + points_per_side: Optional[int] = 32, + points_per_batch: int = 64, + pred_iou_thresh: float = 0.88, + stability_score_thresh: float = 0.95, + stability_score_offset: float = 1.0, + box_nms_thresh: float = 0.7, + crop_n_layers: int = 0, + crop_nms_thresh: float = 0.7, + crop_overlap_ratio: float = 512 / 1500, + crop_n_points_downscale_factor: int = 1, + point_grids: Optional[List[np.ndarray]] = None, + min_mask_region_area: int = 0, + output_mode: str = "binary_mask", + ) -> None: + """ + Using a SAM model, generates masks for the entire image. + Generates a grid of point prompts over the image, then filters + low quality and duplicate masks. The default settings are chosen + for SAM with a ViT-H backbone. + + Arguments: + model (Sam): The SAM model to use for mask prediction. + points_per_side (int or None): The number of points to be sampled + along one side of the image. The total number of points is + points_per_side**2. If None, 'point_grids' must provide explicit + point sampling. + points_per_batch (int): Sets the number of points run simultaneously + by the model. Higher numbers may be faster but use more GPU memory. + pred_iou_thresh (float): A filtering threshold in [0,1], using the + model's predicted mask quality. + stability_score_thresh (float): A filtering threshold in [0,1], using + the stability of the mask under changes to the cutoff used to binarize + the model's mask predictions. + stability_score_offset (float): The amount to shift the cutoff when + calculated the stability score. + box_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks. + crop_n_layers (int): If >0, mask prediction will be run again on + crops of the image. Sets the number of layers to run, where each + layer has 2**i_layer number of image crops. + crop_nms_thresh (float): The box IoU cutoff used by non-maximal + suppression to filter duplicate masks between different crops. + crop_overlap_ratio (float): Sets the degree to which crops overlap. + In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + crop_n_points_downscale_factor (int): The number of points-per-side + sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + point_grids (list(np.ndarray) or None): A list over explicit grids + of points used for sampling, normalized to [0,1]. The nth grid in the + list is used in the nth crop layer. Exclusive with points_per_side. + min_mask_region_area (int): If >0, postprocessing will be applied + to remove disconnected regions and holes in masks with area smaller + than min_mask_region_area. Requires opencv. + output_mode (str): The form masks are returned in. Can be 'binary_mask', + 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. 
+ For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + from custom_pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = SamPredictor(model) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
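+
+        Example (an illustrative sketch, not part of the original docstring):
+            >>> masks = generator.generate(image)  # image: HWC uint8 np.ndarray
+            >>> largest = max(masks, key=lambda m: m["area"])
+            >>> x, y, w, h = largest["bbox"]  # XYWH box around the largest mask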
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + self.predictor.reset_image() + + # Remove duplicates within this crop. 
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros_like(data["boxes"][:, 0]), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + transformed_points = self.predictor.transform.apply_coords(points, im_size) + in_points = torch.as_tensor(transformed_points, device=self.predictor.device) + in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) + masks, iou_preds, _ = self.predictor.predict_torch( + in_points[:, None, :], + in_labels[:, None], + multimask_output=True, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), + ) + del masks + + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.predictor.model.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
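+
+        Example (an illustrative sketch, not part of the original docstring):
+            >>> mask_data = SamAutomaticMaskGenerator.postprocess_small_regions(
+            ...     mask_data, min_area=100, nms_thresh=0.7)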
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros_like(boxes[:, 0]), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/build_sam.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/build_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..9a52c506b69d29ee2356cc0e62274fe6f6ee075b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/build_sam.py @@ -0,0 +1,159 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch + +from functools import partial + +from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer, TinyViT + + +def build_sam_vit_h(checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + ) + + +build_sam = build_sam_vit_h + + +def build_sam_vit_l(checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + ) + + +def build_sam_vit_b(checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + checkpoint=checkpoint, + ) + + +def build_sam_vit_t(checkpoint=None): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + mobile_sam = Sam( + image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, + embed_dims=[64, 128, 160, 320], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 5, 10], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.0, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=0.8 + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + + mobile_sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + mobile_sam.load_state_dict(state_dict) + return mobile_sam + + +sam_model_registry = { + "default": build_sam_vit_h, + "vit_h": build_sam_vit_h, + "vit_l": build_sam_vit_l, + "vit_b": build_sam_vit_b, + "vit_t": build_sam_vit_t, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + checkpoint=None, +): + prompt_embed_dim = 256 + image_size = 1024 + vit_patch_size = 16 + image_embedding_size = image_size // vit_patch_size + sam = Sam( + image_encoder=ImageEncoderViT( + depth=encoder_depth, + embed_dim=encoder_embed_dim, + img_size=image_size, + mlp_ratio=4, + norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), + num_heads=encoder_num_heads, + patch_size=vit_patch_size, + qkv_bias=True, + use_rel_pos=True, + global_attn_indexes=encoder_global_attn_indexes, + window_size=14, + out_chans=prompt_embed_dim, + ), + prompt_encoder=PromptEncoder( + embed_dim=prompt_embed_dim, + image_embedding_size=(image_embedding_size, image_embedding_size), + input_image_size=(image_size, image_size), + mask_in_chans=16, + ), + mask_decoder=MaskDecoder( + num_multimask_outputs=3, + transformer=TwoWayTransformer( + depth=2, + embedding_dim=prompt_embed_dim, + mlp_dim=2048, + num_heads=8, + ), + transformer_dim=prompt_embed_dim, + iou_head_depth=3, + iou_head_hidden_dim=256, + ), + pixel_mean=[123.675, 116.28, 103.53], + pixel_std=[58.395, 57.12, 57.375], + ) + sam.eval() + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f) + sam.load_state_dict(state_dict) + return 
sam + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7aa261b8356b8c1174139c19782657abca0cfec2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .sam import Sam +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder +from .transformer import TwoWayTransformer +from .tiny_vit_sam import TinyViT diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/common.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/common.py new file mode 100644 index 0000000000000000000000000000000000000000..2bf15236a3eb24d8526073bc4fa2b274cccb3f96 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/common.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn + +from typing import Type + + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/image_encoder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/image_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..66351d9d7c589be693f4b3485901d3bdfed54d4a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/image_encoder.py @@ -0,0 +1,395 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
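+
+# Usage sketch (illustrative only, not part of the upstream file): with the
+# default configuration of ImageEncoderViT below (img_size=1024, patch_size=16,
+# out_chans=256), the encoder maps a padded 3x1024x1024 image to a 256x64x64
+# embedding grid:
+#
+#     encoder = ImageEncoderViT()
+#     feats = encoder(torch.zeros(1, 3, 1024, 1024))  # torch.Size([1, 256, 64, 64])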
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional, Tuple, Type + +from .common import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. 
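+            # The grid is (img_size // patch_size) per side, i.e. a
+            # (1, 64, 64, embed_dim) parameter for the default 1024/16 setup.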
+ self.pos_embed = nn.Parameter( + torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim) + ) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + self.pos_embed + + for blk in self.blocks: + x = blk(x) + + x = self.neck(x.permute(0, 3, 1, 2)) + + return x + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (tuple(int, int) or None): Input resolution for calculating the relative + positional parameter size. + """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + self.norm2 = norm_layer(dim) + self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) + + self.window_size = window_size + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. 
+            qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+            input_size (tuple(int, int) or None): Input resolution for calculating the relative
+                positional parameter size.
+        """
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim**-0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.proj = nn.Linear(dim, dim)
+
+        self.use_rel_pos = use_rel_pos
+        if self.use_rel_pos:
+            assert (
+                input_size is not None
+            ), "Input size must be provided if using relative positional encoding."
+            # initialize relative positional embeddings
+            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
+            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        B, H, W, _ = x.shape
+        # qkv with shape (3, B, nHead, H * W, C)
+        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        # q, k, v with shape (B * nHead, H * W, C)
+        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
+
+        attn = (q * self.scale) @ k.transpose(-2, -1)
+
+        if self.use_rel_pos:
+            attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
+
+        attn = attn.softmax(dim=-1)
+        x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
+        x = self.proj(x)
+
+        return x
+
+
+def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
+    """
+    Partition into non-overlapping windows with padding if needed.
+    Args:
+        x (tensor): input tokens with [B, H, W, C].
+        window_size (int): window size.
+
+    Returns:
+        windows: windows after partition with [B * num_windows, window_size, window_size, C].
+        (Hp, Wp): padded height and width before partition
+    """
+    B, H, W, C = x.shape
+
+    pad_h = (window_size - H % window_size) % window_size
+    pad_w = (window_size - W % window_size) % window_size
+    if pad_h > 0 or pad_w > 0:
+        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+    Hp, Wp = H + pad_h, W + pad_w
+
+    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+    return windows, (Hp, Wp)
+
+
+def window_unpartition(
+    windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
+) -> torch.Tensor:
+    """
+    Window unpartition into original sequences and remove padding.
+    Args:
+        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+        window_size (int): window size.
+        pad_hw (Tuple): padded height and width (Hp, Wp).
+        hw (Tuple): original height and width (H, W) before padding.
+
+    Returns:
+        x: unpartitioned sequences with [B, H, W, C].
+    """
+    Hp, Wp = pad_hw
+    H, W = hw
+    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
+    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+    if Hp > H or Wp > W:
+        x = x[:, :H, :W, :].contiguous()
+    return x
+
+
+def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
+    """
+    Get relative positional embeddings according to the relative positions of
+    query and key sizes.
+    Args:
+        q_size (int): size of query q.
+        k_size (int): size of key k.
+ rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, + kernel_size: Tuple[int, int] = (16, 16), + stride: Tuple[int, int] = (16, 16), + padding: Tuple[int, int] = (0, 0), + in_chans: int = 3, + embed_dim: int = 768, + ) -> None: + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/mask_decoder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/mask_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5d2fdb03d535a91fa725d1ec4e92a7a1f217dfe0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/mask_decoder.py @@ -0,0 +1,176 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import nn +from torch.nn import functional as F + +from typing import List, Tuple, Type + +from .common import LayerNorm2d + + +class MaskDecoder(nn.Module): + def __init__( + self, + *, + transformer_dim: int, + transformer: nn.Module, + num_multimask_outputs: int = 3, + activation: Type[nn.Module] = nn.GELU, + iou_head_depth: int = 3, + iou_head_hidden_dim: int = 256, + ) -> None: + """ + Predicts masks given an image and prompt embeddings, using a + transformer architecture. + + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + ) + + # Select the correct mask or masks for output + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, mask_slice, :, :] + iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details."""
+        # Concatenate output tokens
+        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
+        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
+        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+        # Expand per-image data in batch direction to be per-mask
+        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+        src = src + dense_prompt_embeddings
+        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+        b, c, h, w = src.shape
+
+        # Run the transformer
+        hs, src = self.transformer(src, pos_src, tokens)
+        iou_token_out = hs[:, 0, :]
+        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
+
+        # Upscale mask embeddings and predict masks using the mask tokens
+        src = src.transpose(1, 2).view(b, c, h, w)
+        upscaled_embedding = self.output_upscaling(src)
+        hyper_in_list: List[torch.Tensor] = []
+        for i in range(self.num_mask_tokens):
+            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+        hyper_in = torch.stack(hyper_in_list, dim=1)
+        b, c, h, w = upscaled_embedding.shape
+        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
+
+        # Generate mask quality predictions
+        iou_pred = self.iou_prediction_head(iou_token_out)
+
+        return masks, iou_pred
+
+
+# Lightly adapted from
+# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
+class MLP(nn.Module):
+    def __init__(
+        self,
+        input_dim: int,
+        hidden_dim: int,
+        output_dim: int,
+        num_layers: int,
+        sigmoid_output: bool = False,
+    ) -> None:
+        super().__init__()
+        self.num_layers = num_layers
+        h = [hidden_dim] * (num_layers - 1)
+        self.layers = nn.ModuleList(
+            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
+        )
+        self.sigmoid_output = sigmoid_output
+
+    def forward(self, x):
+        for i, layer in enumerate(self.layers):
+            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+        if self.sigmoid_output:
+            x = F.sigmoid(x)
+        return x
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/prompt_encoder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/prompt_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3143f4f8e02ddd7ca8587b40ff5d47c3a6b7ef3
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/prompt_encoder.py
@@ -0,0 +1,214 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+import torch
+from torch import nn
+
+from typing import Any, Optional, Tuple, Type
+
+from .common import LayerNorm2d
+
+
+class PromptEncoder(nn.Module):
+    def __init__(
+        self,
+        embed_dim: int,
+        image_embedding_size: Tuple[int, int],
+        input_image_size: Tuple[int, int],
+        mask_in_chans: int,
+        activation: Type[nn.Module] = nn.GELU,
+    ) -> None:
+        """
+        Encodes prompts for input to SAM's mask decoder.
+
+        Arguments:
+          embed_dim (int): The prompts' embedding dimension
+          image_embedding_size (tuple(int, int)): The spatial size of the
+            image embedding, as (H, W).
+          input_image_size (tuple(int, int)): The padded size of the image as input
+            to the image encoder, as (H, W).
+          mask_in_chans (int): The number of hidden channels used for
+            encoding input masks.
+ activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
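+        Returns 1 if no prompts are given, so that a batch of dense
+        no-mask embeddings can still be produced.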
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... 
x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/sam.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/sam.py new file mode 100644 index 0000000000000000000000000000000000000000..45b9e7c56d10cc47e7ed0739e35d850bfccbb257 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/sam.py @@ -0,0 +1,175 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import nn +from torch.nn import functional as F + +from typing import Any, Dict, List, Tuple, Union + +from .tiny_vit_sam import TinyViT +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder + + +class Sam(nn.Module): + mask_threshold: float = 0.0 + image_format: str = "RGB" + + def __init__( + self, + image_encoder: Union[ImageEncoderViT, TinyViT], + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = [123.675, 116.28, 103.53], + pixel_std: List[float] = [58.395, 57.12, 57.375], + ) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. + """ + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + @torch.no_grad() + def forward( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. + If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. 
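+        (For a single box prompt, for example, batched_input would hold one
+        dict with 'image', 'original_size', and 'boxes' keys.)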
+
+        Arguments:
+          batched_input (list(dict)): A list over input images, each a
+            dictionary with the following keys. A prompt key can be
+            excluded if it is not present.
+              'image': The image as a torch tensor in 3xHxW format,
+                already transformed for input to the model.
+              'original_size': (tuple(int, int)) The original size of
+                the image before transformation, as (H, W).
+              'point_coords': (torch.Tensor) Batched point prompts for
+                this image, with shape BxNx2. Already transformed to the
+                input frame of the model.
+              'point_labels': (torch.Tensor) Batched labels for point prompts,
+                with shape BxN.
+              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
+                Already transformed to the input frame of the model.
+              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
+                in the form Bx1xHxW.
+          multimask_output (bool): Whether the model should predict multiple
+            disambiguating masks, or return a single mask.
+
+        Returns:
+          (list(dict)): A list over input images, where each element is
+            a dictionary with the following keys.
+              'masks': (torch.Tensor) Batched binary mask predictions,
+                with shape BxCxHxW, where B is the number of input prompts,
+                C is determined by multimask_output, and (H, W) is the
+                original size of the image.
+              'iou_predictions': (torch.Tensor) The model's predictions
+                of mask quality, in shape BxC.
+              'low_res_logits': (torch.Tensor) Low resolution logits with
+                shape BxCxHxW, where H=W=256. Can be passed as mask input
+                to subsequent iterations of prediction.
+        """
+        input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
+        image_embeddings = self.image_encoder(input_images)
+
+        outputs = []
+        for image_record, curr_embedding in zip(batched_input, image_embeddings):
+            if "point_coords" in image_record:
+                points = (image_record["point_coords"], image_record["point_labels"])
+            else:
+                points = None
+            sparse_embeddings, dense_embeddings = self.prompt_encoder(
+                points=points,
+                boxes=image_record.get("boxes", None),
+                masks=image_record.get("mask_inputs", None),
+            )
+            low_res_masks, iou_predictions = self.mask_decoder(
+                image_embeddings=curr_embedding.unsqueeze(0),
+                image_pe=self.prompt_encoder.get_dense_pe(),
+                sparse_prompt_embeddings=sparse_embeddings,
+                dense_prompt_embeddings=dense_embeddings,
+                multimask_output=multimask_output,
+            )
+            masks = self.postprocess_masks(
+                low_res_masks,
+                input_size=image_record["image"].shape[-2:],
+                original_size=image_record["original_size"],
+            )
+            masks = masks > self.mask_threshold
+            outputs.append(
+                {
+                    "masks": masks,
+                    "iou_predictions": iou_predictions,
+                    "low_res_logits": low_res_masks,
+                }
+            )
+        return outputs
+
+    def postprocess_masks(
+        self,
+        masks: torch.Tensor,
+        input_size: Tuple[int, ...],
+        original_size: Tuple[int, ...],
+    ) -> torch.Tensor:
+        """
+        Remove padding and upscale masks to the original image size.
+
+        Arguments:
+          masks (torch.Tensor): Batched masks from the mask_decoder,
+            in BxCxHxW format.
+          input_size (tuple(int, int)): The size of the image input to the
+            model, in (H, W) format. Used to remove padding.
+          original_size (tuple(int, int)): The original size of the image
+            before resizing for input to the model, in (H, W) format.
+
+        Returns:
+          (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
+            is given by original_size.
+ """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/tiny_vit_sam.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/tiny_vit_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..ce1aad263f07652385010194738f8f69254df644 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/tiny_vit_sam.py @@ -0,0 +1,716 @@ +# -------------------------------------------------------- +# TinyViT Model Architecture +# Copyright (c) 2022 Microsoft +# Adapted from LeViT and Swin Transformer +# LeViT: (https://github.com/facebookresearch/levit) +# Swin: (https://github.com/microsoft/swin-transformer) +# Build the TinyViT Model +# -------------------------------------------------------- + +import itertools +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from custom_timm.models.layers import DropPath as TimmDropPath,\ + to_2tuple, trunc_normal_ +from custom_timm.models.registry import register_model +from typing import Tuple + + +class Conv2d_BN(torch.nn.Sequential): + def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1, + groups=1, bn_weight_init=1): + super().__init__() + self.add_module('c', torch.nn.Conv2d( + a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = torch.nn.BatchNorm2d(b) + torch.nn.init.constant_(bn.weight, bn_weight_init) + torch.nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps)**0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / \ + (bn.running_var + bn.eps)**0.5 + m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size( + 0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class DropPath(TimmDropPath): + def __init__(self, drop_prob=None): + super().__init__(drop_prob=drop_prob) + self.drop_prob = drop_prob + + def __repr__(self): + msg = super().__repr__() + msg += f'(drop_prob={self.drop_prob})' + return msg + + +class PatchEmbed(nn.Module): + def __init__(self, in_chans, embed_dim, resolution, activation): + super().__init__() + img_size: Tuple[int, int] = to_2tuple(resolution) + self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) + self.num_patches = self.patches_resolution[0] * \ + self.patches_resolution[1] + self.in_chans = in_chans + self.embed_dim = embed_dim + n = embed_dim + self.seq = nn.Sequential( + Conv2d_BN(in_chans, n // 2, 3, 2, 1), + activation(), + Conv2d_BN(n // 2, n, 3, 2, 1), + ) + + def forward(self, x): + return self.seq(x) + + +class MBConv(nn.Module): + def __init__(self, in_chans, out_chans, expand_ratio, + activation, drop_path): + super().__init__() + 
self.in_chans = in_chans + self.hidden_chans = int(in_chans * expand_ratio) + self.out_chans = out_chans + + self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1) + self.act1 = activation() + + self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans, + ks=3, stride=1, pad=1, groups=self.hidden_chans) + self.act2 = activation() + + self.conv3 = Conv2d_BN( + self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0) + self.act3 = activation() + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.act2(x) + + x = self.conv3(x) + + x = self.drop_path(x) + + x += shortcut + x = self.act3(x) + + return x + + +class PatchMerging(nn.Module): + def __init__(self, input_resolution, dim, out_dim, activation): + super().__init__() + + self.input_resolution = input_resolution + self.dim = dim + self.out_dim = out_dim + self.act = activation() + self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0) + stride_c=2 + if(out_dim==320 or out_dim==448 or out_dim==576): + stride_c=1 + self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim) + self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0) + + def forward(self, x): + if x.ndim == 3: + H, W = self.input_resolution + B = len(x) + # (B, C, H, W) + x = x.view(B, H, W, -1).permute(0, 3, 1, 2) + + x = self.conv1(x) + x = self.act(x) + + x = self.conv2(x) + x = self.act(x) + x = self.conv3(x) + x = x.flatten(2).transpose(1, 2) + return x + + +class ConvLayer(nn.Module): + def __init__(self, dim, input_resolution, depth, + activation, + drop_path=0., downsample=None, use_checkpoint=False, + out_dim=None, + conv_expand_ratio=4., + ): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + MBConv(dim, dim, conv_expand_ratio, activation, + drop_path[i] if isinstance(drop_path, list) else drop_path, + ) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, + out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.norm = nn.LayerNorm(in_features) + self.fc1 = nn.Linear(in_features, hidden_features) + self.fc2 = nn.Linear(hidden_features, out_features) + self.act = act_layer() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(torch.nn.Module): + def __init__(self, dim, key_dim, num_heads=8, + attn_ratio=4, + resolution=(14, 14), + ): + super().__init__() + # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = 
self.dh + nh_kd * 2
+
+        self.norm = nn.LayerNorm(dim)
+        self.qkv = nn.Linear(dim, h)
+        self.proj = nn.Linear(self.dh, dim)
+
+        points = list(itertools.product(
+            range(resolution[0]), range(resolution[1])))
+        N = len(points)
+        attention_offsets = {}
+        idxs = []
+        for p1 in points:
+            for p2 in points:
+                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
+                if offset not in attention_offsets:
+                    attention_offsets[offset] = len(attention_offsets)
+                idxs.append(attention_offsets[offset])
+        self.attention_biases = torch.nn.Parameter(
+            torch.zeros(num_heads, len(attention_offsets)))
+        self.register_buffer('attention_bias_idxs',
+                             torch.LongTensor(idxs).view(N, N),
+                             persistent=False)
+
+    @torch.no_grad()
+    def train(self, mode=True):
+        super().train(mode)
+        if mode and hasattr(self, 'ab'):
+            del self.ab
+        else:
+            self.ab = self.attention_biases[:, self.attention_bias_idxs]
+
+    def forward(self, x):  # x (B,N,C)
+        B, N, _ = x.shape
+
+        # Normalization
+        x = self.norm(x)
+
+        qkv = self.qkv(x)
+        # (B, N, num_heads, d)
+        q, k, v = qkv.view(B, N, self.num_heads, -1).split(
+            [self.key_dim, self.key_dim, self.d], dim=3)
+        # (B, num_heads, N, d)
+        q = q.permute(0, 2, 1, 3)
+        k = k.permute(0, 2, 1, 3)
+        v = v.permute(0, 2, 1, 3)
+
+        attn = (
+            (q @ k.transpose(-2, -1)) * self.scale
+            +
+            (self.attention_biases[:, self.attention_bias_idxs]
+             if self.training else self.ab)
+        )
+        attn = attn.softmax(dim=-1)
+        x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
+        x = self.proj(x)
+        return x
+
+
+class TinyViTBlock(nn.Module):
+    r""" TinyViT Block.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int, int]): Input resolution.
+        num_heads (int): Number of attention heads.
+        window_size (int): Window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        drop (float, optional): Dropout rate. Default: 0.0
+        drop_path (float, optional): Stochastic depth rate. Default: 0.0
+        local_conv_size (int): the kernel size of the convolution between
+            Attention and MLP. Default: 3
+        activation: the activation function. Default: nn.GELU
+    """
+
+    def __init__(self, dim, input_resolution, num_heads, window_size=7,
+                 mlp_ratio=4., drop=0., drop_path=0.,
+                 local_conv_size=3,
+                 activation=nn.GELU,
+                 ):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.num_heads = num_heads
+        assert window_size > 0, 'window_size must be greater than 0'
+        self.window_size = window_size
+        self.mlp_ratio = mlp_ratio
+
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0.
else nn.Identity() + + assert dim % num_heads == 0, 'dim must be divisible by num_heads' + head_dim = dim // num_heads + + window_resolution = (window_size, window_size) + self.attn = Attention(dim, head_dim, num_heads, + attn_ratio=1, resolution=window_resolution) + + mlp_hidden_dim = int(dim * mlp_ratio) + mlp_activation = activation + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, + act_layer=mlp_activation, drop=drop) + + pad = local_conv_size // 2 + self.local_conv = Conv2d_BN( + dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + res_x = x + if H == self.window_size and W == self.window_size: + x = self.attn(x) + else: + x = x.view(B, H, W, C) + pad_b = (self.window_size - H % + self.window_size) % self.window_size + pad_r = (self.window_size - W % + self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + # window partition + x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape( + B * nH * nW, self.window_size * self.window_size, C) + x = self.attn(x) + # window reverse + x = x.view(B, nH, nW, self.window_size, self.window_size, + C).transpose(2, 3).reshape(B, pH, pW, C) + + if padding: + x = x[:, :H, :W].contiguous() + + x = x.view(B, L, C) + + x = res_x + self.drop_path(x) + + x = x.transpose(1, 2).reshape(B, C, H, W) + x = self.local_conv(x) + x = x.view(B, C, L).transpose(1, 2) + + x = x + self.drop_path(self.mlp(x)) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}" + + +class BasicLayer(nn.Module): + """ A basic TinyViT layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + drop (float, optional): Dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + local_conv_size: the kernel size of the depthwise convolution between attention and MLP. Default: 3 + activation: the activation function. Default: nn.GELU + out_dim: the output dimension of the layer. 
Default: dim + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., drop=0., + drop_path=0., downsample=None, use_checkpoint=False, + local_conv_size=3, + activation=nn.GELU, + out_dim=None, + ): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + TinyViTBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] if isinstance( + drop_path, list) else drop_path, + local_conv_size=local_conv_size, + activation=activation, + ) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, dim=dim, out_dim=out_dim, activation=activation) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x +class TinyViT(nn.Module): + def __init__(self, img_size=224, in_chans=3, num_classes=1000, + embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_sizes=[7, 7, 14, 7], + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=1.0, + ): + super().__init__() + self.img_size=img_size + self.num_classes = num_classes + self.depths = depths + self.num_layers = len(depths) + self.mlp_ratio = mlp_ratio + + activation = nn.GELU + + self.patch_embed = PatchEmbed(in_chans=in_chans, + embed_dim=embed_dims[0], + resolution=img_size, + activation=activation) + + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, + sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + kwargs = dict(dim=embed_dims[i_layer], + input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)), + patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))), + # input_resolution=(patches_resolution[0] // (2 ** i_layer), + # patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + downsample=PatchMerging if ( + i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + out_dim=embed_dims[min( + i_layer + 1, len(embed_dims) - 1)], + activation=activation, + ) + if i_layer == 0: + layer = ConvLayer( + conv_expand_ratio=mbconv_expand_ratio, + **kwargs, + ) + else: + layer = BasicLayer( + num_heads=num_heads[i_layer], + 
                    window_size=window_sizes[i_layer],
+                    mlp_ratio=self.mlp_ratio,
+                    drop=drop_rate,
+                    local_conv_size=local_conv_size,
+                    **kwargs)
+            self.layers.append(layer)
+
+        # Classifier head
+        self.norm_head = nn.LayerNorm(embed_dims[-1])
+        self.head = nn.Linear(
+            embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
+
+        # init weights
+        self.apply(self._init_weights)
+        self.set_layer_lr_decay(layer_lr_decay)
+        self.neck = nn.Sequential(
+            nn.Conv2d(
+                embed_dims[-1],
+                256,
+                kernel_size=1,
+                bias=False,
+            ),
+            LayerNorm2d(256),
+            nn.Conv2d(
+                256,
+                256,
+                kernel_size=3,
+                padding=1,
+                bias=False,
+            ),
+            LayerNorm2d(256),
+        )
+
+    def set_layer_lr_decay(self, layer_lr_decay):
+        decay_rate = layer_lr_decay
+
+        # layers -> blocks (depth)
+        depth = sum(self.depths)
+        lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
+        #print("LR SCALES:", lr_scales)
+
+        def _set_lr_scale(m, scale):
+            for p in m.parameters():
+                p.lr_scale = scale
+
+        self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
+        i = 0
+        for layer in self.layers:
+            for block in layer.blocks:
+                block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
+                i += 1
+            if layer.downsample is not None:
+                layer.downsample.apply(
+                    lambda x: _set_lr_scale(x, lr_scales[i - 1]))
+        assert i == depth
+        for m in [self.norm_head, self.head]:
+            m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
+
+        for k, p in self.named_parameters():
+            p.param_name = k
+
+        def _check_lr_scale(m):
+            for p in m.parameters():
+                assert hasattr(p, 'lr_scale'), p.param_name
+
+        self.apply(_check_lr_scale)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay_keywords(self):
+        return {'attention_biases'}
+
+    def forward_features(self, x):
+        # x: (N, C, H, W)
+        x = self.patch_embed(x)
+
+        x = self.layers[0](x)
+        start_i = 1
+
+        for i in range(start_i, len(self.layers)):
+            layer = self.layers[i]
+            x = layer(x)
+        B, _, C = x.size()
+        x = x.view(B, 64, 64, C)
+        x = x.permute(0, 3, 1, 2)
+        x = self.neck(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        #x = self.norm_head(x)
+        #x = self.head(x)
+        return x
+
+
+_checkpoint_url_format = \
+    'https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/{}.pth'
+_provided_checkpoints = {
+    'tiny_vit_5m_224': 'tiny_vit_5m_22kto1k_distill',
+    'tiny_vit_11m_224': 'tiny_vit_11m_22kto1k_distill',
+    'tiny_vit_21m_224': 'tiny_vit_21m_22kto1k_distill',
+    'tiny_vit_21m_384': 'tiny_vit_21m_22kto1k_384_distill',
+    'tiny_vit_21m_512': 'tiny_vit_21m_22kto1k_512_distill',
+}
+
+
+def register_tiny_vit_model(fn):
+    '''Register a TinyViT model.
+    It is a wrapper of `register_model` that also loads the pretrained checkpoint.
+    '''
+    def fn_wrapper(pretrained=False, **kwargs):
+        model = fn()
+        if pretrained:
+            model_name = fn.__name__
+            assert model_name in _provided_checkpoints, \
+                f'Sorry that the checkpoint `{model_name}` is not provided yet.'
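+            # Map the registered name to its released checkpoint and download
+            # it, e.g. tiny_vit_5m_224 -> tiny_vit_5m_22kto1k_distill.pth from
+            # the TinyViT model zoo.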
+ url = _checkpoint_url_format.format( + _provided_checkpoints[model_name]) + checkpoint = torch.hub.load_state_dict_from_url( + url=url, + map_location='cpu', check_hash=False, + ) + model.load_state_dict(checkpoint['model']) + + return model + + # rename the name of fn_wrapper + fn_wrapper.__name__ = fn.__name__ + return register_model(fn_wrapper) + + +@register_tiny_vit_model +def tiny_vit_5m_224(pretrained=False, num_classes=1000, drop_path_rate=0.0): + return TinyViT( + num_classes=num_classes, + embed_dims=[64, 128, 160, 320], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 5, 10], + window_sizes=[7, 7, 14, 7], + drop_path_rate=drop_path_rate, + ) + + +@register_tiny_vit_model +def tiny_vit_11m_224(pretrained=False, num_classes=1000, drop_path_rate=0.1): + return TinyViT( + num_classes=num_classes, + embed_dims=[64, 128, 256, 448], + depths=[2, 2, 6, 2], + num_heads=[2, 4, 8, 14], + window_sizes=[7, 7, 14, 7], + drop_path_rate=drop_path_rate, + ) + + +@register_tiny_vit_model +def tiny_vit_21m_224(pretrained=False, num_classes=1000, drop_path_rate=0.2): + return TinyViT( + num_classes=num_classes, + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[7, 7, 14, 7], + drop_path_rate=drop_path_rate, + ) + + +@register_tiny_vit_model +def tiny_vit_21m_384(pretrained=False, num_classes=1000, drop_path_rate=0.1): + return TinyViT( + img_size=384, + num_classes=num_classes, + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[12, 12, 24, 12], + drop_path_rate=drop_path_rate, + ) + + +@register_tiny_vit_model +def tiny_vit_21m_512(pretrained=False, num_classes=1000, drop_path_rate=0.1): + return TinyViT( + img_size=512, + num_classes=num_classes, + embed_dims=[96, 192, 384, 576], + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 18], + window_sizes=[16, 16, 32, 16], + drop_path_rate=drop_path_rate, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/transformer.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..28fafea52288603fea275f3a100790471825c34a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/modeling/transformer.py @@ -0,0 +1,240 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import Tensor, nn + +import math +from typing import Tuple, Type + +from .common import MLPBlock + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. + + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. 
Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs. 
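
With `TwoWayTransformer` now fully defined above, a quick shape check makes the token layout concrete. This is a sketch; the import path is assumed from this diff's package layout:

```python
# Shape sanity check for TwoWayTransformer (sketch; import path assumed).
import torch
from controlnet_aux.sam.modeling.transformer import TwoWayTransformer

t = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
img = torch.randn(1, 256, 64, 64)   # B x C x H x W image embedding
pe = torch.randn(1, 256, 64, 64)    # positional encoding, same shape
pts = torch.randn(1, 5, 256)        # B x N_points x C point queries

queries, keys = t(img, pe, pts)
print(queries.shape)   # torch.Size([1, 5, 256])
print(keys.shape)      # torch.Size([1, 4096, 256]) == B x HW x C
```
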
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
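
For a single `TwoWayAttentionBlock`, both streams keep their shapes while information flows in both directions. A sketch under the same import assumption:

```python
# One TwoWayAttentionBlock round trip (sketch; import path assumed).
import torch
from controlnet_aux.sam.modeling.transformer import TwoWayAttentionBlock

blk = TwoWayAttentionBlock(embedding_dim=256, num_heads=8)
q = torch.randn(1, 5, 256)      # sparse tokens (queries)
k = torch.randn(1, 4096, 256)   # dense image tokens (keys)
q_pe = torch.randn(1, 5, 256)
k_pe = torch.randn(1, 4096, 256)

queries, keys = blk(queries=q, keys=k, query_pe=q_pe, key_pe=k_pe)
# queries: (1, 5, 256), keys: (1, 4096, 256) -- shapes are preserved
```
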
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/predictor.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..a3820fb7de8647e5d6adf229debc498b33caad62 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/predictor.py @@ -0,0 +1,269 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from .modeling import Sam + +from typing import Optional, Tuple + +from .utils.transforms import ResizeLongestSide + + +class SamPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allow repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}." 
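
Before moving on to the predictor, note how `downsample_rate` shrinks only the internal projection of the `Attention` layer just defined; `out_proj` restores `embedding_dim`. A small sketch (import path assumed as above):

```python
# Downsampled attention: internal_dim = 256 // 2 = 128, so each of the
# 8 heads works on 16 channels, but the output is projected back to 256.
import torch
from controlnet_aux.sam.modeling.transformer import Attention

attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
q = torch.randn(1, 5, 256)      # B x N_tokens x C
k = torch.randn(1, 4096, 256)
v = torch.randn(1, 4096, 256)
out = attn(q=q, k=k, v=v)
print(out.shape)                # torch.Size([1, 5, 256])
```
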
+ if image_format != self.model.image_format: + image = image[..., ::-1] + + # Transform the image to the form expected by the model + input_image = self.transform.apply_image(image) + input_image_torch = torch.as_tensor(input_image, device=self.device) + input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] + + self.set_torch_image(input_image_torch, image.shape[:2]) + + @torch.no_grad() + def set_torch_image( + self, + transformed_image: torch.Tensor, + original_image_size: Tuple[int, ...], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. Expects the input + image to be already transformed to the format expected by the model. + + Arguments: + transformed_image (torch.Tensor): The input image, with shape + 1x3xHxW, which has been transformed with ResizeLongestSide. + original_image_size (tuple(int, int)): The size of the image + before transformation, in (H, W) format. + """ + assert ( + len(transformed_image.shape) == 4 + and transformed_image.shape[1] == 3 + and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size + ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." + self.reset_image() + + self.original_size = original_image_size + self.input_size = tuple(transformed_image.shape[-2:]) + input_image = self.model.preprocess(transformed_image) + self.features = self.model.image_encoder(input_image) + self.is_image_set = True + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) 
before mask prediction.") + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) + labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + ) + + masks_np = masks[0].detach().cpu().numpy() + iou_predictions_np = iou_predictions[0].detach().cpu().numpy() + low_res_masks_np = low_res_masks[0].detach().cpu().numpy() + return masks_np, iou_predictions_np, low_res_masks_np + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + boxes (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. 
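
Putting `set_image` and `predict` together, a minimal single-click segmentation sketch (it assumes an already-constructed `Sam` model named `sam`; building one is outside this file):

```python
# Sketch: segment the object under one foreground click, keeping the
# candidate mask with the highest predicted quality score.
import numpy as np

def segment_with_one_click(sam, image: np.ndarray, x: int, y: int):
    predictor = SamPredictor(sam)
    predictor.set_image(image, image_format="RGB")          # HWC uint8, [0, 255]
    masks, scores, low_res = predictor.predict(
        point_coords=np.array([[x, y]], dtype=np.float32),  # (X, Y) in pixels
        point_labels=np.array([1]),                         # 1 = foreground
        multimask_output=True,                              # three candidates
    )
    return masks[int(scores.argmax())]
```
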
+ """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert self.features is not None, "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/amg.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/amg.py new file mode 100644 index 0000000000000000000000000000000000000000..de480030b10c60fa398fbd6019b7ca32b6ea970f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/amg.py @@ -0,0 +1,346 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." 
+ self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args + ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. 
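
A minimal `MaskData` round trip, showing that `cat` and `filter` keep tensors and plain lists aligned under one boolean keep mask:

```python
import torch

data = MaskData(iou_preds=torch.tensor([0.9, 0.4, 0.8]), ids=[0, 1, 2])
data.cat(MaskData(iou_preds=torch.tensor([0.7]), ids=[3]))
data.filter(data["iou_preds"] > 0.75)   # boolean keep mask over 4 entries
print(data["iou_preds"])                # tensor([0.9000, 0.8000])
print(data["ids"])                      # [0, 2]
```
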
+ """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecessary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. 
+ """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from custom_pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
+ """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/onnx.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..3196bdf4b782e6eeb3da4ad66ef3c7b1741535fe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/onnx.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from typing import Tuple + +from ..modeling import Sam +from .amg import calculate_stability_score + + +class SamOnnxModel(nn.Module): + """ + This model should not be called directly, but is used in ONNX export. + It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, + with some functions modified to enable model tracing. Also supports extra + options controlling what information. See the ONNX export script for details. 
+ """ + + def __init__( + self, + model: Sam, + return_single_mask: bool, + use_stability_score: bool = False, + return_extra_metrics: bool = False, + ) -> None: + super().__init__() + self.mask_decoder = model.mask_decoder + self.model = model + self.img_size = model.image_encoder.img_size + self.return_single_mask = return_single_mask + self.use_stability_score = use_stability_score + self.stability_score_offset = 1.0 + self.return_extra_metrics = return_extra_metrics + + @staticmethod + def resize_longest_image_size( + input_image_size: torch.Tensor, longest_side: int + ) -> torch.Tensor: + input_image_size = input_image_size.to(torch.float32) + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) + return transformed_size + + def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: + point_coords = point_coords + 0.5 + point_coords = point_coords / self.img_size + point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) + point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) + + point_embedding = point_embedding * (point_labels != -1) + point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( + point_labels == -1 + ) + + for i in range(self.model.prompt_encoder.num_point_embeddings): + point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ + i + ].weight * (point_labels == i) + + return point_embedding + + def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: + mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) + mask_embedding = mask_embedding + ( + 1 - has_mask_input + ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) + return mask_embedding + + def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: + masks = F.interpolate( + masks, + size=(self.img_size, self.img_size), + mode="bilinear", + align_corners=False, + ) + + prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64) + masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore + + orig_im_size = orig_im_size.to(torch.int64) + h, w = orig_im_size[0], orig_im_size[1] + masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) + return masks + + def select_masks( + self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Determine if we should return the multiclick mask or not from the number of points. + # The reweighting is used to avoid control flow. 
+ score_reweight = torch.tensor( + [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] + ).to(iou_preds.device) + score = iou_preds + (num_points - 2.5) * score_reweight + best_idx = torch.argmax(score, dim=1) + masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) + iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) + + return masks, iou_preds + + @torch.no_grad() + def forward( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + mask_input: torch.Tensor, + has_mask_input: torch.Tensor, + orig_im_size: torch.Tensor, + ): + sparse_embedding = self._embed_points(point_coords, point_labels) + dense_embedding = self._embed_masks(mask_input, has_mask_input) + + masks, scores = self.model.mask_decoder.predict_masks( + image_embeddings=image_embeddings, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embedding, + dense_prompt_embeddings=dense_embedding, + ) + + if self.use_stability_score: + scores = calculate_stability_score( + masks, self.model.mask_threshold, self.stability_score_offset + ) + + if self.return_single_mask: + masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) + + upscaled_masks = self.mask_postprocessing(masks, orig_im_size) + + if self.return_extra_metrics: + stability_scores = calculate_stability_score( + upscaled_masks, self.model.mask_threshold, self.stability_score_offset + ) + areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) + return upscaled_masks, scores, stability_scores, areas, masks + + return upscaled_masks, scores, masks diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/transforms.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c08ba1e3db751f3a5483a003be38c69c2cf2df85 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/sam/utils/transforms.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch.nn import functional as F +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + +from copy import deepcopy +from typing import Tuple + + +class ResizeLongestSide: + """ + Resizes images to the longest side 'target_length', as well as provides + methods for resizing coordinates and boxes. Provides methods for + transforming both numpy array and batched torch tensors. + """ + + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. + """ + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return np.array(resize(to_pil_image(image), target_size)) + + def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the + original image size in (H, W) format. 
+ """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).astype(float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array shape Bx4. Requires the original image size + in (H, W) format. + """ + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: + """ + Expects batched images with shape BxCxHxW and float format. This + transformation may not exactly match apply_image. apply_image is + the transformation expected by the model. + """ + # Expects an image in BCHW format. May not exactly match apply_image. + target_size = self.get_preprocess_shape(image.shape[2], image.shape[3], self.target_length) + return F.interpolate( + image, target_size, mode="bilinear", align_corners=False, antialias=True + ) + + def apply_coords_torch( + self, coords: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).to(torch.float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes_torch( + self, boxes: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with shape Bx4. Requires the original image + size in (H, W) format. + """ + boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + @staticmethod + def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: + """ + Compute the output size given input size and target long side length. + """ + scale = long_side_length * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ce37647520f6e1975641b3da34cab7f18d9fd40 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__init__.py @@ -0,0 +1,41 @@ +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import HWC3, resize_image_with_pad, common_input_validate, HWC3 + +#Not to be confused with "scribble" from HED. That is "fake scribble" which is more accurate and less picky than this. 
+class ScribbleDetector: + def __call__(self, input_image=None, detect_resolution=512, output_type=None, upscale_method="INTER_AREA", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + detected_map = np.zeros_like(input_image, dtype=np.uint8) + detected_map[np.min(input_image, axis=2) < 127] = 255 + detected_map = 255 - detected_map + + detected_map = remove_pad(detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map + +class ScribbleXDog_Detector: + def __call__(self, input_image=None, detect_resolution=512, thr_a=32, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + g1 = cv2.GaussianBlur(input_image.astype(np.float32), (0, 0), 0.5) + g2 = cv2.GaussianBlur(input_image.astype(np.float32), (0, 0), 5.0) + dog = (255 - np.min(g2 - g1, axis=2)).clip(0, 255).astype(np.uint8) + result = np.zeros_like(input_image, dtype=np.uint8) + result[2 * (255 - dog) > thr_a] = 255 + #result = 255 - result + + detected_map = HWC3(remove_pad(result)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b3c0dc34600bc76807246d69713c8d525bdf435 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/scribble/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..71f131a6cd5437edbd22f92448323a0bd6522a48 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__init__.py @@ -0,0 +1,86 @@ +import warnings + +import cv2 +import numpy as np +from PIL import Image +import random + +from controlnet_aux.util import HWC3, common_input_validate, img2mask, make_noise_disk, resize_image_with_pad + + +class ContentShuffleDetector: + def __call__(self, input_image, h=None, w=None, f=None, detect_resolution=512, output_type="pil", upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + H, W, C = input_image.shape + if h is None: + h = H + if w is None: + w = W + if f is None: + f = 256 + x = make_noise_disk(h, w, 1, f) * float(W - 1) + y = make_noise_disk(h, w, 1, f) * float(H - 1) + flow = np.concatenate([x, y], axis=2).astype(np.float32) + detected_map = cv2.remap(input_image, flow, None, cv2.INTER_LINEAR) + detected_map = remove_pad(detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map + + +class ColorShuffleDetector: + def __call__(self, img): + H, W, C = img.shape + F = np.random.randint(64, 384) + A = make_noise_disk(H, W, 3, F) + B = make_noise_disk(H, W, 3, F) + C = 
(A + B) / 2.0 + A = (C + (A - C) * 3.0).clip(0, 1) + B = (C + (B - C) * 3.0).clip(0, 1) + L = img.astype(np.float32) / 255.0 + Y = A * L + B * (1 - L) + Y -= np.min(Y, axis=(0, 1), keepdims=True) + Y /= np.maximum(np.max(Y, axis=(0, 1), keepdims=True), 1e-5) + Y *= 255.0 + return Y.clip(0, 255).astype(np.uint8) + + +class GrayDetector: + def __call__(self, img): + eps = 1e-5 + X = img.astype(np.float32) + r, g, b = X[:, :, 0], X[:, :, 1], X[:, :, 2] + kr, kg, kb = [random.random() + eps for _ in range(3)] + ks = kr + kg + kb + kr /= ks + kg /= ks + kb /= ks + Y = r * kr + g * kg + b * kb + Y = np.stack([Y] * 3, axis=2) + return Y.clip(0, 255).astype(np.uint8) + + +class DownSampleDetector: + def __call__(self, img, level=3, k=16.0): + h = img.astype(np.float32) + for _ in range(level): + h += np.random.normal(loc=0.0, scale=k, size=h.shape) + h = cv2.pyrDown(h) + for _ in range(level): + h = cv2.pyrUp(h) + h += np.random.normal(loc=0.0, scale=k, size=h.shape) + return h.clip(0, 255).astype(np.uint8) + + +class Image2MaskShuffleDetector: + def __init__(self, resolution=(640, 512)): + self.H, self.W = resolution + + def __call__(self, img): + m = img2mask(img, self.H, self.W) + m *= 255.0 + return m.clip(0, 255).astype(np.uint8) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__pycache__/__init__.cpython-311.pyc b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63d0a1efa366af29d752258fb6b55816a6f80572 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/shuffle/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/requirements.txt b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_image.png b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_image.png new file mode 100644 index 0000000000000000000000000000000000000000..c4a751e31da45af83c8a3d5ec02cf8c22c7bb8e9 Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_image.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..dee2a0b7e86815bf6c3fa533ed34d5fcb5b4c4aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor.py @@ -0,0 +1,95 @@ +"""Test the Processor class.""" +import unittest +from PIL import Image + +from controlnet_aux.processor import Processor + + +class TestProcessor(unittest.TestCase): + def test_hed(self): + processor = Processor('hed') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_midas(self): + processor = Processor('midas') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_mlsd(self): + processor = Processor('mlsd') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_openpose(self): + processor = Processor('openpose') + 
image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_pidinet(self): + processor = Processor('pidinet') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_normalbae(self): + processor = Processor('normalbae') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_lineart(self): + processor = Processor('lineart') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_lineart_coarse(self): + processor = Processor('lineart_coarse') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_lineart_anime(self): + processor = Processor('lineart_anime') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_canny(self): + processor = Processor('canny') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_content_shuffle(self): + processor = Processor('content_shuffle') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_zoe(self): + processor = Processor('zoe') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_mediapipe_face(self): + processor = Processor('mediapipe_face') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + def test_tile(self): + processor = Processor('tile') + image = Image.open('test_image.png') + processed_image = processor(image) + self.assertIsInstance(processed_image, bytes) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor_pytest.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor_pytest.py new file mode 100644 index 0000000000000000000000000000000000000000..065acc392f60652ee6a8994b21b5869b7885f40e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tests/test_processor_pytest.py @@ -0,0 +1,78 @@ +import io + +import numpy as np +import pytest +from PIL import Image + +from controlnet_aux.processor import MODELS, Processor + + +@pytest.fixture(params=[ + 'scribble_hed', + 'softedge_hed', + 'scribble_hedsafe', + 'softedge_hedsafe', + 'depth_midas', + 'mlsd', + 'openpose', + 'openpose_hand', + 'openpose_face', + 'openpose_faceonly', + 'openpose_full', + 'scribble_pidinet', + 'softedge_pidinet', + 'scribble_pidsafe', + 'softedge_pidsafe', + 'normal_bae', + 'lineart_coarse', + 'lineart_realistic', + 'lineart_anime', + 'canny', + 'shuffle', + 'depth_zoe', + 'depth_leres', + 'depth_leres++', + 'mediapipe_face', + 'tile' +]) +def processor(request): + return Processor(request.param) + + +def test_processor_init(processor): + assert isinstance(processor.processor, MODELS[processor.processor_id]['class']) + assert isinstance(processor.params, dict) + + +def test_processor_call(processor): + # Load test image + with open('test_image.png', 'rb') as f: + image_bytes = f.read() + image = 
Image.open(io.BytesIO(image_bytes)) + + # Output size + resolution = 512 + W, H = image.size + H = float(H) + W = float(W) + k = float(resolution) / min(H, W) + H *= k + W *= k + H = int(np.round(H / 64.0)) * 64 + W = int(np.round(W / 64.0)) * 64 + + # Test processing + processed_image = processor(image) + assert isinstance(processed_image, Image.Image) + assert processed_image.size == (W, H) + + +def test_processor_call_bytes(processor): + # Load test image + with open('test_image.png', 'rb') as f: + image_bytes = f.read() + + # Test processing + processed_image_bytes = processor(image_bytes, to_pil=False) + assert isinstance(processed_image_bytes, bytes) + assert len(processed_image_bytes) > 0 \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tile/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tile/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..81041c819d524badb5bd951b085d4578f8afa504 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/tile/__init__.py @@ -0,0 +1,24 @@ +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import get_upscale_method, common_input_validate, HWC3 + + +class TileDetector: + def __call__(self, input_image=None, pyrUp_iters=3, output_type=None, upscale_method="INTER_AREA", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + H, W, _ = input_image.shape + H = int(np.round(H / 64.0)) * 64 + W = int(np.round(W / 64.0)) * 64 + detected_map = cv2.resize(input_image, (W // (2 ** pyrUp_iters), H // (2 ** pyrUp_iters)), + interpolation=get_upscale_method(upscale_method)) + detected_map = HWC3(detected_map) + + for _ in range(pyrUp_iters): + detected_map = cv2.pyrUp(detected_map) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6776499e5f4b06551deaf97ca3ffdc03b96b45d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/__init__.py @@ -0,0 +1,69 @@ +import os +from .inference import init_segmentor, inference_segmentor, show_result_pyplot +import warnings +import cv2 +import numpy as np +from PIL import Image +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +import torch + +from custom_mmpkg.custom_mmseg.core.evaluation import get_palette + +config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "upernet_global_small.py") + + + +class UniformerSegmentor: + def __init__(self, netNetwork): + self.model = netNetwork + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "upernet_global_small.pth" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + netNetwork = init_segmentor(config_file, model_path, device="cpu") + netNetwork.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(model_path)['state_dict'].items()}) + netNetwork.eval() + + return cls(netNetwork) + + def to(self, device): + self.model.to(device) + return self + + def _inference(self, img): + if next(self.model.parameters()).device.type == 'mps': + # 
adaptive_avg_pool2d can fail on MPS, workaround with CPU + import torch.nn.functional + + orig_adaptive_avg_pool2d = torch.nn.functional.adaptive_avg_pool2d + def cpu_if_exception(input, *args, **kwargs): + try: + return orig_adaptive_avg_pool2d(input, *args, **kwargs) + except: + return orig_adaptive_avg_pool2d(input.cpu(), *args, **kwargs).to(input.device) + + try: + torch.nn.functional.adaptive_avg_pool2d = cpu_if_exception + result = inference_segmentor(self.model, img) + finally: + torch.nn.functional.adaptive_avg_pool2d = orig_adaptive_avg_pool2d + else: + result = inference_segmentor(self.model, img) + + res_img = show_result_pyplot(self.model, img, result, get_palette('ade'), opacity=1) + return res_img + + def __call__(self, input_image=None, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + detected_map = self._inference(input_image) + detected_map = remove_pad(HWC3(detected_map)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/ade20k.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/ade20k.py new file mode 100644 index 0000000000000000000000000000000000000000..efc8b4bb20c981f3db6df7eb52b3dc0744c94cc0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/ade20k.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ade/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/chase_db1.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/chase_db1.py new 
file mode 100644 index 0000000000000000000000000000000000000000..298594ea925f87f22b37094a2ec50e370aec96a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/chase_db1.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ChaseDB1Dataset' +data_root = 'data/CHASE_DB1' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (960, 999) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..f21867c63e1835f6fceb61f066e802fd8fd2a735 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + 
workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/train', + ann_dir='gtFine/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes_769x769.py new file mode 100644 index 0000000000000000000000000000000000000000..336c7b254fe392b4703039fec86a83acdbd2e1a5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/cityscapes_769x769.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (769, 769) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2049, 1025), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/drive.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/drive.py new file mode 100644 index 0000000000000000000000000000000000000000..06e8ff606e0d2a4514ec8b7d2c6c436a32efcbf4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/drive.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'DRIVEDataset' +data_root = 'data/DRIVE' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (584, 565) +crop_size = (64, 64) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/hrf.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/hrf.py new file mode 100644 index 0000000000000000000000000000000000000000..242d790eb1b83e75cf6b7eaa7a35c674099311ad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/hrf.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'HRFDataset' +data_root = 'data/HRF' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (2336, 3504) +crop_size = (256, 256) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context.py new file mode 100644 index 0000000000000000000000000000000000000000..ff65bad1b86d7e3a5980bb5b9fc55798dc8df5f4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + 
+train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context_59.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context_59.py new file mode 100644 index 0000000000000000000000000000000000000000..37585abab89834b95cd5bdd993b994fca1db65f6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_context_59.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset59' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + 
ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12.py new file mode 100644 index 0000000000000000000000000000000000000000..ba1d42d0c5781f56dc177d860d856bb34adce555 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'PascalVOCDataset' +data_root = 'data/VOCdevkit/VOC2012' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12_aug.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..3f23b6717d53ad29f02dd15046802a2631a5076b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/pascal_voc12_aug.py @@ -0,0 +1,9 @@ +_base_ = './pascal_voc12.py' +# dataset settings +data = dict( + train=dict( + ann_dir=['SegmentationClass', 'SegmentationClassAug'], + split=[ + 'ImageSets/Segmentation/train.txt', + 'ImageSets/Segmentation/aug.txt' + ])) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/stare.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/stare.py new file mode 100644 index 0000000000000000000000000000000000000000..3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/datasets/stare.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'STAREDataset' +data_root = 'data/STARE' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (605, 700) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/default_runtime.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/default_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/default_runtime.py @@ -0,0 +1,14 @@ +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + # dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ann_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ann_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..a2cb653827e44e6015b3b83bc578003e614a6aa1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ann_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ANNHead', + in_channels=[1024, 2048], + in_index=[2, 3], + channels=512, + project_channels=256, + 
query_scales=(1, ), + key_pool_scales=(1, 3, 6, 8), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/apcnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/apcnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..c8f5316cbcf3896ba9de7ca2c801eba512f01d5e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/apcnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='APCHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ccnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ccnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..794148f576b9e215c3c6963e73dffe98204b7717 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ccnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='CCHead', + in_channels=2048, + in_index=3, + channels=512, + recurrence=2, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/cgnet.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/cgnet.py new file mode 100644 index 0000000000000000000000000000000000000000..eff8d9458c877c5db894957e0b1b4597e40da6ab --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/cgnet.py @@ -0,0 +1,35 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='CGNet', + norm_cfg=norm_cfg, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16)), + decode_head=dict( + type='FCNHead', + in_channels=256, + in_index=2, + channels=256, + num_convs=0, + concat_input=False, + dropout_ratio=0, + num_classes=19, + norm_cfg=norm_cfg, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0, + class_weight=[ + 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352, + 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905, + 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587, + 10.396974, 10.055647 + ])), + # model training and testing settings + train_cfg=dict(sampler=None), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/danet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/danet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..2c934939fac48525f22ad86f489a041dd7db7d09 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/danet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DAHead', + in_channels=2048, + in_index=3, + channels=512, + pam_channels=64, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..d7a43bee01422ad4795dd27874e0cd4bb6cbfecf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + 
type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='ASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py new file mode 100644 index 0000000000000000000000000000000000000000..0cd262999d8b2cb8e14a5c32190ae73f479d8e81 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py @@ -0,0 +1,50 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='ASPPHead', + in_channels=64, + in_index=4, + channels=16, + dilations=(1, 12, 24, 36), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..050e39e091d816df9028d23aa3ecf9db74e441e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + 
style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DepthwiseSeparableASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + c1_in_channels=256, + c1_channels=48, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dmnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dmnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..d22ba52640bebd805b3b8d07025e276dfb023759 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dmnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DMHead', + in_channels=2048, + in_index=3, + channels=512, + filter_sizes=(1, 3, 5, 7), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dnl_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dnl_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..edb4c174c51e34c103737ba39bfc48bf831e561d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/dnl_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DNLHead', + in_channels=2048, + in_index=3, + channels=512, + dropout_ratio=0.1, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + 
in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/emanet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/emanet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..26adcd430926de0862204a71d345f2543167f27b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/emanet_r50-d8.py @@ -0,0 +1,47 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='EMAHead', + in_channels=2048, + in_index=3, + channels=256, + ema_channels=512, + num_bases=64, + num_stages=3, + momentum=0.1, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/encnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/encnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..be777123a886503172a95fe0719e956a147bbd68 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/encnet_r50-d8.py @@ -0,0 +1,48 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='EncHead', + in_channels=[512, 1024, 2048], + in_index=(1, 2, 3), + channels=512, + num_codes=32, + use_se_loss=True, + add_lateral=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_se_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) 
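The _base_ fragments above (and the model fragments that follow) are not run standalone: mmcv-style configs compose them through _base_ inheritance, as cityscapes_769x769.py does with _base_ = './cityscapes.py', and this is how a top-level config such as the upernet_global_small.py passed to init_segmentor above is expected to pull these files in. A minimal sketch of how a composed config resolves, assuming a standard mmcv install (this vendored tree ships its own custom_mmpkg fork, so the import path here is illustrative):

    from mmcv import Config

    # Config.fromfile() executes the child config file, recursively merges
    # every file named in `_base_`, then applies the child's own top-level
    # variables as overrides on top of the merged result.
    cfg = Config.fromfile(
        'custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/'
        'configs/_base_/datasets/cityscapes_769x769.py')

    # The child only redefines the crop size and pipelines; dataset paths
    # still come from the parent cityscapes.py.
    print(cfg.crop_size)           # (769, 769) -- overridden by the child
    print(cfg.data.train.img_dir)  # 'leftImg8bit/train' -- inherited

Each fragment therefore defines only module-level variables (dataset_type, pipelines, data, model, and so on); the loader records the executed file's namespace, which is why these files carry no imports or entry points of their own.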
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fast_scnn.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fast_scnn.py new file mode 100644 index 0000000000000000000000000000000000000000..32fdeb659355a5ce5ef2cc7c2f30742703811cdf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fast_scnn.py @@ -0,0 +1,57 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='FastSCNN', + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + norm_cfg=norm_cfg, + align_corners=False), + decode_head=dict( + type='DepthwiseSeparableFCNHead', + in_channels=128, + channels=128, + concat_input=False, + num_classes=19, + in_index=-1, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + auxiliary_head=[ + dict( + type='FCNHead', + in_channels=128, + channels=32, + num_convs=1, + num_classes=19, + in_index=-2, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + dict( + type='FCNHead', + in_channels=64, + channels=32, + num_convs=1, + num_classes=19, + in_index=-3, + norm_cfg=norm_cfg, + concat_input=False, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_hr18.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_hr18.py new file mode 100644 index 0000000000000000000000000000000000000000..c3e299bc89ada56ca14bbffcbdb08a586b8ed9e9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_hr18.py @@ -0,0 +1,52 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://msra/hrnetv2_w18', + backbone=dict( + type='HRNet', + norm_cfg=norm_cfg, + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144)))), + decode_head=dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + channels=sum([18, 36, 72, 144]), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..5e98f6cc918b6146fc6d613c6918e825ef1355c3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_r50-d8.py @@ -0,0 +1,45 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='FCNHead', + in_channels=2048, + in_index=3, + channels=512, + num_convs=2, + concat_input=True, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_unet_s5-d16.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_unet_s5-d16.py new file mode 100644 index 0000000000000000000000000000000000000000..a33e7972877f902d0e7d18401ca675e3e4e60a18 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fcn_unet_s5-d16.py @@ -0,0 +1,51 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='FCNHead', + in_channels=64, + in_index=4, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_r50.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_r50.py new file mode 100644 index 
0000000000000000000000000000000000000000..86ab327db92e44c14822d65f1c9277cb007f17c1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_r50.py @@ -0,0 +1,36 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_uniformer.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_uniformer.py new file mode 100644 index 0000000000000000000000000000000000000000..8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/fpn_uniformer.py @@ -0,0 +1,35 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='UniFormer', + embed_dim=[64, 128, 320, 512], + layers=[3, 4, 8, 3], + head_dim=64, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1), + neck=dict( + type='FPN', + in_channels=[64, 128, 320, 512], + out_channels=256, + num_outs=4), + decode_head=dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole') +) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/gcnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/gcnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2ad69f5c22adfe79d5fdabf920217628987166 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/gcnet_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='GCHead', + in_channels=2048, + in_index=3, + channels=512, + ratio=1 / 4., + pooling_type='att', + fusion_types=('channel_add', ), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/lraspp_m-v3-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/lraspp_m-v3-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..93258242a90695cc94a7c6bd41562d6a75988771 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/lraspp_m-v3-d8.py @@ -0,0 +1,25 @@ +# model settings +norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True) +model = dict( + type='EncoderDecoder', + backbone=dict( + type='MobileNetV3', + arch='large', + out_indices=(1, 3, 16), + norm_cfg=norm_cfg), + decode_head=dict( + type='LRASPPHead', + in_channels=(16, 24, 960), + in_index=(0, 1, 2), + channels=128, + input_transform='multiple_select', + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/nonlocal_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/nonlocal_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..5674a39854cafd1f2e363bac99c58ccae62f24da --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/nonlocal_r50-d8.py @@ -0,0 +1,46 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='NLHead', + in_channels=2048, + in_index=3, + channels=512, + dropout_ratio=0.1, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_hr18.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_hr18.py new file mode 100644 index 0000000000000000000000000000000000000000..c60f62a7cdf3f5c5096a7a7e725e8268fddcb057 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_hr18.py @@ -0,0 +1,68 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://msra/hrnetv2_w18', + backbone=dict( + type='HRNet', + norm_cfg=norm_cfg, + norm_eval=False, + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(18, 36)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BASIC', + num_blocks=(4, 4, 4), + num_channels=(18, 36, 72)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 4, 4, 4), + num_channels=(18, 36, 72, 144)))), + decode_head=[ + dict( + type='FCNHead', + in_channels=[18, 36, 72, 144], + channels=sum([18, 36, 72, 144]), + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + kernel_size=1, + num_convs=1, + concat_input=False, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=[18, 36, 72, 144], + in_index=(0, 1, 2, 3), + input_transform='resize_concat', + channels=512, + ocr_channels=256, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..615aa3ff703942b6c22b2d6e9642504dd3e41ebd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/ocrnet_r50-d8.py @@ -0,0 +1,47 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=[ + dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + dict( + type='OCRHead', + in_channels=2048, + in_index=3, + channels=512, + ocr_channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pointrend_r50.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pointrend_r50.py new file mode 100644 index 0000000000000000000000000000000000000000..9d323dbf9466d41e0800aa57ef84045f3d874bdf --- 
/dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pointrend_r50.py @@ -0,0 +1,56 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='CascadeEncoderDecoder', + num_stages=2, + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=4), + decode_head=[ + dict( + type='FPNHead', + in_channels=[256, 256, 256, 256], + in_index=[0, 1, 2, 3], + feature_strides=[4, 8, 16, 32], + channels=128, + dropout_ratio=-1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + dict( + type='PointHead', + in_channels=[256], + in_index=[0], + channels=256, + num_fcs=3, + coarse_pred_each_layer=True, + dropout_ratio=-1, + num_classes=19, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) + ], + # model training and testing settings + train_cfg=dict( + num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75), + test_cfg=dict( + mode='whole', + subdivision_steps=2, + subdivision_num_points=8196, + scale_factor=2)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/psanet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/psanet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/psanet_r50-d8.py @@ -0,0 +1,49 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSAHead', + in_channels=2048, + in_index=3, + channels=512, + mask_size=(97, 97), + psa_type='bi-direction', + compact=False, + shrink_factor=2, + normalization_factor=1.0, + psa_softmax=True, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_r50-d8.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_r50-d8.py new file mode 100644 index 0000000000000000000000000000000000000000..f451e08ad2eb0732dcb806b1851eb978d4acf136 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_r50-d8.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='PSPHead', + in_channels=2048, + in_index=3, + channels=512, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py new file mode 100644 index 0000000000000000000000000000000000000000..fcff9ec4f41fad158344ecd77313dc14564f3682 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py @@ -0,0 +1,50 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UNet', + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False), + decode_head=dict( + type='PSPHead', + in_channels=64, + in_index=4, + channels=16, + pool_scales=(1, 2, 3, 6), + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=128, + in_index=3, + channels=64, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_r50.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_r50.py new file mode 100644 index 0000000000000000000000000000000000000000..10974962fdd7136031fd06de1700f497d355ceaa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_r50.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + 
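# Plain stride-32 ResNet-50: all four stage outputs are exposed through
+        # out_indices and fused by the UPerHead below. Unlike the *-d8
+        # configs above, no dilation is applied (stages 3-4 keep stride 2),
+        # since UPerHead recovers resolution from the feature pyramid.
+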
type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_uniformer.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_uniformer.py new file mode 100644 index 0000000000000000000000000000000000000000..41aa4db809dc6e2c508e98051f61807d07477903 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/models/upernet_uniformer.py @@ -0,0 +1,43 @@ +# model settings +norm_cfg = dict(type='BN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='UniFormer', + embed_dim=[64, 128, 320, 512], + layers=[3, 4, 8, 3], + head_dim=64, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1), + decode_head=dict( + type='UPerHead', + in_channels=[64, 128, 320, 512], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=320, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_160k.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_160k.py new file mode 100644 index 0000000000000000000000000000000000000000..52603890b10f25faf8eec9f9e5a4468fae09b811 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_160k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU') diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_20k.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_20k.py new 
file mode 100644 index 0000000000000000000000000000000000000000..bf780a1b6f6521833c6a5859675147824efa599d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_20k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=20000) +checkpoint_config = dict(by_epoch=False, interval=2000) +evaluation = dict(interval=2000, metric='mIoU') diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_40k.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_40k.py new file mode 100644 index 0000000000000000000000000000000000000000..cdbf841abcb26eed87bf76ab816aff4bae0630ee --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_40k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=40000) +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU') diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_80k.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_80k.py new file mode 100644 index 0000000000000000000000000000000000000000..c190cee6bdc7922b688ea75dc8f152fa15c24617 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/configs/_base_/schedules/schedule_80k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=8000) +evaluation = dict(interval=8000, metric='mIoU') diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/inference.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..7efc93e16f51e70d80340f76d74c6f3db5a26443 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/inference.py @@ -0,0 +1,137 @@ + +import torch + +import custom_mmpkg.custom_mmcv as mmcv +from custom_mmpkg.custom_mmcv.parallel import collate, scatter +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from custom_mmpkg.custom_mmseg.datasets.pipelines import Compose +from custom_mmpkg.custom_mmseg.models import build_segmentor + +def init_segmentor(config, checkpoint=None, device='cuda:0'): + """Initialize a segmentor from config file. + + Args: + config (str or :obj:`mmcv.Config`): Config file path or the config + object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + device (str, optional) CPU/CUDA device option. Default 'cuda:0'. + Use 'cpu' for loading model on CPU. + Returns: + nn.Module: The constructed segmentor. 
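+
+    Example (a minimal sketch; the relative config path assumes the
+    ``upernet_global_small.py`` config vendored with this package)::
+
+        >>> model = init_segmentor('upernet_global_small.py',
+        ...                        checkpoint=None, device='cpu')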
+ """ + if isinstance(config, str): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + 'but got {}'.format(type(config))) + config.model.pretrained = None + config.model.train_cfg = None + model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) + if checkpoint is not None: + checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') + model.CLASSES = checkpoint['meta']['CLASSES'] + model.PALETTE = checkpoint['meta']['PALETTE'] + model.cfg = config # save the config in the model for convenience + model.to(device) + model.eval() + return model + + +class LoadImage: + """A simple pipeline to load image.""" + + def __call__(self, results): + """Call function to load images into results. + + Args: + results (dict): A result dict contains the file name + of the image to be read. + + Returns: + dict: ``results`` will be returned containing loaded image. + """ + + if isinstance(results['img'], str): + results['filename'] = results['img'] + results['ori_filename'] = results['img'] + else: + results['filename'] = None + results['ori_filename'] = None + img = mmcv.imread(results['img']) + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + return results + + +def inference_segmentor(model, img): + """Inference image(s) with the segmentor. + + Args: + model (nn.Module): The loaded segmentor. + imgs (str/ndarray or list[str/ndarray]): Either image files or loaded + images. + + Returns: + (list[Tensor]): The segmentation result. + """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] + test_pipeline = Compose(test_pipeline) + # prepare data + data = dict(img=img) + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + else: + data['img_metas'] = [i.data[0] for i in data['img_metas']] + + data['img'] = [x.to(device) for x in data['img']] + + # forward the model + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + return result + + +def show_result_pyplot(model, + img, + result, + palette=None, + fig_size=(15, 10), + opacity=0.5, + title='', + block=True): + """Visualize the segmentation results on the image. + + Args: + model (nn.Module): The loaded segmentor. + img (str or np.ndarray): Image filename or loaded image. + result (list): The segmentation result. + palette (list[list[int]]] | None): The palette of segmentation + map. If None is given, random palette will be generated. + Default: None + fig_size (tuple): Figure size of the pyplot figure. + opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. + title (str): The title of pyplot figure. + Default is ''. + block (bool): Whether to block the pyplot figure. + Default is True. 
+ """ + if hasattr(model, 'module'): + model = model.module + img = model.show_result( + img, result, palette=palette, show=False, opacity=opacity) + # plt.figure(figsize=fig_size) + # plt.imshow(mmcv.bgr2rgb(img)) + # plt.title(title) + # plt.tight_layout() + # plt.show(block=block) + return mmcv.bgr2rgb(img) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4b958738b9fd93bfcec239c550df1d9a44b8c536 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .checkpoint import load_checkpoint + +__all__ = ['load_checkpoint'] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/checkpoint.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..8453fedcd47fafbedd40ca7ed485dce2e23434e0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/mmcv_custom/checkpoint.py @@ -0,0 +1,500 @@ +# Copyright (c) Open-MMLab. All rights reserved. +import io +import os +import os.path as osp +import pkgutil +import time +import warnings +from collections import OrderedDict +from importlib import import_module +from tempfile import TemporaryDirectory + +import torch +import torchvision +from torch.optim import Optimizer +from torch.utils import model_zoo +from torch.nn import functional as F + +import custom_mmpkg.custom_mmcv as mmcv +from custom_mmpkg.custom_mmcv.fileio import FileClient +from custom_mmpkg.custom_mmcv.fileio import load as load_file +from custom_mmpkg.custom_mmcv.parallel import is_module_wrapper +from custom_mmpkg.custom_mmcv.utils import mkdir_or_exist +from custom_mmpkg.custom_mmcv.runner import get_dist_info + +ENV_MMCV_HOME = 'MMCV_HOME' +ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' +DEFAULT_CACHE_DIR = '~/.cache' + + +def _get_mmcv_home(): + mmcv_home = os.path.expanduser( + os.getenv( + ENV_MMCV_HOME, + os.path.join( + os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) + + mkdir_or_exist(mmcv_home) + return mmcv_home + + +def load_state_dict(module, state_dict, strict=False, logger=None): + """Load state_dict to a module. + + This method is modified from :meth:`torch.nn.Module.load_state_dict`. + Default value for ``strict`` is set to ``False`` and the message for + param mismatch will be shown even if strict is False. + + Args: + module (Module): Module that receives the state_dict. + state_dict (OrderedDict): Weights. + strict (bool): whether to strictly enforce that the keys + in :attr:`state_dict` match the keys returned by this module's + :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. + logger (:obj:`logging.Logger`, optional): Logger to log the error + message. If not specified, print function will be used. 
+ """ + unexpected_keys = [] + all_missing_keys = [] + err_msg = [] + + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + # use _load_from_state_dict to enable checkpoint version control + def load(module, prefix=''): + # recursively check parallel module in case that the model has a + # complicated structure, e.g., nn.Module(nn.Module(DDP)) + if is_module_wrapper(module): + module = module.module + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + module._load_from_state_dict(state_dict, prefix, local_metadata, True, + all_missing_keys, unexpected_keys, + err_msg) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(module) + load = None # break load->load reference cycle + + # ignore "num_batches_tracked" of BN layers + missing_keys = [ + key for key in all_missing_keys if 'num_batches_tracked' not in key + ] + + if unexpected_keys: + err_msg.append('unexpected key in source ' + f'state_dict: {", ".join(unexpected_keys)}\n') + if missing_keys: + err_msg.append( + f'missing keys in source state_dict: {", ".join(missing_keys)}\n') + + rank, _ = get_dist_info() + if len(err_msg) > 0 and rank == 0: + err_msg.insert( + 0, 'The model and loaded state dict do not match exactly\n') + err_msg = '\n'.join(err_msg) + if strict: + raise RuntimeError(err_msg) + elif logger is not None: + logger.warning(err_msg) + else: + print(err_msg) + + +def load_url_dist(url, model_dir=None): + """In distributed setting, this function only download checkpoint at local + rank 0.""" + rank, world_size = get_dist_info() + rank = int(os.environ.get('LOCAL_RANK', rank)) + if rank == 0: + checkpoint = model_zoo.load_url(url, model_dir=model_dir) + if world_size > 1: + torch.distributed.barrier() + if rank > 0: + checkpoint = model_zoo.load_url(url, model_dir=model_dir) + return checkpoint + + +def load_pavimodel_dist(model_path, map_location=None): + """In distributed setting, this function only download checkpoint at local + rank 0.""" + try: + from pavi import modelcloud + except ImportError: + raise ImportError( + 'Please install pavi to load checkpoint from modelcloud.') + rank, world_size = get_dist_info() + rank = int(os.environ.get('LOCAL_RANK', rank)) + if rank == 0: + model = modelcloud.get(model_path) + with TemporaryDirectory() as tmp_dir: + downloaded_file = osp.join(tmp_dir, model.name) + model.download(downloaded_file) + checkpoint = torch.load(downloaded_file, map_location=map_location) + if world_size > 1: + torch.distributed.barrier() + if rank > 0: + model = modelcloud.get(model_path) + with TemporaryDirectory() as tmp_dir: + downloaded_file = osp.join(tmp_dir, model.name) + model.download(downloaded_file) + checkpoint = torch.load( + downloaded_file, map_location=map_location) + return checkpoint + + +def load_fileclient_dist(filename, backend, map_location): + """In distributed setting, this function only download checkpoint at local + rank 0.""" + rank, world_size = get_dist_info() + rank = int(os.environ.get('LOCAL_RANK', rank)) + allowed_backends = ['ceph'] + if backend not in allowed_backends: + raise ValueError(f'Load from Backend {backend} is not supported.') + if rank == 0: + fileclient = FileClient(backend=backend) + buffer = io.BytesIO(fileclient.get(filename)) + checkpoint = torch.load(buffer, map_location=map_location) + if world_size > 1: + torch.distributed.barrier() + if rank > 0: + fileclient = 
FileClient(backend=backend) + buffer = io.BytesIO(fileclient.get(filename)) + checkpoint = torch.load(buffer, map_location=map_location) + return checkpoint + + +def get_torchvision_models(): + model_urls = dict() + for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): + if ispkg: + continue + _zoo = import_module(f'torchvision.models.{name}') + if hasattr(_zoo, 'model_urls'): + _urls = getattr(_zoo, 'model_urls') + model_urls.update(_urls) + return model_urls + + +def get_external_models(): + mmcv_home = _get_mmcv_home() + default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') + default_urls = load_file(default_json_path) + assert isinstance(default_urls, dict) + external_json_path = osp.join(mmcv_home, 'open_mmlab.json') + if osp.exists(external_json_path): + external_urls = load_file(external_json_path) + assert isinstance(external_urls, dict) + default_urls.update(external_urls) + + return default_urls + + +def get_mmcls_models(): + mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') + mmcls_urls = load_file(mmcls_json_path) + + return mmcls_urls + + +def get_deprecated_model_names(): + deprecate_json_path = osp.join(mmcv.__path__[0], + 'model_zoo/deprecated.json') + deprecate_urls = load_file(deprecate_json_path) + assert isinstance(deprecate_urls, dict) + + return deprecate_urls + + +def _process_mmcls_checkpoint(checkpoint): + state_dict = checkpoint['state_dict'] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k.startswith('backbone.'): + new_state_dict[k[9:]] = v + new_checkpoint = dict(state_dict=new_state_dict) + + return new_checkpoint + + +def _load_checkpoint(filename, map_location=None): + """Load checkpoint from somewhere (modelzoo, file, url). + + Args: + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str | None): Same as :func:`torch.load`. Default: None. + + Returns: + dict | OrderedDict: The loaded checkpoint. It can be either an + OrderedDict storing model weights or a dict containing other + information, which depends on the checkpoint. 
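+
+    Example (sketch of the accepted URI schemes, each resolved by one of
+    the branches below; the local path is a placeholder)::
+
+        >>> ckpt = _load_checkpoint('torchvision://resnet50')
+        >>> ckpt = _load_checkpoint('open-mmlab://resnet50_v1c')
+        >>> ckpt = _load_checkpoint('/path/to/weights.pth', map_location='cpu')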
+ """ + if filename.startswith('modelzoo://'): + warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' + 'use "torchvision://" instead') + model_urls = get_torchvision_models() + model_name = filename[11:] + checkpoint = load_url_dist(model_urls[model_name]) + elif filename.startswith('torchvision://'): + model_urls = get_torchvision_models() + model_name = filename[14:] + checkpoint = load_url_dist(model_urls[model_name]) + elif filename.startswith('open-mmlab://'): + model_urls = get_external_models() + model_name = filename[13:] + deprecated_urls = get_deprecated_model_names() + if model_name in deprecated_urls: + warnings.warn(f'open-mmlab://{model_name} is deprecated in favor ' + f'of open-mmlab://{deprecated_urls[model_name]}') + model_name = deprecated_urls[model_name] + model_url = model_urls[model_name] + # check if is url + if model_url.startswith(('http://', 'https://')): + checkpoint = load_url_dist(model_url) + else: + filename = osp.join(_get_mmcv_home(), model_url) + if not osp.isfile(filename): + raise IOError(f'{filename} is not a checkpoint file') + checkpoint = torch.load(filename, map_location=map_location) + elif filename.startswith('mmcls://'): + model_urls = get_mmcls_models() + model_name = filename[8:] + checkpoint = load_url_dist(model_urls[model_name]) + checkpoint = _process_mmcls_checkpoint(checkpoint) + elif filename.startswith(('http://', 'https://')): + checkpoint = load_url_dist(filename) + elif filename.startswith('pavi://'): + model_path = filename[7:] + checkpoint = load_pavimodel_dist(model_path, map_location=map_location) + elif filename.startswith('s3://'): + checkpoint = load_fileclient_dist( + filename, backend='ceph', map_location=map_location) + else: + if not osp.isfile(filename): + raise IOError(f'{filename} is not a checkpoint file') + checkpoint = torch.load(filename, map_location=map_location) + return checkpoint + + +def load_checkpoint(model, + filename, + map_location='cpu', + strict=False, + logger=None): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + + Returns: + dict or OrderedDict: The loaded checkpoint. 
+ """ + checkpoint = _load_checkpoint(filename, map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # for MoBY, load model of online branch + if sorted(list(state_dict.keys()))[0].startswith('encoder'): + state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = model.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H*W: + logger.warning("Error in loading absolute_pos_embed, pass") + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) + + # interpolate position bias table if needed + relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = model.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + logger.warning(f"Error in loading {table_key}, pass") + else: + if L1 != L2: + S1 = int(L1 ** 0.5) + S2 = int(L2 ** 0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).view(1, nH1, S1, S1), + size=(S2, S2), mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) + + # load state_dict + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def weights_to_cpu(state_dict): + """Copy a model state_dict to cpu. + + Args: + state_dict (OrderedDict): Model weights on GPU. + + Returns: + OrderedDict: Model weights on GPU. + """ + state_dict_cpu = OrderedDict() + for key, val in state_dict.items(): + state_dict_cpu[key] = val.cpu() + return state_dict_cpu + + +def _save_to_state_dict(module, destination, prefix, keep_vars): + """Saves module state to `destination` dictionary. + + This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. + + Args: + module (nn.Module): The module to generate state_dict. + destination (dict): A dict where state will be stored. + prefix (str): The prefix for parameters and buffers used in this + module. + """ + for name, param in module._parameters.items(): + if param is not None: + destination[prefix + name] = param if keep_vars else param.detach() + for name, buf in module._buffers.items(): + # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d + if buf is not None: + destination[prefix + name] = buf if keep_vars else buf.detach() + + +def get_state_dict(module, destination=None, prefix='', keep_vars=False): + """Returns a dictionary containing a whole state of the module. + + Both parameters and persistent buffers (e.g. running averages) are + included. Keys are corresponding parameter and buffer names. 
+ + This method is modified from :meth:`torch.nn.Module.state_dict` to + recursively check parallel module in case that the model has a complicated + structure, e.g., nn.Module(nn.Module(DDP)). + + Args: + module (nn.Module): The module to generate state_dict. + destination (OrderedDict): Returned dict for the state of the + module. + prefix (str): Prefix of the key. + keep_vars (bool): Whether to keep the variable property of the + parameters. Default: False. + + Returns: + dict: A dictionary containing a whole state of the module. + """ + # recursively check parallel module in case that the model has a + # complicated structure, e.g., nn.Module(nn.Module(DDP)) + if is_module_wrapper(module): + module = module.module + + # below is the same as torch.nn.Module.state_dict() + if destination is None: + destination = OrderedDict() + destination._metadata = OrderedDict() + destination._metadata[prefix[:-1]] = local_metadata = dict( + version=module._version) + _save_to_state_dict(module, destination, prefix, keep_vars) + for name, child in module._modules.items(): + if child is not None: + get_state_dict( + child, destination, prefix + name + '.', keep_vars=keep_vars) + for hook in module._state_dict_hooks.values(): + hook_result = hook(module, destination, prefix, local_metadata) + if hook_result is not None: + destination = hook_result + return destination + + +def save_checkpoint(model, filename, optimizer=None, meta=None): + """Save checkpoint to file. + + The checkpoint will have 3 fields: ``meta``, ``state_dict`` and + ``optimizer``. By default ``meta`` will contain version and time info. + + Args: + model (Module): Module whose params are to be saved. + filename (str): Checkpoint filename. + optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. + meta (dict, optional): Metadata to be saved in checkpoint. 
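+
+    Example (sketch; the output path and iteration count are placeholders)::
+
+        >>> save_checkpoint(model, 'work_dirs/iter_160000.pth',
+        ...                 optimizer=optimizer, meta=dict(iter=160000))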
+ """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError(f'meta must be a dict or None, but got {type(meta)}') + meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) + + if is_module_wrapper(model): + model = model.module + + if hasattr(model, 'CLASSES') and model.CLASSES is not None: + # save class name to the meta + meta.update(CLASSES=model.CLASSES) + + checkpoint = { + 'meta': meta, + 'state_dict': weights_to_cpu(get_state_dict(model)) + } + # save optimizer state dict in the checkpoint + if isinstance(optimizer, Optimizer): + checkpoint['optimizer'] = optimizer.state_dict() + elif isinstance(optimizer, dict): + checkpoint['optimizer'] = {} + for name, optim in optimizer.items(): + checkpoint['optimizer'][name] = optim.state_dict() + + if filename.startswith('pavi://'): + try: + from pavi import modelcloud + from pavi.exception import NodeNotFoundError + except ImportError: + raise ImportError( + 'Please install pavi to load checkpoint from modelcloud.') + model_path = filename[7:] + root = modelcloud.Folder() + model_dir, model_name = osp.split(model_path) + try: + model = modelcloud.get(model_dir) + except NodeNotFoundError: + model = root.create_training_model(model_dir) + with TemporaryDirectory() as tmp_dir: + checkpoint_file = osp.join(tmp_dir, model_name) + with open(checkpoint_file, 'wb') as f: + torch.save(checkpoint, f) + f.flush() + model.create_file(checkpoint_file, name=model_name) + else: + mmcv.mkdir_or_exist(osp.dirname(filename)) + # immediately flush buffer + with open(filename, 'wb') as f: + torch.save(checkpoint, f) + f.flush() \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/uniformer.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/uniformer.py new file mode 100644 index 0000000000000000000000000000000000000000..30039e2eb45b01c336493ad7a86a5a4e33aa9c1b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/uniformer.py @@ -0,0 +1,421 @@ +# -------------------------------------------------------- +# UniFormer +# Copyright (c) 2022 SenseTime X-Lab +# Licensed under The MIT License [see LICENSE for details] +# Written by Kunchang Li +# -------------------------------------------------------- + + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from functools import partial +from collections import OrderedDict +from custom_timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from custom_mmpkg.custom_mmseg.models.builder import BACKBONES + +from .mmcv_custom import load_checkpoint + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = 
nn.Conv2d(in_features, hidden_features, 1) + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CBlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) + self.norm1 = nn.BatchNorm2d(dim) + self.conv1 = nn.Conv2d(dim, dim, 1) + self.conv2 = nn.Conv2d(dim, dim, 1) + self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = nn.BatchNorm2d(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.pos_embed(x) + x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SABlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.pos_embed(x) + B, N, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + x = x.transpose(1, 2).reshape(B, N, H, W) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class SABlock_Windows(nn.Module): + def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.window_size=window_size + self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.pos_embed(x) + x = x.permute(0, 2, 3, 1) + B, H, W, C = x.shape + shortcut = x + x = self.norm1(x) + + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + x = x.permute(0, 3, 1, 2).reshape(B, C, H, W) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.norm = nn.LayerNorm(embed_dim) + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, _, H, W = x.shape + x = self.proj(x) + B, _, H, W = x.shape + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() + return x + + +@BACKBONES.register_module() +class UniFormer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512], + head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0], + windows=False, hybrid=False, window_size=14): + """ + Args: + layer (list): number of block in each layer + img_size (int, tuple): input image size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + head_dim (int): dimension of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer (nn.Module): normalization layer + pretrained_path (str): path of pretrained model + use_checkpoint (bool): whether use checkpoint + checkpoint_num (list): 
index for using checkpoint in every stage + windows (bool): whether use window MHRA + hybrid (bool): whether use hybrid MHRA + window_size (int): size of window (>14) + """ + super().__init__() + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + self.windows = windows + print(f'Use Checkpoint: {self.use_checkpoint}') + print(f'Checkpoint Number: {self.checkpoint_num}') + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0]) + self.patch_embed2 = PatchEmbed( + img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1]) + self.patch_embed3 = PatchEmbed( + img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2]) + self.patch_embed4 = PatchEmbed( + img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3]) + + self.pos_drop = nn.Dropout(p=drop_rate) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule + num_heads = [dim // head_dim for dim in embed_dim] + self.blocks1 = nn.ModuleList([ + CBlock( + dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) + for i in range(layers[0])]) + self.norm1=norm_layer(embed_dim[0]) + self.blocks2 = nn.ModuleList([ + CBlock( + dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer) + for i in range(layers[1])]) + self.norm2 = norm_layer(embed_dim[1]) + if self.windows: + print('Use local window for all blocks in stage3') + self.blocks3 = nn.ModuleList([ + SABlock_Windows( + dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) + for i in range(layers[2])]) + elif hybrid: + print('Use hybrid window for blocks in stage3') + block3 = [] + for i in range(layers[2]): + if (i + 1) % 4 == 0: + block3.append(SABlock( + dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) + else: + block3.append(SABlock_Windows( + dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) + self.blocks3 = nn.ModuleList(block3) + else: + print('Use global window for all blocks in stage3') + self.blocks3 = nn.ModuleList([ + SABlock( + dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) + for i in range(layers[2])]) + self.norm3 = norm_layer(embed_dim[2]) + self.blocks4 = nn.ModuleList([ + SABlock( + dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, 
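+                # dpr is a single linspace over sum(layers); offsetting the
+                # index by layers[0]+layers[1]+layers[2] continues the linearly
+                # increasing drop-path schedule into stage 4 rather than
+                # restarting it per stage.
+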
drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer) + for i in range(layers[3])]) + self.norm4 = norm_layer(embed_dim[3]) + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + self.apply(self._init_weights) + self.init_weights(pretrained=pretrained_path) + + def init_weights(self, pretrained): + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) + print(f'Load pretrained model from {pretrained}') + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + out = [] + x = self.patch_embed1(x) + x = self.pos_drop(x) + for i, blk in enumerate(self.blocks1): + if self.use_checkpoint and i < self.checkpoint_num[0]: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + x_out = self.norm1(x.permute(0, 2, 3, 1)) + out.append(x_out.permute(0, 3, 1, 2).contiguous()) + x = self.patch_embed2(x) + for i, blk in enumerate(self.blocks2): + if self.use_checkpoint and i < self.checkpoint_num[1]: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + x_out = self.norm2(x.permute(0, 2, 3, 1)) + out.append(x_out.permute(0, 3, 1, 2).contiguous()) + x = self.patch_embed3(x) + for i, blk in enumerate(self.blocks3): + if self.use_checkpoint and i < self.checkpoint_num[2]: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + x_out = self.norm3(x.permute(0, 2, 3, 1)) + out.append(x_out.permute(0, 3, 1, 2).contiguous()) + x = self.patch_embed4(x) + for i, blk in enumerate(self.blocks4): + if self.use_checkpoint and i < self.checkpoint_num[3]: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + x_out = self.norm4(x.permute(0, 2, 3, 1)) + out.append(x_out.permute(0, 3, 1, 2).contiguous()) + return tuple(out) + + def forward(self, x): + x = self.forward_features(x) + return x \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/upernet_global_small.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/upernet_global_small.py new file mode 100644 index 0000000000000000000000000000000000000000..2410de80ecccf39a0034509539248f8ffe037d85 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/uniformer/upernet_global_small.py @@ -0,0 +1,44 @@ +_base_ = [ + 'configs/_base_/models/upernet_uniformer.py', + 'configs/_base_/datasets/ade20k.py', + 'configs/_base_/default_runtime.py', + 'configs/_base_/schedules/schedule_160k.py' +] + +custom_imports = dict( + imports=['controlnet_aux.uniformer.uniformer'], + allow_failed_imports=False +) + +model = dict( + backbone=dict( + type='UniFormer', + embed_dim=[64, 128, 320, 512], + layers=[3, 4, 8, 3], + head_dim=64, + drop_path_rate=0.25, + windows=False, + hybrid=False + ), + 
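# The heads are inherited from the upernet_uniformer base config; only
+    # num_classes is retargeted to ADE20K's 150 classes, and in_channels
+    # restate the UniFormer-S stage widths.
+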
decode_head=dict( + in_channels=[64, 128, 320, 512], + num_classes=150 + ), + auxiliary_head=dict( + in_channels=320, + num_classes=150 + )) + +# AdamW optimizer, no weight decay for position embedding & layer norm in backbone +optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, + paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.), + 'norm': dict(decay_mult=0.)})) + +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) + +data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/util.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/util.py new file mode 100644 index 0000000000000000000000000000000000000000..61d283ae20e995e382a4a15b6c4bcc3cba913f5f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/util.py @@ -0,0 +1,259 @@ +import os +import random + +import cv2 +import numpy as np +import torch +from pathlib import Path +import warnings +from huggingface_hub import hf_hub_download + +annotator_ckpts_path = os.path.join(Path(__file__).parents[2], 'ckpts') +USE_SYMLINKS = False + +try: + USE_SYMLINKS = eval(os.environ['AUX_USE_SYMLINKS']) +except: + warnings.warn("USE_SYMLINKS not set successfully. Using default value: False to download models.") + pass + +# fix SSL: CERTIFICATE_VERIFY_FAILED issue with pytorch download https://github.com/pytorch/pytorch/issues/33288 +try: + from torch.hub import load_state_dict_from_url + test_url = "https://download.pytorch.org/models/mobilenet_v2-b0353104.pth" + load_state_dict_from_url(test_url, progress=False) +except: + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + +here = Path(__file__).parent.resolve() + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + + +def make_noise_disk(H, W, C, F): + noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C)) + noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC) + noise = noise[F: F + H, F: F + W] + noise -= np.min(noise) + noise /= np.max(noise) + if C == 1: + noise = noise[:, :, None] + return noise + + +def nms(x, t, s): + x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) + + f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) + f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) + f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) + f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) + + y = np.zeros_like(x) + + for f in [f1, f2, f3, f4]: + np.putmask(y, cv2.dilate(x, kernel=f) == x, x) + + z = np.zeros_like(y, dtype=np.uint8) + z[y > t] = 255 + return z + +def min_max_norm(x): + x -= np.min(x) + x /= np.maximum(np.max(x), 1e-5) + return x + + +def safe_step(x, step=2): + y = x.astype(np.float32) * float(step + 1) + y = y.astype(np.int32).astype(np.float32) / float(step) + return y + + +def img2mask(img, H, W, low=10, high=90): + assert img.ndim == 3 or img.ndim == 2 + 
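# Builds a random binary mask from one channel of ``img``: pick a random
+    # channel, resize to (H, W), invert with probability 0.5, then threshold
+    # at a percentile drawn uniformly from [low, high).
+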
assert img.dtype == np.uint8 + + if img.ndim == 3: + y = img[:, :, random.randrange(0, img.shape[2])] + else: + y = img + + y = cv2.resize(y, (W, H), interpolation=cv2.INTER_CUBIC) + + if random.uniform(0, 1) < 0.5: + y = 255 - y + + return y < np.percentile(y, random.randrange(low, high)) + +def safer_memory(x): + # Fix many MAC/AMD problems + return np.ascontiguousarray(x.copy()).copy() + +UPSCALE_METHODS = ["INTER_NEAREST", "INTER_LINEAR", "INTER_AREA", "INTER_CUBIC", "INTER_LANCZOS4"] +def get_upscale_method(method_str): + assert method_str in UPSCALE_METHODS, f"Method {method_str} not found in {UPSCALE_METHODS}" + return getattr(cv2, method_str) + +def pad64(x): + return int(np.ceil(float(x) / 64.0) * 64 - x) + +#https://github.com/Mikubill/sd-webui-controlnet/blob/main/scripts/processor.py#L17 +#Added upscale_method param +def resize_image_with_pad(input_image, resolution, upscale_method = "", skip_hwc3=False): + if skip_hwc3: + img = input_image + else: + img = HWC3(input_image) + H_raw, W_raw, _ = img.shape + k = float(resolution) / float(min(H_raw, W_raw)) + H_target = int(np.round(float(H_raw) * k)) + W_target = int(np.round(float(W_raw) * k)) + img = cv2.resize(img, (W_target, H_target), interpolation=get_upscale_method(upscale_method) if k > 1 else cv2.INTER_AREA) + H_pad, W_pad = pad64(H_target), pad64(W_target) + img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge') + + def remove_pad(x): + return safer_memory(x[:H_target, :W_target, ...]) + + return safer_memory(img_padded), remove_pad + +def common_input_validate(input_image, output_type, **kwargs): + if "img" in kwargs: + warnings.warn("img is deprecated, please use `input_image=...` instead.", DeprecationWarning) + input_image = kwargs.pop("img") + + if "return_pil" in kwargs: + warnings.warn("return_pil is deprecated. 
Use output_type instead.", DeprecationWarning) + output_type = "pil" if kwargs["return_pil"] else "np" + + if type(output_type) is bool: + warnings.warn("Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions") + if output_type: + output_type = "pil" + + if input_image is None: + raise ValueError("input_image must be defined.") + + if not isinstance(input_image, np.ndarray): + input_image = np.array(input_image, dtype=np.uint8) + output_type = output_type or "pil" + else: + output_type = output_type or "np" + + return (input_image, output_type) + +def torch_gc(): + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + + +def ade_palette(): + """ADE20K palette that maps each class to RGB values.""" + return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + +def custom_hf_download(pretrained_model_or_path, filename, cache_dir=annotator_ckpts_path, subfolder='', use_symlinks=USE_SYMLINKS): + local_dir = os.path.join(cache_dir, pretrained_model_or_path) + model_path = os.path.join(local_dir, *subfolder.split('/'), filename) + + if not os.path.exists(model_path): + print(f"Failed to find {model_path}.\n Downloading from huggingface.co") + if use_symlinks: + cache_dir_d = os.getenv("HUGGINGFACE_HUB_CACHE") + if 
cache_dir_d is None:
+                import platform
+                if platform.system() == "Windows":
+                    cache_dir_d = os.path.join(os.getenv("USERPROFILE"), ".cache", "huggingface", "hub")
+                else:
+                    cache_dir_d = os.path.join(os.getenv("HOME"), ".cache", "huggingface", "hub")
+            try:
+                # test whether hard links work between the HF hub cache and our cache dir
+                if not os.path.exists(cache_dir_d):
+                    os.makedirs(cache_dir_d)
+                open(os.path.join(cache_dir_d, f"linktest_{filename}.txt"), "w").close()
+                os.link(os.path.join(cache_dir_d, f"linktest_{filename}.txt"), os.path.join(cache_dir, f"linktest_{filename}.txt"))
+                os.remove(os.path.join(cache_dir, f"linktest_{filename}.txt"))
+                os.remove(os.path.join(cache_dir_d, f"linktest_{filename}.txt"))
+                print("Using symlinks to download models.\n",
+                      "Make sure you have enough free space in your cache folder\n",
+                      "and do not purge the cache folder after downloading,\n",
+                      "otherwise you will have to re-download the models every time you run the script.\n",
+                      "You can set USE_SYMLINKS: False in config.yaml to avoid this behavior.")
+            except Exception:
+                print("Could not create a link between the cache folders; disabling symlinks.")
+                use_symlinks = False
+                cache_dir_d = os.path.join(cache_dir, pretrained_model_or_path, "cache")
+        else:
+            cache_dir_d = os.path.join(cache_dir, pretrained_model_or_path, "cache")
+
+        model_path = hf_hub_download(repo_id=pretrained_model_or_path,
+            cache_dir=cache_dir_d,
+            local_dir=local_dir,
+            subfolder=subfolder,
+            filename=filename,
+            local_dir_use_symlinks=use_symlinks,
+            resume_download=True,
+            etag_timeout=100
+        )
+        if not use_symlinks:
+            try:
+                import shutil
+                shutil.rmtree(cache_dir_d)
+            except Exception as e:
+                print(e)
+    return model_path
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..7a1e90d007836c327846ce8e5151013b115042ab
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Intelligent Systems Lab Org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
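With util.py complete, a minimal sketch of its two workhorses as the Zoe wrapper further below uses them. The repo id is an assumption for illustration; ZoeD_M12_N.pt and the pad-to-64 behavior come from the code above.

import numpy as np
from controlnet_aux.util import custom_hf_download, resize_image_with_pad

# Fetch a checkpoint into annotator_ckpts_path (repo id is illustrative).
model_path = custom_hf_download("lllyasviel/Annotators", "ZoeD_M12_N.pt")

# Resize so the short side is 512, pad H and W up to multiples of 64, then undo.
img = np.zeros((480, 640, 3), dtype=np.uint8)
img_padded, remove_pad = resize_image_with_pad(img, 512, upscale_method="INTER_CUBIC")
assert img_padded.shape[0] % 64 == 0 and img_padded.shape[1] % 64 == 0
restored = remove_pad(img_padded)  # crops back to the unpadded target size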
\ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d2442c2ec6618d9d2cfa9e478eade8f89c5a8922 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/__init__.py @@ -0,0 +1,61 @@ +import os + +import cv2 +import numpy as np +import torch +from einops import rearrange +from PIL import Image + +from controlnet_aux.util import HWC3, common_input_validate, resize_image_with_pad, annotator_ckpts_path, custom_hf_download +from .zoedepth.models.zoedepth.zoedepth_v1 import ZoeDepth +from .zoedepth.utils.config import get_config + + +class ZoeDetector: + def __init__(self, model): + self.model = model + + @classmethod + def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=annotator_ckpts_path): + filename = filename or "ZoeD_M12_N.pt" + model_path = custom_hf_download(pretrained_model_or_path, filename, cache_dir=cache_dir) + + conf = get_config("zoedepth", "infer") + model = ZoeDepth.build_from_config(conf) + model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))['model']) + model.eval() + + return cls(model) + + def to(self, device): + self.model.to(device) + return self + + def __call__(self, input_image, detect_resolution=512, output_type=None, upscale_method="INTER_CUBIC", **kwargs): + device = next(iter(self.model.parameters())).device + input_image, output_type = common_input_validate(input_image, output_type, **kwargs) + input_image, remove_pad = resize_image_with_pad(input_image, detect_resolution, upscale_method) + + image_depth = input_image + with torch.no_grad(): + image_depth = torch.from_numpy(image_depth).float().to(device) + image_depth = image_depth / 255.0 + image_depth = rearrange(image_depth, 'h w c -> 1 c h w') + depth = self.model.infer(image_depth) + + depth = depth[0, 0].cpu().numpy() + + vmin = np.percentile(depth, 2) + vmax = np.percentile(depth, 85) + + depth -= vmin + depth /= vmax - vmin + depth = 1.0 - depth + depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8) + + detected_map = remove_pad(HWC3(depth_image)) + + if output_type == "pil": + detected_map = Image.fromarray(detected_map) + + return detected_map diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the 
following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/midas.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/midas.py new file mode 100644 index 0000000000000000000000000000000000000000..e04f81f42e2179303be0e3ad3c354e70a993c349 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/base_models/midas.py @@ -0,0 +1,383 @@ +# MIT License +import os + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
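Before the rest of midas.py, a short usage sketch for the ZoeDetector wrapper defined in zoe/__init__.py above. The repo id is an assumption; any Hugging Face repo hosting ZoeD_M12_N.pt would work, and the import path assumes src/ is on sys.path.

from PIL import Image
from controlnet_aux.zoe import ZoeDetector

detector = ZoeDetector.from_pretrained("lllyasviel/Annotators").to("cuda")  # repo id assumed
depth = detector(Image.open("input.png"), detect_resolution=512, output_type="pil")
depth.save("depth.png")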
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+import numpy as np
+from torchvision.transforms import Normalize
+import inspect
+from pathlib import Path
+
+
+def denormalize(x):
+    """Reverses the ImageNet normalization applied to the input.
+
+    Args:
+        x (torch.Tensor - shape(N,3,H,W)): input tensor
+
+    Returns:
+        torch.Tensor - shape(N,3,H,W): Denormalized input
+    """
+    mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
+    std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
+    return x * std + mean
+
+def get_activation(name, bank):
+    def hook(model, input, output):
+        bank[name] = output
+    return hook
+
+
+class Resize(object):
+    """Resize sample to given size (width, height).
+    """
+
+    def __init__(
+        self,
+        width,
+        height,
+        resize_target=True,
+        keep_aspect_ratio=False,
+        ensure_multiple_of=1,
+        resize_method="lower_bound",
+    ):
+        """Init.
+        Args:
+            width (int): desired output width
+            height (int): desired output height
+            resize_target (bool, optional):
+                True: Resize the full sample (image, mask, target).
+                False: Resize image only.
+                Defaults to True.
+            keep_aspect_ratio (bool, optional):
+                True: Keep the aspect ratio of the input sample.
+                Output sample might not have the given width and height, and
+                resize behaviour depends on the parameter 'resize_method'.
+                Defaults to False.
+            ensure_multiple_of (int, optional):
+                Output width and height are constrained to be a multiple of this parameter.
+                Defaults to 1.
+            resize_method (str, optional):
+                "lower_bound": Output will be at least as large as the given size.
+                "upper_bound": Output will be at most as large as the given size. (Output size might be smaller than the given size.)
+                "minimal": Scale as little as possible. (Output size might be smaller than the given size.)
+                Defaults to "lower_bound".
+        """
+        # print("Params passed to Resize transform:")
+        # print("\twidth: ", width)
+        # print("\theight: ", height)
+        # print("\tresize_target: ", resize_target)
+        # print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
+        # print("\tensure_multiple_of: ", ensure_multiple_of)
+        # print("\tresize_method: ", resize_method)
+
+        self.__width = width
+        self.__height = height
+
+        self.__keep_aspect_ratio = keep_aspect_ratio
+        self.__multiple_of = ensure_multiple_of
+        self.__resize_method = resize_method
+
+    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+        if max_val is not None and y > max_val:
+            y = (np.floor(x / self.__multiple_of)
+                 * self.__multiple_of).astype(int)
+
+        if y < min_val:
+            y = (np.ceil(x / self.__multiple_of)
+                 * self.__multiple_of).astype(int)
+
+        return y
+
+    def get_size(self, width, height):
+        # determine new height and width
+        scale_height = self.__height / height
+        scale_width = self.__width / width
+
+        if self.__keep_aspect_ratio:
+            if self.__resize_method == "lower_bound":
+                # scale such that output size is lower bound
+                if scale_width > scale_height:
+                    # fit width
+                    scale_height = scale_width
+                else:
+                    # fit height
+                    scale_width = scale_height
+            elif self.__resize_method == "upper_bound":
+                # scale such that output size is upper bound
+                if scale_width < scale_height:
+                    # fit width
+                    scale_height = scale_width
+                else:
+                    # fit height
+                    scale_width = scale_height
+            elif self.__resize_method == "minimal":
+                # scale as little as possible
+                if abs(1 - scale_width) < abs(1 - scale_height):
+                    # fit width
+                    scale_height = scale_width
+                else:
+                    # fit height
+                    scale_width = scale_height
+            else:
+                raise ValueError(
+                    f"resize_method {self.__resize_method} not implemented"
+                )
+
+        if self.__resize_method == "lower_bound":
+            new_height = self.constrain_to_multiple_of(
+                scale_height * height, min_val=self.__height
+            )
+            new_width = self.constrain_to_multiple_of(
+                scale_width * width, min_val=self.__width
+            )
+        elif self.__resize_method == "upper_bound":
+            new_height = self.constrain_to_multiple_of(
+                scale_height * height, max_val=self.__height
+            )
+            new_width = self.constrain_to_multiple_of(
+                scale_width * width, max_val=self.__width
+            )
+        elif self.__resize_method == "minimal":
+            new_height = self.constrain_to_multiple_of(scale_height * height)
+            new_width = self.constrain_to_multiple_of(scale_width * width)
+        else:
+            raise ValueError(
+                f"resize_method {self.__resize_method} not implemented")
+
+        return (new_width, new_height)
+
+    def __call__(self, x):
+        width, height = self.get_size(*x.shape[-2:][::-1])
+        return nn.functional.interpolate(x, (int(height), int(width)), mode='bilinear', align_corners=True)
+
+class PrepForMidas(object):
+    def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
+        if isinstance(img_size, int):
+            img_size = (img_size, img_size)
+        net_h, net_w = img_size
+        self.normalization = Normalize(
+            mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+        self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode) \
+            if do_resize else nn.Identity()
+
+    def __call__(self, x):
+        return self.normalization(self.resizer(x))
+
+
+class MidasCore(nn.Module):
+    def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,
+                 img_size=384, **kwargs):
+        """Midas Base model used for multi-scale
feature extraction. + + Args: + midas (torch.nn.Module): Midas model. + trainable (bool, optional): Train midas model. Defaults to False. + fetch_features (bool, optional): Extract multi-scale features. Defaults to True. + layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'). + freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False. + keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True. + img_size (int, tuple, optional): Input resolution. Defaults to 384. + """ + super().__init__() + self.core = midas + self.output_channels = None + self.core_out = {} + self.trainable = trainable + self.fetch_features = fetch_features + # midas.scratch.output_conv = nn.Identity() + self.handles = [] + # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1'] + self.layer_names = layer_names + + self.set_trainable(trainable) + self.set_fetch_features(fetch_features) + + self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio, + img_size=img_size, do_resize=kwargs.get('do_resize', True)) + + if freeze_bn: + self.freeze_bn() + + def set_trainable(self, trainable): + self.trainable = trainable + if trainable: + self.unfreeze() + else: + self.freeze() + return self + + def set_fetch_features(self, fetch_features): + self.fetch_features = fetch_features + if fetch_features: + if len(self.handles) == 0: + self.attach_hooks(self.core) + else: + self.remove_hooks() + return self + + def freeze(self): + for p in self.parameters(): + p.requires_grad = False + self.trainable = False + return self + + def unfreeze(self): + for p in self.parameters(): + p.requires_grad = True + self.trainable = True + return self + + def freeze_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + return self + + def forward(self, x, denorm=False, return_rel_depth=False): + with torch.no_grad(): + if denorm: + x = denormalize(x) + x = self.prep(x) + # print("Shape after prep: ", x.shape) + + with torch.set_grad_enabled(self.trainable): + + # print("Input size to Midascore", x.shape) + rel_depth = self.core(x) + # print("Output from custom_midas_repo.midas shape", rel_depth.shape) + if not self.fetch_features: + return rel_depth + out = [self.core_out[k] for k in self.layer_names] + + if return_rel_depth: + return rel_depth, out + return out + + def get_rel_pos_params(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" in name: + yield p + + def get_enc_params_except_rel_pos(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" not in name: + yield p + + def freeze_encoder(self, freeze_rel_pos=False): + if freeze_rel_pos: + for p in self.core.pretrained.parameters(): + p.requires_grad = False + else: + for p in self.get_enc_params_except_rel_pos(): + p.requires_grad = False + return self + + def attach_hooks(self, midas): + if len(self.handles) > 0: + self.remove_hooks() + if "out_conv" in self.layer_names: + self.handles.append(list(midas.scratch.output_conv.children())[ + 3].register_forward_hook(get_activation("out_conv", self.core_out))) + if "r4" in self.layer_names: + self.handles.append(midas.scratch.refinenet4.register_forward_hook( + get_activation("r4", self.core_out))) + if "r3" in self.layer_names: + 
self.handles.append(midas.scratch.refinenet3.register_forward_hook( + get_activation("r3", self.core_out))) + if "r2" in self.layer_names: + self.handles.append(midas.scratch.refinenet2.register_forward_hook( + get_activation("r2", self.core_out))) + if "r1" in self.layer_names: + self.handles.append(midas.scratch.refinenet1.register_forward_hook( + get_activation("r1", self.core_out))) + if "l4_rn" in self.layer_names: + self.handles.append(midas.scratch.layer4_rn.register_forward_hook( + get_activation("l4_rn", self.core_out))) + + return self + + def remove_hooks(self): + for h in self.handles: + h.remove() + return self + + def __del__(self): + self.remove_hooks() + + def set_output_channels(self, model_type): + self.output_channels = MIDAS_SETTINGS[model_type] + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs): + if midas_model_type not in MIDAS_SETTINGS: + raise ValueError( + f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}") + if "img_size" in kwargs: + kwargs = MidasCore.parse_img_size(kwargs) + img_size = kwargs.pop("img_size", [384, 384]) + # print("img_size", img_size) + import custom_midas_repo + midas_path = Path(inspect.getfile(custom_midas_repo)).parent.resolve() + del custom_midas_repo + midas = torch.hub.load(midas_path, midas_model_type, + pretrained=use_pretrained_midas, force_reload=force_reload, source='local') + kwargs.update({'keep_aspect_ratio': force_keep_ar}) + midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features, + freeze_bn=freeze_bn, img_size=img_size, **kwargs) + midas_core.set_output_channels(midas_model_type) + return midas_core + + @staticmethod + def build_from_config(config): + return MidasCore.build(**config) + + @staticmethod + def parse_img_size(config): + assert 'img_size' in config + if isinstance(config['img_size'], str): + assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W" + config['img_size'] = list(map(int, config['img_size'].split(","))) + assert len( + config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W" + elif isinstance(config['img_size'], int): + config['img_size'] = [config['img_size'], config['img_size']] + else: + assert isinstance(config['img_size'], list) and len( + config['img_size']) == 2, "img_size should be a list of H,W" + return config + + +nchannels2models = { + tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"], + (512, 256, 128, 64, 64): ["MiDaS_small"] +} + +# Model name to number of output channels +MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items() + for m in v + } diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/builder.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..0818311b642561712a03a66655c638ce09a04cca --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/builder.py @@ -0,0 +1,51 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the 
Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from importlib import import_module
+from .depth_model import DepthModel
+
+def build_model(config) -> DepthModel:
+    """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
+    This function should be used to construct models for training and evaluation.
+
+    Args:
+        config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.
+
+    Returns:
+        torch.nn.Module: Model corresponding to name and version as specified in config
+    """
+    module_name = f"zoedepth.models.{config.model}"
+    try:
+        module = import_module(module_name)
+    except ModuleNotFoundError as e:
+        # print the original error message
+        print(e)
+        raise ValueError(
+            f"Model {config.model} not found. See the error above for details.") from e
+    try:
+        get_version = getattr(module, "get_version")
+    except AttributeError as e:
+        raise ValueError(
+            f"Model {config.model} has no get_version function.") from e
+    return get_version(config.version_name).build_from_config(config)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/depth_model.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/depth_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc421c108ea3928c9add62b4c190500d9bd4eda1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/depth_model.py
@@ -0,0 +1,152 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+import PIL.Image
+from PIL import Image
+from typing import Union
+
+
+class DepthModel(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.device = 'cpu'
+
+    def to(self, device) -> nn.Module:
+        self.device = device
+        return super().to(device)
+
+    def forward(self, x, *args, **kwargs):
+        raise NotImplementedError
+
+    def _infer(self, x: torch.Tensor):
+        """
+        Inference interface for the model
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        return self(x)['metric_depth']
+
+    def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
+        """
+        Inference interface for the model with padding augmentation
+        Padding augmentation fixes the boundary artifacts in the output depth map.
+        Boundary artifacts are sometimes caused by the fact that the model is trained on the NYU raw dataset, which has a black or white border around the image.
+        This augmentation pads the input image and crops the prediction back to the original size / view.
+
+        Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+            pad_input (bool, optional): whether to pad the input or not. Defaults to True.
+            fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
+            fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
+            upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
+            padding_mode (str, optional): padding mode. Defaults to "reflect".
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        # assert x is nchw and c = 3
+        assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
+        assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1])
+
+        if pad_input:
+            assert fh > 0 or fw > 0, "at least one of fh and fw must be greater than 0"
+            pad_h = int(np.sqrt(x.shape[2]/2) * fh)
+            pad_w = int(np.sqrt(x.shape[3]/2) * fw)
+            padding = [pad_w, pad_w]
+            if pad_h > 0:
+                padding += [pad_h, pad_h]
+
+            x = F.pad(x, padding, mode=padding_mode, **kwargs)
+        out = self._infer(x)
+        if out.shape[-2:] != x.shape[-2:]:
+            out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
+        if pad_input:
+            # crop to the original size, handling the case where pad_h or pad_w is 0
+            if pad_h > 0:
+                out = out[:, :, pad_h:-pad_h,:]
+            if pad_w > 0:
+                out = out[:, :, :, pad_w:-pad_w]
+        return out
+
+    def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
+        """
+        Inference interface for the model with horizontal flip augmentation
+        Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+ Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + # infer with horizontal flip and average + out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) + out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs) + out = (out + torch.flip(out_flip, dims=[3])) / 2 + return out + + def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor: + """ + Inference interface for the model + Args: + x (torch.Tensor): input tensor of shape (b, c, h, w) + pad_input (bool, optional): whether to use padding augmentation. Defaults to True. + with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. + Returns: + torch.Tensor: output tensor of shape (b, 1, h, w) + """ + if with_flip_aug: + return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs) + else: + return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) + + @torch.no_grad() + def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]: + """ + Inference interface for the model for PIL image + Args: + pil_img (PIL.Image.Image): input PIL image + pad_input (bool, optional): whether to use padding augmentation. Defaults to True. + with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True. + output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy". + """ + x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device) + out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs) + if output_type == "numpy": + return out_tensor.squeeze().cpu().numpy() + elif output_type == "pil": + # uint16 is required for depth pil image + out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16) + return Image.fromarray(out_16bit_numpy) + elif output_type == "tensor": + return out_tensor.squeeze().cpu() + else: + raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'") + \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c344f725c8a10dcaf29d4c308eb49d86ac51ff88 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/__init__.py @@ -0,0 +1,23 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
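With depth_model.py complete, a hedged end-to-end sketch becomes possible: build_model from builder.py resolves the ZoeDepth class (which wires a MidasCore backbone from midas.py underneath), and DepthModel.infer_pil runs the padded, flip-averaged inference defined above. Import paths and the image file are illustrative.

from PIL import Image
from zoedepth.utils.config import get_config  # same helper zoe/__init__.py uses
from zoedepth.models.builder import build_model

conf = get_config("zoedepth", "infer")        # model="zoedepth", version_name="v1"
model = build_model(conf).to("cuda")
depth_np = model.infer_pil(Image.open("room.png"))                     # float32 HxW metric depth
depth_16 = model.infer_pil(Image.open("room.png"), output_type="pil")  # uint16 PIL, depth * 256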
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/attractor.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/attractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8efe645adea1d88a12e2ac5cc6bb2a251eef9d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/attractor.py
@@ -0,0 +1,208 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+@torch.jit.script
+def exp_attractor(dx, alpha: float = 300, gamma: int = 2):
+    """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+
+    Args:
+        dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+        alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+        gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
+
+    Returns:
+        torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc
+    """
+    return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx)
+
+
+@torch.jit.script
+def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
+    """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+    This is the default one according to the accompanying paper.
+
+    Args:
+        dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+        alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+        gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach.
Defaults to 2. + + Returns: + torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc + """ + return dx.div(1+alpha*dx.pow(gamma)) + + +class AttractorLayer(nn.Module): + def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, + alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): + """ + Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth) + """ + super().__init__() + + self.n_attractors = n_attractors + self.n_bins = n_bins + self.min_depth = min_depth + self.max_depth = max_depth + self.alpha = alpha + self.gamma = gamma + self.kind = kind + self.attractor_type = attractor_type + self.memory_efficient = memory_efficient + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm + nn.ReLU(inplace=True) + ) + + def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): + """ + Args: + x (torch.Tensor) : feature block; shape - n, c, h, w + b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w + + Returns: + tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w + """ + if prev_b_embedding is not None: + if interpolate: + prev_b_embedding = nn.functional.interpolate( + prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) + x = x + prev_b_embedding + + A = self._net(x) + eps = 1e-3 + A = A + eps + n, c, h, w = A.shape + A = A.view(n, self.n_attractors, 2, h, w) + A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w + A_normed = A[:, :, 0, ...] # n, na, h, w + + b_prev = nn.functional.interpolate( + b_prev, (h, w), mode='bilinear', align_corners=True) + b_centers = b_prev + + if self.attractor_type == 'exp': + dist = exp_attractor + else: + dist = inv_attractor + + if not self.memory_efficient: + func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] + # .shape N, nbins, h, w + delta_c = func(dist(A_normed.unsqueeze( + 2) - b_centers.unsqueeze(1)), dim=1) + else: + delta_c = torch.zeros_like(b_centers, device=b_centers.device) + for i in range(self.n_attractors): + # .shape N, nbins, h, w + delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers) + + if self.kind == 'mean': + delta_c = delta_c / self.n_attractors + + b_new_centers = b_centers + delta_c + B_centers = (self.max_depth - self.min_depth) * \ + b_new_centers + self.min_depth + B_centers, _ = torch.sort(B_centers, dim=1) + B_centers = torch.clip(B_centers, self.min_depth, self.max_depth) + return b_new_centers, B_centers + + +class AttractorLayerUnnormed(nn.Module): + def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, + alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): + """ + Attractor layer for bin centers. 
Bin centers are unbounded + """ + super().__init__() + + self.n_attractors = n_attractors + self.n_bins = n_bins + self.min_depth = min_depth + self.max_depth = max_depth + self.alpha = alpha + self.gamma = gamma + self.kind = kind + self.attractor_type = attractor_type + self.memory_efficient = memory_efficient + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0), + nn.Softplus() + ) + + def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): + """ + Args: + x (torch.Tensor) : feature block; shape - n, c, h, w + b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w + + Returns: + tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version + """ + if prev_b_embedding is not None: + if interpolate: + prev_b_embedding = nn.functional.interpolate( + prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) + x = x + prev_b_embedding + + A = self._net(x) + n, c, h, w = A.shape + + b_prev = nn.functional.interpolate( + b_prev, (h, w), mode='bilinear', align_corners=True) + b_centers = b_prev + + if self.attractor_type == 'exp': + dist = exp_attractor + else: + dist = inv_attractor + + if not self.memory_efficient: + func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] + # .shape N, nbins, h, w + delta_c = func( + dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1) + else: + delta_c = torch.zeros_like(b_centers, device=b_centers.device) + for i in range(self.n_attractors): + delta_c += dist(A[:, i, ...].unsqueeze(1) - + b_centers) # .shape N, nbins, h, w + + if self.kind == 'mean': + delta_c = delta_c / self.n_attractors + + b_new_centers = b_centers + delta_c + B_centers = b_new_centers + + return b_new_centers, B_centers diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/dist_layers.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/dist_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3208405dfb78fdfc28d5765e5a6d5dbe31967a23 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/dist_layers.py @@ -0,0 +1,121 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
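A quick numeric check of the two attractor functions defined in attractor.py above makes the bin-shifting behaviour concrete; the values are illustrative:

import torch

dx = torch.tensor([0.30, 0.05, -0.01])         # attractor point minus bin center
dc_inv = dx / (1 + 300 * dx.pow(2))            # inv_attractor, alpha=300, gamma=2
dc_exp = torch.exp(-300 * dx.abs() ** 2) * dx  # exp_attractor, same parameters
# dc_inv ~= [0.0107, 0.0286, -0.0097]: large offsets are damped hard while
# tiny offsets pass through, so only nearby bin centers move noticeably.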
+ +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +def log_binom(n, k, eps=1e-7): + """ log(nCk) using stirling approximation """ + n = n + eps + k = k + eps + return n * torch.log(n) - k * torch.log(k) - (n-k) * torch.log(n-k+eps) + + +class LogBinomial(nn.Module): + def __init__(self, n_classes=256, act=torch.softmax): + """Compute log binomial distribution for n_classes + + Args: + n_classes (int, optional): number of output classes. Defaults to 256. + """ + super().__init__() + self.K = n_classes + self.act = act + self.register_buffer('k_idx', torch.arange( + 0, n_classes).view(1, -1, 1, 1)) + self.register_buffer('K_minus_1', torch.Tensor( + [self.K-1]).view(1, -1, 1, 1)) + + def forward(self, x, t=1., eps=1e-4): + """Compute log binomial distribution for x + + Args: + x (torch.Tensor - NCHW): probabilities + t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1.. + eps (float, optional): Small number for numerical stability. Defaults to 1e-4. + + Returns: + torch.Tensor -NCHW: log binomial distribution logbinomial(p;t) + """ + if x.ndim == 3: + x = x.unsqueeze(1) # make it nchw + + one_minus_x = torch.clamp(1 - x, eps, 1) + x = torch.clamp(x, eps, 1) + y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * \ + torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x) + return self.act(y/t, dim=1) + + +class ConditionalLogBinomial(nn.Module): + def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax): + """Conditional Log Binomial distribution + + Args: + in_features (int): number of input channels in main feature + condition_dim (int): number of input channels in condition feature + n_classes (int, optional): Number of classes. Defaults to 256. + bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2. + p_eps (float, optional): small eps value. Defaults to 1e-4. + max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50. + min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7. + """ + super().__init__() + self.p_eps = p_eps + self.max_temp = max_temp + self.min_temp = min_temp + self.log_binomial_transform = LogBinomial(n_classes, act=act) + bottleneck = (in_features + condition_dim) // bottleneck_factor + self.mlp = nn.Sequential( + nn.Conv2d(in_features + condition_dim, bottleneck, + kernel_size=1, stride=1, padding=0), + nn.GELU(), + # 2 for p linear norm, 2 for t linear norm + nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0), + nn.Softplus() + ) + + def forward(self, x, cond): + """Forward pass + + Args: + x (torch.Tensor - NCHW): Main feature + cond (torch.Tensor - NCHW): condition feature + + Returns: + torch.Tensor: Output log binomial distribution + """ + pt = self.mlp(torch.concat((x, cond), dim=1)) + p, t = pt[:, :2, ...], pt[:, 2:, ...] + + p = p + self.p_eps + p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...]) + + t = t + self.p_eps + t = t[:, 0, ...] / (t[:, 0, ...] 
+ t[:, 1, ...]) + t = t.unsqueeze(1) + t = (self.max_temp - self.min_temp) * t + self.min_temp + + return self.log_binomial_transform(p, t) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/localbins_layers.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/localbins_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..f94481605c3e6958ce50e73b2eb31d9f0c07dc67 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/localbins_layers.py @@ -0,0 +1,169 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +class SeedBinRegressor(nn.Module): + def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10): + """Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval. + + Args: + in_features (int): input channels + n_bins (int, optional): Number of bin centers. Defaults to 16. + mlp_dim (int, optional): Hidden dimension. Defaults to 256. + min_depth (float, optional): Min depth value. Defaults to 1e-3. + max_depth (float, optional): Max depth value. Defaults to 10. + """ + super().__init__() + self.version = "1_1" + self.min_depth = min_depth + self.max_depth = max_depth + + self._net = nn.Sequential( + nn.Conv2d(in_features, mlp_dim, 1, 1, 0), + nn.ReLU(inplace=True), + nn.Conv2d(mlp_dim, n_bins, 1, 1, 0), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + """ + Returns tensor of bin_width vectors (centers). One vector b for every pixel + """ + B = self._net(x) + eps = 1e-3 + B = B + eps + B_widths_normed = B / B.sum(dim=1, keepdim=True) + B_widths = (self.max_depth - self.min_depth) * \ + B_widths_normed # .shape NCHW + # pad has the form (left, right, top, bottom, front, back) + B_widths = nn.functional.pad( + B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth) + B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW + + B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...]) + return B_widths_normed, B_centers + + +class SeedBinRegressorUnnormed(nn.Module): + def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10): + """Bin center regressor network. Bin centers are unbounded + + Args: + in_features (int): input channels + n_bins (int, optional): Number of bin centers. Defaults to 16. 
+            mlp_dim (int, optional): Hidden dimension. Defaults to 256.
+            min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+            max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+        """
+        super().__init__()
+        self.version = "1_1"
+        self._net = nn.Sequential(
+            nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
+            nn.Softplus()
+        )
+
+    def forward(self, x):
+        """
+        Returns tensor of bin_width vectors (centers). One vector b for every pixel
+        """
+        B_centers = self._net(x)
+        return B_centers, B_centers
+
+
+class Projector(nn.Module):
+    def __init__(self, in_features, out_features, mlp_dim=128):
+        """Projector MLP
+
+        Args:
+            in_features (int): input channels
+            out_features (int): output channels
+            mlp_dim (int, optional): hidden dimension. Defaults to 128.
+        """
+        super().__init__()
+
+        self._net = nn.Sequential(
+            nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(mlp_dim, out_features, 1, 1, 0),
+        )
+
+    def forward(self, x):
+        return self._net(x)
+
+
+
+class LinearSplitter(nn.Module):
+    def __init__(self, in_features, prev_nbins, split_factor=2, mlp_dim=128, min_depth=1e-3, max_depth=10):
+        super().__init__()
+
+        self.prev_nbins = prev_nbins
+        self.split_factor = split_factor
+        self.min_depth = min_depth
+        self.max_depth = max_depth
+
+        self._net = nn.Sequential(
+            nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+            nn.GELU(),
+            nn.Conv2d(mlp_dim, prev_nbins * split_factor, 1, 1, 0),
+            nn.ReLU()
+        )
+
+    def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+        """
+        x : feature block; shape - n, c, h, w
+        b_prev : previous bin widths normed; shape - n, prev_nbins, h, w
+        """
+        if prev_b_embedding is not None:
+            if interpolate:
+                prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+            x = x + prev_b_embedding
+        S = self._net(x)
+        eps = 1e-3
+        S = S + eps
+        n, c, h, w = S.shape
+        S = S.view(n, self.prev_nbins, self.split_factor, h, w)
+        S_normed = S / S.sum(dim=2, keepdim=True)  # fractional splits
+
+        b_prev = nn.functional.interpolate(b_prev, (h,w), mode='bilinear', align_corners=True)
+
+
+        b_prev = b_prev / b_prev.sum(dim=1, keepdim=True)  # renormalize to guarantee widths sum to 1
+        # print(b_prev.shape, S_normed.shape)
+        # if is_for_query:(1).expand(-1, b_prev.size(0)//n, -1, -1, -1, -1).flatten(0,1) # TODO ? can replace all this with a single torch.repeat?
+        b = b_prev.unsqueeze(2) * S_normed
+        b = b.flatten(1,2)  # .shape n, prev_nbins * split_factor, h, w
+
+        # calculate bin centers for loss calculation
+        B_widths = (self.max_depth - self.min_depth) * b  # .shape N, nprev * splitfactor, H, W
+        # pad has the form (left, right, top, bottom, front, back)
+        B_widths = nn.functional.pad(B_widths, (0,0,0,0,1,0), mode='constant', value=self.min_depth)
+        B_edges = torch.cumsum(B_widths, dim=1)  # .shape NCHW
+
+        B_centers = 0.5 * (B_edges[:, :-1, ...]
+ B_edges[:,1:,...]) + return b, B_centers \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/patch_transformer.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/patch_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..99d9e51a06b981bae45ce7dd64eaef19a4121991 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/layers/patch_transformer.py @@ -0,0 +1,91 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn + + +class PatchTransformerEncoder(nn.Module): + def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False): + """ViT-like transformer block + + Args: + in_channels (int): Input channels + patch_size (int, optional): patch size. Defaults to 10. + embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128. + num_heads (int, optional): number of attention heads. Defaults to 4. + use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as "class token"). Defaults to False. 
+ """ + super(PatchTransformerEncoder, self).__init__() + self.use_class_token = use_class_token + encoder_layers = nn.TransformerEncoderLayer( + embedding_dim, num_heads, dim_feedforward=1024) + self.transformer_encoder = nn.TransformerEncoder( + encoder_layers, num_layers=4) # takes shape S,N,E + + self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim, + kernel_size=patch_size, stride=patch_size, padding=0) + + def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'): + """Generate positional encodings + + Args: + sequence_length (int): Sequence length + embedding_dim (int): Embedding dimension + + Returns: + torch.Tensor SBE: Positional encodings + """ + position = torch.arange( + 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1) + index = torch.arange( + 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0) + div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim)) + pos_encoding = position * div_term + pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1) + pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1) + return pos_encoding + + + def forward(self, x): + """Forward pass + + Args: + x (torch.Tensor - NCHW): Input feature tensor + + Returns: + torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim + """ + embeddings = self.embedding_convPxP(x).flatten( + 2) # .shape = n,c,s = n, embedding_dim, s + if self.use_class_token: + # extra special token at start ? + embeddings = nn.functional.pad(embeddings, (1, 0)) + + # change to S,N,E format required by transformer + embeddings = embeddings.permute(2, 0, 1) + S, N, E = embeddings.shape + embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device) + x = self.transformer_encoder(embeddings) # .shape = S, N, E + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/model_io.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/model_io.py new file mode 100644 index 0000000000000000000000000000000000000000..78b6579631dd847ac76651238cb5a948b5a66286 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/model_io.py @@ -0,0 +1,92 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
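Before the weight-loading helpers in model_io.py, a shape sketch for the PatchTransformerEncoder just added; the feature-map dimensions and import path are illustrative:

import torch
from zoedepth.models.layers.patch_transformer import PatchTransformerEncoder

pte = PatchTransformerEncoder(in_channels=256, patch_size=10, embedding_dim=128, num_heads=4)
feats = torch.randn(2, 256, 40, 40)  # N, C, H, W feature map
out = pte(feats)                     # S, N, E = (16, 2, 128); S = (40 // 10) ** 2 patches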
+ +# File author: Shariq Farooq Bhat + +import torch + +def load_state_dict(model, state_dict): + """Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict. + + DataParallel prefixes state_dict keys with 'module.' when saving. + If the model is not a DataParallel model but the state_dict is, then prefixes are removed. + If the model is a DataParallel model but the state_dict is not, then prefixes are added. + """ + state_dict = state_dict.get('model', state_dict) + # if model is a DataParallel model, then state_dict keys are prefixed with 'module.' + + do_prefix = isinstance( + model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)) + state = {} + for k, v in state_dict.items(): + if k.startswith('module.') and not do_prefix: + k = k[7:] + + if not k.startswith('module.') and do_prefix: + k = 'module.' + k + + state[k] = v + + model.load_state_dict(state) + print("Loaded successfully") + return model + + +def load_wts(model, checkpoint_path): + ckpt = torch.load(checkpoint_path, map_location='cpu') + return load_state_dict(model, ckpt) + + +def load_state_dict_from_url(model, url, **kwargs): + state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs) + return load_state_dict(model, state_dict) + + +def load_state_from_resource(model, resource: str): + """Loads weights to the model from a given resource. A resource can be of following types: + 1. URL. Prefixed with "url::" + e.g. url::http(s)://url.resource.com/ckpt.pt + + 2. Local path. Prefixed with "local::" + e.g. local::/path/to/ckpt.pt + + + Args: + model (torch.nn.Module): Model + resource (str): resource string + + Returns: + torch.nn.Module: Model with loaded weights + """ + print(f"Using pretrained resource {resource}") + + if resource.startswith('url::'): + url = resource.split('url::')[1] + return load_state_dict_from_url(model, url, progress=True) + + elif resource.startswith('local::'): + path = resource.split('local::')[1] + return load_wts(model, path) + + else: + raise ValueError("Invalid resource type, only url:: and local:: are supported") + \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc33f737d238766559f0e3a8def3c0b568f23b7f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/__init__.py @@ -0,0 +1,31 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +from .zoedepth_v1 import ZoeDepth + +all_versions = { + "v1": ZoeDepth, +} + +get_version = lambda v : all_versions[v] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth.json b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth.json new file mode 100644 index 0000000000000000000000000000000000000000..3112ed78c89f00e1d13f5d6e5be87cd3216b6dc7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth.json @@ -0,0 +1,58 @@ +{ + "model": { + "name": "ZoeDepth", + "version_name": "v1", + "n_bins": 64, + "bin_embedding_dim": 128, + "bin_centers_type": "softplus", + "n_attractors":[16, 8, 4, 1], + "attractor_alpha": 1000, + "attractor_gamma": 2, + "attractor_kind" : "mean", + "attractor_type" : "inv", + "midas_model_type" : "DPT_BEiT_L_384", + "min_temp": 0.0212, + "max_temp": 50.0, + "output_distribution": "logbinomial", + "memory_efficient": true, + "inverse_midas": false, + "img_size": [384, 512] + }, + + "train": { + "train_midas": true, + "use_pretrained_midas": true, + "trainer": "zoedepth", + "epochs": 5, + "bs": 16, + "optim_kwargs": {"lr": 0.000161, "wd": 0.01}, + "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true}, + "same_lr": false, + "w_si": 1, + "w_domain": 0.2, + "w_reg": 0, + "w_grad": 0, + "avoid_boundary": false, + "random_crop": false, + "input_width": 640, + "input_height": 480, + "midas_lr_factor": 1, + "encoder_lr_factor":10, + "pos_enc_lr_factor":10, + "freeze_midas_bn": true + + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : null, + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : null + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json new file mode 100644 index 0000000000000000000000000000000000000000..b51802aa44b91c39e15aacaac4b5ab6bec884414 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json @@ -0,0 +1,22 @@ +{ + "model": { + "bin_centers_type": "normed", + "img_size": [384, 768] + }, + + "train": { + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt", + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt" + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/zoedepth_v1.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/zoedepth_v1.py new file mode 100644 index 
0000000000000000000000000000000000000000..bc931b059d6165c84e8ff4f09d5c62d19930cee9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth/zoedepth_v1.py
@@ -0,0 +1,250 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import itertools
+
+import torch
+import torch.nn as nn
+from ..depth_model import DepthModel
+from ..base_models.midas import MidasCore
+from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
+from ..layers.dist_layers import ConditionalLogBinomial
+from ..layers.localbins_layers import (Projector, SeedBinRegressor,
+                                       SeedBinRegressorUnnormed)
+from ..model_io import load_state_from_resource
+
+
+class ZoeDepth(DepthModel):
+    def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10,
+                 n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True,
+                 midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
+        """ZoeDepth model. This is the version of ZoeDepth that has a single metric head
+
+        Args:
+            core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
+            n_bins (int, optional): Number of bin centers. Defaults to 64.
+            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
+                                              For "softplus", softplus activation is used and the bin centers are thus unbounded. Defaults to "softplus".
+            bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
+            min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
+            max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
+            n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
+            attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
+            attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
+            attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
+ attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. + min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. + max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. + train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. + midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. + encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. + pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. + """ + super().__init__() + + self.core = core + self.max_depth = max_depth + self.min_depth = min_depth + self.min_temp = min_temp + self.bin_centers_type = bin_centers_type + + self.midas_lr_factor = midas_lr_factor + self.encoder_lr_factor = encoder_lr_factor + self.pos_enc_lr_factor = pos_enc_lr_factor + self.train_midas = train_midas + self.inverse_midas = inverse_midas + + if self.encoder_lr_factor <= 0: + self.core.freeze_encoder( + freeze_rel_pos=self.pos_enc_lr_factor <= 0) + + N_MIDAS_OUT = 32 + btlnck_features = self.core.output_channels[0] + num_out_features = self.core.output_channels[1:] + + self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, + kernel_size=1, stride=1, padding=0) # btlnck conv + + if bin_centers_type == "normed": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayer + elif bin_centers_type == "softplus": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid1": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid2": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayer + else: + raise ValueError( + "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") + + self.seed_bin_regressor = SeedBinRegressorLayer( + btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) + self.seed_projector = Projector(btlnck_features, bin_embedding_dim) + self.projectors = nn.ModuleList([ + Projector(num_out, bin_embedding_dim) + for num_out in num_out_features + ]) + self.attractors = nn.ModuleList([ + Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, + alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) + for i in range(len(num_out_features)) + ]) + + last_in = N_MIDAS_OUT + 1 # +1 for relative depth + + # use log binomial instead of softmax + self.conditional_log_binomial = ConditionalLogBinomial( + last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp) + + def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): + """ + Args: + x (torch.Tensor): Input image tensor of shape (B, C, H, W) + return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False. + denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False. + return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False. 
+ + Returns: + dict: Dictionary containing the following keys: + - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W) + - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W) + - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True + - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True + + """ + b, c, h, w = x.shape + # print("input shape ", x.shape) + self.orig_input_width = w + self.orig_input_height = h + rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) + # print("output shapes", rel_depth.shape, out.shape) + + outconv_activation = out[0] + btlnck = out[1] + x_blocks = out[2:] + + x_d0 = self.conv2(btlnck) + x = x_d0 + _, seed_b_centers = self.seed_bin_regressor(x) + + if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': + b_prev = (seed_b_centers - self.min_depth) / \ + (self.max_depth - self.min_depth) + else: + b_prev = seed_b_centers + + prev_b_embedding = self.seed_projector(x) + + # unroll this loop for better performance + for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks): + b_embedding = projector(x) + b, b_centers = attractor( + b_embedding, b_prev, prev_b_embedding, interpolate=True) + b_prev = b.clone() + prev_b_embedding = b_embedding.clone() + + last = outconv_activation + + if self.inverse_midas: + # invert depth followed by normalization + rel_depth = 1.0 / (rel_depth + 1e-6) + rel_depth = (rel_depth - rel_depth.min()) / \ + (rel_depth.max() - rel_depth.min()) + # concat rel depth with last. First interpolate rel depth to last size + rel_cond = rel_depth.unsqueeze(1) + rel_cond = nn.functional.interpolate( + rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True) + last = torch.cat([last, rel_cond], dim=1) + + b_embedding = nn.functional.interpolate( + b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) + x = self.conditional_log_binomial(last, b_embedding) + + # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor + # print(x.shape, b_centers.shape) + b_centers = nn.functional.interpolate( + b_centers, x.shape[-2:], mode='bilinear', align_corners=True) + out = torch.sum(x * b_centers, dim=1, keepdim=True) + + # Structure output dict + output = dict(metric_depth=out) + if return_final_centers or return_probs: + output['bin_centers'] = b_centers + + if return_probs: + output['probs'] = x + + return output + + def get_lr_params(self, lr): + """ + Learning rate configuration for different layers of the model + Args: + lr (float) : Base learning rate + Returns: + list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
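+
+        Example (a minimal sketch; assumes `model` is an already-built ZoeDepth instance,
+        with the learning rate taken from config_zoedepth.json):
+            >>> optimizer = torch.optim.AdamW(model.get_lr_params(lr=1.61e-4), weight_decay=0.01)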
+ """ + param_conf = [] + if self.train_midas: + if self.encoder_lr_factor > 0: + param_conf.append({'params': self.core.get_enc_params_except_rel_pos( + ), 'lr': lr / self.encoder_lr_factor}) + + if self.pos_enc_lr_factor > 0: + param_conf.append( + {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor}) + + midas_params = self.core.core.scratch.parameters() + midas_lr_factor = self.midas_lr_factor + param_conf.append( + {'params': midas_params, 'lr': lr / midas_lr_factor}) + + remaining_modules = [] + for name, child in self.named_children(): + if name != 'core': + remaining_modules.append(child) + remaining_params = itertools.chain( + *[child.parameters() for child in remaining_modules]) + + param_conf.append({'params': remaining_params, 'lr': lr}) + + return param_conf + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): + core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, + train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) + model = ZoeDepth(core, **kwargs) + if pretrained_resource: + assert isinstance(pretrained_resource, str), "pretrained_resource must be a string" + model = load_state_from_resource(model, pretrained_resource) + return model + + @staticmethod + def build_from_config(config): + return ZoeDepth.build(**config) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..513a278b939c10c010e3c0250ec73544d5663886 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/__init__.py @@ -0,0 +1,31 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + +from .zoedepth_nk_v1 import ZoeDepthNK + +all_versions = { + "v1": ZoeDepthNK, +} + +get_version = lambda v : all_versions[v] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json new file mode 100644 index 0000000000000000000000000000000000000000..42bab2a3ad159a09599a5aba270c491021a3cf1a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json @@ -0,0 +1,67 @@ +{ + "model": { + "name": "ZoeDepthNK", + "version_name": "v1", + "bin_conf" : [ + { + "name": "nyu", + "n_bins": 64, + "min_depth": 1e-3, + "max_depth": 10.0 + }, + { + "name": "kitti", + "n_bins": 64, + "min_depth": 1e-3, + "max_depth": 80.0 + } + ], + "bin_embedding_dim": 128, + "bin_centers_type": "softplus", + "n_attractors":[16, 8, 4, 1], + "attractor_alpha": 1000, + "attractor_gamma": 2, + "attractor_kind" : "mean", + "attractor_type" : "inv", + "min_temp": 0.0212, + "max_temp": 50.0, + "memory_efficient": true, + "midas_model_type" : "DPT_BEiT_L_384", + "img_size": [384, 512] + }, + + "train": { + "train_midas": true, + "use_pretrained_midas": true, + "trainer": "zoedepth_nk", + "epochs": 5, + "bs": 16, + "optim_kwargs": {"lr": 0.0002512, "wd": 0.01}, + "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true}, + "same_lr": false, + "w_si": 1, + "w_domain": 100, + "avoid_boundary": false, + "random_crop": false, + "input_width": 640, + "input_height": 480, + "w_grad": 0, + "w_reg": 0, + "midas_lr_factor": 10, + "encoder_lr_factor":10, + "pos_enc_lr_factor":10 + }, + + "infer": { + "train_midas": false, + "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt", + "use_pretrained_midas": false, + "force_keep_ar": true + }, + + "eval": { + "train_midas": false, + "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt", + "use_pretrained_midas": false + } +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..7368ae8031188a9f946d9d3f29633c96e791e68e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py @@ -0,0 +1,333 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import itertools
+
+import torch
+import torch.nn as nn
+
+# relative imports so the vendored zoedepth package resolves inside controlnet_aux
+# (consistent with the sibling zoedepth_v1.py above)
+from ..depth_model import DepthModel
+from ..base_models.midas import MidasCore
+from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
+from ..layers.dist_layers import ConditionalLogBinomial
+from ..layers.localbins_layers import (Projector, SeedBinRegressor,
+                                       SeedBinRegressorUnnormed)
+from ..layers.patch_transformer import PatchTransformerEncoder
+from ..model_io import load_state_from_resource
+
+
+class ZoeDepthNK(DepthModel):
+    def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128,
+                 n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp',
+                 min_temp=5, max_temp=50,
+                 memory_efficient=False, train_midas=True,
+                 is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
+        """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts.
+
+        Args:
+            core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
+
+            bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys:
+                                   "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float)
+
+                                   The length of this list determines the number of metric heads.
+            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
+                                              For "softplus", softplus activation is used and the bin centers are thus unbounded. Defaults to "softplus".
+            bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
+
+            n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
+            attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
+            attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
+            attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
+            attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
+
+            min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
+            max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
+
+            memory_efficient (bool, optional): Whether to use the memory-efficient version of attractor layers. The memory-efficient version is slower but is recommended in case of multiple metric heads, in order to save GPU memory. Defaults to False.
+ + train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. + is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True. + midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. + encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. + pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. + + """ + + super().__init__() + + self.core = core + self.bin_conf = bin_conf + self.min_temp = min_temp + self.max_temp = max_temp + self.memory_efficient = memory_efficient + self.train_midas = train_midas + self.is_midas_pretrained = is_midas_pretrained + self.midas_lr_factor = midas_lr_factor + self.encoder_lr_factor = encoder_lr_factor + self.pos_enc_lr_factor = pos_enc_lr_factor + self.inverse_midas = inverse_midas + + N_MIDAS_OUT = 32 + btlnck_features = self.core.output_channels[0] + num_out_features = self.core.output_channels[1:] + # self.scales = [16, 8, 4, 2] # spatial scale factors + + self.conv2 = nn.Conv2d( + btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) + + # Transformer classifier on the bottleneck + self.patch_transformer = PatchTransformerEncoder( + btlnck_features, 1, 128, use_class_token=True) + self.mlp_classifier = nn.Sequential( + nn.Linear(128, 128), + nn.ReLU(), + nn.Linear(128, 2) + ) + + if bin_centers_type == "normed": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayer + elif bin_centers_type == "softplus": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid1": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid2": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayer + else: + raise ValueError( + "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") + self.bin_centers_type = bin_centers_type + # We have bins for each bin conf. 
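+        # e.g. with the bin_conf from config_zoedepth_nk.json above, this creates one
+        # seed regressor per domain, keyed "nyu" and "kitti"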
+ # Create a map (ModuleDict) of 'name' -> seed_bin_regressor + self.seed_bin_regressors = nn.ModuleDict( + {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim//2, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) + for conf in bin_conf} + ) + + self.seed_projector = Projector( + btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim//2) + self.projectors = nn.ModuleList([ + Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim//2) + for num_out in num_out_features + ]) + + # Create a map (ModuleDict) of 'name' -> attractors (ModuleList) + self.attractors = nn.ModuleDict( + {conf['name']: nn.ModuleList([ + Attractor(bin_embedding_dim, n_attractors[i], + mlp_dim=bin_embedding_dim, alpha=attractor_alpha, + gamma=attractor_gamma, kind=attractor_kind, + attractor_type=attractor_type, memory_efficient=memory_efficient, + min_depth=conf["min_depth"], max_depth=conf["max_depth"]) + for i in range(len(n_attractors)) + ]) + for conf in bin_conf} + ) + + last_in = N_MIDAS_OUT + # conditional log binomial for each bin conf + self.conditional_log_binomial = nn.ModuleDict( + {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) + for conf in bin_conf} + ) + + def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): + """ + Args: + x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain. + return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False. + denorm (bool, optional): Whether to denormalize the input image. Defaults to False. + return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False. + + Returns: + dict: Dictionary of outputs with keys: + - "rel_depth": Relative depth map of shape (B, 1, H, W) + - "metric_depth": Metric depth map of shape (B, 1, H, W) + - "domain_logits": Domain logits of shape (B, 2) + - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True + - "probs": Bin probabilities of shape (B, N, H, W). 
Present only if return_probs is True + """ + b, c, h, w = x.shape + self.orig_input_width = w + self.orig_input_height = h + rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) + + outconv_activation = out[0] + btlnck = out[1] + x_blocks = out[2:] + + x_d0 = self.conv2(btlnck) + x = x_d0 + + # Predict which path to take + embedding = self.patch_transformer(x)[0] # N, E + domain_logits = self.mlp_classifier(embedding) # N, 2 + domain_vote = torch.softmax(domain_logits.sum( + dim=0, keepdim=True), dim=-1) # 1, 2 + + # Get the path + bin_conf_name = ["nyu", "kitti"][torch.argmax( + domain_vote, dim=-1).squeeze().item()] + + try: + conf = [c for c in self.bin_conf if c.name == bin_conf_name][0] + except IndexError: + raise ValueError( + f"bin_conf_name {bin_conf_name} not found in bin_confs") + + min_depth = conf['min_depth'] + max_depth = conf['max_depth'] + + seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] + _, seed_b_centers = seed_bin_regressor(x) + if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': + b_prev = (seed_b_centers - min_depth)/(max_depth - min_depth) + else: + b_prev = seed_b_centers + prev_b_embedding = self.seed_projector(x) + + attractors = self.attractors[bin_conf_name] + for projector, attractor, x in zip(self.projectors, attractors, x_blocks): + b_embedding = projector(x) + b, b_centers = attractor( + b_embedding, b_prev, prev_b_embedding, interpolate=True) + b_prev = b + prev_b_embedding = b_embedding + + last = outconv_activation + + b_centers = nn.functional.interpolate( + b_centers, last.shape[-2:], mode='bilinear', align_corners=True) + b_embedding = nn.functional.interpolate( + b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) + + clb = self.conditional_log_binomial[bin_conf_name] + x = clb(last, b_embedding) + + # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor + # print(x.shape, b_centers.shape) + # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) + out = torch.sum(x * b_centers, dim=1, keepdim=True) + + output = dict(domain_logits=domain_logits, metric_depth=out) + if return_final_centers or return_probs: + output['bin_centers'] = b_centers + + if return_probs: + output['probs'] = x + return output + + def get_lr_params(self, lr): + """ + Learning rate configuration for different layers of the model + + Args: + lr (float) : Base learning rate + Returns: + list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
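+
+        Example (a minimal sketch; assumes `model` is an already-built ZoeDepthNK instance,
+        with the learning rate taken from config_zoedepth_nk.json):
+            >>> param_groups = model.get_lr_params(lr=2.512e-4)
+            >>> optimizer = torch.optim.AdamW(param_groups, weight_decay=0.01)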
+ """ + param_conf = [] + if self.train_midas: + def get_rel_pos_params(): + for name, p in self.core.core.pretrained.named_parameters(): + if "relative_position" in name: + yield p + + def get_enc_params_except_rel_pos(): + for name, p in self.core.core.pretrained.named_parameters(): + if "relative_position" not in name: + yield p + + encoder_params = get_enc_params_except_rel_pos() + rel_pos_params = get_rel_pos_params() + midas_params = self.core.core.scratch.parameters() + midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 + param_conf.extend([ + {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, + {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, + {'params': midas_params, 'lr': lr / midas_lr_factor} + ]) + + remaining_modules = [] + for name, child in self.named_children(): + if name != 'core': + remaining_modules.append(child) + remaining_params = itertools.chain( + *[child.parameters() for child in remaining_modules]) + param_conf.append({'params': remaining_params, 'lr': lr}) + return param_conf + + def get_conf_parameters(self, conf_name): + """ + Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + params = [] + for name, child in self.named_children(): + if isinstance(child, nn.ModuleDict): + for bin_conf_name, module in child.items(): + if bin_conf_name == conf_name: + params += list(module.parameters()) + return params + + def freeze_conf(self, conf_name): + """ + Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + for p in self.get_conf_parameters(conf_name): + p.requires_grad = False + + def unfreeze_conf(self, conf_name): + """ + Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration + """ + for p in self.get_conf_parameters(conf_name): + p.requires_grad = True + + def freeze_all_confs(self): + """ + Freezes all the parameters of all the ModuleDicts children + """ + for name, child in self.named_children(): + if isinstance(child, nn.ModuleDict): + for bin_conf_name, module in child.items(): + for p in module.parameters(): + p.requires_grad = False + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): + core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, + train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) + model = ZoeDepthNK(core, **kwargs) + if pretrained_resource: + assert isinstance(pretrained_resource, str), "pretrained_resource must be a string" + model = load_state_from_resource(model, pretrained_resource) + return model + + @staticmethod + def build_from_config(config): + return ZoeDepthNK.build(**config) diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the 
"Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/arg_utils.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/arg_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3004ec3679c0a40fd8961253733fb4343ad545 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/arg_utils.py @@ -0,0 +1,33 @@ + + +def infer_type(x): # hacky way to infer type from string args + if not isinstance(x, str): + return x + + try: + x = int(x) + return x + except ValueError: + pass + + try: + x = float(x) + return x + except ValueError: + pass + + return x + + +def parse_unknown(unknown_args): + clean = [] + for a in unknown_args: + if "=" in a: + k, v = a.split("=") + clean.extend([k, v]) + else: + clean.append(a) + + keys = clean[::2] + values = clean[1::2] + return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)} diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/config.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..84996564663dadf0e720de2a68ef8c53106ed666 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/config.py @@ -0,0 +1,437 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + +import json +import os + +from .easydict import EasyDict as edict +from .arg_utils import infer_type + +import pathlib +import platform + +ROOT = pathlib.Path(__file__).parent.parent.resolve() + +HOME_DIR = os.path.expanduser("~") + +COMMON_CONFIG = { + "save_dir": os.path.expanduser("~/shortcuts/monodepth3_checkpoints"), + "project": "ZoeDepth", + "tags": '', + "notes": "", + "gpu": None, + "root": ".", + "uid": None, + "print_losses": False +} + +DATASETS_CONFIG = { + "kitti": { + "dataset": "kitti", + "min_depth": 0.001, + "max_depth": 80, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt", + "input_height": 352, + "input_width": 1216, # 704 + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt", + + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + + "do_random_rotate": True, + "degree": 1.0, + "do_kb_crop": True, + "garg_crop": True, + "eigen_crop": False, + "use_right": False + }, + "kitti_test": { + "dataset": "kitti", + "min_depth": 0.001, + "max_depth": 80, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt", + "input_height": 352, + "input_width": 1216, + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"), + "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt", + + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + + "do_random_rotate": False, + "degree": 1.0, + "do_kb_crop": True, + "garg_crop": True, + "eigen_crop": False, + "use_right": False + }, + "nyu": { + "dataset": "nyu", + "avoid_boundary": False, + "min_depth": 1e-3, # originally 0.1 + "max_depth": 10, + "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"), + "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"), + "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt", + "input_height": 480, + "input_width": 640, + "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"), + "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"), + "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt", + "min_depth_eval": 1e-3, + "max_depth_eval": 10, + "min_depth_diff": -10, + "max_depth_diff": 10, + + "do_random_rotate": True, + "degree": 1.0, + "do_kb_crop": False, + "garg_crop": False, + "eigen_crop": True + }, + "ibims": { + "dataset": "ibims", + "ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "sunrgbd": { + "dataset": "sunrgbd", + "sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 8, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diml_indoor": { + "dataset": "diml_indoor", + 
"diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 0, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diml_outdoor": { + "dataset": "diml_outdoor", + "diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": False, + "min_depth_eval": 2, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "diode_indoor": { + "dataset": "diode_indoor", + "diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 10, + "min_depth": 1e-3, + "max_depth": 10 + }, + "diode_outdoor": { + "dataset": "diode_outdoor", + "diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "hypersim_test": { + "dataset": "hypersim_test", + "hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"), + "eigen_crop": True, + "garg_crop": False, + "do_kb_crop": False, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 10 + }, + "vkitti": { + "dataset": "vkitti", + "vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80 + }, + "vkitti2": { + "dataset": "vkitti2", + "vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80, + }, + "ddad": { + "dataset": "ddad", + "ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"), + "eigen_crop": False, + "garg_crop": True, + "do_kb_crop": True, + "min_depth_eval": 1e-3, + "max_depth_eval": 80, + "min_depth": 1e-3, + "max_depth": 80, + }, +} + +ALL_INDOOR = ["nyu", "ibims", "sunrgbd", "diode_indoor", "hypersim_test"] +ALL_OUTDOOR = ["kitti", "diml_outdoor", "diode_outdoor", "vkitti2", "ddad"] +ALL_EVAL_DATASETS = ALL_INDOOR + ALL_OUTDOOR + +COMMON_TRAINING_CONFIG = { + "dataset": "nyu", + "distributed": True, + "workers": 16, + "clip_grad": 0.1, + "use_shared_dict": False, + "shared_dict": None, + "use_amp": False, + + "aug": True, + "random_crop": False, + "random_translate": False, + "translate_prob": 0.2, + "max_translation": 100, + + "validate_every": 0.25, + "log_images_every": 0.1, + "prefetch": False, +} + + +def flatten(config, except_keys=('bin_conf')): + def recurse(inp): + if isinstance(inp, dict): + for key, value in inp.items(): + if key in except_keys: + yield (key, value) + if isinstance(value, dict): + yield from recurse(value) + else: + yield (key, value) + + return dict(list(recurse(config))) + + +def split_combined_args(kwargs): + """Splits the arguments that are combined with '__' into multiple arguments. + Combined arguments should have equal number of keys and values. + Keys are separated by '__' and Values are separated with ';'. + For example, '__n_bins__lr=256;0.001' + + Args: + kwargs (dict): key-value pairs of arguments where key-value is optionally combined according to the above format. 
+ + Returns: + dict: Parsed dict with the combined arguments split into individual key-value pairs. + """ + new_kwargs = dict(kwargs) + for key, value in kwargs.items(): + if key.startswith("__"): + keys = key.split("__")[1:] + values = value.split(";") + assert len(keys) == len( + values), f"Combined arguments should have equal number of keys and values. Keys are separated by '__' and Values are separated with ';'. For example, '__n_bins__lr=256;0.001. Given (keys,values) is ({keys}, {values})" + for k, v in zip(keys, values): + new_kwargs[k] = v + return new_kwargs + + +def parse_list(config, key, dtype=int): + """Parse a list of values for the key if the value is a string. The values are separated by a comma. + Modifies the config in place. + """ + if key in config: + if isinstance(config[key], str): + config[key] = list(map(dtype, config[key].split(','))) + assert isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]] + ), f"{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}." + + +def get_model_config(model_name, model_version=None): + """Find and parse the .json config file for the model. + + Args: + model_name (str): name of the model. The config file should be named config_{model_name}[_{model_version}].json under the models/{model_name} directory. + model_version (str, optional): Specific config version. If specified config_{model_name}_{model_version}.json is searched for and used. Otherwise config_{model_name}.json is used. Defaults to None. + + Returns: + easydict: the config dictionary for the model. + """ + config_fname = f"config_{model_name}_{model_version}.json" if model_version is not None else f"config_{model_name}.json" + config_file = os.path.join(ROOT, "models", model_name, config_fname) + if not os.path.exists(config_file): + return None + + with open(config_file, "r") as f: + config = edict(json.load(f)) + + # handle dictionary inheritance + # only training config is supported for inheritance + if "inherit" in config.train and config.train.inherit is not None: + inherit_config = get_model_config(config.train["inherit"]).train + for key, value in inherit_config.items(): + if key not in config.train: + config.train[key] = value + return edict(config) + + +def update_model_config(config, mode, model_name, model_version=None, strict=False): + model_config = get_model_config(model_name, model_version) + if model_config is not None: + config = {**config, ** + flatten({**model_config.model, **model_config[mode]})} + elif strict: + raise ValueError(f"Config file for model {model_name} not found.") + return config + + +def check_choices(name, value, choices): + # return # No checks in dev branch + if value not in choices: + raise ValueError(f"{name} {value} not in supported choices {choices}") + + +KEYS_TYPE_BOOL = ["use_amp", "distributed", "use_shared_dict", "same_lr", "aug", "three_phase", + "prefetch", "cycle_momentum"] # Casting is not necessary as their int casted values in config are 0 or 1 + + +def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs): + """Main entry point to get the config for the model. + + Args: + model_name (str): name of the desired model. + mode (str, optional): "train" or "infer". Defaults to 'train'. + dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None. + + Keyword Args: key-value pairs of arguments to overwrite the default config. 
+ + The order of precedence for overwriting the config is (Higher precedence first): + # 1. overwrite_kwargs + # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json + # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json + # 4. common_config: Default config for all models specified in COMMON_CONFIG + + Returns: + easydict: The config dictionary for the model. + """ + + + check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"]) + check_choices("Mode", mode, ["train", "infer", "eval"]) + if mode == "train": + check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None]) + + config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG}) + config = update_model_config(config, mode, model_name) + + # update with model version specific config + version_name = overwrite_kwargs.get("version_name", config["version_name"]) + config = update_model_config(config, mode, model_name, version_name) + + # update with config version if specified + config_version = overwrite_kwargs.get("config_version", None) + if config_version is not None: + print("Overwriting config with config_version", config_version) + config = update_model_config(config, mode, model_name, config_version) + + # update with overwrite_kwargs + # Combined args are useful for hyperparameter search + overwrite_kwargs = split_combined_args(overwrite_kwargs) + config = {**config, **overwrite_kwargs} + + # Casting to bool # TODO: Not necessary. Remove and test + for key in KEYS_TYPE_BOOL: + if key in config: + config[key] = bool(config[key]) + + # Model specific post processing of config + parse_list(config, "n_attractors") + + # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs + if 'bin_conf' in config and 'n_bins' in overwrite_kwargs: + bin_conf = config['bin_conf'] # list of dicts + n_bins = overwrite_kwargs['n_bins'] + new_bin_conf = [] + for conf in bin_conf: + conf['n_bins'] = n_bins + new_bin_conf.append(conf) + config['bin_conf'] = new_bin_conf + + if mode == "train": + orig_dataset = dataset + if dataset == "mix": + dataset = 'nyu' # Use nyu as default for mix. 
Dataset config is changed accordingly while loading the dataloader + if dataset is not None: + config['project'] = f"MonoDepth3-{orig_dataset}" # Set project for wandb + + if dataset is not None: + config['dataset'] = dataset + config = {**DATASETS_CONFIG[dataset], **config} + + + config['model'] = model_name + typed_config = {k: infer_type(v) for k, v in config.items()} + # add hostname to config + config['hostname'] = platform.node() + return edict(typed_config) + + +def change_dataset(config, new_dataset): + config.update(DATASETS_CONFIG[new_dataset]) + return config diff --git a/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/easydict/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/easydict/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15928179b0182c6045d98bc0a7be1c6ca45f675e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/controlnet_aux/zoe/zoedepth/utils/easydict/__init__.py @@ -0,0 +1,158 @@ +""" +EasyDict +Copy/pasted from https://github.com/makinacorpus/easydict +Original author: Mathieu Leplatre +""" + +class EasyDict(dict): + """ + Get attributes + + >>> d = EasyDict({'foo':3}) + >>> d['foo'] + 3 + >>> d.foo + 3 + >>> d.bar + Traceback (most recent call last): + ... + AttributeError: 'EasyDict' object has no attribute 'bar' + + Works recursively + + >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}}) + >>> isinstance(d.bar, dict) + True + >>> d.bar.x + 1 + + Bullet-proof + + >>> EasyDict({}) + {} + >>> EasyDict(d={}) + {} + >>> EasyDict(None) + {} + >>> d = {'a': 1} + >>> EasyDict(**d) + {'a': 1} + >>> EasyDict((('a', 1), ('b', 2))) + {'a': 1, 'b': 2} + + Set attributes + + >>> d = EasyDict() + >>> d.foo = 3 + >>> d.foo + 3 + >>> d.bar = {'prop': 'value'} + >>> d.bar.prop + 'value' + >>> d + {'foo': 3, 'bar': {'prop': 'value'}} + >>> d.bar.prop = 'newer' + >>> d.bar.prop + 'newer' + + + Values extraction + + >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]}) + >>> isinstance(d.bar, list) + True + >>> from operator import attrgetter + >>> list(map(attrgetter('x'), d.bar)) + [1, 3] + >>> list(map(attrgetter('y'), d.bar)) + [2, 4] + >>> d = EasyDict() + >>> list(d.keys()) + [] + >>> d = EasyDict(foo=3, bar=dict(x=1, y=2)) + >>> d.foo + 3 + >>> d.bar.x + 1 + + Still like a dict though + + >>> o = EasyDict({'clean':True}) + >>> list(o.items()) + [('clean', True)] + + And like a class + + >>> class Flower(EasyDict): + ... power = 1 + ... + >>> f = Flower() + >>> f.power + 1 + >>> f = Flower({'height': 12}) + >>> f.height + 12 + >>> f['power'] + 1 + >>> sorted(f.keys()) + ['height', 'power'] + + update and pop items + >>> d = EasyDict(a=1, b='2') + >>> e = EasyDict(c=3.0, a=9.0) + >>> d.update(e) + >>> d.c + 3.0 + >>> d['c'] + 3.0 + >>> d.get('c') + 3.0 + >>> d.update(a=4, b=4) + >>> d.b + 4 + >>> d.pop('a') + 4 + >>> d.a + Traceback (most recent call last): + ... 
AttributeError: 'EasyDict' object has no attribute 'a'
+    """
+    def __init__(self, d=None, **kwargs):
+        if d is None:
+            d = {}
+        else:
+            d = dict(d)
+        if kwargs:
+            d.update(**kwargs)
+        for k, v in d.items():
+            setattr(self, k, v)
+        # Class attributes
+        for k in self.__class__.__dict__.keys():
+            if not (k.startswith('__') and k.endswith('__')) and k not in ('update', 'pop'):
+                setattr(self, k, getattr(self, k))
+
+    def __setattr__(self, name, value):
+        if isinstance(value, (list, tuple)):
+            value = [self.__class__(x)
+                     if isinstance(x, dict) else x for x in value]
+        elif isinstance(value, dict) and not isinstance(value, self.__class__):
+            value = self.__class__(value)
+        super(EasyDict, self).__setattr__(name, value)
+        super(EasyDict, self).__setitem__(name, value)
+
+    __setitem__ = __setattr__
+
+    def update(self, e=None, **f):
+        d = e or dict()
+        d.update(f)
+        for k in d:
+            setattr(self, k, d[k])
+
+    def pop(self, k, d=None):
+        delattr(self, k)
+        return super(EasyDict, self).pop(k, d)
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdd994b49294485c27610772f97f177741f5518f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .utils.env import setup_environment
+
+setup_environment()
+
+
+# This line will be programmatically read/written by setup.py.
+# Leave it at the bottom of this file and don't touch it.
+__version__ = "0.6"
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..99da0469ae7e169d8970e4b642fed3f870076860
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+# File:
+
+
+from . import catalog as _UNUSED  # register the handler
+from .detection_checkpoint import DetectionCheckpointer
+from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
+
+__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/c2_model_loading.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/c2_model_loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6de2a3c830089aa7a0d27df96bb4a45fc5a7b0d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/c2_model_loading.py
@@ -0,0 +1,412 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import re
+from typing import Dict, List
+import torch
+from tabulate import tabulate
+
+
+def convert_basic_c2_names(original_keys):
+    """
+    Apply some basic name conversion to names in C2 weights.
+    It only deals with typical backbone models.
+
+    Args:
+        original_keys (list[str]):
+    Returns:
+        list[str]: The same number of strings matching those in original_keys.
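+
+    Example (illustrative, using typical C2 backbone key names):
+        >>> convert_basic_c2_names(["conv1_w", "res2_0_branch2a_bn_s"])
+        ['stem.conv1.weight', 'res2.0.conv1.norm.weight']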
+ """ + layer_keys = copy.deepcopy(original_keys) + layer_keys = [ + {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys + ] # some hard-coded mappings + + layer_keys = [k.replace("_", ".") for k in layer_keys] + layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] + layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] + # Uniform both bn and gn names to "norm" + layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] + + # stem + layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] + # to avoid mis-matching with "conv1" in other components (e.g. detection head) + layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] + + # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) + # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] + # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] + # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] + # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] + + # blocks + layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] + layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] + layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] + layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] + + # DensePose substitutions + layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] + layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] + layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] + layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] + layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] + return layer_keys + + +def convert_c2_detectron_names(weights): + """ + Map Caffe2 Detectron weight names to Detectron2 names. 
+ + Args: + weights (dict): name -> tensor + + Returns: + dict: detectron2 names -> tensor + dict: detectron2 names -> C2 names + """ + logger = logging.getLogger(__name__) + logger.info("Renaming Caffe2 weights ......") + original_keys = sorted(weights.keys()) + layer_keys = copy.deepcopy(original_keys) + + layer_keys = convert_basic_c2_names(layer_keys) + + # -------------------------------------------------------------------------- + # RPN hidden representation conv + # -------------------------------------------------------------------------- + # FPN case + # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then + # shared for all other levels, hence the appearance of "fpn2" + layer_keys = [ + k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys + ] + # Non-FPN case + layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # RPN box transformation conv + # -------------------------------------------------------------------------- + # FPN case (see note above about "fpn2") + layer_keys = [ + k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") + for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + # Non-FPN case + layer_keys = [ + k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + + # -------------------------------------------------------------------------- + # Fast R-CNN box head + # -------------------------------------------------------------------------- + layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] + layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] + layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] + layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] + # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s + layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # FPN lateral and output convolutions + # -------------------------------------------------------------------------- + def fpn_map(name): + """ + Look for keys with the following patterns: + 1) Starts with "fpn.inner." 
+ Example: "fpn.inner.res2.2.sum.lateral.weight" + Meaning: These are lateral pathway convolutions + 2) Starts with "fpn.res" + Example: "fpn.res2.2.sum.weight" + Meaning: These are FPN output convolutions + """ + splits = name.split(".") + norm = ".norm" if "norm" in splits else "" + if name.startswith("fpn.inner."): + # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] + stage = int(splits[2][len("res") :]) + return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) + elif name.startswith("fpn.res"): + # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] + stage = int(splits[1][len("res") :]) + return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) + return name + + layer_keys = [fpn_map(k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # Mask R-CNN mask head + # -------------------------------------------------------------------------- + # roi_heads.StandardROIHeads case + layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] + layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] + layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] + # roi_heads.Res5ROIHeads case + layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Keypoint R-CNN head + # -------------------------------------------------------------------------- + # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" + layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] + layer_keys = [ + k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys + ] + layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Done with replacements + # -------------------------------------------------------------------------- + assert len(set(layer_keys)) == len(layer_keys) + assert len(original_keys) == len(layer_keys) + + new_weights = {} + new_keys_to_original_keys = {} + for orig, renamed in zip(original_keys, layer_keys): + new_keys_to_original_keys[renamed] = orig + if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): + # remove the meaningless prediction weight for background class + new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 + new_weights[renamed] = weights[orig][new_start_idx:] + logger.info( + "Remove prediction weight for background class in {}. The shape changes from " + "{} to {}.".format( + renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) + ) + ) + elif renamed.startswith("cls_score."): + # move weights of bg class from original index 0 to last index + logger.info( + "Move classification weights for background class in {} from index 0 to " + "index {}.".format(renamed, weights[orig].shape[0] - 1) + ) + new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) + else: + new_weights[renamed] = weights[orig] + + return new_weights, new_keys_to_original_keys + + +# Note the current matching is not symmetric. +# it assumes model_state_dict will have longer names. 
+def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): + """ + Match names between the two state-dict, and returns a new chkpt_state_dict with names + converted to match model_state_dict with heuristics. The returned dict can be later + loaded with fvcore checkpointer. + If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 + model and will be renamed at first. + + Strategy: suppose that the models that we will create will have prefixes appended + to each of its keys, for example due to an extra level of nesting that the original + pre-trained weights from ImageNet won't contain. For example, model.state_dict() + might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains + res2.conv1.weight. We thus want to match both parameters together. + For that, we look for each model weight, look among all loaded keys if there is one + that is a suffix of the current weight name, and use it if that's the case. + If multiple matches exist, take the one with longest size + of the corresponding name. For example, for the same model as before, the pretrained + weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, + we want to match backbone[0].body.conv1.weight to conv1.weight, and + backbone[0].body.res2.conv1.weight to res2.conv1.weight. + """ + model_keys = sorted(model_state_dict.keys()) + if c2_conversion: + ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) + # original_keys: the name in the original dict (before renaming) + else: + original_keys = {x: x for x in ckpt_state_dict.keys()} + ckpt_keys = sorted(ckpt_state_dict.keys()) + + def match(a, b): + # Matched ckpt_key should be a complete (starts with '.') suffix. + # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, + # but matches whatever_conv1 or mesh_head.whatever_conv1. + return a == b or a.endswith("." + b) + + # get a matrix of string matches, where each (i, j) entry correspond to the size of the + # ckpt_key string, if it matches + match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] + match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) + # use the matched one with longest size in case of multiple matches + max_match_size, idxs = match_matrix.max(1) + # remove indices that correspond to no-match + idxs[max_match_size == 0] = -1 + + logger = logging.getLogger(__name__) + # matched_pairs (matched checkpoint key --> matched model key) + matched_keys = {} + result_state_dict = {} + for idx_model, idx_ckpt in enumerate(idxs.tolist()): + if idx_ckpt == -1: + continue + key_model = model_keys[idx_model] + key_ckpt = ckpt_keys[idx_ckpt] + value_ckpt = ckpt_state_dict[key_ckpt] + shape_in_model = model_state_dict[key_model].shape + + if shape_in_model != value_ckpt.shape: + logger.warning( + "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( + key_ckpt, value_ckpt.shape, key_model, shape_in_model + ) + ) + logger.warning( + "{} will not be loaded. Please double check and see if this is desired.".format( + key_ckpt + ) + ) + continue + + assert key_model not in result_state_dict + result_state_dict[key_model] = value_ckpt + if key_ckpt in matched_keys: # already added to matched_keys + logger.error( + "Ambiguity found for {} in checkpoint!" 
+ "It matches at least two keys in the model ({} and {}).".format( + key_ckpt, key_model, matched_keys[key_ckpt] + ) + ) + raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") + + matched_keys[key_ckpt] = key_model + + # logging: + matched_model_keys = sorted(matched_keys.values()) + if len(matched_model_keys) == 0: + logger.warning("No weights in checkpoint matched with model.") + return ckpt_state_dict + common_prefix = _longest_common_prefix(matched_model_keys) + rev_matched_keys = {v: k for k, v in matched_keys.items()} + original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} + + model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) + table = [] + memo = set() + for key_model in matched_model_keys: + if key_model in memo: + continue + if key_model in model_key_groups: + group = model_key_groups[key_model] + memo |= set(group) + shapes = [tuple(model_state_dict[k].shape) for k in group] + table.append( + ( + _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", + _group_str([original_keys[k] for k in group]), + " ".join([str(x).replace(" ", "") for x in shapes]), + ) + ) + else: + key_checkpoint = original_keys[key_model] + shape = str(tuple(model_state_dict[key_model].shape)) + table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) + table_str = tabulate( + table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] + ) + logger.info( + "Following weights matched with " + + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + + ":\n" + + table_str + ) + + unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] + for k in unmatched_ckpt_keys: + result_state_dict[k] = ckpt_state_dict[k] + return result_state_dict + + +def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): + """ + Params in the same submodule are grouped together. + + Args: + keys: names of all parameters + original_names: mapping from parameter name to their name in the checkpoint + + Returns: + dict[name -> all other names in the same group] + """ + + def _submodule_name(key): + pos = key.rfind(".") + if pos < 0: + return None + prefix = key[: pos + 1] + return prefix + + all_submodules = [_submodule_name(k) for k in keys] + all_submodules = [x for x in all_submodules if x] + all_submodules = sorted(all_submodules, key=len) + + ret = {} + for prefix in all_submodules: + group = [k for k in keys if k.startswith(prefix)] + if len(group) <= 1: + continue + original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) + if len(original_name_lcp) == 0: + # don't group weights if original names don't share prefix + continue + + for k in group: + if k in ret: + continue + ret[k] = group + return ret + + +def _longest_common_prefix(names: List[str]) -> str: + """ + ["abc.zfg", "abc.zef"] -> "abc." + """ + names = [n.split(".") for n in names] + m1, m2 = min(names), max(names) + ret = [a for a, b in zip(m1, m2) if a == b] + ret = ".".join(ret) + "." 
if len(ret) else ""
+    return ret
+
+
+def _longest_common_prefix_str(names: List[str]) -> str:
+    m1, m2 = min(names), max(names)
+    lcp = []
+    for a, b in zip(m1, m2):
+        if a == b:
+            lcp.append(a)
+        else:
+            break
+    lcp = "".join(lcp)
+    return lcp
+
+
+def _group_str(names: List[str]) -> str:
+    """
+    Turn "common1", "common2", "common3" into "common{1,2,3}"
+    """
+    lcp = _longest_common_prefix_str(names)
+    rest = [x[len(lcp) :] for x in names]
+    rest = "{" + ",".join(rest) + "}"
+    ret = lcp + rest
+
+    # add some simplification for BN specifically
+    ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
+    ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
+    return ret
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/catalog.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..2287e9b7fbf4d3ed7ec9bdb26d6d1d4d8ed91196
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/catalog.py
@@ -0,0 +1,114 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+
+from custom_detectron2.utils.file_io import PathHandler, PathManager
+
+
+class ModelCatalog(object):
+    """
+    Store mappings from names to third-party models.
+    """
+
+    S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
+
+    # MSRA models have STRIDE_IN_1X1=True. False otherwise.
+    # NOTE: all BN models here have fused BN into an affine layer.
+    # As a result, you should only load them to a model with "FrozenBN".
+    # Loading them to a model with regular BN or SyncBN is wrong.
+    # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
+    # which should be negligible for training.
+    # NOTE: all models here use PIXEL_STD=[1,1,1]
+    # NOTE: Most of the BN models here are no longer used. We use the
+    # re-converted pre-trained models under detectron2 model zoo instead.
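+    # For example (illustrative), ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
+    # resolves to
+    # "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl".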
+ C2_IMAGENET_MODELS = { + "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", + "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", + "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", + "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", + "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", + "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl", + "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl", + } + + C2_DETECTRON_PATH_FORMAT = ( + "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950 + ) + + C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival" + C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival" + + # format: {model_name} -> part of the url + C2_DETECTRON_MODELS = { + "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950 + "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950 + "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950 + "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950 + "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950 + "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950 + "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950 + "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950 + "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950 + "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950 + "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950 + "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950 + "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950 + } + + @staticmethod + def get(name): + if name.startswith("Caffe2Detectron/COCO"): + return ModelCatalog._get_c2_detectron_baseline(name) + if name.startswith("ImageNetPretrained/"): + return ModelCatalog._get_c2_imagenet_pretrained(name) + raise RuntimeError("model not present in the catalog: {}".format(name)) + + @staticmethod + def _get_c2_imagenet_pretrained(name): + prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX + name = name[len("ImageNetPretrained/") :] + name = ModelCatalog.C2_IMAGENET_MODELS[name] + url = "/".join([prefix, name]) + return url + + @staticmethod + def _get_c2_detectron_baseline(name): + name = name[len("Caffe2Detectron/COCO/") :] + url = ModelCatalog.C2_DETECTRON_MODELS[name] + if "keypoint_rcnn" in name: + dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS + else: + dataset = ModelCatalog.C2_DATASET_COCO + + if "35998355/rpn_R-50-C4_1x" in name: + # this one model is somehow different from others .. 
+ type = "rpn" + else: + type = "generalized_rcnn" + + # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. + url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format( + prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset + ) + return url + + +class ModelCatalogHandler(PathHandler): + """ + Resolve URL like catalog://. + """ + + PREFIX = "catalog://" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path, **kwargs): + logger = logging.getLogger(__name__) + catalog_path = ModelCatalog.get(path[len(self.PREFIX) :]) + logger.info("Catalog entry {} points to {}".format(path, catalog_path)) + return PathManager.get_local_path(catalog_path, **kwargs) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open(self._get_local_path(path), mode, **kwargs) + + +PathManager.register_handler(ModelCatalogHandler()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/detection_checkpoint.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/detection_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..2776783ac380d931ded4af69fc5e08bab5ab159a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/checkpoint/detection_checkpoint.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import os +import pickle +from urllib.parse import parse_qs, urlparse +import torch +from fvcore.common.checkpoint import Checkpointer +from torch.nn.parallel import DistributedDataParallel + +import custom_detectron2.utils.comm as comm +from custom_detectron2.utils.file_io import PathManager + +from .c2_model_loading import align_and_update_state_dicts + + +class DetectionCheckpointer(Checkpointer): + """ + Same as :class:`Checkpointer`, but is able to: + 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models. + 2. correctly load checkpoints that are only available on the master worker + """ + + def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): + is_main_process = comm.is_main_process() + super().__init__( + model, + save_dir, + save_to_disk=is_main_process if save_to_disk is None else save_to_disk, + **checkpointables, + ) + self.path_manager = PathManager + self._parsed_url_during_load = None + + def load(self, path, *args, **kwargs): + assert self._parsed_url_during_load is None + need_sync = False + logger = logging.getLogger(__name__) + logger.info("[DetectionCheckpointer] Loading from {} ...".format(path)) + + if path and isinstance(self.model, DistributedDataParallel): + path = self.path_manager.get_local_path(path) + has_file = os.path.isfile(path) + all_has_file = comm.all_gather(has_file) + if not all_has_file[0]: + raise OSError(f"File {path} not found on main worker.") + if not all(all_has_file): + logger.warning( + f"Not all workers can read checkpoint {path}. " + "Training may fail to fully resume." + ) + # TODO: broadcast the checkpoint file contents from main + # worker, and load from it instead. 
+                need_sync = True
+            if not has_file:
+                path = None  # don't load if not readable
+
+        if path:
+            parsed_url = urlparse(path)
+            self._parsed_url_during_load = parsed_url
+            path = parsed_url._replace(query="").geturl()  # remove query from filename
+            path = self.path_manager.get_local_path(path)
+
+        self.logger.setLevel('CRITICAL')
+        ret = super().load(path, *args, **kwargs)
+
+        if need_sync:
+            logger.info("Broadcasting model states from main worker ...")
+            self.model._sync_params_and_buffers()
+        self._parsed_url_during_load = None  # reset to None
+        return ret
+
+    def _load_file(self, filename):
+        if filename.endswith(".pkl"):
+            with PathManager.open(filename, "rb") as f:
+                data = pickle.load(f, encoding="latin1")
+            if "model" in data and "__author__" in data:
+                # file is in Detectron2 model zoo format
+                self.logger.info("Reading a file from '{}'".format(data["__author__"]))
+                return data
+            else:
+                # assume file is from Caffe2 / Detectron1 model zoo
+                if "blobs" in data:
+                    # Detection models have "blobs", but ImageNet models don't
+                    data = data["blobs"]
+                data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
+                return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
+        elif filename.endswith(".pyth"):
+            # assume file is from pycls; no one else seems to use the ".pyth" extension
+            with PathManager.open(filename, "rb") as f:
+                data = torch.load(f)
+            assert (
+                "model_state" in data
+            ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
+            model_state = {
+                k: v
+                for k, v in data["model_state"].items()
+                if not k.endswith("num_batches_tracked")
+            }
+            return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
+
+        loaded = self._torch_load(filename)
+        if "model" not in loaded:
+            loaded = {"model": loaded}
+        assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`"
+        parsed_url = self._parsed_url_during_load
+        queries = parse_qs(parsed_url.query)
+        if queries.pop("matching_heuristics", "False") == ["True"]:
+            loaded["matching_heuristics"] = True
+        if len(queries) > 0:
+            raise ValueError(
+                f"Unsupported query remaining: {queries}, original filename: {parsed_url.geturl()}"
+            )
+        return loaded
+
+    def _torch_load(self, f):
+        return super()._load_file(f)
+
+    def _load_model(self, checkpoint):
+        if checkpoint.get("matching_heuristics", False):
+            self._convert_ndarray_to_tensor(checkpoint["model"])
+            # convert weights by name-matching heuristics
+            checkpoint["model"] = align_and_update_state_dicts(
+                self.model.state_dict(),
+                checkpoint["model"],
+                c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
+            )
+        # for non-caffe2 models, use standard ways to load it
+        incompatible = super()._load_model(checkpoint)
+
+        model_buffers = dict(self.model.named_buffers(recurse=False))
+        for k in ["pixel_mean", "pixel_std"]:
+            # Ignore missing key message about pixel_mean/std.
+            # Though they may be missing in old checkpoints, they will be correctly
+            # initialized from config anyway.
+            if k in model_buffers:
+                try:
+                    incompatible.missing_keys.remove(k)
+                except ValueError:
+                    pass
+        for k in incompatible.unexpected_keys[:]:
+            # Ignore unexpected keys about cell anchors. They exist in old checkpoints
+            # but now they are non-persistent buffers and will not be in new checkpoints.
+ if "anchor_generator.cell_anchors" in k: + incompatible.unexpected_keys.remove(k) + return incompatible diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b98b0872b423a665525d25ab8c203c217543e149 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .compat import downgrade_config, upgrade_config +from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable +from .instantiate import instantiate +from .lazy import LazyCall, LazyConfig + +__all__ = [ + "CfgNode", + "get_cfg", + "global_cfg", + "set_global_cfg", + "downgrade_config", + "upgrade_config", + "configurable", + "instantiate", + "LazyCall", + "LazyConfig", +] + + +from custom_detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/compat.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..11a08c439bf14defd880e37a938fab8a08e68eeb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/compat.py @@ -0,0 +1,229 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Backward compatibility of configs. + +Instructions to bump version: ++ It's not needed to bump version if new keys are added. + It's only needed when backward-incompatible changes happen + (i.e., some existing keys disappear, or the meaning of a key changes) ++ To bump version, do the following: + 1. Increment _C.VERSION in defaults.py + 2. Add a converter in this file. + + Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, + and a function "downgrade" which in-place downgrades config from X to X-1 + + In each function, VERSION is left unchanged. + + Each converter assumes that its input has the relevant keys + (i.e., the input is not a partial config). + 3. Run the tests (test_config.py) to make sure the upgrade & downgrade + functions are consistent. +""" + +import logging +from typing import List, Optional, Tuple + +from .config import CfgNode as CN +from .defaults import _C + +__all__ = ["upgrade_config", "downgrade_config"] + + +def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: + """ + Upgrade a config from its current version to a newer version. + + Args: + cfg (CfgNode): + to_version (int): defaults to the latest version. + """ + cfg = cfg.clone() + if to_version is None: + to_version = _C.VERSION + + assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version): + converter = globals()["ConverterV" + str(k + 1)] + converter.upgrade(cfg) + cfg.VERSION = k + 1 + return cfg + + +def downgrade_config(cfg: CN, to_version: int) -> CN: + """ + Downgrade a config from its current version to an older version. + + Args: + cfg (CfgNode): + to_version (int): + + Note: + A general downgrade of arbitrary configs is not always possible due to the + different functionalities in different versions. + The purpose of downgrade is only to recover the defaults in old versions, + allowing it to load an old partial yaml config. 
+ Therefore, the implementation only needs to fill in the default values + in the old version when a general downgrade is not possible. + """ + cfg = cfg.clone() + assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version, -1): + converter = globals()["ConverterV" + str(k)] + converter.downgrade(cfg) + cfg.VERSION = k - 1 + return cfg + + +def guess_version(cfg: CN, filename: str) -> int: + """ + Guess the version of a partial config where the VERSION field is not specified. + Returns the version, or the latest if cannot make a guess. + + This makes it easier for users to migrate. + """ + logger = logging.getLogger(__name__) + + def _has(name: str) -> bool: + cur = cfg + for n in name.split("."): + if n not in cur: + return False + cur = cur[n] + return True + + # Most users' partial configs have "MODEL.WEIGHT", so guess on it + ret = None + if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): + ret = 1 + + if ret is not None: + logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) + else: + ret = _C.VERSION + logger.warning( + "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format( + filename, ret + ) + ) + return ret + + +def _rename(cfg: CN, old: str, new: str) -> None: + old_keys = old.split(".") + new_keys = new.split(".") + + def _set(key_seq: List[str], val: str) -> None: + cur = cfg + for k in key_seq[:-1]: + if k not in cur: + cur[k] = CN() + cur = cur[k] + cur[key_seq[-1]] = val + + def _get(key_seq: List[str]) -> CN: + cur = cfg + for k in key_seq: + cur = cur[k] + return cur + + def _del(key_seq: List[str]) -> None: + cur = cfg + for k in key_seq[:-1]: + cur = cur[k] + del cur[key_seq[-1]] + if len(cur) == 0 and len(key_seq) > 1: + _del(key_seq[:-1]) + + _set(new_keys, _get(old_keys)) + _del(old_keys) + + +class _RenameConverter: + """ + A converter that handles simple rename. + """ + + RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) + + @classmethod + def upgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME: + _rename(cfg, old, new) + + @classmethod + def downgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME[::-1]: + _rename(cfg, new, old) + + +class ConverterV1(_RenameConverter): + RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] + + +class ConverterV2(_RenameConverter): + """ + A large bulk of rename, before public release. 
+ """ + + RENAME = [ + ("MODEL.WEIGHT", "MODEL.WEIGHTS"), + ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), + ( + "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", + "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", + ), + ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), + ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), + ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), + ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), + ("TEST.AUG_ON", "TEST.AUG.ENABLED"), + ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), + ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), + ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), + ] + + @classmethod + def upgrade(cls, cfg: CN) -> None: + super().upgrade(cfg) + + if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": + _rename( + cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" + ) + _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] + else: + _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") + _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] + + @classmethod + def downgrade(cls, cfg: CN) -> None: + super().downgrade(cfg) + + _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") + _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") + cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS + cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES + cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/config.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ba1a5e4b7006bece7388efb59008dc902db0ba --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/config.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import functools +import inspect +import logging +from fvcore.common.config import CfgNode as _CfgNode + +from custom_detectron2.utils.file_io import PathManager + + +class CfgNode(_CfgNode): + """ + The same as `fvcore.common.config.CfgNode`, but different in: + + 1. Use unsafe yaml loading by default. + Note that this may lead to arbitrary code execution: you must not + load a config file from untrusted sources before manually inspecting + the content of the file. + 2. Support config versioning. 
+ When attempting to merge an old config, it will convert the old config automatically. + + .. automethod:: clone + .. automethod:: freeze + .. automethod:: defrost + .. automethod:: is_frozen + .. automethod:: load_yaml_with_base + .. automethod:: merge_from_list + .. automethod:: merge_from_other_cfg + """ + + @classmethod + def _open_cfg(cls, filename): + return PathManager.open(filename, "r") + + # Note that the default value of allow_unsafe is changed to True + def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: + """ + Load content from the given config file and merge it into self. + + Args: + cfg_filename: config filename + allow_unsafe: allow unsafe yaml syntax + """ + assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" + loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + + # defaults.py needs to import CfgNode + from .defaults import _C + + latest_ver = _C.VERSION + assert ( + latest_ver == self.VERSION + ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" + + logger = logging.getLogger(__name__) + + loaded_ver = loaded_cfg.get("VERSION", None) + if loaded_ver is None: + from .compat import guess_version + + loaded_ver = guess_version(loaded_cfg, cfg_filename) + assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( + loaded_ver, self.VERSION + ) + + if loaded_ver == self.VERSION: + self.merge_from_other_cfg(loaded_cfg) + else: + # compat.py needs to import CfgNode + from .compat import upgrade_config, downgrade_config + + logger.warning( + "Loading an old v{} config file '{}' by automatically upgrading to v{}. " + "See docs/CHANGELOG.md for instructions to update your files.".format( + loaded_ver, cfg_filename, self.VERSION + ) + ) + # To convert, first obtain a full config at an old version + old_self = downgrade_config(self, to_version=loaded_ver) + old_self.merge_from_other_cfg(loaded_cfg) + new_config = upgrade_config(old_self) + self.clear() + self.update(new_config) + + def dump(self, *args, **kwargs): + """ + Returns: + str: a yaml string representation of the config + """ + # to make it show up in docs + return super().dump(*args, **kwargs) + + +global_cfg = CfgNode() + + +def get_cfg() -> CfgNode: + """ + Get a copy of the default config. + + Returns: + a detectron2 CfgNode instance. + """ + from .defaults import _C + + return _C.clone() + + +def set_global_cfg(cfg: CfgNode) -> None: + """ + Let the global config point to the given cfg. + + Assume that the given "cfg" has the key "KEY", after calling + `set_global_cfg(cfg)`, the key can be accessed by: + :: + from custom_detectron2.config import global_cfg + print(global_cfg.KEY) + + By using a hacky global config, you can access these configs anywhere, + without having to pass the config object or the values deep into the code. + This is a hacky feature introduced for quick prototyping / research exploration. + """ + global global_cfg + global_cfg.clear() + global_cfg.update(cfg) + + +def configurable(init_func=None, *, from_config=None): + """ + Decorate a function or a class's __init__ method so that it can be called + with a :class:`CfgNode` object using a :func:`from_config` function that translates + :class:`CfgNode` to arguments. 
+
+    Examples:
+    ::
+        # Usage 1: Decorator on __init__:
+        class A:
+            @configurable
+            def __init__(self, a, b=2, c=3):
+                pass
+
+            @classmethod
+            def from_config(cls, cfg):   # 'cfg' must be the first argument
+                # Returns kwargs to be passed to __init__
+                return {"a": cfg.A, "b": cfg.B}
+
+        a1 = A(a=1, b=2)  # regular construction
+        a2 = A(cfg)       # construct with a cfg
+        a3 = A(cfg, b=3, c=4)  # construct with extra overwrite
+
+        # Usage 2: Decorator on any function. Needs an extra from_config argument:
+        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
+        def a_func(a, b=2, c=3):
+            pass
+
+        a1 = a_func(a=1, b=2)  # regular call
+        a2 = a_func(cfg)       # call with a cfg
+        a3 = a_func(cfg, b=3, c=4)  # call with extra overwrite
+
+    Args:
+        init_func (callable): a class's ``__init__`` method in usage 1. The
+            class must have a ``from_config`` classmethod which takes `cfg` as
+            the first argument.
+        from_config (callable): the from_config function in usage 2. It must take `cfg`
+            as its first argument.
+    """
+
+    if init_func is not None:
+        assert (
+            inspect.isfunction(init_func)
+            and from_config is None
+            and init_func.__name__ == "__init__"
+        ), "Incorrect use of @configurable. Check API documentation for examples."
+
+        @functools.wraps(init_func)
+        def wrapped(self, *args, **kwargs):
+            try:
+                from_config_func = type(self).from_config
+            except AttributeError as e:
+                raise AttributeError(
+                    "Class with @configurable must have a 'from_config' classmethod."
+                ) from e
+            if not inspect.ismethod(from_config_func):
+                raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
+
+            if _called_with_cfg(*args, **kwargs):
+                explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
+                init_func(self, **explicit_args)
+            else:
+                init_func(self, *args, **kwargs)
+
+        return wrapped
+
+    else:
+        if from_config is None:
+            return configurable  # @configurable() is made equivalent to @configurable
+        assert inspect.isfunction(
+            from_config
+        ), "from_config argument of configurable must be a function!"
+
+        def wrapper(orig_func):
+            @functools.wraps(orig_func)
+            def wrapped(*args, **kwargs):
+                if _called_with_cfg(*args, **kwargs):
+                    explicit_args = _get_args_from_config(from_config, *args, **kwargs)
+                    return orig_func(**explicit_args)
+                else:
+                    return orig_func(*args, **kwargs)
+
+            wrapped.from_config = from_config
+            return wrapped
+
+        return wrapper
+
+
+def _get_args_from_config(from_config_func, *args, **kwargs):
+    """
+    Use `from_config` to obtain explicit arguments.
+ + Returns: + dict: arguments to be used for cls.__init__ + """ + signature = inspect.signature(from_config_func) + if list(signature.parameters.keys())[0] != "cfg": + if inspect.isfunction(from_config_func): + name = from_config_func.__name__ + else: + name = f"{from_config_func.__self__}.from_config" + raise TypeError(f"{name} must take 'cfg' as the first argument!") + support_var_arg = any( + param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] + for param in signature.parameters.values() + ) + if support_var_arg: # forward all arguments to from_config, if from_config accepts them + ret = from_config_func(*args, **kwargs) + else: + # forward supported arguments to from_config + supported_arg_names = set(signature.parameters.keys()) + extra_kwargs = {} + for name in list(kwargs.keys()): + if name not in supported_arg_names: + extra_kwargs[name] = kwargs.pop(name) + ret = from_config_func(*args, **kwargs) + # forward the other arguments to __init__ + ret.update(extra_kwargs) + return ret + + +def _called_with_cfg(*args, **kwargs): + """ + Returns: + bool: whether the arguments contain CfgNode and should be considered + forwarded to from_config. + """ + from omegaconf import DictConfig + + if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): + return True + if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): + return True + # `from_config`'s first argument is forced to be "cfg". + # So the above check covers all cases. + return False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/defaults.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec2f00a69ac53d97dcf67623c7b262d911454b4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/defaults.py @@ -0,0 +1,650 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .config import CfgNode as CN + +# NOTE: given the new config system +# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html), +# we will stop adding new functionalities to default CfgNode. + +# ----------------------------------------------------------------------------- +# Convention about Training / Test specific parameters +# ----------------------------------------------------------------------------- +# Whenever an argument can be either used for training or for testing, the +# corresponding name will be post-fixed by a _TRAIN for a training parameter, +# or _TEST for a test-specific parameter. +# For example, the number of images during training will be +# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be +# IMAGES_PER_BATCH_TEST + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- + +_C = CN() + +# The version number, to upgrade from old configs to new ones if any +# changes happen. It's recommended to keep a VERSION in your config file. +_C.VERSION = 2 + +_C.MODEL = CN() +_C.MODEL.LOAD_PROPOSALS = False +_C.MODEL.MASK_ON = False +_C.MODEL.KEYPOINT_ON = False +_C.MODEL.DEVICE = "cuda" +_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" + +# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file +# to be loaded to the model. You can find available models in the model zoo. +_C.MODEL.WEIGHTS = "" + +# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR). 
+# To train on images with a different number of channels, just set different mean & std.
+# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
+_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
+# When using pre-trained models in Detectron1 or any MSRA models,
+# std has been absorbed into its conv1 weights, so the std needs to be set 1.
+# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
+_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
+
+
+# -----------------------------------------------------------------------------
+# INPUT
+# -----------------------------------------------------------------------------
+_C.INPUT = CN()
+# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
+# Please refer to ResizeShortestEdge for detailed definition.
+# Size of the smallest side of the image during training
+_C.INPUT.MIN_SIZE_TRAIN = (800,)
+# Sample size of smallest side by choice or random selection from range given by
+# INPUT.MIN_SIZE_TRAIN
+_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
+# Maximum size of the side of the image during training
+_C.INPUT.MAX_SIZE_TRAIN = 1333
+# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
+_C.INPUT.MIN_SIZE_TEST = 800
+# Maximum size of the side of the image during testing
+_C.INPUT.MAX_SIZE_TEST = 1333
+# Mode for flipping images used in data augmentation during training
+# choose one of ["horizontal", "vertical", "none"]
+_C.INPUT.RANDOM_FLIP = "horizontal"
+
+# `True` if cropping is used for data augmentation during training
+_C.INPUT.CROP = CN({"ENABLED": False})
+# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
+_C.INPUT.CROP.TYPE = "relative_range"
+# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
+# pixels if CROP.TYPE is "absolute"
+_C.INPUT.CROP.SIZE = [0.9, 0.9]
+
+
+# Whether the model needs RGB, YUV, HSV etc.
+# Should be one of the modes defined here, as we use PIL to read the image:
+# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
+# with BGR being the one exception. One can set image format to BGR, we will
+# internally use RGB for conversion and flip the channels over
+_C.INPUT.FORMAT = "BGR"
+# The ground truth mask format that the model will use.
+# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
+_C.INPUT.MASK_FORMAT = "polygon"  # alternative: "bitmask"
+
+
+# -----------------------------------------------------------------------------
+# Dataset
+# -----------------------------------------------------------------------------
+_C.DATASETS = CN()
+# List of the dataset names for training. Must be registered in DatasetCatalog
+# Samples from these datasets will be merged and used as one dataset.
+_C.DATASETS.TRAIN = ()
+# List of the pre-computed proposal files for training, which must be consistent
+# with datasets listed in DATASETS.TRAIN.
+_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
+# Number of top scoring precomputed proposals to keep for training
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
+# List of the dataset names for testing. Must be registered in DatasetCatalog
+_C.DATASETS.TEST = ()
+# List of the pre-computed proposal files for test, which must be consistent
+# with datasets listed in DATASETS.TEST.
+_C.DATASETS.PROPOSAL_FILES_TEST = ()
+# Number of top scoring precomputed proposals to keep for test
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
+
+# -----------------------------------------------------------------------------
+# DataLoader
+# -----------------------------------------------------------------------------
+_C.DATALOADER = CN()
+# Number of data loading threads
+_C.DATALOADER.NUM_WORKERS = 4
+# If True, each batch should contain only images for which the aspect ratio
+# is compatible. This groups portrait images together, and landscape images
+# are not batched with portrait images.
+_C.DATALOADER.ASPECT_RATIO_GROUPING = True
+# Options: TrainingSampler, RepeatFactorTrainingSampler
+_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
+# Repeat threshold for RepeatFactorTrainingSampler
+_C.DATALOADER.REPEAT_THRESHOLD = 0.0
+# If True, when working on datasets that have instance annotations, the
+# training dataloader will filter out images without associated annotations
+_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
+
+# ---------------------------------------------------------------------------- #
+# Backbone options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.BACKBONE = CN()
+
+_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
+# Freeze the first several stages so they are not trained.
+# There are 5 stages in ResNet. The first is a convolution, and the following
+# stages are each group of residual blocks.
+_C.MODEL.BACKBONE.FREEZE_AT = 2
+
+
+# ---------------------------------------------------------------------------- #
+# FPN options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.FPN = CN()
+# Names of the input feature maps to be used by FPN
+# They must have contiguous power of 2 strides
+# e.g., ["res2", "res3", "res4", "res5"]
+_C.MODEL.FPN.IN_FEATURES = []
+_C.MODEL.FPN.OUT_CHANNELS = 256
+
+# Options: "" (no norm), "GN"
+_C.MODEL.FPN.NORM = ""
+
+# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
+_C.MODEL.FPN.FUSE_TYPE = "sum"
+
+
+# ---------------------------------------------------------------------------- #
+# Proposal generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.PROPOSAL_GENERATOR = CN()
+# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
+_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
+# Proposal height and width both need to be greater than MIN_SIZE
+# (at the scale used during training or inference)
+_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
+
+
+# ---------------------------------------------------------------------------- #
+# Anchor generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ANCHOR_GENERATOR = CN()
+# The generator can be any name in the ANCHOR_GENERATOR registry
+_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
+# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
+# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
+# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
+# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
+_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
+# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
+# ratios are generated by an anchor generator.
+# Format: list[list[float]].
ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W) +# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, +# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used +# for all IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] +# Anchor angles. +# list[list[float]], the angle in degrees, for each input feature map. +# ANGLES[i] specifies the list of angles for IN_FEATURES[i]. +_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] +# Relative offset between the center of the first anchor and the top-left corner of the image +# Value has to be in [0, 1). Recommend to use 0.5, which means half stride. +# The value is not expected to affect model accuracy. +_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 + +# ---------------------------------------------------------------------------- # +# RPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.RPN = CN() +_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY + +# Names of the input feature maps to be used by RPN +# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN +_C.MODEL.RPN.IN_FEATURES = ["res4"] +# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels +# Set to -1 or a large value, e.g. 100000, to disable pruning anchors +_C.MODEL.RPN.BOUNDARY_THRESH = -1 +# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] +# Minimum overlap required between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD +# ==> positive RPN example: 1) +# Maximum overlap allowed between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD +# ==> negative RPN example: 0) +# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) +# are ignored (-1) +_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] +_C.MODEL.RPN.IOU_LABELS = [0, -1, 1] +# Number of regions per image used to train RPN +_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 +# Target fraction of foreground (positive) examples per RPN minibatch +_C.MODEL.RPN.POSITIVE_FRACTION = 0.5 +# Options are: "smooth_l1", "giou", "diou", "ciou" +_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" +_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 +# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets +_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 +_C.MODEL.RPN.LOSS_WEIGHT = 1.0 +# Number of top scoring RPN proposals to keep before applying NMS +# When FPN is used, this is *per FPN level* (not total) +_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 +_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 +# Number of top scoring RPN proposals to keep after applying NMS +# When FPN is used, this limit is applied per level and then again to the union +# of proposals from all levels +# NOTE: When FPN is used, the meaning of this config is different from Detectron1. +# It means per-batch topk in Detectron1, but per-image topk here. +# See the "find_top_rpn_proposals" function for details. +_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 +_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 +# NMS threshold used on RPN proposals +_C.MODEL.RPN.NMS_THRESH = 0.7 +# Set this to -1 to use the same number of output channels as input channels. 
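+# (For example, with an FPN backbone the RPN head input has MODEL.FPN.OUT_CHANNELS
+# = 256 channels, so the default [-1] below yields a single 256-channel conv.)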
+_C.MODEL.RPN.CONV_DIMS = [-1] + +# ---------------------------------------------------------------------------- # +# ROI HEADS options +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_HEADS = CN() +_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" +# Number of foreground classes +_C.MODEL.ROI_HEADS.NUM_CLASSES = 80 +# Names of the input feature maps to be used by ROI heads +# Currently all heads (box, mask, ...) use the same input feature map list +# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN +_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] +# IOU overlap ratios [IOU_THRESHOLD] +# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) +# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) +_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] +_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] +# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training +# Total number of RoIs per training minibatch = +# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH +# E.g., a common configuration is: 512 * 16 = 8192 +_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 +# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0) +_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 + +# Only used on test mode + +# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to +# balance obtaining high recall with not having too many low precision +# detections that will slow down inference post processing steps (like NMS) +# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down +# inference. +_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 +# Overlap threshold used for non-maximum suppression (suppress boxes with +# IoU >= this threshold) +_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 +# If True, augment proposals with ground-truth boxes before sampling proposals to +# train ROI heads. +_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True + +# ---------------------------------------------------------------------------- # +# Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_HEAD = CN() +# C4 don't use head name option +# Options for non-C4 models: FastRCNNConvFCHead, +_C.MODEL.ROI_BOX_HEAD.NAME = "" +# Options are: "smooth_l1", "giou", "diou", "ciou" +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" +# The final scaling coefficient on the box regression loss, used to balance the magnitude of its +# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`. +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 +# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets +# These are empirically chosen to approximately lead to unit variance targets +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 +_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + +_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 +# Hidden layer dimension for FC layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 +_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 +# Channel dimension for Conv layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. 
+# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_BOX_HEAD.NORM = "" +# Whether to use class agnostic for bbox regression +_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False +# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes. +_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False + +# Federated loss can be used to improve the training of LVIS +_C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False +# Sigmoid cross entrophy is used with federated loss +_C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False +# The power value applied to image_count when calcualting frequency weight +_C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER = 0.5 +# Number of classes to keep in total +_C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES = 50 + +# ---------------------------------------------------------------------------- # +# Cascaded Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_CASCADE_HEAD = CN() +# The number of cascade stages is implicitly defined by the length of the following two configs. +_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( + (10.0, 10.0, 5.0, 5.0), + (20.0, 20.0, 10.0, 10.0), + (30.0, 30.0, 15.0, 15.0), +) +_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) + + +# ---------------------------------------------------------------------------- # +# Mask Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_MASK_HEAD = CN() +_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" +_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head +_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. +# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_MASK_HEAD.NORM = "" +# Whether to use class agnostic for mask prediction +_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" + + +# ---------------------------------------------------------------------------- # +# Keypoint Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_KEYPOINT_HEAD = CN() +_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) +_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO. + +# Images with too few (or no) keypoints are excluded from training. +_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 +# Normalize by the total number of visible keypoints in the minibatch if True. +# Otherwise, normalize by the total number of keypoints that could ever exist +# in the minibatch. +# The keypoint softmax loss is only calculated on visible keypoints. +# Since the number of visible keypoints can vary significantly between +# minibatches, this has the effect of up-weighting the importance of +# minibatches with few visible keypoints. (Imagine the extreme case of +# only one visible keypoint versus N: in the case of N, each one +# contributes 1/N to the gradient compared to the single keypoint +# determining the gradient direction). 
+# loss by the total number of keypoints, if it were the case that all
+# keypoints were visible in a full minibatch. (Returning to the example,
+# this means that the one visible keypoint contributes as much as each
+# of the N keypoints.)
+_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
+# Multi-task loss weight to use for keypoints
+# Recommended values:
+# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
+# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
+_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+# ---------------------------------------------------------------------------- #
+# Semantic Segmentation Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.SEM_SEG_HEAD = CN()
+_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
+_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
+# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
+# the corresponding pixel.
+_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
+# Number of classes in the semantic segmentation head
+_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
+# Number of channels in the 3x3 convs inside semantic-FPN heads.
+_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
+# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
+_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
+# Normalization method for the convolution layers. Options: "" (no norm), "GN".
+_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
+_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
+
+_C.MODEL.PANOPTIC_FPN = CN()
+# Scaling of all losses from instance detection / segmentation head.
+_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
+
+# options when combining instance & semantic segmentation outputs
+_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used
+_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
+_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
+_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
+
+
+# ---------------------------------------------------------------------------- #
+# RetinaNet Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RETINANET = CN()
+
+# This is the number of foreground classes.
+_C.MODEL.RETINANET.NUM_CLASSES = 80
+
+_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
+
+# Convolutions to use in the cls and bbox tower
+# NOTE: this doesn't include the last conv for logits
+_C.MODEL.RETINANET.NUM_CONVS = 4
+
+# IoU overlap ratio [bg, fg] for labeling anchors.
+# Anchors with < bg are labeled negative (0)
+# Anchors with >= bg and < fg are ignored (-1)
+# Anchors with >= fg are labeled positive (1)
+_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
+_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
+
+# Prior prob for rare case (i.e. foreground) at the beginning of training.
+# This is used to set the bias for the logits layer of the classifier subnet.
+# This improves training stability in the case of heavy class imbalance.
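+# Editor's note: the helper below is an illustrative sketch, not part of the
+# detectron2 API. It shows the standard conversion of a foreground prior into
+# the classifier's logit bias, chosen so that sigmoid(bias) == prior_prob, as
+# described in the RetinaNet paper:
+def _example_bias_from_prior_prob(prior_prob: float = 0.01) -> float:
+ import math
+ # Solving sigmoid(b) = p for b gives b = -log((1 - p) / p); ~-4.6 for p = 0.01.
+ return -math.log((1.0 - prior_prob) / prior_prob)
+
+# The default below matches the paper's choice of 0.01: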
+_C.MODEL.RETINANET.PRIOR_PROB = 0.01
+
+# Inference cls score threshold: only anchors with score > SCORE_THRESH_TEST are
+# considered for inference (to improve speed)
+_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
+# Select topk candidates before NMS
+_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
+_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
+
+# Weights on (dx, dy, dw, dh) for normalizing RetinaNet anchor regression targets
+_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Loss parameters
+_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
+_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
+_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
+
+# One of BN, SyncBN, FrozenBN, GN
+# Only supports GN until unshared norm is implemented
+_C.MODEL.RETINANET.NORM = ""
+
+
+# ---------------------------------------------------------------------------- #
+# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
+# Note that parts of a resnet may be used for both the backbone and the head
+# These options apply to both
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RESNETS = CN()
+
+_C.MODEL.RESNETS.DEPTH = 50
+_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
+
+# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
+_C.MODEL.RESNETS.NUM_GROUPS = 1
+
+# Options: FrozenBN, GN, "SyncBN", "BN"
+_C.MODEL.RESNETS.NORM = "FrozenBN"
+
+# Baseline width of each group.
+# Scaling this parameter scales the width of all bottleneck layers.
+_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
+
+# Place the stride 2 conv on the 1x1 filter
+# Use True only for the original MSRA ResNet; use False for C2 and Torch models
+_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
+
+# Apply dilation in stage "res5"
+_C.MODEL.RESNETS.RES5_DILATION = 1
+
+# Output width of res2. Scaling this parameter scales the width of all 1x1 convs in ResNet
+# For R18 and R34, this needs to be set to 64
+_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
+_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
+
+# Apply Deformable Convolution in stages
+# Specify whether to apply deform_conv on Res2, Res3, Res4, Res5
+_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
+# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
+# Use False for DeformableV1.
+_C.MODEL.RESNETS.DEFORM_MODULATED = False
+# Number of groups in deformable conv.
+_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
+
+
+# ---------------------------------------------------------------------------- #
+# Solver
+# ---------------------------------------------------------------------------- #
+_C.SOLVER = CN()
+
+# Options: WarmupMultiStepLR, WarmupCosineLR.
+# See detectron2/solver/build.py for definition.
+_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
+
+_C.SOLVER.MAX_ITER = 40000
+
+_C.SOLVER.BASE_LR = 0.001
+# The end lr, only used by WarmupCosineLR
+_C.SOLVER.BASE_LR_END = 0.0
+
+_C.SOLVER.MOMENTUM = 0.9
+
+_C.SOLVER.NESTEROV = False
+
+_C.SOLVER.WEIGHT_DECAY = 0.0001
+# The weight decay that's applied to parameters of normalization layers
+# (typically the affine transformation)
+_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
+
+_C.SOLVER.GAMMA = 0.1
+# The iteration milestones at which to decrease the learning rate by GAMMA.
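+# Editor's note: the function below is an illustrative sketch, not detectron2
+# code. It mirrors how STEPS and GAMMA combine with linear warmup into a
+# per-iteration LR multiplier under a WarmupMultiStepLR-style schedule:
+def _example_lr_multiplier(it, steps=(30000,), gamma=0.1,
+ warmup_iters=1000, warmup_factor=1.0 / 1000):
+ import bisect
+ if it < warmup_iters: # linear warmup from warmup_factor up to 1.0
+ alpha = it / warmup_iters
+ warmup = warmup_factor * (1 - alpha) + alpha
+ else:
+ warmup = 1.0
+ # Multiply by GAMMA once for every milestone in STEPS that has passed.
+ return warmup * gamma ** bisect.bisect_right(steps, it)
+
+# The milestone tuple below triggers a single decay at iteration 30000: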
+_C.SOLVER.STEPS = (30000,)
+# Number of decays in WarmupStepWithFixedGammaLR schedule
+_C.SOLVER.NUM_DECAYS = 3
+
+_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
+_C.SOLVER.WARMUP_ITERS = 1000
+_C.SOLVER.WARMUP_METHOD = "linear"
+# Whether to rescale the interval for the learning schedule after warmup
+_C.SOLVER.RESCALE_INTERVAL = False
+
+# Save a checkpoint every this many iterations
+_C.SOLVER.CHECKPOINT_PERIOD = 5000
+
+# Number of images per batch across all machines. This is also the number
+# of training images per step (i.e. per iteration). If we use 16 GPUs
+# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
+# May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
+_C.SOLVER.IMS_PER_BATCH = 16
+
+# The reference number of workers (GPUs) this config is meant to train with.
+# It has no effect when set to 0.
+# With a non-zero value, it will be used by DefaultTrainer to compute a desired
+# per-worker batch size, and then scale the other related configs (total batch size,
+# learning rate, etc) to match the per-worker batch size.
+# See documentation of `DefaultTrainer.auto_scale_workers` for details.
+_C.SOLVER.REFERENCE_WORLD_SIZE = 0
+
+# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
+# biases. This is not useful (at least for recent models). You should avoid
+# changing these and they exist only to reproduce Detectron v1 training if
+# desired.
+_C.SOLVER.BIAS_LR_FACTOR = 1.0
+_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
+
+# Gradient clipping
+_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
+# Type of gradient clipping; currently two values are supported:
+# - "value": the absolute values of elements of each gradient are clipped
+# - "norm": the norm of the gradient for each parameter is clipped, thus
+# affecting all elements in the parameter
+_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
+# Maximum absolute value used for clipping gradients
+_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
+# Floating point number p for L-p norm to be used with the "norm"
+# gradient clipping type; for L-inf, please specify .inf
+_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
+
+# Enable automatic mixed precision for training
+# Note that this does not change the model's inference behavior.
+# To use AMP in inference, run inference under autocast()
+_C.SOLVER.AMP = CN({"ENABLED": False})
+
+# ---------------------------------------------------------------------------- #
+# Specific test options
+# ---------------------------------------------------------------------------- #
+_C.TEST = CN()
+# For end-to-end tests to verify the expected accuracy.
+# Each item is [task, metric, value, tolerance]
+# e.g.: [['bbox', 'AP', 38.5, 0.2]]
+_C.TEST.EXPECTED_RESULTS = []
+# The period (in terms of steps) to evaluate the model during training.
+# Set to 0 to disable.
+_C.TEST.EVAL_PERIOD = 0
+# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
+# When empty, it will use the defaults in COCO.
+# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+_C.TEST.KEYPOINT_OKS_SIGMAS = []
+# Maximum number of detections to return per image during inference (100 is
+# based on the limit established for the COCO dataset).
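+# Editor's note: an illustrative sketch, not detectron2 code, of the cap this
+# option applies during post-processing -- keeping only the top-K detections
+# per image by score (scores and boxes are assumed to be torch tensors):
+def _example_cap_detections(scores, boxes, max_dets=100):
+ # scores: (N,), boxes: (N, 4); keep the indices of the K highest scores
+ keep = scores.argsort(descending=True)[:max_dets]
+ return scores[keep], boxes[keep]
+
+# The default below follows the COCO evaluation limit of 100: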
+_C.TEST.DETECTIONS_PER_IMAGE = 100
+
+_C.TEST.AUG = CN({"ENABLED": False})
+_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
+_C.TEST.AUG.MAX_SIZE = 4000
+_C.TEST.AUG.FLIP = True
+
+_C.TEST.PRECISE_BN = CN({"ENABLED": False})
+_C.TEST.PRECISE_BN.NUM_ITER = 200
+
+# ---------------------------------------------------------------------------- #
+# Misc options
+# ---------------------------------------------------------------------------- #
+# Directory where output files are written
+_C.OUTPUT_DIR = "./output"
+# Set seed to negative to fully randomize everything.
+# Set seed to positive to use a fixed seed. Note that a fixed seed increases
+# reproducibility but does not guarantee fully deterministic behavior.
+# Disabling all parallelism further increases reproducibility.
+_C.SEED = -1
+# Benchmark different cudnn algorithms.
+# If input images have very different sizes, this option will have large overhead
+# for about 10k iterations. It usually hurts total time, but can benefit certain models.
+# If input images have the same or similar sizes, benchmark is often helpful.
+_C.CUDNN_BENCHMARK = False
+# The period (in terms of steps) for minibatch visualization at train time.
+# Set to 0 to disable.
+_C.VIS_PERIOD = 0
+
+# The global config is for quick hack purposes.
+# You can set values on the command line or in config files,
+# and access them with:
+#
+# from custom_detectron2.config import global_cfg
+# print(global_cfg.HACK)
+#
+# Do not commit any configs into it.
+_C.GLOBAL = CN()
+_C.GLOBAL.HACK = 1.0
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/instantiate.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/instantiate.py
new file mode 100644
index 0000000000000000000000000000000000000000..24528d71bf09c7ae768aa80df3cf413c8d02598b
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/instantiate.py
@@ -0,0 +1,88 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import collections.abc as abc
+import dataclasses
+import logging
+from typing import Any
+
+from custom_detectron2.utils.registry import _convert_target_to_string, locate
+
+__all__ = ["dump_dataclass", "instantiate"]
+
+
+def dump_dataclass(obj: Any):
+ """
+ Dump a dataclass recursively into a dict that can later be instantiated.
+
+ Args:
+ obj: a dataclass object
+
+ Returns:
+ dict
+ """
+ assert dataclasses.is_dataclass(obj) and not isinstance(
+ obj, type
+ ), "dump_dataclass() requires an instance of a dataclass."
+ ret = {"_target_": _convert_target_to_string(type(obj))}
+ for f in dataclasses.fields(obj):
+ v = getattr(obj, f.name)
+ if dataclasses.is_dataclass(v):
+ v = dump_dataclass(v)
+ if isinstance(v, (list, tuple)):
+ v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
+ ret[f.name] = v
+ return ret
+
+
+def instantiate(cfg):
+ """
+ Recursively instantiate objects defined in dictionaries by
+ "_target_" and arguments.
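+
+ Examples (editor's sketch; the target and arguments are illustrative):
+ ::
+ cfg = {"_target_": "collections.OrderedDict", "a": 1}
+ obj = instantiate(cfg) # -> OrderedDict([('a', 1)])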
+
+ Args:
+ cfg: a dict-like object with "_target_" that defines the caller, and
+ other keys that define the arguments
+
+ Returns:
+ object instantiated by cfg
+ """
+ from omegaconf import ListConfig, DictConfig, OmegaConf
+
+ if isinstance(cfg, ListConfig):
+ lst = [instantiate(x) for x in cfg]
+ return ListConfig(lst, flags={"allow_objects": True})
+ if isinstance(cfg, list):
+ # Specialize for list, because many classes take
+ # list[objects] as arguments, such as ResNet, DatasetMapper
+ return [instantiate(x) for x in cfg]
+
+ # If input is a DictConfig backed by dataclasses (i.e. omegaconf's structured config),
+ # instantiate it to the actual dataclass.
+ if isinstance(cfg, DictConfig) and dataclasses.is_dataclass(cfg._metadata.object_type):
+ return OmegaConf.to_object(cfg)
+
+ if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
+ # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
+ # but faster: https://github.com/facebookresearch/hydra/issues/1200
+ cfg = {k: instantiate(v) for k, v in cfg.items()}
+ cls = cfg.pop("_target_")
+ cls = instantiate(cls)
+
+ if isinstance(cls, str):
+ cls_name = cls
+ cls = locate(cls_name)
+ assert cls is not None, cls_name
+ else:
+ try:
+ cls_name = cls.__module__ + "." + cls.__qualname__
+ except Exception:
+ # target could be anything, so the above could fail
+ cls_name = str(cls)
+ assert callable(cls), f"_target_ {cls} does not define a callable object"
+ try:
+ return cls(**cfg)
+ except TypeError:
+ logger = logging.getLogger(__name__)
+ logger.error(f"Error when instantiating {cls_name}!")
+ raise
+ return cfg # return as-is if we don't know what to do
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/lazy.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/lazy.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a8944f206ea5d6d1e0fa98153f8655819101efa
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/config/lazy.py
@@ -0,0 +1,435 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import ast
+import builtins
+import collections.abc as abc
+import importlib
+import inspect
+import logging
+import os
+import uuid
+from contextlib import contextmanager
+from copy import deepcopy
+from dataclasses import is_dataclass
+from typing import List, Tuple, Union
+import yaml
+from omegaconf import DictConfig, ListConfig, OmegaConf, SCMode
+
+from custom_detectron2.utils.file_io import PathManager
+from custom_detectron2.utils.registry import _convert_target_to_string
+
+__all__ = ["LazyCall", "LazyConfig"]
+
+
+class LazyCall:
+ """
+ Wrap a callable so that when it's called, the call will not be executed,
+ but will return a dict that describes the call.
+
+ A LazyCall object has to be called with keyword arguments only. Positional
+ arguments are not yet supported.
+
+ Examples:
+ ::
+ from custom_detectron2.config import instantiate, LazyCall
+
+ layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
+ layer_cfg.out_channels = 64 # can edit it afterwards
+ layer = instantiate(layer_cfg)
+ """
+
+ def __init__(self, target):
+ if not (callable(target) or isinstance(target, (str, abc.Mapping))):
+ raise TypeError(
+ f"target of LazyCall must be a callable or define a callable! Got {target}"
Got {target}" + ) + self._target = target + + def __call__(self, **kwargs): + if is_dataclass(self._target): + # omegaconf object cannot hold dataclass type + # https://github.com/omry/omegaconf/issues/784 + target = _convert_target_to_string(self._target) + else: + target = self._target + kwargs["_target_"] = target + + return DictConfig(content=kwargs, flags={"allow_objects": True}) + + +def _visit_dict_config(cfg, func): + """ + Apply func recursively to all DictConfig in cfg. + """ + if isinstance(cfg, DictConfig): + func(cfg) + for v in cfg.values(): + _visit_dict_config(v, func) + elif isinstance(cfg, ListConfig): + for v in cfg: + _visit_dict_config(v, func) + + +def _validate_py_syntax(filename): + # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py + with PathManager.open(filename, "r") as f: + content = f.read() + try: + ast.parse(content) + except SyntaxError as e: + raise SyntaxError(f"Config file {filename} has syntax error!") from e + + +def _cast_to_config(obj): + # if given a dict, return DictConfig instead + if isinstance(obj, dict): + return DictConfig(obj, flags={"allow_objects": True}) + return obj + + +_CFG_PACKAGE_NAME = "detectron2._cfg_loader" +""" +A namespace to put all imported config into. +""" + + +def _random_package_name(filename): + # generate a random package name when loading config files + return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename) + + +@contextmanager +def _patch_import(): + """ + Enhance relative import statements in config files, so that they: + 1. locate files purely based on relative location, regardless of packages. + e.g. you can import file without having __init__ + 2. do not cache modules globally; modifications of module states has no side effect + 3. support other storage system through PathManager, so config files can be in the cloud + 4. imported dict are turned into omegaconf.DictConfig automatically + """ + old_import = builtins.__import__ + + def find_relative_file(original_file, relative_import_path, level): + # NOTE: "from . import x" is not handled. Because then it's unclear + # if such import should produce `x` as a python module or DictConfig. + # This can be discussed further if needed. + relative_import_err = """ +Relative import of directories is not allowed within config files. +Within a config file, relative import can only import other config files. +""".replace( + "\n", " " + ) + if not len(relative_import_path): + raise ImportError(relative_import_err) + + cur_file = os.path.dirname(original_file) + for _ in range(level - 1): + cur_file = os.path.dirname(cur_file) + cur_name = relative_import_path.lstrip(".") + for part in cur_name.split("."): + cur_file = os.path.join(cur_file, part) + if not cur_file.endswith(".py"): + cur_file += ".py" + if not PathManager.isfile(cur_file): + cur_file_no_suffix = cur_file[: -len(".py")] + if PathManager.isdir(cur_file_no_suffix): + raise ImportError(f"Cannot import from {cur_file_no_suffix}." + relative_import_err) + else: + raise ImportError( + f"Cannot import name {relative_import_path} from " + f"{original_file}: {cur_file} does not exist." 
+ )
+ return cur_file
+
+ def new_import(name, globals=None, locals=None, fromlist=(), level=0):
+ if (
+ # Only deal with relative imports inside config files
+ level != 0
+ and globals is not None
+ and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
+ ):
+ cur_file = find_relative_file(globals["__file__"], name, level)
+ _validate_py_syntax(cur_file)
+ spec = importlib.machinery.ModuleSpec(
+ _random_package_name(cur_file), None, origin=cur_file
+ )
+ module = importlib.util.module_from_spec(spec)
+ module.__file__ = cur_file
+ with PathManager.open(cur_file) as f:
+ content = f.read()
+ exec(compile(content, cur_file, "exec"), module.__dict__)
+ for name in fromlist: # turn imported dicts into DictConfig automatically
+ val = _cast_to_config(module.__dict__[name])
+ module.__dict__[name] = val
+ return module
+ return old_import(name, globals, locals, fromlist=fromlist, level=level)
+
+ builtins.__import__ = new_import
+ yield new_import
+ builtins.__import__ = old_import
+
+
+class LazyConfig:
+ """
+ Provide methods to save, load, and override an omegaconf config object
+ which may contain definitions of lazily-constructed objects.
+ """
+
+ @staticmethod
+ def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Similar to :meth:`load()`, but load path relative to the caller's
+ source file.
+
+ This has the same functionality as a relative import, except that this method
+ accepts filename as a string, so more characters are allowed in the filename.
+ """
+ caller_frame = inspect.stack()[1]
+ caller_fname = caller_frame[0].f_code.co_filename
+ assert caller_fname != "<string>", "load_rel is unable to find the caller"
+ caller_dir = os.path.dirname(caller_fname)
+ filename = os.path.join(caller_dir, filename)
+ return LazyConfig.load(filename, keys)
+
+ @staticmethod
+ def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Load a config file.
+
+ Args:
+ filename: absolute path or relative path w.r.t. the current working directory
+ keys: keys to load and return. If not given, return all keys
+ (whose values are config objects) in a dict.
+ """
+ has_keys = keys is not None
+ filename = filename.replace("/./", "/") # strip redundant "/./"
+ if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
+ raise ValueError(f"Config file {filename} has to be a python or yaml file.")
+ if filename.endswith(".py"):
+ _validate_py_syntax(filename)
+
+ with _patch_import():
+ # Record the filename
+ module_namespace = {
+ "__file__": filename,
+ "__package__": _random_package_name(filename),
+ }
+ with PathManager.open(filename) as f:
+ content = f.read()
+ # Compile first with filename to:
+ # 1. make the filename appear in stacktraces
+ # 2. make load_rel able to find its parent's (possibly remote) location
+ exec(compile(content, filename, "exec"), module_namespace)
+
+ ret = module_namespace
+ else:
+ with PathManager.open(filename) as f:
+ obj = yaml.unsafe_load(f)
+ ret = OmegaConf.create(obj, flags={"allow_objects": True})
+
+ if has_keys:
+ if isinstance(keys, str):
+ return _cast_to_config(ret[keys])
+ else:
+ return tuple(_cast_to_config(ret[a]) for a in keys)
+ else:
+ if filename.endswith(".py"):
+ # when not specified, only load those that are config objects
+ ret = DictConfig(
+ {
+ name: _cast_to_config(value)
+ for name, value in ret.items()
+ if isinstance(value, (DictConfig, ListConfig, dict))
+ and not name.startswith("_")
+ },
+ flags={"allow_objects": True},
+ )
+ return ret
+
+ @staticmethod
+ def save(cfg, filename: str):
+ """
+ Save a config object to a yaml file.
+ Note that when the config dictionary contains complex objects (e.g. lambda),
+ it can't be saved to yaml. In that case we will print an error and
+ attempt to save to a pkl file instead.
+
+ Args:
+ cfg: an omegaconf config object
+ filename: yaml file name to save the config file
+ """
+ logger = logging.getLogger(__name__)
+ try:
+ cfg = deepcopy(cfg)
+ except Exception:
+ pass
+ else:
+ # if it's deep-copyable, then...
+ def _replace_type_by_name(x):
+ if "_target_" in x and callable(x._target_):
+ try:
+ x._target_ = _convert_target_to_string(x._target_)
+ except AttributeError:
+ pass
+
+ # not necessary, but makes the yaml look nicer
+ _visit_dict_config(cfg, _replace_type_by_name)
+
+ save_pkl = False
+ try:
+ cfg_dict = OmegaConf.to_container(
+ cfg,
+ # Do not resolve interpolation when saving, i.e. do not turn ${a} into
+ # actual values when saving.
+ resolve=False,
+ # Save structures (dataclasses) in a format that can be instantiated later.
+ # Without this option, the type information of the dataclass will be erased.
+ structured_config_mode=SCMode.INSTANTIATE,
+ )
+ dumped = yaml.dump(cfg_dict, default_flow_style=None, allow_unicode=True, width=9999)
+ with PathManager.open(filename, "w") as f:
+ f.write(dumped)
+
+ try:
+ _ = yaml.unsafe_load(dumped) # test that it is loadable
+ except Exception:
+ logger.warning(
+ "The config contains objects that cannot be serialized to valid yaml. "
+ f"{filename} is human-readable but cannot be loaded."
+ )
+ save_pkl = True
+ except Exception:
+ logger.exception("Unable to serialize the config to yaml. Error:")
+ save_pkl = True
+
+ if save_pkl:
+ new_filename = filename + ".pkl"
+ # try:
+ # # retry by pickle
+ # with PathManager.open(new_filename, "wb") as f:
+ # cloudpickle.dump(cfg, f)
+ # logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
+ # except Exception:
+ # pass
+
+ @staticmethod
+ def apply_overrides(cfg, overrides: List[str]):
+ """
+ In-place override contents of cfg.
+
+ Args:
+ cfg: an omegaconf config object
+ overrides: list of strings in the format of "a=b" to override configs.
+ See https://hydra.cc/docs/next/advanced/override_grammar/basic/
+ for syntax.
+
+ Returns:
+ the cfg object
+ """
+
+ def safe_update(cfg, key, value):
+ parts = key.split(".")
+ for idx in range(1, len(parts)):
+ prefix = ".".join(parts[:idx])
+ v = OmegaConf.select(cfg, prefix, default=None)
+ if v is None:
+ break
+ if not OmegaConf.is_config(v):
+ raise KeyError(
+ f"Trying to update key {key}, but {prefix} "
+ f"is not a config, but has type {type(v)}."
+ )
+ OmegaConf.update(cfg, key, value, merge=True)
+
+ try:
+ from hydra.core.override_parser.overrides_parser import OverridesParser
+
+ has_hydra = True
+ except ImportError:
+ has_hydra = False
+
+ if has_hydra:
+ parser = OverridesParser.create()
+ overrides = parser.parse_overrides(overrides)
+ for o in overrides:
+ key = o.key_or_group
+ value = o.value()
+ if o.is_delete():
+ # TODO support this
+ raise NotImplementedError("deletion is not yet a supported override")
+ safe_update(cfg, key, value)
+ else:
+ # Fallback: does not support all of the features and error checking that hydra provides.
+ for o in overrides:
+ key, value = o.split("=")
+ try:
+ value = eval(value, {})
+ except NameError:
+ pass
+ safe_update(cfg, key, value)
+ return cfg
+
+ # @staticmethod
+ # def to_py(cfg, prefix: str = "cfg."):
+ # """
+ # Try to convert a config object into Python-like pseudo code.
+ #
+ # Note that perfect conversion is not always possible. So the returned
+ # results are mainly meant to be human-readable, and not meant to be executed.
+ #
+ # Args:
+ # cfg: an omegaconf config object
+ # prefix: root name for the resulting code (default: "cfg.")
+ #
+ #
+ # Returns:
+ # str of formatted Python code
+ # """
+ # import black
+ #
+ # cfg = OmegaConf.to_container(cfg, resolve=True)
+ #
+ # def _to_str(obj, prefix=None, inside_call=False):
+ # if prefix is None:
+ # prefix = []
+ # if isinstance(obj, abc.Mapping) and "_target_" in obj:
+ # # Dict representing a function call
+ # target = _convert_target_to_string(obj.pop("_target_"))
+ # args = []
+ # for k, v in sorted(obj.items()):
+ # args.append(f"{k}={_to_str(v, inside_call=True)}")
+ # args = ", ".join(args)
+ # call = f"{target}({args})"
+ # return "".join(prefix) + call
+ # elif isinstance(obj, abc.Mapping) and not inside_call:
+ # # Dict that is not inside a call is a list of top-level config objects that we
+ # # render as one object per line with dot separated prefixes
+ # key_list = []
+ # for k, v in sorted(obj.items()):
+ # if isinstance(v, abc.Mapping) and "_target_" not in v:
+ # key_list.append(_to_str(v, prefix=prefix + [k + "."]))
+ # else:
+ # key = "".join(prefix) + k
+ # key_list.append(f"{key}={_to_str(v)}")
+ # return "\n".join(key_list)
+ # elif isinstance(obj, abc.Mapping):
+ # # Dict that is inside a call is rendered as a regular dict
+ # return (
+ # "{"
+ # + ",".join(
+ # f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
+ # for k, v in sorted(obj.items())
+ # )
+ # + "}"
+ # )
+ # elif isinstance(obj, list):
+ # return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
+ # else:
+ # return repr(obj)
+ #
+ # py_str = _to_str(cfg, prefix=[prefix])
+ # try:
+ # return black.format_str(py_str, mode=black.Mode())
+ # except black.InvalidInput:
+ # return py_str
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..259f669b78bd05815cb8d3351fd6c5fc9a1b85a1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from . import transforms # isort:skip
+
+from .build import (
+ build_batch_data_loader,
+ build_detection_test_loader,
+ build_detection_train_loader,
+ get_detection_dataset_dicts,
+ load_proposals_into_dataset,
+ print_instances_class_histogram,
+)
+from .catalog import DatasetCatalog, MetadataCatalog, Metadata
+from .common import DatasetFromList, MapDataset, ToIterableDataset
+from .dataset_mapper import DatasetMapper
+
+# ensure the builtin datasets are registered
+from . import datasets, samplers # isort:skip
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/benchmark.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..eda0bb5c751ee961a1cef8190d6ed966f06f190a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/benchmark.py
@@ -0,0 +1,225 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+from itertools import count
+from typing import List, Tuple
+import torch
+import tqdm
+from fvcore.common.timer import Timer
+
+from custom_detectron2.utils import comm
+
+from .build import build_batch_data_loader
+from .common import DatasetFromList, MapDataset
+from .samplers import TrainingSampler
+
+logger = logging.getLogger(__name__)
+
+
+class _EmptyMapDataset(torch.utils.data.Dataset):
+ """
+ Map anything to emptiness.
+ """
+
+ def __init__(self, dataset):
+ self.ds = dataset
+
+ def __len__(self):
+ return len(self.ds)
+
+ def __getitem__(self, idx):
+ _ = self.ds[idx]
+ return [0]
+
+
+def iter_benchmark(
+ iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
+) -> Tuple[float, List[float]]:
+ """
+ Benchmark an iterator/iterable for `num_iter` iterations, after `warmup`
+ extra iterations of warmup.
+ End early if more than `max_time_seconds` is spent on iterations.
+
+ Returns:
+ float: average time (seconds) per iteration
+ list[float]: time spent on each iteration. Sometimes useful for further analysis.
+ """
+ num_iter, warmup = int(num_iter), int(warmup)
+
+ iterator = iter(iterator)
+ for _ in range(warmup):
+ next(iterator)
+ timer = Timer()
+ all_times = []
+ for curr_iter in tqdm.trange(num_iter):
+ start = timer.seconds()
+ if start > max_time_seconds:
+ num_iter = curr_iter
+ break
+ next(iterator)
+ all_times.append(timer.seconds() - start)
+ avg = timer.seconds() / num_iter
+ return avg, all_times
+
+
+class DataLoaderBenchmark:
+ """
+ Some common benchmarks that help understand performance bottlenecks of a
+ standard dataloader made of a dataset, a mapper and a sampler.
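+
+ Example (editor's sketch; the dataset/mapper names are illustrative):
+ ::
+ bench = DataLoaderBenchmark(dataset_dicts, mapper=my_mapper, total_batch_size=16)
+ bench.benchmark_dataset(100) # raw sample loading speed
+ bench.benchmark_mapper(100) # loading + mapping in one process
+ bench.benchmark_workers(100) # full dataloader at several num_workers settings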
+ """ + + def __init__( + self, + dataset, + *, + mapper, + sampler=None, + total_batch_size, + num_workers=0, + max_time_seconds: int = 90, + ): + """ + Args: + max_time_seconds (int): maximum time to spent for each benchmark + other args: same as in `build.py:build_detection_train_loader` + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False, serialize=True) + if sampler is None: + sampler = TrainingSampler(len(dataset)) + + self.dataset = dataset + self.mapper = mapper + self.sampler = sampler + self.total_batch_size = total_batch_size + self.num_workers = num_workers + self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size() + + self.max_time_seconds = max_time_seconds + + def _benchmark(self, iterator, num_iter, warmup, msg=None): + avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds) + if msg is not None: + self._log_time(msg, avg, all_times) + return avg, all_times + + def _log_time(self, msg, avg, all_times, distributed=False): + percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]] + if not distributed: + logger.info( + f"{msg}: avg={1.0/avg:.1f} it/s, " + f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " + f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." + ) + return + avg_per_gpu = comm.all_gather(avg) + percentiles_per_gpu = comm.all_gather(percentiles) + if comm.get_rank() > 0: + return + for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu): + logger.info( + f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, " + f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " + f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." + ) + + def benchmark_dataset(self, num_iter, warmup=5): + """ + Benchmark the speed of taking raw samples from the dataset. + """ + + def loader(): + while True: + for k in self.sampler: + yield self.dataset[k] + + self._benchmark(loader(), num_iter, warmup, "Dataset Alone") + + def benchmark_mapper(self, num_iter, warmup=5): + """ + Benchmark the speed of taking raw samples from the dataset and map + them in a single process. + """ + + def loader(): + while True: + for k in self.sampler: + yield self.mapper(self.dataset[k]) + + self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)") + + def benchmark_workers(self, num_iter, warmup=10): + """ + Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers]. + """ + candidates = [0, 1] + if self.num_workers not in candidates: + candidates.append(self.num_workers) + + dataset = MapDataset(self.dataset, self.mapper) + for n in candidates: + loader = build_batch_data_loader( + dataset, + self.sampler, + self.total_batch_size, + num_workers=n, + ) + self._benchmark( + iter(loader), + num_iter * max(n, 1), + warmup * max(n, 1), + f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})", + ) + del loader + + def benchmark_IPC(self, num_iter, warmup=10): + """ + Benchmark the dataloader where each worker outputs nothing. This + eliminates the IPC overhead compared to the regular dataloader. + + PyTorch multiprocessing's IPC only optimizes for torch tensors. + Large numpy arrays or other data structure may incur large IPC overhead. 
+ """ + n = self.num_workers + dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper)) + loader = build_batch_data_loader( + dataset, self.sampler, self.total_batch_size, num_workers=n + ) + self._benchmark( + iter(loader), + num_iter * max(n, 1), + warmup * max(n, 1), + f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm", + ) + + def benchmark_distributed(self, num_iter, warmup=10): + """ + Benchmark the dataloader in each distributed worker, and log results of + all workers. This helps understand the final performance as well as + the variances among workers. + + It also prints startup time (first iter) of the dataloader. + """ + gpu = comm.get_world_size() + dataset = MapDataset(self.dataset, self.mapper) + n = self.num_workers + loader = build_batch_data_loader( + dataset, self.sampler, self.total_batch_size, num_workers=n + ) + + timer = Timer() + loader = iter(loader) + next(loader) + startup_time = timer.seconds() + logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time)) + + comm.synchronize() + + avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1)) + del loader + self._log_time( + f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})", + avg, + all_times, + True, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/build.py new file mode 100644 index 0000000000000000000000000000000000000000..aaa44834b76e9347f0ad5c452f0d8e2f532d1795 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/build.py @@ -0,0 +1,556 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import logging +import numpy as np +import operator +import pickle +from typing import Any, Callable, Dict, List, Optional, Union +import torch +import torch.utils.data as torchdata +from tabulate import tabulate +from termcolor import colored + +from custom_detectron2.config import configurable +from custom_detectron2.structures import BoxMode +from custom_detectron2.utils.comm import get_world_size +from custom_detectron2.utils.env import seed_all_rng +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import _log_api_usage, log_first_n + +from .catalog import DatasetCatalog, MetadataCatalog +from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset +from .dataset_mapper import DatasetMapper +from .detection_utils import check_metadata_consistency +from .samplers import ( + InferenceSampler, + RandomSubsetTrainingSampler, + RepeatFactorTrainingSampler, + TrainingSampler, +) + +""" +This file contains the default logic to build a dataloader for training or testing. +""" + +__all__ = [ + "build_batch_data_loader", + "build_detection_train_loader", + "build_detection_test_loader", + "get_detection_dataset_dicts", + "load_proposals_into_dataset", + "print_instances_class_histogram", +] + + +def filter_images_with_only_crowd_annotations(dataset_dicts): + """ + Filter out images with none annotations or only crowd annotations + (i.e., images without non-crowd annotations). + A common training-time preprocessing on COCO dataset. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format, but filtered. 
+ """ + num_before = len(dataset_dicts) + + def valid(anns): + for ann in anns: + if ann.get("iscrowd", 0) == 0: + return True + return False + + dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with no usable annotations. {} images left.".format( + num_before - num_after, num_after + ) + ) + return dataset_dicts + + +def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): + """ + Filter out images with too few number of keypoints. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format as dataset_dicts, but filtered. + """ + num_before = len(dataset_dicts) + + def visible_keypoints_in_image(dic): + # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility + annotations = dic["annotations"] + return sum( + (np.array(ann["keypoints"][2::3]) > 0).sum() + for ann in annotations + if "keypoints" in ann + ) + + dataset_dicts = [ + x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image + ] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with fewer than {} keypoints.".format( + num_before - num_after, min_keypoints_per_image + ) + ) + return dataset_dicts + + +def load_proposals_into_dataset(dataset_dicts, proposal_file): + """ + Load precomputed object proposals into the dataset. + + The proposal file should be a pickled dict with the following keys: + + - "ids": list[int] or list[str], the image ids + - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id + - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores + corresponding to the boxes. + - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + proposal_file (str): file path of pre-computed proposals, in pkl format. + + Returns: + list[dict]: the same format as dataset_dicts, but added proposal field. + """ + logger = logging.getLogger(__name__) + logger.info("Loading proposals from: {}".format(proposal_file)) + + with PathManager.open(proposal_file, "rb") as f: + proposals = pickle.load(f, encoding="latin1") + + # Rename the key names in D1 proposal files + rename_keys = {"indexes": "ids", "scores": "objectness_logits"} + for key in rename_keys: + if key in proposals: + proposals[rename_keys[key]] = proposals.pop(key) + + # Fetch the indexes of all proposals that are in the dataset + # Convert image_id to str since they could be int. 
+ img_ids = {str(record["image_id"]) for record in dataset_dicts}
+ id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
+
+ # Assuming the default bbox_mode of precomputed proposals is 'XYXY_ABS'
+ bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
+
+ for record in dataset_dicts:
+ # Get the index of the proposal
+ i = id_to_index[str(record["image_id"])]
+
+ boxes = proposals["boxes"][i]
+ objectness_logits = proposals["objectness_logits"][i]
+ # Sort the proposals in descending order of the scores
+ inds = objectness_logits.argsort()[::-1]
+ record["proposal_boxes"] = boxes[inds]
+ record["proposal_objectness_logits"] = objectness_logits[inds]
+ record["proposal_bbox_mode"] = bbox_mode
+
+ return dataset_dicts
+
+
+def print_instances_class_histogram(dataset_dicts, class_names):
+ """
+ Args:
+ dataset_dicts (list[dict]): list of dataset dicts.
+ class_names (list[str]): list of class names (zero-indexed).
+ """
+ num_classes = len(class_names)
+ hist_bins = np.arange(num_classes + 1)
+ # Use the builtin int here: the np.int alias was removed in NumPy 1.24.
+ histogram = np.zeros((num_classes,), dtype=int)
+ for entry in dataset_dicts:
+ annos = entry["annotations"]
+ classes = np.asarray(
+ [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int
+ )
+ if len(classes):
+ assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
+ assert (
+ classes.max() < num_classes
+ ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
+ histogram += np.histogram(classes, bins=hist_bins)[0]
+
+ N_COLS = min(6, len(class_names) * 2)
+
+ def short_name(x):
+ # Make long class names shorter. Useful for LVIS.
+ if len(x) > 13:
+ return x[:11] + ".."
+ return x
+
+ data = list(
+ itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
+ )
+ total_num_instances = sum(data[1::2])
+ data.extend([None] * (N_COLS - (len(data) % N_COLS)))
+ if num_classes > 1:
+ data.extend(["total", total_num_instances])
+ data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
+ table = tabulate(
+ data,
+ headers=["category", "#instances"] * (N_COLS // 2),
+ tablefmt="pipe",
+ numalign="left",
+ stralign="center",
+ )
+ log_first_n(
+ logging.INFO,
+ "Distribution of instances among all {} categories:\n".format(num_classes)
+ + colored(table, "cyan"),
+ key="message",
+ )
+
+
+def get_detection_dataset_dicts(
+ names,
+ filter_empty=True,
+ min_keypoints=0,
+ proposal_files=None,
+ check_consistency=True,
+):
+ """
+ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
+
+ Args:
+ names (str or list[str]): a dataset name or a list of dataset names
+ filter_empty (bool): whether to filter out images without instance annotations
+ min_keypoints (int): filter out images with fewer keypoints than
+ `min_keypoints`. Set to 0 to do nothing.
+ proposal_files (list[str]): if given, a list of object proposal files
+ that match each dataset in `names`.
+ check_consistency (bool): whether to check if datasets have consistent metadata.
+
+ Returns:
+ list[dict]: a list of dicts following the standard dataset dict format.
+ """
+ if isinstance(names, str):
+ names = [names]
+ assert len(names), names
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
+
+ if isinstance(dataset_dicts[0], torchdata.Dataset):
+ if len(dataset_dicts) > 1:
+ # ConcatDataset does not work for iterable-style datasets.
+ # We could support concat for iterable as well, but it's often + # not a good idea to concat iterables anyway. + return torchdata.ConcatDataset(dataset_dicts) + return dataset_dicts[0] + + for dataset_name, dicts in zip(names, dataset_dicts): + assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) + + if proposal_files is not None: + assert len(names) == len(proposal_files) + # load precomputed proposals from proposal files + dataset_dicts = [ + load_proposals_into_dataset(dataset_i_dicts, proposal_file) + for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) + ] + + dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) + + has_instances = "annotations" in dataset_dicts[0] + if filter_empty and has_instances: + dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) + if min_keypoints > 0 and has_instances: + dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) + + if check_consistency and has_instances: + try: + class_names = MetadataCatalog.get(names[0]).thing_classes + check_metadata_consistency("thing_classes", names) + print_instances_class_histogram(dataset_dicts, class_names) + except AttributeError: # class names are not available for this dataset + pass + + assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) + return dataset_dicts + + +def build_batch_data_loader( + dataset, + sampler, + total_batch_size, + *, + aspect_ratio_grouping=False, + num_workers=0, + collate_fn=None, +): + """ + Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: + 1. support aspect ratio grouping options + 2. use no "batch collation", because this is common for detection training + + Args: + dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. + sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. + Must be provided iff. ``dataset`` is a map-style dataset. + total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see + :func:`build_detection_train_loader`. + + Returns: + iterable[list]. Length of each list is the batch size of the current + GPU. Each element in the list comes from the dataset. 
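+
+ Example (editor's sketch; assumes a map-style ``dataset`` with the mapper
+ already applied, running on a single GPU):
+ ::
+ loader = build_batch_data_loader(dataset, TrainingSampler(len(dataset)), 16)
+ batch = next(iter(loader)) # a list of 16 mapped dicts on one GPU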
+ """ + world_size = get_world_size() + assert ( + total_batch_size > 0 and total_batch_size % world_size == 0 + ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( + total_batch_size, world_size + ) + batch_size = total_batch_size // world_size + + if isinstance(dataset, torchdata.IterableDataset): + assert sampler is None, "sampler must be None if dataset is IterableDataset" + else: + dataset = ToIterableDataset(dataset, sampler) + + if aspect_ratio_grouping: + data_loader = torchdata.DataLoader( + dataset, + num_workers=num_workers, + collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements + worker_init_fn=worker_init_reset_seed, + ) # yield individual mapped dict + data_loader = AspectRatioGroupedDataset(data_loader, batch_size) + if collate_fn is None: + return data_loader + return MapDataset(data_loader, collate_fn) + else: + return torchdata.DataLoader( + dataset, + batch_size=batch_size, + drop_last=True, + num_workers=num_workers, + collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, + worker_init_fn=worker_init_reset_seed, + ) + + +def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): + if dataset is None: + dataset = get_detection_dataset_dicts( + cfg.DATASETS.TRAIN, + filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, + min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE + if cfg.MODEL.KEYPOINT_ON + else 0, + proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, + ) + _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0]) + + if mapper is None: + mapper = DatasetMapper(cfg, True) + + if sampler is None: + sampler_name = cfg.DATALOADER.SAMPLER_TRAIN + logger = logging.getLogger(__name__) + if isinstance(dataset, torchdata.IterableDataset): + logger.info("Not using any sampler since the dataset is IterableDataset.") + sampler = None + else: + logger.info("Using training sampler {}".format(sampler_name)) + if sampler_name == "TrainingSampler": + sampler = TrainingSampler(len(dataset)) + elif sampler_name == "RepeatFactorTrainingSampler": + repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( + dataset, cfg.DATALOADER.REPEAT_THRESHOLD + ) + sampler = RepeatFactorTrainingSampler(repeat_factors) + elif sampler_name == "RandomSubsetTrainingSampler": + sampler = RandomSubsetTrainingSampler( + len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO + ) + else: + raise ValueError("Unknown training sampler: {}".format(sampler_name)) + + return { + "dataset": dataset, + "sampler": sampler, + "mapper": mapper, + "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, + "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, + "num_workers": cfg.DATALOADER.NUM_WORKERS, + } + + +@configurable(from_config=_train_loader_from_config) +def build_detection_train_loader( + dataset, + *, + mapper, + sampler=None, + total_batch_size, + aspect_ratio_grouping=True, + num_workers=0, + collate_fn=None, +): + """ + Build a dataloader for object detection with some default features. + + Args: + dataset (list or torch.utils.data.Dataset): a list of dataset dicts, + or a pytorch dataset (either map-style or iterable). It can be obtained + by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. + mapper (callable): a callable which takes a sample (dict) from dataset and + returns the format to be consumed by the model. + When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. 
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
+ indices to be applied on ``dataset``.
+ If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
+ which coordinates an infinite random shuffle sequence across all workers.
+ Sampler must be None if ``dataset`` is iterable.
+ total_batch_size (int): total batch size across all workers.
+ aspect_ratio_grouping (bool): whether to group images with similar
+ aspect ratio for efficiency. When enabled, it requires each
+ element in the dataset to be a dict with keys "width" and "height".
+ num_workers (int): number of parallel data loading workers
+ collate_fn: a function that determines how to do batching, same as the argument of
+ `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
+ data. No collation is OK for small batch sizes and simple data structures.
+ If your batch size is large and each sample contains too many small tensors,
+ it's more efficient to collate them in the data loader.
+
+ Returns:
+ torch.utils.data.DataLoader:
+ a dataloader. Each output from it is a ``list[mapped_element]`` of length
+ ``total_batch_size // world_size`` (the per-GPU batch size), where
+ ``mapped_element`` is produced by the ``mapper``.
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+ assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
+ return build_batch_data_loader(
+ dataset,
+ sampler,
+ total_batch_size,
+ aspect_ratio_grouping=aspect_ratio_grouping,
+ num_workers=num_workers,
+ collate_fn=collate_fn,
+ )
+
+
+def _test_loader_from_config(cfg, dataset_name, mapper=None):
+ """
+ Uses the given `dataset_name` argument (instead of the names in cfg), because the
+ standard practice is to evaluate each test set individually (not combining them).
+ """
+ if isinstance(dataset_name, str):
+ dataset_name = [dataset_name]
+
+ dataset = get_detection_dataset_dicts(
+ dataset_name,
+ filter_empty=False,
+ proposal_files=[
+ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
+ ]
+ if cfg.MODEL.LOAD_PROPOSALS
+ else None,
+ )
+ if mapper is None:
+ mapper = DatasetMapper(cfg, False)
+ return {
+ "dataset": dataset,
+ "mapper": mapper,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ "sampler": InferenceSampler(len(dataset))
+ if not isinstance(dataset, torchdata.IterableDataset)
+ else None,
+ }
+
+
+@configurable(from_config=_test_loader_from_config)
+def build_detection_test_loader(
+ dataset: Union[List[Any], torchdata.Dataset],
+ *,
+ mapper: Callable[[Dict[str, Any]], Any],
+ sampler: Optional[torchdata.Sampler] = None,
+ batch_size: int = 1,
+ num_workers: int = 0,
+ collate_fn: Optional[Callable[[List[Any]], Any]] = None,
+) -> torchdata.DataLoader:
+ """
+ Similar to `build_detection_train_loader`, with default batch size = 1,
+ and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
+ to produce the exact set of all samples.
+
+ Args:
+ dataset: a list of dataset dicts,
+ or a pytorch dataset (either map-style or iterable). They can be obtained
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+        mapper: a callable which takes a sample (dict) from the dataset
+            and returns the format to be consumed by the model.
+            When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
+        sampler: a sampler that produces
+            indices to be applied on ``dataset``. Defaults to :class:`InferenceSampler`,
+            which splits the dataset across all workers. Sampler must be None
+            if `dataset` is iterable.
+        batch_size: the batch size of the data loader to be created.
+            Defaults to 1 image per worker since this is the standard when reporting
+            inference time in papers.
+        num_workers: number of parallel data loading workers
+        collate_fn: same as the argument of `torch.utils.data.DataLoader`.
+            Defaults to no collation, returning a list of data.
+
+    Returns:
+        DataLoader: a torch DataLoader that loads the given detection
+        dataset, with test-time transformation and batching.
+
+    Examples:
+    ::
+        data_loader = build_detection_test_loader(
+            DatasetCatalog.get("my_test"),
+            mapper=DatasetMapper(...))
+
+        # or, instantiate with a CfgNode:
+        data_loader = build_detection_test_loader(cfg, "my_test")
+    """
+    if isinstance(dataset, list):
+        dataset = DatasetFromList(dataset, copy=False)
+    if mapper is not None:
+        dataset = MapDataset(dataset, mapper)
+    if isinstance(dataset, torchdata.IterableDataset):
+        assert sampler is None, "sampler must be None if dataset is IterableDataset"
+    else:
+        if sampler is None:
+            sampler = InferenceSampler(len(dataset))
+    return torchdata.DataLoader(
+        dataset,
+        batch_size=batch_size,
+        sampler=sampler,
+        drop_last=False,
+        num_workers=num_workers,
+        collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+    )
+
+
+def trivial_batch_collator(batch):
+    """
+    A batch collator that does nothing.
+    """
+    return batch
+
+
+def worker_init_reset_seed(worker_id):
+    initial_seed = torch.initial_seed() % 2**31
+    seed_all_rng(initial_seed + worker_id)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/catalog.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a5507b8e3bd75eb2acba1b6218e7832090ca985
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/catalog.py
@@ -0,0 +1,236 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import types
+from collections import UserDict
+from typing import List
+
+from custom_detectron2.utils.logger import log_first_n
+
+__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
+
+
+class _DatasetCatalog(UserDict):
+    """
+    A global dictionary that stores information about the datasets and how to obtain them.
+
+    It contains a mapping from strings
+    (which are names that identify a dataset, e.g. "coco_2014_train")
+    to a function which parses the dataset and returns the samples in the
+    format of `list[dict]`.
+
+    The returned dicts should be in Detectron2 Dataset format (see DATASETS.md for details)
+    if used with the data loader functionalities in `data/build.py, data/detection_transform.py`.
+
+    The purpose of having this catalog is to make it easy to choose
+    different datasets, by just using the strings in the config.
+    """
+
+    def register(self, name, func):
+        """
+        Args:
+            name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+            func (callable): a callable which takes no arguments and returns a list of dicts.
+                It must return the same results if called multiple times.
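+
+        A minimal sketch of the expected usage (the dataset name and dict contents
+        below are illustrative)::
+
+            def get_my_dicts():
+                return [{"file_name": "a.jpg", "height": 480, "width": 640, "annotations": []}]
+
+            DatasetCatalog.register("my_dataset", get_my_dicts)
+            dicts = DatasetCatalog.get("my_dataset")  # calls get_my_dicts()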
+        """
+        assert callable(func), "You must register a function with `DatasetCatalog.register`!"
+        assert name not in self, "Dataset '{}' is already registered!".format(name)
+        self[name] = func
+
+    def get(self, name):
+        """
+        Call the registered function and return its results.
+
+        Args:
+            name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+
+        Returns:
+            list[dict]: dataset annotations.
+        """
+        try:
+            f = self[name]
+        except KeyError as e:
+            raise KeyError(
+                "Dataset '{}' is not registered! Available datasets are: {}".format(
+                    name, ", ".join(list(self.keys()))
+                )
+            ) from e
+        return f()
+
+    def list(self) -> List[str]:
+        """
+        List all registered datasets.
+
+        Returns:
+            list[str]
+        """
+        return list(self.keys())
+
+    def remove(self, name):
+        """
+        Alias of ``pop``.
+        """
+        self.pop(name)
+
+    def __str__(self):
+        return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
+
+    __repr__ = __str__
+
+
+DatasetCatalog = _DatasetCatalog()
+DatasetCatalog.__doc__ = (
+    _DatasetCatalog.__doc__
+    + """
+    .. automethod:: detectron2.data.catalog.DatasetCatalog.register
+    .. automethod:: detectron2.data.catalog.DatasetCatalog.get
+"""
+)
+
+
+class Metadata(types.SimpleNamespace):
+    """
+    A class that supports simple attribute setter/getter.
+    It is intended for storing metadata of a dataset and making it accessible globally.
+
+    Examples:
+    ::
+        # somewhere when you load the data:
+        MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
+
+        # somewhere when you print statistics or visualize:
+        classes = MetadataCatalog.get("mydataset").thing_classes
+    """
+
+    # the name of the dataset
+    # set default to N/A so that `self.name` in the errors will not trigger getattr again
+    name: str = "N/A"
+
+    _RENAMED = {
+        "class_names": "thing_classes",
+        "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
+        "stuff_class_names": "stuff_classes",
+    }
+
+    def __getattr__(self, key):
+        if key in self._RENAMED:
+            log_first_n(
+                logging.WARNING,
+                "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                n=10,
+            )
+            return getattr(self, self._RENAMED[key])
+
+        # "name" exists in every metadata
+        if len(self.__dict__) > 1:
+            raise AttributeError(
+                "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
+                "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
+            )
+        else:
+            raise AttributeError(
+                f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
+                "metadata is empty."
+            )
+
+    def __setattr__(self, key, val):
+        if key in self._RENAMED:
+            log_first_n(
+                logging.WARNING,
+                "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                n=10,
+            )
+            setattr(self, self._RENAMED[key], val)
+
+        # Ensure that metadata of the same name stays consistent
+        try:
+            oldval = getattr(self, key)
+            assert oldval == val, (
+                "Attribute '{}' in the metadata of '{}' cannot be set "
+                "to a different value!\n{} != {}".format(key, self.name, oldval, val)
+            )
+        except AttributeError:
+            super().__setattr__(key, val)
+
+    def as_dict(self):
+        """
+        Returns all the metadata as a dict.
+        Note that modifications to the returned dict will not reflect on the Metadata object.
+        """
+        return copy.copy(self.__dict__)
+
+    def set(self, **kwargs):
+        """
+        Set multiple metadata with kwargs.
+        """
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+        return self
+
+    def get(self, key, default=None):
+        """
+        Access an attribute and return its value if it exists.
+        Otherwise return default.
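+
+        For example (a sketch; assumes the attribute may be absent)::
+
+            classes = MetadataCatalog.get("coco_2017_train").get("thing_classes", [])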
+        """
+        try:
+            return getattr(self, key)
+        except AttributeError:
+            return default
+
+
+class _MetadataCatalog(UserDict):
+    """
+    MetadataCatalog is a global dictionary that provides access to
+    :class:`Metadata` of a given dataset.
+
+    The metadata associated with a certain name is a singleton: once created, the
+    metadata will stay alive and will be returned by future calls to ``get(name)``.
+
+    It's like global variables, so don't abuse it.
+    It's meant for storing knowledge that's constant and shared across the execution
+    of the program, e.g.: the class names in COCO.
+    """
+
+    def get(self, name):
+        """
+        Args:
+            name (str): name of a dataset (e.g. coco_2014_train).
+
+        Returns:
+            Metadata: The :class:`Metadata` instance associated with this name,
+            or create an empty one if none is available.
+        """
+        assert len(name)
+        r = super().get(name, None)
+        if r is None:
+            r = self[name] = Metadata(name=name)
+        return r
+
+    def list(self):
+        """
+        List all registered metadata.
+
+        Returns:
+            list[str]: keys (names of datasets) of all registered metadata
+        """
+        return list(self.keys())
+
+    def remove(self, name):
+        """
+        Alias of ``pop``.
+        """
+        self.pop(name)
+
+    def __str__(self):
+        return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
+
+    __repr__ = __str__
+
+
+MetadataCatalog = _MetadataCatalog()
+MetadataCatalog.__doc__ = (
+    _MetadataCatalog.__doc__
+    + """
+    .. automethod:: detectron2.data.catalog.MetadataCatalog.get
+"""
+)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/common.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..42d15cfa7fa1bbf04afce3c616a590ac48554d2e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/common.py
@@ -0,0 +1,301 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import copy
+import itertools
+import logging
+import numpy as np
+import pickle
+import random
+from typing import Callable, Union
+import torch
+import torch.utils.data as data
+from torch.utils.data.sampler import Sampler
+
+from custom_detectron2.utils.serialize import PicklableWrapper
+
+__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
+
+logger = logging.getLogger(__name__)
+
+
+def _shard_iterator_dataloader_worker(iterable):
+    # Shard the iterable if we're currently inside a pytorch dataloader worker.
+    worker_info = data.get_worker_info()
+    if worker_info is None or worker_info.num_workers == 1:
+        # do nothing
+        yield from iterable
+    else:
+        yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers)
+
+
+class _MapIterableDataset(data.IterableDataset):
+    """
+    Map a function over elements in an IterableDataset.
+
+    Similar to pytorch's MapIterDataPipe, but supports filtering when map_func
+    returns None.
+
+    This class is not public-facing. Will be called by `MapDataset`.
+    """
+
+    def __init__(self, dataset, map_func):
+        self._dataset = dataset
+        self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
+
+    def __len__(self):
+        return len(self._dataset)
+
+    def __iter__(self):
+        for x in map(self._map_func, self._dataset):
+            if x is not None:
+                yield x
+
+
+class MapDataset(data.Dataset):
+    """
+    Map a function over the elements in a dataset.
+    """
+
+    def __init__(self, dataset, map_func):
+        """
+        Args:
+            dataset: a dataset where the map function is applied.
+                Can be either a map-style or an iterable dataset. When given an iterable dataset,
+                the returned object will also be an iterable dataset.
+            map_func: a callable which maps the element in dataset. map_func can
+                return None to skip the data (e.g. in case of errors).
+                How None is handled depends on the style of `dataset`.
+                If `dataset` is map-style, it randomly tries other elements.
+                If `dataset` is iterable, it skips the data and tries the next.
+        """
+        self._dataset = dataset
+        self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
+
+        self._rng = random.Random(42)
+        self._fallback_candidates = set(range(len(dataset)))
+
+    def __new__(cls, dataset, map_func):
+        is_iterable = isinstance(dataset, data.IterableDataset)
+        if is_iterable:
+            return _MapIterableDataset(dataset, map_func)
+        else:
+            return super().__new__(cls)
+
+    def __getnewargs__(self):
+        return self._dataset, self._map_func
+
+    def __len__(self):
+        return len(self._dataset)
+
+    def __getitem__(self, idx):
+        retry_count = 0
+        cur_idx = int(idx)
+
+        while True:
+            data = self._map_func(self._dataset[cur_idx])
+            if data is not None:
+                self._fallback_candidates.add(cur_idx)
+                return data
+
+            # _map_func fails for this idx, use a random new index from the pool.
+            # random.sample requires a sequence since Python 3.11, so convert the set first.
+            retry_count += 1
+            self._fallback_candidates.discard(cur_idx)
+            cur_idx = self._rng.sample(list(self._fallback_candidates), k=1)[0]
+
+            if retry_count >= 3:
+                logger = logging.getLogger(__name__)
+                logger.warning(
+                    "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
+                        idx, retry_count
+                    )
+                )
+
+
+class _TorchSerializedList(object):
+    """
+    A list-like object whose items are serialized and stored in a torch tensor. When
+    launching a process that uses TorchSerializedList with "fork" start method,
+    the subprocess can read the same buffer without triggering copy-on-access. When
+    launching a process that uses TorchSerializedList with "spawn/forkserver" start
+    method, the list will be pickled by a special ForkingPickler registered by PyTorch
+    that moves data to shared memory. In both cases, this allows parent and child
+    processes to share RAM for the list data, hence avoiding the issue in
+    https://github.com/pytorch/pytorch/issues/13246.
+
+    See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
+    on how it works.
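+
+    A rough usage sketch (``DatasetFromList`` below wraps a list this way by default
+    when ``serialize=True``)::
+
+        lst = [{"file_name": "{}.jpg".format(i)} for i in range(1000)]
+        serialized = _TorchSerializedList(lst)
+        assert len(serialized) == 1000
+        assert serialized[3] == lst[3]  # items are pickled on build, unpickled per access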
+    """
+
+    def __init__(self, lst: list):
+        self._lst = lst
+
+        def _serialize(data):
+            buffer = pickle.dumps(data, protocol=-1)
+            return np.frombuffer(buffer, dtype=np.uint8)
+
+        logger.info(
+            "Serializing {} elements to byte tensors and concatenating them all ...".format(
+                len(self._lst)
+            )
+        )
+        self._lst = [_serialize(x) for x in self._lst]
+        self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
+        self._addr = torch.from_numpy(np.cumsum(self._addr))
+        self._lst = torch.from_numpy(np.concatenate(self._lst))
+        logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2))
+
+    def __len__(self):
+        return len(self._addr)
+
+    def __getitem__(self, idx):
+        start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
+        end_addr = self._addr[idx].item()
+        bytes = memoryview(self._lst[start_addr:end_addr].numpy())
+
+        # @lint-ignore PYTHONPICKLEISBAD
+        return pickle.loads(bytes)
+
+
+_DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList
+
+
+@contextlib.contextmanager
+def set_default_dataset_from_list_serialize_method(new):
+    """
+    Context manager for using a custom serialize function when creating DatasetFromList
+    """
+
+    global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+    orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+    _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new
+    try:
+        yield
+    finally:
+        # restore the default even if the `with` body raises
+        _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig
+
+
+class DatasetFromList(data.Dataset):
+    """
+    Wrap a list to a torch Dataset. It produces elements of the list as data.
+    """
+
+    def __init__(
+        self,
+        lst: list,
+        copy: bool = True,
+        serialize: Union[bool, Callable] = True,
+    ):
+        """
+        Args:
+            lst (list): a list which contains elements to produce.
+            copy (bool): whether to deepcopy the element when producing it,
+                so that the result can be modified in place without affecting the
+                source in the list.
+            serialize (bool or callable): whether to serialize the storage to another
+                backend. If `True`, the default serialize method will be used; if given
+                a callable, that callable will be used as the serialize method.
+        """
+        self._lst = lst
+        self._copy = copy
+        if not isinstance(serialize, (bool, Callable)):
+            raise TypeError(f"Unsupported type for argument `serialize`: {serialize}")
+        self._serialize = serialize is not False
+
+        if self._serialize:
+            serialize_method = (
+                serialize
+                if isinstance(serialize, Callable)
+                else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+            )
+            logger.info(f"Serializing the dataset using: {serialize_method}")
+            self._lst = serialize_method(self._lst)
+
+    def __len__(self):
+        return len(self._lst)
+
+    def __getitem__(self, idx):
+        if self._copy and not self._serialize:
+            return copy.deepcopy(self._lst[idx])
+        else:
+            return self._lst[idx]
+
+
+class ToIterableDataset(data.IterableDataset):
+    """
+    Convert an old indices-based (also called map-style) dataset
+    to an iterable-style dataset.
+    """
+
+    def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True):
+        """
+        Args:
+            dataset: an old-style dataset with ``__getitem__``
+            sampler: a cheap iterable that produces indices to be applied on ``dataset``.
+            shard_sampler: whether to shard the sampler based on the current pytorch data loader
+                worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
+                workers, it is responsible for sharding its data based on worker id so that workers
+                don't produce identical data.
+
+                Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
+                and this argument should be set to True. But certain samplers may already be
+                sharded; in that case this argument should be set to False.
+        """
+        assert not isinstance(dataset, data.IterableDataset), dataset
+        assert isinstance(sampler, Sampler), sampler
+        self.dataset = dataset
+        self.sampler = sampler
+        self.shard_sampler = shard_sampler
+
+    def __iter__(self):
+        if not self.shard_sampler:
+            sampler = self.sampler
+        else:
+            # With a map-style dataset, `DataLoader(dataset, sampler)` runs the
+            # sampler in the main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
+            # will run the sampler in every one of the N workers. So we should only keep 1/N of
+            # the ids on each worker. The assumption is that the sampler is cheap to iterate,
+            # so it's fine to discard ids in workers.
+            sampler = _shard_iterator_dataloader_worker(self.sampler)
+        for idx in sampler:
+            yield self.dataset[idx]
+
+    def __len__(self):
+        return len(self.sampler)
+
+
+class AspectRatioGroupedDataset(data.IterableDataset):
+    """
+    Batch data that have similar aspect ratio together.
+    In this implementation, images whose aspect ratio < (or >) 1 will
+    be batched together.
+    This improves training speed because the images then need less padding
+    to form a batch.
+
+    It assumes the underlying dataset produces dicts with "width" and "height" keys.
+    It will then produce a list of original dicts with length = batch_size,
+    all with similar aspect ratios.
+    """
+
+    def __init__(self, dataset, batch_size):
+        """
+        Args:
+            dataset: an iterable. Each element must be a dict with keys
+                "width" and "height", which will be used to batch data.
+            batch_size (int):
+        """
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self._buckets = [[] for _ in range(2)]
+        # Hard-coded two aspect ratio groups: w > h and w < h.
+        # Can add support for more aspect ratio groups, but doesn't seem useful
+
+    def __iter__(self):
+        for d in self.dataset:
+            w, h = d["width"], d["height"]
+            bucket_id = 0 if w > h else 1
+            bucket = self._buckets[bucket_id]
+            bucket.append(d)
+            if len(bucket) == self.batch_size:
+                data = bucket[:]
+                # Clear bucket first, because code after yield is not
+                # guaranteed to execute
+                del bucket[:]
+                yield data
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/dataset_mapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/dataset_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..884bbae6e2093974677c8ee8aebb2cae8f43d1b6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/dataset_mapper.py
@@ -0,0 +1,191 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import numpy as np
+from typing import List, Optional, Union
+import torch
+
+from custom_detectron2.config import configurable
+
+from . import detection_utils as utils
+from . import transforms as T
+
+"""
+This file contains the default mapping that's applied to "dataset dicts".
+"""
+
+__all__ = ["DatasetMapper"]
+
+
+class DatasetMapper:
+    """
+    A callable which takes a dataset dict in Detectron2 Dataset format,
+    and maps it into a format used by the model.
+
+    This is the default callable to be used to map your dataset dict into training data.
+    You may use it as a reference to implement your own mapper for customized logic,
+    such as a different way to read or transform images.
+    See :doc:`/tutorials/data_loading` for details.
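+
+    A standalone sketch of how it is typically invoked (assuming ``cfg`` is a CfgNode
+    and ``dataset_dicts`` came from :func:`DatasetCatalog.get`)::
+
+        mapper = DatasetMapper(cfg, is_train=True)
+        data = mapper(dataset_dicts[0])  # dict with an "image" CHW tensor, "instances", ...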
+ + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies cropping/geometric transforms to the image and annotations + 3. Prepare data and annotations to Tensor and :class:`Instances` + """ + + @configurable + def __init__( + self, + is_train: bool, + *, + augmentations: List[Union[T.Augmentation, T.Transform]], + image_format: str, + use_instance_mask: bool = False, + use_keypoint: bool = False, + instance_mask_format: str = "polygon", + keypoint_hflip_indices: Optional[np.ndarray] = None, + precomputed_proposal_topk: Optional[int] = None, + recompute_boxes: bool = False, + ): + """ + NOTE: this interface is experimental. + + Args: + is_train: whether it's used in training or inference + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. + use_instance_mask: whether to process instance segmentation annotations, if available + use_keypoint: whether to process keypoint annotations if available + instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation + masks into this format. + keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` + precomputed_proposal_topk: if given, will load pre-computed + proposals from dataset_dict and keep the top k proposals for each image. + recompute_boxes: whether to overwrite bounding box annotations + by computing tight bounding boxes from instance mask annotations. + """ + if recompute_boxes: + assert use_instance_mask, "recompute_boxes requires instance masks" + # fmt: off + self.is_train = is_train + self.augmentations = T.AugmentationList(augmentations) + self.image_format = image_format + self.use_instance_mask = use_instance_mask + self.instance_mask_format = instance_mask_format + self.use_keypoint = use_keypoint + self.keypoint_hflip_indices = keypoint_hflip_indices + self.proposal_topk = precomputed_proposal_topk + self.recompute_boxes = recompute_boxes + # fmt: on + logger = logging.getLogger(__name__) + mode = "training" if is_train else "inference" + logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") + + @classmethod + def from_config(cls, cfg, is_train: bool = True): + augs = utils.build_augmentation(cfg, is_train) + if cfg.INPUT.CROP.ENABLED and is_train: + augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + recompute_boxes = cfg.MODEL.MASK_ON + else: + recompute_boxes = False + + ret = { + "is_train": is_train, + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "use_instance_mask": cfg.MODEL.MASK_ON, + "instance_mask_format": cfg.INPUT.MASK_FORMAT, + "use_keypoint": cfg.MODEL.KEYPOINT_ON, + "recompute_boxes": recompute_boxes, + } + + if cfg.MODEL.KEYPOINT_ON: + ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + + if cfg.MODEL.LOAD_PROPOSALS: + ret["precomputed_proposal_topk"] = ( + cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN + if is_train + else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST + ) + return ret + + def _transform_annotations(self, dataset_dict, transforms, image_shape): + # USER: Modify this if you want to keep them for some reason. 
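+        # Note: each entry of dataset_dict["annotations"] is a per-instance dict; fields
+        # the config did not ask for are dropped here, and the remaining annotations are
+        # transformed together with the image and packed into an `Instances` object below.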
+        for anno in dataset_dict["annotations"]:
+            if not self.use_instance_mask:
+                anno.pop("segmentation", None)
+            if not self.use_keypoint:
+                anno.pop("keypoints", None)
+
+        # USER: Implement additional transformations if you have other types of data
+        annos = [
+            utils.transform_instance_annotations(
+                obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+            )
+            for obj in dataset_dict.pop("annotations")
+            if obj.get("iscrowd", 0) == 0
+        ]
+        instances = utils.annotations_to_instances(
+            annos, image_shape, mask_format=self.instance_mask_format
+        )
+
+        # After transforms such as cropping are applied, the bounding box may no longer
+        # tightly bound the object. As an example, imagine a triangle object
+        # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
+        # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
+        # the intersection of the original bounding box and the cropping box.
+        if self.recompute_boxes:
+            instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+        dataset_dict["instances"] = utils.filter_empty_instances(instances)
+
+    def __call__(self, dataset_dict):
+        """
+        Args:
+            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+
+        Returns:
+            dict: a format that builtin models in detectron2 accept
+        """
+        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
+        # USER: Write your own image loading if it's not from a file
+        image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
+        utils.check_image_size(dataset_dict, image)
+
+        # USER: Remove if you don't do semantic/panoptic segmentation.
+        if "sem_seg_file_name" in dataset_dict:
+            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
+        else:
+            sem_seg_gt = None
+
+        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
+        transforms = self.augmentations(aug_input)
+        image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+
+        image_shape = image.shape[:2]  # h, w
+        # PyTorch's dataloader is efficient on torch.Tensor due to shared memory,
+        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+        # Therefore it's important to use torch.Tensor.
+        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
+        if sem_seg_gt is not None:
+            dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
+
+        # USER: Remove if you don't use pre-computed proposals.
+        # Most users would not need this feature.
+        if self.proposal_topk is not None:
+            utils.transform_proposals(
+                dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
+            )
+
+        if not self.is_train:
+            # USER: Modify this if you want to keep them for some reason.
+            dataset_dict.pop("annotations", None)
+            dataset_dict.pop("sem_seg_file_name", None)
+            return dataset_dict
+
+        if "annotations" in dataset_dict:
+            self._transform_annotations(dataset_dict, transforms, image_shape)
+
+        return dataset_dict
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb3e4f7afec17137c95c78be6ef06d520ec8032
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/README.md
@@ -0,0 +1,9 @@
+
+
+### Common Datasets
+
+The datasets implemented here do not need to load the data into their final format.
+They should provide the minimal data structure needed to use the dataset, so loading can be very efficient.
+
+For example, for an image dataset, just provide the file names and labels, but don't read the images.
+Let the downstream code decide how to read them.
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a44bedc15e5f0e762fc4d77efd6f1b07c6ff77d0
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
+from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
+from .pascal_voc import load_voc_instances, register_pascal_voc
+from . import builtin as _builtin  # ensure the builtin datasets are registered
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e7409b577b28b3f793143291ab2381bef6e0b20
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+
+"""
+This file registers pre-defined datasets at hard-coded paths, and their metadata.
+
+We hard-code metadata for common datasets. This will enable:
+1. Consistency checks when loading the datasets
+2. Using models on these standard datasets directly and running demos,
+   without having to download the dataset annotations
+
+We hard-code some paths to the datasets, which are assumed to
+exist in "./datasets/".
+
+Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
+To add a new dataset, refer to the tutorial "docs/DATASETS.md".
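+
+For reference, a custom COCO-format dataset can be registered from user code with a
+one-liner (a sketch; the dataset name and paths below are illustrative):
+
+    from custom_detectron2.data.datasets import register_coco_instances
+
+    register_coco_instances("my_dataset_train", {}, "annotations.json", "images/")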
+""" + +import os + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog + +from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata +from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic +from .cityscapes_panoptic import register_all_cityscapes_panoptic +from .coco import load_sem_seg, register_coco_instances +from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated +from .lvis import get_lvis_instances_meta, register_lvis_instances +from .pascal_voc import register_pascal_voc + +# ==== Predefined datasets and splits for COCO ========== + +_PREDEFINED_SPLITS_COCO = {} +_PREDEFINED_SPLITS_COCO["coco"] = { + "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"), + "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"), + "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"), + "coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/instances_valminusminival2014.json", + ), + "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"), + "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"), + "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"), + "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"), + "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"), +} + +_PREDEFINED_SPLITS_COCO["coco_person"] = { + "keypoints_coco_2014_train": ( + "coco/train2014", + "coco/annotations/person_keypoints_train2014.json", + ), + "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"), + "keypoints_coco_2014_minival": ( + "coco/val2014", + "coco/annotations/person_keypoints_minival2014.json", + ), + "keypoints_coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/person_keypoints_valminusminival2014.json", + ), + "keypoints_coco_2017_train": ( + "coco/train2017", + "coco/annotations/person_keypoints_train2017.json", + ), + "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"), + "keypoints_coco_2017_val_100": ( + "coco/val2017", + "coco/annotations/person_keypoints_val2017_100.json", + ), +} + + +_PREDEFINED_SPLITS_COCO_PANOPTIC = { + "coco_2017_train_panoptic": ( + # This is the original panoptic annotation directory + "coco/panoptic_train2017", + "coco/annotations/panoptic_train2017.json", + # This directory contains semantic annotations that are + # converted from panoptic annotations. + # It is used by PanopticFPN. + # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py + # to create these directories. + "coco/panoptic_stuff_train2017", + ), + "coco_2017_val_panoptic": ( + "coco/panoptic_val2017", + "coco/annotations/panoptic_val2017.json", + "coco/panoptic_stuff_val2017", + ), + "coco_2017_val_100_panoptic": ( + "coco/panoptic_val2017_100", + "coco/annotations/panoptic_val2017_100.json", + "coco/panoptic_stuff_val2017_100", + ), +} + + +def register_all_coco(root): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + # Assume pre-defined datasets live in `./datasets`. 
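+            # e.g. key="coco_2017_train", image_root="coco/train2017",
+            # json_file="coco/annotations/instances_train2017.json" (from the tables above);
+            # the "://" check below leaves fully-qualified URIs unjoined.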
+ register_coco_instances( + key, + _get_builtin_metadata(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + for ( + prefix, + (panoptic_root, panoptic_json, semantic_root), + ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): + prefix_instances = prefix[: -len("_panoptic")] + instances_meta = MetadataCatalog.get(prefix_instances) + image_root, instances_json = instances_meta.image_root, instances_meta.json_file + # The "separated" version of COCO panoptic segmentation dataset, + # e.g. used by Panoptic FPN + register_coco_panoptic_separated( + prefix, + _get_builtin_metadata("coco_panoptic_separated"), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + os.path.join(root, semantic_root), + instances_json, + ) + # The "standard" version of COCO panoptic segmentation dataset, + # e.g. used by Panoptic-DeepLab + register_coco_panoptic( + prefix, + _get_builtin_metadata("coco_panoptic_standard"), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + instances_json, + ) + + +# ==== Predefined datasets and splits for LVIS ========== + + +_PREDEFINED_SPLITS_LVIS = { + "lvis_v1": { + "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"), + "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"), + "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"), + "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"), + }, + "lvis_v0.5": { + "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"), + "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"), + "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"), + "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"), + }, + "lvis_v0.5_cocofied": { + "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"), + "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"), + }, +} + + +def register_all_lvis(root): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + register_lvis_instances( + key, + get_lvis_instances_meta(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +# ==== Predefined splits for raw cityscapes images =========== +_RAW_CITYSCAPES_SPLITS = { + "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"), + "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"), + "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"), +} + + +def register_all_cityscapes(root): + for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items(): + meta = _get_builtin_metadata("cityscapes") + image_dir = os.path.join(root, image_dir) + gt_dir = os.path.join(root, gt_dir) + + inst_key = key.format(task="instance_seg") + DatasetCatalog.register( + inst_key, + lambda x=image_dir, y=gt_dir: load_cityscapes_instances( + x, y, from_json=True, to_polygons=True + ), + ) + MetadataCatalog.get(inst_key).set( + image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta + ) + + sem_key = key.format(task="sem_seg") + DatasetCatalog.register( + sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y) + ) + MetadataCatalog.get(sem_key).set( + image_dir=image_dir, + gt_dir=gt_dir, + evaluator_type="cityscapes_sem_seg", + 
+            ignore_label=255,
+            **meta,
+        )
+
+
+# ==== Predefined splits for PASCAL VOC ===========
+def register_all_pascal_voc(root):
+    SPLITS = [
+        ("voc_2007_trainval", "VOC2007", "trainval"),
+        ("voc_2007_train", "VOC2007", "train"),
+        ("voc_2007_val", "VOC2007", "val"),
+        ("voc_2007_test", "VOC2007", "test"),
+        ("voc_2012_trainval", "VOC2012", "trainval"),
+        ("voc_2012_train", "VOC2012", "train"),
+        ("voc_2012_val", "VOC2012", "val"),
+    ]
+    for name, dirname, split in SPLITS:
+        year = 2007 if "2007" in name else 2012
+        register_pascal_voc(name, os.path.join(root, dirname), split, year)
+        MetadataCatalog.get(name).evaluator_type = "pascal_voc"
+
+
+def register_all_ade20k(root):
+    root = os.path.join(root, "ADEChallengeData2016")
+    for name, dirname in [("train", "training"), ("val", "validation")]:
+        image_dir = os.path.join(root, "images", dirname)
+        gt_dir = os.path.join(root, "annotations_detectron2", dirname)
+        name = f"ade20k_sem_seg_{name}"
+        DatasetCatalog.register(
+            name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
+        )
+        MetadataCatalog.get(name).set(
+            stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
+            image_root=image_dir,
+            sem_seg_root=gt_dir,
+            evaluator_type="sem_seg",
+            ignore_label=255,
+        )
+
+
+# True for open source;
+# Internally at fb, we register them elsewhere
+if __name__.endswith(".builtin"):
+    # Assume pre-defined datasets live in `./datasets`.
+    _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
+    register_all_coco(_root)
+    register_all_lvis(_root)
+    register_all_cityscapes(_root)
+    register_all_cityscapes_panoptic(_root)
+    register_all_pascal_voc(_root)
+    register_all_ade20k(_root)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin_meta.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c7a1a31b31dd89b82011effee26471faccacf5
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/builtin_meta.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+Note:
+For your custom dataset, there is no need to hard-code metadata anywhere in the code.
+For example, for COCO-format datasets, metadata will be obtained automatically
+when calling `load_coco_json`. For other datasets, metadata may also be obtained in
+other ways during loading.
+
+However, we hard-code metadata for a few common datasets here.
+The only goal is to allow users who don't have these datasets to use pre-trained models.
+Users don't have to download a COCO json (which contains metadata) in order to visualize a
+COCO model (with correct class names and colors).
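+
+For example, once the builtin datasets are registered, the hard-coded metadata below can
+be read without any dataset files on disk (a sketch)::
+
+    from custom_detectron2.data import MetadataCatalog
+
+    thing_classes = MetadataCatalog.get("coco_2017_train").thing_classes  # 80 class names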
+""" + + +# All coco categories, together with their nice-looking visualization colors +# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json +COCO_CATEGORIES = [ + {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, + {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, + {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, + {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, + {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, + {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, + {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, + {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, + {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, + {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, + {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, + {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, + {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, + {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, + {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, + {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, + {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, + {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, + {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, + {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, + {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, + {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, + {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, + {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, + {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, + {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, + {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, + {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, + {"color": [166, 196, 
102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": 
"platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + +# fmt: off +COCO_PERSON_KEYPOINT_NAMES = ( + "nose", + "left_eye", "right_eye", + "left_ear", "right_ear", + "left_shoulder", "right_shoulder", + "left_elbow", "right_elbow", + "left_wrist", "right_wrist", + "left_hip", "right_hip", + "left_knee", "right_knee", + "left_ankle", "right_ankle", +) +# fmt: on + +# Pairs of keypoints that should be exchanged under horizontal flipping +COCO_PERSON_KEYPOINT_FLIP_MAP = ( + ("left_eye", "right_eye"), + ("left_ear", "right_ear"), + ("left_shoulder", "right_shoulder"), + ("left_elbow", "right_elbow"), + ("left_wrist", "right_wrist"), + ("left_hip", "right_hip"), + ("left_knee", "right_knee"), + ("left_ankle", "right_ankle"), +) + +# rules for pairs of keypoints to draw a line between, and the line color to use. 
+KEYPOINT_CONNECTION_RULES = [ + # face + ("left_ear", "left_eye", (102, 204, 255)), + ("right_ear", "right_eye", (51, 153, 255)), + ("left_eye", "nose", (102, 0, 204)), + ("nose", "right_eye", (51, 102, 255)), + # upper-body + ("left_shoulder", "right_shoulder", (255, 128, 0)), + ("left_shoulder", "left_elbow", (153, 255, 204)), + ("right_shoulder", "right_elbow", (128, 229, 255)), + ("left_elbow", "left_wrist", (153, 255, 153)), + ("right_elbow", "right_wrist", (102, 255, 224)), + # lower-body + ("left_hip", "right_hip", (255, 102, 0)), + ("left_hip", "left_knee", (255, 255, 77)), + ("right_hip", "right_knee", (153, 255, 204)), + ("left_knee", "left_ankle", (191, 255, 128)), + ("right_knee", "right_ankle", (255, 195, 77)), +] + +# All Cityscapes categories, together with their nice-looking visualization colors +# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa +CITYSCAPES_CATEGORIES = [ + {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"}, + {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"}, + {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"}, + {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"}, + {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"}, + {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"}, + {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"}, + {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"}, + {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"}, + {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"}, + {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"}, + {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"}, + {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"}, + {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"}, + {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"}, + {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"}, + {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"}, + {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"}, + {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"}, +] + +# fmt: off +ADE20K_SEM_SEG_CATEGORIES = [ + "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, 
potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa +] +# After processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore +# fmt: on + + +def _get_coco_instances_meta(): + thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1] + thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] + assert len(thing_ids) == 80, len(thing_ids) + # Mapping from the incontiguous COCO category id to an id in [0, 79] + thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} + thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] + ret = { + "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, + "thing_classes": thing_classes, + "thing_colors": thing_colors, + } + return ret + + +def _get_coco_panoptic_separated_meta(): + """ + Returns metadata for "separated" version of the panoptic segmentation dataset. + """ + stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0] + assert len(stuff_ids) == 53, len(stuff_ids) + + # For semantic segmentation, this mapping maps from contiguous stuff id + # (in [0, 53], used in models) to ids in the dataset (used for processing results) + # The id 0 is mapped to an extra category "thing". 
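+    # e.g. with COCO_CATEGORIES above: stuff id 92 ("banner") -> 1, 93 ("blanket") -> 2, etc.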
+    stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
+    # When converting COCO panoptic annotations to semantic annotations
+    # we label the "thing" category to 0
+    stuff_dataset_id_to_contiguous_id[0] = 0
+
+    # 54 names for COCO stuff categories (including "things")
+    stuff_classes = ["things"] + [
+        k["name"].replace("-other", "").replace("-merged", "")
+        for k in COCO_CATEGORIES
+        if k["isthing"] == 0
+    ]
+
+    # NOTE: I randomly picked a color for things
+    stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
+    ret = {
+        "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
+        "stuff_classes": stuff_classes,
+        "stuff_colors": stuff_colors,
+    }
+    ret.update(_get_coco_instances_meta())
+    return ret
+
+
+def _get_builtin_metadata(dataset_name):
+    if dataset_name == "coco":
+        return _get_coco_instances_meta()
+    if dataset_name == "coco_panoptic_separated":
+        return _get_coco_panoptic_separated_meta()
+    elif dataset_name == "coco_panoptic_standard":
+        meta = {}
+        # The following metadata maps contiguous id from [0, #thing categories +
+        # #stuff categories) to their names and colors. We keep two replicas of the
+        # same names and colors under "thing_*" and "stuff_*" because the current
+        # visualization function in D2 handles thing and stuff classes differently
+        # due to some heuristic used in Panoptic FPN. We keep the same naming to
+        # enable reusing existing visualization functions.
+        thing_classes = [k["name"] for k in COCO_CATEGORIES]
+        thing_colors = [k["color"] for k in COCO_CATEGORIES]
+        stuff_classes = [k["name"] for k in COCO_CATEGORIES]
+        stuff_colors = [k["color"] for k in COCO_CATEGORIES]
+
+        meta["thing_classes"] = thing_classes
+        meta["thing_colors"] = thing_colors
+        meta["stuff_classes"] = stuff_classes
+        meta["stuff_colors"] = stuff_colors
+
+        # Convert category id for training:
+        #   category id: like semantic segmentation, it is the class id for each
+        #   pixel. Since there are some classes not used in evaluation, the category
+        #   id is not always contiguous and thus we have two sets of category ids:
+        #     - original category id: category id in the original dataset, mainly
+        #       used for evaluation.
+        #     - contiguous category id: [0, #classes), in order to train the linear
+        #       softmax classifier.
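+        # e.g. "person" (dataset id 1, the first thing) -> 0, and "banner" (dataset id 92,
+        # the first stuff class and the 81st entry of COCO_CATEGORIES) -> 80.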
+ thing_dataset_id_to_contiguous_id = {} + stuff_dataset_id_to_contiguous_id = {} + + for i, cat in enumerate(COCO_CATEGORIES): + if cat["isthing"]: + thing_dataset_id_to_contiguous_id[cat["id"]] = i + else: + stuff_dataset_id_to_contiguous_id[cat["id"]] = i + + meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id + meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id + + return meta + elif dataset_name == "coco_person": + return { + "thing_classes": ["person"], + "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, + "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, + "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, + } + elif dataset_name == "cityscapes": + # fmt: off + CITYSCAPES_THING_CLASSES = [ + "person", "rider", "car", "truck", + "bus", "train", "motorcycle", "bicycle", + ] + CITYSCAPES_STUFF_CLASSES = [ + "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", + "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", + "truck", "bus", "train", "motorcycle", "bicycle", + ] + # fmt: on + return { + "thing_classes": CITYSCAPES_THING_CLASSES, + "stuff_classes": CITYSCAPES_STUFF_CLASSES, + } + raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..ddaa4e0e5057bc061edb5d965122a1b478c580ab --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes.py @@ -0,0 +1,329 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import functools +import json +import logging +import multiprocessing as mp +import numpy as np +import os +from itertools import chain +import custom_pycocotools.mask as mask_util +from PIL import Image + +from custom_detectron2.structures import BoxMode +from custom_detectron2.utils.comm import get_world_size +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import setup_logger + +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + pass + + +logger = logging.getLogger(__name__) + + +def _get_cityscapes_files(image_dir, gt_dir): + files = [] + # scan through the directory + cities = PathManager.ls(image_dir) + logger.info(f"{len(cities)} cities found in '{image_dir}'.") + for city in cities: + city_img_dir = os.path.join(image_dir, city) + city_gt_dir = os.path.join(gt_dir, city) + for basename in PathManager.ls(city_img_dir): + image_file = os.path.join(city_img_dir, basename) + + suffix = "leftImg8bit.png" + assert basename.endswith(suffix), basename + basename = basename[: -len(suffix)] + + instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png") + label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png") + json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json") + + files.append((image_file, instance_file, label_file, json_file)) + assert len(files), "No images found in {}".format(image_dir) + for f in files[0]: + assert PathManager.isfile(f), f + return files + + +def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train". 
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+ """
+ if from_json:
+ assert to_polygons, (
+ "Cityscapes's json annotations are in polygon format. "
+ "Converting to mask format is not supported now."
+ )
+ files = _get_cityscapes_files(image_dir, gt_dir)
+
+ logger.info("Preprocessing cityscapes annotations ...")
+ # This is still not fast: all workers will execute duplicate work and will
+ # take up to 10m on an 8-GPU server.
+ pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
+
+ ret = pool.map(
+ functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
+ files,
+ )
+ logger.info("Loaded {} images from {}".format(len(ret), image_dir))
+
+ # Map cityscape ids to contiguous ids
+ from cityscapesscripts.helpers.labels import labels
+
+ labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
+ dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
+ for dict_per_image in ret:
+ for anno in dict_per_image["annotations"]:
+ anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
+ return ret
+
+
+def load_cityscapes_semantic(image_dir, gt_dir):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
+
+ Returns:
+ list[dict]: a list of dicts, each of which has "file_name" and
+ "sem_seg_file_name".
+ """
+ ret = []
+ # gt_dir is small and contains many small files, so it makes sense to fetch it to local storage first
+ gt_dir = PathManager.get_local_path(gt_dir)
+ for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
+ label_file = label_file.replace("labelIds", "labelTrainIds")
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret.append(
+ {
+ "file_name": image_file,
+ "sem_seg_file_name": label_file,
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ return ret
+
+
+def _cityscapes_files_to_dict(files, from_json, to_polygons):
+ """
+ Parse cityscapes annotation files to an instance segmentation dataset dict.
+
+ Args:
+ files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ A dict in Detectron2 Dataset format.
+ """
+ from cityscapesscripts.helpers.labels import id2label, name2label
+
+ image_file, instance_id_file, _, json_file = files
+
+ annos = []
+
+ if from_json:
+ from shapely.geometry import MultiPolygon, Polygon
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+
+ # `polygons_union` contains the union of all valid polygons.
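+ # Editor's toy check of the shapely ops used below (not part of the loader):
+ # a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]); b = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])
+ # b.difference(a).area == 3.0 (part of b not yet claimed); a.union(b).area == 7.0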
+ polygons_union = Polygon()
+
+ # CityscapesScripts draws the polygons in sequential order
+ # and each polygon *overwrites* existing ones. See
+ # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
+ # We use reverse order, and each polygon *avoids* early ones.
+ # This will resolve the polygon overlaps in the same way as CityscapesScripts.
+ for obj in jsonobj["objects"][::-1]:
+ if "deleted" in obj: # cityscapes data format specific
+ continue
+ label_name = obj["label"]
+
+ try:
+ label = name2label[label_name]
+ except KeyError:
+ if label_name.endswith("group"): # crowd area
+ label = name2label[label_name[: -len("group")]]
+ else:
+ raise
+ if label.id < 0: # cityscapes data format
+ continue
+
+ # Cityscapes's raw annotations use integer coordinates
+ # Therefore +0.5 here
+ poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
+ # CityscapesScripts uses PIL.ImageDraw.polygon to rasterize
+ # polygons for evaluation. This function operates in integer space
+ # and draws each pixel whose center falls into the polygon.
+ # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
+ # We therefore dilate the input polygon by 0.5 as our input.
+ poly = Polygon(poly_coord).buffer(0.5, resolution=4)
+
+ if not label.hasInstances or label.ignoreInEval:
+ # even if we won't store the polygon it still contributes to overlaps resolution
+ polygons_union = polygons_union.union(poly)
+ continue
+
+ # Take non-overlapping part of the polygon
+ poly_wo_overlaps = poly.difference(polygons_union)
+ if poly_wo_overlaps.is_empty:
+ continue
+ polygons_union = polygons_union.union(poly)
+
+ anno = {}
+ anno["iscrowd"] = label_name.endswith("group")
+ anno["category_id"] = label.id
+
+ if isinstance(poly_wo_overlaps, Polygon):
+ poly_list = [poly_wo_overlaps]
+ elif isinstance(poly_wo_overlaps, MultiPolygon):
+ poly_list = poly_wo_overlaps.geoms
+ else:
+ raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
+
+ poly_coord = []
+ for poly_el in poly_list:
+ # COCO API can work only with exterior boundaries now, hence we store only them.
+ # TODO: store both exterior and interior boundaries once other parts of the
+ # codebase support holes in polygons.
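+ # Editor's illustration of the flattening below: exterior.coords yields
+ # (x, y) pairs, and chain(*...) flattens them into COCO's [x0, y0, x1, y1, ...]
+ # layout, e.g. list(chain(*[(0.0, 0.0), (2.0, 0.0)])) -> [0.0, 0.0, 2.0, 0.0]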
+ poly_coord.append(list(chain(*poly_el.exterior.coords)))
+ anno["segmentation"] = poly_coord
+ (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
+
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+
+ annos.append(anno)
+ else:
+ # See also the official annotation parsing scripts at
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
+ with PathManager.open(instance_id_file, "rb") as f:
+ inst_image = np.asarray(Image.open(f), order="F")
+ # ids < 24 are stuff labels (filtering them first is about 5% faster)
+ flattened_ids = np.unique(inst_image[inst_image >= 24])
+
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": inst_image.shape[0],
+ "width": inst_image.shape[1],
+ }
+
+ for instance_id in flattened_ids:
+ # For non-crowd annotations, instance_id // 1000 is the label_id
+ # Crowd annotations have <1000 instance ids
+ label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
+ label = id2label[label_id]
+ if not label.hasInstances or label.ignoreInEval:
+ continue
+
+ anno = {}
+ anno["iscrowd"] = instance_id < 1000
+ anno["category_id"] = label.id
+
+ mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
+
+ inds = np.nonzero(mask)
+ ymin, ymax = inds[0].min(), inds[0].max()
+ xmin, xmax = inds[1].min(), inds[1].max()
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ if xmax <= xmin or ymax <= ymin:
+ continue
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+ if to_polygons:
+ # This conversion comes from D4809743 and D5171122,
+ # when Mask-RCNN was first developed.
+ contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
+ -2
+ ]
+ polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
+ # opencv can produce invalid polygons
+ if len(polygons) == 0:
+ continue
+ anno["segmentation"] = polygons
+ else:
+ anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
+ annos.append(anno)
+ ret["annotations"] = annos
+ return ret
+
+
+if __name__ == "__main__":
+ """
+ Test the cityscapes dataset loader.
+ + Usage: + python -m detectron2.data.datasets.cityscapes \ + cityscapes/leftImg8bit/train cityscapes/gtFine/train + """ + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("image_dir") + parser.add_argument("gt_dir") + parser.add_argument("--type", choices=["instance", "semantic"], default="instance") + args = parser.parse_args() + from custom_detectron2.data.catalog import Metadata + from custom_detectron2.utils.visualizer import Visualizer + from cityscapesscripts.helpers.labels import labels + + logger = setup_logger(name=__name__) + + dirname = "cityscapes-data-vis" + os.makedirs(dirname, exist_ok=True) + + if args.type == "instance": + dicts = load_cityscapes_instances( + args.image_dir, args.gt_dir, from_json=True, to_polygons=True + ) + logger.info("Done loading {} samples.".format(len(dicts))) + + thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval] + meta = Metadata().set(thing_classes=thing_classes) + + else: + dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir) + logger.info("Done loading {} samples.".format(len(dicts))) + + stuff_classes = [k.name for k in labels if k.trainId != 255] + stuff_colors = [k.color for k in labels if k.trainId != 255] + meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors) + + for d in dicts: + img = np.array(Image.open(PathManager.open(d["file_name"], "rb"))) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + # cv2.imshow("a", vis.get_image()[:, :, ::-1]) + # cv2.waitKey() + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes_panoptic.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes_panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..efb8a30ce9c9cf62bc6e585dc029c9b003f6569c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/cityscapes_panoptic.py @@ -0,0 +1,187 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import json +import logging +import os + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES +from custom_detectron2.utils.file_io import PathManager + +""" +This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog. 
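+
+A minimal usage sketch (editor's note; assumes the standard Cityscapes panoptic
+layout under ``root``):
+
+ register_all_cityscapes_panoptic(root="datasets")
+ # registers "cityscapes_fine_panoptic_train" and "cityscapes_fine_panoptic_val"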
+""" + + +logger = logging.getLogger(__name__) + + +def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info): + files = [] + # scan through the directory + cities = PathManager.ls(image_dir) + logger.info(f"{len(cities)} cities found in '{image_dir}'.") + image_dict = {} + for city in cities: + city_img_dir = os.path.join(image_dir, city) + for basename in PathManager.ls(city_img_dir): + image_file = os.path.join(city_img_dir, basename) + + suffix = "_leftImg8bit.png" + assert basename.endswith(suffix), basename + basename = os.path.basename(basename)[: -len(suffix)] + + image_dict[basename] = image_file + + for ann in json_info["annotations"]: + image_file = image_dict.get(ann["image_id"], None) + assert image_file is not None, "No image {} found for annotation {}".format( + ann["image_id"], ann["file_name"] + ) + label_file = os.path.join(gt_dir, ann["file_name"]) + segments_info = ann["segments_info"] + + files.append((image_file, label_file, segments_info)) + + assert len(files), "No images found in {}".format(image_dir) + assert PathManager.isfile(files[0][0]), files[0][0] + assert PathManager.isfile(files[0][1]), files[0][1] + return files + + +def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train". + gt_json (str): path to the json file. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train.json". + meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id" + and "stuff_dataset_id_to_contiguous_id" to map category ids to + contiguous ids for training. + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + return segment_info + + assert os.path.exists( + gt_json + ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa + with open(gt_json) as f: + json_info = json.load(f) + files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info) + ret = [] + for image_file, label_file, segments_info in files: + sem_label_file = ( + image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png" + ) + segments_info = [_convert_category_id(x, meta) for x in segments_info] + ret.append( + { + "file_name": image_file, + "image_id": "_".join( + os.path.splitext(os.path.basename(image_file))[0].split("_")[:3] + ), + "sem_seg_file_name": sem_label_file, + "pan_seg_file_name": label_file, + "segments_info": segments_info, + } + ) + assert len(ret), f"No images found in {image_dir}!" 
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ assert PathManager.isfile(
+ ret[0]["pan_seg_file_name"]
+ ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
+ return ret
+
+
+_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
+ "cityscapes_fine_panoptic_train": (
+ "cityscapes/leftImg8bit/train",
+ "cityscapes/gtFine/cityscapes_panoptic_train",
+ "cityscapes/gtFine/cityscapes_panoptic_train.json",
+ ),
+ "cityscapes_fine_panoptic_val": (
+ "cityscapes/leftImg8bit/val",
+ "cityscapes/gtFine/cityscapes_panoptic_val",
+ "cityscapes/gtFine/cityscapes_panoptic_val.json",
+ ),
+ # "cityscapes_fine_panoptic_test": not supported yet
+}
+
+
+def register_all_cityscapes_panoptic(root):
+ meta = {}
+ # The following metadata maps contiguous ids from [0, #thing categories +
+ # #stuff categories) to their names and colors. We keep two copies of the
+ # same names and colors under "thing_*" and "stuff_*" because the current
+ # visualization function in D2 handles thing and stuff classes differently
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
+ # enable reusing existing visualization functions.
+ thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+ stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+
+ meta["thing_classes"] = thing_classes
+ meta["thing_colors"] = thing_colors
+ meta["stuff_classes"] = stuff_classes
+ meta["stuff_colors"] = stuff_colors
+
+ # There are three types of ids in cityscapes panoptic segmentation:
+ # (1) category id: like semantic segmentation, it is the class id for each
+ # pixel. Since some classes are not used in evaluation, the category
+ # id is not always contiguous and thus we have two sets of category ids:
+ # - original category id: category id in the original dataset, mainly
+ # used for evaluation.
+ # - contiguous category id: [0, #classes), in order to train the classifier
+ # (2) instance id: this id is used to differentiate different instances from
+ # the same category. For "stuff" classes, the instance id is always 0; for
+ # "thing" classes, the instance id starts from 1 and 0 is reserved for
+ # ignored instances (e.g. crowd annotation).
+ # (3) panoptic id: this is the compact id that encodes both category and
+ # instance id by: category_id * 1000 + instance_id.
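+ # Editor's worked example of the encoding above: panoptic id 26002 decodes
+ # to category_id = 26002 // 1000 = 26 and instance_id = 26002 % 1000 = 2
+ # (in Cityscapes, label id 26 is "car", so this is car instance #2).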
+ thing_dataset_id_to_contiguous_id = {}
+ stuff_dataset_id_to_contiguous_id = {}
+
+ for k in CITYSCAPES_CATEGORIES:
+ if k["isthing"] == 1:
+ thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+ else:
+ stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+ for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
+ image_dir = os.path.join(root, image_dir)
+ gt_dir = os.path.join(root, gt_dir)
+ gt_json = os.path.join(root, gt_json)
+
+ DatasetCatalog.register(
+ key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
+ )
+ MetadataCatalog.get(key).set(
+ panoptic_root=gt_dir,
+ image_root=image_dir,
+ panoptic_json=gt_json,
+ gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
+ evaluator_type="cityscapes_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **meta,
+ )
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..13d8bf49c454a0b29746e0c16794c12afb28ccf8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco.py
@@ -0,0 +1,539 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import datetime
+import io
+import json
+import logging
+import numpy as np
+import os
+import shutil
+import custom_pycocotools.mask as mask_util
+from fvcore.common.timer import Timer
+from iopath.common.file_io import file_lock
+from PIL import Image
+
+from custom_detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
+from custom_detectron2.utils.file_io import PathManager
+
+from .. import DatasetCatalog, MetadataCatalog
+
+"""
+This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
+"""
+
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
+
+
+def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
+ """
+ Load a json file with COCO's instances annotation format.
+ Currently supports instance detection, instance segmentation,
+ and person keypoints annotations.
+
+ Args:
+ json_file (str): full path to the json file in COCO instances annotation format.
+ image_root (str or path-like): the directory where the images in this json file exist.
+ dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
+ When provided, this function will also do the following:
+
+ * Put "thing_classes" into the metadata associated with this dataset.
+ * Map the category ids into a contiguous range (needed by standard dataset format),
+ and add "thing_dataset_id_to_contiguous_id" to the metadata associated
+ with this dataset.
+
+ This option should usually be provided, unless users need to load
+ the original json content and apply more processing manually.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
+ "category_id", "segmentation"). The values for these keys will be returned as-is.
+ For example, the densepose annotations are loaded in this way.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
+ `Using Custom Datasets `_ ) when `dataset_name` is not None.
+ If `dataset_name` is None, the returned `category_ids` may be
+ incontiguous and may not conform to the Detectron2 standard format.
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from custom_pycocotools.coco import COCO
+
+ timer = Timer()
+ json_file = PathManager.get_local_path(json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ coco_api = COCO(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+
+ id_map = None
+ if dataset_name is not None:
+ meta = MetadataCatalog.get(dataset_name)
+ cat_ids = sorted(coco_api.getCatIds())
+ cats = coco_api.loadCats(cat_ids)
+ # The categories in a custom json file may not be sorted.
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
+ meta.thing_classes = thing_classes
+
+ # In COCO, certain category ids are artificially removed,
+ # and by convention they are always ignored.
+ # We deal with COCO's id issue and translate
+ # the category ids to contiguous ids in [0, 80).
+
+ # It works by looking at the "categories" field in the json, therefore
+ # if a user's own json also has incontiguous ids, we'll
+ # apply this mapping as well but print a warning.
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
+ if "coco" not in dataset_name:
+ logger.warning(
+ """
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
+ )
+ id_map = {v: i for i, v in enumerate(cat_ids)}
+ meta.thing_dataset_id_to_contiguous_id = id_map
+
+ # sort indices for reproducible results
+ img_ids = sorted(coco_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = coco_api.loadImgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03,
+ # 249.06]],
+ # 'area': 1035.749,
+ # 'iscrowd': 0,
+ # 'image_id': 1268,
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
+ # 'category_id': 16,
+ # 'id': 42986},
+ # ...]
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
+ total_num_valid_anns = sum([len(x) for x in anns])
+ total_num_anns = len(coco_api.anns)
+ if total_num_valid_anns < total_num_anns:
+ logger.warning(
+ f"{json_file} contains {total_num_anns} annotations, but only "
+ f"{total_num_valid_anns} of them match to images in the file."
+ )
+
+ if "minival" not in json_file:
+ # The popular valminusminival & minival annotations for COCO2014 contain this bug.
+ # However the ratio of buggy annotations there is tiny and does not affect accuracy.
+ # Therefore we explicitly white-list them.
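+ # Editor's note: the comprehension below flattens list[list[dict]] into one
+ # id list, e.g. [[{"id": 1}], [{"id": 2}, {"id": 3}]] -> [1, 2, 3], so the
+ # set/len comparison catches duplicate annotation ids across the dataset.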
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) + + dataset_dicts = [] + + ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) + + num_instances_without_valid_segmentation = 0 + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = os.path.join(image_root, img_dict["file_name"]) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + + # The original COCO valminusminival2014 & minival2014 annotation files + # actually contains bugs that, together with certain ways of using COCO API, + # can trigger this assertion. + assert anno["image_id"] == image_id + + assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' + + obj = {key: anno[key] for key in ann_keys if key in anno} + if "bbox" in obj and len(obj["bbox"]) == 0: + raise ValueError( + f"One annotation of image {image_id} contains empty 'bbox' value! " + "This json does not have valid COCO format." + ) + + segm = anno.get("segmentation", None) + if segm: # either list[list[float]] or dict(RLE) + if isinstance(segm, dict): + if isinstance(segm["counts"], list): + # convert to compressed RLE + segm = mask_util.frPyObjects(segm, *segm["size"]) + else: + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + num_instances_without_valid_segmentation += 1 + continue # ignore this instance + obj["segmentation"] = segm + + keypts = anno.get("keypoints", None) + if keypts: # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + obj["bbox_mode"] = BoxMode.XYWH_ABS + if id_map: + annotation_category_id = obj["category_id"] + try: + obj["category_id"] = id_map[annotation_category_id] + except KeyError as e: + raise KeyError( + f"Encountered category_id={annotation_category_id} " + "but this id does not exist in 'categories' of the json file." + ) from e + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + if num_instances_without_valid_segmentation > 0: + logger.warning( + "Filtered out {} instances without valid segmentation. ".format( + num_instances_without_valid_segmentation + ) + + "There might be issues in your dataset generation process. Please " + "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" + ) + return dataset_dicts + + +def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): + """ + Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are + treated as ground truth annotations and all files under "image_root" with "image_ext" extension + as input images. 
Ground truth and input images are matched using file paths relative to
+ "gt_root" and "image_root" respectively without taking into account file extensions.
+ This works for COCO as well as some other datasets.
+
+ Args:
+ gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
+ annotations are stored as images with integer values in pixels that represent
+ corresponding semantic labels.
+ image_root (str): the directory where the input images are.
+ gt_ext (str): file extension for ground truth annotations.
+ image_ext (str): file extension for input images.
+
+ Returns:
+ list[dict]:
+ a list of dicts in detectron2 standard format without instance-level
+ annotation.
+
+ Notes:
+ 1. This function does not read the image and ground truth files.
+ The results do not have the "image" and "sem_seg" fields.
+ """
+
+ # We match input images with ground truth based on their relative filepaths (without file
+ # extensions) starting from 'image_root' and 'gt_root' respectively.
+ def file2id(folder_path, file_path):
+ # extract relative path starting from `folder_path`
+ image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
+ # remove file extension
+ image_id = os.path.splitext(image_id)[0]
+ return image_id
+
+ input_files = sorted(
+ (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
+ key=lambda file_path: file2id(image_root, file_path),
+ )
+ gt_files = sorted(
+ (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
+ key=lambda file_path: file2id(gt_root, file_path),
+ )
+
+ assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
+
+ # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
+ if len(input_files) != len(gt_files):
+ logger.warn(
+ "Directories {} and {} have {} and {} files, respectively.".format(
+ image_root, gt_root, len(input_files), len(gt_files)
+ )
+ )
+ input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
+ gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
+ intersect = list(set(input_basenames) & set(gt_basenames))
+ # sort, otherwise each worker may obtain a list[dict] in different order
+ intersect = sorted(intersect)
+ logger.warn("Will use their intersection of {} files.".format(len(intersect)))
+ input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
+ gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
+
+ logger.info(
+ "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
+ )
+
+ dataset_dicts = []
+ for (img_path, gt_path) in zip(input_files, gt_files):
+ record = {}
+ record["file_name"] = img_path
+ record["sem_seg_file_name"] = gt_path
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
+
+def convert_to_coco_dict(dataset_name):
+ """
+ Convert an instance detection/segmentation or keypoint detection dataset
+ in detectron2's standard format into COCO json format.
+
+ Generic dataset description can be found here:
+ https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
+
+ COCO data format description can be found here:
+ http://cocodataset.org/#format-data
+
+ Args:
+ dataset_name (str):
+ name of the source dataset
+ Must be registered in DatasetCatalog and in detectron2's standard format.
+ Must have corresponding metadata "thing_classes"
+ Returns:
+ coco_dict: serializable dict in COCO json format
+ """
+
+ dataset_dicts = DatasetCatalog.get(dataset_name)
+ metadata = MetadataCatalog.get(dataset_name)
+
+ # unmap the category mapping ids for COCO
+ if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
+ reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
+ else:
+ reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
+
+ categories = [
+ {"id": reverse_id_mapper(id), "name": name}
+ for id, name in enumerate(metadata.thing_classes)
+ ]
+
+ logger.info("Converting dataset dicts into COCO format")
+ coco_images = []
+ coco_annotations = []
+
+ for image_id, image_dict in enumerate(dataset_dicts):
+ coco_image = {
+ "id": image_dict.get("image_id", image_id),
+ "width": int(image_dict["width"]),
+ "height": int(image_dict["height"]),
+ "file_name": str(image_dict["file_name"]),
+ }
+ coco_images.append(coco_image)
+
+ anns_per_image = image_dict.get("annotations", [])
+ for annotation in anns_per_image:
+ # create a new dict with only COCO fields
+ coco_annotation = {}
+
+ # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
+ bbox = annotation["bbox"]
+ if isinstance(bbox, np.ndarray):
+ if bbox.ndim != 1:
+ raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
+ bbox = bbox.tolist()
+ if len(bbox) not in [4, 5]:
+ raise ValueError(f"bbox has to have length 4 or 5. Got {bbox}.")
+ from_bbox_mode = annotation["bbox_mode"]
+ to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
+ bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
+
+ # COCO requirement: instance area
+ if "segmentation" in annotation:
+ # Computing areas for instances by counting the pixels
+ segmentation = annotation["segmentation"]
+ # TODO: check segmentation type: RLE, BinaryMask or Polygon
+ if isinstance(segmentation, list):
+ polygons = PolygonMasks([segmentation])
+ area = polygons.area()[0].item()
+ elif isinstance(segmentation, dict): # RLE
+ area = mask_util.area(segmentation).item()
+ else:
+ raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
+ else:
+ # Computing areas using bounding boxes
+ if to_bbox_mode == BoxMode.XYWH_ABS:
+ bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
+ area = Boxes([bbox_xy]).area()[0].item()
+ else:
+ area = RotatedBoxes([bbox]).area()[0].item()
+
+ if "keypoints" in annotation:
+ keypoints = annotation["keypoints"] # list[int]
+ for idx, v in enumerate(keypoints):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # For COCO format consistency we subtract 0.5
+ # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
+ keypoints[idx] = v - 0.5
+ if "num_keypoints" in annotation:
+ num_keypoints = annotation["num_keypoints"]
+ else:
+ num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
+
+ # COCO requirement:
+ # linking annotations to images
+ # "id" field must start with 1
+ coco_annotation["id"] = len(coco_annotations) + 1
+ coco_annotation["image_id"] = coco_image["id"]
+ coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
+ coco_annotation["area"] = float(area)
+ coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
+ coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
+
+ # Add optional fields
+ if "keypoints" in annotation:
+ coco_annotation["keypoints"] = keypoints
+ coco_annotation["num_keypoints"] = num_keypoints
+
+ if "segmentation" in annotation:
+ seg = coco_annotation["segmentation"] = annotation["segmentation"]
+ if isinstance(seg, dict): # RLE
+ counts = seg["counts"]
+ if not isinstance(counts, str):
+ # make it json-serializable
+ seg["counts"] = counts.decode("ascii")
+
+ coco_annotations.append(coco_annotation)
+
+ logger.info(
+ "Conversion finished, "
+ f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
+ )
+
+ info = {
+ "date_created": str(datetime.datetime.now()),
+ "description": "Automatically generated COCO json file for Detectron2.",
+ }
+ coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
+ if len(coco_annotations) > 0:
+ coco_dict["annotations"] = coco_annotations
+ return coco_dict
+
+
+def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
+ """
+ Converts dataset into COCO format and saves it to a json file.
+ dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
+
+ Args:
+ dataset_name:
+ reference from the config file to the catalogs
+ must be registered in DatasetCatalog and in detectron2's standard format
+ output_file: path of json file that will be saved to
+ allow_cached: if json file is already present then skip conversion
+ """
+
+ # TODO: The dataset or the conversion script *may* change,
+ # a checksum would be useful for validating the cached data
+
+ PathManager.mkdirs(os.path.dirname(output_file))
+ with file_lock(output_file):
+ if PathManager.exists(output_file) and allow_cached:
+ logger.warning(
+ f"Using previously cached COCO format annotations at '{output_file}'. "
+ "You need to clear the cache file if your dataset has been modified."
+ )
+ else:
+ logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...")
+ coco_dict = convert_to_coco_dict(dataset_name)
+
+ logger.info(f"Caching COCO format annotations at '{output_file}' ...")
+ tmp_file = output_file + ".tmp"
+ with PathManager.open(tmp_file, "w") as f:
+ json.dump(coco_dict, f)
+ shutil.move(tmp_file, output_file)
+
+
+def register_coco_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in COCO's json annotation format for
+ instance detection, instance segmentation and keypoint detection.
+ (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
+ `instances*.json` and `person_keypoints*.json` in the dataset).
+
+ This is an example of how to register a new dataset.
+ You can do something similar to this function, to register new datasets.
+
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ metadata (dict): extra metadata associated with this dataset. You can
+ leave it as an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ assert isinstance(name, str), name
+ assert isinstance(json_file, (str, os.PathLike)), json_file
+ assert isinstance(image_root, (str, os.PathLike)), image_root
+ # 1. register a function which returns dicts
+ DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
+
+ # 2. Optionally, add metadata about this dataset,
+ # since they might be useful in evaluation, visualization or logging
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
+ )
+
+
+if __name__ == "__main__":
+ """
+ Test the COCO json dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.coco \
+ path/to/json path/to/image_root dataset_name
+
+ "dataset_name" can be "coco_2014_minival_100", or other
+ pre-registered ones
+ """
+ from custom_detectron2.utils.logger import setup_logger
+ from custom_detectron2.utils.visualizer import Visualizer
+ import custom_detectron2.data.datasets # noqa # add pre-defined metadata
+ import sys
+
+ logger = setup_logger(name=__name__)
+ assert sys.argv[3] in DatasetCatalog.list()
+ meta = MetadataCatalog.get(sys.argv[3])
+
+ dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "coco-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ for d in dicts:
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco_panoptic.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..e194be84d25c20fa83e1f93a6bf81bd6fd991970
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/coco_panoptic.py
@@ -0,0 +1,228 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import json
+import os
+
+from custom_detectron2.data import DatasetCatalog, MetadataCatalog
+from custom_detectron2.utils.file_io import PathManager
+
+from .coco import load_coco_json, load_sem_seg
+
+__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
+
+
+def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
+ gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
+ json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+ """
+
+ def _convert_category_id(segment_info, meta):
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = True
+ else:
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = False
+ return segment_info
+
+ with PathManager.open(json_file) as f:
+ json_info = json.load(f)
+
+ ret = []
+ for ann in json_info["annotations"]:
+ image_id = int(ann["image_id"])
+ # TODO: currently we assume image and label have the same filename but
+ # different extension, and images have extension ".jpg" for COCO. Need
+ # to make image extension a user-provided argument if we extend this
+ # function to support other COCO-like datasets.
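+ # Editor's illustration of the extension swap below (file name is hypothetical):
+ # os.path.splitext("000000000139.png")[0] + ".jpg" -> "000000000139.jpg"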
+ image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
+ label_file = os.path.join(gt_dir, ann["file_name"])
+ segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
+ ret.append(
+ {
+ "file_name": image_file,
+ "image_id": image_id,
+ "pan_seg_file_name": label_file,
+ "segments_info": segments_info,
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
+ assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
+ return ret
+
+
+def register_coco_panoptic(
+ name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
+):
+ """
+ Register a "standard" version of COCO panoptic segmentation dataset named `name`.
+ The dictionaries in this registered dataset follow detectron2's standard format.
+ Hence it's called "standard".
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images in COCO format
+ panoptic_json (str): path to the json panoptic annotation file in COCO format
+ sem_seg_root (none): not used, to be consistent with
+ `register_coco_panoptic_separated`.
+ instances_json (str): path to the json instance annotation file
+ """
+ panoptic_name = name
+ DatasetCatalog.register(
+ panoptic_name,
+ lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
+ )
+ MetadataCatalog.get(panoptic_name).set(
+ panoptic_root=panoptic_root,
+ image_root=image_root,
+ panoptic_json=panoptic_json,
+ json_file=instances_json,
+ evaluator_type="coco_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **metadata,
+ )
+
+
+def register_coco_panoptic_separated(
+ name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
+):
+ """
+ Register a "separated" version of COCO panoptic segmentation dataset named `name`.
+ The annotations in this registered dataset will contain both instance annotations and
+ semantic annotations, each with its own contiguous ids. Hence it's called "separated".
+
+ It follows the setting used by the PanopticFPN paper:
+
+ 1. The instance annotations directly come from polygons in the COCO
+ instances annotation task, rather than from the masks in the COCO panoptic annotations.
+
+ The two formats have small differences:
+ Polygons in the instance annotations may have overlaps.
+ The mask annotations are produced by labeling the overlapped polygons
+ with depth ordering.
+
+ 2. The semantic annotations are converted from panoptic annotations, where
+ all "things" are assigned a semantic id of 0.
+ All semantic categories will therefore have ids in contiguous
+ range [1, #stuff_categories].
+
+ This function will also register a pure semantic segmentation dataset
+ named ``name + '_stuffonly'``.
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images
+ panoptic_json (str): path to the json panoptic annotation file
+ sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
+ instances_json (str): path to the json instance annotation file + """ + panoptic_name = name + "_separated" + DatasetCatalog.register( + panoptic_name, + lambda: merge_to_panoptic( + load_coco_json(instances_json, image_root, panoptic_name), + load_sem_seg(sem_seg_root, image_root), + ), + ) + MetadataCatalog.get(panoptic_name).set( + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + sem_seg_root=sem_seg_root, + json_file=instances_json, # TODO rename + evaluator_type="coco_panoptic_seg", + ignore_label=255, + **metadata, + ) + + semantic_name = name + "_stuffonly" + DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) + MetadataCatalog.get(semantic_name).set( + sem_seg_root=sem_seg_root, + image_root=image_root, + evaluator_type="sem_seg", + ignore_label=255, + **metadata, + ) + + +def merge_to_panoptic(detection_dicts, sem_seg_dicts): + """ + Create dataset dicts for panoptic segmentation, by + merging two dicts using "file_name" field to match their entries. + + Args: + detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation. + sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation. + + Returns: + list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in + both detection_dicts and sem_seg_dicts that correspond to the same image. + The function assumes that the same key in different dicts has the same value. + """ + results = [] + sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts} + assert len(sem_seg_file_to_entry) > 0 + + for det_dict in detection_dicts: + dic = copy.copy(det_dict) + dic.update(sem_seg_file_to_entry[dic["file_name"]]) + results.append(dic) + return results + + +if __name__ == "__main__": + """ + Test the COCO panoptic dataset loader. + + Usage: + python -m detectron2.data.datasets.coco_panoptic \ + path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10 + + "dataset_name" can be "coco_2017_train_panoptic", or other + pre-registered ones + """ + from custom_detectron2.utils.logger import setup_logger + from custom_detectron2.utils.visualizer import Visualizer + import custom_detectron2.data.datasets # noqa # add pre-defined metadata + import sys + from PIL import Image + import numpy as np + + logger = setup_logger(name=__name__) + assert sys.argv[4] in DatasetCatalog.list() + meta = MetadataCatalog.get(sys.argv[4]) + + dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict()) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "coco-data-vis" + os.makedirs(dirname, exist_ok=True) + num_imgs_to_vis = int(sys.argv[5]) + for i, d in enumerate(dicts): + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) + if i + 1 >= num_imgs_to_vis: + break diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis.py new file mode 100644 index 0000000000000000000000000000000000000000..337917c1e053c97163a300022702afb8ec2215c5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis.py @@ -0,0 +1,241 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import logging
+import os
+from fvcore.common.timer import Timer
+
+from custom_detectron2.data import DatasetCatalog, MetadataCatalog
+from custom_detectron2.structures import BoxMode
+from custom_detectron2.utils.file_io import PathManager
+
+from .builtin_meta import _get_coco_instances_meta
+from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
+from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
+from .lvis_v1_category_image_count import LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT
+
+"""
+This file contains functions to parse LVIS-format annotations into dicts in the
+"Detectron2 format".
+"""
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
+
+
+def register_lvis_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in LVIS's json annotation format for instance detection and segmentation.
+
+ Args:
+ name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
+ metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
+ )
+
+
+def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
+ """
+ Load a json file in LVIS's annotation format.
+
+ Args:
+ json_file (str): full path to the LVIS json annotation file.
+ image_root (str): the directory where the images in this json file exist.
+ dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
+ If provided, this function will put "thing_classes" into the metadata
+ associated with this dataset.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
+ "segmentation"). The values for these keys will be returned as-is.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets `_ )
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from lvis import LVIS
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+
+ if dataset_name is not None:
+ meta = get_lvis_instances_meta(dataset_name)
+ MetadataCatalog.get(dataset_name).set(**meta)
+
+ # sort indices for reproducible results
+ img_ids = sorted(lvis_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = lvis_api.load_imgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] + anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] + + # Sanity check that each annotation has a unique id + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + + logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)) + + if extra_annotation_keys: + logger.info( + "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys) + ) + else: + extra_annotation_keys = [] + + def get_file_name(img_root, img_dict): + # Determine the path including the split folder ("train2017", "val2017", "test2017") from + # the coco_url field. Example: + # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg' + split_folder, file_name = img_dict["coco_url"].split("/")[-2:] + return os.path.join(img_root + split_folder, file_name) + + dataset_dicts = [] + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = get_file_name(image_root, img_dict) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) + record["neg_category_ids"] = img_dict.get("neg_category_ids", []) + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + assert anno["image_id"] == image_id + obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} + # LVIS data loader can be used to load COCO dataset categories. In this case `meta` + # variable will have a field with COCO-specific category mapping. + if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta: + obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]] + else: + obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed + segm = anno["segmentation"] # list[list[float]] + # filter out invalid polygons (< 3 points) + valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + assert len(segm) == len( + valid_segm + ), "Annotation contains an invalid polygon with < 3 points" + assert len(segm) > 0 + obj["segmentation"] = segm + for extra_ann_key in extra_annotation_keys: + obj[extra_ann_key] = anno[extra_ann_key] + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + return dataset_dicts + + +def get_lvis_instances_meta(dataset_name): + """ + Load LVIS metadata. + + Args: + dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5"). 
+ + Returns: + dict: LVIS metadata with keys: thing_classes + """ + if "cocofied" in dataset_name: + return _get_coco_instances_meta() + if "v0.5" in dataset_name: + return _get_lvis_instances_meta_v0_5() + elif "v1" in dataset_name: + return _get_lvis_instances_meta_v1() + raise ValueError("No built-in metadata for dataset {}".format(dataset_name)) + + +def _get_lvis_instances_meta_v0_5(): + assert len(LVIS_V0_5_CATEGORIES) == 1230 + cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES] + assert min(cat_ids) == 1 and max(cat_ids) == len( + cat_ids + ), "Category ids are not in [1, #categories], as expected" + # Ensure that the category list is sorted by id + lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"]) + thing_classes = [k["synonyms"][0] for k in lvis_categories] + meta = {"thing_classes": thing_classes} + return meta + + +def _get_lvis_instances_meta_v1(): + assert len(LVIS_V1_CATEGORIES) == 1203 + cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES] + assert min(cat_ids) == 1 and max(cat_ids) == len( + cat_ids + ), "Category ids are not in [1, #categories], as expected" + # Ensure that the category list is sorted by id + lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"]) + thing_classes = [k["synonyms"][0] for k in lvis_categories] + meta = {"thing_classes": thing_classes, "class_image_count": LVIS_V1_CATEGORY_IMAGE_COUNT} + return meta + + +if __name__ == "__main__": + """ + Test the LVIS json dataset loader. + + Usage: + python -m detectron2.data.datasets.lvis \ + path/to/json path/to/image_root dataset_name vis_limit + """ + import sys + import numpy as np + from custom_detectron2.utils.logger import setup_logger + from PIL import Image + import custom_detectron2.data.datasets # noqa # add pre-defined metadata + from custom_detectron2.utils.visualizer import Visualizer + + logger = setup_logger(name=__name__) + meta = MetadataCatalog.get(sys.argv[3]) + + dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3]) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "lvis-data-vis" + os.makedirs(dirname, exist_ok=True) + for d in dicts[: int(sys.argv[4])]: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v0_5_categories.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v0_5_categories.py new file mode 100644 index 0000000000000000000000000000000000000000..d3dab6198da614937b08682f4c9edf52bdf1d236 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v0_5_categories.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Autogen with +# with open("lvis_v0.5_val.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["image_count"] +# del x["instance_count"] +# LVIS_CATEGORIES = repr(c) + " # noqa" + +# fmt: off +LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 
'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 
'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a 
bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to 
sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 
'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 
'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 
'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 
'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 
'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 
'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 
'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, 
{'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small 
writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 
'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, {'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 
'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 
'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 
'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for 
holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 
'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 
'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 
'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 
'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 
'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibian with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name':
'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by 
sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 
'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 
'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather or sun, or is worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing
relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 
'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': 
['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect 
clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant 
commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 
'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. 
potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing 
foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'a drinking vessel with a handle, usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, 
{'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, 
{'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide 
metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home 
country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 
'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a 
stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 
'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 
'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, 
{'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the 
snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of 
which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, 
{'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 
'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 
'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for 
travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 
'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) 
that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 
'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 
'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a 
crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; 
can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, 
{'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three 
wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 
'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides 
storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more 
tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 
'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa +# fmt: on diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_categories.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_categories.py new file mode 100644 index 0000000000000000000000000000000000000000..7374e6968bb006f5d8c49e75d9d3b31ea3d77d05 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_categories.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Autogen with +# with open("lvis_v1_val.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["image_count"] +# del x["instance_count"] +# LVIS_CATEGORIES = repr(c) + " # noqa" +# with open("/tmp/lvis_categories.py", "wt") as f: +# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}") +# Then paste the contents of that file below + +# fmt: off +LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 
'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 
37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 
'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': 
['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'a device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 
'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mixes or chops or liquefies foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film 
or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 
'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove covering the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, 
{'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 
'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 
'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has a top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of a cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibian with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather or sun, or is worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of a car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightbulb/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'a drinking vessel with a handle, usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in a knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms':
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'a flat piece of soft padding used for protection; mostly arm/knee pads are labeled with this category', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has a very sharp edge', 'name':
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms':
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention on a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa +# fmt: on diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_category_image_count.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_category_image_count.py new file mode 100644 index 0000000000000000000000000000000000000000..31bf0cfcd5096ab87835db86a28671d474514c40 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/lvis_v1_category_image_count.py @@ -0,0 +1,20 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
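(Editor's note, not part of the diff: the autogen comment immediately below sketches how this file's LVIS_CATEGORY_IMAGE_COUNT table was produced from the LVIS v1 training annotations. A minimal runnable version of that sketch — adding only the missing `import json`, and assuming `lvis_v1_train.json` from the LVIS v1 release is in the working directory and that its category records carry every key the comment deletes — would be:

    import json

    # Load the LVIS v1 training annotations (path is an assumption).
    with open("lvis_v1_train.json", "r") as f:
        a = json.load(f)
    c = a["categories"]

    # Strip every field except 'id' and 'image_count', matching the
    # deletions listed in the file's autogen comment.
    for x in c:
        del x["name"]
        del x["instance_count"]
        del x["def"]
        del x["synonyms"]
        del x["frequency"]
        del x["synset"]

    # Render the pruned list as a single Python literal and write it out,
    # ready to be pasted below the '# fmt: off' marker.
    LVIS_CATEGORY_IMAGE_COUNT = repr(c) + "  # noqa"
    with open("/tmp/lvis_category_image_count.py", "wt") as f:
        f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}")

The resulting per-category image counts pair with the category ids in lvis_v1_categories.py above; the generated literal is kept on one line under `# fmt: off` so formatters leave the autogenerated data untouched.)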
+# Autogen with +# with open("lvis_v1_train.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["name"] +# del x["instance_count"] +# del x["def"] +# del x["synonyms"] +# del x["frequency"] +# del x["synset"] +# LVIS_CATEGORY_IMAGE_COUNT = repr(c) + " # noqa" +# with open("/tmp/lvis_category_image_count.py", "wt") as f: +# f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}") +# Then paste the contents of that file below + +# fmt: off +LVIS_CATEGORY_IMAGE_COUNT = [{'id': 1, 'image_count': 64}, {'id': 2, 'image_count': 364}, {'id': 3, 'image_count': 1911}, {'id': 4, 'image_count': 149}, {'id': 5, 'image_count': 29}, {'id': 6, 'image_count': 26}, {'id': 7, 'image_count': 59}, {'id': 8, 'image_count': 22}, {'id': 9, 'image_count': 12}, {'id': 10, 'image_count': 28}, {'id': 11, 'image_count': 505}, {'id': 12, 'image_count': 1207}, {'id': 13, 'image_count': 4}, {'id': 14, 'image_count': 10}, {'id': 15, 'image_count': 500}, {'id': 16, 'image_count': 33}, {'id': 17, 'image_count': 3}, {'id': 18, 'image_count': 44}, {'id': 19, 'image_count': 561}, {'id': 20, 'image_count': 8}, {'id': 21, 'image_count': 9}, {'id': 22, 'image_count': 33}, {'id': 23, 'image_count': 1883}, {'id': 24, 'image_count': 98}, {'id': 25, 'image_count': 70}, {'id': 26, 'image_count': 46}, {'id': 27, 'image_count': 117}, {'id': 28, 'image_count': 41}, {'id': 29, 'image_count': 1395}, {'id': 30, 'image_count': 7}, {'id': 31, 'image_count': 1}, {'id': 32, 'image_count': 314}, {'id': 33, 'image_count': 31}, {'id': 34, 'image_count': 1905}, {'id': 35, 'image_count': 1859}, {'id': 36, 'image_count': 1623}, {'id': 37, 'image_count': 47}, {'id': 38, 'image_count': 3}, {'id': 39, 'image_count': 3}, {'id': 40, 'image_count': 1}, {'id': 41, 'image_count': 305}, {'id': 42, 'image_count': 6}, {'id': 43, 'image_count': 210}, {'id': 44, 'image_count': 36}, {'id': 45, 'image_count': 1787}, {'id': 46, 'image_count': 17}, {'id': 47, 'image_count': 51}, {'id': 48, 'image_count': 138}, {'id': 49, 'image_count': 3}, {'id': 50, 'image_count': 1470}, {'id': 51, 'image_count': 3}, {'id': 52, 'image_count': 2}, {'id': 53, 'image_count': 186}, {'id': 54, 'image_count': 76}, {'id': 55, 'image_count': 26}, {'id': 56, 'image_count': 303}, {'id': 57, 'image_count': 738}, {'id': 58, 'image_count': 1799}, {'id': 59, 'image_count': 1934}, {'id': 60, 'image_count': 1609}, {'id': 61, 'image_count': 1622}, {'id': 62, 'image_count': 41}, {'id': 63, 'image_count': 4}, {'id': 64, 'image_count': 11}, {'id': 65, 'image_count': 270}, {'id': 66, 'image_count': 349}, {'id': 67, 'image_count': 42}, {'id': 68, 'image_count': 823}, {'id': 69, 'image_count': 6}, {'id': 70, 'image_count': 48}, {'id': 71, 'image_count': 3}, {'id': 72, 'image_count': 42}, {'id': 73, 'image_count': 24}, {'id': 74, 'image_count': 16}, {'id': 75, 'image_count': 605}, {'id': 76, 'image_count': 646}, {'id': 77, 'image_count': 1765}, {'id': 78, 'image_count': 2}, {'id': 79, 'image_count': 125}, {'id': 80, 'image_count': 1420}, {'id': 81, 'image_count': 140}, {'id': 82, 'image_count': 4}, {'id': 83, 'image_count': 322}, {'id': 84, 'image_count': 60}, {'id': 85, 'image_count': 2}, {'id': 86, 'image_count': 231}, {'id': 87, 'image_count': 333}, {'id': 88, 'image_count': 1941}, {'id': 89, 'image_count': 367}, {'id': 90, 'image_count': 1922}, {'id': 91, 'image_count': 18}, {'id': 92, 'image_count': 81}, {'id': 93, 'image_count': 1}, {'id': 94, 'image_count': 1852}, {'id': 95, 'image_count': 430}, {'id': 96, 'image_count': 247}, {'id': 97, 'image_count': 
94}, {'id': 98, 'image_count': 21}, {'id': 99, 'image_count': 1821}, {'id': 100, 'image_count': 16}, {'id': 101, 'image_count': 12}, {'id': 102, 'image_count': 25}, {'id': 103, 'image_count': 41}, {'id': 104, 'image_count': 244}, {'id': 105, 'image_count': 7}, {'id': 106, 'image_count': 1}, {'id': 107, 'image_count': 40}, {'id': 108, 'image_count': 40}, {'id': 109, 'image_count': 104}, {'id': 110, 'image_count': 1671}, {'id': 111, 'image_count': 49}, {'id': 112, 'image_count': 243}, {'id': 113, 'image_count': 2}, {'id': 114, 'image_count': 242}, {'id': 115, 'image_count': 271}, {'id': 116, 'image_count': 104}, {'id': 117, 'image_count': 8}, {'id': 118, 'image_count': 1758}, {'id': 119, 'image_count': 1}, {'id': 120, 'image_count': 48}, {'id': 121, 'image_count': 14}, {'id': 122, 'image_count': 40}, {'id': 123, 'image_count': 1}, {'id': 124, 'image_count': 37}, {'id': 125, 'image_count': 1510}, {'id': 126, 'image_count': 6}, {'id': 127, 'image_count': 1903}, {'id': 128, 'image_count': 70}, {'id': 129, 'image_count': 86}, {'id': 130, 'image_count': 7}, {'id': 131, 'image_count': 5}, {'id': 132, 'image_count': 1406}, {'id': 133, 'image_count': 1901}, {'id': 134, 'image_count': 15}, {'id': 135, 'image_count': 28}, {'id': 136, 'image_count': 6}, {'id': 137, 'image_count': 494}, {'id': 138, 'image_count': 234}, {'id': 139, 'image_count': 1922}, {'id': 140, 'image_count': 1}, {'id': 141, 'image_count': 35}, {'id': 142, 'image_count': 5}, {'id': 143, 'image_count': 1828}, {'id': 144, 'image_count': 8}, {'id': 145, 'image_count': 63}, {'id': 146, 'image_count': 1668}, {'id': 147, 'image_count': 4}, {'id': 148, 'image_count': 95}, {'id': 149, 'image_count': 17}, {'id': 150, 'image_count': 1567}, {'id': 151, 'image_count': 2}, {'id': 152, 'image_count': 103}, {'id': 153, 'image_count': 50}, {'id': 154, 'image_count': 1309}, {'id': 155, 'image_count': 6}, {'id': 156, 'image_count': 92}, {'id': 157, 'image_count': 19}, {'id': 158, 'image_count': 37}, {'id': 159, 'image_count': 4}, {'id': 160, 'image_count': 709}, {'id': 161, 'image_count': 9}, {'id': 162, 'image_count': 82}, {'id': 163, 'image_count': 15}, {'id': 164, 'image_count': 3}, {'id': 165, 'image_count': 61}, {'id': 166, 'image_count': 51}, {'id': 167, 'image_count': 5}, {'id': 168, 'image_count': 13}, {'id': 169, 'image_count': 642}, {'id': 170, 'image_count': 24}, {'id': 171, 'image_count': 255}, {'id': 172, 'image_count': 9}, {'id': 173, 'image_count': 1808}, {'id': 174, 'image_count': 31}, {'id': 175, 'image_count': 158}, {'id': 176, 'image_count': 80}, {'id': 177, 'image_count': 1884}, {'id': 178, 'image_count': 158}, {'id': 179, 'image_count': 2}, {'id': 180, 'image_count': 12}, {'id': 181, 'image_count': 1659}, {'id': 182, 'image_count': 7}, {'id': 183, 'image_count': 834}, {'id': 184, 'image_count': 57}, {'id': 185, 'image_count': 174}, {'id': 186, 'image_count': 95}, {'id': 187, 'image_count': 27}, {'id': 188, 'image_count': 22}, {'id': 189, 'image_count': 1391}, {'id': 190, 'image_count': 90}, {'id': 191, 'image_count': 40}, {'id': 192, 'image_count': 445}, {'id': 193, 'image_count': 21}, {'id': 194, 'image_count': 1132}, {'id': 195, 'image_count': 177}, {'id': 196, 'image_count': 4}, {'id': 197, 'image_count': 17}, {'id': 198, 'image_count': 84}, {'id': 199, 'image_count': 55}, {'id': 200, 'image_count': 30}, {'id': 201, 'image_count': 25}, {'id': 202, 'image_count': 2}, {'id': 203, 'image_count': 125}, {'id': 204, 'image_count': 1135}, {'id': 205, 'image_count': 19}, {'id': 206, 'image_count': 72}, {'id': 207, 'image_count': 1926}, 
{'id': 208, 'image_count': 159}, {'id': 209, 'image_count': 7}, {'id': 210, 'image_count': 1}, {'id': 211, 'image_count': 13}, {'id': 212, 'image_count': 35}, {'id': 213, 'image_count': 18}, {'id': 214, 'image_count': 8}, {'id': 215, 'image_count': 6}, {'id': 216, 'image_count': 35}, {'id': 217, 'image_count': 1222}, {'id': 218, 'image_count': 103}, {'id': 219, 'image_count': 28}, {'id': 220, 'image_count': 63}, {'id': 221, 'image_count': 28}, {'id': 222, 'image_count': 5}, {'id': 223, 'image_count': 7}, {'id': 224, 'image_count': 14}, {'id': 225, 'image_count': 1918}, {'id': 226, 'image_count': 133}, {'id': 227, 'image_count': 16}, {'id': 228, 'image_count': 27}, {'id': 229, 'image_count': 110}, {'id': 230, 'image_count': 1895}, {'id': 231, 'image_count': 4}, {'id': 232, 'image_count': 1927}, {'id': 233, 'image_count': 8}, {'id': 234, 'image_count': 1}, {'id': 235, 'image_count': 263}, {'id': 236, 'image_count': 10}, {'id': 237, 'image_count': 2}, {'id': 238, 'image_count': 3}, {'id': 239, 'image_count': 87}, {'id': 240, 'image_count': 9}, {'id': 241, 'image_count': 71}, {'id': 242, 'image_count': 13}, {'id': 243, 'image_count': 18}, {'id': 244, 'image_count': 2}, {'id': 245, 'image_count': 5}, {'id': 246, 'image_count': 45}, {'id': 247, 'image_count': 1}, {'id': 248, 'image_count': 23}, {'id': 249, 'image_count': 32}, {'id': 250, 'image_count': 4}, {'id': 251, 'image_count': 1}, {'id': 252, 'image_count': 858}, {'id': 253, 'image_count': 661}, {'id': 254, 'image_count': 168}, {'id': 255, 'image_count': 210}, {'id': 256, 'image_count': 65}, {'id': 257, 'image_count': 4}, {'id': 258, 'image_count': 2}, {'id': 259, 'image_count': 159}, {'id': 260, 'image_count': 31}, {'id': 261, 'image_count': 811}, {'id': 262, 'image_count': 1}, {'id': 263, 'image_count': 42}, {'id': 264, 'image_count': 27}, {'id': 265, 'image_count': 2}, {'id': 266, 'image_count': 5}, {'id': 267, 'image_count': 95}, {'id': 268, 'image_count': 32}, {'id': 269, 'image_count': 1}, {'id': 270, 'image_count': 1}, {'id': 271, 'image_count': 1844}, {'id': 272, 'image_count': 897}, {'id': 273, 'image_count': 31}, {'id': 274, 'image_count': 23}, {'id': 275, 'image_count': 1}, {'id': 276, 'image_count': 202}, {'id': 277, 'image_count': 746}, {'id': 278, 'image_count': 44}, {'id': 279, 'image_count': 14}, {'id': 280, 'image_count': 26}, {'id': 281, 'image_count': 1}, {'id': 282, 'image_count': 2}, {'id': 283, 'image_count': 25}, {'id': 284, 'image_count': 238}, {'id': 285, 'image_count': 592}, {'id': 286, 'image_count': 26}, {'id': 287, 'image_count': 5}, {'id': 288, 'image_count': 42}, {'id': 289, 'image_count': 13}, {'id': 290, 'image_count': 46}, {'id': 291, 'image_count': 1}, {'id': 292, 'image_count': 8}, {'id': 293, 'image_count': 34}, {'id': 294, 'image_count': 5}, {'id': 295, 'image_count': 1}, {'id': 296, 'image_count': 1871}, {'id': 297, 'image_count': 717}, {'id': 298, 'image_count': 1010}, {'id': 299, 'image_count': 679}, {'id': 300, 'image_count': 3}, {'id': 301, 'image_count': 4}, {'id': 302, 'image_count': 1}, {'id': 303, 'image_count': 166}, {'id': 304, 'image_count': 2}, {'id': 305, 'image_count': 266}, {'id': 306, 'image_count': 101}, {'id': 307, 'image_count': 6}, {'id': 308, 'image_count': 14}, {'id': 309, 'image_count': 133}, {'id': 310, 'image_count': 2}, {'id': 311, 'image_count': 38}, {'id': 312, 'image_count': 95}, {'id': 313, 'image_count': 1}, {'id': 314, 'image_count': 12}, {'id': 315, 'image_count': 49}, {'id': 316, 'image_count': 5}, {'id': 317, 'image_count': 5}, {'id': 318, 'image_count': 16}, {'id': 
319, 'image_count': 216}, {'id': 320, 'image_count': 12}, {'id': 321, 'image_count': 1}, {'id': 322, 'image_count': 54}, {'id': 323, 'image_count': 5}, {'id': 324, 'image_count': 245}, {'id': 325, 'image_count': 12}, {'id': 326, 'image_count': 7}, {'id': 327, 'image_count': 35}, {'id': 328, 'image_count': 36}, {'id': 329, 'image_count': 32}, {'id': 330, 'image_count': 1027}, {'id': 331, 'image_count': 10}, {'id': 332, 'image_count': 12}, {'id': 333, 'image_count': 1}, {'id': 334, 'image_count': 67}, {'id': 335, 'image_count': 71}, {'id': 336, 'image_count': 30}, {'id': 337, 'image_count': 48}, {'id': 338, 'image_count': 249}, {'id': 339, 'image_count': 13}, {'id': 340, 'image_count': 29}, {'id': 341, 'image_count': 14}, {'id': 342, 'image_count': 236}, {'id': 343, 'image_count': 15}, {'id': 344, 'image_count': 1521}, {'id': 345, 'image_count': 25}, {'id': 346, 'image_count': 249}, {'id': 347, 'image_count': 139}, {'id': 348, 'image_count': 2}, {'id': 349, 'image_count': 2}, {'id': 350, 'image_count': 1890}, {'id': 351, 'image_count': 1240}, {'id': 352, 'image_count': 1}, {'id': 353, 'image_count': 9}, {'id': 354, 'image_count': 1}, {'id': 355, 'image_count': 3}, {'id': 356, 'image_count': 11}, {'id': 357, 'image_count': 4}, {'id': 358, 'image_count': 236}, {'id': 359, 'image_count': 44}, {'id': 360, 'image_count': 19}, {'id': 361, 'image_count': 1100}, {'id': 362, 'image_count': 7}, {'id': 363, 'image_count': 69}, {'id': 364, 'image_count': 2}, {'id': 365, 'image_count': 8}, {'id': 366, 'image_count': 5}, {'id': 367, 'image_count': 227}, {'id': 368, 'image_count': 6}, {'id': 369, 'image_count': 106}, {'id': 370, 'image_count': 81}, {'id': 371, 'image_count': 17}, {'id': 372, 'image_count': 134}, {'id': 373, 'image_count': 312}, {'id': 374, 'image_count': 8}, {'id': 375, 'image_count': 271}, {'id': 376, 'image_count': 2}, {'id': 377, 'image_count': 103}, {'id': 378, 'image_count': 1938}, {'id': 379, 'image_count': 574}, {'id': 380, 'image_count': 120}, {'id': 381, 'image_count': 2}, {'id': 382, 'image_count': 2}, {'id': 383, 'image_count': 13}, {'id': 384, 'image_count': 29}, {'id': 385, 'image_count': 1710}, {'id': 386, 'image_count': 66}, {'id': 387, 'image_count': 1008}, {'id': 388, 'image_count': 1}, {'id': 389, 'image_count': 3}, {'id': 390, 'image_count': 1942}, {'id': 391, 'image_count': 19}, {'id': 392, 'image_count': 1488}, {'id': 393, 'image_count': 46}, {'id': 394, 'image_count': 106}, {'id': 395, 'image_count': 115}, {'id': 396, 'image_count': 19}, {'id': 397, 'image_count': 2}, {'id': 398, 'image_count': 1}, {'id': 399, 'image_count': 28}, {'id': 400, 'image_count': 9}, {'id': 401, 'image_count': 192}, {'id': 402, 'image_count': 12}, {'id': 403, 'image_count': 21}, {'id': 404, 'image_count': 247}, {'id': 405, 'image_count': 6}, {'id': 406, 'image_count': 64}, {'id': 407, 'image_count': 7}, {'id': 408, 'image_count': 40}, {'id': 409, 'image_count': 542}, {'id': 410, 'image_count': 2}, {'id': 411, 'image_count': 1898}, {'id': 412, 'image_count': 36}, {'id': 413, 'image_count': 4}, {'id': 414, 'image_count': 1}, {'id': 415, 'image_count': 191}, {'id': 416, 'image_count': 6}, {'id': 417, 'image_count': 41}, {'id': 418, 'image_count': 39}, {'id': 419, 'image_count': 46}, {'id': 420, 'image_count': 1}, {'id': 421, 'image_count': 1451}, {'id': 422, 'image_count': 1878}, {'id': 423, 'image_count': 11}, {'id': 424, 'image_count': 82}, {'id': 425, 'image_count': 18}, {'id': 426, 'image_count': 1}, {'id': 427, 'image_count': 7}, {'id': 428, 'image_count': 3}, {'id': 429, 'image_count': 
575}, {'id': 430, 'image_count': 1907}, {'id': 431, 'image_count': 8}, {'id': 432, 'image_count': 4}, {'id': 433, 'image_count': 32}, {'id': 434, 'image_count': 11}, {'id': 435, 'image_count': 4}, {'id': 436, 'image_count': 54}, {'id': 437, 'image_count': 202}, {'id': 438, 'image_count': 32}, {'id': 439, 'image_count': 3}, {'id': 440, 'image_count': 130}, {'id': 441, 'image_count': 119}, {'id': 442, 'image_count': 141}, {'id': 443, 'image_count': 29}, {'id': 444, 'image_count': 525}, {'id': 445, 'image_count': 1323}, {'id': 446, 'image_count': 2}, {'id': 447, 'image_count': 113}, {'id': 448, 'image_count': 16}, {'id': 449, 'image_count': 7}, {'id': 450, 'image_count': 35}, {'id': 451, 'image_count': 1908}, {'id': 452, 'image_count': 353}, {'id': 453, 'image_count': 18}, {'id': 454, 'image_count': 14}, {'id': 455, 'image_count': 77}, {'id': 456, 'image_count': 8}, {'id': 457, 'image_count': 37}, {'id': 458, 'image_count': 1}, {'id': 459, 'image_count': 346}, {'id': 460, 'image_count': 19}, {'id': 461, 'image_count': 1779}, {'id': 462, 'image_count': 23}, {'id': 463, 'image_count': 25}, {'id': 464, 'image_count': 67}, {'id': 465, 'image_count': 19}, {'id': 466, 'image_count': 28}, {'id': 467, 'image_count': 4}, {'id': 468, 'image_count': 27}, {'id': 469, 'image_count': 1861}, {'id': 470, 'image_count': 11}, {'id': 471, 'image_count': 13}, {'id': 472, 'image_count': 13}, {'id': 473, 'image_count': 32}, {'id': 474, 'image_count': 1767}, {'id': 475, 'image_count': 42}, {'id': 476, 'image_count': 17}, {'id': 477, 'image_count': 128}, {'id': 478, 'image_count': 1}, {'id': 479, 'image_count': 9}, {'id': 480, 'image_count': 10}, {'id': 481, 'image_count': 4}, {'id': 482, 'image_count': 9}, {'id': 483, 'image_count': 18}, {'id': 484, 'image_count': 41}, {'id': 485, 'image_count': 28}, {'id': 486, 'image_count': 3}, {'id': 487, 'image_count': 65}, {'id': 488, 'image_count': 9}, {'id': 489, 'image_count': 23}, {'id': 490, 'image_count': 24}, {'id': 491, 'image_count': 1}, {'id': 492, 'image_count': 2}, {'id': 493, 'image_count': 59}, {'id': 494, 'image_count': 48}, {'id': 495, 'image_count': 17}, {'id': 496, 'image_count': 1877}, {'id': 497, 'image_count': 18}, {'id': 498, 'image_count': 1920}, {'id': 499, 'image_count': 50}, {'id': 500, 'image_count': 1890}, {'id': 501, 'image_count': 99}, {'id': 502, 'image_count': 1530}, {'id': 503, 'image_count': 3}, {'id': 504, 'image_count': 11}, {'id': 505, 'image_count': 19}, {'id': 506, 'image_count': 3}, {'id': 507, 'image_count': 63}, {'id': 508, 'image_count': 5}, {'id': 509, 'image_count': 6}, {'id': 510, 'image_count': 233}, {'id': 511, 'image_count': 54}, {'id': 512, 'image_count': 36}, {'id': 513, 'image_count': 10}, {'id': 514, 'image_count': 124}, {'id': 515, 'image_count': 101}, {'id': 516, 'image_count': 3}, {'id': 517, 'image_count': 363}, {'id': 518, 'image_count': 3}, {'id': 519, 'image_count': 30}, {'id': 520, 'image_count': 18}, {'id': 521, 'image_count': 199}, {'id': 522, 'image_count': 97}, {'id': 523, 'image_count': 32}, {'id': 524, 'image_count': 121}, {'id': 525, 'image_count': 16}, {'id': 526, 'image_count': 12}, {'id': 527, 'image_count': 2}, {'id': 528, 'image_count': 214}, {'id': 529, 'image_count': 48}, {'id': 530, 'image_count': 26}, {'id': 531, 'image_count': 13}, {'id': 532, 'image_count': 4}, {'id': 533, 'image_count': 11}, {'id': 534, 'image_count': 123}, {'id': 535, 'image_count': 7}, {'id': 536, 'image_count': 200}, {'id': 537, 'image_count': 91}, {'id': 538, 'image_count': 9}, {'id': 539, 'image_count': 72}, {'id': 540, 
'image_count': 1886}, {'id': 541, 'image_count': 4}, {'id': 542, 'image_count': 1}, {'id': 543, 'image_count': 1}, {'id': 544, 'image_count': 1932}, {'id': 545, 'image_count': 4}, {'id': 546, 'image_count': 56}, {'id': 547, 'image_count': 854}, {'id': 548, 'image_count': 755}, {'id': 549, 'image_count': 1843}, {'id': 550, 'image_count': 96}, {'id': 551, 'image_count': 7}, {'id': 552, 'image_count': 74}, {'id': 553, 'image_count': 66}, {'id': 554, 'image_count': 57}, {'id': 555, 'image_count': 44}, {'id': 556, 'image_count': 1905}, {'id': 557, 'image_count': 4}, {'id': 558, 'image_count': 90}, {'id': 559, 'image_count': 1635}, {'id': 560, 'image_count': 8}, {'id': 561, 'image_count': 5}, {'id': 562, 'image_count': 50}, {'id': 563, 'image_count': 545}, {'id': 564, 'image_count': 20}, {'id': 565, 'image_count': 193}, {'id': 566, 'image_count': 285}, {'id': 567, 'image_count': 3}, {'id': 568, 'image_count': 1}, {'id': 569, 'image_count': 1904}, {'id': 570, 'image_count': 294}, {'id': 571, 'image_count': 3}, {'id': 572, 'image_count': 5}, {'id': 573, 'image_count': 24}, {'id': 574, 'image_count': 2}, {'id': 575, 'image_count': 2}, {'id': 576, 'image_count': 16}, {'id': 577, 'image_count': 8}, {'id': 578, 'image_count': 154}, {'id': 579, 'image_count': 66}, {'id': 580, 'image_count': 1}, {'id': 581, 'image_count': 24}, {'id': 582, 'image_count': 1}, {'id': 583, 'image_count': 4}, {'id': 584, 'image_count': 75}, {'id': 585, 'image_count': 6}, {'id': 586, 'image_count': 126}, {'id': 587, 'image_count': 24}, {'id': 588, 'image_count': 22}, {'id': 589, 'image_count': 1872}, {'id': 590, 'image_count': 16}, {'id': 591, 'image_count': 423}, {'id': 592, 'image_count': 1927}, {'id': 593, 'image_count': 38}, {'id': 594, 'image_count': 3}, {'id': 595, 'image_count': 1945}, {'id': 596, 'image_count': 35}, {'id': 597, 'image_count': 1}, {'id': 598, 'image_count': 13}, {'id': 599, 'image_count': 9}, {'id': 600, 'image_count': 14}, {'id': 601, 'image_count': 37}, {'id': 602, 'image_count': 3}, {'id': 603, 'image_count': 4}, {'id': 604, 'image_count': 100}, {'id': 605, 'image_count': 195}, {'id': 606, 'image_count': 1}, {'id': 607, 'image_count': 12}, {'id': 608, 'image_count': 24}, {'id': 609, 'image_count': 489}, {'id': 610, 'image_count': 10}, {'id': 611, 'image_count': 1689}, {'id': 612, 'image_count': 42}, {'id': 613, 'image_count': 81}, {'id': 614, 'image_count': 894}, {'id': 615, 'image_count': 1868}, {'id': 616, 'image_count': 7}, {'id': 617, 'image_count': 1567}, {'id': 618, 'image_count': 10}, {'id': 619, 'image_count': 8}, {'id': 620, 'image_count': 7}, {'id': 621, 'image_count': 629}, {'id': 622, 'image_count': 89}, {'id': 623, 'image_count': 15}, {'id': 624, 'image_count': 134}, {'id': 625, 'image_count': 4}, {'id': 626, 'image_count': 1802}, {'id': 627, 'image_count': 595}, {'id': 628, 'image_count': 1210}, {'id': 629, 'image_count': 48}, {'id': 630, 'image_count': 418}, {'id': 631, 'image_count': 1846}, {'id': 632, 'image_count': 5}, {'id': 633, 'image_count': 221}, {'id': 634, 'image_count': 10}, {'id': 635, 'image_count': 7}, {'id': 636, 'image_count': 76}, {'id': 637, 'image_count': 22}, {'id': 638, 'image_count': 10}, {'id': 639, 'image_count': 341}, {'id': 640, 'image_count': 1}, {'id': 641, 'image_count': 705}, {'id': 642, 'image_count': 1900}, {'id': 643, 'image_count': 188}, {'id': 644, 'image_count': 227}, {'id': 645, 'image_count': 861}, {'id': 646, 'image_count': 6}, {'id': 647, 'image_count': 115}, {'id': 648, 'image_count': 5}, {'id': 649, 'image_count': 43}, {'id': 650, 
'image_count': 14}, {'id': 651, 'image_count': 6}, {'id': 652, 'image_count': 15}, {'id': 653, 'image_count': 1167}, {'id': 654, 'image_count': 15}, {'id': 655, 'image_count': 994}, {'id': 656, 'image_count': 28}, {'id': 657, 'image_count': 2}, {'id': 658, 'image_count': 338}, {'id': 659, 'image_count': 334}, {'id': 660, 'image_count': 15}, {'id': 661, 'image_count': 102}, {'id': 662, 'image_count': 1}, {'id': 663, 'image_count': 8}, {'id': 664, 'image_count': 1}, {'id': 665, 'image_count': 1}, {'id': 666, 'image_count': 28}, {'id': 667, 'image_count': 91}, {'id': 668, 'image_count': 260}, {'id': 669, 'image_count': 131}, {'id': 670, 'image_count': 128}, {'id': 671, 'image_count': 3}, {'id': 672, 'image_count': 10}, {'id': 673, 'image_count': 39}, {'id': 674, 'image_count': 2}, {'id': 675, 'image_count': 925}, {'id': 676, 'image_count': 354}, {'id': 677, 'image_count': 31}, {'id': 678, 'image_count': 10}, {'id': 679, 'image_count': 215}, {'id': 680, 'image_count': 71}, {'id': 681, 'image_count': 43}, {'id': 682, 'image_count': 28}, {'id': 683, 'image_count': 34}, {'id': 684, 'image_count': 16}, {'id': 685, 'image_count': 273}, {'id': 686, 'image_count': 2}, {'id': 687, 'image_count': 999}, {'id': 688, 'image_count': 4}, {'id': 689, 'image_count': 107}, {'id': 690, 'image_count': 2}, {'id': 691, 'image_count': 1}, {'id': 692, 'image_count': 454}, {'id': 693, 'image_count': 9}, {'id': 694, 'image_count': 1901}, {'id': 695, 'image_count': 61}, {'id': 696, 'image_count': 91}, {'id': 697, 'image_count': 46}, {'id': 698, 'image_count': 1402}, {'id': 699, 'image_count': 74}, {'id': 700, 'image_count': 421}, {'id': 701, 'image_count': 226}, {'id': 702, 'image_count': 10}, {'id': 703, 'image_count': 1720}, {'id': 704, 'image_count': 261}, {'id': 705, 'image_count': 1337}, {'id': 706, 'image_count': 293}, {'id': 707, 'image_count': 62}, {'id': 708, 'image_count': 814}, {'id': 709, 'image_count': 407}, {'id': 710, 'image_count': 6}, {'id': 711, 'image_count': 16}, {'id': 712, 'image_count': 7}, {'id': 713, 'image_count': 1791}, {'id': 714, 'image_count': 2}, {'id': 715, 'image_count': 1915}, {'id': 716, 'image_count': 1940}, {'id': 717, 'image_count': 13}, {'id': 718, 'image_count': 16}, {'id': 719, 'image_count': 448}, {'id': 720, 'image_count': 12}, {'id': 721, 'image_count': 18}, {'id': 722, 'image_count': 4}, {'id': 723, 'image_count': 71}, {'id': 724, 'image_count': 189}, {'id': 725, 'image_count': 74}, {'id': 726, 'image_count': 103}, {'id': 727, 'image_count': 3}, {'id': 728, 'image_count': 110}, {'id': 729, 'image_count': 5}, {'id': 730, 'image_count': 9}, {'id': 731, 'image_count': 15}, {'id': 732, 'image_count': 25}, {'id': 733, 'image_count': 7}, {'id': 734, 'image_count': 647}, {'id': 735, 'image_count': 824}, {'id': 736, 'image_count': 100}, {'id': 737, 'image_count': 47}, {'id': 738, 'image_count': 121}, {'id': 739, 'image_count': 731}, {'id': 740, 'image_count': 73}, {'id': 741, 'image_count': 49}, {'id': 742, 'image_count': 23}, {'id': 743, 'image_count': 4}, {'id': 744, 'image_count': 62}, {'id': 745, 'image_count': 118}, {'id': 746, 'image_count': 99}, {'id': 747, 'image_count': 40}, {'id': 748, 'image_count': 1036}, {'id': 749, 'image_count': 105}, {'id': 750, 'image_count': 21}, {'id': 751, 'image_count': 229}, {'id': 752, 'image_count': 7}, {'id': 753, 'image_count': 72}, {'id': 754, 'image_count': 9}, {'id': 755, 'image_count': 10}, {'id': 756, 'image_count': 328}, {'id': 757, 'image_count': 468}, {'id': 758, 'image_count': 1}, {'id': 759, 'image_count': 2}, {'id': 760, 
'image_count': 24}, {'id': 761, 'image_count': 11}, {'id': 762, 'image_count': 72}, {'id': 763, 'image_count': 17}, {'id': 764, 'image_count': 10}, {'id': 765, 'image_count': 17}, {'id': 766, 'image_count': 489}, {'id': 767, 'image_count': 47}, {'id': 768, 'image_count': 93}, {'id': 769, 'image_count': 1}, {'id': 770, 'image_count': 12}, {'id': 771, 'image_count': 228}, {'id': 772, 'image_count': 5}, {'id': 773, 'image_count': 76}, {'id': 774, 'image_count': 71}, {'id': 775, 'image_count': 30}, {'id': 776, 'image_count': 109}, {'id': 777, 'image_count': 14}, {'id': 778, 'image_count': 1}, {'id': 779, 'image_count': 8}, {'id': 780, 'image_count': 26}, {'id': 781, 'image_count': 339}, {'id': 782, 'image_count': 153}, {'id': 783, 'image_count': 2}, {'id': 784, 'image_count': 3}, {'id': 785, 'image_count': 8}, {'id': 786, 'image_count': 47}, {'id': 787, 'image_count': 8}, {'id': 788, 'image_count': 6}, {'id': 789, 'image_count': 116}, {'id': 790, 'image_count': 69}, {'id': 791, 'image_count': 13}, {'id': 792, 'image_count': 6}, {'id': 793, 'image_count': 1928}, {'id': 794, 'image_count': 79}, {'id': 795, 'image_count': 14}, {'id': 796, 'image_count': 7}, {'id': 797, 'image_count': 20}, {'id': 798, 'image_count': 114}, {'id': 799, 'image_count': 221}, {'id': 800, 'image_count': 502}, {'id': 801, 'image_count': 62}, {'id': 802, 'image_count': 87}, {'id': 803, 'image_count': 4}, {'id': 804, 'image_count': 1912}, {'id': 805, 'image_count': 7}, {'id': 806, 'image_count': 186}, {'id': 807, 'image_count': 18}, {'id': 808, 'image_count': 4}, {'id': 809, 'image_count': 3}, {'id': 810, 'image_count': 7}, {'id': 811, 'image_count': 1413}, {'id': 812, 'image_count': 7}, {'id': 813, 'image_count': 12}, {'id': 814, 'image_count': 248}, {'id': 815, 'image_count': 4}, {'id': 816, 'image_count': 1881}, {'id': 817, 'image_count': 529}, {'id': 818, 'image_count': 1932}, {'id': 819, 'image_count': 50}, {'id': 820, 'image_count': 3}, {'id': 821, 'image_count': 28}, {'id': 822, 'image_count': 10}, {'id': 823, 'image_count': 5}, {'id': 824, 'image_count': 5}, {'id': 825, 'image_count': 18}, {'id': 826, 'image_count': 14}, {'id': 827, 'image_count': 1890}, {'id': 828, 'image_count': 660}, {'id': 829, 'image_count': 8}, {'id': 830, 'image_count': 25}, {'id': 831, 'image_count': 10}, {'id': 832, 'image_count': 218}, {'id': 833, 'image_count': 36}, {'id': 834, 'image_count': 16}, {'id': 835, 'image_count': 808}, {'id': 836, 'image_count': 479}, {'id': 837, 'image_count': 1404}, {'id': 838, 'image_count': 307}, {'id': 839, 'image_count': 57}, {'id': 840, 'image_count': 28}, {'id': 841, 'image_count': 80}, {'id': 842, 'image_count': 11}, {'id': 843, 'image_count': 92}, {'id': 844, 'image_count': 20}, {'id': 845, 'image_count': 194}, {'id': 846, 'image_count': 23}, {'id': 847, 'image_count': 52}, {'id': 848, 'image_count': 673}, {'id': 849, 'image_count': 2}, {'id': 850, 'image_count': 2}, {'id': 851, 'image_count': 1}, {'id': 852, 'image_count': 2}, {'id': 853, 'image_count': 8}, {'id': 854, 'image_count': 80}, {'id': 855, 'image_count': 3}, {'id': 856, 'image_count': 3}, {'id': 857, 'image_count': 15}, {'id': 858, 'image_count': 2}, {'id': 859, 'image_count': 10}, {'id': 860, 'image_count': 386}, {'id': 861, 'image_count': 65}, {'id': 862, 'image_count': 3}, {'id': 863, 'image_count': 35}, {'id': 864, 'image_count': 5}, {'id': 865, 'image_count': 180}, {'id': 866, 'image_count': 99}, {'id': 867, 'image_count': 49}, {'id': 868, 'image_count': 28}, {'id': 869, 'image_count': 1}, {'id': 870, 'image_count': 52}, {'id': 871, 
'image_count': 36}, {'id': 872, 'image_count': 70}, {'id': 873, 'image_count': 6}, {'id': 874, 'image_count': 29}, {'id': 875, 'image_count': 24}, {'id': 876, 'image_count': 1115}, {'id': 877, 'image_count': 61}, {'id': 878, 'image_count': 18}, {'id': 879, 'image_count': 18}, {'id': 880, 'image_count': 665}, {'id': 881, 'image_count': 1096}, {'id': 882, 'image_count': 29}, {'id': 883, 'image_count': 8}, {'id': 884, 'image_count': 14}, {'id': 885, 'image_count': 1622}, {'id': 886, 'image_count': 2}, {'id': 887, 'image_count': 3}, {'id': 888, 'image_count': 32}, {'id': 889, 'image_count': 55}, {'id': 890, 'image_count': 1}, {'id': 891, 'image_count': 10}, {'id': 892, 'image_count': 10}, {'id': 893, 'image_count': 47}, {'id': 894, 'image_count': 3}, {'id': 895, 'image_count': 29}, {'id': 896, 'image_count': 342}, {'id': 897, 'image_count': 25}, {'id': 898, 'image_count': 1469}, {'id': 899, 'image_count': 521}, {'id': 900, 'image_count': 347}, {'id': 901, 'image_count': 35}, {'id': 902, 'image_count': 7}, {'id': 903, 'image_count': 207}, {'id': 904, 'image_count': 108}, {'id': 905, 'image_count': 2}, {'id': 906, 'image_count': 34}, {'id': 907, 'image_count': 12}, {'id': 908, 'image_count': 10}, {'id': 909, 'image_count': 13}, {'id': 910, 'image_count': 361}, {'id': 911, 'image_count': 1023}, {'id': 912, 'image_count': 782}, {'id': 913, 'image_count': 2}, {'id': 914, 'image_count': 5}, {'id': 915, 'image_count': 247}, {'id': 916, 'image_count': 221}, {'id': 917, 'image_count': 4}, {'id': 918, 'image_count': 8}, {'id': 919, 'image_count': 158}, {'id': 920, 'image_count': 3}, {'id': 921, 'image_count': 752}, {'id': 922, 'image_count': 64}, {'id': 923, 'image_count': 707}, {'id': 924, 'image_count': 143}, {'id': 925, 'image_count': 1}, {'id': 926, 'image_count': 49}, {'id': 927, 'image_count': 126}, {'id': 928, 'image_count': 76}, {'id': 929, 'image_count': 11}, {'id': 930, 'image_count': 11}, {'id': 931, 'image_count': 4}, {'id': 932, 'image_count': 39}, {'id': 933, 'image_count': 11}, {'id': 934, 'image_count': 13}, {'id': 935, 'image_count': 91}, {'id': 936, 'image_count': 14}, {'id': 937, 'image_count': 5}, {'id': 938, 'image_count': 3}, {'id': 939, 'image_count': 10}, {'id': 940, 'image_count': 18}, {'id': 941, 'image_count': 9}, {'id': 942, 'image_count': 6}, {'id': 943, 'image_count': 951}, {'id': 944, 'image_count': 2}, {'id': 945, 'image_count': 1}, {'id': 946, 'image_count': 19}, {'id': 947, 'image_count': 1942}, {'id': 948, 'image_count': 1916}, {'id': 949, 'image_count': 139}, {'id': 950, 'image_count': 43}, {'id': 951, 'image_count': 1969}, {'id': 952, 'image_count': 5}, {'id': 953, 'image_count': 134}, {'id': 954, 'image_count': 74}, {'id': 955, 'image_count': 381}, {'id': 956, 'image_count': 1}, {'id': 957, 'image_count': 381}, {'id': 958, 'image_count': 6}, {'id': 959, 'image_count': 1826}, {'id': 960, 'image_count': 28}, {'id': 961, 'image_count': 1635}, {'id': 962, 'image_count': 1967}, {'id': 963, 'image_count': 16}, {'id': 964, 'image_count': 1926}, {'id': 965, 'image_count': 1789}, {'id': 966, 'image_count': 401}, {'id': 967, 'image_count': 1968}, {'id': 968, 'image_count': 1167}, {'id': 969, 'image_count': 1}, {'id': 970, 'image_count': 56}, {'id': 971, 'image_count': 17}, {'id': 972, 'image_count': 1}, {'id': 973, 'image_count': 58}, {'id': 974, 'image_count': 9}, {'id': 975, 'image_count': 8}, {'id': 976, 'image_count': 1124}, {'id': 977, 'image_count': 31}, {'id': 978, 'image_count': 16}, {'id': 979, 'image_count': 491}, {'id': 980, 'image_count': 432}, {'id': 981, 
'image_count': 1945}, {'id': 982, 'image_count': 1899}, {'id': 983, 'image_count': 5}, {'id': 984, 'image_count': 28}, {'id': 985, 'image_count': 7}, {'id': 986, 'image_count': 146}, {'id': 987, 'image_count': 1}, {'id': 988, 'image_count': 25}, {'id': 989, 'image_count': 22}, {'id': 990, 'image_count': 1}, {'id': 991, 'image_count': 10}, {'id': 992, 'image_count': 9}, {'id': 993, 'image_count': 308}, {'id': 994, 'image_count': 4}, {'id': 995, 'image_count': 1969}, {'id': 996, 'image_count': 45}, {'id': 997, 'image_count': 12}, {'id': 998, 'image_count': 1}, {'id': 999, 'image_count': 85}, {'id': 1000, 'image_count': 1127}, {'id': 1001, 'image_count': 11}, {'id': 1002, 'image_count': 60}, {'id': 1003, 'image_count': 1}, {'id': 1004, 'image_count': 16}, {'id': 1005, 'image_count': 1}, {'id': 1006, 'image_count': 65}, {'id': 1007, 'image_count': 13}, {'id': 1008, 'image_count': 655}, {'id': 1009, 'image_count': 51}, {'id': 1010, 'image_count': 1}, {'id': 1011, 'image_count': 673}, {'id': 1012, 'image_count': 5}, {'id': 1013, 'image_count': 36}, {'id': 1014, 'image_count': 54}, {'id': 1015, 'image_count': 5}, {'id': 1016, 'image_count': 8}, {'id': 1017, 'image_count': 305}, {'id': 1018, 'image_count': 297}, {'id': 1019, 'image_count': 1053}, {'id': 1020, 'image_count': 223}, {'id': 1021, 'image_count': 1037}, {'id': 1022, 'image_count': 63}, {'id': 1023, 'image_count': 1881}, {'id': 1024, 'image_count': 507}, {'id': 1025, 'image_count': 333}, {'id': 1026, 'image_count': 1911}, {'id': 1027, 'image_count': 1765}, {'id': 1028, 'image_count': 1}, {'id': 1029, 'image_count': 5}, {'id': 1030, 'image_count': 1}, {'id': 1031, 'image_count': 9}, {'id': 1032, 'image_count': 2}, {'id': 1033, 'image_count': 151}, {'id': 1034, 'image_count': 82}, {'id': 1035, 'image_count': 1931}, {'id': 1036, 'image_count': 41}, {'id': 1037, 'image_count': 1895}, {'id': 1038, 'image_count': 24}, {'id': 1039, 'image_count': 22}, {'id': 1040, 'image_count': 35}, {'id': 1041, 'image_count': 69}, {'id': 1042, 'image_count': 962}, {'id': 1043, 'image_count': 588}, {'id': 1044, 'image_count': 21}, {'id': 1045, 'image_count': 825}, {'id': 1046, 'image_count': 52}, {'id': 1047, 'image_count': 5}, {'id': 1048, 'image_count': 5}, {'id': 1049, 'image_count': 5}, {'id': 1050, 'image_count': 1860}, {'id': 1051, 'image_count': 56}, {'id': 1052, 'image_count': 1582}, {'id': 1053, 'image_count': 7}, {'id': 1054, 'image_count': 2}, {'id': 1055, 'image_count': 1562}, {'id': 1056, 'image_count': 1885}, {'id': 1057, 'image_count': 1}, {'id': 1058, 'image_count': 5}, {'id': 1059, 'image_count': 137}, {'id': 1060, 'image_count': 1094}, {'id': 1061, 'image_count': 134}, {'id': 1062, 'image_count': 29}, {'id': 1063, 'image_count': 22}, {'id': 1064, 'image_count': 522}, {'id': 1065, 'image_count': 50}, {'id': 1066, 'image_count': 68}, {'id': 1067, 'image_count': 16}, {'id': 1068, 'image_count': 40}, {'id': 1069, 'image_count': 35}, {'id': 1070, 'image_count': 135}, {'id': 1071, 'image_count': 1413}, {'id': 1072, 'image_count': 772}, {'id': 1073, 'image_count': 50}, {'id': 1074, 'image_count': 1015}, {'id': 1075, 'image_count': 1}, {'id': 1076, 'image_count': 65}, {'id': 1077, 'image_count': 1900}, {'id': 1078, 'image_count': 1302}, {'id': 1079, 'image_count': 1977}, {'id': 1080, 'image_count': 2}, {'id': 1081, 'image_count': 29}, {'id': 1082, 'image_count': 36}, {'id': 1083, 'image_count': 138}, {'id': 1084, 'image_count': 4}, {'id': 1085, 'image_count': 67}, {'id': 1086, 'image_count': 26}, {'id': 1087, 'image_count': 25}, {'id': 1088, 
'image_count': 33}, {'id': 1089, 'image_count': 37}, {'id': 1090, 'image_count': 50}, {'id': 1091, 'image_count': 270}, {'id': 1092, 'image_count': 12}, {'id': 1093, 'image_count': 316}, {'id': 1094, 'image_count': 41}, {'id': 1095, 'image_count': 224}, {'id': 1096, 'image_count': 105}, {'id': 1097, 'image_count': 1925}, {'id': 1098, 'image_count': 1021}, {'id': 1099, 'image_count': 1213}, {'id': 1100, 'image_count': 172}, {'id': 1101, 'image_count': 28}, {'id': 1102, 'image_count': 745}, {'id': 1103, 'image_count': 187}, {'id': 1104, 'image_count': 147}, {'id': 1105, 'image_count': 136}, {'id': 1106, 'image_count': 34}, {'id': 1107, 'image_count': 41}, {'id': 1108, 'image_count': 636}, {'id': 1109, 'image_count': 570}, {'id': 1110, 'image_count': 1149}, {'id': 1111, 'image_count': 61}, {'id': 1112, 'image_count': 1890}, {'id': 1113, 'image_count': 18}, {'id': 1114, 'image_count': 143}, {'id': 1115, 'image_count': 1517}, {'id': 1116, 'image_count': 7}, {'id': 1117, 'image_count': 943}, {'id': 1118, 'image_count': 6}, {'id': 1119, 'image_count': 1}, {'id': 1120, 'image_count': 11}, {'id': 1121, 'image_count': 101}, {'id': 1122, 'image_count': 1909}, {'id': 1123, 'image_count': 800}, {'id': 1124, 'image_count': 1}, {'id': 1125, 'image_count': 44}, {'id': 1126, 'image_count': 3}, {'id': 1127, 'image_count': 44}, {'id': 1128, 'image_count': 31}, {'id': 1129, 'image_count': 7}, {'id': 1130, 'image_count': 20}, {'id': 1131, 'image_count': 11}, {'id': 1132, 'image_count': 13}, {'id': 1133, 'image_count': 1924}, {'id': 1134, 'image_count': 113}, {'id': 1135, 'image_count': 2}, {'id': 1136, 'image_count': 139}, {'id': 1137, 'image_count': 12}, {'id': 1138, 'image_count': 37}, {'id': 1139, 'image_count': 1866}, {'id': 1140, 'image_count': 47}, {'id': 1141, 'image_count': 1468}, {'id': 1142, 'image_count': 729}, {'id': 1143, 'image_count': 24}, {'id': 1144, 'image_count': 1}, {'id': 1145, 'image_count': 10}, {'id': 1146, 'image_count': 3}, {'id': 1147, 'image_count': 14}, {'id': 1148, 'image_count': 4}, {'id': 1149, 'image_count': 29}, {'id': 1150, 'image_count': 4}, {'id': 1151, 'image_count': 70}, {'id': 1152, 'image_count': 46}, {'id': 1153, 'image_count': 14}, {'id': 1154, 'image_count': 48}, {'id': 1155, 'image_count': 1855}, {'id': 1156, 'image_count': 113}, {'id': 1157, 'image_count': 1}, {'id': 1158, 'image_count': 1}, {'id': 1159, 'image_count': 10}, {'id': 1160, 'image_count': 54}, {'id': 1161, 'image_count': 1923}, {'id': 1162, 'image_count': 630}, {'id': 1163, 'image_count': 31}, {'id': 1164, 'image_count': 69}, {'id': 1165, 'image_count': 7}, {'id': 1166, 'image_count': 11}, {'id': 1167, 'image_count': 1}, {'id': 1168, 'image_count': 30}, {'id': 1169, 'image_count': 50}, {'id': 1170, 'image_count': 45}, {'id': 1171, 'image_count': 28}, {'id': 1172, 'image_count': 114}, {'id': 1173, 'image_count': 193}, {'id': 1174, 'image_count': 21}, {'id': 1175, 'image_count': 91}, {'id': 1176, 'image_count': 31}, {'id': 1177, 'image_count': 1469}, {'id': 1178, 'image_count': 1924}, {'id': 1179, 'image_count': 87}, {'id': 1180, 'image_count': 77}, {'id': 1181, 'image_count': 11}, {'id': 1182, 'image_count': 47}, {'id': 1183, 'image_count': 21}, {'id': 1184, 'image_count': 47}, {'id': 1185, 'image_count': 70}, {'id': 1186, 'image_count': 1838}, {'id': 1187, 'image_count': 19}, {'id': 1188, 'image_count': 531}, {'id': 1189, 'image_count': 11}, {'id': 1190, 'image_count': 941}, {'id': 1191, 'image_count': 113}, {'id': 1192, 'image_count': 26}, {'id': 1193, 'image_count': 5}, {'id': 1194, 'image_count': 
56}, {'id': 1195, 'image_count': 73}, {'id': 1196, 'image_count': 32}, {'id': 1197, 'image_count': 128}, {'id': 1198, 'image_count': 623}, {'id': 1199, 'image_count': 12}, {'id': 1200, 'image_count': 52}, {'id': 1201, 'image_count': 11}, {'id': 1202, 'image_count': 1674}, {'id': 1203, 'image_count': 81}] # noqa +# fmt: on diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/pascal_voc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/pascal_voc.py new file mode 100644 index 0000000000000000000000000000000000000000..d67ce3ee5a6e323bd0859f8d6ce5c84a6c3cc21b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/pascal_voc.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import numpy as np +import os +import xml.etree.ElementTree as ET +from typing import List, Tuple, Union + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.structures import BoxMode +from custom_detectron2.utils.file_io import PathManager + +__all__ = ["load_voc_instances", "register_pascal_voc"] + + +# fmt: off +CLASS_NAMES = ( + "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", + "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", + "pottedplant", "sheep", "sofa", "train", "tvmonitor" +) +# fmt: on + + +def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]): + """ + Load Pascal VOC detection annotations to Detectron2 format. + + Args: + dirname: Contains "Annotations", "ImageSets", "JPEGImages" + split (str): one of "train", "test", "val", "trainval" + class_names: list or tuple of class names + """ + with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: + fileids = np.loadtxt(f, dtype=str) # plain str; the np.str alias was removed in NumPy 1.24 + + # Needs to read many small annotation files; resolving a local copy of the directory first makes this faster. + annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) + dicts = [] + for fileid in fileids: + anno_file = os.path.join(annotation_dirname, fileid + ".xml") + jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") + + with PathManager.open(anno_file) as f: + tree = ET.parse(f) + + r = { + "file_name": jpeg_file, + "image_id": fileid, + "height": int(tree.findall("./size/height")[0].text), + "width": int(tree.findall("./size/width")[0].text), + } + instances = [] + + for obj in tree.findall("object"): + cls = obj.find("name").text + # We include "difficult" samples in training. + # Based on limited experiments, they don't hurt accuracy. + # difficult = int(obj.find("difficult").text) + # if difficult == 1: + # continue + bbox = obj.find("bndbox") + bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] + # Original annotations are integers in the range [1, W or H] + # Assuming they mean 1-based pixel indices (inclusive), + # a box with annotation (xmin=1, xmax=W) covers the whole image.
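As an aside before the 1-based coordinates are shifted below: a minimal usage sketch for the two helpers in this file. The dataset path and registration name are assumptions for illustration; any directory with the standard `Annotations/`, `ImageSets/Main/`, and `JPEGImages/` layout should work.

```python
# Hedged usage sketch (paths and names are hypothetical, not part of this repo).
from custom_detectron2.data.datasets.pascal_voc import (
    CLASS_NAMES,
    load_voc_instances,
    register_pascal_voc,
)

# Lazy registration: the lambda inside register_pascal_voc defers the actual
# XML parsing until the dataset name is first used.
register_pascal_voc("voc_2012_trainval_demo", "datasets/VOC2012", "trainval", 2012)

# Eager loading for inspection: each dict carries file_name, image_id,
# height/width, and XYXY_ABS "annotations" with 0-based coordinates.
dicts = load_voc_instances("datasets/VOC2012", "trainval", CLASS_NAMES)
print(len(dicts), dicts[0]["file_name"])
```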
+ # In coordinate space this is represented by (xmin=0, xmax=W) + bbox[0] -= 1.0 + bbox[1] -= 1.0 + instances.append( + {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} + ) + r["annotations"] = instances + dicts.append(r) + return dicts + + +def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES): + DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names)) + MetadataCatalog.get(name).set( + thing_classes=list(class_names), dirname=dirname, year=year, split=split + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/register_coco.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/register_coco.py new file mode 100644 index 0000000000000000000000000000000000000000..e564438d5bf016bcdbb65b4bbdc215d79f579f8a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/datasets/register_coco.py @@ -0,0 +1,3 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .coco import register_coco_instances # noqa +from .coco_panoptic import register_coco_panoptic_separated # noqa diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/detection_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/detection_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..425f3e2d501b1f68bec23f8d662a6aff83ddcebc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/detection_utils.py @@ -0,0 +1,659 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +Common data processing utilities that are used in a +typical object detection data pipeline. +""" +import logging +import numpy as np +from typing import List, Union +import custom_pycocotools.mask as mask_util +import torch +from PIL import Image + +from custom_detectron2.structures import ( + BitMasks, + Boxes, + BoxMode, + Instances, + Keypoints, + PolygonMasks, + RotatedBoxes, + polygons_to_bitmask, +) +from custom_detectron2.utils.file_io import PathManager + +from . import transforms as T +from .catalog import MetadataCatalog + +__all__ = [ + "SizeMismatchError", + "convert_image_to_rgb", + "check_image_size", + "transform_proposals", + "transform_instance_annotations", + "annotations_to_instances", + "annotations_to_instances_rotated", + "build_augmentation", + "build_transform_gen", + "create_keypoint_hflip_indices", + "filter_empty_instances", + "read_image", +] + + +class SizeMismatchError(ValueError): + """ + Raised when the loaded image's width/height differs from the annotation's. + """ + + +# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601 +_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]] +_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]] + +# https://www.exiv2.org/tags.html +_EXIF_ORIENT = 274 # exif 'Orientation' tag + + +def convert_PIL_to_numpy(image, format): + """ + Convert PIL image to numpy array of target format.
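The docstring of `convert_PIL_to_numpy` continues below. In the meantime, here is a self-contained sketch of what its "BGR" and "L" branches amount to, re-implemented inline (rather than imported) so it runs with only NumPy and Pillow installed:

```python
# Sketch of the format handling, using a synthetic image so no files are needed.
import numpy as np
from PIL import Image

rgb = Image.new("RGB", (4, 2), color=(255, 0, 0))   # a pure-red image
arr_rgb = np.asarray(rgb.convert("RGB"))            # HWC, uint8
arr_bgr = arr_rgb[:, :, ::-1]                       # what format="BGR" does
assert arr_rgb[0, 0].tolist() == [255, 0, 0]
assert arr_bgr[0, 0].tolist() == [0, 0, 255]

gray = np.asarray(Image.new("L", (4, 2)))           # PIL drops the channel dim for "L"...
gray_hwc = np.expand_dims(gray, -1)                 # ...so format="L" restores HWC
assert gray_hwc.shape == (2, 4, 1)
```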
+ + Args: + image (PIL.Image): a PIL image + format (str): the format of output image + + Returns: + (np.ndarray): also see `read_image` + """ + if format is not None: + # PIL only supports RGB, so convert to RGB and flip channels over below + conversion_format = format + if format in ["BGR", "YUV-BT.601"]: + conversion_format = "RGB" + image = image.convert(conversion_format) + image = np.asarray(image) + # PIL squeezes out the channel dimension for "L", so make it HWC + if format == "L": + image = np.expand_dims(image, -1) + + # handle formats not supported by PIL + elif format == "BGR": + # flip channels if needed + image = image[:, :, ::-1] + elif format == "YUV-BT.601": + image = image / 255.0 + image = np.dot(image, np.array(_M_RGB2YUV).T) + + return image + + +def convert_image_to_rgb(image, format): + """ + Convert an image from given format to RGB. + + Args: + image (np.ndarray or Tensor): an HWC image + format (str): the format of input image, also see `read_image` + + Returns: + (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8 + """ + if isinstance(image, torch.Tensor): + image = image.cpu().numpy() + if format == "BGR": + image = image[:, :, [2, 1, 0]] + elif format == "YUV-BT.601": + image = np.dot(image, np.array(_M_YUV2RGB).T) + image = image * 255.0 + else: + if format == "L": + image = image[:, :, 0] + image = image.astype(np.uint8) + image = np.asarray(Image.fromarray(image, mode=format).convert("RGB")) + return image + + +def _apply_exif_orientation(image): + """ + Applies the exif orientation correctly. + + This code exists per the bug: + https://github.com/python-pillow/Pillow/issues/3973 + with the function `ImageOps.exif_transpose`. The Pillow source raises errors with + various methods, especially `tobytes` + + Function based on: + https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 + https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 + + Args: + image (PIL.Image): a PIL image + + Returns: + (PIL.Image): the PIL image with exif orientation applied, if applicable + """ + if not hasattr(image, "getexif"): + return image + + try: + exif = image.getexif() + except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 + exif = None + + if exif is None: + return image + + orientation = exif.get(_EXIF_ORIENT) + + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + + if method is not None: + return image.transpose(method) + return image + + +def read_image(file_name, format=None): + """ + Read an image into the given format. + Will apply rotation and flipping if the image has such exif information. + + Args: + file_name (str): image file path + format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601". + + Returns: + image (np.ndarray): + an HWC image in the given format, which is 0-255, uint8 for + supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601. + """ + with PathManager.open(file_name, "rb") as f: + image = Image.open(f) + + # work around this bug: https://github.com/python-pillow/Pillow/issues/3973 + image = _apply_exif_orientation(image) + return convert_PIL_to_numpy(image, format) + + +def check_image_size(dataset_dict, image): + """ + Raise an error if the image does not match the size specified in the dict. 
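`read_image` above is the usual entry point, combining file loading, the EXIF orientation fix, and format conversion. A hedged usage sketch; `example.jpg` is a placeholder for any local image file:

```python
# Hedged sketch: "example.jpg" is an assumed local file, not shipped with the repo.
# EXIF orientation (tag 274) is applied before conversion, so a photo saved
# rotated by a camera comes back with pixels already in display orientation.
from custom_detectron2.data.detection_utils import read_image

img_bgr = read_image("example.jpg", format="BGR")   # HWC uint8, channels reversed
img_rgb = read_image("example.jpg", format="RGB")
assert img_bgr.shape == img_rgb.shape
assert (img_bgr[..., ::-1] == img_rgb).all()        # BGR is just RGB flipped
```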
+ """ + if "width" in dataset_dict or "height" in dataset_dict: + image_wh = (image.shape[1], image.shape[0]) + expected_wh = (dataset_dict["width"], dataset_dict["height"]) + if not image_wh == expected_wh: + raise SizeMismatchError( + "Mismatched image shape{}, got {}, expect {}.".format( + " for image " + dataset_dict["file_name"] + if "file_name" in dataset_dict + else "", + image_wh, + expected_wh, + ) + + " Please check the width/height in your annotation." + ) + + # To ensure bbox always remap to original image size + if "width" not in dataset_dict: + dataset_dict["width"] = image.shape[1] + if "height" not in dataset_dict: + dataset_dict["height"] = image.shape[0] + + +def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0): + """ + Apply transformations to the proposals in dataset_dict, if any. + + Args: + dataset_dict (dict): a dict read from the dataset, possibly + contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode" + image_shape (tuple): height, width + transforms (TransformList): + proposal_topk (int): only keep top-K scoring proposals + min_box_size (int): proposals with either side smaller than this + threshold are removed + + The input dict is modified in-place, with abovementioned keys removed. A new + key "proposals" will be added. Its value is an `Instances` + object which contains the transformed proposals in its field + "proposal_boxes" and "objectness_logits". + """ + if "proposal_boxes" in dataset_dict: + # Transform proposal boxes + boxes = transforms.apply_box( + BoxMode.convert( + dataset_dict.pop("proposal_boxes"), + dataset_dict.pop("proposal_bbox_mode"), + BoxMode.XYXY_ABS, + ) + ) + boxes = Boxes(boxes) + objectness_logits = torch.as_tensor( + dataset_dict.pop("proposal_objectness_logits").astype("float32") + ) + + boxes.clip(image_shape) + keep = boxes.nonempty(threshold=min_box_size) + boxes = boxes[keep] + objectness_logits = objectness_logits[keep] + + proposals = Instances(image_shape) + proposals.proposal_boxes = boxes[:proposal_topk] + proposals.objectness_logits = objectness_logits[:proposal_topk] + dataset_dict["proposals"] = proposals + + +def get_bbox(annotation): + """ + Get bbox from data + Args: + annotation (dict): dict of instance annotations for a single instance. + Returns: + bbox (ndarray): x1, y1, x2, y2 coordinates + """ + # bbox is 1d (per-instance bounding box) + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + return bbox + + +def transform_instance_annotations( + annotation, transforms, image_size, *, keypoint_hflip_indices=None +): + """ + Apply transforms to box, segmentation and keypoints annotations of a single instance. + + It will use `transforms.apply_box` for the box, and + `transforms.apply_coords` for segmentation polygons & keypoints. + If you need anything more specially designed for each data structure, + you'll need to implement your own version of this function or the transforms. + + Args: + annotation (dict): dict of instance annotations for a single instance. + It will be modified in-place. + transforms (TransformList or list[Transform]): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + + Returns: + dict: + the same input dict with fields "bbox", "segmentation", "keypoints" + transformed according to `transforms`. + The "bbox_mode" field will be set to XYXY_ABS. 
+ """ + if isinstance(transforms, (tuple, list)): + transforms = T.TransformList(transforms) + # bbox is 1d (per-instance bounding box) + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + # clip transformed bbox to image size + bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0) + annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1]) + annotation["bbox_mode"] = BoxMode.XYXY_ABS + + if "segmentation" in annotation: + # each instance contains 1 or more polygons + segm = annotation["segmentation"] + if isinstance(segm, list): + # polygons + polygons = [np.asarray(p).reshape(-1, 2) for p in segm] + annotation["segmentation"] = [ + p.reshape(-1) for p in transforms.apply_polygons(polygons) + ] + elif isinstance(segm, dict): + # RLE + mask = mask_util.decode(segm) + mask = transforms.apply_segmentation(mask) + assert tuple(mask.shape[:2]) == image_size + annotation["segmentation"] = mask + else: + raise ValueError( + "Cannot transform segmentation of type '{}'!" + "Supported types are: polygons as list[list[float] or ndarray]," + " COCO-style RLE as a dict.".format(type(segm)) + ) + + if "keypoints" in annotation: + keypoints = transform_keypoint_annotations( + annotation["keypoints"], transforms, image_size, keypoint_hflip_indices + ) + annotation["keypoints"] = keypoints + + return annotation + + +def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None): + """ + Transform keypoint annotations of an image. + If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0) + + Args: + keypoints (list[float]): Nx3 float in Detectron2's Dataset format. + Each point is represented by (x, y, visibility). + transforms (TransformList): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + When `transforms` includes horizontal flip, will use the index + mapping to flip keypoints. + """ + # (N*3,) -> (N, 3) + keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3) + keypoints_xy = transforms.apply_coords(keypoints[:, :2]) + + # Set all out-of-boundary points to "unlabeled" + inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1])) + inside = inside.all(axis=1) + keypoints[:, :2] = keypoints_xy + keypoints[:, 2][~inside] = 0 + + # This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + + # Alternative way: check if probe points was horizontally flipped. 
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]]) + # probe_aug = transforms.apply_coords(probe.copy()) + # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa + + # If flipped, swap each keypoint with its opposite-handed equivalent + if do_hflip: + if keypoint_hflip_indices is None: + raise ValueError("Cannot flip keypoints without providing flip indices!") + if len(keypoints) != len(keypoint_hflip_indices): + raise ValueError( + "Keypoint data has {} points, but metadata " + "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices)) + ) + keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :] + + # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0 + keypoints[keypoints[:, 2] == 0] = 0 + return keypoints + + +def annotations_to_instances(annos, image_size, mask_format="polygon"): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + + Returns: + Instances: + It will contain fields "gt_boxes", "gt_classes", + "gt_masks", "gt_keypoints", if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = ( + np.stack( + [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] + ) + if len(annos) + else np.zeros((0, 4)) + ) + target = Instances(image_size) + target.gt_boxes = Boxes(boxes) + + classes = [int(obj["category_id"]) for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + if len(annos) and "segmentation" in annos[0]: + segms = [obj["segmentation"] for obj in annos] + if mask_format == "polygon": + try: + masks = PolygonMasks(segms) + except ValueError as e: + raise ValueError( + "Failed to use mask_format=='polygon' from the given annotations!" + ) from e + else: + assert mask_format == "bitmask", mask_format + masks = [] + for segm in segms: + if isinstance(segm, list): + # polygon + masks.append(polygons_to_bitmask(segm, *image_size)) + elif isinstance(segm, dict): + # COCO RLE + masks.append(mask_util.decode(segm)) + elif isinstance(segm, np.ndarray): + assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format( + segm.ndim + ) + # mask array + masks.append(segm) + else: + raise ValueError( + "Cannot convert segmentation of type '{}' to BitMasks!" + "Supported types are: polygons as list[list[float] or ndarray]," + " COCO-style RLE as a dict, or a binary segmentation mask " + " in a 2D numpy array of shape HxW.".format(type(segm)) + ) + # torch.from_numpy does not support array with negative stride. + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks]) + ) + target.gt_masks = masks + + if len(annos) and "keypoints" in annos[0]: + kpts = [obj.get("keypoints", []) for obj in annos] + target.gt_keypoints = Keypoints(kpts) + + return target + + +def annotations_to_instances_rotated(annos, image_size): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + Compared to `annotations_to_instances`, this function is for rotated boxes only + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. 
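The Args for `annotations_to_instances_rotated` continue below. For the plain `annotations_to_instances` defined just above, a minimal sketch with two hand-made annotations:

```python
# Hedged sketch: build training targets from two toy annotations.
from custom_detectron2.data.detection_utils import annotations_to_instances
from custom_detectron2.structures import BoxMode

annos = [
    {"bbox": [0.0, 0.0, 10.0, 10.0], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 0},
    {"bbox": [5.0, 5.0, 30.0, 40.0], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 2},
]
target = annotations_to_instances(annos, image_size=(64, 64))
print(target.gt_boxes)     # Boxes(tensor of shape [2, 4])
print(target.gt_classes)   # tensor([0, 2])
```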
+ image_size (tuple): height, width + + Returns: + Instances: + Containing fields "gt_boxes", "gt_classes", + if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = [obj["bbox"] for obj in annos] + target = Instances(image_size) + boxes = target.gt_boxes = RotatedBoxes(boxes) + boxes.clip(image_size) + + classes = [obj["category_id"] for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + return target + + +def filter_empty_instances( + instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False +): + """ + Filter out empty instances in an `Instances` object. + + Args: + instances (Instances): + by_box (bool): whether to filter out instances with empty boxes + by_mask (bool): whether to filter out instances with empty masks + box_threshold (float): minimum width and height to be considered non-empty + return_mask (bool): whether to return boolean mask of filtered instances + + Returns: + Instances: the filtered instances. + tensor[bool], optional: boolean mask of filtered instances + """ + assert by_box or by_mask + r = [] + if by_box: + r.append(instances.gt_boxes.nonempty(threshold=box_threshold)) + if instances.has("gt_masks") and by_mask: + r.append(instances.gt_masks.nonempty()) + + # TODO: can also filter visible keypoints + + if not r: + return instances + m = r[0] + for x in r[1:]: + m = m & x + if return_mask: + return instances[m], m + return instances[m] + + +def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]: + """ + Args: + dataset_names: list of dataset names + + Returns: + list[int]: a list of size=#keypoints, storing the + horizontally-flipped keypoint indices. + """ + if isinstance(dataset_names, str): + dataset_names = [dataset_names] + + check_metadata_consistency("keypoint_names", dataset_names) + check_metadata_consistency("keypoint_flip_map", dataset_names) + + meta = MetadataCatalog.get(dataset_names[0]) + names = meta.keypoint_names + # TODO flip -> hflip + flip_map = dict(meta.keypoint_flip_map) + flip_map.update({v: k for k, v in flip_map.items()}) + flipped_names = [i if i not in flip_map else flip_map[i] for i in names] + flip_indices = [names.index(i) for i in flipped_names] + return flip_indices + + +def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0): + """ + Get frequency weight for each class sorted by class id. + We calculate the frequency weight using image_count raised to the power freq_weight_power. + + Args: + dataset_names: list of dataset names + freq_weight_power: power value + """ + if isinstance(dataset_names, str): + dataset_names = [dataset_names] + + check_metadata_consistency("class_image_count", dataset_names) + + meta = MetadataCatalog.get(dataset_names[0]) + class_freq_meta = meta.class_image_count + class_freq = torch.tensor( + [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])] + ) + class_freq_weight = class_freq.float() ** freq_weight_power + return class_freq_weight + + +def gen_crop_transform_with_instance(crop_size, image_size, instance): + """ + Generate a CropTransform so that the cropping region contains + the center of the given instance. + + Args: + crop_size (tuple): h, w in pixels + image_size (tuple): h, w + instance (dict): an annotation dict of one instance, in Detectron2's + dataset format.
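Since `get_fed_loss_cls_weights` only needs metadata of the form shown in the long `class_image_count` list earlier in this diff, the weighting is easy to check by hand. A self-contained worked example with toy counts and `freq_weight_power=0.5`:

```python
# Worked example of the frequency weighting, inlined so it runs without any
# registered dataset: weights are image_count ** freq_weight_power.
import torch

class_image_count = [
    {"id": 1, "image_count": 100},
    {"id": 2, "image_count": 25},
    {"id": 3, "image_count": 4},
]
freq = torch.tensor(
    [c["image_count"] for c in sorted(class_image_count, key=lambda x: x["id"])]
)
weights = freq.float() ** 0.5   # freq_weight_power = 0.5
print(weights)                  # tensor([10., 5., 2.])
```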
+ """ + crop_size = np.asarray(crop_size, dtype=np.int32) + bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS) + center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 + assert ( + image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1] + ), "The annotation bounding box is outside of the image!" + assert ( + image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1] + ), "Crop size is larger than image size!" + + min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) + max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) + max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) + + y0 = np.random.randint(min_yx[0], max_yx[0] + 1) + x0 = np.random.randint(min_yx[1], max_yx[1] + 1) + return T.CropTransform(x0, y0, crop_size[1], crop_size[0]) + + +def check_metadata_consistency(key, dataset_names): + """ + Check that the datasets have consistent metadata. + + Args: + key (str): a metadata key + dataset_names (list[str]): a list of dataset names + + Raises: + AttributeError: if the key does not exist in the metadata + ValueError: if the given datasets do not have the same metadata values defined by key + """ + if len(dataset_names) == 0: + return + logger = logging.getLogger(__name__) + entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names] + for idx, entry in enumerate(entries_per_dataset): + if entry != entries_per_dataset[0]: + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry)) + ) + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format( + key, dataset_names[0], str(entries_per_dataset[0]) + ) + ) + raise ValueError("Datasets have different metadata '{}'!".format(key)) + + +def build_augmentation(cfg, is_train): + """ + Create a list of default :class:`Augmentation` from config. + Now it includes resizing and flipping. + + Returns: + list[Augmentation] + """ + if is_train: + min_size = cfg.INPUT.MIN_SIZE_TRAIN + max_size = cfg.INPUT.MAX_SIZE_TRAIN + sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + else: + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST + sample_style = "choice" + augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] + if is_train and cfg.INPUT.RANDOM_FLIP != "none": + augmentation.append( + T.RandomFlip( + horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", + vertical=cfg.INPUT.RANDOM_FLIP == "vertical", + ) + ) + return augmentation + + +build_transform_gen = build_augmentation +""" +Alias for backward-compatibility. +""" diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..85c9f1a9df8a4038fbd4246239b699402e382309 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .distributed_sampler import ( + InferenceSampler, + RandomSubsetTrainingSampler, + RepeatFactorTrainingSampler, + TrainingSampler, +) + +from .grouped_batch_sampler import GroupedBatchSampler + +__all__ = [ + "GroupedBatchSampler", + "TrainingSampler", + "RandomSubsetTrainingSampler", + "InferenceSampler", + "RepeatFactorTrainingSampler", +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/distributed_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/distributed_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..eedc4c3ba88886c614a26dbc38fa1860bb503505 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/distributed_sampler.py @@ -0,0 +1,278 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import logging +import math +from collections import defaultdict +from typing import Optional +import torch +from torch.utils.data.sampler import Sampler + +from custom_detectron2.utils import comm + +logger = logging.getLogger(__name__) + + +class TrainingSampler(Sampler): + """ + In training, we only care about the "infinite stream" of training data. + So this sampler produces an infinite stream of indices and + all workers cooperate to correctly shuffle the indices and sample different indices. + + The sampler in each worker effectively produces `indices[worker_id::num_workers]` + where `indices` is an infinite stream of indices consisting of + `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) + or `range(size) + range(size) + ...` (if shuffle is False) + + Note that this sampler does not shard based on pytorch DataLoader worker id. + A sampler passed to pytorch DataLoader is used only with a map-style dataset + and will not be executed inside workers. + But if this sampler is used in a way that it gets executed inside a dataloader + worker, then extra work needs to be done to shard its outputs based on worker id. + This is required so that workers don't produce identical data. + :class:`ToIterableDataset` implements this logic. + This note is true for all samplers in detectron2. + """ + + def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + if not isinstance(size, int): + raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.") + if size <= 0: + raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.") + self._size = size + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + if self._shuffle: + yield from torch.randperm(self._size, generator=g).tolist() + else: + yield from torch.arange(self._size).tolist() + + +class RandomSubsetTrainingSampler(TrainingSampler): + """ + Similar to TrainingSampler, but only samples a random subset of indices.
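The rank-sharding contract of `TrainingSampler` is easy to demonstrate with `comm` mocked out: every rank walks the same seeded infinite stream and takes `stream[rank::world_size]`. A self-contained sketch:

```python
# Self-contained demo of the sharding described above (comm is replaced by
# explicit rank/world_size values, so this runs on a single process).
import itertools
import torch

def infinite_indices(size, seed):
    g = torch.Generator()
    g.manual_seed(seed)
    while True:
        yield from torch.randperm(size, generator=g).tolist()

world_size, size, seed = 2, 6, 42
shards = [
    list(itertools.islice(infinite_indices(size, seed), rank, 10, world_size))
    for rank in range(world_size)
]
# Interleaving the two shards reconstructs one shared shuffled stream, so
# ranks never overlap and every index appears exactly once per pass.
print(shards[0], shards[1])
```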
+ This is useful when you want to estimate accuracy vs. data-size curves by + training the model with different subset_ratio values. + """ + + def __init__( + self, + size: int, + subset_ratio: float, + shuffle: bool = True, + seed_shuffle: Optional[int] = None, + seed_subset: Optional[int] = None, + ): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + subset_ratio (float): the ratio of subset data to sample from the underlying dataset + shuffle (bool): whether to shuffle the indices or not + seed_shuffle (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + seed_subset (int): the seed to randomize the subset to be sampled. + Must be the same across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle) + + assert 0.0 < subset_ratio <= 1.0 + self._size_subset = int(size * subset_ratio) + assert self._size_subset > 0 + if seed_subset is None: + seed_subset = comm.shared_random_seed() + self._seed_subset = int(seed_subset) + + # randomly generate the subset indexes to be sampled from + g = torch.Generator() + g.manual_seed(self._seed_subset) + indexes_randperm = torch.randperm(self._size, generator=g) + self._indexes_subset = indexes_randperm[: self._size_subset] + + logger.info("Using RandomSubsetTrainingSampler......") + logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data") + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__() + while True: + if self._shuffle: + # generate a random permutation to shuffle self._indexes_subset + randperm = torch.randperm(self._size_subset, generator=g) + yield from self._indexes_subset[randperm].tolist() + else: + yield from self._indexes_subset.tolist() + + +class RepeatFactorTrainingSampler(Sampler): + """ + Similar to TrainingSampler, but a sample may appear more often than others based + on its "repeat factor". This is suitable for training on class-imbalanced datasets like LVIS. + """ + + def __init__(self, repeat_factors, *, shuffle=True, seed=None): + """ + Args: + repeat_factors (Tensor): a float vector, the repeat factor for each index. When it's + full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``. + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + # Split into whole number (_int_part) and fractional (_frac_part) parts. + self._int_part = torch.trunc(repeat_factors) + self._frac_part = repeat_factors - self._int_part + + @staticmethod + def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh): + """ + Compute (fractional) per-image repeat factors based on category frequency. + The repeat factor for an image is a function of the frequency of the rarest + category labeled in that image.
The "frequency of category c" in [0, 1] is defined + as the fraction of images in the training set (without repeats) in which category c + appears. + See :paper:`lvis` (>= v2) Appendix B.2. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 dataset format. + repeat_thresh (float): frequency threshold below which data is repeated. + If the frequency is half of `repeat_thresh`, the image will be + repeated twice. + + Returns: + torch.Tensor: + the i-th element is the repeat factor for the dataset image at index i. + """ + # 1. For each category c, compute the fraction of images that contain it: f(c) + category_freq = defaultdict(int) + for dataset_dict in dataset_dicts: # For each image (without repeats) + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + for cat_id in cat_ids: + category_freq[cat_id] += 1 + num_images = len(dataset_dicts) + for k, v in category_freq.items(): + category_freq[k] = v / num_images + + # 2. For each category c, compute the category-level repeat factor: + # r(c) = max(1, sqrt(t / f(c))) + category_rep = { + cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + # 3. For each image I, compute the image-level repeat factor: + # r(I) = max_{c in I} r(c) + rep_factors = [] + for dataset_dict in dataset_dicts: + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) + rep_factors.append(rep_factor) + + return torch.tensor(rep_factors, dtype=torch.float32) + + def _get_epoch_indices(self, generator): + """ + Create a list of dataset indices (with repeats) to use for one epoch. + + Args: + generator (torch.Generator): pseudo random number generator used for + stochastic rounding. + + Returns: + torch.Tensor: list of dataset indices to use in one epoch. Each index + is repeated based on its calculated repeat factor. + """ + # Since repeat factors are fractional, we use stochastic rounding so + # that the target repeat factor is achieved in expectation over the + # course of training + rands = torch.rand(len(self._frac_part), generator=generator) + rep_factors = self._int_part + (rands < self._frac_part).float() + # Construct a list of indices in which we repeat images as specified + indices = [] + for dataset_index, rep_factor in enumerate(rep_factors): + indices.extend([dataset_index] * int(rep_factor.item())) + return torch.tensor(indices, dtype=torch.int64) + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + # Sample indices with repeats determined by stochastic rounding; each + # "epoch" may have a slightly different size due to the rounding. + indices = self._get_epoch_indices(g) + if self._shuffle: + randperm = torch.randperm(len(indices), generator=g) + yield from indices[randperm].tolist() + else: + yield from indices.tolist() + + +class InferenceSampler(Sampler): + """ + Produce indices for inference across all workers. + Inference needs to run on the __exact__ set of samples, + therefore when the total number of samples is not divisible by the number of workers, + this sampler produces different number of samples on different workers. 
+ """ + + def __init__(self, size: int): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + """ + self._size = size + assert size > 0 + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + self._local_indices = self._get_local_indices(size, self._world_size, self._rank) + + @staticmethod + def _get_local_indices(total_size, world_size, rank): + shard_size = total_size // world_size + left = total_size % world_size + shard_sizes = [shard_size + int(r < left) for r in range(world_size)] + + begin = sum(shard_sizes[:rank]) + end = min(sum(shard_sizes[: rank + 1]), total_size) + return range(begin, end) + + def __iter__(self): + yield from self._local_indices + + def __len__(self): + return len(self._local_indices) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/grouped_batch_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/grouped_batch_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..5b247730aacd04dd0c752664acde3257c4eddd71 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/samplers/grouped_batch_sampler.py @@ -0,0 +1,47 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from torch.utils.data.sampler import BatchSampler, Sampler + + +class GroupedBatchSampler(BatchSampler): + """ + Wraps another sampler to yield a mini-batch of indices. + It enforces that the batch only contain elements from the same group. + It also tries to provide mini-batches which follows an ordering which is + as close as possible to the ordering from the original sampler. + """ + + def __init__(self, sampler, group_ids, batch_size): + """ + Args: + sampler (Sampler): Base sampler. + group_ids (list[int]): If the sampler produces indices in range [0, N), + `group_ids` must be a list of `N` ints which contains the group id of each sample. + The group ids must be a set of integers in the range [0, num_groups). + batch_size (int): Size of mini-batch. + """ + if not isinstance(sampler, Sampler): + raise ValueError( + "sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}".format(sampler) + ) + self.sampler = sampler + self.group_ids = np.asarray(group_ids) + assert self.group_ids.ndim == 1 + self.batch_size = batch_size + groups = np.unique(self.group_ids).tolist() + + # buffer the indices of each group until batch size is reached + self.buffer_per_group = {k: [] for k in groups} + + def __iter__(self): + for idx in self.sampler: + group_id = self.group_ids[idx] + group_buffer = self.buffer_per_group[group_id] + group_buffer.append(idx) + if len(group_buffer) == self.batch_size: + yield group_buffer[:] # yield a copy of the list + del group_buffer[:] + + def __len__(self): + raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.") diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1d3721fc22e5aa3b4100458235f99c45498646d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from fvcore.transforms.transform import Transform, TransformList # order them first +from fvcore.transforms.transform import * +from .transform import * +from .augmentation import * +from .augmentation_impl import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +from custom_detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..63dd41aef658c9b51c7246880399405a029c5580 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import inspect +import numpy as np +import pprint +from typing import Any, List, Optional, Tuple, Union +from fvcore.transforms.transform import Transform, TransformList + +""" +See "Data Augmentation" tutorial for an overview of the system: +https://detectron2.readthedocs.io/tutorials/augmentation.html +""" + + +__all__ = [ + "Augmentation", + "AugmentationList", + "AugInput", + "TransformGen", + "apply_transform_gens", + "StandardAugInput", + "apply_augmentations", +] + + +def _check_img_dtype(img): + assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format( + type(img) + ) + assert not isinstance(img.dtype, np.integer) or ( + img.dtype == np.uint8 + ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format( + img.dtype + ) + assert img.ndim in [2, 3], img.ndim + + +def _get_aug_input_args(aug, aug_input) -> List[Any]: + """ + Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``. + """ + if aug.input_args is None: + # Decide what attributes are needed automatically + prms = list(inspect.signature(aug.get_transform).parameters.items()) + # The default behavior is: if there is one parameter, then its "image" + # (work automatically for majority of use cases, and also avoid BC breaking), + # Otherwise, use the argument names. + if len(prms) == 1: + names = ("image",) + else: + names = [] + for name, prm in prms: + if prm.kind in ( + inspect.Parameter.VAR_POSITIONAL, + inspect.Parameter.VAR_KEYWORD, + ): + raise TypeError( + f""" \ +The default implementation of `{type(aug)}.__call__` does not allow \ +`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \ +If arguments are unknown, reimplement `__call__` instead. \ +""" + ) + names.append(name) + aug.input_args = tuple(names) + + args = [] + for f in aug.input_args: + try: + args.append(getattr(aug_input, f)) + except AttributeError as e: + raise AttributeError( + f"{type(aug)}.get_transform needs input attribute '{f}', " + f"but it is not an attribute of {type(aug_input)}!" + ) from e + return args + + +class Augmentation: + """ + Augmentation defines (often random) policies/strategies to generate :class:`Transform` + from data. It is often used for pre-processing of input data. + + A "policy" that generates a :class:`Transform` may, in the most general case, + need arbitrary information from input data in order to determine what transforms + to apply. Therefore, each :class:`Augmentation` instance defines the arguments + needed by its :meth:`get_transform` method. 
When called with the positional arguments, + the :meth:`get_transform` method executes the policy. + + Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, + but not how to execute the actual transform operations to those data. + Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform. + + The returned `Transform` object is meant to describe deterministic transformation, which means + it can be re-applied on associated data, e.g. the geometry of an image and its segmentation + masks need to be transformed together. + (If such re-application is not needed, then determinism is not a crucial requirement.) + """ + + input_args: Optional[Tuple[str]] = None + """ + Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``. + By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only + contain "image". As long as the argument name convention is followed, there is no need for + users to touch this attribute. + """ + + def _init(self, params=None): + if params: + for k, v in params.items(): + if k != "self" and not k.startswith("_"): + setattr(self, k, v) + + def get_transform(self, *args) -> Transform: + """ + Execute the policy based on input data, and decide what transform to apply to inputs. + + Args: + args: Any fixed-length positional arguments. By default, the name of the arguments + should exist in the :class:`AugInput` to be used. + + Returns: + Transform: Returns the deterministic transform to apply to the input. + + Examples: + :: + class MyAug: + # if a policy needs to know both image and semantic segmentation + def get_transform(image, sem_seg) -> T.Transform: + pass + tfm: Transform = MyAug().get_transform(image, sem_seg) + new_image = tfm.apply_image(image) + + Notes: + Users can freely use arbitrary new argument names in custom + :meth:`get_transform` method, as long as they are available in the + input data. In detectron2 we use the following convention: + + * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or + floating point in range [0, 1] or [0, 255]. + * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes + of N instances. Each is in XYXY format in unit of absolute coordinates. + * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. + + We do not specify convention for other types and do not include builtin + :class:`Augmentation` that uses other types in detectron2. + """ + raise NotImplementedError + + def __call__(self, aug_input) -> Transform: + """ + Augment the given `aug_input` **in-place**, and return the transform that's used. + + This method will be called to apply the augmentation. In most augmentation, it + is enough to use the default implementation, which calls :meth:`get_transform` + using the inputs. But a subclass can overwrite it to have more complicated logic. + + Args: + aug_input (AugInput): an object that has attributes needed by this augmentation + (defined by ``self.get_transform``). Its ``transform`` method will be called + to in-place transform it. + + Returns: + Transform: the transform that is applied on the input. + """ + args = _get_aug_input_args(self, aug_input) + tfm = self.get_transform(*args) + assert isinstance(tfm, (Transform, TransformList)), ( + f"{type(self)}.get_transform must return an instance of Transform! " + f"Got {type(tfm)} instead." 
+ ) + aug_input.transform(tfm) + return tfm + + def _rand_range(self, low=1.0, high=None, size=None): + """ + Uniform float random number between low and high. + """ + if high is None: + low, high = 0, low + if size is None: + size = [] + return np.random.uniform(low, high, size) + + def __repr__(self): + """ + Produce something like: + "MyAugmentation(field1={self.field1}, field2={self.field2})" + """ + try: + sig = inspect.signature(self.__init__) + classname = type(self).__name__ + argstr = [] + for name, param in sig.parameters.items(): + assert ( + param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD + ), "The default __repr__ doesn't support *args or **kwargs" + assert hasattr(self, name), ( + "Attribute {} not found! " + "Default __repr__ only works if attributes match the constructor.".format(name) + ) + attr = getattr(self, name) + default = param.default + if default is attr: + continue + attr_str = pprint.pformat(attr) + if "\n" in attr_str: + # don't show it if pformat decides to use >1 lines + attr_str = "..." + argstr.append("{}={}".format(name, attr_str)) + return "{}({})".format(classname, ", ".join(argstr)) + except AssertionError: + return super().__repr__() + + __str__ = __repr__ + + +class _TransformToAug(Augmentation): + def __init__(self, tfm: Transform): + self.tfm = tfm + + def get_transform(self, *args): + return self.tfm + + def __repr__(self): + return repr(self.tfm) + + __str__ = __repr__ + + +def _transform_to_aug(tfm_or_aug): + """ + Wrap Transform into Augmentation. + Private, used internally to implement augmentations. + """ + assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug + if isinstance(tfm_or_aug, Augmentation): + return tfm_or_aug + else: + return _TransformToAug(tfm_or_aug) + + +class AugmentationList(Augmentation): + """ + Apply a sequence of augmentations. + + It has ``__call__`` method to apply the augmentations. + + Note that :meth:`get_transform` method is impossible (will throw error if called) + for :class:`AugmentationList`, because in order to apply a sequence of augmentations, + the kth augmentation must be applied first, to provide inputs needed by the (k+1)th + augmentation. + """ + + def __init__(self, augs): + """ + Args: + augs (list[Augmentation or Transform]): + """ + super().__init__() + self.augs = [_transform_to_aug(x) for x in augs] + + def __call__(self, aug_input) -> TransformList: + tfms = [] + for x in self.augs: + tfm = x(aug_input) + tfms.append(tfm) + return TransformList(tfms) + + def __repr__(self): + msgs = [str(x) for x in self.augs] + return "AugmentationList[{}]".format(", ".join(msgs)) + + __str__ = __repr__ + + +class AugInput: + """ + Input that can be used with :meth:`Augmentation.__call__`. + This is a standard implementation for the majority of use cases. + This class provides the standard attributes **"image", "boxes", "sem_seg"** + defined in :meth:`__init__` and they may be needed by different augmentations. + Most augmentation policies do not need attributes beyond these three. + + After applying augmentations to these attributes (using :meth:`AugInput.transform`), + the returned transforms can then be used to transform other data structures that users have. 
+ + Examples: + :: + input = AugInput(image, boxes=boxes) + tfms = augmentation(input) + transformed_image = input.image + transformed_boxes = input.boxes + transformed_other_data = tfms.apply_other(other_data) + + An extended project that works with new data types may implement augmentation policies + that need other inputs. An algorithm may need to transform inputs in a way different + from the standard approach defined in this class. In those rare situations, users can + implement a class similar to this one that satisfies the following conditions: + + * The input must provide access to these data in the form of attribute access + (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image" + and "sem_seg" arguments, its input must have the attributes "image" and "sem_seg". + * The input must have a ``transform(tfm: Transform) -> None`` method which + in-place transforms all its attributes. + """ + + # TODO maybe should support more builtin data types here + def __init__( + self, + image: np.ndarray, + *, + boxes: Optional[np.ndarray] = None, + sem_seg: Optional[np.ndarray] = None, + ): + """ + Args: + image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or + floating point in range [0, 1] or [0, 255]. The meaning of C is up + to users. + boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode + sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element + is an integer label of pixel. + """ + _check_img_dtype(image) + self.image = image + self.boxes = boxes + self.sem_seg = sem_seg + + def transform(self, tfm: Transform) -> None: + """ + In-place transform all attributes of this class. + + By "in-place", it means after calling this method, accessing an attribute such + as ``self.image`` will return transformed data. + """ + self.image = tfm.apply_image(self.image) + if self.boxes is not None: + self.boxes = tfm.apply_box(self.boxes) + if self.sem_seg is not None: + self.sem_seg = tfm.apply_segmentation(self.sem_seg) + + def apply_augmentations( + self, augmentations: List[Union[Augmentation, Transform]] + ) -> TransformList: + """ + Equivalent of ``AugmentationList(augmentations)(self)`` + """ + return AugmentationList(augmentations)(self) + + +def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs): + """ + Use ``T.AugmentationList(augmentations)(inputs)`` instead. + """ + if isinstance(inputs, np.ndarray): + # handle the common case of image-only Augmentation, also for backward compatibility + image_only = True + inputs = AugInput(inputs) + else: + image_only = False + tfms = inputs.apply_augmentations(augmentations) + return inputs.image if image_only else inputs, tfms + + +apply_transform_gens = apply_augmentations +""" +Alias for backward-compatibility. +""" + +TransformGen = Augmentation +""" +Alias for Augmentation, since it is something that generates :class:`Transform`s +""" + +StandardAugInput = AugInput +""" +Alias for compatibility. It's not worth the complexity to have two classes.
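With `augmentation.py` essentially complete at this point, here is a minimal end-to-end sketch of the contract it defines: a custom policy implements `get_transform` for the attributes it names, and `AugInput` applies the returned transform in place. `RandomHalfCrop` is a toy policy invented for illustration, not shipped code:

```python
# Hedged end-to-end sketch of the Augmentation/AugInput contract.
import numpy as np
from custom_detectron2.data import transforms as T

class RandomHalfCrop(T.Augmentation):
    """Toy policy (hypothetical): crop either the left or the right half."""

    def get_transform(self, image):
        # Only "image" is named here, so AugInput passes just that attribute.
        h, w = image.shape[:2]
        x0 = 0 if np.random.rand() < 0.5 else w // 2
        return T.CropTransform(x0, 0, w // 2, h)

image = np.zeros((8, 10, 3), dtype=np.uint8)
aug_input = T.AugInput(image, boxes=np.array([[1.0, 1.0, 9.0, 7.0]], dtype=np.float32))
tfms = T.AugmentationList([RandomHalfCrop()])(aug_input)  # mutates aug_input
print(aug_input.image.shape)   # (8, 5, 3)
print(aug_input.boxes)         # boxes shifted into the crop's coordinate frame
# tfms can now re-apply the exact same deterministic crop to other data.
```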
+""" diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation_impl.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..228b16318e8d4aff3c759ddd7367a1e92e6cbc07 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/augmentation_impl.py @@ -0,0 +1,736 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Implement many useful :class:`Augmentation`. +""" +import numpy as np +import sys +from numpy import random +from typing import Tuple +import torch +from fvcore.transforms.transform import ( + BlendTransform, + CropTransform, + HFlipTransform, + NoOpTransform, + PadTransform, + Transform, + TransformList, + VFlipTransform, +) +from PIL import Image + +from custom_detectron2.structures import Boxes, pairwise_iou + +from .augmentation import Augmentation, _transform_to_aug +from .transform import ExtentTransform, ResizeTransform, RotationTransform + +__all__ = [ + "FixedSizeCrop", + "RandomApply", + "RandomBrightness", + "RandomContrast", + "RandomCrop", + "RandomExtent", + "RandomFlip", + "RandomSaturation", + "RandomLighting", + "RandomRotation", + "Resize", + "ResizeScale", + "ResizeShortestEdge", + "RandomCrop_CategoryAreaConstraint", + "RandomResize", + "MinIoURandomCrop", +] + + +class RandomApply(Augmentation): + """ + Randomly apply an augmentation with a given probability. + """ + + def __init__(self, tfm_or_aug, prob=0.5): + """ + Args: + tfm_or_aug (Transform, Augmentation): the transform or augmentation + to be applied. It can either be a `Transform` or `Augmentation` + instance. + prob (float): probability between 0.0 and 1.0 that + the wrapper transformation is applied + """ + super().__init__() + self.aug = _transform_to_aug(tfm_or_aug) + assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})" + self.prob = prob + + def get_transform(self, *args): + do = self._rand_range() < self.prob + if do: + return self.aug.get_transform(*args) + else: + return NoOpTransform() + + def __call__(self, aug_input): + do = self._rand_range() < self.prob + if do: + return self.aug(aug_input) + else: + return NoOpTransform() + + +class RandomFlip(Augmentation): + """ + Flip the image horizontally or vertically with the given probability. + """ + + def __init__(self, prob=0.5, *, horizontal=True, vertical=False): + """ + Args: + prob (float): probability of flip. + horizontal (boolean): whether to apply horizontal flipping + vertical (boolean): whether to apply vertical flipping + """ + super().__init__() + + if horizontal and vertical: + raise ValueError("Cannot do both horiz and vert. 
Please use two Flip instead.") + if not horizontal and not vertical: + raise ValueError("At least one of horiz or vert has to be True!") + self._init(locals()) + + def get_transform(self, image): + h, w = image.shape[:2] + do = self._rand_range() < self.prob + if do: + if self.horizontal: + return HFlipTransform(w) + elif self.vertical: + return VFlipTransform(h) + else: + return NoOpTransform() + + +class Resize(Augmentation): + """Resize image to a fixed target size""" + + def __init__(self, shape, interp=Image.BILINEAR): + """ + Args: + shape: (h, w) tuple or a int + interp: PIL interpolation method + """ + if isinstance(shape, int): + shape = (shape, shape) + shape = tuple(shape) + self._init(locals()) + + def get_transform(self, image): + return ResizeTransform( + image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp + ) + + +class ResizeShortestEdge(Augmentation): + """ + Resize the image while keeping the aspect ratio unchanged. + It attempts to scale the shorter edge to the given `short_edge_length`, + as long as the longer edge does not exceed `max_size`. + If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. + """ + + @torch.jit.unused + def __init__( + self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR + ): + """ + Args: + short_edge_length (list[int]): If ``sample_style=="range"``, + a [min, max] interval from which to sample the shortest edge length. + If ``sample_style=="choice"``, a list of shortest edge lengths to sample from. + max_size (int): maximum allowed longest edge length. + sample_style (str): either "range" or "choice". + """ + super().__init__() + assert sample_style in ["range", "choice"], sample_style + + self.is_range = sample_style == "range" + if isinstance(short_edge_length, int): + short_edge_length = (short_edge_length, short_edge_length) + if self.is_range: + assert len(short_edge_length) == 2, ( + "short_edge_length must be two values using 'range' sample style." + f" Got {short_edge_length}!" + ) + self._init(locals()) + + @torch.jit.unused + def get_transform(self, image): + h, w = image.shape[:2] + if self.is_range: + size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) + else: + size = np.random.choice(self.short_edge_length) + if size == 0: + return NoOpTransform() + + newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size) + return ResizeTransform(h, w, newh, neww, self.interp) + + @staticmethod + def get_output_shape( + oldh: int, oldw: int, short_edge_length: int, max_size: int + ) -> Tuple[int, int]: + """ + Compute the output size given input size and target short edge length. + """ + h, w = oldh, oldw + size = short_edge_length * 1.0 + scale = size / min(h, w) + if h < w: + newh, neww = size, scale * w + else: + newh, neww = scale * h, size + if max(newh, neww) > max_size: + scale = max_size * 1.0 / max(newh, neww) + newh = newh * scale + neww = neww * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) + + +class ResizeScale(Augmentation): + """ + Takes target size as input and randomly scales the given target size between `min_scale` + and `max_scale`. It then scales the input image such that it fits inside the scaled target + box, keeping the aspect ratio constant. 
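+    For example (illustrative numbers, derived from the scaling logic below): with
+    target size (640, 640) and a sampled scale of 0.5, the scaled target box is
+    (320, 320), so an 800x400 input is resized by min(320/800, 320/400) = 0.4,
+    giving a 320x160 output.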
+ This implements the resize part of the Google's 'resize_and_crop' data augmentation: + https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127 + """ + + def __init__( + self, + min_scale: float, + max_scale: float, + target_height: int, + target_width: int, + interp: int = Image.BILINEAR, + ): + """ + Args: + min_scale: minimum image scale range. + max_scale: maximum image scale range. + target_height: target image height. + target_width: target image width. + interp: image interpolation method. + """ + super().__init__() + self._init(locals()) + + def _get_resize(self, image: np.ndarray, scale: float) -> Transform: + input_size = image.shape[:2] + + # Compute new target size given a scale. + target_size = (self.target_height, self.target_width) + target_scale_size = np.multiply(target_size, scale) + + # Compute actual rescaling applied to input image and output size. + output_scale = np.minimum( + target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1] + ) + output_size = np.round(np.multiply(input_size, output_scale)).astype(int) + + return ResizeTransform( + input_size[0], input_size[1], output_size[0], output_size[1], self.interp + ) + + def get_transform(self, image: np.ndarray) -> Transform: + random_scale = np.random.uniform(self.min_scale, self.max_scale) + return self._get_resize(image, random_scale) + + +class RandomRotation(Augmentation): + """ + This method returns a copy of this image, rotated the given + number of degrees counter clockwise around the given center. + """ + + def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None): + """ + Args: + angle (list[float]): If ``sample_style=="range"``, + a [min, max] interval from which to sample the angle (in degrees). + If ``sample_style=="choice"``, a list of angles to sample from + expand (bool): choose if the image should be resized to fit the whole + rotated image (default), or simply cropped + center (list[[float, float]]): If ``sample_style=="range"``, + a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center, + [0, 0] being the top left of the image and [1, 1] the bottom right. 
+ If ``sample_style=="choice"``, a list of centers to sample from + Default: None, which means that the center of rotation is the center of the image + center has no effect if expand=True because it only affects shifting + """ + super().__init__() + assert sample_style in ["range", "choice"], sample_style + self.is_range = sample_style == "range" + if isinstance(angle, (float, int)): + angle = (angle, angle) + if center is not None and isinstance(center[0], (float, int)): + center = (center, center) + self._init(locals()) + + def get_transform(self, image): + h, w = image.shape[:2] + center = None + if self.is_range: + angle = np.random.uniform(self.angle[0], self.angle[1]) + if self.center is not None: + center = ( + np.random.uniform(self.center[0][0], self.center[1][0]), + np.random.uniform(self.center[0][1], self.center[1][1]), + ) + else: + angle = np.random.choice(self.angle) + if self.center is not None: + center = np.random.choice(self.center) + + if center is not None: + center = (w * center[0], h * center[1]) # Convert to absolute coordinates + + if angle % 360 == 0: + return NoOpTransform() + + return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp) + + +class FixedSizeCrop(Augmentation): + """ + If `crop_size` is smaller than the input image size, then it uses a random crop of + the crop size. If `crop_size` is larger than the input image size, then it pads + the right and the bottom of the image to the crop size if `pad` is True, otherwise + it returns the smaller image. + """ + + def __init__( + self, + crop_size: Tuple[int], + pad: bool = True, + pad_value: float = 128.0, + seg_pad_value: int = 255, + ): + """ + Args: + crop_size: target image (height, width). + pad: if True, will pad images smaller than `crop_size` up to `crop_size` + pad_value: the padding value to the image. + seg_pad_value: the padding value to the segmentation mask. + """ + super().__init__() + self._init(locals()) + + def _get_crop(self, image: np.ndarray) -> Transform: + # Compute the image scale and scaled size. + input_size = image.shape[:2] + output_size = self.crop_size + + # Add random crop if the image is scaled up. + max_offset = np.subtract(input_size, output_size) + max_offset = np.maximum(max_offset, 0) + offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0)) + offset = np.round(offset).astype(int) + return CropTransform( + offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0] + ) + + def _get_pad(self, image: np.ndarray) -> Transform: + # Compute the image scale and scaled size. + input_size = image.shape[:2] + output_size = self.crop_size + + # Add padding if the image is scaled down. + pad_size = np.subtract(output_size, input_size) + pad_size = np.maximum(pad_size, 0) + original_size = np.minimum(input_size, output_size) + return PadTransform( + 0, + 0, + pad_size[1], + pad_size[0], + original_size[1], + original_size[0], + self.pad_value, + self.seg_pad_value, + ) + + def get_transform(self, image: np.ndarray) -> TransformList: + transforms = [self._get_crop(image)] + if self.pad: + transforms.append(self._get_pad(image)) + return TransformList(transforms) + + +class RandomCrop(Augmentation): + """ + Randomly crop a rectangle region out of an image. + """ + + def __init__(self, crop_type: str, crop_size): + """ + Args: + crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range". + crop_size (tuple[float, float]): two floats, explained below. 
+
+            - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
+              size (H, W). crop_size should be in (0, 1].
+            - "relative_range": uniformly sample two values from [crop_size[0], 1]
+              and [crop_size[1], 1], and use them as in the "relative" crop type.
+            - "absolute": crop a (crop_size[0], crop_size[1]) region from the input image.
+              crop_size must be smaller than the input image size.
+            - "absolute_range": for an input of size (H, W), uniformly sample H_crop in
+              [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
+              Then crop a region (H_crop, W_crop).
+        """
+        # TODO style of relative_range and absolute_range are not consistent:
+        # one takes (h, w) but another takes (min, max)
+        super().__init__()
+        assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
+        self._init(locals())
+
+    def get_transform(self, image):
+        h, w = image.shape[:2]
+        croph, cropw = self.get_crop_size((h, w))
+        assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
+        h0 = np.random.randint(h - croph + 1)
+        w0 = np.random.randint(w - cropw + 1)
+        return CropTransform(w0, h0, cropw, croph)
+
+    def get_crop_size(self, image_size):
+        """
+        Args:
+            image_size (tuple): height, width
+
+        Returns:
+            crop_size (tuple): height, width in absolute pixels
+        """
+        h, w = image_size
+        if self.crop_type == "relative":
+            ch, cw = self.crop_size
+            return int(h * ch + 0.5), int(w * cw + 0.5)
+        elif self.crop_type == "relative_range":
+            crop_size = np.asarray(self.crop_size, dtype=np.float32)
+            ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
+            return int(h * ch + 0.5), int(w * cw + 0.5)
+        elif self.crop_type == "absolute":
+            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
+        elif self.crop_type == "absolute_range":
+            assert self.crop_size[0] <= self.crop_size[1]
+            ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
+            cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
+            return ch, cw
+        else:
+            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
+
+
+class RandomCrop_CategoryAreaConstraint(Augmentation):
+    """
+    Similar to :class:`RandomCrop`, but finds a cropping window such that no single category
+    occupies more than `single_category_max_area` of the semantic segmentation ground truth,
+    which can cause instability in training. The function attempts to find such a valid
+    cropping window up to 10 times.
+    """
+
+    def __init__(
+        self,
+        crop_type: str,
+        crop_size,
+        single_category_max_area: float = 1.0,
+        ignored_category: int = None,
+    ):
+        """
+        Args:
+            crop_type, crop_size: same as in :class:`RandomCrop`
+            single_category_max_area: the maximum allowed area ratio of a
+                category. Set to 1.0 to disable
+            ignored_category: allow this category in the semantic segmentation
+                ground truth to exceed the area ratio. Usually set to the category
+                that's ignored in training.
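+
+        Example (illustrative)::
+
+            # values are illustrative; 255 is a commonly used "ignore" label
+            aug = RandomCrop_CategoryAreaConstraint(
+                "absolute", (512, 512), single_category_max_area=0.75, ignored_category=255
+            )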
+ """ + self.crop_aug = RandomCrop(crop_type, crop_size) + self._init(locals()) + + def get_transform(self, image, sem_seg): + if self.single_category_max_area >= 1.0: + return self.crop_aug.get_transform(image) + else: + h, w = sem_seg.shape + for _ in range(10): + crop_size = self.crop_aug.get_crop_size((h, w)) + y0 = np.random.randint(h - crop_size[0] + 1) + x0 = np.random.randint(w - crop_size[1] + 1) + sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]] + labels, cnt = np.unique(sem_seg_temp, return_counts=True) + if self.ignored_category is not None: + cnt = cnt[labels != self.ignored_category] + if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area: + break + crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0]) + return crop_tfm + + +class RandomExtent(Augmentation): + """ + Outputs an image by cropping a random "subrect" of the source image. + + The subrect can be parameterized to include pixels outside the source image, + in which case they will be set to zeros (i.e. black). The size of the output + image will vary with the size of the random subrect. + """ + + def __init__(self, scale_range, shift_range): + """ + Args: + output_size (h, w): Dimensions of output image + scale_range (l, h): Range of input-to-output size scaling factor + shift_range (x, y): Range of shifts of the cropped subrect. The rect + is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)], + where (w, h) is the (width, height) of the input image. Set each + component to zero to crop at the image's center. + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + img_h, img_w = image.shape[:2] + + # Initialize src_rect to fit the input image. + src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h]) + + # Apply a random scaling to the src_rect. + src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1]) + + # Apply a random shift to the coordinates origin. + src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5) + src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5) + + # Map src_rect coordinates into image coordinates (center at corner). + src_rect[0::2] += 0.5 * img_w + src_rect[1::2] += 0.5 * img_h + + return ExtentTransform( + src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]), + output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])), + ) + + +class RandomContrast(Augmentation): + """ + Randomly transforms image contrast. + + Contrast intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce contrast + - intensity = 1 will preserve the input image + - intensity > 1 will increase contrast + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w) + + +class RandomBrightness(Augmentation): + """ + Randomly transforms image brightness. + + Brightness intensity is uniformly sampled in (intensity_min, intensity_max). 
+ - intensity < 1 will reduce brightness + - intensity = 1 will preserve the input image + - intensity > 1 will increase brightness + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w) + + +class RandomSaturation(Augmentation): + """ + Randomly transforms saturation of an RGB image. + Input images are assumed to have 'RGB' channel order. + + Saturation intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce saturation (make the image more grayscale) + - intensity = 1 will preserve the input image + - intensity > 1 will increase saturation + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation (1 preserves input). + intensity_max (float): Maximum augmentation (1 preserves input). + """ + super().__init__() + self._init(locals()) + + def get_transform(self, image): + assert image.shape[-1] == 3, "RandomSaturation only works on RGB images" + w = np.random.uniform(self.intensity_min, self.intensity_max) + grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis] + return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w) + + +class RandomLighting(Augmentation): + """ + The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet. + Input images are assumed to have 'RGB' channel order. + + The degree of color jittering is randomly sampled via a normal distribution, + with standard deviation given by the scale parameter. + """ + + def __init__(self, scale): + """ + Args: + scale (float): Standard deviation of principal component weighting. + """ + super().__init__() + self._init(locals()) + self.eigen_vecs = np.array( + [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]] + ) + self.eigen_vals = np.array([0.2175, 0.0188, 0.0045]) + + def get_transform(self, image): + assert image.shape[-1] == 3, "RandomLighting only works on RGB images" + weights = np.random.normal(scale=self.scale, size=3) + return BlendTransform( + src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0 + ) + + +class RandomResize(Augmentation): + """Randomly resize image to a target size in shape_list""" + + def __init__(self, shape_list, interp=Image.BILINEAR): + """ + Args: + shape_list: a list of shapes in (h, w) + interp: PIL interpolation method + """ + self.shape_list = shape_list + self._init(locals()) + + def get_transform(self, image): + shape_idx = np.random.randint(low=0, high=len(self.shape_list)) + h, w = self.shape_list[shape_idx] + return ResizeTransform(image.shape[0], image.shape[1], h, w, self.interp) + + +class MinIoURandomCrop(Augmentation): + """Random crop the image & bboxes, the cropped patches have minimum IoU + requirement with original image & bboxes, the IoU threshold is randomly + selected from min_ious. + + Args: + min_ious (tuple): minimum IoU threshold for all intersections with + bounding boxes + min_crop_size (float): minimum crop's size (i.e. 
h,w := a*h, a*w, + where a >= min_crop_size) + mode_trials: number of trials for sampling min_ious threshold + crop_trials: number of trials for sampling crop_size after cropping + """ + + def __init__( + self, + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3, + mode_trials=1000, + crop_trials=50, + ): + self.min_ious = min_ious + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + self.mode_trials = mode_trials + self.crop_trials = crop_trials + + def get_transform(self, image, boxes): + """Call function to crop images and bounding boxes with minimum IoU + constraint. + + Args: + boxes: ground truth boxes in (x1, y1, x2, y2) format + """ + if boxes is None: + return NoOpTransform() + h, w, c = image.shape + for _ in range(self.mode_trials): + mode = random.choice(self.sample_mode) + self.mode = mode + if mode == 1: + return NoOpTransform() + + min_iou = mode + for _ in range(self.crop_trials): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array((int(left), int(top), int(left + new_w), int(top + new_h))) + # Line or point crop is not allowed + if patch[2] == patch[0] or patch[3] == patch[1]: + continue + overlaps = pairwise_iou( + Boxes(patch.reshape(-1, 4)), Boxes(boxes.reshape(-1, 4)) + ).reshape(-1) + if len(overlaps) > 0 and overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + # only adjust boxes and instance masks when the gt is not empty + if len(overlaps) > 0: + # adjust boxes + def is_center_of_bboxes_in_patch(boxes, patch): + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = ( + (center[:, 0] > patch[0]) + * (center[:, 1] > patch[1]) + * (center[:, 0] < patch[2]) + * (center[:, 1] < patch[3]) + ) + return mask + + mask = is_center_of_bboxes_in_patch(boxes, patch) + if not mask.any(): + continue + return CropTransform(int(left), int(top), int(new_w), int(new_h)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/transform.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..46769a2569ffc6223a95990f8db5973757e7d23f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/data/transforms/transform.py @@ -0,0 +1,351 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +See "Data Augmentation" tutorial for an overview of the system: +https://detectron2.readthedocs.io/tutorials/augmentation.html +""" + +import numpy as np +import torch +import torch.nn.functional as F +from fvcore.transforms.transform import ( + CropTransform, + HFlipTransform, + NoOpTransform, + Transform, + TransformList, +) +from PIL import Image + +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + pass + +__all__ = [ + "ExtentTransform", + "ResizeTransform", + "RotationTransform", + "ColorTransform", + "PILColorTransform", +] + + +class ExtentTransform(Transform): + """ + Extracts a subregion from the source image and scales it to the output size. + + The fill color is used to map pixels from the source rect that fall outside + the source image. 
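+    For example (illustrative numbers): ``src_rect=(-10, 0, 90, 100)`` with
+    ``output_size=(100, 100)`` samples a 100x100 region whose left 10 columns fall
+    outside the source image and are therefore filled with ``fill``.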
+ + See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform + """ + + def __init__(self, src_rect, output_size, interp=Image.BILINEAR, fill=0): + """ + Args: + src_rect (x0, y0, x1, y1): src coordinates + output_size (h, w): dst image size + interp: PIL interpolation methods + fill: Fill color used when src_rect extends outside image + """ + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + h, w = self.output_size + if len(img.shape) > 2 and img.shape[2] == 1: + pil_image = Image.fromarray(img[:, :, 0], mode="L") + else: + pil_image = Image.fromarray(img) + pil_image = pil_image.transform( + size=(w, h), + method=Image.EXTENT, + data=self.src_rect, + resample=interp if interp else self.interp, + fill=self.fill, + ) + ret = np.asarray(pil_image) + if len(img.shape) > 2 and img.shape[2] == 1: + ret = np.expand_dims(ret, -1) + return ret + + def apply_coords(self, coords): + # Transform image center from source coordinates into output coordinates + # and then map the new origin to the corner of the output image. + h, w = self.output_size + x0, y0, x1, y1 = self.src_rect + new_coords = coords.astype(np.float32) + new_coords[:, 0] -= 0.5 * (x0 + x1) + new_coords[:, 1] -= 0.5 * (y0 + y1) + new_coords[:, 0] *= w / (x1 - x0) + new_coords[:, 1] *= h / (y1 - y0) + new_coords[:, 0] += 0.5 * w + new_coords[:, 1] += 0.5 * h + return new_coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + +class ResizeTransform(Transform): + """ + Resize the image to a target size. + """ + + def __init__(self, h, w, new_h, new_w, interp=None): + """ + Args: + h, w (int): original image size + new_h, new_w (int): new image size + interp: PIL interpolation methods, defaults to bilinear. 
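+
+        Example (illustrative)::
+
+            tfm = ResizeTransform(480, 640, 240, 320)  # halve a 480x640 image
+            small = tfm.apply_image(img)   # `img` (assumed 480x640) is downscaled
+            pts = tfm.apply_coords(pts)    # x and y coordinates are scaled by 0.5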
+ """ + # TODO decide on PIL vs opencv + super().__init__() + if interp is None: + interp = Image.BILINEAR + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + assert img.shape[:2] == (self.h, self.w) + assert len(img.shape) <= 4 + interp_method = interp if interp is not None else self.interp + + if img.dtype == np.uint8: + if len(img.shape) > 2 and img.shape[2] == 1: + pil_image = Image.fromarray(img[:, :, 0], mode="L") + else: + pil_image = Image.fromarray(img) + pil_image = pil_image.resize((self.new_w, self.new_h), interp_method) + ret = np.asarray(pil_image) + if len(img.shape) > 2 and img.shape[2] == 1: + ret = np.expand_dims(ret, -1) + else: + # PIL only supports uint8 + if any(x < 0 for x in img.strides): + img = np.ascontiguousarray(img) + img = torch.from_numpy(img) + shape = list(img.shape) + shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:] + img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw + _PIL_RESIZE_TO_INTERPOLATE_MODE = { + Image.NEAREST: "nearest", + Image.BILINEAR: "bilinear", + Image.BICUBIC: "bicubic", + } + mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method] + align_corners = None if mode == "nearest" else False + img = F.interpolate( + img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners + ) + shape[:2] = (self.new_h, self.new_w) + ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c) + + return ret + + def apply_coords(self, coords): + coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w) + coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h) + return coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + def inverse(self): + return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp) + + +class RotationTransform(Transform): + """ + This method returns a copy of this image, rotated the given + number of degrees counter clockwise around its center. 
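+
+    Example (illustrative)::
+
+        tfm = RotationTransform(480, 640, angle=30)    # expand=True by default
+        rotated = tfm.apply_image(img)                 # `img` is assumed 480x640
+        restored = tfm.inverse().apply_image(rotated)  # rotate back, crop to 480x640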
+ """ + + def __init__(self, h, w, angle, expand=True, center=None, interp=None): + """ + Args: + h, w (int): original image size + angle (float): degrees for rotation + expand (bool): choose if the image should be resized to fit the whole + rotated image (default), or simply cropped + center (tuple (width, height)): coordinates of the rotation center + if left to None, the center will be fit to the center of each image + center has no effect if expand=True because it only affects shifting + interp: cv2 interpolation method, default cv2.INTER_LINEAR + """ + super().__init__() + image_center = np.array((w / 2, h / 2)) + if center is None: + center = image_center + if interp is None: + interp = cv2.INTER_LINEAR + abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle)))) + if expand: + # find the new width and height bounds + bound_w, bound_h = np.rint( + [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin] + ).astype(int) + else: + bound_w, bound_h = w, h + + self._set_attributes(locals()) + self.rm_coords = self.create_rotation_matrix() + # Needed because of this problem https://github.com/opencv/opencv/issues/11784 + self.rm_image = self.create_rotation_matrix(offset=-0.5) + + def apply_image(self, img, interp=None): + """ + img should be a numpy array, formatted as Height * Width * Nchannels + """ + if len(img) == 0 or self.angle % 360 == 0: + return img + assert img.shape[:2] == (self.h, self.w) + interp = interp if interp is not None else self.interp + return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp) + + def apply_coords(self, coords): + """ + coords should be a N * 2 array-like, containing N couples of (x, y) points + """ + coords = np.asarray(coords, dtype=float) + if len(coords) == 0 or self.angle % 360 == 0: + return coords + return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :] + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST) + return segmentation + + def create_rotation_matrix(self, offset=0): + center = (self.center[0] + offset, self.center[1] + offset) + rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1) + if self.expand: + # Find the coordinates of the center of rotation in the new image + # The only point for which we know the future coordinates is the center of the image + rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :] + new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center + # shift the rotation center to the new coordinates + rm[:, 2] += new_center + return rm + + def inverse(self): + """ + The inverse is to rotate it back with expand, and crop to get the original shape. + """ + if not self.expand: # Not possible to inverse if a part of the image is lost + raise NotImplementedError() + rotation = RotationTransform( + self.bound_h, self.bound_w, -self.angle, True, None, self.interp + ) + crop = CropTransform( + (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h + ) + return TransformList([rotation, crop]) + + +class ColorTransform(Transform): + """ + Generic wrapper for any photometric transforms. + These transformations should only affect the color space and + not the coordinate space of the image (e.g. 
annotation + coordinates such as bounding boxes should not be changed) + """ + + def __init__(self, op): + """ + Args: + op (Callable): operation to be applied to the image, + which takes in an ndarray and returns an ndarray. + """ + if not callable(op): + raise ValueError("op parameter should be callable") + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img): + return self.op(img) + + def apply_coords(self, coords): + return coords + + def inverse(self): + return NoOpTransform() + + def apply_segmentation(self, segmentation): + return segmentation + + +class PILColorTransform(ColorTransform): + """ + Generic wrapper for PIL Photometric image transforms, + which affect the color space and not the coordinate + space of the image + """ + + def __init__(self, op): + """ + Args: + op (Callable): operation to be applied to the image, + which takes in a PIL Image and returns a transformed + PIL Image. + For reference on possible operations see: + - https://pillow.readthedocs.io/en/stable/ + """ + if not callable(op): + raise ValueError("op parameter should be callable") + super().__init__(op) + + def apply_image(self, img): + img = Image.fromarray(img) + return np.asarray(super().apply_image(img)) + + +def HFlip_rotated_box(transform, rotated_boxes): + """ + Apply the horizontal flip transform on rotated boxes. + + Args: + rotated_boxes (ndarray): Nx5 floating point array of + (x_center, y_center, width, height, angle_degrees) format + in absolute coordinates. + """ + # Transform x_center + rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0] + # Transform angle + rotated_boxes[:, 4] = -rotated_boxes[:, 4] + return rotated_boxes + + +def Resize_rotated_box(transform, rotated_boxes): + """ + Apply the resizing transform on rotated boxes. For details of how these (approximation) + formulas are derived, please refer to :meth:`RotatedBoxes.scale`. + + Args: + rotated_boxes (ndarray): Nx5 floating point array of + (x_center, y_center, width, height, angle_degrees) format + in absolute coordinates. + """ + scale_factor_x = transform.new_w * 1.0 / transform.w + scale_factor_y = transform.new_h * 1.0 / transform.h + rotated_boxes[:, 0] *= scale_factor_x + rotated_boxes[:, 1] *= scale_factor_y + theta = rotated_boxes[:, 4] * np.pi / 180.0 + c = np.cos(theta) + s = np.sin(theta) + rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s)) + rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c)) + rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi + + return rotated_boxes + + +HFlipTransform.register_type("rotated_box", HFlip_rotated_box) +ResizeTransform.register_type("rotated_box", Resize_rotated_box) + +# not necessary any more with latest fvcore +NoOpTransform.register_type("rotated_box", lambda t, x: x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08a61572b4c7d09c8d400e903a96cbf5b2cc4763 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
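+# Note: the wildcard imports below make the engine API available from this single
+# namespace, e.g. `from custom_detectron2.engine import DefaultTrainer, launch`
+# (DefaultTrainer comes from .defaults, launch from .launch).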
+ +from .launch import * +from .train_loop import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) +# but still make them available here +from .hooks import * +from .defaults import * diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/defaults.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..574f87cd7a11d11dfb91b9db62c795ba6403aaa9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/defaults.py @@ -0,0 +1,715 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +This file contains components with some default boilerplate logic user may need +in training / testing. They will not work for everyone, but many users may find them useful. + +The behavior of functions/classes in this file is subject to change, +since they are meant to represent the "common default behavior" people need in their projects. +""" + +import argparse +import logging +import os +import sys +import weakref +from collections import OrderedDict +from typing import Optional +import torch +from fvcore.nn.precise_bn import get_bn_modules +from omegaconf import OmegaConf +from torch.nn.parallel import DistributedDataParallel + +import custom_detectron2.data.transforms as T +from custom_detectron2.checkpoint import DetectionCheckpointer +from custom_detectron2.config import CfgNode, LazyConfig +from custom_detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from custom_detectron2.evaluation import ( + DatasetEvaluator, + inference_on_dataset, + print_csv_format, + verify_results, +) +from custom_detectron2.modeling import build_model +from custom_detectron2.solver import build_lr_scheduler, build_optimizer +from custom_detectron2.utils import comm +from custom_detectron2.utils.collect_env import collect_env_info +from custom_detectron2.utils.env import seed_all_rng +from custom_detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import setup_logger + +from . import hooks +from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase + +__all__ = [ + "create_ddp_model", + "default_argument_parser", + "default_setup", + "default_writers", + "DefaultPredictor", + "DefaultTrainer", +] + + +def create_ddp_model(model, *, fp16_compression=False, **kwargs): + """ + Create a DistributedDataParallel model if there are >1 processes. + + Args: + model: a torch.nn.Module + fp16_compression: add fp16 compression hooks to the ddp object. + See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook + kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. 
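+
+    Example (illustrative; mirrors how :class:`DefaultTrainer` uses it below)::
+
+        model = create_ddp_model(model, broadcast_buffers=False)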
+ """ # noqa + if comm.get_world_size() == 1: + return model + if "device_ids" not in kwargs: + kwargs["device_ids"] = [comm.get_local_rank()] + ddp = DistributedDataParallel(model, **kwargs) + if fp16_compression: + from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks + + ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) + return ddp + + +def default_argument_parser(epilog=None): + """ + Create a parser with some common arguments used by detectron2 users. + + Args: + epilog (str): epilog passed to ArgumentParser describing the usage. + + Returns: + argparse.ArgumentParser: + """ + parser = argparse.ArgumentParser( + epilog=epilog + or f""" +Examples: + +Run on single machine: + $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml + +Change some config options: + $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 + +Run on multiple machines: + (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags] + (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags] +""", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument( + "--resume", + action="store_true", + help="Whether to attempt to resume from the checkpoint directory. " + "See documentation of `DefaultTrainer.resume_or_load()` for what it means.", + ) + parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") + parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") + parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") + parser.add_argument( + "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" + ) + + # PyTorch still may leave orphan processes in multi-gpu training. + # Therefore we use a deterministic way to obtain port, + # so that users are aware of orphan processes by seeing the port occupied. + port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14 + parser.add_argument( + "--dist-url", + default="tcp://127.0.0.1:{}".format(port), + help="initialization URL for pytorch distributed backend. See " + "https://pytorch.org/docs/stable/distributed.html for details.", + ) + parser.add_argument( + "opts", + help=""" +Modify config options at the end of the command. For Yacs configs, use +space-separated "PATH.KEY VALUE" pairs. +For python-based LazyConfig, use "path.key=value". + """.strip(), + default=None, + nargs=argparse.REMAINDER, + ) + return parser + + +def _try_get_key(cfg, *keys, default=None): + """ + Try select keys from cfg until the first key that exists. Otherwise return default. + """ + if isinstance(cfg, CfgNode): + cfg = OmegaConf.create(cfg.dump()) + for k in keys: + none = object() + p = OmegaConf.select(cfg, k, default=none) + if p is not none: + return p + return default + + +def _highlight(code, filename): + try: + import pygments + except ImportError: + return code + + from pygments.lexers import Python3Lexer, YamlLexer + from pygments.formatters import Terminal256Formatter + + lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() + code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) + return code + + +def default_setup(cfg, args): + """ + Perform some basic common setups at the beginning of a job, including: + + 1. 
Set up the detectron2 logger + 2. Log basic information about environment, cmdline arguments, and config + 3. Backup the config to the output directory + + Args: + cfg (CfgNode or omegaconf.DictConfig): the full config to be used + args (argparse.NameSpace): the command line arguments to be logged + """ + output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") + if comm.is_main_process() and output_dir: + PathManager.mkdirs(output_dir) + + rank = comm.get_rank() + setup_logger(output_dir, distributed_rank=rank, name="fvcore") + logger = setup_logger(output_dir, distributed_rank=rank) + + logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())) + logger.info("Environment info:\n" + collect_env_info()) + + logger.info("Command line arguments: " + str(args)) + if hasattr(args, "config_file") and args.config_file != "": + logger.info( + "Contents of args.config_file={}:\n{}".format( + args.config_file, + _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), + ) + ) + + if comm.is_main_process() and output_dir: + # Note: some of our scripts may expect the existence of + # config.yaml in output directory + path = os.path.join(output_dir, "config.yaml") + if isinstance(cfg, CfgNode): + logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) + with PathManager.open(path, "w") as f: + f.write(cfg.dump()) + else: + LazyConfig.save(cfg, path) + logger.info("Full config saved to {}".format(path)) + + # make sure each worker has a different, yet deterministic seed if specified + seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) + seed_all_rng(None if seed < 0 else seed + rank) + + # cudnn benchmark has large overhead. It shouldn't be used considering the small size of + # typical validation set. + if not (hasattr(args, "eval_only") and args.eval_only): + torch.backends.cudnn.benchmark = _try_get_key( + cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False + ) + + +def default_writers(output_dir: str, max_iter: Optional[int] = None): + """ + Build a list of :class:`EventWriter` to be used. + It now consists of a :class:`CommonMetricPrinter`, + :class:`TensorboardXWriter` and :class:`JSONWriter`. + + Args: + output_dir: directory to store JSON metrics and tensorboard events + max_iter: the total number of iterations + + Returns: + list[EventWriter]: a list of :class:`EventWriter` objects. + """ + PathManager.mkdirs(output_dir) + return [ + # It may not always print what you want to see, since it prints "common" metrics only. + CommonMetricPrinter(max_iter), + JSONWriter(os.path.join(output_dir, "metrics.json")), + TensorboardXWriter(output_dir), + ] + + +class DefaultPredictor: + """ + Create a simple end-to-end predictor with the given config that runs on + single device for a single input image. + + Compared to using the model directly, this class does the following additions: + + 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. + 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. + 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. + 4. Take one input image and produce a single output, instead of a batch. + + This is meant for simple demo purposes, so it does the above steps automatically. + This is not meant for benchmarks or running complicated inference logic. + If you'd like to do anything more complicated, please refer to its source code as + examples to build and use the model manually. 
+ + Attributes: + metadata (Metadata): the metadata of the underlying dataset, obtained from + cfg.DATASETS.TEST. + + Examples: + :: + pred = DefaultPredictor(cfg) + inputs = cv2.imread("input.jpg") + outputs = pred(inputs) + """ + + def __init__(self, cfg): + self.cfg = cfg.clone() # cfg can be modified by model + self.model = build_model(self.cfg) + self.model.eval() + if len(cfg.DATASETS.TEST): + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + + checkpointer = DetectionCheckpointer(self.model) + checkpointer.load(cfg.MODEL.WEIGHTS) + + self.aug = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + + self.input_format = cfg.INPUT.FORMAT + assert self.input_format in ["RGB", "BGR"], self.input_format + + def __call__(self, original_image): + """ + Args: + original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). + + Returns: + predictions (dict): + the output of the model for one image only. + See :doc:`/tutorials/models` for details about the format. + """ + with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 + # Apply pre-processing to image. + if self.input_format == "RGB": + # whether the model expects BGR inputs or RGB + original_image = original_image[:, :, ::-1] + height, width = original_image.shape[:2] + image = self.aug.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + inputs = {"image": image, "height": height, "width": width} + predictions = self.model([inputs])[0] + return predictions + + +class DefaultTrainer(TrainerBase): + """ + A trainer with default training logic. It does the following: + + 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader + defined by the given config. Create a LR scheduler defined by the config. + 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when + `resume_or_load` is called. + 3. Register a few common hooks defined by the config. + + It is created to simplify the **standard model training workflow** and reduce code boilerplate + for users who only need the standard training workflow, with standard features. + It means this class makes *many assumptions* about your training logic that + may easily become invalid in a new research. In fact, any assumptions beyond those made in the + :class:`SimpleTrainer` are too much for research. + + The code of this class has been annotated about restrictive assumptions it makes. + When they do not work for you, you're encouraged to: + + 1. Overwrite methods of this class, OR: + 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and + nothing else. You can then add your own hooks if needed. OR: + 3. Write your own training loop similar to `tools/plain_train_net.py`. + + See the :doc:`/tutorials/training` tutorials for more details. + + Note that the behavior of this class, like other functions/classes in + this file, is not stable, since it is meant to represent the "common default behavior". + It is only guaranteed to work well with the standard models and training workflow in detectron2. + To obtain more stable behavior, write your own training logic with other public APIs. 
+ + Examples: + :: + trainer = DefaultTrainer(cfg) + trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS + trainer.train() + + Attributes: + scheduler: + checkpointer (DetectionCheckpointer): + cfg (CfgNode): + """ + + def __init__(self, cfg): + """ + Args: + cfg (CfgNode): + """ + super().__init__() + logger = logging.getLogger("detectron2") + if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 + setup_logger() + cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size()) + + # Assume these objects must be constructed in this order. + model = self.build_model(cfg) + optimizer = self.build_optimizer(cfg, model) + data_loader = self.build_train_loader(cfg) + + model = create_ddp_model(model, broadcast_buffers=False) + self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( + model, data_loader, optimizer + ) + + self.scheduler = self.build_lr_scheduler(cfg, optimizer) + self.checkpointer = DetectionCheckpointer( + # Assume you want to save checkpoints together with logs/statistics + model, + cfg.OUTPUT_DIR, + trainer=weakref.proxy(self), + ) + self.start_iter = 0 + self.max_iter = cfg.SOLVER.MAX_ITER + self.cfg = cfg + + self.register_hooks(self.build_hooks()) + + def resume_or_load(self, resume=True): + """ + If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by + a `last_checkpoint` file), resume from the file. Resuming means loading all + available states (eg. optimizer and scheduler) and update iteration counter + from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. + + Otherwise, this is considered as an independent training. The method will load model + weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start + from iteration 0. + + Args: + resume (bool): whether to do resume or not + """ + self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) + if resume and self.checkpointer.has_checkpoint(): + # The checkpoint stores the training iteration that just finished, thus we start + # at the next iteration + self.start_iter = self.iter + 1 + + def build_hooks(self): + """ + Build a list of default hooks, including timing, evaluation, + checkpointing, lr scheduling, precise BN, writing events. + + Returns: + list[HookBase]: + """ + cfg = self.cfg.clone() + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN + + ret = [ + hooks.IterationTimer(), + hooks.LRScheduler(), + hooks.PreciseBN( + # Run at the same freq as (but before) evaluation. + cfg.TEST.EVAL_PERIOD, + self.model, + # Build a new data loader to not affect training + self.build_train_loader(cfg), + cfg.TEST.PRECISE_BN.NUM_ITER, + ) + if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) + else None, + ] + + # Do PreciseBN before checkpointer, because it updates the model and need to + # be saved by checkpointer. + # This is not always the best: if checkpointing has a different frequency, + # some checkpoints may have more precise statistics than others. + if comm.is_main_process(): + ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) + + def test_and_save_results(): + self._last_eval_results = self.test(self.cfg, self.model) + return self._last_eval_results + + # Do evaluation after checkpointer, because then if it fails, + # we can use the saved checkpoint to debug. 
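+        # EvalHook calls `test_and_save_results` every cfg.TEST.EVAL_PERIOD iterations
+        # (and after the final iteration); the cached `_last_eval_results` is what
+        # `train()` later checks against cfg.TEST.EXPECTED_RESULTS.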
+ ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) + + if comm.is_main_process(): + # Here the default print/log frequency of each writer is used. + # run writers in the end, so that evaluation metrics are written + ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) + return ret + + def build_writers(self): + """ + Build a list of writers to be used using :func:`default_writers()`. + If you'd like a different list of writers, you can overwrite it in + your trainer. + + Returns: + list[EventWriter]: a list of :class:`EventWriter` objects. + """ + return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) + + def train(self): + """ + Run training. + + Returns: + OrderedDict of results, if evaluation is enabled. Otherwise None. + """ + super().train(self.start_iter, self.max_iter) + if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): + assert hasattr( + self, "_last_eval_results" + ), "No evaluation results obtained during training!" + verify_results(self.cfg, self._last_eval_results) + return self._last_eval_results + + def run_step(self): + self._trainer.iter = self.iter + self._trainer.run_step() + + def state_dict(self): + ret = super().state_dict() + ret["_trainer"] = self._trainer.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self._trainer.load_state_dict(state_dict["_trainer"]) + + @classmethod + def build_model(cls, cfg): + """ + Returns: + torch.nn.Module: + + It now calls :func:`detectron2.modeling.build_model`. + Overwrite it if you'd like a different model. + """ + model = build_model(cfg) + logger = logging.getLogger(__name__) + logger.info("Model:\n{}".format(model)) + return model + + @classmethod + def build_optimizer(cls, cfg, model): + """ + Returns: + torch.optim.Optimizer: + + It now calls :func:`detectron2.solver.build_optimizer`. + Overwrite it if you'd like a different optimizer. + """ + return build_optimizer(cfg, model) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + """ + It now calls :func:`detectron2.solver.build_lr_scheduler`. + Overwrite it if you'd like a different scheduler. + """ + return build_lr_scheduler(cfg, optimizer) + + @classmethod + def build_train_loader(cls, cfg): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_train_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_train_loader(cfg) + + @classmethod + def build_test_loader(cls, cfg, dataset_name): + """ + Returns: + iterable + + It now calls :func:`detectron2.data.build_detection_test_loader`. + Overwrite it if you'd like a different data loader. + """ + return build_detection_test_loader(cfg, dataset_name) + + @classmethod + def build_evaluator(cls, cfg, dataset_name): + """ + Returns: + DatasetEvaluator or None + + It is not implemented by default. + """ + raise NotImplementedError( + """ +If you want DefaultTrainer to automatically run evaluation, +please implement `build_evaluator()` in subclasses (see train_net.py for example). +Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example). +""" + ) + + @classmethod + def test(cls, cfg, model, evaluators=None): + """ + Evaluate the given model. The given model is expected to already contain + weights to evaluate. + + Args: + cfg (CfgNode): + model (nn.Module): + evaluators (list[DatasetEvaluator] or None): if None, will call + :meth:`build_evaluator`. 
Otherwise, must have the same length as + ``cfg.DATASETS.TEST``. + + Returns: + dict: a dict of result metrics + """ + logger = logging.getLogger(__name__) + if isinstance(evaluators, DatasetEvaluator): + evaluators = [evaluators] + if evaluators is not None: + assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( + len(cfg.DATASETS.TEST), len(evaluators) + ) + + results = OrderedDict() + for idx, dataset_name in enumerate(cfg.DATASETS.TEST): + data_loader = cls.build_test_loader(cfg, dataset_name) + # When evaluators are passed in as arguments, + # implicitly assume that evaluators can be created before data_loader. + if evaluators is not None: + evaluator = evaluators[idx] + else: + try: + evaluator = cls.build_evaluator(cfg, dataset_name) + except NotImplementedError: + logger.warn( + "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " + "or implement its `build_evaluator` method." + ) + results[dataset_name] = {} + continue + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + assert isinstance( + results_i, dict + ), "Evaluator must return a dict on the main process. Got {} instead.".format( + results_i + ) + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + + if len(results) == 1: + results = list(results.values())[0] + return results + + @staticmethod + def auto_scale_workers(cfg, num_workers: int): + """ + When the config is defined for certain number of workers (according to + ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of + workers currently in use, returns a new cfg where the total batch size + is scaled so that the per-GPU batch size stays the same as the + original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. + + Other config options are also scaled accordingly: + * training steps and warmup steps are scaled inverse proportionally. + * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. + + For example, with the original config like the following: + + .. code-block:: yaml + + IMS_PER_BATCH: 16 + BASE_LR: 0.1 + REFERENCE_WORLD_SIZE: 8 + MAX_ITER: 5000 + STEPS: (4000,) + CHECKPOINT_PERIOD: 1000 + + When this config is used on 16 GPUs instead of the reference number 8, + calling this method will return a new config with: + + .. code-block:: yaml + + IMS_PER_BATCH: 32 + BASE_LR: 0.2 + REFERENCE_WORLD_SIZE: 16 + MAX_ITER: 2500 + STEPS: (2000,) + CHECKPOINT_PERIOD: 500 + + Note that both the original config and this new config can be trained on 16 GPUs. + It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). + + Returns: + CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. + """ + old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE + if old_world_size == 0 or old_world_size == num_workers: + return cfg + cfg = cfg.clone() + frozen = cfg.is_frozen() + cfg.defrost() + + assert ( + cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 + ), "Invalid REFERENCE_WORLD_SIZE in config!" 
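+        # Worked example (mirrors the docstring above): scaling from
+        # REFERENCE_WORLD_SIZE=8 to 16 workers gives scale=2.0, so
+        # IMS_PER_BATCH 16 -> 32, BASE_LR 0.1 -> 0.2, MAX_ITER 5000 -> 2500,
+        # STEPS (4000,) -> (2000,), CHECKPOINT_PERIOD 1000 -> 500.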
+ scale = num_workers / old_world_size + bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale)) + lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale + max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale)) + warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale)) + cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS) + cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) + cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) + cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant + logger = logging.getLogger(__name__) + logger.info( + f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " + f"max_iter={max_iter}, warmup={warmup_iter}." + ) + + if frozen: + cfg.freeze() + return cfg + + +# Access basic attributes from the underlying trainer +for _attr in ["model", "data_loader", "optimizer"]: + setattr( + DefaultTrainer, + _attr, + property( + # getter + lambda self, x=_attr: getattr(self._trainer, x), + # setter + lambda self, value, x=_attr: setattr(self._trainer, x, value), + ), + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/hooks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..a8773d612a05bb028e6db98b60030fcd4fe04981 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/hooks.py @@ -0,0 +1,690 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import datetime +import itertools +import logging +import math +import operator +import os +import tempfile +import time +import warnings +from collections import Counter +import torch +from fvcore.common.checkpoint import Checkpointer +from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer +from fvcore.common.param_scheduler import ParamScheduler +from fvcore.common.timer import Timer +from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats + +import custom_detectron2.utils.comm as comm +from custom_detectron2.evaluation.testing import flatten_results_dict +from custom_detectron2.solver import LRMultiplier +from custom_detectron2.solver import LRScheduler as _LRScheduler +from custom_detectron2.utils.events import EventStorage, EventWriter +from custom_detectron2.utils.file_io import PathManager + +from .train_loop import HookBase + +__all__ = [ + "CallbackHook", + "IterationTimer", + "PeriodicWriter", + "PeriodicCheckpointer", + "BestCheckpointer", + "LRScheduler", + "AutogradProfiler", + "EvalHook", + "PreciseBN", + "TorchProfiler", + "TorchMemoryStats", +] + + +""" +Implement some common hooks. +""" + + +class CallbackHook(HookBase): + """ + Create a hook using callback functions provided by the user. + """ + + def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None): + """ + Each argument is a function that takes one argument: the trainer. + """ + self._before_train = before_train + self._before_step = before_step + self._after_step = after_step + self._after_train = after_train + + def before_train(self): + if self._before_train: + self._before_train(self.trainer) + + def after_train(self): + if self._after_train: + self._after_train(self.trainer) + # The functions may be closures that hold reference to the trainer + # Therefore, delete them to avoid circular reference. 
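+ # (after_train is the last callback ever invoked on a hook, so the stored
+ # callables are not needed past this point.)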
+ del self._before_train, self._after_train + del self._before_step, self._after_step + + def before_step(self): + if self._before_step: + self._before_step(self.trainer) + + def after_step(self): + if self._after_step: + self._after_step(self.trainer) + + +class IterationTimer(HookBase): + """ + Track the time spent for each iteration (each run_step call in the trainer). + Print a summary in the end of training. + + This hook uses the time between the call to its :meth:`before_step` + and :meth:`after_step` methods. + Under the convention that :meth:`before_step` of all hooks should only + take negligible amount of time, the :class:`IterationTimer` hook should be + placed at the beginning of the list of hooks to obtain accurate timing. + """ + + def __init__(self, warmup_iter=3): + """ + Args: + warmup_iter (int): the number of iterations at the beginning to exclude + from timing. + """ + self._warmup_iter = warmup_iter + self._step_timer = Timer() + self._start_time = time.perf_counter() + self._total_timer = Timer() + + def before_train(self): + self._start_time = time.perf_counter() + self._total_timer.reset() + self._total_timer.pause() + + def after_train(self): + logger = logging.getLogger(__name__) + total_time = time.perf_counter() - self._start_time + total_time_minus_hooks = self._total_timer.seconds() + hook_time = total_time - total_time_minus_hooks + + num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter + + if num_iter > 0 and total_time_minus_hooks > 0: + # Speed is meaningful only after warmup + # NOTE this format is parsed by grep in some scripts + logger.info( + "Overall training speed: {} iterations in {} ({:.4f} s / it)".format( + num_iter, + str(datetime.timedelta(seconds=int(total_time_minus_hooks))), + total_time_minus_hooks / num_iter, + ) + ) + + logger.info( + "Total training time: {} ({} on hooks)".format( + str(datetime.timedelta(seconds=int(total_time))), + str(datetime.timedelta(seconds=int(hook_time))), + ) + ) + + def before_step(self): + self._step_timer.reset() + self._total_timer.resume() + + def after_step(self): + # +1 because we're in after_step, the current step is done + # but not yet counted + iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1 + if iter_done >= self._warmup_iter: + sec = self._step_timer.seconds() + self.trainer.storage.put_scalars(time=sec) + else: + self._start_time = time.perf_counter() + self._total_timer.reset() + + self._total_timer.pause() + + +class PeriodicWriter(HookBase): + """ + Write events to EventStorage (by calling ``writer.write()``) periodically. + + It is executed every ``period`` iterations and after the last iteration. + Note that ``period`` does not affect how data is smoothed by each writer. + """ + + def __init__(self, writers, period=20): + """ + Args: + writers (list[EventWriter]): a list of EventWriter objects + period (int): + """ + self._writers = writers + for w in writers: + assert isinstance(w, EventWriter), w + self._period = period + + def after_step(self): + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + for writer in self._writers: + writer.write() + + def after_train(self): + for writer in self._writers: + # If any new data is found (e.g. produced by other after_train), + # write them before closing + writer.write() + writer.close() + + +class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase): + """ + Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook. 
+
+ Note that when used as a hook,
+ it is unable to save additional data other than what's defined
+ by the given `checkpointer`.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ """
+
+ def before_train(self):
+ self.max_iter = self.trainer.max_iter
+
+ def after_step(self):
+ # No way to use **kwargs
+ self.step(self.trainer.iter)
+
+
+ class BestCheckpointer(HookBase):
+ """
+ Checkpoints the best weights based on a given metric.
+
+ This hook should be used in conjunction with, and executed after, the hook
+ that produces the metric, e.g. `EvalHook`.
+ """
+
+ def __init__(
+ self,
+ eval_period: int,
+ checkpointer: Checkpointer,
+ val_metric: str,
+ mode: str = "max",
+ file_prefix: str = "model_best",
+ ) -> None:
+ """
+ Args:
+ eval_period (int): the period `EvalHook` is set to run.
+ checkpointer: the checkpointer object used to save checkpoints.
+ val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
+ mode (str): one of {'max', 'min'}. Controls whether the chosen val metric should be
+ maximized or minimized, e.g. for "bbox/AP50" it should be "max"
+ file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
+ """
+ self._logger = logging.getLogger(__name__)
+ self._period = eval_period
+ self._val_metric = val_metric
+ assert mode in [
+ "max",
+ "min",
+ ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
+ if mode == "max":
+ self._compare = operator.gt
+ else:
+ self._compare = operator.lt
+ self._checkpointer = checkpointer
+ self._file_prefix = file_prefix
+ self.best_metric = None
+ self.best_iter = None
+
+ def _update_best(self, val, iteration):
+ if math.isnan(val) or math.isinf(val):
+ return False
+ self.best_metric = val
+ self.best_iter = iteration
+ return True
+
+ def _best_checking(self):
+ metric_tuple = self.trainer.storage.latest().get(self._val_metric)
+ if metric_tuple is None:
+ self._logger.warning(
+ f"Given val metric {self._val_metric} does not seem to be computed/stored. "
+ "Will not be checkpointing based on it."
+ )
+ return
+ else:
+ latest_metric, metric_iter = metric_tuple
+
+ if self.best_metric is None:
+ if self._update_best(latest_metric, metric_iter):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
+ )
+ elif self._compare(latest_metric, self.best_metric):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved best model as latest eval score for {self._val_metric} is "
+ f"{latest_metric:0.5f}, better than last best score "
+ f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
+ )
+ self._update_best(latest_metric, metric_iter)
+ else:
+ self._logger.info(
+ f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
+ f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
+ ) + + def after_step(self): + # same conditions as `EvalHook` + next_iter = self.trainer.iter + 1 + if ( + self._period > 0 + and next_iter % self._period == 0 + and next_iter != self.trainer.max_iter + ): + self._best_checking() + + def after_train(self): + # same conditions as `EvalHook` + if self.trainer.iter + 1 >= self.trainer.max_iter: + self._best_checking() + + +class LRScheduler(HookBase): + """ + A hook which executes a torch builtin LR scheduler and summarizes the LR. + It is executed after every iteration. + """ + + def __init__(self, optimizer=None, scheduler=None): + """ + Args: + optimizer (torch.optim.Optimizer): + scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler): + if a :class:`ParamScheduler` object, it defines the multiplier over the base LR + in the optimizer. + + If any argument is not given, will try to obtain it from the trainer. + """ + self._optimizer = optimizer + self._scheduler = scheduler + + def before_train(self): + self._optimizer = self._optimizer or self.trainer.optimizer + if isinstance(self.scheduler, ParamScheduler): + self._scheduler = LRMultiplier( + self._optimizer, + self.scheduler, + self.trainer.max_iter, + last_iter=self.trainer.iter - 1, + ) + self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer) + + @staticmethod + def get_best_param_group_id(optimizer): + # NOTE: some heuristics on what LR to summarize + # summarize the param group with most parameters + largest_group = max(len(g["params"]) for g in optimizer.param_groups) + + if largest_group == 1: + # If all groups have one parameter, + # then find the most common initial LR, and use it for summary + lr_count = Counter([g["lr"] for g in optimizer.param_groups]) + lr = lr_count.most_common()[0][0] + for i, g in enumerate(optimizer.param_groups): + if g["lr"] == lr: + return i + else: + for i, g in enumerate(optimizer.param_groups): + if len(g["params"]) == largest_group: + return i + + def after_step(self): + lr = self._optimizer.param_groups[self._best_param_group_id]["lr"] + self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False) + self.scheduler.step() + + @property + def scheduler(self): + return self._scheduler or self.trainer.scheduler + + def state_dict(self): + if isinstance(self.scheduler, _LRScheduler): + return self.scheduler.state_dict() + return {} + + def load_state_dict(self, state_dict): + if isinstance(self.scheduler, _LRScheduler): + logger = logging.getLogger(__name__) + logger.info("Loading scheduler from state_dict ...") + self.scheduler.load_state_dict(state_dict) + + +class TorchProfiler(HookBase): + """ + A hook which runs `torch.profiler.profile`. + + Examples: + :: + hooks.TorchProfiler( + lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR + ) + + The above example will run the profiler for iteration 10~20 and dump + results to ``OUTPUT_DIR``. We did not profile the first few iterations + because they are typically slower than the rest. + The result files can be loaded in the ``chrome://tracing`` page in chrome browser, + and the tensorboard visualizations can be visualized using + ``tensorboard --logdir OUTPUT_DIR/log`` + """ + + def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. 
+ output_dir (str): the output directory to dump tracing files. + activities (iterable): same as in `torch.profiler.profile`. + save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/ + """ + self._enable_predicate = enable_predicate + self._activities = activities + self._output_dir = output_dir + self._save_tensorboard = save_tensorboard + + def before_step(self): + if self._enable_predicate(self.trainer): + if self._save_tensorboard: + on_trace_ready = torch.profiler.tensorboard_trace_handler( + os.path.join( + self._output_dir, + "log", + "profiler-tensorboard-iter{}".format(self.trainer.iter), + ), + f"worker{comm.get_rank()}", + ) + else: + on_trace_ready = None + self._profiler = torch.profiler.profile( + activities=self._activities, + on_trace_ready=on_trace_ready, + record_shapes=True, + profile_memory=True, + with_stack=True, + with_flops=True, + ) + self._profiler.__enter__() + else: + self._profiler = None + + def after_step(self): + if self._profiler is None: + return + self._profiler.__exit__(None, None, None) + if not self._save_tensorboard: + PathManager.mkdirs(self._output_dir) + out_file = os.path.join( + self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter) + ) + if "://" not in out_file: + self._profiler.export_chrome_trace(out_file) + else: + # Support non-posix filesystems + with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d: + tmp_file = os.path.join(d, "tmp.json") + self._profiler.export_chrome_trace(tmp_file) + with open(tmp_file) as f: + content = f.read() + with PathManager.open(out_file, "w") as f: + f.write(content) + + +class AutogradProfiler(TorchProfiler): + """ + A hook which runs `torch.autograd.profiler.profile`. + + Examples: + :: + hooks.AutogradProfiler( + lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR + ) + + The above example will run the profiler for iteration 10~20 and dump + results to ``OUTPUT_DIR``. We did not profile the first few iterations + because they are typically slower than the rest. + The result files can be loaded in the ``chrome://tracing`` page in chrome browser. + + Note: + When used together with NCCL on older version of GPUs, + autograd profiler may cause deadlock because it unnecessarily allocates + memory on every device it sees. The memory management calls, if + interleaved with NCCL calls, lead to deadlock on GPUs that do not + support ``cudaLaunchCooperativeKernelMultiDevice``. + """ + + def __init__(self, enable_predicate, output_dir, *, use_cuda=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. + output_dir (str): the output directory to dump tracing files. + use_cuda (bool): same as in `torch.autograd.profiler.profile`. + """ + warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.") + self._enable_predicate = enable_predicate + self._use_cuda = use_cuda + self._output_dir = output_dir + + def before_step(self): + if self._enable_predicate(self.trainer): + self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda) + self._profiler.__enter__() + else: + self._profiler = None + + +class EvalHook(HookBase): + """ + Run an evaluation function periodically, and at the end of training. + + It is executed every ``eval_period`` iterations and after the last iteration. 
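+
+ Examples:
+ ::
+ # run a (hypothetical) zero-argument eval function every 5000 iterations;
+ # it must return a (possibly nested) dict of scalar metrics
+ hooks.EvalHook(5000, lambda: {"bbox": {"AP": 0.0}})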
+ """ + + def __init__(self, eval_period, eval_function, eval_after_train=True): + """ + Args: + eval_period (int): the period to run `eval_function`. Set to 0 to + not evaluate periodically (but still evaluate after the last iteration + if `eval_after_train` is True). + eval_function (callable): a function which takes no arguments, and + returns a nested dict of evaluation metrics. + eval_after_train (bool): whether to evaluate after the last iteration + + Note: + This hook must be enabled in all or none workers. + If you would like only certain workers to perform evaluation, + give other workers a no-op function (`eval_function=lambda: None`). + """ + self._period = eval_period + self._func = eval_function + self._eval_after_train = eval_after_train + + def _do_eval(self): + results = self._func() + + if results: + assert isinstance( + results, dict + ), "Eval function must return a dict. Got {} instead.".format(results) + + flattened_results = flatten_results_dict(results) + for k, v in flattened_results.items(): + try: + v = float(v) + except Exception as e: + raise ValueError( + "[EvalHook] eval_function should return a nested dict of float. " + "Got '{}: {}' instead.".format(k, v) + ) from e + self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False) + + # Evaluation may take different time among workers. + # A barrier make them start the next iteration together. + comm.synchronize() + + def after_step(self): + next_iter = self.trainer.iter + 1 + if self._period > 0 and next_iter % self._period == 0: + # do the last eval in after_train + if next_iter != self.trainer.max_iter: + self._do_eval() + + def after_train(self): + # This condition is to prevent the eval from running after a failed training + if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter: + self._do_eval() + # func is likely a closure that holds reference to the trainer + # therefore we clean it to avoid circular reference in the end + del self._func + + +class PreciseBN(HookBase): + """ + The standard implementation of BatchNorm uses EMA in inference, which is + sometimes suboptimal. + This class computes the true average of statistics rather than the moving average, + and put true averages to every BN layer in the given model. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, period, model, data_loader, num_iter): + """ + Args: + period (int): the period this hook is run, or 0 to not run during training. + The hook will always run in the end of training. + model (nn.Module): a module whose all BN layers in training mode will be + updated by precise BN. + Note that user is responsible for ensuring the BN layers to be + updated are in training mode when this hook is triggered. + data_loader (iterable): it will produce data to be run by `model(data)`. + num_iter (int): number of iterations used to compute the precise + statistics. + """ + self._logger = logging.getLogger(__name__) + if len(get_bn_modules(model)) == 0: + self._logger.info( + "PreciseBN is disabled because model does not contain BN layers in training mode." 
+ ) + self._disabled = True + return + + self._model = model + self._data_loader = data_loader + self._num_iter = num_iter + self._period = period + self._disabled = False + + self._data_iter = None + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + self.update_stats() + + def update_stats(self): + """ + Update the model with precise statistics. Users can manually call this method. + """ + if self._disabled: + return + + if self._data_iter is None: + self._data_iter = iter(self._data_loader) + + def data_loader(): + for num_iter in itertools.count(1): + if num_iter % 100 == 0: + self._logger.info( + "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter) + ) + # This way we can reuse the same iterator + yield next(self._data_iter) + + with EventStorage(): # capture events in a new storage to discard them + self._logger.info( + "Running precise-BN for {} iterations... ".format(self._num_iter) + + "Note that this could produce different statistics every time." + ) + update_bn_stats(self._model, data_loader(), self._num_iter) + + +class TorchMemoryStats(HookBase): + """ + Writes pytorch's cuda memory statistics periodically. + """ + + def __init__(self, period=20, max_runs=10): + """ + Args: + period (int): Output stats each 'period' iterations + max_runs (int): Stop the logging after 'max_runs' + """ + + self._logger = logging.getLogger(__name__) + self._period = period + self._max_runs = max_runs + self._runs = 0 + + def after_step(self): + if self._runs > self._max_runs: + return + + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + if torch.cuda.is_available(): + max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0 + reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0 + max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0 + + self._logger.info( + ( + " iter: {} " + " max_reserved_mem: {:.0f}MB " + " reserved_mem: {:.0f}MB " + " max_allocated_mem: {:.0f}MB " + " allocated_mem: {:.0f}MB " + ).format( + self.trainer.iter, + max_reserved_mb, + reserved_mb, + max_allocated_mb, + allocated_mb, + ) + ) + + self._runs += 1 + if self._runs == self._max_runs: + mem_summary = torch.cuda.memory_summary() + self._logger.info("\n" + mem_summary) + + torch.cuda.reset_peak_memory_stats() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/launch.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..91ce1305187778143b0f2f6e487bfadd2700fffd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/launch.py @@ -0,0 +1,123 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +from datetime import timedelta +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from custom_detectron2.utils import comm + +__all__ = ["DEFAULT_TIMEOUT", "launch"] + +DEFAULT_TIMEOUT = timedelta(minutes=30) + + +def _find_free_port(): + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # Binding to port 0 will cause the OS to find an available port for us + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + # NOTE: there is still a chance the port could be taken by other processes. 
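+ # Only the port number is returned; the socket was already closed above, so
+ # another process may grab the port before it is used (hence the NOTE).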
+ return port + + +def launch( + main_func, + # Should be num_processes_per_machine, but kept for compatibility. + num_gpus_per_machine, + num_machines=1, + machine_rank=0, + dist_url=None, + args=(), + timeout=DEFAULT_TIMEOUT, +): + """ + Launch multi-process or distributed training. + This function must be called on all machines involved in the training. + It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. + + Args: + main_func: a function that will be called by `main_func(*args)` + num_gpus_per_machine (int): number of processes per machine. When + using GPUs, this should be the number of GPUs. + num_machines (int): the total number of machines + machine_rank (int): the rank of this machine + dist_url (str): url to connect to for distributed jobs, including protocol + e.g. "tcp://127.0.0.1:8686". + Can be set to "auto" to automatically select a free port on localhost + timeout (timedelta): timeout of the distributed workers + args (tuple): arguments passed to main_func + """ + world_size = num_machines * num_gpus_per_machine + if world_size > 1: + # https://github.com/pytorch/pytorch/pull/14391 + # TODO prctl in spawned processes + + if dist_url == "auto": + assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." + port = _find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + if num_machines > 1 and dist_url.startswith("file://"): + logger = logging.getLogger(__name__) + logger.warning( + "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" + ) + + mp.start_processes( + _distributed_worker, + nprocs=num_gpus_per_machine, + args=( + main_func, + world_size, + num_gpus_per_machine, + machine_rank, + dist_url, + args, + timeout, + ), + daemon=False, + ) + else: + main_func(*args) + + +def _distributed_worker( + local_rank, + main_func, + world_size, + num_gpus_per_machine, + machine_rank, + dist_url, + args, + timeout=DEFAULT_TIMEOUT, +): + has_gpu = torch.cuda.is_available() + if has_gpu: + assert num_gpus_per_machine <= torch.cuda.device_count() + global_rank = machine_rank * num_gpus_per_machine + local_rank + try: + dist.init_process_group( + backend="NCCL" if has_gpu else "GLOO", + init_method=dist_url, + world_size=world_size, + rank=global_rank, + timeout=timeout, + ) + except Exception as e: + logger = logging.getLogger(__name__) + logger.error("Process group URL: {}".format(dist_url)) + raise e + + # Setup the local process group. + comm.create_local_process_group(num_gpus_per_machine) + if has_gpu: + torch.cuda.set_device(local_rank) + + # synchronize is needed here to prevent a possible timeout after calling init_process_group + # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 + comm.synchronize() + + main_func(*args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/train_loop.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/train_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..066055a99be04d87bde48efdcdbef162bfb27792 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/engine/train_loop.py @@ -0,0 +1,469 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import logging +import numpy as np +import time +import weakref +from typing import List, Mapping, Optional +import torch +from torch.nn.parallel import DataParallel, DistributedDataParallel + +import custom_detectron2.utils.comm as comm +from custom_detectron2.utils.events import EventStorage, get_event_storage +from custom_detectron2.utils.logger import _log_api_usage + +__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"] + + +class HookBase: + """ + Base class for hooks that can be registered with :class:`TrainerBase`. + + Each hook can implement 4 methods. The way they are called is demonstrated + in the following snippet: + :: + hook.before_train() + for iter in range(start_iter, max_iter): + hook.before_step() + trainer.run_step() + hook.after_step() + iter += 1 + hook.after_train() + + Notes: + 1. In the hook method, users can access ``self.trainer`` to access more + properties about the context (e.g., model, current iteration, or config + if using :class:`DefaultTrainer`). + + 2. A hook that does something in :meth:`before_step` can often be + implemented equivalently in :meth:`after_step`. + If the hook takes non-trivial time, it is strongly recommended to + implement the hook in :meth:`after_step` instead of :meth:`before_step`. + The convention is that :meth:`before_step` should only take negligible time. + + Following this convention will allow hooks that do care about the difference + between :meth:`before_step` and :meth:`after_step` (e.g., timer) to + function properly. + + """ + + trainer: "TrainerBase" = None + """ + A weak reference to the trainer object. Set by the trainer when the hook is registered. + """ + + def before_train(self): + """ + Called before the first iteration. + """ + pass + + def after_train(self): + """ + Called after the last iteration. + """ + pass + + def before_step(self): + """ + Called before each iteration. + """ + pass + + def after_backward(self): + """ + Called after the backward pass of each iteration. + """ + pass + + def after_step(self): + """ + Called after each iteration. + """ + pass + + def state_dict(self): + """ + Hooks are stateless by default, but can be made checkpointable by + implementing `state_dict` and `load_state_dict`. + """ + return {} + + +class TrainerBase: + """ + Base class for iterative trainer with hooks. + + The only assumption we made here is: the training runs in a loop. + A subclass can implement what the loop is. + We made no assumptions about the existence of dataloader, optimizer, model, etc. + + Attributes: + iter(int): the current iteration. + + start_iter(int): The iteration to start with. + By convention the minimum possible value is 0. + + max_iter(int): The iteration to end training. + + storage(EventStorage): An EventStorage that's opened during the course of training. + """ + + def __init__(self) -> None: + self._hooks: List[HookBase] = [] + self.iter: int = 0 + self.start_iter: int = 0 + self.max_iter: int + self.storage: EventStorage + _log_api_usage("trainer." + self.__class__.__name__) + + def register_hooks(self, hooks: List[Optional[HookBase]]) -> None: + """ + Register hooks to the trainer. The hooks are executed in the order + they are registered. + + Args: + hooks (list[Optional[HookBase]]): list of hooks + """ + hooks = [h for h in hooks if h is not None] + for h in hooks: + assert isinstance(h, HookBase) + # To avoid circular reference, hooks and trainer cannot own each other. 
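+ # (Concretely, ownership is one-way: the trainer keeps the hooks in
+ # self._hooks, while each hook only holds a weakref.proxy back to the
+ # trainer, assigned below.)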
+ # This normally does not matter, but will cause memory leak if the + # involved objects contain __del__: + # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/ + h.trainer = weakref.proxy(self) + self._hooks.extend(hooks) + + def train(self, start_iter: int, max_iter: int): + """ + Args: + start_iter, max_iter (int): See docs above + """ + logger = logging.getLogger(__name__) + logger.info("Starting training from iteration {}".format(start_iter)) + + self.iter = self.start_iter = start_iter + self.max_iter = max_iter + + with EventStorage(start_iter) as self.storage: + try: + self.before_train() + for self.iter in range(start_iter, max_iter): + self.before_step() + self.run_step() + self.after_step() + # self.iter == max_iter can be used by `after_train` to + # tell whether the training successfully finished or failed + # due to exceptions. + self.iter += 1 + except Exception: + logger.exception("Exception during training:") + raise + finally: + self.after_train() + + def before_train(self): + for h in self._hooks: + h.before_train() + + def after_train(self): + self.storage.iter = self.iter + for h in self._hooks: + h.after_train() + + def before_step(self): + # Maintain the invariant that storage.iter == trainer.iter + # for the entire execution of each step + self.storage.iter = self.iter + + for h in self._hooks: + h.before_step() + + def after_backward(self): + for h in self._hooks: + h.after_backward() + + def after_step(self): + for h in self._hooks: + h.after_step() + + def run_step(self): + raise NotImplementedError + + def state_dict(self): + ret = {"iteration": self.iter} + hooks_state = {} + for h in self._hooks: + sd = h.state_dict() + if sd: + name = type(h).__qualname__ + if name in hooks_state: + # TODO handle repetitive stateful hooks + continue + hooks_state[name] = sd + if hooks_state: + ret["hooks"] = hooks_state + return ret + + def load_state_dict(self, state_dict): + logger = logging.getLogger(__name__) + self.iter = state_dict["iteration"] + for key, value in state_dict.get("hooks", {}).items(): + for h in self._hooks: + try: + name = type(h).__qualname__ + except AttributeError: + continue + if name == key: + h.load_state_dict(value) + break + else: + logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.") + + +class SimpleTrainer(TrainerBase): + """ + A simple trainer for the most common type of task: + single-cost single-optimizer single-data-source iterative optimization, + optionally using data-parallelism. + It assumes that every step, you: + + 1. Compute the loss with a data from the data_loader. + 2. Compute the gradients with the above loss. + 3. Update the model with the optimizer. + + All other tasks during training (checkpointing, logging, evaluation, LR schedule) + are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`. + + If you want to do anything fancier than this, + either subclass TrainerBase and implement your own `run_step`, + or write your own training loop. + """ + + def __init__(self, model, data_loader, optimizer, gather_metric_period=1): + """ + Args: + model: a torch Module. Takes a data from data_loader and returns a + dict of losses. + data_loader: an iterable. Contains data to be used to call model. + optimizer: a torch optimizer. + gather_metric_period: an int. Every gather_metric_period iterations + the metrics are gathered from all the ranks to rank 0 and logged. + """ + super().__init__() + + """ + We set the model to training mode in the trainer. 
+ However it's valid to train a model that's in eval mode. + If you want your model (or a submodule of it) to behave + like evaluation during training, you can overwrite its train() method. + """ + model.train() + + self.model = model + self.data_loader = data_loader + # to access the data loader iterator, call `self._data_loader_iter` + self._data_loader_iter_obj = None + self.optimizer = optimizer + self.gather_metric_period = gather_metric_period + + def run_step(self): + """ + Implement the standard training logic described above. + """ + assert self.model.training, "[SimpleTrainer] model was changed to eval mode!" + start = time.perf_counter() + """ + If you want to do something with the data, you can wrap the dataloader. + """ + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + """ + If you want to do something with the losses, you can wrap the model. + """ + loss_dict = self.model(data) + if isinstance(loss_dict, torch.Tensor): + losses = loss_dict + loss_dict = {"total_loss": loss_dict} + else: + losses = sum(loss_dict.values()) + + """ + If you need to accumulate gradients or do something similar, you can + wrap the optimizer with your custom `zero_grad()` method. + """ + self.optimizer.zero_grad() + losses.backward() + + self.after_backward() + + self._write_metrics(loss_dict, data_time) + + """ + If you need gradient clipping/scaling or other processing, you can + wrap the optimizer with your custom `step()` method. But it is + suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4 + """ + self.optimizer.step() + + @property + def _data_loader_iter(self): + # only create the data loader iterator when it is used + if self._data_loader_iter_obj is None: + self._data_loader_iter_obj = iter(self.data_loader) + return self._data_loader_iter_obj + + def reset_data_loader(self, data_loader_builder): + """ + Delete and replace the current data loader with a new one, which will be created + by calling `data_loader_builder` (without argument). + """ + del self.data_loader + data_loader = data_loader_builder() + self.data_loader = data_loader + self._data_loader_iter_obj = None + + def _write_metrics( + self, + loss_dict: Mapping[str, torch.Tensor], + data_time: float, + prefix: str = "", + ) -> None: + if (self.iter + 1) % self.gather_metric_period == 0: + SimpleTrainer.write_metrics(loss_dict, data_time, prefix) + + @staticmethod + def write_metrics( + loss_dict: Mapping[str, torch.Tensor], + data_time: float, + prefix: str = "", + ) -> None: + """ + Args: + loss_dict (dict): dict of scalar losses + data_time (float): time taken by the dataloader iteration + prefix (str): prefix for logging keys + """ + metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()} + metrics_dict["data_time"] = data_time + + # Gather metrics among all workers for logging + # This assumes we do DDP-style training, which is currently the only + # supported method in detectron2. + all_metrics_dict = comm.gather(metrics_dict) + + if comm.is_main_process(): + storage = get_event_storage() + + # data_time among workers can have high variance. The actual latency + # caused by data_time is the maximum among workers. 
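+ # e.g. with two workers reporting data_time of 0.1s and 0.4s, 0.4s is
+ # logged; every other metric key is averaged across workers further below.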
+ data_time = np.max([x.pop("data_time") for x in all_metrics_dict]) + storage.put_scalar("data_time", data_time) + + # average the rest metrics + metrics_dict = { + k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys() + } + total_losses_reduced = sum(metrics_dict.values()) + if not np.isfinite(total_losses_reduced): + raise FloatingPointError( + f"Loss became infinite or NaN at iteration={storage.iter}!\n" + f"loss_dict = {metrics_dict}" + ) + + storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced) + if len(metrics_dict) > 1: + storage.put_scalars(**metrics_dict) + + def state_dict(self): + ret = super().state_dict() + ret["optimizer"] = self.optimizer.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self.optimizer.load_state_dict(state_dict["optimizer"]) + + +class AMPTrainer(SimpleTrainer): + """ + Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision + in the training loop. + """ + + def __init__( + self, + model, + data_loader, + optimizer, + gather_metric_period=1, + grad_scaler=None, + precision: torch.dtype = torch.float16, + log_grad_scaler: bool = False, + ): + """ + Args: + model, data_loader, optimizer, gather_metric_period: same as in :class:`SimpleTrainer`. + grad_scaler: torch GradScaler to automatically scale gradients. + precision: torch.dtype as the target precision to cast to in computations + """ + unsupported = "AMPTrainer does not support single-process multi-device training!" + if isinstance(model, DistributedDataParallel): + assert not (model.device_ids and len(model.device_ids) > 1), unsupported + assert not isinstance(model, DataParallel), unsupported + + super().__init__(model, data_loader, optimizer, gather_metric_period) + + if grad_scaler is None: + from torch.cuda.amp import GradScaler + + grad_scaler = GradScaler() + self.grad_scaler = grad_scaler + self.precision = precision + self.log_grad_scaler = log_grad_scaler + + def run_step(self): + """ + Implement the AMP training logic. + """ + assert self.model.training, "[AMPTrainer] model was changed to eval mode!" + assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!" 
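+ # Standard torch.cuda.amp recipe: forward under autocast at self.precision,
+ # backward on the scaled loss, then grad_scaler.step() (which unscales the
+ # gradients and skips the update if any are inf/NaN) and grad_scaler.update().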
+ from torch.cuda.amp import autocast + + start = time.perf_counter() + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + with autocast(dtype=self.precision): + loss_dict = self.model(data) + if isinstance(loss_dict, torch.Tensor): + losses = loss_dict + loss_dict = {"total_loss": loss_dict} + else: + losses = sum(loss_dict.values()) + + self.optimizer.zero_grad() + self.grad_scaler.scale(losses).backward() + + if self.log_grad_scaler: + storage = get_event_storage() + storage.put_scalar("[metric]grad_scaler", self.grad_scaler.get_scale()) + + self.after_backward() + + self._write_metrics(loss_dict, data_time) + + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + + def state_dict(self): + ret = super().state_dict() + ret["grad_scaler"] = self.grad_scaler.state_dict() + return ret + + def load_state_dict(self, state_dict): + super().load_state_dict(state_dict) + self.grad_scaler.load_state_dict(state_dict["grad_scaler"]) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d96609e8f2261a6800fe85fcf3e1eaeaa44455c6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator +from .coco_evaluation import COCOEvaluator +from .rotated_coco_evaluation import RotatedCOCOEvaluator +from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset +from .lvis_evaluation import LVISEvaluator +from .panoptic_evaluation import COCOPanopticEvaluator +from .pascal_voc_evaluation import PascalVOCDetectionEvaluator +from .sem_seg_evaluation import SemSegEvaluator +from .testing import print_csv_format, verify_results + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/cityscapes_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/cityscapes_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..881aed078e2e1322dbd48e2006785888652441e4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/cityscapes_evaluation.py @@ -0,0 +1,197 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import glob +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +import torch +from PIL import Image + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.utils import comm +from custom_detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + + +class CityscapesEvaluator(DatasetEvaluator): + """ + Base class for evaluation using cityscapes API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): the name of the dataset. + It must have the following metadata associated with it: + "thing_classes", "gt_dir". 
+ """ + self._metadata = MetadataCatalog.get(dataset_name) + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") + self._temp_dir = self._working_dir.name + # All workers will write to the same results directory + # TODO this does not work in distributed training + assert ( + comm.get_local_size() == comm.get_world_size() + ), "CityscapesEvaluator currently do not work with multiple machines." + self._temp_dir = comm.all_gather(self._temp_dir)[0] + if self._temp_dir != self._working_dir.name: + self._working_dir.cleanup() + self._logger.info( + "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) + ) + + +class CityscapesInstanceEvaluator(CityscapesEvaluator): + """ + Evaluate instance segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. + """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import name2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") + + if "instances" in output: + output = output["instances"].to(self._cpu_device) + num_instances = len(output) + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = output.pred_classes[i] + classes = self._metadata.thing_classes[pred_class] + class_id = name2label[classes].id + score = output.scores[i] + mask = output.pred_masks[i].numpy().astype("uint8") + png_filename = os.path.join( + self._temp_dir, basename + "_{}_{}.png".format(i, classes) + ) + + Image.fromarray(mask * 255).save(png_filename) + fout.write( + "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) + ) + else: + # Cityscapes requires a prediction file for every ground truth image. + with open(pred_txt, "w") as fout: + pass + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". + """ + comm.synchronize() + if comm.get_rank() > 0: + return + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + self._working_dir.cleanup() + return ret + + +class CityscapesSemSegEvaluator(CityscapesEvaluator): + """ + Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. + """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import trainId2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") + + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() + pred = 255 * np.ones(output.shape, dtype=np.uint8) + for train_id, label in trainId2label.items(): + if label.ignoreInEval: + continue + pred[output == train_id] = label.id + Image.fromarray(pred).save(pred_filename) + + def evaluate(self): + comm.synchronize() + if comm.get_rank() > 0: + return + # Load the Cityscapes eval script *after* setting the required env var, + # since the script reads CITYSCAPES_DATASET into global variables at load time. + import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + ) + ret = OrderedDict() + ret["sem_seg"] = { + "IoU": 100.0 * results["averageScoreClasses"], + "iIoU": 100.0 * results["averageScoreInstClasses"], + "IoU_sup": 100.0 * results["averageScoreCategories"], + "iIoU_sup": 100.0 * results["averageScoreInstCategories"], + } + self._working_dir.cleanup() + return ret diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/coco_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..9d651a9fe2eb90d0c6a682ab1a832debedacaf12 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/coco_evaluation.py @@ -0,0 +1,722 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import custom_pycocotools.mask as mask_util +import torch +from custom_pycocotools.coco import COCO +from custom_pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import custom_detectron2.utils.comm as comm +from custom_detectron2.config import CfgNode +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.data.datasets.coco import convert_to_coco_json +from custom_detectron2.structures import Boxes, BoxMode, pairwise_iou +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import create_small_table + +from .evaluator import DatasetEvaluator + +try: + from custom_detectron2.evaluation.fast_eval_api import COCOeval_opt +except ImportError: + COCOeval_opt = COCOeval + + +class COCOEvaluator(DatasetEvaluator): + """ + Evaluate AR for object proposals, AP for instance detection/segmentation, AP + for keypoint detection outputs using COCO's metrics. + See http://cocodataset.org/#detection-eval and + http://cocodataset.org/#keypoints-eval to understand its metrics. + The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means + the metric cannot be computed (e.g. due to no predictions made). + + In addition to COCO, this evaluator is able to support any bounding box detection, + instance segmentation, or keypoint detection dataset. + """ + + def __init__( + self, + dataset_name, + tasks=None, + distributed=True, + output_dir=None, + *, + max_dets_per_image=None, + use_fast_impl=True, + kpt_oks_sigmas=(), + allow_cached_coco=True, + ): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have either the following corresponding metadata: + + "json_file": the path to the COCO format annotation + + Or it must be in detectron2's standard dataset format + so it can be converted to COCO format automatically. + tasks (tuple[str]): tasks that can be evaluated under the given + configuration. A task is one of "bbox", "segm", "keypoints". + By default, will infer this automatically from predictions. + distributed (True): if True, will collect results from all ranks and run evaluation + in the main process. + Otherwise, will only evaluate the results in the current process. 
+ output_dir (str): optional, an output directory to dump all
+ results predicted on the dataset. The dump contains two files:
+
+ 1. "instances_predictions.pth", a file that can be loaded with `torch.load` and
+ contains all the results in the format they are produced by the model.
+ 2. "coco_instances_results.json", a json file in COCO's result format.
+ max_dets_per_image (int): limit on the maximum number of detections per image.
+ By default in COCO, this limit is 100, but it can be customized
+ to be greater, as is needed in evaluation metrics AP fixed and AP pool
+ (see https://arxiv.org/pdf/2102.01066.pdf)
+ This doesn't affect keypoint evaluation.
+ use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
+ Although the results should be very close to the official implementation in COCO
+ API, it is still recommended to compute results with the official API for use in
+ papers. The faster implementation also uses more RAM.
+ kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
+ See http://cocodataset.org/#keypoints-eval
+ When empty, it will use the defaults in COCO.
+ Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+ allow_cached_coco (bool): Whether to use cached coco json from previous validation
+ runs. You should set this to False if you need to use different validation data.
+ Defaults to True.
+ """
+ self._logger = logging.getLogger(__name__)
+ self._distributed = distributed
+ self._output_dir = output_dir
+
+ if use_fast_impl and (COCOeval_opt is COCOeval):
+ self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
+ use_fast_impl = False
+ self._use_fast_impl = use_fast_impl
+
+ # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
+ # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
+ # 3rd element (100) is used as the limit on the number of detections per image when
+ # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
+ # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
+ if max_dets_per_image is None:
+ max_dets_per_image = [1, 10, 100]
+ else:
+ max_dets_per_image = [1, 10, max_dets_per_image]
+ self._max_dets_per_image = max_dets_per_image
+
+ if tasks is not None and isinstance(tasks, CfgNode):
+ kpt_oks_sigmas = (
+ tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
+ )
+ self._logger.warning(
+ "COCO Evaluator instantiated using config; this is deprecated behavior."
+ " Please pass in explicit arguments instead."
+ )
+ self._tasks = None # Inferring it from predictions should be better
+ else:
+ self._tasks = tasks
+
+ self._cpu_device = torch.device("cpu")
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ if not hasattr(self._metadata, "json_file"):
+ if output_dir is None:
+ raise ValueError(
+ "output_dir must be provided to COCOEvaluator "
+ "for datasets not in COCO format."
+ ) + self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") + + cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") + self._metadata.json_file = cache_path + convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) + + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = "annotations" in self._coco_api.dataset + if self._do_evaluation: + self._kpt_oks_sigmas = kpt_oks_sigmas + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + if len(prediction) > 1: + self._predictions.append(prediction) + + def evaluate(self, img_ids=None): + """ + Args: + img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset + """ + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return {} + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(predictions, img_ids=img_ids) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + """ + Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. + """ + tasks = {"bbox"} + for pred in predictions: + if "segmentation" in pred: + tasks.add("segm") + if "keypoints" in pred: + tasks.add("keypoints") + return sorted(tasks) + + def _eval_predictions(self, predictions, img_ids=None): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. 
+ """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(coco_results) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id + all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) + num_classes = len(all_contiguous_ids) + assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 + + reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} + for result in coco_results: + category_id = result["category_id"] + assert category_id < num_classes, ( + f"A prediction has class={category_id}, " + f"but the dataset only has {num_classes} classes and " + f"predicted class id should be in [0, {num_classes - 1}]." + ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info( + "Evaluating predictions with {} COCO API...".format( + "unofficial" if self._use_fast_impl else "official" + ) + ) + for task in sorted(tasks): + assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, + coco_results, + task, + kpt_oks_sigmas=self._kpt_oks_sigmas, + cocoeval_fn=COCOeval_opt if self._use_fast_impl else COCOeval, + img_ids=img_ids, + max_dets_per_image=self._max_dets_per_image, + ) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. 
+            bbox_mode = BoxMode.XYXY_ABS.value
+            ids, boxes, objectness_logits = [], [], []
+            for prediction in predictions:
+                ids.append(prediction["image_id"])
+                boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
+                objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
+
+            proposal_data = {
+                "boxes": boxes,
+                "objectness_logits": objectness_logits,
+                "ids": ids,
+                "bbox_mode": bbox_mode,
+            }
+            with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
+                pickle.dump(proposal_data, f)
+
+        if not self._do_evaluation:
+            self._logger.info("Annotations are not available for evaluation.")
+            return
+
+        self._logger.info("Evaluating bbox proposals ...")
+        res = {}
+        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
+        for limit in [100, 1000]:
+            for area, suffix in areas.items():
+                stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
+                key = "AR{}@{:d}".format(suffix, limit)
+                res[key] = float(stats["ar"].item() * 100)
+        self._logger.info("Proposal metrics: \n" + create_small_table(res))
+        self._results["box_proposals"] = res
+
+    def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
+        """
+        Derive the desired score numbers from summarized COCOeval.
+
+        Args:
+            coco_eval (None or COCOEval): None represents no predictions from model.
+            iou_type (str):
+            class_names (None or list[str]): if provided, will use it to compute
+                per-category AP.
+
+        Returns:
+            a dict of {metric name: score}
+        """
+
+        metrics = {
+            "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+            "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+            "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
+        }[iou_type]
+
+        if coco_eval is None:
+            self._logger.warning("No predictions from the model!")
+            return {metric: float("nan") for metric in metrics}
+
+        # the standard metrics
+        results = {
+            metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
+            for idx, metric in enumerate(metrics)
+        }
+        self._logger.info(
+            "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
+        )
+        if not np.isfinite(sum(results.values())):
+            self._logger.info("Some metrics cannot be computed and are shown as NaN.")
+
+        if class_names is None or len(class_names) <= 1:
+            return results
+        # Compute per-category AP
+        # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
+        precisions = coco_eval.eval["precision"]
+        # precision has dims (iou, recall, cls, area range, max dets)
+        assert len(class_names) == precisions.shape[2]
+
+        results_per_category = []
+        for idx, name in enumerate(class_names):
+            # area range index 0: all area ranges
+            # max dets index -1: typically 100 per image
+            precision = precisions[:, :, idx, 0, -1]
+            precision = precision[precision > -1]
+            ap = np.mean(precision) if precision.size else float("nan")
+            results_per_category.append(("{}".format(name), float(ap * 100)))
+
+        # tabulate it
+        N_COLS = min(6, len(results_per_category) * 2)
+        results_flatten = list(itertools.chain(*results_per_category))
+        results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
+        table = tabulate(
+            results_2d,
+            tablefmt="pipe",
+            floatfmt=".3f",
+            headers=["category", "AP"] * (N_COLS // 2),
+            numalign="left",
+        )
+        self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
+
+        results.update({"AP-" + name: ap for name, ap in results_per_category})
+        return results
+
+
+def instances_to_coco_json(instances, img_id):
+    """
+    Dump an "Instances" object to a COCO-format json that's used for evaluation.
+
+    Args:
+        instances (Instances):
+        img_id (int): the image id
+
+    Returns:
+        list[dict]: list of json annotations in COCO format.
+    """
+    num_instance = len(instances)
+    if num_instance == 0:
+        return []
+
+    boxes = instances.pred_boxes.tensor.numpy()
+    boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+    boxes = boxes.tolist()
+    scores = instances.scores.tolist()
+    classes = instances.pred_classes.tolist()
+
+    has_mask = instances.has("pred_masks")
+    if has_mask:
+        # use RLE to encode the masks, because they are too large and take too
+        # much memory, since this evaluator stores outputs of the entire dataset
+        rles = [
+            mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
+            for mask in instances.pred_masks
+        ]
+        for rle in rles:
+            # "counts" is an array encoded by mask_util as a byte-stream. Python3's
+            # json writer, which always produces strings, cannot serialize a bytestream
+            # unless you decode it. Thankfully, utf-8 works out (which is also what
+            # the custom_pycocotools/_mask.pyx does).
+            rle["counts"] = rle["counts"].decode("utf-8")
+
+    has_keypoints = instances.has("pred_keypoints")
+    if has_keypoints:
+        keypoints = instances.pred_keypoints
+
+    results = []
+    for k in range(num_instance):
+        result = {
+            "image_id": img_id,
+            "category_id": classes[k],
+            "bbox": boxes[k],
+            "score": scores[k],
+        }
+        if has_mask:
+            result["segmentation"] = rles[k]
+        if has_keypoints:
+            # In COCO annotations, keypoint coordinates are pixel indices.
+            # However our predictions are floating point coordinates.
+            # Therefore we subtract 0.5 to be consistent with the annotation format.
+            # This is the inverse of data loading logic in `datasets/coco.py`.
+            keypoints[k][:, :2] -= 0.5
+            result["keypoints"] = keypoints[k].flatten().tolist()
+        results.append(result)
+    return results
+
+
+# inspired by Detectron:
+# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
+def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
+    """
+    Evaluate detection proposal recall metrics. This function is a much
+    faster alternative to the official COCO API recall evaluation code. However,
+    it produces slightly different results.
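+
+    The returned "ar" is the mean recall over the COCO IoU thresholds
+    0.5:0.05:0.95. A minimal sketch of that final step (mirroring the code
+    below, with ``gt_overlaps`` and ``num_pos`` already accumulated):
+
+    .. code-block:: python
+
+        thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
+        recalls = torch.zeros_like(thresholds)
+        for i, t in enumerate(thresholds):
+            recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
+        ar = recalls.mean()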
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0**2, 1e5**2], # all + [0**2, 32**2], # small + [32**2, 96**2], # medium + [96**2, 1e5**2], # large + [96**2, 128**2], # 96-128 + [128**2, 256**2], # 128-256 + [256**2, 512**2], # 256-512 + [512**2, 1e5**2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) + anno = coco_api.loadAnns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + for obj in anno + if obj["iscrowd"] == 0 + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_coco( + coco_gt, + coco_results, + iou_type, + kpt_oks_sigmas=None, + cocoeval_fn=COCOeval_opt, + img_ids=None, + max_dets_per_image=None, +): + """ + Evaluate the coco results using COCOEval API. 
+ """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in coco_results: + c.pop("bbox", None) + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = cocoeval_fn(coco_gt, coco_dt, iou_type) + # For COCO, the default max_dets_per_image is [1, 10, 100]. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] # Default from COCOEval + else: + assert ( + len(max_dets_per_image) >= 3 + ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" + # In the case that user supplies a custom input for max_dets_per_image, + # apply COCOevalMaxDets to evaluate AP with the custom input. + if max_dets_per_image[2] != 100: + coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) + if iou_type != "keypoints": + coco_eval.params.maxDets = max_dets_per_image + + if img_ids is not None: + coco_eval.params.imgIds = img_ids + + if iou_type == "keypoints": + # Use the COCO default keypoint OKS sigmas unless overrides are specified + if kpt_oks_sigmas: + assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "custom_pycocotools is too old!" + coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) + # COCOAPI requires every detection and every gt to have keypoints, so + # we just take the first entry from both + num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 + num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 + num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) + assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( + f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " + f"Ground truth contains {num_keypoints_gt} keypoints. " + f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " + "They have to agree with each other. For meaning of OKS, please refer to " + "http://cocodataset.org/#keypoints-eval." 
+ ) + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval + + +class COCOevalMaxDets(COCOeval): + """ + Modified version of COCOeval for evaluating AP with a custom + maxDets (by default for COCO, maxDets is 100) + """ + + def summarize(self): + """ + Compute and display summary metrics for evaluation results given + a custom value for max_dets_per_image + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + # Evaluate AP using the custom limit on maximum detections per image + stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType == "segm" or iouType == "bbox": + summarize = _summarizeDets + elif iouType == "keypoints": + summarize = _summarizeKps + self.stats = summarize() + + def __str__(self): + self.summarize() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/evaluator.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/evaluator.py new file mode 100644 index 
0000000000000000000000000000000000000000..6465b00de7c0c3e6e5ca1de05e7284e7c85bcd80
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/evaluator.py
@@ -0,0 +1,224 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import datetime
+import logging
+import time
+from collections import OrderedDict, abc
+from contextlib import ExitStack, contextmanager
+from typing import List, Union
+import torch
+from torch import nn
+
+from custom_detectron2.utils.comm import get_world_size, is_main_process
+from custom_detectron2.utils.logger import log_every_n_seconds
+
+
+class DatasetEvaluator:
+    """
+    Base class for a dataset evaluator.
+
+    The function :func:`inference_on_dataset` runs the model over
+    all samples in the dataset, and uses a DatasetEvaluator to process the inputs/outputs.
+
+    This class will accumulate information about the inputs/outputs (by :meth:`process`),
+    and produce evaluation results in the end (by :meth:`evaluate`).
+    """
+
+    def reset(self):
+        """
+        Preparation for a new round of evaluation.
+        Should be called before starting a round of evaluation.
+        """
+        pass
+
+    def process(self, inputs, outputs):
+        """
+        Process the pair of inputs and outputs.
+        If they contain batches, the pairs can be consumed one-by-one using `zip`:
+
+        .. code-block:: python
+
+            for input_, output in zip(inputs, outputs):
+                # do evaluation on single input/output pair
+                ...
+
+        Args:
+            inputs (list): the inputs that are used to call the model.
+            outputs (list): the return value of `model(inputs)`
+        """
+        pass
+
+    def evaluate(self):
+        """
+        Evaluate/summarize the performance, after processing all input/output pairs.
+
+        Returns:
+            dict:
+                A new evaluator class can return a dict of arbitrary format
+                as long as the user can process the results.
+                In our train_net.py, we expect the following format:
+
+                * key: the name of the task (e.g., bbox)
+                * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
+        """
+        pass
+
+
+class DatasetEvaluators(DatasetEvaluator):
+    """
+    Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
+
+    This class dispatches every evaluation call to
+    all of its :class:`DatasetEvaluator`.
+    """
+
+    def __init__(self, evaluators):
+        """
+        Args:
+            evaluators (list): the evaluators to combine.
+        """
+        super().__init__()
+        self._evaluators = evaluators
+
+    def reset(self):
+        for evaluator in self._evaluators:
+            evaluator.reset()
+
+    def process(self, inputs, outputs):
+        for evaluator in self._evaluators:
+            evaluator.process(inputs, outputs)
+
+    def evaluate(self):
+        results = OrderedDict()
+        for evaluator in self._evaluators:
+            result = evaluator.evaluate()
+            if is_main_process() and result is not None:
+                for k, v in result.items():
+                    assert (
+                        k not in results
+                    ), "Different evaluators produce results with the same key {}".format(k)
+                    results[k] = v
+        return results
+
+
+def inference_on_dataset(
+    model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None]
+):
+    """
+    Run model on the data_loader and evaluate the metrics with evaluator.
+    Also benchmark the inference speed of `model.__call__` accurately.
+    The model will be used in eval mode.
+
+    Args:
+        model (callable): a callable which takes an object from
+            `data_loader` and returns some outputs.
+
+            If it's an nn.Module, it will be temporarily set to `eval` mode.
+            If you wish to evaluate a model in `training` mode instead, you can
+            wrap the given model and override its behavior of `.eval()` and `.train()`.
+ data_loader: an iterable object with a length. + The elements it generates will be the inputs to the model. + evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, + but don't want to do any evaluation. + + Returns: + The return value of `evaluator.evaluate()` + """ + num_devices = get_world_size() + logger = logging.getLogger(__name__) + logger.info("Start inference on {} batches".format(len(data_loader))) + + total = len(data_loader) # inference data loader must have a fixed length + if evaluator is None: + # create a no-op evaluator + evaluator = DatasetEvaluators([]) + if isinstance(evaluator, abc.MutableSequence): + evaluator = DatasetEvaluators(evaluator) + evaluator.reset() + + num_warmup = min(5, total - 1) + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + with ExitStack() as stack: + if isinstance(model, nn.Module): + stack.enter_context(inference_context(model)) + stack.enter_context(torch.no_grad()) + + start_data_time = time.perf_counter() + for idx, inputs in enumerate(data_loader): + total_data_time += time.perf_counter() - start_data_time + if idx == num_warmup: + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + + start_compute_time = time.perf_counter() + outputs = model(inputs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + total_compute_time += time.perf_counter() - start_compute_time + + start_eval_time = time.perf_counter() + evaluator.process(inputs, outputs) + total_eval_time += time.perf_counter() - start_eval_time + + iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) + data_seconds_per_iter = total_data_time / iters_after_start + compute_seconds_per_iter = total_compute_time / iters_after_start + eval_seconds_per_iter = total_eval_time / iters_after_start + total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start + if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: + eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) + log_every_n_seconds( + logging.INFO, + ( + f"Inference done {idx + 1}/{total}. " + f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " + f"Inference: {compute_seconds_per_iter:.4f} s/iter. " + f"Eval: {eval_seconds_per_iter:.4f} s/iter. " + f"Total: {total_seconds_per_iter:.4f} s/iter. " + f"ETA={eta}" + ), + n=5, + ) + start_data_time = time.perf_counter() + + # Measure the time only for this worker (before the synchronization barrier) + total_time = time.perf_counter() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + # NOTE this format is parsed by grep + logger.info( + "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_time_str, total_time / (total - num_warmup), num_devices + ) + ) + total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) + logger.info( + "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_compute_time_str, total_compute_time / (total - num_warmup), num_devices + ) + ) + + results = evaluator.evaluate() + # An evaluator may return None when not in main process. + # Replace it by an empty dict instead to make it easier for downstream code to handle + if results is None: + results = {} + return results + + +@contextmanager +def inference_context(model): + """ + A context where the model is temporarily changed to eval mode, + and restored to previous mode afterwards. 
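+
+    A minimal usage sketch (the module below is a stand-in):
+
+    .. code-block:: python
+
+        model = torch.nn.Linear(4, 2)
+        model.train()
+        with inference_context(model):
+            assert not model.training  # eval mode inside the context
+        assert model.training  # previous mode restored on exit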
+ + Args: + model: a torch Module + """ + training_mode = model.training + model.eval() + yield + model.train(training_mode) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/fast_eval_api.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/fast_eval_api.py new file mode 100644 index 0000000000000000000000000000000000000000..659aabb71b057e7db3b11110e21819516dd39ee1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/fast_eval_api.py @@ -0,0 +1,121 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import numpy as np +import time +from custom_pycocotools.cocoeval import COCOeval + +from custom_detectron2 import _C + +logger = logging.getLogger(__name__) + + +class COCOeval_opt(COCOeval): + """ + This is a slightly modified version of the original COCO API, where the functions evaluateImg() + and accumulate() are implemented in C++ to speedup evaluation + """ + + def evaluate(self): + """ + Run per image evaluation on given images and store results in self.evalImgs_cpp, a + datastructure that isn't readable from Python but is used by a c++ implementation of + accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure + self.evalImgs because this datastructure is a computational bottleneck. + :return: None + """ + tic = time.time() + + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = "segm" if p.useSegm == 1 else "bbox" + logger.info("Evaluate annotation type *{}*".format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() # bottleneck + + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType == "segm" or p.iouType == "bbox": + computeIoU = self.computeIoU + elif p.iouType == "keypoints": + computeIoU = self.computeOks + self.ious = { + (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds + } # bottleneck + + maxDet = p.maxDets[-1] + + # <<<< Beginning of code differences with original COCO API + def convert_instances_to_cpp(instances, is_det=False): + # Convert annotations for a list of instances in an image to a format that's fast + # to access in C++ + instances_cpp = [] + for instance in instances: + instance_cpp = _C.InstanceAnnotation( + int(instance["id"]), + instance["score"] if is_det else instance.get("score", 0.0), + instance["area"], + bool(instance.get("iscrowd", 0)), + bool(instance.get("ignore", 0)), + ) + instances_cpp.append(instance_cpp) + return instances_cpp + + # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ + ground_truth_instances = [ + [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] + for imgId in p.imgIds + ] + detected_instances = [ + [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds] + for imgId in p.imgIds + ] + ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] + + if not p.useCats: + # For each image, flatten per-category lists into a single list + ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances] + detected_instances = [[[o for c in i for o in c]] for i in detected_instances] + + # Call C++ implementation of self.evaluateImgs() + self._evalImgs_cpp = 
_C.COCOevalEvaluateImages(
+            p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
+        )
+        self._evalImgs = None
+
+        self._paramsEval = copy.deepcopy(self.params)
+        toc = time.time()
+        logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
+        # >>>> End of code differences with original COCO API
+
+    def accumulate(self):
+        """
+        Accumulate per image evaluation results and store the result in self.eval. Does not
+        support changing parameter settings from those used by self.evaluate()
+        """
+        logger.info("Accumulating evaluation results...")
+        tic = time.time()
+        assert hasattr(
+            self, "_evalImgs_cpp"
+        ), "evaluate() must be called before accumulate() is called."
+
+        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
+
+        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
+        self.eval["recall"] = np.array(self.eval["recall"]).reshape(
+            self.eval["counts"][:1] + self.eval["counts"][2:]
+        )
+
+        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
+        # num_area_ranges X num_max_detections
+        self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
+        self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
+        toc = time.time()
+        logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/lvis_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/lvis_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f7769b48c3feb96cbcb7ee56794c1a3a35c3540
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/lvis_evaluation.py
@@ -0,0 +1,380 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import itertools
+import json
+import logging
+import os
+import pickle
+from collections import OrderedDict
+import torch
+
+import custom_detectron2.utils.comm as comm
+from custom_detectron2.config import CfgNode
+from custom_detectron2.data import MetadataCatalog
+from custom_detectron2.structures import Boxes, BoxMode, pairwise_iou
+from custom_detectron2.utils.file_io import PathManager
+from custom_detectron2.utils.logger import create_small_table
+
+from .coco_evaluation import instances_to_coco_json
+from .evaluator import DatasetEvaluator
+
+
+class LVISEvaluator(DatasetEvaluator):
+    """
+    Evaluate object proposal and instance detection/segmentation outputs using
+    LVIS's metrics and evaluation API.
+    """
+
+    def __init__(
+        self,
+        dataset_name,
+        tasks=None,
+        distributed=True,
+        output_dir=None,
+        *,
+        max_dets_per_image=None,
+    ):
+        """
+        Args:
+            dataset_name (str): name of the dataset to be evaluated.
+                It must have the following corresponding metadata:
+                "json_file": the path to the LVIS format annotation
+            tasks (tuple[str]): tasks that can be evaluated under the given
+                configuration. A task is one of "bbox", "segm".
+                By default, will infer this automatically from predictions.
+            distributed (bool): if True, will collect results from all ranks for evaluation.
+                Otherwise, will evaluate the results in the current process.
+            output_dir (str): optional, an output directory to dump results.
+            max_dets_per_image (None or int): limit on maximum detections per image
+                in evaluating AP. This limit, by default of the LVIS dataset, is 300.
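+
+        A hedged construction sketch (the dataset name is illustrative and must
+        be registered in ``MetadataCatalog`` with a ``json_file`` entry):
+
+        .. code-block:: python
+
+            evaluator = LVISEvaluator("lvis_v1_val", output_dir="./output")
+            evaluator.reset()
+            # call evaluator.process(inputs, outputs) per batch, then:
+            # results = evaluator.evaluate()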
+ """ + from lvis import LVIS + + self._logger = logging.getLogger(__name__) + + if tasks is not None and isinstance(tasks, CfgNode): + self._logger.warn( + "COCO Evaluator instantiated using config, this is deprecated behavior." + " Please pass in explicit arguments instead." + ) + self._tasks = None # Infering it from predictions should be better + else: + self._tasks = tasks + + self._distributed = distributed + self._output_dir = output_dir + self._max_dets_per_image = max_dets_per_image + + self._cpu_device = torch.device("cpu") + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + self._lvis_api = LVIS(json_file) + # Test set json files do not contain annotations (evaluation must be + # performed using the LVIS evaluation server). + self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0 + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a LVIS model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[LVISEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "instances" in predictions[0]: + self._eval_predictions(predictions) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + for pred in predictions: + if "segmentation" in pred: + return ("bbox", "segm") + return ("bbox",) + + def _eval_predictions(self, predictions): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + + Args: + predictions (list[dict]): list of outputs from the model + """ + self._logger.info("Preparing results in the LVIS format ...") + lvis_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(lvis_results) + + # LVIS evaluator can be used to evaluate results for COCO dataset categories. + # In this case `_metadata` variable will have a field with COCO-specific category mapping. 
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in lvis_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + else: + # unmap the category ids for LVIS (from 0-indexed to 1-indexed) + for result in lvis_results: + result["category_id"] += 1 + + if self._output_dir: + file_path = os.path.join(self._output_dir, "lvis_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(lvis_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + res = _evaluate_predictions_on_lvis( + self._lvis_api, + lvis_results, + task, + max_dets_per_image=self._max_dets_per_image, + class_names=self._metadata.get("thing_classes"), + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official LVIS API recall evaluation code. However, + it produces slightly different results. 
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0**2, 1e5**2], # all + [0**2, 32**2], # small + [32**2, 96**2], # medium + [96**2, 1e5**2], # large + [96**2, 128**2], # 96-128 + [128**2, 256**2], # 128-256 + [256**2, 512**2], # 256-512 + [512**2, 1e5**2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) + anno = lvis_api.load_anns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_lvis( + lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None +): + """ + Args: + iou_type (str): + max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP + This limit, by default of the LVIS dataset, is 300. + class_names (None or list[str]): if provided, will use it to predict + per-category AP. 
+ + Returns: + a dict of {metric name: score} + """ + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + }[iou_type] + + logger = logging.getLogger(__name__) + + if len(lvis_results) == 0: # TODO: check if needed + logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + if iou_type == "segm": + lvis_results = copy.deepcopy(lvis_results) + # When evaluating mask AP, if the results contain bbox, LVIS API will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in lvis_results: + c.pop("bbox", None) + + if max_dets_per_image is None: + max_dets_per_image = 300 # Default for LVIS dataset + + from lvis import LVISEval, LVISResults + + logger.info(f"Evaluating with max detections per image = {max_dets_per_image}") + lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image) + lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) + lvis_eval.run() + lvis_eval.print_results() + + # Pull the standard metrics from the LVIS results + results = lvis_eval.get_results() + results = {metric: float(results[metric] * 100) for metric in metrics} + logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) + return results diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/panoptic_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/panoptic_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..833f3bd2f483360eb5cb7f6a5b6b02a2db0d01d6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/panoptic_evaluation.py @@ -0,0 +1,199 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import contextlib +import io +import itertools +import json +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +from typing import Optional +from PIL import Image +from tabulate import tabulate + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.utils import comm +from custom_detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + +logger = logging.getLogger(__name__) + + +class COCOPanopticEvaluator(DatasetEvaluator): + """ + Evaluate Panoptic Quality metrics on COCO using PanopticAPI. + It saves panoptic segmentation prediction in `output_dir` + + It contains a synchronize call and has to be called from all workers. + """ + + def __init__(self, dataset_name: str, output_dir: Optional[str] = None): + """ + Args: + dataset_name: name of the dataset + output_dir: output directory to save results for evaluation. + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._thing_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + self._stuff_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() + } + + self._output_dir = output_dir + if self._output_dir is not None: + PathManager.mkdirs(self._output_dir) + + def reset(self): + self._predictions = [] + + def _convert_category_id(self, segment_info): + isthing = segment_info.pop("isthing", None) + if isthing is None: + # the model produces panoptic category id directly. 
No more conversion needed + return segment_info + if isthing is True: + segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + return segment_info + + def process(self, inputs, outputs): + from panopticapi.utils import id2rgb + + for input, output in zip(inputs, outputs): + panoptic_img, segments_info = output["panoptic_seg"] + panoptic_img = panoptic_img.cpu().numpy() + if segments_info is None: + # If "segments_info" is None, we assume "panoptic_img" is a + # H*W int32 image storing the panoptic_id in the format of + # category_id * label_divisor + instance_id. We reserve -1 for + # VOID label, and add 1 to panoptic_img since the official + # evaluation script uses 0 for VOID label. + label_divisor = self._metadata.label_divisor + segments_info = [] + for panoptic_label in np.unique(panoptic_img): + if panoptic_label == -1: + # VOID region. + continue + pred_class = panoptic_label // label_divisor + isthing = ( + pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values() + ) + segments_info.append( + { + "id": int(panoptic_label) + 1, + "category_id": int(pred_class), + "isthing": bool(isthing), + } + ) + # Official evaluation script uses 0 for VOID label. + panoptic_img += 1 + + file_name = os.path.basename(input["file_name"]) + file_name_png = os.path.splitext(file_name)[0] + ".png" + with io.BytesIO() as out: + Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") + segments_info = [self._convert_category_id(x) for x in segments_info] + self._predictions.append( + { + "image_id": input["image_id"], + "file_name": file_name_png, + "png_string": out.getvalue(), + "segments_info": segments_info, + } + ) + + def evaluate(self): + comm.synchronize() + + self._predictions = comm.gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not comm.is_main_process(): + return + + # PanopticApi requires local files + gt_json = PathManager.get_local_path(self._metadata.panoptic_json) + gt_folder = PathManager.get_local_path(self._metadata.panoptic_root) + + with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: + logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) + for p in self._predictions: + with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + with open(gt_json, "r") as f: + json_data = json.load(f) + json_data["annotations"] = self._predictions + + output_dir = self._output_dir or pred_dir + predictions_json = os.path.join(output_dir, "predictions.json") + with PathManager.open(predictions_json, "w") as f: + f.write(json.dumps(json_data)) + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + gt_json, + PathManager.get_local_path(predictions_json), + gt_folder=gt_folder, + pred_folder=pred_dir, + ) + + res = {} + res["PQ"] = 100 * pq_res["All"]["pq"] + res["SQ"] = 100 * pq_res["All"]["sq"] + res["RQ"] = 100 * pq_res["All"]["rq"] + res["PQ_th"] = 100 * pq_res["Things"]["pq"] + res["SQ_th"] = 100 * pq_res["Things"]["sq"] + res["RQ_th"] = 100 * pq_res["Things"]["rq"] + res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] + res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] + res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] + + results = OrderedDict({"panoptic_seg": res}) + _print_panoptic_results(pq_res) + + return results + + +def 
_print_panoptic_results(pq_res): + headers = ["", "PQ", "SQ", "RQ", "#categories"] + data = [] + for name in ["All", "Things", "Stuff"]: + row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] + data.append(row) + table = tabulate( + data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" + ) + logger.info("Panoptic Evaluation Results:\n" + table) + + +if __name__ == "__main__": + from custom_detectron2.utils.logger import setup_logger + + logger = setup_logger() + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--gt-json") + parser.add_argument("--gt-dir") + parser.add_argument("--pred-json") + parser.add_argument("--pred-dir") + args = parser.parse_args() + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir + ) + _print_panoptic_results(pq_res) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/pascal_voc_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/pascal_voc_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..7de637cf20a7389ebc60b16c491fbcbd3ab87305 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/pascal_voc_evaluation.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +import os +import tempfile +import xml.etree.ElementTree as ET +from collections import OrderedDict, defaultdict +from functools import lru_cache +import torch + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.utils import comm +from custom_detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + + +class PascalVOCDetectionEvaluator(DatasetEvaluator): + """ + Evaluate Pascal VOC style AP for Pascal VOC dataset. + It contains a synchronization, therefore has to be called from all ranks. + + Note that the concept of AP can be implemented in different ways and may not + produce identical results. This class mimics the implementation of the official + Pascal VOC Matlab API, and should produce similar but not identical results to the + official API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): name of the dataset, e.g., "voc_2007_test" + """ + self._dataset_name = dataset_name + meta = MetadataCatalog.get(dataset_name) + + # Too many tiny files, download all to local for speed. 
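+        # For reference (path illustrative): the template built below later
+        # expands to something like "<dirname>/Annotations/000005.xml" for
+        # image id "000005".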
+        annotation_dir_local = PathManager.get_local_path(
+            os.path.join(meta.dirname, "Annotations/")
+        )
+        self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
+        self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
+        self._class_names = meta.thing_classes
+        assert meta.year in [2007, 2012], meta.year
+        self._is_2007 = meta.year == 2007
+        self._cpu_device = torch.device("cpu")
+        self._logger = logging.getLogger(__name__)

+    def reset(self):
+        self._predictions = defaultdict(list)  # class name -> list of prediction strings
+
+    def process(self, inputs, outputs):
+        for input, output in zip(inputs, outputs):
+            image_id = input["image_id"]
+            instances = output["instances"].to(self._cpu_device)
+            boxes = instances.pred_boxes.tensor.numpy()
+            scores = instances.scores.tolist()
+            classes = instances.pred_classes.tolist()
+            for box, score, cls in zip(boxes, scores, classes):
+                xmin, ymin, xmax, ymax = box
+                # The inverse of data loading logic in `datasets/pascal_voc.py`
+                xmin += 1
+                ymin += 1
+                self._predictions[cls].append(
+                    f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
+                )
+
+    def evaluate(self):
+        """
+        Returns:
+            dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
+        """
+        all_predictions = comm.gather(self._predictions, dst=0)
+        if not comm.is_main_process():
+            return
+        predictions = defaultdict(list)
+        for predictions_per_rank in all_predictions:
+            for clsid, lines in predictions_per_rank.items():
+                predictions[clsid].extend(lines)
+        del all_predictions
+
+        self._logger.info(
+            "Evaluating {} using {} metric. "
+            "Note that results do not use the official Matlab API.".format(
+                self._dataset_name, 2007 if self._is_2007 else 2012
+            )
+        )
+
+        with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
+            res_file_template = os.path.join(dirname, "{}.txt")
+
+            aps = defaultdict(list)  # iou -> ap per class
+            for cls_id, cls_name in enumerate(self._class_names):
+                lines = predictions.get(cls_id, [""])
+
+                with open(res_file_template.format(cls_name), "w") as f:
+                    f.write("\n".join(lines))
+
+                for thresh in range(50, 100, 5):
+                    rec, prec, ap = voc_eval(
+                        res_file_template,
+                        self._anno_file_template,
+                        self._image_set_path,
+                        cls_name,
+                        ovthresh=thresh / 100.0,
+                        use_07_metric=self._is_2007,
+                    )
+                    aps[thresh].append(ap * 100)
+
+        ret = OrderedDict()
+        mAP = {iou: np.mean(x) for iou, x in aps.items()}
+        ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
+        return ret
+
+
+##############################################################################
+#
+# Below code is modified from
+# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Bharath Hariharan
+# --------------------------------------------------------
+
+"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
+
+
+@lru_cache(maxsize=None)
+def parse_rec(filename):
+    """Parse a PASCAL VOC xml file."""
+    with PathManager.open(filename) as f:
+        tree = ET.parse(f)
+    objects = []
+    for obj in tree.findall("object"):
+        obj_struct = {}
+        obj_struct["name"] = obj.find("name").text
+        obj_struct["pose"] = obj.find("pose").text
+        obj_struct["truncated"] = int(obj.find("truncated").text)
+        obj_struct["difficult"] = int(obj.find("difficult").text)
+        bbox = obj.find("bndbox")
+        obj_struct["bbox"] = [
+
int(bbox.find("xmin").text), + int(bbox.find("ymin").text), + int(bbox.find("xmax").text), + int(bbox.find("ymax").text), + ] + objects.append(obj_struct) + + return objects + + +def voc_ap(rec, prec, use_07_metric=False): + """Compute VOC AP given precision and recall. If use_07_metric is true, uses + the VOC 07 11-point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): + """rec, prec, ap = voc_eval(detpath, + annopath, + imagesetfile, + classname, + [ovthresh], + [use_07_metric]) + + Top level function that does the PASCAL VOC evaluation. + + detpath: Path to detections + detpath.format(classname) should produce the detection results file. + annopath: Path to annotations + annopath.format(imagename) should be the xml annotations file. + imagesetfile: Text file containing the list of images, one image per line. + classname: Category name (duh) + [ovthresh]: Overlap threshold (default = 0.5) + [use_07_metric]: Whether to use VOC07's 11 point AP computation + (default False) + """ + # assumes detections are in detpath.format(classname) + # assumes annotations are in annopath.format(imagename) + # assumes imagesetfile is a text file with each line an image name + + # first load gt + # read list of images + with PathManager.open(imagesetfile, "r") as f: + lines = f.readlines() + imagenames = [x.strip() for x in lines] + + # load annots + recs = {} + for imagename in imagenames: + recs[imagename] = parse_rec(annopath.format(imagename)) + + # extract gt objects for this class + class_recs = {} + npos = 0 + for imagename in imagenames: + R = [obj for obj in recs[imagename] if obj["name"] == classname] + bbox = np.array([x["bbox"] for x in R]) + difficult = np.array([x["difficult"] for x in R]).astype(bool) + # difficult = np.array([False for x in R]).astype(bool) # treat all "difficult" as GT + det = [False] * len(R) + npos = npos + sum(~difficult) + class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} + + # read dets + detfile = detpath.format(classname) + with open(detfile, "r") as f: + lines = f.readlines() + + splitlines = [x.strip().split(" ") for x in lines] + image_ids = [x[0] for x in splitlines] + confidence = np.array([float(x[1]) for x in splitlines]) + BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + BB = BB[sorted_ind, :] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + R = class_recs[image_ids[d]] + bb = BB[d, :].astype(float) + ovmax = -np.inf + BBGT = R["bbox"].astype(float) + + if BBGT.size > 0: + # compute overlaps + # intersection + ixmin = np.maximum(BBGT[:, 
0], bb[0]) + iymin = np.maximum(BBGT[:, 1], bb[1]) + ixmax = np.minimum(BBGT[:, 2], bb[2]) + iymax = np.minimum(BBGT[:, 3], bb[3]) + iw = np.maximum(ixmax - ixmin + 1.0, 0.0) + ih = np.maximum(iymax - iymin + 1.0, 0.0) + inters = iw * ih + + # union + uni = ( + (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) + - inters + ) + + overlaps = inters / uni + ovmax = np.max(overlaps) + jmax = np.argmax(overlaps) + + if ovmax > ovthresh: + if not R["difficult"][jmax]: + if not R["det"][jmax]: + tp[d] = 1.0 + R["det"][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/rotated_coco_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/rotated_coco_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d8b1a1cbb8243331f29f6a877f07373a7a6d97 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/rotated_coco_evaluation.py @@ -0,0 +1,207 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import json +import numpy as np +import os +import torch +from custom_pycocotools.cocoeval import COCOeval, maskUtils + +from custom_detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated +from custom_detectron2.utils.file_io import PathManager + +from .coco_evaluation import COCOEvaluator + + +class RotatedCOCOeval(COCOeval): + @staticmethod + def is_rotated(box_list): + if type(box_list) == np.ndarray: + return box_list.shape[1] == 5 + elif type(box_list) == list: + if box_list == []: # cannot decide the box_dim + return False + return np.all( + np.array( + [ + (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray)) + for obj in box_list + ] + ) + ) + return False + + @staticmethod + def boxlist_to_tensor(boxlist, output_box_dim): + if type(boxlist) == np.ndarray: + box_tensor = torch.from_numpy(boxlist) + elif type(boxlist) == list: + if boxlist == []: + return torch.zeros((0, output_box_dim), dtype=torch.float32) + else: + box_tensor = torch.FloatTensor(boxlist) + else: + raise Exception("Unrecognized boxlist type") + + input_box_dim = box_tensor.shape[1] + if input_box_dim != output_box_dim: + if input_box_dim == 4 and output_box_dim == 5: + box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) + else: + raise Exception( + "Unable to convert from {}-dim box to {}-dim box".format( + input_box_dim, output_box_dim + ) + ) + return box_tensor + + def compute_iou_dt_gt(self, dt, gt, is_crowd): + if self.is_rotated(dt) or self.is_rotated(gt): + # TODO: take is_crowd into consideration + assert all(c == 0 for c in is_crowd) + dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5)) + gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5)) + return pairwise_iou_rotated(dt, gt) + else: + # This is the same as the classical COCO evaluation + return maskUtils.iou(dt, gt, is_crowd) + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, 
cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + assert p.iouType == "bbox", "unsupported iouType for iou computation" + + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + + # Note: this function is copied from cocoeval.py in cocoapi + # and the major difference is here. + ious = self.compute_iou_dt_gt(d, g, iscrowd) + return ious + + +class RotatedCOCOEvaluator(COCOEvaluator): + """ + Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs, + with rotated boxes support. + Note: this uses IOU only and does not consider angle differences. + """ + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + + prediction["instances"] = self.instances_to_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def instances_to_json(self, instances, img_id): + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + if boxes.shape[1] == 4: + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + + results.append(result) + return results + + def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused + """ + Evaluate predictions on the given tasks. + Fill self._results with the metrics of the tasks. 
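+
+        Args:
+            predictions (list[dict]): the per-image dicts accumulated by
+                :meth:`process`, each carrying an "image_id" and the "instances"
+                converted for it.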
+ """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in coco_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + + assert self._tasks is None or set(self._tasks) == { + "bbox" + }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported" + coco_eval = ( + self._evaluate_predictions_on_coco(self._coco_api, coco_results) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + task = "bbox" + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _evaluate_predictions_on_coco(self, coco_gt, coco_results): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + coco_dt = coco_gt.loadRes(coco_results) + + # Only bbox is supported for now + coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox") + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/sem_seg_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/sem_seg_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..b82c23ddee9a916d0ba42b1536465e8a359c7932 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/sem_seg_evaluation.py @@ -0,0 +1,265 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import json +import logging +import numpy as np +import os +from collections import OrderedDict +from typing import Optional, Union +import custom_pycocotools.mask as mask_util +import torch +from PIL import Image + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.utils.comm import all_gather, is_main_process, synchronize +from custom_detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + +_CV2_IMPORTED = True +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + _CV2_IMPORTED = False + + +def load_image_into_numpy_array( + filename: str, + copy: bool = False, + dtype: Optional[Union[np.dtype, str]] = None, +) -> np.ndarray: + with PathManager.open(filename, "rb") as f: + array = np.array(Image.open(f), copy=copy, dtype=dtype) + return array + + +class SemSegEvaluator(DatasetEvaluator): + """ + Evaluate semantic segmentation metrics. + """ + + def __init__( + self, + dataset_name, + distributed=True, + output_dir=None, + *, + sem_seg_loading_fn=load_image_into_numpy_array, + num_classes=None, + ignore_label=None, + ): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + distributed (bool): if True, will collect results from all ranks for evaluation. 
+ Otherwise, will evaluate the results in the current process. + output_dir (str): an output directory to dump results. + sem_seg_loading_fn: function to read sem seg file and load into numpy array. + Default provided, but projects can customize. + num_classes, ignore_label: deprecated argument + """ + self._logger = logging.getLogger(__name__) + if num_classes is not None: + self._logger.warn( + "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." + ) + if ignore_label is not None: + self._logger.warn( + "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." + ) + self._dataset_name = dataset_name + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + + self.input_file_to_gt_file = { + dataset_record["file_name"]: dataset_record["sem_seg_file_name"] + for dataset_record in DatasetCatalog.get(dataset_name) + } + + meta = MetadataCatalog.get(dataset_name) + # Dict that maps contiguous training ids to COCO category ids + try: + c2d = meta.stuff_dataset_id_to_contiguous_id + self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} + except AttributeError: + self._contiguous_id_to_dataset_id = None + self._class_names = meta.stuff_classes + self.sem_seg_loading_fn = sem_seg_loading_fn + self._num_classes = len(meta.stuff_classes) + if num_classes is not None: + assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" + self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label + + # This is because cv2.erode did not work for int datatype. Only works for uint8. + self._compute_boundary_iou = True + if not _CV2_IMPORTED: + self._compute_boundary_iou = False + self._logger.warn( + """Boundary IoU calculation requires OpenCV. B-IoU metrics are + not going to be computed because OpenCV is not available to import.""" + ) + if self._num_classes >= np.iinfo(np.uint8).max: + self._compute_boundary_iou = False + self._logger.warn( + f"""SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation! + B-IoU metrics are not going to be computed. Max allowed value (exclusive) + for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}. + The number of classes of dataset {self._dataset_name} is {self._num_classes}""" + ) + + def reset(self): + self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) + self._b_conf_matrix = np.zeros( + (self._num_classes + 1, self._num_classes + 1), dtype=np.int64 + ) + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a model. + It is a list of dicts. Each dict corresponds to an image and + contains keys like "height", "width", "file_name". + outputs: the outputs of a model. It is either list of semantic segmentation predictions + (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic + segmentation prediction in the same format. 
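+
+        Example (a minimal illustration; names and shapes are hypothetical)::
+
+            inputs = [{"file_name": "frame_0001.png", "height": 512, "width": 512}]
+            outputs = [{"sem_seg": torch.rand(num_classes, 512, 512)}]
+            evaluator.process(inputs, outputs)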
+ """ + for input, output in zip(inputs, outputs): + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) + pred = np.array(output, dtype=np.int) + gt_filename = self.input_file_to_gt_file[input["file_name"]] + gt = self.sem_seg_loading_fn(gt_filename, dtype=np.int) + + gt[gt == self._ignore_label] = self._num_classes + + self._conf_matrix += np.bincount( + (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), + minlength=self._conf_matrix.size, + ).reshape(self._conf_matrix.shape) + + if self._compute_boundary_iou: + b_gt = self._mask_to_boundary(gt.astype(np.uint8)) + b_pred = self._mask_to_boundary(pred.astype(np.uint8)) + + self._b_conf_matrix += np.bincount( + (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), + minlength=self._conf_matrix.size, + ).reshape(self._conf_matrix.shape) + + self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) + + def evaluate(self): + """ + Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): + + * Mean intersection-over-union averaged across classes (mIoU) + * Frequency Weighted IoU (fwIoU) + * Mean pixel accuracy averaged across classes (mACC) + * Pixel Accuracy (pACC) + """ + if self._distributed: + synchronize() + conf_matrix_list = all_gather(self._conf_matrix) + b_conf_matrix_list = all_gather(self._b_conf_matrix) + self._predictions = all_gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not is_main_process(): + return + + self._conf_matrix = np.zeros_like(self._conf_matrix) + for conf_matrix in conf_matrix_list: + self._conf_matrix += conf_matrix + + self._b_conf_matrix = np.zeros_like(self._b_conf_matrix) + for b_conf_matrix in b_conf_matrix_list: + self._b_conf_matrix += b_conf_matrix + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._predictions)) + + acc = np.full(self._num_classes, np.nan, dtype=np.float) + iou = np.full(self._num_classes, np.nan, dtype=np.float) + tp = self._conf_matrix.diagonal()[:-1].astype(np.float) + pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) + class_weights = pos_gt / np.sum(pos_gt) + pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) + acc_valid = pos_gt > 0 + acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] + union = pos_gt + pos_pred - tp + iou_valid = np.logical_and(acc_valid, union > 0) + iou[iou_valid] = tp[iou_valid] / union[iou_valid] + macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) + miou = np.sum(iou[iou_valid]) / np.sum(iou_valid) + fiou = np.sum(iou[iou_valid] * class_weights[iou_valid]) + pacc = np.sum(tp) / np.sum(pos_gt) + + if self._compute_boundary_iou: + b_iou = np.full(self._num_classes, np.nan, dtype=np.float) + b_tp = self._b_conf_matrix.diagonal()[:-1].astype(np.float) + b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(np.float) + b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(np.float) + b_union = b_pos_gt + b_pos_pred - b_tp + b_iou_valid = b_union > 0 + b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid] + + res = {} + res["mIoU"] = 100 * miou + res["fwIoU"] = 100 * fiou + for i, name in enumerate(self._class_names): + res[f"IoU-{name}"] = 100 * iou[i] + if self._compute_boundary_iou: + res[f"BoundaryIoU-{name}"] = 100 * b_iou[i] + res[f"min(IoU, B-Iou)-{name}"] = 100 * min(iou[i], b_iou[i]) + res["mACC"] = 100 * 
macc + res["pACC"] = 100 * pacc + for i, name in enumerate(self._class_names): + res[f"ACC-{name}"] = 100 * acc[i] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(res, f) + results = OrderedDict({"sem_seg": res}) + self._logger.info(results) + return results + + def encode_json_sem_seg(self, sem_seg, input_file_name): + """ + Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. + See http://cocodataset.org/#format-results + """ + json_list = [] + for label in np.unique(sem_seg): + if self._contiguous_id_to_dataset_id is not None: + assert ( + label in self._contiguous_id_to_dataset_id + ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) + dataset_id = self._contiguous_id_to_dataset_id[label] + else: + dataset_id = int(label) + mask = (sem_seg == label).astype(np.uint8) + mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] + mask_rle["counts"] = mask_rle["counts"].decode("utf-8") + json_list.append( + {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} + ) + return json_list + + def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02): + assert mask.ndim == 2, "mask_to_boundary expects a 2-dimensional image" + h, w = mask.shape + diag_len = np.sqrt(h**2 + w**2) + dilation = max(1, int(round(dilation_ratio * diag_len))) + kernel = np.ones((3, 3), dtype=np.uint8) + + padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0) + eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation) + eroded_mask = eroded_mask_with_padding[1:-1, 1:-1] + boundary = mask - eroded_mask + return boundary diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/testing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..9e5ae625bb0593fc20739dd3ea549157e4df4f3d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/evaluation/testing.py @@ -0,0 +1,85 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +import pprint +import sys +from collections.abc import Mapping + + +def print_csv_format(results): + """ + Print main metrics in a format similar to Detectron, + so that they are easy to copypaste into a spreadsheet. + + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + unordered dict can also be printed, but in arbitrary order + """ + assert isinstance(results, Mapping) or not len(results), results + logger = logging.getLogger(__name__) + for task, res in results.items(): + if isinstance(res, Mapping): + # Don't print "AP-category" metrics since they are usually not tracked. 
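+            # For a "bbox" task, for example, the three log calls below emit
+            # (numbers illustrative):
+            #   copypaste: Task: bbox
+            #   copypaste: AP,AP50,AP75
+            #   copypaste: 40.2250,61.0123,43.8765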
+ important_res = [(k, v) for k, v in res.items() if "-" not in k] + logger.info("copypaste: Task: {}".format(task)) + logger.info("copypaste: " + ",".join([k[0] for k in important_res])) + logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) + else: + logger.info(f"copypaste: {task}={res}") + + +def verify_results(cfg, results): + """ + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + + Returns: + bool: whether the verification succeeds or not + """ + expected_results = cfg.TEST.EXPECTED_RESULTS + if not len(expected_results): + return True + + ok = True + for task, metric, expected, tolerance in expected_results: + actual = results[task].get(metric, None) + if actual is None: + ok = False + continue + if not np.isfinite(actual): + ok = False + continue + diff = abs(actual - expected) + if diff > tolerance: + ok = False + + logger = logging.getLogger(__name__) + if not ok: + logger.error("Result verification failed!") + logger.error("Expected Results: " + str(expected_results)) + logger.error("Actual Results: " + pprint.pformat(results)) + + sys.exit(1) + else: + logger.info("Results verification passed.") + return ok + + +def flatten_results_dict(results): + """ + Expand a hierarchical dict of scalars into a flat dict of scalars. + If results[k1][k2][k3] = v, the returned dict will have the entry + {"k1/k2/k3": v}. + + Args: + results (dict): + """ + r = {} + for k, v in results.items(): + if isinstance(v, Mapping): + v = flatten_results_dict(v) + for kk, vv in v.items(): + r[k + "/" + kk] = vv + else: + r[k] = v + return r diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c86ff62516f4e8e4b1a6c1f33f11192933cf3861 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/README.md @@ -0,0 +1,15 @@ + +This directory contains code to prepare a detectron2 model for deployment. +Currently it supports exporting a detectron2 model to TorchScript, ONNX, or (deprecated) Caffe2 format. + +Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. + + +### Acknowledgements + +Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools. + +Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who +help export Detectron2 models to TorchScript. + +Thanks to ONNX Converter team at Microsoft who help export Detectron2 models to ONNX. 
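+
+### Example
+
+A minimal usage sketch (not from the upstream docs): it assumes the optional
+caffe2 dependency is importable, that `cfg` is a detectron2 config, that `model`
+already has its weights loaded, and that `inputs` is a real sample batch that
+yields detections:
+
+```python
+from custom_detectron2.export import Caffe2Tracer
+
+tracer = Caffe2Tracer(cfg, model, inputs)
+ts_model = tracer.export_torchscript()  # a torch.jit.TracedModule
+ts_model.save("model.ts")
+```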
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5a58758f64aae6071fa688be4400622ce6036efa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import warnings + +from .flatten import TracingAdapter +from .torchscript import dump_torchscript_IR, scripting_with_instances + +try: + from caffe2.proto import caffe2_pb2 as _tmp + from caffe2.python import core + + # caffe2 is optional +except ImportError: + pass +else: + from .api import * + + +# TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported +STABLE_ONNX_OPSET_VERSION = 11 + + +def add_export_config(cfg): + warnings.warn( + "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning + ) + return cfg + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/api.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/api.py new file mode 100644 index 0000000000000000000000000000000000000000..4f9d7dac1d086902f082910059d539083bd85c0d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/api.py @@ -0,0 +1,230 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import logging +import os +import torch +from caffe2.proto import caffe2_pb2 +from torch import nn + +from custom_detectron2.config import CfgNode +from custom_detectron2.utils.file_io import PathManager + +from .caffe2_inference import ProtobufDetectionModel +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph + +__all__ = [ + "Caffe2Model", + "Caffe2Tracer", +] + + +class Caffe2Tracer: + """ + Make a detectron2 model traceable with Caffe2 operators. + This class creates a traceable version of a detectron2 model which: + + 1. Rewrite parts of the model using ops in Caffe2. Note that some ops do + not have GPU implementation in Caffe2. + 2. Remove post-processing and only produce raw layer outputs + + After making a traceable model, the class provide methods to export such a + model to different deployment formats. + Exported graph produced by this class take two input tensors: + + 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]). + (H, W) often has to be padded to multiple of 32 (depend on the model + architecture). + 2. 1x3 float "im_info", each row of which is (height, width, 1.0). + Height and width are true image shapes before padding. + + The class currently only supports models using builtin meta architectures. + Batch inference is not supported, and contributions are welcome. + """ + + def __init__(self, cfg: CfgNode, model: nn.Module, inputs): + """ + Args: + cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model. + model (nn.Module): An original pytorch model. Must be among a few official models + in detectron2 that can be converted to become caffe2-compatible automatically. + Weights have to be already loaded to this model. + inputs: sample inputs that the given model takes for inference. + Will be used to trace the model. For most models, random inputs with + no detected objects will not work as they lead to wrong traces. 
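+
+        Example (an illustrative sketch; assumes the conditions above hold)::
+
+            tracer = Caffe2Tracer(cfg, torch_model, batched_inputs)
+            onnx_model = tracer.export_onnx()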
+ """ + assert isinstance(cfg, CfgNode), cfg + assert isinstance(model, torch.nn.Module), type(model) + + # TODO make it support custom models, by passing in c2 model directly + C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE] + self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model)) + self.inputs = inputs + self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs) + + def export_caffe2(self): + """ + Export the model to Caffe2's protobuf format. + The returned object can be saved with its :meth:`.save_protobuf()` method. + The result can be loaded and executed using Caffe2 runtime. + + Returns: + :class:`Caffe2Model` + """ + from .caffe2_export import export_caffe2_detection_model + + predict_net, init_net = export_caffe2_detection_model( + self.traceable_model, self.traceable_inputs + ) + return Caffe2Model(predict_net, init_net) + + def export_onnx(self): + """ + Export the model to ONNX format. + Note that the exported model contains custom ops only available in caffe2, therefore it + cannot be directly executed by other runtime (such as onnxruntime or TensorRT). + Post-processing or transformation passes may be applied on the model to accommodate + different runtimes, but we currently do not provide support for them. + + Returns: + onnx.ModelProto: an onnx model. + """ + from .caffe2_export import export_onnx_model as export_onnx_model_impl + + return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,)) + + def export_torchscript(self): + """ + Export the model to a ``torch.jit.TracedModule`` by tracing. + The returned object can be saved to a file by ``.save()``. + + Returns: + torch.jit.TracedModule: a torch TracedModule + """ + logger = logging.getLogger(__name__) + logger.info("Tracing the model with torch.jit.trace ...") + with torch.no_grad(): + return torch.jit.trace(self.traceable_model, (self.traceable_inputs,)) + + +class Caffe2Model(nn.Module): + """ + A wrapper around the traced model in Caffe2's protobuf format. + The exported graph has different inputs/outputs from the original Pytorch + model, as explained in :class:`Caffe2Tracer`. This class wraps around the + exported graph to simulate the same interface as the original Pytorch model. + It also provides functions to save/load models in Caffe2's format.' + + Examples: + :: + c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2() + inputs = [{"image": img_tensor_CHW}] + outputs = c2_model(inputs) + orig_outputs = torch_model(inputs) + """ + + def __init__(self, predict_net, init_net): + super().__init__() + self.eval() # always in eval mode + self._predict_net = predict_net + self._init_net = init_net + self._predictor = None + + __init__.__HIDE_SPHINX_DOC__ = True + + @property + def predict_net(self): + """ + caffe2.core.Net: the underlying caffe2 predict net + """ + return self._predict_net + + @property + def init_net(self): + """ + caffe2.core.Net: the underlying caffe2 init net + """ + return self._init_net + + def save_protobuf(self, output_dir): + """ + Save the model as caffe2's protobuf format. + It saves the following files: + + * "model.pb": definition of the graph. Can be visualized with + tools like `netron `_. + * "model_init.pb": model parameters + * "model.pbtxt": human-readable definition of the graph. Not + needed for deployment. + + Args: + output_dir (str): the output directory to save protobuf files. 
+ """ + logger = logging.getLogger(__name__) + logger.info("Saving model to {} ...".format(output_dir)) + if not PathManager.exists(output_dir): + PathManager.mkdirs(output_dir) + + with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f: + f.write(self._predict_net.SerializeToString()) + with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f: + f.write(str(self._predict_net)) + with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f: + f.write(self._init_net.SerializeToString()) + + def save_graph(self, output_file, inputs=None): + """ + Save the graph as SVG format. + + Args: + output_file (str): a SVG file + inputs: optional inputs given to the model. + If given, the inputs will be used to run the graph to record + shape of every tensor. The shape information will be + saved together with the graph. + """ + from .caffe2_export import run_and_save_graph + + if inputs is None: + save_graph(self._predict_net, output_file, op_only=False) + else: + size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0) + device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii") + inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device) + inputs = [x.cpu().numpy() for x in inputs] + run_and_save_graph(self._predict_net, self._init_net, inputs, output_file) + + @staticmethod + def load_protobuf(dir): + """ + Args: + dir (str): a directory used to save Caffe2Model with + :meth:`save_protobuf`. + The files "model.pb" and "model_init.pb" are needed. + + Returns: + Caffe2Model: the caffe2 model loaded from this directory. + """ + predict_net = caffe2_pb2.NetDef() + with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f: + predict_net.ParseFromString(f.read()) + + init_net = caffe2_pb2.NetDef() + with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f: + init_net.ParseFromString(f.read()) + + return Caffe2Model(predict_net, init_net) + + def __call__(self, inputs): + """ + An interface that wraps around a Caffe2 model and mimics detectron2's models' + input/output format. See details about the format at :doc:`/tutorials/models`. + This is used to compare the outputs of caffe2 model with its original torch model. + + Due to the extra conversion between Pytorch/Caffe2, this method is not meant for + benchmark. Because of the conversion, this method also has dependency + on detectron2 in order to convert to detectron2's output format. + """ + if self._predictor is None: + self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net) + return self._predictor(inputs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/c10.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/c10.py new file mode 100644 index 0000000000000000000000000000000000000000..c657db1eecb3a80622bee34d8ea58b9c3ff913aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/c10.py @@ -0,0 +1,557 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import math +from typing import Dict +import torch +import torch.nn.functional as F + +from custom_detectron2.layers import ShapeSpec, cat +from custom_detectron2.layers.roi_align_rotated import ROIAlignRotated +from custom_detectron2.modeling import poolers +from custom_detectron2.modeling.proposal_generator import rpn +from custom_detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference +from custom_detectron2.structures import Boxes, ImageList, Instances, Keypoints, RotatedBoxes + +from .shared import alias, to_device + + +""" +This file contains caffe2-compatible implementation of several detectron2 components. +""" + + +class Caffe2Boxes(Boxes): + """ + Representing a list of detectron2.structures.Boxes from minibatch, each box + is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector + (batch index + 5 coordinates) for RotatedBoxes. + """ + + def __init__(self, tensor): + assert isinstance(tensor, torch.Tensor) + assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size() + # TODO: make tensor immutable when dim is Nx5 for Boxes, + # and Nx6 for RotatedBoxes? + self.tensor = tensor + + +# TODO clean up this class, maybe just extend Instances +class InstancesList(object): + """ + Tensor representation of a list of Instances object for a batch of images. + + When dealing with a batch of images with Caffe2 ops, a list of bboxes + (instances) are usually represented by single Tensor with size + (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is + for providing common functions to convert between these two representations. + """ + + def __init__(self, im_info, indices, extra_fields=None): + # [N, 3] -> (H, W, Scale) + self.im_info = im_info + # [N,] -> indice of batch to which the instance belongs + self.indices = indices + # [N, ...] + self.batch_extra_fields = extra_fields or {} + + self.image_size = self.im_info + + def get_fields(self): + """like `get_fields` in the Instances object, + but return each field in tensor representations""" + ret = {} + for k, v in self.batch_extra_fields.items(): + # if isinstance(v, torch.Tensor): + # tensor_rep = v + # elif isinstance(v, (Boxes, Keypoints)): + # tensor_rep = v.tensor + # else: + # raise ValueError("Can't find tensor representation for: {}".format()) + ret[k] = v + return ret + + def has(self, name): + return name in self.batch_extra_fields + + def set(self, name, value): + # len(tensor) is a bad practice that generates ONNX constants during tracing. 
+        # Although not a problem for the `assert` statement below, torch ONNX exporter
+        # still raises a misleading warning as it does not know that this call comes
+        # from `assert`.
+        if isinstance(value, Boxes):
+            data_len = value.tensor.shape[0]
+        elif isinstance(value, torch.Tensor):
+            data_len = value.shape[0]
+        else:
+            data_len = len(value)
+        if len(self.batch_extra_fields):
+            assert (
+                len(self) == data_len
+            ), "Adding a field of length {} to an Instances of length {}".format(data_len, len(self))
+        self.batch_extra_fields[name] = value
+
+    def __getattr__(self, name):
+        if name not in self.batch_extra_fields:
+            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
+        return self.batch_extra_fields[name]
+
+    def __len__(self):
+        return len(self.indices)
+
+    def flatten(self):
+        ret = []
+        for _, v in self.batch_extra_fields.items():
+            if isinstance(v, (Boxes, Keypoints)):
+                ret.append(v.tensor)
+            else:
+                ret.append(v)
+        return ret
+
+    @staticmethod
+    def to_d2_instances_list(instances_list):
+        """
+        Convert InstancesList to List[Instances]. The input `instances_list` can
+        also be a List[Instances], in which case this method is a no-op.
+        """
+        if not isinstance(instances_list, InstancesList):
+            assert all(isinstance(x, Instances) for x in instances_list)
+            return instances_list
+
+        ret = []
+        for i, info in enumerate(instances_list.im_info):
+            instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))
+
+            ids = instances_list.indices == i
+            for k, v in instances_list.batch_extra_fields.items():
+                if isinstance(v, torch.Tensor):
+                    instances.set(k, v[ids])
+                    continue
+                elif isinstance(v, Boxes):
+                    instances.set(k, v[ids, -4:])
+                    continue
+
+                target_type, tensor_source = v
+                assert isinstance(tensor_source, torch.Tensor)
+                assert tensor_source.shape[0] == instances_list.indices.shape[0]
+                tensor_source = tensor_source[ids]
+
+                if issubclass(target_type, Boxes):
+                    instances.set(k, Boxes(tensor_source[:, -4:]))
+                elif issubclass(target_type, Keypoints):
+                    instances.set(k, Keypoints(tensor_source))
+                elif issubclass(target_type, torch.Tensor):
+                    instances.set(k, tensor_source)
+                else:
+                    raise ValueError("Can't handle target type: {}".format(target_type))
+
+            ret.append(instances)
+        return ret
+
+
+class Caffe2Compatible(object):
+    """
+    A model can inherit this class to indicate that it can be traced and deployed with caffe2.
+    """
+
+    def _get_tensor_mode(self):
+        return self._tensor_mode
+
+    def _set_tensor_mode(self, v):
+        self._tensor_mode = v
+
+    tensor_mode = property(_get_tensor_mode, _set_tensor_mode)
+    """
+    If true, the model expects C2-style tensor only inputs/outputs format.
+ """ + + +class Caffe2RPN(Caffe2Compatible, rpn.RPN): + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape) + assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple( + cfg.MODEL.RPN.BBOX_REG_WEIGHTS + ) == (1.0, 1.0, 1.0, 1.0, 1.0) + return ret + + def _generate_proposals( + self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None + ): + assert isinstance(images, ImageList) + if self.tensor_mode: + im_info = images.image_sizes + else: + im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to( + images.tensor.device + ) + assert isinstance(im_info, torch.Tensor) + + rpn_rois_list = [] + rpn_roi_probs_list = [] + for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip( + objectness_logits_pred, + anchor_deltas_pred, + [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()], + self.anchor_generator.strides, + ): + scores = scores.detach() + bbox_deltas = bbox_deltas.detach() + + rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals( + scores, + bbox_deltas, + im_info, + cell_anchors_tensor, + spatial_scale=1.0 / feat_stride, + pre_nms_topN=self.pre_nms_topk[self.training], + post_nms_topN=self.post_nms_topk[self.training], + nms_thresh=self.nms_thresh, + min_size=self.min_box_size, + # correct_transform_coords=True, # deprecated argument + angle_bound_on=True, # Default + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, # Default + legacy_plus_one=False, + ) + rpn_rois_list.append(rpn_rois) + rpn_roi_probs_list.append(rpn_roi_probs) + + # For FPN in D2, in RPN all proposals from different levels are concated + # together, ranked and picked by top post_nms_topk. Then in ROIPooler + # it calculates level_assignments and calls the RoIAlign from + # the corresponding level. + + if len(objectness_logits_pred) == 1: + rpn_rois = rpn_rois_list[0] + rpn_roi_probs = rpn_roi_probs_list[0] + else: + assert len(rpn_rois_list) == len(rpn_roi_probs_list) + rpn_post_nms_topN = self.post_nms_topk[self.training] + + device = rpn_rois_list[0].device + input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)] + + # TODO remove this after confirming rpn_max_level/rpn_min_level + # is not needed in CollectRpnProposals. + feature_strides = list(self.anchor_generator.strides) + rpn_min_level = int(math.log2(feature_strides[0])) + rpn_max_level = int(math.log2(feature_strides[-1])) + assert (rpn_max_level - rpn_min_level + 1) == len( + rpn_rois_list + ), "CollectRpnProposals requires continuous levels" + + rpn_rois = torch.ops._caffe2.CollectRpnProposals( + input_list, + # NOTE: in current implementation, rpn_max_level and rpn_min_level + # are not needed, only the subtraction of two matters and it + # can be infer from the number of inputs. Keep them now for + # consistency. 
+ rpn_max_level=2 + len(rpn_rois_list) - 1, + rpn_min_level=2, + rpn_post_nms_topN=rpn_post_nms_topN, + ) + rpn_rois = to_device(rpn_rois, device) + rpn_roi_probs = [] + + proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode) + return proposals, {} + + def forward(self, images, features, gt_instances=None): + assert not self.training + features = [features[f] for f in self.in_features] + objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features) + return self._generate_proposals( + images, + objectness_logits_pred, + anchor_deltas_pred, + gt_instances, + ) + + @staticmethod + def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode): + proposals = InstancesList( + im_info=im_info, + indices=rpn_rois[:, 0], + extra_fields={ + "proposal_boxes": Caffe2Boxes(rpn_rois), + "objectness_logits": (torch.Tensor, rpn_roi_probs), + }, + ) + if not tensor_mode: + proposals = InstancesList.to_d2_instances_list(proposals) + else: + proposals = [proposals] + return proposals + + +class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler): + @staticmethod + def c2_preprocess(box_lists): + assert all(isinstance(x, Boxes) for x in box_lists) + if all(isinstance(x, Caffe2Boxes) for x in box_lists): + # input is pure-tensor based + assert len(box_lists) == 1 + pooler_fmt_boxes = box_lists[0].tensor + else: + pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists) + return pooler_fmt_boxes + + def forward(self, x, box_lists): + assert not self.training + + pooler_fmt_boxes = self.c2_preprocess(box_lists) + num_level_assignments = len(self.level_poolers) + + if num_level_assignments == 1: + if isinstance(self.level_poolers[0], ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = self.level_poolers[0].aligned + + x0 = x[0] + if x0.is_quantized: + x0 = x0.dequantize() + + out = c2_roi_align( + x0, + pooler_fmt_boxes, + order="NCHW", + spatial_scale=float(self.level_poolers[0].spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(self.level_poolers[0].sampling_ratio), + aligned=aligned, + ) + return out + + device = pooler_fmt_boxes.device + assert ( + self.max_level - self.min_level + 1 == 4 + ), "Currently DistributeFpnProposals only support 4 levels" + fpn_outputs = torch.ops._caffe2.DistributeFpnProposals( + to_device(pooler_fmt_boxes, "cpu"), + roi_canonical_scale=self.canonical_box_size, + roi_canonical_level=self.canonical_level, + roi_max_level=self.max_level, + roi_min_level=self.min_level, + legacy_plus_one=False, + ) + fpn_outputs = [to_device(x, device) for x in fpn_outputs] + + rois_fpn_list = fpn_outputs[:-1] + rois_idx_restore_int32 = fpn_outputs[-1] + + roi_feat_fpn_list = [] + for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers): + if isinstance(pooler, ROIAlignRotated): + c2_roi_align = torch.ops._caffe2.RoIAlignRotated + aligned = True + else: + c2_roi_align = torch.ops._caffe2.RoIAlign + aligned = bool(pooler.aligned) + + if x_level.is_quantized: + x_level = x_level.dequantize() + + roi_feat_fpn = c2_roi_align( + x_level, + roi_fpn, + order="NCHW", + spatial_scale=float(pooler.spatial_scale), + pooled_h=int(self.output_size[0]), + pooled_w=int(self.output_size[1]), + sampling_ratio=int(pooler.sampling_ratio), + aligned=aligned, + ) + roi_feat_fpn_list.append(roi_feat_fpn) + + roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0) + assert roi_feat_shuffled.numel() > 0 
and rois_idx_restore_int32.numel() > 0, ( + "Caffe2 export requires tracing with a model checkpoint + input that can produce valid" + " detections. But no detections were obtained with the given checkpoint and input!" + ) + roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32) + return roi_feat + + +class Caffe2FastRCNNOutputsInference: + def __init__(self, tensor_mode): + self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode + + def __call__(self, box_predictor, predictions, proposals): + """equivalent to FastRCNNOutputLayers.inference""" + num_classes = box_predictor.num_classes + score_thresh = box_predictor.test_score_thresh + nms_thresh = box_predictor.test_nms_thresh + topk_per_image = box_predictor.test_topk_per_image + is_rotated = len(box_predictor.box2box_transform.weights) == 5 + + if is_rotated: + box_dim = 5 + assert box_predictor.box2box_transform.weights[4] == 1, ( + "The weights for Rotated BBoxTransform in C2 have only 4 dimensions," + + " thus enforcing the angle weight to be 1 for now" + ) + box2box_transform_weights = box_predictor.box2box_transform.weights[:4] + else: + box_dim = 4 + box2box_transform_weights = box_predictor.box2box_transform.weights + + class_logits, box_regression = predictions + if num_classes + 1 == class_logits.shape[1]: + class_prob = F.softmax(class_logits, -1) + else: + assert num_classes == class_logits.shape[1] + class_prob = F.sigmoid(class_logits) + # BoxWithNMSLimit will infer num_classes from the shape of the class_prob + # So append a zero column as placeholder for the background class + class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1) + + assert box_regression.shape[1] % box_dim == 0 + cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1 + + input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1 + + proposal_boxes = proposals[0].proposal_boxes + if isinstance(proposal_boxes, Caffe2Boxes): + rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals]) + elif isinstance(proposal_boxes, RotatedBoxes): + rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals]) + elif isinstance(proposal_boxes, Boxes): + rois = Boxes.cat([p.proposal_boxes for p in proposals]) + else: + raise NotImplementedError( + 'Expected proposals[0].proposal_boxes to be type "Boxes", ' + f"instead got {type(proposal_boxes)}" + ) + + device, dtype = rois.tensor.device, rois.tensor.dtype + if input_tensor_mode: + im_info = proposals[0].image_size + rois = rois.tensor + else: + im_info = torch.tensor( + [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]] + ) + batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(len(p) for p in proposals) + ], + dim=0, + ) + rois = torch.cat([batch_ids, rois.tensor], dim=1) + + roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform( + to_device(rois, "cpu"), + to_device(box_regression, "cpu"), + to_device(im_info, "cpu"), + weights=box2box_transform_weights, + apply_scale=True, + rotated=is_rotated, + angle_bound_on=True, + angle_bound_lo=-180, + angle_bound_hi=180, + clip_angle_thresh=1.0, + legacy_plus_one=False, + ) + roi_pred_bbox = to_device(roi_pred_bbox, device) + roi_batch_splits = to_device(roi_batch_splits, device) + + nms_outputs = torch.ops._caffe2.BoxWithNMSLimit( + to_device(class_prob, "cpu"), + to_device(roi_pred_bbox, "cpu"), + to_device(roi_batch_splits, "cpu"), + score_thresh=float(score_thresh), + nms=float(nms_thresh), + 
detections_per_im=int(topk_per_image), + soft_nms_enabled=False, + soft_nms_method="linear", + soft_nms_sigma=0.5, + soft_nms_min_score_thres=0.001, + rotated=is_rotated, + cls_agnostic_bbox_reg=cls_agnostic_bbox_reg, + input_boxes_include_bg_cls=False, + output_classes_include_bg_cls=False, + legacy_plus_one=False, + ) + roi_score_nms = to_device(nms_outputs[0], device) + roi_bbox_nms = to_device(nms_outputs[1], device) + roi_class_nms = to_device(nms_outputs[2], device) + roi_batch_splits_nms = to_device(nms_outputs[3], device) + roi_keeps_nms = to_device(nms_outputs[4], device) + roi_keeps_size_nms = to_device(nms_outputs[5], device) + if not self.tensor_mode: + roi_class_nms = roi_class_nms.to(torch.int64) + + roi_batch_ids = cat( + [ + torch.full((b, 1), i, dtype=dtype, device=device) + for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms) + ], + dim=0, + ) + + roi_class_nms = alias(roi_class_nms, "class_nms") + roi_score_nms = alias(roi_score_nms, "score_nms") + roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms") + roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms") + roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms") + roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms") + + results = InstancesList( + im_info=im_info, + indices=roi_batch_ids[:, 0], + extra_fields={ + "pred_boxes": Caffe2Boxes(roi_bbox_nms), + "scores": roi_score_nms, + "pred_classes": roi_class_nms, + }, + ) + + if not self.tensor_mode: + results = InstancesList.to_d2_instances_list(results) + batch_splits = roi_batch_splits_nms.int().tolist() + kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits)) + else: + results = [results] + kept_indices = [roi_keeps_nms] + + return results, kept_indices + + +class Caffe2MaskRCNNInference: + def __call__(self, pred_mask_logits, pred_instances): + """equivalent to mask_head.mask_rcnn_inference""" + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + mask_probs_pred = pred_mask_logits.sigmoid() + mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs") + pred_instances[0].set("pred_masks", mask_probs_pred) + else: + mask_rcnn_inference(pred_mask_logits, pred_instances) + + +class Caffe2KeypointRCNNInference: + def __init__(self, use_heatmap_max_keypoint): + self.use_heatmap_max_keypoint = use_heatmap_max_keypoint + + def __call__(self, pred_keypoint_logits, pred_instances): + # just return the keypoint heatmap for now, + # there will be option to call HeatmapMaxKeypointOp + output = alias(pred_keypoint_logits, "kps_score") + if all(isinstance(x, InstancesList) for x in pred_instances): + assert len(pred_instances) == 1 + if self.use_heatmap_max_keypoint: + device = output.device + output = torch.ops._caffe2.HeatmapMaxKeypoint( + to_device(output, "cpu"), + pred_instances[0].pred_boxes.tensor, + should_output_softmax=True, # worth make it configerable? + ) + output = to_device(output, device) + output = alias(output, "keypoints_out") + pred_instances[0].set("pred_keypoints", output) + return pred_keypoint_logits diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_export.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_export.py new file mode 100644 index 0000000000000000000000000000000000000000..d609c27c7deb396352967dbcbc79b1e00f2a2de1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_export.py @@ -0,0 +1,203 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import copy +import io +import logging +import numpy as np +from typing import List +import onnx +import onnx.optimizer +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core +from caffe2.python.onnx.backend import Caffe2Backend +from tabulate import tabulate +from termcolor import colored +from torch.onnx import OperatorExportTypes + +from .shared import ( + ScopedWS, + construct_init_net_from_params, + fuse_alias_placeholder, + fuse_copy_between_cpu_and_gpu, + get_params_from_init_net, + group_norm_replace_aten_with_caffe2, + infer_device_type, + remove_dead_end_ops, + remove_reshape_for_fc, + save_graph, +) + +logger = logging.getLogger(__name__) + + +def export_onnx_model(model, inputs): + """ + Trace and export a model to onnx format. + + Args: + model (nn.Module): + inputs (tuple[args]): the model will be called by `model(*inputs)` + + Returns: + an onnx model + """ + assert isinstance(model, torch.nn.Module) + + # make sure all modules are in eval mode, onnx may change the training state + # of the module if the states are not consistent + def _check_eval(module): + assert not module.training + + model.apply(_check_eval) + + # Export the model to ONNX + with torch.no_grad(): + with io.BytesIO() as f: + torch.onnx.export( + model, + inputs, + f, + operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK, + # verbose=True, # NOTE: uncomment this for debugging + # export_params=True, + ) + onnx_model = onnx.load_from_string(f.getvalue()) + + return onnx_model + + +def _op_stats(net_def): + type_count = {} + for t in [op.type for op in net_def.op]: + type_count[t] = type_count.get(t, 0) + 1 + type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet + type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count + return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list) + + +def _assign_device_option( + predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor] +): + """ + ONNX exported network doesn't have concept of device, assign necessary + device option for each op in order to make it runable on GPU runtime. 
+ """ + + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + def _assign_op_device_option(net_proto, net_ssa, blob_device_types): + for op, ssa_i in zip(net_proto.op, net_ssa): + if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]: + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + else: + devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]] + assert all(d == devices[0] for d in devices) + if devices[0] == "cuda": + op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0)) + + # update ops in predict_net + predict_net_input_device_types = { + (name, 0): _get_device_type(tensor) + for name, tensor in zip(predict_net.external_input, tensor_inputs) + } + predict_net_device_types = infer_device_type( + predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch" + ) + predict_net_ssa, _ = core.get_ssa(predict_net) + _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types) + + # update ops in init_net + init_net_ssa, versions = core.get_ssa(init_net) + init_net_output_device_types = { + (name, versions[name]): predict_net_device_types[(name, 0)] + for name in init_net.external_output + } + init_net_device_types = infer_device_type( + init_net, known_status=init_net_output_device_types, device_name_style="pytorch" + ) + _assign_op_device_option(init_net, init_net_ssa, init_net_device_types) + + +def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]): + """ + Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX. + + Arg: + model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py + tensor_inputs: a list of tensors that caffe2 model takes as input. + """ + model = copy.deepcopy(model) + assert isinstance(model, torch.nn.Module) + assert hasattr(model, "encode_additional_info") + + # Export via ONNX + logger.info( + "Exporting a {} model via ONNX ...".format(type(model).__name__) + + " Some warnings from ONNX are expected and are usually not to worry about." + ) + onnx_model = export_onnx_model(model, (tensor_inputs,)) + # Convert ONNX model to Caffe2 protobuf + init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) + ops_table = [[op.type, op.input, op.output] for op in predict_net.op] + table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe") + logger.info( + "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan") + ) + + # Apply protobuf optimization + fuse_alias_placeholder(predict_net, init_net) + if any(t.device.type != "cpu" for t in tensor_inputs): + fuse_copy_between_cpu_and_gpu(predict_net) + remove_dead_end_ops(init_net) + _assign_device_option(predict_net, init_net, tensor_inputs) + params, device_options = get_params_from_init_net(init_net) + predict_net, params = remove_reshape_for_fc(predict_net, params) + init_net = construct_init_net_from_params(params, device_options) + group_norm_replace_aten_with_caffe2(predict_net) + + # Record necessary information for running the pb model in Detectron2 system. 
+ model.encode_additional_info(predict_net, init_net) + + logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net))) + logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net))) + + return predict_net, init_net + + +def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path): + """ + Run the caffe2 model on given inputs, recording the shape and draw the graph. + + predict_net/init_net: caffe2 model. + tensor_inputs: a list of tensors that caffe2 model takes as input. + graph_save_path: path for saving graph of exported model. + """ + + logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False) + + # Run the exported Caffe2 net + logger.info("Running ONNX exported model ...") + with ScopedWS("__ws_tmp__", True) as ws: + ws.RunNetOnce(init_net) + initialized_blobs = set(ws.Blobs()) + uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs] + for name, blob in zip(uninitialized, tensor_inputs): + ws.FeedBlob(name, blob) + + try: + ws.RunNetOnce(predict_net) + except RuntimeError as e: + logger.warning("Encountered RuntimeError: \n{}".format(str(e))) + + ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()} + blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)} + + logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path)) + save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes) + + return ws_blobs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_inference.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..deb886c0417285ed1d5ad85eb941fa1ac757cdab --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_inference.py @@ -0,0 +1,161 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +import numpy as np +from itertools import count +import torch +from caffe2.proto import caffe2_pb2 +from caffe2.python import core + +from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format +from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type + +logger = logging.getLogger(__name__) + + +# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ====== +class ProtobufModel(torch.nn.Module): + """ + Wrapper of a caffe2's protobuf model. + It works just like nn.Module, but running caffe2 under the hood. + Input/Output are tuple[tensor] that match the caffe2 net's external_input/output. 
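+
+    Example (illustrative; the tensors must match the net's uninitialized
+    external inputs, typically "data" and "im_info" for detection nets)::
+
+        wrapper = ProtobufModel(predict_net, init_net)
+        outputs = wrapper((data_tensor, im_info_tensor))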
+ """ + + _ids = count(0) + + def __init__(self, predict_net, init_net): + logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...") + super().__init__() + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + # create unique temporary workspace for each instance + self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids)) + self.net = core.Net(predict_net) + + logger.info("Running init_net once to fill the parameters ...") + with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws: + ws.RunNetOnce(init_net) + uninitialized_external_input = [] + for blob in self.net.Proto().external_input: + if blob not in ws.Blobs(): + uninitialized_external_input.append(blob) + ws.CreateBlob(blob) + ws.CreateNet(self.net) + + self._error_msgs = set() + self._input_blobs = uninitialized_external_input + + def _infer_output_devices(self, inputs): + """ + Returns: + list[str]: list of device for each external output + """ + + def _get_device_type(torch_tensor): + assert torch_tensor.device.type in ["cpu", "cuda"] + assert torch_tensor.device.index == 0 + return torch_tensor.device.type + + predict_net = self.net.Proto() + input_device_types = { + (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs) + } + device_type_map = infer_device_type( + predict_net, known_status=input_device_types, device_name_style="pytorch" + ) + ssa, versions = core.get_ssa(predict_net) + versioned_outputs = [(name, versions[name]) for name in predict_net.external_output] + output_devices = [device_type_map[outp] for outp in versioned_outputs] + return output_devices + + def forward(self, inputs): + """ + Args: + inputs (tuple[torch.Tensor]) + + Returns: + tuple[torch.Tensor] + """ + assert len(inputs) == len(self._input_blobs), ( + f"Length of inputs ({len(inputs)}) " + f"doesn't match the required input blobs: {self._input_blobs}" + ) + + with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws: + for b, tensor in zip(self._input_blobs, inputs): + ws.FeedBlob(b, tensor) + + try: + ws.RunNet(self.net.Proto().name) + except RuntimeError as e: + if not str(e) in self._error_msgs: + self._error_msgs.add(str(e)) + logger.warning("Encountered new RuntimeError: \n{}".format(str(e))) + logger.warning("Catch the error and use partial results.") + + c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output] + # Remove outputs of current run, this is necessary in order to + # prevent fetching the result from previous run if the model fails + # in the middle. + for b in self.net.Proto().external_output: + # Needs to create uninitialized blob to make the net runable. + # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b), + # but there'no such API. 
+ ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).") + + # Cast output to torch.Tensor on the desired device + output_devices = ( + self._infer_output_devices(inputs) + if any(t.device.type != "cpu" for t in inputs) + else ["cpu" for _ in self.net.Proto().external_output] + ) + + outputs = [] + for name, c2_output, device in zip( + self.net.Proto().external_output, c2_outputs, output_devices + ): + if not isinstance(c2_output, np.ndarray): + raise RuntimeError( + "Invalid output for blob {}, received: {}".format(name, c2_output) + ) + outputs.append(torch.tensor(c2_output).to(device=device)) + return tuple(outputs) + + +class ProtobufDetectionModel(torch.nn.Module): + """ + A class works just like a pytorch meta arch in terms of inference, but running + caffe2 model under the hood. + """ + + def __init__(self, predict_net, init_net, *, convert_outputs=None): + """ + Args: + predict_net, init_net (core.Net): caffe2 nets + convert_outptus (callable): a function that converts caffe2 + outputs to the same format of the original pytorch model. + By default, use the one defined in the caffe2 meta_arch. + """ + super().__init__() + self.protobuf_model = ProtobufModel(predict_net, init_net) + self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0) + self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii") + + if convert_outputs is None: + meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN") + meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")] + self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net) + else: + self._convert_outputs = convert_outputs + + def _convert_inputs(self, batched_inputs): + # currently all models convert inputs in the same way + return convert_batched_inputs_to_c2_format( + batched_inputs, self.size_divisibility, self.device + ) + + def forward(self, batched_inputs): + c2_inputs = self._convert_inputs(batched_inputs) + c2_results = self.protobuf_model(c2_inputs) + c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results)) + return self._convert_outputs(batched_inputs, c2_inputs, c2_results) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..050751370255a8986bf75d02da38536f9abe9065 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py @@ -0,0 +1,419 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import functools +import io +import struct +import types +import torch + +from custom_detectron2.modeling import meta_arch +from custom_detectron2.modeling.box_regression import Box2BoxTransform +from custom_detectron2.modeling.roi_heads import keypoint_head +from custom_detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes + +from .c10 import Caffe2Compatible +from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn +from .shared import ( + alias, + check_set_pb_arg, + get_pb_arg_floats, + get_pb_arg_valf, + get_pb_arg_vali, + get_pb_arg_vals, + mock_torch_nn_functional_interpolate, +) + + +def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False): + """ + A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor]) + to detectron2's format (i.e. list of Instances instance). 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..050751370255a8986bf75d02da38536f9abe9065
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_modeling.py
@@ -0,0 +1,419 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import io
+import struct
+import types
+import torch
+
+from custom_detectron2.modeling import meta_arch
+from custom_detectron2.modeling.box_regression import Box2BoxTransform
+from custom_detectron2.modeling.roi_heads import keypoint_head
+from custom_detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
+
+from .c10 import Caffe2Compatible
+from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
+from .shared import (
+    alias,
+    check_set_pb_arg,
+    get_pb_arg_floats,
+    get_pb_arg_valf,
+    get_pb_arg_vali,
+    get_pb_arg_vals,
+    mock_torch_nn_functional_interpolate,
+)
+
+
+def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
+    """
+    A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
+    to detectron2's format (i.e. list of Instances instances).
+    This only works when the model follows Caffe2 detectron's naming convention.
+
+    Args:
+        image_sizes (List[List[int, int]]): [H, W] of every image.
+        tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
+
+        force_mask_on (bool): if true, it makes sure there will be pred_masks even
+            if the masks are not found in tensor_outputs (usually due to model crash)
+    """
+
+    results = [Instances(image_size) for image_size in image_sizes]
+
+    batch_splits = tensor_outputs.get("batch_splits", None)
+    if batch_splits:
+        raise NotImplementedError()
+    assert len(image_sizes) == 1
+    result = results[0]
+
+    bbox_nms = tensor_outputs["bbox_nms"]
+    score_nms = tensor_outputs["score_nms"]
+    class_nms = tensor_outputs["class_nms"]
+    # Detection will always succeed because Conv supports 0-batch
+    assert bbox_nms is not None
+    assert score_nms is not None
+    assert class_nms is not None
+    if bbox_nms.shape[1] == 5:
+        result.pred_boxes = RotatedBoxes(bbox_nms)
+    else:
+        result.pred_boxes = Boxes(bbox_nms)
+    result.scores = score_nms
+    result.pred_classes = class_nms.to(torch.int64)
+
+    mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
+    if mask_fcn_probs is not None:
+        # finish the mask pred
+        mask_probs_pred = mask_fcn_probs
+        num_masks = mask_probs_pred.shape[0]
+        class_pred = result.pred_classes
+        indices = torch.arange(num_masks, device=class_pred.device)
+        mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
+        result.pred_masks = mask_probs_pred
+    elif force_mask_on:
+        # NOTE: there's no way to know the height/width of mask here, it won't be
+        # used anyway when batch size is 0, so just set them to 0.
+        result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
+
+    keypoints_out = tensor_outputs.get("keypoints_out", None)
+    kps_score = tensor_outputs.get("kps_score", None)
+    if keypoints_out is not None:
+        # keypoints_out: [N, 4, #keypoints], where 4 is in order of (x, y, score, prob)
+        keypoints_tensor = keypoints_out
+        # NOTE: it's possible that prob is not calculated if "should_output_softmax"
+        # is set to False in HeatmapMaxKeypoint, so just use the raw score; it seems
+        # not to affect mAP. TODO: check more carefully.
+        keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
+        result.pred_keypoints = keypoint_xyp
+    elif kps_score is not None:
+        # keypoint heatmap to sparse data structure
+        pred_keypoint_logits = kps_score
+        keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
+
+    return results
+
+
+def _cast_to_f32(f64):
+    return struct.unpack("f", struct.pack("f", f64))[0]
+
+
+def set_caffe2_compatible_tensor_mode(model, enable=True):
+    def _fn(m):
+        if isinstance(m, Caffe2Compatible):
+            m.tensor_mode = enable
+
+    model.apply(_fn)
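The mask branch above relies on an advanced-indexing trick to pick, for each detection, the mask channel of its predicted class. A self-contained illustration of just that step (shapes are arbitrary toy values):

```python
# From [N, C, H, W] per-class mask probabilities to [N, 1, H, W],
# keeping only the channel matching each detection's predicted class.
import torch

num_dets, num_classes, h, w = 4, 80, 28, 28
mask_fcn_probs = torch.rand(num_dets, num_classes, h, w)
pred_classes = torch.randint(num_classes, (num_dets,))

indices = torch.arange(num_dets)
pred_masks = mask_fcn_probs[indices, pred_classes][:, None]
assert pred_masks.shape == (num_dets, 1, h, w)
```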
+
+
+def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
+    """
+    See get_caffe2_inputs() below.
+    """
+    assert all(isinstance(x, dict) for x in batched_inputs)
+    assert all(x["image"].dim() == 3 for x in batched_inputs)
+
+    images = [x["image"] for x in batched_inputs]
+    images = ImageList.from_tensors(images, size_divisibility)
+
+    im_info = []
+    for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
+        target_height = input_per_image.get("height", image_size[0])
+        target_width = input_per_image.get("width", image_size[1])  # noqa
+        # NOTE: The scale inside im_info is kept as convention and for providing
+        # post-processing information if further processing is needed. For
+        # current Caffe2 model definitions that don't include post-processing inside
+        # the model, this number is not used.
+        # NOTE: There can be a slight difference between width and height
+        # scales; using a single number can result in numerical differences
+        # compared with D2's post-processing.
+        scale = target_height / image_size[0]
+        im_info.append([image_size[0], image_size[1], scale])
+    im_info = torch.Tensor(im_info)
+
+    return images.tensor.to(device), im_info.to(device)
+
+
+class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
+    """
+    Base class for a caffe2-compatible implementation of a meta architecture.
+    The forward is traceable and its traced graph can be converted to a caffe2
+    graph through ONNX.
+    """
+
+    def __init__(self, cfg, torch_model):
+        """
+        Args:
+            cfg (CfgNode):
+            torch_model (nn.Module): the detectron2 model (meta_arch) to be
+                converted.
+        """
+        super().__init__()
+        self._wrapped_model = torch_model
+        self.eval()
+        set_caffe2_compatible_tensor_mode(self, True)
+
+    def get_caffe2_inputs(self, batched_inputs):
+        """
+        Convert pytorch-style structured inputs to caffe2-style inputs that
+        are tuples of tensors.
+
+        Args:
+            batched_inputs (list[dict]): inputs to a detectron2 model
+                in its standard format. Each dict has "image" (CHW tensor), and optionally
+                "height" and "width".
+
+        Returns:
+            tuple[Tensor]:
+                tuple of tensors that will be the inputs to the
+                :meth:`forward` method. For existing models, the first
+                is an NCHW tensor (padded and batched); the second is
+                an im_info Nx3 tensor, where the rows are
+                (height, width, unused legacy parameter)
+        """
+        return convert_batched_inputs_to_c2_format(
+            batched_inputs,
+            self._wrapped_model.backbone.size_divisibility,
+            self._wrapped_model.device,
+        )
+
+    def encode_additional_info(self, predict_net, init_net):
+        """
+        Save extra metadata that will be used by inference in the output protobuf.
+        """
+        pass
+
+    def forward(self, inputs):
+        """
+        Run the forward in caffe2-style. It has to use caffe2-compatible ops
+        and the method will be used for tracing.
+
+        Args:
+            inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_inputs`.
+                They will be the inputs of the converted caffe2 graph.
+
+        Returns:
+            tuple[Tensor]: output tensors. They will be the outputs of the
+                converted caffe2 graph.
+        """
+        raise NotImplementedError
+
+    def _caffe2_preprocess_image(self, inputs):
+        """
+        Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
+        It normalizes the input images, and the final caffe2 graph assumes the
+        inputs have been batched already.
+        """
+        data, im_info = inputs
+        data = alias(data, "data")
+        im_info = alias(im_info, "im_info")
+        mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
+        normalized_data = (data - mean) / std
+        normalized_data = alias(normalized_data, "normalized_data")
+
+        # Pack (data, im_info) into ImageList which is recognized by self.inference.
+        images = ImageList(tensor=normalized_data, image_sizes=im_info)
+        return images
+
+    @staticmethod
+    def get_outputs_converter(predict_net, init_net):
+        """
+        Creates a function that converts the outputs of the caffe2 model to
+        detectron2's standard format.
+        The function uses information in `predict_net` and `init_net` that are
+        available at inference time. Therefore the function logic can be used in inference.
+ + The returned function has the following signature: + + def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs + + Where + + * batched_inputs (list[dict]): the original input format of the meta arch + * c2_inputs (tuple[Tensor]): the caffe2 inputs. + * c2_results (dict[str, Tensor]): the caffe2 output format, + corresponding to the outputs of the :meth:`forward` function. + * detectron2_outputs: the original output format of the meta arch. + + This function can be used to compare the outputs of the original meta arch and + the converted caffe2 graph. + + Returns: + callable: a callable of the above signature. + """ + raise NotImplementedError + + +class Caffe2GeneralizedRCNN(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.GeneralizedRCNN) + torch_model = patch_generalized_rcnn(torch_model) + super().__init__(cfg, torch_model) + + try: + use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT + except AttributeError: + use_heatmap_max_keypoint = False + self.roi_heads_patcher = ROIHeadsPatcher( + self._wrapped_model.roi_heads, use_heatmap_max_keypoint + ) + + def encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN") + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + if not self.tensor_mode: + return self._wrapped_model.inference(inputs) + images = self._caffe2_preprocess_image(inputs) + features = self._wrapped_model.backbone(images.tensor) + proposals, _ = self._wrapped_model.proposal_generator(images, features) + with self.roi_heads_patcher.mock_roi_heads(): + detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) + return tuple(detector_results[0].flatten()) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + def f(batched_inputs, c2_inputs, c2_results): + _, im_info = c2_inputs + image_sizes = [[int(im[0]), int(im[1])] for im in im_info] + results = assemble_rcnn_outputs_by_name(image_sizes, c2_results) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +class Caffe2RetinaNet(Caffe2MetaArch): + def __init__(self, cfg, torch_model): + assert isinstance(torch_model, meta_arch.RetinaNet) + super().__init__(cfg, torch_model) + + @mock_torch_nn_functional_interpolate() + def forward(self, inputs): + assert self.tensor_mode + images = self._caffe2_preprocess_image(inputs) + + # explicitly return the images sizes to avoid removing "im_info" by ONNX + # since it's not used in the forward path + return_tensors = [images.image_sizes] + + features = self._wrapped_model.backbone(images.tensor) + features = [features[f] for f in self._wrapped_model.head_in_features] + for i, feature_i in enumerate(features): + features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True) + return_tensors.append(features[i]) + + pred_logits, pred_anchor_deltas = self._wrapped_model.head(features) + for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)): + return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i))) + return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i))) + + return tuple(return_tensors) + + def 
encode_additional_info(self, predict_net, init_net): + size_divisibility = self._wrapped_model.backbone.size_divisibility + check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) + check_set_pb_arg( + predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") + ) + check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet") + + # Inference parameters: + check_set_pb_arg( + predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh) + ) + check_set_pb_arg( + predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates + ) + check_set_pb_arg( + predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh) + ) + check_set_pb_arg( + predict_net, + "max_detections_per_image", + "i", + self._wrapped_model.max_detections_per_image, + ) + + check_set_pb_arg( + predict_net, + "bbox_reg_weights", + "floats", + [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights], + ) + self._encode_anchor_generator_cfg(predict_net) + + def _encode_anchor_generator_cfg(self, predict_net): + # serialize anchor_generator for future use + serialized_anchor_generator = io.BytesIO() + torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator) + # Ideally we can put anchor generating inside the model, then we don't + # need to store this information. + bytes = serialized_anchor_generator.getvalue() + check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes) + + @staticmethod + def get_outputs_converter(predict_net, init_net): + self = types.SimpleNamespace() + serialized_anchor_generator = io.BytesIO( + get_pb_arg_vals(predict_net, "serialized_anchor_generator", None) + ) + self.anchor_generator = torch.load(serialized_anchor_generator) + bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None) + self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights)) + self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None) + self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None) + self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None) + self.max_detections_per_image = get_pb_arg_vali( + predict_net, "max_detections_per_image", None + ) + + # hack to reuse inference code from RetinaNet + for meth in [ + "forward_inference", + "inference_single_image", + "_transpose_dense_predictions", + "_decode_multi_level_predictions", + "_decode_per_level_predictions", + ]: + setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self)) + + def f(batched_inputs, c2_inputs, c2_results): + _, im_info = c2_inputs + image_sizes = [[int(im[0]), int(im[1])] for im in im_info] + dummy_images = ImageList( + torch.randn( + ( + len(im_info), + 3, + ) + + tuple(image_sizes[0]) + ), + image_sizes, + ) + + num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")]) + pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)] + pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)] + + # For each feature level, feature should have the same batch size and + # spatial dimension as the box_cls and box_delta. 
+ dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits] + # self.num_classess can be inferred + self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4) + + results = self.forward_inference( + dummy_images, dummy_features, [pred_logits, pred_anchor_deltas] + ) + return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) + + return f + + +META_ARCH_CAFFE2_EXPORT_TYPE_MAP = { + "GeneralizedRCNN": Caffe2GeneralizedRCNN, + "RetinaNet": Caffe2RetinaNet, +} diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_patch.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..40d50429be5666006a760a3add98981a3d9b78c4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/caffe2_patch.py @@ -0,0 +1,152 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import contextlib +from unittest import mock +import torch + +from custom_detectron2.modeling import poolers +from custom_detectron2.modeling.proposal_generator import rpn +from custom_detectron2.modeling.roi_heads import keypoint_head, mask_head +from custom_detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers + +from .c10 import ( + Caffe2Compatible, + Caffe2FastRCNNOutputsInference, + Caffe2KeypointRCNNInference, + Caffe2MaskRCNNInference, + Caffe2ROIPooler, + Caffe2RPN, +) + + +class GenericMixin(object): + pass + + +class Caffe2CompatibleConverter(object): + """ + A GenericUpdater which implements the `create_from` interface, by modifying + module object and assign it with another class replaceCls. + """ + + def __init__(self, replaceCls): + self.replaceCls = replaceCls + + def create_from(self, module): + # update module's class to the new class + assert isinstance(module, torch.nn.Module) + if issubclass(self.replaceCls, GenericMixin): + # replaceCls should act as mixin, create a new class on-the-fly + new_class = type( + "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), + (self.replaceCls, module.__class__), + {}, # {"new_method": lambda self: ...}, + ) + module.__class__ = new_class + else: + # replaceCls is complete class, this allow arbitrary class swap + module.__class__ = self.replaceCls + + # initialize Caffe2Compatible + if isinstance(module, Caffe2Compatible): + module.tensor_mode = False + + return module + + +def patch(model, target, updater, *args, **kwargs): + """ + recursively (post-order) update all modules with the target type and its + subclasses, make a initialization/composition/inheritance/... via the + updater.create_from. 
+ """ + for name, module in model.named_children(): + model._modules[name] = patch(module, target, updater, *args, **kwargs) + if isinstance(model, target): + return updater.create_from(model, *args, **kwargs) + return model + + +def patch_generalized_rcnn(model): + ccc = Caffe2CompatibleConverter + model = patch(model, rpn.RPN, ccc(Caffe2RPN)) + model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) + + return model + + +@contextlib.contextmanager +def mock_fastrcnn_outputs_inference( + tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers +): + with mock.patch.object( + box_predictor_type, + "inference", + autospec=True, + side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): + with mock.patch( + "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +@contextlib.contextmanager +def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): + with mock.patch( + "{}.keypoint_rcnn_inference".format(patched_module), + side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint), + ) as mocked_func: + yield + if check: + assert mocked_func.call_count > 0 + + +class ROIHeadsPatcher: + def __init__(self, heads, use_heatmap_max_keypoint): + self.heads = heads + self.use_heatmap_max_keypoint = use_heatmap_max_keypoint + + @contextlib.contextmanager + def mock_roi_heads(self, tensor_mode=True): + """ + Patching several inference functions inside ROIHeads and its subclasses + + Args: + tensor_mode (bool): whether the inputs/outputs are caffe2's tensor + format or not. Default to True. + """ + # NOTE: this requries the `keypoint_rcnn_inference` and `mask_rcnn_inference` + # are called inside the same file as BaseXxxHead due to using mock.patch. + kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__ + mask_head_mod = mask_head.BaseMaskRCNNHead.__module__ + + mock_ctx_managers = [ + mock_fastrcnn_outputs_inference( + tensor_mode=tensor_mode, + check=True, + box_predictor_type=type(self.heads.box_predictor), + ) + ] + if getattr(self.heads, "keypoint_on", False): + mock_ctx_managers += [ + mock_keypoint_rcnn_inference( + tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint + ) + ] + if getattr(self.heads, "mask_on", False): + mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)] + + with contextlib.ExitStack() as stack: # python 3.3+ + for mgr in mock_ctx_managers: + stack.enter_context(mgr) + yield diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py new file mode 100644 index 0000000000000000000000000000000000000000..36c757b82ff1b2a106725c14ae959f08f035b6ba --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py @@ -0,0 +1,330 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..36c757b82ff1b2a106725c14ae959f08f035b6ba
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/flatten.py
@@ -0,0 +1,330 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import collections
+from dataclasses import dataclass
+from typing import Callable, List, Optional, Tuple
+import torch
+from torch import nn
+
+from custom_detectron2.structures import Boxes, Instances, ROIMasks
+from custom_detectron2.utils.registry import _convert_target_to_string, locate
+
+from .torchscript_patch import patch_builtin_len
+
+
+@dataclass
+class Schema:
+    """
+    A Schema defines how to flatten a possibly hierarchical object into a tuple of
+    primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.
+
+    PyTorch does not support tracing a function that produces rich output
+    structures (e.g. dict, Instances, Boxes). To trace such a function, we
+    flatten the rich object into a tuple of tensors, and return this tuple of tensors
+    instead. Meanwhile, we also need to know how to "rebuild" the original object
+    from the flattened results, so we can evaluate the flattened results.
+    A Schema defines how to flatten an object, and while flattening it, it records
+    the necessary schemas so that the object can be rebuilt using the flattened outputs.
+
+    The flattened object and the schema object are returned by the ``.flatten``
+    classmethod. Then the original object can be rebuilt with the ``__call__``
+    method of the schema.
+
+    A Schema is a dataclass that can be serialized easily.
+    """
+
+    # inspired by FetchMapper in tensorflow/python/client/session.py
+
+    @classmethod
+    def flatten(cls, obj):
+        raise NotImplementedError
+
+    def __call__(self, values):
+        raise NotImplementedError
+
+    @staticmethod
+    def _concat(values):
+        ret = ()
+        sizes = []
+        for v in values:
+            assert isinstance(v, tuple), "Flattened results must be a tuple"
+            ret = ret + v
+            sizes.append(len(v))
+        return ret, sizes
+
+    @staticmethod
+    def _split(values, sizes):
+        if len(sizes):
+            expected_len = sum(sizes)
+            assert (
+                len(values) == expected_len
+            ), f"Values has length {len(values)} but expect length {expected_len}."
+        ret = []
+        for k in range(len(sizes)):
+            begin, end = sum(sizes[:k]), sum(sizes[: k + 1])
+            ret.append(values[begin:end])
+        return ret
+
+
+@dataclass
+class ListSchema(Schema):
+    schemas: List[Schema]  # the schemas that define how to flatten each element in the list
+    sizes: List[int]  # the flattened length of each element
+
+    def __call__(self, values):
+        values = self._split(values, self.sizes)
+        if len(values) != len(self.schemas):
+            raise ValueError(
+                f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
+            )
+        values = [m(v) for m, v in zip(self.schemas, values)]
+        return list(values)
+
+    @classmethod
+    def flatten(cls, obj):
+        res = [flatten_to_tuple(k) for k in obj]
+        values, sizes = cls._concat([k[0] for k in res])
+        return values, cls([k[1] for k in res], sizes)
+
+
+@dataclass
+class TupleSchema(ListSchema):
+    def __call__(self, values):
+        return tuple(super().__call__(values))
+
+
+@dataclass
+class IdentitySchema(Schema):
+    def __call__(self, values):
+        return values[0]
+
+    @classmethod
+    def flatten(cls, obj):
+        return (obj,), cls()
+
+
+@dataclass
+class DictSchema(ListSchema):
+    keys: List[str]
+
+    def __call__(self, values):
+        values = super().__call__(values)
+        return dict(zip(self.keys, values))
+
+    @classmethod
+    def flatten(cls, obj):
+        for k in obj.keys():
+            if not isinstance(k, str):
+                raise KeyError("Only support flattening dictionaries if keys are str.")
+        keys = sorted(obj.keys())
+        values = [obj[k] for k in keys]
+        ret, schema = ListSchema.flatten(values)
+        return ret, cls(schema.schemas, schema.sizes, keys)
+
+
+@dataclass
+class InstancesSchema(DictSchema):
+    def __call__(self, values):
+        image_size, fields = values[-1], values[:-1]
+        fields = super().__call__(fields)
+        return Instances(image_size, **fields)
+
+    @classmethod
+    def flatten(cls, obj):
+        ret, schema = super().flatten(obj.get_fields())
+        size = obj.image_size
+        if not isinstance(size, torch.Tensor):
+            size = torch.tensor(size)
+        return ret + (size,), schema
+
+
+@dataclass
+class TensorWrapSchema(Schema):
+    """
+    For classes that are simple wrappers of tensors, e.g.
+    Boxes, RotatedBoxes, BitMasks
+    """
+
+    class_name: str
+
+    def __call__(self, values):
+        return locate(self.class_name)(values[0])
+
+    @classmethod
+    def flatten(cls, obj):
+        return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
+
+
+# if more custom structures needed in the future, can allow
+# passing in extra schemas for custom types
+def flatten_to_tuple(obj):
+    """
+    Flatten an object so it can be used for PyTorch tracing.
+    Also returns how to rebuild the original object from the flattened outputs.
+
+    Returns:
+        res (tuple): the flattened results that can be used as tracing outputs
+        schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
+            It is a pure dataclass that can be serialized.
+    """
+    schemas = [
+        ((str, bytes), IdentitySchema),
+        (list, ListSchema),
+        (tuple, TupleSchema),
+        (collections.abc.Mapping, DictSchema),
+        (Instances, InstancesSchema),
+        ((Boxes, ROIMasks), TensorWrapSchema),
+    ]
+    for klass, schema in schemas:
+        if isinstance(obj, klass):
+            F = schema
+            break
+    else:
+        F = IdentitySchema
+
+    return F.flatten(obj)
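A hedged usage sketch of `flatten_to_tuple`: flatten a nested container of tensors into a plain tuple for tracing, then rebuild it via the returned schema. This assumes the module above is importable; the container contents are arbitrary toy values.

```python
# Round-trip a nested structure through flatten_to_tuple and its schema.
import torch

nested = {"boxes": torch.rand(3, 4), "meta": [torch.tensor(1.0), torch.tensor(2.0)]}
flat, schema = flatten_to_tuple(nested)

assert all(isinstance(t, torch.Tensor) for t in flat)  # ready for tracing
rebuilt = schema(flat)  # dict with the same keys and values
assert torch.equal(rebuilt["boxes"], nested["boxes"])
```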
+
+
+class TracingAdapter(nn.Module):
+    """
+    A model may take rich input/output formats (e.g. dict or custom classes),
+    but `torch.jit.trace` requires a tuple of tensors as input/output.
+    This adapter flattens the input/output format of a model so it becomes traceable.
+
+    It also records the necessary schema to rebuild the model's inputs/outputs from flattened
+    inputs/outputs.
+
+    Example:
+    ::
+        outputs = model(inputs)  # inputs/outputs may be rich structure
+        adapter = TracingAdapter(model, inputs)
+
+        # can now trace the model, with adapter.flattened_inputs, or another
+        # tuple of tensors with the same length and meaning
+        traced = torch.jit.trace(adapter, adapter.flattened_inputs)
+
+        # traced model can only produce flattened outputs (tuple of tensors)
+        flattened_outputs = traced(*adapter.flattened_inputs)
+        # adapter knows the schema to convert it back (new_outputs == outputs)
+        new_outputs = adapter.outputs_schema(flattened_outputs)
+    """
+
+    flattened_inputs: Tuple[torch.Tensor] = None
+    """
+    Flattened version of inputs given to this class's constructor.
+    """
+
+    inputs_schema: Schema = None
+    """
+    Schema of the inputs given to this class's constructor.
+    """
+
+    outputs_schema: Schema = None
+    """
+    Schema of the output produced by calling the given model with inputs.
+    """
+
+    def __init__(
+        self,
+        model: nn.Module,
+        inputs,
+        inference_func: Optional[Callable] = None,
+        allow_non_tensor: bool = False,
+    ):
+        """
+        Args:
+            model: an nn.Module
+            inputs: An input argument or a tuple of input arguments used to call model.
+                After flattening, it has to only consist of tensors.
+            inference_func: a callable that takes (model, *inputs), calls the
+                model with inputs, and returns outputs. By default it
+                is ``lambda model, *inputs: model(*inputs)``. Can be overridden
+                if you need to call the model differently.
+            allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
+                This option will filter out non-tensor objects to make the
+                model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
+                used anymore because inputs/outputs cannot be rebuilt from pure tensors.
+                This is useful when you're only interested in the single trace of
+                execution (e.g. for flop count), but not interested in
+                generalizing the traced graph to new inputs.
+        """
+        super().__init__()
+        if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
+            model = model.module
+        self.model = model
+        if not isinstance(inputs, tuple):
+            inputs = (inputs,)
+        self.inputs = inputs
+        self.allow_non_tensor = allow_non_tensor
+
+        if inference_func is None:
+            inference_func = lambda model, *inputs: model(*inputs)  # noqa
+        self.inference_func = inference_func
+
+        self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
+
+        if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
+            return
+        if self.allow_non_tensor:
+            self.flattened_inputs = tuple(
+                [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
+            )
+            self.inputs_schema = None
+        else:
+            for input in self.flattened_inputs:
+                if not isinstance(input, torch.Tensor):
+                    raise ValueError(
+                        "Inputs for tracing must only contain tensors. "
+                        f"Got a {type(input)} instead."
+                    )
+
+    def forward(self, *args: torch.Tensor):
+        with torch.no_grad(), patch_builtin_len():
+            if self.inputs_schema is not None:
+                inputs_orig_format = self.inputs_schema(args)
+            else:
+                if len(args) != len(self.flattened_inputs) or any(
+                    x is not y for x, y in zip(args, self.flattened_inputs)
+                ):
+                    raise ValueError(
+                        "TracingAdapter does not contain valid inputs_schema."
+                        " So it cannot generalize to other inputs and must be"
+                        " traced with `.flattened_inputs`."
+                    )
+                inputs_orig_format = self.inputs
+
+            outputs = self.inference_func(self.model, *inputs_orig_format)
+            flattened_outputs, schema = flatten_to_tuple(outputs)
+
+            flattened_output_tensors = tuple(
+                [x for x in flattened_outputs if isinstance(x, torch.Tensor)]
+            )
+            if len(flattened_output_tensors) < len(flattened_outputs):
+                if self.allow_non_tensor:
+                    flattened_outputs = flattened_output_tensors
+                    self.outputs_schema = None
+                else:
+                    raise ValueError(
+                        "Model cannot be traced because some model outputs "
+                        "cannot flatten to tensors."
+                    )
+            else:  # schema is valid
+                if self.outputs_schema is None:
+                    self.outputs_schema = schema
+                else:
+                    assert self.outputs_schema == schema, (
+                        "Model should always return outputs with the same "
+                        "structure so it can be traced!"
+                    )
+            return flattened_outputs
+
+    def _create_wrapper(self, traced_model):
+        """
+        Return a function that has an input/output interface the same as the
+        original model, but it calls the given traced model under the hood.
+        """
+
+        def forward(*args):
+            flattened_inputs, _ = flatten_to_tuple(args)
+            flattened_outputs = traced_model(*flattened_inputs)
+            return self.outputs_schema(flattened_outputs)
+
+        return forward
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/shared.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..53ba9335e26819f9381115eba17bbbe3816b469c
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/shared.py
@@ -0,0 +1,1039 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import collections
+import copy
+import functools
+import logging
+import numpy as np
+import os
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from unittest import mock
+import caffe2.python.utils as putils
+import torch
+import torch.nn.functional as F
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core, net_drawer, workspace
+from torch.nn.functional import interpolate as interp
+
+logger = logging.getLogger(__name__)
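A sketch of tracing a dict-returning module with the `TracingAdapter` defined above. The toy model here is a stand-in of my own; in practice `model` would be a detectron2 meta arch and `inputs` its standard batched inputs.

```python
# Trace a module whose output is a dict, via the adapter's flatten/rebuild.
import torch
from torch import nn


class ToyModel(nn.Module):
    def forward(self, x):
        return {"sum": x.sum(dim=1), "double": x * 2}


model = ToyModel().eval()
inputs = (torch.rand(2, 3),)

adapter = TracingAdapter(model, inputs)
traced = torch.jit.trace(adapter, adapter.flattened_inputs)

flat_out = traced(*adapter.flattened_inputs)  # tuple of tensors
rich_out = adapter.outputs_schema(flat_out)   # back to a dict
assert set(rich_out) == {"sum", "double"}
```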
+ """ + src = t.device + dst = torch.device(device_str) + + if src == dst: + return t + elif src.type == "cuda" and dst.type == "cpu": + return torch.ops._caffe2.CopyGPUToCPU(t) + elif src.type == "cpu" and dst.type == "cuda": + return torch.ops._caffe2.CopyCPUToGPU(t) + else: + raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst)) + + +# ==== torch/utils_toffee/interpolate.py ======================================= + + +# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py +def BilinearInterpolation(tensor_in, up_scale): + assert up_scale % 2 == 0, "Scale should be even" + + def upsample_filt(size): + factor = (size + 1) // 2 + if size % 2 == 1: + center = factor - 1 + else: + center = factor - 0.5 + + og = np.ogrid[:size, :size] + return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) + + kernel_size = int(up_scale) * 2 + bil_filt = upsample_filt(kernel_size) + + dim = int(tensor_in.shape[1]) + kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32) + kernel[range(dim), range(dim), :, :] = bil_filt + + tensor_out = F.conv_transpose2d( + tensor_in, + weight=to_device(torch.Tensor(kernel), tensor_in.device), + bias=None, + stride=int(up_scale), + padding=int(up_scale / 2), + ) + + return tensor_out + + +# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if +# using dynamic `scale_factor` rather than static `size`. (T43166860) +# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly. +def onnx_compatibale_interpolate( + input, size=None, scale_factor=None, mode="nearest", align_corners=None +): + # NOTE: The input dimensions are interpreted in the form: + # `mini-batch x channels x [optional depth] x [optional height] x width`. + if size is None and scale_factor is not None: + if input.dim() == 4: + if isinstance(scale_factor, (int, float)): + height_scale, width_scale = (scale_factor, scale_factor) + else: + assert isinstance(scale_factor, (tuple, list)) + assert len(scale_factor) == 2 + height_scale, width_scale = scale_factor + + assert not align_corners, "No matching C2 op for align_corners == True" + if mode == "nearest": + return torch.ops._caffe2.ResizeNearest( + input, order="NCHW", width_scale=width_scale, height_scale=height_scale + ) + elif mode == "bilinear": + logger.warning( + "Use F.conv_transpose2d for bilinear interpolate" + " because there's no such C2 op, this may cause significant" + " slowdown and the boundary pixels won't be as same as" + " using F.interpolate due to padding." 
+ ) + assert height_scale == width_scale + return BilinearInterpolation(input, up_scale=height_scale) + logger.warning("Output size is not static, it might cause ONNX conversion issue") + + return interp(input, size, scale_factor, mode, align_corners) + + +def mock_torch_nn_functional_interpolate(): + def decorator(func): + @functools.wraps(func) + def _mock_torch_nn_functional_interpolate(*args, **kwargs): + if torch.onnx.is_in_onnx_export(): + with mock.patch( + "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate + ): + return func(*args, **kwargs) + else: + return func(*args, **kwargs) + + return _mock_torch_nn_functional_interpolate + + return decorator + + +# ==== torch/utils_caffe2/ws_utils.py ========================================== + + +class ScopedWS(object): + def __init__(self, ws_name, is_reset, is_cleanup=False): + self.ws_name = ws_name + self.is_reset = is_reset + self.is_cleanup = is_cleanup + self.org_ws = "" + + def __enter__(self): + self.org_ws = workspace.CurrentWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.ws_name, True) + if self.is_reset: + workspace.ResetWorkspace() + + return workspace + + def __exit__(self, *args): + if self.is_cleanup: + workspace.ResetWorkspace() + if self.ws_name is not None: + workspace.SwitchWorkspace(self.org_ws) + + +def fetch_any_blob(name): + bb = None + try: + bb = workspace.FetchBlob(name) + except TypeError: + bb = workspace.FetchInt8Blob(name) + except Exception as e: + logger.error("Get blob {} error: {}".format(name, e)) + + return bb + + +# ==== torch/utils_caffe2/protobuf.py ========================================== + + +def get_pb_arg(pb, arg_name): + for x in pb.arg: + if x.name == arg_name: + return x + return None + + +def get_pb_arg_valf(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.f if arg is not None else default_val + + +def get_pb_arg_floats(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(float, arg.floats)) if arg is not None else default_val + + +def get_pb_arg_ints(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(map(int, arg.ints)) if arg is not None else default_val + + +def get_pb_arg_vali(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.i if arg is not None else default_val + + +def get_pb_arg_vals(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return arg.s if arg is not None else default_val + + +def get_pb_arg_valstrings(pb, arg_name, default_val): + arg = get_pb_arg(pb, arg_name) + return list(arg.strings) if arg is not None else default_val + + +def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False): + arg = get_pb_arg(pb, arg_name) + if arg is None: + arg = putils.MakeArgument(arg_name, arg_value) + assert hasattr(arg, arg_attr) + pb.arg.extend([arg]) + if allow_override and getattr(arg, arg_attr) != arg_value: + logger.warning( + "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value) + ) + setattr(arg, arg_attr, arg_value) + else: + assert arg is not None + assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format( + getattr(arg, arg_attr), arg_value + ) + + +def _create_const_fill_op_from_numpy(name, tensor, device_option=None): + assert type(tensor) == np.ndarray + kTypeNameMapper = { + np.dtype("float32"): "GivenTensorFill", + np.dtype("int32"): "GivenTensorIntFill", + np.dtype("int64"): "GivenTensorInt64Fill", + np.dtype("uint8"): 
"GivenTensorStringFill", + } + + args_dict = {} + if tensor.dtype == np.dtype("uint8"): + args_dict.update({"values": [str(tensor.data)], "shape": [1]}) + else: + args_dict.update({"values": tensor, "shape": tensor.shape}) + + if device_option is not None: + args_dict["device_option"] = device_option + + return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict) + + +def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor): + assert type(int8_tensor) == workspace.Int8Tensor + kTypeNameMapper = { + np.dtype("int32"): "Int8GivenIntTensorFill", + np.dtype("uint8"): "Int8GivenTensorFill", + } + + tensor = int8_tensor.data + assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")] + values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor + + return core.CreateOperator( + kTypeNameMapper[tensor.dtype], + [], + [name], + values=values, + shape=tensor.shape, + Y_scale=int8_tensor.scale, + Y_zero_point=int8_tensor.zero_point, + ) + + +def create_const_fill_op( + name: str, + blob: Union[np.ndarray, workspace.Int8Tensor], + device_option: Optional[caffe2_pb2.DeviceOption] = None, +) -> caffe2_pb2.OperatorDef: + """ + Given a blob object, return the Caffe2 operator that creates this blob + as constant. Currently support NumPy tensor and Caffe2 Int8Tensor. + """ + + tensor_type = type(blob) + assert tensor_type in [ + np.ndarray, + workspace.Int8Tensor, + ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format( + name, type(blob) + ) + + if tensor_type == np.ndarray: + return _create_const_fill_op_from_numpy(name, blob, device_option) + elif tensor_type == workspace.Int8Tensor: + assert device_option is None + return _create_const_fill_op_from_c2_int8_tensor(name, blob) + + +def construct_init_net_from_params( + params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None +) -> caffe2_pb2.NetDef: + """ + Construct the init_net from params dictionary + """ + init_net = caffe2_pb2.NetDef() + device_options = device_options or {} + for name, blob in params.items(): + if isinstance(blob, str): + logger.warning( + ( + "Blob {} with type {} is not supported in generating init net," + " skipped.".format(name, type(blob)) + ) + ) + continue + init_net.op.extend( + [create_const_fill_op(name, blob, device_option=device_options.get(name, None))] + ) + init_net.external_output.append(name) + return init_net + + +def get_producer_map(ssa): + """ + Return dict from versioned blob to (i, j), + where i is index of producer op, j is the index of output of that op. + """ + producer_map = {} + for i in range(len(ssa)): + outputs = ssa[i][1] + for j, outp in enumerate(outputs): + producer_map[outp] = (i, j) + return producer_map + + +def get_consumer_map(ssa): + """ + Return dict from versioned blob to list of (i, j), + where i is index of consumer op, j is the index of input of that op. + """ + consumer_map = collections.defaultdict(list) + for i in range(len(ssa)): + inputs = ssa[i][0] + for j, inp in enumerate(inputs): + consumer_map[inp].append((i, j)) + return consumer_map + + +def get_params_from_init_net( + init_net: caffe2_pb2.NetDef, +) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]: + """ + Take the output blobs from init_net by running it. 
+    Outputs:
+        params: dict from blob name to numpy array
+        device_options: dict from blob name to the device option of its creating op
+    """
+    # NOTE: this assumes that the params are determined by the producer op, with the
+    # only exception being CopyGPUToCPU, which is a CUDA op but returns a CPU tensor.
+    def _get_device_option(producer_op):
+        if producer_op.type == "CopyGPUToCPU":
+            return caffe2_pb2.DeviceOption()
+        else:
+            return producer_op.device_option
+
+    with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
+        ws.RunNetOnce(init_net)
+        params = {b: fetch_any_blob(b) for b in init_net.external_output}
+    ssa, versions = core.get_ssa(init_net)
+    producer_map = get_producer_map(ssa)
+    device_options = {
+        b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
+        for b in init_net.external_output
+    }
+    return params, device_options
+
+
+def _updater_raise(op, input_types, output_types):
+    raise RuntimeError(
+        "Failed to apply updater for op {} given input_types {} and"
+        " output_types {}".format(op, input_types, output_types)
+    )
+
+
+def _generic_status_identifier(
+    predict_net: caffe2_pb2.NetDef,
+    status_updater: Callable,
+    known_status: Dict[Tuple[str, int], Any],
+) -> Dict[Tuple[str, int], Any]:
+    """
+    Statically infer the status of each blob; the status can be e.g. device type
+    (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
+    is a versioned blob (Tuple[str, int]) in the format compatible with ssa.
+    Inputs:
+        predict_net: the caffe2 network
+        status_updater: a callable, given an op and the status of its input/output,
+            it returns the updated status of input/output. `None` is used for
+            representing unknown status.
+        known_status: a dict containing known status, used as initialization.
+    Outputs:
+        A dict mapping from versioned blob to its status
+    """
+    ssa, versions = core.get_ssa(predict_net)
+    versioned_ext_input = [(b, 0) for b in predict_net.external_input]
+    versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
+    all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
+
+    allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
+    assert all(k in allowed_vbs for k in known_status)
+    assert all(v is not None for v in known_status.values())
+    _known_status = copy.deepcopy(known_status)
+
+    def _check_and_update(key, value):
+        assert value is not None
+        if key in _known_status:
+            if _known_status[key] != value:
+                raise RuntimeError(
+                    "Conflicting status for {}, existing status {}, new status {}".format(
+                        key, _known_status[key], value
+                    )
+                )
+        _known_status[key] = value
+
+    def _update_i(op, ssa_i):
+        versioned_inputs = ssa_i[0]
+        versioned_outputs = ssa_i[1]
+
+        inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
+        outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
+
+        new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
+
+        for versioned_blob, status in zip(
+            versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
+        ):
+            if status is not None:
+                _check_and_update(versioned_blob, status)
+
+    for op, ssa_i in zip(predict_net.op, ssa):
+        _update_i(op, ssa_i)
+    for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
+        _update_i(op, ssa_i)
+
+    # NOTE: This strictly checks that every blob from predict_net must be assigned
+    # a known status. However sometimes it's impossible (e.g.
having deadend op), + # we may relax this constraint if + for k in all_versioned_blobs: + if k not in _known_status: + raise NotImplementedError( + "Can not infer the status for {}. Currently only support the case where" + " a single forward and backward pass can identify status for all blobs.".format(k) + ) + + return _known_status + + +def infer_device_type( + predict_net: caffe2_pb2.NetDef, + known_status: Dict[Tuple[str, int], Any], + device_name_style: str = "caffe2", +) -> Dict[Tuple[str, int], str]: + """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob""" + + assert device_name_style in ["caffe2", "pytorch"] + _CPU_STR = "cpu" + _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda" + + def _copy_cpu_to_gpu_updater(op, input_types, output_types): + if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR: + _updater_raise(op, input_types, output_types) + return ([_CPU_STR], [_GPU_STR]) + + def _copy_gpu_to_cpu_updater(op, input_types, output_types): + if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR: + _updater_raise(op, input_types, output_types) + return ([_GPU_STR], [_CPU_STR]) + + def _other_ops_updater(op, input_types, output_types): + non_none_types = [x for x in input_types + output_types if x is not None] + if len(non_none_types) > 0: + the_type = non_none_types[0] + if not all(x == the_type for x in non_none_types): + _updater_raise(op, input_types, output_types) + else: + the_type = None + return ([the_type for _ in op.input], [the_type for _ in op.output]) + + def _device_updater(op, *args, **kwargs): + return { + "CopyCPUToGPU": _copy_cpu_to_gpu_updater, + "CopyGPUToCPU": _copy_gpu_to_cpu_updater, + }.get(op.type, _other_ops_updater)(op, *args, **kwargs) + + return _generic_status_identifier(predict_net, _device_updater, known_status) + + +# ==== torch/utils_caffe2/vis.py =============================================== + + +def _modify_blob_names(ops, blob_rename_f): + ret = [] + + def _replace_list(blob_list, replaced_list): + del blob_list[:] + blob_list.extend(replaced_list) + + for x in ops: + cur = copy.deepcopy(x) + _replace_list(cur.input, list(map(blob_rename_f, cur.input))) + _replace_list(cur.output, list(map(blob_rename_f, cur.output))) + ret.append(cur) + + return ret + + +def _rename_blob(name, blob_sizes, blob_ranges): + def _list_to_str(bsize): + ret = ", ".join([str(x) for x in bsize]) + ret = "[" + ret + "]" + return ret + + ret = name + if blob_sizes is not None and name in blob_sizes: + ret += "\n" + _list_to_str(blob_sizes[name]) + if blob_ranges is not None and name in blob_ranges: + ret += "\n" + _list_to_str(blob_ranges[name]) + + return ret + + +# graph_name could not contain word 'graph' +def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None): + blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges) + return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f) + + +def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None): + graph = None + ops = net.op + if blob_rename_func is not None: + ops = _modify_blob_names(ops, blob_rename_func) + if not op_only: + graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB") + else: + graph = net_drawer.GetPydotGraphMinimal( + ops, graph_name, rankdir="TB", minimal_dependency=True + ) + + try: + par_dir = os.path.dirname(file_name) + if not os.path.exists(par_dir): + os.makedirs(par_dir) + + format = 
os.path.splitext(os.path.basename(file_name))[-1] + if format == ".png": + graph.write_png(file_name) + elif format == ".pdf": + graph.write_pdf(file_name) + elif format == ".svg": + graph.write_svg(file_name) + else: + print("Incorrect format {}".format(format)) + except Exception as e: + print("Error when writing graph to image {}".format(e)) + + return graph + + +# ==== torch/utils_toffee/aten_to_caffe2.py ==================================== + + +def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef): + """ + For ONNX exported model, GroupNorm will be represented as ATen op, + this can be a drop in replacement from ATen to GroupNorm + """ + count = 0 + for op in predict_net.op: + if op.type == "ATen": + op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3 + if op_name and op_name.decode() == "group_norm": + op.arg.remove(get_pb_arg(op, "operator")) + + if get_pb_arg_vali(op, "cudnn_enabled", None): + op.arg.remove(get_pb_arg(op, "cudnn_enabled")) + + num_groups = get_pb_arg_vali(op, "num_groups", None) + if num_groups is not None: + op.arg.remove(get_pb_arg(op, "num_groups")) + check_set_pb_arg(op, "group", "i", num_groups) + + op.type = "GroupNorm" + count += 1 + if count > 1: + logger.info("Replaced {} ATen operator to GroupNormOp".format(count)) + + +# ==== torch/utils_toffee/alias.py ============================================= + + +def alias(x, name, is_backward=False): + if not torch.onnx.is_in_onnx_export(): + return x + assert isinstance(x, torch.Tensor) + return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward) + + +def fuse_alias_placeholder(predict_net, init_net): + """Remove AliasWithName placeholder and rename the input/output of it""" + # First we finish all the re-naming + for i, op in enumerate(predict_net.op): + if op.type == "AliasWithName": + assert len(op.input) == 1 + assert len(op.output) == 1 + name = get_pb_arg_vals(op, "name", None).decode() + is_backward = bool(get_pb_arg_vali(op, "is_backward", 0)) + rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward) + rename_op_output(predict_net, i, 0, name) + + # Remove AliasWithName, should be very safe since it's a non-op + new_ops = [] + for op in predict_net.op: + if op.type != "AliasWithName": + new_ops.append(op) + else: + # safety check + assert op.input == op.output + assert op.input[0] == op.arg[0].s.decode() + del predict_net.op[:] + predict_net.op.extend(new_ops) + + +# ==== torch/utils_caffe2/graph_transform.py =================================== + + +class IllegalGraphTransformError(ValueError): + """When a graph transform function call can't be executed.""" + + +def _rename_versioned_blob_in_proto( + proto: caffe2_pb2.NetDef, + old_name: str, + new_name: str, + version: int, + ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]], + start_versions: Dict[str, int], + end_versions: Dict[str, int], +): + """In given proto, rename all blobs with matched version""" + # Operater list + for op, i_th_ssa in zip(proto.op, ssa): + versioned_inputs, versioned_outputs = i_th_ssa + for i in range(len(op.input)): + if versioned_inputs[i] == (old_name, version): + op.input[i] = new_name + for i in range(len(op.output)): + if versioned_outputs[i] == (old_name, version): + op.output[i] = new_name + # external_input + if start_versions.get(old_name, 0) == version: + for i in range(len(proto.external_input)): + if proto.external_input[i] == old_name: + proto.external_input[i] = new_name + # external_output + if end_versions.get(old_name, 0) == 
version: + for i in range(len(proto.external_output)): + if proto.external_output[i] == old_name: + proto.external_output[i] = new_name + + +def rename_op_input( + predict_net: caffe2_pb2.NetDef, + init_net: caffe2_pb2.NetDef, + op_id: int, + input_id: int, + new_name: str, + from_producer: bool = False, +): + """ + Rename the op_id-th operator in predict_net, change it's input_id-th input's + name to the new_name. It also does automatic re-route and change + external_input and init_net if necessary. + - It requires the input is only consumed by this op. + - This function modifies predict_net and init_net in-place. + - When from_producer is enable, this also updates other operators that consumes + the same input. Be cautious because may trigger unintended behavior. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + assert isinstance(init_net, caffe2_pb2.NetDef) + + init_net_ssa, init_net_versions = core.get_ssa(init_net) + predict_net_ssa, predict_net_versions = core.get_ssa( + predict_net, copy.deepcopy(init_net_versions) + ) + + versioned_inputs, versioned_outputs = predict_net_ssa[op_id] + old_name, version = versioned_inputs[input_id] + + if from_producer: + producer_map = get_producer_map(predict_net_ssa) + if not (old_name, version) in producer_map: + raise NotImplementedError( + "Can't find producer, the input {} is probably from" + " init_net, this is not supported yet.".format(old_name) + ) + producer = producer_map[(old_name, version)] + rename_op_output(predict_net, producer[0], producer[1], new_name) + return + + def contain_targets(op_ssa): + return (old_name, version) in op_ssa[0] + + is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa] + if sum(is_consumer) > 1: + raise IllegalGraphTransformError( + ( + "Input '{}' of operator(#{}) are consumed by other ops, please use" + + " rename_op_output on the producer instead. Offending op: \n{}" + ).format(old_name, op_id, predict_net.op[op_id]) + ) + + # update init_net + _rename_versioned_blob_in_proto( + init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions + ) + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, + old_name, + new_name, + version, + predict_net_ssa, + init_net_versions, + predict_net_versions, + ) + + +def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str): + """ + Rename the op_id-th operator in predict_net, change it's output_id-th input's + name to the new_name. It also does automatic re-route and change + external_output and if necessary. + - It allows multiple consumers of its output. + - This function modifies predict_net in-place, doesn't need init_net. + """ + assert isinstance(predict_net, caffe2_pb2.NetDef) + + ssa, blob_versions = core.get_ssa(predict_net) + + versioned_inputs, versioned_outputs = ssa[op_id] + old_name, version = versioned_outputs[output_id] + + # update predict_net + _rename_versioned_blob_in_proto( + predict_net, old_name, new_name, version, ssa, {}, blob_versions + ) + + +def get_sub_graph_external_input_output( + predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int] +) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]: + """ + Return the list of external input/output of sub-graph, + each element is tuple of the name and corresponding version in predict_net. + + external input/output is defined the same way as caffe2 NetDef. 
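+
+    Example:
+        A minimal sketch (the net and op indices are hypothetical)::
+
+            ext_in, ext_out = get_sub_graph_external_input_output(net, [0, 1])
+            # Both are lists of (blob_name, ssa_version) tuples; blobs that are
+            # produced and consumed only inside the sub-graph appear in neither.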
+ """ + ssa, versions = core.get_ssa(predict_net) + + all_inputs = [] + all_outputs = [] + for op_id in sub_graph_op_indices: + all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs] + all_outputs += list(ssa[op_id][1]) # ssa output won't repeat + + # for versioned blobs, external inputs are just those blob in all_inputs + # but not in all_outputs + ext_inputs = [inp for inp in all_inputs if inp not in all_outputs] + + # external outputs are essentially outputs of this subgraph that are used + # outside of this sub-graph (including predict_net.external_output) + all_other_inputs = sum( + (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices), + [(outp, versions[outp]) for outp in predict_net.external_output], + ) + ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)] + + return ext_inputs, ext_outputs + + +class DiGraph: + """A DAG representation of caffe2 graph, each vertice is a versioned blob.""" + + def __init__(self): + self.vertices = set() + self.graph = collections.defaultdict(list) + + def add_edge(self, u, v): + self.graph[u].append(v) + self.vertices.add(u) + self.vertices.add(v) + + # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/ + def get_all_paths(self, s, d): + visited = {k: False for k in self.vertices} + path = [] + all_paths = [] + + def _get_all_paths_util(graph, u, d, visited, path): + visited[u] = True + path.append(u) + if u == d: + all_paths.append(copy.deepcopy(path)) + else: + for i in graph[u]: + if not visited[i]: + _get_all_paths_util(graph, i, d, visited, path) + path.pop() + visited[u] = False + + _get_all_paths_util(self.graph, s, d, visited, path) + return all_paths + + @staticmethod + def from_ssa(ssa): + graph = DiGraph() + for op_id in range(len(ssa)): + for inp in ssa[op_id][0]: + for outp in ssa[op_id][1]: + graph.add_edge(inp, outp) + return graph + + +def _get_dependency_chain(ssa, versioned_target, versioned_source): + """ + Return the index list of relevant operator to produce target blob from source blob, + if there's no dependency, return empty list. + """ + + # finding all paths between nodes can be O(N!), thus we can only search + # in the subgraph using the op starting from the first consumer of source blob + # to the producer of the target blob. + consumer_map = get_consumer_map(ssa) + producer_map = get_producer_map(ssa) + start_op = min(x[0] for x in consumer_map[versioned_source]) - 15 + end_op = ( + producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op + ) + sub_graph_ssa = ssa[start_op : end_op + 1] + if len(sub_graph_ssa) > 30: + logger.warning( + "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it" + " might take non-trival time to find all paths between them.".format( + versioned_source, versioned_target, start_op, end_op + ) + ) + + dag = DiGraph.from_ssa(sub_graph_ssa) + paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends + ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths] + return sorted(set().union(*[set(ops) for ops in ops_in_paths])) + + +def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]: + """ + Idenfity the reshape sub-graph in a protobuf. + The reshape sub-graph is defined as matching the following pattern: + + (input_blob) -> Op_1 -> ... 
-> Op_N -> (new_shape) -─┐ + └-------------------------------------------> Reshape -> (output_blob) + + Return: + List of sub-graphs, each sub-graph is represented as a list of indices + of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape] + """ + + ssa, _ = core.get_ssa(predict_net) + + ret = [] + for i, op in enumerate(predict_net.op): + if op.type == "Reshape": + assert len(op.input) == 2 + input_ssa = ssa[i][0] + data_source = input_ssa[0] + shape_source = input_ssa[1] + op_indices = _get_dependency_chain(ssa, shape_source, data_source) + ret.append(op_indices + [i]) + return ret + + +def remove_reshape_for_fc(predict_net, params): + """ + In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape + a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping + doesn't work well with ONNX and Int8 tools, and cause using extra + ops (eg. ExpandDims) that might not be available on mobile. + Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape + after exporting ONNX model. + """ + from caffe2.python import core + + # find all reshape sub-graph that can be removed, which is now all Reshape + # sub-graph whose output is only consumed by FC. + # TODO: to make it safer, we may need the actually value to better determine + # if a Reshape before FC is removable. + reshape_sub_graphs = identify_reshape_sub_graph(predict_net) + sub_graphs_to_remove = [] + for reshape_sub_graph in reshape_sub_graphs: + reshape_op_id = reshape_sub_graph[-1] + assert predict_net.op[reshape_op_id].type == "Reshape" + ssa, _ = core.get_ssa(predict_net) + reshape_output = ssa[reshape_op_id][1][0] + consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]] + if all(predict_net.op[consumer].type == "FC" for consumer in consumers): + # safety check if the sub-graph is isolated, for this reshape sub-graph, + # it means it has one non-param external input and one external output. + ext_inputs, ext_outputs = get_sub_graph_external_input_output( + predict_net, reshape_sub_graph + ) + non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] + if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1: + sub_graphs_to_remove.append(reshape_sub_graph) + + # perform removing subgraph by: + # 1: rename the Reshape's output to its input, then the graph can be + # seen as in-place itentify, meaning whose external input/output are the same. + # 2: simply remove those ops. 
+ remove_op_ids = [] + params_to_remove = [] + for sub_graph in sub_graphs_to_remove: + logger.info( + "Remove Reshape sub-graph:\n{}".format( + "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph]) + ) + ) + reshape_op_id = sub_graph[-1] + new_reshap_output = predict_net.op[reshape_op_id].input[0] + rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output) + ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph) + non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0] + params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0] + assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1 + assert ext_outputs[0][0] == non_params_ext_inputs[0][0] + assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1 + remove_op_ids.extend(sub_graph) + params_to_remove.extend(params_ext_inputs) + + predict_net = copy.deepcopy(predict_net) + new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids] + del predict_net.op[:] + predict_net.op.extend(new_ops) + for versioned_params in params_to_remove: + name = versioned_params[0] + logger.info("Remove params: {} from init_net and predict_net.external_input".format(name)) + del params[name] + predict_net.external_input.remove(name) + + return predict_net, params + + +def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef): + """ + In-place fuse extra copy ops between cpu/gpu for the following case: + a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1 + -CopyBToA> c2 -NextOp2-> d2 + The fused network will look like: + a -NextOp1-> d1 + -NextOp2-> d2 + """ + + _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"] + + def _fuse_once(predict_net): + ssa, blob_versions = core.get_ssa(predict_net) + consumer_map = get_consumer_map(ssa) + versioned_external_output = [ + (name, blob_versions[name]) for name in predict_net.external_output + ] + + for op_id, op in enumerate(predict_net.op): + if op.type in _COPY_OPS: + fw_copy_versioned_output = ssa[op_id][1][0] + consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]] + reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)] + + is_fusable = ( + len(consumer_ids) > 0 + and fw_copy_versioned_output not in versioned_external_output + and all( + predict_net.op[_op_id].type == reverse_op_type + and ssa[_op_id][1][0] not in versioned_external_output + for _op_id in consumer_ids + ) + ) + + if is_fusable: + for rv_copy_op_id in consumer_ids: + # making each NextOp uses "a" directly and removing Copy ops + rs_copy_versioned_output = ssa[rv_copy_op_id][1][0] + next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0] + predict_net.op[next_op_id].input[inp_id] = op.input[0] + # remove CopyOps + new_ops = [ + op + for i, op in enumerate(predict_net.op) + if i != op_id and i not in consumer_ids + ] + del predict_net.op[:] + predict_net.op.extend(new_ops) + return True + + return False + + # _fuse_once returns False is nothing can be fused + while _fuse_once(predict_net): + pass + + +def remove_dead_end_ops(net_def: caffe2_pb2.NetDef): + """remove ops if its output is not used or not in external_output""" + ssa, versions = core.get_ssa(net_def) + versioned_external_output = [(name, versions[name]) for name in net_def.external_output] + consumer_map = get_consumer_map(ssa) + removed_op_ids = set() + + def _is_dead_end(versioned_blob): + return not ( + versioned_blob in versioned_external_output + or ( + len(consumer_map[versioned_blob]) > 0 + and all(x[0] not in removed_op_ids for x in 
consumer_map[versioned_blob]) + ) + ) + + for i, ssa_i in reversed(list(enumerate(ssa))): + versioned_outputs = ssa_i[1] + if all(_is_dead_end(outp) for outp in versioned_outputs): + removed_op_ids.add(i) + + # simply removing those deadend ops should have no effect to external_output + new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids] + del net_def.op[:] + net_def.op.extend(new_ops) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript.py new file mode 100644 index 0000000000000000000000000000000000000000..19965ad967cf9686a10728c98dcccf9e0fb7c447 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript.py @@ -0,0 +1,132 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import torch + +from custom_detectron2.utils.file_io import PathManager + +from .torchscript_patch import freeze_training_mode, patch_instances + +__all__ = ["scripting_with_instances", "dump_torchscript_IR"] + + +def scripting_with_instances(model, fields): + """ + Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since + attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult + for scripting to support it out of the box. This function is made to support scripting + a model that uses :class:`Instances`. It does the following: + + 1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``, + but with all attributes been "static". + The attributes need to be statically declared in the ``fields`` argument. + 2. Register ``new_Instances``, and force scripting compiler to + use it when trying to compile ``Instances``. + + After this function, the process will be reverted. User should be able to script another model + using different fields. + + Example: + Assume that ``Instances`` in the model consist of two attributes named + ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and + :class:`Tensor` respectively during inference. You can call this function like: + :: + fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor} + torchscipt_model = scripting_with_instances(model, fields) + + Note: + It only support models in evaluation mode. + + Args: + model (nn.Module): The input model to be exported by scripting. + fields (Dict[str, type]): Attribute names and corresponding type that + ``Instances`` will use in the model. Note that all attributes used in ``Instances`` + need to be added, regardless of whether they are inputs/outputs of the model. + Data type not defined in detectron2 is not supported for now. + + Returns: + torch.jit.ScriptModule: the model in torchscript format + """ + assert ( + not model.training + ), "Currently we only support exporting models in evaluation mode to torchscript" + + with freeze_training_mode(model), patch_instances(fields): + scripted_model = torch.jit.script(model) + return scripted_model + + +# alias for old name +export_torchscript_with_instances = scripting_with_instances + + +def dump_torchscript_IR(model, dir): + """ + Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph, + inlined graph). Useful for debugging. + + Args: + model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module + dir (str): output directory to dump files. 
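+
+    Example:
+        A minimal sketch (any scriptable module works)::
+
+            model = torch.jit.script(torch.nn.Linear(4, 2).eval())
+            dump_torchscript_IR(model, "/tmp/ts_dump")
+            # writes model_ts_code.txt, model_ts_IR.txt,
+            # model_ts_IR_inlined.txt and model.txt under /tmp/ts_dump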
+ """ + dir = os.path.expanduser(dir) + PathManager.mkdirs(dir) + + def _get_script_mod(mod): + if isinstance(mod, torch.jit.TracedModule): + return mod._actual_script_module + return mod + + # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code + with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f: + + def get_code(mod): + # Try a few ways to get code using private attributes. + try: + # This contains more information than just `mod.code` + return _get_script_mod(mod)._c.code + except AttributeError: + pass + try: + return mod.code + except AttributeError: + return None + + def dump_code(prefix, mod): + code = get_code(mod) + name = prefix or "root model" + if code is None: + f.write(f"Could not found code for {name} (type={mod.original_name})\n") + f.write("\n") + else: + f.write(f"\nCode for {name}, type={mod.original_name}:\n") + f.write(code) + f.write("\n") + f.write("-" * 80) + + for name, m in mod.named_children(): + dump_code(prefix + "." + name, m) + + if isinstance(model, torch.jit.ScriptFunction): + f.write(get_code(model)) + else: + dump_code("", model) + + def _get_graph(model): + try: + # Recursively dump IR of all modules + return _get_script_mod(model)._c.dump_to_str(True, False, False) + except AttributeError: + return model.graph.str() + + with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f: + f.write(_get_graph(model)) + + # Dump IR of the entire graph (all submodules inlined) + with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f: + f.write(str(model.inlined_graph)) + + if not isinstance(model, torch.jit.ScriptFunction): + # Dump the model structure in pytorch style + with PathManager.open(os.path.join(dir, "model.txt"), "w") as f: + f.write(str(model)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript_patch.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript_patch.py new file mode 100644 index 0000000000000000000000000000000000000000..9c031d75579addc30ddbc9c7f46280724078c328 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/export/torchscript_patch.py @@ -0,0 +1,406 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import os +import sys +import tempfile +from contextlib import ExitStack, contextmanager +from copy import deepcopy +from unittest import mock +import torch +from torch import nn + +# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964 +import custom_detectron2 # noqa F401 +from custom_detectron2.structures import Boxes, Instances +from custom_detectron2.utils.env import _import_file + +_counter = 0 + + +def _clear_jit_cache(): + from torch.jit._recursive import concrete_type_store + from torch.jit._state import _jit_caching_layer + + concrete_type_store.type_store.clear() # for modules + _jit_caching_layer.clear() # for free functions + + +def _add_instances_conversion_methods(newInstances): + """ + Add from_instances methods to the scripted Instances class. 
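+
+    Usage sketch (``NewInstances`` stands for the scripted class produced by
+    ``patch_instances``)::
+
+        scripted = NewInstances.from_instances(eager_instances)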
+ """ + cls_name = newInstances.__name__ + + @torch.jit.unused + def from_instances(instances: Instances): + """ + Create scripted Instances from original Instances + """ + fields = instances.get_fields() + image_size = instances.image_size + ret = newInstances(image_size) + for name, val in fields.items(): + assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}" + setattr(ret, name, deepcopy(val)) + return ret + + newInstances.from_instances = from_instances + + +@contextmanager +def patch_instances(fields): + """ + A contextmanager, under which the Instances class in detectron2 is replaced + by a statically-typed scriptable class, defined by `fields`. + See more in `scripting_with_instances`. + """ + + with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile( + mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False + ) as f: + try: + # Objects that use Instances should not reuse previously-compiled + # results in cache, because `Instances` could be a new class each time. + _clear_jit_cache() + + cls_name, s = _gen_instance_module(fields) + f.write(s) + f.flush() + f.close() + + module = _import(f.name) + new_instances = getattr(module, cls_name) + _ = torch.jit.script(new_instances) + # let torchscript think Instances was scripted already + Instances.__torch_script_class__ = True + # let torchscript find new_instances when looking for the jit type of Instances + Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances) + + _add_instances_conversion_methods(new_instances) + yield new_instances + finally: + try: + del Instances.__torch_script_class__ + del Instances._jit_override_qualname + except AttributeError: + pass + sys.modules.pop(module.__name__) + + +def _gen_instance_class(fields): + """ + Args: + fields (dict[name: type]) + """ + + class _FieldType: + def __init__(self, name, type_): + assert isinstance(name, str), f"Field name must be str, got {name}" + self.name = name + self.type_ = type_ + self.annotation = f"{type_.__module__}.{type_.__name__}" + + fields = [_FieldType(k, v) for k, v in fields.items()] + + def indent(level, s): + return " " * 4 * level + s + + lines = [] + + global _counter + _counter += 1 + + cls_name = "ScriptedInstances{}".format(_counter) + + field_names = tuple(x.name for x in fields) + extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields]) + lines.append( + f""" +class {cls_name}: + def __init__(self, image_size: Tuple[int, int], {extra_args}): + self.image_size = image_size + self._field_names = {field_names} +""" + ) + + for f in fields: + lines.append( + indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})") + ) + + for f in fields: + lines.append( + f""" + @property + def {f.name}(self) -> {f.annotation}: + # has to use a local for type refinement + # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement + t = self._{f.name} + assert t is not None, "{f.name} is None and cannot be accessed!" 
+ return t + + @{f.name}.setter + def {f.name}(self, value: {f.annotation}) -> None: + self._{f.name} = value +""" + ) + + # support method `__len__` + lines.append( + """ + def __len__(self) -> int: +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + return len(t) +""" + ) + lines.append( + """ + raise NotImplementedError("Empty Instances does not support __len__!") +""" + ) + + # support method `has` + lines.append( + """ + def has(self, name: str) -> bool: +""" + ) + for f in fields: + lines.append( + f""" + if name == "{f.name}": + return self._{f.name} is not None +""" + ) + lines.append( + """ + return False +""" + ) + + # support method `to` + none_args = ", None" * len(fields) + lines.append( + f""" + def to(self, device: torch.device) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + if hasattr(f.type_, "to"): + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret._{f.name} = t.to(device) +""" + ) + else: + # For now, ignore fields that cannot be moved to devices. + # Maybe can support other tensor-like classes (e.g. __torch_function__) + pass + lines.append( + """ + return ret +""" + ) + + # support method `getitem` + none_args = ", None" * len(fields) + lines.append( + f""" + def __getitem__(self, item) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret._{f.name} = t[item] +""" + ) + lines.append( + """ + return ret +""" + ) + + # support method `cat` + # this version does not contain checks that all instances have same size and fields + none_args = ", None" * len(fields) + lines.append( + f""" + def cat(self, instances: List["{cls_name}"]) -> "{cls_name}": + ret = {cls_name}(self.image_size{none_args}) +""" + ) + for f in fields: + lines.append( + f""" + t = self._{f.name} + if t is not None: + values: List[{f.annotation}] = [x.{f.name} for x in instances] + if torch.jit.isinstance(t, torch.Tensor): + ret._{f.name} = torch.cat(values, dim=0) + else: + ret._{f.name} = t.cat(values) +""" + ) + lines.append( + """ + return ret""" + ) + + # support method `get_fields()` + lines.append( + """ + def get_fields(self) -> Dict[str, Tensor]: + ret = {} + """ + ) + for f in fields: + if f.type_ == Boxes: + stmt = "t.tensor" + elif f.type_ == torch.Tensor: + stmt = "t" + else: + stmt = f'assert False, "unsupported type {str(f.type_)}"' + lines.append( + f""" + t = self._{f.name} + if t is not None: + ret["{f.name}"] = {stmt} + """ + ) + lines.append( + """ + return ret""" + ) + return cls_name, os.linesep.join(lines) + + +def _gen_instance_module(fields): + # TODO: find a more automatic way to enable import of other classes + s = """ +from copy import deepcopy +import torch +from torch import Tensor +import typing +from typing import * + +import custom_detectron2 +from custom_detectron2.structures import Boxes, Instances + +""" + + cls_name, cls_def = _gen_instance_class(fields) + s += cls_def + return cls_name, s + + +def _import(path): + return _import_file( + "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True + ) + + +@contextmanager +def patch_builtin_len(modules=()): + """ + Patch the builtin len() function of a few detectron2 modules + to use __len__ instead, because __len__ does not convert values to + integers and therefore is friendly to tracing. 
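+
+    Example (a sketch; ``model`` and ``inputs`` are assumed to be defined)::
+
+        with patch_builtin_len():
+            traced = torch.jit.trace(model, inputs)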
+ + Args: + modules (list[stsr]): names of extra modules to patch len(), in + addition to those in detectron2. + """ + + def _new_len(obj): + return obj.__len__() + + with ExitStack() as stack: + MODULES = [ + "detectron2.modeling.roi_heads.fast_rcnn", + "detectron2.modeling.roi_heads.mask_head", + "detectron2.modeling.roi_heads.keypoint_head", + ] + list(modules) + ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES] + for m in ctxs: + m.side_effect = _new_len + yield + + +def patch_nonscriptable_classes(): + """ + Apply patches on a few nonscriptable detectron2 classes. + Should not have side-effects on eager usage. + """ + # __prepare_scriptable__ can also be added to models for easier maintenance. + # But it complicates the clean model code. + + from custom_detectron2.modeling.backbone import ResNet, FPN + + # Due to https://github.com/pytorch/pytorch/issues/36061, + # we change backbone to use ModuleList for scripting. + # (note: this changes param names in state_dict) + + def prepare_resnet(self): + ret = deepcopy(self) + ret.stages = nn.ModuleList(ret.stages) + for k in self.stage_names: + delattr(ret, k) + return ret + + ResNet.__prepare_scriptable__ = prepare_resnet + + def prepare_fpn(self): + ret = deepcopy(self) + ret.lateral_convs = nn.ModuleList(ret.lateral_convs) + ret.output_convs = nn.ModuleList(ret.output_convs) + for name, _ in self.named_children(): + if name.startswith("fpn_"): + delattr(ret, name) + return ret + + FPN.__prepare_scriptable__ = prepare_fpn + + # Annotate some attributes to be constants for the purpose of scripting, + # even though they are not constants in eager mode. + from custom_detectron2.modeling.roi_heads import StandardROIHeads + + if hasattr(StandardROIHeads, "__annotations__"): + # copy first to avoid editing annotations of base class + StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__) + StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool] + StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool] + + +# These patches are not supposed to have side-effects. +patch_nonscriptable_classes() + + +@contextmanager +def freeze_training_mode(model): + """ + A context manager that annotates the "training" attribute of every submodule + to constant, so that the training codepath in these modules can be + meta-compiled away. Upon exiting, the annotations are reverted. + """ + classes = {type(x) for x in model.modules()} + # __constants__ is the old way to annotate constants and not compatible + # with __annotations__ . + classes = {x for x in classes if not hasattr(x, "__constants__")} + for cls in classes: + cls.__annotations__["training"] = torch.jit.Final[bool] + yield + for cls in classes: + cls.__annotations__["training"] = bool diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..761a3d1c7afa049e9779ee9fc4d299e9aae38cad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList +from .deform_conv import DeformConv, ModulatedDeformConv +from .mask_ops import paste_masks_in_image +from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated +from .roi_align import ROIAlign, roi_align +from .roi_align_rotated import ROIAlignRotated, roi_align_rotated +from .shape_spec import ShapeSpec +from .wrappers import ( + BatchNorm2d, + Conv2d, + ConvTranspose2d, + cat, + interpolate, + Linear, + nonzero_tuple, + cross_entropy, + empty_input_loss_func_wrapper, + shapes_to_tensor, + move_device_like, +) +from .blocks import CNNBlockBase, DepthwiseSeparableConv2d +from .aspp import ASPP +from .losses import ciou_loss, diou_loss + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/aspp.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/aspp.py new file mode 100644 index 0000000000000000000000000000000000000000..14861aa9ede4fea6a69a49f189bcab997b558148 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/aspp.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +from copy import deepcopy +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from .batch_norm import get_norm +from .blocks import DepthwiseSeparableConv2d +from .wrappers import Conv2d + + +class ASPP(nn.Module): + """ + Atrous Spatial Pyramid Pooling (ASPP). + """ + + def __init__( + self, + in_channels, + out_channels, + dilations, + *, + norm, + activation, + pool_kernel_size=None, + dropout: float = 0.0, + use_depthwise_separable_conv=False, + ): + """ + Args: + in_channels (int): number of input channels for ASPP. + out_channels (int): number of output channels. + dilations (list): a list of 3 dilations in ASPP. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. norm is + applied to all conv layers except the conv following + global average pooling. + activation (callable): activation function. + pool_kernel_size (tuple, list): the average pooling size (kh, kw) + for image pooling layer in ASPP. If set to None, it always + performs global average pooling. If not None, it must be + divisible by the shape of inputs in forward(). It is recommended + to use a fixed input feature size in training, and set this + option to match this size, so that it performs global average + pooling in training, and the size of the pooling window stays + consistent in inference. + dropout (float): apply dropout on the output of ASPP. It is used in + the official DeepLab implementation with a rate of 0.1: + https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa + use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d + for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`. 
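+
+        Example:
+            A minimal sketch (channel sizes are illustrative)::
+
+                aspp = ASPP(256, 256, [6, 12, 18], norm="BN",
+                            activation=torch.nn.ReLU())
+                out = aspp(torch.randn(2, 256, 32, 32))  # -> (2, 256, 32, 32)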
+ """ + super(ASPP, self).__init__() + assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations)) + self.pool_kernel_size = pool_kernel_size + self.dropout = dropout + use_bias = norm == "" + self.convs = nn.ModuleList() + # conv 1x1 + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # atrous convs + for dilation in dilations: + if use_depthwise_separable_conv: + self.convs.append( + DepthwiseSeparableConv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + norm1=norm, + activation1=deepcopy(activation), + norm2=norm, + activation2=deepcopy(activation), + ) + ) + else: + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # image pooling + # We do not add BatchNorm because the spatial resolution is 1x1, + # the original TF implementation has BatchNorm. + if pool_kernel_size is None: + image_pooling = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + else: + image_pooling = nn.Sequential( + nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + weight_init.c2_xavier_fill(image_pooling[1]) + self.convs.append(image_pooling) + + self.project = Conv2d( + 5 * out_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + weight_init.c2_xavier_fill(self.project) + + def forward(self, x): + size = x.shape[-2:] + if self.pool_kernel_size is not None: + if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]: + raise ValueError( + "`pool_kernel_size` must be divisible by the shape of inputs. " + "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size) + ) + res = [] + for conv in self.convs: + res.append(conv(x)) + res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False) + res = torch.cat(res, dim=1) + res = self.project(res) + res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res + return res diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/batch_norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/batch_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..24899c56420a3e8db3793f580566ee9d8d44f84c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/batch_norm.py @@ -0,0 +1,300 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +import torch.distributed as dist +from fvcore.nn.distributed import differentiable_all_reduce +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.utils import comm, env + +from .wrappers import BatchNorm2d + + +class FrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + It contains non-trainable buffers called + "weight" and "bias", "running_mean", "running_var", + initialized to perform identity transformation. 
+ + The pre-trained backbone models from Caffe2 only contain "weight" and "bias", + which are computed from the original four parameters of BN. + The affine transform `x * weight + bias` will perform the equivalent + computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. + When loading a backbone model from Caffe2, "running_mean" and "running_var" + will be left unchanged as identity transformation. + + Other pre-trained backbone models may contain all 4 parameters. + + The forward is implemented by `F.batch_norm(..., training=False)`. + """ + + _version = 3 + + def __init__(self, num_features, eps=1e-5): + super().__init__() + self.num_features = num_features + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features) - eps) + + def forward(self, x): + if x.requires_grad: + # When gradients are needed, F.batch_norm will use extra memory + # because its backward op computes gradients for weight/bias as well. + scale = self.weight * (self.running_var + self.eps).rsqrt() + bias = self.bias - self.running_mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + out_dtype = x.dtype # may be half + return x * scale.to(out_dtype) + bias.to(out_dtype) + else: + # When gradients are not needed, F.batch_norm is a single fused op + # and provide more optimization opportunities. + return F.batch_norm( + x, + self.running_mean, + self.running_var, + self.weight, + self.bias, + training=False, + eps=self.eps, + ) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + # No running_mean/var in early versions + # This will silent the warnings + if prefix + "running_mean" not in state_dict: + state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) + if prefix + "running_var" not in state_dict: + state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def __repr__(self): + return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) + + @classmethod + def convert_frozen_batchnorm(cls, module): + """ + Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. + + Args: + module (torch.nn.Module): + + Returns: + If module is BatchNorm/SyncBatchNorm, returns a new module. + Otherwise, in-place convert module and return it. 
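+
+        Example (a sketch)::
+
+            model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)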
+ + Similar to convert_sync_batchnorm in + https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py + """ + bn_module = nn.modules.batchnorm + bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) + res = module + if isinstance(module, bn_module): + res = cls(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = cls.convert_frozen_batchnorm(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def get_norm(norm, out_channels): + """ + Args: + norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; + or a callable that takes a channel number and returns + the normalization layer as a nn.Module. + + Returns: + nn.Module or None: the normalization layer + """ + if norm is None: + return None + if isinstance(norm, str): + if len(norm) == 0: + return None + norm = { + "BN": BatchNorm2d, + # Fixed in https://github.com/pytorch/pytorch/pull/36382 + "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm, + "FrozenBN": FrozenBatchNorm2d, + "GN": lambda channels: nn.GroupNorm(32, channels), + # for debugging: + "nnSyncBN": nn.SyncBatchNorm, + "naiveSyncBN": NaiveSyncBatchNorm, + # expose stats_mode N as an option to caller, required for zero-len inputs + "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"), + "LN": lambda channels: LayerNorm(channels), + }[norm] + return norm(out_channels) + + +class NaiveSyncBatchNorm(BatchNorm2d): + """ + In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient + when the batch size on each worker is different. + (e.g., when scale augmentation is used, or when it is applied to mask head). + + This is a slower but correct alternative to `nn.SyncBatchNorm`. + + Note: + There isn't a single definition of Sync BatchNorm. + + When ``stats_mode==""``, this module computes overall statistics by using + statistics of each worker with equal weight. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (N, H, W). This mode does not support inputs with zero batch size. + + When ``stats_mode=="N"``, this module computes overall statistics by weighting + the statistics of each worker by their ``N``. The result is true statistics + of all samples (as if they are all on one worker) only when all workers + have the same (H, W). It is slower than ``stats_mode==""``. + + Even though the result of this module may not be the true statistics of all samples, + it may still be reasonable because it might be preferrable to assign equal weights + to all workers, regardless of their (H, W) dimension, instead of putting larger weight + on larger images. From preliminary experiments, little difference is found between such + a simplified implementation and an accurate computation of overall mean & variance. 
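+
+    Example:
+        A minimal sketch (in detectron2 configs this is normally reached via
+        ``get_norm("naiveSyncBN", channels)``)::
+
+            bn = NaiveSyncBatchNorm(64, stats_mode="N")
+            y = bn(torch.randn(8, 64, 14, 14))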
+ """ + + def __init__(self, *args, stats_mode="", **kwargs): + super().__init__(*args, **kwargs) + assert stats_mode in ["", "N"] + self._stats_mode = stats_mode + + def forward(self, input): + if comm.get_world_size() == 1 or not self.training: + return super().forward(input) + + B, C = input.shape[0], input.shape[1] + + half_input = input.dtype == torch.float16 + if half_input: + # fp16 does not have good enough numerics for the reduction here + input = input.float() + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + if self._stats_mode == "": + assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' + vec = torch.cat([mean, meansqr], dim=0) + vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) + mean, meansqr = torch.split(vec, C) + momentum = self.momentum + else: + if B == 0: + vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) + vec = vec + input.sum() # make sure there is gradient w.r.t input + else: + vec = torch.cat( + [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0 + ) + vec = differentiable_all_reduce(vec * B) + + total_batch = vec[-1].detach() + momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 + mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero + + var = meansqr - mean * mean + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + + self.running_mean += momentum * (mean.detach() - self.running_mean) + self.running_var += momentum * (var.detach() - self.running_var) + ret = input * scale + bias + if half_input: + ret = ret.half() + return ret + + +class CycleBatchNormList(nn.ModuleList): + """ + Implement domain-specific BatchNorm by cycling. + + When a BatchNorm layer is used for multiple input domains or input + features, it might need to maintain a separate test-time statistics + for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`. + + This module implements it by using N separate BN layers + and it cycles through them every time a forward() is called. + + NOTE: The caller of this module MUST guarantee to always call + this module by multiple of N times. Otherwise its test-time statistics + will be incorrect. + """ + + def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs): + """ + Args: + length: number of BatchNorm layers to cycle. + bn_class: the BatchNorm class to use + kwargs: arguments of the BatchNorm class, such as num_features. + """ + self._affine = kwargs.pop("affine", True) + super().__init__([bn_class(**kwargs, affine=False) for k in range(length)]) + if self._affine: + # shared affine, domain-specific BN + channels = self[0].num_features + self.weight = nn.Parameter(torch.ones(channels)) + self.bias = nn.Parameter(torch.zeros(channels)) + self._pos = 0 + + def forward(self, x): + ret = self[self._pos](x) + self._pos = (self._pos + 1) % len(self) + + if self._affine: + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + return ret * w + b + else: + return ret + + def extra_repr(self): + return f"affine={self._affine}" + + +class LayerNorm(nn.Module): + """ + A LayerNorm variant, popularized by Transformers, that performs point-wise mean and + variance normalization over the channel dimension for inputs that have shape + (batch_size, channels, height, width). 
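+
+    Example (a sketch)::
+
+        ln = LayerNorm(256)
+        y = ln(torch.randn(2, 256, 7, 7))
+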
+ https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950 + """ + + def __init__(self, normalized_shape, eps=1e-6): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.normalized_shape = (normalized_shape,) + + def forward(self, x): + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/blocks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..1995a4bf7339e8deb7eaaffda4f819dda55e7ac7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/blocks.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import fvcore.nn.weight_init as weight_init +from torch import nn + +from .batch_norm import FrozenBatchNorm2d, get_norm +from .wrappers import Conv2d + + +""" +CNN building blocks. +""" + + +class CNNBlockBase(nn.Module): + """ + A CNN block is assumed to have input channels, output channels and a stride. + The input and output of `forward()` method must be NCHW tensors. + The method can perform arbitrary computation but must match the given + channels and stride specification. + + Attribute: + in_channels (int): + out_channels (int): + stride (int): + """ + + def __init__(self, in_channels, out_channels, stride): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + in_channels (int): + out_channels (int): + stride (int): + """ + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + + def freeze(self): + """ + Make this block not trainable. + This method sets all parameters to `requires_grad=False`, + and convert all BatchNorm layers to FrozenBatchNorm + + Returns: + the block itself + """ + for p in self.parameters(): + p.requires_grad = False + FrozenBatchNorm2d.convert_frozen_batchnorm(self) + return self + + +class DepthwiseSeparableConv2d(nn.Module): + """ + A kxk depthwise convolution + a 1x1 convolution. + + In :paper:`xception`, norm & activation are applied on the second conv. + :paper:`mobilenet` uses norm & activation on both convs. + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + padding=1, + dilation=1, + *, + norm1=None, + activation1=None, + norm2=None, + activation2=None, + ): + """ + Args: + norm1, norm2 (str or callable): normalization for the two conv layers. + activation1, activation2 (callable(Tensor) -> Tensor): activation + function for the two conv layers. 
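+
+        Example:
+            A minimal sketch (BN + ReLU on both convs; sizes illustrative)::
+
+                conv = DepthwiseSeparableConv2d(
+                    32, 64, norm1="BN", activation1=torch.relu,
+                    norm2="BN", activation2=torch.relu,
+                )
+                y = conv(torch.randn(1, 32, 28, 28))  # -> (1, 64, 28, 28)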
+ """ + super().__init__() + self.depthwise = Conv2d( + in_channels, + in_channels, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=not norm1, + norm=get_norm(norm1, in_channels), + activation=activation1, + ) + self.pointwise = Conv2d( + in_channels, + out_channels, + kernel_size=1, + bias=not norm2, + norm=get_norm(norm2, out_channels), + activation=activation2, + ) + + # default initialization + weight_init.c2_msra_fill(self.depthwise) + weight_init.c2_msra_fill(self.pointwise) + + def forward(self, x): + return self.pointwise(self.depthwise(x)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..778ed3da0bae89820831bcd8a72ff7b9cad8d4dd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/README.md @@ -0,0 +1,7 @@ + + +To add a new Op: + +1. Create a new directory +2. Implement new ops there +3. Delcare its Python interface in `vision.cpp`. diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h new file mode 100644 index 0000000000000000000000000000000000000000..03f4211003f42f601f0cfcf4a690f5da4a0a1f67 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h @@ -0,0 +1,115 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); +#endif + +// Interface for Python +inline at::Tensor ROIAlignRotated_forward( + const at::Tensor& input, + const at::Tensor& rois, + const double spatial_scale, + const int64_t pooled_height, + const int64_t pooled_width, + const int64_t sampling_ratio) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return ROIAlignRotated_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + return ROIAlignRotated_forward_cpu( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +} + +inline at::Tensor ROIAlignRotated_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const double spatial_scale, + const int64_t 
pooled_height, + const int64_t pooled_width, + const int64_t batch_size, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t sampling_ratio) { + if (grad.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return ROIAlignRotated_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + return ROIAlignRotated_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2a3d3056cc71a4acaafb570739a9dd247a7eb1ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp @@ -0,0 +1,522 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include "ROIAlignRotated.h" + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + T roi_center_h, + T roi_center_w, + T cos_theta, + T sin_theta, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + // In image space, (y, x) is the order for Right Handed System, + // and this is essentially multiplying the point by a rotation matrix + // to rotate it counterclockwise through angle theta. 
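+          // i.e. [y]   [cos(theta)  -sin(theta)] [yy]   [roi_center_h]
+          //      [x] = [sin(theta)   cos(theta)] [xx] + [roi_center_w]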
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y < 0) { + y = 0; + } + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +} // namespace + +template +void ROIAlignRotatedForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. 
+ T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + roi_center_h, + roi_center_w, + cos_theta, + sin_theta, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void ROIAlignRotatedBackward( + const int nthreads, + // may not be contiguous. should index using n_stride, etc + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. 
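+  // Editorial worked example (not in the upstream source): with a 2 x 2
+  // sampling grid, count = 4 below and each sample scatters
+  // grad_output_this_bin * w_k / 4 onto its four neighboring pixels; since
+  // w1 + w2 + w3 + w4 = 1 for every sample, the gradient deposited for the
+  // whole bin sums back to grad_output_this_bin.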
+ T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) { + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedForward( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + return output; +} + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return 
grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedBackward( + grad.numel(), + grad.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..fca186519143b168a912c880a4cf495a0a5a9322 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu @@ -0,0 +1,443 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { + +template +__device__ T bilinear_interpolate( + const T* input, + const int height, + const int width, + T y, + T x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +} // namespace + +template +__global__ void RoIAlignRotatedForward( + const int nthreads, + const T* input, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (inte gral) pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T val = bilinear_interpolate(offset_input, height, width, y, x); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__global__ void RoIAlignRotatedBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(output_size), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + RoIAlignRotatedForward<<>>( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(grad.numel()), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + grad.scalar_type(), "ROIAlignRotated_backward", [&] { + RoIAlignRotatedBackwardFeature<<>>( + grad.numel(), + grad_.data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h new file mode 100644 index 0000000000000000000000000000000000000000..3bf383b8ed9b358b5313d433a9682c294dfb77e4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor box_iou_rotated( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); + if (boxes1.device().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + + return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c843487b5fa4e8077dd27402ec99009266ddda8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
+#include "box_iou_rotated.h"
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+template <typename T>
+void box_iou_rotated_cpu_kernel(
+    const at::Tensor& boxes1,
+    const at::Tensor& boxes2,
+    at::Tensor& ious) {
+  auto num_boxes1 = boxes1.size(0);
+  auto num_boxes2 = boxes2.size(0);
+
+  for (int i = 0; i < num_boxes1; i++) {
+    for (int j = 0; j < num_boxes2; j++) {
+      ious[i * num_boxes2 + j] = single_box_iou_rotated<T>(
+          boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>());
+    }
+  }
+}
+
+at::Tensor box_iou_rotated_cpu(
+    // input must be contiguous:
+    const at::Tensor& boxes1,
+    const at::Tensor& boxes2) {
+  auto num_boxes1 = boxes1.size(0);
+  auto num_boxes2 = boxes2.size(0);
+  at::Tensor ious =
+      at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
+
+  box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious);
+
+  // reshape from 1d array to 2d array
+  auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
+  return ious.reshape(shape);
+}
+
+} // namespace detectron2
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..952710e53041187907fbd113f8d0d0fa24134a86
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
@@ -0,0 +1,130 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+// 2D block with 32 * 16 = 512 threads per block
+const int BLOCK_DIM_X = 32;
+const int BLOCK_DIM_Y = 16;
+
+template <typename T>
+__global__ void box_iou_rotated_cuda_kernel(
+    const int n_boxes1,
+    const int n_boxes2,
+    const T* dev_boxes1,
+    const T* dev_boxes2,
+    T* dev_ious) {
+  const int row_start = blockIdx.x * blockDim.x;
+  const int col_start = blockIdx.y * blockDim.y;
+
+  const int row_size = min(n_boxes1 - row_start, blockDim.x);
+  const int col_size = min(n_boxes2 - col_start, blockDim.y);
+
+  __shared__ float block_boxes1[BLOCK_DIM_X * 5];
+  __shared__ float block_boxes2[BLOCK_DIM_Y * 5];
+
+  // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
+  if (threadIdx.x < row_size && threadIdx.y == 0) {
+    block_boxes1[threadIdx.x * 5 + 0] =
+        dev_boxes1[(row_start + threadIdx.x) * 5 + 0];
+    block_boxes1[threadIdx.x * 5 + 1] =
+        dev_boxes1[(row_start + threadIdx.x) * 5 + 1];
+    block_boxes1[threadIdx.x * 5 + 2] =
+        dev_boxes1[(row_start + threadIdx.x) * 5 + 2];
+    block_boxes1[threadIdx.x * 5 + 3] =
+        dev_boxes1[(row_start + threadIdx.x) * 5 + 3];
+    block_boxes1[threadIdx.x * 5 + 4] =
+        dev_boxes1[(row_start + threadIdx.x) * 5 + 4];
+  }
+
+  if (threadIdx.x < col_size && threadIdx.y == 0) {
+    block_boxes2[threadIdx.x * 5 + 0] =
+        dev_boxes2[(col_start + threadIdx.x) * 5 + 0];
+    block_boxes2[threadIdx.x * 5 + 1] =
+        dev_boxes2[(col_start + threadIdx.x) * 5 + 1];
+    block_boxes2[threadIdx.x * 5 + 2] =
+        dev_boxes2[(col_start + threadIdx.x) * 5 + 2];
+    block_boxes2[threadIdx.x * 5 + 3] =
+        dev_boxes2[(col_start + threadIdx.x) * 5 + 3];
+    block_boxes2[threadIdx.x * 5 + 4] =
+        dev_boxes2[(col_start + threadIdx.x) * 5 + 4];
+  }
+  __syncthreads();
+
+  if (threadIdx.x < row_size && threadIdx.y < col_size) {
+    int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
+    dev_ious[offset] = single_box_iou_rotated<T>(
+        block_boxes1 + threadIdx.x * 5, block_boxes2 +
threadIdx.y * 5); + } +} + +at::Tensor box_iou_rotated_cuda( + // input must be contiguous + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + using scalar_t = float; + AT_ASSERTM( + boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor"); + AT_ASSERTM( + boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor"); + AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(boxes1.device()); + + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + bool transpose = false; + if (num_boxes1 > 0 && num_boxes2 > 0) { + scalar_t *data1 = boxes1.data_ptr(), + *data2 = boxes2.data_ptr(); + + if (num_boxes2 > 65535 * BLOCK_DIM_Y) { + AT_ASSERTM( + num_boxes1 <= 65535 * BLOCK_DIM_Y, + "Too many boxes for box_iou_rotated_cuda!"); + // x dim is allowed to be large, but y dim cannot, + // so we transpose the two to avoid "invalid configuration argument" + // error. We assume one of them is small. Otherwise the result is hard to + // fit in memory anyway. + std::swap(num_boxes1, num_boxes2); + std::swap(data1, data2); + transpose = true; + } + + const int blocks_x = + at::cuda::ATenCeilDiv(static_cast(num_boxes1), BLOCK_DIM_X); + const int blocks_y = + at::cuda::ATenCeilDiv(static_cast(num_boxes2), BLOCK_DIM_Y); + + dim3 blocks(blocks_x, blocks_y); + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + box_iou_rotated_cuda_kernel<<>>( + num_boxes1, + num_boxes2, + data1, + data2, + (scalar_t*)ious.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + } + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + if (transpose) { + return ious.view(shape).t(); + } else { + return ious.view(shape); + } +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..b54a5dde2ca11a74d29c4d8adb7fe1634f5baf9c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h @@ -0,0 +1,370 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
+#pragma once + +#include +#include + +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace detectron2 { + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +// R: result type. can be different from input type +template +HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { + return static_cast(A.x) * static_cast(B.y) - + static_cast(B.x) * static_cast(A.y); +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices( + const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. == 0.01745329251 + double theta = box.a * 0.01745329251; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points( + const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // When computing the intersection area, it doesn't hurt if we have + // more (duplicated/approximate) intersections/vertices than needed, + // while it can cause drastic difference if we miss an intersection/vertex. + // Therefore, we add an epsilon to relax the comparisons between + // the float point numbers that decide the intersection points. 
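+  // Editorial note (not in the upstream source): the relaxed test below
+  // accepts t in [-EPS, 1 + EPS] rather than the exact [0, 1], so edges that
+  // merely touch (t = 0 or t = 1 up to float rounding) still produce an
+  // intersection point.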
+  double EPS = 1e-5;
+
+  // Line test - test all line combos for intersection
+  int num = 0; // number of intersections
+  for (int i = 0; i < 4; i++) {
+    for (int j = 0; j < 4; j++) {
+      // Solve for 2x2 Ax=b
+      T det = cross_2d<T>(vec2[j], vec1[i]);
+
+      // This takes care of parallel lines
+      if (fabs(det) <= 1e-14) {
+        continue;
+      }
+
+      auto vec12 = pts2[j] - pts1[i];
+
+      T t1 = cross_2d<T>(vec2[j], vec12) / det;
+      T t2 = cross_2d<T>(vec1[i], vec12) / det;
+
+      if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) {
+        intersections[num++] = pts1[i] + vec1[i] * t1;
+      }
+    }
+  }
+
+  // Check for vertices of rect1 inside rect2
+  {
+    const auto& AB = vec2[0];
+    const auto& DA = vec2[3];
+    auto ABdotAB = dot_2d<T>(AB, AB);
+    auto ADdotAD = dot_2d<T>(DA, DA);
+    for (int i = 0; i < 4; i++) {
+      // assume ABCD is the rectangle, and P is the point to be judged
+      // P is inside ABCD iff. P's projection on AB lies within AB
+      // and P's projection on AD lies within AD
+
+      auto AP = pts1[i] - pts2[0];
+
+      auto APdotAB = dot_2d<T>(AP, AB);
+      auto APdotAD = -dot_2d<T>(AP, DA);
+
+      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+          (APdotAD < ADdotAD + EPS)) {
+        intersections[num++] = pts1[i];
+      }
+    }
+  }
+
+  // Reverse the check - check for vertices of rect2 inside rect1
+  {
+    const auto& AB = vec1[0];
+    const auto& DA = vec1[3];
+    auto ABdotAB = dot_2d<T>(AB, AB);
+    auto ADdotAD = dot_2d<T>(DA, DA);
+    for (int i = 0; i < 4; i++) {
+      auto AP = pts2[i] - pts1[0];
+
+      auto APdotAB = dot_2d<T>(AP, AB);
+      auto APdotAD = -dot_2d<T>(AP, DA);
+
+      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+          (APdotAD < ADdotAD + EPS)) {
+        intersections[num++] = pts2[i];
+      }
+    }
+  }
+
+  return num;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int convex_hull_graham(
+    const Point<T> (&p)[24],
+    const int& num_in,
+    Point<T> (&q)[24],
+    bool shift_to_zero = false) {
+  assert(num_in >= 2);
+
+  // Step 1:
+  // Find point with minimum y
+  // if more than 1 points have the same minimum y,
+  // pick the one with the minimum x.
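+  // Editorial worked example (not in the upstream source): for the points
+  // (1, 2), (0, 1) and (2, 1), the pivot below is (0, 1) -- (0, 1) and (2, 1)
+  // tie on the minimum y, and the tie is broken by the smaller x.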
+ int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 + // compute distance to origin before sort, and sort them together with the + // points + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + std::sort( + q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { + T temp = cross_2d(A, B); + if (fabs(temp) < 1e-6) { + return dot_2d(A, A) < dot_2d(B, B); + } else { + return temp > 0; + } + }); + // compute distance to origin after sort, since the points are now different. + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. + // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1) { + auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; + // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - + // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we + // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means + // round to nearest floating point). + if (q1.x * q2.y >= q2.x * q1.y) + m--; + else + break; + } + // Using double also helps, but float can solve the issue for now. + // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) + // >= 0) { + // m--; + // } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. 
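+  // Editorial note (not in the upstream source): callers that only need the
+  // area, such as rotated_boxes_intersection further down, pass
+  // shift_to_zero = true and skip the translation back, since polygon_area
+  // is translation invariant.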
+ if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection( + const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the contour area. + int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +} // namespace + +template +HOST_DEVICE_INLINE T +single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + T area1 = box1.w * box1.h; + T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + T intersection = rotated_boxes_intersection(box1, box2); + T iou = intersection / (area1 + area2 - intersection); + return iou; +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.cpp @@ -0,0 +1,507 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include "cocoeval.h" +#include +#include +#include +#include + +using namespace pybind11::literals; + +namespace detectron2 { + +namespace COCOeval { + +// Sort detections from highest score to lowest, such that +// detection_instances[detection_sorted_indices[t]] >= +// detection_instances[detection_sorted_indices[t+1]]. 
Use stable_sort to match +// original COCO API +void SortInstancesByDetectionScore( + const std::vector& detection_instances, + std::vector* detection_sorted_indices) { + detection_sorted_indices->resize(detection_instances.size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_instances](size_t j1, size_t j2) { + return detection_instances[j1].score > detection_instances[j2].score; + }); +} + +// Partition the ground truth objects based on whether or not to ignore them +// based on area +void SortInstancesByIgnore( + const std::array& area_range, + const std::vector& ground_truth_instances, + std::vector* ground_truth_sorted_indices, + std::vector* ignores) { + ignores->clear(); + ignores->reserve(ground_truth_instances.size()); + for (auto o : ground_truth_instances) { + ignores->push_back( + o.ignore || o.area < area_range[0] || o.area > area_range[1]); + } + + ground_truth_sorted_indices->resize(ground_truth_instances.size()); + std::iota( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + 0); + std::stable_sort( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + [&ignores](size_t j1, size_t j2) { + return (int)(*ignores)[j1] < (int)(*ignores)[j2]; + }); +} + +// For each IOU threshold, greedily match each detected instance to a ground +// truth instance (if possible) and store the results +void MatchDetectionsToGroundTruth( + const std::vector& detection_instances, + const std::vector& detection_sorted_indices, + const std::vector& ground_truth_instances, + const std::vector& ground_truth_sorted_indices, + const std::vector& ignores, + const std::vector>& ious, + const std::vector& iou_thresholds, + const std::array& area_range, + ImageEvaluation* results) { + // Initialize memory to store return data matches and ignore + const int num_iou_thresholds = iou_thresholds.size(); + const int num_ground_truth = ground_truth_sorted_indices.size(); + const int num_detections = detection_sorted_indices.size(); + std::vector ground_truth_matches( + num_iou_thresholds * num_ground_truth, 0); + std::vector& detection_matches = results->detection_matches; + std::vector& detection_ignores = results->detection_ignores; + std::vector& ground_truth_ignores = results->ground_truth_ignores; + detection_matches.resize(num_iou_thresholds * num_detections, 0); + detection_ignores.resize(num_iou_thresholds * num_detections, false); + ground_truth_ignores.resize(num_ground_truth); + for (auto g = 0; g < num_ground_truth; ++g) { + ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; + } + + for (auto t = 0; t < num_iou_thresholds; ++t) { + for (auto d = 0; d < num_detections; ++d) { + // information about best match so far (match=-1 -> unmatched) + double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); + int match = -1; + for (auto g = 0; g < num_ground_truth; ++g) { + // if this ground truth instance is already matched and not a + // crowd, it cannot be matched to another detection + if (ground_truth_matches[t * num_ground_truth + g] > 0 && + !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { + continue; + } + + // if detected instance matched to a regular ground truth + // instance, we can break on the first ground truth instance + // tagged as ignore (because they are sorted by the ignore tag) + if (match >= 0 && !ground_truth_ignores[match] && + ground_truth_ignores[g]) 
{ + break; + } + + // if IOU overlap is the best so far, store the match appropriately + if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { + best_iou = ious[d][ground_truth_sorted_indices[g]]; + match = g; + } + } + // if match was made, store id of match for both detection and + // ground truth + if (match >= 0) { + detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; + detection_matches[t * num_detections + d] = + ground_truth_instances[ground_truth_sorted_indices[match]].id; + ground_truth_matches[t * num_ground_truth + match] = + detection_instances[detection_sorted_indices[d]].id; + } + + // set unmatched detections outside of area range to ignore + const InstanceAnnotation& detection = + detection_instances[detection_sorted_indices[d]]; + detection_ignores[t * num_detections + d] = + detection_ignores[t * num_detections + d] || + (detection_matches[t * num_detections + d] == 0 && + (detection.area < area_range[0] || detection.area > area_range[1])); + } + } + + // store detection score results + results->detection_scores.resize(detection_sorted_indices.size()); + for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { + results->detection_scores[d] = + detection_instances[detection_sorted_indices[d]].score; + } +} + +std::vector EvaluateImages( + const std::vector>& area_ranges, + int max_detections, + const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const ImageCategoryInstances& + image_category_ground_truth_instances, + const ImageCategoryInstances& + image_category_detection_instances) { + const int num_area_ranges = area_ranges.size(); + const int num_images = image_category_ground_truth_instances.size(); + const int num_categories = + image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; + std::vector detection_sorted_indices; + std::vector ground_truth_sorted_indices; + std::vector ignores; + std::vector results_all( + num_images * num_area_ranges * num_categories); + + // Store results for each image, category, and area range combination. 
Results
+  // for each IOU threshold are packed into the same ImageEvaluation object
+  for (auto i = 0; i < num_images; ++i) {
+    for (auto c = 0; c < num_categories; ++c) {
+      const std::vector<InstanceAnnotation>& ground_truth_instances =
+          image_category_ground_truth_instances[i][c];
+      const std::vector<InstanceAnnotation>& detection_instances =
+          image_category_detection_instances[i][c];
+
+      SortInstancesByDetectionScore(
+          detection_instances, &detection_sorted_indices);
+      if ((int)detection_sorted_indices.size() > max_detections) {
+        detection_sorted_indices.resize(max_detections);
+      }
+
+      for (size_t a = 0; a < area_ranges.size(); ++a) {
+        SortInstancesByIgnore(
+            area_ranges[a],
+            ground_truth_instances,
+            &ground_truth_sorted_indices,
+            &ignores);
+
+        MatchDetectionsToGroundTruth(
+            detection_instances,
+            detection_sorted_indices,
+            ground_truth_instances,
+            ground_truth_sorted_indices,
+            ignores,
+            image_category_ious[i][c],
+            iou_thresholds,
+            area_ranges[a],
+            &results_all
+                [c * num_area_ranges * num_images + a * num_images + i]);
+      }
+    }
+  }
+
+  return results_all;
+}
+
+// Convert a python list to a vector
+template <typename T>
+std::vector<T> list_to_vec(const py::list& l) {
+  std::vector<T> v(py::len(l));
+  for (int i = 0; i < (int)py::len(l); ++i) {
+    v[i] = l[i].cast<T>();
+  }
+  return v;
+}
+
+// Helper function to Accumulate()
+// Considers the evaluation results applicable to a particular category, area
+// range, and max_detections parameter setting, which begin at
+// evaluations[evaluation_index]. Extracts a sorted list of length n of all
+// applicable detection instances concatenated across all images in the dataset,
+// which are represented by the outputs evaluation_indices, detection_scores,
+// image_detection_indices, and detection_sorted_indices--all of which are
+// length n. evaluation_indices[i] stores the applicable index into
+// evaluations[] for instance i, which has detection score detection_score[i],
+// and is the image_detection_indices[i]'th of the list of detections
+// for the image containing i.
detection_sorted_indices[] defines a sorted +// permutation of the 3 other outputs +int BuildSortedDetectionList( + const std::vector& evaluations, + const int64_t evaluation_index, + const int64_t num_images, + const int max_detections, + std::vector* evaluation_indices, + std::vector* detection_scores, + std::vector* detection_sorted_indices, + std::vector* image_detection_indices) { + assert(evaluations.size() >= evaluation_index + num_images); + + // Extract a list of object instances of the applicable category, area + // range, and max detections requirements such that they can be sorted + image_detection_indices->clear(); + evaluation_indices->clear(); + detection_scores->clear(); + image_detection_indices->reserve(num_images * max_detections); + evaluation_indices->reserve(num_images * max_detections); + detection_scores->reserve(num_images * max_detections); + int num_valid_ground_truth = 0; + for (auto i = 0; i < num_images; ++i) { + const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; + + for (int d = 0; + d < (int)evaluation.detection_scores.size() && d < max_detections; + ++d) { // detected instances + evaluation_indices->push_back(evaluation_index + i); + image_detection_indices->push_back(d); + detection_scores->push_back(evaluation.detection_scores[d]); + } + for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { + if (!ground_truth_ignore) { + ++num_valid_ground_truth; + } + } + } + + // Sort detections by decreasing score, using stable sort to match + // python implementation + detection_sorted_indices->resize(detection_scores->size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_scores](size_t j1, size_t j2) { + return (*detection_scores)[j1] > (*detection_scores)[j2]; + }); + + return num_valid_ground_truth; +} + +// Helper function to Accumulate() +// Compute a precision recall curve given a sorted list of detected instances +// encoded in evaluations, evaluation_indices, detection_scores, +// detection_sorted_indices, image_detection_indices (see +// BuildSortedDetectionList()). Using vectors precisions and recalls +// and temporary storage, output the results into precisions_out, recalls_out, +// and scores_out, which are large buffers containing many precion/recall curves +// for all possible parameter settings, with precisions_out_index and +// recalls_out_index defining the applicable indices to store results. 
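+// Editorial sketch (not in the upstream source): walking the detections in
+// decreasing score order, after the k-th detection
+//   recall_k    = TP_k / num_valid_ground_truth
+//   precision_k = TP_k / (TP_k + FP_k)
+// The precision list is then made non-increasing from right to left before it
+// is sampled at each recall threshold.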
+void ComputePrecisionRecallCurve( + const int64_t precisions_out_index, + const int64_t precisions_out_stride, + const int64_t recalls_out_index, + const std::vector& recall_thresholds, + const int iou_threshold_index, + const int num_iou_thresholds, + const int num_valid_ground_truth, + const std::vector& evaluations, + const std::vector& evaluation_indices, + const std::vector& detection_scores, + const std::vector& detection_sorted_indices, + const std::vector& image_detection_indices, + std::vector* precisions, + std::vector* recalls, + std::vector* precisions_out, + std::vector* scores_out, + std::vector* recalls_out) { + assert(recalls_out->size() > recalls_out_index); + + // Compute precision/recall for each instance in the sorted list of detections + int64_t true_positives_sum = 0, false_positives_sum = 0; + precisions->clear(); + recalls->clear(); + precisions->reserve(detection_sorted_indices.size()); + recalls->reserve(detection_sorted_indices.size()); + assert(!evaluations.empty() || detection_sorted_indices.empty()); + for (auto detection_sorted_index : detection_sorted_indices) { + const ImageEvaluation& evaluation = + evaluations[evaluation_indices[detection_sorted_index]]; + const auto num_detections = + evaluation.detection_matches.size() / num_iou_thresholds; + const auto detection_index = iou_threshold_index * num_detections + + image_detection_indices[detection_sorted_index]; + assert(evaluation.detection_matches.size() > detection_index); + assert(evaluation.detection_ignores.size() > detection_index); + const int64_t detection_match = + evaluation.detection_matches[detection_index]; + const bool detection_ignores = + evaluation.detection_ignores[detection_index]; + const auto true_positive = detection_match > 0 && !detection_ignores; + const auto false_positive = detection_match == 0 && !detection_ignores; + if (true_positive) { + ++true_positives_sum; + } + if (false_positive) { + ++false_positives_sum; + } + + const double recall = + static_cast(true_positives_sum) / num_valid_ground_truth; + recalls->push_back(recall); + const int64_t num_valid_detections = + true_positives_sum + false_positives_sum; + const double precision = num_valid_detections > 0 + ? static_cast(true_positives_sum) / num_valid_detections + : 0.0; + precisions->push_back(precision); + } + + (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; + + for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { + if ((*precisions)[i] > (*precisions)[i - 1]) { + (*precisions)[i - 1] = (*precisions)[i]; + } + } + + // Sample the per instance precision/recall list at each recall threshold + for (size_t r = 0; r < recall_thresholds.size(); ++r) { + // first index in recalls >= recall_thresholds[r] + std::vector::iterator low = std::lower_bound( + recalls->begin(), recalls->end(), recall_thresholds[r]); + size_t precisions_index = low - recalls->begin(); + + const auto results_ind = precisions_out_index + r * precisions_out_stride; + assert(results_ind < precisions_out->size()); + assert(results_ind < scores_out->size()); + if (precisions_index < precisions->size()) { + (*precisions_out)[results_ind] = (*precisions)[precisions_index]; + (*scores_out)[results_ind] = + detection_scores[detection_sorted_indices[precisions_index]]; + } else { + (*precisions_out)[results_ind] = 0; + (*scores_out)[results_ind] = 0; + } + } +} +py::dict Accumulate( + const py::object& params, + const std::vector& evaluations) { + const std::vector recall_thresholds = + list_to_vec(params.attr("recThrs")); + const std::vector max_detections = + list_to_vec(params.attr("maxDets")); + const int num_iou_thresholds = py::len(params.attr("iouThrs")); + const int num_recall_thresholds = py::len(params.attr("recThrs")); + const int num_categories = params.attr("useCats").cast() == 1 + ? py::len(params.attr("catIds")) + : 1; + const int num_area_ranges = py::len(params.attr("areaRng")); + const int num_max_detections = py::len(params.attr("maxDets")); + const int num_images = py::len(params.attr("imgIds")); + + std::vector precisions_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + std::vector recalls_out( + num_iou_thresholds * num_categories * num_area_ranges * + num_max_detections, + -1); + std::vector scores_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + + // Consider the list of all detected instances in the entire dataset in one + // large list. evaluation_indices, detection_scores, + // image_detection_indices, and detection_sorted_indices all have the same + // length as this list, such that each entry corresponds to one detected + // instance + std::vector evaluation_indices; // indices into evaluations[] + std::vector detection_scores; // detection scores of each instance + std::vector detection_sorted_indices; // sorted indices of all + // instances in the dataset + std::vector + image_detection_indices; // indices into the list of detected instances in + // the same image as each instance + std::vector precisions, recalls; + + for (auto c = 0; c < num_categories; ++c) { + for (auto a = 0; a < num_area_ranges; ++a) { + for (auto m = 0; m < num_max_detections; ++m) { + // The COCO PythonAPI assumes evaluations[] (the return value of + // COCOeval::EvaluateImages() is one long list storing results for each + // combination of category, area range, and image id, with categories in + // the outermost loop and images in the innermost loop. 
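+          // Editorial worked example (not in the upstream source): with
+          // num_area_ranges = 4 and num_images = 2, the results for
+          // (c = 1, a = 2) start at evaluations_index
+          // = 1 * 4 * 2 + 2 * 2 = 12, and image i contributes offset i.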
+          const int64_t evaluations_index =
+              c * num_area_ranges * num_images + a * num_images;
+          int num_valid_ground_truth = BuildSortedDetectionList(
+              evaluations,
+              evaluations_index,
+              num_images,
+              max_detections[m],
+              &evaluation_indices,
+              &detection_scores,
+              &detection_sorted_indices,
+              &image_detection_indices);
+
+          if (num_valid_ground_truth == 0) {
+            continue;
+          }
+
+          for (auto t = 0; t < num_iou_thresholds; ++t) {
+            // recalls_out is a flattened vector representing a
+            // num_iou_thresholds X num_categories X num_area_ranges X
+            // num_max_detections matrix
+            const int64_t recalls_out_index =
+                t * num_categories * num_area_ranges * num_max_detections +
+                c * num_area_ranges * num_max_detections +
+                a * num_max_detections + m;
+
+            // precisions_out and scores_out are flattened vectors
+            // representing a num_iou_thresholds X num_recall_thresholds X
+            // num_categories X num_area_ranges X num_max_detections matrix
+            const int64_t precisions_out_stride =
+                num_categories * num_area_ranges * num_max_detections;
+            const int64_t precisions_out_index = t * num_recall_thresholds *
+                    num_categories * num_area_ranges * num_max_detections +
+                c * num_area_ranges * num_max_detections +
+                a * num_max_detections + m;
+
+            ComputePrecisionRecallCurve(
+                precisions_out_index,
+                precisions_out_stride,
+                recalls_out_index,
+                recall_thresholds,
+                t,
+                num_iou_thresholds,
+                num_valid_ground_truth,
+                evaluations,
+                evaluation_indices,
+                detection_scores,
+                detection_sorted_indices,
+                image_detection_indices,
+                &precisions,
+                &recalls,
+                &precisions_out,
+                &scores_out,
+                &recalls_out);
+          }
+        }
+      }
+    }
+
+  time_t rawtime;
+  struct tm local_time;
+  std::array<char, 200> buffer;
+  time(&rawtime);
+#ifdef _WIN32
+  localtime_s(&local_time, &rawtime);
+#else
+  localtime_r(&rawtime, &local_time);
+#endif
+  strftime(
+      buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
+  return py::dict(
+      "params"_a = params,
+      "counts"_a = std::vector<int64_t>(
+          {num_iou_thresholds,
+           num_recall_thresholds,
+           num_categories,
+           num_area_ranges,
+           num_max_detections}),
+      "date"_a = buffer,
+      "precision"_a = precisions_out,
+      "recall"_a = recalls_out,
+      "scores"_a = scores_out);
+}
+
+} // namespace COCOeval
+
+} // namespace detectron2
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.h
new file mode 100644
index 0000000000000000000000000000000000000000..db246e49a026b7cd989b305f4d3d98100be3c912
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cocoeval/cocoeval.h
@@ -0,0 +1,88 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+
+#include <pybind11/numpy.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+#include <vector>
+
+namespace py = pybind11;
+
+namespace detectron2 {
+
+namespace COCOeval {
+
+// Annotation data for a single object instance in an image
+struct InstanceAnnotation {
+  InstanceAnnotation(
+      uint64_t id,
+      double score,
+      double area,
+      bool is_crowd,
+      bool ignore)
+      : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}
+  uint64_t id;
+  double score = 0.;
+  double area = 0.;
+  bool is_crowd = false;
+  bool ignore = false;
+};
+
+// Stores intermediate results for evaluating detection results for a single
+// image that has D detected instances and G ground truth instances. This stores
+// This stores matches between detected and ground truth instances
+struct ImageEvaluation {
+  // For each of the D detected instances, the id of the matched ground truth
+  // instance, or 0 if unmatched
+  std::vector<uint64_t> detection_matches;
+
+  // The detection score of each of the D detected instances
+  std::vector<double> detection_scores;
+
+  // Marks whether or not each of G instances was ignored from evaluation
+  // (e.g., because it's outside area_range)
+  std::vector<bool> ground_truth_ignores;
+
+  // Marks whether or not each of D instances was ignored from evaluation
+  // (e.g., because it's outside aRng)
+  std::vector<bool> detection_ignores;
+};
+
+template <class T>
+using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;
+
+// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For
+// each combination of image, category, area range settings, and IOU
+// thresholds to evaluate, it matches detected instances to ground truth
+// instances and stores the results into a vector of ImageEvaluation results,
+// which will be interpreted by the COCOeval::Accumulate() function to
+// produce precision-recall curves. The parameters of nested vectors have
+// the following semantics:
+//   image_category_ious[i][c][d][g] is the intersection over union of the
+//     d'th detected instance and g'th ground truth instance of
+//     category category_ids[c] in image image_ids[i]
+//   image_category_ground_truth_instances[i][c] is a vector of ground truth
+//     instances in image image_ids[i] of category category_ids[c]
+//   image_category_detection_instances[i][c] is a vector of detected
+//     instances in image image_ids[i] of category category_ids[c]
+std::vector<ImageEvaluation> EvaluateImages(
+    const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples
+    int max_detections,
+    const std::vector<double>& iou_thresholds,
+    const ImageCategoryInstances<std::vector<double>>& image_category_ious,
+    const ImageCategoryInstances<InstanceAnnotation>&
+        image_category_ground_truth_instances,
+    const ImageCategoryInstances<InstanceAnnotation>&
+        image_category_detection_instances);
+
+// C++ implementation of COCOeval.accumulate(), which generates precision
+// recall curves for each set of category, IOU threshold, detection area
+// range, and max number of detections parameters. It is assumed that the
+// parameter evaluations is the return value of the function
+// COCOeval::EvaluateImages(), which was called with the same parameter
+// settings params
+py::dict Accumulate(
+    const py::object& params,
+    const std::vector<ImageEvaluation>& evaluations);
+
+} // namespace COCOeval
+} // namespace detectron2
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cuda_version.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cuda_version.cu
new file mode 100644
index 0000000000000000000000000000000000000000..6dfe1b90c1f65c443681813fd3e3386c9faa3360
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/cuda_version.cu
@@ -0,0 +1,26 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#include <cuda_runtime_api.h>
+
+namespace detectron2 {
+int get_cudart_version() {
+// Not a ROCM platform: Either HIP is not used, or
+// it is used, but platform is not ROCM (i.e. it is CUDA)
+#if !defined(__HIP_PLATFORM_HCC__)
+  return CUDART_VERSION;
+#else
+  int version = 0;
+
+#if HIP_VERSION_MAJOR != 0
+  // Create a convention similar to that of CUDA, as assumed by other
+  // parts of the code.
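+  // For example, CUDA 11.8 reports CUDART_VERSION == 11080
+  // (major * 1000 + minor * 10); the packing below encodes, e.g.,
+  // HIP 5.2 as 5 * 100 + 2 == 502.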
+ + version = HIP_VERSION_MINOR; + version += (HIP_VERSION_MAJOR * 100); +#else + hipRuntimeGetVersion(&version); +#endif + return version; +#endif +} +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..965c1bfd47b58f9802d1c3fd69a5962517b2da61 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv.h @@ -0,0 +1,377 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias); + +#endif + +inline int deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_forward_cuda( + input, + weight, + offset, + output, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled 
with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_input_cuda( + input, + offset, + gradOutput, + gradInput, + gradOffset, + weight, + columns, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int deform_conv_backward_filter( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_parameters_cuda( + input, + offset, + gradOutput, + gradWeight, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + scale, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_forward( + input, + weight, + bias, + ones, + offset, + mask, + output, + columns, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int 
pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + if (grad_output.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_backward( + input, + weight, + bias, + ones, + offset, + mask, + columns, + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..2072bb856ec40b61c3826cead2fb7bb7c971a089 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda.cu @@ -0,0 +1,1223 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp +// Original license: Apache 2.0 + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c +// Original license: Apache 2.0 + +#include + +#include "deform_conv.h" + +#include +#include + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col); + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + 
const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check( + at::Tensor input, + at::Tensor offset, + at::Tensor* gradOutput, + at::Tensor weight, + int kH, + int kW, + int dH, + int dW, + int padH, + int padW, + int dilationH, + int dilationW, + int group, + int deformable_group) { + TORCH_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + TORCH_CHECK( + kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, + kW); + + TORCH_CHECK( + (weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, + kW, + weight.size(2), + weight.size(3)); + + TORCH_CHECK( + dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", + dH, + dW); + + TORCH_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, + dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + TORCH_CHECK( + ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + TORCH_CHECK( + nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, + inputHeight, + inputWidth, + nOutputPlane, + outputHeight, + outputWidth); + + TORCH_CHECK( + input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, + input.size(1)); + + TORCH_CHECK( + (inputHeight + 2 * padH >= kH && inputWidth + 2 * padW >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, + outputWidth, + offset.size(2), + offset.size(3)); + + TORCH_CHECK( + (offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, + gradOutput->size(dimf)); + + TORCH_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, + outputWidth, + gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check( + input, + offset, + NULL, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + at::Tensor output_buffer = at::zeros( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), + group, + 
output_buffer.size(1) / group, + output_buffer.size(2), + output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), + output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), + output_buffer.size(4)}); + + output_buffer = output_buffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + shape_check( + input, + offset, + &gradOutput, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + input = input.view( + 
{batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + gradOffset = gradOffset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + group, + gradOutput.size(1) / group, + gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4), + gradOutput.size(5)}); + + deformable_col2im_coord( + columns, + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradOffset[elt]); + + deformable_col2im( + columns, + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check( + input, + offset, + &gradOutput, + gradWeight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), 
gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = gradOutputBuffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + gradOutputBuffer.copy_(gradOutput); + // gradOutput is not contiguous, so we do reshape (instead of view) next + gradOutputBuffer = gradOutputBuffer.reshape( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + group, + gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), + gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = gradWeight.view( + {group, + gradWeight.size(0) / group, + gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_( + gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), + 1.0, + scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), + gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view( + {gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const 
int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + NULL, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... 
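+    // `ones` is an all-ones plane matching the output spatial size; the
+    // backward pass reduces grad_output against it with a matrix multiply
+    // to accumulate the per-channel bias gradient.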
+ ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = at::zeros( + {channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view( + {output.size(0), + group, + output.size(1) / group, + output.size(2), + output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + // divide into group + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view( + {output.size(0), + output.size(1) * output.size(2), + output.size(3), + output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + &grad_output, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if 
(ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros( + {channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = grad_output.view( + {grad_output.size(0), + group, + grad_output.size(1) / group, + grad_output.size(2), + grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_input[b]); + + // gradient w.r.t. 
weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view( + {group, + grad_weight.size(0) / group, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view( + {grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view( + {grad_output.size(0) * grad_output.size(1), + grad_output.size(2), + grad_output.size(3), + grad_output.size(4)}); +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..f299c7add116685e9c87a187a85ea63f9f808867 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu @@ -0,0 +1,1288 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu +// Original license: Apache 2.0 +// clang-format off + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +#include +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + + +namespace { + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) { + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +} + +template +__device__ scalar_t deformable_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = 
argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + 
((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const scalar_t map_h = i * dilation_h + offset_h; + // const scalar_t map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = deformable_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + 
abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + + +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int 
dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im) { + // todo: make sure parallel_imgs is passed in correctly + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + 
at::Tensor grad_offset) { + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * + deformable_group * parallel_imgs; + int channel_per_deformable_group = + channels * ksize_h * ksize_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + + deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + 2 * ksize_h * ksize_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_); + })); +} + +} // namespace detectron2 + + +template +__device__ scalar_t dmcn_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = 
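The device helper above samples the image at a fractional location by weighting the four surrounding pixels, treating out-of-bounds neighbors as zero. A hedged NumPy mirror of that logic (a reference sketch of mine, not part of the extension):

```python
import math
import numpy as np

def im2col_bilinear(img: np.ndarray, h: float, w: float) -> float:
    """Python mirror of dmcn_im2col_bilinear; img is a (H, W) array."""
    H, W = img.shape
    h_low, w_low = math.floor(h), math.floor(w)
    h_high, w_high = h_low + 1, w_low + 1
    lh, lw = h - h_low, w - w_low
    hh, hw = 1 - lh, 1 - lw

    def at(y, x):  # zero outside the image, as in the CUDA kernel
        return float(img[y, x]) if 0 <= y < H and 0 <= x < W else 0.0

    return (hh * hw * at(h_low, w_low) + hh * lw * at(h_low, w_high)
            + lh * hw * at(h_high, w_low) + lh * lw * at(h_high, w_high))
```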
floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const scalar_t* data_mask_ptr = data_mask + + (b_col * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * 
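For orientation, the offset tensor consumed by the im2col kernel below is laid out per deformable group as (2 * kh * kw, H_col, W_col): channel 2*(i*kw + j) holds the y-offset and 2*(i*kw + j) + 1 the x-offset for kernel tap (i, j), while the mask block has (kh * kw, H_col, W_col). A hedged indexing sketch mirroring the kernel's pointer arithmetic:

```python
def offset_indices(i, j, kernel_w, h_col, w_col, height_col, width_col):
    """Flat indices of the (dy, dx) offset pair for kernel tap (i, j) at output
    location (h_col, w_col), within one deformable group's offset block."""
    base = 2 * (i * kernel_w + j)
    h_idx = (base * height_col + h_col) * width_col + w_col        # data_offset_h_ptr
    w_idx = ((base + 1) * height_col + h_col) * width_col + w_col  # data_offset_w_ptr
    return h_idx, w_idx
```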
width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const float map_h = i * dilation_h + offset_h; + // const float map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = dmcn_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + // data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) 
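Putting the im2col kernel above into scalar form: for each output location and kernel tap (i, j), one thread reads a (dy, dx) offset pair plus a mask value, samples the input bilinearly at the displaced point, and writes val * mask into the column buffer. A hedged single-location sketch, reusing the im2col_bilinear mirror from earlier (all names are mine):

```python
def sample_one(img, offset_h, offset_w, mask,
               i, j, h_col, w_col, stride=1, pad=1, dilation=1):
    """What one CUDA thread computes for one kernel tap (i, j)."""
    h_in = h_col * stride - pad
    w_in = w_col * stride - pad
    h_im = h_in + i * dilation + offset_h   # fractional sampling row
    w_im = w_in + j * dilation + offset_w   # fractional sampling column
    H, W = img.shape
    val = 0.0
    if h_im > -1 and w_im > -1 and h_im < H and w_im < W:
        val = im2col_bilinear(img, h_im, w_im)
    return val * mask
```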
* height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } else { + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear( + data_im_ptr + cnt * height * width, + width, + height, + width, + inv_h, + inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * 
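The col2im backward above inverts that gather: each column-gradient entry is scattered back onto the (at most four) integer pixels whose bilinear weight for the sampled point is nonzero. The CUDA code scans a small dy/dx window around the truncated coordinate and uses atomicAdd because many threads can hit the same pixel; spelled with floor, the equivalent scatter is (a hedged sketch of mine):

```python
import math
import numpy as np

def scatter_bilinear(grad_im: np.ndarray, h: float, w: float, g: float) -> None:
    """Accumulate gradient g at fractional location (h, w) into grad_im (H, W)."""
    H, W = grad_im.shape
    h_low, w_low = math.floor(h), math.floor(w)
    for dy, dx in ((0, 0), (0, 1), (1, 0), (1, 1)):
        y, x = h_low + dy, w_low + dx
        if 0 <= y < H and 0 <= x < W:
            wy = 1 - abs(h - y)   # dmcn_get_gradient_weight, case by case
            wx = 1 - abs(w - x)
            grad_im[y, x] += wy * wx * g  # atomicAdd in the CUDA kernel
```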
height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask + [(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + + +namespace detectron2 { + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + data_mask_, + height_im, + width_im, + kernel_h, + kenerl_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_im2col_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + data_mask_, + 
channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + scalar_t* grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + 2 * kernel_h * kernel_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_, + grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_coord_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated.h b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated.h new file mode 100644 index 0000000000000000000000000000000000000000..12aca388e47b12dafd20999f2991a9d42f4b904b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated.h @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
+#pragma once
+#include <ATen/ATen.h>
+
+namespace detectron2 {
+
+at::Tensor nms_rotated_cpu(
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    const double iou_threshold);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor nms_rotated_cuda(
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    const double iou_threshold);
+#endif
+
+// Interface for Python
+// inline is needed to prevent multiple function definitions when this header is
+// included by different cpps
+inline at::Tensor nms_rotated(
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    const double iou_threshold) {
+  assert(dets.device().is_cuda() == scores.device().is_cuda());
+  if (dets.device().is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+    return nms_rotated_cuda(
+        dets.contiguous(), scores.contiguous(), iou_threshold);
+#else
+    AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+  }
+
+  return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold);
+}
+
+} // namespace detectron2
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d7556e645b604aa83d86cc702b783fd8ecedffcc
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp
@@ -0,0 +1,75 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include "../box_iou_rotated/box_iou_rotated_utils.h"
+#include "nms_rotated.h"
+
+namespace detectron2 {
+
+template <typename scalar_t>
+at::Tensor nms_rotated_cpu_kernel(
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    const double iou_threshold) {
+  // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel,
+  // however, the code in this function is much shorter because
+  // we delegate the IoU computation for rotated boxes to
+  // the single_box_iou_rotated function in box_iou_rotated_utils.h
+  AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor");
+  AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor");
+  AT_ASSERTM(
+      dets.scalar_type() == scores.scalar_type(),
+      "dets should have the same type as scores");
+
+  if (dets.numel() == 0) {
+    return at::empty({0}, dets.options().dtype(at::kLong));
+  }
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+
+  auto ndets = dets.size(0);
+  at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte));
+  at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong));
+
+  auto suppressed = suppressed_t.data_ptr<uint8_t>();
+  auto keep = keep_t.data_ptr<int64_t>();
+  auto order = order_t.data_ptr<int64_t>();
+
+  int64_t num_to_keep = 0;
+
+  for (int64_t _i = 0; _i < ndets; _i++) {
+    auto i = order[_i];
+    if (suppressed[i] == 1) {
+      continue;
+    }
+
+    keep[num_to_keep++] = i;
+
+    for (int64_t _j = _i + 1; _j < ndets; _j++) {
+      auto j = order[_j];
+      if (suppressed[j] == 1) {
+        continue;
+      }
+
+      auto ovr = single_box_iou_rotated<scalar_t>(
+          dets[i].data_ptr<scalar_t>(), dets[j].data_ptr<scalar_t>());
+      if (ovr >= iou_threshold) {
+        suppressed[j] = 1;
+      }
+    }
+  }
+  return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep);
+}
+
+at::Tensor nms_rotated_cpu(
+    // input must be contiguous
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    const double iou_threshold) {
+  auto result = at::empty({0}, dets.options());
+
+  AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] {
+    result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
+  });
+  return result;
+}
+
+} // namespace detectron2
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..2a3db5c62e7a2da52ccf5bac980653c943d630fd
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu
@@ -0,0 +1,145 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+#ifdef WITH_CUDA
+#include "../box_iou_rotated/box_iou_rotated_utils.h"
+#endif
+// TODO avoid this when pytorch supports "same directory" hipification
+#ifdef WITH_HIP
+#include "box_iou_rotated/box_iou_rotated_utils.h"
+#endif
+
+using namespace detectron2;
+
+namespace {
+int const threadsPerBlock = sizeof(unsigned long long) * 8;
+}
+
+template <typename T>
+__global__ void nms_rotated_cuda_kernel(
+    const int n_boxes,
+    const double iou_threshold,
+    const T* dev_boxes,
+    unsigned long long* dev_mask) {
+  // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
+
+  const int row_start = blockIdx.y;
+  const int col_start = blockIdx.x;
+
+  // if (row_start > col_start) return;
+
+  const int row_size =
+      min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
+  const int col_size =
+      min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
+
+  // Compared to nms_cuda_kernel, where each box is represented with 4 values
+  // (x1, y1, x2, y2), each rotated box is represented with 5 values
+  // (x_center, y_center, width, height, angle_degrees) here.
+  __shared__ T block_boxes[threadsPerBlock * 5];
+  if (threadIdx.x < col_size) {
+    block_boxes[threadIdx.x * 5 + 0] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
+    block_boxes[threadIdx.x * 5 + 1] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
+    block_boxes[threadIdx.x * 5 + 2] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
+    block_boxes[threadIdx.x * 5 + 3] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
+    block_boxes[threadIdx.x * 5 + 4] =
+        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
+  }
+  __syncthreads();
+
+  if (threadIdx.x < row_size) {
+    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
+    const T* cur_box = dev_boxes + cur_box_idx * 5;
+    int i = 0;
+    unsigned long long t = 0;
+    int start = 0;
+    if (row_start == col_start) {
+      start = threadIdx.x + 1;
+    }
+    for (i = start; i < col_size; i++) {
+      // Instead of devIoU used by original horizontal nms, here
+      // we use the single_box_iou_rotated function from box_iou_rotated_utils.h
+      if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
+          iou_threshold) {
+        t |= 1ULL << i;
+      }
+    }
+    const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
+    dev_mask[cur_box_idx * col_blocks + col_start] = t;
+  }
+}
+
+namespace detectron2 {
+
+at::Tensor nms_rotated_cuda(
+    // input must be contiguous
+    const at::Tensor& dets,
+    const at::Tensor& scores,
+    double iou_threshold) {
+  // using scalar_t = float;
+  AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor");
+  AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor");
+  at::cuda::CUDAGuard device_guard(dets.device());
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+  auto dets_sorted = dets.index_select(0, order_t);
+
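The CPU kernel in nms_rotated_cpu.cpp above is the textbook greedy loop: visit boxes in descending score order, keep a box if it has not yet been suppressed, then suppress every remaining box whose rotated IoU with it reaches the threshold. A hedged pure-Python rendering (the IoU routine is passed in, standing in for single_box_iou_rotated):

```python
def greedy_nms(boxes, scores, iou_threshold, iou_fn):
    """boxes: sequence of boxes; iou_fn(a, b) -> float in [0, 1]."""
    order = sorted(range(len(boxes)), key=lambda k: scores[k], reverse=True)
    suppressed = [False] * len(boxes)
    keep = []
    for idx, i in enumerate(order):
        if suppressed[i]:
            continue
        keep.append(i)
        for j in order[idx + 1:]:
            if not suppressed[j] and iou_fn(boxes[i], boxes[j]) >= iou_threshold:
                suppressed[j] = True
    return keep
```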
+ auto dets_num = dets.size(0); + + const int col_blocks = + at::cuda::ATenCeilDiv(static_cast(dets_num), threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES( + dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { + nms_rotated_cuda_kernel<<>>( + dets_num, + iou_threshold, + dets_sorted.data_ptr(), + (unsigned long long*)mask.data_ptr()); + }); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data_ptr(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} + +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/vision.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/vision.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c9a2cd4f20e6f58be1c5783d67c64232dd59b560 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/csrc/vision.cpp @@ -0,0 +1,117 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +#include +#include "ROIAlignRotated/ROIAlignRotated.h" +#include "box_iou_rotated/box_iou_rotated.h" +#include "cocoeval/cocoeval.h" +#include "deformable/deform_conv.h" +#include "nms_rotated/nms_rotated.h" + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +extern int get_cudart_version(); +#endif + +std::string get_cuda_version() { +#if defined(WITH_CUDA) || defined(WITH_HIP) + std::ostringstream oss; + +#if defined(WITH_CUDA) + oss << "CUDA "; +#else + oss << "HIP "; +#endif + + // copied from + // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 + auto printCudaStyleVersion = [&](int v) { + oss << (v / 1000) << "." << (v / 10 % 100); + if (v % 10 != 0) { + oss << "." << (v % 10); + } + }; + printCudaStyleVersion(get_cudart_version()); + return oss.str(); +#else // neither CUDA nor HIP + return std::string("not available"); +#endif +} + +bool has_cuda() { +#if defined(WITH_CUDA) + return true; +#else + return false; +#endif +} + +// similar to +// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp +std::string get_compiler_version() { + std::ostringstream ss; +#if defined(__GNUC__) +#ifndef __clang__ + +#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) +#error "GCC >= 4.9 is required!" +#endif + + { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } +#endif +#endif + +#if defined(__clang_major__) + { + ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 
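On the GPU side, each block compares one 64-box row chunk against one 64-box column chunk and packs the "overlaps above threshold" bits for a row box into one unsigned 64-bit word; the host loop above then walks boxes in score order and ORs each kept box's row into a running "removed" bitmap. A hedged Python rendering of that host-side unpack (mask is one list of col_blocks words per box, as the kernel produces):

```python
def unpack_keep(mask, n_boxes, threads_per_block=64):
    """mask[i][j]: 64-bit word of boxes suppressed by box i in column block j."""
    col_blocks = (n_boxes + threads_per_block - 1) // threads_per_block
    removed = [0] * col_blocks
    keep = []
    for i in range(n_boxes):
        nblock, inblock = divmod(i, threads_per_block)
        if not (removed[nblock] >> inblock) & 1:
            keep.append(i)
            for j in range(nblock, col_blocks):  # OR in this box's suppression row
                removed[j] |= mask[i][j]
    return keep
```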
+ << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); + m.def("has_cuda", &has_cuda, "has_cuda"); + + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def( + "deform_conv_backward_input", + &deform_conv_backward_input, + "deform_conv_backward_input"); + m.def( + "deform_conv_backward_filter", + &deform_conv_backward_filter, + "deform_conv_backward_filter"); + m.def( + "modulated_deform_conv_forward", + &modulated_deform_conv_forward, + "modulated_deform_conv_forward"); + m.def( + "modulated_deform_conv_backward", + &modulated_deform_conv_backward, + "modulated_deform_conv_backward"); + + m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); + m.def( + "COCOevalEvaluateImages", + &COCOeval::EvaluateImages, + "COCOeval::EvaluateImages"); + pybind11::class_(m, "InstanceAnnotation") + .def(pybind11::init()); + pybind11::class_(m, "ImageEvaluation") + .def(pybind11::init<>()); +} + +TORCH_LIBRARY(detectron2, m) { + m.def("nms_rotated", &nms_rotated); + m.def("box_iou_rotated", &box_iou_rotated); + m.def("roi_align_rotated_forward", &ROIAlignRotated_forward); + m.def("roi_align_rotated_backward", &ROIAlignRotated_backward); +} +} // namespace detectron2 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/deform_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/deform_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..9a8a039649bb71634c8da8fbcd7f295356ebadd5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/deform_conv.py @@ -0,0 +1,514 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from functools import lru_cache +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair +from torchvision.ops import deform_conv2d + +from custom_detectron2.utils.develop import create_dummy_class, create_dummy_func + +from .wrappers import _NewEmptyTensorOp + + +class _DeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64, + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) + ) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) + ) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + # TODO: let torchvision support full features of our deformconv. + if deformable_groups != 1: + raise NotImplementedError( + "Deformable Conv with deformable_groups != 1 is not supported on CPUs!" 
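Besides the pybind bindings, the TORCH_LIBRARY(detectron2, ...) block above registers the rotated-box ops with the dispatcher, so once the compiled extension is loaded they are reachable without the Python wrappers. A hedged sketch (assumes this vendored package's extension built successfully; the exact import path is as vendored here):

```python
import torch
from custom_detectron2 import _C  # noqa: F401  (loading the module registers the ops)

dets = torch.tensor([[5.0, 3.0, 4.0, 2.0, 0.0]])  # (cx, cy, w, h, angle_degrees)
scores = torch.tensor([0.9])
keep = torch.ops.detectron2.nms_rotated(dets, scores, 0.5)
print(keep)  # tensor([0])
```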
+ ) + return deform_conv2d( + input, offset, weight, stride=stride, padding=padding, dilation=dilation + ) + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_filter( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step, + ) + + return grad_input, grad_offset, grad_weight, None, None, None, None, None, None + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + "x".join(map(str, output_size)) + ) + ) + return output_size + + @staticmethod + @lru_cache(maxsize=128) + def _cal_im2col_step(input_size, default_size): + """ + Calculate proper im2col step size, which should be divisible by input_size and not larger + than prefer_size. Meanwhile the step size should be as large as possible to be more + efficient. So we choose the largest one among all divisors of input_size which are smaller + than prefer_size. + :param input_size: input batch size . + :param default_size: default preferred im2col step size. + :return: the largest proper step size. 
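The CPU fallback above defers to torchvision's deform_conv2d, which expects offsets laid out as (N, 2 * kh * kw * groups, H_out, W_out), one (dy, dx) pair per kernel tap. A usage sketch against the real torchvision API (shapes are standard; the specific sizes are mine):

```python
import torch
from torchvision.ops import deform_conv2d

N, C_in, H, W = 2, 8, 16, 16
C_out, kh, kw = 4, 3, 3
x = torch.randn(N, C_in, H, W)
weight = torch.randn(C_out, C_in, kh, kw)
# With stride 1, padding 1, dilation 1 the output stays H x W, and the offset
# tensor carries 2 * kh * kw channels (times deformable_groups in general).
offset = torch.zeros(N, 2 * kh * kw, H, W)
out = deform_conv2d(x, offset, weight, padding=1)
assert out.shape == (N, C_out, H, W)  # zero offsets reduce to a plain conv
```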
+ """ + if input_size <= default_size: + return input_size + best_step = 1 + for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): + if input_size % step == 0: + if input_size // step <= default_size: + return input_size // step + best_step = step + + return best_step + + +class _ModulatedDeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + if ( + weight.requires_grad + or mask.requires_grad + or offset.requires_grad + or input.requires_grad + ): + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + if not ctx.with_bias: + grad_bias = None + + return ( + grad_input, + grad_offset, + grad_mask, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + ) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = ( + height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) + ) // ctx.stride + 1 + width_out = ( + width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) + ) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = _DeformConv.apply +modulated_deform_conv = _ModulatedDeformConv.apply + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False, + norm=None, + activation=None, + ): + """ + Deformable convolution from :paper:`deformconv`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( + in_channels, groups + ) + assert ( + out_channels % groups == 0 + ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) + ) + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + + def forward(self, x, offset): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = deform_conv( + x, + offset, + self.weight, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=False" + return tmpstr + + +class ModulatedDeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True, + norm=None, + activation=None, + ): + """ + Modulated deformable convolution from :paper:`deformconv2`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
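In practice the offsets and masks fed to these modules come from a small ordinary conv over the same features. A hedged wiring sketch for the modulated variant (the predictor conv and tensor splitting follow the common DCNv2 pattern; the ModulatedDeformConv call itself needs the compiled CUDA extension, so it is left commented):

```python
import torch
from torch import nn

C, kh, kw, dg = 64, 3, 3, 1
feat = torch.randn(2, C, 32, 32)
# One predictor conv emits dy, dx and mask logits for every kernel tap.
pred = nn.Conv2d(C, dg * 3 * kh * kw, kernel_size=3, padding=1)
o1, o2, mask = torch.chunk(pred(feat), 3, dim=1)
offset = torch.cat((o1, o2), dim=1)   # (N, 2*kh*kw*dg, H, W)
mask = torch.sigmoid(mask)            # (N, kh*kw*dg, H, W), values in [0, 1]
# dcn = ModulatedDeformConv(C, C, 3, padding=1, deformable_groups=dg)
# out = dcn(feat, offset, mask)
```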
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x, offset, mask): + if x.numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = modulated_deform_conv( + x, + offset, + mask, + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr + + +try: + from custom_detectron2 import _C +except ImportError: + # TODO: register ops natively so there is no need to import _C. + _msg = "detectron2 is not compiled successfully, please build following the instructions!" + _args = ("detectron2._C", _msg) + DeformConv = create_dummy_class("DeformConv", *_args) + ModulatedDeformConv = create_dummy_class("ModulatedDeformConv", *_args) + deform_conv = create_dummy_func("deform_conv", *_args) + modulated_deform_conv = create_dummy_func("modulated_deform_conv", *_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/losses.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..850a852a2f0986d4d1ce89a526d96db42c76e44f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/losses.py @@ -0,0 +1,133 @@ +import math +import torch + + +def diou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Distance Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. 
+ eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # Eqn. (7) + loss = 1 - iou + (distance / diag_len) + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +def ciou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Complete Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # width and height of boxes + w_pred = x2 - x1 + h_pred = y2 - y1 + w_gt = x2g - x1g + h_gt = y2g - y1g + v = (4 / (math.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) + with torch.no_grad(): + alpha = v / (1 - iou + v + eps) + + # Eqn. 
(10) + loss = 1 - iou + (distance / diag_len) + alpha * v + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/mask_ops.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..990d04abbb120e40fe07a21d024dfead471bc998 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/mask_ops.py @@ -0,0 +1,275 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Tuple +import torch +from PIL import Image +from torch.nn import functional as F + +__all__ = ["paste_masks_in_image"] + + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. +GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit + + +def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True): + """ + Args: + masks: N, 1, H, W + boxes: N, 4 + img_h, img_w (int): + skip_empty (bool): only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + if skip_empty == False, a mask of shape (N, img_h, img_w) + if skip_empty == True, a mask of shape (N, h', w'), and the slice + object for the corresponding region. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. + device = masks.device + + if skip_empty and not torch.jit.is_scripting(): + x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( + dtype=torch.int32 + ) + x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + if not torch.jit.is_scripting(): + if not masks.dtype.is_floating_point: + masks = masks.float() + img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False) + + if skip_empty and not torch.jit.is_scripting(): + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () + + +# Annotate boxes as Tensor (but not Boxes) in order to use scripting +@torch.jit.script_if_tracing +def paste_masks_in_image( + masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5 +): + """ + Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Note: + This is a complicated but more accurate implementation. 
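Both losses above take XYXY boxes and collapse toward the plain IoU loss when the centers coincide (and, for CIoU, when the aspect ratios match): identical boxes give a loss near 0, while small, far-apart boxes approach 2. A hedged usage sketch (import path as vendored here):

```python
import torch
from custom_detectron2.layers.losses import diou_loss, ciou_loss  # assumed path

pred = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
gt = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
print(diou_loss(pred, gt))                    # per-box losses, shape (1,)
print(ciou_loss(pred, gt, reduction="mean"))  # scalar tensor
```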
In actual deployment, it is + often enough to use a faster but less accurate implementation. + See :func:`paste_mask_in_image_old` in this file for an alternative implementation. + + Args: + masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of + detected object instances in the image and Hmask, Wmask are the mask width and mask + height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. + boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). + boxes[i] and masks[i] correspond to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. img_masks[i] is a binary mask for object instance i. + """ + + assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" + N = len(masks) + if N == 0: + return masks.new_empty((0,) + image_shape, dtype=torch.uint8) + if not isinstance(boxes, torch.Tensor): + boxes = boxes.tensor + device = boxes.device + assert len(boxes) == N, boxes.shape + + img_h, img_w = image_shape + + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == "cpu" or torch.jit.is_scripting(): + # CPU is most efficient when they are pasted one by one with skip_empty=True + # so that it performs minimal number of operations. + num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, but may have memory issue + # int(img_h) because shape may be tensors in tracing + num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) + assert ( + num_chunks <= N + ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + img_masks = torch.zeros( + N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 + ) + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" + ) + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + if torch.jit.is_scripting(): # Scripting does not use the optimized codepath + img_masks[inds] = masks_chunk + else: + img_masks[(inds,) + spatial_inds] = masks_chunk + return img_masks + + +# The below are the original paste function (from Detectron1) which has +# larger quantization error. +# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. + + +def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): + """ + Paste a single mask in an image. + This is a per-box implementation of :func:`paste_masks_in_image`. + This function has larger quantization error due to incorrect pixel + modeling and is not used any more. + + Args: + mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single + object instance. Values are in [0, 1]. + box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners + of the object instance. + img_h, img_w (int): Image height and width. + threshold (float): Mask binarization threshold in [0, 1]. 
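The chunking above bounds the peak memory of the pasted float masks: N full-image float masks cost N * img_h * img_w * 4 bytes, so the instance axis is split into ceil(total_bytes / GPU_MEM_LIMIT) chunks. A quick check of that arithmetic (helper name is mine):

```python
import math

BYTES_PER_FLOAT = 4
GPU_MEM_LIMIT = 1024 ** 3  # 1 GB, as defined above

def num_paste_chunks(n, img_h, img_w):
    return int(math.ceil(n * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))

# 500 instances on an 800 x 1333 image -> ~2.1 GB of float masks -> 2 chunks
assert num_paste_chunks(500, 800, 1333) == 2
```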
+ + Returns: + im_mask (Tensor): + The resized and binarized object mask pasted into the original + image plane (a tensor of shape (img_h, img_w)). + """ + # Conversion from continuous box coordinates to discrete pixel coordinates + # via truncation (cast to int32). This determines which pixels to paste the + # mask onto. + box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion + # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to + # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 + # pixels (not x1 - x0 pixels). + samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width + samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height + + # Resample the mask from it's original grid to the new samples_w x samples_h grid + mask = Image.fromarray(mask.cpu().numpy()) + mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) + mask = np.array(mask, copy=False) + + if threshold >= 0: + mask = np.array(mask > threshold, dtype=np.uint8) + mask = torch.from_numpy(mask) + else: + # for visualization and debugging, we also + # allow it to return an unmodified mask + mask = torch.from_numpy(mask * 255).to(torch.uint8) + + im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, img_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, img_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) + ] + return im_mask + + +# Our pixel modeling requires extrapolation for any continuous +# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, +# we would like this extrapolation to be an interpolation between boundary values and zero, +# instead of using absolute zero or boundary values. +# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: +# masks, scale = pad_masks(masks[:, 0, :, :], 1) +# boxes = scale_boxes(boxes.tensor, scale) + + +def pad_masks(masks, padding): + """ + Args: + masks (tensor): A tensor of shape (B, M, M) representing B masks. + padding (int): Number of cells to pad on all sides. + + Returns: + The padded masks and the scale factor of the padding size / original size. + """ + B = masks.shape[0] + M = masks.shape[-1] + pad2 = 2 * padding + scale = float(M + pad2) / M + padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) + padded_masks[:, padding:-padding, padding:-padding] = masks + return padded_masks, scale + + +def scale_boxes(boxes, scale): + """ + Args: + boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 + coords representing the corners x0, y0, x1, y1, + scale (float): The box scaling factor. + + Returns: + Scaled boxes. + """ + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + scaled_boxes = torch.zeros_like(boxes) + scaled_boxes[:, 0] = x_c - w_half + scaled_boxes[:, 2] = x_c + w_half + scaled_boxes[:, 1] = y_c - h_half + scaled_boxes[:, 3] = y_c + h_half + return scaled_boxes + + +@torch.jit.script_if_tracing +def _paste_masks_tensor_shape( + masks: torch.Tensor, + boxes: torch.Tensor, + image_shape: Tuple[torch.Tensor, torch.Tensor], + threshold: float = 0.5, +): + """ + A wrapper of paste_masks_in_image where image_shape is Tensor. + During tracing, shapes might be tensors instead of ints. 
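+    Calling ``int()`` on such a tensor under tracing would record the value as
+    a fixed constant in the graph.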
The Tensor->int + conversion should be scripted rather than traced. + """ + return paste_masks_in_image(masks, boxes, (int(image_shape[0]), int(image_shape[1])), threshold) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/nms.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..1019e7f4c8c58f2def34a019e4c3a0573c5f69bb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/nms.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import torch +from torchvision.ops import boxes as box_ops +from torchvision.ops import nms # noqa . for compatibility + + +def batched_nms( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Same as torchvision.ops.boxes.batched_nms, but with float(). + """ + assert boxes.shape[-1] == 4 + # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311) + # to decide whether to use coordinate trick or for loop to implement batched_nms. So we + # just call it directly. + # Fp16 does not have enough range for batched NMS, so adding float(). + return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold) + + +# Note: this function (nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def nms_rotated(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float): + """ + Performs non-maximum suppression (NMS) on the rotated boxes according + to their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as + RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they + can be representing completely different objects in certain tasks, e.g., OCR. + + As for the question of whether rotated-NMS should treat them as faraway boxes + even though their IOU is 1, it depends on the application and/or ground truth annotation. + + As an extreme example, consider a single character v and the square box around it. + + If the angle is 0 degree, the object (text) would be read as 'v'; + + If the angle is 90 degrees, the object (text) would become '>'; + + If the angle is 180 degrees, the object (text) would become '^'; + + If the angle is 270/-90 degrees, the object (text) would become '<' + + All of these cases have IoU of 1 to each other, and rotated NMS that only + uses IoU as criterion would only keep one of them with the highest score - + which, practically, still makes sense in most cases because typically + only one of theses orientations is the correct one. Also, it does not matter + as much if the box is only used to classify the object (instead of transcribing + them with a sequential OCR recognition model) later. + + On the other hand, when we use IoU to filter proposals that are close to the + ground truth during training, we should definitely take the angle into account if + we know the ground truth is labeled with the strictly correct orientation (as in, + upside-down words are annotated with -180 degrees even though they can be covered + with a 0/90/-90 degree box, etc.) + + The way the original dataset is annotated also matters. 
For example, if the dataset + is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, + we can estimate a minimum rotated bounding box to this polygon, but there's no way + we can tell the correct angle with 100% confidence (as shown above, there could be 4 different + rotated boxes, with angles differed by 90 degrees to each other, covering the exactly + same region). In that case we have to just use IoU to determine the box + proximity (as many detection benchmarks (even for text) do) unless there're other + assumptions we can make (like width is always larger than height, or the object is not + rotated by more than 90 degrees CCW/CW, etc.) + + In summary, not considering angles in rotated NMS seems to be a good option for now, + but we should be aware of its implications. + + Args: + boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in + (x_center, y_center, width, height, angle_degrees) format. + scores (Tensor[N]): Scores for each one of the rotated boxes + iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold + + Returns: + keep (Tensor): int64 tensor with the indices of the elements that have been kept + by Rotated NMS, sorted in decreasing order of scores + """ + return torch.ops.detectron2.nms_rotated(boxes, scores, iou_threshold) + + +# Note: this function (batched_nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future + + +@torch.jit.script_if_tracing +def batched_nms_rotated( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 5]): + boxes where NMS will be performed. They + are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format + scores (Tensor[N]): + scores for each one of the boxes + idxs (Tensor[N]): + indices of the categories for each one of the boxes. + iou_threshold (float): + discards all overlapping boxes + with IoU < iou_threshold + + Returns: + Tensor: + int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + assert boxes.shape[-1] == 5 + + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + boxes = boxes.float() # fp16 does not have enough range for batched NMS + # Strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + + # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, + # which won't handle negative coordinates correctly. + # Here by using min_coordinate we can make sure the negative coordinates are + # correctly handled. 
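+    # Illustrative sketch of the offset trick below: if the coordinates span
+    # 100 units, class-0 boxes keep their centers while class-1 boxes are
+    # shifted by 1 * 101, so boxes from different classes can never overlap
+    # and NMS effectively runs independently per class.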
+ max_coordinate = ( + torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).max() + min_coordinate = ( + torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).min() + offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) + boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes + boxes_for_nms[:, :2] += offsets[:, None] + keep = nms_rotated(boxes_for_nms, scores, iou_threshold) + return keep diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..163462e1f194e1e4100da92d76d9516f7cc22e35 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from torch import nn +from torchvision.ops import roi_align + + +# NOTE: torchvision's RoIAlign has a different default aligned=False +class ROIAlign(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + aligned (bool): if False, use the legacy implementation in + Detectron. If True, align the results more perfectly. + + Note: + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). But the original + roi_align (aligned=False) does not subtract the 0.5 when computing neighboring + pixel indices and therefore it uses pixels with a slightly incorrect alignment + (relative to our pixel model) when performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; see + detectron2/tests/test_roi_align.py for verification. + + The difference does not make a difference to the model's performance if + ROIAlign is used together with conv layers. + """ + super().__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + from torchvision import __version__ + + version = tuple(int(x) for x in __version__.split(".")[:2]) + # https://github.com/pytorch/vision/pull/2438 + assert version >= (0, 7), "Require torchvision >= 0.7" + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. 
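+                Boxes are in the input image's coordinate system; ``roi_align``
+                multiplies them by ``spatial_scale`` to map them onto the feature map.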
+ """ + assert rois.dim() == 2 and rois.size(1) == 5 + if input.is_quantized: + input = input.dequantize() + return roi_align( + input, + rois.to(dtype=input.dtype), + self.output_size, + self.spatial_scale, + self.sampling_ratio, + self.aligned, + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ", aligned=" + str(self.aligned) + tmpstr += ")" + return tmpstr diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align_rotated.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..2a523992e7c736262ad5a158f209aae7875f6f0b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/roi_align_rotated.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + + +class _ROIAlignRotated(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + output = torch.ops.detectron2.roi_align_rotated_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + (rois,) = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = torch.ops.detectron2.roi_align_rotated_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ) + return grad_input, None, None, None, None, None + + +roi_align_rotated = _ROIAlignRotated.apply + + +class ROIAlignRotated(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + + Note: + ROIAlignRotated supports continuous coordinate by default: + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). + """ + super(ROIAlignRotated, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx6 boxes. First column is the index into N. + The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees). + """ + assert rois.dim() == 2 and rois.size(1) == 6 + orig_dtype = input.dtype + if orig_dtype == torch.float16: + input = input.float() + rois = rois.float() + output_size = _pair(self.output_size) + + # Scripting for Autograd is currently unsupported. 
+ # This is a quick fix without having to rewrite code on the C++ side + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return torch.ops.detectron2.roi_align_rotated_forward( + input, rois, self.spatial_scale, output_size[0], output_size[1], self.sampling_ratio + ).to(dtype=orig_dtype) + + return roi_align_rotated( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + ).to(dtype=orig_dtype) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/rotated_boxes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/rotated_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..03f73b3bb99275931a887ad9b2d8c0ac9f412bf3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/rotated_boxes.py @@ -0,0 +1,21 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from __future__ import absolute_import, division, print_function, unicode_literals +import torch + + +def pairwise_iou_rotated(boxes1, boxes2): + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + Arguments: + boxes1 (Tensor[N, 5]) + boxes2 (Tensor[M, 5]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/shape_spec.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/shape_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..8dac3c59b96576710656abebe9b5eac25868abbb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/shape_spec.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ShapeSpec: + """ + A simple structure that contains basic shape specification about a tensor. + It is often used as the auxiliary inputs/outputs of models, + to complement the lack of shape inference ability among pytorch modules. + """ + + channels: Optional[int] = None + height: Optional[int] = None + width: Optional[int] = None + stride: Optional[int] = None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/wrappers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..3736f54335574a90d21e4812bedf982eae6ca681 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/layers/wrappers.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Wrappers around on some nn functions, mainly to support empty tensors. + +Ideally, add support directly in PyTorch to empty tensors in those functions. 
+ +These can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import warnings +from typing import List, Optional +import torch +from torch.nn import functional as F + +from custom_detectron2.utils.env import TORCH_VERSION + + +def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor: + """ + Turn a list of integer scalars or integer Tensor scalars into a vector, + in a way that's both traceable and scriptable. + + In tracing, `x` should be a list of scalar Tensor, so the output can trace to the inputs. + In scripting or eager, `x` should be a list of int. + """ + if torch.jit.is_scripting(): + return torch.as_tensor(x, device=device) + if torch.jit.is_tracing(): + assert all( + [isinstance(t, torch.Tensor) for t in x] + ), "Shape should be tensor during tracing!" + # as_tensor should not be used in tracing because it records a constant + ret = torch.stack(x) + if ret.device != device: # avoid recording a hard-coded device if not necessary + ret = ret.to(device=device) + return ret + return torch.as_tensor(x, device=device) + + +def check_if_dynamo_compiling(): + if TORCH_VERSION >= (1, 14): + from torch._dynamo import is_compiling + + return is_compiling() + else: + return False + + +def cat(tensors: List[torch.Tensor], dim: int = 0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def empty_input_loss_func_wrapper(loss_func): + def wrapped_loss_func(input, target, *, reduction="mean", **kwargs): + """ + Same as `loss_func`, but returns 0 (instead of nan) for empty inputs. + """ + if target.numel() == 0 and reduction == "mean": + return input.sum() * 0.0 # connect the gradient + return loss_func(input, target, reduction=reduction, **kwargs) + + return wrapped_loss_func + + +cross_entropy = empty_input_loss_func_wrapper(F.cross_entropy) + + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, shape), None + + +class Conv2d(torch.nn.Conv2d): + """ + A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. + """ + + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + Args: + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + # torchscript does not support SyncBatchNorm yet + # https://github.com/pytorch/pytorch/issues/40507 + # and we skip these codes in torchscript since: + # 1. currently we only support torchscript in evaluation mode + # 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or + # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. 
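+        # A typical instantiation (illustrative): Conv2d(64, 64, 3, padding=1,
+        # norm=get_norm("GN", 64), activation=F.relu), which runs
+        # conv -> norm -> activation below.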
+ if not torch.jit.is_scripting(): + # Dynamo doesn't support context managers yet + is_dynamo_compiling = check_if_dynamo_compiling() + if not is_dynamo_compiling: + with warnings.catch_warnings(record=True): + if x.numel() == 0 and self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" + + x = F.conv2d( + x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + +ConvTranspose2d = torch.nn.ConvTranspose2d +BatchNorm2d = torch.nn.BatchNorm2d +interpolate = F.interpolate +Linear = torch.nn.Linear + + +def nonzero_tuple(x): + """ + A 'as_tuple=True' version of torch.nonzero to support torchscript. + because of https://github.com/pytorch/pytorch/issues/38718 + """ + if torch.jit.is_scripting(): + if x.dim() == 0: + return x.unsqueeze(0).nonzero().unbind(1) + return x.nonzero().unbind(1) + else: + return x.nonzero(as_tuple=True) + + +@torch.jit.script_if_tracing +def move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor: + """ + Tracing friendly way to cast tensor to another tensor's device. Device will be treated + as constant during tracing, scripting the casting process as whole can workaround this issue. + """ + return src.to(dst.device) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6204208198d813728cf6419e8eef4a733f20c18f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Model Zoo API for Detectron2: a collection of functions to create common model architectures +listed in `MODEL_ZOO.md `_, +and optionally load their pre-trained weights. +""" + +from .model_zoo import get, get_config_file, get_checkpoint_url, get_config + +__all__ = ["get_checkpoint_url", "get", "get_config_file", "get_config"] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/model_zoo.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/model_zoo.py new file mode 100644 index 0000000000000000000000000000000000000000..fdbdf300bbf638ff4e01bc05226e8fd22499a83e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/model_zoo/model_zoo.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import os +from typing import Optional +import pkg_resources +import torch + +from custom_detectron2.checkpoint import DetectionCheckpointer +from custom_detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate +from custom_detectron2.modeling import build_model + + +class _ModelZooUrls(object): + """ + Mapping from names to officially released Detectron2 pre-trained models. 
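+
+    Each key is a config path with its extension stripped; each value is the
+    ``model_id/model_final_{commit}.pkl`` suffix that ``query()`` appends to
+    ``S3_PREFIX`` to form the checkpoint URL.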
+ """ + + S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl + CONFIG_PATH_TO_URL_SUFFIX = { + # COCO Detection with Faster R-CNN + "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl", + "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl", + "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl", + "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl", + "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl", + "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl", + "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl", + "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl", + # COCO Detection with RetinaNet + "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl", + "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl", + "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl", + # COCO Detection with RPN and Fast R-CNN + "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl", + "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl", + "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl", + # COCO Instance Segmentation Baselines with Mask R-CNN + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl", + "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl", + "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa + # New baselines using Large-Scale Jitter and Longer Training Schedule + "new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ": "42047764/model_final_bb69de.pkl", + "new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ": "42047638/model_final_89a8d3.pkl", + "new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ": "42019571/model_final_14d201.pkl", + "new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ": "42025812/model_final_4f7b58.pkl", + "new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ": "42131867/model_final_0bb7ae.pkl", + "new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ": "42073830/model_final_f96b26.pkl", + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ": "42047771/model_final_b7fbab.pkl", # noqa + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ": "42132721/model_final_5d87c1.pkl", # noqa + "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ": "42025447/model_final_f1362d.pkl", # noqa + "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ": "42047784/model_final_6ba57e.pkl", # noqa + "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ": 
"42047642/model_final_27b9c1.pkl", # noqa + "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ": "42045954/model_final_ef3a80.pkl", # noqa + # COCO Person Keypoint Detection Baselines with Keypoint R-CNN + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl", + "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl", + "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl", + "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl", + # COCO Panoptic Segmentation Baselines with Panoptic FPN + "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl", + "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl", + # LVIS Instance Segmentation Baselines with Mask R-CNN + "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa + "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa + "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa + # Cityscapes & Pascal VOC Baselines + "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl", + "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl", + # Other Settings + "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl", + "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl", + "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl", + "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl", + "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl", + "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa + # D1 Comparisons + "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa + "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa + "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl", + } + + @staticmethod + def query(config_path: str) -> Optional[str]: + """ + Args: + config_path: relative config filename + """ + name = config_path.replace(".yaml", "").replace(".py", "") + if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX: + suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name] + return _ModelZooUrls.S3_PREFIX + name + "/" + suffix + return None + + +def get_checkpoint_url(config_path): + """ + Returns the URL to the model trained using the given config + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: a URL to the model + """ + url = _ModelZooUrls.query(config_path) + if url is None: + raise RuntimeError("Pretrained model for {} is not available!".format(config_path)) + return url + + +def get_config_file(config_path): + """ + Returns path to a builtin 
config file. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + + Returns: + str: the real path to the config file. + """ + cfg_file = pkg_resources.resource_filename( + "detectron2.model_zoo", os.path.join("configs", config_path) + ) + if not os.path.exists(cfg_file): + raise RuntimeError("{} not available in Model Zoo!".format(config_path)) + return cfg_file + + +def get_config(config_path, trained: bool = False): + """ + Returns a config object for a model in model zoo. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights. + If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used + instead; this will typically (though not always) initialize a subset of weights using + an ImageNet pre-trained model, while randomly initializing the other weights. + + Returns: + CfgNode or omegaconf.DictConfig: a config object + """ + cfg_file = get_config_file(config_path) + if cfg_file.endswith(".yaml"): + cfg = get_cfg() + cfg.merge_from_file(cfg_file) + if trained: + cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path) + return cfg + elif cfg_file.endswith(".py"): + cfg = LazyConfig.load(cfg_file) + if trained: + url = get_checkpoint_url(config_path) + if "train" in cfg and "init_checkpoint" in cfg.train: + cfg.train.init_checkpoint = url + else: + raise NotImplementedError + return cfg + + +def get(config_path, trained: bool = False, device: Optional[str] = None): + """ + Get a model specified by relative path under Detectron2's official ``configs/`` directory. + + Args: + config_path (str): config file name relative to detectron2's "configs/" + directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" + trained (bool): see :func:`get_config`. + device (str or None): overwrite the device in config, if given. + + Returns: + nn.Module: a detectron2 model. Will be in training mode. + + Example: + :: + from custom_detectron2 import model_zoo + model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True) + """ + cfg = get_config(config_path, trained) + if device is None and not torch.cuda.is_available(): + device = "cpu" + if device is not None and isinstance(cfg, CfgNode): + cfg.MODEL.DEVICE = device + + if isinstance(cfg, CfgNode): + model = build_model(cfg) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + else: + model = instantiate(cfg.model) + if device is not None: + model = model.to(device) + if "train" in cfg and "init_checkpoint" in cfg.train: + DetectionCheckpointer(model).load(cfg.train.init_checkpoint) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..247d457aabadb1264f7b5dc464cae5761b6a78e7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/__init__.py @@ -0,0 +1,64 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from custom_detectron2.layers import ShapeSpec + +from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY +from .backbone import ( + BACKBONE_REGISTRY, + FPN, + Backbone, + ResNet, + ResNetBlockBase, + build_backbone, + build_resnet_backbone, + make_stage, + ViT, + SimpleFeaturePyramid, + get_vit_lr_decay_rate, + MViT, + SwinTransformer, +) +from .meta_arch import ( + META_ARCH_REGISTRY, + SEM_SEG_HEADS_REGISTRY, + GeneralizedRCNN, + PanopticFPN, + ProposalNetwork, + RetinaNet, + SemanticSegmentor, + build_model, + build_sem_seg_head, + FCOS, +) +from .postprocessing import detector_postprocess +from .proposal_generator import ( + PROPOSAL_GENERATOR_REGISTRY, + build_proposal_generator, + RPN_HEAD_REGISTRY, + build_rpn_head, +) +from .roi_heads import ( + ROI_BOX_HEAD_REGISTRY, + ROI_HEADS_REGISTRY, + ROI_KEYPOINT_HEAD_REGISTRY, + ROI_MASK_HEAD_REGISTRY, + ROIHeads, + StandardROIHeads, + BaseMaskRCNNHead, + BaseKeypointRCNNHead, + FastRCNNOutputLayers, + build_box_head, + build_keypoint_head, + build_mask_head, + build_roi_heads, +) +from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA +from .mmdet_wrapper import MMDetBackbone, MMDetDetector + +_EXCLUDE = {"ShapeSpec"} +__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] + + +from custom_detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/anchor_generator.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/anchor_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..6768f7d06415fee4cb707092dddc7bbc764d59b0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/anchor_generator.py @@ -0,0 +1,386 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import collections +import math +from typing import List +import torch +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.layers import ShapeSpec, move_device_like +from custom_detectron2.structures import Boxes, RotatedBoxes +from custom_detectron2.utils.registry import Registry + +ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") +ANCHOR_GENERATOR_REGISTRY.__doc__ = """ +Registry for modules that creates object detection anchors for feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. 
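+
+For example, a custom generator can be registered with the same decorator
+pattern used by the built-in generators below (the class name is illustrative):
+
+    @ANCHOR_GENERATOR_REGISTRY.register()
+    class MyAnchorGenerator(nn.Module):
+        ...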
+""" + + +class BufferList(nn.Module): + """ + Similar to nn.ParameterList, but for buffers + """ + + def __init__(self, buffers): + super().__init__() + for i, buffer in enumerate(buffers): + # Use non-persistent buffer so the values are not saved in checkpoint + self.register_buffer(str(i), buffer, persistent=False) + + def __len__(self): + return len(self._buffers) + + def __iter__(self): + return iter(self._buffers.values()) + + +def _create_grid_offsets( + size: List[int], stride: int, offset: float, target_device_tensor: torch.Tensor +): + grid_height, grid_width = size + shifts_x = move_device_like( + torch.arange(offset * stride, grid_width * stride, step=stride, dtype=torch.float32), + target_device_tensor, + ) + shifts_y = move_device_like( + torch.arange(offset * stride, grid_height * stride, step=stride, dtype=torch.float32), + target_device_tensor, + ) + + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + return shift_x, shift_y + + +def _broadcast_params(params, num_features, name): + """ + If one size (or aspect ratio) is specified and there are multiple feature + maps, we "broadcast" anchors of that single size (or aspect ratio) + over all feature maps. + + If params is list[float], or list[list[float]] with len(params) == 1, repeat + it num_features time. + + Returns: + list[list[float]]: param for each feature + """ + assert isinstance( + params, collections.abc.Sequence + ), f"{name} in anchor generator has to be a list! Got {params}." + assert len(params), f"{name} in anchor generator cannot be empty!" + if not isinstance(params[0], collections.abc.Sequence): # params is list[float] + return [params] * num_features + if len(params) == 1: + return list(params) * num_features + assert len(params) == num_features, ( + f"Got {name} of length {len(params)} in anchor generator, " + f"but the number of input features is {num_features}!" + ) + return params + + +@ANCHOR_GENERATOR_REGISTRY.register() +class DefaultAnchorGenerator(nn.Module): + """ + Compute anchors in the standard ways described in + "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks". + """ + + box_dim: torch.jit.Final[int] = 4 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes + (i.e. sqrt of anchor area) to use for the i-th feature map. + If ``sizes`` is list[float], ``sizes`` is used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. 
+ """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + } + + def _calculate_anchors(self, sizes, aspect_ratios): + cell_anchors = [ + self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) + ] + return BufferList(cell_anchors) + + @property + @torch.jit.unused + def num_cell_anchors(self): + """ + Alias of `num_anchors`. + """ + return self.num_anchors + + @property + @torch.jit.unused + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios and 5 sizes, the number of anchors is 15. + (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) + + In standard RPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes: List[List[int]]): + """ + Returns: + list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4 + """ + anchors = [] + # buffers() not supported by torchscript. use named_buffers() instead + buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()] + for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes and aspect_ratios centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes + in XYXY format. + """ + + # This is different from the anchor generator defined in the original Faster R-CNN + # code or Detectron. They yield the same AP, however the old version defines cell + # anchors in a less natural way with a shift relative to the feature grid and + # quantization that results in slightly different sizes for different aspect ratios. + # See also https://github.com/facebookresearch/Detectron/issues/227 + + anchors = [] + for size in sizes: + area = size**2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... 
+ # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 + anchors.append([x0, y0, x1, y1]) + return torch.tensor(anchors) + + def forward(self, features: List[torch.Tensor]): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[Boxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). + The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [Boxes(x) for x in anchors_over_all_feature_maps] + + +@ANCHOR_GENERATOR_REGISTRY.register() +class RotatedAnchorGenerator(nn.Module): + """ + Compute rotated anchors used by Rotated RPN (RRPN), described in + "Arbitrary-Oriented Scene Text Detection via Rotation Proposals". + """ + + box_dim: int = 5 + """ + the dimension of each anchor box. + """ + + @configurable + def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5): + """ + This interface is experimental. + + Args: + sizes (list[list[float]] or list[float]): + If sizes is list[list[float]], sizes[i] is the list of anchor sizes + (i.e. sqrt of anchor area) to use for the i-th feature map. + If sizes is list[float], the sizes are used for all feature maps. + Anchor sizes are given in absolute lengths in units of + the input image; they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]] or list[float]): list of aspect ratios + (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. + strides (list[int]): stride of each input feature. + angles (list[list[float]] or list[float]): list of angles (in degrees CCW) + to use for anchors. Same "broadcast" rule for `sizes` applies. + offset (float): Relative offset between the center of the first anchor and the top-left + corner of the image. Value has to be in [0, 1). + Recommend to use 0.5, which means half stride. + """ + super().__init__() + + self.strides = strides + self.num_features = len(self.strides) + sizes = _broadcast_params(sizes, self.num_features, "sizes") + aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") + angles = _broadcast_params(angles, self.num_features, "angles") + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles) + + self.offset = offset + assert 0.0 <= self.offset < 1.0, self.offset + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + return { + "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, + "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, + "strides": [x.stride for x in input_shape], + "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, + "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES, + } + + def _calculate_anchors(self, sizes, aspect_ratios, angles): + cell_anchors = [ + self.generate_cell_anchors(size, aspect_ratio, angle).float() + for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) + ] + return BufferList(cell_anchors) + + @property + def num_cell_anchors(self): + """ + Alias of `num_anchors`. 
+ """ + return self.num_anchors + + @property + def num_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios, 2 sizes and 5 angles, the number of anchors is 30. + (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS + and ANCHOR_GENERATOR.ANGLES in config) + + In standard RRPN models, `num_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def _grid_anchors(self, grid_sizes): + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) + zeros = torch.zeros_like(shift_x) + shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) + + anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) + + return anchors + + def generate_cell_anchors( + self, + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.5, 1, 2), + angles=(-90, -60, -30, 0, 30, 60, 90), + ): + """ + Generate a tensor storing canonical anchor boxes, which are all anchor + boxes of different sizes, aspect_ratios, angles centered at (0, 0). + We can later build the set of anchors for a full feature map by + shifting and tiling these tensors (see `meth:_grid_anchors`). + + Args: + sizes (tuple[float]): + aspect_ratios (tuple[float]]): + angles (tuple[float]]): + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) + storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. + """ + anchors = [] + for size in sizes: + area = size**2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + anchors.extend([0, 0, w, h, a] for a in angles) + + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map + (i.e. the cell anchors repeated over all locations in the feature map). + The number of anchors of each feature map is Hi x Wi x num_cell_anchors, + where Hi, Wi are resolution of the feature map divided by anchor stride. + """ + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) + return [RotatedBoxes(x) for x in anchors_over_all_feature_maps] + + +def build_anchor_generator(cfg, input_shape): + """ + Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. + """ + anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME + return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b3358a4061b143c78eba8e7bf81fe9f7ffac1aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip + +from .backbone import Backbone +from .fpn import FPN +from .regnet import RegNet +from .resnet import ( + BasicStem, + ResNet, + ResNetBlockBase, + build_resnet_backbone, + make_stage, + BottleneckBlock, +) +from .vit import ViT, SimpleFeaturePyramid, get_vit_lr_decay_rate +from .mvit import MViT +from .swin import SwinTransformer + +__all__ = [k for k in globals().keys() if not k.startswith("_")] +# TODO can expose more resnet blocks after careful consideration diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/backbone.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..1ac190f7e16ba41c73e8d2a335442b200319b203 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/backbone.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from abc import ABCMeta, abstractmethod +from typing import Dict +import torch.nn as nn + +from custom_detectron2.layers import ShapeSpec + +__all__ = ["Backbone"] + + +class Backbone(nn.Module, metaclass=ABCMeta): + """ + Abstract base class for network backbones. + """ + + def __init__(self): + """ + The `__init__` method of any subclass can specify its own set of arguments. + """ + super().__init__() + + @abstractmethod + def forward(self): + """ + Subclasses must override this method, but adhere to the same return type. + + Returns: + dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor + """ + pass + + @property + def size_divisibility(self) -> int: + """ + Some backbones require the input height and width to be divisible by a + specific integer. This is typically true for encoder / decoder type networks + with lateral connection (e.g., FPN) for which feature maps need to match + dimension in the "bottom up" and "top down" paths. Set to 0 if no specific + input size divisibility is required. + """ + return 0 + + @property + def padding_constraints(self) -> Dict[str, int]: + """ + This property is a generalization of size_divisibility. Some backbones and training + recipes require specific padding constraints, such as enforcing divisibility by a specific + integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter + in :paper:vitdet). `padding_constraints` contains these optional items like: + { + "size_divisibility": int, + "square_size": int, + # Future options are possible + } + `size_divisibility` will read from here if presented and `square_size` indicates the + square padding size if `square_size` > 0. + + TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints + could be generalized as TypedDict (Python 3.8+) to support more types in the future. 
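+
+        Example (illustrative): a ViTDet-style backbone may return
+        ``{"square_size": 1024}`` to request padding to a 1024x1024 square input.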
+ """ + return {} + + def output_shape(self): + """ + Returns: + dict[str->ShapeSpec] + """ + # this is a backward-compatible default + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/build.py new file mode 100644 index 0000000000000000000000000000000000000000..378d6dd9dbb8d1d19646f7ef23b443024dbfeec1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/build.py @@ -0,0 +1,33 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from custom_detectron2.layers import ShapeSpec +from custom_detectron2.utils.registry import Registry + +from .backbone import Backbone + +BACKBONE_REGISTRY = Registry("BACKBONE") +BACKBONE_REGISTRY.__doc__ = """ +Registry for backbones, which extract feature maps from images + +The registered object must be a callable that accepts two arguments: + +1. A :class:`detectron2.config.CfgNode` +2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. + +Registered object must return instance of :class:`Backbone`. +""" + + +def build_backbone(cfg, input_shape=None): + """ + Build a backbone from `cfg.MODEL.BACKBONE.NAME`. + + Returns: + an instance of :class:`Backbone` + """ + if input_shape is None: + input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) + + backbone_name = cfg.MODEL.BACKBONE.NAME + backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) + assert isinstance(backbone, Backbone) + return backbone diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/fpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..476241a28c1876adbdcb0434d3ab7a6060dd4c25 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/fpn.py @@ -0,0 +1,268 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from custom_detectron2.layers import Conv2d, ShapeSpec, get_norm + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY +from .resnet import build_resnet_backbone + +__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] + + +class FPN(Backbone): + """ + This module implements :paper:`FPN`. + It creates pyramid features built on top of some input feature maps. + """ + + _fuse_type: torch.jit.Final[str] + + def __init__( + self, + bottom_up, + in_features, + out_channels, + norm="", + top_block=None, + fuse_type="sum", + square_pad=0, + ): + """ + Args: + bottom_up (Backbone): module representing the bottom up subnetwork. + Must be a subclass of :class:`Backbone`. The multi-scale feature + maps generated by the bottom up network, and listed in `in_features`, + are used to generate FPN levels. + in_features (list[str]): names of the input feature maps coming + from the backbone to which FPN is attached. For example, if the + backbone produces ["res2", "res3", "res4"], any *contiguous* sublist + of these may be used; order must be from high to low resolution. + out_channels (int): number of channels in the output feature maps. + norm (str): the normalization to use. 
+            top_block (nn.Module or None): if provided, an extra operation will
+                be performed on the output of the last (smallest resolution)
+                FPN output, and the result will extend the result list. The top_block
+                further downsamples the feature map. It must have an attribute
+                "num_levels", meaning the number of extra FPN levels added by
+                this block, and "in_feature", which is a string representing
+                its input feature (e.g., p5).
+            fuse_type (str): type for fusing the top down features and the lateral
+                ones. It can be "sum" (default), which sums up element-wise; or "avg",
+                which takes the element-wise mean of the two.
+            square_pad (int): If > 0, require input images to be padded to a specific square size.
+        """
+        super(FPN, self).__init__()
+        assert isinstance(bottom_up, Backbone)
+        assert in_features, in_features
+
+        # Feature map strides and channels from the bottom up network (e.g. ResNet)
+        input_shapes = bottom_up.output_shape()
+        strides = [input_shapes[f].stride for f in in_features]
+        in_channels_per_feature = [input_shapes[f].channels for f in in_features]
+
+        _assert_strides_are_log2_contiguous(strides)
+        lateral_convs = []
+        output_convs = []
+
+        use_bias = norm == ""
+        for idx, in_channels in enumerate(in_channels_per_feature):
+            lateral_norm = get_norm(norm, out_channels)
+            output_norm = get_norm(norm, out_channels)
+
+            lateral_conv = Conv2d(
+                in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
+            )
+            output_conv = Conv2d(
+                out_channels,
+                out_channels,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                bias=use_bias,
+                norm=output_norm,
+            )
+            weight_init.c2_xavier_fill(lateral_conv)
+            weight_init.c2_xavier_fill(output_conv)
+            stage = int(math.log2(strides[idx]))
+            self.add_module("fpn_lateral{}".format(stage), lateral_conv)
+            self.add_module("fpn_output{}".format(stage), output_conv)
+
+            lateral_convs.append(lateral_conv)
+            output_convs.append(output_conv)
+        # Place convs into top-down order (from low to high resolution)
+        # to make the top-down computation in forward clearer.
+        self.lateral_convs = lateral_convs[::-1]
+        self.output_convs = output_convs[::-1]
+        self.top_block = top_block
+        self.in_features = tuple(in_features)
+        self.bottom_up = bottom_up
+        # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
+        self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
+        # top block output feature maps.
+        if self.top_block is not None:
+            for s in range(stage, stage + self.top_block.num_levels):
+                self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
+
+        self._out_features = list(self._out_feature_strides.keys())
+        self._out_feature_channels = {k: out_channels for k in self._out_features}
+        self._size_divisibility = strides[-1]
+        self._square_pad = square_pad
+        assert fuse_type in {"avg", "sum"}
+        self._fuse_type = fuse_type
+
+    @property
+    def size_divisibility(self):
+        return self._size_divisibility
+
+    @property
+    def padding_constraints(self):
+        return {"square_size": self._square_pad}
+
+    def forward(self, x):
+        """
+        Args:
+            x (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
+                feature map tensor for each feature level in high to low resolution order.
+
+        Returns:
+            dict[str->Tensor]:
+                mapping from feature map name to FPN feature map tensor
+                in high to low resolution order. Returned feature names follow the FPN
+                paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
+                ["p2", "p3", ..., "p6"].
+ """ + bottom_up_features = self.bottom_up(x) + results = [] + prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]]) + results.append(self.output_convs[0](prev_features)) + + # Reverse feature maps into top-down order (from low to high resolution) + for idx, (lateral_conv, output_conv) in enumerate( + zip(self.lateral_convs, self.output_convs) + ): + # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336 + # Therefore we loop over all modules but skip the first one + if idx > 0: + features = self.in_features[-idx - 1] + features = bottom_up_features[features] + top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest") + lateral_features = lateral_conv(features) + prev_features = lateral_features + top_down_features + if self._fuse_type == "avg": + prev_features /= 2 + results.insert(0, output_conv(prev_features)) + + if self.top_block is not None: + if self.top_block.in_feature in bottom_up_features: + top_block_in_feature = bottom_up_features[self.top_block.in_feature] + else: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return {f: res for f, res in zip(self._out_features, results)} + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + +def _assert_strides_are_log2_contiguous(strides): + """ + Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". + """ + for i, stride in enumerate(strides[1:], 1): + assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format( + stride, strides[i - 1] + ) + + +class LastLevelMaxPool(nn.Module): + """ + This module is used in the original FPN to generate a downsampled + P6 feature from P5. + """ + + def __init__(self): + super().__init__() + self.num_levels = 1 + self.in_feature = "p5" + + def forward(self, x): + return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] + + +class LastLevelP6P7(nn.Module): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7 from + C5 feature. + """ + + def __init__(self, in_channels, out_channels, in_feature="res5"): + super().__init__() + self.num_levels = 2 + self.in_feature = in_feature + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + weight_init.c2_xavier_fill(module) + + def forward(self, c5): + p6 = self.p6(c5) + p7 = self.p7(F.relu(p6)) + return [p6, p7] + + +@BACKBONE_REGISTRY.register() +def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. + """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelMaxPool(), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone + + +@BACKBONE_REGISTRY.register() +def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
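+
+    The extra P6/P7 levels are produced by :class:`LastLevelP6P7` from the
+    "res5" feature map (its default ``in_feature``).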
+    """
+    bottom_up = build_resnet_backbone(cfg, input_shape)
+    in_features = cfg.MODEL.FPN.IN_FEATURES
+    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
+    in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
+    backbone = FPN(
+        bottom_up=bottom_up,
+        in_features=in_features,
+        out_channels=out_channels,
+        norm=cfg.MODEL.FPN.NORM,
+        top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
+        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
+    )
+    return backbone
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/mvit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/mvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..a12e00ff1819a09f90a02715f6c56aa35cbd6d63
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/mvit.py
@@ -0,0 +1,448 @@
+import logging
+import numpy as np
+import torch
+import torch.nn as nn
+
+from .backbone import Backbone
+from .utils import (
+    PatchEmbed,
+    add_decomposed_rel_pos,
+    get_abs_pos,
+    window_partition,
+    window_unpartition,
+)
+
+logger = logging.getLogger(__name__)
+
+
+__all__ = ["MViT"]
+
+
+def attention_pool(x, pool, norm=None):
+    # (B, H, W, C) -> (B, C, H, W)
+    x = x.permute(0, 3, 1, 2)
+    x = pool(x)
+    # (B, C, H1, W1) -> (B, H1, W1, C)
+    x = x.permute(0, 2, 3, 1)
+    if norm:
+        x = norm(x)
+
+    return x
+
+
+class MultiScaleAttention(nn.Module):
+    """Multiscale Multi-head Attention block."""
+
+    def __init__(
+        self,
+        dim,
+        dim_out,
+        num_heads,
+        qkv_bias=True,
+        norm_layer=nn.LayerNorm,
+        pool_kernel=(3, 3),
+        stride_q=1,
+        stride_kv=1,
+        residual_pooling=True,
+        window_size=0,
+        use_rel_pos=False,
+        rel_pos_zero_init=True,
+        input_size=None,
+    ):
+        """
+        Args:
+            dim (int): Number of input channels.
+            dim_out (int): Number of output channels.
+            num_heads (int): Number of attention heads.
+            qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            norm_layer (nn.Module): Normalization layer.
+            pool_kernel (tuple): kernel size for qkv pooling layers.
+            stride_q (int): stride size for q pooling layer.
+            stride_kv (int): stride size for kv pooling layer.
+            residual_pooling (bool): If true, enable residual pooling.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+            input_size (int or None): Input resolution.
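+            window_size (int): Window size for window attention; 0 (the default)
+                means no window partitioning, i.e. global attention.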
+ """ + super().__init__() + self.num_heads = num_heads + head_dim = dim_out // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # qkv pooling + pool_padding = [k // 2 for k in pool_kernel] + dim_conv = dim_out // num_heads + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_q, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_kv, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + pool_kernel, + stride=stride_kv, + padding=pool_padding, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + + self.window_size = window_size + if window_size: + self.q_win_size = window_size // stride_q + self.kv_win_size = window_size // stride_kv + self.residual_pooling = residual_pooling + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + # initialize relative positional embeddings + assert input_size[0] == input_size[1] + size = input_size[0] + rel_dim = 2 * max(size // stride_q, size // stride_kv) - 1 + self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) + + if not rel_pos_zero_init: + nn.init.trunc_normal_(self.rel_pos_h, std=0.02) + nn.init.trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x): + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H, W, C) + qkv = self.qkv(x).reshape(B, H, W, 3, self.num_heads, -1).permute(3, 0, 4, 1, 2, 5) + # q, k, v with shape (B * nHead, H, W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H, W, -1).unbind(0) + + q = attention_pool(q, self.pool_q, self.norm_q) + k = attention_pool(k, self.pool_k, self.norm_k) + v = attention_pool(v, self.pool_v, self.norm_v) + + ori_q = q + if self.window_size: + q, q_hw_pad = window_partition(q, self.q_win_size) + k, kv_hw_pad = window_partition(k, self.kv_win_size) + v, _ = window_partition(v, self.kv_win_size) + q_hw = (self.q_win_size, self.q_win_size) + kv_hw = (self.kv_win_size, self.kv_win_size) + else: + q_hw = q.shape[1:3] + kv_hw = k.shape[1:3] + + q = q.view(q.shape[0], np.prod(q_hw), -1) + k = k.view(k.shape[0], np.prod(kv_hw), -1) + v = v.view(v.shape[0], np.prod(kv_hw), -1) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, q_hw, kv_hw) + + attn = attn.softmax(dim=-1) + x = attn @ v + + x = x.view(x.shape[0], q_hw[0], q_hw[1], -1) + + if self.window_size: + x = window_unpartition(x, self.q_win_size, q_hw_pad, ori_q.shape[1:3]) + + if self.residual_pooling: + x += ori_q + + H, W = x.shape[1], x.shape[2] + x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +class MultiScaleBlock(nn.Module): + """Multiscale Transformer blocks""" + + def __init__( + self, + dim, + dim_out, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + qkv_pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + residual_pooling=True, + window_size=0, + use_rel_pos=False, + rel_pos_zero_init=True, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + dim_out (int): Number of output channels. 
+            num_heads (int): Number of attention heads in the MViT block.
+            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+            qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            drop_path (float): Stochastic depth rate.
+            norm_layer (nn.Module): Normalization layer.
+            act_layer (nn.Module): Activation layer.
+            qkv_pool_kernel (tuple): kernel size for qkv pooling layers.
+            stride_q (int): stride size for q pooling layer.
+            stride_kv (int): stride size for kv pooling layer.
+            residual_pooling (bool): If true, enable residual pooling.
+            window_size (int): Window size for window attention blocks. If it equals 0,
+                window attention is not used.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+            input_size (int or None): Input resolution.
+        """
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = MultiScaleAttention(
+            dim,
+            dim_out,
+            num_heads=num_heads,
+            qkv_bias=qkv_bias,
+            norm_layer=norm_layer,
+            pool_kernel=qkv_pool_kernel,
+            stride_q=stride_q,
+            stride_kv=stride_kv,
+            residual_pooling=residual_pooling,
+            window_size=window_size,
+            use_rel_pos=use_rel_pos,
+            rel_pos_zero_init=rel_pos_zero_init,
+            input_size=input_size,
+        )
+
+        from custom_timm.models.layers import DropPath, Mlp
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+        self.norm2 = norm_layer(dim_out)
+        self.mlp = Mlp(
+            in_features=dim_out,
+            hidden_features=int(dim_out * mlp_ratio),
+            out_features=dim_out,
+            act_layer=act_layer,
+        )
+
+        if dim != dim_out:
+            self.proj = nn.Linear(dim, dim_out)
+
+        if stride_q > 1:
+            kernel_skip = stride_q + 1
+            padding_skip = int(kernel_skip // 2)
+            self.pool_skip = nn.MaxPool2d(kernel_skip, stride_q, padding_skip, ceil_mode=False)
+
+    def forward(self, x):
+        x_norm = self.norm1(x)
+        x_block = self.attn(x_norm)
+
+        if hasattr(self, "proj"):
+            x = self.proj(x_norm)
+        if hasattr(self, "pool_skip"):
+            x = attention_pool(x, self.pool_skip)
+
+        x = x + self.drop_path(x_block)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+        return x
+
+
+class MViT(Backbone):
+    """
+    This module implements Multiscale Vision Transformer (MViT) backbone in :paper:`mvitv2`.
+    """
+
+    def __init__(
+        self,
+        img_size=224,
+        patch_kernel=(7, 7),
+        patch_stride=(4, 4),
+        patch_padding=(3, 3),
+        in_chans=3,
+        embed_dim=96,
+        depth=16,
+        num_heads=1,
+        last_block_indexes=(0, 2, 11, 15),
+        qkv_pool_kernel=(3, 3),
+        adaptive_kv_stride=4,
+        adaptive_window_size=56,
+        residual_pooling=True,
+        mlp_ratio=4.0,
+        qkv_bias=True,
+        drop_path_rate=0.0,
+        norm_layer=nn.LayerNorm,
+        act_layer=nn.GELU,
+        use_abs_pos=False,
+        use_rel_pos=True,
+        rel_pos_zero_init=True,
+        use_act_checkpoint=False,
+        pretrain_img_size=224,
+        pretrain_use_cls_token=True,
+        out_features=("scale2", "scale3", "scale4", "scale5"),
+    ):
+        """
+        Args:
+            img_size (int): Input image size.
+            patch_kernel (tuple): kernel size for patch embedding.
+            patch_stride (tuple): stride size for patch embedding.
+            patch_padding (tuple): padding size for patch embedding.
+            in_chans (int): Number of input image channels.
+            embed_dim (int): Patch embedding dimension.
+            depth (int): Depth of MViT.
+            num_heads (int): Number of base attention heads in each MViT block.
+            last_block_indexes (tuple): Block indexes for last blocks in each stage.
+            qkv_pool_kernel (tuple): kernel size for qkv pooling layers.
+            adaptive_kv_stride (int): adaptive stride size for kv pooling.
+            adaptive_window_size (int): adaptive window size for window attention blocks.
+            residual_pooling (bool): If true, enable residual pooling.
+            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+            qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            drop_path_rate (float): Stochastic depth rate.
+            norm_layer (nn.Module): Normalization layer.
+            act_layer (nn.Module): Activation layer.
+            use_abs_pos (bool): If True, use absolute positional embeddings.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+            use_act_checkpoint (bool): If True, use activation checkpointing.
+            pretrain_img_size (int): input image size for pretraining models.
+            pretrain_use_cls_token (bool): If True, pretraining models use class token.
+            out_features (tuple): name of the feature maps from each stage.
+        """
+        super().__init__()
+        self.pretrain_use_cls_token = pretrain_use_cls_token
+
+        self.patch_embed = PatchEmbed(
+            kernel_size=patch_kernel,
+            stride=patch_stride,
+            padding=patch_padding,
+            in_chans=in_chans,
+            embed_dim=embed_dim,
+        )
+
+        if use_abs_pos:
+            # Initialize absolute positional embedding with pretrain image size.
+            num_patches = (pretrain_img_size // patch_stride[0]) * (
+                pretrain_img_size // patch_stride[1]
+            )
+            num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
+            self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
+        else:
+            self.pos_embed = None
+
+        # stochastic depth decay rule
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
+        dim_out = embed_dim
+        stride_kv = adaptive_kv_stride
+        window_size = adaptive_window_size
+        input_size = (img_size // patch_stride[0], img_size // patch_stride[1])
+        stage = 2
+        stride = patch_stride[0]
+        self._out_feature_strides = {}
+        self._out_feature_channels = {}
+        self.blocks = nn.ModuleList()
+        for i in range(depth):
+            # Multiply stride_kv by 2 if it's the last block of stage2 and stage3.
+            if i == last_block_indexes[1] or i == last_block_indexes[2]:
+                stride_kv_ = stride_kv * 2
+            else:
+                stride_kv_ = stride_kv
+            # hybrid window attention: global attention in last three stages.
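+            # (a window size of 0 below disables window partitioning, i.e. the
+            # block attends globally; see MultiScaleAttention.forward)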
+ window_size_ = 0 if i in last_block_indexes[1:] else window_size + block = MultiScaleBlock( + dim=embed_dim, + dim_out=dim_out, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + qkv_pool_kernel=qkv_pool_kernel, + stride_q=2 if i - 1 in last_block_indexes else 1, + stride_kv=stride_kv_, + residual_pooling=residual_pooling, + window_size=window_size_, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size, + ) + if use_act_checkpoint: + # TODO: use torch.utils.checkpoint + from fairscale.nn.checkpoint import checkpoint_wrapper + + block = checkpoint_wrapper(block) + self.blocks.append(block) + + embed_dim = dim_out + if i in last_block_indexes: + name = f"scale{stage}" + if name in out_features: + self._out_feature_channels[name] = dim_out + self._out_feature_strides[name] = stride + self.add_module(f"{name}_norm", norm_layer(dim_out)) + + dim_out *= 2 + num_heads *= 2 + stride_kv = max(stride_kv // 2, 1) + stride *= 2 + stage += 1 + if i - 1 in last_block_indexes: + window_size = window_size // 2 + input_size = [s // 2 for s in input_size] + + self._out_features = out_features + self._last_block_indexes = last_block_indexes + + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + x = self.patch_embed(x) + + if self.pos_embed is not None: + x = x + get_abs_pos(self.pos_embed, self.pretrain_use_cls_token, x.shape[1:3]) + + outputs = {} + stage = 2 + for i, blk in enumerate(self.blocks): + x = blk(x) + if i in self._last_block_indexes: + name = f"scale{stage}" + if name in self._out_features: + x_out = getattr(self, f"{name}_norm")(x) + outputs[name] = x_out.permute(0, 3, 1, 2) + stage += 1 + + return outputs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/regnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..7283fa12d365349eda70efc8994e396af4dc6389 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/regnet.py @@ -0,0 +1,452 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Implementation of RegNet models from :paper:`dds` and :paper:`scaling`. + +This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications. +Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify +model loading. +""" + +import numpy as np +from torch import nn + +from custom_detectron2.layers import CNNBlockBase, ShapeSpec, get_norm + +from .backbone import Backbone + +__all__ = [ + "AnyNet", + "RegNet", + "ResStem", + "SimpleStem", + "VanillaBlock", + "ResBasicBlock", + "ResBottleneckBlock", +] + + +def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False): + """Helper for building a conv2d layer.""" + assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." 
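+    # With an odd kernel k, padding (k - 1) // 2 preserves spatial size at stride 1
+    # ("same" padding), which is why even kernels are rejected above.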
+ s, p, g, b = stride, (k - 1) // 2, groups, bias + return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b) + + +def gap2d(): + """Helper for building a global average pooling layer.""" + return nn.AdaptiveAvgPool2d((1, 1)) + + +def pool2d(k, *, stride=1): + """Helper for building a pool2d layer.""" + assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." + return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2) + + +def init_weights(m): + """Performs ResNet-style weight initialization.""" + if isinstance(m, nn.Conv2d): + # Note that there is no bias due to BN + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + m.weight.data.normal_(mean=0.0, std=0.01) + m.bias.data.zero_() + + +class ResStem(CNNBlockBase): + """ResNet stem for ImageNet: 7x7, BN, AF, MaxPool.""" + + def __init__(self, w_in, w_out, norm, activation_class): + super().__init__(w_in, w_out, 4) + self.conv = conv2d(w_in, w_out, 7, stride=2) + self.bn = get_norm(norm, w_out) + self.af = activation_class() + self.pool = pool2d(3, stride=2) + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class SimpleStem(CNNBlockBase): + """Simple stem for ImageNet: 3x3, BN, AF.""" + + def __init__(self, w_in, w_out, norm, activation_class): + super().__init__(w_in, w_out, 2) + self.conv = conv2d(w_in, w_out, 3, stride=2) + self.bn = get_norm(norm, w_out) + self.af = activation_class() + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class SE(nn.Module): + """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.""" + + def __init__(self, w_in, w_se, activation_class): + super().__init__() + self.avg_pool = gap2d() + self.f_ex = nn.Sequential( + conv2d(w_in, w_se, 1, bias=True), + activation_class(), + conv2d(w_se, w_in, 1, bias=True), + nn.Sigmoid(), + ) + + def forward(self, x): + return x * self.f_ex(self.avg_pool(x)) + + +class VanillaBlock(CNNBlockBase): + """Vanilla block: [3x3 conv, BN, Relu] x2.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, _params): + super().__init__(w_in, w_out, stride) + self.a = conv2d(w_in, w_out, 3, stride=stride) + self.a_bn = get_norm(norm, w_out) + self.a_af = activation_class() + self.b = conv2d(w_out, w_out, 3) + self.b_bn = get_norm(norm, w_out) + self.b_af = activation_class() + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class BasicTransform(nn.Module): + """Basic transformation: [3x3 conv, BN, Relu] x2.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, _params): + super().__init__() + self.a = conv2d(w_in, w_out, 3, stride=stride) + self.a_bn = get_norm(norm, w_out) + self.a_af = activation_class() + self.b = conv2d(w_out, w_out, 3) + self.b_bn = get_norm(norm, w_out) + self.b_bn.final_bn = True + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class ResBasicBlock(CNNBlockBase): + """Residual basic block: x + f(x), f = basic transform.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__(w_in, w_out, stride) + self.proj, self.bn = None, None + if (w_in != w_out) or (stride != 1): + self.proj = conv2d(w_in, w_out, 1, stride=stride) + self.bn = get_norm(norm, w_out) + self.f = BasicTransform(w_in, w_out, stride, 
norm, activation_class, params) + self.af = activation_class() + + def forward(self, x): + x_p = self.bn(self.proj(x)) if self.proj else x + return self.af(x_p + self.f(x)) + + +class BottleneckTransform(nn.Module): + """Bottleneck transformation: 1x1, 3x3 [+SE], 1x1.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__() + w_b = int(round(w_out * params["bot_mul"])) + w_se = int(round(w_in * params["se_r"])) + groups = w_b // params["group_w"] + self.a = conv2d(w_in, w_b, 1) + self.a_bn = get_norm(norm, w_b) + self.a_af = activation_class() + self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups) + self.b_bn = get_norm(norm, w_b) + self.b_af = activation_class() + self.se = SE(w_b, w_se, activation_class) if w_se else None + self.c = conv2d(w_b, w_out, 1) + self.c_bn = get_norm(norm, w_out) + self.c_bn.final_bn = True + + def forward(self, x): + for layer in self.children(): + x = layer(x) + return x + + +class ResBottleneckBlock(CNNBlockBase): + """Residual bottleneck block: x + f(x), f = bottleneck transform.""" + + def __init__(self, w_in, w_out, stride, norm, activation_class, params): + super().__init__(w_in, w_out, stride) + self.proj, self.bn = None, None + if (w_in != w_out) or (stride != 1): + self.proj = conv2d(w_in, w_out, 1, stride=stride) + self.bn = get_norm(norm, w_out) + self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params) + self.af = activation_class() + + def forward(self, x): + x_p = self.bn(self.proj(x)) if self.proj else x + return self.af(x_p + self.f(x)) + + +class AnyStage(nn.Module): + """AnyNet stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params): + super().__init__() + for i in range(d): + block = block_class(w_in, w_out, stride, norm, activation_class, params) + self.add_module("b{}".format(i + 1), block) + stride, w_in = 1, w_out + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class AnyNet(Backbone): + """AnyNet model. See :paper:`dds`.""" + + def __init__( + self, + *, + stem_class, + stem_width, + block_class, + depths, + widths, + group_widths, + strides, + bottleneck_ratios, + se_ratio, + activation_class, + freeze_at=0, + norm="BN", + out_features=None, + ): + """ + Args: + stem_class (callable): A callable taking 4 arguments (channels in, channels out, + normalization, callable returning an activation function) that returns another + callable implementing the stem module. + stem_width (int): The number of output channels that the stem produces. + block_class (callable): A callable taking 6 arguments (channels in, channels out, + stride, normalization, callable returning an activation function, a dict of + block-specific parameters) that returns another callable implementing the repeated + block module. + depths (list[int]): Number of blocks in each stage. + widths (list[int]): For each stage, the number of output channels of each block. + group_widths (list[int]): For each stage, the number of channels per group in group + convolution, if the block uses group convolution. + strides (list[int]): The stride that each network stage applies to its input. + bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck + channels to the number of block input channels (or, equivalently, output channels), + if the block uses a bottleneck. 
+            se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation
+                (SE) module to its number of input channels, if the block uses SE.
+            activation_class (callable): A callable taking no arguments that returns another
+                callable implementing an activation function.
+            freeze_at (int): The number of stages at the beginning to freeze.
+                see :meth:`freeze` for detailed explanation.
+            norm (str or callable): normalization for all conv layers.
+                See :func:`layers.get_norm` for supported format.
+            out_features (list[str]): name of the layers whose outputs should
+                be returned in forward. RegNets use "stem" and "s1", "s2", etc. for the stages after
+                the stem. If None, will return the output of the last layer.
+        """
+        super().__init__()
+        self.stem = stem_class(3, stem_width, norm, activation_class)
+
+        current_stride = self.stem.stride
+        self._out_feature_strides = {"stem": current_stride}
+        self._out_feature_channels = {"stem": self.stem.out_channels}
+        self.stages_and_names = []
+        prev_w = stem_width
+
+        for i, (d, w, s, b, g) in enumerate(
+            zip(depths, widths, strides, bottleneck_ratios, group_widths)
+        ):
+            params = {"bot_mul": b, "group_w": g, "se_r": se_ratio}
+            stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params)
+            name = "s{}".format(i + 1)
+            self.add_module(name, stage)
+            self.stages_and_names.append((stage, name))
+            self._out_feature_strides[name] = current_stride = int(
+                current_stride * np.prod([k.stride for k in stage.children()])
+            )
+            self._out_feature_channels[name] = list(stage.children())[-1].out_channels
+            prev_w = w
+
+        self.apply(init_weights)
+
+        if out_features is None:
+            out_features = [name]
+        self._out_features = out_features
+        assert len(self._out_features)
+        children = [x[0] for x in self.named_children()]
+        for out_feature in self._out_features:
+            assert out_feature in children, "Available children: {} does not include {}".format(
+                ", ".join(children), out_feature
+            )
+        self.freeze(freeze_at)
+
+    def forward(self, x):
+        """
+        Args:
+            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
+
+        Returns:
+            dict[str->Tensor]: names and the corresponding features
+        """
+        assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!"
+        outputs = {}
+        x = self.stem(x)
+        if "stem" in self._out_features:
+            outputs["stem"] = x
+        for stage, name in self.stages_and_names:
+            x = stage(x)
+            if name in self._out_features:
+                outputs[name] = x
+        return outputs
+
+    def output_shape(self):
+        return {
+            name: ShapeSpec(
+                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
+            )
+            for name in self._out_features
+        }
+
+    def freeze(self, freeze_at=0):
+        """
+        Freeze the first several stages of the model. Commonly used in fine-tuning.
+
+        Layers that produce the same feature map spatial size are defined as one
+        "stage" by :paper:`FPN`.
+
+        Args:
+            freeze_at (int): number of stages to freeze.
+                `1` means freezing the stem. `2` means freezing the stem and
+                one residual stage, etc.
+ + Returns: + nn.Module: this model itself + """ + if freeze_at >= 1: + self.stem.freeze() + for idx, (stage, _) in enumerate(self.stages_and_names, start=2): + if freeze_at >= idx: + for block in stage.children(): + block.freeze() + return self + + +def adjust_block_compatibility(ws, bs, gs): + """Adjusts the compatibility of widths, bottlenecks, and groups.""" + assert len(ws) == len(bs) == len(gs) + assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs)) + vs = [int(max(1, w * b)) for w, b in zip(ws, bs)] + gs = [int(min(g, v)) for g, v in zip(gs, vs)] + ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)] + vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)] + ws = [int(v / b) for v, b in zip(vs, bs)] + assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs)) + return ws, bs, gs + + +def generate_regnet_parameters(w_a, w_0, w_m, d, q=8): + """Generates per stage widths and depths from RegNet parameters.""" + assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 + # Generate continuous per-block ws + ws_cont = np.arange(d) * w_a + w_0 + # Generate quantized per-block ws + ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) + ws_all = w_0 * np.power(w_m, ks) + ws_all = np.round(np.divide(ws_all, q)).astype(int) * q + # Generate per stage ws and ds (assumes ws_all are sorted) + ws, ds = np.unique(ws_all, return_counts=True) + # Compute number of actual stages and total possible stages + num_stages, total_stages = len(ws), ks.max() + 1 + # Convert numpy arrays to lists and return + ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont)) + return ws, ds, num_stages, total_stages, ws_all, ws_cont + + +class RegNet(AnyNet): + """RegNet model. See :paper:`dds`.""" + + def __init__( + self, + *, + stem_class, + stem_width, + block_class, + depth, + w_a, + w_0, + w_m, + group_width, + stride=2, + bottleneck_ratio=1.0, + se_ratio=0.0, + activation_class=None, + freeze_at=0, + norm="BN", + out_features=None, + ): + """ + Build a RegNet from the parameterization described in :paper:`dds` Section 3.3. + + Args: + See :class:`AnyNet` for arguments that are not listed here. + depth (int): Total number of blocks in the RegNet. + w_a (float): Factor by which block width would increase prior to quantizing block widths + by stage. See :paper:`dds` Section 3.3. + w_0 (int): Initial block width. See :paper:`dds` Section 3.3. + w_m (float): Parameter controlling block width quantization. + See :paper:`dds` Section 3.3. + group_width (int): Number of channels per group in group convolution, if the block uses + group convolution. + bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number + of block input channels (or, equivalently, output channels), if the block uses a + bottleneck. + stride (int): The stride that each network stage applies to its input. 
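+
+        Example (for reference only; the values below approximate the RegNetY-4GF
+        configuration used in detectron2's LSJ baselines, and are not defaults)::
+
+            regnet = RegNet(
+                stem_class=SimpleStem,
+                stem_width=32,
+                block_class=ResBottleneckBlock,
+                depth=22,
+                w_a=31.41,
+                w_0=96,
+                w_m=2.24,
+                group_width=64,
+                se_ratio=0.25,
+                out_features=["s1", "s2", "s3", "s4"],
+            )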
+ """ + ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2] + ss = [stride for _ in ws] + bs = [bottleneck_ratio for _ in ws] + gs = [group_width for _ in ws] + ws, bs, gs = adjust_block_compatibility(ws, bs, gs) + + def default_activation_class(): + return nn.ReLU(inplace=True) + + super().__init__( + stem_class=stem_class, + stem_width=stem_width, + block_class=block_class, + depths=ds, + widths=ws, + strides=ss, + group_widths=gs, + bottleneck_ratios=bs, + se_ratio=se_ratio, + activation_class=default_activation_class + if activation_class is None + else activation_class, + freeze_at=freeze_at, + norm=norm, + out_features=out_features, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c373db499750c537ff884a7d2d3d834e57297050 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/resnet.py @@ -0,0 +1,694 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from custom_detectron2.layers import ( + CNNBlockBase, + Conv2d, + DeformConv, + ModulatedDeformConv, + ShapeSpec, + get_norm, +) + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY + +__all__ = [ + "ResNetBlockBase", + "BasicBlock", + "BottleneckBlock", + "DeformBottleneckBlock", + "BasicStem", + "ResNet", + "make_stage", + "build_resnet_backbone", +] + + +class BasicBlock(CNNBlockBase): + """ + The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`, + with two 3x3 conv layers and a projection shortcut if needed. + """ + + def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): + """ + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + stride (int): Stride for the first conv. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=stride, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + self.conv2 = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + out = self.conv2(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class BottleneckBlock(CNNBlockBase): + """ + The standard bottleneck residual block used by ResNet-50, 101 and 152 + defined in :paper:`ResNet`. It contains 3 conv layers with kernels + 1x1, 3x3, 1x1, and a projection shortcut if needed. 
+ """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + ): + """ + Args: + bottleneck_channels (int): number of output channels for the 3x3 + "bottleneck" conv layers. + num_groups (int): number of groups for the 3x3 conv layer. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + stride_in_1x1 (bool): when stride>1, whether to put stride in the + first 1x1 convolution or the bottleneck 3x3 convolution. + dilation (int): the dilation rate of the 3x3 conv layer. + """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + # The original MSRA ResNet models have stride in the first 1x1 conv + # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have + # stride in the 3x3 conv + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + # Zero-initialize the last normalization in each residual branch, + # so that at the beginning, the residual branch starts with zeros, + # and each residual block behaves like an identity. + # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "For BN layers, the learnable scaling coefficient γ is initialized + # to be 1, except for each residual block's last BN + # where γ is initialized to be 0." + + # nn.init.constant_(self.conv3.norm.weight, 0) + # TODO this somehow hurts performance when training GN models from scratch. + # Add it as an option when we need to use this code to train a backbone. + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + out = self.conv2(out) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class DeformBottleneckBlock(CNNBlockBase): + """ + Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv ` + in the 3x3 convolution. 
+ """ + + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + deform_modulated=False, + deform_num_groups=1, + ): + super().__init__(in_channels, out_channels, stride) + self.deform_modulated = deform_modulated + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + if deform_modulated: + deform_conv_op = ModulatedDeformConv + # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size + offset_channels = 27 + else: + deform_conv_op = DeformConv + offset_channels = 18 + + self.conv2_offset = Conv2d( + bottleneck_channels, + offset_channels * deform_num_groups, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + dilation=dilation, + ) + self.conv2 = deform_conv_op( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + deformable_groups=deform_num_groups, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + nn.init.constant_(self.conv2_offset.weight, 0) + nn.init.constant_(self.conv2_offset.bias, 0) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + if self.deform_modulated: + offset_mask = self.conv2_offset(out) + offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) + offset = torch.cat((offset_x, offset_y), dim=1) + mask = mask.sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class BasicStem(CNNBlockBase): + """ + The standard ResNet stem (layers before the first residual block), + with a conv, relu and max_pool. + """ + + def __init__(self, in_channels=3, out_channels=64, norm="BN"): + """ + Args: + norm (str or callable): norm after the first conv layer. + See :func:`layers.get_norm` for supported format. + """ + super().__init__(in_channels, out_channels, 4) + self.in_channels = in_channels + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + +class ResNet(Backbone): + """ + Implement :paper:`ResNet`. + """ + + def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0): + """ + Args: + stem (nn.Module): a stem module + stages (list[list[CNNBlockBase]]): several (typically 4) stages, + each contains multiple :class:`CNNBlockBase`. 
+ num_classes (None or int): if None, will not perform classification. + Otherwise, will create a linear layer. + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "linear", or "res2" ... + If None, will return the output of the last layer. + freeze_at (int): The number of stages at the beginning to freeze. + see :meth:`freeze` for detailed explanation. + """ + super().__init__() + self.stem = stem + self.num_classes = num_classes + + current_stride = self.stem.stride + self._out_feature_strides = {"stem": current_stride} + self._out_feature_channels = {"stem": self.stem.out_channels} + + self.stage_names, self.stages = [], [] + + if out_features is not None: + # Avoid keeping unused layers in this module. They consume extra memory + # and may cause allreduce to fail + num_stages = max( + [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features] + ) + stages = stages[:num_stages] + for i, blocks in enumerate(stages): + assert len(blocks) > 0, len(blocks) + for block in blocks: + assert isinstance(block, CNNBlockBase), block + + name = "res" + str(i + 2) + stage = nn.Sequential(*blocks) + + self.add_module(name, stage) + self.stage_names.append(name) + self.stages.append(stage) + + self._out_feature_strides[name] = current_stride = int( + current_stride * np.prod([k.stride for k in blocks]) + ) + self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels + self.stage_names = tuple(self.stage_names) # Make it static for scripting + + if num_classes is not None: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.linear = nn.Linear(curr_channels, num_classes) + + # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "The 1000-way fully-connected layer is initialized by + # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." + nn.init.normal_(self.linear.weight, std=0.01) + name = "linear" + + if out_features is None: + out_features = [name] + self._out_features = out_features + assert len(self._out_features) + children = [x[0] for x in self.named_children()] + for out_feature in self._out_features: + assert out_feature in children, "Available children: {}".format(", ".join(children)) + self.freeze(freeze_at) + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + + Returns: + dict[str->Tensor]: names and the corresponding features + """ + assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!" + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for name, stage in zip(self.stage_names, self.stages): + x = stage(x) + if name in self._out_features: + outputs[name] = x + if self.num_classes is not None: + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.linear(x) + if "linear" in self._out_features: + outputs["linear"] = x + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + def freeze(self, freeze_at=0): + """ + Freeze the first several stages of the ResNet. Commonly used in + fine-tuning. + + Layers that produce the same feature map spatial size are defined as one + "stage" by :paper:`FPN`. + + Args: + freeze_at (int): number of stages to freeze. + `1` means freezing the stem. `2` means freezing the stem and + one residual stage, etc. 
+ + Returns: + nn.Module: this ResNet itself + """ + if freeze_at >= 1: + self.stem.freeze() + for idx, stage in enumerate(self.stages, start=2): + if freeze_at >= idx: + for block in stage.children(): + block.freeze() + return self + + @staticmethod + def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs): + """ + Create a list of blocks of the same type that forms one ResNet stage. + + Args: + block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this + stage. A module of this type must not change spatial resolution of inputs unless its + stride != 1. + num_blocks (int): number of blocks in this stage + in_channels (int): input channels of the entire stage. + out_channels (int): output channels of **every block** in the stage. + kwargs: other arguments passed to the constructor of + `block_class`. If the argument name is "xx_per_block", the + argument is a list of values to be passed to each block in the + stage. Otherwise, the same argument is passed to every block + in the stage. + + Returns: + list[CNNBlockBase]: a list of block module. + + Examples: + :: + stage = ResNet.make_stage( + BottleneckBlock, 3, in_channels=16, out_channels=64, + bottleneck_channels=16, num_groups=1, + stride_per_block=[2, 1, 1], + dilations_per_block=[1, 1, 2] + ) + + Usually, layers that produce the same feature map spatial size are defined as one + "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should + all be 1. + """ + blocks = [] + for i in range(num_blocks): + curr_kwargs = {} + for k, v in kwargs.items(): + if k.endswith("_per_block"): + assert len(v) == num_blocks, ( + f"Argument '{k}' of make_stage should have the " + f"same length as num_blocks={num_blocks}." + ) + newk = k[: -len("_per_block")] + assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" + curr_kwargs[newk] = v[i] + else: + curr_kwargs[k] = v + + blocks.append( + block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs) + ) + in_channels = out_channels + return blocks + + @staticmethod + def make_default_stages(depth, block_class=None, **kwargs): + """ + Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152). + If it doesn't create the ResNet variant you need, please use :meth:`make_stage` + instead for fine-grained customization. + + Args: + depth (int): depth of ResNet + block_class (type): the CNN block class. Has to accept + `bottleneck_channels` argument for depth > 50. + By default it is BasicBlock or BottleneckBlock, based on the + depth. + kwargs: + other arguments to pass to `make_stage`. Should not contain + stride and channels, as they are predefined for each depth. + + Returns: + list[list[CNNBlockBase]]: modules in all stages; see arguments of + :class:`ResNet.__init__`. 
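+
+        Example (a sketch; builds a ResNet-50 backbone returning all stages)::
+
+            stages = ResNet.make_default_stages(50, norm="BN")
+            model = ResNet(
+                BasicStem(), stages, out_features=["res2", "res3", "res4", "res5"]
+            )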
+        """
+        num_blocks_per_stage = {
+            18: [2, 2, 2, 2],
+            34: [3, 4, 6, 3],
+            50: [3, 4, 6, 3],
+            101: [3, 4, 23, 3],
+            152: [3, 8, 36, 3],
+        }[depth]
+        if block_class is None:
+            block_class = BasicBlock if depth < 50 else BottleneckBlock
+        if depth < 50:
+            in_channels = [64, 64, 128, 256]
+            out_channels = [64, 128, 256, 512]
+        else:
+            in_channels = [64, 256, 512, 1024]
+            out_channels = [256, 512, 1024, 2048]
+        ret = []
+        for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
+            if depth >= 50:
+                kwargs["bottleneck_channels"] = o // 4
+            ret.append(
+                ResNet.make_stage(
+                    block_class=block_class,
+                    num_blocks=n,
+                    stride_per_block=[s] + [1] * (n - 1),
+                    in_channels=i,
+                    out_channels=o,
+                    **kwargs,
+                )
+            )
+        return ret
+
+
+ResNetBlockBase = CNNBlockBase
+"""
+Alias for backward compatibility.
+"""
+
+
+def make_stage(*args, **kwargs):
+    """
+    Deprecated alias for backward compatibility.
+    """
+    return ResNet.make_stage(*args, **kwargs)
+
+
+@BACKBONE_REGISTRY.register()
+def build_resnet_backbone(cfg, input_shape):
+    """
+    Create a ResNet instance from config.
+
+    Returns:
+        ResNet: a :class:`ResNet` instance.
+    """
+    # need registration of new blocks/stems?
+    norm = cfg.MODEL.RESNETS.NORM
+    stem = BasicStem(
+        in_channels=input_shape.channels,
+        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
+        norm=norm,
+    )
+
+    # fmt: off
+    freeze_at           = cfg.MODEL.BACKBONE.FREEZE_AT
+    out_features        = cfg.MODEL.RESNETS.OUT_FEATURES
+    depth               = cfg.MODEL.RESNETS.DEPTH
+    num_groups          = cfg.MODEL.RESNETS.NUM_GROUPS
+    width_per_group     = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
+    bottleneck_channels = num_groups * width_per_group
+    in_channels         = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
+    out_channels        = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
+    stride_in_1x1       = cfg.MODEL.RESNETS.STRIDE_IN_1X1
+    res5_dilation       = cfg.MODEL.RESNETS.RES5_DILATION
+    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
+    deform_modulated    = cfg.MODEL.RESNETS.DEFORM_MODULATED
+    deform_num_groups   = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
+    # fmt: on
+    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
+
+    num_blocks_per_stage = {
+        18: [2, 2, 2, 2],
+        34: [3, 4, 6, 3],
+        50: [3, 4, 6, 3],
+        101: [3, 4, 23, 3],
+        152: [3, 8, 36, 3],
+    }[depth]
+
+    if depth in [18, 34]:
+        assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
+        assert not any(
+            deform_on_per_stage
+        ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
+        assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
+        assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"
+
+    stages = []
+
+    for idx, stage_idx in enumerate(range(2, 6)):
+        # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper
+        dilation = res5_dilation if stage_idx == 5 else 1
+        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
+        stage_kargs = {
+            "num_blocks": num_blocks_per_stage[idx],
+            "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
+            "in_channels": in_channels,
+            "out_channels": out_channels,
+            "norm": norm,
+        }
+        # Use BasicBlock for R18 and R34.
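+        # (BasicBlock takes no bottleneck/group/dilation arguments, hence this split.)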
+ if depth in [18, 34]: + stage_kargs["block_class"] = BasicBlock + else: + stage_kargs["bottleneck_channels"] = bottleneck_channels + stage_kargs["stride_in_1x1"] = stride_in_1x1 + stage_kargs["dilation"] = dilation + stage_kargs["num_groups"] = num_groups + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = ResNet.make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/swin.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/swin.py new file mode 100644 index 0000000000000000000000000000000000000000..9794139c1aa84bd447f9fd0ac11448535bb858ee --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/swin.py @@ -0,0 +1,695 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Implementation of Swin models from :paper:`swin`. + +This code is adapted from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py with minimal modifications. # noqa +-------------------------------------------------------- +Swin Transformer +Copyright (c) 2021 Microsoft +Licensed under The MIT License [see LICENSE for details] +Written by Ze Liu, Yutong Lin, Yixuan Wei +-------------------------------------------------------- +LICENSE: https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/461e003166a8083d0b620beacd4662a2df306bd6/LICENSE +""" + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from custom_detectron2.modeling.backbone.backbone import Backbone + +_to_2tuple = nn.modules.utils._ntuple(2) + + +class Mlp(nn.Module): + """Multilayer perceptron.""" + + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + 
return x + + +class WindowAttention(nn.Module): + """Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. + Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__( + self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + ) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """Forward function. 
+ Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B_, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1) + ].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1 + ).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=_to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + if drop_path > 0.0: + from custom_timm.models.layers import DropPath + + self.drop_path = DropPath(drop_path) + else: + self.drop_path = nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop + ) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. 
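+        Note: ``H`` and ``W`` are taken from ``self.H`` / ``self.W``, which the
+        enclosing ``BasicLayer`` assigns to every block right before calling it.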
+ """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size + ) # nW*B, window_size, window_size, C + x_windows = x_windows.view( + -1, self.window_size * self.window_size, C + ) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """Patch Merging Layer + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. 
+ Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__( + self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + ): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + w_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size + ) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding + Args: + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None
+    """
+
+    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+        super().__init__()
+        patch_size = _to_2tuple(patch_size)
+        self.patch_size = patch_size
+
+        self.in_chans = in_chans
+        self.embed_dim = embed_dim
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        if norm_layer is not None:
+            self.norm = norm_layer(embed_dim)
+        else:
+            self.norm = None
+
+    def forward(self, x):
+        """Forward function."""
+        # padding
+        _, _, H, W = x.size()
+        if W % self.patch_size[1] != 0:
+            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
+        if H % self.patch_size[0] != 0:
+            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
+
+        x = self.proj(x)  # B C Wh Ww
+        if self.norm is not None:
+            Wh, Ww = x.size(2), x.size(3)
+            x = x.flatten(2).transpose(1, 2)
+            x = self.norm(x)
+            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
+
+        return x
+
+
+class SwinTransformer(Backbone):
+    """Swin Transformer backbone.
+    A PyTorch impl of `Swin Transformer: Hierarchical Vision Transformer using Shifted
+    Windows` - https://arxiv.org/pdf/2103.14030
+    Args:
+        pretrain_img_size (int): Input image size for training the pretrained model,
+            used in absolute position embedding. Default 224.
+        patch_size (int | tuple(int)): Patch size. Default: 4.
+        in_chans (int): Number of input image channels. Default: 3.
+        embed_dim (int): Number of linear projection output channels. Default: 96.
+        depths (tuple[int]): Depths of each Swin Transformer stage.
+        num_heads (tuple[int]): Number of attention heads of each stage.
+        window_size (int): Window size. Default: 7.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
+        drop_rate (float): Dropout rate.
+        attn_drop_rate (float): Attention dropout rate. Default: 0.
+        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
+        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
+        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
+        out_indices (Sequence[int]): Output from which stages.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            -1 means not freezing any parameters.
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
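+
+    Example (illustrative sketch using the Swin-T defaults above; no pretrained
+    weights are loaded)::
+
+        model = SwinTransformer(embed_dim=96, depths=(2, 2, 6, 2),
+                                num_heads=(3, 6, 12, 24), out_indices=(0, 1, 2, 3))
+        feats = model(torch.randn(1, 3, 224, 224))
+        # feats["p0"] has shape (1, 96, 56, 56); feats["p3"] has shape (1, 768, 7, 7)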
+ """ + + def __init__( + self, + pretrain_img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + use_checkpoint=False, + ): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None, + ) + + # absolute position embedding + if self.ape: + pretrain_img_size = _to_2tuple(pretrain_img_size) + patch_size = _to_2tuple(patch_size) + patches_resolution = [ + pretrain_img_size[0] // patch_size[0], + pretrain_img_size[1] // patch_size[1], + ] + + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) + ) + nn.init.trunc_normal_(self.absolute_pos_embed, std=0.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2**i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + ) + self.layers.append(layer) + + num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f"norm{i_layer}" + self.add_module(layer_name, layer) + + self._freeze_stages() + self._out_features = ["p{}".format(i) for i in self.out_indices] + self._out_feature_channels = { + "p{}".format(i): self.embed_dim * 2**i for i in self.out_indices + } + self._out_feature_strides = {"p{}".format(i): 2 ** (i + 2) for i in self.out_indices} + self._size_devisibility = 32 + + self.apply(self._init_weights) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @property + def size_divisibility(self): + return self._size_divisibility + + def forward(self, x): + 
"""Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" + ) + x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C + else: + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + outs = {} + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f"norm{i}") + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs["p{}".format(i)] = out + + return outs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2b89a4c3fbe079a77fd0cef947cf9ada787fc55d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/utils.py @@ -0,0 +1,186 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = [ + "window_partition", + "window_unpartition", + "add_decomposed_rel_pos", + "get_abs_pos", + "PatchEmbed", +] + + +def window_partition(x, window_size): + """ + Partition into non-overlapping windows with padding if needed. + Args: + x (tensor): input tokens with [B, H, W, C]. + window_size (int): window size. + + Returns: + windows: windows after partition with [B * num_windows, window_size, window_size, C]. + (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition(windows, window_size, pad_hw, hw): + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size, k_size, rel_pos): + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. 
+ rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) + rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) + + attn = ( + attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + ).view(B, q_h * q_w, k_h * k_w) + + return attn + + +def get_abs_pos(abs_pos, has_cls_token, hw): + """ + Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token + dimension for the original embeddings. + Args: + abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). + has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. + hw (Tuple): size of input image tokens. + + Returns: + Absolute positional embeddings after processing with shape (1, H, W, C) + """ + h, w = hw + if has_cls_token: + abs_pos = abs_pos[:, 1:] + xy_num = abs_pos.shape[1] + size = int(math.sqrt(xy_num)) + assert size * size == xy_num + + if size != h or size != w: + new_abs_pos = F.interpolate( + abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2), + size=(h, w), + mode="bicubic", + align_corners=False, + ) + + return new_abs_pos.permute(0, 2, 3, 1) + else: + return abs_pos.reshape(1, h, w, -1) + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding. + """ + + def __init__( + self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768 + ): + """ + Args: + kernel_size (Tuple): kernel size of the projection layer. + stride (Tuple): stride of the projection layer. + padding (Tuple): padding size of the projection layer. + in_chans (int): Number of input image channels. + embed_dim (int): embed_dim (int): Patch embedding dimension. 
+ """ + super().__init__() + + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding + ) + + def forward(self, x): + x = self.proj(x) + # B C H W -> B H W C + x = x.permute(0, 2, 3, 1) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/vit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..3505fc095a67f39e17c4bea1a4c52463912c3c13 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/backbone/vit.py @@ -0,0 +1,524 @@ +import logging +import math +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn as nn + +from custom_detectron2.layers import CNNBlockBase, Conv2d, get_norm +from custom_detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous + +from .backbone import Backbone +from .utils import ( + PatchEmbed, + add_decomposed_rel_pos, + get_abs_pos, + window_partition, + window_unpartition, +) + +logger = logging.getLogger(__name__) + + +__all__ = ["ViT", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"] + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=True, + use_rel_pos=False, + rel_pos_zero_init=True, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool: If True, add a learnable bias to query, key, value. + rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. + """ + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + if not rel_pos_zero_init: + nn.init.trunc_normal_(self.rel_pos_h, std=0.02) + nn.init.trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x): + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +class ResBottleneckBlock(CNNBlockBase): + """ + The standard bottleneck residual block without the last activation layer. + It contains 3 conv layers with kernels 1x1, 3x3, 1x1. + """ + + def __init__( + self, + in_channels, + out_channels, + bottleneck_channels, + norm="LN", + act_layer=nn.GELU, + ): + """ + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. 
+ bottleneck_channels (int): number of output channels for the 3x3 + "bottleneck" conv layers. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. + act_layer (callable): activation for all conv layers. + """ + super().__init__(in_channels, out_channels, 1) + + self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False) + self.norm1 = get_norm(norm, bottleneck_channels) + self.act1 = act_layer() + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + 3, + padding=1, + bias=False, + ) + self.norm2 = get_norm(norm, bottleneck_channels) + self.act2 = act_layer() + + self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False) + self.norm3 = get_norm(norm, out_channels) + + for layer in [self.conv1, self.conv2, self.conv3]: + weight_init.c2_msra_fill(layer) + for layer in [self.norm1, self.norm2]: + layer.weight.data.fill_(1.0) + layer.bias.data.zero_() + # zero init last norm layer. + self.norm3.weight.data.zero_() + self.norm3.bias.data.zero_() + + def forward(self, x): + out = x + for layer in self.children(): + out = layer(out) + + out = x + out + return out + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + use_rel_pos=False, + rel_pos_zero_init=True, + window_size=0, + use_residual_block=False, + input_size=None, + ): + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then not + use window attention. + use_residual_block (bool): If True, use a residual block after the MLP block. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. 
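+            For example, window_size=14 with input_size=(64, 64) restricts
+            attention to padded 14x14 tiles, while window_size=0 keeps global
+            attention over all 64 * 64 tokens.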
+ """ + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + input_size=input_size if window_size == 0 else (window_size, window_size), + ) + + from custom_timm.models.layers import DropPath, Mlp + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) + + self.window_size = window_size + + self.use_residual_block = use_residual_block + if use_residual_block: + # Use a residual block with bottleneck channel as dim // 2 + self.residual = ResBottleneckBlock( + in_channels=dim, + out_channels=dim, + bottleneck_channels=dim // 2, + norm="LN", + act_layer=act_layer, + ) + + def forward(self, x): + shortcut = x + x = self.norm1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + if self.use_residual_block: + x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + + return x + + +class ViT(Backbone): + """ + This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`. + "Exploring Plain Vision Transformer Backbones for Object Detection", + https://arxiv.org/abs/2203.16527 + """ + + def __init__( + self, + img_size=1024, + patch_size=16, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + act_layer=nn.GELU, + use_abs_pos=True, + use_rel_pos=False, + rel_pos_zero_init=True, + window_size=0, + window_block_indexes=(), + residual_block_indexes=(), + use_act_checkpoint=False, + pretrain_img_size=224, + pretrain_use_cls_token=True, + out_feature="last_feat", + ): + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + drop_path_rate (float): Stochastic depth rate. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + window_block_indexes (list): Indexes for blocks using window attention. + residual_block_indexes (list): Indexes for blocks using conv propagation. + use_act_checkpoint (bool): If True, use activation checkpointing. + pretrain_img_size (int): input image size for pretraining models. + pretrain_use_cls_token (bool): If True, pretrainig models use class token. + out_feature (str): name of the feature from the last block. 
+ """ + super().__init__() + self.pretrain_use_cls_token = pretrain_use_cls_token + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size) + num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim)) + else: + self.pos_embed = None + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i in window_block_indexes else 0, + use_residual_block=i in residual_block_indexes, + input_size=(img_size // patch_size, img_size // patch_size), + ) + if use_act_checkpoint: + # TODO: use torch.utils.checkpoint + from fairscale.nn.checkpoint import checkpoint_wrapper + + block = checkpoint_wrapper(block) + self.blocks.append(block) + + self._out_feature_channels = {out_feature: embed_dim} + self._out_feature_strides = {out_feature: patch_size} + self._out_features = [out_feature] + + if self.pos_embed is not None: + nn.init.trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + x = self.patch_embed(x) + if self.pos_embed is not None: + x = x + get_abs_pos( + self.pos_embed, self.pretrain_use_cls_token, (x.shape[1], x.shape[2]) + ) + + for blk in self.blocks: + x = blk(x) + + outputs = {self._out_features[0]: x.permute(0, 3, 1, 2)} + return outputs + + +class SimpleFeaturePyramid(Backbone): + """ + This module implements SimpleFeaturePyramid in :paper:`vitdet`. + It creates pyramid features built on top of the input feature map. + """ + + def __init__( + self, + net, + in_feature, + out_channels, + scale_factors, + top_block=None, + norm="LN", + square_pad=0, + ): + """ + Args: + net (Backbone): module representing the subnetwork backbone. + Must be a subclass of :class:`Backbone`. + in_feature (str): names of the input feature maps coming + from the net. + out_channels (int): number of channels in the output feature maps. + scale_factors (list[float]): list of scaling factors to upsample or downsample + the input features for creating pyramid features. + top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + pyramid output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra pyramid levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + norm (str): the normalization to use. + square_pad (int): If > 0, require input images to be padded to specific square size. 
+ """ + super(SimpleFeaturePyramid, self).__init__() + assert isinstance(net, Backbone) + + self.scale_factors = scale_factors + + input_shapes = net.output_shape() + strides = [int(input_shapes[in_feature].stride / scale) for scale in scale_factors] + _assert_strides_are_log2_contiguous(strides) + + dim = input_shapes[in_feature].channels + self.stages = [] + use_bias = norm == "" + for idx, scale in enumerate(scale_factors): + out_dim = dim + if scale == 4.0: + layers = [ + nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2), + get_norm(norm, dim // 2), + nn.GELU(), + nn.ConvTranspose2d(dim // 2, dim // 4, kernel_size=2, stride=2), + ] + out_dim = dim // 4 + elif scale == 2.0: + layers = [nn.ConvTranspose2d(dim, dim // 2, kernel_size=2, stride=2)] + out_dim = dim // 2 + elif scale == 1.0: + layers = [] + elif scale == 0.5: + layers = [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + raise NotImplementedError(f"scale_factor={scale} is not supported yet.") + + layers.extend( + [ + Conv2d( + out_dim, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + Conv2d( + out_channels, + out_channels, + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + ), + ] + ) + layers = nn.Sequential(*layers) + + stage = int(math.log2(strides[idx])) + self.add_module(f"simfp_{stage}", layers) + self.stages.append(layers) + + self.net = net + self.in_feature = in_feature + self.top_block = top_block + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = strides[-1] + self._square_pad = square_pad + + @property + def padding_constraints(self): + return { + "size_divisiblity": self._size_divisibility, + "square_size": self._square_pad, + } + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + + Returns: + dict[str->Tensor]: + mapping from feature map name to pyramid feature map tensor + in high to low resolution order. Returned feature names follow the FPN + convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. + """ + bottom_up_features = self.net(x) + features = bottom_up_features[self.in_feature] + results = [] + + for stage in self.stages: + results.append(stage(features)) + + if self.top_block is not None: + if self.top_block.in_feature in bottom_up_features: + top_block_in_feature = bottom_up_features[self.top_block.in_feature] + else: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return {f: res for f, res in zip(self._out_features, results)} + + +def get_vit_lr_decay_rate(name, lr_decay_rate=1.0, num_layers=12): + """ + Calculate lr decay rate for different ViT blocks. + Args: + name (string): parameter name. + lr_decay_rate (float): base lr decay rate. + num_layers (int): number of ViT blocks. + + Returns: + lr decay rate for the given parameter. 
+ """ + layer_id = num_layers + 1 + if name.startswith("backbone"): + if ".pos_embed" in name or ".patch_embed" in name: + layer_id = 0 + elif ".blocks." in name and ".residual." not in name: + layer_id = int(name[name.find(".blocks.") :].split(".")[2]) + 1 + + return lr_decay_rate ** (num_layers + 1 - layer_id) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/box_regression.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/box_regression.py new file mode 100644 index 0000000000000000000000000000000000000000..944a41b81a022b21e4a5e52ac925558187da428a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/box_regression.py @@ -0,0 +1,369 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Tuple, Union +import torch +from fvcore.nn import giou_loss, smooth_l1_loss +from torch.nn import functional as F + +from custom_detectron2.layers import cat, ciou_loss, diou_loss +from custom_detectron2.structures import Boxes + +# Value for clamping large dw and dh predictions. The heuristic is that we clamp +# such that dw and dh are no larger than what would transform a 16px box into a +# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). +_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16) + + +__all__ = ["Box2BoxTransform", "Box2BoxTransformRotated", "Box2BoxTransformLinear"] + + +@torch.jit.script +class Box2BoxTransform(object): + """ + The box-to-box transform defined in R-CNN. The transformation is parameterized + by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height + by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). + """ + + def __init__( + self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP + ): + """ + Args: + weights (4-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set + such that the deltas have unit variance; now they are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): source boxes, e.g., object proposals + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. 
+ """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_widths = src_boxes[:, 2] - src_boxes[:, 0] + src_heights = src_boxes[:, 3] - src_boxes[:, 1] + src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths + src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights + + target_widths = target_boxes[:, 2] - target_boxes[:, 0] + target_heights = target_boxes[:, 3] - target_boxes[:, 1] + target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths + target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights + + wx, wy, ww, wh = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + + deltas = torch.stack((dx, dy, dw, dh), dim=1) + assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + deltas = deltas.float() # ensure fp32 for decoding precision + boxes = boxes.to(deltas.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = deltas[:, 0::4] / wx + dy = deltas[:, 1::4] / wy + dw = deltas[:, 2::4] / ww + dh = deltas[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + x1 = pred_ctr_x - 0.5 * pred_w + y1 = pred_ctr_y - 0.5 * pred_h + x2 = pred_ctr_x + 0.5 * pred_w + y2 = pred_ctr_y + 0.5 * pred_h + pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1) + return pred_boxes.reshape(deltas.shape) + + +@torch.jit.script +class Box2BoxTransformRotated(object): + """ + The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized + by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height + by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height), + and rotate a box's angle by da (radians). + Note: angles of deltas are in radians while angles of boxes are in degrees. + """ + + def __init__( + self, + weights: Tuple[float, float, float, float, float], + scale_clamp: float = _DEFAULT_SCALE_CLAMP, + ): + """ + Args: + weights (5-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh, da) deltas. These are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used + to transform the `src_boxes` into the `target_boxes`. 
That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): Nx5 source boxes, e.g., object proposals + target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1) + + target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind( + target_boxes, dim=1 + ) + + wx, wy, ww, wh, wa = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + # Angles of deltas are in radians while angles of boxes are in degrees. + # the conversion to radians serve as a way to normalize the values + da = target_angles - src_angles + da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + da *= wa * math.pi / 180.0 + + deltas = torch.stack((dx, dy, dw, dh, da), dim=1) + assert ( + (src_widths > 0).all().item() + ), "Input boxes to Box2BoxTransformRotated are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*5). + deltas[i] represents box transformation for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 5) + """ + assert deltas.shape[1] % 5 == 0 and boxes.shape[1] == 5 + + boxes = boxes.to(deltas.dtype).unsqueeze(2) + + ctr_x = boxes[:, 0] + ctr_y = boxes[:, 1] + widths = boxes[:, 2] + heights = boxes[:, 3] + angles = boxes[:, 4] + + wx, wy, ww, wh, wa = self.weights + + dx = deltas[:, 0::5] / wx + dy = deltas[:, 1::5] / wy + dw = deltas[:, 2::5] / ww + dh = deltas[:, 3::5] / wh + da = deltas[:, 4::5] / wa + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::5] = dx * widths + ctr_x # x_ctr + pred_boxes[:, 1::5] = dy * heights + ctr_y # y_ctr + pred_boxes[:, 2::5] = torch.exp(dw) * widths # width + pred_boxes[:, 3::5] = torch.exp(dh) * heights # height + + # Following original RRPN implementation, + # angles of deltas are in radians while angles of boxes are in degrees. + pred_angle = da * 180.0 / math.pi + angles + pred_angle = (pred_angle + 180.0) % 360.0 - 180.0 # make it in [-180, 180) + + pred_boxes[:, 4::5] = pred_angle + + return pred_boxes + + +class Box2BoxTransformLinear(object): + """ + The linear box-to-box transform defined in FCOS. The transformation is parameterized + by the distance from the center of (square) src box to 4 edges of the target box. + """ + + def __init__(self, normalize_by_size=True): + """ + Args: + normalize_by_size: normalize deltas by the size of src (anchor) boxes. + """ + self.normalize_by_size = normalize_by_size + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx1, dy1, dx2, dy2) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true. + The center of src must be inside target boxes. 
+ + Args: + src_boxes (Tensor): square source boxes, e.g., anchors + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x = 0.5 * (src_boxes[:, 0] + src_boxes[:, 2]) + src_ctr_y = 0.5 * (src_boxes[:, 1] + src_boxes[:, 3]) + + target_l = src_ctr_x - target_boxes[:, 0] + target_t = src_ctr_y - target_boxes[:, 1] + target_r = target_boxes[:, 2] - src_ctr_x + target_b = target_boxes[:, 3] - src_ctr_y + + deltas = torch.stack((target_l, target_t, target_r, target_b), dim=1) + if self.normalize_by_size: + stride_w = src_boxes[:, 2] - src_boxes[:, 0] + stride_h = src_boxes[:, 3] - src_boxes[:, 1] + strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1) + deltas = deltas / strides + + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx1, dy1, dx2, dy2) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + # Ensure the output is a valid box. See Sec 2.1 of https://arxiv.org/abs/2006.09214 + deltas = F.relu(deltas) + boxes = boxes.to(deltas.dtype) + + ctr_x = 0.5 * (boxes[:, 0] + boxes[:, 2]) + ctr_y = 0.5 * (boxes[:, 1] + boxes[:, 3]) + if self.normalize_by_size: + stride_w = boxes[:, 2] - boxes[:, 0] + stride_h = boxes[:, 3] - boxes[:, 1] + strides = torch.stack([stride_w, stride_h, stride_w, stride_h], axis=1) + deltas = deltas * strides + + l = deltas[:, 0::4] + t = deltas[:, 1::4] + r = deltas[:, 2::4] + b = deltas[:, 3::4] + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::4] = ctr_x[:, None] - l # x1 + pred_boxes[:, 1::4] = ctr_y[:, None] - t # y1 + pred_boxes[:, 2::4] = ctr_x[:, None] + r # x2 + pred_boxes[:, 3::4] = ctr_y[:, None] + b # y2 + return pred_boxes + + +def _dense_box_regression_loss( + anchors: List[Union[Boxes, torch.Tensor]], + box2box_transform: Box2BoxTransform, + pred_anchor_deltas: List[torch.Tensor], + gt_boxes: List[torch.Tensor], + fg_mask: torch.Tensor, + box_reg_loss_type="smooth_l1", + smooth_l1_beta=0.0, +): + """ + Compute loss for dense multi-level box regression. + Loss is accumulated over ``fg_mask``. + + Args: + anchors: #lvl anchor boxes, each is (HixWixA, 4) + pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) + gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) + fg_mask: the foreground boolean mask of shape (N, R) to compute loss on + box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou", + "diou", "ciou". + smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to + use L1 loss. 
Only used when `box_reg_loss_type` is "smooth_l1"
+    """
+    if isinstance(anchors[0], Boxes):
+        anchors = type(anchors[0]).cat(anchors).tensor  # (R, 4)
+    else:
+        anchors = cat(anchors)
+    if box_reg_loss_type == "smooth_l1":
+        gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]
+        gt_anchor_deltas = torch.stack(gt_anchor_deltas)  # (N, R, 4)
+        loss_box_reg = smooth_l1_loss(
+            cat(pred_anchor_deltas, dim=1)[fg_mask],
+            gt_anchor_deltas[fg_mask],
+            beta=smooth_l1_beta,
+            reduction="sum",
+        )
+    elif box_reg_loss_type == "giou":
+        pred_boxes = [
+            box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
+        ]
+        loss_box_reg = giou_loss(
+            torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
+        )
+    elif box_reg_loss_type == "diou":
+        pred_boxes = [
+            box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
+        ]
+        loss_box_reg = diou_loss(
+            torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
+        )
+    elif box_reg_loss_type == "ciou":
+        pred_boxes = [
+            box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)
+        ]
+        loss_box_reg = ciou_loss(
+            torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum"
+        )
+    else:
+        raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'")
+    return loss_box_reg
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/matcher.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/matcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f1c16e3f80e1ad90899df46d7efda448c68cff8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/matcher.py
@@ -0,0 +1,127 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from typing import List
+import torch
+
+from custom_detectron2.layers import nonzero_tuple
+
+
+# TODO: the name is too general
+class Matcher(object):
+    """
+    This class assigns to each predicted "element" (e.g., a box) a ground-truth
+    element. Each predicted element will have exactly zero or one matches; each
+    ground-truth element may be matched to zero or more predicted elements.
+
+    The matching is determined by the MxN match_quality_matrix, which characterizes
+    how well each (ground-truth, prediction) pair matches. For example,
+    if the elements are boxes, this matrix may contain box intersection-over-union
+    overlap values.
+
+    The matcher returns (a) a vector of length N containing the index of the
+    ground-truth element m in [0, M) that matches to prediction n in [0, N).
+    (b) a vector of length N containing the labels for each prediction.
+    """
+
+    def __init__(
+        self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False
+    ):
+        """
+        Args:
+            thresholds (list): a list of thresholds used to stratify predictions
+                into levels.
+            labels (list): a list of values to label predictions belonging to
+                each level. A label can be one of {-1, 0, 1} signifying
+                {ignore, negative class, positive class}, respectively.
+            allow_low_quality_matches (bool): if True, produce additional matches
+                for predictions with maximum match quality lower than high_threshold.
+                See set_low_quality_matches_ for more details.
+
+            For example,
+                thresholds = [0.3, 0.5]
+                labels = [0, -1, 1]
+                All predictions with iou < 0.3 will be marked with 0 and
+                thus will be considered as false positives while training.
+
+            All predictions with 0.3 <= iou < 0.5 will be marked with -1 and
+            thus will be ignored.
+            All predictions with 0.5 <= iou will be marked with 1 and
+            thus will be considered as true positives.
+        """
+        # Add -inf and +inf to first and last position in thresholds
+        thresholds = thresholds[:]
+        assert thresholds[0] > 0
+        thresholds.insert(0, -float("inf"))
+        thresholds.append(float("inf"))
+        # Currently torchscript does not support all + generator
+        assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])
+        assert all([l in [-1, 0, 1] for l in labels])
+        assert len(labels) == len(thresholds) - 1
+        self.thresholds = thresholds
+        self.labels = labels
+        self.allow_low_quality_matches = allow_low_quality_matches
+
+    def __call__(self, match_quality_matrix):
+        """
+        Args:
+            match_quality_matrix (Tensor[float]): an MxN tensor, containing the
+                pairwise quality between M ground-truth elements and N predicted
+                elements. All elements must be >= 0 (due to the use of `torch.nonzero`
+                for selecting indices in :meth:`set_low_quality_matches_`).
+
+        Returns:
+            matches (Tensor[int64]): a vector of length N, where matches[i] is a matched
+                ground-truth index in [0, M)
+            match_labels (Tensor[int8]): a vector of length N, where match_labels[i] indicates
+                whether a prediction is a true or false positive or ignored
+        """
+        assert match_quality_matrix.dim() == 2
+        if match_quality_matrix.numel() == 0:
+            default_matches = match_quality_matrix.new_full(
+                (match_quality_matrix.size(1),), 0, dtype=torch.int64
+            )
+            # When no gt boxes exist, we define IOU = 0 and therefore set labels
+            # to `self.labels[0]`, which usually defaults to background class 0.
+            # To ignore instead, use labels=[-1,0,-1,1] + set appropriate thresholds
+            default_match_labels = match_quality_matrix.new_full(
+                (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
+            )
+            return default_matches, default_match_labels
+
+        assert torch.all(match_quality_matrix >= 0)
+
+        # match_quality_matrix is M (gt) x N (predicted)
+        # Max over gt elements (dim 0) to find best gt candidate for each prediction
+        matched_vals, matches = match_quality_matrix.max(dim=0)
+
+        match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
+
+        for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
+            low_high = (matched_vals >= low) & (matched_vals < high)
+            match_labels[low_high] = l
+
+        if self.allow_low_quality_matches:
+            self.set_low_quality_matches_(match_labels, match_quality_matrix)
+
+        return matches, match_labels
+
+    def set_low_quality_matches_(self, match_labels, match_quality_matrix):
+        """
+        Produce additional matches for predictions that have only low-quality matches.
+        Specifically, for each ground-truth G find the set of predictions that have
+        maximum overlap with it (including ties); for each prediction in that set, if
+        it is unmatched, then match it to the ground-truth G.
+
+        This function implements the RPN assignment case (i) in Sec. 3.1.2 of
+        :paper:`Faster R-CNN`.
+        """
+        # For each gt, find the prediction with which it has highest quality
+        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
+        # Find the highest quality match available, even if it is low, including ties.
+        # Note that the match qualities must be positive due to the use of
+        # `torch.nonzero`.
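+        # (Illustrative sketch with assumed values: for the 2x3 quality matrix
+        #      [[0.1, 0.4, 0.4],
+        #       [0.0, 0.2, 0.7]]
+        #  the per-gt maxima are [0.4, 0.7]; the equality test below then selects
+        #  predictions 1 and 2 for gt 0 (a tie) and prediction 2 for gt 1, all of
+        #  which are relabeled as foreground.)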
+        _, pred_inds_with_highest_quality = nonzero_tuple(
+            match_quality_matrix == highest_quality_foreach_gt[:, None]
+        )
+        # If an anchor was labeled positive only due to a low-quality match
+        # with gt_A, but it has larger overlap with gt_B, its matched index will still be gt_B.
+        # This follows the implementation in Detectron, and is found to have no significant impact.
+        match_labels[pred_inds_with_highest_quality] = 1
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b0668157052ce7b796ef50bc7ee85361e7605b9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .build import META_ARCH_REGISTRY, build_model  # isort:skip
+
+from .panoptic_fpn import PanopticFPN
+
+# import all the meta_arch, so they will be registered
+from .rcnn import GeneralizedRCNN, ProposalNetwork
+from .dense_detector import DenseDetector
+from .retinanet import RetinaNet
+from .fcos import FCOS
+from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head
+
+
+__all__ = list(globals().keys())
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..545ebbd1c481e6f1fb6b322770afdcb661126d68
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/build.py
@@ -0,0 +1,24 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import torch
+
+from custom_detectron2.utils.logger import _log_api_usage
+from custom_detectron2.utils.registry import Registry
+
+META_ARCH_REGISTRY = Registry("META_ARCH")  # noqa F401 isort:skip
+META_ARCH_REGISTRY.__doc__ = """
+Registry for meta-architectures, i.e. the whole model.
+
+The registered object will be called with `obj(cfg)`
+and expected to return a `nn.Module` object.
+"""
+
+
+def build_model(cfg):
+    """
+    Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
+    Note that it does not load any weights from ``cfg``.
+    """
+    meta_arch = cfg.MODEL.META_ARCHITECTURE
+    model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
+    _log_api_usage("modeling.meta_arch."
+ meta_arch) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/dense_detector.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/dense_detector.py new file mode 100644 index 0000000000000000000000000000000000000000..148fcd6cf0d37068346b525b54c84af5feb731c2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/dense_detector.py @@ -0,0 +1,294 @@ +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import Tensor, nn + +from custom_detectron2.data.detection_utils import convert_image_to_rgb +from custom_detectron2.layers import move_device_like +from custom_detectron2.modeling import Backbone +from custom_detectron2.structures import Boxes, ImageList, Instances +from custom_detectron2.utils.events import get_event_storage + +from ..postprocessing import detector_postprocess + + +def permute_to_N_HWA_K(tensor, K: int): + """ + Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) + """ + assert tensor.dim() == 4, tensor.shape + N, _, H, W = tensor.shape + tensor = tensor.view(N, -1, K, H, W) + tensor = tensor.permute(0, 3, 4, 1, 2) + tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) + return tensor + + +class DenseDetector(nn.Module): + """ + Base class for dense detector. We define a dense detector as a fully-convolutional model that + makes per-pixel (i.e. dense) predictions. + """ + + def __init__( + self, + backbone: Backbone, + head: nn.Module, + head_in_features: Optional[List[str]] = None, + *, + pixel_mean, + pixel_std, + ): + """ + Args: + backbone: backbone module + head: head module + head_in_features: backbone features to use in head. Default to all backbone features. + pixel_mean (Tuple[float]): + Values to be used for image normalization (BGR order). + To train on images of different number of channels, set different mean & std. + Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] + pixel_std (Tuple[float]): + When using pre-trained models in Detectron1 or any MSRA models, + std has been absorbed into its conv1 weights, so the std needs to be set 1. + Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) + """ + super().__init__() + + self.backbone = backbone + self.head = head + if head_in_features is None: + shapes = self.backbone.output_shape() + self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride) + else: + self.head_in_features = head_in_features + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def forward(self, batched_inputs: List[Dict[str, Tensor]]): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances: Instances + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the + loss. Used during training only. 
In inference, the standard output format, described
+                in :doc:`/tutorials/models`.
+        """
+        images = self.preprocess_image(batched_inputs)
+        features = self.backbone(images.tensor)
+        features = [features[f] for f in self.head_in_features]
+        predictions = self.head(features)
+
+        if self.training:
+            assert not torch.jit.is_scripting(), "Not supported"
+            assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
+            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
+            return self.forward_training(images, features, predictions, gt_instances)
+        else:
+            results = self.forward_inference(images, features, predictions)
+            if torch.jit.is_scripting():
+                return results
+
+            processed_results = []
+            for results_per_image, input_per_image, image_size in zip(
+                results, batched_inputs, images.image_sizes
+            ):
+                height = input_per_image.get("height", image_size[0])
+                width = input_per_image.get("width", image_size[1])
+                r = detector_postprocess(results_per_image, height, width)
+                processed_results.append({"instances": r})
+            return processed_results
+
+    def forward_training(self, images, features, predictions, gt_instances):
+        raise NotImplementedError()
+
+    def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):
+        """
+        Normalize, pad and batch the input images.
+        """
+        images = [self._move_to_current_device(x["image"]) for x in batched_inputs]
+        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
+        images = ImageList.from_tensors(
+            images,
+            self.backbone.size_divisibility,
+            padding_constraints=self.backbone.padding_constraints,
+        )
+        return images
+
+    def _transpose_dense_predictions(
+        self, predictions: List[List[Tensor]], dims_per_anchor: List[int]
+    ) -> List[List[Tensor]]:
+        """
+        Transpose the dense per-level predictions.
+
+        Args:
+            predictions: a list of outputs, each is a list of per-level
+                predictions with shape (N, Ai x K, Hi, Wi), where N is the
+                number of images, Ai is the number of anchors per location on
+                level i, K is the dimension of predictions per anchor.
+            dims_per_anchor: the value of K for each prediction. e.g. 4 for
+                box prediction, #classes for classification prediction.
+
+        Returns:
+            List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).
+        """
+        assert len(predictions) == len(dims_per_anchor)
+        res: List[List[Tensor]] = []
+        for pred, dim_per_anchor in zip(predictions, dims_per_anchor):
+            pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]
+            res.append(pred)
+        return res
+
+    def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):
+        """
+        Apply EMA update to `self.name` using `value`.
+
+        This is mainly used for loss normalizer. In Detectron1, loss is normalized by number
+        of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a
+        large variance and using it leads to lower performance. Therefore we maintain an EMA of
+        #foreground to stabilize the normalizer.
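+
+        For example (illustrative numbers, not taken from the code): with
+        momentum=0.9, initial_value=100 and incoming per-batch foreground counts
+        of 20, 20, 20, the EMA evolves 100 -> 92 -> 84.8 -> 78.32, decaying
+        smoothly toward the observed count instead of jumping.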
+
+        Args:
+            name: name of the normalizer
+            value: the new value to update
+            initial_value: the initial value to start with
+            momentum: momentum of EMA
+
+        Returns:
+            float: the updated EMA value
+        """
+        if hasattr(self, name):
+            old = getattr(self, name)
+        else:
+            old = initial_value
+        new = old * momentum + value * (1 - momentum)
+        setattr(self, name, new)
+        return new
+
+    def _decode_per_level_predictions(
+        self,
+        anchors: Boxes,
+        pred_scores: Tensor,
+        pred_deltas: Tensor,
+        score_thresh: float,
+        topk_candidates: int,
+        image_size: Tuple[int, int],
+    ) -> Instances:
+        """
+        Decode boxes and classification predictions of one feature level, by
+        the following steps:
+        1. filter the predictions based on score threshold and top K scores.
+        2. transform the box regression outputs
+        3. return the predicted scores, classes and boxes
+
+        Args:
+            anchors: Boxes, anchor for this feature level
+            pred_scores: HxWxA,K
+            pred_deltas: HxWxA,4
+
+        Returns:
+            Instances: with field "scores", "pred_boxes", "pred_classes".
+        """
+        # Apply two filtering steps to make NMS faster.
+        # 1. Keep boxes with confidence score higher than threshold
+        keep_idxs = pred_scores > score_thresh
+        pred_scores = pred_scores[keep_idxs]
+        topk_idxs = torch.nonzero(keep_idxs)  # Kx2
+
+        # 2. Keep top k top scoring boxes only
+        topk_idxs_size = topk_idxs.shape[0]
+        if isinstance(topk_idxs_size, Tensor):
+            # It's a tensor in tracing
+            num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)
+        else:
+            num_topk = min(topk_idxs_size, topk_candidates)
+        pred_scores, idxs = pred_scores.topk(num_topk)
+        topk_idxs = topk_idxs[idxs]
+
+        anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)
+
+        pred_boxes = self.box2box_transform.apply_deltas(
+            pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]
+        )
+        return Instances(
+            image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs
+        )
+
+    def _decode_multi_level_predictions(
+        self,
+        anchors: List[Boxes],
+        pred_scores: List[Tensor],
+        pred_deltas: List[Tensor],
+        score_thresh: float,
+        topk_candidates: int,
+        image_size: Tuple[int, int],
+    ) -> Instances:
+        """
+        Run `_decode_per_level_predictions` for all feature levels and concat the results.
+        """
+        predictions = [
+            self._decode_per_level_predictions(
+                anchors_i,
+                box_cls_i,
+                box_reg_i,
+                self.test_score_thresh,
+                self.test_topk_candidates,
+                image_size,
+            )
+            # Iterate over every feature level
+            for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)
+        ]
+        return predictions[0].cat(predictions)  # 'Instances.cat' is not scriptable but this is
+
+    def visualize_training(self, batched_inputs, results):
+        """
+        A function used to visualize ground truth images and final network predictions.
+        It shows ground truth bounding boxes on the original image and up to 20
+        predicted object bounding boxes on the original image.
+
+        Args:
+            batched_inputs (list): a list that contains input to the model.
+            results (List[Instances]): a list of #images elements returned by forward_inference().
+ """ + from custom_detectron2.utils.visualizer import Visualizer + + assert len(batched_inputs) == len( + results + ), "Cannot visualize inputs and results of different sizes" + storage = get_event_storage() + max_boxes = 20 + + image_index = 0 # only visualize a single image + img = batched_inputs[image_index]["image"] + img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes) + anno_img = v_gt.get_image() + processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1]) + predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy() + + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes]) + prop_img = v_pred.get_image() + vis_img = np.vstack((anno_img, prop_img)) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results" + storage.put_image(vis_name, vis_img) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/fcos.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/fcos.py new file mode 100644 index 0000000000000000000000000000000000000000..e3f8603f776e1a0ab405684c4f8380ba78cf2600 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/fcos.py @@ -0,0 +1,328 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +from typing import List, Optional, Tuple +import torch +from fvcore.nn import sigmoid_focal_loss_jit +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.layers import ShapeSpec, batched_nms +from custom_detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance +from custom_detectron2.utils.events import get_event_storage + +from ..anchor_generator import DefaultAnchorGenerator +from ..backbone import Backbone +from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss +from .dense_detector import DenseDetector +from .retinanet import RetinaNetHead + +__all__ = ["FCOS"] + +logger = logging.getLogger(__name__) + + +class FCOS(DenseDetector): + """ + Implement FCOS in :paper:`fcos`. + """ + + def __init__( + self, + *, + backbone: Backbone, + head: nn.Module, + head_in_features: Optional[List[str]] = None, + box2box_transform=None, + num_classes, + center_sampling_radius: float = 1.5, + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + test_score_thresh=0.2, + test_topk_candidates=1000, + test_nms_thresh=0.6, + max_detections_per_image=100, + pixel_mean, + pixel_std, + ): + """ + Args: + center_sampling_radius: radius of the "center" of a groundtruth box, + within which all anchor points are labeled positive. + Other arguments mean the same as in :class:`RetinaNet`. + """ + super().__init__( + backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std + ) + + self.num_classes = num_classes + + # FCOS uses one anchor point per location. + # We represent the anchor point by a box whose size equals the anchor stride. 
+ feature_shapes = backbone.output_shape() + fpn_strides = [feature_shapes[k].stride for k in self.head_in_features] + self.anchor_generator = DefaultAnchorGenerator( + sizes=[[k] for k in fpn_strides], aspect_ratios=[1.0], strides=fpn_strides + ) + + # FCOS parameterizes box regression by a linear transform, + # where predictions are normalized by anchor stride (equal to anchor size). + if box2box_transform is None: + box2box_transform = Box2BoxTransformLinear(normalize_by_size=True) + self.box2box_transform = box2box_transform + + self.center_sampling_radius = float(center_sampling_radius) + + # Loss parameters: + self.focal_loss_alpha = focal_loss_alpha + self.focal_loss_gamma = focal_loss_gamma + + # Inference parameters: + self.test_score_thresh = test_score_thresh + self.test_topk_candidates = test_topk_candidates + self.test_nms_thresh = test_nms_thresh + self.max_detections_per_image = max_detections_per_image + + def forward_training(self, images, features, predictions, gt_instances): + # Transpose the Hi*Wi*A dimension to the middle: + pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions( + predictions, [self.num_classes, 4, 1] + ) + anchors = self.anchor_generator(features) + gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) + return self.losses( + anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness + ) + + @torch.no_grad() + def _match_anchors(self, gt_boxes: Boxes, anchors: List[Boxes]): + """ + Match ground-truth boxes to a set of multi-level anchors. + + Args: + gt_boxes: Ground-truth boxes from instances of an image. + anchors: List of anchors for each feature map (of different scales). + + Returns: + torch.Tensor + A tensor of shape `(M, R)`, given `M` ground-truth boxes and total + `R` anchor points from all feature levels, indicating the quality + of match between m-th box and r-th anchor. Higher value indicates + better match. + """ + # Naming convention: (M = ground-truth boxes, R = anchor points) + # Anchor points are represented as square boxes of size = stride. + num_anchors_per_level = [len(x) for x in anchors] + anchors = Boxes.cat(anchors) # (R, 4) + anchor_centers = anchors.get_centers() # (R, 2) + anchor_sizes = anchors.tensor[:, 2] - anchors.tensor[:, 0] # (R, ) + + lower_bound = anchor_sizes * 4 + lower_bound[: num_anchors_per_level[0]] = 0 + upper_bound = anchor_sizes * 8 + upper_bound[-num_anchors_per_level[-1] :] = float("inf") + + gt_centers = gt_boxes.get_centers() + + # FCOS with center sampling: anchor point must be close enough to + # ground-truth box center. + center_dists = (anchor_centers[None, :, :] - gt_centers[:, None, :]).abs_() + sampling_regions = self.center_sampling_radius * anchor_sizes[None, :] + + match_quality_matrix = center_dists.max(dim=2).values < sampling_regions + + pairwise_dist = pairwise_point_box_distance(anchor_centers, gt_boxes) + pairwise_dist = pairwise_dist.permute(1, 0, 2) # (M, R, 4) + + # The original FCOS anchor matching rule: anchor point must be inside GT. + match_quality_matrix &= pairwise_dist.min(dim=2).values > 0 + + # Multilevel anchor matching in FCOS: each anchor is only responsible + # for certain scale range. + pairwise_dist = pairwise_dist.max(dim=2).values + match_quality_matrix &= (pairwise_dist > lower_bound[None, :]) & ( + pairwise_dist < upper_bound[None, :] + ) + # Match the GT box with minimum area, if there are multiple GT matches. 
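+        # (Sketch of the trick below, with assumed areas: after the boolean matrix is
+        #  cast to float, matching entries become 1e8 - gt_area and non-matching ones
+        #  stay 0; for candidate GTs of area 1000 and 5000 the qualities are
+        #  99999000 > 99995000, so the argmax taken in `label_anchors` prefers the
+        #  smaller box.)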
+ gt_areas = gt_boxes.area() # (M, ) + + match_quality_matrix = match_quality_matrix.to(torch.float32) + match_quality_matrix *= 1e8 - gt_areas[:, None] + return match_quality_matrix # (M, R) + + @torch.no_grad() + def label_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]): + """ + Same interface as :meth:`RetinaNet.label_anchors`, but implemented with FCOS + anchor matching rule. + + Unlike RetinaNet, there are no ignored anchors. + """ + + gt_labels, matched_gt_boxes = [], [] + + for inst in gt_instances: + if len(inst) > 0: + match_quality_matrix = self._match_anchors(inst.gt_boxes, anchors) + + # Find matched ground-truth box per anchor. Un-matched anchors are + # assigned -1. This is equivalent to using an anchor matcher as used + # in R-CNN/RetinaNet: `Matcher(thresholds=[1e-5], labels=[0, 1])` + match_quality, matched_idxs = match_quality_matrix.max(dim=0) + matched_idxs[match_quality < 1e-5] = -1 + + matched_gt_boxes_i = inst.gt_boxes.tensor[matched_idxs.clip(min=0)] + gt_labels_i = inst.gt_classes[matched_idxs.clip(min=0)] + + # Anchors with matched_idxs = -1 are labeled background. + gt_labels_i[matched_idxs < 0] = self.num_classes + else: + matched_gt_boxes_i = torch.zeros_like(Boxes.cat(anchors).tensor) + gt_labels_i = torch.full( + (len(matched_gt_boxes_i),), + fill_value=self.num_classes, + dtype=torch.long, + device=matched_gt_boxes_i.device, + ) + + gt_labels.append(gt_labels_i) + matched_gt_boxes.append(matched_gt_boxes_i) + + return gt_labels, matched_gt_boxes + + def losses( + self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness + ): + """ + This method is almost identical to :meth:`RetinaNet.losses`, with an extra + "loss_centerness" in the returned dict. + """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (M, R) + + pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) + num_pos_anchors = pos_mask.sum().item() + get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) + normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 300) + + # classification and regression loss + gt_labels_target = F.one_hot(gt_labels, num_classes=self.num_classes + 1)[ + :, :, :-1 + ] # no loss for the last (background) class + loss_cls = sigmoid_focal_loss_jit( + torch.cat(pred_logits, dim=1), + gt_labels_target.to(pred_logits[0].dtype), + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + + loss_box_reg = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type="giou", + ) + + ctrness_targets = self.compute_ctrness_targets(anchors, gt_boxes) # (M, R) + pred_centerness = torch.cat(pred_centerness, dim=1).squeeze(dim=2) # (M, R) + ctrness_loss = F.binary_cross_entropy_with_logits( + pred_centerness[pos_mask], ctrness_targets[pos_mask], reduction="sum" + ) + return { + "loss_fcos_cls": loss_cls / normalizer, + "loss_fcos_loc": loss_box_reg / normalizer, + "loss_fcos_ctr": ctrness_loss / normalizer, + } + + def compute_ctrness_targets(self, anchors: List[Boxes], gt_boxes: List[torch.Tensor]): + anchors = Boxes.cat(anchors).tensor # Rx4 + reg_targets = [self.box2box_transform.get_deltas(anchors, m) for m in gt_boxes] + reg_targets = torch.stack(reg_targets, dim=0) # NxRx4 + if len(reg_targets) == 0: + return reg_targets.new_zeros(len(reg_targets)) + left_right = reg_targets[:, :, [0, 2]] + top_bottom = reg_targets[:, :, [1, 3]] + ctrness = (left_right.min(dim=-1)[0] / 
left_right.max(dim=-1)[0]) * (
+            top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]
+        )
+        return torch.sqrt(ctrness)
+
+    def forward_inference(
+        self,
+        images: ImageList,
+        features: List[torch.Tensor],
+        predictions: List[List[torch.Tensor]],
+    ):
+        pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions(
+            predictions, [self.num_classes, 4, 1]
+        )
+        anchors = self.anchor_generator(features)
+
+        results: List[Instances] = []
+        for img_idx, image_size in enumerate(images.image_sizes):
+            scores_per_image = [
+                # Multiply and sqrt centerness & classification scores
+                # (See eqn. 4 in https://arxiv.org/abs/2006.09214)
+                torch.sqrt(x[img_idx].sigmoid_() * y[img_idx].sigmoid_())
+                for x, y in zip(pred_logits, pred_centerness)
+            ]
+            deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
+            results_per_image = self.inference_single_image(
+                anchors, scores_per_image, deltas_per_image, image_size
+            )
+            results.append(results_per_image)
+        return results
+
+    def inference_single_image(
+        self,
+        anchors: List[Boxes],
+        box_cls: List[torch.Tensor],
+        box_delta: List[torch.Tensor],
+        image_size: Tuple[int, int],
+    ):
+        """
+        Identical to :meth:`RetinaNet.inference_single_image`.
+        """
+        pred = self._decode_multi_level_predictions(
+            anchors,
+            box_cls,
+            box_delta,
+            self.test_score_thresh,
+            self.test_topk_candidates,
+            image_size,
+        )
+        keep = batched_nms(
+            pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh
+        )
+        return pred[keep[: self.max_detections_per_image]]
+
+
+class FCOSHead(RetinaNetHead):
+    """
+    The head used in :paper:`fcos`. It adds an additional centerness
+    prediction branch on top of :class:`RetinaNetHead`.
+    """
+
+    def __init__(self, *, input_shape: List[ShapeSpec], conv_dims: List[int], **kwargs):
+        super().__init__(input_shape=input_shape, conv_dims=conv_dims, num_anchors=1, **kwargs)
+        # Unlike original FCOS, we do not add an additional learnable scale layer
+        # because it's found to have no benefits after normalizing regression targets by stride.
+        self._num_features = len(input_shape)
+        self.ctrness = nn.Conv2d(conv_dims[-1], 1, kernel_size=3, stride=1, padding=1)
+        torch.nn.init.normal_(self.ctrness.weight, std=0.01)
+        torch.nn.init.constant_(self.ctrness.bias, 0)
+
+    def forward(self, features):
+        assert len(features) == self._num_features
+        logits = []
+        bbox_reg = []
+        ctrness = []
+        for feature in features:
+            logits.append(self.cls_score(self.cls_subnet(feature)))
+            bbox_feature = self.bbox_subnet(feature)
+            bbox_reg.append(self.bbox_pred(bbox_feature))
+            ctrness.append(self.ctrness(bbox_feature))
+        return logits, bbox_reg, ctrness
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/panoptic_fpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/panoptic_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3784b62574d6cff63f434c681ad4aa1ec20d0f3
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/panoptic_fpn.py
@@ -0,0 +1,269 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
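+
+# (A minimal usage sketch under assumed setup: with cfg.MODEL.META_ARCHITECTURE set
+#  to "PanopticFPN" and `model = build_model(cfg)` from `build.py`, calling
+#  `model([{"image": img}])` in eval mode yields one dict per image whose
+#  "panoptic_seg" entry is the `(panoptic_seg, segments_info)` pair produced by
+#  `combine_semantic_and_instance_outputs` below; `img` is a (C, H, W) tensor.)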
+ +import logging +from typing import Dict, List +import torch +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.structures import ImageList + +from ..postprocessing import detector_postprocess, sem_seg_postprocess +from .build import META_ARCH_REGISTRY +from .rcnn import GeneralizedRCNN +from .semantic_seg import build_sem_seg_head + +__all__ = ["PanopticFPN"] + + +@META_ARCH_REGISTRY.register() +class PanopticFPN(GeneralizedRCNN): + """ + Implement the paper :paper:`PanopticFPN`. + """ + + @configurable + def __init__( + self, + *, + sem_seg_head: nn.Module, + combine_overlap_thresh: float = 0.5, + combine_stuff_area_thresh: float = 4096, + combine_instances_score_thresh: float = 0.5, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + sem_seg_head: a module for the semantic segmentation head. + combine_overlap_thresh: combine masks into one instances if + they have enough overlap + combine_stuff_area_thresh: ignore stuff areas smaller than this threshold + combine_instances_score_thresh: ignore instances whose score is + smaller than this threshold + + Other arguments are the same as :class:`GeneralizedRCNN`. + """ + super().__init__(**kwargs) + self.sem_seg_head = sem_seg_head + # options when combining instance & semantic outputs + self.combine_overlap_thresh = combine_overlap_thresh + self.combine_stuff_area_thresh = combine_stuff_area_thresh + self.combine_instances_score_thresh = combine_instances_score_thresh + + @classmethod + def from_config(cls, cfg): + ret = super().from_config(cfg) + ret.update( + { + "combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH, + "combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT, + "combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa + } + ) + ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape()) + logger = logging.getLogger(__name__) + if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED: + logger.warning( + "PANOPTIC_FPN.COMBINED.ENABLED is no longer used. " + " model.inference(do_postprocess=) should be used to toggle postprocessing." + ) + if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0: + w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT + logger.warning( + "PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head." + ) + + def update_weight(x): + if isinstance(x, dict): + return {k: v * w for k, v in x.items()} + else: + return x * w + + roi_heads = ret["roi_heads"] + roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight) + roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight) + return ret + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + + * "image": Tensor, image in (C, H, W) format. + * "instances": Instances + * "sem_seg": semantic segmentation ground truth. + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + each dict has the results for one image. The dict contains the following keys: + + * "instances": see :meth:`GeneralizedRCNN.forward` for its format. 
+ * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. + * "panoptic_seg": See the return value of + :func:`combine_semantic_and_instance_outputs` for its format. + """ + if not self.training: + return self.inference(batched_inputs) + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + assert "sem_seg" in batched_inputs[0] + gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] + gt_sem_seg = ImageList.from_tensors( + gt_sem_seg, + self.backbone.size_divisibility, + self.sem_seg_head.ignore_value, + self.backbone.padding_constraints, + ).tensor + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) + + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + detector_results, detector_losses = self.roi_heads( + images, features, proposals, gt_instances + ) + + losses = sem_seg_losses + losses.update(proposal_losses) + losses.update(detector_losses) + return losses + + def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + When do_postprocess=True, see docs in :meth:`forward`. + Otherwise, returns a (list[Instances], list[Tensor]) that contains + the raw detector outputs, and raw semantic segmentation outputs. + """ + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None) + proposals, _ = self.proposal_generator(images, features, None) + detector_results, _ = self.roi_heads(images, features, proposals, None) + + if do_postprocess: + processed_results = [] + for sem_seg_result, detector_result, input_per_image, image_size in zip( + sem_seg_results, detector_results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + detector_r = detector_postprocess(detector_result, height, width) + + processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) + + panoptic_r = combine_semantic_and_instance_outputs( + detector_r, + sem_seg_r.argmax(dim=0), + self.combine_overlap_thresh, + self.combine_stuff_area_thresh, + self.combine_instances_score_thresh, + ) + processed_results[-1]["panoptic_seg"] = panoptic_r + return processed_results + else: + return detector_results, sem_seg_results + + +def combine_semantic_and_instance_outputs( + instance_results, + semantic_results, + overlap_threshold, + stuff_area_thresh, + instances_score_thresh, +): + """ + Implement a simple combining logic following + "combine_semantic_and_instance_predictions.py" in panopticapi + to produce panoptic segmentation outputs. + + Args: + instance_results: output of :func:`detector_postprocess`. + semantic_results: an (H, W) tensor, each element is the contiguous semantic + category id + + Returns: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". 
+ """ + panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) + + # sort instance outputs by scores + sorted_inds = torch.argsort(-instance_results.scores) + + current_segment_id = 0 + segments_info = [] + + instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) + + # Add instances one-by-one, check for overlaps with existing ones + for inst_id in sorted_inds: + score = instance_results.scores[inst_id].item() + if score < instances_score_thresh: + break + mask = instance_masks[inst_id] # H,W + mask_area = mask.sum().item() + + if mask_area == 0: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum().item() + + if intersect_area * 1.0 / mask_area > overlap_threshold: + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": True, + "score": score, + "category_id": instance_results.pred_classes[inst_id].item(), + "instance_id": inst_id.item(), + } + ) + + # Add semantic results to remaining empty areas + semantic_labels = torch.unique(semantic_results).cpu().tolist() + for semantic_label in semantic_labels: + if semantic_label == 0: # 0 is a special "thing" class + continue + mask = (semantic_results == semantic_label) & (panoptic_seg == 0) + mask_area = mask.sum().item() + if mask_area < stuff_area_thresh: + continue + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": False, + "category_id": semantic_label, + "area": mask_area, + } + ) + + return panoptic_seg, segments_info diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/rcnn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..31d54c8a0f463be6266ef3b3248c7cde53b85bba --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/rcnn.py @@ -0,0 +1,341 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.data.detection_utils import convert_image_to_rgb +from custom_detectron2.layers import move_device_like +from custom_detectron2.structures import ImageList, Instances +from custom_detectron2.utils.events import get_event_storage +from custom_detectron2.utils.logger import log_first_n + +from ..backbone import Backbone, build_backbone +from ..postprocessing import detector_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY + +__all__ = ["GeneralizedRCNN", "ProposalNetwork"] + + +@META_ARCH_REGISTRY.register() +class GeneralizedRCNN(nn.Module): + """ + Generalized R-CNN. Any models that contains the following three components: + 1. Per-image feature extraction (aka backbone) + 2. Region proposal generation + 3. 
Per-region feature extraction and prediction + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + proposal_generator: nn.Module, + roi_heads: nn.Module, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + input_format: Optional[str] = None, + vis_period: int = 0, + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + proposal_generator: a module that generates proposals using backbone features + roi_heads: a ROI head that performs per-region computation + pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + input_format: describe the meaning of channels of input. Needed by visualization + vis_period: the period to run visualization. Set to 0 to disable. + """ + super().__init__() + self.backbone = backbone + self.proposal_generator = proposal_generator + self.roi_heads = roi_heads + + self.input_format = input_format + self.vis_period = vis_period + if vis_period > 0: + assert input_format is not None, "input_format is required for visualization!" + + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + assert ( + self.pixel_mean.shape == self.pixel_std.shape + ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + return { + "backbone": backbone, + "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), + "roi_heads": build_roi_heads(cfg, backbone.output_shape()), + "input_format": cfg.INPUT.FORMAT, + "vis_period": cfg.VIS_PERIOD, + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + } + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def visualize_training(self, batched_inputs, proposals): + """ + A function used to visualize images and proposals. It shows ground truth + bounding boxes on the original image and up to 20 top-scoring predicted + object proposals on the original image. Users can implement different + visualization functions for different models. + + Args: + batched_inputs (list): a list that contains input to the model. + proposals (list): a list that contains predicted proposals. Both + batched_inputs and proposals should have the same length. + """ + from custom_detectron2.utils.visualizer import Visualizer + + storage = get_event_storage() + max_vis_prop = 20 + + for input, prop in zip(batched_inputs, proposals): + img = input["image"] + img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) + v_gt = Visualizer(img, None) + v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes) + anno_img = v_gt.get_image() + box_size = min(len(prop.proposal_boxes), max_vis_prop) + v_pred = Visualizer(img, None) + v_pred = v_pred.overlay_instances( + boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy() + ) + prop_img = v_pred.get_image() + vis_img = np.concatenate((anno_img, prop_img), axis=1) + vis_img = vis_img.transpose(2, 0, 1) + vis_name = "Left: GT bounding boxes; Right: Predicted proposals" + storage.put_image(vis_name, vis_img) + break # only visualize one image in a batch + + def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . 
+ Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances (optional): groundtruth :class:`Instances` + * proposals (optional): :class:`Instances`, precomputed proposals. + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "instances" whose value is a :class:`Instances`. + The :class:`Instances` object has the following keys: + "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" + """ + if not self.training: + return self.inference(batched_inputs) + + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + + if self.proposal_generator is not None: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) + if self.vis_period > 0: + storage = get_event_storage() + if storage.iter % self.vis_period == 0: + self.visualize_training(batched_inputs, proposals) + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + return losses + + def inference( + self, + batched_inputs: List[Dict[str, torch.Tensor]], + detected_instances: Optional[List[Instances]] = None, + do_postprocess: bool = True, + ): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + detected_instances (None or list[Instances]): if not None, it + contains an `Instances` object per image. The `Instances` + object contains "pred_boxes" and "pred_classes" which are + known boxes in the image. + The inference will then skip the detection of bounding boxes, + and only predict other per-ROI outputs. + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + When do_postprocess=True, same as in :meth:`forward`. + Otherwise, a list[Instances] containing raw network outputs. + """ + assert not self.training + + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + if detected_instances is None: + if self.proposal_generator is not None: + proposals, _ = self.proposal_generator(images, features, None) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + + results, _ = self.roi_heads(images, features, proposals, None) + else: + detected_instances = [x.to(self.device) for x in detected_instances] + results = self.roi_heads.forward_with_given_boxes(features, detected_instances) + + if do_postprocess: + assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess." + return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes) + return results + + def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]): + """ + Normalize, pad and batch the input images. 
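+
+        For example (illustrative shapes): with size_divisibility=32, input images
+        of shape (3, 480, 512) and (3, 500, 600) are normalized per channel and then
+        padded into a single (2, 3, 512, 608) batch tensor.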
+ """ + images = [self._move_to_current_device(x["image"]) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + return images + + @staticmethod + def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]], image_sizes): + """ + Rescale the output instances to the target size. + """ + # note: private function; subject to changes + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + instances, batched_inputs, image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + +@META_ARCH_REGISTRY.register() +class ProposalNetwork(nn.Module): + """ + A meta architecture that only predicts object proposals. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + proposal_generator: nn.Module, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + proposal_generator: a module that generates proposals using backbone features + pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + """ + super().__init__() + self.backbone = backbone + self.proposal_generator = proposal_generator + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + return { + "backbone": backbone, + "proposal_generator": build_proposal_generator(cfg, backbone.output_shape()), + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + } + + @property + def device(self): + return self.pixel_mean.device + + def _move_to_current_device(self, x): + return move_device_like(x, self.pixel_mean) + + def forward(self, batched_inputs): + """ + Args: + Same as in :class:`GeneralizedRCNN.forward` + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "proposals" whose value is a + :class:`Instances` with keys "proposal_boxes" and "objectness_logits". + """ + images = [self._move_to_current_device(x["image"]) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + features = self.backbone(images.tensor) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + # In training, the proposals are not useful at all but we generate them anyway. + # This makes RPN-only models about 5% slower. 
+ if self.training: + return proposal_losses + + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + proposals, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"proposals": r}) + return processed_results diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/retinanet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/retinanet.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfca1fface9d53b0f22572699bdb6625978722e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/retinanet.py @@ -0,0 +1,439 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import math +from typing import List, Tuple +import torch +from fvcore.nn import sigmoid_focal_loss_jit +from torch import Tensor, nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm +from custom_detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from custom_detectron2.utils.events import get_event_storage + +from ..anchor_generator import build_anchor_generator +from ..backbone import Backbone, build_backbone +from ..box_regression import Box2BoxTransform, _dense_box_regression_loss +from ..matcher import Matcher +from .build import META_ARCH_REGISTRY +from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa + +__all__ = ["RetinaNet"] + + +logger = logging.getLogger(__name__) + + +@META_ARCH_REGISTRY.register() +class RetinaNet(DenseDetector): + """ + Implement RetinaNet in :paper:`RetinaNet`. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + head: nn.Module, + head_in_features, + anchor_generator, + box2box_transform, + anchor_matcher, + num_classes, + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + smooth_l1_beta=0.0, + box_reg_loss_type="smooth_l1", + test_score_thresh=0.05, + test_topk_candidates=1000, + test_nms_thresh=0.5, + max_detections_per_image=100, + pixel_mean, + pixel_std, + vis_period=0, + input_format="BGR", + ): + """ + NOTE: this interface is experimental. + + Args: + backbone: a backbone module, must follow detectron2's backbone interface + head (nn.Module): a module that predicts logits and regression deltas + for each level from a list of per-level features + head_in_features (Tuple[str]): Names of the input feature maps to be used in head + anchor_generator (nn.Module): a module that creates anchors from a + list of features. Usually an instance of :class:`AnchorGenerator` + box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to + instance boxes + anchor_matcher (Matcher): label the anchors by matching them with ground truth. + num_classes (int): number of classes. Used to label background proposals. 
+ + # Loss parameters: + focal_loss_alpha (float): focal_loss_alpha + focal_loss_gamma (float): focal_loss_gamma + smooth_l1_beta (float): smooth_l1_beta + box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" + + # Inference parameters: + test_score_thresh (float): Inference cls score threshold, only anchors with + score > INFERENCE_TH are considered for inference (to improve speed) + test_topk_candidates (int): Select topk candidates before NMS + test_nms_thresh (float): Overlap threshold used for non-maximum suppression + (suppress boxes with IoU >= this threshold) + max_detections_per_image (int): + Maximum number of detections to return per image during inference + (100 is based on the limit established for the COCO dataset). + + pixel_mean, pixel_std: see :class:`DenseDetector`. + """ + super().__init__( + backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std + ) + self.num_classes = num_classes + + # Anchors + self.anchor_generator = anchor_generator + self.box2box_transform = box2box_transform + self.anchor_matcher = anchor_matcher + + # Loss parameters: + self.focal_loss_alpha = focal_loss_alpha + self.focal_loss_gamma = focal_loss_gamma + self.smooth_l1_beta = smooth_l1_beta + self.box_reg_loss_type = box_reg_loss_type + # Inference parameters: + self.test_score_thresh = test_score_thresh + self.test_topk_candidates = test_topk_candidates + self.test_nms_thresh = test_nms_thresh + self.max_detections_per_image = max_detections_per_image + # Vis parameters + self.vis_period = vis_period + self.input_format = input_format + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + backbone_shape = backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] + head = RetinaNetHead(cfg, feature_shapes) + anchor_generator = build_anchor_generator(cfg, feature_shapes) + return { + "backbone": backbone, + "head": head, + "anchor_generator": anchor_generator, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), + "anchor_matcher": Matcher( + cfg.MODEL.RETINANET.IOU_THRESHOLDS, + cfg.MODEL.RETINANET.IOU_LABELS, + allow_low_quality_matches=True, + ), + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, + "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, + # Loss parameters: + "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, + "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, + "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, + "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, + # Inference parameters: + "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, + "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, + "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, + "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, + # Vis parameters + "vis_period": cfg.VIS_PERIOD, + "input_format": cfg.INPUT.FORMAT, + } + + def forward_training(self, images, features, predictions, gt_instances): + # Transpose the Hi*Wi*A dimension to the middle: + pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( + predictions, [self.num_classes, 4] + ) + anchors = self.anchor_generator(features) + gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) + return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) + + def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, 
gt_boxes): + """ + Args: + anchors (list[Boxes]): a list of #feature level Boxes + gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. + Their shapes are (N, R) and (N, R, 4), respectively, where R is + the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) + pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the + list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). + Where K is the number of classes used in `pred_logits`. + + Returns: + dict[str, Tensor]: + mapping from a named loss to a scalar tensor storing the loss. + Used during training only. The dict keys are: "loss_cls" and "loss_box_reg" + """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (N, R) + + valid_mask = gt_labels >= 0 + pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) + num_pos_anchors = pos_mask.sum().item() + get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) + normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) + + # classification and regression loss + gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ + :, :-1 + ] # no loss for the last (background) class + loss_cls = sigmoid_focal_loss_jit( + cat(pred_logits, dim=1)[valid_mask], + gt_labels_target.to(pred_logits[0].dtype), + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) + + loss_box_reg = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type=self.box_reg_loss_type, + smooth_l1_beta=self.smooth_l1_beta, + ) + + return { + "loss_cls": loss_cls / normalizer, + "loss_box_reg": loss_box_reg / normalizer, + } + + @torch.no_grad() + def label_anchors(self, anchors, gt_instances): + """ + Args: + anchors (list[Boxes]): A list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + gt_instances (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. + + Returns: + list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across all feature maps (sum(Hi * Wi * A)). + Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. + + list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors + across feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as foreground. + """ + anchors = Boxes.cat(anchors) # Rx4 + + gt_labels = [] + matched_gt_boxes = [] + for gt_per_image in gt_instances: + match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) + matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) + del match_quality_matrix + + if len(gt_per_image) > 0: + matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] + + gt_labels_i = gt_per_image.gt_classes[matched_idxs] + # Anchors with label 0 are treated as background. + gt_labels_i[anchor_labels == 0] = self.num_classes + # Anchors with label -1 are ignored. 
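+                # (e.g. with an assumed num_classes = 80, the per-anchor labels land
+                #  in {0, ..., 79} for matched foreground, 80 for background, and -1
+                #  for ignored anchors.)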
+ gt_labels_i[anchor_labels == -1] = -1 + else: + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes + + gt_labels.append(gt_labels_i) + matched_gt_boxes.append(matched_gt_boxes_i) + + return gt_labels, matched_gt_boxes + + def forward_inference( + self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] + ): + pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( + predictions, [self.num_classes, 4] + ) + anchors = self.anchor_generator(features) + + results: List[Instances] = [] + for img_idx, image_size in enumerate(images.image_sizes): + scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] + deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] + results_per_image = self.inference_single_image( + anchors, scores_per_image, deltas_per_image, image_size + ) + results.append(results_per_image) + return results + + def inference_single_image( + self, + anchors: List[Boxes], + box_cls: List[Tensor], + box_delta: List[Tensor], + image_size: Tuple[int, int], + ): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors in that feature level. + box_cls (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (H x W x A, K) + box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + pred = self._decode_multi_level_predictions( + anchors, + box_cls, + box_delta, + self.test_score_thresh, + self.test_topk_candidates, + image_size, + ) + keep = batched_nms( # per-class NMS + pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh + ) + return pred[keep[: self.max_detections_per_image]] + + +class RetinaNetHead(nn.Module): + """ + The head used in RetinaNet for object classification and box regression. + It has two subnets for the two tasks, with a common structure but separate parameters. + """ + + @configurable + def __init__( + self, + *, + input_shape: List[ShapeSpec], + num_classes, + num_anchors, + conv_dims: List[int], + norm="", + prior_prob=0.01, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (List[ShapeSpec]): input shape + num_classes (int): number of classes. Used to label background proposals. + num_anchors (int): number of generated anchors + conv_dims (List[int]): dimensions for each convolution layer + norm (str or callable): + Normalization for conv layers except for the two output layers. + See :func:`detectron2.layers.get_norm` for supported types. + prior_prob (float): Prior weight for computing bias + """ + super().__init__() + + self._num_features = len(input_shape) + if norm == "BN" or norm == "SyncBN": + logger.info( + f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." + ) + bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm + + def norm(c): + return CycleBatchNormList( + length=self._num_features, bn_class=bn_class, num_features=c + ) + + else: + norm_name = str(type(get_norm(norm, 32))) + if "BN" in norm_name: + logger.warning( + f"Shared BatchNorm (type={norm_name}) may not work well in RetinaNetHead." 
+ ) + + cls_subnet = [] + bbox_subnet = [] + for in_channels, out_channels in zip( + [input_shape[0].channels] + list(conv_dims), conv_dims + ): + cls_subnet.append( + nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + ) + if norm: + cls_subnet.append(get_norm(norm, out_channels)) + cls_subnet.append(nn.ReLU()) + bbox_subnet.append( + nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + ) + if norm: + bbox_subnet.append(get_norm(norm, out_channels)) + bbox_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.cls_score = nn.Conv2d( + conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + self.bbox_pred = nn.Conv2d( + conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1 + ) + + # Initialization + for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -(math.log((1 - prior_prob) / prior_prob)) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + @classmethod + def from_config(cls, cfg, input_shape: List[ShapeSpec]): + num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors + assert ( + len(set(num_anchors)) == 1 + ), "Using different number of anchors between levels is not currently supported!" + num_anchors = num_anchors[0] + + return { + "input_shape": input_shape, + "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, + "conv_dims": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS, + "prior_prob": cfg.MODEL.RETINANET.PRIOR_PROB, + "norm": cfg.MODEL.RETINANET.NORM, + "num_anchors": num_anchors, + } + + def forward(self, features: List[Tensor]): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. + Each tensor in the list correspond to different feature levels. + + Returns: + logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. + bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + """ + assert len(features) == self._num_features + logits = [] + bbox_reg = [] + for feature in features: + logits.append(self.cls_score(self.cls_subnet(feature))) + bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) + return logits, bbox_reg diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/semantic_seg.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/semantic_seg.py new file mode 100644 index 0000000000000000000000000000000000000000..e723b854f99da18850da0d7ee7914bd8a7bcd80d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/meta_arch/semantic_seg.py @@ -0,0 +1,267 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
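+#
+# Hypothetical usage sketch for this file (it assumes this custom_detectron2
+# copy keeps detectron2's public config/build API; the cfg values and tensor
+# names below are illustrative, not defaults):
+#
+#   from custom_detectron2.config import get_cfg
+#   from custom_detectron2.modeling import build_model
+#
+#   cfg = get_cfg()
+#   cfg.MODEL.META_ARCHITECTURE = "SemanticSegmentor"
+#   model = build_model(cfg)  # assembles backbone + sem_seg_head from cfg
+#   # inference: one dict per image; adding a "sem_seg" target enables training
+#   outputs = model([{"image": image_chw, "height": 512, "width": 512}])
+#   logits = outputs[0]["sem_seg"]  # (num_classes, 512, 512)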
+import numpy as np
+from typing import Callable, Dict, Optional, Tuple, Union
+import fvcore.nn.weight_init as weight_init
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from custom_detectron2.config import configurable
+from custom_detectron2.layers import Conv2d, ShapeSpec, get_norm
+from custom_detectron2.structures import ImageList
+from custom_detectron2.utils.registry import Registry
+
+from ..backbone import Backbone, build_backbone
+from ..postprocessing import sem_seg_postprocess
+from .build import META_ARCH_REGISTRY
+
+__all__ = [
+    "SemanticSegmentor",
+    "SEM_SEG_HEADS_REGISTRY",
+    "SemSegFPNHead",
+    "build_sem_seg_head",
+]
+
+
+SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS")
+SEM_SEG_HEADS_REGISTRY.__doc__ = """
+Registry for semantic segmentation heads, which make semantic segmentation predictions
+from feature maps.
+"""
+
+
+@META_ARCH_REGISTRY.register()
+class SemanticSegmentor(nn.Module):
+    """
+    Main class for semantic segmentation architectures.
+    """
+
+    @configurable
+    def __init__(
+        self,
+        *,
+        backbone: Backbone,
+        sem_seg_head: nn.Module,
+        pixel_mean: Tuple[float],
+        pixel_std: Tuple[float],
+    ):
+        """
+        Args:
+            backbone: a backbone module; must follow detectron2's backbone interface
+            sem_seg_head: a module that predicts semantic segmentation from backbone features
+            pixel_mean, pixel_std: list or tuple with #channels elements, representing
+                the per-channel mean and std to be used to normalize the input image
+        """
+        super().__init__()
+        self.backbone = backbone
+        self.sem_seg_head = sem_seg_head
+        self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
+        self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
+
+    @classmethod
+    def from_config(cls, cfg):
+        backbone = build_backbone(cfg)
+        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
+        return {
+            "backbone": backbone,
+            "sem_seg_head": sem_seg_head,
+            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
+            "pixel_std": cfg.MODEL.PIXEL_STD,
+        }
+
+    @property
+    def device(self):
+        return self.pixel_mean.device
+
+    def forward(self, batched_inputs):
+        """
+        Args:
+            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
+                Each item in the list contains the inputs for one image.
+
+                For now, each item in the list is a dict that contains:
+
+                * "image": Tensor, image in (C, H, W) format.
+                * "sem_seg": semantic segmentation ground truth
+                * Other information that's included in the original dicts, such as:
+                  "height", "width" (int): the output resolution of the model (may be different
+                  from input resolution), used in inference.
+
+        Returns:
+            list[dict]:
+                Each dict is the output for one input image. The dict contains one
+                key "sem_seg" whose value is a Tensor that represents the per-pixel
+                segmentation predicted by the head. The prediction has shape KxHxW
+                that represents the logits of each class for each pixel.
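+
+            For instance (hypothetical numbers): with K=54 classes and a
+            512x768 input, the i-th output dict would be
+            ``{"sem_seg": Tensor of shape (54, 512, 768)}``.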
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors( + images, + self.backbone.size_divisibility, + padding_constraints=self.backbone.padding_constraints, + ) + + features = self.backbone(images.tensor) + + if "sem_seg" in batched_inputs[0]: + targets = [x["sem_seg"].to(self.device) for x in batched_inputs] + targets = ImageList.from_tensors( + targets, + self.backbone.size_divisibility, + self.sem_seg_head.ignore_value, + self.backbone.padding_constraints, + ).tensor + else: + targets = None + results, losses = self.sem_seg_head(features, targets) + + if self.training: + return losses + + processed_results = [] + for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = sem_seg_postprocess(result, image_size, height, width) + processed_results.append({"sem_seg": r}) + return processed_results + + +def build_sem_seg_head(cfg, input_shape): + """ + Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. + """ + name = cfg.MODEL.SEM_SEG_HEAD.NAME + return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +@SEM_SEG_HEADS_REGISTRY.register() +class SemSegFPNHead(nn.Module): + """ + A semantic segmentation head described in :paper:`PanopticFPN`. + It takes a list of FPN features as input, and applies a sequence of + 3x3 convs and upsampling to scale all of them to the stride defined by + ``common_stride``. Then these features are added and used to make final + predictions by another 1x1 conv layer. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + num_classes: int, + conv_dims: int, + common_stride: int, + loss_weight: float = 1.0, + norm: Optional[Union[str, Callable]] = None, + ignore_value: int = -1, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape: shapes (channels and stride) of the input features + num_classes: number of classes to predict + conv_dims: number of output channels for the intermediate conv layers. + common_stride: the common stride that all features will be upscaled to + loss_weight: loss weight + norm (str or callable): normalization for all conv layers + ignore_value: category id to be ignored during training. 
+ """ + super().__init__() + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + if not len(input_shape): + raise ValueError("SemSegFPNHead(input_shape=) cannot be empty!") + self.in_features = [k for k, v in input_shape] + feature_strides = [v.stride for k, v in input_shape] + feature_channels = [v.channels for k, v in input_shape] + + self.ignore_value = ignore_value + self.common_stride = common_stride + self.loss_weight = loss_weight + + self.scale_heads = [] + for in_feature, stride, channels in zip( + self.in_features, feature_strides, feature_channels + ): + head_ops = [] + head_length = max(1, int(np.log2(stride) - np.log2(self.common_stride))) + for k in range(head_length): + norm_module = get_norm(norm, conv_dims) + conv = Conv2d( + channels if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=norm_module, + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if stride != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + return { + "input_shape": { + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + }, + "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + "conv_dims": cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM, + "common_stride": cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, + "norm": cfg.MODEL.SEM_SEG_HEAD.NORM, + "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, + } + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + x = self.layers(features) + if self.training: + return None, self.losses(x, targets) + else: + x = F.interpolate( + x, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return x, {} + + def layers(self, features): + for i, f in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[f]) + else: + x = x + self.scale_heads[i](features[f]) + x = self.predictor(x) + return x + + def losses(self, predictions, targets): + predictions = predictions.float() # https://github.com/pytorch/pytorch/issues/48163 + predictions = F.interpolate( + predictions, + scale_factor=self.common_stride, + mode="bilinear", + align_corners=False, + ) + loss = F.cross_entropy( + predictions, targets, reduction="mean", ignore_index=self.ignore_value + ) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/mmdet_wrapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/mmdet_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..ae02094cbf7b2d8d664a845ad02fff5386c52e8b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/mmdet_wrapper.py @@ -0,0 +1,273 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import itertools +import logging +import numpy as np +from collections import OrderedDict +from collections.abc import Mapping +from typing import Dict, List, Optional, Tuple, Union +import torch +from omegaconf import DictConfig, OmegaConf +from torch import Tensor, nn + +from custom_detectron2.layers import ShapeSpec +from custom_detectron2.structures import BitMasks, Boxes, ImageList, Instances +from custom_detectron2.utils.events import get_event_storage + +from .backbone import Backbone + +logger = logging.getLogger(__name__) + + +def _to_container(cfg): + """ + mmdet will assert the type of dict/list. + So convert omegaconf objects to dict/list. + """ + if isinstance(cfg, DictConfig): + cfg = OmegaConf.to_container(cfg, resolve=True) + from custom_mmpkg.custom_mmcv.utils import ConfigDict + + return ConfigDict(cfg) + + +class MMDetBackbone(Backbone): + """ + Wrapper of mmdetection backbones to use in detectron2. + + mmdet backbones produce list/tuple of tensors, while detectron2 backbones + produce a dict of tensors. This class wraps the given backbone to produce + output in detectron2's convention, so it can be used in place of detectron2 + backbones. + """ + + def __init__( + self, + backbone: Union[nn.Module, Mapping], + neck: Union[nn.Module, Mapping, None] = None, + *, + output_shapes: List[ShapeSpec], + output_names: Optional[List[str]] = None, + ): + """ + Args: + backbone: either a backbone module or a mmdet config dict that defines a + backbone. The backbone takes a 4D image tensor and returns a + sequence of tensors. + neck: either a backbone module or a mmdet config dict that defines a + neck. The neck takes outputs of backbone and returns a + sequence of tensors. If None, no neck is used. + output_shapes: shape for every output of the backbone (or neck, if given). + stride and channels are often needed. + output_names: names for every output of the backbone (or neck, if given). + By default, will use "out0", "out1", ... + """ + super().__init__() + if isinstance(backbone, Mapping): + from mmdet.models import build_backbone + + backbone = build_backbone(_to_container(backbone)) + self.backbone = backbone + + if isinstance(neck, Mapping): + from mmdet.models import build_neck + + neck = build_neck(_to_container(neck)) + self.neck = neck + + # "Neck" weights, if any, are part of neck itself. This is the interface + # of mmdet so we follow it. Reference: + # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py + logger.info("Initializing mmdet backbone weights...") + self.backbone.init_weights() + # train() in mmdet modules is non-trivial, and has to be explicitly + # called. Reference: + # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py + self.backbone.train() + if self.neck is not None: + logger.info("Initializing mmdet neck weights ...") + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + self.neck.train() + + self._output_shapes = output_shapes + if not output_names: + output_names = [f"out{i}" for i in range(len(output_shapes))] + self._output_names = output_names + + def forward(self, x) -> Dict[str, Tensor]: + outs = self.backbone(x) + if self.neck is not None: + outs = self.neck(outs) + assert isinstance( + outs, (list, tuple) + ), "mmdet backbone should return a list/tuple of tensors!" 
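+        # E.g. with five FPN outputs and the default names, the mapping below
+        # would produce {"out0": t0, ..., "out4": t4} (illustrative names only).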
+ if len(outs) != len(self._output_shapes): + raise ValueError( + "Length of output_shapes does not match outputs from the mmdet backbone: " + f"{len(outs)} != {len(self._output_shapes)}" + ) + return {k: v for k, v in zip(self._output_names, outs)} + + def output_shape(self) -> Dict[str, ShapeSpec]: + return {k: v for k, v in zip(self._output_names, self._output_shapes)} + + +class MMDetDetector(nn.Module): + """ + Wrapper of a mmdetection detector model, for detection and instance segmentation. + Input/output formats of this class follow detectron2's convention, so a + mmdetection model can be trained and evaluated in detectron2. + """ + + def __init__( + self, + detector: Union[nn.Module, Mapping], + *, + # Default is 32 regardless of model: + # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets + size_divisibility=32, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + ): + """ + Args: + detector: a mmdet detector, or a mmdet config dict that defines a detector. + size_divisibility: pad input images to multiple of this number + pixel_mean: per-channel mean to normalize input image + pixel_std: per-channel stddev to normalize input image + """ + super().__init__() + if isinstance(detector, Mapping): + from mmdet.models import build_detector + + detector = build_detector(_to_container(detector)) + self.detector = detector + self.detector.init_weights() + self.size_divisibility = size_divisibility + + self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) + assert ( + self.pixel_mean.shape == self.pixel_std.shape + ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" + + def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]): + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor + metas = [] + rescale = {"height" in x for x in batched_inputs} + if len(rescale) != 1: + raise ValueError("Some inputs have original height/width, but some don't!") + rescale = list(rescale)[0] + output_shapes = [] + for input in batched_inputs: + meta = {} + c, h, w = input["image"].shape + meta["img_shape"] = meta["ori_shape"] = (h, w, c) + if rescale: + scale_factor = np.array( + [w / input["width"], h / input["height"]] * 2, dtype="float32" + ) + ori_shape = (input["height"], input["width"]) + output_shapes.append(ori_shape) + meta["ori_shape"] = ori_shape + (c,) + else: + scale_factor = 1.0 + output_shapes.append((h, w)) + meta["scale_factor"] = scale_factor + meta["flip"] = False + padh, padw = images.shape[-2:] + meta["pad_shape"] = (padh, padw, c) + metas.append(meta) + + if self.training: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + if gt_instances[0].has("gt_masks"): + from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks + + def convert_mask(m, shape): + # mmdet mask format + if isinstance(m, BitMasks): + return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) + else: + return mm_PolygonMasks(m.polygons, shape[0], shape[1]) + + gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] + losses_and_metrics = self.detector.forward_train( + images, + metas, + [x.gt_boxes.tensor for x in gt_instances], + [x.gt_classes for x in gt_instances], + gt_masks=gt_masks, + ) + else: + 
losses_and_metrics = self.detector.forward_train( + images, + metas, + [x.gt_boxes.tensor for x in gt_instances], + [x.gt_classes for x in gt_instances], + ) + return _parse_losses(losses_and_metrics) + else: + results = self.detector.simple_test(images, metas, rescale=rescale) + results = [ + {"instances": _convert_mmdet_result(r, shape)} + for r, shape in zip(results, output_shapes) + ] + return results + + @property + def device(self): + return self.pixel_mean.device + + +# Reference: show_result() in +# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py +def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] + else: + bbox_result, segm_result = result, None + + bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 + bboxes, scores = bboxes[:, :4], bboxes[:, -1] + labels = [ + torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) + ] + labels = torch.cat(labels) + inst = Instances(shape) + inst.pred_boxes = Boxes(bboxes) + inst.scores = scores + inst.pred_classes = labels + + if segm_result is not None and len(labels) > 0: + segm_result = list(itertools.chain(*segm_result)) + segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] + segm_result = torch.stack(segm_result, dim=0) + inst.pred_masks = segm_result + return inst + + +# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py +def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError(f"{loss_name} is not a tensor or list of tensors") + + if "loss" not in loss_name: + # put metrics to storage; don't return them + storage = get_event_storage() + value = log_vars.pop(loss_name).cpu().item() + storage.put_scalar(loss_name, value) + return log_vars diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/poolers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/poolers.py new file mode 100644 index 0000000000000000000000000000000000000000..7a077a9af5e9541069b834ee290118d3628c9cb2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/poolers.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Optional +import torch +from torch import nn +from torchvision.ops import RoIPool + +from custom_detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor +from custom_detectron2.structures import Boxes +from custom_detectron2.utils.tracing import assert_fx_safe, is_fx_tracing + +""" +To export ROIPooler to torchscript, in this file, variables that should be annotated with +`Union[List[Boxes], List[RotatedBoxes]]` are only annotated with `List[Boxes]`. + +TODO: Correct these annotations when torchscript support `Union`. 
+https://github.com/pytorch/pytorch/issues/41412
+"""
+
+__all__ = ["ROIPooler"]
+
+
+def assign_boxes_to_levels(
+    box_lists: List[Boxes],
+    min_level: int,
+    max_level: int,
+    canonical_box_size: int,
+    canonical_level: int,
+):
+    """
+    Map each box in `box_lists` to a feature map level index and return the assignment
+    vector.
+
+    Args:
+        box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
+            where N is the number of images in the batch.
+        min_level (int): Smallest feature map level index. The input is considered index 0,
+            the output of stage 1 is index 1, and so on.
+        max_level (int): Largest feature map level index.
+        canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
+        canonical_level (int): The feature map level index on which a canonically-sized box
+            should be placed.
+
+    Returns:
+        A tensor of length M, where M is the total number of boxes aggregated over all
+        N batch images. The memory layout corresponds to the concatenation of boxes
+        from all images. Each element is the feature map index, as an offset from
+        `min_level`, for the corresponding box (so value i means the box is at
+        level `min_level + i`).
+    """
+    box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
+    # Eqn.(1) in the FPN paper
+    level_assignments = torch.floor(
+        canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)
+    )
+    # clamp level to (min, max), in case the box size is too large or too small
+    # for the available feature maps
+    level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
+    return level_assignments.to(torch.int64) - min_level
+
+
+# script the module to avoid hardcoded device type
+@torch.jit.script_if_tracing
+def _convert_boxes_to_pooler_format(boxes: torch.Tensor, sizes: torch.Tensor) -> torch.Tensor:
+    sizes = sizes.to(device=boxes.device)
+    indices = torch.repeat_interleave(
+        torch.arange(len(sizes), dtype=boxes.dtype, device=boxes.device), sizes
+    )
+    return cat([indices[:, None], boxes], dim=1)
+
+
+def convert_boxes_to_pooler_format(box_lists: List[Boxes]):
+    """
+    Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
+    (see description under Returns).
+
+    Args:
+        box_lists (list[Boxes] | list[RotatedBoxes]):
+            A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
+
+    Returns:
+        When input is list[Boxes]:
+            A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
+            N batch images.
+            The 5 columns are (batch index, x0, y0, x1, y1), where batch index
+            is the index in [0, N) identifying which batch image the box with corners at
+            (x0, y0, x1, y1) comes from.
+        When input is list[RotatedBoxes]:
+            A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
+            N batch images.
+            The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
+            where batch index is the index in [0, N) identifying which batch image the
+            rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
+    """
+    boxes = torch.cat([x.tensor for x in box_lists], dim=0)
+    # __len__ returns a Tensor in tracing.
+    sizes = shapes_to_tensor([x.__len__() for x in box_lists])
+    return _convert_boxes_to_pooler_format(boxes, sizes)
+
+
+@torch.jit.script_if_tracing
+def _create_zeros(
+    batch_target: Optional[torch.Tensor],
+    channels: int,
+    height: int,
+    width: int,
+    like_tensor: torch.Tensor,
+) -> torch.Tensor:
+    batches = batch_target.shape[0] if batch_target is not None else 0
+    sizes = (batches, channels, height, width)
+    return torch.zeros(sizes, dtype=like_tensor.dtype, device=like_tensor.device)
+
+
+class ROIPooler(nn.Module):
+    """
+    Region of interest feature map pooler that supports pooling from one or more
+    feature maps.
+    """
+
+    def __init__(
+        self,
+        output_size,
+        scales,
+        sampling_ratio,
+        pooler_type,
+        canonical_box_size=224,
+        canonical_level=4,
+    ):
+        """
+        Args:
+            output_size (int, tuple[int] or list[int]): output size of the pooled region,
+                e.g., 14 x 14. If a tuple or list is given, the length must be 2.
+            scales (list[float]): The scale for each low-level pooling op relative to
+                the input image. For a feature map with stride s relative to the input
+                image, scale is defined as 1/s. The stride must be a power of 2.
+                When there are multiple scales, they must form a pyramid, i.e. they must be
+                a monotonically decreasing geometric sequence with a factor of 1/2.
+            sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
+            pooler_type (string): Name of the type of pooling operation that should be applied.
+                For instance, "ROIPool" or "ROIAlignV2".
+            canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
+                is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
+                pre-training).
+            canonical_level (int): The feature map level index on which a canonically-sized box
+                should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
+                i.e., a box of size 224x224 will be placed on the feature with stride=16.
+                The box placement for all boxes will be determined from their sizes w.r.t.
+                canonical_box_size. For example, a box whose area is 4x that of a canonical box
+                should be used to pool features from feature level ``canonical_level+1``.
+
+                Note that the actual input feature maps given to this module may not have
+                sufficiently many levels for the input boxes. If the boxes are too large or too
+                small for the input feature maps, the closest level will be used.
+        """
+        super().__init__()
+
+        if isinstance(output_size, int):
+            output_size = (output_size, output_size)
+        assert len(output_size) == 2
+        assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
+        self.output_size = output_size
+
+        if pooler_type == "ROIAlign":
+            self.level_poolers = nn.ModuleList(
+                ROIAlign(
+                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
+                )
+                for scale in scales
+            )
+        elif pooler_type == "ROIAlignV2":
+            self.level_poolers = nn.ModuleList(
+                ROIAlign(
+                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
+                )
+                for scale in scales
+            )
+        elif pooler_type == "ROIPool":
+            self.level_poolers = nn.ModuleList(
+                RoIPool(output_size, spatial_scale=scale) for scale in scales
+            )
+        elif pooler_type == "ROIAlignRotated":
+            self.level_poolers = nn.ModuleList(
+                ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
+                for scale in scales
+            )
+        else:
+            raise ValueError("Unknown pooler type: {}".format(pooler_type))
+
+        # Map scale (defined as 1 / stride) to its feature map level under the
+        # assumption that stride is a power of 2.
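+        # Worked example (assuming scales=(1/4, 1/8, 1/16, 1/32)):
+        # min_level = -log2(1/4) = 2 and max_level = -log2(1/32) = 5.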
+ min_level = -(math.log2(scales[0])) + max_level = -(math.log2(scales[-1])) + assert math.isclose(min_level, int(min_level)) and math.isclose( + max_level, int(max_level) + ), "Featuremap stride is not power of 2!" + self.min_level = int(min_level) + self.max_level = int(max_level) + assert ( + len(scales) == self.max_level - self.min_level + 1 + ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!" + assert 0 <= self.min_level and self.min_level <= self.max_level + self.canonical_level = canonical_level + assert canonical_box_size > 0 + self.canonical_box_size = canonical_box_size + + def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): + """ + Args: + x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those + used to construct this module. + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + The box coordinates are defined on the original image and + will be scaled by the `scales` argument of :class:`ROIPooler`. + + Returns: + Tensor: + A tensor of shape (M, C, output_size, output_size) where M is the total number of + boxes aggregated over all N batch images and C is the number of channels in `x`. + """ + num_level_assignments = len(self.level_poolers) + + if not is_fx_tracing(): + torch._assert( + isinstance(x, list) and isinstance(box_lists, list), + "Arguments to pooler must be lists", + ) + assert_fx_safe( + len(x) == num_level_assignments, + "unequal value, num_level_assignments={}, but x is list of {} Tensors".format( + num_level_assignments, len(x) + ), + ) + assert_fx_safe( + len(box_lists) == x[0].size(0), + "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format( + x[0].size(0), len(box_lists) + ), + ) + if len(box_lists) == 0: + return _create_zeros(None, x[0].shape[1], *self.output_size, x[0]) + + pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists) + + if num_level_assignments == 1: + return self.level_poolers[0](x[0], pooler_fmt_boxes) + + level_assignments = assign_boxes_to_levels( + box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level + ) + + num_channels = x[0].shape[1] + output_size = self.output_size[0] + + output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0]) + + for level, pooler in enumerate(self.level_poolers): + inds = nonzero_tuple(level_assignments == level)[0] + pooler_fmt_boxes_level = pooler_fmt_boxes[inds] + # Use index_put_ instead of advance indexing, to avoid pytorch/issues/49852 + output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level)) + + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/postprocessing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/postprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..97ac46bb548a3133084227db337d62aefb1c9d5f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/postprocessing.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch.nn import functional as F + +from custom_detectron2.structures import Instances, ROIMasks + + +# perhaps should rename to "resize_instance" +def detector_postprocess( + results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5 +): + """ + Resize the output instances. + The input images are often resized when entering an object detector. 
+    As a result, we often need the outputs of the detector in a different
+    resolution from its inputs.
+
+    This function will resize the raw outputs of an R-CNN detector
+    to produce outputs according to the desired output resolution.
+
+    Args:
+        results (Instances): the raw outputs from the detector.
+            `results.image_size` contains the input image resolution the detector sees.
+            This object might be modified in-place.
+        output_height, output_width: the desired output resolution.
+
+    Returns:
+        Instances: the resized output from the model, based on the output resolution
+    """
+    if isinstance(output_width, torch.Tensor):
+        # These sizes might (but do not necessarily) be tensors during tracing.
+        # Convert integer tensors to float temporaries to ensure true
+        # division is performed when computing scale_x and scale_y.
+        output_width_tmp = output_width.float()
+        output_height_tmp = output_height.float()
+        new_size = torch.stack([output_height, output_width])
+    else:
+        new_size = (output_height, output_width)
+        output_width_tmp = output_width
+        output_height_tmp = output_height
+
+    scale_x, scale_y = (
+        output_width_tmp / results.image_size[1],
+        output_height_tmp / results.image_size[0],
+    )
+    results = Instances(new_size, **results.get_fields())
+
+    if results.has("pred_boxes"):
+        output_boxes = results.pred_boxes
+    elif results.has("proposal_boxes"):
+        output_boxes = results.proposal_boxes
+    else:
+        output_boxes = None
+    assert output_boxes is not None, "Predictions must contain boxes!"
+
+    output_boxes.scale(scale_x, scale_y)
+    output_boxes.clip(results.image_size)
+
+    results = results[output_boxes.nonempty()]
+
+    if results.has("pred_masks"):
+        if isinstance(results.pred_masks, ROIMasks):
+            roi_masks = results.pred_masks
+        else:
+            # pred_masks is a tensor of shape (N, 1, M, M)
+            roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
+        results.pred_masks = roi_masks.to_bitmasks(
+            results.pred_boxes, output_height, output_width, mask_threshold
+        ).tensor  # TODO return ROIMasks/BitMask object in the future
+
+    if results.has("pred_keypoints"):
+        results.pred_keypoints[:, :, 0] *= scale_x
+        results.pred_keypoints[:, :, 1] *= scale_y
+
+    return results
+
+
+def sem_seg_postprocess(result, img_size, output_height, output_width):
+    """
+    Return semantic segmentation predictions in the original resolution.
+
+    The input images are often resized when entering the semantic segmentor. Moreover, in
+    some cases they are also padded inside the segmentor to be divisible by the maximum
+    network stride. As a result, we often need the predictions of the segmentor in a
+    different resolution from its inputs.
+
+    Args:
+        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
+            where C is the number of classes, and H, W are the height and width of the prediction.
+        img_size (tuple): the image size the segmentor takes as input.
+        output_height, output_width: the desired output resolution.
+
+    Returns:
+        semantic segmentation prediction (Tensor): A tensor of the shape
+        (C, output_height, output_width) that contains per-pixel soft predictions.
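+
+    For example (hypothetical sizes): with a result of shape (C, 800, 1216),
+    img_size=(750, 1200) (the valid, un-padded region) and a desired output of
+    600x960, the padding is cropped away first and the crop is then bilinearly
+    resized to (C, 600, 960).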
+ """ + result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) + result = F.interpolate( + result, size=(output_height, output_width), mode="bilinear", align_corners=False + )[0] + return result diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4e4df7645c67b7a013295207b98fe70b2e574c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator +from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead + +__all__ = list(globals().keys()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/build.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc0dca8574598562d10fe66a6900666e6a0cc04 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/build.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from custom_detectron2.utils.registry import Registry + +PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") +PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ +Registry for proposal generator, which produces object proposals from feature maps. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + +from . import rpn, rrpn # noqa F401 isort:skip + + +def build_proposal_generator(cfg, input_shape): + """ + Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. + The name can be "PrecomputedProposals" to use no proposal generator. + """ + name = cfg.MODEL.PROPOSAL_GENERATOR.NAME + if name == "PrecomputedProposals": + return None + + return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/proposal_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/proposal_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f8ead4ce29ff03a475621e44df910391ea59378f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/proposal_utils.py @@ -0,0 +1,205 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import logging +import math +from typing import List, Tuple, Union +import torch + +from custom_detectron2.layers import batched_nms, cat, move_device_like +from custom_detectron2.structures import Boxes, Instances + +logger = logging.getLogger(__name__) + + +def _is_tracing(): + # (fixed in TORCH_VERSION >= 1.9) + if torch.jit.is_scripting(): + # https://github.com/pytorch/pytorch/issues/47379 + return False + else: + return torch.jit.is_tracing() + + +def find_top_rpn_proposals( + proposals: List[torch.Tensor], + pred_objectness_logits: List[torch.Tensor], + image_sizes: List[Tuple[int, int]], + nms_thresh: float, + pre_nms_topk: int, + post_nms_topk: int, + min_box_size: float, + training: bool, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps for each image. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + image_sizes (list[tuple]): sizes (h, w) for each image + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_size (float): minimum proposal box side length in pixels (absolute units + wrt input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + list[Instances]: list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i, sorted by their + objectness score in descending order. + """ + num_images = len(image_sizes) + device = ( + proposals[0].device + if torch.jit.is_scripting() + else ("cpu" if torch.jit.is_tracing() else proposals[0].device) + ) + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = move_device_like(torch.arange(num_images, device=device), proposals[0]) + for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)): + Hi_Wi_A = logits_i.shape[1] + if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing + num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) + else: + num_proposals_i = min(Hi_Wi_A, pre_nms_topk) + + topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append( + move_device_like( + torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device), + proposals[0], + ) + ) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. 
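+    # Note: `level_ids` is passed to batched_nms below in place of a class id,
+    # so suppression happens only among boxes from the same feature level; two
+    # overlapping proposals from different levels can both survive.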
+    results: List[Instances] = []
+    for n, image_size in enumerate(image_sizes):
+        boxes = Boxes(topk_proposals[n])
+        scores_per_img = topk_scores[n]
+        lvl = level_ids
+
+        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
+        if not valid_mask.all():
+            if training:
+                raise FloatingPointError(
+                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
+                )
+            boxes = boxes[valid_mask]
+            scores_per_img = scores_per_img[valid_mask]
+            lvl = lvl[valid_mask]
+        boxes.clip(image_size)
+
+        # filter empty boxes
+        keep = boxes.nonempty(threshold=min_box_size)
+        if _is_tracing() or keep.sum().item() != len(boxes):
+            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
+
+        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
+        # In Detectron1, there was different behavior during training vs. testing.
+        # (https://github.com/facebookresearch/Detectron/issues/459)
+        # During training, topk is over the proposals from *all* images in the training batch.
+        # During testing, it is over the proposals for each image separately.
+        # As a result, the training behavior becomes batch-dependent,
+        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
+        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
+        keep = keep[:post_nms_topk]  # keep is already sorted
+
+        res = Instances(image_size)
+        res.proposal_boxes = boxes[keep]
+        res.objectness_logits = scores_per_img[keep]
+        results.append(res)
+    return results
+
+
+def add_ground_truth_to_proposals(
+    gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]
+) -> List[Instances]:
+    """
+    Call `add_ground_truth_to_proposals_single_image` for all images.
+
+    Args:
+        gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an
+            Instances representing the ground-truth for image i.
+        proposals (list[Instances]): list of N elements. Element i is an Instances
+            representing the proposals for image i.
+
+    Returns:
+        list[Instances]: list of N Instances. Each is the proposals for the image,
+            with fields "proposal_boxes" and "objectness_logits".
+    """
+    assert gt is not None
+
+    if len(proposals) != len(gt):
+        raise ValueError("proposals and gt should have the same length as the number of images!")
+    if len(proposals) == 0:
+        return proposals
+
+    return [
+        add_ground_truth_to_proposals_single_image(gt_i, proposals_i)
+        for gt_i, proposals_i in zip(gt, proposals)
+    ]
+
+
+def add_ground_truth_to_proposals_single_image(
+    gt: Union[Instances, Boxes], proposals: Instances
+) -> Instances:
+    """
+    Augment `proposals` with `gt`.
+
+    Args:
+        Same as `add_ground_truth_to_proposals`, but with gt and proposals
+        per image.
+
+    Returns:
+        Same as `add_ground_truth_to_proposals`, but for only one image.
+    """
+    if isinstance(gt, Boxes):
+        # convert Boxes to Instances
+        gt = Instances(proposals.image_size, gt_boxes=gt)
+
+    gt_boxes = gt.gt_boxes
+    device = proposals.objectness_logits.device
+    # Assign all ground-truth boxes an objectness logit corresponding to
+    # P(object) = sigmoid(logit) =~ 1.
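+    # Worked number: log((1 - 1e-10) / 1e-10) evaluates to roughly 23.03, so
+    # sigmoid(gt_logit_value) differs from 1 by only about 1e-10.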
+ gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10))) + gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device) + + # Concatenating gt_boxes with proposals requires them to have the same fields + gt_proposal = Instances(proposals.image_size, **gt.get_fields()) + gt_proposal.proposal_boxes = gt_boxes + gt_proposal.objectness_logits = gt_logits + + for key in proposals.get_fields().keys(): + assert gt_proposal.has( + key + ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key) + + # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items + # will be thrown away. + new_proposals = Instances.cat([proposals, gt_proposal]) + + return new_proposals diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rpn.py new file mode 100644 index 0000000000000000000000000000000000000000..dff181bcd489f89cb65a1978a4bf9efac4f30d06 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rpn.py @@ -0,0 +1,533 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import Dict, List, Optional, Tuple, Union +import torch +import torch.nn.functional as F +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, ShapeSpec, cat +from custom_detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from custom_detectron2.utils.events import get_event_storage +from custom_detectron2.utils.memory import retry_if_cuda_oom +from custom_detectron2.utils.registry import Registry + +from ..anchor_generator import build_anchor_generator +from ..box_regression import Box2BoxTransform, _dense_box_regression_loss +from ..matcher import Matcher +from ..sampling import subsample_labels +from .build import PROPOSAL_GENERATOR_REGISTRY +from .proposal_utils import find_top_rpn_proposals + +RPN_HEAD_REGISTRY = Registry("RPN_HEAD") +RPN_HEAD_REGISTRY.__doc__ = """ +Registry for RPN heads, which take feature maps and perform +objectness classification and bounding box regression for anchors. + +The registered object will be called with `obj(cfg, input_shape)`. +The call should return a `nn.Module` object. +""" + + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + L: number of feature maps per image on which RPN is run + A: number of cell anchors (must be the same for all feature maps) + Hi, Wi: height and width of the i-th feature map + B: size of the box parameterization + +Naming convention: + + objectness: refers to the binary classification of an anchor as object vs. not object. + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`), or 5d for rotated boxes. + + pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use + sigmoid(pred_objectness_logits) to estimate P(object). + + gt_labels: ground-truth binary classification labels for objectness + + pred_anchor_deltas: predicted box2box transform deltas + + gt_anchor_deltas: ground-truth box2box transform deltas +""" + + +def build_rpn_head(cfg, input_shape): + """ + Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`. 
+ """ + name = cfg.MODEL.RPN.HEAD_NAME + return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +@RPN_HEAD_REGISTRY.register() +class StandardRPNHead(nn.Module): + """ + Standard RPN classification and regression heads described in :paper:`Faster R-CNN`. + Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts + objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas + specifying how to deform each anchor into an object proposal. + """ + + @configurable + def __init__( + self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,) + ): + """ + NOTE: this interface is experimental. + + Args: + in_channels (int): number of input feature channels. When using multiple + input features, they must have the same number of channels. + num_anchors (int): number of anchors to predict for *each spatial position* + on the feature map. The total number of anchors for each + feature map will be `num_anchors * H * W`. + box_dim (int): dimension of a box, which is also the number of box regression + predictions to make for each anchor. An axis aligned box has + box_dim=4, while a rotated box has box_dim=5. + conv_dims (list[int]): a list of integers representing the output channels + of N conv layers. Set it to -1 to use the same number of output channels + as input channels. + """ + super().__init__() + cur_channels = in_channels + # Keeping the old variable names and structure for backwards compatiblity. + # Otherwise the old checkpoints will fail to load. + if len(conv_dims) == 1: + out_channels = cur_channels if conv_dims[0] == -1 else conv_dims[0] + # 3x3 conv for the hidden representation + self.conv = self._get_rpn_conv(cur_channels, out_channels) + cur_channels = out_channels + else: + self.conv = nn.Sequential() + for k, conv_dim in enumerate(conv_dims): + out_channels = cur_channels if conv_dim == -1 else conv_dim + if out_channels <= 0: + raise ValueError( + f"Conv output channels should be greater than 0. Got {out_channels}" + ) + conv = self._get_rpn_conv(cur_channels, out_channels) + self.conv.add_module(f"conv{k}", conv) + cur_channels = out_channels + # 1x1 conv for predicting objectness logits + self.objectness_logits = nn.Conv2d(cur_channels, num_anchors, kernel_size=1, stride=1) + # 1x1 conv for predicting box2box transform deltas + self.anchor_deltas = nn.Conv2d(cur_channels, num_anchors * box_dim, kernel_size=1, stride=1) + + # Keeping the order of weights initialization same for backwards compatiblility. + for layer in self.modules(): + if isinstance(layer, nn.Conv2d): + nn.init.normal_(layer.weight, std=0.01) + nn.init.constant_(layer.bias, 0) + + def _get_rpn_conv(self, in_channels, out_channels): + return Conv2d( + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + activation=nn.ReLU(), + ) + + @classmethod + def from_config(cls, cfg, input_shape): + # Standard RPN is shared across levels: + in_channels = [s.channels for s in input_shape] + assert len(set(in_channels)) == 1, "Each level must have the same channel!" + in_channels = in_channels[0] + + # RPNHead should take the same input as anchor generator + # NOTE: it assumes that creating an anchor generator does not have unwanted side effect. 
+ anchor_generator = build_anchor_generator(cfg, input_shape) + num_anchors = anchor_generator.num_anchors + box_dim = anchor_generator.box_dim + assert ( + len(set(num_anchors)) == 1 + ), "Each level must have the same number of anchors per spatial position" + return { + "in_channels": in_channels, + "num_anchors": num_anchors[0], + "box_dim": box_dim, + "conv_dims": cfg.MODEL.RPN.CONV_DIMS, + } + + def forward(self, features: List[torch.Tensor]): + """ + Args: + features (list[Tensor]): list of feature maps + + Returns: + list[Tensor]: A list of L elements. + Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for all anchors. A is the number of cell anchors. + list[Tensor]: A list of L elements. Element i is a tensor of shape + (N, A*box_dim, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + """ + pred_objectness_logits = [] + pred_anchor_deltas = [] + for x in features: + t = self.conv(x) + pred_objectness_logits.append(self.objectness_logits(t)) + pred_anchor_deltas.append(self.anchor_deltas(t)) + return pred_objectness_logits, pred_anchor_deltas + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RPN(nn.Module): + """ + Region Proposal Network, introduced by :paper:`Faster R-CNN`. + """ + + @configurable + def __init__( + self, + *, + in_features: List[str], + head: nn.Module, + anchor_generator: nn.Module, + anchor_matcher: Matcher, + box2box_transform: Box2BoxTransform, + batch_size_per_image: int, + positive_fraction: float, + pre_nms_topk: Tuple[float, float], + post_nms_topk: Tuple[float, float], + nms_thresh: float = 0.7, + min_box_size: float = 0.0, + anchor_boundary_thresh: float = -1.0, + loss_weight: Union[float, Dict[str, float]] = 1.0, + box_reg_loss_type: str = "smooth_l1", + smooth_l1_beta: float = 0.0, + ): + """ + NOTE: this interface is experimental. + + Args: + in_features (list[str]): list of names of input features to use + head (nn.Module): a module that predicts logits and regression deltas + for each level from a list of per-level features + anchor_generator (nn.Module): a module that creates anchors from a + list of features. Usually an instance of :class:`AnchorGenerator` + anchor_matcher (Matcher): label the anchors by matching them with ground truth. + box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to + instance boxes + batch_size_per_image (int): number of anchors per image to sample for training + positive_fraction (float): fraction of foreground anchors to sample for training + pre_nms_topk (tuple[float]): (train, test) that represents the + number of top k proposals to select before NMS, in + training and testing. + post_nms_topk (tuple[float]): (train, test) that represents the + number of top k proposals to select after NMS, in + training and testing. + nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals + min_box_size (float): remove proposal boxes with any side smaller than this threshold, + in the unit of input image pixels + anchor_boundary_thresh (float): legacy option + loss_weight (float|dict): weights to use for losses. Can be single float for weighting + all rpn losses together, or a dict of individual weightings. Valid dict keys are: + "loss_rpn_cls" - applied to classification loss + "loss_rpn_loc" - applied to box regression loss + box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou". + smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. 
Default to + use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1" + """ + super().__init__() + self.in_features = in_features + self.rpn_head = head + self.anchor_generator = anchor_generator + self.anchor_matcher = anchor_matcher + self.box2box_transform = box2box_transform + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + # Map from self.training state to train/test settings + self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]} + self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]} + self.nms_thresh = nms_thresh + self.min_box_size = float(min_box_size) + self.anchor_boundary_thresh = anchor_boundary_thresh + if isinstance(loss_weight, float): + loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight} + self.loss_weight = loss_weight + self.box_reg_loss_type = box_reg_loss_type + self.smooth_l1_beta = smooth_l1_beta + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + in_features = cfg.MODEL.RPN.IN_FEATURES + ret = { + "in_features": in_features, + "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE, + "nms_thresh": cfg.MODEL.RPN.NMS_THRESH, + "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, + "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION, + "loss_weight": { + "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT, + "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT, + }, + "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS), + "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE, + "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA, + } + + ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST) + ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST) + + ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features]) + ret["anchor_matcher"] = Matcher( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True + ) + ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features]) + return ret + + def _subsample_labels(self, label): + """ + Randomly sample a subset of positive and negative examples, and overwrite + the label vector to the ignore value (-1) for all elements that are not + included in the sample. + + Args: + labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. + """ + pos_idx, neg_idx = subsample_labels( + label, self.batch_size_per_image, self.positive_fraction, 0 + ) + # Fill with the ignore label (-1), then set positive and negative labels + label.fill_(-1) + label.scatter_(0, pos_idx, 1) + label.scatter_(0, neg_idx, 0) + return label + + @torch.jit.unused + @torch.no_grad() + def label_and_sample_anchors( + self, anchors: List[Boxes], gt_instances: List[Instances] + ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + """ + Args: + anchors (list[Boxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across all feature maps R = sum(Hi * Wi * A). + Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative + class; 1 = positive class. + list[Tensor]: + i-th element is a Rx4 tensor. The values are the matched gt boxes for each + anchor. 
Values are undefined for those anchors not labeled as 1. + """ + anchors = Boxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + image_sizes = [x.image_size for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes): + """ + image_size_i: (h, w) for the i-th image + gt_boxes_i: ground-truth boxes for i-th image + """ + + match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + del match_quality_matrix + + if self.anchor_boundary_thresh >= 0: + # Discard anchors that go out of the boundaries of the image + # NOTE: This is legacy functionality that is turned off by default in Detectron2 + anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh) + gt_labels_i[~anchors_inside_image] = -1 + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + @torch.jit.unused + def losses( + self, + anchors: List[Boxes], + pred_objectness_logits: List[torch.Tensor], + gt_labels: List[torch.Tensor], + pred_anchor_deltas: List[torch.Tensor], + gt_boxes: List[torch.Tensor], + ) -> Dict[str, torch.Tensor]: + """ + Return the losses from a set of RPN predictions and their associated ground-truth. + + Args: + anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each + has shape (Hi*Wi*A, B), where B is box dimension (4 or 5). + pred_objectness_logits (list[Tensor]): A list of L elements. + Element i is a tensor of shape (N, Hi*Wi*A) representing + the predicted objectness logits for all anchors. + gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`. + pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape + (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors + to proposals. + gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`. + + Returns: + dict[loss name -> loss value]: A dict mapping from loss name to loss value. + Loss names are: `loss_rpn_cls` for objectness classification and + `loss_rpn_loc` for proposal localization. 
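# A minimal sketch (plain torch) of the objectness half of this loss, assuming
# two images with four anchors each. Note the fixed normalizer
# batch_size_per_image * num_images, rather than the number of valid anchors:
import torch
import torch.nn.functional as F

num_images, batch_size_per_image = 2, 256
gt_labels = torch.tensor([[1., 0., -1., 0.], [0., 1., 0., -1.]])  # -1 = ignore
logits = torch.randn(2, 4)

valid = gt_labels >= 0  # ignored anchors contribute nothing to the loss
cls_loss = F.binary_cross_entropy_with_logits(
    logits[valid], gt_labels[valid], reduction="sum"
)
loss_rpn_cls = cls_loss / (batch_size_per_image * num_images)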
+ """ + num_images = len(gt_labels) + gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai)) + + # Log the number of positive/negative anchors per-image that's used in training + pos_mask = gt_labels == 1 + num_pos_anchors = pos_mask.sum().item() + num_neg_anchors = (gt_labels == 0).sum().item() + storage = get_event_storage() + storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images) + storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images) + + localization_loss = _dense_box_regression_loss( + anchors, + self.box2box_transform, + pred_anchor_deltas, + gt_boxes, + pos_mask, + box_reg_loss_type=self.box_reg_loss_type, + smooth_l1_beta=self.smooth_l1_beta, + ) + + valid_mask = gt_labels >= 0 + objectness_loss = F.binary_cross_entropy_with_logits( + cat(pred_objectness_logits, dim=1)[valid_mask], + gt_labels[valid_mask].to(torch.float32), + reduction="sum", + ) + normalizer = self.batch_size_per_image * num_images + losses = { + "loss_rpn_cls": objectness_loss / normalizer, + # The original Faster R-CNN paper uses a slightly different normalizer + # for loc loss. But it doesn't matter in practice + "loss_rpn_loc": localization_loss / normalizer, + } + losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} + return losses + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + gt_instances: Optional[List[Instances]] = None, + ): + """ + Args: + images (ImageList): input images of length `N` + features (dict[str, Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + gt_instances (list[Instances], optional): a length `N` list of `Instances`s. + Each `Instances` stores ground-truth instances for the corresponding image. + + Returns: + proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits" + loss: dict[Tensor] or None + """ + features = [features[f] for f in self.in_features] + anchors = self.anchor_generator(features) + + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + # Transpose the Hi*Wi*A dimension to the middle: + pred_objectness_logits = [ + # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) + score.permute(0, 2, 3, 1).flatten(1) + for score in pred_objectness_logits + ] + pred_anchor_deltas = [ + # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B) + x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]) + .permute(0, 3, 4, 1, 2) + .flatten(1, -2) + for x in pred_anchor_deltas + ] + + if self.training: + assert gt_instances is not None, "RPN requires gt_instances in training!" + gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) + losses = self.losses( + anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes + ) + else: + losses = {} + proposals = self.predict_proposals( + anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes + ) + return proposals, losses + + def predict_proposals( + self, + anchors: List[Boxes], + pred_objectness_logits: List[torch.Tensor], + pred_anchor_deltas: List[torch.Tensor], + image_sizes: List[Tuple[int, int]], + ): + """ + Decode all the predicted box regression deltas to proposals. Find the top proposals + by applying NMS and removing boxes that are too small. + + Returns: + proposals (list[Instances]): list of N Instances. 
The i-th Instances + stores post_nms_topk object proposals for image i, sorted by their + objectness score in descending order. + """ + # The proposals are treated as fixed for joint training with roi heads. + # This approach ignores the derivative w.r.t. the proposal boxes’ coordinates that + # are also network responses. + with torch.no_grad(): + pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) + return find_top_rpn_proposals( + pred_proposals, + pred_objectness_logits, + image_sizes, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_size, + self.training, + ) + + def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]): + """ + Transform anchors into proposals by applying the predicted anchor deltas. + + Returns: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A, B) + """ + N = pred_anchor_deltas[0].shape[0] + proposals = [] + # For each feature map + for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas): + B = anchors_i.tensor.size(1) + pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B) + # Expand anchors to shape (N*Hi*Wi*A, B) + anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B) + proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) + # Append feature map proposals with shape (N, Hi*Wi*A, B) + proposals.append(proposals_i.view(N, -1, B)) + return proposals diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rrpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rrpn.py new file mode 100644 index 0000000000000000000000000000000000000000..5c0d038190c1a9ec3cbd6cbb32322623aa3e7278 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/proposal_generator/rrpn.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import logging +from typing import Dict, List +import torch + +from custom_detectron2.config import configurable +from custom_detectron2.layers import ShapeSpec, batched_nms_rotated, cat +from custom_detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated +from custom_detectron2.utils.memory import retry_if_cuda_oom + +from ..box_regression import Box2BoxTransformRotated +from .build import PROPOSAL_GENERATOR_REGISTRY +from .proposal_utils import _is_tracing +from .rpn import RPN + +logger = logging.getLogger(__name__) + + +def find_top_rrpn_proposals( + proposals, + pred_objectness_logits, + image_sizes, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_size, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + image_sizes (list[tuple]): sizes (h, w) for each image + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. 
+ When RRPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_size(float): minimum proposal box side length in pixels (absolute units wrt + input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i. + """ + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + if isinstance(Hi_Wi_A, torch.Tensor): # it's a tensor in tracing + num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk) + else: + num_proposals_i = min(Hi_Wi_A, pre_nms_topk) + + topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results = [] + for n, image_size in enumerate(image_sizes): + boxes = RotatedBoxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + lvl = level_ids + + valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img) + if not valid_mask.all(): + if training: + raise FloatingPointError( + "Predicted boxes or scores contain Inf/NaN. Training has diverged." + ) + boxes = boxes[valid_mask] + scores_per_img = scores_per_img[valid_mask] + lvl = lvl[valid_mask] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_size) + if _is_tracing() or keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], lvl[keep]) + + keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. + # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RRPN(RPN): + """ + Rotated Region Proposal Network described in :paper:`RRPN`. 
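# A sketch (plain torch) of the per-level pre-NMS top-k selection in step 1 of
# find_top_rrpn_proposals above; the two levels, batch size, and anchor counts
# are made up for the demo. Each rotated box has 5 parameters.
import torch

pre_nms_topk = 3
proposals = [torch.randn(2, 10, 5), torch.randn(2, 6, 5)]  # (N, Hi*Wi*A, 5) per level
logits = [torch.randn(2, 10), torch.randn(2, 6)]           # (N, Hi*Wi*A) per level
batch_idx = torch.arange(2)

topk_all = []
for props_i, logits_i in zip(proposals, logits):
    k = min(logits_i.shape[1], pre_nms_topk)
    scores_i, idx = logits_i.topk(k, dim=1)            # both N x k
    topk_all.append(props_i[batch_idx[:, None], idx])  # N x k x 5
print(torch.cat(topk_all, dim=1).shape)                # torch.Size([2, 6, 5])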
+ """ + + @configurable + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if self.anchor_boundary_thresh >= 0: + raise NotImplementedError( + "anchor_boundary_thresh is a legacy option not implemented for RRPN." + ) + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = super().from_config(cfg, input_shape) + ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + return ret + + @torch.no_grad() + def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]): + """ + Args: + anchors (list[RotatedBoxes]): anchors for each feature map. + gt_instances: the ground-truth instances for each image. + + Returns: + list[Tensor]: + List of #img tensors. i-th element is a vector of labels whose length is + the total number of anchors across feature maps. Label values are in {-1, 0, 1}, + with meanings: -1 = ignore; 0 = negative class; 1 = positive class. + list[Tensor]: + i-th element is a Nx5 tensor, where N is the total number of anchors across + feature maps. The values are the matched gt boxes for each anchor. + Values are undefined for those anchors not labeled as 1. + """ + anchors = RotatedBoxes.cat(anchors) + + gt_boxes = [x.gt_boxes for x in gt_instances] + del gt_instances + + gt_labels = [] + matched_gt_boxes = [] + for gt_boxes_i in gt_boxes: + """ + gt_boxes_i: ground-truth boxes for i-th image + """ + match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors) + matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) + # Matching is memory-expensive and may result in CPU tensors. But the result is small + gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) + + # A vector of labels (-1, 0, 1) for each anchor + gt_labels_i = self._subsample_labels(gt_labels_i) + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + matched_gt_boxes_i = torch.zeros_like(anchors.tensor) + else: + # TODO wasted indexing computation for ignored boxes + matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor + + gt_labels.append(gt_labels_i) # N,AHW + matched_gt_boxes.append(matched_gt_boxes_i) + return gt_labels, matched_gt_boxes + + @torch.no_grad() + def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes): + pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas) + return find_top_rrpn_proposals( + pred_proposals, + pred_objectness_logits, + image_sizes, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_size, + self.training, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d13e9c57235b982f3e0645bc316de2b75755dfda --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head, FastRCNNConvFCHead +from .keypoint_head import ( + ROI_KEYPOINT_HEAD_REGISTRY, + build_keypoint_head, + BaseKeypointRCNNHead, + KRCNNConvDeconvUpsampleHead, +) +from .mask_head import ( + ROI_MASK_HEAD_REGISTRY, + build_mask_head, + BaseMaskRCNNHead, + MaskRCNNConvUpsampleHead, +) +from .roi_heads import ( + ROI_HEADS_REGISTRY, + ROIHeads, + Res5ROIHeads, + StandardROIHeads, + build_roi_heads, + select_foreground_proposals, +) +from .cascade_rcnn import CascadeROIHeads +from .rotated_fast_rcnn import RROIHeads +from .fast_rcnn import FastRCNNOutputLayers + +from . import cascade_rcnn # isort:skip + +__all__ = list(globals().keys()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/box_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/box_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c2312d53a6959ba5b8bb6746ea49f8a956112298 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/box_head.py @@ -0,0 +1,118 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, ShapeSpec, get_norm +from custom_detectron2.utils.registry import Registry + +__all__ = ["FastRCNNConvFCHead", "build_box_head", "ROI_BOX_HEAD_REGISTRY"] + +ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") +ROI_BOX_HEAD_REGISTRY.__doc__ = """ +Registry for box heads, which make box predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +# To get torchscript support, we make the head a subclass of `nn.Sequential`. +# Therefore, to add new layers in this head class, please make sure they are +# added in the order they will be used in forward(). +@ROI_BOX_HEAD_REGISTRY.register() +class FastRCNNConvFCHead(nn.Sequential): + """ + A head with several 3x3 conv layers (each followed by norm & relu) and then + several fc layers (each followed by relu). + """ + + @configurable + def __init__( + self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm="" + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature. + conv_dims (list[int]): the output dimensions of the conv layers + fc_dims (list[int]): the output dimensions of the fc layers + conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. 
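# A standalone shape sketch of the conv->flatten->fc layout described above,
# assuming a 256x7x7 pooled region feature, one 3x3 conv, and one 1024-d fc
# layer; the vendored head builds the same structure from cfg values.
import numpy as np
import torch
from torch import nn

head = nn.Sequential(
    nn.Conv2d(256, 256, 3, padding=1), nn.ReLU(),
    nn.Flatten(),
    nn.Linear(int(np.prod((256, 7, 7))), 1024), nn.ReLU(),
)
x = torch.randn(8, 256, 7, 7)  # 8 pooled region features
print(head(x).shape)           # torch.Size([8, 1024])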
+ """ + super().__init__() + assert len(conv_dims) + len(fc_dims) > 0 + + self._output_size = (input_shape.channels, input_shape.height, input_shape.width) + + self.conv_norm_relus = [] + for k, conv_dim in enumerate(conv_dims): + conv = Conv2d( + self._output_size[0], + conv_dim, + kernel_size=3, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=nn.ReLU(), + ) + self.add_module("conv{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) + + self.fcs = [] + for k, fc_dim in enumerate(fc_dims): + if k == 0: + self.add_module("flatten", nn.Flatten()) + fc = nn.Linear(int(np.prod(self._output_size)), fc_dim) + self.add_module("fc{}".format(k + 1), fc) + self.add_module("fc_relu{}".format(k + 1), nn.ReLU()) + self.fcs.append(fc) + self._output_size = fc_dim + + for layer in self.conv_norm_relus: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + @classmethod + def from_config(cls, cfg, input_shape): + num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV + conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM + num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC + fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM + return { + "input_shape": input_shape, + "conv_dims": [conv_dim] * num_conv, + "fc_dims": [fc_dim] * num_fc, + "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM, + } + + def forward(self, x): + for layer in self: + x = layer(x) + return x + + @property + @torch.jit.unused + def output_shape(self): + """ + Returns: + ShapeSpec: the output feature shape + """ + o = self._output_size + if isinstance(o, int): + return ShapeSpec(channels=o) + else: + return ShapeSpec(channels=o[0], height=o[1], width=o[2]) + + +def build_box_head(cfg, input_shape): + """ + Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_BOX_HEAD.NAME + return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/cascade_rcnn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/cascade_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..441da8e5741a5c8f008025996d39cc3a9a6b53c6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/cascade_rcnn.py @@ -0,0 +1,299 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import torch +from torch import nn +from torch.autograd.function import Function + +from custom_detectron2.config import configurable +from custom_detectron2.layers import ShapeSpec +from custom_detectron2.structures import Boxes, Instances, pairwise_iou +from custom_detectron2.utils.events import get_event_storage + +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..poolers import ROIPooler +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers, fast_rcnn_inference +from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads + + +class _ScaleGradient(Function): + @staticmethod + def forward(ctx, input, scale): + ctx.scale = scale + return input + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ctx.scale, None + + +@ROI_HEADS_REGISTRY.register() +class CascadeROIHeads(StandardROIHeads): + """ + The ROI heads that implement :paper:`Cascade R-CNN`. 
+ """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_heads: List[nn.Module], + box_predictors: List[nn.Module], + proposal_matchers: List[Matcher], + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + box_pooler (ROIPooler): pooler that extracts region features from given boxes + box_heads (list[nn.Module]): box head for each cascade stage + box_predictors (list[nn.Module]): box predictor for each cascade stage + proposal_matchers (list[Matcher]): matcher with different IoU thresholds to + match boxes with ground truth for each stage. The first matcher matches + RPN proposals with ground truth, the other matchers use boxes predicted + by the previous stage as proposals and match them with ground truth. + """ + assert "proposal_matcher" not in kwargs, ( + "CascadeROIHeads takes 'proposal_matchers=' for each stage instead " + "of one 'proposal_matcher='." + ) + # The first matcher matches RPN proposals with ground truth, done in the base class + kwargs["proposal_matcher"] = proposal_matchers[0] + num_stages = self.num_cascade_stages = len(box_heads) + box_heads = nn.ModuleList(box_heads) + box_predictors = nn.ModuleList(box_predictors) + assert len(box_predictors) == num_stages, f"{len(box_predictors)} != {num_stages}!" + assert len(proposal_matchers) == num_stages, f"{len(proposal_matchers)} != {num_stages}!" + super().__init__( + box_in_features=box_in_features, + box_pooler=box_pooler, + box_head=box_heads, + box_predictor=box_predictors, + **kwargs, + ) + self.proposal_matchers = proposal_matchers + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret.pop("proposal_matcher") + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS + cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS + assert len(cascade_bbox_reg_weights) == len(cascade_ious) + assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \ + "CascadeROIHeads only support class-agnostic regression now!" 
+ assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0] + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + pooled_shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + + box_heads, box_predictors, proposal_matchers = [], [], [] + for match_iou, bbox_reg_weights in zip(cascade_ious, cascade_bbox_reg_weights): + box_head = build_box_head(cfg, pooled_shape) + box_heads.append(box_head) + box_predictors.append( + FastRCNNOutputLayers( + cfg, + box_head.output_shape, + box2box_transform=Box2BoxTransform(weights=bbox_reg_weights), + ) + ) + proposal_matchers.append(Matcher([match_iou], [0, 1], allow_low_quality_matches=False)) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_heads": box_heads, + "box_predictors": box_predictors, + "proposal_matchers": proposal_matchers, + } + + def forward(self, images, features, proposals, targets=None): + del images + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + + if self.training: + # Need targets to box head + losses = self._forward_box(features, proposals, targets) + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def _forward_box(self, features, proposals, targets=None): + """ + Args: + features, targets: the same as in + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + """ + features = [features[f] for f in self.box_in_features] + head_outputs = [] # (predictor, predictions, proposals) + prev_pred_boxes = None + image_sizes = [x.image_size for x in proposals] + for k in range(self.num_cascade_stages): + if k > 0: + # The output boxes of the previous stage are used to create the input + # proposals of the next stage. + proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes) + if self.training: + proposals = self._match_and_label_boxes(proposals, k, targets) + predictions = self._run_stage(features, proposals, k) + prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals) + head_outputs.append((self.box_predictor[k], predictions, proposals)) + + if self.training: + losses = {} + storage = get_event_storage() + for stage, (predictor, predictions, proposals) in enumerate(head_outputs): + with storage.name_scope("stage{}".format(stage)): + stage_losses = predictor.losses(predictions, proposals) + losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) + return losses + else: + # Each is a list[Tensor] of length #image. 
Each tensor is Ri x (K+1) + scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs] + + # Average the scores across heads + scores = [ + sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) + for scores_per_image in zip(*scores_per_stage) + ] + # Use the boxes of the last head + predictor, predictions, proposals = head_outputs[-1] + boxes = predictor.predict_boxes(predictions, proposals) + pred_instances, _ = fast_rcnn_inference( + boxes, + scores, + image_sizes, + predictor.test_score_thresh, + predictor.test_nms_thresh, + predictor.test_topk_per_image, + ) + return pred_instances + + @torch.no_grad() + def _match_and_label_boxes(self, proposals, stage, targets): + """ + Match proposals with groundtruth using the matcher at the given stage. + Label the proposals as foreground or background based on the match. + + Args: + proposals (list[Instances]): One Instances for each image, with + the field "proposal_boxes". + stage (int): the current stage + targets (list[Instances]): the ground truth instances + + Returns: + list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" + """ + num_fg_samples, num_bg_samples = [], [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + # proposal_labels are 0 or 1 + matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) + if len(targets_per_image) > 0: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposal_labels == 0] = self.num_classes + gt_boxes = targets_per_image.gt_boxes[matched_idxs] + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) + ) + proposals_per_image.gt_classes = gt_classes + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append((proposal_labels == 1).sum().item()) + num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) + + # Log the number of fg/bg samples in each stage + storage = get_event_storage() + storage.put_scalar( + "stage{}/roi_head/num_fg_samples".format(stage), + sum(num_fg_samples) / len(num_fg_samples), + ) + storage.put_scalar( + "stage{}/roi_head/num_bg_samples".format(stage), + sum(num_bg_samples) / len(num_bg_samples), + ) + return proposals + + def _run_stage(self, features, proposals, stage): + """ + Args: + features (list[Tensor]): #lvl input features to ROIHeads + proposals (list[Instances]): #image Instances, with the field "proposal_boxes" + stage (int): the current stage + + Returns: + Same output as `FastRCNNOutputLayers.forward()`. + """ + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + # The original implementation averages the losses among heads, + # but scale up the parameter gradients of the heads. + # This is equivalent to adding the losses among heads, + # but scale down the gradients on features. 
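# A toy illustration of the inference-time score averaging in _forward_box
# above: the per-stage class probabilities for the same boxes are averaged
# elementwise. Stage count, box count, and class count are assumptions.
import torch

num_stages = 3
scores_per_stage = [torch.rand(4, 81) for _ in range(num_stages)]  # Ri x (K+1)
avg = sum(scores_per_stage) * (1.0 / num_stages)
print(avg.shape)  # torch.Size([4, 81])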
+ if self.training: + box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) + box_features = self.box_head[stage](box_features) + return self.box_predictor[stage](box_features) + + def _create_proposals_from_boxes(self, boxes, image_sizes): + """ + Args: + boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 + image_sizes (list[tuple]): list of image shapes in (h, w) + + Returns: + list[Instances]: per-image proposals with the given boxes. + """ + # Just like RPN, the proposals should not have gradients + boxes = [Boxes(b.detach()) for b in boxes] + proposals = [] + for boxes_per_image, image_size in zip(boxes, image_sizes): + boxes_per_image.clip(image_size) + if self.training: + # do not filter empty boxes at inference time, + # because the scores from each stage need to be aligned and added later + boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] + prop = Instances(image_size) + prop.proposal_boxes = boxes_per_image + proposals.append(prop) + return proposals diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/fast_rcnn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/fast_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..6d2f8476f40c5fff30a16a41d9cabee21e7aad6c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/fast_rcnn.py @@ -0,0 +1,569 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +from typing import Callable, Dict, List, Optional, Tuple, Union +import torch +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.data.detection_utils import get_fed_loss_cls_weights +from custom_detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple +from custom_detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss +from custom_detectron2.structures import Boxes, Instances +from custom_detectron2.utils.events import get_event_storage + +__all__ = ["fast_rcnn_inference", "FastRCNNOutputLayers"] + + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted box2box transform deltas for transforming proposals + to detection box predictions. + + gt_proposal_deltas: ground-truth box2box transform deltas +""" + + +def fast_rcnn_inference( + boxes: List[torch.Tensor], + scores: List[torch.Tensor], + image_shapes: List[Tuple[int, int]], + score_thresh: float, + nms_thresh: float, + topk_per_image: int, +): + """ + Call `fast_rcnn_inference_single_image` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. 
Element i has shape (Ri, K * 4) if doing + class-specific regression, or (Ri, 4) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. + This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. + scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. + image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections. + kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i. + """ + result_per_image = [ + fast_rcnn_inference_single_image( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return [x[0] for x in result_per_image], [x[1] for x in result_per_image] + + +def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"): + """ + Log the classification metrics to EventStorage. + + Args: + pred_logits: Rx(K+1) logits. The last column is for background class. + gt_classes: R labels + """ + num_instances = gt_classes.numel() + if num_instances == 0: + return + pred_classes = pred_logits.argmax(dim=1) + bg_class_ind = pred_logits.shape[1] - 1 + + fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind) + num_fg = fg_inds.nonzero().numel() + fg_gt_classes = gt_classes[fg_inds] + fg_pred_classes = pred_classes[fg_inds] + + num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() + num_accurate = (pred_classes == gt_classes).nonzero().numel() + fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() + + storage = get_event_storage() + storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances) + if num_fg > 0: + storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg) + storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg) + + +def fast_rcnn_inference_single_image( + boxes, + scores, + image_shape: Tuple[int, int], + score_thresh: float, + nms_thresh: float, + topk_per_image: int, +): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Args: + Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference`, but for only one image. + """ + valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) + if not valid_mask.all(): + boxes = boxes[valid_mask] + scores = scores[valid_mask] + + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // 4 + # Convert to Boxes to use the `clip` function ... + boxes = Boxes(boxes.reshape(-1, 4)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 + + # 1. 
Filter results based on detection scores. It can make NMS more efficient + # by filtering out low-confidence detections. + filter_mask = scores > score_thresh # R x K + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. + filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # 2. Apply NMS for each class independently. + keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) + if topk_per_image >= 0: + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = Boxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + return result, filter_inds[:, 0] + + +class FastRCNNOutputLayers(nn.Module): + """ + Two linear layers for predicting Fast R-CNN outputs: + + 1. proposal-to-detection box regression deltas + 2. classification scores + """ + + @configurable + def __init__( + self, + input_shape: ShapeSpec, + *, + box2box_transform, + num_classes: int, + test_score_thresh: float = 0.0, + test_nms_thresh: float = 0.5, + test_topk_per_image: int = 100, + cls_agnostic_bbox_reg: bool = False, + smooth_l1_beta: float = 0.0, + box_reg_loss_type: str = "smooth_l1", + loss_weight: Union[float, Dict[str, float]] = 1.0, + use_fed_loss: bool = False, + use_sigmoid_ce: bool = False, + get_fed_loss_cls_weights: Optional[Callable] = None, + fed_loss_num_classes: int = 50, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature to this module + box2box_transform (Box2BoxTransform or Box2BoxTransformRotated): + num_classes (int): number of foreground classes + test_score_thresh (float): threshold to filter predictions results. + test_nms_thresh (float): NMS threshold for prediction results. + test_topk_per_image (int): number of top predictions to produce per image. + cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression + smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if + `box_reg_loss_type` is "smooth_l1" + box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou", + "diou", "ciou" + loss_weight (float|dict): weights to use for losses. Can be single float for weighting + all losses, or a dict of individual weightings. Valid dict keys are: + * "loss_cls": applied to classification loss + * "loss_box_reg": applied to box regression loss + use_fed_loss (bool): whether to use federated loss which samples additional negative + classes to calculate the loss + use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary + cross entropy with logits. This could be used together with federated loss + get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency + weight power, and returns the probabilities to sample negative classes for + federated loss. 
The implementation can be found in + detectron2/data/detection_utils.py + fed_loss_num_classes (int): number of federated classes to keep in total + """ + super().__init__() + if isinstance(input_shape, int): # some backward compatibility + input_shape = ShapeSpec(channels=input_shape) + self.num_classes = num_classes + input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) + # prediction layer for num_classes foreground classes and one background class (hence + 1) + self.cls_score = nn.Linear(input_size, num_classes + 1) + num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes + box_dim = len(box2box_transform.weights) + self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) + + nn.init.normal_(self.cls_score.weight, std=0.01) + nn.init.normal_(self.bbox_pred.weight, std=0.001) + for l in [self.cls_score, self.bbox_pred]: + nn.init.constant_(l.bias, 0) + + self.box2box_transform = box2box_transform + self.smooth_l1_beta = smooth_l1_beta + self.test_score_thresh = test_score_thresh + self.test_nms_thresh = test_nms_thresh + self.test_topk_per_image = test_topk_per_image + self.box_reg_loss_type = box_reg_loss_type + if isinstance(loss_weight, float): + loss_weight = {"loss_cls": loss_weight, "loss_box_reg": loss_weight} + self.loss_weight = loss_weight + self.use_fed_loss = use_fed_loss + self.use_sigmoid_ce = use_sigmoid_ce + self.fed_loss_num_classes = fed_loss_num_classes + + if self.use_fed_loss: + assert self.use_sigmoid_ce, "Please use sigmoid cross entropy loss with federated loss" + fed_loss_cls_weights = get_fed_loss_cls_weights() + assert ( + len(fed_loss_cls_weights) == self.num_classes + ), "Please check the provided fed_loss_cls_weights. Their size should match num_classes" + self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights) + + @classmethod + def from_config(cls, cfg, input_shape): + return { + "input_shape": input_shape, + "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS), + # fmt: off + "num_classes" : cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "cls_agnostic_bbox_reg" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, + "smooth_l1_beta" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA, + "test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST, + "test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + "test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE, + "box_reg_loss_type" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE, + "loss_weight" : {"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa + "use_fed_loss" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS, + "use_sigmoid_ce" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE, + "get_fed_loss_cls_weights" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa + "fed_loss_num_classes" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES, + # fmt: on + } + + def forward(self, x): + """ + Args: + x: per-region features of shape (N, ...) for N bounding boxes to predict. + + Returns: + (Tensor, Tensor): + First tensor: shape (N,K+1), scores for each of the N box. Each row contains the + scores for K object categories and 1 background class. + + Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4), + or (N,4) for class-agnostic regression. 
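# A shape sketch (plain torch) of the two linear heads described above,
# assuming 80 foreground classes, class-specific regression, and 1024-d
# flattened region features:
import torch
from torch import nn

num_classes, feat_dim = 80, 1024
cls_score = nn.Linear(feat_dim, num_classes + 1)  # + 1 background class
bbox_pred = nn.Linear(feat_dim, num_classes * 4)  # K x 4 deltas per box

x = torch.randn(5, feat_dim)   # 5 pooled region features
print(cls_score(x).shape)      # torch.Size([5, 81])
print(bbox_pred(x).shape)      # torch.Size([5, 320])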
+ """ + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + scores = self.cls_score(x) + proposal_deltas = self.bbox_pred(x) + return scores, proposal_deltas + + def losses(self, predictions, proposals): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were used + to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``, + ``gt_classes`` are expected. + + Returns: + Dict[str, Tensor]: dict of losses + """ + scores, proposal_deltas = predictions + + # parse classification outputs + gt_classes = ( + cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0) + ) + _log_classification_stats(scores, gt_classes) + + # parse box regression outputs + if len(proposals): + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4 + assert not proposal_boxes.requires_grad, "Proposals should not require gradients!" + # If "gt_boxes" does not exist, the proposals must be all negative and + # should not be included in regression loss computation. + # Here we just use proposal_boxes as an arbitrary placeholder because its + # value won't be used in self.box_reg_loss(). + gt_boxes = cat( + [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals], + dim=0, + ) + else: + proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device) + + if self.use_sigmoid_ce: + loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes) + else: + loss_cls = cross_entropy(scores, gt_classes, reduction="mean") + + losses = { + "loss_cls": loss_cls, + "loss_box_reg": self.box_reg_loss( + proposal_boxes, gt_boxes, proposal_deltas, gt_classes + ), + } + return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} + + # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa + # with slight modifications + def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight): + """ + Args: + gt_classes: a long tensor of shape R that contains the gt class label of each proposal. + num_fed_loss_classes: minimum number of classes to keep when calculating federated loss. + Will sample negative classes if number of unique gt_classes is smaller than this value. + num_classes: number of foreground classes + weight: probabilities used to sample negative classes + + Returns: + Tensor: + classes to keep when calculating the federated loss, including both unique gt + classes and sampled negative classes. + """ + unique_gt_classes = torch.unique(gt_classes) + prob = unique_gt_classes.new_ones(num_classes + 1).float() + prob[-1] = 0 + if len(unique_gt_classes) < num_fed_loss_classes: + prob[:num_classes] = weight.float().clone() + prob[unique_gt_classes] = 0 + sampled_negative_classes = torch.multinomial( + prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False + ) + fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes]) + else: + fed_loss_classes = unique_gt_classes + return fed_loss_classes + + # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa + # with slight modifications + def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes): + """ + Args: + pred_class_logits: shape (N, K+1), scores for each of the N box. 
Each row contains the + scores for K object categories and 1 background class + gt_classes: a long tensor of shape R that contains the gt class label of each proposal. + """ + if pred_class_logits.numel() == 0: + return pred_class_logits.new_zeros([1])[0] + + N = pred_class_logits.shape[0] + K = pred_class_logits.shape[1] - 1 + + target = pred_class_logits.new_zeros(N, K + 1) + target[range(len(gt_classes)), gt_classes] = 1 + target = target[:, :K] + + cls_loss = F.binary_cross_entropy_with_logits( + pred_class_logits[:, :-1], target, reduction="none" + ) + + if self.use_fed_loss: + fed_loss_classes = self.get_fed_loss_classes( + gt_classes, + num_fed_loss_classes=self.fed_loss_num_classes, + num_classes=K, + weight=self.fed_loss_cls_weights, + ) + fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1) + fed_loss_classes_mask[fed_loss_classes] = 1 + fed_loss_classes_mask = fed_loss_classes_mask[:K] + weight = fed_loss_classes_mask.view(1, K).expand(N, K).float() + else: + weight = 1 + + loss = torch.sum(cls_loss * weight) / N + return loss + + def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes): + """ + Args: + proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5). + pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)). + gt_classes is a long tensor of shape R, the gt class label of each proposal. + R shall be the number of proposals. + """ + box_dim = proposal_boxes.shape[1] # 4 or 5 + # Regression loss is only computed for foreground proposals (those matched to a GT) + fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0] + if pred_deltas.shape[1] == box_dim: # cls-agnostic regression + fg_pred_deltas = pred_deltas[fg_inds] + else: + fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[ + fg_inds, gt_classes[fg_inds] + ] + + loss_box_reg = _dense_box_regression_loss( + [proposal_boxes[fg_inds]], + self.box2box_transform, + [fg_pred_deltas.unsqueeze(0)], + [gt_boxes[fg_inds]], + ..., + self.box_reg_loss_type, + self.smooth_l1_beta, + ) + + # The reg loss is normalized using the total number of regions (R), not the number + # of foreground regions even though the box regression loss is only defined on + # foreground regions. Why? Because doing so gives equal training influence to + # each foreground example. To see how, consider two different minibatches: + # (1) Contains a single foreground region + # (2) Contains 100 foreground regions + # If we normalize by the number of foreground regions, the single example in + # minibatch (1) will be given 100 times as much influence as each foreground + # example in minibatch (2). Normalizing by the total number of regions, R, + # means that the single example in minibatch (1) and each of the 100 examples + # in minibatch (2) are given equal influence. + return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty + + def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. The ``proposal_boxes`` field is expected. + + Returns: + list[Instances]: same as `fast_rcnn_inference`. + list[Tensor]: same as `fast_rcnn_inference`. 
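# A sketch of the target construction in sigmoid_cross_entropy_loss above:
# the background class (label == K) one-hots into the column that is then
# dropped, so its target row is all zeros over the K kept foreground logits.
import torch

K = 3
gt_classes = torch.tensor([0, 2, 3])  # 3 == background in this toy setup
target = torch.zeros(len(gt_classes), K + 1)
target[range(len(gt_classes)), gt_classes] = 1
target = target[:, :K]
print(target)
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 0., 0.]])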
+ """ + boxes = self.predict_boxes(predictions, proposals) + scores = self.predict_probs(predictions, proposals) + image_shapes = [x.image_size for x in proposals] + return fast_rcnn_inference( + boxes, + scores, + image_shapes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_topk_per_image, + ) + + def predict_boxes_for_gt_classes(self, predictions, proposals): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were used + to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected. + + Returns: + list[Tensor]: + A list of Tensors of predicted boxes for GT classes in case of + class-specific box head. Element i of the list has shape (Ri, B), where Ri is + the number of proposals for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + scores, proposal_deltas = predictions + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) + N, B = proposal_boxes.shape + predict_boxes = self.box2box_transform.apply_deltas( + proposal_deltas, proposal_boxes + ) # Nx(KxB) + + K = predict_boxes.shape[1] // B + if K > 1: + gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0) + # Some proposals are ignored or have a background class. Their gt_classes + # cannot be used as index. + gt_classes = gt_classes.clamp_(0, K - 1) + + predict_boxes = predict_boxes.view(N, K, B)[ + torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes + ] + num_prop_per_image = [len(p) for p in proposals] + return predict_boxes.split(num_prop_per_image) + + def predict_boxes( + self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] + ): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. The ``proposal_boxes`` field is expected. + + Returns: + list[Tensor]: + A list of Tensors of predicted class-specific or class-agnostic boxes + for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is + the number of proposals for image i and B is the box dimension (4 or 5) + """ + if not len(proposals): + return [] + _, proposal_deltas = predictions + num_prop_per_image = [len(p) for p in proposals] + proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) + predict_boxes = self.box2box_transform.apply_deltas( + proposal_deltas, + proposal_boxes, + ) # Nx(KxB) + return predict_boxes.split(num_prop_per_image) + + def predict_probs( + self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances] + ): + """ + Args: + predictions: return values of :meth:`forward()`. + proposals (list[Instances]): proposals that match the features that were + used to compute predictions. + + Returns: + list[Tensor]: + A list of Tensors of predicted class probabilities for each image. + Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i. 
+ """ + scores, _ = predictions + num_inst_per_image = [len(p) for p in proposals] + if self.use_sigmoid_ce: + probs = scores.sigmoid() + else: + probs = F.softmax(scores, dim=-1) + return probs.split(num_inst_per_image, dim=0) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/keypoint_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/keypoint_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d0af8e66c502f724ca97f3c72f19c0e54ee2d6e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/keypoint_head.py @@ -0,0 +1,272 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import torch +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate +from custom_detectron2.structures import Instances, heatmaps_to_keypoints +from custom_detectron2.utils.events import get_event_storage +from custom_detectron2.utils.registry import Registry + +_TOTAL_SKIPPED = 0 + + +__all__ = [ + "ROI_KEYPOINT_HEAD_REGISTRY", + "build_keypoint_head", + "BaseKeypointRCNNHead", + "KRCNNConvDeconvUpsampleHead", +] + + +ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") +ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """ +Registry for keypoint heads, which make keypoint predictions from per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +def build_keypoint_head(cfg, input_shape): + """ + Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME + return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): + """ + Arguments: + pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number + of instances in the batch, K is the number of keypoints, and S is the side length + of the keypoint heatmap. The values are spatial logits. + instances (list[Instances]): A list of M Instances, where M is the batch size. + These instances are predictions from the model + that are in 1:1 correspondence with pred_keypoint_logits. + Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` + instance. + normalizer (float): Normalize the loss by this amount. + If not specified, we normalize by the number of visible keypoints in the minibatch. + + Returns a scalar tensor containing the loss. 
+ """ + heatmaps = [] + valid = [] + + keypoint_side_len = pred_keypoint_logits.shape[2] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + keypoints = instances_per_image.gt_keypoints + heatmaps_per_image, valid_per_image = keypoints.to_heatmap( + instances_per_image.proposal_boxes.tensor, keypoint_side_len + ) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + if len(heatmaps): + keypoint_targets = cat(heatmaps, dim=0) + valid = cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.nonzero(valid).squeeze(1) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if len(heatmaps) == 0 or valid.numel() == 0: + global _TOTAL_SKIPPED + _TOTAL_SKIPPED += 1 + storage = get_event_storage() + storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) + return pred_keypoint_logits.sum() * 0 + + N, K, H, W = pred_keypoint_logits.shape + pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy( + pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" + ) + + # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch + if normalizer is None: + normalizer = valid.numel() + keypoint_loss /= normalizer + + return keypoint_loss + + +def keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]): + """ + Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score) + and add it to the `pred_instances` as a `pred_keypoints` field. + + Args: + pred_keypoint_logits (Tensor): A tensor of shape (R, K, S, S) where R is the total number + of instances in the batch, K is the number of keypoints, and S is the side length of + the keypoint heatmap. The values are spatial logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images. + + Returns: + None. Each element in pred_instances will contain extra "pred_keypoints" and + "pred_keypoint_heatmaps" fields. "pred_keypoints" is a tensor of shape + (#instance, K, 3) where the last dimension corresponds to (x, y, score). + The scores are larger than 0. "pred_keypoint_heatmaps" contains the raw + keypoint logits as passed to this function. + """ + # flatten all bboxes from all images together (list[Boxes] -> Rx4 tensor) + bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0) + + pred_keypoint_logits = pred_keypoint_logits.detach() + keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits, bboxes_flat.detach()) + num_instances_per_image = [len(i) for i in pred_instances] + keypoint_results = keypoint_results[:, :, [0, 1, 3]].split(num_instances_per_image, dim=0) + heatmap_results = pred_keypoint_logits.split(num_instances_per_image, dim=0) + + for keypoint_results_per_image, heatmap_results_per_image, instances_per_image in zip( + keypoint_results, heatmap_results, pred_instances + ): + # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score) + # heatmap_results_per_image is (num instances)x(num keypoints)x(side)x(side) + instances_per_image.pred_keypoints = keypoint_results_per_image + instances_per_image.pred_keypoint_heatmaps = heatmap_results_per_image + + +class BaseKeypointRCNNHead(nn.Module): + """ + Implement the basic Keypoint R-CNN losses and inference logic described in + Sec. 5 of :paper:`Mask R-CNN`. 
+ """ + + @configurable + def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0): + """ + NOTE: this interface is experimental. + + Args: + num_keypoints (int): number of keypoints to predict + loss_weight (float): weight to multiple on the keypoint loss + loss_normalizer (float or str): + If float, divide the loss by `loss_normalizer * #images`. + If 'visible', the loss is normalized by the total number of + visible keypoints across images. + """ + super().__init__() + self.num_keypoints = num_keypoints + self.loss_weight = loss_weight + assert loss_normalizer == "visible" or isinstance(loss_normalizer, float), loss_normalizer + self.loss_normalizer = loss_normalizer + + @classmethod + def from_config(cls, cfg, input_shape): + ret = { + "loss_weight": cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT, + "num_keypoints": cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS, + } + normalize_by_visible = ( + cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS + ) # noqa + if not normalize_by_visible: + batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE + positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION + ret["loss_normalizer"] = ( + ret["num_keypoints"] * batch_size_per_image * positive_sample_fraction + ) + else: + ret["loss_normalizer"] = "visible" + return ret + + def forward(self, x, instances: List[Instances]): + """ + Args: + x: input 4D region feature(s) provided by :class:`ROIHeads`. + instances (list[Instances]): contains the boxes & labels corresponding + to the input features. + Exact format is up to its caller to decide. + Typically, this is the foreground instances in training, with + "proposal_boxes" field and other gt annotations. + In inference, it contains boxes that are already predicted. + + Returns: + A dict of losses if in training. The predicted "instances" if in inference. + """ + x = self.layers(x) + if self.training: + num_images = len(instances) + normalizer = ( + None if self.loss_normalizer == "visible" else num_images * self.loss_normalizer + ) + return { + "loss_keypoint": keypoint_rcnn_loss(x, instances, normalizer=normalizer) + * self.loss_weight + } + else: + keypoint_rcnn_inference(x, instances) + return instances + + def layers(self, x): + """ + Neural network layers that makes predictions from regional input features. + """ + raise NotImplementedError + + +# To get torchscript support, we make the head a subclass of `nn.Sequential`. +# Therefore, to add new layers in this head class, please make sure they are +# added in the order they will be used in forward(). +@ROI_KEYPOINT_HEAD_REGISTRY.register() +class KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential): + """ + A standard keypoint head containing a series of 3x3 convs, followed by + a transpose convolution and bilinear interpolation for upsampling. + It is described in Sec. 5 of :paper:`Mask R-CNN`. + """ + + @configurable + def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs): + """ + NOTE: this interface is experimental. + + Args: + input_shape (ShapeSpec): shape of the input feature + conv_dims: an iterable of output channel counts for each conv in the head + e.g. (512, 512, 512) for three convs outputting 512 channels. 
+ """ + super().__init__(num_keypoints=num_keypoints, **kwargs) + + # default up_scale to 2.0 (this can be made an option) + up_scale = 2.0 + in_channels = input_shape.channels + + for idx, layer_channels in enumerate(conv_dims, 1): + module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1) + self.add_module("conv_fcn{}".format(idx), module) + self.add_module("conv_fcn_relu{}".format(idx), nn.ReLU()) + in_channels = layer_channels + + deconv_kernel = 4 + self.score_lowres = ConvTranspose2d( + in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1 + ) + self.up_scale = up_scale + + for name, param in self.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + # Caffe2 implementation uses MSRAFill, which in fact + # corresponds to kaiming_normal_ in PyTorch + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + ret["input_shape"] = input_shape + ret["conv_dims"] = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS + return ret + + def layers(self, x): + for layer in self: + x = layer(x) + x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/mask_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/mask_head.py new file mode 100644 index 0000000000000000000000000000000000000000..43b4318d7aab5896b4842a4e0efee3cb8f3a787f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/mask_head.py @@ -0,0 +1,298 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import List +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm +from custom_detectron2.layers.wrappers import move_device_like +from custom_detectron2.structures import Instances +from custom_detectron2.utils.events import get_event_storage +from custom_detectron2.utils.registry import Registry + +__all__ = [ + "BaseMaskRCNNHead", + "MaskRCNNConvUpsampleHead", + "build_mask_head", + "ROI_MASK_HEAD_REGISTRY", +] + + +ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") +ROI_MASK_HEAD_REGISTRY.__doc__ = """ +Registry for mask heads, which predicts instance masks given +per-region features. + +The registered object will be called with `obj(cfg, input_shape)`. +""" + + +@torch.jit.unused +def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0): + """ + Compute the mask prediction loss defined in the Mask R-CNN paper. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 + correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, + ...) associated with each instance are stored in fields. + vis_period (int): the period (in steps) to dump visualization. 
+ + Returns: + mask_loss (Tensor): A scalar tensor containing the loss. + """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + total_num_masks = pred_mask_logits.size(0) + mask_side_len = pred_mask_logits.size(2) + assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" + + gt_classes = [] + gt_masks = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( + instances_per_image.proposal_boxes.tensor, mask_side_len + ).to(device=pred_mask_logits.device) + # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len + gt_masks.append(gt_masks_per_image) + + if len(gt_masks) == 0: + return pred_mask_logits.sum() * 0 + + gt_masks = cat(gt_masks, dim=0) + + if cls_agnostic_mask: + pred_mask_logits = pred_mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + pred_mask_logits = pred_mask_logits[indices, gt_classes] + + if gt_masks.dtype == torch.bool: + gt_masks_bool = gt_masks + else: + # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) + gt_masks_bool = gt_masks > 0.5 + gt_masks = gt_masks.to(dtype=torch.float32) + + # Log the training accuracy (using gt classes and 0.5 threshold) + mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool + mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) + num_positive = gt_masks_bool.sum().item() + false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( + gt_masks_bool.numel() - num_positive, 1.0 + ) + false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) + + storage = get_event_storage() + storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) + storage.put_scalar("mask_rcnn/false_positive", false_positive) + storage.put_scalar("mask_rcnn/false_negative", false_negative) + if vis_period > 0 and storage.iter % vis_period == 0: + pred_masks = pred_mask_logits.sigmoid() + vis_masks = torch.cat([pred_masks, gt_masks], axis=2) + name = "Left: mask prediction; Right: mask GT" + for idx, vis_mask in enumerate(vis_masks): + vis_mask = torch.stack([vis_mask] * 3, axis=0) + storage.put_image(name + f" ({idx})", vis_mask) + + mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") + return mask_loss + + +def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]): + """ + Convert pred_mask_logits to estimated foreground probability masks while also + extracting only the masks for the predicted classes in pred_instances. For each + predicted box, the mask of the same class is attached to the instance by adding a + new "pred_masks" field to pred_instances. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + pred_instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Each Instances must have field "pred_classes". + + Returns: + None. 
+            pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask,
+            Wmask) for the predicted class. Note that the masks are returned as soft
+            (non-quantized) masks at the resolution predicted by the network; post-processing
+            steps, such as resizing the predicted masks to the original image resolution and/or
+            binarizing them, are left to the caller.
+    """
+    cls_agnostic_mask = pred_mask_logits.size(1) == 1
+
+    if cls_agnostic_mask:
+        mask_probs_pred = pred_mask_logits.sigmoid()
+    else:
+        # Select masks corresponding to the predicted classes
+        num_masks = pred_mask_logits.shape[0]
+        class_pred = cat([i.pred_classes for i in pred_instances])
+        device = (
+            class_pred.device
+            if torch.jit.is_scripting()
+            else ("cpu" if torch.jit.is_tracing() else class_pred.device)
+        )
+        indices = move_device_like(torch.arange(num_masks, device=device), class_pred)
+        mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()
+        # mask_probs_pred.shape: (B, 1, Hmask, Wmask)
+
+    num_boxes_per_image = [len(i) for i in pred_instances]
+    mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)
+
+    for prob, instances in zip(mask_probs_pred, pred_instances):
+        instances.pred_masks = prob  # (1, Hmask, Wmask)
+
+
+class BaseMaskRCNNHead(nn.Module):
+    """
+    Implement the basic Mask R-CNN losses and inference logic described in :paper:`Mask R-CNN`
+    """
+
+    @configurable
+    def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0):
+        """
+        NOTE: this interface is experimental.
+
+        Args:
+            loss_weight (float): multiplier of the loss
+            vis_period (int): visualization period
+        """
+        super().__init__()
+        self.vis_period = vis_period
+        self.loss_weight = loss_weight
+
+    @classmethod
+    def from_config(cls, cfg, input_shape):
+        return {"vis_period": cfg.VIS_PERIOD}
+
+    def forward(self, x, instances: List[Instances]):
+        """
+        Args:
+            x: input region feature(s) provided by :class:`ROIHeads`.
+            instances (list[Instances]): contains the boxes & labels corresponding
+                to the input features.
+                Exact format is up to its caller to decide.
+                Typically, this is the foreground instances in training, with
+                "proposal_boxes" field and other gt annotations.
+                In inference, it contains boxes that are already predicted.
+
+        Returns:
+            A dict of losses in training. The predicted "instances" in inference.
+        """
+        x = self.layers(x)
+        if self.training:
+            return {"loss_mask": mask_rcnn_loss(x, instances, self.vis_period) * self.loss_weight}
+        else:
+            mask_rcnn_inference(x, instances)
+            return instances
+
+    def layers(self, x):
+        """
+        Neural network layers that make predictions from input features.
+        """
+        raise NotImplementedError
+
+
+# To get torchscript support, we make the head a subclass of `nn.Sequential`.
+# Therefore, to add new layers in this head class, please make sure they are
+# added in the order they will be used in forward().
+@ROI_MASK_HEAD_REGISTRY.register()
+class MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential):
+    """
+    A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`).
+    Predictions are made with a final 1x1 conv layer.
+    """
+
+    @configurable
+    def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm="", **kwargs):
+        """
+        NOTE: this interface is experimental.
+
+        Args:
+            input_shape (ShapeSpec): shape of the input feature
+            num_classes (int): the number of foreground classes (i.e. background is not
+                included). 1 if using class agnostic prediction.
+ conv_dims (list[int]): a list of N>0 integers representing the output dimensions + of N-1 conv layers and the last upsample layer. + conv_norm (str or callable): normalization for the conv layers. + See :func:`detectron2.layers.get_norm` for supported types. + """ + super().__init__(**kwargs) + assert len(conv_dims) >= 1, "conv_dims have to be non-empty!" + + self.conv_norm_relus = [] + + cur_channels = input_shape.channels + for k, conv_dim in enumerate(conv_dims[:-1]): + conv = Conv2d( + cur_channels, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=not conv_norm, + norm=get_norm(conv_norm, conv_dim), + activation=nn.ReLU(), + ) + self.add_module("mask_fcn{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + cur_channels = conv_dim + + self.deconv = ConvTranspose2d( + cur_channels, conv_dims[-1], kernel_size=2, stride=2, padding=0 + ) + self.add_module("deconv_relu", nn.ReLU()) + cur_channels = conv_dims[-1] + + self.predictor = Conv2d(cur_channels, num_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.conv_norm_relus + [self.deconv]: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg, input_shape) + conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM + num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV + ret.update( + conv_dims=[conv_dim] * (num_conv + 1), # +1 for ConvTranspose + conv_norm=cfg.MODEL.ROI_MASK_HEAD.NORM, + input_shape=input_shape, + ) + if cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK: + ret["num_classes"] = 1 + else: + ret["num_classes"] = cfg.MODEL.ROI_HEADS.NUM_CLASSES + return ret + + def layers(self, x): + for layer in self: + x = layer(x) + return x + + +def build_mask_head(cfg, input_shape): + """ + Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_MASK_HEAD.NAME + return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/roi_heads.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..1722ac72dc07f142e6b56e4996c975edf5a79973 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/roi_heads.py @@ -0,0 +1,877 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import inspect +import logging +import numpy as np +from typing import Dict, List, Optional, Tuple +import torch +from torch import nn + +from custom_detectron2.config import configurable +from custom_detectron2.layers import ShapeSpec, nonzero_tuple +from custom_detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from custom_detectron2.utils.events import get_event_storage +from custom_detectron2.utils.registry import Registry + +from ..backbone.resnet import BottleneckBlock, ResNet +from ..matcher import Matcher +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from ..sampling import subsample_labels +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers +from .keypoint_head import build_keypoint_head +from .mask_head import build_mask_head + +ROI_HEADS_REGISTRY = Registry("ROI_HEADS") +ROI_HEADS_REGISTRY.__doc__ = """ +Registry for ROI heads in a generalized R-CNN model. +ROIHeads take feature maps and region proposals, and +perform per-region computation. + +The registered object will be called with `obj(cfg, input_shape)`. +The call is expected to return an :class:`ROIHeads`. +""" + +logger = logging.getLogger(__name__) + + +def build_roi_heads(cfg, input_shape): + """ + Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. + """ + name = cfg.MODEL.ROI_HEADS.NAME + return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +def select_foreground_proposals( + proposals: List[Instances], bg_label: int +) -> Tuple[List[Instances], List[torch.Tensor]]: + """ + Given a list of N Instances (for N images), each containing a `gt_classes` field, + return a list of Instances that contain only instances with `gt_classes != -1 && + gt_classes != bg_label`. + + Args: + proposals (list[Instances]): A list of N Instances, where N is the number of + images in the batch. + bg_label: label index of background class. + + Returns: + list[Instances]: N Instances, each contains only the selected foreground instances. + list[Tensor]: N boolean vector, correspond to the selection mask of + each Instances object. True for selected instances. + """ + assert isinstance(proposals, (list, tuple)) + assert isinstance(proposals[0], Instances) + assert proposals[0].has("gt_classes") + fg_proposals = [] + fg_selection_masks = [] + for proposals_per_image in proposals: + gt_classes = proposals_per_image.gt_classes + fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) + fg_idxs = fg_selection_mask.nonzero().squeeze(1) + fg_proposals.append(proposals_per_image[fg_idxs]) + fg_selection_masks.append(fg_selection_mask) + return fg_proposals, fg_selection_masks + + +def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: + """ + Args: + proposals (list[Instances]): a list of N Instances, where N is the + number of images. + + Returns: + proposals: only contains proposals with at least one visible keypoint. + + Note that this is still slightly different from Detectron. + In Detectron, proposals for training keypoint head are re-sampled from + all the proposals with IOU>threshold & >=1 visible keypoint. + + Here, the proposals are first sampled from all proposals with + IOU>threshold, then proposals with no visible keypoint are filtered out. + This strategy seems to make no difference on Detectron and is easier to implement. 
+ """ + ret = [] + all_num_fg = [] + for proposals_per_image in proposals: + # If empty/unannotated image (hard negatives), skip filtering for train + if len(proposals_per_image) == 0: + ret.append(proposals_per_image) + continue + gt_keypoints = proposals_per_image.gt_keypoints.tensor + # #fg x K x 3 + vis_mask = gt_keypoints[:, :, 2] >= 1 + xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] + proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 + kp_in_box = ( + (xs >= proposal_boxes[:, :, 0]) + & (xs <= proposal_boxes[:, :, 2]) + & (ys >= proposal_boxes[:, :, 1]) + & (ys <= proposal_boxes[:, :, 3]) + ) + selection = (kp_in_box & vis_mask).any(dim=1) + selection_idxs = nonzero_tuple(selection)[0] + all_num_fg.append(selection_idxs.numel()) + ret.append(proposals_per_image[selection_idxs]) + + storage = get_event_storage() + storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) + return ret + + +class ROIHeads(torch.nn.Module): + """ + ROIHeads perform all per-region computation in an R-CNN. + + It typically contains logic to + + 1. (in training only) match proposals with ground truth and sample them + 2. crop the regions and extract per-region features using proposals + 3. make per-region predictions with different heads + + It can have many variants, implemented as subclasses of this class. + This base class contains the logic to match/sample proposals. + But it is not necessary to inherit this class if the sampling logic is not needed. + """ + + @configurable + def __init__( + self, + *, + num_classes, + batch_size_per_image, + positive_fraction, + proposal_matcher, + proposal_append_gt=True, + ): + """ + NOTE: this interface is experimental. + + Args: + num_classes (int): number of foreground classes (i.e. background is not included) + batch_size_per_image (int): number of proposals to sample for training + positive_fraction (float): fraction of positive (foreground) proposals + to sample for training. + proposal_matcher (Matcher): matcher that matches proposals and ground truth + proposal_append_gt (bool): whether to include ground truth as proposals as well + """ + super().__init__() + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + self.num_classes = num_classes + self.proposal_matcher = proposal_matcher + self.proposal_append_gt = proposal_append_gt + + @classmethod + def from_config(cls, cfg): + return { + "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, + "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION, + "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES, + "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT, + # Matcher to assign box proposals to gt boxes + "proposal_matcher": Matcher( + cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS, + cfg.MODEL.ROI_HEADS.IOU_LABELS, + allow_low_quality_matches=False, + ), + } + + def _sample_proposals( + self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Based on the matching between N proposals and M groundtruth, + sample the proposals and set their classification labels. + + Args: + matched_idxs (Tensor): a vector of length N, each is the best-matched + gt index in [0, M) for each proposal. + matched_labels (Tensor): a vector of length N, the matcher's label + (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. + gt_classes (Tensor): a vector of length M. + + Returns: + Tensor: a vector of indices of sampled proposals. 
Each is in [0, N). + Tensor: a vector of the same length, the classification label for + each sampled proposal. Each sample is labeled as either a category in + [0, num_classes) or the background (num_classes). + """ + has_gt = gt_classes.numel() > 0 + # Get the corresponding GT for each proposal + if has_gt: + gt_classes = gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[matched_labels == 0] = self.num_classes + # Label ignore proposals (-1 label) + gt_classes[matched_labels == -1] = -1 + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + + sampled_fg_idxs, sampled_bg_idxs = subsample_labels( + gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes + ) + + sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) + return sampled_idxs, gt_classes[sampled_idxs] + + @torch.no_grad() + def label_and_sample_proposals( + self, proposals: List[Instances], targets: List[Instances] + ) -> List[Instances]: + """ + Prepare some proposals to be used to train the ROI heads. + It performs box matching between `proposals` and `targets`, and assigns + training labels to the proposals. + It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth + boxes, with a fraction of positives that is no larger than + ``self.positive_fraction``. + + Args: + See :meth:`ROIHeads.forward` + + Returns: + list[Instances]: + length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + + - proposal_boxes: the proposal boxes + - gt_boxes: the ground-truth box that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + + Other fields such as "gt_classes", "gt_masks", that's included in `targets`. + """ + # Augment proposals with ground-truth boxes. + # In the case of learned proposals (e.g., RPN), when training starts + # the proposals will be low quality due to random initialization. + # It's possible that none of these initial + # proposals have high enough overlap with the gt objects to be used + # as positive examples for the second stage components (box head, + # cls head, mask head). Adding the gt boxes to the set of proposals + # ensures that the second stage components will have some positive + # examples from the start of training. For RPN, this augmentation improves + # convergence and empirically improves box AP on COCO by about 0.5 + # points (under one tested configuration). + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(targets, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix) + sampled_idxs, gt_classes = self._sample_proposals( + matched_idxs, matched_labels, targets_per_image.gt_classes + ) + + # Set target attributes of the sampled proposals: + proposals_per_image = proposals_per_image[sampled_idxs] + proposals_per_image.gt_classes = gt_classes + + if has_gt: + sampled_targets = matched_idxs[sampled_idxs] + # We index all the attributes of targets that start with "gt_" + # and have not been added to proposals yet (="gt_classes"). 
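+                # For example (illustrative): if `targets` carry "gt_masks" and
+                # "gt_keypoints", both are gathered onto the sampled proposals here via
+                # `sampled_targets`, for the mask/keypoint heads to consume later.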
+                # NOTE: here the indexing wastes some compute, because heads
+                # like masks, keypoints, etc, will filter the proposals again,
+                # (by foreground/background, or number of keypoints in the image, etc)
+                # so we essentially index the data twice.
+                for (trg_name, trg_value) in targets_per_image.get_fields().items():
+                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
+                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
+            # If no GT is given in the image, we don't know what a dummy gt value can be.
+            # Therefore the returned proposals won't have any gt_* fields, except for a
+            # gt_classes full of background label.
+
+            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
+            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
+            proposals_with_gt.append(proposals_per_image)
+
+        # Log the number of fg/bg samples that are selected for training ROI heads
+        storage = get_event_storage()
+        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
+        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
+
+        return proposals_with_gt
+
+    def forward(
+        self,
+        images: ImageList,
+        features: Dict[str, torch.Tensor],
+        proposals: List[Instances],
+        targets: Optional[List[Instances]] = None,
+    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
+        """
+        Args:
+            images (ImageList):
+            features (dict[str,Tensor]): input data as a mapping from feature
+                map name to tensor. Axis 0 represents the number of images `N` in
+                the input data; axes 1-3 are channels, height, and width, which may
+                vary between feature maps (e.g., if a feature pyramid is used).
+            proposals (list[Instances]): length `N` list of `Instances`. The i-th
+                `Instances` contains object proposals for the i-th input image,
+                with fields "proposal_boxes" and "objectness_logits".
+            targets (list[Instances], optional): length `N` list of `Instances`. The i-th
+                `Instances` contains the ground-truth per-instance annotations
+                for the i-th input image. Specify `targets` during training only.
+                It may have the following fields:
+
+                - gt_boxes: the bounding box of each instance.
+                - gt_classes: the label for each instance with a category ranging in [0, #class].
+                - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
+                - gt_keypoints: NxKx3, the ground-truth keypoints for each instance.
+
+        Returns:
+            list[Instances]: length `N` list of `Instances` containing the
+            detected instances. Returned during inference only; may be [] during training.
+
+            dict[str->Tensor]:
+            mapping from a named loss to a tensor storing the loss. Used during training only.
+        """
+        raise NotImplementedError()
+
+
+@ROI_HEADS_REGISTRY.register()
+class Res5ROIHeads(ROIHeads):
+    """
+    The ROIHeads in a typical "C4" R-CNN model, where
+    the box and mask head share the cropping and
+    the per-region feature computation by a Res5 block.
+    See :paper:`ResNet` Appendix A.
+    """
+
+    @configurable
+    def __init__(
+        self,
+        *,
+        in_features: List[str],
+        pooler: ROIPooler,
+        res5: nn.Module,
+        box_predictor: nn.Module,
+        mask_head: Optional[nn.Module] = None,
+        **kwargs,
+    ):
+        """
+        NOTE: this interface is experimental.
+
+        Args:
+            in_features (list[str]): list of backbone feature map names to use for
+                feature extraction
+            pooler (ROIPooler): pooler to extract region features from backbone
+            res5 (nn.Sequential): a CNN to compute per-region features, to be used by
+                ``box_predictor`` and ``mask_head``. Typically this is a "res5"
+                block from a ResNet.
+            box_predictor (nn.Module): make box predictions from the feature.
+                Should have the same interface as :class:`FastRCNNOutputLayers`.
+            mask_head (nn.Module): transform features to make mask predictions
+        """
+        super().__init__(**kwargs)
+        self.in_features = in_features
+        self.pooler = pooler
+        if isinstance(res5, (list, tuple)):
+            res5 = nn.Sequential(*res5)
+        self.res5 = res5
+        self.box_predictor = box_predictor
+        self.mask_on = mask_head is not None
+        if self.mask_on:
+            self.mask_head = mask_head
+
+    @classmethod
+    def from_config(cls, cfg, input_shape):
+        # fmt: off
+        ret = super().from_config(cfg)
+        in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES
+        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
+        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
+        pooler_scales     = (1.0 / input_shape[in_features[0]].stride, )
+        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
+        mask_on           = cfg.MODEL.MASK_ON
+        # fmt: on
+        assert not cfg.MODEL.KEYPOINT_ON
+        assert len(in_features) == 1
+
+        ret["pooler"] = ROIPooler(
+            output_size=pooler_resolution,
+            scales=pooler_scales,
+            sampling_ratio=sampling_ratio,
+            pooler_type=pooler_type,
+        )
+
+        # Compatibility with old moco code. Might be useful.
+        # See notes in StandardROIHeads.from_config
+        if not inspect.ismethod(cls._build_res5_block):
+            logger.warning(
+                "The behavior of _build_res5_block may change. "
+                "Please do not depend on private methods."
+            )
+            cls._build_res5_block = classmethod(cls._build_res5_block)
+
+        ret["res5"], out_channels = cls._build_res5_block(cfg)
+        ret["box_predictor"] = FastRCNNOutputLayers(
+            cfg, ShapeSpec(channels=out_channels, height=1, width=1)
+        )
+
+        if mask_on:
+            ret["mask_head"] = build_mask_head(
+                cfg,
+                ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
+            )
+        return ret
+
+    @classmethod
+    def _build_res5_block(cls, cfg):
+        # fmt: off
+        stage_channel_factor = 2 ** 3  # res5 is 8x res2
+        num_groups           = cfg.MODEL.RESNETS.NUM_GROUPS
+        width_per_group      = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
+        bottleneck_channels  = num_groups * width_per_group * stage_channel_factor
+        out_channels         = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
+        stride_in_1x1        = cfg.MODEL.RESNETS.STRIDE_IN_1X1
+        norm                 = cfg.MODEL.RESNETS.NORM
+        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
+            "Deformable conv is not yet supported in res5 head."
+        # fmt: on
+
+        blocks = ResNet.make_stage(
+            BottleneckBlock,
+            3,
+            stride_per_block=[2, 1, 1],
+            in_channels=out_channels // 2,
+            bottleneck_channels=bottleneck_channels,
+            out_channels=out_channels,
+            num_groups=num_groups,
+            norm=norm,
+            stride_in_1x1=stride_in_1x1,
+        )
+        return nn.Sequential(*blocks), out_channels
+
+    def _shared_roi_transform(self, features: List[torch.Tensor], boxes: List[Boxes]):
+        x = self.pooler(features, boxes)
+        return self.res5(x)
+
+    def forward(
+        self,
+        images: ImageList,
+        features: Dict[str, torch.Tensor],
+        proposals: List[Instances],
+        targets: Optional[List[Instances]] = None,
+    ):
+        """
+        See :meth:`ROIHeads.forward`.
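+
+        Note: box and mask predictions share the same pooled res5 features; in
+        training the mask head reuses `box_features` instead of re-pooling.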
+ """ + del images + + if self.training: + assert targets + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + proposal_boxes = [x.proposal_boxes for x in proposals] + box_features = self._shared_roi_transform( + [features[f] for f in self.in_features], proposal_boxes + ) + predictions = self.box_predictor(box_features.mean(dim=[2, 3])) + + if self.training: + del features + losses = self.box_predictor.losses(predictions, proposals) + if self.mask_on: + proposals, fg_selection_masks = select_foreground_proposals( + proposals, self.num_classes + ) + # Since the ROI feature transform is shared between boxes and masks, + # we don't need to recompute features. The mask loss is only defined + # on foreground proposals, so we need to select out the foreground + # features. + mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] + del box_features + losses.update(self.mask_head(mask_features, proposals)) + return [], losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> List[Instances]: + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (Instances): + the same `Instances` object, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + if self.mask_on: + feature_list = [features[f] for f in self.in_features] + x = self._shared_roi_transform(feature_list, [x.pred_boxes for x in instances]) + return self.mask_head(x, instances) + else: + return instances + + +@ROI_HEADS_REGISTRY.register() +class StandardROIHeads(ROIHeads): + """ + It's "standard" in a sense that there is no ROI transform sharing + or feature sharing between tasks. + Each head independently processes the input features by each head's + own pooler and head. + + This class is used by most models, such as FPN and C5. + To implement more models, you can subclass it and implement a different + :meth:`forward()` or a head. + """ + + @configurable + def __init__( + self, + *, + box_in_features: List[str], + box_pooler: ROIPooler, + box_head: nn.Module, + box_predictor: nn.Module, + mask_in_features: Optional[List[str]] = None, + mask_pooler: Optional[ROIPooler] = None, + mask_head: Optional[nn.Module] = None, + keypoint_in_features: Optional[List[str]] = None, + keypoint_pooler: Optional[ROIPooler] = None, + keypoint_head: Optional[nn.Module] = None, + train_on_pred_boxes: bool = False, + **kwargs, + ): + """ + NOTE: this interface is experimental. + + Args: + box_in_features (list[str]): list of feature names to use for the box head. + box_pooler (ROIPooler): pooler to extra region features for box head + box_head (nn.Module): transform features to make box predictions + box_predictor (nn.Module): make box predictions from the feature. + Should have the same interface as :class:`FastRCNNOutputLayers`. + mask_in_features (list[str]): list of feature names to use for the mask + pooler or mask head. None if not using mask head. 
+ mask_pooler (ROIPooler): pooler to extract region features from image features. + The mask head will then take region features to make predictions. + If None, the mask head will directly take the dict of image features + defined by `mask_in_features` + mask_head (nn.Module): transform features to make mask predictions + keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``. + train_on_pred_boxes (bool): whether to use proposal boxes or + predicted boxes from the box head to train other heads. + """ + super().__init__(**kwargs) + # keep self.in_features for backward compatibility + self.in_features = self.box_in_features = box_in_features + self.box_pooler = box_pooler + self.box_head = box_head + self.box_predictor = box_predictor + + self.mask_on = mask_in_features is not None + if self.mask_on: + self.mask_in_features = mask_in_features + self.mask_pooler = mask_pooler + self.mask_head = mask_head + + self.keypoint_on = keypoint_in_features is not None + if self.keypoint_on: + self.keypoint_in_features = keypoint_in_features + self.keypoint_pooler = keypoint_pooler + self.keypoint_head = keypoint_head + + self.train_on_pred_boxes = train_on_pred_boxes + + @classmethod + def from_config(cls, cfg, input_shape): + ret = super().from_config(cfg) + ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES + # Subclasses that have not been updated to use from_config style construction + # may have overridden _init_*_head methods. In this case, those overridden methods + # will not be classmethods and we need to avoid trying to call them here. + # We test for this with ismethod which only returns True for bound methods of cls. + # Such subclasses will need to handle calling their overridden _init_*_head methods. + if inspect.ismethod(cls._init_box_head): + ret.update(cls._init_box_head(cfg, input_shape)) + if inspect.ismethod(cls._init_mask_head): + ret.update(cls._init_mask_head(cfg, input_shape)) + if inspect.ismethod(cls._init_keypoint_head): + ret.update(cls._init_keypoint_head(cfg, input_shape)) + return ret + + @classmethod + def _init_box_head(cls, cfg, input_shape): + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + + # If StandardROIHeads is applied on multiple feature maps (as in FPN), + # then we share the same predictors and therefore the channel counts must be the same + in_channels = [input_shape[f].channels for f in in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + # Here we split "box head" and "box predictor", which is mainly due to historical reasons. + # They are used together so the "box predictor" layers should be part of the "box head". + # New subclasses of ROIHeads do not need "box predictor"s. 
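+        # Illustrative shapes (an assumption, not fixed by this code): with a typical
+        # FPN config, in_channels is 256 and pooler_resolution is 7, so the box head
+        # is built for per-RoI features of shape (num_rois, 256, 7, 7).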
+ box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape) + return { + "box_in_features": in_features, + "box_pooler": box_pooler, + "box_head": box_head, + "box_predictor": box_predictor, + } + + @classmethod + def _init_mask_head(cls, cfg, input_shape): + if not cfg.MODEL.MASK_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) + sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"mask_in_features": in_features} + ret["mask_pooler"] = ( + ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + if pooler_type + else None + ) + if pooler_type: + shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + else: + shape = {f: input_shape[f] for f in in_features} + ret["mask_head"] = build_mask_head(cfg, shape) + return ret + + @classmethod + def _init_keypoint_head(cls, cfg, input_shape): + if not cfg.MODEL.KEYPOINT_ON: + return {} + # fmt: off + in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES + pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa + sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [input_shape[f].channels for f in in_features][0] + + ret = {"keypoint_in_features": in_features} + ret["keypoint_pooler"] = ( + ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + if pooler_type + else None + ) + if pooler_type: + shape = ShapeSpec( + channels=in_channels, width=pooler_resolution, height=pooler_resolution + ) + else: + shape = {f: input_shape[f] for f in in_features} + ret["keypoint_head"] = build_keypoint_head(cfg, shape) + return ret + + def forward( + self, + images: ImageList, + features: Dict[str, torch.Tensor], + proposals: List[Instances], + targets: Optional[List[Instances]] = None, + ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]: + """ + See :class:`ROIHeads.forward`. + """ + del images + if self.training: + assert targets, "'targets' argument is required during training" + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + if self.training: + losses = self._forward_box(features, proposals) + # Usually the original proposals used by the box head are used by the mask, keypoint + # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes + # predicted by the box head. + losses.update(self._forward_mask(features, proposals)) + losses.update(self._forward_keypoint(features, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features, proposals) + # During inference cascaded prediction is used: the mask and keypoints heads are only + # applied to the top scoring box detections. 
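+            # (Unlike training above, where the mask/keypoint heads run on the
+            # sampled proposals rather than on the final detections.)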
+ pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes( + self, features: Dict[str, torch.Tensor], instances: List[Instances] + ) -> List[Instances]: + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + list[Instances]: + the same `Instances` objects, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + instances = self._forward_mask(features, instances) + instances = self._forward_keypoint(features, instances) + return instances + + def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]): + """ + Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`, + the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + + Returns: + In training, a dict of losses. + In inference, a list of `Instances`, the predicted instances. + """ + features = [features[f] for f in self.box_in_features] + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + box_features = self.box_head(box_features) + predictions = self.box_predictor(box_features) + del box_features + + if self.training: + losses = self.box_predictor.losses(predictions, proposals) + # proposals is modified in-place below, so losses must be computed first. + if self.train_on_pred_boxes: + with torch.no_grad(): + pred_boxes = self.box_predictor.predict_boxes_for_gt_classes( + predictions, proposals + ) + for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes): + proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image) + return losses + else: + pred_instances, _ = self.box_predictor.inference(predictions, proposals) + return pred_instances + + def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]): + """ + Forward logic of the mask prediction branch. + + Args: + features (dict[str, Tensor]): mapping from feature map names to tensor. + Same as in :meth:`ROIHeads.forward`. + instances (list[Instances]): the per-image instances to train/predict masks. + In training, they can be the proposals. + In inference, they can be the boxes predicted by R-CNN box head. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_masks" and return it. + """ + if not self.mask_on: + return {} if self.training else instances + + if self.training: + # head is only trained on positive proposals. 
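+            # select_foreground_proposals keeps only proposals whose gt_classes is a
+            # real category (not background, not ignored), so the mask loss below
+            # never sees negative examples.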
+            instances, _ = select_foreground_proposals(instances, self.num_classes)
+
+        if self.mask_pooler is not None:
+            features = [features[f] for f in self.mask_in_features]
+            boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
+            features = self.mask_pooler(features, boxes)
+        else:
+            features = {f: features[f] for f in self.mask_in_features}
+        return self.mask_head(features, instances)
+
+    def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
+        """
+        Forward logic of the keypoint prediction branch.
+
+        Args:
+            features (dict[str, Tensor]): mapping from feature map names to tensor.
+                Same as in :meth:`ROIHeads.forward`.
+            instances (list[Instances]): the per-image instances to train/predict keypoints.
+                In training, they can be the proposals.
+                In inference, they can be the boxes predicted by R-CNN box head.
+
+        Returns:
+            In training, a dict of losses.
+            In inference, update `instances` with new fields "pred_keypoints" and return it.
+        """
+        if not self.keypoint_on:
+            return {} if self.training else instances
+
+        if self.training:
+            # head is only trained on positive proposals with >=1 visible keypoints.
+            instances, _ = select_foreground_proposals(instances, self.num_classes)
+            instances = select_proposals_with_visible_keypoints(instances)
+
+        if self.keypoint_pooler is not None:
+            features = [features[f] for f in self.keypoint_in_features]
+            boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
+            features = self.keypoint_pooler(features, boxes)
+        else:
+            features = {f: features[f] for f in self.keypoint_in_features}
+        return self.keypoint_head(features, instances)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/rotated_fast_rcnn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/rotated_fast_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e29bb9b9cd44420bb3cc4136dd51c4c848b32c1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/roi_heads/rotated_fast_rcnn.py
@@ -0,0 +1,271 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+import torch
+
+from custom_detectron2.config import configurable
+from custom_detectron2.layers import ShapeSpec, batched_nms_rotated
+from custom_detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
+from custom_detectron2.utils.events import get_event_storage
+
+from ..box_regression import Box2BoxTransformRotated
+from ..poolers import ROIPooler
+from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
+from .box_head import build_box_head
+from .fast_rcnn import FastRCNNOutputLayers
+from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
+
+logger = logging.getLogger(__name__)
+
+"""
+Shape shorthand in this module:
+
+    N: number of images in the minibatch
+    R: number of ROIs, combined over all images, in the minibatch
+    Ri: number of ROIs in image i
+    K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
+
+Naming convention:
+
+    deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
+    transform (see :class:`box_regression.Box2BoxTransformRotated`).
+
+    pred_class_logits: predicted class scores in [-inf, +inf]; use
+        softmax(pred_class_logits) to estimate P(class).
+
+    gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
+        foreground object classes and K represents the background class.
+
+    pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
+        to detection box predictions.
+
+    gt_proposal_deltas: ground-truth rotated box2box transform deltas
+"""
+
+
+def fast_rcnn_inference_rotated(
+    boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
+):
+    """
+    Call `fast_rcnn_inference_single_image_rotated` for all images.
+
+    Args:
+        boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
+            boxes for each image. Element i has shape (Ri, K * 5) if doing
+            class-specific regression, or (Ri, 5) if doing class-agnostic
+            regression, where Ri is the number of predicted objects for image i.
+            This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
+        scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
+            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
+            for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
+        image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
+        score_thresh (float): Only return detections with a confidence score exceeding this
+            threshold.
+        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
+        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
+            all detections.
+
+    Returns:
+        instances: (list[Instances]): A list of N instances, one for each image in the batch,
+            that stores the topk most confident detections.
+        kept_indices: (list[Tensor]): A list of N 1D tensors; the elements of tensor i indicate
+            the corresponding boxes/scores indices in [0, Ri) from the input, for image i.
+    """
+    result_per_image = [
+        fast_rcnn_inference_single_image_rotated(
+            boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
+        )
+        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
+    ]
+    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
+
+
+@torch.no_grad()
+def fast_rcnn_inference_single_image_rotated(
+    boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
+):
+    """
+    Single-image inference. Return rotated bounding-box detection results by thresholding
+    on scores and applying rotated non-maximum suppression (Rotated NMS).
+
+    Args:
+        Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
+        per image.
+
+    Returns:
+        Same as `fast_rcnn_inference_rotated`, but for only one image.
+    """
+    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
+    if not valid_mask.all():
+        boxes = boxes[valid_mask]
+        scores = scores[valid_mask]
+
+    B = 5  # box dimension
+    scores = scores[:, :-1]
+    num_bbox_reg_classes = boxes.shape[1] // B
+    # Convert to Boxes to use the `clip` function ...
+    boxes = RotatedBoxes(boxes.reshape(-1, B))
+    boxes.clip(image_shape)
+    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B)  # R x C x B
+    # Filter results based on detection scores
+    filter_mask = scores > score_thresh  # R x K
+    # R' x 2. First column contains indices of the R predictions;
+    # Second column contains indices of classes.
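+    # Worked example (illustrative): with R=2 proposals and K=3 classes, a
+    # filter_mask of [[True, False, True], [False, True, False]] gives
+    # filter_inds = [[0, 0], [0, 2], [1, 1]]: one (proposal, class) pair per
+    # surviving detection, which the rotated NMS below then operates on.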
+    filter_inds = filter_mask.nonzero()
+    if num_bbox_reg_classes == 1:
+        boxes = boxes[filter_inds[:, 0], 0]
+    else:
+        boxes = boxes[filter_mask]
+    scores = scores[filter_mask]
+
+    # Apply per-class Rotated NMS
+    keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
+    if topk_per_image >= 0:
+        keep = keep[:topk_per_image]
+    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
+
+    result = Instances(image_shape)
+    result.pred_boxes = RotatedBoxes(boxes)
+    result.scores = scores
+    result.pred_classes = filter_inds[:, 1]
+
+    return result, filter_inds[:, 0]
+
+
+class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
+    """
+    Two linear layers for predicting Rotated Fast R-CNN outputs.
+    """
+
+    @classmethod
+    def from_config(cls, cfg, input_shape):
+        args = super().from_config(cfg, input_shape)
+        args["box2box_transform"] = Box2BoxTransformRotated(
+            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
+        )
+        return args
+
+    def inference(self, predictions, proposals):
+        """
+        Returns:
+            list[Instances]: same as `fast_rcnn_inference_rotated`.
+            list[Tensor]: same as `fast_rcnn_inference_rotated`.
+        """
+        boxes = self.predict_boxes(predictions, proposals)
+        scores = self.predict_probs(predictions, proposals)
+        image_shapes = [x.image_size for x in proposals]
+
+        return fast_rcnn_inference_rotated(
+            boxes,
+            scores,
+            image_shapes,
+            self.test_score_thresh,
+            self.test_nms_thresh,
+            self.test_topk_per_image,
+        )
+
+
+@ROI_HEADS_REGISTRY.register()
+class RROIHeads(StandardROIHeads):
+    """
+    This class is used by Rotated Fast R-CNN to detect rotated boxes.
+    For now, it only supports box predictions but not mask or keypoints.
+    """
+
+    @configurable
+    def __init__(self, **kwargs):
+        """
+        NOTE: this interface is experimental.
+        """
+        super().__init__(**kwargs)
+        assert (
+            not self.mask_on and not self.keypoint_on
+        ), "Mask/Keypoints not supported in Rotated ROIHeads."
+        assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"
+
+    @classmethod
+    def _init_box_head(cls, cfg, input_shape):
+        # fmt: off
+        in_features       = cfg.MODEL.ROI_HEADS.IN_FEATURES
+        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
+        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in in_features)
+        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
+        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
+        # fmt: on
+        assert pooler_type in ["ROIAlignRotated"], pooler_type
+        # assume all channel counts are equal
+        in_channels = [input_shape[f].channels for f in in_features][0]
+
+        box_pooler = ROIPooler(
+            output_size=pooler_resolution,
+            scales=pooler_scales,
+            sampling_ratio=sampling_ratio,
+            pooler_type=pooler_type,
+        )
+        box_head = build_box_head(
+            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
+        )
+        # This line is the only difference vs. StandardROIHeads
+        box_predictor = RotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
+        return {
+            "box_in_features": in_features,
+            "box_pooler": box_pooler,
+            "box_head": box_head,
+            "box_predictor": box_predictor,
+        }
+
+    @torch.no_grad()
+    def label_and_sample_proposals(self, proposals, targets):
+        """
+        Prepare some proposals to be used to train the RROI heads.
+        It performs box matching between `proposals` and `targets`, and assigns
+        training labels to the proposals.
+        It returns `self.batch_size_per_image` random samples from proposals and
+        ground-truth boxes, with a fraction of positives that is no larger than
+        `self.positive_sample_fraction`.
+
+        Args:
+            See :meth:`StandardROIHeads.forward`
+
+        Returns:
+            list[Instances]: length `N` list of `Instances`s containing the proposals
+                sampled for training. Each `Instances` has the following fields:
+                - proposal_boxes: the rotated proposal boxes
+                - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
+                  (this is only meaningful if the proposal has a label > 0; if label = 0
+                  then the ground-truth box is random)
+                - gt_classes: the ground-truth classification label for each proposal
+        """
+        if self.proposal_append_gt:
+            proposals = add_ground_truth_to_proposals(targets, proposals)
+
+        proposals_with_gt = []
+
+        num_fg_samples = []
+        num_bg_samples = []
+        for proposals_per_image, targets_per_image in zip(proposals, targets):
+            has_gt = len(targets_per_image) > 0
+            match_quality_matrix = pairwise_iou_rotated(
+                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
+            )
+            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
+            sampled_idxs, gt_classes = self._sample_proposals(
+                matched_idxs, matched_labels, targets_per_image.gt_classes
+            )
+
+            proposals_per_image = proposals_per_image[sampled_idxs]
+            proposals_per_image.gt_classes = gt_classes
+
+            if has_gt:
+                sampled_targets = matched_idxs[sampled_idxs]
+                proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
+
+            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
+            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
+            proposals_with_gt.append(proposals_per_image)
+
+        # Log the number of fg/bg samples that are selected for training ROI heads
+        storage = get_event_storage()
+        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
+        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
+
+        return proposals_with_gt
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/sampling.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/sampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..e143cf8073bd22f86b5d18de8e8c336874d3cda8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/sampling.py
@@ -0,0 +1,54 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import torch
+
+from custom_detectron2.layers import nonzero_tuple
+
+__all__ = ["subsample_labels"]
+
+
+def subsample_labels(
+    labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int
+):
+    """
+    Return `num_samples` (or fewer, if not enough found)
+    random samples from `labels` which is a mixture of positives & negatives.
+    It will try to return as many positives as possible without
+    exceeding `positive_fraction * num_samples`, and then try to
+    fill the remaining slots with negatives.
+
+    Args:
+        labels (Tensor): (N, ) label vector with values:
+            * -1: ignore
+            * bg_label: background ("negative") class
+            * otherwise: one or more foreground ("positive") classes
+        num_samples (int): The total number of labels with value >= 0 to return.
+            Values that are not sampled will be filled with -1 (ignore).
+        positive_fraction (float): The number of subsampled labels with values > 0
+            is `min(num_positives, int(positive_fraction * num_samples))`. The number
+            of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
+            In other words, if there are not enough positives, the sample is filled with
+            negatives. If there are also not enough negatives, then as many elements are
+            sampled as is possible.
+ bg_label (int): label index of background ("negative") class. + + Returns: + pos_idx, neg_idx (Tensor): + 1D vector of indices. The total length of both is `num_samples` or fewer. + """ + positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] + negative = nonzero_tuple(labels == bg_label)[0] + + num_pos = int(num_samples * positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = num_samples - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx = positive[perm1] + neg_idx = negative[perm2] + return pos_idx, neg_idx diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/test_time_augmentation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/test_time_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3539057fb519885914bfe1838ff39a384a2e96 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/modeling/test_time_augmentation.py @@ -0,0 +1,307 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import numpy as np +from contextlib import contextmanager +from itertools import count +from typing import List +import torch +from fvcore.transforms import HFlipTransform, NoOpTransform +from torch import nn +from torch.nn.parallel import DistributedDataParallel + +from custom_detectron2.config import configurable +from custom_detectron2.data.detection_utils import read_image +from custom_detectron2.data.transforms import ( + RandomFlip, + ResizeShortestEdge, + ResizeTransform, + apply_augmentations, +) +from custom_detectron2.structures import Boxes, Instances + +from .meta_arch import GeneralizedRCNN +from .postprocessing import detector_postprocess +from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image + +__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] + + +class DatasetMapperTTA: + """ + Implement test-time augmentation for detection data. + It is a callable which takes a dataset dict from a detection dataset, + and returns a list of dataset dicts where the images + are augmented from the input image by the transformations defined in the config. + This is used for test-time augmentation. + """ + + @configurable + def __init__(self, min_sizes: List[int], max_size: int, flip: bool): + """ + Args: + min_sizes: list of short-edge size to resize the image to + max_size: maximum height or width of resized images + flip: whether to apply flipping augmentation + """ + self.min_sizes = min_sizes + self.max_size = max_size + self.flip = flip + + @classmethod + def from_config(cls, cfg): + return { + "min_sizes": cfg.TEST.AUG.MIN_SIZES, + "max_size": cfg.TEST.AUG.MAX_SIZE, + "flip": cfg.TEST.AUG.FLIP, + } + + def __call__(self, dataset_dict): + """ + Args: + dict: a dict in standard model input format. See tutorials for details. + + Returns: + list[dict]: + a list of dicts, which contain augmented version of the input image. + The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. + Each dict has field "transforms" which is a TransformList, + containing the transforms that are used to generate this image. 
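+
+            For example (hypothetical values): ``min_sizes=[400, 600]`` with
+            ``flip=True`` yields 4 dicts, i.e. two resized copies plus their
+            horizontal flips.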
+ """ + numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy() + shape = numpy_image.shape + orig_shape = (dataset_dict["height"], dataset_dict["width"]) + if shape[:2] != orig_shape: + # It transforms the "original" image in the dataset to the input image + pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1]) + else: + pre_tfm = NoOpTransform() + + # Create all combinations of augmentations to use + aug_candidates = [] # each element is a list[Augmentation] + for min_size in self.min_sizes: + resize = ResizeShortestEdge(min_size, self.max_size) + aug_candidates.append([resize]) # resize only + if self.flip: + flip = RandomFlip(prob=1.0) + aug_candidates.append([resize, flip]) # resize + flip + + # Apply all the augmentations + ret = [] + for aug in aug_candidates: + new_image, tfms = apply_augmentations(aug, np.copy(numpy_image)) + torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1))) + + dic = copy.deepcopy(dataset_dict) + dic["transforms"] = pre_tfm + tfms + dic["image"] = torch_image + ret.append(dic) + return ret + + +class GeneralizedRCNNWithTTA(nn.Module): + """ + A GeneralizedRCNN with test-time augmentation enabled. + Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. + """ + + def __init__(self, cfg, model, tta_mapper=None, batch_size=3): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. + """ + super().__init__() + if isinstance(model, DistributedDataParallel): + model = model.module + assert isinstance( + model, GeneralizedRCNN + ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) + self.cfg = cfg.clone() + assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" + assert ( + not self.cfg.MODEL.LOAD_PROPOSALS + ), "TTA for pre-computed proposals is not supported yet" + + self.model = model + + if tta_mapper is None: + tta_mapper = DatasetMapperTTA(cfg) + self.tta_mapper = tta_mapper + self.batch_size = batch_size + + @contextmanager + def _turn_off_roi_heads(self, attrs): + """ + Open a context where some heads in `model.roi_heads` are temporarily turned off. + Args: + attr (list[str]): the attribute in `model.roi_heads` which can be used + to turn off a specific head, e.g., "mask_on", "keypoint_on". + """ + roi_heads = self.model.roi_heads + old = {} + for attr in attrs: + try: + old[attr] = getattr(roi_heads, attr) + except AttributeError: + # The head may not be implemented in certain ROIHeads + pass + + if len(old.keys()) == 0: + yield + else: + for attr in old.keys(): + setattr(roi_heads, attr, False) + yield + for attr in old.keys(): + setattr(roi_heads, attr, old[attr]) + + def _batch_inference(self, batched_inputs, detected_instances=None): + """ + Execute inference on a list of inputs, + using batch size = self.batch_size, instead of the length of the list. 
+ + Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` + """ + if detected_instances is None: + detected_instances = [None] * len(batched_inputs) + + outputs = [] + inputs, instances = [], [] + for idx, input, instance in zip(count(), batched_inputs, detected_instances): + inputs.append(input) + instances.append(instance) + if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: + outputs.extend( + self.model.inference( + inputs, + instances if instances[0] is not None else None, + do_postprocess=False, + ) + ) + inputs, instances = [], [] + return outputs + + def __call__(self, batched_inputs): + """ + Same input/output format as :meth:`GeneralizedRCNN.forward` + """ + + def _maybe_read_image(dataset_dict): + ret = copy.copy(dataset_dict) + if "image" not in ret: + image = read_image(ret.pop("file_name"), self.model.input_format) + image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW + ret["image"] = image + if "height" not in ret and "width" not in ret: + ret["height"] = image.shape[1] + ret["width"] = image.shape[2] + return ret + + return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs] + + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict with "image" field being a CHW tensor + + Returns: + dict: one output dict + """ + orig_shape = (input["height"], input["width"]) + augmented_inputs, tfms = self._get_augmented_inputs(input) + # Detect boxes from all augmented versions + with self._turn_off_roi_heads(["mask_on", "keypoint_on"]): + # temporarily disable roi heads + all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms) + # merge all detected boxes to obtain final predictions for boxes + merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape) + + if self.cfg.MODEL.MASK_ON: + # Use the detected boxes to obtain masks + augmented_instances = self._rescale_detected_boxes( + augmented_inputs, merged_instances, tfms + ) + # run forward on the detected boxes + outputs = self._batch_inference(augmented_inputs, augmented_instances) + # Delete now useless variables to avoid being out of memory + del augmented_inputs, augmented_instances + # average the predictions + merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms) + merged_instances = detector_postprocess(merged_instances, *orig_shape) + return {"instances": merged_instances} + else: + return {"instances": merged_instances} + + def _get_augmented_inputs(self, input): + augmented_inputs = self.tta_mapper(input) + tfms = [x.pop("transforms") for x in augmented_inputs] + return augmented_inputs, tfms + + def _get_augmented_boxes(self, augmented_inputs, tfms): + # 1: forward with all augmented images + outputs = self._batch_inference(augmented_inputs) + # 2: union the results + all_boxes = [] + all_scores = [] + all_classes = [] + for output, tfm in zip(outputs, tfms): + # Need to inverse the transforms on boxes, to obtain results on original image + pred_boxes = output.pred_boxes.tensor + original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy()) + all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device)) + + all_scores.extend(output.scores) + all_classes.extend(output.pred_classes) + all_boxes = torch.cat(all_boxes, dim=0) + return all_boxes, all_scores, all_classes + + def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw): + # select from the union of all results + num_boxes = 
len(all_boxes) + num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES + # +1 because fast_rcnn_inference expects background scores as well + all_scores_2d = torch.zeros(num_boxes, num_classes + 1, device=all_boxes.device) + for idx, cls, score in zip(count(), all_classes, all_scores): + all_scores_2d[idx, cls] = score + + merged_instances, _ = fast_rcnn_inference_single_image( + all_boxes, + all_scores_2d, + shape_hw, + 1e-8, + self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + self.cfg.TEST.DETECTIONS_PER_IMAGE, + ) + + return merged_instances + + def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms): + augmented_instances = [] + for input, tfm in zip(augmented_inputs, tfms): + # Transform the target box to the augmented image's coordinate space + pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy() + pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes)) + + aug_instances = Instances( + image_size=input["image"].shape[1:3], + pred_boxes=Boxes(pred_boxes), + pred_classes=merged_instances.pred_classes, + scores=merged_instances.scores, + ) + augmented_instances.append(aug_instances) + return augmented_instances + + def _reduce_pred_masks(self, outputs, tfms): + # Should apply inverse transforms on masks. + # We assume only resize & flip are used. pred_masks is a scale-invariant + # representation, so we handle flip specially + for output, tfm in zip(outputs, tfms): + if any(isinstance(t, HFlipTransform) for t in tfm.transforms): + output.pred_masks = output.pred_masks.flip(dims=[3]) + all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) + avg_pred_masks = torch.mean(all_pred_masks, dim=0) + return avg_pred_masks diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/README.md new file mode 100644 index 0000000000000000000000000000000000000000..95afe7ff8c8a9bd2f56621fcc3c1bdac11c256a9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/README.md @@ -0,0 +1,2 @@ + +Projects live in the [`projects` directory](../../projects) under the root of this repository, but not here. diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b2d0540b93ebbad78d6ff2cc0adc0fe8375816c2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/__init__.py @@ -0,0 +1,34 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
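+# This package exposes detectron2 "projects" (e.g. PointRend, DeepLab) as
+# ``detectron2.projects.<name>`` imports for in-place installs, via the meta
+# path finder registered below.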
+import importlib.abc +import importlib.util +from pathlib import Path + +__all__ = [] + +_PROJECTS = { + "point_rend": "PointRend", + "deeplab": "DeepLab", + "panoptic_deeplab": "Panoptic-DeepLab", +} +_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent / "projects" + +if _PROJECT_ROOT.is_dir(): + # This is true only for in-place installation (pip install -e, setup.py develop), + # where setup(package_dir=) does not work: https://github.com/pypa/setuptools/issues/230 + + class _D2ProjectsFinder(importlib.abc.MetaPathFinder): + def find_spec(self, name, path, target=None): + if not name.startswith("detectron2.projects."): + return + project_name = name.split(".")[-1] + project_dir = _PROJECTS.get(project_name) + if not project_dir: + return + target_file = _PROJECT_ROOT / f"{project_dir}/{project_name}/__init__.py" + if not target_file.is_file(): + return + return importlib.util.spec_from_file_location(name, target_file) + + import sys + + sys.meta_path.append(_D2ProjectsFinder()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcd88ff0c09d630577e3ac9f8afb5324a80a7be4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build_solver import build_lr_scheduler +from .config import add_deeplab_config +from .resnet import build_resnet_deeplab_backbone +from .semantic_seg import DeepLabV3Head, DeepLabV3PlusHead diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/build_solver.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/build_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..bc81cb27493af340dde2bd09ac828ba47b272f70 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/build_solver.py @@ -0,0 +1,27 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch + +from custom_detectron2.config import CfgNode +from custom_detectron2.solver import LRScheduler +from custom_detectron2.solver import build_lr_scheduler as build_d2_lr_scheduler + +from .lr_scheduler import WarmupPolyLR + + +def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + if name == "WarmupPolyLR": + return WarmupPolyLR( + optimizer, + cfg.SOLVER.MAX_ITER, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + power=cfg.SOLVER.POLY_LR_POWER, + constant_ending=cfg.SOLVER.POLY_LR_CONSTANT_ENDING, + ) + else: + return build_d2_lr_scheduler(cfg, optimizer) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/config.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/config.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5e45a9124e61c12d90cfc5032b268496891a4a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/config.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + + +def add_deeplab_config(cfg): + """ + Add config for DeepLab. 
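+
+    A minimal usage sketch (assuming the standard detectron2-style config flow;
+    the yaml path is hypothetical)::
+
+        from custom_detectron2.config import get_cfg
+
+        cfg = get_cfg()
+        add_deeplab_config(cfg)
+        cfg.merge_from_file("path/to/deeplab_config.yaml")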
+ """ + # We retry random cropping until no single category in semantic segmentation GT occupies more + # than `SINGLE_CATEGORY_MAX_AREA` part of the crop. + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0 + # Used for `poly` learning rate schedule. + cfg.SOLVER.POLY_LR_POWER = 0.9 + cfg.SOLVER.POLY_LR_CONSTANT_ENDING = 0.0 + # Loss type, choose from `cross_entropy`, `hard_pixel_mining`. + cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE = "hard_pixel_mining" + # DeepLab settings + cfg.MODEL.SEM_SEG_HEAD.PROJECT_FEATURES = ["res2"] + cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS = [48] + cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256 + cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS = [6, 12, 18] + cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT = 0.1 + cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV = False + # Backbone new configs + cfg.MODEL.RESNETS.RES4_DILATION = 1 + cfg.MODEL.RESNETS.RES5_MULTI_GRID = [1, 2, 4] + # ResNet stem type from: `basic`, `deeplab` + cfg.MODEL.RESNETS.STEM_TYPE = "deeplab" diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3a43087b7c1a2b4d2b249fad117724dbd0f14fdd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/loss.py @@ -0,0 +1,40 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +import torch.nn as nn + + +class DeepLabCE(nn.Module): + """ + Hard pixel mining with cross entropy loss, for semantic segmentation. + This is used in TensorFlow DeepLab frameworks. + Paper: DeeperLab: Single-Shot Image Parser + Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 # noqa + Arguments: + ignore_label: Integer, label to ignore. + top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its + value < 1.0, only compute the loss for the top k percent pixels + (e.g., the top 20% pixels). This is useful for hard pixel mining. + weight: Tensor, a manual rescaling weight given to each class. + """ + + def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None): + super(DeepLabCE, self).__init__() + self.top_k_percent_pixels = top_k_percent_pixels + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss( + weight=weight, ignore_index=ignore_label, reduction="none" + ) + + def forward(self, logits, labels, weights=None): + if weights is None: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + else: + # Apply per-pixel loss weights. + pixel_losses = self.criterion(logits, labels) * weights + pixel_losses = pixel_losses.contiguous().view(-1) + if self.top_k_percent_pixels == 1.0: + return pixel_losses.mean() + + top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel()) + pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels) + return pixel_losses.mean() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/lr_scheduler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9ad8fededad41d9bb0ee2314532fb2bcc03b39 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/lr_scheduler.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import math +from typing import List +import torch + +from custom_detectron2.solver.lr_scheduler import LRScheduler, _get_warmup_factor_at_iter + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupPolyLR(LRScheduler): + """ + Poly learning rate schedule used to train DeepLab. + Paper: DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, + Atrous Convolution, and Fully Connected CRFs. + Reference: https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/utils/train_utils.py#L337 # noqa + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + power: float = 0.9, + constant_ending: float = 0.0, + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + self.power = power + self.constant_ending = constant_ending + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + if self.constant_ending > 0 and warmup_factor == 1.0: + # Constant ending lr. + if ( + math.pow((1.0 - self.last_epoch / self.max_iters), self.power) + < self.constant_ending + ): + return [base_lr * self.constant_ending for base_lr in self.base_lrs] + return [ + base_lr * warmup_factor * math.pow((1.0 - self.last_epoch / self.max_iters), self.power) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..08bc22557b2e0d630548790610cc4c461fe04bdc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/resnet.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import fvcore.nn.weight_init as weight_init +import torch.nn.functional as F + +from custom_detectron2.layers import CNNBlockBase, Conv2d, get_norm +from custom_detectron2.modeling import BACKBONE_REGISTRY +from custom_detectron2.modeling.backbone.resnet import ( + BasicStem, + BottleneckBlock, + DeformBottleneckBlock, + ResNet, +) + + +class DeepLabStem(CNNBlockBase): + """ + The DeepLab ResNet stem (layers before the first residual block). + """ + + def __init__(self, in_channels=3, out_channels=128, norm="BN"): + """ + Args: + norm (str or callable): norm after the first conv layer. + See :func:`layers.get_norm` for supported format. 
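+
+        Note: this stem downsamples by an overall stride of 4 (a stride-2 3x3
+        conv followed by a stride-2 max pool), replacing the single 7x7 stride-2
+        conv of the basic ResNet stem with three 3x3 convs.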
+ """ + super().__init__(in_channels, out_channels, 4) + self.in_channels = in_channels + self.conv1 = Conv2d( + in_channels, + out_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False, + norm=get_norm(norm, out_channels // 2), + ) + self.conv2 = Conv2d( + out_channels // 2, + out_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels // 2), + ) + self.conv3 = Conv2d( + out_channels // 2, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + weight_init.c2_msra_fill(self.conv2) + weight_init.c2_msra_fill(self.conv3) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = self.conv2(x) + x = F.relu_(x) + x = self.conv3(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + +@BACKBONE_REGISTRY.register() +def build_resnet_deeplab_backbone(cfg, input_shape): + """ + Create a ResNet instance from config. + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + if cfg.MODEL.RESNETS.STEM_TYPE == "basic": + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + elif cfg.MODEL.RESNETS.STEM_TYPE == "deeplab": + stem = DeepLabStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + else: + raise ValueError("Unknown stem type: {}".format(cfg.MODEL.RESNETS.STEM_TYPE)) + + # fmt: off + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res4_dilation = cfg.MODEL.RESNETS.RES4_DILATION + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + res5_multi_grid = cfg.MODEL.RESNETS.RES5_MULTI_GRID + # fmt: on + assert res4_dilation in {1, 2}, "res4_dilation cannot be {}.".format(res4_dilation) + assert res5_dilation in {1, 2, 4}, "res5_dilation cannot be {}.".format(res5_dilation) + if res4_dilation == 2: + # Always dilate res5 if res4 is dilated. 
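+        # With res4 dilated, the backbone output stride is 8 rather than 16, so
+        # res5 must use dilation 4 to keep the effective receptive field of the
+        # standard DeepLab output-stride-8 recipe.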
+ assert res5_dilation == 4 + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + # Avoid creating variables without gradients + # It consumes extra memory and may cause allreduce to fail + out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + if stage_idx == 4: + dilation = res4_dilation + elif stage_idx == 5: + dilation = res5_dilation + else: + dilation = 1 + first_stride = 1 if idx == 0 or dilation > 1 else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), + "in_channels": in_channels, + "out_channels": out_channels, + "norm": norm, + } + stage_kargs["bottleneck_channels"] = bottleneck_channels + stage_kargs["stride_in_1x1"] = stride_in_1x1 + stage_kargs["dilation"] = dilation + stage_kargs["num_groups"] = num_groups + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + if stage_idx == 5: + stage_kargs.pop("dilation") + stage_kargs["dilation_per_block"] = [dilation * mg for mg in res5_multi_grid] + blocks = ResNet.make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features).freeze(freeze_at) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/semantic_seg.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/semantic_seg.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad38aa610ebc2f0ffcab914aa20558fffef9576 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/projects/deeplab/semantic_seg.py @@ -0,0 +1,348 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import Callable, Dict, List, Optional, Tuple, Union +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.layers import ASPP, Conv2d, DepthwiseSeparableConv2d, ShapeSpec, get_norm +from custom_detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from .loss import DeepLabCE + + +@SEM_SEG_HEADS_REGISTRY.register() +class DeepLabV3PlusHead(nn.Module): + """ + A semantic segmentation head described in :paper:`DeepLabV3+`. + """ + + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + project_channels: List[int], + aspp_dilations: List[int], + aspp_dropout: float, + decoder_channels: List[int], + common_stride: int, + norm: Union[str, Callable], + train_size: Optional[Tuple], + loss_weight: float = 1.0, + loss_type: str = "cross_entropy", + ignore_value: int = -1, + num_classes: Optional[int] = None, + use_depthwise_separable_conv: bool = False, + ): + """ + NOTE: this interface is experimental. + + Args: + input_shape: shape of the input features. They will be ordered by stride + and the last one (with largest stride) is used as the input to the + decoder (i.e. the ASPP module); the rest are low-level feature for + the intermediate levels of decoder. + project_channels (list[int]): a list of low-level feature channels. 
+            The length should be len(in_features) - 1.
+        aspp_dilations (list(int)): a list of 3 dilations in ASPP.
+        aspp_dropout (float): apply dropout on the output of ASPP.
+        decoder_channels (list[int]): a list of output channels of each
+            decoder stage. It should have the same length as "in_features"
+            (each element in "in_features" corresponds to one decoder stage).
+        common_stride (int): output stride of decoder.
+        norm (str or callable): normalization for all conv layers.
+        train_size (tuple): (height, width) of training images.
+        loss_weight (float): loss weight.
+        loss_type (str): type of loss function, two options:
+            (1) "cross_entropy" is the standard cross entropy loss.
+            (2) "hard_pixel_mining" is the loss in DeepLab that samples
+                top k% hardest pixels.
+        ignore_value (int): category to be ignored during training.
+        num_classes (int): number of classes, if set to None, the decoder
+            will not construct a predictor.
+        use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
+            in ASPP and decoder.
+        """
+        super().__init__()
+        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
+
+        # fmt: off
+        self.in_features   = [k for k, v in input_shape]  # starting from "res2" to "res5"
+        in_channels        = [x[1].channels for x in input_shape]
+        in_strides         = [x[1].stride for x in input_shape]
+        aspp_channels      = decoder_channels[-1]
+        self.ignore_value  = ignore_value
+        self.common_stride = common_stride  # output stride
+        self.loss_weight   = loss_weight
+        self.loss_type     = loss_type
+        self.decoder_only  = num_classes is None
+        self.use_depthwise_separable_conv = use_depthwise_separable_conv
+        # fmt: on
+
+        assert (
+            len(project_channels) == len(self.in_features) - 1
+        ), "Expected {} project_channels, got {}".format(
+            len(self.in_features) - 1, len(project_channels)
+        )
+        assert len(decoder_channels) == len(
+            self.in_features
+        ), "Expected {} decoder_channels, got {}".format(
+            len(self.in_features), len(decoder_channels)
+        )
+        self.decoder = nn.ModuleDict()
+
+        use_bias = norm == ""
+        for idx, in_channel in enumerate(in_channels):
+            decoder_stage = nn.ModuleDict()
+
+            if idx == len(self.in_features) - 1:
+                # ASPP module
+                if train_size is not None:
+                    train_h, train_w = train_size
+                    encoder_stride = in_strides[-1]
+                    if train_h % encoder_stride or train_w % encoder_stride:
+                        raise ValueError("Crop size needs to be divisible by encoder stride.")
+                    pool_h = train_h // encoder_stride
+                    pool_w = train_w // encoder_stride
+                    pool_kernel_size = (pool_h, pool_w)
+                else:
+                    pool_kernel_size = None
+                project_conv = ASPP(
+                    in_channel,
+                    aspp_channels,
+                    aspp_dilations,
+                    norm=norm,
+                    activation=F.relu,
+                    pool_kernel_size=pool_kernel_size,
+                    dropout=aspp_dropout,
+                    use_depthwise_separable_conv=use_depthwise_separable_conv,
+                )
+                fuse_conv = None
+            else:
+                project_conv = Conv2d(
+                    in_channel,
+                    project_channels[idx],
+                    kernel_size=1,
+                    bias=use_bias,
+                    norm=get_norm(norm, project_channels[idx]),
+                    activation=F.relu,
+                )
+                weight_init.c2_xavier_fill(project_conv)
+                if use_depthwise_separable_conv:
+                    # We use a single 5x5 DepthwiseSeparableConv2d to replace
+                    # two 3x3 Conv2d layers, since they have the same receptive
+                    # field, as proposed in :paper:`Panoptic-DeepLab`.
+ fuse_conv = DepthwiseSeparableConv2d( + project_channels[idx] + decoder_channels[idx + 1], + decoder_channels[idx], + kernel_size=5, + padding=2, + norm1=norm, + activation1=F.relu, + norm2=norm, + activation2=F.relu, + ) + else: + fuse_conv = nn.Sequential( + Conv2d( + project_channels[idx] + decoder_channels[idx + 1], + decoder_channels[idx], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[idx]), + activation=F.relu, + ), + Conv2d( + decoder_channels[idx], + decoder_channels[idx], + kernel_size=3, + padding=1, + bias=use_bias, + norm=get_norm(norm, decoder_channels[idx]), + activation=F.relu, + ), + ) + weight_init.c2_xavier_fill(fuse_conv[0]) + weight_init.c2_xavier_fill(fuse_conv[1]) + + decoder_stage["project_conv"] = project_conv + decoder_stage["fuse_conv"] = fuse_conv + + self.decoder[self.in_features[idx]] = decoder_stage + + if not self.decoder_only: + self.predictor = Conv2d( + decoder_channels[0], num_classes, kernel_size=1, stride=1, padding=0 + ) + nn.init.normal_(self.predictor.weight, 0, 0.001) + nn.init.constant_(self.predictor.bias, 0) + + if self.loss_type == "cross_entropy": + self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value) + elif self.loss_type == "hard_pixel_mining": + self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2) + else: + raise ValueError("Unexpected loss type: %s" % self.loss_type) + + @classmethod + def from_config(cls, cfg, input_shape): + if cfg.INPUT.CROP.ENABLED: + assert cfg.INPUT.CROP.TYPE == "absolute" + train_size = cfg.INPUT.CROP.SIZE + else: + train_size = None + decoder_channels = [cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM] * ( + len(cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES) - 1 + ) + [cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS] + ret = dict( + input_shape={ + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + }, + project_channels=cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS, + aspp_dilations=cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS, + aspp_dropout=cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT, + decoder_channels=decoder_channels, + common_stride=cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE, + norm=cfg.MODEL.SEM_SEG_HEAD.NORM, + train_size=train_size, + loss_weight=cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, + loss_type=cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE, + ignore_value=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + use_depthwise_separable_conv=cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV, + ) + return ret + + def forward(self, features, targets=None): + """ + Returns: + In training, returns (None, dict of losses) + In inference, returns (CxHxW logits, {}) + """ + y = self.layers(features) + if self.decoder_only: + # Output from self.layers() only contains decoder feature. 
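+            # (Illustrative: with common_stride == 4, y is roughly an
+            # N x decoder_channels[0] x H/4 x W/4 tensor; these shapes are not
+            # asserted anywhere in the code.)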
+            return y
+        if self.training:
+            return None, self.losses(y, targets)
+        else:
+            y = F.interpolate(
+                y, scale_factor=self.common_stride, mode="bilinear", align_corners=False
+            )
+            return y, {}
+
+    def layers(self, features):
+        # Reverse feature maps into top-down order (from low to high resolution)
+        for f in self.in_features[::-1]:
+            x = features[f]
+            proj_x = self.decoder[f]["project_conv"](x)
+            if self.decoder[f]["fuse_conv"] is None:
+                # This is the ASPP module
+                y = proj_x
+            else:
+                # Upsample y
+                y = F.interpolate(y, size=proj_x.size()[2:], mode="bilinear", align_corners=False)
+                y = torch.cat([proj_x, y], dim=1)
+                y = self.decoder[f]["fuse_conv"](y)
+        if not self.decoder_only:
+            y = self.predictor(y)
+        return y
+
+    def losses(self, predictions, targets):
+        predictions = F.interpolate(
+            predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False
+        )
+        loss = self.loss(predictions, targets)
+        losses = {"loss_sem_seg": loss * self.loss_weight}
+        return losses
+
+
+@SEM_SEG_HEADS_REGISTRY.register()
+class DeepLabV3Head(nn.Module):
+    """
+    A semantic segmentation head described in :paper:`DeepLabV3`.
+    """
+
+    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
+        super().__init__()
+
+        # fmt: off
+        self.in_features   = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
+        in_channels        = [input_shape[f].channels for f in self.in_features]
+        aspp_channels      = cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS
+        aspp_dilations     = cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS
+        self.ignore_value  = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
+        num_classes        = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
+        conv_dims          = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
+        self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE  # output stride
+        norm               = cfg.MODEL.SEM_SEG_HEAD.NORM
+        self.loss_weight   = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
+        self.loss_type     = cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE
+        train_crop_size    = cfg.INPUT.CROP.SIZE
+        aspp_dropout       = cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT
+        use_depthwise_separable_conv = cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV
+        # fmt: on
+
+        assert len(self.in_features) == 1
+        assert len(in_channels) == 1
+
+        # ASPP module
+        if cfg.INPUT.CROP.ENABLED:
+            assert cfg.INPUT.CROP.TYPE == "absolute"
+            train_crop_h, train_crop_w = train_crop_size
+            if train_crop_h % self.common_stride or train_crop_w % self.common_stride:
+                raise ValueError("Crop size needs to be divisible by output stride.")
+            pool_h = train_crop_h // self.common_stride
+            pool_w = train_crop_w // self.common_stride
+            pool_kernel_size = (pool_h, pool_w)
+        else:
+            pool_kernel_size = None
+        self.aspp = ASPP(
+            in_channels[0],
+            aspp_channels,
+            aspp_dilations,
+            norm=norm,
+            activation=F.relu,
+            pool_kernel_size=pool_kernel_size,
+            dropout=aspp_dropout,
+            use_depthwise_separable_conv=use_depthwise_separable_conv,
+        )
+
+        self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
+        nn.init.normal_(self.predictor.weight, 0, 0.001)
+        nn.init.constant_(self.predictor.bias, 0)
+
+        if self.loss_type == "cross_entropy":
+            self.loss = nn.CrossEntropyLoss(reduction="mean", ignore_index=self.ignore_value)
+        elif self.loss_type == "hard_pixel_mining":
+            self.loss = DeepLabCE(ignore_label=self.ignore_value, top_k_percent_pixels=0.2)
+        else:
+            raise ValueError("Unexpected loss type: %s" % self.loss_type)
+
+    def forward(self, features, targets=None):
+        """
+        Returns:
+            In training, returns (None, dict of losses)
+            In inference, returns (CxHxW logits, {})
+        """
+        x = features[self.in_features[0]]
+        x = self.aspp(x)
+        x = self.predictor(x)
+        if
self.training: + return None, self.losses(x, targets) + else: + x = F.interpolate( + x, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + return x, {} + + def losses(self, predictions, targets): + predictions = F.interpolate( + predictions, scale_factor=self.common_stride, mode="bilinear", align_corners=False + ) + loss = self.loss(predictions, targets) + losses = {"loss_sem_seg": loss * self.loss_weight} + return losses diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7e36c64f60f38f41d01dd2c9fb30364489a03841 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params +from .lr_scheduler import ( + LRMultiplier, + LRScheduler, + WarmupCosineLR, + WarmupMultiStepLR, + WarmupParamScheduler, +) + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/build.py new file mode 100644 index 0000000000000000000000000000000000000000..5630936e9d52102e07029a685f79668476b6b141 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/build.py @@ -0,0 +1,310 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import itertools +import logging +from collections import defaultdict +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union +import torch +from fvcore.common.param_scheduler import ( + CosineParamScheduler, + MultiStepParamScheduler, + StepWithFixedGammaParamScheduler, +) + +from custom_detectron2.config import CfgNode +from custom_detectron2.utils.env import TORCH_VERSION + +from .lr_scheduler import LRMultiplier, LRScheduler, WarmupParamScheduler + +_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] +_GradientClipper = Callable[[_GradientClipperInput], None] + + +class GradientClipType(Enum): + VALUE = "value" + NORM = "norm" + + +def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper: + """ + Creates gradient clipping closure to clip by value or by norm, + according to the provided config. 
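+
+    The fields read from ``cfg`` (see the closures below) are ``CLIP_TYPE``
+    ("value" or "norm"), ``CLIP_VALUE``, and, for norm clipping, ``NORM_TYPE``.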
+ """ + cfg = copy.deepcopy(cfg) + + def clip_grad_norm(p: _GradientClipperInput): + torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) + + def clip_grad_value(p: _GradientClipperInput): + torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) + + _GRADIENT_CLIP_TYPE_TO_CLIPPER = { + GradientClipType.VALUE: clip_grad_value, + GradientClipType.NORM: clip_grad_norm, + } + return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] + + +def _generate_optimizer_class_with_gradient_clipping( + optimizer: Type[torch.optim.Optimizer], + *, + per_param_clipper: Optional[_GradientClipper] = None, + global_clipper: Optional[_GradientClipper] = None, +) -> Type[torch.optim.Optimizer]: + """ + Dynamically creates a new type that inherits the type of a given instance + and overrides the `step` method to add gradient clipping + """ + assert ( + per_param_clipper is None or global_clipper is None + ), "Not allowed to use both per-parameter clipping and global clipping" + + def optimizer_wgc_step(self, closure=None): + if per_param_clipper is not None: + for group in self.param_groups: + for p in group["params"]: + per_param_clipper(p) + else: + # global clipper for future use with detr + # (https://github.com/facebookresearch/detr/pull/287) + all_params = itertools.chain(*[g["params"] for g in self.param_groups]) + global_clipper(all_params) + super(type(self), self).step(closure) + + OptimizerWithGradientClip = type( + optimizer.__name__ + "WithGradientClip", + (optimizer,), + {"step": optimizer_wgc_step}, + ) + return OptimizerWithGradientClip + + +def maybe_add_gradient_clipping( + cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] +) -> Type[torch.optim.Optimizer]: + """ + If gradient clipping is enabled through config options, wraps the existing + optimizer type to become a new dynamically created class OptimizerWithGradientClip + that inherits the given optimizer and overrides the `step` method to + include gradient clipping. + + Args: + cfg: CfgNode, configuration options + optimizer: type. A subclass of torch.optim.Optimizer + + Return: + type: either the input `optimizer` (if gradient clipping is disabled), or + a subclass of it with gradient clipping included in the `step` method. + """ + if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: + return optimizer + if isinstance(optimizer, torch.optim.Optimizer): + optimizer_type = type(optimizer) + else: + assert issubclass(optimizer, torch.optim.Optimizer), optimizer + optimizer_type = optimizer + + grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) + OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( + optimizer_type, per_param_clipper=grad_clipper + ) + if isinstance(optimizer, torch.optim.Optimizer): + optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended + return optimizer + else: + return OptimizerWithGradientClip + + +def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: + """ + Build an optimizer from config. 
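+
+    Per the body below, this returns a ``torch.optim.SGD`` configured from
+    ``cfg.SOLVER`` (base LR, momentum, Nesterov, weight decay), optionally
+    wrapped with gradient clipping by :func:`maybe_add_gradient_clipping`.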
+ """ + params = get_default_optimizer_params( + model, + base_lr=cfg.SOLVER.BASE_LR, + weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, + bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, + weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, + ) + sgd_args = { + "params": params, + "lr": cfg.SOLVER.BASE_LR, + "momentum": cfg.SOLVER.MOMENTUM, + "nesterov": cfg.SOLVER.NESTEROV, + "weight_decay": cfg.SOLVER.WEIGHT_DECAY, + } + if TORCH_VERSION >= (1, 12): + sgd_args["foreach"] = True + return maybe_add_gradient_clipping(cfg, torch.optim.SGD(**sgd_args)) + + +def get_default_optimizer_params( + model: torch.nn.Module, + base_lr: Optional[float] = None, + weight_decay: Optional[float] = None, + weight_decay_norm: Optional[float] = None, + bias_lr_factor: Optional[float] = 1.0, + weight_decay_bias: Optional[float] = None, + lr_factor_func: Optional[Callable] = None, + overrides: Optional[Dict[str, Dict[str, float]]] = None, +) -> List[Dict[str, Any]]: + """ + Get default param list for optimizer, with support for a few types of + overrides. If no overrides needed, this is equivalent to `model.parameters()`. + + Args: + base_lr: lr for every group by default. Can be omitted to use the one in optimizer. + weight_decay: weight decay for every group by default. Can be omitted to use the one + in optimizer. + weight_decay_norm: override weight decay for params in normalization layers + bias_lr_factor: multiplier of lr for bias parameters. + weight_decay_bias: override weight decay for bias parameters. + lr_factor_func: function to calculate lr decay rate by mapping the parameter names to + corresponding lr decay rate. Note that setting this option requires + also setting ``base_lr``. + overrides: if not `None`, provides values for optimizer hyperparameters + (LR, weight decay) for module parameters with a given name; e.g. + ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and + weight decay values for all module parameters named `embedding`. + + For common detection models, ``weight_decay_norm`` is the only option + needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings + from Detectron1 that are not found useful. + + Example: + :: + torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), + lr=0.01, weight_decay=1e-4, momentum=0.9) + """ + if overrides is None: + overrides = {} + defaults = {} + if base_lr is not None: + defaults["lr"] = base_lr + if weight_decay is not None: + defaults["weight_decay"] = weight_decay + bias_overrides = {} + if bias_lr_factor is not None and bias_lr_factor != 1.0: + # NOTE: unlike Detectron v1, we now by default make bias hyperparameters + # exactly the same as regular weights. 
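+        # Illustrative example (hypothetical values): base_lr=0.01 with
+        # bias_lr_factor=2.0 gives bias parameters lr=0.02, i.e. the legacy
+        # Detectron1 behavior.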
+ if base_lr is None: + raise ValueError("bias_lr_factor requires base_lr") + bias_overrides["lr"] = base_lr * bias_lr_factor + if weight_decay_bias is not None: + bias_overrides["weight_decay"] = weight_decay_bias + if len(bias_overrides): + if "bias" in overrides: + raise ValueError("Conflicting overrides for 'bias'") + overrides["bias"] = bias_overrides + if lr_factor_func is not None: + if base_lr is None: + raise ValueError("lr_factor_func requires base_lr") + norm_module_types = ( + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.nn.SyncBatchNorm, + # NaiveSyncBatchNorm inherits from BatchNorm2d + torch.nn.GroupNorm, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.LayerNorm, + torch.nn.LocalResponseNorm, + ) + params: List[Dict[str, Any]] = [] + memo: Set[torch.nn.parameter.Parameter] = set() + for module_name, module in model.named_modules(): + for module_param_name, value in module.named_parameters(recurse=False): + if not value.requires_grad: + continue + # Avoid duplicating parameters + if value in memo: + continue + memo.add(value) + + hyperparams = copy.copy(defaults) + if isinstance(module, norm_module_types) and weight_decay_norm is not None: + hyperparams["weight_decay"] = weight_decay_norm + if lr_factor_func is not None: + hyperparams["lr"] *= lr_factor_func(f"{module_name}.{module_param_name}") + + hyperparams.update(overrides.get(module_param_name, {})) + params.append({"params": [value], **hyperparams}) + return reduce_param_groups(params) + + +def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # Transform parameter groups into per-parameter structure. + # Later items in `params` can overwrite parameters set in previous items. + ret = defaultdict(dict) + for item in params: + assert "params" in item + cur_params = {x: y for x, y in item.items() if x != "params"} + for param in item["params"]: + ret[param].update({"params": [param], **cur_params}) + return list(ret.values()) + + +def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + # Reorganize the parameter groups and merge duplicated groups. + # The number of parameter groups needs to be as small as possible in order + # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead + # of using a parameter_group per single parameter, we reorganize the + # parameter groups and merge duplicated groups. This approach speeds + # up multi-tensor optimizer significantly. + params = _expand_param_groups(params) + groups = defaultdict(list) # re-group all parameter groups by their hyperparams + for item in params: + cur_params = tuple((x, y) for x, y in item.items() if x != "params") + groups[cur_params].extend(item["params"]) + ret = [] + for param_keys, param_values in groups.items(): + cur = {kv[0]: kv[1] for kv in param_keys} + cur["params"] = param_values + ret.append(cur) + return ret + + +def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + + if name == "WarmupMultiStepLR": + steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] + if len(steps) != len(cfg.SOLVER.STEPS): + logger = logging.getLogger(__name__) + logger.warning( + "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " + "These values will be ignored." 
+ ) + sched = MultiStepParamScheduler( + values=[cfg.SOLVER.GAMMA**k for k in range(len(steps) + 1)], + milestones=steps, + num_updates=cfg.SOLVER.MAX_ITER, + ) + elif name == "WarmupCosineLR": + end_value = cfg.SOLVER.BASE_LR_END / cfg.SOLVER.BASE_LR + assert end_value >= 0.0 and end_value <= 1.0, end_value + sched = CosineParamScheduler(1, end_value) + elif name == "WarmupStepWithFixedGammaLR": + sched = StepWithFixedGammaParamScheduler( + base_value=1.0, + gamma=cfg.SOLVER.GAMMA, + num_decays=cfg.SOLVER.NUM_DECAYS, + num_updates=cfg.SOLVER.MAX_ITER, + ) + else: + raise ValueError("Unknown LR scheduler: {}".format(name)) + + sched = WarmupParamScheduler( + sched, + cfg.SOLVER.WARMUP_FACTOR, + min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), + cfg.SOLVER.WARMUP_METHOD, + cfg.SOLVER.RESCALE_INTERVAL, + ) + return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/lr_scheduler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..d6aed2bb20c418bf6cc5594c1244b241796d7086 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/solver/lr_scheduler.py @@ -0,0 +1,246 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import math +from bisect import bisect_right +from typing import List +import torch +from fvcore.common.param_scheduler import ( + CompositeParamScheduler, + ConstantParamScheduler, + LinearParamScheduler, + ParamScheduler, +) + +try: + from torch.optim.lr_scheduler import LRScheduler +except ImportError: + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler + +logger = logging.getLogger(__name__) + + +class WarmupParamScheduler(CompositeParamScheduler): + """ + Add an initial warmup stage to another scheduler. + """ + + def __init__( + self, + scheduler: ParamScheduler, + warmup_factor: float, + warmup_length: float, + warmup_method: str = "linear", + rescale_interval: bool = False, + ): + """ + Args: + scheduler: warmup will be added at the beginning of this scheduler + warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001 + warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire + training, e.g. 0.01 + warmup_method: one of "linear" or "constant" + rescale_interval: whether we will rescale the interval of the scheduler after + warmup + """ + end_value = scheduler(warmup_length) # the value to reach when warmup ends + start_value = warmup_factor * scheduler(0.0) + if warmup_method == "constant": + warmup = ConstantParamScheduler(start_value) + elif warmup_method == "linear": + warmup = LinearParamScheduler(start_value, end_value) + else: + raise ValueError("Unknown warmup method: {}".format(warmup_method)) + super().__init__( + [warmup, scheduler], + interval_scaling=["rescaled", "rescaled" if rescale_interval else "fixed"], + lengths=[warmup_length, 1 - warmup_length], + ) + + +class LRMultiplier(LRScheduler): + """ + A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the + learning rate of each param in the optimizer. + Every step, the learning rate of each parameter becomes its initial value + multiplied by the output of the given :class:`ParamScheduler`. + + The absolute learning rate value of each parameter can be different. + This scheduler can be used as long as the relative scale among them do + not change during training. 
+
+    Examples:
+    ::
+        LRMultiplier(
+            opt,
+            WarmupParamScheduler(
+                MultiStepParamScheduler(
+                    [1, 0.1, 0.01],
+                    milestones=[60000, 80000],
+                    num_updates=90000,
+                ), 0.001, 100 / 90000
+            ),
+            max_iter=90000
+        )
+    """
+
+    # NOTES: in the most general case, every LR can use its own scheduler.
+    # Supporting this requires interaction with the optimizer when its parameter
+    # group is initialized. For example, classyvision implements its own optimizer
+    # that allows different schedulers for every parameter group.
+    # To avoid this complexity, we use this class to support the most common cases
+    # where the relative scale among all LRs stays unchanged during training. In this
+    # case we only need a total of one scheduler that defines the relative LR multiplier.
+
+    def __init__(
+        self,
+        optimizer: torch.optim.Optimizer,
+        multiplier: ParamScheduler,
+        max_iter: int,
+        last_iter: int = -1,
+    ):
+        """
+        Args:
+            optimizer, last_iter: See ``torch.optim.lr_scheduler.LRScheduler``.
+                ``last_iter`` is the same as ``last_epoch``.
+            multiplier: a fvcore ParamScheduler that defines the multiplier on
+                every LR of the optimizer
+            max_iter: the total number of training iterations
+        """
+        if not isinstance(multiplier, ParamScheduler):
+            raise ValueError(
+                "LRMultiplier(multiplier=) must be an instance of fvcore "
+                f"ParamScheduler. Got {multiplier} instead."
+            )
+        self._multiplier = multiplier
+        self._max_iter = max_iter
+        super().__init__(optimizer, last_epoch=last_iter)
+
+    def state_dict(self):
+        # fvcore schedulers are stateless. Only keep pytorch scheduler states
+        return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch}
+
+    def get_lr(self) -> List[float]:
+        multiplier = self._multiplier(self.last_epoch / self._max_iter)
+        return [base_lr * multiplier for base_lr in self.base_lrs]
+
+
+"""
+Content below is no longer needed!
+"""
+
+# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
+# only on epoch boundaries. We typically use iteration based schedules instead.
+# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
+# "iteration" instead.
+
+# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
+# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
+
+
+class WarmupMultiStepLR(LRScheduler):
+    def __init__(
+        self,
+        optimizer: torch.optim.Optimizer,
+        milestones: List[int],
+        gamma: float = 0.1,
+        warmup_factor: float = 0.001,
+        warmup_iters: int = 1000,
+        warmup_method: str = "linear",
+        last_epoch: int = -1,
+    ):
+        logger.warning(
+            "WarmupMultiStepLR is deprecated! Use LRMultiplier with fvcore ParamScheduler instead!"
+        )
+        if not list(milestones) == sorted(milestones):
+            raise ValueError(
+                "Milestones should be a list of increasing integers. Got {}".format(milestones)
+            )
+        self.milestones = milestones
+        self.gamma = gamma
+        self.warmup_factor = warmup_factor
+        self.warmup_iters = warmup_iters
+        self.warmup_method = warmup_method
+        super().__init__(optimizer, last_epoch)
+
+    def get_lr(self) -> List[float]:
+        warmup_factor = _get_warmup_factor_at_iter(
+            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
+        )
+        return [
+            base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
+            for base_lr in self.base_lrs
+        ]
+
+    def _compute_values(self) -> List[float]:
+        # The new interface
+        return self.get_lr()
+
+
+class WarmupCosineLR(LRScheduler):
+    def __init__(
+        self,
+        optimizer: torch.optim.Optimizer,
+        max_iters: int,
+        warmup_factor: float = 0.001,
+        warmup_iters: int = 1000,
+        warmup_method: str = "linear",
+        last_epoch: int = -1,
+    ):
+        logger.warning(
+            "WarmupCosineLR is deprecated! Use LRMultiplier with fvcore ParamScheduler instead!"
+        )
+        self.max_iters = max_iters
+        self.warmup_factor = warmup_factor
+        self.warmup_iters = warmup_iters
+        self.warmup_method = warmup_method
+        super().__init__(optimizer, last_epoch)
+
+    def get_lr(self) -> List[float]:
+        warmup_factor = _get_warmup_factor_at_iter(
+            self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
+        )
+        # Different definitions of half-cosine with warmup are possible. For
+        # simplicity we multiply the standard half-cosine schedule by the warmup
+        # factor. An alternative is to start the period of the cosine at warmup_iters
+        # instead of at 0. In the case that warmup_iters << max_iters the two are
+        # very close to each other.
+        return [
+            base_lr
+            * warmup_factor
+            * 0.5
+            * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
+            for base_lr in self.base_lrs
+        ]
+
+    def _compute_values(self) -> List[float]:
+        # The new interface
+        return self.get_lr()
+
+
+def _get_warmup_factor_at_iter(
+    method: str, iter: int, warmup_iters: int, warmup_factor: float
+) -> float:
+    """
+    Return the learning rate warmup factor at a specific iteration.
+    See :paper:`ImageNet in 1h` for more details.
+
+    Args:
+        method (str): warmup method; either "constant" or "linear".
+        iter (int): iteration at which to calculate the warmup factor.
+        warmup_iters (int): the number of warmup iterations.
+        warmup_factor (float): the base warmup factor (the meaning changes according
+            to the method used).
+
+    Returns:
+        float: the effective warmup factor at the given iteration.
+    """
+    if iter >= warmup_iters:
+        return 1.0
+
+    if method == "constant":
+        return warmup_factor
+    elif method == "linear":
+        alpha = iter / warmup_iters
+        return warmup_factor * (1 - alpha) + alpha
+    else:
+        raise ValueError("Unknown warmup method: {}".format(method))
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a23142782a68816b64fe0e3690f099b83008e9df
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .boxes import Boxes, BoxMode, pairwise_iou, pairwise_ioa, pairwise_point_box_distance +from .image_list import ImageList + +from .instances import Instances +from .keypoints import Keypoints, heatmaps_to_keypoints +from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks +from .rotated_boxes import RotatedBoxes +from .rotated_boxes import pairwise_iou as pairwise_iou_rotated + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +from custom_detectron2.utils.env import fixup_module_metadata + +fixup_module_metadata(__name__, globals(), __all__) +del fixup_module_metadata diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/boxes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..fd396f68645db1d6946056eed868ffcc02cd7a22 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/boxes.py @@ -0,0 +1,425 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +import numpy as np +from enum import IntEnum, unique +from typing import List, Tuple, Union +import torch +from torch import device + +_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] + + +@unique +class BoxMode(IntEnum): + """ + Enum of different ways to represent a box. + """ + + XYXY_ABS = 0 + """ + (x0, y0, x1, y1) in absolute floating points coordinates. + The coordinates in range [0, width or height]. + """ + XYWH_ABS = 1 + """ + (x0, y0, w, h) in absolute floating points coordinates. + """ + XYXY_REL = 2 + """ + Not yet supported! + (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. + """ + XYWH_REL = 3 + """ + Not yet supported! + (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. + """ + XYWHA_ABS = 4 + """ + (xc, yc, w, h, a) in absolute floating points coordinates. + (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. + """ + + @staticmethod + def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: + """ + Args: + box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 + from_mode, to_mode (BoxMode) + + Returns: + The converted box of the same type. + """ + if from_mode == to_mode: + return box + + original_type = type(box) + is_numpy = isinstance(box, np.ndarray) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) == 4 or len(box) == 5, ( + "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," + " where k == 4 or 5" + ) + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + else: + arr = box.clone() + + assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ + BoxMode.XYXY_REL, + BoxMode.XYWH_REL, + ], "Relative mode not yet supported!" 
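+
+        # Editor's note (hypothetical values): a k-tuple round-trips through the
+        # tensor path below, e.g.
+        #   BoxMode.convert([10, 10, 20, 30], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
+        # returns [10, 10, 30, 40] (w and h are added onto x0 and y0).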
+ + if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: + assert ( + arr.shape[-1] == 5 + ), "The last dimension of input shape must be 5 for XYWHA format" + original_dtype = arr.dtype + arr = arr.double() + + w = arr[:, 2] + h = arr[:, 3] + a = arr[:, 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + new_w = c * w + s * h + new_h = c * h + s * w + + # convert center to top-left corner + arr[:, 0] -= new_w / 2.0 + arr[:, 1] -= new_h / 2.0 + # bottom-right corner + arr[:, 2] = arr[:, 0] + new_w + arr[:, 3] = arr[:, 1] + new_h + + arr = arr[:, :4].to(dtype=original_dtype) + elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: + original_dtype = arr.dtype + arr = arr.double() + arr[:, 0] += arr[:, 2] / 2.0 + arr[:, 1] += arr[:, 3] / 2.0 + angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) + arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) + else: + if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: + arr[:, 2] += arr[:, 0] + arr[:, 3] += arr[:, 1] + elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: + arr[:, 2] -= arr[:, 0] + arr[:, 3] -= arr[:, 1] + else: + raise NotImplementedError( + "Conversion from BoxMode {} to {} is not supported yet".format( + from_mode, to_mode + ) + ) + + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + else: + return arr + + +class Boxes: + """ + This structure stores a list of boxes as a Nx4 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + + Attributes: + tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). + """ + if not isinstance(tensor, torch.Tensor): + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device("cpu")) + else: + tensor = tensor.to(torch.float32) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32) + assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() + + self.tensor = tensor + + def clone(self) -> "Boxes": + """ + Clone the Boxes. + + Returns: + Boxes + """ + return Boxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return Boxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + def clip(self, box_size: Tuple[int, int]) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + Args: + box_size (height, width): The clipping box's size. + """ + assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" 
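+        # Editor's note (hypothetical values): with box_size=(10, 10), a box
+        # [-2.0, 3.0, 12.0, 8.0] is clamped in place to [0.0, 3.0, 10.0, 8.0].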
+ h, w = box_size + x1 = self.tensor[:, 0].clamp(min=0, max=w) + y1 = self.tensor[:, 1].clamp(min=0, max=h) + x2 = self.tensor[:, 2].clamp(min=0, max=w) + y2 = self.tensor[:, 3].clamp(min=0, max=h) + self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: + a binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] - box[:, 0] + heights = box[:, 3] - box[:, 1] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "Boxes": + """ + Args: + item: int, slice, or a BoolTensor + + Returns: + Boxes: Create a new :class:`Boxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Boxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) + return Boxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "Boxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box. + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + inds_inside = ( + (self.tensor[..., 0] >= -boundary_threshold) + & (self.tensor[..., 1] >= -boundary_threshold) + & (self.tensor[..., 2] < width + boundary_threshold) + & (self.tensor[..., 3] < height + boundary_threshold) + ) + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the box with horizontal and vertical scaling factors + """ + self.tensor[:, 0::2] *= scale_x + self.tensor[:, 1::2] *= scale_y + + @classmethod + def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": + """ + Concatenates a list of Boxes into a single Boxes + + Arguments: + boxes_list (list[Boxes]) + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, Boxes) for box in boxes_list]) + + # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input
+        cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))
+        return cat_boxes
+
+    @property
+    def device(self) -> device:
+        return self.tensor.device
+
+    # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript
+    # https://github.com/pytorch/pytorch/issues/18627
+    @torch.jit.unused
+    def __iter__(self):
+        """
+        Yield a box as a Tensor of shape (4,) at a time.
+        """
+        yield from self.tensor
+
+
+def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
+    """
+    Given two lists of boxes of size N and M,
+    compute the intersection area between __all__ N x M pairs of boxes.
+    The box order must be (xmin, ymin, xmax, ymax).
+
+    Args:
+        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
+
+    Returns:
+        Tensor: intersection, sized [N,M].
+    """
+    boxes1, boxes2 = boxes1.tensor, boxes2.tensor
+    width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(
+        boxes1[:, None, :2], boxes2[:, :2]
+    )  # [N,M,2]
+
+    width_height.clamp_(min=0)  # [N,M,2]
+    intersection = width_height.prod(dim=2)  # [N,M]
+    return intersection
+
+
+# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
+# with slight modifications
+def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
+    """
+    Given two lists of boxes of size N and M, compute the IoU
+    (intersection over union) between **all** N x M pairs of boxes.
+    The box order must be (xmin, ymin, xmax, ymax).
+
+    Args:
+        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
+
+    Returns:
+        Tensor: IoU, sized [N,M].
+    """
+    area1 = boxes1.area()  # [N]
+    area2 = boxes2.area()  # [M]
+    inter = pairwise_intersection(boxes1, boxes2)
+
+    # handle empty boxes
+    iou = torch.where(
+        inter > 0,
+        inter / (area1[:, None] + area2 - inter),
+        torch.zeros(1, dtype=inter.dtype, device=inter.device),
+    )
+    return iou
+
+
+def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
+    """
+    Similar to :func:`pairwise_iou` but computes the IoA (intersection over boxes2 area).
+
+    Args:
+        boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
+
+    Returns:
+        Tensor: IoA, sized [N,M].
+    """
+    area2 = boxes2.area()  # [M]
+    inter = pairwise_intersection(boxes1, boxes2)
+
+    # handle empty boxes
+    ioa = torch.where(
+        inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)
+    )
+    return ioa
+
+
+def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes):
+    """
+    Pairwise distance between N points and M boxes. The distance between a
+    point and a box is represented by the distance from the point to 4 edges
+    of the box. Distances are all positive when the point is inside the box.
+
+    Args:
+        points: Nx2 coordinates. Each row is (x, y)
+        boxes: M boxes
+
+    Returns:
+        Tensor: distances of size (N, M, 4). The 4 values are distances from
+            the point to the left, top, right, bottom of the box.
+    """
+    x, y = points.unsqueeze(dim=2).unbind(dim=1)  # (N, 1)
+    x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2)  # (1, M)
+    return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2)
+
+
+def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
+    """
+    Compute pairwise intersection over union (IOU) of two sets of matched
+    boxes that have the same number of boxes.
+    Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix.
+
+    Args:
+        boxes1 (Boxes): bounding boxes, sized [N,4].
+        boxes2 (Boxes): same length as boxes1
+    Returns:
+        Tensor: iou, sized [N].
+    """
+    assert len(boxes1) == len(
+        boxes2
+    ), "boxlists should have the same number of entries, got {}, {}".format(
+        len(boxes1), len(boxes2)
+    )
+    area1 = boxes1.area()  # [N]
+    area2 = boxes2.area()  # [N]
+    box1, box2 = boxes1.tensor, boxes2.tensor
+    lt = torch.max(box1[:, :2], box2[:, :2])  # [N,2]
+    rb = torch.min(box1[:, 2:], box2[:, 2:])  # [N,2]
+    wh = (rb - lt).clamp(min=0)  # [N,2]
+    inter = wh[:, 0] * wh[:, 1]  # [N]
+    iou = inter / (area1 + area2 - inter)  # [N]
+    return iou
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/image_list.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/image_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c3d0ebf9517eaf6a651dab4f29cd549ee4784a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/image_list.py
@@ -0,0 +1,129 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from __future__ import division
+from typing import Any, Dict, List, Optional, Tuple
+import torch
+from torch import device
+from torch.nn import functional as F
+
+from custom_detectron2.layers.wrappers import move_device_like, shapes_to_tensor
+
+
+class ImageList(object):
+    """
+    Structure that holds a list of images (of possibly
+    varying sizes) as a single tensor.
+    This works by padding the images to the same size.
+    The original size of each image is stored in `image_sizes`.
+
+    Attributes:
+        image_sizes (list[tuple[int, int]]): each tuple is (h, w).
+            During tracing, it becomes list[Tensor] instead.
+    """
+
+    def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
+        """
+        Arguments:
+            tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
+            image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
+                be smaller than (H, W) due to padding.
+        """
+        self.tensor = tensor
+        self.image_sizes = image_sizes
+
+    def __len__(self) -> int:
+        return len(self.image_sizes)
+
+    def __getitem__(self, idx) -> torch.Tensor:
+        """
+        Access the individual image in its original size.
+
+        Args:
+            idx: int or slice
+
+        Returns:
+            Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
+        """
+        size = self.image_sizes[idx]
+        return self.tensor[idx, ..., : size[0], : size[1]]
+
+    @torch.jit.unused
+    def to(self, *args: Any, **kwargs: Any) -> "ImageList":
+        cast_tensor = self.tensor.to(*args, **kwargs)
+        return ImageList(cast_tensor, self.image_sizes)
+
+    @property
+    def device(self) -> device:
+        return self.tensor.device
+
+    @staticmethod
+    def from_tensors(
+        tensors: List[torch.Tensor],
+        size_divisibility: int = 0,
+        pad_value: float = 0.0,
+        padding_constraints: Optional[Dict[str, int]] = None,
+    ) -> "ImageList":
+        """
+        Args:
+            tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
+                (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
+                to the same shape with `pad_value`.
+            size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
+                the common height and width is divisible by `size_divisibility`.
+                This depends on the model and many models need a divisibility of 32.
+            pad_value (float): value to pad.
+            padding_constraints (optional[Dict]): If given, it follows the format
+                {"size_divisibility": int, "square_size": int}, where `size_divisibility`
+                overwrites the argument above if present, and `square_size` indicates the
+                square padding size if `square_size` > 0.
+ Returns: + an `ImageList`. + """ + assert len(tensors) > 0 + assert isinstance(tensors, (tuple, list)) + for t in tensors: + assert isinstance(t, torch.Tensor), type(t) + assert t.shape[:-2] == tensors[0].shape[:-2], t.shape + + image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] + image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] + max_size = torch.stack(image_sizes_tensor).max(0).values + + if padding_constraints is not None: + square_size = padding_constraints.get("square_size", 0) + if square_size > 0: + # pad to square. + max_size[0] = max_size[1] = square_size + if "size_divisibility" in padding_constraints: + size_divisibility = padding_constraints["size_divisibility"] + if size_divisibility > 1: + stride = size_divisibility + # the last two dims are H,W, both subject to divisibility requirement + max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride + + # handle weirdness of scripting and tracing ... + if torch.jit.is_scripting(): + max_size: List[int] = max_size.to(dtype=torch.long).tolist() + else: + if torch.jit.is_tracing(): + image_sizes = image_sizes_tensor + + if len(tensors) == 1: + # This seems slightly (2%) faster. + # TODO: check whether it's faster for multiple images as well + image_size = image_sizes[0] + padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] + batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) + else: + # max_size can be a tensor in tracing mode, therefore convert to list + batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) + device = ( + None if torch.jit.is_scripting() else ("cpu" if torch.jit.is_tracing() else None) + ) + batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device) + batched_imgs = move_device_like(batched_imgs, tensors[0]) + for i, img in enumerate(tensors): + # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)` + # Tracing mode cannot capture `copy_()` of temporary locals + batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img) + + return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/instances.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/instances.py new file mode 100644 index 0000000000000000000000000000000000000000..c9579bce2730f42e256c6eed99d9014d09304c99 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/instances.py @@ -0,0 +1,194 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import warnings +from typing import Any, Dict, List, Tuple, Union +import torch + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". + All fields must have the same ``__len__`` which is the number of instances. + + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + + Some basic usage: + + 1. Set/get/check a field: + + .. code-block:: python + + instances.gt_boxes = Boxes(...) + print(instances.pred_masks) # a tensor of shape (N, H, W) + print('gt_masks' in instances) + + 2. ``len(instances)`` returns the number of instances + 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields + and returns a new :class:`Instances`. 
+ Typically, ``indices`` is a integer vector of indices, + or a binary mask of length ``num_instances`` + + .. code-block:: python + + category_3_detections = instances[instances.pred_classes == 3] + confident_detections = instances[instances.scores > 0.9] + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + with warnings.catch_warnings(record=True): + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. + """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, *args: Any, **kwargs: Any) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(*args, **kwargs) + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. 
+ """ + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError("Instances index out of range!") + else: + item = slice(item, None, len(self)) + + ret = Instances(self._image_size) + for k, v in self._fields.items(): + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + # use __len__ because len() has to be int and is not friendly to tracing + return v.__len__() + raise NotImplementedError("Empty Instances does not support __len__!") + + def __iter__(self): + raise NotImplementedError("`Instances` object is not iterable!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) + return s + + __repr__ = __str__ diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/keypoints.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/keypoints.py new file mode 100644 index 0000000000000000000000000000000000000000..b93ebed4f6554e67ba9bde8d3af90e8dbb3246b6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/keypoints.py @@ -0,0 +1,235 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Any, List, Tuple, Union +import torch +from torch.nn import functional as F + + +class Keypoints: + """ + Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property + containing the x,y location and visibility flag of each keypoint. This tensor has shape + (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. + + The visibility flag follows the COCO format and must be one of three integers: + + * v=0: not labeled (in which case x=y=0) + * v=1: labeled but not visible + * v=2: labeled and visible + """ + + def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): + """ + Arguments: + keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. + The shape should be (N, K, 3) where N is the number of + instances, and K is the number of keypoints per instance. 
+ """ + device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") + keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) + assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape + self.tensor = keypoints + + def __len__(self) -> int: + return self.tensor.size(0) + + def to(self, *args: Any, **kwargs: Any) -> "Keypoints": + return type(self)(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: + """ + Convert keypoint annotations to a heatmap of one-hot labels for training, + as described in :paper:`Mask R-CNN`. + + Arguments: + boxes: Nx4 tensor, the boxes to draw the keypoints to + + Returns: + heatmaps: + A tensor of shape (N, K), each element is integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: + A tensor of shape (N, K) containing whether each keypoint is in the roi or not. + """ + return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": + """ + Create a new `Keypoints` by indexing on this `Keypoints`. + + The following usage are allowed: + + 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. + 2. `new_kpts = kpts[2:10]`: return a slice of key points. + 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor + with `length = len(kpts)`. Nonzero elements in the vector will be selected. + + Note that the returned Keypoints might share storage with this Keypoints, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Keypoints([self.tensor[item]]) + return Keypoints(self.tensor[item]) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @staticmethod + def cat(keypoints_list: List["Keypoints"]) -> "Keypoints": + """ + Concatenates a list of Keypoints into a single Keypoints + + Arguments: + keypoints_list (list[Keypoints]) + + Returns: + Keypoints: the concatenated Keypoints + """ + assert isinstance(keypoints_list, (list, tuple)) + assert len(keypoints_list) > 0 + assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list) + + cat_kpts = type(keypoints_list[0])( + torch.cat([kpts.tensor for kpts in keypoints_list], dim=0) + ) + return cat_kpts + + +# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) +def _keypoints_to_heatmap( + keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. + + Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the + closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the + continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): + d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + + Arguments: + keypoints: tensor of keypoint locations in of shape (N, K, 3). + rois: Nx4 tensor of rois in xyxy format + heatmap_size: integer side length of square heatmap. + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. 
+ valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. + """ + + if rois.numel() == 0: + return rois.new().long(), rois.new().long() + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +@torch.jit.script_if_tracing +def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """ + Extract predicted keypoint locations from heatmaps. + + Args: + maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for + each ROI and each keypoint. + rois (Tensor): (#ROIs, 4). The box of each ROI. + + Returns: + Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to + (x, y, logit, score) for each keypoint. + + When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, + we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from + Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. 
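+    For example (editor's note), the discrete pixel index d = 3 maps to the
+    continuous coordinate c = 3.5, the center of that pixel.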
+ """ + + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) + heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_rois, num_keypoints = maps.shape[:2] + xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) + + width_corrections = widths / widths_ceil + height_corrections = heights / heights_ceil + + keypoints_idx = torch.arange(num_keypoints, device=maps.device) + + for i in range(num_rois): + outsize = (int(heights_ceil[i]), int(widths_ceil[i])) + roi_map = F.interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False) + + # Although semantically equivalent, `reshape` is used instead of `squeeze` due + # to limitation during ONNX export of `squeeze` in scripting mode + roi_map = roi_map.reshape(roi_map.shape[1:]) # keypoints x H x W + + # softmax over the spatial region + max_score, _ = roi_map.view(num_keypoints, -1).max(1) + max_score = max_score.view(num_keypoints, 1, 1) + tmp_full_resolution = (roi_map - max_score).exp_() + tmp_pool_resolution = (maps[i] - max_score).exp_() + # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, + # so that the scores of objects of different absolute sizes will be more comparable + roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) + + w = roi_map.shape[2] + pos = roi_map.view(num_keypoints, -1).argmax(1) + + x_int = pos % w + y_int = (pos - x_int) // w + + assert ( + roi_map_scores[keypoints_idx, y_int, x_int] + == roi_map_scores.view(num_keypoints, -1).max(1)[0] + ).all() + + x = (x_int.float() + 0.5) * width_corrections[i] + y = (y_int.float() + 0.5) * height_corrections[i] + + xy_preds[i, :, 0] = x + offset_x[i] + xy_preds[i, :, 1] = y + offset_y[i] + xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] + xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] + + return xy_preds diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/masks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/masks.py new file mode 100644 index 0000000000000000000000000000000000000000..cc41656e414b801cb4cb088460caad79b40939f3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/masks.py @@ -0,0 +1,534 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import copy +import itertools +import numpy as np +from typing import Any, Iterator, List, Union +import custom_pycocotools.mask as mask_util +import torch +from torch import device + +from custom_detectron2.layers.roi_align import ROIAlign +from custom_detectron2.utils.memory import retry_if_cuda_oom + +from .boxes import Boxes + + +def polygon_area(x, y): + # Using the shoelace formula + # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: + """ + Args: + polygons (list[ndarray]): each array has shape (Nx2,) + height, width (int) + + Returns: + ndarray: a bool mask of shape (height, width) + """ + if len(polygons) == 0: + # COCOAPI does not support empty polygons + return np.zeros((height, width)).astype(bool) + rles = mask_util.frPyObjects(polygons, height, width) + rle = mask_util.merge(rles) + return mask_util.decode(rle).astype(bool) + + +def rasterize_polygons_within_box( + polygons: List[np.ndarray], box: np.ndarray, mask_size: int +) -> torch.Tensor: + """ + Rasterize the polygons into a mask image and + crop the mask content in the given box. + The cropped mask is resized to (mask_size, mask_size). + + This function is used when generating training targets for mask head in Mask R-CNN. + Given original ground-truth masks for an image, new ground-truth mask + training targets in the size of `mask_size x mask_size` + must be provided for each predicted box. This function will be called to + produce such targets. + + Args: + polygons (list[ndarray[float]]): a list of polygons, which represents an instance. + box: 4-element numpy array + mask_size (int): + + Returns: + Tensor: BoolTensor of shape (mask_size, mask_size) + """ + # 1. Shift the polygons w.r.t the boxes + w, h = box[2] - box[0], box[3] - box[1] + + polygons = copy.deepcopy(polygons) + for p in polygons: + p[0::2] = p[0::2] - box[0] + p[1::2] = p[1::2] - box[1] + + # 2. Rescale the polygons to the new box size + # max() to avoid division by small number + ratio_h = mask_size / max(h, 0.1) + ratio_w = mask_size / max(w, 0.1) + + if ratio_h == ratio_w: + for p in polygons: + p *= ratio_h + else: + for p in polygons: + p[0::2] *= ratio_w + p[1::2] *= ratio_h + + # 3. Rasterize the polygons with coco api + mask = polygons_to_bitmask(polygons, mask_size, mask_size) + mask = torch.from_numpy(mask) + return mask + + +class BitMasks: + """ + This class stores the segmentation masks for all objects in one image, in + the form of bitmaps. + + Attributes: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + + def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): + """ + Args: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + if isinstance(tensor, torch.Tensor): + tensor = tensor.to(torch.bool) + else: + tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device("cpu")) + assert tensor.dim() == 3, tensor.size() + self.image_size = tensor.shape[1:] + self.tensor = tensor + + @torch.jit.unused + def to(self, *args: Any, **kwargs: Any) -> "BitMasks": + return BitMasks(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": + """ + Returns: + BitMasks: Create a new :class:`BitMasks` by indexing. 
+
+        The following usages are allowed:
+
+        1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
+        2. `new_masks = masks[2:10]`: return a slice of masks.
+        3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
+           with `length = len(masks)`. Nonzero elements in the vector will be selected.
+
+        Note that the returned object might share storage with this object,
+        subject to Pytorch's indexing semantics.
+        """
+        if isinstance(item, int):
+            return BitMasks(self.tensor[item].unsqueeze(0))
+        m = self.tensor[item]
+        assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
+            item, m.shape
+        )
+        return BitMasks(m)
+
+    @torch.jit.unused
+    def __iter__(self) -> torch.Tensor:
+        yield from self.tensor
+
+    @torch.jit.unused
+    def __repr__(self) -> str:
+        s = self.__class__.__name__ + "("
+        s += "num_instances={})".format(len(self.tensor))
+        return s
+
+    def __len__(self) -> int:
+        return self.tensor.shape[0]
+
+    def nonempty(self) -> torch.Tensor:
+        """
+        Find masks that are non-empty.
+
+        Returns:
+            Tensor: a BoolTensor which represents
+                whether each mask is empty (False) or non-empty (True).
+        """
+        return self.tensor.flatten(1).any(dim=1)
+
+    @staticmethod
+    def from_polygon_masks(
+        polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
+    ) -> "BitMasks":
+        """
+        Args:
+            polygon_masks (list[list[ndarray]] or PolygonMasks)
+            height, width (int)
+        """
+        if isinstance(polygon_masks, PolygonMasks):
+            polygon_masks = polygon_masks.polygons
+        masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
+        if len(masks):
+            return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
+        else:
+            return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
+
+    @staticmethod
+    def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
+        """
+        Args:
+            roi_masks:
+            height, width (int):
+        """
+        return roi_masks.to_bitmasks(height, width)
+
+    def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
+        """
+        Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
+        This can be used to prepare training targets for Mask R-CNN.
+        It has less reconstruction error compared to rasterization with polygons.
+        However, we observe no difference in accuracy,
+        but BitMasks requires more memory to store all the masks.
+
+        Args:
+            boxes (Tensor): Nx4 tensor storing the boxes for each mask
+            mask_size (int): the size of the rasterized mask.
+
+        Returns:
+            Tensor:
+                A bool tensor of shape (N, mask_size, mask_size), where
+                N is the number of predicted boxes for this image.
+        """
+        assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
+        device = self.tensor.device
+
+        batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
+        rois = torch.cat([batch_inds, boxes], dim=1)  # Nx5
+
+        bit_masks = self.tensor.to(dtype=torch.float32)
+        rois = rois.to(device=device)
+        output = (
+            ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
+            .forward(bit_masks[:, None, :, :], rois)
+            .squeeze(1)
+        )
+        output = output >= 0.5
+        return output
+
+    def get_bounding_boxes(self) -> Boxes:
+        """
+        Returns:
+            Boxes: tight bounding boxes around bitmasks.
+            If a mask is empty, its bounding box will be all zero.
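+
+            For example (editor's note, hypothetical values): a single 3x3 mask that
+            is True at rows 1-2, cols 0-1 yields the box [0., 1., 2., 3.]
+            (i.e. x_first, y_first, x_last + 1, y_last + 1 in pixel indices).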
+ """ + boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) + x_any = torch.any(self.tensor, dim=1) + y_any = torch.any(self.tensor, dim=2) + for idx in range(self.tensor.shape[0]): + x = torch.where(x_any[idx, :])[0] + y = torch.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) > 0: + boxes[idx, :] = torch.as_tensor( + [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 + ) + return Boxes(boxes) + + @staticmethod + def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": + """ + Concatenates a list of BitMasks into a single BitMasks + + Arguments: + bitmasks_list (list[BitMasks]) + + Returns: + BitMasks: the concatenated BitMasks + """ + assert isinstance(bitmasks_list, (list, tuple)) + assert len(bitmasks_list) > 0 + assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) + + cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) + return cat_bitmasks + + +class PolygonMasks: + """ + This class stores the segmentation masks for all objects in one image, in the form of polygons. + + Attributes: + polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. + """ + + def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): + """ + Arguments: + polygons (list[list[np.ndarray]]): The first + level of the list correspond to individual instances, + the second level to all the polygons that compose the + instance, and the third level to the polygon coordinates. + The third level array should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + """ + if not isinstance(polygons, list): + raise ValueError( + "Cannot create PolygonMasks: Expect a list of list of polygons per image. " + "Got '{}' instead.".format(type(polygons)) + ) + + def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: + # Use float64 for higher precision, because why not? + # Always put polygons on CPU (self.to is a no-op) since they + # are supposed to be small tensors. + # May need to change this assumption if GPU placement becomes useful + if isinstance(t, torch.Tensor): + t = t.cpu().numpy() + return np.asarray(t).astype("float64") + + def process_polygons( + polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] + ) -> List[np.ndarray]: + if not isinstance(polygons_per_instance, list): + raise ValueError( + "Cannot create polygons: Expect a list of polygons per instance. " + "Got '{}' instead.".format(type(polygons_per_instance)) + ) + # transform each polygon to a numpy array + polygons_per_instance = [_make_array(p) for p in polygons_per_instance] + for polygon in polygons_per_instance: + if len(polygon) % 2 != 0 or len(polygon) < 6: + raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") + return polygons_per_instance + + self.polygons: List[List[np.ndarray]] = [ + process_polygons(polygons_per_instance) for polygons_per_instance in polygons + ] + + def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": + return self + + @property + def device(self) -> torch.device: + return torch.device("cpu") + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around polygon masks. 
+ """ + boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) + for idx, polygons_per_instance in enumerate(self.polygons): + minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) + maxxy = torch.zeros(2, dtype=torch.float32) + for polygon in polygons_per_instance: + coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) + minxy = torch.min(minxy, torch.min(coords, dim=0).values) + maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) + boxes[idx, :2] = minxy + boxes[idx, 2:] = maxxy + return Boxes(boxes) + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: + a BoolTensor which represents whether each mask is empty (False) or not (True). + """ + keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] + return torch.from_numpy(np.asarray(keep, dtype=bool)) + + def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": + """ + Support indexing over the instances and return a `PolygonMasks` object. + `item` can be: + + 1. An integer. It will return an object with only one instance. + 2. A slice. It will return an object with the selected instances. + 3. A list[int]. It will return an object with the selected instances, + correpsonding to the indices in the list. + 4. A vector mask of type BoolTensor, whose length is num_instances. + It will return an object with the instances whose mask is nonzero. + """ + if isinstance(item, int): + selected_polygons = [self.polygons[item]] + elif isinstance(item, slice): + selected_polygons = self.polygons[item] + elif isinstance(item, list): + selected_polygons = [self.polygons[i] for i in item] + elif isinstance(item, torch.Tensor): + # Polygons is a list, so we have to move the indices back to CPU. + if item.dtype == torch.bool: + assert item.dim() == 1, item.shape + item = item.nonzero().squeeze(1).cpu().numpy().tolist() + elif item.dtype in [torch.int32, torch.int64]: + item = item.cpu().numpy().tolist() + else: + raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) + selected_polygons = [self.polygons[i] for i in item] + return PolygonMasks(selected_polygons) + + def __iter__(self) -> Iterator[List[np.ndarray]]: + """ + Yields: + list[ndarray]: the polygons for one instance. + Each Tensor is a float64 vector representing a polygon. + """ + return iter(self.polygons) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.polygons)) + return s + + def __len__(self) -> int: + return len(self.polygons) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each mask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. 
+ """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + + device = boxes.device + # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise + # (several small tensors for representing a single instance mask) + boxes = boxes.to(torch.device("cpu")) + + results = [ + rasterize_polygons_within_box(poly, box.numpy(), mask_size) + for poly, box in zip(self.polygons, boxes) + ] + """ + poly: list[list[float]], the polygons for one instance + box: a tensor of shape (4,) + """ + if len(results) == 0: + return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) + return torch.stack(results, dim=0).to(device=device) + + def area(self): + """ + Computes area of the mask. + Only works with Polygons, using the shoelace formula: + https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + + Returns: + Tensor: a vector, area for each instance + """ + + area = [] + for polygons_per_instance in self.polygons: + area_per_instance = 0 + for p in polygons_per_instance: + area_per_instance += polygon_area(p[0::2], p[1::2]) + area.append(area_per_instance) + + return torch.tensor(area) + + @staticmethod + def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": + """ + Concatenates a list of PolygonMasks into a single PolygonMasks + + Arguments: + polymasks_list (list[PolygonMasks]) + + Returns: + PolygonMasks: the concatenated PolygonMasks + """ + assert isinstance(polymasks_list, (list, tuple)) + assert len(polymasks_list) > 0 + assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) + + cat_polymasks = type(polymasks_list[0])( + list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) + ) + return cat_polymasks + + +class ROIMasks: + """ + Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, + full-image bitmask can be obtained by "pasting" the mask on the region defined + by the corresponding ROI box. + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor: (N, M, M) mask tensor that defines the mask within each ROI. + """ + if tensor.dim() != 3: + raise ValueError("ROIMasks must take a masks of 3 dimension.") + self.tensor = tensor + + def to(self, device: torch.device) -> "ROIMasks": + return ROIMasks(self.tensor.to(device)) + + @property + def device(self) -> device: + return self.tensor.device + + def __len__(self): + return self.tensor.shape[0] + + def __getitem__(self, item) -> "ROIMasks": + """ + Returns: + ROIMasks: Create a new :class:`ROIMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[2:10]`: return a slice of masks. + 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + t = self.tensor[item] + if t.dim() != 3: + raise ValueError( + f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" + ) + return ROIMasks(t) + + @torch.jit.unused + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @torch.jit.unused + def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): + """ + Args: see documentation of :func:`paste_masks_in_image`. 
+ """ + from custom_detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape + + if torch.jit.is_tracing(): + if isinstance(height, torch.Tensor): + paste_func = _paste_masks_tensor_shape + else: + paste_func = paste_masks_in_image + else: + paste_func = retry_if_cuda_oom(paste_masks_in_image) + bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) + return BitMasks(bitmasks) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/rotated_boxes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/rotated_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc0ae02935b69fbb7a603bafccb0d4e728eda31 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/structures/rotated_boxes.py @@ -0,0 +1,505 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Tuple +import torch + +from custom_detectron2.layers.rotated_boxes import pairwise_iou_rotated + +from .boxes import Boxes + + +class RotatedBoxes(Boxes): + """ + This structure stores a list of rotated boxes as a Nx5 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx5 matrix. Each row is + (x_center, y_center, width, height, angle), + in which angle is represented in degrees. + While there's no strict range restriction for it, + the recommended principal range is between [-180, 180) degrees. + + Assume we have a horizontal box B = (x_center, y_center, width, height), + where width is along the x-axis and height is along the y-axis. + The rotated box B_rot (x_center, y_center, width, height, angle) + can be seen as: + + 1. When angle == 0: + B_rot == B + 2. When angle > 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; + 3. When angle < 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. + + Mathematically, since the right-handed coordinate system for image space + is (y, x), where y is top->down and x is left->right, the 4 vertices of the + rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from + the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4) + in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, + :math:`(y_c, x_c)` is the center of the rectangle): + + .. math:: + + yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, + + xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, + + which is the standard rigid-body rotation transformation. + + Intuitively, the angle is + (1) the rotation angle from y-axis in image space + to the height vector (top->down in the box's local coordinate system) + of the box in CCW, and + (2) the rotation angle from x-axis in image space + to the width vector (left->right in the box's local coordinate system) + of the box in CCW. + + More intuitively, consider the following horizontal box ABCD represented + in (x1, y1, x2, y2): (3, 2, 7, 4), + covering the [3, 7] x [2, 4] region of the continuous coordinate system + which looks like this: + + .. 
code:: none + + O--------> x + | + | A---B + | | | + | D---C + | + v y + + Note that each capital letter represents one 0-dimensional geometric point + instead of a 'square pixel' here. + + In the example above, using (x, y) to represent a point we have: + + .. math:: + + O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) + + We name vector AB = vector DC as the width vector in box's local coordinate system, and + vector AD = vector BC as the height vector in box's local coordinate system. Initially, + when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis + in the image space, respectively. + + For better illustration, we denote the center of the box as E, + + .. code:: none + + O--------> x + | + | A---B + | | E | + | D---C + | + v y + + where the center E = ((3+7)/2, (2+4)/2) = (5, 3). + + Also, + + .. math:: + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Therefore, the corresponding representation for the same shape in rotated box in + (x_center, y_center, width, height, angle) format is: + + (5, 3, 4, 2, 0), + + Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees + CCW (counter-clockwise) by definition. It looks like this: + + .. code:: none + + O--------> x + | B-C + | | | + | |E| + | | | + | A-D + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CCW with regard to E: + A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) + + Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to + vector AD or vector BC (the top->down height vector in box's local coordinate system), + or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right + width vector in box's local coordinate system). + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) + by definition? It looks like this: + + .. code:: none + + O--------> x + | D-A + | | | + | |E| + | | | + | C-B + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CW with regard to E: + A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU + will be 1. However, these two will generate different RoI Pooling results and + should not be treated as an identical box. + + On the other hand, it's easy to see that (X, Y, W, H, A) is identical to + (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be + identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is + equivalent to rotating the same shape 90 degrees CW. + + We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): + + .. code:: none + + O--------> x + | + | C---D + | | E | + | B---A + | + v y + + .. math:: + + A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Finally, this is a very inaccurate (heavily quantized) illustration of + how (5, 3, 4, 2, 60) looks like in case anyone wonders: + + .. code:: none + + O--------> x + | B\ + | / C + | /E / + | A / + | `D + v y + + It's still a rectangle with center of (5, 3), width of 4 and height of 2, + but its angle (and thus orientation) is somewhere between + (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). 
+ """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() + + self.tensor = tensor + + def clone(self) -> "RotatedBoxes": + """ + Clone the RotatedBoxes. + + Returns: + RotatedBoxes + """ + return RotatedBoxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return RotatedBoxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = box[:, 2] * box[:, 3] + return area + + # Avoid in-place operations so that we can torchscript; NOTE: this creates a new tensor + def normalize_angles(self) -> None: + """ + Restrict angles to the range of [-180, 180) degrees + """ + angle_tensor = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 + self.tensor = torch.cat((self.tensor[:, :4], angle_tensor[:, None]), dim=1) + + def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + For RRPN: + Only clip boxes that are almost horizontal with a tolerance of + clip_angle_threshold to maintain backward compatibility. + + Rotated boxes beyond this threshold are not clipped for two reasons: + + 1. There are potentially multiple ways to clip a rotated box to make it + fit within the image. + 2. It's tricky to make the entire rectangular box fit within the image + and still be able to not leave out pixels of interest. + + Therefore we rely on ops like RoIAlignRotated to safely handle this. + + Args: + box_size (height, width): The clipping box's size. + clip_angle_threshold: + Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), + we do the clipping as horizontal boxes. + """ + h, w = box_size + + # normalize angles to be within (-180, 180] degrees + self.normalize_angles() + + idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] + + # convert to (x1, y1, x2, y2) + x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 + y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 + x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 + y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 + + # clip + x1.clamp_(min=0, max=w) + y1.clamp_(min=0, max=h) + x2.clamp_(min=0, max=w) + y2.clamp_(min=0, max=h) + + # convert back to (xc, yc, w, h) + self.tensor[idx, 0] = (x1 + x2) / 2.0 + self.tensor[idx, 1] = (y1 + y2) / 2.0 + # make sure widths and heights do not increase due to numerical errors + self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) + self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: a binary vector which represents + whether each box is empty (False) or non-empty (True). 
+ """ + box = self.tensor + widths = box[:, 2] + heights = box[:, 3] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "RotatedBoxes": + """ + Returns: + RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned RotatedBoxes might share storage with this RotatedBoxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return RotatedBoxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( + item + ) + return RotatedBoxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "RotatedBoxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box covering + [0, width] x [0, height] + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + For RRPN, it might not be necessary to call this function since it's common + for rotated box to extend to outside of the image boundaries + (the clip function only clips the near-horizontal boxes) + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + + cnt_x = self.tensor[..., 0] + cnt_y = self.tensor[..., 1] + half_w = self.tensor[..., 2] / 2.0 + half_h = self.tensor[..., 3] / 2.0 + a = self.tensor[..., 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + max_rect_dx = c * half_w + s * half_h + max_rect_dy = c * half_h + s * half_w + + inds_inside = ( + (cnt_x - max_rect_dx >= -boundary_threshold) + & (cnt_y - max_rect_dy >= -boundary_threshold) + & (cnt_x + max_rect_dx < width + boundary_threshold) + & (cnt_y + max_rect_dy < height + boundary_threshold) + ) + + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return self.tensor[:, :2] + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the rotated box with horizontal and vertical scaling factors + Note: when scale_factor_x != scale_factor_y, + the rotated box does not preserve the rectangular shape when the angle + is not a multiple of 90 degrees under resize transformation. + Instead, the shape is a parallelogram (that has skew) + Here we make an approximation by fitting a rotated rectangle to the parallelogram. 
+ """ + self.tensor[:, 0] *= scale_x + self.tensor[:, 1] *= scale_y + theta = self.tensor[:, 4] * math.pi / 180.0 + c = torch.cos(theta) + s = torch.sin(theta) + + # In image space, y is top->down and x is left->right + # Consider the local coordintate system for the rotated box, + # where the box center is located at (0, 0), and the four vertices ABCD are + # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) + # the midpoint of the left edge AD of the rotated box E is: + # E = (A+D)/2 = (-w / 2, 0) + # the midpoint of the top edge AB of the rotated box F is: + # F(0, -h / 2) + # To get the old coordinates in the global system, apply the rotation transformation + # (Note: the right-handed coordinate system for image space is yOx): + # (old_x, old_y) = (s * y + c * x, c * y - s * x) + # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) + # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) + # After applying the scaling factor (sfx, sfy): + # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) + # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) + # The new width after scaling tranformation becomes: + + # w(new) = |E(new) - O| * 2 + # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 + # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w + # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y + self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) + + # h(new) = |F(new) - O| * 2 + # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 + # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h + # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x + self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) + + # The angle is the rotation angle from y-axis in image space to the height + # vector (top->down in the box's local coordinate system) of the box in CCW. + # + # angle(new) = angle_yOx(O - F(new)) + # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) + # = atan2(sfx * s * h / 2, sfy * c * h / 2) + # = atan2(sfx * s, sfy * c) + # + # For example, + # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) + self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi + + @classmethod + def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": + """ + Concatenates a list of RotatedBoxes into a single RotatedBoxes + + Arguments: + boxes_list (list[RotatedBoxes]) + + Returns: + RotatedBoxes: the concatenated RotatedBoxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, RotatedBoxes) for box in boxes_list]) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __iter__(self): + """ + Yield a box as a Tensor of shape (5,) at a time. 
+ """ + yield from self.tensor + + +def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None: + """ + Given two lists of rotated boxes of size N and M, + compute the IoU (intersection over union) + between **all** N x M pairs of boxes. + The box order must be (x_center, y_center, width, height, angle). + + Args: + boxes1, boxes2 (RotatedBoxes): + two `RotatedBoxes`. Contains N & M rotated boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + + return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..21078ae822b04b71dbd8b056b5993d173eaf6bff --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .base_tracker import ( # noqa + BaseTracker, + build_tracker_head, + TRACKER_HEADS_REGISTRY, +) +from .bbox_iou_tracker import BBoxIOUTracker # noqa +from .hungarian_tracker import BaseHungarianTracker # noqa +from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa + IOUWeightedHungarianBBoxIOUTracker, +) +from .utils import create_prediction_pairs # noqa +from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/base_tracker.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/base_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..66a85b9b9ee2dbc47bec17ccd66df412cc4371ec --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/base_tracker.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. +from custom_detectron2.config import configurable +from custom_detectron2.utils.registry import Registry + +from ..config.config import CfgNode as CfgNode_ +from ..structures import Instances + +TRACKER_HEADS_REGISTRY = Registry("TRACKER_HEADS") +TRACKER_HEADS_REGISTRY.__doc__ = """ +Registry for tracking classes. 
+""" + + +class BaseTracker(object): + """ + A parent class for all trackers + """ + + @configurable + def __init__(self, **kwargs): + self._prev_instances = None # (D2)instances for previous frame + self._matched_idx = set() # indices in prev_instances found matching + self._matched_ID = set() # idendities in prev_instances found matching + self._untracked_prev_idx = set() # indices in prev_instances not found matching + self._id_count = 0 # used to assign new id + + @classmethod + def from_config(cls, cfg: CfgNode_): + raise NotImplementedError("Calling BaseTracker::from_config") + + def update(self, predictions: Instances) -> Instances: + """ + Args: + predictions: D2 Instances for predictions of the current frame + Return: + D2 Instances for predictions of the current frame with ID assigned + + _prev_instances and instances will have the following fields: + .pred_boxes (shape=[N, 4]) + .scores (shape=[N,]) + .pred_classes (shape=[N,]) + .pred_keypoints (shape=[N, M, 3], Optional) + .pred_masks (shape=List[2D_MASK], Optional) 2D_MASK: shape=[H, W] + .ID (shape=[N,]) + + N: # of detected bboxes + H and W: height and width of 2D mask + """ + raise NotImplementedError("Calling BaseTracker::update") + + +def build_tracker_head(cfg: CfgNode_) -> BaseTracker: + """ + Build a tracker head from `cfg.TRACKER_HEADS.TRACKER_NAME`. + + Args: + cfg: D2 CfgNode, config file with tracker information + Return: + tracker object + """ + name = cfg.TRACKER_HEADS.TRACKER_NAME + tracker_class = TRACKER_HEADS_REGISTRY.get(name) + return tracker_class(cfg) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/bbox_iou_tracker.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/bbox_iou_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..f306db6d45d55e0741066d8d1f750ca1a3687eca --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/bbox_iou_tracker.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
+import copy +import numpy as np +from typing import List +import torch + +from custom_detectron2.config import configurable +from custom_detectron2.structures import Boxes, Instances +from custom_detectron2.structures.boxes import pairwise_iou + +from ..config.config import CfgNode as CfgNode_ +from .base_tracker import TRACKER_HEADS_REGISTRY, BaseTracker + + +@TRACKER_HEADS_REGISTRY.register() +class BBoxIOUTracker(BaseTracker): + """ + A bounding box tracker to assign ID based on IoU between current and previous instances + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__(**kwargs) + self._video_height = video_height + self._video_width = video_width + self._max_num_instances = max_num_instances + self._max_lost_frame_count = max_lost_frame_count + self._min_box_rel_dim = min_box_rel_dim + self._min_instance_period = min_instance_period + self._track_iou_threshold = track_iou_threshold + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def update(self, instances: Instances) -> Instances: + """ + See BaseTracker description + """ + instances = self._initialize_extra_fields(instances) + if self._prev_instances is not None: + # calculate IoU of all bbox pairs + iou_all = pairwise_iou( + boxes1=instances.pred_boxes, + boxes2=self._prev_instances.pred_boxes, + ) + # sort IoU in descending order + bbox_pairs = self._create_prediction_pairs(instances, iou_all) + # assign previous ID to current bbox if IoU > track_iou_threshold + self._reset_fields() + for bbox_pair in bbox_pairs: + idx = bbox_pair["idx"] + prev_id = bbox_pair["prev_id"] + if ( + idx in self._matched_idx + or prev_id in 
self._matched_ID + or bbox_pair["IoU"] < self._track_iou_threshold + ): + continue + instances.ID[idx] = prev_id + instances.ID_period[idx] = bbox_pair["prev_period"] + 1 + instances.lost_frame_count[idx] = 0 + self._matched_idx.add(idx) + self._matched_ID.add(prev_id) + self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) + instances = self._assign_new_id(instances) + instances = self._merge_untracked_instances(instances) + self._prev_instances = copy.deepcopy(instances) + return instances + + def _create_prediction_pairs(self, instances: Instances, iou_all: np.ndarray) -> List: + """ + For all instances in previous and current frames, create pairs. For each + pair, store index of the instance in current frame predcitions, index in + previous predictions, ID in previous predictions, IoU of the bboxes in this + pair, period in previous predictions. + + Args: + instances: D2 Instances, for predictions of the current frame + iou_all: IoU for all bboxes pairs + Return: + A list of IoU for all pairs + """ + bbox_pairs = [] + for i in range(len(instances)): + for j in range(len(self._prev_instances)): + bbox_pairs.append( + { + "idx": i, + "prev_idx": j, + "prev_id": self._prev_instances.ID[j], + "IoU": iou_all[i, j], + "prev_period": self._prev_instances.ID_period[j], + } + ) + return bbox_pairs + + def _initialize_extra_fields(self, instances: Instances) -> Instances: + """ + If input instances don't have ID, ID_period, lost_frame_count fields, + this method is used to initialize these fields. + + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with extra fields added + """ + if not instances.has("ID"): + instances.set("ID", [None] * len(instances)) + if not instances.has("ID_period"): + instances.set("ID_period", [None] * len(instances)) + if not instances.has("lost_frame_count"): + instances.set("lost_frame_count", [None] * len(instances)) + if self._prev_instances is None: + instances.ID = list(range(len(instances))) + self._id_count += len(instances) + instances.ID_period = [1] * len(instances) + instances.lost_frame_count = [0] * len(instances) + return instances + + def _reset_fields(self): + """ + Before each uodate call, reset fields first + """ + self._matched_idx = set() + self._matched_ID = set() + self._untracked_prev_idx = set(range(len(self._prev_instances))) + + def _assign_new_id(self, instances: Instances) -> Instances: + """ + For each untracked instance, assign a new id + + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with new ID assigned + """ + untracked_idx = set(range(len(instances))).difference(self._matched_idx) + for idx in untracked_idx: + instances.ID[idx] = self._id_count + self._id_count += 1 + instances.ID_period[idx] = 1 + instances.lost_frame_count[idx] = 0 + return instances + + def _merge_untracked_instances(self, instances: Instances) -> Instances: + """ + For untracked previous instances, under certain condition, still keep them + in tracking and merge with the current instances. 
+ + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances merging current instances and instances from previous + frame decided to keep tracking + """ + untracked_instances = Instances( + image_size=instances.image_size, + pred_boxes=[], + pred_classes=[], + scores=[], + ID=[], + ID_period=[], + lost_frame_count=[], + ) + prev_bboxes = list(self._prev_instances.pred_boxes) + prev_classes = list(self._prev_instances.pred_classes) + prev_scores = list(self._prev_instances.scores) + prev_ID_period = self._prev_instances.ID_period + if instances.has("pred_masks"): + untracked_instances.set("pred_masks", []) + prev_masks = list(self._prev_instances.pred_masks) + if instances.has("pred_keypoints"): + untracked_instances.set("pred_keypoints", []) + prev_keypoints = list(self._prev_instances.pred_keypoints) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.set("pred_keypoint_heatmaps", []) + prev_keypoint_heatmaps = list(self._prev_instances.pred_keypoint_heatmaps) + for idx in self._untracked_prev_idx: + x_left, y_top, x_right, y_bot = prev_bboxes[idx] + if ( + (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) + or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) + or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count + or prev_ID_period[idx] <= self._min_instance_period + ): + continue + untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) + untracked_instances.pred_classes.append(int(prev_classes[idx])) + untracked_instances.scores.append(float(prev_scores[idx])) + untracked_instances.ID.append(self._prev_instances.ID[idx]) + untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) + untracked_instances.lost_frame_count.append( + self._prev_instances.lost_frame_count[idx] + 1 + ) + if instances.has("pred_masks"): + untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) + if instances.has("pred_keypoints"): + untracked_instances.pred_keypoints.append( + prev_keypoints[idx].numpy().astype(np.uint8) + ) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.pred_keypoint_heatmaps.append( + prev_keypoint_heatmaps[idx].numpy().astype(np.float32) + ) + untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) + untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) + untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) + if instances.has("pred_masks"): + untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) + if instances.has("pred_keypoints"): + untracked_instances.pred_keypoints = torch.IntTensor(untracked_instances.pred_keypoints) + if instances.has("pred_keypoint_heatmaps"): + untracked_instances.pred_keypoint_heatmaps = torch.FloatTensor( + untracked_instances.pred_keypoint_heatmaps + ) + + return Instances.cat( + [ + instances, + untracked_instances, + ] + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/hungarian_tracker.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/hungarian_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..00b3126057f665aeeb1742ce77bf8da1b0a56577 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/hungarian_tracker.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
+import copy +import numpy as np +from typing import Dict +import torch +from scipy.optimize import linear_sum_assignment + +from custom_detectron2.config import configurable +from custom_detectron2.structures import Boxes, Instances + +from ..config.config import CfgNode as CfgNode_ +from .base_tracker import BaseTracker + + +class BaseHungarianTracker(BaseTracker): + """ + A base class for all Hungarian trackers + """ + + @configurable + def __init__( + self, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + **kwargs + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + """ + super().__init__(**kwargs) + self._video_height = video_height + self._video_width = video_width + self._max_num_instances = max_num_instances + self._max_lost_frame_count = max_lost_frame_count + self._min_box_rel_dim = min_box_rel_dim + self._min_instance_period = min_instance_period + + @classmethod + def from_config(cls, cfg: CfgNode_) -> Dict: + raise NotImplementedError("Calling HungarianTracker::from_config") + + def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: + raise NotImplementedError("Calling HungarianTracker::build_matrix") + + def update(self, instances: Instances) -> Instances: + if instances.has("pred_keypoints"): + raise NotImplementedError("Need to add support for keypoints") + instances = self._initialize_extra_fields(instances) + if self._prev_instances is not None: + self._untracked_prev_idx = set(range(len(self._prev_instances))) + cost_matrix = self.build_cost_matrix(instances, self._prev_instances) + matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) + instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) + instances = self._process_unmatched_idx(instances, matched_idx) + instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) + self._prev_instances = copy.deepcopy(instances) + return instances + + def _initialize_extra_fields(self, instances: Instances) -> Instances: + """ + If input instances don't have ID, ID_period, lost_frame_count fields, + this method is used to initialize these fields. 
+ + Args: + instances: D2 Instances, for predictions of the current frame + Return: + D2 Instances with extra fields added + """ + if not instances.has("ID"): + instances.set("ID", [None] * len(instances)) + if not instances.has("ID_period"): + instances.set("ID_period", [None] * len(instances)) + if not instances.has("lost_frame_count"): + instances.set("lost_frame_count", [None] * len(instances)) + if self._prev_instances is None: + instances.ID = list(range(len(instances))) + self._id_count += len(instances) + instances.ID_period = [1] * len(instances) + instances.lost_frame_count = [0] * len(instances) + return instances + + def _process_matched_idx( + self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray + ) -> Instances: + assert matched_idx.size == matched_prev_idx.size + for i in range(matched_idx.size): + instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] + instances.ID_period[matched_idx[i]] = ( + self._prev_instances.ID_period[matched_prev_idx[i]] + 1 + ) + instances.lost_frame_count[matched_idx[i]] = 0 + return instances + + def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: + untracked_idx = set(range(len(instances))).difference(set(matched_idx)) + for idx in untracked_idx: + instances.ID[idx] = self._id_count + self._id_count += 1 + instances.ID_period[idx] = 1 + instances.lost_frame_count[idx] = 0 + return instances + + def _process_unmatched_prev_idx( + self, instances: Instances, matched_prev_idx: np.ndarray + ) -> Instances: + untracked_instances = Instances( + image_size=instances.image_size, + pred_boxes=[], + pred_masks=[], + pred_classes=[], + scores=[], + ID=[], + ID_period=[], + lost_frame_count=[], + ) + prev_bboxes = list(self._prev_instances.pred_boxes) + prev_classes = list(self._prev_instances.pred_classes) + prev_scores = list(self._prev_instances.scores) + prev_ID_period = self._prev_instances.ID_period + if instances.has("pred_masks"): + prev_masks = list(self._prev_instances.pred_masks) + untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) + for idx in untracked_prev_idx: + x_left, y_top, x_right, y_bot = prev_bboxes[idx] + if ( + (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) + or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) + or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count + or prev_ID_period[idx] <= self._min_instance_period + ): + continue + untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) + untracked_instances.pred_classes.append(int(prev_classes[idx])) + untracked_instances.scores.append(float(prev_scores[idx])) + untracked_instances.ID.append(self._prev_instances.ID[idx]) + untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) + untracked_instances.lost_frame_count.append( + self._prev_instances.lost_frame_count[idx] + 1 + ) + if instances.has("pred_masks"): + untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) + + untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) + untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) + untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) + if instances.has("pred_masks"): + untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) + else: + untracked_instances.remove("pred_masks") + + return Instances.cat( + [ + instances, + 
untracked_instances, + ] + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..07fa28c1b6af274827a45fee1fb7beabc1f130e5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. + +import numpy as np +from typing import List + +from custom_detectron2.config import CfgNode as CfgNode_ +from custom_detectron2.config import configurable + +from .base_tracker import TRACKER_HEADS_REGISTRY +from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker + + +@TRACKER_HEADS_REGISTRY.register() +class IOUWeightedHungarianBBoxIOUTracker(VanillaHungarianBBoxIOUTracker): + """ + A tracker using IoU as weight in Hungarian algorithm, also known + as Munkres or Kuhn-Munkres algorithm + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__( + video_height=video_height, + video_width=video_width, + max_num_instances=max_num_instances, + max_lost_frame_count=max_lost_frame_count, + min_box_rel_dim=min_box_rel_dim, + min_instance_period=min_instance_period, + track_iou_threshold=track_iou_threshold, + ) + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> 
np.ndarray: + """ + Based on IoU for each pair of bbox, assign the associated value in cost matrix + + Args: + cost_matrix: np.ndarray, initialized 2D array with target dimensions + bbox_pairs: list of bbox pair, in each pair, iou value is stored + Return: + np.ndarray, cost_matrix with assigned values + """ + for pair in bbox_pairs: + # assign (-1 * IoU) for above threshold pairs, algorithms will minimize cost + cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"] + return cost_matrix diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dd81e6a064312d60af6b3c506b61cd3aaec3aa81 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/utils.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +import numpy as np +from typing import List + +from custom_detectron2.structures import Instances + + +def create_prediction_pairs( + instances: Instances, + prev_instances: Instances, + iou_all: np.ndarray, + threshold: float = 0.5, +) -> List: + """ + Args: + instances: predictions from current frame + prev_instances: predictions from previous frame + iou_all: 2D numpy array containing iou for each bbox pair + threshold: below the threshold, doesn't consider the pair of bbox is valid + Return: + List of bbox pairs + """ + bbox_pairs = [] + for i in range(len(instances)): + for j in range(len(prev_instances)): + if iou_all[i, j] < threshold: + continue + bbox_pairs.append( + { + "idx": i, + "prev_idx": j, + "prev_id": prev_instances.ID[j], + "IoU": iou_all[i, j], + "prev_period": prev_instances.ID_period[j], + } + ) + return bbox_pairs + + +LARGE_COST_VALUE = 100000 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..42dbbebbbf1b0f9368686183765b6f9db61fea92 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/tracking/vanilla_hungarian_bbox_iou_tracker.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +# Copyright 2004-present Facebook. All Rights Reserved. 
+ +import numpy as np +from typing import List + +from custom_detectron2.config import CfgNode as CfgNode_ +from custom_detectron2.config import configurable +from custom_detectron2.structures import Instances +from custom_detectron2.structures.boxes import pairwise_iou +from custom_detectron2.tracking.utils import LARGE_COST_VALUE, create_prediction_pairs + +from .base_tracker import TRACKER_HEADS_REGISTRY +from .hungarian_tracker import BaseHungarianTracker + + +@TRACKER_HEADS_REGISTRY.register() +class VanillaHungarianBBoxIOUTracker(BaseHungarianTracker): + """ + Hungarian algo based tracker using bbox iou as metric + """ + + @configurable + def __init__( + self, + *, + video_height: int, + video_width: int, + max_num_instances: int = 200, + max_lost_frame_count: int = 0, + min_box_rel_dim: float = 0.02, + min_instance_period: int = 1, + track_iou_threshold: float = 0.5, + **kwargs, + ): + """ + Args: + video_height: height the video frame + video_width: width of the video frame + max_num_instances: maximum number of id allowed to be tracked + max_lost_frame_count: maximum number of frame an id can lost tracking + exceed this number, an id is considered as lost + forever + min_box_rel_dim: a percentage, smaller than this dimension, a bbox is + removed from tracking + min_instance_period: an instance will be shown after this number of period + since its first showing up in the video + track_iou_threshold: iou threshold, below this number a bbox pair is removed + from tracking + """ + super().__init__( + video_height=video_height, + video_width=video_width, + max_num_instances=max_num_instances, + max_lost_frame_count=max_lost_frame_count, + min_box_rel_dim=min_box_rel_dim, + min_instance_period=min_instance_period, + ) + self._track_iou_threshold = track_iou_threshold + + @classmethod + def from_config(cls, cfg: CfgNode_): + """ + Old style initialization using CfgNode + + Args: + cfg: D2 CfgNode, config file + Return: + dictionary storing arguments for __init__ method + """ + assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS + assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS + video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") + video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") + max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) + max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) + min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) + min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) + track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) + return { + "_target_": "detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker", # noqa + "video_height": video_height, + "video_width": video_width, + "max_num_instances": max_num_instances, + "max_lost_frame_count": max_lost_frame_count, + "min_box_rel_dim": min_box_rel_dim, + "min_instance_period": min_instance_period, + "track_iou_threshold": track_iou_threshold, + } + + def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: + """ + Build the cost matrix for assignment problem + (https://en.wikipedia.org/wiki/Assignment_problem) + + Args: + instances: D2 Instances, for current frame predictions + prev_instances: D2 Instances, for previous frame predictions + + Return: + the cost matrix in numpy array + """ + assert instances is not None and prev_instances is not None + # calculate IoU of all bbox pairs + iou_all = pairwise_iou( + boxes1=instances.pred_boxes, + boxes2=self._prev_instances.pred_boxes, + 
) + bbox_pairs = create_prediction_pairs( + instances, self._prev_instances, iou_all, threshold=self._track_iou_threshold + ) + # assign large cost value to make sure pair below IoU threshold won't be matched + cost_matrix = np.full((len(instances), len(prev_instances)), LARGE_COST_VALUE) + return self.assign_cost_matrix_values(cost_matrix, bbox_pairs) + + def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray: + """ + Based on IoU for each pair of bbox, assign the associated value in cost matrix + + Args: + cost_matrix: np.ndarray, initialized 2D array with target dimensions + bbox_pairs: list of bbox pair, in each pair, iou value is stored + Return: + np.ndarray, cost_matrix with assigned values + """ + for pair in bbox_pairs: + # assign -1 for IoU above threshold pairs, algorithms will minimize cost + cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 + return cost_matrix diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9765b24a730b77556104187ac3ef5439ab0859fd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/README.md @@ -0,0 +1,5 @@ +# Utility functions + +This folder contain utility functions that are not used in the +core library, but are useful for building models or training +code using the config system. diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9020c2df23e2af280b7bb168b996ae9eaf312eb8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/analysis.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..35b7bdcca4c545213c4cc92f9f71dd75cf288936 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/analysis.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+# -*- coding: utf-8 -*- + +import typing +from typing import Any, List +import fvcore +from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table +from torch import nn + +from custom_detectron2.export import TracingAdapter + +__all__ = [ + "activation_count_operators", + "flop_count_operators", + "parameter_count_table", + "parameter_count", + "FlopCountAnalysis", +] + +FLOPS_MODE = "flops" +ACTIVATIONS_MODE = "activations" + + +# Some extra ops to ignore from counting, including elementwise and reduction ops +_IGNORED_OPS = { + "aten::add", + "aten::add_", + "aten::argmax", + "aten::argsort", + "aten::batch_norm", + "aten::constant_pad_nd", + "aten::div", + "aten::div_", + "aten::exp", + "aten::log2", + "aten::max_pool2d", + "aten::meshgrid", + "aten::mul", + "aten::mul_", + "aten::neg", + "aten::nonzero_numpy", + "aten::reciprocal", + "aten::repeat_interleave", + "aten::rsub", + "aten::sigmoid", + "aten::sigmoid_", + "aten::softmax", + "aten::sort", + "aten::sqrt", + "aten::sub", + "torchvision::nms", # TODO estimate flop for nms +} + + +class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis): + """ + Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models. + """ + + def __init__(self, model, inputs): + """ + Args: + model (nn.Module): + inputs (Any): inputs of the given model. Does not have to be tuple of tensors. + """ + wrapper = TracingAdapter(model, inputs, allow_non_tensor=True) + super().__init__(wrapper, wrapper.flattened_inputs) + self.set_op_handle(**{k: None for k in _IGNORED_OPS}) + + +def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]: + """ + Implement operator-level flops counting using jit. + This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard + detection models in detectron2. + Please use :class:`FlopCountAnalysis` for more advanced functionalities. + + Note: + The function runs the input through the model to compute flops. + The flops of a detection model is often input-dependent, for example, + the flops of box & mask head depends on the number of proposals & + the number of detected objects. + Therefore, the flops counting using a single input may not accurately + reflect the computation cost of a model. It's recommended to average + across a number of inputs. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. + Only "image" key will be used. + supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` + + Returns: + Counter: Gflop count per operator + """ + old_train = model.training + model.eval() + ret = FlopCountAnalysis(model, inputs).by_operator() + model.train(old_train) + return {k: v / 1e9 for k, v in ret.items()} + + +def activation_count_operators( + model: nn.Module, inputs: list, **kwargs +) -> typing.DefaultDict[str, float]: + """ + Implement operator-level activations counting using jit. + This is a wrapper of fvcore.nn.activation_count, that supports standard detection models + in detectron2. + + Note: + The function runs the input through the model to compute activations. + The activations of a detection model is often input-dependent, for example, + the activations of box & mask head depends on the number of proposals & + the number of detected objects. + + Args: + model: a detectron2 model that takes `list[dict]` as input. + inputs (list[dict]): inputs to model, in detectron2's standard format. 
+ Only "image" key will be used. + + Returns: + Counter: activation count per operator + """ + return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs) + + +def _wrapper_count_operators( + model: nn.Module, inputs: list, mode: str, **kwargs +) -> typing.DefaultDict[str, float]: + # ignore some ops + supported_ops = {k: lambda *args, **kwargs: {} for k in _IGNORED_OPS} + supported_ops.update(kwargs.pop("supported_ops", {})) + kwargs["supported_ops"] = supported_ops + + assert len(inputs) == 1, "Please use batch size=1" + tensor_input = inputs[0]["image"] + inputs = [{"image": tensor_input}] # remove other keys, in case there are any + + old_train = model.training + if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)): + model = model.module + wrapper = TracingAdapter(model, inputs) + wrapper.eval() + if mode == FLOPS_MODE: + ret = flop_count(wrapper, (tensor_input,), **kwargs) + elif mode == ACTIVATIONS_MODE: + ret = activation_count(wrapper, (tensor_input,), **kwargs) + else: + raise NotImplementedError("Count for mode {} is not supported yet.".format(mode)) + # compatible with change in fvcore + if isinstance(ret, tuple): + ret = ret[0] + model.train(old_train) + return ret + + +def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]: + """ + Given a model, find parameters that do not contribute + to the loss. + + Args: + model: a model in training mode that returns losses + inputs: argument or a tuple of arguments. Inputs of the model + + Returns: + list[str]: the name of unused parameters + """ + assert model.training + for _, prm in model.named_parameters(): + prm.grad = None + + if isinstance(inputs, tuple): + losses = model(*inputs) + else: + losses = model(inputs) + + if isinstance(losses, dict): + losses = sum(losses.values()) + losses.backward() + + unused: List[str] = [] + for name, prm in model.named_parameters(): + if prm.grad is None: + unused.append(name) + prm.grad = None + return unused diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/collect_env.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..bbc55fd9ff10506fe4d49f8fa2346ecc8a4b4606 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/collect_env.py @@ -0,0 +1,246 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import importlib +import numpy as np +import os +import re +import subprocess +import sys +from collections import defaultdict +import PIL +import torch +import torchvision +from tabulate import tabulate + +__all__ = ["collect_env_info"] + + +def collect_torch_env(): + try: + import torch.__config__ + + return torch.__config__.show() + except ImportError: + # compatible with older versions of pytorch + from torch.utils.collect_env import get_pretty_env_info + + return get_pretty_env_info() + + +def get_env_module(): + var_name = "DETECTRON2_ENV_MODULE" + return var_name, os.environ.get(var_name, "") + + +def detect_compute_compatibility(CUDA_HOME, so_file): + try: + cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") + if os.path.isfile(cuobjdump): + output = subprocess.check_output( + "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True + ) + output = output.decode("utf-8").strip().split("\n") + arch = [] + for line in output: + line = re.findall(r"\.sm_([0-9]*)\.", line)[0] + arch.append(".".join(line)) + arch = sorted(set(arch)) + return ", ".join(arch) + else: + return so_file + "; cannot find cuobjdump" + except Exception: + # unhandled failure + return so_file + + +def collect_env_info(): + has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM + torch_version = torch.__version__ + + # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional + from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME + + has_rocm = False + if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): + has_rocm = True + has_cuda = has_gpu and (not has_rocm) + + data = [] + data.append(("sys.platform", sys.platform)) # check-template.yml depends on it + data.append(("Python", sys.version.replace("\n", ""))) + data.append(("numpy", np.__version__)) + + try: + import custom_detectron2 # noqa + + data.append( + ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) + ) + except ImportError: + data.append(("detectron2", "failed to import")) + except AttributeError: + data.append(("detectron2", "imported a wrong installation")) + + try: + import custom_detectron2._C as _C + except ImportError as e: + data.append(("detectron2._C", f"not built correctly: {e}")) + + # print system compilers when extension fails to build + if sys.platform != "win32": # don't know what to do for windows + try: + # this is how torch/utils/cpp_extensions.py choose compiler + cxx = os.environ.get("CXX", "c++") + cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) + cxx = cxx.decode("utf-8").strip().split("\n")[0] + except subprocess.SubprocessError: + cxx = "Not found" + data.append(("Compiler ($CXX)", cxx)) + + if has_cuda and CUDA_HOME is not None: + try: + nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") + nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) + nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] + except subprocess.SubprocessError: + nvcc = "Not found" + data.append(("CUDA compiler", nvcc)) + if has_cuda and sys.platform != "win32": + try: + so_file = importlib.util.find_spec("detectron2._C").origin + except (ImportError, AttributeError): + pass + else: + data.append( + ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) + ) + else: + # print compilers that are used to build extension + data.append(("Compiler", _C.get_compiler_version())) + data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip + if has_cuda and getattr(_C, "has_cuda", lambda: 
True)(): + data.append( + ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) + ) + + data.append(get_env_module()) + data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) + data.append(("PyTorch debug build", torch.version.debug)) + try: + data.append(("torch._C._GLIBCXX_USE_CXX11_ABI", torch._C._GLIBCXX_USE_CXX11_ABI)) + except Exception: + pass + + if not has_gpu: + has_gpu_text = "No: torch.cuda.is_available() == False" + else: + has_gpu_text = "Yes" + data.append(("GPU available", has_gpu_text)) + if has_gpu: + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) + name = torch.cuda.get_device_name(k) + f" (arch={cap})" + devices[name].append(str(k)) + for name, devids in devices.items(): + data.append(("GPU " + ",".join(devids), name)) + + if has_rocm: + msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" + data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) + else: + try: + from torch.utils.collect_env import get_nvidia_driver_version, run as _run + + data.append(("Driver version", get_nvidia_driver_version(_run))) + except Exception: + pass + msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" + data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) + + cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) + if cuda_arch_list: + data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) + data.append(("Pillow", PIL.__version__)) + + try: + data.append( + ( + "torchvision", + str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), + ) + ) + if has_cuda: + try: + torchvision_C = importlib.util.find_spec("torchvision._C").origin + msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) + data.append(("torchvision arch flags", msg)) + except (ImportError, AttributeError): + data.append(("torchvision._C", "Not found")) + except AttributeError: + data.append(("torchvision", "unknown")) + + try: + import fvcore + + data.append(("fvcore", fvcore.__version__)) + except (ImportError, AttributeError): + pass + + try: + import iopath + + data.append(("iopath", iopath.__version__)) + except (ImportError, AttributeError): + pass + + try: + import cv2 + + data.append(("cv2", cv2.__version__)) + except (ImportError, AttributeError): + data.append(("cv2", "Not found")) + env_str = tabulate(data) + "\n" + env_str += collect_torch_env() + return env_str + + +def test_nccl_ops(): + num_gpu = torch.cuda.device_count() + if os.access("/tmp", os.W_OK): + import torch.multiprocessing as mp + + dist_url = "file:///tmp/nccl_tmp_file" + print("Testing NCCL connectivity ... this should not hang.") + mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) + print("NCCL succeeded.") + + +def _test_nccl_worker(rank, num_gpu, dist_url): + import torch.distributed as dist + + dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) + dist.barrier(device_ids=[rank]) + + +if __name__ == "__main__": + try: + from custom_detectron2.utils.collect_env import collect_env_info as f + + print(f()) + except ImportError: + print(collect_env_info()) + + if torch.cuda.is_available(): + num_gpu = torch.cuda.device_count() + for k in range(num_gpu): + device = f"cuda:{k}" + try: + x = torch.tensor([1, 2.0], dtype=torch.float32) + x = x.to(device) + except Exception as e: + print( + f"Unable to copy tensor to device={device}: {e}. 
" + "Your CUDA environment is broken." + ) + if num_gpu > 1: + test_nccl_ops() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/colormap.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/colormap.py new file mode 100644 index 0000000000000000000000000000000000000000..14ded1659b40b161358c4aaf9cc84ffe0ffafe64 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/colormap.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +""" +An awesome colormap for really neat visualizations. +Copied from Detectron, and removed gray colors. +""" + +import numpy as np +import random + +__all__ = ["colormap", "random_color", "random_colors"] + +# fmt: off +# RGB: +_COLORS = np.array( + [ + 0.000, 0.447, 0.741, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.857, 0.857, 0.857, + 1.000, 1.000, 1.000 + ] +).astype(np.float32).reshape(-1, 3) +# fmt: on + + +def colormap(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] + """ + assert maximum in [255, 1], maximum + c = _COLORS * maximum + if not rgb: + c = c[:, ::-1] + return c + + +def random_color(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +def random_colors(N, rgb=False, maximum=255): + """ + Args: + N (int): number of unique colors needed + rgb (bool): whether to return RGB colors or BGR colors. 
+ maximum (int): either 255 or 1 + + Returns: + ndarray: a list of random_color + """ + indices = random.sample(range(len(_COLORS)), N) + ret = [_COLORS[i] * maximum for i in indices] + if not rgb: + ret = [x[::-1] for x in ret] + return ret + + +if __name__ == "__main__": + import cv2 + + size = 100 + H, W = 10, 10 + canvas = np.random.rand(H * size, W * size, 3).astype("float32") + for h in range(H): + for w in range(W): + idx = h * W + w + if idx >= len(_COLORS): + break + canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] + cv2.imshow("a", canvas) + cv2.waitKey(0) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/comm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ea9a9f578c5704d1e7ff563ef156e9133ab465 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/comm.py @@ -0,0 +1,238 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" + +import functools +import numpy as np +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None +_MISSING_LOCAL_PG_ERROR = ( + "Local process group is not yet created! Please use detectron2's `launch()` " + "to start processes and initialize pytorch process group. If you need to start " + "processes in other ways, please call comm.create_local_process_group(" + "num_workers_per_machine) after calling torch.distributed.init_process_group()." +) + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +@functools.lru_cache() +def create_local_process_group(num_workers_per_machine: int) -> None: + """ + Create a process group that contains ranks within the same machine. + + Detectron2's launch() in engine/launch.py will call this function. If you start + workers without launch(), you'll have to also call this. Otherwise utilities + like `get_local_rank()` will not work. + + This function contains a barrier. All processes must call it together. + + Args: + num_workers_per_machine: the number of worker processes per machine. Typically + the number of GPUs. + """ + global _LOCAL_PROCESS_GROUP + assert _LOCAL_PROCESS_GROUP is None + assert get_world_size() % num_workers_per_machine == 0 + num_machines = get_world_size() // num_workers_per_machine + machine_rank = get_rank() // num_workers_per_machine + for i in range(num_machines): + ranks_on_i = list(range(i * num_workers_per_machine, (i + 1) * num_workers_per_machine)) + pg = dist.new_group(ranks_on_i) + if i == machine_rank: + _LOCAL_PROCESS_GROUP = pg + + +def get_local_process_group(): + """ + Returns: + A torch process group which only includes processes that are on the same + machine as the current process. This group can be useful for communication + within a machine, e.g. a per-machine SyncBN. + """ + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return _LOCAL_PROCESS_GROUP + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. 
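+
+    Example (a minimal sketch; assumes the local process group exists, e.g.
+    because processes were started via detectron2's ``launch()``):
+    ::
+        torch.cuda.set_device(get_local_rank())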
+ """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + assert _LOCAL_PROCESS_GROUP is not None, _MISSING_LOCAL_PG_ERROR + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + if dist.get_backend() == dist.Backend.NCCL: + # This argument is needed to avoid warnings. + # It's valid only for NCCL backend. + dist.barrier(device_ids=[torch.cuda.current_device()]) + else: + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage. + world_size = dist.get_world_size(group) + if world_size == 1: + return [data] + + output = [None for _ in range(world_size)] + dist.all_gather_object(output, data, group=group) + return output + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + world_size = dist.get_world_size(group=group) + if world_size == 1: + return [data] + rank = dist.get_rank(group=group) + + if rank == dst: + output = [None for _ in range(world_size)] + dist.gather_object(data, output, dst=dst, group=group) + return output + else: + dist.gather_object(data, None, dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2**31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. 
+        average (bool): whether to do average or sum
+
+    Returns:
+        a dict with the same keys as input_dict, after reduction.
+    """
+    world_size = get_world_size()
+    if world_size < 2:
+        return input_dict
+    with torch.no_grad():
+        names = []
+        values = []
+        # sort the keys so that they are consistent across processes
+        for k in sorted(input_dict.keys()):
+            names.append(k)
+            values.append(input_dict[k])
+        values = torch.stack(values, dim=0)
+        dist.reduce(values, dst=0)
+        if dist.get_rank() == 0 and average:
+            # only main process gets accumulated, so only divide by
+            # world_size in this case
+            values /= world_size
+        reduced_dict = {k: v for k, v in zip(names, values)}
+    return reduced_dict diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/develop.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/develop.py new file mode 100644 index 0000000000000000000000000000000000000000..e8416984954f7b32fc269100620e3c0d0d0f9585 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/develop.py @@ -0,0 +1,59 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+""" Utilities for developers only.
+These are not visible to users (not automatically imported) and should not
+appear in docs."""
+# adapted from https://github.com/tensorpack/tensorpack/blob/master/tensorpack/utils/develop.py
+
+
+def create_dummy_class(klass, dependency, message=""):
+    """
+    When a dependency of a class is not available, create a dummy class which throws ImportError
+    when used.
+
+    Args:
+        klass (str): name of the class.
+        dependency (str): name of the dependency.
+        message: extra message to print
+    Returns:
+        class: a class object
+    """
+    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass)
+    if message:
+        err = err + " " + message
+
+    class _DummyMetaClass(type):
+        # throw error on class attribute access
+        def __getattr__(_, __):  # noqa: B902
+            raise ImportError(err)
+
+    class _Dummy(object, metaclass=_DummyMetaClass):
+        # throw error on constructor
+        def __init__(self, *args, **kwargs):
+            raise ImportError(err)
+
+    return _Dummy
+
+
+def create_dummy_func(func, dependency, message=""):
+    """
+    When a dependency of a function is not available, create a dummy function which throws
+    ImportError when used.
+
+    Args:
+        func (str): name of the function.
+        dependency (str or list[str]): name(s) of the dependency.
+        message: extra message to print
+    Returns:
+        function: a function object
+    """
+    err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
+    if message:
+        err = err + " " + message
+
+    if isinstance(dependency, (list, tuple)):
+        dependency = ",".join(dependency)
+
+    def _dummy(*args, **kwargs):
+        raise ImportError(err)
+
+    return _dummy diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/env.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/env.py new file mode 100644 index 0000000000000000000000000000000000000000..40634c17c73273ac8927632be164f466cfe7d1fa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/env.py @@ -0,0 +1,170 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import importlib
+import importlib.util
+import logging
+import numpy as np
+import os
+import random
+import sys
+from datetime import datetime
+import torch
+
+__all__ = ["seed_all_rng"]
+
+
+TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2])
+"""
+PyTorch version as a tuple of 2 ints. Useful for comparison.
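+
+Example (an illustrative version gate only):
+::
+    if TORCH_VERSION >= (1, 10):
+        pass  # an API introduced in PyTorch 1.10 may be used here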
+""" + + +DOC_BUILDING = os.getenv("_DOC_BUILDING", False) # set in docs/conf.py +""" +Whether we're building documentation. +""" + + +def seed_all_rng(seed=None): + """ + Set the random seed for the RNG in torch, numpy and python. + + Args: + seed (int): if None, will use a strong random seed. + """ + if seed is None: + seed = ( + os.getpid() + + int(datetime.now().strftime("%S%f")) + + int.from_bytes(os.urandom(2), "big") + ) + logger = logging.getLogger(__name__) + logger.info("Using a generated random seed {}".format(seed)) + np.random.seed(seed) + torch.manual_seed(seed) + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + + +# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path +def _import_file(module_name, file_path, make_importable=False): + spec = importlib.util.spec_from_file_location(module_name, file_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + if make_importable: + sys.modules[module_name] = module + return module + + +def _configure_libraries(): + """ + Configurations for some libraries. + """ + # An environment option to disable `import cv2` globally, + # in case it leads to negative performance impact + disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) + if disable_cv2: + sys.modules["cv2"] = None + else: + # Disable opencl in opencv since its interaction with cuda often has negative effects + # This envvar is supported after OpenCV 3.4.0 + os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" + try: + import cv2 + + if int(cv2.__version__.split(".")[0]) >= 3: + cv2.ocl.setUseOpenCL(False) + except ModuleNotFoundError: + # Other types of ImportError, if happened, should not be ignored. + # Because a failed opencv import could mess up address space + # https://github.com/skvark/opencv-python/issues/381 + pass + + def get_version(module, digit=2): + return tuple(map(int, module.__version__.split(".")[:digit])) + + # fmt: off + assert get_version(torch) >= (1, 4), "Requires torch>=1.4" + import fvcore + assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2" + import yaml + assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" + # fmt: on + + +_ENV_SETUP_DONE = False + + +def setup_environment(): + """Perform environment setup work. The default setup is a no-op, but this + function allows the user to specify a Python source file or a module in + the $DETECTRON2_ENV_MODULE environment variable, that performs + custom setup work that may be necessary to their computing environment. + """ + global _ENV_SETUP_DONE + if _ENV_SETUP_DONE: + return + _ENV_SETUP_DONE = True + + _configure_libraries() + + custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") + + if custom_module_path: + setup_custom_environment(custom_module_path) + else: + # The default setup is a no-op + pass + + +def setup_custom_environment(custom_module): + """ + Load custom environment setup by importing a Python source file or a + module, and run the setup function. + """ + if custom_module.endswith(".py"): + module = _import_file("detectron2.utils.env.custom_module", custom_module) + else: + module = importlib.import_module(custom_module) + assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( + "Custom environment module defined in {} does not have the " + "required callable attribute 'setup_environment'." 
+    ).format(custom_module)
+    module.setup_environment()
+
+
+def fixup_module_metadata(module_name, namespace, keys=None):
+    """
+    Fix the __qualname__ of module members to be their exported api name, so
+    when they are referenced in docs, sphinx can find them. Reference:
+    https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241
+    """
+    if not DOC_BUILDING:
+        return
+    seen_ids = set()
+
+    def fix_one(qualname, name, obj):
+        # avoid infinite recursion (relevant when using
+        # typing.Generic, for example)
+        if id(obj) in seen_ids:
+            return
+        seen_ids.add(id(obj))
+
+        mod = getattr(obj, "__module__", None)
+        if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")):
+            obj.__module__ = module_name
+            # Modules, unlike everything else in Python, put fully-qualified
+            # names into their __name__ attribute. We check for "." to avoid
+            # rewriting these.
+            if hasattr(obj, "__name__") and "." not in obj.__name__:
+                obj.__name__ = name
+                obj.__qualname__ = qualname
+            if isinstance(obj, type):
+                for attr_name, attr_value in obj.__dict__.items():
+                    fix_one(objname + "." + attr_name, attr_name, attr_value)
+
+    if keys is None:
+        keys = namespace.keys()
+    for objname in keys:
+        if not objname.startswith("_"):
+            obj = namespace[objname]
+            fix_one(objname, objname, obj) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/events.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/events.py new file mode 100644 index 0000000000000000000000000000000000000000..cd059b49742f943f50fa530bb4519cb7ee0a9bb9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/events.py @@ -0,0 +1,534 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import datetime
+import json
+import logging
+import os
+import time
+from collections import defaultdict
+from contextlib import contextmanager
+from typing import Optional
+import torch
+from fvcore.common.history_buffer import HistoryBuffer
+
+from custom_detectron2.utils.file_io import PathManager
+
+__all__ = [
+    "get_event_storage",
+    "JSONWriter",
+    "TensorboardXWriter",
+    "CommonMetricPrinter",
+    "EventStorage",
+]
+
+_CURRENT_STORAGE_STACK = []
+
+
+def get_event_storage():
+    """
+    Returns:
+        The :class:`EventStorage` object that's currently being used.
+        Throws an error if no :class:`EventStorage` is currently enabled.
+    """
+    assert len(
+        _CURRENT_STORAGE_STACK
+    ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
+    return _CURRENT_STORAGE_STACK[-1]
+
+
+class EventWriter:
+    """
+    Base class for writers that obtain events from :class:`EventStorage` and process them.
+    """
+
+    def write(self):
+        raise NotImplementedError
+
+    def close(self):
+        pass
+
+
+class JSONWriter(EventWriter):
+    """
+    Write scalars to a json file.
+
+    It saves scalars as one json per line (instead of a big json) for easy parsing.
+ + Examples parsing such a json file: + :: + $ cat metrics.json | jq -s '.[0:2]' + [ + { + "data_time": 0.008433341979980469, + "iteration": 19, + "loss": 1.9228371381759644, + "loss_box_reg": 0.050025828182697296, + "loss_classifier": 0.5316952466964722, + "loss_mask": 0.7236229181289673, + "loss_rpn_box": 0.0856662318110466, + "loss_rpn_cls": 0.48198649287223816, + "lr": 0.007173333333333333, + "time": 0.25401854515075684 + }, + { + "data_time": 0.007216215133666992, + "iteration": 39, + "loss": 1.282649278640747, + "loss_box_reg": 0.06222952902317047, + "loss_classifier": 0.30682939291000366, + "loss_mask": 0.6970193982124329, + "loss_rpn_box": 0.038663312792778015, + "loss_rpn_cls": 0.1471673548221588, + "lr": 0.007706666666666667, + "time": 0.2490077018737793 + } + ] + + $ cat metrics.json | jq '.loss_mask' + 0.7126231789588928 + 0.689423680305481 + 0.6776131987571716 + ... + + """ + + def __init__(self, json_file, window_size=20): + """ + Args: + json_file (str): path to the json file. New data will be appended if the file exists. + window_size (int): the window size of median smoothing for the scalars whose + `smoothing_hint` are True. + """ + self._file_handle = PathManager.open(json_file, "a") + self._window_size = window_size + self._last_write = -1 + + def write(self): + storage = get_event_storage() + to_save = defaultdict(dict) + + for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): + # keep scalars that have not been written + if iter <= self._last_write: + continue + to_save[iter][k] = v + if len(to_save): + all_iters = sorted(to_save.keys()) + self._last_write = max(all_iters) + + for itr, scalars_per_iter in to_save.items(): + scalars_per_iter["iteration"] = itr + self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") + self._file_handle.flush() + try: + os.fsync(self._file_handle.fileno()) + except AttributeError: + pass + + def close(self): + self._file_handle.close() + + +class TensorboardXWriter(EventWriter): + """ + Write all scalars to a tensorboard file. + """ + + def __init__(self, log_dir: str, window_size: int = 20, **kwargs): + """ + Args: + log_dir (str): the directory to save the output events + window_size (int): the scalars will be median-smoothed by this window size + + kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` + """ + self._window_size = window_size + from torch.utils.tensorboard import SummaryWriter + + self._writer = SummaryWriter(log_dir, **kwargs) + self._last_write = -1 + + def write(self): + storage = get_event_storage() + new_last_write = self._last_write + for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): + if iter > self._last_write: + self._writer.add_scalar(k, v, iter) + new_last_write = max(new_last_write, iter) + self._last_write = new_last_write + + # storage.put_{image,histogram} is only meant to be used by + # tensorboard writer. So we access its internal fields directly from here. + if len(storage._vis_data) >= 1: + for img_name, img, step_num in storage._vis_data: + self._writer.add_image(img_name, img, step_num) + # Storage stores all image data and rely on this writer to clear them. + # As a result it assumes only one writer will use its image data. + # An alternative design is to let storage store limited recent + # data (e.g. only the most recent image) that all writers can access. + # In that case a writer may not see all image data if its period is long. 
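+            # Consequence: if two TensorboardXWriter instances are attached,
+            # whichever one runs first consumes the image data, and the other
+            # sees none, because clear_images() below empties the storage.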
+ storage.clear_images() + + if len(storage._histograms) >= 1: + for params in storage._histograms: + self._writer.add_histogram_raw(**params) + storage.clear_histograms() + + def close(self): + if hasattr(self, "_writer"): # doesn't exist when the code fails at import + self._writer.close() + + +class CommonMetricPrinter(EventWriter): + """ + Print **common** metrics to the terminal, including + iteration time, ETA, memory, all losses, and the learning rate. + It also applies smoothing using a window of 20 elements. + + It's meant to print common metrics in common ways. + To print something in more customized ways, please implement a similar printer by yourself. + """ + + def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): + """ + Args: + max_iter: the maximum number of iterations to train. + Used to compute ETA. If not given, ETA will not be printed. + window_size (int): the losses will be median-smoothed by this window size + """ + self.logger = logging.getLogger(__name__) + self._max_iter = max_iter + self._window_size = window_size + self._last_write = None # (step, time) of last call to write(). Used to compute ETA + + def _get_eta(self, storage) -> Optional[str]: + if self._max_iter is None: + return "" + iteration = storage.iter + try: + eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1) + storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) + return str(datetime.timedelta(seconds=int(eta_seconds))) + except KeyError: + # estimate eta on our own - more noisy + eta_string = None + if self._last_write is not None: + estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( + iteration - self._last_write[0] + ) + eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + self._last_write = (iteration, time.perf_counter()) + return eta_string + + def write(self): + storage = get_event_storage() + iteration = storage.iter + if iteration == self._max_iter: + # This hook only reports training progress (loss, ETA, etc) but not other data, + # therefore do not write anything after training succeeds, even if this method + # is called. 
+ return + + try: + avg_data_time = storage.history("data_time").avg( + storage.count_samples("data_time", self._window_size) + ) + last_data_time = storage.history("data_time").latest() + except KeyError: + # they may not exist in the first few iterations (due to warmup) + # or when SimpleTrainer is not used + avg_data_time = None + last_data_time = None + try: + avg_iter_time = storage.history("time").global_avg() + last_iter_time = storage.history("time").latest() + except KeyError: + avg_iter_time = None + last_iter_time = None + try: + lr = "{:.5g}".format(storage.history("lr").latest()) + except KeyError: + lr = "N/A" + + eta_string = self._get_eta(storage) + + if torch.cuda.is_available(): + max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + else: + max_mem_mb = None + + # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" + self.logger.info( + str.format( + " {eta}iter: {iter} {losses} {non_losses} {avg_time}{last_time}" + + "{avg_data_time}{last_data_time} lr: {lr} {memory}", + eta=f"eta: {eta_string} " if eta_string else "", + iter=iteration, + losses=" ".join( + [ + "{}: {:.4g}".format( + k, v.median(storage.count_samples(k, self._window_size)) + ) + for k, v in storage.histories().items() + if "loss" in k + ] + ), + non_losses=" ".join( + [ + "{}: {:.4g}".format( + k, v.median(storage.count_samples(k, self._window_size)) + ) + for k, v in storage.histories().items() + if "[metric]" in k + ] + ), + avg_time="time: {:.4f} ".format(avg_iter_time) + if avg_iter_time is not None + else "", + last_time="last_time: {:.4f} ".format(last_iter_time) + if last_iter_time is not None + else "", + avg_data_time="data_time: {:.4f} ".format(avg_data_time) + if avg_data_time is not None + else "", + last_data_time="last_data_time: {:.4f} ".format(last_data_time) + if last_data_time is not None + else "", + lr=lr, + memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", + ) + ) + + +class EventStorage: + """ + The user-facing class that provides metric storage functionalities. + + In the future we may add support for storing / logging other types of data if needed. + """ + + def __init__(self, start_iter=0): + """ + Args: + start_iter (int): the iteration number to start with + """ + self._history = defaultdict(HistoryBuffer) + self._smoothing_hints = {} + self._latest_scalars = {} + self._iter = start_iter + self._current_prefix = "" + self._vis_data = [] + self._histograms = [] + + def put_image(self, img_name, img_tensor): + """ + Add an `img_tensor` associated with `img_name`, to be shown on + tensorboard. + + Args: + img_name (str): The name of the image to put into tensorboard. + img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` + Tensor of shape `[channel, height, width]` where `channel` is + 3. The image format should be RGB. The elements in img_tensor + can either have values in [0, 1] (float32) or [0, 255] (uint8). + The `img_tensor` will be visualized in tensorboard. + """ + self._vis_data.append((img_name, img_tensor, self._iter)) + + def put_scalar(self, name, value, smoothing_hint=True): + """ + Add a scalar `value` to the `HistoryBuffer` associated with `name`. + + Args: + smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be + smoothed when logged. The hint will be accessible through + :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint + and apply custom smoothing rule. + + It defaults to True because most scalars we save need to be smoothed to + provide any useful signal. 
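+
+        Example (illustrative; ``loss_value`` is a placeholder float):
+        ::
+            storage = get_event_storage()
+            storage.put_scalar("total_loss", loss_value, smoothing_hint=True)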
+ """ + name = self._current_prefix + name + history = self._history[name] + value = float(value) + history.update(value, self._iter) + self._latest_scalars[name] = (value, self._iter) + + existing_hint = self._smoothing_hints.get(name) + if existing_hint is not None: + assert ( + existing_hint == smoothing_hint + ), "Scalar {} was put with a different smoothing_hint!".format(name) + else: + self._smoothing_hints[name] = smoothing_hint + + def put_scalars(self, *, smoothing_hint=True, **kwargs): + """ + Put multiple scalars from keyword arguments. + + Examples: + + storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) + """ + for k, v in kwargs.items(): + self.put_scalar(k, v, smoothing_hint=smoothing_hint) + + def put_histogram(self, hist_name, hist_tensor, bins=1000): + """ + Create a histogram from a tensor. + + Args: + hist_name (str): The name of the histogram to put into tensorboard. + hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted + into a histogram. + bins (int): Number of histogram bins. + """ + ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() + + # Create a histogram with PyTorch + hist_counts = torch.histc(hist_tensor, bins=bins) + hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) + + # Parameter for the add_histogram_raw function of SummaryWriter + hist_params = dict( + tag=hist_name, + min=ht_min, + max=ht_max, + num=len(hist_tensor), + sum=float(hist_tensor.sum()), + sum_squares=float(torch.sum(hist_tensor**2)), + bucket_limits=hist_edges[1:].tolist(), + bucket_counts=hist_counts.tolist(), + global_step=self._iter, + ) + self._histograms.append(hist_params) + + def history(self, name): + """ + Returns: + HistoryBuffer: the scalar history for name + """ + ret = self._history.get(name, None) + if ret is None: + raise KeyError("No history metric available for {}!".format(name)) + return ret + + def histories(self): + """ + Returns: + dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars + """ + return self._history + + def latest(self): + """ + Returns: + dict[str -> (float, int)]: mapping from the name of each scalar to the most + recent value and the iteration number its added. + """ + return self._latest_scalars + + def latest_with_smoothing_hint(self, window_size=20): + """ + Similar to :meth:`latest`, but the returned values + are either the un-smoothed original latest value, + or a median of the given window_size, + depend on whether the smoothing_hint is True. + + This provides a default behavior that other writers can use. + + Note: All scalars saved in the past `window_size` iterations are used for smoothing. + This is different from the `window_size` definition in HistoryBuffer. + Use :meth:`get_history_window_size` to get the `window_size` used in HistoryBuffer. + """ + result = {} + for k, (v, itr) in self._latest_scalars.items(): + result[k] = ( + self._history[k].median(self.count_samples(k, window_size)) + if self._smoothing_hints[k] + else v, + itr, + ) + return result + + def count_samples(self, name, window_size=20): + """ + Return the number of samples logged in the past `window_size` iterations. + """ + samples = 0 + data = self._history[name].values() + for _, iter_ in reversed(data): + if iter_ > data[-1][1] - window_size: + samples += 1 + else: + break + return samples + + def smoothing_hints(self): + """ + Returns: + dict[name -> bool]: the user-provided hint on whether the scalar + is noisy and needs smoothing. 
+ """ + return self._smoothing_hints + + def step(self): + """ + User should either: (1) Call this function to increment storage.iter when needed. Or + (2) Set `storage.iter` to the correct iteration number before each iteration. + + The storage will then be able to associate the new data with an iteration number. + """ + self._iter += 1 + + @property + def iter(self): + """ + Returns: + int: The current iteration number. When used together with a trainer, + this is ensured to be the same as trainer.iter. + """ + return self._iter + + @iter.setter + def iter(self, val): + self._iter = int(val) + + @property + def iteration(self): + # for backward compatibility + return self._iter + + def __enter__(self): + _CURRENT_STORAGE_STACK.append(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert _CURRENT_STORAGE_STACK[-1] == self + _CURRENT_STORAGE_STACK.pop() + + @contextmanager + def name_scope(self, name): + """ + Yields: + A context within which all the events added to this storage + will be prefixed by the name scope. + """ + old_prefix = self._current_prefix + self._current_prefix = name.rstrip("/") + "/" + yield + self._current_prefix = old_prefix + + def clear_images(self): + """ + Delete all the stored images for visualization. This should be called + after images are written to tensorboard. + """ + self._vis_data = [] + + def clear_histograms(self): + """ + Delete all the stored histograms for visualization. + This should be called after histograms are written to tensorboard. + """ + self._histograms = [] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/file_io.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/file_io.py new file mode 100644 index 0000000000000000000000000000000000000000..09f7dffdb36199350bba57bd3b4e9e8babb40594 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/file_io.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler +from iopath.common.file_io import PathManager as PathManagerBase + +__all__ = ["PathManager", "PathHandler"] + + +PathManager = PathManagerBase() +""" +This is a detectron2 project-specific PathManager. +We try to stay away from global PathManager in fvcore as it +introduces potential conflicts among other libraries. +""" + + +class Detectron2Handler(PathHandler): + """ + Resolve anything that's hosted under detectron2's namespace. 
+ """ + + PREFIX = "detectron2://" + S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path, **kwargs): + name = path[len(self.PREFIX) :] + return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs) + + def _open(self, path, mode="r", **kwargs): + return PathManager.open( + self.S3_DETECTRON2_PREFIX + path[len(self.PREFIX) :], mode, **kwargs + ) + + +PathManager.register_handler(HTTPURLHandler()) +PathManager.register_handler(OneDrivePathHandler()) +PathManager.register_handler(Detectron2Handler()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/logger.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..1bce0439512808c3cb31147edcc664485ae9e8fe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/logger.py @@ -0,0 +1,237 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import atexit +import functools +import logging +import os +import sys +import time +from collections import Counter +import torch +from tabulate import tabulate +from termcolor import colored + +from custom_detectron2.utils.file_io import PathManager + +__all__ = ["setup_logger", "log_first_n", "log_every_n", "log_every_n_seconds"] + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." + super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers +def setup_logger( + output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None +): + """ + Initialize the detectron2 logger and set its verbosity level to "DEBUG". + + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + abbrev_name (str): an abbreviation of the module, to avoid long names in logs. + Set to "" to not log the root module in logs. + By default, will abbreviate "detectron2" to "d2" and leave other + modules unchanged. 
+ + Returns: + logging.Logger: a logger + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + if abbrev_name is None: + abbrev_name = "d2" if name == "detectron2" else name + + plain_formatter = logging.Formatter( + "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" + ) + # stdout logging: master only + if distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + ".rank{}".format(distributed_rank) + PathManager.mkdirs(os.path.dirname(filename)) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + # use 1K buffer if writing to cloud storage + io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1) + atexit.register(io.close) + return io + + +""" +Below are some other convenient logging methods. +They are mainly adopted from +https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py +""" + + +def _find_caller(): + """ + Returns: + str: module name of the caller + tuple: a hashable key to be used to identify different callers + """ + frame = sys._getframe(2) + while frame: + code = frame.f_code + if os.path.join("utils", "logger.") not in code.co_filename: + mod_name = frame.f_globals["__name__"] + if mod_name == "__main__": + mod_name = "detectron2" + return mod_name, (code.co_filename, frame.f_lineno, code.co_name) + frame = frame.f_back + + +_LOG_COUNTER = Counter() +_LOG_TIMER = {} + + +def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): + """ + Log only for the first n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + key (str or tuple[str]): the string(s) can be one of "caller" or + "message", which defines how to identify duplicated logs. + For example, if called with `n=1, key="caller"`, this function + will only log the first call from the same caller, regardless of + the message content. + If called with `n=1, key="message"`, this function will log the + same content only once, even if they are called from different places. + If called with `n=1, key=("caller", "message")`, this function + will not log only if the same caller has logged the same message before. 
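+
+    Example (illustrative):
+    ::
+        log_first_n(logging.WARNING, "this message is logged at most once", n=1, key="message")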
+ """ + if isinstance(key, str): + key = (key,) + assert len(key) > 0 + + caller_module, caller_key = _find_caller() + hash_key = () + if "caller" in key: + hash_key = hash_key + caller_key + if "message" in key: + hash_key = hash_key + (msg,) + + _LOG_COUNTER[hash_key] += 1 + if _LOG_COUNTER[hash_key] <= n: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n(lvl, msg, n=1, *, name=None): + """ + Log once per n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + _LOG_COUNTER[key] += 1 + if n == 1 or _LOG_COUNTER[key] % n == 1: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n_seconds(lvl, msg, n=1, *, name=None): + """ + Log no more than once per n seconds. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + last_logged = _LOG_TIMER.get(key, None) + current_time = time.time() + if last_logged is None or current_time - last_logged >= n: + logging.getLogger(name or caller_module).log(lvl, msg) + _LOG_TIMER[key] = current_time + + +def create_small_table(small_dict): + """ + Create a small table using the keys of small_dict as headers. This is only + suitable for small dictionaries. + + Args: + small_dict (dict): a result dictionary of only a few items. + + Returns: + str: the table as a string. + """ + keys, values = tuple(zip(*small_dict.items())) + table = tabulate( + [values], + headers=keys, + tablefmt="pipe", + floatfmt=".3f", + stralign="center", + numalign="center", + ) + return table + + +def _log_api_usage(identifier: str): + """ + Internal function used to log the usage of different detectron2 components + inside facebook's infra. + """ + torch._C._log_api_usage_once("detectron2." + identifier) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/memory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..bd494780b9dbbd1571688cd270bb9b53d113c13e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/memory.py @@ -0,0 +1,84 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +from contextlib import contextmanager +from functools import wraps +import torch + +__all__ = ["retry_if_cuda_oom"] + + +@contextmanager +def _ignore_torch_cuda_oom(): + """ + A context which ignores CUDA OOM exception from pytorch. + """ + try: + yield + except RuntimeError as e: + # NOTE: the string may change? + if "CUDA out of memory. " in str(e): + pass + else: + raise + + +def retry_if_cuda_oom(func): + """ + Makes a function retry itself after encountering + pytorch's CUDA OOM error. + It will first retry after calling `torch.cuda.empty_cache()`. + + If that still fails, it will then retry by trying to convert inputs to CPUs. + In this case, it expects the function to dispatch to CPU implementation. + The return values may become CPU tensors as well and it's user's + responsibility to convert it back to CUDA tensor if needed. + + Args: + func: a stateless callable that takes tensor-like objects as arguments + + Returns: + a callable which retries `func` if OOM is encountered. 
+
+    Examples:
+    ::
+        output = retry_if_cuda_oom(some_torch_function)(input1, input2)
+        # output may be on CPU even if inputs are on GPU
+
+    Note:
+        1. When converting inputs to CPU, it will only look at each argument and check
+           if it has `.device` and `.to` for conversion. Nested structures of tensors
+           are not supported.
+
+        2. Since the function might be called more than once, it has to be
+           stateless.
+    """
+
+    def maybe_to_cpu(x):
+        try:
+            like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
+        except AttributeError:
+            like_gpu_tensor = False
+        if like_gpu_tensor:
+            return x.to(device="cpu")
+        else:
+            return x
+
+    @wraps(func)
+    def wrapped(*args, **kwargs):
+        with _ignore_torch_cuda_oom():
+            return func(*args, **kwargs)
+
+        # Clear cache and retry
+        torch.cuda.empty_cache()
+        with _ignore_torch_cuda_oom():
+            return func(*args, **kwargs)
+
+        # Try on CPU. This slows down the code significantly, therefore print a notice.
+        logger = logging.getLogger(__name__)
+        logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
+        new_args = (maybe_to_cpu(x) for x in args)
+        new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
+        return func(*new_args, **new_kwargs)
+
+    return wrapped diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/registry.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..4b01e9007c2578a7b5ae555c926cc06c8a3010f9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/registry.py @@ -0,0 +1,60 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+import pydoc
+from fvcore.common.registry import Registry  # for backward compatibility.
+
+"""
+``Registry`` and ``locate`` provide ways to map a string (typically found
+in config files) to callable objects.
+"""
+
+__all__ = ["Registry", "locate"]
+
+
+def _convert_target_to_string(t: Any) -> str:
+    """
+    Inverse of ``locate()``.
+
+    Args:
+        t: any object with ``__module__`` and ``__qualname__``
+    """
+    module, qualname = t.__module__, t.__qualname__
+
+    # Compress the path to this object, e.g. ``module.submodule._impl.class``
+    # may become ``module.submodule.class``, if the latter also resolves to the same
+    # object. This simplifies the string, and is also less affected by moving the
+    # class implementation.
+    module_parts = module.split(".")
+    for k in range(1, len(module_parts)):
+        prefix = ".".join(module_parts[:k])
+        candidate = f"{prefix}.{qualname}"
+        try:
+            if locate(candidate) is t:
+                return candidate
+        except ImportError:
+            pass
+    return f"{module}.{qualname}"
+
+
+def locate(name: str) -> Any:
+    """
+    Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
+    such as "module.submodule.class_name".
+
+    Raise Exception if it cannot be found.
+    """
+    obj = pydoc.locate(name)
+
+    # Some cases (e.g. torch.optim.sgd.SGD) are not handled correctly
+    # by pydoc.locate. Try a private function from hydra.
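+    # (Best-effort fallback: ``_locate`` is private to hydra and its import
+    # path may change across hydra versions, hence the guarded import below.)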
+ if obj is None: + try: + # from hydra.utils import get_method - will print many errors + from hydra.utils import _locate + except ImportError as e: + raise ImportError(f"Cannot dynamically locate object {name}!") from e + else: + obj = _locate(name) # it raises if fails + + return obj diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/serialize.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/serialize.py new file mode 100644 index 0000000000000000000000000000000000000000..ed45065184f0512ef65c8f38d398de553ce576ca --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/serialize.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# import cloudpickle + + +class PicklableWrapper(object): + """ + Wrap an object to make it more picklable, note that it uses + heavy weight serialization libraries that are slower than pickle. + It's best to use it only on closures (which are usually not picklable). + + This is a simplified version of + https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py + """ + + def __init__(self, obj): + while isinstance(obj, PicklableWrapper): + # Wrapping an object twice is no-op + obj = obj._obj + self._obj = obj + + # def __reduce__(self): + # s = cloudpickle.dumps(self._obj) + # return cloudpickle.loads, (s,) + + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seamlessly as the previous object. + if attr not in ["_obj"]: + return getattr(self._obj, attr) + return getattr(self, attr) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/testing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..b8f31836f6d702228e1661457c5647f1c908cf21 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/testing.py @@ -0,0 +1,478 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import io +import numpy as np +import os +import re +import tempfile +import unittest +from typing import Callable +import torch +import torch.onnx.symbolic_helper as sym_help +from packaging import version +from torch._C import ListType +from torch.onnx import register_custom_op_symbolic + +from custom_detectron2 import model_zoo +from custom_detectron2.config import CfgNode, LazyConfig, instantiate +from custom_detectron2.data import DatasetCatalog +from custom_detectron2.data.detection_utils import read_image +from custom_detectron2.modeling import build_model +from custom_detectron2.structures import Boxes, Instances, ROIMasks +from custom_detectron2.utils.file_io import PathManager + + +""" +Internal utilities for tests. Don't use except for writing tests. +""" + + +def get_model_no_weights(config_path): + """ + Like model_zoo.get, but do not load any weights (even pretrained) + """ + cfg = model_zoo.get_config(config_path) + if isinstance(cfg, CfgNode): + if not torch.cuda.is_available(): + cfg.MODEL.DEVICE = "cpu" + return build_model(cfg) + else: + return instantiate(cfg.model) + + +def random_boxes(num_boxes, max_coord=100, device="cpu"): + """ + Create a random Nx4 boxes tensor, with coordinates < max_coord. 
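+
+    Example (shapes only; the values themselves are random):
+    ::
+        boxes = random_boxes(5)  # (5, 4) float tensor, XYXY with x2 >= x1 and y2 >= y1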
+ """ + boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5) + boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression + # Note: the implementation of this function in torchvision is: + # boxes[:, 2:] += torch.rand(N, 2) * 100 + # but it does not guarantee non-negative widths/heights constraints: + # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: + boxes[:, 2:] += boxes[:, :2] + return boxes + + +def get_sample_coco_image(tensor=True): + """ + Args: + tensor (bool): if True, returns 3xHxW tensor. + else, returns a HxWx3 numpy array. + + Returns: + an image, in BGR color. + """ + try: + file_name = DatasetCatalog.get("coco_2017_val_100")[0]["file_name"] + if not PathManager.exists(file_name): + raise FileNotFoundError() + except IOError: + # for public CI to run + file_name = PathManager.get_local_path( + "http://images.cocodataset.org/train2017/000000000009.jpg" + ) + ret = read_image(file_name, format="BGR") + if tensor: + ret = torch.from_numpy(np.ascontiguousarray(ret.transpose(2, 0, 1))) + return ret + + +def convert_scripted_instances(instances): + """ + Convert a scripted Instances object to a regular :class:`Instances` object + """ + assert hasattr( + instances, "image_size" + ), f"Expect an Instances object, but got {type(instances)}!" + ret = Instances(instances.image_size) + for name in instances._field_names: + val = getattr(instances, "_" + name, None) + if val is not None: + ret.set(name, val) + return ret + + +def assert_instances_allclose(input, other, *, rtol=1e-5, msg="", size_as_tensor=False): + """ + Args: + input, other (Instances): + size_as_tensor: compare image_size of the Instances as tensors (instead of tuples). + Useful for comparing outputs of tracing. + """ + if not isinstance(input, Instances): + input = convert_scripted_instances(input) + if not isinstance(other, Instances): + other = convert_scripted_instances(other) + + if not msg: + msg = "Two Instances are different! " + else: + msg = msg.rstrip() + " " + + size_error_msg = msg + f"image_size is {input.image_size} vs. {other.image_size}!" + if size_as_tensor: + assert torch.equal( + torch.tensor(input.image_size), torch.tensor(other.image_size) + ), size_error_msg + else: + assert input.image_size == other.image_size, size_error_msg + fields = sorted(input.get_fields().keys()) + fields_other = sorted(other.get_fields().keys()) + assert fields == fields_other, msg + f"Fields are {fields} vs {fields_other}!" + + for f in fields: + val1, val2 = input.get(f), other.get(f) + if isinstance(val1, (Boxes, ROIMasks)): + # boxes in the range of O(100) and can have a larger tolerance + assert torch.allclose(val1.tensor, val2.tensor, atol=100 * rtol), ( + msg + f"Field {f} differs too much!" + ) + elif isinstance(val1, torch.Tensor): + if val1.dtype.is_floating_point: + mag = torch.abs(val1).max().cpu().item() + assert torch.allclose(val1, val2, atol=mag * rtol), ( + msg + f"Field {f} differs too much!" + ) + else: + assert torch.equal(val1, val2), msg + f"Field {f} is different!" + else: + raise ValueError(f"Don't know how to compare type {type(val1)}") + + +def reload_script_model(module): + """ + Save a jit module and load it back. + Similar to the `getExportImportCopy` function in torch/testing/ + """ + buffer = io.BytesIO() + torch.jit.save(module, buffer) + buffer.seek(0) + return torch.jit.load(buffer) + + +def reload_lazy_config(cfg): + """ + Save an object by LazyConfig.save and load it back. 
+    This is used to test that a config still works the same after
+    serialization/deserialization.
+    """
+    with tempfile.TemporaryDirectory(prefix="detectron2") as d:
+        fname = os.path.join(d, "d2_cfg_test.yaml")
+        LazyConfig.save(cfg, fname)
+        return LazyConfig.load(fname)
+
+
+def min_torch_version(min_version: str) -> bool:
+    """
+    Returns True when torch's version is at least `min_version`.
+    """
+    try:
+        import torch
+    except ImportError:
+        return False
+
+    installed_version = version.parse(torch.__version__.split("+")[0])
+    min_version = version.parse(min_version)
+    return installed_version >= min_version
+
+
+def has_dynamic_axes(onnx_model):
+    """
+    Return True when all ONNX inputs/outputs have only dynamic axes for all ranks.
+    """
+    return all(
+        not dim.dim_param.isnumeric()
+        for inp in onnx_model.graph.input
+        for dim in inp.type.tensor_type.shape.dim
+    ) and all(
+        not dim.dim_param.isnumeric()
+        for out in onnx_model.graph.output
+        for dim in out.type.tensor_type.shape.dim
+    )
+
+
+def register_custom_op_onnx_export(
+    opname: str, symbolic_fn: Callable, opset_version: int, min_version: str
+) -> None:
+    """
+    Register `symbolic_fn` as PyTorch's symbolic `opname`-`opset_version` for ONNX export.
+    The registration is performed only when the current PyTorch version is < `min_version`.
+    IMPORTANT: the symbolic must be manually unregistered after the caller function returns.
+    """
+    if min_torch_version(min_version):
+        return
+    register_custom_op_symbolic(opname, symbolic_fn, opset_version)
+    print(f"_register_custom_op_onnx_export({opname}, {opset_version}) succeeded.")
+
+
+def unregister_custom_op_onnx_export(opname: str, opset_version: int, min_version: str) -> None:
+    """
+    Unregister PyTorch's symbolic `opname`-`opset_version` for ONNX export.
+    The un-registration is performed only when the current PyTorch version is < `min_version`.
+    IMPORTANT: The symbolic must have been manually registered by the caller, otherwise
+    the incorrect symbolic may be unregistered instead.
+    """
+
+    # TODO: _unregister_custom_op_symbolic is introduced in PyTorch >= 1.10
+    # Remove after PyTorch 1.10+ is used by ALL detectron2's CI
+    try:
+        from torch.onnx import unregister_custom_op_symbolic as _unregister_custom_op_symbolic
+    except ImportError:
+
+        def _unregister_custom_op_symbolic(symbolic_name, opset_version):
+            import torch.onnx.symbolic_registry as sym_registry
+            from torch.onnx.symbolic_helper import _onnx_main_opset, _onnx_stable_opsets
+
+            def _get_ns_op_name_from_custom_op(symbolic_name):
+                try:
+                    from torch.onnx.utils import get_ns_op_name_from_custom_op
+
+                    ns, op_name = get_ns_op_name_from_custom_op(symbolic_name)
+                except ImportError as import_error:
+                    if not bool(
+                        re.match(r"^[a-zA-Z0-9-_]*::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name)
+                    ):
+                        raise ValueError(
+                            f"Invalid symbolic name {symbolic_name}. Must be `domain::name`"
+                        ) from import_error
+
+                    ns, op_name = symbolic_name.split("::")
+                    if ns == "onnx":
+                        raise ValueError(f"{ns} domain cannot be modified.") from import_error
+
+                    if ns == "aten":
+                        ns = ""
+
+                return ns, op_name
+
+            def _unregister_op(opname: str, domain: str, version: int):
+                try:
+                    # Use the parameters of this helper, not the enclosing scope's names.
+                    sym_registry.unregister_op(opname, domain, version)
+                except AttributeError as attribute_error:
+                    if sym_registry.is_registered_op(opname, domain, version):
+                        del sym_registry._registry[(domain, version)][opname]
+                        if not sym_registry._registry[(domain, version)]:
+                            del sym_registry._registry[(domain, version)]
+                    else:
+                        raise RuntimeError(
+                            f"The opname {opname} is not registered."
+ ) from attribute_error + + ns, op_name = _get_ns_op_name_from_custom_op(symbolic_name) + for ver in _onnx_stable_opsets + [_onnx_main_opset]: + if ver >= opset_version: + _unregister_op(op_name, ns, ver) + + if min_torch_version(min_version): + return + _unregister_custom_op_symbolic(opname, opset_version) + print(f"_unregister_custom_op_onnx_export({opname}, {opset_version}) succeeded.") + + +skipIfOnCPUCI = unittest.skipIf( + os.environ.get("CI") and not torch.cuda.is_available(), + "The test is too slow on CPUs and will be executed on CircleCI's GPU jobs.", +) + + +def skipIfUnsupportedMinOpsetVersion(min_opset_version, current_opset_version=None): + """ + Skips tests for ONNX Opset versions older than min_opset_version. + """ + + def skip_dec(func): + def wrapper(self): + try: + opset_version = self.opset_version + except AttributeError: + opset_version = current_opset_version + if opset_version < min_opset_version: + raise unittest.SkipTest( + f"Unsupported opset_version {opset_version}" + f", required is {min_opset_version}" + ) + return func(self) + + return wrapper + + return skip_dec + + +def skipIfUnsupportedMinTorchVersion(min_version): + """ + Skips tests for PyTorch versions older than min_version. + """ + reason = f"module 'torch' has __version__ {torch.__version__}" f", required is: {min_version}" + return unittest.skipIf(not min_torch_version(min_version), reason) + + +# TODO: Remove after PyTorch 1.11.1+ is used by detectron2's CI +def _pytorch1111_symbolic_opset9_to(g, self, *args): + """aten::to() symbolic that must be used for testing with PyTorch < 1.11.1.""" + + def is_aten_to_device_only(args): + if len(args) == 4: + # aten::to(Tensor, Device, bool, bool, memory_format) + return ( + args[0].node().kind() == "prim::device" + or args[0].type().isSubtypeOf(ListType.ofInts()) + or ( + sym_help._is_value(args[0]) + and args[0].node().kind() == "onnx::Constant" + and isinstance(args[0].node()["value"], str) + ) + ) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = sym_help._get_const(args[1], "i", "dtype") + return dtype is None + elif len(args) in (6, 7): + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) + # When dtype is None, this is a aten::to(device) call + dtype = sym_help._get_const(args[0], "i", "dtype") + return dtype is None + return False + + # ONNX doesn't have a concept of a device, so we ignore device-only casts + if is_aten_to_device_only(args): + return self + + if len(args) == 4: + # TestONNXRuntime::test_ones_bool shows args[0] of aten::to can be onnx::Constant[Tensor] + # In this case, the constant value is a tensor not int, + # so sym_help._maybe_get_const(args[0], 'i') would not work. 
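+        # Read the constant node's value directly: a 0-dim tensor is converted to a
+        # Python int (the scalar-type enum), while any other value is kept as-is and
+        # resolved by the type checks below.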
+ dtype = args[0] + if sym_help._is_value(args[0]) and args[0].node().kind() == "onnx::Constant": + tval = args[0].node()["value"] + if isinstance(tval, torch.Tensor): + if len(tval.shape) == 0: + tval = tval.item() + dtype = int(tval) + else: + dtype = tval + + if sym_help._is_value(dtype) or isinstance(dtype, torch.Tensor): + # aten::to(Tensor, Tensor, bool, bool, memory_format) + dtype = args[0].type().scalarType() + return g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[dtype]) + else: + # aten::to(Tensor, ScalarType, bool, bool, memory_format) + # memory_format is ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 5: + # aten::to(Tensor, Device, ScalarType, bool, bool, memory_format) + dtype = sym_help._get_const(args[1], "i", "dtype") + # memory_format is ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 6: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) + dtype = sym_help._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + elif len(args) == 7: + # aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) + dtype = sym_help._get_const(args[0], "i", "dtype") + # Layout, device and memory_format are ignored + return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype]) + else: + return sym_help._onnx_unsupported("Unknown aten::to signature") + + +# TODO: Remove after PyTorch 1.11.1+ is used by detectron2's CI +def _pytorch1111_symbolic_opset9_repeat_interleave(g, self, repeats, dim=None, output_size=None): + + # from torch.onnx.symbolic_helper import ScalarType + from torch.onnx.symbolic_opset9 import expand, unsqueeze + + input = self + # if dim is None flatten + # By default, use the flattened input array, and return a flat output array + if sym_help._is_none(dim): + input = sym_help._reshape_helper(g, self, g.op("Constant", value_t=torch.tensor([-1]))) + dim = 0 + else: + dim = sym_help._maybe_get_scalar(dim) + + repeats_dim = sym_help._get_tensor_rank(repeats) + repeats_sizes = sym_help._get_tensor_sizes(repeats) + input_sizes = sym_help._get_tensor_sizes(input) + if repeats_dim is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "repeats rank." + ) + if repeats_sizes is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "repeats size." + ) + if input_sizes is None: + raise RuntimeError( + "Unsupported: ONNX export of repeat_interleave for unknown " "input size." 
+ ) + + input_sizes_temp = input_sizes.copy() + for idx, input_size in enumerate(input_sizes): + if input_size is None: + input_sizes[idx], input_sizes_temp[idx] = 0, -1 + + # Cases where repeats is an int or single value tensor + if repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1): + if not sym_help._is_tensor(repeats): + repeats = g.op("Constant", value_t=torch.LongTensor(repeats)) + if input_sizes[dim] == 0: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + ) + else: + reps = input_sizes[dim] + repeats = expand(g, repeats, g.op("Constant", value_t=torch.tensor([reps])), None) + + # Cases where repeats is a 1 dim Tensor + elif repeats_dim == 1: + if input_sizes[dim] == 0: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", + 9, + 13, + "Unsupported along dimension with unknown input size", + ) + if repeats_sizes[0] is None: + return sym_help._onnx_opset_unsupported_detailed( + "repeat_interleave", 9, 13, "Unsupported for cases with dynamic repeats" + ) + assert ( + repeats_sizes[0] == input_sizes[dim] + ), "repeats must have the same size as input along dim" + reps = repeats_sizes[0] + else: + raise RuntimeError("repeats must be 0-dim or 1-dim tensor") + + final_splits = list() + r_splits = sym_help._repeat_interleave_split_helper(g, repeats, reps, 0) + if isinstance(r_splits, torch._C.Value): + r_splits = [r_splits] + i_splits = sym_help._repeat_interleave_split_helper(g, input, reps, dim) + if isinstance(i_splits, torch._C.Value): + i_splits = [i_splits] + input_sizes[dim], input_sizes_temp[dim] = -1, 1 + for idx, r_split in enumerate(r_splits): + i_split = unsqueeze(g, i_splits[idx], dim + 1) + r_concat = [ + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[: dim + 1])), + r_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1 :])), + ] + r_concat = g.op("Concat", *r_concat, axis_i=0) + i_split = expand(g, i_split, r_concat, None) + i_split = sym_help._reshape_helper( + g, + i_split, + g.op("Constant", value_t=torch.LongTensor(input_sizes)), + allowzero=0, + ) + final_splits.append(i_split) + return g.op("Concat", *final_splits, axis_i=dim) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/tracing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..3ffc44d23cb6aa43e940bf8562a9130f2a4f27a8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/tracing.py @@ -0,0 +1,71 @@ +import inspect +import torch + +from custom_detectron2.utils.env import TORCH_VERSION + +try: + from torch.fx._symbolic_trace import is_fx_tracing as is_fx_tracing_current + + tracing_current_exists = True +except ImportError: + tracing_current_exists = False + +try: + from torch.fx._symbolic_trace import _orig_module_call + + tracing_legacy_exists = True +except ImportError: + tracing_legacy_exists = False + + +@torch.jit.ignore +def is_fx_tracing_legacy() -> bool: + """ + Returns a bool indicating whether torch.fx is currently symbolically tracing a module. + Can be useful for gating module logic that is incompatible with symbolic tracing. 
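+
+    Example (an illustrative sketch; `x` is a hypothetical input tensor)::
+
+        if not is_fx_tracing_legacy():
+            assert x.shape[0] > 0  # data-dependent check, skipped while tracing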
+ """ + return torch.nn.Module.__call__ is not _orig_module_call + + +@torch.jit.ignore +def is_fx_tracing() -> bool: + """Returns whether execution is currently in + Torch FX tracing mode""" + if TORCH_VERSION >= (1, 10) and tracing_current_exists: + return is_fx_tracing_current() + elif tracing_legacy_exists: + return is_fx_tracing_legacy() + else: + # Can't find either current or legacy tracing indication code. + # Enabling this assert_fx_safe() call regardless of tracing status. + return False + + +@torch.jit.ignore +def assert_fx_safe(condition: bool, message: str) -> torch.Tensor: + """An FX-tracing safe version of assert. + Avoids erroneous type assertion triggering when types are masked inside + an fx.proxy.Proxy object during tracing. + Args: condition - either a boolean expression or a string representing + the condition to test. If this assert triggers an exception when tracing + due to dynamic control flow, try encasing the expression in quotation + marks and supplying it as a string.""" + # Must return a concrete tensor for compatibility with PyTorch <=1.8. + # If <=1.8 compatibility is not needed, return type can be converted to None + if not is_fx_tracing(): + try: + if isinstance(condition, str): + caller_frame = inspect.currentframe().f_back + torch._assert( + eval(condition, caller_frame.f_globals, caller_frame.f_locals), message + ) + return torch.ones(1) + else: + torch._assert(condition, message) + return torch.ones(1) + except torch.fx.proxy.TraceError as e: + print( + "Found a non-FX compatible assertion. Skipping the check. Failure is shown below" + + str(e) + ) + return torch.zeros(1) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/video_visualizer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/video_visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..a22836d17eb5895d66fd5d8f68cc109d132ea9fe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/video_visualizer.py @@ -0,0 +1,287 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import List +import custom_pycocotools.mask as mask_util + +from custom_detectron2.structures import Instances +from custom_detectron2.utils.visualizer import ( + ColorMode, + Visualizer, + _create_text_labels, + _PanopticPrediction, +) + +from .colormap import random_color, random_colors + + +class _DetectedInstance: + """ + Used to store data about detected objects in video frame, + in order to transfer color to objects in the future frames. + + Attributes: + label (int): + bbox (tuple[float]): + mask_rle (dict): + color (tuple[float]): RGB colors in range (0, 1) + ttl (int): time-to-live for the instance. For example, if ttl=2, + the instance color can be transferred to objects in the next two frames. + """ + + __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"] + + def __init__(self, label, bbox, mask_rle, color, ttl): + self.label = label + self.bbox = bbox + self.mask_rle = mask_rle + self.color = color + self.ttl = ttl + + +class VideoVisualizer: + def __init__(self, metadata, instance_mode=ColorMode.IMAGE): + """ + Args: + metadata (MetadataCatalog): image metadata. + """ + self.metadata = metadata + self._old_instances = [] + assert instance_mode in [ + ColorMode.IMAGE, + ColorMode.IMAGE_BW, + ], "Other mode not supported yet." 
+ self._instance_mode = instance_mode + self._max_num_instances = self.metadata.get("max_num_instances", 74) + self._assigned_colors = {} + self._color_pool = random_colors(self._max_num_instances, rgb=True, maximum=1) + self._color_idx_set = set(range(len(self._color_pool))) + + def draw_instance_predictions(self, frame, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255]. + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. + """ + frame_visualizer = Visualizer(frame, self.metadata) + num_instances = len(predictions) + if num_instances == 0: + return frame_visualizer.output + + boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + colors = predictions.COLOR if predictions.has("COLOR") else [None] * len(predictions) + periods = predictions.ID_period if predictions.has("ID_period") else None + period_threshold = self.metadata.get("period_threshold", 0) + visibilities = ( + [True] * len(predictions) + if periods is None + else [x > period_threshold for x in periods] + ) + + if predictions.has("pred_masks"): + masks = predictions.pred_masks + # mask IOU is not yet enabled + # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F")) + # assert len(masks_rles) == num_instances + else: + masks = None + + if not predictions.has("COLOR"): + if predictions.has("ID"): + colors = self._assign_colors_by_id(predictions) + else: + # ToDo: clean old assign color method and use a default tracker to assign id + detected = [ + _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=colors[i], ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + + if self._instance_mode == ColorMode.IMAGE_BW: + # any() returns uint8 tensor + frame_visualizer.output.reset_image( + frame_visualizer._create_grayscale_image( + (masks.any(dim=0) > 0).numpy() if masks is not None else None + ) + ) + alpha = 0.3 + else: + alpha = 0.5 + + labels = ( + None + if labels is None + else [y[0] for y in filter(lambda x: x[1], zip(labels, visibilities))] + ) # noqa + assigned_colors = ( + None + if colors is None + else [y[0] for y in filter(lambda x: x[1], zip(colors, visibilities))] + ) # noqa + frame_visualizer.overlay_instances( + boxes=None if masks is not None else boxes[visibilities], # boxes are a bit distracting + masks=None if masks is None else masks[visibilities], + labels=labels, + keypoints=None if keypoints is None else keypoints[visibilities], + assigned_colors=assigned_colors, + alpha=alpha, + ) + + return frame_visualizer.output + + def draw_sem_seg(self, frame, sem_seg, area_threshold=None): + """ + Args: + sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W), + each value is the integer label. 
+            area_threshold (Optional[int]): only draw segmentations larger than the threshold
+        """
+        # simply delegate to the frame-level Visualizer
+        frame_visualizer = Visualizer(frame, self.metadata)
+        frame_visualizer.draw_sem_seg(sem_seg, area_threshold=area_threshold)
+        return frame_visualizer.output
+
+    def draw_panoptic_seg_predictions(
+        self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5
+    ):
+        frame_visualizer = Visualizer(frame, self.metadata)
+        pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
+
+        if self._instance_mode == ColorMode.IMAGE_BW:
+            frame_visualizer.output.reset_image(
+                frame_visualizer._create_grayscale_image(pred.non_empty_mask())
+            )
+
+        # draw mask for all semantic segments first i.e. "stuff"
+        for mask, sinfo in pred.semantic_masks():
+            category_idx = sinfo["category_id"]
+            try:
+                mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
+            except AttributeError:
+                mask_color = None
+
+            frame_visualizer.draw_binary_mask(
+                mask,
+                color=mask_color,
+                text=self.metadata.stuff_classes[category_idx],
+                alpha=alpha,
+                area_threshold=area_threshold,
+            )
+
+        all_instances = list(pred.instance_masks())
+        if len(all_instances) == 0:
+            return frame_visualizer.output
+        # draw mask for all instances second
+        masks, sinfo = list(zip(*all_instances))
+        num_instances = len(masks)
+        masks_rles = mask_util.encode(
+            np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F")
+        )
+        assert len(masks_rles) == num_instances
+
+        category_ids = [x["category_id"] for x in sinfo]
+        detected = [
+            _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8)
+            for i in range(num_instances)
+        ]
+        colors = self._assign_colors(detected)
+        labels = [self.metadata.thing_classes[k] for k in category_ids]
+
+        frame_visualizer.overlay_instances(
+            boxes=None,
+            masks=masks,
+            labels=labels,
+            keypoints=None,
+            assigned_colors=colors,
+            alpha=alpha,
+        )
+        return frame_visualizer.output
+
+    def _assign_colors(self, instances):
+        """
+        Naive tracking heuristic that assigns the same color to the same instance;
+        updates the internal state of tracked instances.
+
+        Returns:
+            list[tuple[float]]: list of colors.
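+
+        The matching is greedy per old instance: an IoU (boxes when available,
+        otherwise mask RLEs) above a fixed threshold transfers the old color to
+        the matched new instance; unmatched old instances survive for `ttl` more
+        frames in case the detector briefly misses them.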
+ """ + + # Compute iou with either boxes or masks: + is_crowd = np.zeros((len(instances),), dtype=bool) + if instances[0].bbox is None: + assert instances[0].mask_rle is not None + # use mask iou only when box iou is None + # because box seems good enough + rles_old = [x.mask_rle for x in self._old_instances] + rles_new = [x.mask_rle for x in instances] + ious = mask_util.iou(rles_old, rles_new, is_crowd) + threshold = 0.5 + else: + boxes_old = [x.bbox for x in self._old_instances] + boxes_new = [x.bbox for x in instances] + ious = mask_util.iou(boxes_old, boxes_new, is_crowd) + threshold = 0.6 + if len(ious) == 0: + ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32") + + # Only allow matching instances of the same label: + for old_idx, old in enumerate(self._old_instances): + for new_idx, new in enumerate(instances): + if old.label != new.label: + ious[old_idx, new_idx] = 0 + + matched_new_per_old = np.asarray(ious).argmax(axis=1) + max_iou_per_old = np.asarray(ious).max(axis=1) + + # Try to find match for each old instance: + extra_instances = [] + for idx, inst in enumerate(self._old_instances): + if max_iou_per_old[idx] > threshold: + newidx = matched_new_per_old[idx] + if instances[newidx].color is None: + instances[newidx].color = inst.color + continue + # If an old instance does not match any new instances, + # keep it for the next frame in case it is just missed by the detector + inst.ttl -= 1 + if inst.ttl > 0: + extra_instances.append(inst) + + # Assign random color to newly-detected instances: + for inst in instances: + if inst.color is None: + inst.color = random_color(rgb=True, maximum=1) + self._old_instances = instances[:] + extra_instances + return [d.color for d in instances] + + def _assign_colors_by_id(self, instances: Instances) -> List: + colors = [] + untracked_ids = set(self._assigned_colors.keys()) + for id in instances.ID: + if id in self._assigned_colors: + colors.append(self._color_pool[self._assigned_colors[id]]) + untracked_ids.remove(id) + else: + assert ( + len(self._color_idx_set) >= 1 + ), f"Number of id exceeded maximum, \ + max = {self._max_num_instances}" + idx = self._color_idx_set.pop() + color = self._color_pool[idx] + self._assigned_colors[id] = idx + colors.append(color) + for id in untracked_ids: + self._color_idx_set.add(self._assigned_colors[id]) + del self._assigned_colors[id] + return colors diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/visualizer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a6fd59fb67867a9a420f03fc42d36779f989ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_detectron2/utils/visualizer.py @@ -0,0 +1,1267 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+import colorsys
+import logging
+import math
+import numpy as np
+from enum import Enum, unique
+import cv2
+import matplotlib as mpl
+import matplotlib.colors as mplc
+import matplotlib.figure as mplfigure
+import custom_pycocotools.mask as mask_util
+import torch
+from matplotlib.backends.backend_agg import FigureCanvasAgg
+from PIL import Image
+
+from custom_detectron2.data import MetadataCatalog
+from custom_detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
+from custom_detectron2.utils.file_io import PathManager
+
+from .colormap import random_color
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["ColorMode", "VisImage", "Visualizer"]
+
+
+_SMALL_OBJECT_AREA_THRESH = 1000
+_LARGE_MASK_AREA_THRESH = 120000
+_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
+_BLACK = (0, 0, 0)
+_RED = (1.0, 0, 0)
+
+_KEYPOINT_THRESHOLD = 0.05
+
+
+@unique
+class ColorMode(Enum):
+    """
+    Enum of different color modes to use for instance visualizations.
+    """
+
+    IMAGE = 0
+    """
+    Picks a random color for every instance and overlays segmentations with low opacity.
+    """
+    SEGMENTATION = 1
+    """
+    Lets instances of the same category have similar colors
+    (from metadata.thing_colors), and overlays them with
+    high opacity. This draws more attention to the quality of segmentation.
+    """
+    IMAGE_BW = 2
+    """
+    Same as IMAGE, but converts all areas without masks to gray-scale.
+    Only available for drawing per-instance mask predictions.
+    """
+
+
+class GenericMask:
+    """
+    Attributes:
+        polygons (list[ndarray]): polygons for this mask.
+            Each ndarray has format [x, y, x, y, ...]
+        mask (ndarray): a binary mask
+    """
+
+    def __init__(self, mask_or_polygons, height, width):
+        self._mask = self._polygons = self._has_holes = None
+        self.height = height
+        self.width = width
+
+        m = mask_or_polygons
+        if isinstance(m, dict):
+            # RLEs
+            assert "counts" in m and "size" in m
+            if isinstance(m["counts"], list):  # uncompressed RLEs
+                h, w = m["size"]
+                assert h == height and w == width
+                m = mask_util.frPyObjects(m, h, w)
+            self._mask = mask_util.decode(m)[:, :]
+            return
+
+        if isinstance(m, list):  # list[ndarray]
+            self._polygons = [np.asarray(x).reshape(-1) for x in m]
+            return
+
+        if isinstance(m, np.ndarray):  # assumed to be a binary mask
+            assert m.shape[1] != 2, m.shape
+            assert m.shape == (
+                height,
+                width,
+            ), f"mask shape: {m.shape}, target dims: {height}, {width}"
+            self._mask = m.astype("uint8")
+            return
+
+        raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
+
+    @property
+    def mask(self):
+        if self._mask is None:
+            self._mask = self.polygons_to_mask(self._polygons)
+        return self._mask
+
+    @property
+    def polygons(self):
+        if self._polygons is None:
+            self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
+        return self._polygons
+
+    @property
+    def has_holes(self):
+        if self._has_holes is None:
+            if self._mask is not None:
+                self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
+            else:
+                self._has_holes = False  # if original format is polygon, does not have holes
+        return self._has_holes
+
+    def mask_to_polygons(self, mask):
+        # cv2.RETR_CCOMP flag retrieves all the contours and arranges them into a 2-level
+        # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
+        # Internal contours (holes) are placed in hierarchy-2.
+        # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
+ mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr + res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + if hierarchy is None: # empty mask + return [], False + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. + # We add 0.5 to turn them into real-value coordinate space. A better solution + # would be to first +0.5 and then dilate the returned polygon by 0.5. + res = [x + 0.5 for x in res if len(x) >= 6] + return res, has_holes + + def polygons_to_mask(self, polygons): + rle = mask_util.frPyObjects(polygons, self.height, self.width) + rle = mask_util.merge(rle) + return mask_util.decode(rle)[:, :] + + def area(self): + return self.mask.sum() + + def bbox(self): + p = mask_util.frPyObjects(self.polygons, self.height, self.width) + p = mask_util.merge(p) + bbox = mask_util.toBbox(p) + bbox[2] += bbox[0] + bbox[3] += bbox[1] + return bbox + + +class _PanopticPrediction: + """ + Unify different panoptic annotation/prediction formats + """ + + def __init__(self, panoptic_seg, segments_info, metadata=None): + if segments_info is None: + assert metadata is not None + # If "segments_info" is None, we assume "panoptic_img" is a + # H*W int32 image storing the panoptic_id in the format of + # category_id * label_divisor + instance_id. We reserve -1 for + # VOID label. + label_divisor = metadata.label_divisor + segments_info = [] + for panoptic_label in np.unique(panoptic_seg.numpy()): + if panoptic_label == -1: + # VOID region. + continue + pred_class = panoptic_label // label_divisor + isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() + segments_info.append( + { + "id": int(panoptic_label), + "category_id": int(pred_class), + "isthing": bool(isthing), + } + ) + del metadata + + self._seg = panoptic_seg + + self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info + segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) + areas = areas.numpy() + sorted_idxs = np.argsort(-areas) + self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] + self._seg_ids = self._seg_ids.tolist() + for sid, area in zip(self._seg_ids, self._seg_areas): + if sid in self._sinfo: + self._sinfo[sid]["area"] = float(area) + + def non_empty_mask(self): + """ + Returns: + (H, W) array, a mask for all pixels that have a prediction + """ + empty_ids = [] + for id in self._seg_ids: + if id not in self._sinfo: + empty_ids.append(id) + if len(empty_ids) == 0: + return np.zeros(self._seg.shape, dtype=np.uint8) + assert ( + len(empty_ids) == 1 + ), ">1 ids corresponds to no labels. This is currently not supported" + return (self._seg != empty_ids[0]).numpy().astype(bool) + + def semantic_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or sinfo["isthing"]: + # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
+ continue + yield (self._seg == sid).numpy().astype(bool), sinfo + + def instance_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or not sinfo["isthing"]: + continue + mask = (self._seg == sid).numpy().astype(bool) + if mask.sum() > 0: + yield mask, sinfo + + +def _create_text_labels(classes, scores, class_names, is_crowd=None): + """ + Args: + classes (list[int] or None): + scores (list[float] or None): + class_names (list[str] or None): + is_crowd (list[bool] or None): + + Returns: + list[str] or None + """ + labels = None + if classes is not None: + if class_names is not None and len(class_names) > 0: + labels = [class_names[i] for i in classes] + else: + labels = [str(i) for i in classes] + if scores is not None: + if labels is None: + labels = ["{:.0f}%".format(s * 100) for s in scores] + else: + labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] + if labels is not None and is_crowd is not None: + labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] + return labels + + +class VisImage: + def __init__(self, img, scale=1.0): + """ + Args: + img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. + scale (float): scale the input image + """ + self.img = img + self.scale = scale + self.width, self.height = img.shape[1], img.shape[0] + self._setup_figure(img) + + def _setup_figure(self, img): + """ + Args: + Same as in :meth:`__init__()`. + + Returns: + fig (matplotlib.pyplot.figure): top level container for all the image plot elements. + ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. + """ + fig = mplfigure.Figure(frameon=False) + self.dpi = fig.get_dpi() + # add a small 1e-2 to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches( + (self.width * self.scale + 1e-2) / self.dpi, + (self.height * self.scale + 1e-2) / self.dpi, + ) + self.canvas = FigureCanvasAgg(fig) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) + ax.axis("off") + self.fig = fig + self.ax = ax + self.reset_image(img) + + def reset_image(self, img): + """ + Args: + img: same as in __init__ + """ + img = img.astype("uint8") + self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") + + def save(self, filepath): + """ + Args: + filepath (str): a string that contains the absolute path, including the file name, where + the visualized image will be saved. + """ + self.fig.savefig(filepath) + + def get_image(self): + """ + Returns: + ndarray: + the visualized image of shape (H, W, 3) (RGB) in uint8 type. + The shape is scaled w.r.t the input image using the given `scale` argument. + """ + canvas = self.canvas + s, (width, height) = canvas.print_to_buffer() + # buf = io.BytesIO() # works for cairo backend + # canvas.print_rgba(buf) + # width, height = self.width, self.height + # s = buf.getvalue() + + buffer = np.frombuffer(s, dtype="uint8") + + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + return rgb.astype("uint8") + + +class Visualizer: + """ + Visualizer that draws data about detection/segmentation on images. 
+
+    It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
+    that draw primitive objects to images, as well as high-level wrappers like
+    `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
+    that draw composite data in some pre-defined style.
+
+    Note that the exact visualization style for the high-level wrappers is subject to change.
+    Style such as color, opacity, label contents, visibility of labels, or even the visibility
+    of objects themselves (e.g. when the object is too small) may change according
+    to different heuristics, as long as the results still look visually reasonable.
+
+    To obtain a consistent style, you can implement custom drawing functions with the
+    above-mentioned primitive methods instead. If you need more customized visualization
+    styles, you can process the data yourself following their format documented in
+    tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
+    intend to satisfy everyone's preference on drawing styles.
+
+    This visualizer focuses on high rendering quality rather than performance. It is not
+    designed to be used for real-time applications.
+    """
+
+    # TODO implement a fast, rasterized version using OpenCV
+
+    def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
+        """
+        Args:
+            img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
+                the height and width of the image respectively. C is the number of
+                color channels. The image is required to be in RGB format since that
+                is a requirement of the Matplotlib library. The image is also expected
+                to be in the range [0, 255].
+            metadata (Metadata): dataset metadata (e.g. class names and colors)
+            instance_mode (ColorMode): defines one of the pre-defined styles for drawing
+                instances on an image.
+        """
+        self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
+        if metadata is None:
+            metadata = MetadataCatalog.get("__nonexist__")
+        self.metadata = metadata
+        self.output = VisImage(self.img, scale=scale)
+        self.cpu_device = torch.device("cpu")
+
+        # text that is too small is useless; clamp the default font size to at least 10 // scale
+        self._default_font_size = max(
+            np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
+        )
+        self._instance_mode = instance_mode
+        self.keypoint_threshold = _KEYPOINT_THRESHOLD
+
+    def draw_instance_predictions(self, predictions):
+        """
+        Draw instance-level prediction results on an image.
+
+        Args:
+            predictions (Instances): the output of an instance detection/segmentation
+                model. The following fields will be used to draw:
+                "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
+
+        Returns:
+            output (VisImage): image object with visualizations.
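+
+        Example (an illustrative sketch; `img`, `metadata` and `instances` are
+        hypothetical, with `img` an RGB uint8 array and `instances` an Instances
+        object)::
+
+            v = Visualizer(img, metadata)
+            result = v.draw_instance_predictions(instances.to("cpu")).get_image()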
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = np.asarray(predictions.pred_masks) + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + ] + alpha = 0.8 + else: + colors = None + alpha = 0.5 + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image( + self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + if predictions.has("pred_masks") + else None + ) + ) + alpha = 0.3 + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): + """ + Draw semantic segmentation predictions/labels. + + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + Each value is the integer label of the pixel. + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + return self.output + + def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): + """ + Draw panoptic prediction annotations or results. + + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. + If it is a ``list[dict]``, each dict contains keys "id", "category_id". + If None, category id of each pixel is computed by + ``pixel // metadata.label_divisor``. + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + + Returns: + output (VisImage): image object with visualizations. + """ + pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx] + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + labels = _create_text_labels( + category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] + ) + + try: + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids + ] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) + + return self.output + + draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility + + def draw_dataset_dict(self, dic): + """ + Draw annotations/segmentations in Detectron2 Dataset format. + + Args: + dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. + + Returns: + output (VisImage): image object with visualizations. + """ + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [ + BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) + if len(x["bbox"]) == 4 + else x["bbox"] + for x in annos + ] + + colors = None + category_ids = [x["category_id"] for x in annos] + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) + for c in category_ids + ] + names = self.metadata.get("thing_classes", None) + labels = _create_text_labels( + category_ids, + scores=None, + class_names=names, + is_crowd=[x.get("iscrowd", 0) for x in annos], + ) + self.overlay_instances( + labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors + ) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + with PathManager.open(dic["sem_seg_file_name"], "rb") as f: + sem_seg = Image.open(f) + sem_seg = np.asarray(sem_seg, dtype="uint8") + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) + + pan_seg = dic.get("pan_seg", None) + if pan_seg is None and "pan_seg_file_name" in dic: + with PathManager.open(dic["pan_seg_file_name"], "rb") as f: + pan_seg = Image.open(f) + pan_seg = np.asarray(pan_seg) + from panopticapi.utils import rgb2id + + pan_seg = rgb2id(pan_seg) + if pan_seg is not None: + segments_info = dic["segments_info"] + pan_seg = torch.tensor(pan_seg) + self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5, + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the 
N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + + * :class:`detectron2.structures.PolygonMasks`, + :class:`detectron2.structures.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = 0 + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + if boxes is not None and boxes.shape[1] == 5: + return self.overlay_rotated_instances( + boxes=boxes, labels=labels, assigned_colors=assigned_colors + ) + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. 
+ horiz_align = "left" + elif masks is not None: + # skip small mask without polygon + if len(masks[i].polygons) == 0: + continue + + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): + """ + Args: + boxes (ndarray): an Nx5 numpy array of + (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image. + labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = len(boxes) + + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + if boxes is not None: + areas = boxes[:, 2] * boxes[:, 3] + + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + self.draw_rotated_box_with_label( + boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None + ) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + visible = {} + keypoint_names = self.metadata.get("keypoint_names") + for idx, keypoint in enumerate(keypoints): + + # draw keypoint + x, y, prob = keypoint + if prob > self.keypoint_threshold: + self.draw_circle((x, y), color=_RED) + if keypoint_names: + keypoint_name = keypoint_names[idx] + visible[keypoint_name] = (x, y) + + if self.metadata.get("keypoint_connection_rules"): + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. + # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, + text, + position, + *, + font_size=None, + color="g", + horizontal_alignment="center", + rotation=0, + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return self.output + + def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + + Returns: + output (VisImage): image object with box drawn. 
+ """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = max(self._default_font_size / 4, 1) + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_rotated_box_with_label( + self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None + ): + """ + Draw a rotated box with label on its top-left corner. + + Args: + rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), + where cnt_x and cnt_y are the center coordinates of the box. + w and h are the width and height of the box. angle represents how + many degrees the box is rotated CCW with regard to the 0-degree box. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + label (string): label for rotated box. It will not be rendered when set to None. + + Returns: + output (VisImage): image object with box drawn. + """ + cnt_x, cnt_y, w, h, angle = rotated_box + area = w * h + # use thinner lines when the box is small + linewidth = self._default_font_size / ( + 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 + ) + + theta = angle * math.pi / 180.0 + c = math.cos(theta) + s = math.sin(theta) + rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] + # x: left->right ; y: top->down + rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] + for k in range(4): + j = (k + 1) % 4 + self.draw_line( + [rotated_rect[k][0], rotated_rect[j][0]], + [rotated_rect[k][1], rotated_rect[j][1]], + color=edge_color, + linestyle="--" if k == 1 else line_style, + linewidth=linewidth, + ) + + if label is not None: + text_pos = rotated_rect[1] # topleft corner + + height_ratio = h / np.sqrt(self.output.height * self.output.width) + label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size + ) + self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) + + return self.output + + def draw_circle(self, circle_coord, color, radius=3): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch( + mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) + ) + return self.output + + def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + linestyle: style of the line. Refer to `matplotlib.lines.Line2D` + for a full list of formats that are accepted. 
+ linewidth (float or None): width of the line. When it's None, + a default value will be computed and used. + + Returns: + output (VisImage): image object with line drawn. + """ + if linewidth is None: + linewidth = self._default_font_size / 3 + linewidth = max(linewidth, 1) + self.output.ax.add_line( + mpl.lines.Line2D( + x_data, + y_data, + linewidth=linewidth * self.output.scale, + color=color, + linestyle=linestyle, + ) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component smaller than this area will not be shown. + + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + has_valid_segment = False + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + if area < (area_threshold or 0): + continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + # TODO: Use Path/PathPatch to draw vector graphics: + # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None and has_valid_segment: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): + """ + Args: + soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with mask drawn. 
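+
+        Example (illustrative sketch; ``vis`` is an instance of this class and
+        ``heatmap`` is a float (H, W) array with values in [0, 1])::
+
+            vis.draw_soft_mask(heatmap, color=(0.0, 0.5, 1.0), alpha=0.4)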
+ """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + shape2d = (soft_mask.shape[0], soft_mask.shape[1]) + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = soft_mask * alpha + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + binary_mask = (soft_mask > 0.5).astype("uint8") + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with polygon drawn. + """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. + + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. 
+ """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return tuple(np.clip(modified_color, 0.0, 1.0)) + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. + """ + if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): + return boxes.tensor.detach().numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. + + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _draw_text_in_mask(self, binary_mask, text, color): + """ + Find proper places to draw text given a binary mask. + """ + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + if stats[1:, -1].size == 0: + return + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + self.draw_text(text, center, color=color) + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. + """ + return self.output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/LICENSE b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..277b5c11be103f028a8d10985139f1da10c2f08e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/README.md b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9568ea71c755b6938ee5482ba9f09be722e75943 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/README.md @@ -0,0 +1,259 @@ +## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer + +This repository contains code to compute depth from a single image. It accompanies our [paper](https://arxiv.org/abs/1907.01341v3): + +>Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer +René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, Vladlen Koltun + + +and our [preprint](https://arxiv.org/abs/2103.13413): + +> Vision Transformers for Dense Prediction +> René Ranftl, Alexey Bochkovskiy, Vladlen Koltun + + +MiDaS was trained on up to 12 datasets (ReDWeb, DIML, Movies, MegaDepth, WSVD, TartanAir, HRWSI, ApolloScape, BlendedMVS, IRS, KITTI, NYU Depth V2) with +multi-objective optimization. +The original model that was trained on 5 datasets (`MIX 5` in the paper) can be found [here](https://github.com/isl-org/MiDaS/releases/tag/v2). +The figure below shows an overview of the different MiDaS models; the bubble size scales with number of parameters. 
+
+![](figures/Improvement_vs_FPS.png)
+
+### Setup
+
+1) Pick one or more models and download the corresponding weights to the `weights` folder:
+
+MiDaS 3.1
+- For highest quality: [dpt_beit_large_512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt)
+- For moderately less quality, but better speed-performance trade-off: [dpt_swin2_large_384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt)
+- For embedded devices: [dpt_swin2_tiny_256](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt), [dpt_levit_224](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt)
+- For inference on Intel CPUs, OpenVINO may be used for the small legacy model: openvino_midas_v21_small [.xml](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.xml), [.bin](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.bin)
+
+MiDaS 3.0: Legacy transformer models [dpt_large_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) and [dpt_hybrid_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt)
+
+MiDaS 2.1: Legacy convolutional models [midas_v21_384](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) and [midas_v21_small_256](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt)
+
+2) Set up dependencies:
+
+    ```shell
+    conda env create -f environment.yaml
+    conda activate midas-py310
+    ```
+
+#### optional
+
+For the Next-ViT model, execute
+
+```shell
+git submodule add https://github.com/isl-org/Next-ViT midas/external/next_vit
+```
+
+For the OpenVINO model, install
+
+```shell
+pip install openvino
+```
+
+### Usage
+
+1) Place one or more input images in the folder `input`.
+
+2) Run the model with
+
+   ```shell
+   python run.py --model_type <model_type> --input_path input --output_path output
+   ```
+   where ```<model_type>``` is chosen from [dpt_beit_large_512](#model_type), [dpt_beit_large_384](#model_type),
+   [dpt_beit_base_384](#model_type), [dpt_swin2_large_384](#model_type), [dpt_swin2_base_384](#model_type),
+   [dpt_swin2_tiny_256](#model_type), [dpt_swin_large_384](#model_type), [dpt_next_vit_large_384](#model_type),
+   [dpt_levit_224](#model_type), [dpt_large_384](#model_type), [dpt_hybrid_384](#model_type),
+   [midas_v21_384](#model_type), [midas_v21_small_256](#model_type), [openvino_midas_v21_small_256](#model_type).
+
+3) The resulting depth maps are written to the `output` folder.
+
+#### optional
+
+1) By default, the inference resizes the height of input images to the size of a model to fit into the encoder. This
+   size is given by the numbers in the model names of the [accuracy table](#accuracy). Some models support not only a
+   single inference height but a range of different heights. Feel free to explore different heights by appending the
+   extra command line argument `--height`. Unsupported height values will throw an error. Note that using this
+   argument may decrease the model accuracy. An example invocation is shown after this list.
+2) By default, the inference keeps the aspect ratio of input images when feeding them into the encoder if this is
+   supported by a model (all models except for Swin, Swin2, LeViT). In order to resize to a square resolution,
+   disregarding the aspect ratio while preserving the height, use the command line argument `--square`.
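+
+For instance, the following invocation (an illustrative sketch: `dpt_beit_large_512` is one of the model types listed
+above, and `run.py` rejects unsupported height values, as noted in item 1) runs the highest-quality model at a reduced
+inference height:
+
+```shell
+python run.py --model_type dpt_beit_large_512 --input_path input --output_path output --height 352
+```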
+
+#### via Camera
+
+   If you want the input images to be grabbed from the camera and shown in a window, omit the input and output paths
+   and choose a model type as shown above:
+
+   ```shell
+   python run.py --model_type <model_type> --side
+   ```
+
+   The argument `--side` is optional and causes both the input RGB image and the output depth map to be shown
+   side-by-side for comparison.
+
+#### via Docker
+
+1) Make sure you have installed Docker and the
+   [NVIDIA Docker runtime](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-\(Native-GPU-Support\)).
+
+2) Build the Docker image:
+
+    ```shell
+    docker build -t midas .
+    ```
+
+3) Run inference:
+
+    ```shell
+    docker run --rm --gpus all -v $PWD/input:/opt/MiDaS/input -v $PWD/output:/opt/MiDaS/output -v $PWD/weights:/opt/MiDaS/weights midas
+    ```
+
+    This command passes through all of your NVIDIA GPUs to the container, mounts the
+    `input` and `output` directories and then runs the inference.
+
+#### via PyTorch Hub
+
+The pretrained model is also available on [PyTorch Hub](https://pytorch.org/hub/intelisl_midas_v2/).
+
+#### via TensorFlow or ONNX
+
+See [README](https://github.com/isl-org/MiDaS/tree/master/tf) in the `tf` subdirectory.
+
+Currently only supports MiDaS v2.1.
+
+
+#### via Mobile (iOS / Android)
+
+See [README](https://github.com/isl-org/MiDaS/tree/master/mobile) in the `mobile` subdirectory.
+
+#### via ROS1 (Robot Operating System)
+
+See [README](https://github.com/isl-org/MiDaS/tree/master/ros) in the `ros` subdirectory.
+
+Currently only supports MiDaS v2.1. DPT-based models to be added.
+
+
+### Accuracy
+
+We provide a **zero-shot error** $\epsilon_d$ which is evaluated for 6 different datasets
+(see [paper](https://arxiv.org/abs/1907.01341v3)). **Lower error values are better**.
+$\color{green}{\textsf{Overall model quality is represented by the improvement}}$ ([Imp.](#improvement)) with respect to
+MiDaS 3.0 DPT<sub>L-384</sub>. The models are grouped by the height used for inference, whereas the square training resolution is given by
+the numbers in the model names. The table also shows the **number of parameters** (in millions) and the
+**frames per second** for inference at the training resolution (for GPU RTX 3090):
+
+| MiDaS Model | DIW<br>WHDR | Eth3d<br>AbsRel | Sintel<br>AbsRel | TUM<br>δ1 | KITTI<br>δ1 | NYUv2<br>δ1 | $\color{green}{\textsf{Imp.}}$<br>% | Par.<br>M | FPS<br>
  | +|-----------------------------------------------------------------------------------------------------------------------|-------------------------:|-----------------------------:|------------------------------:|-------------------------:|-------------------------:|-------------------------:|-------------------------------------------------:|----------------------:|--------------------------:| +| **Inference height 512** | | | | | | | | | | +| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1137 | 0.0659 | 0.2366 | **6.13** | 11.56* | **1.86*** | $\color{green}{\textsf{19}}$ | **345** | **5.7** | +| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt)$\tiny{\square}$ | **0.1121** | **0.0614** | **0.2090** | 6.46 | **5.00*** | 1.90* | $\color{green}{\textsf{34}}$ | **345** | **5.7** | +| | | | | | | | | | | +| **Inference height 384** | | | | | | | | | | +| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1245 | 0.0681 | **0.2176** | **6.13** | 6.28* | **2.16*** | $\color{green}{\textsf{28}}$ | 345 | 12 | +| [v3.1 Swin2L-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt)$\tiny{\square}$ | 0.1106 | 0.0732 | 0.2442 | 8.87 | **5.84*** | 2.92* | $\color{green}{\textsf{22}}$ | 213 | 41 | +| [v3.1 Swin2B-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt)$\tiny{\square}$ | 0.1095 | 0.0790 | 0.2404 | 8.93 | 5.97* | 3.28* | $\color{green}{\textsf{22}}$ | 102 | 39 | +| [v3.1 SwinL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt)$\tiny{\square}$ | 0.1126 | 0.0853 | 0.2428 | 8.74 | 6.60* | 3.34* | $\color{green}{\textsf{17}}$ | 213 | 49 | +| [v3.1 BEiTL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt) | 0.1239 | **0.0667** | 0.2545 | 7.17 | 9.84* | 2.21* | $\color{green}{\textsf{17}}$ | 344 | 13 | +| [v3.1 Next-ViTL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt) | **0.1031** | 0.0954 | 0.2295 | 9.21 | 6.89* | 3.47* | $\color{green}{\textsf{16}}$ | **72** | 30 | +| [v3.1 BEiTB-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt) | 0.1159 | 0.0967 | 0.2901 | 9.88 | 26.60* | 3.91* | $\color{green}{\textsf{-31}}$ | 112 | 31 | +| [v3.0 DPTL-384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) | 0.1082 | 0.0888 | 0.2697 | 9.97 | 8.46 | 8.32 | $\color{green}{\textsf{0}}$ | 344 | **61** | +| [v3.0 DPTH-384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt) | 0.1106 | 0.0934 | 0.2741 | 10.89 | 11.56 | 8.69 | $\color{green}{\textsf{-10}}$ | 123 | 50 | +| [v2.1 Large384](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) | 0.1295 | 0.1155 | 0.3285 | 12.51 | 16.08 | 8.71 | $\color{green}{\textsf{-32}}$ | 105 | 47 | +| | | | | | | | | | | +| **Inference height 256** | | | | | | | | | | +| [v3.1 Swin2T-256](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt)$\tiny{\square}$ | **0.1211** | **0.1106** | **0.2868** | **13.43** | **10.13*** | **5.55*** | $\color{green}{\textsf{-11}}$ | 42 | 64 | +| [v2.1 Small256](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt) | 0.1344 | 0.1344 | 0.3370 | 14.53 | 29.27 | 13.43 | $\color{green}{\textsf{-76}}$ | **21** | **90** | +| | | | | | | | | | | +| **Inference height 224** | | | | | | | | | | +| [v3.1 
LeViT224](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt)$\tiny{\square}$ | **0.1314** | **0.1206** | **0.3148** | **18.21** | **15.27*** | **8.64*** | $\color{green}{\textsf{-40}}$ | **51** | **73** | + +* No zero-shot error, because models are also trained on KITTI and NYU Depth V2\ +$\square$ Validation performed at **square resolution**, either because the transformer encoder backbone of a model +does not support non-square resolutions (Swin, Swin2, LeViT) or for comparison with these models. All other +validations keep the aspect ratio. A difference in resolution limits the comparability of the zero-shot error and the +improvement, because these quantities are averages over the pixels of an image and do not take into account the +advantage of more details due to a higher resolution.\ +Best values per column and same validation height in bold + +#### Improvement + +The improvement in the above table is defined as the relative zero-shot error with respect to MiDaS v3.0 +DPTL-384 and averaging over the datasets. So, if $\epsilon_d$ is the zero-shot error for dataset $d$, then +the $\color{green}{\textsf{improvement}}$ is given by $100(1-(1/6)\sum_d\epsilon_d/\epsilon_{d,\rm{DPT_{L-384}}})$%. + +Note that the improvements of 10% for MiDaS v2.0 → v2.1 and 21% for MiDaS v2.1 → v3.0 are not visible from the +improvement column (Imp.) in the table but would require an evaluation with respect to MiDaS v2.1 Large384 +and v2.0 Large384 respectively instead of v3.0 DPTL-384. + +### Depth map comparison + +Zoom in for better visibility +![](figures/Comparison.png) + +### Speed on Camera Feed + +Test configuration +- Windows 10 +- 11th Gen Intel Core i7-1185G7 3.00GHz +- 16GB RAM +- Camera resolution 640x480 +- openvino_midas_v21_small_256 + +Speed: 22 FPS + +### Changelog + +* [Dec 2022] Released MiDaS v3.1: + - New models based on 5 different types of transformers ([BEiT](https://arxiv.org/pdf/2106.08254.pdf), [Swin2](https://arxiv.org/pdf/2111.09883.pdf), [Swin](https://arxiv.org/pdf/2103.14030.pdf), [Next-ViT](https://arxiv.org/pdf/2207.05501.pdf), [LeViT](https://arxiv.org/pdf/2104.01136.pdf)) + - Training datasets extended from 10 to 12, including also KITTI and NYU Depth V2 using [BTS](https://github.com/cleinc/bts) split + - Best model, BEiTLarge 512, with resolution 512x512, is on average about [28% more accurate](#Accuracy) than MiDaS v3.0 + - Integrated live depth estimation from camera feed +* [Sep 2021] Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/DPT-Large). +* [Apr 2021] Released MiDaS v3.0: + - New models based on [Dense Prediction Transformers](https://arxiv.org/abs/2103.13413) are on average [21% more accurate](#Accuracy) than MiDaS v2.1 + - Additional models can be found [here](https://github.com/isl-org/DPT) +* [Nov 2020] Released MiDaS v2.1: + - New model that was trained on 10 datasets and is on average about [10% more accurate](#Accuracy) than [MiDaS v2.0](https://github.com/isl-org/MiDaS/releases/tag/v2) + - New light-weight model that achieves [real-time performance](https://github.com/isl-org/MiDaS/tree/master/mobile) on mobile platforms. 
+ - Sample applications for [iOS](https://github.com/isl-org/MiDaS/tree/master/mobile/ios) and [Android](https://github.com/isl-org/MiDaS/tree/master/mobile/android) + - [ROS package](https://github.com/isl-org/MiDaS/tree/master/ros) for easy deployment on robots +* [Jul 2020] Added TensorFlow and ONNX code. Added [online demo](http://35.202.76.57/). +* [Dec 2019] Released new version of MiDaS - the new model is significantly more accurate and robust +* [Jul 2019] Initial release of MiDaS ([Link](https://github.com/isl-org/MiDaS/releases/tag/v1)) + +### Citation + +Please cite our paper if you use this code or any of the models: +``` +@ARTICLE {Ranftl2022, + author = "Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun", + title = "Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-Shot Cross-Dataset Transfer", + journal = "IEEE Transactions on Pattern Analysis and Machine Intelligence", + year = "2022", + volume = "44", + number = "3" +} +``` + +If you use a DPT-based model, please also cite: + +``` +@article{Ranftl2021, + author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, + title = {Vision Transformers for Dense Prediction}, + journal = {ICCV}, + year = {2021}, +} +``` + +### Acknowledgements + +Our work builds on and uses code from [timm](https://github.com/rwightman/pytorch-image-models) and [Next-ViT](https://github.com/bytedance/Next-ViT). +We'd like to thank the authors for making these libraries available. + +### License + +MIT License diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/hubconf.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/hubconf.py new file mode 100644 index 0000000000000000000000000000000000000000..fcfbeb59ecadeb0976bb6faac243ae485bc60280 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/hubconf.py @@ -0,0 +1,435 @@ +dependencies = ["torch"] + +import torch + +from custom_midas_repo.midas.dpt_depth import DPTDepthModel +from custom_midas_repo.midas.midas_net import MidasNet +from custom_midas_repo.midas.midas_net_custom import MidasNet_small + +def DPT_BEiT_L_512(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_BEiT_L_512 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitl16_512", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_BEiT_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_BEiT_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitl16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, 
map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_BEiT_B_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_BEiT_B_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitb16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2l24_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_B_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_B_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2b24_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_T_256(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_T_256 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2t16_256", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Swin_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_Swin_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swinl12_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Next_ViT_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_Next_ViT_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="next_vit_large_6m", + non_negative=True, + ) + + 
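+    # As in the loaders above: fetch the released checkpoint with
+    # torch.hub.load_state_dict_from_url, map it onto the CPU, and let
+    # check_hash=True verify the download before the weights are loaded.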
if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_LeViT_224(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_LeViT_224 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="levit_384", + non_negative=True, + head_features_1=64, + head_features_2=8, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Large(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT-Large model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="vitl16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Hybrid(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT-Hybrid model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="vitb_rn50_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def MiDaS(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS v2.1 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = MidasNet() + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def MiDaS_small(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS v2.1 small model for monocular depth estimation on resource-constrained devices + pretrained (bool): load pretrained weights into model + """ + + model = MidasNet_small(None, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True}) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + + +def transforms(): + import cv2 + from torchvision.transforms import Compose + from custom_midas_repo.midas.transforms import Resize, NormalizeImage, PrepareForNet + 
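+    # The `midas.transforms` module imported below doubles as a namespace object:
+    # the Compose pipelines built in this function are attached to it as
+    # attributes (default_transform, small_transform, ...) and the module itself
+    # is returned at the end.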
from custom_midas_repo.midas import transforms + + transforms.default_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="upper_bound", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.small_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 256, + 256, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="upper_bound", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.dpt_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.beit512_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 512, + 512, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.swin384_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.swin256_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 256, + 256, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.levit_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 224, + 224, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + return transforms diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/beit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/beit.py new file mode 100644 index 0000000000000000000000000000000000000000..3f15f657d21dd7e90cdb57e7dfb6bdcb7eb0921a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/beit.py @@ -0,0 +1,196 @@ +import custom_timm as timm +import torch +import types + +import numpy as np +import torch.nn.functional as F + +from .utils import forward_adapted_unflatten, make_backbone_default +from custom_timm.models.beit import gen_relative_position_index +from torch.utils.checkpoint import checkpoint +from typing import Optional + + +def forward_beit(pretrained, x): + return forward_adapted_unflatten(pretrained, x, "forward_features") + + +def patch_embed_forward(self, x): + """ + Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes. + """ + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + return x + + +def _get_rel_pos_bias(self, window_size): + """ + Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes. + """ + old_height = 2 * self.window_size[0] - 1 + old_width = 2 * self.window_size[1] - 1 + + new_height = 2 * window_size[0] - 1 + new_width = 2 * window_size[1] - 1 + + old_relative_position_bias_table = self.relative_position_bias_table + + old_num_relative_distance = self.num_relative_distance + new_num_relative_distance = new_height * new_width + 3 + + old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3] + + old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2) + new_sub_table = F.interpolate(old_sub_table, size=(int(new_height), int(new_width)), mode="bilinear") + new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1) + + new_relative_position_bias_table = torch.cat( + [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]]) + + key = str(window_size[1]) + "," + str(window_size[0]) + if key not in self.relative_position_indices.keys(): + self.relative_position_indices[key] = gen_relative_position_index(window_size) + + relative_position_bias = new_relative_position_bias_table[ + self.relative_position_indices[key].view(-1)].view( + window_size[0] * window_size[1] + 1, + window_size[0] * window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + +def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None): + """ + Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes. 
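+
+    The relative-position window is derived from the input resolution
+    (``resolution // 16``, matching the 16-pixel patches), so the interpolated
+    bias table stays valid for inputs that differ from the training resolution.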
+ """ + B, N, C = x.shape + + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + window_size = tuple(np.array(resolution) // 16) + attn = attn + self._get_rel_pos_bias(window_size) + if shared_rel_pos_bias is not None: + attn = attn + shared_rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None): + """ + Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes. + """ + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), resolution, + shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +def beit_forward_features(self, x): + """ + Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes. + """ + resolution = x.shape[2:] + + x = self.patch_embed(x) + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias) + else: + x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias) + x = self.norm(x) + return x + + +def _make_beit_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[0, 4, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, + start_index_readout) + + backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed) + backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model) + + for block in backbone.model.blocks: + attn = block.attn + attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn) + attn.forward = types.MethodType(attention_forward, attn) + attn.relative_position_indices = {} + + block.forward = types.MethodType(block_forward, block) + + return backbone + + +def _make_pretrained_beitl16_512(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_large_patch16_512", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks is None else hooks + + features = [256, 512, 1024, 1024] + + return _make_beit_backbone( + model, + features=features, + size=[512, 512], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_beitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks is None else hooks + return 
_make_beit_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_beitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("beit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks is None else hooks + return _make_beit_backbone( + model, + features=[96, 192, 384, 768], + hooks=hooks, + use_readout=use_readout, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/levit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/levit.py new file mode 100644 index 0000000000000000000000000000000000000000..a5fb6934cf76c0680dd71cfe66f90374cfadf5a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/levit.py @@ -0,0 +1,106 @@ +import custom_timm as timm +import torch +import torch.nn as nn +import numpy as np + +from .utils import activations, get_activation, Transpose + + +def forward_levit(pretrained, x): + pretrained.model.forward_features(x) + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + + layer_1 = pretrained.act_postprocess1(layer_1) + layer_2 = pretrained.act_postprocess2(layer_2) + layer_3 = pretrained.act_postprocess3(layer_3) + + return layer_1, layer_2, layer_3 + + +def _make_levit_backbone( + model, + hooks=[3, 11, 21], + patch_grid=[14, 14] +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + + pretrained.activations = activations + + patch_grid_size = np.array(patch_grid, dtype=int) + + pretrained.act_postprocess1 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size(patch_grid_size.tolist())) + ) + pretrained.act_postprocess2 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 2).astype(int)).tolist())) + ) + pretrained.act_postprocess3 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 4).astype(int)).tolist())) + ) + + return pretrained + + +class ConvTransposeNorm(nn.Sequential): + """ + Modification of + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: ConvNorm + such that ConvTranspose2d is used instead of Conv2d. 
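+
+    The ``fuse`` method folds the BatchNorm statistics into the transposed
+    convolution's weight and bias so the pair can be executed as a single
+    layer at inference time.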
+ """ + + def __init__( + self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1, + groups=1, bn_weight_init=1): + super().__init__() + self.add_module('c', + nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False)) + self.add_module('bn', nn.BatchNorm2d(out_chs)) + + nn.init.constant_(self.bn.weight, bn_weight_init) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.ConvTranspose2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b4_transpose(in_chs, out_chs, activation): + """ + Modification of + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: stem_b16 + such that ConvTranspose2d is used instead of Conv2d and stem is also reduced to the half. + """ + return nn.Sequential( + ConvTransposeNorm(in_chs, out_chs, 3, 2, 1), + activation(), + ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1), + activation()) + + +def _make_pretrained_levit_384(pretrained, hooks=None): + model = timm.create_model("levit_384", pretrained=pretrained) + + hooks = [3, 11, 21] if hooks == None else hooks + return _make_levit_backbone( + model, + hooks=hooks + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/next_vit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/next_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..a3ffae5930e1dd5288999f9a8dbee4f723a01ed5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/next_vit.py @@ -0,0 +1,39 @@ +import custom_timm as timm + +import torch.nn as nn + +from pathlib import Path +from .utils import activations, forward_default, get_activation + +from ..external.next_vit.classification.nextvit import * + + +def forward_next_vit(pretrained, x): + return forward_default(pretrained, x, "forward") + + +def _make_next_vit_backbone( + model, + hooks=[2, 6, 36, 39], +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.features[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.features[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.features[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.features[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + return pretrained + + +def _make_pretrained_next_vit_large_6m(hooks=None): + model = timm.create_model("nextvit_large") + + hooks = [2, 6, 36, 39] if hooks == None else hooks + return _make_next_vit_backbone( + model, + hooks=hooks, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin.py new file mode 100644 index 0000000000000000000000000000000000000000..d08ce41f7b8fe8900a1022e24cc5b47d32bf93e8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin.py @@ -0,0 +1,13 @@ +import custom_timm as timm + +from .swin_common import _make_swin_backbone + + +def _make_pretrained_swinl12_384(pretrained, hooks=None): + model = timm.create_model("swin_large_patch4_window12_384", 
pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin2.py new file mode 100644 index 0000000000000000000000000000000000000000..2687b58732d86d7854b6edc435841e38a2e2958b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin2.py @@ -0,0 +1,34 @@ +import custom_timm as timm + +from .swin_common import _make_swin_backbone + + +def _make_pretrained_swin2l24_384(pretrained, hooks=None): + model = timm.create_model("swinv2_large_window12to24_192to384_22kft1k", pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) + + +def _make_pretrained_swin2b24_384(pretrained, hooks=None): + model = timm.create_model("swinv2_base_window12to24_192to384_22kft1k", pretrained=pretrained) + + hooks = [1, 1, 17, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks + ) + + +def _make_pretrained_swin2t16_256(pretrained, hooks=None): + model = timm.create_model("swinv2_tiny_window16_256", pretrained=pretrained) + + hooks = [1, 1, 5, 1] if hooks == None else hooks + return _make_swin_backbone( + model, + hooks=hooks, + patch_grid=[64, 64] + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin_common.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin_common.py new file mode 100644 index 0000000000000000000000000000000000000000..94d63d408f18511179d90b3ac6f697385d1e556d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/swin_common.py @@ -0,0 +1,52 @@ +import torch + +import torch.nn as nn +import numpy as np + +from .utils import activations, forward_default, get_activation, Transpose + + +def forward_swin(pretrained, x): + return forward_default(pretrained, x) + + +def _make_swin_backbone( + model, + hooks=[1, 1, 17, 1], + patch_grid=[96, 96] +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + if hasattr(model, "patch_grid"): + used_patch_grid = model.patch_grid + else: + used_patch_grid = patch_grid + + patch_grid_size = np.array(used_patch_grid, dtype=int) + + pretrained.act_postprocess1 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size(patch_grid_size.tolist())) + ) + pretrained.act_postprocess2 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist())) + ) + pretrained.act_postprocess3 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist())) + ) + pretrained.act_postprocess4 = nn.Sequential( + Transpose(1, 2), + nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist())) + ) + + return pretrained diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..0558899dddcfccec5f01a764d4f21738eb612149 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/utils.py @@ -0,0 +1,249 @@ +import torch + +import torch.nn as nn + + +class Slice(nn.Module): + def __init__(self, start_index=1): + super(Slice, self).__init__() + self.start_index = start_index + + def forward(self, x): + return x[:, self.start_index:] + + +class AddReadout(nn.Module): + def __init__(self, start_index=1): + super(AddReadout, self).__init__() + self.start_index = start_index + + def forward(self, x): + if self.start_index == 2: + readout = (x[:, 0] + x[:, 1]) / 2 + else: + readout = x[:, 0] + return x[:, self.start_index:] + readout.unsqueeze(1) + + +class ProjectReadout(nn.Module): + def __init__(self, in_features, start_index=1): + super(ProjectReadout, self).__init__() + self.start_index = start_index + + self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) + + def forward(self, x): + readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:]) + features = torch.cat((x[:, self.start_index:], readout), -1) + + return self.project(features) + + +class Transpose(nn.Module): + def __init__(self, dim0, dim1): + super(Transpose, self).__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + x = x.transpose(self.dim0, self.dim1) + return x + + +activations = {} + + +def get_activation(name): + def hook(model, input, output): + activations[name] = output + + return hook + + +def forward_default(pretrained, x, function_name="forward_features"): + exec(f"pretrained.model.{function_name}(x)") + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + if hasattr(pretrained, "act_postprocess1"): + layer_1 = pretrained.act_postprocess1(layer_1) + if hasattr(pretrained, "act_postprocess2"): + layer_2 = pretrained.act_postprocess2(layer_2) + if hasattr(pretrained, "act_postprocess3"): + layer_3 = pretrained.act_postprocess3(layer_3) + if hasattr(pretrained, "act_postprocess4"): + layer_4 = pretrained.act_postprocess4(layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def forward_adapted_unflatten(pretrained, x, function_name="forward_features"): + b, c, h, w = x.shape + + exec(f"glob = pretrained.model.{function_name}(x)") + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + layer_1 = pretrained.act_postprocess1[0:2](layer_1) + layer_2 = pretrained.act_postprocess2[0:2](layer_2) + layer_3 = pretrained.act_postprocess3[0:2](layer_3) + layer_4 = pretrained.act_postprocess4[0:2](layer_4) + + unflatten = nn.Sequential( + nn.Unflatten( + 2, + torch.Size( + [ + h // pretrained.model.patch_size[1], + w // pretrained.model.patch_size[0], + ] + ), + ) + ) + + if layer_1.ndim == 3: + layer_1 = unflatten(layer_1) + if layer_2.ndim == 3: + layer_2 = unflatten(layer_2) + if layer_3.ndim == 3: + layer_3 = unflatten(layer_3) + if layer_4.ndim == 3: + layer_4 = unflatten(layer_4) + + layer_1 = pretrained.act_postprocess1[3: len(pretrained.act_postprocess1)](layer_1) + layer_2 = pretrained.act_postprocess2[3: len(pretrained.act_postprocess2)](layer_2) + layer_3 = pretrained.act_postprocess3[3: len(pretrained.act_postprocess3)](layer_3) + layer_4 = pretrained.act_postprocess4[3: len(pretrained.act_postprocess4)](layer_4) + + return layer_1, layer_2, 
layer_3, layer_4 + + +def get_readout_oper(vit_features, features, use_readout, start_index=1): + if use_readout == "ignore": + readout_oper = [Slice(start_index)] * len(features) + elif use_readout == "add": + readout_oper = [AddReadout(start_index)] * len(features) + elif use_readout == "project": + readout_oper = [ + ProjectReadout(vit_features, start_index) for out_feat in features + ] + else: + assert ( + False + ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" + + return readout_oper + + +def make_backbone_default( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + pretrained = nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout) + + # 32, 48, 136, 384 + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + return pretrained diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/vit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..6be3e316208cf82b3f02efc6fde5ddd4791fecc6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/backbones/vit.py @@ -0,0 +1,221 @@ +import torch +import torch.nn as nn +import custom_timm as timm +import types +import math +import torch.nn.functional as F + +from .utils import (activations, forward_adapted_unflatten, get_activation, get_readout_oper, + make_backbone_default, 
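A hedged sketch of the hook mechanism used by make_backbone_default above: each registered forward hook writes its block's output into the module-level activations dict under keys "1" through "4", which the forward_* helpers read back after a single forward pass. The nn.Identity stand-in below is an assumption for illustration only.

import torch
import torch.nn as nn

blk = nn.Identity()
blk.register_forward_hook(get_activation("demo"))  # store this module's output under "demo"
blk(torch.ones(1, 3))
assert torch.equal(activations["demo"], torch.ones(1, 3))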
Transpose) + + +def forward_vit(pretrained, x): + return forward_adapted_unflatten(pretrained, x, "forward_flex") + + +def _resize_pos_embed(self, posemb, gs_h, gs_w): + posemb_tok, posemb_grid = ( + posemb[:, : self.start_index], + posemb[0, self.start_index:], + ) + + gs_old = int(math.sqrt(len(posemb_grid))) + + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) + + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + + return posemb + + +def forward_flex(self, x): + b, c, h, w = x.shape + + pos_embed = self._resize_pos_embed( + self.pos_embed, h // self.patch_size[1], w // self.patch_size[0] + ) + + B = x.shape[0] + + if hasattr(self.patch_embed, "backbone"): + x = self.patch_embed.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) + + if getattr(self, "dist_token", None) is not None: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + else: + if self.no_embed_class: + x = x + pos_embed + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + if not self.no_embed_class: + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + + return x + + +def _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, + start_index_readout=1, +): + pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, + start_index_readout) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. 
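A standalone re-derivation of what _resize_pos_embed above computes, with assumed toy sizes: a position embedding trained on a 24x24 token grid is bilinearly resampled to a 30x40 grid (e.g. a 480x640 input with 16x16 patches), while the class-token embedding is passed through untouched.

import math
import torch
import torch.nn.functional as F

posemb = torch.randn(1, 1 + 24 * 24, 768)
tok, grid = posemb[:, :1], posemb[0, 1:]
gs_old = int(math.sqrt(grid.shape[0]))                        # 24
grid = grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=(30, 40), mode="bilinear")    # resample the token grid
grid = grid.permute(0, 2, 3, 1).reshape(1, 30 * 40, -1)
resized = torch.cat([tok, grid], dim=1)                       # class token re-attached
assert resized.shape == (1, 1 + 1200, 768)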
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=[0, 1, 8, 11], + vit_features=768, + patch_size=[16, 16], + number_stages=2, + use_vit_only=False, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + + used_number_stages = 0 if use_vit_only else number_stages + for s in range(used_number_stages): + pretrained.model.patch_embed.backbone.stages[s].register_forward_hook( + get_activation(str(s + 1)) + ) + for s in range(used_number_stages, 4): + pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1))) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + for s in range(used_number_stages): + value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity()) + exec(f"pretrained.act_postprocess{s + 1}=value") + for s in range(used_number_stages, 4): + if s < number_stages: + final_layer = nn.ConvTranspose2d( + in_channels=features[s], + out_channels=features[s], + kernel_size=4 // (2 ** s), + stride=4 // (2 ** s), + padding=0, + bias=True, + dilation=1, + groups=1, + ) + elif s > number_stages: + final_layer = nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ) + else: + final_layer = None + + layers = [ + readout_oper[s], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[s], + kernel_size=1, + stride=1, + padding=0, + ), + ] + if final_layer is not None: + layers.append(final_layer) + + value = nn.Sequential(*layers) + exec(f"pretrained.act_postprocess{s + 1}=value") + + pretrained.model.start_index = start_index + pretrained.model.patch_size = patch_size + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. 
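The types.MethodType injections used here bind plain functions as methods of one model instance, leaving the timm class itself untouched. A minimal standalone illustration (the Greeter class is hypothetical):

import types

class Greeter:
    pass

def hello(self):
    return f"hello from {type(self).__name__}"

g = Greeter()
g.hello = types.MethodType(hello, g)   # bound to this single instance only
assert g.hello() == "hello from Greeter"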
+ pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitb_rn50_384( + pretrained, use_readout="ignore", hooks=None, use_vit_only=False +): + model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained) + + hooks = [0, 1, 8, 11] if hooks == None else hooks + return _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/base_model.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf430239b47ec5ec07531263f26f5c24a2311cd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/base_model.py @@ -0,0 +1,16 @@ +import torch + + +class BaseModel(torch.nn.Module): + def load(self, path): + """Load model from file. + + Args: + path (str): file path + """ + parameters = torch.load(path, map_location=torch.device('cpu')) + + if "optimizer" in parameters: + parameters = parameters["model"] + + self.load_state_dict(parameters) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/blocks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..6d87a00680bb6ed9a6d7c3043ea30a1e90361794 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/blocks.py @@ -0,0 +1,439 @@ +import torch +import torch.nn as nn + +from .backbones.beit import ( + _make_pretrained_beitl16_512, + _make_pretrained_beitl16_384, + _make_pretrained_beitb16_384, + forward_beit, +) +from .backbones.swin_common import ( + forward_swin, +) +from .backbones.swin2 import ( + _make_pretrained_swin2l24_384, + _make_pretrained_swin2b24_384, + _make_pretrained_swin2t16_256, +) +from .backbones.swin import ( + _make_pretrained_swinl12_384, +) +from .backbones.levit import ( + _make_pretrained_levit_384, + forward_levit, +) +from .backbones.vit import ( + _make_pretrained_vitb_rn50_384, + _make_pretrained_vitl16_384, + _make_pretrained_vitb16_384, + forward_vit, +) + +def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, + use_vit_only=False, use_readout="ignore", in_features=[96, 256, 512, 1024]): + if backbone == "beitl16_512": + pretrained = _make_pretrained_beitl16_512( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # BEiT_512-L (backbone) + elif backbone == "beitl16_384": + pretrained = _make_pretrained_beitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # BEiT_384-L (backbone) + elif backbone == "beitb16_384": + pretrained = _make_pretrained_beitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # BEiT_384-B (backbone) + elif backbone == "swin2l24_384": + pretrained = _make_pretrained_swin2l24_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [192, 384, 768, 1536], features, groups=groups, expand=expand + ) # Swin2-L/12to24 (backbone) + elif backbone == "swin2b24_384": + pretrained = 
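Usage sketch for BaseModel.load above (the weights path is a placeholder): checkpoints saved together with an optimizer state keep the network weights under the "model" key, which load() unwraps before calling load_state_dict.

# model = DPTDepthModel(backbone="vitb_rn50_384", non_negative=True)
# model.load("weights/dpt_hybrid_384.pt")   # hypothetical local path
# model.eval()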
_make_pretrained_swin2b24_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [128, 256, 512, 1024], features, groups=groups, expand=expand + ) # Swin2-B/12to24 (backbone) + elif backbone == "swin2t16_256": + pretrained = _make_pretrained_swin2t16_256( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # Swin2-T/16 (backbone) + elif backbone == "swinl12_384": + pretrained = _make_pretrained_swinl12_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [192, 384, 768, 1536], features, groups=groups, expand=expand + ) # Swin-L/12 (backbone) + elif backbone == "next_vit_large_6m": + from .backbones.next_vit import _make_pretrained_next_vit_large_6m + pretrained = _make_pretrained_next_vit_large_6m(hooks=hooks) + scratch = _make_scratch( + in_features, features, groups=groups, expand=expand + ) # Next-ViT-L on ImageNet-1K-6M (backbone) + elif backbone == "levit_384": + pretrained = _make_pretrained_levit_384( + use_pretrained, hooks=hooks + ) + scratch = _make_scratch( + [384, 512, 768], features, groups=groups, expand=expand + ) # LeViT 384 (backbone) + elif backbone == "vitl16_384": + pretrained = _make_pretrained_vitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # ViT-L/16 - 85.0% Top1 (backbone) + elif backbone == "vitb_rn50_384": + pretrained = _make_pretrained_vitb_rn50_384( + use_pretrained, + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) + scratch = _make_scratch( + [256, 512, 768, 768], features, groups=groups, expand=expand + ) # ViT-H/16 - 85.0% Top1 (backbone) + elif backbone == "vitb16_384": + pretrained = _make_pretrained_vitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # ViT-B/16 - 84.6% Top1 (backbone) + elif backbone == "resnext101_wsl": + pretrained = _make_pretrained_resnext101_wsl(use_pretrained) + scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 + elif backbone == "efficientnet_lite3": + pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) + scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 + else: + print(f"Backbone '{backbone}' not implemented") + assert False + + return pretrained, scratch + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + if len(in_shape) >= 4: + out_shape4 = out_shape + + if expand: + out_shape1 = out_shape + out_shape2 = out_shape*2 + out_shape3 = out_shape*4 + if len(in_shape) >= 4: + out_shape4 = out_shape*8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + if len(in_shape) >= 4: + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + + return scratch + + +def _make_pretrained_efficientnet_lite3(use_pretrained, 
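A shape sketch for _make_scratch above, using the ViT-B per-stage channel counts as assumed toy numbers: each stage gets a stride-1, padding-1 3x3 convolution that maps that stage's backbone channels onto the shared decoder width.

import torch

scratch = _make_scratch([96, 192, 384, 768], 256, groups=1, expand=False)
x = torch.randn(1, 96, 64, 64)
assert scratch.layer1_rn(x).shape == (1, 256, 64, 64)   # channels remapped, H x W preserved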
exportable=False): + efficientnet = torch.hub.load( + "rwightman/gen-efficientnet-pytorch", + "tf_efficientnet_lite3", + pretrained=use_pretrained, + exportable=exportable + ) + return _make_efficientnet_backbone(efficientnet) + + +def _make_efficientnet_backbone(effnet): + pretrained = nn.Module() + + pretrained.layer1 = nn.Sequential( + effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] + ) + pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) + pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) + pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) + + return pretrained + + +def _make_resnet_backbone(resnet): + pretrained = nn.Module() + pretrained.layer1 = nn.Sequential( + resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 + ) + + pretrained.layer2 = resnet.layer2 + pretrained.layer3 = resnet.layer3 + pretrained.layer4 = resnet.layer4 + + return pretrained + + +def _make_pretrained_resnext101_wsl(use_pretrained): + resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") + return _make_resnet_backbone(resnet) + + + +class Interpolate(nn.Module): + """Interpolation module. + """ + + def __init__(self, scale_factor, mode, align_corners=False): + """Init. + + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: interpolated data + """ + + x = self.interp( + x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners + ) + + return x + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + out = self.relu(x) + out = self.conv1(out) + out = self.relu(out) + out = self.conv2(out) + + return out + x + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.resConfUnit1 = ResidualConvUnit(features) + self.resConfUnit2 = ResidualConvUnit(features) + + def forward(self, *xs): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + output += self.resConfUnit1(xs[1]) + + output = self.resConfUnit2(output) + + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=True + ) + + return output + + + + +class ResidualConvUnit_custom(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. 
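Shape sketch for the two blocks above, with assumed toy sizes: ResidualConvUnit preserves both channel count and spatial size (a pre-activation pair of 3x3 convolutions with a skip connection), while Interpolate rescales only the spatial dimensions.

import torch

feat = torch.randn(1, 256, 24, 24)
assert ResidualConvUnit(256)(feat).shape == (1, 256, 24, 24)
assert Interpolate(scale_factor=2, mode="bilinear")(feat).shape == (1, 256, 48, 48)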
+ + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + if self.bn==True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn==True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn==True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + # return out + x + + +class FeatureFusionBlock_custom(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock_custom, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand==True: + out_features = features//2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + self.size=size + + def forward(self, *xs, size=None): + """Forward pass. 
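A note on nn.quantized.FloatFunctional used above: in float mode skip_add.add(a, b) is exactly a + b; the wrapper exists so that post-training quantization can observe the skip connection and substitute the correct quantized addition. A minimal check:

import torch
import torch.nn as nn

f = nn.quantized.FloatFunctional()
a, b = torch.randn(3), torch.randn(3)
assert torch.equal(f.add(a, b), a + b)   # identical to plain addition in float mode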
+ + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + # output += res + + output = self.resConfUnit2(output) + + if (size is None) and (self.size is None): + modifier = {"scale_factor": 2} + elif size is None: + modifier = {"size": self.size} + else: + modifier = {"size": size} + + output = nn.functional.interpolate( + output, **modifier, mode="bilinear", align_corners=self.align_corners + ) + + output = self.out_conv(output) + + return output + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/dpt_depth.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/dpt_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..993d296405dcb1d2bf58a9f0258cef4587b75b47 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/dpt_depth.py @@ -0,0 +1,166 @@ +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import ( + FeatureFusionBlock_custom, + Interpolate, + _make_encoder, + forward_beit, + forward_swin, + forward_levit, + forward_vit, +) +from .backbones.levit import stem_b4_transpose +from custom_timm.models.layers import get_act_layer + + +def _make_fusion_block(features, use_bn, size = None): + return FeatureFusionBlock_custom( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + size=size, + ) + + +class DPT(BaseModel): + def __init__( + self, + head, + features=256, + backbone="vitb_rn50_384", + readout="project", + channels_last=False, + use_bn=False, + **kwargs + ): + + super(DPT, self).__init__() + + self.channels_last = channels_last + + # For the Swin, Swin 2, LeViT and Next-ViT Transformers, the hierarchical architectures prevent setting the + # hooks freely. Instead, the hooks have to be chosen according to the ranges specified in the comments. 
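A toy forward pass through FeatureFusionBlock_custom above (sizes assumed): the second input is refined and added to the first, the sum is refined again, resized (2x by default, or to an explicit size), and finally projected by the 1x1 output convolution.

import torch
import torch.nn as nn

fuse = FeatureFusionBlock_custom(64, nn.ReLU(False))
a = torch.randn(1, 64, 12, 12)
b = torch.randn(1, 64, 12, 12)
assert fuse(a, b).shape == (1, 64, 24, 24)            # default modifier: scale_factor=2
assert fuse(a, b, size=(17, 23)).shape == (1, 64, 17, 23)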
+ hooks = { + "beitl16_512": [5, 11, 17, 23], + "beitl16_384": [5, 11, 17, 23], + "beitb16_384": [2, 5, 8, 11], + "swin2l24_384": [1, 1, 17, 1], # Allowed ranges: [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "swin2b24_384": [1, 1, 17, 1], # [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "swin2t16_256": [1, 1, 5, 1], # [0, 1], [0, 1], [ 0, 5], [ 0, 1] + "swinl12_384": [1, 1, 17, 1], # [0, 1], [0, 1], [ 0, 17], [ 0, 1] + "next_vit_large_6m": [2, 6, 36, 39], # [0, 2], [3, 6], [ 7, 36], [37, 39] + "levit_384": [3, 11, 21], # [0, 3], [6, 11], [14, 21] + "vitb_rn50_384": [0, 1, 8, 11], + "vitb16_384": [2, 5, 8, 11], + "vitl16_384": [5, 11, 17, 23], + }[backbone] + + if "next_vit" in backbone: + in_features = { + "next_vit_large_6m": [96, 256, 512, 1024], + }[backbone] + else: + in_features = None + + # Instantiate backbone and reassemble blocks + self.pretrained, self.scratch = _make_encoder( + backbone, + features, + False, # Set to true of you want to train from scratch, uses ImageNet weights + groups=1, + expand=False, + exportable=False, + hooks=hooks, + use_readout=readout, + in_features=in_features, + ) + + self.number_layers = len(hooks) if hooks is not None else 4 + size_refinenet3 = None + self.scratch.stem_transpose = None + + if "beit" in backbone: + self.forward_transformer = forward_beit + elif "swin" in backbone: + self.forward_transformer = forward_swin + elif "next_vit" in backbone: + from .backbones.next_vit import forward_next_vit + self.forward_transformer = forward_next_vit + elif "levit" in backbone: + self.forward_transformer = forward_levit + size_refinenet3 = 7 + self.scratch.stem_transpose = stem_b4_transpose(256, 128, get_act_layer("hard_swish")) + else: + self.forward_transformer = forward_vit + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3) + if self.number_layers >= 4: + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + self.scratch.output_conv = head + + + def forward(self, x): + if self.channels_last == True: + x.contiguous(memory_format=torch.channels_last) + + layers = self.forward_transformer(self.pretrained, x) + if self.number_layers == 3: + layer_1, layer_2, layer_3 = layers + else: + layer_1, layer_2, layer_3, layer_4 = layers + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + if self.number_layers >= 4: + layer_4_rn = self.scratch.layer4_rn(layer_4) + + if self.number_layers == 3: + path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:]) + else: + path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + if self.scratch.stem_transpose is not None: + path_1 = self.scratch.stem_transpose(path_1) + + out = self.scratch.output_conv(path_1) + + return out + + +class DPTDepthModel(DPT): + def __init__(self, path=None, non_negative=True, **kwargs): + features = kwargs["features"] if "features" in kwargs else 256 + head_features_1 = kwargs["head_features_1"] if "head_features_1" in kwargs else features + head_features_2 = kwargs["head_features_2"] if "head_features_2" in kwargs else 32 + kwargs.pop("head_features_1", None) + kwargs.pop("head_features_2", None) + + 
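An end-to-end usage sketch for the DPT model above (weights omitted, output shape assumed): the decoder fuses refinenet4 through refinenet1 from coarse to fine, and the head constructed below upsamples once more to the input resolution.

# import torch
# model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True)
# model.eval()
# with torch.no_grad():
#     depth = model(torch.randn(1, 3, 384, 384))   # expected shape: (1, 384, 384)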
head = nn.Sequential( + nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + super().__init__(head, **kwargs) + + if path is not None: + self.load(path) + + def forward(self, x): + return super().forward(x).squeeze(dim=1) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net.py new file mode 100644 index 0000000000000000000000000000000000000000..8a954977800b0a0f48807e80fa63041910e33c1f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net.py @@ -0,0 +1,76 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, Interpolate, _make_encoder + + +class MidasNet(BaseModel): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=256, non_negative=True): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet, self).__init__() + + use_pretrained = False if path is None else True + + self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) + + self.scratch.refinenet4 = FeatureFusionBlock(features) + self.scratch.refinenet3 = FeatureFusionBlock(features) + self.scratch.refinenet2 = FeatureFusionBlock(features) + self.scratch.refinenet1 = FeatureFusionBlock(features) + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + ) + + if path: + self.load(path) + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net_custom.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..50e4acb5e53d5fabefe3dde16ab49c33c2b7797c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/midas_net_custom.py @@ -0,0 +1,128 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder + + +class MidasNet_small(BaseModel): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, + blocks={'expand': True}): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. 
Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet_small, self).__init__() + + use_pretrained = False if path else True + + self.channels_last = channels_last + self.blocks = blocks + self.backbone = backbone + + self.groups = 1 + + features1=features + features2=features + features3=features + features4=features + self.expand = False + if "expand" in self.blocks and self.blocks['expand'] == True: + self.expand = True + features1=features + features2=features*2 + features3=features*4 + features4=features*8 + + self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) + + self.scratch.activation = nn.ReLU(False) + + self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) + + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), + self.scratch.activation, + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + if path: + self.load(path) + + + def forward(self, x): + """Forward pass. 
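With blocks={'expand': True} (the default above), the per-stage fusion widths scale as features x (1, 2, 4, 8), so features=64 yields the stage widths computed below, matching the scratch layers built with expand=True for the efficientnet_lite3 backbone.

features = 64
widths = [features * m for m in (1, 2, 4, 8)]   # [64, 128, 256, 512]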
+ + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + if self.channels_last==True: + print("self.channels_last = ", self.channels_last) + x.contiguous(memory_format=torch.channels_last) + + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) + + + +def fuse_model(m): + prev_previous_type = nn.Identity() + prev_previous_name = '' + previous_type = nn.Identity() + previous_name = '' + for name, module in m.named_modules(): + if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: + # print("FUSED ", prev_previous_name, previous_name, name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) + elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: + # print("FUSED ", prev_previous_name, previous_name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) + # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: + # print("FUSED ", previous_name, name) + # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) + + prev_previous_type = previous_type + prev_previous_name = previous_name + previous_type = type(module) + previous_name = name \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/model_loader.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/model_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..5119d8a200eaecd50ab0c8da8ea7e4141e47c634 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/model_loader.py @@ -0,0 +1,242 @@ +import cv2 +import torch + +from custom_midas_repo.midas.dpt_depth import DPTDepthModel +from custom_midas_repo.midas.midas_net import MidasNet +from custom_midas_repo.midas.midas_net_custom import MidasNet_small +from custom_midas_repo.midas.transforms import Resize, NormalizeImage, PrepareForNet + +from torchvision.transforms import Compose + +default_models = { + "dpt_beit_large_512": "weights/dpt_beit_large_512.pt", + "dpt_beit_large_384": "weights/dpt_beit_large_384.pt", + "dpt_beit_base_384": "weights/dpt_beit_base_384.pt", + "dpt_swin2_large_384": "weights/dpt_swin2_large_384.pt", + "dpt_swin2_base_384": "weights/dpt_swin2_base_384.pt", + "dpt_swin2_tiny_256": "weights/dpt_swin2_tiny_256.pt", + "dpt_swin_large_384": "weights/dpt_swin_large_384.pt", + "dpt_next_vit_large_384": "weights/dpt_next_vit_large_384.pt", + "dpt_levit_224": "weights/dpt_levit_224.pt", + "dpt_large_384": "weights/dpt_large_384.pt", + "dpt_hybrid_384": "weights/dpt_hybrid_384.pt", + "midas_v21_384": "weights/midas_v21_384.pt", + "midas_v21_small_256": "weights/midas_v21_small_256.pt", + "openvino_midas_v21_small_256": "weights/openvino_midas_v21_small_256.xml", +} + + +def load_model(device, model_path, model_type="dpt_large_384", optimize=True, height=None, square=False): + 
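fuse_model above scans named_modules() for Conv2d -> BatchNorm2d (-> ReLU) chains and fuses each one in place with torch.quantization.fuse_modules. A hedged usage sketch (model construction elided; fusion requires eval mode):

# m = MidasNet_small(None)
# m.eval()
# fuse_model(m)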
"""Load the specified network. + + Args: + device (device): the torch device used + model_path (str): path to saved model + model_type (str): the type of the model to be loaded + optimize (bool): optimize the model to half-integer on CUDA? + height (int): inference encoder image height + square (bool): resize to a square resolution? + + Returns: + The loaded network, the transform which prepares images as input to the network and the dimensions of the + network input + """ + if "openvino" in model_type: + from openvino.runtime import Core + + keep_aspect_ratio = not square + + if model_type == "dpt_beit_large_512": + model = DPTDepthModel( + path=model_path, + backbone="beitl16_512", + non_negative=True, + ) + net_w, net_h = 512, 512 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_beit_large_384": + model = DPTDepthModel( + path=model_path, + backbone="beitl16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_beit_base_384": + model = DPTDepthModel( + path=model_path, + backbone="beitb16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_large_384": + model = DPTDepthModel( + path=model_path, + backbone="swin2l24_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_base_384": + model = DPTDepthModel( + path=model_path, + backbone="swin2b24_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin2_tiny_256": + model = DPTDepthModel( + path=model_path, + backbone="swin2t16_256", + non_negative=True, + ) + net_w, net_h = 256, 256 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_swin_large_384": + model = DPTDepthModel( + path=model_path, + backbone="swinl12_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_next_vit_large_384": + model = DPTDepthModel( + path=model_path, + backbone="next_vit_large_6m", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + # We change the notation from dpt_levit_224 (MiDaS notation) to levit_384 (timm notation) here, where the 224 refers + # to the resolution 224x224 used by LeViT and 384 is the first entry of the embed_dim, see _cfg and model_cfgs of + # https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/levit.py + # (commit id: 927f031293a30afb940fff0bee34b85d9c059b0e) + elif model_type == "dpt_levit_224": + model = DPTDepthModel( + path=model_path, + backbone="levit_384", + non_negative=True, + head_features_1=64, + head_features_2=8, + ) + net_w, net_h = 224, 224 + keep_aspect_ratio = False + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 
0.5]) + + elif model_type == "dpt_large_384": + model = DPTDepthModel( + path=model_path, + backbone="vitl16_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "dpt_hybrid_384": + model = DPTDepthModel( + path=model_path, + backbone="vitb_rn50_384", + non_negative=True, + ) + net_w, net_h = 384, 384 + resize_mode = "minimal" + normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + + elif model_type == "midas_v21_384": + model = MidasNet(model_path, non_negative=True) + net_w, net_h = 384, 384 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + elif model_type == "midas_v21_small_256": + model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, + non_negative=True, blocks={'expand': True}) + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + elif model_type == "openvino_midas_v21_small_256": + ie = Core() + uncompiled_model = ie.read_model(model=model_path) + model = ie.compile_model(uncompiled_model, "CPU") + net_w, net_h = 256, 256 + resize_mode = "upper_bound" + normalization = NormalizeImage( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] + ) + + else: + print(f"model_type '{model_type}' not implemented, use: --model_type large") + assert False + + if not "openvino" in model_type: + print("Model loaded, number of parameters = {:.0f}M".format(sum(p.numel() for p in model.parameters()) / 1e6)) + else: + print("Model loaded, optimized with OpenVINO") + + if "openvino" in model_type: + keep_aspect_ratio = False + + if height is not None: + net_w, net_h = height, height + + transform = Compose( + [ + Resize( + net_w, + net_h, + resize_target=None, + keep_aspect_ratio=keep_aspect_ratio, + ensure_multiple_of=32, + resize_method=resize_mode, + image_interpolation_method=cv2.INTER_CUBIC, + ), + normalization, + PrepareForNet(), + ] + ) + + if not "openvino" in model_type: + model.eval() + + if optimize and (device == torch.device("cuda")): + if not "openvino" in model_type: + model = model.to(memory_format=torch.channels_last) + model = model.half() + else: + print("Error: OpenVINO models are already optimized. No optimization to half-float possible.") + exit() + + if not "openvino" in model_type: + model.to(device) + + return model, transform, net_w, net_h diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/transforms.py b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..350cbc11662633ad7f8968eb10be2e7de6e384e9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_midas_repo/midas/transforms.py @@ -0,0 +1,234 @@ +import numpy as np +import cv2 +import math + + +def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): + """Rezise the sample to ensure the given size. Keeps aspect ratio. 
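A typical call pattern for load_model above (the device, weights path, and input image name are placeholders): the returned transform expects a dict holding an HWC float image in [0, 1], as prepared by the transforms defined in the next file.

# import torch
# model, transform, net_w, net_h = load_model(
#     torch.device("cuda"), "weights/dpt_hybrid_384.pt",
#     model_type="dpt_hybrid_384", optimize=True)
# sample = transform({"image": img_rgb_float01})["image"]   # img_rgb_float01 is a placeholder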
+ + Args: + sample (dict): sample + size (tuple): image size + + Returns: + tuple: new size + """ + shape = list(sample["disparity"].shape) + + if shape[0] >= size[0] and shape[1] >= size[1]: + return sample + + scale = [0, 0] + scale[0] = size[0] / shape[0] + scale[1] = size[1] / shape[1] + + scale = max(scale) + + shape[0] = math.ceil(scale * shape[0]) + shape[1] = math.ceil(scale * shape[1]) + + # resize + sample["image"] = cv2.resize( + sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method + ) + + sample["disparity"] = cv2.resize( + sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST + ) + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + tuple(shape[::-1]), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return tuple(shape) + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". 
+ """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size( + sample["image"].shape[1], sample["image"].shape[0] + ) + + # resize sample + sample["image"] = cv2.resize( + sample["image"], + (width, height), + interpolation=self.__image_interpolation_method, + ) + + if self.__resize_target: + if "disparity" in sample: + sample["disparity"] = cv2.resize( + sample["disparity"], + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + + if "depth" in sample: + sample["depth"] = cv2.resize( + sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST + ) + + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. + """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. 
+ """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + if "disparity" in sample: + disparity = sample["disparity"].astype(np.float32) + sample["disparity"] = np.ascontiguousarray(disparity) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + return sample diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33e7a7f594ef441479257c788e4c0d6e08657fc8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/__init__.py @@ -0,0 +1 @@ +#Dummy file ensuring this package will be recognized \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..210a2989138380559f23045b568d0fbbeb918c03 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# flake8: noqa +from .arraymisc import * +from .fileio import * +from .image import * +from .utils import * +from .version import * +from .video import * +from .visualization import * + +# The following modules are not imported to this level, so mmcv may be used +# without PyTorch. +# - runner +# - parallel +# - op diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4b4700d6139ae3d604ff6e542468cce4200c020c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .quantization import dequantize, quantize + +__all__ = ['quantize', 'dequantize'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/quantization.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..8e47a3545780cf071a1ef8195efb0b7b662c8186 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/arraymisc/quantization.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + + +def quantize(arr, min_val, max_val, levels, dtype=np.int64): + """Quantize an array of (-inf, inf) to [0, levels-1]. + + Args: + arr (ndarray): Input array. + min_val (scalar): Minimum value to be clipped. + max_val (scalar): Maximum value to be clipped. + levels (int): Quantization levels. + dtype (np.type): The type of the quantized array. + + Returns: + tuple: Quantized array. 
+ """ + if not (isinstance(levels, int) and levels > 1): + raise ValueError( + f'levels must be a positive integer, but got {levels}') + if min_val >= max_val: + raise ValueError( + f'min_val ({min_val}) must be smaller than max_val ({max_val})') + + arr = np.clip(arr, min_val, max_val) - min_val + quantized_arr = np.minimum( + np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1) + + return quantized_arr + + +def dequantize(arr, min_val, max_val, levels, dtype=np.float64): + """Dequantize an array. + + Args: + arr (ndarray): Input array. + min_val (scalar): Minimum value to be clipped. + max_val (scalar): Maximum value to be clipped. + levels (int): Quantization levels. + dtype (np.type): The type of the dequantized array. + + Returns: + tuple: Dequantized array. + """ + if not (isinstance(levels, int) and levels > 1): + raise ValueError( + f'levels must be a positive integer, but got {levels}') + if min_val >= max_val: + raise ValueError( + f'min_val ({min_val}) must be smaller than max_val ({max_val})') + + dequantized_arr = (arr + 0.5).astype(dtype) * (max_val - + min_val) / levels + min_val + + return dequantized_arr diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7246c897430f0cc7ce12719ad8608824fc734446 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .alexnet import AlexNet +# yapf: disable +from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, + PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS, + ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule, + ConvTranspose2d, ConvTranspose3d, ConvWS2d, + DepthwiseSeparableConvModule, GeneralizedAttention, + HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d, + NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish, + build_activation_layer, build_conv_layer, + build_norm_layer, build_padding_layer, build_plugin_layer, + build_upsample_layer, conv_ws_2d, is_norm) +from .builder import MODELS, build_model_from_cfg +# yapf: enable +from .resnet import ResNet, make_res_layer +from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit, + NormalInit, PretrainedInit, TruncNormalInit, UniformInit, + XavierInit, bias_init_with_prob, caffe2_xavier_init, + constant_init, fuse_conv_bn, get_model_complexity_info, + initialize, kaiming_init, normal_init, trunc_normal_init, + uniform_init, xavier_init) +from .vgg import VGG, make_vgg_layer + +__all__ = [ + 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', + 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init', + 'uniform_init', 'kaiming_init', 'caffe2_xavier_init', + 'bias_init_with_prob', 'ConvModule', 'build_activation_layer', + 'build_conv_layer', 'build_norm_layer', 'build_padding_layer', + 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d', + 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish', + 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', + 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', + 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d', + 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d', + 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', + 'initialize', 'INITIALIZERS', 
'ConstantInit', 'XavierInit', 'NormalInit', + 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', + 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/alexnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/alexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..89e36b8c7851f895d9ae7f07149f0e707456aab0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/alexnet.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.nn as nn + + +class AlexNet(nn.Module): + """AlexNet backbone. + + Args: + num_classes (int): number of classes for classification. + """ + + def __init__(self, num_classes=-1): + super(AlexNet, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + from ..runner import load_checkpoint + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + # use default initializer + pass + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f33124ed23fc6f27119a37bcb5ab004d3572be0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
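Shape check for the AlexNet backbone above (assuming a standard 224x224 input): the convolutional stack ends in a 256x6x6 feature map, which the optional classifier flattens; with the default num_classes=-1 the raw features are returned instead.

import torch

assert AlexNet(num_classes=10)(torch.randn(1, 3, 224, 224)).shape == (1, 10)
assert AlexNet()(torch.randn(1, 3, 224, 224)).shape == (1, 256, 6, 6)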
+from .activation import build_activation_layer +from .context_block import ContextBlock +from .conv import build_conv_layer +from .conv2d_adaptive_padding import Conv2dAdaptivePadding +from .conv_module import ConvModule +from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d +from .depthwise_separable_conv_module import DepthwiseSeparableConvModule +from .drop import Dropout, DropPath +from .generalized_attention import GeneralizedAttention +from .hsigmoid import HSigmoid +from .hswish import HSwish +from .non_local import NonLocal1d, NonLocal2d, NonLocal3d +from .norm import build_norm_layer, is_norm +from .padding import build_padding_layer +from .plugin import build_plugin_layer +from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, + PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS) +from .scale import Scale +from .swish import Swish +from .upsample import build_upsample_layer +from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d, + Linear, MaxPool2d, MaxPool3d) + +__all__ = [ + 'ConvModule', 'build_activation_layer', 'build_conv_layer', + 'build_norm_layer', 'build_padding_layer', 'build_upsample_layer', + 'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d', + 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention', + 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS', + 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d', + 'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear', + 'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d', + 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/activation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..0881d7201de63ea47c9e585eead35f5c12c1881f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/activation.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version +from .registry import ACTIVATION_LAYERS + +for module in [ + nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU, + nn.Sigmoid, nn.Tanh +]: + ACTIVATION_LAYERS.register_module(module=module) + + +@ACTIVATION_LAYERS.register_module(name='Clip') +@ACTIVATION_LAYERS.register_module() +class Clamp(nn.Module): + """Clamp activation layer. + + This activation function is to clamp the feature map value within + :math:`[min, max]`. More details can be found in ``torch.clamp()``. + + Args: + min (Number | optional): Lower-bound of the range to be clamped to. + Default to -1. + max (Number | optional): Upper-bound of the range to be clamped to. + Default to 1. + """ + + def __init__(self, min=-1., max=1.): + super(Clamp, self).__init__() + self.min = min + self.max = max + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): The input tensor. + + Returns: + torch.Tensor: Clamped tensor. + """ + return torch.clamp(x, min=self.min, max=self.max) + + +class GELU(nn.Module): + r"""Applies the Gaussian Error Linear Units function: + + .. math:: + \text{GELU}(x) = x * \Phi(x) + where :math:`\Phi(x)` is the Cumulative Distribution Function for + Gaussian Distribution. 
+
+    Shape:
+        - Input: :math:`(N, *)` where `*` means any number of additional
+          dimensions
+        - Output: :math:`(N, *)`, same shape as the input
+
+    .. image:: scripts/activation_images/GELU.png
+
+    Examples::
+
+        >>> m = nn.GELU()
+        >>> input = torch.randn(2)
+        >>> output = m(input)
+    """
+
+    def forward(self, input):
+        return F.gelu(input)
+
+
+if (TORCH_VERSION == 'parrots'
+        or digit_version(TORCH_VERSION) < digit_version('1.4')):
+    ACTIVATION_LAYERS.register_module(module=GELU)
+else:
+    ACTIVATION_LAYERS.register_module(module=nn.GELU)
+
+
+def build_activation_layer(cfg):
+    """Build activation layer.
+
+    Args:
+        cfg (dict): The activation layer config, which should contain:
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate an activation layer.
+
+    Returns:
+        nn.Module: Created activation layer.
+    """
+    return build_from_cfg(cfg, ACTIVATION_LAYERS)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/context_block.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/context_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..d60fdb904c749ce3b251510dff3cc63cea70d42e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/context_block.py
@@ -0,0 +1,125 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn
+
+from ..utils import constant_init, kaiming_init
+from .registry import PLUGIN_LAYERS
+
+
+def last_zero_init(m):
+    if isinstance(m, nn.Sequential):
+        constant_init(m[-1], val=0)
+    else:
+        constant_init(m, val=0)
+
+
+@PLUGIN_LAYERS.register_module()
+class ContextBlock(nn.Module):
+    """ContextBlock module in GCNet.
+
+    See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and
+    Beyond' (https://arxiv.org/abs/1904.11492) for details.
+
+    Args:
+        in_channels (int): Channels of the input feature map.
+        ratio (float): Ratio of channels of transform bottleneck.
+        pooling_type (str): Pooling method for context modeling.
+            Options are 'att' and 'avg', which stand for attention pooling
+            and average pooling respectively. Default: 'att'.
+        fusion_types (Sequence[str]): Fusion method for feature fusion.
+            Options are 'channel_add' and 'channel_mul', which stand for
+            channel-wise addition and multiplication respectively.
Default: ('channel_add',) + """ + + _abbr_ = 'context_block' + + def __init__(self, + in_channels, + ratio, + pooling_type='att', + fusion_types=('channel_add', )): + super(ContextBlock, self).__init__() + assert pooling_type in ['avg', 'att'] + assert isinstance(fusion_types, (list, tuple)) + valid_fusion_types = ['channel_add', 'channel_mul'] + assert all([f in valid_fusion_types for f in fusion_types]) + assert len(fusion_types) > 0, 'at least one fusion should be used' + self.in_channels = in_channels + self.ratio = ratio + self.planes = int(in_channels * ratio) + self.pooling_type = pooling_type + self.fusion_types = fusion_types + if pooling_type == 'att': + self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1) + self.softmax = nn.Softmax(dim=2) + else: + self.avg_pool = nn.AdaptiveAvgPool2d(1) + if 'channel_add' in fusion_types: + self.channel_add_conv = nn.Sequential( + nn.Conv2d(self.in_channels, self.planes, kernel_size=1), + nn.LayerNorm([self.planes, 1, 1]), + nn.ReLU(inplace=True), # yapf: disable + nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) + else: + self.channel_add_conv = None + if 'channel_mul' in fusion_types: + self.channel_mul_conv = nn.Sequential( + nn.Conv2d(self.in_channels, self.planes, kernel_size=1), + nn.LayerNorm([self.planes, 1, 1]), + nn.ReLU(inplace=True), # yapf: disable + nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) + else: + self.channel_mul_conv = None + self.reset_parameters() + + def reset_parameters(self): + if self.pooling_type == 'att': + kaiming_init(self.conv_mask, mode='fan_in') + self.conv_mask.inited = True + + if self.channel_add_conv is not None: + last_zero_init(self.channel_add_conv) + if self.channel_mul_conv is not None: + last_zero_init(self.channel_mul_conv) + + def spatial_pool(self, x): + batch, channel, height, width = x.size() + if self.pooling_type == 'att': + input_x = x + # [N, C, H * W] + input_x = input_x.view(batch, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(batch, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + # [N, 1, H * W, 1] + context_mask = context_mask.unsqueeze(-1) + # [N, 1, C, 1] + context = torch.matmul(input_x, context_mask) + # [N, C, 1, 1] + context = context.view(batch, channel, 1, 1) + else: + # [N, C, 1, 1] + context = self.avg_pool(x) + + return context + + def forward(self, x): + # [N, C, 1, 1] + context = self.spatial_pool(x) + + out = x + if self.channel_mul_conv is not None: + # [N, C, 1, 1] + channel_mul_term = torch.sigmoid(self.channel_mul_conv(context)) + out = out * channel_mul_term + if self.channel_add_conv is not None: + # [N, C, 1, 1] + channel_add_term = self.channel_add_conv(context) + out = out + channel_add_term + + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..cf54491997a48ac3e7fadc4183ab7bf3e831024c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
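A quick shape smoke test for the ContextBlock defined above (the channel count and spatial size are arbitrary illustrative choices):

import torch

block = ContextBlock(in_channels=64, ratio=1. / 16)
x = torch.randn(2, 64, 20, 20)
out = block(x)               # context is pooled to [N, C, 1, 1], then fused back
assert out.shape == x.shape  # the fusion is residual, so the shape is preserved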
+from torch import nn
+
+from .registry import CONV_LAYERS
+
+CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
+CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
+CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
+CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
+
+
+def build_conv_layer(cfg, *args, **kwargs):
+    """Build convolution layer.
+
+    Args:
+        cfg (None or dict): The conv layer config, which should contain:
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate a conv layer.
+        args (argument list): Arguments passed to the `__init__`
+            method of the corresponding conv layer.
+        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
+            method of the corresponding conv layer.
+
+    Returns:
+        nn.Module: Created conv layer.
+    """
+    if cfg is None:
+        cfg_ = dict(type='Conv2d')
+    else:
+        if not isinstance(cfg, dict):
+            raise TypeError('cfg must be a dict')
+        if 'type' not in cfg:
+            raise KeyError('the cfg dict must contain the key "type"')
+        cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop('type')
+    if layer_type not in CONV_LAYERS:
+        raise KeyError(f'Unrecognized conv type {layer_type}')
+    else:
+        conv_layer = CONV_LAYERS.get(layer_type)
+
+    layer = conv_layer(*args, **kwargs, **cfg_)
+
+    return layer
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv2d_adaptive_padding.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv2d_adaptive_padding.py
new file mode 100644
index 0000000000000000000000000000000000000000..b45e758ac6cf8dfb0382d072fe09125bc7e9b888
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv2d_adaptive_padding.py
@@ -0,0 +1,62 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+from torch import nn
+from torch.nn import functional as F
+
+from .registry import CONV_LAYERS
+
+
+@CONV_LAYERS.register_module()
+class Conv2dAdaptivePadding(nn.Conv2d):
+    """Implementation of 2D convolution in TensorFlow with `padding` as
+    "same", which pads the input (if needed) so that it is fully covered by
+    the specified filter and stride. For stride 1, this ensures that the
+    output size equals the input size. For stride 2, for example, the output
+    dimensions are halved.
+
+    Args:
+        in_channels (int): Number of channels in the input image
+        out_channels (int): Number of channels produced by the convolution
+        kernel_size (int or tuple): Size of the convolving kernel
+        stride (int or tuple, optional): Stride of the convolution. Default: 1
+        padding (int or tuple, optional): Zero-padding added to both sides of
+            the input. Default: 0
+        dilation (int or tuple, optional): Spacing between kernel elements.
+            Default: 1
+        groups (int, optional): Number of blocked connections from input
+            channels to output channels. Default: 1
+        bias (bool, optional): If ``True``, adds a learnable bias to the
+            output.
Default: ``True`` + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True): + super().__init__(in_channels, out_channels, kernel_size, stride, 0, + dilation, groups, bias) + + def forward(self, x): + img_h, img_w = x.size()[-2:] + kernel_h, kernel_w = self.weight.size()[-2:] + stride_h, stride_w = self.stride + output_h = math.ceil(img_h / stride_h) + output_w = math.ceil(img_w / stride_w) + pad_h = ( + max((output_h - 1) * self.stride[0] + + (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0)) + pad_w = ( + max((output_w - 1) * self.stride[1] + + (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0)) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2 + ]) + return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, + self.dilation, self.groups) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_module.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_module.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b82b6b35939be7031462d3febb6561e42854ea --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_module.py @@ -0,0 +1,206 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch.nn as nn + +from custom_mmpkg.custom_mmcv.utils import _BatchNorm, _InstanceNorm +from ..utils import constant_init, kaiming_init +from .activation import build_activation_layer +from .conv import build_conv_layer +from .norm import build_norm_layer +from .padding import build_padding_layer +from .registry import PLUGIN_LAYERS + + +@PLUGIN_LAYERS.register_module() +class ConvModule(nn.Module): + """A conv block that bundles conv/norm/activation layers. + + This block simplifies the usage of convolution layers, which are commonly + used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). + It is based upon three build methods: `build_conv_layer()`, + `build_norm_layer()` and `build_activation_layer()`. + + Besides, we add some additional features in this module. + 1. Automatically set `bias` of the conv layer. + 2. Spectral norm is supported. + 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only + supports zero and circular padding, and we add "reflect" padding mode. + + Args: + in_channels (int): Number of channels in the input feature map. + Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. + groups (int): Number of blocked connections from input channels to + output channels. Same as that in ``nn._ConvNd``. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. 
+ act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + inplace (bool): Whether to use inplace mode for activation. + Default: True. + with_spectral_norm (bool): Whether use spectral norm in conv module. + Default: False. + padding_mode (str): If the `padding_mode` has not been supported by + current `Conv2d` in PyTorch, we will use our own padding layer + instead. Currently, we support ['zeros', 'circular'] with official + implementation and ['reflect'] with our own implementation. + Default: 'zeros'. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Default: ('conv', 'norm', 'act'). + """ + + _abbr_ = 'conv_block' + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias='auto', + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + inplace=True, + with_spectral_norm=False, + padding_mode='zeros', + order=('conv', 'norm', 'act')): + super(ConvModule, self).__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + assert act_cfg is None or isinstance(act_cfg, dict) + official_padding_mode = ['zeros', 'circular'] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.inplace = inplace + self.with_spectral_norm = with_spectral_norm + self.with_explicit_padding = padding_mode not in official_padding_mode + self.order = order + assert isinstance(self.order, tuple) and len(self.order) == 3 + assert set(order) == set(['conv', 'norm', 'act']) + + self.with_norm = norm_cfg is not None + self.with_activation = act_cfg is not None + # if the conv layer is before a norm layer, bias is unnecessary. 
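+        # ('auto' therefore resolves to bias=False whenever a norm layer is
+        # configured, and to bias=True otherwise.)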
+ if bias == 'auto': + bias = not self.with_norm + self.with_bias = bias + + if self.with_explicit_padding: + pad_cfg = dict(type=padding_mode) + self.padding_layer = build_padding_layer(pad_cfg, padding) + + # reset padding to 0 for conv module + conv_padding = 0 if self.with_explicit_padding else padding + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=conv_padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + if self.with_spectral_norm: + self.conv = nn.utils.spectral_norm(self.conv) + + # build normalization layers + if self.with_norm: + # norm layer is after conv layer + if order.index('norm') > order.index('conv'): + norm_channels = out_channels + else: + norm_channels = in_channels + self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) + self.add_module(self.norm_name, norm) + if self.with_bias: + if isinstance(norm, (_BatchNorm, _InstanceNorm)): + warnings.warn( + 'Unnecessary conv bias before batch/instance norm') + else: + self.norm_name = None + + # build activation layer + if self.with_activation: + act_cfg_ = act_cfg.copy() + # nn.Tanh has no 'inplace' argument + if act_cfg_['type'] not in [ + 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish' + ]: + act_cfg_.setdefault('inplace', inplace) + self.activate = build_activation_layer(act_cfg_) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + if self.norm_name: + return getattr(self, self.norm_name) + else: + return None + + def init_weights(self): + # 1. It is mainly for customized conv layers with their own + # initialization manners by calling their own ``init_weights()``, + # and we do not want ConvModule to override the initialization. + # 2. For customized conv layers without their own initialization + # manners (that is, they don't have their own ``init_weights()``) + # and PyTorch's conv layers, they will be initialized by + # this method with default ``kaiming_init``. + # Note: For PyTorch's conv layers, they will be overwritten by our + # initialization implementation using default ``kaiming_init``. 
+ if not hasattr(self.conv, 'init_weights'): + if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': + nonlinearity = 'leaky_relu' + a = self.act_cfg.get('negative_slope', 0.01) + else: + nonlinearity = 'relu' + a = 0 + kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, x, activate=True, norm=True): + for layer in self.order: + if layer == 'conv': + if self.with_explicit_padding: + x = self.padding_layer(x) + x = self.conv(x) + elif layer == 'norm' and norm and self.with_norm: + x = self.norm(x) + elif layer == 'act' and activate and self.with_activation: + x = self.activate(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_ws.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..a3941e27874993418b3b5708d5a7485f175ff9c8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/conv_ws.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .registry import CONV_LAYERS + + +def conv_ws_2d(input, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + eps=1e-5): + c_in = weight.size(0) + weight_flat = weight.view(c_in, -1) + mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) + std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) + weight = (weight - mean) / (std + eps) + return F.conv2d(input, weight, bias, stride, padding, dilation, groups) + + +@CONV_LAYERS.register_module('ConvWS') +class ConvWS2d(nn.Conv2d): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + eps=1e-5): + super(ConvWS2d, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.eps = eps + + def forward(self, x): + return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, + self.dilation, self.groups, self.eps) + + +@CONV_LAYERS.register_module(name='ConvAWS') +class ConvAWS2d(nn.Conv2d): + """AWS (Adaptive Weight Standardization) + + This is a variant of Weight Standardization + (https://arxiv.org/pdf/1903.10520.pdf) + It is used in DetectoRS to avoid NaN + (https://arxiv.org/pdf/2006.02334.pdf) + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the conv kernel + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of + the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. + Default: 1 + groups (int, optional): Number of blocked connections from input + channels to output channels. Default: 1 + bias (bool, optional): If set True, adds a learnable bias to the + output. 
Default: True + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.register_buffer('weight_gamma', + torch.ones(self.out_channels, 1, 1, 1)) + self.register_buffer('weight_beta', + torch.zeros(self.out_channels, 1, 1, 1)) + + def _get_weight(self, weight): + weight_flat = weight.view(weight.size(0), -1) + mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1) + std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1) + weight = (weight - mean) / std + weight = self.weight_gamma * weight + self.weight_beta + return weight + + def forward(self, x): + weight = self._get_weight(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, + self.dilation, self.groups) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """Override default load function. + + AWS overrides the function _load_from_state_dict to recover + weight_gamma and weight_beta if they are missing. If weight_gamma and + weight_beta are found in the checkpoint, this function will return + after super()._load_from_state_dict. Otherwise, it will compute the + mean and std of the pretrained weights and store them in weight_beta + and weight_gamma. + """ + + self.weight_gamma.data.fill_(-1) + local_missing_keys = [] + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, local_missing_keys, + unexpected_keys, error_msgs) + if self.weight_gamma.data.mean() > 0: + for k in local_missing_keys: + missing_keys.append(k) + return + weight = self.weight.data + weight_flat = weight.view(weight.size(0), -1) + mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1) + std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1) + self.weight_beta.data.copy_(mean) + self.weight_gamma.data.copy_(std) + missing_gamma_beta = [ + k for k in local_missing_keys + if k.endswith('weight_gamma') or k.endswith('weight_beta') + ] + for k in missing_gamma_beta: + local_missing_keys.remove(k) + for k in local_missing_keys: + missing_keys.append(k) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/depthwise_separable_conv_module.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/depthwise_separable_conv_module.py new file mode 100644 index 0000000000000000000000000000000000000000..722d5d8d71f75486e2db3008907c4eadfca41d63 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/depthwise_separable_conv_module.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from .conv_module import ConvModule + + +class DepthwiseSeparableConvModule(nn.Module): + """Depthwise separable convolution module. + + See https://arxiv.org/pdf/1704.04861.pdf for details. + + This module can replace a ConvModule with the conv block replaced by two + conv block: depthwise conv block and pointwise conv block. The depthwise + conv block contains depthwise-conv/norm/activation layers. The pointwise + conv block contains pointwise-conv/norm/activation layers. It should be + noted that there will be norm/activation layer in the depthwise conv block + if `norm_cfg` and `act_cfg` are specified. + + Args: + in_channels (int): Number of channels in the input feature map. 
+ Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. Default: 1. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. Default: 0. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. Default: 1. + norm_cfg (dict): Default norm config for both depthwise ConvModule and + pointwise ConvModule. Default: None. + act_cfg (dict): Default activation config for both depthwise ConvModule + and pointwise ConvModule. Default: dict(type='ReLU'). + dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is + 'default', it will be the same as `norm_cfg`. Default: 'default'. + dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is + 'default', it will be the same as `act_cfg`. Default: 'default'. + pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is + 'default', it will be the same as `norm_cfg`. Default: 'default'. + pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is + 'default', it will be the same as `act_cfg`. Default: 'default'. + kwargs (optional): Other shared arguments for depthwise and pointwise + ConvModule. See ConvModule for ref. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dw_norm_cfg='default', + dw_act_cfg='default', + pw_norm_cfg='default', + pw_act_cfg='default', + **kwargs): + super(DepthwiseSeparableConvModule, self).__init__() + assert 'groups' not in kwargs, 'groups should not be specified' + + # if norm/activation config of depthwise/pointwise ConvModule is not + # specified, use default config. + dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg + dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg + pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg + pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg + + # depthwise convolution + self.depthwise_conv = ConvModule( + in_channels, + in_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + norm_cfg=dw_norm_cfg, + act_cfg=dw_act_cfg, + **kwargs) + + self.pointwise_conv = ConvModule( + in_channels, + out_channels, + 1, + norm_cfg=pw_norm_cfg, + act_cfg=pw_act_cfg, + **kwargs) + + def forward(self, x): + x = self.depthwise_conv(x) + x = self.pointwise_conv(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/drop.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..90d192e3d3855d432bab5575406a09d5ff1aa94c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/drop.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from custom_mmpkg.custom_mmcv import build_from_cfg +from .registry import DROPOUT_LAYERS + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). 
+
+    We follow the implementation
+    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = 1 - drop_prob
+    # handle tensors with different dimensions, not just 4D tensors.
+    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
+    random_tensor = keep_prob + torch.rand(
+        shape, dtype=x.dtype, device=x.device)
+    output = x.div(keep_prob) * random_tensor.floor()
+    return output
+
+
+@DROPOUT_LAYERS.register_module()
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of
+    residual blocks).
+
+    We follow the implementation
+    https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py  # noqa: E501
+
+    Args:
+        drop_prob (float): Probability of the path to be zeroed. Default: 0.1
+    """
+
+    def __init__(self, drop_prob=0.1):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training)
+
+
+@DROPOUT_LAYERS.register_module()
+class Dropout(nn.Dropout):
+    """A wrapper for ``torch.nn.Dropout``. We rename the ``p`` of
+    ``torch.nn.Dropout`` to ``drop_prob`` to be consistent with ``DropPath``.
+
+    Args:
+        drop_prob (float): Probability of the elements to be
+            zeroed. Default: 0.5.
+        inplace (bool): Do the operation inplace or not. Default: False.
+    """
+
+    def __init__(self, drop_prob=0.5, inplace=False):
+        super().__init__(p=drop_prob, inplace=inplace)
+
+
+def build_dropout(cfg, default_args=None):
+    """Builder for dropout layers."""
+    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/generalized_attention.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/generalized_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..988d9adf2f289ef223bd1c680a5ae1d3387f0269
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/generalized_attention.py
@@ -0,0 +1,412 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..utils import kaiming_init
+from .registry import PLUGIN_LAYERS
+
+
+@PLUGIN_LAYERS.register_module()
+class GeneralizedAttention(nn.Module):
+    """GeneralizedAttention module.
+
+    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
+    (https://arxiv.org/abs/1904.05873) for details.
+
+    Args:
+        in_channels (int): Channels of the input feature map.
+        spatial_range (int): The spatial range. -1 indicates no spatial range
+            constraint. Default: -1.
+        num_heads (int): The head number of empirical_attention module.
+            Default: 9.
+        position_embedding_dim (int): The position embedding dimension.
+            Default: -1.
+        position_magnitude (int): A multiplier acting on coord difference.
+            Default: 1.
+        kv_stride (int): The feature stride acting on key/value feature map.
+            Default: 2.
+        q_stride (int): The feature stride acting on query feature map.
+            Default: 1.
+        attention_type (str): A binary indicator string for indicating which
+            items in generalized empirical_attention module are used.
+            Default: '1111'.
+
+            - '1000' indicates 'query and key content' (appr - appr) item,
+            - '0100' indicates 'query content and relative position'
+              (appr - position) item,
+            - '0010' indicates 'key content only' (bias - appr) item,
+            - '0001' indicates 'relative position only' (bias - position)
+              item.
+    """
+
+    _abbr_ = 'gen_attention_block'
+
+    def __init__(self,
+                 in_channels,
+                 spatial_range=-1,
+                 num_heads=9,
+                 position_embedding_dim=-1,
+                 position_magnitude=1,
+                 kv_stride=2,
+                 q_stride=1,
+                 attention_type='1111'):
+
+        super(GeneralizedAttention, self).__init__()
+
+        # hard range means local range for non-local operation
+        self.position_embedding_dim = (
+            position_embedding_dim
+            if position_embedding_dim > 0 else in_channels)
+
+        self.position_magnitude = position_magnitude
+        self.num_heads = num_heads
+        self.in_channels = in_channels
+        self.spatial_range = spatial_range
+        self.kv_stride = kv_stride
+        self.q_stride = q_stride
+        self.attention_type = [bool(int(_)) for _ in attention_type]
+        self.qk_embed_dim = in_channels // num_heads
+        out_c = self.qk_embed_dim * num_heads
+
+        if self.attention_type[0] or self.attention_type[1]:
+            self.query_conv = nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_c,
+                kernel_size=1,
+                bias=False)
+            self.query_conv.kaiming_init = True
+
+        if self.attention_type[0] or self.attention_type[2]:
+            self.key_conv = nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_c,
+                kernel_size=1,
+                bias=False)
+            self.key_conv.kaiming_init = True
+
+        self.v_dim = in_channels // num_heads
+        self.value_conv = nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=self.v_dim * num_heads,
+            kernel_size=1,
+            bias=False)
+        self.value_conv.kaiming_init = True
+
+        if self.attention_type[1] or self.attention_type[3]:
+            self.appr_geom_fc_x = nn.Linear(
+                self.position_embedding_dim // 2, out_c, bias=False)
+            self.appr_geom_fc_x.kaiming_init = True
+
+            self.appr_geom_fc_y = nn.Linear(
+                self.position_embedding_dim // 2, out_c, bias=False)
+            self.appr_geom_fc_y.kaiming_init = True
+
+        if self.attention_type[2]:
+            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
+            appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
+            self.appr_bias = nn.Parameter(appr_bias_value)
+
+        if self.attention_type[3]:
+            stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
+            geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
+            self.geom_bias = nn.Parameter(geom_bias_value)
+
+        self.proj_conv = nn.Conv2d(
+            in_channels=self.v_dim * num_heads,
+            out_channels=in_channels,
+            kernel_size=1,
+            bias=True)
+        self.proj_conv.kaiming_init = True
+        self.gamma = nn.Parameter(torch.zeros(1))
+
+        if self.spatial_range >= 0:
+            # only works when non local is after 3*3 conv
+            if in_channels == 256:
+                max_len = 84
+            elif in_channels == 512:
+                max_len = 42
+
+            max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
+            local_constraint_map = np.ones(
+                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
+            for iy in range(max_len):
+                for ix in range(max_len):
+                    local_constraint_map[
+                        iy, ix,
+                        max((iy - self.spatial_range) //
+                            self.kv_stride, 0):min((iy + self.spatial_range +
+                                                    1) // self.kv_stride +
+                                                   1, max_len),
+                        max((ix - self.spatial_range) //
+                            self.kv_stride, 0):min((ix + self.spatial_range +
+                                                    1) // self.kv_stride +
+                                                   1, max_len)] = 0
+
+            self.local_constraint_map = nn.Parameter(
+                torch.from_numpy(local_constraint_map).byte(),
+                requires_grad=False)
+
+        if self.q_stride > 1:
+            self.q_downsample = nn.AvgPool2d(
+                kernel_size=1, stride=self.q_stride)
+        else:
+            self.q_downsample = None
+
+        if self.kv_stride > 1:
self.kv_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.kv_stride) + else: + self.kv_downsample = None + + self.init_weights() + + def get_position_embedding(self, + h, + w, + h_kv, + w_kv, + q_stride, + kv_stride, + device, + dtype, + feat_dim, + wave_length=1000): + # the default type of Tensor is float32, leading to type mismatch + # in fp16 mode. Cast it to support fp16 mode. + h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype) + h_idxs = h_idxs.view((h, 1)) * q_stride + + w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype) + w_idxs = w_idxs.view((w, 1)) * q_stride + + h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to( + device=device, dtype=dtype) + h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride + + w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to( + device=device, dtype=dtype) + w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride + + # (h, h_kv, 1) + h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) + h_diff *= self.position_magnitude + + # (w, w_kv, 1) + w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) + w_diff *= self.position_magnitude + + feat_range = torch.arange(0, feat_dim / 4).to( + device=device, dtype=dtype) + + dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype) + dim_mat = dim_mat**((4. / feat_dim) * feat_range) + dim_mat = dim_mat.view((1, 1, -1)) + + embedding_x = torch.cat( + ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) + + embedding_y = torch.cat( + ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) + + return embedding_x, embedding_y + + def forward(self, x_input): + num_heads = self.num_heads + + # use empirical_attention + if self.q_downsample is not None: + x_q = self.q_downsample(x_input) + else: + x_q = x_input + n, _, h, w = x_q.shape + + if self.kv_downsample is not None: + x_kv = self.kv_downsample(x_input) + else: + x_kv = x_input + _, _, h_kv, w_kv = x_kv.shape + + if self.attention_type[0] or self.attention_type[1]: + proj_query = self.query_conv(x_q).view( + (n, num_heads, self.qk_embed_dim, h * w)) + proj_query = proj_query.permute(0, 1, 3, 2) + + if self.attention_type[0] or self.attention_type[2]: + proj_key = self.key_conv(x_kv).view( + (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) + + if self.attention_type[1] or self.attention_type[3]: + position_embed_x, position_embed_y = self.get_position_embedding( + h, w, h_kv, w_kv, self.q_stride, self.kv_stride, + x_input.device, x_input.dtype, self.position_embedding_dim) + # (n, num_heads, w, w_kv, dim) + position_feat_x = self.appr_geom_fc_x(position_embed_x).\ + view(1, w, w_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + # (n, num_heads, h, h_kv, dim) + position_feat_y = self.appr_geom_fc_y(position_embed_y).\ + view(1, h, h_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + position_feat_x /= math.sqrt(2) + position_feat_y /= math.sqrt(2) + + # accelerate for saliency only + if (np.sum(self.attention_type) == 1) and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy = torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, h_kv * w_kv) + + h = 1 + w = 1 + else: + # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for + if not self.attention_type[0]: + energy = torch.zeros( + n, + num_heads, + h, + w, + h_kv, + w_kv, + dtype=x_input.dtype, + device=x_input.device) + + # attention_type[0]: appr - appr + # attention_type[1]: appr - position 
+ # attention_type[2]: bias - appr + # attention_type[3]: bias - position + if self.attention_type[0] or self.attention_type[2]: + if self.attention_type[0] and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + energy = torch.matmul(proj_query + appr_bias, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[0]: + energy = torch.matmul(proj_query, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy += torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, 1, h_kv, w_kv) + + if self.attention_type[1] or self.attention_type[3]: + if self.attention_type[1] and self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + + proj_query_reshape = (proj_query + geom_bias).\ + view(n, num_heads, h, w, self.qk_embed_dim) + + energy_x = torch.matmul( + proj_query_reshape.permute(0, 1, 3, 2, 4), + position_feat_x.permute(0, 1, 2, 4, 3)) + energy_x = energy_x.\ + permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul( + proj_query_reshape, + position_feat_y.permute(0, 1, 2, 4, 3)) + energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[1]: + proj_query_reshape = proj_query.\ + view(n, num_heads, h, w, self.qk_embed_dim) + proj_query_reshape = proj_query_reshape.\ + permute(0, 1, 3, 2, 4) + position_feat_x_reshape = position_feat_x.\ + permute(0, 1, 2, 4, 3) + position_feat_y_reshape = position_feat_y.\ + permute(0, 1, 2, 4, 3) + + energy_x = torch.matmul(proj_query_reshape, + position_feat_x_reshape) + energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul(proj_query_reshape, + position_feat_y_reshape) + energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, self.qk_embed_dim, 1).\ + repeat(n, 1, 1, 1) + + position_feat_x_reshape = position_feat_x.\ + view(n, num_heads, w*w_kv, self.qk_embed_dim) + + position_feat_y_reshape = position_feat_y.\ + view(n, num_heads, h * h_kv, self.qk_embed_dim) + + energy_x = torch.matmul(position_feat_x_reshape, geom_bias) + energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) + + energy_y = torch.matmul(position_feat_y_reshape, geom_bias) + energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) + + energy += energy_x + energy_y + + energy = energy.view(n, num_heads, h * w, h_kv * w_kv) + + if self.spatial_range >= 0: + cur_local_constraint_map = \ + self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ + contiguous().\ + view(1, 1, h*w, h_kv*w_kv) + + energy = energy.masked_fill_(cur_local_constraint_map, + float('-inf')) + + attention = F.softmax(energy, 3) + + proj_value = self.value_conv(x_kv) + proj_value_reshape = proj_value.\ + view((n, num_heads, self.v_dim, h_kv * w_kv)).\ + permute(0, 1, 3, 2) + + out = torch.matmul(attention, proj_value_reshape).\ + permute(0, 1, 3, 2).\ + contiguous().\ + view(n, self.v_dim * self.num_heads, h, w) + + out = self.proj_conv(out) + + # output is downsampled, upsample back to input size + if self.q_downsample is not None: + out = F.interpolate( + out, + size=x_input.shape[2:], + mode='bilinear', + align_corners=False) + + out = self.gamma * out + x_input + return out + + def init_weights(self): + for m in self.modules(): + if hasattr(m, 'kaiming_init') and m.kaiming_init: + kaiming_init( + m, + 
mode='fan_in', + nonlinearity='leaky_relu', + bias=0, + distribution='uniform', + a=1) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hsigmoid.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hsigmoid.py new file mode 100644 index 0000000000000000000000000000000000000000..30b1a3d6580cf0360710426fbea1f05acdf07b4b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hsigmoid.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from .registry import ACTIVATION_LAYERS + + +@ACTIVATION_LAYERS.register_module() +class HSigmoid(nn.Module): + """Hard Sigmoid Module. Apply the hard sigmoid function: + Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value) + Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1) + + Args: + bias (float): Bias of the input feature map. Default: 1.0. + divisor (float): Divisor of the input feature map. Default: 2.0. + min_value (float): Lower bound value. Default: 0.0. + max_value (float): Upper bound value. Default: 1.0. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0): + super(HSigmoid, self).__init__() + self.bias = bias + self.divisor = divisor + assert self.divisor != 0 + self.min_value = min_value + self.max_value = max_value + + def forward(self, x): + x = (x + self.bias) / self.divisor + + return x.clamp_(self.min_value, self.max_value) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hswish.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hswish.py new file mode 100644 index 0000000000000000000000000000000000000000..7e0c090ff037c99ee6c5c84c4592e87beae02208 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/hswish.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from .registry import ACTIVATION_LAYERS + + +@ACTIVATION_LAYERS.register_module() +class HSwish(nn.Module): + """Hard Swish Module. + + This module applies the hard swish function: + + .. math:: + Hswish(x) = x * ReLU6(x + 3) / 6 + + Args: + inplace (bool): can optionally do the operation in-place. + Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, inplace=False): + super(HSwish, self).__init__() + self.act = nn.ReLU6(inplace) + + def forward(self, x): + return x * self.act(x + 3) / 6 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/non_local.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/non_local.py new file mode 100644 index 0000000000000000000000000000000000000000..92d00155ef275c1201ea66bba30470a1785cc5d7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/non_local.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta + +import torch +import torch.nn as nn + +from ..utils import constant_init, normal_init +from .conv_module import ConvModule +from .registry import PLUGIN_LAYERS + + +class _NonLocalNd(nn.Module, metaclass=ABCMeta): + """Basic Non-local module. + + This module is proposed in + "Non-local Neural Networks" + Paper reference: https://arxiv.org/abs/1711.07971 + Code reference: https://github.com/AlexHex7/Non-local_pytorch + + Args: + in_channels (int): Channels of the input feature map. 
+ reduction (int): Channel reduction ratio. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`. + Default: True. + conv_cfg (None | dict): The config dict for convolution layers. + If not specified, it will use `nn.Conv2d` for convolution layers. + Default: None. + norm_cfg (None | dict): The config dict for normalization layers. + Default: None. (This parameter is only applicable to conv_out.) + mode (str): Options are `gaussian`, `concatenation`, + `embedded_gaussian` and `dot_product`. Default: embedded_gaussian. + """ + + def __init__(self, + in_channels, + reduction=2, + use_scale=True, + conv_cfg=None, + norm_cfg=None, + mode='embedded_gaussian', + **kwargs): + super(_NonLocalNd, self).__init__() + self.in_channels = in_channels + self.reduction = reduction + self.use_scale = use_scale + self.inter_channels = max(in_channels // reduction, 1) + self.mode = mode + + if mode not in [ + 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation' + ]: + raise ValueError("Mode should be in 'gaussian', 'concatenation', " + f"'embedded_gaussian' or 'dot_product', but got " + f'{mode} instead.') + + # g, theta, phi are defaulted as `nn.ConvNd`. + # Here we use ConvModule for potential usage. + self.g = ConvModule( + self.in_channels, + self.inter_channels, + kernel_size=1, + conv_cfg=conv_cfg, + act_cfg=None) + self.conv_out = ConvModule( + self.inter_channels, + self.in_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + if self.mode != 'gaussian': + self.theta = ConvModule( + self.in_channels, + self.inter_channels, + kernel_size=1, + conv_cfg=conv_cfg, + act_cfg=None) + self.phi = ConvModule( + self.in_channels, + self.inter_channels, + kernel_size=1, + conv_cfg=conv_cfg, + act_cfg=None) + + if self.mode == 'concatenation': + self.concat_project = ConvModule( + self.inter_channels * 2, + 1, + kernel_size=1, + stride=1, + padding=0, + bias=False, + act_cfg=dict(type='ReLU')) + + self.init_weights(**kwargs) + + def init_weights(self, std=0.01, zeros_init=True): + if self.mode != 'gaussian': + for m in [self.g, self.theta, self.phi]: + normal_init(m.conv, std=std) + else: + normal_init(self.g.conv, std=std) + if zeros_init: + if self.conv_out.norm_cfg is None: + constant_init(self.conv_out.conv, 0) + else: + constant_init(self.conv_out.norm, 0) + else: + if self.conv_out.norm_cfg is None: + normal_init(self.conv_out.conv, std=std) + else: + normal_init(self.conv_out.norm, std=std) + + def gaussian(self, theta_x, phi_x): + # NonLocal1d pairwise_weight: [N, H, H] + # NonLocal2d pairwise_weight: [N, HxW, HxW] + # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + pairwise_weight = pairwise_weight.softmax(dim=-1) + return pairwise_weight + + def embedded_gaussian(self, theta_x, phi_x): + # NonLocal1d pairwise_weight: [N, H, H] + # NonLocal2d pairwise_weight: [N, HxW, HxW] + # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + if self.use_scale: + # theta_x.shape[-1] is `self.inter_channels` + pairwise_weight /= theta_x.shape[-1]**0.5 + pairwise_weight = pairwise_weight.softmax(dim=-1) + return pairwise_weight + + def dot_product(self, theta_x, phi_x): + # NonLocal1d pairwise_weight: [N, H, H] + # NonLocal2d pairwise_weight: [N, HxW, HxW] + # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + pairwise_weight /= pairwise_weight.shape[-1] + 
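+        # (unlike 'gaussian'/'embedded_gaussian' there is no softmax here;
+        # dividing by N, the number of key positions, follows the dot-product
+        # normalization used in the non-local paper)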
return pairwise_weight + + def concatenation(self, theta_x, phi_x): + # NonLocal1d pairwise_weight: [N, H, H] + # NonLocal2d pairwise_weight: [N, HxW, HxW] + # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] + h = theta_x.size(2) + w = phi_x.size(3) + theta_x = theta_x.repeat(1, 1, 1, w) + phi_x = phi_x.repeat(1, 1, h, 1) + + concat_feature = torch.cat([theta_x, phi_x], dim=1) + pairwise_weight = self.concat_project(concat_feature) + n, _, h, w = pairwise_weight.size() + pairwise_weight = pairwise_weight.view(n, h, w) + pairwise_weight /= pairwise_weight.shape[-1] + + return pairwise_weight + + def forward(self, x): + # Assume `reduction = 1`, then `inter_channels = C` + # or `inter_channels = C` when `mode="gaussian"` + + # NonLocal1d x: [N, C, H] + # NonLocal2d x: [N, C, H, W] + # NonLocal3d x: [N, C, T, H, W] + n = x.size(0) + + # NonLocal1d g_x: [N, H, C] + # NonLocal2d g_x: [N, HxW, C] + # NonLocal3d g_x: [N, TxHxW, C] + g_x = self.g(x).view(n, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H] + # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW] + # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW] + if self.mode == 'gaussian': + theta_x = x.view(n, self.in_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + if self.sub_sample: + phi_x = self.phi(x).view(n, self.in_channels, -1) + else: + phi_x = x.view(n, self.in_channels, -1) + elif self.mode == 'concatenation': + theta_x = self.theta(x).view(n, self.inter_channels, -1, 1) + phi_x = self.phi(x).view(n, self.inter_channels, 1, -1) + else: + theta_x = self.theta(x).view(n, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + phi_x = self.phi(x).view(n, self.inter_channels, -1) + + pairwise_func = getattr(self, self.mode) + # NonLocal1d pairwise_weight: [N, H, H] + # NonLocal2d pairwise_weight: [N, HxW, HxW] + # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] + pairwise_weight = pairwise_func(theta_x, phi_x) + + # NonLocal1d y: [N, H, C] + # NonLocal2d y: [N, HxW, C] + # NonLocal3d y: [N, TxHxW, C] + y = torch.matmul(pairwise_weight, g_x) + # NonLocal1d y: [N, C, H] + # NonLocal2d y: [N, C, H, W] + # NonLocal3d y: [N, C, T, H, W] + y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels, + *x.size()[2:]) + + output = x + self.conv_out(y) + + return output + + +class NonLocal1d(_NonLocalNd): + """1D Non-local module. + + Args: + in_channels (int): Same as `NonLocalND`. + sub_sample (bool): Whether to apply max pooling after pairwise + function (Note that the `sub_sample` is applied on spatial only). + Default: False. + conv_cfg (None | dict): Same as `NonLocalND`. + Default: dict(type='Conv1d'). + """ + + def __init__(self, + in_channels, + sub_sample=False, + conv_cfg=dict(type='Conv1d'), + **kwargs): + super(NonLocal1d, self).__init__( + in_channels, conv_cfg=conv_cfg, **kwargs) + + self.sub_sample = sub_sample + + if sub_sample: + max_pool_layer = nn.MaxPool1d(kernel_size=2) + self.g = nn.Sequential(self.g, max_pool_layer) + if self.mode != 'gaussian': + self.phi = nn.Sequential(self.phi, max_pool_layer) + else: + self.phi = max_pool_layer + + +@PLUGIN_LAYERS.register_module() +class NonLocal2d(_NonLocalNd): + """2D Non-local module. + + Args: + in_channels (int): Same as `NonLocalND`. + sub_sample (bool): Whether to apply max pooling after pairwise + function (Note that the `sub_sample` is applied on spatial only). + Default: False. + conv_cfg (None | dict): Same as `NonLocalND`. + Default: dict(type='Conv2d'). 
+    """
+
+    _abbr_ = 'nonlocal_block'
+
+    def __init__(self,
+                 in_channels,
+                 sub_sample=False,
+                 conv_cfg=dict(type='Conv2d'),
+                 **kwargs):
+        super(NonLocal2d, self).__init__(
+            in_channels, conv_cfg=conv_cfg, **kwargs)
+
+        self.sub_sample = sub_sample
+
+        if sub_sample:
+            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
+            self.g = nn.Sequential(self.g, max_pool_layer)
+            if self.mode != 'gaussian':
+                self.phi = nn.Sequential(self.phi, max_pool_layer)
+            else:
+                self.phi = max_pool_layer
+
+
+class NonLocal3d(_NonLocalNd):
+    """3D Non-local module.
+
+    Args:
+        in_channels (int): Same as `NonLocalND`.
+        sub_sample (bool): Whether to apply max pooling after pairwise
+            function (Note that the `sub_sample` is applied on spatial only).
+            Default: False.
+        conv_cfg (None | dict): Same as `NonLocalND`.
+            Default: dict(type='Conv3d').
+    """
+
+    def __init__(self,
+                 in_channels,
+                 sub_sample=False,
+                 conv_cfg=dict(type='Conv3d'),
+                 **kwargs):
+        super(NonLocal3d, self).__init__(
+            in_channels, conv_cfg=conv_cfg, **kwargs)
+        self.sub_sample = sub_sample
+
+        if sub_sample:
+            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
+            self.g = nn.Sequential(self.g, max_pool_layer)
+            if self.mode != 'gaussian':
+                self.phi = nn.Sequential(self.phi, max_pool_layer)
+            else:
+                self.phi = max_pool_layer
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..da7a4d5d1ec957e885c48afb2dac772b6f792fd2
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/norm.py
@@ -0,0 +1,144 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import inspect
+
+import torch.nn as nn
+
+from custom_mmpkg.custom_mmcv.utils import is_tuple_of
+from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
+from .registry import NORM_LAYERS
+
+NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
+NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
+NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
+NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
+NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
+NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
+NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
+NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
+NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
+NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
+NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
+
+
+def infer_abbr(class_type):
+    """Infer abbreviation from the class name.
+
+    When we build a norm layer with `build_norm_layer()`, we want to preserve
+    the norm type in variable names, e.g., self.bn1, self.gn. This method will
+    infer the abbreviation to map class types to abbreviations.
+
+    Rule 1: If the class has the property "_abbr_", return the property.
+    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
+        InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln"
+        and "in" respectively.
+    Rule 3: If the class name contains "batch", "group", "layer" or
+        "instance", the abbreviation of this layer will be "bn", "gn", "ln"
+        and "in" respectively.
+    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".
+
+    Args:
+        class_type (type): The norm layer type.
+
+    Returns:
+        str: The inferred abbreviation.
+ """ + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ + if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN + return 'in' + elif issubclass(class_type, _BatchNorm): + return 'bn' + elif issubclass(class_type, nn.GroupNorm): + return 'gn' + elif issubclass(class_type, nn.LayerNorm): + return 'ln' + else: + class_name = class_type.__name__.lower() + if 'batch' in class_name: + return 'bn' + elif 'group' in class_name: + return 'gn' + elif 'layer' in class_name: + return 'ln' + elif 'instance' in class_name: + return 'in' + else: + return 'norm_layer' + + +def build_norm_layer(cfg, num_features, postfix=''): + """Build normalization layer. + + Args: + cfg (dict): The norm layer config, which should contain: + + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + num_features (int): Number of input channels. + postfix (int | str): The postfix to be appended into norm abbreviation + to create named layer. + + Returns: + (str, nn.Module): The first element is the layer name consisting of + abbreviation and postfix, e.g., bn1, gn. The second element is the + created norm layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in NORM_LAYERS: + raise KeyError(f'Unrecognized norm type {layer_type}') + + norm_layer = NORM_LAYERS.get(layer_type) + abbr = infer_abbr(norm_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer + + +def is_norm(layer, exclude=None): + """Check if a layer is a normalization layer. + + Args: + layer (nn.Module): The layer to be checked. + exclude (type | tuple[type]): Types to be excluded. + + Returns: + bool: Whether the layer is a norm layer. + """ + if exclude is not None: + if not isinstance(exclude, tuple): + exclude = (exclude, ) + if not is_tuple_of(exclude, type): + raise TypeError( + f'"exclude" must be either None or type or a tuple of types, ' + f'but got {type(exclude)}: {exclude}') + + if exclude and isinstance(layer, exclude): + return False + + all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm) + return isinstance(layer, all_norm_bases) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/padding.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/padding.py new file mode 100644 index 0000000000000000000000000000000000000000..e4ac6b28a1789bd551c613a7d3e7b622433ac7ec --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/padding.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.nn as nn + +from .registry import PADDING_LAYERS + +PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d) +PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d) +PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d) + + +def build_padding_layer(cfg, *args, **kwargs): + """Build padding layer. + + Args: + cfg (None or dict): The padding layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a padding layer. + + Returns: + nn.Module: Created padding layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + + cfg_ = cfg.copy() + padding_type = cfg_.pop('type') + if padding_type not in PADDING_LAYERS: + raise KeyError(f'Unrecognized padding type {padding_type}.') + else: + padding_layer = PADDING_LAYERS.get(padding_type) + + layer = padding_layer(*args, **kwargs, **cfg_) + + return layer diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/plugin.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..07c010d4053174dd41107aa654ea67e82b46a25c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/plugin.py @@ -0,0 +1,88 @@ +import inspect +import platform + +from .registry import PLUGIN_LAYERS + +if platform.system() == 'Windows': + import regex as re +else: + import re + + +def infer_abbr(class_type): + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + `_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ + else: + return camel2snack(class_type.__name__) + + +def build_plugin_layer(cfg, postfix='', **kwargs): + """Build plugin layer. + + Args: + cfg (None or dict): cfg should contain: + type (str): identify plugin layer type. + layer args: args needed to instantiate a plugin layer. + postfix (int, str): appended into norm abbreviation to + create named layer. Default: ''. 
+ + Returns: + tuple[str, nn.Module]: + name (str): abbreviation + postfix + layer (nn.Module): created plugin layer + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in PLUGIN_LAYERS: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + plugin_layer = PLUGIN_LAYERS.get(layer_type) + abbr = infer_abbr(plugin_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/registry.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..496c18796f08a9de159b489fbef278ded22749d8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/registry.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from custom_mmpkg.custom_mmcv.utils import Registry + +CONV_LAYERS = Registry('conv layer') +NORM_LAYERS = Registry('norm layer') +ACTIVATION_LAYERS = Registry('activation layer') +PADDING_LAYERS = Registry('padding layer') +UPSAMPLE_LAYERS = Registry('upsample layer') +PLUGIN_LAYERS = Registry('plugin layer') + +DROPOUT_LAYERS = Registry('drop out layers') +POSITIONAL_ENCODING = Registry('position encoding') +ATTENTION = Registry('attention') +FEEDFORWARD_NETWORK = Registry('feed-forward Network') +TRANSFORMER_LAYER = Registry('transformerLayer') +TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/scale.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/scale.py new file mode 100644 index 0000000000000000000000000000000000000000..c905fffcc8bf998d18d94f927591963c428025e2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/scale.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + + +class Scale(nn.Module): + """A learnable scale parameter. + + This layer scales the input by a learnable factor. It multiplies a + learnable scale parameter of shape (1,) with input of any shape. + + Args: + scale (float): Initial value of scale factor. Default: 1.0 + """ + + def __init__(self, scale=1.0): + super(Scale, self).__init__() + self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) + + def forward(self, x): + return x * self.scale diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/swish.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/swish.py new file mode 100644 index 0000000000000000000000000000000000000000..e2ca8ed7b749413f011ae54aac0cab27e6f0b51f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/swish.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from .registry import ACTIVATION_LAYERS + + +@ACTIVATION_LAYERS.register_module() +class Swish(nn.Module): + """Swish Module. + + This module applies the swish function: + + .. math:: + Swish(x) = x * Sigmoid(x) + + Returns: + Tensor: The output tensor. 
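+
+    Example::
+
+        >>> # Minimal sanity check (illustrative, not an upstream doctest):
+        >>> # Swish(0) = 0 * sigmoid(0) = 0.
+        >>> import torch
+        >>> act = Swish()
+        >>> act(torch.zeros(3)).tolist()
+        [0.0, 0.0, 0.0]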
+ """ + + def __init__(self): + super(Swish, self).__init__() + + def forward(self, x): + return x * torch.sigmoid(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/transformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..d4cd4655d30aef5cecb65522bc6b854fb60eca8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/transformer.py @@ -0,0 +1,595 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +import torch +import torch.nn as nn + +from custom_mmpkg.custom_mmcv import ConfigDict, deprecated_api_warning +from custom_mmpkg.custom_mmcv.cnn import Linear, build_activation_layer, build_norm_layer +from custom_mmpkg.custom_mmcv.runner.base_module import BaseModule, ModuleList, Sequential +from custom_mmpkg.custom_mmcv.utils import build_from_cfg +from .drop import build_dropout +from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, + TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) + +# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file +try: + from custom_mmpkg.custom_mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401 + warnings.warn( + ImportWarning( + '``MultiScaleDeformableAttention`` has been moved to ' + '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501 + '``from custom_mmpkg.custom_mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501 + 'to ``from custom_mmpkg.custom_mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501 + )) + +except ImportError: + warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from ' + '``mmcv.ops.multi_scale_deform_attn``, ' + 'You should install ``mmcv-full`` if you need this module. ') + + +def build_positional_encoding(cfg, default_args=None): + """Builder for Position Encoding.""" + return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args) + + +def build_attention(cfg, default_args=None): + """Builder for attention.""" + return build_from_cfg(cfg, ATTENTION, default_args) + + +def build_feedforward_network(cfg, default_args=None): + """Builder for feed-forward network (FFN).""" + return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args) + + +def build_transformer_layer(cfg, default_args=None): + """Builder for transformer layer.""" + return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args) + + +def build_transformer_layer_sequence(cfg, default_args=None): + """Builder for transformer encoder and transformer decoder.""" + return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args) + + +@ATTENTION.register_module() +class MultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+ batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + batch_first=False, + **kwargs): + super(MultiheadAttention, self).__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn('The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ') + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, + **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. + + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. 
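+
+        Example::
+
+            >>> # Self-attention sketch (illustrative): with key/value left
+            >>> # as None, the query attends to itself.
+            >>> import torch
+            >>> attn = MultiheadAttention(embed_dims=32, num_heads=4)
+            >>> query = torch.rand(10, 2, 32)  # [num_queries, bs, embed_dims]
+            >>> out = attn(query)  # identity + attention output
+            >>> assert out.shape == (10, 2, 32)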
+ """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + out = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +@FEEDFORWARD_NETWORK.register_module() +class FFN(BaseModule): + """Implements feed-forward networks (FFNs) with identity connection. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + add_identity (bool, optional): Whether to add the + identity connection. Default: `True`. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + @deprecated_api_warning( + { + 'dropout': 'ffn_drop', + 'add_residual': 'add_identity' + }, + cls_name='FFN') + def __init__(self, + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0., + dropout_layer=None, + add_identity=True, + init_cfg=None, + **kwargs): + super(FFN, self).__init__(init_cfg) + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + + layers = [] + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + Sequential( + Linear(in_channels, feedforward_channels), self.activate, + nn.Dropout(ffn_drop))) + in_channels = feedforward_channels + layers.append(Linear(feedforward_channels, embed_dims)) + layers.append(nn.Dropout(ffn_drop)) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + self.add_identity = add_identity + + @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') + def forward(self, x, identity=None): + """Forward function for `FFN`. + + The function would add x to the output tensor if residue is None. 
+ """ + out = self.layers(x) + if not self.add_identity: + return self.dropout_layer(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +@TRANSFORMER_LAYER.register_module() +class BaseTransformerLayer(BaseModule): + """Base `TransformerLayer` for vision transformer. + + It can be built from `mmcv.ConfigDict` and support more flexible + customization, for example, using any number of `FFN or LN ` and + use different kinds of `attention` by specifying a list of `ConfigDict` + named `attn_cfgs`. It is worth mentioning that it supports `prenorm` + when you specifying `norm` as the first element of `operation_order`. + More details about the `prenorm`: `On Layer Normalization in the + Transformer Architecture `_ . + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for `self_attention` or `cross_attention` modules, + The order of the configs in the list should be consistent with + corresponding attentions in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. Default: None. + ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for FFN, The order of the configs in the list should be + consistent with corresponding ffn in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Support `prenorm` when you specifying first element as `norm`. + Default:None. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape + of (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + """ + + def __init__(self, + attn_cfgs=None, + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=None, + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=False, + **kwargs): + + deprecated_args = dict( + feedforward_channels='feedforward_channels', + ffn_dropout='ffn_drop', + ffn_num_fcs='num_fcs') + for ori_name, new_name in deprecated_args.items(): + if ori_name in kwargs: + warnings.warn( + f'The arguments `{ori_name}` in BaseTransformerLayer ' + f'has been deprecated, now you should set `{new_name}` ' + f'and other FFN related arguments ' + f'to a dict named `ffn_cfgs`. ') + ffn_cfgs[new_name] = kwargs[ori_name] + + super(BaseTransformerLayer, self).__init__(init_cfg) + + self.batch_first = batch_first + + assert set(operation_order) & set( + ['self_attn', 'norm', 'ffn', 'cross_attn']) == \ + set(operation_order), f'The operation_order of' \ + f' {self.__class__.__name__} should ' \ + f'contains all four operation type ' \ + f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" + + num_attn = operation_order.count('self_attn') + operation_order.count( + 'cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' 
+ + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.attentions = ModuleList() + + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_attention(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. + attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = ConfigDict(ffn_cfgs) + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + self.ffns.append( + build_feedforward_network(ffn_cfgs[ffn_index], + dict(type='FFN'))) + + self.norms = ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
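+
+        Example::
+
+            >>> # Illustrative post-norm encoder layer (a sketch, not an
+            >>> # upstream doctest); shapes are [num_queries, bs, embed_dims]
+            >>> # because batch_first defaults to False.
+            >>> import torch
+            >>> layer = BaseTransformerLayer(
+            ...     attn_cfgs=dict(type='MultiheadAttention', embed_dims=64,
+            ...                    num_heads=4),
+            ...     ffn_cfgs=dict(type='FFN', embed_dims=64,
+            ...                   feedforward_channels=128),
+            ...     operation_order=('self_attn', 'norm', 'ffn', 'norm'))
+            >>> query = torch.rand(10, 2, 64)
+            >>> assert layer(query).shape == (10, 2, 64)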
+ """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + if layer == 'self_attn': + temp_key = temp_value = query + query = self.attentions[attn_index]( + query, + temp_key, + temp_value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class TransformerLayerSequence(BaseModule): + """Base class for TransformerEncoder and TransformerDecoder in vision + transformer. + + As base-class of Encoder and Decoder in vision transformer. + Support customization such as specifying different kind + of `transformer_layer` in `transformer_coder`. + + Args: + transformerlayer (list[obj:`mmcv.ConfigDict`] | + obj:`mmcv.ConfigDict`): Config of transformerlayer + in TransformerCoder. If it is obj:`mmcv.ConfigDict`, + it would be repeated `num_layer` times to a + list[`mmcv.ConfigDict`]. Default: None. + num_layers (int): The number of `TransformerLayer`. Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None): + super(TransformerLayerSequence, self).__init__(init_cfg) + if isinstance(transformerlayers, dict): + transformerlayers = [ + copy.deepcopy(transformerlayers) for _ in range(num_layers) + ] + else: + assert isinstance(transformerlayers, list) and \ + len(transformerlayers) == num_layers + self.num_layers = num_layers + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append(build_transformer_layer(transformerlayers[i])) + self.embed_dims = self.layers[0].embed_dims + self.pre_norm = self.layers[0].pre_norm + + def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerCoder`. + + Args: + query (Tensor): Input query with shape + `(num_queries, bs, embed_dims)`. + key (Tensor): The key tensor with shape + `(num_keys, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_keys, bs, embed_dims)`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. 
+ attn_masks (List[Tensor], optional): Each element is 2D Tensor + which is used in calculation of corresponding attention in + operation_order. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in self-attention + Default: None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: results with shape [num_queries, bs, embed_dims]. + """ + for layer in self.layers: + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) + return query diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/upsample.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/upsample.py new file mode 100644 index 0000000000000000000000000000000000000000..a1a353767d0ce8518f0d7289bed10dba0178ed12 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/upsample.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..utils import xavier_init +from .registry import UPSAMPLE_LAYERS + +UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample) +UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample) + + +@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle') +class PixelShufflePack(nn.Module): + """Pixel Shuffle upsample layer. + + This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to + achieve a simple upsampling with pixel shuffle. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + scale_factor (int): Upsample ratio. + upsample_kernel (int): Kernel size of the conv layer to expand the + channels. + """ + + def __init__(self, in_channels, out_channels, scale_factor, + upsample_kernel): + super(PixelShufflePack, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.scale_factor = scale_factor + self.upsample_kernel = upsample_kernel + self.upsample_conv = nn.Conv2d( + self.in_channels, + self.out_channels * scale_factor * scale_factor, + self.upsample_kernel, + padding=(self.upsample_kernel - 1) // 2) + self.init_weights() + + def init_weights(self): + xavier_init(self.upsample_conv, distribution='uniform') + + def forward(self, x): + x = self.upsample_conv(x) + x = F.pixel_shuffle(x, self.scale_factor) + return x + + +def build_upsample_layer(cfg, *args, **kwargs): + """Build upsample layer. + + Args: + cfg (dict): The upsample layer config, which should contain: + + - type (str): Layer type. + - scale_factor (int): Upsample ratio, which is not applicable to + deconv. + - layer args: Args needed to instantiate a upsample layer. + args (argument list): Arguments passed to the ``__init__`` + method of the corresponding conv layer. + kwargs (keyword arguments): Keyword arguments passed to the + ``__init__`` method of the corresponding conv layer. + + Returns: + nn.Module: Created upsample layer. 
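+
+    Example::
+
+        >>> # Illustrative: 'nearest' and 'bilinear' map to nn.Upsample with
+        >>> # the matching mode; 'pixel_shuffle' builds PixelShufflePack.
+        >>> layer = build_upsample_layer(dict(type='nearest', scale_factor=2))
+        >>> isinstance(layer, nn.Upsample)
+        True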
+ """ + if not isinstance(cfg, dict): + raise TypeError(f'cfg must be a dict, but got {type(cfg)}') + if 'type' not in cfg: + raise KeyError( + f'the cfg dict must contain the key "type", but got {cfg}') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in UPSAMPLE_LAYERS: + raise KeyError(f'Unrecognized upsample type {layer_type}') + else: + upsample = UPSAMPLE_LAYERS.get(layer_type) + + if upsample is nn.Upsample: + cfg_['mode'] = layer_type + layer = upsample(*args, **kwargs, **cfg_) + return layer diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/wrappers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..8aebf67bf52355a513f21756ee74fe510902d075 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/bricks/wrappers.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501 + +Wrap some nn modules to support empty tensor input. Currently, these wrappers +are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask +heads are trained on only positive RoIs. +""" +import math + +import torch +import torch.nn as nn +from torch.nn.modules.utils import _pair, _triple + +from .registry import CONV_LAYERS, UPSAMPLE_LAYERS + +if torch.__version__ == 'parrots': + TORCH_VERSION = torch.__version__ +else: + # torch.__version__ could be 1.3.1+cu92, we only need the first two + # for comparison + TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) + + +def obsolete_torch_version(torch_version, version_threshold): + return torch_version == 'parrots' or torch_version <= version_threshold + + +class NewEmptyTensorOp(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return NewEmptyTensorOp.apply(grad, shape), None + + +@CONV_LAYERS.register_module('Conv', force=True) +class Conv2d(nn.Conv2d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size, + self.padding, self.stride, self.dilation): + o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module('Conv3d', force=True) +class Conv3d(nn.Conv3d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size, + self.padding, self.stride, self.dilation): + o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. 
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module() +@CONV_LAYERS.register_module('deconv') +@UPSAMPLE_LAYERS.register_module('deconv', force=True) +class ConvTranspose2d(nn.ConvTranspose2d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size, + self.padding, self.stride, + self.dilation, self.output_padding): + out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module() +@CONV_LAYERS.register_module('deconv3d') +@UPSAMPLE_LAYERS.register_module('deconv3d', force=True) +class ConvTranspose3d(nn.ConvTranspose3d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size, + self.padding, self.stride, + self.dilation, self.output_padding): + out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +class MaxPool2d(nn.MaxPool2d): + + def forward(self, x): + # PyTorch 1.9 does not support empty tensor inference yet + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): + out_shape = list(x.shape[:2]) + for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size), + _pair(self.padding), _pair(self.stride), + _pair(self.dilation)): + o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1 + o = math.ceil(o) if self.ceil_mode else math.floor(o) + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + return empty + + return super().forward(x) + + +class MaxPool3d(nn.MaxPool3d): + + def forward(self, x): + # PyTorch 1.9 does not support empty tensor inference yet + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): + out_shape = list(x.shape[:2]) + for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size), + _triple(self.padding), + _triple(self.stride), + _triple(self.dilation)): + o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1 + o = math.ceil(o) if self.ceil_mode else math.floor(o) + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + return empty + + return super().forward(x) + + +class Linear(torch.nn.Linear): + + def forward(self, x): + # empty tensor forward of Linear layer is supported in Pytorch 1.6 + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)): + out_shape = [x.shape[0], self.out_features] + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. 
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..7567316c566bd3aca6d8f65a84b00e9e890948a7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/builder.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..runner import Sequential +from ..utils import Registry, build_from_cfg + + +def build_model_from_cfg(cfg, registry, default_args=None): + """Build a PyTorch model from config dict(s). Different from + ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built. + + Args: + cfg (dict, list[dict]): The config of modules, is is either a config + dict or a list of config dicts. If cfg is a list, a + the built modules will be wrapped with ``nn.Sequential``. + registry (:obj:`Registry`): A registry the module belongs to. + default_args (dict, optional): Default arguments to build the module. + Defaults to None. + + Returns: + nn.Module: A built nn module. + """ + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return Sequential(*modules) + else: + return build_from_cfg(cfg, registry, default_args) + + +MODELS = Registry('model', build_func=build_model_from_cfg) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb3ac057ee2d52c46fc94685b5d4e698aad8d5f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/resnet.py @@ -0,0 +1,316 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp + +from .utils import constant_init, kaiming_init + + +def conv3x3(in_planes, out_planes, stride=1, dilation=1): + """3x3 convolution with padding.""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False): + super(BasicBlock, self).__init__() + assert style in ['pytorch', 'caffe'] + self.conv1 = conv3x3(inplanes, planes, stride, dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + assert not with_cp + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False): + """Bottleneck block. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. 
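+
+        Example::
+
+            >>> # Shape sketch (illustrative): a stride-2 bottleneck halves
+            >>> # the spatial size and expands channels by `expansion=4`.
+            >>> import torch
+            >>> downsample = nn.Sequential(
+            ...     nn.Conv2d(64, 256, kernel_size=1, stride=2, bias=False),
+            ...     nn.BatchNorm2d(256))
+            >>> block = Bottleneck(64, 64, stride=2, downsample=downsample)
+            >>> out = block(torch.rand(1, 64, 56, 56))
+            >>> assert out.shape == (1, 256, 28, 28)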
+ """ + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + if style == 'pytorch': + conv1_stride = 1 + conv2_stride = stride + else: + conv1_stride = stride + conv2_stride = 1 + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.bn1 = nn.BatchNorm2d(planes) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + with_cp=False): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + dilation, + downsample, + style=style, + with_cp=with_cp)) + inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) + + return nn.Sequential(*layers) + + +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze + running stats (mean and var). + bn_frozen (bool): Whether to freeze weight and bias of BN layers. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + frozen_stages=-1, + bn_eval=True, + bn_frozen=False, + with_cp=False): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + assert num_stages >= 1 and num_stages <= 4 + block, stage_blocks = self.arch_settings[depth] + stage_blocks = stage_blocks[:num_stages] + assert len(strides) == len(dilations) == num_stages + assert max(out_indices) < num_stages + + self.out_indices = out_indices + self.style = style + self.frozen_stages = frozen_stages + self.bn_eval = bn_eval + self.bn_frozen = bn_frozen + self.with_cp = with_cp + + self.inplanes = 64 + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.res_layers = [] + for i, num_blocks in enumerate(stage_blocks): + stride = strides[i] + dilation = dilations[i] + planes = 64 * 2**i + res_layer = make_res_layer( + block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + with_cp=with_cp) + self.inplanes = planes * block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + from ..runner import load_checkpoint + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + if self.bn_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + if self.bn_frozen: + for params in m.parameters(): + params.requires_grad = False + if mode and self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for param in self.bn1.parameters(): + param.requires_grad = False + self.bn1.eval() + self.bn1.weight.requires_grad = False + self.bn1.bias.requires_grad = False + for i in range(1, self.frozen_stages + 1): + mod = getattr(self, f'layer{i}') + mod.eval() + for param in mod.parameters(): + param.requires_grad = False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a263e31c1e3977712827ca229bbc04910b4e928e --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .flops_counter import get_model_complexity_info +from .fuse_conv_bn import fuse_conv_bn +from .sync_bn import revert_sync_batchnorm +from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit, + KaimingInit, NormalInit, PretrainedInit, + TruncNormalInit, UniformInit, XavierInit, + bias_init_with_prob, caffe2_xavier_init, + constant_init, initialize, kaiming_init, normal_init, + trunc_normal_init, uniform_init, xavier_init) + +__all__ = [ + 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init', + 'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init', + 'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize', + 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit', + 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', + 'Caffe2XavierInit', 'revert_sync_batchnorm' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/flops_counter.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/flops_counter.py new file mode 100644 index 0000000000000000000000000000000000000000..a445d7a0ef90b371c74476c2b50b7b66eabc6d80 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/flops_counter.py @@ -0,0 +1,599 @@ +# Modified from flops-counter.pytorch by Vladislav Sovrasov +# original repo: https://github.com/sovrasov/flops-counter.pytorch + +# MIT License + +# Copyright (c) 2018 Vladislav Sovrasov + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import sys +from functools import partial + +import numpy as np +import torch +import torch.nn as nn + +import custom_mmpkg.custom_mmcv as mmcv + + +def get_model_complexity_info(model, + input_shape, + print_per_layer_stat=True, + as_strings=True, + input_constructor=None, + flush=False, + ost=sys.stdout): + """Get complexity information of a model. + + This method can calculate FLOPs and parameter counts of a model with + corresponding input shape. It can also print complexity information for + each layer in a model. + + Supported layers are listed as below: + - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``. + - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``, + ``nn.ReLU6``. 
+ - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``, + ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``, + ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``, + ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``, + ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``. + - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``, + ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``, + ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``. + - Linear: ``nn.Linear``. + - Deconvolution: ``nn.ConvTranspose2d``. + - Upsample: ``nn.Upsample``. + + Args: + model (nn.Module): The model for complexity calculation. + input_shape (tuple): Input shape used for calculation. + print_per_layer_stat (bool): Whether to print complexity information + for each layer in a model. Default: True. + as_strings (bool): Output FLOPs and params counts in a string form. + Default: True. + input_constructor (None | callable): If specified, it takes a callable + method that generates input. otherwise, it will generate a random + tensor with input shape to calculate FLOPs. Default: None. + flush (bool): same as that in :func:`print`. Default: False. + ost (stream): same as ``file`` param in :func:`print`. + Default: sys.stdout. + + Returns: + tuple[float | str]: If ``as_strings`` is set to True, it will return + FLOPs and parameter counts in a string format. otherwise, it will + return those in a float number format. + """ + assert type(input_shape) is tuple + assert len(input_shape) >= 1 + assert isinstance(model, nn.Module) + flops_model = add_flops_counting_methods(model) + flops_model.eval() + flops_model.start_flops_count() + if input_constructor: + input = input_constructor(input_shape) + _ = flops_model(**input) + else: + try: + batch = torch.ones(()).new_empty( + (1, *input_shape), + dtype=next(flops_model.parameters()).dtype, + device=next(flops_model.parameters()).device) + except StopIteration: + # Avoid StopIteration for models which have no parameters, + # like `nn.Relu()`, `nn.AvgPool2d`, etc. + batch = torch.ones(()).new_empty((1, *input_shape)) + + _ = flops_model(batch) + + flops_count, params_count = flops_model.compute_average_flops_cost() + if print_per_layer_stat: + print_model_with_flops( + flops_model, flops_count, params_count, ost=ost, flush=flush) + flops_model.stop_flops_count() + + if as_strings: + return flops_to_string(flops_count), params_to_string(params_count) + + return flops_count, params_count + + +def flops_to_string(flops, units='GFLOPs', precision=2): + """Convert FLOPs number into a string. + + Note that Here we take a multiply-add counts as one FLOP. + + Args: + flops (float): FLOPs number to be converted. + units (str | None): Converted FLOPs units. Options are None, 'GFLOPs', + 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically + choose the most suitable unit for FLOPs. Default: 'GFLOPs'. + precision (int): Digit number after the decimal point. Default: 2. + + Returns: + str: The converted FLOPs number with units. 
+ + Examples: + >>> flops_to_string(1e9) + '1.0 GFLOPs' + >>> flops_to_string(2e5, 'MFLOPs') + '0.2 MFLOPs' + >>> flops_to_string(3e-9, None) + '3e-09 FLOPs' + """ + if units is None: + if flops // 10**9 > 0: + return str(round(flops / 10.**9, precision)) + ' GFLOPs' + elif flops // 10**6 > 0: + return str(round(flops / 10.**6, precision)) + ' MFLOPs' + elif flops // 10**3 > 0: + return str(round(flops / 10.**3, precision)) + ' KFLOPs' + else: + return str(flops) + ' FLOPs' + else: + if units == 'GFLOPs': + return str(round(flops / 10.**9, precision)) + ' ' + units + elif units == 'MFLOPs': + return str(round(flops / 10.**6, precision)) + ' ' + units + elif units == 'KFLOPs': + return str(round(flops / 10.**3, precision)) + ' ' + units + else: + return str(flops) + ' FLOPs' + + +def params_to_string(num_params, units=None, precision=2): + """Convert parameter number into a string. + + Args: + num_params (float): Parameter number to be converted. + units (str | None): Converted FLOPs units. Options are None, 'M', + 'K' and ''. If set to None, it will automatically choose the most + suitable unit for Parameter number. Default: None. + precision (int): Digit number after the decimal point. Default: 2. + + Returns: + str: The converted parameter number with units. + + Examples: + >>> params_to_string(1e9) + '1000.0 M' + >>> params_to_string(2e5) + '200.0 k' + >>> params_to_string(3e-9) + '3e-09' + """ + if units is None: + if num_params // 10**6 > 0: + return str(round(num_params / 10**6, precision)) + ' M' + elif num_params // 10**3: + return str(round(num_params / 10**3, precision)) + ' k' + else: + return str(num_params) + else: + if units == 'M': + return str(round(num_params / 10.**6, precision)) + ' ' + units + elif units == 'K': + return str(round(num_params / 10.**3, precision)) + ' ' + units + else: + return str(num_params) + + +def print_model_with_flops(model, + total_flops, + total_params, + units='GFLOPs', + precision=3, + ost=sys.stdout, + flush=False): + """Print a model with FLOPs for each layer. + + Args: + model (nn.Module): The model to be printed. + total_flops (float): Total FLOPs of the model. + total_params (float): Total parameter counts of the model. + units (str | None): Converted FLOPs units. Default: 'GFLOPs'. + precision (int): Digit number after the decimal point. Default: 3. + ost (stream): same as `file` param in :func:`print`. + Default: sys.stdout. + flush (bool): same as that in :func:`print`. Default: False. 
+
+    Example:
+        >>> class ExampleModel(nn.Module):
+
+        >>> def __init__(self):
+        >>>     super().__init__()
+        >>>     self.conv1 = nn.Conv2d(3, 8, 3)
+        >>>     self.conv2 = nn.Conv2d(8, 256, 3)
+        >>>     self.conv3 = nn.Conv2d(256, 8, 3)
+        >>>     self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+        >>>     self.flatten = nn.Flatten()
+        >>>     self.fc = nn.Linear(8, 1)
+
+        >>> def forward(self, x):
+        >>>     x = self.conv1(x)
+        >>>     x = self.conv2(x)
+        >>>     x = self.conv3(x)
+        >>>     x = self.avg_pool(x)
+        >>>     x = self.flatten(x)
+        >>>     x = self.fc(x)
+        >>>     return x
+
+        >>> model = ExampleModel()
+        >>> x = (3, 16, 16)
+        To print the complexity information for each layer, you can use
+        >>> get_model_complexity_info(model, x)
+        or directly use
+        >>> print_model_with_flops(model, 4579784.0, 37361)
+        ExampleModel(
+          0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
+          (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1))  # noqa: E501
+          (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
+          (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
+          (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
+          (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
+          (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
+        )
+    """
+
+    def accumulate_params(self):
+        if is_supported_instance(self):
+            return self.__params__
+        else:
+            total = 0
+            for m in self.children():
+                total += m.accumulate_params()
+            return total
+
+    def accumulate_flops(self):
+        if is_supported_instance(self):
+            return self.__flops__ / model.__batch_counter__
+        else:
+            total = 0
+            for m in self.children():
+                total += m.accumulate_flops()
+            return total
+
+    def flops_repr(self):
+        accumulated_num_params = self.accumulate_params()
+        accumulated_flops_cost = self.accumulate_flops()
+        return ', '.join([
+            params_to_string(
+                accumulated_num_params, units='M', precision=precision),
+            '{:.3%} Params'.format(accumulated_num_params / total_params),
+            flops_to_string(
+                accumulated_flops_cost, units=units, precision=precision),
+            '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
+            self.original_extra_repr()
+        ])
+
+    def add_extra_repr(m):
+        m.accumulate_flops = accumulate_flops.__get__(m)
+        m.accumulate_params = accumulate_params.__get__(m)
+        flops_extra_repr = flops_repr.__get__(m)
+        if m.extra_repr != flops_extra_repr:
+            m.original_extra_repr = m.extra_repr
+            m.extra_repr = flops_extra_repr
+            assert m.extra_repr != m.original_extra_repr
+
+    def del_extra_repr(m):
+        if hasattr(m, 'original_extra_repr'):
+            m.extra_repr = m.original_extra_repr
+            del m.original_extra_repr
+        if hasattr(m, 'accumulate_flops'):
+            del m.accumulate_flops
+        # also remove the bound `accumulate_params` added in `add_extra_repr`
+        if hasattr(m, 'accumulate_params'):
+            del m.accumulate_params
+
+    model.apply(add_extra_repr)
+    print(model, file=ost, flush=flush)
+    model.apply(del_extra_repr)
+
+
+def get_model_parameters_number(model):
+    """Calculate the parameter number of a model.
+
+    Args:
+        model (nn.module): The model for parameter number calculation.
+
+    Returns:
+        int: Parameter number of the model.
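+
+    Example (an illustrative doctest, not from the upstream file; assumes
+    ``torch.nn`` is imported as ``nn``):
+        >>> # only parameters with requires_grad=True are counted
+        >>> get_model_parameters_number(nn.Linear(10, 5))  # 10 * 5 + 5
+        55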
+ """ + num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + return num_params + + +def add_flops_counting_methods(net_main_module): + # adding additional methods to the existing module object, + # this is done this way so that each function has access to self object + net_main_module.start_flops_count = start_flops_count.__get__( + net_main_module) + net_main_module.stop_flops_count = stop_flops_count.__get__( + net_main_module) + net_main_module.reset_flops_count = reset_flops_count.__get__( + net_main_module) + net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501 + net_main_module) + + net_main_module.reset_flops_count() + + return net_main_module + + +def compute_average_flops_cost(self): + """Compute average FLOPs cost. + + A method to compute average FLOPs cost, which will be available after + `add_flops_counting_methods()` is called on a desired net object. + + Returns: + float: Current mean flops consumption per image. + """ + batches_count = self.__batch_counter__ + flops_sum = 0 + for module in self.modules(): + if is_supported_instance(module): + flops_sum += module.__flops__ + params_sum = get_model_parameters_number(self) + return flops_sum / batches_count, params_sum + + +def start_flops_count(self): + """Activate the computation of mean flops consumption per image. + + A method to activate the computation of mean flops consumption per image. + which will be available after ``add_flops_counting_methods()`` is called on + a desired net object. It should be called before running the network. + """ + add_batch_counter_hook_function(self) + + def add_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + return + + else: + handle = module.register_forward_hook( + get_modules_mapping()[type(module)]) + + module.__flops_handle__ = handle + + self.apply(partial(add_flops_counter_hook_function)) + + +def stop_flops_count(self): + """Stop computing the mean flops consumption per image. + + A method to stop computing the mean flops consumption per image, which will + be available after ``add_flops_counting_methods()`` is called on a desired + net object. It can be called to pause the computation whenever. + """ + remove_batch_counter_hook_function(self) + self.apply(remove_flops_counter_hook_function) + + +def reset_flops_count(self): + """Reset statistics computed so far. + + A method to Reset computed statistics, which will be available after + `add_flops_counting_methods()` is called on a desired net object. 
+ """ + add_batch_counter_variables_or_reset(self) + self.apply(add_flops_counter_variable_or_reset) + + +# ---- Internal functions +def empty_flops_counter_hook(module, input, output): + module.__flops__ += 0 + + +def upsample_flops_counter_hook(module, input, output): + output_size = output[0] + batch_size = output_size.shape[0] + output_elements_count = batch_size + for val in output_size.shape[1:]: + output_elements_count *= val + module.__flops__ += int(output_elements_count) + + +def relu_flops_counter_hook(module, input, output): + active_elements_count = output.numel() + module.__flops__ += int(active_elements_count) + + +def linear_flops_counter_hook(module, input, output): + input = input[0] + output_last_dim = output.shape[ + -1] # pytorch checks dimensions, so here we don't care much + module.__flops__ += int(np.prod(input.shape) * output_last_dim) + + +def pool_flops_counter_hook(module, input, output): + input = input[0] + module.__flops__ += int(np.prod(input.shape)) + + +def norm_flops_counter_hook(module, input, output): + input = input[0] + + batch_flops = np.prod(input.shape) + if (getattr(module, 'affine', False) + or getattr(module, 'elementwise_affine', False)): + batch_flops *= 2 + module.__flops__ += int(batch_flops) + + +def deconv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + input_height, input_width = input.shape[2:] + + kernel_height, kernel_width = conv_module.kernel_size + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = ( + kernel_height * kernel_width * in_channels * filters_per_channel) + + active_elements_count = batch_size * input_height * input_width + overall_conv_flops = conv_per_position_flops * active_elements_count + bias_flops = 0 + if conv_module.bias is not None: + output_height, output_width = output.shape[2:] + bias_flops = out_channels * batch_size * output_height * output_height + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def conv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + output_dims = list(output.shape[2:]) + + kernel_dims = list(conv_module.kernel_size) + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = int( + np.prod(kernel_dims)) * in_channels * filters_per_channel + + active_elements_count = batch_size * int(np.prod(output_dims)) + + overall_conv_flops = conv_per_position_flops * active_elements_count + + bias_flops = 0 + + if conv_module.bias is not None: + + bias_flops = out_channels * active_elements_count + + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def batch_counter_hook(module, input, output): + batch_size = 1 + if len(input) > 0: + # Can have multiple inputs, getting the first one + input = input[0] + batch_size = len(input) + else: + pass + print('Warning! 
No positional inputs found for a module, ' + 'assuming batch size is 1.') + module.__batch_counter__ += batch_size + + +def add_batch_counter_variables_or_reset(module): + + module.__batch_counter__ = 0 + + +def add_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + return + + handle = module.register_forward_hook(batch_counter_hook) + module.__batch_counter_handle__ = handle + + +def remove_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + module.__batch_counter_handle__.remove() + del module.__batch_counter_handle__ + + +def add_flops_counter_variable_or_reset(module): + if is_supported_instance(module): + if hasattr(module, '__flops__') or hasattr(module, '__params__'): + print('Warning: variables __flops__ or __params__ are already ' + 'defined for the module' + type(module).__name__ + + ' ptflops can affect your code!') + module.__flops__ = 0 + module.__params__ = get_model_parameters_number(module) + + +def is_supported_instance(module): + if type(module) in get_modules_mapping(): + return True + return False + + +def remove_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + module.__flops_handle__.remove() + del module.__flops_handle__ + + +def get_modules_mapping(): + return { + # convolutions + nn.Conv1d: conv_flops_counter_hook, + nn.Conv2d: conv_flops_counter_hook, + mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook, + nn.Conv3d: conv_flops_counter_hook, + mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook, + # activations + nn.ReLU: relu_flops_counter_hook, + nn.PReLU: relu_flops_counter_hook, + nn.ELU: relu_flops_counter_hook, + nn.LeakyReLU: relu_flops_counter_hook, + nn.ReLU6: relu_flops_counter_hook, + # poolings + nn.MaxPool1d: pool_flops_counter_hook, + nn.AvgPool1d: pool_flops_counter_hook, + nn.AvgPool2d: pool_flops_counter_hook, + nn.MaxPool2d: pool_flops_counter_hook, + mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook, + nn.MaxPool3d: pool_flops_counter_hook, + mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook, + nn.AvgPool3d: pool_flops_counter_hook, + nn.AdaptiveMaxPool1d: pool_flops_counter_hook, + nn.AdaptiveAvgPool1d: pool_flops_counter_hook, + nn.AdaptiveMaxPool2d: pool_flops_counter_hook, + nn.AdaptiveAvgPool2d: pool_flops_counter_hook, + nn.AdaptiveMaxPool3d: pool_flops_counter_hook, + nn.AdaptiveAvgPool3d: pool_flops_counter_hook, + # normalizations + nn.BatchNorm1d: norm_flops_counter_hook, + nn.BatchNorm2d: norm_flops_counter_hook, + nn.BatchNorm3d: norm_flops_counter_hook, + nn.GroupNorm: norm_flops_counter_hook, + nn.InstanceNorm1d: norm_flops_counter_hook, + nn.InstanceNorm2d: norm_flops_counter_hook, + nn.InstanceNorm3d: norm_flops_counter_hook, + nn.LayerNorm: norm_flops_counter_hook, + # FC + nn.Linear: linear_flops_counter_hook, + mmcv.cnn.bricks.Linear: linear_flops_counter_hook, + # Upscale + nn.Upsample: upsample_flops_counter_hook, + # Deconvolution + nn.ConvTranspose2d: deconv_flops_counter_hook, + mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook, + } diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/fuse_conv_bn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/fuse_conv_bn.py new file mode 100644 index 0000000000000000000000000000000000000000..cb7076f80bf37f7931185bf0293ffcc1ce19c8ef --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/fuse_conv_bn.py @@ -0,0 +1,59 @@ +# Copyright (c) 
+import torch
+import torch.nn as nn
+
+
+def _fuse_conv_bn(conv, bn):
+    """Fuse a conv and a bn into one module.
+
+    Args:
+        conv (nn.Module): Conv to be fused.
+        bn (nn.Module): BN to be fused.
+
+    Returns:
+        nn.Module: Fused module.
+    """
+    conv_w = conv.weight
+    conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
+        bn.running_mean)
+
+    factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
+    conv.weight = nn.Parameter(conv_w *
+                               factor.reshape([conv.out_channels, 1, 1, 1]))
+    conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
+    return conv
+
+
+def fuse_conv_bn(module):
+    """Recursively fuse conv and bn in a module.
+
+    During inference, the functionality of batch norm layers is turned off;
+    only the per-channel running mean and variance are used. This exposes the
+    chance to fuse them with the preceding conv layers to save computation and
+    simplify network structures.
+
+    Args:
+        module (nn.Module): Module to be fused.
+
+    Returns:
+        nn.Module: Fused module.
+    """
+    last_conv = None
+    last_conv_name = None
+
+    for name, child in module.named_children():
+        if isinstance(child,
+                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
+            if last_conv is None:  # only fuse BN that is after Conv
+                continue
+            fused_conv = _fuse_conv_bn(last_conv, child)
+            module._modules[last_conv_name] = fused_conv
+            # To reduce changes, set BN as Identity instead of deleting it.
+            module._modules[name] = nn.Identity()
+            last_conv = None
+        elif isinstance(child, nn.Conv2d):
+            last_conv = child
+            last_conv_name = name
+        else:
+            fuse_conv_bn(child)
+    return module
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/sync_bn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/sync_bn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f75291daab5cfbf367621cef62b0067aed9fbd0d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/sync_bn.py
@@ -0,0 +1,59 @@
+import torch
+
+import custom_mmpkg.custom_mmcv as mmcv
+
+
+class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
+    """A general BatchNorm layer without input dimension check.
+
+    Reproduced from @kapily's work:
+    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
+    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc.
+    is `_check_input_dim`, which is designed for tensor sanity checks. The
+    check has been bypassed in this class for the convenience of converting
+    SyncBatchNorm.
+    """
+
+    def _check_input_dim(self, input):
+        return
+
+
+def revert_sync_batchnorm(module):
+    """Helper function to convert all `SyncBatchNorm` (SyncBN) and
+    `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers in the model to
+    `BatchNormXd` layers.
+
+    Adapted from @kapily's work:
+    (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
+
+    Args:
+        module (nn.Module): The module containing `SyncBatchNorm` layers.
+
+    Returns:
+        module_output: The converted module with `BatchNormXd` layers.
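+
+    Example (an illustrative doctest, not from the upstream file; the
+    conversion itself needs no distributed process group):
+        >>> model = torch.nn.SyncBatchNorm(8)
+        >>> model = revert_sync_batchnorm(model)
+        >>> isinstance(model, _BatchNormXd)
+        True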
+ """ + module_output = module + module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm] + if hasattr(mmcv, 'ops'): + module_checklist.append(mmcv.ops.SyncBatchNorm) + if isinstance(module, tuple(module_checklist)): + module_output = _BatchNormXd(module.num_features, module.eps, + module.momentum, module.affine, + module.track_running_stats) + if module.affine: + # no_grad() may not be needed here but + # just to be consistent with `convert_sync_batchnorm()` + with torch.no_grad(): + module_output.weight = module.weight + module_output.bias = module.bias + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + module_output.training = module.training + # qconfig exists in quantized models + if hasattr(module, 'qconfig'): + module_output.qconfig = module.qconfig + for name, child in module.named_children(): + module_output.add_module(name, revert_sync_batchnorm(child)) + del module + return module_output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/weight_init.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/weight_init.py new file mode 100644 index 0000000000000000000000000000000000000000..7a5bb1755d2269829c113b98026aa0310a3d70cb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/utils/weight_init.py @@ -0,0 +1,684 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +import warnings + +import numpy as np +import torch +import torch.nn as nn +from torch import Tensor + +from custom_mmpkg.custom_mmcv.utils import Registry, build_from_cfg, get_logger, print_log + +INITIALIZERS = Registry('initializer') + + +def update_init_info(module, init_info): + """Update the `_params_init_info` in the module if the value of parameters + are changed. + + Args: + module (obj:`nn.Module`): The module of PyTorch with a user-defined + attribute `_params_init_info` which records the initialization + information. + init_info (str): The string that describes the initialization. + """ + assert hasattr( + module, + '_params_init_info'), f'Can not find `_params_init_info` in {module}' + for name, param in module.named_parameters(): + + assert param in module._params_init_info, ( + f'Find a new :obj:`Parameter` ' + f'named `{name}` during executing the ' + f'`init_weights` of ' + f'`{module.__class__.__name__}`. ' + f'Please do not add or ' + f'replace parameters during executing ' + f'the `init_weights`. 
') + + # The parameter has been changed during executing the + # `init_weights` of module + mean_value = param.data.mean() + if module._params_init_info[param]['tmp_mean_value'] != mean_value: + module._params_init_info[param]['init_info'] = init_info + module._params_init_info[param]['tmp_mean_value'] = mean_value + + +def constant_init(module, val, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def xavier_init(module, gain=1, bias=0, distribution='normal'): + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def normal_init(module, mean=0, std=1, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def trunc_normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + trunc_normal_(module.weight, mean, std, a, b) # type: ignore + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) # type: ignore + + +def uniform_init(module, a=0, b=1, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def kaiming_init(module, + a=0, + mode='fan_out', + nonlinearity='relu', + bias=0, + distribution='normal'): + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def caffe2_xavier_init(module, bias=0): + # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch + # Acknowledgment to FAIR's internal code + kaiming_init( + module, + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + bias=bias, + distribution='uniform') + + +def bias_init_with_prob(prior_prob): + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init + + +def _get_bases_name(m): + return [b.__name__ for b in m.__class__.__bases__] + + +class BaseInit(object): + + def __init__(self, *, bias=0, bias_prob=None, layer=None): + self.wholemodule = False + if not isinstance(bias, (int, float)): + raise TypeError(f'bias must be a number, but got a {type(bias)}') + + if bias_prob is not None: + if not isinstance(bias_prob, float): + raise TypeError(f'bias_prob type must be float, \ + but got {type(bias_prob)}') + + if layer is not None: + if not isinstance(layer, (str, list)): + raise TypeError(f'layer must be a str or a list of str, \ + but got a {type(layer)}') + else: + layer = [] + + if bias_prob is not None: + self.bias 
= bias_init_with_prob(bias_prob) + else: + self.bias = bias + self.layer = [layer] if isinstance(layer, str) else layer + + def _get_init_info(self): + info = f'{self.__class__.__name__}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Constant') +class ConstantInit(BaseInit): + """Initialize module parameters with constant values. + + Args: + val (int | float): the value to fill the weights in the module with + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, val, **kwargs): + super().__init__(**kwargs) + self.val = val + + def __call__(self, module): + + def init(m): + if self.wholemodule: + constant_init(m, self.val, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + constant_init(m, self.val, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Xavier') +class XavierInit(BaseInit): + r"""Initialize module parameters with values according to the method + described in `Understanding the difficulty of training deep feedforward + neural networks - Glorot, X. & Bengio, Y. (2010). + `_ + + Args: + gain (int | float): an optional scaling factor. Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + distribution (str): distribution either be ``'normal'`` + or ``'uniform'``. Defaults to ``'normal'``. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, gain=1, distribution='normal', **kwargs): + super().__init__(**kwargs) + self.gain = gain + self.distribution = distribution + + def __call__(self, module): + + def init(m): + if self.wholemodule: + xavier_init(m, self.gain, self.bias, self.distribution) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + xavier_init(m, self.gain, self.bias, self.distribution) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: gain={self.gain}, ' \ + f'distribution={self.distribution}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Normal') +class NormalInit(BaseInit): + r"""Initialize module parameters with the values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + + Args: + mean (int | float):the mean of the normal distribution. Defaults to 0. + std (int | float): the standard deviation of the normal distribution. + Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. 
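+
+    Example (an illustrative doctest, not from the upstream file):
+        >>> init = NormalInit(mean=0, std=0.01, layer='Linear')
+        >>> init(nn.Linear(2, 2))  # weights ~ N(0, 0.01**2), bias filled with 0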
+ + """ + + def __init__(self, mean=0, std=1, **kwargs): + super().__init__(**kwargs) + self.mean = mean + self.std = std + + def __call__(self, module): + + def init(m): + if self.wholemodule: + normal_init(m, self.mean, self.std, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + normal_init(m, self.mean, self.std, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: mean={self.mean},' \ + f' std={self.std}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='TruncNormal') +class TruncNormalInit(BaseInit): + r"""Initialize module parameters with the values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values + outside :math:`[a, b]`. + + Args: + mean (float): the mean of the normal distribution. Defaults to 0. + std (float): the standard deviation of the normal distribution. + Defaults to 1. + a (float): The minimum cutoff value. + b ( float): The maximum cutoff value. + bias (float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + + """ + + def __init__(self, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + **kwargs) -> None: + super().__init__(**kwargs) + self.mean = mean + self.std = std + self.a = a + self.b = b + + def __call__(self, module: nn.Module) -> None: + + def init(m): + if self.wholemodule: + trunc_normal_init(m, self.mean, self.std, self.a, self.b, + self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + trunc_normal_init(m, self.mean, self.std, self.a, self.b, + self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \ + f' mean={self.mean}, std={self.std}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Uniform') +class UniformInit(BaseInit): + r"""Initialize module parameters with values drawn from the uniform + distribution :math:`\mathcal{U}(a, b)`. + + Args: + a (int | float): the lower bound of the uniform distribution. + Defaults to 0. + b (int | float): the upper bound of the uniform distribution. + Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. 
+ """ + + def __init__(self, a=0, b=1, **kwargs): + super().__init__(**kwargs) + self.a = a + self.b = b + + def __call__(self, module): + + def init(m): + if self.wholemodule: + uniform_init(m, self.a, self.b, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + uniform_init(m, self.a, self.b, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a},' \ + f' b={self.b}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Kaiming') +class KaimingInit(BaseInit): + r"""Initialize module parameters with the values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification - He, K. et al. (2015). + `_ + + Args: + a (int | float): the negative slope of the rectifier used after this + layer (only used with ``'leaky_relu'``). Defaults to 0. + mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing + ``'fan_in'`` preserves the magnitude of the variance of the weights + in the forward pass. Choosing ``'fan_out'`` preserves the + magnitudes in the backwards pass. Defaults to ``'fan_out'``. + nonlinearity (str): the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` . + Defaults to 'relu'. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + distribution (str): distribution either be ``'normal'`` or + ``'uniform'``. Defaults to ``'normal'``. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, + a=0, + mode='fan_out', + nonlinearity='relu', + distribution='normal', + **kwargs): + super().__init__(**kwargs) + self.a = a + self.mode = mode + self.nonlinearity = nonlinearity + self.distribution = distribution + + def __call__(self, module): + + def init(m): + if self.wholemodule: + kaiming_init(m, self.a, self.mode, self.nonlinearity, + self.bias, self.distribution) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + kaiming_init(m, self.a, self.mode, self.nonlinearity, + self.bias, self.distribution) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \ + f'nonlinearity={self.nonlinearity}, ' \ + f'distribution ={self.distribution}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Caffe2Xavier') +class Caffe2XavierInit(KaimingInit): + # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch + # Acknowledgment to FAIR's internal code + def __init__(self, **kwargs): + super().__init__( + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + distribution='uniform', + **kwargs) + + def __call__(self, module): + super().__call__(module) + + +@INITIALIZERS.register_module(name='Pretrained') +class PretrainedInit(object): + """Initialize module by loading a pretrained model. + + Args: + checkpoint (str): the checkpoint file of the pretrained model should + be load. + prefix (str, optional): the prefix of a sub-module in the pretrained + model. 
It is for loading a part of the pretrained model to
+            initialize. For example, if we would like to only load the
+            backbone of a detector model, we can set ``prefix='backbone.'``.
+            Defaults to None.
+        map_location (str): map tensors into proper locations.
+    """
+
+    def __init__(self, checkpoint, prefix=None, map_location=None):
+        self.checkpoint = checkpoint
+        self.prefix = prefix
+        self.map_location = map_location
+
+    def __call__(self, module):
+        from custom_mmpkg.custom_mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,
+                                                     load_state_dict)
+        logger = get_logger('mmcv')
+        if self.prefix is None:
+            print_log(f'load model from: {self.checkpoint}', logger=logger)
+            load_checkpoint(
+                module,
+                self.checkpoint,
+                map_location=self.map_location,
+                strict=False,
+                logger=logger)
+        else:
+            print_log(
+                f'load {self.prefix} in model from: {self.checkpoint}',
+                logger=logger)
+            state_dict = _load_checkpoint_with_prefix(
+                self.prefix, self.checkpoint, map_location=self.map_location)
+            load_state_dict(module, state_dict, strict=False, logger=logger)
+
+        if hasattr(module, '_params_init_info'):
+            update_init_info(module, init_info=self._get_init_info())
+
+    def _get_init_info(self):
+        info = f'{self.__class__.__name__}: load from {self.checkpoint}'
+        return info
+
+
+def _initialize(module, cfg, wholemodule=False):
+    func = build_from_cfg(cfg, INITIALIZERS)
+    # wholemodule flag is for override mode, there is no layer key in override
+    # and initializer will give init values for the whole module with the name
+    # in override.
+    func.wholemodule = wholemodule
+    func(module)
+
+
+def _initialize_override(module, override, cfg):
+    if not isinstance(override, (dict, list)):
+        raise TypeError(f'override must be a dict or a list of dict, \
+                but got {type(override)}')
+
+    override = [override] if isinstance(override, dict) else override
+
+    for override_ in override:
+
+        cp_override = copy.deepcopy(override_)
+        name = cp_override.pop('name', None)
+        if name is None:
+            raise ValueError('`override` must contain the key "name", '
+                             f'but got {cp_override}')
+        # if override only has the name key, it means use args in init_cfg
+        if not cp_override:
+            cp_override.update(cfg)
+        # if override has the name key and other args except the type key,
+        # it will raise an error
+        elif 'type' not in cp_override.keys():
+            raise ValueError(
+                f'`override` needs a "type" key, but got {cp_override}')
+
+        if hasattr(module, name):
+            _initialize(getattr(module, name), cp_override, wholemodule=True)
+        else:
+            raise RuntimeError(f'module does not have attribute {name}, '
+                               f'but init_cfg is {cp_override}.')
+
+
+def initialize(module, init_cfg):
+    """Initialize a module.
+
+    Args:
+        module (``torch.nn.Module``): the module to be initialized.
+        init_cfg (dict | list[dict]): initialization configuration dict to
+            define initializer. OpenMMLab has implemented 6 initializers
+            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
+            ``Kaiming``, and ``Pretrained``.
+ Example: + >>> module = nn.Linear(2, 3, bias=True) + >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2) + >>> initialize(module, init_cfg) + + >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2)) + >>> # define key ``'layer'`` for initializing layer with different + >>> # configuration + >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1), + dict(type='Constant', layer='Linear', val=2)] + >>> initialize(module, init_cfg) + + >>> # define key``'override'`` to initialize some specific part in + >>> # module + >>> class FooNet(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.feat = nn.Conv2d(3, 16, 3) + >>> self.reg = nn.Conv2d(16, 10, 3) + >>> self.cls = nn.Conv2d(16, 5, 3) + >>> model = FooNet() + >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d', + >>> override=dict(type='Constant', name='reg', val=3, bias=4)) + >>> initialize(model, init_cfg) + + >>> model = ResNet(depth=50) + >>> # Initialize weights with the pretrained model. + >>> init_cfg = dict(type='Pretrained', + checkpoint='torchvision://resnet50') + >>> initialize(model, init_cfg) + + >>> # Initialize weights of a sub-module with the specific part of + >>> # a pretrained model by using "prefix". + >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\ + >>> 'retinanet_r50_fpn_1x_coco/'\ + >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth' + >>> init_cfg = dict(type='Pretrained', + checkpoint=url, prefix='backbone.') + """ + if not isinstance(init_cfg, (dict, list)): + raise TypeError(f'init_cfg must be a dict or a list of dict, \ + but got {type(init_cfg)}') + + if isinstance(init_cfg, dict): + init_cfg = [init_cfg] + + for cfg in init_cfg: + # should deeply copy the original config because cfg may be used by + # other modules, e.g., one init_cfg shared by multiple bottleneck + # blocks, the expected cfg will be changed after pop and will change + # the initialization behavior of other modules + cp_cfg = copy.deepcopy(cfg) + override = cp_cfg.pop('override', None) + _initialize(module, cp_cfg) + + if override is not None: + cp_cfg.pop('layer', None) + _initialize_override(module, override, cp_cfg) + else: + # All attributes in module have same initialization. + pass + + +def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, + b: float) -> Tensor: + # Method based on + # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + # Modified from + # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' + 'The distribution of values may be incorrect.', + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + lower = norm_cdf((a - mean) / std) + upper = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [lower, upper], then translate + # to [2lower-1, 2upper-1]. 
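+        # (Added note) `tensor.erfinv_()` below inverts the scaled error
+        # function, so a uniform sample on [2*lower - 1, 2*upper - 1] becomes
+        # a sample from the standard normal truncated to
+        # [(a - mean) / std, (b - mean) / std]; the subsequent mul_/add_
+        # rescale it to N(mean, std**2) supported on [a, b].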
+ tensor.uniform_(2 * lower - 1, 2 * upper - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor: Tensor, + mean: float = 0., + std: float = 1., + a: float = -2., + b: float = 2.) -> Tensor: + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + Modified from + https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + + Args: + tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`. + mean (float): the mean of the normal distribution. + std (float): the standard deviation of the normal distribution. + a (float): the minimum cutoff value. + b (float): the maximum cutoff value. + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/vgg.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..8778b649561a45a9652b1a15a26c2d171e58f3e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/cnn/vgg.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.nn as nn + +from .utils import constant_init, kaiming_init, normal_init + + +def conv3x3(in_planes, out_planes, dilation=1): + """3x3 convolution with padding.""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + padding=dilation, + dilation=dilation) + + +def make_vgg_layer(inplanes, + planes, + num_blocks, + dilation=1, + with_bn=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layers.append(conv3x3(inplanes, planes, dilation)) + if with_bn: + layers.append(nn.BatchNorm2d(planes)) + layers.append(nn.ReLU(inplace=True)) + inplanes = planes + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +class VGG(nn.Module): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_bn (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze + running stats (mean and var). + bn_frozen (bool): Whether to freeze weight and bias of BN layers. 
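+
+    Example (an illustrative doctest, not from the upstream file; assumes
+    ``torch`` is imported):
+        >>> model = VGG(depth=11, out_indices=(4, ))
+        >>> feat = model(torch.rand(1, 3, 32, 32))
+        >>> feat.shape  # final stage feature map
+        torch.Size([1, 512, 1, 1])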
+ """ + + arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + with_bn=False, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=(0, 1, 2, 3, 4), + frozen_stages=-1, + bn_eval=True, + bn_frozen=False, + ceil_mode=False, + with_last_pool=True): + super(VGG, self).__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + assert max(out_indices) <= num_stages + + self.num_classes = num_classes + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.bn_eval = bn_eval + self.bn_frozen = bn_frozen + + self.inplanes = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks * (2 + with_bn) + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + planes = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.inplanes, + planes, + num_blocks, + dilation=dilation, + with_bn=with_bn, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.inplanes = planes + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + from ..runner import load_checkpoint + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + elif isinstance(m, nn.Linear): + normal_init(m, std=0.01) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def train(self, mode=True): + super(VGG, self).train(mode) + if self.bn_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + if self.bn_frozen: + for params in m.parameters(): + params.requires_grad = False + vgg_layers = getattr(self, self.module_name) + if mode and self.frozen_stages >= 0: + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + mod = vgg_layers[j] + mod.eval() + for param in mod.parameters(): + param.requires_grad = False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3193b7f664e19ce2458d81c836597fa22e4bb082 --- /dev/null 
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test, + single_gpu_test) + +__all__ = [ + 'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test', + 'single_gpu_test' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/test.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/test.py new file mode 100644 index 0000000000000000000000000000000000000000..ac64007f1784b8999b969b9fe4baca393c44d257 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/engine/test.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import torch +import torch.distributed as dist + +import custom_mmpkg.custom_mmcv as mmcv +from custom_mmpkg.custom_mmcv.runner import get_dist_info + + +def single_gpu_test(model, data_loader): + """Test model with a single gpu. + + This method tests model with a single gpu and displays test progress bar. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for data in data_loader: + with torch.no_grad(): + result = model(return_loss=False, **data) + results.extend(result) + + # Assume result has the same length of batch_size + # refer to https://github.com/open-mmlab/mmcv/issues/985 + batch_size = len(result) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting + ``gpu_collect=True``, it encodes results to gpu tensors and use gpu + communication for results collection. On cpu mode it saves the results on + different gpus to ``tmpdir`` and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + results.extend(result) + + if rank == 0: + batch_size = len(result) + batch_size_all = batch_size * world_size + if batch_size_all + prog_bar.completed > len(dataset): + batch_size_all = len(dataset) - prog_bar.completed + for _ in range(batch_size_all): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + """Collect results under cpu mode. 
+
+    On cpu mode, this function will save the results on different gpus to
+    ``tmpdir`` and collect them by the rank 0 worker.
+
+    Args:
+        result_part (list): Result list containing result parts
+            to be collected.
+        size (int): Size of the results, commonly equal to length of
+            the results.
+        tmpdir (str | None): Temporary directory for collected results to
+            store. If set to None, it will create a random temporary directory
+            for it.
+
+    Returns:
+        list: The collected results.
+    """
+    rank, world_size = get_dist_info()
+    # create a tmp dir if it is not specified
+    if tmpdir is None:
+        MAX_LEN = 512
+        # 32 is whitespace
+        dir_tensor = torch.full((MAX_LEN, ),
+                                32,
+                                dtype=torch.uint8,
+                                device='cuda')
+        if rank == 0:
+            mmcv.mkdir_or_exist('.dist_test')
+            tmpdir = tempfile.mkdtemp(dir='.dist_test')
+            tmpdir = torch.tensor(
+                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+            dir_tensor[:len(tmpdir)] = tmpdir
+        dist.broadcast(dir_tensor, 0)
+        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+    else:
+        mmcv.mkdir_or_exist(tmpdir)
+    # dump the part result to the dir
+    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+    dist.barrier()
+    # collect all parts
+    if rank != 0:
+        return None
+    else:
+        # load results of all parts from tmp dir
+        part_list = []
+        for i in range(world_size):
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
+            part_result = mmcv.load(part_file)
+            # When data is severely insufficient, an empty part_result
+            # on a certain gpu could make the overall outputs empty.
+            if part_result:
+                part_list.append(part_result)
+        # sort the results
+        ordered_results = []
+        for res in zip(*part_list):
+            ordered_results.extend(list(res))
+        # the dataloader may pad some samples
+        ordered_results = ordered_results[:size]
+        # remove tmp dir
+        shutil.rmtree(tmpdir)
+        return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+    """Collect results under gpu mode.
+
+    On gpu mode, this function will encode results to gpu tensors and use gpu
+    communication for results collection.
+
+    Args:
+        result_part (list): Result list containing result parts
+            to be collected.
+        size (int): Size of the results, commonly equal to length of
+            the results.
+
+    Returns:
+        list: The collected results.
+    """
+    rank, world_size = get_dist_info()
+    # dump result part to tensor with pickle
+    part_tensor = torch.tensor(
+        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
+    # gather all result part tensor shape
+    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
+    shape_list = [shape_tensor.clone() for _ in range(world_size)]
+    dist.all_gather(shape_list, shape_tensor)
+    # padding result part tensor to max length
+    shape_max = torch.tensor(shape_list).max()
+    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
+    part_send[:shape_tensor[0]] = part_tensor
+    part_recv_list = [
+        part_tensor.new_zeros(shape_max) for _ in range(world_size)
+    ]
+    # gather all result part
+    dist.all_gather(part_recv_list, part_send)
+
+    if rank == 0:
+        part_list = []
+        for recv, shape in zip(part_recv_list, shape_list):
+            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
+            # When data is severely insufficient, an empty part_result
+            # on a certain gpu could make the overall outputs empty.
+ if part_result: + part_list.append(part_result) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2051b85f7e59bff7bdbaa131849ce8cd31f059a4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .file_client import BaseStorageBackend, FileClient +from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler +from .io import dump, load, register_handler +from .parse import dict_from_file, list_from_file + +__all__ = [ + 'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler', + 'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler', + 'list_from_file', 'dict_from_file' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/file_client.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/file_client.py new file mode 100644 index 0000000000000000000000000000000000000000..c060e6e88cce26d13b297d7aeca83e7b2be119bc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/file_client.py @@ -0,0 +1,1148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import os +import os.path as osp +import re +import tempfile +import warnings +from abc import ABCMeta, abstractmethod +from contextlib import contextmanager +from pathlib import Path +from typing import Iterable, Iterator, Optional, Tuple, Union +from urllib.request import urlopen + +import custom_mmpkg.custom_mmcv as mmcv +from custom_mmpkg.custom_mmcv.utils.misc import has_method +from custom_mmpkg.custom_mmcv.utils.path import is_filepath + + +class BaseStorageBackend(metaclass=ABCMeta): + """Abstract class of storage backends. + + All backends need to implement two apis: ``get()`` and ``get_text()``. + ``get()`` reads the file as a byte stream and ``get_text()`` reads the file + as texts. + """ + + # a flag to indicate whether the backend can create a symlink for a file + _allow_symlink = False + + @property + def name(self): + return self.__class__.__name__ + + @property + def allow_symlink(self): + return self._allow_symlink + + @abstractmethod + def get(self, filepath): + pass + + @abstractmethod + def get_text(self, filepath): + pass + + +class CephBackend(BaseStorageBackend): + """Ceph storage backend (for internal use). + + Args: + path_mapping (dict|None): path mapping dict from local path to Petrel + path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath`` + will be replaced by ``dst``. Default: None. + + .. warning:: + :class:`mmcv.fileio.file_client.CephBackend` will be deprecated, + please use :class:`mmcv.fileio.file_client.PetrelBackend` instead. 
+ """ + + def __init__(self, path_mapping=None): + try: + import ceph + except ImportError: + raise ImportError('Please install ceph to enable CephBackend.') + + warnings.warn( + 'CephBackend will be deprecated, please use PetrelBackend instead') + self._client = ceph.S3Client() + assert isinstance(path_mapping, dict) or path_mapping is None + self.path_mapping = path_mapping + + def get(self, filepath): + filepath = str(filepath) + if self.path_mapping is not None: + for k, v in self.path_mapping.items(): + filepath = filepath.replace(k, v) + value = self._client.Get(filepath) + value_buf = memoryview(value) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class PetrelBackend(BaseStorageBackend): + """Petrel storage backend (for internal use). + + PetrelBackend supports reading and writing data to multiple clusters. + If the file path contains the cluster name, PetrelBackend will read data + from specified cluster or write data to it. Otherwise, PetrelBackend will + access the default cluster. + + Args: + path_mapping (dict, optional): Path mapping dict from local path to + Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in + ``filepath`` will be replaced by ``dst``. Default: None. + enable_mc (bool, optional): Whether to enable memcached support. + Default: True. + + Examples: + >>> filepath1 = 's3://path/of/file' + >>> filepath2 = 'cluster-name:s3://path/of/file' + >>> client = PetrelBackend() + >>> client.get(filepath1) # get data from default cluster + >>> client.get(filepath2) # get data from 'cluster-name' cluster + """ + + def __init__(self, + path_mapping: Optional[dict] = None, + enable_mc: bool = True): + try: + from petrel_client import client + except ImportError: + raise ImportError('Please install petrel_client to enable ' + 'PetrelBackend.') + + self._client = client.Client(enable_mc=enable_mc) + assert isinstance(path_mapping, dict) or path_mapping is None + self.path_mapping = path_mapping + + def _map_path(self, filepath: Union[str, Path]) -> str: + """Map ``filepath`` to a string path whose prefix will be replaced by + :attr:`self.path_mapping`. + + Args: + filepath (str): Path to be mapped. + """ + filepath = str(filepath) + if self.path_mapping is not None: + for k, v in self.path_mapping.items(): + filepath = filepath.replace(k, v) + return filepath + + def _format_path(self, filepath: str) -> str: + """Convert a ``filepath`` to standard format of petrel oss. + + If the ``filepath`` is concatenated by ``os.path.join``, in a Windows + environment, the ``filepath`` will be the format of + 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the + above ``filepath`` will be converted to 's3://bucket_name/image.jpg'. + + Args: + filepath (str): Path to be formatted. + """ + return re.sub(r'\\+', '/', filepath) + + def get(self, filepath: Union[str, Path]) -> memoryview: + """Read data from a given ``filepath`` with 'rb' mode. + + Args: + filepath (str or Path): Path to read data. + + Returns: + memoryview: A memory view of expected bytes object to avoid + copying. The memoryview object can be converted to bytes by + ``value_buf.tobytes()``. + """ + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + value = self._client.Get(filepath) + value_buf = memoryview(value) + return value_buf + + def get_text(self, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. 
+
+        Args:
+            filepath (str or Path): Path to read data.
+            encoding (str): The encoding format used to open the ``filepath``.
+                Default: 'utf-8'.
+
+        Returns:
+            str: Expected text reading from ``filepath``.
+        """
+        return str(self.get(filepath), encoding=encoding)
+
+    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
+        """Save data to a given ``filepath``.
+
+        Args:
+            obj (bytes): Data to be saved.
+            filepath (str or Path): Path to write data.
+        """
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        self._client.put(filepath, obj)
+
+    def put_text(self,
+                 obj: str,
+                 filepath: Union[str, Path],
+                 encoding: str = 'utf-8') -> None:
+        """Save data to a given ``filepath``.
+
+        Args:
+            obj (str): Data to be written.
+            filepath (str or Path): Path to write data.
+            encoding (str): The encoding format used to encode the ``obj``.
+                Default: 'utf-8'.
+        """
+        self.put(bytes(obj, encoding=encoding), filepath)
+
+    def remove(self, filepath: Union[str, Path]) -> None:
+        """Remove a file.
+
+        Args:
+            filepath (str or Path): Path to be removed.
+        """
+        if not has_method(self._client, 'delete'):
+            raise NotImplementedError(
+                ('Current version of Petrel Python SDK has not supported '
+                 'the `delete` method, please use a higher version or dev'
+                 ' branch instead.'))
+
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        self._client.delete(filepath)
+
+    def exists(self, filepath: Union[str, Path]) -> bool:
+        """Check whether a file path exists.
+
+        Args:
+            filepath (str or Path): Path to be checked whether exists.
+
+        Returns:
+            bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
+        """
+        if not (has_method(self._client, 'contains')
+                and has_method(self._client, 'isdir')):
+            raise NotImplementedError(
+                ('Current version of Petrel Python SDK has not supported '
+                 'the `contains` and `isdir` methods, please use a higher '
+                 'version or dev branch instead.'))
+
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        return self._client.contains(filepath) or self._client.isdir(filepath)
+
+    def isdir(self, filepath: Union[str, Path]) -> bool:
+        """Check whether a file path is a directory.
+
+        Args:
+            filepath (str or Path): Path to be checked whether it is a
+                directory.
+
+        Returns:
+            bool: Return ``True`` if ``filepath`` points to a directory,
+                ``False`` otherwise.
+        """
+        if not has_method(self._client, 'isdir'):
+            raise NotImplementedError(
+                ('Current version of Petrel Python SDK has not supported '
+                 'the `isdir` method, please use a higher version or dev'
+                 ' branch instead.'))
+
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        return self._client.isdir(filepath)
+
+    def isfile(self, filepath: Union[str, Path]) -> bool:
+        """Check whether a file path is a file.
+
+        Args:
+            filepath (str or Path): Path to be checked whether it is a file.
+
+        Returns:
+            bool: Return ``True`` if ``filepath`` points to a file, ``False``
+                otherwise.
+        """
+        if not has_method(self._client, 'contains'):
+            raise NotImplementedError(
+                ('Current version of Petrel Python SDK has not supported '
+                 'the `contains` method, please use a higher version or '
+                 'dev branch instead.'))
+
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        return self._client.contains(filepath)
+
+    def join_path(self, filepath: Union[str, Path],
+                  *filepaths: Union[str, Path]) -> str:
+        """Concatenate all file paths.
+
+        Args:
+            filepath (str or Path): Path to be concatenated.
+            *filepaths (str or Path): Other paths to be concatenated.
+
+        Returns:
+            str: The result after concatenation.
+        """
+        filepath = self._format_path(self._map_path(filepath))
+        if filepath.endswith('/'):
+            filepath = filepath[:-1]
+        formatted_paths = [filepath]
+        for path in filepaths:
+            formatted_paths.append(self._format_path(self._map_path(path)))
+        return '/'.join(formatted_paths)
+
+    @contextmanager
+    def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
+        """Download a file from ``filepath`` and return a temporary path.
+
+        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
+        It can be called with the ``with`` statement, and when exiting from
+        the ``with`` statement, the temporary path will be released.
+
+        Args:
+            filepath (str | Path): Download a file from ``filepath``.
+
+        Examples:
+            >>> client = PetrelBackend()
+            >>> # After exiting from the ``with`` clause,
+            >>> # the path will be removed
+            >>> with client.get_local_path('s3://path/of/your/file') as path:
+            ...     # do something here
+
+        Yields:
+            Iterable[str]: Only yield one temporary path.
+        """
+        filepath = self._map_path(filepath)
+        filepath = self._format_path(filepath)
+        assert self.isfile(filepath)
+        try:
+            f = tempfile.NamedTemporaryFile(delete=False)
+            f.write(self.get(filepath))
+            f.close()
+            yield f.name
+        finally:
+            os.remove(f.name)
+
+    def list_dir_or_file(self,
+                         dir_path: Union[str, Path],
+                         list_dir: bool = True,
+                         list_file: bool = True,
+                         suffix: Optional[Union[str, Tuple[str]]] = None,
+                         recursive: bool = False) -> Iterator[str]:
+        """Scan a directory to find the interested directories or files in
+        arbitrary order.
+
+        Note:
+            Petrel has no concept of directories but it simulates the directory
+            hierarchy in the filesystem through public prefixes. In addition,
+            if the returned path ends with '/', it means the path is a public
+            prefix which is a logical directory.
+
+        Note:
+            :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+            In addition, the returned path of a directory will not contain the
+            suffix '/', which is consistent with other backends.
+
+        Args:
+            dir_path (str | Path): Path of the directory.
+            list_dir (bool): List the directories. Default: True.
+            list_file (bool): List the path of files. Default: True.
+            suffix (str or tuple[str], optional): File suffix
+                that we are interested in. Default: None.
+            recursive (bool): If set to True, recursively scan the
+                directory. Default: False.
+
+        Yields:
+            Iterable[str]: A relative path to ``dir_path``.
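+
+        Examples:
+            >>> # Illustrative sketch only; the bucket path below is a
+            >>> # placeholder and assumes a configured petrel_client setup.
+            >>> client = PetrelBackend()
+            >>> for path in client.list_dir_or_file('s3://path/of/dir'):
+            ...     print(path)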
+ """ + if not has_method(self._client, 'list'): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `list` method, please use a higher version or dev' + ' branch instead.')) + + dir_path = self._map_path(dir_path) + dir_path = self._format_path(dir_path) + if list_dir and suffix is not None: + raise TypeError( + '`list_dir` should be False when `suffix` is not None') + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('`suffix` must be a string or tuple of strings') + + # Petrel's simulated directory hierarchy assumes that directory paths + # should end with `/` + if not dir_path.endswith('/'): + dir_path += '/' + + root = dir_path + + def _list_dir_or_file(dir_path, list_dir, list_file, suffix, + recursive): + for path in self._client.list(dir_path): + # the `self.isdir` is not used here to determine whether path + # is a directory, because `self.isdir` relies on + # `self._client.list` + if path.endswith('/'): # a directory path + next_dir_path = self.join_path(dir_path, path) + if list_dir: + # get the relative path and exclude the last + # character '/' + rel_dir = next_dir_path[len(root):-1] + yield rel_dir + if recursive: + yield from _list_dir_or_file(next_dir_path, list_dir, + list_file, suffix, + recursive) + else: # a file path + absolute_path = self.join_path(dir_path, path) + rel_path = absolute_path[len(root):] + if (suffix is None + or rel_path.endswith(suffix)) and list_file: + yield rel_path + + return _list_dir_or_file(dir_path, list_dir, list_file, suffix, + recursive) + + +class MemcachedBackend(BaseStorageBackend): + """Memcached storage backend. + + Attributes: + server_list_cfg (str): Config file for memcached server list. + client_cfg (str): Config file for memcached client. + sys_path (str | None): Additional path to be appended to `sys.path`. + Default: None. + """ + + def __init__(self, server_list_cfg, client_cfg, sys_path=None): + if sys_path is not None: + import sys + sys.path.append(sys_path) + try: + import mc + except ImportError: + raise ImportError( + 'Please install memcached to enable MemcachedBackend.') + + self.server_list_cfg = server_list_cfg + self.client_cfg = client_cfg + self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, + self.client_cfg) + # mc.pyvector servers as a point which points to a memory cache + self._mc_buffer = mc.pyvector() + + def get(self, filepath): + filepath = str(filepath) + import mc + self._client.Get(filepath, self._mc_buffer) + value_buf = mc.ConvertBuffer(self._mc_buffer) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class LmdbBackend(BaseStorageBackend): + """Lmdb storage backend. + + Args: + db_path (str): Lmdb database path. + readonly (bool, optional): Lmdb environment parameter. If True, + disallow any write operations. Default: True. + lock (bool, optional): Lmdb environment parameter. If False, when + concurrent access occurs, do not lock the database. Default: False. + readahead (bool, optional): Lmdb environment parameter. If False, + disable the OS filesystem readahead mechanism, which may improve + random read performance when a database is larger than RAM. + Default: False. + + Attributes: + db_path (str): Lmdb database path. 
+ """ + + def __init__(self, + db_path, + readonly=True, + lock=False, + readahead=False, + **kwargs): + try: + import lmdb + except ImportError: + raise ImportError('Please install lmdb to enable LmdbBackend.') + + self.db_path = str(db_path) + self._client = lmdb.open( + self.db_path, + readonly=readonly, + lock=lock, + readahead=readahead, + **kwargs) + + def get(self, filepath): + """Get values according to the filepath. + + Args: + filepath (str | obj:`Path`): Here, filepath is the lmdb key. + """ + filepath = str(filepath) + with self._client.begin(write=False) as txn: + value_buf = txn.get(filepath.encode('ascii')) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class HardDiskBackend(BaseStorageBackend): + """Raw hard disks storage backend.""" + + _allow_symlink = True + + def get(self, filepath: Union[str, Path]) -> bytes: + """Read data from a given ``filepath`` with 'rb' mode. + + Args: + filepath (str or Path): Path to read data. + + Returns: + bytes: Expected bytes object. + """ + with open(filepath, 'rb') as f: + value_buf = f.read() + return value_buf + + def get_text(self, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. + + Args: + filepath (str or Path): Path to read data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + + Returns: + str: Expected text reading from ``filepath``. + """ + with open(filepath, 'r', encoding=encoding) as f: + value_buf = f.read() + return value_buf + + def put(self, obj: bytes, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'wb' mode. + + Note: + ``put`` will create a directory if the directory of ``filepath`` + does not exist. + + Args: + obj (bytes): Data to be written. + filepath (str or Path): Path to write data. + """ + mmcv.mkdir_or_exist(osp.dirname(filepath)) + with open(filepath, 'wb') as f: + f.write(obj) + + def put_text(self, + obj: str, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> None: + """Write data to a given ``filepath`` with 'w' mode. + + Note: + ``put_text`` will create a directory if the directory of + ``filepath`` does not exist. + + Args: + obj (str): Data to be written. + filepath (str or Path): Path to write data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + """ + mmcv.mkdir_or_exist(osp.dirname(filepath)) + with open(filepath, 'w', encoding=encoding) as f: + f.write(obj) + + def remove(self, filepath: Union[str, Path]) -> None: + """Remove a file. + + Args: + filepath (str or Path): Path to be removed. + """ + os.remove(filepath) + + def exists(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path exists. + + Args: + filepath (str or Path): Path to be checked whether exists. + + Returns: + bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. + """ + return osp.exists(filepath) + + def isdir(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a directory. + + Args: + filepath (str or Path): Path to be checked whether it is a + directory. + + Returns: + bool: Return ``True`` if ``filepath`` points to a directory, + ``False`` otherwise. + """ + return osp.isdir(filepath) + + def isfile(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a file. + + Args: + filepath (str or Path): Path to be checked whether it is a file. 
+
+        Returns:
+            bool: Return ``True`` if ``filepath`` points to a file, ``False``
+                otherwise.
+        """
+        return osp.isfile(filepath)
+
+    def join_path(self, filepath: Union[str, Path],
+                  *filepaths: Union[str, Path]) -> str:
+        """Concatenate all file paths.
+
+        Join one or more filepath components intelligently. The return value
+        is the concatenation of filepath and any members of *filepaths.
+
+        Args:
+            filepath (str or Path): Path to be concatenated.
+
+        Returns:
+            str: The result of concatenation.
+        """
+        return osp.join(filepath, *filepaths)
+
+    @contextmanager
+    def get_local_path(
+            self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
+        """Only for unified API and do nothing."""
+        yield filepath
+
+    def list_dir_or_file(self,
+                         dir_path: Union[str, Path],
+                         list_dir: bool = True,
+                         list_file: bool = True,
+                         suffix: Optional[Union[str, Tuple[str]]] = None,
+                         recursive: bool = False) -> Iterator[str]:
+        """Scan a directory to find the interested directories or files in
+        arbitrary order.
+
+        Note:
+            :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+
+        Args:
+            dir_path (str | Path): Path of the directory.
+            list_dir (bool): List the directories. Default: True.
+            list_file (bool): List the path of files. Default: True.
+            suffix (str or tuple[str], optional): File suffix
+                that we are interested in. Default: None.
+            recursive (bool): If set to True, recursively scan the
+                directory. Default: False.
+
+        Yields:
+            Iterable[str]: A relative path to ``dir_path``.
+        """
+        if list_dir and suffix is not None:
+            raise TypeError('`suffix` should be None when `list_dir` is True')
+
+        if (suffix is not None) and not isinstance(suffix, (str, tuple)):
+            raise TypeError('`suffix` must be a string or tuple of strings')
+
+        root = dir_path
+
+        def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+                              recursive):
+            for entry in os.scandir(dir_path):
+                if not entry.name.startswith('.') and entry.is_file():
+                    rel_path = osp.relpath(entry.path, root)
+                    if (suffix is None
+                            or rel_path.endswith(suffix)) and list_file:
+                        yield rel_path
+                elif osp.isdir(entry.path):
+                    if list_dir:
+                        rel_dir = osp.relpath(entry.path, root)
+                        yield rel_dir
+                    if recursive:
+                        yield from _list_dir_or_file(entry.path, list_dir,
+                                                     list_file, suffix,
+                                                     recursive)
+
+        return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+                                 recursive)
+
+
+class HTTPBackend(BaseStorageBackend):
+    """HTTP and HTTPS storage backend."""
+
+    def get(self, filepath):
+        value_buf = urlopen(filepath).read()
+        return value_buf
+
+    def get_text(self, filepath, encoding='utf-8'):
+        value_buf = urlopen(filepath).read()
+        return value_buf.decode(encoding)
+
+    @contextmanager
+    def get_local_path(self, filepath: str) -> Iterable[str]:
+        """Download a file from ``filepath``.
+
+        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
+        It can be called with the ``with`` statement, and when exiting from
+        the ``with`` statement, the temporary path will be released.
+
+        Args:
+            filepath (str): Download a file from ``filepath``.
+
+        Examples:
+            >>> client = HTTPBackend()
+            >>> # After exiting from the ``with`` clause,
+            >>> # the path will be removed
+            >>> with client.get_local_path('http://path/of/your/file') as path:
+            ...     # do something here
+        """
+        try:
+            f = tempfile.NamedTemporaryFile(delete=False)
+            f.write(self.get(filepath))
+            f.close()
+            yield f.name
+        finally:
+            os.remove(f.name)
+
+
+class FileClient:
+    """A general file client to access files in different backends.
+
+    The client loads a file or text in a specified backend from its path
+    and returns it as a binary or text file. There are two ways to choose a
+    backend: the name of the backend or the prefix of the path. Although both
+    of them can be used to choose a storage backend, ``backend`` has a higher
+    priority; that is, if both are set, the storage backend will be chosen by
+    the ``backend`` argument. If both are ``None``, the disk backend will be
+    chosen. Note that it can also register other backend accessors with a
+    given name, prefixes, and backend class. In addition, we use the
+    singleton pattern to avoid repeated object creation. If the arguments are
+    the same, the same object will be returned.
+
+    Args:
+        backend (str, optional): The storage backend type. Options are "disk",
+            "ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
+        prefix (str, optional): The prefix of the registered storage backend.
+            Options are "s3", "http", "https". Default: None.
+
+    Examples:
+        >>> # only set backend
+        >>> file_client = FileClient(backend='petrel')
+        >>> # only set prefix
+        >>> file_client = FileClient(prefix='s3')
+        >>> # set both backend and prefix but use backend to choose client
+        >>> file_client = FileClient(backend='petrel', prefix='s3')
+        >>> # if the arguments are the same, the same object is returned
+        >>> file_client1 = FileClient(backend='petrel')
+        >>> file_client1 is file_client
+        True
+
+    Attributes:
+        client (:obj:`BaseStorageBackend`): The backend object.
+    """
+
+    _backends = {
+        'disk': HardDiskBackend,
+        'ceph': CephBackend,
+        'memcached': MemcachedBackend,
+        'lmdb': LmdbBackend,
+        'petrel': PetrelBackend,
+        'http': HTTPBackend,
+    }
+    # This collection is used to record the overridden backends, and when a
+    # backend appears in the collection, the singleton pattern is disabled for
+    # that backend, because if the singleton pattern is used, then the object
+    # returned will be the backend before overwriting
+    _overridden_backends = set()
+    _prefix_to_backends = {
+        's3': PetrelBackend,
+        'http': HTTPBackend,
+        'https': HTTPBackend,
+    }
+    _overridden_prefixes = set()
+
+    _instances = {}
+
+    def __new__(cls, backend=None, prefix=None, **kwargs):
+        if backend is None and prefix is None:
+            backend = 'disk'
+        if backend is not None and backend not in cls._backends:
+            raise ValueError(
+                f'Backend {backend} is not supported. Currently supported ones'
+                f' are {list(cls._backends.keys())}')
+        if prefix is not None and prefix not in cls._prefix_to_backends:
+            raise ValueError(
+                f'prefix {prefix} is not supported. Currently supported ones '
+                f'are {list(cls._prefix_to_backends.keys())}')
+
+        # concatenate the arguments to a unique key for determining whether
+        # objects with the same arguments were created
+        arg_key = f'{backend}:{prefix}'
+        for key, value in kwargs.items():
+            arg_key += f':{key}:{value}'
+
+        # if a backend was overridden, it will create a new object
+        if (arg_key in cls._instances
+                and backend not in cls._overridden_backends
+                and prefix not in cls._overridden_prefixes):
+            _instance = cls._instances[arg_key]
+        else:
+            # create a new object and put it to _instances
+            _instance = super().__new__(cls)
+            if backend is not None:
+                _instance.client = cls._backends[backend](**kwargs)
+            else:
+                _instance.client = cls._prefix_to_backends[prefix](**kwargs)
+
+            cls._instances[arg_key] = _instance
+
+        return _instance
+
+    @property
+    def name(self):
+        return self.client.name
+
+    @property
+    def allow_symlink(self):
+        return self.client.allow_symlink
+
+    @staticmethod
+    def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
+        """Parse the prefix of a uri.
+
+        Args:
+            uri (str | Path): Uri to be parsed that contains the file prefix.
+
+        Examples:
+            >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
+            's3'
+
+        Returns:
+            str | None: Return the prefix of uri if the uri contains '://'
+                else ``None``.
+        """
+        assert is_filepath(uri)
+        uri = str(uri)
+        if '://' not in uri:
+            return None
+        else:
+            prefix, _ = uri.split('://')
+            # In the case of PetrelBackend, the prefix may contain the cluster
+            # name like clusterName:s3
+            if ':' in prefix:
+                _, prefix = prefix.split(':')
+            return prefix
+
+    @classmethod
+    def infer_client(cls,
+                     file_client_args: Optional[dict] = None,
+                     uri: Optional[Union[str, Path]] = None) -> 'FileClient':
+        """Infer a suitable file client based on the URI and arguments.
+
+        Args:
+            file_client_args (dict, optional): Arguments to instantiate a
+                FileClient. Default: None.
+            uri (str | Path, optional): Uri to be parsed that contains the file
+                prefix. Default: None.
+
+        Examples:
+            >>> uri = 's3://path/of/your/file'
+            >>> file_client = FileClient.infer_client(uri=uri)
+            >>> file_client_args = {'backend': 'petrel'}
+            >>> file_client = FileClient.infer_client(file_client_args)
+
+        Returns:
+            FileClient: Instantiated FileClient object.
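+
+        Examples:
+            >>> # Prefix-based inference (illustrative; the uri below is a
+            >>> # placeholder) returns an ``HTTPBackend``-backed client:
+            >>> client = FileClient.infer_client(uri='http://path/of/file')
+            >>> client.name
+            'HTTPBackend'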
+ """ + assert file_client_args is not None or uri is not None + if file_client_args is None: + file_prefix = cls.parse_uri_prefix(uri) # type: ignore + return cls(prefix=file_prefix) + else: + return cls(**file_client_args) + + @classmethod + def _register_backend(cls, name, backend, force=False, prefixes=None): + if not isinstance(name, str): + raise TypeError('the backend name should be a string, ' + f'but got {type(name)}') + if not inspect.isclass(backend): + raise TypeError( + f'backend should be a class but got {type(backend)}') + if not issubclass(backend, BaseStorageBackend): + raise TypeError( + f'backend {backend} is not a subclass of BaseStorageBackend') + if not force and name in cls._backends: + raise KeyError( + f'{name} is already registered as a storage backend, ' + 'add "force=True" if you want to override it') + + if name in cls._backends and force: + cls._overridden_backends.add(name) + cls._backends[name] = backend + + if prefixes is not None: + if isinstance(prefixes, str): + prefixes = [prefixes] + else: + assert isinstance(prefixes, (list, tuple)) + for prefix in prefixes: + if prefix not in cls._prefix_to_backends: + cls._prefix_to_backends[prefix] = backend + elif (prefix in cls._prefix_to_backends) and force: + cls._overridden_prefixes.add(prefix) + cls._prefix_to_backends[prefix] = backend + else: + raise KeyError( + f'{prefix} is already registered as a storage backend,' + ' add "force=True" if you want to override it') + + @classmethod + def register_backend(cls, name, backend=None, force=False, prefixes=None): + """Register a backend to FileClient. + + This method can be used as a normal class method or a decorator. + + .. code-block:: python + + class NewBackend(BaseStorageBackend): + + def get(self, filepath): + return filepath + + def get_text(self, filepath): + return filepath + + FileClient.register_backend('new', NewBackend) + + or + + .. code-block:: python + + @FileClient.register_backend('new') + class NewBackend(BaseStorageBackend): + + def get(self, filepath): + return filepath + + def get_text(self, filepath): + return filepath + + Args: + name (str): The name of the registered backend. + backend (class, optional): The backend class to be registered, + which must be a subclass of :class:`BaseStorageBackend`. + When this method is used as a decorator, backend is None. + Defaults to None. + force (bool, optional): Whether to override the backend if the name + has already been registered. Defaults to False. + prefixes (str or list[str] or tuple[str], optional): The prefixes + of the registered storage backend. Default: None. + `New in version 1.3.15.` + """ + if backend is not None: + cls._register_backend( + name, backend, force=force, prefixes=prefixes) + return + + def _register(backend_cls): + cls._register_backend( + name, backend_cls, force=force, prefixes=prefixes) + return backend_cls + + return _register + + def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]: + """Read data from a given ``filepath`` with 'rb' mode. + + Note: + There are two types of return values for ``get``, one is ``bytes`` + and the other is ``memoryview``. The advantage of using memoryview + is that you can avoid copying, and if you want to convert it to + ``bytes``, you can use ``.tobytes()``. + + Args: + filepath (str or Path): Path to read data. + + Returns: + bytes | memoryview: Expected bytes object or a memory view of the + bytes object. 
+ """ + return self.client.get(filepath) + + def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. + + Args: + filepath (str or Path): Path to read data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + + Returns: + str: Expected text reading from ``filepath``. + """ + return self.client.get_text(filepath, encoding) + + def put(self, obj: bytes, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'wb' mode. + + Note: + ``put`` should create a directory if the directory of ``filepath`` + does not exist. + + Args: + obj (bytes): Data to be written. + filepath (str or Path): Path to write data. + """ + self.client.put(obj, filepath) + + def put_text(self, obj: str, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'w' mode. + + Note: + ``put_text`` should create a directory if the directory of + ``filepath`` does not exist. + + Args: + obj (str): Data to be written. + filepath (str or Path): Path to write data. + encoding (str, optional): The encoding format used to open the + `filepath`. Default: 'utf-8'. + """ + self.client.put_text(obj, filepath) + + def remove(self, filepath: Union[str, Path]) -> None: + """Remove a file. + + Args: + filepath (str, Path): Path to be removed. + """ + self.client.remove(filepath) + + def exists(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path exists. + + Args: + filepath (str or Path): Path to be checked whether exists. + + Returns: + bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. + """ + return self.client.exists(filepath) + + def isdir(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a directory. + + Args: + filepath (str or Path): Path to be checked whether it is a + directory. + + Returns: + bool: Return ``True`` if ``filepath`` points to a directory, + ``False`` otherwise. + """ + return self.client.isdir(filepath) + + def isfile(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a file. + + Args: + filepath (str or Path): Path to be checked whether it is a file. + + Returns: + bool: Return ``True`` if ``filepath`` points to a file, ``False`` + otherwise. + """ + return self.client.isfile(filepath) + + def join_path(self, filepath: Union[str, Path], + *filepaths: Union[str, Path]) -> str: + """Concatenate all file paths. + + Join one or more filepath components intelligently. The return value + is the concatenation of filepath and any members of *filepaths. + + Args: + filepath (str or Path): Path to be concatenated. + + Returns: + str: The result of concatenation. + """ + return self.client.join_path(filepath, *filepaths) + + @contextmanager + def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]: + """Download data from ``filepath`` and write the data to local path. + + ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It + can be called with ``with`` statement, and when exists from the + ``with`` statement, the temporary path will be released. + + Note: + If the ``filepath`` is a local path, just return itself. + + .. warning:: + ``get_local_path`` is an experimental interface that may change in + the future. + + Args: + filepath (str or Path): Path to be read data. + + Examples: + >>> file_client = FileClient(prefix='s3') + >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path: + ... 
# do something here + + Yields: + Iterable[str]: Only yield one path. + """ + with self.client.get_local_path(str(filepath)) as local_path: + yield local_path + + def list_dir_or_file(self, + dir_path: Union[str, Path], + list_dir: bool = True, + list_file: bool = True, + suffix: Optional[Union[str, Tuple[str]]] = None, + recursive: bool = False) -> Iterator[str]: + """Scan a directory to find the interested directories or files in + arbitrary order. + + Note: + :meth:`list_dir_or_file` returns the path relative to ``dir_path``. + + Args: + dir_path (str | Path): Path of the directory. + list_dir (bool): List the directories. Default: True. + list_file (bool): List the path of files. Default: True. + suffix (str or tuple[str], optional): File suffix + that we are interested in. Default: None. + recursive (bool): If set to True, recursively scan the + directory. Default: False. + + Yields: + Iterable[str]: A relative path to ``dir_path``. + """ + yield from self.client.list_dir_or_file(dir_path, list_dir, list_file, + suffix, recursive) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa24d91972837b8756b225f4879bac20436eb72a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseFileHandler +from .json_handler import JsonHandler +from .pickle_handler import PickleHandler +from .yaml_handler import YamlHandler + +__all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/base.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..288878bc57282fbb2f12b32290152ca8e9d3cab0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/base.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + + +class BaseFileHandler(metaclass=ABCMeta): + # `str_like` is a flag to indicate whether the type of file object is + # str-like object or bytes-like object. Pickle only processes bytes-like + # objects but json only processes str-like object. If it is str-like + # object, `StringIO` will be used to process the buffer. 
+ str_like = True + + @abstractmethod + def load_from_fileobj(self, file, **kwargs): + pass + + @abstractmethod + def dump_to_fileobj(self, obj, file, **kwargs): + pass + + @abstractmethod + def dump_to_str(self, obj, **kwargs): + pass + + def load_from_path(self, filepath, mode='r', **kwargs): + with open(filepath, mode) as f: + return self.load_from_fileobj(f, **kwargs) + + def dump_to_path(self, obj, filepath, mode='w', **kwargs): + with open(filepath, mode) as f: + self.dump_to_fileobj(obj, f, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/json_handler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/json_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..18d4f15f74139d20adff18b20be5529c592a66b6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/json_handler.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json + +import numpy as np + +from .base import BaseFileHandler + + +def set_default(obj): + """Set default json values for non-serializable values. + + It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. + It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, + etc.) into plain numbers of plain python built-in types. + """ + if isinstance(obj, (set, range)): + return list(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, np.generic): + return obj.item() + raise TypeError(f'{type(obj)} is unsupported for json dump') + + +class JsonHandler(BaseFileHandler): + + def load_from_fileobj(self, file): + return json.load(file) + + def dump_to_fileobj(self, obj, file, **kwargs): + kwargs.setdefault('default', set_default) + json.dump(obj, file, **kwargs) + + def dump_to_str(self, obj, **kwargs): + kwargs.setdefault('default', set_default) + return json.dumps(obj, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/pickle_handler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/pickle_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/pickle_handler.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pickle
+
+from .base import BaseFileHandler
+
+
+class PickleHandler(BaseFileHandler):
+
+    str_like = False
+
+    def load_from_fileobj(self, file, **kwargs):
+        return pickle.load(file, **kwargs)
+
+    def load_from_path(self, filepath, **kwargs):
+        return super(PickleHandler, self).load_from_path(
+            filepath, mode='rb', **kwargs)
+
+    def dump_to_str(self, obj, **kwargs):
+        kwargs.setdefault('protocol', 2)
+        return pickle.dumps(obj, **kwargs)
+
+    def dump_to_fileobj(self, obj, file, **kwargs):
+        kwargs.setdefault('protocol', 2)
+        pickle.dump(obj, file, **kwargs)
+
+    def dump_to_path(self, obj, filepath, **kwargs):
+        super(PickleHandler, self).dump_to_path(
+            obj, filepath, mode='wb', **kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/yaml_handler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/yaml_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5aa2eea1e8c76f8baf753d1c8c959dee665e543
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/handlers/yaml_handler.py
@@ -0,0 +1,24 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import yaml
+
+try:
+    from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+    from yaml import Loader, Dumper
+
+from .base import BaseFileHandler  # isort:skip
+
+
+class YamlHandler(BaseFileHandler):
+
+    def load_from_fileobj(self, file, **kwargs):
+        kwargs.setdefault('Loader', Loader)
+        return yaml.load(file, **kwargs)
+
+    def dump_to_fileobj(self, obj, file, **kwargs):
+        kwargs.setdefault('Dumper', Dumper)
+        yaml.dump(obj, file, **kwargs)
+
+    def dump_to_str(self, obj, **kwargs):
+        kwargs.setdefault('Dumper', Dumper)
+        return yaml.dump(obj, **kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/io.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..aaefde58aa3ea5b58f86249ce7e1c40c186eb8dd
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/io.py
@@ -0,0 +1,151 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from io import BytesIO, StringIO
+from pathlib import Path
+
+from ..utils import is_list_of, is_str
+from .file_client import FileClient
+from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
+
+file_handlers = {
+    'json': JsonHandler(),
+    'yaml': YamlHandler(),
+    'yml': YamlHandler(),
+    'pickle': PickleHandler(),
+    'pkl': PickleHandler()
+}
+
+
+def load(file, file_format=None, file_client_args=None, **kwargs):
+    """Load data from json/yaml/pickle files.
+
+    This method provides a unified API for loading data from serialized files.
+
+    Note:
+        In v1.3.16 and later, ``load`` supports loading data from serialized
+        files that can be stored in different backends.
+
+    Args:
+        file (str or :obj:`Path` or file-like object): Filename or a file-like
+            object.
+        file_format (str, optional): If not specified, the file format will be
+            inferred from the file extension, otherwise use the specified one.
+            Currently supported formats include "json", "yaml/yml" and
+            "pickle/pkl".
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            Default: None.
+
+    Examples:
+        >>> load('/path/of/your/file')  # file is stored on disk
+        >>> load('https://path/of/your/file')  # file is stored on the Internet
+        >>> load('s3://path/of/your/file')  # file is stored in petrel
+
+    Returns:
+        The content from the file.
+    """
+    if isinstance(file, Path):
+        file = str(file)
+    if file_format is None and is_str(file):
+        file_format = file.split('.')[-1]
+    if file_format not in file_handlers:
+        raise TypeError(f'Unsupported format: {file_format}')
+
+    handler = file_handlers[file_format]
+    if is_str(file):
+        file_client = FileClient.infer_client(file_client_args, file)
+        if handler.str_like:
+            with StringIO(file_client.get_text(file)) as f:
+                obj = handler.load_from_fileobj(f, **kwargs)
+        else:
+            with BytesIO(file_client.get(file)) as f:
+                obj = handler.load_from_fileobj(f, **kwargs)
+    elif hasattr(file, 'read'):
+        obj = handler.load_from_fileobj(file, **kwargs)
+    else:
+        raise TypeError('"file" must be a filepath str or a file-object')
+    return obj
+
+
+def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
+    """Dump data to json/yaml/pickle strings or files.
+
+    This method provides a unified API for dumping data as strings or to
+    files, and also supports custom arguments for each file format.
+
+    Note:
+        In v1.3.16 and later, ``dump`` supports dumping data as strings or to
+        files which are saved to different backends.
+
+    Args:
+        obj (any): The python object to be dumped.
+        file (str or :obj:`Path` or file-like object, optional): If not
+            specified, then the object is dumped to a str, otherwise to a file
+            specified by the filename or file-like object.
+        file_format (str, optional): Same as :func:`load`.
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            Default: None.
+
+    Examples:
+        >>> dump('hello world', '/path/of/your/file')  # disk
+        >>> dump('hello world', 's3://path/of/your/file')  # ceph or petrel
+
+    Returns:
+        bool: True for success, False otherwise.
+    """
+    if isinstance(file, Path):
+        file = str(file)
+    if file_format is None:
+        if is_str(file):
+            file_format = file.split('.')[-1]
+        elif file is None:
+            raise ValueError(
+                'file_format must be specified since file is None')
+    if file_format not in file_handlers:
+        raise TypeError(f'Unsupported format: {file_format}')
+
+    handler = file_handlers[file_format]
+    if file is None:
+        return handler.dump_to_str(obj, **kwargs)
+    elif is_str(file):
+        file_client = FileClient.infer_client(file_client_args, file)
+        if handler.str_like:
+            with StringIO() as f:
+                handler.dump_to_fileobj(obj, f, **kwargs)
+                file_client.put_text(f.getvalue(), file)
+        else:
+            with BytesIO() as f:
+                handler.dump_to_fileobj(obj, f, **kwargs)
+                file_client.put(f.getvalue(), file)
+    elif hasattr(file, 'write'):
+        handler.dump_to_fileobj(obj, file, **kwargs)
+    else:
+        raise TypeError('"file" must be a filename str or a file-object')
+
+
+def _register_handler(handler, file_formats):
+    """Register a handler for some file extensions.
+
+    Args:
+        handler (:obj:`BaseFileHandler`): Handler to be registered.
+        file_formats (str or list[str]): File formats to be handled by this
+            handler.
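+
+    Examples:
+        >>> # Illustrative sketch of the public ``register_handler``
+        >>> # decorator defined below; ``TxtHandler`` is a made-up handler.
+        >>> @register_handler('txt')
+        ... class TxtHandler(BaseFileHandler):
+        ...     def load_from_fileobj(self, file):
+        ...         return file.read()
+        ...     def dump_to_fileobj(self, obj, file):
+        ...         file.write(str(obj))
+        ...     def dump_to_str(self, obj):
+        ...         return str(obj)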
+ """ + if not isinstance(handler, BaseFileHandler): + raise TypeError( + f'handler must be a child of BaseFileHandler, not {type(handler)}') + if isinstance(file_formats, str): + file_formats = [file_formats] + if not is_list_of(file_formats, str): + raise TypeError('file_formats must be a str or a list of str') + for ext in file_formats: + file_handlers[ext] = handler + + +def register_handler(file_formats, **kwargs): + + def wrap(cls): + _register_handler(cls(**kwargs), file_formats) + return cls + + return wrap diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/parse.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/parse.py new file mode 100644 index 0000000000000000000000000000000000000000..f60f0d611b8d75692221d0edd7dc993b0a6445c9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/fileio/parse.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from io import StringIO + +from .file_client import FileClient + + +def list_from_file(filename, + prefix='', + offset=0, + max_num=0, + encoding='utf-8', + file_client_args=None): + """Load a text file and parse the content as a list of strings. + + Note: + In v1.3.16 and later, ``list_from_file`` supports loading a text file + which can be storaged in different backends and parsing the content as + a list for strings. + + Args: + filename (str): Filename. + prefix (str): The prefix to be inserted to the beginning of each item. + offset (int): The offset of lines. + max_num (int): The maximum number of lines to be read, + zeros and negatives mean no limitation. + encoding (str): Encoding used to open the file. Default utf-8. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> list_from_file('/path/of/your/file') # disk + ['hello', 'world'] + >>> list_from_file('s3://path/of/your/file') # ceph or petrel + ['hello', 'world'] + + Returns: + list[str]: A list of strings. + """ + cnt = 0 + item_list = [] + file_client = FileClient.infer_client(file_client_args, filename) + with StringIO(file_client.get_text(filename, encoding)) as f: + for _ in range(offset): + f.readline() + for line in f: + if 0 < max_num <= cnt: + break + item_list.append(prefix + line.rstrip('\n\r')) + cnt += 1 + return item_list + + +def dict_from_file(filename, + key_type=str, + encoding='utf-8', + file_client_args=None): + """Load a text file and parse the content as a dict. + + Each line of the text file will be two or more columns split by + whitespaces or tabs. The first column will be parsed as dict keys, and + the following columns will be parsed as dict values. + + Note: + In v1.3.16 and later, ``dict_from_file`` supports loading a text file + which can be storaged in different backends and parsing the content as + a dict. + + Args: + filename(str): Filename. + key_type(type): Type of the dict keys. str is user by default and + type conversion will be performed if specified. + encoding (str): Encoding used to open the file. Default utf-8. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> dict_from_file('/path/of/your/file') # disk + {'key1': 'value1', 'key2': 'value2'} + >>> dict_from_file('s3://path/of/your/file') # ceph or petrel + {'key1': 'value1', 'key2': 'value2'} + + Returns: + dict: The parsed contents. 
+ """ + mapping = {} + file_client = FileClient.infer_client(file_client_args, filename) + with StringIO(file_client.get_text(filename, encoding)) as f: + for line in f: + items = line.rstrip('\n').split() + assert len(items) >= 2 + key = key_type(items[0]) + val = items[1:] if len(items) > 2 else items[1] + mapping[key] = val + return mapping diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0051d609d3de4e7562e3fe638335c66617c4d91 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr, + gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert, + rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb) +from .geometric import (cutout, imcrop, imflip, imflip_, impad, + impad_to_multiple, imrescale, imresize, imresize_like, + imresize_to_multiple, imrotate, imshear, imtranslate, + rescale_size) +from .io import imfrombytes, imread, imwrite, supported_backends, use_backend +from .misc import tensor2imgs +from .photometric import (adjust_brightness, adjust_color, adjust_contrast, + adjust_lighting, adjust_sharpness, auto_contrast, + clahe, imdenormalize, imequalize, iminvert, + imnormalize, imnormalize_, lut_transform, posterize, + solarize) + +__all__ = [ + 'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb', + 'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale', + 'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size', + 'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate', + 'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend', + 'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize', + 'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr', + 'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize', + 'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe', + 'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/colorspace.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/colorspace.py new file mode 100644 index 0000000000000000000000000000000000000000..814533952fdfda23d67cb6a3073692d8c1156add --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/colorspace.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 +import numpy as np + + +def imconvert(img, src, dst): + """Convert an image from the src colorspace to dst colorspace. + + Args: + img (ndarray): The input image. + src (str): The source colorspace, e.g., 'rgb', 'hsv'. + dst (str): The destination colorspace, e.g., 'rgb', 'hsv'. + + Returns: + ndarray: The converted image. + """ + code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') + out_img = cv2.cvtColor(img, code) + return out_img + + +def bgr2gray(img, keepdim=False): + """Convert a BGR image to grayscale image. + + Args: + img (ndarray): The input image. + keepdim (bool): If False (by default), then return the grayscale image + with 2 dims, otherwise 3 dims. + + Returns: + ndarray: The converted grayscale image. 
+ """ + out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + if keepdim: + out_img = out_img[..., None] + return out_img + + +def rgb2gray(img, keepdim=False): + """Convert a RGB image to grayscale image. + + Args: + img (ndarray): The input image. + keepdim (bool): If False (by default), then return the grayscale image + with 2 dims, otherwise 3 dims. + + Returns: + ndarray: The converted grayscale image. + """ + out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + if keepdim: + out_img = out_img[..., None] + return out_img + + +def gray2bgr(img): + """Convert a grayscale image to BGR image. + + Args: + img (ndarray): The input image. + + Returns: + ndarray: The converted BGR image. + """ + img = img[..., None] if img.ndim == 2 else img + out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + return out_img + + +def gray2rgb(img): + """Convert a grayscale image to RGB image. + + Args: + img (ndarray): The input image. + + Returns: + ndarray: The converted RGB image. + """ + img = img[..., None] if img.ndim == 2 else img + out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + return out_img + + +def _convert_input_type_range(img): + """Convert the type and range of the input image. + + It converts the input image to np.float32 type and range of [0, 1]. + It is mainly used for pre-processing the input image in colorspace + conversion functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + (ndarray): The converted image with type of np.float32 and range of + [0, 1]. + """ + img_type = img.dtype + img = img.astype(np.float32) + if img_type == np.float32: + pass + elif img_type == np.uint8: + img /= 255. + else: + raise TypeError('The img type should be np.float32 or np.uint8, ' + f'but got {img_type}') + return img + + +def _convert_output_type_range(img, dst_type): + """Convert the type and range of the image according to dst_type. + + It converts the image to desired type and range. If `dst_type` is np.uint8, + images will be converted to np.uint8 type with range [0, 255]. If + `dst_type` is np.float32, it converts the image to np.float32 type with + range [0, 1]. + It is mainly used for post-processing images in colorspace conversion + functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The image to be converted with np.float32 type and + range [0, 255]. + dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it + converts the image to np.uint8 type with range [0, 255]. If + dst_type is np.float32, it converts the image to np.float32 type + with range [0, 1]. + + Returns: + (ndarray): The converted image with desired type and range. + """ + if dst_type not in (np.uint8, np.float32): + raise TypeError('The dst_type should be np.float32 or np.uint8, ' + f'but got {dst_type}') + if dst_type == np.uint8: + img = img.round() + else: + img /= 255. + return img.astype(dst_type) + + +def rgb2ycbcr(img, y_only=False): + """Convert a RGB image to YCbCr image. + + This function produces the same results as Matlab's `rgb2ycbcr` function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. 
np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 + else: + out_img = np.matmul( + img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], + [24.966, 112.0, -18.214]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def bgr2ycbcr(img, y_only=False): + """Convert a BGR image to YCbCr image. + + The bgr version of rgb2ycbcr. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 + else: + out_img = np.matmul( + img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], + [65.481, -37.797, 112.0]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2rgb(img): + """Convert a YCbCr image to RGB image. + + This function produces the same results as Matlab's ycbcr2rgb function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted RGB image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], + [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [ + -222.921, 135.576, -276.836 + ] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2bgr(img): + """Convert a YCbCr image to BGR image. + + The bgr version of ycbcr2rgb. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted BGR image. The output image has the same type + and range as input image. 
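+
+    Examples:
+        >>> # Illustrative round trip via ``bgr2ycbcr``; the output dtype
+        >>> # matches the uint8 input, with small rounding error expected.
+        >>> import numpy as np
+        >>> img = np.random.randint(16, 236, (2, 2, 3), dtype=np.uint8)
+        >>> out = ycbcr2bgr(bgr2ycbcr(img))
+        >>> out.dtype
+        dtype('uint8')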
+ """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], + [0.00791071, -0.00153632, 0], + [0, -0.00318811, 0.00625893]]) * 255.0 + [ + -276.836, 135.576, -222.921 + ] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def convert_color_factory(src, dst): + + code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') + + def convert_color(img): + out_img = cv2.cvtColor(img, code) + return out_img + + convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()} + image. + + Args: + img (ndarray or str): The input image. + + Returns: + ndarray: The converted {dst.upper()} image. + """ + + return convert_color + + +bgr2rgb = convert_color_factory('bgr', 'rgb') + +rgb2bgr = convert_color_factory('rgb', 'bgr') + +bgr2hsv = convert_color_factory('bgr', 'hsv') + +hsv2bgr = convert_color_factory('hsv', 'bgr') + +bgr2hls = convert_color_factory('bgr', 'hls') + +hls2bgr = convert_color_factory('hls', 'bgr') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/geometric.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/geometric.py new file mode 100644 index 0000000000000000000000000000000000000000..cf97c201cb4e43796c911919d03fb26a07ed817d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/geometric.py @@ -0,0 +1,728 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers + +import cv2 +import numpy as np + +from ..utils import to_2tuple +from .io import imread_backend + +try: + from PIL import Image +except ImportError: + Image = None + + +def _scale_size(size, scale): + """Rescale a size by a ratio. + + Args: + size (tuple[int]): (w, h). + scale (float | tuple(float)): Scaling factor. + + Returns: + tuple[int]: scaled size. + """ + if isinstance(scale, (float, int)): + scale = (scale, scale) + w, h = size + return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5) + + +cv2_interp_codes = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'bicubic': cv2.INTER_CUBIC, + 'area': cv2.INTER_AREA, + 'lanczos': cv2.INTER_LANCZOS4 +} + +if Image is not None: + pillow_interp_codes = { + 'nearest': Image.NEAREST, + 'bilinear': Image.BILINEAR, + 'bicubic': Image.BICUBIC, + 'box': Image.BOX, + 'lanczos': Image.LANCZOS, + 'hamming': Image.HAMMING + } + + +def imresize(img, + size, + return_scale=False, + interpolation='bilinear', + out=None, + backend=None): + """Resize image to a given size. + + Args: + img (ndarray): The input image. + size (tuple[int]): Target size (w, h). + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + out (ndarray): The output destination. + backend (str | None): The image resize backend type. Options are `cv2`, + `pillow`, `None`. If backend is None, the global imread_backend + specified by ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = img.shape[:2] + if backend is None: + backend = imread_backend + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' 
+ f"Supported backends are 'cv2', 'pillow'") + + if backend == 'pillow': + assert img.dtype == np.uint8, 'Pillow backend only support uint8 type' + pil_image = Image.fromarray(img) + pil_image = pil_image.resize(size, pillow_interp_codes[interpolation]) + resized_img = np.array(pil_image) + else: + resized_img = cv2.resize( + img, size, dst=out, interpolation=cv2_interp_codes[interpolation]) + if not return_scale: + return resized_img + else: + w_scale = size[0] / w + h_scale = size[1] / h + return resized_img, w_scale, h_scale + + +def imresize_to_multiple(img, + divisor, + size=None, + scale_factor=None, + keep_ratio=False, + return_scale=False, + interpolation='bilinear', + out=None, + backend=None): + """Resize image according to a given size or scale factor and then rounds + up the the resized or rescaled image size to the nearest value that can be + divided by the divisor. + + Args: + img (ndarray): The input image. + divisor (int | tuple): Resized image size will be a multiple of + divisor. If divisor is a tuple, divisor should be + (w_divisor, h_divisor). + size (None | int | tuple[int]): Target size (w, h). Default: None. + scale_factor (None | float | tuple[float]): Multiplier for spatial + size. Should match input size if it is a tuple and the 2D style is + (w_scale_factor, h_scale_factor). Default: None. + keep_ratio (bool): Whether to keep the aspect ratio when resizing the + image. Default: False. + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + out (ndarray): The output destination. + backend (str | None): The image resize backend type. Options are `cv2`, + `pillow`, `None`. If backend is None, the global imread_backend + specified by ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = img.shape[:2] + if size is not None and scale_factor is not None: + raise ValueError('only one of size or scale_factor should be defined') + elif size is None and scale_factor is None: + raise ValueError('one of size or scale_factor should be defined') + elif size is not None: + size = to_2tuple(size) + if keep_ratio: + size = rescale_size((w, h), size, return_scale=False) + else: + size = _scale_size((w, h), scale_factor) + + divisor = to_2tuple(divisor) + size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)]) + resized_img, w_scale, h_scale = imresize( + img, + size, + return_scale=True, + interpolation=interpolation, + out=out, + backend=backend) + if return_scale: + return resized_img, w_scale, h_scale + else: + return resized_img + + +def imresize_like(img, + dst_img, + return_scale=False, + interpolation='bilinear', + backend=None): + """Resize image to the same size of a given image. + + Args: + img (ndarray): The input image. + dst_img (ndarray): The target image. + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Same as :func:`resize`. + backend (str | None): Same as :func:`resize`. + + Returns: + tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = dst_img.shape[:2] + return imresize(img, (w, h), return_scale, interpolation, backend=backend) + + +def rescale_size(old_size, scale, return_scale=False): + """Calculate the new size to be rescaled to. 
+ + Args: + old_size (tuple[int]): The old size (w, h) of image. + scale (float | tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by this + factor, else if it is a tuple of 2 integers, then the image will + be rescaled as large as possible within the scale. + return_scale (bool): Whether to return the scaling factor besides the + rescaled image size. + + Returns: + tuple[int]: The new rescaled image size. + """ + w, h = old_size + if isinstance(scale, (float, int)): + if scale <= 0: + raise ValueError(f'Invalid scale {scale}, must be positive.') + scale_factor = scale + elif isinstance(scale, tuple): + max_long_edge = max(scale) + max_short_edge = min(scale) + scale_factor = min(max_long_edge / max(h, w), + max_short_edge / min(h, w)) + else: + raise TypeError( + f'Scale must be a number or tuple of int, but got {type(scale)}') + + new_size = _scale_size((w, h), scale_factor) + + if return_scale: + return new_size, scale_factor + else: + return new_size + + +def imrescale(img, + scale, + return_scale=False, + interpolation='bilinear', + backend=None): + """Resize image while keeping the aspect ratio. + + Args: + img (ndarray): The input image. + scale (float | tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by this + factor, else if it is a tuple of 2 integers, then the image will + be rescaled as large as possible within the scale. + return_scale (bool): Whether to return the scaling factor besides the + rescaled image. + interpolation (str): Same as :func:`resize`. + backend (str | None): Same as :func:`resize`. + + Returns: + ndarray: The rescaled image. + """ + h, w = img.shape[:2] + new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) + rescaled_img = imresize( + img, new_size, interpolation=interpolation, backend=backend) + if return_scale: + return rescaled_img, scale_factor + else: + return rescaled_img + + +def imflip(img, direction='horizontal'): + """Flip an image horizontally or vertically. + + Args: + img (ndarray): Image to be flipped. + direction (str): The flip direction, either "horizontal" or + "vertical" or "diagonal". + + Returns: + ndarray: The flipped image. + """ + assert direction in ['horizontal', 'vertical', 'diagonal'] + if direction == 'horizontal': + return np.flip(img, axis=1) + elif direction == 'vertical': + return np.flip(img, axis=0) + else: + return np.flip(img, axis=(0, 1)) + + +def imflip_(img, direction='horizontal'): + """Inplace flip an image horizontally or vertically. + + Args: + img (ndarray): Image to be flipped. + direction (str): The flip direction, either "horizontal" or + "vertical" or "diagonal". + + Returns: + ndarray: The flipped image (inplace). + """ + assert direction in ['horizontal', 'vertical', 'diagonal'] + if direction == 'horizontal': + return cv2.flip(img, 1, img) + elif direction == 'vertical': + return cv2.flip(img, 0, img) + else: + return cv2.flip(img, -1, img) + + +def imrotate(img, + angle, + center=None, + scale=1.0, + border_value=0, + interpolation='bilinear', + auto_bound=False): + """Rotate an image. + + Args: + img (ndarray): Image to be rotated. + angle (float): Rotation angle in degrees, positive values mean + clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If not specified, the center of the image will be + used. + scale (float): Isotropic scale factor. + border_value (int): Border value. 
+        interpolation (str): Same as :func:`resize`.
+        auto_bound (bool): Whether to adjust the image size to cover the whole
+            rotated image.
+
+    Returns:
+        ndarray: The rotated image.
+    """
+    if center is not None and auto_bound:
+        raise ValueError('`auto_bound` conflicts with `center`')
+    h, w = img.shape[:2]
+    if center is None:
+        center = ((w - 1) * 0.5, (h - 1) * 0.5)
+    assert isinstance(center, tuple)
+
+    matrix = cv2.getRotationMatrix2D(center, -angle, scale)
+    if auto_bound:
+        cos = np.abs(matrix[0, 0])
+        sin = np.abs(matrix[0, 1])
+        new_w = h * sin + w * cos
+        new_h = h * cos + w * sin
+        matrix[0, 2] += (new_w - w) * 0.5
+        matrix[1, 2] += (new_h - h) * 0.5
+        w = int(np.round(new_w))
+        h = int(np.round(new_h))
+    rotated = cv2.warpAffine(
+        img,
+        matrix, (w, h),
+        flags=cv2_interp_codes[interpolation],
+        borderValue=border_value)
+    return rotated
+
+
+def bbox_clip(bboxes, img_shape):
+    """Clip bboxes to fit the image shape.
+
+    Args:
+        bboxes (ndarray): Shape (..., 4*k).
+        img_shape (tuple[int]): (height, width) of the image.
+
+    Returns:
+        ndarray: Clipped bboxes.
+    """
+    assert bboxes.shape[-1] % 4 == 0
+    cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype)
+    cmin[0::2] = img_shape[1] - 1
+    cmin[1::2] = img_shape[0] - 1
+    clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0)
+    return clipped_bboxes
+
+
+def bbox_scaling(bboxes, scale, clip_shape=None):
+    """Scale bboxes w.r.t. the box center.
+
+    Args:
+        bboxes (ndarray): Shape (..., 4).
+        scale (float): Scaling factor.
+        clip_shape (tuple[int], optional): If specified, bboxes that exceed the
+            boundary will be clipped according to the given shape (h, w).
+
+    Returns:
+        ndarray: Scaled bboxes.
+    """
+    if float(scale) == 1.0:
+        scaled_bboxes = bboxes.copy()
+    else:
+        w = bboxes[..., 2] - bboxes[..., 0] + 1
+        h = bboxes[..., 3] - bboxes[..., 1] + 1
+        dw = (w * (scale - 1)) * 0.5
+        dh = (h * (scale - 1)) * 0.5
+        scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
+    if clip_shape is not None:
+        return bbox_clip(scaled_bboxes, clip_shape)
+    else:
+        return scaled_bboxes
+
+
+def imcrop(img, bboxes, scale=1.0, pad_fill=None):
+    """Crop image patches.
+
+    3 steps: scale the bboxes -> clip bboxes -> crop and pad.
+
+    Args:
+        img (ndarray): Image to be cropped.
+        bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
+        scale (float, optional): Scale ratio of bboxes, the default value
+            1.0 means no scaling.
+        pad_fill (Number | list[Number]): Value to be filled for padding.
+            Default: None, which means no padding.
+
+    Returns:
+        list[ndarray] | ndarray: The cropped image patches.
+    """
+    chn = 1 if img.ndim == 2 else img.shape[2]
+    if pad_fill is not None:
+        if isinstance(pad_fill, (int, float)):
+            pad_fill = [pad_fill for _ in range(chn)]
+        assert len(pad_fill) == chn
+
+    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
+    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
+    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
+
+    patches = []
+    for i in range(clipped_bbox.shape[0]):
+        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
+        if pad_fill is None:
+            patch = img[y1:y2 + 1, x1:x2 + 1, ...]
+        else:
+            # The scaled bbox may exceed the image: build a canvas filled
+            # with `pad_fill` and paste the in-image region into it.
+            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
+            if chn == 1:
+                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
+            else:
+                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
+            patch = np.array(
+                pad_fill, dtype=img.dtype) * np.ones(
+                    patch_shape, dtype=img.dtype)
+            x_start = 0 if _x1 >= 0 else -_x1
+            y_start = 0 if _y1 >= 0 else -_y1
+            w = x2 - x1 + 1
+            h = y2 - y1 + 1
+            patch[y_start:y_start + h, x_start:x_start + w,
+                  ...] = img[y1:y1 + h, x1:x1 + w, ...]
+        patches.append(patch)
+
+    if bboxes.ndim == 1:
+        return patches[0]
+    else:
+        return patches
+
+
+def impad(img,
+          *,
+          shape=None,
+          padding=None,
+          pad_val=0,
+          padding_mode='constant'):
+    """Pad the given image to a certain shape or pad on all sides with
+    specified padding mode and padding value.
+
+    Args:
+        img (ndarray): Image to be padded.
+        shape (tuple[int]): Expected padding shape (h, w). Default: None.
+        padding (int or tuple[int]): Padding on each border. If a single int is
+            provided this is used to pad all borders. If tuple of length 2 is
+            provided this is the padding on left/right and top/bottom
+            respectively. If a tuple of length 4 is provided this is the
+            padding for the left, top, right and bottom borders respectively.
+            Default: None. Note that `shape` and `padding` cannot both be
+            set.
+        pad_val (Number | Sequence[Number]): Values to be filled in padding
+            areas when padding_mode is 'constant'. Default: 0.
+        padding_mode (str): Type of padding. Should be: constant, edge,
+            reflect or symmetric. Default: constant.
+
+            - constant: pads with a constant value, this value is specified
+              with pad_val.
+            - edge: pads with the last value at the edge of the image.
+            - reflect: pads with reflection of image without repeating the
+              last value on the edge. For example, padding [1, 2, 3, 4]
+              with 2 elements on both sides in reflect mode will result
+              in [3, 2, 1, 2, 3, 4, 3, 2].
+            - symmetric: pads with reflection of image repeating the last
+              value on the edge. For example, padding [1, 2, 3, 4] with
+              2 elements on both sides in symmetric mode will result in
+              [2, 1, 1, 2, 3, 4, 4, 3].
+
+    Returns:
+        ndarray: The padded image.
+    """
+
+    assert (shape is not None) ^ (padding is not None)
+    if shape is not None:
+        padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0])
+
+    # check pad_val
+    if isinstance(pad_val, tuple):
+        assert len(pad_val) == img.shape[-1]
+    elif not isinstance(pad_val, numbers.Number):
+        raise TypeError('pad_val must be an int or a tuple. '
+                        f'But received {type(pad_val)}')
+
+    # check padding
+    if isinstance(padding, tuple) and len(padding) in [2, 4]:
+        if len(padding) == 2:
+            padding = (padding[0], padding[1], padding[0], padding[1])
+    elif isinstance(padding, numbers.Number):
+        padding = (padding, padding, padding, padding)
+    else:
+        raise ValueError('Padding must be an int or a tuple of 2 or 4 '
+                         f'elements. But received {padding}')
+
+    # check padding mode
+    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
+
+    border_type = {
+        'constant': cv2.BORDER_CONSTANT,
+        'edge': cv2.BORDER_REPLICATE,
+        'reflect': cv2.BORDER_REFLECT_101,
+        'symmetric': cv2.BORDER_REFLECT
+    }
+    img = cv2.copyMakeBorder(
+        img,
+        padding[1],
+        padding[3],
+        padding[0],
+        padding[2],
+        border_type[padding_mode],
+        value=pad_val)
+
+    return img
+
+
+def impad_to_multiple(img, divisor, pad_val=0):
+    """Pad an image so that each edge is a multiple of some number.
+
+    Args:
+        img (ndarray): Image to be padded.
+        divisor (int): Padded image edges will be multiples of divisor.
+        pad_val (Number | Sequence[Number]): Same as :func:`impad`.
+
+    Returns:
+        ndarray: The padded image.
+    """
+    pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
+    pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
+    return impad(img, shape=(pad_h, pad_w), pad_val=pad_val)
+
+
+def cutout(img, shape, pad_val=0):
+    """Randomly cut out a rectangle from the original img.
+
+    Args:
+        img (ndarray): Image to be cutout.
+        shape (int | tuple[int]): Expected cutout shape (h, w). If given as an
+            int, the value will be used for both h and w.
+        pad_val (int | float | tuple[int | float]): Values to be filled in the
+            cut area. Defaults to 0.
+
+    Returns:
+        ndarray: The cutout image.
+    """
+
+    channels = 1 if img.ndim == 2 else img.shape[2]
+    if isinstance(shape, int):
+        cut_h, cut_w = shape, shape
+    else:
+        assert isinstance(shape, tuple) and len(shape) == 2, \
+            f'shape must be an int or a tuple with length 2, but got type ' \
+            f'{type(shape)} instead.'
+        cut_h, cut_w = shape
+    if isinstance(pad_val, (int, float)):
+        pad_val = tuple([pad_val] * channels)
+    elif isinstance(pad_val, tuple):
+        assert len(pad_val) == channels, \
+            'Expected the number of elements in tuple to equal the channels ' \
+            'of input image. Found {} vs {}'.format(
+                len(pad_val), channels)
+    else:
+        raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`')
+
+    img_h, img_w = img.shape[:2]
+    y0 = np.random.uniform(img_h)
+    x0 = np.random.uniform(img_w)
+
+    y1 = int(max(0, y0 - cut_h / 2.))
+    x1 = int(max(0, x0 - cut_w / 2.))
+    y2 = min(img_h, y1 + cut_h)
+    x2 = min(img_w, x1 + cut_w)
+
+    if img.ndim == 2:
+        patch_shape = (y2 - y1, x2 - x1)
+    else:
+        patch_shape = (y2 - y1, x2 - x1, channels)
+
+    img_cutout = img.copy()
+    patch = np.array(
+        pad_val, dtype=img.dtype) * np.ones(
+            patch_shape, dtype=img.dtype)
+    img_cutout[y1:y2, x1:x2, ...] = patch
+
+    return img_cutout
+
+
+def _get_shear_matrix(magnitude, direction='horizontal'):
+    """Generate the shear matrix for transformation.
+
+    Args:
+        magnitude (int | float): The magnitude used for shear.
+        direction (str): The flip direction, either "horizontal"
+            or "vertical".
+
+    Returns:
+        ndarray: The shear matrix with dtype float32.
+    """
+    if direction == 'horizontal':
+        shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]])
+    elif direction == 'vertical':
+        shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]])
+    return shear_matrix
+
+
+def imshear(img,
+            magnitude,
+            direction='horizontal',
+            border_value=0,
+            interpolation='bilinear'):
+    """Shear an image.
+
+    Args:
+        img (ndarray): Image to be sheared with format (h, w)
+            or (h, w, c).
+        magnitude (int | float): The magnitude used for shear.
+        direction (str): The flip direction, either "horizontal"
+            or "vertical".
+        border_value (int | tuple[int]): Value used in case of a
+            constant border.
+        interpolation (str): Same as :func:`resize`.
+
+    Returns:
+        ndarray: The sheared image.
+    """
+    assert direction in ['horizontal',
+                         'vertical'], f'Invalid direction: {direction}'
+    height, width = img.shape[:2]
+    if img.ndim == 2:
+        channels = 1
+    elif img.ndim == 3:
+        channels = img.shape[-1]
+    if isinstance(border_value, int):
+        border_value = tuple([border_value] * channels)
+    elif isinstance(border_value, tuple):
+        assert len(border_value) == channels, \
+            'Expected the number of elements in tuple to equal the channels ' \
+            'of input image. 
Found {} vs {}'.format( + len(border_value), channels) + else: + raise ValueError( + f'Invalid type {type(border_value)} for `border_value`') + shear_matrix = _get_shear_matrix(magnitude, direction) + sheared = cv2.warpAffine( + img, + shear_matrix, + (width, height), + # Note case when the number elements in `border_value` + # greater than 3 (e.g. shearing masks whose channels large + # than 3) will raise TypeError in `cv2.warpAffine`. + # Here simply slice the first 3 values in `border_value`. + borderValue=border_value[:3], + flags=cv2_interp_codes[interpolation]) + return sheared + + +def _get_translate_matrix(offset, direction='horizontal'): + """Generate the translate matrix. + + Args: + offset (int | float): The offset used for translate. + direction (str): The translate direction, either + "horizontal" or "vertical". + + Returns: + ndarray: The translate matrix with dtype float32. + """ + if direction == 'horizontal': + translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]]) + elif direction == 'vertical': + translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]]) + return translate_matrix + + +def imtranslate(img, + offset, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Translate an image. + + Args: + img (ndarray): Image to be translated with format + (h, w) or (h, w, c). + offset (int | float): The offset used for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. + interpolation (str): Same as :func:`resize`. + + Returns: + ndarray: The translated image. + """ + assert direction in ['horizontal', + 'vertical'], f'Invalid direction: {direction}' + height, width = img.shape[:2] + if img.ndim == 2: + channels = 1 + elif img.ndim == 3: + channels = img.shape[-1] + if isinstance(border_value, int): + border_value = tuple([border_value] * channels) + elif isinstance(border_value, tuple): + assert len(border_value) == channels, \ + 'Expected the num of elements in tuple equals the channels' \ + 'of input image. Found {} vs {}'.format( + len(border_value), channels) + else: + raise ValueError( + f'Invalid type {type(border_value)} for `border_value`.') + translate_matrix = _get_translate_matrix(offset, direction) + translated = cv2.warpAffine( + img, + translate_matrix, + (width, height), + # Note case when the number elements in `border_value` + # greater than 3 (e.g. translating masks whose channels + # large than 3) will raise TypeError in `cv2.warpAffine`. + # Here simply slice the first 3 values in `border_value`. + borderValue=border_value[:3], + flags=cv2_interp_codes[interpolation]) + return translated diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/io.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/io.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe4400ddc5751cd01a554131b33eca3154e4ca7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/io.py @@ -0,0 +1,258 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
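As a quick orientation before the I/O module, the geometric helpers above compose cleanly. The sketch below is illustrative only: it assumes this vendored package is importable as `custom_mmpkg.custom_mmcv` (i.e. the `src/` directory is on `sys.path`); adjust the import to your setup.

    import numpy as np
    from custom_mmpkg.custom_mmcv.image.geometric import (
        imrescale, imresize, impad_to_multiple, imrotate)

    img = np.zeros((480, 640, 3), dtype=np.uint8)
    resized = imresize(img, (320, 240))               # exact (w, h) target
    rescaled, scale = imrescale(img, (1333, 800), return_scale=True)
    padded = impad_to_multiple(rescaled, divisor=32)  # edges become multiples of 32
    rotated = imrotate(img, 30, auto_bound=True)      # canvas grows to fit the rotation
    assert padded.shape[0] % 32 == 0 and padded.shape[1] % 32 == 0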
+import io +import os.path as osp +from pathlib import Path + +import cv2 +import numpy as np +from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION, + IMREAD_UNCHANGED) + +from custom_mmpkg.custom_mmcv.utils import check_file_exist, is_str, mkdir_or_exist + +try: + from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG +except ImportError: + TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None + +try: + from PIL import Image, ImageOps +except ImportError: + Image = None + +try: + import tifffile +except ImportError: + tifffile = None + +jpeg = None +supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile'] + +imread_flags = { + 'color': IMREAD_COLOR, + 'grayscale': IMREAD_GRAYSCALE, + 'unchanged': IMREAD_UNCHANGED, + 'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR, + 'grayscale_ignore_orientation': + IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE +} + +imread_backend = 'cv2' + + +def use_backend(backend): + """Select a backend for image decoding. + + Args: + backend (str): The image decoding backend type. Options are `cv2`, + `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG) + and `tifffile`. `turbojpeg` is faster but it only supports `.jpeg` + file format. + """ + assert backend in supported_backends + global imread_backend + imread_backend = backend + if imread_backend == 'turbojpeg': + if TurboJPEG is None: + raise ImportError('`PyTurboJPEG` is not installed') + global jpeg + if jpeg is None: + jpeg = TurboJPEG() + elif imread_backend == 'pillow': + if Image is None: + raise ImportError('`Pillow` is not installed') + elif imread_backend == 'tifffile': + if tifffile is None: + raise ImportError('`tifffile` is not installed') + + +def _jpegflag(flag='color', channel_order='bgr'): + channel_order = channel_order.lower() + if channel_order not in ['rgb', 'bgr']: + raise ValueError('channel order must be either "rgb" or "bgr"') + + if flag == 'color': + if channel_order == 'bgr': + return TJPF_BGR + elif channel_order == 'rgb': + return TJCS_RGB + elif flag == 'grayscale': + return TJPF_GRAY + else: + raise ValueError('flag must be "color" or "grayscale"') + + +def _pillow2array(img, flag='color', channel_order='bgr'): + """Convert a pillow image to numpy array. + + Args: + img (:obj:`PIL.Image.Image`): The image loaded using PIL + flag (str): Flags specifying the color type of a loaded image, + candidates are 'color', 'grayscale' and 'unchanged'. + Default to 'color'. + channel_order (str): The channel order of the output image array, + candidates are 'bgr' and 'rgb'. Default to 'bgr'. + + Returns: + np.ndarray: The converted numpy array + """ + channel_order = channel_order.lower() + if channel_order not in ['rgb', 'bgr']: + raise ValueError('channel order must be either "rgb" or "bgr"') + + if flag == 'unchanged': + array = np.array(img) + if array.ndim >= 3 and array.shape[2] >= 3: # color image + array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR + else: + # Handle exif orientation tag + if flag in ['color', 'grayscale']: + img = ImageOps.exif_transpose(img) + # If the image mode is not 'RGB', convert it to 'RGB' first. + if img.mode != 'RGB': + if img.mode != 'LA': + # Most formats except 'LA' can be directly converted to RGB + img = img.convert('RGB') + else: + # When the mode is 'LA', the default conversion will fill in + # the canvas with black, which sometimes shadows black objects + # in the foreground. 
+ # + # Therefore, a random color (124, 117, 104) is used for canvas + img_rgba = img.convert('RGBA') + img = Image.new('RGB', img_rgba.size, (124, 117, 104)) + img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha + if flag in ['color', 'color_ignore_orientation']: + array = np.array(img) + if channel_order != 'rgb': + array = array[:, :, ::-1] # RGB to BGR + elif flag in ['grayscale', 'grayscale_ignore_orientation']: + img = img.convert('L') + array = np.array(img) + else: + raise ValueError( + 'flag must be "color", "grayscale", "unchanged", ' + f'"color_ignore_orientation" or "grayscale_ignore_orientation"' + f' but got {flag}') + return array + + +def imread(img_or_path, flag='color', channel_order='bgr', backend=None): + """Read an image. + + Args: + img_or_path (ndarray or str or Path): Either a numpy array or str or + pathlib.Path. If it is a numpy array (loaded image), then + it will be returned as is. + flag (str): Flags specifying the color type of a loaded image, + candidates are `color`, `grayscale`, `unchanged`, + `color_ignore_orientation` and `grayscale_ignore_orientation`. + By default, `cv2` and `pillow` backend would rotate the image + according to its EXIF info unless called with `unchanged` or + `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend + always ignore image's EXIF info regardless of the flag. + The `turbojpeg` backend only supports `color` and `grayscale`. + channel_order (str): Order of channel, candidates are `bgr` and `rgb`. + backend (str | None): The image decoding backend type. Options are + `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`. + If backend is None, the global imread_backend specified by + ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + ndarray: Loaded image array. + """ + + if backend is None: + backend = imread_backend + if backend not in supported_backends: + raise ValueError(f'backend: {backend} is not supported. Supported ' + "backends are 'cv2', 'turbojpeg', 'pillow'") + if isinstance(img_or_path, Path): + img_or_path = str(img_or_path) + + if isinstance(img_or_path, np.ndarray): + return img_or_path + elif is_str(img_or_path): + check_file_exist(img_or_path, + f'img file does not exist: {img_or_path}') + if backend == 'turbojpeg': + with open(img_or_path, 'rb') as in_file: + img = jpeg.decode(in_file.read(), + _jpegflag(flag, channel_order)) + if img.shape[-1] == 1: + img = img[:, :, 0] + return img + elif backend == 'pillow': + img = Image.open(img_or_path) + img = _pillow2array(img, flag, channel_order) + return img + elif backend == 'tifffile': + img = tifffile.imread(img_or_path) + return img + else: + flag = imread_flags[flag] if is_str(flag) else flag + img = cv2.imread(img_or_path, flag) + if flag == IMREAD_COLOR and channel_order == 'rgb': + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + return img + else: + raise TypeError('"img" must be a numpy array or a str or ' + 'a pathlib.Path object') + + +def imfrombytes(content, flag='color', channel_order='bgr', backend=None): + """Read an image from bytes. + + Args: + content (bytes): Image bytes got from files or other streams. + flag (str): Same as :func:`imread`. + backend (str | None): The image decoding backend type. Options are + `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the + global imread_backend specified by ``mmcv.use_backend()`` will be + used. Default: None. + + Returns: + ndarray: Loaded image array. 
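+
+    Examples:
+        >>> img_path = '/path/of/your/file'  # hypothetical path
+        >>> with open(img_path, 'rb') as f:
+        ...     img_buff = f.read()
+        >>> img = imfrombytes(img_buff)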
+ """ + + if backend is None: + backend = imread_backend + if backend not in supported_backends: + raise ValueError(f'backend: {backend} is not supported. Supported ' + "backends are 'cv2', 'turbojpeg', 'pillow'") + if backend == 'turbojpeg': + img = jpeg.decode(content, _jpegflag(flag, channel_order)) + if img.shape[-1] == 1: + img = img[:, :, 0] + return img + elif backend == 'pillow': + buff = io.BytesIO(content) + img = Image.open(buff) + img = _pillow2array(img, flag, channel_order) + return img + else: + img_np = np.frombuffer(content, np.uint8) + flag = imread_flags[flag] if is_str(flag) else flag + img = cv2.imdecode(img_np, flag) + if flag == IMREAD_COLOR and channel_order == 'rgb': + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + return img + + +def imwrite(img, file_path, params=None, auto_mkdir=True): + """Write image to file. + + Args: + img (ndarray): Image array to be written. + file_path (str): Image file path. + params (None or list): Same as opencv :func:`imwrite` interface. + auto_mkdir (bool): If the parent folder of `file_path` does not exist, + whether to create it automatically. + + Returns: + bool: Successful or not. + """ + if auto_mkdir: + dir_name = osp.abspath(osp.dirname(file_path)) + mkdir_or_exist(dir_name) + return cv2.imwrite(file_path, img, params) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/misc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a1aae4510cdef05b9f61a664818c06760cea77 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/misc.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np + +import custom_mmpkg.custom_mmcv as mmcv + +try: + import torch +except ImportError: + torch = None + + +def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): + """Convert tensor to 3-channel images. + + Args: + tensor (torch.Tensor): Tensor that contains multiple images, shape ( + N, C, H, W). + mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0). + std (tuple[float], optional): Standard deviation of images. + Defaults to (1, 1, 1). + to_rgb (bool, optional): Whether the tensor was converted to RGB + format in the first place. If so, convert it back to BGR. + Defaults to True. + + Returns: + list[np.ndarray]: A list that contains multiple images. + """ + + if torch is None: + raise RuntimeError('pytorch is not installed') + assert torch.is_tensor(tensor) and tensor.ndim == 4 + assert len(mean) == 3 + assert len(std) == 3 + + num_imgs = tensor.size(0) + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + imgs = [] + for img_id in range(num_imgs): + img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) + img = mmcv.imdenormalize( + img, mean, std, to_bgr=to_rgb).astype(np.uint8) + imgs.append(np.ascontiguousarray(img)) + return imgs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/photometric.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/photometric.py new file mode 100644 index 0000000000000000000000000000000000000000..5085d012019c0cbf56f66f421a378278c1a058ae --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/image/photometric.py @@ -0,0 +1,428 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import cv2 +import numpy as np + +from ..utils import is_tuple_of +from .colorspace import bgr2gray, gray2bgr + + +def imnormalize(img, mean, std, to_rgb=True): + """Normalize an image with mean and std. + + Args: + img (ndarray): Image to be normalized. + mean (ndarray): The mean to be used for normalize. + std (ndarray): The std to be used for normalize. + to_rgb (bool): Whether to convert to rgb. + + Returns: + ndarray: The normalized image. + """ + img = img.copy().astype(np.float32) + return imnormalize_(img, mean, std, to_rgb) + + +def imnormalize_(img, mean, std, to_rgb=True): + """Inplace normalize an image with mean and std. + + Args: + img (ndarray): Image to be normalized. + mean (ndarray): The mean to be used for normalize. + std (ndarray): The std to be used for normalize. + to_rgb (bool): Whether to convert to rgb. + + Returns: + ndarray: The normalized image. + """ + # cv2 inplace normalization does not accept uint8 + assert img.dtype != np.uint8 + mean = np.float64(mean.reshape(1, -1)) + stdinv = 1 / np.float64(std.reshape(1, -1)) + if to_rgb: + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace + cv2.subtract(img, mean, img) # inplace + cv2.multiply(img, stdinv, img) # inplace + return img + + +def imdenormalize(img, mean, std, to_bgr=True): + assert img.dtype != np.uint8 + mean = mean.reshape(1, -1).astype(np.float64) + std = std.reshape(1, -1).astype(np.float64) + img = cv2.multiply(img, std) # make a copy + cv2.add(img, mean, img) # inplace + if to_bgr: + cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace + return img + + +def iminvert(img): + """Invert (negate) an image. + + Args: + img (ndarray): Image to be inverted. + + Returns: + ndarray: The inverted image. + """ + return np.full_like(img, 255) - img + + +def solarize(img, thr=128): + """Solarize an image (invert all pixel values above a threshold) + + Args: + img (ndarray): Image to be solarized. + thr (int): Threshold for solarizing (0 - 255). + + Returns: + ndarray: The solarized image. + """ + img = np.where(img < thr, img, 255 - img) + return img + + +def posterize(img, bits): + """Posterize an image (reduce the number of bits for each color channel) + + Args: + img (ndarray): Image to be posterized. + bits (int): Number of bits (1 to 8) to use for posterizing. + + Returns: + ndarray: The posterized image. + """ + shift = 8 - bits + img = np.left_shift(np.right_shift(img, shift), shift) + return img + + +def adjust_color(img, alpha=1, beta=None, gamma=0): + r"""It blends the source image and its gray image: + + .. math:: + output = img * alpha + gray\_img * beta + gamma + + Args: + img (ndarray): The input source image. + alpha (int | float): Weight for the source image. Default 1. + beta (int | float): Weight for the converted gray image. + If None, it's assigned the value (1 - `alpha`). + gamma (int | float): Scalar added to each sum. + Same as :func:`cv2.addWeighted`. Default 0. + + Returns: + ndarray: Colored image which has the same size and dtype as input. + """ + gray_img = bgr2gray(img) + gray_img = np.tile(gray_img[..., None], [1, 1, 3]) + if beta is None: + beta = 1 - alpha + colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) + if not colored_img.dtype == np.uint8: + # Note when the dtype of `img` is not the default `np.uint8` + # (e.g. np.float32), the value in `colored_img` got from cv2 + # is not guaranteed to be in range [0, 255], so here clip + # is needed. 
+ colored_img = np.clip(colored_img, 0, 255) + return colored_img + + +def imequalize(img): + """Equalize the image histogram. + + This function applies a non-linear mapping to the input image, + in order to create a uniform distribution of grayscale values + in the output image. + + Args: + img (ndarray): Image to be equalized. + + Returns: + ndarray: The equalized image. + """ + + def _scale_channel(im, c): + """Scale the data in the corresponding channel.""" + im = im[:, :, c] + # Compute the histogram of the image channel. + histo = np.histogram(im, 256, (0, 255))[0] + # For computing the step, filter out the nonzeros. + nonzero_histo = histo[histo > 0] + step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 + if not step: + lut = np.array(range(256)) + else: + # Compute the cumulative sum, shifted by step // 2 + # and then normalized by step. + lut = (np.cumsum(histo) + (step // 2)) // step + # Shift lut, prepending with 0. + lut = np.concatenate([[0], lut[:-1]], 0) + # handle potential integer overflow + lut[lut > 255] = 255 + # If step is zero, return the original image. + # Otherwise, index from lut. + return np.where(np.equal(step, 0), im, lut[im]) + + # Scales each channel independently and then stacks + # the result. + s1 = _scale_channel(img, 0) + s2 = _scale_channel(img, 1) + s3 = _scale_channel(img, 2) + equalized_img = np.stack([s1, s2, s3], axis=-1) + return equalized_img.astype(img.dtype) + + +def adjust_brightness(img, factor=1.): + """Adjust image brightness. + + This function controls the brightness of an image. An + enhancement factor of 0.0 gives a black image. + A factor of 1.0 gives the original image. This function + blends the source image and the degenerated black image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be brightened. + factor (float): A value controls the enhancement. + Factor 1.0 returns the original image, lower + factors mean less color (brightness, contrast, + etc), and higher values more. Default 1. + + Returns: + ndarray: The brightened image. + """ + degenerated = np.zeros_like(img) + # Note manually convert the dtype to np.float32, to + # achieve as close results as PIL.ImageEnhance.Brightness. + # Set beta=1-factor, and gamma=0 + brightened_img = cv2.addWeighted( + img.astype(np.float32), factor, degenerated.astype(np.float32), + 1 - factor, 0) + brightened_img = np.clip(brightened_img, 0, 255) + return brightened_img.astype(img.dtype) + + +def adjust_contrast(img, factor=1.): + """Adjust image contrast. + + This function controls the contrast of an image. An + enhancement factor of 0.0 gives a solid grey + image. A factor of 1.0 gives the original image. It + blends the source image and the degenerated mean image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be contrasted. BGR order. + factor (float): Same as :func:`mmcv.adjust_brightness`. + + Returns: + ndarray: The contrasted image. + """ + gray_img = bgr2gray(img) + hist = np.histogram(gray_img, 256, (0, 255))[0] + mean = round(np.sum(gray_img) / np.sum(hist)) + degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) + degenerated = gray2bgr(degenerated) + contrasted_img = cv2.addWeighted( + img.astype(np.float32), factor, degenerated.astype(np.float32), + 1 - factor, 0) + contrasted_img = np.clip(contrasted_img, 0, 255) + return contrasted_img.astype(img.dtype) + + +def auto_contrast(img, cutoff=0): + """Auto adjust image contrast. 
+ + This function maximize (normalize) image contrast by first removing cutoff + percent of the lightest and darkest pixels from the histogram and remapping + the image so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + Args: + img (ndarray): Image to be contrasted. BGR order. + cutoff (int | float | tuple): The cutoff percent of the lightest and + darkest pixels to be removed. If given as tuple, it shall be + (low, high). Otherwise, the single value will be used for both. + Defaults to 0. + + Returns: + ndarray: The contrasted image. + """ + + def _auto_contrast_channel(im, c, cutoff): + im = im[:, :, c] + # Compute the histogram of the image channel. + histo = np.histogram(im, 256, (0, 255))[0] + # Remove cut-off percent pixels from histo + histo_sum = np.cumsum(histo) + cut_low = histo_sum[-1] * cutoff[0] // 100 + cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 + histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low + histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) + + # Compute mapping + low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] + # If all the values have been cut off, return the origin img + if low >= high: + return im + scale = 255.0 / (high - low) + offset = -low * scale + lut = np.array(range(256)) + lut = lut * scale + offset + lut = np.clip(lut, 0, 255) + return lut[im] + + if isinstance(cutoff, (int, float)): + cutoff = (cutoff, cutoff) + else: + assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ + f'float or tuple, but got {type(cutoff)} instead.' + # Auto adjusts contrast for each channel independently and then stacks + # the result. + s1 = _auto_contrast_channel(img, 0, cutoff) + s2 = _auto_contrast_channel(img, 1, cutoff) + s3 = _auto_contrast_channel(img, 2, cutoff) + contrasted_img = np.stack([s1, s2, s3], axis=-1) + return contrasted_img.astype(img.dtype) + + +def adjust_sharpness(img, factor=1., kernel=None): + """Adjust image sharpness. + + This function controls the sharpness of an image. An + enhancement factor of 0.0 gives a blurred image. A + factor of 1.0 gives the original image. And a factor + of 2.0 gives a sharpened image. It blends the source + image and the degenerated mean image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be sharpened. BGR order. + factor (float): Same as :func:`mmcv.adjust_brightness`. + kernel (np.ndarray, optional): Filter kernel to be applied on the img + to obtain the degenerated img. Defaults to None. + + Note: + No value sanity check is enforced on the kernel set by users. So with + an inappropriate kernel, the ``adjust_sharpness`` may fail to perform + the function its name indicates but end up performing whatever + transform determined by the kernel. + + Returns: + ndarray: The sharpened image. + """ + + if kernel is None: + # adopted from PIL.ImageFilter.SMOOTH + kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 + assert isinstance(kernel, np.ndarray), \ + f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' + assert kernel.ndim == 2, \ + f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' 
+
+    # Convolve with the smoothing kernel to obtain the degenerated image,
+    # then linearly blend it with the original according to `factor`.
+    degenerated = cv2.filter2D(img, -1, kernel)
+    sharpened_img = cv2.addWeighted(
+        img.astype(np.float32), factor, degenerated.astype(np.float32),
+        1 - factor, 0)
+    sharpened_img = np.clip(sharpened_img, 0, 255)
+    return sharpened_img.astype(img.dtype)
+
+
+def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
+    """AlexNet-style PCA jitter.
+
+    This data augmentation is proposed in `ImageNet Classification with Deep
+    Convolutional Neural Networks` (Krizhevsky et al., 2012).
+
+    Args:
+        img (ndarray): Image whose lighting is to be adjusted. BGR order.
+        eigval (ndarray): the eigenvalues of the covariance matrix of pixel
+            values.
+        eigvec (ndarray): the eigenvectors of the covariance matrix of pixel
+            values.
+        alphastd (float): The standard deviation for distribution of alpha.
+            Defaults to 0.1.
+        to_rgb (bool): Whether to convert img to rgb.
+
+    Returns:
+        ndarray: The adjusted image.
+    """
+    assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \
+        f'eigval and eigvec should both be of type np.ndarray, got ' \
+        f'{type(eigval)} and {type(eigvec)} instead.'
+
+    assert eigval.ndim == 1 and eigvec.ndim == 2
+    assert eigvec.shape == (3, eigval.shape[0])
+    n_eigval = eigval.shape[0]
+    assert isinstance(alphastd, float), 'alphastd should be of type float, ' \
+        f'got {type(alphastd)} instead.'
+
+    img = img.copy().astype(np.float32)
+    if to_rgb:
+        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
+
+    alpha = np.random.normal(0, alphastd, n_eigval)
+    alter = eigvec \
+        * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \
+        * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval))
+    alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape)
+    img_adjusted = img + alter
+    return img_adjusted
+
+
+def lut_transform(img, lut_table):
+    """Transform array by look-up table.
+
+    The function lut_transform fills the output array with values from the
+    look-up table. Indices of the entries are taken from the input array.
+
+    Args:
+        img (ndarray): Image to be transformed.
+        lut_table (ndarray): look-up table of 256 elements; in case of
+            multi-channel input array, the table should either have a single
+            channel (in this case the same table is used for all channels) or
+            the same number of channels as in the input array.
+
+    Returns:
+        ndarray: The transformed image.
+    """
+    assert isinstance(img, np.ndarray)
+    assert 0 <= np.min(img) and np.max(img) <= 255
+    assert isinstance(lut_table, np.ndarray)
+    assert lut_table.shape == (256, )
+
+    return cv2.LUT(np.array(img, dtype=np.uint8), lut_table)
+
+
+def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
+    """Use CLAHE method to process the image.
+
+    See `ZUIDERVELD, K. Contrast Limited Adaptive Histogram Equalization[J].
+    Graphics Gems, 1994: 474-485.` for more information.
+
+    Args:
+        img (ndarray): Image to be processed.
+        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
+        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
+            Input image will be divided into equally sized rectangular tiles.
+            It defines the number of tiles in row and column. Default: (8, 8).
+
+    Returns:
+        ndarray: The processed image.
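+
+    Example:
+        >>> img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
+        >>> out = clahe(img, clip_limit=40.0, tile_grid_size=(8, 8))
+        >>> out.shape
+        (64, 64)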
+ """ + assert isinstance(img, np.ndarray) + assert img.ndim == 2 + assert isinstance(clip_limit, (float, int)) + assert is_tuple_of(tile_grid_size, int) + assert len(tile_grid_size) == 2 + + clahe = cv2.createCLAHE(clip_limit, tile_grid_size) + return clahe.apply(np.array(img, dtype=np.uint8)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/deprecated.json b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/deprecated.json new file mode 100644 index 0000000000000000000000000000000000000000..25cf6f28caecc22a77e3136fefa6b8dfc0e6cb5b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/deprecated.json @@ -0,0 +1,6 @@ +{ + "resnet50_caffe": "detectron/resnet50_caffe", + "resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr", + "resnet101_caffe": "detectron/resnet101_caffe", + "resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr" +} diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/mmcls.json b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/mmcls.json new file mode 100644 index 0000000000000000000000000000000000000000..bdb311d9fe6d9f317290feedc9e37236c6cf6e8f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/mmcls.json @@ -0,0 +1,31 @@ +{ + "vgg11": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth", + "vgg13": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth", + "vgg16": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth", + "vgg19": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth", + "vgg11_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth", + "vgg13_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth", + "vgg16_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth", + "vgg19_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth", + "resnet18": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth", + "resnet34": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth", + "resnet50": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth", + "resnet101": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth", + "resnet152": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth", + "resnet50_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_batch256_imagenet_20200708-1ad0ce94.pth", + "resnet101_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_batch256_imagenet_20200708-9cb302ef.pth", + "resnet152_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_batch256_imagenet_20200708-e79cb6a2.pth", + "resnext50_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth", + "resnext101_32x4d": 
"https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth", + "resnext101_32x8d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth", + "resnext152_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth", + "se-resnet50": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth", + "se-resnet101": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth", + "resnest50": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth", + "resnest101": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth", + "resnest200": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth", + "resnest269": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth", + "shufflenet_v1": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth", + "shufflenet_v2": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth", + "mobilenet_v2": "https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth" +} diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/open_mmlab.json b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/open_mmlab.json new file mode 100644 index 0000000000000000000000000000000000000000..8311db4feef92faa0841c697d75efbee8430c3a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/model_zoo/open_mmlab.json @@ -0,0 +1,50 @@ +{ + "vgg16_caffe": "https://download.openmmlab.com/pretrain/third_party/vgg16_caffe-292e1171.pth", + "detectron/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth", + "detectron2/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth", + "detectron/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth", + "detectron2/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_msra-6cc46731.pth", + "detectron2/resnext101_32x8d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth", + "resnext50_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext50-32x4d-0ab1a123.pth", + "resnext101_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d-a5af3160.pth", + "resnext101_64x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth", + "contrib/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth", + "detectron/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn-9186a21c.pth", + "detectron/resnet101_gn": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn-cac0ab98.pth", + "jhu/resnet50_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_ws-15beedd8.pth", + "jhu/resnet101_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth", + 
"jhu/resnext50_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth", + "jhu/resnext101_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth", + "jhu/resnext50_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth", + "jhu/resnext101_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth", + "msra/hrnetv2_w18_small": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18_small-b5a04e21.pth", + "msra/hrnetv2_w18": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18-00eb2006.pth", + "msra/hrnetv2_w32": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth", + "msra/hrnetv2_w40": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w40-ed0b031c.pth", + "msra/hrnetv2_w48": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w48-d2186c55.pth", + "bninception_caffe": "https://download.openmmlab.com/pretrain/third_party/bn_inception_caffe-ed2e8665.pth", + "kin400/i3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/i3d_r50_f32s2_k400-2c57e077.pth", + "kin400/nl3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/nl3d_r50_f32s2_k400-fa7e7caa.pth", + "res2net101_v1d_26w_4s": "https://download.openmmlab.com/pretrain/third_party/res2net101_v1d_26w_4s_mmdetv2-f0a600f9.pth", + "regnetx_400mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_400mf-a5b10d96.pth", + "regnetx_800mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth", + "regnetx_1.6gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_1.6gf-5791c176.pth", + "regnetx_3.2gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth", + "regnetx_4.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_4.0gf-a88f671e.pth", + "regnetx_6.4gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_6.4gf-006af45d.pth", + "regnetx_8.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_8.0gf-3c68abe7.pth", + "regnetx_12gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_12gf-4c2a3350.pth", + "resnet18_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet18_v1c-b5776b93.pth", + "resnet50_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet50_v1c-2cccc1ad.pth", + "resnet101_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet101_v1c-e67eebb6.pth", + "mmedit/vgg16": "https://download.openmmlab.com/mmediting/third_party/vgg_state_dict.pth", + "mmedit/res34_en_nomixup": "https://download.openmmlab.com/mmediting/third_party/model_best_resnet34_En_nomixup.pth", + "mmedit/mobilenet_v2": "https://download.openmmlab.com/mmediting/third_party/mobilenet_v2.pth", + "contrib/mobilenet_v3_large": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_large-bc2c3fd3.pth", + "contrib/mobilenet_v3_small": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_small-47085aa1.pth", + "resnest50": "https://download.openmmlab.com/pretrain/third_party/resnest50_d2-7497a55b.pth", + "resnest101": "https://download.openmmlab.com/pretrain/third_party/resnest101_d2-f3b931b2.pth", + "resnest200": "https://download.openmmlab.com/pretrain/third_party/resnest200_d2-ca88e41f.pth", + "darknet53": "https://download.openmmlab.com/pretrain/third_party/darknet53-a628ea1b.pth", + "mmdet/mobilenet_v2": 
"https://download.openmmlab.com/mmdetection/v2.0/third_party/mobilenet_v2_batch256_imagenet-ff34753d.pth" +} diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..999e090a458ee148ceca0649f1e3806a40e909bd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/__init__.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .assign_score_withk import assign_score_withk +from .ball_query import ball_query +from .bbox import bbox_overlaps +from .border_align import BorderAlign, border_align +from .box_iou_rotated import box_iou_rotated +from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive +from .cc_attention import CrissCrossAttention +from .contour_expand import contour_expand +from .corner_pool import CornerPool +from .correlation import Correlation +from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d +from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack, + ModulatedDeformRoIPoolPack, deform_roi_pool) +from .deprecated_wrappers import Conv2d_deprecated as Conv2d +from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d +from .deprecated_wrappers import Linear_deprecated as Linear +from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d +from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss, + sigmoid_focal_loss, softmax_focal_loss) +from .furthest_point_sample import (furthest_point_sample, + furthest_point_sample_with_dist) +from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu +from .gather_points import gather_points +from .group_points import GroupAll, QueryAndGroup, grouping_operation +from .info import (get_compiler_version, get_compiling_cuda_version, + get_onnxruntime_op_path) +from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev +from .knn import knn +from .masked_conv import MaskedConv2d, masked_conv2d +from .modulated_deform_conv import (ModulatedDeformConv2d, + ModulatedDeformConv2dPack, + modulated_deform_conv2d) +from .multi_scale_deform_attn import MultiScaleDeformableAttention +from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms +from .pixel_group import pixel_group +from .point_sample import (SimpleRoIAlign, point_sample, + rel_roi_point_to_rel_img_point) +from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu, + points_in_boxes_part) +from .points_sampler import PointsSampler +from .psa_mask import PSAMask +from .roi_align import RoIAlign, roi_align +from .roi_align_rotated import RoIAlignRotated, roi_align_rotated +from .roi_pool import RoIPool, roi_pool +from .roiaware_pool3d import RoIAwarePool3d +from .roipoint_pool3d import RoIPointPool3d +from .saconv import SAConv2d +from .scatter_points import DynamicScatter, dynamic_scatter +from .sync_bn import SyncBatchNorm +from .three_interpolate import three_interpolate +from .three_nn import three_nn +from .tin_shift import TINShift, tin_shift +from .upfirdn2d import upfirdn2d +from .voxelize import Voxelization, voxelization + +__all__ = [ + 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe', + 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack', + 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack', + 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss', + 'SoftmaxFocalLoss', 
+    'sigmoid_focal_loss', 'softmax_focal_loss',
+    'get_compiler_version', 'get_compiling_cuda_version',
+    'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
+    'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
+    'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
+    'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
+    'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
+    'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
+    'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
+    'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
+    'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
+    'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
+    'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
+    'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
+    'border_align', 'gather_points', 'furthest_point_sample',
+    'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
+    'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
+    'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
+    'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
+]
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/assign_score_withk.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/assign_score_withk.py
new file mode 100644
index 0000000000000000000000000000000000000000..4906adaa2cffd1b46912fbe7d4f87ef2f9fa0012
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/assign_score_withk.py
@@ -0,0 +1,123 @@
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['assign_score_withk_forward', 'assign_score_withk_backward'])
+
+
+class AssignScoreWithK(Function):
+    r"""Perform weighted sum to generate output features according to scores.
+    Modified from `PAConv <https://arxiv.org/abs/2103.14635>`_.
+
+    This is a memory-efficient CUDA implementation of the assign_scores
+    operation, which first transforms all point features with the weight
+    bank, then assembles neighbor features with ``knn_idx`` and performs a
+    weighted sum using ``scores``.
+
+    See the `paper <https://arxiv.org/abs/2103.14635>`_ appendix Sec. D for
+    more detailed descriptions.
+
+    Note:
+        This implementation assumes using ``neighbor`` kernel input, which is
+        (point_features - center_features, point_features).
+        See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
+        pointnet2/paconv.py#L128 for more details.
+    """
+
+    @staticmethod
+    def forward(ctx,
+                scores,
+                point_features,
+                center_features,
+                knn_idx,
+                aggregate='sum'):
+        """
+        Args:
+            scores (torch.Tensor): (B, npoint, K, M), predicted scores to
+                aggregate weight matrices in the weight bank.
+                ``npoint`` is the number of sampled centers.
+                ``K`` is the number of queried neighbors.
+                ``M`` is the number of weight matrices in the weight bank.
+            point_features (torch.Tensor): (B, N, M, out_dim)
+                Pre-computed point features to be aggregated.
+            center_features (torch.Tensor): (B, N, M, out_dim)
+                Pre-computed center features to be aggregated.
+            knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
+                We assume the first idx in each row is the idx of the center.
+            aggregate (str, optional): Aggregation method.
+                Can be 'sum', 'avg' or 'max'. Default: 'sum'.
+
+        Returns:
+            torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
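+
+        Example (illustrative sketch, not from the original docstring; it
+        assumes the compiled ``_ext`` CUDA extension and CUDA tensors, with
+        shapes chosen arbitrarily):
+
+            >>> import torch
+            >>> B, N, npoint, K, M, out_dim = 2, 64, 16, 8, 4, 32
+            >>> scores = torch.rand(B, npoint, K, M).cuda()
+            >>> point_feats = torch.rand(B, N, M, out_dim).cuda()
+            >>> center_feats = torch.rand(B, N, M, out_dim).cuda()
+            >>> knn_idx = torch.randint(0, N, (B, npoint, K)).cuda()
+            >>> out = assign_score_withk(scores, point_feats, center_feats,
+            ...                          knn_idx, 'sum')
+            >>> tuple(out.shape)  # (B, out_dim, npoint, K)
+            (2, 32, 16, 8)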
+ """ + agg = {'sum': 0, 'avg': 1, 'max': 2} + + B, N, M, out_dim = point_features.size() + _, npoint, K, _ = scores.size() + + output = point_features.new_zeros((B, out_dim, npoint, K)) + ext_module.assign_score_withk_forward( + point_features.contiguous(), + center_features.contiguous(), + scores.contiguous(), + knn_idx.contiguous(), + output, + B=B, + N0=N, + N1=npoint, + M=M, + K=K, + O=out_dim, + aggregate=agg[aggregate]) + + ctx.save_for_backward(output, point_features, center_features, scores, + knn_idx) + ctx.agg = agg[aggregate] + + return output + + @staticmethod + def backward(ctx, grad_out): + """ + Args: + grad_out (torch.Tensor): (B, out_dim, npoint, K) + + Returns: + grad_scores (torch.Tensor): (B, npoint, K, M) + grad_point_features (torch.Tensor): (B, N, M, out_dim) + grad_center_features (torch.Tensor): (B, N, M, out_dim) + """ + _, point_features, center_features, scores, knn_idx = ctx.saved_tensors + + agg = ctx.agg + + B, N, M, out_dim = point_features.size() + _, npoint, K, _ = scores.size() + + grad_point_features = point_features.new_zeros(point_features.shape) + grad_center_features = center_features.new_zeros(center_features.shape) + grad_scores = scores.new_zeros(scores.shape) + + ext_module.assign_score_withk_backward( + grad_out.contiguous(), + point_features.contiguous(), + center_features.contiguous(), + scores.contiguous(), + knn_idx.contiguous(), + grad_point_features, + grad_center_features, + grad_scores, + B=B, + N0=N, + N1=npoint, + M=M, + K=K, + O=out_dim, + aggregate=agg) + + return grad_scores, grad_point_features, \ + grad_center_features, None, None + + +assign_score_withk = AssignScoreWithK.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/ball_query.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/ball_query.py new file mode 100644 index 0000000000000000000000000000000000000000..d0466847c6e5c1239e359a0397568413ebc1504a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/ball_query.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['ball_query_forward']) + + +class BallQuery(Function): + """Find nearby points in spherical space.""" + + @staticmethod + def forward(ctx, min_radius: float, max_radius: float, sample_num: int, + xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor: + """ + Args: + min_radius (float): minimum radius of the balls. + max_radius (float): maximum radius of the balls. + sample_num (int): maximum number of features in the balls. + xyz (Tensor): (B, N, 3) xyz coordinates of the features. + center_xyz (Tensor): (B, npoint, 3) centers of the ball query. + + Returns: + Tensor: (B, npoint, nsample) tensor with the indices of + the features that form the query balls. 
+ """ + assert center_xyz.is_contiguous() + assert xyz.is_contiguous() + assert min_radius < max_radius + + B, N, _ = xyz.size() + npoint = center_xyz.size(1) + idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int) + + ext_module.ball_query_forward( + center_xyz, + xyz, + idx, + b=B, + n=N, + m=npoint, + min_radius=min_radius, + max_radius=max_radius, + nsample=sample_num) + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(idx) + return idx + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/bbox.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/bbox.py new file mode 100644 index 0000000000000000000000000000000000000000..0c4d58b6c91f652933974f519acd3403a833e906 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/bbox.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['bbox_overlaps']) + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0): + """Calculate overlap between two set of bboxes. + + If ``aligned`` is ``False``, then calculate the ious between each bbox + of bboxes1 and bboxes2, otherwise the ious between each aligned pair of + bboxes1 and bboxes2. + + Args: + bboxes1 (Tensor): shape (m, 4) in format or empty. + bboxes2 (Tensor): shape (n, 4) in format or empty. + If aligned is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union) or iof (intersection over + foreground). + + Returns: + ious(Tensor): shape (m, n) if aligned == False else shape (m, 1) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> bbox_overlaps(bboxes1, bboxes2) + tensor([[0.5000, 0.0000, 0.0000], + [0.0000, 0.0000, 1.0000], + [0.0000, 0.0000, 0.0000]]) + + Example: + >>> empty = torch.FloatTensor([]) + >>> nonempty = torch.FloatTensor([ + >>> [0, 0, 10, 9], + >>> ]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + mode_dict = {'iou': 0, 'iof': 1} + assert mode in mode_dict.keys() + mode_flag = mode_dict[mode] + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + assert offset == 1 or offset == 0 + + rows = bboxes1.size(0) + cols = bboxes2.size(0) + if aligned: + assert rows == cols + + if rows * cols == 0: + return bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols) + + if aligned: + ious = bboxes1.new_zeros(rows) + else: + ious = bboxes1.new_zeros((rows, cols)) + ext_module.bbox_overlaps( + bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset) + return ious diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/border_align.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/border_align.py new file mode 100644 index 0000000000000000000000000000000000000000..ff305be328e9b0a15e1bbb5e6b41beb940f55c81 --- /dev/null +++ 
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/border_align.py
@@ -0,0 +1,109 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# modified from
+# https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/border_align.py
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['border_align_forward', 'border_align_backward'])
+
+
+class BorderAlignFunction(Function):
+
+    @staticmethod
+    def symbolic(g, input, boxes, pool_size):
+        return g.op(
+            'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)
+
+    @staticmethod
+    def forward(ctx, input, boxes, pool_size):
+        ctx.pool_size = pool_size
+        ctx.input_shape = input.size()
+
+        assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]'
+        assert boxes.size(2) == 4, \
+            'the last dimension of boxes must be (x1, y1, x2, y2)'
+        assert input.size(1) % 4 == 0, \
+            'the channel for input feature must be divisible by factor 4'
+
+        # [B, C//4, H*W, 4]
+        output_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4)
+        output = input.new_zeros(output_shape)
+        # `argmax_idx` only used for backward
+        argmax_idx = input.new_zeros(output_shape).to(torch.int)
+
+        ext_module.border_align_forward(
+            input, boxes, output, argmax_idx, pool_size=ctx.pool_size)
+
+        ctx.save_for_backward(boxes, argmax_idx)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        boxes, argmax_idx = ctx.saved_tensors
+        grad_input = grad_output.new_zeros(ctx.input_shape)
+        # a complex head architecture may leave grad_output non-contiguous
+        grad_output = grad_output.contiguous()
+        ext_module.border_align_backward(
+            grad_output,
+            boxes,
+            argmax_idx,
+            grad_input,
+            pool_size=ctx.pool_size)
+        return grad_input, None, None
+
+
+border_align = BorderAlignFunction.apply
+
+
+class BorderAlign(nn.Module):
+    r"""Border align pooling layer.
+
+    Applies border_align over the input feature based on predicted bboxes.
+    The details were described in the paper
+    `BorderDet: Border Feature for Dense Object Detection
+    <https://arxiv.org/abs/2007.11056>`_.
+
+    For each border line (e.g. top, left, bottom or right) of each box,
+    border_align does the following:
+        1. uniformly samples `pool_size`+1 positions on this line, including \
+            the start and end points.
+        2. the corresponding features on these points are computed by \
+            bilinear interpolation.
+        3. max pooling over all the `pool_size`+1 positions is used for \
+            computing the pooled feature.
+
+    Args:
+        pool_size (int): number of positions sampled over the boxes' borders
+            (e.g. top, bottom, left, right).
+
+    """
+
+    def __init__(self, pool_size):
+        super(BorderAlign, self).__init__()
+        self.pool_size = pool_size
+
+    def forward(self, input, boxes):
+        """
+        Args:
+            input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
+                [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
+                right features respectively.
+            boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2).
+
+        Returns:
+            Tensor: Pooled features with shape [N,C,H*W,4]. The order is
+                (top,left,bottom,right) for the last dimension.
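+
+        Example (illustrative sketch, not from the original docstring; it
+        assumes the compiled ``_ext`` CUDA extension and CUDA tensors):
+
+            >>> feat = torch.rand(1, 4 * 16, 10, 10).cuda()   # 4C with C=16
+            >>> boxes = torch.tensor([[[1., 1., 8., 8.]]]).cuda()
+            >>> boxes = boxes.repeat(1, 100, 1)               # H*W = 100
+            >>> pooled = BorderAlign(pool_size=3)(feat, boxes)
+            >>> tuple(pooled.shape)  # (N, C, H*W, 4)
+            (1, 16, 100, 4)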
+ """ + return border_align(input, boxes, self.pool_size) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(pool_size={self.pool_size})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/box_iou_rotated.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/box_iou_rotated.py new file mode 100644 index 0000000000000000000000000000000000000000..2d78015e9c2a9e7a52859b4e18f84a9aa63481a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/box_iou_rotated.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['box_iou_rotated']) + + +def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False): + """Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + If ``aligned`` is ``False``, then calculate the ious between each bbox + of bboxes1 and bboxes2, otherwise the ious between each aligned pair of + bboxes1 and bboxes2. + + Arguments: + boxes1 (Tensor): rotated bboxes 1. \ + It has shape (N, 5), indicating (x, y, w, h, theta) for each row. + Note that theta is in radian. + boxes2 (Tensor): rotated bboxes 2. \ + It has shape (M, 5), indicating (x, y, w, h, theta) for each row. + Note that theta is in radian. + mode (str): "iou" (intersection over union) or iof (intersection over + foreground). + + Returns: + ious(Tensor): shape (N, M) if aligned == False else shape (N,) + """ + assert mode in ['iou', 'iof'] + mode_dict = {'iou': 0, 'iof': 1} + mode_flag = mode_dict[mode] + rows = bboxes1.size(0) + cols = bboxes2.size(0) + if aligned: + ious = bboxes1.new_zeros(rows) + else: + ious = bboxes1.new_zeros((rows * cols)) + bboxes1 = bboxes1.contiguous() + bboxes2 = bboxes2.contiguous() + ext_module.box_iou_rotated( + bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned) + if not aligned: + ious = ious.view(rows, cols) + return ious diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/carafe.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/carafe.py new file mode 100644 index 0000000000000000000000000000000000000000..5154cb3abfccfbbe0a1b2daa67018dbf80aaf6d2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/carafe.py @@ -0,0 +1,287 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Function +from torch.nn.modules.module import Module + +from ..cnn import UPSAMPLE_LAYERS, normal_init, xavier_init +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'carafe_naive_forward', 'carafe_naive_backward', 'carafe_forward', + 'carafe_backward' +]) + + +class CARAFENaiveFunction(Function): + + @staticmethod + def symbolic(g, features, masks, kernel_size, group_size, scale_factor): + return g.op( + 'mmcv::MMCVCARAFENaive', + features, + masks, + kernel_size_i=kernel_size, + group_size_i=group_size, + scale_factor_f=scale_factor) + + @staticmethod + def forward(ctx, features, masks, kernel_size, group_size, scale_factor): + assert scale_factor >= 1 + assert masks.size(1) == kernel_size * kernel_size * group_size + assert masks.size(-1) == features.size(-1) * scale_factor + assert masks.size(-2) == features.size(-2) * scale_factor + assert features.size(1) % group_size == 0 + assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1 + ctx.kernel_size = kernel_size + ctx.group_size = group_size + ctx.scale_factor = scale_factor + ctx.feature_size = features.size() + ctx.mask_size = masks.size() + + n, c, h, w = features.size() + output = features.new_zeros((n, c, h * scale_factor, w * scale_factor)) + ext_module.carafe_naive_forward( + features, + masks, + output, + kernel_size=kernel_size, + group_size=group_size, + scale_factor=scale_factor) + + if features.requires_grad or masks.requires_grad: + ctx.save_for_backward(features, masks) + return output + + @staticmethod + def backward(ctx, grad_output): + assert grad_output.is_cuda + + features, masks = ctx.saved_tensors + kernel_size = ctx.kernel_size + group_size = ctx.group_size + scale_factor = ctx.scale_factor + + grad_input = torch.zeros_like(features) + grad_masks = torch.zeros_like(masks) + ext_module.carafe_naive_backward( + grad_output.contiguous(), + features, + masks, + grad_input, + grad_masks, + kernel_size=kernel_size, + group_size=group_size, + scale_factor=scale_factor) + + return grad_input, grad_masks, None, None, None + + +carafe_naive = CARAFENaiveFunction.apply + + +class CARAFENaive(Module): + + def __init__(self, kernel_size, group_size, scale_factor): + super(CARAFENaive, self).__init__() + + assert isinstance(kernel_size, int) and isinstance( + group_size, int) and isinstance(scale_factor, int) + self.kernel_size = kernel_size + self.group_size = group_size + self.scale_factor = scale_factor + + def forward(self, features, masks): + return carafe_naive(features, masks, self.kernel_size, self.group_size, + self.scale_factor) + + +class CARAFEFunction(Function): + + @staticmethod + def symbolic(g, features, masks, kernel_size, group_size, scale_factor): + return g.op( + 'mmcv::MMCVCARAFE', + features, + masks, + kernel_size_i=kernel_size, + group_size_i=group_size, + scale_factor_f=scale_factor) + + @staticmethod + def forward(ctx, features, masks, kernel_size, group_size, scale_factor): + assert scale_factor >= 1 + assert masks.size(1) == kernel_size * kernel_size * group_size + assert masks.size(-1) == features.size(-1) * scale_factor + assert masks.size(-2) == features.size(-2) * scale_factor + assert features.size(1) % group_size == 0 + assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1 + ctx.kernel_size = kernel_size + ctx.group_size = group_size + ctx.scale_factor = scale_factor + ctx.feature_size = features.size() + ctx.mask_size = masks.size() + + n, c, h, w = features.size() + 
output = features.new_zeros((n, c, h * scale_factor, w * scale_factor)) + routput = features.new_zeros(output.size(), requires_grad=False) + rfeatures = features.new_zeros(features.size(), requires_grad=False) + rmasks = masks.new_zeros(masks.size(), requires_grad=False) + ext_module.carafe_forward( + features, + masks, + rfeatures, + routput, + rmasks, + output, + kernel_size=kernel_size, + group_size=group_size, + scale_factor=scale_factor) + + if features.requires_grad or masks.requires_grad: + ctx.save_for_backward(features, masks, rfeatures) + return output + + @staticmethod + def backward(ctx, grad_output): + assert grad_output.is_cuda + + features, masks, rfeatures = ctx.saved_tensors + kernel_size = ctx.kernel_size + group_size = ctx.group_size + scale_factor = ctx.scale_factor + + rgrad_output = torch.zeros_like(grad_output, requires_grad=False) + rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False) + rgrad_input = torch.zeros_like(features, requires_grad=False) + rgrad_masks = torch.zeros_like(masks, requires_grad=False) + grad_input = torch.zeros_like(features, requires_grad=False) + grad_masks = torch.zeros_like(masks, requires_grad=False) + ext_module.carafe_backward( + grad_output.contiguous(), + rfeatures, + masks, + rgrad_output, + rgrad_input_hs, + rgrad_input, + rgrad_masks, + grad_input, + grad_masks, + kernel_size=kernel_size, + group_size=group_size, + scale_factor=scale_factor) + return grad_input, grad_masks, None, None, None + + +carafe = CARAFEFunction.apply + + +class CARAFE(Module): + """ CARAFE: Content-Aware ReAssembly of FEatures + + Please refer to https://arxiv.org/abs/1905.02188 for more details. + + Args: + kernel_size (int): reassemble kernel size + group_size (int): reassemble group size + scale_factor (int): upsample ratio + + Returns: + upsampled feature map + """ + + def __init__(self, kernel_size, group_size, scale_factor): + super(CARAFE, self).__init__() + + assert isinstance(kernel_size, int) and isinstance( + group_size, int) and isinstance(scale_factor, int) + self.kernel_size = kernel_size + self.group_size = group_size + self.scale_factor = scale_factor + + def forward(self, features, masks): + return carafe(features, masks, self.kernel_size, self.group_size, + self.scale_factor) + + +@UPSAMPLE_LAYERS.register_module(name='carafe') +class CARAFEPack(nn.Module): + """A unified package of CARAFE upsampler that contains: 1) channel + compressor 2) content encoder 3) CARAFE op. + + Official implementation of ICCV 2019 paper + CARAFE: Content-Aware ReAssembly of FEatures + Please refer to https://arxiv.org/abs/1905.02188 for more details. 
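+
+    The forward pass compresses the input channels, predicts a reassembly
+    kernel for every output location with the content encoder, normalizes
+    each kernel with a softmax (see ``kernel_normalizer``), and then applies
+    the CARAFE op to reassemble the upsampled feature map.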
+
+    Args:
+        channels (int): input feature channels
+        scale_factor (int): upsample ratio
+        up_kernel (int): kernel size of CARAFE op
+        up_group (int): group size of CARAFE op
+        encoder_kernel (int): kernel size of content encoder
+        encoder_dilation (int): dilation of content encoder
+        compressed_channels (int): output channels of the channel compressor
+
+    Returns:
+        upsampled feature map
+    """
+
+    def __init__(self,
+                 channels,
+                 scale_factor,
+                 up_kernel=5,
+                 up_group=1,
+                 encoder_kernel=3,
+                 encoder_dilation=1,
+                 compressed_channels=64):
+        super(CARAFEPack, self).__init__()
+        self.channels = channels
+        self.scale_factor = scale_factor
+        self.up_kernel = up_kernel
+        self.up_group = up_group
+        self.encoder_kernel = encoder_kernel
+        self.encoder_dilation = encoder_dilation
+        self.compressed_channels = compressed_channels
+        self.channel_compressor = nn.Conv2d(channels, self.compressed_channels,
+                                            1)
+        self.content_encoder = nn.Conv2d(
+            self.compressed_channels,
+            self.up_kernel * self.up_kernel * self.up_group *
+            self.scale_factor * self.scale_factor,
+            self.encoder_kernel,
+            padding=int((self.encoder_kernel - 1) * self.encoder_dilation / 2),
+            dilation=self.encoder_dilation,
+            groups=1)
+        self.init_weights()
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m, distribution='uniform')
+        normal_init(self.content_encoder, std=0.001)
+
+    def kernel_normalizer(self, mask):
+        mask = F.pixel_shuffle(mask, self.scale_factor)
+        n, mask_c, h, w = mask.size()
+        # use float division explicitly,
+        # to avoid inconsistency while exporting to onnx
+        mask_channel = int(mask_c / float(self.up_kernel**2))
+        mask = mask.view(n, mask_channel, -1, h, w)
+
+        mask = F.softmax(mask, dim=2, dtype=mask.dtype)
+        mask = mask.view(n, mask_c, h, w).contiguous()
+
+        return mask
+
+    def feature_reassemble(self, x, mask):
+        x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
+        return x
+
+    def forward(self, x):
+        compressed_x = self.channel_compressor(x)
+        mask = self.content_encoder(compressed_x)
+        mask = self.kernel_normalizer(mask)
+
+        x = self.feature_reassemble(x, mask)
+        return x
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/cc_attention.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/cc_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6868974cae6e5a7b9a6841845f9fca909a27155
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/cc_attention.py
@@ -0,0 +1,83 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from custom_mmpkg.custom_mmcv.cnn import PLUGIN_LAYERS, Scale
+
+
+def NEG_INF_DIAG(n, device):
+    """Returns a diagonal matrix of size [n, n].
+
+    The diagonal entries are all ``-inf``. This avoids counting the
+    overlapping element of the criss-cross attention twice.
+    """
+    return torch.diag(torch.tensor(float('-inf')).to(device).repeat(n), 0)
+
+
+@PLUGIN_LAYERS.register_module()
+class CrissCrossAttention(nn.Module):
+    """Criss-Cross Attention Module.
+
+    .. note::
+        Before v1.3.13, we used a CUDA op. Since v1.3.13, we have switched
+        to a pure PyTorch, equivalent implementation. For more
+        details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.
+ + Speed comparison for one forward pass + + - Input size: [2,512,97,97] + - Device: 1 NVIDIA GeForce RTX 2080 Ti + + +-----------------------+---------------+------------+---------------+ + | |PyTorch version|CUDA version|Relative speed | + +=======================+===============+============+===============+ + |with torch.no_grad() |0.00554402 s |0.0299619 s |5.4x | + +-----------------------+---------------+------------+---------------+ + |no with torch.no_grad()|0.00562803 s |0.0301349 s |5.4x | + +-----------------------+---------------+------------+---------------+ + + Args: + in_channels (int): Channels of the input feature map. + """ + + def __init__(self, in_channels): + super().__init__() + self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.value_conv = nn.Conv2d(in_channels, in_channels, 1) + self.gamma = Scale(0.) + self.in_channels = in_channels + + def forward(self, x): + """forward function of Criss-Cross Attention. + + Args: + x (Tensor): Input feature. \ + shape (batch_size, in_channels, height, width) + Returns: + Tensor: Output of the layer, with shape of \ + (batch_size, in_channels, height, width) + """ + B, C, H, W = x.size() + query = self.query_conv(x) + key = self.key_conv(x) + value = self.value_conv(x) + energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG( + H, query.device) + energy_H = energy_H.transpose(1, 2) + energy_W = torch.einsum('bchw,bchj->bhwj', query, key) + attn = F.softmax( + torch.cat([energy_H, energy_W], dim=-1), dim=-1) # [B,H,W,(H+W)] + out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H]) + out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:]) + + out = self.gamma(out) + x + out = out.contiguous() + + return out + + def __repr__(self): + s = self.__class__.__name__ + s += f'(in_channels={self.in_channels})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/contour_expand.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/contour_expand.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1111e1768b5f27e118bf7dbc0d9c70a7afd6d7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/contour_expand.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['contour_expand']) + + +def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area, + kernel_num): + """Expand kernel contours so that foreground pixels are assigned into + instances. + + Arguments: + kernel_mask (np.array or Tensor): The instance kernel mask with + size hxw. + internal_kernel_label (np.array or Tensor): The instance internal + kernel label with size hxw. + min_kernel_area (int): The minimum kernel area. + kernel_num (int): The instance kernel number. + + Returns: + label (list): The instance index map with size hxw. 
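+
+    Example (illustrative sketch, not from the original docstring; it
+    assumes the compiled ``_ext`` extension, and the input dtypes shown
+    are an assumption):
+
+        >>> import numpy as np
+        >>> kernel_mask = np.zeros((10, 10), dtype=np.uint8)
+        >>> kernel_mask[2:8, 2:8] = 1                  # expandable region
+        >>> labels = np.zeros((10, 10), dtype=np.int32)
+        >>> labels[4:6, 4:6] = 1                       # one seed kernel
+        >>> label_map = contour_expand(kernel_mask, labels, 0, 2)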
+ """ + assert isinstance(kernel_mask, (torch.Tensor, np.ndarray)) + assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray)) + assert isinstance(min_kernel_area, int) + assert isinstance(kernel_num, int) + + if isinstance(kernel_mask, np.ndarray): + kernel_mask = torch.from_numpy(kernel_mask) + if isinstance(internal_kernel_label, np.ndarray): + internal_kernel_label = torch.from_numpy(internal_kernel_label) + + if torch.__version__ == 'parrots': + if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0: + label = [] + else: + label = ext_module.contour_expand( + kernel_mask, + internal_kernel_label, + min_kernel_area=min_kernel_area, + kernel_num=kernel_num) + label = label.tolist() + else: + label = ext_module.contour_expand(kernel_mask, internal_kernel_label, + min_kernel_area, kernel_num) + return label diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/corner_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/corner_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..a33d798b43d405e4c86bee4cd6389be21ca9c637 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/corner_pool.py @@ -0,0 +1,161 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward', + 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward', + 'right_pool_forward', 'right_pool_backward' +]) + +_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3} + + +class TopPoolFunction(Function): + + @staticmethod + def symbolic(g, input): + output = g.op( + 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top'])) + return output + + @staticmethod + def forward(ctx, input): + output = ext_module.top_pool_forward(input) + ctx.save_for_backward(input) + return output + + @staticmethod + def backward(ctx, grad_output): + input, = ctx.saved_tensors + output = ext_module.top_pool_backward(input, grad_output) + return output + + +class BottomPoolFunction(Function): + + @staticmethod + def symbolic(g, input): + output = g.op( + 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom'])) + return output + + @staticmethod + def forward(ctx, input): + output = ext_module.bottom_pool_forward(input) + ctx.save_for_backward(input) + return output + + @staticmethod + def backward(ctx, grad_output): + input, = ctx.saved_tensors + output = ext_module.bottom_pool_backward(input, grad_output) + return output + + +class LeftPoolFunction(Function): + + @staticmethod + def symbolic(g, input): + output = g.op( + 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left'])) + return output + + @staticmethod + def forward(ctx, input): + output = ext_module.left_pool_forward(input) + ctx.save_for_backward(input) + return output + + @staticmethod + def backward(ctx, grad_output): + input, = ctx.saved_tensors + output = ext_module.left_pool_backward(input, grad_output) + return output + + +class RightPoolFunction(Function): + + @staticmethod + def symbolic(g, input): + output = g.op( + 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right'])) + return output + + @staticmethod + def forward(ctx, input): + output = ext_module.right_pool_forward(input) + ctx.save_for_backward(input) + return output + + @staticmethod + def backward(ctx, grad_output): + input, = ctx.saved_tensors + 
output = ext_module.right_pool_backward(input, grad_output) + return output + + +class CornerPool(nn.Module): + """Corner Pooling. + + Corner Pooling is a new type of pooling layer that helps a + convolutional network better localize corners of bounding boxes. + + Please refer to https://arxiv.org/abs/1808.01244 for more details. + Code is modified from https://github.com/princeton-vl/CornerNet-Lite. + + Args: + mode(str): Pooling orientation for the pooling layer + + - 'bottom': Bottom Pooling + - 'left': Left Pooling + - 'right': Right Pooling + - 'top': Top Pooling + + Returns: + Feature map after pooling. + """ + + pool_functions = { + 'bottom': BottomPoolFunction, + 'left': LeftPoolFunction, + 'right': RightPoolFunction, + 'top': TopPoolFunction, + } + + cummax_dim_flip = { + 'bottom': (2, False), + 'left': (3, True), + 'right': (3, False), + 'top': (2, True), + } + + def __init__(self, mode): + super(CornerPool, self).__init__() + assert mode in self.pool_functions + self.mode = mode + self.corner_pool = self.pool_functions[mode] + + def forward(self, x): + if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0': + if torch.onnx.is_in_onnx_export(): + assert torch.__version__ >= '1.7.0', \ + 'When `cummax` serves as an intermediate component whose '\ + 'outputs is used as inputs for another modules, it\'s '\ + 'expected that pytorch version must be >= 1.7.0, '\ + 'otherwise Error appears like: `RuntimeError: tuple '\ + 'appears in op that does not forward tuples, unsupported '\ + 'kind: prim::PythonOp`.' + + dim, flip = self.cummax_dim_flip[self.mode] + if flip: + x = x.flip(dim) + pool_tensor, _ = torch.cummax(x, dim=dim) + if flip: + pool_tensor = pool_tensor.flip(dim) + return pool_tensor + else: + return self.corner_pool.apply(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/correlation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..3d0b79c301b29915dfaf4d2b1846c59be73127d3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/correlation.py @@ -0,0 +1,196 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
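+# Illustrative usage sketch (not part of upstream mmcv; it assumes the
+# compiled ``_ext`` CUDA extension and CUDA tensors):
+#
+#     corr = Correlation(max_displacement=4)
+#     x1 = torch.rand(2, 32, 40, 40).cuda()
+#     x2 = torch.rand(2, 32, 40, 40).cuda()
+#     out = corr(x1, x2)   # -> (2, 9, 9, 40, 40); patch size 2*4+1 = 9
+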
+import torch +from torch import Tensor, nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['correlation_forward', 'correlation_backward']) + + +class CorrelationFunction(Function): + + @staticmethod + def forward(ctx, + input1, + input2, + kernel_size=1, + max_displacement=1, + stride=1, + padding=1, + dilation=1, + dilation_patch=1): + + ctx.save_for_backward(input1, input2) + + kH, kW = ctx.kernel_size = _pair(kernel_size) + patch_size = max_displacement * 2 + 1 + ctx.patch_size = patch_size + dH, dW = ctx.stride = _pair(stride) + padH, padW = ctx.padding = _pair(padding) + dilationH, dilationW = ctx.dilation = _pair(dilation) + dilation_patchH, dilation_patchW = ctx.dilation_patch = _pair( + dilation_patch) + + output_size = CorrelationFunction._output_size(ctx, input1) + + output = input1.new_zeros(output_size) + + ext_module.correlation_forward( + input1, + input2, + output, + kH=kH, + kW=kW, + patchH=patch_size, + patchW=patch_size, + padH=padH, + padW=padW, + dilationH=dilationH, + dilationW=dilationW, + dilation_patchH=dilation_patchH, + dilation_patchW=dilation_patchW, + dH=dH, + dW=dW) + + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input1, input2 = ctx.saved_tensors + + kH, kW = ctx.kernel_size + patch_size = ctx.patch_size + padH, padW = ctx.padding + dilationH, dilationW = ctx.dilation + dilation_patchH, dilation_patchW = ctx.dilation_patch + dH, dW = ctx.stride + grad_input1 = torch.zeros_like(input1) + grad_input2 = torch.zeros_like(input2) + + ext_module.correlation_backward( + grad_output, + input1, + input2, + grad_input1, + grad_input2, + kH=kH, + kW=kW, + patchH=patch_size, + patchW=patch_size, + padH=padH, + padW=padW, + dilationH=dilationH, + dilationW=dilationW, + dilation_patchH=dilation_patchH, + dilation_patchW=dilation_patchW, + dH=dH, + dW=dW) + return grad_input1, grad_input2, None, None, None, None, None, None + + @staticmethod + def _output_size(ctx, input1): + iH, iW = input1.size(2), input1.size(3) + batch_size = input1.size(0) + kH, kW = ctx.kernel_size + patch_size = ctx.patch_size + dH, dW = ctx.stride + padH, padW = ctx.padding + dilationH, dilationW = ctx.dilation + dilatedKH = (kH - 1) * dilationH + 1 + dilatedKW = (kW - 1) * dilationW + 1 + + oH = int((iH + 2 * padH - dilatedKH) / dH + 1) + oW = int((iW + 2 * padW - dilatedKW) / dW + 1) + + output_size = (batch_size, patch_size, patch_size, oH, oW) + return output_size + + +class Correlation(nn.Module): + r"""Correlation operator + + This correlation operator works for optical flow correlation computation. + + There are two batched tensors with shape :math:`(N, C, H, W)`, + and the correlation output's shape is :math:`(N, max\_displacement \times + 2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})` + + where + + .. math:: + H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding - + dilation \times (kernel\_size - 1) - 1} + {stride} + 1\right\rfloor + + .. math:: + W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation + \times (kernel\_size - 1) - 1} + {stride} + 1\right\rfloor + + the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding + window convolution between input1 and shifted input2, + + .. 
math:: + Corr(N_i, dx, dy) = + \sum_{c=0}^{C-1} + input1(N_i, c) \star + \mathcal{S}(input2(N_i, c), dy, dx) + + where :math:`\star` is the valid 2d sliding window convolution operator, + and :math:`\mathcal{S}` means shifting the input features (auto-complete + zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \in + [-max\_displacement \times dilation\_patch, max\_displacement \times + dilation\_patch]`. + + Args: + kernel_size (int): The size of sliding window i.e. local neighborhood + representing the center points and involved in correlation + computation. Defaults to 1. + max_displacement (int): The radius for computing correlation volume, + but the actual working space can be dilated by dilation_patch. + Defaults to 1. + stride (int): The stride of the sliding blocks in the input spatial + dimensions. Defaults to 1. + padding (int): Zero padding added to all four sides of the input1. + Defaults to 0. + dilation (int): The spacing of local neighborhood that will involved + in correlation. Defaults to 1. + dilation_patch (int): The spacing between position need to compute + correlation. Defaults to 1. + """ + + def __init__(self, + kernel_size: int = 1, + max_displacement: int = 1, + stride: int = 1, + padding: int = 0, + dilation: int = 1, + dilation_patch: int = 1) -> None: + super().__init__() + self.kernel_size = kernel_size + self.max_displacement = max_displacement + self.stride = stride + self.padding = padding + self.dilation = dilation + self.dilation_patch = dilation_patch + + def forward(self, input1: Tensor, input2: Tensor) -> Tensor: + return CorrelationFunction.apply(input1, input2, self.kernel_size, + self.max_displacement, self.stride, + self.padding, self.dilation, + self.dilation_patch) + + def __repr__(self) -> str: + s = self.__class__.__name__ + s += f'(kernel_size={self.kernel_size}, ' + s += f'max_displacement={self.max_displacement}, ' + s += f'stride={self.stride}, ' + s += f'padding={self.padding}, ' + s += f'dilation={self.dilation}, ' + s += f'dilation_patch={self.dilation_patch})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..c8e80c5af51a525915875a1f9cb030e77d24f190 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_conv.py @@ -0,0 +1,405 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
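+# Illustrative usage sketch (not part of upstream mmcv; it assumes the
+# compiled ``_ext`` CUDA extension and CUDA tensors):
+#
+#     dcn = DeformConv2dPack(16, 32, kernel_size=3, padding=1).cuda()
+#     y = dcn(torch.rand(1, 16, 8, 8).cuda())  # offsets predicted internally
+#
+# DeformConv2d itself takes an explicit offset tensor of shape
+# (B, deform_groups * 2 * kH * kW, H_out, W_out).
+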
+from typing import Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair, _single + +from custom_mmpkg.custom_mmcv.utils import deprecated_api_warning +from ..cnn import CONV_LAYERS +from ..utils import ext_loader, print_log + +ext_module = ext_loader.load_ext('_ext', [ + 'deform_conv_forward', 'deform_conv_backward_input', + 'deform_conv_backward_parameters' +]) + + +class DeformConv2dFunction(Function): + + @staticmethod + def symbolic(g, + input, + offset, + weight, + stride, + padding, + dilation, + groups, + deform_groups, + bias=False, + im2col_step=32): + return g.op( + 'mmcv::MMCVDeformConv2d', + input, + offset, + weight, + stride_i=stride, + padding_i=padding, + dilation_i=dilation, + groups_i=groups, + deform_groups_i=deform_groups, + bias_i=bias, + im2col_step_i=im2col_step) + + @staticmethod + def forward(ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1, + bias=False, + im2col_step=32): + if input is not None and input.dim() != 4: + raise ValueError( + f'Expected 4D tensor as input, got {input.dim()}D tensor \ + instead.') + assert bias is False, 'Only support bias is False.' + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deform_groups = deform_groups + ctx.im2col_step = im2col_step + + # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; + # amp won't cast the type of model (float32), but "offset" is cast + # to float16 by nn.Conv2d automatically, leading to the type + # mismatch with input (when it is float32) or weight. + # The flag for whether to use fp16 or amp is the type of "offset", + # we cast weight and input to temporarily support fp16 and amp + # whatever the pytorch version is. 
+ input = input.type_as(offset) + weight = weight.type_as(input) + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + DeformConv2dFunction._output_size(ctx, input, weight)) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + cur_im2col_step = min(ctx.im2col_step, input.size(0)) + assert (input.size(0) % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + ext_module.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + im2col_step=cur_im2col_step) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + cur_im2col_step = min(ctx.im2col_step, input.size(0)) + assert (input.size(0) % cur_im2col_step + ) == 0, 'batch size must be divisible by im2col_step' + + grad_output = grad_output.contiguous() + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + ext_module.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + im2col_step=cur_im2col_step) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + ext_module.deform_conv_backward_parameters( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + scale=1, + im2col_step=cur_im2col_step) + + return grad_input, grad_offset, grad_weight, \ + None, None, None, None, None, None, None + + @staticmethod + def _output_size(ctx, input, weight): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = ctx.padding[d] + kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = ctx.stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + 'convolution input is too small (output would be ' + + 'x'.join(map(str, output_size)) + ')') + return output_size + + +deform_conv2d = DeformConv2dFunction.apply + + +class DeformConv2d(nn.Module): + r"""Deformable 2D convolution. + + Applies a deformable 2D convolution over an input signal composed of + several input planes. DeformConv2d was described in the paper + `Deformable Convolutional Networks + `_ + + Note: + The argument ``im2col_step`` was added in version 1.3.17, which means + number of samples processed by the ``im2col_cuda_kernel`` per call. + It enables users to define ``batch_size`` and ``im2col_step`` more + flexibly and solved `issue mmcv#1440 + `_. + + Args: + in_channels (int): Number of channels in the input image. 
+        out_channels (int): Number of channels produced by the convolution.
+        kernel_size (int, tuple): Size of the convolving kernel.
+        stride (int, tuple): Stride of the convolution. Default: 1.
+        padding (int or tuple): Zero-padding added to both sides of the input.
+            Default: 0.
+        dilation (int or tuple): Spacing between kernel elements. Default: 1.
+        groups (int): Number of blocked connections from input
+            channels to output channels. Default: 1.
+        deform_groups (int): Number of deformable group partitions.
+        bias (bool): If True, adds a learnable bias to the output.
+            Default: False.
+        im2col_step (int): Number of samples processed by im2col_cuda_kernel
+            per call. It will work when ``batch_size`` > ``im2col_step``, but
+            ``batch_size`` must be divisible by ``im2col_step``. Default: 32.
+            `New in version 1.3.17.`
+    """
+
+    @deprecated_api_warning({'deformable_groups': 'deform_groups'},
+                            cls_name='DeformConv2d')
+    def __init__(self,
+                 in_channels: int,
+                 out_channels: int,
+                 kernel_size: Union[int, Tuple[int, ...]],
+                 stride: Union[int, Tuple[int, ...]] = 1,
+                 padding: Union[int, Tuple[int, ...]] = 0,
+                 dilation: Union[int, Tuple[int, ...]] = 1,
+                 groups: int = 1,
+                 deform_groups: int = 1,
+                 bias: bool = False,
+                 im2col_step: int = 32) -> None:
+        super(DeformConv2d, self).__init__()
+
+        assert not bias, \
+            f'bias={bias} is not supported in DeformConv2d.'
+        assert in_channels % groups == 0, \
+            f'in_channels {in_channels} is not divisible by groups {groups}'
+        assert out_channels % groups == 0, \
+            f'out_channels {out_channels} is not divisible by groups \
+            {groups}'
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.kernel_size = _pair(kernel_size)
+        self.stride = _pair(stride)
+        self.padding = _pair(padding)
+        self.dilation = _pair(dilation)
+        self.groups = groups
+        self.deform_groups = deform_groups
+        self.im2col_step = im2col_step
+        # enable compatibility with nn.Conv2d
+        self.transposed = False
+        self.output_padding = _single(0)
+
+        # only weight, no bias
+        self.weight = nn.Parameter(
+            torch.Tensor(out_channels, in_channels // self.groups,
+                         *self.kernel_size))
+
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        # switch the initialization of `self.weight` to the standard kaiming
+        # method described in `Delving deep into rectifiers: Surpassing
+        # human-level performance on ImageNet classification` - He, K. et al.
+        # (2015), using a uniform distribution
+        nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
+
+    def forward(self, x: Tensor, offset: Tensor) -> Tensor:
+        """Deformable Convolutional forward function.
+
+        Args:
+            x (Tensor): Input feature, shape (B, C_in, H_in, W_in)
+            offset (Tensor): Offset for deformable convolution, shape
+                (B, deform_groups*kernel_size[0]*kernel_size[1]*2,
+                H_out, W_out), where H_out and W_out are equal to the
+                output's spatial size.
+
+                An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
+                The spatial arrangement is like:
+
+                .. code:: text
+
+                    (x0, y0) (x1, y1) (x2, y2)
+                    (x3, y3) (x4, y4) (x5, y5)
+                    (x6, y6) (x7, y7) (x8, y8)
+
+        Returns:
+            Tensor: Output of the layer.
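+
+        Example (illustrative sketch, not from the original docstring; it
+        assumes the compiled ``_ext`` CUDA extension and CUDA tensors):
+
+            >>> conv = DeformConv2d(16, 32, kernel_size=3, padding=1).cuda()
+            >>> x = torch.rand(1, 16, 8, 8).cuda()
+            >>> # one (y, x) offset pair per kernel position and output pixel;
+            >>> # all-zero offsets reduce to a plain 3x3 convolution
+            >>> offset = torch.zeros(1, 2 * 3 * 3, 8, 8).cuda()
+            >>> out = conv(x, offset)
+            >>> tuple(out.shape)
+            (1, 32, 8, 8)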
+ """ + # To fix an assert error in deform_conv_cuda.cpp:128 + # input image is smaller than kernel + input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) < + self.kernel_size[1]) + if input_pad: + pad_h = max(self.kernel_size[0] - x.size(2), 0) + pad_w = max(self.kernel_size[1] - x.size(3), 0) + x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() + offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0) + offset = offset.contiguous() + out = deform_conv2d(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deform_groups, + False, self.im2col_step) + if input_pad: + out = out[:, :, :out.size(2) - pad_h, :out.size(3) - + pad_w].contiguous() + return out + + def __repr__(self): + s = self.__class__.__name__ + s += f'(in_channels={self.in_channels},\n' + s += f'out_channels={self.out_channels},\n' + s += f'kernel_size={self.kernel_size},\n' + s += f'stride={self.stride},\n' + s += f'padding={self.padding},\n' + s += f'dilation={self.dilation},\n' + s += f'groups={self.groups},\n' + s += f'deform_groups={self.deform_groups},\n' + # bias is not supported in DeformConv2d. + s += 'bias=False)' + return s + + +@CONV_LAYERS.register_module('DCN') +class DeformConv2dPack(DeformConv2d): + """A Deformable Conv Encapsulation that acts as normal Conv layers. + + The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. + The spatial arrangement is like: + + .. code:: text + + (x0, y0) (x1, y1) (x2, y2) + (x3, y3) (x4, y4) (x5, y5) + (x6, y6) (x7, y7) (x8, y8) + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int or tuple[int]): Same as nn.Conv2d. + padding (int or tuple[int]): Same as nn.Conv2d. + dilation (int or tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + _version = 2 + + def __init__(self, *args, **kwargs): + super(DeformConv2dPack, self).__init__(*args, **kwargs) + self.conv_offset = nn.Conv2d( + self.in_channels, + self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1], + kernel_size=self.kernel_size, + stride=_pair(self.stride), + padding=_pair(self.padding), + dilation=_pair(self.dilation), + bias=True) + self.init_offset() + + def init_offset(self): + self.conv_offset.weight.data.zero_() + self.conv_offset.bias.data.zero_() + + def forward(self, x): + offset = self.conv_offset(x) + return deform_conv2d(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deform_groups, + False, self.im2col_step) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + + if version is None or version < 2: + # the key is different in early versions + # In version < 2, DeformConvPack loads previous benchmark models. 
+ if (prefix + 'conv_offset.weight' not in state_dict + and prefix[:-1] + '_offset.weight' in state_dict): + state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( + prefix[:-1] + '_offset.weight') + if (prefix + 'conv_offset.bias' not in state_dict + and prefix[:-1] + '_offset.bias' in state_dict): + state_dict[prefix + + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + + '_offset.bias') + + if version is not None and version > 1: + print_log( + f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to ' + 'version 2.', + logger='root') + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_roi_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_roi_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..cc245ba91fee252226ba22e76bb94a35db9a629b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deform_roi_pool.py @@ -0,0 +1,204 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward']) + + +class DeformRoIPoolFunction(Function): + + @staticmethod + def symbolic(g, input, rois, offset, output_size, spatial_scale, + sampling_ratio, gamma): + return g.op( + 'mmcv::MMCVDeformRoIPool', + input, + rois, + offset, + pooled_height_i=output_size[0], + pooled_width_i=output_size[1], + spatial_scale_f=spatial_scale, + sampling_ratio_f=sampling_ratio, + gamma_f=gamma) + + @staticmethod + def forward(ctx, + input, + rois, + offset, + output_size, + spatial_scale=1.0, + sampling_ratio=0, + gamma=0.1): + if offset is None: + offset = input.new_zeros(0) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = float(spatial_scale) + ctx.sampling_ratio = int(sampling_ratio) + ctx.gamma = float(gamma) + + assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' 
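+        # each roi row is (batch_idx, x1, y1, x2, y2) in input-image
+        # coordinates; the kernel rescales the box by spatial_scale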
+ + output_shape = (rois.size(0), input.size(1), ctx.output_size[0], + ctx.output_size[1]) + output = input.new_zeros(output_shape) + + ext_module.deform_roi_pool_forward( + input, + rois, + offset, + output, + pooled_height=ctx.output_size[0], + pooled_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale, + sampling_ratio=ctx.sampling_ratio, + gamma=ctx.gamma) + + ctx.save_for_backward(input, rois, offset) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, rois, offset = ctx.saved_tensors + grad_input = grad_output.new_zeros(input.shape) + grad_offset = grad_output.new_zeros(offset.shape) + + ext_module.deform_roi_pool_backward( + grad_output, + input, + rois, + offset, + grad_input, + grad_offset, + pooled_height=ctx.output_size[0], + pooled_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale, + sampling_ratio=ctx.sampling_ratio, + gamma=ctx.gamma) + if grad_offset.numel() == 0: + grad_offset = None + return grad_input, None, grad_offset, None, None, None, None + + +deform_roi_pool = DeformRoIPoolFunction.apply + + +class DeformRoIPool(nn.Module): + + def __init__(self, + output_size, + spatial_scale=1.0, + sampling_ratio=0, + gamma=0.1): + super(DeformRoIPool, self).__init__() + self.output_size = _pair(output_size) + self.spatial_scale = float(spatial_scale) + self.sampling_ratio = int(sampling_ratio) + self.gamma = float(gamma) + + def forward(self, input, rois, offset=None): + return deform_roi_pool(input, rois, offset, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.gamma) + + +class DeformRoIPoolPack(DeformRoIPool): + + def __init__(self, + output_size, + output_channels, + deform_fc_channels=1024, + spatial_scale=1.0, + sampling_ratio=0, + gamma=0.1): + super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, + sampling_ratio, gamma) + + self.output_channels = output_channels + self.deform_fc_channels = deform_fc_channels + + self.offset_fc = nn.Sequential( + nn.Linear( + self.output_size[0] * self.output_size[1] * + self.output_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + self.output_size[0] * self.output_size[1] * 2)) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + + def forward(self, input, rois): + assert input.size(1) == self.output_channels + x = deform_roi_pool(input, rois, None, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.gamma) + rois_num = rois.size(0) + offset = self.offset_fc(x.view(rois_num, -1)) + offset = offset.view(rois_num, 2, self.output_size[0], + self.output_size[1]) + return deform_roi_pool(input, rois, offset, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.gamma) + + +class ModulatedDeformRoIPoolPack(DeformRoIPool): + + def __init__(self, + output_size, + output_channels, + deform_fc_channels=1024, + spatial_scale=1.0, + sampling_ratio=0, + gamma=0.1): + super(ModulatedDeformRoIPoolPack, + self).__init__(output_size, spatial_scale, sampling_ratio, gamma) + + self.output_channels = output_channels + self.deform_fc_channels = deform_fc_channels + + self.offset_fc = nn.Sequential( + nn.Linear( + self.output_size[0] * self.output_size[1] * + self.output_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + 
self.output_size[0] * self.output_size[1] * 2)) + self.offset_fc[-1].weight.data.zero_() + self.offset_fc[-1].bias.data.zero_() + + self.mask_fc = nn.Sequential( + nn.Linear( + self.output_size[0] * self.output_size[1] * + self.output_channels, self.deform_fc_channels), + nn.ReLU(inplace=True), + nn.Linear(self.deform_fc_channels, + self.output_size[0] * self.output_size[1] * 1), + nn.Sigmoid()) + self.mask_fc[2].weight.data.zero_() + self.mask_fc[2].bias.data.zero_() + + def forward(self, input, rois): + assert input.size(1) == self.output_channels + x = deform_roi_pool(input, rois, None, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.gamma) + rois_num = rois.size(0) + offset = self.offset_fc(x.view(rois_num, -1)) + offset = offset.view(rois_num, 2, self.output_size[0], + self.output_size[1]) + mask = self.mask_fc(x.view(rois_num, -1)) + mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1]) + d = deform_roi_pool(input, rois, offset, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.gamma) + return d * mask diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deprecated_wrappers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deprecated_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e593df9ee57637038683d7a1efaa347b2b69e7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/deprecated_wrappers.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This file is for backward compatibility. +# Module wrappers for empty tensor have been moved to mmcv.cnn.bricks. +import warnings + +from ..cnn.bricks.wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d + + +class Conv2d_deprecated(Conv2d): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + 'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in' + ' the future. Please import them from "mmcv.cnn" instead') + + +class ConvTranspose2d_deprecated(ConvTranspose2d): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + 'Importing ConvTranspose2d wrapper from "mmcv.ops" will be ' + 'deprecated in the future. Please import them from "mmcv.cnn" ' + 'instead') + + +class MaxPool2d_deprecated(MaxPool2d): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + 'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated in' + ' the future. Please import them from "mmcv.cnn" instead') + + +class Linear_deprecated(Linear): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + 'Importing Linear wrapper from "mmcv.ops" will be deprecated in' + ' the future. Please import them from "mmcv.cnn" instead') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/focal_loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/focal_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..763bc93bd2575c49ca8ccf20996bbd92d1e0d1a4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/focal_loss.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
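The module below dispatches to compiled CUDA kernels for sigmoid and softmax focal loss. As a point of reference, a pure-PyTorch sigmoid focal loss that mirrors the kernel semantics (per-class binary focal terms; 'mean' divides by the number of samples, not elements) could look like the following; this is an illustrative sketch, not the code path the extension uses:

    import torch
    import torch.nn.functional as F

    def sigmoid_focal_loss_ref(logits, target, gamma=2.0, alpha=0.25,
                               reduction='mean'):
        # logits: (N, C) raw scores; target: (N,) int64 class indices.
        one_hot = F.one_hot(target, num_classes=logits.size(1)).type_as(logits)
        p = logits.sigmoid()
        # Probability assigned to the true binary label of each class slot.
        p_t = p * one_hot + (1 - p) * (1 - one_hot)
        alpha_t = alpha * one_hot + (1 - alpha) * (1 - one_hot)
        loss = -alpha_t * (1 - p_t).pow(gamma) * p_t.clamp(min=1e-8).log()
        if reduction == 'mean':
            return loss.sum() / logits.size(0)  # average over samples
        if reduction == 'sum':
            return loss.sum()
        return loss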
+import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'sigmoid_focal_loss_forward', 'sigmoid_focal_loss_backward', + 'softmax_focal_loss_forward', 'softmax_focal_loss_backward' +]) + + +class SigmoidFocalLossFunction(Function): + + @staticmethod + def symbolic(g, input, target, gamma, alpha, weight, reduction): + return g.op( + 'mmcv::MMCVSigmoidFocalLoss', + input, + target, + gamma_f=gamma, + alpha_f=alpha, + weight_f=weight, + reduction_s=reduction) + + @staticmethod + def forward(ctx, + input, + target, + gamma=2.0, + alpha=0.25, + weight=None, + reduction='mean'): + + assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor)) + assert input.dim() == 2 + assert target.dim() == 1 + assert input.size(0) == target.size(0) + if weight is None: + weight = input.new_empty(0) + else: + assert weight.dim() == 1 + assert input.size(1) == weight.size(0) + ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2} + assert reduction in ctx.reduction_dict.keys() + + ctx.gamma = float(gamma) + ctx.alpha = float(alpha) + ctx.reduction = ctx.reduction_dict[reduction] + + output = input.new_zeros(input.size()) + + ext_module.sigmoid_focal_loss_forward( + input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha) + if ctx.reduction == ctx.reduction_dict['mean']: + output = output.sum() / input.size(0) + elif ctx.reduction == ctx.reduction_dict['sum']: + output = output.sum() + ctx.save_for_backward(input, target, weight) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, target, weight = ctx.saved_tensors + + grad_input = input.new_zeros(input.size()) + + ext_module.sigmoid_focal_loss_backward( + input, + target, + weight, + grad_input, + gamma=ctx.gamma, + alpha=ctx.alpha) + + grad_input *= grad_output + if ctx.reduction == ctx.reduction_dict['mean']: + grad_input /= input.size(0) + return grad_input, None, None, None, None, None + + +sigmoid_focal_loss = SigmoidFocalLossFunction.apply + + +class SigmoidFocalLoss(nn.Module): + + def __init__(self, gamma, alpha, weight=None, reduction='mean'): + super(SigmoidFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.register_buffer('weight', weight) + self.reduction = reduction + + def forward(self, input, target): + return sigmoid_focal_loss(input, target, self.gamma, self.alpha, + self.weight, self.reduction) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(gamma={self.gamma}, ' + s += f'alpha={self.alpha}, ' + s += f'reduction={self.reduction})' + return s + + +class SoftmaxFocalLossFunction(Function): + + @staticmethod + def symbolic(g, input, target, gamma, alpha, weight, reduction): + return g.op( + 'mmcv::MMCVSoftmaxFocalLoss', + input, + target, + gamma_f=gamma, + alpha_f=alpha, + weight_f=weight, + reduction_s=reduction) + + @staticmethod + def forward(ctx, + input, + target, + gamma=2.0, + alpha=0.25, + weight=None, + reduction='mean'): + + assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor)) + assert input.dim() == 2 + assert target.dim() == 1 + assert input.size(0) == target.size(0) + if weight is None: + weight = input.new_empty(0) + else: + assert weight.dim() == 1 + assert input.size(1) == weight.size(0) + ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2} + assert reduction in ctx.reduction_dict.keys() + + ctx.gamma = float(gamma) + ctx.alpha = float(alpha) + 
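+        # NOTE: before the CUDA kernel is invoked, the lines below normalise
+        # the logits with a numerically stable softmax (the per-row max is
+        # subtracted prior to exponentiation), so the kernel receives class
+        # probabilities rather than raw scores.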
ctx.reduction = ctx.reduction_dict[reduction] + + channel_stats, _ = torch.max(input, dim=1) + input_softmax = input - channel_stats.unsqueeze(1).expand_as(input) + input_softmax.exp_() + + channel_stats = input_softmax.sum(dim=1) + input_softmax /= channel_stats.unsqueeze(1).expand_as(input) + + output = input.new_zeros(input.size(0)) + ext_module.softmax_focal_loss_forward( + input_softmax, + target, + weight, + output, + gamma=ctx.gamma, + alpha=ctx.alpha) + + if ctx.reduction == ctx.reduction_dict['mean']: + output = output.sum() / input.size(0) + elif ctx.reduction == ctx.reduction_dict['sum']: + output = output.sum() + ctx.save_for_backward(input_softmax, target, weight) + return output + + @staticmethod + def backward(ctx, grad_output): + input_softmax, target, weight = ctx.saved_tensors + buff = input_softmax.new_zeros(input_softmax.size(0)) + grad_input = input_softmax.new_zeros(input_softmax.size()) + + ext_module.softmax_focal_loss_backward( + input_softmax, + target, + weight, + buff, + grad_input, + gamma=ctx.gamma, + alpha=ctx.alpha) + + grad_input *= grad_output + if ctx.reduction == ctx.reduction_dict['mean']: + grad_input /= input_softmax.size(0) + return grad_input, None, None, None, None, None + + +softmax_focal_loss = SoftmaxFocalLossFunction.apply + + +class SoftmaxFocalLoss(nn.Module): + + def __init__(self, gamma, alpha, weight=None, reduction='mean'): + super(SoftmaxFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.register_buffer('weight', weight) + self.reduction = reduction + + def forward(self, input, target): + return softmax_focal_loss(input, target, self.gamma, self.alpha, + self.weight, self.reduction) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(gamma={self.gamma}, ' + s += f'alpha={self.alpha}, ' + s += f'reduction={self.reduction})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/furthest_point_sample.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/furthest_point_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..374b7a878f1972c183941af28ba1df216ac1a60f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/furthest_point_sample.py @@ -0,0 +1,83 @@ +import torch +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'furthest_point_sampling_forward', + 'furthest_point_sampling_with_dist_forward' +]) + + +class FurthestPointSampling(Function): + """Uses iterative furthest point sampling to select a set of features whose + corresponding points have the furthest distance.""" + + @staticmethod + def forward(ctx, points_xyz: torch.Tensor, + num_points: int) -> torch.Tensor: + """ + Args: + points_xyz (Tensor): (B, N, 3) where N > num_points. + num_points (int): Number of points in the sampled set. + + Returns: + Tensor: (B, num_points) indices of the sampled points. 
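+
+        Example (sketch; assumes a CUDA device and the compiled ``_ext``
+        extension):
+
+            >>> xyz = torch.randn(2, 1024, 3).cuda()
+            >>> idx = furthest_point_sample(xyz, 128)  # (2, 128), torch.int32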
+ """ + assert points_xyz.is_contiguous() + + B, N = points_xyz.size()[:2] + output = torch.cuda.IntTensor(B, num_points) + temp = torch.cuda.FloatTensor(B, N).fill_(1e10) + + ext_module.furthest_point_sampling_forward( + points_xyz, + temp, + output, + b=B, + n=N, + m=num_points, + ) + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(output) + return output + + @staticmethod + def backward(xyz, a=None): + return None, None + + +class FurthestPointSamplingWithDist(Function): + """Uses iterative furthest point sampling to select a set of features whose + corresponding points have the furthest distance.""" + + @staticmethod + def forward(ctx, points_dist: torch.Tensor, + num_points: int) -> torch.Tensor: + """ + Args: + points_dist (Tensor): (B, N, N) Distance between each point pair. + num_points (int): Number of points in the sampled set. + + Returns: + Tensor: (B, num_points) indices of the sampled points. + """ + assert points_dist.is_contiguous() + + B, N, _ = points_dist.size() + output = points_dist.new_zeros([B, num_points], dtype=torch.int32) + temp = points_dist.new_zeros([B, N]).fill_(1e10) + + ext_module.furthest_point_sampling_with_dist_forward( + points_dist, temp, output, b=B, n=N, m=num_points) + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(output) + return output + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply +furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/fused_bias_leakyrelu.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/fused_bias_leakyrelu.py new file mode 100644 index 0000000000000000000000000000000000000000..6d12508469c6c8fa1884debece44c58d158cb6fa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/fused_bias_leakyrelu.py @@ -0,0 +1,268 @@ +# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501 + +# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. +# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator +# Augmentation (ADA) +# ======================================================================= + +# 1. Definitions + +# "Licensor" means any person or entity that distributes its Work. + +# "Software" means the original work of authorship made available under +# this License. + +# "Work" means the Software and any additions to or derivative works of +# the Software that are made available under this License. + +# The terms "reproduce," "reproduction," "derivative works," and +# "distribution" have the meaning as provided under U.S. copyright law; +# provided, however, that for the purposes of this License, derivative +# works shall not include works that remain separable from, or merely +# link (or bind by name) to the interfaces of, the Work. + +# Works, including the Software, are "made available" under this License +# by including in or with the Work either (a) a copyright notice +# referencing the applicability of this License to the Work, or (b) a +# copy of this License. + +# 2. License Grants + +# 2.1 Copyright Grant. 
Subject to the terms and conditions of this +# License, each Licensor grants to you a perpetual, worldwide, +# non-exclusive, royalty-free, copyright license to reproduce, +# prepare derivative works of, publicly display, publicly perform, +# sublicense and distribute its Work and any resulting derivative +# works in any form. + +# 3. Limitations + +# 3.1 Redistribution. You may reproduce or distribute the Work only +# if (a) you do so under this License, (b) you include a complete +# copy of this License with your distribution, and (c) you retain +# without modification any copyright, patent, trademark, or +# attribution notices that are present in the Work. + +# 3.2 Derivative Works. You may specify that additional or different +# terms apply to the use, reproduction, and distribution of your +# derivative works of the Work ("Your Terms") only if (a) Your Terms +# provide that the use limitation in Section 3.3 applies to your +# derivative works, and (b) you identify the specific derivative +# works that are subject to Your Terms. Notwithstanding Your Terms, +# this License (including the redistribution requirements in Section +# 3.1) will continue to apply to the Work itself. + +# 3.3 Use Limitation. The Work and any derivative works thereof only +# may be used or intended for use non-commercially. Notwithstanding +# the foregoing, NVIDIA and its affiliates may use the Work and any +# derivative works commercially. As used herein, "non-commercially" +# means for research or evaluation purposes only. + +# 3.4 Patent Claims. If you bring or threaten to bring a patent claim +# against any Licensor (including any claim, cross-claim or +# counterclaim in a lawsuit) to enforce any patents that you allege +# are infringed by any Work, then your rights under this License from +# such Licensor (including the grant in Section 2.1) will terminate +# immediately. + +# 3.5 Trademarks. This License does not grant any rights to use any +# Licensor’s or its affiliates’ names, logos, or trademarks, except +# as necessary to reproduce the notices described in this License. + +# 3.6 Termination. If you violate any term of this License, then your +# rights under this License (including the grant in Section 2.1) will +# terminate immediately. + +# 4. Disclaimer of Warranty. + +# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR +# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER +# THIS LICENSE. + +# 5. Limitation of Liability. + +# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF +# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGES. 
+ +# ======================================================================= + +import torch +import torch.nn.functional as F +from torch import nn +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['fused_bias_leakyrelu']) + + +class FusedBiasLeakyReLUFunctionBackward(Function): + """Calculate second order deviation. + + This function is to compute the second order deviation for the fused leaky + relu operation. + """ + + @staticmethod + def forward(ctx, grad_output, out, negative_slope, scale): + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + empty = grad_output.new_empty(0) + + grad_input = ext_module.fused_bias_leakyrelu( + grad_output, + empty, + out, + act=3, + grad=1, + alpha=negative_slope, + scale=scale) + + dim = [0] + + if grad_input.ndim > 2: + dim += list(range(2, grad_input.ndim)) + + grad_bias = grad_input.sum(dim).detach() + + return grad_input, grad_bias + + @staticmethod + def backward(ctx, gradgrad_input, gradgrad_bias): + out, = ctx.saved_tensors + + # The second order deviation, in fact, contains two parts, while the + # the first part is zero. Thus, we direct consider the second part + # which is similar with the first order deviation in implementation. + gradgrad_out = ext_module.fused_bias_leakyrelu( + gradgrad_input, + gradgrad_bias.to(out.dtype), + out, + act=3, + grad=1, + alpha=ctx.negative_slope, + scale=ctx.scale) + + return gradgrad_out, None, None, None + + +class FusedBiasLeakyReLUFunction(Function): + + @staticmethod + def forward(ctx, input, bias, negative_slope, scale): + empty = input.new_empty(0) + + out = ext_module.fused_bias_leakyrelu( + input, + bias, + empty, + act=3, + grad=0, + alpha=negative_slope, + scale=scale) + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + return out + + @staticmethod + def backward(ctx, grad_output): + out, = ctx.saved_tensors + + grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply( + grad_output, out, ctx.negative_slope, ctx.scale) + + return grad_input, grad_bias, None, None + + +class FusedBiasLeakyReLU(nn.Module): + """Fused bias leaky ReLU. + + This function is introduced in the StyleGAN2: + http://arxiv.org/abs/1912.04958 + + The bias term comes from the convolution operation. In addition, to keep + the variance of the feature map or gradients unchanged, they also adopt a + scale similarly with Kaiming initialization. However, since the + :math:`1+{alpha}^2` : is too small, we can just ignore it. Therefore, the + final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501 + your own scale. + + TODO: Implement the CPU version. + + Args: + channel (int): The channel number of the feature map. + negative_slope (float, optional): Same as nn.LeakyRelu. + Defaults to 0.2. + scale (float, optional): A scalar to adjust the variance of the feature + map. Defaults to 2**0.5. + """ + + def __init__(self, num_channels, negative_slope=0.2, scale=2**0.5): + super(FusedBiasLeakyReLU, self).__init__() + + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, input): + return fused_bias_leakyrelu(input, self.bias, self.negative_slope, + self.scale) + + +def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5): + """Fused bias leaky ReLU function. 
+ + This function is introduced in the StyleGAN2: + http://arxiv.org/abs/1912.04958 + + The bias term comes from the convolution operation. In addition, to keep + the variance of the feature map or gradients unchanged, they also adopt a + scale similarly with Kaiming initialization. However, since the + :math:`1+{alpha}^2` : is too small, we can just ignore it. Therefore, the + final scale is just :math:`\sqrt{2}`:. Of course, you may change it with # noqa: W605, E501 + your own scale. + + Args: + input (torch.Tensor): Input feature map. + bias (nn.Parameter): The bias from convolution operation. + negative_slope (float, optional): Same as nn.LeakyRelu. + Defaults to 0.2. + scale (float, optional): A scalar to adjust the variance of the feature + map. Defaults to 2**0.5. + + Returns: + torch.Tensor: Feature map after non-linear activation. + """ + + if not input.is_cuda: + return bias_leakyrelu_ref(input, bias, negative_slope, scale) + + return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype), + negative_slope, scale) + + +def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2**0.5): + + if bias is not None: + assert bias.ndim == 1 + assert bias.shape[0] == x.shape[1] + x = x + bias.reshape([-1 if i == 1 else 1 for i in range(x.ndim)]) + + x = F.leaky_relu(x, negative_slope) + if scale != 1: + x = x * scale + + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/gather_points.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/gather_points.py new file mode 100644 index 0000000000000000000000000000000000000000..f52f1677d8ea0facafc56a3672d37adb44677ff3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/gather_points.py @@ -0,0 +1,57 @@ +import torch +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['gather_points_forward', 'gather_points_backward']) + + +class GatherPoints(Function): + """Gather points with given index.""" + + @staticmethod + def forward(ctx, features: torch.Tensor, + indices: torch.Tensor) -> torch.Tensor: + """ + Args: + features (Tensor): (B, C, N) features to gather. + indices (Tensor): (B, M) where M is the number of points. + + Returns: + Tensor: (B, C, M) where M is the number of points. 
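+
+        Example (sketch; assumes a CUDA device and the compiled ``_ext``
+        extension):
+
+            >>> feats = torch.randn(2, 64, 1024).cuda()  # (B, C, N)
+            >>> idx = torch.randint(0, 1024, (2, 128)).int().cuda()
+            >>> out = gather_points(feats, idx)  # (2, 64, 128)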
+ """ + assert features.is_contiguous() + assert indices.is_contiguous() + + B, npoint = indices.size() + _, C, N = features.size() + output = torch.cuda.FloatTensor(B, C, npoint) + + ext_module.gather_points_forward( + features, indices, output, b=B, c=C, n=N, npoints=npoint) + + ctx.for_backwards = (indices, C, N) + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(indices) + return output + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + B, npoint = idx.size() + + grad_features = torch.cuda.FloatTensor(B, C, N).zero_() + grad_out_data = grad_out.data.contiguous() + ext_module.gather_points_backward( + grad_out_data, + idx, + grad_features.data, + b=B, + c=C, + n=N, + npoints=npoint) + return grad_features, None + + +gather_points = GatherPoints.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/group_points.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/group_points.py new file mode 100644 index 0000000000000000000000000000000000000000..6c3ec9d758ebe4e1c2205882af4be154008253a5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/group_points.py @@ -0,0 +1,224 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +from torch import nn as nn +from torch.autograd import Function + +from ..utils import ext_loader +from .ball_query import ball_query +from .knn import knn + +ext_module = ext_loader.load_ext( + '_ext', ['group_points_forward', 'group_points_backward']) + + +class QueryAndGroup(nn.Module): + """Groups points with a ball query of radius. + + Args: + max_radius (float): The maximum radius of the balls. + If None is given, we will use kNN sampling instead of ball query. + sample_num (int): Maximum number of features to gather in the ball. + min_radius (float, optional): The minimum radius of the balls. + Default: 0. + use_xyz (bool, optional): Whether to use xyz. + Default: True. + return_grouped_xyz (bool, optional): Whether to return grouped xyz. + Default: False. + normalize_xyz (bool, optional): Whether to normalize xyz. + Default: False. + uniform_sample (bool, optional): Whether to sample uniformly. + Default: False + return_unique_cnt (bool, optional): Whether to return the count of + unique samples. Default: False. + return_grouped_idx (bool, optional): Whether to return grouped idx. + Default: False. + """ + + def __init__(self, + max_radius, + sample_num, + min_radius=0, + use_xyz=True, + return_grouped_xyz=False, + normalize_xyz=False, + uniform_sample=False, + return_unique_cnt=False, + return_grouped_idx=False): + super().__init__() + self.max_radius = max_radius + self.min_radius = min_radius + self.sample_num = sample_num + self.use_xyz = use_xyz + self.return_grouped_xyz = return_grouped_xyz + self.normalize_xyz = normalize_xyz + self.uniform_sample = uniform_sample + self.return_unique_cnt = return_unique_cnt + self.return_grouped_idx = return_grouped_idx + if self.return_unique_cnt: + assert self.uniform_sample, \ + 'uniform_sample should be True when ' \ + 'returning the count of unique samples' + if self.max_radius is None: + assert not self.normalize_xyz, \ + 'can not normalize grouped xyz when max_radius is None' + + def forward(self, points_xyz, center_xyz, features=None): + """ + Args: + points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. + center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. 
+ features (Tensor): (B, C, N) Descriptors of the features. + + Returns: + Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. + """ + # if self.max_radius is None, we will perform kNN instead of ball query + # idx is of shape [B, npoint, sample_num] + if self.max_radius is None: + idx = knn(self.sample_num, points_xyz, center_xyz, False) + idx = idx.transpose(1, 2).contiguous() + else: + idx = ball_query(self.min_radius, self.max_radius, self.sample_num, + points_xyz, center_xyz) + + if self.uniform_sample: + unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) + for i_batch in range(idx.shape[0]): + for i_region in range(idx.shape[1]): + unique_ind = torch.unique(idx[i_batch, i_region, :]) + num_unique = unique_ind.shape[0] + unique_cnt[i_batch, i_region] = num_unique + sample_ind = torch.randint( + 0, + num_unique, (self.sample_num - num_unique, ), + dtype=torch.long) + all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) + idx[i_batch, i_region, :] = all_ind + + xyz_trans = points_xyz.transpose(1, 2).contiguous() + # (B, 3, npoint, sample_num) + grouped_xyz = grouping_operation(xyz_trans, idx) + grouped_xyz_diff = grouped_xyz - \ + center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets + if self.normalize_xyz: + grouped_xyz_diff /= self.max_radius + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + # (B, C + 3, npoint, sample_num) + new_features = torch.cat([grouped_xyz_diff, grouped_features], + dim=1) + else: + new_features = grouped_features + else: + assert (self.use_xyz + ), 'Cannot have not features and not use xyz as a feature!' + new_features = grouped_xyz_diff + + ret = [new_features] + if self.return_grouped_xyz: + ret.append(grouped_xyz) + if self.return_unique_cnt: + ret.append(unique_cnt) + if self.return_grouped_idx: + ret.append(idx) + if len(ret) == 1: + return ret[0] + else: + return tuple(ret) + + +class GroupAll(nn.Module): + """Group xyz with feature. + + Args: + use_xyz (bool): Whether to use xyz. + """ + + def __init__(self, use_xyz: bool = True): + super().__init__() + self.use_xyz = use_xyz + + def forward(self, + xyz: torch.Tensor, + new_xyz: torch.Tensor, + features: torch.Tensor = None): + """ + Args: + xyz (Tensor): (B, N, 3) xyz coordinates of the features. + new_xyz (Tensor): new xyz coordinates of the features. + features (Tensor): (B, C, N) features to group. + + Returns: + Tensor: (B, C + 3, 1, N) Grouped feature. + """ + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + if features is not None: + grouped_features = features.unsqueeze(2) + if self.use_xyz: + # (B, 3 + C, 1, N) + new_features = torch.cat([grouped_xyz, grouped_features], + dim=1) + else: + new_features = grouped_features + else: + new_features = grouped_xyz + + return new_features + + +class GroupingOperation(Function): + """Group feature with given index.""" + + @staticmethod + def forward(ctx, features: torch.Tensor, + indices: torch.Tensor) -> torch.Tensor: + """ + Args: + features (Tensor): (B, C, N) tensor of features to group. + indices (Tensor): (B, npoint, nsample) the indices of + features to group with. + + Returns: + Tensor: (B, C, npoint, nsample) Grouped features. 
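+
+        Example (sketch; assumes a CUDA device and the compiled ``_ext``
+        extension):
+
+            >>> feats = torch.randn(2, 64, 1024).cuda()  # (B, C, N)
+            >>> idx = torch.randint(0, 1024, (2, 128, 16)).int().cuda()
+            >>> grouped = grouping_operation(feats, idx)  # (2, 64, 128, 16)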
+ """ + features = features.contiguous() + indices = indices.contiguous() + + B, nfeatures, nsample = indices.size() + _, C, N = features.size() + output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) + + ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, + indices, output) + + ctx.for_backwards = (indices, N) + return output + + @staticmethod + def backward(ctx, + grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients + of the output from forward. + + Returns: + Tensor: (B, C, N) gradient of the features. + """ + idx, N = ctx.for_backwards + + B, C, npoint, nsample = grad_out.size() + grad_features = torch.cuda.FloatTensor(B, C, N).zero_() + + grad_out_data = grad_out.data.contiguous() + ext_module.group_points_backward(B, C, N, npoint, nsample, + grad_out_data, idx, + grad_features.data) + return grad_features, None + + +grouping_operation = GroupingOperation.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/info.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/info.py new file mode 100644 index 0000000000000000000000000000000000000000..29f2e5598ae2bb5866ccd15a7d3b4de33c0cd14d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/info.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import glob +import os + +import torch + +if torch.__version__ == 'parrots': + import parrots + + def get_compiler_version(): + return 'GCC ' + parrots.version.compiler + + def get_compiling_cuda_version(): + return parrots.version.cuda +else: + from ..utils import ext_loader + ext_module = ext_loader.load_ext( + '_ext', ['get_compiler_version', 'get_compiling_cuda_version']) + + def get_compiler_version(): + return ext_module.get_compiler_version() + + def get_compiling_cuda_version(): + return ext_module.get_compiling_cuda_version() + + +def get_onnxruntime_op_path(): + wildcard = os.path.join( + os.path.abspath(os.path.dirname(os.path.dirname(__file__))), + '_ext_ort.*.so') + + paths = glob.glob(wildcard) + if len(paths) > 0: + return paths[0] + else: + return '' diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/iou3d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/iou3d.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc71979190323f44c09f8b7e1761cf49cd2d76b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/iou3d.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward', + 'iou3d_nms_normal_forward' +]) + + +def boxes_iou_bev(boxes_a, boxes_b): + """Calculate boxes IoU in the Bird's Eye View. + + Args: + boxes_a (torch.Tensor): Input boxes a with shape (M, 5). + boxes_b (torch.Tensor): Input boxes b with shape (N, 5). + + Returns: + ans_iou (torch.Tensor): IoU result with shape (M, N). + """ + ans_iou = boxes_a.new_zeros( + torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) + + ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), + boxes_b.contiguous(), ans_iou) + + return ans_iou + + +def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None): + """NMS function GPU implementation (for BEV boxes). 
The overlap of two + boxes for IoU calculation is defined as the exact overlapping area of the + two boxes. In this function, one can also set ``pre_max_size`` and + ``post_max_size``. + + Args: + boxes (torch.Tensor): Input boxes with the shape of [N, 5] + ([x1, y1, x2, y2, ry]). + scores (torch.Tensor): Scores of boxes with the shape of [N]. + thresh (float): Overlap threshold of NMS. + pre_max_size (int, optional): Max size of boxes before NMS. + Default: None. + post_max_size (int, optional): Max size of boxes after NMS. + Default: None. + + Returns: + torch.Tensor: Indexes after NMS. + """ + assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + + if pre_max_size is not None: + order = order[:pre_max_size] + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh) + keep = order[keep[:num_out].cuda(boxes.device)].contiguous() + if post_max_size is not None: + keep = keep[:post_max_size] + return keep + + +def nms_normal_bev(boxes, scores, thresh): + """Normal NMS function GPU implementation (for BEV boxes). The overlap of + two boxes for IoU calculation is defined as the exact overlapping area of + the two boxes WITH their yaw angle set to 0. + + Args: + boxes (torch.Tensor): Input boxes with shape (N, 5). + scores (torch.Tensor): Scores of predicted boxes with shape (N). + thresh (float): Overlap threshold of NMS. + + Returns: + torch.Tensor: Remaining indices with scores in descending order. + """ + assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh) + return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/knn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/knn.py new file mode 100644 index 0000000000000000000000000000000000000000..f335785036669fc19239825b0aae6dde3f73bf92 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/knn.py @@ -0,0 +1,77 @@ +import torch +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['knn_forward']) + + +class KNN(Function): + r"""KNN (CUDA) based on heap data structure. + Modified from `PAConv `_. + + Find k-nearest points. + """ + + @staticmethod + def forward(ctx, + k: int, + xyz: torch.Tensor, + center_xyz: torch.Tensor = None, + transposed: bool = False) -> torch.Tensor: + """ + Args: + k (int): number of nearest neighbors. + xyz (Tensor): (B, N, 3) if transposed == False, else (B, 3, N). + xyz coordinates of the features. + center_xyz (Tensor, optional): (B, npoint, 3) if transposed == + False, else (B, 3, npoint). centers of the knn query. + Default: None. + transposed (bool, optional): whether the input tensors are + transposed. Should not explicitly use this keyword when + calling knn (=KNN.apply), just add the fourth param. + Default: False. + + Returns: + Tensor: (B, k, npoint) tensor with the indices of + the features that form k-nearest neighbours. 
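+
+        Example (sketch; assumes a CUDA device and the compiled ``_ext``
+        extension):
+
+            >>> xyz = torch.randn(2, 256, 3).cuda()
+            >>> center = torch.randn(2, 32, 3).cuda()
+            >>> idx = knn(8, xyz, center)  # (2, 8, 32)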
+ """ + assert (k > 0) & (k < 100), 'k should be in range(0, 100)' + + if center_xyz is None: + center_xyz = xyz + + if transposed: + xyz = xyz.transpose(2, 1).contiguous() + center_xyz = center_xyz.transpose(2, 1).contiguous() + + assert xyz.is_contiguous() # [B, N, 3] + assert center_xyz.is_contiguous() # [B, npoint, 3] + + center_xyz_device = center_xyz.get_device() + assert center_xyz_device == xyz.get_device(), \ + 'center_xyz and xyz should be put on the same device' + if torch.cuda.current_device() != center_xyz_device: + torch.cuda.set_device(center_xyz_device) + + B, npoint, _ = center_xyz.shape + N = xyz.shape[1] + + idx = center_xyz.new_zeros((B, npoint, k)).int() + dist2 = center_xyz.new_zeros((B, npoint, k)).float() + + ext_module.knn_forward( + xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k) + # idx shape to [B, k, npoint] + idx = idx.transpose(2, 1).contiguous() + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(idx) + return idx + + @staticmethod + def backward(ctx, a=None): + return None, None, None + + +knn = KNN.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/masked_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/masked_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..cd514cc204c1d571ea5dc7e74b038c0f477a008b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/masked_conv.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['masked_im2col_forward', 'masked_col2im_forward']) + + +class MaskedConv2dFunction(Function): + + @staticmethod + def symbolic(g, features, mask, weight, bias, padding, stride): + return g.op( + 'mmcv::MMCVMaskedConv2d', + features, + mask, + weight, + bias, + padding_i=padding, + stride_i=stride) + + @staticmethod + def forward(ctx, features, mask, weight, bias, padding=0, stride=1): + assert mask.dim() == 3 and mask.size(0) == 1 + assert features.dim() == 4 and features.size(0) == 1 + assert features.size()[2:] == mask.size()[1:] + pad_h, pad_w = _pair(padding) + stride_h, stride_w = _pair(stride) + if stride_h != 1 or stride_w != 1: + raise ValueError( + 'Stride could not only be 1 in masked_conv2d currently.') + out_channel, in_channel, kernel_h, kernel_w = weight.size() + + batch_size = features.size(0) + out_h = int( + math.floor((features.size(2) + 2 * pad_h - + (kernel_h - 1) - 1) / stride_h + 1)) + out_w = int( + math.floor((features.size(3) + 2 * pad_w - + (kernel_h - 1) - 1) / stride_w + 1)) + mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False) + output = features.new_zeros(batch_size, out_channel, out_h, out_w) + if mask_inds.numel() > 0: + mask_h_idx = mask_inds[:, 0].contiguous() + mask_w_idx = mask_inds[:, 1].contiguous() + data_col = features.new_zeros(in_channel * kernel_h * kernel_w, + mask_inds.size(0)) + ext_module.masked_im2col_forward( + features, + mask_h_idx, + mask_w_idx, + data_col, + kernel_h=kernel_h, + kernel_w=kernel_w, + pad_h=pad_h, + pad_w=pad_w) + + masked_output = torch.addmm(1, bias[:, None], 1, + weight.view(out_channel, -1), data_col) + ext_module.masked_col2im_forward( + masked_output, + mask_h_idx, + mask_w_idx, + output, + height=out_h, + width=out_w, + 
channels=out_channel)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        return (None, ) * 5
+
+
+masked_conv2d = MaskedConv2dFunction.apply
+
+
+class MaskedConv2d(nn.Conv2d):
+    """A MaskedConv2d which inherits the official Conv2d.
+
+    The masked forward does not implement a backward pass and currently
+    only supports a stride of 1.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True):
+        super(MaskedConv2d,
+              self).__init__(in_channels, out_channels, kernel_size, stride,
+                             padding, dilation, groups, bias)
+
+    def forward(self, input, mask=None):
+        if mask is None:  # fallback to the normal Conv2d
+            return super(MaskedConv2d, self).forward(input)
+        else:
+            return masked_conv2d(input, mask, self.weight, self.bias,
+                                 self.padding)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/merge_cells.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/merge_cells.py
new file mode 100644
index 0000000000000000000000000000000000000000..48ca8cc0a8aca8432835bd760c0403a3c35b34cf
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/merge_cells.py
@@ -0,0 +1,149 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import abstractmethod
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..cnn import ConvModule
+
+
+class BaseMergeCell(nn.Module):
+    """The basic class for cells used in NAS-FPN and NAS-FCOS.
+
+    BaseMergeCell takes 2 inputs. After applying convolution on them, they
+    are resized to the target size. Then, they go through ``_binary_op``,
+    which depends on the type of cell. If ``with_out_conv`` is True, the
+    result will go through another convolution layer.
+
+    Args:
+        fused_channels (int): number of input channels of the out_conv layer,
+            i.e. the channel count after ``_binary_op``. Default: 256.
+        out_channels (int): number of output channels of the out_conv layer.
+            Default: 256.
+        with_out_conv (bool): Whether to use the out_conv layer.
+        out_conv_cfg (dict): Config dict for the convolution layer, which
+            should contain "groups", "kernel_size", "padding", "bias" to
+            build the out_conv layer.
+        out_norm_cfg (dict): Config dict for normalization layer in out_conv.
+        out_conv_order (tuple): The order of conv/norm/activation layers in
+            out_conv.
+        with_input1_conv (bool): Whether to use convolution on input1.
+        with_input2_conv (bool): Whether to use convolution on input2.
+        input_conv_cfg (dict): Config dict for building the input1_conv and
+            input2_conv layers, which is expected to contain the type of
+            convolution. Default: None, which means using conv2d.
+        input_norm_cfg (dict): Config dict for normalization layer in
+            input1_conv and input2_conv layer. Default: None.
+        upsample_mode (str): Interpolation method used to resize the output
+            of input1_conv and input2_conv to target size. Currently, we
+            support ['nearest', 'bilinear']. Default: 'nearest'.
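+
+    Example (sketch; ``SumCell`` below passes its ``in_channels`` through as
+    ``fused_channels``):
+
+        >>> cell = SumCell(256, 256)
+        >>> x1 = torch.randn(2, 256, 32, 32)
+        >>> x2 = torch.randn(2, 256, 16, 16)  # resized to 32x32 internally
+        >>> out = cell(x1, x2)                # (2, 256, 32, 32)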
+ """ + + def __init__(self, + fused_channels=256, + out_channels=256, + with_out_conv=True, + out_conv_cfg=dict( + groups=1, kernel_size=3, padding=1, bias=True), + out_norm_cfg=None, + out_conv_order=('act', 'conv', 'norm'), + with_input1_conv=False, + with_input2_conv=False, + input_conv_cfg=None, + input_norm_cfg=None, + upsample_mode='nearest'): + super(BaseMergeCell, self).__init__() + assert upsample_mode in ['nearest', 'bilinear'] + self.with_out_conv = with_out_conv + self.with_input1_conv = with_input1_conv + self.with_input2_conv = with_input2_conv + self.upsample_mode = upsample_mode + + if self.with_out_conv: + self.out_conv = ConvModule( + fused_channels, + out_channels, + **out_conv_cfg, + norm_cfg=out_norm_cfg, + order=out_conv_order) + + self.input1_conv = self._build_input_conv( + out_channels, input_conv_cfg, + input_norm_cfg) if with_input1_conv else nn.Sequential() + self.input2_conv = self._build_input_conv( + out_channels, input_conv_cfg, + input_norm_cfg) if with_input2_conv else nn.Sequential() + + def _build_input_conv(self, channel, conv_cfg, norm_cfg): + return ConvModule( + channel, + channel, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + bias=True) + + @abstractmethod + def _binary_op(self, x1, x2): + pass + + def _resize(self, x, size): + if x.shape[-2:] == size: + return x + elif x.shape[-2:] < size: + return F.interpolate(x, size=size, mode=self.upsample_mode) + else: + assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0 + kernel_size = x.shape[-1] // size[-1] + x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size) + return x + + def forward(self, x1, x2, out_size=None): + assert x1.shape[:2] == x2.shape[:2] + assert out_size is None or len(out_size) == 2 + if out_size is None: # resize to larger one + out_size = max(x1.size()[2:], x2.size()[2:]) + + x1 = self.input1_conv(x1) + x2 = self.input2_conv(x2) + + x1 = self._resize(x1, out_size) + x2 = self._resize(x2, out_size) + + x = self._binary_op(x1, x2) + if self.with_out_conv: + x = self.out_conv(x) + return x + + +class SumCell(BaseMergeCell): + + def __init__(self, in_channels, out_channels, **kwargs): + super(SumCell, self).__init__(in_channels, out_channels, **kwargs) + + def _binary_op(self, x1, x2): + return x1 + x2 + + +class ConcatCell(BaseMergeCell): + + def __init__(self, in_channels, out_channels, **kwargs): + super(ConcatCell, self).__init__(in_channels * 2, out_channels, + **kwargs) + + def _binary_op(self, x1, x2): + ret = torch.cat([x1, x2], dim=1) + return ret + + +class GlobalPoolingCell(BaseMergeCell): + + def __init__(self, in_channels=None, out_channels=None, **kwargs): + super().__init__(in_channels, out_channels, **kwargs) + self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) + + def _binary_op(self, x1, x2): + x2_att = self.global_pool(x2).sigmoid() + return x2 + x2_att * x1 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/modulated_deform_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/modulated_deform_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..95b4828ef5ba35445856f6e19c0d565d8855c2ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/modulated_deform_conv.py @@ -0,0 +1,282 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair, _single + +from custom_mmpkg.custom_mmcv.utils import deprecated_api_warning +from ..cnn import CONV_LAYERS +from ..utils import ext_loader, print_log + +ext_module = ext_loader.load_ext( + '_ext', + ['modulated_deform_conv_forward', 'modulated_deform_conv_backward']) + + +class ModulatedDeformConv2dFunction(Function): + + @staticmethod + def symbolic(g, input, offset, mask, weight, bias, stride, padding, + dilation, groups, deform_groups): + input_tensors = [input, offset, mask, weight] + if bias is not None: + input_tensors.append(bias) + return g.op( + 'mmcv::MMCVModulatedDeformConv2d', + *input_tensors, + stride_i=stride, + padding_i=padding, + dilation_i=dilation, + groups_i=groups, + deform_groups_i=deform_groups) + + @staticmethod + def forward(ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1): + if input is not None and input.dim() != 4: + raise ValueError( + f'Expected 4D tensor as input, got {input.dim()}D tensor \ + instead.') + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deform_groups = deform_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(0) # fake tensor + # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; + # amp won't cast the type of model (float32), but "offset" is cast + # to float16 by nn.Conv2d automatically, leading to the type + # mismatch with input (when it is float32) or weight. + # The flag for whether to use fp16 or amp is the type of "offset", + # we cast weight and input to temporarily support fp16 and amp + # whatever the pytorch version is. 
+ input = input.type_as(offset) + weight = weight.type_as(input) + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty( + ModulatedDeformConv2dFunction._output_size(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + ext_module.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + kernel_h=weight.size(2), + kernel_w=weight.size(3), + stride_h=ctx.stride[0], + stride_w=ctx.stride[1], + pad_h=ctx.padding[0], + pad_w=ctx.padding[1], + dilation_h=ctx.dilation[0], + dilation_w=ctx.dilation[1], + group=ctx.groups, + deformable_group=ctx.deform_groups, + with_bias=ctx.with_bias) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + grad_output = grad_output.contiguous() + ext_module.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h=weight.size(2), + kernel_w=weight.size(3), + stride_h=ctx.stride[0], + stride_w=ctx.stride[1], + pad_h=ctx.padding[0], + pad_w=ctx.padding[1], + dilation_h=ctx.dilation[0], + dilation_w=ctx.dilation[1], + group=ctx.groups, + deformable_group=ctx.deform_groups, + with_bias=ctx.with_bias) + if not ctx.with_bias: + grad_bias = None + + return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, + None, None, None, None, None) + + @staticmethod + def _output_size(ctx, input, weight): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = ctx.padding[d] + kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = ctx.stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + 'convolution input is too small (output would be ' + + 'x'.join(map(str, output_size)) + ')') + return output_size + + +modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply + + +class ModulatedDeformConv2d(nn.Module): + + @deprecated_api_warning({'deformable_groups': 'deform_groups'}, + cls_name='ModulatedDeformConv2d') + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1, + bias=True): + super(ModulatedDeformConv2d, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deform_groups = deform_groups + # enable compatibility with nn.Conv2d + self.transposed = False + self.output_padding = _single(0) + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, + *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.init_weights() + + def init_weights(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. 
/ math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, x, offset, mask): + return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, + self.dilation, self.groups, + self.deform_groups) + + +@CONV_LAYERS.register_module('DCNv2') +class ModulatedDeformConv2dPack(ModulatedDeformConv2d): + """A ModulatedDeformable Conv Encapsulation that acts as normal Conv + layers. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int): Same as nn.Conv2d, while tuple is not supported. + padding (int): Same as nn.Conv2d, while tuple is not supported. + dilation (int): Same as nn.Conv2d, while tuple is not supported. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + _version = 2 + + def __init__(self, *args, **kwargs): + super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs) + self.conv_offset = nn.Conv2d( + self.in_channels, + self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1], + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + bias=True) + self.init_weights() + + def init_weights(self): + super(ModulatedDeformConv2dPack, self).init_weights() + if hasattr(self, 'conv_offset'): + self.conv_offset.weight.data.zero_() + self.conv_offset.bias.data.zero_() + + def forward(self, x): + out = self.conv_offset(x) + o1, o2, mask = torch.chunk(out, 3, dim=1) + offset = torch.cat((o1, o2), dim=1) + mask = torch.sigmoid(mask) + return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, + self.dilation, self.groups, + self.deform_groups) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + + if version is None or version < 2: + # the key is different in early versions + # In version < 2, ModulatedDeformConvPack + # loads previous benchmark models. + if (prefix + 'conv_offset.weight' not in state_dict + and prefix[:-1] + '_offset.weight' in state_dict): + state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( + prefix[:-1] + '_offset.weight') + if (prefix + 'conv_offset.bias' not in state_dict + and prefix[:-1] + '_offset.bias' in state_dict): + state_dict[prefix + + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + + '_offset.bias') + + if version is not None and version > 1: + print_log( + f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to ' + 'version 2.', + logger='root') + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/multi_scale_deform_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/multi_scale_deform_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..8696322b086872322185b6be4daf15f94d5981a0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/multi_scale_deform_attn.py @@ -0,0 +1,358 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
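To make the intended use of the module above concrete, here is a minimal sketch of ``ModulatedDeformConv2dPack`` (registered as ``DCNv2``) as a drop-in replacement for ``nn.Conv2d``; it assumes a CUDA device, the compiled ``_ext`` extension, and this vendored import path:

    import torch
    from custom_mmpkg.custom_mmcv.ops.modulated_deform_conv import \
        ModulatedDeformConv2dPack

    conv = ModulatedDeformConv2dPack(
        16, 32, kernel_size=3, padding=1, deform_groups=1).cuda()
    x = torch.randn(1, 16, 64, 64).cuda()
    # conv_offset predicts 3*k*k channels per deform group: 2*k*k sampling
    # offsets (zero-initialised, so the layer starts out as a plain conv)
    # plus k*k modulation masks squashed through a sigmoid.
    y = conv(x)  # -> (1, 32, 64, 64)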
+import math
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd.function import Function, once_differentiable
+
+from custom_mmpkg.custom_mmcv import deprecated_api_warning
+from custom_mmpkg.custom_mmcv.cnn import constant_init, xavier_init
+from custom_mmpkg.custom_mmcv.cnn.bricks.registry import ATTENTION
+from custom_mmpkg.custom_mmcv.runner import BaseModule
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
+
+
+class MultiScaleDeformableAttnFunction(Function):
+
+    @staticmethod
+    def forward(ctx, value, value_spatial_shapes, value_level_start_index,
+                sampling_locations, attention_weights, im2col_step):
+        """GPU version of multi-scale deformable attention.
+
+        Args:
+            value (Tensor): The value has shape
+                (bs, num_keys, num_heads, embed_dims//num_heads)
+            value_spatial_shapes (Tensor): Spatial shape of
+                each feature map, has shape (num_levels, 2),
+                last dimension 2 represent (h, w)
+            value_level_start_index (Tensor): The start index of each level,
+                has shape (num_levels, )
+            sampling_locations (Tensor): The location of sampling points,
+                has shape
+                (bs, num_queries, num_heads, num_levels, num_points, 2),
+                the last dimension 2 represent (x, y).
+            attention_weights (Tensor): The weight of sampling points used
+                when calculating the attention, has shape
+                (bs, num_queries, num_heads, num_levels, num_points).
+            im2col_step (int): The step used in image to column.
+
+        Returns:
+            Tensor: has shape (bs, num_queries, embed_dims)
+        """
+
+        ctx.im2col_step = im2col_step
+        output = ext_module.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step=ctx.im2col_step)
+        ctx.save_for_backward(value, value_spatial_shapes,
+                              value_level_start_index, sampling_locations,
+                              attention_weights)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        """GPU version of backward function.
+
+        Args:
+            grad_output (Tensor): Gradient
+                of output tensor of forward.
+
+        Returns:
+            Tuple[Tensor]: Gradient
+                of input tensors in forward.
+        """
+        value, value_spatial_shapes, value_level_start_index,\
+            sampling_locations, attention_weights = ctx.saved_tensors
+        grad_value = torch.zeros_like(value)
+        grad_sampling_loc = torch.zeros_like(sampling_locations)
+        grad_attn_weight = torch.zeros_like(attention_weights)
+
+        ext_module.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output.contiguous(),
+            grad_value,
+            grad_sampling_loc,
+            grad_attn_weight,
+            im2col_step=ctx.im2col_step)
+
+        return grad_value, None, None, \
+            grad_sampling_loc, grad_attn_weight, None
+
+
+def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes,
+                                        sampling_locations, attention_weights):
+    """CPU version of multi-scale deformable attention.
+
+    Args:
+        value (Tensor): The value has shape
+            (bs, num_keys, num_heads, embed_dims//num_heads)
+        value_spatial_shapes (Tensor): Spatial shape of
+            each feature map, has shape (num_levels, 2),
+            last dimension 2 represent (h, w)
+        sampling_locations (Tensor): The location of sampling points,
+            has shape
+            (bs, num_queries, num_heads, num_levels, num_points, 2),
+            the last dimension 2 represent (x, y).
+        attention_weights (Tensor): The weight of sampling points used
+            when calculating the attention, has shape
+            (bs, num_queries, num_heads, num_levels, num_points).
+
+    Returns:
+        Tensor: has shape (bs, num_queries, embed_dims)
+    """
+
+    bs, _, num_heads, embed_dims = value.shape
+    _, num_queries, num_heads, num_levels, num_points, _ =\
+        sampling_locations.shape
+    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes],
+                             dim=1)
+    sampling_grids = 2 * sampling_locations - 1
+    sampling_value_list = []
+    for level, (H_, W_) in enumerate(value_spatial_shapes):
+        # bs, H_*W_, num_heads, embed_dims ->
+        # bs, H_*W_, num_heads*embed_dims ->
+        # bs, num_heads*embed_dims, H_*W_ ->
+        # bs*num_heads, embed_dims, H_, W_
+        value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(
+            bs * num_heads, embed_dims, H_, W_)
+        # bs, num_queries, num_heads, num_points, 2 ->
+        # bs, num_heads, num_queries, num_points, 2 ->
+        # bs*num_heads, num_queries, num_points, 2
+        sampling_grid_l_ = sampling_grids[:, :, :,
+                                          level].transpose(1, 2).flatten(0, 1)
+        # bs*num_heads, embed_dims, num_queries, num_points
+        sampling_value_l_ = F.grid_sample(
+            value_l_,
+            sampling_grid_l_,
+            mode='bilinear',
+            padding_mode='zeros',
+            align_corners=False)
+        sampling_value_list.append(sampling_value_l_)
+    # (bs, num_queries, num_heads, num_levels, num_points) ->
+    # (bs, num_heads, num_queries, num_levels, num_points) ->
+    # (bs*num_heads, 1, num_queries, num_levels*num_points)
+    attention_weights = attention_weights.transpose(1, 2).reshape(
+        bs * num_heads, 1, num_queries, num_levels * num_points)
+    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) *
+              attention_weights).sum(-1).view(bs, num_heads * embed_dims,
+                                              num_queries)
+    return output.transpose(1, 2).contiguous()
+
+
+@ATTENTION.register_module()
+class MultiScaleDeformableAttention(BaseModule):
+    """An attention module used in Deformable-DETR.
+
+    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+    <https://arxiv.org/abs/2010.04159>`_.
+
+    Args:
+        embed_dims (int): The embedding dimension of Attention.
+            Default: 256.
+        num_heads (int): Parallel attention heads. Default: 8.
+        num_levels (int): The number of feature maps used in
+            Attention. Default: 4.
+        num_points (int): The number of sampling points for
+            each query in each head. Default: 4.
+        im2col_step (int): The step used in image_to_column.
+            Default: 64.
+        dropout (float): Dropout probability applied to the output
+            before the residual addition. Default: 0.1.
+        batch_first (bool): Key, Query and Value are shape of
+            (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default to False.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: None.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
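+
+    Example:
+        >>> # Illustrative usage sketch (not from the upstream docs); shapes
+        >>> # follow the Args above. On CPU tensors the module falls back to
+        >>> # the pure-PyTorch path, so no compiled extension is needed.
+        >>> attn = MultiScaleDeformableAttention(
+        ...     embed_dims=256, num_heads=8, num_levels=1, num_points=4)
+        >>> query = torch.rand(100, 2, 256)  # (num_query, bs, embed_dims)
+        >>> value = torch.rand(64, 2, 256)   # one 8x8 level, flattened
+        >>> reference_points = torch.rand(2, 100, 1, 2)
+        >>> out = attn(
+        ...     query,
+        ...     value=value,
+        ...     reference_points=reference_points,
+        ...     spatial_shapes=torch.tensor([[8, 8]]),
+        ...     level_start_index=torch.tensor([0]))
+        >>> tuple(out.shape)
+        (100, 2, 256)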
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + im2col_step=64, + dropout=0.1, + batch_first=False, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiScaleDeformableAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. 
+ key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + value = query + + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + + bs, num_query, _ = query.shape + bs, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + + value = self.value_proj(value) + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + value = value.view(bs, num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query).view( + bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_levels, + self.num_points) + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets \ + / offset_normalizer[None, None, None, :, None, :] + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.num_points \ + * reference_points[:, :, None, :, None, 2:] \ + * 0.5 + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 2 or 4, but get {reference_points.shape[-1]} instead.') + if torch.cuda.is_available() and value.is_cuda: + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + + output = self.output_proj(output) + + if not self.batch_first: + # (num_query, bs ,embed_dims) + output = output.permute(1, 0, 2) + + return self.dropout(output) + identity diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/nms.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/nms.py new file mode 100644 index 0000000000000000000000000000000000000000..080c0cf0f2ddef9c4d502b8011c85ed10eff94af --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/nms.py @@ -0,0 +1,417 @@ +import os + +import numpy as np +import torch + +from custom_mmpkg.custom_mmcv.utils import deprecated_api_warning +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['nms', 'softnms', 'nms_match', 'nms_rotated']) + + +# This function is modified from: https://github.com/pytorch/vision/ +class NMSop(torch.autograd.Function): + + @staticmethod + def forward(ctx, bboxes, scores, iou_threshold, offset, score_threshold, + max_num): + is_filtering_by_score = score_threshold > 0 + if is_filtering_by_score: + valid_mask = scores > 
score_threshold + bboxes, scores = bboxes[valid_mask], scores[valid_mask] + valid_inds = torch.nonzero( + valid_mask, as_tuple=False).squeeze(dim=1) + + inds = ext_module.nms( + bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) + + if max_num > 0: + inds = inds[:max_num] + if is_filtering_by_score: + inds = valid_inds[inds] + return inds + + @staticmethod + def symbolic(g, bboxes, scores, iou_threshold, offset, score_threshold, + max_num): + from ..onnx import is_custom_op_loaded + has_custom_op = is_custom_op_loaded() + # TensorRT nms plugin is aligned with original nms in ONNXRuntime + is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' + if has_custom_op and (not is_trt_backend): + return g.op( + 'mmcv::NonMaxSuppression', + bboxes, + scores, + iou_threshold_f=float(iou_threshold), + offset_i=int(offset)) + else: + from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze + from ..onnx.onnx_utils.symbolic_helper import _size_helper + + boxes = unsqueeze(g, bboxes, 0) + scores = unsqueeze(g, unsqueeze(g, scores, 0), 0) + + if max_num > 0: + max_num = g.op( + 'Constant', + value_t=torch.tensor(max_num, dtype=torch.long)) + else: + dim = g.op('Constant', value_t=torch.tensor(0)) + max_num = _size_helper(g, bboxes, dim) + max_output_per_class = max_num + iou_threshold = g.op( + 'Constant', + value_t=torch.tensor([iou_threshold], dtype=torch.float)) + score_threshold = g.op( + 'Constant', + value_t=torch.tensor([score_threshold], dtype=torch.float)) + nms_out = g.op('NonMaxSuppression', boxes, scores, + max_output_per_class, iou_threshold, + score_threshold) + return squeeze( + g, + select( + g, nms_out, 1, + g.op( + 'Constant', + value_t=torch.tensor([2], dtype=torch.long))), 1) + + +class SoftNMSop(torch.autograd.Function): + + @staticmethod + def forward(ctx, boxes, scores, iou_threshold, sigma, min_score, method, + offset): + dets = boxes.new_empty((boxes.size(0), 5), device='cpu') + inds = ext_module.softnms( + boxes.cpu(), + scores.cpu(), + dets.cpu(), + iou_threshold=float(iou_threshold), + sigma=float(sigma), + min_score=float(min_score), + method=int(method), + offset=int(offset)) + return dets, inds + + @staticmethod + def symbolic(g, boxes, scores, iou_threshold, sigma, min_score, method, + offset): + from packaging import version + assert version.parse(torch.__version__) >= version.parse('1.7.0') + nms_out = g.op( + 'mmcv::SoftNonMaxSuppression', + boxes, + scores, + iou_threshold_f=float(iou_threshold), + sigma_f=float(sigma), + min_score_f=float(min_score), + method_i=int(method), + offset_i=int(offset), + outputs=2) + return nms_out + + +@deprecated_api_warning({'iou_thr': 'iou_threshold'}) +def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1): + """Dispatch to either CPU or GPU NMS implementations. + + The input can be either torch tensor or numpy array. GPU NMS will be used + if the input is gpu tensor, otherwise CPU NMS + will be used. The returned type will always be the same as inputs. + + Arguments: + boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). + scores (torch.Tensor or np.ndarray): scores in shape (N, ). + iou_threshold (float): IoU threshold for NMS. + offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). + score_threshold (float): score threshold for NMS. + max_num (int): maximum number of boxes after NMS. + + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. 
+ + Example: + >>> boxes = np.array([[49.1, 32.4, 51.0, 35.9], + >>> [49.3, 32.9, 51.0, 35.3], + >>> [49.2, 31.8, 51.0, 35.4], + >>> [35.1, 11.5, 39.1, 15.7], + >>> [35.6, 11.8, 39.3, 14.2], + >>> [35.3, 11.5, 39.9, 14.5], + >>> [35.2, 11.7, 39.7, 15.7]], dtype=np.float32) + >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.5, 0.4, 0.3],\ + dtype=np.float32) + >>> iou_threshold = 0.6 + >>> dets, inds = nms(boxes, scores, iou_threshold) + >>> assert len(inds) == len(dets) == 3 + """ + assert isinstance(boxes, (torch.Tensor, np.ndarray)) + assert isinstance(scores, (torch.Tensor, np.ndarray)) + is_numpy = False + if isinstance(boxes, np.ndarray): + is_numpy = True + boxes = torch.from_numpy(boxes) + if isinstance(scores, np.ndarray): + scores = torch.from_numpy(scores) + assert boxes.size(1) == 4 + assert boxes.size(0) == scores.size(0) + assert offset in (0, 1) + + if torch.__version__ == 'parrots': + indata_list = [boxes, scores] + indata_dict = { + 'iou_threshold': float(iou_threshold), + 'offset': int(offset) + } + inds = ext_module.nms(*indata_list, **indata_dict) + else: + inds = NMSop.apply(boxes, scores, iou_threshold, offset, + score_threshold, max_num) + dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1) + if is_numpy: + dets = dets.cpu().numpy() + inds = inds.cpu().numpy() + return dets, inds + + +@deprecated_api_warning({'iou_thr': 'iou_threshold'}) +def soft_nms(boxes, + scores, + iou_threshold=0.3, + sigma=0.5, + min_score=1e-3, + method='linear', + offset=0): + """Dispatch to only CPU Soft NMS implementations. + + The input can be either a torch tensor or numpy array. + The returned type will always be the same as inputs. + + Arguments: + boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). + scores (torch.Tensor or np.ndarray): scores in shape (N, ). + iou_threshold (float): IoU threshold for NMS. + sigma (float): hyperparameter for gaussian method + min_score (float): score filter threshold + method (str): either 'linear' or 'gaussian' + offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). + + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. 
+ + Example: + >>> boxes = np.array([[4., 3., 5., 3.], + >>> [4., 3., 5., 4.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.]], dtype=np.float32) + >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.4, 0.0], dtype=np.float32) + >>> iou_threshold = 0.6 + >>> dets, inds = soft_nms(boxes, scores, iou_threshold, sigma=0.5) + >>> assert len(inds) == len(dets) == 5 + """ + + assert isinstance(boxes, (torch.Tensor, np.ndarray)) + assert isinstance(scores, (torch.Tensor, np.ndarray)) + is_numpy = False + if isinstance(boxes, np.ndarray): + is_numpy = True + boxes = torch.from_numpy(boxes) + if isinstance(scores, np.ndarray): + scores = torch.from_numpy(scores) + assert boxes.size(1) == 4 + assert boxes.size(0) == scores.size(0) + assert offset in (0, 1) + method_dict = {'naive': 0, 'linear': 1, 'gaussian': 2} + assert method in method_dict.keys() + + if torch.__version__ == 'parrots': + dets = boxes.new_empty((boxes.size(0), 5), device='cpu') + indata_list = [boxes.cpu(), scores.cpu(), dets.cpu()] + indata_dict = { + 'iou_threshold': float(iou_threshold), + 'sigma': float(sigma), + 'min_score': min_score, + 'method': method_dict[method], + 'offset': int(offset) + } + inds = ext_module.softnms(*indata_list, **indata_dict) + else: + dets, inds = SoftNMSop.apply(boxes.cpu(), scores.cpu(), + float(iou_threshold), float(sigma), + float(min_score), method_dict[method], + int(offset)) + + dets = dets[:inds.size(0)] + + if is_numpy: + dets = dets.cpu().numpy() + inds = inds.cpu().numpy() + return dets, inds + else: + return dets.to(device=boxes.device), inds.to(device=boxes.device) + + +def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False): + """Performs non-maximum suppression in a batched fashion. + + Modified from https://github.com/pytorch/vision/blob + /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39. + In order to perform NMS independently per class, we add an offset to all + the boxes. The offset is dependent only on the class idx, and is large + enough so that boxes from different classes do not overlap. + + Arguments: + boxes (torch.Tensor): boxes in shape (N, 4). + scores (torch.Tensor): scores in shape (N, ). + idxs (torch.Tensor): each index value correspond to a bbox cluster, + and NMS will not be applied between elements of different idxs, + shape (N, ). + nms_cfg (dict): specify nms type and other parameters like iou_thr. + Possible keys includes the following. + + - iou_thr (float): IoU threshold used for NMS. + - split_thr (float): threshold number of boxes. In some cases the + number of boxes is large (e.g., 200k). To avoid OOM during + training, the users could set `split_thr` to a small value. + If the number of boxes is greater than the threshold, it will + perform NMS on each group of boxes separately and sequentially. + Defaults to 10000. + class_agnostic (bool): if true, nms is class agnostic, + i.e. IoU thresholding happens over all boxes, + regardless of the predicted class. + + Returns: + tuple: kept dets and indice. 
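+
+    Example:
+        >>> # Illustrative sketch (not from the upstream docs); requires the
+        >>> # compiled `_ext` nms op.
+        >>> boxes = torch.tensor([[0., 0., 10., 10.],
+        ...                       [1., 1., 11., 11.],
+        ...                       [20., 20., 30., 30.]])
+        >>> scores = torch.tensor([0.9, 0.8, 0.7])
+        >>> idxs = torch.tensor([0, 0, 1])  # class index per box
+        >>> dets, keep = batched_nms(boxes, scores, idxs,
+        ...                          dict(type='nms', iou_threshold=0.5))
+        >>> # box 1 is suppressed by box 0 within class 0; box 2 survives
+        >>> len(keep)
+        2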
+ """ + nms_cfg_ = nms_cfg.copy() + class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic) + if class_agnostic: + boxes_for_nms = boxes + else: + max_coordinate = boxes.max() + offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) + boxes_for_nms = boxes + offsets[:, None] + + nms_type = nms_cfg_.pop('type', 'nms') + nms_op = eval(nms_type) + + split_thr = nms_cfg_.pop('split_thr', 10000) + # Won't split to multiple nms nodes when exporting to onnx + if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export(): + dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_) + boxes = boxes[keep] + # -1 indexing works abnormal in TensorRT + # This assumes `dets` has 5 dimensions where + # the last dimension is score. + # TODO: more elegant way to handle the dimension issue. + # Some type of nms would reweight the score, such as SoftNMS + scores = dets[:, 4] + else: + max_num = nms_cfg_.pop('max_num', -1) + total_mask = scores.new_zeros(scores.size(), dtype=torch.bool) + # Some type of nms would reweight the score, such as SoftNMS + scores_after_nms = scores.new_zeros(scores.size()) + for id in torch.unique(idxs): + mask = (idxs == id).nonzero(as_tuple=False).view(-1) + dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_) + total_mask[mask[keep]] = True + scores_after_nms[mask[keep]] = dets[:, -1] + keep = total_mask.nonzero(as_tuple=False).view(-1) + + scores, inds = scores_after_nms[keep].sort(descending=True) + keep = keep[inds] + boxes = boxes[keep] + + if max_num > 0: + keep = keep[:max_num] + boxes = boxes[:max_num] + scores = scores[:max_num] + + return torch.cat([boxes, scores[:, None]], -1), keep + + +def nms_match(dets, iou_threshold): + """Matched dets into different groups by NMS. + + NMS match is Similar to NMS but when a bbox is suppressed, nms match will + record the indice of suppressed bbox and form a group with the indice of + kept bbox. In each group, indice is sorted as score order. + + Arguments: + dets (torch.Tensor | np.ndarray): Det boxes with scores, shape (N, 5). + iou_thr (float): IoU thresh for NMS. + + Returns: + List[torch.Tensor | np.ndarray]: The outer list corresponds different + matched group, the inner Tensor corresponds the indices for a group + in score order. + """ + if dets.shape[0] == 0: + matched = [] + else: + assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \ + f'but get {dets.shape}' + if isinstance(dets, torch.Tensor): + dets_t = dets.detach().cpu() + else: + dets_t = torch.from_numpy(dets) + indata_list = [dets_t] + indata_dict = {'iou_threshold': float(iou_threshold)} + matched = ext_module.nms_match(*indata_list, **indata_dict) + if torch.__version__ == 'parrots': + matched = matched.tolist() + + if isinstance(dets, torch.Tensor): + return [dets.new_tensor(m, dtype=torch.long) for m in matched] + else: + return [np.array(m, dtype=np.int) for m in matched] + + +def nms_rotated(dets, scores, iou_threshold, labels=None): + """Performs non-maximum suppression (NMS) on the rotated boxes according to + their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Args: + boxes (Tensor): Rotated boxes in shape (N, 5). They are expected to \ + be in (x_ctr, y_ctr, width, height, angle_radian) format. + scores (Tensor): scores in shape (N, ). + iou_threshold (float): IoU thresh for NMS. + labels (Tensor): boxes' label in shape (N,). 
+ + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. + """ + if dets.shape[0] == 0: + return dets, None + multi_label = labels is not None + if multi_label: + dets_wl = torch.cat((dets, labels.unsqueeze(1)), 1) + else: + dets_wl = dets + _, order = scores.sort(0, descending=True) + dets_sorted = dets_wl.index_select(0, order) + + if torch.__version__ == 'parrots': + keep_inds = ext_module.nms_rotated( + dets_wl, + scores, + order, + dets_sorted, + iou_threshold=iou_threshold, + multi_label=multi_label) + else: + keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted, + iou_threshold, multi_label) + dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)), + dim=1) + return dets, keep_inds diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/pixel_group.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/pixel_group.py new file mode 100644 index 0000000000000000000000000000000000000000..2143c75f835a467c802fc3c37ecd3ac0f85bcda4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/pixel_group.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['pixel_group']) + + +def pixel_group(score, mask, embedding, kernel_label, kernel_contour, + kernel_region_num, distance_threshold): + """Group pixels into text instances, which is widely used text detection + methods. + + Arguments: + score (np.array or Tensor): The foreground score with size hxw. + mask (np.array or Tensor): The foreground mask with size hxw. + embedding (np.array or Tensor): The embedding with size hxwxc to + distinguish instances. + kernel_label (np.array or Tensor): The instance kernel index with + size hxw. + kernel_contour (np.array or Tensor): The kernel contour with size hxw. + kernel_region_num (int): The instance kernel region number. + distance_threshold (float): The embedding distance threshold between + kernel and pixel in one instance. + + Returns: + pixel_assignment (List[List[float]]): The instance coordinate list. + Each element consists of averaged confidence, pixel number, and + coordinates (x_i, y_i for all pixels) in order. 
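+
+    Example:
+        >>> # Illustrative call sketch (not from the upstream docs); dtypes
+        >>> # are assumptions and the compiled `_ext` op must be available.
+        >>> score = np.random.rand(32, 32).astype(np.float32)
+        >>> mask = score > 0.5
+        >>> embedding = np.random.rand(32, 32, 4).astype(np.float32)
+        >>> kernel_label = np.zeros((32, 32), dtype=np.int32)
+        >>> kernel_contour = np.zeros((32, 32), dtype=np.uint8)
+        >>> groups = pixel_group(score, mask, embedding, kernel_label,
+        ...                      kernel_contour, 1, 0.8)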
+ """ + assert isinstance(score, (torch.Tensor, np.ndarray)) + assert isinstance(mask, (torch.Tensor, np.ndarray)) + assert isinstance(embedding, (torch.Tensor, np.ndarray)) + assert isinstance(kernel_label, (torch.Tensor, np.ndarray)) + assert isinstance(kernel_contour, (torch.Tensor, np.ndarray)) + assert isinstance(kernel_region_num, int) + assert isinstance(distance_threshold, float) + + if isinstance(score, np.ndarray): + score = torch.from_numpy(score) + if isinstance(mask, np.ndarray): + mask = torch.from_numpy(mask) + if isinstance(embedding, np.ndarray): + embedding = torch.from_numpy(embedding) + if isinstance(kernel_label, np.ndarray): + kernel_label = torch.from_numpy(kernel_label) + if isinstance(kernel_contour, np.ndarray): + kernel_contour = torch.from_numpy(kernel_contour) + + if torch.__version__ == 'parrots': + label = ext_module.pixel_group( + score, + mask, + embedding, + kernel_label, + kernel_contour, + kernel_region_num=kernel_region_num, + distance_threshold=distance_threshold) + label = label.tolist() + label = label[0] + list_index = kernel_region_num + pixel_assignment = [] + for x in range(kernel_region_num): + pixel_assignment.append( + np.array( + label[list_index:list_index + int(label[x])], + dtype=np.float)) + list_index = list_index + int(label[x]) + else: + pixel_assignment = ext_module.pixel_group(score, mask, embedding, + kernel_label, kernel_contour, + kernel_region_num, + distance_threshold) + return pixel_assignment diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/point_sample.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/point_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..0f09ce3ce366b9f5050f04a5f611a338484b30e7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/point_sample.py @@ -0,0 +1,336 @@ +# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa + +from os import path as osp + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.modules.utils import _pair +from torch.onnx.operators import shape_as_tensor + + +def bilinear_grid_sample(im, grid, align_corners=False): + """Given an input and a flow-field grid, computes the output using input + values and pixel locations from grid. Supported only bilinear interpolation + method to sample the input pixels. + + Args: + im (torch.Tensor): Input feature map, shape (N, C, H, W) + grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2) + align_corners {bool}: If set to True, the extrema (-1 and 1) are + considered as referring to the center points of the input’s + corner pixels. If set to False, they are instead considered as + referring to the corner points of the input’s corner pixels, + making the sampling more resolution agnostic. 
+ Returns: + torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg) + """ + n, c, h, w = im.shape + gn, gh, gw, _ = grid.shape + assert n == gn + + x = grid[:, :, :, 0] + y = grid[:, :, :, 1] + + if align_corners: + x = ((x + 1) / 2) * (w - 1) + y = ((y + 1) / 2) * (h - 1) + else: + x = ((x + 1) * w - 1) / 2 + y = ((y + 1) * h - 1) / 2 + + x = x.view(n, -1) + y = y.view(n, -1) + + x0 = torch.floor(x).long() + y0 = torch.floor(y).long() + x1 = x0 + 1 + y1 = y0 + 1 + + wa = ((x1 - x) * (y1 - y)).unsqueeze(1) + wb = ((x1 - x) * (y - y0)).unsqueeze(1) + wc = ((x - x0) * (y1 - y)).unsqueeze(1) + wd = ((x - x0) * (y - y0)).unsqueeze(1) + + # Apply default for grid_sample function zero padding + im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0) + padded_h = h + 2 + padded_w = w + 2 + # save points positions after padding + x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1 + + # Clip coordinates to padded image size + x0 = torch.where(x0 < 0, torch.tensor(0), x0) + x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0) + x1 = torch.where(x1 < 0, torch.tensor(0), x1) + x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1) + y0 = torch.where(y0 < 0, torch.tensor(0), y0) + y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0) + y1 = torch.where(y1 < 0, torch.tensor(0), y1) + y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1) + + im_padded = im_padded.view(n, c, -1) + + x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) + x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) + x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) + x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) + + Ia = torch.gather(im_padded, 2, x0_y0) + Ib = torch.gather(im_padded, 2, x0_y1) + Ic = torch.gather(im_padded, 2, x1_y0) + Id = torch.gather(im_padded, 2, x1_y1) + + return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) + + +def is_in_onnx_export_without_custom_ops(): + from custom_mmpkg.custom_mmcv.ops import get_onnxruntime_op_path + ort_custom_op_path = get_onnxruntime_op_path() + return torch.onnx.is_in_onnx_export( + ) and not osp.exists(ort_custom_op_path) + + +def normalize(grid): + """Normalize input grid from [-1, 1] to [0, 1] + Args: + grid (Tensor): The grid to be normalize, range [-1, 1]. + Returns: + Tensor: Normalized grid, range [0, 1]. + """ + + return (grid + 1.0) / 2.0 + + +def denormalize(grid): + """Denormalize input grid from range [0, 1] to [-1, 1] + Args: + grid (Tensor): The grid to be denormalize, range [0, 1]. + Returns: + Tensor: Denormalized grid, range [-1, 1]. + """ + + return grid * 2.0 - 1.0 + + +def generate_grid(num_grid, size, device): + """Generate regular square grid of points in [0, 1] x [0, 1] coordinate + space. + + Args: + num_grid (int): The number of grids to sample, one for each region. + size (tuple(int, int)): The side size of the regular grid. + device (torch.device): Desired device of returned tensor. + + Returns: + (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that + contains coordinates for the regular grids. + """ + + affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device) + grid = F.affine_grid( + affine_trans, torch.Size((1, 1, *size)), align_corners=False) + grid = normalize(grid) + return grid.view(1, -1, 2).expand(num_grid, -1, -1) + + +def rel_roi_point_to_abs_img_point(rois, rel_roi_points): + """Convert roi based relative point coordinates to image based absolute + point coordinates. 
+ + Args: + rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5) + rel_roi_points (Tensor): Point coordinates inside RoI, relative to + RoI, location, range (0, 1), shape (N, P, 2) + Returns: + Tensor: Image based absolute point coordinates, shape (N, P, 2) + """ + + with torch.no_grad(): + assert rel_roi_points.size(0) == rois.size(0) + assert rois.dim() == 2 + assert rel_roi_points.dim() == 3 + assert rel_roi_points.size(2) == 2 + # remove batch idx + if rois.size(1) == 5: + rois = rois[:, 1:] + abs_img_points = rel_roi_points.clone() + # To avoid an error during exporting to onnx use independent + # variables instead inplace computation + xs = abs_img_points[:, :, 0] * (rois[:, None, 2] - rois[:, None, 0]) + ys = abs_img_points[:, :, 1] * (rois[:, None, 3] - rois[:, None, 1]) + xs += rois[:, None, 0] + ys += rois[:, None, 1] + abs_img_points = torch.stack([xs, ys], dim=2) + return abs_img_points + + +def get_shape_from_feature_map(x): + """Get spatial resolution of input feature map considering exporting to + onnx mode. + + Args: + x (torch.Tensor): Input tensor, shape (N, C, H, W) + Returns: + torch.Tensor: Spatial resolution (width, height), shape (1, 1, 2) + """ + if torch.onnx.is_in_onnx_export(): + img_shape = shape_as_tensor(x)[2:].flip(0).view(1, 1, 2).to( + x.device).float() + else: + img_shape = torch.tensor(x.shape[2:]).flip(0).view(1, 1, 2).to( + x.device).float() + return img_shape + + +def abs_img_point_to_rel_img_point(abs_img_points, img, spatial_scale=1.): + """Convert image based absolute point coordinates to image based relative + coordinates for sampling. + + Args: + abs_img_points (Tensor): Image based absolute point coordinates, + shape (N, P, 2) + img (tuple/Tensor): (height, width) of image or feature map. + spatial_scale (float): Scale points by this factor. Default: 1. + + Returns: + Tensor: Image based relative point coordinates for sampling, + shape (N, P, 2) + """ + + assert (isinstance(img, tuple) and len(img) == 2) or \ + (isinstance(img, torch.Tensor) and len(img.shape) == 4) + + if isinstance(img, tuple): + h, w = img + scale = torch.tensor([w, h], + dtype=torch.float, + device=abs_img_points.device) + scale = scale.view(1, 1, 2) + else: + scale = get_shape_from_feature_map(img) + + return abs_img_points / scale * spatial_scale + + +def rel_roi_point_to_rel_img_point(rois, + rel_roi_points, + img, + spatial_scale=1.): + """Convert roi based relative point coordinates to image based absolute + point coordinates. + + Args: + rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5) + rel_roi_points (Tensor): Point coordinates inside RoI, relative to + RoI, location, range (0, 1), shape (N, P, 2) + img (tuple/Tensor): (height, width) of image or feature map. + spatial_scale (float): Scale points by this factor. Default: 1. + + Returns: + Tensor: Image based relative point coordinates for sampling, + shape (N, P, 2) + """ + + abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points) + rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img, + spatial_scale) + + return rel_img_point + + +def point_sample(input, points, align_corners=False, **kwargs): + """A wrapper around :func:`grid_sample` to support 3D point_coords tensors + Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to + lie inside ``[0, 1] x [0, 1]`` square. + + Args: + input (Tensor): Feature map, shape (N, C, H, W). + points (Tensor): Image based absolute point coordinates (normalized), + range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2). 
+ align_corners (bool): Whether align_corners. Default: False + + Returns: + Tensor: Features of `point` on `input`, shape (N, C, P) or + (N, C, Hgrid, Wgrid). + """ + + add_dim = False + if points.dim() == 3: + add_dim = True + points = points.unsqueeze(2) + if is_in_onnx_export_without_custom_ops(): + # If custom ops for onnx runtime not compiled use python + # implementation of grid_sample function to make onnx graph + # with supported nodes + output = bilinear_grid_sample( + input, denormalize(points), align_corners=align_corners) + else: + output = F.grid_sample( + input, denormalize(points), align_corners=align_corners, **kwargs) + if add_dim: + output = output.squeeze(3) + return output + + +class SimpleRoIAlign(nn.Module): + + def __init__(self, output_size, spatial_scale, aligned=True): + """Simple RoI align in PointRend, faster than standard RoIAlign. + + Args: + output_size (tuple[int]): h, w + spatial_scale (float): scale the input boxes by this number + aligned (bool): if False, use the legacy implementation in + MMDetection, align_corners=True will be used in F.grid_sample. + If True, align the results more perfectly. + """ + + super(SimpleRoIAlign, self).__init__() + self.output_size = _pair(output_size) + self.spatial_scale = float(spatial_scale) + # to be consistent with other RoI ops + self.use_torchvision = False + self.aligned = aligned + + def forward(self, features, rois): + num_imgs = features.size(0) + num_rois = rois.size(0) + rel_roi_points = generate_grid( + num_rois, self.output_size, device=rois.device) + + if torch.onnx.is_in_onnx_export(): + rel_img_points = rel_roi_point_to_rel_img_point( + rois, rel_roi_points, features, self.spatial_scale) + rel_img_points = rel_img_points.reshape(num_imgs, -1, + *rel_img_points.shape[1:]) + point_feats = point_sample( + features, rel_img_points, align_corners=not self.aligned) + point_feats = point_feats.transpose(1, 2) + else: + point_feats = [] + for batch_ind in range(num_imgs): + # unravel batch dim + feat = features[batch_ind].unsqueeze(0) + inds = (rois[:, 0].long() == batch_ind) + if inds.any(): + rel_img_points = rel_roi_point_to_rel_img_point( + rois[inds], rel_roi_points[inds], feat, + self.spatial_scale).unsqueeze(0) + point_feat = point_sample( + feat, rel_img_points, align_corners=not self.aligned) + point_feat = point_feat.squeeze(0).transpose(0, 1) + point_feats.append(point_feat) + + point_feats = torch.cat(point_feats, dim=0) + + channels = features.size(1) + roi_feats = point_feats.reshape(num_rois, channels, *self.output_size) + + return roi_feats + + def __repr__(self): + format_str = self.__class__.__name__ + format_str += '(output_size={}, spatial_scale={}'.format( + self.output_size, self.spatial_scale) + return format_str diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_in_boxes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_in_boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..4003173a53052161dbcd687a2fa1d755642fdab8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_in_boxes.py @@ -0,0 +1,133 @@ +import torch + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward', + 'points_in_boxes_all_forward' +]) + + +def points_in_boxes_part(points, boxes): + """Find the box in which each point is (CUDA). 
+ + Args: + points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate + boxes (torch.Tensor): [B, T, 7], + num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in + LiDAR/DEPTH coordinate, (x, y, z) is the bottom center + + Returns: + box_idxs_of_pts (torch.Tensor): (B, M), default background = -1 + """ + assert points.shape[0] == boxes.shape[0], \ + 'Points and boxes should have the same batch size, ' \ + f'but got {points.shape[0]} and {boxes.shape[0]}' + assert boxes.shape[2] == 7, \ + 'boxes dimension should be 7, ' \ + f'but got unexpected shape {boxes.shape[2]}' + assert points.shape[2] == 3, \ + 'points dimension should be 3, ' \ + f'but got unexpected shape {points.shape[2]}' + batch_size, num_points, _ = points.shape + + box_idxs_of_pts = points.new_zeros((batch_size, num_points), + dtype=torch.int).fill_(-1) + + # If manually put the tensor 'points' or 'boxes' on a device + # which is not the current device, some temporary variables + # will be created on the current device in the cuda op, + # and the output will be incorrect. + # Therefore, we force the current device to be the same + # as the device of the tensors if it was not. + # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305 + # for the incorrect output before the fix. + points_device = points.get_device() + assert points_device == boxes.get_device(), \ + 'Points and boxes should be put on the same device' + if torch.cuda.current_device() != points_device: + torch.cuda.set_device(points_device) + + ext_module.points_in_boxes_part_forward(boxes.contiguous(), + points.contiguous(), + box_idxs_of_pts) + + return box_idxs_of_pts + + +def points_in_boxes_cpu(points, boxes): + """Find all boxes in which each point is (CPU). The CPU version of + :meth:`points_in_boxes_all`. + + Args: + points (torch.Tensor): [B, M, 3], [x, y, z] in + LiDAR/DEPTH coordinate + boxes (torch.Tensor): [B, T, 7], + num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], + (x, y, z) is the bottom center. + + Returns: + box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. + """ + assert points.shape[0] == boxes.shape[0], \ + 'Points and boxes should have the same batch size, ' \ + f'but got {points.shape[0]} and {boxes.shape[0]}' + assert boxes.shape[2] == 7, \ + 'boxes dimension should be 7, ' \ + f'but got unexpected shape {boxes.shape[2]}' + assert points.shape[2] == 3, \ + 'points dimension should be 3, ' \ + f'but got unexpected shape {points.shape[2]}' + batch_size, num_points, _ = points.shape + num_boxes = boxes.shape[1] + + point_indices = points.new_zeros((batch_size, num_boxes, num_points), + dtype=torch.int) + for b in range(batch_size): + ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(), + points[b].float().contiguous(), + point_indices[b]) + point_indices = point_indices.transpose(1, 2) + + return point_indices + + +def points_in_boxes_all(points, boxes): + """Find all boxes in which each point is (CUDA). + + Args: + points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate + boxes (torch.Tensor): [B, T, 7], + num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], + (x, y, z) is the bottom center. + + Returns: + box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. 
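+
+    Example:
+        >>> # Illustrative sketch (not from the upstream docs); requires the
+        >>> # compiled CUDA op and CUDA tensors.
+        >>> points = torch.rand(1, 128, 3).cuda()  # (B, M, 3)
+        >>> boxes = torch.rand(1, 4, 7).cuda()     # (B, T, 7)
+        >>> idx = points_in_boxes_all(points, boxes)
+        >>> tuple(idx.shape)  # (B, M, T)
+        (1, 128, 4)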
+ """ + assert boxes.shape[0] == points.shape[0], \ + 'Points and boxes should have the same batch size, ' \ + f'but got {boxes.shape[0]} and {boxes.shape[0]}' + assert boxes.shape[2] == 7, \ + 'boxes dimension should be 7, ' \ + f'but got unexpected shape {boxes.shape[2]}' + assert points.shape[2] == 3, \ + 'points dimension should be 3, ' \ + f'but got unexpected shape {points.shape[2]}' + batch_size, num_points, _ = points.shape + num_boxes = boxes.shape[1] + + box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), + dtype=torch.int).fill_(0) + + # Same reason as line 25-32 + points_device = points.get_device() + assert points_device == boxes.get_device(), \ + 'Points and boxes should be put on the same device' + if torch.cuda.current_device() != points_device: + torch.cuda.set_device(points_device) + + ext_module.points_in_boxes_all_forward(boxes.contiguous(), + points.contiguous(), + box_idxs_of_pts) + + return box_idxs_of_pts diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..1df321530990289ebfe426434635351b3687dce6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/points_sampler.py @@ -0,0 +1,177 @@ +from typing import List + +import torch +from torch import nn as nn + +from custom_mmpkg.custom_mmcv.runner import force_fp32 +from .furthest_point_sample import (furthest_point_sample, + furthest_point_sample_with_dist) + + +def calc_square_dist(point_feat_a, point_feat_b, norm=True): + """Calculating square distance between a and b. + + Args: + point_feat_a (Tensor): (B, N, C) Feature vector of each point. + point_feat_b (Tensor): (B, M, C) Feature vector of each point. + norm (Bool, optional): Whether to normalize the distance. + Default: True. + + Returns: + Tensor: (B, N, M) Distance between each pair points. + """ + num_channel = point_feat_a.shape[-1] + # [bs, n, 1] + a_square = torch.sum(point_feat_a.unsqueeze(dim=2).pow(2), dim=-1) + # [bs, 1, m] + b_square = torch.sum(point_feat_b.unsqueeze(dim=1).pow(2), dim=-1) + + corr_matrix = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2)) + + dist = a_square + b_square - 2 * corr_matrix + if norm: + dist = torch.sqrt(dist) / num_channel + return dist + + +def get_sampler_cls(sampler_type): + """Get the type and mode of points sampler. + + Args: + sampler_type (str): The type of points sampler. + The valid value are "D-FPS", "F-FPS", or "FS". + + Returns: + class: Points sampler type. + """ + sampler_mappings = { + 'D-FPS': DFPSSampler, + 'F-FPS': FFPSSampler, + 'FS': FSSampler, + } + try: + return sampler_mappings[sampler_type] + except KeyError: + raise KeyError( + f'Supported `sampler_type` are {sampler_mappings.keys()}, but got \ + {sampler_type}') + + +class PointsSampler(nn.Module): + """Points sampling. + + Args: + num_point (list[int]): Number of sample points. + fps_mod_list (list[str], optional): Type of FPS method, valid mod + ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS']. + F-FPS: using feature distances for FPS. + D-FPS: using Euclidean distances of points for FPS. + FS: using F-FPS and D-FPS simultaneously. + fps_sample_range_list (list[int], optional): + Range of points to apply FPS. Default: [-1]. 
+ """ + + def __init__(self, + num_point: List[int], + fps_mod_list: List[str] = ['D-FPS'], + fps_sample_range_list: List[int] = [-1]): + super().__init__() + # FPS would be applied to different fps_mod in the list, + # so the length of the num_point should be equal to + # fps_mod_list and fps_sample_range_list. + assert len(num_point) == len(fps_mod_list) == len( + fps_sample_range_list) + self.num_point = num_point + self.fps_sample_range_list = fps_sample_range_list + self.samplers = nn.ModuleList() + for fps_mod in fps_mod_list: + self.samplers.append(get_sampler_cls(fps_mod)()) + self.fp16_enabled = False + + @force_fp32() + def forward(self, points_xyz, features): + """ + Args: + points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. + features (Tensor): (B, C, N) Descriptors of the features. + + Returns: + Tensor: (B, npoint, sample_num) Indices of sampled points. + """ + indices = [] + last_fps_end_index = 0 + + for fps_sample_range, sampler, npoint in zip( + self.fps_sample_range_list, self.samplers, self.num_point): + assert fps_sample_range < points_xyz.shape[1] + + if fps_sample_range == -1: + sample_points_xyz = points_xyz[:, last_fps_end_index:] + if features is not None: + sample_features = features[:, :, last_fps_end_index:] + else: + sample_features = None + else: + sample_points_xyz = \ + points_xyz[:, last_fps_end_index:fps_sample_range] + if features is not None: + sample_features = features[:, :, last_fps_end_index: + fps_sample_range] + else: + sample_features = None + + fps_idx = sampler(sample_points_xyz.contiguous(), sample_features, + npoint) + + indices.append(fps_idx + last_fps_end_index) + last_fps_end_index += fps_sample_range + indices = torch.cat(indices, dim=1) + + return indices + + +class DFPSSampler(nn.Module): + """Using Euclidean distances of points for FPS.""" + + def __init__(self): + super().__init__() + + def forward(self, points, features, npoint): + """Sampling points with D-FPS.""" + fps_idx = furthest_point_sample(points.contiguous(), npoint) + return fps_idx + + +class FFPSSampler(nn.Module): + """Using feature distances for FPS.""" + + def __init__(self): + super().__init__() + + def forward(self, points, features, npoint): + """Sampling points with F-FPS.""" + assert features is not None, \ + 'feature input to FFPS_Sampler should not be None' + features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2) + features_dist = calc_square_dist( + features_for_fps, features_for_fps, norm=False) + fps_idx = furthest_point_sample_with_dist(features_dist, npoint) + return fps_idx + + +class FSSampler(nn.Module): + """Using F-FPS and D-FPS simultaneously.""" + + def __init__(self): + super().__init__() + + def forward(self, points, features, npoint): + """Sampling points with FS_Sampling.""" + assert features is not None, \ + 'feature input to FS_Sampler should not be None' + ffps_sampler = FFPSSampler() + dfps_sampler = DFPSSampler() + fps_idx_ffps = ffps_sampler(points, features, npoint) + fps_idx_dfps = dfps_sampler(points, features, npoint) + fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1) + return fps_idx diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/psa_mask.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/psa_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..cdf14e62b50e8d4dd6856c94333c703bcc4c9ab6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/psa_mask.py @@ -0,0 +1,92 @@ +# Modified 
from https://github.com/hszhao/semseg/blob/master/lib/psa +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', + ['psamask_forward', 'psamask_backward']) + + +class PSAMaskFunction(Function): + + @staticmethod + def symbolic(g, input, psa_type, mask_size): + return g.op( + 'mmcv::MMCVPSAMask', + input, + psa_type_i=psa_type, + mask_size_i=mask_size) + + @staticmethod + def forward(ctx, input, psa_type, mask_size): + ctx.psa_type = psa_type + ctx.mask_size = _pair(mask_size) + ctx.save_for_backward(input) + + h_mask, w_mask = ctx.mask_size + batch_size, channels, h_feature, w_feature = input.size() + assert channels == h_mask * w_mask + output = input.new_zeros( + (batch_size, h_feature * w_feature, h_feature, w_feature)) + + ext_module.psamask_forward( + input, + output, + psa_type=psa_type, + num_=batch_size, + h_feature=h_feature, + w_feature=w_feature, + h_mask=h_mask, + w_mask=w_mask, + half_h_mask=(h_mask - 1) // 2, + half_w_mask=(w_mask - 1) // 2) + return output + + @staticmethod + def backward(ctx, grad_output): + input = ctx.saved_tensors[0] + psa_type = ctx.psa_type + h_mask, w_mask = ctx.mask_size + batch_size, channels, h_feature, w_feature = input.size() + grad_input = grad_output.new_zeros( + (batch_size, channels, h_feature, w_feature)) + ext_module.psamask_backward( + grad_output, + grad_input, + psa_type=psa_type, + num_=batch_size, + h_feature=h_feature, + w_feature=w_feature, + h_mask=h_mask, + w_mask=w_mask, + half_h_mask=(h_mask - 1) // 2, + half_w_mask=(w_mask - 1) // 2) + return grad_input, None, None, None + + +psa_mask = PSAMaskFunction.apply + + +class PSAMask(nn.Module): + + def __init__(self, psa_type, mask_size=None): + super(PSAMask, self).__init__() + assert psa_type in ['collect', 'distribute'] + if psa_type == 'collect': + psa_type_enum = 0 + else: + psa_type_enum = 1 + self.psa_type_enum = psa_type_enum + self.mask_size = mask_size + self.psa_type = psa_type + + def forward(self, input): + return psa_mask(input, self.psa_type_enum, self.mask_size) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(psa_type={self.psa_type}, ' + s += f'mask_size={self.mask_size})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..0755aefc66e67233ceae0f4b77948301c443e9fb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
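+# Usage sketch (illustrative, not part of the upstream file): the
+# `roi_align` functional defined below pools NCHW features over
+# (batch_idx, x1, y1, x2, y2) rois and needs the compiled `_ext` op, e.g.
+#   feats = torch.rand(2, 16, 32, 32)
+#   rois = torch.tensor([[0., 4., 4., 20., 20.]])
+#   out = roi_align(feats, rois, (7, 7), 1.0, 0, 'avg', True)  # (1, 16, 7, 7)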
+import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import deprecated_api_warning, ext_loader + +ext_module = ext_loader.load_ext('_ext', + ['roi_align_forward', 'roi_align_backward']) + + +class RoIAlignFunction(Function): + + @staticmethod + def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, + pool_mode, aligned): + from ..onnx import is_custom_op_loaded + has_custom_op = is_custom_op_loaded() + if has_custom_op: + return g.op( + 'mmcv::MMCVRoiAlign', + input, + rois, + output_height_i=output_size[0], + output_width_i=output_size[1], + spatial_scale_f=spatial_scale, + sampling_ratio_i=sampling_ratio, + mode_s=pool_mode, + aligned_i=aligned) + else: + from torch.onnx.symbolic_opset9 import sub, squeeze + from torch.onnx.symbolic_helper import _slice_helper + from torch.onnx import TensorProtoDataType + # batch_indices = rois[:, 0].long() + batch_indices = _slice_helper( + g, rois, axes=[1], starts=[0], ends=[1]) + batch_indices = squeeze(g, batch_indices, 1) + batch_indices = g.op( + 'Cast', batch_indices, to_i=TensorProtoDataType.INT64) + # rois = rois[:, 1:] + rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5]) + if aligned: + # rois -= 0.5/spatial_scale + aligned_offset = g.op( + 'Constant', + value_t=torch.tensor([0.5 / spatial_scale], + dtype=torch.float32)) + rois = sub(g, rois, aligned_offset) + # roi align + return g.op( + 'RoiAlign', + input, + rois, + batch_indices, + output_height_i=output_size[0], + output_width_i=output_size[1], + spatial_scale_f=spatial_scale, + sampling_ratio_i=max(0, sampling_ratio), + mode_s=pool_mode) + + @staticmethod + def forward(ctx, + input, + rois, + output_size, + spatial_scale=1.0, + sampling_ratio=0, + pool_mode='avg', + aligned=True): + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + assert pool_mode in ('max', 'avg') + ctx.pool_mode = 0 if pool_mode == 'max' else 1 + ctx.aligned = aligned + ctx.input_shape = input.size() + + assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' + + output_shape = (rois.size(0), input.size(1), ctx.output_size[0], + ctx.output_size[1]) + output = input.new_zeros(output_shape) + if ctx.pool_mode == 0: + argmax_y = input.new_zeros(output_shape) + argmax_x = input.new_zeros(output_shape) + else: + argmax_y = input.new_zeros(0) + argmax_x = input.new_zeros(0) + + ext_module.roi_align_forward( + input, + rois, + output, + argmax_y, + argmax_x, + aligned_height=ctx.output_size[0], + aligned_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale, + sampling_ratio=ctx.sampling_ratio, + pool_mode=ctx.pool_mode, + aligned=ctx.aligned) + + ctx.save_for_backward(rois, argmax_y, argmax_x) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + rois, argmax_y, argmax_x = ctx.saved_tensors + grad_input = grad_output.new_zeros(ctx.input_shape) + # complex head architecture may cause grad_output uncontiguous. 
+ grad_output = grad_output.contiguous() + ext_module.roi_align_backward( + grad_output, + rois, + argmax_y, + argmax_x, + grad_input, + aligned_height=ctx.output_size[0], + aligned_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale, + sampling_ratio=ctx.sampling_ratio, + pool_mode=ctx.pool_mode, + aligned=ctx.aligned) + return grad_input, None, None, None, None, None, None + + +roi_align = RoIAlignFunction.apply + + +class RoIAlign(nn.Module): + """RoI align pooling layer. + + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each + output sample. 0 to take samples densely for current models. + pool_mode (str, 'avg' or 'max'): pooling mode in each bin. + aligned (bool): if False, use the legacy implementation in + MMDetection. If True, align the results more perfectly. + use_torchvision (bool): whether to use roi_align from torchvision. + + Note: + The implementation of RoIAlign when aligned=True is modified from + https://github.com/facebookresearch/detectron2/ + + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel + indices (in our pixel model) are computed by floor(c - 0.5) and + ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete + indices [0] and [1] (which are sampled from the underlying signal + at continuous coordinates 0.5 and 1.5). But the original roi_align + (aligned=False) does not subtract the 0.5 when computing + neighboring pixel indices and therefore it uses pixels with a + slightly incorrect alignment (relative to our pixel model) when + performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; + + The difference does not make a difference to the model's + performance if ROIAlign is used together with conv layers. + """ + + @deprecated_api_warning( + { + 'out_size': 'output_size', + 'sample_num': 'sampling_ratio' + }, + cls_name='RoIAlign') + def __init__(self, + output_size, + spatial_scale=1.0, + sampling_ratio=0, + pool_mode='avg', + aligned=True, + use_torchvision=False): + super(RoIAlign, self).__init__() + + self.output_size = _pair(output_size) + self.spatial_scale = float(spatial_scale) + self.sampling_ratio = int(sampling_ratio) + self.pool_mode = pool_mode + self.aligned = aligned + self.use_torchvision = use_torchvision + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N.\ + The other 4 columns are xyxy. + """ + if self.use_torchvision: + from torchvision.ops import roi_align as tv_roi_align + if 'aligned' in tv_roi_align.__code__.co_varnames: + return tv_roi_align(input, rois, self.output_size, + self.spatial_scale, self.sampling_ratio, + self.aligned) + else: + if self.aligned: + rois -= rois.new_tensor([0.] 
+ + [0.5 / self.spatial_scale] * 4)
+                return tv_roi_align(input, rois, self.output_size,
+                                    self.spatial_scale, self.sampling_ratio)
+        else:
+            return roi_align(input, rois, self.output_size, self.spatial_scale,
+                             self.sampling_ratio, self.pool_mode, self.aligned)
+
+    def __repr__(self):
+        s = self.__class__.__name__
+        s += f'(output_size={self.output_size}, '
+        s += f'spatial_scale={self.spatial_scale}, '
+        s += f'sampling_ratio={self.sampling_ratio}, '
+        s += f'pool_mode={self.pool_mode}, '
+        s += f'aligned={self.aligned}, '
+        s += f'use_torchvision={self.use_torchvision})'
+        return s
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align_rotated.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align_rotated.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ce4961a3555d4da8bc3e32f1f7d5ad50036587d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_align_rotated.py
@@ -0,0 +1,177 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['roi_align_rotated_forward', 'roi_align_rotated_backward'])
+
+
+class RoIAlignRotatedFunction(Function):
+
+    @staticmethod
+    def symbolic(g, features, rois, out_size, spatial_scale, sample_num,
+                 aligned, clockwise):
+        if isinstance(out_size, int):
+            out_h = out_size
+            out_w = out_size
+        elif isinstance(out_size, tuple):
+            assert len(out_size) == 2
+            assert isinstance(out_size[0], int)
+            assert isinstance(out_size[1], int)
+            out_h, out_w = out_size
+        else:
+            raise TypeError(
+                '"out_size" must be an integer or tuple of integers')
+        return g.op(
+            'mmcv::MMCVRoIAlignRotated',
+            features,
+            rois,
+            output_height_i=out_h,
+            output_width_i=out_w,
+            spatial_scale_f=spatial_scale,
+            sampling_ratio_i=sample_num,
+            aligned_i=aligned,
+            clockwise_i=clockwise)
+
+    @staticmethod
+    def forward(ctx,
+                features,
+                rois,
+                out_size,
+                spatial_scale,
+                sample_num=0,
+                aligned=True,
+                clockwise=False):
+        if isinstance(out_size, int):
+            out_h = out_size
+            out_w = out_size
+        elif isinstance(out_size, tuple):
+            assert len(out_size) == 2
+            assert isinstance(out_size[0], int)
+            assert isinstance(out_size[1], int)
+            out_h, out_w = out_size
+        else:
+            raise TypeError(
+                '"out_size" must be an integer or tuple of integers')
+        ctx.spatial_scale = spatial_scale
+        ctx.sample_num = sample_num
+        ctx.aligned = aligned
+        ctx.clockwise = clockwise
+        ctx.save_for_backward(rois)
+        ctx.feature_size = features.size()
+
+        batch_size, num_channels, data_height, data_width = features.size()
+        num_rois = rois.size(0)
+
+        output = features.new_zeros(num_rois, num_channels, out_h, out_w)
+        ext_module.roi_align_rotated_forward(
+            features,
+            rois,
+            output,
+            pooled_height=out_h,
+            pooled_width=out_w,
+            spatial_scale=spatial_scale,
+            sample_num=sample_num,
+            aligned=aligned,
+            clockwise=clockwise)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        feature_size = ctx.feature_size
+        spatial_scale = ctx.spatial_scale
+        aligned = ctx.aligned
+        clockwise = ctx.clockwise
+        sample_num = ctx.sample_num
+        rois = ctx.saved_tensors[0]
+        assert feature_size is not None
+        batch_size, num_channels, data_height, data_width = feature_size
+
+        out_w = grad_output.size(3)
+        out_h = grad_output.size(2)
+
+        grad_input = grad_rois = None
+
+        if ctx.needs_input_grad[0]:
+            grad_input = rois.new_zeros(batch_size, num_channels, data_height,
+
data_width) + ext_module.roi_align_rotated_backward( + grad_output.contiguous(), + rois, + grad_input, + pooled_height=out_h, + pooled_width=out_w, + spatial_scale=spatial_scale, + sample_num=sample_num, + aligned=aligned, + clockwise=clockwise) + return grad_input, grad_rois, None, None, None, None, None + + +roi_align_rotated = RoIAlignRotatedFunction.apply + + +class RoIAlignRotated(nn.Module): + """RoI align pooling layer for rotated proposals. + + It accepts a feature map of shape (N, C, H, W) and rois with shape + (n, 6) with each roi decoded as (batch_index, center_x, center_y, + w, h, angle). The angle is in radian. + + Args: + out_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sample_num (int): number of inputs samples to take for each + output sample. 0 to take samples densely for current models. + aligned (bool): if False, use the legacy implementation in + MMDetection. If True, align the results more perfectly. + Default: True. + clockwise (bool): If True, the angle in each proposal follows a + clockwise fashion in image space, otherwise, the angle is + counterclockwise. Default: False. + + Note: + The implementation of RoIAlign when aligned=True is modified from + https://github.com/facebookresearch/detectron2/ + + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel + indices (in our pixel model) are computed by floor(c - 0.5) and + ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete + indices [0] and [1] (which are sampled from the underlying signal + at continuous coordinates 0.5 and 1.5). But the original roi_align + (aligned=False) does not subtract the 0.5 when computing + neighboring pixel indices and therefore it uses pixels with a + slightly incorrect alignment (relative to our pixel model) when + performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; + + The difference does not make a difference to the model's + performance if ROIAlign is used together with conv layers. + """ + + def __init__(self, + out_size, + spatial_scale, + sample_num=0, + aligned=True, + clockwise=False): + super(RoIAlignRotated, self).__init__() + + self.out_size = out_size + self.spatial_scale = float(spatial_scale) + self.sample_num = int(sample_num) + self.aligned = aligned + self.clockwise = clockwise + + def forward(self, features, rois): + return RoIAlignRotatedFunction.apply(features, rois, self.out_size, + self.spatial_scale, + self.sample_num, self.aligned, + self.clockwise) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..d339d8f2941eabc1cbe181a9c6c5ab5ff4ff4e5f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roi_pool.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
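Before the plain RoIPool implementation below, here is a minimal usage sketch for the RoIAlign module defined above. It is not part of the diff: the import path assumes this vendored `ops` package re-exports RoIAlign the way upstream mmcv does, `use_torchvision=True` is chosen so the sketch can run on CPU without the compiled `_ext` extension, and the tensor shapes and box values are made up for illustration.

import torch
from custom_mmpkg.custom_mmcv.ops import RoIAlign  # assumed re-export, as in upstream mmcv

feat = torch.rand(1, 16, 32, 32)          # NCHW feature map
# One RoI per row: (batch_index, x1, y1, x2, y2) in feature-map coordinates.
rois = torch.tensor([[0., 4., 4., 20., 20.]])
layer = RoIAlign(output_size=(7, 7), spatial_scale=1.0, sampling_ratio=2,
                 pool_mode='avg', aligned=True, use_torchvision=True)
out = layer(feat, rois)                   # -> torch.Size([1, 16, 7, 7])

With `use_torchvision=False` the same call would instead dispatch to the bundled `_ext.roi_align_forward` kernel shown above.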
+import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', + ['roi_pool_forward', 'roi_pool_backward']) + + +class RoIPoolFunction(Function): + + @staticmethod + def symbolic(g, input, rois, output_size, spatial_scale): + return g.op( + 'MaxRoiPool', + input, + rois, + pooled_shape_i=output_size, + spatial_scale_f=spatial_scale) + + @staticmethod + def forward(ctx, input, rois, output_size, spatial_scale=1.0): + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.input_shape = input.size() + + assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' + + output_shape = (rois.size(0), input.size(1), ctx.output_size[0], + ctx.output_size[1]) + output = input.new_zeros(output_shape) + argmax = input.new_zeros(output_shape, dtype=torch.int) + + ext_module.roi_pool_forward( + input, + rois, + output, + argmax, + pooled_height=ctx.output_size[0], + pooled_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale) + + ctx.save_for_backward(rois, argmax) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + rois, argmax = ctx.saved_tensors + grad_input = grad_output.new_zeros(ctx.input_shape) + + ext_module.roi_pool_backward( + grad_output, + rois, + argmax, + grad_input, + pooled_height=ctx.output_size[0], + pooled_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale) + + return grad_input, None, None, None + + +roi_pool = RoIPoolFunction.apply + + +class RoIPool(nn.Module): + + def __init__(self, output_size, spatial_scale=1.0): + super(RoIPool, self).__init__() + + self.output_size = _pair(output_size) + self.spatial_scale = float(spatial_scale) + + def forward(self, input, rois): + return roi_pool(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(output_size={self.output_size}, ' + s += f'spatial_scale={self.spatial_scale})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roiaware_pool3d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roiaware_pool3d.py new file mode 100644 index 0000000000000000000000000000000000000000..00d8a4d7f99181f224bda079ff7487aae5b92383 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roiaware_pool3d.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn as nn +from torch.autograd import Function + +import custom_mmpkg.custom_mmcv as mmcv +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['roiaware_pool3d_forward', 'roiaware_pool3d_backward']) + + +class RoIAwarePool3d(nn.Module): + """Encode the geometry-specific features of each 3D proposal. + + Please refer to `PartA2 `_ for more + details. + + Args: + out_size (int or tuple): The size of output features. n or + [n1, n2, n3]. + max_pts_per_voxel (int, optional): The maximum number of points per + voxel. Default: 128. + mode (str, optional): Pooling method of RoIAware, 'max' or 'avg'. + Default: 'max'. 
+ """ + + def __init__(self, out_size, max_pts_per_voxel=128, mode='max'): + super().__init__() + + self.out_size = out_size + self.max_pts_per_voxel = max_pts_per_voxel + assert mode in ['max', 'avg'] + pool_mapping = {'max': 0, 'avg': 1} + self.mode = pool_mapping[mode] + + def forward(self, rois, pts, pts_feature): + """ + Args: + rois (torch.Tensor): [N, 7], in LiDAR coordinate, + (x, y, z) is the bottom center of rois. + pts (torch.Tensor): [npoints, 3], coordinates of input points. + pts_feature (torch.Tensor): [npoints, C], features of input points. + + Returns: + pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C] + """ + + return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, + self.out_size, + self.max_pts_per_voxel, self.mode) + + +class RoIAwarePool3dFunction(Function): + + @staticmethod + def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, + mode): + """ + Args: + rois (torch.Tensor): [N, 7], in LiDAR coordinate, + (x, y, z) is the bottom center of rois. + pts (torch.Tensor): [npoints, 3], coordinates of input points. + pts_feature (torch.Tensor): [npoints, C], features of input points. + out_size (int or tuple): The size of output features. n or + [n1, n2, n3]. + max_pts_per_voxel (int): The maximum number of points per voxel. + Default: 128. + mode (int): Pooling method of RoIAware, 0 (max pool) or 1 (average + pool). + + Returns: + pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C], output + pooled features. + """ + + if isinstance(out_size, int): + out_x = out_y = out_z = out_size + else: + assert len(out_size) == 3 + assert mmcv.is_tuple_of(out_size, int) + out_x, out_y, out_z = out_size + + num_rois = rois.shape[0] + num_channels = pts_feature.shape[-1] + num_pts = pts.shape[0] + + pooled_features = pts_feature.new_zeros( + (num_rois, out_x, out_y, out_z, num_channels)) + argmax = pts_feature.new_zeros( + (num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int) + pts_idx_of_voxels = pts_feature.new_zeros( + (num_rois, out_x, out_y, out_z, max_pts_per_voxel), + dtype=torch.int) + + ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax, + pts_idx_of_voxels, pooled_features, + mode) + + ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode, + num_pts, num_channels) + return pooled_features + + @staticmethod + def backward(ctx, grad_out): + ret = ctx.roiaware_pool3d_for_backward + pts_idx_of_voxels, argmax, mode, num_pts, num_channels = ret + + grad_in = grad_out.new_zeros((num_pts, num_channels)) + ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax, + grad_out.contiguous(), grad_in, + mode) + + return None, None, grad_in, None, None, None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roipoint_pool3d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roipoint_pool3d.py new file mode 100644 index 0000000000000000000000000000000000000000..0a21412c0728431c04b84245bc2e3109eea9aefc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/roipoint_pool3d.py @@ -0,0 +1,77 @@ +from torch import nn as nn +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward']) + + +class RoIPointPool3d(nn.Module): + """Encode the geometry-specific features of each 3D proposal. + + Please refer to `Paper of PartA2 `_ + for more details. + + Args: + num_sampled_points (int, optional): Number of samples in each roi. + Default: 512. 
+ """ + + def __init__(self, num_sampled_points=512): + super().__init__() + self.num_sampled_points = num_sampled_points + + def forward(self, points, point_features, boxes3d): + """ + Args: + points (torch.Tensor): Input points whose shape is (B, N, C). + point_features (torch.Tensor): Features of input points whose shape + is (B, N, C). + boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). + + Returns: + pooled_features (torch.Tensor): The output pooled features whose + shape is (B, M, 512, 3 + C). + pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). + """ + return RoIPointPool3dFunction.apply(points, point_features, boxes3d, + self.num_sampled_points) + + +class RoIPointPool3dFunction(Function): + + @staticmethod + def forward(ctx, points, point_features, boxes3d, num_sampled_points=512): + """ + Args: + points (torch.Tensor): Input points whose shape is (B, N, C). + point_features (torch.Tensor): Features of input points whose shape + is (B, N, C). + boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). + num_sampled_points (int, optional): The num of sampled points. + Default: 512. + + Returns: + pooled_features (torch.Tensor): The output pooled features whose + shape is (B, M, 512, 3 + C). + pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). + """ + assert len(points.shape) == 3 and points.shape[2] == 3 + batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[ + 1], point_features.shape[2] + pooled_boxes3d = boxes3d.view(batch_size, -1, 7) + pooled_features = point_features.new_zeros( + (batch_size, boxes_num, num_sampled_points, 3 + feature_len)) + pooled_empty_flag = point_features.new_zeros( + (batch_size, boxes_num)).int() + + ext_module.roipoint_pool3d_forward(points.contiguous(), + pooled_boxes3d.contiguous(), + point_features.contiguous(), + pooled_features, pooled_empty_flag) + + return pooled_features, pooled_empty_flag + + @staticmethod + def backward(ctx, grad_out): + raise NotImplementedError diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/saconv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/saconv.py new file mode 100644 index 0000000000000000000000000000000000000000..46d26992534cba3ba0ee36f08b700c5489fea30d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/saconv.py @@ -0,0 +1,145 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_mmpkg.custom_mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init +from custom_mmpkg.custom_mmcv.ops.deform_conv import deform_conv2d +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, digit_version + + +@CONV_LAYERS.register_module(name='SAC') +class SAConv2d(ConvAWS2d): + """SAC (Switchable Atrous Convolution) + + This is an implementation of SAC in DetectoRS + (https://arxiv.org/pdf/2006.02334.pdf). + + Args: + in_channels (int): Number of channels in the input image + out_channels (int): Number of channels produced by the convolution + kernel_size (int or tuple): Size of the convolving kernel + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of + the input. Default: 0 + padding_mode (string, optional): ``'zeros'``, ``'reflect'``, + ``'replicate'`` or ``'circular'``. Default: ``'zeros'`` + dilation (int or tuple, optional): Spacing between kernel elements. 
+ Default: 1 + groups (int, optional): Number of blocked connections from input + channels to output channels. Default: 1 + bias (bool, optional): If ``True``, adds a learnable bias to the + output. Default: ``True`` + use_deform: If ``True``, replace convolution with deformable + convolution. Default: ``False``. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + use_deform=False): + super().__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.use_deform = use_deform + self.switch = nn.Conv2d( + self.in_channels, 1, kernel_size=1, stride=stride, bias=True) + self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size())) + self.pre_context = nn.Conv2d( + self.in_channels, self.in_channels, kernel_size=1, bias=True) + self.post_context = nn.Conv2d( + self.out_channels, self.out_channels, kernel_size=1, bias=True) + if self.use_deform: + self.offset_s = nn.Conv2d( + self.in_channels, + 18, + kernel_size=3, + padding=1, + stride=stride, + bias=True) + self.offset_l = nn.Conv2d( + self.in_channels, + 18, + kernel_size=3, + padding=1, + stride=stride, + bias=True) + self.init_weights() + + def init_weights(self): + constant_init(self.switch, 0, bias=1) + self.weight_diff.data.zero_() + constant_init(self.pre_context, 0) + constant_init(self.post_context, 0) + if self.use_deform: + constant_init(self.offset_s, 0) + constant_init(self.offset_l, 0) + + def forward(self, x): + # pre-context + avg_x = F.adaptive_avg_pool2d(x, output_size=1) + avg_x = self.pre_context(avg_x) + avg_x = avg_x.expand_as(x) + x = x + avg_x + # switch + avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect') + avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0) + switch = self.switch(avg_x) + # sac + weight = self._get_weight(self.weight) + zero_bias = torch.zeros( + self.out_channels, device=weight.device, dtype=weight.dtype) + + if self.use_deform: + offset = self.offset_s(avg_x) + out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, + self.dilation, self.groups, 1) + else: + if (TORCH_VERSION == 'parrots' + or digit_version(TORCH_VERSION) < digit_version('1.5.0')): + out_s = super().conv2d_forward(x, weight) + elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): + # bias is a required argument of _conv_forward in torch 1.8.0 + out_s = super()._conv_forward(x, weight, zero_bias) + else: + out_s = super()._conv_forward(x, weight) + ori_p = self.padding + ori_d = self.dilation + self.padding = tuple(3 * p for p in self.padding) + self.dilation = tuple(3 * d for d in self.dilation) + weight = weight + self.weight_diff + if self.use_deform: + offset = self.offset_l(avg_x) + out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, + self.dilation, self.groups, 1) + else: + if (TORCH_VERSION == 'parrots' + or digit_version(TORCH_VERSION) < digit_version('1.5.0')): + out_l = super().conv2d_forward(x, weight) + elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): + # bias is a required argument of _conv_forward in torch 1.8.0 + out_l = super()._conv_forward(x, weight, zero_bias) + else: + out_l = super()._conv_forward(x, weight) + + out = switch * out_s + (1 - switch) * out_l + self.padding = ori_p + self.dilation = ori_d + # post-context + avg_x = F.adaptive_avg_pool2d(out, output_size=1) + avg_x = self.post_context(avg_x) + avg_x = avg_x.expand_as(out) + out = out + avg_x + return out diff 
--git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/scatter_points.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/scatter_points.py new file mode 100644 index 0000000000000000000000000000000000000000..2b8aa4169e9f6ca4a6f845ce17d6d1e4db416bb8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/scatter_points.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', + ['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward']) + + +class _DynamicScatter(Function): + + @staticmethod + def forward(ctx, feats, coors, reduce_type='max'): + """convert kitti points(N, >=3) to voxels. + + Args: + feats (torch.Tensor): [N, C]. Points features to be reduced + into voxels. + coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates + (specifically multi-dim voxel index) of each points. + reduce_type (str, optional): Reduce op. support 'max', 'sum' and + 'mean'. Default: 'max'. + + Returns: + voxel_feats (torch.Tensor): [M, C]. Reduced features, input + features that shares the same voxel coordinates are reduced to + one row. + voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates. + """ + results = ext_module.dynamic_point_to_voxel_forward( + feats, coors, reduce_type) + (voxel_feats, voxel_coors, point2voxel_map, + voxel_points_count) = results + ctx.reduce_type = reduce_type + ctx.save_for_backward(feats, voxel_feats, point2voxel_map, + voxel_points_count) + ctx.mark_non_differentiable(voxel_coors) + return voxel_feats, voxel_coors + + @staticmethod + def backward(ctx, grad_voxel_feats, grad_voxel_coors=None): + (feats, voxel_feats, point2voxel_map, + voxel_points_count) = ctx.saved_tensors + grad_feats = torch.zeros_like(feats) + # TODO: whether to use index put or use cuda_backward + # To use index put, need point to voxel index + ext_module.dynamic_point_to_voxel_backward( + grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats, + point2voxel_map, voxel_points_count, ctx.reduce_type) + return grad_feats, None, None + + +dynamic_scatter = _DynamicScatter.apply + + +class DynamicScatter(nn.Module): + """Scatters points into voxels, used in the voxel encoder with dynamic + voxelization. + + Note: + The CPU and GPU implementation get the same output, but have numerical + difference after summation and division (e.g., 5e-7). + + Args: + voxel_size (list): list [x, y, z] size of three dimension. + point_cloud_range (list): The coordinate range of points, [x_min, + y_min, z_min, x_max, y_max, z_max]. + average_points (bool): whether to use avg pooling to scatter points + into voxel. + """ + + def __init__(self, voxel_size, point_cloud_range, average_points: bool): + super().__init__() + + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.average_points = average_points + + def forward_single(self, points, coors): + """Scatters points into voxels. + + Args: + points (torch.Tensor): Points to be reduced into voxels. + coors (torch.Tensor): Corresponding voxel coordinates (specifically + multi-dim voxel index) of each points. + + Returns: + voxel_feats (torch.Tensor): Reduced features, input features that + shares the same voxel coordinates are reduced to one row. + voxel_coors (torch.Tensor): Voxel coordinates. 
+ """ + reduce = 'mean' if self.average_points else 'max' + return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce) + + def forward(self, points, coors): + """Scatters points/features into voxels. + + Args: + points (torch.Tensor): Points to be reduced into voxels. + coors (torch.Tensor): Corresponding voxel coordinates (specifically + multi-dim voxel index) of each points. + + Returns: + voxel_feats (torch.Tensor): Reduced features, input features that + shares the same voxel coordinates are reduced to one row. + voxel_coors (torch.Tensor): Voxel coordinates. + """ + if coors.size(-1) == 3: + return self.forward_single(points, coors) + else: + batch_size = coors[-1, 0] + 1 + voxels, voxel_coors = [], [] + for i in range(batch_size): + inds = torch.where(coors[:, 0] == i) + voxel, voxel_coor = self.forward_single( + points[inds], coors[inds][:, 1:]) + coor_pad = nn.functional.pad( + voxel_coor, (1, 0), mode='constant', value=i) + voxel_coors.append(coor_pad) + voxels.append(voxel) + features = torch.cat(voxels, dim=0) + feature_coors = torch.cat(voxel_coors, dim=0) + + return features, feature_coors + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += 'voxel_size=' + str(self.voxel_size) + s += ', point_cloud_range=' + str(self.point_cloud_range) + s += ', average_points=' + str(self.average_points) + s += ')' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/sync_bn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/sync_bn.py new file mode 100644 index 0000000000000000000000000000000000000000..f885caac860ae7197ba2a29433b3c3debfdb2e65 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/sync_bn.py @@ -0,0 +1,279 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.distributed as dist +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.module import Module +from torch.nn.parameter import Parameter + +from custom_mmpkg.custom_mmcv.cnn import NORM_LAYERS +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output', + 'sync_bn_backward_param', 'sync_bn_backward_data' +]) + + +class SyncBatchNormFunction(Function): + + @staticmethod + def symbolic(g, input, running_mean, running_var, weight, bias, momentum, + eps, group, group_size, stats_mode): + return g.op( + 'mmcv::MMCVSyncBatchNorm', + input, + running_mean, + running_var, + weight, + bias, + momentum_f=momentum, + eps_f=eps, + group_i=group, + group_size_i=group_size, + stats_mode=stats_mode) + + @staticmethod + def forward(self, input, running_mean, running_var, weight, bias, momentum, + eps, group, group_size, stats_mode): + self.momentum = momentum + self.eps = eps + self.group = group + self.group_size = group_size + self.stats_mode = stats_mode + + assert isinstance( + input, (torch.HalfTensor, torch.FloatTensor, + torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \ + f'only support Half or Float Tensor, but {input.type()}' + output = torch.zeros_like(input) + input3d = input.flatten(start_dim=2) + output3d = output.view_as(input3d) + num_channels = input3d.size(1) + + # ensure mean/var/norm/std are initialized as zeros + # ``torch.empty()`` does not guarantee that + mean = torch.zeros( + num_channels, dtype=torch.float, device=input3d.device) + var = torch.zeros( + num_channels, dtype=torch.float, device=input3d.device) + norm = torch.zeros_like( + input3d, dtype=torch.float, device=input3d.device) + std = torch.zeros( + num_channels, dtype=torch.float, device=input3d.device) + + batch_size = input3d.size(0) + if batch_size > 0: + ext_module.sync_bn_forward_mean(input3d, mean) + batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype) + else: + # skip updating mean and leave it as zeros when the input is empty + batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype) + + # synchronize mean and the batch flag + vec = torch.cat([mean, batch_flag]) + if self.stats_mode == 'N': + vec *= batch_size + if self.group_size > 1: + dist.all_reduce(vec, group=self.group) + total_batch = vec[-1].detach() + mean = vec[:num_channels] + + if self.stats_mode == 'default': + mean = mean / self.group_size + elif self.stats_mode == 'N': + mean = mean / total_batch.clamp(min=1) + else: + raise NotImplementedError + + # leave var as zeros when the input is empty + if batch_size > 0: + ext_module.sync_bn_forward_var(input3d, mean, var) + + if self.stats_mode == 'N': + var *= batch_size + if self.group_size > 1: + dist.all_reduce(var, group=self.group) + + if self.stats_mode == 'default': + var /= self.group_size + elif self.stats_mode == 'N': + var /= total_batch.clamp(min=1) + else: + raise NotImplementedError + + # if the total batch size over all the ranks is zero, + # we should not update the statistics in the current batch + update_flag = total_batch.clamp(max=1) + momentum = update_flag * self.momentum + ext_module.sync_bn_forward_output( + input3d, + mean, + var, + weight, + bias, + running_mean, + running_var, + norm, + std, + output3d, + eps=self.eps, + momentum=momentum, + group_size=self.group_size) + self.save_for_backward(norm, std, weight) + return output + + 
@staticmethod
+    @once_differentiable
+    def backward(self, grad_output):
+        norm, std, weight = self.saved_tensors
+        grad_weight = torch.zeros_like(weight)
+        grad_bias = torch.zeros_like(weight)
+        grad_input = torch.zeros_like(grad_output)
+        grad_output3d = grad_output.flatten(start_dim=2)
+        grad_input3d = grad_input.view_as(grad_output3d)
+
+        batch_size = grad_input3d.size(0)
+        if batch_size > 0:
+            ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
+                                              grad_bias)
+
+        # all reduce
+        if self.group_size > 1:
+            dist.all_reduce(grad_weight, group=self.group)
+            dist.all_reduce(grad_bias, group=self.group)
+            grad_weight /= self.group_size
+            grad_bias /= self.group_size
+
+        if batch_size > 0:
+            ext_module.sync_bn_backward_data(grad_output3d, weight,
+                                             grad_weight, grad_bias, norm, std,
+                                             grad_input3d)
+
+        return grad_input, None, None, grad_weight, grad_bias, \
+            None, None, None, None, None
+
+
+@NORM_LAYERS.register_module(name='MMSyncBN')
+class SyncBatchNorm(Module):
+    """Synchronized Batch Normalization.
+
+    Args:
+        num_features (int): number of features/channels in input tensor
+        eps (float, optional): a value added to the denominator for numerical
+            stability. Defaults to 1e-5.
+        momentum (float, optional): the value used for the running_mean and
+            running_var computation. Defaults to 0.1.
+        affine (bool, optional): whether to use learnable affine parameters.
+            Defaults to True.
+        track_running_stats (bool, optional): whether to track the running
+            mean and variance during training. When set to False, this
+            module does not track such statistics, and initializes statistics
+            buffers ``running_mean`` and ``running_var`` as ``None``. When
+            these buffers are ``None``, this module always uses batch
+            statistics in both training and eval modes. Defaults to True.
+        group (int, optional): synchronization of stats happens within each
+            process group individually. By default it is synchronization
+            across the whole world. Defaults to None.
+        stats_mode (str, optional): The statistical mode. Available options
+            include ``'default'`` and ``'N'``. Defaults to 'default'.
+            When ``stats_mode=='default'``, it computes the overall statistics
+            using those from each worker with equal weight, i.e., the
+            statistics are synchronized and simply divided by ``group``. This
+            mode will produce inaccurate statistics when empty tensors occur.
+            When ``stats_mode=='N'``, it computes the overall statistics using
+            the total number of batches in each worker, ignoring the group
+            size, i.e., the statistics are synchronized and then divided by
+            the total batch size ``N``. This mode is beneficial when empty
+            tensors occur during training, as it averages the mean over the
+            actual total batch size.
+ """ + + def __init__(self, + num_features, + eps=1e-5, + momentum=0.1, + affine=True, + track_running_stats=True, + group=None, + stats_mode='default'): + super(SyncBatchNorm, self).__init__() + self.num_features = num_features + self.eps = eps + self.momentum = momentum + self.affine = affine + self.track_running_stats = track_running_stats + group = dist.group.WORLD if group is None else group + self.group = group + self.group_size = dist.get_world_size(group) + assert stats_mode in ['default', 'N'], \ + f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"' + self.stats_mode = stats_mode + if self.affine: + self.weight = Parameter(torch.Tensor(num_features)) + self.bias = Parameter(torch.Tensor(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + if self.track_running_stats: + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.register_buffer('num_batches_tracked', + torch.tensor(0, dtype=torch.long)) + else: + self.register_buffer('running_mean', None) + self.register_buffer('running_var', None) + self.register_buffer('num_batches_tracked', None) + self.reset_parameters() + + def reset_running_stats(self): + if self.track_running_stats: + self.running_mean.zero_() + self.running_var.fill_(1) + self.num_batches_tracked.zero_() + + def reset_parameters(self): + self.reset_running_stats() + if self.affine: + self.weight.data.uniform_() # pytorch use ones_() + self.bias.data.zero_() + + def forward(self, input): + if input.dim() < 2: + raise ValueError( + f'expected at least 2D input, got {input.dim()}D input') + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + if self.num_batches_tracked is not None: + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float( + self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + if self.training or not self.track_running_stats: + return SyncBatchNormFunction.apply( + input, self.running_mean, self.running_var, self.weight, + self.bias, exponential_average_factor, self.eps, self.group, + self.group_size, self.stats_mode) + else: + return F.batch_norm(input, self.running_mean, self.running_var, + self.weight, self.bias, False, + exponential_average_factor, self.eps) + + def __repr__(self): + s = self.__class__.__name__ + s += f'({self.num_features}, ' + s += f'eps={self.eps}, ' + s += f'momentum={self.momentum}, ' + s += f'affine={self.affine}, ' + s += f'track_running_stats={self.track_running_stats}, ' + s += f'group_size={self.group_size},' + s += f'stats_mode={self.stats_mode})' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_interpolate.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_interpolate.py new file mode 100644 index 0000000000000000000000000000000000000000..203f47f05d58087e034fb3cd8cd6a09233947b4a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_interpolate.py @@ -0,0 +1,68 @@ +from typing import Tuple + +import torch +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['three_interpolate_forward', 'three_interpolate_backward']) + + +class 
ThreeInterpolate(Function):
+    """Performs weighted linear interpolation on 3 features.
+
+    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
+    for more details.
+    """
+
+    @staticmethod
+    def forward(ctx, features: torch.Tensor, indices: torch.Tensor,
+                weight: torch.Tensor) -> torch.Tensor:
+        """
+        Args:
+            features (Tensor): (B, C, M) feature descriptors to be
+                interpolated
+            indices (Tensor): (B, n, 3) indices of the three nearest
+                neighbors of the target features in features
+            weight (Tensor): (B, n, 3) weights of interpolation
+
+        Returns:
+            Tensor: (B, C, N) tensor of the interpolated features
+        """
+        assert features.is_contiguous()
+        assert indices.is_contiguous()
+        assert weight.is_contiguous()
+
+        B, c, m = features.size()
+        n = indices.size(1)
+        ctx.three_interpolate_for_backward = (indices, weight, m)
+        output = torch.cuda.FloatTensor(B, c, n)
+
+        ext_module.three_interpolate_forward(
+            features, indices, weight, output, b=B, c=c, m=m, n=n)
+        return output
+
+    @staticmethod
+    def backward(
+        ctx, grad_out: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Args:
+            grad_out (Tensor): (B, C, N) tensor with gradients of outputs
+
+        Returns:
+            Tensor: (B, C, M) tensor with gradients of features
+        """
+        idx, weight, m = ctx.three_interpolate_for_backward
+        B, c, n = grad_out.size()
+
+        grad_features = torch.cuda.FloatTensor(B, c, m).zero_()
+        grad_out_data = grad_out.data.contiguous()
+
+        ext_module.three_interpolate_backward(
+            grad_out_data, idx, weight, grad_features.data, b=B, c=c, n=n, m=m)
+        return grad_features, None, None
+
+
+three_interpolate = ThreeInterpolate.apply
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_nn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_nn.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b01047a129989cd5545a0a86f23a487f4a13ce1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/three_nn.py
@@ -0,0 +1,51 @@
+from typing import Tuple
+
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['three_nn_forward'])
+
+
+class ThreeNN(Function):
+    """Find the top-3 nearest neighbors of the target set from the source set.
+
+    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
+    for more details.
+    """
+
+    @staticmethod
+    def forward(ctx, target: torch.Tensor,
+                source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Args:
+            target (Tensor): shape (B, N, 3), points set that needs to
+                find the nearest neighbors.
+            source (Tensor): shape (B, M, 3), points set that is used
+                to find the nearest neighbors of points in target set.
+
+        Returns:
+            Tensor: shape (B, N, 3), L2 distance of each point in target
+                set to their corresponding nearest neighbors.
+ """ + target = target.contiguous() + source = source.contiguous() + + B, N, _ = target.size() + m = source.size(1) + dist2 = torch.cuda.FloatTensor(B, N, 3) + idx = torch.cuda.IntTensor(B, N, 3) + + ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m) + if torch.__version__ != 'parrots': + ctx.mark_non_differentiable(idx) + + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/tin_shift.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/tin_shift.py new file mode 100644 index 0000000000000000000000000000000000000000..472c9fcfe45a124e819b7ed5653e585f94a8811e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/tin_shift.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Code reference from "Temporal Interlacing Network" +# https://github.com/deepcs233/TIN/blob/master/cuda_shift/rtc_wrap.py +# Hao Shao, Shengju Qian, Yu Liu +# shaoh19@mails.tsinghua.edu.cn, sjqian@cse.cuhk.edu.hk, yuliu@ee.cuhk.edu.hk + +import torch +import torch.nn as nn +from torch.autograd import Function + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', + ['tin_shift_forward', 'tin_shift_backward']) + + +class TINShiftFunction(Function): + + @staticmethod + def forward(ctx, input, shift): + C = input.size(2) + num_segments = shift.size(1) + if C // num_segments <= 0 or C % num_segments != 0: + raise ValueError('C should be a multiple of num_segments, ' + f'but got C={C} and num_segments={num_segments}.') + + ctx.save_for_backward(shift) + + out = torch.zeros_like(input) + ext_module.tin_shift_forward(input, shift, out) + + return out + + @staticmethod + def backward(ctx, grad_output): + + shift = ctx.saved_tensors[0] + data_grad_input = grad_output.new(*grad_output.size()).zero_() + shift_grad_input = shift.new(*shift.size()).zero_() + ext_module.tin_shift_backward(grad_output, shift, data_grad_input) + + return data_grad_input, shift_grad_input + + +tin_shift = TINShiftFunction.apply + + +class TINShift(nn.Module): + """Temporal Interlace Shift. + + Temporal Interlace shift is a differentiable temporal-wise frame shifting + which is proposed in "Temporal Interlacing Network" + + Please refer to https://arxiv.org/abs/2001.06499 for more details. + Code is modified from https://github.com/mit-han-lab/temporal-shift-module + """ + + def forward(self, input, shift): + """Perform temporal interlace shift. + + Args: + input (Tensor): Feature map with shape [N, num_segments, C, H * W]. + shift (Tensor): Shift tensor with shape [N, num_segments]. + + Returns: + Feature map after temporal interlace shift. + """ + return tin_shift(input, shift) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/upfirdn2d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/upfirdn2d.py new file mode 100644 index 0000000000000000000000000000000000000000..ef4a5236dda57340017f0e16857bca297d4e1b2f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/upfirdn2d.py @@ -0,0 +1,330 @@ +# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 + +# Copyright (c) 2021, NVIDIA Corporation. All rights reserved. 
+# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator +# Augmentation (ADA) +# ======================================================================= + +# 1. Definitions + +# "Licensor" means any person or entity that distributes its Work. + +# "Software" means the original work of authorship made available under +# this License. + +# "Work" means the Software and any additions to or derivative works of +# the Software that are made available under this License. + +# The terms "reproduce," "reproduction," "derivative works," and +# "distribution" have the meaning as provided under U.S. copyright law; +# provided, however, that for the purposes of this License, derivative +# works shall not include works that remain separable from, or merely +# link (or bind by name) to the interfaces of, the Work. + +# Works, including the Software, are "made available" under this License +# by including in or with the Work either (a) a copyright notice +# referencing the applicability of this License to the Work, or (b) a +# copy of this License. + +# 2. License Grants + +# 2.1 Copyright Grant. Subject to the terms and conditions of this +# License, each Licensor grants to you a perpetual, worldwide, +# non-exclusive, royalty-free, copyright license to reproduce, +# prepare derivative works of, publicly display, publicly perform, +# sublicense and distribute its Work and any resulting derivative +# works in any form. + +# 3. Limitations + +# 3.1 Redistribution. You may reproduce or distribute the Work only +# if (a) you do so under this License, (b) you include a complete +# copy of this License with your distribution, and (c) you retain +# without modification any copyright, patent, trademark, or +# attribution notices that are present in the Work. + +# 3.2 Derivative Works. You may specify that additional or different +# terms apply to the use, reproduction, and distribution of your +# derivative works of the Work ("Your Terms") only if (a) Your Terms +# provide that the use limitation in Section 3.3 applies to your +# derivative works, and (b) you identify the specific derivative +# works that are subject to Your Terms. Notwithstanding Your Terms, +# this License (including the redistribution requirements in Section +# 3.1) will continue to apply to the Work itself. + +# 3.3 Use Limitation. The Work and any derivative works thereof only +# may be used or intended for use non-commercially. Notwithstanding +# the foregoing, NVIDIA and its affiliates may use the Work and any +# derivative works commercially. As used herein, "non-commercially" +# means for research or evaluation purposes only. + +# 3.4 Patent Claims. If you bring or threaten to bring a patent claim +# against any Licensor (including any claim, cross-claim or +# counterclaim in a lawsuit) to enforce any patents that you allege +# are infringed by any Work, then your rights under this License from +# such Licensor (including the grant in Section 2.1) will terminate +# immediately. + +# 3.5 Trademarks. This License does not grant any rights to use any +# Licensor’s or its affiliates’ names, logos, or trademarks, except +# as necessary to reproduce the notices described in this License. + +# 3.6 Termination. If you violate any term of this License, then your +# rights under this License (including the grant in Section 2.1) will +# terminate immediately. + +# 4. Disclaimer of Warranty. 
+ +# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR +# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER +# THIS LICENSE. + +# 5. Limitation of Liability. + +# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF +# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGES. + +# ======================================================================= + +import torch +from torch.autograd import Function +from torch.nn import functional as F + +from custom_mmpkg.custom_mmcv.utils import to_2tuple +from ..utils import ext_loader + +upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d']) + + +class UpFirDn2dBackward(Function): + + @staticmethod + def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, + in_size, out_size): + + up_x, up_y = up + down_x, down_y = down + g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad + + grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) + + grad_input = upfirdn2d_ext.upfirdn2d( + grad_output, + grad_kernel, + up_x=down_x, + up_y=down_y, + down_x=up_x, + down_y=up_y, + pad_x0=g_pad_x0, + pad_x1=g_pad_x1, + pad_y0=g_pad_y0, + pad_y1=g_pad_y1) + grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], + in_size[3]) + + ctx.save_for_backward(kernel) + + pad_x0, pad_x1, pad_y0, pad_y1 = pad + + ctx.up_x = up_x + ctx.up_y = up_y + ctx.down_x = down_x + ctx.down_y = down_y + ctx.pad_x0 = pad_x0 + ctx.pad_x1 = pad_x1 + ctx.pad_y0 = pad_y0 + ctx.pad_y1 = pad_y1 + ctx.in_size = in_size + ctx.out_size = out_size + + return grad_input + + @staticmethod + def backward(ctx, gradgrad_input): + kernel, = ctx.saved_tensors + + gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], + ctx.in_size[3], 1) + + gradgrad_out = upfirdn2d_ext.upfirdn2d( + gradgrad_input, + kernel, + up_x=ctx.up_x, + up_y=ctx.up_y, + down_x=ctx.down_x, + down_y=ctx.down_y, + pad_x0=ctx.pad_x0, + pad_x1=ctx.pad_x1, + pad_y0=ctx.pad_y0, + pad_y1=ctx.pad_y1) + # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], + # ctx.out_size[1], ctx.in_size[3]) + gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], + ctx.out_size[0], ctx.out_size[1]) + + return gradgrad_out, None, None, None, None, None, None, None, None + + +class UpFirDn2d(Function): + + @staticmethod + def forward(ctx, input, kernel, up, down, pad): + up_x, up_y = up + down_x, down_y = down + pad_x0, pad_x1, pad_y0, pad_y1 = pad + + kernel_h, kernel_w = kernel.shape + batch, channel, in_h, in_w = input.shape + ctx.in_size = input.shape + + input = input.reshape(-1, in_h, in_w, 1) + + ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 + ctx.out_size = (out_h, out_w) + + ctx.up = (up_x, up_y) + ctx.down = (down_x, down_y) + ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) + + 
g_pad_x0 = kernel_w - pad_x0 - 1 + g_pad_y0 = kernel_h - pad_y0 - 1 + g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 + g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 + + ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) + + out = upfirdn2d_ext.upfirdn2d( + input, + kernel, + up_x=up_x, + up_y=up_y, + down_x=down_x, + down_y=down_y, + pad_x0=pad_x0, + pad_x1=pad_x1, + pad_y0=pad_y0, + pad_y1=pad_y1) + # out = out.view(major, out_h, out_w, minor) + out = out.view(-1, channel, out_h, out_w) + + return out + + @staticmethod + def backward(ctx, grad_output): + kernel, grad_kernel = ctx.saved_tensors + + grad_input = UpFirDn2dBackward.apply( + grad_output, + kernel, + grad_kernel, + ctx.up, + ctx.down, + ctx.pad, + ctx.g_pad, + ctx.in_size, + ctx.out_size, + ) + + return grad_input, None, None, None, None + + +def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): + """UpFRIDn for 2d features. + + UpFIRDn is short for upsample, apply FIR filter and downsample. More + details can be found in: + https://www.mathworks.com/help/signal/ref/upfirdn.html + + Args: + input (Tensor): Tensor with shape of (n, c, h, w). + kernel (Tensor): Filter kernel. + up (int | tuple[int], optional): Upsampling factor. If given a number, + we will use this factor for the both height and width side. + Defaults to 1. + down (int | tuple[int], optional): Downsampling factor. If given a + number, we will use this factor for the both height and width side. + Defaults to 1. + pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or + (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0). + + Returns: + Tensor: Tensor after UpFIRDn. + """ + if input.device.type == 'cpu': + if len(pad) == 2: + pad = (pad[0], pad[1], pad[0], pad[1]) + + up = to_2tuple(up) + + down = to_2tuple(down) + + out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1], + pad[0], pad[1], pad[2], pad[3]) + else: + _up = to_2tuple(up) + + _down = to_2tuple(down) + + if len(pad) == 4: + _pad = pad + elif len(pad) == 2: + _pad = (pad[0], pad[1], pad[0], pad[1]) + + out = UpFirDn2d.apply(input, kernel, _up, _down, _pad) + + return out + + +def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, + pad_y0, pad_y1): + _, channel, in_h, in_w = input.shape + input = input.reshape(-1, in_h, in_w, 1) + + _, in_h, in_w, minor = input.shape + kernel_h, kernel_w = kernel.shape + + out = input.view(-1, in_h, 1, in_w, 1, minor) + out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) + out = out.view(-1, in_h * up_y, in_w * up_x, minor) + + out = F.pad( + out, + [0, 0, + max(pad_x0, 0), + max(pad_x1, 0), + max(pad_y0, 0), + max(pad_y1, 0)]) + out = out[:, + max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), + max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] + + out = out.permute(0, 3, 1, 2) + out = out.reshape( + [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) + w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) + out = F.conv2d(out, w) + out = out.reshape( + -1, + minor, + in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, + in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, + ) + out = out.permute(0, 2, 3, 1) + out = out[:, ::down_y, ::down_x, :] + + out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 + out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 + + return out.view(-1, channel, out_h, out_w) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/voxelize.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/voxelize.py new file mode 100644 index 0000000000000000000000000000000000000000..ca3226a4fbcbfe58490fa2ea8e1c16b531214121 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/ops/voxelize.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['dynamic_voxelize_forward', 'hard_voxelize_forward']) + + +class _Voxelization(Function): + + @staticmethod + def forward(ctx, + points, + voxel_size, + coors_range, + max_points=35, + max_voxels=20000): + """Convert kitti points(N, >=3) to voxels. + + Args: + points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points + and points[:, 3:] contain other information like reflectivity. + voxel_size (tuple or float): The size of voxel with the shape of + [3]. + coors_range (tuple or float): The coordinate range of voxel with + the shape of [6]. + max_points (int, optional): maximum points contained in a voxel. if + max_points=-1, it means using dynamic_voxelize. Default: 35. + max_voxels (int, optional): maximum voxels this function create. + for second, 20000 is a good choice. Users should shuffle points + before call this function because max_voxels may drop points. + Default: 20000. + + Returns: + voxels_out (torch.Tensor): Output voxels with the shape of [M, + max_points, ndim]. Only contain points and returned when + max_points != -1. + coors_out (torch.Tensor): Output coordinates with the shape of + [M, 3]. + num_points_per_voxel_out (torch.Tensor): Num points per voxel with + the shape of [M]. Only returned when max_points != -1. + """ + if max_points == -1 or max_voxels == -1: + coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int) + ext_module.dynamic_voxelize_forward(points, coors, voxel_size, + coors_range, 3) + return coors + else: + voxels = points.new_zeros( + size=(max_voxels, max_points, points.size(1))) + coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int) + num_points_per_voxel = points.new_zeros( + size=(max_voxels, ), dtype=torch.int) + voxel_num = ext_module.hard_voxelize_forward( + points, voxels, coors, num_points_per_voxel, voxel_size, + coors_range, max_points, max_voxels, 3) + # select the valid voxels + voxels_out = voxels[:voxel_num] + coors_out = coors[:voxel_num] + num_points_per_voxel_out = num_points_per_voxel[:voxel_num] + return voxels_out, coors_out, num_points_per_voxel_out + + +voxelization = _Voxelization.apply + + +class Voxelization(nn.Module): + """Convert kitti points(N, >=3) to voxels. + + Please refer to `PVCNN `_ for more + details. + + Args: + voxel_size (tuple or float): The size of voxel with the shape of [3]. + point_cloud_range (tuple or float): The coordinate range of voxel with + the shape of [6]. + max_num_points (int): maximum points contained in a voxel. if + max_points=-1, it means using dynamic_voxelize. + max_voxels (int, optional): maximum voxels this function create. + for second, 20000 is a good choice. Users should shuffle points + before call this function because max_voxels may drop points. + Default: 20000. 
+ """ + + def __init__(self, + voxel_size, + point_cloud_range, + max_num_points, + max_voxels=20000): + super().__init__() + + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.max_num_points = max_num_points + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + + point_cloud_range = torch.tensor( + point_cloud_range, dtype=torch.float32) + voxel_size = torch.tensor(voxel_size, dtype=torch.float32) + grid_size = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_size = torch.round(grid_size).long() + input_feat_shape = grid_size[:2] + self.grid_size = grid_size + # the origin shape is as [x-len, y-len, z-len] + # [w, h, d] -> [d, h, w] + self.pcd_shape = [*input_feat_shape, 1][::-1] + + def forward(self, input): + if self.training: + max_voxels = self.max_voxels[0] + else: + max_voxels = self.max_voxels[1] + + return voxelization(input, self.voxel_size, self.point_cloud_range, + self.max_num_points, max_voxels) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += 'voxel_size=' + str(self.voxel_size) + s += ', point_cloud_range=' + str(self.point_cloud_range) + s += ', max_num_points=' + str(self.max_num_points) + s += ', max_voxels=' + str(self.max_voxels) + s += ')' + return s diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ed2c17ad357742e423beeaf4d35db03fe9af469 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collate import collate +from .data_container import DataContainer +from .data_parallel import MMDataParallel +from .distributed import MMDistributedDataParallel +from .registry import MODULE_WRAPPERS +from .scatter_gather import scatter, scatter_kwargs +from .utils import is_module_wrapper + +__all__ = [ + 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel', + 'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/_functions.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..9b5a8a44483ab991411d07122b22a1d027e4be8e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/_functions.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from torch.nn.parallel._functions import _get_stream + + +def scatter(input, devices, streams=None): + """Scatters tensor across multiple GPUs.""" + if streams is None: + streams = [None] * len(devices) + + if isinstance(input, list): + chunk_size = (len(input) - 1) // len(devices) + 1 + outputs = [ + scatter(input[i], [devices[i // chunk_size]], + [streams[i // chunk_size]]) for i in range(len(input)) + ] + return outputs + elif isinstance(input, torch.Tensor): + output = input.contiguous() + # TODO: copy to a pinned buffer first (if copying from CPU) + stream = streams[0] if output.numel() > 0 else None + if devices != [-1]: + with torch.cuda.device(devices[0]), torch.cuda.stream(stream): + output = output.cuda(devices[0], non_blocking=True) + else: + # unsqueeze the first dimension thus the tensor's shape is the + # same as those scattered with GPU. + output = output.unsqueeze(0) + return output + else: + raise Exception(f'Unknown type {type(input)}.') + + +def synchronize_stream(output, devices, streams): + if isinstance(output, list): + chunk_size = len(output) // len(devices) + for i in range(len(devices)): + for j in range(chunk_size): + synchronize_stream(output[i * chunk_size + j], [devices[i]], + [streams[i]]) + elif isinstance(output, torch.Tensor): + if output.numel() != 0: + with torch.cuda.device(devices[0]): + main_stream = torch.cuda.current_stream() + main_stream.wait_stream(streams[0]) + output.record_stream(main_stream) + else: + raise Exception(f'Unknown type {type(output)}.') + + +def get_input_device(input): + if isinstance(input, list): + for item in input: + input_device = get_input_device(item) + if input_device != -1: + return input_device + return -1 + elif isinstance(input, torch.Tensor): + return input.get_device() if input.is_cuda else -1 + else: + raise Exception(f'Unknown type {type(input)}.') + + +class Scatter: + + @staticmethod + def forward(target_gpus, input): + input_device = get_input_device(input) + streams = None + if input_device == -1 and target_gpus != [-1]: + # Perform CPU to GPU copies in a background stream + streams = [_get_stream(device) for device in target_gpus] + + outputs = scatter(input, target_gpus, streams) + # Synchronize with the copy stream + if streams is not None: + synchronize_stream(outputs, target_gpus, streams) + + return tuple(outputs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/collate.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/collate.py new file mode 100644 index 0000000000000000000000000000000000000000..ad749197df21b0d74297548be5f66a696adebf7f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/collate.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections.abc import Mapping, Sequence + +import torch +import torch.nn.functional as F +from torch.utils.data.dataloader import default_collate + +from .data_container import DataContainer + + +def collate(batch, samples_per_gpu=1): + """Puts each data field into a tensor/DataContainer with outer dimension + batch size. + + Extend default_collate to add support for + :type:`~mmcv.parallel.DataContainer`. There are 3 cases. + + 1. cpu_only = True, e.g., meta data + 2. cpu_only = False, stack = True, e.g., images tensors + 3. 
cpu_only = False, stack = False, e.g., gt bboxes + """ + + if not isinstance(batch, Sequence): + raise TypeError(f'{batch.dtype} is not supported.') + + if isinstance(batch[0], DataContainer): + stacked = [] + if batch[0].cpu_only: + for i in range(0, len(batch), samples_per_gpu): + stacked.append( + [sample.data for sample in batch[i:i + samples_per_gpu]]) + return DataContainer( + stacked, batch[0].stack, batch[0].padding_value, cpu_only=True) + elif batch[0].stack: + for i in range(0, len(batch), samples_per_gpu): + assert isinstance(batch[i].data, torch.Tensor) + + if batch[i].pad_dims is not None: + ndim = batch[i].dim() + assert ndim > batch[i].pad_dims + max_shape = [0 for _ in range(batch[i].pad_dims)] + for dim in range(1, batch[i].pad_dims + 1): + max_shape[dim - 1] = batch[i].size(-dim) + for sample in batch[i:i + samples_per_gpu]: + for dim in range(0, ndim - batch[i].pad_dims): + assert batch[i].size(dim) == sample.size(dim) + for dim in range(1, batch[i].pad_dims + 1): + max_shape[dim - 1] = max(max_shape[dim - 1], + sample.size(-dim)) + padded_samples = [] + for sample in batch[i:i + samples_per_gpu]: + pad = [0 for _ in range(batch[i].pad_dims * 2)] + for dim in range(1, batch[i].pad_dims + 1): + pad[2 * dim - + 1] = max_shape[dim - 1] - sample.size(-dim) + padded_samples.append( + F.pad( + sample.data, pad, value=sample.padding_value)) + stacked.append(default_collate(padded_samples)) + elif batch[i].pad_dims is None: + stacked.append( + default_collate([ + sample.data + for sample in batch[i:i + samples_per_gpu] + ])) + else: + raise ValueError( + 'pad_dims should be either None or integers (1-3)') + + else: + for i in range(0, len(batch), samples_per_gpu): + stacked.append( + [sample.data for sample in batch[i:i + samples_per_gpu]]) + return DataContainer(stacked, batch[0].stack, batch[0].padding_value) + elif isinstance(batch[0], Sequence): + transposed = zip(*batch) + return [collate(samples, samples_per_gpu) for samples in transposed] + elif isinstance(batch[0], Mapping): + return { + key: collate([d[key] for d in batch], samples_per_gpu) + for key in batch[0] + } + else: + return default_collate(batch) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_container.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_container.py new file mode 100644 index 0000000000000000000000000000000000000000..cedb0d32a51a1f575a622b38de2cee3ab4757821 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_container.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import torch + + +def assert_tensor_type(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not isinstance(args[0].data, torch.Tensor): + raise AttributeError( + f'{args[0].__class__.__name__} has no attribute ' + f'{func.__name__} for type {args[0].datatype}') + return func(*args, **kwargs) + + return wrapper + + +class DataContainer: + """A container for any type of objects. + + Typically tensors will be stacked in the collate function and sliced along + some dimension in the scatter function. This behavior has some limitations. + 1. All tensors have to be the same size. + 2. Types are limited (numpy array or Tensor). + + We design `DataContainer` and `MMDataParallel` to overcome these + limitations. The behavior can be either of the following. 
+
+    - copy to GPU, pad all tensors to the same size and stack them
+    - copy to GPU without stacking
+    - leave the objects as they are and pass them to the model
+    - pad_dims specifies the number of last few dimensions to do padding
+    """
+
+    def __init__(self,
+                 data,
+                 stack=False,
+                 padding_value=0,
+                 cpu_only=False,
+                 pad_dims=2):
+        self._data = data
+        self._cpu_only = cpu_only
+        self._stack = stack
+        self._padding_value = padding_value
+        assert pad_dims in [None, 1, 2, 3]
+        self._pad_dims = pad_dims
+
+    def __repr__(self):
+        return f'{self.__class__.__name__}({repr(self.data)})'
+
+    def __len__(self):
+        return len(self._data)
+
+    @property
+    def data(self):
+        return self._data
+
+    @property
+    def datatype(self):
+        if isinstance(self.data, torch.Tensor):
+            return self.data.type()
+        else:
+            return type(self.data)
+
+    @property
+    def cpu_only(self):
+        return self._cpu_only
+
+    @property
+    def stack(self):
+        return self._stack
+
+    @property
+    def padding_value(self):
+        return self._padding_value
+
+    @property
+    def pad_dims(self):
+        return self._pad_dims
+
+    @assert_tensor_type
+    def size(self, *args, **kwargs):
+        return self.data.size(*args, **kwargs)
+
+    @assert_tensor_type
+    def dim(self):
+        return self.data.dim()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_parallel.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..79b5f69b654cf647dc7ae9174223781ab5c607d2
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/data_parallel.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from itertools import chain
+
+from torch.nn.parallel import DataParallel
+
+from .scatter_gather import scatter_kwargs
+
+
+class MMDataParallel(DataParallel):
+    """The DataParallel module that supports DataContainer.
+
+    MMDataParallel has two main differences from PyTorch DataParallel:
+
+    - It supports a custom type :class:`DataContainer` which allows more
+      flexible control of input data during both GPU and CPU inference.
+    - It implements two more APIs: ``train_step()`` and ``val_step()``.
+
+    Args:
+        module (:class:`nn.Module`): Module to be encapsulated.
+        device_ids (list[int]): Device IDs of modules to be scattered to.
+            Defaults to None when GPU is not available.
+        output_device (str | int): Device ID for output. Defaults to None.
+        dim (int): Dimension used to scatter the data. Defaults to 0.
+    """
+
+    def __init__(self, *args, dim=0, **kwargs):
+        super(MMDataParallel, self).__init__(*args, dim=dim, **kwargs)
+        self.dim = dim
+
+    def forward(self, *inputs, **kwargs):
+        """Override the original forward function.
+
+        The main difference lies in the CPU inference, where the data in
+        :class:`DataContainer` will still be gathered.
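+
+        Example (an illustrative sketch, not from the upstream sources;
+        ``model`` and ``data`` are hypothetical placeholders)::
+
+            >>> # on a machine with no visible GPUs, device_ids is empty
+            >>> wrapped = MMDataParallel(model)
+            >>> result = wrapped(**data)  # DataContainers are gathered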
+ """ + if not self.device_ids: + # We add the following line thus the module could gather and + # convert data containers as those in GPU inference + inputs, kwargs = self.scatter(inputs, kwargs, [-1]) + return self.module(*inputs[0], **kwargs[0]) + else: + return super().forward(*inputs, **kwargs) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def train_step(self, *inputs, **kwargs): + if not self.device_ids: + # We add the following line thus the module could gather and + # convert data containers as those in GPU inference + inputs, kwargs = self.scatter(inputs, kwargs, [-1]) + return self.module.train_step(*inputs[0], **kwargs[0]) + + assert len(self.device_ids) == 1, \ + ('MMDataParallel only supports single GPU training, if you need to' + ' train with multiple GPUs, please use MMDistributedDataParallel' + 'instead.') + + for t in chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError( + 'module must have its parameters and buffers ' + f'on device {self.src_device_obj} (device_ids[0]) but ' + f'found one of them on device: {t.device}') + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + return self.module.train_step(*inputs[0], **kwargs[0]) + + def val_step(self, *inputs, **kwargs): + if not self.device_ids: + # We add the following line thus the module could gather and + # convert data containers as those in GPU inference + inputs, kwargs = self.scatter(inputs, kwargs, [-1]) + return self.module.val_step(*inputs[0], **kwargs[0]) + + assert len(self.device_ids) == 1, \ + ('MMDataParallel only supports single GPU training, if you need to' + ' train with multiple GPUs, please use MMDistributedDataParallel' + ' instead.') + + for t in chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError( + 'module must have its parameters and buffers ' + f'on device {self.src_device_obj} (device_ids[0]) but ' + f'found one of them on device: {t.device}') + + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + return self.module.val_step(*inputs[0], **kwargs[0]) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..fa1bae90f8d4078f7c52bfc565f8349f1e5c8db0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.nn.parallel.distributed import (DistributedDataParallel, + _find_tensors) + +from custom_mmpkg.custom_mmcv import print_log +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, digit_version +from .scatter_gather import scatter_kwargs + + +class MMDistributedDataParallel(DistributedDataParallel): + """The DDP module that supports DataContainer. + + MMDDP has two main differences with PyTorch DDP: + + - It supports a custom type :class:`DataContainer` which allows more + flexible control of input data. + - It implement two APIs ``train_step()`` and ``val_step()``. 
+ """ + + def to_kwargs(self, inputs, kwargs, device_id): + # Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8 + # to move all tensors to device_id + return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def train_step(self, *inputs, **kwargs): + """train_step() API for module wrapped by DistributedDataParallel. + + This method is basically the same as + ``DistributedDataParallel.forward()``, while replacing + ``self.module.forward()`` with ``self.module.train_step()``. + It is compatible with PyTorch 1.1 - 1.5. + """ + + # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the + # end of backward to the beginning of forward. + if ('parrots' not in TORCH_VERSION + and digit_version(TORCH_VERSION) >= digit_version('1.7') + and self.reducer._rebuild_buckets()): + print_log( + 'Reducer buckets have been rebuilt in this iteration.', + logger='mmcv') + + if getattr(self, 'require_forward_param_sync', True): + self._sync_params() + if self.device_ids: + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + output = self.module.train_step(*inputs[0], **kwargs[0]) + else: + outputs = self.parallel_apply( + self._module_copies[:len(inputs)], inputs, kwargs) + output = self.gather(outputs, self.output_device) + else: + output = self.module.train_step(*inputs, **kwargs) + + if torch.is_grad_enabled() and getattr( + self, 'require_backward_grad_sync', True): + if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + if ('parrots' not in TORCH_VERSION + and digit_version(TORCH_VERSION) > digit_version('1.2')): + self.require_forward_param_sync = False + return output + + def val_step(self, *inputs, **kwargs): + """val_step() API for module wrapped by DistributedDataParallel. + + This method is basically the same as + ``DistributedDataParallel.forward()``, while replacing + ``self.module.forward()`` with ``self.module.val_step()``. + It is compatible with PyTorch 1.1 - 1.5. + """ + # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the + # end of backward to the beginning of forward. 
+ if ('parrots' not in TORCH_VERSION + and digit_version(TORCH_VERSION) >= digit_version('1.7') + and self.reducer._rebuild_buckets()): + print_log( + 'Reducer buckets have been rebuilt in this iteration.', + logger='mmcv') + + if getattr(self, 'require_forward_param_sync', True): + self._sync_params() + if self.device_ids: + inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) + if len(self.device_ids) == 1: + output = self.module.val_step(*inputs[0], **kwargs[0]) + else: + outputs = self.parallel_apply( + self._module_copies[:len(inputs)], inputs, kwargs) + output = self.gather(outputs, self.output_device) + else: + output = self.module.val_step(*inputs, **kwargs) + + if torch.is_grad_enabled() and getattr( + self, 'require_backward_grad_sync', True): + if self.find_unused_parameters: + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + if ('parrots' not in TORCH_VERSION + and digit_version(TORCH_VERSION) > digit_version('1.2')): + self.require_forward_param_sync = False + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed_deprecated.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed_deprecated.py new file mode 100644 index 0000000000000000000000000000000000000000..d31f7be0eb5b7f92c0d2fca6faca69152472ac27 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/distributed_deprecated.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.distributed as dist +import torch.nn as nn +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, digit_version +from .registry import MODULE_WRAPPERS +from .scatter_gather import scatter_kwargs + + +@MODULE_WRAPPERS.register_module() +class MMDistributedDataParallel(nn.Module): + + def __init__(self, + module, + dim=0, + broadcast_buffers=True, + bucket_cap_mb=25): + super(MMDistributedDataParallel, self).__init__() + self.module = module + self.dim = dim + self.broadcast_buffers = broadcast_buffers + + self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024 + self._sync_params() + + def _dist_broadcast_coalesced(self, tensors, buffer_size): + for tensors in _take_tensors(tensors, buffer_size): + flat_tensors = _flatten_dense_tensors(tensors) + dist.broadcast(flat_tensors, 0) + for tensor, synced in zip( + tensors, _unflatten_dense_tensors(flat_tensors, tensors)): + tensor.copy_(synced) + + def _sync_params(self): + module_states = list(self.module.state_dict().values()) + if len(module_states) > 0: + self._dist_broadcast_coalesced(module_states, + self.broadcast_bucket_size) + if self.broadcast_buffers: + if (TORCH_VERSION != 'parrots' + and digit_version(TORCH_VERSION) < digit_version('1.0')): + buffers = [b.data for b in self.module._all_buffers()] + else: + buffers = [b.data for b in self.module.buffers()] + if len(buffers) > 0: + self._dist_broadcast_coalesced(buffers, + self.broadcast_bucket_size) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def forward(self, *inputs, **kwargs): + inputs, kwargs = self.scatter(inputs, kwargs, + [torch.cuda.current_device()]) + return self.module(*inputs[0], **kwargs[0]) + + def train_step(self, *inputs, **kwargs): + inputs, kwargs = self.scatter(inputs, kwargs, + 
[torch.cuda.current_device()]) + output = self.module.train_step(*inputs[0], **kwargs[0]) + return output + + def val_step(self, *inputs, **kwargs): + inputs, kwargs = self.scatter(inputs, kwargs, + [torch.cuda.current_device()]) + output = self.module.val_step(*inputs[0], **kwargs[0]) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/registry.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0e9f6639628c444e4682d639eabbef76114d01 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/registry.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch.nn.parallel import DataParallel, DistributedDataParallel + +from custom_mmpkg.custom_mmcv.utils import Registry + +MODULE_WRAPPERS = Registry('module wrapper') +MODULE_WRAPPERS.register_module(module=DataParallel) +MODULE_WRAPPERS.register_module(module=DistributedDataParallel) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/scatter_gather.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/scatter_gather.py new file mode 100644 index 0000000000000000000000000000000000000000..900ff88566f8f14830590459dc4fd16d4b382e47 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/scatter_gather.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch.nn.parallel._functions import Scatter as OrigScatter + +from ._functions import Scatter +from .data_container import DataContainer + + +def scatter(inputs, target_gpus, dim=0): + """Scatter inputs to target gpus. + + The only difference from original :func:`scatter` is to add support for + :type:`~mmcv.parallel.DataContainer`. + """ + + def scatter_map(obj): + if isinstance(obj, torch.Tensor): + if target_gpus != [-1]: + return OrigScatter.apply(target_gpus, None, dim, obj) + else: + # for CPU inference we use self-implemented scatter + return Scatter.forward(target_gpus, obj) + if isinstance(obj, DataContainer): + if obj.cpu_only: + return obj.data + else: + return Scatter.forward(target_gpus, obj.data) + if isinstance(obj, tuple) and len(obj) > 0: + return list(zip(*map(scatter_map, obj))) + if isinstance(obj, list) and len(obj) > 0: + out = list(map(list, zip(*map(scatter_map, obj)))) + return out + if isinstance(obj, dict) and len(obj) > 0: + out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) + return out + return [obj for targets in target_gpus] + + # After scatter_map is called, a scatter_map cell will exist. This cell + # has a reference to the actual function scatter_map, which has references + # to a closure that has a reference to the scatter_map cell (because the + # fn is recursive). 
To avoid this reference cycle, we set the function to
+    # None, clearing the cell
+    try:
+        return scatter_map(inputs)
+    finally:
+        scatter_map = None
+
+
+def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
+    """Scatter with support for kwargs dictionary."""
+    inputs = scatter(inputs, target_gpus, dim) if inputs else []
+    kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
+    if len(inputs) < len(kwargs):
+        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
+    elif len(kwargs) < len(inputs):
+        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
+    inputs = tuple(inputs)
+    kwargs = tuple(kwargs)
+    return inputs, kwargs
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f5712cb42c38a2e8563bf563efb6681383cab9b
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/parallel/utils.py
@@ -0,0 +1,20 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .registry import MODULE_WRAPPERS
+
+
+def is_module_wrapper(module):
+    """Check if a module is a module wrapper.
+
+    The following 3 modules in MMCV (and their subclasses) are regarded as
+    module wrappers: DataParallel, DistributedDataParallel,
+    MMDistributedDataParallel (the deprecated version). You may add your own
+    module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.
+
+    Args:
+        module (nn.Module): The module to be checked.
+
+    Returns:
+        bool: True if the input module is a module wrapper.
+    """
+    module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values())
+    return isinstance(module, module_wrappers)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..52e4b48d383a84a055dcd7f6236f6e8e58eab924
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/__init__.py
@@ -0,0 +1,47 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .base_module import BaseModule, ModuleList, Sequential +from .base_runner import BaseRunner +from .builder import RUNNERS, build_runner +from .checkpoint import (CheckpointLoader, _load_checkpoint, + _load_checkpoint_with_prefix, load_checkpoint, + load_state_dict, save_checkpoint, weights_to_cpu) +from .default_constructor import DefaultRunnerConstructor +from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info, + init_dist, master_only) +from .epoch_based_runner import EpochBasedRunner, Runner +from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model +from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook, + DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook, + Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, + GradientCumulativeOptimizerHook, Hook, IterTimerHook, + LoggerHook, LrUpdaterHook, MlflowLoggerHook, + NeptuneLoggerHook, OptimizerHook, PaviLoggerHook, + SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook, + WandbLoggerHook) +from .iter_based_runner import IterBasedRunner, IterLoader +from .log_buffer import LogBuffer +from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS, + DefaultOptimizerConstructor, build_optimizer, + build_optimizer_constructor) +from .priority import Priority, get_priority +from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed + +__all__ = [ + 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer', + 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', + 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook', + 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', + 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook', + 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict', + 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority', + 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict', + 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS', + 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer', + 'build_optimizer_constructor', 'IterLoader', 'set_random_seed', + 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook', + 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads', + 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule', + '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential', + 'ModuleList', 'GradientCumulativeOptimizerHook', + 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_module.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_module.py new file mode 100644 index 0000000000000000000000000000000000000000..362b0ae39a9e5e92b22f52918eaecc11dfde10b3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_module.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings +from abc import ABCMeta +from collections import defaultdict +from logging import FileHandler + +import torch.nn as nn + +from custom_mmpkg.custom_mmcv.runner.dist_utils import master_only +from custom_mmpkg.custom_mmcv.utils.logging import get_logger, logger_initialized, print_log + + +class BaseModule(nn.Module, metaclass=ABCMeta): + """Base module for all modules in openmmlab. 
+ + ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional + functionality of parameter initialization. Compared with + ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. + + - ``init_cfg``: the config to control the initialization. + - ``init_weights``: The function of parameter + initialization and recording initialization + information. + - ``_params_init_info``: Used to track the parameter + initialization information. This attribute only + exists during executing the ``init_weights``. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, init_cfg=None): + """Initialize BaseModule, inherited from `torch.nn.Module`""" + + # NOTE init_cfg can be defined in different levels, but init_cfg + # in low levels has a higher priority. + + super(BaseModule, self).__init__() + # define default value of init_cfg instead of hard code + # in init_weights() function + self._is_init = False + + self.init_cfg = copy.deepcopy(init_cfg) + + # Backward compatibility in derived classes + # if pretrained is not None: + # warnings.warn('DeprecationWarning: pretrained is a deprecated \ + # key, please consider using init_cfg') + # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + @property + def is_init(self): + return self._is_init + + def init_weights(self): + """Initialize the weights.""" + + is_top_level_module = False + # check if it is top-level module + if not hasattr(self, '_params_init_info'): + # The `_params_init_info` is used to record the initialization + # information of the parameters + # the key should be the obj:`nn.Parameter` of model and the value + # should be a dict containing + # - init_info (str): The string that describes the initialization. + # - tmp_mean_value (FloatTensor): The mean of the parameter, + # which indicates whether the parameter has been modified. + # this attribute would be deleted after all parameters + # is initialized. + self._params_init_info = defaultdict(dict) + is_top_level_module = True + + # Initialize the `_params_init_info`, + # When detecting the `tmp_mean_value` of + # the corresponding parameter is changed, update related + # initialization information + for name, param in self.named_parameters(): + self._params_init_info[param][ + 'init_info'] = f'The value is the same before and ' \ + f'after calling `init_weights` ' \ + f'of {self.__class__.__name__} ' + self._params_init_info[param][ + 'tmp_mean_value'] = param.data.mean() + + # pass `params_init_info` to all submodules + # All submodules share the same `params_init_info`, + # so it will be updated when parameters are + # modified at any level of the model. 
+ for sub_module in self.modules(): + sub_module._params_init_info = self._params_init_info + + # Get the initialized logger, if not exist, + # create a logger named `mmcv` + logger_names = list(logger_initialized.keys()) + logger_name = logger_names[0] if logger_names else 'mmcv' + + from ..cnn import initialize + from ..cnn.utils.weight_init import update_init_info + module_name = self.__class__.__name__ + if not self._is_init: + if self.init_cfg: + print_log( + f'initialize {module_name} with init_cfg {self.init_cfg}', + logger=logger_name) + initialize(self, self.init_cfg) + if isinstance(self.init_cfg, dict): + # prevent the parameters of + # the pre-trained model + # from being overwritten by + # the `init_weights` + if self.init_cfg['type'] == 'Pretrained': + return + + for m in self.children(): + if hasattr(m, 'init_weights'): + m.init_weights() + # users may overload the `init_weights` + update_init_info( + m, + init_info=f'Initialized by ' + f'user-defined `init_weights`' + f' in {m.__class__.__name__} ') + + self._is_init = True + else: + warnings.warn(f'init_weights of {self.__class__.__name__} has ' + f'been called more than once.') + + if is_top_level_module: + self._dump_init_info(logger_name) + + for sub_module in self.modules(): + del sub_module._params_init_info + + @master_only + def _dump_init_info(self, logger_name): + """Dump the initialization information to a file named + `initialization.log.json` in workdir. + + Args: + logger_name (str): The name of logger. + """ + + logger = get_logger(logger_name) + + with_file_handler = False + # dump the information to the logger file if there is a `FileHandler` + for handler in logger.handlers: + if isinstance(handler, FileHandler): + handler.stream.write( + 'Name of parameter - Initialization information\n') + for name, param in self.named_parameters(): + handler.stream.write( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n") + handler.stream.flush() + with_file_handler = True + if not with_file_handler: + for name, param in self.named_parameters(): + print_log( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n ", + logger=logger_name) + + def __repr__(self): + s = super().__repr__() + if self.init_cfg: + s += f'\ninit_cfg={self.init_cfg}' + return s + + +class Sequential(BaseModule, nn.Sequential): + """Sequential module in openmmlab. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, *args, init_cfg=None): + BaseModule.__init__(self, init_cfg) + nn.Sequential.__init__(self, *args) + + +class ModuleList(BaseModule, nn.ModuleList): + """ModuleList in openmmlab. + + Args: + modules (iterable, optional): an iterable of modules to add. + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, modules=None, init_cfg=None): + BaseModule.__init__(self, init_cfg) + nn.ModuleList.__init__(self, modules) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_runner.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..6f15d71940ae558c10fcd4372d0c87f1efde93a9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/base_runner.py @@ -0,0 +1,542 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy
+import logging
+import os.path as osp
+import warnings
+from abc import ABCMeta, abstractmethod
+
+import torch
+from torch.optim import Optimizer
+
+import custom_mmpkg.custom_mmcv as mmcv
+from ..parallel import is_module_wrapper
+from .checkpoint import load_checkpoint
+from .dist_utils import get_dist_info
+from .hooks import HOOKS, Hook
+from .log_buffer import LogBuffer
+from .priority import Priority, get_priority
+from .utils import get_time_str
+
+
+class BaseRunner(metaclass=ABCMeta):
+    """The base class of Runner, a training helper for PyTorch.
+
+    All subclasses should implement the following APIs:
+
+    - ``run()``
+    - ``train()``
+    - ``val()``
+    - ``save_checkpoint()``
+
+    Args:
+        model (:obj:`torch.nn.Module`): The model to be run.
+        batch_processor (callable): A callable method that processes a data
+            batch. The interface of this method should be
+            `batch_processor(model, data, train_mode) -> dict`
+        optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
+            optimizer (in most cases) or a dict of optimizers (in models that
+            require more than one optimizer, e.g., GAN).
+        work_dir (str, optional): The working directory to save checkpoints
+            and logs. Defaults to None.
+        logger (:obj:`logging.Logger`): Logger used during training.
+            Defaults to None. (The default value is just for backward
+            compatibility)
+        meta (dict | None): A dict that records some important information,
+            such as environment info and seed, which will be logged in the
+            logger hook. Defaults to None.
+        max_epochs (int, optional): Total training epochs.
+        max_iters (int, optional): Total training iterations.
+    """
+
+    def __init__(self,
+                 model,
+                 batch_processor=None,
+                 optimizer=None,
+                 work_dir=None,
+                 logger=None,
+                 meta=None,
+                 max_iters=None,
+                 max_epochs=None):
+        if batch_processor is not None:
+            if not callable(batch_processor):
+                raise TypeError('batch_processor must be callable, '
+                                f'but got {type(batch_processor)}')
+            warnings.warn('batch_processor is deprecated, please implement '
+                          'train_step() and val_step() in the model instead.')
+            # raise an error if `batch_processor` is not None and
+            # `model.train_step()` exists.
+ if is_module_wrapper(model): + _model = model.module + else: + _model = model + if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'): + raise RuntimeError( + 'batch_processor and model.train_step()/model.val_step() ' + 'cannot be both available.') + else: + assert hasattr(model, 'train_step') + + # check the type of `optimizer` + if isinstance(optimizer, dict): + for name, optim in optimizer.items(): + if not isinstance(optim, Optimizer): + raise TypeError( + f'optimizer must be a dict of torch.optim.Optimizers, ' + f'but optimizer["{name}"] is a {type(optim)}') + elif not isinstance(optimizer, Optimizer) and optimizer is not None: + raise TypeError( + f'optimizer must be a torch.optim.Optimizer object ' + f'or dict or None, but got {type(optimizer)}') + + # check the type of `logger` + if not isinstance(logger, logging.Logger): + raise TypeError(f'logger must be a logging.Logger object, ' + f'but got {type(logger)}') + + # check the type of `meta` + if meta is not None and not isinstance(meta, dict): + raise TypeError( + f'meta must be a dict or None, but got {type(meta)}') + + self.model = model + self.batch_processor = batch_processor + self.optimizer = optimizer + self.logger = logger + self.meta = meta + # create work_dir + if mmcv.is_str(work_dir): + self.work_dir = osp.abspath(work_dir) + mmcv.mkdir_or_exist(self.work_dir) + elif work_dir is None: + self.work_dir = None + else: + raise TypeError('"work_dir" must be a str or None') + + # get model name from the model class + if hasattr(self.model, 'module'): + self._model_name = self.model.module.__class__.__name__ + else: + self._model_name = self.model.__class__.__name__ + + self._rank, self._world_size = get_dist_info() + self.timestamp = get_time_str() + self.mode = None + self._hooks = [] + self._epoch = 0 + self._iter = 0 + self._inner_iter = 0 + + if max_epochs is not None and max_iters is not None: + raise ValueError( + 'Only one of `max_epochs` or `max_iters` can be set.') + + self._max_epochs = max_epochs + self._max_iters = max_iters + # TODO: Redesign LogBuffer, it is not flexible and elegant enough + self.log_buffer = LogBuffer() + + @property + def model_name(self): + """str: Name of the model, usually the module class name.""" + return self._model_name + + @property + def rank(self): + """int: Rank of current process. (distributed training)""" + return self._rank + + @property + def world_size(self): + """int: Number of processes participating in the job. + (distributed training)""" + return self._world_size + + @property + def hooks(self): + """list[:obj:`Hook`]: A list of registered hooks.""" + return self._hooks + + @property + def epoch(self): + """int: Current epoch.""" + return self._epoch + + @property + def iter(self): + """int: Current iteration.""" + return self._iter + + @property + def inner_iter(self): + """int: Iteration in an epoch.""" + return self._inner_iter + + @property + def max_epochs(self): + """int: Maximum training epochs.""" + return self._max_epochs + + @property + def max_iters(self): + """int: Maximum training iterations.""" + return self._max_iters + + @abstractmethod + def train(self): + pass + + @abstractmethod + def val(self): + pass + + @abstractmethod + def run(self, data_loaders, workflow, **kwargs): + pass + + @abstractmethod + def save_checkpoint(self, + out_dir, + filename_tmpl, + save_optimizer=True, + meta=None, + create_symlink=True): + pass + + def current_lr(self): + """Get current learning rates. 
+ + Returns: + list[float] | dict[str, list[float]]: Current learning rates of all + param groups. If the runner has a dict of optimizers, this + method will return a dict. + """ + if isinstance(self.optimizer, torch.optim.Optimizer): + lr = [group['lr'] for group in self.optimizer.param_groups] + elif isinstance(self.optimizer, dict): + lr = dict() + for name, optim in self.optimizer.items(): + lr[name] = [group['lr'] for group in optim.param_groups] + else: + raise RuntimeError( + 'lr is not applicable because optimizer does not exist.') + return lr + + def current_momentum(self): + """Get current momentums. + + Returns: + list[float] | dict[str, list[float]]: Current momentums of all + param groups. If the runner has a dict of optimizers, this + method will return a dict. + """ + + def _get_momentum(optimizer): + momentums = [] + for group in optimizer.param_groups: + if 'momentum' in group.keys(): + momentums.append(group['momentum']) + elif 'betas' in group.keys(): + momentums.append(group['betas'][0]) + else: + momentums.append(0) + return momentums + + if self.optimizer is None: + raise RuntimeError( + 'momentum is not applicable because optimizer does not exist.') + elif isinstance(self.optimizer, torch.optim.Optimizer): + momentums = _get_momentum(self.optimizer) + elif isinstance(self.optimizer, dict): + momentums = dict() + for name, optim in self.optimizer.items(): + momentums[name] = _get_momentum(optim) + return momentums + + def register_hook(self, hook, priority='NORMAL'): + """Register a hook into the hook list. + + The hook will be inserted into a priority queue, with the specified + priority (See :class:`Priority` for details of priorities). + For hooks with the same priority, they will be triggered in the same + order as they are registered. + + Args: + hook (:obj:`Hook`): The hook to be registered. + priority (int or str or :obj:`Priority`): Hook priority. + Lower value means higher priority. + """ + assert isinstance(hook, Hook) + if hasattr(hook, 'priority'): + raise ValueError('"priority" is a reserved attribute for hooks') + priority = get_priority(priority) + hook.priority = priority + # insert the hook to a sorted list + inserted = False + for i in range(len(self._hooks) - 1, -1, -1): + if priority >= self._hooks[i].priority: + self._hooks.insert(i + 1, hook) + inserted = True + break + if not inserted: + self._hooks.insert(0, hook) + + def register_hook_from_cfg(self, hook_cfg): + """Register a hook from its cfg. + + Args: + hook_cfg (dict): Hook config. It should have at least keys 'type' + and 'priority' indicating its type and priority. + + Notes: + The specific hook class to register should not use 'type' and + 'priority' arguments during initialization. + """ + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = mmcv.build_from_cfg(hook_cfg, HOOKS) + self.register_hook(hook, priority=priority) + + def call_hook(self, fn_name): + """Call all hooks. + + Args: + fn_name (str): The function name in each hook to be called, such as + "before_train_epoch". 
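+
+        Example (an illustrative sketch; ``runner`` is a hypothetical
+        instance of a :class:`BaseRunner` subclass)::
+
+            >>> runner.call_hook('before_train_epoch')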
+ """ + for hook in self._hooks: + getattr(hook, fn_name)(self) + + def get_hook_info(self): + # Get hooks info in each stage + stage_hook_map = {stage: [] for stage in Hook.stages} + for hook in self.hooks: + try: + priority = Priority(hook.priority).name + except ValueError: + priority = hook.priority + classname = hook.__class__.__name__ + hook_info = f'({priority:<12}) {classname:<35}' + for trigger_stage in hook.get_triggered_stages(): + stage_hook_map[trigger_stage].append(hook_info) + + stage_hook_infos = [] + for stage in Hook.stages: + hook_infos = stage_hook_map[stage] + if len(hook_infos) > 0: + info = f'{stage}:\n' + info += '\n'.join(hook_infos) + info += '\n -------------------- ' + stage_hook_infos.append(info) + return '\n'.join(stage_hook_infos) + + def load_checkpoint(self, + filename, + map_location='cpu', + strict=False, + revise_keys=[(r'^module.', '')]): + return load_checkpoint( + self.model, + filename, + map_location, + strict, + self.logger, + revise_keys=revise_keys) + + def resume(self, + checkpoint, + resume_optimizer=True, + map_location='default'): + if map_location == 'default': + if torch.cuda.is_available(): + device_id = torch.cuda.current_device() + checkpoint = self.load_checkpoint( + checkpoint, + map_location=lambda storage, loc: storage.cuda(device_id)) + else: + checkpoint = self.load_checkpoint(checkpoint) + else: + checkpoint = self.load_checkpoint( + checkpoint, map_location=map_location) + + self._epoch = checkpoint['meta']['epoch'] + self._iter = checkpoint['meta']['iter'] + if self.meta is None: + self.meta = {} + self.meta.setdefault('hook_msgs', {}) + # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages + self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {})) + + # Re-calculate the number of iterations when resuming + # models with different number of GPUs + if 'config' in checkpoint['meta']: + config = mmcv.Config.fromstring( + checkpoint['meta']['config'], file_format='.py') + previous_gpu_ids = config.get('gpu_ids', None) + if previous_gpu_ids and len(previous_gpu_ids) > 0 and len( + previous_gpu_ids) != self.world_size: + self._iter = int(self._iter * len(previous_gpu_ids) / + self.world_size) + self.logger.info('the iteration number is changed due to ' + 'change of GPU number') + + # resume meta information meta + self.meta = checkpoint['meta'] + + if 'optimizer' in checkpoint and resume_optimizer: + if isinstance(self.optimizer, Optimizer): + self.optimizer.load_state_dict(checkpoint['optimizer']) + elif isinstance(self.optimizer, dict): + for k in self.optimizer.keys(): + self.optimizer[k].load_state_dict( + checkpoint['optimizer'][k]) + else: + raise TypeError( + 'Optimizer should be dict or torch.optim.Optimizer ' + f'but got {type(self.optimizer)}') + + self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter) + + def register_lr_hook(self, lr_config): + if lr_config is None: + return + elif isinstance(lr_config, dict): + assert 'policy' in lr_config + policy_type = lr_config.pop('policy') + # If the type of policy is all in lower case, e.g., 'cyclic', + # then its first letter will be capitalized, e.g., to be 'Cyclic'. + # This is for the convenient usage of Lr updater. + # Since this is not applicable for ` + # CosineAnnealingLrUpdater`, + # the string will not be changed if it contains capital letters. 
+ if policy_type == policy_type.lower(): + policy_type = policy_type.title() + hook_type = policy_type + 'LrUpdaterHook' + lr_config['type'] = hook_type + hook = mmcv.build_from_cfg(lr_config, HOOKS) + else: + hook = lr_config + self.register_hook(hook, priority='VERY_HIGH') + + def register_momentum_hook(self, momentum_config): + if momentum_config is None: + return + if isinstance(momentum_config, dict): + assert 'policy' in momentum_config + policy_type = momentum_config.pop('policy') + # If the type of policy is all in lower case, e.g., 'cyclic', + # then its first letter will be capitalized, e.g., to be 'Cyclic'. + # This is for the convenient usage of momentum updater. + # Since this is not applicable for + # `CosineAnnealingMomentumUpdater`, + # the string will not be changed if it contains capital letters. + if policy_type == policy_type.lower(): + policy_type = policy_type.title() + hook_type = policy_type + 'MomentumUpdaterHook' + momentum_config['type'] = hook_type + hook = mmcv.build_from_cfg(momentum_config, HOOKS) + else: + hook = momentum_config + self.register_hook(hook, priority='HIGH') + + def register_optimizer_hook(self, optimizer_config): + if optimizer_config is None: + return + if isinstance(optimizer_config, dict): + optimizer_config.setdefault('type', 'OptimizerHook') + hook = mmcv.build_from_cfg(optimizer_config, HOOKS) + else: + hook = optimizer_config + self.register_hook(hook, priority='ABOVE_NORMAL') + + def register_checkpoint_hook(self, checkpoint_config): + if checkpoint_config is None: + return + if isinstance(checkpoint_config, dict): + checkpoint_config.setdefault('type', 'CheckpointHook') + hook = mmcv.build_from_cfg(checkpoint_config, HOOKS) + else: + hook = checkpoint_config + self.register_hook(hook, priority='NORMAL') + + def register_logger_hooks(self, log_config): + if log_config is None: + return + log_interval = log_config['interval'] + for info in log_config['hooks']: + logger_hook = mmcv.build_from_cfg( + info, HOOKS, default_args=dict(interval=log_interval)) + self.register_hook(logger_hook, priority='VERY_LOW') + + def register_timer_hook(self, timer_config): + if timer_config is None: + return + if isinstance(timer_config, dict): + timer_config_ = copy.deepcopy(timer_config) + hook = mmcv.build_from_cfg(timer_config_, HOOKS) + else: + hook = timer_config + self.register_hook(hook, priority='LOW') + + def register_custom_hooks(self, custom_config): + if custom_config is None: + return + + if not isinstance(custom_config, list): + custom_config = [custom_config] + + for item in custom_config: + if isinstance(item, dict): + self.register_hook_from_cfg(item) + else: + self.register_hook(item, priority='NORMAL') + + def register_profiler_hook(self, profiler_config): + if profiler_config is None: + return + if isinstance(profiler_config, dict): + profiler_config.setdefault('type', 'ProfilerHook') + hook = mmcv.build_from_cfg(profiler_config, HOOKS) + else: + hook = profiler_config + self.register_hook(hook) + + def register_training_hooks(self, + lr_config, + optimizer_config=None, + checkpoint_config=None, + log_config=None, + momentum_config=None, + timer_config=dict(type='IterTimerHook'), + custom_hooks_config=None): + """Register default and custom hooks for training. 
+ + Default and custom hooks include: + + +----------------------+-------------------------+ + | Hooks | Priority | + +======================+=========================+ + | LrUpdaterHook | VERY_HIGH (10) | + +----------------------+-------------------------+ + | MomentumUpdaterHook | HIGH (30) | + +----------------------+-------------------------+ + | OptimizerStepperHook | ABOVE_NORMAL (40) | + +----------------------+-------------------------+ + | CheckpointSaverHook | NORMAL (50) | + +----------------------+-------------------------+ + | IterTimerHook | LOW (70) | + +----------------------+-------------------------+ + | LoggerHook(s) | VERY_LOW (90) | + +----------------------+-------------------------+ + | CustomHook(s) | defaults to NORMAL (50) | + +----------------------+-------------------------+ + + If custom hooks have same priority with default hooks, custom hooks + will be triggered after default hooks. + """ + self.register_lr_hook(lr_config) + self.register_momentum_hook(momentum_config) + self.register_optimizer_hook(optimizer_config) + self.register_checkpoint_hook(checkpoint_config) + self.register_timer_hook(timer_config) + self.register_logger_hooks(log_config) + self.register_custom_hooks(custom_hooks_config) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..77c96ba0b2f30ead9da23f293c5dc84dd3e4a74f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/builder.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +from ..utils import Registry + +RUNNERS = Registry('runner') +RUNNER_BUILDERS = Registry('runner builder') + + +def build_runner_constructor(cfg): + return RUNNER_BUILDERS.build(cfg) + + +def build_runner(cfg, default_args=None): + runner_cfg = copy.deepcopy(cfg) + constructor_type = runner_cfg.pop('constructor', + 'DefaultRunnerConstructor') + runner_constructor = build_runner_constructor( + dict( + type=constructor_type, + runner_cfg=runner_cfg, + default_args=default_args)) + runner = runner_constructor() + return runner diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/checkpoint.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..da1481088ceb805007b3f1a7cad8bd528d5853f6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/checkpoint.py @@ -0,0 +1,707 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import io +import os +import os.path as osp +import pkgutil +import re +import time +import warnings +from collections import OrderedDict +from importlib import import_module +from tempfile import TemporaryDirectory + +import torch +import torchvision +from torch.optim import Optimizer +from torch.utils import model_zoo + +import custom_mmpkg.custom_mmcv as mmcv +from ..fileio import FileClient +from ..fileio import load as load_file +from ..parallel import is_module_wrapper +from ..utils import mkdir_or_exist +from .dist_utils import get_dist_info + +ENV_MMCV_HOME = 'MMCV_HOME' +ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' +DEFAULT_CACHE_DIR = '~/.cache' + + +def _get_mmcv_home(): + mmcv_home = os.path.expanduser( + os.getenv( + ENV_MMCV_HOME, + os.path.join( + os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) + + mkdir_or_exist(mmcv_home) + return mmcv_home + + +def load_state_dict(module, state_dict, strict=False, logger=None): + """Load state_dict to a module. + + This method is modified from :meth:`torch.nn.Module.load_state_dict`. + Default value for ``strict`` is set to ``False`` and the message for + param mismatch will be shown even if strict is False. + + Args: + module (Module): Module that receives the state_dict. + state_dict (OrderedDict): Weights. + strict (bool): whether to strictly enforce that the keys + in :attr:`state_dict` match the keys returned by this module's + :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. + logger (:obj:`logging.Logger`, optional): Logger to log the error + message. If not specified, print function will be used. + """ + unexpected_keys = [] + all_missing_keys = [] + err_msg = [] + + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + # use _load_from_state_dict to enable checkpoint version control + def load(module, prefix=''): + # recursively check parallel module in case that the model has a + # complicated structure, e.g., nn.Module(nn.Module(DDP)) + if is_module_wrapper(module): + module = module.module + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + module._load_from_state_dict(state_dict, prefix, local_metadata, True, + all_missing_keys, unexpected_keys, + err_msg) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(module) + load = None # break load->load reference cycle + + # ignore "num_batches_tracked" of BN layers + missing_keys = [ + key for key in all_missing_keys if 'num_batches_tracked' not in key + ] + + if unexpected_keys: + err_msg.append('unexpected key in source ' + f'state_dict: {", ".join(unexpected_keys)}\n') + if missing_keys: + err_msg.append( + f'missing keys in source state_dict: {", ".join(missing_keys)}\n') + + rank, _ = get_dist_info() + if len(err_msg) > 0 and rank == 0: + err_msg.insert( + 0, 'The model and loaded state dict do not match exactly\n') + err_msg = '\n'.join(err_msg) + if strict: + raise RuntimeError(err_msg) + elif logger is not None: + logger.warning(err_msg) + else: + print(err_msg) + + +def get_torchvision_models(): + model_urls = dict() + for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): + if ispkg: + continue + _zoo = import_module(f'torchvision.models.{name}') + if hasattr(_zoo, 'model_urls'): + _urls = getattr(_zoo, 'model_urls') + model_urls.update(_urls) + return model_urls + + +def get_external_models(): + mmcv_home = _get_mmcv_home() + default_json_path = 
osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') + default_urls = load_file(default_json_path) + assert isinstance(default_urls, dict) + external_json_path = osp.join(mmcv_home, 'open_mmlab.json') + if osp.exists(external_json_path): + external_urls = load_file(external_json_path) + assert isinstance(external_urls, dict) + default_urls.update(external_urls) + + return default_urls + + +def get_mmcls_models(): + mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') + mmcls_urls = load_file(mmcls_json_path) + + return mmcls_urls + + +def get_deprecated_model_names(): + deprecate_json_path = osp.join(mmcv.__path__[0], + 'model_zoo/deprecated.json') + deprecate_urls = load_file(deprecate_json_path) + assert isinstance(deprecate_urls, dict) + + return deprecate_urls + + +def _process_mmcls_checkpoint(checkpoint): + state_dict = checkpoint['state_dict'] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if k.startswith('backbone.'): + new_state_dict[k[9:]] = v + new_checkpoint = dict(state_dict=new_state_dict) + + return new_checkpoint + + +class CheckpointLoader: + """A general checkpoint loader to manage all schemes.""" + + _schemes = {} + + @classmethod + def _register_scheme(cls, prefixes, loader, force=False): + if isinstance(prefixes, str): + prefixes = [prefixes] + else: + assert isinstance(prefixes, (list, tuple)) + for prefix in prefixes: + if (prefix not in cls._schemes) or force: + cls._schemes[prefix] = loader + else: + raise KeyError( + f'{prefix} is already registered as a loader backend, ' + 'add "force=True" if you want to override it') + # sort, longer prefixes take priority + cls._schemes = OrderedDict( + sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True)) + + @classmethod + def register_scheme(cls, prefixes, loader=None, force=False): + """Register a loader to CheckpointLoader. + + This method can be used as a normal class method or a decorator. + + Args: + prefixes (str or list[str] or tuple[str]): + The prefix of the registered loader. + loader (function, optional): The loader function to be registered. + When this method is used as a decorator, loader is None. + Defaults to None. + force (bool, optional): Whether to override the loader + if the prefix has already been registered. Defaults to False. + """ + + if loader is not None: + cls._register_scheme(prefixes, loader, force=force) + return + + def _register(loader_cls): + cls._register_scheme(prefixes, loader_cls, force=force) + return loader_cls + + return _register + + @classmethod + def _get_checkpoint_loader(cls, path): + """Finds a loader that supports the given path. Falls back to the local + loader if no other loader is found. + + Args: + path (str): checkpoint path + + Returns: + loader (function): checkpoint loader + """ + + for p in cls._schemes: + if path.startswith(p): + return cls._schemes[p] + + @classmethod + def load_checkpoint(cls, filename, map_location=None, logger=None): + """load checkpoint through URL scheme path. + + Args: + filename (str): checkpoint file name with given prefix + map_location (str, optional): Same as :func:`torch.load`. + Default: None + logger (:mod:`logging.Logger`, optional): The logger for message. + Default: None + + Returns: + dict or OrderedDict: The loaded checkpoint. 
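+
+        Example (an illustrative sketch; the URL below is a placeholder,
+        not a real checkpoint)::
+
+            >>> ckpt = CheckpointLoader.load_checkpoint(
+            ...     'https://example.com/model.pth', map_location='cpu')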
+ """ + + checkpoint_loader = cls._get_checkpoint_loader(filename) + class_name = checkpoint_loader.__name__ + mmcv.print_log( + f'load checkpoint from {class_name[10:]} path: {filename}', logger) + return checkpoint_loader(filename, map_location) + + +@CheckpointLoader.register_scheme(prefixes='') +def load_from_local(filename, map_location): + """load checkpoint by local file path. + + Args: + filename (str): local checkpoint file path + map_location (str, optional): Same as :func:`torch.load`. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + + if not osp.isfile(filename): + raise IOError(f'{filename} is not a checkpoint file') + checkpoint = torch.load(filename, map_location=map_location) + return checkpoint + + +@CheckpointLoader.register_scheme(prefixes=('http://', 'https://')) +def load_from_http(filename, map_location=None, model_dir=None): + """load checkpoint through HTTP or HTTPS scheme path. In distributed + setting, this function only download checkpoint at local rank 0. + + Args: + filename (str): checkpoint file path with modelzoo or + torchvision prefix + map_location (str, optional): Same as :func:`torch.load`. + model_dir (string, optional): directory in which to save the object, + Default: None + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + rank, world_size = get_dist_info() + rank = int(os.environ.get('LOCAL_RANK', rank)) + if rank == 0: + checkpoint = model_zoo.load_url( + filename, model_dir=model_dir, map_location=map_location) + if world_size > 1: + torch.distributed.barrier() + if rank > 0: + checkpoint = model_zoo.load_url( + filename, model_dir=model_dir, map_location=map_location) + return checkpoint + + +@CheckpointLoader.register_scheme(prefixes='pavi://') +def load_from_pavi(filename, map_location=None): + """load checkpoint through the file path prefixed with pavi. In distributed + setting, this function download ckpt at all ranks to different temporary + directories. + + Args: + filename (str): checkpoint file path with pavi prefix + map_location (str, optional): Same as :func:`torch.load`. + Default: None + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + assert filename.startswith('pavi://'), \ + f'Expected filename startswith `pavi://`, but get {filename}' + model_path = filename[7:] + + try: + from pavi import modelcloud + except ImportError: + raise ImportError( + 'Please install pavi to load checkpoint from modelcloud.') + + model = modelcloud.get(model_path) + with TemporaryDirectory() as tmp_dir: + downloaded_file = osp.join(tmp_dir, model.name) + model.download(downloaded_file) + checkpoint = torch.load(downloaded_file, map_location=map_location) + return checkpoint + + +@CheckpointLoader.register_scheme(prefixes='s3://') +def load_from_ceph(filename, map_location=None, backend='petrel'): + """load checkpoint through the file path prefixed with s3. In distributed + setting, this function download ckpt at all ranks to different temporary + directories. + + Args: + filename (str): checkpoint file path with s3 prefix + map_location (str, optional): Same as :func:`torch.load`. + backend (str, optional): The storage backend type. Options are 'ceph', + 'petrel'. Default: 'petrel'. + + .. warning:: + :class:`mmcv.fileio.file_client.CephBackend` will be deprecated, + please use :class:`mmcv.fileio.file_client.PetrelBackend` instead. + + Returns: + dict or OrderedDict: The loaded checkpoint. 
+ """ + allowed_backends = ['ceph', 'petrel'] + if backend not in allowed_backends: + raise ValueError(f'Load from Backend {backend} is not supported.') + + if backend == 'ceph': + warnings.warn( + 'CephBackend will be deprecated, please use PetrelBackend instead') + + # CephClient and PetrelBackend have the same prefix 's3://' and the latter + # will be chosen as default. If PetrelBackend can not be instantiated + # successfully, the CephClient will be chosen. + try: + file_client = FileClient(backend=backend) + except ImportError: + allowed_backends.remove(backend) + file_client = FileClient(backend=allowed_backends[0]) + + with io.BytesIO(file_client.get(filename)) as buffer: + checkpoint = torch.load(buffer, map_location=map_location) + return checkpoint + + +@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://')) +def load_from_torchvision(filename, map_location=None): + """load checkpoint through the file path prefixed with modelzoo or + torchvision. + + Args: + filename (str): checkpoint file path with modelzoo or + torchvision prefix + map_location (str, optional): Same as :func:`torch.load`. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + model_urls = get_torchvision_models() + if filename.startswith('modelzoo://'): + warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' + 'use "torchvision://" instead') + model_name = filename[11:] + else: + model_name = filename[14:] + return load_from_http(model_urls[model_name], map_location=map_location) + + +@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://')) +def load_from_openmmlab(filename, map_location=None): + """load checkpoint through the file path prefixed with open-mmlab or + openmmlab. + + Args: + filename (str): checkpoint file path with open-mmlab or + openmmlab prefix + map_location (str, optional): Same as :func:`torch.load`. + Default: None + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + + model_urls = get_external_models() + prefix_str = 'open-mmlab://' + if filename.startswith(prefix_str): + model_name = filename[13:] + else: + model_name = filename[12:] + prefix_str = 'openmmlab://' + + deprecated_urls = get_deprecated_model_names() + if model_name in deprecated_urls: + warnings.warn(f'{prefix_str}{model_name} is deprecated in favor ' + f'of {prefix_str}{deprecated_urls[model_name]}') + model_name = deprecated_urls[model_name] + model_url = model_urls[model_name] + # check if is url + if model_url.startswith(('http://', 'https://')): + checkpoint = load_from_http(model_url, map_location=map_location) + else: + filename = osp.join(_get_mmcv_home(), model_url) + if not osp.isfile(filename): + raise IOError(f'{filename} is not a checkpoint file') + checkpoint = torch.load(filename, map_location=map_location) + return checkpoint + + +@CheckpointLoader.register_scheme(prefixes='mmcls://') +def load_from_mmcls(filename, map_location=None): + """load checkpoint through the file path prefixed with mmcls. + + Args: + filename (str): checkpoint file path with mmcls prefix + map_location (str, optional): Same as :func:`torch.load`. + + Returns: + dict or OrderedDict: The loaded checkpoint. 
+ """ + + model_urls = get_mmcls_models() + model_name = filename[8:] + checkpoint = load_from_http( + model_urls[model_name], map_location=map_location) + checkpoint = _process_mmcls_checkpoint(checkpoint) + return checkpoint + + +def _load_checkpoint(filename, map_location=None, logger=None): + """Load checkpoint from somewhere (modelzoo, file, url). + + Args: + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str, optional): Same as :func:`torch.load`. + Default: None. + logger (:mod:`logging.Logger`, optional): The logger for error message. + Default: None + + Returns: + dict or OrderedDict: The loaded checkpoint. It can be either an + OrderedDict storing model weights or a dict containing other + information, which depends on the checkpoint. + """ + return CheckpointLoader.load_checkpoint(filename, map_location, logger) + + +def _load_checkpoint_with_prefix(prefix, filename, map_location=None): + """Load partial pretrained model with specific prefix. + + Args: + prefix (str): The prefix of sub-module. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str | None): Same as :func:`torch.load`. Default: None. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + + checkpoint = _load_checkpoint(filename, map_location=map_location) + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + if not prefix.endswith('.'): + prefix += '.' + prefix_len = len(prefix) + + state_dict = { + k[prefix_len:]: v + for k, v in state_dict.items() if k.startswith(prefix) + } + + assert state_dict, f'{prefix} is not in the pretrained model' + return state_dict + + +def load_checkpoint(model, + filename, + map_location=None, + strict=False, + logger=None, + revise_keys=[(r'^module\.', '')]): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Accept local filepath, URL, ``torchvision://xxx``, + ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for + details. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + revise_keys (list): A list of customized keywords to modify the + state_dict in checkpoint. Each item is a (pattern, replacement) + pair of the regular expression operations. Default: strip + the prefix 'module.' by [(r'^module\\.', '')]. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + checkpoint = _load_checkpoint(filename, map_location, logger) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + # strip prefix of state_dict + metadata = getattr(state_dict, '_metadata', OrderedDict()) + for p, r in revise_keys: + state_dict = OrderedDict( + {re.sub(p, r, k): v + for k, v in state_dict.items()}) + # Keep metadata in state_dict + state_dict._metadata = metadata + + # load state_dict + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def weights_to_cpu(state_dict): + """Copy a model state_dict to cpu. 
+
+    Args:
+        state_dict (OrderedDict): Model weights on GPU.
+
+    Returns:
+        OrderedDict: Model weights on CPU.
+    """
+    state_dict_cpu = OrderedDict()
+    for key, val in state_dict.items():
+        state_dict_cpu[key] = val.cpu()
+    # Keep metadata in state_dict
+    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
+    return state_dict_cpu
+
+
+def _save_to_state_dict(module, destination, prefix, keep_vars):
+    """Saves module state to `destination` dictionary.
+
+    This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
+
+    Args:
+        module (nn.Module): The module to generate state_dict.
+        destination (dict): A dict where state will be stored.
+        prefix (str): The prefix for parameters and buffers used in this
+            module.
+        keep_vars (bool): Whether to keep the variable property of the
+            parameters. If False, parameters and buffers are detached.
+    """
+    for name, param in module._parameters.items():
+        if param is not None:
+            destination[prefix + name] = param if keep_vars else param.detach()
+    for name, buf in module._buffers.items():
+        # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
+        if buf is not None:
+            destination[prefix + name] = buf if keep_vars else buf.detach()
+
+
+def get_state_dict(module, destination=None, prefix='', keep_vars=False):
+    """Returns a dictionary containing a whole state of the module.
+
+    Both parameters and persistent buffers (e.g. running averages) are
+    included. Keys are corresponding parameter and buffer names.
+
+    This method is modified from :meth:`torch.nn.Module.state_dict` to
+    recursively check parallel module in case that the model has a complicated
+    structure, e.g., nn.Module(nn.Module(DDP)).
+
+    Args:
+        module (nn.Module): The module to generate state_dict.
+        destination (OrderedDict): Returned dict for the state of the
+            module.
+        prefix (str): Prefix of the key.
+        keep_vars (bool): Whether to keep the variable property of the
+            parameters. Default: False.
+
+    Returns:
+        dict: A dictionary containing a whole state of the module.
+    """
+    # recursively check parallel module in case that the model has a
+    # complicated structure, e.g., nn.Module(nn.Module(DDP))
+    if is_module_wrapper(module):
+        module = module.module
+
+    # below is the same as torch.nn.Module.state_dict()
+    if destination is None:
+        destination = OrderedDict()
+        destination._metadata = OrderedDict()
+    destination._metadata[prefix[:-1]] = local_metadata = dict(
+        version=module._version)
+    _save_to_state_dict(module, destination, prefix, keep_vars)
+    for name, child in module._modules.items():
+        if child is not None:
+            get_state_dict(
+                child, destination, prefix + name + '.', keep_vars=keep_vars)
+    for hook in module._state_dict_hooks.values():
+        hook_result = hook(module, destination, prefix, local_metadata)
+        if hook_result is not None:
+            destination = hook_result
+    return destination
+
+
+def save_checkpoint(model,
+                    filename,
+                    optimizer=None,
+                    meta=None,
+                    file_client_args=None):
+    """Save checkpoint to file.
+
+    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
+    ``optimizer``. By default ``meta`` will contain version and time info.
+
+    Args:
+        model (Module): Module whose params are to be saved.
+        filename (str): Checkpoint filename.
+        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
+        meta (dict, optional): Metadata to be saved in checkpoint.
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            Default: None.
+            `New in version 1.3.16.`
+    """
+    if meta is None:
+        meta = {}
+    elif not isinstance(meta, dict):
+        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
+    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
+
+    if is_module_wrapper(model):
+        model = model.module
+
+    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
+        # save class name to the meta
+        meta.update(CLASSES=model.CLASSES)
+
+    checkpoint = {
+        'meta': meta,
+        'state_dict': weights_to_cpu(get_state_dict(model))
+    }
+    # save optimizer state dict in the checkpoint
+    if isinstance(optimizer, Optimizer):
+        checkpoint['optimizer'] = optimizer.state_dict()
+    elif isinstance(optimizer, dict):
+        checkpoint['optimizer'] = {}
+        for name, optim in optimizer.items():
+            checkpoint['optimizer'][name] = optim.state_dict()
+
+    if filename.startswith('pavi://'):
+        if file_client_args is not None:
+            raise ValueError(
+                'file_client_args should be "None" if filename starts with '
+                f'"pavi://", but got {file_client_args}')
+        try:
+            from pavi import modelcloud
+            from pavi import exception
+        except ImportError:
+            raise ImportError(
+                'Please install pavi to load checkpoint from modelcloud.')
+        model_path = filename[7:]
+        root = modelcloud.Folder()
+        model_dir, model_name = osp.split(model_path)
+        try:
+            model = modelcloud.get(model_dir)
+        except exception.NodeNotFoundError:
+            model = root.create_training_model(model_dir)
+        with TemporaryDirectory() as tmp_dir:
+            checkpoint_file = osp.join(tmp_dir, model_name)
+            with open(checkpoint_file, 'wb') as f:
+                torch.save(checkpoint, f)
+                f.flush()
+            model.create_file(checkpoint_file, name=model_name)
+    else:
+        file_client = FileClient.infer_client(file_client_args, filename)
+        with io.BytesIO() as f:
+            torch.save(checkpoint, f)
+            file_client.put(f.getvalue(), filename)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/default_constructor.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/default_constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed2e0c83e19133ce3873ea092c1a872ca254bbf
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/default_constructor.py
@@ -0,0 +1,44 @@
+from .builder import RUNNER_BUILDERS, RUNNERS
+
+
+@RUNNER_BUILDERS.register_module()
+class DefaultRunnerConstructor:
+    """Default constructor for runners.
+
+    Customize an existing ``Runner`` like ``EpochBasedRunner`` through a
+    ``RunnerConstructor``. For example, we can inject new properties and
+    functions into a ``Runner``.
+
+    Example:
+        >>> from custom_mmpkg.custom_mmcv.runner import RUNNER_BUILDERS, build_runner
+        >>> # Define a new RunnerConstructor
+        >>> @RUNNER_BUILDERS.register_module()
+        >>> class MyRunnerConstructor:
+        ...     def __init__(self, runner_cfg, default_args=None):
+        ...         if not isinstance(runner_cfg, dict):
+        ...             raise TypeError('runner_cfg should be a dict',
+        ...                             f'but got {type(runner_cfg)}')
+        ...         self.runner_cfg = runner_cfg
+        ...         self.default_args = default_args
+        ...
+        ...     def __call__(self):
+        ...         runner = RUNNERS.build(self.runner_cfg,
+        ...                                default_args=self.default_args)
+        ...         # Add new properties for existing runner
+        ...         runner.my_name = 'my_runner'
+        ...         runner.my_function = lambda self: print(self.my_name)
+        ...         ...
+        >>> # build your runner
+        >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
+        ...
constructor='MyRunnerConstructor') + >>> runner = build_runner(runner_cfg) + """ + + def __init__(self, runner_cfg, default_args=None): + if not isinstance(runner_cfg, dict): + raise TypeError('runner_cfg should be a dict', + f'but got {type(runner_cfg)}') + self.runner_cfg = runner_cfg + self.default_args = default_args + + def __call__(self): + return RUNNERS.build(self.runner_cfg, default_args=self.default_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/dist_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d3a1ef3fda5ceeb31bf15a73779da1b1903ab0fe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/dist_utils.py @@ -0,0 +1,164 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +import os +import subprocess +from collections import OrderedDict + +import torch +import torch.multiprocessing as mp +from torch import distributed as dist +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + + +def init_dist(launcher, backend='nccl', **kwargs): + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + if launcher == 'pytorch': + _init_dist_pytorch(backend, **kwargs) + elif launcher == 'mpi': + _init_dist_mpi(backend, **kwargs) + elif launcher == 'slurm': + _init_dist_slurm(backend, **kwargs) + else: + raise ValueError(f'Invalid launcher type: {launcher}') + + +def _init_dist_pytorch(backend, **kwargs): + # TODO: use local_rank instead of rank % num_gpus + rank = int(os.environ['RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_mpi(backend, **kwargs): + # TODO: use local_rank instead of rank % num_gpus + rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_slurm(backend, port=None): + """Initialize slurm distributed training environment. + + If argument ``port`` is not specified, then the master port will be system + environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system + environment variable, then a default port ``29500`` will be used. + + Args: + backend (str): Backend of torch.distributed. + port (int, optional): Master port. Defaults to None. 
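+
+    Example (an illustrative sketch; the task layout and port are
+    hypothetical):
+        >>> # launched via `srun -N1 -n2 ... python train.py --launcher slurm`
+        >>> init_dist('slurm', backend='nccl', port=29510)  # doctest: +SKIP
+        >>> # afterwards MASTER_PORT=29510, MASTER_ADDR is the first host in
+        >>> # SLURM_NODELIST, and WORLD_SIZE/RANK/LOCAL_RANK are set per task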
+ """ + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(proc_id % num_gpus) + addr = subprocess.getoutput( + f'scontrol show hostname {node_list} | head -n1') + # specify master port + if port is not None: + os.environ['MASTER_PORT'] = str(port) + elif 'MASTER_PORT' in os.environ: + pass # use MASTER_PORT in the environment variable + else: + # 29500 is torch.distributed default port + os.environ['MASTER_PORT'] = '29500' + # use MASTER_ADDR in the environment variable if it already exists + if 'MASTER_ADDR' not in os.environ: + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) + os.environ['RANK'] = str(proc_id) + dist.init_process_group(backend=backend) + + +def get_dist_info(): + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + return rank, world_size + + +def master_only(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + +def allreduce_params(params, coalesce=True, bucket_size_mb=-1): + """Allreduce parameters. + + Args: + params (list[torch.Parameters]): List of parameters or buffers of a + model. + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. + """ + _, world_size = get_dist_info() + if world_size == 1: + return + params = [param.data for param in params] + if coalesce: + _allreduce_coalesced(params, world_size, bucket_size_mb) + else: + for tensor in params: + dist.all_reduce(tensor.div_(world_size)) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + """Allreduce gradients. + + Args: + params (list[torch.Parameters]): List of parameters of a model + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. 
+ """ + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + _, world_size = get_dist_info() + if world_size == 1: + return + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/epoch_based_runner.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/epoch_based_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..46b96618daec0941513cc0188edfa45e4c42dfe2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/epoch_based_runner.py @@ -0,0 +1,187 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import platform +import shutil +import time +import warnings + +import torch + +import custom_mmpkg.custom_mmcv as mmcv +from .base_runner import BaseRunner +from .builder import RUNNERS +from .checkpoint import save_checkpoint +from .utils import get_host_info + + +@RUNNERS.register_module() +class EpochBasedRunner(BaseRunner): + """Epoch-based Runner. + + This runner train models epoch by epoch. 
+ """ + + def run_iter(self, data_batch, train_mode, **kwargs): + if self.batch_processor is not None: + outputs = self.batch_processor( + self.model, data_batch, train_mode=train_mode, **kwargs) + elif train_mode: + outputs = self.model.train_step(data_batch, self.optimizer, + **kwargs) + else: + outputs = self.model.val_step(data_batch, self.optimizer, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('"batch_processor()" or "model.train_step()"' + 'and "model.val_step()" must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(self.data_loader) + self.call_hook('before_train_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_train_iter') + self.run_iter(data_batch, train_mode=True, **kwargs) + self.call_hook('after_train_iter') + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + @torch.no_grad() + def val(self, data_loader, **kwargs): + self.model.eval() + self.mode = 'val' + self.data_loader = data_loader + self.call_hook('before_val_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_val_iter') + self.run_iter(data_batch, train_mode=False) + self.call_hook('after_val_iter') + + self.call_hook('after_val_epoch') + + def run(self, data_loaders, workflow, max_epochs=None, **kwargs): + """Start running. + + Args: + data_loaders (list[:obj:`DataLoader`]): Dataloaders for training + and validation. + workflow (list[tuple]): A list of (phase, epochs) to specify the + running order and epochs. E.g, [('train', 2), ('val', 1)] means + running 2 epochs for training and 1 epoch for validation, + iteratively. 
+ """ + assert isinstance(data_loaders, list) + assert mmcv.is_list_of(workflow, tuple) + assert len(data_loaders) == len(workflow) + if max_epochs is not None: + warnings.warn( + 'setting max_epochs in run is deprecated, ' + 'please set max_epochs in runner_config', DeprecationWarning) + self._max_epochs = max_epochs + + assert self._max_epochs is not None, ( + 'max_epochs must be specified during instantiation') + + for i, flow in enumerate(workflow): + mode, epochs = flow + if mode == 'train': + self._max_iters = self._max_epochs * len(data_loaders[i]) + break + + work_dir = self.work_dir if self.work_dir is not None else 'NONE' + self.logger.info('Start running, host: %s, work_dir: %s', + get_host_info(), work_dir) + self.logger.info('Hooks will be executed in the following order:\n%s', + self.get_hook_info()) + self.logger.info('workflow: %s, max: %d epochs', workflow, + self._max_epochs) + self.call_hook('before_run') + + while self.epoch < self._max_epochs: + for i, flow in enumerate(workflow): + mode, epochs = flow + if isinstance(mode, str): # self.train() + if not hasattr(self, mode): + raise ValueError( + f'runner has no method named "{mode}" to run an ' + 'epoch') + epoch_runner = getattr(self, mode) + else: + raise TypeError( + 'mode in workflow must be a str, but got {}'.format( + type(mode))) + + for _ in range(epochs): + if mode == 'train' and self.epoch >= self._max_epochs: + break + epoch_runner(data_loaders[i], **kwargs) + + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_run') + + def save_checkpoint(self, + out_dir, + filename_tmpl='epoch_{}.pth', + save_optimizer=True, + meta=None, + create_symlink=True): + """Save the checkpoint. + + Args: + out_dir (str): The directory that checkpoints are saved. + filename_tmpl (str, optional): The checkpoint filename template, + which contains a placeholder for the epoch number. + Defaults to 'epoch_{}.pth'. + save_optimizer (bool, optional): Whether to save the optimizer to + the checkpoint. Defaults to True. + meta (dict, optional): The meta information to be saved in the + checkpoint. Defaults to None. + create_symlink (bool, optional): Whether to create a symlink + "latest.pth" to point to the latest checkpoint. + Defaults to True. + """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError( + f'meta should be a dict or None, but got {type(meta)}') + if self.meta is not None: + meta.update(self.meta) + # Note: meta.update(self.meta) should be done before + # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise + # there will be problems with resumed checkpoints. 
+ # More details in https://github.com/open-mmlab/mmcv/pull/1108 + meta.update(epoch=self.epoch + 1, iter=self.iter) + + filename = filename_tmpl.format(self.epoch + 1) + filepath = osp.join(out_dir, filename) + optimizer = self.optimizer if save_optimizer else None + save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta) + # in some environments, `os.symlink` is not supported, you may need to + # set `create_symlink` to False + if create_symlink: + dst_file = osp.join(out_dir, 'latest.pth') + if platform.system() != 'Windows': + mmcv.symlink(filename, dst_file) + else: + shutil.copy(filepath, dst_file) + + +@RUNNERS.register_module() +class Runner(EpochBasedRunner): + """Deprecated name of EpochBasedRunner.""" + + def __init__(self, *args, **kwargs): + warnings.warn( + 'Runner was deprecated, please use EpochBasedRunner instead') + super().__init__(*args, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/fp16_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/fp16_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..752e1b23fb971a56f72ea6dfee36166670221e93 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/fp16_utils.py @@ -0,0 +1,410 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +import warnings +from collections import abc +from inspect import getfullargspec + +import numpy as np +import torch +import torch.nn as nn + +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, digit_version +from .dist_utils import allreduce_grads as _allreduce_grads + +try: + # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported + # and used; otherwise, auto fp16 will adopt mmcv's implementation. + # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 + # manually, so the behavior may not be consistent with real amp. + from torch.cuda.amp import autocast +except ImportError: + pass + + +def cast_tensor_type(inputs, src_type, dst_type): + """Recursively convert Tensor in inputs from src_type to dst_type. + + Args: + inputs: Inputs that to be casted. + src_type (torch.dtype): Source type.. + dst_type (torch.dtype): Destination type. + + Returns: + The same type with inputs, but all contained Tensors have been cast. + """ + if isinstance(inputs, nn.Module): + return inputs + elif isinstance(inputs, torch.Tensor): + return inputs.to(dst_type) + elif isinstance(inputs, str): + return inputs + elif isinstance(inputs, np.ndarray): + return inputs + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ + k: cast_tensor_type(v, src_type, dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( + cast_tensor_type(item, src_type, dst_type) for item in inputs) + else: + return inputs + + +def auto_fp16(apply_to=None, out_fp32=False): + """Decorator to enable fp16 training automatically. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If inputs arguments are fp32 tensors, they will + be converted to fp16 automatically. Arguments other than fp32 tensors are + ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the + backend, otherwise, original mmcv implementation will be adopted. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp32 (bool): Whether to convert the output back to fp32. 
+ + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp16 + >>> @auto_fp16() + >>> def forward(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp16 + >>> @auto_fp16(apply_to=('pred', )) + >>> def do_something(self, pred, others): + >>> pass + """ + + def auto_fp16_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@auto_fp16 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.float, torch.half)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.float, torch.half) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + if (TORCH_VERSION != 'parrots' and + digit_version(TORCH_VERSION) >= digit_version('1.6.0')): + with autocast(enabled=True): + output = old_func(*new_args, **new_kwargs) + else: + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, torch.half, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def force_fp32(apply_to=None, out_fp16=False): + """Decorator to convert input arguments to fp32 in force. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If there are some inputs that must be processed + in fp32 mode, then this decorator can handle it. If inputs arguments are + fp16 tensors, they will be converted to fp32 automatically. Arguments other + than fp16 tensors are ignored. If you are using PyTorch >= 1.6, + torch.cuda.amp is used as the backend, otherwise, original mmcv + implementation will be adopted. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp16 (bool): Whether to convert the output back to fp16. + + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp32 + >>> @force_fp32() + >>> def loss(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp32 + >>> @force_fp32(apply_to=('pred', )) + >>> def post_process(self, pred, others): + >>> pass + """ + + def force_fp32_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. 
+            if not isinstance(args[0], torch.nn.Module):
+                raise TypeError('@force_fp32 can only be used to decorate the '
                                'method of nn.Module')
+            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
+                return old_func(*args, **kwargs)
+            # get the arg spec of the decorated method
+            args_info = getfullargspec(old_func)
+            # get the argument names to be casted
+            args_to_cast = args_info.args if apply_to is None else apply_to
+            # convert the args that need to be processed
+            new_args = []
+            if args:
+                arg_names = args_info.args[:len(args)]
+                for i, arg_name in enumerate(arg_names):
+                    if arg_name in args_to_cast:
+                        new_args.append(
+                            cast_tensor_type(args[i], torch.half, torch.float))
+                    else:
+                        new_args.append(args[i])
+            # convert the kwargs that need to be processed
+            new_kwargs = dict()
+            if kwargs:
+                for arg_name, arg_value in kwargs.items():
+                    if arg_name in args_to_cast:
+                        new_kwargs[arg_name] = cast_tensor_type(
+                            arg_value, torch.half, torch.float)
+                    else:
+                        new_kwargs[arg_name] = arg_value
+            # apply converted arguments to the decorated method
+            if (TORCH_VERSION != 'parrots' and
+                    digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
+                with autocast(enabled=False):
+                    output = old_func(*new_args, **new_kwargs)
+            else:
+                output = old_func(*new_args, **new_kwargs)
+            # cast the results back to fp16 if necessary
+            if out_fp16:
+                output = cast_tensor_type(output, torch.float, torch.half)
+            return output
+
+        return new_func
+
+    return force_fp32_wrapper
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+    warnings.warn(
+        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be '
+        'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads"')
+    _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
+
+
+def wrap_fp16_model(model):
+    """Wrap the FP32 model to FP16.
+
+    If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
+    backend, otherwise, original mmcv implementation will be adopted.
+
+    For PyTorch >= 1.6, this function will
+    1. Set the fp16 flag inside the model to True.
+
+    Otherwise:
+    1. Convert FP32 model to FP16.
+    2. Keep some necessary layers in FP32, e.g., normalization layers.
+    3. Set the `fp16_enabled` flag inside the model to True.
+
+    Args:
+        model (nn.Module): Model in FP32.
+    """
+    if (TORCH_VERSION == 'parrots'
+            or digit_version(TORCH_VERSION) < digit_version('1.6.0')):
+        # convert model to fp16
+        model.half()
+        # patch the normalization layers to make it work in fp32 mode
+        patch_norm_fp32(model)
+    # set `fp16_enabled` flag
+    for m in model.modules():
+        if hasattr(m, 'fp16_enabled'):
+            m.fp16_enabled = True
+
+
+def patch_norm_fp32(module):
+    """Recursively convert normalization layers from FP16 to FP32.
+
+    Args:
+        module (nn.Module): The modules to be converted in FP16.
+
+    Returns:
+        nn.Module: The converted module, the normalization layers have been
+            converted to FP32.
+    """
+    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
+        module.float()
+        if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
+            module.forward = patch_forward_method(module.forward, torch.half,
+                                                  torch.float)
+    for child in module.children():
+        patch_norm_fp32(child)
+    return module
+
+
+def patch_forward_method(func, src_type, dst_type, convert_output=True):
+    """Patch the forward method of a module.
+
+    Args:
+        func (callable): The original forward method.
+        src_type (torch.dtype): Type of input arguments to be converted from.
+ dst_type (torch.dtype): Type of input arguments to be converted to. + convert_output (bool): Whether to convert the output back to src_type. + + Returns: + callable: The patched forward method. + """ + + def new_forward(*args, **kwargs): + output = func(*cast_tensor_type(args, src_type, dst_type), + **cast_tensor_type(kwargs, src_type, dst_type)) + if convert_output: + output = cast_tensor_type(output, dst_type, src_type) + return output + + return new_forward + + +class LossScaler: + """Class that manages loss scaling in mixed precision training which + supports both dynamic or static mode. + + The implementation refers to + https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. + Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. + It's important to understand how :class:`LossScaler` operates. + Loss scaling is designed to combat the problem of underflowing + gradients encountered at long times when training fp16 networks. + Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. + If overflowing gradients are encountered, :class:`FP16_Optimizer` then + skips the update step for this particular iteration/minibatch, + and :class:`LossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients + detected,:class:`LossScaler` increases the loss scale once more. + In this way :class:`LossScaler` attempts to "ride the edge" of always + using the highest loss scale possible without incurring overflow. + + Args: + init_scale (float): Initial loss scale value, default: 2**32. + scale_factor (float): Factor used when adjusting the loss scale. + Default: 2. + mode (str): Loss scaling mode. 'dynamic' or 'static' + scale_window (int): Number of consecutive iterations without an + overflow to wait before increasing the loss scale. Default: 1000. 
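+
+    Example (a minimal sketch of the intended update loop; ``model``,
+    ``loss`` and the gradient-unscaling step are placeholders):
+        >>> scaler = LossScaler(init_scale=2**32, mode='dynamic')
+        >>> (loss * scaler.loss_scale).backward()  # doctest: +SKIP
+        >>> overflow = scaler.has_overflow(model.parameters())  # doctest: +SKIP
+        >>> # skip the optimizer step on overflow; otherwise unscale the
+        >>> # gradients by scaler.loss_scale before stepping
+        >>> scaler.update_scale(overflow)  # doctest: +SKIP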
+ """ + + def __init__(self, + init_scale=2**32, + mode='dynamic', + scale_factor=2., + scale_window=1000): + self.cur_scale = init_scale + self.cur_iter = 0 + assert mode in ('dynamic', + 'static'), 'mode can only be dynamic or static' + self.mode = mode + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + + def has_overflow(self, params): + """Check if params contain overflow.""" + if self.mode != 'dynamic': + return False + for p in params: + if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): + return True + return False + + def _has_inf_or_nan(x): + """Check if params contain NaN.""" + try: + cpu_sum = float(x.float().sum()) + except RuntimeError as instance: + if 'value cannot be converted' not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') \ + or cpu_sum != cpu_sum: + return True + return False + + def update_scale(self, overflow): + """update the current loss scale value when overflow happens.""" + if self.mode != 'dynamic': + return + if overflow: + self.cur_scale = max(self.cur_scale / self.scale_factor, 1) + self.last_overflow_iter = self.cur_iter + else: + if (self.cur_iter - self.last_overflow_iter) % \ + self.scale_window == 0: + self.cur_scale *= self.scale_factor + self.cur_iter += 1 + + def state_dict(self): + """Returns the state of the scaler as a :class:`dict`.""" + return dict( + cur_scale=self.cur_scale, + cur_iter=self.cur_iter, + mode=self.mode, + last_overflow_iter=self.last_overflow_iter, + scale_factor=self.scale_factor, + scale_window=self.scale_window) + + def load_state_dict(self, state_dict): + """Loads the loss_scaler state dict. + + Args: + state_dict (dict): scaler state. + """ + self.cur_scale = state_dict['cur_scale'] + self.cur_iter = state_dict['cur_iter'] + self.mode = state_dict['mode'] + self.last_overflow_iter = state_dict['last_overflow_iter'] + self.scale_factor = state_dict['scale_factor'] + self.scale_window = state_dict['scale_window'] + + @property + def loss_scale(self): + return self.cur_scale diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..915af28cefab14a14c1188ed861161080fd138a3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .checkpoint import CheckpointHook +from .closure import ClosureHook +from .ema import EMAHook +from .evaluation import DistEvalHook, EvalHook +from .hook import HOOKS, Hook +from .iter_timer import IterTimerHook +from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook, + NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook, + TextLoggerHook, WandbLoggerHook) +from .lr_updater import LrUpdaterHook +from .memory import EmptyCacheHook +from .momentum_updater import MomentumUpdaterHook +from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook, + GradientCumulativeOptimizerHook, OptimizerHook) +from .profiler import ProfilerHook +from .sampler_seed import DistSamplerSeedHook +from .sync_buffer import SyncBuffersHook + +__all__ = [ + 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', + 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook', + 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook', + 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook', + 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook', + 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook', + 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook', + 'GradientCumulativeFp16OptimizerHook' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/checkpoint.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..0a1b688bcbd9877423ba3930a81093464aed34f6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/checkpoint.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +from custom_mmpkg.custom_mmcv.fileio import FileClient +from ..dist_utils import allreduce_params, master_only +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class CheckpointHook(Hook): + """Save checkpoints periodically. + + Args: + interval (int): The saving period. If ``by_epoch=True``, interval + indicates epochs, otherwise it indicates iterations. + Default: -1, which means "never". + by_epoch (bool): Saving checkpoints by epoch or by iteration. + Default: True. + save_optimizer (bool): Whether to save optimizer state_dict in the + checkpoint. It is usually used for resuming experiments. + Default: True. + out_dir (str, optional): The root directory to save checkpoints. If not + specified, ``runner.work_dir`` will be used by default. If + specified, the ``out_dir`` will be the concatenation of ``out_dir`` + and the last level directory of ``runner.work_dir``. + `Changed in version 1.3.16.` + max_keep_ckpts (int, optional): The maximum checkpoints to keep. + In some cases we want only the latest few checkpoints and would + like to delete old ones to save the disk space. + Default: -1, which means unlimited. + save_last (bool, optional): Whether to force the last checkpoint to be + saved regardless of interval. Default: True. + sync_buffer (bool, optional): Whether to synchronize buffers in + different gpus. Default: False. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + `New in version 1.3.16.` + + .. warning:: + Before v1.3.16, the ``out_dir`` argument indicates the path where the + checkpoint is stored. 
However, since v1.3.16, ``out_dir`` indicates the + root directory and the final path to save checkpoint is the + concatenation of ``out_dir`` and the last level directory of + ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A" + and the value of ``runner.work_dir`` is "/path/of/B", then the final + path will be "/path/of/A/B". + """ + + def __init__(self, + interval=-1, + by_epoch=True, + save_optimizer=True, + out_dir=None, + max_keep_ckpts=-1, + save_last=True, + sync_buffer=False, + file_client_args=None, + **kwargs): + self.interval = interval + self.by_epoch = by_epoch + self.save_optimizer = save_optimizer + self.out_dir = out_dir + self.max_keep_ckpts = max_keep_ckpts + self.save_last = save_last + self.args = kwargs + self.sync_buffer = sync_buffer + self.file_client_args = file_client_args + + def before_run(self, runner): + if not self.out_dir: + self.out_dir = runner.work_dir + + self.file_client = FileClient.infer_client(self.file_client_args, + self.out_dir) + + # if `self.out_dir` is not equal to `runner.work_dir`, it means that + # `self.out_dir` is set so the final `self.out_dir` is the + # concatenation of `self.out_dir` and the last level directory of + # `runner.work_dir` + if self.out_dir != runner.work_dir: + basename = osp.basename(runner.work_dir.rstrip(osp.sep)) + self.out_dir = self.file_client.join_path(self.out_dir, basename) + + runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by ' + f'{self.file_client.name}.')) + + # disable the create_symlink option because some file backends do not + # allow to create a symlink + if 'create_symlink' in self.args: + if self.args[ + 'create_symlink'] and not self.file_client.allow_symlink: + self.args['create_symlink'] = False + warnings.warn( + ('create_symlink is set as True by the user but is changed' + 'to be False because creating symbolic link is not ' + f'allowed in {self.file_client.name}')) + else: + self.args['create_symlink'] = self.file_client.allow_symlink + + def after_train_epoch(self, runner): + if not self.by_epoch: + return + + # save checkpoint for following cases: + # 1. every ``self.interval`` epochs + # 2. 
reach the last epoch of training + if self.every_n_epochs( + runner, self.interval) or (self.save_last + and self.is_last_epoch(runner)): + runner.logger.info( + f'Saving checkpoint at {runner.epoch + 1} epochs') + if self.sync_buffer: + allreduce_params(runner.model.buffers()) + self._save_checkpoint(runner) + + @master_only + def _save_checkpoint(self, runner): + """Save the current checkpoint and delete unwanted checkpoint.""" + runner.save_checkpoint( + self.out_dir, save_optimizer=self.save_optimizer, **self.args) + if runner.meta is not None: + if self.by_epoch: + cur_ckpt_filename = self.args.get( + 'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1) + else: + cur_ckpt_filename = self.args.get( + 'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1) + runner.meta.setdefault('hook_msgs', dict()) + runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path( + self.out_dir, cur_ckpt_filename) + # remove other checkpoints + if self.max_keep_ckpts > 0: + if self.by_epoch: + name = 'epoch_{}.pth' + current_ckpt = runner.epoch + 1 + else: + name = 'iter_{}.pth' + current_ckpt = runner.iter + 1 + redundant_ckpts = range( + current_ckpt - self.max_keep_ckpts * self.interval, 0, + -self.interval) + filename_tmpl = self.args.get('filename_tmpl', name) + for _step in redundant_ckpts: + ckpt_path = self.file_client.join_path( + self.out_dir, filename_tmpl.format(_step)) + if self.file_client.isfile(ckpt_path): + self.file_client.remove(ckpt_path) + else: + break + + def after_train_iter(self, runner): + if self.by_epoch: + return + + # save checkpoint for following cases: + # 1. every ``self.interval`` iterations + # 2. reach the last iteration of training + if self.every_n_iters( + runner, self.interval) or (self.save_last + and self.is_last_iter(runner)): + runner.logger.info( + f'Saving checkpoint at {runner.iter + 1} iterations') + if self.sync_buffer: + allreduce_params(runner.model.buffers()) + self._save_checkpoint(runner) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/closure.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/closure.py new file mode 100644 index 0000000000000000000000000000000000000000..b955f81f425be4ac3e6bb3f4aac653887989e872 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/closure.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class ClosureHook(Hook): + + def __init__(self, fn_name, fn): + assert hasattr(self, fn_name) + assert callable(fn) + setattr(self, fn_name, fn) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/ema.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/ema.py new file mode 100644 index 0000000000000000000000000000000000000000..15c7e68088f019802a59e7ae41cc1fe0c7f28f96 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/ema.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ...parallel import is_module_wrapper +from ..hooks.hook import HOOKS, Hook + + +@HOOKS.register_module() +class EMAHook(Hook): + r"""Exponential Moving Average Hook. + + Use Exponential Moving Average on all parameters of model in training + process. All parameters have a ema backup, which update by the formula + as below. EMAHook takes priority over EvalHook and CheckpointSaverHook. + + .. 
math:: + + \text{Xema\_{t+1}} = (1 - \text{momentum}) \times + \text{Xema\_{t}} + \text{momentum} \times X_t + + Args: + momentum (float): The momentum used for updating ema parameter. + Defaults to 0.0002. + interval (int): Update ema parameter every interval iteration. + Defaults to 1. + warm_up (int): During first warm_up steps, we may use smaller momentum + to update ema parameters more slowly. Defaults to 100. + resume_from (str): The checkpoint path. Defaults to None. + """ + + def __init__(self, + momentum=0.0002, + interval=1, + warm_up=100, + resume_from=None): + assert isinstance(interval, int) and interval > 0 + self.warm_up = warm_up + self.interval = interval + assert momentum > 0 and momentum < 1 + self.momentum = momentum**interval + self.checkpoint = resume_from + + def before_run(self, runner): + """To resume model with it's ema parameters more friendly. + + Register ema parameter as ``named_buffer`` to model + """ + model = runner.model + if is_module_wrapper(model): + model = model.module + self.param_ema_buffer = {} + self.model_parameters = dict(model.named_parameters(recurse=True)) + for name, value in self.model_parameters.items(): + # "." is not allowed in module's buffer name + buffer_name = f"ema_{name.replace('.', '_')}" + self.param_ema_buffer[name] = buffer_name + model.register_buffer(buffer_name, value.data.clone()) + self.model_buffers = dict(model.named_buffers(recurse=True)) + if self.checkpoint is not None: + runner.resume(self.checkpoint) + + def after_train_iter(self, runner): + """Update ema parameter every self.interval iterations.""" + curr_step = runner.iter + # We warm up the momentum considering the instability at beginning + momentum = min(self.momentum, + (1 + curr_step) / (self.warm_up + curr_step)) + if curr_step % self.interval != 0: + return + for name, parameter in self.model_parameters.items(): + buffer_name = self.param_ema_buffer[name] + buffer_parameter = self.model_buffers[buffer_name] + buffer_parameter.mul_(1 - momentum).add_(momentum, parameter.data) + + def after_train_epoch(self, runner): + """We load parameter values from ema backup to model before the + EvalHook.""" + self._swap_ema_parameters() + + def before_train_epoch(self, runner): + """We recover model's parameter from ema backup after last epoch's + EvalHook.""" + self._swap_ema_parameters() + + def _swap_ema_parameters(self): + """Swap the parameter of model with parameter in ema_buffer.""" + for name, value in self.model_parameters.items(): + temp = value.data.clone() + ema_buffer = self.model_buffers[self.param_ema_buffer[name]] + value.data.copy_(ema_buffer.data) + ema_buffer.data.copy_(temp) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..1d76699d3d2d297539cdd49e1fe0626c379ec26f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/evaluation.py @@ -0,0 +1,509 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import warnings +from math import inf + +import torch.distributed as dist +from torch.nn.modules.batchnorm import _BatchNorm +from torch.utils.data import DataLoader + +from custom_mmpkg.custom_mmcv.fileio import FileClient +from custom_mmpkg.custom_mmcv.utils import is_seq_of +from .hook import Hook +from .logger import LoggerHook + + +class EvalHook(Hook): + """Non-Distributed evaluation hook. + + This hook will regularly perform evaluation in a given interval when + performing in non-distributed environment. + + Args: + dataloader (DataLoader): A PyTorch dataloader, whose dataset has + implemented ``evaluate`` function. + start (int | None, optional): Evaluation starting epoch. It enables + evaluation before the training starts if ``start`` <= the resuming + epoch. If None, whether to evaluate is merely decided by + ``interval``. Default: None. + interval (int): Evaluation interval. Default: 1. + by_epoch (bool): Determine perform evaluation by epoch or by iteration. + If set to True, it will perform by epoch. Otherwise, by iteration. + Default: True. + save_best (str, optional): If a metric is specified, it would measure + the best checkpoint during evaluation. The information about best + checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep + best score value and best checkpoint path, which will be also + loaded when resume checkpoint. Options are the evaluation metrics + on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox + detection and instance segmentation. ``AR@100`` for proposal + recall. If ``save_best`` is ``auto``, the first key of the returned + ``OrderedDict`` result will be used. Default: None. + rule (str | None, optional): Comparison rule for best score. If set to + None, it will infer a reasonable rule. Keys such as 'acc', 'top' + .etc will be inferred by 'greater' rule. Keys contain 'loss' will + be inferred by 'less' rule. Options are 'greater', 'less', None. + Default: None. + test_fn (callable, optional): test a model with samples from a + dataloader, and return the test results. If ``None``, the default + test function ``mmcv.engine.single_gpu_test`` will be used. + (default: ``None``) + greater_keys (List[str] | None, optional): Metric keys that will be + inferred by 'greater' comparison rule. If ``None``, + _default_greater_keys will be used. (default: ``None``) + less_keys (List[str] | None, optional): Metric keys that will be + inferred by 'less' comparison rule. If ``None``, _default_less_keys + will be used. (default: ``None``) + out_dir (str, optional): The root directory to save checkpoints. If not + specified, `runner.work_dir` will be used by default. If specified, + the `out_dir` will be the concatenation of `out_dir` and the last + level directory of `runner.work_dir`. + `New in version 1.3.16.` + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. Default: None. + `New in version 1.3.16.` + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. + + Notes: + If new arguments are added for EvalHook, tools/test.py, + tools/eval_metric.py may be affected. + """ + + # Since the key for determine greater or less is related to the downstream + # tasks, downstream repos may need to overwrite the following inner + # variable accordingly. 
+
+    rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
+    init_value_map = {'greater': -inf, 'less': inf}
+    _default_greater_keys = [
+        'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
+        'mAcc', 'aAcc'
+    ]
+    _default_less_keys = ['loss']
+
+    def __init__(self,
+                 dataloader,
+                 start=None,
+                 interval=1,
+                 by_epoch=True,
+                 save_best=None,
+                 rule=None,
+                 test_fn=None,
+                 greater_keys=None,
+                 less_keys=None,
+                 out_dir=None,
+                 file_client_args=None,
+                 **eval_kwargs):
+        if not isinstance(dataloader, DataLoader):
+            raise TypeError(f'dataloader must be a pytorch DataLoader, '
+                            f'but got {type(dataloader)}')
+
+        if interval <= 0:
+            raise ValueError(f'interval must be a positive number, '
+                             f'but got {interval}')
+
+        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
+
+        if start is not None and start < 0:
+            raise ValueError(f'The evaluation start epoch {start} is smaller '
+                             f'than 0')
+
+        self.dataloader = dataloader
+        self.interval = interval
+        self.start = start
+        self.by_epoch = by_epoch
+
+        assert isinstance(save_best, str) or save_best is None, \
+            '``save_best`` should be a str or None ' \
+            f'rather than {type(save_best)}'
+        self.save_best = save_best
+        self.eval_kwargs = eval_kwargs
+        self.initial_flag = True
+
+        if test_fn is None:
+            from custom_mmpkg.custom_mmcv.engine import single_gpu_test
+            self.test_fn = single_gpu_test
+        else:
+            self.test_fn = test_fn
+
+        if greater_keys is None:
+            self.greater_keys = self._default_greater_keys
+        else:
+            if not isinstance(greater_keys, (list, tuple)):
+                greater_keys = (greater_keys, )
+            assert is_seq_of(greater_keys, str)
+            self.greater_keys = greater_keys
+
+        if less_keys is None:
+            self.less_keys = self._default_less_keys
+        else:
+            if not isinstance(less_keys, (list, tuple)):
+                less_keys = (less_keys, )
+            assert is_seq_of(less_keys, str)
+            self.less_keys = less_keys
+
+        if self.save_best is not None:
+            self.best_ckpt_path = None
+            self._init_rule(rule, self.save_best)
+
+        self.out_dir = out_dir
+        self.file_client_args = file_client_args
+
+    def _init_rule(self, rule, key_indicator):
+        """Initialize rule, key_indicator, comparison_func, and best score.
+
+        Here is the rule to determine which comparison rule is used for the
+        key indicator when the rule is not specified (note that key indicator
+        matching is case-insensitive):
+        1. If the key indicator is in ``self.greater_keys``, the rule will be
+           specified as 'greater'.
+        2. Or if the key indicator is in ``self.less_keys``, the rule will be
+           specified as 'less'.
+        3. Or if any item in ``self.greater_keys`` is a substring of the key
+           indicator, the rule will be specified as 'greater'.
+        4. Or if any item in ``self.less_keys`` is a substring of the key
+           indicator, the rule will be specified as 'less'.
+
+        Args:
+            rule (str | None): Comparison rule for best score.
+            key_indicator (str | None): Key indicator to determine the
+                comparison rule.
+ """ + if rule not in self.rule_map and rule is not None: + raise KeyError(f'rule must be greater, less or None, ' + f'but got {rule}.') + + if rule is None: + if key_indicator != 'auto': + # `_lc` here means we use the lower case of keys for + # case-insensitive matching + key_indicator_lc = key_indicator.lower() + greater_keys = [key.lower() for key in self.greater_keys] + less_keys = [key.lower() for key in self.less_keys] + + if key_indicator_lc in greater_keys: + rule = 'greater' + elif key_indicator_lc in less_keys: + rule = 'less' + elif any(key in key_indicator_lc for key in greater_keys): + rule = 'greater' + elif any(key in key_indicator_lc for key in less_keys): + rule = 'less' + else: + raise ValueError(f'Cannot infer the rule for key ' + f'{key_indicator}, thus a specific rule ' + f'must be specified.') + self.rule = rule + self.key_indicator = key_indicator + if self.rule is not None: + self.compare_func = self.rule_map[self.rule] + + def before_run(self, runner): + if not self.out_dir: + self.out_dir = runner.work_dir + + self.file_client = FileClient.infer_client(self.file_client_args, + self.out_dir) + + # if `self.out_dir` is not equal to `runner.work_dir`, it means that + # `self.out_dir` is set so the final `self.out_dir` is the + # concatenation of `self.out_dir` and the last level directory of + # `runner.work_dir` + if self.out_dir != runner.work_dir: + basename = osp.basename(runner.work_dir.rstrip(osp.sep)) + self.out_dir = self.file_client.join_path(self.out_dir, basename) + runner.logger.info( + (f'The best checkpoint will be saved to {self.out_dir} by ' + f'{self.file_client.name}')) + + if self.save_best is not None: + if runner.meta is None: + warnings.warn('runner.meta is None. Creating an empty one.') + runner.meta = dict() + runner.meta.setdefault('hook_msgs', dict()) + self.best_ckpt_path = runner.meta['hook_msgs'].get( + 'best_ckpt', None) + + def before_train_iter(self, runner): + """Evaluate the model only at the start of training by iteration.""" + if self.by_epoch or not self.initial_flag: + return + if self.start is not None and runner.iter >= self.start: + self.after_train_iter(runner) + self.initial_flag = False + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training by epoch.""" + if not (self.by_epoch and self.initial_flag): + return + if self.start is not None and runner.epoch >= self.start: + self.after_train_epoch(runner) + self.initial_flag = False + + def after_train_iter(self, runner): + """Called after every training iter to evaluate the results.""" + if not self.by_epoch and self._should_evaluate(runner): + # Because the priority of EvalHook is higher than LoggerHook, the + # training log and the evaluating log are mixed. Therefore, + # we need to dump the training log and clear it before evaluating + # log is generated. In addition, this problem will only appear in + # `IterBasedRunner` whose `self.by_epoch` is False, because + # `EpochBasedRunner` whose `self.by_epoch` is True calls + # `_do_evaluate` in `after_train_epoch` stage, and at this stage + # the training log has been printed, so it will not cause any + # problem. 
More details at
+            # https://github.com/open-mmlab/mmsegmentation/issues/694
+            for hook in runner._hooks:
+                if isinstance(hook, LoggerHook):
+                    hook.after_train_iter(runner)
+            runner.log_buffer.clear()
+
+            self._do_evaluate(runner)
+
+    def after_train_epoch(self, runner):
+        """Called after every training epoch to evaluate the results."""
+        if self.by_epoch and self._should_evaluate(runner):
+            self._do_evaluate(runner)
+
+    def _do_evaluate(self, runner):
+        """Perform evaluation and save the checkpoint."""
+        results = self.test_fn(runner.model, self.dataloader)
+        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+        key_score = self.evaluate(runner, results)
+        # key_score may be `None`, in which case saving the best checkpoint
+        # is skipped
+        if self.save_best and key_score:
+            self._save_ckpt(runner, key_score)
+
+    def _should_evaluate(self, runner):
+        """Judge whether to perform evaluation.
+
+        Here is the rule to judge whether to perform evaluation:
+        1. It will not perform evaluation during the epoch/iteration interval,
+           which is determined by ``self.interval``.
+        2. It will not perform evaluation if the start time is larger than
+           the current time.
+        3. It will not perform evaluation when the current time is larger
+           than the start time but does not fall on the evaluation interval.
+
+        Returns:
+            bool: The flag indicating whether to perform evaluation.
+        """
+        if self.by_epoch:
+            current = runner.epoch
+            check_time = self.every_n_epochs
+        else:
+            current = runner.iter
+            check_time = self.every_n_iters
+
+        if self.start is None:
+            if not check_time(runner, self.interval):
+                # No evaluation during the interval.
+                return False
+        elif (current + 1) < self.start:
+            # No evaluation if start is larger than the current time.
+            return False
+        else:
+            # Evaluation only at epochs/iters 3, 5, 7...
+            # if start==3 and interval==2
+            if (current + 1 - self.start) % self.interval:
+                return False
+        return True
+
+    def _save_ckpt(self, runner, key_score):
+        """Save the best checkpoint.
+
+        It will compare the score according to the compare function, write
+        related information (best score, best checkpoint path) and save the
+        best checkpoint into ``work_dir``.
+        """
+        if self.by_epoch:
+            current = f'epoch_{runner.epoch + 1}'
+            cur_type, cur_time = 'epoch', runner.epoch + 1
+        else:
+            current = f'iter_{runner.iter + 1}'
+            cur_type, cur_time = 'iter', runner.iter + 1
+
+        best_score = runner.meta['hook_msgs'].get(
+            'best_score', self.init_value_map[self.rule])
+        if self.compare_func(key_score, best_score):
+            best_score = key_score
+            runner.meta['hook_msgs']['best_score'] = best_score
+
+            if self.best_ckpt_path and self.file_client.isfile(
+                    self.best_ckpt_path):
+                self.file_client.remove(self.best_ckpt_path)
+                runner.logger.info(
+                    (f'The previous best checkpoint {self.best_ckpt_path} was '
+                     'removed'))
+
+            best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
+            self.best_ckpt_path = self.file_client.join_path(
+                self.out_dir, best_ckpt_name)
+            runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
+
+            runner.save_checkpoint(
+                self.out_dir, best_ckpt_name, create_symlink=False)
+            runner.logger.info(
+                f'Now best checkpoint is saved as {best_ckpt_name}.')
+            runner.logger.info(
+                f'Best {self.key_indicator} is {best_score:0.4f} '
+                f'at {cur_time} {cur_type}.')
+
+    def evaluate(self, runner, results):
+        """Evaluate the results.
+
+        Args:
+            runner (:obj:`mmcv.Runner`): The underlying training runner.
+            results (list): Output results.
+        """
+        eval_res = self.dataloader.dataset.evaluate(
+            results, logger=runner.logger, **self.eval_kwargs)
+
+        for name, val in eval_res.items():
+            runner.log_buffer.output[name] = val
+        runner.log_buffer.ready = True
+
+        if self.save_best is not None:
+            # If the performance of the model is poor, `eval_res` may be an
+            # empty dict, which would raise an exception when
+            # `self.save_best` is not None. More details at
+            # https://github.com/open-mmlab/mmdetection/issues/6265.
+            if not eval_res:
+                warnings.warn(
+                    'Since `eval_res` is an empty dict, the behavior to save '
+                    'the best checkpoint will be skipped in this evaluation.')
+                return None
+
+            if self.key_indicator == 'auto':
+                # infer from eval_results
+                self._init_rule(self.rule, list(eval_res.keys())[0])
+            return eval_res[self.key_indicator]
+
+        return None
+
+
+class DistEvalHook(EvalHook):
+    """Distributed evaluation hook.
+
+    This hook will regularly perform evaluation at a given interval when
+    running in a distributed environment.
+
+    Args:
+        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
+            implemented ``evaluate`` function.
+        start (int | None, optional): Evaluation starting epoch. It enables
+            evaluation before the training starts if ``start`` <= the resuming
+            epoch. If None, whether to evaluate is merely decided by
+            ``interval``. Default: None.
+        interval (int): Evaluation interval. Default: 1.
+        by_epoch (bool): Whether to perform evaluation by epoch or by
+            iteration. If set to True, it will perform by epoch. Otherwise,
+            by iteration. Default: True.
+        save_best (str, optional): If a metric is specified, the best
+            checkpoint will be tracked during evaluation. The information
+            about the best checkpoint is saved in ``runner.meta['hook_msgs']``
+            to keep the best score value and best checkpoint path, which will
+            also be loaded when resuming a checkpoint. Options are the
+            evaluation metrics on the test dataset, e.g., ``bbox_mAP``,
+            ``segm_mAP`` for bbox detection and instance segmentation,
+            ``AR@100`` for proposal recall. If ``save_best`` is ``auto``, the
+            first key of the returned ``OrderedDict`` result will be used.
+            Default: None.
+        rule (str | None, optional): Comparison rule for best score. If set to
+            None, it will infer a reasonable rule. Keys such as 'acc', 'top',
+            etc. will be inferred by the 'greater' rule. Keys containing
+            'loss' will be inferred by the 'less' rule. Options are 'greater',
+            'less', None. Default: None.
+        test_fn (callable, optional): Test a model with samples from a
+            dataloader in a multi-gpu manner and return the test results. If
+            ``None``, the default test function ``mmcv.engine.multi_gpu_test``
+            will be used. (default: ``None``)
+        tmpdir (str | None): Temporary directory to save the results of all
+            processes. Default: None.
+        gpu_collect (bool): Whether to use gpu or cpu to collect results.
+            Default: False.
+        broadcast_bn_buffer (bool): Whether to broadcast the buffers
+            (running_mean and running_var) of rank 0 to other ranks before
+            evaluation. Default: True.
+        out_dir (str, optional): The root directory to save checkpoints. If
+            not specified, `runner.work_dir` will be used by default. If
+            specified, the `out_dir` will be the concatenation of `out_dir`
+            and the last level directory of `runner.work_dir`.
+        file_client_args (dict): Arguments to instantiate a FileClient.
+            See :class:`mmcv.fileio.FileClient` for details. Default: None.
+        **eval_kwargs: Evaluation arguments fed into the evaluate function of
+            the dataset.
+    """
+
+    def __init__(self,
+                 dataloader,
+                 start=None,
+                 interval=1,
+                 by_epoch=True,
+                 save_best=None,
+                 rule=None,
+                 test_fn=None,
+                 greater_keys=None,
+                 less_keys=None,
+                 broadcast_bn_buffer=True,
+                 tmpdir=None,
+                 gpu_collect=False,
+                 out_dir=None,
+                 file_client_args=None,
+                 **eval_kwargs):
+
+        if test_fn is None:
+            from custom_mmpkg.custom_mmcv.engine import multi_gpu_test
+            test_fn = multi_gpu_test
+
+        super().__init__(
+            dataloader,
+            start=start,
+            interval=interval,
+            by_epoch=by_epoch,
+            save_best=save_best,
+            rule=rule,
+            test_fn=test_fn,
+            greater_keys=greater_keys,
+            less_keys=less_keys,
+            out_dir=out_dir,
+            file_client_args=file_client_args,
+            **eval_kwargs)
+
+        self.broadcast_bn_buffer = broadcast_bn_buffer
+        self.tmpdir = tmpdir
+        self.gpu_collect = gpu_collect
+
+    def _do_evaluate(self, runner):
+        """Perform evaluation and save the checkpoint."""
+        # Synchronization of BatchNorm's buffers (running_mean and
+        # running_var) is not supported in PyTorch's DDP, which may cause
+        # inconsistent performance of models across ranks, so we broadcast
+        # BatchNorm's buffers of rank 0 to the other ranks to avoid this.
+        if self.broadcast_bn_buffer:
+            model = runner.model
+            for name, module in model.named_modules():
+                if isinstance(module,
+                              _BatchNorm) and module.track_running_stats:
+                    dist.broadcast(module.running_var, 0)
+                    dist.broadcast(module.running_mean, 0)
+
+        tmpdir = self.tmpdir
+        if tmpdir is None:
+            tmpdir = osp.join(runner.work_dir, '.eval_hook')
+
+        results = self.test_fn(
+            runner.model,
+            self.dataloader,
+            tmpdir=tmpdir,
+            gpu_collect=self.gpu_collect)
+        if runner.rank == 0:
+            print('\n')
+            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+            key_score = self.evaluate(runner, results)
+            # key_score may be `None`, in which case saving the best
+            # checkpoint is skipped
+            if self.save_best and key_score:
+                self._save_ckpt(runner, key_score)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/hook.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e497e18e080f726fc95e62386248425a8848b3f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/hook.py
@@ -0,0 +1,92 @@
+# Copyright (c) OpenMMLab. All rights reserved.
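+#
+# Usage sketch (illustrative; ``MyHook`` is a hypothetical example, not part
+# of this module). Subclasses override only the stages they need, and
+# ``get_triggered_stages`` reports which stages an instance will fire on:
+#
+#     @HOOKS.register_module()
+#     class MyHook(Hook):
+#         def after_train_iter(self, runner):
+#             pass  # custom per-iteration logic here
+#
+#     MyHook().get_triggered_stages()  # -> ['after_train_iter']
+#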
+from custom_mmpkg.custom_mmcv.utils import Registry, is_method_overridden + +HOOKS = Registry('hook') + + +class Hook: + stages = ('before_run', 'before_train_epoch', 'before_train_iter', + 'after_train_iter', 'after_train_epoch', 'before_val_epoch', + 'before_val_iter', 'after_val_iter', 'after_val_epoch', + 'after_run') + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass + + def before_train_epoch(self, runner): + self.before_epoch(runner) + + def before_val_epoch(self, runner): + self.before_epoch(runner) + + def after_train_epoch(self, runner): + self.after_epoch(runner) + + def after_val_epoch(self, runner): + self.after_epoch(runner) + + def before_train_iter(self, runner): + self.before_iter(runner) + + def before_val_iter(self, runner): + self.before_iter(runner) + + def after_train_iter(self, runner): + self.after_iter(runner) + + def after_val_iter(self, runner): + self.after_iter(runner) + + def every_n_epochs(self, runner, n): + return (runner.epoch + 1) % n == 0 if n > 0 else False + + def every_n_inner_iters(self, runner, n): + return (runner.inner_iter + 1) % n == 0 if n > 0 else False + + def every_n_iters(self, runner, n): + return (runner.iter + 1) % n == 0 if n > 0 else False + + def end_of_epoch(self, runner): + return runner.inner_iter + 1 == len(runner.data_loader) + + def is_last_epoch(self, runner): + return runner.epoch + 1 == runner._max_epochs + + def is_last_iter(self, runner): + return runner.iter + 1 == runner._max_iters + + def get_triggered_stages(self): + trigger_stages = set() + for stage in Hook.stages: + if is_method_overridden(stage, Hook, self): + trigger_stages.add(stage) + + # some methods will be triggered in multi stages + # use this dict to map method to stages. + method_stages_map = { + 'before_epoch': ['before_train_epoch', 'before_val_epoch'], + 'after_epoch': ['after_train_epoch', 'after_val_epoch'], + 'before_iter': ['before_train_iter', 'before_val_iter'], + 'after_iter': ['after_train_iter', 'after_val_iter'], + } + + for method, map_stages in method_stages_map.items(): + if is_method_overridden(method, Hook, self): + trigger_stages.update(map_stages) + + return [stage for stage in Hook.stages if stage in trigger_stages] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/iter_timer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/iter_timer.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd5002fe85ffc6992155ac01003878064a1d9be --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/iter_timer.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
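+#
+# Timing sketch (illustrative): with ``self.t`` reset in before_epoch and
+# after_iter, every iteration logs
+#     data_time = t(before_iter) - self.t  # time spent waiting on the loader
+#     time      = t(after_iter)  - self.t  # loader wait + forward/backward
+# so ``time - data_time`` approximates the pure compute time per iteration.
+#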
+import time + +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class IterTimerHook(Hook): + + def before_epoch(self, runner): + self.t = time.time() + + def before_iter(self, runner): + runner.log_buffer.update({'data_time': time.time() - self.t}) + + def after_iter(self, runner): + runner.log_buffer.update({'time': time.time() - self.t}) + self.t = time.time() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a0b6b345640a895368ac8a647afef6f24333d90e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import LoggerHook +from .dvclive import DvcliveLoggerHook +from .mlflow import MlflowLoggerHook +from .neptune import NeptuneLoggerHook +from .pavi import PaviLoggerHook +from .tensorboard import TensorboardLoggerHook +from .text import TextLoggerHook +from .wandb import WandbLoggerHook + +__all__ = [ + 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook', + 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook', + 'NeptuneLoggerHook', 'DvcliveLoggerHook' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/base.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/base.py new file mode 100644 index 0000000000000000000000000000000000000000..f845256729458ced821762a1b8ef881e17ff9955 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/base.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch + +from ..hook import Hook + + +class LoggerHook(Hook): + """Base class for logger hooks. + + Args: + interval (int): Logging interval (every k iterations). + ignore_last (bool): Ignore the log of last iterations in each epoch + if less than `interval`. + reset_flag (bool): Whether to clear the output buffer after logging. + by_epoch (bool): Whether EpochBasedRunner is used. + """ + + __metaclass__ = ABCMeta + + def __init__(self, + interval=10, + ignore_last=True, + reset_flag=False, + by_epoch=True): + self.interval = interval + self.ignore_last = ignore_last + self.reset_flag = reset_flag + self.by_epoch = by_epoch + + @abstractmethod + def log(self, runner): + pass + + @staticmethod + def is_scalar(val, include_np=True, include_torch=True): + """Tell the input variable is a scalar or not. + + Args: + val: Input variable. + include_np (bool): Whether include 0-d np.ndarray as a scalar. + include_torch (bool): Whether include 0-d torch.Tensor as a scalar. + + Returns: + bool: True or False. 
+        """
+        if isinstance(val, numbers.Number):
+            return True
+        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
+            return True
+        elif include_torch and isinstance(val, torch.Tensor) and \
+                val.numel() == 1:
+            # numel() == 1 covers both 0-d and single-element tensors;
+            # len() would raise a TypeError on 0-d tensors.
+            return True
+        else:
+            return False
+
+    def get_mode(self, runner):
+        if runner.mode == 'train':
+            if 'time' in runner.log_buffer.output:
+                mode = 'train'
+            else:
+                mode = 'val'
+        elif runner.mode == 'val':
+            mode = 'val'
+        else:
+            raise ValueError(f"runner mode should be 'train' or 'val', "
+                             f'but got {runner.mode}')
+        return mode
+
+    def get_epoch(self, runner):
+        if runner.mode == 'train':
+            epoch = runner.epoch + 1
+        elif runner.mode == 'val':
+            # normal val mode
+            # runner.epoch += 1 has been done before val workflow
+            epoch = runner.epoch
+        else:
+            raise ValueError(f"runner mode should be 'train' or 'val', "
+                             f'but got {runner.mode}')
+        return epoch
+
+    def get_iter(self, runner, inner_iter=False):
+        """Get the current training iteration step."""
+        if self.by_epoch and inner_iter:
+            current_iter = runner.inner_iter + 1
+        else:
+            current_iter = runner.iter + 1
+        return current_iter
+
+    def get_lr_tags(self, runner):
+        tags = {}
+        lrs = runner.current_lr()
+        if isinstance(lrs, dict):
+            for name, value in lrs.items():
+                tags[f'learning_rate/{name}'] = value[0]
+        else:
+            tags['learning_rate'] = lrs[0]
+        return tags
+
+    def get_momentum_tags(self, runner):
+        tags = {}
+        momentums = runner.current_momentum()
+        if isinstance(momentums, dict):
+            for name, value in momentums.items():
+                tags[f'momentum/{name}'] = value[0]
+        else:
+            tags['momentum'] = momentums[0]
+        return tags
+
+    def get_loggable_tags(self,
+                          runner,
+                          allow_scalar=True,
+                          allow_text=False,
+                          add_mode=True,
+                          tags_to_skip=('time', 'data_time')):
+        tags = {}
+        for var, val in runner.log_buffer.output.items():
+            if var in tags_to_skip:
+                continue
+            if self.is_scalar(val) and not allow_scalar:
+                continue
+            if isinstance(val, str) and not allow_text:
+                continue
+            if add_mode:
+                var = f'{self.get_mode(runner)}/{var}'
+            tags[var] = val
+        tags.update(self.get_lr_tags(runner))
+        tags.update(self.get_momentum_tags(runner))
+        return tags
+
+    def before_run(self, runner):
+        for hook in runner.hooks[::-1]:
+            if isinstance(hook, LoggerHook):
+                hook.reset_flag = True
+                break
+
+    def before_epoch(self, runner):
+        runner.log_buffer.clear()  # clear logs of last epoch
+
+    def after_train_iter(self, runner):
+        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
+            runner.log_buffer.average(self.interval)
+        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
+            runner.log_buffer.average(self.interval)
+        elif self.end_of_epoch(runner) and not self.ignore_last:
+            # not precise but more stable
+            runner.log_buffer.average(self.interval)
+
+        if runner.log_buffer.ready:
+            self.log(runner)
+            if self.reset_flag:
+                runner.log_buffer.clear_output()
+
+    def after_train_epoch(self, runner):
+        if runner.log_buffer.ready:
+            self.log(runner)
+            if self.reset_flag:
+                runner.log_buffer.clear_output()
+
+    def after_val_epoch(self, runner):
+        runner.log_buffer.average()
+        self.log(runner)
+        if self.reset_flag:
+            runner.log_buffer.clear_output()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/dvclive.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/dvclive.py
new file mode 100644
index 0000000000000000000000000000000000000000..687cdc58c0336c92b1e4f9a410ba67ebaab2bc7a
--- /dev/null
+++
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/dvclive.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ...dist_utils import master_only +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class DvcliveLoggerHook(LoggerHook): + """Class to log metrics with dvclive. + + It requires `dvclive`_ to be installed. + + Args: + path (str): Directory where dvclive will write TSV log files. + interval (int): Logging interval (every k iterations). + Default 10. + ignore_last (bool): Ignore the log of last iterations in each epoch + if less than `interval`. + Default: True. + reset_flag (bool): Whether to clear the output buffer after logging. + Default: True. + by_epoch (bool): Whether EpochBasedRunner is used. + Default: True. + + .. _dvclive: + https://dvc.org/doc/dvclive + """ + + def __init__(self, + path, + interval=10, + ignore_last=True, + reset_flag=True, + by_epoch=True): + + super(DvcliveLoggerHook, self).__init__(interval, ignore_last, + reset_flag, by_epoch) + self.path = path + self.import_dvclive() + + def import_dvclive(self): + try: + import dvclive + except ImportError: + raise ImportError( + 'Please run "pip install dvclive" to install dvclive') + self.dvclive = dvclive + + @master_only + def before_run(self, runner): + self.dvclive.init(self.path) + + @master_only + def log(self, runner): + tags = self.get_loggable_tags(runner) + if tags: + for k, v in tags.items(): + self.dvclive.log(k, v, step=self.get_iter(runner)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/mlflow.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/mlflow.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a72592be47b534ce22573775fd5a7e8e86d72d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/mlflow.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ...dist_utils import master_only +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class MlflowLoggerHook(LoggerHook): + + def __init__(self, + exp_name=None, + tags=None, + log_model=True, + interval=10, + ignore_last=True, + reset_flag=False, + by_epoch=True): + """Class to log metrics and (optionally) a trained model to MLflow. + + It requires `MLflow`_ to be installed. + + Args: + exp_name (str, optional): Name of the experiment to be used. + Default None. + If not None, set the active experiment. + If experiment does not exist, an experiment with provided name + will be created. + tags (dict of str: str, optional): Tags for the current run. + Default None. + If not None, set tags for the current run. + log_model (bool, optional): Whether to log an MLflow artifact. + Default True. + If True, log runner.model as an MLflow artifact + for the current run. + interval (int): Logging interval (every k iterations). + ignore_last (bool): Ignore the log of last iterations in each epoch + if less than `interval`. + reset_flag (bool): Whether to clear the output buffer after logging + by_epoch (bool): Whether EpochBasedRunner is used. + + .. 
_MLflow: + https://www.mlflow.org/docs/latest/index.html + """ + super(MlflowLoggerHook, self).__init__(interval, ignore_last, + reset_flag, by_epoch) + self.import_mlflow() + self.exp_name = exp_name + self.tags = tags + self.log_model = log_model + + def import_mlflow(self): + try: + import mlflow + import mlflow.pytorch as mlflow_pytorch + except ImportError: + raise ImportError( + 'Please run "pip install mlflow" to install mlflow') + self.mlflow = mlflow + self.mlflow_pytorch = mlflow_pytorch + + @master_only + def before_run(self, runner): + super(MlflowLoggerHook, self).before_run(runner) + if self.exp_name is not None: + self.mlflow.set_experiment(self.exp_name) + if self.tags is not None: + self.mlflow.set_tags(self.tags) + + @master_only + def log(self, runner): + tags = self.get_loggable_tags(runner) + if tags: + self.mlflow.log_metrics(tags, step=self.get_iter(runner)) + + @master_only + def after_run(self, runner): + if self.log_model: + self.mlflow_pytorch.log_model(runner.model, 'models') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/neptune.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/neptune.py new file mode 100644 index 0000000000000000000000000000000000000000..7a38772b0c93a8608f32c6357b8616e77c139dc9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/neptune.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ...dist_utils import master_only +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class NeptuneLoggerHook(LoggerHook): + """Class to log metrics to NeptuneAI. + + It requires `neptune-client` to be installed. + + Args: + init_kwargs (dict): a dict contains the initialization keys as below: + - project (str): Name of a project in a form of + namespace/project_name. If None, the value of + NEPTUNE_PROJECT environment variable will be taken. + - api_token (str): User’s API token. + If None, the value of NEPTUNE_API_TOKEN environment + variable will be taken. Note: It is strongly recommended + to use NEPTUNE_API_TOKEN environment variable rather than + placing your API token in plain text in your source code. + - name (str, optional, default is 'Untitled'): Editable name of + the run. Name is displayed in the run's Details and in + Runs table as a column. + Check https://docs.neptune.ai/api-reference/neptune#init for + more init arguments. + interval (int): Logging interval (every k iterations). + ignore_last (bool): Ignore the log of last iterations in each epoch + if less than `interval`. + reset_flag (bool): Whether to clear the output buffer after logging + by_epoch (bool): Whether EpochBasedRunner is used. + + .. 
_NeptuneAI:
+        https://docs.neptune.ai/you-should-know/logging-metadata
+    """
+
+    def __init__(self,
+                 init_kwargs=None,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=True,
+                 with_step=True,
+                 by_epoch=True):
+
+        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
+                                                reset_flag, by_epoch)
+        self.import_neptune()
+        self.init_kwargs = init_kwargs
+        self.with_step = with_step
+
+    def import_neptune(self):
+        try:
+            import neptune.new as neptune
+        except ImportError:
+            raise ImportError(
+                'Please run "pip install neptune-client" to install neptune')
+        self.neptune = neptune
+        self.run = None
+
+    @master_only
+    def before_run(self, runner):
+        if self.init_kwargs:
+            self.run = self.neptune.init(**self.init_kwargs)
+        else:
+            self.run = self.neptune.init()
+
+    @master_only
+    def log(self, runner):
+        tags = self.get_loggable_tags(runner)
+        if tags:
+            if not self.with_step:
+                # record the step in the payload instead of mutating `tags`
+                # while iterating over it
+                tags['global_step'] = self.get_iter(runner)
+            for tag_name, tag_value in tags.items():
+                if self.with_step:
+                    self.run[tag_name].log(
+                        tag_value, step=self.get_iter(runner))
+                else:
+                    self.run[tag_name].log(tag_value)
+
+    @master_only
+    def after_run(self, runner):
+        self.run.stop()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/pavi.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/pavi.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c5f14224cc4762cd1ef18a5d3b49d023f22a1dc
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/pavi.py
@@ -0,0 +1,117 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os
+import os.path as osp
+
+import torch
+import yaml
+
+import custom_mmpkg.custom_mmcv as mmcv
+from ....parallel.utils import is_module_wrapper
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class PaviLoggerHook(LoggerHook):
+
+    def __init__(self,
+                 init_kwargs=None,
+                 add_graph=False,
+                 add_last_ckpt=False,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=False,
+                 by_epoch=True,
+                 img_key='img_info'):
+        super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
+                                             by_epoch)
+        self.init_kwargs = init_kwargs
+        self.add_graph = add_graph
+        self.add_last_ckpt = add_last_ckpt
+        self.img_key = img_key
+
+    @master_only
+    def before_run(self, runner):
+        super(PaviLoggerHook, self).before_run(runner)
+        try:
+            from pavi import SummaryWriter
+        except ImportError:
+            raise ImportError('Please run "pip install pavi" to install pavi.')
+
+        self.run_name = runner.work_dir.split('/')[-1]
+
+        if not self.init_kwargs:
+            self.init_kwargs = dict()
+        self.init_kwargs['name'] = self.run_name
+        self.init_kwargs['model'] = runner._model_name
+        if runner.meta is not None:
+            if 'config_dict' in runner.meta:
+                config_dict = runner.meta['config_dict']
+                assert isinstance(
+                    config_dict,
+                    dict), ('meta["config_dict"] has to be a dict, '
+                            f'but got {type(config_dict)}')
+            elif 'config_file' in runner.meta:
+                config_file = runner.meta['config_file']
+                config_dict = dict(mmcv.Config.fromfile(config_file))
+            else:
+                config_dict = None
+            if config_dict is not None:
+                # 'max_.*iter' is parsed in pavi sdk as the maximum iterations
+                # to properly set up the progress bar.
+ config_dict = config_dict.copy() + config_dict.setdefault('max_iter', runner.max_iters) + # non-serializable values are first converted in + # mmcv.dump to json + config_dict = json.loads( + mmcv.dump(config_dict, file_format='json')) + session_text = yaml.dump(config_dict) + self.init_kwargs['session_text'] = session_text + self.writer = SummaryWriter(**self.init_kwargs) + + def get_step(self, runner): + """Get the total training step/epoch.""" + if self.get_mode(runner) == 'val' and self.by_epoch: + return self.get_epoch(runner) + else: + return self.get_iter(runner) + + @master_only + def log(self, runner): + tags = self.get_loggable_tags(runner, add_mode=False) + if tags: + self.writer.add_scalars( + self.get_mode(runner), tags, self.get_step(runner)) + + @master_only + def after_run(self, runner): + if self.add_last_ckpt: + ckpt_path = osp.join(runner.work_dir, 'latest.pth') + if osp.islink(ckpt_path): + ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path)) + + if osp.isfile(ckpt_path): + # runner.epoch += 1 has been done before `after_run`. + iteration = runner.epoch if self.by_epoch else runner.iter + return self.writer.add_snapshot_file( + tag=self.run_name, + snapshot_file_path=ckpt_path, + iteration=iteration) + + # flush the buffer and send a task ending signal to Pavi + self.writer.close() + + @master_only + def before_epoch(self, runner): + if runner.epoch == 0 and self.add_graph: + if is_module_wrapper(runner.model): + _model = runner.model.module + else: + _model = runner.model + device = next(_model.parameters()).device + data = next(iter(runner.data_loader)) + image = data[self.img_key][0:1].to(device) + with torch.no_grad(): + self.writer.add_graph(_model, image) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/tensorboard.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/tensorboard.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9c727ff9776c5c8d41838f2f0676a4db56186b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/tensorboard.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
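+#
+# Config sketch (illustrative; assumes the usual mmcv ``log_config`` wiring,
+# and the values are examples only). The hook picks tensorboardX on parrots
+# or PyTorch < 1.1 and torch.utils.tensorboard otherwise:
+#
+#     log_config = dict(
+#         interval=10,
+#         hooks=[dict(type='TensorboardLoggerHook', log_dir=None)])
+#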
+import os.path as osp
+
+from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, digit_version
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class TensorboardLoggerHook(LoggerHook):
+
+    def __init__(self,
+                 log_dir=None,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=False,
+                 by_epoch=True):
+        super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
+                                                    reset_flag, by_epoch)
+        self.log_dir = log_dir
+
+    @master_only
+    def before_run(self, runner):
+        super(TensorboardLoggerHook, self).before_run(runner)
+        if (TORCH_VERSION == 'parrots'
+                or digit_version(TORCH_VERSION) < digit_version('1.1')):
+            try:
+                from tensorboardX import SummaryWriter
+            except ImportError:
+                raise ImportError('Please install tensorboardX to use '
+                                  'TensorboardLoggerHook.')
+        else:
+            try:
+                from torch.utils.tensorboard import SummaryWriter
+            except ImportError:
+                raise ImportError(
+                    'Please run "pip install future tensorboard" to install '
+                    'the dependencies to use torch.utils.tensorboard '
+                    '(applicable to PyTorch 1.1 or higher)')
+
+        if self.log_dir is None:
+            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
+        self.writer = SummaryWriter(self.log_dir)
+
+    @master_only
+    def log(self, runner):
+        tags = self.get_loggable_tags(runner, allow_text=True)
+        for tag, val in tags.items():
+            if isinstance(val, str):
+                self.writer.add_text(tag, val, self.get_iter(runner))
+            else:
+                self.writer.add_scalar(tag, val, self.get_iter(runner))
+
+    @master_only
+    def after_run(self, runner):
+        self.writer.close()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/text.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea12c02a96d590493ae48055196bb28798bfefff
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/text.py
@@ -0,0 +1,256 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import datetime
+import os
+import os.path as osp
+from collections import OrderedDict
+
+import torch
+import torch.distributed as dist
+
+import custom_mmpkg.custom_mmcv as mmcv
+from custom_mmpkg.custom_mmcv.fileio.file_client import FileClient
+from custom_mmpkg.custom_mmcv.utils import is_tuple_of, scandir
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class TextLoggerHook(LoggerHook):
+    """Logger hook in text format.
+
+    In this logger hook, the information will be printed to the terminal and
+    saved in a JSON file.
+
+    Args:
+        by_epoch (bool, optional): Whether EpochBasedRunner is used.
+            Default: True.
+        interval (int, optional): Logging interval (every k iterations).
+            Default: 10.
+        ignore_last (bool, optional): Ignore the logs of the last iterations
+            in each epoch if fewer than :attr:`interval` remain.
+            Default: True.
+        reset_flag (bool, optional): Whether to clear the output buffer after
+            logging. Default: False.
+        interval_exp_name (int, optional): Logging interval for experiment
+            name. This feature is to help users conveniently get the
+            experiment information from screen or log file. Default: 1000.
+        out_dir (str, optional): Logs are saved in ``runner.work_dir`` by
+            default. If ``out_dir`` is specified, logs will be copied to a new
+            directory which is the concatenation of ``out_dir`` and the last
+            level directory of ``runner.work_dir``. Default: None.
+            `New in version 1.3.16.`
+        out_suffix (str or tuple[str], optional): Those filenames ending with
+            ``out_suffix`` will be copied to ``out_dir``.
+            Default: ('.log.json', '.log', '.py').
+            `New in version 1.3.16.`
+        keep_local (bool, optional): Whether to keep local log when
+            :attr:`out_dir` is specified. If False, the local log will be
+            removed. Default: True.
+            `New in version 1.3.16.`
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            Default: None.
+            `New in version 1.3.16.`
+    """
+
+    def __init__(self,
+                 by_epoch=True,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=False,
+                 interval_exp_name=1000,
+                 out_dir=None,
+                 out_suffix=('.log.json', '.log', '.py'),
+                 keep_local=True,
+                 file_client_args=None):
+        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
+                                             by_epoch)
+        self.by_epoch = by_epoch
+        self.time_sec_tot = 0
+        self.interval_exp_name = interval_exp_name
+
+        if out_dir is None and file_client_args is not None:
+            raise ValueError(
+                'file_client_args should be "None" when `out_dir` is not '
+                'specified.')
+        self.out_dir = out_dir
+
+        if not (out_dir is None or isinstance(out_dir, str)
+                or is_tuple_of(out_dir, str)):
+            raise TypeError('out_dir should be "None" or string or tuple of '
+                            f'string, but got {out_dir}')
+        self.out_suffix = out_suffix
+
+        self.keep_local = keep_local
+        self.file_client_args = file_client_args
+        if self.out_dir is not None:
+            self.file_client = FileClient.infer_client(file_client_args,
+                                                       self.out_dir)
+
+    def before_run(self, runner):
+        super(TextLoggerHook, self).before_run(runner)
+
+        if self.out_dir is not None:
+            self.file_client = FileClient.infer_client(self.file_client_args,
+                                                       self.out_dir)
+            # The final `self.out_dir` is the concatenation of `self.out_dir`
+            # and the last level directory of `runner.work_dir`
+            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
+            self.out_dir = self.file_client.join_path(self.out_dir, basename)
+            runner.logger.info(
+                (f'Text logs will be saved to {self.out_dir} by '
+                 f'{self.file_client.name} after the training process.'))
+
+        self.start_iter = runner.iter
+        self.json_log_path = osp.join(runner.work_dir,
+                                      f'{runner.timestamp}.log.json')
+        if runner.meta is not None:
+            self._dump_log(runner.meta, runner)
+
+    def _get_max_memory(self, runner):
+        device = getattr(runner.model, 'output_device', None)
+        mem = torch.cuda.max_memory_allocated(device=device)
+        mem_mb = torch.tensor([mem / (1024 * 1024)],
+                              dtype=torch.int,
+                              device=device)
+        if runner.world_size > 1:
+            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
+        return mem_mb.item()
+
+    def _log_info(self, log_dict, runner):
+        # print exp name for users to distinguish experiments
+        # at every ``interval_exp_name`` iterations and the end of each epoch
+        if runner.meta is not None and 'exp_name' in runner.meta:
+            if (self.every_n_iters(runner, self.interval_exp_name)) or (
+                    self.by_epoch and self.end_of_epoch(runner)):
+                exp_info = f'Exp name: {runner.meta["exp_name"]}'
+                runner.logger.info(exp_info)
+
+        if log_dict['mode'] == 'train':
+            if isinstance(log_dict['lr'], dict):
+                lr_str = []
+                for k, val in log_dict['lr'].items():
+                    lr_str.append(f'lr_{k}: {val:.3e}')
+                lr_str = ' '.join(lr_str)
+            else:
+                lr_str = f'lr: {log_dict["lr"]:.3e}'
+
+            # by epoch: Epoch [4][100/1000]
+            # by iter:  Iter [100/100000]
+            if self.by_epoch:
+                log_str = f'Epoch [{log_dict["epoch"]}]' \
+                          f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
+            else:
+                log_str = f'Iter
[{log_dict["iter"]}/{runner.max_iters}]\t' + log_str += f'{lr_str}, ' + + if 'time' in log_dict.keys(): + self.time_sec_tot += (log_dict['time'] * self.interval) + time_sec_avg = self.time_sec_tot / ( + runner.iter - self.start_iter + 1) + eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1) + eta_str = str(datetime.timedelta(seconds=int(eta_sec))) + log_str += f'eta: {eta_str}, ' + log_str += f'time: {log_dict["time"]:.3f}, ' \ + f'data_time: {log_dict["data_time"]:.3f}, ' + # statistic memory + if torch.cuda.is_available(): + log_str += f'memory: {log_dict["memory"]}, ' + else: + # val/test time + # here 1000 is the length of the val dataloader + # by epoch: Epoch[val] [4][1000] + # by iter: Iter[val] [1000] + if self.by_epoch: + log_str = f'Epoch({log_dict["mode"]}) ' \ + f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t' + else: + log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t' + + log_items = [] + for name, val in log_dict.items(): + # TODO: resolve this hack + # these items have been in log_str + if name in [ + 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', + 'memory', 'epoch' + ]: + continue + if isinstance(val, float): + val = f'{val:.4f}' + log_items.append(f'{name}: {val}') + log_str += ', '.join(log_items) + + runner.logger.info(log_str) + + def _dump_log(self, log_dict, runner): + # dump log in json format + json_log = OrderedDict() + for k, v in log_dict.items(): + json_log[k] = self._round_float(v) + # only append log at last line + if runner.rank == 0: + with open(self.json_log_path, 'a+') as f: + mmcv.dump(json_log, f, file_format='json') + f.write('\n') + + def _round_float(self, items): + if isinstance(items, list): + return [self._round_float(item) for item in items] + elif isinstance(items, float): + return round(items, 5) + else: + return items + + def log(self, runner): + if 'eval_iter_num' in runner.log_buffer.output: + # this doesn't modify runner.iter and is regardless of by_epoch + cur_iter = runner.log_buffer.output.pop('eval_iter_num') + else: + cur_iter = self.get_iter(runner, inner_iter=True) + + log_dict = OrderedDict( + mode=self.get_mode(runner), + epoch=self.get_epoch(runner), + iter=cur_iter) + + # only record lr of the first param group + cur_lr = runner.current_lr() + if isinstance(cur_lr, list): + log_dict['lr'] = cur_lr[0] + else: + assert isinstance(cur_lr, dict) + log_dict['lr'] = {} + for k, lr_ in cur_lr.items(): + assert isinstance(lr_, list) + log_dict['lr'].update({k: lr_[0]}) + + if 'time' in runner.log_buffer.output: + # statistic memory + if torch.cuda.is_available(): + log_dict['memory'] = self._get_max_memory(runner) + + log_dict = dict(log_dict, **runner.log_buffer.output) + + self._log_info(log_dict, runner) + self._dump_log(log_dict, runner) + return log_dict + + def after_run(self, runner): + # copy or upload logs to self.out_dir + if self.out_dir is not None: + for filename in scandir(runner.work_dir, self.out_suffix, True): + local_filepath = osp.join(runner.work_dir, filename) + out_filepath = self.file_client.join_path( + self.out_dir, filename) + with open(local_filepath, 'r') as f: + self.file_client.put_text(f.read(), out_filepath) + + runner.logger.info( + (f'The file {local_filepath} has been uploaded to ' + f'{out_filepath}.')) + + if not self.keep_local: + os.remove(local_filepath) + runner.logger.info( + (f'{local_filepath} was removed due to the ' + '`self.keep_local=False`')) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/wandb.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/wandb.py new file mode 100644 index 0000000000000000000000000000000000000000..9f6808462eb79ab2b04806a5d9f0d3dd079b5ea9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/logger/wandb.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ...dist_utils import master_only +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class WandbLoggerHook(LoggerHook): + + def __init__(self, + init_kwargs=None, + interval=10, + ignore_last=True, + reset_flag=False, + commit=True, + by_epoch=True, + with_step=True): + super(WandbLoggerHook, self).__init__(interval, ignore_last, + reset_flag, by_epoch) + self.import_wandb() + self.init_kwargs = init_kwargs + self.commit = commit + self.with_step = with_step + + def import_wandb(self): + try: + import wandb + except ImportError: + raise ImportError( + 'Please run "pip install wandb" to install wandb') + self.wandb = wandb + + @master_only + def before_run(self, runner): + super(WandbLoggerHook, self).before_run(runner) + if self.wandb is None: + self.import_wandb() + if self.init_kwargs: + self.wandb.init(**self.init_kwargs) + else: + self.wandb.init() + + @master_only + def log(self, runner): + tags = self.get_loggable_tags(runner) + if tags: + if self.with_step: + self.wandb.log( + tags, step=self.get_iter(runner), commit=self.commit) + else: + tags['global_step'] = self.get_iter(runner) + self.wandb.log(tags, commit=self.commit) + + @master_only + def after_run(self, runner): + self.wandb.join() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/lr_updater.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/lr_updater.py new file mode 100644 index 0000000000000000000000000000000000000000..f375932319cdbce2d50a7fc60b68ea750a60bb70 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/lr_updater.py @@ -0,0 +1,670 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers +from math import cos, pi + +import custom_mmpkg.custom_mmcv as mmcv +from .hook import HOOKS, Hook + + +class LrUpdaterHook(Hook): + """LR Scheduler in MMCV. + + Args: + by_epoch (bool): LR changes epoch by epoch + warmup (string): Type of warmup used. 
It can be None (use no warmup),
+            'constant', 'linear' or 'exp'.
+        warmup_iters (int): The number of iterations or epochs that warmup
+            lasts.
+        warmup_ratio (float): LR used at the beginning of warmup equals to
+            warmup_ratio * initial_lr.
+        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
+            means the number of epochs that warmup lasts, otherwise it means
+            the number of iterations that warmup lasts.
+    """
+
+    def __init__(self,
+                 by_epoch=True,
+                 warmup=None,
+                 warmup_iters=0,
+                 warmup_ratio=0.1,
+                 warmup_by_epoch=False):
+        # validate the "warmup" argument
+        if warmup is not None:
+            if warmup not in ['constant', 'linear', 'exp']:
+                raise ValueError(
+                    f'"{warmup}" is not a supported type for warming up, valid'
+                    ' types are "constant", "linear" and "exp"')
+            assert warmup_iters > 0, \
+                '"warmup_iters" must be a positive integer'
+            assert 0 < warmup_ratio <= 1.0, \
+                '"warmup_ratio" must be in range (0,1]'
+
+        self.by_epoch = by_epoch
+        self.warmup = warmup
+        self.warmup_iters = warmup_iters
+        self.warmup_ratio = warmup_ratio
+        self.warmup_by_epoch = warmup_by_epoch
+
+        if self.warmup_by_epoch:
+            self.warmup_epochs = self.warmup_iters
+            self.warmup_iters = None
+        else:
+            self.warmup_epochs = None
+
+        self.base_lr = []  # initial lr for all param groups
+        self.regular_lr = []  # expected lr if no warming up is performed
+
+    def _set_lr(self, runner, lr_groups):
+        if isinstance(runner.optimizer, dict):
+            for k, optim in runner.optimizer.items():
+                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
+                    param_group['lr'] = lr
+        else:
+            for param_group, lr in zip(runner.optimizer.param_groups,
+                                       lr_groups):
+                param_group['lr'] = lr
+
+    def get_lr(self, runner, base_lr):
+        raise NotImplementedError
+
+    def get_regular_lr(self, runner):
+        if isinstance(runner.optimizer, dict):
+            lr_groups = {}
+            for k in runner.optimizer.keys():
+                _lr_group = [
+                    self.get_lr(runner, _base_lr)
+                    for _base_lr in self.base_lr[k]
+                ]
+                lr_groups.update({k: _lr_group})
+
+            return lr_groups
+        else:
+            return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
+
+    def get_warmup_lr(self, cur_iters):
+
+        def _get_warmup_lr(cur_iters, regular_lr):
+            if self.warmup == 'constant':
+                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
+            elif self.warmup == 'linear':
+                k = (1 - cur_iters / self.warmup_iters) * (1 -
+                                                           self.warmup_ratio)
+                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
+            elif self.warmup == 'exp':
+                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+                warmup_lr = [_lr * k for _lr in regular_lr]
+            return warmup_lr
+
+        if isinstance(self.regular_lr, dict):
+            lr_groups = {}
+            for key, regular_lr in self.regular_lr.items():
+                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
+            return lr_groups
+        else:
+            return _get_warmup_lr(cur_iters, self.regular_lr)
+
+    def before_run(self, runner):
+        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
+        # it will be set according to the optimizer params
+        if isinstance(runner.optimizer, dict):
+            self.base_lr = {}
+            for k, optim in runner.optimizer.items():
+                for group in optim.param_groups:
+                    group.setdefault('initial_lr', group['lr'])
+                _base_lr = [
+                    group['initial_lr'] for group in optim.param_groups
+                ]
+                self.base_lr.update({k: _base_lr})
+        else:
+            for group in runner.optimizer.param_groups:
+                group.setdefault('initial_lr', group['lr'])
+            self.base_lr = [
+                group['initial_lr'] for group in runner.optimizer.param_groups
+            ]
+
+    def before_train_epoch(self, runner):
+        if self.warmup_iters
is None: + epoch_len = len(runner.data_loader) + self.warmup_iters = self.warmup_epochs * epoch_len + + if not self.by_epoch: + return + + self.regular_lr = self.get_regular_lr(runner) + self._set_lr(runner, self.regular_lr) + + def before_train_iter(self, runner): + cur_iter = runner.iter + if not self.by_epoch: + self.regular_lr = self.get_regular_lr(runner) + if self.warmup is None or cur_iter >= self.warmup_iters: + self._set_lr(runner, self.regular_lr) + else: + warmup_lr = self.get_warmup_lr(cur_iter) + self._set_lr(runner, warmup_lr) + elif self.by_epoch: + if self.warmup is None or cur_iter > self.warmup_iters: + return + elif cur_iter == self.warmup_iters: + self._set_lr(runner, self.regular_lr) + else: + warmup_lr = self.get_warmup_lr(cur_iter) + self._set_lr(runner, warmup_lr) + + +@HOOKS.register_module() +class FixedLrUpdaterHook(LrUpdaterHook): + + def __init__(self, **kwargs): + super(FixedLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + return base_lr + + +@HOOKS.register_module() +class StepLrUpdaterHook(LrUpdaterHook): + """Step LR scheduler with min_lr clipping. + + Args: + step (int | list[int]): Step to decay the LR. If an int value is given, + regard it as the decay interval. If a list is given, decay LR at + these steps. + gamma (float, optional): Decay LR ratio. Default: 0.1. + min_lr (float, optional): Minimum LR value to keep. If LR after decay + is lower than `min_lr`, it will be clipped to this value. If None + is given, we don't perform lr clipping. Default: None. + """ + + def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): + if isinstance(step, list): + assert mmcv.is_list_of(step, int) + assert all([s > 0 for s in step]) + elif isinstance(step, int): + assert step > 0 + else: + raise TypeError('"step" must be a list or integer') + self.step = step + self.gamma = gamma + self.min_lr = min_lr + super(StepLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + + # calculate exponential term + if isinstance(self.step, int): + exp = progress // self.step + else: + exp = len(self.step) + for i, s in enumerate(self.step): + if progress < s: + exp = i + break + + lr = base_lr * (self.gamma**exp) + if self.min_lr is not None: + # clip to a minimum value + lr = max(lr, self.min_lr) + return lr + + +@HOOKS.register_module() +class ExpLrUpdaterHook(LrUpdaterHook): + + def __init__(self, gamma, **kwargs): + self.gamma = gamma + super(ExpLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + return base_lr * self.gamma**progress + + +@HOOKS.register_module() +class PolyLrUpdaterHook(LrUpdaterHook): + + def __init__(self, power=1., min_lr=0., **kwargs): + self.power = power + self.min_lr = min_lr + super(PolyLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + coeff = (1 - progress / max_progress)**self.power + return (base_lr - self.min_lr) * coeff + self.min_lr + + +@HOOKS.register_module() +class InvLrUpdaterHook(LrUpdaterHook): + + def __init__(self, gamma, power=1., **kwargs): + self.gamma = gamma + self.power = power + super(InvLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + return base_lr * (1 + 
self.gamma * progress)**(-self.power)
+
+
+@HOOKS.register_module()
+class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+
+        if self.min_lr_ratio is not None:
+            target_lr = base_lr * self.min_lr_ratio
+        else:
+            target_lr = self.min_lr
+        return annealing_cos(base_lr, target_lr, progress / max_progress)
+
+
+@HOOKS.register_module()
+class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
+    """Flat + Cosine lr schedule.
+
+    Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128  # noqa: E501
+
+    Args:
+        start_percent (float): The percentage of the total training steps
+            after which to start annealing the learning rate.
+            The value should be in range [0, 1).
+            Default: 0.75
+        min_lr (float, optional): The minimum lr. Default: None.
+        min_lr_ratio (float, optional): The ratio of minimum lr to the base
+            lr. Either `min_lr` or `min_lr_ratio` should be specified.
+            Default: None.
+    """
+
+    def __init__(self,
+                 start_percent=0.75,
+                 min_lr=None,
+                 min_lr_ratio=None,
+                 **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        if start_percent < 0 or start_percent > 1 or not isinstance(
+                start_percent, float):
+            raise ValueError(
+                'expected a float between 0 and 1 for start_percent, but '
+                f'got {start_percent}')
+        self.start_percent = start_percent
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            start = round(runner.max_epochs * self.start_percent)
+            progress = runner.epoch - start
+            max_progress = runner.max_epochs - start
+        else:
+            start = round(runner.max_iters * self.start_percent)
+            progress = runner.iter - start
+            max_progress = runner.max_iters - start
+
+        if self.min_lr_ratio is not None:
+            target_lr = base_lr * self.min_lr_ratio
+        else:
+            target_lr = self.min_lr
+
+        if progress < 0:
+            return base_lr
+        else:
+            return annealing_cos(base_lr, target_lr, progress / max_progress)
+
+
+@HOOKS.register_module()
+class CosineRestartLrUpdaterHook(LrUpdaterHook):
+    """Cosine annealing with restarts learning rate scheme.
+
+    Args:
+        periods (list[int]): Periods for each cosine annealing cycle.
+        restart_weights (list[float], optional): Restart weights at each
+            restart iteration. Default: [1].
+        min_lr (float, optional): The minimum lr. Default: None.
+        min_lr_ratio (float, optional): The ratio of minimum lr to the base
+            lr. Either `min_lr` or `min_lr_ratio` should be specified.
+            Default: None.
+    """
+
+    def __init__(self,
+                 periods,
+                 restart_weights=[1],
+                 min_lr=None,
+                 min_lr_ratio=None,
+                 **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        self.periods = periods
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        self.restart_weights = restart_weights
+        assert (len(self.periods) == len(self.restart_weights)
+                ), 'periods and restart_weights should have the same length.'
+ super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) + + self.cumulative_periods = [ + sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) + ] + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + else: + progress = runner.iter + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + + idx = get_position_from_periods(progress, self.cumulative_periods) + current_weight = self.restart_weights[idx] + nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] + current_periods = self.periods[idx] + + alpha = min((progress - nearest_restart) / current_periods, 1) + return annealing_cos(base_lr, target_lr, alpha, current_weight) + + +def get_position_from_periods(iteration, cumulative_periods): + """Get the position from a period list. + + It will return the index of the right-closest number in the period list. + For example, the cumulative_periods = [100, 200, 300, 400], + if iteration == 50, return 0; + if iteration == 210, return 2; + if iteration == 300, return 3. + + Args: + iteration (int): Current iteration. + cumulative_periods (list[int]): Cumulative period list. + + Returns: + int: The position of the right-closest number in the period list. + """ + for i, period in enumerate(cumulative_periods): + if iteration < period: + return i + raise ValueError(f'Current iteration {iteration} exceeds ' + f'cumulative_periods {cumulative_periods}') + + +@HOOKS.register_module() +class CyclicLrUpdaterHook(LrUpdaterHook): + """Cyclic LR Scheduler. + + Implement the cyclical learning rate policy (CLR) described in + https://arxiv.org/pdf/1506.01186.pdf + + Different from the original paper, we use cosine annealing rather than + triangular policy inside a cycle. This improves the performance in the + 3D detection area. + + Args: + by_epoch (bool): Whether to update LR by epoch. + target_ratio (tuple[float]): Relative ratio of the highest LR and the + lowest LR to the initial LR. + cyclic_times (int): Number of cycles during training + step_ratio_up (float): The ratio of the increasing process of LR in + the total cycle. + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: 'cos' for cosine annealing, + 'linear' for linear annealing. Default: 'cos'. 
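The restart bookkeeping in `CosineRestartLrUpdaterHook` hinges on `get_position_from_periods` plus the cumulative-period prefix sums. A standalone sketch using the same example values as the docstring (only `annealing_cos` is copied from this file; the rest mirrors `get_lr`):

```python
# Sketch of the cosine-restart lookup, mirroring get_lr above.
from math import cos, pi

def annealing_cos(start, end, factor, weight=1):
    return end + 0.5 * weight * (start - end) * (cos(pi * factor) + 1)

def get_position_from_periods(iteration, cumulative_periods):
    for i, period in enumerate(cumulative_periods):
        if iteration < period:
            return i
    raise ValueError(f'{iteration} exceeds {cumulative_periods}')

periods = [100, 100, 100, 100]                              # per-cycle lengths
cum = [sum(periods[:i + 1]) for i in range(len(periods))]   # [100, 200, 300, 400]
assert get_position_from_periods(50, cum) == 0
assert get_position_from_periods(210, cum) == 2
assert get_position_from_periods(300, cum) == 3

progress, base_lr, min_lr = 210, 0.1, 1e-4
idx = get_position_from_periods(progress, cum)
nearest_restart = 0 if idx == 0 else cum[idx - 1]
alpha = min((progress - nearest_restart) / periods[idx], 1)  # progress in cycle
print(annealing_cos(base_lr, min_lr, alpha))
```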
+ """ + + def __init__(self, + by_epoch=False, + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + anneal_strategy='cos', + **kwargs): + if isinstance(target_ratio, float): + target_ratio = (target_ratio, target_ratio / 1e5) + elif isinstance(target_ratio, tuple): + target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ + if len(target_ratio) == 1 else target_ratio + else: + raise ValueError('target_ratio should be either float ' + f'or tuple, got {type(target_ratio)}') + + assert len(target_ratio) == 2, \ + '"target_ratio" must be list or tuple of two floats' + assert 0 <= step_ratio_up < 1.0, \ + '"step_ratio_up" must be in range [0,1)' + + self.target_ratio = target_ratio + self.cyclic_times = cyclic_times + self.step_ratio_up = step_ratio_up + self.lr_phases = [] # init lr_phases + # validate anneal_strategy + if anneal_strategy not in ['cos', 'linear']: + raise ValueError('anneal_strategy must be one of "cos" or ' + f'"linear", instead got {anneal_strategy}') + elif anneal_strategy == 'cos': + self.anneal_func = annealing_cos + elif anneal_strategy == 'linear': + self.anneal_func = annealing_linear + + assert not by_epoch, \ + 'currently only support "by_epoch" = False' + super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) + + def before_run(self, runner): + super(CyclicLrUpdaterHook, self).before_run(runner) + # initiate lr_phases + # total lr_phases are separated as up and down + max_iter_per_phase = runner.max_iters // self.cyclic_times + iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) + self.lr_phases.append( + [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) + self.lr_phases.append([ + iter_up_phase, max_iter_per_phase, max_iter_per_phase, + self.target_ratio[0], self.target_ratio[1] + ]) + + def get_lr(self, runner, base_lr): + curr_iter = runner.iter + for (start_iter, end_iter, max_iter_per_phase, start_ratio, + end_ratio) in self.lr_phases: + curr_iter %= max_iter_per_phase + if start_iter <= curr_iter < end_iter: + progress = curr_iter - start_iter + return self.anneal_func(base_lr * start_ratio, + base_lr * end_ratio, + progress / (end_iter - start_iter)) + + +@HOOKS.register_module() +class OneCycleLrUpdaterHook(LrUpdaterHook): + """One Cycle LR Scheduler. + + The 1cycle learning rate policy changes the learning rate after every + batch. The one cycle learning rate policy is described in + https://arxiv.org/pdf/1708.07120.pdf + + Args: + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. + total_steps (int, optional): The total number of steps in the cycle. + Note that if a value is not provided here, it will be the max_iter + of runner. Default: None. + pct_start (float): The percentage of the cycle (in number of steps) + spent increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: 'cos' for cosine annealing, + 'linear' for linear annealing. + Default: 'cos' + div_factor (float): Determines the initial learning rate via + initial_lr = max_lr/div_factor + Default: 25 + final_div_factor (float): Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor + Default: 1e4 + three_phase (bool): If three_phase is True, use a third phase of the + schedule to annihilate the learning rate according to + final_div_factor instead of modifying the second phase (the first + two phases will be symmetrical about the step indicated by + pct_start). 
+ Default: False + """ + + def __init__(self, + max_lr, + total_steps=None, + pct_start=0.3, + anneal_strategy='cos', + div_factor=25, + final_div_factor=1e4, + three_phase=False, + **kwargs): + # validate by_epoch, currently only support by_epoch = False + if 'by_epoch' not in kwargs: + kwargs['by_epoch'] = False + else: + assert not kwargs['by_epoch'], \ + 'currently only support "by_epoch" = False' + if not isinstance(max_lr, (numbers.Number, list, dict)): + raise ValueError('the type of max_lr must be the one of list or ' + f'dict, but got {type(max_lr)}') + self._max_lr = max_lr + if total_steps is not None: + if not isinstance(total_steps, int): + raise ValueError('the type of total_steps must be int, but' + f'got {type(total_steps)}') + self.total_steps = total_steps + # validate pct_start + if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): + raise ValueError('expected float between 0 and 1 pct_start, but ' + f'got {pct_start}') + self.pct_start = pct_start + # validate anneal_strategy + if anneal_strategy not in ['cos', 'linear']: + raise ValueError('anneal_strategy must be one of "cos" or ' + f'"linear", instead got {anneal_strategy}') + elif anneal_strategy == 'cos': + self.anneal_func = annealing_cos + elif anneal_strategy == 'linear': + self.anneal_func = annealing_linear + self.div_factor = div_factor + self.final_div_factor = final_div_factor + self.three_phase = three_phase + self.lr_phases = [] # init lr_phases + super(OneCycleLrUpdaterHook, self).__init__(**kwargs) + + def before_run(self, runner): + if hasattr(self, 'total_steps'): + total_steps = self.total_steps + else: + total_steps = runner.max_iters + if total_steps < runner.max_iters: + raise ValueError( + 'The total steps must be greater than or equal to max ' + f'iterations {runner.max_iters} of runner, but total steps ' + f'is {total_steps}.') + + if isinstance(runner.optimizer, dict): + self.base_lr = {} + for k, optim in runner.optimizer.items(): + _max_lr = format_param(k, optim, self._max_lr) + self.base_lr[k] = [lr / self.div_factor for lr in _max_lr] + for group, lr in zip(optim.param_groups, self.base_lr[k]): + group.setdefault('initial_lr', lr) + else: + k = type(runner.optimizer).__name__ + _max_lr = format_param(k, runner.optimizer, self._max_lr) + self.base_lr = [lr / self.div_factor for lr in _max_lr] + for group, lr in zip(runner.optimizer.param_groups, self.base_lr): + group.setdefault('initial_lr', lr) + + if self.three_phase: + self.lr_phases.append( + [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) + self.lr_phases.append([ + float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1 + ]) + self.lr_phases.append( + [total_steps - 1, 1, 1 / self.final_div_factor]) + else: + self.lr_phases.append( + [float(self.pct_start * total_steps) - 1, 1, self.div_factor]) + self.lr_phases.append( + [total_steps - 1, self.div_factor, 1 / self.final_div_factor]) + + def get_lr(self, runner, base_lr): + curr_iter = runner.iter + start_iter = 0 + for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases): + if curr_iter <= end_iter: + pct = (curr_iter - start_iter) / (end_iter - start_iter) + lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr, + pct) + break + start_iter = end_iter + return lr + + +def annealing_cos(start, end, factor, weight=1): + """Calculate annealing cos learning rate. + + Cosine anneal from `weight * start + (1 - weight) * end` to `end` as + percentage goes from 0.0 to 1.0. 
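In the two-phase (default) OneCycle layout built in `before_run` above, `div_factor` fixes the warmup start and `final_div_factor` fixes the annealing floor. A small standalone sketch of the phase boundaries for toy settings (values are illustrative):

```python
# Sketch of the two-phase OneCycle boundaries built in before_run above.
max_lr, div_factor, final_div_factor = 0.1, 25, 1e4
total_steps, pct_start = 1000, 0.3

initial_lr = max_lr / div_factor                 # warmup starts here
min_lr = initial_lr / final_div_factor           # annealing ends here
lr_phases = [
    # [end_iter, start multiplier, end multiplier] relative to initial_lr
    [float(pct_start * total_steps) - 1, 1, div_factor],
    [total_steps - 1, div_factor, 1 / final_div_factor],
]
print(initial_lr, initial_lr * div_factor, min_lr)  # 0.004 0.1 4e-07
print(lr_phases)                                    # [[299.0, 1, 25], [999, 25, 0.0001]]
```

With `three_phase=True` the first two phases become symmetric around the `pct_start` boundary and a third phase annihilates the LR down to `min_lr`.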
+ + Args: + start (float): The starting learning rate of the cosine annealing. + end (float): The ending learing rate of the cosine annealing. + factor (float): The coefficient of `pi` when calculating the current + percentage. Range from 0.0 to 1.0. + weight (float, optional): The combination factor of `start` and `end` + when calculating the actual starting learning rate. Default to 1. + """ + cos_out = cos(pi * factor) + 1 + return end + 0.5 * weight * (start - end) * cos_out + + +def annealing_linear(start, end, factor): + """Calculate annealing linear learning rate. + + Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0. + + Args: + start (float): The starting learning rate of the linear annealing. + end (float): The ending learing rate of the linear annealing. + factor (float): The coefficient of `pi` when calculating the current + percentage. Range from 0.0 to 1.0. + """ + return start + (end - start) * factor + + +def format_param(name, optim, param): + if isinstance(param, numbers.Number): + return [param] * len(optim.param_groups) + elif isinstance(param, (list, tuple)): # multi param groups + if len(param) != len(optim.param_groups): + raise ValueError(f'expected {len(optim.param_groups)} ' + f'values for {name}, got {len(param)}') + return param + else: # multi optimizers + if name not in param: + raise KeyError(f'{name} is not found in {param.keys()}') + return param[name] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/memory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..70cf9a838fb314e3bd3c07aadbc00921a81e83ed --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/memory.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class EmptyCacheHook(Hook): + + def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): + self._before_epoch = before_epoch + self._after_epoch = after_epoch + self._after_iter = after_iter + + def after_iter(self, runner): + if self._after_iter: + torch.cuda.empty_cache() + + def before_epoch(self, runner): + if self._before_epoch: + torch.cuda.empty_cache() + + def after_epoch(self, runner): + if self._after_epoch: + torch.cuda.empty_cache() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/momentum_updater.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/momentum_updater.py new file mode 100644 index 0000000000000000000000000000000000000000..29b6c7c531a24603cbfee463f23e0c310cbfff41 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/momentum_updater.py @@ -0,0 +1,493 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
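Before moving to the momentum hooks, a quick endpoint check for the two annealing helpers that close `lr_updater.py` above (a standalone sketch with the formulas copied verbatim):

```python
# Endpoint check for the annealing helpers defined in lr_updater.py.
from math import cos, pi, isclose

def annealing_cos(start, end, factor, weight=1):
    return end + 0.5 * weight * (start - end) * (cos(pi * factor) + 1)

def annealing_linear(start, end, factor):
    return start + (end - start) * factor

assert isclose(annealing_cos(0.1, 0.0, 0.0), 0.1)    # factor 0 -> start
assert isclose(annealing_cos(0.1, 0.0, 1.0), 0.0)    # factor 1 -> end
# weight < 1 lowers the effective start to weight*start + (1-weight)*end
assert isclose(annealing_cos(0.1, 0.0, 0.0, weight=0.5), 0.05)
assert isclose(annealing_linear(0.1, 0.0, 0.5), 0.05)
```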
+import custom_mmpkg.custom_mmcv as mmcv +from .hook import HOOKS, Hook +from .lr_updater import annealing_cos, annealing_linear, format_param + + +class MomentumUpdaterHook(Hook): + + def __init__(self, + by_epoch=True, + warmup=None, + warmup_iters=0, + warmup_ratio=0.9): + # validate the "warmup" argument + if warmup is not None: + if warmup not in ['constant', 'linear', 'exp']: + raise ValueError( + f'"{warmup}" is not a supported type for warming up, valid' + ' types are "constant" and "linear"') + if warmup is not None: + assert warmup_iters > 0, \ + '"warmup_iters" must be a positive integer' + assert 0 < warmup_ratio <= 1.0, \ + '"warmup_momentum" must be in range (0,1]' + + self.by_epoch = by_epoch + self.warmup = warmup + self.warmup_iters = warmup_iters + self.warmup_ratio = warmup_ratio + + self.base_momentum = [] # initial momentum for all param groups + self.regular_momentum = [ + ] # expected momentum if no warming up is performed + + def _set_momentum(self, runner, momentum_groups): + if isinstance(runner.optimizer, dict): + for k, optim in runner.optimizer.items(): + for param_group, mom in zip(optim.param_groups, + momentum_groups[k]): + if 'momentum' in param_group.keys(): + param_group['momentum'] = mom + elif 'betas' in param_group.keys(): + param_group['betas'] = (mom, param_group['betas'][1]) + else: + for param_group, mom in zip(runner.optimizer.param_groups, + momentum_groups): + if 'momentum' in param_group.keys(): + param_group['momentum'] = mom + elif 'betas' in param_group.keys(): + param_group['betas'] = (mom, param_group['betas'][1]) + + def get_momentum(self, runner, base_momentum): + raise NotImplementedError + + def get_regular_momentum(self, runner): + if isinstance(runner.optimizer, dict): + momentum_groups = {} + for k in runner.optimizer.keys(): + _momentum_group = [ + self.get_momentum(runner, _base_momentum) + for _base_momentum in self.base_momentum[k] + ] + momentum_groups.update({k: _momentum_group}) + return momentum_groups + else: + return [ + self.get_momentum(runner, _base_momentum) + for _base_momentum in self.base_momentum + ] + + def get_warmup_momentum(self, cur_iters): + + def _get_warmup_momentum(cur_iters, regular_momentum): + if self.warmup == 'constant': + warmup_momentum = [ + _momentum / self.warmup_ratio + for _momentum in self.regular_momentum + ] + elif self.warmup == 'linear': + k = (1 - cur_iters / self.warmup_iters) * (1 - + self.warmup_ratio) + warmup_momentum = [ + _momentum / (1 - k) for _momentum in self.regular_mom + ] + elif self.warmup == 'exp': + k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) + warmup_momentum = [ + _momentum / k for _momentum in self.regular_mom + ] + return warmup_momentum + + if isinstance(self.regular_momentum, dict): + momentum_groups = {} + for key, regular_momentum in self.regular_momentum.items(): + momentum_groups[key] = _get_warmup_momentum( + cur_iters, regular_momentum) + return momentum_groups + else: + return _get_warmup_momentum(cur_iters, self.regular_momentum) + + def before_run(self, runner): + # NOTE: when resuming from a checkpoint, + # if 'initial_momentum' is not saved, + # it will be set according to the optimizer params + if isinstance(runner.optimizer, dict): + self.base_momentum = {} + for k, optim in runner.optimizer.items(): + for group in optim.param_groups: + if 'momentum' in group.keys(): + group.setdefault('initial_momentum', group['momentum']) + else: + group.setdefault('initial_momentum', group['betas'][0]) + _base_momentum = [ + group['initial_momentum'] 
for group in optim.param_groups + ] + self.base_momentum.update({k: _base_momentum}) + else: + for group in runner.optimizer.param_groups: + if 'momentum' in group.keys(): + group.setdefault('initial_momentum', group['momentum']) + else: + group.setdefault('initial_momentum', group['betas'][0]) + self.base_momentum = [ + group['initial_momentum'] + for group in runner.optimizer.param_groups + ] + + def before_train_epoch(self, runner): + if not self.by_epoch: + return + self.regular_mom = self.get_regular_momentum(runner) + self._set_momentum(runner, self.regular_mom) + + def before_train_iter(self, runner): + cur_iter = runner.iter + if not self.by_epoch: + self.regular_mom = self.get_regular_momentum(runner) + if self.warmup is None or cur_iter >= self.warmup_iters: + self._set_momentum(runner, self.regular_mom) + else: + warmup_momentum = self.get_warmup_momentum(cur_iter) + self._set_momentum(runner, warmup_momentum) + elif self.by_epoch: + if self.warmup is None or cur_iter > self.warmup_iters: + return + elif cur_iter == self.warmup_iters: + self._set_momentum(runner, self.regular_mom) + else: + warmup_momentum = self.get_warmup_momentum(cur_iter) + self._set_momentum(runner, warmup_momentum) + + +@HOOKS.register_module() +class StepMomentumUpdaterHook(MomentumUpdaterHook): + """Step momentum scheduler with min value clipping. + + Args: + step (int | list[int]): Step to decay the momentum. If an int value is + given, regard it as the decay interval. If a list is given, decay + momentum at these steps. + gamma (float, optional): Decay momentum ratio. Default: 0.5. + min_momentum (float, optional): Minimum momentum value to keep. If + momentum after decay is lower than this value, it will be clipped + accordingly. If None is given, we don't perform lr clipping. + Default: None. 
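The `momentum`/`betas` duality that `_set_momentum` and `before_run` handle above comes from the optimizers themselves: SGD exposes a `momentum` key in its param groups, while Adam-style optimizers store the equivalent as `betas[0]`. A runnable standalone sketch of the same branching:

```python
# Sketch of the momentum/betas duality handled by _set_momentum above.
import torch

p = torch.nn.Parameter(torch.zeros(1))
sgd = torch.optim.SGD([p], lr=0.1, momentum=0.9)
adam = torch.optim.Adam([p], lr=0.1, betas=(0.9, 0.999))

def set_momentum(optimizer, mom):
    for group in optimizer.param_groups:
        if 'momentum' in group:
            group['momentum'] = mom                       # SGD-style
        elif 'betas' in group:
            group['betas'] = (mom, group['betas'][1])     # Adam-style

set_momentum(sgd, 0.85)
set_momentum(adam, 0.85)
print(sgd.param_groups[0]['momentum'])   # 0.85
print(adam.param_groups[0]['betas'])     # (0.85, 0.999)
```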
+ """ + + def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs): + if isinstance(step, list): + assert mmcv.is_list_of(step, int) + assert all([s > 0 for s in step]) + elif isinstance(step, int): + assert step > 0 + else: + raise TypeError('"step" must be a list or integer') + self.step = step + self.gamma = gamma + self.min_momentum = min_momentum + super(StepMomentumUpdaterHook, self).__init__(**kwargs) + + def get_momentum(self, runner, base_momentum): + progress = runner.epoch if self.by_epoch else runner.iter + + # calculate exponential term + if isinstance(self.step, int): + exp = progress // self.step + else: + exp = len(self.step) + for i, s in enumerate(self.step): + if progress < s: + exp = i + break + + momentum = base_momentum * (self.gamma**exp) + if self.min_momentum is not None: + # clip to a minimum value + momentum = max(momentum, self.min_momentum) + return momentum + + +@HOOKS.register_module() +class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook): + + def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs): + assert (min_momentum is None) ^ (min_momentum_ratio is None) + self.min_momentum = min_momentum + self.min_momentum_ratio = min_momentum_ratio + super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs) + + def get_momentum(self, runner, base_momentum): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + if self.min_momentum_ratio is not None: + target_momentum = base_momentum * self.min_momentum_ratio + else: + target_momentum = self.min_momentum + return annealing_cos(base_momentum, target_momentum, + progress / max_progress) + + +@HOOKS.register_module() +class CyclicMomentumUpdaterHook(MomentumUpdaterHook): + """Cyclic momentum Scheduler. + + Implement the cyclical momentum scheduler policy described in + https://arxiv.org/pdf/1708.07120.pdf + + This momentum scheduler usually used together with the CyclicLRUpdater + to improve the performance in the 3D detection area. + + Attributes: + target_ratio (tuple[float]): Relative ratio of the lowest momentum and + the highest momentum to the initial momentum. + cyclic_times (int): Number of cycles during training + step_ratio_up (float): The ratio of the increasing process of momentum + in the total cycle. + by_epoch (bool): Whether to update momentum by epoch. 
+ """ + + def __init__(self, + by_epoch=False, + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, + **kwargs): + if isinstance(target_ratio, float): + target_ratio = (target_ratio, target_ratio / 1e5) + elif isinstance(target_ratio, tuple): + target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ + if len(target_ratio) == 1 else target_ratio + else: + raise ValueError('target_ratio should be either float ' + f'or tuple, got {type(target_ratio)}') + + assert len(target_ratio) == 2, \ + '"target_ratio" must be list or tuple of two floats' + assert 0 <= step_ratio_up < 1.0, \ + '"step_ratio_up" must be in range [0,1)' + + self.target_ratio = target_ratio + self.cyclic_times = cyclic_times + self.step_ratio_up = step_ratio_up + self.momentum_phases = [] # init momentum_phases + # currently only support by_epoch=False + assert not by_epoch, \ + 'currently only support "by_epoch" = False' + super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs) + + def before_run(self, runner): + super(CyclicMomentumUpdaterHook, self).before_run(runner) + # initiate momentum_phases + # total momentum_phases are separated as up and down + max_iter_per_phase = runner.max_iters // self.cyclic_times + iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) + self.momentum_phases.append( + [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) + self.momentum_phases.append([ + iter_up_phase, max_iter_per_phase, max_iter_per_phase, + self.target_ratio[0], self.target_ratio[1] + ]) + + def get_momentum(self, runner, base_momentum): + curr_iter = runner.iter + for (start_iter, end_iter, max_iter_per_phase, start_ratio, + end_ratio) in self.momentum_phases: + curr_iter %= max_iter_per_phase + if start_iter <= curr_iter < end_iter: + progress = curr_iter - start_iter + return annealing_cos(base_momentum * start_ratio, + base_momentum * end_ratio, + progress / (end_iter - start_iter)) + + +@HOOKS.register_module() +class OneCycleMomentumUpdaterHook(MomentumUpdaterHook): + """OneCycle momentum Scheduler. + + This momentum scheduler usually used together with the OneCycleLrUpdater + to improve the performance. + + Args: + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.85 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is + 'max_momentum' and learning rate is 'base_lr' + Default: 0.95 + pct_start (float): The percentage of the cycle (in number of steps) + spent increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: 'cos' for cosine annealing, + 'linear' for linear annealing. + Default: 'cos' + three_phase (bool): If three_phase is True, use a third phase of the + schedule to annihilate the learning rate according to + final_div_factor instead of modifying the second phase (the first + two phases will be symmetrical about the step indicated by + pct_start). 
+ Default: False + """ + + def __init__(self, + base_momentum=0.85, + max_momentum=0.95, + pct_start=0.3, + anneal_strategy='cos', + three_phase=False, + **kwargs): + # validate by_epoch, currently only support by_epoch=False + if 'by_epoch' not in kwargs: + kwargs['by_epoch'] = False + else: + assert not kwargs['by_epoch'], \ + 'currently only support "by_epoch" = False' + if not isinstance(base_momentum, (float, list, dict)): + raise ValueError('base_momentum must be the type among of float,' + 'list or dict.') + self._base_momentum = base_momentum + if not isinstance(max_momentum, (float, list, dict)): + raise ValueError('max_momentum must be the type among of float,' + 'list or dict.') + self._max_momentum = max_momentum + # validate pct_start + if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): + raise ValueError('Expected float between 0 and 1 pct_start, but ' + f'got {pct_start}') + self.pct_start = pct_start + # validate anneal_strategy + if anneal_strategy not in ['cos', 'linear']: + raise ValueError('anneal_strategy must by one of "cos" or ' + f'"linear", instead got {anneal_strategy}') + elif anneal_strategy == 'cos': + self.anneal_func = annealing_cos + elif anneal_strategy == 'linear': + self.anneal_func = annealing_linear + self.three_phase = three_phase + self.momentum_phases = [] # init momentum_phases + super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs) + + def before_run(self, runner): + if isinstance(runner.optimizer, dict): + for k, optim in runner.optimizer.items(): + if ('momentum' not in optim.defaults + and 'betas' not in optim.defaults): + raise ValueError('optimizer must support momentum with' + 'option enabled') + self.use_beta1 = 'betas' in optim.defaults + _base_momentum = format_param(k, optim, self._base_momentum) + _max_momentum = format_param(k, optim, self._max_momentum) + for group, b_momentum, m_momentum in zip( + optim.param_groups, _base_momentum, _max_momentum): + if self.use_beta1: + _, beta2 = group['betas'] + group['betas'] = (m_momentum, beta2) + else: + group['momentum'] = m_momentum + group['base_momentum'] = b_momentum + group['max_momentum'] = m_momentum + else: + optim = runner.optimizer + if ('momentum' not in optim.defaults + and 'betas' not in optim.defaults): + raise ValueError('optimizer must support momentum with' + 'option enabled') + self.use_beta1 = 'betas' in optim.defaults + k = type(optim).__name__ + _base_momentum = format_param(k, optim, self._base_momentum) + _max_momentum = format_param(k, optim, self._max_momentum) + for group, b_momentum, m_momentum in zip(optim.param_groups, + _base_momentum, + _max_momentum): + if self.use_beta1: + _, beta2 = group['betas'] + group['betas'] = (m_momentum, beta2) + else: + group['momentum'] = m_momentum + group['base_momentum'] = b_momentum + group['max_momentum'] = m_momentum + + if self.three_phase: + self.momentum_phases.append({ + 'end_iter': + float(self.pct_start * runner.max_iters) - 1, + 'start_momentum': + 'max_momentum', + 'end_momentum': + 'base_momentum' + }) + self.momentum_phases.append({ + 'end_iter': + float(2 * self.pct_start * runner.max_iters) - 2, + 'start_momentum': + 'base_momentum', + 'end_momentum': + 'max_momentum' + }) + self.momentum_phases.append({ + 'end_iter': runner.max_iters - 1, + 'start_momentum': 'max_momentum', + 'end_momentum': 'max_momentum' + }) + else: + self.momentum_phases.append({ + 'end_iter': + float(self.pct_start * runner.max_iters) - 1, + 'start_momentum': + 'max_momentum', + 'end_momentum': + 'base_momentum' + }) 
+ self.momentum_phases.append({ + 'end_iter': runner.max_iters - 1, + 'start_momentum': 'base_momentum', + 'end_momentum': 'max_momentum' + }) + + def _set_momentum(self, runner, momentum_groups): + if isinstance(runner.optimizer, dict): + for k, optim in runner.optimizer.items(): + for param_group, mom in zip(optim.param_groups, + momentum_groups[k]): + if 'momentum' in param_group.keys(): + param_group['momentum'] = mom + elif 'betas' in param_group.keys(): + param_group['betas'] = (mom, param_group['betas'][1]) + else: + for param_group, mom in zip(runner.optimizer.param_groups, + momentum_groups): + if 'momentum' in param_group.keys(): + param_group['momentum'] = mom + elif 'betas' in param_group.keys(): + param_group['betas'] = (mom, param_group['betas'][1]) + + def get_momentum(self, runner, param_group): + curr_iter = runner.iter + start_iter = 0 + for i, phase in enumerate(self.momentum_phases): + end_iter = phase['end_iter'] + if curr_iter <= end_iter or i == len(self.momentum_phases) - 1: + pct = (curr_iter - start_iter) / (end_iter - start_iter) + momentum = self.anneal_func( + param_group[phase['start_momentum']], + param_group[phase['end_momentum']], pct) + break + start_iter = end_iter + return momentum + + def get_regular_momentum(self, runner): + if isinstance(runner.optimizer, dict): + momentum_groups = {} + for k, optim in runner.optimizer.items(): + _momentum_group = [ + self.get_momentum(runner, param_group) + for param_group in optim.param_groups + ] + momentum_groups.update({k: _momentum_group}) + return momentum_groups + else: + momentum_groups = [] + for param_group in runner.optimizer.param_groups: + momentum_groups.append(self.get_momentum(runner, param_group)) + return momentum_groups diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/optimizer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f111733b6d37a86dc396442e39b67a8880c99a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/optimizer.py @@ -0,0 +1,508 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from collections import defaultdict +from itertools import chain + +from torch.nn.utils import clip_grad + +from custom_mmpkg.custom_mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version +from ..dist_utils import allreduce_grads +from ..fp16_utils import LossScaler, wrap_fp16_model +from .hook import HOOKS, Hook + +try: + # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported + # and used; otherwise, auto fp16 will adopt mmcv's implementation. 
+ from torch.cuda.amp import GradScaler +except ImportError: + pass + + +@HOOKS.register_module() +class OptimizerHook(Hook): + + def __init__(self, grad_clip=None): + self.grad_clip = grad_clip + + def clip_grads(self, params): + params = list( + filter(lambda p: p.requires_grad and p.grad is not None, params)) + if len(params) > 0: + return clip_grad.clip_grad_norm_(params, **self.grad_clip) + + def after_train_iter(self, runner): + runner.optimizer.zero_grad() + runner.outputs['loss'].backward() + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + runner.optimizer.step() + + +@HOOKS.register_module() +class GradientCumulativeOptimizerHook(OptimizerHook): + """Optimizer Hook implements multi-iters gradient cumulating. + + Args: + cumulative_iters (int, optional): Num of gradient cumulative iters. + The optimizer will step every `cumulative_iters` iters. + Defaults to 1. + + Examples: + >>> # Use cumulative_iters to simulate a large batch size + >>> # It is helpful when the hardware cannot handle a large batch size. + >>> loader = DataLoader(data, batch_size=64) + >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4) + >>> # almost equals to + >>> loader = DataLoader(data, batch_size=256) + >>> optim_hook = OptimizerHook() + """ + + def __init__(self, cumulative_iters=1, **kwargs): + super(GradientCumulativeOptimizerHook, self).__init__(**kwargs) + + assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \ + f'cumulative_iters only accepts positive int, but got ' \ + f'{type(cumulative_iters)} instead.' + + self.cumulative_iters = cumulative_iters + self.divisible_iters = 0 + self.remainder_iters = 0 + self.initialized = False + + def has_batch_norm(self, module): + if isinstance(module, _BatchNorm): + return True + for m in module.children(): + if self.has_batch_norm(m): + return True + return False + + def _init(self, runner): + if runner.iter % self.cumulative_iters != 0: + runner.logger.warning( + 'Resume iter number is not divisible by cumulative_iters in ' + 'GradientCumulativeOptimizerHook, which means the gradient of ' + 'some iters is lost and the result may be influenced slightly.' 
+ ) + + if self.has_batch_norm(runner.model) and self.cumulative_iters > 1: + runner.logger.warning( + 'GradientCumulativeOptimizerHook may slightly decrease ' + 'performance if the model has BatchNorm layers.') + + residual_iters = runner.max_iters - runner.iter + + self.divisible_iters = ( + residual_iters // self.cumulative_iters * self.cumulative_iters) + self.remainder_iters = residual_iters - self.divisible_iters + + self.initialized = True + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + loss = runner.outputs['loss'] + loss = loss / loss_factor + loss.backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + runner.optimizer.step() + runner.optimizer.zero_grad() + + +if (TORCH_VERSION != 'parrots' + and digit_version(TORCH_VERSION) >= digit_version('1.6.0')): + + @HOOKS.register_module() + class Fp16OptimizerHook(OptimizerHook): + """FP16 optimizer hook (using PyTorch's implementation). + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, + to take care of the optimization procedure. + + Args: + loss_scale (float | str | dict): Scale factor configuration. + If loss_scale is a float, static loss scaling will be used with + the specified scale. If loss_scale is a string, it must be + 'dynamic', then dynamic loss scaling will be used. + It can also be a dict containing arguments of GradScalar. + Defaults to 512. For Pytorch >= 1.6, mmcv uses official + implementation of GradScaler. If you use a dict version of + loss_scale to create GradScaler, please refer to: + https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler + for the parameters. + + Examples: + >>> loss_scale = dict( + ... init_scale=65536.0, + ... growth_factor=2.0, + ... backoff_factor=0.5, + ... growth_interval=2000 + ... 
) + >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale) + """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.distributed = distributed + self._scale_update_param = None + if loss_scale == 'dynamic': + self.loss_scaler = GradScaler() + elif isinstance(loss_scale, float): + self._scale_update_param = loss_scale + self.loss_scaler = GradScaler(init_scale=loss_scale) + elif isinstance(loss_scale, dict): + self.loss_scaler = GradScaler(**loss_scale) + else: + raise ValueError('loss_scale must be of type float, dict, or ' + f'"dynamic", got {loss_scale}') + + def before_run(self, runner): + """Preparing steps before Mixed Precision Training.""" + # wrap model mode to fp16 + wrap_fp16_model(runner.model) + # resume from state dict + if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: + scaler_state_dict = runner.meta['fp16']['loss_scaler'] + self.loss_scaler.load_state_dict(scaler_state_dict) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, + fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new( + fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), + fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + """Backward optimization steps for Mixed Precision Training. For + dynamic loss scaling, please refer to + https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler. + + 1. Scale the loss by a scale factor. + 2. Backward the loss to obtain the gradients. + 3. Unscale the optimizer’s gradient tensors. + 4. Call optimizer.step() and update scale factor. + 5. Save loss_scaler state_dict for resume purpose. + """ + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + + self.loss_scaler.scale(runner.outputs['loss']).backward() + self.loss_scaler.unscale_(runner.optimizer) + # grad clip + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # backward and update scaler + self.loss_scaler.step(runner.optimizer) + self.loss_scaler.update(self._scale_update_param) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + @HOOKS.register_module() + class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, + Fp16OptimizerHook): + """Fp16 optimizer Hook (using PyTorch's implementation) implements + multi-iters gradient cumulating. + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, + to take care of the optimization procedure. 
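Stripped of runner plumbing, `GradientCumulativeOptimizerHook.after_train_iter` above is the standard gradient-accumulation pattern: divide each loss by the accumulation window so gradients average rather than sum, and only step and zero every `cumulative_iters` iterations. A minimal standalone sketch:

```python
# Standalone sketch of the accumulation pattern the hook implements.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
cumulative_iters = 4

for it in range(8):
    x = torch.randn(2, 4)
    loss = model(x).pow(2).mean() / cumulative_iters  # average over the window
    loss.backward()                                    # grads accumulate in .grad
    if (it + 1) % cumulative_iters == 0:
        optimizer.step()
        optimizer.zero_grad()
```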
+ """ + + def __init__(self, *args, **kwargs): + super(GradientCumulativeFp16OptimizerHook, + self).__init__(*args, **kwargs) + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + loss = runner.outputs['loss'] + loss = loss / loss_factor + + self.loss_scaler.scale(loss).backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + # copy fp16 grads in the model to fp32 params in the optimizer + self.loss_scaler.unscale_(runner.optimizer) + + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + + # backward and update scaler + self.loss_scaler.step(runner.optimizer) + self.loss_scaler.update(self._scale_update_param) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + # clear grads + runner.model.zero_grad() + runner.optimizer.zero_grad() + +else: + + @HOOKS.register_module() + class Fp16OptimizerHook(OptimizerHook): + """FP16 optimizer hook (mmcv's implementation). + + The steps of fp16 optimizer is as follows. + 1. Scale the loss value. + 2. BP in the fp16 model. + 2. Copy gradients from fp16 model to fp32 weights. + 3. Update fp32 weights. + 4. Copy updated parameters from fp32 weights to fp16 model. + + Refer to https://arxiv.org/abs/1710.03740 for more details. + + Args: + loss_scale (float | str | dict): Scale factor configuration. + If loss_scale is a float, static loss scaling will be used with + the specified scale. If loss_scale is a string, it must be + 'dynamic', then dynamic loss scaling will be used. + It can also be a dict containing arguments of LossScaler. + Defaults to 512. + """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.distributed = distributed + if loss_scale == 'dynamic': + self.loss_scaler = LossScaler(mode='dynamic') + elif isinstance(loss_scale, float): + self.loss_scaler = LossScaler( + init_scale=loss_scale, mode='static') + elif isinstance(loss_scale, dict): + self.loss_scaler = LossScaler(**loss_scale) + else: + raise ValueError('loss_scale must be of type float, dict, or ' + f'"dynamic", got {loss_scale}') + + def before_run(self, runner): + """Preparing steps before Mixed Precision Training. + + 1. Make a master copy of fp32 weights for optimization. + 2. Convert the main model from fp32 to fp16. 
+ """ + # keep a copy of fp32 weights + old_groups = runner.optimizer.param_groups + runner.optimizer.param_groups = copy.deepcopy( + runner.optimizer.param_groups) + state = defaultdict(dict) + p_map = { + old_p: p + for old_p, p in zip( + chain(*(g['params'] for g in old_groups)), + chain(*(g['params'] + for g in runner.optimizer.param_groups))) + } + for k, v in runner.optimizer.state.items(): + state[p_map[k]] = v + runner.optimizer.state = state + # convert model to fp16 + wrap_fp16_model(runner.model) + # resume from state dict + if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: + scaler_state_dict = runner.meta['fp16']['loss_scaler'] + self.loss_scaler.load_state_dict(scaler_state_dict) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, + fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new( + fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), + fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + """Backward optimization steps for Mixed Precision Training. For + dynamic loss scaling, please refer `loss_scalar.py` + + 1. Scale the loss by a scale factor. + 2. Backward the loss to obtain the gradients (fp16). + 3. Copy gradients from the model to the fp32 weight copy. + 4. Scale the gradients back and update the fp32 weight copy. + 5. Copy back the params from fp32 weight copy to the fp16 model. + 6. Save loss_scaler state_dict for resume purpose. 
+ """ + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + # scale the loss value + scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale + scaled_loss.backward() + # copy fp16 grads in the model to fp32 params in the optimizer + + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, + self.bucket_size_mb) + + has_overflow = self.loss_scaler.has_overflow(fp32_weights) + # if has overflow, skip this iteration + if not has_overflow: + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scaler.loss_scale) + if self.grad_clip is not None: + grad_norm = self.clip_grads(fp32_weights) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + self.loss_scaler.update_scale(has_overflow) + if has_overflow: + runner.logger.warning('Check overflow, downscale loss scale ' + f'to {self.loss_scaler.cur_scale}') + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + @HOOKS.register_module() + class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, + Fp16OptimizerHook): + """Fp16 optimizer Hook (using mmcv implementation) implements multi- + iters gradient cumulating.""" + + def __init__(self, *args, **kwargs): + super(GradientCumulativeFp16OptimizerHook, + self).__init__(*args, **kwargs) + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + + loss = runner.outputs['loss'] + loss = loss / loss_factor + + # scale the loss value + scaled_loss = loss * self.loss_scaler.loss_scale + scaled_loss.backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + # copy fp16 grads in the model to fp32 params in the optimizer + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, + self.bucket_size_mb) + + has_overflow = self.loss_scaler.has_overflow(fp32_weights) + # if has overflow, skip this iteration + if not has_overflow: + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scaler.loss_scale) + if self.grad_clip is not None: + grad_norm = self.clip_grads(fp32_weights) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + else: + runner.logger.warning( + 'Check overflow, downscale loss scale ' + f'to {self.loss_scaler.cur_scale}') + + self.loss_scaler.update_scale(has_overflow) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = 
self.loss_scaler.state_dict() + + # clear grads + runner.model.zero_grad() + runner.optimizer.zero_grad() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/profiler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b70236997eec59c2209ef351ae38863b4112d0ec --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/profiler.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Callable, List, Optional, Union + +import torch + +from ..dist_utils import master_only +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class ProfilerHook(Hook): + """Profiler to analyze performance during training. + + PyTorch Profiler is a tool that allows the collection of the performance + metrics during the training. More details on Profiler can be found at + https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile + + Args: + by_epoch (bool): Profile performance by epoch or by iteration. + Default: True. + profile_iters (int): Number of iterations for profiling. + If ``by_epoch=True``, profile_iters indicates that they are the + first profile_iters epochs at the beginning of the + training, otherwise it indicates the first profile_iters + iterations. Default: 1. + activities (list[str]): List of activity groups (CPU, CUDA) to use in + profiling. Default: ['cpu', 'cuda']. + schedule (dict, optional): Config of generating the callable schedule. + if schedule is None, profiler will not add step markers into the + trace and table view. Default: None. + on_trace_ready (callable, dict): Either a handler or a dict of generate + handler. Default: None. + record_shapes (bool): Save information about operator's input shapes. + Default: False. + profile_memory (bool): Track tensor memory allocation/deallocation. + Default: False. + with_stack (bool): Record source information (file and line number) + for the ops. Default: False. + with_flops (bool): Use formula to estimate the FLOPS of specific + operators (matrix multiplication and 2D convolution). + Default: False. + json_trace_path (str, optional): Exports the collected trace in Chrome + JSON format. Default: None. + + Example: + >>> runner = ... # instantiate a Runner + >>> # tensorboard trace + >>> trace_config = dict(type='tb_trace', dir_name='work_dir') + >>> profiler_config = dict(on_trace_ready=trace_config) + >>> runner.register_profiler_hook(profiler_config) + >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)]) + """ + + def __init__(self, + by_epoch: bool = True, + profile_iters: int = 1, + activities: List[str] = ['cpu', 'cuda'], + schedule: Optional[dict] = None, + on_trace_ready: Optional[Union[Callable, dict]] = None, + record_shapes: bool = False, + profile_memory: bool = False, + with_stack: bool = False, + with_flops: bool = False, + json_trace_path: Optional[str] = None) -> None: + try: + from torch import profiler # torch version >= 1.8.1 + except ImportError: + raise ImportError('profiler is the new feature of torch1.8.1, ' + f'but your version is {torch.__version__}') + + assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.' 
+ self.by_epoch = by_epoch + + if profile_iters < 1: + raise ValueError('profile_iters should be greater than 0, but got ' + f'{profile_iters}') + self.profile_iters = profile_iters + + if not isinstance(activities, list): + raise ValueError( + f'activities should be list, but got {type(activities)}') + self.activities = [] + for activity in activities: + activity = activity.lower() + if activity == 'cpu': + self.activities.append(profiler.ProfilerActivity.CPU) + elif activity == 'cuda': + self.activities.append(profiler.ProfilerActivity.CUDA) + else: + raise ValueError( + f'activity should be "cpu" or "cuda", but got {activity}') + + if schedule is not None: + self.schedule = profiler.schedule(**schedule) + else: + self.schedule = None + + self.on_trace_ready = on_trace_ready + self.record_shapes = record_shapes + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_flops = with_flops + self.json_trace_path = json_trace_path + + @master_only + def before_run(self, runner): + if self.by_epoch and runner.max_epochs < self.profile_iters: + raise ValueError('self.profile_iters should not be greater than ' + f'{runner.max_epochs}') + + if not self.by_epoch and runner.max_iters < self.profile_iters: + raise ValueError('self.profile_iters should not be greater than ' + f'{runner.max_iters}') + + if callable(self.on_trace_ready): # handler + _on_trace_ready = self.on_trace_ready + elif isinstance(self.on_trace_ready, dict): # config of handler + trace_cfg = self.on_trace_ready.copy() + trace_type = trace_cfg.pop('type') # log_trace handler + if trace_type == 'log_trace': + + def _log_handler(prof): + print(prof.key_averages().table(**trace_cfg)) + + _on_trace_ready = _log_handler + elif trace_type == 'tb_trace': # tensorboard_trace handler + try: + import torch_tb_profiler # noqa: F401 + except ImportError: + raise ImportError('please run "pip install ' + 'torch-tb-profiler" to install ' + 'torch_tb_profiler') + _on_trace_ready = torch.profiler.tensorboard_trace_handler( + **trace_cfg) + else: + raise ValueError('trace_type should be "log_trace" or ' + f'"tb_trace", but got {trace_type}') + elif self.on_trace_ready is None: + _on_trace_ready = None # type: ignore + else: + raise ValueError('on_trace_ready should be handler, dict or None, ' + f'but got {type(self.on_trace_ready)}') + + if runner.max_epochs > 1: + warnings.warn(f'profiler will profile {runner.max_epochs} epochs ' + 'instead of 1 epoch. Since profiler will slow down ' + 'the training, it is recommended to train 1 epoch ' + 'with ProfilerHook and adjust your setting according' + ' to the profiler summary. 
During normal training ' + '(epoch > 1), you may disable the ProfilerHook.') + + self.profiler = torch.profiler.profile( + activities=self.activities, + schedule=self.schedule, + on_trace_ready=_on_trace_ready, + record_shapes=self.record_shapes, + profile_memory=self.profile_memory, + with_stack=self.with_stack, + with_flops=self.with_flops) + + self.profiler.__enter__() + runner.logger.info('profiler is profiling...') + + @master_only + def after_train_epoch(self, runner): + if self.by_epoch and runner.epoch == self.profile_iters - 1: + runner.logger.info('profiler may take a few minutes...') + self.profiler.__exit__(None, None, None) + if self.json_trace_path is not None: + self.profiler.export_chrome_trace(self.json_trace_path) + + @master_only + def after_train_iter(self, runner): + self.profiler.step() + if not self.by_epoch and runner.iter == self.profile_iters - 1: + runner.logger.info('profiler may take a few minutes...') + self.profiler.__exit__(None, None, None) + if self.json_trace_path is not None: + self.profiler.export_chrome_trace(self.json_trace_path) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sampler_seed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sampler_seed.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0dc6bdd8df5775857028aaed5444c0f59caf80 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sampler_seed.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class DistSamplerSeedHook(Hook): + """Data-loading sampler for distributed training. + + When distributed training, it is only useful in conjunction with + :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same + purpose with :obj:`IterLoader`. + """ + + def before_epoch(self, runner): + if hasattr(runner.data_loader.sampler, 'set_epoch'): + # in case the data loader uses `SequentialSampler` in Pytorch + runner.data_loader.sampler.set_epoch(runner.epoch) + elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'): + # batch sampler in pytorch warps the sampler as its attributes. + runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sync_buffer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sync_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..6376b7ff894280cb2782243b25e8973650591577 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/hooks/sync_buffer.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..dist_utils import allreduce_params +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class SyncBuffersHook(Hook): + """Synchronize model buffers such as running_mean and running_var in BN at + the end of each epoch. + + Args: + distributed (bool): Whether distributed training is used. It is + effective only for distributed training. Defaults to True. 
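`DistSamplerSeedHook` exists because distributed samplers derive their shuffle order from a generator seeded per epoch; without `set_epoch`, every epoch replays the same permutation on every rank. A standalone sketch of the underlying mechanism (the helper name is illustrative):

```python
# Sketch of why set_epoch matters: the permutation is a function of the epoch.
import torch

def epoch_permutation(epoch, n=8, seed=0):
    g = torch.Generator()
    g.manual_seed(seed + epoch)      # what set_epoch effectively changes
    return torch.randperm(n, generator=g).tolist()

print(epoch_permutation(0))   # differs from ...
print(epoch_permutation(1))   # ... this one only because the epoch is mixed in
```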
+ """ + + def __init__(self, distributed=True): + self.distributed = distributed + + def after_epoch(self, runner): + """All-reduce model buffers at the end of each epoch.""" + if self.distributed: + allreduce_params(runner.model.buffers()) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/iter_based_runner.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/iter_based_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..075e4b93996c7e5c267a1cd01afd439a5ac06e53 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/iter_based_runner.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import platform +import shutil +import time +import warnings + +import torch +from torch.optim import Optimizer + +import custom_mmpkg.custom_mmcv as mmcv +from .base_runner import BaseRunner +from .builder import RUNNERS +from .checkpoint import save_checkpoint +from .hooks import IterTimerHook +from .utils import get_host_info + + +class IterLoader: + + def __init__(self, dataloader): + self._dataloader = dataloader + self.iter_loader = iter(self._dataloader) + self._epoch = 0 + + @property + def epoch(self): + return self._epoch + + def __next__(self): + try: + data = next(self.iter_loader) + except StopIteration: + self._epoch += 1 + if hasattr(self._dataloader.sampler, 'set_epoch'): + self._dataloader.sampler.set_epoch(self._epoch) + time.sleep(2) # Prevent possible deadlock during epoch transition + self.iter_loader = iter(self._dataloader) + data = next(self.iter_loader) + + return data + + def __len__(self): + return len(self._dataloader) + + +@RUNNERS.register_module() +class IterBasedRunner(BaseRunner): + """Iteration-based Runner. + + This runner train models iteration by iteration. + """ + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._epoch = data_loader.epoch + data_batch = next(data_loader) + self.call_hook('before_train_iter') + outputs = self.model.train_step(data_batch, self.optimizer, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('model.train_step() must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + self.call_hook('after_train_iter') + self._inner_iter += 1 + self._iter += 1 + + @torch.no_grad() + def val(self, data_loader, **kwargs): + self.model.eval() + self.mode = 'val' + self.data_loader = data_loader + data_batch = next(data_loader) + self.call_hook('before_val_iter') + outputs = self.model.val_step(data_batch, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('model.val_step() must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + self.call_hook('after_val_iter') + self._inner_iter += 1 + + def run(self, data_loaders, workflow, max_iters=None, **kwargs): + """Start running. + + Args: + data_loaders (list[:obj:`DataLoader`]): Dataloaders for training + and validation. + workflow (list[tuple]): A list of (phase, iters) to specify the + running order and iterations. E.g, [('train', 10000), + ('val', 1000)] means running 10000 iterations for training and + 1000 iterations for validation, iteratively. 
+ """ + assert isinstance(data_loaders, list) + assert mmcv.is_list_of(workflow, tuple) + assert len(data_loaders) == len(workflow) + if max_iters is not None: + warnings.warn( + 'setting max_iters in run is deprecated, ' + 'please set max_iters in runner_config', DeprecationWarning) + self._max_iters = max_iters + assert self._max_iters is not None, ( + 'max_iters must be specified during instantiation') + + work_dir = self.work_dir if self.work_dir is not None else 'NONE' + self.logger.info('Start running, host: %s, work_dir: %s', + get_host_info(), work_dir) + self.logger.info('Hooks will be executed in the following order:\n%s', + self.get_hook_info()) + self.logger.info('workflow: %s, max: %d iters', workflow, + self._max_iters) + self.call_hook('before_run') + + iter_loaders = [IterLoader(x) for x in data_loaders] + + self.call_hook('before_epoch') + + while self.iter < self._max_iters: + for i, flow in enumerate(workflow): + self._inner_iter = 0 + mode, iters = flow + if not isinstance(mode, str) or not hasattr(self, mode): + raise ValueError( + 'runner has no method named "{}" to run a workflow'. + format(mode)) + iter_runner = getattr(self, mode) + for _ in range(iters): + if mode == 'train' and self.iter >= self._max_iters: + break + iter_runner(iter_loaders[i], **kwargs) + + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_epoch') + self.call_hook('after_run') + + def resume(self, + checkpoint, + resume_optimizer=True, + map_location='default'): + """Resume model from checkpoint. + + Args: + checkpoint (str): Checkpoint to resume from. + resume_optimizer (bool, optional): Whether resume the optimizer(s) + if the checkpoint file includes optimizer(s). Default to True. + map_location (str, optional): Same as :func:`torch.load`. + Default to 'default'. + """ + if map_location == 'default': + device_id = torch.cuda.current_device() + checkpoint = self.load_checkpoint( + checkpoint, + map_location=lambda storage, loc: storage.cuda(device_id)) + else: + checkpoint = self.load_checkpoint( + checkpoint, map_location=map_location) + + self._epoch = checkpoint['meta']['epoch'] + self._iter = checkpoint['meta']['iter'] + self._inner_iter = checkpoint['meta']['iter'] + if 'optimizer' in checkpoint and resume_optimizer: + if isinstance(self.optimizer, Optimizer): + self.optimizer.load_state_dict(checkpoint['optimizer']) + elif isinstance(self.optimizer, dict): + for k in self.optimizer.keys(): + self.optimizer[k].load_state_dict( + checkpoint['optimizer'][k]) + else: + raise TypeError( + 'Optimizer should be dict or torch.optim.Optimizer ' + f'but got {type(self.optimizer)}') + + self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}') + + def save_checkpoint(self, + out_dir, + filename_tmpl='iter_{}.pth', + meta=None, + save_optimizer=True, + create_symlink=True): + """Save checkpoint to file. + + Args: + out_dir (str): Directory to save checkpoint files. + filename_tmpl (str, optional): Checkpoint file template. + Defaults to 'iter_{}.pth'. + meta (dict, optional): Metadata to be saved in checkpoint. + Defaults to None. + save_optimizer (bool, optional): Whether save optimizer. + Defaults to True. + create_symlink (bool, optional): Whether create symlink to the + latest checkpoint file. Defaults to True. 
+ """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError( + f'meta should be a dict or None, but got {type(meta)}') + if self.meta is not None: + meta.update(self.meta) + # Note: meta.update(self.meta) should be done before + # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise + # there will be problems with resumed checkpoints. + # More details in https://github.com/open-mmlab/mmcv/pull/1108 + meta.update(epoch=self.epoch + 1, iter=self.iter) + + filename = filename_tmpl.format(self.iter + 1) + filepath = osp.join(out_dir, filename) + optimizer = self.optimizer if save_optimizer else None + save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta) + # in some environments, `os.symlink` is not supported, you may need to + # set `create_symlink` to False + if create_symlink: + dst_file = osp.join(out_dir, 'latest.pth') + if platform.system() != 'Windows': + mmcv.symlink(filename, dst_file) + else: + shutil.copy(filepath, dst_file) + + def register_training_hooks(self, + lr_config, + optimizer_config=None, + checkpoint_config=None, + log_config=None, + momentum_config=None, + custom_hooks_config=None): + """Register default hooks for iter-based training. + + Checkpoint hook, optimizer stepper hook and logger hooks will be set to + `by_epoch=False` by default. + + Default hooks include: + + +----------------------+-------------------------+ + | Hooks | Priority | + +======================+=========================+ + | LrUpdaterHook | VERY_HIGH (10) | + +----------------------+-------------------------+ + | MomentumUpdaterHook | HIGH (30) | + +----------------------+-------------------------+ + | OptimizerStepperHook | ABOVE_NORMAL (40) | + +----------------------+-------------------------+ + | CheckpointSaverHook | NORMAL (50) | + +----------------------+-------------------------+ + | IterTimerHook | LOW (70) | + +----------------------+-------------------------+ + | LoggerHook(s) | VERY_LOW (90) | + +----------------------+-------------------------+ + | CustomHook(s) | defaults to NORMAL (50) | + +----------------------+-------------------------+ + + If custom hooks have same priority with default hooks, custom hooks + will be triggered after default hooks. + """ + if checkpoint_config is not None: + checkpoint_config.setdefault('by_epoch', False) + if lr_config is not None: + lr_config.setdefault('by_epoch', False) + if log_config is not None: + for info in log_config['hooks']: + info.setdefault('by_epoch', False) + super(IterBasedRunner, self).register_training_hooks( + lr_config=lr_config, + momentum_config=momentum_config, + optimizer_config=optimizer_config, + checkpoint_config=checkpoint_config, + log_config=log_config, + timer_config=IterTimerHook(), + custom_hooks_config=custom_hooks_config) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/log_buffer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/log_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..d949e2941c5400088c7cd8a1dc893d8b233ae785 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/log_buffer.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from collections import OrderedDict + +import numpy as np + + +class LogBuffer: + + def __init__(self): + self.val_history = OrderedDict() + self.n_history = OrderedDict() + self.output = OrderedDict() + self.ready = False + + def clear(self): + self.val_history.clear() + self.n_history.clear() + self.clear_output() + + def clear_output(self): + self.output.clear() + self.ready = False + + def update(self, vars, count=1): + assert isinstance(vars, dict) + for key, var in vars.items(): + if key not in self.val_history: + self.val_history[key] = [] + self.n_history[key] = [] + self.val_history[key].append(var) + self.n_history[key].append(count) + + def average(self, n=0): + """Average latest n values or all values.""" + assert n >= 0 + for key in self.val_history: + values = np.array(self.val_history[key][-n:]) + nums = np.array(self.n_history[key][-n:]) + avg = np.sum(values * nums) / np.sum(nums) + self.output[key] = avg + self.ready = True diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53c34d0470992cbc374f29681fdd00dc0e57968d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, + build_optimizer_constructor) +from .default_constructor import DefaultOptimizerConstructor + +__all__ = [ + 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', + 'build_optimizer', 'build_optimizer_constructor' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..f9234eed8f1f186d9d8dfda34562157ee39bdb3a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/builder.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
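A hedged sketch of the LogBuffer defined in log_buffer.py above: update() records (value, count) pairs per key, and average(n) computes a count-weighted mean over the last n entries, so unevenly sized batches are averaged correctly.

    buf = LogBuffer()
    buf.update({'loss': 0.5}, count=10)  # e.g. a batch of 10 samples
    buf.update({'loss': 0.3}, count=30)
    buf.average()                        # n=0 averages the full history
    print(buf.output['loss'])            # (0.5*10 + 0.3*30) / 40 = 0.35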
+import copy +import inspect + +import torch + +from ...utils import Registry, build_from_cfg + +OPTIMIZERS = Registry('optimizer') +OPTIMIZER_BUILDERS = Registry('optimizer builder') + + +def register_torch_optimizers(): + torch_optimizers = [] + for module_name in dir(torch.optim): + if module_name.startswith('__'): + continue + _optim = getattr(torch.optim, module_name) + if inspect.isclass(_optim) and issubclass(_optim, + torch.optim.Optimizer): + OPTIMIZERS.register_module()(_optim) + torch_optimizers.append(module_name) + return torch_optimizers + + +TORCH_OPTIMIZERS = register_torch_optimizers() + + +def build_optimizer_constructor(cfg): + return build_from_cfg(cfg, OPTIMIZER_BUILDERS) + + +def build_optimizer(model, cfg): + optimizer_cfg = copy.deepcopy(cfg) + constructor_type = optimizer_cfg.pop('constructor', + 'DefaultOptimizerConstructor') + paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) + optim_constructor = build_optimizer_constructor( + dict( + type=constructor_type, + optimizer_cfg=optimizer_cfg, + paramwise_cfg=paramwise_cfg)) + optimizer = optim_constructor(model) + return optimizer diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/default_constructor.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/default_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..5901955857ab2d650907a284312c0a989de7b9a7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/optimizer/default_constructor.py @@ -0,0 +1,249 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +from torch.nn import GroupNorm, LayerNorm + +from custom_mmpkg.custom_mmcv.utils import _BatchNorm, _InstanceNorm, build_from_cfg, is_list_of +from custom_mmpkg.custom_mmcv.utils.ext_loader import check_ops_exist +from .builder import OPTIMIZER_BUILDERS, OPTIMIZERS + + +@OPTIMIZER_BUILDERS.register_module() +class DefaultOptimizerConstructor: + """Default constructor for optimizers. + + By default, each parameter shares the same optimizer settings, and we + provide an argument ``paramwise_cfg`` to specify parameter-wise settings. + It is a dict and may contain the following fields: + + - ``custom_keys`` (dict): Specifies parameter-wise settings by keys. If + one of the keys in ``custom_keys`` is a substring of the name of one + parameter, then the setting of the parameter will be specified by + ``custom_keys[key]`` and other settings like ``bias_lr_mult`` etc. will + be ignored. It should be noted that the aforementioned ``key`` is the + longest key that is a substring of the name of the parameter. If there + are multiple matched keys with the same length, then the key with the + lower alphabetical order will be chosen. + ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult`` + and ``decay_mult``. See Example 2 below. + - ``bias_lr_mult`` (float): It will be multiplied to the learning + rate for all bias parameters (except for those in normalization + layers and offset layers of DCN). + - ``bias_decay_mult`` (float): It will be multiplied to the weight + decay for all bias parameters (except for those in + normalization layers, depthwise conv layers, offset layers of DCN). + - ``norm_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of normalization + layers.
+ - ``dwconv_decay_mult`` (float): It will be multiplied to the weight + decay for all weight and bias parameters of depthwise conv + layers. + - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning + rate for parameters of offset layer in the deformable convs + of a model. + - ``bypass_duplicate`` (bool): If true, the duplicate parameters + would not be added into optimizer. Default: False. + + Note: + 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will + override the effect of ``bias_lr_mult`` in the bias of offset + layer. So be careful when using both ``bias_lr_mult`` and + ``dcn_offset_lr_mult``. If you wish to apply both of them to the + offset layer in deformable convs, set ``dcn_offset_lr_mult`` + to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``. + 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will + apply it to all the DCN layers in the model. So be careful when + the model contains multiple DCN layers in places other than + backbone. + + Args: + model (:obj:`nn.Module`): The model with parameters to be optimized. + optimizer_cfg (dict): The config dict of the optimizer. + Positional fields are + + - `type`: class name of the optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + paramwise_cfg (dict, optional): Parameter-wise options. + + Example 1: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, + >>> weight_decay=0.0001) + >>> paramwise_cfg = dict(norm_decay_mult=0.) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + + Example 2: + >>> # assume model have attribute model.backbone and model.cls_head + >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95) + >>> paramwise_cfg = dict(custom_keys={ + '.backbone': dict(lr_mult=0.1, decay_mult=0.9)}) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + >>> # Then the `lr` and `weight_decay` for model.backbone is + >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for + >>> # model.cls_head is (0.01, 0.95). 
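An end-to-end sketch of the optimizer-building pipeline defined above, assuming build_optimizer from builder.py is importable; paramwise_cfg rides inside the config dict and is split out by build_optimizer before the constructor runs.

    import torch.nn as nn

    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4,
               paramwise_cfg=dict(norm_decay_mult=0.))
    optimizer = build_optimizer(model, cfg)
    # BN weight/bias land in param groups whose weight decay was zeroed
    print({g['weight_decay'] for g in optimizer.param_groups})  # {0.0, 0.0001}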
+ """ + + def __init__(self, optimizer_cfg, paramwise_cfg=None): + if not isinstance(optimizer_cfg, dict): + raise TypeError('optimizer_cfg should be a dict', + f'but got {type(optimizer_cfg)}') + self.optimizer_cfg = optimizer_cfg + self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg + self.base_lr = optimizer_cfg.get('lr', None) + self.base_wd = optimizer_cfg.get('weight_decay', None) + self._validate_cfg() + + def _validate_cfg(self): + if not isinstance(self.paramwise_cfg, dict): + raise TypeError('paramwise_cfg should be None or a dict, ' + f'but got {type(self.paramwise_cfg)}') + + if 'custom_keys' in self.paramwise_cfg: + if not isinstance(self.paramwise_cfg['custom_keys'], dict): + raise TypeError( + 'If specified, custom_keys must be a dict, ' + f'but got {type(self.paramwise_cfg["custom_keys"])}') + if self.base_wd is None: + for key in self.paramwise_cfg['custom_keys']: + if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]: + raise ValueError('base_wd should not be None') + + # get base lr and weight decay + # weight_decay must be explicitly specified if mult is specified + if ('bias_decay_mult' in self.paramwise_cfg + or 'norm_decay_mult' in self.paramwise_cfg + or 'dwconv_decay_mult' in self.paramwise_cfg): + if self.base_wd is None: + raise ValueError('base_wd should not be None') + + def _is_in(self, param_group, param_group_list): + assert is_list_of(param_group_list, dict) + param = set(param_group['params']) + param_set = set() + for group in param_group_list: + param_set.update(set(group['params'])) + + return not param.isdisjoint(param_set) + + def add_params(self, params, module, prefix='', is_dcn_module=None): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. + """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if not param.requires_grad: + params.append(param_group) + continue + if bypass_duplicate and self._is_in(param_group, params): + warnings.warn(f'{prefix} is duplicate. 
It is skipped since ' + f'bypass_duplicate={bypass_duplicate}') + continue + # if the parameter match one of the custom keys, ignore other rules + is_custom = False + for key in sorted_keys: + if key in f'{prefix}.{name}': + is_custom = True + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) + param_group['weight_decay'] = self.base_wd * decay_mult + break + + if not is_custom: + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not (is_norm or is_dcn_module): + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # depth-wise conv + elif is_dwconv: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # bias lr and decay + elif name == 'bias' and not is_dcn_module: + # TODO: current bias_decay_mult will have affect on DCN + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + params.append(param_group) + + if check_ops_exist(): + from custom_mmpkg.custom_mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) + + def __call__(self, model): + if hasattr(model, 'module'): + model = model.module + + optimizer_cfg = self.optimizer_cfg.copy() + # if no paramwise option is specified, just use the global setting + if not self.paramwise_cfg: + optimizer_cfg['params'] = model.parameters() + return build_from_cfg(optimizer_cfg, OPTIMIZERS) + + # set param-wise lr and weight decay recursively + params = [] + self.add_params(params, model) + optimizer_cfg['params'] = params + + return build_from_cfg(optimizer_cfg, OPTIMIZERS) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/priority.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/priority.py new file mode 100644 index 0000000000000000000000000000000000000000..64cc4e3a05f8d5b89ab6eb32461e6e80f1d62e67 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/priority.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from enum import Enum + + +class Priority(Enum): + """Hook priority levels. 
+ + +--------------+------------+ + | Level | Value | + +==============+============+ + | HIGHEST | 0 | + +--------------+------------+ + | VERY_HIGH | 10 | + +--------------+------------+ + | HIGH | 30 | + +--------------+------------+ + | ABOVE_NORMAL | 40 | + +--------------+------------+ + | NORMAL | 50 | + +--------------+------------+ + | BELOW_NORMAL | 60 | + +--------------+------------+ + | LOW | 70 | + +--------------+------------+ + | VERY_LOW | 90 | + +--------------+------------+ + | LOWEST | 100 | + +--------------+------------+ + """ + + HIGHEST = 0 + VERY_HIGH = 10 + HIGH = 30 + ABOVE_NORMAL = 40 + NORMAL = 50 + BELOW_NORMAL = 60 + LOW = 70 + VERY_LOW = 90 + LOWEST = 100 + + +def get_priority(priority): + """Get priority value. + + Args: + priority (int or str or :obj:`Priority`): Priority. + + Returns: + int: The priority value. + """ + if isinstance(priority, int): + if priority < 0 or priority > 100: + raise ValueError('priority must be between 0 and 100') + return priority + elif isinstance(priority, Priority): + return priority.value + elif isinstance(priority, str): + return Priority[priority.upper()].value + else: + raise TypeError('priority must be an integer or Priority enum value') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..32fa4a7297f2cb10f7f2824470434aa34d8de0bb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/runner/utils.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import random +import sys +import time +import warnings +from getpass import getuser +from socket import gethostname + +import numpy as np +import torch + +import custom_mmpkg.custom_mmcv as mmcv + + +def get_host_info(): + """Get hostname and username. + + Return an empty string if an exception is raised, e.g. + ``getpass.getuser()`` will fail in a Docker container. + """ + host = '' + try: + host = f'{getuser()}@{gethostname()}' + except Exception as e: + warnings.warn(f'Host or user not found: {str(e)}') + finally: + return host + + +def get_time_str(): + return time.strftime('%Y%m%d_%H%M%S', time.localtime()) + + +def obj_from_dict(info, parent=None, default_args=None): + """Initialize an object from dict. + + The dict must contain the key "type", which indicates the object type; it + can be either a string or type, such as "list" or ``list``. Remaining + fields are treated as the arguments for constructing the object. + + Args: + info (dict): Object types and arguments. + parent (:class:`module`): Module which may contain the expected object + classes. + default_args (dict, optional): Default arguments for initializing the + object. + + Returns: + any type: Object built from the dict.
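get_priority() above normalizes three interchangeable spellings of a priority; a quick sketch, assuming Priority and get_priority are importable from priority.py:

    assert get_priority(50) == 50              # bare int passes through
    assert get_priority('very_high') == 10     # names are case-insensitive
    assert get_priority(Priority.LOW) == 70    # enum members use .value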
+ """ + assert isinstance(info, dict) and 'type' in info + assert isinstance(default_args, dict) or default_args is None + args = info.copy() + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if parent is not None: + obj_type = getattr(parent, obj_type) + else: + obj_type = sys.modules[obj_type] + elif not isinstance(obj_type, type): + raise TypeError('type must be a str or valid type, but ' + f'got {type(obj_type)}') + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + return obj_type(**args) + + +def set_random_seed(seed, deterministic=False, use_rank_shift=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + rank_shift (bool): Whether to add rank number to the random seed to + have different random seed in different threads. Default: False. + """ + if use_rank_shift: + rank, _ = mmcv.runner.get_dist_info() + seed += rank + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..378a0068432a371af364de9d73785901c0f83383 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/__init__.py @@ -0,0 +1,69 @@ +# flake8: noqa +# Copyright (c) OpenMMLab. All rights reserved. 
+from .config import Config, ConfigDict, DictAction +from .misc import (check_prerequisites, concat_list, deprecated_api_warning, + has_method, import_modules_from_strings, is_list_of, + is_method_overridden, is_seq_of, is_str, is_tuple_of, + iter_cast, list_cast, requires_executable, requires_package, + slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple, + to_ntuple, tuple_cast) +from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist, + scandir, symlink) +from .progressbar import (ProgressBar, track_iter_progress, + track_parallel_progress, track_progress) +from .testing import (assert_attrs_equal, assert_dict_contains_subset, + assert_dict_has_keys, assert_is_norm_layer, + assert_keys_equal, assert_params_all_zeros, + check_python_script) +from .timer import Timer, TimerError, check_time +from .version_utils import digit_version, get_git_hash + +try: + import torch +except ImportError: + __all__ = [ + 'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast', + 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of', + 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package', + 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist', + 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar', + 'track_progress', 'track_iter_progress', 'track_parallel_progress', + 'Timer', 'TimerError', 'check_time', 'deprecated_api_warning', + 'digit_version', 'get_git_hash', 'import_modules_from_strings', + 'assert_dict_contains_subset', 'assert_attrs_equal', + 'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script', + 'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple', + 'is_method_overridden', 'has_method' + ] +else: + from .env import collect_env + from .logging import get_logger, print_log + from .parrots_jit import jit, skip_no_elena + from .parrots_wrapper import ( + TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader, + PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, + _AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm, + _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home) + from .registry import Registry, build_from_cfg + from .trace import is_jit_tracing + __all__ = [ + 'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger', + 'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', + 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', + 'check_prerequisites', 'requires_package', 'requires_executable', + 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', + 'symlink', 'scandir', 'ProgressBar', 'track_progress', + 'track_iter_progress', 'track_parallel_progress', 'Registry', + 'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm', + '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm', + '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd', + 'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension', + 'DataLoader', 'PoolDataLoader', 'TORCH_VERSION', + 'deprecated_api_warning', 'digit_version', 'get_git_hash', + 'import_modules_from_strings', 'jit', 'skip_no_elena', + 'assert_dict_contains_subset', 'assert_attrs_equal', + 'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer', + 'assert_params_all_zeros', 'check_python_script', + 'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch', + '_get_cuda_home', 'has_method' + ] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/config.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/config.py new file mode 100644 index 0000000000000000000000000000000000000000..098a706764a1c18fee26bdaae6d5898d9af23282 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/config.py @@ -0,0 +1,688 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import ast +import copy +import os +import os.path as osp +import platform +import shutil +import sys +import tempfile +import uuid +import warnings +from argparse import Action, ArgumentParser +from collections import abc +from importlib import import_module + +from addict import Dict +from yapf.yapflib.yapf_api import FormatCode + +from .misc import import_modules_from_strings +from .path import check_file_exist + +if platform.system() == 'Windows': + import regex as re +else: + import re + +BASE_KEY = '_base_' +DELETE_KEY = '_delete_' +DEPRECATION_KEY = '_deprecation_' +RESERVED_KEYS = ['filename', 'text', 'pretty_text'] + + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + try: + value = super(ConfigDict, self).__getattr__(name) + except KeyError: + ex = AttributeError(f"'{self.__class__.__name__}' object has no " + f"attribute '{name}'") + except Exception as e: + ex = e + else: + return value + raise ex + + +def add_args(parser, cfg, prefix=''): + for k, v in cfg.items(): + if isinstance(v, str): + parser.add_argument('--' + prefix + k) + elif isinstance(v, int): + parser.add_argument('--' + prefix + k, type=int) + elif isinstance(v, float): + parser.add_argument('--' + prefix + k, type=float) + elif isinstance(v, bool): + parser.add_argument('--' + prefix + k, action='store_true') + elif isinstance(v, dict): + add_args(parser, v, prefix + k + '.') + elif isinstance(v, abc.Iterable): + parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+') + else: + print(f'cannot parse key {prefix + k} of type {type(v)}') + return parser + + +class Config: + """A facility for config and config files. + + It supports common file formats as configs: python/json/yaml. The interface + is the same as a dict object and also allows access config values as + attributes. 
+ + Example: + >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) + >>> cfg.a + 1 + >>> cfg.b + {'b1': [0, 1]} + >>> cfg.b.b1 + [0, 1] + >>> cfg = Config.fromfile('tests/data/config/a.py') + >>> cfg.filename + "/home/kchen/projects/mmcv/tests/data/config/a.py" + >>> cfg.item4 + 'test' + >>> cfg + "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " + "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" + """ + + @staticmethod + def _validate_py_syntax(filename): + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve coding issue on windows + content = f.read() + try: + ast.parse(content) + except SyntaxError as e: + raise SyntaxError('There are syntax errors in config ' + f'file {filename}: {e}') + + @staticmethod + def _substitute_predefined_vars(filename, temp_config_name): + file_dirname = osp.dirname(filename) + file_basename = osp.basename(filename) + file_basename_no_extension = osp.splitext(file_basename)[0] + file_extname = osp.splitext(filename)[1] + support_templates = dict( + fileDirname=file_dirname, + fileBasename=file_basename, + fileBasenameNoExtension=file_basename_no_extension, + fileExtname=file_extname) + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve coding issue on windows + config_file = f.read() + for key, value in support_templates.items(): + regexp = r'\{\{\s*' + str(key) + r'\s*\}\}' + value = value.replace('\\', '/') + config_file = re.sub(regexp, value, config_file) + with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: + tmp_config_file.write(config_file) + + @staticmethod + def _pre_substitute_base_vars(filename, temp_config_name): + """Substitute base variable placehoders to string, so that parsing + would work.""" + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve coding issue on windows + config_file = f.read() + base_var_dict = {} + regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}' + base_vars = set(re.findall(regexp, config_file)) + for base_var in base_vars: + randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}' + base_var_dict[randstr] = base_var + regexp = r'\{\{\s*' + BASE_KEY + r'\.' 
+ base_var + r'\s*\}\}' + config_file = re.sub(regexp, f'"{randstr}"', config_file) + with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: + tmp_config_file.write(config_file) + return base_var_dict + + @staticmethod + def _substitute_base_vars(cfg, base_var_dict, base_cfg): + """Substitute variable strings to their actual values.""" + cfg = copy.deepcopy(cfg) + + if isinstance(cfg, dict): + for k, v in cfg.items(): + if isinstance(v, str) and v in base_var_dict: + new_v = base_cfg + for new_k in base_var_dict[v].split('.'): + new_v = new_v[new_k] + cfg[k] = new_v + elif isinstance(v, (list, tuple, dict)): + cfg[k] = Config._substitute_base_vars( + v, base_var_dict, base_cfg) + elif isinstance(cfg, tuple): + cfg = tuple( + Config._substitute_base_vars(c, base_var_dict, base_cfg) + for c in cfg) + elif isinstance(cfg, list): + cfg = [ + Config._substitute_base_vars(c, base_var_dict, base_cfg) + for c in cfg + ] + elif isinstance(cfg, str) and cfg in base_var_dict: + new_v = base_cfg + for new_k in base_var_dict[cfg].split('.'): + new_v = new_v[new_k] + cfg = new_v + + return cfg + + @staticmethod + def _file2dict(filename, use_predefined_variables=True): + filename = osp.abspath(osp.expanduser(filename)) + check_file_exist(filename) + fileExtname = osp.splitext(filename)[1] + if fileExtname not in ['.py', '.json', '.yaml', '.yml']: + raise IOError('Only py/yml/yaml/json type are supported now!') + + with tempfile.TemporaryDirectory() as temp_config_dir: + temp_config_file = tempfile.NamedTemporaryFile( + dir=temp_config_dir, suffix=fileExtname) + if platform.system() == 'Windows': + temp_config_file.close() + temp_config_name = osp.basename(temp_config_file.name) + # Substitute predefined variables + if use_predefined_variables: + Config._substitute_predefined_vars(filename, + temp_config_file.name) + else: + shutil.copyfile(filename, temp_config_file.name) + # Substitute base variables from placeholders to strings + base_var_dict = Config._pre_substitute_base_vars( + temp_config_file.name, temp_config_file.name) + + if filename.endswith('.py'): + temp_module_name = osp.splitext(temp_config_name)[0] + sys.path.insert(0, temp_config_dir) + Config._validate_py_syntax(filename) + mod = import_module(temp_module_name) + sys.path.pop(0) + cfg_dict = { + name: value + for name, value in mod.__dict__.items() + if not name.startswith('__') + } + # delete imported module + del sys.modules[temp_module_name] + elif filename.endswith(('.yml', '.yaml', '.json')): + import custom_mmpkg.custom_mmcv as mmcv + cfg_dict = mmcv.load(temp_config_file.name) + # close temp file + temp_config_file.close() + + # check deprecation information + if DEPRECATION_KEY in cfg_dict: + deprecation_info = cfg_dict.pop(DEPRECATION_KEY) + warning_msg = f'The config file {filename} will be deprecated ' \ + 'in the future.' + if 'expected' in deprecation_info: + warning_msg += f' Please use {deprecation_info["expected"]} ' \ + 'instead.' 
+ if 'reference' in deprecation_info: + warning_msg += ' More information can be found at ' \ + f'{deprecation_info["reference"]}' + warnings.warn(warning_msg) + + cfg_text = filename + '\n' + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve coding issue on windows + cfg_text += f.read() + + if BASE_KEY in cfg_dict: + cfg_dir = osp.dirname(filename) + base_filename = cfg_dict.pop(BASE_KEY) + base_filename = base_filename if isinstance( + base_filename, list) else [base_filename] + + cfg_dict_list = list() + cfg_text_list = list() + for f in base_filename: + _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f)) + cfg_dict_list.append(_cfg_dict) + cfg_text_list.append(_cfg_text) + + base_cfg_dict = dict() + for c in cfg_dict_list: + duplicate_keys = base_cfg_dict.keys() & c.keys() + if len(duplicate_keys) > 0: + raise KeyError('Duplicate key is not allowed among bases. ' + f'Duplicate keys: {duplicate_keys}') + base_cfg_dict.update(c) + + # Substitute base variables from strings to their actual values + cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, + base_cfg_dict) + + base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) + cfg_dict = base_cfg_dict + + # merge cfg_text + cfg_text_list.append(cfg_text) + cfg_text = '\n'.join(cfg_text_list) + + return cfg_dict, cfg_text + + @staticmethod + def _merge_a_into_b(a, b, allow_list_keys=False): + """merge dict ``a`` into dict ``b`` (non-inplace). + + Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid + in-place modifications. + + Args: + a (dict): The source dict to be merged into ``b``. + b (dict): The origin dict to be fetch keys from ``a``. + allow_list_keys (bool): If True, int string keys (e.g. '0', '1') + are allowed in source ``a`` and will replace the element of the + corresponding index in b if b is a list. Default: False. + + Returns: + dict: The modified dict of ``b`` using ``a``. + + Examples: + # Normally merge a into b. + >>> Config._merge_a_into_b( + ... dict(obj=dict(a=2)), dict(obj=dict(a=1))) + {'obj': {'a': 2}} + + # Delete b first and merge a into b. + >>> Config._merge_a_into_b( + ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1))) + {'obj': {'a': 2}} + + # b is a list + >>> Config._merge_a_into_b( + ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True) + [{'a': 2}, {'b': 2}] + """ + b = b.copy() + for k, v in a.items(): + if allow_list_keys and k.isdigit() and isinstance(b, list): + k = int(k) + if len(b) <= k: + raise KeyError(f'Index {k} exceeds the length of list {b}') + b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) + elif isinstance(v, + dict) and k in b and not v.pop(DELETE_KEY, False): + allowed_types = (dict, list) if allow_list_keys else dict + if not isinstance(b[k], allowed_types): + raise TypeError( + f'{k}={v} in child config cannot inherit from base ' + f'because {k} is a dict in the child config but is of ' + f'type {type(b[k])} in base config. 
You may set ' + f'`{DELETE_KEY}=True` to ignore the base config') + b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) + else: + b[k] = v + return b + + @staticmethod + def fromfile(filename, + use_predefined_variables=True, + import_custom_modules=True): + cfg_dict, cfg_text = Config._file2dict(filename, + use_predefined_variables) + if import_custom_modules and cfg_dict.get('custom_imports', None): + import_modules_from_strings(**cfg_dict['custom_imports']) + return Config(cfg_dict, cfg_text=cfg_text, filename=filename) + + @staticmethod + def fromstring(cfg_str, file_format): + """Generate config from config str. + + Args: + cfg_str (str): Config str. + file_format (str): Config file format corresponding to the + config str. Only py/yml/yaml/json type are supported now! + + Returns: + obj:`Config`: Config obj. + """ + if file_format not in ['.py', '.json', '.yaml', '.yml']: + raise IOError('Only py/yml/yaml/json type are supported now!') + if file_format != '.py' and 'dict(' in cfg_str: + # check if users specify a wrong suffix for python + warnings.warn( + 'Please check "file_format", the file format may be .py') + with tempfile.NamedTemporaryFile( + 'w', encoding='utf-8', suffix=file_format, + delete=False) as temp_file: + temp_file.write(cfg_str) + # on windows, previous implementation cause error + # see PR 1077 for details + cfg = Config.fromfile(temp_file.name) + os.remove(temp_file.name) + return cfg + + @staticmethod + def auto_argparser(description=None): + """Generate argparser from config file automatically (experimental)""" + partial_parser = ArgumentParser(description=description) + partial_parser.add_argument('config', help='config file path') + cfg_file = partial_parser.parse_known_args()[0].config + cfg = Config.fromfile(cfg_file) + parser = ArgumentParser(description=description) + parser.add_argument('config', help='config file path') + add_args(parser, cfg) + return parser, cfg + + def __init__(self, cfg_dict=None, cfg_text=None, filename=None): + if cfg_dict is None: + cfg_dict = dict() + elif not isinstance(cfg_dict, dict): + raise TypeError('cfg_dict must be a dict, but ' + f'got {type(cfg_dict)}') + for key in cfg_dict: + if key in RESERVED_KEYS: + raise KeyError(f'{key} is reserved for config file') + + super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict)) + super(Config, self).__setattr__('_filename', filename) + if cfg_text: + text = cfg_text + elif filename: + with open(filename, 'r') as f: + text = f.read() + else: + text = '' + super(Config, self).__setattr__('_text', text) + + @property + def filename(self): + return self._filename + + @property + def text(self): + return self._text + + @property + def pretty_text(self): + + indent = 4 + + def _indent(s_, num_spaces): + s = s_.split('\n') + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(num_spaces * ' ') + line for line in s] + s = '\n'.join(s) + s = first + '\n' + s + return s + + def _format_basic_types(k, v, use_mapping=False): + if isinstance(v, str): + v_str = f"'{v}'" + else: + v_str = str(v) + + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f'{k_str}: {v_str}' + else: + attr_str = f'{str(k)}={v_str}' + attr_str = _indent(attr_str, indent) + + return attr_str + + def _format_list(k, v, use_mapping=False): + # check if all items in the list are dict + if all(isinstance(_, dict) for _ in v): + v_str = '[\n' + v_str += '\n'.join( + f'dict({_indent(_format_dict(v_), indent)}),' + for v_ in v).rstrip(',') + if use_mapping: + k_str = f"'{k}'" if 
isinstance(k, str) else str(k) + attr_str = f'{k_str}: {v_str}' + else: + attr_str = f'{str(k)}={v_str}' + attr_str = _indent(attr_str, indent) + ']' + else: + attr_str = _format_basic_types(k, v, use_mapping) + return attr_str + + def _contain_invalid_identifier(dict_str): + contain_invalid_identifier = False + for key_name in dict_str: + contain_invalid_identifier |= \ + (not str(key_name).isidentifier()) + return contain_invalid_identifier + + def _format_dict(input_dict, outest_level=False): + r = '' + s = [] + + use_mapping = _contain_invalid_identifier(input_dict) + if use_mapping: + r += '{' + for idx, (k, v) in enumerate(input_dict.items()): + is_last = idx >= len(input_dict) - 1 + end = '' if outest_level or is_last else ',' + if isinstance(v, dict): + v_str = '\n' + _format_dict(v) + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f'{k_str}: dict({v_str}' + else: + attr_str = f'{str(k)}=dict({v_str}' + attr_str = _indent(attr_str, indent) + ')' + end + elif isinstance(v, list): + attr_str = _format_list(k, v, use_mapping) + end + else: + attr_str = _format_basic_types(k, v, use_mapping) + end + + s.append(attr_str) + r += '\n'.join(s) + if use_mapping: + r += '}' + return r + + cfg_dict = self._cfg_dict.to_dict() + text = _format_dict(cfg_dict, outest_level=True) + # copied from setup.cfg + yapf_style = dict( + based_on_style='pep8', + blank_line_before_nested_class_or_def=True, + split_before_expression_after_opening_paren=True) + text, _ = FormatCode(text, style_config=yapf_style, verify=True) + + return text + + def __repr__(self): + return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}' + + def __len__(self): + return len(self._cfg_dict) + + def __getattr__(self, name): + return getattr(self._cfg_dict, name) + + def __getitem__(self, name): + return self._cfg_dict.__getitem__(name) + + def __setattr__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setattr__(name, value) + + def __setitem__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setitem__(name, value) + + def __iter__(self): + return iter(self._cfg_dict) + + def __getstate__(self): + return (self._cfg_dict, self._filename, self._text) + + def __setstate__(self, state): + _cfg_dict, _filename, _text = state + super(Config, self).__setattr__('_cfg_dict', _cfg_dict) + super(Config, self).__setattr__('_filename', _filename) + super(Config, self).__setattr__('_text', _text) + + def dump(self, file=None): + cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict() + if self.filename.endswith('.py'): + if file is None: + return self.pretty_text + else: + with open(file, 'w', encoding='utf-8') as f: + f.write(self.pretty_text) + else: + import custom_mmpkg.custom_mmcv as mmcv + if file is None: + file_format = self.filename.split('.')[-1] + return mmcv.dump(cfg_dict, file_format=file_format) + else: + mmcv.dump(cfg_dict, file) + + def merge_from_dict(self, options, allow_list_keys=True): + """Merge list into cfg_dict. + + Merge the dict parsed by MultipleKVAction into this cfg. + + Examples: + >>> options = {'model.backbone.depth': 50, + ... 'model.backbone.with_cp':True} + >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet')))) + >>> cfg.merge_from_dict(options) + >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + >>> assert cfg_dict == dict( + ... 
model=dict(backbone=dict(depth=50, with_cp=True))) + + # Merge list element + >>> cfg = Config(dict(pipeline=[ + ... dict(type='LoadImage'), dict(type='LoadAnnotations')])) + >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')}) + >>> cfg.merge_from_dict(options, allow_list_keys=True) + >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + >>> assert cfg_dict == dict(pipeline=[ + ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')]) + + Args: + options (dict): dict of configs to merge from. + allow_list_keys (bool): If True, int string keys (e.g. '0', '1') + are allowed in ``options`` and will replace the element of the + corresponding index in the config if the config is a list. + Default: True. + """ + option_cfg_dict = {} + for full_key, v in options.items(): + d = option_cfg_dict + key_list = full_key.split('.') + for subkey in key_list[:-1]: + d.setdefault(subkey, ConfigDict()) + d = d[subkey] + subkey = key_list[-1] + d[subkey] = v + + cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + super(Config, self).__setattr__( + '_cfg_dict', + Config._merge_a_into_b( + option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys)) + + +class DictAction(Action): + """ + argparse action to split an argument into KEY=VALUE form + on the first = and append to a dictionary. List options can + be passed as comma separated values, i.e 'KEY=V1,V2,V3', or with explicit + brackets, i.e. 'KEY=[V1,V2,V3]'. It also support nested brackets to build + list/tuple values. e.g. 'KEY=[(V1,V2),(V3,V4)]' + """ + + @staticmethod + def _parse_int_float_bool(val): + try: + return int(val) + except ValueError: + pass + try: + return float(val) + except ValueError: + pass + if val.lower() in ['true', 'false']: + return True if val.lower() == 'true' else False + return val + + @staticmethod + def _parse_iterable(val): + """Parse iterable values in the string. + + All elements inside '()' or '[]' are treated as iterable values. + + Args: + val (str): Value string. + + Returns: + list | tuple: The expanded list or tuple from the string. + + Examples: + >>> DictAction._parse_iterable('1,2,3') + [1, 2, 3] + >>> DictAction._parse_iterable('[a, b, c]') + ['a', 'b', 'c'] + >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]') + [(1, 2, 3), ['a', 'b'], 'c'] + """ + + def find_next_comma(string): + """Find the position of next comma in the string. + + If no ',' is found in the string, return the string length. All + chars inside '()' and '[]' are treated as one element and thus ',' + inside these brackets are ignored. + """ + assert (string.count('(') == string.count(')')) and ( + string.count('[') == string.count(']')), \ + f'Imbalanced brackets exist in {string}' + end = len(string) + for idx, char in enumerate(string): + pre = string[:idx] + # The string before this ',' is balanced + if ((char == ',') and (pre.count('(') == pre.count(')')) + and (pre.count('[') == pre.count(']'))): + end = idx + break + return end + + # Strip ' and " characters and replace whitespace. 
+ val = val.strip('\'\"').replace(' ', '') + is_tuple = False + if val.startswith('(') and val.endswith(')'): + is_tuple = True + val = val[1:-1] + elif val.startswith('[') and val.endswith(']'): + val = val[1:-1] + elif ',' not in val: + # val is a single value + return DictAction._parse_int_float_bool(val) + + values = [] + while len(val) > 0: + comma_idx = find_next_comma(val) + element = DictAction._parse_iterable(val[:comma_idx]) + values.append(element) + val = val[comma_idx + 1:] + if is_tuple: + values = tuple(values) + return values + + def __call__(self, parser, namespace, values, option_string=None): + options = {} + for kv in values: + key, val = kv.split('=', maxsplit=1) + options[key] = self._parse_iterable(val) + setattr(namespace, self.dest, options) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/env.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/env.py new file mode 100644 index 0000000000000000000000000000000000000000..ffc2e44d2d272d81c74fb2333849265011cd5fec --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/env.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""This file holding some environment constant for sharing by other files.""" + +import os.path as osp +import subprocess +import sys +from collections import defaultdict + +import cv2 +import torch + +import custom_mmpkg.custom_mmcv as mmcv +from .parrots_wrapper import get_build_config + + +def collect_env(): + """Collect the information of the running environments. + + Returns: + dict: The environment information. The following fields are contained. + + - sys.platform: The variable of ``sys.platform``. + - Python: Python version. + - CUDA available: Bool, indicating if CUDA is available. + - GPU devices: Device type of each GPU. + - CUDA_HOME (optional): The env var ``CUDA_HOME``. + - NVCC (optional): NVCC version. + - GCC: GCC version, "n/a" if GCC is not installed. + - PyTorch: PyTorch version. + - PyTorch compiling details: The output of \ + ``torch.__config__.show()``. + - TorchVision (optional): TorchVision version. + - OpenCV: OpenCV version. + - MMCV: MMCV version. + - MMCV Compiler: The GCC version for compiling MMCV ops. + - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. 
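A hedged usage sketch of the DictAction argparse action from utils/config.py above: scalar values are parsed to int/float/bool, and bracketed values expand to lists or tuples.

    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--cfg-options', nargs='+', action=DictAction)
    args = parser.parse_args(
        ['--cfg-options', 'lr=0.01', 'steps=[8,11]', 'flip=true'])
    print(args.cfg_options)  # {'lr': 0.01, 'steps': [8, 11], 'flip': True}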
+ """ + env_info = {} + env_info['sys.platform'] = sys.platform + env_info['Python'] = sys.version.replace('\n', '') + + cuda_available = torch.cuda.is_available() + env_info['CUDA available'] = cuda_available + + if cuda_available: + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + devices[torch.cuda.get_device_name(k)].append(str(k)) + for name, device_ids in devices.items(): + env_info['GPU ' + ','.join(device_ids)] = name + + from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _get_cuda_home + CUDA_HOME = _get_cuda_home() + env_info['CUDA_HOME'] = CUDA_HOME + + if CUDA_HOME is not None and osp.isdir(CUDA_HOME): + try: + nvcc = osp.join(CUDA_HOME, 'bin/nvcc') + nvcc = subprocess.check_output( + f'"{nvcc}" -V | tail -n1', shell=True) + nvcc = nvcc.decode('utf-8').strip() + except subprocess.SubprocessError: + nvcc = 'Not Available' + env_info['NVCC'] = nvcc + + try: + gcc = subprocess.check_output('gcc --version | head -n1', shell=True) + gcc = gcc.decode('utf-8').strip() + env_info['GCC'] = gcc + except subprocess.CalledProcessError: # gcc is unavailable + env_info['GCC'] = 'n/a' + + env_info['PyTorch'] = torch.__version__ + env_info['PyTorch compiling details'] = get_build_config() + + try: + import torchvision + env_info['TorchVision'] = torchvision.__version__ + except ModuleNotFoundError: + pass + + env_info['OpenCV'] = cv2.__version__ + + env_info['MMCV'] = mmcv.__version__ + + try: + from custom_mmpkg.custom_mmcv.ops import get_compiler_version, get_compiling_cuda_version + except ModuleNotFoundError: + env_info['MMCV Compiler'] = 'n/a' + env_info['MMCV CUDA Compiler'] = 'n/a' + else: + env_info['MMCV Compiler'] = get_compiler_version() + env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() + + return env_info diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/ext_loader.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/ext_loader.py new file mode 100644 index 0000000000000000000000000000000000000000..9fbdddf85818f8c6f2fb8b121c9fdc26259a64b8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/ext_loader.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import importlib +import os +import pkgutil +import warnings +from collections import namedtuple + +import torch + +if torch.__version__ != 'parrots': + + def load_ext(name, funcs): + ext = importlib.import_module('custom_mmcv.' 
+ name) + for fun in funcs: + assert hasattr(ext, fun), f'{fun} miss in module {name}' + return ext +else: + from parrots import extension + from parrots.base import ParrotsException + + has_return_value_ops = [ + 'nms', + 'softnms', + 'nms_match', + 'nms_rotated', + 'top_pool_forward', + 'top_pool_backward', + 'bottom_pool_forward', + 'bottom_pool_backward', + 'left_pool_forward', + 'left_pool_backward', + 'right_pool_forward', + 'right_pool_backward', + 'fused_bias_leakyrelu', + 'upfirdn2d', + 'ms_deform_attn_forward', + 'pixel_group', + 'contour_expand', + ] + + def get_fake_func(name, e): + + def fake_func(*args, **kwargs): + warnings.warn(f'{name} is not supported in parrots now') + raise e + + return fake_func + + def load_ext(name, funcs): + ExtModule = namedtuple('ExtModule', funcs) + ext_list = [] + lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + for fun in funcs: + try: + ext_fun = extension.load(fun, name, lib_dir=lib_root) + except ParrotsException as e: + if 'No element registered' not in e.message: + warnings.warn(e.message) + ext_fun = get_fake_func(fun, e) + ext_list.append(ext_fun) + else: + if fun in has_return_value_ops: + ext_list.append(ext_fun.op) + else: + ext_list.append(ext_fun.op_) + return ExtModule(*ext_list) + + +def check_ops_exist(): + ext_loader = pkgutil.find_loader('mmcv._ext') + return ext_loader is not None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/logging.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..4aa0e04bb9b3ab2a4bfbc4def50404ccbac2c6e6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/logging.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.distributed as dist + +logger_initialized = {} + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): + """Initialize and get a logger by name. + + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + # handle hierarchical names + # e.g., logger "a" is initialized, then logger "a.b" will skip the + # initialization since it is a child of "a". + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. 
+ # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + else: + rank = 0 + + # only rank 0 will add a FileHandler + if rank == 0 and log_file is not None: + # Here, the default behaviour of the official logger is 'a'. Thus, we + # provide an interface to change the file mode to the default + # behaviour. + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + + logger_initialized[name] = True + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == 'silent': + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + 'logger should be either a logging.Logger object, str, ' + f'"silent" or None, but got {type(logger)}') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/misc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..2c58d0d7fee9fe3d4519270ad8c1e998d0d8a18c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/misc.py @@ -0,0 +1,377 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections.abc +import functools +import itertools +import subprocess +import warnings +from collections import abc +from importlib import import_module +from inspect import getfullargspec +from itertools import repeat + + +# From PyTorch internals +def _ntuple(n): + + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def is_str(x): + """Whether the input is an string instance. + + Note: This method is deprecated since python 2 is no longer supported. + """ + return isinstance(x, str) + + +def import_modules_from_strings(imports, allow_failed_imports=False): + """Import modules from the given list of strings. + + Args: + imports (list | str | None): The given module names to be imported. + allow_failed_imports (bool): If True, the failed imports will return + None. Otherwise, an ImportError is raise. Default: False. + + Returns: + list[module] | module | None: The imported modules. + + Examples: + >>> osp, sys = import_modules_from_strings( + ... 
['os.path', 'sys'])
+        >>> import os.path as osp_
+        >>> import sys as sys_
+        >>> assert osp == osp_
+        >>> assert sys == sys_
+    """
+    if not imports:
+        return
+    single_import = False
+    if isinstance(imports, str):
+        single_import = True
+        imports = [imports]
+    if not isinstance(imports, list):
+        raise TypeError(
+            f'custom_imports must be a list but got type {type(imports)}')
+    imported = []
+    for imp in imports:
+        if not isinstance(imp, str):
+            raise TypeError(
+                f'{imp} is of type {type(imp)} and cannot be imported.')
+        try:
+            imported_tmp = import_module(imp)
+        except ImportError:
+            if allow_failed_imports:
+                warnings.warn(f'{imp} failed to import and is ignored.',
+                              UserWarning)
+                imported_tmp = None
+            else:
+                raise  # re-raise the original ImportError with its message
+        imported.append(imported_tmp)
+    if single_import:
+        imported = imported[0]
+    return imported
+
+
+def iter_cast(inputs, dst_type, return_type=None):
+    """Cast elements of an iterable object into some type.
+
+    Args:
+        inputs (Iterable): The input object.
+        dst_type (type): Destination type.
+        return_type (type, optional): If specified, the output object will be
+            converted to this type, otherwise an iterator.
+
+    Returns:
+        iterator or specified type: The converted object.
+    """
+    if not isinstance(inputs, abc.Iterable):
+        raise TypeError('inputs must be an iterable object')
+    if not isinstance(dst_type, type):
+        raise TypeError('"dst_type" must be a valid type')
+
+    out_iterable = map(dst_type, inputs)
+
+    if return_type is None:
+        return out_iterable
+    else:
+        return return_type(out_iterable)
+
+
+def list_cast(inputs, dst_type):
+    """Cast elements of an iterable object into a list of some type.
+
+    A partial method of :func:`iter_cast`.
+    """
+    return iter_cast(inputs, dst_type, return_type=list)
+
+
+def tuple_cast(inputs, dst_type):
+    """Cast elements of an iterable object into a tuple of some type.
+
+    A partial method of :func:`iter_cast`.
+    """
+    return iter_cast(inputs, dst_type, return_type=tuple)
+
+
+def is_seq_of(seq, expected_type, seq_type=None):
+    """Check whether it is a sequence of some type.
+
+    Args:
+        seq (Sequence): The sequence to be checked.
+        expected_type (type): Expected type of sequence items.
+        seq_type (type, optional): Expected sequence type.
+
+    Returns:
+        bool: Whether the sequence is valid.
+    """
+    if seq_type is None:
+        exp_seq_type = abc.Sequence
+    else:
+        assert isinstance(seq_type, type)
+        exp_seq_type = seq_type
+    if not isinstance(seq, exp_seq_type):
+        return False
+    for item in seq:
+        if not isinstance(item, expected_type):
+            return False
+    return True
+
+
+def is_list_of(seq, expected_type):
+    """Check whether it is a list of some type.
+
+    A partial method of :func:`is_seq_of`.
+    """
+    return is_seq_of(seq, expected_type, seq_type=list)
+
+
+def is_tuple_of(seq, expected_type):
+    """Check whether it is a tuple of some type.
+
+    A partial method of :func:`is_seq_of`.
+    """
+    return is_seq_of(seq, expected_type, seq_type=tuple)
+
+
+def slice_list(in_list, lens):
+    """Slice a list into several sub-lists by a list of given lengths.
+
+    Args:
+        in_list (list): The list to be sliced.
+        lens (int or list): The expected length of each output sub-list.
+
+    Returns:
+        list: A list of sliced sub-lists.
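+
+    Example:
+        >>> # illustrative example; the behaviour follows the
+        >>> # implementation below
+        >>> slice_list([1, 2, 3, 4, 5, 6], [2, 4])
+        [[1, 2], [3, 4, 5, 6]]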
+ """ + if isinstance(lens, int): + assert len(in_list) % lens == 0 + lens = [lens] * int(len(in_list) / lens) + if not isinstance(lens, list): + raise TypeError('"indices" must be an integer or a list of integers') + elif sum(lens) != len(in_list): + raise ValueError('sum of lens and list length does not ' + f'match: {sum(lens)} != {len(in_list)}') + out_list = [] + idx = 0 + for i in range(len(lens)): + out_list.append(in_list[idx:idx + lens[i]]) + idx += lens[i] + return out_list + + +def concat_list(in_list): + """Concatenate a list of list into a single list. + + Args: + in_list (list): The list of list to be merged. + + Returns: + list: The concatenated flat list. + """ + return list(itertools.chain(*in_list)) + + +def check_prerequisites( + prerequisites, + checker, + msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' + 'found, please install them first.'): # yapf: disable + """A decorator factory to check if prerequisites are satisfied. + + Args: + prerequisites (str of list[str]): Prerequisites to be checked. + checker (callable): The checker method that returns True if a + prerequisite is meet, False otherwise. + msg_tmpl (str): The message template with two variables. + + Returns: + decorator: A specific decorator. + """ + + def wrap(func): + + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + requirements = [prerequisites] if isinstance( + prerequisites, str) else prerequisites + missing = [] + for item in requirements: + if not checker(item): + missing.append(item) + if missing: + print(msg_tmpl.format(', '.join(missing), func.__name__)) + raise RuntimeError('Prerequisites not meet.') + else: + return func(*args, **kwargs) + + return wrapped_func + + return wrap + + +def _check_py_package(package): + try: + import_module(package) + except ImportError: + return False + else: + return True + + +def _check_executable(cmd): + if subprocess.call(f'which {cmd}', shell=True) != 0: + return False + else: + return True + + +def requires_package(prerequisites): + """A decorator to check if some python packages are installed. + + Example: + >>> @requires_package('numpy') + >>> func(arg1, args): + >>> return numpy.zeros(1) + array([0.]) + >>> @requires_package(['numpy', 'non_package']) + >>> func(arg1, args): + >>> return numpy.zeros(1) + ImportError + """ + return check_prerequisites(prerequisites, checker=_check_py_package) + + +def requires_executable(prerequisites): + """A decorator to check if some executable files are installed. + + Example: + >>> @requires_executable('ffmpeg') + >>> func(arg1, args): + >>> print(1) + 1 + """ + return check_prerequisites(prerequisites, checker=_check_executable) + + +def deprecated_api_warning(name_dict, cls_name=None): + """A decorator to check if some arguments are deprecate and try to replace + deprecate src_arg_name to dst_arg_name. + + Args: + name_dict(dict): + key (str): Deprecate argument names. + val (str): Expected argument names. + + Returns: + func: New function. 
+ """ + + def api_warning_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get name of the function + func_name = old_func.__name__ + if cls_name is not None: + func_name = f'{cls_name}.{func_name}' + if args: + arg_names = args_info.args[:len(args)] + for src_arg_name, dst_arg_name in name_dict.items(): + if src_arg_name in arg_names: + warnings.warn( + f'"{src_arg_name}" is deprecated in ' + f'`{func_name}`, please use "{dst_arg_name}" ' + 'instead') + arg_names[arg_names.index(src_arg_name)] = dst_arg_name + if kwargs: + for src_arg_name, dst_arg_name in name_dict.items(): + if src_arg_name in kwargs: + + assert dst_arg_name not in kwargs, ( + f'The expected behavior is to replace ' + f'the deprecated key `{src_arg_name}` to ' + f'new key `{dst_arg_name}`, but got them ' + f'in the arguments at the same time, which ' + f'is confusing. `{src_arg_name} will be ' + f'deprecated in the future, please ' + f'use `{dst_arg_name}` instead.') + + warnings.warn( + f'"{src_arg_name}" is deprecated in ' + f'`{func_name}`, please use "{dst_arg_name}" ' + 'instead') + kwargs[dst_arg_name] = kwargs.pop(src_arg_name) + + # apply converted arguments to the decorated method + output = old_func(*args, **kwargs) + return output + + return new_func + + return api_warning_wrapper + + +def is_method_overridden(method, base_class, derived_class): + """Check if a method of base class is overridden in derived class. + + Args: + method (str): the method name to check. + base_class (type): the class of the base class. + derived_class (type | Any): the class or instance of the derived class. + """ + assert isinstance(base_class, type), \ + "base_class doesn't accept instance, Please pass class instead." + + if not isinstance(derived_class, type): + derived_class = derived_class.__class__ + + base_method = getattr(base_class, method) + derived_method = getattr(derived_class, method) + return derived_method != base_method + + +def has_method(obj: object, method: str) -> bool: + """Check whether the object has a method. + + Args: + method (str): The method name to check. + obj (object): The object to check. + + Returns: + bool: True if the object has the method else False. + """ + return hasattr(obj, method) and callable(getattr(obj, method)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_jit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..61873f6dbb9b10ed972c90aa8faa321e3cb3249e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_jit.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os + +from .parrots_wrapper import TORCH_VERSION + +parrots_jit_option = os.getenv('PARROTS_JIT_OPTION') + +if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON': + from parrots.jit import pat as jit +else: + + def jit(func=None, + check_input=None, + full_shape=True, + derivate=False, + coderize=False, + optimize=False): + + def wrapper(func): + + def wrapper_inner(*args, **kargs): + return func(*args, **kargs) + + return wrapper_inner + + if func is None: + return wrapper + else: + return func + + +if TORCH_VERSION == 'parrots': + from parrots.utils.tester import skip_no_elena +else: + + def skip_no_elena(func): + + def wrapper(*args, **kargs): + return func(*args, **kargs) + + return wrapper diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_wrapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..93c97640d4b9ed088ca82cfe03e6efebfcfa9dbf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/parrots_wrapper.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from functools import partial + +import torch + +TORCH_VERSION = torch.__version__ + + +def is_rocm_pytorch() -> bool: + is_rocm = False + if TORCH_VERSION != 'parrots': + try: + from torch.utils.cpp_extension import ROCM_HOME + is_rocm = True if ((torch.version.hip is not None) and + (ROCM_HOME is not None)) else False + except ImportError: + pass + return is_rocm + + +def _get_cuda_home(): + if TORCH_VERSION == 'parrots': + from parrots.utils.build_extension import CUDA_HOME + else: + if is_rocm_pytorch(): + from torch.utils.cpp_extension import ROCM_HOME + CUDA_HOME = ROCM_HOME + else: + from torch.utils.cpp_extension import CUDA_HOME + return CUDA_HOME + + +def get_build_config(): + if TORCH_VERSION == 'parrots': + from parrots.config import get_build_info + return get_build_info() + else: + return torch.__config__.show() + + +def _get_conv(): + if TORCH_VERSION == 'parrots': + from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin + else: + from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin + return _ConvNd, _ConvTransposeMixin + + +def _get_dataloader(): + if TORCH_VERSION == 'parrots': + from torch.utils.data import DataLoader, PoolDataLoader + else: + from torch.utils.data import DataLoader + PoolDataLoader = DataLoader + return DataLoader, PoolDataLoader + + +def _get_extension(): + if TORCH_VERSION == 'parrots': + from parrots.utils.build_extension import BuildExtension, Extension + CppExtension = partial(Extension, cuda=False) + CUDAExtension = partial(Extension, cuda=True) + else: + from torch.utils.cpp_extension import (BuildExtension, CppExtension, + CUDAExtension) + return BuildExtension, CppExtension, CUDAExtension + + +def _get_pool(): + if TORCH_VERSION == 'parrots': + from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd, + _AdaptiveMaxPoolNd, _AvgPoolNd, + _MaxPoolNd) + else: + from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, + _AdaptiveMaxPoolNd, _AvgPoolNd, + _MaxPoolNd) + return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd + + +def _get_norm(): + if TORCH_VERSION == 'parrots': + from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm + SyncBatchNorm_ = torch.nn.SyncBatchNorm2d + else: + from torch.nn.modules.instancenorm import _InstanceNorm + from torch.nn.modules.batchnorm import _BatchNorm + SyncBatchNorm_ = 
torch.nn.SyncBatchNorm + return _BatchNorm, _InstanceNorm, SyncBatchNorm_ + + +_ConvNd, _ConvTransposeMixin = _get_conv() +DataLoader, PoolDataLoader = _get_dataloader() +BuildExtension, CppExtension, CUDAExtension = _get_extension() +_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm() +_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool() + + +class SyncBatchNorm(SyncBatchNorm_): + + def _check_input_dim(self, input): + if TORCH_VERSION == 'parrots': + if input.dim() < 2: + raise ValueError( + f'expected at least 2D input (got {input.dim()}D input)') + else: + super()._check_input_dim(input) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/path.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/path.py new file mode 100644 index 0000000000000000000000000000000000000000..7dab4b3041413b1432b0f434b8b14783097d33c6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/path.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +from pathlib import Path + +from .misc import is_str + + +def is_filepath(x): + return is_str(x) or isinstance(x, Path) + + +def fopen(filepath, *args, **kwargs): + if is_str(filepath): + return open(filepath, *args, **kwargs) + elif isinstance(filepath, Path): + return filepath.open(*args, **kwargs) + raise ValueError('`filepath` should be a string or a Path') + + +def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): + if not osp.isfile(filename): + raise FileNotFoundError(msg_tmpl.format(filename)) + + +def mkdir_or_exist(dir_name, mode=0o777): + if dir_name == '': + return + dir_name = osp.expanduser(dir_name) + os.makedirs(dir_name, mode=mode, exist_ok=True) + + +def symlink(src, dst, overwrite=True, **kwargs): + if os.path.lexists(dst) and overwrite: + os.remove(dst) + os.symlink(src, dst, **kwargs) + + +def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True): + """Scan a directory to find the interested files. + + Args: + dir_path (str | obj:`Path`): Path of the directory. + suffix (str | tuple(str), optional): File suffix that we are + interested in. Default: None. + recursive (bool, optional): If set to True, recursively scan the + directory. Default: False. + case_sensitive (bool, optional) : If set to False, ignore the case of + suffix. Default: True. + + Returns: + A generator for all the interested files with relative paths. 
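+
+    Example:
+        >>> # illustrative sketch; the directory contents are assumed
+        >>> for path in scandir('.', suffix=('.jpg', '.png'), recursive=True):
+        >>>     print(path)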
+ """ + if isinstance(dir_path, (str, Path)): + dir_path = str(dir_path) + else: + raise TypeError('"dir_path" must be a string or Path object') + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + if suffix is not None and not case_sensitive: + suffix = suffix.lower() if isinstance(suffix, str) else tuple( + item.lower() for item in suffix) + + root = dir_path + + def _scandir(dir_path, suffix, recursive, case_sensitive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + rel_path = osp.relpath(entry.path, root) + _rel_path = rel_path if case_sensitive else rel_path.lower() + if suffix is None or _rel_path.endswith(suffix): + yield rel_path + elif recursive and os.path.isdir(entry.path): + # scan recursively if entry.path is a directory + yield from _scandir(entry.path, suffix, recursive, + case_sensitive) + + return _scandir(dir_path, suffix, recursive, case_sensitive) + + +def find_vcs_root(path, markers=('.git', )): + """Finds the root directory (including itself) of specified markers. + + Args: + path (str): Path of directory or file. + markers (list[str], optional): List of file or directory names. + + Returns: + The directory contained one of the markers or None if not found. + """ + if osp.isfile(path): + path = osp.dirname(path) + + prev, cur = None, osp.abspath(osp.expanduser(path)) + while cur != prev: + if any(osp.exists(osp.join(cur, marker)) for marker in markers): + return cur + prev, cur = cur, osp.split(cur)[0] + return None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/progressbar.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/progressbar.py new file mode 100644 index 0000000000000000000000000000000000000000..0062f670dd94fa9da559ab26ef85517dcf5211c7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/progressbar.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import sys +from collections.abc import Iterable +from multiprocessing import Pool +from shutil import get_terminal_size + +from .timer import Timer + + +class ProgressBar: + """A progress bar which can print the progress.""" + + def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout): + self.task_num = task_num + self.bar_width = bar_width + self.completed = 0 + self.file = file + if start: + self.start() + + @property + def terminal_width(self): + width, _ = get_terminal_size() + return width + + def start(self): + if self.task_num > 0: + self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, ' + 'elapsed: 0s, ETA:') + else: + self.file.write('completed: 0, elapsed: 0s') + self.file.flush() + self.timer = Timer() + + def update(self, num_tasks=1): + assert num_tasks > 0 + self.completed += num_tasks + elapsed = self.timer.since_start() + if elapsed > 0: + fps = self.completed / elapsed + else: + fps = float('inf') + if self.task_num > 0: + percentage = self.completed / float(self.task_num) + eta = int(elapsed * (1 - percentage) / percentage + 0.5) + msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \ + f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \ + f'ETA: {eta:5}s' + + bar_width = min(self.bar_width, + int(self.terminal_width - len(msg)) + 2, + int(self.terminal_width * 0.6)) + bar_width = max(2, bar_width) + mark_width = int(bar_width * percentage) + bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width) + self.file.write(msg.format(bar_chars)) + else: + self.file.write( + f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,' + f' {fps:.1f} tasks/s') + self.file.flush() + + +def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs): + """Track the progress of tasks execution with a progress bar. + + Tasks are done with a simple for-loop. + + Args: + func (callable): The function to be applied to each task. + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + bar_width (int): Width of progress bar. + + Returns: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + prog_bar = ProgressBar(task_num, bar_width, file=file) + results = [] + for task in tasks: + results.append(func(task, **kwargs)) + prog_bar.update() + prog_bar.file.write('\n') + return results + + +def init_pool(process_num, initializer=None, initargs=None): + if initializer is None: + return Pool(process_num) + elif initargs is None: + return Pool(process_num, initializer) + else: + if not isinstance(initargs, tuple): + raise TypeError('"initargs" must be a tuple') + return Pool(process_num, initializer, initargs) + + +def track_parallel_progress(func, + tasks, + nproc, + initializer=None, + initargs=None, + bar_width=50, + chunksize=1, + skip_first=False, + keep_order=True, + file=sys.stdout): + """Track the progress of parallel task execution with a progress bar. + + The built-in :mod:`multiprocessing` module is used for process pools and + tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`. + + Args: + func (callable): The function to be applied to each task. + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + nproc (int): Process (worker) number. 
+ initializer (None or callable): Refer to :class:`multiprocessing.Pool` + for details. + initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for + details. + chunksize (int): Refer to :class:`multiprocessing.Pool` for details. + bar_width (int): Width of progress bar. + skip_first (bool): Whether to skip the first sample for each worker + when estimating fps, since the initialization step may takes + longer. + keep_order (bool): If True, :func:`Pool.imap` is used, otherwise + :func:`Pool.imap_unordered` is used. + + Returns: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + pool = init_pool(nproc, initializer, initargs) + start = not skip_first + task_num -= nproc * chunksize * int(skip_first) + prog_bar = ProgressBar(task_num, bar_width, start, file=file) + results = [] + if keep_order: + gen = pool.imap(func, tasks, chunksize) + else: + gen = pool.imap_unordered(func, tasks, chunksize) + for result in gen: + results.append(result) + if skip_first: + if len(results) < nproc * chunksize: + continue + elif len(results) == nproc * chunksize: + prog_bar.start() + continue + prog_bar.update() + prog_bar.file.write('\n') + pool.close() + pool.join() + return results + + +def track_iter_progress(tasks, bar_width=50, file=sys.stdout): + """Track the progress of tasks iteration or enumeration with a progress + bar. + + Tasks are yielded with a simple for-loop. + + Args: + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + bar_width (int): Width of progress bar. + + Yields: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + prog_bar = ProgressBar(task_num, bar_width, file=file) + for task in tasks: + yield task + prog_bar.update() + prog_bar.file.write('\n') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/registry.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..fa9df39bc9f3d8d568361e7250ab35468f2b74e0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/registry.py @@ -0,0 +1,315 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import warnings +from functools import partial + +from .misc import is_seq_of + + +def build_from_cfg(cfg, registry, default_args=None): + """Build a module from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + registry (:obj:`Registry`): The registry to search the type from. + default_args (dict, optional): Default initialization arguments. + + Returns: + object: The constructed object. 
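+
+    Example:
+        >>> # illustrative sketch mirroring the Registry example below
+        >>> MODELS = Registry('models')
+        >>> @MODELS.register_module()
+        >>> class ResNet:
+        >>>     pass
+        >>> resnet = build_from_cfg(dict(type='ResNet'), MODELS)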
+ """ + if not isinstance(cfg, dict): + raise TypeError(f'cfg must be a dict, but got {type(cfg)}') + if 'type' not in cfg: + if default_args is None or 'type' not in default_args: + raise KeyError( + '`cfg` or `default_args` must contain the key "type", ' + f'but got {cfg}\n{default_args}') + if not isinstance(registry, Registry): + raise TypeError('registry must be an mmcv.Registry object, ' + f'but got {type(registry)}') + if not (isinstance(default_args, dict) or default_args is None): + raise TypeError('default_args must be a dict or None, ' + f'but got {type(default_args)}') + + args = cfg.copy() + + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + + obj_type = args.pop('type') + if isinstance(obj_type, str): + obj_cls = registry.get(obj_type) + if obj_cls is None: + raise KeyError( + f'{obj_type} is not in the {registry.name} registry') + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + try: + return obj_cls(**args) + except Exception as e: + # Normal TypeError does not print class name. + raise type(e)(f'{obj_cls.__name__}: {e}') + + +class Registry: + """A registry to map strings to classes. + + Registered object could be built from registry. + Example: + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + >>> resnet = MODELS.build(dict(type='ResNet')) + + Please refer to + https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for + advanced usage. + + Args: + name (str): Registry name. + build_func(func, optional): Build function to construct instance from + Registry, func:`build_from_cfg` is used if neither ``parent`` or + ``build_func`` is specified. If ``parent`` is specified and + ``build_func`` is not given, ``build_func`` will be inherited + from ``parent``. Default: None. + parent (Registry, optional): Parent registry. The class registered in + children registry could be built from parent. Default: None. + scope (str, optional): The scope of registry. It is the key to search + for children registry. If not specified, scope will be the name of + the package where class is defined, e.g. mmdet, mmcls, mmseg. + Default: None. + """ + + def __init__(self, name, build_func=None, parent=None, scope=None): + self._name = name + self._module_dict = dict() + self._children = dict() + self._scope = self.infer_scope() if scope is None else scope + + # self.build_func will be set with the following priority: + # 1. build_func + # 2. parent.build_func + # 3. build_from_cfg + if build_func is None: + if parent is not None: + self.build_func = parent.build_func + else: + self.build_func = build_from_cfg + else: + self.build_func = build_func + if parent is not None: + assert isinstance(parent, Registry) + parent._add_children(self) + self.parent = parent + else: + self.parent = None + + def __len__(self): + return len(self._module_dict) + + def __contains__(self, key): + return self.get(key) is not None + + def __repr__(self): + format_str = self.__class__.__name__ + \ + f'(name={self._name}, ' \ + f'items={self._module_dict})' + return format_str + + @staticmethod + def infer_scope(): + """Infer the scope of registry. + + The name of the package where registry is defined will be returned. + + Example: + # in mmdet/models/backbone/resnet.py + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + The scope of ``ResNet`` will be ``mmdet``. 
+ + + Returns: + scope (str): The inferred scope name. + """ + # inspect.stack() trace where this function is called, the index-2 + # indicates the frame where `infer_scope()` is called + filename = inspect.getmodule(inspect.stack()[2][0]).__name__ + split_filename = filename.split('.') + return split_filename[0] + + @staticmethod + def split_scope_key(key): + """Split scope and key. + + The first scope will be split from key. + + Examples: + >>> Registry.split_scope_key('mmdet.ResNet') + 'mmdet', 'ResNet' + >>> Registry.split_scope_key('ResNet') + None, 'ResNet' + + Return: + scope (str, None): The first scope. + key (str): The remaining key. + """ + split_index = key.find('.') + if split_index != -1: + return key[:split_index], key[split_index + 1:] + else: + return None, key + + @property + def name(self): + return self._name + + @property + def scope(self): + return self._scope + + @property + def module_dict(self): + return self._module_dict + + @property + def children(self): + return self._children + + def get(self, key): + """Get the registry record. + + Args: + key (str): The class name in string format. + + Returns: + class: The corresponding class. + """ + scope, real_key = self.split_scope_key(key) + if scope is None or scope == self._scope: + # get from self + if real_key in self._module_dict: + return self._module_dict[real_key] + else: + # get from self._children + if scope in self._children: + return self._children[scope].get(real_key) + else: + # goto root + parent = self.parent + while parent.parent is not None: + parent = parent.parent + return parent.get(key) + + def build(self, *args, **kwargs): + return self.build_func(*args, **kwargs, registry=self) + + def _add_children(self, registry): + """Add children for a registry. + + The ``registry`` will be added as children based on its scope. + The parent registry could build objects from children registry. + + Example: + >>> models = Registry('models') + >>> mmdet_models = Registry('models', parent=models) + >>> @mmdet_models.register_module() + >>> class ResNet: + >>> pass + >>> resnet = models.build(dict(type='mmdet.ResNet')) + """ + + assert isinstance(registry, Registry) + assert registry.scope is not None + assert registry.scope not in self.children, \ + f'scope {registry.scope} exists in {self.name} registry' + self.children[registry.scope] = registry + + def _register_module(self, module_class, module_name=None, force=False): + if not inspect.isclass(module_class): + raise TypeError('module must be a class, ' + f'but got {type(module_class)}') + + if module_name is None: + module_name = module_class.__name__ + if isinstance(module_name, str): + module_name = [module_name] + for name in module_name: + if not force and name in self._module_dict: + raise KeyError(f'{name} is already registered ' + f'in {self.name}') + self._module_dict[name] = module_class + + def deprecated_register_module(self, cls=None, force=False): + warnings.warn( + 'The old API of register_module(module, force=False) ' + 'is deprecated and will be removed, please use the new API ' + 'register_module(name=None, force=False, module=None) instead.') + if cls is None: + return partial(self.deprecated_register_module, force=force) + self._register_module(cls, force=force) + return cls + + def register_module(self, name=None, force=False, module=None): + """Register a module. + + A record will be added to `self._module_dict`, whose key is the class + name or the specified name, and value is the class itself. 
+        It can be used as a decorator or a normal function.
+
+        Example:
+            >>> backbones = Registry('backbone')
+            >>> @backbones.register_module()
+            >>> class ResNet:
+            >>>     pass
+
+            >>> backbones = Registry('backbone')
+            >>> @backbones.register_module(name='mnet')
+            >>> class MobileNet:
+            >>>     pass
+
+            >>> backbones = Registry('backbone')
+            >>> class ResNet:
+            >>>     pass
+            >>> backbones.register_module(ResNet)
+
+        Args:
+            name (str | None): The module name to be registered. If not
+                specified, the class name will be used.
+            force (bool, optional): Whether to override an existing class with
+                the same name. Default: False.
+            module (type): Module class to be registered.
+        """
+        if not isinstance(force, bool):
+            raise TypeError(f'force must be a boolean, but got {type(force)}')
+        # NOTE: This is a workaround to be compatible with the old api,
+        # while it may introduce unexpected bugs.
+        if isinstance(name, type):
+            return self.deprecated_register_module(name, force=force)
+
+        # raise the error ahead of time
+        if not (name is None or isinstance(name, str) or is_seq_of(name, str)):
+            raise TypeError(
+                'name must be either of None, an instance of str or a sequence'
+                f' of str, but got {type(name)}')
+
+        # use it as a normal method: x.register_module(module=SomeClass)
+        if module is not None:
+            self._register_module(
+                module_class=module, module_name=name, force=force)
+            return module
+
+        # use it as a decorator: @x.register_module()
+        def _register(cls):
+            self._register_module(
+                module_class=cls, module_name=name, force=force)
+            return cls
+
+        return _register
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/testing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..a27f936da8ec14bac18562ede0a79d476d82f797
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/testing.py
@@ -0,0 +1,140 @@
+# Copyright (c) Open-MMLab.
+import sys
+from collections.abc import Iterable
+from runpy import run_path
+from shlex import split
+from typing import Any, Dict, List
+from unittest.mock import patch
+
+
+def check_python_script(cmd):
+    """Run the python cmd script with `__main__`. The difference from
+    `os.system` is that this function executes code in the current process, so
+    that it can be tracked by coverage tools. Currently it supports two forms:
+
+    - ./tests/data/scripts/hello.py zz
+    - python tests/data/scripts/hello.py zz
+    """
+    args = split(cmd)
+    if args[0] == 'python':
+        args = args[1:]
+    with patch.object(sys, 'argv', args):
+        run_path(args[0], run_name='__main__')
+
+
+def _any(judge_result):
+    """Since built-in ``any`` works only when the elements of the iterable are
+    not themselves iterable, implement the function recursively."""
+    if not isinstance(judge_result, Iterable):
+        return judge_result
+
+    try:
+        for element in judge_result:
+            if _any(element):
+                return True
+    except TypeError:
+        # Maybe encounter the case: torch.tensor(True) | torch.tensor(False)
+        if judge_result:
+            return True
+    return False
+
+
+def assert_dict_contains_subset(dict_obj: Dict[Any, Any],
+                                expected_subset: Dict[Any, Any]) -> bool:
+    """Check if the dict_obj contains the expected_subset.
+
+    Args:
+        dict_obj (Dict[Any, Any]): Dict object to be checked.
+        expected_subset (Dict[Any, Any]): Subset expected to be contained in
+            dict_obj.
+
+    Returns:
+        bool: Whether the dict_obj contains the expected_subset.
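+
+    Example:
+        >>> # illustrative example of the containment check
+        >>> assert_dict_contains_subset({'a': 1, 'b': 2}, {'a': 1})
+        True
+        >>> assert_dict_contains_subset({'a': 1, 'b': 2}, {'a': 2})
+        False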
+ """ + + for key, value in expected_subset.items(): + if key not in dict_obj.keys() or _any(dict_obj[key] != value): + return False + return True + + +def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool: + """Check if attribute of class object is correct. + + Args: + obj (object): Class object to be checked. + expected_attrs (Dict[str, Any]): Dict of the expected attrs. + + Returns: + bool: Whether the attribute of class object is correct. + """ + for attr, value in expected_attrs.items(): + if not hasattr(obj, attr) or _any(getattr(obj, attr) != value): + return False + return True + + +def assert_dict_has_keys(obj: Dict[str, Any], + expected_keys: List[str]) -> bool: + """Check if the obj has all the expected_keys. + + Args: + obj (Dict[str, Any]): Object to be checked. + expected_keys (List[str]): Keys expected to contained in the keys of + the obj. + + Returns: + bool: Whether the obj has the expected keys. + """ + return set(expected_keys).issubset(set(obj.keys())) + + +def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool: + """Check if target_keys is equal to result_keys. + + Args: + result_keys (List[str]): Result keys to be checked. + target_keys (List[str]): Target keys to be checked. + + Returns: + bool: Whether target_keys is equal to result_keys. + """ + return set(result_keys) == set(target_keys) + + +def assert_is_norm_layer(module) -> bool: + """Check if the module is a norm layer. + + Args: + module (nn.Module): The module to be checked. + + Returns: + bool: Whether the module is a norm layer. + """ + from .parrots_wrapper import _BatchNorm, _InstanceNorm + from torch.nn import GroupNorm, LayerNorm + norm_layer_candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm) + return isinstance(module, norm_layer_candidates) + + +def assert_params_all_zeros(module) -> bool: + """Check if the parameters of the module is all zeros. + + Args: + module (nn.Module): The module to be checked. + + Returns: + bool: Whether the parameters of the module is all zeros. + """ + weight_data = module.weight.data + is_weight_zero = weight_data.allclose( + weight_data.new_zeros(weight_data.size())) + + if hasattr(module, 'bias') and module.bias is not None: + bias_data = module.bias.data + is_bias_zero = bias_data.allclose( + bias_data.new_zeros(bias_data.size())) + else: + is_bias_zero = True + + return is_weight_zero and is_bias_zero diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/timer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..5907e0edfdee7ab002e41d151e4c4386e1d9f294 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/timer.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from time import time + + +class TimerError(Exception): + + def __init__(self, message): + self.message = message + super(TimerError, self).__init__(message) + + +class Timer: + """A flexible Timer class. 
+ + :Example: + + >>> import time + >>> import custom_mmpkg.custom_mmcv as mmcv + >>> with mmcv.Timer(): + >>> # simulate a code block that will run for 1s + >>> time.sleep(1) + 1.000 + >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'): + >>> # simulate a code block that will run for 1s + >>> time.sleep(1) + it takes 1.0 seconds + >>> timer = mmcv.Timer() + >>> time.sleep(0.5) + >>> print(timer.since_start()) + 0.500 + >>> time.sleep(0.5) + >>> print(timer.since_last_check()) + 0.500 + >>> print(timer.since_start()) + 1.000 + """ + + def __init__(self, start=True, print_tmpl=None): + self._is_running = False + self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}' + if start: + self.start() + + @property + def is_running(self): + """bool: indicate whether the timer is running""" + return self._is_running + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + print(self.print_tmpl.format(self.since_last_check())) + self._is_running = False + + def start(self): + """Start the timer.""" + if not self._is_running: + self._t_start = time() + self._is_running = True + self._t_last = time() + + def since_start(self): + """Total time since the timer is started. + + Returns (float): Time in seconds. + """ + if not self._is_running: + raise TimerError('timer is not running') + self._t_last = time() + return self._t_last - self._t_start + + def since_last_check(self): + """Time since the last checking. + + Either :func:`since_start` or :func:`since_last_check` is a checking + operation. + + Returns (float): Time in seconds. + """ + if not self._is_running: + raise TimerError('timer is not running') + dur = time() - self._t_last + self._t_last = time() + return dur + + +_g_timers = {} # global timers + + +def check_time(timer_id): + """Add check points in a single line. + + This method is suitable for running a task on a list of items. A timer will + be registered when the method is called for the first time. + + :Example: + + >>> import time + >>> import custom_mmpkg.custom_mmcv as mmcv + >>> for i in range(1, 6): + >>> # simulate a code block + >>> time.sleep(i) + >>> mmcv.check_time('task1') + 2.000 + 3.000 + 4.000 + 5.000 + + Args: + timer_id (str): Timer identifier. + """ + if timer_id not in _g_timers: + _g_timers[timer_id] = Timer() + return 0 + else: + return _g_timers[timer_id].since_last_check() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/trace.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/trace.py new file mode 100644 index 0000000000000000000000000000000000000000..3907185bf82775e8ed4c2bf4cd4667c5c623d188 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/trace.py @@ -0,0 +1,23 @@ +import warnings + +import torch + +from custom_mmpkg.custom_mmcv.utils import digit_version + + +def is_jit_tracing() -> bool: + if (torch.__version__ != 'parrots' + and digit_version(torch.__version__) >= digit_version('1.6.0')): + on_trace = torch.jit.is_tracing() + # In PyTorch 1.6, torch.jit.is_tracing has a bug. + # Refers to https://github.com/pytorch/pytorch/issues/42448 + if isinstance(on_trace, bool): + return on_trace + else: + return torch._C._is_tracing() + else: + warnings.warn( + 'torch.jit.is_tracing is only supported after v1.6.0. ' + 'Therefore is_tracing returns False automatically. 
Please ' + 'set on_trace manually if you are using trace.', UserWarning) + return False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/version_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/version_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..963c45a2e8a86a88413ab6c18c22481fb9831985 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/utils/version_utils.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import subprocess +import warnings + +from packaging.version import parse + + +def digit_version(version_str: str, length: int = 4): + """Convert a version string into a tuple of integers. + + This method is usually used for comparing two versions. For pre-release + versions: alpha < beta < rc. + + Args: + version_str (str): The version string. + length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int]: The version info in digits (integers). + """ + assert 'parrots' not in version_str + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +def _minimal_ext_cmd(cmd): + # construct minimal environment + env = {} + for k in ['SYSTEMROOT', 'PATH', 'HOME']: + v = os.environ.get(k) + if v is not None: + env[k] = v + # LANGUAGE is used on win32 + env['LANGUAGE'] = 'C' + env['LANG'] = 'C' + env['LC_ALL'] = 'C' + out = subprocess.Popen( + cmd, stdout=subprocess.PIPE, env=env).communicate()[0] + return out + + +def get_git_hash(fallback='unknown', digits=None): + """Get the git hash of the current repo. + + Args: + fallback (str, optional): The fallback string when git hash is + unavailable. Defaults to 'unknown'. + digits (int, optional): kept digits of the hash. Defaults to None, + meaning all digits are kept. + + Returns: + str: Git commit hash. + """ + + if digits is not None and not isinstance(digits, int): + raise TypeError('digits must be None or an integer') + + try: + out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) + sha = out.strip().decode('ascii') + if digits is not None: + sha = sha[:digits] + except OSError: + sha = fallback + + return sha diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/version.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1cce4e50bd692d4002e3cac3c545a3fb2efe95d0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/version.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +__version__ = '1.3.17' + + +def parse_version_info(version_str: str, length: int = 4) -> tuple: + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. 
+ length (int): The maximum number of version levels. Default: 4. + + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into + (2, 0, 0, 0, 'rc', 1) (when length is set to 4). + """ + from packaging.version import parse + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + release.extend(list(version.pre)) + elif version.is_postrelease: + release.extend(list(version.post)) + else: + release.extend([0, 0]) + return tuple(release) + + +version_info = tuple(int(x) for x in __version__.split('.')[:3]) + +__all__ = ['__version__', 'version_info', 'parse_version_info'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..73199b01dec52820dc6ca0139903536344d5a1eb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .io import Cache, VideoReader, frames2video +from .optflow import (dequantize_flow, flow_from_bytes, flow_warp, flowread, + flowwrite, quantize_flow, sparse_flow_from_bytes) +from .processing import concat_video, convert_video, cut_video, resize_video + +__all__ = [ + 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', + 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', + 'dequantize_flow', 'flow_warp', 'flow_from_bytes', 'sparse_flow_from_bytes' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/io.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/io.py new file mode 100644 index 0000000000000000000000000000000000000000..f9c20cee37aec3e36413300b88fbdb0156bfa8a4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/io.py @@ -0,0 +1,318 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from collections import OrderedDict + +import cv2 +from cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT, + CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH, + CAP_PROP_POS_FRAMES, VideoWriter_fourcc) + +from custom_mmpkg.custom_mmcv.utils import (check_file_exist, mkdir_or_exist, scandir, + track_progress) + + +class Cache: + + def __init__(self, capacity): + self._cache = OrderedDict() + self._capacity = int(capacity) + if capacity <= 0: + raise ValueError('capacity must be a positive integer') + + @property + def capacity(self): + return self._capacity + + @property + def size(self): + return len(self._cache) + + def put(self, key, val): + if key in self._cache: + return + if len(self._cache) >= self.capacity: + self._cache.popitem(last=False) + self._cache[key] = val + + def get(self, key, default=None): + val = self._cache[key] if key in self._cache else default + return val + + +class VideoReader: + """Video class with similar usage to a list object. + + This video warpper class provides convenient apis to access frames. + There exists an issue of OpenCV's VideoCapture class that jumping to a + certain frame may be inaccurate. It is fixed in this class by checking + the position after jumping each time. 
+ Cache is used when decoding videos. So if the same frame is visited for + the second time, there is no need to decode again if it is stored in the + cache. + + :Example: + + >>> import custom_mmpkg.custom_mmcv as mmcv + >>> v = mmcv.VideoReader('sample.mp4') + >>> len(v) # get the total frame number with `len()` + 120 + >>> for img in v: # v is iterable + >>> mmcv.imshow(img) + >>> v[5] # get the 6th frame + """ + + def __init__(self, filename, cache_capacity=10): + # Check whether the video path is a url + if not filename.startswith(('https://', 'http://')): + check_file_exist(filename, 'Video file not found: ' + filename) + self._vcap = cv2.VideoCapture(filename) + assert cache_capacity > 0 + self._cache = Cache(cache_capacity) + self._position = 0 + # get basic info + self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH)) + self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT)) + self._fps = self._vcap.get(CAP_PROP_FPS) + self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT)) + self._fourcc = self._vcap.get(CAP_PROP_FOURCC) + + @property + def vcap(self): + """:obj:`cv2.VideoCapture`: The raw VideoCapture object.""" + return self._vcap + + @property + def opened(self): + """bool: Indicate whether the video is opened.""" + return self._vcap.isOpened() + + @property + def width(self): + """int: Width of video frames.""" + return self._width + + @property + def height(self): + """int: Height of video frames.""" + return self._height + + @property + def resolution(self): + """tuple: Video resolution (width, height).""" + return (self._width, self._height) + + @property + def fps(self): + """float: FPS of the video.""" + return self._fps + + @property + def frame_cnt(self): + """int: Total frames of the video.""" + return self._frame_cnt + + @property + def fourcc(self): + """str: "Four character code" of the video.""" + return self._fourcc + + @property + def position(self): + """int: Current cursor position, indicating frame decoded.""" + return self._position + + def _get_real_position(self): + return int(round(self._vcap.get(CAP_PROP_POS_FRAMES))) + + def _set_real_position(self, frame_id): + self._vcap.set(CAP_PROP_POS_FRAMES, frame_id) + pos = self._get_real_position() + for _ in range(frame_id - pos): + self._vcap.read() + self._position = frame_id + + def read(self): + """Read the next frame. + + If the next frame have been decoded before and in the cache, then + return it directly, otherwise decode, cache and return it. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. + """ + # pos = self._position + if self._cache: + img = self._cache.get(self._position) + if img is not None: + ret = True + else: + if self._position != self._get_real_position(): + self._set_real_position(self._position) + ret, img = self._vcap.read() + if ret: + self._cache.put(self._position, img) + else: + ret, img = self._vcap.read() + if ret: + self._position += 1 + return img + + def get_frame(self, frame_id): + """Get frame by index. + + Args: + frame_id (int): Index of the expected frame, 0-based. + + Returns: + ndarray or None: Return the frame if successful, otherwise None. 
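+
+        Example:
+            >>> # illustrative sketch; 'sample.mp4' is a placeholder path
+            >>> v = VideoReader('sample.mp4')
+            >>> img = v.get_frame(10)  # decode and cache the 11th frame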
+ """ + if frame_id < 0 or frame_id >= self._frame_cnt: + raise IndexError( + f'"frame_id" must be between 0 and {self._frame_cnt - 1}') + if frame_id == self._position: + return self.read() + if self._cache: + img = self._cache.get(frame_id) + if img is not None: + self._position = frame_id + 1 + return img + self._set_real_position(frame_id) + ret, img = self._vcap.read() + if ret: + if self._cache: + self._cache.put(self._position, img) + self._position += 1 + return img + + def current_frame(self): + """Get the current frame (frame that is just visited). + + Returns: + ndarray or None: If the video is fresh, return None, otherwise + return the frame. + """ + if self._position == 0: + return None + return self._cache.get(self._position - 1) + + def cvt2frames(self, + frame_dir, + file_start=0, + filename_tmpl='{:06d}.jpg', + start=0, + max_num=0, + show_progress=True): + """Convert a video to frame images. + + Args: + frame_dir (str): Output directory to store all the frame images. + file_start (int): Filenames will start from the specified number. + filename_tmpl (str): Filename template with the index as the + placeholder. + start (int): The starting frame index. + max_num (int): Maximum number of frames to be written. + show_progress (bool): Whether to show a progress bar. + """ + mkdir_or_exist(frame_dir) + if max_num == 0: + task_num = self.frame_cnt - start + else: + task_num = min(self.frame_cnt - start, max_num) + if task_num <= 0: + raise ValueError('start must be less than total frame number') + if start > 0: + self._set_real_position(start) + + def write_frame(file_idx): + img = self.read() + if img is None: + return + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + cv2.imwrite(filename, img) + + if show_progress: + track_progress(write_frame, range(file_start, + file_start + task_num)) + else: + for i in range(task_num): + write_frame(file_start + i) + + def __len__(self): + return self.frame_cnt + + def __getitem__(self, index): + if isinstance(index, slice): + return [ + self.get_frame(i) + for i in range(*index.indices(self.frame_cnt)) + ] + # support negative indexing + if index < 0: + index += self.frame_cnt + if index < 0: + raise IndexError('index out of range') + return self.get_frame(index) + + def __iter__(self): + self._set_real_position(0) + return self + + def __next__(self): + img = self.read() + if img is not None: + return img + else: + raise StopIteration + + next = __next__ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._vcap.release() + + +def frames2video(frame_dir, + video_file, + fps=30, + fourcc='XVID', + filename_tmpl='{:06d}.jpg', + start=0, + end=0, + show_progress=True): + """Read the frame images from a directory and join them as a video. + + Args: + frame_dir (str): The directory containing video frames. + video_file (str): Output filename. + fps (float): FPS of the output video. + fourcc (str): Fourcc of the output video, this should be compatible + with the output file type. + filename_tmpl (str): Filename template with the index as the variable. + start (int): Starting frame index. + end (int): Ending frame index. + show_progress (bool): Whether to show a progress bar. 
+ """ + if end == 0: + ext = filename_tmpl.split('.')[-1] + end = len([name for name in scandir(frame_dir, ext)]) + first_file = osp.join(frame_dir, filename_tmpl.format(start)) + check_file_exist(first_file, 'The start frame not found: ' + first_file) + img = cv2.imread(first_file) + height, width = img.shape[:2] + resolution = (width, height) + vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, + resolution) + + def write_frame(file_idx): + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + img = cv2.imread(filename) + vwriter.write(img) + + if show_progress: + track_progress(write_frame, range(start, end)) + else: + for i in range(start, end): + write_frame(i) + vwriter.release() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/optflow.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/optflow.py new file mode 100644 index 0000000000000000000000000000000000000000..71c7cc1c48a896191e36d159680df29ac1d70dc4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/optflow.py @@ -0,0 +1,254 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import cv2 +import numpy as np + +from custom_mmpkg.custom_mmcv.arraymisc import dequantize, quantize +from custom_mmpkg.custom_mmcv.image import imread, imwrite +from custom_mmpkg.custom_mmcv.utils import is_str + + +def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs): + """Read an optical flow map. + + Args: + flow_or_path (ndarray or str): A flow map or filepath. + quantize (bool): whether to read quantized pair, if set to True, + remaining args will be passed to :func:`dequantize_flow`. + concat_axis (int): The axis that dx and dy are concatenated, + can be either 0 or 1. Ignored if quantize is False. + + Returns: + ndarray: Optical flow represented as a (h, w, 2) numpy array + """ + if isinstance(flow_or_path, np.ndarray): + if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2): + raise ValueError(f'Invalid flow with shape {flow_or_path.shape}') + return flow_or_path + elif not is_str(flow_or_path): + raise TypeError(f'"flow_or_path" must be a filename or numpy array, ' + f'not {type(flow_or_path)}') + + if not quantize: + with open(flow_or_path, 'rb') as f: + try: + header = f.read(4).decode('utf-8') + except Exception: + raise IOError(f'Invalid flow file: {flow_or_path}') + else: + if header != 'PIEH': + raise IOError(f'Invalid flow file: {flow_or_path}, ' + 'header does not contain PIEH') + + w = np.fromfile(f, np.int32, 1).squeeze() + h = np.fromfile(f, np.int32, 1).squeeze() + flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2)) + else: + assert concat_axis in [0, 1] + cat_flow = imread(flow_or_path, flag='unchanged') + if cat_flow.ndim != 2: + raise IOError( + f'{flow_or_path} is not a valid quantized flow file, ' + f'its dimension is {cat_flow.ndim}.') + assert cat_flow.shape[concat_axis] % 2 == 0 + dx, dy = np.split(cat_flow, 2, axis=concat_axis) + flow = dequantize_flow(dx, dy, *args, **kwargs) + + return flow.astype(np.float32) + + +def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs): + """Write optical flow to file. + + If the flow is not quantized, it will be saved as a .flo file losslessly, + otherwise a jpeg image which is lossy but of much smaller size. (dx and dy + will be concatenated horizontally into a single image if quantize is True.) + + Args: + flow (ndarray): (h, w, 2) array of optical flow. 
+ filename (str): Output filepath. + quantize (bool): Whether to quantize the flow and save it to 2 jpeg + images. If set to True, remaining args will be passed to + :func:`quantize_flow`. + concat_axis (int): The axis that dx and dy are concatenated, + can be either 0 or 1. Ignored if quantize is False. + """ + if not quantize: + with open(filename, 'wb') as f: + f.write('PIEH'.encode('utf-8')) + np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f) + flow = flow.astype(np.float32) + flow.tofile(f) + f.flush() + else: + assert concat_axis in [0, 1] + dx, dy = quantize_flow(flow, *args, **kwargs) + dxdy = np.concatenate((dx, dy), axis=concat_axis) + imwrite(dxdy, filename) + + +def quantize_flow(flow, max_val=0.02, norm=True): + """Quantize flow to [0, 255]. + + After this step, the size of flow will be much smaller, and can be + dumped as jpeg images. + + Args: + flow (ndarray): (h, w, 2) array of optical flow. + max_val (float): Maximum value of flow, values beyond + [-max_val, max_val] will be truncated. + norm (bool): Whether to divide flow values by image width/height. + + Returns: + tuple[ndarray]: Quantized dx and dy. + """ + h, w, _ = flow.shape + dx = flow[..., 0] + dy = flow[..., 1] + if norm: + dx = dx / w # avoid inplace operations + dy = dy / h + # use 255 levels instead of 256 to make sure 0 is 0 after dequantization. + flow_comps = [ + quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy] + ] + return tuple(flow_comps) + + +def dequantize_flow(dx, dy, max_val=0.02, denorm=True): + """Recover from quantized flow. + + Args: + dx (ndarray): Quantized dx. + dy (ndarray): Quantized dy. + max_val (float): Maximum value used when quantizing. + denorm (bool): Whether to multiply flow values with width/height. + + Returns: + ndarray: Dequantized flow. + """ + assert dx.shape == dy.shape + assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1) + + dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]] + + if denorm: + dx *= dx.shape[1] + dy *= dx.shape[0] + flow = np.dstack((dx, dy)) + return flow + + +def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'): + """Use flow to warp img. + + Args: + img (ndarray, float or uint8): Image to be warped. + flow (ndarray, float): Optical Flow. + filling_value (int): The missing pixels will be set with filling_value. + interpolate_mode (str): bilinear -> Bilinear Interpolation; + nearest -> Nearest Neighbor. + + Returns: + ndarray: Warped image with the same shape of img + """ + warnings.warn('This function is just for prototyping and cannot ' + 'guarantee the computational efficiency.') + assert flow.ndim == 3, 'Flow must be in 3D arrays.' 
+    height = flow.shape[0]
+    width = flow.shape[1]
+    channels = img.shape[2]
+
+    output = np.ones(
+        (height, width, channels), dtype=img.dtype) * filling_value
+
+    grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)
+    dx = grid[:, :, 0] + flow[:, :, 1]
+    dy = grid[:, :, 1] + flow[:, :, 0]
+    sx = np.floor(dx).astype(int)
+    sy = np.floor(dy).astype(int)
+    valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)
+
+    if interpolate_mode == 'nearest':
+        output[valid, :] = img[dx[valid].round().astype(int),
+                               dy[valid].round().astype(int), :]
+    elif interpolate_mode == 'bilinear':
+        # dirty workaround for integer positions
+        eps_ = 1e-6
+        dx, dy = dx + eps_, dy + eps_
+        left_top_ = img[np.floor(dx[valid]).astype(int),
+                        np.floor(dy[valid]).astype(int), :] * (
+                            np.ceil(dx[valid]) - dx[valid])[:, None] * (
+                                np.ceil(dy[valid]) - dy[valid])[:, None]
+        left_down_ = img[np.ceil(dx[valid]).astype(int),
+                         np.floor(dy[valid]).astype(int), :] * (
+                             dx[valid] - np.floor(dx[valid]))[:, None] * (
+                                 np.ceil(dy[valid]) - dy[valid])[:, None]
+        right_top_ = img[np.floor(dx[valid]).astype(int),
+                         np.ceil(dy[valid]).astype(int), :] * (
+                             np.ceil(dx[valid]) - dx[valid])[:, None] * (
+                                 dy[valid] - np.floor(dy[valid]))[:, None]
+        right_down_ = img[np.ceil(dx[valid]).astype(int),
+                          np.ceil(dy[valid]).astype(int), :] * (
+                              dx[valid] - np.floor(dx[valid]))[:, None] * (
+                                  dy[valid] - np.floor(dy[valid]))[:, None]
+        output[valid, :] = left_top_ + left_down_ + right_top_ + right_down_
+    else:
+        raise NotImplementedError(
+            'We only support interpolation modes of nearest and bilinear, '
+            f'but got {interpolate_mode}.')
+    return output.astype(img.dtype)
+
+
+def flow_from_bytes(content):
+    """Read dense optical flow from bytes.
+
+    .. note::
+        This function works for the FlyingChairs, FlyingThings3D, Sintel and
+        FlyingChairsOcc datasets, but cannot load data from ChairsSDHom.
+
+    Args:
+        content (bytes): Optical flow bytes read from files or other streams.
+
+    Returns:
+        ndarray: Loaded optical flow with the shape (H, W, 2).
+    """
+
+    # header in first 4 bytes
+    header = content[:4]
+    if header.decode('utf-8') != 'PIEH':
+        raise Exception('Flow file header does not contain PIEH')
+    # width in second 4 bytes
+    width = np.frombuffer(content[4:], np.int32, 1).squeeze()
+    # height in third 4 bytes
+    height = np.frombuffer(content[8:], np.int32, 1).squeeze()
+    # after the first 12 bytes, all remaining bytes are flow values
+    flow = np.frombuffer(content[12:], np.float32, width * height * 2).reshape(
+        (height, width, 2))
+
+    return flow
+
+
+def sparse_flow_from_bytes(content):
+    """Read the optical flow in KITTI datasets from bytes.
+
+    This function is adapted from the way RAFT loads the KITTI datasets.
+
+    Args:
+        content (bytes): Optical flow bytes read from files or other streams.
+
+    Returns:
+        Tuple(ndarray, ndarray): Loaded optical flow with the shape (H, W, 2)
+            and flow valid mask with the shape (H, W).
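+
+    :Example: (an illustrative sketch; ``kitti_flow.png`` is a placeholder
+        for any 16-bit KITTI-format flow image)
+
+        >>> with open('kitti_flow.png', 'rb') as f:
+        ...     content = f.read()
+        >>> flow, valid = sparse_flow_from_bytes(content)
+        >>> flow.shape, valid.shape  # (H, W, 2) and (H, W)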
+ """ # nopa + + content = np.frombuffer(content, np.uint8) + flow = cv2.imdecode(content, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR) + flow = flow[:, :, ::-1].astype(np.float32) + # flow shape (H, W, 2) valid shape (H, W) + flow, valid = flow[:, :, :2], flow[:, :, 2] + flow = (flow - 2**15) / 64.0 + return flow, valid diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/processing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/processing.py new file mode 100644 index 0000000000000000000000000000000000000000..72865d9041f5d8a9717b41b02beca67fa622fd9a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/video/processing.py @@ -0,0 +1,160 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import subprocess +import tempfile + +from custom_mmpkg.custom_mmcv.utils import requires_executable + + +@requires_executable('ffmpeg') +def convert_video(in_file, + out_file, + print_cmd=False, + pre_options='', + **kwargs): + """Convert a video with ffmpeg. + + This provides a general api to ffmpeg, the executed command is:: + + `ffmpeg -y -i ` + + Options(kwargs) are mapped to ffmpeg commands with the following rules: + + - key=val: "-key val" + - key=True: "-key" + - key=False: "" + + Args: + in_file (str): Input video filename. + out_file (str): Output video filename. + pre_options (str): Options appears before "-i ". + print_cmd (bool): Whether to print the final ffmpeg command. + """ + options = [] + for k, v in kwargs.items(): + if isinstance(v, bool): + if v: + options.append(f'-{k}') + elif k == 'log_level': + assert v in [ + 'quiet', 'panic', 'fatal', 'error', 'warning', 'info', + 'verbose', 'debug', 'trace' + ] + options.append(f'-loglevel {v}') + else: + options.append(f'-{k} {v}') + cmd = f'ffmpeg -y {pre_options} -i {in_file} {" ".join(options)} ' \ + f'{out_file}' + if print_cmd: + print(cmd) + subprocess.call(cmd, shell=True) + + +@requires_executable('ffmpeg') +def resize_video(in_file, + out_file, + size=None, + ratio=None, + keep_ar=False, + log_level='info', + print_cmd=False): + """Resize a video. + + Args: + in_file (str): Input video filename. + out_file (str): Output video filename. + size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1). + ratio (tuple or float): Expected resize ratio, (2, 0.5) means + (w*2, h*0.5). + keep_ar (bool): Whether to keep original aspect ratio. + log_level (str): Logging level of ffmpeg. + print_cmd (bool): Whether to print the final ffmpeg command. + """ + if size is None and ratio is None: + raise ValueError('expected size or ratio must be specified') + if size is not None and ratio is not None: + raise ValueError('size and ratio cannot be specified at the same time') + options = {'log_level': log_level} + if size: + if not keep_ar: + options['vf'] = f'scale={size[0]}:{size[1]}' + else: + options['vf'] = f'scale=w={size[0]}:h={size[1]}:' \ + 'force_original_aspect_ratio=decrease' + else: + if not isinstance(ratio, tuple): + ratio = (ratio, ratio) + options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"' + convert_video(in_file, out_file, print_cmd, **options) + + +@requires_executable('ffmpeg') +def cut_video(in_file, + out_file, + start=None, + end=None, + vcodec=None, + acodec=None, + log_level='info', + print_cmd=False): + """Cut a clip from a video. + + Args: + in_file (str): Input video filename. + out_file (str): Output video filename. + start (None or float): Start time (in seconds). 
+ end (None or float): End time (in seconds). + vcodec (None or str): Output video codec, None for unchanged. + acodec (None or str): Output audio codec, None for unchanged. + log_level (str): Logging level of ffmpeg. + print_cmd (bool): Whether to print the final ffmpeg command. + """ + options = {'log_level': log_level} + if vcodec is None: + options['vcodec'] = 'copy' + if acodec is None: + options['acodec'] = 'copy' + if start: + options['ss'] = start + else: + start = 0 + if end: + options['t'] = end - start + convert_video(in_file, out_file, print_cmd, **options) + + +@requires_executable('ffmpeg') +def concat_video(video_list, + out_file, + vcodec=None, + acodec=None, + log_level='info', + print_cmd=False): + """Concatenate multiple videos into a single one. + + Args: + video_list (list): A list of video filenames + out_file (str): Output video filename + vcodec (None or str): Output video codec, None for unchanged + acodec (None or str): Output audio codec, None for unchanged + log_level (str): Logging level of ffmpeg. + print_cmd (bool): Whether to print the final ffmpeg command. + """ + tmp_filehandler, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True) + with open(tmp_filename, 'w') as f: + for filename in video_list: + f.write(f'file {osp.abspath(filename)}\n') + options = {'log_level': log_level} + if vcodec is None: + options['vcodec'] = 'copy' + if acodec is None: + options['acodec'] = 'copy' + convert_video( + tmp_filename, + out_file, + print_cmd, + pre_options='-f concat -safe 0', + **options) + os.close(tmp_filehandler) + os.remove(tmp_filename) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..835df136bdcf69348281d22914d41aa84cdf92b1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .color import Color, color_val +from .image import imshow, imshow_bboxes, imshow_det_bboxes +from .optflow import flow2rgb, flowshow, make_color_wheel + +__all__ = [ + 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes', + 'flowshow', 'flow2rgb', 'make_color_wheel' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/color.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/color.py new file mode 100644 index 0000000000000000000000000000000000000000..2bff8a9dc94fc5ff8dbd5425faeea165332ac10a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/color.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from enum import Enum + +import numpy as np + +from custom_mmpkg.custom_mmcv.utils import is_str + + +class Color(Enum): + """An enum that defines common colors. + + Contains red, green, blue, cyan, yellow, magenta, white and black. + """ + red = (0, 0, 255) + green = (0, 255, 0) + blue = (255, 0, 0) + cyan = (255, 255, 0) + yellow = (0, 255, 255) + magenta = (255, 0, 255) + white = (255, 255, 255) + black = (0, 0, 0) + + +def color_val(color): + """Convert various input to color tuples. + + Args: + color (:obj:`Color`/str/tuple/int/ndarray): Color inputs + + Returns: + tuple[int]: A tuple of 3 integers indicating BGR channels. 
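+
+    :Example: (illustrative; every accepted input form maps to a BGR tuple)
+
+        >>> color_val('red')
+        (0, 0, 255)
+        >>> color_val(Color.green)
+        (0, 255, 0)
+        >>> color_val(128)  # a single int becomes a gray value
+        (128, 128, 128)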
+ """ + if is_str(color): + return Color[color].value + elif isinstance(color, Color): + return color.value + elif isinstance(color, tuple): + assert len(color) == 3 + for channel in color: + assert 0 <= channel <= 255 + return color + elif isinstance(color, int): + assert 0 <= color <= 255 + return color, color, color + elif isinstance(color, np.ndarray): + assert color.ndim == 1 and color.size == 3 + assert np.all((color >= 0) & (color <= 255)) + color = color.astype(np.uint8) + return tuple(color) + else: + raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/image.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/image.py new file mode 100644 index 0000000000000000000000000000000000000000..3f77c6d1033dd2a5968cedf3a5fe77d91cd948b8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/image.py @@ -0,0 +1,152 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 +import numpy as np + +from custom_mmpkg.custom_mmcv.image import imread, imwrite +from .color import color_val + + +def imshow(img, win_name='', wait_time=0): + """Show an image. + + Args: + img (str or ndarray): The image to be displayed. + win_name (str): The window name. + wait_time (int): Value of waitKey param. + """ + cv2.imshow(win_name, imread(img)) + if wait_time == 0: # prevent from hanging if windows was closed + while True: + ret = cv2.waitKey(1) + + closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1 + # if user closed window or if some key pressed + if closed or ret != -1: + break + else: + ret = cv2.waitKey(wait_time) + + +def imshow_bboxes(img, + bboxes, + colors='green', + top_k=-1, + thickness=1, + show=True, + win_name='', + wait_time=0, + out_file=None): + """Draw bboxes on an image. + + Args: + img (str or ndarray): The image to be displayed. + bboxes (list or ndarray): A list of ndarray of shape (k, 4). + colors (list[str or tuple or Color]): A list of colors. + top_k (int): Plot the first k bboxes only if set positive. + thickness (int): Thickness of lines. + show (bool): Whether to show the image. + win_name (str): The window name. + wait_time (int): Value of waitKey param. + out_file (str, optional): The filename to write the image. + + Returns: + ndarray: The image with bboxes drawn on it. + """ + img = imread(img) + img = np.ascontiguousarray(img) + + if isinstance(bboxes, np.ndarray): + bboxes = [bboxes] + if not isinstance(colors, list): + colors = [colors for _ in range(len(bboxes))] + colors = [color_val(c) for c in colors] + assert len(bboxes) == len(colors) + + for i, _bboxes in enumerate(bboxes): + _bboxes = _bboxes.astype(np.int32) + if top_k <= 0: + _top_k = _bboxes.shape[0] + else: + _top_k = min(top_k, _bboxes.shape[0]) + for j in range(_top_k): + left_top = (_bboxes[j, 0], _bboxes[j, 1]) + right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) + cv2.rectangle( + img, left_top, right_bottom, colors[i], thickness=thickness) + + if show: + imshow(img, win_name, wait_time) + if out_file is not None: + imwrite(img, out_file) + return img + + +def imshow_det_bboxes(img, + bboxes, + labels, + class_names=None, + score_thr=0, + bbox_color='green', + text_color='green', + thickness=1, + font_scale=0.5, + show=True, + win_name='', + wait_time=0, + out_file=None): + """Draw bboxes and class labels (with scores) on an image. + + Args: + img (str or ndarray): The image to be displayed. 
+ bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or + (n, 5). + labels (ndarray): Labels of bboxes. + class_names (list[str]): Names of each classes. + score_thr (float): Minimum score of bboxes to be shown. + bbox_color (str or tuple or :obj:`Color`): Color of bbox lines. + text_color (str or tuple or :obj:`Color`): Color of texts. + thickness (int): Thickness of lines. + font_scale (float): Font scales of texts. + show (bool): Whether to show the image. + win_name (str): The window name. + wait_time (int): Value of waitKey param. + out_file (str or None): The filename to write the image. + + Returns: + ndarray: The image with bboxes drawn on it. + """ + assert bboxes.ndim == 2 + assert labels.ndim == 1 + assert bboxes.shape[0] == labels.shape[0] + assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5 + img = imread(img) + img = np.ascontiguousarray(img) + + if score_thr > 0: + assert bboxes.shape[1] == 5 + scores = bboxes[:, -1] + inds = scores > score_thr + bboxes = bboxes[inds, :] + labels = labels[inds] + + bbox_color = color_val(bbox_color) + text_color = color_val(text_color) + + for bbox, label in zip(bboxes, labels): + bbox_int = bbox.astype(np.int32) + left_top = (bbox_int[0], bbox_int[1]) + right_bottom = (bbox_int[2], bbox_int[3]) + cv2.rectangle( + img, left_top, right_bottom, bbox_color, thickness=thickness) + label_text = class_names[ + label] if class_names is not None else f'cls {label}' + if len(bbox) > 4: + label_text += f'|{bbox[-1]:.02f}' + cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2), + cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color) + + if show: + imshow(img, win_name, wait_time) + if out_file is not None: + imwrite(img, out_file) + return img diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/optflow.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/optflow.py new file mode 100644 index 0000000000000000000000000000000000000000..8b13b411f7161205eba2653c357a84f8916a353a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmcv/visualization/optflow.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from __future__ import division + +import numpy as np + +from custom_mmpkg.custom_mmcv.image import rgb2bgr +from custom_mmpkg.custom_mmcv.video import flowread +from .image import imshow + + +def flowshow(flow, win_name='', wait_time=0): + """Show optical flow. + + Args: + flow (ndarray or str): The optical flow to be displayed. + win_name (str): The window name. + wait_time (int): Value of waitKey param. + """ + flow = flowread(flow) + flow_img = flow2rgb(flow) + imshow(rgb2bgr(flow_img), win_name, wait_time) + + +def flow2rgb(flow, color_wheel=None, unknown_thr=1e6): + """Convert flow map to RGB image. + + Args: + flow (ndarray): Array of optical flow. + color_wheel (ndarray or None): Color wheel used to map flow field to + RGB colorspace. Default color wheel will be used if not specified. + unknown_thr (str): Values above this threshold will be marked as + unknown and thus ignored. + + Returns: + ndarray: RGB image that can be visualized. 
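+
+    :Example: (a minimal sketch on synthetic flow; real flow maps would come
+        from :func:`flowread`)
+
+        >>> import numpy as np
+        >>> flow = np.random.rand(4, 4, 2).astype(np.float32)
+        >>> flow2rgb(flow).shape  # same spatial size, three color channels
+        (4, 4, 3)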
+ """ + assert flow.ndim == 3 and flow.shape[-1] == 2 + if color_wheel is None: + color_wheel = make_color_wheel() + assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3 + num_bins = color_wheel.shape[0] + + dx = flow[:, :, 0].copy() + dy = flow[:, :, 1].copy() + + ignore_inds = ( + np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) | + (np.abs(dy) > unknown_thr)) + dx[ignore_inds] = 0 + dy[ignore_inds] = 0 + + rad = np.sqrt(dx**2 + dy**2) + if np.any(rad > np.finfo(float).eps): + max_rad = np.max(rad) + dx /= max_rad + dy /= max_rad + + rad = np.sqrt(dx**2 + dy**2) + angle = np.arctan2(-dy, -dx) / np.pi + + bin_real = (angle + 1) / 2 * (num_bins - 1) + bin_left = np.floor(bin_real).astype(int) + bin_right = (bin_left + 1) % num_bins + w = (bin_real - bin_left.astype(np.float32))[..., None] + flow_img = (1 - + w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :] + small_ind = rad <= 1 + flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind]) + flow_img[np.logical_not(small_ind)] *= 0.75 + + flow_img[ignore_inds, :] = 0 + + return flow_img + + +def make_color_wheel(bins=None): + """Build a color wheel. + + Args: + bins(list or tuple, optional): Specify the number of bins for each + color range, corresponding to six ranges: red -> yellow, + yellow -> green, green -> cyan, cyan -> blue, blue -> magenta, + magenta -> red. [15, 6, 4, 11, 13, 6] is used for default + (see Middlebury). + + Returns: + ndarray: Color wheel of shape (total_bins, 3). + """ + if bins is None: + bins = [15, 6, 4, 11, 13, 6] + assert len(bins) == 6 + + RY, YG, GC, CB, BM, MR = tuple(bins) + + ry = [1, np.arange(RY) / RY, 0] + yg = [1 - np.arange(YG) / YG, 1, 0] + gc = [0, 1, np.arange(GC) / GC] + cb = [0, 1 - np.arange(CB) / CB, 1] + bm = [np.arange(BM) / BM, 0, 1] + mr = [1, 0, 1 - np.arange(MR) / MR] + + num_bins = RY + YG + GC + CB + BM + MR + + color_wheel = np.zeros((3, num_bins), dtype=np.float32) + + col = 0 + for i, color in enumerate([ry, yg, gc, cb, bm, mr]): + for j in range(3): + color_wheel[j, col:col + bins[i]] = color[j] + col += bins[i] + + return color_wheel.T diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..170724be38de42daf2bc1a1910e181d68818f165 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/__init__.py @@ -0,0 +1,9 @@ +from .inference import inference_segmentor, init_segmentor, show_result_pyplot +from .test import multi_gpu_test, single_gpu_test +from .train import get_root_logger, set_random_seed, train_segmentor + +__all__ = [ + 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', + 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', + 'show_result_pyplot' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/inference.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0ee57d61e59f67926c7be6a139d057805026b816 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/inference.py @@ -0,0 +1,137 @@ +import matplotlib.pyplot as plt +import custom_mmpkg.custom_mmcv as mmcv +import torch +from custom_mmpkg.custom_mmcv.parallel import collate, scatter +from custom_mmpkg.custom_mmcv.runner import load_checkpoint + +from 
custom_mmpkg.custom_mmseg.datasets.pipelines import Compose
+from custom_mmpkg.custom_mmseg.models import build_segmentor
+
+
+def init_segmentor(config, checkpoint=None, device="cpu"):
+    """Initialize a segmentor from config file.
+
+    Args:
+        config (str or :obj:`mmcv.Config`): Config file path or the config
+            object.
+        checkpoint (str, optional): Checkpoint path. If left as None, the model
+            will not load any weights.
+        device (str, optional): CPU/CUDA device option. Default: 'cpu'.
+            Use a device string such as 'cuda:0' to load the model on GPU.
+    Returns:
+        nn.Module: The constructed segmentor.
+    """
+    if isinstance(config, str):
+        config = mmcv.Config.fromfile(config)
+    elif not isinstance(config, mmcv.Config):
+        raise TypeError('config must be a filename or Config object, '
+                        'but got {}'.format(type(config)))
+    config.model.pretrained = None
+    config.model.train_cfg = None
+    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
+    if checkpoint is not None:
+        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
+        model.CLASSES = checkpoint['meta']['CLASSES']
+        model.PALETTE = checkpoint['meta']['PALETTE']
+    model.cfg = config  # save the config in the model for convenience
+    model.to(device)
+    model.eval()
+    return model
+
+
+class LoadImage:
+    """A simple pipeline to load image."""
+
+    def __call__(self, results):
+        """Call function to load images into results.
+
+        Args:
+            results (dict): A result dict containing the file name
+                of the image to be read.
+
+        Returns:
+            dict: ``results`` will be returned containing the loaded image.
+        """
+
+        if isinstance(results['img'], str):
+            results['filename'] = results['img']
+            results['ori_filename'] = results['img']
+        else:
+            results['filename'] = None
+            results['ori_filename'] = None
+        img = mmcv.imread(results['img'])
+        results['img'] = img
+        results['img_shape'] = img.shape
+        results['ori_shape'] = img.shape
+        return results
+
+
+def inference_segmentor(model, img):
+    """Inference an image with the segmentor.
+
+    Args:
+        model (nn.Module): The loaded segmentor.
+        img (str or ndarray): Either an image file or a loaded image.
+
+    Returns:
+        (list[Tensor]): The segmentation result.
+    """
+    cfg = model.cfg
+    device = next(model.parameters()).device  # model device
+    # build the data pipeline
+    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
+    test_pipeline = Compose(test_pipeline)
+    # prepare data
+    data = dict(img=img)
+    data = test_pipeline(data)
+    data = collate([data], samples_per_gpu=1)
+    if next(model.parameters()).is_cuda:
+        # scatter to specified GPU
+        data = scatter(data, [device])[0]
+    else:
+        data['img'][0] = data['img'][0].to(device)
+        data['img_metas'] = [i.data[0] for i in data['img_metas']]
+
+    # forward the model
+    with torch.no_grad():
+        result = model(return_loss=False, rescale=True, **data)
+    return result
+
+
+def show_result_pyplot(model,
+                       img,
+                       result,
+                       palette=None,
+                       fig_size=(15, 10),
+                       opacity=0.5,
+                       title='',
+                       block=True):
+    """Draw the segmentation results on the image and return it.
+
+    In this vendored copy the pyplot display is commented out, so the drawn
+    image is returned (in RGB order) instead of being shown.
+
+    Args:
+        model (nn.Module): The loaded segmentor.
+        img (str or np.ndarray): Image filename or loaded image.
+        result (list): The segmentation result.
+        palette (list[list[int]] | None): The palette of segmentation
+            map. If None is given, random palette will be generated.
+            Default: None
+        fig_size (tuple): Figure size of the pyplot figure.
+        opacity (float): Opacity of painted segmentation map.
+            Default 0.5.
+            Must be in (0, 1] range.
+        title (str): The title of pyplot figure.
+            Default is ''.
+ block (bool): Whether to block the pyplot figure. + Default is True. + """ + if hasattr(model, 'module'): + model = model.module + img = model.show_result( + img, result, palette=palette, show=False, opacity=opacity) + # plt.figure(figsize=fig_size) + # plt.imshow(mmcv.bgr2rgb(img)) + # plt.title(title) + # plt.tight_layout() + # plt.show(block=block) + return mmcv.bgr2rgb(img) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/test.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/test.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0078b5b52eca53ddb0c4bb28adb7b1afe59728 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/test.py @@ -0,0 +1,238 @@ +import os.path as osp +import pickle +import shutil +import tempfile + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +import torch +import torch.distributed as dist +from custom_mmpkg.custom_mmcv.image import tensor2imgs +from custom_mmpkg.custom_mmcv.runner import get_dist_info + + +def np2tmp(array, temp_file_name=None): + """Save ndarray to local numpy file. + + Args: + array (ndarray): Ndarray to save. + temp_file_name (str): Numpy file name. If 'temp_file_name=None', this + function will generate a file name with tempfile.NamedTemporaryFile + to save ndarray. Default: None. + + Returns: + str: The numpy file name. + """ + + if temp_file_name is None: + temp_file_name = tempfile.NamedTemporaryFile( + suffix='.npy', delete=False).name + np.save(temp_file_name, array) + return temp_file_name + + +def single_gpu_test(model, + data_loader, + show=False, + out_dir=None, + efficient_test=False, + opacity=0.5): + """Test with single GPU. + + Args: + model (nn.Module): Model to be tested. + data_loader (utils.data.Dataloader): Pytorch data loader. + show (bool): Whether show results during inference. Default: False. + out_dir (str, optional): If specified, the results will be dumped into + the directory to save output results. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Default: False. + opacity(float): Opacity of painted segmentation map. + Default 0.5. + Must be in (0, 1] range. + Returns: + list: The prediction results. 
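+
+    :Example: (an illustrative sketch; ``model`` is assumed to be an
+        already-built segmentor wrapped in ``MMDataParallel`` and ``loader``
+        a test dataloader that iterates the dataset exactly once)
+
+        >>> results = single_gpu_test(model, loader, out_dir='./vis')
+        >>> len(results) == len(loader.dataset)
+        True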
+ """ + + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, **data) + + if show or out_dir: + img_tensor = data['img'][0] + img_metas = data['img_metas'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = mmcv.imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + model.module.show_result( + img_show, + result, + palette=dataset.PALETTE, + show=show, + out_file=out_file, + opacity=opacity) + + if isinstance(result, list): + if efficient_test: + result = [np2tmp(_) for _ in result] + results.extend(result) + else: + if efficient_test: + result = np2tmp(result) + results.append(result) + + batch_size = len(result) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, + data_loader, + tmpdir=None, + gpu_collect=False, + efficient_test=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (utils.data.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Default: False. + + Returns: + list: The prediction results. 
+ """ + + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + + if isinstance(result, list): + if efficient_test: + result = [np2tmp(_) for _ in result] + results.extend(result) + else: + if efficient_test: + result = np2tmp(result) + results.append(result) + + if rank == 0: + batch_size = data['img'][0].size(0) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + """Collect results with CPU.""" + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + tmpdir = tempfile.mkdtemp() + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + """Collect results with GPU.""" + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_list.append( + pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/train.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/train.py new file mode 100644 index 
0000000000000000000000000000000000000000..61eb4768b375cf8e3cd5323d5533221e8238c4c8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/apis/train.py @@ -0,0 +1,116 @@ +import random +import warnings + +import numpy as np +import torch +from custom_mmpkg.custom_mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from custom_mmpkg.custom_mmcv.runner import build_optimizer, build_runner + +from custom_mmpkg.custom_mmseg.core import DistEvalHook, EvalHook +from custom_mmpkg.custom_mmseg.datasets import build_dataloader, build_dataset +from custom_mmpkg.custom_mmseg.utils import get_root_logger + + +def set_random_seed(seed, deterministic=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + """ + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def train_segmentor(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """Launch segmentor training.""" + logger = get_root_logger(cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + data_loaders = [ + build_dataloader( + ds, + cfg.data.samples_per_gpu, + cfg.data.workers_per_gpu, + # cfg.gpus will be ignored if distributed + len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + drop_last=True) for ds in dataset + ] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = MMDataParallel( + model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if cfg.get('runner') is None: + cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters} + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # register hooks + runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + + # an ugly walkaround to make the .log and .log.json filenames the same + runner.timestamp = timestamp + + # register eval hooks + if validate: + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + val_dataloader = build_dataloader( + val_dataset, + samples_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_hook = DistEvalHook if distributed else EvalHook + runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW') + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + 
runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..965605587211b7bf0bd6bc3acdbb33dd49cab023 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/__init__.py @@ -0,0 +1,3 @@ +from .evaluation import * # noqa: F401, F403 +from .seg import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7cc4b23413a0639e9de00eeb0bf600632d2c6cd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/__init__.py @@ -0,0 +1,8 @@ +from .class_names import get_classes, get_palette +from .eval_hooks import DistEvalHook, EvalHook +from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou + +__all__ = [ + 'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore', + 'eval_metrics', 'get_classes', 'get_palette' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/class_names.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/class_names.py new file mode 100644 index 0000000000000000000000000000000000000000..3e79082966879d06da504a8105646257f103a07c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/class_names.py @@ -0,0 +1,152 @@ +import custom_mmpkg.custom_mmcv as mmcv + + +def cityscapes_classes(): + """Cityscapes class names for external use.""" + return [ + 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle' + ] + + +def ade_classes(): + """ADE20K class names for external use.""" + return [ + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 
'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag' + ] + + +def voc_classes(): + """Pascal VOC class names for external use.""" + return [ + 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor' + ] + + +def cityscapes_palette(): + """Cityscapes palette for external use.""" + return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], + [0, 0, 230], [119, 11, 32]] + + +def ade_palette(): + """ADE20K palette for external use.""" + return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + + +def voc_palette(): + """Pascal VOC palette for external use.""" + return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 
128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + +dataset_aliases = { + 'cityscapes': ['cityscapes'], + 'ade': ['ade', 'ade20k'], + 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_classes()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels + + +def get_palette(dataset): + """Get class palette (RGB) of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if dataset in alias2name: + labels = eval(alias2name[dataset] + '_palette()') + else: + raise ValueError(f'Unrecognized dataset: {dataset}') + else: + raise TypeError(f'dataset must a str, but got {type(dataset)}') + return labels diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/eval_hooks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/eval_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..684fd6c291bae6255cd835ba3d32c1cacca536c8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/eval_hooks.py @@ -0,0 +1,109 @@ +import os.path as osp + +from custom_mmpkg.custom_mmcv.runner import DistEvalHook as _DistEvalHook +from custom_mmpkg.custom_mmcv.runner import EvalHook as _EvalHook + + +class EvalHook(_EvalHook): + """Single GPU EvalHook, with efficient test support. + + Args: + by_epoch (bool): Determine perform evaluation by epoch or by iteration. + If set to True, it will perform by epoch. Otherwise, by iteration. + Default: False. + efficient_test (bool): Whether save the results as local numpy files to + save CPU memory during evaluation. Default: False. + Returns: + list: The prediction results. + """ + + greater_keys = ['mIoU', 'mAcc', 'aAcc'] + + def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs): + super().__init__(*args, by_epoch=by_epoch, **kwargs) + self.efficient_test = efficient_test + + def after_train_iter(self, runner): + """After train epoch hook. + + Override default ``single_gpu_test``. + """ + if self.by_epoch or not self.every_n_iters(runner, self.interval): + return + from custom_mmpkg.custom_mmseg.apis import single_gpu_test + runner.log_buffer.clear() + results = single_gpu_test( + runner.model, + self.dataloader, + show=False, + efficient_test=self.efficient_test) + self.evaluate(runner, results) + + def after_train_epoch(self, runner): + """After train epoch hook. + + Override default ``single_gpu_test``. + """ + if not self.by_epoch or not self.every_n_epochs(runner, self.interval): + return + from custom_mmpkg.custom_mmseg.apis import single_gpu_test + runner.log_buffer.clear() + results = single_gpu_test(runner.model, self.dataloader, show=False) + self.evaluate(runner, results) + + +class DistEvalHook(_DistEvalHook): + """Distributed EvalHook, with efficient test support. + + Args: + by_epoch (bool): Determine perform evaluation by epoch or by iteration. 
+            If set to True, it will perform by epoch. Otherwise, by iteration.
+            Default: False.
+        efficient_test (bool): Whether to save the results as local numpy
+            files to save CPU memory during evaluation. Default: False.
+    Returns:
+        list: The prediction results.
+    """
+
+    greater_keys = ['mIoU', 'mAcc', 'aAcc']
+
+    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
+        super().__init__(*args, by_epoch=by_epoch, **kwargs)
+        self.efficient_test = efficient_test
+
+    def after_train_iter(self, runner):
+        """After train iteration hook.
+
+        Override default ``multi_gpu_test``.
+        """
+        if self.by_epoch or not self.every_n_iters(runner, self.interval):
+            return
+        from custom_mmpkg.custom_mmseg.apis import multi_gpu_test
+        runner.log_buffer.clear()
+        results = multi_gpu_test(
+            runner.model,
+            self.dataloader,
+            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
+            gpu_collect=self.gpu_collect,
+            efficient_test=self.efficient_test)
+        if runner.rank == 0:
+            print('\n')
+            self.evaluate(runner, results)
+
+    def after_train_epoch(self, runner):
+        """After train epoch hook.
+
+        Override default ``multi_gpu_test``.
+        """
+        if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
+            return
+        from custom_mmpkg.custom_mmseg.apis import multi_gpu_test
+        runner.log_buffer.clear()
+        results = multi_gpu_test(
+            runner.model,
+            self.dataloader,
+            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
+            gpu_collect=self.gpu_collect)
+        if runner.rank == 0:
+            print('\n')
+            self.evaluate(runner, results)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/metrics.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..db4b29f6c277ce43e4a0f39c3898a2938e11dba8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/evaluation/metrics.py
@@ -0,0 +1,326 @@
+from collections import OrderedDict
+
+import custom_mmpkg.custom_mmcv as mmcv
+import numpy as np
+import torch
+
+
+def f_score(precision, recall, beta=1):
+    """Calculate the f-score value.
+
+    Args:
+        precision (float | torch.Tensor): The precision value.
+        recall (float | torch.Tensor): The recall value.
+        beta (int): Determines the weight of recall in the combined score.
+            Default: 1.
+
+    Returns:
+        torch.Tensor: The f-score value.
+    """
+    score = (1 + beta**2) * (precision * recall) / (
+        (beta**2 * precision) + recall)
+    return score
+
+
+def intersect_and_union(pred_label,
+                        label,
+                        num_classes,
+                        ignore_index,
+                        label_map=dict(),
+                        reduce_zero_label=False):
+    """Calculate intersection and Union.
+
+    Args:
+        pred_label (ndarray | str): Prediction segmentation map
+            or prediction result filename.
+        label (ndarray | str): Ground truth segmentation map
+            or label filename.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        label_map (dict): Mapping old labels to new labels. The parameter will
+            work only when label is str. Default: dict().
+        reduce_zero_label (bool): Whether to ignore the zero label. The
+            parameter will work only when label is str. Default: False.
+
+    Returns:
+        torch.Tensor: The intersection of prediction and ground truth
+            histogram on all classes.
+        torch.Tensor: The union of prediction and ground truth histogram on
+            all classes.
+        torch.Tensor: The prediction histogram on all classes.
+        torch.Tensor: The ground truth histogram on all classes.
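+
+    :Example: (a worked toy case; two classes, no ignored pixels)
+
+        >>> import numpy as np
+        >>> pred = np.array([[0, 1], [1, 1]])
+        >>> gt = np.array([[0, 1], [0, 1]])
+        >>> inter, union, _, _ = intersect_and_union(pred, gt, 2, 255)
+        >>> inter.tolist(), union.tolist()  # per-class IoU would be [0.5, 2/3]
+        ([1.0, 2.0], [2.0, 3.0])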
+ """ + + if isinstance(pred_label, str): + pred_label = torch.from_numpy(np.load(pred_label)) + else: + pred_label = torch.from_numpy((pred_label)) + + if isinstance(label, str): + label = torch.from_numpy( + mmcv.imread(label, flag='unchanged', backend='pillow')) + else: + label = torch.from_numpy(label) + + if label_map is not None: + for old_id, new_id in label_map.items(): + label[label == old_id] = new_id + if reduce_zero_label: + label[label == 0] = 255 + label = label - 1 + label[label == 254] = 255 + + mask = (label != ignore_index) + pred_label = pred_label[mask] + label = label[mask] + + intersect = pred_label[pred_label == label] + area_intersect = torch.histc( + intersect.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_pred_label = torch.histc( + pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_label = torch.histc( + label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_union = area_pred_label + area_label - area_intersect + return area_intersect, area_union, area_pred_label, area_label + + +def total_intersect_and_union(results, + gt_seg_maps, + num_classes, + ignore_index, + label_map=dict(), + reduce_zero_label=False): + """Calculate Total Intersection and Union. + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + + Returns: + ndarray: The intersection of prediction and ground truth histogram + on all classes. + ndarray: The union of prediction and ground truth histogram on all + classes. + ndarray: The prediction histogram on all classes. + ndarray: The ground truth histogram on all classes. + """ + num_imgs = len(results) + assert len(gt_seg_maps) == num_imgs + total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_union = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_label = torch.zeros((num_classes, ), dtype=torch.float64) + for i in range(num_imgs): + area_intersect, area_union, area_pred_label, area_label = \ + intersect_and_union( + results[i], gt_seg_maps[i], num_classes, ignore_index, + label_map, reduce_zero_label) + total_area_intersect += area_intersect + total_area_union += area_union + total_area_pred_label += area_pred_label + total_area_label += area_label + return total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label + + +def mean_iou(results, + gt_seg_maps, + num_classes, + ignore_index, + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False): + """Calculate Mean Intersection and Union (mIoU) + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. 
Default: dict().
+        reduce_zero_label (bool): Whether ignore zero label. Default: False.
+
+    Returns:
+        dict[str, float | ndarray]:
+            float: Overall accuracy on all images.
+            ndarray: Per category accuracy, shape (num_classes, ).
+            ndarray: Per category IoU, shape (num_classes, ).
+    """
+    iou_result = eval_metrics(
+        results=results,
+        gt_seg_maps=gt_seg_maps,
+        num_classes=num_classes,
+        ignore_index=ignore_index,
+        metrics=['mIoU'],
+        nan_to_num=nan_to_num,
+        label_map=label_map,
+        reduce_zero_label=reduce_zero_label)
+    return iou_result
+
+
+def mean_dice(results,
+              gt_seg_maps,
+              num_classes,
+              ignore_index,
+              nan_to_num=None,
+              label_map=dict(),
+              reduce_zero_label=False):
+    """Calculate Mean Dice (mDice).
+
+    Args:
+        results (list[ndarray] | list[str]): List of prediction segmentation
+            maps or list of prediction result filenames.
+        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
+            segmentation maps or list of label filenames.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        nan_to_num (int, optional): If specified, NaN values will be replaced
+            by the numbers defined by the user. Default: None.
+        label_map (dict): Mapping old labels to new labels. Default: dict().
+        reduce_zero_label (bool): Whether ignore zero label. Default: False.
+
+    Returns:
+        dict[str, float | ndarray]: Default metrics.
+            float: Overall accuracy on all images.
+            ndarray: Per category accuracy, shape (num_classes, ).
+            ndarray: Per category dice, shape (num_classes, ).
+    """
+
+    dice_result = eval_metrics(
+        results=results,
+        gt_seg_maps=gt_seg_maps,
+        num_classes=num_classes,
+        ignore_index=ignore_index,
+        metrics=['mDice'],
+        nan_to_num=nan_to_num,
+        label_map=label_map,
+        reduce_zero_label=reduce_zero_label)
+    return dice_result
+
+
+def mean_fscore(results,
+                gt_seg_maps,
+                num_classes,
+                ignore_index,
+                nan_to_num=None,
+                label_map=dict(),
+                reduce_zero_label=False,
+                beta=1):
+    """Calculate Mean F-Score (mFscore).
+
+    Args:
+        results (list[ndarray] | list[str]): List of prediction segmentation
+            maps or list of prediction result filenames.
+        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
+            segmentation maps or list of label filenames.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        nan_to_num (int, optional): If specified, NaN values will be replaced
+            by the numbers defined by the user. Default: None.
+        label_map (dict): Mapping old labels to new labels. Default: dict().
+        reduce_zero_label (bool): Whether ignore zero label. Default: False.
+        beta (int): Determines the weight of recall in the combined score.
+            Default: 1.
+
+    Returns:
+        dict[str, float | ndarray]: Default metrics.
+            float: Overall accuracy on all images.
+            ndarray: Per category recall, shape (num_classes, ).
+            ndarray: Per category precision, shape (num_classes, ).
+            ndarray: Per category f-score, shape (num_classes, ).
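+
+    Example (a minimal illustrative sketch; the arrays below are
+    hypothetical)::
+
+        >>> import numpy as np
+        >>> results = [np.random.randint(0, 2, (8, 8))]
+        >>> gt_seg_maps = [np.random.randint(0, 2, (8, 8))]
+        >>> metrics = mean_fscore(results, gt_seg_maps, num_classes=2,
+        ...                       ignore_index=255)
+        >>> sorted(metrics.keys())
+        ['Fscore', 'Precision', 'Recall', 'aAcc']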
+ """ + fscore_result = eval_metrics( + results=results, + gt_seg_maps=gt_seg_maps, + num_classes=num_classes, + ignore_index=ignore_index, + metrics=['mFscore'], + nan_to_num=nan_to_num, + label_map=label_map, + reduce_zero_label=reduce_zero_label, + beta=beta) + return fscore_result + + +def eval_metrics(results, + gt_seg_maps, + num_classes, + ignore_index, + metrics=['mIoU'], + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False, + beta=1): + """Calculate evaluation metrics + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Whether ignore zero label. Default: False. + Returns: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category evaluation metrics, shape (num_classes, ). + """ + if isinstance(metrics, str): + metrics = [metrics] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metrics).issubset(set(allowed_metrics)): + raise KeyError('metrics {} is not supported'.format(metrics)) + + total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label = total_intersect_and_union( + results, gt_seg_maps, num_classes, ignore_index, label_map, + reduce_zero_label) + all_acc = total_area_intersect.sum() / total_area_label.sum() + ret_metrics = OrderedDict({'aAcc': all_acc}) + for metric in metrics: + if metric == 'mIoU': + iou = total_area_intersect / total_area_union + acc = total_area_intersect / total_area_label + ret_metrics['IoU'] = iou + ret_metrics['Acc'] = acc + elif metric == 'mDice': + dice = 2 * total_area_intersect / ( + total_area_pred_label + total_area_label) + acc = total_area_intersect / total_area_label + ret_metrics['Dice'] = dice + ret_metrics['Acc'] = acc + elif metric == 'mFscore': + precision = total_area_intersect / total_area_pred_label + recall = total_area_intersect / total_area_label + f_value = torch.tensor( + [f_score(x[0], x[1], beta) for x in zip(precision, recall)]) + ret_metrics['Fscore'] = f_value + ret_metrics['Precision'] = precision + ret_metrics['Recall'] = recall + + ret_metrics = { + metric: value.numpy() + for metric, value in ret_metrics.items() + } + if nan_to_num is not None: + ret_metrics = OrderedDict({ + metric: np.nan_to_num(metric_value, nan=nan_to_num) + for metric, metric_value in ret_metrics.items() + }) + return ret_metrics diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..93bc129b685e4a3efca2cc891729981b2865900d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/__init__.py @@ -0,0 +1,4 @@ +from .builder import build_pixel_sampler +from .sampler import BasePixelSampler, OHEMPixelSampler + +__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler'] diff --git 
new file mode 100644
index 0000000000000000000000000000000000000000..8c6971fce1e60b12c521413bf62127da76f441d4
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/builder.py
@@ -0,0 +1,8 @@
+from custom_mmpkg.custom_mmcv.utils import Registry, build_from_cfg
+
+PIXEL_SAMPLERS = Registry('pixel sampler')
+
+
+def build_pixel_sampler(cfg, **default_args):
+    """Build pixel sampler for segmentation map."""
+    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..332b242c03d1c5e80d4577df442a9a037b1816e1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/__init__.py
@@ -0,0 +1,4 @@
+from .base_pixel_sampler import BasePixelSampler
+from .ohem_pixel_sampler import OHEMPixelSampler
+
+__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/base_pixel_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/base_pixel_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b75b1566c9f18169cee51d4b55d75e0357b69c57
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/base_pixel_sampler.py
@@ -0,0 +1,12 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BasePixelSampler(metaclass=ABCMeta):
+    """Base class of pixel sampler."""
+
+    def __init__(self, **kwargs):
+        pass
+
+    @abstractmethod
+    def sample(self, seg_logit, seg_label):
+        """Placeholder for sample function."""
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/ohem_pixel_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/ohem_pixel_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..88bb10d44026ba9f21756eaea9e550841cd59b9f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/seg/sampler/ohem_pixel_sampler.py
@@ -0,0 +1,76 @@
+import torch
+import torch.nn.functional as F
+
+from ..builder import PIXEL_SAMPLERS
+from .base_pixel_sampler import BasePixelSampler
+
+
+@PIXEL_SAMPLERS.register_module()
+class OHEMPixelSampler(BasePixelSampler):
+    """Online Hard Example Mining Sampler for segmentation.
+
+    Args:
+        context (nn.Module): The context of sampler, subclass of
+            :obj:`BaseDecodeHead`.
+        thresh (float, optional): The threshold for hard example selection.
+            Predictions with confidence below this threshold are treated as
+            hard examples. If not specified, the hard examples will be the
+            pixels with the top ``min_kept`` losses. Default: None.
+        min_kept (int, optional): The minimum number of predictions to keep.
+            Default: 100000.
+    """
+
+    def __init__(self, context, thresh=None, min_kept=100000):
+        super(OHEMPixelSampler, self).__init__()
+        self.context = context
+        assert min_kept > 1
+        self.thresh = thresh
+        self.min_kept = min_kept
+
+    def sample(self, seg_logit, seg_label):
+        """Sample pixels that have high loss or low prediction confidence.
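+
+        In short (summarizing the logic below): when ``thresh`` is set, valid
+        pixels whose predicted probability for their ground-truth class falls
+        below ``max(thresh, k-th smallest valid probability)``, with
+        ``k = min_kept * batch_size``, receive weight 1; without ``thresh``,
+        the ``min_kept * batch_size`` valid pixels with the largest loss
+        receive weight 1. All remaining pixels get weight 0.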
+ + Args: + seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W) + seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W) + + Returns: + torch.Tensor: segmentation weight, shape (N, H, W) + """ + with torch.no_grad(): + assert seg_logit.shape[2:] == seg_label.shape[2:] + assert seg_label.shape[1] == 1 + seg_label = seg_label.squeeze(1).long() + batch_kept = self.min_kept * seg_label.size(0) + valid_mask = seg_label != self.context.ignore_index + seg_weight = seg_logit.new_zeros(size=seg_label.size()) + valid_seg_weight = seg_weight[valid_mask] + if self.thresh is not None: + seg_prob = F.softmax(seg_logit, dim=1) + + tmp_seg_label = seg_label.clone().unsqueeze(1) + tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0 + seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1) + sort_prob, sort_indices = seg_prob[valid_mask].sort() + + if sort_prob.numel() > 0: + min_threshold = sort_prob[min(batch_kept, + sort_prob.numel() - 1)] + else: + min_threshold = 0.0 + threshold = max(min_threshold, self.thresh) + valid_seg_weight[seg_prob[valid_mask] < threshold] = 1. + else: + losses = self.context.loss_decode( + seg_logit, + seg_label, + weight=None, + ignore_index=self.context.ignore_index, + reduction_override='none') + # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa + _, sort_indices = losses[valid_mask].sort(descending=True) + valid_seg_weight[sort_indices[:batch_kept]] = 1. + + seg_weight[valid_mask] = valid_seg_weight + + return seg_weight diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f2678b321c295bcceaef945111ac3524be19d6e4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/__init__.py @@ -0,0 +1,3 @@ +from .misc import add_prefix + +__all__ = ['add_prefix'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/misc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..eb862a82bd47c8624db3dd5c6fb6ad8a03b62466 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/core/utils/misc.py @@ -0,0 +1,17 @@ +def add_prefix(inputs, prefix): + """Add prefix for dict. + + Args: + inputs (dict): The input dict with str keys. + prefix (str): The prefix to add. + + Returns: + + dict: The dict with keys updated with ``prefix``. 
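+
+    Example (a minimal illustrative call)::
+
+        >>> add_prefix({'loss_seg': 1.0, 'acc_seg': 0.9}, 'decode')
+        {'decode.loss_seg': 1.0, 'decode.acc_seg': 0.9}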
+ """ + + outputs = dict() + for name, value in inputs.items(): + outputs[f'{prefix}.{name}'] = value + + return outputs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebeaef4a28ef655e43578552a8aef6b77f13a636 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/__init__.py @@ -0,0 +1,19 @@ +from .ade import ADE20KDataset +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .chase_db1 import ChaseDB1Dataset +from .cityscapes import CityscapesDataset +from .custom import CustomDataset +from .dataset_wrappers import ConcatDataset, RepeatDataset +from .drive import DRIVEDataset +from .hrf import HRFDataset +from .pascal_context import PascalContextDataset, PascalContextDataset59 +from .stare import STAREDataset +from .voc import PascalVOCDataset + +__all__ = [ + 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', + 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', + 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', + 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', + 'STAREDataset' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/ade.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/ade.py new file mode 100644 index 0000000000000000000000000000000000000000..5913e43775ed4920b6934c855eb5a37c54218ebf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/ade.py @@ -0,0 +1,84 @@ +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ADE20KDataset(CustomDataset): + """ADE20K dataset. + + In segmentation map annotation for ADE20K, 0 stands for background, which + is not included in 150 categories. ``reduce_zero_label`` is fixed to True. + The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to + '.png'. 
+ """ + CLASSES = ( + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag') + + PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + 
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + + def __init__(self, **kwargs): + super(ADE20KDataset, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=True, + **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..ceb3d5cbe6a2a2bcd1515177324039cd42320e13 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/builder.py @@ -0,0 +1,169 @@ +import copy +import platform +import random +from functools import partial + +import numpy as np +from custom_mmpkg.custom_mmcv.parallel import collate +from custom_mmpkg.custom_mmcv.runner import get_dist_info +from custom_mmpkg.custom_mmcv.utils import Registry, build_from_cfg +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader +from torch.utils.data import DistributedSampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + hard_limit = rlimit[1] + soft_limit = min(4096, hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') + + +def _concat_dataset(cfg, default_args=None): + """Build :obj:`ConcatDataset by.""" + from .dataset_wrappers import ConcatDataset + img_dir = cfg['img_dir'] + ann_dir = cfg.get('ann_dir', None) + split = cfg.get('split', None) + num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1 + if ann_dir is not None: + num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1 + else: + num_ann_dir = 0 + if split is not None: + num_split = len(split) if isinstance(split, (list, tuple)) else 1 + else: + num_split = 0 + if num_img_dir > 1: + assert num_img_dir == num_ann_dir or num_ann_dir == 0 + assert num_img_dir == num_split or num_split == 0 + else: + assert num_split == num_ann_dir or num_ann_dir <= 1 + num_dset = max(num_split, num_img_dir) + + datasets = [] + for i in range(num_dset): + data_cfg = copy.deepcopy(cfg) + if isinstance(img_dir, (list, tuple)): + data_cfg['img_dir'] = img_dir[i] + if isinstance(ann_dir, (list, tuple)): + data_cfg['ann_dir'] = ann_dir[i] + if isinstance(split, (list, tuple)): + data_cfg['split'] = split[i] + datasets.append(build_dataset(data_cfg, default_args)) + + return ConcatDataset(datasets) + + +def build_dataset(cfg, default_args=None): + """Build datasets.""" + from .dataset_wrappers import ConcatDataset, RepeatDataset + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance( + cfg.get('split', None), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def 
build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + seed=None, + drop_last=False, + pin_memory=True, + dataloader_type='PoolDataLoader', + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + seed (int | None): Seed to be used. Default: None. + drop_last (bool): Whether to drop the last incomplete batch in epoch. + Default: False + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True + dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader' + kwargs: any keyword argument to be used to initialize DataLoader + + Returns: + DataLoader: A PyTorch dataloader. + """ + rank, world_size = get_dist_info() + if dist: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=shuffle) + shuffle = False + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + sampler = None + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + assert dataloader_type in ( + 'DataLoader', + 'PoolDataLoader'), f'unsupported dataloader {dataloader_type}' + + if dataloader_type == 'PoolDataLoader': + dataloader = PoolDataLoader + elif dataloader_type == 'DataLoader': + dataloader = DataLoader + + data_loader = dataloader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + drop_last=drop_last, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + """Worker init func for dataloader. + + The seed of each worker equals to num_worker * rank + worker_id + user_seed + + Args: + worker_id (int): Worker id. + num_workers (int): Number of workers. + rank (int): The rank of current process. + seed (int): The random seed to use. + """ + + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/chase_db1.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/chase_db1.py new file mode 100644 index 0000000000000000000000000000000000000000..8bc29bea14704a4407f83474610cbc3bef32c708 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/chase_db1.py @@ -0,0 +1,27 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class ChaseDB1Dataset(CustomDataset): + """Chase_db1 dataset. + + In segmentation map annotation for Chase_db1, 0 stands for background, + which is included in 2 categories. ``reduce_zero_label`` is fixed to False. + The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_1stHO.png'. 
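+    A valid img/ann filename pair is thus ``xxx.png`` / ``xxx_1stHO.png``.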
+ """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(ChaseDB1Dataset, self).__init__( + img_suffix='.png', + seg_map_suffix='_1stHO.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/cityscapes.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..4c4d09372290d8d1d35fc75846a2802417d6b0db --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/cityscapes.py @@ -0,0 +1,217 @@ +import os.path as osp +import tempfile + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +from custom_mmpkg.custom_mmcv.utils import print_log +from PIL import Image + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class CityscapesDataset(CustomDataset): + """Cityscapes dataset. + + The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is + fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset. + """ + + CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', + 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle') + + PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], + [0, 80, 100], [0, 0, 230], [119, 11, 32]] + + def __init__(self, **kwargs): + super(CityscapesDataset, self).__init__( + img_suffix='_leftImg8bit.png', + seg_map_suffix='_gtFine_labelTrainIds.png', + **kwargs) + + @staticmethod + def _convert_to_label_id(result): + """Convert trainId to id for cityscapes.""" + if isinstance(result, str): + result = np.load(result) + import cityscapesscripts.helpers.labels as CSLabels + result_copy = result.copy() + for trainId, label in CSLabels.trainId2label.items(): + result_copy[result == trainId] = label.id + + return result_copy + + def results2img(self, results, imgfile_prefix, to_label_id): + """Write the segmentation results to images. + + Args: + results (list[list | tuple | ndarray]): Testing results of the + dataset. + imgfile_prefix (str): The filename prefix of the png files. + If the prefix is "somepath/xxx", + the png files will be named "somepath/xxx.png". + to_label_id (bool): whether convert output to label_id for + submission + + Returns: + list[str: str]: result txt files which contains corresponding + semantic segmentation images. 
+ """ + mmcv.mkdir_or_exist(imgfile_prefix) + result_files = [] + prog_bar = mmcv.ProgressBar(len(self)) + for idx in range(len(self)): + result = results[idx] + if to_label_id: + result = self._convert_to_label_id(result) + filename = self.img_infos[idx]['filename'] + basename = osp.splitext(osp.basename(filename))[0] + + png_filename = osp.join(imgfile_prefix, f'{basename}.png') + + output = Image.fromarray(result.astype(np.uint8)).convert('P') + import cityscapesscripts.helpers.labels as CSLabels + palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8) + for label_id, label in CSLabels.id2label.items(): + palette[label_id] = label.color + + output.putpalette(palette) + output.save(png_filename) + result_files.append(png_filename) + prog_bar.update() + + return result_files + + def format_results(self, results, imgfile_prefix=None, to_label_id=True): + """Format the results into dir (standard format for Cityscapes + evaluation). + + Args: + results (list): Testing results of the dataset. + imgfile_prefix (str | None): The prefix of images files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Default: None. + to_label_id (bool): whether convert output to label_id for + submission. Default: False + + Returns: + tuple: (result_files, tmp_dir), result_files is a list containing + the image paths, tmp_dir is the temporal directory created + for saving json/png files when img_prefix is not specified. + """ + + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: ' + f'{len(results)} != {len(self)}') + + if imgfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + imgfile_prefix = tmp_dir.name + else: + tmp_dir = None + result_files = self.results2img(results, imgfile_prefix, to_label_id) + + return result_files, tmp_dir + + def evaluate(self, + results, + metric='mIoU', + logger=None, + imgfile_prefix=None, + efficient_test=False): + """Evaluation in Cityscapes/default protocol. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + imgfile_prefix (str | None): The prefix of output image file, + for cityscapes evaluation only. It includes the file path and + the prefix of filename, e.g., "a/b/prefix". + If results are evaluated with cityscapes protocol, it would be + the prefix of output png files. The output files would be + png images under folder "a/b/prefix/xxx.png", where "xxx" is + the image name of cityscapes. If not specified, a temp file + will be created for evaluation. + Default: None. + + Returns: + dict[str, float]: Cityscapes/default metrics. + """ + + eval_results = dict() + metrics = metric.copy() if isinstance(metric, list) else [metric] + if 'cityscapes' in metrics: + eval_results.update( + self._evaluate_cityscapes(results, logger, imgfile_prefix)) + metrics.remove('cityscapes') + if len(metrics) > 0: + eval_results.update( + super(CityscapesDataset, + self).evaluate(results, metrics, logger, efficient_test)) + + return eval_results + + def _evaluate_cityscapes(self, results, logger, imgfile_prefix): + """Evaluation in Cityscapes protocol. + + Args: + results (list): Testing results of the dataset. 
+            logger (logging.Logger | str | None): Logger used for printing
+                related information during evaluation. Default: None.
+            imgfile_prefix (str | None): The prefix of output image file
+
+        Returns:
+            dict[str: float]: Cityscapes evaluation results.
+        """
+        try:
+            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
+        except ImportError:
+            raise ImportError('Please run "pip install cityscapesscripts" to '
+                              'install cityscapesscripts first.')
+        msg = 'Evaluating in Cityscapes style'
+        if logger is None:
+            msg = '\n' + msg
+        print_log(msg, logger=logger)
+
+        result_files, tmp_dir = self.format_results(results, imgfile_prefix)
+
+        if tmp_dir is None:
+            result_dir = imgfile_prefix
+        else:
+            result_dir = tmp_dir.name
+
+        eval_results = dict()
+        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
+
+        CSEval.args.evalInstLevelScore = True
+        CSEval.args.predictionPath = osp.abspath(result_dir)
+        CSEval.args.evalPixelAccuracy = True
+        CSEval.args.JSONOutput = False
+
+        seg_map_list = []
+        pred_list = []
+
+        # when evaluating with official cityscapesscripts,
+        # **_gtFine_labelIds.png is used
+        for seg_map in mmcv.scandir(
+                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
+            seg_map_list.append(osp.join(self.ann_dir, seg_map))
+            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
+
+        eval_results.update(
+            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
+
+        if tmp_dir is not None:
+            tmp_dir.cleanup()
+
+        return eval_results
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/custom.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..5096a1d718784fcfcc6ae0b30aa256dfb57bc768
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/custom.py
@@ -0,0 +1,403 @@
+import os
+import os.path as osp
+from collections import OrderedDict
+from functools import reduce
+
+import custom_mmpkg.custom_mmcv as mmcv
+import numpy as np
+from custom_mmpkg.custom_mmcv.utils import print_log
+from torch.utils.data import Dataset
+
+from custom_mmpkg.custom_mmseg.core import eval_metrics
+from custom_mmpkg.custom_mmseg.utils import get_root_logger
+from .builder import DATASETS
+from .pipelines import Compose
+
+
+@DATASETS.register_module()
+class CustomDataset(Dataset):
+    """Custom dataset for semantic segmentation. An example of file structure
+    is as follows.
+
+    .. code-block:: none
+
+        ├── data
+        │   ├── my_dataset
+        │   │   ├── img_dir
+        │   │   │   ├── train
+        │   │   │   │   ├── xxx{img_suffix}
+        │   │   │   │   ├── yyy{img_suffix}
+        │   │   │   │   ├── zzz{img_suffix}
+        │   │   │   ├── val
+        │   │   ├── ann_dir
+        │   │   │   ├── train
+        │   │   │   │   ├── xxx{seg_map_suffix}
+        │   │   │   │   ├── yyy{seg_map_suffix}
+        │   │   │   │   ├── zzz{seg_map_suffix}
+        │   │   │   ├── val
+
+    Each img/gt_semantic_seg pair of CustomDataset should share the same name
+    except for the suffix. A valid img/gt_semantic_seg filename pair should
+    be like ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is
+    also included in the suffix). If split is given, then ``xxx`` is
+    specified in the txt file. Otherwise, all files in ``img_dir/`` and
+    ``ann_dir`` will be loaded. Please refer to
+    ``docs/tutorials/new_dataset.md`` for more details.
+
+
+    Args:
+        pipeline (list[dict]): Processing pipeline
+        img_dir (str): Path to image directory
+        img_suffix (str): Suffix of images. Default: '.jpg'
+        ann_dir (str, optional): Path to annotation directory.
Default: None + seg_map_suffix (str): Suffix of segmentation maps. Default: '.png' + split (str, optional): Split txt file. If split is specified, only + file with suffix in the splits will be loaded. Otherwise, all + images in img_dir/ann_dir will be loaded. Default: None + data_root (str, optional): Data root for img_dir/ann_dir. Default: + None. + test_mode (bool): If test_mode=True, gt wouldn't be loaded. + ignore_index (int): The label index to be ignored. Default: 255 + reduce_zero_label (bool): Whether to mark label zero as ignored. + Default: False + classes (str | Sequence[str], optional): Specify classes to load. + If is None, ``cls.CLASSES`` will be used. Default: None. + palette (Sequence[Sequence[int]]] | np.ndarray | None): + The palette of segmentation map. If None is given, and + self.PALETTE is None, random palette will be generated. + Default: None + """ + + CLASSES = None + + PALETTE = None + + def __init__(self, + pipeline, + img_dir, + img_suffix='.jpg', + ann_dir=None, + seg_map_suffix='.png', + split=None, + data_root=None, + test_mode=False, + ignore_index=255, + reduce_zero_label=False, + classes=None, + palette=None): + self.pipeline = Compose(pipeline) + self.img_dir = img_dir + self.img_suffix = img_suffix + self.ann_dir = ann_dir + self.seg_map_suffix = seg_map_suffix + self.split = split + self.data_root = data_root + self.test_mode = test_mode + self.ignore_index = ignore_index + self.reduce_zero_label = reduce_zero_label + self.label_map = None + self.CLASSES, self.PALETTE = self.get_classes_and_palette( + classes, palette) + + # join paths if data_root is specified + if self.data_root is not None: + if not osp.isabs(self.img_dir): + self.img_dir = osp.join(self.data_root, self.img_dir) + if not (self.ann_dir is None or osp.isabs(self.ann_dir)): + self.ann_dir = osp.join(self.data_root, self.ann_dir) + if not (self.split is None or osp.isabs(self.split)): + self.split = osp.join(self.data_root, self.split) + + # load annotations + self.img_infos = self.load_annotations(self.img_dir, self.img_suffix, + self.ann_dir, + self.seg_map_suffix, self.split) + + def __len__(self): + """Total number of samples of data.""" + return len(self.img_infos) + + def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix, + split): + """Load annotation from directory. + + Args: + img_dir (str): Path to image directory + img_suffix (str): Suffix of images. + ann_dir (str|None): Path to annotation directory. + seg_map_suffix (str|None): Suffix of segmentation maps. + split (str|None): Split txt file. If split is specified, only file + with suffix in the splits will be loaded. Otherwise, all images + in img_dir/ann_dir will be loaded. Default: None + + Returns: + list[dict]: All image info of dataset. + """ + + img_infos = [] + if split is not None: + with open(split) as f: + for line in f: + img_name = line.strip() + img_info = dict(filename=img_name + img_suffix) + if ann_dir is not None: + seg_map = img_name + seg_map_suffix + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + else: + for img in mmcv.scandir(img_dir, img_suffix, recursive=True): + img_info = dict(filename=img) + if ann_dir is not None: + seg_map = img.replace(img_suffix, seg_map_suffix) + img_info['ann'] = dict(seg_map=seg_map) + img_infos.append(img_info) + + print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger()) + return img_infos + + def get_ann_info(self, idx): + """Get annotation by index. + + Args: + idx (int): Index of data. 
+ + Returns: + dict: Annotation info of specified index. + """ + + return self.img_infos[idx]['ann'] + + def pre_pipeline(self, results): + """Prepare results dict for pipeline.""" + results['seg_fields'] = [] + results['img_prefix'] = self.img_dir + results['seg_prefix'] = self.ann_dir + if self.custom_classes: + results['label_map'] = self.label_map + + def __getitem__(self, idx): + """Get training/test data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training/test data (with annotation if `test_mode` is set + False). + """ + + if self.test_mode: + return self.prepare_test_img(idx) + else: + return self.prepare_train_img(idx) + + def prepare_train_img(self, idx): + """Get training data and annotations after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training data and annotation after pipeline with new keys + introduced by pipeline. + """ + + img_info = self.img_infos[idx] + ann_info = self.get_ann_info(idx) + results = dict(img_info=img_info, ann_info=ann_info) + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Get testing data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Testing data after pipeline with new keys introduced by + pipeline. + """ + + img_info = self.img_infos[idx] + results = dict(img_info=img_info) + self.pre_pipeline(results) + return self.pipeline(results) + + def format_results(self, results, **kwargs): + """Place holder to format result to dataset specific output.""" + + def get_gt_seg_maps(self, efficient_test=False): + """Get ground truth segmentation maps for evaluation.""" + gt_seg_maps = [] + for img_info in self.img_infos: + seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map']) + if efficient_test: + gt_seg_map = seg_map + else: + gt_seg_map = mmcv.imread( + seg_map, flag='unchanged', backend='pillow') + gt_seg_maps.append(gt_seg_map) + return gt_seg_maps + + def get_classes_and_palette(self, classes=None, palette=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + palette (Sequence[Sequence[int]]] | np.ndarray | None): + The palette of segmentation map. If None is given, random + palette will be generated. Default: None + """ + if classes is None: + self.custom_classes = False + return self.CLASSES, self.PALETTE + + self.custom_classes = True + if isinstance(classes, str): + # take it as a file path + class_names = mmcv.list_from_file(classes) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + if self.CLASSES: + if not set(classes).issubset(self.CLASSES): + raise ValueError('classes is not a subset of CLASSES.') + + # dictionary, its keys are the old label ids and its values + # are the new label ids. + # used for changing pixel labels in load_annotations. 
+ self.label_map = {} + for i, c in enumerate(self.CLASSES): + if c not in class_names: + self.label_map[i] = -1 + else: + self.label_map[i] = classes.index(c) + + palette = self.get_palette_for_custom_classes(class_names, palette) + + return class_names, palette + + def get_palette_for_custom_classes(self, class_names, palette=None): + + if self.label_map is not None: + # return subset of palette + palette = [] + for old_id, new_id in sorted( + self.label_map.items(), key=lambda x: x[1]): + if new_id != -1: + palette.append(self.PALETTE[old_id]) + palette = type(self.PALETTE)(palette) + + elif palette is None: + if self.PALETTE is None: + palette = np.random.randint(0, 255, size=(len(class_names), 3)) + else: + palette = self.PALETTE + + return palette + + def evaluate(self, + results, + metric='mIoU', + logger=None, + efficient_test=False, + **kwargs): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. 'mIoU', + 'mDice' and 'mFscore' are supported. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str, float]: Default metrics. + """ + + if isinstance(metric, str): + metric = [metric] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metric).issubset(set(allowed_metrics)): + raise KeyError('metric {} is not supported'.format(metric)) + eval_results = {} + gt_seg_maps = self.get_gt_seg_maps(efficient_test) + if self.CLASSES is None: + num_classes = len( + reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps])) + else: + num_classes = len(self.CLASSES) + ret_metrics = eval_metrics( + results, + gt_seg_maps, + num_classes, + self.ignore_index, + metric, + label_map=self.label_map, + reduce_zero_label=self.reduce_zero_label) + + if self.CLASSES is None: + class_names = tuple(range(num_classes)) + else: + class_names = self.CLASSES + + # summary table + ret_metrics_summary = OrderedDict({ + ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + + # each class table + ret_metrics.pop('aAcc', None) + ret_metrics_class = OrderedDict({ + ret_metric: np.round(ret_metric_value * 100, 2) + for ret_metric, ret_metric_value in ret_metrics.items() + }) + ret_metrics_class.update({'Class': class_names}) + ret_metrics_class.move_to_end('Class', last=False) + + try: + from prettytable import PrettyTable + # for logger + class_table_data = PrettyTable() + for key, val in ret_metrics_class.items(): + class_table_data.add_column(key, val) + + summary_table_data = PrettyTable() + for key, val in ret_metrics_summary.items(): + if key == 'aAcc': + summary_table_data.add_column(key, [val]) + else: + summary_table_data.add_column('m' + key, [val]) + + print_log('per class results:', logger) + print_log('\n' + class_table_data.get_string(), logger=logger) + print_log('Summary:', logger) + print_log('\n' + summary_table_data.get_string(), logger=logger) + except ImportError: # prettytable is not installed + pass + + # each metric dict + for key, value in ret_metrics_summary.items(): + if key == 'aAcc': + eval_results[key] = value / 100.0 + else: + eval_results['m' + key] = value / 100.0 + + ret_metrics_class.pop('Class', None) + for key, value in ret_metrics_class.items(): + eval_results.update({ + key + '.' 
+ str(name): value[idx] / 100.0 + for idx, name in enumerate(class_names) + }) + + if mmcv.is_list_of(results, str): + for file_name in results: + os.remove(file_name) + return eval_results diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/dataset_wrappers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/dataset_wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..d6a5e957ec3b44465432617cf6e8f0b86a8a5efa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/dataset_wrappers.py @@ -0,0 +1,50 @@ +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + concat the group flag for image aspect ratio. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. + """ + + def __init__(self, datasets): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + self.PALETTE = datasets[0].PALETTE + + +@DATASETS.register_module() +class RepeatDataset(object): + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + self.PALETTE = dataset.PALETTE + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + """Get item from original dataset.""" + return self.dataset[idx % self._ori_len] + + def __len__(self): + """The length is multiplied by ``times``""" + return self.times * self._ori_len diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/drive.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/drive.py new file mode 100644 index 0000000000000000000000000000000000000000..3cbfda8ae74bdf26c5aef197ff2866a7c7ad0cfd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/drive.py @@ -0,0 +1,27 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class DRIVEDataset(CustomDataset): + """DRIVE dataset. + + In segmentation map annotation for DRIVE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '_manual1.png'. 
+ """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(DRIVEDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='_manual1.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/hrf.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/hrf.py new file mode 100644 index 0000000000000000000000000000000000000000..923203b51377f9344277fc561803d7a78bd2c684 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/hrf.py @@ -0,0 +1,27 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class HRFDataset(CustomDataset): + """HRF dataset. + + In segmentation map annotation for HRF, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.png'. + """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(HRFDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pascal_context.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pascal_context.py new file mode 100644 index 0000000000000000000000000000000000000000..541a63c66a13fb16fd52921e755715ad8d078fdd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pascal_context.py @@ -0,0 +1,103 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class PascalContextDataset(CustomDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + split (str): Split txt file for PascalContext. 
+ """ + + CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench', + 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', + 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', + 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', + 'floor', 'flower', 'food', 'grass', 'ground', 'horse', + 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', + 'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep', + 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', + 'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water', + 'window', 'wood') + + PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]] + + def __init__(self, split, **kwargs): + super(PascalContextDataset, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + split=split, + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) and self.split is not None + + +@DATASETS.register_module() +class PascalContextDataset59(CustomDataset): + """PascalContext dataset. + + In segmentation map annotation for PascalContext, 0 stands for background, + which is included in 60 categories. ``reduce_zero_label`` is fixed to + False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is + fixed to '.png'. + + Args: + split (str): Split txt file for PascalContext. 
+ """ + + CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', + 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', + 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', + 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', + 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', + 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', + 'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', + 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', + 'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood') + + PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], + [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], + [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], + [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], + [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], + [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], + [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], + [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], + [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], + [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], + [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], + [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], + [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], + [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], + [0, 235, 255], [0, 173, 255], [31, 0, 255]] + + def __init__(self, split, **kwargs): + super(PascalContextDataset59, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + split=split, + reduce_zero_label=True, + **kwargs) + assert osp.exists(self.img_dir) and self.split is not None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b9046b07bb4ddea7a707a392b42e72db7c9df67 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/__init__.py @@ -0,0 +1,16 @@ +from .compose import Compose +from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor, + Transpose, to_tensor) +from .loading import LoadAnnotations, LoadImageFromFile +from .test_time_aug import MultiScaleFlipAug +from .transforms import (CLAHE, AdjustGamma, Normalize, Pad, + PhotoMetricDistortion, RandomCrop, RandomFlip, + RandomRotate, Rerange, Resize, RGB2Gray, SegRescale) + +__all__ = [ + 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', + 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile', + 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', + 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate', + 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/compose.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/compose.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c8027c235140c6d1cca510bb4d2c81baf439c2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/compose.py @@ -0,0 +1,51 @@ +import collections + +from custom_mmpkg.custom_mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + 
+@PIPELINES.register_module() +class Compose(object): + """Compose multiple transforms sequentially. + + Args: + transforms (Sequence[dict | callable]): Sequence of transform object or + config dict to be composed. + """ + + def __init__(self, transforms): + assert isinstance(transforms, collections.abc.Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict') + + def __call__(self, data): + """Call function to apply transforms sequentially. + + Args: + data (dict): A result dict contains the data to transform. + + Returns: + dict: Transformed data. + """ + + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/formating.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/formating.py new file mode 100644 index 0000000000000000000000000000000000000000..0c259f185c9a55faf083dc3bec6d571902125e2d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/formating.py @@ -0,0 +1,288 @@ +from collections.abc import Sequence + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +import torch +from custom_mmpkg.custom_mmcv.parallel import DataContainer as DC + +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + + Args: + data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to + be converted. + """ + + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@PIPELINES.register_module() +class ToTensor(object): + """Convert some results to :obj:`torch.Tensor` by given keys. + + Args: + keys (Sequence[str]): Keys that need to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert data in results to :obj:`torch.Tensor`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted + to :obj:`torch.Tensor`. + """ + + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor(object): + """Convert image to :obj:`torch.Tensor` by given keys. + + The dimension order of input image is (H, W, C). The pipeline will convert + it to (C, H, W). If only 2 dimension (H, W) is given, the output would be + (1, H, W). 
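+
+    Example (illustrative)::
+
+        >>> import numpy as np
+        >>> results = {'img': np.zeros((4, 4, 3), dtype=np.uint8)}
+        >>> ImageToTensor(keys=['img'])(results)['img'].shape
+        torch.Size([3, 4, 4])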
+ + Args: + keys (Sequence[str]): Key of images to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = to_tensor(img.transpose(2, 0, 1)) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose(object): + """Transpose some results by given keys. + + Args: + keys (Sequence[str]): Keys of results to be transposed. + order (Sequence[int]): Order of transpose. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToDataContainer(object): + """Convert results to :obj:`mmcv.DataContainer` by given fields. + + Args: + fields (Sequence[dict]): Each field is a dict like + ``dict(key='xxx', **kwargs)``. The ``key`` in result will + be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. + Default: ``(dict(key='img', stack=True), + dict(key='gt_semantic_seg'))``. + """ + + def __init__(self, + fields=(dict(key='img', + stack=True), dict(key='gt_semantic_seg'))): + self.fields = fields + + def __call__(self, results): + """Call function to convert data in results to + :obj:`mmcv.DataContainer`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted to + :obj:`mmcv.DataContainer`. + """ + + for field in self.fields: + field = field.copy() + key = field.pop('key') + results[key] = DC(results[key], **field) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(fields={self.fields})' + + +@PIPELINES.register_module() +class DefaultFormatBundle(object): + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img" + and "gt_semantic_seg". These fields are formatted as follows. + + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, + (3)to DataContainer (stack=True) + """ + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with + default bundle. 
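+
+        Example (a minimal sketch; shapes and dtypes are illustrative)::
+
+            >>> import numpy as np
+            >>> bundle = DefaultFormatBundle()
+            >>> results = dict(
+            ...     img=np.zeros((4, 4, 3), dtype=np.uint8),
+            ...     gt_semantic_seg=np.zeros((4, 4), dtype=np.uint8))
+            >>> out = bundle(results)
+            >>> out['img'].data.shape  # a (C, H, W) tensor in a DataContainer
+            torch.Size([3, 4, 4])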
+ """ + + if 'img' in results: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + results['img'] = DC(to_tensor(img), stack=True) + if 'gt_semantic_seg' in results: + # convert to long + results['gt_semantic_seg'] = DC( + to_tensor(results['gt_semantic_seg'][None, + ...].astype(np.int64)), + stack=True) + return results + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class Collect(object): + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "gt_semantic_seg". + + The "img_meta" item is always populated. The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + + - "img_shape": shape of the image input to the network as a tuple + (h, w, c). Note that images may be zero padded on the bottom/right + if the batch tensor is larger than this shape. + + - "scale_factor": a float indicating the preprocessing scale + + - "flip": a boolean indicating if image flip transform was used + + - "filename": path to the image file + + - "ori_shape": original shape of the image as a tuple (h, w, c) + + - "pad_shape": image shape after padding + + - "img_norm_cfg": a dict of normalization information: + - mean - per channel mean subtraction + - std - per channel std divisor + - to_rgb - bool indicating if bgr was converted to rgb + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', + 'pad_shape', 'scale_factor', 'flip', 'flip_direction', + 'img_norm_cfg')`` + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:mmcv.DataContainer. + + Args: + results (dict): Result dict contains the data to collect. + + Returns: + dict: The result dict contains the following keys + - keys in``self.keys`` + - ``img_metas`` + """ + + data = {} + img_meta = {} + for key in self.meta_keys: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/loading.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/loading.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef470c7a4b09deaaee6ca145f5f686610e38497 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/loading.py @@ -0,0 +1,153 @@ +import os.path as osp + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class LoadImageFromFile(object): + """Load an image from file. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). 
Added or updated keys are "filename", "img", "img_shape",
+    "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
+    "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
+
+    Args:
+        to_float32 (bool): Whether to convert the loaded image to a float32
+            numpy array. If set to False, the loaded image is a uint8 array.
+            Defaults to False.
+        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
+            Defaults to 'color'.
+        file_client_args (dict): Arguments to instantiate a FileClient.
+            See :class:`mmcv.fileio.FileClient` for details.
+            Defaults to ``dict(backend='disk')``.
+        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
+            'cv2'
+    """
+
+    def __init__(self,
+                 to_float32=False,
+                 color_type='color',
+                 file_client_args=dict(backend='disk'),
+                 imdecode_backend='cv2'):
+        self.to_float32 = to_float32
+        self.color_type = color_type
+        self.file_client_args = file_client_args.copy()
+        self.file_client = None
+        self.imdecode_backend = imdecode_backend
+
+    def __call__(self, results):
+        """Call functions to load an image and get its meta information.
+
+        Args:
+            results (dict): Result dict from :obj:`mmseg.CustomDataset`.
+
+        Returns:
+            dict: The dict contains the loaded image and meta information.
+        """
+
+        if self.file_client is None:
+            self.file_client = mmcv.FileClient(**self.file_client_args)
+
+        if results.get('img_prefix') is not None:
+            filename = osp.join(results['img_prefix'],
+                                results['img_info']['filename'])
+        else:
+            filename = results['img_info']['filename']
+        img_bytes = self.file_client.get(filename)
+        img = mmcv.imfrombytes(
+            img_bytes, flag=self.color_type, backend=self.imdecode_backend)
+        if self.to_float32:
+            img = img.astype(np.float32)
+
+        results['filename'] = filename
+        results['ori_filename'] = results['img_info']['filename']
+        results['img'] = img
+        results['img_shape'] = img.shape
+        results['ori_shape'] = img.shape
+        # Set initial values for default meta_keys
+        results['pad_shape'] = img.shape
+        results['scale_factor'] = 1.0
+        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+        results['img_norm_cfg'] = dict(
+            mean=np.zeros(num_channels, dtype=np.float32),
+            std=np.ones(num_channels, dtype=np.float32),
+            to_rgb=False)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(to_float32={self.to_float32},'
+        repr_str += f"color_type='{self.color_type}',"
+        repr_str += f"imdecode_backend='{self.imdecode_backend}')"
+        return repr_str
+
+
+@PIPELINES.register_module()
+class LoadAnnotations(object):
+    """Load annotations for semantic segmentation.
+
+    Args:
+        reduce_zero_label (bool): Whether to reduce all label values by 1.
+            Usually used for datasets where 0 is the background label.
+            Default: False.
+        file_client_args (dict): Arguments to instantiate a FileClient.
+            See :class:`mmcv.fileio.FileClient` for details.
+            Defaults to ``dict(backend='disk')``.
+        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
+            'pillow'
+    """
+
+    def __init__(self,
+                 reduce_zero_label=False,
+                 file_client_args=dict(backend='disk'),
+                 imdecode_backend='pillow'):
+        self.reduce_zero_label = reduce_zero_label
+        self.file_client_args = file_client_args.copy()
+        self.file_client = None
+        self.imdecode_backend = imdecode_backend
+
+    def __call__(self, results):
+        """Call function to load multiple types of annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmseg.CustomDataset`.
+
+        Returns:
+            dict: The dict contains loaded semantic segmentation annotations.
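+
+        Example (a sketch; assumes a hypothetical ``data/anns/demo.png``
+        exists on disk)::
+
+            >>> loader = LoadAnnotations(reduce_zero_label=True)
+            >>> results = dict(seg_prefix='data/anns',
+            ...                ann_info=dict(seg_map='demo.png'),
+            ...                seg_fields=[])
+            >>> results = loader(results)  # doctest: +SKIP
+            >>> # adds 'gt_semantic_seg' with label 0 mapped to 255 (ignored)
+            >>> # and all remaining labels shifted down by one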
+ """ + + if self.file_client is None: + self.file_client = mmcv.FileClient(**self.file_client_args) + + if results.get('seg_prefix', None) is not None: + filename = osp.join(results['seg_prefix'], + results['ann_info']['seg_map']) + else: + filename = results['ann_info']['seg_map'] + img_bytes = self.file_client.get(filename) + gt_semantic_seg = mmcv.imfrombytes( + img_bytes, flag='unchanged', + backend=self.imdecode_backend).squeeze().astype(np.uint8) + # modify if custom classes + if results.get('label_map', None) is not None: + for old_id, new_id in results['label_map'].items(): + gt_semantic_seg[gt_semantic_seg == old_id] = new_id + # reduce zero_label + if self.reduce_zero_label: + # avoid using underflow conversion + gt_semantic_seg[gt_semantic_seg == 0] = 255 + gt_semantic_seg = gt_semantic_seg - 1 + gt_semantic_seg[gt_semantic_seg == 254] = 255 + results['gt_semantic_seg'] = gt_semantic_seg + results['seg_fields'].append('gt_semantic_seg') + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(reduce_zero_label={self.reduce_zero_label},' + repr_str += f"imdecode_backend='{self.imdecode_backend}')" + return repr_str diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/test_time_aug.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/test_time_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..93fe21433378b9c87d9e45243c550755bcafefe5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/test_time_aug.py @@ -0,0 +1,133 @@ +import warnings + +import custom_mmpkg.custom_mmcv as mmcv + +from ..builder import PIPELINES +from .compose import Compose + + +@PIPELINES.register_module() +class MultiScaleFlipAug(object): + """Test-time augmentation with multiple scales and flipping. + + An example configuration is as followed: + + .. code-block:: + + img_scale=(2048, 1024), + img_ratios=[0.5, 1.0], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ] + + After MultiScaleFLipAug with above configuration, the results are wrapped + into lists of the same length as followed: + + .. code-block:: + + dict( + img=[...], + img_shape=[...], + scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)] + flip=[False, True, False, True] + ... + ) + + Args: + transforms (list[dict]): Transforms to apply in each augmentation. + img_scale (None | tuple | list[tuple]): Images scales for resizing. + img_ratios (float | list[float]): Image ratios for resizing + flip (bool): Whether apply flip augmentation. Default: False. + flip_direction (str | list[str]): Flip augmentation directions, + options are "horizontal" and "vertical". If flip_direction is list, + multiple flip augmentations will be applied. + It has no effect when flip == False. Default: "horizontal". 
+ """ + + def __init__(self, + transforms, + img_scale, + img_ratios=None, + flip=False, + flip_direction='horizontal'): + self.transforms = Compose(transforms) + if img_ratios is not None: + img_ratios = img_ratios if isinstance(img_ratios, + list) else [img_ratios] + assert mmcv.is_list_of(img_ratios, float) + if img_scale is None: + # mode 1: given img_scale=None and a range of image ratio + self.img_scale = None + assert mmcv.is_list_of(img_ratios, float) + elif isinstance(img_scale, tuple) and mmcv.is_list_of( + img_ratios, float): + assert len(img_scale) == 2 + # mode 2: given a scale and a range of image ratio + self.img_scale = [(int(img_scale[0] * ratio), + int(img_scale[1] * ratio)) + for ratio in img_ratios] + else: + # mode 3: given multiple scales + self.img_scale = img_scale if isinstance(img_scale, + list) else [img_scale] + assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None + self.flip = flip + self.img_ratios = img_ratios + self.flip_direction = flip_direction if isinstance( + flip_direction, list) else [flip_direction] + assert mmcv.is_list_of(self.flip_direction, str) + if not self.flip and self.flip_direction != ['horizontal']: + warnings.warn( + 'flip_direction has no effect when flip is set to False') + if (self.flip + and not any([t['type'] == 'RandomFlip' for t in transforms])): + warnings.warn( + 'flip has no effect when RandomFlip is not in transforms') + + def __call__(self, results): + """Call function to apply test time augment transforms on results. + + Args: + results (dict): Result dict contains the data to transform. + + Returns: + dict[str: list]: The augmented data, where each value is wrapped + into a list. + """ + + aug_data = [] + if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float): + h, w = results['img'].shape[:2] + img_scale = [(int(w * ratio), int(h * ratio)) + for ratio in self.img_ratios] + else: + img_scale = self.img_scale + flip_aug = [False, True] if self.flip else [False] + for scale in img_scale: + for flip in flip_aug: + for direction in self.flip_direction: + _results = results.copy() + _results['scale'] = scale + _results['flip'] = flip + _results['flip_direction'] = direction + data = self.transforms(_results) + aug_data.append(data) + # list of dict to dict of list + aug_data_dict = {key: [] for key in aug_data[0]} + for data in aug_data: + for key, val in data.items(): + aug_data_dict[key].append(val) + return aug_data_dict + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip})' + repr_str += f'flip_direction={self.flip_direction}' + return repr_str diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/transforms.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..677191de984592456c145fe83579a049879443d4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/pipelines/transforms.py @@ -0,0 +1,889 @@ +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +from custom_mmpkg.custom_mmcv.utils import deprecated_api_warning, is_tuple_of +from numpy import random + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Resize(object): + """Resize images & seg. + + This transform resizes the input image to some scale. 
If the input dict
+    contains the key "scale", then the scale in the input dict is used,
+    otherwise the specified scale in the init method is used.
+
+    ``img_scale`` can be None, a tuple (single-scale) or a list of tuples
+    (multi-scale). There are 4 multiscale modes:
+
+    - ``ratio_range is not None``:
+    1. When img_scale is None, img_scale is the shape of the image in results
+        (img_scale = results['img'].shape[:2]) and the image is resized based
+        on the original size. (mode 1)
+    2. When img_scale is a tuple (single-scale), randomly sample a ratio from
+        the ratio range and multiply it with the image scale. (mode 2)
+
+    - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
+    scale from a range. (mode 3)
+
+    - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
+    scale from multiple scales. (mode 4)
+
+    Args:
+        img_scale (tuple or list[tuple]): Image scales for resizing.
+        multiscale_mode (str): Either "range" or "value".
+        ratio_range (tuple[float]): (min_ratio, max_ratio)
+        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+            image.
+    """
+
+    def __init__(self,
+                 img_scale=None,
+                 multiscale_mode='range',
+                 ratio_range=None,
+                 keep_ratio=True):
+        if img_scale is None:
+            self.img_scale = None
+        else:
+            if isinstance(img_scale, list):
+                self.img_scale = img_scale
+            else:
+                self.img_scale = [img_scale]
+            assert mmcv.is_list_of(self.img_scale, tuple)
+
+        if ratio_range is not None:
+            # mode 1: given img_scale=None and a range of image ratio
+            # mode 2: given a scale and a range of image ratio
+            assert self.img_scale is None or len(self.img_scale) == 1
+        else:
+            # mode 3 and 4: given multiple scales or a range of scales
+            assert multiscale_mode in ['value', 'range']
+
+        self.multiscale_mode = multiscale_mode
+        self.ratio_range = ratio_range
+        self.keep_ratio = keep_ratio
+
+    @staticmethod
+    def random_select(img_scales):
+        """Randomly select an img_scale from given candidates.
+
+        Args:
+            img_scales (list[tuple]): Image scales for selection.
+
+        Returns:
+            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
+                where ``img_scale`` is the selected image scale and
+                ``scale_idx`` is the selected index in the given candidates.
+        """
+
+        assert mmcv.is_list_of(img_scales, tuple)
+        scale_idx = np.random.randint(len(img_scales))
+        img_scale = img_scales[scale_idx]
+        return img_scale, scale_idx
+
+    @staticmethod
+    def random_sample(img_scales):
+        """Randomly sample an img_scale when ``multiscale_mode=='range'``.
+
+        Args:
+            img_scales (list[tuple]): Image scale range for sampling.
+                There must be two tuples in img_scales, which specify the lower
+                and upper bound of image scales.
+
+        Returns:
+            (tuple, None): Returns a tuple ``(img_scale, None)``, where
+                ``img_scale`` is the sampled scale and None is just a
+                placeholder to be consistent with :func:`random_select`.
+        """
+
+        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
+        img_scale_long = [max(s) for s in img_scales]
+        img_scale_short = [min(s) for s in img_scales]
+        long_edge = np.random.randint(
+            min(img_scale_long),
+            max(img_scale_long) + 1)
+        short_edge = np.random.randint(
+            min(img_scale_short),
+            max(img_scale_short) + 1)
+        img_scale = (long_edge, short_edge)
+        return img_scale, None
+
+    @staticmethod
+    def random_sample_ratio(img_scale, ratio_range):
+        """Randomly sample an img_scale when ``ratio_range`` is specified.
+
+        A ratio will be randomly sampled from the range specified by
+        ``ratio_range``.
Then it would be multiplied with ``img_scale`` to + generate sampled scale. + + Args: + img_scale (tuple): Images scale base to multiply with ratio. + ratio_range (tuple[float]): The minimum and maximum ratio to scale + the ``img_scale``. + + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where + ``scale`` is sampled ratio multiplied with ``img_scale`` and + None is just a placeholder to be consistent with + :func:`random_select`. + """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. + Otherwise, single scale will be used. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into + ``results``, which would be used by subsequent pipelines. + """ + + if self.ratio_range is not None: + if self.img_scale is None: + h, w = results['img'].shape[:2] + scale, scale_idx = self.random_sample_ratio((w, h), + self.ratio_range) + else: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + if self.keep_ratio: + img, scale_factor = mmcv.imrescale( + results['img'], results['scale'], return_scale=True) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results['img'].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = mmcv.imresize( + results['img'], results['scale'], return_scale=True) + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape # in case that there is no padding + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = mmcv.imrescale( + results[key], results['scale'], interpolation='nearest') + else: + gt_seg = mmcv.imresize( + results[key], results['scale'], interpolation='nearest') + results[key] = gt_seg + + def __call__(self, results): + """Call function to resize images, bounding boxes, masks, semantic + segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', + 'keep_ratio' keys are added into result dict. 
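+
+        Example (a sketch of a fixed 2x rescale via ``ratio_range``; the
+        array values are illustrative)::
+
+            >>> import numpy as np
+            >>> resize = Resize(img_scale=(100, 50), ratio_range=(2.0, 2.0))
+            >>> results = dict(img=np.zeros((50, 100, 3), dtype=np.uint8))
+            >>> resize(results)['img'].shape
+            (100, 200, 3)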
+ """ + + if 'scale' not in results: + self._random_scale(results) + self._resize_img(results) + self._resize_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(img_scale={self.img_scale}, ' + f'multiscale_mode={self.multiscale_mode}, ' + f'ratio_range={self.ratio_range}, ' + f'keep_ratio={self.keep_ratio})') + return repr_str + + +@PIPELINES.register_module() +class RandomFlip(object): + """Flip the image & seg. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. + + Args: + prob (float, optional): The flipping probability. Default: None. + direction(str, optional): The flipping direction. Options are + 'horizontal' and 'vertical'. Default: 'horizontal'. + """ + + @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip') + def __init__(self, prob=None, direction='horizontal'): + self.prob = prob + self.direction = direction + if prob is not None: + assert prob >= 0 and prob <= 1 + assert direction in ['horizontal', 'vertical'] + + def __call__(self, results): + """Call function to flip bounding boxes, masks, semantic segmentation + maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added into + result dict. + """ + + if 'flip' not in results: + flip = True if np.random.rand() < self.prob else False + results['flip'] = flip + if 'flip_direction' not in results: + results['flip_direction'] = self.direction + if results['flip']: + # flip image + results['img'] = mmcv.imflip( + results['img'], direction=results['flip_direction']) + + # flip segs + for key in results.get('seg_fields', []): + # use copy() to make numpy stride positive + results[key] = mmcv.imflip( + results[key], direction=results['flip_direction']).copy() + return results + + def __repr__(self): + return self.__class__.__name__ + f'(prob={self.prob})' + + +@PIPELINES.register_module() +class Pad(object): + """Pad the image & mask. + + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. 
+ """ + + def __init__(self, + size=None, + size_divisor=None, + pad_val=0, + seg_pad_val=255): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + if self.size is not None: + padded_img = mmcv.impad( + results['img'], shape=self.size, pad_val=self.pad_val) + elif self.size_divisor is not None: + padded_img = mmcv.impad_to_multiple( + results['img'], self.size_divisor, pad_val=self.pad_val) + results['img'] = padded_img + results['pad_shape'] = padded_img.shape + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def _pad_seg(self, results): + """Pad masks according to ``results['pad_shape']``.""" + for key in results.get('seg_fields', []): + results[key] = mmcv.impad( + results[key], + shape=results['pad_shape'][:2], + pad_val=self.seg_pad_val) + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. + """ + + self._pad_img(results) + self._pad_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \ + f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class Normalize(object): + """Normalize the image. + + Added key is "img_norm_cfg". + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + """Call function to normalize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + + results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \ + f'{self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class Rerange(object): + """Rerange the image pixel value. + + Args: + min_value (float or int): Minimum value of the reranged image. + Default: 0. + max_value (float or int): Maximum value of the reranged image. + Default: 255. + """ + + def __init__(self, min_value=0, max_value=255): + assert isinstance(min_value, float) or isinstance(min_value, int) + assert isinstance(max_value, float) or isinstance(max_value, int) + assert min_value < max_value + self.min_value = min_value + self.max_value = max_value + + def __call__(self, results): + """Call function to rerange images. + + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Reranged results. 
+ """ + + img = results['img'] + img_min_value = np.min(img) + img_max_value = np.max(img) + + assert img_min_value < img_max_value + # rerange to [0, 1] + img = (img - img_min_value) / (img_max_value - img_min_value) + # rerange to [min_value, max_value] + img = img * (self.max_value - self.min_value) + self.min_value + results['img'] = img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(min_value={self.min_value}, max_value={self.max_value})' + return repr_str + + +@PIPELINES.register_module() +class CLAHE(object): + """Use CLAHE method to process the image. + + See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. + Graphics Gems, 1994:474-485.` for more information. + + Args: + clip_limit (float): Threshold for contrast limiting. Default: 40.0. + tile_grid_size (tuple[int]): Size of grid for histogram equalization. + Input image will be divided into equally sized rectangular tiles. + It defines the number of tiles in row and column. Default: (8, 8). + """ + + def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)): + assert isinstance(clip_limit, (float, int)) + self.clip_limit = clip_limit + assert is_tuple_of(tile_grid_size, int) + assert len(tile_grid_size) == 2 + self.tile_grid_size = tile_grid_size + + def __call__(self, results): + """Call function to Use CLAHE method process images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. + """ + + for i in range(results['img'].shape[2]): + results['img'][:, :, i] = mmcv.clahe( + np.array(results['img'][:, :, i], dtype=np.uint8), + self.clip_limit, self.tile_grid_size) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(clip_limit={self.clip_limit}, '\ + f'tile_grid_size={self.tile_grid_size})' + return repr_str + + +@PIPELINES.register_module() +class RandomCrop(object): + """Random crop the image & seg. + + Args: + crop_size (tuple): Expected size after cropping, (h, w). + cat_max_ratio (float): The maximum ratio that single category could + occupy. + """ + + def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255): + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + self.cat_max_ratio = cat_max_ratio + self.ignore_index = ignore_index + + def get_crop_bbox(self, img): + """Randomly get a crop bounding box.""" + margin_h = max(img.shape[0] - self.crop_size[0], 0) + margin_w = max(img.shape[1] - self.crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1] + + return crop_y1, crop_y2, crop_x1, crop_x2 + + def crop(self, img, crop_bbox): + """Crop from ``img``""" + crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + return img + + def __call__(self, results): + """Call function to randomly crop images, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. 
+ """ + + img = results['img'] + crop_bbox = self.get_crop_bbox(img) + if self.cat_max_ratio < 1.: + # Repeat 10 times + for _ in range(10): + seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox) + labels, cnt = np.unique(seg_temp, return_counts=True) + cnt = cnt[labels != self.ignore_index] + if len(cnt) > 1 and np.max(cnt) / np.sum( + cnt) < self.cat_max_ratio: + break + crop_bbox = self.get_crop_bbox(img) + + # crop the image + img = self.crop(img, crop_bbox) + img_shape = img.shape + results['img'] = img + results['img_shape'] = img_shape + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = self.crop(results[key], crop_bbox) + + return results + + def __repr__(self): + return self.__class__.__name__ + f'(crop_size={self.crop_size})' + + +@PIPELINES.register_module() +class RandomRotate(object): + """Rotate the image & seg. + + Args: + prob (float): The rotation probability. + degree (float, tuple[float]): Range of degrees to select from. If + degree is a number instead of tuple like (min, max), + the range of degree will be (``-degree``, ``+degree``) + pad_val (float, optional): Padding value of image. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If not specified, the center of the image will be + used. Default: None. + auto_bound (bool): Whether to adjust the image size to cover the whole + rotated image. Default: False + """ + + def __init__(self, + prob, + degree, + pad_val=0, + seg_pad_val=255, + center=None, + auto_bound=False): + self.prob = prob + assert prob >= 0 and prob <= 1 + if isinstance(degree, (float, int)): + assert degree > 0, f'degree {degree} should be positive' + self.degree = (-degree, degree) + else: + self.degree = degree + assert len(self.degree) == 2, f'degree {self.degree} should be a ' \ + f'tuple of (min, max)' + self.pal_val = pad_val + self.seg_pad_val = seg_pad_val + self.center = center + self.auto_bound = auto_bound + + def __call__(self, results): + """Call function to rotate image, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Rotated results. + """ + + rotate = True if np.random.rand() < self.prob else False + degree = np.random.uniform(min(*self.degree), max(*self.degree)) + if rotate: + # rotate image + results['img'] = mmcv.imrotate( + results['img'], + angle=degree, + border_value=self.pal_val, + center=self.center, + auto_bound=self.auto_bound) + + # rotate segs + for key in results.get('seg_fields', []): + results[key] = mmcv.imrotate( + results[key], + angle=degree, + border_value=self.seg_pad_val, + center=self.center, + auto_bound=self.auto_bound, + interpolation='nearest') + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob}, ' \ + f'degree={self.degree}, ' \ + f'pad_val={self.pal_val}, ' \ + f'seg_pad_val={self.seg_pad_val}, ' \ + f'center={self.center}, ' \ + f'auto_bound={self.auto_bound})' + return repr_str + + +@PIPELINES.register_module() +class RGB2Gray(object): + """Convert RGB image to grayscale image. + + This transform calculate the weighted mean of input image channels with + ``weights`` and then expand the channels to ``out_channels``. When + ``out_channels`` is None, the number of output channels is the same as + input channels. + + Args: + out_channels (int): Expected number of output channels after + transforming. 
Default: None. + weights (tuple[float]): The weights to calculate the weighted mean. + Default: (0.299, 0.587, 0.114). + """ + + def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)): + assert out_channels is None or out_channels > 0 + self.out_channels = out_channels + assert isinstance(weights, tuple) + for item in weights: + assert isinstance(item, (float, int)) + self.weights = weights + + def __call__(self, results): + """Call function to convert RGB image to grayscale image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with grayscale image. + """ + img = results['img'] + assert len(img.shape) == 3 + assert img.shape[2] == len(self.weights) + weights = np.array(self.weights).reshape((1, 1, -1)) + img = (img * weights).sum(2, keepdims=True) + if self.out_channels is None: + img = img.repeat(weights.shape[2], axis=2) + else: + img = img.repeat(self.out_channels, axis=2) + + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(out_channels={self.out_channels}, ' \ + f'weights={self.weights})' + return repr_str + + +@PIPELINES.register_module() +class AdjustGamma(object): + """Using gamma correction to process the image. + + Args: + gamma (float or int): Gamma value used in gamma correction. + Default: 1.0. + """ + + def __init__(self, gamma=1.0): + assert isinstance(gamma, float) or isinstance(gamma, int) + assert gamma > 0 + self.gamma = gamma + inv_gamma = 1.0 / gamma + self.table = np.array([(i / 255.0)**inv_gamma * 255 + for i in np.arange(256)]).astype('uint8') + + def __call__(self, results): + """Call function to process the image with gamma correction. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. + """ + + results['img'] = mmcv.lut_transform( + np.array(results['img'], dtype=np.uint8), self.table) + + return results + + def __repr__(self): + return self.__class__.__name__ + f'(gamma={self.gamma})' + + +@PIPELINES.register_module() +class SegRescale(object): + """Rescale semantic segmentation maps. + + Args: + scale_factor (float): The scale factor of the final output. + """ + + def __init__(self, scale_factor=1): + self.scale_factor = scale_factor + + def __call__(self, results): + """Call function to scale the semantic segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with semantic segmentation map scaled. + """ + for key in results.get('seg_fields', []): + if self.scale_factor != 1: + results[key] = mmcv.imrescale( + results[key], self.scale_factor, interpolation='nearest') + return results + + def __repr__(self): + return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' + + +@PIPELINES.register_module() +class PhotoMetricDistortion(object): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. 
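+
+    Example (a sketch; the output is random, so only the invariants are
+    shown)::
+
+        >>> import numpy as np
+        >>> distort = PhotoMetricDistortion()
+        >>> img = np.random.randint(0, 256, (32, 32, 3)).astype(np.uint8)
+        >>> out = distort(dict(img=img))['img']
+        >>> out.dtype, out.shape  # stays uint8 BGR, clipped to [0, 255]
+        (dtype('uint8'), (32, 32, 3))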
+ """ + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def convert(self, img, alpha=1, beta=0): + """Multiple with alpha and add beat with clip.""" + img = img.astype(np.float32) * alpha + beta + img = np.clip(img, 0, 255) + return img.astype(np.uint8) + + def brightness(self, img): + """Brightness distortion.""" + if random.randint(2): + return self.convert( + img, + beta=random.uniform(-self.brightness_delta, + self.brightness_delta)) + return img + + def contrast(self, img): + """Contrast distortion.""" + if random.randint(2): + return self.convert( + img, + alpha=random.uniform(self.contrast_lower, self.contrast_upper)) + return img + + def saturation(self, img): + """Saturation distortion.""" + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, 1] = self.convert( + img[:, :, 1], + alpha=random.uniform(self.saturation_lower, + self.saturation_upper)) + img = mmcv.hsv2bgr(img) + return img + + def hue(self, img): + """Hue distortion.""" + if random.randint(2): + img = mmcv.bgr2hsv(img) + img[:, :, + 0] = (img[:, :, 0].astype(int) + + random.randint(-self.hue_delta, self.hue_delta)) % 180 + img = mmcv.hsv2bgr(img) + return img + + def __call__(self, results): + """Call function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. + """ + + img = results['img'] + # random brightness + img = self.brightness(img) + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + img = self.contrast(img) + + # random saturation + img = self.saturation(img) + + # random hue + img = self.hue(img) + + # random contrast + if mode == 0: + img = self.contrast(img) + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(brightness_delta={self.brightness_delta}, ' + f'contrast_range=({self.contrast_lower}, ' + f'{self.contrast_upper}), ' + f'saturation_range=({self.saturation_lower}, ' + f'{self.saturation_upper}), ' + f'hue_delta={self.hue_delta})') + return repr_str diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/stare.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/stare.py new file mode 100644 index 0000000000000000000000000000000000000000..cbd14e0920e7f6a73baff1432e5a32ccfdb0dfae --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/stare.py @@ -0,0 +1,27 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class STAREDataset(CustomDataset): + """STARE dataset. + + In segmentation map annotation for STARE, 0 stands for background, which is + included in 2 categories. ``reduce_zero_label`` is fixed to False. The + ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to + '.ah.png'. 
+ """ + + CLASSES = ('background', 'vessel') + + PALETTE = [[120, 120, 120], [6, 230, 230]] + + def __init__(self, **kwargs): + super(STAREDataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.ah.png', + reduce_zero_label=False, + **kwargs) + assert osp.exists(self.img_dir) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/voc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/voc.py new file mode 100644 index 0000000000000000000000000000000000000000..a8855203b14ee0dc4da9099a2945d4aedcffbcd6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/datasets/voc.py @@ -0,0 +1,29 @@ +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class PascalVOCDataset(CustomDataset): + """Pascal VOC dataset. + + Args: + split (str): Split txt file for Pascal VOC. + """ + + CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', + 'train', 'tvmonitor') + + PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + def __init__(self, split, **kwargs): + super(PascalVOCDataset, self).__init__( + img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) + assert osp.exists(self.img_dir) and self.split is not None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf93f8bec9cf0cef0a3bd76ca3ca92eb188f535 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/__init__.py @@ -0,0 +1,12 @@ +from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone, + build_head, build_loss, build_segmentor) +from .decode_heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .segmentors import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone', + 'build_head', 'build_loss', 'build_segmentor' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1116c00a17c8bd9ed7f18743baee22b3b7d3f8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/__init__.py @@ -0,0 +1,16 @@ +from .cgnet import CGNet +# from .fast_scnn import FastSCNN +from .hrnet import HRNet +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1c, ResNetV1d +from .resnext import ResNeXt +from .unet import UNet +from .vit import VisionTransformer + +__all__ = [ + 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', + 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', + 'VisionTransformer' +] diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/cgnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/cgnet.py new file mode 100644 index 0000000000000000000000000000000000000000..18b158be8dffa5e119c4f73e84d399815ec714ac --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/cgnet.py @@ -0,0 +1,367 @@ +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from custom_mmpkg.custom_mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer, + constant_init, kaiming_init) +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _BatchNorm + +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from ..builder import BACKBONES + + +class GlobalContextExtractor(nn.Module): + """Global Context Extractor for CGNet. + + This class is employed to refine the joint feature of both local feature + and surrounding context. + + Args: + channel (int): Number of input feature channels. + reduction (int): Reductions for global context extractor. Default: 16. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, channel, reduction=16, with_cp=False): + super(GlobalContextExtractor, self).__init__() + self.channel = channel + self.reduction = reduction + assert reduction >= 1 and channel >= reduction + self.with_cp = with_cp + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction), nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel), nn.Sigmoid()) + + def forward(self, x): + + def _inner_forward(x): + num_batch, num_channel = x.size()[:2] + y = self.avg_pool(x).view(num_batch, num_channel) + y = self.fc(y).view(num_batch, num_channel, 1, 1) + return x * y + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class ContextGuidedBlock(nn.Module): + """Context Guided Block for CGNet. + + This class consists of four components: local feature extractor, + surrounding feature extractor, joint feature extractor and global + context extractor. + + Args: + in_channels (int): Number of input feature channels. + out_channels (int): Number of output feature channels. + dilation (int): Dilation rate for surrounding context extractor. + Default: 2. + reduction (int): Reduction for global context extractor. Default: 16. + skip_connect (bool): Add input to output or not. Default: True. + downsample (bool): Downsample the input to 1/2 or not. Default: False. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
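+
+    Example (a sketch; shapes are illustrative)::
+
+        >>> import torch
+        >>> block = ContextGuidedBlock(32, 32)  # no downsampling
+        >>> x = torch.randn(1, 32, 64, 64)
+        >>> block(x).shape  # the skip connection keeps the input resolution
+        torch.Size([1, 32, 64, 64])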
+ """ + + def __init__(self, + in_channels, + out_channels, + dilation=2, + reduction=16, + skip_connect=True, + downsample=False, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + with_cp=False): + super(ContextGuidedBlock, self).__init__() + self.with_cp = with_cp + self.downsample = downsample + + channels = out_channels if downsample else out_channels // 2 + if 'type' in act_cfg and act_cfg['type'] == 'PReLU': + act_cfg['num_parameters'] = channels + kernel_size = 3 if downsample else 1 + stride = 2 if downsample else 1 + padding = (kernel_size - 1) // 2 + + self.conv1x1 = ConvModule( + in_channels, + channels, + kernel_size, + stride, + padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.f_loc = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=1, + groups=channels, + bias=False) + self.f_sur = build_conv_layer( + conv_cfg, + channels, + channels, + kernel_size=3, + padding=dilation, + groups=channels, + dilation=dilation, + bias=False) + + self.bn = build_norm_layer(norm_cfg, 2 * channels)[1] + self.activate = nn.PReLU(2 * channels) + + if downsample: + self.bottleneck = build_conv_layer( + conv_cfg, + 2 * channels, + out_channels, + kernel_size=1, + bias=False) + + self.skip_connect = skip_connect and not downsample + self.f_glo = GlobalContextExtractor(out_channels, reduction, with_cp) + + def forward(self, x): + + def _inner_forward(x): + out = self.conv1x1(x) + loc = self.f_loc(out) + sur = self.f_sur(out) + + joi_feat = torch.cat([loc, sur], 1) # the joint feature + joi_feat = self.bn(joi_feat) + joi_feat = self.activate(joi_feat) + if self.downsample: + joi_feat = self.bottleneck(joi_feat) # channel = out_channels + # f_glo is employed to refine the joint feature + out = self.f_glo(joi_feat) + + if self.skip_connect: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InputInjection(nn.Module): + """Downsampling module for CGNet.""" + + def __init__(self, num_downsampling): + super(InputInjection, self).__init__() + self.pool = nn.ModuleList() + for i in range(num_downsampling): + self.pool.append(nn.AvgPool2d(3, stride=2, padding=1)) + + def forward(self, x): + for pool in self.pool: + x = pool(x) + return x + + +@BACKBONES.register_module() +class CGNet(nn.Module): + """CGNet backbone. + + A Light-weight Context Guided Network for Semantic Segmentation + arXiv: https://arxiv.org/abs/1811.08201 + + Args: + in_channels (int): Number of input image channels. Normally 3. + num_channels (tuple[int]): Numbers of feature channels at each stages. + Default: (32, 64, 128). + num_blocks (tuple[int]): Numbers of CG blocks at stage 1 and stage 2. + Default: (3, 21). + dilations (tuple[int]): Dilation rate for surrounding context + extractors at stage 1 and stage 2. Default: (2, 4). + reductions (tuple[int]): Reductions for global context extractors at + stage 1 and stage 2. Default: (8, 16). + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='PReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + in_channels=3, + num_channels=(32, 64, 128), + num_blocks=(3, 21), + dilations=(2, 4), + reductions=(8, 16), + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='PReLU'), + norm_eval=False, + with_cp=False): + + super(CGNet, self).__init__() + self.in_channels = in_channels + self.num_channels = num_channels + assert isinstance(self.num_channels, tuple) and len( + self.num_channels) == 3 + self.num_blocks = num_blocks + assert isinstance(self.num_blocks, tuple) and len(self.num_blocks) == 2 + self.dilations = dilations + assert isinstance(self.dilations, tuple) and len(self.dilations) == 2 + self.reductions = reductions + assert isinstance(self.reductions, tuple) and len(self.reductions) == 2 + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + if 'type' in self.act_cfg and self.act_cfg['type'] == 'PReLU': + self.act_cfg['num_parameters'] = num_channels[0] + self.norm_eval = norm_eval + self.with_cp = with_cp + + cur_channels = in_channels + self.stem = nn.ModuleList() + for i in range(3): + self.stem.append( + ConvModule( + cur_channels, + num_channels[0], + 3, + 2 if i == 0 else 1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + cur_channels = num_channels[0] + + self.inject_2x = InputInjection(1) # down-sample for Input, factor=2 + self.inject_4x = InputInjection(2) # down-sample for Input, factor=4 + + cur_channels += in_channels + self.norm_prelu_0 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 1 + self.level1 = nn.ModuleList() + for i in range(num_blocks[0]): + self.level1.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[1], + num_channels[1], + dilations[0], + reductions[0], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[1] + in_channels + self.norm_prelu_1 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + # stage 2 + self.level2 = nn.ModuleList() + for i in range(num_blocks[1]): + self.level2.append( + ContextGuidedBlock( + cur_channels if i == 0 else num_channels[2], + num_channels[2], + dilations[1], + reductions[1], + downsample=(i == 0), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) # CG block + + cur_channels = 2 * num_channels[2] + self.norm_prelu_2 = nn.Sequential( + build_norm_layer(norm_cfg, cur_channels)[1], + nn.PReLU(cur_channels)) + + def forward(self, x): + output = [] + + # stage 0 + inp_2x = self.inject_2x(x) + inp_4x = self.inject_4x(x) + for layer in self.stem: + x = layer(x) + x = self.norm_prelu_0(torch.cat([x, inp_2x], 1)) + output.append(x) + + # stage 1 + for i, layer in enumerate(self.level1): + x = layer(x) + if i == 0: + down1 = x + x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1)) + output.append(x) + + # stage 2 + for i, layer in enumerate(self.level2): + x = layer(x) + if i == 0: + down2 = x + x = self.norm_prelu_2(torch.cat([down2, x], 1)) + output.append(x) + + return output + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. 
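+
+        Example (a sketch; the channel counts follow the default
+        configuration)::
+
+            >>> import torch
+            >>> net = CGNet()
+            >>> net.init_weights()  # random init since ``pretrained`` is None
+            >>> [f.shape[1] for f in net(torch.randn(1, 3, 64, 64))]
+            [35, 131, 256]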
+ """ + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, (nn.Conv2d, nn.Linear)): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + elif isinstance(m, nn.PReLU): + constant_init(m, 0) + else: + raise TypeError('pretrained must be a str or None') + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(CGNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/fast_scnn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/fast_scnn.py new file mode 100644 index 0000000000000000000000000000000000000000..0d06faa7c4e3a0d6e85acaf3f2bd21ec28e1f435 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/fast_scnn.py @@ -0,0 +1,375 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, constant_init, + kaiming_init) +from torch.nn.modules.batchnorm import _BatchNorm + +from custom_mmpkg.custom_mmseg.models.decode_heads.psp_head import PPM +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import BACKBONES +from ..utils.inverted_residual import InvertedResidual + + +class LearningToDownsample(nn.Module): + """Learning to downsample module. + + Args: + in_channels (int): Number of input channels. + dw_channels (tuple[int]): Number of output channels of the first and + the second depthwise conv (dwconv) layers. + out_channels (int): Number of output channels of the whole + 'learning to downsample' module. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + """ + + def __init__(self, + in_channels, + dw_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU')): + super(LearningToDownsample, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + dw_channels1 = dw_channels[0] + dw_channels2 = dw_channels[1] + + self.conv = ConvModule( + in_channels, + dw_channels1, + 3, + stride=2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.dsconv1 = DepthwiseSeparableConvModule( + dw_channels1, + dw_channels2, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg) + self.dsconv2 = DepthwiseSeparableConvModule( + dw_channels2, + out_channels, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg) + + def forward(self, x): + x = self.conv(x) + x = self.dsconv1(x) + x = self.dsconv2(x) + return x + + +class GlobalFeatureExtractor(nn.Module): + """Global feature extractor module. + + Args: + in_channels (int): Number of input channels of the GFE module. + Default: 64 + block_channels (tuple[int]): Tuple of ints. Each int specifies the + number of output channels of each Inverted Residual module. + Default: (64, 96, 128) + out_channels(int): Number of output channels of the GFE module. 
+ Default: 128 + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + Default: 6 + num_blocks (tuple[int]): Tuple of ints. Each int specifies the + number of times each Inverted Residual module is repeated. + The repeated Inverted Residual modules are called a 'group'. + Default: (3, 3, 3) + strides (tuple[int]): Tuple of ints. Each int specifies + the downsampling factor of each 'group'. + Default: (2, 2, 1) + pool_scales (tuple[int]): Tuple of ints. Each int specifies + the parameter required in 'global average pooling' within PPM. + Default: (1, 2, 3, 6) + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. + Default: False + """ + + def __init__(self, + in_channels=64, + block_channels=(64, 96, 128), + out_channels=128, + expand_ratio=6, + num_blocks=(3, 3, 3), + strides=(2, 2, 1), + pool_scales=(1, 2, 3, 6), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False): + super(GlobalFeatureExtractor, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + assert len(block_channels) == len(num_blocks) == 3 + self.bottleneck1 = self._make_layer(in_channels, block_channels[0], + num_blocks[0], strides[0], + expand_ratio) + self.bottleneck2 = self._make_layer(block_channels[0], + block_channels[1], num_blocks[1], + strides[1], expand_ratio) + self.bottleneck3 = self._make_layer(block_channels[1], + block_channels[2], num_blocks[2], + strides[2], expand_ratio) + self.ppm = PPM( + pool_scales, + block_channels[2], + block_channels[2] // 4, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=align_corners) + self.out = ConvModule( + block_channels[2] * 2, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _make_layer(self, + in_channels, + out_channels, + blocks, + stride=1, + expand_ratio=6): + layers = [ + InvertedResidual( + in_channels, + out_channels, + stride, + expand_ratio, + norm_cfg=self.norm_cfg) + ] + for i in range(1, blocks): + layers.append( + InvertedResidual( + out_channels, + out_channels, + 1, + expand_ratio, + norm_cfg=self.norm_cfg)) + return nn.Sequential(*layers) + + def forward(self, x): + x = self.bottleneck1(x) + x = self.bottleneck2(x) + x = self.bottleneck3(x) + x = torch.cat([x, *self.ppm(x)], dim=1) + x = self.out(x) + return x + + +class FeatureFusionModule(nn.Module): + """Feature fusion module. + + Args: + higher_in_channels (int): Number of input channels of the + higher-resolution branch. + lower_in_channels (int): Number of input channels of the + lower-resolution branch. + out_channels (int): Number of output channels. + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. 
+ Default: False + """ + + def __init__(self, + higher_in_channels, + lower_in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False): + super(FeatureFusionModule, self).__init__() + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.dwconv = ConvModule( + lower_in_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.conv_lower_res = ConvModule( + out_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + self.conv_higher_res = ConvModule( + higher_in_channels, + out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + self.relu = nn.ReLU(True) + + def forward(self, higher_res_feature, lower_res_feature): + lower_res_feature = resize( + lower_res_feature, + size=higher_res_feature.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + lower_res_feature = self.dwconv(lower_res_feature) + lower_res_feature = self.conv_lower_res(lower_res_feature) + + higher_res_feature = self.conv_higher_res(higher_res_feature) + out = higher_res_feature + lower_res_feature + return self.relu(out) + + +@BACKBONES.register_module() +class FastSCNN(nn.Module): + """Fast-SCNN Backbone. + + Args: + in_channels (int): Number of input image channels. Default: 3. + downsample_dw_channels (tuple[int]): Number of output channels after + the first conv layer & the second conv layer in + Learning-To-Downsample (LTD) module. + Default: (32, 48). + global_in_channels (int): Number of input channels of + Global Feature Extractor(GFE). + Equal to number of output channels of LTD. + Default: 64. + global_block_channels (tuple[int]): Tuple of integers that describe + the output channels for each of the MobileNet-v2 bottleneck + residual blocks in GFE. + Default: (64, 96, 128). + global_block_strides (tuple[int]): Tuple of integers + that describe the strides (downsampling factors) for each of the + MobileNet-v2 bottleneck residual blocks in GFE. + Default: (2, 2, 1). + global_out_channels (int): Number of output channels of GFE. + Default: 128. + higher_in_channels (int): Number of input channels of the higher + resolution branch in FFM. + Equal to global_in_channels. + Default: 64. + lower_in_channels (int): Number of input channels of the lower + resolution branch in FFM. + Equal to global_out_channels. + Default: 128. + fusion_out_channels (int): Number of output channels of FFM. + Default: 128. + out_indices (tuple): Tuple of indices of list + [higher_res_features, lower_res_features, fusion_output]. + Often set to (0,1,2) to enable aux. heads. + Default: (0, 1, 2). + conv_cfg (dict | None): Config of conv layers. Default: None + norm_cfg (dict | None): Config of norm layers. Default: + dict(type='BN') + act_cfg (dict): Config of activation layers. Default: + dict(type='ReLU') + align_corners (bool): align_corners argument of F.interpolate. 
+ Default: False + """ + + def __init__(self, + in_channels=3, + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + align_corners=False): + + super(FastSCNN, self).__init__() + if global_in_channels != higher_in_channels: + raise AssertionError('Global Input Channels must be the same \ + with Higher Input Channels!') + elif global_out_channels != lower_in_channels: + raise AssertionError('Global Output Channels must be the same \ + with Lower Input Channels!') + + self.in_channels = in_channels + self.downsample_dw_channels1 = downsample_dw_channels[0] + self.downsample_dw_channels2 = downsample_dw_channels[1] + self.global_in_channels = global_in_channels + self.global_block_channels = global_block_channels + self.global_block_strides = global_block_strides + self.global_out_channels = global_out_channels + self.higher_in_channels = higher_in_channels + self.lower_in_channels = lower_in_channels + self.fusion_out_channels = fusion_out_channels + self.out_indices = out_indices + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.align_corners = align_corners + self.learning_to_downsample = LearningToDownsample( + in_channels, + downsample_dw_channels, + global_in_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.global_feature_extractor = GlobalFeatureExtractor( + global_in_channels, + global_block_channels, + global_out_channels, + strides=self.global_block_strides, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.feature_fusion = FeatureFusionModule( + higher_in_channels, + lower_in_channels, + fusion_out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + + def init_weights(self, pretrained=None): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + def forward(self, x): + higher_res_features = self.learning_to_downsample(x) + lower_res_features = self.global_feature_extractor(higher_res_features) + fusion_output = self.feature_fusion(higher_res_features, + lower_res_features) + + outs = [higher_res_features, lower_res_features, fusion_output] + outs = [outs[i] for i in self.out_indices] + return tuple(outs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/hrnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..7df19e7bef0ccacbef039633fa5c26344593bf3c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/hrnet.py @@ -0,0 +1,555 @@ +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, + kaiming_init) +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _BatchNorm + +from custom_mmpkg.custom_mmseg.ops import Upsample, resize +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from ..builder import BACKBONES +from .resnet import BasicBlock, Bottleneck + 
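+# Reading note (a sketch inferred from the classes below): unlike the
+# encode-then-fuse design of Fast-SCNN above, HRNet keeps several resolution
+# branches alive in parallel. Each HRModule runs its branches independently,
+# then exchanges information between them: higher-resolution features are
+# downsampled with stride-2 3x3 convs, lower-resolution ones are upsampled by
+# a factor of 2**(j - i), and the aligned maps are summed per branch before a
+# ReLU.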
+ +class HRModule(nn.Module): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True)): + super(HRModule, self).__init__() + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + """Check branches configuration.""" + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \ + f'{len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \ + f'{len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \ + f'{len(in_channels)})' + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + """Build one branch.""" + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, num_channels[branch_index] * + block.expansion)[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + """Build multiple branch.""" + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + """Build fuse layer.""" + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + # we set align_corners=False for HRNet + Upsample( + scale_factor=2**(j - i), + mode='bilinear', + align_corners=False))) + elif j == i: + 
fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + elif j > i: + y = y + resize( + self.fuse_layers[i][j](x[j]), + size=x[i].shape[2:], + mode='bilinear', + align_corners=False) + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@BACKBONES.register_module() +class HRNet(nn.Module): + """HRNet backbone. + + High-Resolution Representations for Labeling Pixels and Regions + arXiv: https://arxiv.org/abs/1904.04514 + + Args: + extra (dict): detailed configuration for each stage of HRNet. + in_channels (int): Number of input image channels. Normally 3. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + + Example: + >>> from custom_mmpkg.custom_mmseg.models import HRNet + >>> import torch + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__(self, + extra, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=False): + super(HRNet, self).__init__() + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * block.expansion + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + """Make transition layer.""" + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, 
+ bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + """Make each layer.""" + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + """Make each stage.""" + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*hr_modules), in_channels + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. 
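+
+        Raises:
+            TypeError: If ``pretrained`` is neither a str nor None.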
+ """ + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Forward function.""" + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return y_list + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(HRNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..bcec93a22124fbc58f84cedd96d11f1e8dd90393 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v2.py @@ -0,0 +1,180 @@ +import logging + +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule, constant_init, kaiming_init +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidual, make_divisible + + +@BACKBONES.register_module() +class MobileNetV2(nn.Module): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + strides (Sequence[int], optional): Strides of the first block of each + layer. If not specified, default config in ``arch_setting`` will + be used. + dilations (Sequence[int]): Dilation of each layer. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. 
Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + # Parameters to build layers. 3 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks. + arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], + [6, 96, 3], [6, 160, 3], [6, 320, 1]] + + def __init__(self, + widen_factor=1., + strides=(1, 2, 2, 2, 1, 2, 1), + dilations=(1, 1, 1, 1, 1, 1, 1), + out_indices=(1, 2, 4, 6), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False): + super(MobileNetV2, self).__init__() + self.widen_factor = widen_factor + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == len(self.arch_settings) + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 7): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 7): + raise ValueError('frozen_stages must be in range(-1, 7). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks = layer_cfg + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + def make_layer(self, out_channels, num_blocks, stride, dilation, + expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): Number of blocks. + stride (int): Stride of the first block. + dilation (int): Dilation of the first block. + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. 
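+
+        Returns:
+            nn.Sequential: The stacked ``InvertedResidual`` blocks.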
+ """ + layers = [] + for i in range(num_blocks): + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride if i == 0 else 1, + expand_ratio=expand_ratio, + dilation=dilation if i == 0 else 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v3.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..172103273f385b8dcd4e89a7f8ee0714be87113e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/mobilenet_v3.py @@ -0,0 +1,255 @@ +import logging + +import custom_mmpkg.custom_mmcv as mmcv +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule, constant_init, kaiming_init +from custom_mmpkg.custom_mmcv.cnn.bricks import Conv2dAdaptivePadding +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import InvertedResidualV3 as InvertedResidual + + +@BACKBONES.register_module() +class MobileNetV3(nn.Module): + """MobileNetV3 backbone. + + This backbone is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + arch (str): Architecture of mobilnetv3, from {'small', 'large'}. + Default: 'small'. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (tuple[int]): Output from which layer. + Default: (0, 1, 12). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. 
+ """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], # block0 layer1 os=4 + [3, 72, 24, False, 'ReLU', 2], # block1 layer2 os=8 + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], # block2 layer4 os=16 + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], # block3 layer7 os=16 + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], # block4 layer9 os=32 + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], # block0 layer1 os=2 + [3, 64, 24, False, 'ReLU', 2], # block1 layer2 os=4 + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], # block2 layer4 os=8 + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], # block3 layer7 os=16 + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], # block4 layer11 os=16 + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], # block5 layer13 os=32 + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 12), + frozen_stages=-1, + reduction_factor=1, + norm_eval=False, + with_cp=False): + super(MobileNetV3, self).__init__() + assert arch in self.arch_settings + assert isinstance(reduction_factor, int) and reduction_factor > 0 + assert mmcv.is_tuple_of(out_indices, int) + for index in out_indices: + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch])+2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch])+2}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.reduction_factor = reduction_factor + self.norm_eval = norm_eval + self.with_cp = with_cp + self.layers = self._make_layer() + + def _make_layer(self): + layers = [] + + # build the first layer (layer0) + in_channels = 16 + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + layer_setting = self.arch_settings[self.arch] + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + + if self.arch == 'large' and i >= 12 or self.arch == 'small' and \ + i >= 8: + mid_channels = mid_channels // self.reduction_factor + out_channels = out_channels // self.reduction_factor + + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + with_expand_conv=(in_channels != mid_channels), + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # build the last layer + # block5 layer12 os=32 for small model + # block6 layer16 os=32 for large model + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + dilation=4, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # next, convert backbone MobileNetV3 to a semantic segmentation version + if self.arch == 'small': + self.layer4.depthwise_conv.conv.stride = (1, 1) + self.layer9.depthwise_conv.conv.stride = (1, 1) + for i in range(4, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 9: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= (modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + else: + self.layer7.depthwise_conv.conv.stride = (1, 1) + self.layer13.depthwise_conv.conv.stride = (1, 1) + for i in range(7, len(layers)): + layer = getattr(self, layers[i]) + if isinstance(layer, InvertedResidual): + modified_module = layer.depthwise_conv.conv + else: + modified_module = layer.conv + + if i < 13: + modified_module.dilation = (2, 2) + pad = 2 + else: + modified_module.dilation = (4, 4) + pad = 4 + + if not isinstance(modified_module, Conv2dAdaptivePadding): + # Adjust padding + pad *= (modified_module.kernel_size[0] - 1) // 2 + modified_module.padding = (pad, pad) + + return layers + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() 
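+            # strict=False: checkpoint keys with no counterpart in this
+            # backbone (e.g. a classification head) are skipped rather than
+            # raising an error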
+ load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + return outs + + def _freeze_stages(self): + for i in range(self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnest.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnest.py new file mode 100644 index 0000000000000000000000000000000000000000..3ea8fbe3aa2149de6367abf11273ec845a17e013 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnest.py @@ -0,0 +1,314 @@ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from custom_mmpkg.custom_mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d in ResNeSt. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels. Default: 4. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + dcn (dict): Config dict for DCN. Default: None. 
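+
+    Example:
+        >>> # a minimal sketch; with stride 1 and padding 1 the spatial size
+        >>> # is preserved and the output has ``channels`` channels
+        >>> import torch
+        >>> conv = SplitAttentionConv2d(64, 64, kernel_size=3, padding=1)
+        >>> conv.eval()
+        >>> out = conv(torch.rand(1, 64, 8, 8))
+        >>> print(tuple(out.shape))
+        (1, 64, 8, 8)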
+ """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None): + super(SplitAttentionConv2d, self).__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.with_dcn = dcn is not None + self.dcn = dcn + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = self.dcn.pop('fallback_on_stride', False) + if self.with_dcn and not fallback_on_stride: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + conv_cfg = dcn + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + """nn.Module: the normalization layer named "norm0" """ + return getattr(self, self.norm0_name) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + batch = x.size(0) + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + inplane (int): Input planes of this block. + planes (int): Middle planes of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Key word arguments for base class. 
+ """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + groups=1, + base_width=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + """Bottleneck block for ResNeSt.""" + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * + (base_width / base_channels)) * groups + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.with_modulated_dcn = False + self.conv2 = SplitAttentionConv2d( + width, + width, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=self.dcn) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Args: + groups (int): Number of groups of Bottleneck. Default: 1 + base_width (int): Base width of Bottleneck. Default: 4 + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of inter_channels in + SplitAttentionConv2d. Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + kwargs (dict): Keyword arguments for ResNet. 
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)) + } + + def __init__(self, + groups=1, + base_width=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.base_width = base_width + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..9585254cabbf84fd54cf1644b6bd7c8304f730b8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnet.py @@ -0,0 +1,688 @@ +import torch.nn as nn +import torch.utils.checkpoint as cp +from custom_mmpkg.custom_mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer, + constant_init, kaiming_init) +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _BatchNorm + +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import ResLayer + + +class BasicBlock(nn.Module): + """Basic block for ResNet.""" + + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None): + super(BasicBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
+ + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + """Bottleneck block for ResNet. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is + "caffe", the stride-two layer is the first 1x1 conv layer. + """ + + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None): + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not 
self.with_dcn or fallback_on_stride:
+            self.conv2 = build_conv_layer(
+                conv_cfg,
+                planes,
+                planes,
+                kernel_size=3,
+                stride=self.conv2_stride,
+                padding=dilation,
+                dilation=dilation,
+                bias=False)
+        else:
+            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+            self.conv2 = build_conv_layer(
+                dcn,
+                planes,
+                planes,
+                kernel_size=3,
+                stride=self.conv2_stride,
+                padding=dilation,
+                dilation=dilation,
+                bias=False)
+
+        self.add_module(self.norm2_name, norm2)
+        self.conv3 = build_conv_layer(
+            conv_cfg,
+            planes,
+            planes * self.expansion,
+            kernel_size=1,
+            bias=False)
+        self.add_module(self.norm3_name, norm3)
+
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+
+        if self.with_plugins:
+            self.after_conv1_plugin_names = self.make_block_plugins(
+                planes, self.after_conv1_plugins)
+            self.after_conv2_plugin_names = self.make_block_plugins(
+                planes, self.after_conv2_plugins)
+            self.after_conv3_plugin_names = self.make_block_plugins(
+                planes * self.expansion, self.after_conv3_plugins)
+
+    def make_block_plugins(self, in_channels, plugins):
+        """Make plugins for block.
+
+        Args:
+            in_channels (int): Input channels of plugin.
+            plugins (list[dict]): List of plugins cfg to build.
+
+        Returns:
+            list[str]: List of the names of plugin.
+        """
+        assert isinstance(plugins, list)
+        plugin_names = []
+        for plugin in plugins:
+            plugin = plugin.copy()
+            name, layer = build_plugin_layer(
+                plugin,
+                in_channels=in_channels,
+                postfix=plugin.pop('postfix', ''))
+            assert not hasattr(self, name), f'duplicate plugin {name}'
+            self.add_module(name, layer)
+            plugin_names.append(name)
+        return plugin_names
+
+    def forward_plugin(self, x, plugin_names):
+        """Forward function for plugins."""
+        out = x
+        for name in plugin_names:
+            out = getattr(self, name)(out)
+        return out
+
+    @property
+    def norm1(self):
+        """nn.Module: normalization layer after the first convolution layer"""
+        return getattr(self, self.norm1_name)
+
+    @property
+    def norm2(self):
+        """nn.Module: normalization layer after the second convolution layer"""
+        return getattr(self, self.norm2_name)
+
+    @property
+    def norm3(self):
+        """nn.Module: normalization layer after the third convolution layer"""
+        return getattr(self, self.norm3_name)
+
+    def forward(self, x):
+        """Forward function."""
+
+        def _inner_forward(x):
+            identity = x
+
+            out = self.conv1(x)
+            out = self.norm1(out)
+            out = self.relu(out)
+
+            if self.with_plugins:
+                out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+            out = self.conv2(out)
+            out = self.norm2(out)
+            out = self.relu(out)
+
+            if self.with_plugins:
+                out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+            out = self.conv3(out)
+            out = self.norm3(out)
+
+            if self.with_plugins:
+                out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+            if self.downsample is not None:
+                identity = self.downsample(x)
+
+            out += identity
+
+            return out
+
+        if self.with_cp and x.requires_grad:
+            out = cp.checkpoint(_inner_forward, x)
+        else:
+            out = _inner_forward(x)
+
+        out = self.relu(out)
+
+        return out
+
+
+@BACKBONES.register_module()
+class ResNet(nn.Module):
+    """ResNet backbone.
+
+    Args:
+        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+        in_channels (int): Number of input image channels. Default: 3.
+        stem_channels (int): Number of stem channels. Default: 64.
+        base_channels (int): Number of base channels of res layer. Default: 64.
+        num_stages (int): Resnet stages, normally 4.
+        strides (Sequence[int]): Strides of the first block of each stage.
+
+        dilations (Sequence[int]): Dilation of each stage.
+        out_indices (Sequence[int]): Output from which stages.
+        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+            layer is the 3x3 conv layer, otherwise the stride-two layer is
+            the first 1x1 conv layer.
+        deep_stem (bool): Replace the 7x7 conv in the input stem with three
+            3x3 convs.
+        avg_down (bool): Use AvgPool instead of stride conv when
+            downsampling in the bottleneck.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            -1 means not freezing any parameters.
+        norm_cfg (dict): Dictionary to construct and config norm layer.
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only.
+        plugins (list[dict]): List of plugins for stages, each dict contains:
+
+            - cfg (dict, required): Cfg dict to build plugin.
+
+            - position (str, required): Position inside block to insert plugin,
+              options: 'after_conv1', 'after_conv2', 'after_conv3'.
+
+            - stages (tuple[bool], optional): Stages to apply plugin, length
+              should be same as 'num_stages'.
+        multi_grid (Sequence[int]|None): Multi grid dilation rates of last
+            stage. Default: None.
+        contract_dilation (bool): Whether to contract the first dilation of
+            each layer. Default: False.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity.
+
+    Example:
+        >>> from custom_mmpkg.custom_mmseg.models import ResNet
+        >>> import torch
+        >>> self = ResNet(depth=18)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...
print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + multi_grid=None, + contract_dilation=False, + with_cp=False, + zero_init_residual=True): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.multi_grid = multi_grid + self.contract_dilation = contract_dilation + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + stage_plugins = self.make_stage_plugins(plugins, i) + else: + stage_plugins = None + # multi grid is applied to last layer only + stage_multi_grid = multi_grid if i == len( + self.stage_blocks) - 1 else None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + multi_grid=stage_multi_grid, + contract_dilation=contract_dilation) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i+1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins, stage_idx): + """make plugins for ResNet 'stage_idx'th stage . + + Currently we support to insert 'context_block', + 'empirical_attention_block', 'nonlocal_block' into the backbone like + ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be : + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... 
dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose 'stage_idx=0', the structure of blocks in the stage would be: + conv1-> conv2->conv3->yyy->zzz1->zzz2 + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. + stage_idx (int): Index of stage to build + + Returns: + list[dict]: Plugins for current stage + """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + """Make stem layer for ResNet.""" + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + """Freeze stages param and norm stats.""" + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. 
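+
+        Example:
+            >>> # A hedged sketch of the random-init path (no checkpoint):
+            >>> # conv layers get Kaiming init, norm layers constant init,
+            >>> # and the last norm of each residual block is zeroed when
+            >>> # ``zero_init_residual=True``.
+            >>> self = ResNet(depth=18)
+            >>> self.init_weights(pretrained=None)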
+ """ + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.dcn is not None: + for m in self.modules(): + if isinstance(m, Bottleneck) and hasattr( + m, 'conv2_offset'): + constant_init(m.conv2_offset, 0) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1c(ResNet): + """ResNetV1c variant described in [1]_. + + Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv + in the input stem with three 3x3 convs. + + References: + .. [1] https://arxiv.org/pdf/1812.01187.pdf + """ + + def __init__(self, **kwargs): + super(ResNetV1c, self).__init__( + deep_stem=True, avg_down=False, **kwargs) + + +@BACKBONES.register_module() +class ResNetV1d(ResNet): + """ResNetV1d variant described in [1]_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnext.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnext.py new file mode 100644 index 0000000000000000000000000000000000000000..d6a2910074e1671c2e7db2fd3e86f995c590d18b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/resnext.py @@ -0,0 +1,145 @@ +import math + +from custom_mmpkg.custom_mmcv.cnn import build_conv_layer, build_norm_layer + +from ..builder import BACKBONES +from ..utils import ResLayer +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is + "caffe", the stride-two layer is the first 1x1 conv layer. 
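+
+    Example:
+        >>> # A hedged sketch of the grouped-width rule used in __init__,
+        >>> # width = floor(planes * base_width / base_channels) * groups:
+        >>> import math
+        >>> math.floor(64 * 4 / 64) * 32  # planes=64, ResNeXt 32x4d
+        128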
+    """
+
+    def __init__(self,
+                 inplanes,
+                 planes,
+                 groups=1,
+                 base_width=4,
+                 base_channels=64,
+                 **kwargs):
+        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+        if groups == 1:
+            width = self.planes
+        else:
+            width = math.floor(self.planes *
+                               (base_width / base_channels)) * groups
+
+        self.norm1_name, norm1 = build_norm_layer(
+            self.norm_cfg, width, postfix=1)
+        self.norm2_name, norm2 = build_norm_layer(
+            self.norm_cfg, width, postfix=2)
+        self.norm3_name, norm3 = build_norm_layer(
+            self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+        self.conv1 = build_conv_layer(
+            self.conv_cfg,
+            self.inplanes,
+            width,
+            kernel_size=1,
+            stride=self.conv1_stride,
+            bias=False)
+        self.add_module(self.norm1_name, norm1)
+        fallback_on_stride = False
+        self.with_modulated_dcn = False
+        if self.with_dcn:
+            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+        if not self.with_dcn or fallback_on_stride:
+            self.conv2 = build_conv_layer(
+                self.conv_cfg,
+                width,
+                width,
+                kernel_size=3,
+                stride=self.conv2_stride,
+                padding=self.dilation,
+                dilation=self.dilation,
+                groups=groups,
+                bias=False)
+        else:
+            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+            self.conv2 = build_conv_layer(
+                self.dcn,
+                width,
+                width,
+                kernel_size=3,
+                stride=self.conv2_stride,
+                padding=self.dilation,
+                dilation=self.dilation,
+                groups=groups,
+                bias=False)
+
+        self.add_module(self.norm2_name, norm2)
+        self.conv3 = build_conv_layer(
+            self.conv_cfg,
+            width,
+            self.planes * self.expansion,
+            kernel_size=1,
+            bias=False)
+        self.add_module(self.norm3_name, norm3)
+
+
+@BACKBONES.register_module()
+class ResNeXt(ResNet):
+    """ResNeXt backbone.
+
+    Args:
+        depth (int): Depth of resnext, from {50, 101, 152}.
+        in_channels (int): Number of input image channels. Normally 3.
+        num_stages (int): Resnet stages, normally 4.
+        groups (int): Group of resnext.
+        base_width (int): Base width of resnext.
+        strides (Sequence[int]): Strides of the first block of each stage.
+        dilations (Sequence[int]): Dilation of each stage.
+        out_indices (Sequence[int]): Output from which stages.
+        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+            layer is the 3x3 conv layer, otherwise the stride-two layer is
+            the first 1x1 conv layer.
+        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+            not freezing any parameters.
+        norm_cfg (dict): Dictionary to construct and config norm layer.
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity.
+
+    Example:
+        >>> from custom_mmpkg.custom_mmseg.models import ResNeXt
+        >>> import torch
+        >>> self = ResNeXt(depth=50)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...
print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + self.groups = groups + self.base_width = base_width + super(ResNeXt, self).__init__(**kwargs) + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``""" + return ResLayer( + groups=self.groups, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/unet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/unet.py new file mode 100644 index 0000000000000000000000000000000000000000..694272114506e42ebc2531996432a567e1e588b6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/unet.py @@ -0,0 +1,429 @@ +import torch.nn as nn +import torch.utils.checkpoint as cp +from custom_mmpkg.custom_mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer, + build_norm_layer, constant_init, kaiming_init) +from custom_mmpkg.custom_mmcv.runner import load_checkpoint +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _BatchNorm + +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import UpConvBlock + + +class BasicConvBlock(nn.Module): + """Basic convolutional block for UNet. + + This module consists of several plain convolutional layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers. Default: 2. + stride (int): Whether use stride convolution to downsample + the input feature map. If stride=2, it only uses stride convolution + in the first convolutional layer to downsample the input feature + map. Options are 1 or 2. Default: 1. + dilation (int): Whether use dilated convolution to expand the + receptive field. Set dilation rate of each convolutional layer and + the dilation rate of the first convolutional layer is always 1. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dcn=None, + plugins=None): + super(BasicConvBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
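+        # Note: only the first conv in the stack may downsample (stride) and
+        # it always uses dilation 1; every later conv keeps stride 1 and
+        # applies the requested dilation, so the spatial size changes at most
+        # once per block.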
+ + self.with_cp = with_cp + convs = [] + for i in range(num_convs): + convs.append( + ConvModule( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride if i == 0 else 1, + dilation=1 if i == 0 else dilation, + padding=1 if i == 0 else dilation, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.convs = nn.Sequential(*convs) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.convs, x) + else: + out = self.convs(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class DeconvModule(nn.Module): + """Deconvolution upsample module in decoder for UNet (2X upsample). + + This module uses deconvolution to upsample feature map in the decoder + of UNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of the convolutional layer. Default: 4. + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + kernel_size=4, + scale_factor=2): + super(DeconvModule, self).__init__() + + assert (kernel_size - scale_factor >= 0) and\ + (kernel_size - scale_factor) % 2 == 0,\ + f'kernel_size should be greater than or equal to scale_factor '\ + f'and (kernel_size - scale_factor) should be even numbers, '\ + f'while the kernel size is {kernel_size} and scale_factor is '\ + f'{scale_factor}.' + + stride = scale_factor + padding = (kernel_size - scale_factor) // 2 + self.with_cp = with_cp + deconv = nn.ConvTranspose2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding) + + norm_name, norm = build_norm_layer(norm_cfg, out_channels) + activate = build_activation_layer(act_cfg) + self.deconv_upsamping = nn.Sequential(deconv, norm, activate) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.deconv_upsamping, x) + else: + out = self.deconv_upsamping(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class InterpConv(nn.Module): + """Interpolation upsample module in decoder for UNet. + + This module uses interpolation to upsample feature map in the decoder + of UNet. It consists of one interpolation upsample layer and one + convolutional layer. It can be one interpolation upsample layer followed + by one convolutional layer (conv_first=False) or one convolutional layer + followed by one interpolation upsample layer (conv_first=True). + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + conv_first (bool): Whether convolutional layer or interpolation + upsample layer first. Default: False. 
It means interpolation + upsample layer followed by one convolutional layer. + kernel_size (int): Kernel size of the convolutional layer. Default: 1. + stride (int): Stride of the convolutional layer. Default: 1. + padding (int): Padding of the convolutional layer. Default: 1. + upsample_cfg (dict): Interpolation config of the upsample layer. + Default: dict( + scale_factor=2, mode='bilinear', align_corners=False). + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + conv_cfg=None, + conv_first=False, + kernel_size=1, + stride=1, + padding=0, + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False)): + super(InterpConv, self).__init__() + + self.with_cp = with_cp + conv = ConvModule( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + upsample = nn.Upsample(**upsample_cfg) + if conv_first: + self.interp_upsample = nn.Sequential(conv, upsample) + else: + self.interp_upsample = nn.Sequential(upsample, conv) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.interp_upsample, x) + else: + out = self.interp_upsample(x) + return out + + +@BACKBONES.register_module() +class UNet(nn.Module): + """UNet backbone. + U-Net: Convolutional Networks for Biomedical Image Segmentation. + https://arxiv.org/pdf/1505.04597.pdf + + Args: + in_channels (int): Number of input image channels. Default" 3. + base_channels (int): Number of base channels of each stage. + The output channels of the first stage. Default: 64. + num_stages (int): Number of stages in encoder, normally 5. Default: 5. + strides (Sequence[int 1 | 2]): Strides of each stage in encoder. + len(strides) is equal to num_stages. Normally the stride of the + first stage in encoder is 1. If strides[i]=2, it uses stride + convolution to downsample in the correspondence encoder stage. + Default: (1, 1, 1, 1, 1). + enc_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence encoder stage. + Default: (2, 2, 2, 2, 2). + dec_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence decoder stage. + Default: (2, 2, 2, 2). + downsamples (Sequence[int]): Whether use MaxPool to downsample the + feature map after the first stage of encoder + (stages: [1, num_stages)). If the correspondence encoder stage use + stride convolution (strides[i]=2), it will never use MaxPool to + downsample, even downsamples[i-1]=True. + Default: (True, True, True, True). + enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. + Default: (1, 1, 1, 1, 1). + dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. + Default: (1, 1, 1, 1). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='InterpConv'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Default: False. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + + Notice: + The input image size should be divisible by the whole downsample rate + of the encoder. More detail of the whole downsample rate can be found + in UNet._check_input_divisible. + + """ + + def __init__(self, + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='InterpConv'), + norm_eval=False, + dcn=None, + plugins=None): + super(UNet, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + assert len(strides) == num_stages, \ + 'The length of strides should be equal to num_stages, '\ + f'while the strides is {strides}, the length of '\ + f'strides is {len(strides)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_num_convs) == num_stages, \ + 'The length of enc_num_convs should be equal to num_stages, '\ + f'while the enc_num_convs is {enc_num_convs}, the length of '\ + f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_num_convs) == (num_stages-1), \ + 'The length of dec_num_convs should be equal to (num_stages-1), '\ + f'while the dec_num_convs is {dec_num_convs}, the length of '\ + f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(downsamples) == (num_stages-1), \ + 'The length of downsamples should be equal to (num_stages-1), '\ + f'while the downsamples is {downsamples}, the length of '\ + f'downsamples is {len(downsamples)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_dilations) == num_stages, \ + 'The length of enc_dilations should be equal to num_stages, '\ + f'while the enc_dilations is {enc_dilations}, the length of '\ + f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_dilations) == (num_stages-1), \ + 'The length of dec_dilations should be equal to (num_stages-1), '\ + f'while the dec_dilations is {dec_dilations}, the length of '\ + f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ + f'{num_stages}.' 
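+        # Encoder stage i outputs base_channels * 2**i channels; decoder
+        # stage i fuses the matching encoder skip feature and halves the
+        # channels back to base_channels * 2**(i - 1). For example (hedged,
+        # base_channels=4, 32x32 input): encoder features run (4, 32, 32) ->
+        # (64, 2, 2), and the returned dec_outs run coarse-to-fine from
+        # (64, 2, 2) back up to (4, 32, 32).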
+ self.num_stages = num_stages + self.strides = strides + self.downsamples = downsamples + self.norm_eval = norm_eval + self.base_channels = base_channels + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + for i in range(num_stages): + enc_conv_block = [] + if i != 0: + if strides[i] == 1 and downsamples[i - 1]: + enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) + upsample = (strides[i] != 1 or downsamples[i - 1]) + self.decoder.append( + UpConvBlock( + conv_block=BasicConvBlock, + in_channels=base_channels * 2**i, + skip_channels=base_channels * 2**(i - 1), + out_channels=base_channels * 2**(i - 1), + num_convs=dec_num_convs[i - 1], + stride=1, + dilation=dec_dilations[i - 1], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + upsample_cfg=upsample_cfg if upsample else None, + dcn=None, + plugins=None)) + + enc_conv_block.append( + BasicConvBlock( + in_channels=in_channels, + out_channels=base_channels * 2**i, + num_convs=enc_num_convs[i], + stride=strides[i], + dilation=enc_dilations[i], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None)) + self.encoder.append((nn.Sequential(*enc_conv_block))) + in_channels = base_channels * 2**i + + def forward(self, x): + self._check_input_divisible(x) + enc_outs = [] + for enc in self.encoder: + x = enc(x) + enc_outs.append(x) + dec_outs = [x] + for i in reversed(range(len(self.decoder))): + x = self.decoder[i](enc_outs[i], x) + dec_outs.append(x) + + return dec_outs + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(UNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _check_input_divisible(self, x): + h, w = x.shape[-2:] + whole_downsample_rate = 1 + for i in range(1, self.num_stages): + if self.strides[i] == 2 or self.downsamples[i - 1]: + whole_downsample_rate *= 2 + assert (h % whole_downsample_rate == 0) \ + and (w % whole_downsample_rate == 0),\ + f'The input image size {(h, w)} should be divisible by the whole '\ + f'downsample rate {whole_downsample_rate}, when num_stages is '\ + f'{self.num_stages}, strides is {self.strides}, and downsamples '\ + f'is {self.downsamples}.' + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. 
+ """ + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/vit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..93bae38424b69dd6699089163db30fa787efb9ac --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/backbones/vit.py @@ -0,0 +1,459 @@ +"""Modified from https://github.com/rwightman/pytorch-image- +models/blob/master/timm/models/vision_transformer.py.""" + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from custom_mmpkg.custom_mmcv.cnn import (Conv2d, Linear, build_activation_layer, build_norm_layer, + constant_init, kaiming_init, normal_init) +from custom_mmpkg.custom_mmcv.runner import _load_checkpoint +from custom_mmpkg.custom_mmcv.utils.parrots_wrapper import _BatchNorm + +from custom_mmpkg.custom_mmseg.utils import get_root_logger +from ..builder import BACKBONES +from ..utils import DropPath, trunc_normal_ + + +class Mlp(nn.Module): + """MLP layer for Encoder block. + + Args: + in_features(int): Input dimension for the first fully + connected layer. + hidden_features(int): Output dimension for the first fully + connected layer. + out_features(int): Output dementsion for the second fully + connected layer. + act_cfg(dict): Config dict for activation layer. + Default: dict(type='GELU'). + drop(float): Drop rate for the dropout layer. Dropout rate has + to be between 0 and 1. Default: 0. + """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + drop=0.): + super(Mlp, self).__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = Linear(in_features, hidden_features) + self.act = build_activation_layer(act_cfg) + self.fc2 = Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + """Attention layer for Encoder block. + + Args: + dim (int): Dimension for the input vector. + num_heads (int): Number of parallel attention heads. + qkv_bias (bool): Enable bias for qkv if True. Default: False. + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + attn_drop (float): Drop rate for attention output weights. + Default: 0. + proj_drop (float): Drop rate for output weights. Default: 0. 
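+
+    Example:
+        >>> # A hedged shape sketch; dim=64 and the 2x16 token grid are
+        >>> # illustrative, not defaults from this file.
+        >>> import torch
+        >>> self = Attention(dim=64, num_heads=8)
+        >>> tuple(self(torch.rand(2, 16, 64)).shape)
+        (2, 16, 64)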
+ """ + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super(Attention, self).__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + b, n, c = x.shape + qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, + c // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(b, n, c) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """Implements encoder block with residual connection. + + Args: + dim (int): The feature dimension. + num_heads (int): Number of parallel attention heads. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + drop (float): Drop rate for mlp output weights. Default: 0. + attn_drop (float): Drop rate for attention output weights. + Default: 0. + proj_drop (float): Drop rate for attn layer output weights. + Default: 0. + drop_path (float): Drop rate for paths of model. + Default: 0. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN', requires_grad=True). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + dim, + num_heads, + mlp_ratio=4, + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + proj_drop=0., + drop_path=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN', eps=1e-6), + with_cp=False): + super(Block, self).__init__() + self.with_cp = with_cp + _, self.norm1 = build_norm_layer(norm_cfg, dim) + self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, attn_drop, + proj_drop) + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + _, self.norm2 = build_norm_layer(norm_cfg, dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + def forward(self, x): + + def _inner_forward(x): + out = x + self.drop_path(self.attn(self.norm1(x))) + out = out + self.drop_path(self.mlp(self.norm2(out))) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding. + + Args: + img_size (int | tuple): Input image size. + default: 224. + patch_size (int): Width and height for a patch. + default: 16. + in_channels (int): Input channels for images. Default: 3. + embed_dim (int): The embedding dimension. Default: 768. 
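+
+    Example:
+        >>> # A minimal shape sketch using the defaults above: a 224x224
+        >>> # image becomes a (224 / 16)**2 = 196-token sequence.
+        >>> import torch
+        >>> self = PatchEmbed(img_size=224, patch_size=16, embed_dim=768)
+        >>> self.num_patches
+        196
+        >>> tuple(self(torch.rand(1, 3, 224, 224)).shape)
+        (1, 196, 768)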
+ """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dim=768): + super(PatchEmbed, self).__init__() + if isinstance(img_size, int): + self.img_size = (img_size, img_size) + elif isinstance(img_size, tuple): + self.img_size = img_size + else: + raise TypeError('img_size must be type of int or tuple') + h, w = self.img_size + self.patch_size = (patch_size, patch_size) + self.num_patches = (h // patch_size) * (w // patch_size) + self.proj = Conv2d( + in_channels, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + return self.proj(x).flatten(2).transpose(1, 2) + + +@BACKBONES.register_module() +class VisionTransformer(nn.Module): + """Vision transformer backbone. + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for + Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 + + Args: + img_size (tuple): input image size. Default: (224, 224). + patch_size (int, tuple): patch size. Default: 16. + in_channels (int): number of input channels. Default: 3. + embed_dim (int): embedding dimension. Default: 768. + depth (int): depth of transformer. Default: 12. + num_heads (int): number of attention heads. Default: 12. + mlp_ratio (int): ratio of mlp hidden dim to embedding dim. + Default: 4. + out_indices (list | tuple | int): Output from which stages. + Default: -1. + qkv_bias (bool): enable bias for qkv if True. Default: True. + qk_scale (float): override default qk scale of head_dim ** -0.5 if set. + drop_rate (float): dropout rate. Default: 0. + attn_drop_rate (float): attention dropout rate. Default: 0. + drop_path_rate (float): Rate of DropPath. Default: 0. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN', eps=1e-6, requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='GELU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Default: False. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Default: bicubic. + with_cls_token (bool): If concatenating class token into image tokens + as transformer input. Default: True. + with_cp (bool): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. 
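+
+    Example:
+        >>> # A hedged usage sketch; the tiny depth/width below are chosen
+        >>> # only to keep the example cheap, not recommended settings.
+        >>> import torch
+        >>> self = VisionTransformer(
+        ...     img_size=(32, 32), patch_size=8, embed_dim=32, depth=2,
+        ...     num_heads=4, out_indices=1)
+        >>> outs = self(torch.rand(1, 3, 32, 32))
+        >>> tuple(outs[0].shape)  # (B, C, H // patch_size, W // patch_size)
+        (1, 32, 4, 4)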
+ """ + + def __init__(self, + img_size=(224, 224), + patch_size=16, + in_channels=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + out_indices=11, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6, requires_grad=True), + act_cfg=dict(type='GELU'), + norm_eval=False, + final_norm=False, + with_cls_token=True, + interpolate_mode='bicubic', + with_cp=False): + super(VisionTransformer, self).__init__() + self.img_size = img_size + self.patch_size = patch_size + self.features = self.embed_dim = embed_dim + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dim=embed_dim) + + self.with_cls_token = with_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + self.pos_embed = nn.Parameter( + torch.zeros(1, self.patch_embed.num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + self.out_indices = [out_indices] + elif isinstance(out_indices, list) or isinstance(out_indices, tuple): + self.out_indices = out_indices + else: + raise TypeError('out_indices must be type of int, list or tuple') + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=dpr[i], + attn_drop=attn_drop_rate, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp) for i in range(depth) + ]) + + self.interpolate_mode = interpolate_mode + self.final_norm = final_norm + if final_norm: + _, self.norm = build_norm_layer(norm_cfg, embed_dim) + + self.norm_eval = norm_eval + self.with_cp = with_cp + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = get_root_logger() + checkpoint = _load_checkpoint(pretrained, logger=logger) + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + if 'pos_embed' in state_dict.keys(): + if self.pos_embed.shape != state_dict['pos_embed'].shape: + logger.info(msg=f'Resize the pos_embed shape from \ +{state_dict["pos_embed"].shape} to {self.pos_embed.shape}') + h, w = self.img_size + pos_size = int( + math.sqrt(state_dict['pos_embed'].shape[1] - 1)) + state_dict['pos_embed'] = self.resize_pos_embed( + state_dict['pos_embed'], (h, w), (pos_size, pos_size), + self.patch_size, self.interpolate_mode) + + self.load_state_dict(state_dict, False) + + elif pretrained is None: + # We only implement the 'jax_impl' initialization implemented at + # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501 + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + for n, m in self.named_modules(): + if isinstance(m, Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + if 'mlp' in n: + normal_init(m.bias, std=1e-6) + else: + constant_init(m.bias, 0) + elif isinstance(m, Conv2d): + kaiming_init(m.weight, mode='fan_in') + if m.bias is not None: + constant_init(m.bias, 0) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m.bias, 0) + constant_init(m.weight, 1.0) + else: + raise TypeError('pretrained must be a str or None') + + def _pos_embeding(self, img, patched_img, pos_embed): + """Positiong embeding method. 
+ + Resize the pos_embed, if the input image size doesn't match + the training size. + Args: + img (torch.Tensor): The inference image tensor, the shape + must be [B, C, H, W]. + patched_img (torch.Tensor): The patched image, it should be + shape of [B, L1, C]. + pos_embed (torch.Tensor): The pos_embed weighs, it should be + shape of [B, L2, c]. + Return: + torch.Tensor: The pos encoded image feature. + """ + assert patched_img.ndim == 3 and pos_embed.ndim == 3, \ + 'the shapes of patched_img and pos_embed must be [B, L, C]' + x_len, pos_len = patched_img.shape[1], pos_embed.shape[1] + if x_len != pos_len: + if pos_len == (self.img_size[0] // self.patch_size) * ( + self.img_size[1] // self.patch_size) + 1: + pos_h = self.img_size[0] // self.patch_size + pos_w = self.img_size[1] // self.patch_size + else: + raise ValueError( + 'Unexpected shape of pos_embed, got {}.'.format( + pos_embed.shape)) + pos_embed = self.resize_pos_embed(pos_embed, img.shape[2:], + (pos_h, pos_w), self.patch_size, + self.interpolate_mode) + return self.pos_drop(patched_img + pos_embed) + + @staticmethod + def resize_pos_embed(pos_embed, input_shpae, pos_shape, patch_size, mode): + """Resize pos_embed weights. + + Resize pos_embed using bicubic interpolate method. + Args: + pos_embed (torch.Tensor): pos_embed weights. + input_shpae (tuple): Tuple for (input_h, intput_w). + pos_shape (tuple): Tuple for (pos_h, pos_w). + patch_size (int): Patch size. + Return: + torch.Tensor: The resized pos_embed of shape [B, L_new, C] + """ + assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' + input_h, input_w = input_shpae + pos_h, pos_w = pos_shape + cls_token_weight = pos_embed[:, 0] + pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] + pos_embed_weight = pos_embed_weight.reshape( + 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2) + pos_embed_weight = F.interpolate( + pos_embed_weight, + size=[input_h // patch_size, input_w // patch_size], + align_corners=False, + mode=mode) + cls_token_weight = cls_token_weight.unsqueeze(1) + pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2) + pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1) + return pos_embed + + def forward(self, inputs): + B = inputs.shape[0] + + x = self.patch_embed(inputs) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = self._pos_embeding(inputs, x, self.pos_embed) + + if not self.with_cls_token: + # Remove class token for transformer input + x = x[:, 1:] + + outs = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if i == len(self.blocks) - 1: + if self.final_norm: + x = self.norm(x) + if i in self.out_indices: + if self.with_cls_token: + # Remove class token and reshape token for decoder head + out = x[:, 1:] + else: + out = x + B, _, C = out.shape + out = out.reshape(B, inputs.shape[2] // self.patch_size, + inputs.shape[3] // self.patch_size, + C).permute(0, 3, 1, 2) + outs.append(out) + + return tuple(outs) + + def train(self, mode=True): + super(VisionTransformer, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.LayerNorm): + m.eval() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e5920e9dec9d62e5a62ed688cab7d3bfd1ac74 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/builder.py @@ -0,0 +1,46 @@ +import warnings + +from custom_mmpkg.custom_mmcv.cnn import MODELS as MMCV_MODELS +from custom_mmpkg.custom_mmcv.utils import Registry + +MODELS = Registry('models', parent=MMCV_MODELS) + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +SEGMENTORS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_segmentor(cfg, train_cfg=None, test_cfg=None): + """Build segmentor.""" + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return SEGMENTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ac66d3cfe0ea04af45c0f3594bf135841c3812e3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/__init__.py @@ -0,0 +1,28 @@ +from .ann_head import ANNHead +from .apc_head import APCHead +from .aspp_head import ASPPHead +from .cc_head import CCHead +from .da_head import DAHead +from .dm_head import DMHead +from .dnl_head import DNLHead +from .ema_head import EMAHead +from .enc_head import EncHead +from .fcn_head import FCNHead +from .fpn_head import FPNHead +from .gc_head import GCHead +from .lraspp_head import LRASPPHead +from .nl_head import NLHead +from .ocr_head import OCRHead +# from .point_head import PointHead +from .psa_head import PSAHead +from .psp_head import PSPHead +from .sep_aspp_head import DepthwiseSeparableASPPHead +from .sep_fcn_head import DepthwiseSeparableFCNHead +from .uper_head import UPerHead + +__all__ = [ + 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead', + 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead', + 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead', + 'APCHead', 'DMHead', 'LRASPPHead' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ann_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ann_head.py new file mode 100644 index 0000000000000000000000000000000000000000..bebbc4f1ba6f76508a3f71265e519cbd24a509cc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ann_head.py @@ -0,0 +1,245 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class PPMConcat(nn.ModuleList): + """Pyramid Pooling Module that only concat the features of each layer. 
+ + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. + """ + + def __init__(self, pool_scales=(1, 3, 6, 8)): + super(PPMConcat, self).__init__( + [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) + + def forward(self, feats): + """Forward function.""" + ppm_outs = [] + for ppm in self: + ppm_out = ppm(feats) + ppm_outs.append(ppm_out.view(*feats.shape[:2], -1)) + concat_outs = torch.cat(ppm_outs, dim=2) + return concat_outs + + +class SelfAttentionBlock(_SelfAttentionBlock): + """Make a ANN used SelfAttentionBlock. + + Args: + low_in_channels (int): Input channels of lower level feature, + which is the key feature for self-attention. + high_in_channels (int): Input channels of higher level feature, + which is the query feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + share_key_query (bool): Whether share projection weight between key + and query projection. + query_scale (int): The scale of query feature map. + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. + """ + + def __init__(self, low_in_channels, high_in_channels, channels, + out_channels, share_key_query, query_scale, key_pool_scales, + conv_cfg, norm_cfg, act_cfg): + key_psp = PPMConcat(key_pool_scales) + if query_scale > 1: + query_downsample = nn.MaxPool2d(kernel_size=query_scale) + else: + query_downsample = None + super(SelfAttentionBlock, self).__init__( + key_in_channels=low_in_channels, + query_in_channels=high_in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=share_key_query, + query_downsample=query_downsample, + key_downsample=key_psp, + key_query_num_convs=1, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + +class AFNB(nn.Module): + """Asymmetric Fusion Non-local Block(AFNB) + + Args: + low_in_channels (int): Input channels of lower level feature, + which is the key feature for self-attention. + high_in_channels (int): Input channels of higher level feature, + which is the query feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + and query projection. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. 
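+
+    Example:
+        >>> # A hedged sketch of what key_pool_scales does: PPMConcat
+        >>> # flattens each pooled map, so (1, 3, 6, 8) yields
+        >>> # 1 + 9 + 36 + 64 = 110 key positions per image.
+        >>> import torch
+        >>> tuple(PPMConcat((1, 3, 6, 8))(torch.rand(2, 16, 32, 32)).shape)
+        (2, 16, 110)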
+ """ + + def __init__(self, low_in_channels, high_in_channels, channels, + out_channels, query_scales, key_pool_scales, conv_cfg, + norm_cfg, act_cfg): + super(AFNB, self).__init__() + self.stages = nn.ModuleList() + for query_scale in query_scales: + self.stages.append( + SelfAttentionBlock( + low_in_channels=low_in_channels, + high_in_channels=high_in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=False, + query_scale=query_scale, + key_pool_scales=key_pool_scales, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottleneck = ConvModule( + out_channels + high_in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, low_feats, high_feats): + """Forward function.""" + priors = [stage(high_feats, low_feats) for stage in self.stages] + context = torch.stack(priors, dim=0).sum(dim=0) + output = self.bottleneck(torch.cat([context, high_feats], 1)) + return output + + +class APNB(nn.Module): + """Asymmetric Pyramid Non-local Block (APNB) + + Args: + in_channels (int): Input channels of key/query feature, + which is the key feature for self-attention. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module of key feature. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. + """ + + def __init__(self, in_channels, channels, out_channels, query_scales, + key_pool_scales, conv_cfg, norm_cfg, act_cfg): + super(APNB, self).__init__() + self.stages = nn.ModuleList() + for query_scale in query_scales: + self.stages.append( + SelfAttentionBlock( + low_in_channels=in_channels, + high_in_channels=in_channels, + channels=channels, + out_channels=out_channels, + share_key_query=True, + query_scale=query_scale, + key_pool_scales=key_pool_scales, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + self.bottleneck = ConvModule( + 2 * in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, feats): + """Forward function.""" + priors = [stage(feats, feats) for stage in self.stages] + context = torch.stack(priors, dim=0).sum(dim=0) + output = self.bottleneck(torch.cat([context, feats], 1)) + return output + + +@HEADS.register_module() +class ANNHead(BaseDecodeHead): + """Asymmetric Non-local Neural Networks for Semantic Segmentation. + + This head is the implementation of `ANNNet + `_. + + Args: + project_channels (int): Projection channels for Nonlocal. + query_scales (tuple[int]): The scales of query feature map. + Default: (1,) + key_pool_scales (tuple[int]): The pooling scales of key feature map. + Default: (1, 3, 6, 8). 
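+
+    Example:
+        >>> # A hedged construction sketch in the usual mmseg config style;
+        >>> # the channel sizes and num_classes are illustrative only.
+        >>> self = ANNHead(
+        ...     in_channels=[1024, 2048], in_index=[2, 3], channels=512,
+        ...     project_channels=256, num_classes=19)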
+ """ + + def __init__(self, + project_channels, + query_scales=(1, ), + key_pool_scales=(1, 3, 6, 8), + **kwargs): + super(ANNHead, self).__init__( + input_transform='multiple_select', **kwargs) + assert len(self.in_channels) == 2 + low_in_channels, high_in_channels = self.in_channels + self.project_channels = project_channels + self.fusion = AFNB( + low_in_channels=low_in_channels, + high_in_channels=high_in_channels, + out_channels=high_in_channels, + channels=project_channels, + query_scales=query_scales, + key_pool_scales=key_pool_scales, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + high_in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.context = APNB( + in_channels=self.channels, + out_channels=self.channels, + channels=project_channels, + query_scales=query_scales, + key_pool_scales=key_pool_scales, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + low_feats, high_feats = self._transform_inputs(inputs) + output = self.fusion(low_feats, high_feats) + output = self.dropout(output) + output = self.bottleneck(output) + output = self.context(output) + output = self.cls_seg(output) + + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/apc_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/apc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..119c083a3422b939615a2310d647993d31cb4dc0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/apc_head.py @@ -0,0 +1,158 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class ACM(nn.Module): + """Adaptive Context Module used in APCNet. + + Args: + pool_scale (int): Pooling scale used in Adaptive Context + Module to extract region features. + fusion (bool): Add one conv to fuse residual feature. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. 
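+
+    Example:
+        >>> # A hedged shape sketch; all sizes are illustrative. Batch 2 is
+        >>> # used so BatchNorm on the 1x1 global-info path can run in
+        >>> # training mode.
+        >>> import torch
+        >>> acm = ACM(pool_scale=2, fusion=True, in_channels=64, channels=32,
+        ...           conv_cfg=None, norm_cfg=dict(type='BN'),
+        ...           act_cfg=dict(type='ReLU'))
+        >>> tuple(acm(torch.rand(2, 64, 16, 16)).shape)
+        (2, 32, 16, 16)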
+ """ + + def __init__(self, pool_scale, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super(ACM, self).__init__() + self.pool_scale = pool_scale + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.pooled_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.global_info = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.gla = nn.Conv2d(self.channels, self.pool_scale**2, 1, 1, 0) + + self.residual_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + pooled_x = F.adaptive_avg_pool2d(x, self.pool_scale) + # [batch_size, channels, h, w] + x = self.input_redu_conv(x) + # [batch_size, channels, pool_scale, pool_scale] + pooled_x = self.pooled_redu_conv(pooled_x) + batch_size = x.size(0) + # [batch_size, pool_scale * pool_scale, channels] + pooled_x = pooled_x.view(batch_size, self.channels, + -1).permute(0, 2, 1).contiguous() + # [batch_size, h * w, pool_scale * pool_scale] + affinity_matrix = self.gla(x + resize( + self.global_info(F.adaptive_avg_pool2d(x, 1)), size=x.shape[2:]) + ).permute(0, 2, 3, 1).reshape( + batch_size, -1, self.pool_scale**2) + affinity_matrix = F.sigmoid(affinity_matrix) + # [batch_size, h * w, channels] + z_out = torch.matmul(affinity_matrix, pooled_x) + # [batch_size, channels, h * w] + z_out = z_out.permute(0, 2, 1).contiguous() + # [batch_size, channels, h, w] + z_out = z_out.view(batch_size, self.channels, x.size(2), x.size(3)) + z_out = self.residual_conv(z_out) + z_out = F.relu(z_out + x) + if self.fusion: + z_out = self.fusion_conv(z_out) + + return z_out + + +@HEADS.register_module() +class APCHead(BaseDecodeHead): + """Adaptive Pyramid Context Network for Semantic Segmentation. + + This head is the implementation of + `APCNet `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Adaptive Context + Module. Default: (1, 2, 3, 6). + fusion (bool): Add one conv to fuse residual feature. 
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), fusion=True, **kwargs): + super(APCHead, self).__init__(**kwargs) + assert isinstance(pool_scales, (list, tuple)) + self.pool_scales = pool_scales + self.fusion = fusion + acm_modules = [] + for pool_scale in self.pool_scales: + acm_modules.append( + ACM(pool_scale, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.acm_modules = nn.ModuleList(acm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + acm_outs = [x] + for acm_module in self.acm_modules: + acm_outs.append(acm_module(x)) + acm_outs = torch.cat(acm_outs, dim=1) + output = self.bottleneck(acm_outs) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/aspp_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/aspp_head.py new file mode 100644 index 0000000000000000000000000000000000000000..5b251f2659b9800df341d214610f3766ef81a835 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/aspp_head.py @@ -0,0 +1,107 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class ASPPModule(nn.ModuleList): + """Atrous Spatial Pyramid Pooling (ASPP) Module. + + Args: + dilations (tuple[int]): Dilation rate of each layer. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg, + act_cfg): + super(ASPPModule, self).__init__() + self.dilations = dilations + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + for dilation in dilations: + self.append( + ConvModule( + self.in_channels, + self.channels, + 1 if dilation == 1 else 3, + dilation=dilation, + padding=0 if dilation == 1 else dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, x): + """Forward function.""" + aspp_outs = [] + for aspp_module in self: + aspp_outs.append(aspp_module(x)) + + return aspp_outs + + +@HEADS.register_module() +class ASPPHead(BaseDecodeHead): + """Rethinking Atrous Convolution for Semantic Image Segmentation. + + This head is the implementation of `DeepLabV3 + `_. + + Args: + dilations (tuple[int]): Dilation rates for ASPP module. + Default: (1, 6, 12, 18). 
+ """ + + def __init__(self, dilations=(1, 6, 12, 18), **kwargs): + super(ASPPHead, self).__init__(**kwargs) + assert isinstance(dilations, (list, tuple)) + self.dilations = dilations + self.image_pool = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.aspp_modules = ASPPModule( + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + (len(dilations) + 1) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + aspp_outs = [ + resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ] + aspp_outs.extend(self.aspp_modules(x)) + aspp_outs = torch.cat(aspp_outs, dim=1) + output = self.bottleneck(aspp_outs) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cascade_decode_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cascade_decode_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d02122ca0e68743b1bf7a893afae96042f23838c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cascade_decode_head.py @@ -0,0 +1,57 @@ +from abc import ABCMeta, abstractmethod + +from .decode_head import BaseDecodeHead + + +class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): + """Base class for cascade decode head used in + :class:`CascadeEncoderDecoder.""" + + def __init__(self, *args, **kwargs): + super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) + + @abstractmethod + def forward(self, inputs, prev_output): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, + train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self.forward(inputs, prev_output) + losses = self.losses(seg_logits, gt_semantic_seg) + + return losses + + def forward_test(self, inputs, prev_output, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. 
+ + Returns: + Tensor: Output segmentation map. + """ + return self.forward(inputs, prev_output) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cc_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..a582718478dab5c55eec3de6bcf7ac842da25e8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/cc_head.py @@ -0,0 +1,42 @@ +import torch + +from ..builder import HEADS +from .fcn_head import FCNHead + +try: + from custom_mmpkg.custom_mmcv.ops import CrissCrossAttention +except ModuleNotFoundError: + CrissCrossAttention = None + + +@HEADS.register_module() +class CCHead(FCNHead): + """CCNet: Criss-Cross Attention for Semantic Segmentation. + + This head is the implementation of `CCNet + `_. + + Args: + recurrence (int): Number of recurrence of Criss Cross Attention + module. Default: 2. + """ + + def __init__(self, recurrence=2, **kwargs): + if CrissCrossAttention is None: + raise RuntimeError('Please install mmcv-full for ' + 'CrissCrossAttention ops') + super(CCHead, self).__init__(num_convs=2, **kwargs) + self.recurrence = recurrence + self.cca = CrissCrossAttention(self.channels) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + for _ in range(self.recurrence): + output = self.cca(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/da_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/da_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ce384a5f040e815c61ffd4a0e46d058fa874e11a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/da_head.py @@ -0,0 +1,178 @@ +import torch +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule, Scale +from torch import nn + +from custom_mmpkg.custom_mmseg.core import add_prefix +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .decode_head import BaseDecodeHead + + +class PAM(_SelfAttentionBlock): + """Position Attention Module (PAM) + + Args: + in_channels (int): Input channels of key/query feature. + channels (int): Output channels of key/query transform. 
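+
+    Example (illustrative sketch; the channel sizes are assumptions, chosen
+        to show that PAM preserves the input shape via its residual path):
+        >>> import torch
+        >>> pam = PAM(in_channels=64, channels=16)
+        >>> pam(torch.rand(1, 64, 32, 32)).shape
+        torch.Size([1, 64, 32, 32])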
+ """ + + def __init__(self, in_channels, channels): + super(PAM, self).__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=None, + key_downsample=None, + key_query_num_convs=1, + key_query_norm=False, + value_out_num_convs=1, + value_out_norm=False, + matmul_norm=False, + with_out=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None) + + self.gamma = Scale(0) + + def forward(self, x): + """Forward function.""" + out = super(PAM, self).forward(x, x) + + out = self.gamma(out) + x + return out + + +class CAM(nn.Module): + """Channel Attention Module (CAM)""" + + def __init__(self): + super(CAM, self).__init__() + self.gamma = Scale(0) + + def forward(self, x): + """Forward function.""" + batch_size, channels, height, width = x.size() + proj_query = x.view(batch_size, channels, -1) + proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1) + energy = torch.bmm(proj_query, proj_key) + energy_new = torch.max( + energy, -1, keepdim=True)[0].expand_as(energy) - energy + attention = F.softmax(energy_new, dim=-1) + proj_value = x.view(batch_size, channels, -1) + + out = torch.bmm(attention, proj_value) + out = out.view(batch_size, channels, height, width) + + out = self.gamma(out) + x + return out + + +@HEADS.register_module() +class DAHead(BaseDecodeHead): + """Dual Attention Network for Scene Segmentation. + + This head is the implementation of `DANet + `_. + + Args: + pam_channels (int): The channels of Position Attention Module(PAM). + """ + + def __init__(self, pam_channels, **kwargs): + super(DAHead, self).__init__(**kwargs) + self.pam_channels = pam_channels + self.pam_in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.pam = PAM(self.channels, pam_channels) + self.pam_out_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.pam_conv_seg = nn.Conv2d( + self.channels, self.num_classes, kernel_size=1) + + self.cam_in_conv = ConvModule( + self.in_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.cam = CAM() + self.cam_out_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.cam_conv_seg = nn.Conv2d( + self.channels, self.num_classes, kernel_size=1) + + def pam_cls_seg(self, feat): + """PAM feature classification.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.pam_conv_seg(feat) + return output + + def cam_cls_seg(self, feat): + """CAM feature classification.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.cam_conv_seg(feat) + return output + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + pam_feat = self.pam_in_conv(x) + pam_feat = self.pam(pam_feat) + pam_feat = self.pam_out_conv(pam_feat) + pam_out = self.pam_cls_seg(pam_feat) + + cam_feat = self.cam_in_conv(x) + cam_feat = self.cam(cam_feat) + cam_feat = self.cam_out_conv(cam_feat) + cam_out = self.cam_cls_seg(cam_feat) + + feat_sum = pam_feat + cam_feat + pam_cam_out = self.cls_seg(feat_sum) + + return pam_cam_out, pam_out, cam_out + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing, only 
``pam_cam`` is used."""
+        return self.forward(inputs)[0]
+
+    def losses(self, seg_logit, seg_label):
+        """Compute ``pam_cam``, ``pam``, ``cam`` loss."""
+        pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit
+        loss = dict()
+        loss.update(
+            add_prefix(
+                super(DAHead, self).losses(pam_cam_seg_logit, seg_label),
+                'pam_cam'))
+        loss.update(
+            add_prefix(
+                super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam'))
+        loss.update(
+            add_prefix(
+                super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam'))
+        return loss
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/decode_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/decode_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ed88037dd0e2200d359a2e3dd40dc24ba40feeb
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/decode_head.py
@@ -0,0 +1,234 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+import torch.nn as nn
+from custom_mmpkg.custom_mmcv.cnn import normal_init
+from custom_mmpkg.custom_mmcv.runner import auto_fp16, force_fp32
+
+from custom_mmpkg.custom_mmseg.core import build_pixel_sampler
+from custom_mmpkg.custom_mmseg.ops import resize
+from ..builder import build_loss
+from ..losses import accuracy
+
+
+class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
+    """Base class for decode heads.
+
+    Args:
+        in_channels (int|Sequence[int]): Input channels.
+        channels (int): Channels after modules, before conv_seg.
+        num_classes (int): Number of classes.
+        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
+        conv_cfg (dict|None): Config of conv layers. Default: None.
+        norm_cfg (dict|None): Config of norm layers. Default: None.
+        act_cfg (dict): Config of activation layers.
+            Default: dict(type='ReLU')
+        in_index (int|Sequence[int]): Input feature index. Default: -1
+        input_transform (str|None): Transformation type of input features.
+            Options: 'resize_concat', 'multiple_select', None.
+            'resize_concat': Multiple feature maps will be resized to the
+                same size as the first one and then concatenated together.
+                Usually used in FCN head of HRNet.
+            'multiple_select': Multiple feature maps will be bundled into
+                a list and passed into decode head.
+            None: Only one selected feature map is allowed.
+            Default: None.
+        loss_decode (dict): Config of decode loss.
+            Default: dict(type='CrossEntropyLoss').
+        ignore_index (int | None): The label index to be ignored. When using
+            masked BCE loss, ignore_index should be set to None. Default: 255
+        sampler (dict|None): The config of segmentation map sampler.
+            Default: None.
+        align_corners (bool): align_corners argument of F.interpolate.
+            Default: False.
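+
+    Example (minimal subclass sketch; ``ToyHead`` is a hypothetical name used
+        only to illustrate the subclass contract, not part of this package):
+        >>> import torch
+        >>> class ToyHead(BaseDecodeHead):
+        ...     def forward(self, inputs):
+        ...         return self.cls_seg(self._transform_inputs(inputs))
+        >>> head = ToyHead(in_channels=8, channels=8, num_classes=2)
+        >>> head([torch.rand(1, 8, 4, 4)]).shape
+        torch.Size([1, 2, 4, 4])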
+ """ + + def __init__(self, + in_channels, + channels, + *, + num_classes, + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + in_index=-1, + input_transform=None, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + ignore_index=255, + sampler=None, + align_corners=False): + super(BaseDecodeHead, self).__init__() + self._init_inputs(in_channels, in_index, input_transform) + self.channels = channels + self.num_classes = num_classes + self.dropout_ratio = dropout_ratio + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.in_index = in_index + self.loss_decode = build_loss(loss_decode) + self.ignore_index = ignore_index + self.align_corners = align_corners + if sampler is not None: + self.sampler = build_pixel_sampler(sampler, context=self) + else: + self.sampler = None + + self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) + if dropout_ratio > 0: + self.dropout = nn.Dropout2d(dropout_ratio) + else: + self.dropout = None + self.fp16_enabled = False + + def extra_repr(self): + """Extra repr.""" + s = f'input_transform={self.input_transform}, ' \ + f'ignore_index={self.ignore_index}, ' \ + f'align_corners={self.align_corners}' + return s + + def _init_inputs(self, in_channels, in_index, input_transform): + """Check and initialize input transforms. + + The in_channels, in_index and input_transform must match. + Specifically, when input_transform is None, only single feature map + will be selected. So in_channels and in_index must be of type int. + When input_transform + + Args: + in_channels (int|Sequence[int]): Input channels. + in_index (int|Sequence[int]): Input feature index. + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + """ + + if input_transform is not None: + assert input_transform in ['resize_concat', 'multiple_select'] + self.input_transform = input_transform + self.in_index = in_index + if input_transform is not None: + assert isinstance(in_channels, (list, tuple)) + assert isinstance(in_index, (list, tuple)) + assert len(in_channels) == len(in_index) + if input_transform == 'resize_concat': + self.in_channels = sum(in_channels) + else: + self.in_channels = in_channels + else: + assert isinstance(in_channels, int) + assert isinstance(in_index, int) + self.in_channels = in_channels + + def init_weights(self): + """Initialize weights of classification layer.""" + normal_init(self.conv_seg, mean=0, std=0.01) + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor]): List of multi-level img features. 
+ + Returns: + Tensor: The transformed inputs + """ + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + @auto_fp16() + @abstractmethod + def forward(self, inputs): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self.forward(inputs) + losses = self.losses(seg_logits, gt_semantic_seg) + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + return self.forward(inputs) + + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + @force_fp32(apply_to=('seg_logit', )) + def losses(self, seg_logit, seg_label): + """Compute segmentation loss.""" + loss = dict() + seg_logit = resize( + input=seg_logit, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logit, seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + loss['loss_seg'] = self.loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + loss['acc_seg'] = accuracy(seg_logit, seg_label) + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dm_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dm_head.py new file mode 100644 index 0000000000000000000000000000000000000000..607cd3dd2219a7971319c84ad2383bca25306b3d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dm_head.py @@ -0,0 +1,140 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer + +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class DCM(nn.Module): + """Dynamic Convolutional Module used in DMNet. 
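+
+    DCM pools the input down to ``filter_size`` x ``filter_size``, turns the
+    pooled context into one convolution kernel per sample with
+    ``filter_gen_conv``, and applies that kernel to the channel-reduced input
+    through a grouped convolution (``groups=batch * channels``), so every
+    sample is filtered by its own dynamically generated weights.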
+ + Args: + filter_size (int): The filter size of generated convolution kernel + used in Dynamic Convolutional Module. + fusion (bool): Add one conv to fuse DCM output feature. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict | None): Config of conv layers. + norm_cfg (dict | None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg, + norm_cfg, act_cfg): + super(DCM, self).__init__() + self.filter_size = filter_size + self.fusion = fusion + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1, + 0) + + self.input_redu_conv = ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + if self.norm_cfg is not None: + self.norm = build_norm_layer(self.norm_cfg, self.channels)[1] + else: + self.norm = None + self.activate = build_activation_layer(self.act_cfg) + + if self.fusion: + self.fusion_conv = ConvModule( + self.channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, x): + """Forward function.""" + generated_filter = self.filter_gen_conv( + F.adaptive_avg_pool2d(x, self.filter_size)) + x = self.input_redu_conv(x) + b, c, h, w = x.shape + # [1, b * c, h, w], c = self.channels + x = x.view(1, b * c, h, w) + # [b * c, 1, filter_size, filter_size] + generated_filter = generated_filter.view(b * c, 1, self.filter_size, + self.filter_size) + pad = (self.filter_size - 1) // 2 + if (self.filter_size - 1) % 2 == 0: + p2d = (pad, pad, pad, pad) + else: + p2d = (pad + 1, pad, pad + 1, pad) + x = F.pad(input=x, pad=p2d, mode='constant', value=0) + # [1, b * c, h, w] + output = F.conv2d(input=x, weight=generated_filter, groups=b * c) + # [b, c, h, w] + output = output.view(b, c, h, w) + if self.norm is not None: + output = self.norm(output) + output = self.activate(output) + + if self.fusion: + output = self.fusion_conv(output) + + return output + + +@HEADS.register_module() +class DMHead(BaseDecodeHead): + """Dynamic Multi-scale Filters for Semantic Segmentation. + + This head is the implementation of + `DMNet `_. + + Args: + filter_sizes (tuple[int]): The size of generated convolutional filters + used in Dynamic Convolutional Module. Default: (1, 3, 5, 7). + fusion (bool): Add one conv to fuse DCM output feature. 
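+
+    Example (illustrative sketch; toy channel sizes and ``norm_cfg=None`` are
+        assumptions for the doctest):
+        >>> import torch
+        >>> head = DMHead(in_channels=64, channels=32, num_classes=19,
+        ...               norm_cfg=None)
+        >>> head([torch.rand(1, 64, 16, 16)]).shape
+        torch.Size([1, 19, 16, 16])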
+ """ + + def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs): + super(DMHead, self).__init__(**kwargs) + assert isinstance(filter_sizes, (list, tuple)) + self.filter_sizes = filter_sizes + self.fusion = fusion + dcm_modules = [] + for filter_size in self.filter_sizes: + dcm_modules.append( + DCM(filter_size, + self.fusion, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.dcm_modules = nn.ModuleList(dcm_modules) + self.bottleneck = ConvModule( + self.in_channels + len(filter_sizes) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + dcm_outs = [x] + for dcm_module in self.dcm_modules: + dcm_outs.append(dcm_module(x)) + dcm_outs = torch.cat(dcm_outs, dim=1) + output = self.bottleneck(dcm_outs) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dnl_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dnl_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5b7c1936aa6114d0370625482b677db58a43e8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/dnl_head.py @@ -0,0 +1,131 @@ +import torch +from custom_mmpkg.custom_mmcv.cnn import NonLocal2d +from torch import nn + +from ..builder import HEADS +from .fcn_head import FCNHead + + +class DisentangledNonLocal2d(NonLocal2d): + """Disentangled Non-Local Blocks. + + Args: + temperature (float): Temperature to adjust attention. Default: 0.05 + """ + + def __init__(self, *arg, temperature, **kwargs): + super().__init__(*arg, **kwargs) + self.temperature = temperature + self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1) + + def embedded_gaussian(self, theta_x, phi_x): + """Embedded gaussian with temperature.""" + + # NonLocal2d pairwise_weight: [N, HxW, HxW] + pairwise_weight = torch.matmul(theta_x, phi_x) + if self.use_scale: + # theta_x.shape[-1] is `self.inter_channels` + pairwise_weight /= theta_x.shape[-1]**0.5 + pairwise_weight /= self.temperature + pairwise_weight = pairwise_weight.softmax(dim=-1) + return pairwise_weight + + def forward(self, x): + # x: [N, C, H, W] + n = x.size(0) + + # g_x: [N, HxW, C] + g_x = self.g(x).view(n, self.inter_channels, -1) + g_x = g_x.permute(0, 2, 1) + + # theta_x: [N, HxW, C], phi_x: [N, C, HxW] + if self.mode == 'gaussian': + theta_x = x.view(n, self.in_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + if self.sub_sample: + phi_x = self.phi(x).view(n, self.in_channels, -1) + else: + phi_x = x.view(n, self.in_channels, -1) + elif self.mode == 'concatenation': + theta_x = self.theta(x).view(n, self.inter_channels, -1, 1) + phi_x = self.phi(x).view(n, self.inter_channels, 1, -1) + else: + theta_x = self.theta(x).view(n, self.inter_channels, -1) + theta_x = theta_x.permute(0, 2, 1) + phi_x = self.phi(x).view(n, self.inter_channels, -1) + + # subtract mean + theta_x -= theta_x.mean(dim=-2, keepdim=True) + phi_x -= phi_x.mean(dim=-1, keepdim=True) + + pairwise_func = getattr(self, self.mode) + # pairwise_weight: [N, HxW, HxW] + pairwise_weight = pairwise_func(theta_x, phi_x) + + # y: [N, HxW, C] + y = torch.matmul(pairwise_weight, g_x) + # y: [N, C, H, W] + y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels, + 
*x.size()[2:])
+
+        # unary_mask: [N, 1, HxW]
+        unary_mask = self.conv_mask(x)
+        unary_mask = unary_mask.view(n, 1, -1)
+        unary_mask = unary_mask.softmax(dim=-1)
+        # unary_x: [N, 1, C]
+        unary_x = torch.matmul(unary_mask, g_x)
+        # unary_x: [N, C, 1, 1]
+        unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(
+            n, self.inter_channels, 1, 1)
+
+        output = x + self.conv_out(y + unary_x)
+
+        return output
+
+
+@HEADS.register_module()
+class DNLHead(FCNHead):
+    """Disentangled Non-Local Neural Networks.
+
+    This head is the implementation of `DNLNet
+    <https://arxiv.org/abs/2006.06668>`_.
+
+    Args:
+        reduction (int): Reduction factor of projection transform. Default: 2.
+        use_scale (bool): Whether to scale pairwise_weight by
+            sqrt(1/inter_channels). Default: True.
+        mode (str): The nonlocal mode. Options are 'embedded_gaussian',
+            'dot_product'. Default: 'embedded_gaussian'.
+        temperature (float): Temperature to adjust attention. Default: 0.05
+    """
+
+    def __init__(self,
+                 reduction=2,
+                 use_scale=True,
+                 mode='embedded_gaussian',
+                 temperature=0.05,
+                 **kwargs):
+        super(DNLHead, self).__init__(num_convs=2, **kwargs)
+        self.reduction = reduction
+        self.use_scale = use_scale
+        self.mode = mode
+        self.temperature = temperature
+        self.dnl_block = DisentangledNonLocal2d(
+            in_channels=self.channels,
+            reduction=self.reduction,
+            use_scale=self.use_scale,
+            conv_cfg=self.conv_cfg,
+            norm_cfg=self.norm_cfg,
+            mode=self.mode,
+            temperature=self.temperature)
+
+    def forward(self, inputs):
+        """Forward function."""
+        x = self._transform_inputs(inputs)
+        output = self.convs[0](x)
+        output = self.dnl_block(output)
+        output = self.convs[1](output)
+        if self.concat_input:
+            output = self.conv_cat(torch.cat([x, output], dim=1))
+        output = self.cls_seg(output)
+        return output
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ema_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ema_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2279d53be90e3aee8e109eae47277f7c3266cef
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ema_head.py
@@ -0,0 +1,168 @@
+import math
+
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import torch.nn.functional as F
+from custom_mmpkg.custom_mmcv.cnn import ConvModule
+
+from ..builder import HEADS
+from .decode_head import BaseDecodeHead
+
+
+def reduce_mean(tensor):
+    """Reduce mean when distributed training."""
+    if not (dist.is_available() and dist.is_initialized()):
+        return tensor
+    tensor = tensor.clone()
+    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
+    return tensor
+
+
+class EMAModule(nn.Module):
+    """Expectation Maximization Attention Module used in EMANet.
+
+    Args:
+        channels (int): Channels of the whole module.
+        num_bases (int): Number of bases.
+        num_stages (int): Number of the EM iterations.
+        momentum (float): Momentum used when updating the bases.
+    """
+
+    def __init__(self, channels, num_bases, num_stages, momentum):
+        super(EMAModule, self).__init__()
+        assert num_stages >= 1, 'num_stages must be at least 1!'
+        self.num_bases = num_bases
+        self.num_stages = num_stages
+        self.momentum = momentum
+
+        bases = torch.zeros(1, channels, self.num_bases)
+        bases.normal_(0, math.sqrt(2.
/ self.num_bases)) + # [1, channels, num_bases] + bases = F.normalize(bases, dim=1, p=2) + self.register_buffer('bases', bases) + + def forward(self, feats): + """Forward function.""" + batch_size, channels, height, width = feats.size() + # [batch_size, channels, height*width] + feats = feats.view(batch_size, channels, height * width) + # [batch_size, channels, num_bases] + bases = self.bases.repeat(batch_size, 1, 1) + + with torch.no_grad(): + for i in range(self.num_stages): + # [batch_size, height*width, num_bases] + attention = torch.einsum('bcn,bck->bnk', feats, bases) + attention = F.softmax(attention, dim=2) + # l1 norm + attention_normed = F.normalize(attention, dim=1, p=1) + # [batch_size, channels, num_bases] + bases = torch.einsum('bcn,bnk->bck', feats, attention_normed) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + + feats_recon = torch.einsum('bck,bnk->bcn', bases, attention) + feats_recon = feats_recon.view(batch_size, channels, height, width) + + if self.training: + bases = bases.mean(dim=0, keepdim=True) + bases = reduce_mean(bases) + # l2 norm + bases = F.normalize(bases, dim=1, p=2) + self.bases = (1 - + self.momentum) * self.bases + self.momentum * bases + + return feats_recon + + +@HEADS.register_module() +class EMAHead(BaseDecodeHead): + """Expectation Maximization Attention Networks for Semantic Segmentation. + + This head is the implementation of `EMANet + `_. + + Args: + ema_channels (int): EMA module channels + num_bases (int): Number of bases. + num_stages (int): Number of the EM iterations. + concat_input (bool): Whether concat the input and output of convs + before classification layer. Default: True + momentum (float): Momentum to update the base. Default: 0.1. + """ + + def __init__(self, + ema_channels, + num_bases, + num_stages, + concat_input=True, + momentum=0.1, + **kwargs): + super(EMAHead, self).__init__(**kwargs) + self.ema_channels = ema_channels + self.num_bases = num_bases + self.num_stages = num_stages + self.concat_input = concat_input + self.momentum = momentum + self.ema_module = EMAModule(self.ema_channels, self.num_bases, + self.num_stages, self.momentum) + + self.ema_in_conv = ConvModule( + self.in_channels, + self.ema_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # project (0, inf) -> (-inf, inf) + self.ema_mid_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None) + for param in self.ema_mid_conv.parameters(): + param.requires_grad = False + + self.ema_out_conv = ConvModule( + self.ema_channels, + self.ema_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + self.bottleneck = ConvModule( + self.ema_channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.concat_input: + self.conv_cat = ConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + feats = self.ema_in_conv(x) + identity = feats + feats = self.ema_mid_conv(feats) + recon = self.ema_module(feats) + recon = F.relu(recon, inplace=True) + recon = self.ema_out_conv(recon) + output = F.relu(identity + recon, inplace=True) + output = self.bottleneck(output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + 
output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/enc_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/enc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..ee8ecd7401ec9619eb2ac176d380e4e513294ea3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/enc_head.py @@ -0,0 +1,187 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule, build_norm_layer + +from custom_mmpkg.custom_mmseg.ops import Encoding, resize +from ..builder import HEADS, build_loss +from .decode_head import BaseDecodeHead + + +class EncModule(nn.Module): + """Encoding Module used in EncNet. + + Args: + in_channels (int): Input channels. + num_codes (int): Number of code words. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + """ + + def __init__(self, in_channels, num_codes, conv_cfg, norm_cfg, act_cfg): + super(EncModule, self).__init__() + self.encoding_project = ConvModule( + in_channels, + in_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + # TODO: resolve this hack + # change to 1d + if norm_cfg is not None: + encoding_norm_cfg = norm_cfg.copy() + if encoding_norm_cfg['type'] in ['BN', 'IN']: + encoding_norm_cfg['type'] += '1d' + else: + encoding_norm_cfg['type'] = encoding_norm_cfg['type'].replace( + '2d', '1d') + else: + # fallback to BN1d + encoding_norm_cfg = dict(type='BN1d') + self.encoding = nn.Sequential( + Encoding(channels=in_channels, num_codes=num_codes), + build_norm_layer(encoding_norm_cfg, num_codes)[1], + nn.ReLU(inplace=True)) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels), nn.Sigmoid()) + + def forward(self, x): + """Forward function.""" + encoding_projection = self.encoding_project(x) + encoding_feat = self.encoding(encoding_projection).mean(dim=1) + batch_size, channels, _, _ = x.size() + gamma = self.fc(encoding_feat) + y = gamma.view(batch_size, channels, 1, 1) + output = F.relu_(x + x * y) + return encoding_feat, output + + +@HEADS.register_module() +class EncHead(BaseDecodeHead): + """Context Encoding for Semantic Segmentation. + + This head is the implementation of `EncNet + `_. + + Args: + num_codes (int): Number of code words. Default: 32. + use_se_loss (bool): Whether use Semantic Encoding Loss (SE-loss) to + regularize the training. Default: True. + add_lateral (bool): Whether use lateral connection to fuse features. + Default: False. + loss_se_decode (dict): Config of decode loss. + Default: dict(type='CrossEntropyLoss', use_sigmoid=True). 
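+
+    Example (illustrative sketch; the two-level feature shapes are
+        assumptions, and with the default ``use_se_loss=True`` the head
+        returns a (segmentation, SE) pair):
+        >>> import torch
+        >>> head = EncHead(in_channels=[32, 64], in_index=[0, 1], channels=32,
+        ...                num_classes=19, norm_cfg=None)
+        >>> feats = [torch.rand(1, 32, 32, 32), torch.rand(1, 64, 16, 16)]
+        >>> seg_logits, se_logits = head(feats)
+        >>> seg_logits.shape, se_logits.shape
+        (torch.Size([1, 19, 16, 16]), torch.Size([1, 19]))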
+ """ + + def __init__(self, + num_codes=32, + use_se_loss=True, + add_lateral=False, + loss_se_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=0.2), + **kwargs): + super(EncHead, self).__init__( + input_transform='multiple_select', **kwargs) + self.use_se_loss = use_se_loss + self.add_lateral = add_lateral + self.num_codes = num_codes + self.bottleneck = ConvModule( + self.in_channels[-1], + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if add_lateral: + self.lateral_convs = nn.ModuleList() + for in_channels in self.in_channels[:-1]: # skip the last one + self.lateral_convs.append( + ConvModule( + in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.fusion = ConvModule( + len(self.in_channels) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.enc_module = EncModule( + self.channels, + num_codes=num_codes, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if self.use_se_loss: + self.loss_se_decode = build_loss(loss_se_decode) + self.se_layer = nn.Linear(self.channels, self.num_classes) + + def forward(self, inputs): + """Forward function.""" + inputs = self._transform_inputs(inputs) + feat = self.bottleneck(inputs[-1]) + if self.add_lateral: + laterals = [ + resize( + lateral_conv(inputs[i]), + size=feat.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + feat = self.fusion(torch.cat([feat, *laterals], 1)) + encode_feat, output = self.enc_module(feat) + output = self.cls_seg(output) + if self.use_se_loss: + se_output = self.se_layer(encode_feat) + return output, se_output + else: + return output + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing, ignore se_loss.""" + if self.use_se_loss: + return self.forward(inputs)[0] + else: + return self.forward(inputs) + + @staticmethod + def _convert_to_onehot_labels(seg_label, num_classes): + """Convert segmentation label to onehot. + + Args: + seg_label (Tensor): Segmentation label of shape (N, H, W). + num_classes (int): Number of classes. + + Returns: + Tensor: Onehot labels of shape (N, num_classes). 
+ """ + + batch_size = seg_label.size(0) + onehot_labels = seg_label.new_zeros((batch_size, num_classes)) + for i in range(batch_size): + hist = seg_label[i].float().histc( + bins=num_classes, min=0, max=num_classes - 1) + onehot_labels[i] = hist > 0 + return onehot_labels + + def losses(self, seg_logit, seg_label): + """Compute segmentation and semantic encoding loss.""" + seg_logit, se_seg_logit = seg_logit + loss = dict() + loss.update(super(EncHead, self).losses(seg_logit, seg_label)) + se_loss = self.loss_se_decode( + se_seg_logit, + self._convert_to_onehot_labels(seg_label, self.num_classes)) + loss['loss_se'] = se_loss + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fcn_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fcn_head.py new file mode 100644 index 0000000000000000000000000000000000000000..7f0c384381a1f1b26f795e2ed53c571823858317 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fcn_head.py @@ -0,0 +1,81 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class FCNHead(BaseDecodeHead): + """Fully Convolution Networks for Semantic Segmentation. + + This head is implemented of `FCNNet `_. + + Args: + num_convs (int): Number of convs in the head. Default: 2. + kernel_size (int): The kernel size for convs in the head. Default: 3. + concat_input (bool): Whether concat the input and output of convs + before classification layer. + dilation (int): The dilation rate for convs in the head. Default: 1. + """ + + def __init__(self, + num_convs=2, + kernel_size=3, + concat_input=True, + dilation=1, + **kwargs): + assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int) + self.num_convs = num_convs + self.concat_input = concat_input + self.kernel_size = kernel_size + super(FCNHead, self).__init__(**kwargs) + if num_convs == 0: + assert self.in_channels == self.channels + + conv_padding = (kernel_size // 2) * dilation + convs = [] + convs.append( + ConvModule( + self.in_channels, + self.channels, + kernel_size=kernel_size, + padding=conv_padding, + dilation=dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + for i in range(num_convs - 1): + convs.append( + ConvModule( + self.channels, + self.channels, + kernel_size=kernel_size, + padding=conv_padding, + dilation=dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + if num_convs == 0: + self.convs = nn.Identity() + else: + self.convs = nn.Sequential(*convs) + if self.concat_input: + self.conv_cat = ConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=kernel_size, + padding=kernel_size // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs(x) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fpn_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fpn_head.py new file mode 100644 index 0000000000000000000000000000000000000000..26d0849b1ace5911974437be5ae328e6107b44bc --- 
/dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/fpn_head.py @@ -0,0 +1,68 @@ +import numpy as np +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class FPNHead(BaseDecodeHead): + """Panoptic Feature Pyramid Networks. + + This head is the implementation of `Semantic FPN + `_. + + Args: + feature_strides (tuple[int]): The strides for input feature maps. + stack_lateral. All strides suppose to be power of 2. The first + one is of largest resolution. + """ + + def __init__(self, feature_strides, **kwargs): + super(FPNHead, self).__init__( + input_transform='multiple_select', **kwargs) + assert len(feature_strides) == len(self.in_channels) + assert min(feature_strides) == feature_strides[0] + self.feature_strides = feature_strides + + self.scale_heads = nn.ModuleList() + for i in range(len(feature_strides)): + head_length = max( + 1, + int(np.log2(feature_strides[i]) - np.log2(feature_strides[0]))) + scale_head = [] + for k in range(head_length): + scale_head.append( + ConvModule( + self.in_channels[i] if k == 0 else self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + if feature_strides[i] != feature_strides[0]: + scale_head.append( + nn.Upsample( + scale_factor=2, + mode='bilinear', + align_corners=self.align_corners)) + self.scale_heads.append(nn.Sequential(*scale_head)) + + def forward(self, inputs): + + x = self._transform_inputs(inputs) + + output = self.scale_heads[0](x[0]) + for i in range(1, len(self.feature_strides)): + # non inplace + output = output + resize( + self.scale_heads[i](x[i]), + size=output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/gc_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/gc_head.py new file mode 100644 index 0000000000000000000000000000000000000000..600049998d04fc5f469e8da41243bb2d51b64cc1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/gc_head.py @@ -0,0 +1,47 @@ +import torch +from custom_mmpkg.custom_mmcv.cnn import ContextBlock + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class GCHead(FCNHead): + """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. + + This head is the implementation of `GCNet + `_. + + Args: + ratio (float): Multiplier of channels ratio. Default: 1/4. + pooling_type (str): The pooling type of context aggregation. + Options are 'att', 'avg'. Default: 'avg'. + fusion_types (tuple[str]): The fusion type for feature fusion. + Options are 'channel_add', 'channel_mul'. 
Default: ('channel_add',) + """ + + def __init__(self, + ratio=1 / 4., + pooling_type='att', + fusion_types=('channel_add', ), + **kwargs): + super(GCHead, self).__init__(num_convs=2, **kwargs) + self.ratio = ratio + self.pooling_type = pooling_type + self.fusion_types = fusion_types + self.gc_block = ContextBlock( + in_channels=self.channels, + ratio=self.ratio, + pooling_type=self.pooling_type, + fusion_types=self.fusion_types) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.gc_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/lraspp_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/lraspp_head.py new file mode 100644 index 0000000000000000000000000000000000000000..5395a8f57fdbdf6828842f2c4c9a291ecf0b2cdc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/lraspp_head.py @@ -0,0 +1,90 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv import is_tuple_of +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +@HEADS.register_module() +class LRASPPHead(BaseDecodeHead): + """Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3. + + This head is the improved implementation of `Searching for MobileNetV3 + `_. + + Args: + branch_channels (tuple[int]): The number of output channels in every + each branch. Default: (32, 64). + """ + + def __init__(self, branch_channels=(32, 64), **kwargs): + super(LRASPPHead, self).__init__(**kwargs) + if self.input_transform != 'multiple_select': + raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform ' + f'must be \'multiple_select\'. 
But received ' + f'\'{self.input_transform}\'') + assert is_tuple_of(branch_channels, int) + assert len(branch_channels) == len(self.in_channels) - 1 + self.branch_channels = branch_channels + + self.convs = nn.Sequential() + self.conv_ups = nn.Sequential() + for i in range(len(branch_channels)): + self.convs.add_module( + f'conv{i}', + nn.Conv2d( + self.in_channels[i], branch_channels[i], 1, bias=False)) + self.conv_ups.add_module( + f'conv_up{i}', + ConvModule( + self.channels + branch_channels[i], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False)) + + self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1) + + self.aspp_conv = ConvModule( + self.in_channels[-1], + self.channels, + 1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + bias=False) + self.image_pool = nn.Sequential( + nn.AvgPool2d(kernel_size=49, stride=(16, 20)), + ConvModule( + self.in_channels[2], + self.channels, + 1, + act_cfg=dict(type='Sigmoid'), + bias=False)) + + def forward(self, inputs): + """Forward function.""" + inputs = self._transform_inputs(inputs) + + x = inputs[-1] + + x = self.aspp_conv(x) * resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = self.conv_up_input(x) + + for i in range(len(self.branch_channels) - 1, -1, -1): + x = resize( + x, + size=inputs[i].size()[2:], + mode='bilinear', + align_corners=self.align_corners) + x = torch.cat([x, self.convs[i](inputs[i])], 1) + x = self.conv_ups[i](x) + + return self.cls_seg(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/nl_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/nl_head.py new file mode 100644 index 0000000000000000000000000000000000000000..9010d303cb3808de4893c3900ebbd9917a9cc57e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/nl_head.py @@ -0,0 +1,49 @@ +import torch +from custom_mmpkg.custom_mmcv.cnn import NonLocal2d + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class NLHead(FCNHead): + """Non-local Neural Networks. + + This head is the implementation of `NLNet + `_. + + Args: + reduction (int): Reduction factor of projection transform. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + sqrt(1/inter_channels). Default: True. + mode (str): The nonlocal mode. Options are 'embedded_gaussian', + 'dot_product'. Default: 'embedded_gaussian.'. 
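+
+    Example (illustrative sketch; toy channel sizes and ``norm_cfg=None`` are
+        assumptions for the doctest):
+        >>> import torch
+        >>> head = NLHead(in_channels=64, channels=32, num_classes=19,
+        ...               norm_cfg=None)
+        >>> head([torch.rand(1, 64, 16, 16)]).shape
+        torch.Size([1, 19, 16, 16])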
+ """ + + def __init__(self, + reduction=2, + use_scale=True, + mode='embedded_gaussian', + **kwargs): + super(NLHead, self).__init__(num_convs=2, **kwargs) + self.reduction = reduction + self.use_scale = use_scale + self.mode = mode + self.nl_block = NonLocal2d( + in_channels=self.channels, + reduction=self.reduction, + use_scale=self.use_scale, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + mode=self.mode) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + output = self.convs[0](x) + output = self.nl_block(output) + output = self.convs[1](output) + if self.concat_input: + output = self.conv_cat(torch.cat([x, output], dim=1)) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ocr_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ocr_head.py new file mode 100644 index 0000000000000000000000000000000000000000..b31f00233355ea610ea61a4f40cc3dfb6d84c8b7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/ocr_head.py @@ -0,0 +1,127 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from ..utils import SelfAttentionBlock as _SelfAttentionBlock +from .cascade_decode_head import BaseCascadeDecodeHead + + +class SpatialGatherModule(nn.Module): + """Aggregate the context features according to the initial predicted + probability distribution. + + Employ the soft-weighted method to aggregate the context. + """ + + def __init__(self, scale): + super(SpatialGatherModule, self).__init__() + self.scale = scale + + def forward(self, feats, probs): + """Forward function.""" + batch_size, num_classes, height, width = probs.size() + channels = feats.size(1) + probs = probs.view(batch_size, num_classes, -1) + feats = feats.view(batch_size, channels, -1) + # [batch_size, height*width, num_classes] + feats = feats.permute(0, 2, 1) + # [batch_size, channels, height*width] + probs = F.softmax(self.scale * probs, dim=2) + # [batch_size, channels, num_classes] + ocr_context = torch.matmul(probs, feats) + ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3) + return ocr_context + + +class ObjectAttentionBlock(_SelfAttentionBlock): + """Make a OCR used SelfAttentionBlock.""" + + def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, + act_cfg): + if scale > 1: + query_downsample = nn.MaxPool2d(kernel_size=scale) + else: + query_downsample = None + super(ObjectAttentionBlock, self).__init__( + key_in_channels=in_channels, + query_in_channels=in_channels, + channels=channels, + out_channels=in_channels, + share_key_query=False, + query_downsample=query_downsample, + key_downsample=None, + key_query_num_convs=2, + key_query_norm=True, + value_out_num_convs=1, + value_out_norm=True, + matmul_norm=True, + with_out=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.bottleneck = ConvModule( + in_channels * 2, + in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, query_feats, key_feats): + """Forward function.""" + context = super(ObjectAttentionBlock, + self).forward(query_feats, key_feats) + output = self.bottleneck(torch.cat([context, query_feats], dim=1)) + if self.query_downsample is not None: + output = 
resize(output, size=query_feats.shape[2:])
+
+        return output
+
+
+@HEADS.register_module()
+class OCRHead(BaseCascadeDecodeHead):
+    """Object-Contextual Representations for Semantic Segmentation.
+
+    This head is the implementation of `OCRNet
+    <https://arxiv.org/abs/1909.11065>`_.
+
+    Args:
+        ocr_channels (int): The intermediate channels of OCR block.
+        scale (int): The scale of probability map in SpatialGatherModule.
+            Default: 1.
+    """
+
+    def __init__(self, ocr_channels, scale=1, **kwargs):
+        super(OCRHead, self).__init__(**kwargs)
+        self.ocr_channels = ocr_channels
+        self.scale = scale
+        self.object_context_block = ObjectAttentionBlock(
+            self.channels,
+            self.ocr_channels,
+            self.scale,
+            conv_cfg=self.conv_cfg,
+            norm_cfg=self.norm_cfg,
+            act_cfg=self.act_cfg)
+        self.spatial_gather_module = SpatialGatherModule(self.scale)
+
+        self.bottleneck = ConvModule(
+            self.in_channels,
+            self.channels,
+            3,
+            padding=1,
+            conv_cfg=self.conv_cfg,
+            norm_cfg=self.norm_cfg,
+            act_cfg=self.act_cfg)
+
+    def forward(self, inputs, prev_output):
+        """Forward function."""
+        x = self._transform_inputs(inputs)
+        feats = self.bottleneck(x)
+        context = self.spatial_gather_module(feats, prev_output)
+        object_context = self.object_context_block(feats, context)
+        output = self.cls_seg(object_context)
+
+        return output
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/point_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/point_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..71c9f8e078536a733616a9f33de2dd35a8adbc26
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/point_head.py
@@ -0,0 +1,350 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py  # noqa
+
+import torch
+import torch.nn as nn
+
+from custom_mmpkg.custom_mmcv.cnn import ConvModule, normal_init
+from custom_mmpkg.custom_mmcv.ops import point_sample
+
+from custom_mmpkg.custom_mmseg.models.builder import HEADS
+from custom_mmpkg.custom_mmseg.ops import resize
+from ..losses import accuracy
+from .cascade_decode_head import BaseCascadeDecodeHead
+
+
+def calculate_uncertainty(seg_logits):
+    """Estimate uncertainty based on seg logits.
+
+    For each location of the prediction ``seg_logits`` we estimate
+    uncertainty as the difference between the top first and top second
+    predicted logits.
+
+    Args:
+        seg_logits (Tensor): Semantic segmentation logits,
+            shape (batch_size, num_classes, height, width).
+
+    Returns:
+        scores (Tensor): Uncertainty scores, with the most uncertain
+            locations having the highest score; shape
+            (batch_size, 1, height, width).
+    """
+    top2_scores = torch.topk(seg_logits, k=2, dim=1)[0]
+    return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1)
+
+
+@HEADS.register_module()
+class PointHead(BaseCascadeDecodeHead):
+    """A mask point head used in PointRend.
+
+    ``PointHead`` uses a shared multi-layer perceptron (equivalent to
+    nn.Conv1d) to predict the logit of input points. The fine-grained feature
+    and coarse feature will be concatenated together for prediction.
+
+    Args:
+        num_fcs (int): Number of fc layers in the head. Default: 3.
+        in_channels (int): Number of input channels. Default: 256.
+        fc_channels (int): Number of fc channels. Default: 256.
+        num_classes (int): Number of classes for logits. Default: 80.
+        class_agnostic (bool): Whether to use class-agnostic classification.
+ If so, the output channels of logits will be 1. Default: False. + coarse_pred_each_layer (bool): Whether concatenate coarse feature with + the output of each fc layer. Default: True. + conv_cfg (dict|None): Dictionary to construct and config conv layer. + Default: dict(type='Conv1d')) + norm_cfg (dict|None): Dictionary to construct and config norm layer. + Default: None. + loss_point (dict): Dictionary to construct and config loss layer of + point head. Default: dict(type='CrossEntropyLoss', use_mask=True, + loss_weight=1.0). + """ + + def __init__(self, + num_fcs=3, + coarse_pred_each_layer=True, + conv_cfg=dict(type='Conv1d'), + norm_cfg=None, + act_cfg=dict(type='ReLU', inplace=False), + **kwargs): + super(PointHead, self).__init__( + input_transform='multiple_select', + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **kwargs) + + self.num_fcs = num_fcs + self.coarse_pred_each_layer = coarse_pred_each_layer + + fc_in_channels = sum(self.in_channels) + self.num_classes + fc_channels = self.channels + self.fcs = nn.ModuleList() + for k in range(num_fcs): + fc = ConvModule( + fc_in_channels, + fc_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.fcs.append(fc) + fc_in_channels = fc_channels + fc_in_channels += self.num_classes if self.coarse_pred_each_layer \ + else 0 + self.fc_seg = nn.Conv1d( + fc_in_channels, + self.num_classes, + kernel_size=1, + stride=1, + padding=0) + if self.dropout_ratio > 0: + self.dropout = nn.Dropout(self.dropout_ratio) + delattr(self, 'conv_seg') + + def init_weights(self): + """Initialize weights of classification layer.""" + normal_init(self.fc_seg, std=0.001) + + def cls_seg(self, feat): + """Classify each pixel with fc.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.fc_seg(feat) + return output + + def forward(self, fine_grained_point_feats, coarse_point_feats): + x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1) + for fc in self.fcs: + x = fc(x) + if self.coarse_pred_each_layer: + x = torch.cat((x, coarse_point_feats), dim=1) + return self.cls_seg(x) + + def _get_fine_grained_point_feats(self, x, points): + """Sample from fine grained features. + + Args: + x (list[Tensor]): Feature pyramid from by neck or backbone. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). + + Returns: + fine_grained_feats (Tensor): Sampled fine grained feature, + shape (batch_size, sum(channels of x), num_points). + """ + + fine_grained_feats_list = [ + point_sample(_, points, align_corners=self.align_corners) + for _ in x + ] + if len(fine_grained_feats_list) > 1: + fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1) + else: + fine_grained_feats = fine_grained_feats_list[0] + + return fine_grained_feats + + def _get_coarse_point_feats(self, prev_output, points): + """Sample from fine grained features. + + Args: + prev_output (list[Tensor]): Prediction of previous decode head. + points (Tensor): Point coordinates, shape (batch_size, + num_points, 2). + + Returns: + coarse_feats (Tensor): Sampled coarse feature, shape (batch_size, + num_classes, num_points). + """ + + coarse_feats = point_sample( + prev_output, points, align_corners=self.align_corners) + + return coarse_feats + + def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, + train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. 
+ prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + x = self._transform_inputs(inputs) + with torch.no_grad(): + points = self.get_points_train( + prev_output, calculate_uncertainty, cfg=train_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats(prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + point_label = point_sample( + gt_semantic_seg.float(), + points, + mode='nearest', + align_corners=self.align_corners) + point_label = point_label.squeeze(1).long() + + losses = self.losses(point_logits, point_label) + + return losses + + def forward_test(self, inputs, prev_output, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + prev_output (Tensor): The output of previous decode head. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. + """ + + x = self._transform_inputs(inputs) + refined_seg_logits = prev_output.clone() + for _ in range(test_cfg.subdivision_steps): + refined_seg_logits = resize( + refined_seg_logits, + scale_factor=test_cfg.scale_factor, + mode='bilinear', + align_corners=self.align_corners) + batch_size, channels, height, width = refined_seg_logits.shape + point_indices, points = self.get_points_test( + refined_seg_logits, calculate_uncertainty, cfg=test_cfg) + fine_grained_point_feats = self._get_fine_grained_point_feats( + x, points) + coarse_point_feats = self._get_coarse_point_feats( + prev_output, points) + point_logits = self.forward(fine_grained_point_feats, + coarse_point_feats) + + point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) + refined_seg_logits = refined_seg_logits.reshape( + batch_size, channels, height * width) + refined_seg_logits = refined_seg_logits.scatter_( + 2, point_indices, point_logits) + refined_seg_logits = refined_seg_logits.view( + batch_size, channels, height, width) + + return refined_seg_logits + + def losses(self, point_logits, point_label): + """Compute segmentation loss.""" + loss = dict() + loss['loss_point'] = self.loss_decode( + point_logits, point_label, ignore_index=self.ignore_index) + loss['acc_point'] = accuracy(point_logits, point_label) + return loss + + def get_points_train(self, seg_logits, uncertainty_func, cfg): + """Sample points for training. + + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'uncertainty_func' function that takes point's logit prediction as + input. 
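+
+        For example, with ``num_points=96``, ``oversample_ratio=3`` and
+        ``importance_sample_ratio=0.75`` (illustrative values, not defaults
+        of this method), 288 candidate points are drawn, the 72 most
+        uncertain candidates are kept, and the remaining 24 points are
+        re-drawn uniformly at random.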
+ + Args: + seg_logits (Tensor): Semantic segmentation logits, shape ( + batch_size, num_classes, height, width). + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Training config of point head. + + Returns: + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains the coordinates of ``num_points`` sampled + points. + """ + num_points = cfg.num_points + oversample_ratio = cfg.oversample_ratio + importance_sample_ratio = cfg.importance_sample_ratio + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = seg_logits.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=seg_logits.device) + point_logits = point_sample(seg_logits, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. + point_uncertainties = uncertainty_func(point_logits) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=seg_logits.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_point_coords = torch.rand( + batch_size, num_random_points, 2, device=seg_logits.device) + point_coords = torch.cat((point_coords, rand_point_coords), dim=1) + return point_coords + + def get_points_test(self, seg_logits, uncertainty_func, cfg): + """Sample points for testing. + + Find ``num_points`` most uncertain points from ``uncertainty_map``. + + Args: + seg_logits (Tensor): A tensor of shape (batch_size, num_classes, + height, width) for class-specific or class-agnostic prediction. + uncertainty_func (func): uncertainty calculation function. + cfg (dict): Testing config of point head. + + Returns: + point_indices (Tensor): A tensor of shape (batch_size, num_points) + that contains indices from [0, height x width) of the most + uncertain points. + point_coords (Tensor): A tensor of shape (batch_size, num_points, + 2) that contains [0, 1] x [0, 1] normalized coordinates of the + most uncertain points from the ``height x width`` grid . 
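+
+        Example:
+            An illustrative sketch of the index-to-coordinate mapping used
+            below; the 4x4 grid and the chosen index are made-up values.
+
+            >>> import torch
+            >>> point_indices = torch.tensor([[5]])  # row 1, col 1 of a 4x4 grid
+            >>> h_step, w_step = 1.0 / 4, 1.0 / 4
+            >>> x = w_step / 2.0 + (point_indices % 4).float() * w_step
+            >>> y = h_step / 2.0 + (point_indices // 4).float() * h_step
+            >>> (x.item(), y.item())
+            (0.375, 0.375)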
+ """ + + num_points = cfg.subdivision_num_points + uncertainty_map = uncertainty_func(seg_logits) + batch_size, _, height, width = uncertainty_map.shape + h_step = 1.0 / height + w_step = 1.0 / width + + uncertainty_map = uncertainty_map.view(batch_size, height * width) + num_points = min(height * width, num_points) + point_indices = uncertainty_map.topk(num_points, dim=1)[1] + point_coords = torch.zeros( + batch_size, + num_points, + 2, + dtype=torch.float, + device=seg_logits.device) + point_coords[:, :, 0] = w_step / 2.0 + (point_indices % + width).float() * w_step + point_coords[:, :, 1] = h_step / 2.0 + (point_indices // + width).float() * h_step + return point_indices, point_coords diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psa_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psa_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c1424a85d7ca93eb299ad1e9116600f703940fc9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psa_head.py @@ -0,0 +1,196 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + +try: + from custom_mmpkg.custom_mmcv.ops import PSAMask +except ModuleNotFoundError: + PSAMask = None + + +@HEADS.register_module() +class PSAHead(BaseDecodeHead): + """Point-wise Spatial Attention Network for Scene Parsing. + + This head is the implementation of `PSANet + `_. + + Args: + mask_size (tuple[int]): The PSA mask size. It usually equals input + size. + psa_type (str): The type of psa module. Options are 'collect', + 'distribute', 'bi-direction'. Default: 'bi-direction' + compact (bool): Whether use compact map for 'collect' mode. + Default: True. + shrink_factor (int): The downsample factors of psa mask. Default: 2. + normalization_factor (float): The normalize factor of attention. + psa_softmax (bool): Whether use softmax for attention. 
+ """ + + def __init__(self, + mask_size, + psa_type='bi-direction', + compact=False, + shrink_factor=2, + normalization_factor=1.0, + psa_softmax=True, + **kwargs): + if PSAMask is None: + raise RuntimeError('Please install mmcv-full for PSAMask ops') + super(PSAHead, self).__init__(**kwargs) + assert psa_type in ['collect', 'distribute', 'bi-direction'] + self.psa_type = psa_type + self.compact = compact + self.shrink_factor = shrink_factor + self.mask_size = mask_size + mask_h, mask_w = mask_size + self.psa_softmax = psa_softmax + if normalization_factor is None: + normalization_factor = mask_h * mask_w + self.normalization_factor = normalization_factor + + self.reduce = ConvModule( + self.in_channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.attention = nn.Sequential( + ConvModule( + self.channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + self.channels, mask_h * mask_w, kernel_size=1, bias=False)) + if psa_type == 'bi-direction': + self.reduce_p = ConvModule( + self.in_channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.attention_p = nn.Sequential( + ConvModule( + self.channels, + self.channels, + kernel_size=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.Conv2d( + self.channels, mask_h * mask_w, kernel_size=1, bias=False)) + self.psamask_collect = PSAMask('collect', mask_size) + self.psamask_distribute = PSAMask('distribute', mask_size) + else: + self.psamask = PSAMask(psa_type, mask_size) + self.proj = ConvModule( + self.channels * (2 if psa_type == 'bi-direction' else 1), + self.in_channels, + kernel_size=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + self.in_channels * 2, + self.channels, + kernel_size=3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + identity = x + align_corners = self.align_corners + if self.psa_type in ['collect', 'distribute']: + out = self.reduce(x) + n, c, h, w = out.size() + if self.shrink_factor != 1: + if h % self.shrink_factor and w % self.shrink_factor: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + align_corners = True + else: + h = h // self.shrink_factor + w = w // self.shrink_factor + align_corners = False + out = resize( + out, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + y = self.attention(out) + if self.compact: + if self.psa_type == 'collect': + y = y.view(n, h * w, + h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y = self.psamask(y) + if self.psa_softmax: + y = F.softmax(y, dim=1) + out = torch.bmm( + out.view(n, c, h * w), y.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + else: + x_col = self.reduce(x) + x_dis = self.reduce_p(x) + n, c, h, w = x_col.size() + if self.shrink_factor != 1: + if h % self.shrink_factor and w % self.shrink_factor: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + align_corners = True + else: + h = h // self.shrink_factor + w = w // self.shrink_factor + align_corners = False + x_col = resize( + x_col, + size=(h, w), + mode='bilinear', + align_corners=align_corners) + x_dis = resize( + x_dis, + size=(h, w), + 
mode='bilinear', + align_corners=align_corners) + y_col = self.attention(x_col) + y_dis = self.attention_p(x_dis) + if self.compact: + y_dis = y_dis.view(n, h * w, + h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y_col = self.psamask_collect(y_col) + y_dis = self.psamask_distribute(y_dis) + if self.psa_softmax: + y_col = F.softmax(y_col, dim=1) + y_dis = F.softmax(y_dis, dim=1) + x_col = torch.bmm( + x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + x_dis = torch.bmm( + x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view( + n, c, h, w) * (1.0 / self.normalization_factor) + out = torch.cat([x_col, x_dis], 1) + out = self.proj(out) + out = resize( + out, + size=identity.shape[2:], + mode='bilinear', + align_corners=align_corners) + out = self.bottleneck(torch.cat((identity, out), dim=1)) + out = self.cls_seg(out) + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psp_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psp_head.py new file mode 100644 index 0000000000000000000000000000000000000000..0f7880f21319ac6035f604b0fc54f2237f7ed988 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/psp_head.py @@ -0,0 +1,101 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead + + +class PPM(nn.ModuleList): + """Pooling Pyramid Module used in PSPNet. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. + in_channels (int): Input channels. + channels (int): Channels after modules, before conv_seg. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. + align_corners (bool): align_corners argument of F.interpolate. + """ + + def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, + act_cfg, align_corners): + super(PPM, self).__init__() + self.pool_scales = pool_scales + self.align_corners = align_corners + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + for pool_scale in pool_scales: + self.append( + nn.Sequential( + nn.AdaptiveAvgPool2d(pool_scale), + ConvModule( + self.in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg))) + + def forward(self, x): + """Forward function.""" + ppm_outs = [] + for ppm in self: + ppm_out = ppm(x) + upsampled_ppm_out = resize( + ppm_out, + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ppm_outs.append(upsampled_ppm_out) + return ppm_outs + + +@HEADS.register_module() +class PSPHead(BaseDecodeHead): + """Pyramid Scene Parsing Network. + + This head is the implementation of + `PSPNet `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module. Default: (1, 2, 3, 6). 
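+
+    Example:
+        A minimal forward sketch; the channel counts below are illustrative,
+        not values from any released config.
+
+        >>> import torch
+        >>> head = PSPHead(in_channels=64, channels=16, num_classes=19)
+        >>> head([torch.rand(1, 64, 45, 45)]).shape
+        torch.Size([1, 19, 45, 45])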
+ """ + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(PSPHead, self).__init__(**kwargs) + assert isinstance(pool_scales, (list, tuple)) + self.pool_scales = pool_scales + self.psp_modules = PPM( + self.pool_scales, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.bottleneck = ConvModule( + self.in_channels + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + psp_outs = [x] + psp_outs.extend(self.psp_modules(x)) + psp_outs = torch.cat(psp_outs, dim=1) + output = self.bottleneck(psp_outs) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_aspp_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_aspp_head.py new file mode 100644 index 0000000000000000000000000000000000000000..26942ae135d172ec2dbb3775c0dc548c6976e729 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_aspp_head.py @@ -0,0 +1,101 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule, DepthwiseSeparableConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .aspp_head import ASPPHead, ASPPModule + + +class DepthwiseSeparableASPPModule(ASPPModule): + """Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable + conv.""" + + def __init__(self, **kwargs): + super(DepthwiseSeparableASPPModule, self).__init__(**kwargs) + for i, dilation in enumerate(self.dilations): + if dilation > 1: + self[i] = DepthwiseSeparableConvModule( + self.in_channels, + self.channels, + 3, + dilation=dilation, + padding=dilation, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + +@HEADS.register_module() +class DepthwiseSeparableASPPHead(ASPPHead): + """Encoder-Decoder with Atrous Separable Convolution for Semantic Image + Segmentation. + + This head is the implementation of `DeepLabV3+ + `_. + + Args: + c1_in_channels (int): The input channels of c1 decoder. If is 0, + the no decoder will be used. + c1_channels (int): The intermediate channels of c1 decoder. 
+ """ + + def __init__(self, c1_in_channels, c1_channels, **kwargs): + super(DepthwiseSeparableASPPHead, self).__init__(**kwargs) + assert c1_in_channels >= 0 + self.aspp_modules = DepthwiseSeparableASPPModule( + dilations=self.dilations, + in_channels=self.in_channels, + channels=self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + if c1_in_channels > 0: + self.c1_bottleneck = ConvModule( + c1_in_channels, + c1_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + self.c1_bottleneck = None + self.sep_bottleneck = nn.Sequential( + DepthwiseSeparableConvModule( + self.channels + c1_channels, + self.channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + DepthwiseSeparableConvModule( + self.channels, + self.channels, + 3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, inputs): + """Forward function.""" + x = self._transform_inputs(inputs) + aspp_outs = [ + resize( + self.image_pool(x), + size=x.size()[2:], + mode='bilinear', + align_corners=self.align_corners) + ] + aspp_outs.extend(self.aspp_modules(x)) + aspp_outs = torch.cat(aspp_outs, dim=1) + output = self.bottleneck(aspp_outs) + if self.c1_bottleneck is not None: + c1_output = self.c1_bottleneck(inputs[0]) + output = resize( + input=output, + size=c1_output.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + output = torch.cat([output, c1_output], dim=1) + output = self.sep_bottleneck(output) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_fcn_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_fcn_head.py new file mode 100644 index 0000000000000000000000000000000000000000..fabb624a530098e44ed1d9a9a7762addeaa126d6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/sep_fcn_head.py @@ -0,0 +1,51 @@ +from custom_mmpkg.custom_mmcv.cnn import DepthwiseSeparableConvModule + +from ..builder import HEADS +from .fcn_head import FCNHead + + +@HEADS.register_module() +class DepthwiseSeparableFCNHead(FCNHead): + """Depthwise-Separable Fully Convolutional Network for Semantic + Segmentation. + + This head is implemented according to Fast-SCNN paper. + Args: + in_channels(int): Number of output channels of FFM. + channels(int): Number of middle-stage channels in the decode head. + concat_input(bool): Whether to concatenate original decode input into + the result of several consecutive convolution layers. + Default: True. + num_classes(int): Used to determine the dimension of + final prediction tensor. + in_index(int): Correspond with 'out_indices' in FastSCNN backbone. + norm_cfg (dict | None): Config of norm layers. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + loss_decode(dict): Config of loss type and some + relevant additional options. 
+ """ + + def __init__(self, **kwargs): + super(DepthwiseSeparableFCNHead, self).__init__(**kwargs) + self.convs[0] = DepthwiseSeparableConvModule( + self.in_channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg) + for i in range(1, self.num_convs): + self.convs[i] = DepthwiseSeparableConvModule( + self.channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg) + + if self.concat_input: + self.conv_cat = DepthwiseSeparableConvModule( + self.in_channels + self.channels, + self.channels, + kernel_size=self.kernel_size, + padding=self.kernel_size // 2, + norm_cfg=self.norm_cfg) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/uper_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/uper_head.py new file mode 100644 index 0000000000000000000000000000000000000000..d4990010074568484b8ea768bbca8e43d407659a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/decode_heads/uper_head.py @@ -0,0 +1,126 @@ +import torch +import torch.nn as nn +from custom_mmpkg.custom_mmcv.cnn import ConvModule + +from custom_mmpkg.custom_mmseg.ops import resize +from ..builder import HEADS +from .decode_head import BaseDecodeHead +from .psp_head import PPM + + +@HEADS.register_module() +class UPerHead(BaseDecodeHead): + """Unified Perceptual Parsing for Scene Understanding. + + This head is the implementation of `UPerNet + `_. + + Args: + pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid + Module applied on the last feature. Default: (1, 2, 3, 6). + """ + + def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): + super(UPerHead, self).__init__( + input_transform='multiple_select', **kwargs) + # PSP Module + self.psp_modules = PPM( + pool_scales, + self.in_channels[-1], + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + align_corners=self.align_corners) + self.bottleneck = ConvModule( + self.in_channels[-1] + len(pool_scales) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # FPN Module + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + for in_channels in self.in_channels[:-1]: # skip the top layer + l_conv = ConvModule( + in_channels, + self.channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + inplace=False) + fpn_conv = ConvModule( + self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + inplace=False) + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + self.fpn_bottleneck = ConvModule( + len(self.in_channels) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def psp_forward(self, inputs): + """Forward function of PSP module.""" + x = inputs[-1] + psp_outs = [x] + psp_outs.extend(self.psp_modules(x)) + psp_outs = torch.cat(psp_outs, dim=1) + output = self.bottleneck(psp_outs) + + return output + + def forward(self, inputs): + """Forward function.""" + + inputs = self._transform_inputs(inputs) + + # build laterals + laterals = [ + lateral_conv(inputs[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + laterals.append(self.psp_forward(inputs)) + + # build top-down path + 
used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += resize( + laterals[i], + size=prev_shape, + mode='bilinear', + align_corners=self.align_corners) + + # build outputs + fpn_outs = [ + self.fpn_convs[i](laterals[i]) + for i in range(used_backbone_levels - 1) + ] + # append psp feature + fpn_outs.append(laterals[-1]) + + for i in range(used_backbone_levels - 1, 0, -1): + fpn_outs[i] = resize( + fpn_outs[i], + size=fpn_outs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) + fpn_outs = torch.cat(fpn_outs, dim=1) + output = self.fpn_bottleneck(fpn_outs) + output = self.cls_seg(output) + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..beca72045694273d63465bac2f27dbc6672271db --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/__init__.py @@ -0,0 +1,12 @@ +from .accuracy import Accuracy, accuracy +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy, mask_cross_entropy) +from .dice_loss import DiceLoss +from .lovasz_loss import LovaszLoss +from .utils import reduce_loss, weight_reduce_loss, weighted_loss + +__all__ = [ + 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', + 'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss', + 'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/accuracy.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..c0fd2e7e74a0f721c4a814c09d6e453e5956bb38 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/accuracy.py @@ -0,0 +1,78 @@ +import torch.nn as nn + + +def accuracy(pred, target, topk=1, thresh=None): + """Calculate accuracy according to the prediction and target. + + Args: + pred (torch.Tensor): The model prediction, shape (N, num_class, ...) + target (torch.Tensor): The target of each prediction, shape (N, , ...) + topk (int | tuple[int], optional): If the predictions in ``topk`` + matches the target, the predictions will be regarded as + correct ones. Defaults to 1. + thresh (float, optional): If not None, predictions with scores under + this threshold are considered incorrect. Default to None. + + Returns: + float | tuple[float]: If the input ``topk`` is a single integer, + the function will return a single float as accuracy. If + ``topk`` is a tuple containing multiple integers, the + function will return a tuple containing accuracies of + each ``topk`` number. + """ + assert isinstance(topk, (int, tuple)) + if isinstance(topk, int): + topk = (topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + if pred.size(0) == 0: + accu = [pred.new_tensor(0.) for i in range(len(topk))] + return accu[0] if return_single else accu + assert pred.ndim == target.ndim + 1 + assert pred.size(0) == target.size(0) + assert maxk <= pred.size(1), \ + f'maxk {maxk} exceeds pred dimension {pred.size(1)}' + pred_value, pred_label = pred.topk(maxk, dim=1) + # transpose to shape (maxk, N, ...) 
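+    # so that ``correct[:k]`` below selects the top-k hypotheses per sample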
+ pred_label = pred_label.transpose(0, 1) + correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) + if thresh is not None: + # Only prediction values larger than thresh are counted as correct + correct = correct & (pred_value > thresh).t() + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / target.numel())) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + """Accuracy calculation module.""" + + def __init__(self, topk=(1, ), thresh=None): + """Module to calculate the accuracy. + + Args: + topk (tuple, optional): The criterion used to calculate the + accuracy. Defaults to (1,). + thresh (float, optional): If not None, predictions with scores + under this threshold are considered incorrect. Default to None. + """ + super().__init__() + self.topk = topk + self.thresh = thresh + + def forward(self, pred, target): + """Forward function to calculate accuracy. + + Args: + pred (torch.Tensor): Prediction of models. + target (torch.Tensor): Target for each prediction. + + Returns: + tuple[float]: The accuracies under different topk criterions. + """ + return accuracy(pred, target, self.topk, self.thresh) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/cross_entropy_loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..42c0790c98616bb69621deed55547fc04c7392ef --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/cross_entropy_loss.py @@ -0,0 +1,198 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=-100): + """The wrapper function for :func:`F.cross_entropy`""" + # class_weight is a manual rescaling weight given to each class. + # If given, has to be a Tensor of size C element-wise losses + loss = F.cross_entropy( + pred, + label, + weight=class_weight, + reduction='none', + ignore_index=ignore_index) + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): + """Expand onehot labels to match the size of prediction.""" + bin_labels = labels.new_zeros(target_shape) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero(valid_mask, as_tuple=True) + + if inds[0].numel() > 0: + if labels.dim() == 3: + bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 + else: + bin_labels[inds[0], labels[valid_mask]] = 1 + + valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() + if label_weights is None: + bin_label_weights = valid_mask + else: + bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) + bin_label_weights *= valid_mask + + return bin_labels, bin_label_weights + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=255): + """Calculate the binary CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). 
+        label (torch.Tensor): The learning label of the prediction.
+        weight (torch.Tensor, optional): Sample-wise loss weight.
+        reduction (str, optional): The method used to reduce the loss.
+            Options are "none", "mean" and "sum".
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (list[float], optional): The weight for each class.
+        ignore_index (int | None): The label index to be ignored. Default: 255.
+
+    Returns:
+        torch.Tensor: The calculated loss.
+    """
+    if pred.dim() != label.dim():
+        assert (pred.dim() == 2 and label.dim() == 1) or (
+            pred.dim() == 4 and label.dim() == 3), \
+            'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
+            'H, W], label shape [N, H, W] are supported'
+        label, weight = _expand_onehot_labels(label, weight, pred.shape,
+                                              ignore_index)
+
+    # weighted element-wise losses
+    if weight is not None:
+        weight = weight.float()
+    loss = F.binary_cross_entropy_with_logits(
+        pred, label.float(), pos_weight=class_weight, reduction='none')
+    # do the reduction for the weighted loss
+    loss = weight_reduce_loss(
+        loss, weight, reduction=reduction, avg_factor=avg_factor)
+
+    return loss
+
+
+def mask_cross_entropy(pred,
+                       target,
+                       label,
+                       reduction='mean',
+                       avg_factor=None,
+                       class_weight=None,
+                       ignore_index=None):
+    """Calculate the CrossEntropy loss for masks.
+
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, C), C is the number
+            of classes.
+        target (torch.Tensor): The learning label of the prediction.
+        label (torch.Tensor): ``label`` indicates the class label of the
+            mask's corresponding object. It is used to select the mask of the
+            class the object belongs to when the mask prediction is not
+            class-agnostic.
+        reduction (str, optional): The method used to reduce the loss.
+            Options are "none", "mean" and "sum".
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (list[float], optional): The weight for each class.
+        ignore_index (None): Placeholder, to be consistent with other loss.
+            Default: None.
+
+    Returns:
+        torch.Tensor: The calculated loss.
+    """
+    assert ignore_index is None, 'BCE loss does not support ignore_index'
+    # TODO: handle these two reserved arguments
+    assert reduction == 'mean' and avg_factor is None
+    num_rois = pred.size()[0]
+    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
+    pred_slice = pred[inds, label].squeeze(1)
+    return F.binary_cross_entropy_with_logits(
+        pred_slice, target, weight=class_weight, reduction='mean')[None]
+
+
+@LOSSES.register_module()
+class CrossEntropyLoss(nn.Module):
+    """CrossEntropyLoss.
+
+    Args:
+        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
+            or softmax. Defaults to False.
+        use_mask (bool, optional): Whether to use mask cross entropy loss.
+            Defaults to False.
+        reduction (str, optional): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". Defaults to 'mean'.
+        class_weight (list[float] | str, optional): Weight of each class. If in
+            str format, read them from a file. Defaults to None.
+        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
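+
+    Example:
+        A standalone sketch of the default softmax branch; the shapes are
+        arbitrary.
+
+        >>> import torch
+        >>> loss_fn = CrossEntropyLoss()
+        >>> cls_score = torch.rand(2, 19, 8, 8)
+        >>> label = torch.randint(0, 19, (2, 8, 8))
+        >>> loss_fn(cls_score, label).ndim  # reduced to a scalar
+        0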
+ """ + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + class_weight=None, + loss_weight=1.0): + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/dice_loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/dice_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..27a77b962d7d8b3079c7d6cd9db52280c6fb4970 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/dice_loss.py @@ -0,0 +1,119 @@ +"""Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/ +segmentron/solver/loss.py (Apache-2.0 License)""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weighted_loss + + +@weighted_loss +def dice_loss(pred, + target, + valid_mask, + smooth=1, + exponent=2, + class_weight=None, + ignore_index=255): + assert pred.shape[0] == target.shape[0] + total_loss = 0 + num_classes = pred.shape[1] + for i in range(num_classes): + if i != ignore_index: + dice_loss = binary_dice_loss( + pred[:, i], + target[..., i], + valid_mask=valid_mask, + smooth=smooth, + exponent=exponent) + if class_weight is not None: + dice_loss *= class_weight[i] + total_loss += dice_loss + return total_loss / num_classes + + +@weighted_loss +def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): + assert pred.shape[0] == target.shape[0] + pred = pred.reshape(pred.shape[0], -1) + target = target.reshape(target.shape[0], -1) + valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) + + num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth + den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth + + return 1 - num / den + + +@LOSSES.register_module() +class DiceLoss(nn.Module): + """DiceLoss. + + This loss is proposed in `V-Net: Fully Convolutional Neural Networks for + Volumetric Medical Image Segmentation `_. + + Args: + loss_type (str, optional): Binary or multi-class loss. + Default: 'multi_class'. Options are "binary" and "multi_class". + smooth (float): A float number to smooth loss, and avoid NaN error. + Default: 1 + exponent (float): An float number to calculate denominator + value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". 
This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Default to 1.0. + ignore_index (int | None): The label index to be ignored. Default: 255. + """ + + def __init__(self, + smooth=1, + exponent=2, + reduction='mean', + class_weight=None, + loss_weight=1.0, + ignore_index=255, + **kwards): + super(DiceLoss, self).__init__() + self.smooth = smooth + self.exponent = exponent + self.reduction = reduction + self.class_weight = get_class_weight(class_weight) + self.loss_weight = loss_weight + self.ignore_index = ignore_index + + def forward(self, + pred, + target, + avg_factor=None, + reduction_override=None, + **kwards): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = pred.new_tensor(self.class_weight) + else: + class_weight = None + + pred = F.softmax(pred, dim=1) + num_classes = pred.shape[1] + one_hot_target = F.one_hot( + torch.clamp(target.long(), 0, num_classes - 1), + num_classes=num_classes) + valid_mask = (target != self.ignore_index).long() + + loss = self.loss_weight * dice_loss( + pred, + one_hot_target, + valid_mask=valid_mask, + reduction=reduction, + avg_factor=avg_factor, + smooth=self.smooth, + exponent=self.exponent, + class_weight=class_weight, + ignore_index=self.ignore_index) + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/lovasz_loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/lovasz_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..e381378522673d41a4ce2b9b9d6d70b9b3102bb0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/lovasz_loss.py @@ -0,0 +1,303 @@ +"""Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor +ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim +Berman 2018 ESAT-PSI KU Leuven (MIT License)""" + +import custom_mmpkg.custom_mmcv as mmcv +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import LOSSES +from .utils import get_class_weight, weight_reduce_loss + + +def lovasz_grad(gt_sorted): + """Computes gradient of the Lovasz extension w.r.t sorted errors. + + See Alg. 1 in paper. + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def flatten_binary_logits(logits, labels, ignore_index=None): + """Flattens predictions in the batch (binary case) Remove labels equal to + 'ignore_index'.""" + logits = logits.view(-1) + labels = labels.view(-1) + if ignore_index is None: + return logits, labels + valid = (labels != ignore_index) + vlogits = logits[valid] + vlabels = labels[valid] + return vlogits, vlabels + + +def flatten_probs(probs, labels, ignore_index=None): + """Flattens predictions in the batch.""" + if probs.dim() == 3: + # assumes output of a sigmoid layer + B, H, W = probs.size() + probs = probs.view(B, 1, H, W) + B, C, H, W = probs.size() + probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C) # B*H*W, C=P,C + labels = labels.view(-1) + if ignore_index is None: + return probs, labels + valid = (labels != ignore_index) + vprobs = probs[valid.nonzero().squeeze()] + vlabels = labels[valid] + return vprobs, vlabels + + +def lovasz_hinge_flat(logits, labels): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [P], logits at each prediction + (between -infty and +infty). + labels (torch.Tensor): [P], binary ground truth labels (0 or 1). + + Returns: + torch.Tensor: The calculated loss. + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. - logits * signs) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), grad) + return loss + + +def lovasz_hinge(logits, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Binary Lovasz hinge loss. + + Args: + logits (torch.Tensor): [B, H, W], logits at each pixel + (between -infty and +infty). + labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1). + classes (str | list[int], optional): Placeholder, to be consistent with + other loss. Default: None. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): Placeholder, to be consistent + with other loss. Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + if per_image: + loss = [ + lovasz_hinge_flat(*flatten_binary_logits( + logit.unsqueeze(0), label.unsqueeze(0), ignore_index)) + for logit, label in zip(logits, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_hinge_flat( + *flatten_binary_logits(logits, labels, ignore_index)) + return loss + + +def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [P, C], class probabilities at each prediction + (between 0 and 1). + labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1). 
+ classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + class_weight (list[float], optional): The weight for each class. + Default: None. + + Returns: + torch.Tensor: The calculated loss. + """ + if probs.numel() == 0: + # only void pixels, the gradients should be 0 + return probs * 0. + C = probs.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes == 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probs[:, 0] + else: + class_pred = probs[:, c] + errors = (fg - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted)) + if class_weight is not None: + loss *= class_weight[c] + losses.append(loss) + return torch.stack(losses).mean() + + +def lovasz_softmax(probs, + labels, + classes='present', + per_image=False, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=255): + """Multi-class Lovasz-Softmax loss. + + Args: + probs (torch.Tensor): [B, C, H, W], class probabilities at each + prediction (between 0 and 1). + labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and + C - 1). + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. This parameter only works when per_image is True. + Default: None. + ignore_index (int | None): The label index to be ignored. Default: 255. + + Returns: + torch.Tensor: The calculated loss. + """ + + if per_image: + loss = [ + lovasz_softmax_flat( + *flatten_probs( + prob.unsqueeze(0), label.unsqueeze(0), ignore_index), + classes=classes, + class_weight=class_weight) + for prob, label in zip(probs, labels) + ] + loss = weight_reduce_loss( + torch.stack(loss), None, reduction, avg_factor) + else: + loss = lovasz_softmax_flat( + *flatten_probs(probs, labels, ignore_index), + classes=classes, + class_weight=class_weight) + return loss + + +@LOSSES.register_module() +class LovaszLoss(nn.Module): + """LovaszLoss. + + This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate + for the optimization of the intersection-over-union measure in neural + networks `_. + + Args: + loss_type (str, optional): Binary or multi-class loss. + Default: 'multi_class'. Options are "binary" and "multi_class". + classes (str | list[int], optional): Classes chosen to calculate loss. + 'all' for all classes, 'present' for classes present in labels, or + a list of classes to average. Default: 'present'. + per_image (bool, optional): If per_image is True, compute the loss per + image instead of per batch. Default: False. 
+ reduction (str, optional): The method used to reduce the loss. Options + are "none", "mean" and "sum". This parameter only works when + per_image is True. Default: 'mean'. + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + """ + + def __init__(self, + loss_type='multi_class', + classes='present', + per_image=False, + reduction='mean', + class_weight=None, + loss_weight=1.0): + super(LovaszLoss, self).__init__() + assert loss_type in ('binary', 'multi_class'), "loss_type should be \ + 'binary' or 'multi_class'." + + if loss_type == 'binary': + self.cls_criterion = lovasz_hinge + else: + self.cls_criterion = lovasz_softmax + assert classes in ('all', 'present') or mmcv.is_list_of(classes, int) + if not per_image: + assert reduction == 'none', "reduction should be 'none' when \ + per_image is False." + + self.classes = classes + self.per_image = per_image + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # if multi-class loss, transform logits to probs + if self.cls_criterion == lovasz_softmax: + cls_score = F.softmax(cls_score, dim=1) + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + self.classes, + self.per_image, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cdfbd436d2305f82af065a853e789d6fa37614cd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/losses/utils.py @@ -0,0 +1,121 @@ +import functools + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +import torch.nn.functional as F + + +def get_class_weight(class_weight): + """Get class weight for loss function. + + Args: + class_weight (list[float] | str | None): If class_weight is a str, + take it as a file name and read from it. + """ + if isinstance(class_weight, str): + # take it as a file path + if class_weight.endswith('.npy'): + class_weight = np.load(class_weight) + else: + # pkl, json or yaml + class_weight = mmcv.load(class_weight) + + return class_weight + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. 
+        weight (Tensor): Element-wise weights.
+        reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Average factor when computing the mean of losses.
+
+    Returns:
+        Tensor: Processed loss values.
+    """
+    # if weight is specified, apply element-wise weight
+    if weight is not None:
+        assert weight.dim() == loss.dim()
+        if weight.dim() > 1:
+            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
+        loss = loss * weight
+
+    # if avg_factor is not specified, just reduce the loss
+    if avg_factor is None:
+        loss = reduce_loss(loss, reduction)
+    else:
+        # if reduction is mean, then average the loss by avg_factor
+        if reduction == 'mean':
+            loss = loss.sum() / avg_factor
+        # if reduction is 'none', then do nothing, otherwise raise an error
+        elif reduction != 'none':
+            raise ValueError('avg_factor can not be used with reduction="sum"')
+    return loss
+
+
+def weighted_loss(loss_func):
+    """Create a weighted version of a given loss function.
+
+    To use this decorator, the loss function must have the signature like
+    `loss_func(pred, target, **kwargs)`. The function only needs to compute
+    element-wise loss without any reduction. This decorator will add weight
+    and reduction arguments to the function. The decorated function will have
+    the signature like `loss_func(pred, target, weight=None, reduction='mean',
+    avg_factor=None, **kwargs)`.
+
+    :Example:
+
+    >>> import torch
+    >>> @weighted_loss
+    >>> def l1_loss(pred, target):
+    >>>     return (pred - target).abs()
+
+    >>> pred = torch.Tensor([0, 2, 3])
+    >>> target = torch.Tensor([1, 1, 1])
+    >>> weight = torch.Tensor([1, 0, 1])
+
+    >>> l1_loss(pred, target)
+    tensor(1.3333)
+    >>> l1_loss(pred, target, weight)
+    tensor(1.)
+    >>> l1_loss(pred, target, reduction='none')
+    tensor([1., 1., 2.])
+    >>> l1_loss(pred, target, weight, avg_factor=2)
+    tensor(1.5000)
+    """
+
+    @functools.wraps(loss_func)
+    def wrapper(pred,
+                target,
+                weight=None,
+                reduction='mean',
+                avg_factor=None,
+                **kwargs):
+        # get element-wise loss
+        loss = loss_func(pred, target, **kwargs)
+        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+        return loss
+
+    return wrapper
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b9d3d5b3fe80247642d962edd6fb787537d01d6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/__init__.py
@@ -0,0 +1,4 @@
+from .fpn import FPN
+from .multilevel_neck import MultiLevelNeck
+
+__all__ = ['FPN', 'MultiLevelNeck']
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/fpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c32cc5e44f92e7779ba1ba913c2482107e5900d
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/fpn.py
@@ -0,0 +1,212 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from custom_mmpkg.custom_mmcv.cnn import ConvModule, xavier_init
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class FPN(nn.Module):
+    """Feature Pyramid Network.
+
+    This is an implementation of Feature Pyramid Networks for Object
+    Detection (https://arxiv.org/abs/1612.03144).
+
+    Args:
+        in_channels (List[int]): Number of input channels per scale.
+        out_channels (int): Number of output channels (used at each scale).
+        num_outs (int): Number of output scales.
+        start_level (int): Index of the start input backbone level used to
+            build the feature pyramid. Default: 0.
+        end_level (int): Index of the end input backbone level (exclusive) to
+            build the feature pyramid. Default: -1, which means the last level.
+        add_extra_convs (bool | str): If bool, it decides whether to add conv
+            layers on top of the original feature maps. Default: False.
+            If True, its actual mode is specified by `extra_convs_on_inputs`.
+            If str, it specifies the source feature map of the extra convs.
+            Only the following options are allowed:
+
+            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
+            - 'on_lateral': Last feature map after lateral convs.
+            - 'on_output': The last output feature map after fpn convs.
+        extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
+            on the original feature from the backbone. If True, it is
+            equivalent to `add_extra_convs='on_input'`. If False, it is
+            equivalent to `add_extra_convs='on_output'`. Default: False.
+        relu_before_extra_convs (bool): Whether to apply relu before the extra
+            conv. Default: False.
+        no_norm_on_lateral (bool): Whether to apply norm on lateral.
+            Default: False.
+        conv_cfg (dict): Config dict for convolution layer. Default: None.
+        norm_cfg (dict): Config dict for normalization layer. Default: None.
+        act_cfg (dict): Config dict for activation layer in ConvModule.
+            Default: None.
+        upsample_cfg (dict): Config dict for interpolate layer.
+            Default: `dict(mode='nearest')`.
+
+    Example:
+        >>> import torch
+        >>> in_channels = [2, 3, 5, 7]
+        >>> scales = [340, 170, 84, 43]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
+        >>> outputs = self.forward(inputs)
+        >>> for i in range(len(outputs)):
+        ...     
print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + extra_convs_on_inputs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest')): + super(FPN, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + if extra_convs_on_inputs: + # For compatibility with previous release + # TODO: deprecate `extra_convs_on_inputs` + self.add_extra_convs = 'on_input' + else: + self.add_extra_convs = 'on_output' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. 
+                if 'scale_factor' in self.upsample_cfg:
+                    laterals[i - 1] += F.interpolate(laterals[i],
+                                                     **self.upsample_cfg)
+                else:
+                    prev_shape = laterals[i - 1].shape[2:]
+                    laterals[i - 1] += F.interpolate(
+                        laterals[i], size=prev_shape, **self.upsample_cfg)
+
+        # build outputs
+        # part 1: from original levels
+        outs = [
+            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+        ]
+        # part 2: add extra levels
+        if self.num_outs > len(outs):
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
+            if not self.add_extra_convs:
+                for i in range(self.num_outs - used_backbone_levels):
+                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+            # add conv layers on top of original feature maps (RetinaNet)
+            else:
+                if self.add_extra_convs == 'on_input':
+                    extra_source = inputs[self.backbone_end_level - 1]
+                elif self.add_extra_convs == 'on_lateral':
+                    extra_source = laterals[-1]
+                elif self.add_extra_convs == 'on_output':
+                    extra_source = outs[-1]
+                else:
+                    raise NotImplementedError
+                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
+                for i in range(used_backbone_levels + 1, self.num_outs):
+                    if self.relu_before_extra_convs:
+                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+                    else:
+                        outs.append(self.fpn_convs[i](outs[-1]))
+        return tuple(outs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/multilevel_neck.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/multilevel_neck.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce5e8563f7d7b27944fbad2c247f789967433bba
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/necks/multilevel_neck.py
@@ -0,0 +1,70 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from custom_mmpkg.custom_mmcv.cnn import ConvModule
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class MultiLevelNeck(nn.Module):
+    """MultiLevelNeck.
+
+    A neck structure that connects a ViT backbone to the decode heads.
+
+    Args:
+        in_channels (List[int]): Number of input channels per scale.
+        out_channels (int): Number of output channels (used at each scale).
+        scales (List[float]): Scale factors for each input feature map.
+            Default: [0.5, 1, 2, 4].
+        norm_cfg (dict): Config dict for normalization layer. Default: None.
+        act_cfg (dict): Config dict for activation layer in ConvModule.
+            Default: None.
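+
+    Example (a minimal usage sketch; the output sizes below follow the
+    default scales and are illustrative, not part of the original code):
+        >>> import torch
+        >>> inputs = [torch.rand(1, 512, 14, 14)]
+        >>> self = MultiLevelNeck([512], 256).eval()
+        >>> outputs = self.forward(inputs)
+        >>> for out in outputs:
+        ...     print(out.shape)
+        torch.Size([1, 256, 7, 7])
+        torch.Size([1, 256, 14, 14])
+        torch.Size([1, 256, 28, 28])
+        torch.Size([1, 256, 56, 56])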
+ """ + + def __init__(self, + in_channels, + out_channels, + scales=[0.5, 1, 2, 4], + norm_cfg=None, + act_cfg=None): + super(MultiLevelNeck, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.scales = scales + self.num_outs = len(scales) + self.lateral_convs = nn.ModuleList() + self.convs = nn.ModuleList() + for in_channel in in_channels: + self.lateral_convs.append( + ConvModule( + in_channel, + out_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + for _ in range(self.num_outs): + self.convs.append( + ConvModule( + out_channels, + out_channels, + kernel_size=3, + padding=1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + print(inputs[0].shape) + inputs = [ + lateral_conv(inputs[i]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + # for len(inputs) not equal to self.num_outs + if len(inputs) == 1: + inputs = [inputs[0] for _ in range(self.num_outs)] + outs = [] + for i in range(self.num_outs): + x_resize = F.interpolate( + inputs[i], scale_factor=self.scales[i], mode='bilinear') + outs.append(self.convs[i](x_resize)) + return tuple(outs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dca2f09405330743c476e190896bee39c45498ea --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/__init__.py @@ -0,0 +1,5 @@ +from .base import BaseSegmentor +from .cascade_encoder_decoder import CascadeEncoderDecoder +from .encoder_decoder import EncoderDecoder + +__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/base.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/base.py new file mode 100644 index 0000000000000000000000000000000000000000..5fd073f4a9d7713f02b107b7ad541384c3d27d6b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/base.py @@ -0,0 +1,273 @@ +import logging +import warnings +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import custom_mmpkg.custom_mmcv as mmcv +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +from custom_mmpkg.custom_mmcv.runner import auto_fp16 + + +class BaseSegmentor(nn.Module): + """Base class for segmentors.""" + + __metaclass__ = ABCMeta + + def __init__(self): + super(BaseSegmentor, self).__init__() + self.fp16_enabled = False + + @property + def with_neck(self): + """bool: whether the segmentor has neck""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_auxiliary_head(self): + """bool: whether the segmentor has auxiliary head""" + return hasattr(self, + 'auxiliary_head') and self.auxiliary_head is not None + + @property + def with_decode_head(self): + """bool: whether the segmentor has decode head""" + return hasattr(self, 'decode_head') and self.decode_head is not None + + @abstractmethod + def extract_feat(self, imgs): + """Placeholder for extract features from images.""" + pass + + @abstractmethod + def encode_decode(self, img, img_metas): + """Placeholder for encode images with 
backbone and decode into a
+        semantic segmentation map of the same size as input."""
+        pass
+
+    @abstractmethod
+    def forward_train(self, imgs, img_metas, **kwargs):
+        """Placeholder for forward function for training."""
+        pass
+
+    @abstractmethod
+    def simple_test(self, img, img_meta, **kwargs):
+        """Placeholder for single image test."""
+        pass
+
+    @abstractmethod
+    def aug_test(self, imgs, img_metas, **kwargs):
+        """Placeholder for augmentation test."""
+        pass
+
+    def init_weights(self, pretrained=None):
+        """Initialize the weights in segmentor.
+
+        Args:
+            pretrained (str, optional): Path to pre-trained weights.
+                Defaults to None.
+        """
+        if pretrained is not None:
+            logger = logging.getLogger()
+            logger.info(f'load model from: {pretrained}')
+
+    def forward_test(self, imgs, img_metas, **kwargs):
+        """
+        Args:
+            imgs (List[Tensor]): the outer list indicates test-time
+                augmentations and inner Tensor should have a shape NxCxHxW,
+                which contains all images in the batch.
+            img_metas (List[List[dict]]): the outer list indicates test-time
+                augs (multiscale, flip, etc.) and the inner list indicates
+                images in a batch.
+        """
+        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+            if not isinstance(var, list):
+                raise TypeError(f'{name} must be a list, but got '
+                                f'{type(var)}')
+
+        num_augs = len(imgs)
+        if num_augs != len(img_metas):
+            raise ValueError(f'num of augmentations ({len(imgs)}) != '
+                             f'num of image meta ({len(img_metas)})')
+        # all images in the same aug batch should have the same ori_shape and
+        # pad shape
+        for img_meta in img_metas:
+            ori_shapes = [_['ori_shape'] for _ in img_meta]
+            assert all(shape == ori_shapes[0] for shape in ori_shapes)
+            img_shapes = [_['img_shape'] for _ in img_meta]
+            assert all(shape == img_shapes[0] for shape in img_shapes)
+            pad_shapes = [_['pad_shape'] for _ in img_meta]
+            assert all(shape == pad_shapes[0] for shape in pad_shapes)
+
+        if num_augs == 1:
+            return self.simple_test(imgs[0], img_metas[0], **kwargs)
+        else:
+            return self.aug_test(imgs, img_metas, **kwargs)
+
+    @auto_fp16(apply_to=('img', ))
+    def forward(self, img, img_metas, return_loss=True, **kwargs):
+        """Calls either :func:`forward_train` or :func:`forward_test` depending
+        on whether ``return_loss`` is ``True``.
+
+        Note this setting will change the expected inputs. When
+        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
+        and List[dict]), and when ``return_loss=False``, img and img_meta
+        should be double nested (i.e. List[Tensor], List[List[dict]]), with
+        the outer list indicating test time augmentations.
+        """
+        if return_loss:
+            return self.forward_train(img, img_metas, **kwargs)
+        else:
+            return self.forward_test(img, img_metas, **kwargs)
+
+    def train_step(self, data_batch, optimizer, **kwargs):
+        """The iteration step during training.
+
+        This method defines an iteration step during training, except for the
+        back propagation and optimizer updating, which are done in an optimizer
+        hook. Note that in some complicated cases or models, the whole process
+        including back propagation and optimizer updating is also defined in
+        this method, such as GAN.
+
+        Args:
+            data_batch (dict): The output of dataloader.
+            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
+                runner is passed to ``train_step()``. This argument is unused
+                and reserved.
+
+        Returns:
+            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
+                ``num_samples``.
+                ``loss`` is a tensor for back propagation, which can be a
+                weighted sum of multiple losses.
+                ``log_vars`` contains all the variables to be sent to the
+                logger.
+                ``num_samples`` indicates the batch size (when the model is
+                DDP, it means the batch size on each GPU), which is used for
+                averaging the logs.
+        """
+        losses = self(**data_batch)
+        loss, log_vars = self._parse_losses(losses)
+
+        outputs = dict(
+            loss=loss,
+            log_vars=log_vars,
+            num_samples=len(data_batch['img_metas']))
+
+        return outputs
+
+    def val_step(self, data_batch, **kwargs):
+        """The iteration step during validation.
+
+        This method shares the same signature as :func:`train_step`, but it is
+        used during validation epochs. Note that the evaluation after training
+        epochs is not implemented with this method, but with an evaluation
+        hook.
+        """
+        output = self(**data_batch, **kwargs)
+        return output
+
+    @staticmethod
+    def _parse_losses(losses):
+        """Parse the raw outputs (losses) of the network.
+
+        Args:
+            losses (dict): Raw output of the network, which usually contains
+                losses and other necessary information.
+
+        Returns:
+            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
+                which may be a weighted sum of all losses, log_vars contains
+                all the variables to be sent to the logger.
+        """
+        log_vars = OrderedDict()
+        for loss_name, loss_value in losses.items():
+            if isinstance(loss_value, torch.Tensor):
+                log_vars[loss_name] = loss_value.mean()
+            elif isinstance(loss_value, list):
+                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+            else:
+                raise TypeError(
+                    f'{loss_name} is not a tensor or list of tensors')
+
+        loss = sum(_value for _key, _value in log_vars.items()
+                   if 'loss' in _key)
+
+        log_vars['loss'] = loss
+        for loss_name, loss_value in log_vars.items():
+            # reduce loss when doing distributed training
+            if dist.is_available() and dist.is_initialized():
+                loss_value = loss_value.data.clone()
+                dist.all_reduce(loss_value.div_(dist.get_world_size()))
+            log_vars[loss_name] = loss_value.item()
+
+        return loss, log_vars
+
+    def show_result(self,
+                    img,
+                    result,
+                    palette=None,
+                    win_name='',
+                    show=False,
+                    wait_time=0,
+                    out_file=None,
+                    opacity=0.5):
+        """Draw `result` over `img`.
+
+        Args:
+            img (str or Tensor): The image to be displayed.
+            result (Tensor): The semantic segmentation results to draw over
+                `img`.
+            palette (list[list[int]] | np.ndarray | None): The palette of
+                segmentation map. If None is given, random palette will be
+                generated. Default: None
+            win_name (str): The window name.
+            wait_time (int): Value of waitKey param.
+                Default: 0.
+            show (bool): Whether to show the image.
+                Default: False.
+            out_file (str or None): The filename to write the image.
+                Default: None.
+            opacity (float): Opacity of painted segmentation map.
+                Default: 0.5. Must be in (0, 1] range.
+
+        Returns:
+            img (np.ndarray): The image with the drawn segmentation map; only
+                returned when `show` is False and `out_file` is None.
+        """
+        img = mmcv.imread(img)
+        img = img.copy()
+        seg = result[0]
+        if palette is None:
+            if self.PALETTE is None:
+                palette = np.random.randint(
+                    0, 255, size=(len(self.CLASSES), 3))
+            else:
+                palette = self.PALETTE
+        palette = np.array(palette)
+        assert palette.shape[0] == len(self.CLASSES)
+        assert palette.shape[1] == 3
+        assert len(palette.shape) == 2
+        assert 0 < opacity <= 1.0
+        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
+        for label, color in enumerate(palette):
+            color_seg[seg == label, :] = color
+        # convert to BGR
+        color_seg = color_seg[..., ::-1]
+
+        img = img * (1 - opacity) + color_seg * opacity
+        img = img.astype(np.uint8)
+        # if out_file specified, do not show image in window
+        if out_file is not None:
+            show = False
+
+        if show:
+            mmcv.imshow(img, win_name, wait_time)
+        if out_file is not None:
+            mmcv.imwrite(img, out_file)
+
+        if not (show or out_file):
+            warnings.warn('show==False and out_file is not specified, only '
+                          'result image will be returned')
+            return img
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/cascade_encoder_decoder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/cascade_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdece0ac89713a0cead10d98959bad2e2d05c4e7
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/cascade_encoder_decoder.py
@@ -0,0 +1,98 @@
+from torch import nn
+
+from custom_mmpkg.custom_mmseg.core import add_prefix
+from custom_mmpkg.custom_mmseg.ops import resize
+from .. import builder
+from ..builder import SEGMENTORS
+from .encoder_decoder import EncoderDecoder
+
+
+@SEGMENTORS.register_module()
+class CascadeEncoderDecoder(EncoderDecoder):
+    """Cascade Encoder Decoder segmentors.
+
+    CascadeEncoderDecoder is almost the same as EncoderDecoder, except that
+    its decode heads are cascaded: the output of the previous decode_head is
+    the input of the next decode_head.
+    """
+
+    def __init__(self,
+                 num_stages,
+                 backbone,
+                 decode_head,
+                 neck=None,
+                 auxiliary_head=None,
+                 train_cfg=None,
+                 test_cfg=None,
+                 pretrained=None):
+        self.num_stages = num_stages
+        super(CascadeEncoderDecoder, self).__init__(
+            backbone=backbone,
+            decode_head=decode_head,
+            neck=neck,
+            auxiliary_head=auxiliary_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            pretrained=pretrained)
+
+    def _init_decode_head(self, decode_head):
+        """Initialize ``decode_head``"""
+        assert isinstance(decode_head, list)
+        assert len(decode_head) == self.num_stages
+        self.decode_head = nn.ModuleList()
+        for i in range(self.num_stages):
+            self.decode_head.append(builder.build_head(decode_head[i]))
+        self.align_corners = self.decode_head[-1].align_corners
+        self.num_classes = self.decode_head[-1].num_classes
+
+    def init_weights(self, pretrained=None):
+        """Initialize the weights in backbone and heads.
+
+        Args:
+            pretrained (str, optional): Path to pre-trained weights.
+                Defaults to None.
+ """ + self.backbone.init_weights(pretrained=pretrained) + for i in range(self.num_stages): + self.decode_head[i].init_weights() + if self.with_auxiliary_head: + if isinstance(self.auxiliary_head, nn.ModuleList): + for aux_head in self.auxiliary_head: + aux_head.init_weights() + else: + self.auxiliary_head.init_weights() + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self.decode_head[0].forward_test(x, img_metas, self.test_cfg) + for i in range(1, self.num_stages): + out = self.decode_head[i].forward_test(x, out, img_metas, + self.test_cfg) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + + loss_decode = self.decode_head[0].forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + + losses.update(add_prefix(loss_decode, 'decode_0')) + + for i in range(1, self.num_stages): + # forward test again, maybe unnecessary for most methods. + prev_outputs = self.decode_head[i - 1].forward_test( + x, img_metas, self.test_cfg) + loss_decode = self.decode_head[i].forward_train( + x, prev_outputs, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_decode, f'decode_{i}')) + + return losses diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/encoder_decoder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..b8e42be53aba4b492efec32877bfe4c6a7a1e2aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/segmentors/encoder_decoder.py @@ -0,0 +1,298 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_mmpkg.custom_mmseg.core import add_prefix +from custom_mmpkg.custom_mmseg.ops import resize +from .. import builder +from ..builder import SEGMENTORS +from .base import BaseSegmentor + + +@SEGMENTORS.register_module() +class EncoderDecoder(BaseSegmentor): + """Encoder Decoder segmentors. + + EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. + Note that auxiliary_head is only used for deep supervision during training, + which could be dumped during inference. 
+ """ + + def __init__(self, + backbone, + decode_head, + neck=None, + auxiliary_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(EncoderDecoder, self).__init__() + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + self._init_decode_head(decode_head) + self._init_auxiliary_head(auxiliary_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.init_weights(pretrained=pretrained) + + assert self.with_decode_head + + def _init_decode_head(self, decode_head): + """Initialize ``decode_head``""" + self.decode_head = builder.build_head(decode_head) + self.align_corners = self.decode_head.align_corners + self.num_classes = self.decode_head.num_classes + + def _init_auxiliary_head(self, auxiliary_head): + """Initialize ``auxiliary_head``""" + if auxiliary_head is not None: + if isinstance(auxiliary_head, list): + self.auxiliary_head = nn.ModuleList() + for head_cfg in auxiliary_head: + self.auxiliary_head.append(builder.build_head(head_cfg)) + else: + self.auxiliary_head = builder.build_head(auxiliary_head) + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone and heads. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. + """ + + super(EncoderDecoder, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + self.decode_head.init_weights() + if self.with_auxiliary_head: + if isinstance(self.auxiliary_head, nn.ModuleList): + for aux_head in self.auxiliary_head: + aux_head.init_weights() + else: + self.auxiliary_head.init_weights() + + def extract_feat(self, img): + """Extract features from images.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self._decode_head_forward_test(x, img_metas) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + loss_decode = self.decode_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + + losses.update(add_prefix(loss_decode, 'decode')) + return losses + + def _decode_head_forward_test(self, x, img_metas): + """Run forward function and calculate loss for decode head in + inference.""" + seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) + return seg_logits + + def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for auxiliary head in + training.""" + losses = dict() + if isinstance(self.auxiliary_head, nn.ModuleList): + for idx, aux_head in enumerate(self.auxiliary_head): + loss_aux = aux_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + losses.update(add_prefix(loss_aux, f'aux_{idx}')) + else: + loss_aux = self.auxiliary_head.forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_aux, 'aux')) + + return losses + + def forward_dummy(self, img): + """Dummy forward function.""" + seg_logit = self.encode_decode(img, None) + + return seg_logit + + def forward_train(self, img, img_metas, gt_semantic_seg): + """Forward function for training. 
+ + Args: + img (Tensor): Input images. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + x = self.extract_feat(img) + + losses = dict() + + loss_decode = self._decode_head_forward_train(x, img_metas, + gt_semantic_seg) + losses.update(loss_decode) + + if self.with_auxiliary_head: + loss_aux = self._auxiliary_head_forward_train( + x, img_metas, gt_semantic_seg) + losses.update(loss_aux) + + return losses + + # TODO refactor + def slide_inference(self, img, img_meta, rescale): + """Inference by sliding-window with overlap. + + If h_crop > h_img or w_crop > w_img, the small patch will be used to + decode without padding. + """ + + h_stride, w_stride = self.test_cfg.stride + h_crop, w_crop = self.test_cfg.crop_size + batch_size, _, h_img, w_img = img.size() + num_classes = self.num_classes + h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 + w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 + preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) + count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) + for h_idx in range(h_grids): + for w_idx in range(w_grids): + y1 = h_idx * h_stride + x1 = w_idx * w_stride + y2 = min(y1 + h_crop, h_img) + x2 = min(x1 + w_crop, w_img) + y1 = max(y2 - h_crop, 0) + x1 = max(x2 - w_crop, 0) + crop_img = img[:, :, y1:y2, x1:x2] + crop_seg_logit = self.encode_decode(crop_img, img_meta) + preds += F.pad(crop_seg_logit, + (int(x1), int(preds.shape[3] - x2), int(y1), + int(preds.shape[2] - y2))) + + count_mat[:, :, y1:y2, x1:x2] += 1 + assert (count_mat == 0).sum() == 0 + if torch.onnx.is_in_onnx_export(): + # cast count_mat to constant while exporting to ONNX + count_mat = torch.from_numpy( + count_mat.cpu().detach().numpy()).to(device=img.device) + preds = preds / count_mat + if rescale: + preds = resize( + preds, + size=img_meta[0]['ori_shape'][:2], + mode='bilinear', + align_corners=self.align_corners, + warning=False) + return preds + + def whole_inference(self, img, img_meta, rescale): + """Inference with full image.""" + + seg_logit = self.encode_decode(img, img_meta) + if rescale: + # support dynamic shape for onnx + if torch.onnx.is_in_onnx_export(): + size = img.shape[2:] + else: + size = img_meta[0]['ori_shape'][:2] + seg_logit = resize( + seg_logit, + size=size, + mode='bilinear', + align_corners=self.align_corners, + warning=False) + + return seg_logit + + def inference(self, img, img_meta, rescale): + """Inference with slide/whole style. + + Args: + img (Tensor): The input image of shape (N, 3, H, W). + img_meta (dict): Image info dict where each dict has: 'img_shape', + 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + rescale (bool): Whether rescale back to original shape. + + Returns: + Tensor: The output segmentation map. 
+ """ + + assert self.test_cfg.mode in ['slide', 'whole'] + ori_shape = img_meta[0]['ori_shape'] + assert all(_['ori_shape'] == ori_shape for _ in img_meta) + if self.test_cfg.mode == 'slide': + seg_logit = self.slide_inference(img, img_meta, rescale) + else: + seg_logit = self.whole_inference(img, img_meta, rescale) + output = F.softmax(seg_logit, dim=1) + flip = img_meta[0]['flip'] + if flip: + flip_direction = img_meta[0]['flip_direction'] + assert flip_direction in ['horizontal', 'vertical'] + if flip_direction == 'horizontal': + output = output.flip(dims=(3, )) + elif flip_direction == 'vertical': + output = output.flip(dims=(2, )) + + return output + + def simple_test(self, img, img_meta, rescale=True): + """Simple test with single image.""" + seg_logit = self.inference(img, img_meta, rescale) + seg_pred = seg_logit.argmax(dim=1) + if torch.onnx.is_in_onnx_export(): + # our inference backend only support 4D output + seg_pred = seg_pred.unsqueeze(0) + return seg_pred + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, rescale=True): + """Test with augmentations. + + Only rescale=True is supported. + """ + # aug_test rescale all imgs back to ori_shape for now + assert rescale + # to save memory, we get augmented seg logit inplace + seg_logit = self.inference(imgs[0], img_metas[0], rescale) + for i in range(1, len(imgs)): + cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) + seg_logit += cur_seg_logit + seg_logit /= len(imgs) + seg_pred = seg_logit.argmax(dim=1) + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d3bdd349b9f2ae499a2fcb2ac1d2e3c77befebe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/__init__.py @@ -0,0 +1,13 @@ +from .drop import DropPath +from .inverted_residual import InvertedResidual, InvertedResidualV3 +from .make_divisible import make_divisible +from .res_layer import ResLayer +from .se_layer import SELayer +from .self_attention_block import SelfAttentionBlock +from .up_conv_block import UpConvBlock +from .weight_init import trunc_normal_ + +__all__ = [ + 'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual', + 'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'DropPath', 'trunc_normal_' +] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/drop.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..4520b0ff407d2a95a864086bdbca0065f222aa63 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/drop.py @@ -0,0 +1,31 @@ +"""Modified from https://github.com/rwightman/pytorch-image- +models/blob/master/timm/models/layers/drop.py.""" + +import torch +from torch import nn + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). + + Args: + drop_prob (float): Drop rate for paths of model. Dropout rate has + to be between 0 and 1. Default: 0. 
+ """ + + def __init__(self, drop_prob=0.): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.keep_prob = 1 - drop_prob + + def forward(self, x): + if self.drop_prob == 0. or not self.training: + return x + shape = (x.shape[0], ) + (1, ) * ( + x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = self.keep_prob + torch.rand( + shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(self.keep_prob) * random_tensor + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/inverted_residual.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/inverted_residual.py new file mode 100644 index 0000000000000000000000000000000000000000..0fb93391f83c15c91cca833a296b922380607e66 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/inverted_residual.py @@ -0,0 +1,208 @@ +from custom_mmpkg.custom_mmcv.cnn import ConvModule +from torch import nn +from torch.utils import checkpoint as cp + +from .se_layer import SELayer + + +class InvertedResidual(nn.Module): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): Adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + dilation (int): Dilation rate of depthwise conv. Default: 1 + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + dilation=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +class InvertedResidualV3(nn.Module): + """Inverted Residual Block for MobileNetV3. 
+ + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Default: 3. + stride (int): The stride of the depthwise convolution. Default: 1. + se_cfg (dict): Config dict for se layer. Default: None, which means no + se layer. + with_expand_conv (bool): Use expand conv or not. If set False, + mid_channels must be the same with in_channels. Default: True. + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_expand_conv=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super(InvertedResidualV3, self).__init__() + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.with_se = se_cfg is not None + self.with_expand_conv = with_expand_conv + + if self.with_se: + assert isinstance(se_cfg, dict) + if not self.with_expand_conv: + assert mid_channels == in_channels + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=dict( + type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + out + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/make_divisible.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/make_divisible.py new file mode 100644 index 0000000000000000000000000000000000000000..75ad756052529f52fe83bb95dd1f0ecfc9a13078 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/make_divisible.py @@ -0,0 +1,27 @@ +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number to the nearest value that can be + divisible by the divisor. It is taken from the original tf repo. 
It ensures
+    that all layers have a channel number that is divisible by divisor. It can
+    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py  # noqa
+
+    Args:
+        value (int): The original channel number.
+        divisor (int): The divisor to fully divide the channel number.
+        min_value (int): The minimum value of the output channel.
+            Default: None, which means that the minimum value is equal to
+            the divisor.
+        min_ratio (float): The minimum ratio of the rounded channel number to
+            the original channel number. Default: 0.9.
+
+    Returns:
+        int: The modified output channel number.
+    """
+
+    if min_value is None:
+        min_value = divisor
+    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
+    # Make sure that round down does not go down by more than (1-min_ratio).
+    if new_value < min_ratio * value:
+        new_value += divisor
+    return new_value
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/res_layer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/res_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1379e62e7c6e591fae25e57d548c5f735e3ad33a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/res_layer.py
@@ -0,0 +1,94 @@
+from custom_mmpkg.custom_mmcv.cnn import build_conv_layer, build_norm_layer
+from torch import nn as nn
+
+
+class ResLayer(nn.Sequential):
+    """ResLayer to build ResNet style backbone.
+
+    Args:
+        block (nn.Module): block used to build ResLayer.
+        inplanes (int): inplanes of block.
+        planes (int): planes of block.
+        num_blocks (int): number of blocks.
+        stride (int): stride of the first block. Default: 1
+        dilation (int): dilation rate used by the blocks. Default: 1
+        avg_down (bool): Use AvgPool instead of stride conv when
+            downsampling in the bottleneck. Default: False
+        conv_cfg (dict): dictionary to construct and config conv layer.
+            Default: None
+        norm_cfg (dict): dictionary to construct and config norm layer.
+            Default: dict(type='BN')
+        multi_grid (int | None): Multi grid dilation rates of last
+            stage. Default: None
+        contract_dilation (bool): Whether to contract the first dilation of
+            each layer. Default: False
+    """
+
+    def __init__(self,
+                 block,
+                 inplanes,
+                 planes,
+                 num_blocks,
+                 stride=1,
+                 dilation=1,
+                 avg_down=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 multi_grid=None,
+                 contract_dilation=False,
+                 **kwargs):
+        self.block = block
+
+        downsample = None
+        if stride != 1 or inplanes != planes * block.expansion:
+            downsample = []
+            conv_stride = stride
+            if avg_down:
+                conv_stride = 1
+                downsample.append(
+                    nn.AvgPool2d(
+                        kernel_size=stride,
+                        stride=stride,
+                        ceil_mode=True,
+                        count_include_pad=False))
+            downsample.extend([
+                build_conv_layer(
+                    conv_cfg,
+                    inplanes,
+                    planes * block.expansion,
+                    kernel_size=1,
+                    stride=conv_stride,
+                    bias=False),
+                build_norm_layer(norm_cfg, planes * block.expansion)[1]
+            ])
+            downsample = nn.Sequential(*downsample)
+
+        layers = []
+        if multi_grid is None:
+            if dilation > 1 and contract_dilation:
+                first_dilation = dilation // 2
+            else:
+                first_dilation = dilation
+        else:
+            first_dilation = multi_grid[0]
+        layers.append(
+            block(
+                inplanes=inplanes,
+                planes=planes,
+                stride=stride,
+                dilation=first_dilation,
+                downsample=downsample,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                **kwargs))
+        inplanes = planes * block.expansion
+        for i in range(1, num_blocks):
+            layers.append(
+                block(
+                    inplanes=inplanes,
+                    planes=planes,
+                    stride=1,
+                    dilation=dilation if multi_grid is None else multi_grid[i],
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg,
+                    **kwargs))
+        super(ResLayer, self).__init__(*layers)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/se_layer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/se_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ec17f8d6713a441de2400186ded50ff651821ca
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/se_layer.py
@@ -0,0 +1,57 @@
+import custom_mmpkg.custom_mmcv as mmcv
+import torch.nn as nn
+from custom_mmpkg.custom_mmcv.cnn import ConvModule
+
+from .make_divisible import make_divisible
+
+
+class SELayer(nn.Module):
+    """Squeeze-and-Excitation Module.
+
+    Args:
+        channels (int): The input (and output) channels of the SE layer.
+        ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
+            ``int(channels/ratio)``. Default: 16.
+        conv_cfg (None or dict): Config dict for convolution layer.
+            Default: None, which means using conv2d.
+        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
+            If act_cfg is a dict, two activation layers will be configured
+            by this dict. If act_cfg is a sequence of dicts, the first
+            activation layer will be configured by the first dict and the
+            second activation layer will be configured by the second dict.
+            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
+            divisor=6.0)).
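+
+    Example (a minimal sketch, assuming the vendored ConvModule and its
+    'HSigmoid' activation are importable):
+        >>> import torch
+        >>> layer = SELayer(channels=64)
+        >>> x = torch.rand(1, 64, 32, 32)
+        >>> layer(x).shape  # channel-wise re-weighting keeps the shape
+        torch.Size([1, 64, 32, 32])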
+ """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), + dict(type='HSigmoid', bias=3.0, divisor=6.0))): + super(SELayer, self).__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=make_divisible(channels // ratio, 8), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=make_divisible(channels // ratio, 8), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/self_attention_block.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/self_attention_block.py new file mode 100644 index 0000000000000000000000000000000000000000..ad20ca6cf14b4dce040f350dbdd0fee6ce5ed9cf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/self_attention_block.py @@ -0,0 +1,159 @@ +import torch +from custom_mmpkg.custom_mmcv.cnn import ConvModule, constant_init +from torch import nn as nn +from torch.nn import functional as F + + +class SelfAttentionBlock(nn.Module): + """General self-attention block/non-local block. + + Please refer to https://arxiv.org/abs/1706.03762 for details about key, + query and value. + + Args: + key_in_channels (int): Input channels of key feature. + query_in_channels (int): Input channels of query feature. + channels (int): Output channels of key/query transform. + out_channels (int): Output channels. + share_key_query (bool): Whether share projection weight between key + and query projection. + query_downsample (nn.Module): Query downsample module. + key_downsample (nn.Module): Key downsample module. + key_query_num_convs (int): Number of convs for key/query projection. + value_num_convs (int): Number of convs for value projection. + matmul_norm (bool): Whether normalize attention map with sqrt of + channels + with_out (bool): Whether use out projection. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict|None): Config of activation layers. 
+ """ + + def __init__(self, key_in_channels, query_in_channels, channels, + out_channels, share_key_query, query_downsample, + key_downsample, key_query_num_convs, value_out_num_convs, + key_query_norm, value_out_norm, matmul_norm, with_out, + conv_cfg, norm_cfg, act_cfg): + super(SelfAttentionBlock, self).__init__() + if share_key_query: + assert key_in_channels == query_in_channels + self.key_in_channels = key_in_channels + self.query_in_channels = query_in_channels + self.out_channels = out_channels + self.channels = channels + self.share_key_query = share_key_query + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.key_project = self.build_project( + key_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if share_key_query: + self.query_project = self.key_project + else: + self.query_project = self.build_project( + query_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.value_project = self.build_project( + key_in_channels, + channels if with_out else out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if with_out: + self.out_project = self.build_project( + channels, + out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.out_project = None + + self.query_downsample = query_downsample + self.key_downsample = key_downsample + self.matmul_norm = matmul_norm + + self.init_weights() + + def init_weights(self): + """Initialize weight of later layer.""" + if self.out_project is not None: + if not isinstance(self.out_project, ConvModule): + constant_init(self.out_project, 0) + + def build_project(self, in_channels, channels, num_convs, use_conv_module, + conv_cfg, norm_cfg, act_cfg): + """Build projection layer for key/query/value/out.""" + if use_conv_module: + convs = [ + ConvModule( + in_channels, + channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ] + for _ in range(num_convs - 1): + convs.append( + ConvModule( + channels, + channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + else: + convs = [nn.Conv2d(in_channels, channels, 1)] + for _ in range(num_convs - 1): + convs.append(nn.Conv2d(channels, channels, 1)) + if len(convs) > 1: + convs = nn.Sequential(*convs) + else: + convs = convs[0] + return convs + + def forward(self, query_feats, key_feats): + """Forward function.""" + batch_size = query_feats.size(0) + query = self.query_project(query_feats) + if self.query_downsample is not None: + query = self.query_downsample(query) + query = query.reshape(*query.shape[:2], -1) + query = query.permute(0, 2, 1).contiguous() + + key = self.key_project(key_feats) + value = self.value_project(key_feats) + if self.key_downsample is not None: + key = self.key_downsample(key) + value = self.key_downsample(value) + key = key.reshape(*key.shape[:2], -1) + value = value.reshape(*value.shape[:2], -1) + value = value.permute(0, 2, 1).contiguous() + + sim_map = torch.matmul(query, key) + if self.matmul_norm: + sim_map = (self.channels**-.5) * sim_map + sim_map = F.softmax(sim_map, dim=-1) + + context = torch.matmul(sim_map, value) + context = context.permute(0, 2, 1).contiguous() + context = 
context.reshape(batch_size, -1, *query_feats.shape[2:])
+        if self.out_project is not None:
+            context = self.out_project(context)
+        return context
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/up_conv_block.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/up_conv_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4320e261db00a8bd0ba2578bcf2fdde952d0270
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/up_conv_block.py
@@ -0,0 +1,101 @@
+import torch
+import torch.nn as nn
+from custom_mmpkg.custom_mmcv.cnn import ConvModule, build_upsample_layer
+
+
+class UpConvBlock(nn.Module):
+    """Upsample convolution block in decoder for UNet.
+
+    This upsample convolution block consists of one upsample module
+    followed by one convolution block. The upsample module expands the
+    high-level low-resolution feature map and the convolution block fuses
+    the upsampled high-level low-resolution feature map and the low-level
+    high-resolution feature map from encoder.
+
+    Args:
+        conv_block (nn.Sequential): Sequential of convolutional layers.
+        in_channels (int): Number of input channels of the high-level
+            low-resolution feature map to be upsampled.
+        skip_channels (int): Number of input channels of the low-level
+            high-resolution feature map from encoder.
+        out_channels (int): Number of output channels.
+        num_convs (int): Number of convolutional layers in the conv_block.
+            Default: 2.
+        stride (int): Stride of convolutional layer in conv_block. Default: 1.
+        dilation (int): Dilation rate of convolutional layer in conv_block.
+            Default: 1.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
+        conv_cfg (dict | None): Config dict for convolution layer.
+            Default: None.
+        norm_cfg (dict | None): Config dict for normalization layer.
+            Default: dict(type='BN').
+        act_cfg (dict | None): Config dict for activation layer in ConvModule.
+            Default: dict(type='ReLU').
+        upsample_cfg (dict): The upsample config of the upsample module in
+            decoder. Default: dict(type='InterpConv'). If the size of the
+            high-level feature map is the same as that of the skip feature map
+            (low-level feature map from encoder), the high-level feature map
+            does not need upsampling, and upsample_cfg should be set to None.
+        dcn (bool): Use deformable convolution in convolutional layer or not.
+            Default: None.
+        plugins (dict): plugins for convolutional layers. Default: None.
+    """
+
+    def __init__(self,
+                 conv_block,
+                 in_channels,
+                 skip_channels,
+                 out_channels,
+                 num_convs=2,
+                 stride=1,
+                 dilation=1,
+                 with_cp=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 act_cfg=dict(type='ReLU'),
+                 upsample_cfg=dict(type='InterpConv'),
+                 dcn=None,
+                 plugins=None):
+        super(UpConvBlock, self).__init__()
+        assert dcn is None, 'Not implemented yet.'
+        assert plugins is None, 'Not implemented yet.'
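+        # The upsample branch maps in_channels -> skip_channels and its output
+        # is concatenated with the skip feature map, so conv_block receives
+        # 2 * skip_channels input channels.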
+ + self.conv_block = conv_block( + in_channels=2 * skip_channels, + out_channels=out_channels, + num_convs=num_convs, + stride=stride, + dilation=dilation, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None) + if upsample_cfg is not None: + self.upsample = build_upsample_layer( + cfg=upsample_cfg, + in_channels=in_channels, + out_channels=skip_channels, + with_cp=with_cp, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.upsample = ConvModule( + in_channels, + skip_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, skip, x): + """Forward function.""" + + x = self.upsample(x) + out = torch.cat([skip, x], dim=1) + out = self.conv_block(out) + + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/weight_init.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/weight_init.py new file mode 100644 index 0000000000000000000000000000000000000000..38141ba3d61f64ddfc0a31574b4648cbad96d7dd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/models/utils/weight_init.py @@ -0,0 +1,62 @@ +"""Modified from https://github.com/rwightman/pytorch-image- +models/blob/master/timm/models/layers/drop.py.""" + +import math +import warnings + +import torch + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + """Reference: https://people.sc.fsu.edu/~jburkardt/presentations + /truncated_normal.pdf""" + + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' + 'The distribution of values may be incorrect.', + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + lower_bound = norm_cdf((a - mean) / std) + upper_bound = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * lower_bound - 1, 2 * upper_bound - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
+ Args: + tensor (``torch.Tensor``): an n-dimensional `torch.Tensor` + mean (float): the mean of the normal distribution + std (float): the standard deviation of the normal distribution + a (float): the minimum cutoff value + b (float): the maximum cutoff value + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bec51c75b9363a9a19e9fb5c35f4e7dbd6f7751c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/__init__.py @@ -0,0 +1,4 @@ +from .encoding import Encoding +from .wrappers import Upsample, resize + +__all__ = ['Upsample', 'resize', 'Encoding'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/encoding.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb3629a6426550b8e4c537ee1ff4341893e489e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/encoding.py @@ -0,0 +1,74 @@ +import torch +from torch import nn +from torch.nn import functional as F + + +class Encoding(nn.Module): + """Encoding Layer: a learnable residual encoder. + + Input is of shape (batch_size, channels, height, width). + Output is of shape (batch_size, num_codes, channels). + + Args: + channels: dimension of the features or feature channels + num_codes: number of code words + """ + + def __init__(self, channels, num_codes): + super(Encoding, self).__init__() + # init codewords and smoothing factor + self.channels, self.num_codes = channels, num_codes + std = 1. 
/ ((num_codes * channels)**0.5) + # [num_codes, channels] + self.codewords = nn.Parameter( + torch.empty(num_codes, channels, + dtype=torch.float).uniform_(-std, std), + requires_grad=True) + # [num_codes] + self.scale = nn.Parameter( + torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0), + requires_grad=True) + + @staticmethod + def scaled_l2(x, codewords, scale): + num_codes, channels = codewords.size() + batch_size = x.size(0) + reshaped_scale = scale.view((1, 1, num_codes)) + expanded_x = x.unsqueeze(2).expand( + (batch_size, x.size(1), num_codes, channels)) + reshaped_codewords = codewords.view((1, 1, num_codes, channels)) + + scaled_l2_norm = reshaped_scale * ( + expanded_x - reshaped_codewords).pow(2).sum(dim=3) + return scaled_l2_norm + + @staticmethod + def aggregate(assignment_weights, x, codewords): + num_codes, channels = codewords.size() + reshaped_codewords = codewords.view((1, 1, num_codes, channels)) + batch_size = x.size(0) + + expanded_x = x.unsqueeze(2).expand( + (batch_size, x.size(1), num_codes, channels)) + encoded_feat = (assignment_weights.unsqueeze(3) * + (expanded_x - reshaped_codewords)).sum(dim=1) + return encoded_feat + + def forward(self, x): + assert x.dim() == 4 and x.size(1) == self.channels + # [batch_size, channels, height, width] + batch_size = x.size(0) + # [batch_size, height x width, channels] + x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() + # assignment_weights: [batch_size, channels, num_codes] + assignment_weights = F.softmax( + self.scaled_l2(x, self.codewords, self.scale), dim=2) + # aggregate + encoded_feat = self.aggregate(assignment_weights, x, self.codewords) + return encoded_feat + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \ + f'x{self.channels})' + return repr_str diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/wrappers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..0ed9a0cb8d7c0e0ec2748dd89c652756653cac78 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/ops/wrappers.py @@ -0,0 +1,50 @@ +import warnings + +import torch.nn as nn +import torch.nn.functional as F + + +def resize(input, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None, + warning=True): + if warning: + if size is not None and align_corners: + input_h, input_w = tuple(int(x) for x in input.shape[2:]) + output_h, output_w = tuple(int(x) for x in size) + if output_h > input_h or output_w > input_w: + if ((output_h > 1 and output_w > 1 and input_h > 1 + and input_w > 1) and (output_h - 1) % (input_h - 1) + and (output_w - 1) % (input_w - 1)): + warnings.warn( + f'When align_corners={align_corners}, ' + 'the output would be more aligned if ' + f'input size {(input_h, input_w)} is `x+1` and ' + f'out size {(output_h, output_w)} is `nx+1`') + return F.interpolate(input, size, scale_factor, mode, align_corners) + + +class Upsample(nn.Module): + + def __init__(self, + size=None, + scale_factor=None, + mode='nearest', + align_corners=None): + super(Upsample, self).__init__() + self.size = size + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + if not self.size: + size
= [int(t * self.scale_factor) for t in x.shape[-2:]] + else: + size = self.size + return resize(x, size, None, self.mode, self.align_corners) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ac489e2dbbc0e6fa87f5088b4edcc20f8cadc1a6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/__init__.py @@ -0,0 +1,4 @@ +from .collect_env import collect_env +from .logger import get_root_logger + +__all__ = ['get_root_logger', 'collect_env'] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/collect_env.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5cd6ffee77a234e7c54d6990d273bbd872b8f5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/collect_env.py @@ -0,0 +1,17 @@ +from custom_mmpkg.custom_mmcv.utils import collect_env as collect_base_env +from custom_mmpkg.custom_mmcv.utils import get_git_hash + +import custom_mmpkg.custom_mmseg as mmseg + + +def collect_env(): + """Collect the information of the running environments.""" + env_info = collect_base_env() + env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}' + + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print('{}: {}'.format(name, val)) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/logger.py b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..bc2ac05b20a1f7f24a3e7876757d87b00972a69d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_mmpkg/custom_mmseg/utils/logger.py @@ -0,0 +1,27 @@ +import logging + +from custom_mmpkg.custom_mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmseg". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. + """ + + logger = get_logger(name='mmseg', log_file=log_file, log_level=log_level) + + return logger diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39ebcd384f616ae2ba170407cee3267d461a5914 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from . import data # register all new datasets +from . 
import modeling + +# config +from .config import * + +# models +from .oneformer_model import OneFormer \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/config.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/config.py new file mode 100644 index 0000000000000000000000000000000000000000..fb803f468277ba0c1fdbb8635910e75c80cd90c3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/config.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +from custom_detectron2.config import CfgNode as CN + +__all__ = ["add_common_config", "add_oneformer_config", "add_swin_config", + "add_dinat_config", "add_beit_adapter_config", "add_convnext_config"] + +def add_common_config(cfg): + """ + Add config for common configuration + """ + # data config + # select the dataset mapper + cfg.INPUT.DATASET_MAPPER_NAME = "oneformer_unified" + # Color augmentation + cfg.INPUT.COLOR_AUG_SSD = False + # We retry random cropping until no single category in semantic segmentation GT occupies more + # than `SINGLE_CATEGORY_MAX_AREA` part of the crop. + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0 + # Pad image and segmentation GT in dataset mapper. + cfg.INPUT.SIZE_DIVISIBILITY = -1 + + cfg.INPUT.TASK_SEQ_LEN = 77 + cfg.INPUT.MAX_SEQ_LEN = 77 + + cfg.INPUT.TASK_PROB = CN() + cfg.INPUT.TASK_PROB.SEMANTIC = 0.33 + cfg.INPUT.TASK_PROB.INSTANCE = 0.66 + + # test dataset + cfg.DATASETS.TEST_PANOPTIC = ("",) + cfg.DATASETS.TEST_INSTANCE = ("",) + cfg.DATASETS.TEST_SEMANTIC = ("",) + + # solver config + # weight decay on embedding + cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0 + # optimizer + cfg.SOLVER.OPTIMIZER = "ADAMW" + cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1 + + # wandb + cfg.WANDB = CN() + cfg.WANDB.PROJECT = "unified_dense_recognition" + cfg.WANDB.NAME = None + + cfg.MODEL.IS_TRAIN = False + cfg.MODEL.IS_DEMO = True + + # text encoder config + cfg.MODEL.TEXT_ENCODER = CN() + + cfg.MODEL.TEXT_ENCODER.WIDTH = 256 + cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH = 77 + cfg.MODEL.TEXT_ENCODER.NUM_LAYERS = 12 + cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE = 49408 + cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS = 2 + cfg.MODEL.TEXT_ENCODER.N_CTX = 16 + + # mask_former inference config + cfg.MODEL.TEST = CN() + cfg.MODEL.TEST.SEMANTIC_ON = True + cfg.MODEL.TEST.INSTANCE_ON = False + cfg.MODEL.TEST.PANOPTIC_ON = False + cfg.MODEL.TEST.DETECTION_ON = False + cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD = 0.0 + cfg.MODEL.TEST.OVERLAP_THRESHOLD = 0.0 + cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False + cfg.MODEL.TEST.TASK = "panoptic" + + # TEST AUG Slide + cfg.TEST.AUG.IS_SLIDE = False + cfg.TEST.AUG.CROP_SIZE = (640, 640) + cfg.TEST.AUG.STRIDE = (426, 426) + cfg.TEST.AUG.SCALE = (2048, 640) + cfg.TEST.AUG.SETR_MULTI_SCALE = True + cfg.TEST.AUG.KEEP_RATIO = True + cfg.TEST.AUG.SIZE_DIVISOR = 32 + + # pixel decoder config + cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256 + # adding transformer in pixel decoder + cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0 + # pixel decoder + cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder" + cfg.MODEL.SEM_SEG_HEAD.SEM_EMBED_DIM = 256 + cfg.MODEL.SEM_SEG_HEAD.INST_EMBED_DIM = 256 + + # LSJ aug + cfg.INPUT.IMAGE_SIZE = 1024 + cfg.INPUT.MIN_SCALE = 0.1 + cfg.INPUT.MAX_SCALE = 2.0 + + # MSDeformAttn encoder configs + cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"] + cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4 + 
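The `add_*_config` helpers in this file mutate a Detectron2 `CfgNode` in place, and they have to run before any YAML is merged, because `CfgNode` rejects keys it has not seen. A hedged usage sketch, assuming the vendored `custom_detectron2.config` keeps upstream detectron2's `get_cfg`/`merge_from_file` (the YAML path is hypothetical):

```python
from custom_detectron2.config import get_cfg
from custom_oneformer.config import (
    add_common_config,
    add_oneformer_config,
    add_swin_config,
)

# Register OneFormer's extra keys on the default config first;
# merging a YAML that mentions them before this step would raise.
cfg = get_cfg()
add_common_config(cfg)
add_oneformer_config(cfg)
add_swin_config(cfg)
# cfg.merge_from_file("configs/oneformer_swin_large.yaml")  # hypothetical path
print(cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES)  # 120, from add_oneformer_config below
```

`add_common_config` itself continues in the diff below.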
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8 + +def add_oneformer_config(cfg): + """ + Add config for ONE_FORMER. + """ + + # mask_former model config + cfg.MODEL.ONE_FORMER = CN() + + # loss + cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION = True + cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT = 0.1 + cfg.MODEL.ONE_FORMER.CLASS_WEIGHT = 1.0 + cfg.MODEL.ONE_FORMER.DICE_WEIGHT = 1.0 + cfg.MODEL.ONE_FORMER.MASK_WEIGHT = 20.0 + cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT = 0.5 + cfg.MODEL.ONE_FORMER.CONTRASTIVE_TEMPERATURE = 0.07 + + # transformer config + cfg.MODEL.ONE_FORMER.NHEADS = 8 + cfg.MODEL.ONE_FORMER.DROPOUT = 0.1 + cfg.MODEL.ONE_FORMER.DIM_FEEDFORWARD = 2048 + cfg.MODEL.ONE_FORMER.ENC_LAYERS = 0 + cfg.MODEL.ONE_FORMER.CLASS_DEC_LAYERS = 2 + cfg.MODEL.ONE_FORMER.DEC_LAYERS = 6 + cfg.MODEL.ONE_FORMER.PRE_NORM = False + + cfg.MODEL.ONE_FORMER.HIDDEN_DIM = 256 + cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES = 120 + cfg.MODEL.ONE_FORMER.NUM_OBJECT_CTX = 16 + cfg.MODEL.ONE_FORMER.USE_TASK_NORM = True + + cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE = "res5" + cfg.MODEL.ONE_FORMER.ENFORCE_INPUT_PROJ = False + + # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. ResNet) + # you can use this config to override + cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY = 32 + + # transformer module + cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME = "ContrastiveMultiScaleMaskedTransformerDecoder" + + # point loss configs + # Number of points sampled during training for a mask point head. + cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS = 112 * 112 + # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the + # original paper. + cfg.MODEL.ONE_FORMER.OVERSAMPLE_RATIO = 3.0 + # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in + # the original paper. + cfg.MODEL.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75 + +def add_swin_config(cfg): + """ + Add config for SWIN Backbone. + """ + + # swin transformer backbone + cfg.MODEL.SWIN = CN() + cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224 + cfg.MODEL.SWIN.PATCH_SIZE = 4 + cfg.MODEL.SWIN.EMBED_DIM = 96 + cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] + cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] + cfg.MODEL.SWIN.WINDOW_SIZE = 7 + cfg.MODEL.SWIN.MLP_RATIO = 4.0 + cfg.MODEL.SWIN.QKV_BIAS = True + cfg.MODEL.SWIN.QK_SCALE = None + cfg.MODEL.SWIN.DROP_RATE = 0.0 + cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0 + cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3 + cfg.MODEL.SWIN.APE = False + cfg.MODEL.SWIN.PATCH_NORM = True + cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"] + cfg.MODEL.SWIN.USE_CHECKPOINT = False + ## Semask additions + cfg.MODEL.SWIN.SEM_WINDOW_SIZE = 7 + cfg.MODEL.SWIN.NUM_SEM_BLOCKS = 1 + +def add_dinat_config(cfg): + """ + Add config for NAT Backbone. + """ + + # DINAT transformer backbone + cfg.MODEL.DiNAT = CN() + cfg.MODEL.DiNAT.DEPTHS = [3, 4, 18, 5] + cfg.MODEL.DiNAT.OUT_FEATURES = ["res2", "res3", "res4", "res5"] + cfg.MODEL.DiNAT.EMBED_DIM = 64 + cfg.MODEL.DiNAT.MLP_RATIO = 3.0 + cfg.MODEL.DiNAT.NUM_HEADS = [2, 4, 8, 16] + cfg.MODEL.DiNAT.DROP_PATH_RATE = 0.2 + cfg.MODEL.DiNAT.KERNEL_SIZE = 7 + cfg.MODEL.DiNAT.DILATIONS = [[1, 16, 1], [1, 4, 1, 8], [1, 2, 1, 3, 1, 4], [1, 2, 1, 2, 1]] + cfg.MODEL.DiNAT.OUT_INDICES = (0, 1, 2, 3) + cfg.MODEL.DiNAT.QKV_BIAS = True + cfg.MODEL.DiNAT.QK_SCALE = None + cfg.MODEL.DiNAT.DROP_RATE = 0 + cfg.MODEL.DiNAT.ATTN_DROP_RATE = 0. + cfg.MODEL.DiNAT.IN_PATCH_SIZE = 4 + +def add_convnext_config(cfg): + """ + Add config for ConvNeXt Backbone.
+ """ + + # swin transformer backbone + cfg.MODEL.CONVNEXT = CN() + cfg.MODEL.CONVNEXT.IN_CHANNELS = 3 + cfg.MODEL.CONVNEXT.DEPTHS = [3, 3, 27, 3] + cfg.MODEL.CONVNEXT.DIMS = [192, 384, 768, 1536] + cfg.MODEL.CONVNEXT.DROP_PATH_RATE = 0.4 + cfg.MODEL.CONVNEXT.LSIT = 1.0 + cfg.MODEL.CONVNEXT.OUT_INDICES = [0, 1, 2, 3] + cfg.MODEL.CONVNEXT.OUT_FEATURES = ["res2", "res3", "res4", "res5"] + +def add_beit_adapter_config(cfg): + """ + Add config for BEiT Adapter Backbone. + """ + + # beit adapter backbone + cfg.MODEL.BEiTAdapter = CN() + cfg.MODEL.BEiTAdapter.IMG_SIZE = 640 + cfg.MODEL.BEiTAdapter.PATCH_SIZE = 16 + cfg.MODEL.BEiTAdapter.EMBED_DIM = 1024 + cfg.MODEL.BEiTAdapter.DEPTH = 24 + cfg.MODEL.BEiTAdapter.NUM_HEADS = 16 + cfg.MODEL.BEiTAdapter.MLP_RATIO = 4 + cfg.MODEL.BEiTAdapter.QKV_BIAS = True + cfg.MODEL.BEiTAdapter.USE_ABS_POS_EMB = False + cfg.MODEL.BEiTAdapter.USE_REL_POS_BIAS = True + cfg.MODEL.BEiTAdapter.INIT_VALUES = 1e-6 + cfg.MODEL.BEiTAdapter.DROP_PATH_RATE = 0.3 + cfg.MODEL.BEiTAdapter.CONV_INPLANE = 64 + cfg.MODEL.BEiTAdapter.N_POINTS = 4 + cfg.MODEL.BEiTAdapter.DEFORM_NUM_HEADS = 16 + cfg.MODEL.BEiTAdapter.CFFN_RATIO = 0.25 + cfg.MODEL.BEiTAdapter.DEFORM_RATIO = 0.5 + cfg.MODEL.BEiTAdapter.WITH_CP = True + cfg.MODEL.BEiTAdapter.INTERACTION_INDEXES=[[0, 5], [6, 11], [12, 17], [18, 23]] + cfg.MODEL.BEiTAdapter.OUT_FEATURES = ["res2", "res3", "res4", "res5"] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63ba265b1effc69f1eef16e57a04db8902ee347e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from . import datasets diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/bpe_simple_vocab_16e6.txt.gz b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/build.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/build.py new file mode 100644 index 0000000000000000000000000000000000000000..7668f3451de4d5e338e6987a9da97d5812e838ca --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/build.py @@ -0,0 +1,117 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from typing import Any, Callable, Dict, List, Optional, Union +import torch.utils.data as torchdata + +from custom_detectron2.config import configurable + + +from custom_detectron2.data.common import DatasetFromList, MapDataset +from custom_detectron2.data.dataset_mapper import DatasetMapper +from custom_detectron2.data.samplers import ( + InferenceSampler, +) +from custom_detectron2.data.build import ( + get_detection_dataset_dicts, + trivial_batch_collator +) +""" +This file contains the default logic to build a dataloader for training or testing. 
+""" + +__all__ = [ + "build_detection_test_loader", +] + + +def _test_loader_from_config(cfg, dataset_name, mapper=None): + """ + Uses the given `dataset_name` argument (instead of the names in cfg), because the + standard practice is to evaluate each test set individually (not combining them). + """ + if isinstance(dataset_name, str): + dataset_name = [dataset_name] + + dataset = get_detection_dataset_dicts( + dataset_name, + filter_empty=False, + proposal_files=[ + cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name + ] + if cfg.MODEL.LOAD_PROPOSALS + else None, + ) + if mapper is None: + mapper = DatasetMapper(cfg, False) + return { + "dataset": dataset, + "mapper": mapper, + "num_workers": cfg.DATALOADER.NUM_WORKERS, + "sampler": InferenceSampler(len(dataset)) + if not isinstance(dataset, torchdata.IterableDataset) + else None, + } + + +@configurable(from_config=_test_loader_from_config) +def build_detection_test_loader( + dataset: Union[List[Any], torchdata.Dataset], + *, + mapper: Callable[[Dict[str, Any]], Any], + sampler: Optional[torchdata.Sampler] = None, + batch_size: int = 1, + num_workers: int = 0, + collate_fn: Optional[Callable[[List[Any]], Any]] = None, +) -> torchdata.DataLoader: + """ + Similar to `build_detection_train_loader`, with default batch size = 1, + and sampler = :class:`InferenceSampler`. This sampler coordinates all workers + to produce the exact set of all samples. + + Args: + dataset: a list of dataset dicts, + or a pytorch dataset (either map-style or iterable). They can be obtained + by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. + mapper: a callable which takes a sample (dict) from dataset + and returns the format to be consumed by the model. + When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. + sampler: a sampler that produces + indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, + which splits the dataset across all workers. Sampler must be None + if `dataset` is iterable. + batch_size: the batch size of the data loader to be created. + Default to 1 image per worker since this is the standard when reporting + inference time in papers. + num_workers: number of parallel data loading workers + collate_fn: same as the argument of `torch.utils.data.DataLoader`. + Defaults to do no collation and return a list of data. + + Returns: + DataLoader: a torch DataLoader, that loads the given detection + dataset, with test-time transformation and batching. 
+ + Examples: + :: + data_loader = build_detection_test_loader( + DatasetRegistry.get("my_test"), + mapper=DatasetMapper(...)) + + # or, instantiate with a CfgNode: + data_loader = build_detection_test_loader(cfg, "my_test") + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False) + if mapper is not None: + dataset = MapDataset(dataset, mapper) + if isinstance(dataset, torchdata.IterableDataset): + assert sampler is None, "sampler must be None if dataset is IterableDataset" + else: + if sampler is None: + sampler = InferenceSampler(len(dataset)) + return torchdata.DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + drop_last=False, + num_workers=num_workers, + collate_fn=trivial_batch_collator if collate_fn is None else collate_fn, + ) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/__init__.py @@ -0,0 +1 @@ + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..44f3598e4b2c220356d55f3a415253f4b4a1fd79 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py @@ -0,0 +1,341 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import copy +import logging + +import numpy as np +import torch + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.config import configurable +from custom_detectron2.data import detection_utils as utils +from custom_detectron2.data import transforms as T +from custom_detectron2.structures import BitMasks, Instances +from custom_oneformer.utils.box_ops import masks_to_boxes +from custom_oneformer.data.tokenizer import SimpleTokenizer, Tokenize + +__all__ = ["COCOUnifiedNewBaselineDatasetMapper"] + + +def build_transform_gen(cfg, is_train): + """ + Create a list of default :class:`Augmentation` from config. + Now it includes resizing and flipping. + Returns: + list[Augmentation] + """ + assert is_train, "Only support training augmentation" + image_size = cfg.INPUT.IMAGE_SIZE + min_scale = cfg.INPUT.MIN_SCALE + max_scale = cfg.INPUT.MAX_SCALE + + augmentation = [] + + if cfg.INPUT.RANDOM_FLIP != "none": + augmentation.append( + T.RandomFlip( + horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", + vertical=cfg.INPUT.RANDOM_FLIP == "vertical", + ) + ) + + augmentation.extend([ + T.ResizeScale( + min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size + ), + T.FixedSizeCrop(crop_size=(image_size, image_size)), + ]) + + return augmentation + + +# This is specifically designed for the COCO dataset. 
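`build_transform_gen` above assembles the large-scale-jitter (LSJ) recipe: an optional flip, `ResizeScale` into `[MIN_SCALE, MAX_SCALE]` of the square `IMAGE_SIZE` target, then a fixed square crop (with padding). A hedged sketch using the defaults from `add_common_config` (1024 target, 0.1-2.0 scale), assuming the vendored transforms mirror upstream detectron2:

```python
import numpy as np
from custom_detectron2.data import transforms as T

# LSJ in isolation: jitter the scale wildly, then crop/pad to a fixed square.
augs = [
    T.RandomFlip(horizontal=True),
    T.ResizeScale(min_scale=0.1, max_scale=2.0,
                  target_height=1024, target_width=1024),
    T.FixedSizeCrop(crop_size=(1024, 1024)),
]
image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy HWC image
image, tfms = T.apply_transform_gens(augs, image)
print(image.shape)  # (1024, 1024, 3) regardless of the sampled scale
```

The COCO-specific mapper that consumes these transform gens follows.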
+class COCOUnifiedNewBaselineDatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by OneFormer. + + This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies geometric transforms to the image and annotation + 3. Find and applies suitable cropping to the image and annotation + 4. Prepare image and annotation to Tensors + """ + + @configurable + def __init__( + self, + is_train=True, + *, + num_queries, + tfm_gens, + meta, + image_format, + max_seq_len, + task_seq_len, + semantic_prob, + instance_prob, + ): + """ + NOTE: this interface is experimental. + Args: + is_train: for training or inference + augmentations: a list of augmentations or deterministic transforms to apply + crop_gen: crop augmentation + tfm_gens: data augmentation + image_format: an image format supported by :func:`detection_utils.read_image`. + """ + self.tfm_gens = tfm_gens + logging.getLogger(__name__).info( + "[COCOUnifiedNewBaselineDatasetMapper] Full TransformGens used in training: {}".format( + str(self.tfm_gens) + ) + ) + + self.img_format = image_format + self.is_train = is_train + self.meta = meta + self.ignore_label = self.meta.ignore_label + self.num_queries = num_queries + + self.things = [] + for k,v in self.meta.thing_dataset_id_to_contiguous_id.items(): + self.things.append(v) + self.class_names = self.meta.stuff_classes + self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) + self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) + self.semantic_prob = semantic_prob + self.instance_prob = instance_prob + + @classmethod + def from_config(cls, cfg, is_train=True): + # Build augmentation + tfm_gens = build_transform_gen(cfg, is_train) + dataset_names = cfg.DATASETS.TRAIN + meta = MetadataCatalog.get(dataset_names[0]) + + ret = { + "is_train": is_train, + "meta": meta, + "tfm_gens": tfm_gens, + "image_format": cfg.INPUT.FORMAT, + "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX, + "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, + "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, + "semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC, + "instance_prob": cfg.INPUT.TASK_PROB.INSTANCE, + } + return ret + + def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + instances = Instances(image_shape) + + classes = [] + texts = ["a semantic photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + if class_id not in classes: + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + else: + idx = classes.index(class_id) + masks[idx] += mask + masks[idx] = np.clip(masks[idx], 0, 1).astype(np.bool) + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, 
pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + # Placeholder bounding boxes for stuff regions. Note that these are not used during training. + instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0]) + return instances, texts, label + + def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + instances = Instances(image_shape) + + classes = [] + texts = ["an instance photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if class_id in self.things: + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + instances.gt_bboxes = masks_to_boxes(instances.gt_masks) + return instances, texts, label + + def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + instances = Instances(image_shape) + + classes = [] + texts = ["a panoptic photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + instances.gt_bboxes = masks_to_boxes(instances.gt_masks) + for i in range(instances.gt_classes.shape[0]): + # Placeholder bounding boxes for stuff regions. Note that these are not used during training. 
+ if instances.gt_classes[i].item() not in self.things: + instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.]) + return instances, texts, label + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + + image, transforms = T.apply_transform_gens(self.tfm_gens, image) + image_shape = image.shape[:2] # h, w + + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + + if not self.is_train: + # USER: Modify this if you want to keep them for some reason. + dataset_dict.pop("annotations", None) + return dataset_dict + + # semantic segmentation + if "sem_seg_file_name" in dataset_dict: + # PyTorch transformation not implemented for uint16, so converting it to double first + sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double") + sem_seg_gt = transforms.apply_segmentation(sem_seg_gt) + else: + sem_seg_gt = None + + if "pan_seg_file_name" in dataset_dict: + pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") + segments_info = dataset_dict["segments_info"] + + # apply the same transformation to panoptic segmentation + pan_seg_gt = transforms.apply_segmentation(pan_seg_gt) + + from panopticapi.utils import rgb2id + pan_seg_gt = rgb2id(pan_seg_gt) + + prob_task = np.random.uniform(0,1.) 
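The `prob_task` draw above feeds the branching just below: `TASK_PROB.SEMANTIC` and `TASK_PROB.INSTANCE` act as cumulative cutoffs rather than independent probabilities, so the defaults of 0.33 and 0.66 give roughly a one-third split per task. A small sketch of that arithmetic:

```python
import numpy as np

# Cumulative cutoffs: u < 0.33 -> semantic, 0.33 <= u < 0.66 -> instance,
# else panoptic. With these defaults each task is drawn ~1/3 of the time.
semantic_prob, instance_prob = 0.33, 0.66
counts = {"semantic": 0, "instance": 0, "panoptic": 0}
rng = np.random.default_rng(0)
for _ in range(9000):
    u = rng.uniform(0.0, 1.0)
    if u < semantic_prob:
        counts["semantic"] += 1
    elif u < instance_prob:
        counts["instance"] += 1
    else:
        counts["panoptic"] += 1
print(counts)  # roughly 3000 each
```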
+ + num_class_obj = {} + + for name in self.class_names: + num_class_obj[name] = 0 + + if prob_task < self.semantic_prob: + task = "The task is semantic" + instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + elif prob_task < self.instance_prob: + task = "The task is instance" + instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + else: + task = "The task is panoptic" + instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + + + dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long() + dataset_dict["instances"] = instances + dataset_dict["orig_shape"] = image_shape + dataset_dict["task"] = task + dataset_dict["text"] = text + dataset_dict["thing_ids"] = self.things + + return dataset_dict diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/dataset_mapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/dataset_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..004947e5044ac18eaa199986f4ede3e11eb78145 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/dataset_mapper.py @@ -0,0 +1,203 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/dataset_mapper.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import copy +import logging +import numpy as np +from typing import List, Optional, Union +import torch + +from custom_detectron2.config import configurable + +from custom_detectron2.data import detection_utils as utils +from custom_detectron2.data import transforms as T +from custom_oneformer.data.tokenizer import SimpleTokenizer, Tokenize + +__all__ = ["DatasetMapper"] + + +class DatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by the model. + + This is the default callable to be used to map your dataset dict into training data. + You may need to follow it to implement your own one for customized logic, + such as a different way to read or transform images. + See :doc:`/tutorials/data_loading` for details. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies cropping/geometric transforms to the image and annotations + 3. Prepare data and annotations to Tensor and :class:`Instances` + """ + + @configurable + def __init__( + self, + is_train: bool, + *, + augmentations: List[Union[T.Augmentation, T.Transform]], + image_format: str, + task_seq_len: int, + task: str = "panoptic", + use_instance_mask: bool = False, + use_keypoint: bool = False, + instance_mask_format: str = "polygon", + keypoint_hflip_indices: Optional[np.ndarray] = None, + precomputed_proposal_topk: Optional[int] = None, + recompute_boxes: bool = False, + ): + """ + NOTE: this interface is experimental. + + Args: + is_train: whether it's used in training or inference + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. 
+ use_instance_mask: whether to process instance segmentation annotations, if available + use_keypoint: whether to process keypoint annotations if available + instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation + masks into this format. + keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` + precomputed_proposal_topk: if given, will load pre-computed + proposals from dataset_dict and keep the top k proposals for each image. + recompute_boxes: whether to overwrite bounding box annotations + by computing tight bounding boxes from instance mask annotations. + """ + if recompute_boxes: + assert use_instance_mask, "recompute_boxes requires instance masks" + # fmt: off + self.is_train = is_train + self.augmentations = T.AugmentationList(augmentations) + self.image_format = image_format + self.use_instance_mask = use_instance_mask + self.instance_mask_format = instance_mask_format + self.use_keypoint = use_keypoint + self.keypoint_hflip_indices = keypoint_hflip_indices + self.proposal_topk = precomputed_proposal_topk + self.recompute_boxes = recompute_boxes + self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) + self.task = task + assert self.task in ["panoptic", "semantic", "instance"] + + # fmt: on + logger = logging.getLogger(__name__) + mode = "training" if is_train else "inference" + logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") + + @classmethod + def from_config(cls, cfg, is_train: bool = True): + augs = utils.build_augmentation(cfg, is_train) + if cfg.INPUT.CROP.ENABLED and is_train: + augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + recompute_boxes = cfg.MODEL.MASK_ON + else: + recompute_boxes = False + + ret = { + "is_train": is_train, + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "use_instance_mask": cfg.MODEL.MASK_ON, + "instance_mask_format": cfg.INPUT.MASK_FORMAT, + "use_keypoint": cfg.MODEL.KEYPOINT_ON, + "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, + "recompute_boxes": recompute_boxes, + "task": cfg.MODEL.TEST.TASK, + } + + if cfg.MODEL.KEYPOINT_ON: + ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + + if cfg.MODEL.LOAD_PROPOSALS: + ret["precomputed_proposal_topk"] = ( + cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN + if is_train + else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST + ) + return ret + + def _transform_annotations(self, dataset_dict, transforms, image_shape): + # USER: Modify this if you want to keep them for some reason. + for anno in dataset_dict["annotations"]: + if not self.use_instance_mask: + anno.pop("segmentation", None) + if not self.use_keypoint: + anno.pop("keypoints", None) + + # USER: Implement additional transformations if you have other types of data + annos = [ + utils.transform_instance_annotations( + obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices + ) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + instances = utils.annotations_to_instances( + annos, image_shape, mask_format=self.instance_mask_format + ) + + # After transforms such as cropping are applied, the bounding box may no longer + # tightly bound the object. As an example, imagine a triangle object + # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight + # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to + # the intersection of original bounding box and the cropping box. 
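The cropped-triangle comment above is easy to check numerically; a hedged sketch that rasterizes the example on a fine grid (coordinates match up to grid resolution):

```python
import numpy as np

# Triangle (0,0), (2,0), (0,2) cropped by the XYXY box (1, 0, 2, 2):
# the tight box of the surviving mask is (1, 0, 2, 1), not the
# box/crop intersection (1, 0, 2, 2).
ys, xs = np.mgrid[0:2:201j, 0:2:201j]        # 0.01 grid over [0, 2]^2
mask = (xs + ys) <= 2.0 + 1e-9               # inside the triangle (float tolerance)
mask &= xs >= 1.0                            # apply the crop x >= 1
yy, xx = np.nonzero(mask)
tight = (xs[0, xx.min()], ys[yy.min(), 0], xs[0, xx.max()], ys[yy.max(), 0])
print([round(float(v), 2) for v in tight])   # [1.0, 0.0, 2.0, 1.0]
```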
+ if self.recompute_boxes: + instances.gt_boxes = instances.gt_masks.get_bounding_boxes() + dataset_dict["instances"] = utils.filter_empty_instances(instances) + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + # USER: Write your own image loading if it's not from a file + image = utils.read_image(dataset_dict["file_name"], format=self.image_format) + utils.check_image_size(dataset_dict, image) + + task = f"The task is {self.task}" + dataset_dict["task"] = task + + # USER: Remove if you don't do semantic/panoptic segmentation. + if "sem_seg_file_name" in dataset_dict: + sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) + else: + sem_seg_gt = None + + aug_input = T.AugInput(image, sem_seg=sem_seg_gt) + transforms = self.augmentations(aug_input) + image, sem_seg_gt = aug_input.image, aug_input.sem_seg + + image_shape = image.shape[:2] # h, w + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + if sem_seg_gt is not None: + dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) + + # USER: Remove if you don't use pre-computed proposals. + # Most users would not need this feature. + if self.proposal_topk is not None: + utils.transform_proposals( + dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk + ) + + if not self.is_train: + # USER: Modify this if you want to keep them for some reason. 
+ dataset_dict.pop("annotations", None) + dataset_dict.pop("sem_seg_file_name", None) + return dataset_dict + + if "annotations" in dataset_dict: + self._transform_annotations(dataset_dict, transforms, image_shape) + + return dataset_dict \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/oneformer_unified_dataset_mapper.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/oneformer_unified_dataset_mapper.py new file mode 100644 index 0000000000000000000000000000000000000000..5c067c3015cdf361ef72549d2c6ca05cd9ea035f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/dataset_mappers/oneformer_unified_dataset_mapper.py @@ -0,0 +1,375 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import copy +import logging +import os + +import numpy as np +import torch +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.data import detection_utils as utils +from custom_detectron2.data import transforms as T +from custom_detectron2.structures import BitMasks, Instances +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.projects.point_rend import ColorAugSSDTransform +from custom_oneformer.utils.box_ops import masks_to_boxes +from custom_oneformer.data.tokenizer import SimpleTokenizer, Tokenize + +__all__ = ["OneFormerUnifiedDatasetMapper"] + + +class OneFormerUnifiedDatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by OneFormer for universal segmentation. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies geometric transforms to the image and annotation + 3. Find and applies suitable cropping to the image and annotation + 4. Prepare image and annotation to Tensors + """ + + @configurable + def __init__( + self, + is_train=True, + *, + name, + num_queries, + meta, + augmentations, + image_format, + ignore_label, + size_divisibility, + task_seq_len, + max_seq_len, + semantic_prob, + instance_prob, + ): + """ + NOTE: this interface is experimental. + Args: + is_train: for training or inference + augmentations: a list of augmentations or deterministic transforms to apply + image_format: an image format supported by :func:`detection_utils.read_image`. 
+ ignore_label: the label that is ignored to evaluation + size_divisibility: pad image size to be divisible by this value + """ + self.is_train = is_train + self.meta = meta + self.name = name + self.tfm_gens = augmentations + self.img_format = image_format + self.ignore_label = ignore_label + self.size_divisibility = size_divisibility + self.num_queries = num_queries + + logger = logging.getLogger(__name__) + mode = "training" if is_train else "inference" + logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}") + + self.things = [] + for k,v in self.meta.thing_dataset_id_to_contiguous_id.items(): + self.things.append(v) + self.class_names = self.meta.stuff_classes + self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) + self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) + self.semantic_prob = semantic_prob + self.instance_prob = instance_prob + + @classmethod + def from_config(cls, cfg, is_train=True): + # Build augmentation + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, + cfg.INPUT.MAX_SIZE_TRAIN, + cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING, + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append( + T.RandomCrop_CategoryAreaConstraint( + cfg.INPUT.CROP.TYPE, + cfg.INPUT.CROP.SIZE, + cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA, + cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + ) + ) + if cfg.INPUT.COLOR_AUG_SSD: + augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT)) + augs.append(T.RandomFlip()) + + # Assume always applies to the training set. + dataset_names = cfg.DATASETS.TRAIN + meta = MetadataCatalog.get(dataset_names[0]) + ignore_label = meta.ignore_label + + ret = { + "is_train": is_train, + "meta": meta, + "name": dataset_names[0], + "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX, + "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, + "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, + "augmentations": augs, + "image_format": cfg.INPUT.FORMAT, + "ignore_label": ignore_label, + "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY, + "semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC, + "instance_prob": cfg.INPUT.TASK_PROB.INSTANCE, + } + return ret + + def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + pan_seg_gt = pan_seg_gt.numpy() + instances = Instances(image_shape) + + classes = [] + texts = ["a semantic photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + if class_id not in classes: + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + else: + idx = classes.index(class_id) + masks[idx] += mask + masks[idx] = np.clip(masks[idx], 0, 1).astype(np.bool) + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + 
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + # Placeholder bounding boxes for stuff regions. Note that these are not used during training. + instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0]) + return instances, texts, label + + def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + pan_seg_gt = pan_seg_gt.numpy() + instances = Instances(image_shape) + + classes = [] + texts = ["an instance photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if class_id in self.things: + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + instances.gt_bboxes = masks_to_boxes(instances.gt_masks) + return instances, texts, label + + def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): + pan_seg_gt = pan_seg_gt.numpy() + instances = Instances(image_shape) + + classes = [] + texts = ["a panoptic photo"] * self.num_queries + masks = [] + label = np.ones_like(pan_seg_gt) * self.ignore_label + + for segment_info in segments_info: + class_id = segment_info["category_id"] + if not segment_info["iscrowd"]: + mask = pan_seg_gt == segment_info["id"] + if not np.all(mask == False): + cls_name = self.class_names[class_id] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + label[mask] = class_id + + num = 0 + for i, cls_name in enumerate(self.class_names): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + instances.gt_classes = torch.tensor(classes, dtype=torch.int64) + if len(masks) == 0: + # Some image does not have annotation (all ignored) + instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) + instances.gt_bboxes = torch.zeros((0, 4)) + else: + masks = BitMasks( + torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) + ) + instances.gt_masks = masks.tensor + instances.gt_bboxes = masks_to_boxes(instances.gt_masks) + for i in range(instances.gt_classes.shape[0]): + # Placeholder bounding boxes for stuff regions. Note that these are not used during training. 
+ if instances.gt_classes[i].item() not in self.things: + instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.]) + return instances, texts, label + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + assert self.is_train, "OneFormerUnifiedDatasetMapper should only be used for training!" + + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + + # semantic segmentation + if "sem_seg_file_name" in dataset_dict: + # PyTorch transformation not implemented for uint16, so converting it to double first + sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double") + else: + sem_seg_gt = None + + # panoptic segmentation + if "pan_seg_file_name" in dataset_dict: + pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") + segments_info = dataset_dict["segments_info"] + else: + pan_seg_gt = None + segments_info = None + + if pan_seg_gt is None: + raise ValueError( + "Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format( + dataset_dict["file_name"] + ) + ) + + aug_input = T.AugInput(image, sem_seg=sem_seg_gt) + aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input) + image = aug_input.image + if sem_seg_gt is not None: + sem_seg_gt = aug_input.sem_seg + + # apply the same transformation to panoptic segmentation + pan_seg_gt = transforms.apply_segmentation(pan_seg_gt) + + from panopticapi.utils import rgb2id + + pan_seg_gt = rgb2id(pan_seg_gt) + + # Pad image and segmentation label here! + image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + if sem_seg_gt is not None: + sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long")) + pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long")) + + if self.size_divisibility > 0: + image_size = (image.shape[-2], image.shape[-1]) + padding_size = [ + 0, + self.size_divisibility - image_size[1], + 0, + self.size_divisibility - image_size[0], + ] + image = F.pad(image, padding_size, value=128).contiguous() + if sem_seg_gt is not None: + sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous() + pan_seg_gt = F.pad( + pan_seg_gt, padding_size, value=0 + ).contiguous() # 0 is the VOID panoptic label + + image_shape = (image.shape[-2], image.shape[-1]) # h, w + + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = image + + if "annotations" in dataset_dict: + raise ValueError("Semantic segmentation dataset should not have 'annotations'.") + + prob_task = np.random.uniform(0,1.)
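As an aside on the padding block above: `F.pad`'s size list is ordered `(left, right, top, bottom)` over the last two dims, so `[0, pad_w, 0, pad_h]` grows only the right and bottom edges and keeps pixel `(0, 0)` fixed. A minimal sketch (the 512 is illustrative; note the arithmetic assumes the cropped image is no larger than `size_divisibility`, since a negative pad would crop):

```python
import torch
import torch.nn.functional as F

size_divisibility = 512            # illustrative; e.g. the training crop size
image = torch.zeros(3, 480, 360)   # C, H, W after cropping
padding_size = [
    0, size_divisibility - image.shape[-1],  # left, right (width)
    0, size_divisibility - image.shape[-2],  # top, bottom (height)
]
padded = F.pad(image, padding_size, value=128)
print(padded.shape)  # torch.Size([3, 512, 512])
```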
+ + num_class_obj = {} + + for name in self.class_names: + num_class_obj[name] = 0 + + if prob_task < self.semantic_prob: + task = "The task is semantic" + instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + elif prob_task < self.instance_prob: + task = "The task is instance" + instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + else: + task = "The task is panoptic" + instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) + + dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long() + dataset_dict["instances"] = instances + dataset_dict["orig_shape"] = image_shape + dataset_dict["task"] = task + dataset_dict["text"] = text + dataset_dict["thing_ids"] = self.things + + return dataset_dict diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..59ce30713f63d056107b2a06ecd434eb27a30b7d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/__init__.py @@ -0,0 +1,7 @@ +from . import ( + register_ade20k_panoptic, + register_cityscapes_panoptic, + register_coco_panoptic_annos_semseg, + register_ade20k_instance, + register_coco_panoptic2instance, +) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_instance.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_instance.py new file mode 100644 index 0000000000000000000000000000000000000000..2cfa20c26ab8ceb54d122dd187096be14d0f5015 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_instance.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_ade20k_instance.py +# ------------------------------------------------------------------------------ + +import json +import logging +import numpy as np +import os +from PIL import Image + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.data.datasets.coco import load_coco_json, register_coco_instances +from custom_detectron2.utils.file_io import PathManager + +ADE_CATEGORIES = [{'id': 7, 'name': 'bed'}, {'id': 8, 'name': 'windowpane'}, {'id': 10, 'name': 'cabinet'}, {'id': 12, 'name': 'person'}, {'id': 14, 'name': 'door'}, {'id': 15, 'name': 'table'}, {'id': 18, 'name': 'curtain'}, {'id': 19, 'name': 'chair'}, {'id': 20, 'name': 'car'}, {'id': 22, 'name': 'painting'}, {'id': 23, 'name': 'sofa'}, {'id': 24, 'name': 'shelf'}, {'id': 27, 'name': 'mirror'}, {'id': 30, 'name': 'armchair'}, {'id': 31, 'name': 'seat'}, {'id': 32, 'name': 'fence'}, {'id': 33, 'name': 'desk'}, {'id': 35, 'name': 'wardrobe'}, {'id': 36, 'name': 'lamp'}, {'id': 37, 'name': 'bathtub'}, {'id': 38, 'name': 'railing'}, {'id': 39, 'name': 'cushion'}, {'id': 41, 'name': 'box'}, {'id': 42, 'name': 'column'}, {'id': 43, 'name': 'signboard'}, {'id': 44, 'name': 'chest of drawers'}, {'id': 45, 'name': 'counter'}, {'id': 47, 'name': 'sink'}, {'id': 49, 'name': 'fireplace'}, {'id': 50, 'name': 'refrigerator'}, {'id': 53, 'name': 'stairs'}, {'id': 55, 'name': 'case'}, {'id': 56, 'name': 'pool table'}, {'id': 57, 'name': 
'pillow'}, {'id': 58, 'name': 'screen door'}, {'id': 62, 'name': 'bookcase'}, {'id': 64, 'name': 'coffee table'}, {'id': 65, 'name': 'toilet'}, {'id': 66, 'name': 'flower'}, {'id': 67, 'name': 'book'}, {'id': 69, 'name': 'bench'}, {'id': 70, 'name': 'countertop'}, {'id': 71, 'name': 'stove'}, {'id': 72, 'name': 'palm'}, {'id': 73, 'name': 'kitchen island'}, {'id': 74, 'name': 'computer'}, {'id': 75, 'name': 'swivel chair'}, {'id': 76, 'name': 'boat'}, {'id': 78, 'name': 'arcade machine'}, {'id': 80, 'name': 'bus'}, {'id': 81, 'name': 'towel'}, {'id': 82, 'name': 'light'}, {'id': 83, 'name': 'truck'}, {'id': 85, 'name': 'chandelier'}, {'id': 86, 'name': 'awning'}, {'id': 87, 'name': 'streetlight'}, {'id': 88, 'name': 'booth'}, {'id': 89, 'name': 'television receiver'}, {'id': 90, 'name': 'airplane'}, {'id': 92, 'name': 'apparel'}, {'id': 93, 'name': 'pole'}, {'id': 95, 'name': 'bannister'}, {'id': 97, 'name': 'ottoman'}, {'id': 98, 'name': 'bottle'}, {'id': 102, 'name': 'van'}, {'id': 103, 'name': 'ship'}, {'id': 104, 'name': 'fountain'}, {'id': 107, 'name': 'washer'}, {'id': 108, 'name': 'plaything'}, {'id': 110, 'name': 'stool'}, {'id': 111, 'name': 'barrel'}, {'id': 112, 'name': 'basket'}, {'id': 115, 'name': 'bag'}, {'id': 116, 'name': 'minibike'}, {'id': 118, 'name': 'oven'}, {'id': 119, 'name': 'ball'}, {'id': 120, 'name': 'food'}, {'id': 121, 'name': 'step'}, {'id': 123, 'name': 'trade name'}, {'id': 124, 'name': 'microwave'}, {'id': 125, 'name': 'pot'}, {'id': 126, 'name': 'animal'}, {'id': 127, 'name': 'bicycle'}, {'id': 129, 'name': 'dishwasher'}, {'id': 130, 'name': 'screen'}, {'id': 132, 'name': 'sculpture'}, {'id': 133, 'name': 'hood'}, {'id': 134, 'name': 'sconce'}, {'id': 135, 'name': 'vase'}, {'id': 136, 'name': 'traffic light'}, {'id': 137, 'name': 'tray'}, {'id': 138, 'name': 'ashcan'}, {'id': 139, 'name': 'fan'}, {'id': 142, 'name': 'plate'}, {'id': 143, 'name': 'monitor'}, {'id': 144, 'name': 'bulletin board'}, {'id': 146, 'name': 'radiator'}, {'id': 147, 'name': 'glass'}, {'id': 148, 'name': 'clock'}, {'id': 149, 'name': 'flag'}] + + +_PREDEFINED_SPLITS = { + # point annotations without masks + "ade20k_instance_train": ( + "ADEChallengeData2016/images/training", + "ADEChallengeData2016/ade20k_instance_train.json", + ), + "ade20k_instance_val": ( + "ADEChallengeData2016/images/validation", + "ADEChallengeData2016/ade20k_instance_val.json", + ), +} + + +def _get_ade_instances_meta(): + thing_ids = [k["id"] for k in ADE_CATEGORIES] + assert len(thing_ids) == 100, len(thing_ids) + # Mapping from the incontiguous ADE category id to an id in [0, 99] + thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} + thing_classes = [k["name"] for k in ADE_CATEGORIES] + ret = { + "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, + "thing_classes": thing_classes, + } + return ret + + +def register_all_ade20k_instance(root): + for key, (image_root, json_file) in _PREDEFINED_SPLITS.items(): + # Assume pre-defined datasets live in `./datasets`. 
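+        # The dataset root is taken from the DETECTRON2_DATASETS environment variable
+        # (see `_root` below); e.g. DETECTRON2_DATASETS=/data (illustrative path) makes
+        # this split resolve to /data/ADEChallengeData2016/images/training and
+        # /data/ADEChallengeData2016/ade20k_instance_train.json.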
+ register_coco_instances( + key, + _get_ade_instances_meta(), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +_root = os.getenv("DETECTRON2_DATASETS", "datasets") +register_all_ade20k_instance(_root) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_panoptic.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..1300c30f03617ecf2f6614eee29b39320edfa079 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_ade20k_panoptic.py @@ -0,0 +1,394 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_ade20k_panoptic.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import json +import os + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.utils.file_io import PathManager + +ADE20K_150_CATEGORIES = [ + {"color": [120, 120, 120], "id": 0, "isthing": 0, "name": "wall"}, + {"color": [180, 120, 120], "id": 1, "isthing": 0, "name": "building"}, + {"color": [6, 230, 230], "id": 2, "isthing": 0, "name": "sky"}, + {"color": [80, 50, 50], "id": 3, "isthing": 0, "name": "floor"}, + {"color": [4, 200, 3], "id": 4, "isthing": 0, "name": "tree"}, + {"color": [120, 120, 80], "id": 5, "isthing": 0, "name": "ceiling"}, + {"color": [140, 140, 140], "id": 6, "isthing": 0, "name": "road, route"}, + {"color": [204, 5, 255], "id": 7, "isthing": 1, "name": "bed"}, + {"color": [230, 230, 230], "id": 8, "isthing": 1, "name": "window "}, + {"color": [4, 250, 7], "id": 9, "isthing": 0, "name": "grass"}, + {"color": [224, 5, 255], "id": 10, "isthing": 1, "name": "cabinet"}, + {"color": [235, 255, 7], "id": 11, "isthing": 0, "name": "sidewalk, pavement"}, + {"color": [150, 5, 61], "id": 12, "isthing": 1, "name": "person"}, + {"color": [120, 120, 70], "id": 13, "isthing": 0, "name": "earth, ground"}, + {"color": [8, 255, 51], "id": 14, "isthing": 1, "name": "door"}, + {"color": [255, 6, 82], "id": 15, "isthing": 1, "name": "table"}, + {"color": [143, 255, 140], "id": 16, "isthing": 0, "name": "mountain, mount"}, + {"color": [204, 255, 4], "id": 17, "isthing": 0, "name": "plant"}, + {"color": [255, 51, 7], "id": 18, "isthing": 1, "name": "curtain"}, + {"color": [204, 70, 3], "id": 19, "isthing": 1, "name": "chair"}, + {"color": [0, 102, 200], "id": 20, "isthing": 1, "name": "car"}, + {"color": [61, 230, 250], "id": 21, "isthing": 0, "name": "water"}, + {"color": [255, 6, 51], "id": 22, "isthing": 1, "name": "painting, picture"}, + {"color": [11, 102, 255], "id": 23, "isthing": 1, "name": "sofa"}, + {"color": [255, 7, 71], "id": 24, "isthing": 1, "name": "shelf"}, + {"color": [255, 9, 224], "id": 25, "isthing": 0, "name": "house"}, + {"color": [9, 7, 230], "id": 26, "isthing": 0, "name": "sea"}, + {"color": [220, 220, 220], "id": 27, "isthing": 1, "name": "mirror"}, + {"color": [255, 9, 92], "id": 28, "isthing": 0, "name": "rug"}, + {"color": [112, 9, 255], "id": 29, "isthing": 0, "name": "field"}, + {"color": [8, 255, 214], "id": 30, "isthing": 1, "name": "armchair"}, + {"color": [7, 255, 224], "id": 31, "isthing": 1, "name": "seat"}, + {"color": [255, 184, 6], "id": 32, 
"isthing": 1, "name": "fence"}, + {"color": [10, 255, 71], "id": 33, "isthing": 1, "name": "desk"}, + {"color": [255, 41, 10], "id": 34, "isthing": 0, "name": "rock, stone"}, + {"color": [7, 255, 255], "id": 35, "isthing": 1, "name": "wardrobe, closet, press"}, + {"color": [224, 255, 8], "id": 36, "isthing": 1, "name": "lamp"}, + {"color": [102, 8, 255], "id": 37, "isthing": 1, "name": "tub"}, + {"color": [255, 61, 6], "id": 38, "isthing": 1, "name": "rail"}, + {"color": [255, 194, 7], "id": 39, "isthing": 1, "name": "cushion"}, + {"color": [255, 122, 8], "id": 40, "isthing": 0, "name": "base, pedestal, stand"}, + {"color": [0, 255, 20], "id": 41, "isthing": 1, "name": "box"}, + {"color": [255, 8, 41], "id": 42, "isthing": 1, "name": "column, pillar"}, + {"color": [255, 5, 153], "id": 43, "isthing": 1, "name": "signboard, sign"}, + { + "color": [6, 51, 255], + "id": 44, + "isthing": 1, + "name": "chest of drawers, chest, bureau, dresser", + }, + {"color": [235, 12, 255], "id": 45, "isthing": 1, "name": "counter"}, + {"color": [160, 150, 20], "id": 46, "isthing": 0, "name": "sand"}, + {"color": [0, 163, 255], "id": 47, "isthing": 1, "name": "sink"}, + {"color": [140, 140, 140], "id": 48, "isthing": 0, "name": "skyscraper"}, + {"color": [250, 10, 15], "id": 49, "isthing": 1, "name": "fireplace"}, + {"color": [20, 255, 0], "id": 50, "isthing": 1, "name": "refrigerator, icebox"}, + {"color": [31, 255, 0], "id": 51, "isthing": 0, "name": "grandstand, covered stand"}, + {"color": [255, 31, 0], "id": 52, "isthing": 0, "name": "path"}, + {"color": [255, 224, 0], "id": 53, "isthing": 1, "name": "stairs"}, + {"color": [153, 255, 0], "id": 54, "isthing": 0, "name": "runway"}, + {"color": [0, 0, 255], "id": 55, "isthing": 1, "name": "case, display case, showcase, vitrine"}, + { + "color": [255, 71, 0], + "id": 56, + "isthing": 1, + "name": "pool table, billiard table, snooker table", + }, + {"color": [0, 235, 255], "id": 57, "isthing": 1, "name": "pillow"}, + {"color": [0, 173, 255], "id": 58, "isthing": 1, "name": "screen door, screen"}, + {"color": [31, 0, 255], "id": 59, "isthing": 0, "name": "stairway, staircase"}, + {"color": [11, 200, 200], "id": 60, "isthing": 0, "name": "river"}, + {"color": [255, 82, 0], "id": 61, "isthing": 0, "name": "bridge, span"}, + {"color": [0, 255, 245], "id": 62, "isthing": 1, "name": "bookcase"}, + {"color": [0, 61, 255], "id": 63, "isthing": 0, "name": "blind, screen"}, + {"color": [0, 255, 112], "id": 64, "isthing": 1, "name": "coffee table"}, + { + "color": [0, 255, 133], + "id": 65, + "isthing": 1, + "name": "toilet, can, commode, crapper, pot, potty, stool, throne", + }, + {"color": [255, 0, 0], "id": 66, "isthing": 1, "name": "flower"}, + {"color": [255, 163, 0], "id": 67, "isthing": 1, "name": "book"}, + {"color": [255, 102, 0], "id": 68, "isthing": 0, "name": "hill"}, + {"color": [194, 255, 0], "id": 69, "isthing": 1, "name": "bench"}, + {"color": [0, 143, 255], "id": 70, "isthing": 1, "name": "countertop"}, + {"color": [51, 255, 0], "id": 71, "isthing": 1, "name": "stove"}, + {"color": [0, 82, 255], "id": 72, "isthing": 1, "name": "palm, palm tree"}, + {"color": [0, 255, 41], "id": 73, "isthing": 1, "name": "kitchen island"}, + {"color": [0, 255, 173], "id": 74, "isthing": 1, "name": "computer"}, + {"color": [10, 0, 255], "id": 75, "isthing": 1, "name": "swivel chair"}, + {"color": [173, 255, 0], "id": 76, "isthing": 1, "name": "boat"}, + {"color": [0, 255, 153], "id": 77, "isthing": 0, "name": "bar"}, + {"color": [255, 92, 0], "id": 78, "isthing": 1, 
"name": "arcade machine"}, + {"color": [255, 0, 255], "id": 79, "isthing": 0, "name": "hovel, hut, hutch, shack, shanty"}, + {"color": [255, 0, 245], "id": 80, "isthing": 1, "name": "bus"}, + {"color": [255, 0, 102], "id": 81, "isthing": 1, "name": "towel"}, + {"color": [255, 173, 0], "id": 82, "isthing": 1, "name": "light"}, + {"color": [255, 0, 20], "id": 83, "isthing": 1, "name": "truck"}, + {"color": [255, 184, 184], "id": 84, "isthing": 0, "name": "tower"}, + {"color": [0, 31, 255], "id": 85, "isthing": 1, "name": "chandelier"}, + {"color": [0, 255, 61], "id": 86, "isthing": 1, "name": "awning, sunshade, sunblind"}, + {"color": [0, 71, 255], "id": 87, "isthing": 1, "name": "street lamp"}, + {"color": [255, 0, 204], "id": 88, "isthing": 1, "name": "booth"}, + {"color": [0, 255, 194], "id": 89, "isthing": 1, "name": "tv"}, + {"color": [0, 255, 82], "id": 90, "isthing": 1, "name": "plane"}, + {"color": [0, 10, 255], "id": 91, "isthing": 0, "name": "dirt track"}, + {"color": [0, 112, 255], "id": 92, "isthing": 1, "name": "clothes"}, + {"color": [51, 0, 255], "id": 93, "isthing": 1, "name": "pole"}, + {"color": [0, 194, 255], "id": 94, "isthing": 0, "name": "land, ground, soil"}, + { + "color": [0, 122, 255], + "id": 95, + "isthing": 1, + "name": "bannister, banister, balustrade, balusters, handrail", + }, + { + "color": [0, 255, 163], + "id": 96, + "isthing": 0, + "name": "escalator, moving staircase, moving stairway", + }, + { + "color": [255, 153, 0], + "id": 97, + "isthing": 1, + "name": "ottoman, pouf, pouffe, puff, hassock", + }, + {"color": [0, 255, 10], "id": 98, "isthing": 1, "name": "bottle"}, + {"color": [255, 112, 0], "id": 99, "isthing": 0, "name": "buffet, counter, sideboard"}, + { + "color": [143, 255, 0], + "id": 100, + "isthing": 0, + "name": "poster, posting, placard, notice, bill, card", + }, + {"color": [82, 0, 255], "id": 101, "isthing": 0, "name": "stage"}, + {"color": [163, 255, 0], "id": 102, "isthing": 1, "name": "van"}, + {"color": [255, 235, 0], "id": 103, "isthing": 1, "name": "ship"}, + {"color": [8, 184, 170], "id": 104, "isthing": 1, "name": "fountain"}, + { + "color": [133, 0, 255], + "id": 105, + "isthing": 0, + "name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter", + }, + {"color": [0, 255, 92], "id": 106, "isthing": 0, "name": "canopy"}, + { + "color": [184, 0, 255], + "id": 107, + "isthing": 1, + "name": "washer, automatic washer, washing machine", + }, + {"color": [255, 0, 31], "id": 108, "isthing": 1, "name": "plaything, toy"}, + {"color": [0, 184, 255], "id": 109, "isthing": 0, "name": "pool"}, + {"color": [0, 214, 255], "id": 110, "isthing": 1, "name": "stool"}, + {"color": [255, 0, 112], "id": 111, "isthing": 1, "name": "barrel, cask"}, + {"color": [92, 255, 0], "id": 112, "isthing": 1, "name": "basket, handbasket"}, + {"color": [0, 224, 255], "id": 113, "isthing": 0, "name": "falls"}, + {"color": [112, 224, 255], "id": 114, "isthing": 0, "name": "tent"}, + {"color": [70, 184, 160], "id": 115, "isthing": 1, "name": "bag"}, + {"color": [163, 0, 255], "id": 116, "isthing": 1, "name": "minibike, motorbike"}, + {"color": [153, 0, 255], "id": 117, "isthing": 0, "name": "cradle"}, + {"color": [71, 255, 0], "id": 118, "isthing": 1, "name": "oven"}, + {"color": [255, 0, 163], "id": 119, "isthing": 1, "name": "ball"}, + {"color": [255, 204, 0], "id": 120, "isthing": 1, "name": "food, solid food"}, + {"color": [255, 0, 143], "id": 121, "isthing": 1, "name": "step, stair"}, + {"color": [0, 255, 235], "id": 122, "isthing": 0, "name": "tank, 
storage tank"}, + {"color": [133, 255, 0], "id": 123, "isthing": 1, "name": "trade name"}, + {"color": [255, 0, 235], "id": 124, "isthing": 1, "name": "microwave"}, + {"color": [245, 0, 255], "id": 125, "isthing": 1, "name": "pot"}, + {"color": [255, 0, 122], "id": 126, "isthing": 1, "name": "animal"}, + {"color": [255, 245, 0], "id": 127, "isthing": 1, "name": "bicycle"}, + {"color": [10, 190, 212], "id": 128, "isthing": 0, "name": "lake"}, + {"color": [214, 255, 0], "id": 129, "isthing": 1, "name": "dishwasher"}, + {"color": [0, 204, 255], "id": 130, "isthing": 1, "name": "screen"}, + {"color": [20, 0, 255], "id": 131, "isthing": 0, "name": "blanket, cover"}, + {"color": [255, 255, 0], "id": 132, "isthing": 1, "name": "sculpture"}, + {"color": [0, 153, 255], "id": 133, "isthing": 1, "name": "hood, exhaust hood"}, + {"color": [0, 41, 255], "id": 134, "isthing": 1, "name": "sconce"}, + {"color": [0, 255, 204], "id": 135, "isthing": 1, "name": "vase"}, + {"color": [41, 0, 255], "id": 136, "isthing": 1, "name": "traffic light"}, + {"color": [41, 255, 0], "id": 137, "isthing": 1, "name": "tray"}, + {"color": [173, 0, 255], "id": 138, "isthing": 1, "name": "trash can"}, + {"color": [0, 245, 255], "id": 139, "isthing": 1, "name": "fan"}, + {"color": [71, 0, 255], "id": 140, "isthing": 0, "name": "pier"}, + {"color": [122, 0, 255], "id": 141, "isthing": 0, "name": "crt screen"}, + {"color": [0, 255, 184], "id": 142, "isthing": 1, "name": "plate"}, + {"color": [0, 92, 255], "id": 143, "isthing": 1, "name": "monitor"}, + {"color": [184, 255, 0], "id": 144, "isthing": 1, "name": "bulletin board"}, + {"color": [0, 133, 255], "id": 145, "isthing": 0, "name": "shower"}, + {"color": [255, 214, 0], "id": 146, "isthing": 1, "name": "radiator"}, + {"color": [25, 194, 194], "id": 147, "isthing": 1, "name": "glass, drinking glass"}, + {"color": [102, 255, 0], "id": 148, "isthing": 1, "name": "clock"}, + {"color": [92, 0, 255], "id": 149, "isthing": 1, "name": "flag"}, +] + +ADE20k_COLORS = [k["color"] for k in ADE20K_150_CATEGORIES] + +MetadataCatalog.get("ade20k_sem_seg_train").set( + stuff_colors=ADE20k_COLORS[:], +) + +MetadataCatalog.get("ade20k_sem_seg_val").set( + stuff_colors=ADE20k_COLORS[:], +) + + +def load_ade20k_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". + gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". + json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = True + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = False + return segment_info + + with PathManager.open(json_file) as f: + json_info = json.load(f) + + ret = [] + for ann in json_info["annotations"]: + image_id = ann["image_id"] + # TODO: currently we assume image and label has the same filename but + # different extension, and images have extension ".jpg" for COCO. Need + # to make image extension a user-provided argument if we extend this + # function to support other COCO-like datasets. 
+ image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") + label_file = os.path.join(gt_dir, ann["file_name"]) + sem_label_file = os.path.join(semseg_dir, ann["file_name"]) + segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] + ret.append( + { + "file_name": image_file, + "image_id": image_id, + "pan_seg_file_name": label_file, + "sem_seg_file_name": sem_label_file, + "segments_info": segments_info, + } + ) + assert len(ret), f"No images found in {image_dir}!" + assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] + assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] + assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"] + return ret + + +def register_ade20k_panoptic( + name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None, +): + """ + Register a "standard" version of ADE20k panoptic segmentation dataset named `name`. + The dictionaries in this registered dataset follows detectron2's standard format. + Hence it's called "standard". + Args: + name (str): the name that identifies a dataset, + e.g. "ade20k_panoptic_train" + metadata (dict): extra metadata associated with this dataset. + image_root (str): directory which contains all the images + panoptic_root (str): directory which contains panoptic annotation images in COCO format + panoptic_json (str): path to the json panoptic annotation file in COCO format + sem_seg_root (none): not used, to be consistent with + `register_coco_panoptic_separated`. + instances_json (str): path to the json instance annotation file + """ + panoptic_name = name + DatasetCatalog.register( + panoptic_name, + lambda: load_ade20k_panoptic_json( + panoptic_json, image_root, panoptic_root, semantic_root, metadata + ), + ) + MetadataCatalog.get(panoptic_name).set( + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + json_file=instances_json, + evaluator_type="ade20k_panoptic_seg", + ignore_label=255, + label_divisor=1000, + **metadata, + ) + + +_PREDEFINED_SPLITS_ADE20K_PANOPTIC = { + "ade20k_panoptic_train": ( + "ADEChallengeData2016/images/training", + "ADEChallengeData2016/ade20k_panoptic_train", + "ADEChallengeData2016/ade20k_panoptic_train.json", + "ADEChallengeData2016/annotations_detectron2/training", + "ADEChallengeData2016/ade20k_instance_train.json", + ), + "ade20k_panoptic_val": ( + "ADEChallengeData2016/images/validation", + "ADEChallengeData2016/ade20k_panoptic_val", + "ADEChallengeData2016/ade20k_panoptic_val.json", + "ADEChallengeData2016/annotations_detectron2/validation", + "ADEChallengeData2016/ade20k_instance_val.json", + ), +} + + +def get_metadata(): + meta = {} + # The following metadata maps contiguous id from [0, #thing categories + + # #stuff categories) to their names and colors. We have to replica of the + # same name and color under "thing_*" and "stuff_*" because the current + # visualization function in D2 handles thing and class classes differently + # due to some heuristic used in Panoptic FPN. We keep the same naming to + # enable reusing existing visualization functions. 
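+    # For ADE20K the dataset ids are already contiguous (0-149), so the maps built
+    # here are identity-like: e.g. the thing "bed" (id 7) keeps contiguous id 7, and
+    # every category, thing or stuff, is also entered in stuff_dataset_id_to_contiguous_id
+    # so that the sem_seg evaluator sees all 150 classes.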
+    thing_classes = [k["name"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
+    thing_colors = [k["color"] for k in ADE20K_150_CATEGORIES if k["isthing"] == 1]
+    stuff_classes = [k["name"] for k in ADE20K_150_CATEGORIES]
+    stuff_colors = [k["color"] for k in ADE20K_150_CATEGORIES]
+
+    meta["thing_classes"] = thing_classes
+    meta["thing_colors"] = thing_colors
+    meta["stuff_classes"] = stuff_classes
+    meta["stuff_colors"] = stuff_colors
+
+    # Convert category id for training:
+    #   category id: like semantic segmentation, it is the class id for each
+    #   pixel. Since there are some classes not used in evaluation, the category
+    #   id is not always contiguous and thus we have two sets of category ids:
+    #       - original category id: category id in the original dataset, mainly
+    #         used for evaluation.
+    #       - contiguous category id: [0, #classes), in order to train the linear
+    #         softmax classifier.
+    thing_dataset_id_to_contiguous_id = {}
+    stuff_dataset_id_to_contiguous_id = {}
+
+    for i, cat in enumerate(ADE20K_150_CATEGORIES):
+        if cat["isthing"]:
+            thing_dataset_id_to_contiguous_id[cat["id"]] = i
+        # else:
+        #     stuff_dataset_id_to_contiguous_id[cat["id"]] = i
+
+        # in order to use sem_seg evaluator
+        stuff_dataset_id_to_contiguous_id[cat["id"]] = i
+
+    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+    return meta
+
+
+def register_all_ade20k_panoptic(root):
+    metadata = get_metadata()
+    for (
+        prefix,
+        (image_root, panoptic_root, panoptic_json, semantic_root, instance_json),
+    ) in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
+        # The "standard" version of the ADE20k panoptic segmentation dataset,
+        # e.g. used by Panoptic-DeepLab
+        register_ade20k_panoptic(
+            prefix,
+            metadata,
+            os.path.join(root, image_root),
+            os.path.join(root, panoptic_root),
+            os.path.join(root, semantic_root),
+            os.path.join(root, panoptic_json),
+            os.path.join(root, instance_json),
+        )
+
+
+_root = os.getenv("DETECTRON2_DATASETS", "datasets")
+register_all_ade20k_panoptic(_root)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_cityscapes_panoptic.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_cityscapes_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf3341607a258baa57379120908b30104f205b80
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_cityscapes_panoptic.py
@@ -0,0 +1,199 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/cityscapes_panoptic.py
+# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
+# ------------------------------------------------------------------------------
+
+import json
+import logging
+import os
+
+from custom_detectron2.data import DatasetCatalog, MetadataCatalog
+from custom_detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
+from custom_detectron2.utils.file_io import PathManager
+
+"""
+This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
+""" + + +logger = logging.getLogger(__name__) + + +def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info): + files = [] + # scan through the directory + cities = PathManager.ls(image_dir) + logger.info(f"{len(cities)} cities found in '{image_dir}'.") + image_dict = {} + for city in cities: + city_img_dir = os.path.join(image_dir, city) + for basename in PathManager.ls(city_img_dir): + image_file = os.path.join(city_img_dir, basename) + + suffix = "_leftImg8bit.png" + assert basename.endswith(suffix), basename + basename = os.path.basename(basename)[: -len(suffix)] + + image_dict[basename] = image_file + + for ann in json_info["annotations"]: + image_file = image_dict.get(ann["image_id"], None) + assert image_file is not None, "No image {} found for annotation {}".format( + ann["image_id"], ann["file_name"] + ) + label_file = os.path.join(gt_dir, ann["file_name"]) + segments_info = ann["segments_info"] + files.append((image_file, label_file, segments_info)) + + assert len(files), "No images found in {}".format(image_dir) + assert PathManager.isfile(files[0][0]), files[0][0] + assert PathManager.isfile(files[0][1]), files[0][1] + return files + + +def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train". + gt_json (str): path to the json file. e.g., + "~/cityscapes/gtFine/cityscapes_panoptic_train.json". + meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id" + and "stuff_dataset_id_to_contiguous_id" to map category ids to + contiguous ids for training. + + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + return segment_info + + assert os.path.exists( + gt_json + ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa + + + with open(gt_json) as f: + json_info = json.load(f) + + files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info) + ret = [] + for image_file, label_file, segments_info in files: + sem_label_file = ( + image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png" + ) + segments_info = [_convert_category_id(x, meta) for x in segments_info] + ret.append( + { + "file_name": image_file, + "image_id": "_".join( + os.path.splitext(os.path.basename(image_file))[0].split("_")[:3] + ), + "sem_seg_file_name": sem_label_file, + "pan_seg_file_name": label_file, + "segments_info": segments_info, + } + ) + assert len(ret), f"No images found in {image_dir}!" 
+    assert PathManager.isfile(
+        ret[0]["sem_seg_file_name"]
+    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
+    assert PathManager.isfile(
+        ret[0]["pan_seg_file_name"]
+    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
+    return ret
+
+
+_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
+    "cityscapes_fine_panoptic_train": (
+        "cityscapes/leftImg8bit/train",
+        "cityscapes/gtFine/cityscapes_panoptic_train",
+        "cityscapes/gtFine/cityscapes_panoptic_train.json",
+    ),
+    "cityscapes_fine_panoptic_val": (
+        "cityscapes/leftImg8bit/val",
+        "cityscapes/gtFine/cityscapes_panoptic_val",
+        "cityscapes/gtFine/cityscapes_panoptic_val.json",
+    ),
+    # "cityscapes_fine_panoptic_test": not supported yet
+}
+
+
+def register_all_cityscapes_panoptic(root):
+    meta = {}
+    # The following metadata maps contiguous ids from [0, #thing categories +
+    # #stuff categories) to their names and colors. We keep two replicas of the
+    # same name and color under "thing_*" and "stuff_*" because the current
+    # visualization function in D2 handles thing and stuff classes differently
+    # due to some heuristic used in Panoptic FPN. We keep the same naming to
+    # enable reusing existing visualization functions.
+    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+
+    meta["thing_classes"] = thing_classes
+    meta["thing_colors"] = thing_colors
+    meta["stuff_classes"] = stuff_classes
+    meta["stuff_colors"] = stuff_colors
+
+    # There are three types of ids in cityscapes panoptic segmentation:
+    # (1) category id: like semantic segmentation, it is the class id for each
+    # pixel. Since there are some classes not used in evaluation, the category
+    # id is not always contiguous and thus we have two sets of category ids:
+    #       - original category id: category id in the original dataset, mainly
+    #         used for evaluation.
+    #       - contiguous category id: [0, #classes), in order to train the classifier
+    # (2) instance id: this id is used to differentiate different instances from
+    # the same category. For "stuff" classes, the instance id is always 0; for
+    # "thing" classes, the instance id starts from 1 and 0 is reserved for
+    # ignored instances (e.g. crowd annotation).
+    # (3) panoptic id: this is the compact id that encodes both category and
+    # instance id by: category_id * 1000 + instance_id.
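+    # Worked example of (3): a segment of category id 26 ("car" in the original
+    # Cityscapes ids) that is the 2nd instance in its image encodes to
+    # 26 * 1000 + 2 = 26002; a stuff segment always has instance id 0, so its
+    # panoptic id is just category_id * 1000.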
+ thing_dataset_id_to_contiguous_id = {} + stuff_dataset_id_to_contiguous_id = {} + + for k in CITYSCAPES_CATEGORIES: + if k["isthing"] == 1: + thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] + else: + stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"] + + meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id + meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id + + for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items(): + image_dir = os.path.join(root, image_dir) + gt_dir = os.path.join(root, gt_dir) + gt_json = os.path.join(root, gt_json) + + if key in DatasetCatalog.list(): + DatasetCatalog.remove(key) + + DatasetCatalog.register( + key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta) + ) + MetadataCatalog.get(key).set( + panoptic_root=gt_dir, + image_root=image_dir, + panoptic_json=gt_json, + gt_dir=gt_dir.replace("cityscapes_panoptic_", ""), + evaluator_type="cityscapes_panoptic_seg", + ignore_label=255, + label_divisor=1000, + **meta, + ) + +_root = os.getenv("DETECTRON2_DATASETS", "datasets") +register_all_cityscapes_panoptic(_root) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic2instance.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic2instance.py new file mode 100644 index 0000000000000000000000000000000000000000..fb0d101135f703e4688acee3df85ea0d7ee12f69 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic2instance.py @@ -0,0 +1,44 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/builtin.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + + +""" +This file registers pre-defined datasets at hard-coded paths, and their metadata. + +We hard-code metadata for common datasets. This will enable: +1. Consistency check when loading the datasets +2. Use models on these standard datasets directly and run demos, + without having to download the dataset annotations + +We hard-code some paths to the dataset that's assumed to +exist in "./datasets/". + +Users SHOULD NOT use this file to create new dataset / metadata for new dataset. +To add new dataset, refer to the tutorial "docs/DATASETS.md". +""" + +import os +from custom_detectron2.data.datasets.builtin_meta import _get_builtin_metadata +from custom_detectron2.data.datasets.coco import register_coco_instances + + +_PREDEFINED_SPLITS_COCO = { + "coco_2017_val_panoptic2instance": ("coco/val2017", "coco/annotations/panoptic2instances_val2017.json"), +} + + +def register_panoptic2instances_coco(root): + for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO.items(): + # Assume pre-defined datasets live in `./datasets`. 
+ register_coco_instances( + key, + _get_builtin_metadata("coco"), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +_root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets")) +register_panoptic2instances_coco(_root) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic_annos_semseg.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic_annos_semseg.py new file mode 100644 index 0000000000000000000000000000000000000000..a3d60d75a11aa8faee4ae8961cfcc24bdacbdac5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/datasets/register_coco_panoptic_annos_semseg.py @@ -0,0 +1,367 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_coco_panoptic_annos_semseg.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import json +import os + +from custom_detectron2.data import DatasetCatalog, MetadataCatalog +from custom_detectron2.data.datasets import load_sem_seg +from custom_detectron2.data.datasets.builtin_meta import COCO_CATEGORIES +from custom_detectron2.utils.file_io import PathManager +import contextlib +import logging +import io +from fvcore.common.timer import Timer +import custom_pycocotools.mask as mask_util +from custom_detectron2.structures import BoxMode + + +logger = logging.getLogger(__name__) + + +_PREDEFINED_SPLITS_COCO_PANOPTIC = { + "coco_2017_train_panoptic": ( + # This is the original panoptic annotation directory + "coco/panoptic_train2017", + "coco/annotations/panoptic_train2017.json", + # This directory contains semantic annotations that are + # converted from panoptic annotations. + # It is used by PanopticFPN. + # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py + # to create these directories. + "coco/panoptic_semseg_train2017", + ), + "coco_2017_val_panoptic": ( + "coco/panoptic_val2017", + "coco/annotations/panoptic_val2017.json", + "coco/panoptic_semseg_val2017", + ), +} + +def load_coco_instance_json(json_file, image_root, dataset_name=None): + from custom_pycocotools.coco import COCO + + timer = Timer() + json_file = PathManager.get_local_path(json_file) + with contextlib.redirect_stdout(io.StringIO()): + coco_api = COCO(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + + id_map = None + if dataset_name is not None: + meta = MetadataCatalog.get(dataset_name) + cat_ids = sorted(coco_api.getCatIds()) + cats = coco_api.loadCats(cat_ids) + # The categories in a custom json file may not be sorted. + thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] + meta.thing_classes = thing_classes + + # In COCO, certain category ids are artificially removed, + # and by convention they are always ignored. + # We deal with COCO's id issue and translate + # the category ids to contiguous ids in [0, 80). + + # It works by looking at the "categories" field in the json, therefore + # if users' own json also have incontiguous ids, we'll + # apply this mapping as well but print a warning. 
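+        # e.g. the standard COCO detection ids run from 1 to 90 with 10 ids unused, so
+        # the resulting id_map compacts them to {1: 0, 2: 1, 3: 2, ..., 90: 79}.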
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): + if "coco" not in dataset_name: + logger.warning( + """ +Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. +""" + ) + id_map = {v: i for i, v in enumerate(cat_ids)} + meta.thing_dataset_id_to_contiguous_id = id_map + + # sort indices for reproducible results + img_ids = sorted(coco_api.imgs.keys()) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = coco_api.loadImgs(img_ids) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. Example of anns[0]: + # [{'segmentation': [[192.81, + # 247.09, + # ... + # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'iscrowd': 0, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] + anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] + total_num_valid_anns = sum([len(x) for x in anns]) + total_num_anns = len(coco_api.anns) + if total_num_valid_anns < total_num_anns: + logger.warning( + f"{json_file} contains {total_num_anns} annotations, but only " + f"{total_num_valid_anns} of them match to images in the file." + ) + + if "minival" not in json_file: + # The popular valminusminival & minival annotations for COCO2014 contain this bug. + # However the ratio of buggy annotations there is tiny and does not affect accuracy. + # Therefore we explicitly white-list them. + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) + + dataset_dicts = {} + + ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + + num_instances_without_valid_segmentation = 0 + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = os.path.join(image_root, img_dict["file_name"]) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + + # The original COCO valminusminival2014 & minival2014 annotation files + # actually contains bugs that, together with certain ways of using COCO API, + # can trigger this assertion. + assert anno["image_id"] == image_id + + assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' + + obj = {key: anno[key] for key in ann_keys if key in anno} + if "bbox" in obj and len(obj["bbox"]) == 0: + raise ValueError( + f"One annotation of image {image_id} contains empty 'bbox' value! " + "This json does not have valid COCO format." 
+ ) + + segm = anno.get("segmentation", None) + if segm: # either list[list[float]] or dict(RLE) + if isinstance(segm, dict): + if isinstance(segm["counts"], list): + # convert to compressed RLE + segm = mask_util.frPyObjects(segm, *segm["size"]) + else: + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + num_instances_without_valid_segmentation += 1 + continue # ignore this instance + obj["segmentation"] = segm + + keypts = anno.get("keypoints", None) + if keypts: # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + obj["bbox_mode"] = BoxMode.XYWH_ABS + if id_map: + annotation_category_id = obj["category_id"] + try: + obj["category_id"] = id_map[annotation_category_id] + except KeyError as e: + raise KeyError( + f"Encountered category_id={annotation_category_id} " + "but this id does not exist in 'categories' of the json file." + ) from e + objs.append(obj) + record["annotations"] = objs + dataset_dicts[image_id] = record + + if num_instances_without_valid_segmentation > 0: + logger.warning( + "Filtered out {} instances without valid segmentation. ".format( + num_instances_without_valid_segmentation + ) + + "There might be issues in your dataset generation process. Please " + "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" + ) + return dataset_dicts + +def get_metadata(): + meta = {} + # The following metadata maps contiguous id from [0, #thing categories + + # #stuff categories) to their names and colors. We have to replica of the + # same name and color under "thing_*" and "stuff_*" because the current + # visualization function in D2 handles thing and class classes differently + # due to some heuristic used in Panoptic FPN. We keep the same naming to + # enable reusing existing visualization functions. + thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] + thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] + stuff_classes = [k["name"] for k in COCO_CATEGORIES] + stuff_colors = [k["color"] for k in COCO_CATEGORIES] + + meta["thing_classes"] = thing_classes + meta["thing_colors"] = thing_colors + meta["stuff_classes"] = stuff_classes + meta["stuff_colors"] = stuff_colors + + # Convert category id for training: + # category id: like semantic segmentation, it is the class id for each + # pixel. Since there are some classes not used in evaluation, the category + # id is not always contiguous and thus we have two set of category ids: + # - original category id: category id in the original dataset, mainly + # used for evaluation. + # - contiguous category id: [0, #classes), in order to train the linear + # softmax classifier. 
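+    # Unlike ADE20K, COCO ids are not contiguous: the 80 thing classes (dataset ids
+    # 1-90, with gaps) land on contiguous ids 0-79 and the 53 stuff classes on 80-132;
+    # e.g. "person" (dataset id 1, the first entry) maps to contiguous id 0.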
+ thing_dataset_id_to_contiguous_id = {} + stuff_dataset_id_to_contiguous_id = {} + + for i, cat in enumerate(COCO_CATEGORIES): + if cat["isthing"]: + thing_dataset_id_to_contiguous_id[cat["id"]] = i + # else: + # stuff_dataset_id_to_contiguous_id[cat["id"]] = i + + # in order to use sem_seg evaluator + stuff_dataset_id_to_contiguous_id[cat["id"]] = i + + meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id + meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id + + return meta + + +def load_coco_panoptic_json(json_file, instances_json, instances_name, image_dir, gt_dir, semseg_dir, meta): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/coco/train2017". + gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017". + json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json". + Returns: + list[dict]: a list of dicts in Detectron2 standard format. (See + `Using Custom Datasets `_ ) + """ + + def _convert_category_id(segment_info, meta): + if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: + segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = True + else: + segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ + segment_info["category_id"] + ] + segment_info["isthing"] = False + return segment_info + + with PathManager.open(json_file) as f: + json_info = json.load(f) + + instance_data_dicts = load_coco_instance_json(instances_json, image_dir.replace("panoptic_", ""), instances_name) + + ret = [] + for ann in json_info["annotations"]: + image_id = int(ann["image_id"]) + # TODO: currently we assume image and label has the same filename but + # different extension, and images have extension ".jpg" for COCO. Need + # to make image extension a user-provided argument if we extend this + # function to support other COCO-like datasets. + image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg") + label_file = os.path.join(gt_dir, ann["file_name"]) + sem_label_file = os.path.join(semseg_dir, ann["file_name"]) + segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]] + ret.append( + { + "file_name": image_file, + "image_id": image_id, + "pan_seg_file_name": label_file, + "sem_seg_file_name": sem_label_file, + "segments_info": segments_info, + "annotations": instance_data_dicts[image_id]["annotations"], + } + ) + assert len(ret), f"No images found in {image_dir}!" 
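+    # Each record returned here carries both panoptic ground truth ("pan_seg_file_name",
+    # "segments_info") and the matching instance "annotations", which is what lets one
+    # registered dataset serve the semantic, instance, and panoptic tasks of the mapper.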
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"] + assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"] + assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"] + return ret + + +def register_coco_panoptic_annos_sem_seg( + name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json, instances_name, +): + panoptic_name = name + delattr(MetadataCatalog.get(panoptic_name), "thing_classes") + delattr(MetadataCatalog.get(panoptic_name), "thing_colors") + MetadataCatalog.get(panoptic_name).set( + thing_classes=metadata["thing_classes"], + thing_colors=metadata["thing_colors"], + # thing_dataset_id_to_contiguous_id=metadata["thing_dataset_id_to_contiguous_id"], + ) + + # the name is "coco_2017_train_panoptic_with_sem_seg" and "coco_2017_val_panoptic_with_sem_seg" + semantic_name = name + "_with_sem_seg" + DatasetCatalog.register( + semantic_name, + lambda: load_coco_panoptic_json(panoptic_json, instances_json, instances_name, image_root, panoptic_root, sem_seg_root, metadata), + ) + MetadataCatalog.get(semantic_name).set( + sem_seg_root=sem_seg_root, + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + json_file=instances_json, + evaluator_type="coco_panoptic_seg", + ignore_label=255, + label_divisor=1000, + **metadata, + ) + + +def register_all_coco_panoptic_annos_sem_seg(root): + for ( + prefix, + (panoptic_root, panoptic_json, semantic_root), + ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): + + prefix_instances = prefix[: -len("_panoptic")] + instances_meta = MetadataCatalog.get(prefix_instances) + image_root, instances_json = instances_meta.image_root, instances_meta.json_file + + if 'val' in instances_json: + instances_json = instances_json.replace('instances_', 'panoptic2instances_') + + register_coco_panoptic_annos_sem_seg( + prefix, + get_metadata(), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + os.path.join(root, semantic_root), + instances_json, + prefix_instances, + ) + + +_root = os.getenv("DETECTRON2_DATASETS", "datasets") +register_all_coco_panoptic_annos_sem_seg(_root) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/tokenizer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..05d4c29c2d1ed03e5748e7346eeea494a2cd9144 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/data/tokenizer.py @@ -0,0 +1,192 @@ +# ------------------------------------------------------------------------- +# MIT License +# +# Copyright (c) 2021 OpenAI +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# Modified by Jiarui Xu
+# -------------------------------------------------------------------------
+
+import gzip
+import html
+import os
+from functools import lru_cache
+
+import ftfy
+import regex as re
+import torch
+
+
+@lru_cache()
+def default_bpe():
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')
+
+@lru_cache()
+def bytes_to_unicode():
+    """Returns a list of utf-8 bytes and a corresponding list of unicode strings.
+
+    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent
+    coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables
+    between utf-8 bytes and unicode strings. It also avoids mapping to whitespace/control characters the bpe code barfs on.
+    """
+    bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8 + n)
+            n += 1
+    cs = [chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+    """Return set of symbol pairs in a word.
+
+    Word is represented as tuple of symbols (symbols being variable-length strings).
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+def basic_clean(text):
+    text = ftfy.fix_text(text)
+    text = html.unescape(html.unescape(text))
+    return text.strip()
+
+
+def whitespace_clean(text):
+    text = re.sub(r'\s+', ' ', text)
+    text = text.strip()
+    return text
+
+class Tokenize:
+
+    def __init__(self, tokenizer, max_seq_len=77, truncate=True):
+        self.tokenizer = tokenizer
+        self.max_seq_len = max_seq_len
+        self.truncate = truncate
+
+    def __call__(self, texts):
+        expanded_dim = False
+        if isinstance(texts, str):
+            texts = [texts]
+            expanded_dim = True
+
+        sot_token = self.tokenizer.encoder['<|startoftext|>']
+        eot_token = self.tokenizer.encoder['<|endoftext|>']
+        all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token] for text in texts]
+        result = torch.zeros(len(all_tokens), self.max_seq_len, dtype=torch.long)
+
+        for i, tokens in enumerate(all_tokens):
+            if len(tokens) > self.max_seq_len:
+                if self.truncate:
+                    tokens = tokens[:self.max_seq_len]
+                    tokens[-1] = eot_token
+                else:
+                    raise RuntimeError(f'Input {texts[i]} is too long for context length {self.max_seq_len}')
+            result[i, :len(tokens)] = torch.tensor(tokens)
+
+        if expanded_dim:
+            return result[0]
+
+        return result
+
+
+class SimpleTokenizer(object):
+
+    def __init__(self, bpe_path: str = default_bpe()):
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
+        merges = merges[1:49152 - 256 - 2 + 1]
+        merges = [tuple(merge.split()) for merge in merges]
+        vocab = list(bytes_to_unicode().values())
+        vocab = vocab + [v + '</w>' for v in vocab]
+        for merge in merges:
+            vocab.append(''.join(merge))
+        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+        self.encoder = dict(zip(vocab, range(len(vocab))))
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.bpe_ranks = dict(zip(merges, range(len(merges))))
+        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+        self.pat = re.compile(
+            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
+            re.IGNORECASE)
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + (token[-1] + '</w>', )
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token + '</w>'
+
+        while True:
+            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except:  # noqa: E722
+                    new_word.extend(word[i:])
+                    break
+
+                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                    new_word.append(first + second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = ' '.join(word)
+        self.cache[token] = word
+        return word
+
+    def encode(self, text):
+        bpe_tokens = []
+        text = whitespace_clean(basic_clean(text)).lower()
+        for token in re.findall(self.pat, text):
+            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+        return bpe_tokens
+
+    def decode(self, tokens):
+        text = ''.join([self.decoder[token] for token in tokens])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
+        return text
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/colormap.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/colormap.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eff9a46d37a1926c48ef0ad6e3308128438140f
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/colormap.py
@@ -0,0 +1,170 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+An awesome colormap for really neat visualizations.
+Copied from Detectron, and removed gray colors.
+""" + +import numpy as np +import random +random.seed(0) + +__all__ = ["colormap", "random_color", "random_colors"] + +# fmt: off +# RGB: +# _COLORS = np.array( +# [ +# 0.000, 0.447, 0.741, +# 0.850, 0.325, 0.098, +# 0.929, 0.694, 0.125, +# 0.494, 0.184, 0.556, +# 0.466, 0.674, 0.188, +# 0.301, 0.745, 0.933, +# 0.635, 0.078, 0.184, +# 0.300, 0.300, 0.300, +# 0.600, 0.600, 0.600, +# 1.000, 0.000, 0.000, +# 1.000, 0.500, 0.000, +# 0.749, 0.749, 0.000, +# 0.000, 1.000, 0.000, +# 0.000, 0.000, 1.000, +# 0.667, 0.000, 1.000, +# 0.333, 0.333, 0.000, +# 0.333, 0.667, 0.000, +# 0.333, 1.000, 0.000, +# 0.667, 0.333, 0.000, +# 0.667, 0.667, 0.000, +# 0.667, 1.000, 0.000, +# 1.000, 0.333, 0.000, +# 1.000, 0.667, 0.000, +# 1.000, 1.000, 0.000, +# 0.000, 0.333, 0.500, +# 0.000, 0.667, 0.500, +# 0.000, 1.000, 0.500, +# 0.333, 0.000, 0.500, +# 0.333, 0.333, 0.500, +# 0.333, 0.667, 0.500, +# 0.333, 1.000, 0.500, +# 0.667, 0.000, 0.500, +# 0.667, 0.333, 0.500, +# 0.667, 0.667, 0.500, +# 0.667, 1.000, 0.500, +# 1.000, 0.000, 0.500, +# 1.000, 0.333, 0.500, +# 1.000, 0.667, 0.500, +# 1.000, 1.000, 0.500, +# 0.000, 0.333, 1.000, +# 0.000, 0.667, 1.000, +# 0.000, 1.000, 1.000, +# 0.333, 0.000, 1.000, +# 0.333, 0.333, 1.000, +# 0.333, 0.667, 1.000, +# 0.333, 1.000, 1.000, +# 0.667, 0.000, 1.000, +# 0.667, 0.333, 1.000, +# 0.667, 0.667, 1.000, +# 0.667, 1.000, 1.000, +# 1.000, 0.000, 1.000, +# 1.000, 0.333, 1.000, +# 1.000, 0.667, 1.000, +# 0.333, 0.000, 0.000, +# 0.500, 0.000, 0.000, +# 0.667, 0.000, 0.000, +# 0.833, 0.000, 0.000, +# 1.000, 0.000, 0.000, +# 0.000, 0.167, 0.000, +# 0.000, 0.333, 0.000, +# 0.000, 0.500, 0.000, +# 0.000, 0.667, 0.000, +# 0.000, 0.833, 0.000, +# 0.000, 1.000, 0.000, +# 0.000, 0.000, 0.167, +# 0.000, 0.000, 0.333, +# 0.000, 0.000, 0.500, +# 0.000, 0.000, 0.667, +# 0.000, 0.000, 0.833, +# 0.000, 0.000, 1.000, +# 0.000, 0.000, 0.000, +# 0.143, 0.143, 0.143, +# 0.857, 0.857, 0.857, +# 1.000, 1.000, 1.000 +# ] +# ).astype(np.float32).reshape(-1, 3) +# fmt: on + +_COLORS = [] + + +def gen_color(): + color = tuple(np.round(np.random.choice(range(256), size=3)/255, 3)) + if color not in _COLORS and np.mean(color) != 0.0: + _COLORS.append(color) + else: + gen_color() + + +for _ in range(300): + gen_color() + + +def colormap(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + Returns: + ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] + """ + assert maximum in [255, 1], maximum + c = _COLORS * maximum + if not rgb: + c = c[:, ::-1] + return c + + +def random_color(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +def random_colors(N, rgb=False, maximum=255): + """ + Args: + N (int): number of unique colors needed + rgb (bool): whether to return RGB colors or BGR colors. 
+ maximum (int): either 255 or 1 + Returns: + ndarray: a list of random_color + """ + indices = random.sample(range(len(_COLORS)), N) + ret = [_COLORS[i] * maximum for i in indices] + if not rgb: + ret = [x[::-1] for x in ret] + return ret + + +if __name__ == "__main__": + import cv2 + + size = 100 + H, W = 10, 10 + canvas = np.random.rand(H * size, W * size, 3).astype("float32") + for h in range(H): + for w in range(W): + idx = h * W + w + if idx >= len(_COLORS): + break + canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] + cv2.imshow("a", canvas) + cv2.waitKey(0) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/defaults.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..9895d61a32fd02857f7313e5e739ce6918604463 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/defaults.py @@ -0,0 +1,77 @@ +import torch +import custom_detectron2.data.transforms as T +from custom_detectron2.checkpoint import DetectionCheckpointer +from custom_detectron2.data import ( + MetadataCatalog, +) +from custom_detectron2.modeling import build_model + + +__all__ = [ + "DefaultPredictor", +] + + +class DefaultPredictor: + """ + Create a simple end-to-end predictor with the given config that runs on + single device for a single input image. + Compared to using the model directly, this class does the following additions: + 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. + 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. + 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. + 4. Take one input image and produce a single output, instead of a batch. + This is meant for simple demo purposes, so it does the above steps automatically. + This is not meant for benchmarks or running complicated inference logic. + If you'd like to do anything more complicated, please refer to its source code as + examples to build and use the model manually. + Attributes: + metadata (Metadata): the metadata of the underlying dataset, obtained from + cfg.DATASETS.TEST. + Examples: + :: + pred = DefaultPredictor(cfg) + inputs = cv2.imread("input.jpg") + outputs = pred(inputs) + """ + + def __init__(self, cfg): + self.cfg = cfg.clone() # cfg can be modified by model + self.model = build_model(self.cfg) + self.model.eval() + if len(cfg.DATASETS.TEST): + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + + checkpointer = DetectionCheckpointer(self.model) + checkpointer.load(cfg.MODEL.WEIGHTS) + + self.aug = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + + self.input_format = cfg.INPUT.FORMAT + assert self.input_format in ["RGB", "BGR"], self.input_format + + def __call__(self, original_image, task): + """ + Args: + original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). + Returns: + predictions (dict): + the output of the model for one image only. + See :doc:`/tutorials/models` for details about the format. + """ + with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 + # Apply pre-processing to image. 
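+            # The steps below mirror the contract in the class docstring:
+            # an optional BGR->RGB flip, a `ResizeShortestEdge` transform built
+            # from cfg.INPUT.{MIN,MAX}_SIZE_TEST, an HWC uint8 -> CHW float32
+            # tensor conversion, and a plain-text task prompt in the input dict.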
+ if self.input_format == "RGB": + # whether the model expects BGR inputs or RGB + original_image = original_image[:, :, ::-1] + height, width = original_image.shape[:2] + image = self.aug.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + task = f"The task is {task}" + + inputs = {"image": image, "height": height, "width": width, "task": task} + predictions = self.model([inputs])[0] + return predictions \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/predictor.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..f6805a3aa4cc9f5c356b8155fda297feed463d33 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/predictor.py @@ -0,0 +1,190 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# Copied from: https://github.com/facebookresearch/detectron2/blob/master/demo/predictor.py +import atexit +import bisect +import multiprocessing as mp +from collections import deque + +import cv2 +import torch + +from custom_detectron2.data import MetadataCatalog +from defaults import DefaultPredictor +from custom_detectron2.utils.video_visualizer import VideoVisualizer +from visualizer import ColorMode, Visualizer + + +class VisualizationDemo(object): + def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): + """ + Args: + cfg (CfgNode): + instance_mode (ColorMode): + parallel (bool): whether to run the model in different processes from visualization. + Useful since the visualization logic can be slow. + """ + self.metadata = MetadataCatalog.get( + cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused" + ) + if 'cityscapes_fine_sem_seg_val' in cfg.DATASETS.TEST[0]: + from cityscapesscripts.helpers.labels import labels + stuff_colors = [k.color for k in labels if k.trainId != 255] + self.metadata = self.metadata.set(stuff_colors=stuff_colors) + self.cpu_device = torch.device("cpu") + self.instance_mode = instance_mode + + self.parallel = parallel + if parallel: + num_gpu = torch.cuda.device_count() + self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) + else: + self.predictor = DefaultPredictor(cfg) + + def run_on_image(self, image, task, sem_gt, pan_gt, ins_gt, box_gt): + """ + Args: + image (np.ndarray): an image of shape (H, W, C) (in BGR order). + This is the format used by OpenCV. + Returns: + predictions (dict): the output of the model. + vis_output (VisImage): the visualized image output. + """ + vis_output = None + # Convert image from OpenCV BGR format to Matplotlib RGB format. 
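+        # (the reversed slice is a view over the same buffer; no pixel data is copied)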
+ image = image[:, :, ::-1] + vis_output = {} + + if task == 'panoptic': + visualizer = Visualizer(image, metadata=self.metadata, instance_mode=0) + predictions = self.predictor(image, "panoptic") + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_output['panoptic'] = visualizer.draw_panoptic_seg_predictions( + panoptic_seg.to(self.cpu_device), segments_info, alpha=1 + ) + + # visualizer = Visualizer(image, metadata=self.metadata, instance_mode=0) + # vis_output['pan_gt'] = visualizer.draw_panoptic_seg( + # pan_gt[0].to(self.cpu_device), pan_gt[1], alpha=1 + # ) + + if task == 'panoptic' or task == 'semantic': + visualizer = Visualizer(image, metadata=self.metadata, instance_mode=1) + predictions = self.predictor(image, "semantic") + vis_output['semantic'] = visualizer.draw_sem_seg( + predictions["sem_seg"].argmax(dim=0).to(self.cpu_device), alpha=1 + ) + + # visualizer = Visualizer(image, metadata=self.metadata, instance_mode=1) + # vis_output['gt_sem'] = visualizer.draw_sem_seg( + # sem_gt.to(self.cpu_device), alpha=1 + # ) + + if task == 'panoptic' or task == 'instance': + visualizer = Visualizer(image, metadata=self.metadata, instance_mode=2) + predictions = self.predictor(image, "instance") + instances = predictions["instances"].to(self.cpu_device) + vis_output['instance'] = visualizer.draw_instance_predictions(predictions=instances, alpha=1) + + if 'boxes' in predictions: + boxes, labels, scores = predictions["boxes"] + visualizer = Visualizer(image, False, metadata=self.metadata, instance_mode=0) + vis_output['boxes'] = visualizer.draw_box_predictions( + boxes.to(self.cpu_device), labels.to(self.cpu_device), scores.to(self.cpu_device)) + + + # visualizer = Visualizer(image, metadata=self.metadata, instance_mode=2) + # vis_output['ins_gt'] = visualizer.draw_instance_predictions(predictions=ins_gt.to(self.cpu_device), alpha=1) + # vis_output['input'] = visualizer.get_image(image) + + return predictions, vis_output + + +class AsyncPredictor: + """ + A predictor that runs the model asynchronously, possibly on >1 GPUs. + Because rendering the visualization takes considerably amount of time, + this helps improve throughput a little bit when rendering videos. 
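+
+    Example (illustrative sketch; ``cfg`` and ``frames`` are placeholders):
+    ::
+        predictor = AsyncPredictor(cfg, num_gpus=torch.cuda.device_count())
+        for frame in frames:
+            predictor.put(frame)  # non-blocking submit
+        results = [predictor.get() for _ in frames]  # returned in submission order
+
+    Calling ``predictor(frame)`` is shorthand for a ``put`` followed by a ``get``.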
+ """ + + class _StopToken: + pass + + class _PredictWorker(mp.Process): + def __init__(self, cfg, task_queue, result_queue): + self.cfg = cfg + self.task_queue = task_queue + self.result_queue = result_queue + super().__init__() + + def run(self): + predictor = DefaultPredictor(self.cfg) + + while True: + task = self.task_queue.get() + if isinstance(task, AsyncPredictor._StopToken): + break + idx, data = task + result = predictor(data) + self.result_queue.put((idx, result)) + + def __init__(self, cfg, num_gpus: int = 1): + """ + Args: + cfg (CfgNode): + num_gpus (int): if 0, will run on CPU + """ + num_workers = max(num_gpus, 1) + self.task_queue = mp.Queue(maxsize=num_workers * 3) + self.result_queue = mp.Queue(maxsize=num_workers * 3) + self.procs = [] + for gpuid in range(max(num_gpus, 1)): + cfg = cfg.clone() + cfg.defrost() + cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" + self.procs.append( + AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) + ) + + self.put_idx = 0 + self.get_idx = 0 + self.result_rank = [] + self.result_data = [] + + for p in self.procs: + p.start() + atexit.register(self.shutdown) + + def put(self, image): + self.put_idx += 1 + self.task_queue.put((self.put_idx, image)) + + def get(self): + self.get_idx += 1 # the index needed for this request + if len(self.result_rank) and self.result_rank[0] == self.get_idx: + res = self.result_data[0] + del self.result_data[0], self.result_rank[0] + return res + + while True: + # make sure the results are returned in the correct order + idx, res = self.result_queue.get() + if idx == self.get_idx: + return res + insert = bisect.bisect(self.result_rank, idx) + self.result_rank.insert(insert, idx) + self.result_data.insert(insert, res) + + def __len__(self): + return self.put_idx - self.get_idx + + def __call__(self, image): + self.put(image) + return self.get() + + def shutdown(self): + for _ in self.procs: + self.task_queue.put(AsyncPredictor._StopToken()) + + @property + def default_buffer_size(self): + return len(self.procs) * 5 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/visualizer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/visualizer.py new file mode 100644 index 0000000000000000000000000000000000000000..4f03034510b5b98141dbe9522a08313d5413e555 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/demo/visualizer.py @@ -0,0 +1,1350 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import colorsys +import logging +import math +import numpy as np +from enum import Enum, unique +import cv2 +import matplotlib as mpl +import matplotlib.colors as mplc +import matplotlib.figure as mplfigure +import custom_pycocotools.mask as mask_util +import torch +from matplotlib.backends.backend_agg import FigureCanvasAgg +from PIL import Image + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes +from custom_detectron2.utils.file_io import PathManager +import random +random.seed(0) +from .colormap import random_color, _COLORS +logger = logging.getLogger(__name__) + +__all__ = ["ColorMode", "VisImage", "Visualizer"] + + +_SMALL_OBJECT_AREA_THRESH = 1000 +_LARGE_MASK_AREA_THRESH = 120000 +_OFF_WHITE = (1.0, 1.0, 1.0) +_BLACK = (0, 0, 0) +_RED = (1.0, 0, 0) + +_KEYPOINT_THRESHOLD = 0.05 + + +def instance_color(rgb=False, idx=1, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. 
+ maximum (int): either 255 or 1 + Returns: + ndarray: a vector of 3 numbers + """ + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + +@unique +class ColorMode(Enum): + """ + Enum of different color modes to use for instance visualizations. + """ + + IMAGE = 0 + """ + Picks a random color for every instance and overlay segmentations with low opacity. + """ + SEGMENTATION = 1 + """ + Let instances of the same category have similar colors + (from metadata.thing_colors), and overlay them with + high opacity. This provides more attention on the quality of segmentation. + """ + IMAGE_BW = 2 + """ + Same as IMAGE, but convert all areas without masks to gray-scale. + Only available for drawing per-instance mask predictions. + """ + + +class GenericMask: + """ + Attribute: + polygons (list[ndarray]): list[ndarray]: polygons for this mask. + Each ndarray has format [x, y, x, y, ...] + mask (ndarray): a binary mask + """ + + def __init__(self, mask_or_polygons, height, width): + self._mask = self._polygons = self._has_holes = None + self.height = height + self.width = width + + m = mask_or_polygons + if isinstance(m, dict): + # RLEs + assert "counts" in m and "size" in m + if isinstance(m["counts"], list): # uncompressed RLEs + h, w = m["size"] + assert h == height and w == width + m = mask_util.frPyObjects(m, h, w) + self._mask = mask_util.decode(m)[:, :] + return + + if isinstance(m, list): # list[ndarray] + self._polygons = [np.asarray(x).reshape(-1) for x in m] + return + + if isinstance(m, np.ndarray): # assumed to be a binary mask + assert m.shape[1] != 2, m.shape + assert m.shape == ( + height, + width, + ), f"mask shape: {m.shape}, target dims: {height}, {width}" + self._mask = m.astype("uint8") + return + + raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) + + @property + def mask(self): + if self._mask is None: + self._mask = self.polygons_to_mask(self._polygons) + return self._mask + + @property + def polygons(self): + if self._polygons is None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + return self._polygons + + @property + def has_holes(self): + if self._has_holes is None: + if self._mask is not None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + else: + self._has_holes = False # if original format is polygon, does not have holes + return self._has_holes + + def mask_to_polygons(self, mask): + # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level + # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. + # Internal contours (holes) are placed in hierarchy-2. + # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. + mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr + res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + if hierarchy is None: # empty mask + return [], False + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. + # We add 0.5 to turn them into real-value coordinate space. A better solution + # would be to first +0.5 and then dilate the returned polygon by 0.5. 
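+        # Keep only valid polygons: at least 3 points, i.e. 6 flattened coordinates.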
+        res = [x + 0.5 for x in res if len(x) >= 6]
+        return res, has_holes
+
+    def polygons_to_mask(self, polygons):
+        rle = mask_util.frPyObjects(polygons, self.height, self.width)
+        rle = mask_util.merge(rle)
+        return mask_util.decode(rle)[:, :]
+
+    def area(self):
+        return self.mask.sum()
+
+    def bbox(self):
+        p = mask_util.frPyObjects(self.polygons, self.height, self.width)
+        p = mask_util.merge(p)
+        bbox = mask_util.toBbox(p)
+        bbox[2] += bbox[0]
+        bbox[3] += bbox[1]
+        return bbox
+
+
+class _PanopticPrediction:
+    """
+    Unify different panoptic annotation/prediction formats
+    """
+
+    def __init__(self, panoptic_seg, segments_info, metadata=None):
+        if segments_info is None:
+            assert metadata is not None
+            # If "segments_info" is None, we assume "panoptic_img" is a
+            # H*W int32 image storing the panoptic_id in the format of
+            # category_id * label_divisor + instance_id. We reserve -1 for
+            # VOID label.
+            label_divisor = metadata.label_divisor
+            segments_info = []
+            for panoptic_label in np.unique(panoptic_seg.numpy()):
+                if panoptic_label == -1:
+                    # VOID region.
+                    continue
+                pred_class = panoptic_label // label_divisor
+                isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
+                segments_info.append(
+                    {
+                        "id": int(panoptic_label),
+                        "category_id": int(pred_class),
+                        "isthing": bool(isthing),
+                    }
+                )
+        del metadata
+
+        self._seg = panoptic_seg
+
+        self._sinfo = {s["id"]: s for s in segments_info}  # seg id -> seg info
+        segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
+        areas = areas.numpy()
+        sorted_idxs = np.argsort(-areas)
+        self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
+        self._seg_ids = self._seg_ids.tolist()
+        for sid, area in zip(self._seg_ids, self._seg_areas):
+            if sid in self._sinfo:
+                self._sinfo[sid]["area"] = float(area)
+
+    def non_empty_mask(self):
+        """
+        Returns:
+            (H, W) array, a mask for all pixels that have a prediction
+        """
+        empty_ids = []
+        for id in self._seg_ids:
+            if id not in self._sinfo:
+                empty_ids.append(id)
+        if len(empty_ids) == 0:
+            return np.zeros(self._seg.shape, dtype=np.uint8)
+        assert (
+            len(empty_ids) == 1
+        ), ">1 ids corresponds to no labels. This is currently not supported"
+        # builtin `bool` rather than the `np.bool` alias, which NumPy 1.24 removed
+        return (self._seg != empty_ids[0]).numpy().astype(bool)
+
+    def semantic_masks(self):
+        for sid in self._seg_ids:
+            sinfo = self._sinfo.get(sid)
+            if sinfo is None or sinfo["isthing"]:
+                # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
+                continue
+            yield (self._seg == sid).numpy().astype(bool), sinfo
+
+    def instance_masks(self):
+        for sid in self._seg_ids:
+            sinfo = self._sinfo.get(sid)
+            if sinfo is None or not sinfo["isthing"]:
+                continue
+            mask = (self._seg == sid).numpy().astype(bool)
+            if mask.sum() > 0:
+                yield mask, sinfo
+
+
+def _create_text_labels(classes, scores, class_names, is_crowd=None):
+    """
+    Args:
+        classes (list[int] or None):
+        scores (list[float] or None):
+        class_names (list[str] or None):
+        is_crowd (list[bool] or None):
+    Returns:
+        list[str] or None
+    """
+    labels = None
+    if classes is not None:
+        if class_names is not None and len(class_names) > 0:
+            labels = [class_names[i] for i in classes]
+        else:
+            labels = [str(i) for i in classes]
+    if scores is not None:
+        if labels is None:
+            labels = ["{:.0f}%".format(s * 100) for s in scores]
+        else:
+            labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
+    if labels is not None and is_crowd is not None:
+        labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
+    return labels
+
+
+class VisImage:
+    def __init__(self, img, scale=1.0):
+        """
+        Args:
+            img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255].
+            scale (float): scale the input image
+        """
+        self.img = img
+        self.scale = scale
+        self.width, self.height = img.shape[1], img.shape[0]
+        self._setup_figure(img)
+
+    def _setup_figure(self, img):
+        """
+        Args:
+            Same as in :meth:`__init__()`.
+        Returns:
+            fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
+            ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
+        """
+        fig = mplfigure.Figure(frameon=False)
+        self.dpi = fig.get_dpi()
+        # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
+        # (https://github.com/matplotlib/matplotlib/issues/15363)
+        fig.set_size_inches(
+            (self.width * self.scale + 1e-2) / self.dpi,
+            (self.height * self.scale + 1e-2) / self.dpi,
+        )
+        self.canvas = FigureCanvasAgg(fig)
+        # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
+        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
+        ax.axis("off")
+        self.fig = fig
+        self.ax = ax
+        self.reset_image(img)
+
+    def reset_image(self, img):
+        """
+        Args:
+            img: same as in __init__
+        """
+        img = img.astype("uint8")
+        self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
+
+    def save(self, filepath):
+        """
+        Args:
+            filepath (str): a string that contains the absolute path, including the file name, where
+                the visualized image will be saved.
+        """
+        self.fig.savefig(filepath)
+
+    def get_image(self):
+        """
+        Returns:
+            ndarray:
+                the visualized image of shape (H, W, 3) (RGB) in uint8 type.
+                The shape is scaled w.r.t the input image using the given `scale` argument.
+        """
+        canvas = self.canvas
+        s, (width, height) = canvas.print_to_buffer()
+        # buf = io.BytesIO()  # works for cairo backend
+        # canvas.print_rgba(buf)
+        # width, height = self.width, self.height
+        # s = buf.getvalue()
+
+        buffer = np.frombuffer(s, dtype="uint8")
+
+        img_rgba = buffer.reshape(height, width, 4)
+        rgb, alpha = np.split(img_rgba, [3], axis=2)
+        return rgb.astype("uint8")
+
+
+class Visualizer:
+    """
+    Visualizer that draws data about detection/segmentation on images.
+ It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` + that draw primitive objects to images, as well as high-level wrappers like + `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` + that draw composite data in some pre-defined style. + Note that the exact visualization style for the high-level wrappers are subject to change. + Style such as color, opacity, label contents, visibility of labels, or even the visibility + of objects themselves (e.g. when the object is too small) may change according + to different heuristics, as long as the results still look visually reasonable. + To obtain a consistent style, you can implement custom drawing functions with the + abovementioned primitive methods instead. If you need more customized visualization + styles, you can process the data yourself following their format documented in + tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not + intend to satisfy everyone's preference on drawing styles. + This visualizer focuses on high rendering quality rather than performance. It is not + designed to be used for real-time applications. + """ + + # TODO implement a fast, rasterized version using OpenCV + + def __init__(self, img_rgb, is_img=True, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): + """ + Args: + img_rgb: a numpy array of shape (H, W, C), where H and W correspond to + the height and width of the image respectively. C is the number of + color channels. The image is required to be in RGB format since that + is a requirement of the Matplotlib library. The image is also expected + to be in the range [0, 255]. + metadata (Metadata): dataset metadata (e.g. class names and colors) + instance_mode (ColorMode): defines one of the pre-defined style for drawing + instances on an image. + """ + if is_img: + self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) + else: + self.img = np.zeros_like(img_rgb).clip(0, 255).astype(np.uint8) + 255 + if metadata is None: + metadata = MetadataCatalog.get("__nonexist__") + self.metadata = metadata + self.output = VisImage(self.img, scale=scale) + self.cpu_device = torch.device("cpu") + + # too small texts are useless, therefore clamp to 9 + self._default_font_size = max( + np.sqrt(self.output.height * self.output.width) // 90, 10 // scale + ) + self._instance_mode = instance_mode + self.keypoint_threshold = _KEYPOINT_THRESHOLD + + def get_image(self, img): + img = np.asarray(img).clip(0, 255).astype(np.uint8) + return VisImage(img, scale=1.0) + + def draw_box_predictions( + self, + boxes=None, + labels=None, + scores=None, + assigned_colors=None + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. 
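+
+        Example (illustrative sketch; ``img_rgb`` and ``metadata`` are placeholders,
+        and the tensors are as unpacked from the demo predictor's ``predictions["boxes"]``):
+        ::
+            vis = Visualizer(img_rgb, metadata=metadata)
+            out = vis.draw_box_predictions(boxes, labels, scores)
+            out.save("boxes.png")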
+ """ + num_instances = 0 + boxes = self._convert_boxes(boxes) + classes = labels.tolist() + scores = scores.tolist() + labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) + num_instances = len(boxes) + assert len(labels) == num_instances + if assigned_colors is None: + # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + areas = None + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. + horiz_align = "left" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + return self.output + + + def draw_instance_predictions(self, predictions, alpha=0.8, is_text=True): + """ + Draw instance-level prediction results on an image. + Args: + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + Returns: + output (VisImage): image object with visualizations. 
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = np.asarray(predictions.pred_masks) + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): + # colors = [ + # self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + # ] + colors = [ + instance_color(rgb=True, idx=c, maximum=1) for c in classes + ] + else: + colors = None + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image( + self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + if predictions.has("pred_masks") + else None + ) + ) + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + is_text=is_text, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8, is_text=True, edge_color=_OFF_WHITE): + """ + Draw semantic segmentation predictions/labels. + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + Each value is the integer label of the pixel. + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=edge_color, + text=text, + alpha=alpha, + area_threshold=area_threshold, + is_text=is_text, + ) + return self.output + + def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7, is_text=True,): + """ + Draw panoptic prediction annotations or results. + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. + If it is a ``list[dict]``, each dict contains keys "id", "category_id". + If None, category id of each pixel is computed by + ``pixel // metadata.label_divisor``. + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + Returns: + output (VisImage): image object with visualizations. + """ + pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx] + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + is_text=is_text, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + labels = _create_text_labels( + category_ids, scores, self.metadata.stuff_classes, [x.get("iscrowd", 0) for x in sinfo] + ) + + try: + colors = [ + self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) for c in category_ids + ] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha, is_text=is_text) + + return self.output + + draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility + + def draw_dataset_dict(self, dic): + """ + Draw annotations/segmentaions in Detectron2 Dataset format. + Args: + dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. + Returns: + output (VisImage): image object with visualizations. + """ + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [ + BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) + if len(x["bbox"]) == 4 + else x["bbox"] + for x in annos + ] + + colors = None + category_ids = [x["category_id"] for x in annos] + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) + for c in category_ids + ] + names = self.metadata.get("stuff_classes", None) + labels = _create_text_labels( + category_ids, + scores=None, + class_names=names, + is_crowd=[x.get("iscrowd", 0) for x in annos], + ) + self.overlay_instances( + labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors + ) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + with PathManager.open(dic["sem_seg_file_name"], "rb") as f: + sem_seg = Image.open(f) + sem_seg = np.asarray(sem_seg, dtype="uint8") + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) + + pan_seg = dic.get("pan_seg", None) + # if pan_seg is None and "pan_seg_file_name" in dic: + # with PathManager.open(dic["pan_seg_file_name"], "rb") as f: + # pan_seg = Image.open(f) + # pan_seg = np.asarray(pan_seg) + # from panopticapi.utils import rgb2id + # + # pan_seg = rgb2id(pan_seg) + if pan_seg is not None: + segments_info = dic["segments_info"] + pan_seg = torch.tensor(pan_seg) + self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5, + is_text=True, + ): + """ + Args: + boxes (Boxes, RotatedBoxes or ndarray): either a 
:class:`Boxes`, + or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, + or a :class:`RotatedBoxes`, + or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image, + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + * :class:`detectron2.structures.PolygonMasks`, + :class:`detectron2.structures.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = 0 + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] + if num_instances == 0: + return self.output + if boxes is not None and boxes.shape[1] == 5: + return self.overlay_rotated_instances( + boxes=boxes, labels=labels, assigned_colors=assigned_colors + ) + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. 
+ horiz_align = "left" + elif masks is not None: + # skip small mask without polygon + if len(masks[i].polygons) == 0: + continue + + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occlusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + if is_text: + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): + """ + Args: + boxes (ndarray): an Nx5 numpy array of + (x_center, y_center, width, height, angle_degrees) format + for the N objects in a single image. + labels (list[str]): the text to be displayed for each instance. + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + Returns: + output (VisImage): image object with visualizations. + """ + num_instances = len(boxes) + + if assigned_colors is None: + # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + if boxes is not None: + areas = boxes[:, 2] * boxes[:, 3] + + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + colors = [assigned_colors[idx] for idx in sorted_idxs] + + for i in range(num_instances): + self.draw_rotated_box_with_label( + boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None + ) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + Returns: + output (VisImage): image object with visualizations. 
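+
+        Example (illustrative; each row of ``pred_keypoints`` is a (K, 3) tensor):
+        ::
+            for keypoints_per_instance in predictions.pred_keypoints:
+                vis.draw_and_connect_keypoints(keypoints_per_instance)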
+ """ + visible = {} + keypoint_names = self.metadata.get("keypoint_names") + for idx, keypoint in enumerate(keypoints): + + # draw keypoint + x, y, prob = keypoint + if prob > self.keypoint_threshold: + self.draw_circle((x, y), color=_RED) + if keypoint_names: + keypoint_name = keypoint_names[idx] + visible[keypoint_name] = (x, y) + + if self.metadata.get("keypoint_connection_rules"): + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. + # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, + text, + position, + *, + font_size=None, + color="g", + horizontal_alignment="center", + rotation=0, + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return self.output + + def draw_box(self, box_coord, alpha=1.0, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + Returns: + output (VisImage): image object with box drawn. 
+ """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = 2 + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_rotated_box_with_label( + self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None + ): + """ + Draw a rotated box with label on its top-left corner. + Args: + rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), + where cnt_x and cnt_y are the center coordinates of the box. + w and h are the width and height of the box. angle represents how + many degrees the box is rotated CCW with regard to the 0-degree box. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + label (string): label for rotated box. It will not be rendered when set to None. + Returns: + output (VisImage): image object with box drawn. + """ + cnt_x, cnt_y, w, h, angle = rotated_box + area = w * h + # use thinner lines when the box is small + linewidth = self._default_font_size / ( + 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 + ) + + theta = angle * math.pi / 180.0 + c = math.cos(theta) + s = math.sin(theta) + rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] + # x: left->right ; y: top->down + rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] + for k in range(4): + j = (k + 1) % 4 + self.draw_line( + [rotated_rect[k][0], rotated_rect[j][0]], + [rotated_rect[k][1], rotated_rect[j][1]], + color=edge_color, + linestyle="--" if k == 1 else line_style, + linewidth=linewidth, + ) + + if label is not None: + text_pos = rotated_rect[1] # topleft corner + + height_ratio = h / np.sqrt(self.output.height * self.output.width) + label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size + ) + self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) + + return self.output + + def draw_circle(self, circle_coord, color, radius=3): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch( + mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) + ) + return self.output + + def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + linestyle: style of the line. Refer to `matplotlib.lines.Line2D` + for a full list of formats that are accepted. 
+ linewidth (float or None): width of the line. When it's None, + a default value will be computed and used. + Returns: + output (VisImage): image object with line drawn. + """ + if linewidth is None: + linewidth = self._default_font_size / 3 + linewidth = max(linewidth, 1) + self.output.ax.add_line( + mpl.lines.Line2D( + x_data, + y_data, + linewidth=linewidth * self.output.scale, + color=color, + linestyle=linestyle, + ) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10, is_text=True, + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component smaller than this area will not be shown. + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + has_valid_segment = False + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + # area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + # if area < (area_threshold or 0): + # continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + # TODO: Use Path/PathPatch to draw vector graphics: + # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if is_text: + if text is not None and has_valid_segment: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): + """ + Args: + soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + text (str): if None, will be drawn on the object + alpha (float): blending efficient. Smaller values lead to more transparent masks. + Returns: + output (VisImage): image object with mask drawn. 
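+
+        Example (illustrative; ``prob`` stands for an (H, W) float array in [0, 1]):
+        ::
+            vis.draw_soft_mask(prob, color="b", alpha=0.6)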
+ """ + if color is None: + color = random_color(rgb=True, maximum=1) + color = mplc.to_rgb(color) + + shape2d = (soft_mask.shape[0], soft_mask.shape[1]) + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = soft_mask * alpha + self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) + + if text is not None: + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + binary_mask = (soft_mask > 0.5).astype("uint8") + # self._draw_text_in_mask(binary_mask, text, lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + Returns: + output (VisImage): image object with polygon drawn. + """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. 
+ """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return modified_color + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. + """ + if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): + return boxes.tensor.detach().numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _draw_text_in_mask(self, binary_mask, text, color): + """ + Find proper places to draw text given a binary mask. + """ + # TODO sometimes drawn on wrong objects. the heuristics here can improve. + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + if stats[1:, -1].size == 0: + return + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + self.draw_text(text, center, color=color) + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. 
+ """ + return self.output \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49f62369cca38a3c85884f8dea6baea674cb9060 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/__init__.py @@ -0,0 +1,3 @@ +from .detection_coco_evaluator import * +from .coco_evaluator import * +from .cityscapes_evaluation import CityscapesInstanceEvaluator \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/cityscapes_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/cityscapes_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..2019165f605bfb60bdbaeef365b1f8fb937747e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/cityscapes_evaluation.py @@ -0,0 +1,201 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/cityscapes_evaluation.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import glob +import logging +import numpy as np +import os +import tempfile +from collections import OrderedDict +import torch +from PIL import Image + +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.utils import comm +from custom_detectron2.utils.file_io import PathManager + +from .evaluator import DatasetEvaluator + + +class CityscapesEvaluator(DatasetEvaluator): + """ + Base class for evaluation using cityscapes API. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): the name of the dataset. + It must have the following metadata associated with it: + "thing_classes", "gt_dir". + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") + self._temp_dir = self._working_dir.name + # All workers will write to the same results directory + # TODO this does not work in distributed training + assert ( + comm.get_local_size() == comm.get_world_size() + ), "CityscapesEvaluator currently do not work with multiple machines." + self._temp_dir = comm.all_gather(self._temp_dir)[0] + if self._temp_dir != self._working_dir.name: + self._working_dir.cleanup() + self._logger.info( + "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) + ) + + +class CityscapesInstanceEvaluator(CityscapesEvaluator): + """ + Evaluate instance segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. 
+ """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import name2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") + + if "instances" in output: + output = output["instances"].to(self._cpu_device) + num_instances = len(output) + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = output.pred_classes[i] + classes = self._metadata.stuff_classes[pred_class] + class_id = name2label[classes].id + score = output.scores[i] + mask = output.pred_masks[i].numpy().astype("uint8") + png_filename = os.path.join( + self._temp_dir, basename + "_{}_{}.png".format(i, classes) + ) + + Image.fromarray(mask * 255).save(png_filename) + fout.write( + "{} {} {}\n".format(os.path.basename(png_filename), class_id, score) + ) + else: + # Cityscapes requires a prediction file for every ground truth image. + with open(pred_txt, "w") as fout: + pass + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". + """ + comm.synchronize() + if comm.get_rank() > 0: + return + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + self._working_dir.cleanup() + return ret + + +class CityscapesSemSegEvaluator(CityscapesEvaluator): + """ + Evaluate semantic segmentation results on cityscapes dataset using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + * Only the main process runs evaluation. 
+ """ + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import trainId2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_filename = os.path.join(self._temp_dir, basename + "_pred.png") + + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy() + pred = 255 * np.ones(output.shape, dtype=np.uint8) + for train_id, label in trainId2label.items(): + if label.ignoreInEval: + continue + pred[output == train_id] = label.id + Image.fromarray(pred).save(pred_filename) + + def evaluate(self): + comm.synchronize() + if comm.get_rank() > 0: + return + # Load the Cityscapes eval script *after* setting the required env var, + # since the script reads CITYSCAPES_DATASET into global variables at load time. + import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._metadata.gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + ) + ret = OrderedDict() + ret["sem_seg"] = { + "IoU": 100.0 * results["averageScoreClasses"], + "iIoU": 100.0 * results["averageScoreInstClasses"], + "IoU_sup": 100.0 * results["averageScoreCategories"], + "iIoU_sup": 100.0 * results["averageScoreInstCategories"], + } + self._working_dir.cleanup() + return ret diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/coco_evaluator.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/coco_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8b93447cd58a721a20eedfce822b144aaac9f0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/coco_evaluator.py @@ -0,0 +1,563 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/coco_evaluation.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import custom_pycocotools.mask as mask_util +import torch +from custom_pycocotools.coco import COCO +from custom_pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import custom_detectron2.utils.comm as comm +from custom_detectron2.config import CfgNode +from 
custom_detectron2.data import MetadataCatalog
+from custom_detectron2.data.datasets.coco import convert_to_coco_json
+from custom_detectron2.structures import Boxes, BoxMode, pairwise_iou
+from custom_detectron2.utils.file_io import PathManager
+from custom_detectron2.utils.logger import create_small_table
+
+from .evaluator import DatasetEvaluator
+
+try:
+    from custom_detectron2.evaluation.fast_eval_api import COCOeval_opt
+except ImportError:
+    COCOeval_opt = COCOeval
+
+
+class COCOEvaluator(DatasetEvaluator):
+    """
+    Evaluate AP for instance detection/segmentation, AP
+    for keypoint detection outputs using COCO's metrics.
+    See http://cocodataset.org/#detection-eval and
+    http://cocodataset.org/#keypoints-eval to understand its metrics.
+    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
+    the metric cannot be computed (e.g. due to no predictions made).
+
+    In addition to COCO, this evaluator is able to support any bounding box detection,
+    instance segmentation, or keypoint detection dataset.
+    """
+
+    def __init__(
+        self,
+        dataset_name,
+        tasks=None,
+        distributed=True,
+        output_dir=None,
+        *,
+        max_dets_per_image=None,
+        use_fast_impl=True,
+        kpt_oks_sigmas=(),
+        allow_cached_coco=True,
+    ):
+        """
+        Args:
+            dataset_name (str): name of the dataset to be evaluated.
+                It must have either the following corresponding metadata:
+
+                    "json_file": the path to the COCO format annotation
+
+                Or it must be in detectron2's standard dataset format
+                so it can be converted to COCO format automatically.
+            tasks (tuple[str]): tasks that can be evaluated under the given
+                configuration. A task is one of "bbox", "segm", "keypoints".
+                By default, will infer this automatically from predictions.
+            distributed (bool): if True, will collect results from all ranks and run evaluation
+                in the main process.
+                Otherwise, will only evaluate the results in the current process.
+            output_dir (str): optional, an output directory to dump all
+                results predicted on the dataset. The dump contains two files:
+
+                1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
+                   contains all the results in the format they are produced by the model.
+                2. "coco_instances_results.json" a json file in COCO's result format.
+            max_dets_per_image (int): limit on the maximum number of detections per image.
+                By default in COCO, this limit is 100, but this can be customized
+                to be greater, as is needed in evaluation metrics AP fixed and AP pool
+                (see https://arxiv.org/pdf/2102.01066.pdf)
+                This doesn't affect keypoint evaluation.
+            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
+                Although the results should be very close to the official implementation in COCO
+                API, it is still recommended to compute results with the official API for use in
+                papers. The faster implementation also uses more RAM.
+            kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
+                See http://cocodataset.org/#keypoints-eval
+                When empty, it will use the defaults in COCO.
+                Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+            allow_cached_coco (bool): Whether to use cached coco json from previous validation
+                runs. You should set this to False if you need to use different validation data.
+                Defaults to True.
+        """
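+        # Editor's note: typical usage sketch; names like "my_dataset_val" and "./output"
+        # are placeholders, not part of the original patch:
+        #
+        #     evaluator = COCOEvaluator("my_dataset_val", distributed=False, output_dir="./output")
+        #     evaluator.reset()
+        #     evaluator.process(inputs, outputs)   # once per batch of model inputs/outputs
+        #     metrics = evaluator.evaluate()       # e.g. {"segm": {"AP": ..., "AP50": ..., ...}}
+        self._logger = logging.getLogger(__name__)
+        self._distributed = distributed
+        self._output_dir = output_dir
+
+        if use_fast_impl and (COCOeval_opt is COCOeval):
+            self._logger.info("Fast COCO eval is not built. 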
Falling back to official COCO eval.") + use_fast_impl = False + self._use_fast_impl = use_fast_impl + + # COCOeval requires the limit on the number of detections per image (maxDets) to be a list + # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the + # 3rd element (100) is used as the limit on the number of detections per image when + # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, + # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] + else: + max_dets_per_image = [1, 10, max_dets_per_image] + self._max_dets_per_image = max_dets_per_image + + if tasks is not None and isinstance(tasks, CfgNode): + kpt_oks_sigmas = ( + tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas + ) + self._logger.warn( + "COCO Evaluator instantiated using config, this is deprecated behavior." + " Please pass in explicit arguments instead." + ) + self._tasks = None # Infering it from predictions should be better + else: + self._tasks = tasks + + self._cpu_device = torch.device("cpu") + + self._metadata = MetadataCatalog.get(dataset_name) + if not hasattr(self._metadata, "json_file"): + if output_dir is None: + raise ValueError( + "output_dir must be provided to COCOEvaluator " + "for datasets not in COCO format." + ) + self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") + + cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") + self._metadata.json_file = cache_path + convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) + + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = "annotations" in self._coco_api.dataset + if self._do_evaluation: + self._kpt_oks_sigmas = kpt_oks_sigmas + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + prediction["instances"] = instances_to_coco_json(instances, input["image_id"]) + if len(prediction) > 1: + self._predictions.append(prediction) + + def evaluate(self, img_ids=None): + """ + Args: + img_ids: a list of image IDs to evaluate on. 
Default to None for the whole dataset + """ + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return {} + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "instances" in predictions[0]: + self._eval_predictions(predictions, img_ids=img_ids) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + """ + Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. + """ + for pred in predictions: + if "segmentation" in pred: + tasks = {"segm"} + if "keypoints" in pred: + tasks.add("keypoints") + return sorted(tasks) + + def _eval_predictions(self, predictions, img_ids=None): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(coco_results) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id + all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) + num_classes = len(all_contiguous_ids) + assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 + + reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} + for result in coco_results: + category_id = result["category_id"] + assert category_id < num_classes, ( + f"A prediction has class={category_id}, " + f"but the dataset only has {num_classes} classes and " + f"predicted class id should be in [0, {num_classes - 1}]." + ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info( + "Evaluating predictions with {} COCO API...".format( + "unofficial" if self._use_fast_impl else "official" + ) + ) + for task in sorted(tasks): + assert task in {"segm", "keypoints"}, f"Got unknown task: {task}!" + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, + coco_results, + task, + kpt_oks_sigmas=self._kpt_oks_sigmas, + use_fast_impl=self._use_fast_impl, + img_ids=img_ids, + max_dets_per_image=self._max_dets_per_image, + ) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _derive_coco_results(self, coco_eval, iou_type, class_names=None): + """ + Derive the desired score numbers from summarized COCOeval. + + Args: + coco_eval (None or COCOEval): None represents no predictions from model. 
+            iou_type (str):
+            class_names (None or list[str]): if provided, will use it to compute
+                per-category AP.
+
+        Returns:
+            a dict of {metric name: score}
+        """
+
+        metrics = {
+            "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+            "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
+        }[iou_type]
+
+        if coco_eval is None:
+            self._logger.warn("No predictions from the model!")
+            return {metric: float("nan") for metric in metrics}
+
+        # the standard metrics
+        results = {
+            metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
+            for idx, metric in enumerate(metrics)
+        }
+        self._logger.info(
+            "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
+        )
+        if not np.isfinite(sum(results.values())):
+            self._logger.info("Some metrics cannot be computed and are shown as NaN.")
+
+        if class_names is None or len(class_names) <= 1:
+            return results
+        # Compute per-category AP
+        # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
+        precisions = coco_eval.eval["precision"]
+        # precision has dims (iou, recall, cls, area range, max dets)
+        assert len(class_names) == precisions.shape[2]
+
+        results_per_category = []
+        for idx, name in enumerate(class_names):
+            # area range index 0: all area ranges
+            # max dets index -1: typically 100 per image
+            precision = precisions[:, :, idx, 0, -1]
+            precision = precision[precision > -1]
+            ap = np.mean(precision) if precision.size else float("nan")
+            results_per_category.append(("{}".format(name), float(ap * 100)))
+
+        # tabulate it
+        N_COLS = min(6, len(results_per_category) * 2)
+        results_flatten = list(itertools.chain(*results_per_category))
+        results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
+        table = tabulate(
+            results_2d,
+            tablefmt="pipe",
+            floatfmt=".3f",
+            headers=["category", "AP"] * (N_COLS // 2),
+            numalign="left",
+        )
+        self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
+
+        results.update({"AP-" + name: ap for name, ap in results_per_category})
+        return results
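+
+
+# Editor's note: an illustrative, self-contained sketch of the per-category AP reduction
+# performed above; `per_category_ap` is an editorial example, not part of the original patch.
+# It assumes `precisions` has COCOeval's shape (iou, recall, cls, area range, max dets),
+# with -1 marking entries that have no valid data.
+def per_category_ap(precisions, class_names):
+    out = {}
+    for idx, name in enumerate(class_names):
+        p = precisions[:, :, idx, 0, -1]  # all area ranges, highest maxDets threshold
+        p = p[p > -1]                     # drop sentinel entries
+        out[name] = float(np.mean(p) * 100) if p.size else float("nan")
+    return out
+
+
+def instances_to_coco_json(instances, img_id):
+    """
+    Dump an "Instances" object to a COCO-format json that's used for evaluation.
+
+    Args:
+        instances (Instances):
+        img_id (int): the image id
+
+    Returns:
+        list[dict]: list of json annotations in COCO format.
+    """
+    num_instance = len(instances)
+    if num_instance == 0:
+        return []
+
+    scores = instances.scores.tolist()
+    classes = instances.pred_classes.tolist()
+
+    has_mask = instances.has("pred_masks")
+    if has_mask:
+        # use RLE to encode the masks, because they are too large and take too much memory
+        # since this evaluator stores outputs of the entire dataset
+        rles = [
+            mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
+            for mask in instances.pred_masks
+        ]
+        for rle in rles:
+            # "counts" is an array encoded by mask_util as a byte-stream. Python3's
+            # json writer which always produces strings cannot serialize a bytestream
+            # unless you decode it. Thankfully, utf-8 works out (which is also what
+            # the custom_pycocotools/_mask.pyx does). 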
+ rle["counts"] = rle["counts"].decode("utf-8") + + has_keypoints = instances.has("pred_keypoints") + if has_keypoints: + keypoints = instances.pred_keypoints + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "score": scores[k], + } + if has_mask: + result["segmentation"] = rles[k] + if has_keypoints: + # In COCO annotations, + # keypoints coordinates are pixel indices. + # However our predictions are floating point coordinates. + # Therefore we subtract 0.5 to be consistent with the annotation format. + # This is the inverse of data loading logic in `datasets/coco.py`. + keypoints[k][:, :2] -= 0.5 + result["keypoints"] = keypoints[k].flatten().tolist() + results.append(result) + return results + +def _evaluate_predictions_on_coco( + coco_gt, + coco_results, + iou_type, + kpt_oks_sigmas=None, + use_fast_impl=True, + img_ids=None, + max_dets_per_image=None, +): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in coco_results: + c.pop("bbox", None) + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) + # For COCO, the default max_dets_per_image is [1, 10, 100]. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] # Default from COCOEval + else: + assert ( + len(max_dets_per_image) >= 3 + ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" + # In the case that user supplies a custom input for max_dets_per_image, + # apply COCOevalMaxDets to evaluate AP with the custom input. + if max_dets_per_image[2] != 100: + coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) + if iou_type != "keypoints": + coco_eval.params.maxDets = max_dets_per_image + + if img_ids is not None: + coco_eval.params.imgIds = img_ids + + if iou_type == "keypoints": + # Use the COCO default keypoint OKS sigmas unless overrides are specified + if kpt_oks_sigmas: + assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "custom_pycocotools is too old!" + coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) + # COCOAPI requires every detection and every gt to have keypoints, so + # we just take the first entry from both + num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 + num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 + num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) + assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( + f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " + f"Ground truth contains {num_keypoints_gt} keypoints. " + f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " + "They have to agree with each other. For meaning of OKS, please refer to " + "http://cocodataset.org/#keypoints-eval." 
+ ) + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval + + +class COCOevalMaxDets(COCOeval): + """ + Modified version of COCOeval for evaluating AP with a custom + maxDets (by default for COCO, maxDets is 100) + """ + + def summarize(self): + """ + Compute and display summary metrics for evaluation results given + a custom value for max_dets_per_image + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + # Evaluate AP using the custom limit on maximum detections per image + stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType == "segm": + summarize = _summarizeDets + elif iouType == "keypoints": + summarize = _summarizeKps + self.stats = summarize() + + def __str__(self): + self.summarize() \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/detection_coco_evaluator.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/detection_coco_evaluator.py new 
file mode 100644 index 0000000000000000000000000000000000000000..9eb543344e2aafb0b663b32fa66e80a15f537c37 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/detection_coco_evaluator.py @@ -0,0 +1,723 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/coco_evaluation.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import custom_pycocotools.mask as mask_util +import torch +from custom_pycocotools.coco import COCO +from custom_pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import custom_detectron2.utils.comm as comm +from custom_detectron2.config import CfgNode +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.data.datasets.coco import convert_to_coco_json +from custom_detectron2.structures import Boxes, BoxMode, pairwise_iou +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import create_small_table + +from .evaluator import DatasetEvaluator + +try: + from custom_detectron2.evaluation.fast_eval_api import COCOeval_opt +except ImportError: + COCOeval_opt = COCOeval + + +class DetectionCOCOEvaluator(DatasetEvaluator): + """ + Evaluate AR for object proposals, AP for instance detection/segmentation, AP + for keypoint detection outputs using COCO's metrics. + See http://cocodataset.org/#detection-eval and + http://cocodataset.org/#keypoints-eval to understand its metrics. + The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means + the metric cannot be computed (e.g. due to no predictions made). + + In addition to COCO, this evaluator is able to support any bounding box detection, + instance segmentation, or keypoint detection dataset. + """ + + def __init__( + self, + dataset_name, + tasks=None, + distributed=True, + output_dir=None, + *, + max_dets_per_image=None, + use_fast_impl=True, + kpt_oks_sigmas=(), + allow_cached_coco=True, + ): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have either the following corresponding metadata: + + "json_file": the path to the COCO format annotation + + Or it must be in detectron2's standard dataset format + so it can be converted to COCO format automatically. + tasks (tuple[str]): tasks that can be evaluated under the given + configuration. A task is one of "bbox", "segm", "keypoints". + By default, will infer this automatically from predictions. + distributed (True): if True, will collect results from all ranks and run evaluation + in the main process. + Otherwise, will only evaluate the results in the current process. + output_dir (str): optional, an output directory to dump all + results predicted on the dataset. The dump contains two files: + + 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and + contains all the results in the format they are produced by the model. + 2. "coco_instances_results.json" a json file in COCO's result format. + max_dets_per_image (int): limit on the maximum number of detections per image. 
+ By default in COCO, this limit is to 100, but this can be customized + to be greater, as is needed in evaluation metrics AP fixed and AP pool + (see https://arxiv.org/pdf/2102.01066.pdf) + This doesn't affect keypoint evaluation. + use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. + Although the results should be very close to the official implementation in COCO + API, it is still recommended to compute results with the official API for use in + papers. The faster implementation also uses more RAM. + kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. + See http://cocodataset.org/#keypoints-eval + When empty, it will use the defaults in COCO. + Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. + allow_cached_coco (bool): Whether to use cached coco json from previous validation + runs. You should set this to False if you need to use different validation data. + Defaults to True. + """ + self._logger = logging.getLogger(__name__) + self._distributed = distributed + self._output_dir = output_dir + + if use_fast_impl and (COCOeval_opt is COCOeval): + self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") + use_fast_impl = False + self._use_fast_impl = use_fast_impl + + # COCOeval requires the limit on the number of detections per image (maxDets) to be a list + # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the + # 3rd element (100) is used as the limit on the number of detections per image when + # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, + # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] + else: + max_dets_per_image = [1, 10, max_dets_per_image] + self._max_dets_per_image = max_dets_per_image + + if tasks is not None and isinstance(tasks, CfgNode): + kpt_oks_sigmas = ( + tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas + ) + self._logger.warn( + "COCO Evaluator instantiated using config, this is deprecated behavior." + " Please pass in explicit arguments instead." + ) + self._tasks = None # Infering it from predictions should be better + else: + self._tasks = tasks + + self._cpu_device = torch.device("cpu") + + self._metadata = MetadataCatalog.get(dataset_name) + if not hasattr(self._metadata, "json_file"): + if output_dir is None: + raise ValueError( + "output_dir must be provided to COCOEvaluator " + "for datasets not in COCO format." + ) + self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") + + cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") + self._metadata.json_file = cache_path + convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) + + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = "annotations" in self._coco_api.dataset + if self._do_evaluation: + self._kpt_oks_sigmas = kpt_oks_sigmas + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. 
Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "box_instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + if "box_instances" in output: + instances = output["box_instances"].to(self._cpu_device) + prediction["box_instances"] = instances_to_coco_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + if len(prediction) > 1: + self._predictions.append(prediction) + + def evaluate(self, img_ids=None): + """ + Args: + img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset + """ + if self._distributed: + comm.synchronize() + predictions = comm.gather(self._predictions, dst=0) + predictions = list(itertools.chain(*predictions)) + + if not comm.is_main_process(): + return {} + else: + predictions = self._predictions + + if len(predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(predictions, f) + + self._results = OrderedDict() + if "proposals" in predictions[0]: + self._eval_box_proposals(predictions) + if "box_instances" in predictions[0]: + self._eval_predictions(predictions, img_ids=img_ids) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _tasks_from_predictions(self, predictions): + """ + Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. + """ + tasks = {"bbox"} + for pred in predictions: + if "keypoints" in pred: + tasks.add("keypoints") + return sorted(tasks) + + def _eval_predictions(self, predictions, img_ids=None): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["box_instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(coco_results) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id + all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) + num_classes = len(all_contiguous_ids) + assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 + + reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} + for result in coco_results: + category_id = result["category_id"] + assert category_id < num_classes, ( + f"A prediction has class={category_id}, " + f"but the dataset only has {num_classes} classes and " + f"predicted class id should be in [0, {num_classes - 1}]." 
+ ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info( + "Evaluating predictions with {} COCO API...".format( + "unofficial" if self._use_fast_impl else "official" + ) + ) + for task in sorted(tasks): + assert task in {"bbox", "keypoints"}, f"Got unknown task: {task}!" + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, + coco_results, + task, + kpt_oks_sigmas=self._kpt_oks_sigmas, + use_fast_impl=self._use_fast_impl, + img_ids=img_ids, + max_dets_per_image=self._max_dets_per_image, + ) + if len(coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self, predictions): + """ + Evaluate the box proposals in predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + def _derive_coco_results(self, coco_eval, iou_type, class_names=None): + """ + Derive the desired score numbers from summarized COCOeval. + + Args: + coco_eval (None or COCOEval): None represents no predictions from model. + iou_type (str): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. 
+ + Returns: + a dict of {metric name: score} + """ + + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], + }[iou_type] + + if coco_eval is None: + self._logger.warn("No predictions from the model!") + return {metric: float("nan") for metric in metrics} + + # the standard metrics + results = { + metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") + for idx, metric in enumerate(metrics) + } + self._logger.info( + "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) + ) + if not np.isfinite(sum(results.values())): + self._logger.info("Some metrics cannot be computed and is shown as NaN.") + + if class_names is None or len(class_names) <= 1: + return results + # Compute per-category AP + # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa + precisions = coco_eval.eval["precision"] + # precision has dims (iou, recall, cls, area range, max dets) + assert len(class_names) == precisions.shape[2] + + results_per_category = [] + for idx, name in enumerate(class_names): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + ap = np.mean(precision) if precision.size else float("nan") + results_per_category.append(("{}".format(name), float(ap * 100))) + + # tabulate it + N_COLS = min(6, len(results_per_category) * 2) + results_flatten = list(itertools.chain(*results_per_category)) + results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + results_2d, + tablefmt="pipe", + floatfmt=".3f", + headers=["category", "AP"] * (N_COLS // 2), + numalign="left", + ) + self._logger.info("Per-category {} AP: \n".format(iou_type) + table) + + results.update({"AP-" + name: ap for name, ap in results_per_category}) + return results + + +def instances_to_coco_json(instances, img_id): + """ + Dump an "Instances" object to a COCO-format json that's used for evaluation. + + Args: + instances (Instances): + img_id (int): the image id + + Returns: + list[dict]: list of json annotations in COCO format. + """ + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + has_mask = instances.has("pred_masks") + if has_mask: + # use RLE to encode the masks, because they are too large and takes memory + # since this evaluator stores outputs of the entire dataset + rles = [ + mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] + for mask in instances.pred_masks + ] + for rle in rles: + # "counts" is an array encoded by mask_util as a byte-stream. Python3's + # json writer which always produces strings cannot serialize a bytestream + # unless you decode it. Thankfully, utf-8 works out (which is also what + # the custom_pycocotools/_mask.pyx does). 
+ rle["counts"] = rle["counts"].decode("utf-8") + + has_keypoints = instances.has("pred_keypoints") + if has_keypoints: + keypoints = instances.pred_keypoints + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + if has_mask: + result["segmentation"] = rles[k] + if has_keypoints: + # In COCO annotations, + # keypoints coordinates are pixel indices. + # However our predictions are floating point coordinates. + # Therefore we subtract 0.5 to be consistent with the annotation format. + # This is the inverse of data loading logic in `datasets/coco.py`. + keypoints[k][:, :2] -= 0.5 + result["keypoints"] = keypoints[k].flatten().tolist() + results.append(result) + return results + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official COCO API recall evaluation code. However, + it produces slightly different results. + """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0**2, 1e5**2], # all + [0**2, 32**2], # small + [32**2, 96**2], # medium + [96**2, 1e5**2], # large + [96**2, 128**2], # 96-128 + [128**2, 256**2], # 128-256 + [256**2, 512**2], # 256-512 + [512**2, 1e5**2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) + anno = coco_api.loadAnns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + for obj in anno + if obj["iscrowd"] == 0 + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 
'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = ( + torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) + ) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_coco( + coco_gt, + coco_results, + iou_type, + kpt_oks_sigmas=None, + use_fast_impl=True, + img_ids=None, + max_dets_per_image=None, +): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in coco_results: + c.pop("bbox", None) + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) + # For COCO, the default max_dets_per_image is [1, 10, 100]. + if max_dets_per_image is None: + max_dets_per_image = [1, 10, 100] # Default from COCOEval + else: + assert ( + len(max_dets_per_image) >= 3 + ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" + # In the case that user supplies a custom input for max_dets_per_image, + # apply COCOevalMaxDets to evaluate AP with the custom input. + if max_dets_per_image[2] != 100: + coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) + if iou_type != "keypoints": + coco_eval.params.maxDets = max_dets_per_image + + if img_ids is not None: + coco_eval.params.imgIds = img_ids + + if iou_type == "keypoints": + # Use the COCO default keypoint OKS sigmas unless overrides are specified + if kpt_oks_sigmas: + assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "custom_pycocotools is too old!" + coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) + # COCOAPI requires every detection and every gt to have keypoints, so + # we just take the first entry from both + num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 + num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 + num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) + assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( + f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " + f"Ground truth contains {num_keypoints_gt} keypoints. " + f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " + "They have to agree with each other. 
For meaning of OKS, please refer to " + "http://cocodataset.org/#keypoints-eval." + ) + + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + return coco_eval + + +class COCOevalMaxDets(COCOeval): + """ + Modified version of COCOeval for evaluating AP with a custom + maxDets (by default for COCO, maxDets is 100) + """ + + def summarize(self): + """ + Compute and display summary metrics for evaluation results given + a custom value for max_dets_per_image + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + # Evaluate AP using the custom limit on maximum detections per image + stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType == "segm" or iouType == "bbox": + summarize = _summarizeDets + elif iouType == "keypoints": + summarize = _summarizeKps + self.stats = summarize() + + def __str__(self): + self.summarize() \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/evaluator.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..a15a1d365349c13e400840e3c4a0753646459fff --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/evaluator.py @@ -0,0 +1,228 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/evaluator.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +import datetime +import logging +import time +from collections import OrderedDict, abc +from contextlib import ExitStack, contextmanager +from typing import List, Union +import torch +from torch import nn + +from custom_detectron2.utils.comm import get_world_size, is_main_process +from custom_detectron2.utils.logger import log_every_n_seconds + + +class DatasetEvaluator: + """ + Base class for a dataset evaluator. + + The function :func:`inference_on_dataset` runs the model over + all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. + + This class will accumulate information of the inputs/outputs (by :meth:`process`), + and produce evaluation results in the end (by :meth:`evaluate`). + """ + + def reset(self): + """ + Preparation for a new round of evaluation. + Should be called before starting a round of evaluation. + """ + pass + + def process(self, inputs, outputs): + """ + Process the pair of inputs and outputs. + If they contain batches, the pairs can be consumed one-by-one using `zip`: + + .. code-block:: python + + for input_, output in zip(inputs, outputs): + # do evaluation on single input/output pair + ... + + Args: + inputs (list): the inputs that's used to call the model. + outputs (list): the return value of `model(inputs)` + """ + pass + + def evaluate(self): + """ + Evaluate/summarize the performance, after processing all input/output pairs. + + Returns: + dict: + A new evaluator class can return a dict of arbitrary format + as long as the user can process the results. + In our train_net.py, we expect the following format: + + * key: the name of the task (e.g., bbox) + * value: a dict of {metric name: score}, e.g.: {"AP50": 80} + """ + pass + + +class DatasetEvaluators(DatasetEvaluator): + """ + Wrapper class to combine multiple :class:`DatasetEvaluator` instances. + + This class dispatches every evaluation call to + all of its :class:`DatasetEvaluator`. + """ + + def __init__(self, evaluators): + """ + Args: + evaluators (list): the evaluators to combine. + """ + super().__init__() + self._evaluators = evaluators + + def reset(self): + for evaluator in self._evaluators: + evaluator.reset() + + def process(self, inputs, outputs): + for evaluator in self._evaluators: + evaluator.process(inputs, outputs) + + def evaluate(self): + results = OrderedDict() + for evaluator in self._evaluators: + result = evaluator.evaluate() + if is_main_process() and result is not None: + for k, v in result.items(): + assert ( + k not in results + ), "Different evaluators produce results with the same key {}".format(k) + results[k] = v + return results + + +def inference_on_dataset( + model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] +): + """ + Run model on the data_loader and evaluate the metrics with evaluator. + Also benchmark the inference speed of `model.__call__` accurately. 
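+
+    A minimal usage sketch (``MyEvaluator`` is a hypothetical
+    :class:`DatasetEvaluator` subclass, not defined in this file):
+
+    .. code-block:: python
+
+        evaluator = MyEvaluator()  # hypothetical evaluator
+        results = inference_on_dataset(model, data_loader, evaluator)
+        # `results` is whatever evaluator.evaluate() returns,
+        # e.g. {"segm": {"AP": 40.0}}
+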
+ The model will be used in eval mode. + + Args: + model (callable): a callable which takes an object from + `data_loader` and returns some outputs. + + If it's an nn.Module, it will be temporarily set to `eval` mode. + If you wish to evaluate a model in `training` mode instead, you can + wrap the given model and override its behavior of `.eval()` and `.train()`. + data_loader: an iterable object with a length. + The elements it generates will be the inputs to the model. + evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, + but don't want to do any evaluation. + + Returns: + The return value of `evaluator.evaluate()` + """ + num_devices = get_world_size() + logger = logging.getLogger(__name__) + logger.info("Start inference on {} batches".format(len(data_loader))) + + total = len(data_loader) # inference data loader must have a fixed length + if evaluator is None: + # create a no-op evaluator + evaluator = DatasetEvaluators([]) + if isinstance(evaluator, abc.MutableSequence): + evaluator = DatasetEvaluators(evaluator) + evaluator.reset() + + num_warmup = min(5, total - 1) + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + with ExitStack() as stack: + if isinstance(model, nn.Module): + stack.enter_context(inference_context(model)) + stack.enter_context(torch.no_grad()) + + start_data_time = time.perf_counter() + for idx, inputs in enumerate(data_loader): + total_data_time += time.perf_counter() - start_data_time + if idx == num_warmup: + start_time = time.perf_counter() + total_data_time = 0 + total_compute_time = 0 + total_eval_time = 0 + + start_compute_time = time.perf_counter() + outputs = model(inputs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + total_compute_time += time.perf_counter() - start_compute_time + + start_eval_time = time.perf_counter() + evaluator.process(inputs, outputs) + total_eval_time += time.perf_counter() - start_eval_time + + iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) + data_seconds_per_iter = total_data_time / iters_after_start + compute_seconds_per_iter = total_compute_time / iters_after_start + eval_seconds_per_iter = total_eval_time / iters_after_start + total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start + if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: + eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) + log_every_n_seconds( + logging.INFO, + ( + f"Inference done {idx + 1}/{total}. " + f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " + f"Inference: {compute_seconds_per_iter:.4f} s/iter. " + f"Eval: {eval_seconds_per_iter:.4f} s/iter. " + f"Total: {total_seconds_per_iter:.4f} s/iter. 
" + f"ETA={eta}" + ), + n=5, + ) + start_data_time = time.perf_counter() + + # Measure the time only for this worker (before the synchronization barrier) + total_time = time.perf_counter() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + # NOTE this format is parsed by grep + logger.info( + "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_time_str, total_time / (total - num_warmup), num_devices + ) + ) + total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) + logger.info( + "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( + total_compute_time_str, total_compute_time / (total - num_warmup), num_devices + ) + ) + + results = evaluator.evaluate() + # An evaluator may return None when not in main process. + # Replace it by an empty dict instead to make it easier for downstream code to handle + if results is None: + results = {} + return results + + +@contextmanager +def inference_context(model): + """ + A context where the model is temporarily changed to eval mode, + and restored to previous mode afterwards. + + Args: + model: a torch Module + """ + training_mode = model.training + model.eval() + yield + model.train(training_mode) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/instance_evaluation.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/instance_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..47d8346b28796fd96d2325ae2cea2100afa5fb44 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/evaluation/instance_evaluation.py @@ -0,0 +1,110 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/evaluation/instance_evaluation.py +# ------------------------------------------------------------------------------ + +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import custom_pycocotools.mask as mask_util +import torch +from custom_pycocotools.coco import COCO +from custom_pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import custom_detectron2.utils.comm as comm +from custom_detectron2.config import CfgNode +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.data.datasets.coco import convert_to_coco_json +from custom_detectron2.evaluation.coco_evaluation import COCOEvaluator, _evaluate_predictions_on_coco +from custom_detectron2.evaluation.fast_eval_api import COCOeval_opt +from custom_detectron2.structures import Boxes, BoxMode, pairwise_iou +from custom_detectron2.utils.file_io import PathManager +from custom_detectron2.utils.logger import create_small_table + + +# modified from COCOEvaluator for instance segmetnat +class InstanceSegEvaluator(COCOEvaluator): + """ + Evaluate AR for object proposals, AP for instance detection/segmentation, AP + for keypoint detection outputs using COCO's metrics. + See http://cocodataset.org/#detection-eval and + http://cocodataset.org/#keypoints-eval to understand its metrics. + The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means + the metric cannot be computed (e.g. due to no predictions made). 
+ + In addition to COCO, this evaluator is able to support any bounding box detection, + instance segmentation, or keypoint detection dataset. + """ + + def _eval_predictions(self, predictions, img_ids=None): + """ + Evaluate predictions. Fill self._results with the metrics of the tasks. + """ + self._logger.info("Preparing results for COCO format ...") + coco_results = list(itertools.chain(*[x["instances"] for x in predictions])) + tasks = self._tasks or self._tasks_from_predictions(coco_results) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id + # all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) + # num_classes = len(all_contiguous_ids) + # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 + + reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} + for result in coco_results: + category_id = result["category_id"] + # assert category_id < num_classes, ( + # f"A prediction has class={category_id}, " + # f"but the dataset only has {num_classes} classes and " + # f"predicted class id should be in [0, {num_classes - 1}]." + # ) + assert category_id in reverse_id_mapping, ( + f"A prediction has class={category_id}, " + f"but the dataset only has class ids in {dataset_id_to_contiguous_id}." + ) + result["category_id"] = reverse_id_mapping[category_id] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info( + "Evaluating predictions with {} COCO API...".format( + "unofficial" if self._use_fast_impl else "official" + ) + ) + for task in sorted(tasks): + assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!" 
+            coco_eval = (
+                _evaluate_predictions_on_coco(
+                    self._coco_api,
+                    coco_results,
+                    task,
+                    kpt_oks_sigmas=self._kpt_oks_sigmas,
+                    use_fast_impl=self._use_fast_impl,
+                    img_ids=img_ids,
+                    max_dets_per_image=self._max_dets_per_image,
+                )
+                if len(coco_results) > 0
+                else None  # cocoapi does not handle empty results very well
+            )
+
+            res = self._derive_coco_results(
+                coco_eval, task, class_names=self._metadata.get("thing_classes")
+            )
+            self._results[task] = res
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e1338369a958062d6ca4a122435b2be6ad27315
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/__init__.py
@@ -0,0 +1,5 @@
+from .backbone.swin import D2SwinTransformer
+from .backbone.dinat import D2DiNAT
+from .pixel_decoder.fpn import BasePixelDecoder
+from .pixel_decoder.msdeformattn import MSDeformAttnPixelDecoder
+from .meta_arch.oneformer_head import OneFormerHead
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9020c2df23e2af280b7bb168b996ae9eaf312eb8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/dinat.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/dinat.py
new file mode 100644
index 0000000000000000000000000000000000000000..164d5bc5c9c6d29d95316a1b2a2a4bbfc2f76843
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/dinat.py
@@ -0,0 +1,324 @@
+# --------------------------------------------------------
+# Neighborhood Attention Transformer
+# Licensed under The MIT License
+# Written by Ali Hassani
+# --------------------------------------------------------
+
+# Modified by Jitesh Jain
+
+import torch
+import torch.nn as nn
+from custom_timm.models.layers import DropPath
+from custom_detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
+
+class NeighborhoodAttention(nn.Module):
+    """
+    Neighborhood Attention 2D Module
+
+    NOTE: this vendored copy stubs out the actual neighborhood attention
+    computation (upstream DiNAT relies on the `natten` package); forward()
+    is an identity pass-through. The attributes set in __init__ are kept
+    only so that extra_repr() below does not raise AttributeError.
+    """
+
+    def __init__(
+        self,
+        dim,
+        num_heads,
+        kernel_size,
+        dilation=1,
+        bias=True,
+        qkv_bias=True,
+        qk_scale=None,
+        attn_drop=0.0,
+        proj_drop=0.0,
+    ):
+        super().__init__()
+        self.num_heads = num_heads
+        self.head_dim = dim // num_heads
+        self.kernel_size = kernel_size
+        self.dilation = dilation
+        self.rpb = None  # no relative position bias in this stub
+
+    def forward(self, x):
+        # Identity: no attention is computed in this stub.
+        return x
+
+    def extra_repr(self) -> str:
+        return (
+            f"head_dim={self.head_dim}, num_heads={self.num_heads}, "
+            + f"kernel_size={self.kernel_size}, dilation={self.dilation}, "
+            + f"rel_pos_bias={self.rpb is not None}"
+        )
+
+class ConvTokenizer(nn.Module):
+    def __init__(self, in_chans=3, embed_dim=96, norm_layer=None):
+        super().__init__()
+        self.proj = nn.Sequential(
+            nn.Conv2d(in_chans, embed_dim // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
+            nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
+        )
+        if norm_layer is not None:
+            self.norm = norm_layer(embed_dim)
+        else:
+            self.norm = None
+
+    def forward(self, x):
+        x = self.proj(x).permute(0, 2, 3, 1)
+        if self.norm is not None:
+            x = self.norm(x)
+        return x
+
+
+class ConvDownsampler(nn.Module):
+    def __init__(self,
dim, norm_layer=nn.LayerNorm): + super().__init__() + self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + self.norm = norm_layer(2 * dim) + + def forward(self, x): + x = self.reduction(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + x = self.norm(x) + return x + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class NATLayer(nn.Module): + def __init__(self, dim, num_heads, kernel_size=7, dilation=None, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, layer_scale=None): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.mlp_ratio = mlp_ratio + + self.norm1 = norm_layer(dim) + self.attn = NeighborhoodAttention( + dim, kernel_size=kernel_size, dilation=dilation, num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.layer_scale = False + if layer_scale is not None and type(layer_scale) in [int, float]: + self.layer_scale = True + self.gamma1 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True) + + def forward(self, x): + if not self.layer_scale: + shortcut = x + x = self.norm1(x) + x = self.attn(x) + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + shortcut = x + x = self.norm1(x) + x = self.attn(x) + x = shortcut + self.drop_path(self.gamma1 * x) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + + +class NATBlock(nn.Module): + def __init__(self, dim, depth, num_heads, kernel_size, dilations=None, + downsample=True, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, layer_scale=None): + super().__init__() + self.dim = dim + self.depth = depth + + self.blocks = nn.ModuleList([ + NATLayer(dim=dim, + num_heads=num_heads, + kernel_size=kernel_size, + dilation=None if dilations is None else dilations[i], + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + layer_scale=layer_scale) + for i in range(depth)]) + + self.downsample = None if not downsample else ConvDownsampler(dim=dim, norm_layer=norm_layer) + + def forward(self, x): + for blk in self.blocks: + x = blk(x) + if self.downsample is None: + return x, x + return self.downsample(x), x + + +class DiNAT(nn.Module): + def __init__(self, + embed_dim, + mlp_ratio, + depths, + num_heads, + drop_path_rate=0.2, + in_chans=3, + kernel_size=7, + dilations=None, + out_indices=(0, 1, 2, 3), + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + norm_layer=nn.LayerNorm, + 
frozen_stages=-1, + layer_scale=None, + **kwargs): + super().__init__() + self.num_levels = len(depths) + self.embed_dim = embed_dim + self.num_features = [int(embed_dim * 2 ** i) for i in range(self.num_levels)] + self.mlp_ratio = mlp_ratio + + self.patch_embed = ConvTokenizer(in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] + self.levels = nn.ModuleList() + for i in range(self.num_levels): + level = NATBlock(dim=int(embed_dim * 2 ** i), + depth=depths[i], + num_heads=num_heads[i], + kernel_size=kernel_size, + dilations=None if dilations is None else dilations[i], + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])], + norm_layer=norm_layer, + downsample=(i < self.num_levels - 1), + layer_scale=layer_scale) + self.levels.append(level) + + # add a norm layer for each output + self.out_indices = out_indices + for i_layer in self.out_indices: + layer = norm_layer(self.num_features[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 2: + for i in range(0, self.frozen_stages - 1): + m = self.network[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(DiNAT, self).train(mode) + self._freeze_stages() + + def forward_embeddings(self, x): + x = self.patch_embed(x) + return x + + def forward_tokens(self, x): + outs = {} + for idx, level in enumerate(self.levels): + x, xo = level(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + x_out = norm_layer(xo) + outs["res{}".format(idx + 2)] = x_out.permute(0, 3, 1, 2).contiguous() + return outs + + def forward(self, x): + x = self.forward_embeddings(x) + return self.forward_tokens(x) + + +@BACKBONE_REGISTRY.register() +class D2DiNAT(DiNAT, Backbone): + def __init__(self, cfg, input_shape): + + embed_dim = cfg.MODEL.DiNAT.EMBED_DIM + mlp_ratio = cfg.MODEL.DiNAT.MLP_RATIO + depths = cfg.MODEL.DiNAT.DEPTHS + num_heads = cfg.MODEL.DiNAT.NUM_HEADS + drop_path_rate = cfg.MODEL.DiNAT.DROP_PATH_RATE + kernel_size = cfg.MODEL.DiNAT.KERNEL_SIZE + out_indices = cfg.MODEL.DiNAT.OUT_INDICES + dilations = cfg.MODEL.DiNAT.DILATIONS + + super().__init__( + embed_dim=embed_dim, + mlp_ratio=mlp_ratio, + depths=depths, + num_heads=num_heads, + drop_path_rate=drop_path_rate, + kernel_size=kernel_size, + out_indices=out_indices, + dilations=dilations, + ) + + self._out_features = cfg.MODEL.DiNAT.OUT_FEATURES + + self._out_feature_strides = { + "res2": 4, + "res3": 8, + "res4": 16, + "res5": 32, + } + self._out_feature_channels = { + "res2": self.num_features[0], + "res3": self.num_features[1], + "res4": self.num_features[2], + "res5": self.num_features[3], + } + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + Returns: + dict[str->Tensor]: names and the corresponding features + """ + assert ( + x.dim() == 4 + ), f"DiNAT takes an input of shape (N, C, H, W). Got {x.shape} instead!" 
+ outputs = {} + y = super().forward(x) + for k in y.keys(): + if k in self._out_features: + outputs[k] = y[k] + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + @property + def size_divisibility(self): + return 32 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/swin.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/swin.py new file mode 100644 index 0000000000000000000000000000000000000000..06ce67a5041d5bfd1efe3c02321b1cb423209d38 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/backbone/swin.py @@ -0,0 +1,771 @@ +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu, Yutong Lin, Yixuan Wei +# -------------------------------------------------------- + +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former +# ------------------------------------------------------------------------------ + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from custom_timm.models.layers import DropPath, to_2tuple, trunc_normal_ + +from custom_detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec + + +class Mlp(nn.Module): + """Multilayer perceptron.""" + + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + """Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__( + self, + dim, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) + ) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=0.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """Forward function. + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B_, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1) + ].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1 + ).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, + dim, + num_heads, + window_size=7, + shift_size=0, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop + ) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. + """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition( + shifted_x, self.window_size + ) # nW*B, window_size, window_size, C + x_windows = x_windows.view( + -1, self.window_size * self.window_size, C + ) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """Patch Merging Layer + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__( + self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False, + ): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList( + [ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
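+
+        Note: the attention mask computed below assigns a region id to each
+        pixel of the padded feature map; window positions whose region ids
+        differ get -100.0 added to their attention logits, which (after the
+        softmax) effectively blocks attention across the cyclic-shift seam.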
+ """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + w_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition( + img_mask, self.window_size + ) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( + attn_mask == 0, float(0.0) + ) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding + Args: + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, H, W = x.size() + if W % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) + if H % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) + + x = self.proj(x) # B C Wh Ww + if self.norm is not None: + Wh, Ww = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) + + return x + + +class SwinTransformer(nn.Module): + """Swin Transformer backbone. + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + Args: + pretrain_img_size (int): Input image size for training the pretrained model, + used in absolute postion embedding. Default 224. + patch_size (int | tuple(int)): Patch size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + depths (tuple[int]): Depths of each Swin Transformer stage. + num_heads (tuple[int]): Number of attention head of each stage. + window_size (int): Window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + drop_rate (float): Dropout rate. 
+ attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0.2. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. + patch_norm (bool): If True, add normalization after patch embedding. Default: True. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__( + self, + pretrain_img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + use_checkpoint=False, + ): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None, + ) + + # absolute position embedding + if self.ape: + pretrain_img_size = to_2tuple(pretrain_img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [ + pretrain_img_size[0] // patch_size[0], + pretrain_img_size[1] // patch_size[1], + ] + + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) + ) + trunc_normal_(self.absolute_pos_embed, std=0.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + ) + self.layers.append(layer) + + num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f"norm{i_layer}" + self.add_module(layer_name, layer) + + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + """Initialize the weights in backbone. 
+ Args: + pretrained (str, optional): Path to pre-trained weights. + Defaults to None. + """ + + def _init_weights(m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x): + """Forward function.""" + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = F.interpolate( + self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" + ) + x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C + else: + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + outs = {} + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f"norm{i}") + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs["res{}".format(i + 2)] = out + + return outs + + def train(self, mode=True): + """Convert the model into training mode while keep layers freezed.""" + super(SwinTransformer, self).train(mode) + self._freeze_stages() + + +@BACKBONE_REGISTRY.register() +class D2SwinTransformer(SwinTransformer, Backbone): + def __init__(self, cfg, input_shape): + + pretrain_img_size = cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE + patch_size = cfg.MODEL.SWIN.PATCH_SIZE + in_chans = 3 + embed_dim = cfg.MODEL.SWIN.EMBED_DIM + depths = cfg.MODEL.SWIN.DEPTHS + num_heads = cfg.MODEL.SWIN.NUM_HEADS + window_size = cfg.MODEL.SWIN.WINDOW_SIZE + mlp_ratio = cfg.MODEL.SWIN.MLP_RATIO + qkv_bias = cfg.MODEL.SWIN.QKV_BIAS + qk_scale = cfg.MODEL.SWIN.QK_SCALE + drop_rate = cfg.MODEL.SWIN.DROP_RATE + attn_drop_rate = cfg.MODEL.SWIN.ATTN_DROP_RATE + drop_path_rate = cfg.MODEL.SWIN.DROP_PATH_RATE + norm_layer = nn.LayerNorm + ape = cfg.MODEL.SWIN.APE + patch_norm = cfg.MODEL.SWIN.PATCH_NORM + use_checkpoint = cfg.MODEL.SWIN.USE_CHECKPOINT + + super().__init__( + pretrain_img_size, + patch_size, + in_chans, + embed_dim, + depths, + num_heads, + window_size, + mlp_ratio, + qkv_bias, + qk_scale, + drop_rate, + attn_drop_rate, + drop_path_rate, + norm_layer, + ape, + patch_norm, + use_checkpoint=use_checkpoint, + ) + + self._out_features = cfg.MODEL.SWIN.OUT_FEATURES + + self._out_feature_strides = { + "res2": 4, + "res3": 8, + "res4": 16, + "res5": 32, + } + self._out_feature_channels = { + "res2": self.num_features[0], + "res3": self.num_features[1], + "res4": self.num_features[2], + "res5": self.num_features[3], + } + + def forward(self, x): + """ + Args: + x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. + Returns: + dict[str->Tensor]: names and the corresponding features + """ + assert ( + x.dim() == 4 + ), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!" 
+ outputs = {} + y = super().forward(x) + for k in y.keys(): + if k in self._out_features: + outputs[k] = y[k] + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + @property + def size_divisibility(self): + return 32 diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/matcher.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..2564940d610d57a1206ce5232e696fc6b34672da --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/matcher.py @@ -0,0 +1,212 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/matcher.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +""" +Modules to compute the matching cost and solve the corresponding LSAP. +""" +import torch +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment +from torch import nn +from torch.cuda.amp import autocast +import numpy as np + +# from custom_detectron2.projects.point_rend.point_features import point_sample + + +def linear_sum_assignment_with_nan(cost_matrix): + cost_matrix = np.asarray(cost_matrix) + nan = np.isnan(cost_matrix).any() + nan_all = np.isnan(cost_matrix).all() + empty = cost_matrix.size == 0 + + if not empty: + if nan_all: + print('Matrix contains all NaN values!') + elif nan: + print('Matrix contains NaN values!') + + if nan_all: + cost_matrix = np.empty(shape=(0, 0)) + elif nan: + cost_matrix[np.isnan(cost_matrix)] = 100 + + return linear_sum_assignment(cost_matrix) + +def batch_dice_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * torch.einsum("nc,mc->nm", inputs, targets) + denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :] + loss = 1 - (numerator + 1) / (denominator + 1) + return loss + + +batch_dice_loss_jit = torch.jit.script( + batch_dice_loss +) # type: torch.jit.ScriptModule + + +def batch_sigmoid_ce_loss(inputs: torch.Tensor, targets: torch.Tensor): + """ + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). 
+    Returns:
+        Loss tensor
+    """
+    hw = inputs.shape[1]
+
+    pos = F.binary_cross_entropy_with_logits(
+        inputs, torch.ones_like(inputs), reduction="none"
+    )
+    neg = F.binary_cross_entropy_with_logits(
+        inputs, torch.zeros_like(inputs), reduction="none"
+    )
+
+    loss = torch.einsum("nc,mc->nm", pos, targets) + torch.einsum(
+        "nc,mc->nm", neg, (1 - targets)
+    )
+
+    return loss / hw
+
+
+batch_sigmoid_ce_loss_jit = torch.jit.script(
+    batch_sigmoid_ce_loss
+)  # type: torch.jit.ScriptModule
+
+
+class HungarianMatcher(nn.Module):
+    """This class computes an assignment between the targets and the predictions of the network
+
+    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
+    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
+    while the others are unmatched (and thus treated as non-objects).
+    """
+
+    def __init__(self, cost_class: float = 1, cost_mask: float = 1,
+                 cost_dice: float = 1, num_points: int = 0):
+        """Creates the matcher
+
+        Params:
+            cost_class: This is the relative weight of the classification error in the matching cost
+            cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
+            cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
+        """
+        super().__init__()
+        self.cost_class = cost_class
+        self.cost_mask = cost_mask
+        self.cost_dice = cost_dice
+
+        assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, "all costs can't be 0"
+
+        self.num_points = num_points
+
+    @torch.no_grad()
+    def memory_efficient_forward(self, outputs, targets):
+        """More memory-friendly matching"""
+        bs, num_queries = outputs["pred_logits"].shape[:2]
+
+        indices = []
+
+        # Iterate through batch size
+        for b in range(bs):
+            out_prob = outputs["pred_logits"][b].softmax(-1)  # [num_queries, num_classes]
+            tgt_ids = targets[b]["labels"]
+
+            # Compute the classification cost. Contrary to the loss, we don't use the NLL,
+            # but approximate it by 1 - proba[target class].
+            # The 1 is a constant that doesn't change the matching, so it can be omitted.
+            cost_class = -out_prob[:, tgt_ids]
+
+            out_mask = outputs["pred_masks"][b]  # [num_queries, H_pred, W_pred]
+            # gt masks are already padded when preparing target
+            tgt_mask = targets[b]["masks"].to(out_mask)
+
+            out_mask = out_mask[:, None]
+            tgt_mask = tgt_mask[:, None]
+            # all masks share the same set of points for efficient matching!
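+            # Note on the step below: `point_sample` is detectron2's point_rend
+            # helper. It treats the random (x, y) pairs in [0, 1] as normalized
+            # sampling locations and bilinearly interpolates the mask logits
+            # there via F.grid_sample, so predictions and ground truth are
+            # compared on the same K sampled points rather than on full masks.
+            # The corresponding import at the top of this file is commented
+            # out, so `point_sample` must be importable/defined for this code
+            # path to run.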
+ point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device) + # get gt labels + tgt_mask = point_sample( + tgt_mask, + point_coords.repeat(tgt_mask.shape[0], 1, 1), + align_corners=False, + ).squeeze(1) + + out_mask = point_sample( + out_mask, + point_coords.repeat(out_mask.shape[0], 1, 1), + align_corners=False, + ).squeeze(1) + + with autocast(enabled=False): + out_mask = out_mask.float() + tgt_mask = tgt_mask.float() + # Compute the focal loss between masks + cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask) + # Compute the dice loss betwen masks + cost_dice = batch_dice_loss(out_mask, tgt_mask) + + # Final cost matrix + C = ( + self.cost_mask * cost_mask + + self.cost_class * cost_class + + self.cost_dice * cost_dice + ) + C = C.reshape(num_queries, -1).cpu() + + indices.append(linear_sum_assignment_with_nan(C)) + + return [ + (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) + for i, j in indices + ] + + @torch.no_grad() + def forward(self, outputs, targets): + """Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + + return self.memory_efficient_forward(outputs, targets) + + def __repr__(self, _repr_indent=4): + head = "Matcher " + self.__class__.__name__ + body = [ + "cost_class: {}".format(self.cost_class), + "cost_mask: {}".format(self.cost_mask), + "cost_dice: {}".format(self.cost_dice), + ] + lines = [head] + [" " * _repr_indent + line for line in body] + return "\n".join(lines) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/__init__.py @@ -0,0 +1 @@ + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/oneformer_head.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/oneformer_head.py new file mode 100644 index 0000000000000000000000000000000000000000..e7fba1624dd351d263b90187d3fcece33027cf34 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/meta_arch/oneformer_head.py @@ -0,0 +1,135 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# 
------------------------------------------------------------------------------
+
+import logging
+from copy import deepcopy
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import fvcore.nn.weight_init as weight_init
+from torch import nn
+from torch.nn import functional as F
+
+from custom_detectron2.config import configurable
+from custom_detectron2.layers import Conv2d, ShapeSpec, get_norm
+from custom_detectron2.modeling import SEM_SEG_HEADS_REGISTRY
+from ..pixel_decoder.fpn import build_pixel_decoder
+from ..transformer_decoder.oneformer_transformer_decoder import build_transformer_decoder
+
+@SEM_SEG_HEADS_REGISTRY.register()
+class OneFormerHead(nn.Module):
+
+    _version = 2
+
+    def _load_from_state_dict(
+        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+    ):
+        version = local_metadata.get("version", None)
+        if version is None or version < 2:
+            # Do not warn if training from scratch
+            scratch = True
+            logger = logging.getLogger(__name__)
+            for k in list(state_dict.keys()):
+                newk = k
+                if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
+                    newk = k.replace(prefix, prefix + "pixel_decoder.")
+                    # logger.debug(f"{k} ==> {newk}")
+                if newk != k:
+                    state_dict[newk] = state_dict[k]
+                    del state_dict[k]
+                    scratch = False
+
+            if not scratch:
+                logger.warning(
+                    f"Weight format of {self.__class__.__name__} has changed! "
+                    "Please upgrade your models. Applying automatic conversion now ..."
+                )
+
+    @configurable
+    def __init__(
+        self,
+        input_shape: Dict[str, ShapeSpec],
+        *,
+        num_classes: int,
+        pixel_decoder: nn.Module,
+        loss_weight: float = 1.0,
+        ignore_value: int = -1,
+        # extra parameters
+        transformer_predictor: nn.Module,
+        transformer_in_feature: str,
+    ):
+        """
+        NOTE: this interface is experimental.
+        Args:
+            input_shape: shapes (channels and stride) of the input features
+            num_classes: number of classes to predict
+            pixel_decoder: the pixel decoder module
+            loss_weight: loss weight
+            ignore_value: category id to be ignored during training.
+ transformer_predictor: the transformer decoder that makes prediction + transformer_in_feature: input feature name to the transformer_predictor + """ + super().__init__() + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + self.in_features = [k for k, v in input_shape] + feature_strides = [v.stride for k, v in input_shape] + feature_channels = [v.channels for k, v in input_shape] + + self.ignore_value = ignore_value + self.common_stride = 4 + self.loss_weight = loss_weight + + self.pixel_decoder = pixel_decoder + self.predictor = transformer_predictor + self.transformer_in_feature = transformer_in_feature + + self.num_classes = num_classes + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + # figure out in_channels to transformer predictor + if cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder": + transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + elif cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding": + transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM + elif cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": + transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + else: + transformer_predictor_in_channels = input_shape[cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE].channels + + return { + "input_shape": { + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + }, + "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + "pixel_decoder": build_pixel_decoder(cfg, input_shape), + "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, + "transformer_in_feature": cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE, + "transformer_predictor": build_transformer_decoder( + cfg, + transformer_predictor_in_channels, + mask_classification=True, + ), + } + + def forward(self, features, tasks, mask=None): + return self.layers(features, tasks, mask) + + def layers(self, features, tasks, mask=None): + mask_features, transformer_encoder_features, multi_scale_features, _, _ = self.pixel_decoder.forward_features(features) + + if self.transformer_in_feature == "multi_scale_pixel_decoder": + predictions = self.predictor(multi_scale_features, mask_features, tasks, mask) + else: + if self.transformer_in_feature == "transformer_encoder": + assert ( + transformer_encoder_features is not None + ), "Please use the TransformerEncoderPixelDecoder." + predictions = self.predictor(transformer_encoder_features, mask_features, mask) + elif self.transformer_in_feature == "pixel_embedding": + predictions = self.predictor(mask_features, mask_features, mask) + else: + predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask) + return predictions diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9020c2df23e2af280b7bb168b996ae9eaf312eb8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
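
The channel routing in `OneFormerHead.from_config` above reduces to a small rule. As a sketch, a hypothetical helper mirroring those branches (config key names are taken from the code above; the helper itself is illustrative and not part of any file in this diff):

    def predictor_in_channels(cfg, input_shape):
        # Mirrors OneFormerHead.from_config: pick the feature width that is
        # fed to the transformer predictor, keyed on TRANSFORMER_IN_FEATURE.
        feat = cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE
        if feat in ("transformer_encoder", "multi_scale_pixel_decoder"):
            return cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        if feat == "pixel_embedding":
            return cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        # any other value names a backbone feature, e.g. "res5"
        return input_shape[feat].channels
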
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/fpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/fpn.py new file mode 100644 index 0000000000000000000000000000000000000000..a7003e39f5254e73351646e766f49080dda7533e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/fpn.py @@ -0,0 +1,312 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +from typing import Callable, Dict, List, Optional, Tuple, Union + +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_ +from torch.cuda.amp import autocast + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm +from custom_detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from ..transformer_decoder.position_encoding import PositionEmbeddingSine +from ..transformer_decoder.transformer import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn + + +def build_pixel_decoder(cfg, input_shape): + """ + Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`. + """ + name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME + model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) + forward_features = getattr(model, "forward_features", None) + if not callable(forward_features): + raise ValueError( + "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " + f"Please implement forward_features for {name} to only return mask features." + ) + return model + + +# This is a modified FPN decoder. +@SEM_SEG_HEADS_REGISTRY.register() +class BasePixelDecoder(nn.Module): + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + conv_dim: int, + mask_dim: int, + norm: Optional[Union[str, Callable]] = None, + ): + """ + NOTE: this interface is experimental. + Args: + input_shape: shapes (channels and stride) of the input features + conv_dims: number of output channels for the intermediate conv layers. + mask_dim: number of output channels for the final conv layer. 
+ norm (str or callable): normalization for all conv layers + """ + super().__init__() + + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" + feature_channels = [v.channels for k, v in input_shape] + + lateral_convs = [] + output_convs = [] + + use_bias = norm == "" + for idx, in_channels in enumerate(feature_channels): + if idx == len(self.in_features) - 1: + output_norm = get_norm(norm, conv_dim) + output_conv = Conv2d( + in_channels, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + activation=F.relu, + ) + weight_init.c2_xavier_fill(output_conv) + self.add_module("layer_{}".format(idx + 1), output_conv) + + lateral_convs.append(None) + output_convs.append(output_conv) + else: + lateral_norm = get_norm(norm, conv_dim) + output_norm = get_norm(norm, conv_dim) + + lateral_conv = Conv2d( + in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm + ) + output_conv = Conv2d( + conv_dim, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + activation=F.relu, + ) + weight_init.c2_xavier_fill(lateral_conv) + weight_init.c2_xavier_fill(output_conv) + self.add_module("adapter_{}".format(idx + 1), lateral_conv) + self.add_module("layer_{}".format(idx + 1), output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. + self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + + self.mask_dim = mask_dim + self.mask_features = Conv2d( + conv_dim, + mask_dim, + kernel_size=3, + stride=1, + padding=1, + ) + weight_init.c2_xavier_fill(self.mask_features) + + self.oneformer_num_feature_levels = 3 # always use 3 scales + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = {} + ret["input_shape"] = { + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + } + ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM + ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM + return ret + + def forward_features(self, features): + multi_scale_features = [] + num_cur_levels = 0 + # Reverse feature maps into top-down order (from low to high resolution) + for idx, f in enumerate(self.in_features[::-1]): + x = features[f] + lateral_conv = self.lateral_convs[idx] + output_conv = self.output_convs[idx] + if lateral_conv is None: + y = output_conv(x) + else: + cur_fpn = lateral_conv(x) + # Following FPN implementation, we use nearest upsampling here + y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest") + y = output_conv(y) + if num_cur_levels < self.oneformer_num_feature_levels: + multi_scale_features.append(y) + num_cur_levels += 1 + return self.mask_features(y), None, multi_scale_features + + def forward(self, features, targets=None): + logger = logging.getLogger(__name__) + logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.") + return self.forward_features(features) + + +class TransformerEncoderOnly(nn.Module): + def __init__( + self, + d_model=512, + nhead=8, + num_encoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + + encoder_layer = TransformerEncoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, 
normalize_before + ) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, pos_embed): + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + if mask is not None: + mask = mask.flatten(1) + + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + return memory.permute(1, 2, 0).view(bs, c, h, w) + + +# This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map. +@SEM_SEG_HEADS_REGISTRY.register() +class TransformerEncoderPixelDecoder(BasePixelDecoder): + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + transformer_dropout: float, + transformer_nheads: int, + transformer_dim_feedforward: int, + transformer_enc_layers: int, + transformer_pre_norm: bool, + conv_dim: int, + mask_dim: int, + norm: Optional[Union[str, Callable]] = None, + ): + """ + NOTE: this interface is experimental. + Args: + input_shape: shapes (channels and stride) of the input features + transformer_dropout: dropout probability in transformer + transformer_nheads: number of heads in transformer + transformer_dim_feedforward: dimension of feedforward network + transformer_enc_layers: number of transformer encoder layers + transformer_pre_norm: whether to use pre-layernorm or not + conv_dims: number of output channels for the intermediate conv layers. + mask_dim: number of output channels for the final conv layer. 
+ norm (str or callable): normalization for all conv layers + """ + super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm) + + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" + feature_strides = [v.stride for k, v in input_shape] + feature_channels = [v.channels for k, v in input_shape] + + in_channels = feature_channels[len(self.in_features) - 1] + self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1) + weight_init.c2_xavier_fill(self.input_proj) + self.transformer = TransformerEncoderOnly( + d_model=conv_dim, + dropout=transformer_dropout, + nhead=transformer_nheads, + dim_feedforward=transformer_dim_feedforward, + num_encoder_layers=transformer_enc_layers, + normalize_before=transformer_pre_norm, + ) + N_steps = conv_dim // 2 + self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) + + # update layer + use_bias = norm == "" + output_norm = get_norm(norm, conv_dim) + output_conv = Conv2d( + conv_dim, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + activation=F.relu, + ) + weight_init.c2_xavier_fill(output_conv) + delattr(self, "layer_{}".format(len(self.in_features))) + self.add_module("layer_{}".format(len(self.in_features)), output_conv) + self.output_convs[0] = output_conv + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = super().from_config(cfg, input_shape) + ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT + ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS + ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD + ret[ + "transformer_enc_layers" + ] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config + ret["transformer_pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM + return ret + + def forward_features(self, features): + multi_scale_features = [] + num_cur_levels = 0 + # Reverse feature maps into top-down order (from low to high resolution) + for idx, f in enumerate(self.in_features[::-1]): + x = features[f] + lateral_conv = self.lateral_convs[idx] + output_conv = self.output_convs[idx] + if lateral_conv is None: + transformer = self.input_proj(x) + pos = self.pe_layer(x) + transformer = self.transformer(transformer, None, pos) + y = output_conv(transformer) + # save intermediate feature as input to Transformer decoder + transformer_encoder_features = transformer + else: + cur_fpn = lateral_conv(x) + # Following FPN implementation, we use nearest upsampling here + y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest") + y = output_conv(y) + if num_cur_levels < self.oneformer_num_feature_levels: + multi_scale_features.append(y) + num_cur_levels += 1 + return self.mask_features(y), transformer_encoder_features, multi_scale_features + + def forward(self, features, targets=None): + logger = logging.getLogger(__name__) + logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.") + return self.forward_features(features) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/msdeformattn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/msdeformattn.py new file mode 100644 index 0000000000000000000000000000000000000000..69a094da773c7873cf66f61a9afbd9dcdc4b6c8e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/msdeformattn.py @@ -0,0 +1,358 @@ +# 
Copyright (c) Facebook, Inc. and its affiliates. +import logging +import numpy as np +from typing import Callable, Dict, List, Optional, Tuple, Union + +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_ +from torch.cuda.amp import autocast + +from custom_detectron2.config import configurable +from custom_detectron2.layers import Conv2d, ShapeSpec, get_norm +from custom_detectron2.modeling import SEM_SEG_HEADS_REGISTRY + +from ..transformer_decoder.position_encoding import PositionEmbeddingSine +from ..transformer_decoder.transformer import _get_clones, _get_activation_fn +from .ops.modules import MSDeformAttn + + +# MSDeformAttn Transformer encoder in deformable detr +class MSDeformAttnTransformerEncoderOnly(nn.Module): + def __init__(self, d_model=256, nhead=8, + num_encoder_layers=6, dim_feedforward=1024, dropout=0.1, + activation="relu", + num_feature_levels=4, enc_n_points=4, + ): + super().__init__() + + self.d_model = d_model + self.nhead = nhead + + encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward, + dropout, activation, + num_feature_levels, nhead, enc_n_points) + self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers) + + self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformAttn): + m._reset_parameters() + normal_(self.level_embed) + + def get_valid_ratio(self, mask): + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def forward(self, srcs, pos_embeds): + masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs] + # prepare input for encoder + src_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)): + bs, c, h, w = src.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + src = src.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + src_flatten.append(src) + mask_flatten.append(mask) + src_flatten = torch.cat(src_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) + + # encoder + memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten) + + return memory, spatial_shapes, level_start_index, valid_ratios + + +class MSDeformAttnTransformerEncoderLayer(nn.Module): + def __init__(self, + d_model=256, d_ffn=1024, + dropout=0.1, activation="relu", + n_levels=4, n_heads=8, n_points=4): + super().__init__() + + # self attention + 
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points) + self.dropout1 = nn.Dropout(dropout) + self.norm1 = nn.LayerNorm(d_model) + + # ffn + self.linear1 = nn.Linear(d_model, d_ffn) + self.activation = _get_activation_fn(activation) + self.dropout2 = nn.Dropout(dropout) + self.linear2 = nn.Linear(d_ffn, d_model) + self.dropout3 = nn.Dropout(dropout) + self.norm2 = nn.LayerNorm(d_model) + + @staticmethod + def with_pos_embed(tensor, pos): + return tensor if pos is None else tensor + pos + + def forward_ffn(self, src): + src2 = self.linear2(self.dropout2(self.activation(self.linear1(src)))) + src = src + self.dropout3(src2) + src = self.norm2(src) + return src + + def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None): + # self attention + src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask) + src = src + self.dropout1(src2) + src = self.norm1(src) + + # ffn + src = self.forward_ffn(src) + + return src + + +class MSDeformAttnTransformerEncoder(nn.Module): + def __init__(self, encoder_layer, num_layers): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + reference_points_list = [] + for lvl, (H_, W_) in enumerate(spatial_shapes): + + ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), + torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None): + output = src + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device) + for _, layer in enumerate(self.layers): + output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask) + + return output + + +@SEM_SEG_HEADS_REGISTRY.register() +class MSDeformAttnPixelDecoder(nn.Module): + @configurable + def __init__( + self, + input_shape: Dict[str, ShapeSpec], + *, + transformer_dropout: float, + transformer_nheads: int, + transformer_dim_feedforward: int, + transformer_enc_layers: int, + conv_dim: int, + mask_dim: int, + norm: Optional[Union[str, Callable]] = None, + # deformable transformer encoder args + transformer_in_features: List[str], + common_stride: int, + ): + """ + NOTE: this interface is experimental. + Args: + input_shape: shapes (channels and stride) of the input features + transformer_dropout: dropout probability in transformer + transformer_nheads: number of heads in transformer + transformer_dim_feedforward: dimension of feedforward network + transformer_enc_layers: number of transformer encoder layers + conv_dims: number of output channels for the intermediate conv layers. + mask_dim: number of output channels for the final conv layer. 
+ norm (str or callable): normalization for all conv layers + """ + super().__init__() + transformer_input_shape = { + k: v for k, v in input_shape.items() if k in transformer_in_features + } + + # this is the input shape of pixel decoder + input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) + self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" + self.feature_strides = [v.stride for k, v in input_shape] + self.feature_channels = [v.channels for k, v in input_shape] + + # this is the input shape of transformer encoder (could use less features than pixel decoder + transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride) + self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5" + transformer_in_channels = [v.channels for k, v in transformer_input_shape] + self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers + + self.transformer_num_feature_levels = len(self.transformer_in_features) + if self.transformer_num_feature_levels > 1: + input_proj_list = [] + # from low resolution to high resolution (res5 -> res2) + for in_channels in transformer_in_channels[::-1]: + input_proj_list.append(nn.Sequential( + nn.Conv2d(in_channels, conv_dim, kernel_size=1), + nn.GroupNorm(32, conv_dim), + )) + self.input_proj = nn.ModuleList(input_proj_list) + else: + self.input_proj = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1), + nn.GroupNorm(32, conv_dim), + )]) + + for proj in self.input_proj: + nn.init.xavier_uniform_(proj[0].weight, gain=1) + nn.init.constant_(proj[0].bias, 0) + + self.transformer = MSDeformAttnTransformerEncoderOnly( + d_model=conv_dim, + dropout=transformer_dropout, + nhead=transformer_nheads, + dim_feedforward=transformer_dim_feedforward, + num_encoder_layers=transformer_enc_layers, + num_feature_levels=self.transformer_num_feature_levels, + ) + N_steps = conv_dim // 2 + self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) + + self.mask_dim = mask_dim + # use 1x1 conv instead + self.mask_features = Conv2d( + conv_dim, + mask_dim, + kernel_size=1, + stride=1, + padding=0, + ) + weight_init.c2_xavier_fill(self.mask_features) + + self.oneformer_num_feature_levels = 3 # always use 3 scales + self.common_stride = common_stride + + # extra fpn levels + stride = min(self.transformer_feature_strides) + self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride)) + + lateral_convs = [] + output_convs = [] + + use_bias = norm == "" + for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]): + lateral_norm = get_norm(norm, conv_dim) + output_norm = get_norm(norm, conv_dim) + + lateral_conv = Conv2d( + in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm + ) + output_conv = Conv2d( + conv_dim, + conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + activation=F.relu, + ) + weight_init.c2_xavier_fill(lateral_conv) + weight_init.c2_xavier_fill(output_conv) + self.add_module("adapter_{}".format(idx + 1), lateral_conv) + self.add_module("layer_{}".format(idx + 1), output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. 
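+        # For example, with two extra FPN levels at strides 4 and 8, index 0 of
+        # lateral_convs/output_convs after the flip below is the stride-8 (lower
+        # resolution) branch, matching the high-to-low iteration in forward_features.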
+ self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + + @classmethod + def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): + ret = {} + ret["input_shape"] = { + k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + } + ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM + ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM + ret["transformer_dropout"] = cfg.MODEL.ONE_FORMER.DROPOUT + ret["transformer_nheads"] = cfg.MODEL.ONE_FORMER.NHEADS + # ret["transformer_dim_feedforward"] = cfg.MODEL.ONE_FORMER.DIM_FEEDFORWARD + ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder + ret[ + "transformer_enc_layers" + ] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS # a separate config + ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES + ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE + return ret + + @autocast(enabled=False) + def forward_features(self, features): + srcs = [] + pos = [] + # Reverse feature maps into top-down order (from low to high resolution) + for idx, f in enumerate(self.transformer_in_features[::-1]): + x = features[f].float() # deformable detr does not support half precision + srcs.append(self.input_proj[idx](x)) + pos.append(self.pe_layer(x)) + + y, spatial_shapes, level_start_index, valid_ratios = self.transformer(srcs, pos) + bs = y.shape[0] + + split_size_or_sections = [None] * self.transformer_num_feature_levels + for i in range(self.transformer_num_feature_levels): + if i < self.transformer_num_feature_levels - 1: + split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i] + else: + split_size_or_sections[i] = y.shape[1] - level_start_index[i] + y = torch.split(y, split_size_or_sections, dim=1) + + out = [] + multi_scale_features = [] + num_cur_levels = 0 + for i, z in enumerate(y): + out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1])) + + # append `out` with extra FPN levels + # Reverse feature maps into top-down order (from low to high resolution) + for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]): + x = features[f].float() + lateral_conv = self.lateral_convs[idx] + output_conv = self.output_convs[idx] + cur_fpn = lateral_conv(x) + # Following FPN implementation, we use nearest upsampling here + y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False) + y = output_conv(y) + out.append(y) + + for o in out: + if num_cur_levels < self.oneformer_num_feature_levels: + multi_scale_features.append(o) + num_cur_levels += 1 + + return self.mask_features(out[-1]), out[0], multi_scale_features, spatial_shapes, level_start_index diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b06b5ac538b63bdb9a6c82e4635b95bb5491d5b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/__init__.py @@ -0,0 +1,13 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. 
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + +from .ms_deform_attn_func import MSDeformAttnFunction + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py new file mode 100644 index 0000000000000000000000000000000000000000..e074eb69819151add821a8ff9ed215ed9b874070 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py @@ -0,0 +1,77 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +# if torch.cuda.is_available(): +# try: +# import MultiScaleDeformableAttention as MSDA +# except ModuleNotFoundError as e: +# info_string = ( +# "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n" +# "\t`cd oneformer/modeling/pixel_decoder/ops`\n" +# "\t`sh make.sh`\n" +# ) +# raise ModuleNotFoundError(info_string) +# else: +# MultiScaleDeformableAttention = None + + + +class MSDeformAttnFunction(Function): + @staticmethod + def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step): + # ctx.im2col_step = im2col_step + output = ms_deform_attn_core_pytorch( + value, value_spatial_shapes, sampling_locations, attention_weights) + # ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) + return output + + # @staticmethod + # @once_differentiable + # def backward(ctx, grad_output): + # value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors + # grad_value, grad_sampling_loc, grad_attn_weight = \ + # MSDA.ms_deform_attn_backward( + # value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step) + # + # return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None + + +def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights): + # for debug and test only, + # need to use 
cuda version instead + N_, S_, M_, D_ = value.shape + _, Lq_, M_, L_, P_, _ = sampling_locations.shape + value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for lid_, (H_, W_) in enumerate(value_spatial_shapes): + # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_ + value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_) + # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2 + sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1) + # N_*M_, D_, Lq_, P_ + sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_, + mode='bilinear', padding_mode='zeros', align_corners=False) + sampling_value_list.append(sampling_value_l_) + # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_) + attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_) + output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_) + return output.transpose(1, 2).contiguous() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/make.sh b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/make.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca5c0b469da786c847ba04d437bb31ee0fc938da --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/make.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + +FORCE_CUDA=1 python setup.py build install diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6fdbf03359958f3d67ab00f879bf6b61a6c8f06a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/__init__.py @@ -0,0 +1,12 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + +from .ms_deform_attn import MSDeformAttn diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc471d2da550c839a3446a6041e40d338425129 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/modules/ms_deform_attn.py @@ -0,0 +1,120 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. +# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import warnings +import math + +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn.init import xavier_uniform_, constant_ + +MSDeformAttnFunction = None +from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch + + +def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))) + return (n & (n-1) == 0) and n != 0 + + +class MSDeformAttn(nn.Module): + def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4): + """ + Multi-Scale Deformable Attention Module + :param d_model hidden dimension + :param n_levels number of feature levels + :param n_heads number of attention heads + :param n_points number of sampling points per attention head per feature level + """ + super().__init__() + if d_model % n_heads != 0: + raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads)) + _d_per_head = d_model // n_heads + # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_head): + warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 " + "which is more efficient in our CUDA implementation.") + + self.im2col_step = 128 + + self.d_model = d_model + self.n_levels = n_levels + self.n_heads = n_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points) + self.value_proj = nn.Linear(d_model, d_model) + self.output_proj = nn.Linear(d_model, d_model) + + self._reset_parameters() + + def _reset_parameters(self): + constant_(self.sampling_offsets.weight.data, 0.) 
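+        # With the offset weights zeroed above, the bias set below determines the
+        # initial sampling pattern: each head gets its own direction on the unit
+        # square, and the k-th point starts (k + 1) steps out along that direction.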
+ thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + constant_(self.attention_weights.weight.data, 0.) + constant_(self.attention_weights.bias.data, 0.) + xavier_uniform_(self.value_proj.weight.data) + constant_(self.value_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None): + """ + :param query (N, Length_{query}, C) + :param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area + or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes + :param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C) + :param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] + :param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}] + :param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements + :return output (N, Length_{query}, C) + """ + N, Len_q, _ = query.shape + N, Len_in, _ = input_flatten.shape + assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in + + value = self.value_proj(input_flatten) + if input_padding_mask is not None: + value = value.masked_fill(input_padding_mask[..., None], float(0)) + value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2) + attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points) + attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points) + # N, Len_q, n_heads, n_levels, n_points, 2 + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets / offset_normalizer[None, None, None, :, None, :] + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + else: + raise ValueError( + 'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1])) + # try: + output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights) + # # For FLOPs calculation only + # output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + return output \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/setup.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/setup.py new file mode 100644 index 
0000000000000000000000000000000000000000..3b57ad313ac8f9b6586892142da8ba943e516cec
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/setup.py
@@ -0,0 +1,78 @@
+# ------------------------------------------------------------------------------------------------
+# Deformable DETR
+# Copyright (c) 2020 SenseTime. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+# ------------------------------------------------------------------------------------------------
+# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+# ------------------------------------------------------------------------------------------------
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+
+import os
+import glob
+
+import torch
+
+from torch.utils.cpp_extension import CUDA_HOME
+from torch.utils.cpp_extension import CppExtension
+from torch.utils.cpp_extension import CUDAExtension
+
+from setuptools import find_packages
+from setuptools import setup
+
+requirements = ["torch", "torchvision"]
+
+def get_extensions():
+    this_dir = os.path.dirname(os.path.abspath(__file__))
+    extensions_dir = os.path.join(this_dir, "src")
+
+    main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
+    source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
+    source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
+
+    sources = main_file + source_cpu
+    extension = CppExtension
+    extra_compile_args = {"cxx": []}
+    define_macros = []
+
+    # Build with CUDA if FORCE_CUDA is set or a device is visible; FORCE_CUDA
+    # allows compiling on machines that have the toolkit but no attached GPU.
+    if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None:
+        extension = CUDAExtension
+        sources += source_cuda
+        define_macros += [("WITH_CUDA", None)]
+        extra_compile_args["nvcc"] = [
+            "-DCUDA_HAS_FP16=1",
+            "-D__CUDA_NO_HALF_OPERATORS__",
+            "-D__CUDA_NO_HALF_CONVERSIONS__",
+            "-D__CUDA_NO_HALF2_OPERATORS__",
+        ]
+    else:
+        if CUDA_HOME is None:
+            raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
+        else:
+            raise NotImplementedError('No CUDA runtime is found. 
Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().')
+
+    sources = [os.path.join(extensions_dir, s) for s in sources]
+    include_dirs = [extensions_dir]
+    ext_modules = [
+        extension(
+            "MultiScaleDeformableAttention",
+            sources,
+            include_dirs=include_dirs,
+            define_macros=define_macros,
+            extra_compile_args=extra_compile_args,
+        )
+    ]
+    return ext_modules
+
+setup(
+    name="MultiScaleDeformableAttention",
+    version="1.0",
+    author="Weijie Su",
+    url="https://github.com/fundamentalvision/Deformable-DETR",
+    description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
+    packages=find_packages(exclude=("configs", "tests",)),
+    ext_modules=get_extensions(),
+    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
+)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..48757e2b0156b2c1513b615d2a17e5aee5172ae7
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.cpp
@@ -0,0 +1,46 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#include <vector>
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+
+at::Tensor
+ms_deform_attn_cpu_forward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const int im2col_step)
+{
+    AT_ERROR("Not implemented on the CPU");
+}
+
+std::vector<at::Tensor>
+ms_deform_attn_cpu_backward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const at::Tensor &grad_output,
+    const int im2col_step)
+{
+    AT_ERROR("Not implemented on the CPU");
+}
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..51bb27e9ee828f967e8aa854c2d55574040c6d7e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cpu/ms_deform_attn_cpu.h
@@ -0,0 +1,38 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor
+ms_deform_attn_cpu_forward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const int im2col_step);
+
+std::vector<at::Tensor>
+ms_deform_attn_cpu_backward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const at::Tensor &grad_output,
+    const int im2col_step);
+
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.cu b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..0c465dab3d636dfd6a44523c63f148b6e15084d9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.cu
@@ -0,0 +1,158 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#include <vector>
+#include "cuda/ms_deform_im2col_cuda.cuh"
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+
+at::Tensor ms_deform_attn_cuda_forward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const int im2col_step)
+{
+    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
+    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
+    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
+    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
+    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
+
+    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
+    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
+    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
+    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
+    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
+
+    const int batch = value.size(0);
+    const int spatial_size = value.size(1);
+    const int num_heads = value.size(2);
+    const int channels = value.size(3);
+
+    const int num_levels = spatial_shapes.size(0);
+
+    const int num_query = sampling_loc.size(1);
+    const int num_point = sampling_loc.size(4);
+
+    const int im2col_step_ = std::min(batch, im2col_step);
+
+    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
+
+    auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
+
+    const int batch_n = im2col_step_;
+    auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
+    auto per_value_size = spatial_size * num_heads * channels;
+    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
+    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
+    for (int n = 0; n < batch/im2col_step_; ++n)
+    {
+        auto columns = output_n.select(0, n);
+        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
+            ms_deformable_im2col_cuda<scalar_t>(at::cuda::getCurrentCUDAStream(),
+                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+                spatial_shapes.data<int64_t>(),
+                level_start_index.data<int64_t>(),
+                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
+                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
+                columns.data<scalar_t>());
+
+        }));
+    }
+
+    output = output.view({batch, num_query, num_heads*channels});
+
+    return output;
+}
+
+
+std::vector<at::Tensor> ms_deform_attn_cuda_backward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const at::Tensor &grad_output,
+    const int im2col_step)
+{
+
+    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
+    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
+    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
+    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
+    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
+    AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
+
+    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
+    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
+    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
+    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
+    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
+    AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
+
+    const int batch = value.size(0);
+    const int spatial_size = value.size(1);
+    const int num_heads = value.size(2);
+    const int channels = value.size(3);
+
+    const int num_levels = spatial_shapes.size(0);
+
+    const int num_query = sampling_loc.size(1);
+    const int num_point = sampling_loc.size(4);
+
+    const int im2col_step_ = std::min(batch, im2col_step);
+
+    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
+
+    auto grad_value = at::zeros_like(value);
+    auto grad_sampling_loc = at::zeros_like(sampling_loc);
+    auto grad_attn_weight = at::zeros_like(attn_weight);
+
+    const int batch_n = im2col_step_;
+    auto per_value_size = spatial_size * num_heads * channels;
+    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
+    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
+    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
+
+    for (int n = 0; n < batch/im2col_step_; ++n)
+    {
+        auto grad_output_g = grad_output_n.select(0, n);
+        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
+            ms_deformable_col2im_cuda<scalar_t>(at::cuda::getCurrentCUDAStream(),
+                grad_output_g.data<scalar_t>(),
+                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+                spatial_shapes.data<int64_t>(),
+                level_start_index.data<int64_t>(),
+                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
+                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
+                grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
+                grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
+                grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
+
+        }));
+    }
+
+    return {
+        grad_value, grad_sampling_loc, grad_attn_weight
+    };
+}
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f0658e8668a11f0e7d71deff9adac71884f2e87
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_attn_cuda.h
@@ -0,0 +1,35 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor ms_deform_attn_cuda_forward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const int im2col_step);
+
+std::vector<at::Tensor> ms_deform_attn_cuda_backward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const at::Tensor &grad_output,
+    const int im2col_step);
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_im2col_cuda.cuh b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_im2col_cuda.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..c04e0d4ab97d25c1756fcd8d08dd1e5a6d280b7c
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/cuda/ms_deform_im2col_cuda.cuh
@@ -0,0 +1,1332 @@
+/*!
+**************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************
+* Modified from DCN (https://github.com/msracver/Deformable-ConvNets)
+* Copyright (c) 2018 Microsoft
+**************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#include <cstdio>
+#include <algorithm>
+#include <cstring>
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+#include <THC/THCAtomics.cuh>
+
+#define CUDA_KERNEL_LOOP(i, n)                          \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x;   \
+      i < (n);                                          \
+      i += blockDim.x * gridDim.x)
+
+const int CUDA_NUM_THREADS = 1024;
+inline int GET_BLOCKS(const int N, const int num_threads)
+{
+  return (N + num_threads - 1) / num_threads;
+}
+
+
+template <typename scalar_t>
+__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
+                                                   const int &height, const int &width, const int &nheads, const int &channels,
+                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c)
+{
+  const int h_low = floor(h);
+  const int w_low = floor(w);
+  const int h_high = h_low + 1;
+  const int w_high = w_low + 1;
+
+  const scalar_t lh = h - h_low;
+  const scalar_t lw = w - w_low;
+  const scalar_t hh = 1 - lh, hw = 1 - lw;
+
+  const int w_stride = nheads * channels;
+  const int h_stride = width * w_stride;
+  const int h_low_ptr_offset = h_low * h_stride;
+  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
+  const int w_low_ptr_offset = w_low * w_stride;
+  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
+  const int base_ptr = m * channels + c;
+
+  scalar_t v1 = 0;
+  if (h_low >= 0 && w_low >= 0)
+  {
+    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
+    v1 = bottom_data[ptr1];
+  }
+  scalar_t v2 = 0;
+  if (h_low >= 0 && w_high <= width - 1)
+  {
+    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
+    v2 = bottom_data[ptr2];
+  }
+  scalar_t v3 = 0;
+  if (h_high <= height - 1 && w_low >= 0)
+  {
+    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
+    v3 = bottom_data[ptr3];
+  }
+  scalar_t v4 = 0;
+  if (h_high <= height - 1 && w_high <= width - 1)
width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; 
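+  // Editorial note: like ms_deform_attn_col2im_bilinear above, this "_gm" variant
+  // interpolates with the bilinear weights
+  //   w1 = (1-lh)*(1-lw), w2 = (1-lh)*lw, w3 = lh*(1-lw), w4 = lh*lw,
+  // so val = w1*v1 + w2*v2 + w3*v3 + w4*v4, and the location gradients follow from
+  //   d(val)/dw = (1-lh)*(v2-v1) + lh*(v4-v3),  d(val)/dh = (1-lw)*(v3-v1) + lw*(v4-v2),
+  // each scaled by width/height because sampling locations are normalized to [0, 1].
+  // The only difference from the non-"_gm" variant is at the end: grad_sampling_loc
+  // and grad_attn_weight are accumulated with atomicAdd into global memory instead
+  // of being written to per-thread shared-memory slots.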
+ const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += 
ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr 
+= 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockSize/2; s>0; s>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + 
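+        // Editorial note: unlike the *_v1 kernel above, where thread 0 serially sums
+        // all blockSize partial gradients, this *_v2 kernel uses the shared-memory
+        // tree reduction just completed: each round halves the number of active
+        // threads (s = blockSize/2, blockSize/4, ...), giving O(log blockSize) steps
+        // instead of O(blockSize). Both variants assume the compile-time blockSize
+        // equals the launch-time blockDim.x; the tree version additionally relies on
+        // blockSize being a power of two, which holds for the 64/128/256/512/1024
+        // instantiations chosen by ms_deformable_col2im_cuda below.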
data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + 
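+        // Editorial note: this kernel and the *_v2/_multi_blocks variants declare
+        // `extern __shared__ int _s[]`, so the shared buffer is sized at launch time
+        // rather than by a template parameter. The layout is 2*blockDim.x scalars for
+        // the sampling-location gradients followed by blockDim.x scalars for the
+        // attention-weight gradients, i.e. the launcher must pass a dynamic
+        // shared-memory size of blockDim.x * 3 * sizeof(scalar_t).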
grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 
+ 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + 
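+            // Editorial note: the `if (tid + (s << 1) < spre)` guard a few lines
+            // below folds in the leftover element whenever the previous round had an
+            // odd count (spre tracks the previous round's size), so this reduction,
+            // shared with the *_v2 kernel above, also works when blockDim.x is not a
+            // power of two.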
cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_value, + const int64_t* data_spatial_shapes, + const int64_t* 
data_level_start_index, + const scalar_t* data_sampling_loc, + const scalar_t* data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* data_col) +{ + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void ms_deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* grad_col, + const scalar_t* data_value, + const int64_t * data_spatial_shapes, + const int64_t * data_level_start_index, + const scalar_t * data_sampling_loc, + const scalar_t * data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels; + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) + { + if ((channels & 1023) == 0) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_gm + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + else{ + switch(channels) + { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + 
grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 1024: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + default: + if (channels < 64) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + 
          data_level_start_index,
+          data_sampling_loc,
+          data_attn_weight,
+          batch_size,
+          spatial_size,
+          num_heads,
+          channels,
+          num_levels,
+          num_query,
+          num_point,
+          grad_value,
+          grad_sampling_loc,
+          grad_attn_weight);
+      }
+    }
+  }
+  cudaError_t err = cudaGetLastError();
+  if (err != cudaSuccess)
+  {
+    printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
+  }
+
+}
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/ms_deform_attn.h b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/ms_deform_attn.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f80a1b294c55b37d13bb3558ff7aeadba3b37de
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/ms_deform_attn.h
@@ -0,0 +1,67 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+/*!
+* Copyright (c) Facebook, Inc. and its affiliates.
+* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
+*/
+
+#pragma once
+
+#include "cpu/ms_deform_attn_cpu.h"
+
+#ifdef WITH_CUDA
+#include "cuda/ms_deform_attn_cuda.h"
+#endif
+
+
+at::Tensor
+ms_deform_attn_forward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const int im2col_step)
+{
+    if (value.type().is_cuda())
+    {
+#ifdef WITH_CUDA
+        return ms_deform_attn_cuda_forward(
+            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
+#else
+        AT_ERROR("Not compiled with GPU support");
+#endif
+    }
+    AT_ERROR("Not implemented on the CPU");
+}
+
+std::vector<at::Tensor>
+ms_deform_attn_backward(
+    const at::Tensor &value,
+    const at::Tensor &spatial_shapes,
+    const at::Tensor &level_start_index,
+    const at::Tensor &sampling_loc,
+    const at::Tensor &attn_weight,
+    const at::Tensor &grad_output,
+    const int im2col_step)
+{
+    if (value.type().is_cuda())
+    {
+#ifdef WITH_CUDA
+        return ms_deform_attn_cuda_backward(
+            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
+#else
+        AT_ERROR("Not compiled with GPU support");
+#endif
+    }
+    AT_ERROR("Not implemented on the CPU");
+}
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/vision.cpp b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/vision.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4a08821e0121a77556aa7a263ec8ebfa928b13b6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/src/vision.cpp
@@ -0,0 +1,21 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +/*! +* Copyright (c) Facebook, Inc. and its affiliates. +* Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR +*/ + +#include "ms_deform_attn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward"); + m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward"); +} diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/test.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/test.py new file mode 100644 index 0000000000000000000000000000000000000000..6e1b545459f6fd3235767e721eb5a1090ae14bef --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/pixel_decoder/ops/test.py @@ -0,0 +1,92 @@ +# ------------------------------------------------------------------------------------------------ +# Deformable DETR +# Copyright (c) 2020 SenseTime. All Rights Reserved. +# Licensed under the Apache License, Version 2.0 [see LICENSE for details] +# ------------------------------------------------------------------------------------------------ +# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +# ------------------------------------------------------------------------------------------------ + +# Copyright (c) Facebook, Inc. and its affiliates. 
+# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +import time +import torch +import torch.nn as nn +from torch.autograd import gradcheck + +from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch + + +N, M, D = 1, 2, 2 +Lq, L, P = 2, 2, 2 +shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda() +level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1])) +S = sum([(H*W).item() for H, W in shapes]) + + +torch.manual_seed(3) + + +@torch.no_grad() +def check_forward_equal_with_pytorch_double(): + value = torch.rand(N, S, M, D).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu() + output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu() + fwdok = torch.allclose(output_cuda, output_pytorch) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max() + + print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_forward_equal_with_pytorch_float(): + value = torch.rand(N, S, M, D).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu() + output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu() + fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max() + + print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True): + + value = torch.rand(N, S, M, channels).cuda() * 0.01 + sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda() + attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5 + attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True) + im2col_step = 2 + func = MSDeformAttnFunction.apply + + value.requires_grad = grad_value + sampling_locations.requires_grad = grad_sampling_loc + attention_weights.requires_grad = grad_attn_weight + + gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step)) + + print(f'* {gradok} check_gradient_numerical(D={channels})') + + +if __name__ == '__main__': + check_forward_equal_with_pytorch_double() + check_forward_equal_with_pytorch_float() + + for channels in [30, 32, 64, 71, 1025, 2048, 3096]: + 
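+        # Editorial note: gradcheck compares the op's analytic gradients against
+        # finite-difference estimates, which is why everything is cast to double --
+        # float32 rarely passes the default tolerances. The channel list is chosen to
+        # hit every dispatch path in ms_deformable_col2im_cuda: 30 and 71 take the
+        # dynamic shared-memory kernels, 32 and 64 the fixed-blockSize kernels,
+        # 2048 the multi-block reduction (divisible by 1024), and 1025/3096 the
+        # plain atomicAdd fallback. Looser tolerances can be passed explicitly, e.g.
+        #   gradcheck(func, inputs, eps=1e-6, atol=1e-4, rtol=1e-3)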
        check_gradient_numerical(channels, True, True, True)
+
+
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b84bd4ecb48f134ccc218c4d5f02c50f7033bcd9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .oneformer_transformer_decoder import ContrastiveMultiScaleMaskedTransformerDecoder
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/oneformer_transformer_decoder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/oneformer_transformer_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..39374b77578b1676f0976d0e8bd2ec048db4785c
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/oneformer_transformer_decoder.py
@@ -0,0 +1,528 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/transformer_decoder/mask2former_transformer_decoder.py
+# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
+# ------------------------------------------------------------------------------
+
+import logging
+import fvcore.nn.weight_init as weight_init
+from typing import Optional
+import torch
+from torch import nn, Tensor
+from torch.nn import functional as F
+
+from custom_detectron2.config import configurable
+from custom_detectron2.layers import Conv2d
+
+from .position_encoding import PositionEmbeddingSine
+from .transformer import Transformer
+
+from custom_detectron2.utils.registry import Registry
+
+
+TRANSFORMER_DECODER_REGISTRY = Registry("TRANSFORMER_MODULE")
+TRANSFORMER_DECODER_REGISTRY.__doc__ = """
+Registry for the transformer module in OneFormer.
+"""
+
+
+def build_transformer_decoder(cfg, in_channels, mask_classification=True):
+    """
+    Build a transformer decoder from `cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME`.
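+
+    A minimal usage sketch (hypothetical values; assumes a fully populated
+    OneFormer config, and uses the class name registered in this file):
+
+        cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME = "ContrastiveMultiScaleMaskedTransformerDecoder"
+        decoder = build_transformer_decoder(cfg, in_channels=256)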
+ """ + name = cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME + return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification) + + +class SelfAttentionLayer(nn.Module): + + def __init__(self, d_model, nhead, dropout=0.0, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, tgt_mask, + tgt_key_padding_mask, query_pos) + return self.forward_post(tgt, tgt_mask, + tgt_key_padding_mask, query_pos) + + +class CrossAttentionLayer(nn.Module): + + def __init__(self, d_model, nhead, dropout=0.0, + activation="relu", normalize_before=False): + super().__init__() + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + + self.norm = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + + return tgt + + def forward_pre(self, tgt, memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout(tgt2) + + return tgt + + def forward(self, tgt, memory, + memory_mask: 
Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, memory_mask, + memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, memory_mask, + memory_key_padding_mask, pos, query_pos) + + +class FFNLayer(nn.Module): + + def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, + activation="relu", normalize_before=False): + super().__init__() + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm = nn.LayerNorm(d_model) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt): + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout(tgt2) + tgt = self.norm(tgt) + return tgt + + def forward_pre(self, tgt): + tgt2 = self.norm(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout(tgt2) + return tgt + + def forward(self, tgt): + if self.normalize_before: + return self.forward_pre(tgt) + return self.forward_post(tgt) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + +class MLP(nn.Module): + """ Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +@TRANSFORMER_DECODER_REGISTRY.register() +class ContrastiveMultiScaleMaskedTransformerDecoder(nn.Module): + + _version = 2 + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + if version is None or version < 2: + # Do not warn if train from scratch + scratch = True + logger = logging.getLogger(__name__) + for k in list(state_dict.keys()): + newk = k + if "static_query" in k: + newk = k.replace("static_query", "query_feat") + if newk != k: + state_dict[newk] = state_dict[k] + del state_dict[k] + scratch = False + + if not scratch: + logger.warning( + f"Weight format of {self.__class__.__name__} have changed! " + "Please upgrade your models. Applying automatic conversion now ..." 
+ ) + + @configurable + def __init__( + self, + in_channels, + mask_classification=True, + *, + num_classes: int, + hidden_dim: int, + num_queries: int, + nheads: int, + dropout: float, + dim_feedforward: int, + enc_layers: int, + is_train: bool, + dec_layers: int, + class_dec_layers: int, + pre_norm: bool, + mask_dim: int, + enforce_input_project: bool, + use_task_norm: bool, + ): + """ + NOTE: this interface is experimental. + Args: + in_channels: channels of the input features + mask_classification: whether to add mask classifier or not + num_classes: number of classes + hidden_dim: Transformer feature dimension + num_queries: number of queries + nheads: number of heads + dim_feedforward: feature dimension in feedforward network + enc_layers: number of Transformer encoder layers + dec_layers: number of Transformer decoder layers + pre_norm: whether to use pre-LayerNorm or not + mask_dim: mask feature dimension + enforce_input_project: add input project 1x1 conv even if input + channels and hidden dim is identical + """ + super().__init__() + + assert mask_classification, "Only support mask classification model" + self.mask_classification = mask_classification + self.is_train = is_train + self.use_task_norm = use_task_norm + + # positional encoding + N_steps = hidden_dim // 2 + self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True) + + self.class_transformer = Transformer( + d_model=hidden_dim, + dropout=dropout, + nhead=nheads, + dim_feedforward=dim_feedforward, + num_encoder_layers=enc_layers, + num_decoder_layers=class_dec_layers, + normalize_before=pre_norm, + return_intermediate_dec=False, + ) + + # define Transformer decoder here + self.num_heads = nheads + self.num_layers = dec_layers + self.transformer_self_attention_layers = nn.ModuleList() + self.transformer_cross_attention_layers = nn.ModuleList() + self.transformer_ffn_layers = nn.ModuleList() + + for _ in range(self.num_layers): + self.transformer_self_attention_layers.append( + SelfAttentionLayer( + d_model=hidden_dim, + nhead=nheads, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + + self.transformer_cross_attention_layers.append( + CrossAttentionLayer( + d_model=hidden_dim, + nhead=nheads, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + + self.transformer_ffn_layers.append( + FFNLayer( + d_model=hidden_dim, + dim_feedforward=dim_feedforward, + dropout=0.0, + normalize_before=pre_norm, + ) + ) + + self.decoder_norm = nn.LayerNorm(hidden_dim) + + self.num_queries = num_queries + # learnable query p.e. 
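+        # Editorial note: each of the num_queries object queries gets a learned
+        # positional embedding of size hidden_dim; in forward() the weight matrix is
+        # expanded from [num_queries, hidden_dim] to [num_queries, batch, hidden_dim]
+        # via unsqueeze(1).repeat(1, bs, 1). The class transformer is fed
+        # query_embed.weight[:-1], effectively pairing the final embedding with the
+        # task token that is concatenated onto the queries.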
+ self.query_embed = nn.Embedding(num_queries, hidden_dim) + + # level embedding (we always use 3 scales) + self.num_feature_levels = 3 + self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim) + self.input_proj = nn.ModuleList() + for _ in range(self.num_feature_levels): + if in_channels != hidden_dim or enforce_input_project: + self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1)) + weight_init.c2_xavier_fill(self.input_proj[-1]) + else: + self.input_proj.append(nn.Sequential()) + + self.class_input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1) + weight_init.c2_xavier_fill(self.class_input_proj) + + # output FFNs + if self.mask_classification: + self.class_embed = nn.Linear(hidden_dim, num_classes + 1) + self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3) + + @classmethod + def from_config(cls, cfg, in_channels, mask_classification): + ret = {} + ret["in_channels"] = in_channels + ret["mask_classification"] = mask_classification + + ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES + ret["hidden_dim"] = cfg.MODEL.ONE_FORMER.HIDDEN_DIM + ret["num_queries"] = cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES + # Transformer parameters: + ret["nheads"] = cfg.MODEL.ONE_FORMER.NHEADS + ret["dim_feedforward"] = cfg.MODEL.ONE_FORMER.DIM_FEEDFORWARD + + # NOTE: because we add learnable query features which requires supervision, + # we add minus 1 to decoder layers to be consistent with our loss + # implementation: that is, number of auxiliary losses is always + # equal to number of decoder layers. With learnable query features, the number of + # auxiliary losses equals number of decoders plus 1. + assert cfg.MODEL.ONE_FORMER.DEC_LAYERS >= 1 + ret["dec_layers"] = cfg.MODEL.ONE_FORMER.DEC_LAYERS - 1 + ret["class_dec_layers"] = cfg.MODEL.ONE_FORMER.CLASS_DEC_LAYERS + ret["enc_layers"] = cfg.MODEL.ONE_FORMER.ENC_LAYERS + ret["dropout"] = cfg.MODEL.ONE_FORMER.DROPOUT + ret["pre_norm"] = cfg.MODEL.ONE_FORMER.PRE_NORM + ret["enforce_input_project"] = cfg.MODEL.ONE_FORMER.ENFORCE_INPUT_PROJ + ret["is_train"] = cfg.MODEL.IS_TRAIN + ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM + ret["use_task_norm"] = cfg.MODEL.ONE_FORMER.USE_TASK_NORM + + return ret + + def forward(self, x, mask_features, tasks, mask = None): + # x is a list of multi-scale feature + assert len(x) == self.num_feature_levels + src = [] + pos = [] + size_list = [] + + # disable mask, it does not affect performance + del mask + + for i in range(self.num_feature_levels): + size_list.append(x[i].shape[-2:]) + pos.append(self.pe_layer(x[i], None).flatten(2)) + src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None]) + + # flatten NxCxHxW to HWxNxC + pos[-1] = pos[-1].permute(2, 0, 1) + src[-1] = src[-1].permute(2, 0, 1) + + _, bs, _ = src[0].shape + + # QxNxC + query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1) + tasks = tasks.unsqueeze(0) + if self.use_task_norm: + tasks = self.decoder_norm(tasks) + + feats = self.pe_layer(mask_features, None) + + out_t, _ = self.class_transformer(feats, None, + self.query_embed.weight[:-1], + self.class_input_proj(mask_features), + tasks if self.use_task_norm else None) + out_t = out_t[0].permute(1, 0, 2) + + out = torch.cat([out_t, tasks], dim=0) + + output = out.clone() + + predictions_class = [] + predictions_mask = [] + + # prediction heads on learnable query features + outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], i=0) + 
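+        # Editorial note: besides the class and mask logits,
+        # forward_prediction_heads returns a boolean attn_mask derived from the
+        # current mask prediction (sigmoid < 0.5 means "do not attend"), resized to
+        # the feature level the next cross-attention layer will read. The loop below
+        # re-predicts it after every layer -- the masked-attention scheme this
+        # decoder inherits from Mask2Former.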
predictions_class.append(outputs_class) + predictions_mask.append(outputs_mask) + + for i in range(self.num_layers): + level_index = i % self.num_feature_levels + attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False + # attention: cross-attention first + output = self.transformer_cross_attention_layers[i]( + output, src[level_index], + memory_mask=attn_mask, + memory_key_padding_mask=None, # here we do not apply masking on padded region + pos=pos[level_index], query_pos=query_embed + ) + + output = self.transformer_self_attention_layers[i]( + output, tgt_mask=None, + tgt_key_padding_mask=None, + query_pos=query_embed + ) + + # FFN + output = self.transformer_ffn_layers[i]( + output + ) + + outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], i=i+1) + predictions_class.append(outputs_class) + predictions_mask.append(outputs_mask) + + assert len(predictions_class) == self.num_layers + 1 + if self.is_train: + query_class = out.permute(1, 0, 2) + else: + query_class = None + out = { + 'contrastive_logits': query_class, + 'pred_logits': predictions_class[-1], + 'pred_masks': predictions_mask[-1], + 'aux_outputs': self._set_aux_loss( + predictions_class if self.mask_classification else None, + predictions_mask, + ) + } + + return out + + def forward_prediction_heads(self, output, mask_features, attn_mask_target_size, i): + decoder_output = self.decoder_norm(output) + decoder_output = decoder_output.transpose(0, 1) + outputs_class = self.class_embed(decoder_output) + mask_embed = self.mask_embed(decoder_output) + outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features) + + # NOTE: prediction is of higher-resolution + # [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW] + attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False) + + # save_attn_masks(attn_mask.sigmoid() < 0.5, fname=f'demo/maps/{i}_pre_bool') + + # must use bool type + # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. + attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool() + attn_mask = attn_mask.detach() + + return outputs_class, outputs_mask, attn_mask + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_seg_masks): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. 
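+        # resulting structure (a sketch): [{"pred_logits": l_0, "pred_masks": m_0}, ...],
+        # one dict per decoder step except the last, whose predictions are returned
+        # directly under "pred_logits"/"pred_masks"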
+        if self.mask_classification:
+            aux_list = [
+                {"pred_logits": a, "pred_masks": b}
+                for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
+            ]
+        else:
+            aux_list = [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
+
+        return aux_list
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/position_encoding.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/position_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..051984d9ea6e04e834f6fae3daf7d8317c2f0819
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/position_encoding.py
@@ -0,0 +1,67 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/transformer_decoder/position_encoding.py
+# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
+# ------------------------------------------------------------------------------
+
+"""
+Various positional encodings for the transformer.
+"""
+import math
+
+import torch
+from torch import nn
+
+
+class PositionEmbeddingSine(nn.Module):
+    """
+    This is a more standard version of the position embedding, very similar to the one
+    used by the Attention is all you need paper, generalized to work on images.
+    """
+
+    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
+        super().__init__()
+        self.num_pos_feats = num_pos_feats
+        self.temperature = temperature
+        self.normalize = normalize
+        if scale is not None and normalize is False:
+            raise ValueError("normalize should be True if scale is passed")
+        if scale is None:
+            scale = 2 * math.pi
+        self.scale = scale
+
+    def forward(self, x, mask=None):
+        if mask is None:
+            mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
+        not_mask = ~mask
+        y_embed = not_mask.cumsum(1, dtype=torch.float32)
+        x_embed = not_mask.cumsum(2, dtype=torch.float32)
+        if self.normalize:
+            eps = 1e-6
+            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
+            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
+
+        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+        pos_x = x_embed[:, :, :, None] / dim_t
+        pos_y = y_embed[:, :, :, None] / dim_t
+        pos_x = torch.stack(
+            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
+        ).flatten(3)
+        pos_y = torch.stack(
+            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
+        ).flatten(3)
+        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+        return pos
+
+    def __repr__(self, _repr_indent=4):
+        head = "Positional encoding " + self.__class__.__name__
+        body = [
+            "num_pos_feats: {}".format(self.num_pos_feats),
+            "temperature: {}".format(self.temperature),
+            "normalize: {}".format(self.normalize),
+            "scale: {}".format(self.scale),
+        ]
+        lines = [head] + [" " * _repr_indent + line for line in body]
+        return "\n".join(lines)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/text_transformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/text_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..2edc3058d14f83a7d1c07e78e27cd3a73bb47090
--- /dev/null
+++
b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/text_transformer.py @@ -0,0 +1,257 @@ +# ------------------------------------------------------------------------- +# MIT License +# +# Copyright (c) 2021 OpenAI +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# ------------------------------------------------------------------------- + +import torch +import torch.utils.checkpoint as checkpoint +from torch import nn +from collections import OrderedDict +from custom_timm.models.layers import trunc_normal_ + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.q_proj = nn.Linear(dim, dim, bias=qkv_bias) + self.k_proj = nn.Linear(dim, dim, bias=qkv_bias) + self.v_proj = nn.Linear(dim, dim, bias=qkv_bias) + + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, q, k, v): + B, N, C = q.shape + assert k.shape == v.shape + B, M, C = k.shape + q = self.q_proj(q).reshape(B, N, self.num_heads, C // self.num_heads) + k = self.k_proj(k).reshape(B, M, self.num_heads, C // self.num_heads) + v = self.v_proj(v).reshape(B, M, self.num_heads, C // self.num_heads) + + attn = torch.einsum('bnkc,bmkc->bknm', q, k) * self.scale + + attn = attn.softmax(dim=-1) + + x = torch.einsum('bknm,bmkc->bnkc', attn, v).reshape(B, N, C) + + x = self.proj(x) + x = self.proj_drop(x) + return x + +class TransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.1, + ): + super().__init__() + self.self_attn = Attention(d_model, nhead, proj_drop=dropout) + self.cross_attn = Attention(d_model, nhead, proj_drop=dropout) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(dropout) + + self.mlp = nn.Sequential( + nn.Linear(d_model, d_model * 4), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(d_model * 4, d_model) + ) + + def forward(self, x, mem): + q = k = v = self.norm1(x) + x = x + self.self_attn(q, k, v) + q = self.norm2(x) + x = x + self.cross_attn(q, mem, mem) + x = x + self.dropout(self.mlp(self.norm3(x))) + return x + + +class ContextDecoder(nn.Module): + def 
__init__(self, + transformer_width=256, + transformer_heads=4, + transformer_layers=6, + visual_dim=1024, + dropout=0.1, + **kwargs): + super().__init__() + + self.memory_proj = nn.Sequential( + nn.LayerNorm(visual_dim), + nn.Linear(visual_dim, transformer_width), + nn.LayerNorm(transformer_width), + ) + + self.text_proj = nn.Sequential( + nn.LayerNorm(visual_dim), + nn.Linear(visual_dim, transformer_width), + ) + + self.decoder = nn.ModuleList([ + TransformerDecoderLayer(transformer_width, transformer_heads, dropout) for _ in range(transformer_layers) + ]) + + self.out_proj = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, visual_dim) + ) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + + def forward(self, text, visual): + B, N, C = visual.shape + visual = self.memory_proj(visual) + x = self.text_proj(text) + + for layer in self.decoder: + x = layer(x, visual) + + return self.out_proj(x) + + +class QuickGELU(nn.Module): + + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + self.mlp = nn.Sequential( + OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), ('gelu', QuickGELU()), + ('c_proj', nn.Linear(d_model * 4, d_model))])) + self.ln_2 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=key_padding_mask)[0] + + def forward(self, x: torch.Tensor, key_padding_mask=None): + x = x + self.attention(self.ln_1(x), key_padding_mask=key_padding_mask) + x = x + self.mlp(self.ln_2(x)) + return x + +class Transformer(nn.Module): + + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_checkpoint=False): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + proj_std = (self.width**-0.5) * ((2 * self.layers)**-0.5) + attn_std = self.width**-0.5 + fc_std = (2 * self.width)**-0.5 + for block in self.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + self.use_checkpoint = use_checkpoint + + def forward(self, x: torch.Tensor): + for resblock in self.resblocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(resblock, x) + else: + x = resblock(x) + return x + + +class TextTransformer(nn.Module): + + def __init__( + self, + context_length: int, + width: int, + layers: int, + vocab_size, + use_checkpoint=False, + ): + + super().__init__() + heads = width // 64 + self.context_length = context_length + self.width = width + self.transformer = Transformer( + width=width, + layers=layers, + heads=heads, + 
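+            # the mask built below is additive and causal: -inf strictly above the
+            # diagonal, so each text token may only attend to itself and earlier tokens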
attn_mask=self.build_attention_mask(), + use_checkpoint=use_checkpoint) + + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width)) + self.ln_final = nn.LayerNorm(width) + self.token_embedding = nn.Embedding(vocab_size, width) + nn.init.normal_(self.token_embedding.weight, std=0.02) + + # initialization + nn.init.normal_(self.positional_embedding, std=0.01) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float('-inf')) + mask.triu_(1) # zero out the lower diagonal + return mask + + def forward(self, text): + x = self.token_embedding(text) + x = x + self.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + + return x \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/transformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..cd07525673b9b1165e1fdd0c9990a8f29c84f199 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/modeling/transformer_decoder/transformer.py @@ -0,0 +1,376 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/transformer_decoder/transformer.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# ------------------------------------------------------------------------------ + +""" +Transformer class. 
+ +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import List, Optional + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + + +class Transformer(nn.Module): + def __init__( + self, + d_model=512, + nhead=8, + num_encoder_layers=6, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + return_intermediate_dec=False, + ): + super().__init__() + + encoder_layer = TransformerEncoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + decoder_layer = TransformerDecoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before + ) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder( + decoder_layer, + num_decoder_layers, + decoder_norm, + return_intermediate=return_intermediate_dec, + ) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed, task_token=None): + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + if mask is not None: + mask = mask.flatten(1) + + if task_token is None: + tgt = torch.zeros_like(query_embed) + else: + tgt = task_token.repeat(query_embed.shape[0], 1, 1) + + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + hs = self.decoder( + tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed + ) + return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) + + +class TransformerEncoder(nn.Module): + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward( + self, + src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + output = src + + for layer in self.layers: + output = layer( + output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos + ) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + 
memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + ) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class TransformerEncoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn( + q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask + )[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn( + q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask + )[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward( + self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( 
+ self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn( + q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn( + q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask + )[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn( + query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre( + tgt, + memory, + tgt_mask, + memory_mask, + tgt_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + return self.forward_post( + tgt, + memory, + tgt_mask, + memory_mask, + tgt_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(f"activation should be relu/gelu, not {activation}.") diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/oneformer_model.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/oneformer_model.py new file mode 100644 index 0000000000000000000000000000000000000000..1a84485f86f57d4794a48ba8a93c742a9004f601 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/oneformer_model.py @@ -0,0 +1,470 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/maskformer_model.py +# Modified by Jitesh Jain (https://github.com/praeclarumjj3) +# 
------------------------------------------------------------------------------ + +from typing import Tuple + +import torch +from torch import nn +from torch.nn import functional as F + +from custom_detectron2.config import configurable +from custom_detectron2.data import MetadataCatalog +from custom_detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head +from custom_detectron2.modeling.backbone import Backbone +from custom_detectron2.modeling.postprocessing import sem_seg_postprocess +from custom_detectron2.structures import Boxes, ImageList, Instances, BitMasks +from custom_detectron2.utils.memory import retry_if_cuda_oom + +from .modeling.matcher import HungarianMatcher +from einops import rearrange +from .modeling.transformer_decoder.text_transformer import TextTransformer +from .modeling.transformer_decoder.oneformer_transformer_decoder import MLP +from custom_oneformer.data.tokenizer import SimpleTokenizer, Tokenize + +@META_ARCH_REGISTRY.register() +class OneFormer(nn.Module): + """ + Main class for mask classification semantic segmentation architectures. + """ + + @configurable + def __init__( + self, + *, + backbone: Backbone, + sem_seg_head: nn.Module, + task_mlp: nn.Module, + text_encoder: nn.Module, + text_projector: nn.Module, + prompt_ctx: nn.Embedding, + num_queries: int, + object_mask_threshold: float, + overlap_threshold: float, + metadata, + size_divisibility: int, + sem_seg_postprocess_before_inference: bool, + pixel_mean: Tuple[float], + pixel_std: Tuple[float], + # inference + semantic_on: bool, + panoptic_on: bool, + instance_on: bool, + detection_on: bool, + test_topk_per_image: int, + task_seq_len: int, + max_seq_len: int, + is_demo: bool, + ): + """ + Args: + backbone: a backbone module, must follow detectron2's backbone interface + sem_seg_head: a module that predicts semantic segmentation from backbone features + criterion: a module that defines the loss + num_queries: int, number of queries + object_mask_threshold: float, threshold to filter query based on classification score + for panoptic segmentation inference + overlap_threshold: overlap threshold used in general inference for panoptic segmentation + metadata: dataset meta, get `thing` and `stuff` category names for panoptic + segmentation inference + size_divisibility: Some backbones require the input height and width to be divisible by a + specific integer. We can use this to override such requirement. + sem_seg_postprocess_before_inference: whether to resize the prediction back + to original input size before semantic segmentation inference or after. + For high-resolution dataset like Mapillary, resizing predictions before + inference will cause OOM error. 
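+            detection_on: bool, whether to also output bounding-box instances
+                (returned under the "box_instances" key) derived from the masks
+            task_seq_len: token length used by the task tokenizer (also the input
+                width of task_mlp)
+            max_seq_len: token length used by the text tokenizer
+            is_demo: bool, if True, drop instances whose score is below
+                object_mask_threshold at inference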
+ pixel_mean, pixel_std: list or tuple with #channels element, representing + the per-channel mean and std to be used to normalize the input image + semantic_on: bool, whether to output semantic segmentation prediction + instance_on: bool, whether to output instance segmentation prediction + panoptic_on: bool, whether to output panoptic segmentation prediction + test_topk_per_image: int, instance segmentation parameter, keep topk instances per image + """ + super().__init__() + self.backbone = backbone + self.sem_seg_head = sem_seg_head + self.task_mlp = task_mlp + self.text_encoder = text_encoder + self.text_projector = text_projector + self.prompt_ctx = prompt_ctx + self.num_queries = num_queries + self.overlap_threshold = overlap_threshold + self.object_mask_threshold = object_mask_threshold + self.metadata = metadata + if size_divisibility < 0: + # use backbone size_divisibility if not set + size_divisibility = self.backbone.size_divisibility + self.size_divisibility = size_divisibility + self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + # additional args + self.semantic_on = semantic_on + self.instance_on = instance_on + self.panoptic_on = panoptic_on + self.detection_on = detection_on + self.test_topk_per_image = test_topk_per_image + + self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) + self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) + self.is_demo = is_demo + + self.thing_indices = [k for k in self.metadata.thing_dataset_id_to_contiguous_id.keys()] + + if not self.semantic_on: + assert self.sem_seg_postprocess_before_inference + + @classmethod + def from_config(cls, cfg): + backbone = build_backbone(cfg) + sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) + + if cfg.MODEL.IS_TRAIN: + text_encoder = TextTransformer(context_length=cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH, + width=cfg.MODEL.TEXT_ENCODER.WIDTH, + layers=cfg.MODEL.TEXT_ENCODER.NUM_LAYERS, + vocab_size=cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE) + text_projector = MLP(text_encoder.width, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, + cfg.MODEL.ONE_FORMER.HIDDEN_DIM, cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS) + if cfg.MODEL.TEXT_ENCODER.N_CTX > 0: + prompt_ctx = nn.Embedding(cfg.MODEL.TEXT_ENCODER.N_CTX, cfg.MODEL.TEXT_ENCODER.WIDTH) + else: + prompt_ctx = None + else: + text_encoder = None + text_projector = None + prompt_ctx = None + + task_mlp = MLP(cfg.INPUT.TASK_SEQ_LEN, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, + cfg.MODEL.ONE_FORMER.HIDDEN_DIM, 2) + + # Loss parameters: + deep_supervision = cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION + no_object_weight = cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT + + # loss weights + class_weight = cfg.MODEL.ONE_FORMER.CLASS_WEIGHT + dice_weight = cfg.MODEL.ONE_FORMER.DICE_WEIGHT + mask_weight = cfg.MODEL.ONE_FORMER.MASK_WEIGHT + contrastive_weight = cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT + + # building criterion + matcher = HungarianMatcher( + cost_class=class_weight, + cost_mask=mask_weight, + cost_dice=dice_weight, + num_points=cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS, + ) + + weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, + "loss_dice": dice_weight, "loss_contrastive": contrastive_weight} + + + if deep_supervision: + dec_layers = cfg.MODEL.ONE_FORMER.DEC_LAYERS + aux_weight_dict = {} + for i in range(dec_layers - 1): + 
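+                # duplicate every loss weight once per auxiliary (intermediate)
+                # decoder output, keyed with an "_{i}" suffix, e.g. "loss_ce_0"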
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + + losses = ["labels", "masks", "contrastive"] + + return { + "backbone": backbone, + "sem_seg_head": sem_seg_head, + "task_mlp": task_mlp, + "prompt_ctx": prompt_ctx, + "text_encoder": text_encoder, + "text_projector": text_projector, + "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES, + "object_mask_threshold": cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD, + "overlap_threshold": cfg.MODEL.TEST.OVERLAP_THRESHOLD, + "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), + "size_divisibility": cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY, + "sem_seg_postprocess_before_inference": ( + cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE + or cfg.MODEL.TEST.PANOPTIC_ON + or cfg.MODEL.TEST.INSTANCE_ON + ), + "pixel_mean": cfg.MODEL.PIXEL_MEAN, + "pixel_std": cfg.MODEL.PIXEL_STD, + # inference + "semantic_on": cfg.MODEL.TEST.SEMANTIC_ON, + "instance_on": cfg.MODEL.TEST.INSTANCE_ON, + "panoptic_on": cfg.MODEL.TEST.PANOPTIC_ON, + "detection_on": cfg.MODEL.TEST.DETECTION_ON, + "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, + "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, + "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, + "is_demo": cfg.MODEL.IS_DEMO, + } + + @property + def device(self): + return self.pixel_mean.device + + def encode_text(self, text): + assert text.ndim in [2, 3], text.ndim + b = text.shape[0] + squeeze_dim = False + num_text = 1 + if text.ndim == 3: + num_text = text.shape[1] + text = rearrange(text, 'b n l -> (b n) l', n=num_text) + squeeze_dim = True + + # [B, C] + x = self.text_encoder(text) + + text_x = self.text_projector(x) + + if squeeze_dim: + text_x = rearrange(text_x, '(b n) c -> b n c', n=num_text) + if self.prompt_ctx is not None: + text_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_x.shape[0], 1, 1) + text_x = torch.cat([text_x, text_ctx], dim=1) + + return {"texts": text_x} + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + * "image": Tensor, image in (C, H, W) format. + * "instances": per-region ground truth + * Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model (may be different + from input resolution), used in inference. + Returns: + list[dict]: + each dict has the results for one image. The dict contains the following keys: + * "sem_seg": + A Tensor that represents the + per-pixel segmentation prediced by the head. + The prediction has shape KxHxW that represents the logits of + each class for each pixel. + * "panoptic_seg": + A tuple that represent panoptic output + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". 
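+                * "instances":
+                    an Instances object carrying pred_masks, pred_boxes, scores and
+                    pred_classes, produced when instance inference is enabled.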
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [(x - self.pixel_mean) / self.pixel_std for x in images] + images = ImageList.from_tensors(images, self.size_divisibility) + + tasks = torch.cat([self.task_tokenizer(x["task"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) + tasks = self.task_mlp(tasks.float()) + + features = self.backbone(images.tensor) + outputs = self.sem_seg_head(features, tasks) + + if self.training: + texts = torch.cat([self.text_tokenizer(x["text"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) + texts_x = self.encode_text(texts) + + outputs = {**outputs, **texts_x} + + # mask classification target + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + targets = self.prepare_targets(gt_instances, images) + else: + targets = None + + # bipartite matching-based loss + losses = self.criterion(outputs, targets) + + for k in list(losses.keys()): + if k in self.criterion.weight_dict: + losses[k] *= self.criterion.weight_dict[k] + else: + # remove this loss if not specified in `weight_dict` + losses.pop(k) + return losses + else: + mask_cls_results = outputs["pred_logits"] + mask_pred_results = outputs["pred_masks"] + # upsample masks + mask_pred_results = F.interpolate( + mask_pred_results, + size=(images.tensor.shape[-2], images.tensor.shape[-1]), + mode="bilinear", + align_corners=False, + ) + + del outputs + + processed_results = [] + for i, data in enumerate(zip( + mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes + )): + mask_cls_result, mask_pred_result, input_per_image, image_size = data + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + processed_results.append({}) + + if self.sem_seg_postprocess_before_inference: + mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)( + mask_pred_result, image_size, height, width + ) + mask_cls_result = mask_cls_result.to(mask_pred_result) + + # semantic segmentation inference + if self.semantic_on: + r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result) + if not self.sem_seg_postprocess_before_inference: + r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width) + processed_results[-1]["sem_seg"] = r + + # panoptic segmentation inference + if self.panoptic_on: + panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result) + processed_results[-1]["panoptic_seg"] = panoptic_r + + # instance segmentation inference + if self.instance_on: + instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) + processed_results[-1]["instances"] = instance_r + + if self.detection_on: + bbox_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) + processed_results[-1]["box_instances"] = bbox_r + + return processed_results + + def prepare_targets(self, targets, images): + h_pad, w_pad = images.tensor.shape[-2:] + new_targets = [] + for targets_per_image in targets: + # pad gt + gt_masks = targets_per_image.gt_masks + padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device) + padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks + new_targets.append( + { + "labels": targets_per_image.gt_classes, + "masks": padded_masks, + } + ) + return new_targets + + def semantic_inference(self, mask_cls, mask_pred): + mask_cls = F.softmax(mask_cls, 
dim=-1)[..., :-1] + mask_pred = mask_pred.sigmoid() + semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred) + return semseg + + def panoptic_inference(self, mask_cls, mask_pred): + scores, labels = F.softmax(mask_cls, dim=-1).max(-1) + mask_pred = mask_pred.sigmoid() + + keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold) + cur_scores = scores[keep] + cur_classes = labels[keep] + cur_masks = mask_pred[keep] + cur_mask_cls = mask_cls[keep] + cur_mask_cls = cur_mask_cls[:, :-1] + + cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks + + h, w = cur_masks.shape[-2:] + panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device) + segments_info = [] + + current_segment_id = 0 + + if cur_masks.shape[0] == 0: + # We didn't detect any mask :( + return panoptic_seg, segments_info + else: + # take argmax + cur_mask_ids = cur_prob_masks.argmax(0) + stuff_memory_list = {} + for k in range(cur_classes.shape[0]): + pred_class = cur_classes[k].item() + isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values() + mask_area = (cur_mask_ids == k).sum().item() + original_area = (cur_masks[k] >= 0.5).sum().item() + mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) + + if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: + if mask_area / original_area < self.overlap_threshold: + continue + + # merge stuff regions + if not isthing: + if int(pred_class) in stuff_memory_list.keys(): + panoptic_seg[mask] = stuff_memory_list[int(pred_class)] + continue + else: + stuff_memory_list[int(pred_class)] = current_segment_id + 1 + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + + segments_info.append( + { + "id": current_segment_id, + "isthing": bool(isthing), + "category_id": int(pred_class), + } + ) + + return panoptic_seg, segments_info + + def instance_inference(self, mask_cls, mask_pred): + # mask_pred is already processed to have the same shape as original input + image_size = mask_pred.shape[-2:] + + # [Q, K] + scores = F.softmax(mask_cls, dim=-1)[:, :-1] + labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) + + # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) + scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False) + labels_per_image = labels[topk_indices] + + topk_indices = topk_indices // self.sem_seg_head.num_classes + # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) + mask_pred = mask_pred[topk_indices] + + # Only consider scores with confidence over [self.object_mask_threshold] for demo + if self.is_demo: + keep = scores_per_image > self.object_mask_threshold + scores_per_image = scores_per_image[keep] + labels_per_image = labels_per_image[keep] + mask_pred = mask_pred[keep] + + # if this is panoptic segmentation, we only keep the "thing" classes + if self.panoptic_on: + keep = torch.zeros_like(scores_per_image).bool() + for i, lab in enumerate(labels_per_image): + keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values() + + scores_per_image = scores_per_image[keep] + labels_per_image = labels_per_image[keep] + mask_pred = mask_pred[keep] + + if 'ade20k' in self.metadata.name: + for i in range(labels_per_image.shape[0]): + labels_per_image[i] = self.thing_indices.index(labels_per_image[i].item()) + + result = Instances(image_size) + # mask (before sigmoid) + 
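+        # note: thresholding the raw logits at 0 below is equivalent to
+        # sigmoid(logit) > 0.5, so no explicit sigmoid is needed for binarization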
result.pred_masks = (mask_pred > 0).float() + if self.detection_on: + # Uncomment the following to get boxes from masks (this is slow) + result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes() + else: + result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4)) + + # calculate average mask prob + mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6) + result.scores = scores_per_image * mask_scores_per_image + result.pred_classes = labels_per_image + return result \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..130d3011b032f91df1a9cf965625e54922f6c81b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .events import setup_wandb, WandbWriter \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/box_ops.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/box_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..a2b62ad99ed1fc35cdb10a9e11acdeb0ff1abcc4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/box_ops.py @@ -0,0 +1,133 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. +""" +import torch, os +from torchvision.ops.boxes import box_area + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), + (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, + (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +# modified from torchvision to also return the union +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + # import ipdb; ipdb.set_trace() + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / (union + 1e-6) + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + The boxes should be in [x0, y0, x1, y1] format + Returns a [N, M] pairwise matrix, where N = len(boxes1) + and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + # except: + # import ipdb; ipdb.set_trace() + iou, union = box_iou(boxes1, boxes2) + + lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,M,2] + area = wh[:, :, 0] * wh[:, :, 1] + + return iou - (area - union) / (area + 1e-6) + + + +# modified from torchvision to also return the union +def box_iou_pairwise(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2] + rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2] + + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * 
wh[:, 1] # [N] + + union = area1 + area2 - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou_pairwise(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + Input: + - boxes1, boxes2: N,4 + Output: + - giou: N, 4 + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + assert boxes1.shape == boxes2.shape + iou, union = box_iou_pairwise(boxes1, boxes2) # N, 4 + + lt = torch.min(boxes1[:, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,2] + area = wh[:, 0] * wh[:, 1] + + return iou - (area - union) / area + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + Returns a [N, 4] tensors, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float) + x = torch.arange(0, w, dtype=torch.float) + y, x = torch.meshgrid(y, x) + + x_mask = (masks * x.unsqueeze(0)) + x_max = x_mask.flatten(1).max(-1)[0] + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = (masks * y.unsqueeze(0)) + y_max = y_mask.flatten(1).max(-1)[0] + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + return torch.stack([x_min, y_min, x_max, y_max], 1) + +if __name__ == '__main__': + x = torch.rand(5, 4) + y = torch.rand(3, 4) + iou, union = box_iou(x, y) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/events.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/events.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6519ca86572218eedf465b6fe6bf19472a56e8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/events.py @@ -0,0 +1,120 @@ +import os +import wandb +from custom_detectron2.utils import comm +from custom_detectron2.utils.events import EventWriter, get_event_storage + + +def setup_wandb(cfg, args): + if comm.is_main_process(): + init_args = { + k.lower(): v + for k, v in cfg.WANDB.items() + if isinstance(k, str) and k not in ["config"] + } + # only include most related part to avoid too big table + # TODO: add configurable params to select which part of `cfg` should be saved in config + if "config_exclude_keys" in init_args: + init_args["config"] = cfg + init_args["config"]["cfg_file"] = args.config_file + else: + init_args["config"] = { + "model": cfg.MODEL, + "solver": cfg.SOLVER, + "cfg_file": args.config_file, + } + if ("name" not in init_args) or (init_args["name"] is None): + init_args["name"] = os.path.basename(args.config_file) + else: + init_args["name"] = init_args["name"] + '_' + os.path.basename(args.config_file) + wandb.init(**init_args) + + +class BaseRule(object): + def __call__(self, target): + return target + + +class IsIn(BaseRule): + def __init__(self, keyword: str): + self.keyword = keyword + + def __call__(self, target): + return self.keyword in target + + +class Prefix(BaseRule): + def __init__(self, keyword: str): + self.keyword = keyword + + def __call__(self, target): + return "/".join([self.keyword, target]) + + +class WandbWriter(EventWriter): + """ + Write all scalars to a tensorboard file. 
+    """
+
+    def __init__(self):
+        """
+        Builds the grouping rules used to prefix scalar names before logging,
+        e.g. "loss_ce" is reported to wandb as "train/loss_ce".
+        """
+        self._last_write = -1
+        self._group_rules = [
+            (IsIn("/"), BaseRule()),
+            (IsIn("loss"), Prefix("train")),
+        ]
+
+    def write(self):
+
+        storage = get_event_storage()
+
+        def _group_name(scalar_name):
+            for (rule, op) in self._group_rules:
+                if rule(scalar_name):
+                    return op(scalar_name)
+            return scalar_name
+
+        stats = {
+            _group_name(name): scalars[0]
+            for name, scalars in storage.latest().items()
+            if scalars[1] > self._last_write
+        }
+        if len(stats) > 0:
+            self._last_write = max([v[1] for k, v in storage.latest().items()])
+
+        # storage.put_{image,histogram} is only meant to be used by
+        # tensorboard writer. So we access its internal fields directly from here.
+        if len(storage._vis_data) >= 1:
+            stats["image"] = [
+                wandb.Image(img, caption=img_name)
+                for img_name, img, step_num in storage._vis_data
+            ]
+            # Storage stores all image data and rely on this writer to clear them.
+            # As a result it assumes only one writer will use its image data.
+            # An alternative design is to let storage store limited recent
+            # data (e.g. only the most recent image) that all writers can access.
+            # In that case a writer may not see all image data if its period is long.
+            storage.clear_images()
+
+        if len(storage._histograms) >= 1:
+
+            def create_bar(tag, bucket_limits, bucket_counts, **kwargs):
+                data = [
+                    [label, val] for (label, val) in zip(bucket_limits, bucket_counts)
+                ]
+                table = wandb.Table(data=data, columns=["label", "value"])
+                return wandb.plot.bar(table, "label", "value", title=tag)
+
+            stats["hist"] = [create_bar(**params) for params in storage._histograms]
+
+            storage.clear_histograms()
+
+        if len(stats) == 0:
+            return
+        wandb.log(stats, step=storage.iter)
+
+    def close(self):
+        wandb.finish()
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/misc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2bca7733278c3a4b1f145bd7e5da23683b74961
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/misc.py
@@ -0,0 +1,197 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
+"""
+Misc functions, including distributed helpers.
+
+Mostly copy-paste from torchvision references.
+"""
+from typing import List, Optional
+
+import torch
+import torch.distributed as dist
+import torchvision
+from torch import Tensor
+import warnings
+import torch.nn.functional as F
+import math
+
+def inverse_sigmoid(x, eps=1e-3):
+    x = x.clamp(min=0, max=1)
+    x1 = x.clamp(min=eps)
+    x2 = (1 - x).clamp(min=eps)
+    return torch.log(x1/x2)
+
+def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+    if (mean < a - 2 * std) or (mean > b + 2 * std):
+        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
+                      "The distribution of values may be incorrect.",
+                      stacklevel=2)
+
+    with torch.no_grad():
+        # Values are generated by using a truncated uniform distribution and
+        # then using the inverse CDF for the normal distribution.
+        # Get upper and lower cdf values
+        l = norm_cdf((a - mean) / std)
+        u = norm_cdf((b - mean) / std)
+
+        # Uniformly fill tensor with values from [l, u], then translate to
+        # [2l-1, 2u-1].
+        tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+        # Use inverse cdf transform for normal distribution to get truncated
+        # standard normal
+        tensor.erfinv_()
+
+        # Transform to proper mean, std
+        tensor.mul_(std * math.sqrt(2.))
+        tensor.add_(mean)
+
+        # Clamp to ensure it's in the proper range
+        tensor.clamp_(min=a, max=b)
+        return tensor
+
+def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
+    # type: (Tensor, float, float, float, float) -> Tensor
+    r"""Fills the input Tensor with values drawn from a truncated
+    normal distribution. The values are effectively drawn from the
+    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+    with values outside :math:`[a, b]` redrawn until they are within
+    the bounds. The method used for generating the random values works
+    best when :math:`a \leq \text{mean} \leq b`.
+    Args:
+        tensor: an n-dimensional `torch.Tensor`
+        mean: the mean of the normal distribution
+        std: the standard deviation of the normal distribution
+        a: the minimum cutoff value
+        b: the maximum cutoff value
+    Examples:
+        >>> w = torch.empty(3, 5)
+        >>> nn.init.trunc_normal_(w)
+    """
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
+
+def resize(input,
+           size=None,
+           scale_factor=None,
+           mode='nearest',
+           align_corners=None,
+           warning=True):
+    if warning:
+        if size is not None and align_corners:
+            input_h, input_w = tuple(int(x) for x in input.shape[2:])
+            output_h, output_w = tuple(int(x) for x in size)
+            if output_h > input_h or output_w > input_w:
+                if ((output_h > 1 and output_w > 1 and input_h > 1
+                     and input_w > 1) and (output_h - 1) % (input_h - 1)
+                        and (output_w - 1) % (input_w - 1)):
+                    warnings.warn(
+                        f'When align_corners={align_corners}, '
+                        'the output would more aligned if '
+                        f'input size {(input_h, input_w)} is `x+1` and '
+                        f'out size {(output_h, output_w)} is `nx+1`')
+    if isinstance(size, torch.Size):
+        size = tuple(int(x) for x in size)
+    return F.interpolate(input, size, scale_factor, mode, align_corners)
+
+def _max_by_axis(the_list):
+    # type: (List[List[int]]) -> List[int]
+    maxes = the_list[0]
+    for sublist in the_list[1:]:
+        for index, item in enumerate(sublist):
+            maxes[index] = max(maxes[index], item)
+    return maxes
+
+
+class NestedTensor(object):
+    def __init__(self, tensors, mask: Optional[Tensor]):
+        self.tensors = tensors
+        self.mask = mask
+
+    def to(self, device):
+        # type: (Device) -> NestedTensor # noqa
+        cast_tensor = self.tensors.to(device)
+        mask = self.mask
+        if mask is not None:
+            assert mask is not None
+            cast_mask = mask.to(device)
+        else:
+            cast_mask = None
+        return NestedTensor(cast_tensor, cast_mask)
+
+    def decompose(self):
+        return self.tensors, self.mask
+
+    def __repr__(self):
+        return str(self.tensors)
+
+
+def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
+    # TODO make this more general
+    if tensor_list[0].ndim == 3:
+        if torchvision._is_tracing():
+            # nested_tensor_from_tensor_list() does not export well to ONNX
+            # call _onnx_nested_tensor_from_tensor_list() instead
+            return _onnx_nested_tensor_from_tensor_list(tensor_list)
+
+        # TODO make it support different-sized images
+        max_size =
_max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("not supported") + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. +@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max( + torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32) + ).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/pos_embed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/pos_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..aa11d60db65fa98c140e7d75bdf985ff7ece8f18 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_oneformer/utils/pos_embed.py @@ -0,0 +1,122 @@ +# -------------------------------------------------------- +# Position embedding utils +# -------------------------------------------------------- + +from typing import Tuple + +import numpy as np +import torch + + +# -------------------------------------------------------- +# 2D sine-cosine position embedding +# References: +# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py +# MoCo v3: https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size, grid_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed 
= np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+    return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+    assert embed_dim % 2 == 0
+
+    # use half of dimensions to encode grid_h
+    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+    return emb
+
+
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+    """
+    embed_dim: output dimension for each position
+    pos: a list of positions to be encoded: size (M,)
+    out: (M, D)
+    """
+    assert embed_dim % 2 == 0
+    omega = np.arange(embed_dim // 2, dtype=float)  # note: np.float was removed in NumPy >= 1.24
+    omega /= embed_dim / 2.0
+    omega = 1.0 / 10000 ** omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product
+
+    emb_sin = np.sin(out)  # (M, D/2)
+    emb_cos = np.cos(out)  # (M, D/2)
+
+    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+    return emb
+
+
+# --------------------------------------------------------
+# Interpolate position embeddings for high-resolution
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+def interpolate_pos_embed(model, checkpoint_model, pos_embed_key):
+    if pos_embed_key in checkpoint_model:
+        pos_embed_checkpoint = checkpoint_model[pos_embed_key]
+        embedding_size = pos_embed_checkpoint.shape[-1]
+        num_patches = model.num_patches
+        if pos_embed_key.startswith("decoder"):
+            num_extra_tokens = model.decoder_pos_embed.shape[-2] - num_patches
+        else:
+            num_extra_tokens = model.pos_embed.shape[-2] - num_patches
+        # height (== width) for the checkpoint position embedding
+        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+        # height (== width) for the new position embedding
+        new_size = int(num_patches ** 0.5)
+        # class_token and dist_token are kept unchanged
+        if orig_size != new_size:
+            print(
+                "Position interpolate from %dx%d to %dx%d"
+                % (orig_size, orig_size, new_size, new_size)
+            )
+            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+            # only the position tokens are interpolated
+            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+            pos_tokens = pos_tokens.reshape(
+                -1, orig_size, orig_size, embedding_size
+            ).permute(0, 3, 1, 2)
+            pos_tokens = torch.nn.functional.interpolate(
+                pos_tokens,
+                size=(new_size, new_size),
+                mode="bicubic",
+                align_corners=False,
+            )
+            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+            checkpoint_model[pos_embed_key] = new_pos_embed
+
+
+def interpolate_pos_embed_online(
+    pos_embed, orig_size: Tuple[int], new_size: Tuple[int], num_extra_tokens: int
+):
+    extra_tokens = pos_embed[:, :num_extra_tokens]
+    pos_tokens = pos_embed[:, num_extra_tokens:]
+    embedding_size = pos_tokens.shape[-1]
+    pos_tokens = pos_tokens.reshape(
+        -1, orig_size[0], orig_size[1], embedding_size
+    ).permute(0, 3, 1, 2)
+    pos_tokens = torch.nn.functional.interpolate(
+        pos_tokens, size=new_size, mode="bicubic", align_corners=False,
+    )
+    pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+    new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+    return new_pos_embed
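+
+# Usage sketch (illustrative only; the sizes below are arbitrary examples, not part of the original module):
+#   pos = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
+#   pos.shape == (1 + 14 * 14, 768)  # one sin-cos row per grid cell, plus a zero row for the cls token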
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f7d85bba884ea8f83fc6ab2a1e6ade80d98d4d9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/__init__.py
@@ -0,0 +1 @@
+__author__ = 'tylin'
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/coco.py b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..7485e23ab38fdd3cca9d4b86bb6c5a9bb17e8c92
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/coco.py
@@ -0,0 +1,444 @@
+__author__ = 'tylin'
+__version__ = '2.0'
+# Interface for accessing the Microsoft COCO dataset.
+
+# Microsoft COCO is a large image dataset designed for object detection,
+# segmentation, and caption generation. custom_pycocotools is a Python API that
+# assists in loading, parsing and visualizing the annotations in COCO.
+# Please visit http://mscoco.org/ for more information on COCO, including
+# for the data, paper, and tutorials. The exact format of the annotations
+# is also described on the COCO website. For example usage of the custom_pycocotools
+# please see custom_pycocotools_demo.ipynb. In addition to this API, please download both
+# the COCO images and annotations in order to run the demo.
+
+# An alternative to using the API is to load the annotations directly
+# into a Python dictionary; using the API, however, provides additional
+# utility functions. Note that this API supports both *instance* and
+# *caption* annotations. In the case of captions, not all functions are
+# defined (e.g. categories are undefined).
+
+# The following API functions are defined:
+#  COCO       - COCO api class that loads COCO annotation file and prepares data structures.
+#  decodeMask - Decode binary mask M encoded via run-length encoding.
+#  encodeMask - Encode binary mask M using run-length encoding.
+#  getAnnIds  - Get ann ids that satisfy given filter conditions.
+#  getCatIds  - Get cat ids that satisfy given filter conditions.
+#  getImgIds  - Get img ids that satisfy given filter conditions.
+#  loadAnns   - Load anns with the specified ids.
+#  loadCats   - Load cats with the specified ids.
+#  loadImgs   - Load imgs with the specified ids.
+#  annToMask  - Convert segmentation in an annotation to binary mask.
+#  showAnns   - Display the specified annotations.
+#  loadRes    - Load algorithm results and create API for accessing them.
+#  download   - Download COCO images from mscoco.org server.
+# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
+# Help on each function can be accessed by: "help COCO>function".
+
+# See also COCO>decodeMask,
+# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
+# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
+# COCO>loadImgs, COCO>annToMask, COCO>showAnns
+
+# Microsoft COCO Toolbox.      version 2.0
+# Data, paper, and tutorials available at: http://mscoco.org/
+# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
+# Licensed under the Simplified BSD License [see bsd.txt]
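+
+# Typical usage (illustrative sketch; the annotation file path below is hypothetical):
+#   coco = COCO('annotations/instances_val2017.json')
+#   img_ids = coco.getImgIds(catIds=coco.getCatIds(catNms=['person']))
+#   anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[:1]))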
+
+import json
+import time
+import numpy as np
+import copy
+import itertools
+from . import mask as maskUtils
+import os
+from collections import defaultdict
+import sys
+PYTHON_VERSION = sys.version_info[0]
+if PYTHON_VERSION == 2:
+    from urllib import urlretrieve
+elif PYTHON_VERSION == 3:
+    from urllib.request import urlretrieve
+
+
+def _isArrayLike(obj):
+    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
+
+
+class COCO:
+    def __init__(self, annotation_file=None):
+        """
+        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
+        :param annotation_file (str): location of annotation file
+        :param image_folder (str): location to the folder that hosts images.
+        :return:
+        """
+        # load dataset
+        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
+        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
+        if not annotation_file == None:
+            print('loading annotations into memory...')
+            tic = time.time()
+            with open(annotation_file, 'r') as f:
+                dataset = json.load(f)
+            assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
+            print('Done (t={:0.2f}s)'.format(time.time()- tic))
+            self.dataset = dataset
+            self.createIndex()
+
+    def createIndex(self):
+        # create index
+        print('creating index...')
+        anns, cats, imgs = {}, {}, {}
+        imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
+        if 'annotations' in self.dataset:
+            for ann in self.dataset['annotations']:
+                imgToAnns[ann['image_id']].append(ann)
+                anns[ann['id']] = ann
+
+        if 'images' in self.dataset:
+            for img in self.dataset['images']:
+                imgs[img['id']] = img
+
+        if 'categories' in self.dataset:
+            for cat in self.dataset['categories']:
+                cats[cat['id']] = cat
+
+        if 'annotations' in self.dataset and 'categories' in self.dataset:
+            for ann in self.dataset['annotations']:
+                catToImgs[ann['category_id']].append(ann['image_id'])
+
+        print('index created!')
+
+        # create class members
+        self.anns = anns
+        self.imgToAnns = imgToAnns
+        self.catToImgs = catToImgs
+        self.imgs = imgs
+        self.cats = cats
+
+    def info(self):
+        """
+        Print information about the annotation file.
+        :return:
+        """
+        for key, value in self.dataset['info'].items():
+            print('{}: {}'.format(key, value))
+
+    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
+        """
+        Get ann ids that satisfy given filter conditions. default skips that filter
+        :param imgIds (int array)   : get anns for given imgs
+               catIds (int array)   : get anns for given cats
+               areaRng (float array) : get anns for given area range (e.g. [0 inf])
+               iscrowd (boolean)    : get anns for given crowd label (False or True)
+        :return: ids (int array)    : integer array of ann ids
+        """
+        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
+        catIds = catIds if _isArrayLike(catIds) else [catIds]
+
+        if len(imgIds) == len(catIds) == len(areaRng) == 0:
+            anns = self.dataset['annotations']
+        else:
+            if not len(imgIds) == 0:
+                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
+                anns = list(itertools.chain.from_iterable(lists))
+            else:
+                anns = self.dataset['annotations']
+            anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
+            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
+        if not iscrowd == None:
+            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
+        else:
+            ids = [ann['id'] for ann in anns]
+        return ids
+
+    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
+        """
+        filtering parameters. default skips that filter.
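+        Example (illustrative; assumes a loaded COCO instance):
+        >>> cat_ids = coco.getCatIds(catNms=['person', 'dog'])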
+        :param catNms (str array) : get cats for given cat names
+        :param supNms (str array) : get cats for given supercategory names
+        :param catIds (int array) : get cats for given cat ids
+        :return: ids (int array)  : integer array of cat ids
+        """
+        catNms = catNms if _isArrayLike(catNms) else [catNms]
+        supNms = supNms if _isArrayLike(supNms) else [supNms]
+        catIds = catIds if _isArrayLike(catIds) else [catIds]
+
+        if len(catNms) == len(supNms) == len(catIds) == 0:
+            cats = self.dataset['categories']
+        else:
+            cats = self.dataset['categories']
+            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
+            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
+            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
+        ids = [cat['id'] for cat in cats]
+        return ids
+
+    def getImgIds(self, imgIds=[], catIds=[]):
+        '''
+        Get img ids that satisfy given filter conditions.
+        :param imgIds (int array) : get imgs for given ids
+        :param catIds (int array) : get imgs with all given cats
+        :return: ids (int array)  : integer array of img ids
+        '''
+        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
+        catIds = catIds if _isArrayLike(catIds) else [catIds]
+
+        if len(imgIds) == len(catIds) == 0:
+            ids = self.imgs.keys()
+        else:
+            ids = set(imgIds)
+            for i, catId in enumerate(catIds):
+                if i == 0 and len(ids) == 0:
+                    ids = set(self.catToImgs[catId])
+                else:
+                    ids &= set(self.catToImgs[catId])
+        return list(ids)
+
+    def loadAnns(self, ids=[]):
+        """
+        Load anns with the specified ids.
+        :param ids (int array)      : integer ids specifying anns
+        :return: anns (object array) : loaded ann objects
+        """
+        if _isArrayLike(ids):
+            return [self.anns[id] for id in ids]
+        elif type(ids) == int:
+            return [self.anns[ids]]
+
+    def loadCats(self, ids=[]):
+        """
+        Load cats with the specified ids.
+        :param ids (int array)      : integer ids specifying cats
+        :return: cats (object array) : loaded cat objects
+        """
+        if _isArrayLike(ids):
+            return [self.cats[id] for id in ids]
+        elif type(ids) == int:
+            return [self.cats[ids]]
+
+    def loadImgs(self, ids=[]):
+        """
+        Load imgs with the specified ids.
+        :param ids (int array)      : integer ids specifying img
+        :return: imgs (object array) : loaded img objects
+        """
+        if _isArrayLike(ids):
+            return [self.imgs[id] for id in ids]
+        elif type(ids) == int:
+            return [self.imgs[ids]]
+
+    def showAnns(self, anns, draw_bbox=False):
+        """
+        Display the specified annotations.
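+        Example (illustrative; assumes matplotlib and that the image was already shown, e.g. via plt.imshow):
+        >>> anns = coco.loadAnns(coco.getAnnIds(imgIds=[img_id]))
+        >>> coco.showAnns(anns, draw_bbox=True)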
+ :param anns (array of object): annotations to display + :return: None + """ + if len(anns) == 0: + return 0 + if 'segmentation' in anns[0] or 'keypoints' in anns[0]: + datasetType = 'instances' + elif 'caption' in anns[0]: + datasetType = 'captions' + else: + raise Exception('datasetType not supported') + if datasetType == 'instances': + import matplotlib.pyplot as plt + from matplotlib.collections import PatchCollection + from matplotlib.patches import Polygon + + ax = plt.gca() + ax.set_autoscale_on(False) + polygons = [] + color = [] + for ann in anns: + c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] + if 'segmentation' in ann: + if type(ann['segmentation']) == list: + # polygon + for seg in ann['segmentation']: + poly = np.array(seg).reshape((int(len(seg)/2), 2)) + polygons.append(Polygon(poly)) + color.append(c) + else: + # mask + t = self.imgs[ann['image_id']] + if type(ann['segmentation']['counts']) == list: + rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width']) + else: + rle = [ann['segmentation']] + m = maskUtils.decode(rle) + img = np.ones( (m.shape[0], m.shape[1], 3) ) + if ann['iscrowd'] == 1: + color_mask = np.array([2.0,166.0,101.0])/255 + if ann['iscrowd'] == 0: + color_mask = np.random.random((1, 3)).tolist()[0] + for i in range(3): + img[:,:,i] = color_mask[i] + ax.imshow(np.dstack( (img, m*0.5) )) + if 'keypoints' in ann and type(ann['keypoints']) == list: + # turn skeleton into zero-based index + sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 + kp = np.array(ann['keypoints']) + x = kp[0::3] + y = kp[1::3] + v = kp[2::3] + for sk in sks: + if np.all(v[sk]>0): + plt.plot(x[sk],y[sk], linewidth=3, color=c) + plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) + plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) + + if draw_bbox: + [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox'] + poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]] + np_poly = np.array(poly).reshape((4,2)) + polygons.append(Polygon(np_poly)) + color.append(c) + + p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) + ax.add_collection(p) + p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2) + ax.add_collection(p) + elif datasetType == 'captions': + for ann in anns: + print(ann['caption']) + + def loadRes(self, resFile): + """ + Load result file and return a result api object. 
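+        Example (illustrative; the result file name is hypothetical):
+        >>> cocoDt = cocoGt.loadRes('detections_results.json')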
+        :param resFile (str)     : file name of result file
+        :return: res (obj)       : result api object
+        """
+        res = COCO()
+        res.dataset['images'] = [img for img in self.dataset['images']]
+
+        print('Loading and preparing results...')
+        tic = time.time()
+        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
+            with open(resFile) as f:
+                anns = json.load(f)
+        elif type(resFile) == np.ndarray:
+            anns = self.loadNumpyAnnotations(resFile)
+        else:
+            anns = resFile
+        assert type(anns) == list, 'results is not an array of objects'
+        annsImgIds = [ann['image_id'] for ann in anns]
+        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
+            'Results do not correspond to current coco set'
+        if 'caption' in anns[0]:
+            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
+            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
+            for id, ann in enumerate(anns):
+                ann['id'] = id+1
+        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
+            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
+            for id, ann in enumerate(anns):
+                bb = ann['bbox']
+                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
+                if not 'segmentation' in ann:
+                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
+                ann['area'] = bb[2]*bb[3]
+                ann['id'] = id+1
+                ann['iscrowd'] = 0
+        elif 'segmentation' in anns[0]:
+            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
+            for id, ann in enumerate(anns):
+                # now only support compressed RLE format as segmentation results
+                ann['area'] = maskUtils.area(ann['segmentation'])
+                if not 'bbox' in ann:
+                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
+                ann['id'] = id+1
+                ann['iscrowd'] = 0
+        elif 'keypoints' in anns[0]:
+            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
+            for id, ann in enumerate(anns):
+                s = ann['keypoints']
+                x = s[0::3]
+                y = s[1::3]
+                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
+                ann['area'] = (x1-x0)*(y1-y0)
+                ann['id'] = id + 1
+                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
+        print('DONE (t={:0.2f}s)'.format(time.time()- tic))
+
+        res.dataset['annotations'] = anns
+        res.createIndex()
+        return res
+
+    def download(self, tarDir = None, imgIds = [] ):
+        '''
+        Download COCO images from mscoco.org server.
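+        Example (illustrative; the target directory is hypothetical):
+        >>> coco.download(tarDir='./coco_images', imgIds=coco.getImgIds()[:10])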
+        :param tarDir (str): COCO results directory name
+               imgIds (list): images to be downloaded
+        :return:
+        '''
+        if tarDir is None:
+            print('Please specify target directory')
+            return -1
+        if len(imgIds) == 0:
+            imgs = self.imgs.values()
+        else:
+            imgs = self.loadImgs(imgIds)
+        N = len(imgs)
+        if not os.path.exists(tarDir):
+            os.makedirs(tarDir)
+        for i, img in enumerate(imgs):
+            tic = time.time()
+            fname = os.path.join(tarDir, img['file_name'])
+            if not os.path.exists(fname):
+                urlretrieve(img['coco_url'], fname)
+            print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
+
+    def loadNumpyAnnotations(self, data):
+        """
+        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
+        :param data (numpy.ndarray)
+        :return: annotations (python nested list)
+        """
+        print('Converting ndarray to lists...')
+        assert(type(data) == np.ndarray)
+        print(data.shape)
+        assert(data.shape[1] == 7)
+        N = data.shape[0]
+        ann = []
+        for i in range(N):
+            if i % 1000000 == 0:
+                print('{}/{}'.format(i,N))
+            ann += [{
+                'image_id'  : int(data[i, 0]),
+                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
+                'score' : data[i, 5],
+                'category_id': int(data[i, 6]),
+                }]
+        return ann
+
+    def annToRLE(self, ann):
+        """
+        Convert annotation which can be polygons, uncompressed RLE to RLE.
+        :return: RLE (run-length encoding dict, not a decoded mask)
+        """
+        t = self.imgs[ann['image_id']]
+        h, w = t['height'], t['width']
+        segm = ann['segmentation']
+        if type(segm) == list:
+            # polygon -- a single object might consist of multiple parts
+            # we merge all parts into one mask rle code
+            rles = maskUtils.frPyObjects(segm, h, w)
+            rle = maskUtils.merge(rles)
+        elif type(segm['counts']) == list:
+            # uncompressed RLE
+            rle = maskUtils.frPyObjects(segm, h, w)
+        else:
+            # rle
+            rle = ann['segmentation']
+        return rle
+
+    def annToMask(self, ann):
+        """
+        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
+        :return: binary mask (numpy 2D array)
+        """
+        rle = self.annToRLE(ann)
+        m = maskUtils.decode(rle)
+        return m
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/cocoeval.py b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/cocoeval.py
new file mode 100644
index 0000000000000000000000000000000000000000..89c251e1652a0cfc7e8ff1bbb1024a801ed2ebe7
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/cocoeval.py
@@ -0,0 +1,534 @@
+__author__ = 'tsungyi'
+
+import numpy as np
+import datetime
+import time
+from collections import defaultdict
+from . import mask as maskUtils
+import copy
+
+class COCOeval:
+    # Interface for evaluating detection on the Microsoft COCO dataset.
+    #
+    # The usage for CocoEval is as follows:
+    #  cocoGt=..., cocoDt=...       # load dataset and results
+    #  E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
+    #  E.params.recThrs = ...;      # set parameters as desired
+    #  E.evaluate();                # run per image evaluation
+    #  E.accumulate();              # accumulate per image results
+    #  E.summarize();               # display summary metrics of results
+    # For example usage see evalDemo.m and http://mscoco.org/.
+    #
+    # The evaluation parameters are as follows (defaults in brackets):
+    #  imgIds     - [all] N img ids to use for evaluation
+    #  catIds     - [all] K cat ids to use for evaluation
+    #  iouThrs    - [.5:.05:.95] T=10 IoU thresholds for evaluation
+    #  recThrs    - [0:.01:1] R=101 recall thresholds for evaluation
+    #  areaRng    - [...]
A=4 object area ranges for evaluation + # maxDets - [1 10 100] M=3 thresholds on max detections per image + # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints' + # iouType replaced the now DEPRECATED useSegm parameter. + # useCats - [1] if true use category labels for evaluation + # Note: if useCats=0 category labels are ignored as in proposal scoring. + # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. + # + # evaluate(): evaluates detections on every image and every category and + # concats the results into the "evalImgs" with fields: + # dtIds - [1xD] id for each of the D detections (dt) + # gtIds - [1xG] id for each of the G ground truths (gt) + # dtMatches - [TxD] matching gt id at each IoU or 0 + # gtMatches - [TxG] matching dt id at each IoU or 0 + # dtScores - [1xD] confidence of each dt + # gtIgnore - [1xG] ignore flag for each gt + # dtIgnore - [TxD] ignore flag for each dt at each IoU + # + # accumulate(): accumulates the per-image, per-category evaluation + # results in "evalImgs" into the dictionary "eval" with fields: + # params - parameters used for evaluation + # date - date evaluation was performed + # counts - [T,R,K,A,M] parameter dimensions (see above) + # precision - [TxRxKxAxM] precision for every evaluation setting + # recall - [TxKxAxM] max recall for every evaluation setting + # Note: precision and recall==-1 for settings with no gt objects. + # + # See also coco, mask, pycocoDemo, pycocoEvalDemo + # + # Microsoft COCO Toolbox. version 2.0 + # Data, paper, and tutorials available at: http://mscoco.org/ + # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. + # Licensed under the Simplified BSD License [see coco/license.txt] + def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'): + ''' + Initialize CocoEval using coco APIs for gt and dt + :param cocoGt: coco object with ground truth annotations + :param cocoDt: coco object with detection results + :return: None + ''' + if not iouType: + print('iouType not specified. 
use default iouType segm') + self.cocoGt = cocoGt # ground truth COCO API + self.cocoDt = cocoDt # detections COCO API + self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements + self.eval = {} # accumulated evaluation results + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self.params = Params(iouType=iouType) # parameters + self._paramsEval = {} # parameters for evaluation + self.stats = [] # result summarization + self.ious = {} # ious between all gts and dts + if not cocoGt is None: + self.params.imgIds = sorted(cocoGt.getImgIds()) + self.params.catIds = sorted(cocoGt.getCatIds()) + + + def _prepare(self): + ''' + Prepare ._gts and ._dts for evaluation based on params + :return: None + ''' + def _toMask(anns, coco): + # modify ann['segmentation'] by reference + for ann in anns: + rle = coco.annToRLE(ann) + ann['segmentation'] = rle + p = self.params + if p.useCats: + gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + else: + gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) + dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) + + # convert ground truth to mask if iouType == 'segm' + if p.iouType == 'segm': + _toMask(gts, self.cocoGt) + _toMask(dts, self.cocoDt) + # set ignore flag + for gt in gts: + gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0 + gt['ignore'] = 'iscrowd' in gt and gt['iscrowd'] + if p.iouType == 'keypoints': + gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore'] + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + for gt in gts: + self._gts[gt['image_id'], gt['category_id']].append(gt) + for dt in dts: + self._dts[dt['image_id'], dt['category_id']].append(dt) + self.evalImgs = defaultdict(list) # per-image per-category evaluation results + self.eval = {} # accumulated evaluation results + + def evaluate(self): + ''' + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + ''' + tic = time.time() + print('Running per image evaluation...') + p = self.params + # add backward compatibility if useSegm is specified in params + if not p.useSegm is None: + p.iouType = 'segm' if p.useSegm == 1 else 'bbox' + print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))
+        print('Evaluate annotation type *{}*'.format(p.iouType))
+        p.imgIds = list(np.unique(p.imgIds))
+        if p.useCats:
+            p.catIds = list(np.unique(p.catIds))
+        p.maxDets = sorted(p.maxDets)
+        self.params=p
+
+        self._prepare()
+        # loop through images, area range, max detection number
+        catIds = p.catIds if p.useCats else [-1]
+
+        if p.iouType == 'segm' or p.iouType == 'bbox':
+            computeIoU = self.computeIoU
+        elif p.iouType == 'keypoints':
+            computeIoU = self.computeOks
+        self.ious = {(imgId, catId): computeIoU(imgId, catId) \
+                        for imgId in p.imgIds
+                        for catId in catIds}
+
+        evaluateImg = self.evaluateImg
+        maxDet = p.maxDets[-1]
+        self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
+                 for catId in catIds
+                 for areaRng in p.areaRng
+                 for imgId in p.imgIds
+             ]
+        self._paramsEval = copy.deepcopy(self.params)
+        toc = time.time()
+        print('DONE (t={:0.2f}s).'.format(toc-tic))
+
+    def computeIoU(self, imgId, catId):
+        p = self.params
+        if p.useCats:
+            gt = self._gts[imgId,catId]
+            dt = self._dts[imgId,catId]
+        else:
+            gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
+            dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
+        if len(gt) == 0 and len(dt) == 0:
+            return []
+        inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
+        dt = [dt[i] for i in inds]
+        if len(dt) > p.maxDets[-1]:
+            dt=dt[0:p.maxDets[-1]]
+
+        if p.iouType == 'segm':
+            g = [g['segmentation'] for g in gt]
+            d = [d['segmentation'] for d in dt]
+        elif p.iouType == 'bbox':
+            g = [g['bbox'] for g in gt]
+            d = [d['bbox'] for d in dt]
+        else:
+            raise Exception('unknown iouType for iou computation')
+
+        # compute iou between each dt and gt region
+        iscrowd = [int(o['iscrowd']) for o in gt]
+        ious = maskUtils.iou(d,g,iscrowd)
+        return ious
+
+    def computeOks(self, imgId, catId):
+        p = self.params
+        # dimension here should be Nxm
+        gts = self._gts[imgId, catId]
+        dts = self._dts[imgId, catId]
+        inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
+        dts = [dts[i] for i in inds]
+        if len(dts) > p.maxDets[-1]:
+            dts = dts[0:p.maxDets[-1]]
+        # if len(gts) == 0 and len(dts) == 0:
+        if len(gts) == 0 or len(dts) == 0:
+            return []
+        ious = np.zeros((len(dts), len(gts)))
+        sigmas = p.kpt_oks_sigmas
+        vars = (sigmas * 2)**2
+        k = len(sigmas)
+        # compute oks between each detection and ground truth object
+        for j, gt in enumerate(gts):
+            # create bounds for ignore regions(double the gt bbox)
+            g = np.array(gt['keypoints'])
+            xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
+            k1 = np.count_nonzero(vg > 0)
+            bb = gt['bbox']
+            x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
+            y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
+            for i, dt in enumerate(dts):
+                d = np.array(dt['keypoints'])
+                xd = d[0::3]; yd = d[1::3]
+                if k1>0:
+                    # measure the per-keypoint distance if keypoints visible
+                    dx = xd - xg
+                    dy = yd - yg
+                else:
+                    # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
+                    z = np.zeros((k))
+                    dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
+                    dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
+                e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
+                if k1 > 0:
+                    e=e[vg > 0]
+                ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
+        return ious
+
+    def evaluateImg(self, imgId, catId, aRng, maxDet):
+        '''
+        perform evaluation for single category and image
+        :return: dict (single image results)
+        '''
+        p = self.params
+        if p.useCats:
+            gt = self._gts[imgId,catId]
+            dt = self._dts[imgId,catId]
+        else:
+            gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
+            dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
+        if len(gt) == 0 and len(dt) == 0:
+            return None
+
+        for g in gt:
+            if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
+                g['_ignore'] = 1
+            else:
+                g['_ignore'] = 0
+
+        # sort dt highest score first, sort gt ignore last
+        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
+        gt = [gt[i] for i in gtind]
+        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
+        dt = [dt[i] for i in dtind[0:maxDet]]
+        iscrowd = [int(o['iscrowd']) for o in gt]
+        # load computed ious
+        ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
+
+        T = len(p.iouThrs)
+        G = len(gt)
+        D = len(dt)
+        gtm = np.zeros((T,G))
+        dtm = np.zeros((T,D))
+        gtIg = np.array([g['_ignore'] for g in gt])
+        dtIg = np.zeros((T,D))
+        if not len(ious)==0:
+            for tind, t in enumerate(p.iouThrs):
+                for dind, d in enumerate(dt):
+                    # information about best match so far (m=-1 -> unmatched)
+                    iou = min([t,1-1e-10])
+                    m = -1
+                    for gind, g in enumerate(gt):
+                        # if this gt already matched, and not a crowd, continue
+                        if gtm[tind,gind]>0 and not iscrowd[gind]:
+                            continue
+                        # if dt matched to reg gt, and on ignore gt, stop
+                        if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
+                            break
+                        # continue to next gt unless better match made
+                        if ious[dind,gind] < iou:
+                            continue
+                        # if match successful and best so far, store appropriately
+                        iou=ious[dind,gind]
+                        m=gind
+                    # if match made store id of match for both dt and gt
+                    if m ==-1:
+                        continue
+                    dtIg[tind,dind] = gtIg[m]
+                    dtm[tind,dind]  = gt[m]['id']
+                    gtm[tind,m]     = d['id']
+        # set unmatched detections outside of area range to ignore
+        a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
+        dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
+        # store results for given image and category
+        return {
+                'image_id':     imgId,
+                'category_id':  catId,
+                'aRng':         aRng,
+                'maxDet':       maxDet,
+                'dtIds':        [d['id'] for d in dt],
+                'gtIds':        [g['id'] for g in gt],
+                'dtMatches':    dtm,
+                'gtMatches':    gtm,
+                'dtScores':     [d['score'] for d in dt],
+                'gtIgnore':     gtIg,
+                'dtIgnore':     dtIg,
+            }
+
+    def accumulate(self, p = None):
+        '''
+        Accumulate per image evaluation results and store the result in self.eval
+        :param p: input params for evaluation
+        :return: None
+        '''
+        print('Accumulating evaluation results...')
+        tic = time.time()
+        if not self.evalImgs:
+            print('Please run evaluate() first')
+        # allows input customized parameters
+        if p is None:
+            p = self.params
+        p.catIds = p.catIds if p.useCats == 1 else [-1]
+        T           = len(p.iouThrs)
+        R           = len(p.recThrs)
+        K           = len(p.catIds) if p.useCats else 1
+        A           = len(p.areaRng)
+        M           = len(p.maxDets)
+        precision   = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
+        recall      = -np.ones((T,K,A,M))
+        scores      = -np.ones((T,R,K,A,M))
+
+        # create dictionary for future indexing
+        _pe = self._paramsEval
+        catIds = _pe.catIds if _pe.useCats else [-1]
+        setK = set(catIds)
+        setA = set(map(tuple, _pe.areaRng))
+        setM = set(_pe.maxDets)
+        setI = set(_pe.imgIds)
+        # get inds to evaluate
+        k_list = [n for n, k in enumerate(p.catIds) if k in setK]
+        m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
+        a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
+        i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
+        I0 = len(_pe.imgIds)
+        A0 = len(_pe.areaRng)
+        # retrieve E at each category, area range, and max number of detections
+        for k, k0 in enumerate(k_list):
+            Nk = k0*A0*I0
+            for a, a0 in enumerate(a_list):
+                Na = a0*I0
+                for m,
maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if not e is None] + if len(E) == 0: + continue + dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. + inds = np.argsort(-dtScores, kind='mergesort') + dtScoresSorted = dtScores[inds] + + dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds] + dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds] + gtIg = np.concatenate([e['gtIgnore'] for e in E]) + npig = np.count_nonzero(gtIg==0 ) + if npig == 0: + continue + tps = np.logical_and( dtm, np.logical_not(dtIg) ) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) ) + + tp_sum = np.cumsum(tps, axis=1).astype(dtype=float) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=float) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp+tp+np.spacing(1)) + q = np.zeros((R,)) + ss = np.zeros((R,)) + + if nd: + recall[t,k,a,m] = rc[-1] + else: + recall[t,k,a,m] = 0 + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist(); q = q.tolist() + + for i in range(nd-1, 0, -1): + if pr[i] > pr[i-1]: + pr[i-1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side='left') + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + ss[ri] = dtScoresSorted[pi] + except: + pass + precision[t,:,k,a,m] = np.array(q) + scores[t,:,k,a,m] = np.array(ss) + self.eval = { + 'params': p, + 'counts': [T, R, K, A, M], + 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), + 'precision': precision, + 'recall': recall, + 'scores': scores, + } + toc = time.time() + print('DONE (t={:0.2f}s).'.format( toc-tic)) + + def summarize(self): + ''' + Compute and display summary metrics for evaluation results. 
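+        Example (illustrative sketch of the standard evaluation loop; cocoGt/cocoDt are COCO objects):
+        >>> E = COCOeval(cocoGt, cocoDt, iouType='bbox')
+        >>> E.evaluate(); E.accumulate(); E.summarize()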
+        Note this function can *only* be applied on the default parameter setting
+        '''
+        def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
+            p = self.params
+            iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
+            titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
+            typeStr = '(AP)' if ap==1 else '(AR)'
+            iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
+                if iouThr is None else '{:0.2f}'.format(iouThr)
+
+            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
+            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
+            if ap == 1:
+                # dimension of precision: [TxRxKxAxM]
+                s = self.eval['precision']
+                # IoU
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:,:,:,aind,mind]
+            else:
+                # dimension of recall: [TxKxAxM]
+                s = self.eval['recall']
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:,:,aind,mind]
+            if len(s[s>-1])==0:
+                mean_s = -1
+            else:
+                mean_s = np.mean(s[s>-1])
+            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
+            return mean_s
+        def _summarizeDets():
+            stats = np.zeros((12,))
+            stats[0] = _summarize(1)
+            stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
+            stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
+            stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
+            stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
+            stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
+            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+            stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
+            stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
+            stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
+            return stats
+        def _summarizeKps():
+            stats = np.zeros((10,))
+            stats[0] = _summarize(1, maxDets=20)
+            stats[1] = _summarize(1, maxDets=20, iouThr=.5)
+            stats[2] = _summarize(1, maxDets=20, iouThr=.75)
+            stats[3] = _summarize(1, maxDets=20, areaRng='medium')
+            stats[4] = _summarize(1, maxDets=20, areaRng='large')
+            stats[5] = _summarize(0, maxDets=20)
+            stats[6] = _summarize(0, maxDets=20, iouThr=.5)
+            stats[7] = _summarize(0, maxDets=20, iouThr=.75)
+            stats[8] = _summarize(0, maxDets=20, areaRng='medium')
+            stats[9] = _summarize(0, maxDets=20, areaRng='large')
+            return stats
+        if not self.eval:
+            raise Exception('Please run accumulate() first')
+        iouType = self.params.iouType
+        if iouType == 'segm' or iouType == 'bbox':
+            summarize = _summarizeDets
+        elif iouType == 'keypoints':
+            summarize = _summarizeKps
+        self.stats = summarize()
+
+    def __str__(self):
+        self.summarize()
+
+class Params:
+    '''
+    Params for coco evaluation api
+    '''
+    def setDetParams(self):
+        self.imgIds = []
+        self.catIds = []
+        # np.arange causes trouble.
the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True) + self.maxDets = [1, 10, 100] + self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ['all', 'small', 'medium', 'large'] + self.useCats = 1 + + def setKpParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ['all', 'medium', 'large'] + self.useCats = 1 + self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0 + + def __init__(self, iouType='segm'): + if iouType == 'segm' or iouType == 'bbox': + self.setDetParams() + elif iouType == 'keypoints': + self.setKpParams() + else: + raise Exception('iouType not supported') + self.iouType = iouType + # useSegm is deprecated + self.useSegm = None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/mask.py b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/mask.py new file mode 100644 index 0000000000000000000000000000000000000000..8ed8b91bbc8f23c94c495d66780ba88fac342445 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_pycocotools/mask.py @@ -0,0 +1,107 @@ +__author__ = 'tsungyi' + +# import custom_pycocotools._mask as _mask + +# Interface for manipulating masks stored in RLE format. +# +# RLE is a simple yet efficient format for storing binary masks. RLE +# first divides a vector (or vectorized image) into a series of piecewise +# constant regions and then for each piece simply stores the length of +# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would +# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] +# (note that the odd counts are always the numbers of zeros). Instead of +# storing the counts directly, additional compression is achieved with a +# variable bitrate representation based on a common scheme called LEB128. +# +# Compression is greatest given large piecewise constant regions. +# Specifically, the size of the RLE is proportional to the number of +# *boundaries* in M (or for an image the number of boundaries in the y +# direction). Assuming fairly simple shapes, the RLE representation is +# O(sqrt(n)) where n is number of pixels in the object. Hence space usage +# is substantially lower, especially for large simple objects (large n). +# +# Many common operations on masks can be computed directly using the RLE +# (without need for decoding). This includes computations such as area, +# union, intersection, etc. All of these operations are linear in the +# size of the RLE, in other words they are O(sqrt(n)) where n is the area +# of the object. Computing these operations on the original mask is O(n). +# Thus, using the RLE can result in substantial computational savings. +# +# The following API functions are defined: +# encode - Encode binary masks using RLE. +# decode - Decode binary masks encoded via RLE. +# merge - Compute union or intersection of encoded masks. 
+# iou - Compute intersection over union between masks. +# area - Compute area of encoded masks. +# toBbox - Get bounding boxes surrounding encoded masks. +# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask. +# +# Usage: +# Rs = encode( masks ) +# masks = decode( Rs ) +# R = merge( Rs, intersect=false ) +# o = iou( dt, gt, iscrowd ) +# a = area( Rs ) +# bbs = toBbox( Rs ) +# Rs = frPyObjects( [pyObjects], h, w ) +# +# In the API the following formats are used: +# Rs - [dict] Run-length encoding of binary masks +# R - dict Run-length encoding of binary mask +# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order) +# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore +# bbs - [nx4] Bounding box(es) stored as [x y w h] +# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list) +# dt,gt - May be either bounding boxes or encoded masks +# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). +# +# Finally, a note about the intersection over union (iou) computation. +# The standard iou of a ground truth (gt) and detected (dt) object is +# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) +# For "crowd" regions, we use a modified criteria. If a gt object is +# marked as "iscrowd", we allow a dt to match any subregion of the gt. +# Choosing gt' in the crowd gt that best matches the dt can be done using +# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing +# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) +# For crowd gt regions we use this modified criteria above for the iou. +# +# To compile run "python setup.py build_ext --inplace" +# Please do not contact us for help with compiling. +# +# Microsoft COCO Toolbox. version 2.0 +# Data, paper, and tutorials available at: http://mscoco.org/ +# Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
+# Licensed under the Simplified BSD License [see coco/license.txt] + +# iou = _mask.iou +# merge = _mask.merge +# frPyObjects = _mask.frPyObjects + +def encode(bimask): + pass + # if len(bimask.shape) == 3: + # return _mask.encode(bimask) + # elif len(bimask.shape) == 2: + # h, w = bimask.shape + # return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0] + +def decode(rleObjs): + pass + # if type(rleObjs) == list: + # return _mask.decode(rleObjs) + # else: + # return _mask.decode([rleObjs])[:,:,0] + +def area(rleObjs): + pass + # if type(rleObjs) == list: + # return _mask.area(rleObjs) + # else: + # return _mask.area([rleObjs])[0] + +def toBbox(rleObjs): + pass + # if type(rleObjs) == list: + # return _mask.toBbox(rleObjs) + # else: + # return _mask.toBbox([rleObjs])[0] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f797b156d939831ba0173ce29e33583b0a05a3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/__init__.py @@ -0,0 +1,4 @@ +from .version import __version__ +from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ + is_scriptable, is_exportable, set_scriptable, set_exportable, has_pretrained_cfg_key, is_pretrained_cfg_key, \ + get_pretrained_cfg_value, is_model_pretrained diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb10a660c1195250fc418884fc93482efd4f144 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/__init__.py @@ -0,0 +1,13 @@ +from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ + rand_augment_transform, auto_augment_transform +from .config import resolve_data_config +from .constants import * +from .dataset import ImageDataset, IterableImageDataset, AugMixDataset +from .dataset_factory import create_dataset +from .loader import create_loader +from .mixup import Mixup, FastCollateMixup +from .parsers import create_parser,\ + get_img_extensions, is_img_extension, set_img_extensions, add_img_extensions, del_img_extensions +from .real_labels import RealLabelsImagenet +from .transforms import * +from .transforms_factory import create_transform diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/auto_augment.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/auto_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..1b51ccb458fe8f614bcd3389cc943c7d201f63de --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/auto_augment.py @@ -0,0 +1,870 @@ +""" AutoAugment, RandAugment, and AugMix for PyTorch + +This code implements the searched ImageNet policies with various tweaks and improvements and +does not include any of the search code. + +AA and RA Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py + +AugMix adapted from: + https://github.com/google-research/augmix + +Papers: + AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... 
- https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2019, Ross Wightman +""" +import random +import math +import re +from PIL import Image, ImageOps, ImageEnhance, ImageChops +import PIL +import numpy as np + + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) + +_FILL = (128, 128, 128) + +_LEVEL_DENOM = 10. # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments + +_HPARAMS_DEFAULT = dict( + translate_const=250, + img_mean=_FILL, +) + +if hasattr(Image, "Resampling"): + _RANDOM_INTERPOLATION = (Image.Resampling.BILINEAR, Image.Resampling.BICUBIC) + _DEFAULT_INTERPOLATION = Image.Resampling.BICUBIC +else: + _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + _DEFAULT_INTERPOLATION = Image.BICUBIC + + +def _interpolation(kwargs): + interpolation = kwargs.pop('resample', _DEFAULT_INTERPOLATION) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if 'fillcolor' in kwargs and _PIL_VER < (5, 0): + kwargs.pop('fillcolor') + kwargs['resample'] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs['resample']) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == 
"RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _LEVEL_DENOM) * 30. + level = _randomly_negate(level) + return level, + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return (level / _LEVEL_DENOM) * 1.8 + 0.1, + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] if level <= _LEVEL_DENOM + level = (level / _LEVEL_DENOM) * .9 + level = max(0.1, 1.0 + _randomly_negate(level)) # keep it >= 0.1 + return level, + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _LEVEL_DENOM) * 0.3 + level = _randomly_negate(level) + return level, + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams['translate_const'] + level = (level / _LEVEL_DENOM) * float(translate_const) + level = _randomly_negate(level) + return level, + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get('translate_pct', 0.45) + level = (level / _LEVEL_DENOM) * translate_pct + level = _randomly_negate(level) + return level, + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4), + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return 4 - _posterize_level_to_arg(level, hparams)[0], + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4) + 4, + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 256), + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - _solarize_level_to_arg(level, _hparams)[0], + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return int((level / _LEVEL_DENOM) * 110), + + +LEVEL_TO_ARG = { + 'AutoContrast': None, + 'Equalize': None, + 'Invert': None, + 'Rotate': _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + 'Posterize': _posterize_level_to_arg, + 'PosterizeIncreasing': _posterize_increasing_level_to_arg, + 
'PosterizeOriginal': _posterize_original_level_to_arg, + 'Solarize': _solarize_level_to_arg, + 'SolarizeIncreasing': _solarize_increasing_level_to_arg, + 'SolarizeAdd': _solarize_add_level_to_arg, + 'Color': _enhance_level_to_arg, + 'ColorIncreasing': _enhance_increasing_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'ContrastIncreasing': _enhance_increasing_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'BrightnessIncreasing': _enhance_increasing_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'SharpnessIncreasing': _enhance_increasing_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'TranslateX': _translate_abs_level_to_arg, + 'TranslateY': _translate_abs_level_to_arg, + 'TranslateXRel': _translate_rel_level_to_arg, + 'TranslateYRel': _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + 'AutoContrast': auto_contrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': rotate, + 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, + 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'ColorIncreasing': color, + 'Contrast': contrast, + 'ContrastIncreasing': contrast, + 'Brightness': brightness, + 'BrightnessIncreasing': brightness, + 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x_abs, + 'TranslateY': translate_y_abs, + 'TranslateXRel': translate_x_rel, + 'TranslateYRel': translate_y_rel, +} + + +class AugmentOp: + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.name = name + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = dict( + fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, + resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, + ) + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. 
+ # If magnitude_std is inf, we sample magnitude from a uniform distribution + self.magnitude_std = self.hparams.get('magnitude_std', 0) + self.magnitude_max = self.hparams.get('magnitude_max', None) + + def __call__(self, img): + if self.prob < 1.0 and random.random() > self.prob: + return img + magnitude = self.magnitude + if self.magnitude_std > 0: + # magnitude randomization enabled + if self.magnitude_std == float('inf'): + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + # default upper_bound for the timm RA impl is _LEVEL_DENOM (10) + # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl) + upper_bound = self.magnitude_max or _LEVEL_DENOM + magnitude = max(0., min(magnitude, upper_bound)) + level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() + return self.aug_fn(img, *level_args, **self.kwargs) + + def __repr__(self): + fs = self.__class__.__name__ + f'(name={self.name}, p={self.prob}' + fs += f', m={self.magnitude}, mstd={self.magnitude_std}' + if self.magnitude_max is not None: + fs += f', mmax={self.magnitude_max}' + fs += ')' + return fs + + +def auto_augment_policy_v0(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_v0r(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 
9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_original(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 + policy = [ + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_originalr(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation + policy = [ + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy(name='v0', 
hparams=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    if name == 'original':
+        return auto_augment_policy_original(hparams)
+    elif name == 'originalr':
+        return auto_augment_policy_originalr(hparams)
+    elif name == 'v0':
+        return auto_augment_policy_v0(hparams)
+    elif name == 'v0r':
+        return auto_augment_policy_v0r(hparams)
+    else:
+        assert False, 'Unknown AA policy (%s)' % name
+
+
+class AutoAugment:
+
+    def __init__(self, policy):
+        self.policy = policy
+
+    def __call__(self, img):
+        sub_policy = random.choice(self.policy)
+        for op in sub_policy:
+            img = op(img)
+        return img
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(policy='
+        for p in self.policy:
+            fs += '\n\t['
+            fs += ', '.join([str(op) for op in p])
+            fs += ']'
+        fs += ')'
+        return fs
+
+
+def auto_augment_transform(config_str, hparams):
+    """
+    Create an AutoAugment transform
+
+    :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by
+    dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr').
+    The remaining sections, not order specific, determine
+        'mstd' - float std deviation of magnitude noise applied
+    Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5
+
+    :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme
+
+    :return: A PyTorch compatible Transform
+    """
+    config = config_str.split('-')
+    policy_name = config[0]
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param injected via hparams for now
+            hparams.setdefault('magnitude_std', float(val))
+        else:
+            assert False, 'Unknown AutoAugment config section'
+    aa_policy = auto_augment_policy(policy_name, hparams=hparams)
+    return AutoAugment(aa_policy)
+
+
+_RAND_TRANSFORMS = [
+    'AutoContrast',
+    'Equalize',
+    'Invert',
+    'Rotate',
+    'Posterize',
+    'Solarize',
+    'SolarizeAdd',
+    'Color',
+    'Contrast',
+    'Brightness',
+    'Sharpness',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+    #'Cutout'  # NOTE I've implemented this as random erasing separately
+]
+
+
+_RAND_INCREASING_TRANSFORMS = [
+    'AutoContrast',
+    'Equalize',
+    'Invert',
+    'Rotate',
+    'PosterizeIncreasing',
+    'SolarizeIncreasing',
+    'SolarizeAdd',
+    'ColorIncreasing',
+    'ContrastIncreasing',
+    'BrightnessIncreasing',
+    'SharpnessIncreasing',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+    #'Cutout'  # NOTE I've implemented this as random erasing separately
+]
+
+
+
+# These experimental weights are based loosely on the relative improvements mentioned in paper.
+# They may not result in increased performance, but could likely be tuned to do so.
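+# For illustration (added note): with the default 15-transform _RAND_TRANSFORMS list these
+# raw weights already sum to 1.0, so the normalization in _select_rand_weights below is a
+# no-op there; it only matters when a transform subset is passed in.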
+_RAND_CHOICE_WEIGHTS_0 = {
+    'Rotate': 0.3,
+    'ShearX': 0.2,
+    'ShearY': 0.2,
+    'TranslateXRel': 0.1,
+    'TranslateYRel': 0.1,
+    'Color': .025,
+    'Sharpness': 0.025,
+    'AutoContrast': 0.025,
+    'Solarize': .005,
+    'SolarizeAdd': .005,
+    'Contrast': .005,
+    'Brightness': .005,
+    'Equalize': .005,
+    'Posterize': 0,
+    'Invert': 0,
+}
+
+
+def _select_rand_weights(weight_idx=0, transforms=None):
+    transforms = transforms or _RAND_TRANSFORMS
+    assert weight_idx == 0  # only one set of weights currently
+    rand_weights = _RAND_CHOICE_WEIGHTS_0
+    probs = [rand_weights[k] for k in transforms]
+    probs /= np.sum(probs)
+    return probs
+
+
+def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    transforms = transforms or _RAND_TRANSFORMS
+    return [AugmentOp(
+        name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms]
+
+
+class RandAugment:
+    def __init__(self, ops, num_layers=2, choice_weights=None):
+        self.ops = ops
+        self.num_layers = num_layers
+        self.choice_weights = choice_weights
+
+    def __call__(self, img):
+        # no replacement when using weighted choice
+        ops = np.random.choice(
+            self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights)
+        for op in ops:
+            img = op(img)
+        return img
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(n={self.num_layers}, ops='
+        for op in self.ops:
+            fs += f'\n\t{op}'
+        fs += ')'
+        return fs
+
+
+def rand_augment_transform(config_str, hparams):
+    """
+    Create a RandAugment transform
+
+    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
+    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
+    sections, not order specific, determine
+        'm' - integer magnitude of rand augment
+        'n' - integer num layers (number of transform ops selected per image)
+        'w' - integer probability weight index (index of a set of weights to influence choice of op)
+        'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100)
+        'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10)
+        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
+    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
+    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
+
+    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
+
+    :return: A PyTorch compatible Transform
+    """
+    magnitude = _LEVEL_DENOM  # default to _LEVEL_DENOM for magnitude (currently 10)
+    num_layers = 2  # default to 2 ops per image
+    weight_idx = None  # default to no probability weights for op choice
+    transforms = _RAND_TRANSFORMS
+    config = config_str.split('-')
+    assert config[0] == 'rand'
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param / randomization of magnitude values
+            mstd = float(val)
+            if mstd > 100:
+                # use uniform sampling in 0 to magnitude if mstd is > 100
+                mstd = float('inf')
+            hparams.setdefault('magnitude_std', mstd)
+        elif key == 'mmax':
+            # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM]
+            hparams.setdefault('magnitude_max', int(val))
+        elif key == 'inc':
+            if int(val):  # parse as int so 'inc0' stays disabled ('0' is a truthy string)
+                transforms = _RAND_INCREASING_TRANSFORMS
+        elif key == 'm':
+            magnitude = int(val)
+        elif key == 'n':
+            num_layers = int(val)
+        elif key == 'w':
+            weight_idx = int(val)
+        else:
+            assert False, 'Unknown RandAugment config section'
+    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
+    choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx)
+    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
+
+
+_AUGMIX_TRANSFORMS = [
+    'AutoContrast',
+    'ColorIncreasing',  # not in paper
+    'ContrastIncreasing',  # not in paper
+    'BrightnessIncreasing',  # not in paper
+    'SharpnessIncreasing',  # not in paper
+    'Equalize',
+    'Rotate',
+    'PosterizeIncreasing',
+    'SolarizeIncreasing',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+]
+
+
+def augmix_ops(magnitude=10, hparams=None, transforms=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    transforms = transforms or _AUGMIX_TRANSFORMS
+    return [AugmentOp(
+        name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms]
+
+
+class AugMixAugment:
+    """ AugMix Transform
+    Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
+    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
+    https://arxiv.org/abs/1912.02781
+    """
+    def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False):
+        self.ops = ops
+        self.alpha = alpha
+        self.width = width
+        self.depth = depth
+        self.blended = blended  # blended mode is faster but not well tested
+
+    def _calc_blended_weights(self, ws, m):
+        ws = ws * m
+        cump = 1.
+        rws = []
+        for w in ws[::-1]:
+            alpha = w / cump
+            cump *= (1 - alpha)
+            rws.append(alpha)
+        return np.array(rws[::-1], dtype=np.float32)
+
+    def _apply_blended(self, img, mixing_weights, m):
+        # This is my first crack at implementing a slightly faster mixed augmentation. Instead
+        # of accumulating the mix for each chain in a Numpy array and then blending with original,
+        # it recomputes the blending coefficients and applies one PIL image blend per chain.
+        # TODO the results appear in the right ballpark but they differ by more than rounding.
+        img_orig = img.copy()
+        ws = self._calc_blended_weights(mixing_weights, m)
+        for w in ws:
+            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
+            ops = np.random.choice(self.ops, depth, replace=True)
+            img_aug = img_orig  # no ops are in-place, deep copy not necessary
+            for op in ops:
+                img_aug = op(img_aug)
+            img = Image.blend(img, img_aug, w)
+        return img
+
+    def _apply_basic(self, img, mixing_weights, m):
+        # This is a literal adaptation of the paper/official implementation without normalizations and
+        # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the
+        # typical augmentation transforms, could use a GPU / Kornia implementation.
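+        # For illustration (added note): the loop below computes
+        #     mixed = sum_i w_i * chain_i(img)   with w ~ Dirichlet(alpha) from __call__,
+        # and the final Image.blend(img, mixed, m) yields
+        #     out = (1 - m) * img + m * mixed    with m ~ Beta(alpha, alpha).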
+        img_shape = img.size[0], img.size[1], len(img.getbands())
+        mixed = np.zeros(img_shape, dtype=np.float32)
+        for mw in mixing_weights:
+            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
+            ops = np.random.choice(self.ops, depth, replace=True)
+            img_aug = img  # no ops are in-place, deep copy not necessary
+            for op in ops:
+                img_aug = op(img_aug)
+            mixed += mw * np.asarray(img_aug, dtype=np.float32)
+        np.clip(mixed, 0, 255., out=mixed)
+        mixed = Image.fromarray(mixed.astype(np.uint8))
+        return Image.blend(img, mixed, m)
+
+    def __call__(self, img):
+        mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width))
+        m = np.float32(np.random.beta(self.alpha, self.alpha))
+        if self.blended:
+            mixed = self._apply_blended(img, mixing_weights, m)
+        else:
+            mixed = self._apply_basic(img, mixing_weights, m)
+        return mixed
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops='
+        for op in self.ops:
+            fs += f'\n\t{op}'
+        fs += ')'
+        return fs
+
+
+def augment_and_mix_transform(config_str, hparams):
+    """ Create AugMix PyTorch transform
+
+    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
+    dashes ('-'). The first section defines the specific variant of AugMix (currently only 'augmix'). The remaining
+    sections, not order specific, determine
+        'm' - integer magnitude (severity) of augmentation mix (default: 3)
+        'w' - integer width of augmentation chain (default: 3)
+        'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1)
+        'a' - float alpha of the Dirichlet/Beta mixing distributions (default: 1.0)
+        'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0)
+        'mstd' - float std deviation of magnitude noise applied (default: 0)
+    Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2
+
+    :param hparams: Other hparams (kwargs) for the Augmentation transforms
+
+    :return: A PyTorch compatible Transform
+    """
+    magnitude = 3
+    width = 3
+    depth = -1
+    alpha = 1.
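+    # For illustration (added note): 'augmix-m5-w4-d2' parses below to magnitude=5,
+    # width=4, depth=2, leaving alpha and blended at these defaults.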
+    blended = False
+    config = config_str.split('-')
+    assert config[0] == 'augmix'
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param injected via hparams for now
+            hparams.setdefault('magnitude_std', float(val))
+        elif key == 'm':
+            magnitude = int(val)
+        elif key == 'w':
+            width = int(val)
+        elif key == 'd':
+            depth = int(val)
+        elif key == 'a':
+            alpha = float(val)
+        elif key == 'b':
+            blended = bool(int(val))  # parse as int so 'b0' stays disabled ('0' is a truthy string)
+        else:
+            assert False, 'Unknown AugMix config section'
+    hparams.setdefault('magnitude_std', float('inf'))  # default to uniform sampling (if not set via mstd arg)
+    ops = augmix_ops(magnitude=magnitude, hparams=hparams)
+    return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/config.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..78176e4ba9a16fab4373395edef1fb1dee313c33
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/config.py
@@ -0,0 +1,82 @@
+import logging
+from .constants import *
+
+
+_logger = logging.getLogger(__name__)
+
+
+def resolve_data_config(args, default_cfg=None, model=None, use_test_size=False, verbose=False):
+    new_config = {}
+    default_cfg = default_cfg or {}  # avoid a mutable {} default argument
+    if not default_cfg and model is not None and hasattr(model, 'default_cfg'):
+        default_cfg = model.default_cfg
+
+    # Resolve input/image size
+    in_chans = 3
+    if 'chans' in args and args['chans'] is not None:
+        in_chans = args['chans']
+
+    input_size = (in_chans, 224, 224)
+    if 'input_size' in args and args['input_size'] is not None:
+        assert isinstance(args['input_size'], (tuple, list))
+        assert len(args['input_size']) == 3
+        input_size = tuple(args['input_size'])
+        in_chans = input_size[0]  # input_size overrides in_chans
+    elif 'img_size' in args and args['img_size'] is not None:
+        assert isinstance(args['img_size'], int)
+        input_size = (in_chans, args['img_size'], args['img_size'])
+    else:
+        if use_test_size and 'test_input_size' in default_cfg:
+            input_size = default_cfg['test_input_size']
+        elif 'input_size' in default_cfg:
+            input_size = default_cfg['input_size']
+    new_config['input_size'] = input_size
+
+    # resolve interpolation method
+    new_config['interpolation'] = 'bicubic'
+    if 'interpolation' in args and args['interpolation']:
+        new_config['interpolation'] = args['interpolation']
+    elif 'interpolation' in default_cfg:
+        new_config['interpolation'] = default_cfg['interpolation']
+
+    # resolve dataset + model mean for normalization
+    new_config['mean'] = IMAGENET_DEFAULT_MEAN
+    if 'mean' in args and args['mean'] is not None:
+        mean = tuple(args['mean'])
+        if len(mean) == 1:
+            mean = tuple(list(mean) * in_chans)
+        else:
+            assert len(mean) == in_chans
+        new_config['mean'] = mean
+    elif 'mean' in default_cfg:
+        new_config['mean'] = default_cfg['mean']
+
+    # resolve dataset + model std deviation for normalization
+    new_config['std'] = IMAGENET_DEFAULT_STD
+    if 'std' in args and args['std'] is not None:
+        std = tuple(args['std'])
+        if len(std) == 1:
+            std = tuple(list(std) * in_chans)
+        else:
+            assert len(std) == in_chans
+        new_config['std'] = std
+    elif 'std' in default_cfg:
+        new_config['std'] = default_cfg['std']
+
+    # resolve default crop percentage
+    crop_pct = DEFAULT_CROP_PCT
+    if 'crop_pct' in args and args['crop_pct'] is not None:
+        crop_pct = args['crop_pct']
+    else:
+        if use_test_size
and 'test_crop_pct' in default_cfg: + crop_pct = default_cfg['test_crop_pct'] + elif 'crop_pct' in default_cfg: + crop_pct = default_cfg['crop_pct'] + new_config['crop_pct'] = crop_pct + + if verbose: + _logger.info('Data processing configuration for current model + dataset:') + for n, v in new_config.items(): + _logger.info('\t%s: %s' % (n, str(v))) + + return new_config diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/constants.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d8bb7ed4419bda758bfc22448572cdc32e8f27 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/constants.py @@ -0,0 +1,9 @@ +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) +OPENAI_CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073) +OPENAI_CLIP_STD = (0.26862954, 0.26130258, 0.27577711) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..20b663cecaacacf7efbe8a05bb00414610c810f7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset.py @@ -0,0 +1,152 @@ +""" Quick n Simple Image Folder, Tarfile based DataSet + +Hacked together by / Copyright 2019, Ross Wightman +""" +import torch.utils.data as data +import os +import torch +import logging + +from PIL import Image + +from .parsers import create_parser + +_logger = logging.getLogger(__name__) + + +_ERROR_RETRY = 50 + + +class ImageDataset(data.Dataset): + + def __init__( + self, + root, + parser=None, + class_map=None, + load_bytes=False, + transform=None, + target_transform=None, + ): + if parser is None or isinstance(parser, str): + parser = create_parser(parser or '', root=root, class_map=class_map) + self.parser = parser + self.load_bytes = load_bytes + self.transform = transform + self.target_transform = target_transform + self._consecutive_errors = 0 + + def __getitem__(self, index): + img, target = self.parser[index] + try: + img = img.read() if self.load_bytes else Image.open(img).convert('RGB') + except Exception as e: + _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). 
{str(e)}') + self._consecutive_errors += 1 + if self._consecutive_errors < _ERROR_RETRY: + return self.__getitem__((index + 1) % len(self.parser)) + else: + raise e + self._consecutive_errors = 0 + if self.transform is not None: + img = self.transform(img) + if target is None: + target = -1 + elif self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def __len__(self): + return len(self.parser) + + def filename(self, index, basename=False, absolute=False): + return self.parser.filename(index, basename, absolute) + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class IterableImageDataset(data.IterableDataset): + + def __init__( + self, + root, + parser=None, + split='train', + is_training=False, + batch_size=None, + repeats=0, + download=False, + transform=None, + target_transform=None, + ): + assert parser is not None + if isinstance(parser, str): + self.parser = create_parser( + parser, root=root, split=split, is_training=is_training, + batch_size=batch_size, repeats=repeats, download=download) + else: + self.parser = parser + self.transform = transform + self.target_transform = target_transform + self._consecutive_errors = 0 + + def __iter__(self): + for img, target in self.parser: + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + yield img, target + + def __len__(self): + if hasattr(self.parser, '__len__'): + return len(self.parser) + else: + return 0 + + def filename(self, index, basename=False, absolute=False): + assert False, 'Filename lookup by index not supported, use filenames().' + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class AugMixDataset(torch.utils.data.Dataset): + """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" + + def __init__(self, dataset, num_splits=2): + self.augmentation = None + self.normalize = None + self.dataset = dataset + if self.dataset.transform is not None: + self._set_transforms(self.dataset.transform) + self.num_splits = num_splits + + def _set_transforms(self, x): + assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' + self.dataset.transform = x[0] + self.augmentation = x[1] + self.normalize = x[2] + + @property + def transform(self): + return self.dataset.transform + + @transform.setter + def transform(self, x): + self._set_transforms(x) + + def _normalize(self, x): + return x if self.normalize is None else self.normalize(x) + + def __getitem__(self, i): + x, y = self.dataset[i] # all splits share the same dataset base transform + x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) + # run the full augmentation on the remaining splits + for _ in range(self.num_splits - 1): + x_list.append(self._normalize(self.augmentation(x))) + return tuple(x_list), y + + def __len__(self): + return len(self.dataset) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset_factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ac30b168080a1d359dfe9fb57e3a4e5901ada4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/dataset_factory.py @@ -0,0 +1,143 @@ +""" Dataset Factory + +Hacked together by / Copyright 2021, Ross Wightman +""" +import os + +from 
torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder
+try:
+    from torchvision.datasets import Places365
+    has_places365 = True
+except ImportError:
+    has_places365 = False
+try:
+    from torchvision.datasets import INaturalist
+    has_inaturalist = True
+except ImportError:
+    has_inaturalist = False
+
+from .dataset import IterableImageDataset, ImageDataset
+
+_TORCH_BASIC_DS = dict(
+    cifar10=CIFAR10,
+    cifar100=CIFAR100,
+    mnist=MNIST,
+    qmnist=QMNIST,
+    kmnist=KMNIST,
+    fashion_mnist=FashionMNIST,
+)
+_TRAIN_SYNONYM = dict(train=None, training=None)
+_EVAL_SYNONYM = dict(val=None, valid=None, validation=None, eval=None, evaluation=None)
+
+
+def _search_split(root, split):
+    # look for sub-folder with name of split in root and use that if it exists
+    split_name = split.split('[')[0]
+    try_root = os.path.join(root, split_name)
+    if os.path.exists(try_root):
+        return try_root
+
+    def _try(syn):
+        for s in syn:
+            try_root = os.path.join(root, s)
+            if os.path.exists(try_root):
+                return try_root
+        return root
+    if split_name in _TRAIN_SYNONYM:
+        root = _try(_TRAIN_SYNONYM)
+    elif split_name in _EVAL_SYNONYM:
+        root = _try(_EVAL_SYNONYM)
+    return root
+
+
+def create_dataset(
+        name,
+        root,
+        split='validation',
+        search_split=True,
+        class_map=None,
+        load_bytes=False,
+        is_training=False,
+        download=False,
+        batch_size=None,
+        repeats=0,
+        **kwargs
+):
+    """ Dataset factory method
+
+    In parenthesis after each arg are the type of dataset supported for each arg, one of:
+      * folder - default, timm folder (or tar) based ImageDataset
+      * torch - torchvision based datasets
+      * TFDS - Tensorflow-datasets wrapper in IterableDataset interface via IterableImageDataset
+      * all - any of the above
+
+    Args:
+        name: dataset name, empty is okay for folder based datasets
+        root: root folder of dataset (all)
+        split: dataset split (all)
+        search_split: search for split specific child folder from root so one can specify
+            `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
+        class_map: specify class -> index mapping via text file or dict (folder)
+        load_bytes: load data, return images as undecoded bytes (folder)
+        download: download dataset if not present and supported (TFDS, torch)
+        is_training: create dataset in train mode, this is different from the split.
+            For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS)
+        batch_size: batch size hint for (TFDS)
+        repeats: dataset repeats per iteration i.e. epoch (TFDS)
+        **kwargs: other args to pass to dataset
+
+    Returns:
+        Dataset object
+    """
+    name = name.lower()
+    if name.startswith('torch/'):
+        name = name.split('/', 2)[-1]
+        torch_kwargs = dict(root=root, download=download, **kwargs)
+        if name in _TORCH_BASIC_DS:
+            ds_class = _TORCH_BASIC_DS[name]
+            use_train = split in _TRAIN_SYNONYM
+            ds = ds_class(train=use_train, **torch_kwargs)
+        elif name == 'inaturalist' or name == 'inat':
+            assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for iNaturalist'
+            target_type = 'full'
+            split_split = split.split('/')
+            if len(split_split) > 1:
+                target_type = split_split[0].split('_')
+                if len(target_type) == 1:
+                    target_type = target_type[0]
+                split = split_split[-1]
+            if split in _TRAIN_SYNONYM:
+                split = '2021_train'
+            elif split in _EVAL_SYNONYM:
+                split = '2021_valid'
+            ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
+        elif name == 'places365':
+            assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
+            if split in _TRAIN_SYNONYM:
+                split = 'train-standard'
+            elif split in _EVAL_SYNONYM:
+                split = 'val'
+            ds = Places365(split=split, **torch_kwargs)
+        elif name == 'imagenet':
+            if split in _EVAL_SYNONYM:
+                split = 'val'
+            ds = ImageNet(split=split, **torch_kwargs)
+        elif name == 'image_folder' or name == 'folder':
+            # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
+            if search_split and os.path.isdir(root):
+                # look for split specific sub-folder in root
+                root = _search_split(root, split)
+            ds = ImageFolder(root, **kwargs)
+        else:
+            assert False, f"Unknown torchvision dataset {name}"
+    elif name.startswith('tfds/'):
+        ds = IterableImageDataset(
+            root, parser=name, split=split, is_training=is_training,
+            download=download, batch_size=batch_size, repeats=repeats, **kwargs)
+    else:
+        # FIXME support more advanced split cfg for ImageFolder/Tar datasets in the future
+        if search_split and os.path.isdir(root):
+            # look for split specific sub-folder in root
+            root = _search_split(root, split)
+        ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs)
+    return ds
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/distributed_sampler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/distributed_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..54ff0459504a7f952d701720727ebd50d07e9c7a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/distributed_sampler.py
@@ -0,0 +1,135 @@
+import math
+import torch
+from torch.utils.data import Sampler
+import torch.distributed as dist
+
+
+class OrderedDistributedSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset.
+    It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
+    process can pass a DistributedSampler instance as a DataLoader sampler,
+    and load a subset of the original dataset that is exclusive to it.
+    .. note::
+        Dataset is assumed to be of constant size.
+    Arguments:
+        dataset: Dataset used for sampling.
+        num_replicas (optional): Number of processes participating in
+            distributed training.
+        rank (optional): Rank of the current process within num_replicas.
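+
+    Example (illustrative sketch, assuming an initialized process group and a
+    map-style `dataset`)::
+
+        sampler = OrderedDistributedSampler(dataset)
+        loader = torch.utils.data.DataLoader(dataset, batch_size=64, sampler=sampler)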
+ """ + + def __init__(self, dataset, num_replicas=None, rank=None): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + +class RepeatAugSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset for distributed, + with repeated augmentation. + It ensures that different each augmented version of a sample will be visible to a + different process (GPU). Heavily based on torch.utils.data.DistributedSampler + + This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py + Used in + Copyright (c) 2015-present, Facebook, Inc. + """ + + def __init__( + self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + num_repeats=3, + selected_round=256, + selected_ratio=0, + ): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.shuffle = shuffle + self.num_repeats = num_repeats + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + # Determine the number of samples to select per epoch for each rank. + # num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked + # via selected_ratio and selected_round args. + selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0 + if selected_round: + self.num_selected_samples = int(math.floor( + len(self.dataset) // selected_round * selected_round / selected_ratio)) + else: + self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio)) + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = torch.randperm(len(self.dataset), generator=g) + else: + indices = torch.arange(start=0, end=len(self.dataset)) + + # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] 
+ if isinstance(self.num_repeats, float) and not self.num_repeats.is_integer(): + # resample for repeats w/ non-integer ratio + repeat_size = math.ceil(self.num_repeats * len(self.dataset)) + indices = indices[torch.tensor([int(i // self.num_repeats) for i in range(repeat_size)])] + else: + indices = torch.repeat_interleave(indices, repeats=int(self.num_repeats), dim=0) + indices = indices.tolist() # leaving as tensor thrashes dataloader memory + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + if padding_size > 0: + indices += indices[:padding_size] + assert len(indices) == self.total_size + + # subsample per rank + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + # return up to num selected samples + return iter(indices[:self.num_selected_samples]) + + def __len__(self): + return self.num_selected_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/loader.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc075c027b4dd46e63fb1f33d819d2555bff25e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/loader.py @@ -0,0 +1,308 @@ +""" Loader Factory, Fast Collate, CUDA Prefetcher + +Prefetcher and Fast Collate inspired by NVIDIA APEX example at +https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf + +Hacked together by / Copyright 2019, Ross Wightman +""" +import random +from functools import partial +from itertools import repeat +from typing import Callable + +import torch.utils.data +import numpy as np + +from .transforms_factory import create_transform +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler +from .random_erasing import RandomErasing +from .mixup import FastCollateMixup + + +def fast_collate(batch): + """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" + assert isinstance(batch[0], tuple) + batch_size = len(batch) + if isinstance(batch[0][0], tuple): + # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position + # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position + inner_tuple_size = len(batch[0][0]) + flattened_batch_size = batch_size * inner_tuple_size + targets = torch.zeros(flattened_batch_size, dtype=torch.int64) + tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length + for j in range(inner_tuple_size): + targets[i + j * batch_size] = batch[i][1] + tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) + return tensor, targets + elif isinstance(batch[0][0], np.ndarray): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i] += torch.from_numpy(batch[i][0]) + return tensor, targets + elif isinstance(batch[0][0], torch.Tensor): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = 
torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i].copy_(batch[i][0]) + return tensor, targets + else: + assert False + + +def expand_to_chs(x, n): + if not isinstance(x, (tuple, list)): + x = tuple(repeat(x, n)) + elif len(x) == 1: + x = x * n + else: + assert len(x) == n, 'normalization stats must match image channels' + return x + + +class PrefetchLoader: + + def __init__( + self, + loader, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + channels=3, + fp16=False, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0): + + mean = expand_to_chs(mean, channels) + std = expand_to_chs(std, channels) + normalization_shape = (1, channels, 1, 1) + + self.loader = loader + self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(normalization_shape) + self.std = torch.tensor([x * 255 for x in std]).cuda().view(normalization_shape) + self.fp16 = fp16 + if fp16: + self.mean = self.mean.half() + self.std = self.std.half() + if re_prob > 0.: + self.random_erasing = RandomErasing( + probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits) + else: + self.random_erasing = None + + def __iter__(self): + stream = torch.cuda.Stream() + first = True + + for next_input, next_target in self.loader: + with torch.cuda.stream(stream): + next_input = next_input.cuda(non_blocking=True) + next_target = next_target.cuda(non_blocking=True) + if self.fp16: + next_input = next_input.half().sub_(self.mean).div_(self.std) + else: + next_input = next_input.float().sub_(self.mean).div_(self.std) + if self.random_erasing is not None: + next_input = self.random_erasing(next_input) + + if not first: + yield input, target + else: + first = False + + torch.cuda.current_stream().wait_stream(stream) + input = next_input + target = next_target + + yield input, target + + def __len__(self): + return len(self.loader) + + @property + def sampler(self): + return self.loader.sampler + + @property + def dataset(self): + return self.loader.dataset + + @property + def mixup_enabled(self): + if isinstance(self.loader.collate_fn, FastCollateMixup): + return self.loader.collate_fn.mixup_enabled + else: + return False + + @mixup_enabled.setter + def mixup_enabled(self, x): + if isinstance(self.loader.collate_fn, FastCollateMixup): + self.loader.collate_fn.mixup_enabled = x + + +def _worker_init(worker_id, worker_seeding='all'): + worker_info = torch.utils.data.get_worker_info() + assert worker_info.id == worker_id + if isinstance(worker_seeding, Callable): + seed = worker_seeding(worker_info) + random.seed(seed) + torch.manual_seed(seed) + np.random.seed(seed % (2 ** 32 - 1)) + else: + assert worker_seeding in ('all', 'part') + # random / torch seed already called in dataloader iter class w/ worker_info.seed + # to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed) + if worker_seeding == 'all': + np.random.seed(worker_info.seed % (2 ** 32 - 1)) + + +def create_loader( + dataset, + input_size, + batch_size, + is_training=False, + use_prefetcher=True, + no_aug=False, + re_prob=0., + re_mode='const', + re_count=1, + re_split=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + num_aug_repeats=0, + num_aug_splits=0, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=1, + distributed=False, + crop_pct=None, + collate_fn=None, + pin_memory=False, + fp16=False, + tf_preprocessing=False, + 
use_multi_epochs_loader=False, + persistent_workers=True, + worker_seeding='all', +): + re_num_splits = 0 + if re_split: + # apply RE to second half of batch if no aug split otherwise line up with aug split + re_num_splits = num_aug_splits or 2 + dataset.transform = create_transform( + input_size, + is_training=is_training, + use_prefetcher=use_prefetcher, + no_aug=no_aug, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + mean=mean, + std=std, + crop_pct=crop_pct, + tf_preprocessing=tf_preprocessing, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=num_aug_splits > 0, + ) + + sampler = None + if distributed and not isinstance(dataset, torch.utils.data.IterableDataset): + if is_training: + if num_aug_repeats: + sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats) + else: + sampler = torch.utils.data.distributed.DistributedSampler(dataset) + else: + # This will add extra duplicate entries to result in equal num + # of samples per-process, will slightly alter validation results + sampler = OrderedDistributedSampler(dataset) + else: + assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use" + + if collate_fn is None: + collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate + + loader_class = torch.utils.data.DataLoader + if use_multi_epochs_loader: + loader_class = MultiEpochsDataLoader + + loader_args = dict( + batch_size=batch_size, + shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training, + num_workers=num_workers, + sampler=sampler, + collate_fn=collate_fn, + pin_memory=pin_memory, + drop_last=is_training, + worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding), + persistent_workers=persistent_workers + ) + try: + loader = loader_class(dataset, **loader_args) + except TypeError as e: + loader_args.pop('persistent_workers') # only in Pytorch 1.7+ + loader = loader_class(dataset, **loader_args) + if use_prefetcher: + prefetch_re_prob = re_prob if is_training and not no_aug else 0. + loader = PrefetchLoader( + loader, + mean=mean, + std=std, + channels=input_size[0], + fp16=fp16, + re_prob=prefetch_re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits + ) + + return loader + + +class MultiEpochsDataLoader(torch.utils.data.DataLoader): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._DataLoader__initialized = False + self.batch_sampler = _RepeatSampler(self.batch_sampler) + self._DataLoader__initialized = True + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever. 
+ + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/mixup.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/mixup.py new file mode 100644 index 0000000000000000000000000000000000000000..c8789a0c35cbf545b508cdf513b890cab80eebf9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/mixup.py @@ -0,0 +1,316 @@ +""" Mixup and Cutmix + +Papers: +mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412) + +CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899) + +Code Reference: +CutMix: https://github.com/clovaai/CutMix-PyTorch + +Hacked together by / Copyright 2019, Ross Wightman +""" +import numpy as np +import torch + + +def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'): + x = x.long().view(-1, 1) + return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value) + + +def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'): + off_value = smoothing / num_classes + on_value = 1. - smoothing + off_value + y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device) + y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device) + return y1 * lam + y2 * (1. - lam) + + +def rand_bbox(img_shape, lam, margin=0., count=None): + """ Standard CutMix bounding-box + Generates a random square bbox based on lambda value. This impl includes + support for enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) + count (int): Number of bbox to generate + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + +def rand_bbox_minmax(img_shape, minmax, count=None): + """ Min-Max CutMix bounding-box + Inspired by Darknet cutmix impl, generates a random rectangular bbox + based on min/max percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max. 
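+    For example (illustrative): minmax=(0.2, 0.8) on a 224x224 input samples cut_h and
+    cut_w independently via np.random.randint(int(224 * 0.2), int(224 * 0.8)) = randint(44, 179).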
+
+    Args:
+        img_shape (tuple): Image shape as tuple
+        minmax (tuple or list): Min and max bbox ratios (as percent of image size)
+        count (int): Number of bbox to generate
+    """
+    assert len(minmax) == 2
+    img_h, img_w = img_shape[-2:]
+    cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
+    cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
+    yl = np.random.randint(0, img_h - cut_h, size=count)
+    xl = np.random.randint(0, img_w - cut_w, size=count)
+    yu = yl + cut_h
+    xu = xl + cut_w
+    return yl, yu, xl, xu
+
+
+def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
+    """ Generate bbox and apply lambda correction.
+    """
+    if ratio_minmax is not None:
+        yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
+    else:
+        yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
+    if correct_lam or ratio_minmax is not None:
+        bbox_area = (yu - yl) * (xu - xl)
+        lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
+    return (yl, yu, xl, xu), lam
+
+
+class Mixup:
+    """ Mixup/Cutmix that applies different params to each element or whole batch
+
+    Args:
+        mixup_alpha (float): mixup alpha value, mixup is active if > 0.
+        cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
+        cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
+        prob (float): probability of applying mixup or cutmix per batch or element
+        switch_prob (float): probability of switching to cutmix instead of mixup when both are active
+        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element))
+        correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
+        label_smoothing (float): apply label smoothing to the mixed target tensor
+        num_classes (int): number of classes for target
+    """
+    def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
+                 mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
+        self.mixup_alpha = mixup_alpha
+        self.cutmix_alpha = cutmix_alpha
+        self.cutmix_minmax = cutmix_minmax
+        if self.cutmix_minmax is not None:
+            assert len(self.cutmix_minmax) == 2
+            # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
+            self.cutmix_alpha = 1.0
+        self.mix_prob = prob
+        self.switch_prob = switch_prob
+        self.label_smoothing = label_smoothing
+        self.num_classes = num_classes
+        self.mode = mode
+        self.correct_lam = correct_lam  # correct lambda based on clipped area for cutmix
+        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
+
+    def _params_per_elem(self, batch_size):
+        lam = np.ones(batch_size, dtype=np.float32)
+        use_cutmix = np.zeros(batch_size, dtype=bool)  # builtin bool; np.bool is removed in NumPy >= 1.24
+        if self.mixup_enabled:
+            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
+                use_cutmix = np.random.rand(batch_size) < self.switch_prob
+                lam_mix = np.where(
+                    use_cutmix,
+                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
+                    np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
+            elif self.mixup_alpha > 0.:
+                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
+            elif self.cutmix_alpha > 0.:
+                use_cutmix = np.ones(batch_size, dtype=bool)  # builtin bool; np.bool is removed in NumPy >= 1.24
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
+            else:
+                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
+            lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
+        return lam, use_cutmix
+
+    def _params_per_batch(self):
+        lam = 1.
+        use_cutmix = False
+        if self.mixup_enabled and np.random.rand() < self.mix_prob:
+            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
+                use_cutmix = np.random.rand() < self.switch_prob
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
+                    np.random.beta(self.mixup_alpha, self.mixup_alpha)
+            elif self.mixup_alpha > 0.:
+                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
+            elif self.cutmix_alpha > 0.:
+                use_cutmix = True
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
+            else:
+                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
+            lam = float(lam_mix)
+        return lam, use_cutmix
+
+    def _mix_elem(self, x):
+        batch_size = len(x)
+        lam_batch, use_cutmix = self._params_per_elem(batch_size)
+        x_orig = x.clone()  # need to keep an unmodified original for mixing source
+        for i in range(batch_size):
+            j = batch_size - i - 1
+            lam = lam_batch[i]
+            if lam != 1.:
+                if use_cutmix[i]:
+                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
+                    lam_batch[i] = lam
+                else:
+                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
+        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
+
+    def _mix_pair(self, x):
+        batch_size = len(x)
+        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
+        x_orig = x.clone()  # need to keep an unmodified original for mixing source
+        for i in range(batch_size // 2):
+            j = batch_size - i - 1
+            lam = lam_batch[i]
+            if lam != 1.:
+                if use_cutmix[i]:
+                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
+                    x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
+                    lam_batch[i] = lam
+                else:
+                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
+                    x[j] = x[j] * lam + x_orig[i] * (1 - lam)
+        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
+        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)
+
+    def _mix_batch(self, x):
+        lam, use_cutmix = self._params_per_batch()
+        if lam == 1.:
+            return 1.
+        if use_cutmix:
+            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
+                x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
+            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
+        else:
+            x_flipped = x.flip(0).mul_(1.
- lam) + x.mul_(lam).add_(x_flipped) + return lam + + def __call__(self, x, target): + assert len(x) % 2 == 0, 'Batch size should be even when using this' + if self.mode == 'elem': + lam = self._mix_elem(x) + elif self.mode == 'pair': + lam = self._mix_pair(x) + else: + lam = self._mix_batch(x) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device) + return x, target + + +class FastCollateMixup(Mixup): + """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch + + A Mixup impl that's performed while collating the batches. + """ + + def _mix_elem_collate(self, output, batch, half=False): + batch_size = len(batch) + num_elem = batch_size // 2 if half else batch_size + assert len(output) == num_elem + lam_batch, use_cutmix = self._params_per_elem(num_elem) + for i in range(num_elem): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed = batch[i][0] + if lam != 1.: + if use_cutmix[i]: + if not half: + mixed = mixed.copy() + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + if half: + lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_pair_collate(self, output, batch): + batch_size = len(batch) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed_i = batch[i][0] + mixed_j = batch[j][0] + assert 0 <= lam <= 1.0 + if lam < 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + patch_i = mixed_i[:, yl:yh, xl:xh].copy() + mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] + mixed_j[:, yl:yh, xl:xh] = patch_i + lam_batch[i] = lam + else: + mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam) + mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) + mixed_i = mixed_temp + np.rint(mixed_j, out=mixed_j) + np.rint(mixed_i, out=mixed_i) + output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) + output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_batch_collate(self, output, batch): + batch_size = len(batch) + lam, use_cutmix = self._params_per_batch() + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + for i in range(batch_size): + j = batch_size - i - 1 + mixed = batch[i][0] + if lam != 1.: + if use_cutmix: + mixed = mixed.copy() # don't want to modify the original while iterating + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + return lam + + def __call__(self, batch, _=None): + batch_size = len(batch) + assert batch_size % 2 == 0, 'Batch size should be even when using this' + half = 'half' in self.mode + if half: + batch_size //= 2 + output = torch.zeros((batch_size, 
*batch[0][0].shape), dtype=torch.uint8)
+        if self.mode == 'elem' or self.mode == 'half':
+            lam = self._mix_elem_collate(output, batch, half=half)
+        elif self.mode == 'pair':
+            lam = self._mix_pair_collate(output, batch)
+        else:
+            lam = self._mix_batch_collate(output, batch)
+        target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
+        target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
+        target = target[:batch_size]
+        return output, target
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e820d5e027ba82c937829ad50b2b2c9a97d2f28
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/__init__.py
@@ -0,0 +1,2 @@
+from .parser_factory import create_parser
+from .img_extensions import *
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/class_map.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/class_map.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cf3f57e014566e165374acae8dec031c02048f8
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/class_map.py
@@ -0,0 +1,22 @@
+import os
+import pickle
+
+def load_class_map(map_or_filename, root=''):
+    if isinstance(map_or_filename, dict):
+        assert map_or_filename, 'class_map dict must be non-empty'
+        return map_or_filename
+    class_map_path = map_or_filename
+    if not os.path.exists(class_map_path):
+        class_map_path = os.path.join(root, class_map_path)
+        assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
+    class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
+    if class_map_ext == '.txt':
+        with open(class_map_path) as f:
+            class_to_idx = {v.strip(): k for k, v in enumerate(f)}
+    elif class_map_ext == '.pkl':
+        with open(class_map_path, 'rb') as f:
+            class_to_idx = pickle.load(f)
+    else:
+        assert False, f'Unsupported class map file extension ({class_map_ext}).'
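+    # For illustration (added note): a '.txt' class map is one class name per line and the
+    # line order defines the index, e.g. a file containing 'cat\ndog\n' maps to {'cat': 0, 'dog': 1}.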
+ return class_to_idx + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/img_extensions.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/img_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..45c85aabd00ca5ebf7bd6fa85c674570fe60f9c8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/img_extensions.py @@ -0,0 +1,50 @@ +from copy import deepcopy + +__all__ = ['get_img_extensions', 'is_img_extension', 'set_img_extensions', 'add_img_extensions', 'del_img_extensions'] + + +IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') # singleton, kept public for bwd compat use +_IMG_EXTENSIONS_SET = set(IMG_EXTENSIONS) # set version, private, kept in sync + + +def _set_extensions(extensions): + global IMG_EXTENSIONS + global _IMG_EXTENSIONS_SET + dedupe = set() # NOTE de-duping tuple while keeping original order + IMG_EXTENSIONS = tuple(x for x in extensions if x not in dedupe and not dedupe.add(x)) + _IMG_EXTENSIONS_SET = set(extensions) + + +def _valid_extension(x: str): + return x and isinstance(x, str) and len(x) >= 2 and x.startswith('.') + + +def is_img_extension(ext): + return ext in _IMG_EXTENSIONS_SET + + +def get_img_extensions(as_set=False): + return deepcopy(_IMG_EXTENSIONS_SET if as_set else IMG_EXTENSIONS) + + +def set_img_extensions(extensions): + assert len(extensions) + for x in extensions: + assert _valid_extension(x) + _set_extensions(extensions) + + +def add_img_extensions(ext): + if not isinstance(ext, (list, tuple, set)): + ext = (ext,) + for x in ext: + assert _valid_extension(x) + extensions = IMG_EXTENSIONS + tuple(ext) + _set_extensions(extensions) + + +def del_img_extensions(ext): + if not isinstance(ext, (list, tuple, set)): + ext = (ext,) + extensions = tuple(x for x in IMG_EXTENSIONS if x not in ext) + _set_extensions(extensions) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..76ab6d18283644702424d0ff2af5832d6d6dd3b7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser.py @@ -0,0 +1,17 @@ +from abc import abstractmethod + + +class Parser: + def __init__(self): + pass + + @abstractmethod + def _filename(self, index, basename=False, absolute=False): + pass + + def filename(self, index, basename=False, absolute=False): + return self._filename(index, basename=basename, absolute=absolute) + + def filenames(self, basename=False, absolute=False): + return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..0665c02a8b4db12b8ac6b7095999751c5b26f384 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_factory.py @@ -0,0 +1,28 @@ +import os + +from .parser_image_folder import ParserImageFolder +from .parser_image_in_tar import ParserImageInTar + + +def create_parser(name, root, split='train', **kwargs): + name = name.lower() + name = name.split('/', 2) + prefix = '' + if len(name) > 1: + prefix = name[0] + name = name[-1] + + # FIXME improve the selection right now just tfds prefix or fallback path, will need options to + # explicitly 
select other options shortly
+    if prefix == 'tfds':
+        from .parser_tfds import ParserTfds  # defer tensorflow import
+        parser = ParserTfds(root, name, split=split, **kwargs)
+    else:
+        assert os.path.exists(root)
+        # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
+        # FIXME support split here, in parser?
+        if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
+            parser = ParserImageInTar(root, **kwargs)
+        else:
+            parser = ParserImageFolder(root, **kwargs)
+    return parser
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_folder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_folder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d82b024377e99a26fb87c92256a076505d894666
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_folder.py
@@ -0,0 +1,90 @@
+""" A dataset parser that reads images from folders
+
+Folders are scanned recursively to find image files. Labels are based
+on the folder hierarchy, just leaf folders by default.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import os
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+from custom_timm.utils.misc import natural_key
+
+from .class_map import load_class_map
+from .img_extensions import get_img_extensions
+from .parser import Parser
+
+
+def find_images_and_targets(
+        folder: str,
+        types: Optional[Union[List, Tuple, Set]] = None,
+        class_to_idx: Optional[Dict] = None,
+        leaf_name_only: bool = True,
+        sort: bool = True
+):
+    """ Walk folder recursively to discover images and map them to classes by folder names.
+
+    Args:
+        folder: root of folder to recursively search
+        types: types (file extensions) to search for in path
+        class_to_idx: specify mapping for class (folder name) to class index if set
+        leaf_name_only: use only leaf-name of folder walk for class names
+        sort: re-sort found images by name (for consistent ordering)
+
+    Returns:
+        A list of image and target tuples, class_to_idx mapping
+    """
+    types = get_img_extensions(as_set=True) if not types else set(types)
+    labels = []
+    filenames = []
+    for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True):
+        rel_path = os.path.relpath(root, folder) if (root != folder) else ''
+        label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
+        for f in files:
+            base, ext = os.path.splitext(f)
+            if ext.lower() in types:
+                filenames.append(os.path.join(root, f))
+                labels.append(label)
+    if class_to_idx is None:
+        # building class index
+        unique_labels = set(labels)
+        sorted_labels = list(sorted(unique_labels, key=natural_key))
+        class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
+    images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx]
+    if sort:
+        images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
+    return images_and_targets, class_to_idx
+
+
+class ParserImageFolder(Parser):
+
+    def __init__(
+            self,
+            root,
+            class_map=''):
+        super().__init__()
+
+        self.root = root
+        class_to_idx = None
+        if class_map:
+            class_to_idx = load_class_map(class_map, root)
+        self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx)
+        if len(self.samples) == 0:
+            raise RuntimeError(
+                f'Found 0 images in subfolders of {root}. 
'
+                f'Supported image extensions are {", ".join(get_img_extensions())}')
+
+    def __getitem__(self, index):
+        path, target = self.samples[index]
+        return open(path, 'rb'), target
+
+    def __len__(self):
+        return len(self.samples)
+
+    def _filename(self, index, basename=False, absolute=False):
+        filename = self.samples[index][0]
+        if basename:
+            filename = os.path.basename(filename)
+        elif not absolute:
+            filename = os.path.relpath(filename, self.root)
+        return filename
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_in_tar.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_in_tar.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d3c1765b5bd3809f93a5c1707b472f7f54e5eb7
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_in_tar.py
@@ -0,0 +1,229 @@
+""" A dataset parser that reads tarfile based datasets
+
+This parser can read and extract image samples from:
+* a single tar of image files
+* a folder of multiple tarfiles containing image files
+* a tar of tars containing image files
+
+Labels are based on the combined folder and/or tar name structure.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import logging
+import os
+import pickle
+import tarfile
+from glob import glob
+from typing import List, Tuple, Dict, Set, Optional, Union
+
+import numpy as np
+
+from custom_timm.utils.misc import natural_key
+
+from .class_map import load_class_map
+from .img_extensions import get_img_extensions
+from .parser import Parser
+
+_logger = logging.getLogger(__name__)
+CACHE_FILENAME_SUFFIX = '_tarinfos.pickle'
+
+
+class TarState:
+
+    def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None):
+        self.tf: tarfile.TarFile = tf
+        self.ti: tarfile.TarInfo = ti
+        self.children: Dict[str, TarState] = {}  # child states (tars within tars)
+
+    def reset(self):
+        self.tf = None
+
+
+def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions: Set[str]):
+    sample_count = 0
+    for i, ti in enumerate(tf):
+        if not ti.isfile():
+            continue
+        dirname, basename = os.path.split(ti.path)
+        name, ext = os.path.splitext(basename)
+        ext = ext.lower()
+        if ext == '.tar':
+            with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf:
+                child_info = dict(
+                    name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[])
+                sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions)
+                _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. 
{len(child_info["samples"])} images.') + parent_info['children'].append(child_info) + elif ext in extensions: + parent_info['samples'].append(ti) + sample_count += 1 + return sample_count + + +def extract_tarinfos( + root, + class_name_to_idx: Optional[Dict] = None, + cache_tarinfo: Optional[bool] = None, + extensions: Optional[Union[List, Tuple, Set]] = None, + sort: bool = True +): + extensions = get_img_extensions(as_set=True) if not extensions else set(extensions) + root_is_tar = False + if os.path.isfile(root): + assert os.path.splitext(root)[-1].lower() == '.tar' + tar_filenames = [root] + root, root_name = os.path.split(root) + root_name = os.path.splitext(root_name)[0] + root_is_tar = True + else: + root_name = root.strip(os.path.sep).split(os.path.sep)[-1] + tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) + num_tars = len(tar_filenames) + tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) + assert num_tars, f'No .tar files found at specified path ({root}).' + + _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') + info = dict(tartrees=[]) + cache_path = '' + if cache_tarinfo is None: + cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB + if cache_tarinfo: + cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX + cache_path = os.path.join(root, cache_filename) + if os.path.exists(cache_path): + _logger.info(f'Reading tar info from cache file {cache_path}.') + with open(cache_path, 'rb') as pf: + info = pickle.load(pf) + assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" + else: + for i, fn in enumerate(tar_filenames): + path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] + with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode + parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) + num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) + num_children = len(parent_info["children"]) + _logger.debug( + f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') + info['tartrees'].append(parent_info) + if cache_path: + _logger.info(f'Writing tar info to cache file {cache_path}.') + with open(cache_path, 'wb') as pf: + pickle.dump(info, pf) + + samples = [] + labels = [] + build_class_map = False + if class_name_to_idx is None: + build_class_map = True + + # Flatten tartree info into lists of samples and targets w/ targets based on label id via + # class map arg or from unique paths. + # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children + # this covers my current use cases and keeps things a little easier to test for now. 
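+    # Illustrative note (editor's sketch; member names are hypothetical): each entry
+    # appended to `samples` below is a (TarInfo, parent_tar_name, child_tar_TarInfo) tuple,
+    # e.g. a member 'n01440764/x.JPEG' stored directly in 'n01440764.tar' yields
+    # (<TarInfo for x.JPEG>, 'n01440764.tar', None); the third element is only set for
+    # samples inside a tar-within-a-tar, and the second is None when root is itself a tar.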
+ tarfiles = [] + + def _label_from_paths(*path, leaf_only=True): + path = os.path.join(*path).strip(os.path.sep) + return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') + + def _add_samples(info, fn): + added = 0 + for s in info['samples']: + label = _label_from_paths(info['path'], os.path.dirname(s.path)) + if not build_class_map and label not in class_name_to_idx: + continue + samples.append((s, fn, info['ti'])) + labels.append(label) + added += 1 + return added + + _logger.info(f'Collecting samples and building tar states.') + for parent_info in info['tartrees']: + # if tartree has children, we assume all samples are at the child level + tar_name = None if root_is_tar else parent_info['name'] + tar_state = TarState() + parent_added = 0 + for child_info in parent_info['children']: + child_added = _add_samples(child_info, fn=tar_name) + if child_added: + tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) + parent_added += child_added + parent_added += _add_samples(parent_info, fn=tar_name) + if parent_added: + tarfiles.append((tar_name, tar_state)) + del info + + if build_class_map: + # build class index + sorted_labels = list(sorted(set(labels), key=natural_key)) + class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + + _logger.info(f'Mapping targets and sorting samples.') + samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] + if sort: + samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) + samples, targets = zip(*samples_and_targets) + samples = np.array(samples) + targets = np.array(targets) + _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') + return samples, targets, class_name_to_idx, tarfiles + + +class ParserImageInTar(Parser): + """ Multi-tarfile dataset parser where there is one .tar file per class + """ + + def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): + super().__init__() + + class_name_to_idx = None + if class_map: + class_name_to_idx = load_class_map(class_map, root) + self.root = root + self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( + self.root, + class_name_to_idx=class_name_to_idx, + cache_tarinfo=cache_tarinfo + ) + self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} + if len(tarfiles) == 1 and tarfiles[0][0] is None: + self.root_is_tar = True + self.tar_state = tarfiles[0][1] + else: + self.root_is_tar = False + self.tar_state = dict(tarfiles) + self.cache_tarfiles = cache_tarfiles + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + sample = self.samples[index] + target = self.targets[index] + sample_ti, parent_fn, child_ti = sample + parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root + + tf = None + cache_state = None + if self.cache_tarfiles: + cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] + tf = cache_state.tf + if tf is None: + tf = tarfile.open(parent_abs) + if self.cache_tarfiles: + cache_state.tf = tf + if child_ti is not None: + ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None + if ctf is None: + ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) + if self.cache_tarfiles: + cache_state.children[child_ti.name].tf = ctf + tf = ctf + + return tf.extractfile(sample_ti), target + + def _filename(self, index, basename=False, absolute=False): + filename 
= self.samples[index][0].name
+        if basename:
+            filename = os.path.basename(filename)
+        return filename
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_tar.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_tar.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5520ee64c1d798a37d45b5361ab3b800f5adbe6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_image_tar.py
@@ -0,0 +1,74 @@
+""" A dataset parser that reads single tarfile based datasets
+
+This parser can read datasets consisting of a single tarfile containing images.
+I am planning to deprecate it in favour of ParserImageInTar.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import os
+import tarfile
+
+from custom_timm.utils.misc import natural_key
+
+from .class_map import load_class_map
+from .img_extensions import get_img_extensions
+from .parser import Parser
+
+
+def extract_tarinfo(tarfile, class_to_idx=None, sort=True):
+    extensions = get_img_extensions(as_set=True)
+    files = []
+    labels = []
+    for ti in tarfile.getmembers():
+        if not ti.isfile():
+            continue
+        dirname, basename = os.path.split(ti.path)
+        label = os.path.basename(dirname)
+        ext = os.path.splitext(basename)[1]
+        if ext.lower() in extensions:
+            files.append(ti)
+            labels.append(label)
+    if class_to_idx is None:
+        unique_labels = set(labels)
+        sorted_labels = list(sorted(unique_labels, key=natural_key))
+        class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
+    tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx]
+    if sort:
+        tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path))
+    return tarinfo_and_targets, class_to_idx
+
+
+class ParserImageTar(Parser):
+    """ Single tarfile dataset where classes are mapped to folders within tar
+    NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can
+    operate on folders of tars or tars in tars.
+    """
+    def __init__(self, root, class_map=''):
+        super().__init__()
+
+        class_to_idx = None
+        if class_map:
+            class_to_idx = load_class_map(class_map, root)
+        assert os.path.isfile(root)
+        self.root = root
+
+        with tarfile.open(root) as tf:  # cannot keep this open across processes, reopen later
+            self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx)
+        self.imgs = self.samples
+        self.tarfile = None  # lazy init in __getitem__
+
+    def __getitem__(self, index):
+        if self.tarfile is None:
+            self.tarfile = tarfile.open(self.root)
+        tarinfo, target = self.samples[index]
+        fileobj = self.tarfile.extractfile(tarinfo)
+        return fileobj, target
+
+    def __len__(self):
+        return len(self.samples)
+
+    def _filename(self, index, basename=False, absolute=False):
+        filename = self.samples[index][0].name
+        if basename:
+            filename = os.path.basename(filename)
+        return filename
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_tfds.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_tfds.py
new file mode 100644
index 0000000000000000000000000000000000000000..739f3813d0ad20bcb92676662dad62d53be1fe70
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/parsers/parser_tfds.py
@@ -0,0 +1,301 @@
+""" Dataset parser interface that wraps TFDS datasets
+
+Wraps many (most?) 
TFDS image-classification datasets
+from https://github.com/tensorflow/datasets
+https://www.tensorflow.org/datasets/catalog/overview#image_classification
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import math
+import torch
+import torch.distributed as dist
+from PIL import Image
+
+try:
+    import tensorflow as tf
+    tf.config.set_visible_devices([], 'GPU')  # Hands off my GPU! (or pip install tensorflow-cpu)
+    import tensorflow_datasets as tfds
+    try:
+        tfds.even_splits('', 1, drop_remainder=False)  # non-buggy even_splits has drop_remainder arg
+        has_buggy_even_splits = False
+    except TypeError:
+        print("Warning: This version of tfds doesn't have the latest even_splits impl. "
+              "Please update or use tfds-nightly for better fine-grained split behaviour.")
+        has_buggy_even_splits = True
+    # NOTE uncomment below if having file limit issues on dataset build (or alter your OS defaults)
+    # import resource
+    # low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
+    # resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
+except ImportError as e:
+    print(e)
+    print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
+    exit(1)
+from .parser import Parser
+
+
+MAX_TP_SIZE = 8  # maximum TF threadpool size, only doing jpeg decodes and queuing activities
+SHUFFLE_SIZE = 8192  # examples to shuffle in DS queue
+PREFETCH_SIZE = 2048  # examples to prefetch
+
+
+def even_split_indices(split, n, num_examples):
+    partitions = [round(i * num_examples / n) for i in range(n + 1)]
+    return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)]
+
+
+def get_class_labels(info):
+    if 'label' not in info.features:
+        return {}
+    class_label = info.features['label']
+    class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
+    return class_to_idx
+
+
+class ParserTfds(Parser):
+    """ Wrap Tensorflow Datasets for use in PyTorch
+
+    There are several things to be aware of:
+      * To prevent excessive examples being dropped per epoch w/ distributed training or multiplicity of
+        dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
+        https://github.com/pytorch/pytorch/issues/33413
+      * With PyTorch IterableDatasets, each worker in each replica operates in isolation, so the final batch
+        from each worker could be a different size. For training this is worked around by the option above, for
+        validation extra examples are inserted iff distributed mode is enabled so that the batches being reduced
+        across replicas are of the same size. This will slightly alter the results, distributed validation will not be
+        100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
+        since there are up to N * J extra examples with IterableDatasets.
+      * The sharding (splitting of the dataset into TFRecord files) imposes limitations on the number of
+        replicas and dataloader workers you can use. For really small datasets that only contain a few shards
+        you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
+        benefit of distributed training or fast dataloading should be much less for small datasets.
+      * This wrapper is currently configured to return individual, decompressed image examples from the TFDS
+        dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
+        to specify a TF augmentation fn and return augmented batches w/ some modifications to other downstream
+        components.
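+
+    A minimal usage sketch (editor's illustration; the data dir, dataset name, and batch size
+    are hypothetical, and the TFDS dataset is assumed to be already downloaded/prepared):
+
+        parser = ParserTfds(root='/data/tfds', name='imagenet2012', split='train',
+                            is_training=True, batch_size=32)
+        for img, target in parser:  # yields (PIL.Image, label); iteration only, no indexing
+            ...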
+ + """ + + def __init__( + self, + root, + name, + split='train', + is_training=False, + batch_size=None, + download=False, + repeats=0, + seed=42, + input_name='image', + input_image='RGB', + target_name='label', + target_image='', + prefetch_size=None, + shuffle_size=None, + max_threadpool_size=None + ): + """ Tensorflow-datasets Wrapper + + Args: + root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir) + name: tfds dataset name (eg `imagenet2012`) + split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`) + is_training: training mode, shuffle enabled, dataset len rounded by batch_size + batch_size: batch_size to use to unsure total examples % batch_size == 0 in training across all dis nodes + download: download and build TFDS dataset if set, otherwise must use tfds CLI + repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1) + seed: common seed for shard shuffle across all distributed/worker instances + input_name: name of Feature to return as data (input) + input_image: image mode if input is an image (currently PIL mode string) + target_name: name of Feature to return as target (label) + target_image: image mode if target is an image (currently PIL mode string) + prefetch_size: override default tf.data prefetch buffer size + shuffle_size: override default tf.data shuffle buffer size + max_threadpool_size: override default threadpool size for tf.data + """ + super().__init__() + self.root = root + self.split = split + self.is_training = is_training + if self.is_training: + assert batch_size is not None, \ + "Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper" + self.batch_size = batch_size + self.repeats = repeats + self.common_seed = seed # a seed that's fixed across all worker / distributed instances + + # performance settings + self.prefetch_size = prefetch_size or PREFETCH_SIZE + self.shuffle_size = shuffle_size or SHUFFLE_SIZE + self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE + + # TFDS builder and split information + self.input_name = input_name # FIXME support tuples / lists of inputs and targets and full range of Feature + self.input_image = input_image + self.target_name = target_name + self.target_image = target_image + self.builder = tfds.builder(name, data_dir=root) + # NOTE: the tfds command line app can be used download & prepare datasets if you don't enable download flag + if download: + self.builder.download_and_prepare() + self.class_to_idx = get_class_labels(self.builder.info) if self.target_name == 'label' else {} + self.split_info = self.builder.info.splits[split] + self.num_examples = self.split_info.num_examples + + # Distributed world state + self.dist_rank = 0 + self.dist_num_replicas = 1 + if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1: + self.dist_rank = dist.get_rank() + self.dist_num_replicas = dist.get_world_size() + + # Attributes that are updated in _lazy_init, including the tf.data pipeline itself + self.global_num_workers = 1 + self.worker_info = None + self.worker_seed = 0 # seed unique to each work instance + self.subsplit = None # set when data is distributed across workers using sub-splits + self.ds = None # initialized lazily on each dataloader worker process + + def _lazy_init(self): + """ Lazily initialize the dataset. + + This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that + will be using the dataset instance. 
The __init__ method is called on the main process;
+        this will be called in a dataloader worker process.
+
+        NOTE: There will be problems if you try to re-use this dataset across different loader/worker
+        instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
+        before it is passed to the dataloader.
+        """
+        worker_info = torch.utils.data.get_worker_info()
+
+        # setup input context to split dataset across distributed processes
+        num_workers = 1
+        global_worker_id = 0
+        if worker_info is not None:
+            self.worker_info = worker_info
+            self.worker_seed = worker_info.seed
+            num_workers = worker_info.num_workers
+            self.global_num_workers = self.dist_num_replicas * num_workers
+            global_worker_id = self.dist_rank * num_workers + worker_info.id
+
+            """ Data sharding
+            InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
+            My understanding is that using split, the underlying TFRecord files will shuffle (shuffle_files=True)
+            between the splits each iteration, but that understanding could be wrong.
+
+            I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing
+            the data across workers. For training, InputContext is used to assign shards to nodes unless num_shards
+            in dataset < total number of workers. Otherwise the sub-split API is used for datasets without enough shards
+            or for validation where we can't drop examples and need to minimize uneven splits to avoid padding.
+            """
+            should_subsplit = self.global_num_workers > 1 and (
+                    self.split_info.num_shards < self.global_num_workers or not self.is_training)
+            if should_subsplit:
+                # split the dataset w/o using sharding for more even examples / worker, can result in less optimal
+                # read patterns for distributed training (overlap across shards) so better to use InputContext there
+                if has_buggy_even_splits:
+                    # my even_split workaround doesn't work on subsplits, upgrade tfds!
+                    if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo):
+                        subsplits = even_split_indices(self.split, self.global_num_workers, self.num_examples)
+                        self.subsplit = subsplits[global_worker_id]
+                else:
+                    subsplits = tfds.even_splits(self.split, self.global_num_workers)
+                    self.subsplit = subsplits[global_worker_id]
+
+        input_context = None
+        if self.global_num_workers > 1 and self.subsplit is None:
+            # set input context to divide shards among distributed replicas
+            input_context = tf.distribute.InputContext(
+                num_input_pipelines=self.global_num_workers,
+                input_pipeline_id=global_worker_id,
+                num_replicas_in_sync=self.dist_num_replicas  # FIXME does this arg have any impact?
+ ) + read_config = tfds.ReadConfig( + shuffle_seed=self.common_seed, + shuffle_reshuffle_each_iteration=True, + input_context=input_context) + ds = self.builder.as_dataset( + split=self.subsplit or self.split, shuffle_files=self.is_training, read_config=read_config) + # avoid overloading threading w/ combo of TF ds threads + PyTorch workers + options = tf.data.Options() + thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading' + getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // num_workers) + getattr(options, thread_member).max_intra_op_parallelism = 1 + ds = ds.with_options(options) + if self.is_training or self.repeats > 1: + # to prevent excessive drop_last batch behaviour w/ IterableDatasets + # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading + ds = ds.repeat() # allow wrap around and break iteration manually + if self.is_training: + ds = ds.shuffle(min(self.num_examples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed) + ds = ds.prefetch(min(self.num_examples // self.global_num_workers, self.prefetch_size)) + self.ds = tfds.as_numpy(ds) + + def __iter__(self): + if self.ds is None: + self._lazy_init() + + # Compute a rounded up sample count that is used to: + # 1. make batches even cross workers & replicas in distributed validation. + # This adds extra examples and will slightly alter validation results. + # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size + # batches are produced (underlying tfds iter wraps around) + target_example_count = math.ceil(max(1, self.repeats) * self.num_examples / self.global_num_workers) + if self.is_training: + # round up to nearest batch_size per worker-replica + target_example_count = math.ceil(target_example_count / self.batch_size) * self.batch_size + + # Iterate until exhausted or sample count hits target when training (ds.repeat enabled) + example_count = 0 + for example in self.ds: + input_data = example[self.input_name] + if self.input_image: + input_data = Image.fromarray(input_data, mode=self.input_image) + target_data = example[self.target_name] + if self.target_image: + target_data = Image.fromarray(target_data, mode=self.target_image) + yield input_data, target_data + example_count += 1 + if self.is_training and example_count >= target_example_count: + # Need to break out of loop when repeat() is enabled for training w/ oversampling + # this results in extra examples per epoch but seems more desirable than dropping + # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes) + break + + # Pad across distributed nodes (make counts equal by adding examples) + if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \ + 0 < example_count < target_example_count: + # Validation batch padding only done for distributed training where results are reduced across nodes. + # For single process case, it won't matter if workers return different batch sizes. + # If using input_context or % based splits, sample count can vary significantly across workers and this + # approach should not be used (hence disabled if self.subsplit isn't set). 
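+            # Worked example (editor's illustration, hypothetical numbers): with num_examples=50000
+            # and global_num_workers=32 (8 replicas x 4 workers), target_example_count is
+            # ceil(50000 / 32) = 1563, so a worker whose subsplit produced only 1562 real
+            # examples re-yields its last sample once to keep the reduced batch counts equal.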
+ while example_count < target_example_count: + yield input_data, target_data # yield prev sample again + example_count += 1 + + def __len__(self): + # this is just an estimate and does not factor in extra examples added to pad batches based on + # complete worker & replica info (not available until init in dataloader). + return math.ceil(max(1, self.repeats) * self.num_examples / self.dist_num_replicas) + + def _filename(self, index, basename=False, absolute=False): + assert False, "Not supported" # no random access to examples + + def filenames(self, basename=False, absolute=False): + """ Return all filenames in dataset, overrides base""" + if self.ds is None: + self._lazy_init() + names = [] + for sample in self.ds: + if len(names) > self.num_examples: + break # safety for ds.repeat() case + if 'file_name' in sample: + name = sample['file_name'] + elif 'filename' in sample: + name = sample['filename'] + elif 'id' in sample: + name = sample['id'] + else: + assert False, "No supported name field present" + names.append(name) + return names diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/random_erasing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/random_erasing.py new file mode 100644 index 0000000000000000000000000000000000000000..98108488da5392787d6502e2d21487259fe8c5e3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/random_erasing.py @@ -0,0 +1,103 @@ +""" Random Erasing (Cutout) + +Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 +Copyright Zhun Zhong & Liang Zheng + +Hacked together by / Copyright 2019, Ross Wightman +""" +import random +import math +import torch + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. 
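+
+    A minimal usage sketch (editor's illustration; shapes and settings are arbitrary):
+
+        erase = RandomErasing(probability=0.5, mode='pixel', device='cpu')
+        x = torch.randn(8, 3, 224, 224)  # an already-normalized batch
+        x = erase(x)                     # selected rectangles are overwritten in-place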
+ """ + + def __init__( + self, + probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + self.mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if self.mode == 'rand': + self.rand_color = True # per block random normal + elif self.mode == 'pixel': + self.per_pixel = True # per pixel random normal + else: + assert not self.mode or self.mode == 'const' + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype, device=self.device) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input + + def __repr__(self): + # NOTE simplified state for repr + fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}' + fs += f', count=({self.min_count}, {self.max_count}))' + return fs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/real_labels.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/real_labels.py new file mode 100644 index 0000000000000000000000000000000000000000..939c34867e7915ce3e4cc7da04a5bc1653ec4f2c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/real_labels.py @@ -0,0 +1,42 @@ +""" Real labels evaluator for ImageNet +Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159 +Based on Numpy example at https://github.com/google-research/reassessed-imagenet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import json +import numpy as np + + +class RealLabelsImagenet: + + def __init__(self, filenames, real_json='real.json', topk=(1, 5)): + with open(real_json) as real_labels: + real_labels = json.load(real_labels) + real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)} + self.real_labels = real_labels + self.filenames = filenames + assert len(self.filenames) == len(self.real_labels) + self.topk = topk + self.is_correct = {k: [] for k in topk} + self.sample_idx = 0 + + def add_result(self, output): + maxk = max(self.topk) + _, pred_batch = output.topk(maxk, 1, True, True) + pred_batch = pred_batch.cpu().numpy() + for 
pred in pred_batch:
+            filename = self.filenames[self.sample_idx]
+            filename = os.path.basename(filename)
+            if self.real_labels[filename]:
+                for k in self.topk:
+                    self.is_correct[k].append(
+                        any([p in self.real_labels[filename] for p in pred[:k]]))
+            self.sample_idx += 1
+
+    def get_accuracy(self, k=None):
+        if k is None:
+            return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk}
+        else:
+            return float(np.mean(self.is_correct[k])) * 100
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/tf_preprocessing.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/tf_preprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..44b4a3af7372c6865b1cdddda0a8da0ccc6b93a0
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/tf_preprocessing.py
@@ -0,0 +1,232 @@
+""" Tensorflow Preprocessing Adapter
+
+Allows use of Tensorflow preprocessing pipeline in PyTorch Transform
+
+Copyright of original Tensorflow code below.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""ImageNet preprocessing for MnasNet."""
+import tensorflow as tf
+import numpy as np
+
+IMAGE_SIZE = 224
+CROP_PADDING = 32
+
+
+def distorted_bounding_box_crop(image_bytes,
+                                bbox,
+                                min_object_covered=0.1,
+                                aspect_ratio_range=(0.75, 1.33),
+                                area_range=(0.05, 1.0),
+                                max_attempts=100,
+                                scope=None):
+    """Generates cropped_image using one of the bboxes randomly distorted.
+
+    See `tf.image.sample_distorted_bounding_box` for more documentation.
+
+    Args:
+        image_bytes: `Tensor` of binary image data.
+        bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
+            where each coordinate is [0, 1) and the coordinates are arranged
+            as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
+            image.
+        min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
+            area of the image must contain at least this fraction of any bounding
+            box supplied.
+        aspect_ratio_range: An optional list of `float`s. The cropped area of the
+            image must have an aspect ratio = width / height within this range.
+        area_range: An optional list of `float`s. The cropped area of the image
+            must contain a fraction of the supplied image within this range.
+        max_attempts: An optional `int`. Number of attempts at generating a cropped
+            region of the image of the specified constraints. After `max_attempts`
+            failures, return the entire image.
+        scope: Optional `str` for name scope. 
+    Returns:
+        cropped image `Tensor`
+    """
+    with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
+        shape = tf.image.extract_jpeg_shape(image_bytes)
+        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
+            shape,
+            bounding_boxes=bbox,
+            min_object_covered=min_object_covered,
+            aspect_ratio_range=aspect_ratio_range,
+            area_range=area_range,
+            max_attempts=max_attempts,
+            use_image_if_no_bounding_boxes=True)
+        bbox_begin, bbox_size, _ = sample_distorted_bounding_box
+
+        # Crop the image to the specified bounding box.
+        offset_y, offset_x, _ = tf.unstack(bbox_begin)
+        target_height, target_width, _ = tf.unstack(bbox_size)
+        crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
+        image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
+
+        return image
+
+
+def _at_least_x_are_equal(a, b, x):
+    """At least `x` of `a` and `b` `Tensors` are equal."""
+    match = tf.equal(a, b)
+    match = tf.cast(match, tf.int32)
+    return tf.greater_equal(tf.reduce_sum(match), x)
+
+
+def _decode_and_random_crop(image_bytes, image_size, resize_method):
+    """Make a random crop of image_size."""
+    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
+    image = distorted_bounding_box_crop(
+        image_bytes,
+        bbox,
+        min_object_covered=0.1,
+        aspect_ratio_range=(3. / 4, 4. / 3.),
+        area_range=(0.08, 1.0),
+        max_attempts=10,
+        scope=None)
+    original_shape = tf.image.extract_jpeg_shape(image_bytes)
+    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
+
+    image = tf.cond(
+        bad,
+        # fall back to a center crop, passing the same resize_method that the main path uses
+        lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
+        lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])
+
+    return image
+
+
+def _decode_and_center_crop(image_bytes, image_size, resize_method):
+    """Crops to center of image with padding then scales image_size."""
+    shape = tf.image.extract_jpeg_shape(image_bytes)
+    image_height = shape[0]
+    image_width = shape[1]
+
+    padded_center_crop_size = tf.cast(
+        ((image_size / (image_size + CROP_PADDING)) *
+         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
+        tf.int32)
+
+    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
+    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
+    crop_window = tf.stack([offset_height, offset_width,
+                            padded_center_crop_size, padded_center_crop_size])
+    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
+    image = tf.image.resize([image], [image_size, image_size], resize_method)[0]
+
+    return image
+
+
+def _flip(image):
+    """Random horizontal image flip."""
+    image = tf.image.random_flip_left_right(image)
+    return image
+
+
+def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
+    """Preprocesses the given image for training.
+
+    Args:
+        image_bytes: `Tensor` representing an image binary of arbitrary size.
+        use_bfloat16: `bool` for whether to use bfloat16.
+        image_size: image size.
+        interpolation: image interpolation method
+
+    Returns:
+        A preprocessed image `Tensor`. 
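+
+    A minimal usage sketch (editor's illustration; the file name is hypothetical):
+
+        image_bytes = tf.io.read_file('example.jpg')
+        img = preprocess_for_train(image_bytes, use_bfloat16=False, image_size=224)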
+ """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_random_crop(image_bytes, image_size, resize_method) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_center_crop(image_bytes, image_size, resize_method) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_image(image_bytes, + is_training=False, + use_bfloat16=False, + image_size=IMAGE_SIZE, + interpolation='bicubic'): + """Preprocesses the given image. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + is_training: `bool` for whether the preprocessing is for training. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor` with value range of [0, 255]. + """ + if is_training: + return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) + else: + return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) + + +class TfPreprocessTransform: + + def __init__(self, is_training=False, size=224, interpolation='bicubic'): + self.is_training = is_training + self.size = size[0] if isinstance(size, tuple) else size + self.interpolation = interpolation + self._image_bytes = None + self.process_image = self._build_tf_graph() + self.sess = None + + def _build_tf_graph(self): + with tf.device('/cpu:0'): + self._image_bytes = tf.placeholder( + shape=[], + dtype=tf.string, + ) + img = preprocess_image( + self._image_bytes, self.is_training, False, self.size, self.interpolation) + return img + + def __call__(self, image_bytes): + if self.sess is None: + self.sess = tf.Session() + img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) + img = img.round().clip(0, 255).astype(np.uint8) + if img.ndim < 3: + img = np.expand_dims(img, axis=-1) + img = np.rollaxis(img, 2) # HWC to CHW + return img diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb3bc32768f8c153233dc5bf7aa19dff9a80d39 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms.py @@ -0,0 +1,197 @@ +import torch +import torchvision.transforms.functional as F +try: + from torchvision.transforms.functional import InterpolationMode + has_interpolation_mode = True +except ImportError: + has_interpolation_mode = False +from PIL import Image +import warnings +import math +import random +import numpy as np + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = 
np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +# Pillow is deprecating the top-level resampling attributes (e.g., Image.BILINEAR) in +# favor of the Image.Resampling enum. The top-level resampling attributes will be +# removed in Pillow 10. +if hasattr(Image, "Resampling"): + _pil_interpolation_to_str = { + Image.Resampling.NEAREST: 'nearest', + Image.Resampling.BILINEAR: 'bilinear', + Image.Resampling.BICUBIC: 'bicubic', + Image.Resampling.BOX: 'box', + Image.Resampling.HAMMING: 'hamming', + Image.Resampling.LANCZOS: 'lanczos', + } +else: + _pil_interpolation_to_str = { + Image.NEAREST: 'nearest', + Image.BILINEAR: 'bilinear', + Image.BICUBIC: 'bicubic', + Image.BOX: 'box', + Image.HAMMING: 'hamming', + Image.LANCZOS: 'lanczos', + } + +_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()} + + +if has_interpolation_mode: + _torch_interpolation_to_str = { + InterpolationMode.NEAREST: 'nearest', + InterpolationMode.BILINEAR: 'bilinear', + InterpolationMode.BICUBIC: 'bicubic', + InterpolationMode.BOX: 'box', + InterpolationMode.HAMMING: 'hamming', + InterpolationMode.LANCZOS: 'lanczos', + } + _str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()} +else: + _pil_interpolation_to_torch = {} + _torch_interpolation_to_str = {} + + +def str_to_pil_interp(mode_str): + return _str_to_pil_interpolation[mode_str] + + +def str_to_interp_mode(mode_str): + if has_interpolation_mode: + return _str_to_torch_interpolation[mode_str] + else: + return _str_to_pil_interpolation[mode_str] + + +def interp_mode_to_str(mode): + if has_interpolation_mode: + return _torch_interpolation_to_str[mode] + else: + return _pil_interpolation_to_str[mode] + + +_RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic')) + + +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), + interpolation='bilinear'): + if isinstance(size, (list, tuple)): + self.size = tuple(size) + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = str_to_interp_mode(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. 
+ scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation]) + else: + interpolate_str = interp_mode_to_str(self.interpolation) + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms_factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..70f05dbf8393d94f41999cfa599b3e4bdf80f8e6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/data/transforms_factory.py @@ -0,0 +1,236 @@ +""" Transforms Factory +Factory methods for building image transforms for use with TIMM (PyTorch Image Models) + +Hacked together by / Copyright 2019, Ross Wightman +""" +import math + +import torch +from torchvision import transforms + +from custom_timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from custom_timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform +from custom_timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy +from custom_timm.data.random_erasing import RandomErasing + + +def transforms_noaug_train( + img_size=224, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, +): + if interpolation == 'random': + # random interpolation not supported with no-aug + interpolation = 'bilinear' + tfl = [ + transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)), + transforms.CenterCrop(img_size) + ] + if use_prefetcher: + # prefetcher and 
collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + return transforms.Compose(tfl) + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='random', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] + if hflip > 0.: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, (tuple, list)): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = str_to_pil_interp(interpolation) + if auto_augment.startswith('rand'): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] + else: + secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + final_tfl += [ToNumpy()] + else: + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + if re_prob > 0.: + final_tfl.append( + RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) + + if separate: + return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + + +def transforms_imagenet_eval( + img_size=224, + crop_pct=None, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD): + crop_pct = crop_pct or DEFAULT_CROP_PCT + + if isinstance(img_size, (tuple, list)): + assert len(img_size) == 2 + if img_size[-1] == img_size[-2]: + # fall-back to older behaviour so Resize scales to shortest 
edge if target is square + scale_size = int(math.floor(img_size[0] / crop_pct)) + else: + scale_size = tuple([int(x / crop_pct) for x in img_size]) + else: + scale_size = int(math.floor(img_size / crop_pct)) + + tfl = [ + transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)), + transforms.CenterCrop(img_size), + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + + return transforms.Compose(tfl) + + +def create_transform( + input_size, + is_training=False, + use_prefetcher=False, + no_aug=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + crop_pct=None, + tf_preprocessing=False, + separate=False): + + if isinstance(input_size, (tuple, list)): + img_size = input_size[-2:] + else: + img_size = input_size + + if tf_preprocessing and use_prefetcher: + assert not separate, "Separate transforms not supported for TF preprocessing" + from custom_timm.data.tf_preprocessing import TfPreprocessTransform + transform = TfPreprocessTransform( + is_training=is_training, size=img_size, interpolation=interpolation) + else: + if is_training and no_aug: + assert not separate, "Cannot perform split augmentation with no_aug" + transform = transforms_noaug_train( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std) + elif is_training: + transform = transforms_imagenet_train( + img_size, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=separate) + else: + assert not separate, "Separate transforms not supported for validation preprocessing" + transform = transforms_imagenet_eval( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + crop_pct=crop_pct) + + return transform diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7f15f2f79673c962f68d6d4b06898e73ac1df6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/__init__.py @@ -0,0 +1,4 @@ +from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel +from .binary_cross_entropy import BinaryCrossEntropy +from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from .jsd import JsdCrossEntropy diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/asymmetric_loss.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/asymmetric_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b10f9c797c2cb3b2652302717b592dada216f3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/asymmetric_loss.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn + + +class AsymmetricLossMultiLabel(nn.Module): + def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): + 
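+        # gamma_neg > gamma_pos focuses the loss on hard negatives (asymmetric focusing),
+        # while clip shifts negative probabilities up by a fixed margin (asymmetric
+        # clipping) so that very easy negatives contribute ~zero loss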
super(AsymmetricLossMultiLabel, self).__init__() + + self.gamma_neg = gamma_neg + self.gamma_pos = gamma_pos + self.clip = clip + self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss + self.eps = eps + + def forward(self, x, y): + """ + Parameters + ---------- + x: input logits + y: targets (multi-label binarized vector) + """ + + # Calculating Probabilities + x_sigmoid = torch.sigmoid(x) + xs_pos = x_sigmoid + xs_neg = 1 - x_sigmoid + + # Asymmetric Clipping + if self.clip is not None and self.clip > 0: + xs_neg = (xs_neg + self.clip).clamp(max=1) + + # Basic CE calculation + los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) + los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) + loss = los_pos + los_neg + + # Asymmetric Focusing + if self.gamma_neg > 0 or self.gamma_pos > 0: + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(False) + pt0 = xs_pos * y + pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p + pt = pt0 + pt1 + one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) + one_sided_w = torch.pow(1 - pt, one_sided_gamma) + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(True) + loss *= one_sided_w + + return -loss.sum() + + +class AsymmetricLossSingleLabel(nn.Module): + def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): + super(AsymmetricLossSingleLabel, self).__init__() + + self.eps = eps + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] # prevent repeated GPU memory allocation + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.reduction = reduction + + def forward(self, inputs, target, reduction=None): + """ + Parameters + ---------- + inputs: input logits + target: class index targets (converted to one-hot internally) + """ + + num_classes = inputs.size()[-1] + log_preds = self.logsoftmax(inputs) + self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) + + # ASL weights + targets = self.targets_classes + anti_targets = 1 - targets + xs_pos = torch.exp(log_preds) + xs_neg = 1 - xs_pos + xs_pos = xs_pos * targets + xs_neg = xs_neg * anti_targets + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * targets + self.gamma_neg * anti_targets) + log_preds = log_preds * asymmetric_w + + if self.eps > 0: # label smoothing + self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes) + + # loss calculation + loss = - self.targets_classes.mul(log_preds) + + loss = loss.sum(dim=-1) + if self.reduction == 'mean': + loss = loss.mean() + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/binary_cross_entropy.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/binary_cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..ed76c1e8e004ca9a7255cf3650e322e6525c0577 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/binary_cross_entropy.py @@ -0,0 +1,47 @@ +""" Binary Cross Entropy w/ a few extras + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BinaryCrossEntropy(nn.Module): + """ BCE with optional one-hot conversion of index targets, label smoothing, thresholding + NOTE for experiments comparing CE to BCE w/ label smoothing, may be removed + """ + def __init__( + self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, + reduction: str = 'mean', pos_weight: Optional[torch.Tensor] = None):
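+        # smoothing only applies when dense one-hot targets are built from index targets
+        # in forward(); target_threshold then optionally re-binarizes the soft targets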
super(BinaryCrossEntropy, self).__init__() + assert 0. <= smoothing < 1.0 + self.smoothing = smoothing + self.target_threshold = target_threshold + self.reduction = reduction + self.register_buffer('weight', weight) + self.register_buffer('pos_weight', pos_weight) + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + assert x.shape[0] == target.shape[0] + if target.shape != x.shape: + # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse + num_classes = x.shape[-1] + # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ + off_value = self.smoothing / num_classes + on_value = 1. - self.smoothing + off_value + target = target.long().view(-1, 1) + target = torch.full( + (target.size()[0], num_classes), + off_value, + device=x.device, dtype=x.dtype).scatter_(1, target, on_value) + if self.target_threshold is not None: + # Make target 0, or 1 if threshold set + target = target.gt(self.target_threshold).to(dtype=target.dtype) + return F.binary_cross_entropy_with_logits( + x, target, + self.weight, + pos_weight=self.pos_weight, + reduction=self.reduction) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/cross_entropy.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..85198107f3ad2a1ff775a677d77c03569ff5d04d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/cross_entropy.py @@ -0,0 +1,36 @@ +""" Cross Entropy w/ smoothing or soft targets + +Hacked together by / Copyright 2021 Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LabelSmoothingCrossEntropy(nn.Module): + """ NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. 
- smoothing + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + logprobs = F.log_softmax(x, dim=-1) + nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) + nll_loss = nll_loss.squeeze(1) + smooth_loss = -logprobs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + return loss.mean() + + +class SoftTargetCrossEntropy(nn.Module): + + def __init__(self): + super(SoftTargetCrossEntropy, self).__init__() + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) + return loss.mean() diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/jsd.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/jsd.py new file mode 100644 index 0000000000000000000000000000000000000000..dd64e156c23d27aa03817a587ae367e8175fc126 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/loss/jsd.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .cross_entropy import LabelSmoothingCrossEntropy + + +class JsdCrossEntropy(nn.Module): + """ Jensen-Shannon Divergence + Cross-Entropy Loss + + Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + + Hacked together by / Copyright 2020 Ross Wightman + """ + def __init__(self, num_splits=3, alpha=12, smoothing=0.1): + super().__init__() + self.num_splits = num_splits + self.alpha = alpha + if smoothing is not None and smoothing > 0: + self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) + else: + self.cross_entropy_loss = torch.nn.CrossEntropyLoss() + + def __call__(self, output, target): + split_size = output.shape[0] // self.num_splits + assert split_size * self.num_splits == output.shape[0] + logits_split = torch.split(output, split_size) + + # Cross-entropy is only computed on clean images + loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) + probs = [F.softmax(logits, dim=1) for logits in logits_split] + + # Clamp mixture distribution to avoid exploding KL divergence + logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() + loss += self.alpha * sum([F.kl_div( + logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff79595d83197ecfb9a164ae9b9125ec3804863 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/__init__.py @@ -0,0 +1,74 @@ +from .beit import * +from .byoanet import * +from .byobnet import * +from .cait import * +from .coat import * +from .convit import * +from .convmixer import * +from .convnext import * +from .crossvit import * +from .cspnet import * +from .deit import * +from .densenet import * +from .dla import * +from .dpn import * +from .edgenext import * +from .efficientformer import * +from .efficientnet import * +from .gcvit import * +from .ghostnet import * +from .gluon_resnet import * +from .gluon_xception import * +from .hardcorenas import * +from .hrnet import * +from .inception_resnet_v2 import * +from .inception_v3 import * +from .inception_v4 import * +from .levit import * +from .maxxvit import * +from .mlp_mixer 
import * +from .mobilenetv3 import * +from .mobilevit import * +from .mvitv2 import * +from .nasnet import * +from .nest import * +from .nfnet import * +from .pit import * +from .pnasnet import * +from .poolformer import * +from .pvt_v2 import * +from .regnet import * +from .res2net import * +from .resnest import * +from .resnet import * +from .resnetv2 import * +from .rexnet import * +from .selecsls import * +from .senet import * +from .sequencer import * +from .sknet import * +from .swin_transformer import * +from .swin_transformer_v2 import * +from .swin_transformer_v2_cr import * +from .tnt import * +from .tresnet import * +from .twins import * +from .vgg import * +from .visformer import * +from .vision_transformer import * +from .vision_transformer_hybrid import * +from .vision_transformer_relpos import * +from .volo import * +from .vovnet import * +from .xception import * +from .xception_aligned import * +from .xcit import * + +from .factory import create_model, parse_model_name, safe_model_name +from .helpers import load_checkpoint, resume_checkpoint, model_parameters +from .layers import TestTimePoolHead, apply_test_time_pool +from .layers import convert_splitbn_model, convert_sync_batchnorm +from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit +from .layers import set_fast_norm +from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\ + is_model_pretrained, get_pretrained_cfg, has_pretrained_cfg_key, is_pretrained_cfg_key, get_pretrained_cfg_value diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/beit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/beit.py new file mode 100644 index 0000000000000000000000000000000000000000..2f81b008ebfc372aef4c211babc95be32c910629 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/beit.py @@ -0,0 +1,502 @@ +""" BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) + +Model from official source: https://github.com/microsoft/unilm/tree/master/beit +and +https://github.com/microsoft/unilm/tree/master/beit2 + +@inproceedings{beit, +title={{BEiT}: {BERT} Pre-Training of Image Transformers}, +author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei}, +booktitle={International Conference on Learning Representations}, +year={2022}, +url={https://openreview.net/forum?id=p-BhZSz59o4} +} + +@article{beitv2, +title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers}, +author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei}, +year={2022}, +eprint={2208.06366}, +archivePrefix={arXiv}, +primaryClass={cs.CV} +} + +At this point only the 1k fine-tuned classification weights and model configs have been added, +see original source above for pre-training models and procedure. 
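+
+A minimal creation sketch (illustrative, not part of the upstream file; `create_model`
+is re-exported by custom_timm.models, see models/__init__.py above):
+
+    import torch
+    from custom_timm.models import create_model
+    model = create_model('beit_base_patch16_224', pretrained=False)
+    logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)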
+ +Modifications by / Copyright 2021 Ross Wightman, original copyrights below +""" +# -------------------------------------------------------- +# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) +# Github source: https://github.com/microsoft/unilm/tree/master/beit +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# By Hangbo Bao +# Based on timm and DeiT code bases +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'beit_base_patch16_224': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_base_patch16_384': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_base_patch16_224_in22k': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), + 'beit_large_patch16_224': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_large_patch16_384': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_large_patch16_512': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth', + input_size=(3, 512, 512), crop_pct=1.0, + ), + 'beit_large_patch16_224_in22k': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), + + 'beitv2_base_patch16_224': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_base_patch16_224_in22k': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth', + num_classes=21841, + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_large_patch16_224': _cfg( + url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth', + crop_pct=0.95, + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), + 'beitv2_large_patch16_224_in22k': _cfg( + 
url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth', + num_classes=21841, + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD + ), +} + + +def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor: + num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + # cls to token & token 2 cls & cls to cls + # get pair-wise relative position index for each token inside the window + window_area = window_size[0] * window_size[1] + coords = torch.stack(torch.meshgrid( + [torch.arange(window_size[0]), + torch.arange(window_size[1])])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = num_relative_distance - 3 + relative_position_index[0:, 0] = num_relative_distance - 2 + relative_position_index[0, 0] = num_relative_distance - 1 + return relative_position_index + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + self.register_buffer("relative_position_index", gen_relative_position_index(window_size)) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def _get_rel_pos_bias(self): + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None): + B, N, C = x.shape + + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy 
(cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + attn = attn + self._get_rel_pos_bias() + if shared_rel_pos_bias is not None: + attn = attn + shared_rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + window_size=window_size, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values: + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.window_area = window_size[0] * window_size[1] + num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) + # trunc_normal_(self.relative_position_bias_table, std=.02) + self.register_buffer("relative_position_index", gen_relative_position_index(window_size)) + + def forward(self): + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_area + 1, self.window_area + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class Beit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='avg', + embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., + attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + head_init_scale=0.001): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.grad_checkpointing = False + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + 
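+        # a learnable class token is prepended to the patch tokens, so the optional
+        # absolute position embedding below covers num_patches + 1 positions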
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None) + for i in range(depth)]) + use_fc_norm = self.global_pool == 'avg' + self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_fc_norm else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(head_init_scale) + self.head.bias.data.mul_(head_init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + nwd = {'pos_embed', 'cls_token'} + for n, _ in self.named_parameters(): + if 'relative_position_bias_table' in n: + nwd.add(n) + return nwd + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], + ) + return matcher + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias) + else: + x = blk(x, shared_rel_pos_bias=rel_pos_bias) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.fc_norm is not None: + x = x[:, 
1:].mean(dim=1) + x = self.fc_norm(x) + else: + x = x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _beit_checkpoint_filter_fn(state_dict, model): + if 'module' in state_dict: + # beit v2 didn't strip module + state_dict = state_dict['module'] + return checkpoint_filter_fn(state_dict, model) + + +def _create_beit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Beit models.') + + model = build_model_with_cfg( + Beit, variant, pretrained, + # FIXME an updated filter fn needed to interpolate rel pos emb if fine tuning to diff model sizes + pretrained_filter_fn=_beit_checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def beit_base_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_512(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beitv2_base_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + 
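+        # every BEiT variant here trades absolute position embeddings for relative position bias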
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beitv2_base_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beitv2_base_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beitv2_large_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beitv2_large_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beitv2_large_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byoanet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byoanet.py new file mode 100644 index 0000000000000000000000000000000000000000..34a557be90fc1af1ed858a08feb1987ed2281dac --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byoanet.py @@ -0,0 +1,442 @@ +""" Bring-Your-Own-Attention Network + +A flexible network w/ dataclass based config for stacking NN blocks including +self-attention (or similar) layers. + +Currently used to implement experimental variants of: + * Bottleneck Transformers + * Lambda ResNets + * HaloNets + +Consider all of the models definitions here as experimental WIP and likely to change. + +Hacked together by / copyright Ross Wightman, 2021. 
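+
+A rough usage sketch (illustrative only; the names are @register_model entrypoints
+defined at the bottom of this file):
+
+    from custom_timm.models import create_model
+    model = create_model('halonet26t', pretrained=False)  # most cfgs here expect 256x256 inputs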
+""" +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks +from .helpers import build_model_with_cfg +from .registry import register_model + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, 'min_input_size': (3, 224, 224), + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'botnet26t_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'sebotnet33ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'botnet50ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'eca_botnext26ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + + 'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'halonet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'sehalonet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'halonet50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'eca_halonext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + + 'lambda_resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'lambda_resnet50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), + 'lambda_resnet26rpt_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + + 'haloregnetz_b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + first_conv='stem.conv', input_size=(3, 224, 224), 
pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94), + + 'lamhalobotnet50ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'halo2botnet50ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), +} + + +model_cfgs = dict( + + botnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + sebotnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + eca_botnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + act_layer='silu', + attn_layer='eca', + self_attn_layer='bottleneck', + self_attn_kwargs=dict(dim_head=16) + ), + + halonet_h1=ByoModelCfg( + blocks=( + ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), + ), + stem_chs=64, + stem_type='7x7', + stem_pool='maxpool', + + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3), + ), + halonet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2) + ), + 
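+    # ByoBlockCfg field legend for the cfgs in this dict: d=block depth, c=output channels,
+    # s=stage stride, gs=group size (0 -> standard conv), br=bottleneck ratio (see byobnet.py)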
sehalonet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + halonet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + eca_halonext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + + lambda_resnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet26rpt_256=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=None) + ), + + # experimental + haloregnetz_b=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', 
+ attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33) + ), + + # experimental + lamhalobotnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), + halo2botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), +) + + +def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def botnet26t_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sebotnet33ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, + """ + return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def botnet50ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_botnext26ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet_h1(pretrained=False, **kwargs): + """ HaloNet-H1. Halo attention in all stages as per the paper. + NOTE: This runs very slowly! + """ + return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) + + +@register_model +def halonet26t(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages + """ + return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sehalonet33ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. 
+ """ + return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet50ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_halonext26ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26t(pretrained=False, **kwargs): + """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet50ts(pretrained=False, **kwargs): + """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26rpt_256(pretrained=False, **kwargs): + """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) + + +@register_model +def haloregnetz_b(pretrained=False, **kwargs): + """ Halo + RegNetZ + """ + return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) + + +@register_model +def lamhalobotnet50ts_256(pretrained=False, **kwargs): + """ Combo Attention (Lambda + Halo + Bot) Network + """ + return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def halo2botnet50ts_256(pretrained=False, **kwargs): + """ Combo Attention (Halo + Halo + Bot) Network + """ + return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byobnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byobnet.py new file mode 100644 index 0000000000000000000000000000000000000000..71b6dd446af4d779012a6ea149fb7862b2ff3e27 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/byobnet.py @@ -0,0 +1,1587 @@ +""" Bring-Your-Own-Blocks Network + +A flexible network w/ dataclass based config for stacking those NN blocks. + +This model is currently used to implement the following networks: + +GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). +Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 + +RepVGG - repvgg_* +Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT + +In all cases the models have been modified to fit within the design of ByobNet. I've remapped +the original weights and verified accuracies. + +For GPU Efficient nets, I used the original names for the blocks since they were for the most part +the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some +changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. 
+ +A significant number of different network archs can be implemented here, including variants of the +above nets that include attention. + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field, replace +from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply, checkpoint_seq +from .layers import ClassifierHead, ConvNormAct, BatchNormAct2d, DropPath, AvgPool2dSame, \ + create_conv2d, get_act_layer, get_norm_act_layer, get_attn, make_divisible, to_2tuple, EvoNorm2dS0, EvoNorm2dS0a,\ + EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a, FilterResponseNormAct2d, FilterResponseNormTlu2d +from .registry import register_model + +__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgr(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'gernet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'), + 'gernet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'), + 'gernet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # RepVGG weights + 'repvgg_a2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth', + first_conv=('stem.conv_kxk.conv', 
'stem.conv_1x1.conv')), + 'repvgg_b3g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + + # experimental configs + 'resnet51q': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', + first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), crop_pct=1.0), + 'resnet61q': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', + test_input_size=(3, 288, 288), crop_pct=1.0), + + 'resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth'), + 'gcresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth'), + 'seresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth'), + 'eca_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth'), + 'bat_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', + min_input_size=(3, 256, 256)), + + 'resnet32ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth'), + 'resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth'), + 'gcresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth'), + 'seresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth'), + 'eca_resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth'), + + 'gcresnet50t': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth'), + + 'gcresnext50ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth'), + + # experimental models, likely to change or be removed + 'regnetz_b16': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), test_input_size=(3, 288, 288), first_conv='stem.conv', crop_pct=0.94), + 'regnetz_c16': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), first_conv='stem.conv', crop_pct=0.94), + 'regnetz_d32': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=0.95), + 'regnetz_d8': _cfgr( +
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=1.0), + 'regnetz_e8': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=1.0), + + 'regnetz_b16_evos': _cfgr( + url='', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), test_input_size=(3, 288, 288), first_conv='stem.conv', + crop_pct=0.94), + 'regnetz_c16_evos': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_c16_evos_ch-d8311942.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), first_conv='stem.conv', crop_pct=0.95), + 'regnetz_d8_evos': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_d8_evos_ch-2bc12646.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=0.95), +} + + +@dataclass +class ByoBlockCfg: + type: Union[str, nn.Module] + d: int # block depth (number of block repeats in stage) + c: int # number of output channels for each block in stage + s: int = 2 # stride of stage (first block) + gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 + br: float = 1. # bottleneck-ratio of blocks in stage + + # NOTE: these config items override the model cfgs that are applied to all blocks by default + attn_layer: Optional[str] = None + attn_kwargs: Optional[Dict[str, Any]] = None + self_attn_layer: Optional[str] = None + self_attn_kwargs: Optional[Dict[str, Any]] = None + block_kwargs: Optional[Dict[str, Any]] = None + + +@dataclass +class ByoModelCfg: + blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
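+    # NOTE: a worked reading of a stage spec, with hypothetical values, e.g.
+    # ByoBlockCfg(type='bottle', d=3, c=256, s=2, gs=32, br=0.25) describes a stage of 3 bottleneck
+    # blocks, 256 output channels, stride 2 on the first block, bottleneck width
+    # make_divisible(256 * 0.25) = 64, and 64 // 32 = 2 conv groups (see num_groups() further below).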
+ downsample: str = 'conv1x1' + stem_type: str = '3x3' + stem_pool: Optional[str] = 'maxpool' + stem_chs: int = 32 + width_factor: float = 1.0 + num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation + + act_layer: str = 'relu' + norm_layer: str = 'batchnorm' + + # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there + attn_layer: Optional[str] = None + attn_kwargs: dict = field(default_factory=lambda: dict()) + self_attn_layer: Optional[str] = None + self_attn_kwargs: dict = field(default_factory=lambda: dict()) + block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) + + +def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): + c = (64, 128, 256, 512) + group_size = 0 + if groups > 0: + group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 + bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) + return bcfg + + +def interleave_blocks( + types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs +) -> Tuple[ByoBlockCfg]: + """ interleave 2 block types in stack + """ + assert len(types) == 2 + if isinstance(every, int): + every = list(range(0 if first else every, d, every + 1)) + if not every: + every = [d - 1] + set(every) + blocks = [] + for i in range(d): + block_type = types[1] if i in every else types[0] + blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] + return tuple(blocks) + + +model_cfgs = dict( + gernet_l=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_m=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_s=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), + ), + stem_chs=13, + stem_pool=None, + num_features=1920, + ), + + repvgg_a2=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b0=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2g4=ByoModelCfg( + 
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + + # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks + # DW convs in last block, 2048 pre-FC, silu act + resnet51q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad2', + stem_pool=None, + num_features=2048, + act_layer='silu', + ), + + # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks + # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act + resnet61q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad', + stem_pool=None, + num_features=2048, + act_layer='silu', + block_kwargs=dict(extra_conv=True), + ), + + # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, + # and a tiered stem w/ maxpool + resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + ), + gcresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='gca', + ), + seresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='se', + ), + eca_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + ), + bat_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + 
stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='bat', + attn_kwargs=dict(block_size=8) + ), + + # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool + resnet32ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=0, + act_layer='silu', + ), + + # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool + resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + ), + + # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat + # and a tiered stem w/ no maxpool + gcresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='gca', + ), + seresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='se', + ), + eca_resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='eca', + ), + + gcresnet50t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + attn_layer='gca', + ), + + gcresnext50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + # stem_pool=None, + act_layer='silu', + attn_layer='gca', + ), + + # experimental models, closer to a RegNetZ than a ResNet. 
Similar to EfficientNets but w/ groups instead of DW + regnetz_b16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d32=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_e8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=2048, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + + # experimental EvoNorm configs + regnetz_b16_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c16_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + 
attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='deep', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + norm_layer=partial(EvoNorm2dS0a, group_size=16), + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), +) + +@register_model +def gernet_l(pretrained=False, **kwargs): + """ GEResNet-Large (GENet-Large from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_m(pretrained=False, **kwargs): + """ GEResNet-Medium (GENet-Normal from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_s(pretrained=False, **kwargs): + """ EResNet-Small (GENet-Small from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a2(pretrained=False, **kwargs): + """ RepVGG-A2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b0(pretrained=False, **kwargs): + """ RepVGG-B0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1(pretrained=False, **kwargs): + """ RepVGG-B1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1g4(pretrained=False, **kwargs): + """ RepVGG-B1g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2(pretrained=False, **kwargs): + """ RepVGG-B2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2g4(pretrained=False, **kwargs): + """ RepVGG-B2g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3(pretrained=False, **kwargs): + """ RepVGG-B3 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3g4(pretrained=False, **kwargs): + """ RepVGG-B3g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) + + +@register_model +def resnet51q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet51q', pretrained=pretrained, 
**kwargs) + + +@register_model +def resnet61q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) + + +@register_model +def resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def bat_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet32ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet50t(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext50ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b16(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c16(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d32(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_e8(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b16_evos(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_b16_evos', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c16_evos(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_c16_evos', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8_evos(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) + + +def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: + if not isinstance(stage_blocks_cfg, Sequence): + stage_blocks_cfg = (stage_blocks_cfg,) + block_cfgs = [] + for i, cfg in enumerate(stage_blocks_cfg): + block_cfgs += [replace(cfg, d=1) 
for _ in range(cfg.d)] + return block_cfgs + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +@dataclass +class LayerFn: + conv_norm_act: Callable = ConvNormAct + norm_act: Callable = BatchNormAct2d + act: Callable = nn.ReLU + attn: Optional[Callable] = None + self_attn: Optional[Callable] = None + + +class DownsampleAvg(nn.Module): + def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None): + """ AvgPool Downsampling as in 'D' ResNet variants.""" + super(DownsampleAvg, self).__init__() + layers = layers or LayerFn() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) + + def forward(self, x): + return self.conv(self.pool(x)) + + +def create_shortcut(downsample_type, layers: LayerFn, in_chs, out_chs, stride, dilation, **kwargs): + assert downsample_type in ('avg', 'conv1x1', '') + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + if not downsample_type: + return None # no shortcut + elif downsample_type == 'avg': + return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) + else: + return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) + else: + return nn.Identity() # identity shortcut + + +class BasicBlock(nn.Module): + """ ResNet Basic Block - kxk + kxk + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(BasicBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_layer=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity()
+        self.act = nn.Identity() if linear_out else layers.act(inplace=True)
+
+    def init_weights(self, zero_init_last: bool = False):
+        if zero_init_last and self.shortcut is not None:
+            nn.init.zeros_(self.conv2_kxk.bn.weight)
+        for attn in (self.attn, self.attn_last):
+            if hasattr(attn, 'reset_parameters'):
+                attn.reset_parameters()
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1_kxk(x)
+        x = self.conv2_kxk(x)
+        x = self.attn(x)
+        x = self.drop_path(x)
+        if self.shortcut is not None:
+            x = x + self.shortcut(shortcut)
+        return self.act(x)
+
+
+class BottleneckBlock(nn.Module):
+    """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
+    """
+
+    def __init__(
+            self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
+            downsample='avg', attn_last=False, linear_out=False, extra_conv=False, bottle_in=False,
+            layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
+        super(BottleneckBlock, self).__init__()
+        layers = layers or LayerFn()
+        mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio)
+        groups = num_groups(group_size, mid_chs)
+
+        self.shortcut = create_shortcut(
+            downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation,
+            apply_act=False, layers=layers)
+
+        self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
+        self.conv2_kxk = layers.conv_norm_act(
+            mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block)
+        if extra_conv:
+            self.conv2b_kxk = layers.conv_norm_act(mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups)
+        else:
+            self.conv2b_kxk = nn.Identity()
+        self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
+        self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
+        self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+        self.act = nn.Identity() if linear_out else layers.act(inplace=True)
+
+    def init_weights(self, zero_init_last: bool = False):
+        if zero_init_last and self.shortcut is not None:
+            nn.init.zeros_(self.conv3_1x1.bn.weight)
+        for attn in (self.attn, self.attn_last):
+            if hasattr(attn, 'reset_parameters'):
+                attn.reset_parameters()
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1_1x1(x)
+        x = self.conv2_kxk(x)
+        x = self.conv2b_kxk(x)
+        x = self.attn(x)
+        x = self.conv3_1x1(x)
+        x = self.attn_last(x)
+        x = self.drop_path(x)
+        if self.shortcut is not None:
+            x = x + self.shortcut(shortcut)
+        return self.act(x)
+
+
+class DarkBlock(nn.Module):
+    """ DarkNet-like (1x1 + 3x3 w/ stride) block
+
+    The GE-Net impl included a 1x1 + 3x3 block in its search space. It was not used in the feature models.
+    This block is pretty much a DarkNet block (also DenseNet), hence the name. Neither DarkNet nor DenseNet
+    uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
+
+    If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)
+    for more optimal compute.
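+
+    A rough sketch of the two orderings (per the forward() impls below; attn layers are optional):
+        DarkBlock:  1x1 conv -> attn -> kxk conv (w/ stride) -> attn_last -> add shortcut -> act
+        EdgeBlock:  kxk conv (w/ stride) -> attn -> 1x1 conv -> attn_last -> add shortcut -> act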
+    """
+
+    def __init__(
+            self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
+            downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
+            drop_path_rate=0.):
+        super(DarkBlock, self).__init__()
+        layers = layers or LayerFn()
+        mid_chs = make_divisible(out_chs * bottle_ratio)
+        groups = num_groups(group_size, mid_chs)
+
+        self.shortcut = create_shortcut(
+            downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation,
+            apply_act=False, layers=layers)
+
+        self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
+        self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
+        self.conv2_kxk = layers.conv_norm_act(
+            mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
+            groups=groups, drop_layer=drop_block, apply_act=False)
+        self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+        self.act = nn.Identity() if linear_out else layers.act(inplace=True)
+
+    def init_weights(self, zero_init_last: bool = False):
+        if zero_init_last and self.shortcut is not None:
+            nn.init.zeros_(self.conv2_kxk.bn.weight)
+        for attn in (self.attn, self.attn_last):
+            if hasattr(attn, 'reset_parameters'):
+                attn.reset_parameters()
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1_1x1(x)
+        x = self.attn(x)
+        x = self.conv2_kxk(x)
+        x = self.attn_last(x)
+        x = self.drop_path(x)
+        if self.shortcut is not None:
+            x = x + self.shortcut(shortcut)
+        return self.act(x)
+
+
+class EdgeBlock(nn.Module):
+    """ EdgeResidual-like (3x3 + 1x1) block
+
+    A two-layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
+    Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
+    intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
+
+    FIXME is there a more common 3x3 + 1x1 conv block to name this after?
+    """
+
+    def __init__(
+            self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
+            downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
+            drop_block=None, drop_path_rate=0.):
+        super(EdgeBlock, self).__init__()
+        layers = layers or LayerFn()
+        mid_chs = make_divisible(out_chs * bottle_ratio)
+        groups = num_groups(group_size, mid_chs)
+
+        self.shortcut = create_shortcut(
+            downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation,
+            apply_act=False, layers=layers)
+
+        self.conv1_kxk = layers.conv_norm_act(
+            in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block)
+        self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
+        self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
+        self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class RepVggBlock(nn.Module): + """ RepVGG Block. + + Adapted from impl at https://github.com/DingXiaoH/RepVGG + + This version does not currently support the deploy optimization. It is currently fixed in 'train' mode. + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(RepVggBlock, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + self.conv_kxk = layers.conv_norm_act( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_layer=drop_block, apply_act=False) + self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + self.act = layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + # NOTE this init overrides that base model init with specific changes for the block type + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + nn.init.normal_(m.weight, .1, .1) + nn.init.normal_(m.bias, 0, .1) + if hasattr(self.attn, 'reset_parameters'): + self.attn.reset_parameters() + + def forward(self, x): + if self.identity is None: + x = self.conv_1x1(x) + self.conv_kxk(x) + else: + identity = self.identity(x) + x = self.conv_1x1(x) + self.conv_kxk(x) + x = self.drop_path(x) # not in the paper / official impl, experimental + x = x + identity + x = self.attn(x) # no attn in the paper / official impl, experimental + return self.act(x) + + +class SelfAttnBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', extra_conv=False, linear_out=False, bottle_in=False, post_attn_na=True, + feat_size=None, layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(SelfAttnBlock, self).__init__() + assert layers is not None + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + if extra_conv: + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_layer=drop_block) + stride = 1 # striding done via conv if enabled + else: + self.conv2_kxk = 
nn.Identity()
+        opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
+        # FIXME need to dilate self attn to have dilated network support, moop moop
+        self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
+        self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
+        self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+        self.act = nn.Identity() if linear_out else layers.act(inplace=True)
+
+    def init_weights(self, zero_init_last: bool = False):
+        if zero_init_last and self.shortcut is not None:
+            nn.init.zeros_(self.conv3_1x1.bn.weight)
+        if hasattr(self.self_attn, 'reset_parameters'):
+            self.self_attn.reset_parameters()
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1_1x1(x)
+        x = self.conv2_kxk(x)
+        x = self.self_attn(x)
+        x = self.post_attn(x)
+        x = self.conv3_1x1(x)
+        x = self.drop_path(x)
+        if self.shortcut is not None:
+            x = x + self.shortcut(shortcut)
+        return self.act(x)
+
+_block_registry = dict(
+    basic=BasicBlock,
+    bottle=BottleneckBlock,
+    dark=DarkBlock,
+    edge=EdgeBlock,
+    rep=RepVggBlock,
+    self_attn=SelfAttnBlock,
+)
+
+
+def register_block(block_type: str, block_fn: nn.Module):
+    _block_registry[block_type] = block_fn
+
+
+def create_block(block: Union[str, nn.Module], **kwargs):
+    if isinstance(block, (nn.Module, partial)):
+        return block(**kwargs)
+    assert block in _block_registry, f'Unknown block type ({block})'
+    return _block_registry[block](**kwargs)
+
+
+class Stem(nn.Sequential):
+
+    def __init__(
+            self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
+            num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
+        super().__init__()
+        assert stride in (2, 4)
+        layers = layers or LayerFn()
+
+        if isinstance(out_chs, (list, tuple)):
+            num_rep = len(out_chs)
+            stem_chs = out_chs
+        else:
+            stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
+
+        self.stride = stride
+        self.feature_info = []  # track intermediate features
+        prev_feat = ''
+        stem_strides = [2] + [1] * (num_rep - 1)
+        if stride == 4 and not pool:
+            # set last conv in stack to be strided if stride == 4 and no pooling layer
+            stem_strides[-1] = 2
+
+        num_act = num_rep if num_act is None else num_act
+        # if num_act < num_rep, first convs in stack won't have bn + act
+        stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
+        prev_chs = in_chs
+        curr_stride = 1
+        for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
+            layer_fn = layers.conv_norm_act if na else create_conv2d
+            conv_name = f'conv{i + 1}'
+            if i > 0 and s > 1:
+                self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
+            self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
+            prev_chs = ch
+            curr_stride *= s
+            prev_feat = conv_name
+
+        if pool and 'max' in pool.lower():
+            self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
+            self.add_module('pool', nn.MaxPool2d(3, 2, 1))
+            curr_stride *= 2
+            prev_feat = 'pool'
+
+        self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
+        assert curr_stride == stride
+
+
+def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
+    layers = layers or LayerFn()
+    assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
+    if 'quad' in stem_type:
+        # based on NFNet stem, 
stack of 4 3x3 convs + num_act = 2 if 'quad2' in stem_type else None + stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) + elif 'tiered' in stem_type: + # 3x3 stack of 3 convs as in my ResNet-T + stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) + elif 'deep' in stem_type: + # 3x3 stack of 3 convs as in ResNet-D + stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) + elif 'rep' in stem_type: + stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) + elif '7x7' in stem_type: + # 7x7 stem conv as in ResNet + if pool_type: + stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) + else: + # 3x3 stem conv as in RegNet is the default + if pool_type: + stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) + + if isinstance(stem, Stem): + feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] + else: + feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] + return stem, feature_info + + +def reduce_feat_size(feat_size, stride=2): + return None if feat_size is None else tuple([s // stride for s in feat_size]) + + +def override_kwargs(block_kwargs, model_kwargs): + """ Override model level attn/self-attn/block kwargs w/ block level + + NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs + for the block if set to anything that isn't None. + + i.e. an empty block_kwargs dict will remove kwargs set at model level for that block + """ + out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs + return out_kwargs or {} # make sure None isn't returned + + +def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): + layer_fns = block_kwargs['layers'] + + # override attn layer / args with block local config + attn_set = block_cfg.attn_layer is not None + if attn_set or block_cfg.attn_kwargs is not None: + # override attn layer config + if attn_set and not block_cfg.attn_layer: + # empty string for attn_layer type will disable attn for this block + attn_layer = None + else: + attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) + attn_layer = block_cfg.attn_layer or model_cfg.attn_layer + attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None + layer_fns = replace(layer_fns, attn=attn_layer) + + # override self-attn layer / args with block local cfg + self_attn_set = block_cfg.self_attn_layer is not None + if self_attn_set or block_cfg.self_attn_kwargs is not None: + # override attn layer config + if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' + # empty string for self_attn_layer type will disable attn for this block + self_attn_layer = None + else: + self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) + self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer + self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ + if self_attn_layer is not None else None + layer_fns = replace(layer_fns, self_attn=self_attn_layer) + + block_kwargs['layers'] = layer_fns + + # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set + 
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) + + +def create_byob_stages( + cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], + feat_size: Optional[int] = None, + layers: Optional[LayerFn] = None, + block_kwargs_fn: Optional[Callable] = update_block_kwargs): + + layers = layers or LayerFn() + feature_info = [] + block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] + depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + stages = [] + for stage_idx, stage_block_cfgs in enumerate(block_cfgs): + stride = stage_block_cfgs[0].s + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx, block_cfg in enumerate(stage_block_cfgs): + out_chs = make_divisible(block_cfg.c * cfg.width_factor) + group_size = block_cfg.gs + if isinstance(group_size, Callable): + group_size = group_size(out_chs, block_idx) + block_kwargs = dict( # Blocks used in this model must accept these arguments + in_chs=prev_chs, + out_chs=out_chs, + stride=stride if block_idx == 0 else 1, + dilation=(first_dilation, dilation), + group_size=group_size, + bottle_ratio=block_cfg.br, + downsample=cfg.downsample, + drop_path_rate=dpr[stage_idx][block_idx], + layers=layers, + ) + if block_cfg.type in ('self_attn',): + # add feat_size arg for blocks that support/need it + block_kwargs['feat_size'] = feat_size + block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) + blocks += [create_block(block_cfg.type, **block_kwargs)] + first_dilation = dilation + prev_chs = out_chs + if stride > 1 and block_idx == 0: + feat_size = reduce_feat_size(feat_size, stride) + + stages += [nn.Sequential(*blocks)] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +def get_layer_fns(cfg: ByoModelCfg): + act = get_act_layer(cfg.act_layer) + norm_act = get_norm_act_layer(norm_layer=cfg.norm_layer, act_layer=act) + conv_norm_act = partial(ConvNormAct, norm_layer=cfg.norm_layer, act_layer=act) + attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None + layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) + return layer_fn + + +class ByobNet(nn.Module): + """ 'Bring-your-own-blocks' Net + + A flexible network backbone that allows building model stem + blocks via + dataclass cfg definition w/ factory functions for module instantiation. + + Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). 
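+
+    A minimal usage sketch (hypothetical num_classes, assuming the cfgs registered above):
+
+        model = ByobNet(model_cfgs['gernet_s'], num_classes=10)
+        logits = model(torch.randn(2, 3, 224, 224))  # -> shape (2, 10)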
+ """ + def __init__( + self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + layers = get_layer_fns(cfg) + if cfg.fixed_input_size: + assert img_size is not None, 'img_size argument is required for fixed input size model' + feat_size = to_2tuple(img_size) if img_size is not None else None + + self.feature_info = [] + stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) + self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers) + self.feature_info.extend(stem_feat[:-1]) + feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) + + self.stages, stage_feat = create_byob_stages( + cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size) + self.feature_info.extend(stage_feat[:-1]) + + prev_chs = stage_feat[-1]['num_chs'] + if cfg.num_features: + self.num_features = int(round(cfg.width_factor * cfg.num_features)) + self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1) + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.feature_info += [ + dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')] + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + # init weights + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=[ + (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), + (r'^final_conv', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.final_conv(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights(zero_init_last=zero_init_last) + + +def _create_byobnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cait.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cait.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8ec277e8fa8027b340872ccb7a6179479d4bee --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cait.py @@ -0,0 +1,421 @@ +""" Class-Attention in Image Transformers (CaiT) + +Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 + +Original code and weights from https://github.com/facebookresearch/deit, copyright below + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model + + +__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + cait_xxs24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', + ), + cait_xxs36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', + ), + cait_xs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', + ), + cait_s24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', + input_size=(3, 224, 224), + ), + cait_s24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', + ), + cait_s36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', + ), + cait_m36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', + ), + cait_m48_448=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', + input_size=(3, 448, 448), + ), +) + + +class ClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to do CA + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + q = q * self.scale + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) + attn = 
attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class LayerScaleBlockClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add CA and LayerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x, x_cls): + u = torch.cat((x_cls, x), dim=1) + x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) + x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) + return x_cls + + +class TalkingHeadAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + + self.num_heads = num_heads + + head_dim = dim // num_heads + + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + + self.proj_l = nn.Linear(num_heads, num_heads) + self.proj_w = nn.Linear(num_heads, num_heads) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) + + attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attn = attn.softmax(dim=-1) + + attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScaleBlock(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add layerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) + self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class Cait(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to adapt to our cait models + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', + embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + block_layers=LayerScaleBlock, + block_layers_token=LayerScaleBlockClassAttn, + patch_layer=PatchEmbed, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_values=1e-4, + attn_block_token_only=ClassAttn, + mlp_block_token_only=Mlp, + depth_token_only=2, + mlp_ratio_token_only=4.0 + ): + super().__init__() + assert global_pool in ('', 'token', 'avg') + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim + self.grad_checkpointing = False + + self.patch_embed = patch_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [drop_path_rate for i in range(depth)] + self.blocks = nn.Sequential(*[ + block_layers( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_values) + for i in range(depth)]) + + self.blocks_token_only = nn.ModuleList([ + block_layers_token( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_token_only, qkv_bias=qkv_bias, + drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block_token_only, + mlp_block=mlp_block_token_only, init_values=init_values) + for i in range(depth_token_only)]) + + self.norm = norm_layer(embed_dim) + + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def group_matcher(self, coarse=False): + def _matcher(name): + if any([name.startswith(n) for n in ('cls_token', 'pos_embed', 'patch_embed')]): + return 
0 + elif name.startswith('blocks.'): + return int(name.split('.')[1]) + 1 + elif name.startswith('blocks_token_only.'): + # overlap token only blocks with last blocks + to_offset = len(self.blocks) - len(self.blocks_token_only) + 1 + return int(name.split('.')[1]) + to_offset + elif name.startswith('norm.'): + return len(self.blocks) + else: + return float('inf') + return _matcher + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + x = torch.cat((cls_tokens, x), dim=1) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model=None): + if 'model' in state_dict: + state_dict = state_dict['model'] + checkpoint_no_module = {} + for k, v in state_dict.items(): + checkpoint_no_module[k.replace('module.', '')] = v + return checkpoint_no_module + + +def _create_cait(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Cait, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def cait_xxs24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5, **kwargs) + model = _create_cait('cait_xxs24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_values=1e-5, **kwargs) + model = _create_cait('cait_xxs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5, **kwargs) + model = _create_cait('cait_xxs36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_values=1e-5, **kwargs) + model = _create_cait('cait_xxs36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_values=1e-5, **kwargs) + model = _create_cait('cait_xs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5, 
**kwargs) + model = _create_cait('cait_s24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_values=1e-5, **kwargs) + model = _create_cait('cait_s24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_values=1e-6, **kwargs) + model = _create_cait('cait_s36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_values=1e-6, **kwargs) + model = _create_cait('cait_m36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m48_448(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_values=1e-6, **kwargs) + model = _create_cait('cait_m48_448', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/coat.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/coat.py new file mode 100644 index 0000000000000000000000000000000000000000..6af1bd8824141c9bfe4404970606d0d9def9ce6a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/coat.py @@ -0,0 +1,689 @@ +""" +CoaT architecture. + +Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 + +Official CoaT code at: https://github.com/mlpc-ucsd/CoaT + +Modified from custom_timm/models/vision_transformer.py +""" +from copy import deepcopy +from functools import partial +from typing import Tuple, List, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .layers import _assert + + +__all__ = [ + "coat_tiny", + "coat_mini", + "coat_lite_tiny", + "coat_lite_mini", + "coat_lite_small" +] + + +def _cfg_coat(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed1.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'coat_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_tiny-473c2a20.pth' + ), + 'coat_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_mini-2c6baf49.pth' + ), + 'coat_lite_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_tiny-461b07a7.pth' + ), + 'coat_lite_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_mini-d7842000.pth' + ), + 'coat_lite_small': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_small-fea1d5a1.pth' + ), +} + + +class ConvRelPosEnc(nn.Module): + """ Convolutional relative position encoding. 
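+ Depthwise 2D convolutions with per-head window sizes are applied to the
+ value tokens and gated elementwise by the query, giving a position-aware
+ term that FactorAttnConvRelPosEnc adds to its factorized attention output.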
""" + def __init__(self, Ch, h, window): + """ + Initialization. + Ch: Channels per head. + h: Number of heads. + window: Window size(s) in convolutional relative positional encoding. It can have two forms: + 1. An integer of window size, which assigns all attention heads with the same window s + size in ConvRelPosEnc. + 2. A dict mapping window size to #attention head splits ( + e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) + It will apply different window size to the attention head splits. + """ + super().__init__() + + if isinstance(window, int): + # Set the same window size for all attention heads. + window = {window: h} + self.window = window + elif isinstance(window, dict): + self.window = window + else: + raise ValueError() + + self.conv_list = nn.ModuleList() + self.head_splits = [] + for cur_window, cur_head_split in window.items(): + dilation = 1 + # Determine padding size. + # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 + padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 + cur_conv = nn.Conv2d(cur_head_split*Ch, cur_head_split*Ch, + kernel_size=(cur_window, cur_window), + padding=(padding_size, padding_size), + dilation=(dilation, dilation), + groups=cur_head_split*Ch, + ) + self.conv_list.append(cur_conv) + self.head_splits.append(cur_head_split) + self.channel_splits = [x*Ch for x in self.head_splits] + + def forward(self, q, v, size: Tuple[int, int]): + B, h, N, Ch = q.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Convolutional relative position encoding. + q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] + v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] + + v_img = v_img.transpose(-1, -2).reshape(B, h * Ch, H, W) + v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels + conv_v_img_list = [] + for i, conv in enumerate(self.conv_list): + conv_v_img_list.append(conv(v_img_list[i])) + conv_v_img = torch.cat(conv_v_img_list, dim=1) + conv_v_img = conv_v_img.reshape(B, h, Ch, H * W).transpose(-1, -2) + + EV_hat = q_img * conv_v_img + EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. + return EV_hat + + +class FactorAttnConvRelPosEnc(nn.Module): + """ Factorized attention with convolutional relative position encoding class. """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + # Shared convolutional relative position encoding. + self.crpe = shared_crpe + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + + # Generate Q, K, V. + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # [B, h, N, Ch] + + # Factorized attention. + k_softmax = k.softmax(dim=2) + factor_att = k_softmax.transpose(-1, -2) @ v + factor_att = q @ factor_att + + # Convolutional relative position encoding. + crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] + + # Merge and reshape. + x = self.scale * factor_att + crpe + x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] + + # Output projection. 
+ x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class ConvPosEnc(nn.Module): + """ Convolutional Position Encoding. + Note: This module is similar to the conditional position encoding in CPVT. + """ + def __init__(self, dim, k=3): + super(ConvPosEnc, self).__init__() + self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Extract CLS token and image tokens. + cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] + + # Depthwise convolution. + feat = img_tokens.transpose(1, 2).view(B, C, H, W) + x = self.proj(feat) + feat + x = x.flatten(2).transpose(1, 2) + + # Combine with CLS token. + x = torch.cat((cls_token, x), dim=1) + + return x + + +class SerialBlock(nn.Module): + """ Serial block class. + Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None): + super().__init__() + + # Conv-Attention. + self.cpe = shared_cpe + + self.norm1 = norm_layer(dim) + self.factoratt_crpe = FactorAttnConvRelPosEnc( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Tuple[int, int]): + # Conv-Attention. + x = self.cpe(x, size) + cur = self.norm1(x) + cur = self.factoratt_crpe(cur, size) + x = x + self.drop_path(cur) + + # MLP. + cur = self.norm2(x) + cur = self.mlp(cur) + x = x + self.drop_path(cur) + + return x + + +class ParallelBlock(nn.Module): + """ Parallel block class. """ + def __init__(self, dims, num_heads, mlp_ratios=[], qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None): + super().__init__() + + # Conv-Attention. + self.norm12 = norm_layer(dims[1]) + self.norm13 = norm_layer(dims[2]) + self.norm14 = norm_layer(dims[3]) + self.factoratt_crpe2 = FactorAttnConvRelPosEnc( + dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[1] + ) + self.factoratt_crpe3 = FactorAttnConvRelPosEnc( + dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[2] + ) + self.factoratt_crpe4 = FactorAttnConvRelPosEnc( + dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[3] + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm22 = norm_layer(dims[1]) + self.norm23 = norm_layer(dims[2]) + self.norm24 = norm_layer(dims[3]) + # In parallel block, we assume dimensions are the same and share the linear transformation. + assert dims[1] == dims[2] == dims[3] + assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] + mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) + self.mlp2 = self.mlp3 = self.mlp4 = Mlp( + in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def upsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map up-sampling. 
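+ `size` is the current (H, W) token grid; the CLS token passes through
+ unchanged while image tokens are resized bilinearly by `factor`.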
""" + return self.interpolate(x, scale_factor=factor, size=size) + + def downsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map down-sampling. """ + return self.interpolate(x, scale_factor=1.0/factor, size=size) + + def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): + """ Feature map interpolation. """ + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + cls_token = x[:, :1, :] + img_tokens = x[:, 1:, :] + + img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) + img_tokens = F.interpolate( + img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False) + img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) + + out = torch.cat((cls_token, img_tokens), dim=1) + + return out + + def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): + _, S2, S3, S4 = sizes + cur2 = self.norm12(x2) + cur3 = self.norm13(x3) + cur4 = self.norm14(x4) + cur2 = self.factoratt_crpe2(cur2, size=S2) + cur3 = self.factoratt_crpe3(cur3, size=S3) + cur4 = self.factoratt_crpe4(cur4, size=S4) + upsample3_2 = self.upsample(cur3, factor=2., size=S3) + upsample4_3 = self.upsample(cur4, factor=2., size=S4) + upsample4_2 = self.upsample(cur4, factor=4., size=S4) + downsample2_3 = self.downsample(cur2, factor=2., size=S2) + downsample3_4 = self.downsample(cur3, factor=2., size=S3) + downsample2_4 = self.downsample(cur2, factor=4., size=S2) + cur2 = cur2 + upsample3_2 + upsample4_2 + cur3 = cur3 + upsample4_3 + downsample2_3 + cur4 = cur4 + downsample3_4 + downsample2_4 + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + # MLP. + cur2 = self.norm22(x2) + cur3 = self.norm23(x3) + cur4 = self.norm24(x4) + cur2 = self.mlp2(cur2) + cur3 = self.mlp3(cur3) + cur4 = self.mlp4(cur4) + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + return x1, x2, x3, x4 + + +class CoaT(nn.Module): + """ CoaT class. """ + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(0, 0, 0, 0), + serial_depths=(0, 0, 0, 0), parallel_depth=0, num_heads=0, mlp_ratios=(0, 0, 0, 0), qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + return_interm_layers=False, out_features=None, crpe_window=None, global_pool='token'): + super().__init__() + assert global_pool in ('token', 'avg') + crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} + self.return_interm_layers = return_interm_layers + self.out_features = out_features + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + self.num_classes = num_classes + self.global_pool = global_pool + + # Patch embeddings. + img_size = to_2tuple(img_size) + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) + self.patch_embed2 = PatchEmbed( + img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) + self.patch_embed3 = PatchEmbed( + img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) + self.patch_embed4 = PatchEmbed( + img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], + embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) + + # Class tokens. 
+ self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) + self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) + self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) + self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) + + # Convolutional position encodings. + self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) + self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) + self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) + self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) + + # Convolutional relative position encodings. + self.crpe1 = ConvRelPosEnc(Ch=embed_dims[0] // num_heads, h=num_heads, window=crpe_window) + self.crpe2 = ConvRelPosEnc(Ch=embed_dims[1] // num_heads, h=num_heads, window=crpe_window) + self.crpe3 = ConvRelPosEnc(Ch=embed_dims[2] // num_heads, h=num_heads, window=crpe_window) + self.crpe4 = ConvRelPosEnc(Ch=embed_dims[3] // num_heads, h=num_heads, window=crpe_window) + + # Disable stochastic depth. + dpr = drop_path_rate + assert dpr == 0.0 + + # Serial blocks 1. + self.serial_blocks1 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[0], num_heads=num_heads, mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe1, shared_crpe=self.crpe1 + ) + for _ in range(serial_depths[0])] + ) + + # Serial blocks 2. + self.serial_blocks2 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[1], num_heads=num_heads, mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe2, shared_crpe=self.crpe2 + ) + for _ in range(serial_depths[1])] + ) + + # Serial blocks 3. + self.serial_blocks3 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[2], num_heads=num_heads, mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe3, shared_crpe=self.crpe3 + ) + for _ in range(serial_depths[2])] + ) + + # Serial blocks 4. + self.serial_blocks4 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[3], num_heads=num_heads, mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe4, shared_crpe=self.crpe4 + ) + for _ in range(serial_depths[3])] + ) + + # Parallel blocks. + self.parallel_depth = parallel_depth + if self.parallel_depth > 0: + self.parallel_blocks = nn.ModuleList([ + ParallelBlock( + dims=embed_dims, num_heads=num_heads, mlp_ratios=mlp_ratios, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4) + ) + for _ in range(parallel_depth)] + ) + else: + self.parallel_blocks = None + + # Classification head(s). + if not self.return_interm_layers: + if self.parallel_blocks is not None: + self.norm2 = norm_layer(embed_dims[1]) + self.norm3 = norm_layer(embed_dims[2]) + else: + self.norm2 = self.norm3 = None + self.norm4 = norm_layer(embed_dims[3]) + + if self.parallel_depth > 0: + # CoaT series: Aggregate features of last three scales for classification. + assert embed_dims[1] == embed_dims[2] == embed_dims[3] + self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + # CoaT-Lite series: Use feature of last scale for classification. 
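+ # No cross-scale aggregation is needed; the head reads only the final
+ # stage's CLS token (or its pooled tokens when global_pool == 'avg').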
+ self.aggregate = None + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Initialize weights. + trunc_normal_(self.cls_token1, std=.02) + trunc_normal_(self.cls_token2, std=.02) + trunc_normal_(self.cls_token3, std=.02) + trunc_normal_(self.cls_token4, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem1=r'^cls_token1|patch_embed1|crpe1|cpe1', + serial_blocks1=r'^serial_blocks1\.(\d+)', + stem2=r'^cls_token2|patch_embed2|crpe2|cpe2', + serial_blocks2=r'^serial_blocks2\.(\d+)', + stem3=r'^cls_token3|patch_embed3|crpe3|cpe3', + serial_blocks3=r'^serial_blocks3\.(\d+)', + stem4=r'^cls_token4|patch_embed4|crpe4|cpe4', + serial_blocks4=r'^serial_blocks4\.(\d+)', + parallel_blocks=[ # FIXME (partially?) overlap parallel w/ serial blocks?? + (r'^parallel_blocks\.(\d+)', None), + (r'^norm|aggregate', (99999,)), + ] + ) + return matcher + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x0): + B = x0.shape[0] + + # Serial blocks 1. + x1 = self.patch_embed1(x0) + H1, W1 = self.patch_embed1.grid_size + x1 = insert_cls(x1, self.cls_token1) + for blk in self.serial_blocks1: + x1 = blk(x1, size=(H1, W1)) + x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 2. + x2 = self.patch_embed2(x1_nocls) + H2, W2 = self.patch_embed2.grid_size + x2 = insert_cls(x2, self.cls_token2) + for blk in self.serial_blocks2: + x2 = blk(x2, size=(H2, W2)) + x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 3. + x3 = self.patch_embed3(x2_nocls) + H3, W3 = self.patch_embed3.grid_size + x3 = insert_cls(x3, self.cls_token3) + for blk in self.serial_blocks3: + x3 = blk(x3, size=(H3, W3)) + x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 4. + x4 = self.patch_embed4(x3_nocls) + H4, W4 = self.patch_embed4.grid_size + x4 = insert_cls(x4, self.cls_token4) + for blk in self.serial_blocks4: + x4 = blk(x4, size=(H4, W4)) + x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + + # Only serial blocks: Early return. + if self.parallel_blocks is None: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). 
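+ # feat_out maps the requested names (e.g. 'x1_nocls') to [B, C, H, W]
+ # tensors with the CLS token stripped and tokens reshaped to a 2D grid.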
+ feat_out = {} + if 'x1_nocls' in self.out_features: + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + # Return features for classification. + x4 = self.norm4(x4) + return x4 + + # Parallel blocks. + for blk in self.parallel_blocks: + x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) + x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) + + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + x2 = self.norm2(x2) + x3 = self.norm3(x3) + x4 = self.norm4(x4) + return [x2, x3, x4] + + def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False): + if isinstance(x_feat, list): + assert self.aggregate is not None + if self.global_pool == 'avg': + x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C] + else: + x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C] + x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C] + else: + x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x) -> torch.Tensor: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features (for down-stream tasks). + return self.forward_features(x) + else: + # Return features for classification. + x_feat = self.forward_features(x) + x = self.forward_head(x_feat) + return x + + +def insert_cls(x, cls_token): + """ Insert CLS token. """ + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + return x + + +def remove_cls(x): + """ Remove CLS token. 
""" + return x[:, 1:, :] + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + for k, v in state_dict.items(): + # original model had unused norm layers, removing them requires filtering pretrained checkpoints + if k.startswith('norm1') or \ + (model.norm2 is None and k.startswith('norm2')) or \ + (model.norm3 is None and k.startswith('norm3')): + continue + out_dict[k] = v + return out_dict + + +def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + CoaT, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def coat_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_small(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_small', pretrained=pretrained, **model_cfg) + return model \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convit.py new file mode 100644 index 0000000000000000000000000000000000000000..b23e1c5504cfb12a47a651e45eb7ffd488e32acb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convit.py @@ -0,0 +1,369 @@ +""" ConViT Model + +@article{d2021convit, + title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, + author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, + journal={arXiv preprint arXiv:2103.10697}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.10697 +Original code: https://github.com/facebookresearch/convit, original copyright below + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the CC-by-NC license found in the +# LICENSE file in the root directory of this source tree. 
+# +'''These modules are adapted from those of timm, see +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +''' + +import torch +import torch.nn as nn +from functools import partial +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp +from .registry import register_model +from .vision_transformer_hybrid import HybridEmbed +from .fx_features import register_notrace_module + +import torch +import torch.nn as nn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # ConViT + 'convit_tiny': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"), + 'convit_small': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"), + 'convit_base': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_base.pth") +} + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class GPSA(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., locality_strength=1.): + super().__init__() + self.num_heads = num_heads + self.dim = dim + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.locality_strength = locality_strength + + self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.pos_proj = nn.Linear(3, num_heads) + self.proj_drop = nn.Dropout(proj_drop) + self.gating_param = nn.Parameter(torch.ones(self.num_heads)) + self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None + + def forward(self, x): + B, N, C = x.shape + if self.rel_indices is None or self.rel_indices.shape[1] != N: + self.rel_indices = self.get_rel_indices(N) + attn = self.get_attention(x) + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def get_attention(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] + pos_score = self.rel_indices.expand(B, -1, -1, -1) + pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) + patch_score = (q @ k.transpose(-2, -1)) * self.scale + patch_score = patch_score.softmax(dim=-1) + pos_score = pos_score.softmax(dim=-1) + + gating = self.gating_param.view(1, -1, 1, 1) + attn = (1. 
- torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score + attn /= attn.sum(dim=-1).unsqueeze(-1) + attn = self.attn_drop(attn) + return attn + + def get_attention_map(self, x, return_map=False): + attn_map = self.get_attention(x).mean(0) # average over batch + distances = self.rel_indices.squeeze()[:, :, -1] ** .5 + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) + if return_map: + return dist, attn_map + else: + return dist + + def local_init(self): + self.v.weight.data.copy_(torch.eye(self.dim)) + locality_distance = 1 # max(1,1/locality_strength**.5) + + kernel_size = int(self.num_heads ** .5) + center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 + for h1 in range(kernel_size): + for h2 in range(kernel_size): + position = h1 + kernel_size * h2 + self.pos_proj.weight.data[position, 2] = -1 + self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance + self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance + self.pos_proj.weight.data *= self.locality_strength + + def get_rel_indices(self, num_patches: int) -> torch.Tensor: + img_size = int(num_patches ** .5) + rel_indices = torch.zeros(1, num_patches, num_patches, 3) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + rel_indices[:, :, :, 2] = indd.unsqueeze(0) + rel_indices[:, :, :, 1] = indy.unsqueeze(0) + rel_indices[:, :, :, 0] = indx.unsqueeze(0) + device = self.qk.weight.device + return rel_indices.to(device) + + +class MHSA(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def get_attention_map(self, x, return_map=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + attn_map = (q @ k.transpose(-2, -1)) * self.scale + attn_map = attn_map.softmax(dim=-1).mean(0) + + img_size = int(N ** .5) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + distances = indd ** .5 + distances = distances.to(x.device) + + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N + if return_map: + return dist, attn_map + else: + return dist + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs): + super().__init__() + self.norm1 = norm_layer(dim) + self.use_gpsa = use_gpsa + if self.use_gpsa: + self.attn = GPSA( + 
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs) + else: + self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', + embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, + local_up_to_layer=3, locality_strength=1., use_pos_embed=True): + super().__init__() + assert global_pool in ('', 'avg', 'token') + embed_dim *= num_heads + self.num_classes = num_classes + self.global_pool = global_pool + self.local_up_to_layer = local_up_to_layer + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.locality_strength = locality_strength + self.use_pos_embed = use_pos_embed + + if hybrid_backbone is not None: + self.patch_embed = HybridEmbed( + hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) + else: + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + if self.use_pos_embed: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.pos_embed, std=.02) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=True, + locality_strength=locality_strength) + if i < local_up_to_layer else + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=False) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + for n, m in self.named_modules(): + if hasattr(m, 'local_init'): + m.local_init() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), 
(r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.use_pos_embed: + x = x + self.pos_embed + x = self.pos_drop(x) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + for u, blk in enumerate(self.blocks): + if u == self.local_up_to_layer: + x = torch.cat((cls_tokens, x), dim=1) + x = blk(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_convit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + return build_model_with_cfg(ConViT, variant, pretrained, **kwargs) + + +@register_model +def convit_tiny(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_small(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_base(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convmixer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convmixer.py new file mode 100644 index 0000000000000000000000000000000000000000..e2140241a2af7f6e7a7427d9fc926e9b71c233b0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convmixer.py @@ -0,0 +1,125 @@ +""" ConvMixer + +""" +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from custom_timm.models.registry import register_model +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import SelectAdaptivePool2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', + 'first_conv': 'stem.0', + **kwargs + } + + +default_cfgs = { + 'convmixer_1536_20': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1536_20_ks9_p7.pth.tar'), + 'convmixer_768_32': 
_cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_768_32_ks7_p7_relu.pth.tar'), + 'convmixer_1024_20_ks9_p14': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1024_20_ks9_p14.pth.tar') +} + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +class ConvMixer(nn.Module): + def __init__( + self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, global_pool='avg', + act_layer=nn.GELU, **kwargs): + super().__init__() + self.num_classes = num_classes + self.num_features = dim + self.grad_checkpointing = False + + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size), + act_layer(), + nn.BatchNorm2d(dim) + ) + self.blocks = nn.Sequential( + *[nn.Sequential( + Residual(nn.Sequential( + nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), + act_layer(), + nn.BatchNorm2d(dim) + )), + nn.Conv2d(dim, dim, kernel_size=1), + act_layer(), + nn.BatchNorm2d(dim) + ) for i in range(depth)] + ) + self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.pooling(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_convmixer(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs) + + +@register_model +def convmixer_1536_20(pretrained=False, **kwargs): + model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs) + return _create_convmixer('convmixer_1536_20', pretrained, **model_args) + + +@register_model +def convmixer_768_32(pretrained=False, **kwargs): + model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs) + return _create_convmixer('convmixer_768_32', pretrained, **model_args) + + +@register_model +def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs): + model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs) + return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convnext.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convnext.py new file mode 100644 index 0000000000000000000000000000000000000000..f76d972236dbae1a8df24d70ee35f05f6207f815 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/convnext.py @@ -0,0 +1,673 
@@ +""" ConvNeXt + +Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf + +Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below + +Model defs atto, femto, pico, nano and _ols / _hnf variants are timm specific. + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# This source code is licensed under the MIT license +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import named_apply, build_model_with_cfg, checkpoint_seq +from .layers import trunc_normal_, SelectAdaptivePool2d, DropPath, ConvMlp, Mlp, LayerNorm2d, LayerNorm, \ + create_conv2d, get_act_layer, make_divisible, to_ntuple +from .registry import register_model + + +__all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + # timm specific variants + convnext_atto=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + convnext_atto_ols=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + convnext_femto=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + convnext_femto_ols=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + convnext_pico=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', + test_input_size=(3, 288, 288), test_crop_pct=0.95), + convnext_pico_ols=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_nano=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_nano_ols=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_tiny_hnf=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', + crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + + convnext_tiny=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_small=_cfg( + 
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_base=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_large=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + convnext_tiny_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_small_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_base_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_large_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + convnext_xlarge_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + convnext_tiny_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_small_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_base_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_large_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + convnext_xlarge_384_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + + convnext_tiny_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", num_classes=21841), + convnext_small_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", num_classes=21841), + convnext_base_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=21841), + convnext_large_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=21841), + convnext_xlarge_in22k=_cfg( + url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=21841), +) + + +class ConvNeXtBlock(nn.Module): + """ ConvNeXt Block + There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + + Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate + choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear + is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. + + Args: + in_chs (int): Number of input channels. + drop_path (float): Stochastic depth rate. 
Default: 0.0 + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__( + self, + in_chs, + out_chs=None, + kernel_size=7, + stride=1, + dilation=1, + mlp_ratio=4, + conv_mlp=False, + conv_bias=True, + ls_init_value=1e-6, + act_layer='gelu', + norm_layer=None, + drop_path=0., + ): + super().__init__() + out_chs = out_chs or in_chs + act_layer = get_act_layer(act_layer) + if not norm_layer: + norm_layer = LayerNorm2d if conv_mlp else LayerNorm + mlp_layer = ConvMlp if conv_mlp else Mlp + self.use_conv_mlp = conv_mlp + + self.conv_dw = create_conv2d( + in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation, depthwise=True, bias=conv_bias) + self.norm = norm_layer(out_chs) + self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + if self.use_conv_mlp: + x = self.norm(x) + x = self.mlp(x) + else: + x = x.permute(0, 2, 3, 1) + x = self.norm(x) + x = self.mlp(x) + x = x.permute(0, 3, 1, 2) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + + x = self.drop_path(x) + shortcut + return x + + +class ConvNeXtStage(nn.Module): + + def __init__( + self, + in_chs, + out_chs, + kernel_size=7, + stride=2, + depth=2, + dilation=(1, 1), + drop_path_rates=None, + ls_init_value=1.0, + conv_mlp=False, + conv_bias=True, + act_layer='gelu', + norm_layer=None, + norm_layer_cl=None + ): + super().__init__() + self.grad_checkpointing = False + + if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: + ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 + pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used + self.downsample = nn.Sequential( + norm_layer(in_chs), + create_conv2d( + in_chs, out_chs, kernel_size=ds_ks, stride=stride, + dilation=dilation[0], padding=pad, bias=conv_bias), + ) + in_chs = out_chs + else: + self.downsample = nn.Identity() + + drop_path_rates = drop_path_rates or [0.] * depth + stage_blocks = [] + for i in range(depth): + stage_blocks.append(ConvNeXtBlock( + in_chs=in_chs, + out_chs=out_chs, + kernel_size=kernel_size, + dilation=dilation[1], + drop_path=drop_path_rates[i], + ls_init_value=ls_init_value, + conv_mlp=conv_mlp, + conv_bias=conv_bias, + act_layer=act_layer, + norm_layer=norm_layer if conv_mlp else norm_layer_cl + )) + in_chs = out_chs + self.blocks = nn.Sequential(*stage_blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class ConvNeXt(nn.Module): + r""" ConvNeXt + A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf + + Args: + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] + dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768] + drop_rate (float): Head dropout rate + drop_path_rate (float): Stochastic depth rate. Default: 0. + ls_init_value (float): Init value for Layer Scale. Default: 1e-6. + head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. 
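+ conv_mlp (bool): Use 1x1 convs for the MLP in channels-first layout instead of nn.Linear. Default: False
+ head_norm_first (bool): norm -> global pool -> fc ordering, instead of the default pool -> norm -> fc. Default: False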
+ """ + + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + output_stride=32, + depths=(3, 3, 9, 3), + dims=(96, 192, 384, 768), + kernel_sizes=7, + ls_init_value=1e-6, + stem_type='patch', + patch_size=4, + head_init_scale=1., + head_norm_first=False, + conv_mlp=False, + conv_bias=True, + act_layer='gelu', + norm_layer=None, + drop_rate=0., + drop_path_rate=0., + ): + super().__init__() + assert output_stride in (8, 16, 32) + kernel_sizes = to_ntuple(4)(kernel_sizes) + if norm_layer is None: + norm_layer = LayerNorm2d + norm_layer_cl = norm_layer if conv_mlp else LayerNorm + else: + assert conv_mlp,\ + 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' + norm_layer_cl = norm_layer + + self.num_classes = num_classes + self.drop_rate = drop_rate + self.feature_info = [] + + assert stem_type in ('patch', 'overlap', 'overlap_tiered') + if stem_type == 'patch': + # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), + norm_layer(dims[0]) + ) + stem_stride = patch_size + else: + mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] + self.stem = nn.Sequential( + nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), + nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), + norm_layer(dims[0]), + ) + stem_stride = 4 + + self.stages = nn.Sequential() + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + prev_chs = dims[0] + curr_stride = stem_stride + dilation = 1 + # 4 feature resolution stages, each consisting of multiple residual blocks + for i in range(4): + stride = 2 if curr_stride == 2 or i > 0 else 1 + if curr_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + curr_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + out_chs = dims[i] + stages.append(ConvNeXtStage( + prev_chs, + out_chs, + kernel_size=kernel_sizes[i], + stride=stride, + dilation=(first_dilation, dilation), + depth=depths[i], + drop_path_rates=dp_rates[i], + ls_init_value=ls_init_value, + conv_mlp=conv_mlp, + conv_bias=conv_bias, + act_layer=act_layer, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl + )) + prev_chs = out_chs + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + self.stages = nn.Sequential(*stages) + self.num_features = prev_chs + + # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets + # otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) + self.norm_pre = norm_layer(self.num_features) if head_norm_first else nn.Identity() + self.head = nn.Sequential(OrderedDict([ + ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), + ('norm', nn.Identity() if head_norm_first else norm_layer(self.num_features)), + ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())])) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if 
coarse else [
+                (r'^stages\.(\d+)\.downsample', (0,)),  # blocks
+                (r'^stages\.(\d+)\.blocks\.(\d+)', None),
+                (r'^norm_pre', (99999,))
+            ]
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in self.stages:
+            s.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes=0, global_pool=None):
+        if global_pool is not None:
+            self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
+            self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity()
+        self.head.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.stages(x)
+        x = self.norm_pre(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        # NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :(
+        x = self.head.global_pool(x)
+        x = self.head.norm(x)
+        x = self.head.flatten(x)
+        x = self.head.drop(x)
+        return x if pre_logits else self.head.fc(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _init_weights(module, name=None, head_init_scale=1.0):
+    if isinstance(module, nn.Conv2d):
+        trunc_normal_(module.weight, std=.02)
+        if module.bias is not None:
+            nn.init.zeros_(module.bias)
+    elif isinstance(module, nn.Linear):
+        trunc_normal_(module.weight, std=.02)
+        nn.init.zeros_(module.bias)
+        if name and 'head.' in name:
+            module.weight.data.mul_(head_init_scale)
+            module.bias.data.mul_(head_init_scale)
+
+
+def checkpoint_filter_fn(state_dict, model):
+    """ Remap FB checkpoints -> timm """
+    if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
+        return state_dict  # non-FB checkpoint
+    if 'model' in state_dict:
+        state_dict = state_dict['model']
+    out_dict = {}
+    import re
+    for k, v in state_dict.items():
+        k = k.replace('downsample_layers.0.', 'stem.')
+        k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
+        k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
+        k = k.replace('dwconv', 'conv_dw')
+        k = k.replace('pwconv', 'mlp.fc')
+        k = k.replace('head.', 'head.fc.')
+        if k.startswith('norm.'):
+            k = k.replace('norm', 'head.norm')
+        if v.ndim == 2 and 'head' not in k:
+            model_shape = model.state_dict()[k].shape
+            v = v.reshape(model_shape)
+        out_dict[k] = v
+    return out_dict
+
+
+def _create_convnext(variant, pretrained=False, **kwargs):
+    model = build_model_with_cfg(
+        ConvNeXt, variant, pretrained,
+        pretrained_filter_fn=checkpoint_filter_fn,
+        feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
+        **kwargs)
+    return model
+
+
+@register_model
+def convnext_atto(pretrained=False, **kwargs):
+    # timm atto variant (NOTE: still tweaking depths, will vary between 3-4M params, current is 3.7M)
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, **kwargs)
+    model = _create_convnext('convnext_atto', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_atto_ols(pretrained=False, **kwargs):
+    # timm atto variant with overlapping 3x3 conv stem, wider than non-ols atto above, current param count 3.7M
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered', **kwargs)
+    model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_femto(pretrained=False,
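+        # A commented usage sketch for the @register_model entrypoints below
+        # (illustrative; assumes torch is available and this module is imported):
+        #   model = convnext_femto(pretrained=False)
+        #   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)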
**kwargs):
+    # timm femto variant
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, **kwargs)
+    model = _create_convnext('convnext_femto', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_femto_ols(pretrained=False, **kwargs):
+    # timm femto variant with overlapping 3x3 conv stem
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered', **kwargs)
+    model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_pico(pretrained=False, **kwargs):
+    # timm pico variant
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, **kwargs)
+    model = _create_convnext('convnext_pico', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_pico_ols(pretrained=False, **kwargs):
+    # timm pico variant with overlapping 3x3 conv stem
+    model_args = dict(
+        depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered', **kwargs)
+    model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_nano(pretrained=False, **kwargs):
+    # timm nano variant with standard stem and head
+    model_args = dict(
+        depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, **kwargs)
+    model = _create_convnext('convnext_nano', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_nano_ols(pretrained=False, **kwargs):
+    # experimental nano variant with overlapping conv stem
+    model_args = dict(
+        depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap', **kwargs)
+    model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_tiny_hnf(pretrained=False, **kwargs):
+    # experimental tiny variant with norm before pooling in head (head norm first)
+    model_args = dict(
+        depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True, **kwargs)
+    model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_tiny(pretrained=False, **kwargs):
+    model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs)
+    model = _create_convnext('convnext_tiny', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_small(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
+    model = _create_convnext('convnext_small', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_base(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
+    model = _create_convnext('convnext_base', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_large(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
+    model = _create_convnext('convnext_large', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_tiny_in22ft1k(pretrained=False, **kwargs):
+    model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs)
+    model = _create_convnext('convnext_tiny_in22ft1k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def convnext_small_in22ft1k(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3,
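+        # NOTE: the *_in22ft1k entrypoints reuse the standard ConvNeXt depths/dims;
+        # only the weights differ (ImageNet-22k pretraining followed by an
+        # ImageNet-1k fine-tune, per the default_cfgs URLs above).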
27, 3], dims=[96, 192, 384, 768], **kwargs) + model = _create_convnext('convnext_small_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_tiny_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs) + model = _create_convnext('convnext_tiny_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_small_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs) + model = _create_convnext('convnext_small_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_384_in22ft1k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_384_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_tiny_in22k(pretrained=False, **kwargs): + model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), **kwargs) + model = _create_convnext('convnext_tiny_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_small_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs) + model = _create_convnext('convnext_small_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_base_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) + model = _create_convnext('convnext_base_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_large_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) + model = _create_convnext('convnext_large_in22k', pretrained=pretrained, **model_args) + return model + + +@register_model +def convnext_xlarge_in22k(pretrained=False, **kwargs): + model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) + model = _create_convnext('convnext_xlarge_in22k', 
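+        # NOTE: unlike the *_in22ft1k models, the *_in22k entrypoints keep the full
+        # 21841-class ImageNet-22k classifier head (see num_classes in default_cfgs above).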
pretrained=pretrained, **model_args)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/crossvit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/crossvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb996207da81e19b932c44d36af020267e227357
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/crossvit.py
@@ -0,0 +1,539 @@
+""" CrossViT Model
+
+@inproceedings{
+    chen2021crossvit,
+    title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
+    author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
+    booktitle={International Conference on Computer Vision (ICCV)},
+    year={2021}
+}
+
+Paper link: https://arxiv.org/abs/2103.14899
+Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
+
+NOTE: model names have been renamed from the originals to reflect the actual input resolution:
+all *_224 -> *_240 and *_384 -> *_408
+
+Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
+"""
+
+# Copyright IBM All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+"""
+Modified from custom_timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+
+"""
+from typing import Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.hub
+from functools import partial
+from typing import List
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .fx_features import register_notrace_function
+from .helpers import build_model_with_cfg
+from .layers import DropPath, to_2tuple, trunc_normal_, _assert
+from .registry import register_model
+from .vision_transformer import Mlp, Block
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
+        'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'),
+        'classifier': ('head.0', 'head.1'),
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'),
+    'crossvit_15_dagger_240': _cfg(
+        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth',
+        first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
+    ),
+    'crossvit_15_dagger_408': _cfg(
+        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth',
+        input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
+    ),
+    'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'),
+    'crossvit_18_dagger_240': _cfg(
+        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth',
+        first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'),
+    ),
+    'crossvit_18_dagger_408': _cfg(
+        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth',
+        input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0,
+    ),
+    'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'),
+    'crossvit_9_dagger_240': _cfg(
+        url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth',
+        first_conv=('patch_embed.0.proj.0',
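+        # NOTE: dagger variants use the multi-conv patch embed (multi_conv=True), so
+        # first_conv must point at the first conv inside each branch's projection
+        # Sequential, i.e. '...proj.0' rather than '...proj'.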
'patch_embed.1.proj.0'), + ), + 'crossvit_base_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'), + 'crossvit_small_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'), + 'crossvit_tiny_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'), +} + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., 
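+            # NOTE: mlp_ratio and act_layer are accepted for signature parity with the
+            # regular transformer Block but are unused here; this block applies
+            # cross-attention to the CLS token only and has no MLP (see forward below).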
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = CrossAttention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def forward(self, x):
+        x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x)))
+        return x
+
+
+class MultiScaleBlock(nn.Module):
+
+    def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+
+        num_branches = len(dim)
+        self.num_branches = num_branches
+        # different branches can have different embedding sizes; the first one is the base
+        self.blocks = nn.ModuleList()
+        for d in range(num_branches):
+            tmp = []
+            for i in range(depth[d]):
+                tmp.append(Block(
+                    dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                    drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer))
+            if len(tmp) != 0:
+                self.blocks.append(nn.Sequential(*tmp))
+
+        if len(self.blocks) == 0:
+            self.blocks = None
+
+        self.projs = nn.ModuleList()
+        for d in range(num_branches):
+            if dim[d] == dim[(d + 1) % num_branches] and False:
+                tmp = [nn.Identity()]
+            else:
+                tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
+            self.projs.append(nn.Sequential(*tmp))
+
+        self.fusion = nn.ModuleList()
+        for d in range(num_branches):
+            d_ = (d + 1) % num_branches
+            nh = num_heads[d_]
+            if depth[-1] == 0:  # backward compatibility:
+                self.fusion.append(
+                    CrossAttentionBlock(
+                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
+            else:
+                tmp = []
+                for _ in range(depth[-1]):
+                    tmp.append(CrossAttentionBlock(
+                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
+                self.fusion.append(nn.Sequential(*tmp))
+
+        self.revert_projs = nn.ModuleList()
+        for d in range(num_branches):
+            if dim[(d + 1) % num_branches] == dim[d] and False:
+                tmp = [nn.Identity()]
+            else:
+                tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
+                       nn.Linear(dim[(d + 1) % num_branches], dim[d])]
+            self.revert_projs.append(nn.Sequential(*tmp))
+
+    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
+
+        outs_b = []
+        for i, block in enumerate(self.blocks):
+            outs_b.append(block(x[i]))
+
+        # only take the cls token out
+        proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
+        for i, proj in enumerate(self.projs):
+            proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
+
+        # cross attention
+        outs = []
+        for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
+            tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
+            tmp = fusion(tmp)
+            reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
+            tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
+            outs.append(tmp)
+        return outs
+
+
+def _compute_num_patches(img_size, patches):
+    return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
+
+
+@register_notrace_function
+def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False):  # annotations for torchscript
+    """
+    Pulled out of CrossViT.forward_features to
bury conditional logic in a leaf node for FX tracing. + Args: + x (Tensor): input image + ss (tuple[int, int]): height and width to scale to + crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False + Returns: + Tensor: the "scaled" image batch tensor + """ + H, W = x.shape[-2:] + if H != ss[0] or W != ss[1]: + if crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) + return x + + +class CrossViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, + embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), + multi_conv=False, crop_scale=False, qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), global_pool='token', + ): + super().__init__() + assert global_pool in ('token', 'avg') + + self.num_classes = num_classes + self.global_pool = global_pool + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = sum(embed_dim) + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def 
no_weight_decay(self):
+        out = set()
+        for i in range(self.num_branches):
+            out.add(f'cls_token_{i}')
+            pe = getattr(self, f'pos_embed_{i}', None)
+            if pe is not None and pe.requires_grad:
+                out.add(f'pos_embed_{i}')
+        return out
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^cls_token|pos_embed|patch_embed',  # stem and embed
+            blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        assert not enable, 'gradient checkpointing not supported'
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=None):
+        self.num_classes = num_classes
+        if global_pool is not None:
+            assert global_pool in ('token', 'avg')
+            self.global_pool = global_pool
+        self.head = nn.ModuleList(
+            [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in
+             range(self.num_branches)])
+
+    def forward_features(self, x) -> List[torch.Tensor]:
+        B = x.shape[0]
+        xs = []
+        for i, patch_embed in enumerate(self.patch_embed):
+            x_ = x
+            ss = self.img_size_scaled[i]
+            x_ = scale_image(x_, ss, self.crop_scale)
+            x_ = patch_embed(x_)
+            cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1  # hard-coded for torch jit script
+            cls_tokens = cls_tokens.expand(B, -1, -1)
+            x_ = torch.cat((cls_tokens, x_), dim=1)
+            pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1  # hard-coded for torch jit script
+            x_ = x_ + pos_embed
+            x_ = self.pos_drop(x_)
+            xs.append(x_)
+
+        for i, blk in enumerate(self.blocks):
+            xs = blk(xs)
+
+        # NOTE: this norm was previously applied in the branch-token section; it was moved
+        # here to ensure the tokens of every branch pass through the layer norm
+        xs = [norm(xs[i]) for i, norm in enumerate(self.norm)]
+        return xs
+
+    def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor:
+        xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs]
+        if pre_logits or isinstance(self.head[0], nn.Identity):
+            return torch.cat([x for x in xs], dim=1)
+        return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0)
+
+    def forward(self, x):
+        xs = self.forward_features(x)
+        x = self.forward_head(xs)
+        return x
+
+
+def _create_crossvit(variant, pretrained=False, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+
+    def pretrained_filter_fn(state_dict):
+        new_state_dict = {}
+        for key in state_dict.keys():
+            if 'pos_embed' in key or 'cls_token' in key:
+                new_key = key.replace(".", "_")
+            else:
+                new_key = key
+            new_state_dict[new_key] = state_dict[key]
+        return new_state_dict
+
+    return build_model_with_cfg(
+        CrossViT, variant, pretrained,
+        pretrained_filter_fn=pretrained_filter_fn,
+        **kwargs)
+
+
+@register_model
+def crossvit_tiny_240(pretrained=False, **kwargs):
+    model_args = dict(
+        img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
+        num_heads=[3, 3], mlp_ratio=[4, 4, 1], **kwargs)
+    model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def crossvit_small_240(pretrained=False, **kwargs):
+    model_args = dict(
+        img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]],
+        num_heads=[6, 6], mlp_ratio=[4, 4, 1], **kwargs)
+    model = _create_crossvit(variant='crossvit_small_240',
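+    # Commented usage sketch (illustrative): CrossViT cfgs set fixed_input_size, so
+    # inputs must match the resolution in the model name, e.g.:
+    #   model = crossvit_small_240(pretrained=False)
+    #   logits = model(torch.randn(1, 3, 240, 240))  # -> (1, 1000)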
pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cspnet.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cspnet.py new file mode 100644 index 0000000000000000000000000000000000000000..8e19ec29f7b14cdf58368a8cbea5cdccee43b07e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/cspnet.py @@ -0,0 +1,1083 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / Copyright 2020 Ross Wightman +""" +import collections.abc +from dataclasses import dataclass, field, asdict +from functools import partial +from typing import Any, Callable, Dict, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply, MATCH_PREV_GROUP +from .layers import ClassifierHead, ConvNormAct, ConvNormActAa, DropPath, get_attn, create_act_layer, make_divisible +from .registry import register_model + + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'cspresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d': _cfg(url=''), + 'cspresnet50w': _cfg(url=''), + 'cspresnext50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + ), + 'cspdarknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + + 'darknet17': _cfg(url=''), + 'darknet21': _cfg(url=''), + 'sedarknet21': _cfg(url=''), + 'darknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'darknetaa53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3darknet_s': _cfg( + url='', interpolation='bicubic'), + 'cs3darknet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95, + ), + 'cs3darknet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_x': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3darknet_focus_s': _cfg( + url='', 
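+        # NOTE: an empty url means no pretrained weights are published for this cfg;
+        # such variants can only be used with pretrained=False.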
interpolation='bicubic'), + 'cs3darknet_focus_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_focus_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3darknet_focus_x': _cfg( + url='', interpolation='bicubic'), + + 'cs3sedarknet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), + 'cs3sedarknet_x': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + + 'cs3sedarknet_xdw': _cfg( + url='', interpolation='bicubic'), + + 'cs3edgenet_x': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), + 'cs3se_edgenet_x': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth', + interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), +} + + +@dataclass +class CspStemCfg: + out_chs: Union[int, Tuple[int, ...]] = 32 + stride: Union[int, Tuple[int, ...]] = 2 + kernel_size: int = 3 + padding: Union[int, str] = '' + pool: Optional[str] = '' + + +def _pad_arg(x, n): + # pads an argument tuple to specified n by padding with last value + if not isinstance(x, (tuple, list)): + x = (x,) + curr_n = len(x) + pad_n = n - curr_n + if pad_n <= 0: + return x[:n] + return tuple(x + (x[-1],) * pad_n) + + +@dataclass +class CspStagesCfg: + depth: Tuple[int, ...] = (3, 3, 5, 2) # block depth (number of block repeats in stages) + out_chs: Tuple[int, ...] = (128, 256, 512, 1024) # number of output channels for blocks in stage + stride: Union[int, Tuple[int, ...]] = 2 # stride of stage + groups: Union[int, Tuple[int, ...]] = 1 # num kxk conv groups + block_ratio: Union[float, Tuple[float, ...]] = 1.0 + bottle_ratio: Union[float, Tuple[float, ...]] = 1. 
# bottleneck-ratio of blocks in stage + avg_down: Union[bool, Tuple[bool, ...]] = False + attn_layer: Optional[Union[str, Tuple[str, ...]]] = None + attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None + stage_type: Union[str, Tuple[str]] = 'csp' # stage type ('csp', 'cs2', 'dark') + block_type: Union[str, Tuple[str]] = 'bottle' # blocks type for stages ('bottle', 'dark') + + # cross-stage only + expand_ratio: Union[float, Tuple[float, ...]] = 1.0 + cross_linear: Union[bool, Tuple[bool, ...]] = False + down_growth: Union[bool, Tuple[bool, ...]] = False + + def __post_init__(self): + n = len(self.depth) + assert len(self.out_chs) == n + self.stride = _pad_arg(self.stride, n) + self.groups = _pad_arg(self.groups, n) + self.block_ratio = _pad_arg(self.block_ratio, n) + self.bottle_ratio = _pad_arg(self.bottle_ratio, n) + self.avg_down = _pad_arg(self.avg_down, n) + self.attn_layer = _pad_arg(self.attn_layer, n) + self.attn_kwargs = _pad_arg(self.attn_kwargs, n) + self.stage_type = _pad_arg(self.stage_type, n) + self.block_type = _pad_arg(self.block_type, n) + + self.expand_ratio = _pad_arg(self.expand_ratio, n) + self.cross_linear = _pad_arg(self.cross_linear, n) + self.down_growth = _pad_arg(self.down_growth, n) + + +@dataclass +class CspModelCfg: + stem: CspStemCfg + stages: CspStagesCfg + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + act_layer: str = 'leaky_relu' + norm_layer: str = 'batchnorm' + aa_layer: Optional[str] = None # FIXME support string factory for this + + +def _cs3_cfg( + width_multiplier=1.0, + depth_multiplier=1.0, + avg_down=False, + act_layer='silu', + focus=False, + attn_layer=None, + attn_kwargs=None, + bottle_ratio=1.0, + block_type='dark', +): + if focus: + stem_cfg = CspStemCfg( + out_chs=make_divisible(64 * width_multiplier), + kernel_size=6, stride=2, padding=2, pool='') + else: + stem_cfg = CspStemCfg( + out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]), + kernel_size=3, stride=2, pool='') + return CspModelCfg( + stem=stem_cfg, + stages=CspStagesCfg( + out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]), + depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]), + stride=2, + bottle_ratio=bottle_ratio, + block_ratio=0.5, + avg_down=avg_down, + attn_layer=attn_layer, + attn_kwargs=attn_kwargs, + stage_type='cs3', + block_type=block_type, + ), + act_layer=act_layer, + ) + + +model_cfgs = dict( + cspresnet50=CspModelCfg( + stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(128, 256, 512, 1024), + stride=(1, 2), + expand_ratio=2., + bottle_ratio=0.5, + cross_linear=True, + ), + ), + cspresnet50d=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(128, 256, 512, 1024), + stride=(1,) + (2,), + expand_ratio=2., + bottle_ratio=0.5, + block_ratio=1., + cross_linear=True, + ), + ), + cspresnet50w=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(256, 512, 1024, 2048), + stride=(1,) + (2,), + expand_ratio=1., + bottle_ratio=0.25, + block_ratio=0.5, + cross_linear=True, + ), + ), + cspresnext50=CspModelCfg( + stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), + stages=CspStagesCfg( + depth=(3, 3, 5, 2), + out_chs=(256, 512, 1024, 2048), + stride=(1,) + (2,), + groups=32, + expand_ratio=1., + 
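+            # NOTE: groups=32 above yields ResNeXt-style grouped 3x3 convs, which is
+            # what distinguishes cspresnext50 from cspresnet50.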
bottle_ratio=1., + block_ratio=0.5, + cross_linear=True, + ), + ), + cspdarknet53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + expand_ratio=(2.,) + (1.,), + bottle_ratio=(0.5,) + (1.,), + block_ratio=(1.,) + (0.5,), + down_growth=True, + block_type='dark', + ), + ), + darknet17=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1,) * 5, + out_chs=(64, 128, 256, 512, 1024), + stride=(2,), + bottle_ratio=(0.5,), + block_ratio=(1.,), + stage_type='dark', + block_type='dark', + ), + ), + darknet21=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 1, 1, 2, 2), + out_chs=(64, 128, 256, 512, 1024), + stride=(2,), + bottle_ratio=(0.5,), + block_ratio=(1.,), + stage_type='dark', + block_type='dark', + + ), + ), + sedarknet21=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 1, 1, 2, 2), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + attn_layer='se', + stage_type='dark', + block_type='dark', + + ), + ), + darknet53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + stage_type='dark', + block_type='dark', + ), + ), + darknetaa53=CspModelCfg( + stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), + stages=CspStagesCfg( + depth=(1, 2, 8, 8, 4), + out_chs=(64, 128, 256, 512, 1024), + stride=2, + bottle_ratio=0.5, + block_ratio=1., + avg_down=True, + stage_type='dark', + block_type='dark', + ), + ), + + cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5), + cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67), + cs3darknet_l=_cs3_cfg(), + cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33), + + cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True), + cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True), + cs3darknet_focus_l=_cs3_cfg(focus=True), + cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True), + + cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), + cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33), + + cs3sedarknet_xdw=CspModelCfg( + stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''), + stages=CspStagesCfg( + depth=(3, 6, 12, 4), + out_chs=(256, 512, 1024, 2048), + stride=2, + groups=(1, 1, 256, 512), + bottle_ratio=0.5, + block_ratio=0.5, + attn_layer='se', + ), + act_layer='silu', + ), + + cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'), + cs3se_edgenet_x=_cs3_cfg( + width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge', + attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), +) + + +class BottleneckBlock(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.25, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_last=False, + attn_layer=None, + drop_block=None, + drop_path=0. 
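+            # NOTE: attn_last picks where the optional attn_layer is applied: after the
+            # final 1x1 conv (attn3) when True, otherwise after the 3x3 conv (attn2).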
+ ): + super(BottleneckBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + attn_last = attn_layer is not None and attn_last + attn_first = attn_layer is not None and not attn_last + + self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvNormAct( + mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity() + self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity() + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + self.act3 = create_act_layer(act_layer) + + def zero_init_last(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.attn2(x) + x = self.conv3(x) + x = self.attn3(x) + x = self.drop_path(x) + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.5, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + drop_block=None, + drop_path=0. + ): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + + self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() + self.conv2 = ConvNormAct( + mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.attn(x) + x = self.conv2(x) + x = self.drop_path(x) + shortcut + return x + + +class EdgeBlock(nn.Module): + """ EdgeResidual / Fused-MBConv / MobileNetV1-like 3x3 + 1x1 block (w/ activated output) + """ + + def __init__( + self, + in_chs, + out_chs, + dilation=1, + bottle_ratio=0.5, + groups=1, + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + attn_layer=None, + drop_block=None, + drop_path=0. 
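+            # NOTE: block ordering is the reverse of DarkBlock: a 3x3 conv first (with
+            # expansion via bottle_ratio), then a 1x1 projection, i.e. the
+            # Fused-MBConv / EdgeResidual pattern named in the class docstring.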
+ ): + super(EdgeBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + + self.conv1 = ConvNormAct( + in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, + drop_layer=drop_block, **ckwargs) + self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() + self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs) + self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.attn(x) + x = self.conv2(x) + x = self.drop_path(x) + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + expand_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + down_growth=False, + cross_linear=False, + block_dpr=None, + block_fn=BottleneckBlock, + **block_kwargs + ): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if stride != 1 or first_dilation != dilation: + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormActAa( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = nn.Identity() + prev_chs = in_chs + + # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. + self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs + )) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + x = self.conv_down(x) + x = self.conv_exp(x) + xs, xb = x.split(self.expand_chs // 2, dim=1) + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class CrossStage3(nn.Module): + """Cross Stage 3. + Similar to CrossStage, but with only one transition conv for the output. 
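+
+    An illustrative channel sketch (matching forward below): conv_exp produces
+    expand_chs channels, split into two halves; one half runs through the blocks,
+    the other is carried across unchanged, and a single 1x1 conv_transition fuses
+    the re-concatenated tensor back to out_chs.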
+ """ + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + expand_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + down_growth=False, + cross_linear=False, + block_dpr=None, + block_fn=BottleneckBlock, + **block_kwargs + ): + super(CrossStage3, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if stride != 1 or first_dilation != dilation: + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormActAa( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # expansion conv + self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # expanded output is split in 2 for blocks and cross stage + + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs + )) + prev_chs = block_out_chs + + # transition convs + self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + x = self.conv_down(x) + x = self.conv_exp(x) + x1, x2 = x.split(self.expand_chs // 2, dim=1) + x1 = self.blocks(x1) + out = self.conv_transition(torch.cat([x1, x2], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__( + self, + in_chs, + out_chs, + stride, + dilation, + depth, + block_ratio=1., + bottle_ratio=1., + groups=1, + first_dilation=None, + avg_down=False, + block_fn=BottleneckBlock, + block_dpr=None, + **block_kwargs + ): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + aa_layer = block_kwargs.pop('aa_layer', None) + + if avg_down: + self.conv_down = nn.Sequential( + nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling + ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) + ) + else: + self.conv_down = ConvNormActAa( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=aa_layer, **conv_kwargs) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + self.blocks.add_module(str(i), block_fn( + in_chs=prev_chs, + out_chs=block_out_chs, + dilation=dilation, + bottle_ratio=bottle_ratio, + groups=groups, + drop_path=block_dpr[i] if block_dpr is not None else 0., + **block_kwargs + )) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def create_csp_stem( + in_chans=3, + 
out_chs=32, + kernel_size=3, + stride=2, + pool='', + padding='', + act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, + aa_layer=None +): + stem = nn.Sequential() + feature_info = [] + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + stem_depth = len(out_chs) + assert stem_depth + assert stride in (1, 2, 4) + prev_feat = None + prev_chs = in_chans + last_idx = stem_depth - 1 + stem_stride = 1 + for i, chs in enumerate(out_chs): + conv_name = f'conv{i + 1}' + conv_stride = 2 if (i == 0 and stride > 1) or (i == last_idx and stride > 2 and not pool) else 1 + if conv_stride > 1 and prev_feat is not None: + feature_info.append(prev_feat) + stem.add_module(conv_name, ConvNormAct( + prev_chs, chs, kernel_size, + stride=conv_stride, + padding=padding if i == 0 else '', + act_layer=act_layer, + norm_layer=norm_layer + )) + stem_stride *= conv_stride + prev_chs = chs + prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name])) + if pool: + assert stride > 2 + if prev_feat is not None: + feature_info.append(prev_feat) + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=prev_chs, stride=2)) + pool_name = 'aa' + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + pool_name = 'pool' + stem_stride *= 2 + prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name])) + feature_info.append(prev_feat) + return stem, feature_info + + +def _get_stage_fn(stage_args): + stage_type = stage_args.pop('stage_type') + assert stage_type in ('dark', 'csp', 'cs3') + if stage_type == 'dark': + stage_args.pop('expand_ratio', None) + stage_args.pop('cross_linear', None) + stage_args.pop('down_growth', None) + stage_fn = DarkStage + elif stage_type == 'csp': + stage_fn = CrossStage + else: + stage_fn = CrossStage3 + return stage_fn, stage_args + + +def _get_block_fn(stage_args): + block_type = stage_args.pop('block_type') + assert block_type in ('dark', 'edge', 'bottle') + if block_type == 'dark': + return DarkBlock, stage_args + elif block_type == 'edge': + return EdgeBlock, stage_args + else: + return BottleneckBlock, stage_args + + +def _get_attn_fn(stage_args): + attn_layer = stage_args.pop('attn_layer') + attn_kwargs = stage_args.pop('attn_kwargs', None) or {} + if attn_layer is not None: + attn_layer = get_attn(attn_layer) + if attn_kwargs: + attn_layer = partial(attn_layer, **attn_kwargs) + return attn_layer, stage_args + + +def create_csp_stages( + cfg: CspModelCfg, + drop_path_rate: float, + output_stride: int, + stem_feat: Dict[str, Any] +): + cfg_dict = asdict(cfg.stages) + num_stages = len(cfg.stages.depth) + cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)] + stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())] + block_kwargs = dict( + act_layer=cfg.act_layer, + norm_layer=cfg.norm_layer, + ) + + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + feature_info = [] + stages = [] + for stage_idx, stage_args in enumerate(stage_args): + stage_fn, stage_args = _get_stage_fn(stage_args) + block_fn, stage_args = _get_block_fn(stage_args) + attn_fn, stage_args = _get_attn_fn(stage_args) + stride = stage_args.pop('stride') + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if 
net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + stages += [stage_fn( + prev_chs, + **stage_args, + stride=stride, + first_dilation=first_dilation, + dilation=dilation, + block_fn=block_fn, + aa_layer=cfg.aa_layer, + attn_layer=attn_fn, # will be passed through stage as block_kwargs + **block_kwargs, + )] + prev_chs = stage_args['out_chs'] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +class CspNet(nn.Module): + """Cross Stage Partial base model. + + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and less special cases. + """ + + def __init__( + self, + cfg: CspModelCfg, + in_chans=3, + num_classes=1000, + output_stride=32, + global_pool='avg', + drop_rate=0., + drop_path_rate=0., + zero_init_last=True + ): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict( + act_layer=cfg.act_layer, + norm_layer=cfg.norm_layer, + aa_layer=cfg.aa_layer + ) + self.feature_info = [] + + # Construct the stem + self.stem, stem_feat_info = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args) + self.feature_info.extend(stem_feat_info[:-1]) + + # Construct the stages + self.stages, stage_feat_info = create_csp_stages( + cfg, + drop_path_rate=drop_path_rate, + output_stride=output_stride, + stem_feat=stem_feat_info[-1], + ) + prev_chs = stage_feat_info[-1]['num_chs'] + self.feature_info.extend(stage_feat_info) + + # Construct the head + self.num_features = prev_chs + self.head = ClassifierHead( + in_chs=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^stages\.(\d+)\..*transition', MATCH_PREV_GROUP), # map to last block in stage + (r'^stages\.(\d+)', (0,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name, zero_init_last=False): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif 
zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +def _create_cspnet(variant, pretrained=False, **kwargs): + if variant.startswith('darknet') or variant.startswith('cspdarknet'): + # NOTE: DarkNet is one of few models with stride==1 features w/ 6 out_indices [0..5] + default_out_indices = (0, 1, 2, 3, 4, 5) + else: + default_out_indices = (0, 1, 2, 3, 4) + out_indices = kwargs.pop('out_indices', default_out_indices) + return build_model_with_cfg( + CspNet, variant, pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + **kwargs) + + +@register_model +def cspresnet50(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs): + return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs): + return _create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs) + + +@register_model +def darknet17(pretrained=False, **kwargs): + return _create_cspnet('darknet17', pretrained=pretrained, **kwargs) + + +@register_model +def darknet21(pretrained=False, **kwargs): + return _create_cspnet('darknet21', pretrained=pretrained, **kwargs) + + +@register_model +def sedarknet21(pretrained=False, **kwargs): + return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs): + return _create_cspnet('darknet53', pretrained=pretrained, **kwargs) + + +@register_model +def darknetaa53(pretrained=False, **kwargs): + return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_s(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_m(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_l(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_x(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_s(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_m(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_l(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3darknet_focus_x(pretrained=False, **kwargs): + return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_l(pretrained=False, **kwargs): + return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_x(pretrained=False, **kwargs): + return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3sedarknet_xdw(pretrained=False, 
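+        # registered like the variants above: @register_model makes this resolvable by name,
+        # e.g. create_model('cs3sedarknet_xdw'), assuming this vendored package mirrors the
+        # upstream timm factory API; the architecture itself comes from model_cfgs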
**kwargs): + return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs) + + +@register_model +def cs3edgenet_x(pretrained=False, **kwargs): + return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs) + + +@register_model +def cs3se_edgenet_x(pretrained=False, **kwargs): + return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/deit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/deit.py new file mode 100644 index 0000000000000000000000000000000000000000..19d9e14d1420b45383829cfe00c822216994b114 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/deit.py @@ -0,0 +1,449 @@ +""" DeiT - Data-efficient Image Transformers + +DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below + +paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118 + +Modifications copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +from functools import partial + +import torch +from torch import nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from custom_timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn + +from .helpers import build_model_with_cfg, checkpoint_seq +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # deit models (FB weights) + 'deit_tiny_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'), + 'deit_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'), + 'deit_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'), + 'deit_base_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'deit_tiny_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + 'deit3_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'), + 'deit3_small_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_medium_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'), + 
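+    # each entry here is a _cfg() dict: pretrained weight URL plus input metadata
+    # (input_size, crop_pct, mean/std) consumed by build_model_with_cfg; the 384px
+    # variants override input_size and crop_pct while inheriting the other defaults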
'deit3_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'), + 'deit3_base_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_large_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'), + 'deit3_large_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_huge_patch14_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'), + + 'deit3_small_patch16_224_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth', + crop_pct=1.0), + 'deit3_small_patch16_384_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_medium_patch16_224_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth', + crop_pct=1.0), + 'deit3_base_patch16_224_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth', + crop_pct=1.0), + 'deit3_base_patch16_384_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_large_patch16_224_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth', + crop_pct=1.0), + 'deit3_large_patch16_384_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'deit3_huge_patch14_224_in21ft1k': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth', + crop_pct=1.0), +} + + +class VisionTransformerDistilled(VisionTransformer): + """ Vision Transformer w/ Distillation Token and Head + + Distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, *args, **kwargs): + weight_init = kwargs.pop('weight_init', '') + super().__init__(*args, **kwargs, weight_init='skip') + assert self.global_pool in ('token',) + + self.num_prefix_tokens = 2 + self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + self.pos_embed = nn.Parameter( + torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + trunc_normal_(self.dist_token, std=.02) + super().init_weights(mode=mode) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed|dist_token', + blocks=[ + (r'^blocks\.(\d+)', None), + (r'^norm', (99999,))] # final norm w/ last block + ) + + @torch.jit.ignore + def get_classifier(self): + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def forward_features(self, x) -> torch.Tensor: + x = self.patch_embed(x) + x = torch.cat(( + self.cls_token.expand(x.shape[0], 
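+            # resulting token layout along dim=1: [CLS, DIST, patch_0 .. patch_{N-1}];
+            # pos_embed was sized num_patches + num_prefix_tokens (2) in __init__ to match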
-1, -1), + self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + if pre_logits: + return (x[:, 0] + x[:, 1]) / 2 + x, x_dist = self.head(x[:, 0]), self.head_dist(x[:, 1]) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train / finetune, inference average the classifier predictions + return (x + x_dist) / 2 + + +def _create_deit(variant, pretrained=False, distilled=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model_cls = VisionTransformerDistilled if distilled else VisionTransformer + model = build_model_with_cfg( + model_cls, variant, pretrained, + pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True), + **kwargs) + return model + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_deit( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). 
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
+    model = _create_deit(
+        'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs)
+    return model
+
+
+@register_model
+def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
+    """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
+    model = _create_deit(
+        'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs)
+    return model
+
+
+@register_model
+def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
+    """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
+    model = _create_deit(
+        'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_small_patch16_224(pretrained=False, **kwargs):
+    """ DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_small_patch16_384(pretrained=False, **kwargs):
+    """ DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_medium_patch16_224(pretrained=False, **kwargs):
+    """ DeiT-3 medium model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_base_patch16_224(pretrained=False, **kwargs):
+    """ DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_base_patch16_384(pretrained=False, **kwargs):
+    """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_large_patch16_224(pretrained=False, **kwargs):
+    """ DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_large_patch16_384(pretrained=False, **kwargs):
+    """ DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_huge_patch14_224(pretrained=False, **kwargs):
+    """ DeiT-3 huge model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-1k weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_small_patch16_224_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_small_patch16_224_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_small_patch16_384_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_small_patch16_384_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_medium_patch16_224_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 medium model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_medium_patch16_224_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_base_patch16_224_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_base_patch16_224_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_base_patch16_384_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_base_patch16_384_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_large_patch16_224_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_large_patch16_224_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_large_patch16_384_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_large_patch16_384_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def deit3_huge_patch14_224_in21ft1k(pretrained=False, **kwargs):
+    """ DeiT-3 huge model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
+    ImageNet-21k pretrained weights from https://github.com/facebookresearch/deit.
+    """
+    model_kwargs = dict(
+        patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6, **kwargs)
+    model = _create_deit('deit3_huge_patch14_224_in21ft1k', pretrained=pretrained, **model_kwargs)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/densenet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/densenet.py
new file mode 100644
index 0000000000000000000000000000000000000000..357afe0a341389787067efd66207108d15400a84
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/densenet.py
@@ -0,0 +1,400 @@
+"""Pytorch Densenet implementation w/ tweaks
+This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with
+fixed kwargs passthrough and addition of dynamic global avg/max pool.
+""" +import re +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, MATCH_PREV_GROUP +from .layers import BatchNormAct2d, create_norm_act_layer, BlurPool2d, create_classifier +from .registry import register_model + +__all__ = ['DenseNet'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', + } + + +default_cfgs = { + 'densenet121': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), + 'densenet121d': _cfg(url=''), + 'densenetblur121d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), + 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), + 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), + 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), + 'densenet264': _cfg(url=''), + 'densenet264d_iabn': _cfg(url=''), + 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), +} + + +class DenseLayer(nn.Module): + def __init__( + self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") 
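+            # cp.checkpoint recomputes bottleneck_fn (concat + norm1 + conv1) during the
+            # backward pass instead of storing the concatenated features, trading compute
+            # for a large activation-memory saving in deep dense blocks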
+ bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__( + self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=BatchNormAct2d, aa_layer=None): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + + def __init__( + self, growth_rate=32, block_config=(6, 12, 24, 16), num_classes=1000, in_chans=3, global_pool='avg', + bn_size=4, stem_type='', norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, + memory_efficient=False, aa_stem_only=True): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(DenseNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, num_output_features=num_features // 2, + norm_layer=norm_layer, aa_layer=transition_aa_layer) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = num_features + + # Linear layer + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + # Official init from torch repo. 
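+        # (kaiming-normal conv weights, BN weight=1 / bias=0, zero linear bias; the linear
+        # weight keeps the nn.Linear default init, as in the torchvision reference)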
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.constant_(m.bias, 0)
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        matcher = dict(
+            stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]',
+            blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [
+                (r'^features\.denseblock(\d+)\.denselayer(\d+)', None),
+                (r'^features\.transition(\d+)', MATCH_PREV_GROUP)  # FIXME combine with previous denselayer
+            ]
+        )
+        return matcher
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.classifier
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.classifier = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        return self.features(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        # both classifier and block drop?
+        # if self.drop_rate > 0.:
+        #     x = F.dropout(x, p=self.drop_rate, training=self.training)
+        x = self.classifier(x)
+        return x
+
+
+def _filter_torchvision_pretrained(state_dict):
+    pattern = re.compile(
+        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+
+    for key in list(state_dict.keys()):
+        res = pattern.match(key)
+        if res:
+            new_key = res.group(1) + res.group(2)
+            state_dict[new_key] = state_dict[key]
+            del state_dict[key]
+    return state_dict
+
+
+def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs):
+    kwargs['growth_rate'] = growth_rate
+    kwargs['block_config'] = block_config
+    return build_model_with_cfg(
+        DenseNet, variant, pretrained,
+        feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained,
+        **kwargs)
+
+
+@register_model
+def densenet121(pretrained=False, **kwargs):
+    r"""Densenet-121 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenetblur121d(pretrained=False, **kwargs):
+    r"""Densenet-121 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep',
+        aa_layer=BlurPool2d, **kwargs)
+    return model
+
+
+@register_model
+def densenet121d(pretrained=False, **kwargs):
+    r"""Densenet-121 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep',
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenet169(pretrained=False, **kwargs):
+    r"""Densenet-169 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenet201(pretrained=False, **kwargs):
+    r"""Densenet-201 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenet161(pretrained=False, **kwargs):
+    r"""Densenet-161 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenet264(pretrained=False, **kwargs):
+    r"""Densenet-264 model from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def densenet264d_iabn(pretrained=False, **kwargs):
+    r"""Densenet-264 model with deep stem and Inplace-ABN
+    """
+    def norm_act_fn(num_features, **kwargs):
+        return create_norm_act_layer('iabn', num_features, act_layer='leaky_relu', **kwargs)
+    model = _create_densenet(
+        'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep',
+        norm_layer=norm_act_fn, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tv_densenet121(pretrained=False, **kwargs):
+    r"""Densenet-121 model with original Torchvision weights, from
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
+    """
+    model = _create_densenet(
+        'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dla.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dla.py
new file mode 100644
index 0000000000000000000000000000000000000000..e61146e2449e6599f4e584578e0550493eb7111a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dla.py
@@ -0,0 +1,474 @@
+""" Deep Layer Aggregation and DLA w/ Res2Net
+DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla
+DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484
+
+Res2Net additions from: https://github.com/gasvn/Res2Net/
+Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169
+"""
+import math
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg
+from .layers import create_classifier
+from .registry import register_model
+
+__all__ = ['DLA']
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
+        'crop_pct': 0.875, 'interpolation': 'bilinear',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'base_layer.0', 'classifier': 'fc',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'dla34': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla34-2b83ff04.pth'),
+    'dla46_c': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla46_c-9b68d685.pth'),
+    'dla46x_c': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla46x_c-6bc5b5c8.pth'),
+    'dla60x_c': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla60x_c-a38e054a.pth'),
+    'dla60': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla60-9e91bd4d.pth'),
+    'dla60x': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla60x-6818f6bb.pth'),
+    'dla102':
_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla102-21f57b54.pth'), + 'dla102x': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla102x-7ec0aa2a.pth'), + 'dla102x2': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla102x2-ac4239c4.pth'), + 'dla169': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dla169-7c767967.pth'), + 'dla60_res2net': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), + 'dla60_res2next': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), +} + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut=None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, + bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ 
in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, x_children: List[torch.Tensor]): + x = self.conv(torch.cat(x_children, 1)) + x = self.bn(x) + if self.shortcut: + x += x_children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__( + self, levels, block, in_channels, out_channels, stride=1, dilation=1, cardinality=1, + base_width=64, level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. 
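+                # the projection maps the (possibly downsampled) input to out_channels so it
+                # can serve as tree1's residual shortcut when in/out channel counts differ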
+ self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) + self.tree2 = DlaTree( + levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) + self.root = None + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): + if children is None: + children = [] + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.root is not None: # levels == 1 + x2 = self.tree2(x1) + x = self.root([x2, x1] + children) + else: + children.append(x1) + x = self.tree2(x1, None, children) + return x + + +class DLA(nn.Module): + def __init__( + self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, global_pool='avg', + cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, drop_rate=0.0): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True)) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^base_layer', + blocks=r'^level(\d+)' if coarse else [ + # an unusual arch, this achieves somewhat more granularity without getting super messy + (r'^level(\d+)\.tree(\d+)', None), + (r'^level(\d+)\.root', (2,)), + (r'^level(\d+)', (1,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + if pre_logits: + return x.flatten(1) + else: + x = self.fc(x) + return self.flatten(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, variant, pretrained, + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) + return _create_dla('dla60_res2net', pretrained, **model_kwargs) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) + return _create_dla('dla60_res2next', pretrained, **model_kwargs) + + +@register_model +def dla34(pretrained=False, **kwargs): # DLA-34 + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], + block=DlaBasic, **kwargs) + return _create_dla('dla34', pretrained, **model_kwargs) + + +@register_model +def dla46_c(pretrained=False, **kwargs): # DLA-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, **kwargs) + return _create_dla('dla46_c', pretrained, **model_kwargs) + + +@register_model +def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla46x_c', pretrained, **model_kwargs) + + +@register_model +def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C + model_kwargs = dict( + levels=[1, 1, 1, 
2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x_c', pretrained, **model_kwargs) + + +@register_model +def dla60(pretrained=False, **kwargs): # DLA-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, **kwargs) + return _create_dla('dla60', pretrained, **model_kwargs) + + +@register_model +def dla60x(pretrained=False, **kwargs): # DLA-X-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x', pretrained, **model_kwargs) + + +@register_model +def dla102(pretrained=False, **kwargs): # DLA-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla102', pretrained, **model_kwargs) + + +@register_model +def dla102x(pretrained=False, **kwargs): # DLA-X-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x', pretrained, **model_kwargs) + + +@register_model +def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x2', pretrained, **model_kwargs) + + +@register_model +def dla169(pretrained=False, **kwargs): # DLA-169 + model_kwargs = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dpn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dpn.py new file mode 100644 index 0000000000000000000000000000000000000000..4231735672b682fffc0577fe16578950ff3b85bb --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/dpn.py @@ -0,0 +1,339 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. 
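+
+A minimal usage sketch (illustrative only; it assumes this vendored copy is importable
+as the `custom_timm` package, and uses the `dpn92` entrypoint registered below via
+`@register_model`):
+
+    import torch
+    from custom_timm.models.dpn import dpn92
+
+    # untrained DPN-92 with a 10-class head; the classifier is a 1x1 conv,
+    # flattened after global pooling in forward_head()
+    model = dpn92(pretrained=False, num_classes=10).eval()
+    with torch.no_grad():
+        logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)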
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['DPN'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'dpn68': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth'), + 'dpn68b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn92': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth'), + 'dpn98': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth'), + 'dpn131': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth'), + 'dpn107': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth') +} + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( 
+ in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__( + self, small=False, num_init_features=64, k_r=96, groups=32, global_pool='avg', + b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), output_stride=32, + num_classes=1000, in_chans=3, drop_rate=0., fc_act_layer=nn.ELU): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + norm_layer = partial(BatchNormAct2d, eps=.001) + fc_norm_layer = partial(BatchNormAct2d, eps=.001, act_layer=fc_act_layer, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvNormAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = 
(k_r * bw) // (64 * bw_factor) + blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^features\.conv1', + blocks=[ + (r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None), + (r'^features\.conv5_bn_ac', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + if pre_logits: + return x.flatten(1) + else: + x = self.classifier(x) + return self.flatten(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, variant, pretrained, + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs) + + +@register_model +def dpn68(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn68b(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68b', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn92(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), **kwargs) + return _create_dpn('dpn92', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn98(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), 
inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn98', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn131(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn131', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn107(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128), **kwargs) + return _create_dpn('dpn107', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/edgenext.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/edgenext.py new file mode 100644 index 0000000000000000000000000000000000000000..202c89ba8a9cf6c15087efd441a437e85d0ce515 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/edgenext.py @@ -0,0 +1,572 @@ +""" EdgeNeXt + +Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications` + - https://arxiv.org/abs/2206.10589 + +Original code and weights from https://github.com/mmaaz60/EdgeNeXt + +Modifications and additions for timm by / Copyright 2022, Ross Wightman +""" +import math +import torch +from collections import OrderedDict +from functools import partial +from typing import Tuple + +from torch import nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_module +from .layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, SelectAdaptivePool2d, create_conv2d +from .helpers import named_apply, build_model_with_cfg, checkpoint_seq +from .registry import register_model + + +__all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + edgenext_xx_small=_cfg( + url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_xx_small.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + edgenext_x_small=_cfg( + url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_x_small.pth", + test_input_size=(3, 288, 288), test_crop_pct=1.0), + # edgenext_small=_cfg( + # url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_small.pth"), + edgenext_small=_cfg( # USI weights + url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.1/edgenext_small_usi.pth", + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), + # edgenext_base=_cfg( + # url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.2/edgenext_base_usi.pth"), + edgenext_base=_cfg( # USI weights + url="https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.2/edgenext_base_usi.pth", + crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), + + edgenext_small_rw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/edgenext_small_rw-sw-b00041bb.pth', + test_input_size=(3, 320, 320), test_crop_pct=1.0, + ), +) + + +@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method +class 
PositionalEncodingFourier(nn.Module): + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + + def forward(self, shape: Tuple[int, int, int]): + inv_mask = ~torch.zeros(shape).to(device=self.token_projection.weight.device, dtype=torch.bool) + y_embed = inv_mask.cumsum(1, dtype=torch.float32) + x_embed = inv_mask.cumsum(2, dtype=torch.float32) + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=inv_mask.device) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), + pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), + pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos) + + return pos + + +class ConvBlock(nn.Module): + def __init__( + self, + dim, + dim_out=None, + kernel_size=7, + stride=1, + conv_bias=True, + expand_ratio=4, + ls_init_value=1e-6, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, drop_path=0., + ): + super().__init__() + dim_out = dim_out or dim + self.shortcut_after_dw = stride > 1 or dim != dim_out + + self.conv_dw = create_conv2d( + dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) + self.norm = norm_layer(dim_out) + self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + if self.shortcut_after_dw: + shortcut = x + + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x) + x = self.mlp(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = shortcut + self.drop_path(x) + return x + + +class CrossCovarianceAttn(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + attn_drop=0., + proj_drop=0. 
+ ): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) + + # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class SplitTransposeBlock(nn.Module): + def __init__( + self, + dim, + num_scales=1, + num_heads=8, + expand_ratio=4, + use_pos_emb=True, + conv_bias=True, + qkv_bias=True, + ls_init_value=1e-6, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_path=0., + attn_drop=0., + proj_drop=0. + ): + super().__init__() + width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) + self.width = width + self.num_scales = max(1, num_scales - 1) + + convs = [] + for i in range(self.num_scales): + convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) + self.convs = nn.ModuleList(convs) + + self.pos_embd = None + if use_pos_emb: + self.pos_embd = PositionalEncodingFourier(dim=dim) + self.norm_xca = norm_layer(dim) + self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None + self.xca = CrossCovarianceAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) + + self.norm = norm_layer(dim, eps=1e-6) + self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) + self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + shortcut = x + + # scales code re-written for torchscript as per my res2net fixes -rw + # NOTE torch.split(x, self.width, 1) causing issues with ONNX export + spx = x.chunk(len(self.convs) + 1, dim=1) + spo = [] + sp = spx[0] + for i, conv in enumerate(self.convs): + if i > 0: + sp = sp + spx[i] + sp = conv(sp) + spo.append(sp) + spo.append(spx[-1]) + x = torch.cat(spo, 1) + + # XCA + B, C, H, W = x.shape + x = x.reshape(B, C, H * W).permute(0, 2, 1) + if self.pos_embd is not None: + pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) + x = x.reshape(B, H, W, C) + + # Inverted Bottleneck + x = self.norm(x) + x = self.mlp(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = shortcut + self.drop_path(x) + return x + + +class EdgeNeXtStage(nn.Module): + def __init__( + self, + in_chs, + out_chs, + stride=2, + depth=2, + num_global_blocks=1, + num_heads=4, + scales=2, + kernel_size=7, + expand_ratio=4, + use_pos_emb=False, + downsample_block=False, + conv_bias=True, + ls_init_value=1.0, + drop_path_rates=None, + norm_layer=LayerNorm2d, + norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU + ): + super().__init__() + self.grad_checkpointing = False + + if downsample_block or stride == 1: + self.downsample = nn.Identity() + else: + self.downsample = nn.Sequential( + norm_layer(in_chs), + nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) + ) + in_chs = out_chs + + stage_blocks = [] + for i in range(depth): + if i < depth - num_global_blocks: + stage_blocks.append( + ConvBlock( + dim=in_chs, + dim_out=out_chs, + stride=stride if downsample_block and i == 0 else 1, + conv_bias=conv_bias, + kernel_size=kernel_size, + expand_ratio=expand_ratio, + ls_init_value=ls_init_value, + drop_path=drop_path_rates[i], + norm_layer=norm_layer_cl, + act_layer=act_layer, + ) + ) + else: + stage_blocks.append( + SplitTransposeBlock( + dim=in_chs, + num_scales=scales, + num_heads=num_heads, + expand_ratio=expand_ratio, + use_pos_emb=use_pos_emb, + conv_bias=conv_bias, + ls_init_value=ls_init_value, + drop_path=drop_path_rates[i], + norm_layer=norm_layer_cl, + act_layer=act_layer, + ) + ) + in_chs = out_chs + self.blocks = nn.Sequential(*stage_blocks) + + def forward(self, x): + x = self.downsample(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class EdgeNeXt(nn.Module): + def __init__( + self, + in_chans=3, + num_classes=1000, + global_pool='avg', + dims=(24, 48, 88, 168), + depths=(3, 3, 9, 3), + global_block_counts=(0, 1, 1, 1), + kernel_sizes=(3, 5, 7, 9), + heads=(8, 8, 8, 8), + d2_scales=(2, 2, 3, 4), + use_pos_emb=(False, True, False, False), + ls_init_value=1e-6, + head_init_scale=1., + expand_ratio=4, + downsample_block=False, + conv_bias=True, + stem_type='patch', + head_norm_first=False, + act_layer=nn.GELU, + drop_path_rate=0., + drop_rate=0., + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.drop_rate = drop_rate + norm_layer = partial(LayerNorm2d, eps=1e-6) + norm_layer_cl = partial(nn.LayerNorm, eps=1e-6) + self.feature_info = [] + + assert stem_type in ('patch', 'overlap') + if stem_type == 'patch': + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, 
bias=conv_bias), + norm_layer(dims[0]), + ) + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), + norm_layer(dims[0]), + ) + + curr_stride = 4 + stages = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + in_chs = dims[0] + for i in range(4): + stride = 2 if curr_stride == 2 or i > 0 else 1 + # FIXME support dilation / output_stride + curr_stride *= stride + stages.append(EdgeNeXtStage( + in_chs=in_chs, + out_chs=dims[i], + stride=stride, + depth=depths[i], + num_global_blocks=global_block_counts[i], + num_heads=heads[i], + drop_path_rates=dp_rates[i], + scales=d2_scales[i], + expand_ratio=expand_ratio, + kernel_size=kernel_sizes[i], + use_pos_emb=use_pos_emb[i], + ls_init_value=ls_init_value, + downsample_block=downsample_block, + conv_bias=conv_bias, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + act_layer=act_layer, + )) + # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 + in_chs = dims[i] + self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.num_features = dims[-1] + self.norm_pre = norm_layer(self.num_features) if head_norm_first else nn.Identity() + self.head = nn.Sequential(OrderedDict([ + ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), + ('norm', nn.Identity() if head_norm_first else norm_layer(self.num_features)), + ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())])) + + named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.downsample', (0,)), # blocks + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm_pre', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes=0, global_pool=None): + if global_pool is not None: + self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity() + self.head.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm_pre(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + # NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :( + x = self.head.global_pool(x) + x = self.head.norm(x) + x = self.head.flatten(x) + x = self.head.drop(x) + return x if pre_logits else self.head.fc(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module, name=None, head_init_scale=1.0): + if isinstance(module, nn.Conv2d): + trunc_normal_tf_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Linear): + trunc_normal_tf_(module.weight, std=.02) + nn.init.zeros_(module.bias) + if name and 'head.' 
in name: + module.weight.data.mul_(head_init_scale) + module.bias.data.mul_(head_init_scale) + + +def checkpoint_filter_fn(state_dict, model): + """ Remap FB checkpoints -> timm """ + if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: + return state_dict # non-FB checkpoint + + # models were released as train checkpoints... :/ + if 'model_ema' in state_dict: + state_dict = state_dict['model_ema'] + elif 'model' in state_dict: + state_dict = state_dict['model'] + elif 'state_dict' in state_dict: + state_dict = state_dict['state_dict'] + + out_dict = {} + import re + for k, v in state_dict.items(): + k = k.replace('downsample_layers.0.', 'stem.') + k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) + k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) + k = k.replace('dwconv', 'conv_dw') + k = k.replace('pwconv', 'mlp.fc') + k = k.replace('head.', 'head.fc.') + if k.startswith('norm.'): + k = k.replace('norm', 'head.norm') + if v.ndim == 2 and 'head' not in k: + model_shape = model.state_dict()[k].shape + v = v.reshape(model_shape) + out_dict[k] = v + return out_dict + + +def _create_edgenext(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + EdgeNeXt, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), + **kwargs) + return model + + +@register_model +def edgenext_xx_small(pretrained=False, **kwargs): + # 1.33M & 260.58M @ 256 resolution + # 71.23% Top-1 accuracy + # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS + # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS + model_kwargs = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4), **kwargs) + return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def edgenext_x_small(pretrained=False, **kwargs): + # 2.34M & 538.0M @ 256 resolution + # 75.00% Top-1 accuracy + # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=31.61 versus 28.49 for MobileViT_XS + # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS + model_kwargs = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4), **kwargs) + return _create_edgenext('edgenext_x_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def edgenext_small(pretrained=False, **kwargs): + # 5.59M & 1260.59M @ 256 resolution + # 79.43% Top-1 accuracy + # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=20.47 versus 18.86 for MobileViT_S + # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 for MobileViT_S + model_kwargs = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304), **kwargs) + return _create_edgenext('edgenext_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def edgenext_base(pretrained=False, **kwargs): + # 18.51M & 3840.93M @ 256 resolution + # 82.5% (normal) 83.7% (USI) Top-1 accuracy + # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler + # Jetson FPS=xx.xx versus xx.xx for MobileViT_S + # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx + model_kwargs = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584], **kwargs) + return 
_create_edgenext('edgenext_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def edgenext_small_rw(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), + downsample_block=True, conv_bias=False, stem_type='overlap', **kwargs) + return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **model_kwargs) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientformer.py new file mode 100644 index 0000000000000000000000000000000000000000..0f5c71ab8766892c10d8063df055883484dc04c4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientformer.py @@ -0,0 +1,551 @@ +""" EfficientFormer + +@article{li2022efficientformer, + title={EfficientFormer: Vision Transformers at MobileNet Speed}, + author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, + Sergey and Wang, Yanzhi and Ren, Jian}, + journal={arXiv preprint arXiv:2206.01191}, + year={2022} +} + +Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc. + +Modifications and timm support by / Copyright 2022, Ross Wightman +""" +from typing import Dict + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, trunc_normal_, to_2tuple, Mlp +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, + 'crop_pct': .95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'), + **kwargs + } + + +default_cfgs = dict( + efficientformer_l1=_cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/efficientformer_l1_1000d_224-5b08fab0.pth", + ), + efficientformer_l3=_cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/efficientformer_l3_300d_224-6816624f.pth", + ), + efficientformer_l7=_cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/efficientformer_l7_300d_224-e957ab75.pth", + ), +) + +EfficientFormer_width = { + 'l1': (48, 96, 224, 448), + 'l3': (64, 128, 320, 512), + 'l7': (96, 192, 384, 768), +} + +EfficientFormer_depth = { + 'l1': (3, 2, 6, 4), + 'l3': (4, 4, 12, 6), + 'l7': (6, 6, 18, 8), +} + + +class Attention(torch.nn.Module): + attention_bias_cache: Dict[str, torch.Tensor] + + def __init__( + self, + dim=384, + key_dim=32, + num_heads=8, + attn_ratio=4, + resolution=7 + ): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = self.val_dim * num_heads + self.attn_ratio = attn_ratio + + self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim) + self.proj = nn.Linear(self.val_attn_dim, dim) + + resolution = to_2tuple(resolution) + pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) + rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] + self.attention_biases = 
torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) + self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos)) + self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.attention_bias_cache: + self.attention_bias_cache = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.attention_bias_cache: + self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.attention_bias_cache[device_key] + + def forward(self, x): # x (B,N,C) + B, N, C = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + self.get_attention_biases(x.device) + + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) + x = self.proj(x) + return x + + +class Stem4(nn.Sequential): + def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super().__init__() + self.stride = 4 + + self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1)) + self.add_module('norm1', norm_layer(out_chs // 2)) + self.add_module('act1', act_layer()) + self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1)) + self.add_module('norm2', norm_layer(out_chs)) + self.add_module('act2', act_layer()) + + +class Downsample(nn.Module): + """ + Downsampling via strided conv w/ norm + Input: tensor in shape [B, C, H, W] + Output: tensor in shape [B, C, H/stride, W/stride] + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d): + super().__init__() + if padding is None: + padding = kernel_size // 2 + self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) + self.norm = norm_layer(out_chs) + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class Flat(nn.Module): + + def __init__(self, ): + super().__init__() + + def forward(self, x): + x = x.flatten(2).transpose(1, 2) + return x + + +class Pooling(nn.Module): + """ + Implementation of pooling for PoolFormer + --pool_size: pooling size + """ + + def __init__(self, pool_size=3): + super().__init__() + self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) + + def forward(self, x): + return self.pool(x) - x + + +class ConvMlpWithNorm(nn.Module): + """ + Implementation of MLP with 1*1 convolutions. + Input: tensor with shape [B, C, H, W] + """ + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + drop=0. 
+ ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity() + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.norm2(x) + x = self.drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class MetaBlock1d(nn.Module): + + def __init__( + self, + dim, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + drop=0., + drop_path=0., + layer_scale_init_value=1e-5 + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.token_mixer = Attention(dim) + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.ls1 = LayerScale(dim, layer_scale_init_value) + self.ls2 = LayerScale(dim, layer_scale_init_value) + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) + x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class LayerScale2d(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + + +class MetaBlock2d(nn.Module): + + def __init__( + self, + dim, + pool_size=3, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + drop=0., + drop_path=0., + layer_scale_init_value=1e-5 + ): + super().__init__() + self.token_mixer = Pooling(pool_size=pool_size) + self.mlp = ConvMlpWithNorm( + dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.ls1 = LayerScale2d(dim, layer_scale_init_value) + self.ls2 = LayerScale2d(dim, layer_scale_init_value) + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(x))) + x = x + self.drop_path(self.ls2(self.mlp(x))) + return x + + +class EfficientFormerStage(nn.Module): + + def __init__( + self, + dim, + dim_out, + depth, + downsample=True, + num_vit=1, + pool_size=3, + mlp_ratio=4., + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + norm_layer_cl=nn.LayerNorm, + drop=.0, + drop_path=0., + layer_scale_init_value=1e-5, +): + super().__init__() + self.grad_checkpointing = False + + if downsample: + self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer) + dim = dim_out + else: + assert dim == dim_out + self.downsample = nn.Identity() + + blocks = [] + if num_vit and num_vit >= depth: + blocks.append(Flat()) + + for block_idx in range(depth): + remain_idx = depth - block_idx - 1 + if num_vit and num_vit > remain_idx: + blocks.append( + MetaBlock1d( + dim, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + norm_layer=norm_layer_cl, + drop=drop, + drop_path=drop_path[block_idx], + layer_scale_init_value=layer_scale_init_value, + )) + else: + blocks.append( + MetaBlock2d( + dim, + pool_size=pool_size, + mlp_ratio=mlp_ratio, + act_layer=act_layer, + norm_layer=norm_layer, + drop=drop, + drop_path=drop_path[block_idx], + layer_scale_init_value=layer_scale_init_value, + )) + if num_vit and num_vit == remain_idx: + blocks.append(Flat()) + + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + x = self.downsample(x) + x = self.blocks(x) + return x + + +class EfficientFormer(nn.Module): + + def __init__( + self, + depths, + embed_dims=None, + in_chans=3, + num_classes=1000, + global_pool='avg', + downsamples=None, + num_vit=0, + mlp_ratios=4, + pool_size=3, + layer_scale_init_value=1e-5, + act_layer=nn.GELU, + norm_layer=nn.BatchNorm2d, + norm_layer_cl=nn.LayerNorm, + drop_rate=0., + drop_path_rate=0., + **kwargs + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + + self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer) + prev_dim = embed_dims[0] + + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) + stages = [] + for i in range(len(depths)): + stage = EfficientFormerStage( + prev_dim, + embed_dims[i], + depths[i], + downsample=downsamples[i], + num_vit=num_vit if i == 3 else 0, + pool_size=pool_size, + mlp_ratio=mlp_ratios, + act_layer=act_layer, + norm_layer_cl=norm_layer_cl, + norm_layer=norm_layer, + drop=drop_rate, + drop_path=dpr[i], + layer_scale_init_value=layer_scale_init_value, + ) + prev_dim = embed_dims[i] + stages.append(stage) + + self.stages = nn.Sequential(*stages) + + # Classifier head + self.num_features = embed_dims[-1] + self.norm = norm_layer_cl(self.num_features) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + # assuming model is always distilled (valid for current checkpoints, will split def if that changes) + self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + self.apply(self._init_weights) + + # init for classification + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, 
nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def no_weight_decay(self): + return {k for k, _ in self.named_parameters() if 'attention_biases' in k} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + if pre_logits: + return x + x, x_dist = self.head(x), self.head_dist(x) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train/finetune, average the classifier predictions at inference + return (x + x_dist) / 2 + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _checkpoint_filter_fn(state_dict, model): + """ Remap original checkpoints -> timm """ + if 'stem.0.weight' in state_dict: + return state_dict # non-original checkpoint, no remapping needed + + out_dict = {} + import re + stage_idx = 0 + for k, v in state_dict.items(): + if k.startswith('patch_embed'): + k = k.replace('patch_embed.0', 'stem.conv1') + k = k.replace('patch_embed.1', 'stem.norm1') + k = k.replace('patch_embed.3', 'stem.conv2') + k = k.replace('patch_embed.4', 'stem.norm2') + + if re.match(r'network\.(\d+)\.proj\.weight', k): + stage_idx += 1 + k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k) + k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k) + k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k) + + k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k) + k = k.replace('dist_head', 'head_dist') + out_dict[k] = v + return out_dict + + +def _create_efficientformer(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + EfficientFormer, variant, pretrained, + pretrained_filter_fn=_checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def efficientformer_l1(pretrained=False, **kwargs): + model_kwargs = dict( + depths=EfficientFormer_depth['l1'], + embed_dims=EfficientFormer_width['l1'], + num_vit=1, + **kwargs) + return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **model_kwargs) + + +@register_model +def efficientformer_l3(pretrained=False, **kwargs): + model_kwargs = dict( + depths=EfficientFormer_depth['l3'], + embed_dims=EfficientFormer_width['l3'], + num_vit=4, + **kwargs) + return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **model_kwargs) + + +@register_model +def efficientformer_l7(pretrained=False,
**kwargs): + model_kwargs = dict( + depths=EfficientFormer_depth['l7'], + embed_dims=EfficientFormer_width['l7'], + num_vit=8, + **kwargs) + return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **model_kwargs) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet.py new file mode 100644 index 0000000000000000000000000000000000000000..90dd9eb85dfc6ab473e48df9aacdccd73bdff22b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet.py @@ -0,0 +1,2403 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficientNet that covers a variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* TinyNet + - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 + - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source!
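+
+A minimal usage sketch (illustrative only, assuming the vendored package is on the
+import path; `efficientnet_b0` is one of the entrypoints registered further below):
+
+    import torch
+    from custom_timm.models.efficientnet import efficientnet_b0
+
+    # untrained EfficientNet-B0; num_classes is forwarded to the classifier head
+    model = efficientnet_b0(pretrained=False, num_classes=10).eval()
+    with torch.no_grad():
+        logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)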
+ +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, pretrained_cfg_for_features, checkpoint_seq +from .layers import create_conv2d, create_classifier, get_norm_act_layer, EvoNorm2dS0, GroupNormAct +from .registry import register_model + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth'), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth'), + + 'mobilenetv2_035': _cfg( + url=''), + 'mobilenetv2_050': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', + interpolation='bicubic', + ), + 'mobilenetv2_075': _cfg( + url=''), + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + test_input_size=(3, 256, 256), crop_pct=1.0), + 'efficientnet_b2': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + # FIXME experimental + 'efficientnet_b0_gn': _cfg( + url=''), + 'efficientnet_b0_g8_gn': _cfg( + url=''), + 'efficientnet_b0_g16_evos': _cfg( + url=''), + 'efficientnet_b3_gn': _cfg( + url='', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b3_g8_gn': _cfg( + url='', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth'), + 'efficientnet_el_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', + input_size=(3, 240, 240), pool_size=(8, 8), 
crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s': _cfg( + url='', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m': _cfg( + url='', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + 
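+        # NOTE (added): the tf_ prefixed cfgs pair with weights ported from the official
+        # TensorFlow release; the matching model fns further down set
+        # kwargs['bn_eps'] = BN_EPS_TF_DEFAULT and kwargs['pad_type'] = 'same' so the
+        # PyTorch graph reproduces TF's BatchNorm epsilon and SAME padding numerics.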
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 
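+    # NOTE (added): the _ns cfgs carry Noisy Student weights. As a worked example of how
+    # crop_pct is consumed at eval time (a sketch of the usual timm resize-then-center-crop
+    # pipeline): for tf_efficientnet_b1_ns above, scale_size = int(math.floor(240 / 0.882))
+    # = 272, so images are resized to 272 on the short side and center-cropped to 240.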
'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better 
match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 
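+    # NOTE (added): the *_in21k cfgs below set num_classes=21843 (the full ImageNet-21k
+    # label space). A hedged fine-tuning sketch, assuming this package exposes the usual
+    # timm-style create_model entrypoint via its registry:
+    #   model = create_model('tf_efficientnetv2_s_in21k', pretrained=True)
+    #   model.reset_classifier(num_classes=10)  # replace the 21843-way head
+    # reset_classifier is defined on the EfficientNet class later in this file.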
'tf_efficientnetv2_s_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), + + "tinynet_a": _cfg( + input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth'), + "tinynet_b": _cfg( + input_size=(3, 188, 188), pool_size=(6, 6), # 
int(224 * 0.84) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth'), + "tinynet_c": _cfg( + input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth'), + "tinynet_d": _cfg( + input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth'), + "tinynet_e": _cfg( + input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth'), +} + + +class EfficientNet(nn.Module): + """ EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * MobileNet-V2 + * FBNet C + * Single-Path NAS Pixel1 + * TinyNet + """ + + def __init__( + self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, + output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, + se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_act_layer(stem_size, inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_act_layer(self.num_features, inplace=True) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^conv_stem|bn1', + blocks=[ + (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), + (r'conv_head|bn2', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + if 
self.grad_checkpointing and not torch.jit.is_scripting():
+            x = checkpoint_seq(self.blocks, x, flatten=True)
+        else:
+            x = self.blocks(x)
+        x = self.conv_head(x)
+        x = self.bn2(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        x = self.global_pool(x)
+        if self.drop_rate > 0.:
+            x = F.dropout(x, p=self.drop_rate, training=self.training)
+        return x if pre_logits else self.classifier(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+class EfficientNetFeatures(nn.Module):
+    """ EfficientNet Feature Extractor
+
+    A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation
+    and object detection models.
+    """
+
+    def __init__(
+            self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3,
+            stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels,
+            act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.):
+        super(EfficientNetFeatures, self).__init__()
+        act_layer = act_layer or nn.ReLU
+        norm_layer = norm_layer or nn.BatchNorm2d
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        se_layer = se_layer or SqueezeExcite
+        self.drop_rate = drop_rate
+
+        # Stem
+        if not fix_stem:
+            stem_size = round_chs_fn(stem_size)
+        self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
+        self.bn1 = norm_act_layer(stem_size, inplace=True)
+
+        # Middle stages (IR/ER/DS Blocks)
+        builder = EfficientNetBuilder(
+            output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn,
+            act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate,
+            feature_location=feature_location)
+        self.blocks = nn.Sequential(*builder(stem_size, block_args))
+        self.feature_info = FeatureInfo(builder.features, out_indices)
+        self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices}
+
+        efficientnet_init_weights(self)
+
+        # Register feature extraction hooks with FeatureHooks helper
+        self.feature_hooks = None
+        if feature_location != 'bottleneck':
+            hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
+            self.feature_hooks = FeatureHooks(hooks, self.named_modules())
+
+    def forward(self, x) -> List[torch.Tensor]:
+        x = self.conv_stem(x)
+        x = self.bn1(x)
+        if self.feature_hooks is None:
+            features = []
+            if 0 in self._stage_out_idx:
+                features.append(x)  # add stem out
+            for i, b in enumerate(self.blocks):
+                x = b(x)
+                if i + 1 in self._stage_out_idx:
+                    features.append(x)
+            return features
+        else:
+            self.blocks(x)
+            out = self.feature_hooks.get_output(x.device)
+            return list(out.values())
+
+
+def _create_effnet(variant, pretrained=False, **kwargs):
+    features_only = False
+    model_cls = EfficientNet
+    kwargs_filter = None
+    if kwargs.pop('features_only', False):
+        features_only = True
+        kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool')
+        model_cls = EfficientNetFeatures
+    model = build_model_with_cfg(
+        model_cls, variant, pretrained,
+        pretrained_strict=not features_only,
+        kwargs_filter=kwargs_filter,
+        **kwargs)
+    if features_only:
+        model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
+    return model
+
+
+def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """Creates a mnasnet-a1 model.
+
+    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
+    Paper: https://arxiv.org/pdf/1807.11626.pdf.
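+
+    NOTE (added commentary): a rough reading of the arch_def block strings used by these
+    generator fns (decode_arch_def notation): 'ir_r2_k3_s2_e6_c24' = 2 repeats of an
+    inverted-residual block, 3x3 depthwise kernel, stride 2 on the first repeat,
+    expansion ratio 6, 24 output channels; 'ds' = depthwise-separable, 'er' = edge
+    residual, 'cn' = conv+norm+act, 'se0.25' adds squeeze-excite, '_noskip' drops the
+    residual shortcut.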
+
+    Args:
+      channel_multiplier: multiplier to number of channels per layer.
+    """
+    arch_def = [
+        # stage 0, 112x112 in
+        ['ds_r1_k3_s1_e1_c16_noskip'],
+        # stage 1, 112x112 in
+        ['ir_r2_k3_s2_e6_c24'],
+        # stage 2, 56x56 in
+        ['ir_r3_k5_s2_e3_c40_se0.25'],
+        # stage 3, 28x28 in
+        ['ir_r4_k3_s2_e6_c80'],
+        # stage 4, 14x14in
+        ['ir_r2_k3_s1_e6_c112_se0.25'],
+        # stage 5, 14x14in
+        ['ir_r3_k5_s2_e6_c160_se0.25'],
+        # stage 6, 7x7 in
+        ['ir_r1_k3_s1_e6_c320'],
+    ]
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def),
+        stem_size=32,
+        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
+        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        **kwargs
+    )
+    model = _create_effnet(variant, pretrained, **model_kwargs)
+    return model
+
+
+def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """Creates a mnasnet-b1 model.
+
+    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
+    Paper: https://arxiv.org/pdf/1807.11626.pdf.
+
+    Args:
+      channel_multiplier: multiplier to number of channels per layer.
+    """
+    arch_def = [
+        # stage 0, 112x112 in
+        ['ds_r1_k3_s1_c16_noskip'],
+        # stage 1, 112x112 in
+        ['ir_r3_k3_s2_e3_c24'],
+        # stage 2, 56x56 in
+        ['ir_r3_k5_s2_e3_c40'],
+        # stage 3, 28x28 in
+        ['ir_r3_k5_s2_e6_c80'],
+        # stage 4, 14x14in
+        ['ir_r2_k3_s1_e6_c96'],
+        # stage 5, 14x14in
+        ['ir_r4_k5_s2_e6_c192'],
+        # stage 6, 7x7 in
+        ['ir_r1_k3_s1_e6_c320_noskip']
+    ]
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def),
+        stem_size=32,
+        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
+        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        **kwargs
+    )
+    model = _create_effnet(variant, pretrained, **model_kwargs)
+    return model
+
+
+def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """Creates a mnasnet-small model.
+
+    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
+    Paper: https://arxiv.org/pdf/1807.11626.pdf.
+
+    Args:
+      channel_multiplier: multiplier to number of channels per layer.
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, + group_size=None, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 
'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + ['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
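+    # NOTE (added, hedged): round_limit=0. disables the round-down guard in timm-style
+    # make_divisible. A worked example of the default rounding for comparison:
+    #   round_channels(48, multiplier=0.8) -> int(38.4 + 4) // 8 * 8 = 40
+    # with the default round_limit=0.9, the result is bumped up by `divisor` only when it
+    # falls below 90% of the target; here 40 >= 0.9 * 38.4, so it stays 40.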
+ model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before ref the impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on paper figure differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + 
round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. 
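+
+    NOTE (added commentary, hedged): the multi-valued kernel keys in the arch_def below,
+    e.g. 'k3.5.7.9', decode to mixed depthwise convolutions that split channels across
+    3x3/5x5/7x7/9x9 kernels (the core MixNet idea); the 'a1.1'/'p1.1' options appear to
+    select kernel sizes for the expansion and pointwise-projection convs, and the '_nsw'
+    suffix requests swish activation, matching the per-stage comments.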
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_tinynet( + variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs +): + """Creates a TinyNet model. + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=max(1280, round_channels(1280, model_width, 8, None)), + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=model_width), + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + return mnasnet_100(pretrained, **kwargs) + + +@register_model +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. 
""" + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +@register_model +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_035(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.35 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_050(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.5 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_075(pretrained=False, **kwargs): + """ MobileNet V2 w/ 0.75 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + 
model = _gen_efficientnet(
+        'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b2(pretrained=False, **kwargs):
+    """ EfficientNet-B2 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b2a(pretrained=False, **kwargs):
+    """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop"""
+    # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now
+    return efficientnet_b2(pretrained=pretrained, **kwargs)
+
+
+@register_model
+def efficientnet_b3(pretrained=False, **kwargs):
+    """ EfficientNet-B3 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b3a(pretrained=False, **kwargs):
+    """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """
+    # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now
+    return efficientnet_b3(pretrained=pretrained, **kwargs)
+
+
+@register_model
+def efficientnet_b4(pretrained=False, **kwargs):
+    """ EfficientNet-B4 """
+    # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b5(pretrained=False, **kwargs):
+    """ EfficientNet-B5 """
+    # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b6(pretrained=False, **kwargs):
+    """ EfficientNet-B6 """
+    # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b7(pretrained=False, **kwargs):
+    """ EfficientNet-B7 """
+    # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b8(pretrained=False, **kwargs):
+    """ EfficientNet-B8 """
+    # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_l2(pretrained=False, **kwargs):
+    """ EfficientNet-L2."""
+    # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs)
+    return model
+
+
+# FIXME experimental group conv / GroupNorm / EvoNorm experiments
+@register_model
+def efficientnet_b0_gn(pretrained=False, **kwargs):
+    """ EfficientNet-B0 + GroupNorm"""
+    model = _gen_efficientnet(
+        'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs)
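+    # NOTE (added, hedged): passing norm_layer=partial(GroupNormAct, group_size=8)
+    # overrides the BatchNorm default, since _gen_efficientnet builds its norm as
+    #   kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))
+    # so every norm in stem/blocks/head becomes GroupNorm (+act), group_size presumably
+    # meaning 8 channels per group.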
+    return model
+
+
+@register_model
+def efficientnet_b0_g8_gn(pretrained=False, **kwargs):
+    """ EfficientNet-B0 w/ group conv + GroupNorm"""
+    model = _gen_efficientnet(
+        'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8),
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b0_g16_evos(pretrained=False, **kwargs):
+    """ EfficientNet-B0 w/ group 16 conv + EvoNorm"""
+    model = _gen_efficientnet(
+        'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16,
+        pretrained=pretrained, **kwargs)  # norm_layer=partial(EvoNorm2dS0, group_size=16),
+    return model
+
+
+@register_model
+def efficientnet_b3_gn(pretrained=False, **kwargs):
+    """ EfficientNet-B3 w/ GroupNorm """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16,
+        norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b3_g8_gn(pretrained=False, **kwargs):
+    """ EfficientNet-B3 w/ grouped conv + GroupNorm"""
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet(
+        'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16,
+        norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_es(pretrained=False, **kwargs):
+    """ EfficientNet-Edge Small. """
+    model = _gen_efficientnet_edge(
+        'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_es_pruned(pretrained=False, **kwargs):
+    """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
+    model = _gen_efficientnet_edge(
+        'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_em(pretrained=False, **kwargs):
+    """ EfficientNet-Edge-Medium. """
+    model = _gen_efficientnet_edge(
+        'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_el(pretrained=False, **kwargs):
+    """ EfficientNet-Edge-Large. """
+    model = _gen_efficientnet_edge(
+        'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_el_pruned(pretrained=False, **kwargs):
+    """ EfficientNet-Edge-Large pruned.
+    For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
+    model = _gen_efficientnet_edge(
+        'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_cc_b0_4e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B0 w/ 4 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_cc_b0_8e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B0 w/ 8 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_cc_b1_8e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B1 w/ 8 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite0(pretrained=False, **kwargs):
+    """ EfficientNet-Lite0 """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite1(pretrained=False, **kwargs):
+    """ EfficientNet-Lite1 """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite2(pretrained=False, **kwargs):
+    """ EfficientNet-Lite2 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite3(pretrained=False, **kwargs):
+    """ EfficientNet-Lite3 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite4(pretrained=False, **kwargs):
+    """ EfficientNet-Lite4 """
+    # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b1_pruned(pretrained=False, **kwargs):
+    """ EfficientNet-B1 Pruned.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium (RW variant). + """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. """ + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. 
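Every `tf_*` definition below differs from its PyTorch-native counterpart only in two kwargs injected before generation: `bn_eps = BN_EPS_TF_DEFAULT` (1e-3, matching the TF reference training) and `pad_type = 'same'` (TF-style padding). A rough sketch of what those change; the `bn1`/`conv_stem` attribute names are assumptions based on this implementation's stem naming:

import timm

pt = timm.create_model('efficientnet_b0', pretrained=False)
tf = timm.create_model('tf_efficientnet_b0', pretrained=False)

# TF checkpoints were trained with BN eps 1e-3; PyTorch defaults to 1e-5
print(pt.bn1.eps, tf.bn1.eps)  # expected: 1e-05 0.001
# 'same' pad type swaps in a TF-compatible padded conv for the stride-2 stem
print(type(pt.conv_stem).__name__, type(tf.conv_stem).__name__)  # e.g. Conv2d Conv2dSame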
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. 
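The B1-B8 definitions above are pure compound scaling of B0: `channel_multiplier` widens every stage (rounded to a multiple of 8 by `round_channels`/`make_divisible` in efficientnet_builder.py) and `depth_multiplier` scales stage repeats with ceil truncation. A worked sketch of that arithmetic, with the rounding rule re-implemented here purely for illustration:

import math

def make_divisible_sketch(v, divisor=8, round_limit=0.9):
    # round to the nearest multiple of divisor, but never below ~90% of v
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < round_limit * v:
        new_v += divisor
    return new_v

# B7: channel_multiplier=2.0, depth_multiplier=3.1
print(make_divisible_sketch(32 * 2.0))  # stem: 32 -> 64 channels
print(math.ceil(1 * 3.1))               # a 1-repeat stage -> 4 repeats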
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. 
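The CondConv blocks referenced here pick a per-example mixture of expert kernels instead of a single static kernel; `experts_multiplier=2` turns the 4-expert baseline into the 8-expert defs above. The routing computation itself appears later in `CondConvResidual.forward`; a standalone sketch of just that step:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(2, 32, 14, 14)        # (batch, channels, H, W)
routing_fn = nn.Linear(32, 8)         # one logit per expert, 8 experts

pooled = F.adaptive_avg_pool2d(x, 1).flatten(1)       # (2, 32)
routing_weights = torch.sigmoid(routing_fn(pooled))   # (2, 8), per-sample
print(routing_weights.shape)
# each CondConv2d call then blends its 8 expert kernels with these weights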
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. 
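The `_in21k` checkpoints that follow keep the full ImageNet-21k classifier head (21843 classes in timm's configs, if memory serves), while the `_in21ft1k` heads are already fine-tuned to the 1k label set. Overriding `num_classes` re-initializes only the head, so either family can be adapted; a sketch:

import timm

# trunk unchanged, classifier re-initialized for 1000 classes
model = timm.create_model('tf_efficientnetv2_s_in21k', pretrained=False, num_classes=1000)
print(model.num_classes)  # 1000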
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs): + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs): + """ EfficientNet-V2-B1. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs): + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs): + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_a(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_b(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_c(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_d(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_e(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_blocks.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..34a317571c99132cbd6c00561f1eaf9699eabaff --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_blocks.py @@ -0,0 +1,281 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2019, Ross Wightman +""" +import math + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .layers import create_conv2d, DropPath, make_divisible, create_act_layer, get_norm_act_layer + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual'] + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, + gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, in_chs, out_chs, kernel_size, stride=1, dilation=1, group_size=0, pad_type='', + 
skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.):
+        super(ConvBnAct, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        groups = num_groups(group_size, in_chs)
+        self.has_skip = skip and stride == 1 and in_chs == out_chs
+
+        self.conv = create_conv2d(
+            in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
+        self.bn1 = norm_act_layer(out_chs, inplace=True)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # output of conv after act, same as block output
+            return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', hook_type='', num_chs=self.conv.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv(x)
+        x = self.bn1(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class DepthwiseSeparableConv(nn.Module):
+    """ DepthwiseSeparable block
+    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
+    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
+    """
+    def __init__(
+            self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
+            noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
+            se_layer=None, drop_path_rate=0.):
+        super(DepthwiseSeparableConv, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        groups = num_groups(group_size, in_chs)
+        self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
+        self.has_pw_act = pw_act  # activation after point-wise conv
+
+        self.conv_dw = create_conv2d(
+            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, groups=groups)
+        self.bn1 = norm_act_layer(in_chs, inplace=True)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PW
+            return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv_dw(x)
+        x = self.bn1(x)
+        x = self.se(x)
+        x = self.conv_pw(x)
+        x = self.bn2(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
+
+
+class InvertedResidual(nn.Module):
+    """ Inverted residual block w/ optional SE
+
+    Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
+    referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in
+      * MNasNet - https://arxiv.org/abs/1807.11626
+      * EfficientNet - https://arxiv.org/abs/1905.11946
+      * MobileNet-V3 - https://arxiv.org/abs/1905.02244
+    """
+
+    def __init__(
+            self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
+            noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
+            norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + groups = num_groups(group_size, mid_chs) + self.has_skip = (in_chs == out_chs and stride == 1) and not noskip + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_act_layer(mid_chs, inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + groups=groups, padding=pad_type, **conv_kwargs) + self.bn2 = norm_act_layer(mid_chs, inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_act_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + return dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + + def forward(self, x): + shortcut = x + x = self.conv_pw(x) + x = self.bn1(x) + x = self.conv_dw(x) + x = self.bn2(x) + x = self.se(x) + x = self.conv_pwl(x) + x = self.bn3(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, group_size=group_size, + pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.se(x) + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + if self.has_skip: + x = self.drop_path(x) + shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - 
https://arxiv.org/abs/2104.00298
+    """
+
+    def __init__(
+            self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, group_size=0, pad_type='',
+            force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU,
+            norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):
+        super(EdgeResidual, self).__init__()
+        norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
+        if force_in_chs > 0:
+            mid_chs = make_divisible(force_in_chs * exp_ratio)
+        else:
+            mid_chs = make_divisible(in_chs * exp_ratio)
+        groups = num_groups(group_size, in_chs)
+        self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
+
+        # Expansion convolution
+        self.conv_exp = create_conv2d(
+            in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
+        self.bn1 = norm_act_layer(mid_chs, inplace=True)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        # Point-wise linear projection
+        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_act_layer(out_chs, apply_act=False)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, before PWL
+            return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
+        else:  # location == 'bottleneck', block output
+            return dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv_exp(x)
+        x = self.bn1(x)
+        x = self.se(x)
+        x = self.conv_pwl(x)
+        x = self.bn2(x)
+        if self.has_skip:
+            x = self.drop_path(x) + shortcut
+        return x
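That completes the block set in efficientnet_blocks.py. As an illustrative sanity check of the skip-connection logic (the import path is an assumption; adjust it to however this vendored package resolves in your checkout):

import torch
from custom_timm.models.efficientnet_blocks import InvertedResidual  # hypothetical path

x = torch.randn(1, 32, 56, 56)
blk_skip = InvertedResidual(in_chs=32, out_chs=32, exp_ratio=6.0, stride=1)
blk_down = InvertedResidual(in_chs=32, out_chs=64, exp_ratio=6.0, stride=2)
print(blk_skip(x).shape)  # torch.Size([1, 32, 56, 56]); has_skip is True
print(blk_down(x).shape)  # torch.Size([1, 64, 28, 28]); stride/width change disables skip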
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_builder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..67d15a8692dc99d735c94b37505f3c01b2c29fea
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/efficientnet_builder.py
@@ -0,0 +1,477 @@
+""" EfficientNet, MobileNetV3, etc Builder
+
+Assembles EfficientNet and related network feature blocks from string definitions.
+Handles stride, dilation calculations, and selects feature extraction points.
+
+Hacked together by / Copyright 2019, Ross Wightman
+"""
+
+import logging
+import math
+import re
+from copy import deepcopy
+from functools import partial
+
+import torch.nn as nn
+
+from .efficientnet_blocks import *
+from .layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible
+
+__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights",
+           'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT']
+
+_logger = logging.getLogger(__name__)
+
+
+_DEBUG_BUILDER = False
+
+# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
+# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
+# NOTE: momentum varies btw .99 and .9997 depending on source
+# .99 in official TF TPU impl
+# .9997 (/w .999 in search space) for paper
+BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
+BN_EPS_TF_DEFAULT = 1e-3
+_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
+
+
+def get_bn_args_tf():
+    return _BN_ARGS_TF.copy()
+
+
+def resolve_bn_args(kwargs):
+    bn_args = {}
+    bn_momentum = kwargs.pop('bn_momentum', None)
+    if bn_momentum is not None:
+        bn_args['momentum'] = bn_momentum
+    bn_eps = kwargs.pop('bn_eps', None)
+    if bn_eps is not None:
+        bn_args['eps'] = bn_eps
+    return bn_args
+
+
+def resolve_act_layer(kwargs, default='relu'):
+    return get_act_layer(kwargs.pop('act_layer', default))
+
+
+def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9):
+    """Round number of filters based on depth multiplier."""
+    if not multiplier:
+        return channels
+    return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit)
+
+
+def _log_info_if(msg, condition):
+    if condition:
+        _logger.info(msg)
+
+
+def _parse_ksize(ss):
+    if ss.isdigit():
+        return int(ss)
+    else:
+        return [int(k) for k in ss.split('.')]
+
+
+def _decode_block_str(block_str):
+    """ Decode block definition string
+
+    Gets a list of block arg (dicts) through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
+
+    All args can exist in any order with the exception of the leading string which
+    is assumed to indicate the block type.
+
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio
+    n - activation fn ('re', 'r6', 'hs', 'sw', or 'mi')
+    Args:
+        block_str: a string representation of block arguments.
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + num_repeat = int(options['r']) + + # each type of block has different valid arguments, fill accordingly + block_args = dict( + block_type=block_type, + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + if block_type == 'ir': + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else 0., + noskip=skip is False, + )) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args.update(dict( + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + se_ratio=float(options['se']) if 'se' in options else 0., + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + )) + elif block_type == 'er': + block_args.update(dict( + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options['se']) if 'se' in options else 0., + noskip=skip is False, + )) + elif block_type == 'cn': + block_args.update(dict( + kernel_size=int(options['k']), + skip=skip is True, + )) + else: + assert False, 'Unknown block type (%s)' % block_type + if 'gs' in options: + block_args['group_size'] = options['gs'] + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. 
This is a good choice when stage definitions
+        # include single repeat stages that we'd prefer to keep that way as long as possible
+        num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
+    else:
+        # The default for EfficientNet truncates repeats to int via 'ceil'.
+        # Any multiplier > 1.0 will result in an increased depth for every stage.
+        num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
+
+    # Proportionally distribute repeat count scaling to each block definition in the stage.
+    # Allocation is done in reverse as it results in the first block being less likely to be scaled.
+    # The first block makes less sense to repeat in most of the arch definitions.
+    repeats_scaled = []
+    for r in repeats[::-1]:
+        rs = max(1, round((r / num_repeat * num_repeat_scaled)))
+        repeats_scaled.append(rs)
+        num_repeat -= r
+        num_repeat_scaled -= rs
+    repeats_scaled = repeats_scaled[::-1]
+
+    # Apply the calculated scaling to each block arg in the stage
+    sa_scaled = []
+    for ba, rep in zip(stack_args, repeats_scaled):
+        sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
+    return sa_scaled
+
+
+def decode_arch_def(
+        arch_def,
+        depth_multiplier=1.0,
+        depth_trunc='ceil',
+        experts_multiplier=1,
+        fix_first_last=False,
+        group_size=None,
+):
+    """ Decode block architecture definition strings -> block kwargs
+
+    Args:
+        arch_def: architecture definition strings, list of list of strings
+        depth_multiplier: network depth multiplier
+        depth_trunc: network depth truncation mode when applying multiplier
+        experts_multiplier: CondConv experts multiplier
+        fix_first_last: fix first and last block depths when multiplier is applied
+        group_size: group size override for all blocks that weren't explicitly set in arch string
+
+    Returns:
+        list of list of block kwargs
+    """
+    arch_args = []
+    if isinstance(depth_multiplier, tuple):
+        assert len(depth_multiplier) == len(arch_def)
+    else:
+        depth_multiplier = (depth_multiplier,) * len(arch_def)
+    for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)):
+        assert isinstance(block_strings, list)
+        stack_args = []
+        repeats = []
+        for block_str in block_strings:
+            assert isinstance(block_str, str)
+            ba, rep = _decode_block_str(block_str)
+            if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
+                ba['num_experts'] *= experts_multiplier
+            if group_size is not None:
+                ba.setdefault('group_size', group_size)
+            stack_args.append(ba)
+            repeats.append(rep)
+        if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1):
+            arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc))
+        else:
+            arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc))
+    return arch_args
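Putting `_decode_block_str`, `_scale_stage_depth`, and `decode_arch_def` together: a stage string expands into one kwargs dict per block, with repeats scaled by `depth_multiplier`. A worked sketch (representative stage string, not taken from a specific model def; import path assumed as above):

from custom_timm.models.efficientnet_builder import decode_arch_def  # hypothetical path

# InvertedResidual, 2 repeats, 3x3 dw kernel, stride 2, expansion 6,
# 40 output channels, SE ratio 0.25
arch_def = [['ir_r2_k3_s2_e6_c40_se0.25']]

stages = decode_arch_def(arch_def, depth_multiplier=1.4, depth_trunc='ceil')
print(len(stages[0]))  # ceil(2 * 1.4) = 3 blocks after depth scaling
print(stages[0][0]['block_type'], stages[0][0]['out_chs'])  # ir 40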
+
+
+class EfficientNetBuilder:
+    """ Build Trunk Blocks
+
+    This ended up being somewhat of a cross between
+    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
+    and
+    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
+
+    """
+    def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False,
+                 act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''):
+        self.output_stride = output_stride
+        self.pad_type = pad_type
+        self.round_chs_fn = round_chs_fn
+        self.se_from_exp = se_from_exp  # calculate se channel reduction from expanded (mid) chs
+        self.act_layer = act_layer
+        self.norm_layer = norm_layer
+        self.se_layer = get_attn(se_layer)
+        try:
+            self.se_layer(8, rd_ratio=1.0)  # test if attn layer accepts rd_ratio arg
+            self.se_has_ratio = True
+        except TypeError:
+            self.se_has_ratio = False
+        self.drop_path_rate = drop_path_rate
+        if feature_location == 'depthwise':
+            # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense
+            _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'")
+            feature_location = 'expansion'
+        self.feature_location = feature_location
+        assert feature_location in ('bottleneck', 'expansion', '')
+        self.verbose = _DEBUG_BUILDER
+
+        # state updated during build, consumed by model
+        self.in_chs = None
+        self.features = []
+
+    def _make_block(self, ba, block_idx, block_count):
+        drop_path_rate = self.drop_path_rate * block_idx / block_count
+        bt = ba.pop('block_type')
+        ba['in_chs'] = self.in_chs
+        ba['out_chs'] = self.round_chs_fn(ba['out_chs'])
+        if 'force_in_chs' in ba and ba['force_in_chs']:
+            # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl
+            ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs'])
+        ba['pad_type'] = self.pad_type
+        # block act fn overrides the model default
+        ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
+        assert ba['act_layer'] is not None
+        ba['norm_layer'] = self.norm_layer
+        ba['drop_path_rate'] = drop_path_rate
+        if bt != 'cn':
+            se_ratio = ba.pop('se_ratio')
+            if se_ratio and self.se_layer is not None:
+                if not self.se_from_exp:
+                    # adjust se_ratio by expansion ratio if calculating se channels from block input
+                    se_ratio /= ba.get('exp_ratio', 1.0)
+                if self.se_has_ratio:
+                    ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio)
+                else:
+                    ba['se_layer'] = self.se_layer
+
+        if bt == 'ir':
+            _log_info_if('  InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
+            block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba)
+        elif bt == 'ds' or bt == 'dsa':
+            _log_info_if('  DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
+            block = DepthwiseSeparableConv(**ba)
+        elif bt == 'er':
+            _log_info_if('  EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
+            block = EdgeResidual(**ba)
+        elif bt == 'cn':
+            _log_info_if('  ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
+            block = ConvBnAct(**ba)
+        else:
+            assert False, 'Unknown block type (%s) while building model.' % bt
+
+        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
+        return block
+
+    def __call__(self, in_chs, model_block_args):
+        """ Build the blocks
+        Args:
+            in_chs: Number of input-channels passed to first block
+            model_block_args: A list of lists, outer list defines stages, inner
+                list contains strings defining block configuration(s)
+        Return:
+             List of block stacks (each stack wrapped in nn.Sequential)
+        """
+        _log_info_if('Building model trunk with %d stages...'
% len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict( + module='act1', num_chs=in_chs, stage=0, reduction=current_stride, + hook_type='forward' if self.feature_location != 'bottleneck' else '') + self.features.append(feature_info) + + # outer list of block_args defines the stacks + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) + module_name = f'blocks.{stack_idx}.{block_idx}' + leaf_name = feature_info.get('module', '') + feature_info['module'] = '.'.join([module_name, leaf_name]) if leaf_name else module_name + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
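The conv branch below draws weights from N(0, sqrt(2 / fan_out)), with fan_out divided by the group count when `fix_group_fanout` is set so that grouped and depthwise convs match the TF TPU implementation. A quick numeric check of that computation:

import math

# 3x3 depthwise conv: 64 output channels, 64 groups
fan_out = 3 * 3 * 64
fan_out //= 64                 # fix_group_fanout: 576 -> 9
std = math.sqrt(2.0 / fan_out)
print(fan_out, round(std, 3))  # 9 0.471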
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a8fd9cddf04633d6f5160dd1e2e96bab4737ad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/factory.py @@ -0,0 +1,76 @@ +from urllib.parse import urlsplit, urlunsplit +import os + +from .registry import is_model, is_model_in_modules, model_entrypoint +from .helpers import load_checkpoint +from .layers import set_layer_config +from .hub import load_model_config_from_hf + + +def parse_model_name(model_name): + model_name = model_name.replace('hf_hub', 'hf-hub') # NOTE for backwards compat, to deprecate hf_hub use + parsed = urlsplit(model_name) + assert parsed.scheme in ('', 'timm', 'hf-hub') + if parsed.scheme == 'hf-hub': + # FIXME may use fragment as revision, currently `@` in URI path + return parsed.scheme, parsed.path + else: + model_name = os.path.split(parsed.path)[-1] + return 'timm', model_name + + +def safe_model_name(model_name, remove_source=True): + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = parse_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name, + pretrained=False, + pretrained_cfg=None, + checkpoint_path='', + scriptable=None, + exportable=None, + no_jit=None, + **kwargs): + """Create a model + + Args: + model_name (str): name of model to instantiate + pretrained (bool): load pretrained ImageNet-1k weights if true + checkpoint_path (str): path of checkpoint to load after model is initialized + scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet) + exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet) + no_jit (bool): set layer config so that 
model doesn't utilize jit scripted layers (so far activations only) + + Keyword Args: + drop_rate (float): dropout rate for training (default: 0.0) + global_pool (str): global pool type (default: 'avg') + **: other kwargs are model specific + """ + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + model_source, model_name = parse_model_name(model_name) + if model_source == 'hf-hub': + # FIXME hf-hub source overrides any passed in pretrained_cfg, warn? + # For model names specified in the form `hf-hub:path/architecture_name@revision`, + # load model weights + pretrained_cfg from Hugging Face hub. + pretrained_cfg, model_name = load_model_config_from_hf(model_name) + + if not is_model(model_name): + raise RuntimeError('Unknown model (%s)' % model_name) + + create_fn = model_entrypoint(model_name) + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg, **kwargs) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/features.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/features.py new file mode 100644 index 0000000000000000000000000000000000000000..0bc46419d16f9759221a39061f4eb34e76aa6efd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/features.py @@ -0,0 +1,284 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. 
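+# (Editor's sketch for factory.py above.) Typical create_model() calls; the
+# names are illustrative and must exist in the registry for the call to succeed:
+#
+#   model = create_model('resnet50', pretrained=False, num_classes=10)
+#   model = create_model('gcvit_tiny', checkpoint_path='./gcvit_tiny.pth')  # hypothetical local file
+#   model = create_model('hf-hub:some-org/some-model')  # pretrained_cfg pulled from the HF hub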
+
+The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter
+https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+from collections import OrderedDict, defaultdict
+from copy import deepcopy
+from functools import partial
+from typing import Dict, List, Tuple
+
+import torch
+import torch.nn as nn
+
+
+class FeatureInfo:
+
+    def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
+        prev_reduction = 1
+        for fi in feature_info:
+            # sanity check the mandatory fields, there may be additional fields depending on the model
+            assert 'num_chs' in fi and fi['num_chs'] > 0
+            assert 'reduction' in fi and fi['reduction'] >= prev_reduction
+            prev_reduction = fi['reduction']
+            assert 'module' in fi
+        self.out_indices = out_indices
+        self.info = feature_info
+
+    def from_other(self, out_indices: Tuple[int]):
+        return FeatureInfo(deepcopy(self.info), out_indices)
+
+    def get(self, key, idx=None):
+        """ Get value by key at specified index (indices)
+        if idx == None, returns value for key at each output index
+        if idx is an integer, return value for that feature module index (ignoring output indices)
+        if idx is a list/tuple, return value for each module index (ignoring output indices)
+        """
+        if idx is None:
+            return [self.info[i][key] for i in self.out_indices]
+        if isinstance(idx, (tuple, list)):
+            return [self.info[i][key] for i in idx]
+        else:
+            return self.info[idx][key]
+
+    def get_dicts(self, keys=None, idx=None):
+        """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
+        """
+        if idx is None:
+            if keys is None:
+                return [self.info[i] for i in self.out_indices]
+            else:
+                return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
+        if isinstance(idx, (tuple, list)):
+            return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx]
+        else:
+            return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
+
+    def channels(self, idx=None):
+        """ feature channels accessor
+        """
+        return self.get('num_chs', idx)
+
+    def reduction(self, idx=None):
+        """ feature reduction (output stride) accessor
+        """
+        return self.get('reduction', idx)
+
+    def module_name(self, idx=None):
+        """ feature module name accessor
+        """
+        return self.get('module', idx)
+
+    def __getitem__(self, item):
+        return self.info[item]
+
+    def __len__(self):
+        return len(self.info)
+
+
+class FeatureHooks:
+    """ Feature Hook Helper
+
+    This module helps with the setup and extraction of hooks for extracting features from
+    internal nodes in a model by node name. This works quite well in eager Python but needs
+    redesign for torchscript.
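+
+    (Editor's sketch, not upstream text.) Hooks are typically set up from
+    FeatureInfo metadata:
+
+        info = FeatureInfo(model.feature_info, out_indices=(1, 2, 3))
+        hooks = FeatureHooks(info.get_dicts(), model.named_modules())
+        _ = model(x)                          # forward pass fires the hooks
+        feats = hooks.get_output(x.device)    # OrderedDict: module name -> tensor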
+ """ + + def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'): + # setup feature hooks + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = h.get('hook_type', default_hook_type) + if hook_type == 'forward_pre': + m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._feature_outputs = defaultdict(OrderedDict) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. + + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. 
+ All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + + Arguments: + model (nn.Module): model from which we will extract the features + out_indices (tuple[int]): model output indices to extract features for + out_map (sequence): list or tuple specifying desired return id for each out index, + otherwise str(index) is used + feature_concat (bool): whether to concatenate intermediate features that are lists or tuples + vs select element [0] + flatten_sequential (bool): whether to flatten sequential modules assigned to model + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.concat = feature_concat + self.return_layers = {} + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints. + In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool. + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureListNet, self).__init__( + model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, + flatten_sequential=flatten_sequential) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. 
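+
+    (Editor's sketch.) Intended usage, with `model` exposing timm-style
+    feature_info metadata:
+
+        net = FeatureHookNet(model, out_indices=(1, 2, 3), out_as_dict=False)
+        feats = net(torch.randn(1, 3, 224, 224))   # list of 3 captured tensors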
+
+    FIXME this does not currently work with Torchscript, see FeatureHooks class
+    """
+    def __init__(
+            self, model,
+            out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False,
+            feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
+        super(FeatureHookNet, self).__init__()
+        assert not torch.jit.is_scripting()
+        self.feature_info = _get_feature_info(model, out_indices)
+        self.out_as_dict = out_as_dict
+        layers = OrderedDict()
+        hooks = []
+        if no_rewrite:
+            assert not flatten_sequential
+            if hasattr(model, 'reset_classifier'):  # make sure classifier is removed?
+                model.reset_classifier(0)
+            layers['body'] = model
+            hooks.extend(self.feature_info.get_dicts())
+        else:
+            modules = _module_list(model, flatten_sequential=flatten_sequential)
+            remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type
+                         for f in self.feature_info.get_dicts()}
+            for new_name, old_name, module in modules:
+                layers[new_name] = module
+                for fn, fm in module.named_modules(prefix=old_name):
+                    if fn in remaining:
+                        hooks.append(dict(module=fn, hook_type=remaining[fn]))
+                        del remaining[fn]
+                if not remaining:
+                    break
+            assert not remaining, f'Return layers ({remaining}) are not present in model'
+        self.update(layers)
+        self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
+
+    def forward(self, x):
+        for name, module in self.items():
+            x = module(x)
+        out = self.hooks.get_output(x.device)
+        return out if self.out_as_dict else list(out.values())
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/fx_features.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/fx_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fadcbf2ed9447496c744db95af84e697e527a4b
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/fx_features.py
@@ -0,0 +1,106 @@
+""" PyTorch FX Based Feature Extraction Helpers
+Using https://pytorch.org/vision/stable/feature_extraction.html
+"""
+from typing import Callable, List, Dict, Union, Type
+
+import torch
+from torch import nn
+
+from .features import _get_feature_info
+
+try:
+    from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
+    has_fx_feature_extraction = True
+except ImportError:
+    has_fx_feature_extraction = False
+
+# Layers we want to treat as leaf modules
+from .layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame
+from .layers.non_local_attn import BilinearAttnTransform
+from .layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
+
+# NOTE: By default, any modules from custom_timm.models.layers that we want to treat as leaf modules go here
+# BUT modules from custom_timm.models should use the registration mechanism below
+_leaf_modules = {
+    BilinearAttnTransform,  # reason: flow control t <= 1
+    # Reason: get_same_padding has a max which raises a control flow error
+    Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
+    CondConv2d,  # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0])
+}
+
+try:
+    from .layers import InplaceAbn
+    _leaf_modules.add(InplaceAbn)
+except ImportError:
+    pass
+
+
+def register_notrace_module(module: Type[nn.Module]):
+    """
+    Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
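+
+    A sketch (editor's note; DynamicPad is hypothetical, standing in for any
+    module with data-dependent control flow that FX tracing cannot handle):
+
+        @register_notrace_module
+        class DynamicPad(nn.Module):
+            def forward(self, x):
+                pad = max(0, 7 - x.shape[-1])   # max() over a traced shape raises
+                return nn.functional.pad(x, (0, pad))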
+ """ + _leaf_modules.add(module) + return module + + +# Functions we want to autowrap (treat them as leaves) +_autowrap_functions = set() + + +def register_notrace_function(func: Callable): + """ + Decorator for functions which ought not to be traced through + """ + _autowrap_functions.add(func) + return func + + +def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]): + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + return _create_feature_extractor( + model, return_nodes, + tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)} + ) + + +class FeatureGraphNet(nn.Module): + """ A FX Graph based feature extractor that works with the model feature_info metadata + """ + def __init__(self, model, out_indices, out_map=None): + super().__init__() + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + self.feature_info = _get_feature_info(model, out_indices) + if out_map is not None: + assert len(out_map) == len(out_indices) + return_nodes = { + info['module']: out_map[i] if out_map is not None else info['module'] + for i, info in enumerate(self.feature_info) if i in out_indices} + self.graph_module = create_feature_extractor(model, return_nodes) + + def forward(self, x): + return list(self.graph_module(x).values()) + + +class GraphExtractNet(nn.Module): + """ A standalone feature extraction wrapper that maps dict -> list or single tensor + NOTE: + * one can use feature_extractor directly if dictionary output is desired + * unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info + metadata for builtin feature extraction mode + * create_feature_extractor can be used directly if dictionary output is acceptable + + Args: + model: model to extract features from + return_nodes: node names to return features from (dict or list) + squeeze_out: if only one output, and output in list format, flatten to single tensor + """ + def __init__(self, model, return_nodes: Union[Dict[str, str], List[str]], squeeze_out: bool = True): + super().__init__() + self.squeeze_out = squeeze_out + self.graph_module = create_feature_extractor(model, return_nodes) + + def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]: + out = list(self.graph_module(x).values()) + if self.squeeze_out and len(out) == 1: + return out[0] + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gcvit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gcvit.py new file mode 100644 index 0000000000000000000000000000000000000000..e8984dfe2b60b1e574ed42458bce292ce8bf1fe2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gcvit.py @@ -0,0 +1,592 @@ +""" Global Context ViT + +From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py + +Global Context Vision Transformers -https://arxiv.org/abs/2206.09959 + +@article{hatamizadeh2022global, + title={Global Context Vision Transformers}, + author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo}, + journal={arXiv preprint arXiv:2206.09959}, + year={2022} +} + +Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit. +The license for this code release is Apache 2.0 with no commercial restrictions. 
+ +However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license +(https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones... + +Hacked together by / Copyright 2022, Ross Wightman +""" +import math +from functools import partial +from typing import Callable, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply +from .layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d,\ + get_attn, get_act_layer, get_norm_layer, _assert +from .registry import register_model +from .vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move to common location + +__all__ = ['GlobalContextVit'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = { + 'gcvit_xxtiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), + 'gcvit_xtiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), + 'gcvit_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), + 'gcvit_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), + 'gcvit_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'), +} + + +class MbConvBlock(nn.Module): + """ A depthwise separable / fused mbconv style residual block with SE, `no norm. 
+ """ + def __init__( + self, + in_chs, + out_chs=None, + expand_ratio=1.0, + attn_layer='se', + bias=False, + act_layer=nn.GELU, + ): + super().__init__() + attn_kwargs = dict(act_layer=act_layer) + if isinstance(attn_layer, str) and attn_layer == 'se' or attn_layer == 'eca': + attn_kwargs['rd_ratio'] = 0.25 + attn_kwargs['bias'] = False + attn_layer = get_attn(attn_layer) + out_chs = out_chs or in_chs + mid_chs = int(expand_ratio * in_chs) + + self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias) + self.act = act_layer() + self.se = attn_layer(mid_chs, **attn_kwargs) + self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias) + + def forward(self, x): + shortcut = x + x = self.conv_dw(x) + x = self.act(x) + x = self.se(x) + x = self.conv_pw(x) + x = x + shortcut + return x + + +class Downsample2d(nn.Module): + def __init__( + self, + dim, + dim_out=None, + reduction='conv', + act_layer=nn.GELU, + norm_layer=LayerNorm2d, # NOTE in NCHW + ): + super().__init__() + dim_out = dim_out or dim + + self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity() + self.conv_block = MbConvBlock(dim, act_layer=act_layer) + assert reduction in ('conv', 'max', 'avg') + if reduction == 'conv': + self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False) + elif reduction == 'max': + assert dim == dim_out + self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + assert dim == dim_out + self.reduction = nn.AvgPool2d(kernel_size=2) + self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity() + + def forward(self, x): + x = self.norm1(x) + x = self.conv_block(x) + x = self.reduction(x) + x = self.norm2(x) + return x + + +class FeatureBlock(nn.Module): + def __init__( + self, + dim, + levels=0, + reduction='max', + act_layer=nn.GELU, + ): + super().__init__() + reductions = levels + levels = max(1, levels) + if reduction == 'avg': + pool_fn = partial(nn.AvgPool2d, kernel_size=2) + else: + pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1) + self.blocks = nn.Sequential() + for i in range(levels): + self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer)) + if reductions: + self.blocks.add_module(f'pool{i+1}', pool_fn()) + reductions -= 1 + + def forward(self, x): + return self.blocks(x) + + +class Stem(nn.Module): + def __init__( + self, + in_chs: int = 3, + out_chs: int = 96, + act_layer: Callable = nn.GELU, + norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW + ): + super().__init__() + self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1) + self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer) + + def forward(self, x): + x = self.conv1(x) + x = self.down(x) + return x + + +class WindowAttentionGlobal(nn.Module): + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Tuple[int, int], + use_global: bool = True, + qkv_bias: bool = True, + attn_drop: float = 0., + proj_drop: float = 0., + ): + super().__init__() + window_size = to_2tuple(window_size) + self.window_size = window_size + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + self.use_global = use_global + + self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads) + if self.use_global: + self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias) + else: + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = 
nn.Dropout(proj_drop) + + def forward(self, x, q_global: Optional[torch.Tensor] = None): + B, N, C = x.shape + if self.use_global and q_global is not None: + _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal') + + kv = self.qkv(x) + kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + q = q_global.repeat(B // q_global.shape[0], 1, 1, 1) + q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) + else: + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + q = q * self.scale + + attn = (q @ k.transpose(-2, -1)) + attn = self.rel_pos(attn) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +def window_partition(x, window_size: Tuple[int, int]): + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): + H, W = img_size + B = int(windows.shape[0] / (H * W / window_size[0] / window_size[1])) + x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class GlobalContextVitBlock(nn.Module): + def __init__( + self, + dim: int, + feat_size: Tuple[int, int], + num_heads: int, + window_size: int = 7, + mlp_ratio: float = 4., + use_global: bool = True, + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + attn_layer: Callable = WindowAttentionGlobal, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + ): + super().__init__() + feat_size = to_2tuple(feat_size) + window_size = to_2tuple(window_size) + self.window_size = window_size + self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1])) + + self.norm1 = norm_layer(dim) + self.attn = attn_layer( + dim, + num_heads=num_heads, + window_size=window_size, + use_global=use_global, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop, + ) + self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) + self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def _window_attn(self, x, q_global: Optional[torch.Tensor] = None): + B, H, W, C = x.shape + x_win = window_partition(x, self.window_size) + x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C) + attn_win = self.attn(x_win, q_global) + x = window_reverse(attn_win, self.window_size, (H, W)) + return x + + def forward(self, x, q_global: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class GlobalContextVitStage(nn.Module): + def __init__( + self, + dim, + depth: int, + num_heads: int, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + downsample: bool = True, + global_norm: bool = False, + stage_norm: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + proj_drop: float = 0., + attn_drop: float = 0., + drop_path: Union[List[float], float] = 0.0, + act_layer: Callable = nn.GELU, + norm_layer: Callable = nn.LayerNorm, + norm_layer_cl: Callable = LayerNorm2d, + ): + super().__init__() + if downsample: + self.downsample = Downsample2d( + dim=dim, + dim_out=dim * 2, + norm_layer=norm_layer, + ) + dim = dim * 2 + feat_size = (feat_size[0] // 2, feat_size[1] // 2) + else: + self.downsample = nn.Identity() + self.feat_size = feat_size + window_size = to_2tuple(window_size) + + feat_levels = int(math.log2(min(feat_size) / min(window_size))) + self.global_block = FeatureBlock(dim, feat_levels) + self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity() + + self.blocks = nn.ModuleList([ + GlobalContextVitBlock( + dim=dim, + num_heads=num_heads, + feat_size=feat_size, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + use_global=(i % 2 != 0), + layer_scale=layer_scale, + proj_drop=proj_drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + act_layer=act_layer, + norm_layer=norm_layer_cl, + ) + for i in range(depth) + ]) + self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity() + self.dim = dim + self.feat_size = feat_size + self.grad_checkpointing = False + + def forward(self, x): + # input NCHW, downsample & global block are 2d conv + pooling + x = self.downsample(x) + global_query = self.global_block(x) + + # reshape NCHW --> NHWC for transformer blocks + x = x.permute(0, 2, 3, 1) + global_query = self.global_norm(global_query.permute(0, 2, 3, 1)) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x, global_query) + x = self.norm(x) + x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW + return x + + +class GlobalContextVit(nn.Module): + def __init__( + self, + in_chans: int = 3, + num_classes: int = 1000, + global_pool: str = 'avg', + img_size: Tuple[int, int] = 224, + window_ratio: Tuple[int, ...] = (32, 32, 16, 32), + window_size: Tuple[int, ...] = None, + embed_dim: int = 64, + depths: Tuple[int, ...] = (3, 4, 19, 5), + num_heads: Tuple[int, ...] 
= (2, 4, 8, 16), + mlp_ratio: float = 3.0, + qkv_bias: bool = True, + layer_scale: Optional[float] = None, + drop_rate: float = 0., + proj_drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + weight_init='', + act_layer: str = 'gelu', + norm_layer: str = 'layernorm2d', + norm_layer_cl: str = 'layernorm', + norm_eps: float = 1e-5, + ): + super().__init__() + act_layer = get_act_layer(act_layer) + norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) + norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) + + img_size = to_2tuple(img_size) + feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4 + self.global_pool = global_pool + self.num_classes = num_classes + self.drop_rate = drop_rate + num_stages = len(depths) + self.num_features = int(embed_dim * 2 ** (num_stages - 1)) + if window_size is not None: + window_size = to_ntuple(num_stages)(window_size) + else: + assert window_ratio is not None + window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)]) + + self.stem = Stem( + in_chs=in_chans, + out_chs=embed_dim, + act_layer=act_layer, + norm_layer=norm_layer + ) + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + stages = [] + for i in range(num_stages): + last_stage = i == num_stages - 1 + stage_scale = 2 ** max(i - 1, 0) + stages.append(GlobalContextVitStage( + dim=embed_dim * stage_scale, + depth=depths[i], + num_heads=num_heads[i], + feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale), + window_size=window_size[i], + downsample=i != 0, + stage_norm=last_stage, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + layer_scale=layer_scale, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + act_layer=act_layer, + norm_layer=norm_layer, + norm_layer_cl=norm_layer_cl, + )) + self.stages = nn.Sequential(*stages) + + # Classifier head + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + if weight_init: + named_apply(partial(self._init_weights, scheme=weight_init), self) + + def _init_weights(self, module, name, scheme='vit'): + # note Conv2d left as default init + if scheme == 'vit': + if isinstance(module, nn.Linear): + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + else: + if isinstance(module, nn.Linear): + nn.init.normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + + @torch.jit.ignore + def no_weight_decay(self): + return { + k for k, _ in self.named_parameters() + if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', # stem and embed + blocks=r'^stages\.(\d+)' + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is None: + global_pool = self.head.global_pool.pool_type + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.stem(x) + x = self.stages(x) + return x + 
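+    # (Editor's note.) Shape sketch for the default 224x224 configs: the stem
+    # reduces H and W by 4 and each of the three downsampling stages halves them
+    # again, so forward_features() returns an NCHW tensor at stride 32 (7x7 for a
+    # 224 input) with self.num_features channels.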
+ def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_gcvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs) + return model + + +@register_model +def gcvit_xxtiny(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(2, 2, 6, 2), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_xtiny(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 6, 5), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_tiny(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(2, 4, 8, 16), + **kwargs) + return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_small(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(3, 6, 12, 24), + embed_dim=96, + mlp_ratio=2, + layer_scale=1e-5, + **kwargs) + return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def gcvit_base(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 19, 5), + num_heads=(4, 8, 16, 32), + embed_dim=128, + mlp_ratio=2, + layer_scale=1e-5, + **kwargs) + return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/ghostnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/ghostnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f31127dd86409b5fe2e9b54036e72a0a938da09f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/ghostnet.py @@ -0,0 +1,302 @@ +""" +An implementation of GhostNet Model as defined in: +GhostNet: More Features from Cheap Operations. 
https://arxiv.org/abs/1911.11907 +The train script of the model is similar to that of MobileNetV3 +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import SelectAdaptivePool2d, Linear, make_divisible +from .efficientnet_blocks import SqueezeExcite, ConvBnAct +from .helpers import build_model_with_cfg, checkpoint_seq +from .registry import register_model + + +__all__ = ['GhostNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'ghostnet_050': _cfg(url=''), + 'ghostnet_100': _cfg( + url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), + 'ghostnet_130': _cfg(url=''), +} + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): + super(GhostModule, self).__init__() + self.oup = oup + init_channels = math.ceil(oup / ratio) + new_channels = init_channels * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False), + nn.BatchNorm2d(init_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False), + nn.BatchNorm2d(new_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.oup, :, :] + + +class GhostBottleneck(nn.Module): + """ Ghost bottleneck w/ optional SE""" + + def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, + stride=1, act_layer=nn.ReLU, se_ratio=0.): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. 
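+        # (Editor's note.) GhostModule above assembles oup output channels from
+        # init_channels = ceil(oup / ratio) primary 1x1-conv channels plus cheap
+        # depthwise copies, then trims the concat back to exactly oup; e.g.
+        # oup=16, ratio=2 -> 8 primary + 8 cheap. has_se gates the optional SE
+        # block below (se_ratio=0.25 in the stage cfgs enables it).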
+ self.stride = stride + + # Point-wise expansion + self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__( + self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = GhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + # FIXME init + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv_stem|bn1', + blocks=[ + (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), + (r'conv_head', (99999,)) + ] + ) + 
return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + return x + + def forward_head(self, x): + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, variant, pretrained, + feature_cfg=dict(flatten_sequential=True), + **model_kwargs) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs): + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs): + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs): + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f24eb3e682bc09df9434ba3bdf0248f303095f6f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_resnet.py @@ -0,0 +1,245 @@ +"""Pytorch impl of MxNet Gluon ResNet/(SE)ResNeXt variants +This file evolved from https://github.com/pytorch/vision 'resnet.py' with (SE)-ResNeXt additions +and ports of Gluon variations (https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnet.py) +by Ross Wightman +""" + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SEModule +from .registry import register_model +from .resnet import ResNet, Bottleneck, BasicBlock + + +def _cfg(url='', **kwargs): + return { + 
'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'gluon_resnet18_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'gluon_resnet34_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 'gluon_resnet50_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'gluon_resnet101_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'gluon_resnet152_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'gluon_resnet50_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'gluon_resnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'gluon_resnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'gluon_resnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'gluon_seresnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'gluon_seresnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'gluon_seresnext101_64x4d': 
_cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'gluon_senet154': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), +} + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +@register_model +def gluon_resnet18_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('gluon_resnet18_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet34_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet34_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet50_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('gluon_resnet101_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('gluon_resnet152_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet50_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet101_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet152_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnet101_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1s', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt50-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_64x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-64x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_senet154(pretrained=False, **kwargs): + """Constructs an SENet-154 model. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_senet154', pretrained, **model_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_xception.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_xception.py new file mode 100644 index 0000000000000000000000000000000000000000..809251b28dbecf867169010ac962a5fb5ca09e8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/gluon_xception.py @@ -0,0 +1,267 @@ +"""Pytorch impl of Gluon Xception +This is a port of the Gluon Xception code and weights, itself ported from a PyTorch DeepLab impl. + +Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html) +Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier, get_padding +from .registry import register_model + +__all__ = ['Xception65'] + +default_cfgs = { + 'gluon_xception65': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', + 'input_size': (3, 299, 299), + 'crop_pct': 0.903, + 'pool_size': (10, 10), + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + }, +} + +""" PADDING NOTES +The original PyTorch and Gluon impl of these models dutifully reproduced the +aligned padding added to Tensorflow models for Deeplab. This padding was compensating +for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to. 
+""" + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + padding = get_padding(kernel_size, stride, dilation) + self.conv_dw = nn.Conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(num_features=inplanes) + # pointwise convolution + self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn(x) + x = self.conv_pw(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None): + super(Block, self).__init__() + if isinstance(planes, (list, tuple)): + assert len(planes) == 3 + else: + planes = (planes,) * 3 + outplanes = planes[-1] + + if outplanes != inplanes or stride != 1: + self.skip = nn.Sequential() + self.skip.add_module('conv1', nn.Conv2d( + inplanes, outplanes, 1, stride=stride, bias=False)), + self.skip.add_module('bn1', norm_layer(num_features=outplanes)) + else: + self.skip = None + + rep = OrderedDict() + for i in range(3): + rep['act%d' % (i + 1)] = nn.ReLU(inplace=True) + rep['conv%d' % (i + 1)] = SeparableConv2d( + inplanes, planes[i], 3, stride=stride if i == 2 else 1, dilation=dilation, norm_layer=norm_layer) + rep['bn%d' % (i + 1)] = norm_layer(planes[i]) + inplanes = planes[i] + + if not start_with_relu: + del rep['act1'] + else: + rep['act1'] = nn.ReLU(inplace=False) + self.rep = nn.Sequential(rep) + + def forward(self, x): + skip = x + if self.skip is not None: + skip = self.skip(skip) + x = self.rep(x) + skip + return x + + +class Xception65(nn.Module): + """Modified Aligned Xception. 
+ + NOTE: only the 65 layer version is included here, the 71 layer variant + was not correct and had no pretrained weights + """ + + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, + drop_rate=0., global_pool='avg'): + super(Xception65, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_dilation = 1 + exit_dilation = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_dilation = 1 + exit_dilation = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_dilation = 2 + exit_dilation = (2, 4) + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(num_features=32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(num_features=64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block1_act = nn.ReLU(inplace=True) + self.block2 = Block(128, 256, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block3 = Block(256, 728, stride=entry_block3_stride, norm_layer=norm_layer) + + # Middle flow + self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( + 728, 728, stride=1, dilation=middle_dilation, norm_layer=norm_layer)) for i in range(4, 20)])) + + # Exit flow + self.block20 = Block( + 728, (728, 1024, 1024), stride=exit_block20_stride, dilation=exit_dilation[0], norm_layer=norm_layer) + self.block20_act = nn.ReLU(inplace=True) + + self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn3 = norm_layer(num_features=1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn4 = norm_layer(num_features=1536) + self.act4 = nn.ReLU(inplace=True) + + self.num_features = 2048 + self.conv5 = SeparableConv2d( + 1536, self.num_features, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn5 = norm_layer(num_features=self.num_features) + self.act5 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block1_act'), + dict(num_chs=256, reduction=8, module='block3.rep.act1'), + dict(num_chs=728, reduction=16, module='block20.rep.act1'), + dict(num_chs=2048, reduction=32, module='act5'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv[12]|bn[12]', + blocks=[ + (r'^mid\.block(\d+)', None), + (r'^block(\d+)', None), + (r'^conv[345]|bn[345]', (99,)), + ], + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, "gradient checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = 
self.act1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        x = self.block1(x)
+        x = self.block1_act(x)
+        # c1 = x
+        x = self.block2(x)
+        # c2 = x
+        x = self.block3(x)
+
+        # Middle flow
+        x = self.mid(x)
+        # c3 = x
+
+        # Exit flow
+        x = self.block20(x)
+        x = self.block20_act(x)
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.act3(x)
+
+        x = self.conv4(x)
+        x = self.bn4(x)
+        x = self.act4(x)
+
+        x = self.conv5(x)
+        x = self.bn5(x)
+        x = self.act5(x)
+        return x
+
+    def forward_head(self, x):
+        x = self.global_pool(x)
+        if self.drop_rate:
+            # assign the result so dropout actually takes effect (F.dropout is not in-place by default)
+            x = F.dropout(x, self.drop_rate, training=self.training)
+        x = self.fc(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _create_gluon_xception(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        Xception65, variant, pretrained,
+        feature_cfg=dict(feature_cls='hook'),
+        **kwargs)
+
+
+@register_model
+def gluon_xception65(pretrained=False, **kwargs):
+    """ Modified Aligned Xception-65
+    """
+    return _create_gluon_xception('gluon_xception65', pretrained, **kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hardcorenas.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hardcorenas.py
new file mode 100644
index 0000000000000000000000000000000000000000..e53134b3235feffe24fedbe451e1680cbcfed27e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hardcorenas.py
@@ -0,0 +1,151 @@
+from functools import partial
+
+import torch.nn as nn
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .efficientnet_blocks import SqueezeExcite
+from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
+from .helpers import build_model_with_cfg, pretrained_cfg_for_features
+from .layers import get_act_fn
+from .mobilenetv3 import MobileNetV3, MobileNetV3Features
+from .registry import register_model
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
+        'crop_pct': 0.875, 'interpolation': 'bilinear',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'conv_stem', 'classifier': 'classifier',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'hardcorenas_a': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_a_green_38ms_75_9-31dc7186.pth'),
+    'hardcorenas_b': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_b_green_40ms_76_5-32d91ff2.pth'),
+    'hardcorenas_c': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_c_green_44ms_77_1-631a0983.pth'),
+    'hardcorenas_d': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_d_green_50ms_77_4-998d9d7a.pth'),
+    'hardcorenas_e': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_e_green_55ms_77_9-482886a3.pth'),
+    'hardcorenas_f': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/hardcorenas_f_green_60ms_78_1-14b9e780.pth'),
+}
+
+
+def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
+    """Creates a hardcorenas model
+
+    Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
+    Paper: https://arxiv.org/abs/2102.11646
+
+    """
+    num_features = 1280
+    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid',
force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=32, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=se_layer, + **kwargs, + ) + + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if model_kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = pretrained_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs): + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs): + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs): + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs): + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 
'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs): + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs): + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/helpers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..d68c7e6541ae5f39af0d962ff3b453e4b0c266c4 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/helpers.py @@ -0,0 +1,796 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import collections.abc +import logging +import math +import os +import re +from collections import OrderedDict, defaultdict +from copy import deepcopy +from itertools import chain +from typing import Any, Callable, Optional, Tuple, Dict, Union + +import torch +import torch.nn as nn +from torch.hub import load_state_dict_from_url +from torch.utils.checkpoint import checkpoint + +from .features import FeatureListNet, FeatureDictNet, FeatureHookNet +from .fx_features import FeatureGraphNet +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf +from .layers import Conv2dSame, Linear, BatchNormAct2d +from .registry import get_pretrained_cfg + + +_logger = logging.getLogger(__name__) + + +# Global variables for rarely used pretrained checkpoint download progress and hash check. +# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle. 
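+# Editor's sketch of the intended toggle usage (hypothetical calling code, not part of the module):
+#
+#   from custom_timm.models.helpers import set_pretrained_download_progress, set_pretrained_check_hash
+#   set_pretrained_download_progress(True)   # show a progress bar while downloading weights
+#   set_pretrained_check_hash(True)          # verify the checkpoint hash after download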
+_DOWNLOAD_PROGRESS = False +_CHECK_HASH = False + + +def clean_state_dict(state_dict): + # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training + cleaned_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] if k.startswith('module.') else k + cleaned_state_dict[name] = v + return cleaned_state_dict + + +def load_state_dict(checkpoint_path, use_ema=True): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = '' + if isinstance(checkpoint, dict): + if use_ema and checkpoint.get('state_dict_ema', None) is not None: + state_dict_key = 'state_dict_ema' + elif use_ema and checkpoint.get('model_ema', None) is not None: + state_dict_key = 'model_ema' + elif 'state_dict' in checkpoint: + state_dict_key = 'state_dict' + elif 'model' in checkpoint: + state_dict_key = 'model' + state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_checkpoint(model, checkpoint_path, use_ema=True, strict=True): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise NotImplementedError('Model cannot load numpy checkpoint') + return + state_dict = load_state_dict(checkpoint_path, use_ema) + incompatible_keys = model.load_state_dict(state_dict, strict=strict) + return incompatible_keys + + +def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True): + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + if log_info: + _logger.info('Restoring model state from checkpoint...') + state_dict = clean_state_dict(checkpoint['state_dict']) + model.load_state_dict(state_dict) + + if optimizer is not None and 'optimizer' in checkpoint: + if log_info: + _logger.info('Restoring optimizer state from checkpoint...') + optimizer.load_state_dict(checkpoint['optimizer']) + + if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: + if log_info: + _logger.info('Restoring AMP loss scaler state from checkpoint...') + loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) + + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + + if log_info: + _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + if log_info: + _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return resume_epoch + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def _resolve_pretrained_source(pretrained_cfg): + cfg_source = pretrained_cfg.get('source', '') + pretrained_url = pretrained_cfg.get('url', None) + pretrained_file = pretrained_cfg.get('file', None) + hf_hub_id = pretrained_cfg.get('hf_hub_id', None) + # resolve where to load pretrained weights from + 
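+    # (Editor's note) Precedence, summarized: an explicit 'hf-hub' cfg source wins outright;
+    # otherwise a local 'file' beats a 'url', which beats an 'hf_hub_id' fallback. E.g. a cfg
+    # with both 'file' and 'url' set resolves to ('file', <path>).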
load_from = ''
+    pretrained_loc = ''
+    if cfg_source == 'hf-hub' and has_hf_hub(necessary=True):
+        # hf-hub specified as source via model identifier
+        load_from = 'hf-hub'
+        assert hf_hub_id
+        pretrained_loc = hf_hub_id
+    else:
+        # default source == timm or unspecified
+        if pretrained_file:
+            load_from = 'file'
+            pretrained_loc = pretrained_file
+        elif pretrained_url:
+            load_from = 'url'
+            pretrained_loc = pretrained_url
+        elif hf_hub_id and has_hf_hub(necessary=True):
+            # hf-hub available as alternate weight source in default_cfg
+            load_from = 'hf-hub'
+            pretrained_loc = hf_hub_id
+    if load_from == 'hf-hub' and 'hf_hub_filename' in pretrained_cfg:
+        # if a filename override is set, return tuple for location w/ (hub_id, filename)
+        pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename']
+    return load_from, pretrained_loc
+
+
+def set_pretrained_download_progress(enable=True):
+    """ Set download progress for pretrained weights on/off (globally). """
+    global _DOWNLOAD_PROGRESS
+    _DOWNLOAD_PROGRESS = enable
+
+
+def set_pretrained_check_hash(enable=True):
+    """ Set hash checking for pretrained weights on/off (globally). """
+    global _CHECK_HASH
+    _CHECK_HASH = enable
+
+
+def load_custom_pretrained(
+        model: nn.Module,
+        pretrained_cfg: Optional[Dict] = None,
+        load_fn: Optional[Callable] = None,
+):
+    r"""Loads a custom (read non .pth) weight file
+
+    Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
+    a passed in custom load fn, or the `load_pretrained` model member fn.
+
+    If the object is already present in `model_dir`, it's deserialized and returned.
+    The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
+    `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
+
+    Args:
+        model: The instantiated model to load weights into
+        pretrained_cfg (dict): Default pretrained model cfg
+        load_fn: An external stand alone fn that loads weights into provided model, otherwise a fn named
+            'load_pretrained' on the model will be called if it exists
+    """
+    pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) or {}
+    load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
+    if not load_from:
+        _logger.warning("No pretrained weights exist for this model.
Using random initialization.") + return + if load_from == 'hf-hub': # FIXME + _logger.warning("Hugging Face hub not currently supported for custom load pretrained models.") + elif load_from == 'url': + pretrained_loc = download_cached_file(pretrained_loc, check_hash=_CHECK_HASH, progress=_DOWNLOAD_PROGRESS) + + if load_fn is not None: + load_fn(model, pretrained_loc) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(pretrained_loc) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained( + model: nn.Module, + pretrained_cfg: Optional[Dict] = None, + num_classes: int = 1000, + in_chans: int = 3, + filter_fn: Optional[Callable] = None, + strict: bool = True, +): + """ Load pretrained checkpoint + + Args: + model (nn.Module) : PyTorch model module + pretrained_cfg (Optional[Dict]): configuration for pretrained weights / target dataset + num_classes (int): num_classes for model + in_chans (int): in_chans for model + filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) + strict (bool): strict load of checkpoint + + """ + pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None) or {} + load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg) + if load_from == 'file': + _logger.info(f'Loading pretrained weights from file ({pretrained_loc})') + state_dict = load_state_dict(pretrained_loc) + elif load_from == 'url': + _logger.info(f'Loading pretrained weights from url ({pretrained_loc})') + state_dict = load_state_dict_from_url( + pretrained_loc, map_location='cpu', progress=_DOWNLOAD_PROGRESS, check_hash=_CHECK_HASH) + elif load_from == 'hf-hub': + _logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})') + if isinstance(pretrained_loc, (list, tuple)): + state_dict = load_state_dict_from_hf(*pretrained_loc) + else: + state_dict = load_state_dict_from_hf(pretrained_loc) + else: + _logger.warning("No pretrained weights exist or were found for this model. 
Using random initialization.") + return + + if filter_fn is not None: + # for backwards compat with filter fn that take one arg, try one first, the two + try: + state_dict = filter_fn(state_dict) + except TypeError: + state_dict = filter_fn(state_dict, model) + + input_convs = pretrained_cfg.get('first_conv', None) + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs,) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) + _logger.info( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + _logger.warning( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') + + classifiers = pretrained_cfg.get('classifier', None) + label_offset = pretrained_cfg.get('label_offset', 0) + if classifiers is not None: + if isinstance(classifiers, str): + classifiers = (classifiers,) + if num_classes != pretrained_cfg['num_classes']: + for classifier_name in classifiers: + # completely discard fully connected if model num_classes doesn't match pretrained weights + state_dict.pop(classifier_name + '.weight', None) + state_dict.pop(classifier_name + '.bias', None) + strict = False + elif label_offset > 0: + for classifier_name in classifiers: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + model.load_state_dict(state_dict, strict=strict) + + +def extract_layer(model, layer): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + if not hasattr(model, 'module') and layer[0] == 'module': + layer = layer[1:] + for l in layer: + if hasattr(module, l): + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + else: + return module + return module + + +def set_layer(model, layer, val): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + lst_index = 0 + module2 = module + for l in layer: + if hasattr(module2, l): + if not l.isdigit(): + module2 = getattr(module2, l) + else: + module2 = module2[int(l)] + lst_index += 1 + lst_index -= 1 + for l in layer[:lst_index]: + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + l = layer[lst_index] + setattr(module, l, val) + + +def adapt_model_from_string(parent_module, model_string): + separator = '***' + state_dict = {} + lst_shape = model_string.split(separator) + for k in lst_shape: + k = k.split(':') + key = k[0] + shape = k[1][1:-1].split(',') + if shape[0] != '': + state_dict[key] = [int(i) for i in shape] + + new_module = deepcopy(parent_module) + for n, m in parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + 
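+                # (Editor's note) grouped convs are treated as depthwise here: the group count is
+                # tied to the channel count, so in_channels follows out_channels and g == in_channels.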
in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + elif isinstance(old_module, BatchNormAct2d): + new_bn = BatchNormAct2d( + state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + new_bn.drop = old_module.drop + new_bn.act = old_module.act + set_layer(new_module, n, new_bn) + elif isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + elif isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + new_module.num_features = num_features + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt') + with open(adapt_file, 'r') as f: + return adapt_model_from_string(parent_module, f.read().strip()) + + +def pretrained_cfg_for_features(pretrained_cfg): + pretrained_cfg = deepcopy(pretrained_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size? 
+ for tr in to_remove: + pretrained_cfg.pop(tr, None) + return pretrained_cfg + + +def set_default_kwargs(kwargs, names, pretrained_cfg): + for n in names: + # for legacy reasons, model __init__args uses img_size + in_chans as separate args while + # pretrained_cfg has one input_size=(C, H ,W) entry + if n == 'img_size': + input_size = pretrained_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[-2:]) + elif n == 'in_chans': + input_size = pretrained_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[0]) + else: + default_val = pretrained_cfg.get(n, None) + if default_val is not None: + kwargs.setdefault(n, pretrained_cfg[n]) + + +def filter_kwargs(kwargs, names): + if not kwargs or not names: + return + for n in names: + kwargs.pop(n, None) + + +def update_pretrained_cfg_and_kwargs(pretrained_cfg, kwargs, kwargs_filter): + """ Update the default_cfg and kwargs before passing to model + + Args: + pretrained_cfg: input pretrained cfg (updated in-place) + kwargs: keyword args passed to model build fn (updated in-place) + kwargs_filter: keyword arg keys that must be removed before model __init__ + """ + # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) + default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') + if pretrained_cfg.get('fixed_input_size', False): + # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size + default_kwarg_names += ('img_size',) + set_default_kwargs(kwargs, names=default_kwarg_names, pretrained_cfg=pretrained_cfg) + # Filter keyword args for task specific model variants (some 'features only' models, etc.) + filter_kwargs(kwargs, names=kwargs_filter) + + +def resolve_pretrained_cfg(variant: str, pretrained_cfg=None): + if pretrained_cfg and isinstance(pretrained_cfg, dict): + # highest priority, pretrained_cfg available and passed as arg + return deepcopy(pretrained_cfg) + # fallback to looking up pretrained cfg in model registry by variant identifier + pretrained_cfg = get_pretrained_cfg(variant) + if not pretrained_cfg: + _logger.warning( + f"No pretrained configuration specified for {variant} model. Using a default." 
+ f" Please add a config to the model pretrained_cfg registry or pass explicitly.") + pretrained_cfg = dict( + url='', + num_classes=1000, + input_size=(3, 224, 224), + pool_size=None, + crop_pct=.9, + interpolation='bicubic', + first_conv='', + classifier='', + ) + return pretrained_cfg + + +def build_model_with_cfg( + model_cls: Callable, + variant: str, + pretrained: bool, + pretrained_cfg: Optional[Dict] = None, + model_cfg: Optional[Any] = None, + feature_cfg: Optional[Dict] = None, + pretrained_strict: bool = True, + pretrained_filter_fn: Optional[Callable] = None, + pretrained_custom_load: bool = False, + kwargs_filter: Optional[Tuple[str]] = None, + **kwargs): + """ Build model with specified default_cfg and optional model_cfg + + This helper fn aids in the construction of a model including: + * handling default_cfg and associated pretrained weight loading + * passing through optional model_cfg for models with config based arch spec + * features_only model adaptation + * pruning config / model adaptation + + Args: + model_cls (nn.Module): model class + variant (str): model variant name + pretrained (bool): load pretrained weights + pretrained_cfg (dict): model's pretrained weight/task config + model_cfg (Optional[Dict]): model's architecture config + feature_cfg (Optional[Dict]: feature extraction adapter config + pretrained_strict (bool): load pretrained weights strictly + pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights + pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights + kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model + **kwargs: model args passed through to model __init__ + """ + pruned = kwargs.pop('pruned', False) + features = False + feature_cfg = feature_cfg or {} + + # resolve and update model pretrained config and model kwargs + pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=pretrained_cfg) + update_pretrained_cfg_and_kwargs(pretrained_cfg, kwargs, kwargs_filter) + pretrained_cfg.setdefault('architecture', variant) + + # Setup for feature extraction wrapper done at end of this fn + if kwargs.pop('features_only', False): + features = True + feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) + if 'out_indices' in kwargs: + feature_cfg['out_indices'] = kwargs.pop('out_indices') + + # Build the model + model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs) + model.pretrained_cfg = pretrained_cfg + model.default_cfg = model.pretrained_cfg # alias for backwards compat + + if pruned: + model = adapt_model_from_file(model, variant) + + # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats + num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) + if pretrained: + if pretrained_custom_load: + # FIXME improve custom load trigger + load_custom_pretrained(model, pretrained_cfg=pretrained_cfg) + else: + load_pretrained( + model, + pretrained_cfg=pretrained_cfg, + num_classes=num_classes_pretrained, + in_chans=kwargs.get('in_chans', 3), + filter_fn=pretrained_filter_fn, + strict=pretrained_strict) + + # Wrap the model in a feature extraction module if enabled + if features: + feature_cls = FeatureListNet + if 'feature_cls' in feature_cfg: + feature_cls = feature_cfg.pop('feature_cls') + if isinstance(feature_cls, str): + feature_cls = feature_cls.lower() + if 'hook' in feature_cls: + feature_cls = FeatureHookNet + elif feature_cls 
== 'fx': + feature_cls = FeatureGraphNet + else: + assert False, f'Unknown feature class {feature_cls}' + model = feature_cls(model, **feature_cfg) + model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back default_cfg + model.default_cfg = model.pretrained_cfg # alias for backwards compat + + return model + + +def model_parameters(model, exclude_head=False): + if exclude_head: + # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering + return [p for p in model.parameters()][:-2] + else: + return model.parameters() + + +def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules(module: nn.Module, name='', depth_first=True, include_root=False): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module + + +def named_modules_with_params(module: nn.Module, name='', depth_first=True, include_root=False): + if module._parameters and not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules_with_params( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if module._parameters and depth_first and include_root: + yield name, module + + +MATCH_PREV_GROUP = (99999,) + + +def group_with_matcher( + named_objects, + group_matcher: Union[Dict, Callable], + output_values: bool = False, + reverse: bool = False +): + if isinstance(group_matcher, dict): + # dictionary matcher contains a dict of raw-string regex expr that must be compiled + compiled = [] + for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()): + if mspec is None: + continue + # map all matching specifications into 3-tuple (compiled re, prefix, suffix) + if isinstance(mspec, (tuple, list)): + # multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix) + for sspec in mspec: + compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])] + else: + compiled += [(re.compile(mspec), (group_ordinal,), None)] + group_matcher = compiled + + def _get_grouping(name): + if isinstance(group_matcher, (list, tuple)): + for match_fn, prefix, suffix in group_matcher: + r = match_fn.match(name) + if r: + parts = (prefix, r.groups(), suffix) + # map all tuple elem to int for numeric sort, filter out None entries + return tuple(map(float, chain.from_iterable(filter(None, parts)))) + return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal + else: + ord = group_matcher(name) + if not isinstance(ord, collections.abc.Iterable): + return ord, + return tuple(ord) + + # map layers into groups via ordinals (ints or tuples of ints) from matcher + grouping = defaultdict(list) + for k, v in named_objects: + 
grouping[_get_grouping(k)].append(v if output_values else k)
+
+    # remap to integers
+    layer_id_to_param = defaultdict(list)
+    lid = -1
+    for k in sorted(filter(lambda x: x is not None, grouping.keys())):
+        if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
+            lid += 1
+        layer_id_to_param[lid].extend(grouping[k])
+
+    if reverse:
+        assert not output_values, "reverse mapping only sensible for name output"
+        # output reverse mapping
+        param_to_layer_id = {}
+        for lid, lm in layer_id_to_param.items():
+            for n in lm:
+                param_to_layer_id[n] = lid
+        return param_to_layer_id
+
+    return layer_id_to_param
+
+
+def group_parameters(
+        module: nn.Module,
+        group_matcher,
+        output_values=False,
+        reverse=False,
+):
+    return group_with_matcher(
+        module.named_parameters(), group_matcher, output_values=output_values, reverse=reverse)
+
+
+def group_modules(
+        module: nn.Module,
+        group_matcher,
+        output_values=False,
+        reverse=False,
+):
+    return group_with_matcher(
+        named_modules_with_params(module), group_matcher, output_values=output_values, reverse=reverse)
+
+
+def checkpoint_seq(
+        functions,
+        x,
+        every=1,
+        flatten=False,
+        skip_last=False,
+        preserve_rng_state=True
+):
+    r"""A helper function for checkpointing sequential models.
+
+    Sequential models execute a list of modules/functions in order
+    (sequentially). Therefore, we can divide such a sequence into segments
+    and checkpoint each segment. Checkpointed segments run in :func:`torch.no_grad`
+    manner, i.e., not storing the intermediate activations. The inputs of each
+    checkpointed segment will be saved for re-running the segment in the backward pass.
+
+    See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
+
+    .. warning::
+        Checkpointing currently only supports :func:`torch.autograd.backward`
+        and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
+        is not supported.
+
+    .. warning::
+        At least one of the inputs needs to have :code:`requires_grad=True` if
+        grads are needed for model inputs, otherwise the checkpointed part of the
+        model won't have gradients.
+
+    Args:
+        functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
+        x: A Tensor that is input to :attr:`functions`
+        every: checkpoint every-n functions (default: 1)
+        flatten (bool): flatten nn.Sequential of nn.Sequentials
+        skip_last (bool): skip checkpointing the last function in the sequence if True
+        preserve_rng_state (bool, optional, default=True): stash and restore the RNG state of
+            each checkpointed segment during backward; pass False to omit this.
+
+    Returns:
+        Output of running :attr:`functions` sequentially on :attr:`x`
+
+    Example:
+        >>> model = nn.Sequential(...)
+ >>> input_var = checkpoint_seq(model, input_var, every=2) + """ + def run_function(start, end, functions): + def forward(_x): + for j in range(start, end + 1): + _x = functions[j](_x) + return _x + return forward + + if isinstance(functions, torch.nn.Sequential): + functions = functions.children() + if flatten: + functions = chain.from_iterable(functions) + if not isinstance(functions, (tuple, list)): + functions = tuple(functions) + + num_checkpointed = len(functions) + if skip_last: + num_checkpointed -= 1 + end = -1 + for start in range(0, num_checkpointed, every): + end = min(start + every - 1, num_checkpointed - 1) + x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state) + if skip_last: + return run_function(end + 1, len(functions) - 1, functions)(x) + return x + + +def flatten_modules(named_modules, depth=1, prefix='', module_types='sequential'): + prefix_is_tuple = isinstance(prefix, tuple) + if isinstance(module_types, str): + if module_types == 'container': + module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict) + else: + module_types = (nn.Sequential,) + for name, module in named_modules: + if depth and isinstance(module, module_types): + yield from flatten_modules( + module.named_children(), + depth - 1, + prefix=(name,) if prefix_is_tuple else name, + module_types=module_types, + ) + else: + if prefix_is_tuple: + name = prefix + (name,) + yield name, module + else: + if prefix: + name = '.'.join([prefix, name]) + yield name, module diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hrnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hrnet.py new file mode 100644 index 0000000000000000000000000000000000000000..08405e8793f4600a40bcea0cb6d5855e1d2f34b0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hrnet.py @@ -0,0 +1,858 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. 
+ Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .features import FeatureInfo +from .helpers import build_model_with_cfg, pretrained_cfg_for_features +from .layers import create_classifier +from .registry import register_model +from .resnet import BasicBlock, Bottleneck # leveraging ResNet blocks w/ additional features like SE + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hrnet_w18_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth'), + 'hrnet_w18_small_v2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth'), + 'hrnet_w18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth'), + 'hrnet_w30': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth'), + 'hrnet_w32': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth'), + 'hrnet_w40': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth'), + 'hrnet_w44': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth'), + 'hrnet_w48': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth'), + 'hrnet_w64': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth'), +} + +cfg_cls = dict( + hrnet_w18_small=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(1,), + NUM_CHANNELS=(32,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(16, 32), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=1, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(16, 32, 64), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=1, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(16, 32, 64, 128), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(2,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=3, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=2, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + 
NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w30=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(30, 60), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(30, 60, 120), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(30, 60, 120, 240), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w32=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(32, 64), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(32, 64, 128), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(32, 64, 128, 256), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w40=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(40, 80), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(40, 80, 160), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(40, 80, 160, 320), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w44=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(44, 88), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(44, 88, 176), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(44, 88, 176, 352), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w48=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(48, 96), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(48, 96, 192), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', 
+ NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(48, 96, 192, 384), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w64=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(64, 128), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(64, 128, 256), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(64, 128, 256, 512), + FUSE_METHOD='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_in_chs, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_in_chs, num_channels) + + self.num_in_chs = num_in_chs + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, num_in_chs, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_in_chs): + error_msg = 'NUM_BRANCHES({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_in_chs[branch_index], num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] + self.num_in_chs[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_in_chs[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_in_chs = self.num_in_chs + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in 
range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_in_chs[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_in_chs[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_in_chs[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_in_chs(self): + return self.num_in_chs + + def forward(self, x: List[torch.Tensor]): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = x[0] if i == 0 else fuse_outer[0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + fuse_outer[j](x[j]) + x_fuse.append(self.fuse_act(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + + stem_width = cfg['STEM_WIDTH'] + self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion * num_channels + + self.stage2_cfg = cfg['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + if head == 'classification': + # Classification Head + self.num_features = 2048 + self.incre_modules, 
self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + elif head == 'incre': + self.num_features = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, True) + else: + self.incre_modules = None + self.num_features = 256 + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False): + head_block = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block.expansion + out_channels = self.head_channels[i + 1] * head_block.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block.expansion, + out_channels=self.num_features, kernel_size=1, stride=1, padding=0 + ), + nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, 
stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(inplanes, planes, stride, downsample)] + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) + ) + num_in_chs = modules[-1].get_num_in_chs() + + return nn.Sequential(*modules), num_in_chs + + @torch.jit.ignore + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv[12]|bn[12]', + blocks=r'^(?:layer|stage|transition)(\d+)' if coarse else [ + (r'^layer(\d+)\.(\d+)', None), + (r'^stage(\d+)\.(\d+)', None), + (r'^transition(\d+)', (99999,)), + ], + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, "gradient checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + # Stages + yl = self.stages(x) + if self.incre_modules is None or self.downsamp_modules is None: + return yl + y = self.incre_modules[0](yl[0]) + for i, down in enumerate(self.downsamp_modules): + y = self.incre_modules[i + 1](yl[i + 1]) + down(y) + y = self.final_layer(y) + return y + + def forward_head(self, x, pre_logits: bool = False): + # Classification Head + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.classifier(x) + + def forward(self, x): + y = self.forward_features(x) + x = self.forward_head(y) + return x + + +class HighResolutionNetFeatures(HighResolutionNet): + """HighResolutionNet feature extraction + + The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. + It would be more complicated to use the FeatureNet helpers. 
+ + The `feature_location=incre` allows grabbing increased channel count features using part of the + classification head. If `feature_location=''` the default HRNet features are returned. First stem + conv is used for stride 2 features. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, + feature_location='incre', out_indices=(0, 1, 2, 3, 4)): + assert feature_location in ('incre', '') + super(HighResolutionNetFeatures, self).__init__( + cfg, in_chans=in_chans, num_classes=num_classes, global_pool=global_pool, + drop_rate=drop_rate, head=feature_location) + self.feature_info = FeatureInfo(self.feature_info, out_indices) + self._out_idx = {i for i in out_indices} + + def forward_features(self, x): + assert False, 'Not supported' + + def forward(self, x) -> List[torch.tensor]: + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if 0 in self._out_idx: + out.append(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + x = self.stages(x) + if self.incre_modules is not None: + x = [incre(f) for f, incre in zip(x, self.incre_modules)] + for i, f in enumerate(x): + if i + 1 in self._out_idx: + out.append(f) + return out + + +def _create_hrnet(variant, pretrained, **model_kwargs): + model_cls = HighResolutionNet + features_only = False + kwargs_filter = None + if model_kwargs.pop('features_only', False): + model_cls = HighResolutionNetFeatures + kwargs_filter = ('num_classes', 'global_pool') + features_only = True + model = build_model_with_cfg( + model_cls, variant, pretrained, + model_cfg=cfg_cls[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) + model.default_cfg = model.pretrained_cfg # backwards compat + return model + + +@register_model +def hrnet_w18_small(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) + + +@register_model +def hrnet_w18_small_v2(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) + + +@register_model +def hrnet_w18(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w18', pretrained, **kwargs) + + +@register_model +def hrnet_w30(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w30', pretrained, **kwargs) + + +@register_model +def hrnet_w32(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w32', pretrained, **kwargs) + + +@register_model +def hrnet_w40(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w40', pretrained, **kwargs) + + +@register_model +def hrnet_w44(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w44', pretrained, **kwargs) + + +@register_model +def hrnet_w48(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w48', pretrained, **kwargs) + + +@register_model +def hrnet_w64(pretrained=False, **kwargs): + return _create_hrnet('hrnet_w64', pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hub.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1a6e5df0279d99b2a57f0762f5214de13dad94 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/hub.py @@ -0,0 +1,170 @@ +import json +import logging +import os +from functools import partial +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Optional, Union + +import torch +from 
torch.hub import HASH_REGEX, download_url_to_file, urlparse + +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +from custom_timm import __version__ + +try: + from huggingface_hub import (create_repo, get_hf_file_metadata, + hf_hub_download, hf_hub_url, + repo_type_and_id_from_hf_id, upload_folder) + from huggingface_hub.utils import EntryNotFoundError + hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__) + _has_hf_hub = True +except ImportError: + hf_hub_download = None + _has_hf_hub = False + +_logger = logging.getLogger(__name__) + + +def get_cache_dir(child_dir=''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file(url, check_hash=True, progress=False): + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(get_cache_dir(), filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def has_hf_hub(necessary=False): + if not _has_hf_hub and necessary: + # if no HF Hub module installed, and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return _has_hf_hub + + +def hf_split(hf_id): + # FIXME I may change @ -> # and be parsed as fragment in a URI model name scheme + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' 
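+    # e.g. 'org/model@rev' -> ('org/model', 'rev'); with no '@', revision is None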
+ hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def _download_from_hf(model_id: str, filename: str): + hf_model_id, hf_revision = hf_split(model_id) + return hf_hub_download(hf_model_id, filename, revision=hf_revision) + + +def load_model_config_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'config.json') + pretrained_cfg = load_cfg_from_json(cached_file) + pretrained_cfg['hf_hub_id'] = model_id # insert hf_hub id for pretrained weight load during model creation + pretrained_cfg['source'] = 'hf-hub' + model_name = pretrained_cfg.get('architecture') + return pretrained_cfg, model_name + + +def load_state_dict_from_hf(model_id: str, filename: str = 'pytorch_model.bin'): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, filename) + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict + + +def save_for_hf(model, save_directory, model_config=None): + assert has_hf_hub(True) + model_config = model_config or {} + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True, parents=True) + + weights_path = save_directory / 'pytorch_model.bin' + torch.save(model.state_dict(), weights_path) + + config_path = save_directory / 'config.json' + hf_config = model.pretrained_cfg + hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) + hf_config['num_features'] = model_config.pop('num_features', model.num_features) + hf_config['labels'] = model_config.pop('labels', [f"LABEL_{i}" for i in range(hf_config['num_classes'])]) + hf_config.update(model_config) + + with config_path.open('w') as f: + json.dump(hf_config, f, indent=2) + + +def push_to_hf_hub( + model, + repo_id: str, + commit_message: str ='Add model', + token: Optional[str] = None, + revision: Optional[str] = None, + private: bool = False, + create_pr: bool = False, + model_config: Optional[dict] = None, +): + # Create repo if doesn't exist yet + repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True) + + # Infer complete repo_id from repo_url + # Can be different from the input `repo_id` if repo_owner was implicit + _, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url) + repo_id = f"{repo_owner}/{repo_name}" + + # Check if README file already exist in repo + try: + get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision)) + has_readme = True + except EntryNotFoundError: + has_readme = False + + # Dump model and push to Hub + with TemporaryDirectory() as tmpdir: + # Save model weights and config. 
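+        # (save_for_hf, defined above, writes pytorch_model.bin plus a config.json
+        # derived from model.pretrained_cfg into tmpdir before the folder is uploaded.)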
+ save_for_hf(model, tmpdir, model_config=model_config) + + # Add readme if does not exist + if not has_readme: + readme_path = Path(tmpdir) / "README.md" + readme_text = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_id}' + readme_path.write_text(readme_text) + + # Upload model and return + return upload_folder( + repo_id=repo_id, + folder_path=tmpdir, + revision=revision, + create_pr=create_pr, + commit_message=commit_message, + ) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_resnet_v2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_resnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..ae932786961457dd149817dd58e7d50ba2345b6c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_resnet_v2.py @@ -0,0 +1,382 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, flatten_modules +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionResnetV2'] + +default_cfgs = { + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + }, + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'ens_adv_inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=.001) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + 
) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self): + super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False): + super(Block8, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, 
padding=(0, 1)), + BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg'): + super(InceptionResnetV2, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + assert output_stride == 32 + + self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17) + ) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10) + ) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20) + ) + self.block8 = Block8(no_relu=True) + self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} + module_map.pop(('classif',)) + + def _matcher(name): + if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): + return 0 + elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): + return 1 + elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): + return len(module_map) + 1 + else: + for k in module_map.keys(): + if k == tuple(name.split('.')[:len(k)]): + return module_map[k] + return float('inf') + return _matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, 
enable=True): + assert not enable, "checkpointing not supported" + + @torch.jit.ignore + def get_classifier(self): + return self.classif + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.classif(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs): + r"""InceptionResnetV2 model architecture from the + `"InceptionV4, Inception-ResNet..." ` paper. + """ + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +@register_model +def ens_adv_inception_resnet_v2(pretrained=False, **kwargs): + r""" Ensemble Adversarially trained InceptionResnetV2 model architecture + As per https://arxiv.org/abs/1705.07204 and + https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models. + """ + return _create_inception_resnet_v2('ens_adv_inception_resnet_v2', pretrained=pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v3.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..1e03afd9af9fbd463c17a9f0c961f73026c779e2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v3.py @@ -0,0 +1,475 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, resolve_pretrained_cfg, flatten_modules +from .registry import register_model +from .layers import trunc_normal_, create_classifier, Linear + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3': _cfg( + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', + has_aux=True), # checkpoint has aux logit layer weights + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'tf_inception_v3': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth', + num_classes=1000, has_aux=False, label_offset=1), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'adv_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth', + num_classes=1000, has_aux=False, label_offset=1), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'gluon_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_inception_v3-9f746940.pth', + mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + has_aux=False, + ) +} + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 
1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = 
torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + self.fc = Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) + + +class InceptionV3(nn.Module): + """Inception-V3 with no AuxLogits + FIXME two class defs are redundant, but less screwing around with torchsript fussyness and inconsistent returns + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.aux_logits = aux_logits + + self.Conv2d_1a_3x3 = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = 2048 + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + 
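+                # start BatchNorm as the identity transform: scale (weight) = 1, shift (bias) = 0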
nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} + module_map.pop(('fc',)) + + def _matcher(name): + if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]): + return 0 + elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]): + return 1 + else: + for k in module_map.keys(): + if k == tuple(name.split('.')[:len(k)]): + return module_map[k] + return float('inf') + return _matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_preaux(self, x): + x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147 + x = self.Pool1(x) # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71 + x = self.Pool2(x) # N x 192 x 35 x 35 + x = self.Mixed_5b(x) # N x 256 x 35 x 35 + x = self.Mixed_5c(x) # N x 288 x 35 x 35 + x = self.Mixed_5d(x) # N x 288 x 35 x 35 + x = self.Mixed_6a(x) # N x 768 x 17 x 17 + x = self.Mixed_6b(x) # N x 768 x 17 x 17 + x = self.Mixed_6c(x) # N x 768 x 17 x 17 + x = self.Mixed_6d(x) # N x 768 x 17 x 17 + x = self.Mixed_6e(x) # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + x = self.forward_postaux(x) + return x + + def forward_head(self, x): + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class InceptionV3Aux(InceptionV3): + """InceptionV3 with AuxLogits + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=True): + super(InceptionV3Aux, self).__init__( + num_classes, in_chans, drop_rate, global_pool, aux_logits) + + def forward_features(self, x): + x = self.forward_preaux(x) + aux = self.AuxLogits(x) if self.training else None + x = self.forward_postaux(x) + return x, aux + + def forward(self, x): + x, aux = self.forward_features(x) + x = self.forward_head(x) + return x, aux + + +def _create_inception_v3(variant, pretrained=False, **kwargs): + pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) + aux_logits = kwargs.pop('aux_logits', False) + if aux_logits: + assert not kwargs.pop('features_only', False) + model_cls = InceptionV3Aux + load_strict = pretrained_cfg['has_aux'] + else: + model_cls = InceptionV3 + load_strict = not pretrained_cfg['has_aux'] + + return build_model_with_cfg( + model_cls, variant, pretrained, + pretrained_cfg=pretrained_cfg, + pretrained_strict=load_strict, + **kwargs) + + +@register_model +def inception_v3(pretrained=False, **kwargs): + # original PyTorch weights, ported from Tensorflow but modified + model = _create_inception_v3('inception_v3', 
pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + model = _create_inception_v3('tf_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def adv_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + model = _create_inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def gluon_inception_v3(pretrained=False, **kwargs): + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + model = _create_inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v4.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v4.py new file mode 100644 index 0000000000000000000000000000000000000000..02d7128221c521c245d3c8832923392c43255180 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/inception_v4.py @@ -0,0 +1,330 @@ +""" Pytorch Inception-V4 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionV4'] + +default_cfgs = { + 'inception_v4': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'features.0.conv', 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=0.001) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed3a(nn.Module): + def __init__(self): + super(Mixed3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed4a(nn.Module): + def __init__(self): + super(Mixed4a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + 
BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed5a(nn.Module): + def __init__(self): + super(Mixed5a, self).__init__() + self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class InceptionA(nn.Module): + def __init__(self): + super(InceptionA, self).__init__() + self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionA(nn.Module): + def __init__(self): + super(ReductionA, self).__init__() + self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), + BasicConv2d(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionB(nn.Module): + def __init__(self): + super(InceptionB, self).__init__() + self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionB(nn.Module): + def __init__(self): + super(ReductionB, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) 
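+        # 192 (branch0) + 320 (branch1) + 1024 (max-pooled input) = 1536 channels
+        # after the concat below, matching the 1536-channel input of InceptionC.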
+ x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionC(nn.Module): + def __init__(self): + super(InceptionC, self).__init__() + + self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) + self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1536, 256, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg'): + super(InceptionV4, self).__init__() + assert output_stride == 32 + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + + self.features = nn.Sequential( + BasicConv2d(in_chans, 32, kernel_size=3, stride=2), + BasicConv2d(32, 32, kernel_size=3, stride=1), + BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), + Mixed3a(), + Mixed4a(), + Mixed5a(), + InceptionA(), + InceptionA(), + InceptionA(), + InceptionA(), + ReductionA(), # Mixed6a + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + ReductionB(), # Mixed7a + InceptionC(), + InceptionC(), + InceptionC(), + ) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='features.2'), + dict(num_chs=160, reduction=4, module='features.3'), + dict(num_chs=384, reduction=8, module='features.9'), + dict(num_chs=1024, reduction=16, module='features.17'), + dict(num_chs=1536, reduction=32, module='features.21'), + ] + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^features\.[012]\.', + blocks=r'^features\.(\d+)' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits 
else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_inception_v4(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionV4, variant, pretrained, + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def inception_v4(pretrained=False, **kwargs): + return _create_inception_v4('inception_v4', pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..21c641b6c3a1b5c02e0f5213fe8de3437eb1eb96 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/__init__.py @@ -0,0 +1,44 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm import get_norm_layer, create_norm_layer +from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ + EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a +from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm +from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp, ConvMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvNormAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .trace_utils import _assert, _float_to_int +from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_ diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..e16b3bd3a1898365530c1ffc5154a0a4746a136e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations.py @@ -0,0 +1,145 @@
+""" Activations
+
+A collection of activation functions and modules with a common interface so that they can
+easily be swapped. All have an `inplace` arg even if not used.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+
+def swish(x, inplace: bool = False):
+    """Swish - Described in: https://arxiv.org/abs/1710.05941
+    """
+    return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
+
+
+class Swish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(Swish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return swish(x, self.inplace)
+
+
+def mish(x, inplace: bool = False):
+    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
+    NOTE: I don't have a working inplace variant
+    """
+    return x.mul(F.softplus(x).tanh())
+
+
+class Mish(nn.Module):
+    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
+    """
+    def __init__(self, inplace: bool = False):
+        super(Mish, self).__init__()
+
+    def forward(self, x):
+        return mish(x)
+
+
+def sigmoid(x, inplace: bool = False):
+    return x.sigmoid_() if inplace else x.sigmoid()
+
+
+# PyTorch has this, but not with a consistent inplace argument interface
+class Sigmoid(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(Sigmoid, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return x.sigmoid_() if self.inplace else x.sigmoid()
+
+
+def tanh(x, inplace: bool = False):
+    return x.tanh_() if inplace else x.tanh()
+
+
+# PyTorch has this, but not with a consistent inplace argument interface
+class Tanh(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(Tanh, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return x.tanh_() if self.inplace else x.tanh()
+
+
+def hard_swish(x, inplace: bool = False):
+    inner = F.relu6(x + 3.).div_(6.)
+    return x.mul_(inner) if inplace else x.mul(inner)
+
+
+class HardSwish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSwish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return hard_swish(x, self.inplace)
+
+
+def hard_sigmoid(x, inplace: bool = False):
+    if inplace:
+        return x.add_(3.).clamp_(0., 6.).div_(6.)
+    else:
+        return F.relu6(x + 3.) / 6.
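+
+# Editor's note (not part of the upstream timm source): the non-inplace forms above are
+# numerically identical to the native PyTorch ops where those exist, e.g.
+#
+#   x = torch.randn(4, 8)
+#   torch.testing.assert_close(hard_sigmoid(x), F.hardsigmoid(x))
+#   torch.testing.assert_close(swish(x), F.silu(x))
+#
+# which is what lets the create_act factory (later in this diff) prefer the native ops.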
+ + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_jit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a516530ad0abf41f720ac83d02791179bb7b67 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? 
+ + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) / 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_me.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_me.py new file mode 100644 index 0000000000000000000000000000000000000000..9a12bb7ebbfef02c508801742d38da6b48dd1bb6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. 
+ .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/adaptive_avgmax_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/adaptive_avgmax_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc6ada8c5b28c7eac5785b0cc2933eb01a15d46 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. 
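+
+(Editor's note) In the code below the concatenation variant is registered as 'catavgmax';
+the 'avgmaxc' name above looks like a stale alias for the same option.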
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/attention_pool2d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/attention_pool2d.py new file mode 100644 index 0000000000000000000000000000000000000000..a13a6881feb4c7d31f6caa5ccc0d95288e322a83 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/attention_pool2d.py @@ -0,0 +1,131 @@
+""" Attention Pool 2D
+
+Implementations of 2D spatial feature pooling using multi-head attention instead of average pool.
+
+Based on idea in CLIP by OpenAI, licensed Apache 2.0
+https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+from typing import Union, Tuple
+
+import torch
+import torch.nn as nn
+
+from .helpers import to_2tuple
+from .pos_embed import apply_rot_embed, RotaryEmbedding
+from .weight_init import trunc_normal_
+
+
+class RotAttentionPool2d(nn.Module):
+    """ Attention based 2D feature pooling w/ rotary (relative) pos embedding.
+    This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
+
+    Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed.
+    https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+    NOTE: While this impl does not require a fixed feature size, performance at differing resolutions from
+    train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW
+    """
+    def __init__(
+            self,
+            in_features: int,
+            out_features: int = None,
+            embed_dim: int = None,
+            num_heads: int = 4,
+            qkv_bias: bool = True,
+    ):
+        super().__init__()
+        embed_dim = embed_dim or in_features
+        out_features = out_features or in_features
+        self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
+        self.proj = nn.Linear(embed_dim, out_features)
+        self.num_heads = num_heads
+        assert embed_dim % num_heads == 0
+        self.head_dim = embed_dim // num_heads
+        self.scale = self.head_dim ** -0.5
+        self.pos_embed = RotaryEmbedding(self.head_dim)
+
+        trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
+        nn.init.zeros_(self.qkv.bias)
+
+    def forward(self, x):
+        B, _, H, W = x.shape
+        N = H * W
+        x = x.reshape(B, -1, N).permute(0, 2, 1)
+
+        x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
+
+        x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+        q, k, v = x[0], x[1], x[2]
+
+        qc, q = q[:, :, :1], q[:, :, 1:]
+        sin_emb, cos_emb = self.pos_embed.get_embed((H, W))
+        q = apply_rot_embed(q, sin_emb, cos_emb)
+        q = torch.cat([qc, q], dim=2)
+
+        kc, k = k[:, :, :1], k[:, :, 1:]
+        k = apply_rot_embed(k, sin_emb, cos_emb)
+        k = torch.cat([kc, k], dim=2)
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1)
+        x = self.proj(x)
+        return x[:, 0]
+
+
+class AttentionPool2d(nn.Module):
+    """ Attention based 2D feature pooling w/ learned (absolute) pos embedding.
+    This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
+
+    It was based on impl in CLIP by OpenAI
+    https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+    NOTE: This requires feature size upon construction and will prevent adaptive sizing of the network.
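+
+    Example (editor's sketch, not in the original docstring)::
+
+        pool = AttentionPool2d(in_features=2048, feat_size=7)  # e.g. a 7x7 ResNet-50 feature map
+        out = pool(torch.randn(2, 2048, 7, 7))                 # -> shape (2, 2048)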
+ """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/blur_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/blur_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..e73d886367c995ee44f0ed5f6eb5b5a287aa4935 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. 
+ """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=self.channels) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/bottleneck_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/bottleneck_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..c3db464e5ab4f2d3478293034e90a0939dadb628 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/bottleneck_attn.py @@ -0,0 +1,157 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, HW, _ = q.shape + + # relative logits in width dimension. 
+ q = q.reshape(B, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). + num_heads (int): parallel attention heads (default: 4) + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, + qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + + self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.pos_embed.height, '') + _assert(W == self.pos_embed.width, '') + + x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W + + # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v + # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. 
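+        # (editor's note) x is (B, 2 * dim_out_qk + dim_out_v, H, W) after the 1x1 qkv conv;
+        # the channel split below yields q, k with dim_head_qk and v with dim_head_v per head.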
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) + k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k + v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) + + if self.scale_pos_embed: + attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W + else: + attn = (q @ k) * self.scale + self.pos_embed(q) + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W + out = self.pool(out) + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cbam.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cbam.py new file mode 100644 index 0000000000000000000000000000000000000000..576a8306d979c3d93215253eba3affd7efd87bfe --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvNormAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
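+        # (editor's note) rd_channels is the bottleneck width of the shared MLP,
+        # channels // 16 by default, applied to both the avg- and max-pooled descriptors.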
+        self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
+        self.act = act_layer(inplace=True)
+        self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True))))
+        x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True))))
+        return x * self.gate(x_avg + x_max)
+
+
+class LightChannelAttn(ChannelAttn):
+    """An experimental 'lightweight' variant that sums avg + max pool first
+    """
+    def __init__(
+            self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
+            act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
+        super(LightChannelAttn, self).__init__(
+            channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias)
+
+    def forward(self, x):
+        x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True)
+        x_attn = self.fc2(self.act(self.fc1(x_pool)))
+        return x * self.gate(x_attn)
+
+
+class SpatialAttn(nn.Module):
+    """ Original CBAM spatial attention module
+    """
+    def __init__(self, kernel_size=7, gate_layer='sigmoid'):
+        super(SpatialAttn, self).__init__()
+        self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1)
+        x_attn = self.conv(x_attn)
+        return x * self.gate(x_attn)
+
+
+class LightSpatialAttn(nn.Module):
+    """An experimental 'lightweight' variant that sums avg_pool and max_pool results.
+    """
+    def __init__(self, kernel_size=7, gate_layer='sigmoid'):
+        super(LightSpatialAttn, self).__init__()
+        self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True)
+        x_attn = self.conv(x_attn)
+        return x * self.gate(x_attn)
+
+
+class CbamModule(nn.Module):
+    def __init__(
+            self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
+            spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
+        super(CbamModule, self).__init__()
+        self.channel = ChannelAttn(
+            channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
+            rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
+        self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer)
+
+    def forward(self, x):
+        x = self.channel(x)
+        x = self.spatial(x)
+        return x
+
+
+class LightCbamModule(nn.Module):
+    def __init__(
+            self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
+            spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
+        super(LightCbamModule, self).__init__()
+        self.channel = LightChannelAttn(
+            channels, rd_ratio=rd_ratio, rd_channels=rd_channels,
+            rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias)
+        self.spatial = LightSpatialAttn(spatial_kernel_size)
+
+    def forward(self, x):
+        x = self.channel(x)
+        x = self.spatial(x)
+        return x
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/classifier.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac3338782e4c6e4c29cb9dc6a3da5a9331340c7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/classifier.py @@ -0,0 +1,56 @@
+""" Classifier head and layer factory
+
+Hacked together by / Copyright 2020 Ross
Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + fc = nn.Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + if pre_logits: + return x.flatten(1) + else: + x = self.fc(x) + return self.flatten(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cond_conv2d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cond_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..43654c5972167cab0224bfe720d78bae1227eb7d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/cond_conv2d.py @@ -0,0 +1,123 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized 
Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + # reshape instead of view to work with channels_last input + x = x.reshape(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git 
a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/config.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/config.py new file mode 100644 index 0000000000000000000000000000000000000000..f07b9d782ba0597c174dee81097c28280335fdba --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. +_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. 
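+
+    Example (editor's sketch, not in the original docstring)::
+
+        with set_layer_config(scriptable=True, no_jit=True):
+            model = build_model()  # hypothetical factory fn; layers created inside use script-safe variants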
+ """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv2d_same.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..75f0f98d4ec1e3f4a0dc004b977815afaa25e7fc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv_bn_act.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv_bn_act.py new file mode 100644 index 0000000000000000000000000000000000000000..9e7c64b85805b25c861d09116f9590dbebafce7e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/conv_bn_act.py @@ -0,0 +1,88 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +import functools +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class ConvNormAct(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, drop_layer=None): + super(ConvNormAct, 
self).__init__() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +ConvBnAct = ConvNormAct + + +def create_aa(aa_layer, channels, stride=2, enable=True): + if not aa_layer or not enable: + return nn.Identity() + if isinstance(aa_layer, functools.partial): + if issubclass(aa_layer.func, nn.AvgPool2d): + return aa_layer() + else: + return aa_layer(channels) + elif issubclass(aa_layer, nn.AvgPool2d): + return aa_layer(stride) + else: + return aa_layer(channels=channels, stride=stride) + + +class ConvNormActAa(nn.Module): + def __init__( + self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, drop_layer=None): + super(ConvNormActAa, self).__init__() + use_aa = aa_layer is not None and stride == 2 + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa) + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.aa(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_act.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_act.py new file mode 100644 index 0000000000000000000000000000000000000000..a3044a3d20b56c7739e810f91deb533c23e161e1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_act.py @@ -0,0 +1,154 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. 
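+
+# (editor's note) e.g. on PyTorch builds where torch.nn.functional.silu exists,
+# get_act_layer('swish') below resolves to nn.SiLU rather than the custom Swish module.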
+_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
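+
+    Example (editor's sketch, not in the original docstring)::
+
+        act = get_act_fn('hard_swish')  # resolves to the ME / JIT / default variant per the config flags
+        y = act(torch.randn(2, 8))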
+ """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if not isinstance(name, str): + # callable, module, etc + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + if inplace is None: + return act_layer(**kwargs) + try: + return act_layer(inplace=inplace, **kwargs) + except TypeError: + # recover if act layer doesn't have inplace arg + return act_layer(**kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..cc7e91ea9af2e853fd659973c72ba5e86025a1b3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. 
+ if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! + else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_conv2d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9489ce492d0f768c1ae8892163fa986bac8fd8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_conv2d.py @@ -0,0 +1,36 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + if 'groups' in kwargs: + groups = kwargs.pop('groups') + if groups == in_channels: + kwargs['depthwise'] = True + else: + assert groups == 1 + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
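+        # e.g. (an illustrative call): create_conv2d(32, 64, kernel_size=[3, 5, 7], groups=32)
+        # pops groups == in_channels, sets depthwise=True, and builds a MixedConv2d w/ one
+        # channel group per kernel size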
+        m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
+    else:
+        depthwise = kwargs.pop('depthwise', False)
+        # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
+        groups = in_channels if depthwise else kwargs.pop('groups', 1)
+        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+            m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+        else:
+            m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+    return m
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9efae8c8c34d8ebdb5e80921768b898e10ccc7e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm.py
@@ -0,0 +1,56 @@
+""" Norm Layer Factory
+
+Create norm modules by string (to mirror create_act and create_norm_act fns)
+
+Copyright 2022 Ross Wightman
+"""
+import types
+import functools
+
+import torch.nn as nn
+
+from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d
+
+_NORM_MAP = dict(
+    batchnorm=nn.BatchNorm2d,
+    batchnorm2d=nn.BatchNorm2d,
+    batchnorm1d=nn.BatchNorm1d,
+    groupnorm=GroupNorm,
+    groupnorm1=GroupNorm1,
+    layernorm=LayerNorm,
+    layernorm2d=LayerNorm2d,
+)
+_NORM_TYPES = {m for n, m in _NORM_MAP.items()}
+
+
+def create_norm_layer(layer_name, num_features, **kwargs):
+    # plain norm layers do not take act_layer / apply_act args (see the norm+act factory for those)
+    layer = get_norm_layer(layer_name)
+    layer_instance = layer(num_features, **kwargs)
+    return layer_instance
+
+
+def get_norm_layer(norm_layer):
+    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
+    norm_kwargs = {}
+
+    # unbind partial fn, so args can be rebound later
+    if isinstance(norm_layer, functools.partial):
+        norm_kwargs.update(norm_layer.keywords)
+        norm_layer = norm_layer.func
+
+    if isinstance(norm_layer, str):
+        layer_name = norm_layer.replace('_', '')
+        norm_layer = _NORM_MAP.get(layer_name, None)
+    elif norm_layer in _NORM_TYPES:
+        norm_layer = norm_layer
+    elif isinstance(norm_layer, types.FunctionType):
+        # if function type, assume it is a lambda/fn that creates a norm layer
+        norm_layer = norm_layer
+    else:
+        type_name = norm_layer.__name__.lower().replace('_', '')
+        norm_layer = _NORM_MAP.get(type_name, None)
+        assert norm_layer is not None, f"No equivalent norm layer for {type_name}"
+
+    if norm_kwargs:
+        norm_layer = functools.partial(norm_layer, **norm_kwargs)  # bind/rebind args
+    return norm_layer
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm_act.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm_act.py
new file mode 100644
index 0000000000000000000000000000000000000000..78dd9a51d9fdc932e50ade613d3e8a925ae2f317
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/create_norm_act.py
@@ -0,0 +1,91 @@
+""" NormAct (Normalization + Activation Layer) Factory
+
+Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
+instances in models. Where these are used it will be possible to swap separate BN + act layers with
+combined modules like IABN or EvoNorms.
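+
+A minimal usage sketch (illustrative, not part of the original header):
+
+    norm_act = create_norm_act_layer('batchnorm', 64, act_layer=nn.ReLU)  # BatchNormAct2d(64) w/ fused ReLU
+    evo = get_norm_act_layer('evonorms0')  # EvoNorm2dS0 class, instantiate w/ num_features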
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import types
+import functools
+
+from .evo_norm import *
+from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d
+from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d
+from .inplace_abn import InplaceAbn
+
+_NORM_ACT_MAP = dict(
+    batchnorm=BatchNormAct2d,
+    batchnorm2d=BatchNormAct2d,
+    groupnorm=GroupNormAct,
+    groupnorm1=functools.partial(GroupNormAct, num_groups=1),
+    layernorm=LayerNormAct,
+    layernorm2d=LayerNormAct2d,
+    evonormb0=EvoNorm2dB0,
+    evonormb1=EvoNorm2dB1,
+    evonormb2=EvoNorm2dB2,
+    evonorms0=EvoNorm2dS0,
+    evonorms0a=EvoNorm2dS0a,
+    evonorms1=EvoNorm2dS1,
+    evonorms1a=EvoNorm2dS1a,
+    evonorms2=EvoNorm2dS2,
+    evonorms2a=EvoNorm2dS2a,
+    frn=FilterResponseNormAct2d,
+    frntlu=FilterResponseNormTlu2d,
+    inplaceabn=InplaceAbn,
+    iabn=InplaceAbn,
+)
+_NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()}
+# has act_layer arg to define act type
+_NORM_ACT_REQUIRES_ARG = {
+    BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn}
+
+
+def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs):
+    layer = get_norm_act_layer(layer_name, act_layer=act_layer)
+    layer_instance = layer(num_features, apply_act=apply_act, **kwargs)
+    if jit:
+        layer_instance = torch.jit.script(layer_instance)
+    return layer_instance
+
+
+def get_norm_act_layer(norm_layer, act_layer=None):
+    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
+    assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial))
+    norm_act_kwargs = {}
+
+    # unbind partial fn, so args can be rebound later
+    if isinstance(norm_layer, functools.partial):
+        norm_act_kwargs.update(norm_layer.keywords)
+        norm_layer = norm_layer.func
+
+    if isinstance(norm_layer, str):
+        layer_name = norm_layer.replace('_', '').lower().split('-')[0]
+        norm_act_layer = _NORM_ACT_MAP.get(layer_name, None)
+    elif norm_layer in _NORM_ACT_TYPES:
+        norm_act_layer = norm_layer
+    elif isinstance(norm_layer, types.FunctionType):
+        # if function type, must be a lambda/fn that creates a norm_act layer
+        norm_act_layer = norm_layer
+    else:
+        type_name = norm_layer.__name__.lower()
+        if type_name.startswith('batchnorm'):
+            norm_act_layer = BatchNormAct2d
+        elif type_name.startswith('groupnorm1'):
+            # check the more specific prefix first so GroupNorm1-style types resolve correctly
+            norm_act_layer = functools.partial(GroupNormAct, num_groups=1)
+        elif type_name.startswith('groupnorm'):
+            norm_act_layer = GroupNormAct
+        elif type_name.startswith('layernorm2d'):
+            norm_act_layer = LayerNormAct2d
+        elif type_name.startswith('layernorm'):
+            norm_act_layer = LayerNormAct
+        else:
+            assert False, f"No equivalent norm_act layer for {type_name}"
+
+    if norm_act_layer in _NORM_ACT_REQUIRES_ARG:
+        # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation.
+ # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/drop.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..1ab1c8f5ba12bd0db2f802c6b5a5dd0296dec7af --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/drop.py @@ -0,0 +1,169 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. + +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def drop_block_2d( + x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, + with_noise: bool = False, inplace: bool = False, batchwise: bool = False): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. + w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, + gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False): + """ DropBlock. 
See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
+    block mask at edges.
+    """
+    B, C, H, W = x.shape
+    total_size = W * H
+    clipped_block_size = min(block_size, min(W, H))
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
+        (W - block_size + 1) * (H - block_size + 1))
+
+    block_mask = torch.empty_like(x).bernoulli_(gamma)
+    block_mask = F.max_pool2d(
+        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
+
+    if with_noise:
+        normal_noise = torch.empty_like(x).normal_()
+        if inplace:
+            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
+        else:
+            x = x * (1. - block_mask) + normal_noise * block_mask
+    else:
+        block_mask = 1 - block_mask
+        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
+        if inplace:
+            x.mul_(block_mask * normalize_scale)
+        else:
+            x = x * block_mask * normalize_scale
+    return x
+
+
+class DropBlock2d(nn.Module):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+    """
+
+    def __init__(
+            self,
+            drop_prob: float = 0.1,
+            block_size: int = 7,
+            gamma_scale: float = 1.0,
+            with_noise: bool = False,
+            inplace: bool = False,
+            batchwise: bool = False,
+            fast: bool = True):
+        super(DropBlock2d, self).__init__()
+        self.drop_prob = drop_prob
+        self.gamma_scale = gamma_scale
+        self.block_size = block_size
+        self.with_noise = with_noise
+        self.inplace = inplace
+        self.batchwise = batchwise
+        self.fast = fast  # FIXME finish comparisons of fast vs not
+
+    def forward(self, x):
+        if not self.training or not self.drop_prob:
+            return x
+        if self.fast:
+            return drop_block_fast_2d(
+                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
+        else:
+            return drop_block_2d(
+                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
+
+
+def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
+    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
+    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
+    'survival rate' as the argument.
+
+    """
+    if drop_prob == 0. or not training:
+        return x
+    keep_prob = 1 - drop_prob
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+    if keep_prob > 0.0 and scale_by_keep:
+        random_tensor.div_(keep_prob)
+    return x * random_tensor
+
+
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
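+
+    Example (an illustrative sketch):
+        >>> dp = DropPath(drop_prob=0.1)
+        >>> dp.train()
+        >>> out = dp(torch.randn(8, 197, 768))  # on average ~10% of the 8 samples zeroed, rest scaled by 1/0.9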
+ """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f'drop_prob={round(self.drop_prob,3):0.3f}' diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/eca.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/eca.py new file mode 100644 index 0000000000000000000000000000000000000000..e29be6ac3c95bb61229cdcdd659ec89d541f1a53 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+    def __init__(
+            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
+            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
+        super(EcaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        assert kernel_size % 2 == 1
+        padding = (kernel_size - 1) // 2
+        if use_mlp:
+            # NOTE 'mlp' mode is a timm experiment, not in paper
+            assert channels is not None
+            if rd_channels is None:
+                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
+            act_layer = act_layer or nn.ReLU
+            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
+            self.act = create_act_layer(act_layer)
+            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
+        else:
+            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
+            self.act = None
+            self.conv2 = None
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)  # view for 1d conv
+        y = self.conv(y)
+        if self.conv2 is not None:
+            y = self.act(y)
+            y = self.conv2(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+EfficientChannelAttn = EcaModule  # alias
+
+
+class CecaModule(nn.Module):
+    """Constructs a circular ECA module.
+
+    ECA module where the conv uses circular padding rather than zero padding.
+    Unlike the spatial dimension, the channels do not have inherent ordering nor
+    locality. Although this module, in essence, applies such an assumption, it is unnecessary
+    to limit the channels on either "edge" from being circularly adapted to each other.
+    This will fundamentally increase connectivity and possibly increase performance metrics
+    (accuracy, robustness), without significantly impacting resource metrics
+    (parameter size, throughput, latency, etc)
+
+    Args:
+        channels: Number of channels of the input feature map for use in adaptive kernel sizes
+            for actual calculations according to channel.
+            gamma, beta: when channel is given parameters of mapping function
+            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
+            (default=None. if channel size not given, use k_size given for kernel size.)
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+
+    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
+        super(CecaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        has_act = act_layer is not None
+        assert kernel_size % 2 == 1
+
+        # PyTorch circular padding mode is buggy as of pytorch 1.4
+        # see https://github.com/pytorch/pytorch/pull/17240
+        # implement manual circular padding
+        self.padding = (kernel_size - 1) // 2
+        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
+        # Manually implement circular padding, F.pad does not seem to be bugged
+        y = F.pad(y, (self.padding, self.padding), mode='circular')
+        y = self.conv(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+CircularEfficientChannelAttn = CecaModule
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/evo_norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/evo_norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea77620712c80a54d943ef0b920556cbafc1f9f6
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/evo_norm.py
@@ -0,0 +1,352 @@
+""" EvoNorm in PyTorch
+
+Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
+@inproceedings{NEURIPS2020,
+ author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc},
+ booktitle = {Advances in Neural Information Processing Systems},
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
+ pages = {13539--13550},
+ publisher = {Curran Associates, Inc.},
+ title = {Evolving Normalization-Activation Layers},
+ url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf},
+ volume = {33},
+ year = {2020}
+}
+
+An attempt at getting decent performing EvoNorms running in PyTorch.
+While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm
+in terms of memory usage and throughput on GPUs.
+
+I'm testing these modules on TPU w/ PyTorch XLA. Promising start but
+currently working around some issues with builtin torch/tensor.var/std. Unlike
+GPU, similar train speeds for EvoNormS variants and BatchNorm.
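+
+A rough sketch of the two families implemented below (per-channel weight/bias affine omitted;
+see each forward() for exact details):
+  B0:  y = x / max(sqrt(running_or_batch_var + eps), v * x + instance_std(x))
+  S0:  y = x * sigmoid(v * x) / group_std(x)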
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def instance_std(x, eps: float = 1e-5): + std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) + return std.expand(x.shape) + + +def instance_std_tpu(x, eps: float = 1e-5): + std = manual_var(x, dim=(2, 3)).add(eps).sqrt() + return std.expand(x.shape) +# instance_std = instance_std_tpu + + +def instance_rms(x, eps: float = 1e-5): + rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) + return rms.expand(x.shape) + + +def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): + xm = x.mean(dim=dim, keepdim=True) + if diff_sqm: + # difference of squared mean and mean squared, faster on TPU can be less stable + var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) + else: + var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) + return var + + +def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): + B, C, H, W = x.shape + x_dtype = x.dtype + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + else: + x = x.reshape(B, groups, C // groups, H, W) + std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + return std.expand(x.shape).reshape(B, C, H, W) + + +def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): + # This is a workaround for some stability / odd behaviour of .var and .std + # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results + B, C, H, W = x.shape + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + var = manual_var(x, dim=-1, diff_sqm=diff_sqm) + else: + x = x.reshape(B, groups, C // groups, H, W) + var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) + return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) +#group_std = group_std_tpu # FIXME TPU temporary + + +def group_rms(x, groups: int = 32, eps: float = 1e-5): + B, C, H, W = x.shape + _assert(C % groups == 0, '') + x_dtype = x.dtype + x = x.reshape(B, groups, C // groups, H, W) + rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) + return rms.expand(x.shape).reshape(B, C, H, W) + + +class EvoNorm2dB0(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + # var = manual_var(x, dim=(0, 2, 3)).squeeze() + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach() * self.momentum * (n / (n - 1))) + else: + var = self.running_var + left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) + v = self.v.to(x_dtype).view(v_shape) + right = x * v + instance_std(x, self.eps) + x = x / left.max(right) + return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) + + +class EvoNorm2dB1(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = (x + 1) * instance_rms(x, self.eps) + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dB2(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + 
self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = instance_rms(x, self.eps) - x + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0(nn.Module): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0a(EvoNorm2dS0): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + d = group_std(x, self.groups, self.eps) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() + x = x / d + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-5, **_): + super().__init__() + act_layer = act_layer or nn.SiLU + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.pre_act_norm = False + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) 
/ group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1a(EvoNorm2dS1): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-5, **_): + super().__init__() + act_layer = act_layer or nn.SiLU + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2a(EvoNorm2dS2): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/fast_norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/fast_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..fb35e47df6798175945d1e0bda4c7792345100c2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/fast_norm.py @@ -0,0 +1,78 @@ +""" 'Fast' Normalization Functions + +For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32. 
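+
+A minimal usage sketch (illustrative; the flag is process-global and only honored by norm
+layers that consult is_fast_norm()):
+
+    set_fast_norm(True)   # subsequent fast_layer_norm / fast_group_norm calls keep the autocast dtype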
+ +Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast) + +Hacked together by / Copyright 2022 Ross Wightman +""" +from typing import List, Optional + +import torch +from torch.nn import functional as F + +try: + from apex.normalization.fused_layer_norm import fused_layer_norm_affine + has_apex = True +except ImportError: + has_apex = False + + +# fast (ie lower precision LN) can be disabled with this flag if issues crop up +_USE_FAST_NORM = False # defaulting to False for now + + +def is_fast_norm(): + return _USE_FAST_NORM + + +def set_fast_norm(enable=True): + global _USE_FAST_NORM + _USE_FAST_NORM = enable + + +def fast_group_norm( + x: torch.Tensor, + num_groups: int, + weight: Optional[torch.Tensor] = None, + bias: Optional[torch.Tensor] = None, + eps: float = 1e-5 +) -> torch.Tensor: + if torch.jit.is_scripting(): + # currently cannot use is_autocast_enabled within torchscript + return F.group_norm(x, num_groups, weight, bias, eps) + + if torch.is_autocast_enabled(): + # normally native AMP casts GN inputs to float32 + # here we use the low precision autocast dtype + # FIXME what to do re CPU autocast? + dt = torch.get_autocast_gpu_dtype() + x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) + + with torch.cuda.amp.autocast(enabled=False): + return F.group_norm(x, num_groups, weight, bias, eps) + + +def fast_layer_norm( + x: torch.Tensor, + normalized_shape: List[int], + weight: Optional[torch.Tensor] = None, + bias: Optional[torch.Tensor] = None, + eps: float = 1e-5 +) -> torch.Tensor: + if torch.jit.is_scripting(): + # currently cannot use is_autocast_enabled within torchscript + return F.layer_norm(x, normalized_shape, weight, bias, eps) + + if has_apex: + return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) + + if torch.is_autocast_enabled(): + # normally native AMP casts LN inputs to float32 + # apex LN does not, this is behaving like Apex + dt = torch.get_autocast_gpu_dtype() + # FIXME what to do re CPU autocast? 
+ x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) + + with torch.cuda.amp.autocast(enabled=False): + return F.layer_norm(x, normalized_shape, weight, bias, eps) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/filter_response_norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/filter_response_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..a66a1cd493e4cecec27419925a6a2045bb05f25f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/filter_response_norm.py @@ -0,0 +1,68 @@ +""" Filter Response Norm in PyTorch + +Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def inv_instance_rms(x, eps: float = 1e-5): + rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) + return rms.expand(x.shape) + + +class FilterResponseNormTlu2d(nn.Module): + def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): + super(FilterResponseNormTlu2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.tau is not None: + nn.init.zeros_(self.tau) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x + + +class FilterResponseNormAct2d(nn.Module): + def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): + super(FilterResponseNormAct2d, self).__init__() + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer, inplace=inplace) + else: + self.act = nn.Identity() + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return self.act(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/gather_excite.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/gather_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..2d60dc961e2b5e135d38e290b8fa5820ef0fe18f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: 
https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. + +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
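+        # e.g. channels=256 w/ default rd_ratio=1/16 (an illustrative sketch): the excite MLP below
+        # bottlenecks 256 -> 16 -> 256 before the gate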
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/global_context.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/global_context.py new file mode 100644 index 0000000000000000000000000000000000000000..de7fb5c15f08a5c2fe42cb7c174fff92d6b0d3bf --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/halo_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/halo_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ac64f85e08a24646434fc0a995afa0fd9b9ee7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
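+
+An illustrative sizing note (a sketch, not from the paper): with the defaults used below,
+block_size=8 and halo_size=3, each 8x8 block of queries attends to a
+(8 + 2 * 3) x (8 + 2 * 3) = 14 x 14 window of keys/values around its block.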
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) + stride: output stride of the module, query downscaled if > 1 (default: 1). 
+ num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. + self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. 
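+        # Shape sketch (illustrative): kv is (B, dim_out_qk + dim_out_v, H, W); after padding by
+        # halo_size, unfold() with stride block_size yields one win_size x win_size key/value
+        # window per query block.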
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. + WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/helpers.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..2fa296bcdd110dabe6c442238224132b6d91c79a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/helpers.py @@ -0,0 +1,43 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
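+    # e.g. (illustrative): make_divisible(10, 8) first rounds to 8, and 8 < 0.9 * 10, so it bumps
+    # up to 16; make_divisible(30, 8) rounds to 32, which passes the check and is returned as-is.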
+    if new_v < round_limit * v:
+        new_v += divisor
+    return new_v
+
+
+def extend_tuple(x, n):
+    # pads a tuple to length n by repeating the last value
+    if not isinstance(x, (tuple, list)):
+        x = (x,)
+    else:
+        x = tuple(x)
+    pad_n = n - len(x)
+    if pad_n <= 0:
+        return x[:n]
+    return x + (x[-1],) * pad_n
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/inplace_abn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/inplace_abn.py
new file mode 100644
index 0000000000000000000000000000000000000000..a80889339ebb992c11f84a286c3fd7a627776faa
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/inplace_abn.py
@@ -0,0 +1,87 @@
+import torch
+from torch import nn as nn
+
+try:
+    from inplace_abn.functions import inplace_abn, inplace_abn_sync
+    has_iabn = True
+except ImportError:
+    has_iabn = False
+
+    def inplace_abn(x, weight, bias, running_mean, running_var,
+                    training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01):
+        raise ImportError(
+            "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'")
+
+    def inplace_abn_sync(**kwargs):
+        inplace_abn(**kwargs)
+
+
+class InplaceAbn(nn.Module):
+    """Activated Batch Normalization
+
+    This gathers a BatchNorm and an activation function in a single module
+
+    Parameters
+    ----------
+    num_features : int
+        Number of feature channels in the input and output.
+    eps : float
+        Small constant to prevent numerical issues.
+    momentum : float
+        Momentum factor applied to compute running statistics.
+    affine : bool
+        If `True` apply learned scale and shift transformation after normalization.
+    act_layer : str or nn.Module type
+        Name or type of the activation functions, one of: `leaky_relu`, `elu`
+    act_param : float
+        Negative slope for the `leaky_relu` activation.
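+
+    Example (an illustrative sketch; requires the optional inplace_abn package)
+    -------
+    >>> abn = InplaceAbn(64, act_layer='leaky_relu', act_param=0.01)
+    >>> out = abn(torch.randn(2, 64, 16, 16))  # fused BN + leaky_relu, shape (2, 64, 16, 16)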
+ """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_layer=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer is None or act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/lambda_layer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/lambda_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..e50b43c8c55a371fd7466dcb58bb329ec652b131 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/lambda_layer.py @@ -0,0 +1,133 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, +Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ + + +def rel_pos_indices(size): + size = to_2tuple(size) + pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) + rel_pos = pos[:, None, :] - pos[:, :, None] + rel_pos[0] += size[0] - 1 + rel_pos[1] += size[1] - 1 + return rel_pos # 2, H * W, H * W + + +class LambdaLayer(nn.Module): + """Lambda Layer + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + + NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. 
+
+    The internal dimensions of the lambda module are controlled via the interaction of several arguments.
+      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
+      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
+      * the query (q) and key (k) dimensions are determined by
+        * dim_head = (dim_out * qk_ratio // num_heads) if dim_head is None
+        * q = num_heads * dim_head, k = dim_head
+      * as seen above, qk_ratio determines the ratio of q and k relative to the output if dim_head not set
+
+    Args:
+        dim (int): input dimension to the module
+        dim_out (int): output dimension of the module, same as dim if not set
+        feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W
+        stride (int): output stride of the module, avg pool used if stride == 2
+        num_heads (int): parallel attention heads.
+        dim_head (int): dimension of query and key heads, calculated from dim_out * qk_ratio // num_heads if not set
+        r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9)
+        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
+        qkv_bias (bool): add bias to q, k, and v projections
+    """
+    def __init__(
+            self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9,
+            qk_ratio=1.0, qkv_bias=False):
+        super().__init__()
+        dim_out = dim_out or dim
+        assert dim_out % num_heads == 0, 'dim_out must be divisible by num_heads'
+        self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
+        self.num_heads = num_heads
+        self.dim_v = dim_out // num_heads
+
+        self.qkv = nn.Conv2d(
+            dim,
+            num_heads * self.dim_qk + self.dim_qk + self.dim_v,
+            kernel_size=1, bias=qkv_bias)
+        self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk)
+        self.norm_v = nn.BatchNorm2d(self.dim_v)
+
+        if r is not None:
+            # local lambda convolution for pos
+            self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0))
+            self.pos_emb = None
+            self.rel_pos_indices = None
+        else:
+            # relative pos embedding
+            assert feat_size is not None
+            feat_size = to_2tuple(feat_size)
+            rel_size = [2 * s - 1 for s in feat_size]
+            self.conv_lambda = None
+            self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk))
+            self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False)
+
+        self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()
+
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5)  # fan-in
+        if self.conv_lambda is not None:
+            trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5)
+        if self.pos_emb is not None:
+            trunc_normal_(self.pos_emb, std=.02)
+
+    def forward(self, x):
+        B, C, H, W = x.shape
+        M = H * W
+        qkv = self.qkv(x)
+        q, k, v = torch.split(qkv, [
+            self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1)
+        q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2)  # B, num_heads, M, K
+        v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2)  # B, M, V
+        k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1)  # B, K, M
+
+        content_lam = k @ v  # B, K, V
+        content_out = q @ content_lam.unsqueeze(1)  # B, num_heads, M, V
+
+        if self.pos_emb is None:
+            position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v))  # B, H, W, V, K
+            position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W,
self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/linear.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..38fe3380b067ea0b275c45ffd689afdeb4598f3c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. + """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/median_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/median_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..40bd71a7a3840aaebefd2af0a99605b845054cd7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. 
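+
+    Windows are gathered with unfold() and reduced with a median; e.g. (an illustrative
+    note, not in the original docstring) MedianPool2d(kernel_size=3, stride=1, same=True)
+    preserves spatial dims and behaves as a classic 3x3 median filter.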
+ + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mixed_conv2d.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mixed_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0ce565c0a9d348d4e68165960fa77fcf7f70d7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/ml_decoder.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/ml_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..3f828c6d0ab059dc9ce60ad1b3bf7c064f7bf0f9 --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/ml_decoder.py
@@ -0,0 +1,156 @@
+from typing import Optional
+
+import torch
+from torch import nn, Tensor
+from torch.nn.modules.transformer import _get_activation_fn
+
+
+def add_ml_decoder_head(model):
+    if hasattr(model, 'global_pool') and hasattr(model, 'fc'):  # most CNN models, like ResNet50
+        model.global_pool = nn.Identity()
+        del model.fc
+        num_classes = model.num_classes
+        num_features = model.num_features
+        model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
+    elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'):  # EfficientNet
+        model.global_pool = nn.Identity()
+        del model.classifier
+        num_classes = model.num_classes
+        num_features = model.num_features
+        model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
+    elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name():  # hasattr(model, 'head')
+        del model.head
+        num_classes = model.num_classes
+        num_features = model.num_features
+        model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features)
+    else:
+        print("Model structure is not currently supported by add_ml_decoder_head")
+        exit(-1)
+    if hasattr(model, 'drop_rate'):  # ML-Decoder has inner dropout
+        model.drop_rate = 0
+    return model
+
+
+class TransformerDecoderLayerOptimal(nn.Module):
+    def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu",
+                 layer_norm_eps=1e-5) -> None:
+        super(TransformerDecoderLayerOptimal, self).__init__()
+        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
+        self.dropout = nn.Dropout(dropout)
+        self.dropout1 = nn.Dropout(dropout)
+        self.dropout2 = nn.Dropout(dropout)
+        self.dropout3 = nn.Dropout(dropout)
+
+        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+
+        # Implementation of Feedforward model
+        self.linear1 = nn.Linear(d_model, dim_feedforward)
+        self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
+        self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
+
+        self.activation = _get_activation_fn(activation)
+
+    def __setstate__(self, state):
+        if 'activation' not in state:
+            state['activation'] = torch.nn.functional.relu
+        super(TransformerDecoderLayerOptimal, self).__setstate__(state)
+
+    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
+                memory_mask: Optional[Tensor] = None,
+                tgt_key_padding_mask: Optional[Tensor] = None,
+                memory_key_padding_mask: Optional[Tensor] = None) -> Tensor:
+        tgt = tgt + self.dropout1(tgt)
+        tgt = self.norm1(tgt)
+        tgt2 = self.multihead_attn(tgt, memory, memory)[0]
+        tgt = tgt + self.dropout2(tgt2)
+        tgt = self.norm2(tgt)
+        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+        tgt = tgt + self.dropout3(tgt2)
+        tgt = self.norm3(tgt)
+        return tgt
+
+
+# @torch.jit.script
+# class ExtrapClasses(object):
+#     def __init__(self, num_queries: int, group_size: int):
+#         self.num_queries = num_queries
+#         self.group_size = group_size
+#
+#     def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap:
+#     torch.Tensor):
+#         # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size)
+#         h = h[..., None].repeat(1, 1, 1, self.group_size)  # torch.Size([bs, 5, 768, groups])
+#         w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size))
+#         out = (h * w).sum(dim=2) + class_embed_b
+#         out = out.view((h.shape[0], self.group_size * self.num_queries))
+#         return out
+
+@torch.jit.script
+class GroupFC(object):
+    def __init__(self, embed_len_decoder: int):
+        self.embed_len_decoder = embed_len_decoder
+
+    def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap: torch.Tensor):
+        for i in range(self.embed_len_decoder):
+            h_i = h[:, i, :]
+            w_i = duplicate_pooling[i, :, :]
+            out_extrap[:, i, :] = torch.matmul(h_i, w_i)
+
+
+class MLDecoder(nn.Module):
+    def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048):
+        super(MLDecoder, self).__init__()
+        embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups
+        if embed_len_decoder > num_classes:
+            embed_len_decoder = num_classes
+
+        # switching to 768 initial embeddings
+        decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding
+        self.embed_standart = nn.Linear(initial_num_features, decoder_embedding)
+
+        # decoder
+        decoder_dropout = 0.1
+        num_layers_decoder = 1
+        dim_feedforward = 2048
+        layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding,
+                                                      dim_feedforward=dim_feedforward, dropout=decoder_dropout)
+        self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder)
+
+        # non-learnable queries
+        self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding)
+        self.query_embed.requires_grad_(False)
+
+        # group fully-connected
+        self.num_classes = num_classes
+        self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999)
+        self.duplicate_pooling = torch.nn.Parameter(
+            torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor))
+        self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes))
+        torch.nn.init.xavier_normal_(self.duplicate_pooling)
+        torch.nn.init.constant_(self.duplicate_pooling_bias, 0)
+        self.group_fc = GroupFC(embed_len_decoder)
+
+    def forward(self, x):
+        if len(x.shape) == 4:  # [bs, 2048, 7, 7]
+            embedding_spatial = x.flatten(2).transpose(1, 2)
+        else:  # [bs, 197, 768]
+            embedding_spatial = x
+        embedding_spatial_768 = self.embed_standart(embedding_spatial)
+        embedding_spatial_768 = torch.nn.functional.relu(embedding_spatial_768, inplace=True)
+
+        bs = embedding_spatial_768.shape[0]
+        query_embed = self.query_embed.weight
+        # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)
+        tgt = query_embed.unsqueeze(1).expand(-1, bs, -1)  # no allocation of memory with expand
+        h = self.decoder(tgt, embedding_spatial_768.transpose(0, 1))  # [embed_len_decoder, batch, 768]
+        h = h.transpose(0, 1)
+
+        out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype)
+        self.group_fc(h, self.duplicate_pooling, out_extrap)
+        h_out = out_extrap.flatten(1)[:, :self.num_classes]
+        h_out += self.duplicate_pooling_bias
+        logits = h_out
+        return logits
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mlp.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..91e80a84c78b7c90314235fd10a50f459f36ce57
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/mlp.py
@@ -0,0 +1,126 @@
+""" MLP module w/ dropout and configurable activation layer
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+from torch import nn as nn
+
+from .helpers import to_2tuple
+
+
+class Mlp(nn.Module):
+    """ MLP as used in Vision Transformer, MLP-Mixer and related networks
+    """
+    def __init__(self, in_features, hidden_features=None,
out_features=None, act_layer=nn.GELU, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features // 2, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
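+            # illustrative note: gMLP's SpatialGatingUnit is one such gate_layer; it
+            # consumes half the hidden channels as the gate, which is why fc2 below
+            # is sized from hidden_features // 2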
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, + norm_layer=None, bias=True, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.drop = nn.Dropout(drop) + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/non_local_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/non_local_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..670e8f2475374b1f31741f75f1dedf617e0e6546 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/non_local_attn.py @@ -0,0 +1,145 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvNormAct +from .helpers import make_divisible +from .trace_utils import _assert + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
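+
+    Example (an illustrative sketch, not from the upstream docstring):
+        >>> attn = NonLocalAttn(64)
+        >>> out = attn(torch.randn(2, 64, 14, 14))  # out.shape == (2, 64, 14, 14)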
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + _assert(block_size == block_size1, '') + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + _assert(x.shape[-1] % self.block_size == 0, '') + _assert(x.shape[-2] % self.block_size == 0, '') + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // 
self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm.py new file mode 100644 index 0000000000000000000000000000000000000000..77d719ede3a111a32448da4c6607e6acafa1c3e5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm.py @@ -0,0 +1,117 @@ +""" Normalization layers and wrappers + +Norm layer definitions that support fast norm and consistent channel arg order (always first arg). + +Hacked together by / Copyright 2022 Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) + + def forward(self, x): + if self.fast_norm: + return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + else: + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class GroupNorm1(nn.GroupNorm): + """ Group Normalization with 1 group. 
+    Input: tensor in shape [B, C, *]
+    """
+
+    def __init__(self, num_channels, **kwargs):
+        super().__init__(1, num_channels, **kwargs)
+        self.fast_norm = is_fast_norm()  # can't script unless we have these flags here (no globals)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if self.fast_norm:
+            return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
+        else:
+            return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
+
+
+class LayerNorm(nn.LayerNorm):
+    """ LayerNorm w/ fast norm option
+    """
+    def __init__(self, num_channels, eps=1e-6, affine=True):
+        super().__init__(num_channels, eps=eps, elementwise_affine=affine)
+        self._fast_norm = is_fast_norm()  # can't script unless we have these flags here (no globals)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if self._fast_norm:
+            x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        else:
+            x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        return x
+
+
+class LayerNorm2d(nn.LayerNorm):
+    """ LayerNorm for channels of '2D' spatial NCHW tensors """
+    def __init__(self, num_channels, eps=1e-6, affine=True):
+        super().__init__(num_channels, eps=eps, elementwise_affine=affine)
+        self._fast_norm = is_fast_norm()  # can't script unless we have these flags here (no globals)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = x.permute(0, 2, 3, 1)
+        if self._fast_norm:
+            x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        else:
+            x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+        x = x.permute(0, 3, 1, 2)
+        return x
+
+
+def _is_contiguous(tensor: torch.Tensor) -> bool:
+    # jit is oh so lovely :/
+    if torch.jit.is_scripting():
+        return tensor.is_contiguous()
+    else:
+        return tensor.is_contiguous(memory_format=torch.contiguous_format)
+
+
+@torch.jit.script
+def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float):
+    s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True)
+    x = (x - u) * torch.rsqrt(s + eps)
+    x = x * weight[:, None, None] + bias[:, None, None]
+    return x
+
+
+def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float):
+    u = x.mean(dim=1, keepdim=True)
+    s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0)
+    x = (x - u) * torch.rsqrt(s + eps)
+    x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
+    return x
+
+
+class LayerNormExp2d(nn.LayerNorm):
+    """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W).
+
+    Experimental implementation w/ manual norm for non-contiguous tensors.
+
+    This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last
+    layout. However, benefits are not always clear and it can perform worse on other GPUs.
+ """ + + def __init__(self, num_channels, eps=1e-6): + super().__init__(num_channels, eps=eps) + + def forward(self, x) -> torch.Tensor: + if _is_contiguous(x): + x = F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) + else: + x = _layer_norm_cf(x, self.weight, self.bias, self.eps) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm_act.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm_act.py new file mode 100644 index 0000000000000000000000000000000000000000..ff075fbcf40d1b055704072493865f712bd5d65e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/norm_act.py @@ -0,0 +1,252 @@ +""" Normalization + Activation Layers + +Provides Norm+Act fns for standard PyTorch norm layers such as +* BatchNorm +* GroupNorm +* LayerNorm + +This allows swapping with alternative layers that are natively both norm + act such as +* EvoNorm (evo_norm.py) +* FilterResponseNorm (filter_response_norm.py) +* InplaceABN (inplace_abn.py) + +Hacked together by / Copyright 2022 Ross Wightman +""" +from typing import Union, List, Optional, Any + +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .create_act import get_act_layer +from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm +from .trace_utils import _assert + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. + """ + def __init__( + self, + num_features, + eps=1e-5, + momentum=0.1, + affine=True, + track_running_stats=True, + apply_act=True, + act_layer=nn.ReLU, + inplace=True, + drop_layer=None, + device=None, + dtype=None + ): + try: + factory_kwargs = {'device': device, 'dtype': dtype} + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, + **factory_kwargs + ) + except TypeError: + # NOTE for backwards compat with old PyTorch w/o factory device/dtype support + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing + _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') + + # exponential_average_factor is set to self.momentum + # (when it is available) only so that it gets updated + # in ONNX graph when this node is exported to ONNX. 
+ if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: # type: ignore[has-type] + self.num_batches_tracked = self.num_batches_tracked + 1 # type: ignore[has-type] + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + r""" + Decide whether the mini-batch stats should be used for normalization rather than the buffers. + Mini-batch stats are used in training mode, and in eval mode when buffers are None. + """ + if self.training: + bn_training = True + else: + bn_training = (self.running_mean is None) and (self.running_var is None) + + r""" + Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be + passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are + used for normalization (i.e. in eval mode when buffers are not None). + """ + x = F.batch_norm( + x, + # If buffers are not to be tracked, ensure that they won't be updated + self.running_mean if not self.training or self.track_running_stats else None, + self.running_var if not self.training or self.track_running_stats else None, + self.weight, + self.bias, + bn_training, + exponential_average_factor, + self.eps, + ) + x = self.drop(x) + x = self.act(x) + return x + + +class SyncBatchNormAct(nn.SyncBatchNorm): + # Thanks to Selim Seferbekov (https://github.com/rwightman/pytorch-image-models/issues/1254) + # This is a quick workaround to support SyncBatchNorm for timm BatchNormAct2d layers + # but ONLY when used in conjunction with the timm conversion function below. + # Do not create this module directly or use the PyTorch conversion function. 
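+    # Illustrative usage sketch (assumes a distributed / DDP training setup):
+    #   model = convert_sync_batchnorm(model)
+    # keeps each BatchNormAct2d's act/drop submodules intact, whereas
+    # torch.nn.SyncBatchNorm.convert_sync_batchnorm would silently drop them.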
+ def forward(self, x: torch.Tensor) -> torch.Tensor: + x = super().forward(x) # SyncBN doesn't work with torchscript anyways, so this is fine + if hasattr(self, "drop"): + x = self.drop(x) + if hasattr(self, "act"): + x = self.act(x) + return x + + +def convert_sync_batchnorm(module, process_group=None): + # convert both BatchNorm and BatchNormAct layers to Synchronized variants + module_output = module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + if isinstance(module, BatchNormAct2d): + # convert timm norm + act layer + module_output = SyncBatchNormAct( + module.num_features, + module.eps, + module.momentum, + module.affine, + module.track_running_stats, + process_group=process_group, + ) + # set act and drop attr from the original module + module_output.act = module.act + module_output.drop = module.drop + else: + # convert standard BatchNorm layers + module_output = torch.nn.SyncBatchNorm( + module.num_features, + module.eps, + module.momentum, + module.affine, + module.track_running_stats, + process_group, + ) + if module.affine: + with torch.no_grad(): + module_output.weight = module.weight + module_output.bias = module.bias + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + if hasattr(module, "qconfig"): + module_output.qconfig = module.qconfig + for name, child in module.named_children(): + module_output.add_module(name, convert_sync_batchnorm(child, process_group)) + del module + return module_output + + +def _num_groups(num_channels, num_groups, group_size): + if group_size: + assert num_channels % group_size == 0 + return num_channels // group_size + return num_groups + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__( + self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(GroupNormAct, self).__init__( + _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + self._fast_norm = is_fast_norm() + + def forward(self, x): + if self._fast_norm: + x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + else: + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct(nn.LayerNorm): + def __init__( + self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + self._fast_norm = is_fast_norm() + + def forward(self, x): + if self._fast_norm: + x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + else: + x = F.layer_norm(x, 
self.normalized_shape, self.weight, self.bias, self.eps) + x = self.drop(x) + x = self.act(x) + return x + + +class LayerNormAct2d(nn.LayerNorm): + def __init__( + self, num_channels, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): + super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) + self.drop = drop_layer() if drop_layer is not None else nn.Identity() + act_layer = get_act_layer(act_layer) # string -> nn.Module + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + self._fast_norm = is_fast_norm() + + def forward(self, x): + x = x.permute(0, 2, 3, 1) + if self._fast_norm: + x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + else: + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + x = x.permute(0, 3, 1, 2) + x = self.drop(x) + x = self.act(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/padding.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/padding.py new file mode 100644 index 0000000000000000000000000000000000000000..34afc37c6c59c8782ad29c7a779f58177011f891 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args be done statically? 
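+# (e.g. kernel_size=3, stride=1, dilation=1 always needs a total pad of 2 that can be
+# split 1/1 at build time; any stride > 1 makes the pad depend on input size, so it
+# must be applied dynamically at runtime instead)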
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/patch_embed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..be8740ce89b2ea4e37edad188b9f6d72ac8bdb8d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/patch_embed.py @@ -0,0 +1,48 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. 
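+
+A kernel_size=patch_size conv with stride=patch_size maps an (H, W) image to
+(H // patch_size) * (W // patch_size) tokens of dimension embed_dim (see PatchEmbed below).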
+ +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple +from .trace_utils import _assert + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + norm_layer=None, + flatten=True, + bias=True, + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pool2d_same.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pool2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..4c2a1c44713e552be850865ada9623a1c3b1d836 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? 
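+    # note: the zeros added by pad_same() below are always included in the average,
+    # since count_include_pad only governs F.avg_pool2d's own padding, which is 0 here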
+ x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pos_embed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pos_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..99a122a09bdfa6a0ac12b13de91a3db7173813c7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/pos_embed.py @@ -0,0 +1,207 @@ +import math +from typing import List, Tuple, Optional, Union + +import torch +from torch import nn as nn + + +def pixel_freq_bands( + num_bands: int, + max_freq: float = 224., + linear_bands: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +): + if linear_bands: + bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device) + else: + bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device) + return bands * torch.pi + + +def inv_freq_bands( + num_bands: int, + temperature: float = 100000., + step: int = 2, + dtype: torch.dtype = torch.float32, + device: Optional[torch.device] = None, +) -> torch.Tensor: + inv_freq = 1. 
/ (temperature ** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands))
+    return inv_freq
+
+
+def build_sincos2d_pos_embed(
+        feat_shape: List[int],
+        dim: int = 64,
+        temperature: float = 10000.,
+        reverse_coord: bool = False,
+        interleave_sin_cos: bool = False,
+        dtype: torch.dtype = torch.float32,
+        device: Optional[torch.device] = None
+) -> torch.Tensor:
+    """
+
+    Args:
+        feat_shape: spatial (H, W) of the feature map to embed
+        dim: embedding dim, must be divisible by 4
+        temperature: frequency temperature for the sin/cos bands
+        reverse_coord: stack grid order W, H instead of H, W
+        interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos
+        dtype: output dtype
+        device: output device
+
+    Returns:
+        position embedding of shape (feat_shape[0] * feat_shape[1], dim)
+    """
+    assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding'
+    pos_dim = dim // 4
+    bands = inv_freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device)
+
+    if reverse_coord:
+        feat_shape = feat_shape[::-1]  # stack W, H instead of H, W
+    grid = torch.stack(
+        torch.meshgrid([torch.arange(s, device=device, dtype=dtype) for s in feat_shape])).flatten(1).transpose(0, 1)
+    pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
+    # FIXME add support for unflattened spatial dim?
+
+    stack_dim = 2 if interleave_sin_cos else 1  # stack sin, cos, sin, cos instead of sin sin cos cos
+    pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
+    return pos_emb
+
+
+def build_fourier_pos_embed(
+        feat_shape: List[int],
+        bands: Optional[torch.Tensor] = None,
+        num_bands: int = 64,
+        max_res: int = 224,
+        linear_bands: bool = False,
+        include_grid: bool = False,
+        concat_out: bool = True,
+        in_pixels: bool = True,
+        dtype: torch.dtype = torch.float32,
+        device: Optional[torch.device] = None,
+) -> List[torch.Tensor]:
+    if bands is None:
+        if in_pixels:
+            bands = pixel_freq_bands(num_bands, float(max_res), linear_bands=linear_bands, dtype=dtype, device=device)
+        else:
+            bands = inv_freq_bands(num_bands, step=1, dtype=dtype, device=device)
+    else:
+        if device is None:
+            device = bands.device
+        if dtype is None:
+            dtype = bands.dtype
+
+    if in_pixels:
+        grid = torch.stack(torch.meshgrid(
+            [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape]), dim=-1)
+    else:
+        grid = torch.stack(torch.meshgrid(
+            [torch.arange(s, device=device, dtype=dtype) for s in feat_shape]), dim=-1)
+    grid = grid.unsqueeze(-1)
+    pos = grid * bands
+
+    pos_sin, pos_cos = pos.sin(), pos.cos()
+    out = (grid, pos_sin, pos_cos) if include_grid else (pos_sin, pos_cos)
+    # FIXME torchscript doesn't like multiple return types, probably need to always cat?
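+    # shape note (inferred, not upstream): pos_sin / pos_cos come out as
+    # (*feat_shape, len(feat_shape), num_bands); concat_out fuses the pieces along
+    # the last dim (plus a width-1 grid slice when include_grid=True)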
+    if concat_out:
+        out = torch.cat(out, dim=-1)
+    return out
+
+
+class FourierEmbed(nn.Module):
+
+    def __init__(self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False):
+        super().__init__()
+        self.max_res = max_res
+        self.num_bands = num_bands
+        self.concat_grid = concat_grid
+        self.keep_spatial = keep_spatial
+        self.register_buffer('bands', pixel_freq_bands(num_bands, float(max_res)), persistent=False)
+
+    def forward(self, x):
+        B, C = x.shape[:2]
+        feat_shape = x.shape[2:]
+        emb = build_fourier_pos_embed(
+            feat_shape,
+            self.bands,
+            include_grid=self.concat_grid,
+            dtype=x.dtype,
+            device=x.device)
+        emb = emb.transpose(-1, -2).flatten(len(feat_shape))
+        batch_expand = (B,) + (-1,) * (x.ndim - 1)
+
+        # FIXME support nD
+        if self.keep_spatial:
+            x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1)
+        else:
+            x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1)
+            x = x.reshape(B, feat_shape.numel(), -1)
+
+        return x
+
+
+def rot(x):
+    return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape)
+
+
+def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
+    return x * cos_emb + rot(x) * sin_emb
+
+
+def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
+    if isinstance(x, torch.Tensor):
+        x = [x]
+    return [t * cos_emb + rot(t) * sin_emb for t in x]
+
+
+def apply_rot_embed_split(x: torch.Tensor, emb):
+    split = emb.shape[-1] // 2
+    return x * emb[:, :split] + rot(x) * emb[:, split:]
+
+
+def build_rotary_pos_embed(
+        feat_shape: List[int],
+        bands: Optional[torch.Tensor] = None,
+        dim: int = 64,
+        max_freq: float = 224,
+        linear_bands: bool = False,
+        dtype: torch.dtype = torch.float32,
+        device: Optional[torch.device] = None,
+):
+    """
+    NOTE: shape arg should include spatial dim only
+    """
+    feat_shape = torch.Size(feat_shape)
+
+    sin_emb, cos_emb = build_fourier_pos_embed(
+        feat_shape, bands=bands, num_bands=dim // 4, max_res=max_freq, linear_bands=linear_bands,
+        concat_out=False, device=device, dtype=dtype)
+    N = feat_shape.numel()
+    sin_emb = sin_emb.reshape(N, -1).repeat_interleave(2, -1)
+    cos_emb = cos_emb.reshape(N, -1).repeat_interleave(2, -1)
+    return sin_emb, cos_emb
+
+
+class RotaryEmbedding(nn.Module):
+    """ Rotary position embedding
+
+    NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
+    been well tested, and will likely change. It will be moved to its own file.
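+
+    In short (explanatory note): each channel pair is rotated by a position-dependent
+    angle, i.e. apply_rot_embed() computes x * cos_emb + rot(x) * sin_emb, so relative
+    offsets appear as phase differences when rotated queries and keys are compared.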
+
+    The following impl/resources were referenced for this impl:
+    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
+    * https://blog.eleuther.ai/rotary-embeddings/
+    """
+    def __init__(self, dim, max_res=224, linear_bands: bool = False):
+        super().__init__()
+        self.dim = dim
+        self.register_buffer('bands', pixel_freq_bands(dim // 4, max_res, linear_bands=linear_bands), persistent=False)
+
+    def get_embed(self, shape: List[int]):
+        return build_rotary_pos_embed(shape, self.bands)
+
+    def forward(self, x):
+        # assuming channel-first tensor where spatial dims are >= 2
+        sin_emb, cos_emb = self.get_embed(x.shape[2:])
+        return apply_rot_embed(x, sin_emb, cos_emb)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/selective_kernel.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/selective_kernel.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d71e3aa696b69bbfc00b39228c0e5fb7152c83c
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/selective_kernel.py
@@ -0,0 +1,119 @@
+""" Selective Kernel Convolution/Attention
+
+Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+from torch import nn as nn
+
+from .conv_bn_act import ConvNormActAa
+from .helpers import make_divisible
+from .trace_utils import _assert
+
+
+def _kernel_valid(k):
+    if isinstance(k, (list, tuple)):
+        for ki in k:
+            _kernel_valid(ki)  # validate every entry, not just the first
+        return
+    assert k >= 3 and k % 2
+
+
+class SelectiveKernelAttn(nn.Module):
+    def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
+        """ Selective Kernel Attention Module
+
+        Selective Kernel attention mechanism factored out into its own module.
+
+        """
+        super(SelectiveKernelAttn, self).__init__()
+        self.num_paths = num_paths
+        self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
+        self.bn = norm_layer(attn_channels)
+        self.act = act_layer(inplace=True)
+        self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        _assert(x.shape[1] == self.num_paths, '')
+        x = x.sum(1).mean((2, 3), keepdim=True)
+        x = self.fc_reduce(x)
+        x = self.bn(x)
+        x = self.act(x)
+        x = self.fc_select(x)
+        B, C, H, W = x.shape
+        x = x.view(B, self.num_paths, C // self.num_paths, H, W)
+        x = torch.softmax(x, dim=1)
+        return x
+
+
+class SelectiveKernel(nn.Module):
+
+    def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1,
+                 rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True,
+                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None):
+        """ Selective Kernel Convolution Module
+
+        As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications.
+
+        Largest change is the input split, which divides the input channels across each convolution path. This can
+        be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps
+        the parameter count from ballooning when the convolutions themselves don't have groups, but still provides
+        a noteworthy increase in performance over similar param count models without this attention layer.
-Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + aa_layer (nn.Module): anti-aliasing module + drop_layer (nn.Module): spatial drop module in convs (drop block, etc) + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer, drop_layer=drop_layer) + self.paths = nn.ModuleList([ + ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/separable_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/separable_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..c081e02bc45900a7220bc7ffbb709eedbb1cc4df --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/separable_conv.py @@ -0,0 +1,76 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
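+
+A minimal usage sketch (shapes illustrative; the default padding arg resolves to
+symmetric 'same'-style padding for odd kernels):
+
+    >>> conv = SeparableConv2d(32, 64, kernel_size=3)
+    >>> y = conv(torch.randn(1, 32, 56, 56))  # -> (1, 64, 56, 56)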
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer + + +class SeparableConvNormAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_layer=None): + super(SeparableConvNormAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} + self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + x = self.bn(x) + return x + + +SeparableConvBnAct = SeparableConvNormAct + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/space_to_depth.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/space_to_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e8e0b2a486d51fe3e4ab0472d89b7f1b92e1dc --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def 
__init__(self, no_jit=False):
+        super().__init__()
+        if not no_jit:
+            self.op = SpaceToDepthJit()
+        else:
+            self.op = SpaceToDepth()
+
+    def forward(self, x):
+        return self.op(x)
+
+
+class DepthToSpace(nn.Module):
+
+    def __init__(self, block_size):
+        super().__init__()
+        self.bs = block_size
+
+    def forward(self, x):
+        N, C, H, W = x.size()
+        x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W)  # (N, bs, bs, C//bs^2, H, W)
+        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # (N, C//bs^2, H, bs, W, bs)
+        x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs)  # (N, C//bs^2, H * bs, W * bs)
+        return x
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_attn.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_attn.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac54f8988ac6bdc9e852585692248875a016b7fb
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_attn.py
@@ -0,0 +1,84 @@
+""" Split Attention Conv2d (for ResNeSt Models)
+
+Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955
+
+Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt
+
+Modified for torchscript compat, performance, and consistency with timm by Ross Wightman
+"""
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from .helpers import make_divisible
+
+
+class RadixSoftmax(nn.Module):
+    def __init__(self, radix, cardinality):
+        super(RadixSoftmax, self).__init__()
+        self.radix = radix
+        self.cardinality = cardinality
+
+    def forward(self, x):
+        batch = x.size(0)
+        if self.radix > 1:
+            x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
+            x = F.softmax(x, dim=1)
+            x = x.reshape(batch, -1)
+        else:
+            x = torch.sigmoid(x)
+        return x
+
+
+class SplitAttn(nn.Module):
+    """Split-Attention (aka Splat)
+    """
+    def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None,
+                 dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8,
+                 act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs):
+        super(SplitAttn, self).__init__()
+        out_channels = out_channels or in_channels
+        self.radix = radix
+        mid_chs = out_channels * radix
+        if rd_channels is None:
+            attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor)
+        else:
+            attn_chs = rd_channels * radix
+
+        padding = kernel_size // 2 if padding is None else padding
+        self.conv = nn.Conv2d(
+            in_channels, mid_chs, kernel_size, stride, padding, dilation,
+            groups=groups * radix, bias=bias, **kwargs)
+        self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity()
+        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
+        self.act0 = act_layer(inplace=True)
+        self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups)
+        self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity()
+        self.act1 = act_layer(inplace=True)
+        self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups)
+        self.rsoftmax = RadixSoftmax(radix, groups)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn0(x)
+        x = self.drop(x)
+        x = self.act0(x)
+
+        B, RC, H, W = x.shape
+        if self.radix > 1:
+            x = x.reshape((B, self.radix, RC // self.radix, H, W))
+            x_gap = x.sum(dim=1)
+        else:
+            x_gap = x
+        x_gap = x_gap.mean((2, 3), keepdim=True)
+        x_gap = self.fc1(x_gap)
+        x_gap = self.bn1(x_gap)
+        x_gap = self.act1(x_gap)
+        x_attn = self.fc2(x_gap)
+
+        x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1)
+        if self.radix > 1:
+            out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1)
+        else:
+            out = x * x_attn
+        return out.contiguous()
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_batchnorm.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_batchnorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..830781b335161f8d6dd74c9458070bb1fa88a918
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/split_batchnorm.py
@@ -0,0 +1,75 @@
+""" Split BatchNorm
+
+A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
+a separate BN layer. The first split is passed through the parent BN layers with weight/bias
+keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
+namespace.
+
+This allows easily removing the auxiliary BN layers after training to efficiently
+achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
+'Disentangled Learning via An Auxiliary BN'
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+import torch.nn as nn
+
+
+class SplitBatchNorm2d(torch.nn.BatchNorm2d):
+
+    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
+                 track_running_stats=True, num_splits=2):
+        super().__init__(num_features, eps, momentum, affine, track_running_stats)
+        assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
+        self.num_splits = num_splits
+        self.aux_bn = nn.ModuleList([
+            nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])
+
+    def forward(self, input: torch.Tensor):
+        if self.training:  # aux BN only relevant while training
+            split_size = input.shape[0] // self.num_splits
+            assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
+            split_input = input.split(split_size)
+            x = [super().forward(split_input[0])]
+            for i, a in enumerate(self.aux_bn):
+                x.append(a(split_input[i + 1]))
+            return torch.cat(x, dim=0)
+        else:
+            return super().forward(input)
+
+
+def convert_splitbn_model(module, num_splits=2):
+    """
+    Recursively traverse module and its children to replace all instances of
+    ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchNorm2d`.
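+    The first batch split trains the parent BN parameters/stats, while the remaining splits
+    are routed through the `.aux_bn` copies (see `SplitBatchNorm2d.forward` above).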
+ Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/squeeze_excite.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/squeeze_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..2e41d956c964fbccc369864303cdb4a6e3560d9c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
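+        # squeeze-excite bottleneck below: 1x1 reduce -> (optional) norm -> act -> 1x1 expand -> gate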
+ self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/std_conv.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/std_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..d896ba5c2f7f517d6ce0508d789a516e7bfb4cf1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. 
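+
+    Each output filter's weights are re-standardized to zero mean / unit variance on every
+    forward pass before the convolution (implemented below via an `F.batch_norm` call over
+    the flattened weight, with `training=True` and momentum 0 so no stats are tracked).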
+ + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
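+
+    As in `ScaledStdConv2d` above, the standardized weight is further scaled by a learnable
+    per-channel `gain` times `gamma / sqrt(fan_in)`, folded into the `F.batch_norm` weight arg.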
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/test_time_pool.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/test_time_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..5826d8c966d7bffa62f5f5fdd224f3f691276ce6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=False): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/trace_utils.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/trace_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..83970729e628b525d24162f5df37ee5bc253438f --- /dev/null +++ 
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. + Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/weight_init.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/weight_init.py new file mode 100644 index 0000000000000000000000000000000000000000..943e4f4cec20eb03230748aa919010d017643303 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/layers/weight_init.py @@ -0,0 +1,125 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are + applied while sampling the normal with mean/std applied, therefore a, b args + should be adjusted to match the range of mean, std args. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + with torch.no_grad(): + return _trunc_normal_(tensor, mean, std, a, b) + + +def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. 
The values are effectively drawn from the
+    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+    with values outside :math:`[a, b]` redrawn until they are within
+    the bounds. The method used for generating the random values works
+    best when :math:`a \leq \text{mean} \leq b`.
+
+    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
+    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
+    and the result is subsequently scaled and shifted by the mean and std args.
+
+    Args:
+        tensor: an n-dimensional `torch.Tensor`
+        mean: the mean of the normal distribution
+        std: the standard deviation of the normal distribution
+        a: the minimum cutoff value
+        b: the maximum cutoff value
+    Examples:
+        >>> w = torch.empty(3, 5)
+        >>> nn.init.trunc_normal_(w)
+    """
+    with torch.no_grad():
+        _trunc_normal_(tensor, 0, 1.0, a, b)
+        tensor.mul_(std).add_(mean)
+    return tensor
+
+
+def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
+    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+    if mode == 'fan_in':
+        denom = fan_in
+    elif mode == 'fan_out':
+        denom = fan_out
+    elif mode == 'fan_avg':
+        denom = (fan_in + fan_out) / 2
+    else:
+        raise ValueError(f"invalid mode {mode}")
+
+    variance = scale / denom
+
+    if distribution == "truncated_normal":
+        # constant is stddev of standard normal truncated to (-2, 2)
+        trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)
+    elif distribution == "normal":
+        with torch.no_grad():
+            tensor.normal_(std=math.sqrt(variance))
+    elif distribution == "uniform":
+        bound = math.sqrt(3 * variance)
+        with torch.no_grad():
+            tensor.uniform_(-bound, bound)
+    else:
+        raise ValueError(f"invalid distribution {distribution}")
+
+
+def lecun_normal_(tensor):
+    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/levit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8a360681a6d7381eb28d1ec716bb061fb7e5e5
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/levit.py
@@ -0,0 +1,592 @@
+""" LeViT
+
+Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference`
+ - https://arxiv.org/abs/2104.01136
+
+@article{graham2021levit,
+  title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference},
+  author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze},
+  journal={arXiv preprint arXiv:2104.01136},
+  year={2021}
+}
+
+Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright below.
+
+This version combines both conv/linear models and fixes torchscript compatibility.
+
+Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
+"""
+
+# Copyright (c) 2015-present, Facebook, Inc.
+# All rights reserved.
+ +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +import itertools +from copy import deepcopy +from functools import partial +from typing import Dict + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import to_ntuple, get_act_layer +from .vision_transformer import trunc_normal_ +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'), + **kwargs + } + + +default_cfgs = dict( + levit_128s=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth' + ), + levit_128=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth' + ), + levit_192=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth' + ), + levit_256=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth' + ), + levit_384=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth' + ), + + levit_256d=_cfg(url='', classifier='head.l'), +) + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), + + levit_256d=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6)), +) + +__all__ = ['Levit'] + + +@register_model +def levit_128s(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128s', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_128(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_192(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_192', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_384(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_384', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256d(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256d', pretrained=pretrained, use_conv=use_conv, distilled=False, **kwargs) + + +class ConvNorm(nn.Sequential): + def __init__( + self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1, + groups=1, bn_weight_init=1, resolution=-10000): + super().__init__() + self.add_module('c', nn.Conv2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False)) + self.add_module('bn', nn.BatchNorm2d(out_chs)) + + nn.init.constant_(self.bn.weight, 
bn_weight_init) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class LinearNorm(nn.Sequential): + def __init__(self, in_features, out_features, bn_weight_init=1, resolution=-100000): + super().__init__() + self.add_module('c', nn.Linear(in_features, out_features, bias=False)) + self.add_module('bn', nn.BatchNorm1d(out_features)) + + nn.init.constant_(self.bn.weight, bn_weight_init) + + @torch.no_grad() + def fuse(self): + l, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.c(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Sequential): + def __init__(self, in_features, out_features, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(in_features)) + self.add_module('l', nn.Linear(in_features, out_features, bias=bias)) + + trunc_normal_(self.l.weight, std=std) + if self.l.bias is not None: + nn.init.constant_(self.l.bias, 0) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b16(in_chs, out_chs, activation, resolution=224): + return nn.Sequential( + ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution), + activation(), + ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Module): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class Subsample(nn.Module): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False): + super().__init__() + ln_layer = ConvNorm if use_conv else LinearNorm + self.use_conv = use_conv + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = 
int(attn_ratio * key_dim) * num_heads + + self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2, resolution=resolution) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.val_attn_dim, dim, bn_weight_init=0, resolution=resolution) + ) + + self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution ** 2)) + pos = torch.stack(torch.meshgrid(torch.arange(resolution), torch.arange(resolution))).flatten(1) + rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution) + rel_pos[1] + self.register_buffer('attention_bias_idxs', rel_pos) + self.ab = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view( + B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.val_dim], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + q, k, v = self.qkv(x).view( + B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 3, 1) + v = v.permute(0, 2, 1, 3) + + attn = q @ k * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) + x = self.proj(x) + return x + + +class AttentionSubsample(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2, + act_layer=None, stride=2, resolution=14, resolution_out=7, use_conv=False): + super().__init__() + self.stride = stride + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.key_attn_dim = key_dim * num_heads + self.val_dim = int(attn_ratio * key_dim) + self.val_attn_dim = self.val_dim * self.num_heads + self.resolution = resolution + self.resolution_out_area = resolution_out ** 2 + + self.use_conv = use_conv + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0) + else: + ln_layer = LinearNorm + sub_layer = partial(Subsample, resolution=resolution) + + self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim, resolution=resolution) + self.q = nn.Sequential( + sub_layer(stride=stride), + ln_layer(in_dim, self.key_attn_dim, resolution=resolution_out) + ) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.val_attn_dim, out_dim, resolution=resolution_out) + ) + + self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.resolution ** 2)) + k_pos = torch.stack(torch.meshgrid(torch.arange(resolution), torch.arange(resolution))).flatten(1) + q_pos = torch.stack(torch.meshgrid( + torch.arange(0, resolution, step=stride), + torch.arange(0, resolution, step=stride))).flatten(1) + rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() + rel_pos = (rel_pos[0] * resolution) + rel_pos[1] + self.register_buffer('attention_bias_idxs', rel_pos) + + self.ab = {} # per-device attention_biases cache + + 
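+    # NOTE: as in `Attention` above, switching back to train mode clears this per-device
+    # cache so updated `attention_biases` are re-gathered on the next eval forward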
@torch.no_grad()
+    def train(self, mode=True):
+        super().train(mode)
+        if mode and self.ab:
+            self.ab = {}  # clear ab cache
+
+    def get_attention_biases(self, device: torch.device) -> torch.Tensor:
+        if self.training:
+            return self.attention_biases[:, self.attention_bias_idxs]
+        else:
+            device_key = str(device)
+            if device_key not in self.ab:
+                self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs]
+            return self.ab[device_key]
+
+    def forward(self, x):
+        if self.use_conv:
+            B, C, H, W = x.shape
+            k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2)
+            q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_out_area)
+
+            attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
+            attn = attn.softmax(dim=-1)
+
+            x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution, self.resolution)
+        else:
+            B, N, C = x.shape
+            k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3)
+            k = k.permute(0, 2, 3, 1)  # BHCN
+            v = v.permute(0, 2, 1, 3)  # BHNC
+            q = self.q(x).view(B, self.resolution_out_area, self.num_heads, self.key_dim).permute(0, 2, 1, 3)
+
+            attn = q @ k * self.scale + self.get_attention_biases(x.device)
+            attn = attn.softmax(dim=-1)
+
+            x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim)
+        x = self.proj(x)
+        return x
+
+
+class Levit(nn.Module):
+    """ Vision Transformer with support for patch or hybrid CNN input stage
+
+    NOTE: distillation is defaulted to True since the pretrained weights use it; this will
+    cause problems w/ train scripts that don't expect tuple outputs.
+    """
+
+    def __init__(
+            self,
+            img_size=224,
+            patch_size=16,
+            in_chans=3,
+            num_classes=1000,
+            embed_dim=(192,),
+            key_dim=64,
+            depth=(12,),
+            num_heads=(3,),
+            attn_ratio=2,
+            mlp_ratio=2,
+            hybrid_backbone=None,
+            down_ops=None,
+            act_layer='hard_swish',
+            attn_act_layer='hard_swish',
+            use_conv=False,
+            global_pool='avg',
+            drop_rate=0.,
+            drop_path_rate=0.):
+        super().__init__()
+        act_layer = get_act_layer(act_layer)
+        attn_act_layer = get_act_layer(attn_act_layer)
+        ln_layer = ConvNorm if use_conv else LinearNorm
+        self.use_conv = use_conv
+        if isinstance(img_size, tuple):
+            # FIXME origin impl passes single img/res dim through whole hierarchy,
+            # not sure this model will be used enough to spend time fixing it.
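+            # square input assumed; collapse the (H, W) tuple to a single int resolution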
+ assert img_size[0] == img_size[1] + img_size = img_size[0] + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + self.grad_checkpointing = False + + num_stages = len(embed_dim) + assert len(depth) == len(num_heads) == num_stages + key_dim = to_ntuple(num_stages)(key_dim) + attn_ratio = to_ntuple(num_stages)(attn_ratio) + mlp_ratio = to_ntuple(num_stages)(mlp_ratio) + down_ops = down_ops or ( + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2), + ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2), + ('',) + ) + + self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer) + + self.blocks = [] + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer, + resolution=resolution, use_conv=use_conv), + drop_path_rate)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(ed, h, resolution=resolution), + act_layer(), + ln_layer(h, ed, bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + if do[0] == 'Subsample': + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_out = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2], + attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5], + resolution=resolution, resolution_out=resolution_out, use_conv=use_conv)) + resolution = resolution_out + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(embed_dim[i + 1], h, resolution=resolution), + act_layer(), + ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None, distillation=None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = 
self.forward_head(x)
+        return x
+
+
+class LevitDistilled(Levit):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity()
+        self.distilled_training = False  # must set this True to train w/ distillation token
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head, self.head_dist
+
+    def reset_classifier(self, num_classes, global_pool=None, distillation=None):
+        self.num_classes = num_classes
+        if global_pool is not None:
+            self.global_pool = global_pool
+        self.head = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+        self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    @torch.jit.ignore
+    def set_distilled_training(self, enable=True):
+        self.distilled_training = enable
+
+    def forward_head(self, x):
+        if self.global_pool == 'avg':
+            x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1)
+        x, x_dist = self.head(x), self.head_dist(x)
+        if self.distilled_training and self.training and not torch.jit.is_scripting():
+            # only return separate classification predictions when training in distilled mode
+            return x, x_dist
+        else:
+            # during standard train / finetune and at inference, average the two classifier predictions
+            return (x + x_dist) / 2
+
+
+def checkpoint_filter_fn(state_dict, model):
+    if 'model' in state_dict:
+        # For deit models
+        state_dict = state_dict['model']
+    D = model.state_dict()
+    for k in state_dict.keys():
+        if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2:
+            state_dict[k] = state_dict[k][:, :, None, None]
+    return state_dict
+
+
+def create_levit(variant, pretrained=False, distilled=True, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+
+    model_cfg = dict(**model_cfgs[variant], **kwargs)
+    model = build_model_with_cfg(
+        LevitDistilled if distilled else Levit, variant, pretrained,
+        pretrained_filter_fn=checkpoint_filter_fn,
+        **model_cfg)
+    return model
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/maxxvit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/maxxvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..f01e0812e86cb6a205d0bb18adf7de1d03a3e318
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/maxxvit.py
@@ -0,0 +1,1914 @@
+""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
+
+This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
+
+99% of the implementation was done from the papers; however, some last-minute adjustments were made
+based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
+
+There are multiple sets of models defined for both architectures. Typically, names with a
+ `_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit.
+These configs work well and appear to be a bit faster / lower resource than the paper.
+
+The models without an extra prefix / suffix (coatnet_0_224, maxvit_tiny_224, etc) are intended to
+match the paper, BUT without any official pretrained weights it's difficult to confirm a 100% match.
+
+# FIXME / WARNING
+This impl remains a WIP, some configs and models may vanish or change...
+ +Papers: + +MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697 +@article{tu2022maxvit, + title={MaxViT: Multi-Axis Vision Transformer}, + author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, + journal={ECCV}, + year={2022}, +} + +CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803 +@article{DBLP:journals/corr/abs-2106-04803, + author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan}, + title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes}, + journal = {CoRR}, + volume = {abs/2106.04803}, + year = {2021} +} + +Hacked together by / Copyright 2022, Ross Wightman +""" + +import math +from collections import OrderedDict +from dataclasses import dataclass, replace, field +from functools import partial +from typing import Callable, Optional, Union, Tuple, List + +import torch +from torch import nn +from torch.utils.checkpoint import checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq, named_apply +from .fx_features import register_notrace_function +from .layers import Mlp, ConvMlp, DropPath, ClassifierHead, trunc_normal_tf_, LayerNorm2d, LayerNorm +from .layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d +from .layers import to_2tuple, extend_tuple, make_divisible, _assert +from .registry import register_model +from .vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move these to common location + +__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = { + # Fiddling with configs / defaults / still pretraining + 'coatnet_pico_rw_224': _cfg(url=''), + 'coatnet_nano_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', + crop_pct=0.9), + 'coatnet_0_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), + 'coatnet_1_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth' + ), + 'coatnet_2_rw_224': _cfg(url=''), + 'coatnet_3_rw_224': _cfg(url=''), + + # Highly experimental configs + 'coatnet_bn_0_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, + crop_pct=0.95), + 'coatnet_rmlp_nano_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', + crop_pct=0.9), + 'coatnet_rmlp_0_rw_224': _cfg(url=''), + 'coatnet_rmlp_1_rw_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), + 'coatnet_rmlp_2_rw_224': _cfg( + 
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
+    'coatnet_rmlp_3_rw_224': _cfg(url=''),
+    'coatnet_nano_cc_224': _cfg(url=''),
+    'coatnext_nano_rw_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
+        crop_pct=0.9),
+
+    # Trying to be like the CoAtNet paper configs
+    'coatnet_0_224': _cfg(url=''),
+    'coatnet_1_224': _cfg(url=''),
+    'coatnet_2_224': _cfg(url=''),
+    'coatnet_3_224': _cfg(url=''),
+    'coatnet_4_224': _cfg(url=''),
+    'coatnet_5_224': _cfg(url=''),
+
+    # Experimental configs
+    'maxvit_pico_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_nano_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_tiny_rw_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
+    'maxvit_tiny_rw_256': _cfg(
+        url='',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_rmlp_pico_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_rmlp_nano_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_rmlp_tiny_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxvit_rmlp_small_rw_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
+        crop_pct=0.9,
+    ),
+    'maxvit_rmlp_small_rw_256': _cfg(
+        url='',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+
+    'maxvit_tiny_pm_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
+
+    'maxxvit_rmlp_nano_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxxvit_rmlp_tiny_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
+    'maxxvit_rmlp_small_rw_256': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
+        input_size=(3, 256, 256), pool_size=(8, 8)),
+
+    # Trying to be like the MaxViT paper configs
+    'maxvit_tiny_224': _cfg(url=''),
+    'maxvit_small_224': _cfg(url=''),
+    'maxvit_base_224': _cfg(url=''),
+    'maxvit_large_224': _cfg(url=''),
+    'maxvit_xlarge_224': _cfg(url=''),
+}
+
+
+@dataclass
+class MaxxVitTransformerCfg:
+    dim_head: int = 32
+    expand_ratio: float = 4.0
+    expand_first: bool = True
+    shortcut_bias: bool = True
+    attn_bias: bool = True
+    attn_drop: float = 0.
+    proj_drop: float = 0.
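+    # (editorial note) pool_type below selects the Downsample2d mode used for shortcuts; when
+    # window_size / grid_size are left as None they are derived later from the image size as
+    # img_size // partition_ratio (see cfg_window_size() near the bottom of this file)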
+    pool_type: str = 'avg2'
+    rel_pos_type: str = 'bias'
+    rel_pos_dim: int = 512  # for relative position types w/ MLP
+    partition_ratio: int = 32
+    window_size: Optional[Tuple[int, int]] = None
+    grid_size: Optional[Tuple[int, int]] = None
+    init_values: Optional[float] = None
+    act_layer: str = 'gelu'
+    norm_layer: str = 'layernorm2d'
+    norm_layer_cl: str = 'layernorm'
+    norm_eps: float = 1e-6
+
+    def __post_init__(self):
+        if self.grid_size is not None:
+            self.grid_size = to_2tuple(self.grid_size)
+        if self.window_size is not None:
+            self.window_size = to_2tuple(self.window_size)
+            if self.grid_size is None:
+                self.grid_size = self.window_size
+
+
+@dataclass
+class MaxxVitConvCfg:
+    block_type: str = 'mbconv'
+    expand_ratio: float = 4.0
+    expand_output: bool = True  # calculate expansion channels from output (vs input chs)
+    kernel_size: int = 3
+    group_size: int = 1  # 1 == depthwise
+    pre_norm_act: bool = False  # activation after pre-norm
+    output_bias: bool = True  # bias for shortcut + final 1x1 projection conv
+    stride_mode: str = 'dw'  # stride done via one of 'pool', '1x1', 'dw'
+    pool_type: str = 'avg2'
+    downsample_pool_type: str = 'avg2'
+    attn_early: bool = False  # apply attn between conv2 and norm2, instead of after norm2
+    attn_layer: str = 'se'
+    attn_act_layer: str = 'silu'
+    attn_ratio: float = 0.25
+    init_values: Optional[float] = 1e-6  # for ConvNeXt block, ignored by MBConv
+    act_layer: str = 'gelu'
+    norm_layer: str = ''
+    norm_layer_cl: str = ''
+    norm_eps: Optional[float] = None
+
+    def __post_init__(self):
+        # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
+        assert self.block_type in ('mbconv', 'convnext')
+        use_mbconv = self.block_type == 'mbconv'
+        if not self.norm_layer:
+            self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
+        if not self.norm_layer_cl and not use_mbconv:
+            self.norm_layer_cl = 'layernorm'
+        if self.norm_eps is None:
+            self.norm_eps = 1e-5 if use_mbconv else 1e-6
+        self.downsample_pool_type = self.downsample_pool_type or self.pool_type
+
+
+@dataclass
+class MaxxVitCfg:
+    embed_dim: Tuple[int, ...] = (96, 192, 384, 768)
+    depths: Tuple[int, ...] = (2, 3, 5, 2)
+    block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
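+    # (editorial note) block type codes, per MaxxVitStage below: 'C' = conv block (MbConv or
+    # ConvNeXt), 'T' = TransformerBlock2d, 'M' = MaxxVitBlock, 'PM' = ParallelMaxxVitBlock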
+    stem_width: Union[int, Tuple[int, int]] = 64
+    stem_bias: bool = True
+    conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg)
+    transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg)
+    weight_init: str = 'vit_eff'
+
+
+def _rw_coat_cfg(
+        stride_mode='pool',
+        pool_type='avg2',
+        conv_output_bias=False,
+        conv_attn_early=False,
+        conv_attn_act_layer='relu',
+        conv_norm_layer='',
+        transformer_shortcut_bias=True,
+        transformer_norm_layer='layernorm2d',
+        transformer_norm_layer_cl='layernorm',
+        init_values=None,
+        rel_pos_type='bias',
+        rel_pos_dim=512,
+):
+    # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
+    # Common differences for initial timm models:
+    #  - pre-norm layer in MbConv included an activation after norm
+    #  - mbconv expansion calculated from input instead of output chs
+    #  - mbconv shortcut and final 1x1 conv did not have a bias
+    #  - SE act layer was relu, not silu
+    #  - mbconv uses silu in timm, not gelu
+    #  - expansion in attention block done via output proj, not input proj
+    # Variable differences (evolved over training initial models):
+    #  - avg pool with kernel_size=2 favoured for downsampling (instead of maxpool for coat)
+    #  - SE attention was between conv2 and norm/act
+    #  - default to avg pool for mbconv downsample instead of 1x1 or dw conv
+    #  - transformer block shortcut has no bias
+    return dict(
+        conv_cfg=MaxxVitConvCfg(
+            stride_mode=stride_mode,
+            pool_type=pool_type,
+            pre_norm_act=True,
+            expand_output=False,
+            output_bias=conv_output_bias,
+            attn_early=conv_attn_early,
+            attn_act_layer=conv_attn_act_layer,
+            act_layer='silu',
+            norm_layer=conv_norm_layer,
+        ),
+        transformer_cfg=MaxxVitTransformerCfg(
+            expand_first=False,
+            shortcut_bias=transformer_shortcut_bias,
+            pool_type=pool_type,
+            init_values=init_values,
+            norm_layer=transformer_norm_layer,
+            norm_layer_cl=transformer_norm_layer_cl,
+            rel_pos_type=rel_pos_type,
+            rel_pos_dim=rel_pos_dim,
+        ),
+    )
+
+
+def _rw_max_cfg(
+        stride_mode='dw',
+        pool_type='avg2',
+        conv_output_bias=False,
+        conv_attn_ratio=1 / 16,
+        conv_norm_layer='',
+        transformer_norm_layer='layernorm2d',
+        transformer_norm_layer_cl='layernorm',
+        window_size=None,
+        dim_head=32,
+        init_values=None,
+        rel_pos_type='bias',
+        rel_pos_dim=512,
+):
+    # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit
+    # Differences of initial timm models:
+    #  - mbconv expansion calculated from input instead of output chs
+    #  - mbconv shortcut and final 1x1 conv did not have a bias
+    #  - mbconv uses silu in timm, not gelu
+    #  - expansion in attention block done via output proj, not input proj
+    return dict(
+        conv_cfg=MaxxVitConvCfg(
+            stride_mode=stride_mode,
+            pool_type=pool_type,
+            expand_output=False,
+            output_bias=conv_output_bias,
+            attn_ratio=conv_attn_ratio,
+            act_layer='silu',
+            norm_layer=conv_norm_layer,
+        ),
+        transformer_cfg=MaxxVitTransformerCfg(
+            expand_first=False,
+            pool_type=pool_type,
+            dim_head=dim_head,
+            window_size=window_size,
+            init_values=init_values,
+            norm_layer=transformer_norm_layer,
+            norm_layer_cl=transformer_norm_layer_cl,
+            rel_pos_type=rel_pos_type,
+            rel_pos_dim=rel_pos_dim,
+        ),
+    )
+
+
+def _next_cfg(
+        stride_mode='dw',
+        pool_type='avg2',
+        conv_norm_layer='layernorm2d',
+        conv_norm_layer_cl='layernorm',
+        transformer_norm_layer='layernorm2d',
+        transformer_norm_layer_cl='layernorm',
+        window_size=None,
+        init_values=1e-6,
+        rel_pos_type='mlp',  # MLP by default for maxxvit
+        rel_pos_dim=512,
+):
+    # For experimental models with convnext instead of mbconv
+    init_values = to_2tuple(init_values)
+    return dict(
+        conv_cfg=MaxxVitConvCfg(
+            block_type='convnext',
+            stride_mode=stride_mode,
+            pool_type=pool_type,
+            expand_output=False,
+            init_values=init_values[0],
+            norm_layer=conv_norm_layer,
+            norm_layer_cl=conv_norm_layer_cl,
+        ),
+        transformer_cfg=MaxxVitTransformerCfg(
+            expand_first=False,
+            pool_type=pool_type,
+            window_size=window_size,
+            init_values=init_values[1],
+            norm_layer=transformer_norm_layer,
+            norm_layer_cl=transformer_norm_layer_cl,
+            rel_pos_type=rel_pos_type,
+            rel_pos_dim=rel_pos_dim,
+        ),
+    )
+
+
+model_cfgs = dict(
+    # Fiddling with configs / defaults / still pretraining
+    coatnet_pico_rw_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 3, 5, 2),
+        stem_width=(32, 64),
+        **_rw_max_cfg(  # using newer max defaults here
+            conv_output_bias=True,
+            conv_attn_ratio=0.25,
+        ),
+    ),
+    coatnet_nano_rw_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(3, 4, 6, 3),
+        stem_width=(32, 64),
+        **_rw_max_cfg(  # using newer max defaults here
+            stride_mode='pool',
+            conv_output_bias=True,
+            conv_attn_ratio=0.25,
+        ),
+    ),
+    coatnet_0_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 3, 7, 2),  # deeper than paper '0' model
+        stem_width=(32, 64),
+        **_rw_coat_cfg(
+            conv_attn_early=True,
+            transformer_shortcut_bias=False,
+        ),
+    ),
+    coatnet_1_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 6, 14, 2),
+        stem_width=(32, 64),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_early=True,
+            transformer_shortcut_bias=False,
+        )
+    ),
+    coatnet_2_rw_224=MaxxVitCfg(
+        embed_dim=(128, 256, 512, 1024),
+        depths=(2, 6, 14, 2),
+        stem_width=(64, 128),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_act_layer='silu',
+            init_values=1e-6,
+        ),
+    ),
+    coatnet_3_rw_224=MaxxVitCfg(
+        embed_dim=(192, 384, 768, 1536),
+        depths=(2, 6, 14, 2),
+        stem_width=(96, 192),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_act_layer='silu',
+            init_values=1e-6,
+        ),
+    ),
+
+    # Highly experimental configs
+    coatnet_bn_0_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 3, 7, 2),  # deeper than paper '0' model
+        stem_width=(32, 64),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_early=True,
+            transformer_shortcut_bias=False,
+            transformer_norm_layer='batchnorm2d',
+        )
+    ),
+    coatnet_rmlp_nano_rw_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(3, 4, 6, 3),
+        stem_width=(32, 64),
+        **_rw_max_cfg(
+            conv_output_bias=True,
+            conv_attn_ratio=0.25,
+            rel_pos_type='mlp',
+            rel_pos_dim=384,
+        ),
+    ),
+    coatnet_rmlp_0_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 3, 7, 2),  # deeper than paper '0' model
+        stem_width=(32, 64),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            rel_pos_type='mlp',
+        ),
+    ),
+    coatnet_rmlp_1_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 6, 14, 2),
+        stem_width=(32, 64),
+        **_rw_coat_cfg(
+            pool_type='max',
+            conv_attn_early=True,
+            transformer_shortcut_bias=False,
+            rel_pos_type='mlp',
+            rel_pos_dim=384,  # was supposed to be 512, woops
+        ),
+    ),
+    coatnet_rmlp_2_rw_224=MaxxVitCfg(
+        embed_dim=(128, 256, 512, 1024),
+        depths=(2, 6, 14, 2),
+        stem_width=(64, 128),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_act_layer='silu',
+            init_values=1e-6,
+            rel_pos_type='mlp'
+        ),
+    ),
+    coatnet_rmlp_3_rw_224=MaxxVitCfg(
+        embed_dim=(192, 384, 768, 1536),
+        depths=(2, 6, 14, 2),
+        stem_width=(96, 192),
+        **_rw_coat_cfg(
+            stride_mode='dw',
+            conv_attn_act_layer='silu',
+            init_values=1e-6,
+            rel_pos_type='mlp'
+        ),
+    ),
+
+    coatnet_nano_cc_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(3, 4, 6, 3),
+        stem_width=(32, 64),
+        block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
+        **_rw_coat_cfg(),
+    ),
+    coatnext_nano_rw_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(3, 4, 6, 3),
+        stem_width=(32, 64),
+        weight_init='normal',
+        **_next_cfg(
+            rel_pos_type='bias',
+            init_values=(1e-5, None)
+        ),
+    ),
+
+    # Trying to be like the CoAtNet paper configs
+    coatnet_0_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 3, 5, 2),
+        stem_width=64,
+    ),
+    coatnet_1_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 6, 14, 2),
+        stem_width=64,
+    ),
+    coatnet_2_224=MaxxVitCfg(
+        embed_dim=(128, 256, 512, 1024),
+        depths=(2, 6, 14, 2),
+        stem_width=128,
+    ),
+    coatnet_3_224=MaxxVitCfg(
+        embed_dim=(192, 384, 768, 1536),
+        depths=(2, 6, 14, 2),
+        stem_width=192,
+    ),
+    coatnet_4_224=MaxxVitCfg(
+        embed_dim=(192, 384, 768, 1536),
+        depths=(2, 12, 28, 2),
+        stem_width=192,
+    ),
+    coatnet_5_224=MaxxVitCfg(
+        embed_dim=(256, 512, 1280, 2048),
+        depths=(2, 12, 28, 2),
+        stem_width=192,
+    ),
+
+    # Experimental MaxVit configs
+    maxvit_pico_rw_256=MaxxVitCfg(
+        embed_dim=(32, 64, 128, 256),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(24, 32),
+        **_rw_max_cfg(),
+    ),
+    maxvit_nano_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(1, 2, 3, 1),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(),
+    ),
+    maxvit_tiny_rw_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(),
+    ),
+    maxvit_tiny_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(),
+    ),
+
+    maxvit_rmlp_pico_rw_256=MaxxVitCfg(
+        embed_dim=(32, 64, 128, 256),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(24, 32),
+        **_rw_max_cfg(rel_pos_type='mlp'),
+    ),
+    maxvit_rmlp_nano_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(1, 2, 3, 1),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(rel_pos_type='mlp'),
+    ),
+    maxvit_rmlp_tiny_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(rel_pos_type='mlp'),
+    ),
+    maxvit_rmlp_small_rw_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(
+            rel_pos_type='mlp',
+            init_values=1e-6,
+        ),
+    ),
+    maxvit_rmlp_small_rw_256=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(
+            rel_pos_type='mlp',
+            init_values=1e-6,
+        ),
+    ),
+
+    maxvit_tiny_pm_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('PM',) * 4,
+        stem_width=(32, 64),
+        **_rw_max_cfg(),
+    ),
+
+    maxxvit_rmlp_nano_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(1, 2, 3, 1),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        weight_init='normal',
+        **_next_cfg(),
+    ),
+    maxxvit_rmlp_tiny_rw_256=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(32, 64),
+        **_next_cfg(),
+    ),
+    maxxvit_rmlp_small_rw_256=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=(48, 96),
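+        # (editorial note) _next_cfg() = ConvNeXt conv blocks + rel-pos MLP transformer defaults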
+        **_next_cfg(),
+    ),
+
+    # Trying to be like the MaxViT paper configs
+    maxvit_tiny_224=MaxxVitCfg(
+        embed_dim=(64, 128, 256, 512),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=64,
+    ),
+    maxvit_small_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 2, 5, 2),
+        block_type=('M',) * 4,
+        stem_width=64,
+    ),
+    maxvit_base_224=MaxxVitCfg(
+        embed_dim=(96, 192, 384, 768),
+        depths=(2, 6, 14, 2),
+        block_type=('M',) * 4,
+        stem_width=64,
+    ),
+    maxvit_large_224=MaxxVitCfg(
+        embed_dim=(128, 256, 512, 1024),
+        depths=(2, 6, 14, 2),
+        block_type=('M',) * 4,
+        stem_width=128,
+    ),
+    maxvit_xlarge_224=MaxxVitCfg(
+        embed_dim=(192, 384, 768, 1536),
+        depths=(2, 6, 14, 2),
+        block_type=('M',) * 4,
+        stem_width=192,
+    ),
+
+)
+
+
+class Attention2d(nn.Module):
+    """ multi-head attention for 2D NCHW tensors"""
+    def __init__(
+            self,
+            dim: int,
+            dim_out: Optional[int] = None,
+            dim_head: int = 32,
+            bias: bool = True,
+            expand_first: bool = True,
+            rel_pos_cls: Callable = None,
+            attn_drop: float = 0.,
+            proj_drop: float = 0.
+    ):
+        super().__init__()
+        dim_out = dim_out or dim
+        dim_attn = dim_out if expand_first else dim
+        self.num_heads = dim_attn // dim_head
+        self.dim_head = dim_head
+        self.scale = dim_head ** -0.5
+
+        self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
+        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
+        B, C, H, W = x.shape
+
+        q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
+
+        attn = (q.transpose(-2, -1) @ k) * self.scale
+        if self.rel_pos is not None:
+            attn = self.rel_pos(attn)
+        elif shared_rel_pos is not None:
+            attn = attn + shared_rel_pos
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class AttentionCl(nn.Module):
+    """ Channels-last multi-head attention (B, ..., C) """
+    def __init__(
+            self,
+            dim: int,
+            dim_out: Optional[int] = None,
+            dim_head: int = 32,
+            bias: bool = True,
+            expand_first: bool = True,
+            rel_pos_cls: Callable = None,
+            attn_drop: float = 0.,
+            proj_drop: float = 0.
+    ):
+        super().__init__()
+        dim_out = dim_out or dim
+        dim_attn = dim_out if expand_first and dim_out > dim else dim
+        assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
+        self.num_heads = dim_attn // dim_head
+        self.dim_head = dim_head
+        self.scale = dim_head ** -0.5
+
+        self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
+        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
+        B = x.shape[0]
+        restore_shape = x.shape[:-1]
+
+        q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        if self.rel_pos is not None:
+            attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
+        elif shared_rel_pos is not None:
+            attn = attn + shared_rel_pos
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(restore_shape + (-1,))
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class LayerScale(nn.Module):
+    def __init__(self, dim, init_values=1e-5, inplace=False):
+        super().__init__()
+        self.inplace = inplace
+        self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+    def forward(self, x):
+        gamma = self.gamma
+        return x.mul_(gamma) if self.inplace else x * gamma
+
+
+class LayerScale2d(nn.Module):
+    def __init__(self, dim, init_values=1e-5, inplace=False):
+        super().__init__()
+        self.inplace = inplace
+        self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+    def forward(self, x):
+        gamma = self.gamma.view(1, -1, 1, 1)
+        return x.mul_(gamma) if self.inplace else x * gamma
+
+
+class Downsample2d(nn.Module):
+    """ A downsample pooling module supporting several maxpool and avgpool modes
+    * 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1
+    * 'max2' - MaxPool2d w/ kernel_size = stride = 2
+    * 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1
+    * 'avg2' - AvgPool2d w/ kernel_size = stride = 2
+    """
+
+    def __init__(
+            self,
+            dim: int,
+            dim_out: int,
+            pool_type: str = 'avg2',
+            bias: bool = True,
+    ):
+        super().__init__()
+        assert pool_type in ('max', 'max2', 'avg', 'avg2')
+        if pool_type == 'max':
+            self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        elif pool_type == 'max2':
+            self.pool = nn.MaxPool2d(2)  # kernel_size == stride == 2
+        elif pool_type == 'avg':
+            self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False)
+        else:
+            self.pool = nn.AvgPool2d(2)  # kernel_size == stride == 2
+
+        if dim != dim_out:
+            self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias)
+        else:
+            self.expand = nn.Identity()
+
+    def forward(self, x):
+        x = self.pool(x)  # spatial downsample
+        x = self.expand(x)  # expand chs
+        return x
+
+
+def _init_transformer(module, name, scheme=''):
+    if isinstance(module, (nn.Conv2d, nn.Linear)):
+        if scheme == 'normal':
+            nn.init.normal_(module.weight, std=.02)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif scheme == 'trunc_normal':
+            trunc_normal_tf_(module.weight, std=.02)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif scheme == 'xavier_normal':
+            nn.init.xavier_normal_(module.weight)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        else:
+            # vit like
+            nn.init.xavier_uniform_(module.weight)
+            if module.bias is not None:
+                if 'mlp' in name:
+                    nn.init.normal_(module.bias, std=1e-6)
+                else:
+                    nn.init.zeros_(module.bias)
+
+
+class TransformerBlock2d(nn.Module):
+    """ Transformer block with 2D downsampling
+    '2D' NCHW tensor layout
+
+    Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
+    for spatial pooling, the benefit is minimal, so I ended up using just this variant for the CoAt configs.
+
+    This impl was faster on TPU w/ PT XLA than the 1D experiment.
+    """
+
+    def __init__(
+            self,
+            dim: int,
+            dim_out: int,
+            stride: int = 1,
+            rel_pos_cls: Callable = None,
+            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
+            drop_path: float = 0.,
+    ):
+        super().__init__()
+        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
+        act_layer = get_act_layer(cfg.act_layer)
+
+        if stride == 2:
+            self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
+            self.norm1 = nn.Sequential(OrderedDict([
+                ('norm', norm_layer(dim)),
+                ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
+            ]))
+        else:
+            assert dim == dim_out
+            self.shortcut = nn.Identity()
+            self.norm1 = norm_layer(dim)
+
+        self.attn = Attention2d(
+            dim,
+            dim_out,
+            dim_head=cfg.dim_head,
+            expand_first=cfg.expand_first,
+            bias=cfg.attn_bias,
+            rel_pos_cls=rel_pos_cls,
+            attn_drop=cfg.attn_drop,
+            proj_drop=cfg.proj_drop
+        )
+        self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+        self.norm2 = norm_layer(dim_out)
+        self.mlp = ConvMlp(
+            in_features=dim_out,
+            hidden_features=int(dim_out * cfg.expand_ratio),
+            act_layer=act_layer,
+            drop=cfg.proj_drop)
+        self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def init_weights(self, scheme=''):
+        named_apply(partial(_init_transformer, scheme=scheme), self)
+
+    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
+        x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
+        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
+        return x
+
+
+def _init_conv(module, name, scheme=''):
+    if isinstance(module, nn.Conv2d):
+        if scheme == 'normal':
+            nn.init.normal_(module.weight, std=.02)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif scheme == 'trunc_normal':
+            trunc_normal_tf_(module.weight, std=.02)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        elif scheme == 'xavier_normal':
+            nn.init.xavier_normal_(module.weight)
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+        else:
+            # efficientnet like
+            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
+            fan_out //= module.groups
+            nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
+            if module.bias is not None:
+                nn.init.zeros_(module.bias)
+
+
+def num_groups(group_size, channels):
+    if not group_size:  # 0 or None
+        return 1  # normal conv with 1 group
+    else:
+        # NOTE group_size == 1 -> depthwise conv
+        assert channels % group_size == 0
+        return channels // group_size
+
+
+class MbConvBlock(nn.Module):
+    """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
+    """
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: int,
+            stride: int = 1,
+            dilation: Tuple[int, int] = (1, 1),
+            cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
+            drop_path: float = 0.
+    ):
+        super(MbConvBlock, self).__init__()
+        norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
+        mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
+        groups = num_groups(cfg.group_size, mid_chs)
+
+        if stride == 2:
+            self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias)
+        else:
+            self.shortcut = nn.Identity()
+
+        assert cfg.stride_mode in ('pool', '1x1', 'dw')
+        stride_pool, stride_1, stride_2 = 1, 1, 1
+        if cfg.stride_mode == 'pool':
+            # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1
+            stride_pool, dilation_2 = stride, dilation[1]
+            # FIXME handle dilation of avg pool
+        elif cfg.stride_mode == '1x1':
+            # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
+            stride_1, dilation_2 = stride, dilation[1]
+        else:
+            stride_2, dilation_2 = stride, dilation[0]
+
+        self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
+        if stride_pool > 1:
+            self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
+        else:
+            self.down = nn.Identity()
+        self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
+        self.norm1 = norm_act_layer(mid_chs)
+
+        self.conv2_kxk = create_conv2d(
+            mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups)
+
+        attn_kwargs = {}
+        if isinstance(cfg.attn_layer, str):
+            if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
+                attn_kwargs['act_layer'] = cfg.attn_act_layer
+                attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))
+
+        # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
+        if cfg.attn_early:
+            self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
+            self.norm2 = norm_act_layer(mid_chs)
+            self.se = None
+        else:
+            self.se_early = None
+            self.norm2 = norm_act_layer(mid_chs)
+            self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
+
+        self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def init_weights(self, scheme=''):
+        named_apply(partial(_init_conv, scheme=scheme), self)
+
+    def forward(self, x):
+        shortcut = self.shortcut(x)
+        x = self.pre_norm(x)
+        x = self.down(x)
+
+        # 1x1 expansion conv & norm-act
+        x = self.conv1_1x1(x)
+        x = self.norm1(x)
+
+        # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
+        x = self.conv2_kxk(x)
+        if self.se_early is not None:
+            x = self.se_early(x)
+        x = self.norm2(x)
+        if self.se is not None:
+            x = self.se(x)
+
+        # 1x1 linear projection to output width
+        x = self.conv3_1x1(x)
+        x = self.drop_path(x) + shortcut
+        return x
+
+
+class ConvNeXtBlock(nn.Module):
+    """ ConvNeXt Block
+    """
+
+    def __init__(
+            self,
+            in_chs: int,
+            out_chs: Optional[int] = None,
+            kernel_size: int = 7,
+            stride: int = 1,
+            dilation: Tuple[int, int] = (1, 1),
+            cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
+            conv_mlp: bool = True,
+            drop_path: float = 0.
+    ):
+        super().__init__()
+        out_chs = out_chs or in_chs
+        act_layer = get_act_layer(cfg.act_layer)
+        if conv_mlp:
+            norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
+            mlp_layer = ConvMlp
+        else:
+            assert 'layernorm' in cfg.norm_layer
+            norm_layer = LayerNorm
+            mlp_layer = Mlp
+        self.use_conv_mlp = conv_mlp
+
+        if stride == 2:
+            self.shortcut = Downsample2d(in_chs, out_chs)
+        elif in_chs != out_chs:
+            self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
+        else:
+            self.shortcut = nn.Identity()
+
+        assert cfg.stride_mode in ('pool', 'dw')
+        stride_pool, stride_dw = 1, 1
+        # FIXME handle dilation?
+        if cfg.stride_mode == 'pool':
+            stride_pool = stride
+        else:
+            stride_dw = stride
+
+        if stride_pool == 2:
+            self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
+        else:
+            self.down = nn.Identity()
+
+        self.conv_dw = create_conv2d(
+            in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
+            depthwise=True, bias=cfg.output_bias)
+        self.norm = norm_layer(out_chs)
+        self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
+        if conv_mlp:
+            self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
+        else:
+            self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def forward(self, x):
+        shortcut = self.shortcut(x)
+        x = self.down(x)
+        x = self.conv_dw(x)
+        if self.use_conv_mlp:
+            x = self.norm(x)
+            x = self.mlp(x)
+            x = self.ls(x)
+        else:
+            x = x.permute(0, 2, 3, 1)
+            x = self.norm(x)
+            x = self.mlp(x)
+            x = self.ls(x)
+            x = x.permute(0, 3, 1, 2)
+
+        x = self.drop_path(x) + shortcut
+        return x
+
+
+def window_partition(x, window_size: List[int]):
+    B, H, W, C = x.shape
+    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
+    _assert(W % window_size[1] == 0, '')
+    x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
+    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
+    return windows
+
+
+@register_notrace_function  # reason: int argument is a Proxy
+def window_reverse(windows, window_size: List[int], img_size: List[int]):
+    H, W = img_size
+    C = windows.shape[-1]
+    x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
+    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
+    return x
+
+
+def grid_partition(x, grid_size: List[int]):
+    B, H, W, C = x.shape
+    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
+    _assert(W % grid_size[1] == 0, '')
+    x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
+    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
+    return windows
+
+
+@register_notrace_function  # reason: int argument is a Proxy
+def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
+    H, W = img_size
+    C = windows.shape[-1]
+    x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C)
+    x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C)
+    return x
+
+
+def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
+    rel_pos_cls = None
+    if cfg.rel_pos_type == 'mlp':
+        rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
+    elif cfg.rel_pos_type == 'bias':
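+        # (editorial note) 'bias' = learned relative position bias table indexed by relative
+        # offset (Swin-style); 'mlp' above instead generates the bias with a small MLP (RelPosMlp)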
+        rel_pos_cls = partial(RelPosBias, window_size=window_size)
+    return rel_pos_cls
+
+
+class PartitionAttentionCl(nn.Module):
+    """ Grid or Block partition + Attn + FFN.
+    NxC 'channels last' tensor layout.
+    """
+
+    def __init__(
+            self,
+            dim: int,
+            partition_type: str = 'block',
+            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
+            drop_path: float = 0.,
+    ):
+        super().__init__()
+        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
+        act_layer = get_act_layer(cfg.act_layer)
+
+        self.partition_block = partition_type == 'block'
+        self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
+        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
+
+        self.norm1 = norm_layer(dim)
+        self.attn = AttentionCl(
+            dim,
+            dim,
+            dim_head=cfg.dim_head,
+            bias=cfg.attn_bias,
+            rel_pos_cls=rel_pos_cls,
+            attn_drop=cfg.attn_drop,
+            proj_drop=cfg.proj_drop,
+        )
+        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+        self.norm2 = norm_layer(dim)
+        self.mlp = Mlp(
+            in_features=dim,
+            hidden_features=int(dim * cfg.expand_ratio),
+            act_layer=act_layer,
+            drop=cfg.proj_drop)
+        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def _partition_attn(self, x):
+        img_size = x.shape[1:3]
+        if self.partition_block:
+            partitioned = window_partition(x, self.partition_size)
+        else:
+            partitioned = grid_partition(x, self.partition_size)
+
+        partitioned = self.attn(partitioned)
+
+        if self.partition_block:
+            x = window_reverse(partitioned, self.partition_size, img_size)
+        else:
+            x = grid_reverse(partitioned, self.partition_size, img_size)
+        return x
+
+    def forward(self, x):
+        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
+        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
+        return x
+
+
+class ParallelPartitionAttention(nn.Module):
+    """ Experimental. Grid and Block partition + single FFN
+    NxC tensor layout.
+    """
+
+    def __init__(
+            self,
+            dim: int,
+            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
+            drop_path: float = 0.,
+    ):
+        super().__init__()
+        assert dim % 2 == 0
+        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
+        act_layer = get_act_layer(cfg.act_layer)
+
+        assert cfg.window_size == cfg.grid_size
+        self.partition_size = to_2tuple(cfg.window_size)
+        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)
+
+        self.norm1 = norm_layer(dim)
+        self.attn_block = AttentionCl(
+            dim,
+            dim // 2,
+            dim_head=cfg.dim_head,
+            bias=cfg.attn_bias,
+            rel_pos_cls=rel_pos_cls,
+            attn_drop=cfg.attn_drop,
+            proj_drop=cfg.proj_drop,
+        )
+        self.attn_grid = AttentionCl(
+            dim,
+            dim // 2,
+            dim_head=cfg.dim_head,
+            bias=cfg.attn_bias,
+            rel_pos_cls=rel_pos_cls,
+            attn_drop=cfg.attn_drop,
+            proj_drop=cfg.proj_drop,
+        )
+        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+        self.norm2 = norm_layer(dim)
+        self.mlp = Mlp(
+            in_features=dim,
+            hidden_features=int(dim * cfg.expand_ratio),
+            out_features=dim,
+            act_layer=act_layer,
+            drop=cfg.proj_drop)
+        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
+        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+
+    def _partition_attn(self, x):
+        img_size = x.shape[1:3]
+
+        partitioned_block = window_partition(x, self.partition_size)
+        partitioned_block = self.attn_block(partitioned_block)
+        x_window = window_reverse(partitioned_block, self.partition_size, img_size)
+
+        partitioned_grid = grid_partition(x, self.partition_size)
+        partitioned_grid = self.attn_grid(partitioned_grid)
+        x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)
+
+        return torch.cat([x_window, x_grid], dim=-1)
+
+    def forward(self, x):
+        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
+        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
+        return x
+
+
+def window_partition_nchw(x, window_size: List[int]):
+    B, C, H, W = x.shape
+    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
+    _assert(W % window_size[1] == 0, '')
+    x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
+    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
+    return windows
+
+
+@register_notrace_function  # reason: int argument is a Proxy
+def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
+    H, W = img_size
+    C = windows.shape[1]
+    x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1])
+    x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W)
+    return x
+
+
+def grid_partition_nchw(x, grid_size: List[int]):
+    B, C, H, W = x.shape
+    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
+    _assert(W % grid_size[1] == 0, '')
+    x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
+    windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
+    return windows
+
+
+@register_notrace_function  # reason: int argument is a Proxy
+def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
+    H, W = img_size
+    C = windows.shape[1]
+    x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1])
+    x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W)
+    return x
+
+
+class PartitionAttention2d(nn.Module):
+    """ Grid or Block partition + Attn + FFN
+
+    '2D' NCHW tensor layout.
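+
+    NOTE (editorial): per the use_nchw_attn comment in MaxxVitBlock below, this NCHW variant
+    avoids the NHWC permutes and tends to be faster on TPU but somewhat slower on GPU.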
+ """ + + def __init__( + self, + dim: int, + partition_type: str = 'block', + cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path: float = 0., + ): + super().__init__() + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) # NOTE this block is channels-last + act_layer = get_act_layer(cfg.act_layer) + + self.partition_block = partition_type == 'block' + self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) + rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) + + self.norm1 = norm_layer(dim) + self.attn = Attention2d( + dim, + dim, + dim_head=cfg.dim_head, + bias=cfg.attn_bias, + rel_pos_cls=rel_pos_cls, + attn_drop=cfg.attn_drop, + proj_drop=cfg.proj_drop, + ) + self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = ConvMlp( + in_features=dim, + hidden_features=int(dim * cfg.expand_ratio), + act_layer=act_layer, + drop=cfg.proj_drop) + self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def _partition_attn(self, x): + img_size = x.shape[-2:] + if self.partition_block: + partitioned = window_partition_nchw(x, self.partition_size) + else: + partitioned = grid_partition_nchw(x, self.partition_size) + + partitioned = self.attn(partitioned) + + if self.partition_block: + x = window_reverse_nchw(partitioned, self.partition_size, img_size) + else: + x = grid_reverse_nchw(partitioned, self.partition_size, img_size) + return x + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class MaxxVitBlock(nn.Module): + """ MaxVit conv, window partition + FFN , grid partition + FFN + """ + + def __init__( + self, + dim: int, + dim_out: int, + stride: int = 1, + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + use_nchw_attn: bool = False, # FIXME move to cfg? True is ~20-30% faster on TPU, 5-10% slower on GPU + drop_path: float = 0., + ): + super().__init__() + + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) + + attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) + partition_layer = PartitionAttention2d if use_nchw_attn else PartitionAttentionCl + self.nchw_attn = use_nchw_attn + self.attn_block = partition_layer(**attn_kwargs) + self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) + + def init_weights(self, scheme=''): + named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) + named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) + named_apply(partial(_init_conv, scheme=scheme), self.conv) + + def forward(self, x): + # NCHW format + x = self.conv(x) + + if not self.nchw_attn: + x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) + x = self.attn_block(x) + x = self.attn_grid(x) + if not self.nchw_attn: + x = x.permute(0, 3, 1, 2) # back to NCHW + return x + + +class ParallelMaxxVitBlock(nn.Module): + """ MaxVit block with parallel cat(window + grid), one FF + Experimental timm block. 
+ """ + + def __init__( + self, + dim, + dim_out, + stride=1, + num_conv=2, + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + drop_path=0., + ): + super().__init__() + + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + if num_conv > 1: + convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] + convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) + self.conv = nn.Sequential(*convs) + else: + self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) + self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) + + def init_weights(self, scheme=''): + named_apply(partial(_init_transformer, scheme=scheme), self.attn) + named_apply(partial(_init_conv, scheme=scheme), self.conv) + + def forward(self, x): + x = self.conv(x) + x = x.permute(0, 2, 3, 1) + x = self.attn(x) + x = x.permute(0, 3, 1, 2) + return x + + +class MaxxVitStage(nn.Module): + def __init__( + self, + in_chs: int, + out_chs: int, + stride: int = 2, + depth: int = 4, + feat_size: Tuple[int, int] = (14, 14), + block_types: Union[str, Tuple[str]] = 'C', + transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), + conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), + drop_path: Union[float, List[float]] = 0., + ): + super().__init__() + self.grad_checkpointing = False + + block_types = extend_tuple(block_types, depth) + blocks = [] + for i, t in enumerate(block_types): + block_stride = stride if i == 0 else 1 + assert t in ('C', 'T', 'M', 'PM') + if t == 'C': + conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock + blocks += [conv_cls( + in_chs, + out_chs, + stride=block_stride, + cfg=conv_cfg, + drop_path=drop_path[i], + )] + elif t == 'T': + rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) + blocks += [TransformerBlock2d( + in_chs, + out_chs, + stride=block_stride, + rel_pos_cls=rel_pos_cls, + cfg=transformer_cfg, + drop_path=drop_path[i], + )] + elif t == 'M': + blocks += [MaxxVitBlock( + in_chs, + out_chs, + stride=block_stride, + conv_cfg=conv_cfg, + transformer_cfg=transformer_cfg, + drop_path=drop_path[i], + )] + elif t == 'PM': + blocks += [ParallelMaxxVitBlock( + in_chs, + out_chs, + stride=block_stride, + conv_cfg=conv_cfg, + transformer_cfg=transformer_cfg, + drop_path=drop_path[i], + )] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class Stem(nn.Module): + + def __init__( + self, + in_chs: int, + out_chs: int, + kernel_size: int = 3, + act_layer: str = 'gelu', + norm_layer: str = 'batchnorm2d', + norm_eps: float = 1e-5, + ): + super().__init__() + if not isinstance(out_chs, (list, tuple)): + out_chs = to_2tuple(out_chs) + + norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) + self.out_chs = out_chs[-1] + self.stride = 2 + + self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2) + self.norm1 = norm_act_layer(out_chs[0]) + self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1) + + def init_weights(self, scheme=''): + named_apply(partial(_init_conv, scheme=scheme), self) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + return x + + +def cfg_window_size(cfg: MaxxVitTransformerCfg, 
+    if cfg.window_size is not None:
+        assert cfg.grid_size
+        return cfg
+    partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
+    cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
+    return cfg
+
+
+class MaxxVit(nn.Module):
+    """ CoaTNet + MaxVit base model.
+
+    Highly configurable for different block compositions, tensor layouts, pooling types.
+    """
+
+    def __init__(
+            self,
+            cfg: MaxxVitCfg,
+            img_size: Union[int, Tuple[int, int]] = 224,
+            in_chans: int = 3,
+            num_classes: int = 1000,
+            global_pool: str = 'avg',
+            drop_rate: float = 0.,
+            drop_path_rate: float = 0.
+    ):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
+        self.num_classes = num_classes
+        self.global_pool = global_pool
+        self.num_features = cfg.embed_dim[-1]
+        self.embed_dim = cfg.embed_dim
+        self.drop_rate = drop_rate
+        self.grad_checkpointing = False
+
+        self.stem = Stem(
+            in_chs=in_chans,
+            out_chs=cfg.stem_width,
+            act_layer=cfg.conv_cfg.act_layer,
+            norm_layer=cfg.conv_cfg.norm_layer,
+            norm_eps=cfg.conv_cfg.norm_eps,
+        )
+
+        stride = self.stem.stride
+        feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
+
+        num_stages = len(cfg.embed_dim)
+        assert len(cfg.depths) == num_stages
+        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
+        in_chs = self.stem.out_chs
+        stages = []
+        for i in range(num_stages):
+            stage_stride = 2
+            out_chs = cfg.embed_dim[i]
+            feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
+            stages += [MaxxVitStage(
+                in_chs,
+                out_chs,
+                depth=cfg.depths[i],
+                block_types=cfg.block_type[i],
+                conv_cfg=cfg.conv_cfg,
+                transformer_cfg=transformer_cfg,
+                feat_size=feat_size,
+                drop_path=dpr[i],
+            )]
+            stride *= stage_stride
+            in_chs = out_chs
+        self.stages = nn.Sequential(*stages)
+
+        final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
+        self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)
+
+        # Classifier head
+        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
+
+        # Weight init (default PyTorch init works well for AdamW if scheme not set)
+        assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
+        if cfg.weight_init:
+            named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
+
+    def _init_weights(self, module, name, scheme=''):
+        if hasattr(module, 'init_weights'):
+            try:
+                module.init_weights(scheme=scheme)
+            except TypeError:
+                module.init_weights()
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {
+            k for k, _ in self.named_parameters()
+            if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        matcher = dict(
+            stem=r'^stem',  # stem and embed
+            blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
+        )
+        return matcher
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in self.stages:
+            s.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes, global_pool=None):
+        self.num_classes = num_classes
+        if global_pool is None:
+            global_pool = self.head.global_pool.pool_type
+        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.stages(x)
+        x = self.norm(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        return self.head(x, pre_logits=pre_logits)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        MaxxVit, variant, pretrained,
+        model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
+        feature_cfg=dict(flatten_sequential=True),
+        **kwargs)
+
+
+@register_model
+def coatnet_pico_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_nano_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_0_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_1_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_2_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_3_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_bn_0_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_nano_cc_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnext_nano_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_0_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_1_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_2_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_3_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_4_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def coatnet_5_224(pretrained=False, **kwargs):
+    return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs)
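+
+# (editorial sketch, not part of the original file) Each @register_model function above is an
+# entrypoint in the registry; assuming custom_timm mirrors timm's registry API, models can
+# also be created generically through it:
+#
+#   from custom_timm.models.registry import model_entrypoint
+#   create_fn = model_entrypoint('coatnet_0_224')
+#   model = create_fn(pretrained=False, num_classes=10)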
+
+
+@register_model
+def maxvit_pico_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_nano_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_tiny_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_tiny_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_tiny_pm_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs):
+    return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_tiny_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_tiny_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_small_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_small_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_base_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_base_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_large_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_large_224', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def maxvit_xlarge_224(pretrained=False, **kwargs):
+    return _create_maxxvit('maxvit_xlarge_224', pretrained=pretrained, **kwargs)
+
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mlp_mixer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mlp_mixer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b044244baa63476f32e63b63e7604748bbbf0360
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mlp_mixer.py
@@ -0,0 +1,681 @@
+""" MLP-Mixer, ResMLP, and gMLP in PyTorch
+
+This impl is originally based on the MLP-Mixer paper.
+
+Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
+
+Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+
+@article{tolstikhin2021,
+    title={MLP-Mixer: An all-MLP Architecture for Vision},
+    author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner,
+        Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+    journal={arXiv preprint arXiv:2105.01601},
+    year={2021}
+}
+
+Also supports ResMLP, and a preliminary (not verified) implementation of gMLP
+
+Code: https://github.com/facebookresearch/deit
+Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
+@misc{touvron2021resmlp,
+    title={ResMLP: Feedforward networks for image classification with data-efficient training},
+    author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and
+        Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou},
+    year={2021},
+    eprint={2105.03404},
+}
+
+Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
+@misc{liu2021pay,
+    title={Pay Attention to MLPs},
+    author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le},
+    year={2021},
+    eprint={2105.08050},
+}
+
+A thank you to paper authors for releasing code and weights.
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import math
+from copy import deepcopy
+from functools import partial
+
+import torch
+import torch.nn as nn
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg, named_apply, checkpoint_seq
+from .layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple
+from .registry import register_model
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
+        'first_conv': 'stem.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = dict(
+    mixer_s32_224=_cfg(),
+    mixer_s16_224=_cfg(),
+    mixer_b32_224=_cfg(),
+    mixer_b16_224=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth',
+    ),
+    mixer_b16_224_in21k=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth',
+        num_classes=21843
+    ),
+    mixer_l32_224=_cfg(),
+    mixer_l16_224=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth',
+    ),
+    mixer_l16_224_in21k=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth',
+        num_classes=21843
+    ),
+
+    # Mixer ImageNet-21K-P pretraining
+    mixer_b16_224_miil_in21k=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth',
+        mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
+    ),
+    mixer_b16_224_miil=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth',
+        mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear',
+    ),
+
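+    # (editorial note) the gmixer / resmlp cfgs below use ImageNet default mean/std rather
+    # than the 0.5-centered defaults from _cfg()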
gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmixer_24_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_big_24_224_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + gmlp_ti16_224=_cfg(), + gmlp_s16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + gmlp_b16_224=_cfg(), +) + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, + act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + global_pool='avg', + ): + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.grad_checkpointing = False + + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate) + for _ in range(num_blocks)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + @torch.jit.ignore + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.global_pool == 'avg': + x = x.mean(dim=1) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it 
can override the init of the + # child modules as this will be called in depth-first order. + module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for MLP-Mixer models.') + + model = build_model_with_cfg( + MlpMixer, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs): + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs): + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs): + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b16_224_in21k(pretrained=False, **kwargs): + """ Mixer-B/16 224x224. ImageNet-21k pretrained weights. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l32_224(pretrained=False, **kwargs): + """ Mixer-L/32 224x224. + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) + model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_l16_224(pretrained=False, **kwargs): + """ Mixer-L/16 224x224. ImageNet-1k pretrained weights. 
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l16_224_in21k(pretrained=False, **kwargs):
+    """ Mixer-L/16 224x224. ImageNet-21k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-1k pretrained weights.
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil_in21k(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-21k pretrained weights.
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def gmixer_12_224(pretrained=False, **kwargs):
+    """ Glu-Mixer-12 224x224
+    Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer
+    """
+    model_args = dict(
+        patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0),
+        mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
+    model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def gmixer_24_224(pretrained=False, **kwargs):
+    """ Glu-Mixer-24 224x224
+    Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer
+    """
+    model_args = dict(
+        patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0),
+        mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
+    model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def resmlp_12_224(pretrained=False, **kwargs):
+    """ ResMLP-12
+    Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
+    """
+    model_args = dict(
+        patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs)
+    model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def resmlp_24_224(pretrained=False, **kwargs):
+    """ ResMLP-24
+    Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
+    """
+    model_args = dict(
+        patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4,
+        block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs)
+    model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def resmlp_36_224(pretrained=False, **kwargs):
+    """ ResMLP-36
+    Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
+    """
+    model_args = dict(
+        patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4,
+        block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs)
+    model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args)
+    return model
+
+
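The MixerBlock defined earlier in this file alternates a token-mixing MLP (applied across the patch axis via a transpose) with a channel-mixing MLP, each in a pre-norm residual branch. A minimal self-contained sketch of that data flow in plain PyTorch, independent of this vendored custom_timm package (the 196/512/256/2048 shapes are illustrative, matching a 224x224 input with 16x16 patches and the Mixer-S width with mlp_ratio=(0.5, 4.0)):

```python
import torch
import torch.nn as nn

class TinyMixerBlock(nn.Module):
    # Restates the MixerBlock data flow: token MLP on the (B, C, N) view,
    # channel MLP on the (B, N, C) view, each with a residual connection.
    def __init__(self, dim=512, seq_len=196, tokens_dim=256, channels_dim=2048):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.mlp_tokens = nn.Sequential(
            nn.Linear(seq_len, tokens_dim), nn.GELU(), nn.Linear(tokens_dim, seq_len))
        self.norm2 = nn.LayerNorm(dim)
        self.mlp_channels = nn.Sequential(
            nn.Linear(dim, channels_dim), nn.GELU(), nn.Linear(channels_dim, dim))

    def forward(self, x):  # x: (B, N, C) = (batch, tokens, channels)
        x = x + self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)
        x = x + self.mlp_channels(self.norm2(x))
        return x

x = torch.randn(2, 196, 512)
print(TinyMixerBlock()(x).shape)  # torch.Size([2, 196, 512])
```

The ResBlock and SpatialGatingBlock variants above differ mainly in swapping the token MLP for a plain linear layer (with LayerScale) or for a spatially-gated MLP, while keeping this same residual structure.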
+@register_model +def resmlp_big_24_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_distilled_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_distilled_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224_dino(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224_dino(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - 
https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs): + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs): + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs): + """ gMLP-Base + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilenetv3.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilenetv3.py new file mode 100644 index 0000000000000000000000000000000000000000..19dd8b5b4bf10ea2dc307fda75ed8d49bc312f82 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilenetv3.py @@ -0,0 +1,739 @@ +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. 
+ +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2019, Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, pretrained_cfg_for_features, checkpoint_seq +from .layers import SelectAdaptivePool2d, Linear, create_conv2d, get_act_fn, get_norm_act_layer +from .registry import register_model + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_large_100_miil': _cfg( + interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth'), + 'mobilenetv3_large_100_miil_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth', + interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221), + + 'mobilenetv3_small_050': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', + interpolation='bicubic'), + 'mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', + interpolation='bicubic'), + 'mobilenetv3_small_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', + interpolation='bicubic'), + + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
+        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
+    'tf_mobilenetv3_small_100': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
+        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
+    'tf_mobilenetv3_small_minimal_100': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
+        mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
+
+    'fbnetv3_b': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth',
+        test_input_size=(3, 256, 256), crop_pct=0.95),
+    'fbnetv3_d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth',
+        test_input_size=(3, 256, 256), crop_pct=0.95),
+    'fbnetv3_g': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth',
+        input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)),
+
+    'lcnet_035': _cfg(),
+    'lcnet_050': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth',
+        interpolation='bicubic',
+    ),
+    'lcnet_075': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth',
+        interpolation='bicubic',
+    ),
+    'lcnet_100': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth',
+        interpolation='bicubic',
+    ),
+    'lcnet_150': _cfg(),
+}
+
+
+class MobileNetV3(nn.Module):
+    """ MobileNet-V3
+
+    Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific
+    'efficient head', where global pooling is done before the head convolution without a final batch-norm
+    layer before the classifier.
+ + Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 + + Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include: + * HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class) + * FBNet-V3 - https://arxiv.org/abs/2006.02049 + * LCNet - https://arxiv.org/abs/2109.15099 + """ + + def __init__( + self, block_args, num_classes=1000, in_chans=3, stem_size=16, fix_stem=False, num_features=1280, + head_bias=True, pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, + round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_act_layer(stem_size, inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = head_chs * self.global_pool.feat_mult() + self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^conv_stem|bn1', + blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x, flatten=True) + else: + x = self.blocks(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + if 
pre_logits: + return x.flatten(1) + else: + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. + """ + + def __init__( + self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=16, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + se_from_exp=True, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, + drop_path_rate=drop_path_rate, feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant, pretrained=False, **kwargs): + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = pretrained_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + 
num_features=num_features,
+        stem_size=16,
+        fix_stem=channel_multiplier < 0.75,
+        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
+        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=act_layer,
+        se_layer=se_layer,
+        **kwargs,
+    )
+    model = _create_mnv3(variant, pretrained, **model_kwargs)
+    return model
+
+
+def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """ FBNetV3
+    Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining`
+        - https://arxiv.org/abs/2006.02049
+    FIXME untested, this is a preliminary impl of some FBNet-V3 variants.
+    """
+    vl = variant.split('_')[-1]
+    if vl in ('a', 'b'):
+        stem_size = 16
+        arch_def = [
+            ['ds_r2_k3_s1_e1_c16'],
+            ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'],
+            ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'],
+            ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
+            ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'],
+            ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'],
+            ['cn_r1_k1_s1_c1344'],
+        ]
+    elif vl == 'd':
+        stem_size = 24
+        arch_def = [
+            ['ds_r2_k3_s1_e1_c16'],
+            ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'],
+            ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'],
+            ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
+            ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'],
+            ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'],
+            ['cn_r1_k1_s1_c1440'],
+        ]
+    elif vl == 'g':
+        stem_size = 32
+        arch_def = [
+            ['ds_r3_k3_s1_e1_c24'],
+            ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'],
+            ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'],
+            ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'],
+            ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'],
+            ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'],
+            ['cn_r1_k1_s1_c1728'],
+        ]
+    else:
+        raise NotImplementedError
+    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95)
+    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn)
+    act_layer = resolve_act_layer(kwargs, 'hard_swish')
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def),
+        num_features=1984,
+        head_bias=False,
+        stem_size=stem_size,
+        round_chs_fn=round_chs_fn,
+        se_from_exp=False,
+        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=act_layer,
+        se_layer=se_layer,
+        **kwargs,
+    )
+    model = _create_mnv3(variant, pretrained, **model_kwargs)
+    return model
+
+
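The `arch_def` lists above are decoded by `decode_arch_def` from `efficientnet_builder.py`; each string packs one block spec into underscore-separated fields (block type, then `r`epeats, `k`ernel, `s`tride, `e`xpansion, `c`hannels, an optional `se` ratio, and `nre` to force ReLU over the default activation). A rough stand-alone illustration of the notation, as a hypothetical simplified parser rather than the real `decode_arch_def`, which handles many more options:

```python
def parse_block_str(block_str):
    # Illustrative only: decode one arch_def entry such as
    # 'ir_r1_k5_s2_e4_c40_se0.25_nre' into a readable dict.
    parts = block_str.split('_')
    names = {'r': 'repeats', 'k': 'kernel', 's': 'stride', 'e': 'exp_ratio', 'c': 'channels'}
    spec = {'type': parts[0]}  # e.g. ir = inverted residual, ds = depthwise-separable, cn = conv
    for p in parts[1:]:
        if p == 'nre':
            spec['act'] = 'relu'  # non-default activation for this block
        elif p.startswith('se'):
            spec['se_ratio'] = float(p[2:])
        else:
            val = p[1:]
            spec[names[p[0]]] = float(val) if '.' in val else int(val)
    return spec

print(parse_block_str('ir_r1_k5_s2_e4_c40_se0.25_nre'))
# {'type': 'ir', 'repeats': 1, 'kernel': 5, 'stride': 2,
#  'exp_ratio': 4, 'channels': 40, 'se_ratio': 0.25, 'act': 'relu'}
```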
+ """ + arch_def = [ + # stage 0, 112x112 in + ['dsa_r1_k3_s1_c32'], + # stage 1, 112x112 in + ['dsa_r2_k3_s2_c64'], + # stage 2, 56x56 in + ['dsa_r2_k3_s2_c128'], + # stage 3, 28x28 in + ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], + # stage 4, 14x14in + ['dsa_r4_k5_s1_c256'], + # stage 5, 14x14in + ['dsa_r2_k5_s2_c512_se0.25'], + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), + num_features=1280, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_lcnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ LCNet + Essentially a MobileNet-V3 crossed with a MobileNet-V1 + + Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['dsa_r1_k3_s1_c32'], + # stage 1, 112x112 in + ['dsa_r2_k3_s2_c64'], + # stage 2, 56x56 in + ['dsa_r2_k3_s2_c128'], + # stage 3, 28x28 in + ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], + # stage 4, 14x14in + ['dsa_r4_k5_s1_c256'], + # stage 5, 14x14in + ['dsa_r2_k5_s2_c512_se0.25'], + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), + num_features=1280, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil(pretrained=False, **kwargs): + """ MobileNet V3 + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs): + """ MobileNet V3, 21k pretraining + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_050(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, 
+@register_model
+def mobilenetv3_rw(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    if pretrained:
+        # pretrained model trained with non-default BN epsilon
+        kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_large_075(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_large_100(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_small_075(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_small_100(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
+    kwargs['pad_type'] = 'same'
+    model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def fbnetv3_b(pretrained=False, **kwargs):
+    """ FBNetV3-B """
+    model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def fbnetv3_d(pretrained=False, **kwargs):
+    """ FBNetV3-D """
+    model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def fbnetv3_g(pretrained=False, **kwargs):
+    """ FBNetV3-G """
+    model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def lcnet_035(pretrained=False, **kwargs):
+    """ PP-LCNet 0.35"""
+    model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def lcnet_050(pretrained=False, **kwargs):
+    """ PP-LCNet 0.5"""
+    model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def lcnet_075(pretrained=False, **kwargs):
+    """ PP-LCNet 0.75"""
+    model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def lcnet_100(pretrained=False, **kwargs):
+    """ PP-LCNet 1.0"""
+    model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def lcnet_150(pretrained=False, **kwargs):
+    """ PP-LCNet 1.5"""
+    model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilevit.py
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilevit.py new file mode 100644 index 0000000000000000000000000000000000000000..bd5479a7cf9a379cc40e918a57980db6812be045 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mobilevit.py @@ -0,0 +1,699 @@ +""" MobileViT + +Paper: +V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178 +V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680 + +MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below) +License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source) + +Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman +""" +# +# For licensing see accompanying LICENSE file. +# Copyright (C) 2020 Apple Inc. All Rights Reserved. +# +import math +from typing import Union, Callable, Dict, Tuple, Optional, Sequence + +import torch +from torch import nn +import torch.nn.functional as F + +from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups +from .fx_features import register_notrace_module +from .layers import to_2tuple, make_divisible, LayerNorm2d, GroupNorm1, ConvMlp, DropPath +from .vision_transformer import Block as TransformerBlock +from .helpers import build_model_with_cfg +from .registry import register_model + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': (0., 0., 0.), 'std': (1., 1., 1.), + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, + **kwargs + } + + +default_cfgs = { + 'mobilevit_xxs': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevit_xxs-ad385b40.pth'), + 'mobilevit_xs': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevit_xs-8fbd6366.pth'), + 'mobilevit_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevit_s-38a5a959.pth'), + 'semobilevit_s': _cfg(), + + 'mobilevitv2_050': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_050-49951ee2.pth', + crop_pct=0.888), + 'mobilevitv2_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_075-b5556ef6.pth', + crop_pct=0.888), + 'mobilevitv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_100-e464ef3b.pth', + crop_pct=0.888), + 'mobilevitv2_125': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_125-0ae35027.pth', + crop_pct=0.888), + 'mobilevitv2_150': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_150-737c5019.pth', + crop_pct=0.888), + 'mobilevitv2_175': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_175-16462ee2.pth', + crop_pct=0.888), + 'mobilevitv2_200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_200-b3422f67.pth', + crop_pct=0.888), + + 'mobilevitv2_150_in22ft1k': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_150_in22ft1k-0b555d7b.pth', + crop_pct=0.888), + 'mobilevitv2_175_in22ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_175_in22ft1k-4117fa1f.pth', + crop_pct=0.888), + 'mobilevitv2_200_in22ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_200_in22ft1k-1d7c8927.pth', + crop_pct=0.888), + + 'mobilevitv2_150_384_in22ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_150_384_in22ft1k-9e142854.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'mobilevitv2_175_384_in22ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_175_384_in22ft1k-059cbe56.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), + 'mobilevitv2_200_384_in22ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-mvit-weights/mobilevitv2_200_384_in22ft1k-32c87503.pth', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), +} + + +def _inverted_residual_block(d, c, s, br=4.0): + # inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise) + return ByoBlockCfg( + type='bottle', d=d, c=c, s=s, gs=1, br=br, + block_kwargs=dict(bottle_in=True, linear_out=True)) + + +def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): + # inverted residual + mobilevit blocks as per MobileViT network + return ( + _inverted_residual_block(d=d, c=c, s=s, br=br), + ByoBlockCfg( + type='mobilevit', d=1, c=c, s=1, + block_kwargs=dict( + transformer_dim=transformer_dim, + transformer_depth=transformer_depth, + patch_size=patch_size) + ) + ) + + +def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): + # inverted residual + mobilevit blocks as per MobileViT network + return ( + _inverted_residual_block(d=d, c=c, s=s, br=br), + ByoBlockCfg( + type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, + block_kwargs=dict( + transformer_depth=transformer_depth, + patch_size=patch_size) + ) + ) + + +def _mobilevitv2_cfg(multiplier=1.0): + chs = (64, 128, 256, 384, 512) + if multiplier != 1.0: + chs = tuple([int(c * multiplier) for c in chs]) + cfg = ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), + _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), + _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), + _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), + _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3), + ), + stem_chs=int(32 * multiplier), + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + ) + return cfg + + +model_cfgs = dict( + mobilevit_xxs=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=16, s=1, br=2.0), + _inverted_residual_block(d=3, c=24, s=2, br=2.0), + _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), + _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), + _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=320, + ), + + 
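+    # NOTE: each _mobilevit_block() entry expands to two cfgs: a strided inverted-residual
+    # downsample block followed by a stride-1 transformer-based MobileViT block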
mobilevit_xs=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=48, s=2), + _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=384, + ), + + mobilevit_s=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=64, s=2), + _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + act_layer='silu', + num_features=640, + ), + + semobilevit_s=ByoModelCfg( + blocks=( + _inverted_residual_block(d=1, c=32, s=1), + _inverted_residual_block(d=3, c=64, s=2), + _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), + _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), + _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), + ), + stem_chs=16, + stem_type='3x3', + stem_pool='', + downsample='', + attn_layer='se', + attn_kwargs=dict(rd_ratio=1/8), + num_features=640, + ), + + mobilevitv2_050=_mobilevitv2_cfg(.50), + mobilevitv2_075=_mobilevitv2_cfg(.75), + mobilevitv2_125=_mobilevitv2_cfg(1.25), + mobilevitv2_100=_mobilevitv2_cfg(1.0), + mobilevitv2_150=_mobilevitv2_cfg(1.5), + mobilevitv2_175=_mobilevitv2_cfg(1.75), + mobilevitv2_200=_mobilevitv2_cfg(2.0), +) + + +@register_notrace_module +class MobileVitBlock(nn.Module): + """ MobileViT block + Paper: https://arxiv.org/abs/2110.02178?context=cs.LG + """ + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 3, + stride: int = 1, + bottle_ratio: float = 1.0, + group_size: Optional[int] = None, + dilation: Tuple[int, int] = (1, 1), + mlp_ratio: float = 2.0, + transformer_dim: Optional[int] = None, + transformer_depth: int = 2, + patch_size: int = 8, + num_heads: int = 4, + attn_drop: float = 0., + drop: int = 0., + no_fusion: bool = False, + drop_path_rate: float = 0., + layers: LayerFn = None, + transformer_norm_layer: Callable = nn.LayerNorm, + **kwargs, # eat unused args + ): + super(MobileVitBlock, self).__init__() + + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + out_chs = out_chs or in_chs + transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) + + self.conv_kxk = layers.conv_norm_act( + in_chs, in_chs, kernel_size=kernel_size, + stride=stride, groups=groups, dilation=dilation[0]) + self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) + + self.transformer = nn.Sequential(*[ + TransformerBlock( + transformer_dim, mlp_ratio=mlp_ratio, num_heads=num_heads, qkv_bias=True, + attn_drop=attn_drop, drop=drop, drop_path=drop_path_rate, + act_layer=layers.act, norm_layer=transformer_norm_layer) + for _ in range(transformer_depth) + ]) + self.norm = transformer_norm_layer(transformer_dim) + + self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) + + if no_fusion: + self.conv_fusion = None + else: + 
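+            # fusion conv mixes the local shortcut with the transformer output (cat along channels in forward())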
self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) + + self.patch_size = to_2tuple(patch_size) + self.patch_area = self.patch_size[0] * self.patch_size[1] + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + + # Local representation + x = self.conv_kxk(x) + x = self.conv_1x1(x) + + # Unfold (feature map -> patches) + patch_h, patch_w = self.patch_size + B, C, H, W = x.shape + new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w + num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w  # n_h, n_w + num_patches = num_patch_h * num_patch_w  # N + interpolate = False + if new_h != H or new_w != W: + # Note: Padding can be done, but then it needs to be handled in attention function. + x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False) + interpolate = True + + # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] + x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) + # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w + x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) + + # Global representations + x = self.transformer(x) + x = self.norm(x) + + # Fold (patch -> feature map) + # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] + x = x.contiguous().view(B, self.patch_area, num_patches, -1) + x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) + # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] + x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) + if interpolate: + x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False) + + x = self.conv_proj(x) + if self.conv_fusion is not None: + x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) + return x + + +class LinearSelfAttention(nn.Module): + """ + This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680` + This layer can be used for self- as well as cross-attention. + Args: + embed_dim (int): :math:`C` from an expected input of size :math:`(B, C, P, N)` + attn_drop (float): Dropout value for context scores. Default: 0.0 + proj_drop (float): Dropout value for the output. Default: 0.0 + bias (bool): Use bias in learnable layers. Default: True + Shape: + - Input: :math:`(B, C, P, N)` where :math:`B` is the batch size, :math:`C` is the input channels, + :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches + - Output: same as the input + .. note:: + For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels + in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor, + we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be + expensive on resource-constrained devices) that may be required to convert the unfolded tensor from + channel-first to channel-last format in case of a linear layer.
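+        Example (illustrative usage added for this doc, not part of the upstream docstring):: + + attn = LinearSelfAttention(embed_dim=64) + x = torch.randn(2, 64, 4, 256)  # [B, C, P, N]: 2x2 patches -> P=4, N=256 patches + out = attn(x)                   # same shape as the input: (2, 64, 4, 256)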
+ """ + + def __init__( + self, + embed_dim: int, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + bias: bool = True, + ) -> None: + super().__init__() + self.embed_dim = embed_dim + + self.qkv_proj = nn.Conv2d( + in_channels=embed_dim, + out_channels=1 + (2 * embed_dim), + bias=bias, + kernel_size=1, + ) + self.attn_drop = nn.Dropout(attn_drop) + self.out_proj = nn.Conv2d( + in_channels=embed_dim, + out_channels=embed_dim, + bias=bias, + kernel_size=1, + ) + self.out_drop = nn.Dropout(proj_drop) + + def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: + # [B, C, P, N] --> [B, h + 2d, P, N] + qkv = self.qkv_proj(x) + + # Project x into query, key and value + # Query --> [B, 1, P, N] + # value, key --> [B, d, P, N] + query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) + + # apply softmax along N dimension + context_scores = F.softmax(query, dim=-1) + context_scores = self.attn_drop(context_scores) + + # Compute context vector + # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1] + context_vector = (key * context_scores).sum(dim=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector.expand_as(value) + out = self.out_proj(out) + out = self.out_drop(out) + return out + + @torch.jit.ignore() + def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + # x --> [B, C, P, N] + # x_prev = [B, C, P, M] + batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape + q_patch_area, q_num_patches = x.shape[-2:] + + assert ( + kv_patch_area == q_patch_area + ), "The number of pixels in a patch for query and key_value should be the same" + + # compute query, key, and value + # [B, C, P, M] --> [B, 1 + d, P, M] + qk = F.conv2d( + x_prev, + weight=self.qkv_proj.weight[:self.embed_dim + 1], + bias=self.qkv_proj.bias[:self.embed_dim + 1], + ) + + # [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M] + query, key = qk.split([1, self.embed_dim], dim=1) + # [B, C, P, N] --> [B, d, P, N] + value = F.conv2d( + x, + weight=self.qkv_proj.weight[self.embed_dim + 1], + bias=self.qkv_proj.bias[self.embed_dim + 1] if self.qkv_proj.bias is not None else None, + ) + + # apply softmax along M dimension + context_scores = F.softmax(query, dim=-1) + context_scores = self.attn_drop(context_scores) + + # compute context vector + # [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1] + context_vector = (key * context_scores).sum(dim=-1, keepdim=True) + + # combine context vector with values + # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] + out = F.relu(value) * context_vector.expand_as(value) + out = self.out_proj(out) + out = self.out_drop(out) + return out + + def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + if x_prev is None: + return self._forward_self_attn(x) + else: + return self._forward_cross_attn(x, x_prev=x_prev) + + +class LinearTransformerBlock(nn.Module): + """ + This class defines the pre-norm transformer encoder with linear self-attention in `MobileViTv2 paper <>`_ + Args: + embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)` + mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim + drop (float): Dropout rate. Default: 0.0 + attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0 + drop_path (float): Stochastic depth rate Default: 0.0 + norm_layer (Callable): Normalization layer. 
Default: GroupNorm1 + Shape: + - Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim, + :math:`P` is number of pixels in a patch, and :math:`N` is number of patches, + - Output: same shape as the input + """ + + def __init__( + self, + embed_dim: int, + mlp_ratio: float = 2.0, + drop: float = 0.0, + attn_drop: float = 0.0, + drop_path: float = 0.0, + act_layer=None, + norm_layer=None, + ) -> None: + super().__init__() + act_layer = act_layer or nn.SiLU + norm_layer = norm_layer or GroupNorm1 + + self.norm1 = norm_layer(embed_dim) + self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) + self.drop_path1 = DropPath(drop_path) + + self.norm2 = norm_layer(embed_dim) + self.mlp = ConvMlp( + in_features=embed_dim, + hidden_features=int(embed_dim * mlp_ratio), + act_layer=act_layer, + drop=drop) + self.drop_path2 = DropPath(drop_path) + + def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: + if x_prev is None: + # self-attention + x = x + self.drop_path1(self.attn(self.norm1(x))) + else: + # cross-attention + res = x + x = self.norm1(x)  # norm + x = self.attn(x, x_prev)  # attn + x = self.drop_path1(x) + res  # residual + + # Feed forward network + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +@register_notrace_module +class MobileVitV2Block(nn.Module): + """ + This class defines the `MobileViTv2 block <https://arxiv.org/abs/2206.02680>`_ + """ + + def __init__( + self, + in_chs: int, + out_chs: Optional[int] = None, + kernel_size: int = 3, + bottle_ratio: float = 1.0, + group_size: Optional[int] = 1, + dilation: Tuple[int, int] = (1, 1), + mlp_ratio: float = 2.0, + transformer_dim: Optional[int] = None, + transformer_depth: int = 2, + patch_size: int = 8, + attn_drop: float = 0., + drop: float = 0., + drop_path_rate: float = 0., + layers: LayerFn = None, + transformer_norm_layer: Callable = GroupNorm1, + **kwargs, # eat unused args + ): + super(MobileVitV2Block, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + out_chs = out_chs or in_chs + transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) + + self.conv_kxk = layers.conv_norm_act( + in_chs, in_chs, kernel_size=kernel_size, + stride=1, groups=groups, dilation=dilation[0]) + self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) + + self.transformer = nn.Sequential(*[ + LinearTransformerBlock( + transformer_dim, + mlp_ratio=mlp_ratio, + attn_drop=attn_drop, + drop=drop, + drop_path=drop_path_rate, + act_layer=layers.act, + norm_layer=transformer_norm_layer + ) + for _ in range(transformer_depth) + ]) + self.norm = transformer_norm_layer(transformer_dim) + + self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) + + self.patch_size = to_2tuple(patch_size) + self.patch_area = self.patch_size[0] * self.patch_size[1] + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, C, H, W = x.shape + patch_h, patch_w = self.patch_size + new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w + num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w  # n_h, n_w + num_patches = num_patch_h * num_patch_w  # N + if new_h != H or new_w != W: + x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True) + + # Local representation + x = self.conv_kxk(x) + x = self.conv_1x1(x) + + # Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N] + C = x.shape[1] + x =
x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) + x = x.reshape(B, C, -1, num_patches) + + # Global representations + x = self.transformer(x) + x = self.norm(x) + + # Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W] + x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) + x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) + + x = self.conv_proj(x) + return x + + +register_block('mobilevit', MobileVitBlock) +register_block('mobilevit2', MobileVitV2Block) + + +def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def mobilevit_xxs(pretrained=False, **kwargs): + return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevit_xs(pretrained=False, **kwargs): + return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevit_s(pretrained=False, **kwargs): + return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) + + +@register_model +def semobilevit_s(pretrained=False, **kwargs): + return _create_mobilevit('semobilevit_s', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_050(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_075(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_100(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_125(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_150(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_175(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_200(pretrained=False, **kwargs): + return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_150_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_150_in22ft1k', cfg_variant='mobilevitv2_150', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_175_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_175_in22ft1k', cfg_variant='mobilevitv2_175', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_200_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_200_in22ft1k', cfg_variant='mobilevitv2_200', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_150_384_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_150_384_in22ft1k', cfg_variant='mobilevitv2_150', 
pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_175_384_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_175_384_in22ft1k', cfg_variant='mobilevitv2_175', pretrained=pretrained, **kwargs) + + +@register_model +def mobilevitv2_200_384_in22ft1k(pretrained=False, **kwargs): + return _create_mobilevit( + 'mobilevitv2_200_384_in22ft1k', cfg_variant='mobilevitv2_200', pretrained=pretrained, **kwargs) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mvitv2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mvitv2.py new file mode 100644 index 0000000000000000000000000000000000000000..b7ec58979f3b2f35393f4555abcb3342d055710b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/mvitv2.py @@ -0,0 +1,1010 @@ +""" Multi-Scale Vision Transformer v2 + +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} + +Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit +Original copyright below. + +Modifications and timm support by / Copyright 2022, Ross Wightman +""" +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved. +import operator +from collections import OrderedDict +from dataclasses import dataclass +from functools import partial, reduce +from typing import Union, List, Tuple, Optional + +import torch +import torch.utils.checkpoint as checkpoint +from torch import nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg +from .layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', + 'fixed_input_size': True, + **kwargs + } + + +default_cfgs = dict( + mvitv2_tiny=_cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth'), + mvitv2_small=_cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth'), + mvitv2_base=_cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth'), + mvitv2_large=_cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth'), + + mvitv2_base_in21k=_cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth', + num_classes=19168), + mvitv2_large_in21k=_cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth', + num_classes=19168), + mvitv2_huge_in21k=_cfg( + url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth', + num_classes=19168), + + mvitv2_small_cls=_cfg(url=''), +) + + +@dataclass +class MultiScaleVitCfg: + depths: Tuple[int, ...] = (2, 3, 16, 3) + embed_dim: Union[int, Tuple[int, ...]] = 96 + num_heads: Union[int, Tuple[int, ...]] = 1 + mlp_ratio: float = 4.
+ pool_first: bool = False + expand_attn: bool = True + qkv_bias: bool = True + use_cls_token: bool = False + use_abs_pos: bool = False + residual_pooling: bool = True + mode: str = 'conv' + kernel_qkv: Tuple[int, int] = (3, 3) + stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2)) + stride_kv: Optional[Tuple[Tuple[int, int]]] = None + stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4) + patch_kernel: Tuple[int, int] = (7, 7) + patch_stride: Tuple[int, int] = (4, 4) + patch_padding: Tuple[int, int] = (3, 3) + pool_type: str = 'max' + rel_pos_type: str = 'spatial' + act_layer: Union[str, Tuple[str, str]] = 'gelu' + norm_layer: Union[str, Tuple[str, str]] = 'layernorm' + norm_eps: float = 1e-6 + + def __post_init__(self): + num_stages = len(self.depths) + if not isinstance(self.embed_dim, (tuple, list)): + self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages)) + assert len(self.embed_dim) == num_stages + + if not isinstance(self.num_heads, (tuple, list)): + self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages)) + assert len(self.num_heads) == num_stages + + if self.stride_kv_adaptive is not None and self.stride_kv is None: + _stride_kv = self.stride_kv_adaptive + pool_kv_stride = [] + for i in range(num_stages): + if min(self.stride_q[i]) > 1: + _stride_kv = [ + max(_stride_kv[d] // self.stride_q[i][d], 1) + for d in range(len(_stride_kv)) + ] + pool_kv_stride.append(tuple(_stride_kv)) + self.stride_kv = tuple(pool_kv_stride) + + +model_cfgs = dict( + mvitv2_tiny=MultiScaleVitCfg( + depths=(1, 2, 5, 2), + ), + mvitv2_small=MultiScaleVitCfg( + depths=(1, 2, 11, 2), + ), + mvitv2_base=MultiScaleVitCfg( + depths=(2, 3, 16, 3), + ), + mvitv2_large=MultiScaleVitCfg( + depths=(2, 6, 36, 4), + embed_dim=144, + num_heads=2, + expand_attn=False, + ), + + mvitv2_base_in21k=MultiScaleVitCfg( + depths=(2, 3, 16, 3), + ), + mvitv2_large_in21k=MultiScaleVitCfg( + depths=(2, 6, 36, 4), + embed_dim=144, + num_heads=2, + expand_attn=False, + ), + + mvitv2_small_cls=MultiScaleVitCfg( + depths=(1, 2, 11, 2), + use_cls_token=True, + ), +) + + +def prod(iterable): + return reduce(operator.mul, iterable, 1) + + +class PatchEmbed(nn.Module): + """ + PatchEmbed. 
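+    Example (illustrative shape walk-through added for this doc):: + + pe = PatchEmbed(dim_in=3, dim_out=96, kernel=(7, 7), stride=(4, 4), padding=(3, 3)) + tokens, feat_size = pe(torch.randn(1, 3, 224, 224)) + # proj -> (1, 96, 56, 56); tokens -> (1, 3136, 96); feat_size -> (56, 56)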
+ """ + + def __init__( + self, + dim_in=3, + dim_out=768, + kernel=(7, 7), + stride=(4, 4), + padding=(3, 3), + ): + super().__init__() + + self.proj = nn.Conv2d( + dim_in, + dim_out, + kernel_size=kernel, + stride=stride, + padding=padding, + ) + + def forward(self, x) -> Tuple[torch.Tensor, List[int]]: + x = self.proj(x) + # B C H W -> B HW C + return x.flatten(2).transpose(1, 2), x.shape[-2:] + + +@register_notrace_function +def reshape_pre_pool( + x, + feat_size: List[int], + has_cls_token: bool = True +) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + H, W = feat_size + if has_cls_token: + cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :] + else: + cls_tok = None + x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous() + return x, cls_tok + + +@register_notrace_function +def reshape_post_pool( + x, + num_heads: int, + cls_tok: Optional[torch.Tensor] = None +) -> Tuple[torch.Tensor, List[int]]: + feat_size = [x.shape[2], x.shape[3]] + L_pooled = x.shape[2] * x.shape[3] + x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3) + if cls_tok is not None: + x = torch.cat((cls_tok, x), dim=2) + return x, feat_size + + +@register_notrace_function +def cal_rel_pos_type( + attn: torch.Tensor, + q: torch.Tensor, + has_cls_token: bool, + q_size: List[int], + k_size: List[int], + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, +): + """ + Spatial Relative Positional Embeddings. + """ + sp_idx = 1 if has_cls_token else 0 + q_h, q_w = q_size + k_h, k_w = k_size + + # Scale up rel pos if shapes for q and k are different. + q_h_ratio = max(k_h / q_h, 1.0) + k_h_ratio = max(q_h / k_h, 1.0) + dist_h = torch.arange(q_h)[:, None] * q_h_ratio - torch.arange(k_h)[None, :] * k_h_ratio + dist_h += (k_h - 1) * k_h_ratio + q_w_ratio = max(k_w / q_w, 1.0) + k_w_ratio = max(q_w / k_w, 1.0) + dist_w = torch.arange(q_w)[:, None] * q_w_ratio - torch.arange(k_w)[None, :] * k_w_ratio + dist_w += (k_w - 1) * k_w_ratio + + Rh = rel_pos_h[dist_h.long()] + Rw = rel_pos_w[dist_w.long()] + + B, n_head, q_N, dim = q.shape + + r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim) + rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, Rh) + rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, Rw) + + attn[:, :, sp_idx:, sp_idx:] = ( + attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + + rel_h[:, :, :, :, :, None] + + rel_w[:, :, :, :, None, :] + ).view(B, -1, q_h * q_w, k_h * k_w) + + return attn + + +class MultiScaleAttentionPoolFirst(nn.Module): + def __init__( + self, + dim, + dim_out, + feat_size, + num_heads=8, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.num_heads = num_heads + self.dim_out = dim_out + self.head_dim = dim_out // num_heads + self.scale = self.head_dim ** -0.5 + self.has_cls_token = has_cls_token + padding_q = tuple([int(q // 2) for q in kernel_q]) + padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) + + self.q = nn.Linear(dim, dim_out, bias=qkv_bias) + self.k = nn.Linear(dim, dim_out, bias=qkv_bias) + self.v = nn.Linear(dim, dim_out, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # Skip pooling with kernel and stride size of (1, 1, 1). 
+ if prod(kernel_q) == 1 and prod(stride_q) == 1: + kernel_q = None + if prod(kernel_kv) == 1 and prod(stride_kv) == 1: + kernel_kv = None + self.mode = mode + self.unshared = mode == 'conv_unshared' + self.pool_q, self.pool_k, self.pool_v = None, None, None + self.norm_q, self.norm_k, self.norm_v = None, None, None + if mode in ("avg", "max"): + pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d + if kernel_q: + self.pool_q = pool_op(kernel_q, stride_q, padding_q) + if kernel_kv: + self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) + self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) + elif mode == "conv" or mode == "conv_unshared": + dim_conv = dim // num_heads if mode == "conv" else dim + if kernel_q: + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + kernel_q, + stride=stride_q, + padding=padding_q, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + if kernel_kv: + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + else: + raise NotImplementedError(f"Unsupported model {mode}") + + # relative pos embedding + self.rel_pos_type = rel_pos_type + if self.rel_pos_type == 'spatial': + assert feat_size[0] == feat_size[1] + size = feat_size[0] + q_size = size // stride_q[1] if len(stride_q) > 0 else size + kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size + rel_sp_dim = 2 * max(q_size, kv_size) - 1 + + self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + trunc_normal_tf_(self.rel_pos_h, std=0.02) + trunc_normal_tf_(self.rel_pos_w, std=0.02) + + self.residual_pooling = residual_pooling + + def forward(self, x, feat_size: List[int]): + B, N, _ = x.shape + + fold_dim = 1 if self.unshared else self.num_heads + x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3) + q = k = v = x + + if self.pool_q is not None: + q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) + q = self.pool_q(q) + q, q_size = reshape_post_pool(q, self.num_heads, q_tok) + else: + q_size = feat_size + if self.norm_q is not None: + q = self.norm_q(q) + + if self.pool_k is not None: + k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) + k = self.pool_k(k) + k, k_size = reshape_post_pool(k, self.num_heads, k_tok) + else: + k_size = feat_size + if self.norm_k is not None: + k = self.norm_k(k) + + if self.pool_v is not None: + v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) + v = self.pool_v(v) + v, v_size = reshape_post_pool(v, self.num_heads, v_tok) + else: + v_size = feat_size + if self.norm_v is not None: + v = self.norm_v(v) + + q_N = q_size[0] * q_size[1] + int(self.has_cls_token) + q = q.permute(0, 2, 1, 3).reshape(B, q_N, -1) + q = self.q(q).reshape(B, q_N, self.num_heads, -1).permute(0, 2, 1, 3) + + k_N = k_size[0] * k_size[1] + int(self.has_cls_token) + k = k.permute(0, 2, 1, 3).reshape(B, k_N, -1) + k = self.k(k).reshape(B, k_N, self.num_heads, -1).permute(0, 2, 1, 3) + + v_N = v_size[0] * v_size[1] + int(self.has_cls_token) + v = v.permute(0, 2, 1, 3).reshape(B, v_N, -1) + v = self.v(v).reshape(B, v_N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_type == 'spatial': + attn = 
cal_rel_pos_type( + attn, + q, + self.has_cls_token, + q_size, + k_size, + self.rel_pos_h, + self.rel_pos_w, + ) + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + x = x.transpose(1, 2).reshape(B, -1, self.dim_out) + x = self.proj(x) + + return x, q_size + + +class MultiScaleAttention(nn.Module): + def __init__( + self, + dim, + dim_out, + feat_size, + num_heads=8, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.num_heads = num_heads + self.dim_out = dim_out + self.head_dim = dim_out // num_heads + self.scale = self.head_dim ** -0.5 + self.has_cls_token = has_cls_token + padding_q = tuple([int(q // 2) for q in kernel_q]) + padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) + + self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) + self.proj = nn.Linear(dim_out, dim_out) + + # Skip pooling with kernel and stride size of (1, 1, 1). + if prod(kernel_q) == 1 and prod(stride_q) == 1: + kernel_q = None + if prod(kernel_kv) == 1 and prod(stride_kv) == 1: + kernel_kv = None + self.mode = mode + self.unshared = mode == 'conv_unshared' + self.norm_q, self.norm_k, self.norm_v = None, None, None + self.pool_q, self.pool_k, self.pool_v = None, None, None + if mode in ("avg", "max"): + pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d + if kernel_q: + self.pool_q = pool_op(kernel_q, stride_q, padding_q) + if kernel_kv: + self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) + self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) + elif mode == "conv" or mode == "conv_unshared": + dim_conv = dim_out // num_heads if mode == "conv" else dim_out + if kernel_q: + self.pool_q = nn.Conv2d( + dim_conv, + dim_conv, + kernel_q, + stride=stride_q, + padding=padding_q, + groups=dim_conv, + bias=False, + ) + self.norm_q = norm_layer(dim_conv) + if kernel_kv: + self.pool_k = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_k = norm_layer(dim_conv) + self.pool_v = nn.Conv2d( + dim_conv, + dim_conv, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=dim_conv, + bias=False, + ) + self.norm_v = norm_layer(dim_conv) + else: + raise NotImplementedError(f"Unsupported model {mode}") + + # relative pos embedding + self.rel_pos_type = rel_pos_type + if self.rel_pos_type == 'spatial': + assert feat_size[0] == feat_size[1] + size = feat_size[0] + q_size = size // stride_q[1] if len(stride_q) > 0 else size + kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size + rel_sp_dim = 2 * max(q_size, kv_size) - 1 + + self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) + trunc_normal_tf_(self.rel_pos_h, std=0.02) + trunc_normal_tf_(self.rel_pos_w, std=0.02) + + self.residual_pooling = residual_pooling + + def forward(self, x, feat_size: List[int]): + B, N, _ = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(dim=0) + + if self.pool_q is not None: + q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) + q = self.pool_q(q) + q, q_size = reshape_post_pool(q, self.num_heads, q_tok) + else: + q_size = feat_size + if self.norm_q is not None: + q = self.norm_q(q) + + if self.pool_k is not None: + k, k_tok = reshape_pre_pool(k, 
feat_size, self.has_cls_token) + k = self.pool_k(k) + k, k_size = reshape_post_pool(k, self.num_heads, k_tok) + else: + k_size = feat_size + if self.norm_k is not None: + k = self.norm_k(k) + + if self.pool_v is not None: + v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) + v = self.pool_v(v) + v, _ = reshape_post_pool(v, self.num_heads, v_tok) + if self.norm_v is not None: + v = self.norm_v(v) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_type == 'spatial': + attn = cal_rel_pos_type( + attn, + q, + self.has_cls_token, + q_size, + k_size, + self.rel_pos_h, + self.rel_pos_w, + ) + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + x = x.transpose(1, 2).reshape(B, -1, self.dim_out) + x = self.proj(x) + + return x, q_size + + +class MultiScaleBlock(nn.Module): + def __init__( + self, + dim, + dim_out, + num_heads, + feat_size, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_layer=nn.LayerNorm, + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + mode="conv", + has_cls_token=True, + expand_attn=False, + pool_first=False, + rel_pos_type='spatial', + residual_pooling=True, + ): + super().__init__() + proj_needed = dim != dim_out + self.dim = dim + self.dim_out = dim_out + self.has_cls_token = has_cls_token + + self.norm1 = norm_layer(dim) + + self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None + if stride_q and prod(stride_q) > 1: + kernel_skip = [s + 1 if s > 1 else s for s in stride_q] + stride_skip = stride_q + padding_skip = [int(skip // 2) for skip in kernel_skip] + self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip) + else: + self.shortcut_pool_attn = None + + att_dim = dim_out if expand_attn else dim + attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention + self.attn = attn_layer( + dim, + att_dim, + num_heads=num_heads, + feat_size=feat_size, + qkv_bias=qkv_bias, + kernel_q=kernel_q, + kernel_kv=kernel_kv, + stride_q=stride_q, + stride_kv=stride_kv, + norm_layer=norm_layer, + has_cls_token=has_cls_token, + mode=mode, + rel_pos_type=rel_pos_type, + residual_pooling=residual_pooling, + ) + self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(att_dim) + mlp_dim_out = dim_out + self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None + self.mlp = Mlp( + in_features=att_dim, + hidden_features=int(att_dim * mlp_ratio), + out_features=mlp_dim_out, + ) + self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + def _shortcut_pool(self, x, feat_size: List[int]): + if self.shortcut_pool_attn is None: + return x + if self.has_cls_token: + cls_tok, x = x[:, :1, :], x[:, 1:, :] + else: + cls_tok = None + B, L, C = x.shape + H, W = feat_size + x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous() + x = self.shortcut_pool_attn(x) + x = x.reshape(B, C, -1).transpose(1, 2) + if cls_tok is not None: + x = torch.cat((cls_tok, x), dim=1) + return x + + def forward(self, x, feat_size: List[int]): + x_norm = self.norm1(x) + # NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj + x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm) + x_shortcut = self._shortcut_pool(x_shortcut, feat_size) + x, feat_size_new = self.attn(x_norm, feat_size) + x = x_shortcut + self.drop_path1(x) + + x_norm = self.norm2(x) + x_shortcut = x if 
self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm) + x = x_shortcut + self.drop_path2(self.mlp(x_norm)) + return x, feat_size_new + + +class MultiScaleVitStage(nn.Module): + + def __init__( + self, + dim, + dim_out, + depth, + num_heads, + feat_size, + mlp_ratio=4.0, + qkv_bias=True, + mode="conv", + kernel_q=(1, 1), + kernel_kv=(1, 1), + stride_q=(1, 1), + stride_kv=(1, 1), + has_cls_token=True, + expand_attn=False, + pool_first=False, + rel_pos_type='spatial', + residual_pooling=True, + norm_layer=nn.LayerNorm, + drop_path=0.0, + ): + super().__init__() + self.grad_checkpointing = False + + self.blocks = nn.ModuleList() + if expand_attn: + out_dims = (dim_out,) * depth + else: + out_dims = (dim,) * (depth - 1) + (dim_out,) + + for i in range(depth): + attention_block = MultiScaleBlock( + dim=dim, + dim_out=out_dims[i], + num_heads=num_heads, + feat_size=feat_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + kernel_q=kernel_q, + kernel_kv=kernel_kv, + stride_q=stride_q if i == 0 else (1, 1), + stride_kv=stride_kv, + mode=mode, + has_cls_token=has_cls_token, + pool_first=pool_first, + rel_pos_type=rel_pos_type, + residual_pooling=residual_pooling, + expand_attn=expand_attn, + norm_layer=norm_layer, + drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path, + ) + dim = out_dims[i] + self.blocks.append(attention_block) + if i == 0: + feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)]) + + self.feat_size = feat_size + + def forward(self, x, feat_size: List[int]): + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x, feat_size = checkpoint.checkpoint(blk, x, feat_size) + else: + x, feat_size = blk(x, feat_size) + return x, feat_size + + +class MultiScaleVit(nn.Module): + """ + Improved Multiscale Vision Transformers for Classification and Detection + Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, + Christoph Feichtenhofer* + https://arxiv.org/abs/2112.01526 + + Multiscale Vision Transformers + Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik, + Christoph Feichtenhofer* + https://arxiv.org/abs/2104.11227 + """ + + def __init__( + self, + cfg: MultiScaleVitCfg, + img_size: Tuple[int, int] = (224, 224), + in_chans: int = 3, + global_pool: str = 'avg', + num_classes: int = 1000, + drop_path_rate: float = 0., + drop_rate: float = 0., + ): + super().__init__() + img_size = to_2tuple(img_size) + norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) + self.num_classes = num_classes + self.drop_rate = drop_rate + self.global_pool = global_pool + self.depths = tuple(cfg.depths) + self.expand_attn = cfg.expand_attn + + embed_dim = cfg.embed_dim[0] + self.patch_embed = PatchEmbed( + dim_in=in_chans, + dim_out=embed_dim, + kernel=cfg.patch_kernel, + stride=cfg.patch_stride, + padding=cfg.patch_padding, + ) + patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1]) + num_patches = prod(patch_dims) + + if cfg.use_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.num_prefix_tokens = 1 + pos_embed_dim = num_patches + 1 + else: + self.num_prefix_tokens = 0 + self.cls_token = None + pos_embed_dim = num_patches + + if cfg.use_abs_pos: + self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim)) + else: + self.pos_embed = None + + num_stages = len(cfg.embed_dim) + feat_size = patch_dims + dpr = [x.tolist() for x in torch.linspace(0, 
drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + self.stages = nn.ModuleList() + for i in range(num_stages): + if cfg.expand_attn: + dim_out = cfg.embed_dim[i] + else: + dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)] + stage = MultiScaleVitStage( + dim=embed_dim, + dim_out=dim_out, + depth=cfg.depths[i], + num_heads=cfg.num_heads[i], + feat_size=feat_size, + mlp_ratio=cfg.mlp_ratio, + qkv_bias=cfg.qkv_bias, + mode=cfg.mode, + pool_first=cfg.pool_first, + expand_attn=cfg.expand_attn, + kernel_q=cfg.kernel_qkv, + kernel_kv=cfg.kernel_qkv, + stride_q=cfg.stride_q[i], + stride_kv=cfg.stride_kv[i], + has_cls_token=cfg.use_cls_token, + rel_pos_type=cfg.rel_pos_type, + residual_pooling=cfg.residual_pooling, + norm_layer=norm_layer, + drop_path=dpr[i], + ) + embed_dim = dim_out + feat_size = stage.feat_size + self.stages.append(stage) + + self.num_features = embed_dim + self.norm = norm_layer(embed_dim) + self.head = nn.Sequential(OrderedDict([ + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + if self.pos_embed is not None: + trunc_normal_tf_(self.pos_embed, std=0.02) + if self.cls_token is not None: + trunc_normal_tf_(self.cls_token, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_tf_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {k for k, _ in self.named_parameters() + if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', # stem and embed + blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Sequential(OrderedDict([ + ('drop', nn.Dropout(self.drop_rate)), + ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) + ])) + + def forward_features(self, x): + x, feat_size = self.patch_embed(x) + B, N, C = x.shape + + if self.cls_token is not None: + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + if self.pos_embed is not None: + x = x + self.pos_embed + + for stage in self.stages: + x, feat_size = stage(x, feat_size) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + if self.global_pool == 'avg': + x = x[:, self.num_prefix_tokens:].mean(1) + else: + x = x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'stages.0.blocks.0.norm1.weight' in state_dict: + return state_dict + + import re + if 'model_state' in state_dict: + state_dict = state_dict['model_state'] + + depths = getattr(model, 'depths', None) + expand_attn = getattr(model, 'expand_attn', True) + assert depths is not None, 'model requires depth attribute to remap checkpoints' + depth_map = {} + block_idx = 0 + for stage_idx, d in enumerate(depths): + 
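+        # Added worked example (not upstream): this loop builds a flat-block-index -> + # (stage_idx, block_idx_in_stage) map. For mvitv2_tiny, depths=(1, 2, 5, 2): + # depth_map = {0: (0, 0), 1: (1, 0), 2: (1, 1), 3: (2, 0), ..., 7: (2, 4), 8: (3, 0), 9: (3, 1)} + # so a checkpoint key 'blocks.7.attn.qkv.weight' is rewritten below to + # 'stages.2.blocks.4.attn.qkv.weight'.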
depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)}) + block_idx += d + + out_dict = {} + for k, v in state_dict.items(): + k = re.sub( + r'blocks\.(\d+)', + lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}', + k) + + if expand_attn: + k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k) + else: + k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k) + if 'head' in k: + k = k.replace('head.projection', 'head.fc') + out_dict[k] = v + + # for k, v in state_dict.items(): + # if model.pos_embed is not None and k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed( + # v, + # model.pos_embed, + # 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), + # model.patch_embed.grid_size + # ) + + return out_dict + + +def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + MultiScaleVit, variant, pretrained, + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + pretrained_filter_fn=checkpoint_filter_fn, + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def mvitv2_tiny(pretrained=False, **kwargs): + return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_small(pretrained=False, **kwargs): + return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_base(pretrained=False, **kwargs): + return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_large(pretrained=False, **kwargs): + return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs) + + +# @register_model +# def mvitv2_base_in21k(pretrained=False, **kwargs): +# return _create_mvitv2('mvitv2_base_in21k', pretrained=pretrained, **kwargs) +# +# +# @register_model +# def mvitv2_large_in21k(pretrained=False, **kwargs): +# return _create_mvitv2('mvitv2_large_in21k', pretrained=pretrained, **kwargs) +# +# +# @register_model +# def mvitv2_huge_in21k(pretrained=False, **kwargs): +# return _create_mvitv2('mvitv2_huge_in21k', pretrained=pretrained, **kwargs) + + +@register_model +def mvitv2_small_cls(pretrained=False, **kwargs): + return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nasnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..50db1a3d351db0e9caa2002e16b8003b561050f8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nasnet.py @@ -0,0 +1,588 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['NASNetALarge'] + +default_cfgs = { + 'nasnetalarge': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', + 'input_size': (3, 331, 331), + 
'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + 
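+        # Added note (not upstream): every comb_iter sums a left/right pair of branches + # (separable convs and/or pools over the 1x1-reduced x1 and the raw input x); the + # cell output below concatenates comb_iters 1-4, so CellStem0 emits + # 4 * num_channels feature maps at half its input resolution.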
x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class 
FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = 
create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = 
ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__( + self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, + num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same'): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = num_features + self.channel_multiplier = channel_multiplier + self.drop_rate = drop_rate + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvNormAct( + in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, 
out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^conv0|cell_stem_[01]', + blocks=[ + (r'^cell_(\d+)', None), + (r'^reduction_cell_0', (6,)), + (r'^reduction_cell_1', (12,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + 
self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward_head(self, x): + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, variant, pretrained, + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs): + """NASNet-A large model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nest.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nest.py new file mode 100644 index 0000000000000000000000000000000000000000..f626a2e61b5b6137170f42e7b8bf8f1f62d7e48f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nest.py @@ -0,0 +1,486 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implementation of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer.
The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply, checkpoint_seq +from .layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_ +from .layers import _assert +from .layers import create_conv2d, create_pool2d, to_ntuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # (weights from official Google JAX impl) + 'nest_base': _cfg(), + 'nest_small': _cfg(), + 'nest_tiny': _cfg(), + 'jx_nest_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_base-8bc41011.pth'), + 'jx_nest_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_small-422eaded.pth'), + 'jx_nest_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_tiny-e3428fb9.pth'), +} + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + 
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') + _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + _assert(H % block_size == 0, '`block_size` must divide input height evenly') + _assert(W % block_size == 0, '`block_size` must divide input width evenly') + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +@register_notrace_function # reason: int receives Proxy +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, + mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rates=[], + norm_layer=None, act_layer=None, pad_type=''): + super().__init__() + self.block_size = block_size + self.grad_checkpointing = False + + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path_rates): + assert len(drop_path_rates) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rates[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + + def 
forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) # (B, T, N, C') + x = x + self.pos_embed + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.transformer_encoder, x) + else: + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__( + self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, + pad_type='', weight_init='', global_pool='avg' + ): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! 
+ - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' + + # Block edge size in units of patches + # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the + # number of blocks along edge of image + self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) + + # Patch embedding + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False) + self.num_patches = self.patch_embed.num_patches + self.seq_length = self.num_patches // self.num_blocks[0] + + # Build up each hierarchical level + levels = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_dim = None + curr_stride = 4 + for i in range(len(self.num_blocks)): + dim = embed_dims[i] + levels.append(NestLevel( + self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, + mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type)) + self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] + prev_dim = dim + curr_stride *= 2 + self.levels = nn.Sequential(*levels) + + # Final normalization layer + self.norm = norm_layer(embed_dims[-1]) + + # Classifier + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(weight_init) + + @torch.jit.ignore + def init_weights(self, mode=''): + assert mode in ('nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
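+ # 'nlhb' (negative log head bias) initialises the classifier bias to
+ # -log(num_classes), so the freshly initialised head assigns each class a
+ # probability of roughly 1/num_classes and the starting loss stays close
+ # to log(num_classes).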
+ for level in self.levels: + trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) + named_apply(partial(_init_nest_weights, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {f'levels.{i}.pos_embed' for i in range(len(self.levels))} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embed', # stem and embed + blocks=[ + (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), + (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.levels: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.levels(x) + # Layer norm done over channel dim only (to NHWC and back) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): + """ NesT weight initialization + Can replicate Jax implementation. Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + Nest, variant, pretrained, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs)
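+ # pretrained_filter_fn hooks checkpoint_filter_fn into weight loading, so
+ # the (1, T, N, C) position embeddings are resized via resize_pos_embed
+ # above when a checkpoint was trained at a different input resolution.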
+ + return model + + +@register_model +def nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('jx_nest_tiny', pretrained=pretrained, **model_kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nfnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nfnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e65151f4b9108ba19143cba01ac282b4c3f3c973 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/nfnet.py @@ -0,0 +1,893 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. 
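+
+Usage sketch (an editor's illustration, not part of the upstream file; it
+assumes the vendored package is importable as `custom_timm`, consistent with
+the imports below):
+
+    import torch
+    from custom_timm.models.nfnet import nf_resnet50
+
+    model = nf_resnet50(pretrained=False).eval()  # randomly initialised
+    with torch.no_grad():
+        logits = model(torch.randn(1, 3, 224, 224))
+    print(logits.shape)  # torch.Size([1, 1000])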
+""" +import math +from dataclasses import dataclass, field +from collections import OrderedDict +from typing import Tuple, Optional +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_module +from .helpers import build_model_with_cfg, checkpoint_seq +from .registry import register_model +from .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\ + get_act_layer, get_act_fn, get_attn, make_divisible + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + dm_nfnet_f0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9), + dm_nfnet_f1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91), + dm_nfnet_f2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92), + dm_nfnet_f3=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94), + dm_nfnet_f4=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951), + dm_nfnet_f5=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954), + dm_nfnet_f6=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956), + + nfnet_f0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 
7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + hf_hub_id='timm/eca_nfnet_l0', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0), + eca_nfnet_l2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), crop_pct=1.0), + eca_nfnet_l3=_dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), crop_pct=1.0), + + nf_regnet_b0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), + nf_regnet_b1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + nf_regnet_b2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + nf_regnet_b3=_dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + nf_regnet_b4=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + nf_regnet_b5=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), + + nf_resnet26=_dcfg(url='', first_conv='stem.conv'), + nf_resnet50=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + nf_resnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_seresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'), +) + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 
'silu' + + +def _nfres_cfg( + depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, + group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, + num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) + return cfg + + +def _nfnet_cfg( + depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., + act_layer='gelu', attn_layer='se', attn_kwargs=None): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, + bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, + attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True): + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, + bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, + num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5)) + return cfg + + +model_cfgs = dict( + # NFNet-F models w/ GELU compatible with DeepMind weights + dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), + dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), + dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), + dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), + dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), + dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), + dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), + + # NFNet-F models w/ GELU + nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), + nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), + nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), + nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), + nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), + nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), + nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), + nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), + + # Experimental 'light' versions of NFNet-F that are a little leaner + nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), + eca_nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l1=_nfnet_cfg( + depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l2=_nfnet_cfg( + depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l3=_nfnet_cfg( + depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + + # EffNet influenced RegNet defs. + # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts.
I round to ch_div=8. + nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + # FIXME add B6-B8 + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + +) + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301 +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. 
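+ Computes (roughly) ``out = shortcut + alpha * branch(act(x) * beta)``: the
+ caller sets ``beta = 1 / sqrt(expected_var)`` to renormalise the residual
+ input to unit variance, and ``alpha`` (0.2 in the default configs) controls
+ how quickly variance grows again from block to block.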
+ """ + + def __init__( + self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, + alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, + skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) + if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD + out = out * self.alpha + shortcut + return out + + +def create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 
2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__( + self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + drop_rate=0., drop_path_rate=0. + ): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." 
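+ # The constants in _nonlin_gamma rescale each activation so a unit-variance
+ # Gaussian input keeps unit variance at the output; depending on gamma_in_act
+ # they are applied inside the activation itself (matching the official JAX
+ # impl) or folded into the ScaledStdConv weight standardisation, which has
+ # less performance impact in PyTorch.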
+ conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. 
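+ # (i.e. a 1x1 conv that widens prev_chs to width_factor * cfg.num_features
+ # ahead of the global pool, so the classifier sees a wider feature space)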
+ self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=[ + (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), + (r'^final_conv', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stages, x) + else: + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward_head(self, x): + return self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, variant, pretrained, + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - 
https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs): + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs): + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs): + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs): + """ ECA-NFNet-L1 w/ SiLU + My 
experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs): + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs): + """ ECA-NFNet-L3 w/ SiLU + My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B0 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B3 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b4(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B4 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b5(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B5 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet26(pretrained=False, **kwargs): + """ Normalization-Free ResNet-26 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet50(pretrained=False, **kwargs): + """ Normalization-Free ResNet-50 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet101(pretrained=False, **kwargs): + """ Normalization-Free ResNet-101 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return 
_create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet26(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet26 + """ + return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet50(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet50 + """ + return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet101(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet101 + """ + return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet26(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet26 + """ + return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet50(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet50 + """ + return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet101(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet101 + """ + return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pit.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd79c0638fccbe52b91eab348f5abf61bdac67e --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pit.py @@ -0,0 +1,404 @@ +""" Pooling-based Vision Transformer (PiT) in PyTorch + +A PyTorch implementation of Pooling-based Vision Transformers as described in +'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 + +This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. + +Modifications for timm by / Copyright 2020 Ross Wightman +""" +# PiT +# Copyright 2021-present NAVER Corp.
+# Apache License v2.0 + +import math +import re +from copy import deepcopy +from functools import partial +from typing import Tuple + +import torch +from torch import nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import trunc_normal_, to_2tuple +from .registry import register_model +from .vision_transformer import Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # deit models (FB weights) + 'pit_ti_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'), + 'pit_xs_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'), + 'pit_s_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'), + 'pit_b_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'), + 'pit_ti_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth', + classifier=('head', 'head_dist')), + 'pit_xs_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth', + classifier=('head', 'head_dist')), + 'pit_s_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth', + classifier=('head', 'head_dist')), + 'pit_b_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth', + classifier=('head', 'head_dist')), +} + + +class SequentialTuple(nn.Sequential): + """ This module exists to work around torchscript typing issues list -> list""" + def __init__(self, *args): + super(SequentialTuple, self).__init__(*args) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + for module in self: + x = module(x) + return x + + +class Transformer(nn.Module): + def __init__( + self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None): + super(Transformer, self).__init__() + self.layers = nn.ModuleList([]) + embed_dim = base_dim * heads + + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=heads, + mlp_ratio=mlp_ratio, + qkv_bias=True, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_prob[i], + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ) + for i in range(depth)]) + + self.pool = pool + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + x, cls_tokens = x + B, C, H, W = x.shape + token_length = cls_tokens.shape[1] + + x = x.flatten(2).transpose(1, 2) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.blocks(x) + + cls_tokens = x[:, :token_length] + x = x[:, token_length:] + x = x.transpose(1, 2).reshape(B, C, H, W) + + if self.pool is not None: + x, cls_tokens = self.pool(x, cls_tokens) + return x, cls_tokens + + +class ConvHeadPooling(nn.Module): + def __init__(self, in_feature, out_feature, stride, 
padding_mode='zeros'): + super(ConvHeadPooling, self).__init__() + + self.conv = nn.Conv2d( + in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, + padding_mode=padding_mode, groups=in_feature) + self.fc = nn.Linear(in_feature, out_feature) + + def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: + x = self.conv(x) + cls_token = self.fc(cls_token) + return x, cls_token + + +class ConvEmbedding(nn.Module): + def __init__(self, in_channels, out_channels, patch_size, stride, padding): + super(ConvEmbedding, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) + + def forward(self, x): + x = self.conv(x) + return x + + +class PoolingVisionTransformer(nn.Module): + """ Pooling-based Vision Transformer + + A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers' + - https://arxiv.org/abs/2103.16302 + """ + def __init__( + self, img_size, patch_size, stride, base_dims, depth, heads, + mlp_ratio, num_classes=1000, in_chans=3, global_pool='token', + distilled=False, attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0): + super(PoolingVisionTransformer, self).__init__() + assert global_pool in ('token',) + + padding = 0 + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1) + width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1) + + self.base_dims = base_dims + self.heads = heads + self.num_classes = num_classes + self.global_pool = global_pool + self.num_tokens = 2 if distilled else 1 + + self.patch_size = patch_size + self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width)) + self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding) + + self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0])) + self.pos_drop = nn.Dropout(p=drop_rate) + + transformers = [] + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] + for stage in range(len(depth)): + pool = None + if stage < len(heads) - 1: + pool = ConvHeadPooling( + base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2) + transformers += [Transformer( + base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage]) + ] + self.transformers = SequentialTuple(*transformers) + self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) + self.num_features = self.embed_dim = base_dims[-1] * heads[-1] + + # Classifier head + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + self.distilled_training = False # must set this True to train w/ distillation token + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + @torch.jit.ignore + def set_distilled_training(self, enable=True): + self.distilled_training = enable + + @torch.jit.ignore + def 
set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + def get_classifier(self): + if self.head_dist is not None: + return self.head, self.head_dist + else: + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + return cls_tokens + + def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: + if self.head_dist is not None: + assert self.global_pool == 'token' + x, x_dist = x[:, 0], x[:, 1] + if not pre_logits: + x = self.head(x) + x_dist = self.head_dist(x_dist) + if self.distilled_training and self.training and not torch.jit.is_scripting(): + # only return separate classification predictions when training in distilled mode + return x, x_dist + else: + # during standard train / finetune, inference average the classifier predictions + return (x + x_dist) / 2 + else: + if self.global_pool == 'token': + x = x[:, 0] + if not pre_logits: + x = self.head(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + PoolingVisionTransformer, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def pit_b_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_b_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_s_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_xs_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_ti_224', pretrained, **model_kwargs) + + +@register_model +def 
pit_b_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs) \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pnasnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..81067845befcfaf5436d112af73359ae4128c2d5 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pnasnet.py @@ -0,0 +1,361 @@ +""" + pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['PNASNet5Large'] + +default_cfgs = { + 'pnasnet5large': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + 
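+        # Each branch is two act -> separable-conv -> BN stacks: only the first
+        # separable conv carries the stride (and, for stem cells, the widening
+        # to out_channels via middle_channels); the second runs at stride 1.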
self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, out_channels, padding=''): + super(FactorizedReduction, self).__init__() + self.act = nn.ReLU() + self.path_1 = nn.Sequential(OrderedDict([ + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.path_2 = nn.Sequential(OrderedDict([ + ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x_path1 = self.path_1(x) + x_path2 = self.path_2(x) + out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + return out + + +class CellBase(nn.Module): + + def cell_forward(self, x_left, x_right): + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) + x_comb_iter_3_right = self.comb_iter_3_right(x_right) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_left) + if self.comb_iter_4_right is not None: + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + else: + x_comb_iter_4_right = x_right + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem0(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(CellStem0, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_0_right = nn.Sequential(OrderedDict([ + ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), + ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), + ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), + ])) + + self.comb_iter_1_left = 
BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) + self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) + + def forward(self, x_left): + x_right = self.conv_1x1(x_left) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class Cell(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', + is_reduction=False, match_prev_layer_dims=False): + super(Cell, self).__init__() + + # If `is_reduction` is set to `True` stride 2 is used for + # convolution and pooling layers to reduce the spatial size of + # the output of a cell approximately by a factor of 2. + stride = 2 if is_reduction else 1 + + # If `match_prev_layer_dimensions` is set to `True` + # `FactorizedReduction` is used to reduce the spatial size + # of the left input of a cell approximately by a factor of 2. + self.match_prev_layer_dimensions = match_prev_layer_dims + if match_prev_layer_dims: + self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) + else: + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) + self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) + if is_reduction: + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) + else: + self.comb_iter_4_right = None + + def forward(self, x_left, x_right): + x_left = self.conv_prev_1x1(x_left) + x_right = self.conv_1x1(x_right) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class PNASNet5Large(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''): + super(PNASNet5Large, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.num_features = 4320 + assert 
output_stride == 32 + + self.conv_0 = ConvNormAct( + in_chans, 96, kernel_size=3, stride=2, padding=0, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) + + self.cell_stem_1 = Cell( + in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, + match_prev_layer_dims=True, is_reduction=True) + self.cell_0 = Cell( + in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_1 = Cell( + in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_2 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_3 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + + self.cell_4 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, + is_reduction=True) + self.cell_5 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_6 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + self.cell_7 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + + self.cell_8 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, + is_reduction=True) + self.cell_9 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_10 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.cell_11 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.act = nn.ReLU() + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv_0'), + dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), + dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), + dict(num_chs=4320, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)') + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv_0 = self.conv_0(x) + x_stem_0 = self.cell_stem_0(x_conv_0) + x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) + x_cell_0 = self.cell_0(x_stem_0, x_stem_1) + x_cell_1 = self.cell_1(x_stem_1, x_cell_0) + x_cell_2 = self.cell_2(x_cell_0, x_cell_1) + x_cell_3 = self.cell_3(x_cell_1, x_cell_2) + x_cell_4 = self.cell_4(x_cell_2, x_cell_3) + x_cell_5 = self.cell_5(x_cell_3, x_cell_4) + x_cell_6 = self.cell_6(x_cell_4, x_cell_5) + x_cell_7 = self.cell_7(x_cell_5, 
x_cell_6) + x_cell_8 = self.cell_8(x_cell_6, x_cell_7) + x_cell_9 = self.cell_9(x_cell_7, x_cell_8) + x_cell_10 = self.cell_10(x_cell_8, x_cell_9) + x_cell_11 = self.cell_11(x_cell_9, x_cell_10) + x = self.act(x_cell_11) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_pnasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + PNASNet5Large, variant, pretrained, + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def pnasnet5large(pretrained=False, **kwargs): + r"""PNASNet-5 model architecture from the + `"Progressive Neural Architecture Search" + `_ paper. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/poolformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/poolformer.py new file mode 100644 index 0000000000000000000000000000000000000000..ee7167af586b63ae7ee03c8bb609061cf9244c08 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/poolformer.py @@ -0,0 +1,313 @@ +""" PoolFormer implementation + +Paper: `PoolFormer: MetaFormer is Actually What You Need for Vision` - https://arxiv.org/abs/2111.11418 + +Code adapted from official impl at https://github.com/sail-sg/poolformer, original copyright in comment below + +Modifications and additions for timm by / Copyright 2022, Ross Wightman +""" +# Copyright 2021 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
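+# Design note: PoolFormer replaces self-attention with a parameter-free token
+# mixer. The Pooling module below returns pool(x) - x, so together with the
+# residual connections in PoolFormerBlock.forward the two sub-blocks compute
+#   x = x + drop_path(scale_1 * (avg_pool(norm1(x)) - norm1(x)))
+#   x = x + drop_path(scale_2 * mlp(norm2(x)))
+# i.e. subtracting the input inside the mixer keeps each block a pure
+# "pooling delta" on top of the identity path.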
+import os +import copy +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import DropPath, trunc_normal_, to_2tuple, ConvMlp, GroupNorm1 +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + poolformer_s12=_cfg( + url='https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s12.pth.tar', + crop_pct=0.9), + poolformer_s24=_cfg( + url='https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s24.pth.tar', + crop_pct=0.9), + poolformer_s36=_cfg( + url='https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s36.pth.tar', + crop_pct=0.9), + poolformer_m36=_cfg( + url='https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m36.pth.tar', + crop_pct=0.95), + poolformer_m48=_cfg( + url='https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m48.pth.tar', + crop_pct=0.95), +) + + +class PatchEmbed(nn.Module): + """ Patch Embedding that is implemented by a layer of conv. + Input: tensor in shape [B, C, H, W] + Output: tensor in shape [B, C, H/stride, W/stride] + """ + + def __init__(self, in_chs=3, embed_dim=768, patch_size=16, stride=16, padding=0, norm_layer=None): + super().__init__() + patch_size = to_2tuple(patch_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + self.proj = nn.Conv2d(in_chs, embed_dim, kernel_size=patch_size, stride=stride, padding=padding) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + x = self.proj(x) + x = self.norm(x) + return x + + +class Pooling(nn.Module): + def __init__(self, pool_size=3): + super().__init__() + self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) + + def forward(self, x): + return self.pool(x) - x + + +class PoolFormerBlock(nn.Module): + """ + Args: + dim: embedding dim + pool_size: pooling size + mlp_ratio: mlp expansion ratio + act_layer: activation + norm_layer: normalization + drop: dropout rate + drop path: Stochastic Depth, refer to https://arxiv.org/abs/1603.09382 + use_layer_scale, --layer_scale_init_value: LayerScale, refer to https://arxiv.org/abs/2103.17239 + """ + + def __init__( + self, dim, pool_size=3, mlp_ratio=4., + act_layer=nn.GELU, norm_layer=GroupNorm1, + drop=0., drop_path=0., layer_scale_init_value=1e-5): + + super().__init__() + + self.norm1 = norm_layer(dim) + self.token_mixer = Pooling(pool_size=pool_size) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = ConvMlp(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + if layer_scale_init_value: + self.layer_scale_1 = nn.Parameter(layer_scale_init_value * torch.ones(dim)) + self.layer_scale_2 = nn.Parameter(layer_scale_init_value * torch.ones(dim)) + else: + self.layer_scale_1 = None + self.layer_scale_2 = None + + def forward(self, x): + if self.layer_scale_1 is not None: + x = x + self.drop_path1(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * self.token_mixer(self.norm1(x))) + x = x + self.drop_path2(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(self.norm2(x))) + else: + x = x + self.drop_path1(self.token_mixer(self.norm1(x))) + x = x + self.drop_path2(self.mlp(self.norm2(x))) + return x + + +def basic_blocks( + dim, index, layers, + pool_size=3, mlp_ratio=4., + act_layer=nn.GELU, norm_layer=GroupNorm1, + drop_rate=.0, drop_path_rate=0., + layer_scale_init_value=1e-5, +): + """ generate PoolFormer blocks for a stage """ + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(PoolFormerBlock( + dim, pool_size=pool_size, mlp_ratio=mlp_ratio, + act_layer=act_layer, norm_layer=norm_layer, + drop=drop_rate, drop_path=block_dpr, + layer_scale_init_value=layer_scale_init_value, + )) + blocks = nn.Sequential(*blocks) + return blocks + + +class PoolFormer(nn.Module): + """ PoolFormer + """ + + def __init__( + self, + layers, + embed_dims=(64, 128, 320, 512), + mlp_ratios=(4, 4, 4, 4), + downsamples=(True, True, True, True), + pool_size=3, + in_chans=3, + num_classes=1000, + global_pool='avg', + norm_layer=GroupNorm1, + act_layer=nn.GELU, + in_patch_size=7, + in_stride=4, + in_pad=2, + down_patch_size=3, + down_stride=2, + down_pad=1, + drop_rate=0., drop_path_rate=0., + layer_scale_init_value=1e-5, + **kwargs): + + super().__init__() + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = embed_dims[-1] + self.grad_checkpointing = False + + self.patch_embed = PatchEmbed( + patch_size=in_patch_size, stride=in_stride, padding=in_pad, + in_chs=in_chans, embed_dim=embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(layers)): + network.append(basic_blocks( + embed_dims[i], i, layers, + pool_size=pool_size, mlp_ratio=mlp_ratios[i], + act_layer=act_layer, norm_layer=norm_layer, + drop_rate=drop_rate, drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value) + ) + if i < len(layers) - 1 and (downsamples[i] or embed_dims[i] != embed_dims[i + 1]): + # downsampling between stages + network.append(PatchEmbed( + in_chs=embed_dims[i], embed_dim=embed_dims[i + 1], + patch_size=down_patch_size, stride=down_stride, padding=down_pad) + ) + + self.network = nn.Sequential(*network) + self.norm = norm_layer(self.num_features) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + # init for classification + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^patch_embed', # stem and embed + blocks=[ + (r'^network\.(\d+).*\.proj', (99999,)), + (r'^network\.(\d+)', None) if coarse else (r'^network\.(\d+)\.(\d+)', None), + (r'^norm', (99999,)) + ], + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + 
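+    # NB: forward_features below calls self.network directly, so the
+    # grad_checkpointing flag set above is stored but never consumed in this
+    # copy of the model (checkpoint_seq is imported but unused here).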
@torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.network(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean([-2, -1]) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_poolformer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg(PoolFormer, variant, pretrained, **kwargs) + return model + + +@register_model +def poolformer_s12(pretrained=False, **kwargs): + """ PoolFormer-S12 model, Params: 12M """ + model = _create_poolformer('poolformer_s12', pretrained=pretrained, layers=(2, 2, 6, 2), **kwargs) + return model + + +@register_model +def poolformer_s24(pretrained=False, **kwargs): + """ PoolFormer-S24 model, Params: 21M """ + model = _create_poolformer('poolformer_s24', pretrained=pretrained, layers=(4, 4, 12, 4), **kwargs) + return model + + +@register_model +def poolformer_s36(pretrained=False, **kwargs): + """ PoolFormer-S36 model, Params: 31M """ + model = _create_poolformer( + 'poolformer_s36', pretrained=pretrained, layers=(6, 6, 18, 6), layer_scale_init_value=1e-6, **kwargs) + return model + + +@register_model +def poolformer_m36(pretrained=False, **kwargs): + """ PoolFormer-M36 model, Params: 56M """ + layers = (6, 6, 18, 6) + embed_dims = (96, 192, 384, 768) + model = _create_poolformer( + 'poolformer_m36', pretrained=pretrained, layers=layers, embed_dims=embed_dims, + layer_scale_init_value=1e-6, **kwargs) + return model + + +@register_model +def poolformer_m48(pretrained=False, **kwargs): + """ PoolFormer-M48 model, Params: 73M """ + layers = (8, 8, 24, 8) + embed_dims = (96, 192, 384, 768) + model = _create_poolformer( + 'poolformer_m48', pretrained=pretrained, layers=layers, embed_dims=embed_dims, + layer_scale_init_value=1e-6, **kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet101d_pruned.txt b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..2589b2f9dd3f0d1e02e1d5ddc1fbcd5c143e02c6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 
1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 
1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 
499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet50d_pruned.txt b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..9a8b2bf50e0631dce74d66a1a98e26cae10572a7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 
1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b1_pruned.txt b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..0972b527612b283fd242cc5eaeb6e767ea106c66 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 
1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 
5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 
1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 
1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 
5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 
1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b2_pruned.txt b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..6e3fadee3e9f92eaade96afd8691a5e4437551ee --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 
3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 
1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 
1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 
5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 
1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b3_pruned.txt b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000000000000000000000000000000000000..489781736de08e5cf40bf76528a735fff4a3f61c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 
3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 
1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 
1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 
5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 
1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 
1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pvt_v2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pvt_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..6e024f43c05c624fada3b682b7efedbf41e51008 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/pvt_v2.py @@ -0,0 +1,476 @@ +""" Pyramid Vision Transformer v2 + +@misc{wang2021pvtv2, + title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, + author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and + Tong Lu and Ping Luo and Ling Shao}, + 
year={2021}, + eprint={2106.13797}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} + +Based on Apache 2.0 licensed code at https://github.com/whai362/PVT + +Modifications and timm support by / Copyright 2022, Ross Wightman +""" + +import math +from functools import partial +from typing import Tuple, List, Callable, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, to_ntuple, trunc_normal_ +from .registry import register_model + +__all__ = ['PyramidVisionTransformerV2'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False, + **kwargs + } + + +default_cfgs = { + 'pvt_v2_b0': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b0.pth'), + 'pvt_v2_b1': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b1.pth'), + 'pvt_v2_b2': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'), + 'pvt_v2_b3': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b3.pth'), + 'pvt_v2_b4': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b4.pth'), + 'pvt_v2_b5': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b5.pth'), + 'pvt_v2_b2_li': _cfg(url='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2_li.pth') +} + + +class MlpWithDepthwiseConv(nn.Module): + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + drop=0., extra_relu=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.relu = nn.ReLU() if extra_relu else nn.Identity() + self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x, feat_size: List[int]): + x = self.fc1(x) + B, N, C = x.shape + x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1]) + x = self.relu(x) + x = self.dwconv(x) + x = x.flatten(2).transpose(1, 2) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + sr_ratio=1, + linear_attn=False, + qkv_bias=True, + attn_drop=0., + proj_drop=0. + ): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
+ + self.dim = dim + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + if not linear_attn: + self.pool = None + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + self.act = None + else: + self.pool = nn.AdaptiveAvgPool2d(7) + self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1) + self.norm = nn.LayerNorm(dim) + self.act = nn.GELU() + + def forward(self, x, feat_size: List[int]): + B, N, C = x.shape + H, W = feat_size + q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + if self.pool is not None: + x_ = x.permute(0, 2, 1).reshape(B, C, H, W) + x_ = self.sr(self.pool(x_)).reshape(B, C, -1).permute(0, 2, 1) + x_ = self.norm(x_) + x_ = self.act(x_) + kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + else: + if self.sr is not None: + x_ = x.permute(0, 2, 1).reshape(B, C, H, W) + x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1) + x_ = self.norm(x_) + kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + else: + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., sr_ratio=1, linear_attn=False, qkv_bias=False, + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + sr_ratio=sr_ratio, + linear_attn=linear_attn, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = MlpWithDepthwiseConv( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + act_layer=act_layer, + drop=drop, + extra_relu=linear_attn + ) + + def forward(self, x, feat_size: List[int]): + x = x + self.drop_path(self.attn(self.norm1(x), feat_size)) + x = x + self.drop_path(self.mlp(self.norm2(x), feat_size)) + + return x + + +class OverlapPatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768): + super().__init__() + patch_size = to_2tuple(patch_size) + assert max(patch_size) > stride, "Set larger patch_size than stride" + self.patch_size = patch_size + self.proj = nn.Conv2d( + in_chans, embed_dim, kernel_size=patch_size, stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2)) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x): + x = self.proj(x) + feat_size = x.shape[-2:] + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + return x, feat_size + + +class PyramidVisionTransformerStage(nn.Module): + def __init__( + self, + dim: int, + dim_out: int, + depth: int, + downsample: bool = True, + num_heads: int = 8, + sr_ratio: int = 1, + linear_attn: bool = False, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop: float = 0., + attn_drop: float = 0., + drop_path: Union[List[float], float] = 0.0, + norm_layer: Callable = nn.LayerNorm, + ): + super().__init__() + self.grad_checkpointing = False + + if downsample: + self.downsample = OverlapPatchEmbed( + patch_size=3, + stride=2, + in_chans=dim, + embed_dim=dim_out) + else: + assert dim == dim_out + self.downsample = None + + self.blocks = nn.ModuleList([Block( + dim=dim_out, + num_heads=num_heads, + sr_ratio=sr_ratio, + linear_attn=linear_attn, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + ) for i in range(depth)]) + + self.norm = norm_layer(dim_out) + + def forward(self, x, feat_size: List[int]) -> Tuple[torch.Tensor, List[int]]: + if self.downsample is not None: + x, feat_size = self.downsample(x) + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x, feat_size) + else: + x = blk(x, feat_size) + x = self.norm(x) + x = x.reshape(x.shape[0], feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous() + return x, feat_size + + +class PyramidVisionTransformerV2(nn.Module): + def __init__( + self, + img_size=None, + in_chans=3, + num_classes=1000, + global_pool='avg', + depths=(3, 4, 6, 3), + embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), + sr_ratios=(8, 4, 2, 1), + mlp_ratios=(8., 8., 4., 4.), + qkv_bias=True, + linear=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.num_classes = num_classes + assert global_pool in ('avg', '') + self.global_pool = global_pool + self.depths = depths + num_stages = len(depths) + mlp_ratios = to_ntuple(num_stages)(mlp_ratios) + num_heads = to_ntuple(num_stages)(num_heads) + sr_ratios = to_ntuple(num_stages)(sr_ratios) + assert(len(embed_dims)) == num_stages + + self.patch_embed = OverlapPatchEmbed( + patch_size=7, + stride=4, + in_chans=in_chans, + embed_dim=embed_dims[0]) + + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + cur = 0 + prev_dim = embed_dims[0] + self.stages = nn.ModuleList() + for i in 
range(num_stages):
+            self.stages.append(PyramidVisionTransformerStage(
+                dim=prev_dim,
+                dim_out=embed_dims[i],
+                depth=depths[i],
+                downsample=i > 0,
+                num_heads=num_heads[i],
+                sr_ratio=sr_ratios[i],
+                mlp_ratio=mlp_ratios[i],
+                linear_attn=linear,
+                qkv_bias=qkv_bias,
+                drop=drop_rate,
+                attn_drop=attn_drop_rate,
+                drop_path=dpr[i],
+                norm_layer=norm_layer
+            ))
+            prev_dim = embed_dims[i]
+            cur += depths[i]
+
+        # classification head
+        self.num_features = embed_dims[-1]
+        self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.Conv2d):
+            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+            fan_out //= m.groups
+            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+            if m.bias is not None:
+                m.bias.data.zero_()
+
+    def freeze_patch_emb(self):
+        # NOTE: setting requires_grad on a Module is a no-op; freeze the parameters instead
+        for p in self.patch_embed.parameters():
+            p.requires_grad = False
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {}
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        matcher = dict(
+            stem=r'^patch_embed',  # stem and embed
+            blocks=r'^stages\.(\d+)'
+        )
+        return matcher
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in self.stages:
+            s.grad_checkpointing = enable
+
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=None):
+        self.num_classes = num_classes
+        if global_pool is not None:
+            assert global_pool in ('avg', '')
+            self.global_pool = global_pool
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x):
+        x, feat_size = self.patch_embed(x)
+        for stage in self.stages:
+            x, feat_size = stage(x, feat_size=feat_size)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        if self.global_pool:
+            x = x.mean(dim=(-1, -2))
+        return x if pre_logits else self.head(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _checkpoint_filter_fn(state_dict, model):
+    """ Remap original checkpoints -> timm """
+    if 'patch_embed.proj.weight' in state_dict:
+        return state_dict  # non-original checkpoint, no remapping needed
+
+    out_dict = {}
+    import re
+    for k, v in state_dict.items():
+        if k.startswith('patch_embed'):
+            k = k.replace('patch_embed1', 'patch_embed')
+            k = k.replace('patch_embed2', 'stages.1.downsample')
+            k = k.replace('patch_embed3', 'stages.2.downsample')
+            k = k.replace('patch_embed4', 'stages.3.downsample')
+        k = k.replace('dwconv.dwconv', 'dwconv')
+        k = re.sub(r'block(\d+).(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k)
+        k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k)
+        out_dict[k] = v
+    return out_dict
+
+
+def _create_pvt2(variant, pretrained=False, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+    model = build_model_with_cfg(
+        PyramidVisionTransformerV2, variant, pretrained,
+        pretrained_filter_fn=_checkpoint_filter_fn,
+        **kwargs
+    )
+    return model
+
+
+@register_model
+def pvt_v2_b0(pretrained=False, **kwargs):
+    model_kwargs = dict(
+        depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8),
+        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return _create_pvt2('pvt_v2_b0',
pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b1(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b2(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b3(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b4(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b5(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + mlp_ratios=(4, 4, 4, 4), norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **model_kwargs) + + +@register_model +def pvt_v2_b2_li(pretrained=False, **kwargs): + model_kwargs = dict( + depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), + norm_layer=partial(nn.LayerNorm, eps=1e-6), linear=True, **kwargs) + return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **model_kwargs) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/registry.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..9f58060fd0fdf1a2b3256327d479efd0bba77fc0 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/registry.py @@ -0,0 +1,159 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import sys +import re +import fnmatch +from collections import defaultdict +from copy import deepcopy + +__all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'is_pretrained_cfg_key', 'has_pretrained_cfg_key', 'get_pretrained_cfg_value', 'is_model_pretrained'] + +_module_to_models = defaultdict(set) # dict of sets to check membership of model in module +_model_to_module = {} # mapping of model names to module names +_model_entrypoints = {} # mapping of model names to entrypoint fns +_model_has_pretrained = set() # set of model names that have pretrained weight url present +_model_pretrained_cfgs = dict() # central repo for model default_cfgs + + +def register_model(fn): + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to __all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] + + # add entries to registry dict/sets + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + 
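+    # reverse index (module name -> set of model names) backing list_models(module=...)
+    # and list_modules() below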
_module_to_models[module_name].add(model_name)
+    has_valid_pretrained = False  # check if model has a pretrained url to allow filtering on this
+    if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
+        # this will catch all models that have entrypoint matching cfg key, but miss any aliasing
+        # entrypoints or non-matching combos
+        cfg = mod.default_cfgs[model_name]
+        has_valid_pretrained = (
+            ('url' in cfg and 'http' in cfg['url']) or
+            ('file' in cfg and cfg['file']) or
+            ('hf_hub_id' in cfg and cfg['hf_hub_id'])
+        )
+        _model_pretrained_cfgs[model_name] = mod.default_cfgs[model_name]
+    if has_valid_pretrained:
+        _model_has_pretrained.add(model_name)
+    return fn
+
+
+def _natural_key(string_):
+    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
+
+
+def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False):
+    """ Return list of available model names, sorted alphabetically
+
+    Args:
+        filter (str) - Wildcard filter string that works with fnmatch
+        module (str) - Limit model selection to a specific sub-module (ie 'gen_efficientnet')
+        pretrained (bool) - Include only models with pretrained weights if True
+        exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter
+        name_matches_cfg (bool) - Include only models w/ model_name matching default_cfg name (excludes some aliases)
+
+    Example:
+        list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
+        list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in module 'resnet'
+    """
+    if module:
+        all_models = list(_module_to_models[module])
+    else:
+        all_models = _model_entrypoints.keys()
+    if filter:
+        models = []
+        include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
+        for f in include_filters:
+            include_models = fnmatch.filter(all_models, f)  # include these models
+            if len(include_models):
+                models = set(models).union(include_models)
+    else:
+        models = all_models
+    if exclude_filters:
+        if not isinstance(exclude_filters, (tuple, list)):
+            exclude_filters = [exclude_filters]
+        for xf in exclude_filters:
+            exclude_models = fnmatch.filter(models, xf)  # exclude these models
+            if len(exclude_models):
+                models = set(models).difference(exclude_models)
+    if pretrained:
+        models = _model_has_pretrained.intersection(models)
+    if name_matches_cfg:
+        models = set(_model_pretrained_cfgs).intersection(models)
+    return list(sorted(models, key=_natural_key))
+
+
+def is_model(model_name):
+    """ Check if a model name exists
+    """
+    return model_name in _model_entrypoints
+
+
+def model_entrypoint(model_name):
+    """Fetch a model entrypoint for specified model name
+    """
+    return _model_entrypoints[model_name]
+
+
+def list_modules():
+    """ Return list of module names that contain models / model entrypoints
+    """
+    modules = _module_to_models.keys()
+    return list(sorted(modules))
+
+
+def is_model_in_modules(model_name, module_names):
+    """Check if a model exists within a subset of modules
+    Args:
+        model_name (str) - name of model to check
+        module_names (tuple, list, set) - names of modules to search in
+    """
+    assert isinstance(module_names, (tuple, list, set))
+    return any(model_name in _module_to_models[n] for n in module_names)
+
+
+def is_model_pretrained(model_name):
+    return model_name in _model_has_pretrained
+
+
+def get_pretrained_cfg(model_name):
+    if model_name in _model_pretrained_cfgs:
+        return deepcopy(_model_pretrained_cfgs[model_name])
+    return {}
+
+
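+# Usage sketch (illustrative, not part of the original module): entrypoints are
+# registered as a side effect of importing the model modules, after which the
+# helpers here are plain dict/set lookups. Assuming this repo's package layout:
+#
+#   from custom_timm.models.registry import list_models, is_model, model_entrypoint
+#   import custom_timm.models.pvt_v2  # noqa: F401 - importing runs @register_model
+#
+#   names = list_models('pvt_v2_*', pretrained=True)   # fnmatch-style wildcards
+#   assert is_model('pvt_v2_b0')
+#   model = model_entrypoint('pvt_v2_b0')(pretrained=False, num_classes=10)
+
+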
+def has_pretrained_cfg_key(model_name, cfg_key): + """ Query model default_cfgs for existence of a specific key. + """ + if model_name in _model_pretrained_cfgs and cfg_key in _model_pretrained_cfgs[model_name]: + return True + return False + + +def is_pretrained_cfg_key(model_name, cfg_key): + """ Return truthy value for specified model default_cfg key, False if does not exist. + """ + if model_name in _model_pretrained_cfgs and _model_pretrained_cfgs[model_name].get(cfg_key, False): + return True + return False + + +def get_pretrained_cfg_value(model_name, cfg_key): + """ Get a specific model default_cfg value by key. None if it doesn't exist. + """ + if model_name in _model_pretrained_cfgs: + return _model_pretrained_cfgs[model_name].get(cfg_key, None) + return None \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/regnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/regnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3ead5d9e9fe6a060e1559c2affed4698e3a4b57f --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/regnet.py @@ -0,0 +1,711 @@ +"""RegNet + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names. + +Weights from original impl have been modified +* first layer from BGR -> RGB as most PyTorch models are +* removed training specific dict entries from checkpoints and keep model state_dict only +* remap names to match the ones here + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from dataclasses import dataclass +from functools import partial +from typing import Optional, Union, Callable + +import numpy as np +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply, checkpoint_seq +from .layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct +from .layers import get_act_layer, get_norm_act_layer, create_conv2d +from .registry import register_model + + +@dataclass +class RegNetCfg: + depth: int = 21 + w0: int = 80 + wa: float = 42.63 + wm: float = 2.66 + group_size: int = 24 + bottle_ratio: float = 1. + se_ratio: float = 0. 
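+    # w0 (initial width), wa (width slope) and wm (width multiplier) define the paper's
+    # quantized linear width ramp u_j = w0 + wa * j; generate_regnet() below snaps it to
+    # powers of wm and rounds the resulting widths to multiples of 8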
+ stem_width: int = 32 + downsample: Optional[str] = 'conv1x1' + linear_out: bool = False + preact: bool = False + num_features: int = 0 + act_layer: Union[str, Callable] = 'relu' + norm_layer: Union[str, Callable] = 'batchnorm' + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + # RegNet-X + regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), + regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22), + regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16), + regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16), + regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18), + regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25), + regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23), + regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17), + regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23), + regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), + regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), + regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), + + # RegNet-Y + regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25), + regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25), + regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25), + regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25), + regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25), + regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25), + regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25), + regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25), + regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25), + regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25), + regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25), + regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25), + + # Experimental + regnety_040s_gn=RegNetCfg( + w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25, + act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)), + + # regnetv = 'preact regnet y' + regnetv_040=RegNetCfg( + depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'), + regnetv_064=RegNetCfg( + depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu', + downsample='avg'), + + # RegNet-Z (unverified) + regnetz_005=RegNetCfg( + depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=1024, act_layer='silu', + ), + regnetz_040=RegNetCfg( + depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=0, act_layer='silu', + ), + regnetz_040h=RegNetCfg( + depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, + downsample=None, linear_out=True, num_features=1536, act_layer='silu', + ), +) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 
+ 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'), + regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'), + regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'), + regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'), + regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'), + regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'), + regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'), + regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'), + regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'), + regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'), + regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'), + regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'), + + regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'), + regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'), + regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'), + regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'), + regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'), + regnety_032=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_040=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_064=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_080=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'), + regnety_160=_cfg( + url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth', # from Facebook DeiT GitHub repository + crop_pct=1.0, test_input_size=(3, 288, 288)), + 
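+    # NOTE: cfgs above with crop_pct=1.0 and test_input_size=(3, 288, 288) advertise a
+    # larger eval resolution than the 224x224 train size (usual timm test-time convention)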
regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'), + + regnety_040s_gn=_cfg(url=''), + regnetv_040=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth', + first_conv='stem', crop_pct=1.0, test_input_size=(3, 288, 288)), + regnetv_064=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth', + first_conv='stem', crop_pct=1.0, test_input_size=(3, 288, 288)), + + regnetz_005=_cfg(url=''), + regnetz_040=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), + regnetz_040h=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), +) + + +def quantize_float(f, q): + """Converts a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, q=8): + """Generates per block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % q == 0 + # TODO dWr scaling? + # depth = int(depth * (scale ** 0.1)) + # width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = width_initial * np.power(width_mult, width_exps) + widths = np.round(np.divide(widths, q)) * q + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + groups = np.array([group_size for _ in range(num_stages)]) + return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist() + + +def downsample_conv(in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + if preact: + return create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation) + else: + return ConvNormAct( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, apply_act=False) + + +def downsample_avg(in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False): + """ AvgPool Downsampling as in 'D' ResNet variants. 
This is not in RegNet space but I might experiment."""
+    norm_layer = norm_layer or nn.BatchNorm2d
+    avg_stride = stride if dilation == 1 else 1
+    pool = nn.Identity()
+    if stride > 1 or dilation > 1:
+        avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
+        pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
+    if preact:
+        conv = create_conv2d(in_chs, out_chs, 1, stride=1)
+    else:
+        conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False)
+    return nn.Sequential(*[pool, conv])
+
+
+def create_shortcut(
+        downsample_type, in_chs, out_chs, kernel_size, stride, dilation=(1, 1), norm_layer=None, preact=False):
+    assert downsample_type in ('avg', 'conv1x1', '', None)
+    if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
+        dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact)
+        if not downsample_type:
+            return None  # no shortcut, no downsample
+        elif downsample_type == 'avg':
+            return downsample_avg(in_chs, out_chs, **dargs)
+        else:
+            return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs)
+    else:
+        return nn.Identity()  # identity shortcut (no downsample)
+
+
+class Bottleneck(nn.Module):
+    """ RegNet Bottleneck
+
+    This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
+    after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
+    """
+
+    def __init__(
+            self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25,
+            downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
+            drop_block=None, drop_path_rate=0.):
+        super(Bottleneck, self).__init__()
+        act_layer = get_act_layer(act_layer)
+        bottleneck_chs = int(round(out_chs * bottle_ratio))
+        groups = bottleneck_chs // group_size
+
+        cargs = dict(act_layer=act_layer, norm_layer=norm_layer)
+        self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)
+        self.conv2 = ConvNormAct(
+            bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0],
+            groups=groups, drop_layer=drop_block, **cargs)
+        if se_ratio:
+            se_channels = int(round(in_chs * se_ratio))
+            self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer)
+        else:
+            self.se = nn.Identity()
+        self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs)
+        self.act3 = nn.Identity() if linear_out else act_layer()
+        self.downsample = create_shortcut(downsample, in_chs, out_chs, 1, stride, dilation, norm_layer=norm_layer)
+        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
+
+    def zero_init_last(self):
+        nn.init.zeros_(self.conv3.bn.weight)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.se(x)
+        x = self.conv3(x)
+        if self.downsample is not None:
+            # NOTE stuck with downsample as the attr name due to weight compatibility
+            # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity()
+            x = self.drop_path(x) + self.downsample(shortcut)
+        x = self.act3(x)
+        return x
+
+
+class PreBottleneck(nn.Module):
+    """ RegNet Bottleneck
+
+    This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from
+    after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
+ """ + + def __init__( + self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25, + downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + drop_block=None, drop_path_rate=0.): + super(PreBottleneck, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer) + bottleneck_chs = int(round(out_chs * bottle_ratio)) + groups = bottleneck_chs // group_size + + self.norm1 = norm_act_layer(in_chs) + self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1) + self.norm2 = norm_act_layer(bottleneck_chs) + self.conv2 = create_conv2d( + bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=groups) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) + else: + self.se = nn.Identity() + self.norm3 = norm_act_layer(bottleneck_chs) + self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1) + self.downsample = create_shortcut(downsample, in_chs, out_chs, 1, stride, dilation, preact=True) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + pass + + def forward(self, x): + x = self.norm1(x) + shortcut = x + x = self.conv1(x) + x = self.norm2(x) + x = self.conv2(x) + x = self.se(x) + x = self.norm3(x) + x = self.conv3(x) + if self.downsample is not None: + # NOTE stuck with downsample as the attr name due to weight compatibility + # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity() + x = self.drop_path(x) + self.downsample(shortcut) + return x + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__( + self, depth, in_chs, out_chs, stride, dilation, + drop_path_rates=None, block_fn=Bottleneck, **block_kwargs): + super(RegStage, self).__init__() + self.grad_checkpointing = False + + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = (first_dilation, dilation) + dpr = drop_path_rates[i] if drop_path_rates is not None else 0. 
+ name = "b{}".format(i + 1) + self.add_module( + name, block_fn( + block_in_chs, out_chs, stride=block_stride, dilation=block_dilation, + drop_path_rate=dpr, **block_kwargs) + ) + first_dilation = dilation + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.children(), x) + else: + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet-X, Y, and Z Models + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__( + self, cfg: RegNetCfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', + drop_rate=0., drop_path_rate=0., zero_init_last=True): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + # Construct the stem + stem_width = cfg.stem_width + na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) + if cfg.preact: + self.stem = create_conv2d(in_chans, stem_width, 3, stride=2) + else: + self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + per_stage_args, common_args = self._get_stage_args( + cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) + assert len(per_stage_args) == 4 + block_fn = PreBottleneck if cfg.preact else Bottleneck + for i, stage_args in enumerate(per_stage_args): + stage_name = "s{}".format(i + 1) + self.add_module(stage_name, RegStage(in_chs=prev_width, block_fn=block_fn, **stage_args, **common_args)) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + if cfg.num_features: + self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args) + self.num_features = cfg.num_features + else: + final_act = cfg.linear_out or cfg.preact + self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity() + self.num_features = prev_width + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + stage_br = [cfg.bottle_ratio for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_gs = adjust_widths_groups_comp(stage_widths, stage_br, stage_gs) + arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates'] + per_stage_args = [ + dict(zip(arg_names, params)) for params in 
zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr)]
+        common_args = dict(
+            downsample=cfg.downsample, se_ratio=cfg.se_ratio, linear_out=cfg.linear_out,
+            act_layer=cfg.act_layer, norm_layer=cfg.norm_layer)
+        return per_stage_args, common_args
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^stem',
+            blocks=r'^s(\d+)' if coarse else r'^s(\d+)\.b(\d+)',
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in list(self.children())[1:-1]:
+            s.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.s1(x)
+        x = self.s2(x)
+        x = self.s3(x)
+        x = self.s4(x)
+        x = self.final_conv(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        return self.head(x, pre_logits=pre_logits)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _init_weights(module, name='', zero_init_last=False):
+    if isinstance(module, nn.Conv2d):
+        fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
+        fan_out //= module.groups
+        module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+        if module.bias is not None:
+            module.bias.data.zero_()
+    elif isinstance(module, nn.Linear):
+        nn.init.normal_(module.weight, mean=0.0, std=0.01)
+        if module.bias is not None:
+            nn.init.zeros_(module.bias)
+    elif zero_init_last and hasattr(module, 'zero_init_last'):
+        module.zero_init_last()
+
+
+def _filter_fn(state_dict):
+    """ Unwrap checkpoints that nest the weights under a 'model' key """
+    if 'model' in state_dict:
+        # For DeiT-trained regnety_160 pretrained model
+        state_dict = state_dict['model']
+    return state_dict
+
+
+def _create_regnet(variant, pretrained, **kwargs):
+    return build_model_with_cfg(
+        RegNet, variant, pretrained,
+        model_cfg=model_cfgs[variant],
+        pretrained_filter_fn=_filter_fn,
+        **kwargs)
+
+
+@register_model
+def regnetx_002(pretrained=False, **kwargs):
+    """RegNetX-200MF"""
+    return _create_regnet('regnetx_002', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_004(pretrained=False, **kwargs):
+    """RegNetX-400MF"""
+    return _create_regnet('regnetx_004', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_006(pretrained=False, **kwargs):
+    """RegNetX-600MF"""
+    return _create_regnet('regnetx_006', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_008(pretrained=False, **kwargs):
+    """RegNetX-800MF"""
+    return _create_regnet('regnetx_008', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_016(pretrained=False, **kwargs):
+    """RegNetX-1.6GF"""
+    return _create_regnet('regnetx_016', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_032(pretrained=False, **kwargs):
+    """RegNetX-3.2GF"""
+    return _create_regnet('regnetx_032', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_040(pretrained=False, **kwargs):
+    """RegNetX-4.0GF"""
+    return _create_regnet('regnetx_040', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_064(pretrained=False, **kwargs):
+    """RegNetX-6.4GF"""
+    return _create_regnet('regnetx_064', pretrained, **kwargs)
+
+
+@register_model
+def regnetx_080(pretrained=False, **kwargs):
+    """RegNetX-8.0GF"""
+    return _create_regnet('regnetx_080', pretrained, **kwargs)
+
+
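+# Worked example (illustrative, not part of the original file): for regnetx_002
+# (w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), generate_regnet() builds the
+# ramp u_j = 24 + 36.44 * j, snaps each u_j to 24 * 2.49**round(log(u_j / 24, 2.49)),
+# then rounds to a multiple of q=8, yielding per-block widths
+# [24, 56, 152, 152, 152, 152, 368, 368, 368, 368, 368, 368, 368],
+# i.e. four stages of widths (24, 56, 152, 368) with depths (1, 1, 4, 7).
+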
+@register_model +def regnetx_120(pretrained=False, **kwargs): + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs): + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs): + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs): + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs): + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs): + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs): + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs): + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs): + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs): + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs): + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs): + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs): + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs): + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs): + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) + + +@register_model +def regnety_040s_gn(pretrained=False, **kwargs): + """RegNetY-4.0GF w/ GroupNorm """ + return _create_regnet('regnety_040s_gn', pretrained, **kwargs) + + +@register_model +def regnetv_040(pretrained=False, **kwargs): + """""" + return _create_regnet('regnetv_040', pretrained, **kwargs) + + +@register_model +def regnetv_064(pretrained=False, **kwargs): + """""" + return _create_regnet('regnetv_064', pretrained, **kwargs) + + +@register_model +def regnetz_005(pretrained=False, **kwargs): + """RegNetZ-500MF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. + """ + return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs) + + +@register_model +def regnetz_040(pretrained=False, **kwargs): + """RegNetZ-4.0GF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. 
+ """ + return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs) + + +@register_model +def regnetz_040h(pretrained=False, **kwargs): + """RegNetZ-4.0GF + NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py + but it's not clear it is equivalent to paper model as not detailed in the paper. + """ + return _create_regnet('regnetz_040h', pretrained, zero_init_last=False, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/res2net.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/res2net.py new file mode 100644 index 0000000000000000000000000000000000000000..01899c6438bb88e907fb879abf27895b7d9ca970 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/res2net.py @@ -0,0 +1,213 @@ +""" Res2Net and Res2NeXt +Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .resnet import ResNet + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'res2net50_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), + 'res2net50_48w_2s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), + 'res2net50_14w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), + 'res2net50_26w_6s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), + 'res2net50_26w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), + 'res2net101_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), + 'res2next50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), +} + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXT Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + 
width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +@register_model +def res2net50_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net50_26w_4s', pretrained, **model_args) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-101 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net101_26w_4s', pretrained, **model_args) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w6s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) + return _create_res2net('res2net50_26w_6s', pretrained, **model_args) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_26w_8s', pretrained, **model_args) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 48w2s model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) + return _create_res2net('res2net50_48w_2s', pretrained, **model_args) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 14w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_14w_8s', pretrained, **model_args) + + +@register_model +def res2next50(pretrained=False, **kwargs): + """Construct Res2NeXt-50 4s + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2next50', pretrained, **model_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnest.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnest.py new file mode 100644 index 0000000000000000000000000000000000000000..84f329d9551c600c321fea4e3858520466f334df --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnest.py @@ -0,0 +1,231 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +import torch +from torch import nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SplitAttn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + +default_cfgs = { + 'resnest14d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), + 'resnest26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), + 'resnest50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), + 'resnest101e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', + interpolation='bicubic'), + 'resnest50d_1s4x24d': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', + interpolation='bicubic') +} + + +class ResNestBottleneck(nn.Module): + """ResNet Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, + radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block) + self.bn2 = nn.Identity() + self.drop_block = nn.Identity() + self.act2 = nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +@register_model +def resnest14d(pretrained=False, **kwargs): + """ ResNeSt-14d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest26d(pretrained=False, **kwargs): + """ ResNeSt-26d model. Weights ported from GluonCV. 
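+ + A minimal usage sketch (an illustrative doctest, assuming a working torch install; the default cfg above uses 224x224 inputs and a 1000-class head): + + >>> import torch + >>> model = resnest26d(pretrained=False) + >>> model(torch.randn(1, 3, 224, 224)).shape + torch.Size([1, 1000])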
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d(pretrained=False, **kwargs): + """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest101e(pretrained=False, **kwargs): + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest200e(pretrained=False, **kwargs): + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest269e(pretrained=False, **kwargs): + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs): + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs): + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..1c3b2a9ce02bd223a17be44765bc094390b32811 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnet.py @@ -0,0 +1,1608 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. 
+ +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman + +Copyright 2019, Ross Wightman +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, create_classifier +from .registry import register_model + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # ResNet and Wide ResNet + 'resnet10t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth', + input_size=(3, 176, 176), pool_size=(6, 6), + test_crop_pct=0.95, test_input_size=(3, 224, 224), + first_conv='conv1.0'), + 'resnet14t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth', + input_size=(3, 176, 176), pool_size=(6, 6), + test_crop_pct=0.95, test_input_size=(3, 224, 224), + first_conv='conv1.0'), + 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), + 'resnet18d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', + interpolation='bicubic'), + 'resnet26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'resnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet50t': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet101d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + interpolation='bicubic', 
first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet200': _cfg(url='', interpolation='bicubic'), + 'resnet200d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'), + 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'), + 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'), + 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'), + 'wide_resnet50_2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth', + interpolation='bicubic'), + 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'), + + # ResNets w/ alternative norm layers + 'resnet50_gn': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', + crop_pct=0.94, interpolation='bicubic'), + + # ResNeXt + 'resnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnext50d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'resnext101_32x4d': _cfg(url=''), + 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'), + 'resnext101_64x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth', + interpolation='bicubic', crop_pct=1.0, test_input_size=(3, 288, 288)), + 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'), + + # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags + # from https://github.com/facebookresearch/WSL-Images + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'), + 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'), + 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'), + 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'), + + # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
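+ # The 'ssl_' entries are semi-supervised pretrains on the YFCC100M dataset and the 'swsl_' entries below are + # semi-weakly supervised pretrains on a billion-scale hashtag dataset, both finetuned on ImageNet + # (see the matching model entrypoints further down).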
+ 'ssl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'), + 'ssl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'), + 'ssl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'), + 'ssl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'), + 'ssl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'), + 'ssl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'), + + # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + 'swsl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'), + 'swsl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'), + 'swsl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'), + 'swsl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'), + 'swsl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'), + 'swsl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'), + + # Efficient Channel Attention ResNets + 'ecaresnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth', + interpolation='bicubic'), + 'ecaresnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50d_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet101d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnet101d_pruned': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth', + interpolation='bicubic', +
first_conv='conv1.0'), + 'ecaresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'ecaresnet269d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), + crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnext50t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # Squeeze-Excitation ResNets, to eventually replace the models in senet.py + 'seresnet18': _cfg( + url='', + interpolation='bicubic'), + 'seresnet34': _cfg( + url='', + interpolation='bicubic'), + 'seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth', + interpolation='bicubic'), + 'seresnet50t': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnet101': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320) + ), + 'seresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'seresnet269d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + + # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py + 'seresnext26d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext26t_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', + interpolation='bicubic'), + 'seresnext101_32x4d': _cfg( + url='', + interpolation='bicubic'), + 'seresnext101_32x8d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth', + interpolation='bicubic', test_input_size=(3, 288, 288), crop_pct=1.0), + 'seresnext101d_32x8d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth', + interpolation='bicubic', first_conv='conv1.0', test_input_size=(3, 288, 288), crop_pct=1.0), + + 'senet154': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + + # ResNets with anti-aliasing / blur pool + 'resnetblur18': _cfg( + interpolation='bicubic'), + 'resnetblur50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', + interpolation='bicubic'), + 'resnetblur50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnetblur101d': _cfg( + url='', + 
interpolation='bicubic', first_conv='conv1.0'), + 'resnetaa50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth', + test_input_size=(3, 288, 288), test_crop_pct=1.0, interpolation='bicubic'), + 'resnetaa50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnetaa101d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'seresnetaa50d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'seresnextaa101d_32x8d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth', + interpolation='bicubic', first_conv='conv1.0', test_input_size=(3, 288, 288), crop_pct=1.0), + + # ResNet-RS models + 'resnetrs50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), +} + + +def get_padding(kernel_size, stride, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +def create_aa(aa_layer, channels, stride=2, enable=True): + if not aa_layer or not enable: + return nn.Identity() + return aa_layer(stride) if issubclass(aa_layer, nn.AvgPool2d) else aa_layer(channels=channels, stride=stride) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(BasicBlock, self).__init__() + + 
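+ # NOTE: BasicBlock is the two-conv residual block used by the ResNet-18/34 style models; grouped + # (cardinality) and widened (base_width) layouts are only supported by Bottleneck, hence the asserts below.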
assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act1 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_path = drop_path + + def zero_init_last(self): + nn.init.zeros_(self.bn2.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.drop_block(x) + x = self.act1(x) + x = self.aa(x) + + x = self.conv2(x) + x = self.bn2(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.drop_block = drop_block() if drop_block is not None else nn.Identity() + self.act2 = act_layer(inplace=True) + self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_path = drop_path + + def zero_init_last(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.drop_block(x) + x = self.act2(x) + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) 
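+ # Residual addition follows; with zero_init_last (bn3 weight zeroed at init, see zero_init_last above) + # the block initially acts as an identity mapping, which tends to help early training.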
+ x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), + norm_layer(out_channels) + ]) + + +def drop_blocks(drop_prob=0.): + return [ + None, None, + partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None, + partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None] + + +def make_blocks( + block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, + down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, + stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, planes, stride, downsample, first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. 
else None, **block_kwargs)) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to the torchvision default. + + ResNet variants (the same modifications can be used in SE/ResNeXt models as well): + * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b + * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) + * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample + * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample + * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) + * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample + * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample + + ResNeXt + * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths + * same c, d, e, s variants as ResNet can be enabled + + SE-ResNeXt + * normal - 7x7 stem, stem_width = 64 + * same c, d, e, s variants as ResNet can be enabled + + SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, + reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block + + Parameters + ---------- + block : Block, class for the residual block. Options are BasicBlock, Bottleneck. + layers : list of int, number of layers in each block + num_classes : int, default 1000, number of classification classes. + in_chans : int, default 3, number of input (color) channels. + output_stride : int, default 32, output stride of the network, 32, 16, or 8. + global_pool : str, Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + cardinality : int, default 1, number of convolution groups for 3x3 conv in Bottleneck. + base_width : int, default 64, factor determining bottleneck channels. `planes * base_width / 64 * cardinality` + stem_width : int, default 64, number of channels in stem convolutions + stem_type : str, default '' + The type of stem: + * '', default - a single 7x7 conv with a width of stem_width + * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 + * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 + block_reduce_first : int, default 1 + Reduction factor for first convolution output width of residual blocks, 1 for all archs except senets, where 2 + down_kernel_size : int, default 1, kernel size of residual block downsample path, 1x1 for most, 3x3 for senets + avg_down : bool, default False, use average pooling for projection skip connection between stages/downsample. + act_layer : nn.Module, activation layer + norm_layer : nn.Module, normalization layer + aa_layer : nn.Module, anti-aliasing layer + drop_rate : float, default 0.
Dropout probability before classifier, for training + """ + + def __init__( + self, block, layers, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', + cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, block_reduce_first=1, + down_kernel_size=1, avg_down=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, + drop_rate=0.0, drop_path_rate=0., drop_block_rate=0., zero_init_last=True, block_args=None): + super(ResNet, self).__init__() + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem pooling. The name 'maxpool' remains for weight compatibility. + if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None, + norm_layer(inplanes), + act_layer(inplace=True) + ])) + else: + if aa_layer is not None: + if issubclass(aa_layer, nn.AvgPool2d): + self.maxpool = aa_layer(2) + else: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + channels = [64, 128, 256, 512] + stage_modules, stage_feature_info = make_blocks( + block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, + output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, + down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, + drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = 512 * block.expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last=zero_init_last) + + @torch.jit.ignore + def init_weights(self, zero_init_last=True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + if zero_init_last: + for m in self.modules(): + if hasattr(m, 'zero_init_last'): + m.zero_init_last() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^conv1|bn1|maxpool', blocks=r'^layer(\d+)' if coarse 
else r'^layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self, name_only=False): + return 'fc' if name_only else self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.maxpool(x) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True) + else: + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + return x if pre_logits else self.fc(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +@register_model +def resnet10t(pretrained=False, **kwargs): + """Constructs a ResNet-10-T model. + """ + model_args = dict( + block=BasicBlock, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet10t', pretrained, **model_args) + + +@register_model +def resnet14t(pretrained=False, **kwargs): + """Constructs a ResNet-14-T model. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet14t', pretrained, **model_args) + + +@register_model +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet18', pretrained, **model_args) + + +@register_model +def resnet18d(pretrained=False, **kwargs): + """Constructs a ResNet-18-D model. + """ + model_args = dict( + block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet18d', pretrained, **model_args) + + +@register_model +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet34', pretrained, **model_args) + + +@register_model +def resnet34d(pretrained=False, **kwargs): + """Constructs a ResNet-34-D model. + """ + model_args = dict( + block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet34d', pretrained, **model_args) + + +@register_model +def resnet26(pretrained=False, **kwargs): + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet26', pretrained, **model_args) + + +@register_model +def resnet26t(pretrained=False, **kwargs): + """Constructs a ResNet-26-T model. 
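+ A small sanity-check sketch (assumes a working torch install; the shape follows Bottleneck's expansion of 4 and the default output_stride of 32): + + >>> import torch + >>> feats = resnet26t(pretrained=False).forward_features(torch.randn(1, 3, 224, 224)) + >>> feats.shape + torch.Size([1, 2048, 7, 7])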
+ """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet26t', pretrained, **model_args) + + +@register_model +def resnet26d(pretrained=False, **kwargs): + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet26d', pretrained, **model_args) + + +@register_model +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50', pretrained, **model_args) + + +@register_model +def resnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet50d', pretrained, **model_args) + + +@register_model +def resnet50t(pretrained=False, **kwargs): + """Constructs a ResNet-50-T model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet50t', pretrained, **model_args) + + +@register_model +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('resnet101', pretrained, **model_args) + + +@register_model +def resnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet101d', pretrained, **model_args) + + +@register_model +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('resnet152', pretrained, **model_args) + + +@register_model +def resnet152d(pretrained=False, **kwargs): + """Constructs a ResNet-152-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet152d', pretrained, **model_args) + + +@register_model +def resnet200(pretrained=False, **kwargs): + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) + return _create_resnet('resnet200', pretrained, **model_args) + + +@register_model +def resnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet200d', pretrained, **model_args) + + +@register_model +def tv_resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model with original Torchvision weights. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet34', pretrained, **model_args) + + +@register_model +def tv_resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with original Torchvision weights. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet50', pretrained, **model_args) + + +@register_model +def tv_resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('tv_resnet101', pretrained, **model_args) + + +@register_model +def tv_resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('tv_resnet152', pretrained, **model_args) + + +@register_model +def wide_resnet50_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet50_2', pretrained, **model_args) + + +@register_model +def wide_resnet101_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-101-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet101_2', pretrained, **model_args) + + +@register_model +def resnet50_gn(pretrained=False, **kwargs): + """Constructs a ResNet-50 model w/ GroupNorm + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50_gn', pretrained, norm_layer=GroupNorm, **model_args) + + +@register_model +def resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext50_32x4d', pretrained, **model_args) + + +@register_model +def resnext50d_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnext50d_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext101_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('resnext101_32x8d', pretrained, **model_args) + + +@register_model +def resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt101-64x4d model. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('resnext101_64x4d', pretrained, **model_args) + + +@register_model +def tv_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x16d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x32d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) + return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x48d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) + return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) + + +@register_model +def ssl_resnet18(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('ssl_resnet18', pretrained, **model_args) + + +@register_model +def ssl_resnet50(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('ssl_resnet50', pretrained, **model_args) + + +@register_model +def 
ssl_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on the YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on the YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on the YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x16d(pretrained=False, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on the YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def swsl_resnet18(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNet-18 model pre-trained on a 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('swsl_resnet18', pretrained, **model_args) + + +@register_model +def swsl_resnet50(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNet-50 model pre-trained on a 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('swsl_resnet50', pretrained, **model_args) + + +@register_model +def swsl_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on a 1B weakly supervised + image dataset and finetuned on ImageNet.
+ `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x16d(pretrained=False, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ecaresnet26t(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet26t', pretrained, **model_args) + + +@register_model +def ecaresnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d', pretrained, **model_args) + + +@register_model +def ecaresnet50d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet50t(pretrained=False, **kwargs): + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50t', pretrained, **model_args) + + +@register_model +def ecaresnetlight(pretrained=False, **kwargs): + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnetlight', pretrained, **model_args) + + +@register_model +def ecaresnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d', pretrained, **model_args) + + +@register_model +def ecaresnet101d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet200d', pretrained, **model_args) + + +@register_model +def ecaresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet269d', pretrained, **model_args) + + +@register_model +def ecaresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def ecaresnext50t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. 
This model replaces SE module with the ECA module. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) + + +@register_model +def seresnet18(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet18', pretrained, **model_args) + + +@register_model +def seresnet34(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet34', pretrained, **model_args) + + +@register_model +def seresnet50(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50', pretrained, **model_args) + + +@register_model +def seresnet50t(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50t', pretrained, **model_args) + + +@register_model +def seresnet101(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet101', pretrained, **model_args) + + +@register_model +def seresnet152(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152', pretrained, **model_args) + + +@register_model +def seresnet152d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152d', pretrained, **model_args) + + +@register_model +def seresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet200d', pretrained, **model_args) + + +@register_model +def seresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet269d', pretrained, **model_args) + + +@register_model +def seresnext26d_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-D model. + This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for + combination of deep stem and avg_pool in downsample. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. 
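+    For clarity, the 28-layer count: 2+2+2+2 = 8 bottleneck blocks x 3 convs = 24 weight layers, plus the 3-conv deep stem and the classifier.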
+ """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26tn_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note + so keeping this def for backwards compat with any uses out there. Old 't' model is lost. + """ + return seresnext26t_32x4d(pretrained=pretrained, **kwargs) + + +@register_model +def seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x8d', pretrained, **model_args) + + +@register_model +def seresnext101d_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101d_32x8d', pretrained, **model_args) + + +@register_model +def senet154(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('senet154', pretrained, **model_args) + + +@register_model +def resnetblur18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model with blur anti-aliasing + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur18', pretrained, **model_args) + + +@register_model +def resnetblur50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with blur anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur50', pretrained, **model_args) + + +@register_model +def resnetblur50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with blur anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetblur50d', pretrained, **model_args) + + +@register_model +def resnetblur101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with blur anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetblur101d', pretrained, **model_args) + + +@register_model +def resnetaa50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with avgpool 
anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, **kwargs) + return _create_resnet('resnetaa50', pretrained, **model_args) + + +@register_model +def resnetaa50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetaa50d', pretrained, **model_args) + + +@register_model +def resnetaa101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnetaa101d', pretrained, **model_args) + + +@register_model +def seresnetaa50d(pretrained=False, **kwargs): + """Constructs a SE-ResNet-50-D model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, + stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnetaa50d', pretrained, **model_args) + + +@register_model +def seresnextaa101d_32x8d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-101-D 32x8d model with avgpool anti-aliasing + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnextaa101d_32x8d', pretrained, **model_args) + + +@register_model +def resnetrs50(pretrained=False, **kwargs): + """Constructs a ResNet-RS-50 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs50', pretrained, **model_args) + + +@register_model +def resnetrs101(pretrained=False, **kwargs): + """Constructs a ResNet-RS-101 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs101', pretrained, **model_args) + + +@register_model +def resnetrs152(pretrained=False, **kwargs): + """Constructs a ResNet-RS-152 model. 
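+    (RS recipe, for reference: SE attention with rd_ratio=0.25 rather than the usual 1/16, deep stem, avg-down shortcuts, and the stem max pool replaced by a stride-2 conv via replace_stem_pool=True.)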
+ Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs152', pretrained, **model_args) + + +@register_model +def resnetrs200(pretrained=False, **kwargs): + """Constructs a ResNet-RS-200 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs200', pretrained, **model_args) + + +@register_model +def resnetrs270(pretrained=False, **kwargs): + """Constructs a ResNet-RS-270 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs270', pretrained, **model_args) + + +@register_model +def resnetrs350(pretrained=False, **kwargs): + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs350', pretrained, **model_args) + + +@register_model +def resnetrs420(pretrained=False, **kwargs): + """Constructs a ResNet-RS-420 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs420', pretrained, **model_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnetv2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..d85677a479f75779da8edb2d112a29fd744b6e7b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/resnetv2.py @@ -0,0 +1,708 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. 
The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. + +Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transformers (ViT) and +extra padding support to allow porting of official Hybrid ResNet pretrained weights from +https://github.com/google-research/vision_transformer + +Thanks to the Google team for the above two repositories and associated papers: +* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 +* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 +* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + +Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. +""" +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict # pylint: disable=g-importing-member + +import torch +import torch.nn as nn +from functools import partial + +from custom_timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv, checkpoint_seq +from .registry import register_model +from .layers import GroupNormAct, BatchNormAct2d, EvoNorm2dB0, EvoNorm2dS0, EvoNorm2dS1, FilterResponseNormTlu2d,\ + ClassifierHead, DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_50x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x2_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x4_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0), # only one at 480x480? 
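+    # NOTE (editor): the .npz checkpoints in this dict are loaded by the custom BiT weight loader
+    # _load_weights() defined below; _create_resnetv2() enables it via pretrained_custom_load for '_bit' variants.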
+ + # trained on imagenet-21k + 'resnetv2_50x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz', + num_classes=21843), + 'resnetv2_50x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3.npz', + num_classes=21843), + 'resnetv2_101x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1.npz', + num_classes=21843), + 'resnetv2_101x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3.npz', + num_classes=21843), + 'resnetv2_152x2_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2.npz', + num_classes=21843), + 'resnetv2_152x4_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4.npz', + num_classes=21843), + + 'resnetv2_50x1_bit_distilled': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R50x1_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher_384': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_384.npz', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), + + 'resnetv2_50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_50_a1h-000cdf49.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_50d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_101_a1h-5d01f016.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_101d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152': _cfg( + interpolation='bicubic'), + 'resnetv2_152d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + + 'resnetv2_50d_gn': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetv2_50d_gn_ah-c415c11a.pth', + interpolation='bicubic', first_conv='stem.conv1', test_input_size=(3, 288, 288), crop_pct=0.95), + 'resnetv2_50d_evob': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50d_evos': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetv2_50d_evos_ah-7c4dd548.pth', + interpolation='bicubic', first_conv='stem.conv1', test_input_size=(3, 288, 288), crop_pct=0.95), + 'resnetv2_50d_frn': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +} + + +def make_div(v, divisor=8): + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. 
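+    (Pre-activation: each unit applies norm + act before its convs and returns the residual sum un-normalized; ResNetV2 applies a single final norm after the last stage.)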
+ """ + + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. + """ + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, preact=False, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, + conv_layer=None, norm_layer=None): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + 
+class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, + preact=True, conv_layer=None, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__( + self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, + avg_down=False, block_dpr=None, block_fn=PreActBottleneck, + act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, + first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, + **layer_kwargs, **block_kwargs)) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, out_chs=64, stem_type='', preact=True, + conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) 
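+        # pads 1 on all sides then pools unpadded: same output size as TF 'SAME' (ceil(H/2)), with slightly shifted windows on even inputs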
+ stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + elif 'same' in stem_type: + # full, input size based 'SAME' padding, used in ViT Hybrid model + stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') + else: + # the usual PyTorch symmetric padding + stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + return nn.Sequential(stem) + + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet models. + """ + + def __init__( + self, layers, channels=(256, 512, 1024, 2048), + num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, + act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), + drop_rate=0., drop_path_rate=0., zero_init_last=False): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + wf = width_factor + + self.feature_info = [] + stem_chs = make_div(stem_chs * wf) + self.stem = create_resnetv2_stem( + in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer) + stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) + + prev_chs = stem_chs + curr_stride = 4 + dilation = 1 + block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + block_fn = PreActBottleneck if preact else Bottleneck + self.stages = nn.Sequential() + for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): + out_chs = make_div(c * wf) + stride = 1 if stage_idx == 0 else 2 + if curr_stride >= output_stride: + dilation *= stride + stride = 1 + stage = ResNetStage( + prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, + act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn) + prev_chs = out_chs + curr_stride *= stride + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] + self.stages.add_module(str(stage_idx), stage) + + self.num_features = prev_chs + self.norm = norm_layer(self.num_features) if preact else nn.Identity() + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + self.init_weights(zero_init_last=zero_init_last) + self.grad_checkpointing = False + + @torch.jit.ignore + def init_weights(self, zero_init_last=True): + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix='resnet/'): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else [ + (r'^stages\.(\d+)\.blocks\.(\d+)', None), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = 
checkpoint_seq(self.stages, x, flatten=True) + else: + x = self.stages(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): + if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + feature_cfg=feature_cfg, + pretrained_custom_load='_bit' in variant, + **kwargs) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs) + + +@register_model +def resnetv2_50x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm', 
pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bit_distilled(pretrained=False, **kwargs): + """ ResNetV2-50x1-BiT Distilled + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_50x1_bit_distilled', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher(pretrained=False, **kwargs): + """ ResNetV2-152x2-BiT Teacher + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher_384(pretrained=False, **kwargs): + """ ResNetV2-152x2-BiT Teacher @ 384x384 + Paper: Knowledge distillation: A good teacher is 
patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher_384', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_50(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_50d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50t', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True, **kwargs) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101d', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152d', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +# Experimental configs (may change / be removed) + +@register_model +def resnetv2_50d_gn(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_gn', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_evob(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evob', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dB0, + stem_type='deep', avg_down=True, zero_init_last=True, **kwargs) + + +@register_model +def resnetv2_50d_evos(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evos', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_frn(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_frn', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d, + stem_type='deep', avg_down=True, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/rexnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/rexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c7077ea6e996c624ef85052b1a6114ea681142b9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/rexnet.py @@ -0,0 +1,261 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - 
+https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from functools import partial +from math import ceil + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule +from .registry import register_model +from .efficientnet_builder import efficientnet_init_weights + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + rexnet_100=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'), + rexnet_130=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'), + rexnet_150=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'), + rexnet_200=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'), + rexnetr_100=_cfg( + url=''), + rexnetr_130=_cfg( + url=''), + rexnetr_150=_cfg( + url=''), + rexnetr_200=_cfg( + url=''), +) + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__( + self, in_chs, out_chs, stride, exp_ratio=1.0, se_ratio=0., ch_div=1, + act_layer='swish', dw_act_layer='relu6', drop_path=None): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvNormAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) + return x + + +def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + exp_ratios = [1] * layers[0] + [6] * 
sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple instance to make each layer become an expand layer. + out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, prev_chs, width_mult, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + curr_stride *= stride + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, se_ratio=se_ratio, + ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class ReXNetV1(nn.Module): + def __init__( + self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, + initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., + ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0. 
+ ): + super(ReXNetV1, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + assert output_stride == 32 # FIXME support dilation + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, stem_chs, width_mult, ch_div, act_layer, dw_act_layer, drop_path_rate) + self.num_features = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^stem', + blocks=r'^features\.(\d+)', + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.features, x, flatten=True) + else: + x = self.features(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ReXNetV1, variant, pretrained, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def rexnet_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/selecsls.py 
b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/selecsls.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb9e1f6dc9647e1c5071300ff030f760fba3984 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/selecsls.py @@ -0,0 +1,377 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." +https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'selecsls42': _cfg( + url='', + interpolation='bicubic'), + 'selecsls42b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth', + interpolation='bicubic'), + 'selecsls60': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth', + interpolation='bicubic'), + 'selecsls60b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth', + interpolation='bicubic'), + 'selecsls84': _cfg( + url='', + interpolation='bicubic'), +} + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSLSBlock(nn.Module): + def __init__(self, in_chs, skip_chs, 
mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSLSBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSLS(nn.Module): + """SelecSLS42 / SelecSLS60 / SelecSLS84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(SelecSLS, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
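+
+    # Editor's note: forward flow is stem -> features (each SelecSLSBlock hands a
+    # [current, skip] pair of tensors to the next block) -> SelectSeq picks the current
+    # tensor -> head convs -> global pool + fc classifier.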
+ + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^features\.(\d+)', + blocks_head=r'^head' + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.fc(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + 
feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSLS, variant, pretrained, + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def selecsls42(pretrained=False, **kwargs): + """Constructs a SelecSLS42 model. + """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs): + """Constructs a SelecSLS42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs): + """Constructs a SelecSLS60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs): + """Constructs a SelecSLS60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs): + """Constructs a SelecSLS84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/senet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/senet.py new file mode 100644 index 0000000000000000000000000000000000000000..5611479f82bef79df4913c6bf0e56b35e0630651 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/senet.py @@ -0,0 +1,465 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
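+
+Usage sketch (a hedged example using the factory functions registered at the bottom
+of this file):
+
+    import torch
+    model = legacy_seresnet50(pretrained=False)
+    logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])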
+""" +import math +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SENet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = { + 'legacy_senet154': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), + 'legacy_seresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), + 'legacy_seresnext101_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), +} + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__( + self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, inplanes=64, input_3x3=False, 
downsample_kernel_size=1, + downsample_padding=0, num_classes=1000, global_pool='avg'): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. + - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.num_classes = num_classes + self.drop_rate = drop_rate + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d( + in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. 
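+        # (For a 224x224 input the stem output is 112x112, and ceil((112 - 3) / 2) + 1 == 56,
+        # so the pooled map matches the 56x56 that padding=1 would give.)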
+ self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return x if pre_logits else self.last_linear(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg(SENet, variant, pretrained, **kwargs) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs): + model_args = dict( + 
block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs): + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sequencer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sequencer.py new file mode 100644 index 0000000000000000000000000000000000000000..48240d1d8625f4c0cb3c497a5c49058d722c2549 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sequencer.py @@ -0,0 +1,417 @@ +""" Sequencer + +Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf + +""" +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + + +import math +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from .helpers import build_model_with_cfg, named_apply +from .layers import lecun_normal_, DropPath, Mlp, PatchEmbed as TimmPatchEmbed +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + sequencer2d_s=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_s.pth"), + sequencer2d_m=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_m.pth"), + sequencer2d_l=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_l.pth"), +) + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): + stdv = 1.0 / math.sqrt(module.hidden_size) + for weight in module.parameters(): + nn.init.uniform_(weight, -stdv, stdv) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_stage( + index, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, block_layer, rnn_layer, mlp_layer, + norm_layer, act_layer, num_layers, bidirectional, union, + with_fc, drop=0., drop_path_rate=0., **kwargs): + assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) + blocks = [] + for block_idx in range(layers[index]): + drop_path = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(block_layer( + embed_dims[index], hidden_sizes[index], mlp_ratio=mlp_ratios[index], + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, + num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, + drop=drop, drop_path=drop_path)) + + if index < len(embed_dims) - 1: + blocks.append(Downsample2D(embed_dims[index], embed_dims[index + 1], patch_sizes[index + 1])) + + blocks = nn.Sequential(*blocks) + return blocks + + +class RNNIdentity(nn.Module): + def __init__(self, *args, **kwargs): + super(RNNIdentity, self).__init__() + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: + return x, None + + +class RNN2DBase(nn.Module): + + def __init__( + self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", 
with_fc=True): + super().__init__() + + self.input_size = input_size + self.hidden_size = hidden_size + self.output_size = 2 * hidden_size if bidirectional else hidden_size + self.union = union + + self.with_vertical = True + self.with_horizontal = True + self.with_fc = with_fc + + self.fc = None + if with_fc: + if union == "cat": + self.fc = nn.Linear(2 * self.output_size, input_size) + elif union == "add": + self.fc = nn.Linear(self.output_size, input_size) + elif union == "vertical": + self.fc = nn.Linear(self.output_size, input_size) + self.with_horizontal = False + elif union == "horizontal": + self.fc = nn.Linear(self.output_size, input_size) + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + elif union == "cat": + pass + if 2 * self.output_size != input_size: + raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") + elif union == "add": + pass + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + elif union == "vertical": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + self.with_horizontal = False + elif union == "horizontal": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + + self.rnn_v = RNNIdentity() + self.rnn_h = RNNIdentity() + + def forward(self, x): + B, H, W, C = x.shape + + if self.with_vertical: + v = x.permute(0, 2, 1, 3) + v = v.reshape(-1, H, C) + v, _ = self.rnn_v(v) + v = v.reshape(B, W, H, -1) + v = v.permute(0, 2, 1, 3) + else: + v = None + + if self.with_horizontal: + h = x.reshape(-1, W, C) + h, _ = self.rnn_h(h) + h = h.reshape(B, H, W, -1) + else: + h = None + + if v is not None and h is not None: + if self.union == "cat": + x = torch.cat([v, h], dim=-1) + else: + x = v + h + elif v is not None: + x = v + elif h is not None: + x = h + + if self.fc is not None: + x = self.fc(x) + + return x + + +class LSTM2D(RNN2DBase): + + def __init__( + self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", with_fc=True): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) + if self.with_vertical: + self.rnn_v = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + if self.with_horizontal: + self.rnn_h = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + + +class Sequencer2DBlock(nn.Module): + def __init__( + self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2D, mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, + num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0.): + super().__init__() + channels_dim = int(mlp_ratio * dim) + self.norm1 = norm_layer(dim) + self.rnn_tokens = rnn_layer(dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, + union=union, with_fc=with_fc) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class PatchEmbed(TimmPatchEmbed): + def forward(self, x): + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + else: + x = x.permute(0, 2, 3, 1) # BCHW -> BHWC + x = self.norm(x) + return x + + +class Shuffle(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + if self.training: + B, H, W, C = x.shape + r = torch.randperm(H * W) + x = x.reshape(B, -1, C) + x = x[:, r, :].reshape(B, H, W, -1) + return x + + +class Downsample2D(nn.Module): + def __init__(self, input_dim, output_dim, patch_size): + super().__init__() + self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.down(x) + x = x.permute(0, 2, 3, 1) + return x + + +class Sequencer2D(nn.Module): + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + global_pool='avg', + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + block_layer=Sequencer2DBlock, + rnn_layer=LSTM2D, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_rnn_layers=1, + bidirectional=True, + union="cat", + with_fc=True, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + assert global_pool in ('', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = embed_dims[-1] # num_features for consistency with other models + self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) + self.embed_dims = embed_dims + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_sizes[0], in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, + flatten=False) + + self.blocks = nn.Sequential(*[ + get_stage( + i, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, block_layer=block_layer, + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, + num_layers=num_rnn_layers, bidirectional=bidirectional, + union=union, with_fc=with_fc, drop=drop_rate, drop_path_rate=drop_path_rate, + ) + for i, _ in enumerate(embed_dims)]) + + self.norm = norm_layer(embed_dims[-1]) + self.head = nn.Linear(embed_dims[-1], self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. 
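+        # With the head weight zeroed in _init_weights, a bias of -log(num_classes)
+        # makes the initial softmax uniform, so cross-entropy starts near log(num_classes).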
+ named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=[ + (r'^blocks\.(\d+)\..*\.down', (99999,)), + (r'^blocks\.(\d+)', None) if coarse else (r'^blocks\.(\d+)\.(\d+)', None), + (r'^norm', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=(1, 2)) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_sequencer2d(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Sequencer2D models.') + + model = build_model_with_cfg(Sequencer2D, variant, pretrained, **kwargs) + return model + + +# main + +@register_model +def sequencer2d_s(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_m(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 14, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_l(pretrained=False, **kwargs): + model_args = dict( + layers=[8, 8, 16, 4], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sknet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sknet.py new file mode 100644 index 0000000000000000000000000000000000000000..342a7901325780809a3213d6188e87ea111a9a11 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/sknet.py @@ -0,0 +1,206 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' 
(https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +from torch import nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SelectiveKernel, ConvNormAct, ConvNormActAa, create_attn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'skresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'), + 'skresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'), + 'skresnet50': _cfg(), + 'skresnet50d': _cfg( + first_conv='conv1.0'), + 'skresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'), +} + + +class SelectiveKernelBasic(nn.Module): + expansion = 1 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(SelectiveKernelBasic, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock doest not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = SelectiveKernel( + inplanes, first_planes, stride=stride, dilation=first_dilation, + aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) + self.conv2 = ConvNormAct( + first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +class SelectiveKernelBottleneck(nn.Module): + expansion = 4 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(SelectiveKernelBottleneck, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) + width = int(math.floor(planes * (base_width / 64)) * 
cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) + self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) + + +@register_model +def skresnet18(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50-D model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. 
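+
+    The stem tweaks applied below (deep 3x3 stem with stem_width=32, avg-pool
+    downsampling) follow the ResNet-D recipe from "Bag of Tricks for Image Classification".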
+ """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2e215dc8d98ba91ced0f381096c2be8c3f8163 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer.py @@ -0,0 +1,700 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +S3 (AutoFormerV2, https://arxiv.org/abs/2111.14725) Swin weights from + - https://github.com/microsoft/Cream/tree/main/AutoFormerV2 + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply, checkpoint_seq +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, to_ntuple, trunc_normal_, _assert +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn, get_init_weights_vit + + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'swin_base_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_base_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth', + ), + + 'swin_large_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_large_patch4_window7_224': _cfg( + 
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth', + ), + + 'swin_small_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth', + ), + + 'swin_tiny_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', + ), + + 'swin_base_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_base_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', + num_classes=21841), + + 'swin_large_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_large_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', + num_classes=21841), + + 'swin_s3_tiny_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth' + ), + 'swin_s3_small_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth' + ), + 'swin_s3_base_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth' + ) +} + + +def window_partition(x, window_size: int): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: int, H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +def get_relative_position_index(win_h, win_w): + # get pair-wise relative position index for each token inside the window + coords = torch.stack(torch.meshgrid([torch.arange(win_h), torch.arange(win_w)])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += win_h - 1 # shift to start from 0 + relative_coords[:, :, 1] += win_w - 1 + relative_coords[:, :, 0] *= 2 * win_w - 1 + return relative_coords.sum(-1) # Wh*Ww, Wh*Ww + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. 
+ num_heads (int): Number of attention heads. + head_dim (int): Number of channels per head (dim // num_heads if not set) + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, dim, num_heads, head_dim=None, window_size=7, qkv_bias=True, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = to_2tuple(window_size) # Wh, Ww + win_h, win_w = self.window_size + self.window_area = win_h * win_w + self.num_heads = num_heads + head_dim = head_dim or dim // num_heads + attn_dim = head_dim * num_heads + self.scale = head_dim ** -0.5 + + # define a parameter table of relative position bias, shape: 2*Wh-1 * 2*Ww-1, nH + self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads)) + + # get pair-wise relative position index for each token inside the window + self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w)) + + self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(attn_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def _get_rel_pos_bias(self) -> torch.Tensor: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + return relative_position_bias.unsqueeze(0) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + attn = attn + self._get_rel_pos_bias() + + if mask is not None: + num_win = mask.shape[0] + attn = attn.view(B_ // num_win, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + window_size (int): Window size. + num_heads (int): Number of attention heads. + head_dim (int): Enforce the number of channels per head + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__( + self, dim, input_resolution, num_heads=4, head_dim=None, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, num_heads=num_heads, head_dim=head_dim, window_size=to_2tuple(self.window_size), + qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + cnt = 0 + for h in ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)): + for w in ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)): + img_mask[:, h, w, :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # num_win, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # num_win*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # num_win*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_win*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, out_dim=None, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.out_dim = out_dim or 2 * dim + self.norm = norm_layer(4 * dim) + self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even.") + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + head_dim (int): Channels per head (dim // num_heads if not set) + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + """ + + def __init__( + self, dim, out_dim, input_resolution, depth, num_heads=4, head_dim=None, + window_size=7, mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.grad_checkpointing = False + + # build blocks + self.blocks = nn.Sequential(*[ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, num_heads=num_heads, head_dim=head_dim, + window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. 
Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + head_dim (int, tuple(int)): + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + """ + + def __init__( + self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg', + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), head_dim=None, + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, weight_init='', **kwargs): + super().__init__() + assert global_pool in ('', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if patch_norm else None) + num_patches = self.patch_embed.num_patches + self.patch_grid = self.patch_embed.grid_size + + # absolute position embedding + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) if ape else None + self.pos_drop = nn.Dropout(p=drop_rate) + + # build layers + if not isinstance(embed_dim, (tuple, list)): + embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + embed_out_dim = embed_dim[1:] + [None] + head_dim = to_ntuple(self.num_layers)(head_dim) + window_size = to_ntuple(self.num_layers)(window_size) + mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + layers = [] + for i in range(self.num_layers): + layers += [BasicLayer( + dim=embed_dim[i], + out_dim=embed_out_dim[i], + input_resolution=(self.patch_grid[0] // (2 ** i), self.patch_grid[1] // (2 ** i)), + depth=depths[i], + num_heads=num_heads[i], + head_dim=head_dim[i], + window_size=window_size[i], + mlp_ratio=mlp_ratio[i], + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i < self.num_layers - 1) else None + )] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + if weight_init != 'skip': + self.init_weights(weight_init) + + @torch.jit.ignore + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'moco', '') + if self.absolute_pos_embed is not None: + trunc_normal_(self.absolute_pos_embed, std=.02) + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
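+        # 'nlhb' (negative log head bias): initializing the classifier bias to
+        # -log(num_classes) makes the initial softmax output roughly uniform over classes.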
+ named_apply(get_init_weights_vit(mode, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + nwd = {'absolute_pos_embed'} + for n, _ in self.named_parameters(): + if 'relative_position_bias_table' in n: + nwd.add(n) + return nwd + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^absolute_pos_embed|patch_embed', # stem and embed + blocks=r'^layers\.(\d+)' if coarse else [ + (r'^layers\.(\d+).downsample', (0,)), + (r'^layers\.(\d+)\.\w+\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.layers: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x = self.layers(x) + x = self.norm(x) # B L C + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_swin_transformer(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-S @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs) + return 
_create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_tiny_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-T @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-B @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-B @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-L @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-L @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_s3_tiny_224(pretrained=False, **kwargs): + """ Swin-S3-T @ 224x224, ImageNet-1k. https://arxiv.org/abs/2111.14725 + """ + model_kwargs = dict( + patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), + num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_s3_small_224(pretrained=False, **kwargs): + """ Swin-S3-S @ 224x224, trained ImageNet-1k. https://arxiv.org/abs/2111.14725 + """ + model_kwargs = dict( + patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), + num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_s3_base_224(pretrained=False, **kwargs): + """ Swin-S3-B @ 224x224, trained ImageNet-1k. 
https://arxiv.org/abs/2111.14725 + """ + model_kwargs = dict( + patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), + num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **model_kwargs) + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..ade2b050a956fe6f30811736d196d3f33e4dcc7c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2.py @@ -0,0 +1,753 @@ +""" Swin Transformer V2 +A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/abs/2111.09883 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer V2 +# Copyright (c) 2022 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import math +from typing import Tuple, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, to_ntuple, trunc_normal_, _assert +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'swinv2_tiny_window8_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth', + input_size=(3, 256, 256) + ), + 'swinv2_tiny_window16_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth', + input_size=(3, 256, 256) + ), + 'swinv2_small_window8_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth', + input_size=(3, 256, 256) + ), + 'swinv2_small_window16_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth', + input_size=(3, 256, 256) + ), + 'swinv2_base_window8_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth', + input_size=(3, 256, 256) + ), + 'swinv2_base_window16_256': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth', + input_size=(3, 256, 256) + ), + + 'swinv2_base_window12_192_22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth', + num_classes=21841, input_size=(3, 192, 192) + ), + 'swinv2_base_window12to16_192to256_22kft1k': _cfg( + 
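+        # 22k pretraining at 192px / window 12, fine-tuned on 1k at 256px / window 16;
+        # the MLP-generated log-spaced position bias (see WindowAttention below) allows
+        # this window-size transfer without resizing a learned bias table.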
url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth', + input_size=(3, 256, 256) + ), + 'swinv2_base_window12to24_192to384_22kft1k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'swinv2_large_window12_192_22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth', + num_classes=21841, input_size=(3, 192, 192) + ), + 'swinv2_large_window12to16_192to256_22kft1k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth', + input_size=(3, 256, 256) + ), + 'swinv2_large_window12to24_192to384_22kft1k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), +} + + +def window_partition(x, window_size: Tuple[int, int]): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): + """ + Args: + windows: (num_windows * B, window_size[0], window_size[1], C) + window_size (Tuple[int, int]): Window size + img_size (Tuple[int, int]): Image size + + Returns: + x: (B, H, W, C) + """ + H, W = img_size + B = int(windows.shape[0] / (H * W / window_size[0] / window_size[1])) + x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + pretrained_window_size (tuple[int]): The height and width of the window in pre-training. 
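+
+    Compared to V1: attention logits are cosine similarities scaled by a learned
+    per-head logit scale (clamped at log(1/0.01)), and the relative position bias is
+    generated by a small MLP from log-spaced coordinates instead of a learned table.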
+ """ + + def __init__( + self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., + pretrained_window_size=[0, 0]): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = pretrained_window_size + self.num_heads = num_heads + + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear(2, 512, bias=True), + nn.ReLU(inplace=True), + nn.Linear(512, num_heads, bias=False) + ) + + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) + relative_coords_table = torch.stack(torch.meshgrid([ + relative_coords_h, + relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / math.log2(8) + + self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index, persistent=False) + + self.qkv = nn.Linear(dim, dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(dim)) + self.register_buffer('k_bias', torch.zeros(dim), persistent=False) + self.v_bias = nn.Parameter(torch.zeros(dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + # cosine attention + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale, max=math.log(1. 
/ 0.01)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + pretrained_window_size (int): Window size in pretraining. + """ + + def __init__( + self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0): + super().__init__() + self.dim = dim + self.input_resolution = to_2tuple(input_resolution) + self.num_heads = num_heads + ws, ss = self._calc_window_shift(window_size, shift_size) + self.window_size: Tuple[int, int] = ws + self.shift_size: Tuple[int, int] = ss + self.window_area = self.window_size[0] * self.window_size[1] + self.mlp_ratio = mlp_ratio + + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + pretrained_window_size=to_2tuple(pretrained_window_size)) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + if any(self.shift_size): + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + cnt = 0 + for h in ( + slice(0, -self.window_size[0]), + slice(-self.window_size[0], -self.shift_size[0]), + slice(-self.shift_size[0], None)): + for w in ( + slice(0, -self.window_size[1]), + slice(-self.window_size[1], -self.shift_size[1]), + slice(-self.shift_size[1], None)): + img_mask[:, h, w, :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_area) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def _calc_window_shift(self, target_window_size, target_shift_size) -> Tuple[Tuple[int, int], Tuple[int, int]]: + target_window_size = to_2tuple(target_window_size) + target_shift_size = to_2tuple(target_shift_size) + window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] + shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] + return tuple(window_size), tuple(shift_size) + + def _attn(self, x): + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + x = x.view(B, H, W, C) + + # cyclic shift + has_shift = any(self.shift_size) + if has_shift: + shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + shifted_x = window_reverse(attn_windows, self.window_size, self.input_resolution) # B H' W' C + + # reverse cyclic shift + if has_shift: + x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + return x + + def forward(self, x): + x = x + self.drop_path1(self.norm1(self._attn(x))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(2 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + _assert(H % 2 == 0, f"x size ({H}*{W}) are not even.") + _assert(W % 2 == 0, f"x size ({H}*{W}) are not even.") + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.reduction(x) + x = self.norm(x) + + return x + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + pretrained_window_size (int): Local window size in pre-training. + """ + + def __init__( + self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + norm_layer=nn.LayerNorm, downsample=None, pretrained_window_size=0): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.grad_checkpointing = False + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + pretrained_window_size=pretrained_window_size) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = nn.Identity() + + def forward(self, x): + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + x = self.downsample(x) + return x + + def _init_respostnorm(self): + for blk in self.blocks: + nn.init.constant_(blk.norm1.bias, 0) + nn.init.constant_(blk.norm1.weight, 0) + nn.init.constant_(blk.norm2.bias, 0) + nn.init.constant_(blk.norm2.weight, 0) + + +class SwinTransformerV2(nn.Module): + r""" Swin Transformer V2 + A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/abs/2111.09883 + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. 
Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + pretrained_window_sizes (tuple(int)): Pretrained window sizes of each layer. + """ + + def __init__( + self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg', + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + pretrained_window_sizes=(0, 0, 0, 0), **kwargs): + super().__init__() + + self.num_classes = num_classes + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + + # absolute position embedding + if ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + else: + self.absolute_pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=( + self.patch_embed.grid_size[0] // (2 ** i_layer), + self.patch_embed.grid_size[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + pretrained_window_size=pretrained_window_sizes[i_layer] + ) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + for bly in self.layers: + bly._init_respostnorm() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + 
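+    # A minimal usage sketch (hypothetical, not part of the original file): with the
+    # 256px configs above, something like
+    #   model = SwinTransformerV2(img_size=256, window_size=8)
+    #   logits = model(torch.randn(1, 3, 256, 256))  # -> (1, 1000)
+    # exercises a full forward pass.
+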
@torch.jit.ignore + def no_weight_decay(self): + nod = {'absolute_pos_embed'} + for n, m in self.named_modules(): + if any([kw in n for kw in ("cpb_mlp", "logit_scale", 'relative_position_bias_table')]): + nod.add(n) + return nod + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^absolute_pos_embed|patch_embed', # stem and embed + blocks=r'^layers\.(\d+)' if coarse else [ + (r'^layers\.(\d+).downsample', (0,)), + (r'^layers\.(\d+)\.\w+\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for l in self.layers: + l.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + + x = self.norm(x) # B L C + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if any([n in k for n in ('relative_position_index', 'relative_coords_table')]): + continue # skip buffers that should not be persistent + out_dict[k] = v + return out_dict + + +def _create_swin_transformer_v2(variant, pretrained=False, **kwargs): + model = build_model_with_cfg( + SwinTransformerV2, variant, pretrained, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def swinv2_tiny_window16_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer_v2('swinv2_tiny_window16_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_tiny_window8_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer_v2('swinv2_tiny_window8_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_small_window16_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer_v2('swinv2_small_window16_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_small_window8_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer_v2('swinv2_small_window8_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_base_window16_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return 
_create_swin_transformer_v2('swinv2_base_window16_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_base_window8_256(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer_v2('swinv2_base_window8_256', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_base_window12_192_22k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer_v2('swinv2_base_window12_192_22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_base_window12to16_192to256_22kft1k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), + pretrained_window_sizes=(12, 12, 12, 6), **kwargs) + return _create_swin_transformer_v2( + 'swinv2_base_window12to16_192to256_22kft1k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_base_window12to24_192to384_22kft1k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), + pretrained_window_sizes=(12, 12, 12, 6), **kwargs) + return _create_swin_transformer_v2( + 'swinv2_base_window12to24_192to384_22kft1k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_large_window12_192_22k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer_v2('swinv2_large_window12_192_22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_large_window12to16_192to256_22kft1k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), + pretrained_window_sizes=(12, 12, 12, 6), **kwargs) + return _create_swin_transformer_v2( + 'swinv2_large_window12to16_192to256_22kft1k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swinv2_large_window12to24_192to384_22kft1k(pretrained=False, **kwargs): + """ + """ + model_kwargs = dict( + window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), + pretrained_window_sizes=(12, 12, 12, 6), **kwargs) + return _create_swin_transformer_v2( + 'swinv2_large_window12to24_192to384_22kft1k', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2_cr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2_cr.py new file mode 100644 index 0000000000000000000000000000000000000000..d3ac4ac572d0b55bc1abf278f34fa9e3bd7bcb7a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/swin_transformer_v2_cr.py @@ -0,0 +1,1029 @@ +""" Swin Transformer V2 + +A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` + - https://arxiv.org/pdf/2111.09883 + +Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below + +This implementation is experimental and subject to change in manners that will break weight compat: +* Size of the pos embed MLP are not spelled out in paper in terms of dim, fixed for all models? vary with num_heads? 
+ * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head) +* The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at + GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial. +* num_heads per stage is not detailed for Huge and Giant model variants +* 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts +* experiments are ongoing wrt to 'main branch' norm layer use and weight init scheme + +Noteworthy additions over official Swin v1: +* MLP relative position embedding is looking promising and adapts to different image/window sizes +* This impl has been designed to allow easy change of image size with matching window size changes +* Non-square image size and window size are supported + +Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman +""" +# -------------------------------------------------------- +# Swin Transformer V2 reimplementation +# Copyright (c) 2021 Christoph Reich +# Licensed under The MIT License [see LICENSE for details] +# Written by Christoph Reich +# -------------------------------------------------------- +import logging +import math +from copy import deepcopy +from typing import Tuple, Optional, List, Union, Any, Type + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply +from .layers import DropPath, Mlp, to_2tuple, _assert +from .registry import register_model + + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, + 'input_size': (3, 224, 224), + 'pool_size': (7, 7), + 'crop_pct': 0.9, + 'interpolation': 'bicubic', + 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', + 'classifier': 'head', + **kwargs, + } + + +default_cfgs = { + 'swinv2_cr_tiny_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_tiny_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_tiny_ns_224': _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_small_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_small_224': _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_small_ns_224': _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth", + input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_base_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_base_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_base_ns_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_large_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_large_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 
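+    # no weight urls are provided for the huge/giant variants below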
'swinv2_cr_huge_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_huge_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), + 'swinv2_cr_giant_384': _cfg( + url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), + 'swinv2_cr_giant_224': _cfg( + url="", input_size=(3, 224, 224), crop_pct=0.9), +} + + +def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: + """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """ + return x.permute(0, 2, 3, 1) + + +def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: + """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). """ + return x.permute(0, 3, 1, 2) + + +def window_partition(x, window_size: Tuple[int, int]): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) + return windows + + +@register_notrace_function # reason: int argument is a Proxy +def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): + """ + Args: + windows: (num_windows * B, window_size[0], window_size[1], C) + window_size (Tuple[int, int]): Window size + img_size (Tuple[int, int]): Image size + + Returns: + x: (B, H, W, C) + """ + H, W = img_size + B = int(windows.shape[0] / (H * W / window_size[0] / window_size[1])) + x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowMultiHeadAttention(nn.Module): + r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias. + + Args: + dim (int): Number of input features + window_size (int): Window size + num_heads (int): Number of attention heads + drop_attn (float): Dropout rate of attention map + drop_proj (float): Dropout rate after projection + meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network + sequential_attn (bool): If true sequential self-attention is performed + """ + + def __init__( + self, + dim: int, + num_heads: int, + window_size: Tuple[int, int], + drop_attn: float = 0.0, + drop_proj: float = 0.0, + meta_hidden_dim: int = 384, # FIXME what's the optimal value? + sequential_attn: bool = False, + ) -> None: + super(WindowMultiHeadAttention, self).__init__() + assert dim % num_heads == 0, \ + "The number of input features (in_features) are not divisible by the number of heads (num_heads)." + self.in_features: int = dim + self.window_size: Tuple[int, int] = window_size + self.num_heads: int = num_heads + self.sequential_attn: bool = sequential_attn + + self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) + self.attn_drop = nn.Dropout(drop_attn) + self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) + self.proj_drop = nn.Dropout(drop_proj) + # meta network for positional encodings + self.meta_mlp = Mlp( + 2, # x, y + hidden_features=meta_hidden_dim, + out_features=num_heads, + act_layer=nn.ReLU, + drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without? 
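+            # the meta network maps each log-spaced relative (dy, dx) offset to one
+            # bias value per attention head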
+ ) + # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) + self._make_pair_wise_relative_positions() + + def _make_pair_wise_relative_positions(self) -> None: + """Method initializes the pair-wise relative positions to compute the positional biases.""" + device = self.logit_scale.device + coordinates = torch.stack(torch.meshgrid([ + torch.arange(self.window_size[0], device=device), + torch.arange(self.window_size[1], device=device)]), dim=0).flatten(1) + relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] + relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() + relative_coordinates_log = torch.sign(relative_coordinates) * torch.log( + 1.0 + relative_coordinates.abs()) + self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False) + + def update_input_size(self, new_window_size: int, **kwargs: Any) -> None: + """Method updates the window size and so the pair-wise relative positions + + Args: + new_window_size (int): New window size + kwargs (Any): Unused + """ + # Set new window size and new pair-wise relative positions + self.window_size: int = new_window_size + self._make_pair_wise_relative_positions() + + def _relative_positional_encodings(self) -> torch.Tensor: + """Method computes the relative positional encodings + + Returns: + relative_position_bias (torch.Tensor): Relative positional encodings + (1, number of heads, window size ** 2, window size ** 2) + """ + window_area = self.window_size[0] * self.window_size[1] + relative_position_bias = self.meta_mlp(self.relative_coordinates_log) + relative_position_bias = relative_position_bias.transpose(1, 0).reshape( + self.num_heads, window_area, window_area + ) + relative_position_bias = relative_position_bias.unsqueeze(0) + return relative_position_bias + + def _forward_sequential( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + """ + # FIXME TODO figure out 'sequential' attention mentioned in paper (should reduce GPU memory) + assert False, "not implemented" + + def _forward_batch( + self, + x: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """This function performs standard (non-sequential) scaled cosine self-attention. + """ + Bw, L, C = x.shape + + qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + query, key, value = qkv.unbind(0) + + # compute attention map with scaled cosine attention + attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp() + attn = attn * logit_scale + attn = attn + self._relative_positional_encodings() + + if mask is not None: + # Apply mask if utilized + num_win: int = mask.shape[0] + attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) + attn = attn + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, L, L) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """ Forward pass. 
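+        Dispatches to _forward_sequential (currently a not-implemented stub) or to the
+        standard batched scaled-cosine attention in _forward_batch, based on sequential_attn.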
+ Args: + x (torch.Tensor): Input tensor of the shape (B * windows, N, C) + mask (Optional[torch.Tensor]): Attention mask for the shift case + + Returns: + Output tensor of the shape [B * windows, N, C] + """ + if self.sequential_attn: + return self._forward_sequential(x, mask) + else: + return self._forward_batch(x, mask) + + +class SwinTransformerBlock(nn.Module): + r"""This class implements the Swin transformer block. + + Args: + dim (int): Number of input channels + num_heads (int): Number of attention heads to be utilized + feat_size (Tuple[int, int]): Input resolution + window_size (Tuple[int, int]): Window size to be utilized + shift_size (int): Shifting size to be used + mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels + drop (float): Dropout in input mapping + drop_attn (float): Dropout rate of attention map + drop_path (float): Dropout in main path + extra_norm (bool): Insert extra norm on 'main' branch if True + sequential_attn (bool): If true sequential self-attention is performed + norm_layer (Type[nn.Module]): Type of normalization layer to be utilized + """ + + def __init__( + self, + dim: int, + num_heads: int, + feat_size: Tuple[int, int], + window_size: Tuple[int, int], + shift_size: Tuple[int, int] = (0, 0), + mlp_ratio: float = 4.0, + init_values: Optional[float] = 0, + drop: float = 0.0, + drop_attn: float = 0.0, + drop_path: float = 0.0, + extra_norm: bool = False, + sequential_attn: bool = False, + norm_layer: Type[nn.Module] = nn.LayerNorm, + ) -> None: + super(SwinTransformerBlock, self).__init__() + self.dim: int = dim + self.feat_size: Tuple[int, int] = feat_size + self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) + self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) + self.window_area = self.window_size[0] * self.window_size[1] + self.init_values: Optional[float] = init_values + + # attn branch + self.attn = WindowMultiHeadAttention( + dim=dim, + num_heads=num_heads, + window_size=self.window_size, + drop_attn=drop_attn, + drop_proj=drop, + sequential_attn=sequential_attn, + ) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() + + # mlp branch + self.mlp = Mlp( + in_features=dim, + hidden_features=int(dim * mlp_ratio), + drop=drop, + out_features=dim, + ) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() + + # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper. + # Also being used as final network norm and optional stage ending norm while still in a C-last format. 
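+        # norm3 is nn.Identity() unless extra_norm is set for this block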
+ self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() + + self._make_attention_mask() + self.init_weights() + + def _calc_window_shift(self, target_window_size): + window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)] + shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, self.target_shift_size)] + return tuple(window_size), tuple(shift_size) + + def _make_attention_mask(self) -> None: + """Method generates the attention mask used in shift case.""" + # Make masks for shift case + if any(self.shift_size): + # calculate attention mask for SW-MSA + H, W = self.feat_size + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + cnt = 0 + for h in ( + slice(0, -self.window_size[0]), + slice(-self.window_size[0], -self.shift_size[0]), + slice(-self.shift_size[0], None)): + for w in ( + slice(0, -self.window_size[1]), + slice(-self.window_size[1], -self.shift_size[1]), + slice(-self.shift_size[1], None)): + img_mask[:, h, w, :] = cnt + cnt += 1 + mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_area) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + self.register_buffer("attn_mask", attn_mask, persistent=False) + + def init_weights(self): + # extra, module specific weight init + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def update_input_size(self, new_window_size: Tuple[int, int], new_feat_size: Tuple[int, int]) -> None: + """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. 
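+
+        Because the position bias comes from the meta MLP applied to log-spaced relative
+        coordinates, changing the window size only regenerates buffers; no learned
+        parameters need resizing or interpolation.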
+ + Args: + new_window_size (int): New window size + new_feat_size (Tuple[int, int]): New input resolution + """ + # Update input resolution + self.feat_size: Tuple[int, int] = new_feat_size + self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(new_window_size)) + self.window_area = self.window_size[0] * self.window_size[1] + self.attn.update_input_size(new_window_size=self.window_size) + self._make_attention_mask() + + def _shifted_window_attn(self, x): + H, W = self.feat_size + B, L, C = x.shape + x = x.view(B, H, W, C) + + # cyclic shift + sh, sw = self.shift_size + do_shift: bool = any(self.shift_size) + if do_shift: + # FIXME PyTorch XLA needs cat impl, roll not lowered + # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1) + # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2) + x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) + + # partition windows + x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_windows * B, window_size * window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) + x = window_reverse(attn_windows, self.window_size, self.feat_size) # B H' W' C + + # reverse cyclic shift + if do_shift: + # FIXME PyTorch XLA needs cat impl, roll not lowered + # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1) + # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2) + x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) + + x = x.view(B, L, C) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward pass. + + Args: + x (torch.Tensor): Input tensor of the shape [B, C, H, W] + + Returns: + output (torch.Tensor): Output tensor of the shape [B, C, H, W] + """ + # post-norm branches (op -> norm -> drop) + x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant) + return x + + +class PatchMerging(nn.Module): + """ This class implements the patch merging as a strided convolution with a normalization before. + Args: + dim (int): Number of input channels + norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. + """ + + def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None: + super(PatchMerging, self).__init__() + self.norm = norm_layer(4 * dim) + self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ Forward pass. 
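+        Equivalent to a 2x2 space-to-depth (H and W halved, C -> 4 * C) followed by
+        LayerNorm and a linear reduction to 2 * C channels.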
+        Args:
+            x (torch.Tensor): Input tensor of the shape [B, C, H, W]
+        Returns:
+            output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2]
+        """
+        B, C, H, W = x.shape
+        # unfold + BCHW -> BHWC together
+        # ordering, 5, 3, 1 instead of 3, 5, 1 maintains compat with original swin v1 merge
+        x = x.reshape(B, C, H // 2, 2, W // 2, 2).permute(0, 2, 4, 5, 3, 1).flatten(3)
+        x = self.norm(x)
+        x = bhwc_to_bchw(self.reduction(x))
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """ 2D Image to Patch Embedding """
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
+        self.num_patches = self.grid_size[0] * self.grid_size[1]
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
+
+    def forward(self, x):
+        B, C, H, W = x.shape
+        _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
+        _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
+        x = self.proj(x)
+        x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+        return x
+
+
+class SwinTransformerStage(nn.Module):
+    r"""This class implements a stage of the Swin transformer including multiple layers.
+
+    Args:
+        embed_dim (int): Number of input channels
+        depth (int): Depth of the stage (number of layers)
+        downscale (bool): If true, the input is downsampled (see Fig. 3 of the V1 paper)
+        feat_size (Tuple[int, int]): input feature map size (H, W)
+        num_heads (int): Number of attention heads to be utilized
+        window_size (Tuple[int, int]): Window size to be utilized
+        mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels
+        drop (float): Dropout in input mapping
+        drop_attn (float): Dropout rate of attention map
+        drop_path (float): Dropout in main path
+        norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. Default: nn.LayerNorm
+        extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks
+        extra_norm_stage (bool): End each stage with an extra norm layer in main branch
+        sequential_attn (bool): If true, sequential self-attention is performed
+    """
+
+    def __init__(
+        self,
+        embed_dim: int,
+        depth: int,
+        downscale: bool,
+        num_heads: int,
+        feat_size: Tuple[int, int],
+        window_size: Tuple[int, int],
+        mlp_ratio: float = 4.0,
+        init_values: Optional[float] = 0.0,
+        drop: float = 0.0,
+        drop_attn: float = 0.0,
+        drop_path: Union[List[float], float] = 0.0,
+        norm_layer: Type[nn.Module] = nn.LayerNorm,
+        extra_norm_period: int = 0,
+        extra_norm_stage: bool = False,
+        sequential_attn: bool = False,
+    ) -> None:
+        super(SwinTransformerStage, self).__init__()
+        self.downscale: bool = downscale
+        self.grad_checkpointing: bool = False
+        self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size
+
+        self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) if downscale else nn.Identity()
+
+        def _extra_norm(index):
+            i = index + 1
+            if extra_norm_period and i % extra_norm_period == 0:
+                return True
+            return i == depth if extra_norm_stage else False
+
+        embed_dim = embed_dim * 2 if downscale else embed_dim
+        self.blocks = nn.Sequential(*[
+            SwinTransformerBlock(
+                dim=embed_dim,
+                num_heads=num_heads,
+                feat_size=self.feat_size,
+                window_size=window_size,
+                shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]),
+                mlp_ratio=mlp_ratio,
+                init_values=init_values,
+                drop=drop,
+                drop_attn=drop_attn,
+                drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path,
+                extra_norm=_extra_norm(index),
+                sequential_attn=sequential_attn,
+                norm_layer=norm_layer,
+            )
+            for index in range(depth)]
+        )
+
+    def update_input_size(self, new_window_size: int, new_feat_size: Tuple[int, int]) -> None:
+        """Method updates the feature resolution and the window size, and with them the pair-wise relative positions.
+
+        Args:
+            new_window_size (int): New window size
+            new_feat_size (Tuple[int, int]): New input resolution
+        """
+        self.feat_size: Tuple[int, int] = (
+            (new_feat_size[0] // 2, new_feat_size[1] // 2) if self.downscale else new_feat_size
+        )
+        for block in self.blocks:
+            block.update_input_size(new_window_size=new_window_size, new_feat_size=self.feat_size)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Forward pass.
+        Args:
+            x (torch.Tensor): Input tensor of the shape [B, C, H, W]
+        Returns:
+            output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] if downscale, else [B, C, H, W]
+        """
+        x = self.downsample(x)
+        B, C, H, W = x.shape
+        L = H * W
+
+        x = bchw_to_bhwc(x).reshape(B, L, C)
+        for block in self.blocks:
+            # Perform checkpointing if utilized
+            if self.grad_checkpointing and not torch.jit.is_scripting():
+                x = checkpoint.checkpoint(block, x)
+            else:
+                x = block(x)
+        x = bhwc_to_bchw(x.reshape(B, H, W, -1))
+        return x
+
+
+class SwinTransformerV2Cr(nn.Module):
+    r""" Swin Transformer V2
+        A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution`  -
+          https://arxiv.org/pdf/2111.09883
+
+    Args:
+        img_size (Tuple[int, int]): Input resolution.
+        window_size (Optional[int]): Window size. If None, img_size // img_window_ratio. Default: None
+        img_window_ratio (int): Window size to image size ratio. Default: 32
+        patch_size (int | tuple(int)): Patch size. Default: 4
+        in_chans (int): Number of input channels.
+        depths (Tuple[int, ...]): Number of blocks in each stage.
+        num_heads (Tuple[int, ...]): Number of attention heads to be utilized per stage.
+        embed_dim (int): Patch embedding dimension. Default: 96
+        num_classes (int): Number of output classes. Default: 1000
+        mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels. Default: 4
+        drop_rate (float): Dropout rate. Default: 0.0
+        attn_drop_rate (float): Dropout rate of attention map. Default: 0.0
+        drop_path_rate (float): Stochastic depth rate. Default: 0.0
+        norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. Default: nn.LayerNorm
+        extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks in stage
+        extra_norm_stage (bool): End each stage with an extra norm layer in main branch
+        sequential_attn (bool): If true, sequential self-attention is performed. Default: False
+    """
+
+    def __init__(
+        self,
+        img_size: Tuple[int, int] = (224, 224),
+        patch_size: int = 4,
+        window_size: Optional[int] = None,
+        img_window_ratio: int = 32,
+        in_chans: int = 3,
+        num_classes: int = 1000,
+        embed_dim: int = 96,
+        depths: Tuple[int, ...] = (2, 2, 6, 2),
+        num_heads: Tuple[int, ...] = (3, 6, 12, 24),
+        mlp_ratio: float = 4.0,
+        init_values: Optional[float] = 0.,
+        drop_rate: float = 0.0,
+        attn_drop_rate: float = 0.0,
+        drop_path_rate: float = 0.0,
+        norm_layer: Type[nn.Module] = nn.LayerNorm,
+        extra_norm_period: int = 0,
+        extra_norm_stage: bool = False,
+        sequential_attn: bool = False,
+        global_pool: str = 'avg',
+        weight_init='skip',
+        **kwargs: Any
+    ) -> None:
+        super(SwinTransformerV2Cr, self).__init__()
+        img_size = to_2tuple(img_size)
+        window_size = tuple([
+            s // img_window_ratio for s in img_size]) if window_size is None else to_2tuple(window_size)
+
+        self.num_classes: int = num_classes
+        self.patch_size: int = patch_size
+        self.img_size: Tuple[int, int] = img_size
+        self.window_size: Tuple[int, int] = window_size
+        self.num_features: int = int(embed_dim * 2 ** (len(depths) - 1))
+
+        self.patch_embed = PatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans,
+            embed_dim=embed_dim, norm_layer=norm_layer)
+        patch_grid_size: Tuple[int, int] = self.patch_embed.grid_size
+
+        drop_path_rate = torch.linspace(0.0, drop_path_rate, sum(depths)).tolist()
+        stages = []
+        for index, (depth, num_heads) in enumerate(zip(depths, num_heads)):
+            stage_scale = 2 ** max(index - 1, 0)
+            stages.append(
+                SwinTransformerStage(
+                    embed_dim=embed_dim * stage_scale,
+                    depth=depth,
+                    downscale=index != 0,
+                    feat_size=(patch_grid_size[0] // stage_scale, patch_grid_size[1] // stage_scale),
+                    num_heads=num_heads,
+                    window_size=window_size,
+                    mlp_ratio=mlp_ratio,
+                    init_values=init_values,
+                    drop=drop_rate,
+                    drop_attn=attn_drop_rate,
+                    drop_path=drop_path_rate[sum(depths[:index]):sum(depths[:index + 1])],
+                    extra_norm_period=extra_norm_period,
+                    extra_norm_stage=extra_norm_stage or (index + 1) == len(depths),  # last stage ends w/ norm
+                    sequential_attn=sequential_attn,
+                    norm_layer=norm_layer,
+                )
+            )
+        self.stages = nn.Sequential(*stages)
+
+        self.global_pool: str = global_pool
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes else nn.Identity()
+
+        # current weight init skips custom init and uses pytorch layer defaults, seems to work well
+        # FIXME more experiments needed
+        if weight_init != 'skip':
+            named_apply(init_weights, self)
+
+    def update_input_size(
+        self,
+        new_img_size: Optional[Tuple[int, int]] = None,
+        new_window_size: Optional[int] = None,
+        img_window_ratio: int = 32,
+    ) -> None:
+        """Method updates the image resolution to be processed, the window size, and with them the pair-wise relative positions.
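+
+        Example (hypothetical sizes)::
+
+            model = swinv2_cr_tiny_224()
+            model.update_input_size(new_img_size=(256, 256))  # window re-derived as 256 // 32 = 8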
+
+        Args:
+            new_window_size (Optional[int]): New window size, if None computed as new_img_size // img_window_ratio
+            new_img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used
+            img_window_ratio (int): Divisor for calculating window size from image size
+        """
+        # Check parameters
+        if new_img_size is None:
+            new_img_size = self.img_size
+        else:
+            new_img_size = to_2tuple(new_img_size)
+        if new_window_size is None:
+            new_window_size = tuple([s // img_window_ratio for s in new_img_size])
+        # Compute new patch resolution & update resolution of each stage
+        new_patch_grid_size = (new_img_size[0] // self.patch_size, new_img_size[1] // self.patch_size)
+        for index, stage in enumerate(self.stages):
+            stage_scale = 2 ** max(index - 1, 0)
+            stage.update_input_size(
+                new_window_size=new_window_size,
+                new_feat_size=(new_patch_grid_size[0] // stage_scale, new_patch_grid_size[1] // stage_scale),
+            )
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^patch_embed',  # stem and embed
+            blocks=r'^stages\.(\d+)' if coarse else [
+                (r'^stages\.(\d+)\.downsample', (0,)),
+                (r'^stages\.(\d+)\.\w+\.(\d+)', None),
+            ]
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in self.stages:
+            s.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self) -> nn.Module:
+        """Method returns the classification head of the model.
+        Returns:
+            head (nn.Module): Current classification head
+        """
+        return self.head
+
+    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
+        """Method resets the classification head.
+
+        Args:
+            num_classes (int): Number of classes to be predicted
+            global_pool (Optional[str]): If given, updates the pooling type
+        """
+        self.num_classes: int = num_classes
+        if global_pool is not None:
+            self.global_pool = global_pool
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.patch_embed(x)
+        x = self.stages(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        if self.global_pool == 'avg':
+            x = x.mean(dim=(2, 3))
+        return x if pre_logits else self.head(x)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def init_weights(module: nn.Module, name: str = ''):
+    # FIXME WIP determining if there's a better weight init
+    if isinstance(module, nn.Linear):
+        if 'qkv' in name:
+            # treat the weights of Q, K, V separately
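+            # xavier-style bound computed per projection: the qkv weight stacks Q, K, V row-wise,
+            # so the per-projection fan-out is shape[0] // 3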
+            val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
+            nn.init.uniform_(module.weight, -val, val)
+        elif 'head' in name:
+            nn.init.zeros_(module.weight)
+        else:
+            nn.init.xavier_uniform_(module.weight)
+        if module.bias is not None:
+            nn.init.zeros_(module.bias)
+    elif hasattr(module, 'init_weights'):
+        module.init_weights()
+
+
+def checkpoint_filter_fn(state_dict, model):
+    """ convert old tau-based checkpoints to the current logit_scale format """
+    out_dict = {}
+    if 'model' in state_dict:
+        # For deit models
+        state_dict = state_dict['model']
+    for k, v in state_dict.items():
+        if 'tau' in k:
+            # convert old tau based checkpoints -> logit_scale (inverse)
+            v = torch.log(1 / v)
+            k = k.replace('tau', 'logit_scale')
+        out_dict[k] = v
+    return out_dict
+
+
+def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+    model = build_model_with_cfg(
+        SwinTransformerV2Cr, variant, pretrained,
+        pretrained_filter_fn=checkpoint_filter_fn,
+        **kwargs
+    )
+    return model
+
+
+@register_model
+def swinv2_cr_tiny_384(pretrained=False, **kwargs):
+    """Swin-T V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 6, 2),
+        num_heads=(3, 6, 12, 24),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_tiny_224(pretrained=False, **kwargs):
+    """Swin-T V2 CR @ 224x224, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 6, 2),
+        num_heads=(3, 6, 12, 24),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs):
+    """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms.
+    ** Experimental, may make default if results are improved. **
+    """
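+    # the 'ns' suffix marks extra_norm_stage=True: each stage ends with an additional main-branch norm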
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 6, 2),
+        num_heads=(3, 6, 12, 24),
+        extra_norm_stage=True,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_small_384(pretrained=False, **kwargs):
+    """Swin-S V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 18, 2),
+        num_heads=(3, 6, 12, 24),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_small_224(pretrained=False, **kwargs):
+    """Swin-S V2 CR @ 224x224, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 18, 2),
+        num_heads=(3, 6, 12, 24),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_small_ns_224(pretrained=False, **kwargs):
+    """Swin-S V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms"""
+    model_kwargs = dict(
+        embed_dim=96,
+        depths=(2, 2, 18, 2),
+        num_heads=(3, 6, 12, 24),
+        extra_norm_stage=True,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_base_384(pretrained=False, **kwargs):
+    """Swin-B V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=128,
+        depths=(2, 2, 18, 2),
+        num_heads=(4, 8, 16, 32),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_base_224(pretrained=False, **kwargs):
+    """Swin-B V2 CR @ 224x224, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=128,
+        depths=(2, 2, 18, 2),
+        num_heads=(4, 8, 16, 32),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_base_ns_224(pretrained=False, **kwargs):
+    """Swin-B V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms"""
+    model_kwargs = dict(
+        embed_dim=128,
+        depths=(2, 2, 18, 2),
+        num_heads=(4, 8, 16, 32),
+        extra_norm_stage=True,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_large_384(pretrained=False, **kwargs):
+    """Swin-L V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=192,
+        depths=(2, 2, 18, 2),
+        num_heads=(6, 12, 24, 48),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_large_224(pretrained=False, **kwargs):
+    """Swin-L V2 CR @ 224x224, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=192,
+        depths=(2, 2, 18, 2),
+        num_heads=(6, 12, 24, 48),
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_huge_384(pretrained=False, **kwargs):
+    """Swin-H V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=352,
+        depths=(2, 2, 18, 2),
+        num_heads=(11, 22, 44, 88),  # head count not certain for Huge; trying different values for the 384 & 224 variants
+        extra_norm_period=6,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_huge_224(pretrained=False, **kwargs):
+    """Swin-H V2 CR @ 224x224, trained ImageNet-1k"""
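+    # like swinv2_cr_huge_384 above, this adds a main-branch norm every 6 blocks via extra_norm_period=6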
+    model_kwargs = dict(
+        embed_dim=352,
+        depths=(2, 2, 18, 2),
+        num_heads=(8, 16, 32, 64),  # head count not certain for Huge; trying different values for the 384 & 224 variants
+        extra_norm_period=6,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_giant_384(pretrained=False, **kwargs):
+    """Swin-G V2 CR @ 384x384, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=512,
+        depths=(2, 2, 42, 2),
+        num_heads=(16, 32, 64, 128),
+        extra_norm_period=6,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swinv2_cr_giant_224(pretrained=False, **kwargs):
+    """Swin-G V2 CR @ 224x224, trained ImageNet-1k"""
+    model_kwargs = dict(
+        embed_dim=512,
+        depths=(2, 2, 42, 2),
+        num_heads=(16, 32, 64, 128),
+        extra_norm_period=6,
+        **kwargs
+    )
+    return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **model_kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tnt.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tnt.py
new file mode 100644
index 0000000000000000000000000000000000000000..c73bb4b252c47158177d0fb8345fa38c1104542a
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tnt.py
@@ -0,0 +1,304 @@
+""" Transformer in Transformer (TNT) in PyTorch
+
+A PyTorch implementation of TNT as described in
+'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
+
+The official mindspore code is released and available at
+https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
+"""
+import math
+import torch
+import torch.nn as nn
+from torch.utils.checkpoint import checkpoint
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from custom_timm.models.helpers import build_model_with_cfg
+from custom_timm.models.layers import Mlp, DropPath, trunc_normal_
+from custom_timm.models.layers.helpers import to_2tuple
+from custom_timm.models.layers import _assert
+from custom_timm.models.registry import register_model
+from custom_timm.models.vision_transformer import resize_pos_embed
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'pixel_embed.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'tnt_s_patch16_224': _cfg(
+        url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
+        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+    ),
+    'tnt_b_patch16_224': _cfg(
+        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+    ),
+}
+
+
+class Attention(nn.Module):
+    """ Multi-Head Attention
+    """
+    def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.hidden_dim = hidden_dim
+        self.num_heads = num_heads
+        head_dim = hidden_dim // num_heads
+        self.head_dim = head_dim
+        self.scale = head_dim ** -0.5
+
+        self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias)
+        self.v = nn.Linear(dim, dim, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop, inplace=True)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop, inplace=True)
+
+    def forward(self, x):
+        B, N, C = x.shape
+        qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2,
0, 3, 1, 4) + q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__( + self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4., + qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4), + out_features=in_dim, act_layer=act_layer, drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, dim, num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), + out_features=dim, act_layer=act_layer, drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed = torch.cat( + [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], + dim=1) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in 
Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', + embed_dim=768, in_dim=48, depth=12, num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4): + super().__init__() + assert global_pool in ('', 'token', 'avg') + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.grad_checkpointing = False + + self.pixel_embed = PixelEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + blocks = [] + for i in range(depth): + blocks.append(Block( + dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[i], norm_layer=norm_layer)) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos + blocks=[ + (r'^blocks\.(\d+)', None), + (r'^norm', (99999,)), + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'token', 'avg') + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + if 
self.grad_checkpointing and not torch.jit.is_scripting():
+            for blk in self.blocks:
+                pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed)
+        else:
+            for blk in self.blocks:
+                pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
+
+        patch_embed = self.norm(patch_embed)
+        return patch_embed
+
+    def forward_head(self, x, pre_logits: bool = False):
+        if self.global_pool:
+            x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
+        return x if pre_logits else self.head(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def checkpoint_filter_fn(state_dict, model):
+    """ resize the patch position embedding if the pretrained weights don't match the model """
+    if state_dict['patch_pos'].shape != model.patch_pos.shape:
+        state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'],
+            model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size)
+    return state_dict
+
+
+def _create_tnt(variant, pretrained=False, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+
+    model = build_model_with_cfg(
+        TNT, variant, pretrained,
+        pretrained_filter_fn=checkpoint_filter_fn,
+        **kwargs)
+    return model
+
+
+@register_model
+def tnt_s_patch16_224(pretrained=False, **kwargs):
+    model_cfg = dict(
+        patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4,
+        qkv_bias=False, **kwargs)
+    model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg)
+    return model
+
+
+@register_model
+def tnt_b_patch16_224(pretrained=False, **kwargs):
+    model_cfg = dict(
+        patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4,
+        qkv_bias=False, **kwargs)
+    model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tresnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tresnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..2469acd265aaff10c9d3b04a5b9db090f3939a7b
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/tresnet.py
@@ -0,0 +1,331 @@
+"""
+TResNet: High Performance GPU-Dedicated Architecture
+https://arxiv.org/pdf/2003.13630.pdf
+
+Original model: https://github.com/mrT23/TResNet
+
+"""
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+
+from .helpers import build_model_with_cfg
+from .layers import SpaceToDepthModule, BlurPool2d, InplaceAbn, ClassifierHead, SEModule
+from .registry import register_model
+
+__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl']
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
+        'crop_pct': 0.875, 'interpolation': 'bilinear',
+        'mean': (0., 0., 0.), 'std': (1., 1., 1.),
+        'first_conv': 'body.conv1.0', 'classifier': 'head.fc',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'tresnet_m': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_1k_miil_83_1-d236afcb.pth'),
+    'tresnet_m_miil_in21k': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_miil_in21k-901b6ed4.pth', num_classes=11221),
+    'tresnet_l': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'),
+    'tresnet_xl': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), + 'tresnet_m_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), + 'tresnet_l_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), + 'tresnet_xl_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth'), + + 'tresnet_v2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_v2_83_9-f36e4445.pth'), +} + + +def IABN2Float(module: nn.Module) -> nn.Module: + """If `module` is IABN don't use half precision.""" + if isinstance(module, InplaceAbn): + module.float() + for child in module.children(): + IABN2Float(child) + return module + + +def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): + return nn.Sequential( + nn.Conv2d( + ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), + InplaceAbn(nf, act_layer=act_layer, act_param=act_param) + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): + super(BasicBlock, self).__init__() + if stride == 1: + self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) + else: + if aa_layer is None: + self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) + else: + self.conv1 = nn.Sequential( + conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.se is not None: + out = self.se(out) + + out = out + shortcut + out = self.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__( + self, inplanes, planes, stride=1, downsample=None, use_se=True, + act_layer="leaky_relu", aa_layer=None): + super(Bottleneck, self).__init__() + self.conv1 = conv2d_iabn( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) + if stride == 1: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) + else: + if aa_layer is None: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) + else: + self.conv2 = nn.Sequential( + conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = conv2d_iabn( + planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") + + self.act = nn.ReLU(inplace=True) + self.downsample = 
downsample + self.stride = stride + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + out = self.conv3(out) + out = out + shortcut # no inplace + out = self.act(out) + + return out + + +class TResNet(nn.Module): + def __init__( + self, + layers, + in_chans=3, + num_classes=1000, + width_factor=1.0, + v2=False, + global_pool='fast', + drop_rate=0., + ): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + if v2: + self.inplanes = self.inplanes // 8 * 8 + self.planes = self.planes // 8 * 8 + + conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) + layer1 = self._make_layer( + Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) + layer2 = self._make_layer( + Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) + layer3 = self._make_layer( + Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) + layer4 = self._make_layer( + Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) + + # body + self.body = nn.Sequential(OrderedDict([ + ('SpaceToDepth', SpaceToDepthModule()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4)])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? + dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), + dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), + dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), + dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), + ] + + # head + self.num_features = (self.planes * 8) * Bottleneck.expansion + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # model initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # residual connections special initialization + for m in self.modules(): + if isinstance(m, BasicBlock): + m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero + if isinstance(m, Bottleneck): + m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero + if isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + + def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + layers = [] + if stride == 2: + # avg pooling before 1x1 conv + layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) + layers += [conv2d_iabn( + self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")] + downsample = nn.Sequential(*layers) + + layers = [] + layers.append(block( + self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer)) + 
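+        # the first block applies the stride / downsample path; the remaining blocks run at the expanded width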
self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer)) + return nn.Sequential(*layers) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='fast'): + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + return self.body(x) + + def forward_head(self, x, pre_logits: bool = False): + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_tresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + TResNet, variant, pretrained, + feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def tresnet_m(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_miil_in21k(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_v2_l(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True, **kwargs) + return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/twins.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/twins.py new file mode 100644 index 0000000000000000000000000000000000000000..dfde68ca6e85558e2b094d138fe7e522395404f8 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/twins.py @@ -0,0 +1,449 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info 
below
+
+"""
+# --------------------------------------------------------
+# Twins
+# Copyright (c) 2021 Meituan
+# Licensed under The Apache 2.0 License [see LICENSE for details]
+# Written by Xinjie Li, Xiangxiang Chu
+# --------------------------------------------------------
+import math
+from copy import deepcopy
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from functools import partial
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .layers import Mlp, DropPath, to_2tuple, trunc_normal_
+from .fx_features import register_notrace_module
+from .registry import register_model
+from .vision_transformer import Attention
+from .helpers import build_model_with_cfg
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'patch_embeds.0.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'twins_pcpvt_small': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth',
+    ),
+    'twins_pcpvt_base': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth',
+    ),
+    'twins_pcpvt_large': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth',
+    ),
+    'twins_svt_small': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth',
+    ),
+    'twins_svt_base': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth',
+    ),
+    'twins_svt_large': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth',
+    ),
+}
+
+Size_ = Tuple[int, int]
+
+
+@register_notrace_module  # reason: FX can't symbolically trace control flow in forward method
+class LocallyGroupedAttn(nn.Module):
+    """ LSA: self attention within a group
+    """
+    def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1):
+        assert ws != 1
+        super(LocallyGroupedAttn, self).__init__()
+        assert dim % num_heads == 0, f"dim {dim} should be divisible by num_heads {num_heads}."
+
+        self.dim = dim
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=True)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+        self.ws = ws
+
+    def forward(self, x, size: Size_):
+        # There are two implementations for this function: zero padding and masking. We observe no
+        # obvious performance difference between them, so either may be used. The padding implementation
+        # below is the simpler one; the masking variant (kept commented out underneath) is arguably more
+        # precise at the padded borders.
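+        # Worked example (hypothetical numbers): H = W = 56, ws = 7 -> no padding, _h = _w = 8, i.e.
+        # attention runs independently within 8 * 8 = 64 groups of 7 * 7 = 49 tokens each.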
+ B, N, C = x.shape + H, W = size + x = x.view(B, H, W, C) + pad_l = pad_t = 0 + pad_r = (self.ws - W % self.ws) % self.ws + pad_b = (self.ws - H % self.ws) % self.ws + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + _h, _w = Hp // self.ws, Wp // self.ws + x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) + qkv = self.qkv(x).reshape( + B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + # def forward_mask(self, x, size: Size_): + # B, N, C = x.shape + # H, W = size + # x = x.view(B, H, W, C) + # pad_l = pad_t = 0 + # pad_r = (self.ws - W % self.ws) % self.ws + # pad_b = (self.ws - H % self.ws) % self.ws + # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + # _, Hp, Wp, _ = x.shape + # _h, _w = Hp // self.ws, Wp // self.ws + # mask = torch.zeros((1, Hp, Wp), device=x.device) + # mask[:, -pad_b:, :].fill_(1) + # mask[:, :, -pad_r:].fill_(1) + # + # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C + # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) + # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws + # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) + # qkv = self.qkv(x).reshape( + # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + # # n_h, B, _w*_h, nhead, ws*ws, dim + # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head + # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws + # attn = attn + attn_mask.unsqueeze(2) + # attn = attn.softmax(dim=-1) + # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head + # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + # if pad_r > 0 or pad_b > 0: + # x = x[:, :H, :W, :].contiguous() + # x = x.reshape(B, N, C) + # x = self.proj(x) + # x = self.proj_drop(x) + # return x + + +class GlobalSubSampleAttn(nn.Module): + """ GSA: using a key to summarize the information for a group to be efficient. + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
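+        # keys/values are computed from a feature map sub-sampled by sr_ratio (strided conv + norm below),
+        # reducing attention cost from O(N^2) to roughly O(N^2 / sr_ratio^2)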
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Size_): + x = x + self.drop_path(self.attn(self.norm1(x), size)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." 
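+        # divisibility was asserted above, so the patch grid below is exact (no padding needed)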
+        self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
+        self.num_patches = self.H * self.W
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        self.norm = nn.LayerNorm(embed_dim)
+
+    def forward(self, x) -> Tuple[torch.Tensor, Size_]:
+        B, C, H, W = x.shape
+
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        x = self.norm(x)
+        out_size = (H // self.patch_size[0], W // self.patch_size[1])
+
+        return x, out_size
+
+
+class Twins(nn.Module):
+    """ Twins Vision Transformer (Revisiting Spatial Attention)
+
+    Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
+    """
+    def __init__(
+            self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg',
+            embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), depths=(3, 4, 6, 3),
+            sr_ratios=(8, 4, 2, 1), wss=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
+            norm_layer=partial(nn.LayerNorm, eps=1e-6), block_cls=Block):
+        super().__init__()
+        self.num_classes = num_classes
+        self.global_pool = global_pool
+        self.depths = depths
+        self.embed_dims = embed_dims
+        self.num_features = embed_dims[-1]
+        self.grad_checkpointing = False
+
+        img_size = to_2tuple(img_size)
+        prev_chs = in_chans
+        self.patch_embeds = nn.ModuleList()
+        self.pos_drops = nn.ModuleList()
+        for i in range(len(depths)):
+            self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
+            self.pos_drops.append(nn.Dropout(p=drop_rate))
+            prev_chs = embed_dims[i]
+            img_size = tuple(t // patch_size for t in img_size)
+            patch_size = 2
+
+        self.blocks = nn.ModuleList()
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
+        cur = 0
+        for k in range(len(depths)):
+            _block = nn.ModuleList([block_cls(
+                dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate,
+                attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k],
+                ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])])
+            self.blocks.append(_block)
+            cur += depths[k]
+
+        self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
+
+        self.norm = norm_layer(self.num_features)
+
+        # classification head
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+        # init weights
+        self.apply(self._init_weights)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return set(['pos_block.'
+ n for n, p in self.pos_block.named_parameters()]) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + matcher = dict( + stem=r'^patch_embeds.0', # stem and embed + blocks=[ + (r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None), + ('^norm', (99999,)) + ] if coarse else [ + (r'^blocks\.(\d+)\.(\d+)', None), + (r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)), + (r'^norm', (99999,)) + ] + ) + return matcher + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg') + self.global_pool = global_pool + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + x = x.mean(dim=1) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg(Twins, variant, pretrained, **kwargs) + return model + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 
4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vgg.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..11cf08bd5426f58d4a831849b6780d4b05b1b592 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vgg.py @@ -0,0 +1,279 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union, List, Dict, Any, cast + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .fx_features import register_notrace_module +from .layers import ClassifierHead +from .registry import register_model + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'vgg11': _cfg(url='https://download.pytorch.org/models/vgg11-bbd30ac9.pth'), + 'vgg13': _cfg(url='https://download.pytorch.org/models/vgg13-c768596a.pth'), + 'vgg16': _cfg(url='https://download.pytorch.org/models/vgg16-397923af.pth'), + 'vgg19': _cfg(url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'), + 'vgg11_bn': _cfg(url='https://download.pytorch.org/models/vgg11_bn-6002323d.pth'), + 'vgg13_bn': _cfg(url='https://download.pytorch.org/models/vgg13_bn-abd245e5.pth'), + 'vgg16_bn': _cfg(url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth'), + 'vgg19_bn': _cfg(url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth'), +} + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class ConvMlp(nn.Module): + + def __init__( + self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, + drop_rate: float = 0.2, 
act_layer: nn.Module = None, conv_layer: nn.Module = None): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + cfg: List[Any], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + mlp_ratio: float = 1.0, + act_layer: nn.Module = nn.ReLU, + conv_layer: nn.Module = nn.Conv2d, + norm_layer: nn.Module = None, + global_pool: str = 'avg', + drop_rate: float = 0., + ) -> None: + super(VGG, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.num_features = 4096 + self.drop_rate = drop_rate + self.grad_checkpointing = False + self.use_norm = norm_layer is not None + self.feature_info = [] + prev_chs = in_chans + net_stride = 1 + pool_layer = nn.MaxPool2d + layers: List[nn.Module] = [] + for v in cfg: + last_idx = len(layers) - 1 + if v == 'M': + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) + layers += [pool_layer(kernel_size=2, stride=2)] + net_stride *= 2 + else: + v = cast(int, v) + conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) + if norm_layer is not None: + layers += [conv2d, norm_layer(v), act_layer(inplace=True)] + else: + layers += [conv2d, act_layer(inplace=True)] + prev_chs = v + self.features = nn.Sequential(*layers) + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) + + self.pre_logits = ConvMlp( + prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, + drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer) + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + self._initialize_weights() + + @torch.jit.ignore + def group_matcher(self, coarse=False): + # this treats BN layers as separate groups for bn variants, a lot of effort to fix that + return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)') + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + assert not enable, 'gradient checkpointing not supported' + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + return x + + def forward_head(self, x: torch.Tensor, pre_logits: bool = False): + x = self.pre_logits(x) + return x if pre_logits else self.head(x) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.forward_head(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, 
mode='fan_out', nonlinearity='relu')
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0, 0.01)
+                nn.init.constant_(m.bias, 0)
+
+
+def _filter_fn(state_dict):
+    """ convert patch embedding weight from manual patchify + linear proj to conv"""
+    out_dict = {}
+    for k, v in state_dict.items():
+        k_r = k
+        k_r = k_r.replace('classifier.0', 'pre_logits.fc1')
+        k_r = k_r.replace('classifier.3', 'pre_logits.fc2')
+        k_r = k_r.replace('classifier.6', 'head.fc')
+        if 'classifier.0.weight' in k:
+            v = v.reshape(-1, 512, 7, 7)
+        if 'classifier.3.weight' in k:
+            v = v.reshape(-1, 4096, 1, 1)
+        out_dict[k_r] = v
+    return out_dict
+
+
+def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG:
+    cfg = variant.split('_')[0]
+    # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5]
+    out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5))
+    model = build_model_with_cfg(
+        VGG, variant, pretrained,
+        model_cfg=cfgs[cfg],
+        feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
+        pretrained_filter_fn=_filter_fn,
+        **kwargs)
+    return model
+
+
+@register_model
+def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 11-layer model (configuration "A") from
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(**kwargs)
+    return _create_vgg('vgg11', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 11-layer model (configuration "A") with batch normalization
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
+    return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 13-layer model (configuration "B")
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(**kwargs)
+    return _create_vgg('vgg13', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 13-layer model (configuration "B") with batch normalization
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
+    return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 16-layer model (configuration "D")
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(**kwargs)
+    return _create_vgg('vgg16', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 16-layer model (configuration "D") with batch normalization
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
+    return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 19-layer model (configuration "E")
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(**kwargs)
+    return _create_vgg('vgg19', pretrained=pretrained, **model_args)
+
+
+@register_model
+def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG:
+    r"""VGG 19-layer model (configuration "E") with batch normalization
+    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/abs/1409.1556>`_.
+    """
+    model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs)
+    return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/visformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/visformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a95be8cbc7c92c6242cb3c3e762949f6f6be8f4
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/visformer.py
@@ -0,0 +1,429 @@
+""" Visformer
+
+Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533
+
+From original at https://github.com/danczs/Visformer
+
+Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
+"""
+from copy import deepcopy
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg, checkpoint_seq
+from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier
+from .registry import register_model
+
+
+__all__ = ['Visformer']
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
+        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'stem.0', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = dict(
+    visformer_tiny=_cfg(),
+    visformer_small=_cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth'
+    ),
+)
+
+
+class SpatialMlp(nn.Module):
+    def __init__(
+            self, in_features, hidden_features=None, out_features=None,
+            act_layer=nn.GELU, drop=0., group=8, spatial_conv=False):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        drop_probs = to_2tuple(drop)
+
+        self.in_features = in_features
+        self.out_features = out_features
+        self.spatial_conv = spatial_conv
+        if self.spatial_conv:
+            if group < 2:  # net setting
+                hidden_features = in_features * 5 // 6
+            else:
+                hidden_features = in_features * 2
+        self.hidden_features = hidden_features
+        self.group = group
+        self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False)
+        self.act1 = act_layer()
+        self.drop1 = nn.Dropout(drop_probs[0])
+        if self.spatial_conv:
+            self.conv2 = nn.Conv2d(
+                hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False)
+            self.act2 = act_layer()
+        else:
+            self.conv2 = None
+            self.act2 = None
+        self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False)
+        self.drop3 = nn.Dropout(drop_probs[1])
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.act1(x)
+        x = self.drop1(x)
+        if self.conv2 is not None:
+            x = self.conv2(x)
+            x = self.act2(x)
+        x = self.conv3(x)
+        x = self.drop3(x)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.dim = dim
+        self.num_heads = num_heads
+        head_dim = round(dim // num_heads
* head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x.unbind(0) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__( + self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, + group=8, attn_disabled=False, spatial_conv=False): + super().__init__() + self.spatial_conv = spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop) + + self.norm2 = norm_layer(dim) + self.mlp = SpatialMlp( + in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop, + group=group, spatial_conv=spatial_conv) # new setting + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, + depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111', + vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.pos_embed = pos_embed + self.grad_checkpointing = False + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // patch_size for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 2) for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 4, 
in_chans=self.init_channels, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 4) for x in img_size] + + if self.pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=drop_rate) + self.stage1 = nn.Sequential(*[ + Block( + dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1') + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + self.stage2 = nn.Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1') + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, + embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + self.stage3 = nn.Sequential(*[ + Block( + dim=embed_dim*2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1') + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + # head + self.num_features = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # weights init + if self.pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) 
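+
+    # NOTE: minimal usage sketch (a reviewer-added comment, not part of the upstream timm
+    # file). `attn_stage` and `spatial_conv` are three-character flag strings, one per
+    # stage: attn_stage[i] == '0' disables self-attention in stage i+1, and
+    # spatial_conv[i] == '1' enables the grouped 3x3 conv inside SpatialMlp. With the
+    # visformer_small settings registered below, stage 1 is conv-only and stages 2-3
+    # use attention:
+    #
+    #   model = Visformer(
+    #       init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6,
+    #       attn_stage='011', spatial_conv='100',
+    #       norm_layer=nn.BatchNorm2d, embed_norm=nn.BatchNorm2d, conv_init=True)
+    #   logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])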
+ + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^patch_embed1|pos_embed1|stem', # stem and embed + blocks=[ + (r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None), + (r'^(?:patch_embed|pos_embed)(\d+)', (0,)), + (r'^norm', (99999,)) + ] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed: + x = self.pos_drop(x + self.pos_embed1) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage1, x) + else: + x = self.stage1(x) + + # stage 2 + if not self.vit_stem: + x = self.patch_embed2(x) + if self.pos_embed: + x = self.pos_drop(x + self.pos_embed2) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage2, x) + else: + x = self.stage2(x) + + # stage3 + if not self.vit_stem: + x = self.patch_embed3(x) + if self.pos_embed: + x = self.pos_drop(x + self.pos_embed3) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.stage3, x) + else: + x = self.stage3(x) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + x = self.global_pool(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs) + return model + + +@register_model +def visformer_tiny(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = 
Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..52c406b23b7dc1aace4e955febe59964b666894b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer.py @@ -0,0 +1,1256 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2020, Ross Wightman +""" +import math +import logging +from functools import partial +from collections import OrderedDict +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD,\ + OPENAI_CLIP_MEAN, OPENAI_CLIP_STD +from .helpers import build_model_with_cfg, resolve_pretrained_cfg, named_apply, adapt_input_conv, checkpoint_seq +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_ +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (weights from official Google JAX impl) + 'vit_tiny_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_tiny_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_base_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_base_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 
'vit_base_patch8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch32_224': _cfg( + url='', # no official model weights for this combo, only for in21k + ), + 'vit_large_patch32_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + + 'vit_large_patch14_224': _cfg(url=''), + 'vit_huge_patch14_224': _cfg(url=''), + 'vit_giant_patch14_224': _cfg(url=''), + 'vit_gigantic_patch14_224': _cfg(url=''), + + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_large_patch32_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + num_classes=21843), + 'vit_huge_patch14_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub_id='timm/vit_huge_patch14_224_in21k', + num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_224_sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'), + 'vit_base_patch16_224_sam': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'), + + # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) + 'vit_small_patch16_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_small_patch8_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', + 
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch16_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + 'vit_base_patch8_224_dino': _cfg( + url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), + + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), + 'vit_base_patch16_224_miil': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', + mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), + + 'vit_base_patch16_rpn_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth'), + + # experimental (may be removed) + 'vit_base_patch32_plus_256': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), + 'vit_base_patch16_plus_240': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), + 'vit_small_patch16_36x1_224': _cfg(url=''), + 'vit_small_patch16_18x2_224': _cfg(url=''), + 'vit_base_patch16_18x2_224': _cfg(url=''), + + 'vit_base_patch32_224_clip_laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), + 'vit_large_patch14_224_clip_laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=768), + 'vit_huge_patch14_224_clip_laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=1024), + 'vit_giant_patch14_224_clip_laion2b': _cfg( + hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', + hf_hub_filename='open_clip_pytorch_model.bin', + mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=1024), + +} + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * 
torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + init_values=None, + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class ResPostBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + init_values=None, + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm + ): + super().__init__() + self.init_values = init_values + + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.init_weights() + + def init_weights(self): + # NOTE this init overrides that base model init with specific changes for the block type + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def forward(self, x): + x = x + self.drop_path1(self.norm1(self.attn(x))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class ParallelBlock(nn.Module): + + def __init__( + self, + dim, + num_heads, + num_parallel=2, + mlp_ratio=4., + qkv_bias=False, + init_values=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm + ): + super().__init__() + self.num_parallel = num_parallel + self.attns = nn.ModuleList() + self.ffns = nn.ModuleList() + for _ in range(num_parallel): + self.attns.append(nn.Sequential(OrderedDict([ + ('norm', norm_layer(dim)), + ('attn', Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)), + ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), + ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) + ]))) + self.ffns.append(nn.Sequential(OrderedDict([ + ('norm', norm_layer(dim)), + ('mlp', Mlp(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)), + ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), + ('drop_path', DropPath(drop_path) if drop_path > 0. 
else nn.Identity()) + ]))) + + def _forward_jit(self, x): + x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0) + x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0) + return x + + @torch.jit.ignore + def _forward(self, x): + x = x + sum(attn(x) for attn in self.attns) + x = x + sum(ffn(x) for ffn in self.ffns) + return x + + def forward(self, x): + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return self._forward_jit(x) + else: + return self._forward(x) + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='token', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + init_values=None, + class_token=True, + no_embed_class=False, + pre_norm=False, + fc_norm=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + weight_init='', + embed_layer=PatchEmbed, + norm_layer=None, + act_layer=None, + block_fn=Block, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + global_pool (str): type of global pooling for final sequence (default: 'token') + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + init_values: (float): layer-scale init values + class_token (bool): use class token + fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None) + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + weight_init (str): weight init scheme + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + act_layer: (nn.Module): MLP activation layer + """ + super().__init__() + assert global_pool in ('', 'avg', 'token') + assert class_token or global_pool != 'token' + use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_prefix_tokens = 1 if class_token else 0 + self.no_embed_class = no_embed_class + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + bias=not pre_norm, # disable bias if pre-norm is used (e.g. 
CLIP) + ) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None + embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens + self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) + self.pos_drop = nn.Dropout(p=drop_rate) + self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + block_fn( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + init_values=init_values, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + act_layer=act_layer + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity() + + # Classifier Head + self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if weight_init != 'skip': + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'moco', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + named_apply(get_init_weights_vit(mode, head_bias), self) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + init_weights_vit_timm(m) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|pos_embed|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes: int, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def _pos_embed(self, x): + if self.no_embed_class: + # deit-3, updated JAX (big vision) + # position embedding does not overlap with class token, add then concat + x = x + self.pos_embed + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + else: + # original timm, JAX, and deit vit impl + # pos_embed has entry for class token, concat then add + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + x = x + self.pos_embed + return self.pos_drop(x) + + def forward_features(self, x): + x = self.patch_embed(x) + x = self._pos_embed(x) + x = self.norm_pre(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + return x if pre_logits else self.head(x) + 
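+    # NOTE: minimal usage sketch (a reviewer-added comment, not part of the upstream timm
+    # file; the shapes assume the default 224x224 input and a single class token). With
+    # global_pool='token' the head sees the class-token embedding; with 'avg' the patch
+    # tokens are mean-pooled and fc_norm is applied before the head:
+    #
+    #   vit = VisionTransformer(img_size=224, patch_size=16, embed_dim=192, depth=12,
+    #                           num_heads=3, global_pool='avg')
+    #   tokens = vit.forward_features(torch.randn(2, 3, 224, 224))  # (2, 197, 192)
+    #   pooled = vit.forward_head(tokens, pre_logits=True)          # (2, 192)
+    #   logits = vit.forward_head(tokens)                           # (2, 1000)
+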
+ def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def init_weights_vit_timm(module: nn.Module, name: str = ''): + """ ViT weight initialization, original timm impl (for reproducibility) """ + if isinstance(module, nn.Linear): + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.): + """ ViT weight initialization, matching JAX (Flax) impl """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def init_weights_vit_moco(module: nn.Module, name: str = ''): + """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """ + if isinstance(module, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1])) + nn.init.uniform_(module.weight, -val, val) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_init_weights_vit(mode='jax', head_bias: float = 0.): + if 'jax' in mode: + return partial(init_weights_vit_jax, head_bias=head_bias) + elif 'moco' in mode: + return init_weights_vit_moco + else: + return init_weights_vit_timm + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + 
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, + model.pos_embed, + getattr(model, 'num_prefix_tokens', 1), + model.patch_embed.grid_size + ) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights + # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def resize_pos_embed(posemb, posemb_new, num_prefix_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if num_prefix_tokens: + posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:] + ntok_new -= num_prefix_tokens + else: + posemb_prefix, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_prefix, posemb_grid], dim=1) + return posemb + + +def _convert_openai_clip(state_dict, model): + out_dict = {} + swaps = [ + ('visual.', ''), ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'), + ('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'), + ('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'), + ] + for k, v in state_dict.items(): + if not k.startswith('visual.'): + continue + for sp in swaps: + k = k.replace(sp[0], sp[1]) + + if k == 'proj': + k = 'head.weight' + v = v.transpose(0, 1) + out_dict['head.bias'] = torch.zeros(v.shape[0]) + elif k == 'class_embedding': + k = 'cls_token' + v = v.unsqueeze(0).unsqueeze(1) + elif k == 'pos_embed': + v = v.unsqueeze(0) + if v.shape[1] != model.pos_embed.shape[1]: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, + model.pos_embed, + 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), + model.patch_embed.grid_size + ) + out_dict[k] = v + return out_dict + + +def checkpoint_filter_fn(state_dict, model, adapt_layer_scale=False): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + import re + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + + if 'visual.class_embedding' in state_dict: + return _convert_openai_clip(state_dict, model) + + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, + model.pos_embed, + 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), + model.patch_embed.grid_size + ) + elif adapt_layer_scale and 'gamma_' in k: + # remap layer-scale gamma into sub-module (deit3 models) + k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) + elif 'pre_logits' in k: + # NOTE representation layer removed as not used in latest 21k/1k pretrained weights + continue + out_dict[k] = v + return out_dict + + +def _create_vision_transformer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision 
Transformer models.') + + pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) + model = build_model_with_cfg( + VisionTransformer, variant, pretrained, + pretrained_cfg=pretrained_cfg, + pretrained_filter_fn=checkpoint_filter_fn, + pretrained_custom_load='npz' in pretrained_cfg['url'], + **kwargs) + return model + + +@register_model +def vit_tiny_patch16_224(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) @ 384x384. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) at 384x384. + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
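+
+    A minimal usage sketch for the variants registered in this file
+    (hypothetical example; assumes `torch` is imported and pretrained=False,
+    so no weight download is attempted):
+
+        model = vit_base_patch32_384(pretrained=False, num_classes=10)
+        x = torch.randn(1, 3, 384, 384)
+        logits = model(x)  # -> torch.Size([1, 10])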
+ """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch14_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/14) + """ + model_kwargs = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + """ + model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_gigantic_patch14_224(pretrained=False, **kwargs): + """ ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + """ + model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. 
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_sam(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. 
Paper: https://arxiv.org/abs/2106.01548 + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_sam', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_sam(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_sam', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_dino(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch8_224_dino(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch8_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_dino(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) /w DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224_dino(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/8) w/ DINO pretrained weights (no head) - https://arxiv.org/abs/2104.14294 + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224_dino', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs) + return model + + +# Experimental models below + +@register_model +def vit_base_patch32_plus_256(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32+) + """ + model_kwargs = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-5, **kwargs) + model = _create_vision_transformer('vit_base_patch32_plus_256', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_plus_240(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16+) + """ + model_kwargs = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-5, **kwargs) + model = _create_vision_transformer('vit_base_patch16_plus_240', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_rpn_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ residual post-norm + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, init_values=1e-5, class_token=False, + block_fn=ResPostBlock, global_pool=kwargs.pop('global_pool', 'avg'), **kwargs) + model = _create_vision_transformer('vit_base_patch16_rpn_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_36x1_224(pretrained=False, **kwargs): + """ ViT-Base w/ LayerScale + 36 x 1 (36 block serial) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-5, **kwargs) + model = _create_vision_transformer('vit_small_patch16_36x1_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_18x2_224(pretrained=False, **kwargs): + """ ViT-Small w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. + """ + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-5, block_fn=ParallelBlock, **kwargs) + model = _create_vision_transformer('vit_small_patch16_18x2_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_18x2_224(pretrained=False, **kwargs): + """ ViT-Base w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. + Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelBlock, **kwargs) + model = _create_vision_transformer('vit_base_patch16_18x2_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_clip_laion2b(pretrained=False, **kwargs): + """ ViT-B/32 + Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. 
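+
+    Weight-porting note: CLIP-format checkpoints for these models are remapped
+    by `_convert_openai_clip` (defined above) via ordered substring swaps. One
+    worked key, for illustration:
+
+        'visual.transformer.resblocks.0.ln_1.weight' -> 'blocks.0.norm1.weight'
+        # 'visual.' dropped, 'transformer.resblocks.' -> 'blocks.', 'ln_' -> 'norm'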
+ """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_clip_laion2b', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch14_224_clip_laion2b(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/14) + Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ + model_kwargs = dict( + patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_large_patch14_224_clip_laion2b', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_clip_laion2b(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_clip_laion2b', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_giant_patch14_224_clip_laion2b(pretrained=False, **kwargs): + """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 + Pretrained weights from CLIP image tower trained on LAION-2B image-text pairs. + """ + model_kwargs = dict( + patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, + pre_norm=True, norm_layer=nn.LayerNorm, **kwargs) + model = _create_vision_transformer('vit_giant_patch14_224_clip_laion2b', pretrained=pretrained, **model_kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_hybrid.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..1e8a2b1354094fd5d73e4e3c4a6231ed3f44b64b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_hybrid.py @@ -0,0 +1,371 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. 
+ +Hacked together by / Copyright 2020, Ross Wightman +""" +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import StdConv2dSame, StdConv2d, to_2tuple +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .registry import register_model +from custom_timm.models.vision_transformer import _create_vision_transformer + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_r26_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + ), + 'vit_small_r26_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_r26_s32_224': _cfg(), + 'vit_base_r50_s16_224': _cfg(), + 'vit_base_r50_s16_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz' + ), + 'vit_large_r50_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0 + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv'), + 'vit_small_r26_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + 'vit_base_r50_s16_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + num_classes=21843, crop_pct=0.9), + 'vit_large_r50_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843, 
crop_pct=0.9), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), +} + + +class HybridEmbed(nn.Module): + """ CNN Feature Map Embedding + Extract feature map from CNN, flatten, project to embedding dim. + """ + def __init__( + self, + backbone, + img_size=224, + patch_size=1, + feature_size=None, + in_chans=3, + embed_dim=768, + bias=True, + ): + super().__init__() + assert isinstance(backbone, nn.Module) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): + embed_layer = partial(HybridEmbed, backbone=backbone) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return _create_vision_transformer(variant, pretrained=pretrained, embed_layer=embed_layer, **kwargs) + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. 
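+
+    How the hybrid patch embed sizes itself (sketch; shapes illustrative):
+    HybridEmbed runs a zeros tensor through the backbone once to discover the
+    feature-map size, then projects patch_size x patch_size tiles of that map:
+
+        backbone = _resnetv2(layers=())                      # stem-only ResNetV2
+        embed = HybridEmbed(backbone, img_size=224, patch_size=8, embed_dim=192)
+        tokens = embed(torch.randn(1, 3, 224, 224))          # -> (1, num_patches, 192)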
+ """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs): + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_384(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. 
+ """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_224_in21k(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid. ImageNet-21k. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224_in21k(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2(layers=(3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_224_in21k(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. 
+ """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_relpos.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_relpos.py new file mode 100644 index 0000000000000000000000000000000000000000..288195adf4dde547efc7fc6af2b4350b6ea114e2 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vision_transformer_relpos.py @@ -0,0 +1,654 @@ +""" Relative Position Vision Transformer (ViT) in PyTorch + +NOTE: these models are experimental / WIP, expect changes + +Hacked together by / Copyright 2022, Ross Wightman +""" +import math +import logging +from functools import partial +from collections import OrderedDict +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, resolve_pretrained_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_, to_2tuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'vit_relpos_base_patch32_plus_rpn_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', + input_size=(3, 256, 256)), + 'vit_relpos_base_patch16_plus_240': _cfg(url='', input_size=(3, 240, 240)), + + 'vit_relpos_small_patch16_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth'), + 'vit_relpos_medium_patch16_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth'), + 'vit_relpos_base_patch16_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth'), + + 'vit_srelpos_small_patch16_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth'), + 'vit_srelpos_medium_patch16_224': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth'), + + 'vit_relpos_medium_patch16_cls_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth'), + 'vit_relpos_base_patch16_cls_224': _cfg( + url=''), + 'vit_relpos_base_patch16_clsgap_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth'), + + 'vit_relpos_small_patch16_rpn_224': _cfg(url=''), + 'vit_relpos_medium_patch16_rpn_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth'), + 'vit_relpos_base_patch16_rpn_224': _cfg(url=''), +} + + +def gen_relative_position_index( + q_size: Tuple[int, int], + k_size: Tuple[int, int] = None, + class_token: bool = False) -> torch.Tensor: + # Adapted with significant modifications from Swin / BeiT codebases + # get pair-wise relative position index for each token inside the window + q_coords = torch.stack(torch.meshgrid([torch.arange(q_size[0]), torch.arange(q_size[1])])).flatten(1) # 2, Wh, Ww + if k_size is None: + k_coords = q_coords + k_size = q_size + else: + # different q vs k sizes is a WIP + k_coords = torch.stack(torch.meshgrid([torch.arange(k_size[0]), torch.arange(k_size[1])])).flatten(1) + relative_coords = q_coords[:, :, None] - k_coords[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0) # Wh*Ww, Wh*Ww, 2 + _, relative_position_index = torch.unique(relative_coords.view(-1, 2), return_inverse=True, dim=0) + + if class_token: + # handle cls to token & token 2 cls & cls to cls as per beit for rel pos bias + # NOTE not intended or tested with MLP log-coords + max_size = (max(q_size[0], k_size[0]), max(q_size[1], k_size[1])) + num_relative_distance = (2 * max_size[0] - 1) * (2 * max_size[1] - 1) + 3 + relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0]) + relative_position_index[0, 0:] = num_relative_distance - 3 + relative_position_index[0:, 0] = num_relative_distance - 2 + relative_position_index[0, 0] = num_relative_distance - 1 + + return relative_position_index.contiguous() + + +def gen_relative_log_coords( + win_size: Tuple[int, int], + pretrained_win_size: Tuple[int, int] = (0, 0), + mode='swin', +): + assert mode in ('swin', 'cr', 'rw') + # as per official swin-v2 impl, supporting timm specific 'cr' and 'rw' log coords as well + relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1], dtype=torch.float32) + relative_coords_table = torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w])) + relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() # 2*Wh-1, 2*Ww-1, 2 + if mode == 'swin': + if pretrained_win_size[0] > 0: + relative_coords_table[:, :, 0] /= (pretrained_win_size[0] - 1) + relative_coords_table[:, :, 1] /= (pretrained_win_size[1] - 1) + else: + relative_coords_table[:, :, 0] /= (win_size[0] - 1) + relative_coords_table[:, :, 1] /= (win_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + 1.0 + relative_coords_table.abs()) / math.log2(8) + else: + if mode == 'rw': + # cr w/ window size normalization -> [-1,1] log coords + 
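+            # Worked example of the 'rw' scaling below (assumed window, for
+            # illustration): win_size=(8, 8) gives raw offsets in [-7, 7]; dividing
+            # by win_size - 1 = 7 maps them to [-1, 1] and multiplying by 8 gives
+            # [-8, 8]; sign(x) * log2(1 + |x|) then spans [-log2(9), log2(9)], and
+            # the final division by log2(9) returns the coords to [-1, 1].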
relative_coords_table[:, :, 0] /= (win_size[0] - 1) + relative_coords_table[:, :, 1] /= (win_size[1] - 1) + relative_coords_table *= 8 # scale to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + 1.0 + relative_coords_table.abs()) + relative_coords_table /= math.log2(9) # -> [-1, 1] + else: + # mode == 'cr' + relative_coords_table = torch.sign(relative_coords_table) * torch.log( + 1.0 + relative_coords_table.abs()) + + return relative_coords_table + + +class RelPosMlp(nn.Module): + def __init__( + self, + window_size, + num_heads=8, + hidden_dim=128, + prefix_tokens=0, + mode='cr', + pretrained_window_size=(0, 0) + ): + super().__init__() + self.window_size = window_size + self.window_area = self.window_size[0] * self.window_size[1] + self.prefix_tokens = prefix_tokens + self.num_heads = num_heads + self.bias_shape = (self.window_area,) * 2 + (num_heads,) + if mode == 'swin': + self.bias_act = nn.Sigmoid() + self.bias_gain = 16 + mlp_bias = (True, False) + elif mode == 'rw': + self.bias_act = nn.Tanh() + self.bias_gain = 4 + mlp_bias = True + else: + self.bias_act = nn.Identity() + self.bias_gain = None + mlp_bias = True + + self.mlp = Mlp( + 2, # x, y + hidden_features=hidden_dim, + out_features=num_heads, + act_layer=nn.ReLU, + bias=mlp_bias, + drop=(0.125, 0.) + ) + + self.register_buffer( + "relative_position_index", + gen_relative_position_index(window_size), + persistent=False) + + # get relative_coords_table + self.register_buffer( + "rel_coords_log", + gen_relative_log_coords(window_size, pretrained_window_size, mode=mode), + persistent=False) + + def get_bias(self) -> torch.Tensor: + relative_position_bias = self.mlp(self.rel_coords_log) + if self.relative_position_index is not None: + relative_position_bias = relative_position_bias.view(-1, self.num_heads)[ + self.relative_position_index.view(-1)] # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.view(self.bias_shape) + relative_position_bias = relative_position_bias.permute(2, 0, 1) + relative_position_bias = self.bias_act(relative_position_bias) + if self.bias_gain is not None: + relative_position_bias = self.bias_gain * relative_position_bias + if self.prefix_tokens: + relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0]) + return relative_position_bias.unsqueeze(0).contiguous() + + def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): + return attn + self.get_bias() + + +class RelPosBias(nn.Module): + + def __init__(self, window_size, num_heads, prefix_tokens=0): + super().__init__() + assert prefix_tokens <= 1 + self.window_size = window_size + self.window_area = window_size[0] * window_size[1] + self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,) + + num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens + self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) + self.register_buffer( + "relative_position_index", + gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0), + persistent=False, + ) + + self.init_weights() + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=.02) + + def get_bias(self) -> torch.Tensor: + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] + # win_h * win_w, win_h * win_w, num_heads + relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1) + return 
relative_position_bias.unsqueeze(0).contiguous() + + def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): + return attn + self.get_bias() + + +class RelPosAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, rel_pos_cls=None, attn_drop=0., proj_drop=0.): + super().__init__() + assert dim % num_heads == 0, 'dim should be divisible by num_heads' + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + if self.rel_pos is not None: + attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) + elif shared_rel_pos is not None: + attn = attn + shared_rel_pos + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScale(nn.Module): + def __init__(self, dim, init_values=1e-5, inplace=False): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class RelPosBlock(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, rel_pos_cls=None, init_values=None, + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = RelPosAttention( + dim, num_heads, qkv_bias=qkv_bias, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=drop) + self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) + x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class ResPostRelPosBlock(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, rel_pos_cls=None, init_values=None, + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.init_values = init_values + + self.attn = RelPosAttention( + dim, num_heads, qkv_bias=qkv_bias, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=drop) + self.norm1 = norm_layer(dim) + self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + self.norm2 = norm_layer(dim) + self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.init_weights() + + def init_weights(self): + # NOTE this init overrides that base model init with specific changes for the block type + if self.init_values is not None: + nn.init.constant_(self.norm1.weight, self.init_values) + nn.init.constant_(self.norm2.weight, self.init_values) + + def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): + x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) + x = x + self.drop_path2(self.norm2(self.mlp(x))) + return x + + +class VisionTransformerRelPos(nn.Module): + """ Vision Transformer w/ Relative Position Bias + + Differing from classic vit, this impl + * uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed + * defaults to no class token (can be enabled) + * defaults to global avg pool for head (can be changed) + * layer-scale (residual branch gain) enabled + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + global_pool='avg', + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + init_values=1e-6, + class_token=False, + fc_norm=False, + rel_pos_type='mlp', + rel_pos_dim=None, + shared_rel_pos=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + weight_init='skip', + embed_layer=PatchEmbed, + norm_layer=None, + act_layer=None, + block_fn=RelPosBlock + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + global_pool (str): type of global pooling for final sequence (default: 'avg') + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + init_values: (float): layer-scale init values + class_token (bool): use class token (default: False) + fc_norm (bool): use pre classifier norm instead of pre-pool + rel_pos_ty pe (str): type of relative position + shared_rel_pos (bool): share relative pos across all blocks + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + weight_init (str): weight init scheme + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + act_layer: (nn.Module): MLP activation layer + """ + super().__init__() + assert global_pool in ('', 'avg', 'token') + assert class_token or global_pool != 'token' + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.num_classes = num_classes + self.global_pool = global_pool + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_prefix_tokens = 1 if class_token else 0 + self.grad_checkpointing = False + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + feat_size = self.patch_embed.grid_size + + rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) + if rel_pos_type.startswith('mlp'): + if rel_pos_dim: + rel_pos_args['hidden_dim'] = rel_pos_dim + # FIXME 
experimenting with different relpos log coord configs + if 'swin' in rel_pos_type: + rel_pos_args['mode'] = 'swin' + elif 'rw' in rel_pos_type: + rel_pos_args['mode'] = 'rw' + rel_pos_cls = partial(RelPosMlp, **rel_pos_args) + else: + rel_pos_cls = partial(RelPosBias, **rel_pos_args) + self.shared_rel_pos = None + if shared_rel_pos: + self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) + # NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both... + rel_pos_cls = None + + self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + block_fn( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, rel_pos_cls=rel_pos_cls, + init_values=init_values, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity() + + # Classifier Head + self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity() + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if weight_init != 'skip': + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'moco', '') + if self.cls_token is not None: + nn.init.normal_(self.cls_token, std=1e-6) + # FIXME weight init scheme using PyTorch defaults curently + #named_apply(get_init_weights_vit(mode, head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token'} + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^cls_token|patch_embed', # stem and embed + blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes: int, global_pool=None): + self.num_classes = num_classes + if global_pool is not None: + assert global_pool in ('', 'avg', 'token') + self.global_pool = global_pool + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.cls_token is not None: + x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) + + shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos) + else: + x = blk(x, shared_rel_pos=shared_rel_pos) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + x = self.fc_norm(x) + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, **kwargs) + return model + + +@register_model +def 
+
+
+@register_model
+def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs):
+    """ ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token
+    """
+    model_kwargs = dict(
+        patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock, **kwargs)
+    model = _create_vision_transformer_relpos(
+        'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs):
+    """ ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token
+    """
+    model_kwargs = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, **kwargs)
+    model = _create_vision_transformer_relpos('vit_relpos_base_patch16_plus_240', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_relpos_small_patch16_224(pretrained=False, **kwargs):
+    """ ViT-Small (ViT-S/16) w/ relative log-coord position, no class token
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True, **kwargs)
+    model = _create_vision_transformer_relpos('vit_relpos_small_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_relpos_medium_patch16_224(pretrained=False, **kwargs):
+    """ ViT-Medium (ViT-M/16) w/ relative log-coord position, no class token
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True, **kwargs)
+    model = _create_vision_transformer_relpos('vit_relpos_medium_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_relpos_base_patch16_224(pretrained=False, **kwargs):
+    """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, **kwargs)
+    model = _create_vision_transformer_relpos('vit_relpos_base_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_srelpos_small_patch16_224(pretrained=False, **kwargs):
+    """ ViT-Small (ViT-S/16) w/ shared relative log-coord position, no class token
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False,
+        rel_pos_dim=384, shared_rel_pos=True, **kwargs)
+    model = _create_vision_transformer_relpos('vit_srelpos_small_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs):
+    """ ViT-Medium (ViT-M/16) w/ shared relative log-coord position, no class token
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
+        rel_pos_dim=512, shared_rel_pos=True, **kwargs)
+    model = _create_vision_transformer_relpos(
+        'vit_srelpos_medium_patch16_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs):
+    """ ViT-Medium (ViT-M/16) w/ relative log-coord position, class token present
+    """
+    model_kwargs = dict(
+        patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
+        rel_pos_dim=256, class_token=True, global_pool='token', **kwargs)
+    model = _create_vision_transformer_relpos(
+        'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **model_kwargs)
+    return model
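+
+
+# NOTE (editor): illustrative usage sketch, not part of the original module; the helper
+# name is made up and only the registrations in this file plus a working torch install
+# are assumed.
+def _relpos_usage_sketch():
+    # the relative position bias is computed inside each block (or shared across blocks
+    # when shared_rel_pos=True), so no absolute position embedding is added to the tokens
+    model = vit_relpos_small_patch16_224(pretrained=False, num_classes=10)
+    x = torch.randn(2, 3, 224, 224)
+    return model(x).shape  # -> torch.Size([2, 10])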
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, + class_token=True, global_pool='token', **kwargs) + model = _create_vision_transformer_relpos('vit_relpos_base_patch16_cls_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present + NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled + Leaving here for comparisons w/ a future re-train as it performs quite well. + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True, **kwargs) + model = _create_vision_transformer_relpos('vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock, **kwargs) + model = _create_vision_transformer_relpos( + 'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock, **kwargs) + model = _create_vision_transformer_relpos( + 'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock, **kwargs) + model = _create_vision_transformer_relpos( + 'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **model_kwargs) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/volo.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/volo.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2886af59a29bc8bd7493a85a8158eecce70914 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/volo.py @@ -0,0 +1,750 @@ +""" Vision OutLOoker (VOLO) implementation + +Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112 + +Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below + +Modifications and additions for timm by / Copyright 2022, Ross Wightman +""" +# Copyright 2021 Sea Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import math +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from custom_timm.models.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_ +from custom_timm.models.registry import register_model +from custom_timm.models.helpers import build_model_with_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), + **kwargs + } + + +default_cfgs = { + 'volo_d1_224': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', + crop_pct=0.96), + 'volo_d1_384': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', + crop_pct=1.0, input_size=(3, 384, 384)), + 'volo_d2_224': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', + crop_pct=0.96), + 'volo_d2_384': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', + crop_pct=1.0, input_size=(3, 384, 384)), + 'volo_d3_224': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', + crop_pct=0.96), + 'volo_d3_448': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', + crop_pct=1.0, input_size=(3, 448, 448)), + 'volo_d4_224': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', + crop_pct=0.96), + 'volo_d4_448': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', + crop_pct=1.15, input_size=(3, 448, 448)), + 'volo_d5_224': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', + crop_pct=0.96), + 'volo_d5_448': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', + crop_pct=1.15, input_size=(3, 448, 448)), + 'volo_d5_512': _cfg( + url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', + crop_pct=1.15, input_size=(3, 512, 512)), +} + + +class OutlookAttention(nn.Module): + + def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + head_dim = dim // num_heads + self.num_heads = num_heads + self.kernel_size = kernel_size + self.padding = padding + self.stride = stride + self.scale = head_dim ** -0.5 + + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) + self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) + + def forward(self, x): + B, H, W, C = x.shape + + v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W + + h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) + v = self.unfold(v).reshape( + B, self.num_heads, C // self.num_heads, + self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H + + attn = 
self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + attn = self.attn(attn).reshape( + B, h * w, self.num_heads, self.kernel_size * self.kernel_size, + self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk + attn = attn * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) + x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) + + x = self.proj(x.permute(0, 2, 3, 1)) + x = self.proj_drop(x) + + return x + + +class Outlooker(nn.Module): + def __init__( + self, dim, kernel_size, padding, stride=1, num_heads=1, mlp_ratio=3., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, qkv_bias=False + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = OutlookAttention( + dim, num_heads, kernel_size=kernel_size, + padding=padding, stride=stride, + qkv_bias=qkv_bias, attn_drop=attn_drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Attention(nn.Module): + + def __init__( + self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, H, W, C = x.shape + + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, H, W, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Transformer(nn.Module): + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, + attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) + + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()
+
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)
+
+    def forward(self, x):
+        x = x + self.drop_path(self.attn(self.norm1(x)))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class ClassAttention(nn.Module):
+
+    def __init__(
+            self, dim, num_heads=8, head_dim=None, qkv_bias=False, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        if head_dim is not None:
+            self.head_dim = head_dim
+        else:
+            head_dim = dim // num_heads
+            self.head_dim = head_dim
+        self.scale = head_dim ** -0.5
+
+        self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias)
+        self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(self.head_dim * self.num_heads, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        B, N, C = x.shape
+
+        kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+        k, v = kv.unbind(0)
+        q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim)
+        attn = ((q * self.scale) @ k.transpose(-2, -1))
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads)
+        cls_embed = self.proj(cls_embed)
+        cls_embed = self.proj_drop(cls_embed)
+        return cls_embed
+
+
+class ClassBlock(nn.Module):
+
+    def __init__(
+            self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False,
+            drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = ClassAttention(
+            dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
+        # NOTE: drop path for stochastic depth
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+    def forward(self, x):
+        cls_embed = x[:, :1]
+        cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x)))
+        cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed)))
+        return torch.cat([cls_embed, x[:, 1:]], dim=1)
+
+
+def get_block(block_type, **kargs):
+    if block_type == 'ca':
+        return ClassBlock(**kargs)
+
+
+def rand_bbox(size, lam, scale=1):
+    """
+    get bounding box for token labeling (https://github.com/zihangJiang/TokenLabeling)
+    return: bounding box
+    """
+    W = size[1] // scale
+    H = size[2] // scale
+    cut_rat = np.sqrt(1. - lam)
+    cut_w = int(W * cut_rat)  # NOTE: was np.int(...), which was removed in newer NumPy
+    cut_h = int(H * cut_rat)
+
+    # uniform
+    cx = np.random.randint(W)
+    cy = np.random.randint(H)
+
+    bbx1 = np.clip(cx - cut_w // 2, 0, W)
+    bby1 = np.clip(cy - cut_h // 2, 0, H)
+    bbx2 = np.clip(cx + cut_w // 2, 0, W)
+    bby2 = np.clip(cy + cut_h // 2, 0, H)
+
+    return bbx1, bby1, bbx2, bby2
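+
+
+# NOTE (editor): illustrative sketch of rand_bbox, not part of the original file; the
+# helper name is made up. rand_bbox draws a CutMix-style box on the *pooled* token grid;
+# `scale` maps patch coords down by the pooling factor used in VOLO.forward_train.
+def _rand_bbox_sketch():
+    lam = np.random.beta(1.0, 1.0)                   # mixing ratio, as in forward_train
+    box = rand_bbox((1, 56, 56, 384), lam, scale=2)  # input sized like a (B, H, W, C) map
+    return lam, box                                  # box coords live on the 28x28 grid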
+
+
+class PatchEmbed(nn.Module):
+    """ Image to Patch Embedding.
+    Unlike ViT, which uses a single conv layer, we use 4 conv layers to do patch embedding
+    """
+
+    def __init__(
+            self, img_size=224, stem_conv=False, stem_stride=1,
+            patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384):
+        super().__init__()
+        assert patch_size in [4, 8, 16]
+        if stem_conv:
+            self.conv = nn.Sequential(
+                nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False),  # 112x112
+                nn.BatchNorm2d(hidden_dim),
+                nn.ReLU(inplace=True),
+            )
+        else:
+            self.conv = None
+
+        self.proj = nn.Conv2d(
+            hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride)
+        self.num_patches = (img_size // patch_size) * (img_size // patch_size)
+
+    def forward(self, x):
+        if self.conv is not None:
+            x = self.conv(x)
+        x = self.proj(x)  # B, C, H, W
+        return x
+
+
+class Downsample(nn.Module):
+    """ Image to Patch Embedding, downsampling between stage1 and stage2
+    """
+
+    def __init__(self, in_embed_dim, out_embed_dim, patch_size=2):
+        super().__init__()
+        self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x):
+        x = x.permute(0, 3, 1, 2)
+        x = self.proj(x)  # B, C, H, W
+        x = x.permute(0, 2, 3, 1)
+        return x
+
+
+def outlooker_blocks(
+        block_fn, index, dim, layers, num_heads=1, kernel_size=3, padding=1, stride=2,
+        mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs):
+    """
+    generate the outlooker layers for stage 1
+    return: outlooker layers
+    """
+    blocks = []
+    for block_idx in range(layers[index]):
+        block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
+        blocks.append(
+            block_fn(
+                dim, kernel_size=kernel_size, padding=padding,
+                stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio,
+                qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr))
+    blocks = nn.Sequential(*blocks)
+    return blocks
+
+
+def transformer_blocks(
+        block_fn, index, dim, layers, num_heads, mlp_ratio=3.,
+        qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs):
+    """
+    generate the transformer layers for stage 2
+    return: transformer layers
+    """
+    blocks = []
+    for block_idx in range(layers[index]):
+        block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
+        blocks.append(
+            block_fn(
+                dim, num_heads,
+                mlp_ratio=mlp_ratio,
+                qkv_bias=qkv_bias,
+                attn_drop=attn_drop,
+                drop_path=block_dpr))
+    blocks = nn.Sequential(*blocks)
+    return blocks
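+
+
+# NOTE (editor): illustrative shape check for the stage builders above; not part of the
+# original file and the helper name is made up. Outlook attention unfolds local k x k
+# neighbourhoods, attends within them, and folds the result back, so the spatial shape
+# is preserved even when stride > 1 is used for the attention grid.
+def _outlooker_shape_sketch():
+    blk = Outlooker(dim=192, kernel_size=3, padding=1, stride=2, num_heads=6)
+    x = torch.randn(1, 28, 28, 192)  # VOLO keeps features channels-last: (B, H, W, C)
+    return blk(x).shape              # -> torch.Size([1, 28, 28, 192])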
+
+
+class VOLO(nn.Module):
+    """
+    Vision Outlooker, the main class of our model
+    """
+
+    def __init__(
+            self,
+            layers,
+            img_size=224,
+            in_chans=3,
+            num_classes=1000,
+            global_pool='token',
+            patch_size=8,
+            stem_hidden_dim=64,
+            embed_dims=None,
+            num_heads=None,
+            downsamples=(True, False, False, False),
+            outlook_attention=(True, False, False, False),
+            mlp_ratio=3.0,
+            qkv_bias=False,
+            drop_rate=0.,
+            attn_drop_rate=0.,
+            drop_path_rate=0.,
+            norm_layer=nn.LayerNorm,
+            post_layers=('ca', 'ca'),
+            use_aux_head=True,
+            use_mix_token=False,
+            pooling_scale=2,
+    ):
+        super().__init__()
+        num_layers = len(layers)
+        mlp_ratio = to_ntuple(num_layers)(mlp_ratio)
+        img_size = to_2tuple(img_size)
+
+        self.num_classes = num_classes
+        self.global_pool = global_pool
+        self.mix_token = use_mix_token
+        self.pooling_scale = pooling_scale
+        self.num_features = embed_dims[-1]
+        if use_mix_token:  # enable token mixing, see token labeling for details.
+            self.beta = 1.0
+            assert global_pool == 'token', "return all tokens if mix_token is enabled"
+        self.grad_checkpointing = False
+
+        self.patch_embed = PatchEmbed(
+            stem_conv=True, stem_stride=2, patch_size=patch_size,
+            in_chans=in_chans, hidden_dim=stem_hidden_dim,
+            embed_dim=embed_dims[0])
+
+        # initial positional encoding; we add positional encoding after the outlooker blocks
+        patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale)
+        self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1]))
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        # set the main blocks of the network
+        network = []
+        for i in range(len(layers)):
+            if outlook_attention[i]:
+                # stage 1
+                stage = outlooker_blocks(
+                    Outlooker, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i],
+                    qkv_bias=qkv_bias, attn_drop=attn_drop_rate, norm_layer=norm_layer)
+                network.append(stage)
+            else:
+                # stage 2
+                stage = transformer_blocks(
+                    Transformer, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias,
+                    drop_path_rate=drop_path_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer)
+                network.append(stage)
+
+            if downsamples[i]:
+                # downsampling between two stages
+                network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2))
+
+        self.network = nn.ModuleList(network)
+
+        # set post block, for example, class attention layers
+        self.post_network = None
+        if post_layers is not None:
+            self.post_network = nn.ModuleList(
+                [
+                    get_block(
+                        post_layers[i],
+                        dim=embed_dims[-1],
+                        num_heads=num_heads[-1],
+                        mlp_ratio=mlp_ratio[-1],
+                        qkv_bias=qkv_bias,
+                        attn_drop=attn_drop_rate,
+                        drop_path=0.,
+                        norm_layer=norm_layer)
+                    for i in range(len(post_layers))
+                ])
+            self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1]))
+            trunc_normal_(self.cls_token, std=.02)
+
+        # set output type
+        if use_aux_head:
+            self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+        else:
+            self.aux_head = None
+        self.norm = norm_layer(self.num_features)
+
+        # Classifier head
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+        trunc_normal_(self.pos_embed, std=.02)
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'pos_embed', 'cls_token'}
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^cls_token|pos_embed|patch_embed',  # stem and embed
+            blocks=[
+                (r'^network\.(\d+)\.(\d+)', None),
+                (r'^network\.(\d+)', (0,)),
+            ],
+            blocks2=[
+                (r'^cls_token', (0,)),
+                (r'^post_network\.(\d+)', None),
+                (r'^norm', (99999,))
+            ],
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=None):
+        self.num_classes = num_classes
+        if global_pool is not None:
+            self.global_pool = global_pool
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+        if self.aux_head is not None:
+            self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_tokens(self, x):
+        for idx, block in enumerate(self.network):
+            if idx == 2:
+                # add positional encoding after outlooker blocks
+                x = x + self.pos_embed
+                x = self.pos_drop(x)
+            if self.grad_checkpointing and not torch.jit.is_scripting():
+                x = checkpoint(block, x)
+            else:
+                x = block(x)
+
+        B, H, W, C = x.shape
+        x = x.reshape(B, -1, C)
+        return x
+
+    def forward_cls(self, x):
+        B, N, C = x.shape
+        cls_tokens = self.cls_token.expand(B, -1, -1)
+        x = torch.cat([cls_tokens, x], dim=1)
+        for block in self.post_network:
+            if self.grad_checkpointing and not torch.jit.is_scripting():
+                x = checkpoint(block, x)
+            else:
+                x = block(x)
+        return x
+
+    def forward_train(self, x):
+        """ A separate forward fn for training with mix_token (if a train script supports it).
+        Combining multiple modes in a single forward with different return types is torchscript hell.
+        """
+        x = self.patch_embed(x)
+        x = x.permute(0, 2, 3, 1)  # B,C,H,W -> B,H,W,C
+
+        # mix token, see token labeling for details.
+        if self.mix_token and self.training:
+            lam = np.random.beta(self.beta, self.beta)
+            patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale
+            bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale)
+            temp_x = x.clone()
+            sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1
+            sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2
+            temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :]
+            x = temp_x
+        else:
+            bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
+
+        # step2: tokens learning in the two stages
+        x = self.forward_tokens(x)
+
+        # step3: post network, apply class attention or not
+        if self.post_network is not None:
+            x = self.forward_cls(x)
+        x = self.norm(x)
+
+        if self.global_pool == 'avg':
+            x_cls = x.mean(dim=1)
+        elif self.global_pool == 'token':
+            x_cls = x[:, 0]
+        else:
+            x_cls = x
+
+        if self.aux_head is None:
+            return x_cls
+
+        x_aux = self.aux_head(x[:, 1:])  # generate classes in all feature tokens, see token labeling
+        if not self.training:
+            return x_cls + 0.5 * x_aux.max(1)[0]
+
+        if self.mix_token and self.training:  # reverse "mix token", see token labeling for details.
+            x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1])
+            temp_x = x_aux.clone()
+            temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :]
+            x_aux = temp_x
+            x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1])
+
+        # return these: 1. class token, 2. classes from all feature tokens, 3.
bounding box + return x_cls, x_aux, (bbx1, bby1, bbx2, bby2) + + def forward_features(self, x): + x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C + + # step2: tokens learning in the two stages + x = self.forward_tokens(x) + + # step3: post network, apply class attention or not + if self.post_network is not None: + x = self.forward_cls(x) + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool == 'avg': + out = x.mean(dim=1) + elif self.global_pool == 'token': + out = x[:, 0] + else: + out = x + if pre_logits: + return out + out = self.head(out) + if self.aux_head is not None: + # generate classes in all feature tokens, see token labeling + aux = self.aux_head(x[:, 1:]) + out = out + 0.5 * aux.max(1)[0] + return out + + def forward(self, x): + """ simplified forward (without mix token training) """ + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_volo(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + return build_model_with_cfg(VOLO, variant, pretrained, **kwargs) + + +@register_model +def volo_d1_224(pretrained=False, **kwargs): + """ VOLO-D1 model, Params: 27M """ + model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) + model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d1_384(pretrained=False, **kwargs): + """ VOLO-D1 model, Params: 27M """ + model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) + model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d2_224(pretrained=False, **kwargs): + """ VOLO-D2 model, Params: 59M """ + model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d2_384(pretrained=False, **kwargs): + """ VOLO-D2 model, Params: 59M """ + model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d3_224(pretrained=False, **kwargs): + """ VOLO-D3 model, Params: 86M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d3_448(pretrained=False, **kwargs): + """ VOLO-D3 model, Params: 86M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) + model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d4_224(pretrained=False, **kwargs): + """ VOLO-D4 model, Params: 193M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) + model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d4_448(pretrained=False, **kwargs): + """ VOLO-D4 model, Params: 193M """ + model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), 
**kwargs) + model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_224(pretrained=False, **kwargs): + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_448(pretrained=False, **kwargs): + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) + return model + + +@register_model +def volo_d5_512(pretrained=False, **kwargs): + """ VOLO-D5 model, Params: 296M + stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 + """ + model_args = dict( + layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), + mlp_ratio=4, stem_hidden_dim=128, **kwargs) + model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) + return model diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vovnet.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vovnet.py new file mode 100644 index 0000000000000000000000000000000000000000..8e80ffc66c432f6e174c70f5d33bb0dbcde50409 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/vovnet.py @@ -0,0 +1,424 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. 
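+
+The core building block is One-Shot Aggregation (OSA): each conv in a block feeds only
+the next conv, and all intermediate features are concatenated just once at the end,
+rather than the dense per-layer concatenation of DenseNet. A minimal sketch of the idea
+(illustrative only; the real blocks below add norm/act layers, optional attention and
+residuals):
+
+    import torch
+    import torch.nn as nn
+
+    convs = nn.ModuleList(nn.Conv2d(64, 64, 3, padding=1) for _ in range(3))
+    x = torch.randn(1, 64, 8, 8)
+    feats = [x]
+    for conv in convs:
+        feats.append(conv(feats[-1]))           # chain: each conv sees only the last output
+    out = nn.Conv2d(64 * 4, 128, 1)(torch.cat(feats, dim=1))  # single 1x1 aggregation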
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .registry import register_model +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath,\ + create_attn, create_norm_act_layer, get_norm_act_layer + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] +model_cfgs['ese_vovnet99b_iabn'] = model_cfgs['ese_vovnet99b'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + vovnet39a=_cfg(url=''), + vovnet57a=_cfg(url=''), + ese_vovnet19b_slim_dw=_cfg(url=''), + 
ese_vovnet19b_dw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth'), + ese_vovnet19b_slim=_cfg(url=''), + ese_vovnet39b=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth'), + ese_vovnet57b=_cfg(url=''), + ese_vovnet99b=_cfg(url=''), + eca_vovnet39b=_cfg(url=''), + ese_vovnet39b_evos=_cfg(url=''), + ese_vovnet99b_iabn=_cfg(url=''), +) + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__( + self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, + depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) + + self.attn = create_attn(attn, out_chs) if attn else None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__( + self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, + residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, + drop_path_rates=None): + super(OsaStage, self).__init__() + self.grad_checkpointing = False + + if downsample: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, + attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) + ] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + return x + + +class VovNet(nn.Module): + + def __init__( + self, cfg, in_chans=3, 
num_classes=1000, global_pool='avg', drop_rate=0., stem_stride=4, + output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rate=0.): + """ VovNet (v2) + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert stem_stride in (4, 2) + assert output_stride == 32 # FIXME support dilation + + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct + self.stem = nn.Sequential(*[ + ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, + downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args) + ] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + for s in self.stages: + s.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _create_vovnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + VovNet, variant, pretrained, + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def vovnet39a(pretrained=False, **kwargs): + return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) + + +@register_model +def vovnet57a(pretrained=False, **kwargs): + return 
_create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_slim_dw(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_dw(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_slim(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet39b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet57b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet99b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def eca_vovnet39b(pretrained=False, **kwargs):
+    return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs)
+
+
+# Experimental Models
+
+@register_model
+def ese_vovnet39b_evos(pretrained=False, **kwargs):
+    def norm_act_fn(num_features, **nkwargs):
+        return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs)
+    return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
+
+
+@register_model
+def ese_vovnet99b_iabn(pretrained=False, **kwargs):
+    norm_layer = get_norm_act_layer('iabn', act_layer='leaky_relu')
+    return _create_vovnet(
+        'ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, act_layer=nn.LeakyReLU, **kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception.py
new file mode 100644
index 0000000000000000000000000000000000000000..99d02c467b5b40944fb00eed7f40f6bd62c66839
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception.py
@@ -0,0 +1,249 @@
+"""
+Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch)
+
+@author: tstandley
+Adapted by cadene
+
+Creates an Xception Model as defined in:
+
+Francois Chollet
+Xception: Deep Learning with Depthwise Separable Convolutions
+https://arxiv.org/pdf/1610.02357.pdf
+
+These weights were ported from the Keras implementation.
Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" +import torch.jit +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['Xception'] + +default_cfgs = { + 'xception': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + 
self.block6 = Block(728, 728, 3, 1)
+        self.block7 = Block(728, 728, 3, 1)
+
+        self.block8 = Block(728, 728, 3, 1)
+        self.block9 = Block(728, 728, 3, 1)
+        self.block10 = Block(728, 728, 3, 1)
+        self.block11 = Block(728, 728, 3, 1)
+
+        self.block12 = Block(728, 1024, 2, 2, grow_first=False)
+
+        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
+        self.bn3 = nn.BatchNorm2d(1536)
+        self.act3 = nn.ReLU(inplace=True)
+
+        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
+        self.bn4 = nn.BatchNorm2d(self.num_features)
+        self.act4 = nn.ReLU(inplace=True)
+        self.feature_info = [
+            dict(num_chs=64, reduction=2, module='act2'),
+            dict(num_chs=128, reduction=4, module='block2.rep.0'),
+            dict(num_chs=256, reduction=8, module='block3.rep.0'),
+            dict(num_chs=728, reduction=16, module='block12.rep.0'),
+            dict(num_chs=2048, reduction=32, module='act4'),
+        ]
+
+        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
+
+        # ------- init weights -------
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^conv[12]|bn[12]',
+            blocks=[
+                (r'^block(\d+)', None),
+                (r'^conv[34]|bn[34]', (99,)),
+            ],
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        assert not enable, "gradient checkpointing not supported"
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.fc
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        x = self.block1(x)
+        x = self.block2(x)
+        x = self.block3(x)
+        x = self.block4(x)
+        x = self.block5(x)
+        x = self.block6(x)
+        x = self.block7(x)
+        x = self.block8(x)
+        x = self.block9(x)
+        x = self.block10(x)
+        x = self.block11(x)
+        x = self.block12(x)
+
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.act3(x)
+
+        x = self.conv4(x)
+        x = self.bn4(x)
+        x = self.act4(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        x = self.global_pool(x)
+        if self.drop_rate:
+            # NOTE: the result must be assigned; the original discarded F.dropout's output,
+            # so dropout was silently never applied here
+            x = F.dropout(x, self.drop_rate, training=self.training)
+        return x if pre_logits else self.fc(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
+def _xception(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        Xception, variant, pretrained,
+        feature_cfg=dict(feature_cls='hook'),
+        **kwargs)
+
+
+@register_model
+def xception(pretrained=False, **kwargs):
+    return _xception('xception', pretrained=pretrained, **kwargs)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception_aligned.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception_aligned.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ac75ff05e53279b72cfaea2809f78a757f8e540
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xception_aligned.py
@@ -0,0 +1,358 @@
+"""Pytorch impl of Aligned Xception 41, 65, 71
+
+This is a correct, from-scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at
+https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md + +Hacked together by / Copyright 2020 Ross Wightman +""" +from functools import partial + +import torch +import torch.nn as nn + +from custom_timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, checkpoint_seq +from .layers import ClassifierHead, ConvNormAct, create_conv2d, get_norm_act_layer +from .layers.helpers import to_3tuple +from .registry import register_model + +__all__ = ['XceptionAligned'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), + 'crop_pct': 0.903, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + xception41=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth'), + xception65=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/xception65_ra3-1447db8d.pth', + crop_pct=0.94, + ), + xception71=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth'), + + xception41p=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/xception41p_ra3-33195bc8.pth', + crop_pct=0.94, + ), + xception65p=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/xception65p_ra3-3c6114e4.pth', + crop_pct=0.94, + ), +) + + +class SeparableConv2d(nn.Module): + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + in_chs, in_chs, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(in_chs) + self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity() + + # pointwise convolution + self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) + self.bn_pw = norm_layer(out_chs) + self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity() + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + x = self.act_pw(x) + return x + + +class PreSeparableConv2d(nn.Module): + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, first_act=True): + super(PreSeparableConv2d, self).__init__() + norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) + self.kernel_size = kernel_size + self.dilation = dilation + + self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity() + # depthwise convolution + self.conv_dw = create_conv2d( + in_chs, in_chs, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + + # pointwise convolution + self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) + + def forward(self, x): + x = self.norm(x) + x = self.conv_dw(x) + x = self.conv_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + start_with_relu=True, 
no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvNormAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + x = x + skip + return x + + +class PreXceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(PreXceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride) + else: + self.shortcut = nn.Identity() + + self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True) + self.stack = nn.Sequential() + for i in range(3): + self.stack.add_module(f'conv{i + 1}', PreSeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=act_layer, norm_layer=norm_layer, first_act=i > 0)) + in_chs = out_chs[i] + + def forward(self, x): + x = self.norm(x) + skip = x + x = self.stack(x) + if not self.no_skip: + x = x + self.shortcut(skip) + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__( + self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, preact=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg'): + super(XceptionAligned, self).__init__() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + self.grad_checkpointing = False + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + create_conv2d(32, 64, kernel_size=3, stride=1) if preact else + ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args) + ]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + module_fn = PreXceptionModule if preact else XceptionModule + for i, b in enumerate(block_cfg): + b['dilation'] = curr_dilation + if b['stride'] > 1: + name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3' + self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), module_fn(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + + self.feature_info += [dict( + 
num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + self.act = act_layer(inplace=True) if preact else nn.Identity() + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + @torch.jit.ignore + def group_matcher(self, coarse=False): + return dict( + stem=r'^stem', + blocks=r'^blocks\.(\d+)', + ) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + self.grad_checkpointing = enable + + @torch.jit.ignore + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint_seq(self.blocks, x) + else: + x = self.blocks(x) + x = self.act(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + return self.head(x, pre_logits=pre_logits) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, variant, pretrained, + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs) + + +@register_model +def xception41(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception41', pretrained=pretrained, **model_args) + + +@register_model +def xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65', pretrained=pretrained, **model_args) + + +@register_model +def xception71(pretrained=False, **kwargs): + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception71', pretrained=pretrained, **model_args) + + +@register_model +def 
xception41p(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 w/ Pre-Act + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1), + ] + model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d, **kwargs) + return _xception('xception41p', pretrained=pretrained, **model_args) + + +@register_model +def xception65p(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 w/ Pre-Act + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True), + ] + model_args = dict( + block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65p', pretrained=pretrained, **model_args) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xcit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xcit.py new file mode 100644 index 0000000000000000000000000000000000000000..8c706df76cc54703c6a74623247298449e508a17 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/models/xcit.py @@ -0,0 +1,842 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Paper: + - https://arxiv.org/abs/2106.09681 + +Same as the official implementation, with some minor adaptations, original copyright below + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
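+
+# NOTE (illustrative sketch, not part of the original file): the core op below,
+# cross-covariance attention (XCA), attends over channels rather than tokens, so
+# attention cost scales with the embedding dim instead of the sequence length.
+# With q, k, v shaped (B, heads, C_head, N) as built in XCA.forward (learned
+# temperature scaling omitted here):
+#
+#   q = torch.nn.functional.normalize(q, dim=-1)
+#   k = torch.nn.functional.normalize(k, dim=-1)
+#   attn = (q @ k.transpose(-2, -1)).softmax(dim=-1)  # (B, heads, C_head, C_head)
+#   out = attn @ v                                    # (B, heads, C_head, N)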
+ +import math +from functools import partial + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from custom_timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .vision_transformer import _cfg, Mlp +from .registry import register_model +from .layers import DropPath, trunc_normal_, to_2tuple +from .cait import ClassAttn +from .fx_features import register_notrace_module + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # Patch size 16 + 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), + 'xcit_large_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), + + # Patch size 8 + 'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), + 'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), + 
'xcit_nano_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'),
+    'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'),
+    'xcit_tiny_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'),
+    'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'),
+    'xcit_tiny_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'),
+    'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'),
+    'xcit_small_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'),
+    'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'),
+    'xcit_small_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'),
+    'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'),
+    'xcit_medium_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'),
+    'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'),
+    'xcit_large_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+}
+
+
+@register_notrace_module  # reason: FX can't symbolically trace torch.arange in forward method
+class PositionalEncodingFourier(nn.Module):
+    """
+    Positional encoding relying on a Fourier kernel matching the one used in the "Attention is All You Need" paper.
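+    Each normalized (x, y) coordinate is expanded into sin/cos features over a geometric
+    range of frequencies controlled by `temperature`, then projected to `dim` channels by
+    a 1x1 convolution (a summary of the forward pass below).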
+    Based on the official XCiT code
+        - https://github.com/facebookresearch/xcit/blob/master/xcit.py
+    """
+
+    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
+        super().__init__()
+        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
+        self.scale = 2 * math.pi
+        self.temperature = temperature
+        self.hidden_dim = hidden_dim
+        self.dim = dim
+        self.eps = 1e-6
+
+    def forward(self, B: int, H: int, W: int):
+        device = self.token_projection.weight.device
+        y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)
+        x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1)
+        y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
+        x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
+        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
+        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
+        pos_x = x_embed[:, :, :, None] / dim_t
+        pos_y = y_embed[:, :, :, None] / dim_t
+        pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
+        pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
+        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+        pos = self.token_projection(pos)
+        return pos.repeat(B, 1, 1, 1)  # (B, C, H, W)
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution + batch norm"""
+    return torch.nn.Sequential(
+        nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
+        nn.BatchNorm2d(out_planes)
+    )
+
+
+class ConvPatchEmbed(nn.Module):
+    """Image to Patch Embedding using multiple convolutional layers"""
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        if patch_size == 16:
+            self.proj = torch.nn.Sequential(
+                conv3x3(in_chans, embed_dim // 8, 2),
+                act_layer(),
+                conv3x3(embed_dim // 8, embed_dim // 4, 2),
+                act_layer(),
+                conv3x3(embed_dim // 4, embed_dim // 2, 2),
+                act_layer(),
+                conv3x3(embed_dim // 2, embed_dim, 2),
+            )
+        elif patch_size == 8:
+            self.proj = torch.nn.Sequential(
+                conv3x3(in_chans, embed_dim // 4, 2),
+                act_layer(),
+                conv3x3(embed_dim // 4, embed_dim // 2, 2),
+                act_layer(),
+                conv3x3(embed_dim // 2, embed_dim, 2),
+            )
+        else:
+            raise ValueError('For convolutional projection, patch size has to be in [8, 16]')
+
+    def forward(self, x):
+        x = self.proj(x)
+        Hp, Wp = x.shape[2], x.shape[3]
+        x = x.flatten(2).transpose(1, 2)  # (B, N, C)
+        return x, (Hp, Wp)
+
+
+class LPI(nn.Module):
+    """
+    Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the
+    implicit communication performed by the block diagonal scatter attention.
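+    Tokens are reshaped from (B, N, C) back onto the (H, W) grid so each token mixes with
+    its 3x3 spatial neighbourhood through two depthwise convolutions (see forward below).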
Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim)) + self.gamma2 = nn.Parameter(eta * torch.ones(dim)) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. 
The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, C', N), permute -> (B, N, H, C') + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim)) + self.gamma3 = nn.Parameter(eta * torch.ones(dim)) + self.gamma2 = nn.Parameter(eta * torch.ones(dim)) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class XCiT(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, + depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate after positional embedding, and in 
XCA/CA projection + MLP
+            attn_drop_rate (float): attention dropout rate
+            drop_path_rate (float): stochastic depth rate (constant across all layers)
+            norm_layer: (nn.Module): normalization layer
+            cls_attn_layers: (int) Depth of Class attention layers
+            use_pos_embed: (bool) whether to use positional encoding
+            eta: (float) layerscale initialization value
+            tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA
+
+        Notes:
+            - Although `norm_layer` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch
+              interaction (class LPI) and the patch embedding (class ConvPatchEmbed)
+        """
+        super().__init__()
+        assert global_pool in ('', 'avg', 'token')
+        img_size = to_2tuple(img_size)
+        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
+            '`patch_size` should divide image dimensions evenly'
+        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
+        act_layer = act_layer or nn.GELU
+
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim
+        self.global_pool = global_pool
+        self.grad_checkpointing = False
+
+        self.patch_embed = ConvPatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
+
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.use_pos_embed = use_pos_embed
+        if use_pos_embed:
+            self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        self.blocks = nn.ModuleList([
+            XCABlock(
+                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+                attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta)
+            for _ in range(depth)])
+
+        self.cls_attn_blocks = nn.ModuleList([
+            ClassAttentionBlock(
+                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+                attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
+            for _ in range(cls_attn_layers)])
+
+        # Classifier head
+        self.norm = norm_layer(embed_dim)
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+        # Init weights
+        trunc_normal_(self.cls_token, std=.02)
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'pos_embed', 'cls_token'}
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^cls_token|pos_embed|patch_embed',  # stem and embed
+            blocks=r'^blocks\.(\d+)',
+            cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))]
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        self.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=''):
+        self.num_classes = num_classes
+        if global_pool is not None:
+            assert global_pool in ('', 'avg', 'token')
+            self.global_pool = global_pool
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x):
+        B = x.shape[0]
+        # x is (B, N, C).
(Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + x = self.pos_drop(x) + + for blk in self.blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x, Hp, Wp) + else: + x = blk(x, Hp, Wp) + + x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) + + for blk in self.cls_attn_blocks: + if self.grad_checkpointing and not torch.jit.is_scripting(): + x = checkpoint(blk, x) + else: + x = blk(x) + + x = self.norm(x) + return x + + def forward_head(self, x, pre_logits: bool = False): + if self.global_pool: + x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] + return x if pre_logits else self.head(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.forward_head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + model = build_model_with_cfg( + XCiT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs) + 
model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, 
num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_384_dist(pretrained=False, 
**kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def 
xcit_medium_24_p8_384_dist(pretrained=False, **kwargs):
+    model_kwargs = dict(
+        patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs)
+    model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def xcit_large_24_p8_224(pretrained=False, **kwargs):
+    model_kwargs = dict(
+        patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)
+    model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def xcit_large_24_p8_224_dist(pretrained=False, **kwargs):
+    model_kwargs = dict(
+        patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)
+    model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def xcit_large_24_p8_384_dist(pretrained=False, **kwargs):
+    model_kwargs = dict(
+        patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs)
+    model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs)
+    return model
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee4958eb562bcfe06a5da72be4b76ee610a0ccc
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/__init__.py
@@ -0,0 +1,15 @@
+from .adabelief import AdaBelief
+from .adafactor import Adafactor
+from .adahessian import Adahessian
+from .adamp import AdamP
+from .adamw import AdamW
+from .lamb import Lamb
+from .lars import Lars
+from .lookahead import Lookahead
+from .madgrad import MADGRAD
+from .nadam import Nadam
+from .nvnovograd import NvNovoGrad
+from .radam import RAdam
+from .rmsprop_tf import RMSpropTF
+from .sgdp import SGDP
+from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adabelief.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adabelief.py
new file mode 100644
index 0000000000000000000000000000000000000000..951d715cc0b605df2f7313c95840b7784c4d0a70
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adabelief.py
@@ -0,0 +1,201 @@
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+class AdaBelief(Optimizer):
+    r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-16)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+        decoupled_decay (boolean, optional): (default: True) If set as True, then
+            the optimizer uses decoupled weight decay as in AdamW
+        fixed_decay (boolean, optional): (default: False) This is used when decoupled_decay
+            is set as True.
+            When fixed_decay == True, the weight decay is performed as
+            $W_{new} = W_{old} - W_{old} \times decay$.
+            When fixed_decay == False, the weight decay is performed as
+            $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
+            weight decay ratio decreases with learning rate (lr).
+        rectify (boolean, optional): (default: True) If set as True, then perform the rectified
+            update similar to RAdam
+        degenerated_to_sgd (boolean, optional): (default: True) If set as True, then perform SGD update
+            when variance of gradient is high
+        reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020
+
+    For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer
+    For example train/args for EfficientNet see these gists
+      - link to train_script: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
+      - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3
+    """
+
+    def __init__(
+            self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False,
+            decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True):
+
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+
+        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
+            for param in params:
+                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
+                    param['buffer'] = [[None, None, None] for _ in range(10)]
+
+        defaults = dict(
+            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad,
+            degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify,
+            fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)])
+        super(AdaBelief, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdaBelief, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def reset(self):
+        for group in self.param_groups:
+            for p in group['params']:
+                state = self.state[p]
+                amsgrad = group['amsgrad']
+
+                # State initialization
+                state['step'] = 0
+                # Exponential moving average of gradient values
+                state['exp_avg'] = torch.zeros_like(p)
+
+                # Exponential moving average of squared gradient values
+                state['exp_avg_var'] = torch.zeros_like(p)
+                if amsgrad:
+                    # Maintains max of all exp. moving avg. of sq. grad. values
+                    state['max_exp_avg_var'] = torch.zeros_like(p)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
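+
+        The one change relative to Adam is in the second moment below: it tracks the
+        variance of the gradient around its own EMA, ``(grad - exp_avg) ** 2``, rather
+        than the raw squared gradient, so step sizes grow where observed gradients match
+        the "belief" (the EMA) and shrink where they deviate from it.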
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adafactor.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adafactor.py new file mode 100644 index 0000000000000000000000000000000000000000..06057433a9bffa555bdc13b27a1c56cff26acf15 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
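+
+    For example (a usage sketch; `model` is assumed), since `relative_step = not lr`:
+
+        optimizer = Adafactor(model.parameters())           # internal relative-step schedule
+        optimizer = Adafactor(model.parameters(), lr=1e-3)  # external learning rate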
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
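+
+        Note: for parameters with two or more dimensions, the squared-gradient statistic
+        is factored into per-row and per-column running means; their reconstruction in
+        `_approx_sq_grad` stands in for the full matrix, giving the sublinear memory cost.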
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adahessian.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adahessian.py new file mode 100644 index 0000000000000000000000000000000000000000..985c67ca686a65f61f5c5b1a7db3e5bba815a19b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve 
numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
+        hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
+        update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
+            (to save time) (default: 1)
+        n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
+    """
+
+    def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
+                 hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
+        if not 0.0 <= lr:
+            raise ValueError(f"Invalid learning rate: {lr}")
+        if not 0.0 <= eps:
+            raise ValueError(f"Invalid epsilon value: {eps}")
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
+        if not 0.0 <= hessian_power <= 1.0:
+            raise ValueError(f"Invalid Hessian power value: {hessian_power}")
+
+        self.n_samples = n_samples
+        self.update_each = update_each
+        self.avg_conv_kernel = avg_conv_kernel
+
+        # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training
+        self.seed = 2147483647
+        self.generator = torch.Generator().manual_seed(self.seed)
+
+        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
+        super(Adahessian, self).__init__(params, defaults)
+
+        for p in self.get_params():
+            p.hess = 0.0
+            self.state[p]["hessian step"] = 0
+
+    @property
+    def is_second_order(self):
+        return True
+
+    def get_params(self):
+        """
+        Gets all parameters in all param_groups with gradients
+        """
+
+        return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
+
+    def zero_hessian(self):
+        """
+        Zeros out the accumulated hessian traces.
+        """
+
+        for p in self.get_params():
+            if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
+                p.hess.zero_()
+
+    @torch.no_grad()
+    def set_hessian(self):
+        """
+        Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
+        """
+
+        params = []
+        for p in filter(lambda p: p.grad is not None, self.get_params()):
+            if self.state[p]["hessian step"] % self.update_each == 0:  # compute the trace only each `update_each` step
+                params.append(p)
+            self.state[p]["hessian step"] += 1
+
+        if len(params) == 0:
+            return
+
+        if self.generator.device != params[0].device:  # hackish way of casting the generator to the right device
+            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
+
+        grads = [p.grad for p in params]
+
+        for i in range(self.n_samples):
+            # Rademacher distribution {-1.0, 1.0}
+            zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
+            h_zs = torch.autograd.grad(
+                grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
+            for h_z, z, p in zip(h_zs, zs, params):
+                p.hess += h_z * z / self.n_samples  # approximate the expected values of z*(H@z)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """
+        Performs a single optimization step.
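+        The preconditioner uses the Hutchinson estimate accumulated in `set_hessian`:
+        for Rademacher vectors z, E[z * (Hz)] equals the Hessian diagonal, which takes
+        the place of Adam's squared gradient.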
+        Arguments:
+            closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
+        """
+
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        self.zero_hessian()
+        self.set_hessian()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None or p.hess is None:
+                    continue
+
+                if self.avg_conv_kernel and p.dim() == 4:
+                    p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()
+
+                # Perform correct stepweight decay as in AdamW
+                p.mul_(1 - group['lr'] * group['weight_decay'])
+
+                state = self.state[p]
+
+                # State initialization ("hessian step" is pre-seeded in __init__, hence len == 1 when uninitialized)
+                if len(state) == 1:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p)
+                    # Exponential moving average of Hessian diagonal square values
+                    state['exp_hessian_diag_sq'] = torch.zeros_like(p)
+
+                exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
+                beta1, beta2 = group['betas']
+                state['step'] += 1
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
+                exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)
+
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+
+                k = group['hessian_power']
+                denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])
+
+                # make update
+                step_size = group['lr'] / bias_correction1
+                p.addcdiv_(exp_avg, denom, value=-step_size)
+
+        return loss
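Usage sketch for the Adahessian class above (an illustrative example, not part of the diff; it assumes the `custom_timm` package is importable). Because set_hessian() differentiates the existing gradients, the backward pass must be run with create_graph=True:

    import torch
    from custom_timm.optim.adahessian import Adahessian

    model = torch.nn.Linear(16, 4)
    opt = Adahessian(model.parameters(), lr=0.1)
    loss = model(torch.randn(8, 16)).pow(2).mean()
    loss.backward(create_graph=True)  # keep the graph for the Hutchinson Hessian-trace estimate
    opt.step()                        # step() internally runs zero_hessian() and set_hessian()
    opt.zero_grad()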
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamp.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamp.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee187633ab745dbb0344dcdc3dcb1cf40e6ae5e9
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamp.py
@@ -0,0 +1,105 @@
+"""
+AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py
+
+Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217
+Code: https://github.com/clovaai/AdamP
+
+Copyright (c) 2020-present NAVER Corp.
+MIT license
+"""
+
+import torch
+import torch.nn.functional as F
+from torch.optim.optimizer import Optimizer
+import math
+
+
+def _channel_view(x) -> torch.Tensor:
+    return x.reshape(x.size(0), -1)
+
+
+def _layer_view(x) -> torch.Tensor:
+    return x.reshape(1, -1)
+
+
+def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float):
+    wd = 1.
+    expand_size = (-1,) + (1,) * (len(p.shape) - 1)
+    for view_func in [_channel_view, _layer_view]:
+        param_view = view_func(p)
+        grad_view = view_func(grad)
+        cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_()
+
+        # FIXME this is a problem for PyTorch XLA
+        if cosine_sim.max() < delta / math.sqrt(param_view.size(1)):
+            p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size)
+            perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size)
+            wd = wd_ratio
+            return perturb, wd
+
+    return perturb, wd
+
+
+class AdamP(Optimizer):
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
+        defaults = dict(
+            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
+            delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
+        super(AdamP, self).__init__(params, defaults)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+
+                grad = p.grad
+                beta1, beta2 = group['betas']
+                nesterov = group['nesterov']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p)
+                    state['exp_avg_sq'] = torch.zeros_like(p)
+
+                # Adam
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+
+                state['step'] += 1
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+
+                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+
+                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
+                step_size = group['lr'] / bias_correction1
+
+                if nesterov:
+                    perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
+                else:
+                    perturb = exp_avg / denom
+
+                # Projection
+                wd_ratio = 1.
+                if len(p.shape) > 1:
+                    perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])
+
+                # Weight decay
+                if group['weight_decay'] > 0:
+                    p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio)
+
+                # Step
+                p.add_(perturb, alpha=-step_size)
+
+        return loss
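Usage sketch for AdamP (illustrative, not part of the diff): for weights with more than one dimension, projection() removes the radial component of the update when the gradient is nearly parallel to the weight, and scales weight decay by wd_ratio in that case.

    import torch
    from custom_timm.optim.adamp import AdamP

    model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
    opt = AdamP(model.parameters(), lr=1e-3, weight_decay=1e-2, delta=0.1, wd_ratio=0.1)
    model(torch.randn(2, 3, 16, 16)).mean().backward()
    opt.step()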
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamw.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamw.py
new file mode 100644
index 0000000000000000000000000000000000000000..66478bc6ef3c50ab9d40cabb0cfb2bd24277c815
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/adamw.py
@@ -0,0 +1,122 @@
+""" AdamW Optimizer
+Impl copied from PyTorch master
+
+NOTE: Builtin optim.AdamW is used by the factory; this impl only serves as a Python-based reference and will be removed
+someday
+"""
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+class AdamW(Optimizer):
+    r"""Implements AdamW algorithm.
+
+    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
+    The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+    .. _Decoupled Weight Decay Regularization:
+        https://arxiv.org/abs/1711.05101
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=1e-2, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(AdamW, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdamW, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+
+                # Perform stepweight decay
+                p.data.mul_(1 - group['lr'] * group['weight_decay'])
+
+                # Perform optimization step
+                grad = p.grad
+                if grad.is_sparse:
+                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros_like(p)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+                bias_correction1 = 1 - beta1 ** state['step']
+                bias_correction2 = 1 - beta2 ** state['step']
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
+                else:
+                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
+
+                step_size = group['lr'] / bias_correction1
+
+                p.addcdiv_(exp_avg, denom, value=-step_size)
+
+        return loss
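Usage sketch for the reference AdamW above (illustrative, not part of the diff). Note the decoupled decay: the weights are first multiplied by 1 - lr * weight_decay, instead of adding weight_decay * p to the gradient as plain Adam L2 regularization would.

    import torch
    from custom_timm.optim.adamw import AdamW

    w = torch.nn.Parameter(torch.randn(10))
    opt = AdamW([w], lr=1e-3, weight_decay=1e-2)
    w.pow(2).sum().backward()
    opt.step()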
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lamb.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lamb.py
new file mode 100644
index 0000000000000000000000000000000000000000..12c7c49b8a01ef793c97654ac938259ca6508449
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lamb.py
@@ -0,0 +1,192 @@
+""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb
+
+This optimizer code was adapted from the following (starting with latest)
+* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
+* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
+* https://github.com/cybertronai/pytorch-lamb
+
+Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
+similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.
+
+In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.
+
+Original copyrights for above sources are below.
+
+Modifications Copyright 2021 Ross Wightman
+"""
+# Copyright (c) 2021, Habana Labs Ltd.  All rights reserved.
+
+# Copyright (c) 2019-2020, NVIDIA CORPORATION.  All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2019 cybertronai
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+import math
+
+import torch
+from torch.optim import Optimizer
+
+
+class Lamb(Optimizer):
+    """Implements a pure pytorch variant of FusedLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
+    reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
+
+    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
+        lr (float, optional): learning rate. (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its norm. (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability. (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        grad_averaging (bool, optional): whether to apply (1-beta1) to grad when
+            calculating running averages of gradient. (default: True)
+        max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0)
+        trust_clip (bool): enable LAMBC trust ratio clipping (default: False)
+        always_adapt (boolean, optional): apply the layer-wise adaptive learning rate even when
+            weight decay is 0.0 (default: False)
+
+    .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
+        https://arxiv.org/abs/1904.00962
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(
+            self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6,
+            weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False):
+        defaults = dict(
+            lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay,
+            grad_averaging=grad_averaging, max_grad_norm=max_grad_norm,
+            trust_clip=trust_clip, always_adapt=always_adapt)
+        super().__init__(params, defaults)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. 
+                    w_norm = p.norm(2.0)
+                    g_norm = update.norm(2.0)
+                    # FIXME nested where required since logical and/or not working in PT XLA
+                    trust_ratio = torch.where(
+                        w_norm > 0,
+                        torch.where(g_norm > 0, w_norm / g_norm, one_tensor),
+                        one_tensor,
+                    )
+                    if group['trust_clip']:
+                        # LAMBC trust clipping, upper bound fixed at one
+                        trust_ratio = torch.minimum(trust_ratio, one_tensor)
+                    update.mul_(trust_ratio)
+
+                p.add_(update, alpha=-group['lr'])
+
+        return loss
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lars.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lars.py
new file mode 100644
index 0000000000000000000000000000000000000000..38ca9e0b5cb90855104ce7b5ff358cb7fa343f12
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lars.py
@@ -0,0 +1,135 @@
+""" PyTorch LARS / LARC Optimizer
+
+An implementation of LARS (SGD) + LARC in PyTorch
+
+Based on:
+  * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
+  * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
+
+Additional cleanup and modifications to properly support PyTorch XLA.
+
+Copyright 2021 Ross Wightman
+"""
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+class Lars(Optimizer):
+    """ LARS for PyTorch
+
+    Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf
+
+    Args:
+        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
+        lr (float, optional): learning rate (default: 1.0).
+        momentum (float, optional): momentum factor (default: 0)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        dampening (float, optional): dampening for momentum (default: 0)
+        nesterov (bool, optional): enables Nesterov momentum (default: False)
+        trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001)
+        eps (float): eps for division denominator (default: 1e-8)
+        trust_clip (bool): enable LARC trust ratio clipping (default: False)
+        always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False)
+    """
+
+    def __init__(
+            self,
+            params,
+            lr=1.0,
+            momentum=0,
+            dampening=0,
+            weight_decay=0,
+            nesterov=False,
+            trust_coeff=0.001,
+            eps=1e-8,
+            trust_clip=False,
+            always_adapt=False,
+    ):
+        if lr < 0.0:
+            raise ValueError(f"Invalid learning rate: {lr}")
+        if momentum < 0.0:
+            raise ValueError(f"Invalid momentum value: {momentum}")
+        if weight_decay < 0.0:
+            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
+        if nesterov and (momentum <= 0 or dampening != 0):
+            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
+
+        defaults = dict(
+            lr=lr,
+            momentum=momentum,
+            dampening=dampening,
+            weight_decay=weight_decay,
+            nesterov=nesterov,
+            trust_coeff=trust_coeff,
+            eps=eps,
+            trust_clip=trust_clip,
+            always_adapt=always_adapt,
+        )
+        super().__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super().__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault("nesterov", False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Args:
+            closure (callable, optional): A closure that reevaluates the model and returns the loss.
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + trust_coeff = group['trust_coeff'] + eps = group['eps'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + # apply LARS LR adaptation, LARC clipping, weight decay + # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + if weight_decay != 0 or group['always_adapt']: + w_norm = p.norm(2.0) + g_norm = grad.norm(2.0) + trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, one_tensor), + one_tensor, + ) + if group['trust_clip']: + trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) + grad.add_(p, alpha=weight_decay) + grad.mul_(trust_ratio) + + # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py new file mode 100644 index 0000000000000000000000000000000000000000..462c3acd247016a94acd39a27dd44f29ae854d31 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py
new file mode 100644
index 0000000000000000000000000000000000000000..462c3acd247016a94acd39a27dd44f29ae854d31
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/lookahead.py
@@ -0,0 +1,61 @@
+""" Lookahead Optimizer Wrapper.
+Implementation modified from: https://github.com/alphadl/lookahead.pytorch
+Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+from torch.optim.optimizer import Optimizer
+from collections import defaultdict
+
+
+class Lookahead(Optimizer):
+    def __init__(self, base_optimizer, alpha=0.5, k=6):
+        # NOTE super().__init__() not called on purpose
+        if not 0.0 <= alpha <= 1.0:
+            raise ValueError(f'Invalid slow update rate: {alpha}')
+        if not 1 <= k:
+            raise ValueError(f'Invalid lookahead steps: {k}')
+        defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0)
+        self._base_optimizer = base_optimizer
+        self.param_groups = base_optimizer.param_groups
+        self.defaults = base_optimizer.defaults
+        self.defaults.update(defaults)
+        self.state = defaultdict(dict)
+        # manually add our defaults to the param groups
+        for name, default in defaults.items():
+            for group in self._base_optimizer.param_groups:
+                group.setdefault(name, default)
+
+    @torch.no_grad()
+    def update_slow(self, group):
+        for fast_p in group["params"]:
+            if fast_p.grad is None:
+                continue
+            param_state = self._base_optimizer.state[fast_p]
+            if 'lookahead_slow_buff' not in param_state:
+                param_state['lookahead_slow_buff'] = torch.empty_like(fast_p)
+                param_state['lookahead_slow_buff'].copy_(fast_p)
+            slow = param_state['lookahead_slow_buff']
+            slow.add_(fast_p - slow, alpha=group['lookahead_alpha'])
+            fast_p.copy_(slow)
+
+    def sync_lookahead(self):
+        for group in self._base_optimizer.param_groups:
+            self.update_slow(group)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        loss = self._base_optimizer.step(closure)
+        for group in self._base_optimizer.param_groups:
+            group['lookahead_step'] += 1
+            if group['lookahead_step'] % group['lookahead_k'] == 0:
+                self.update_slow(group)
+        return loss
+
+    def state_dict(self):
+        return self._base_optimizer.state_dict()
+
+    def load_state_dict(self, state_dict):
+        self._base_optimizer.load_state_dict(state_dict)
+        self.param_groups = self._base_optimizer.param_groups
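Usage sketch for Lookahead (illustrative, not part of the diff): it wraps any base optimizer; every k fast steps the slow weights move toward the fast weights by alpha, and the fast weights are reset onto them.

    import torch
    from custom_timm.optim.lookahead import Lookahead

    model = torch.nn.Linear(16, 4)
    opt = Lookahead(torch.optim.SGD(model.parameters(), lr=0.1), alpha=0.5, k=6)
    for _ in range(12):
        opt.zero_grad()
        model(torch.randn(8, 16)).pow(2).mean().backward()
        opt.step()
    opt.sync_lookahead()  # align fast weights with the slow weights before eval/checkpointing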
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/madgrad.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/madgrad.py
new file mode 100644
index 0000000000000000000000000000000000000000..a76713bf27ed1daf0ce598ac5f25c6238c7fdb57
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/madgrad.py
@@ -0,0 +1,184 @@
+""" PyTorch MADGRAD optimizer
+
+MADGRAD: https://arxiv.org/abs/2101.11075
+
+Code from: https://github.com/facebookresearch/madgrad
+"""
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import TYPE_CHECKING, Any, Callable, Optional
+
+import torch
+import torch.optim
+
+if TYPE_CHECKING:
+    from torch.optim.optimizer import _params_t
+else:
+    _params_t = Any
+
+
+class MADGRAD(torch.optim.Optimizer):
+    """
+    MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
+    Optimization.
+
+    .. _MADGRAD: https://arxiv.org/abs/2101.11075
+
+    MADGRAD is a general purpose optimizer that can be used in place of SGD or
+    Adam, and may converge faster and generalize better. Currently GPU-only.
+    Typically, the same learning rate schedule that is used for SGD or Adam may
+    be used. The overall learning rate is not comparable to either method and
+    should be determined by a hyper-parameter sweep.
+
+    MADGRAD requires less weight decay than other methods, often as little as
+    zero. Momentum values used for SGD or Adam's beta1 should work here also.
+
+    On sparse problems both weight_decay and momentum should be set to 0.
+
+    Arguments:
+        params (iterable):
+            Iterable of parameters to optimize or dicts defining parameter groups.
+        lr (float):
+            Learning rate (default: 1e-2).
+        momentum (float):
+            Momentum value in the range [0,1) (default: 0.9).
+        weight_decay (float):
+            Weight decay, i.e. an L2 penalty (default: 0).
+        eps (float):
+            Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
+    """
+
+    def __init__(
+            self,
+            params: _params_t,
+            lr: float = 1e-2,
+            momentum: float = 0.9,
+            weight_decay: float = 0,
+            eps: float = 1e-6,
+            decoupled_decay: bool = False,
+    ):
+        if momentum < 0 or momentum >= 1:
+            raise ValueError(f"Momentum {momentum} must be in the range [0,1)")
+        if lr <= 0:
+            raise ValueError(f"Learning rate {lr} must be positive")
+        if weight_decay < 0:
+            raise ValueError(f"Weight decay {weight_decay} must be non-negative")
+        if eps < 0:
+            raise ValueError(f"Eps {eps} must be non-negative")
+
+        defaults = dict(
+            lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
+        super().__init__(params, defaults)
+
+    @property
+    def supports_memory_efficient_fp16(self) -> bool:
+        return False
+
+    @property
+    def supports_flat_params(self) -> bool:
+        return True
+
+    @torch.no_grad()
+    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model and returns the loss.
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + lr = group['lr'] + eps + weight_decay = group['weight_decay'] + momentum = group['momentum'] + ck = 1 - momentum + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError("momentum != 0 is not compatible with sparse gradients") + + state = self.state[p] + if len(state) == 0: + state['step'] = 0 + state['grad_sum_sq'] = torch.zeros_like(p) + state['s'] = torch.zeros_like(p) + if momentum != 0: + state['x0'] = torch.clone(p).detach() + + state['step'] += 1 + grad_sum_sq = state['grad_sum_sq'] + s = state['s'] + lamb = lr * math.sqrt(state['step']) + + # Apply weight decay + if weight_decay != 0: + if group['decoupled_decay']: + p.mul_(1.0 - group['lr'] * weight_decay) + else: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + grad.add_(p, alpha=weight_decay) + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, alpha=-1) + p.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.addcdiv(s, rms, value=1) + else: + x0 = state['x0'] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 3).add_(eps) + + # Update s + s.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.mul_(1 - ck).add_(z, alpha=ck) + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py new file mode 100644 index 0000000000000000000000000000000000000000..6268d5d451ed2fe26b47e46476dc1feee7da9649 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py @@ -0,0 +1,92 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class Nadam(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. 
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py
new file mode 100644
index 0000000000000000000000000000000000000000..6268d5d451ed2fe26b47e46476dc1feee7da9649
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nadam.py
@@ -0,0 +1,92 @@
+import math
+
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+class Nadam(Optimizer):
+    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
+
+    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 2e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
+
+    __ http://cs229.stanford.edu/proj2015/054_report.pdf
+    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
+
+    Originally taken from: https://github.com/pytorch/pytorch/pull/1408
+    NOTE: Has potential issues but does work well on some problems.
+    """
+
+    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=0, schedule_decay=4e-3):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        defaults = dict(
+            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay)
+        super(Nadam, self).__init__(params, defaults)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['m_schedule'] = 1.
+                    state['exp_avg'] = torch.zeros_like(p)
+                    state['exp_avg_sq'] = torch.zeros_like(p)
+
+                # Warming momentum schedule
+                m_schedule = state['m_schedule']
+                schedule_decay = group['schedule_decay']
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                beta1, beta2 = group['betas']
+                eps = group['eps']
+                state['step'] += 1
+                t = state['step']
+                bias_correction2 = 1 - beta2 ** t
+
+                if group['weight_decay'] != 0:
+                    grad = grad.add(p, alpha=group['weight_decay'])
+
+                momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
+                momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
+                m_schedule_new = m_schedule * momentum_cache_t
+                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
+                state['m_schedule'] = m_schedule_new
+
+                # Decay the first and second moment running average coefficient
+                exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
+                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
+
+                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
+                p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
+                p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))
+
+        return loss
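Usage sketch for Nadam (illustrative, not part of the diff): the two addcdiv_ calls above implement the Nesterov correction, weighting the raw gradient by (1 - mu_t) and the momentum term by mu_{t+1} under the warming schedule.

    import torch
    from custom_timm.optim.nadam import Nadam

    w = torch.nn.Parameter(torch.randn(10))
    opt = Nadam([w], lr=2e-3, schedule_decay=4e-3)
    w.pow(2).sum().backward()
    opt.step()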
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nvnovograd.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nvnovograd.py
new file mode 100644
index 0000000000000000000000000000000000000000..fda3f4a620fcca5593034dfb9683f2c8f3b78ac1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/nvnovograd.py
@@ -0,0 +1,120 @@
+""" Nvidia NovoGrad Optimizer.
+Original impl by Nvidia from Jasper example:
+    - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper
+Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`
+    - https://arxiv.org/abs/1905.11286
+"""
+
+import torch
+from torch.optim.optimizer import Optimizer
+import math
+
+
+class NvNovoGrad(Optimizer):
+    """
+    Implements Novograd algorithm.
+
+    Args:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.95, 0.98))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        grad_averaging (bool, optional): if True, multiply the normalized gradient by (1 - beta1)
+            before accumulating it into the first moment (default: False)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            (default: False)
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,
+                 weight_decay=0, grad_averaging=False, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay,
+                        grad_averaging=grad_averaging,
+                        amsgrad=amsgrad)
+
+        super(NvNovoGrad, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(NvNovoGrad, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad
+                if grad.is_sparse:
+                    raise RuntimeError('Sparse gradients are not supported.')
+                amsgrad = group['amsgrad']
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                if amsgrad:
+                    max_exp_avg_sq = state['max_exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                state['step'] += 1
+
+                norm = torch.sum(torch.pow(grad, 2))
+
+                if exp_avg_sq == 0:
+                    exp_avg_sq.copy_(norm)
+                else:
+                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
+
+                if amsgrad:
+                    # Maintains the maximum of all 2nd moment running avg. till now
+                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
+                    # Use the max. for normalizing running avg. of gradient
+                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
+                else:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+
+                grad.div_(denom)
+                if group['weight_decay'] != 0:
+                    grad.add_(p, alpha=group['weight_decay'])
+                if group['grad_averaging']:
+                    grad.mul_(1 - beta1)
+                exp_avg.mul_(beta1).add_(grad)
+
+                p.add_(exp_avg, alpha=-group['lr'])
+
+        return loss
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/optim_factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/optim_factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..4acaec67bb094c870b5ecd34b41b14a172de8bdd
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/optim_factory.py
@@ -0,0 +1,340 @@
+""" Optimizer Factory w/ Custom Weight Decay
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import logging
+from itertools import islice
+from typing import Optional, Callable, Tuple
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+from custom_timm.models.helpers import group_parameters
+
+from .adabelief import AdaBelief
+from .adafactor import Adafactor
+from .adahessian import Adahessian
+from .adamp import AdamP
+from .lamb import Lamb
+from .lars import Lars
+from .lookahead import Lookahead
+from .madgrad import MADGRAD
+from .nadam import Nadam
+from .nvnovograd import NvNovoGrad
+from .radam import RAdam
+from .rmsprop_tf import RMSpropTF
+from .sgdp import SGDP
+
+try:
+    from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
+    has_apex = True
+except ImportError:
+    has_apex = False
+
+_logger = logging.getLogger(__name__)
+
+
+def param_groups_weight_decay(
+        model: nn.Module,
+        weight_decay=1e-5,
+        no_weight_decay_list=()
+):
+    no_weight_decay_list = set(no_weight_decay_list)
+    decay = []
+    no_decay = []
+    for name, param in model.named_parameters():
+        if not param.requires_grad:
+            continue
+
+        if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list:
+            no_decay.append(param)
+        else:
+            decay.append(param)
+
+    return [
+        {'params': no_decay, 'weight_decay': 0.},
+        {'params': decay, 'weight_decay': weight_decay}]
+
+
+def _group(it, size):
+    it = iter(it)
+    return iter(lambda: tuple(islice(it, size)), ())
+
+
+def _layer_map(model, layers_per_group=12, num_groups=None):
+    def _in_head(n, hp):
+        if not hp:
+            return True
+        elif isinstance(hp, (tuple, list)):
+            return any([n.startswith(hpi) for hpi in hp])
+        else:
+            return n.startswith(hp)
+
+    head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
+    names_trunk = []
+    names_head = []
+    for n, _ in model.named_parameters():
+        names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n)
+
+    # group non-head layers
+    num_trunk_layers = len(names_trunk)
+    if num_groups is not None:
+        layers_per_group = -(num_trunk_layers // -num_groups)
+    names_trunk = list(_group(names_trunk, layers_per_group))
+
+    num_trunk_groups = len(names_trunk)
+    layer_map = {n: i for i, l in enumerate(names_trunk) for n in l}
+    layer_map.update({n: num_trunk_groups for n in names_head})
+    return layer_map
+
+
+def param_groups_layer_decay(
+        model: nn.Module,
+        weight_decay: float = 0.05,
+        no_weight_decay_list: Tuple[str] = (),
+        layer_decay: float = .75,
+        end_layer_decay: Optional[float] = None,
+        verbose: bool = False,
+):
+    """
+    Parameter groups for layer-wise lr decay & weight decay
+    Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
+    """
+    no_weight_decay_list = set(no_weight_decay_list)
+    param_group_names = {}  # NOTE for debugging
+    param_groups = {}
+
+    if hasattr(model, 'group_matcher'):
+        # FIXME interface needs more work
+        layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
+    else:
+        # fallback
+        layer_map = _layer_map(model)
+    num_layers = max(layer_map.values()) + 1
+    layer_max = num_layers - 1
+    layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers))
+
+    for name, param in model.named_parameters():
+        if not param.requires_grad:
+            continue
+
+        # no decay: all 1D parameters and model specific ones
+        if param.ndim == 1 or name in no_weight_decay_list:
+            g_decay = "no_decay"
+            this_decay = 0.
+        else:
+            g_decay = "decay"
+            this_decay = weight_decay
+
+        layer_id = layer_map.get(name, layer_max)
+        group_name = "layer_%d_%s" % (layer_id, g_decay)
+
+        if group_name not in param_groups:
+            this_scale = layer_scales[layer_id]
+            param_group_names[group_name] = {
+                "lr_scale": this_scale,
+                "weight_decay": this_decay,
+                "param_names": [],
+            }
+            param_groups[group_name] = {
+                "lr_scale": this_scale,
+                "weight_decay": this_decay,
+                "params": [],
+            }
+
+        param_group_names[group_name]["param_names"].append(name)
+        param_groups[group_name]["params"].append(param)
+
+    if verbose:
+        import json
+        _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
+
+    return list(param_groups.values())
+
+
+def optimizer_kwargs(cfg):
+    """ cfg/argparse to kwargs helper
+    Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
+    """
+    kwargs = dict(
+        opt=cfg.opt,
+        lr=cfg.lr,
+        weight_decay=cfg.weight_decay,
+        momentum=cfg.momentum)
+    if getattr(cfg, 'opt_eps', None) is not None:
+        kwargs['eps'] = cfg.opt_eps
+    if getattr(cfg, 'opt_betas', None) is not None:
+        kwargs['betas'] = cfg.opt_betas
+    if getattr(cfg, 'layer_decay', None) is not None:
+        kwargs['layer_decay'] = cfg.layer_decay
+    if getattr(cfg, 'opt_args', None) is not None:
+        kwargs.update(cfg.opt_args)
+    return kwargs
+
+
+def create_optimizer(args, model, filter_bias_and_bn=True):
+    """ Legacy optimizer factory for backwards compatibility.
+    NOTE: Use create_optimizer_v2 for new code.
+    """
+    return create_optimizer_v2(
+        model,
+        **optimizer_kwargs(cfg=args),
+        filter_bias_and_bn=filter_bias_and_bn,
+    )
+
+
+def create_optimizer_v2(
+        model_or_params,
+        opt: str = 'sgd',
+        lr: Optional[float] = None,
+        weight_decay: float = 0.,
+        momentum: float = 0.9,
+        filter_bias_and_bn: bool = True,
+        layer_decay: Optional[float] = None,
+        param_group_fn: Optional[Callable] = None,
+        **kwargs):
+    """ Create an optimizer.
+
+    TODO currently the model is passed in and all parameters are selected for optimization.
+    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
+      * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
+      * expose the parameters interface and leave it up to caller
+
+    Args:
+        model_or_params (nn.Module): model containing parameters to optimize
+        opt: name of optimizer to create
+        lr: initial learning rate
+        weight_decay: weight decay to apply in optimizer
+        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
+        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
+        **kwargs: extra optimizer specific kwargs to pass through
+
+    Returns:
+        Optimizer
+    """
+    if isinstance(model_or_params, nn.Module):
+        # a model was passed in, extract parameters and add weight decays to appropriate layers
+        no_weight_decay = {}
+        if hasattr(model_or_params, 'no_weight_decay'):
+            no_weight_decay = model_or_params.no_weight_decay()
+
+        if param_group_fn:
+            parameters = param_group_fn(model_or_params)
+        elif layer_decay is not None:
+            parameters = param_groups_layer_decay(
+                model_or_params,
+                weight_decay=weight_decay,
+                layer_decay=layer_decay,
+                no_weight_decay_list=no_weight_decay)
+            weight_decay = 0.
+        elif weight_decay and filter_bias_and_bn:
+            parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay)
+            weight_decay = 0.
+        else:
+            parameters = model_or_params.parameters()
+    else:
+        # iterable of parameters or param groups passed in
+        parameters = model_or_params
+
+    opt_lower = opt.lower()
+    opt_split = opt_lower.split('_')
+    opt_lower = opt_split[-1]
+    if 'fused' in opt_lower:
+        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
+
+    opt_args = dict(weight_decay=weight_decay, **kwargs)
+    if lr is not None:
+        opt_args.setdefault('lr', lr)
+
+    # basic SGD & related
+    if opt_lower == 'sgd' or opt_lower == 'nesterov':
+        # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'momentum':
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'sgdp':
+        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
+
+    # adaptive
+    elif opt_lower == 'adam':
+        optimizer = optim.Adam(parameters, **opt_args)
+    elif opt_lower == 'adamw':
+        optimizer = optim.AdamW(parameters, **opt_args)
+    elif opt_lower == 'adamp':
+        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
+    elif opt_lower == 'nadam':
+        try:
+            # NOTE PyTorch >= 1.10 should have native NAdam
+            optimizer = optim.NAdam(parameters, **opt_args)
+        except AttributeError:
+            optimizer = Nadam(parameters, **opt_args)
+    elif opt_lower == 'radam':
+        optimizer = RAdam(parameters, **opt_args)
+    elif opt_lower == 'adamax':
+        optimizer = optim.Adamax(parameters, **opt_args)
+    elif opt_lower == 'adabelief':
+        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
+    elif opt_lower == 'radabelief':
+        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
+    elif opt_lower == 'adadelta':
+        optimizer = optim.Adadelta(parameters, **opt_args)
+    elif opt_lower == 'adagrad':
+        opt_args.setdefault('eps', 1e-8)
+        optimizer = optim.Adagrad(parameters, **opt_args)
+    elif opt_lower == 'adafactor':
+        optimizer = Adafactor(parameters, **opt_args)
+    elif opt_lower == 'lamb':
+        optimizer = Lamb(parameters, **opt_args)
+    elif opt_lower == 'lambc':
+        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
+    elif opt_lower == 'larc':
+        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
+    elif opt_lower == 'lars':
+        optimizer = Lars(parameters, momentum=momentum, **opt_args)
+    elif opt_lower == 'nlarc':
+        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
+    elif opt_lower == 'nlars':
+        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'madgrad':
+        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
+    elif opt_lower == 'madgradw':
+        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
+    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
+        optimizer = NvNovoGrad(parameters, **opt_args)
+    elif opt_lower == 'rmsprop':
+        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
+    elif opt_lower == 'rmsproptf':
+        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
+
+    # second order
+    elif opt_lower == 'adahessian':
+        optimizer = Adahessian(parameters, **opt_args)
+
+    # NVIDIA fused optimizers, require APEX to be installed
+    elif opt_lower == 'fusedsgd':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'fusedmomentum':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'fusedadam':
+        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
+    elif opt_lower == 'fusedadamw':
+        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
+    elif opt_lower == 'fusedlamb':
+        optimizer = FusedLAMB(parameters, **opt_args)
+    elif opt_lower == 'fusednovograd':
+        opt_args.setdefault('betas', (0.95, 0.98))
+        optimizer = FusedNovoGrad(parameters, **opt_args)
+
+    else:
+        raise ValueError(f'Invalid optimizer: {opt_lower}')
+
+    if len(opt_split) > 1:
+        if opt_split[0] == 'lookahead':
+            optimizer = Lookahead(optimizer)
+
+    return optimizer
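Usage sketch for the factory (illustrative, not part of the diff; assumes custom_timm and its models.helpers import resolve): with filter_bias_and_bn=True and non-zero weight decay, parameters are split into decay / no-decay groups, and a 'lookahead_' prefix wraps the chosen optimizer.

    import torch.nn as nn
    from custom_timm.optim.optim_factory import create_optimizer_v2

    model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16), nn.Linear(16, 4))
    opt = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
    opt_la = create_optimizer_v2(model, opt='lookahead_adamw', lr=1e-3)  # Lookahead-wrapped AdamW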
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/radam.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/radam.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb8d22e06c42e487c831297008851b4adc254d78
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/radam.py
@@ -0,0 +1,89 @@
+"""RAdam Optimizer.
+Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
+Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
+"""
+import math
+import torch
+from torch.optim.optimizer import Optimizer
+
+
+class RAdam(Optimizer):
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
+        defaults = dict(
+            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
+            buffer=[[None, None, None] for _ in range(10)])
+        super(RAdam, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(RAdam, self).__setstate__(state)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.float()
+                if grad.is_sparse:
+                    raise RuntimeError('RAdam does not support sparse gradients')
+
+                p_fp32 = p.float()
+
+                state = self.state[p]
+
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['exp_avg'] = torch.zeros_like(p_fp32)
+                    state['exp_avg_sq'] = torch.zeros_like(p_fp32)
+                else:
+                    state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
+                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)
+
+                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
+                beta1, beta2 = group['betas']
+
+                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+
+                state['step'] += 1
+                buffered = group['buffer'][int(state['step'] % 10)]
+                if state['step'] == buffered[0]:
+                    num_sma, step_size = buffered[1], buffered[2]
+                else:
+                    buffered[0] = state['step']
+                    beta2_t = beta2 ** state['step']
+                    num_sma_max = 2 / (1 - beta2) - 1
+                    num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
+                    buffered[1] = num_sma
+
+                    # more conservative since it's an approximated value
+                    if num_sma >= 5:
+                        step_size = group['lr'] * math.sqrt(
+                            (1 - beta2_t) *
+                            (num_sma - 4) / (num_sma_max - 4) *
+                            (num_sma - 2) / num_sma *
+                            num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
+                    else:
+                        step_size = group['lr'] / (1 - beta1 ** state['step'])
+                    buffered[2] = step_size
+
+                if group['weight_decay'] != 0:
+                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])
+
+                # more conservative since it's an approximated value
+                if num_sma >= 5:
+                    denom = exp_avg_sq.sqrt().add_(group['eps'])
+                    p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
+                else:
+                    p_fp32.add_(exp_avg, alpha=-step_size)
+
+                p.copy_(p_fp32)
+
+        return loss
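Usage sketch for RAdam (illustrative, not part of the diff): while the SMA estimate num_sma stays below 5 during the first steps, the code above falls back to a plain momentum update, then switches to the variance-rectified Adam step.

    import torch
    from custom_timm.optim.radam import RAdam

    w = torch.nn.Parameter(torch.randn(10))
    opt = RAdam([w], lr=1e-3)
    for _ in range(10):
        opt.zero_grad()
        w.pow(2).sum().backward()
        opt.step()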
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/rmsprop_tf.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/rmsprop_tf.py
new file mode 100644
index 0000000000000000000000000000000000000000..0817887db380261dfee3fcd4bd155b5d923f5248
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/rmsprop_tf.py
@@ -0,0 +1,139 @@
+""" RMSProp modified to behave like Tensorflow impl
+
+Originally cut & paste from PyTorch RMSProp
+https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py
+Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE
+
+Modifications Copyright 2021 Ross Wightman
+"""
+
+import torch
+from torch.optim import Optimizer
+
+
+class RMSpropTF(Optimizer):
+    """Implements RMSprop algorithm (TensorFlow style epsilon)
+
+    NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
+    and a few other modifications to closer match Tensorflow for matching hyper-params.
+
+    Noteworthy changes include:
+    1. Epsilon applied inside square-root
+    2. square_avg initialized to ones
+    3. LR scaling of update accumulated in momentum buffer
+
+    Proposed by G. Hinton in his
+    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
+
+    The centered version first appears in `Generating Sequences
+    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups
+        lr (float, optional): learning rate (default: 1e-2)
+        momentum (float, optional): momentum factor (default: 0)
+        alpha (float, optional): smoothing (decay) constant (default: 0.9)
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-10)
+        centered (bool, optional) : if ``True``, compute the centered RMSProp,
+            the gradient is normalized by an estimation of its variance
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
+        lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
+            update as per defaults in Tensorflow
+
+    """
+
+    def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
+                 decoupled_decay=False, lr_in_momentum=True):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= momentum:
+            raise ValueError("Invalid momentum value: {}".format(momentum))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        if not 0.0 <= alpha:
+            raise ValueError("Invalid alpha value: {}".format(alpha))
+
+        defaults = dict(
+            lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
+            decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
+        super(RMSpropTF, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(RMSpropTF, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('momentum', 0)
+            group.setdefault('centered', False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad
+                if grad.is_sparse:
+                    raise RuntimeError('RMSprop does not support sparse gradients')
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    state['square_avg'] = torch.ones_like(p)  # PyTorch inits to zero
+                    if group['momentum'] > 0:
+                        state['momentum_buffer'] = torch.zeros_like(p)
+                    if group['centered']:
+                        state['grad_avg'] = torch.zeros_like(p)
+
+                square_avg = state['square_avg']
+                one_minus_alpha = 1. - group['alpha']
+
+                state['step'] += 1
+
+                if group['weight_decay'] != 0:
+                    if group['decoupled_decay']:
- group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/sgdp.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/sgdp.py new file mode 100644 index 0000000000000000000000000000000000000000..baf05fa55c632371498ec53ff679b11023429df6 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/optim/sgdp.py @@ -0,0 +1,70 @@ +""" +SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer, required +import math + +from .adamp import projection + + +class SGDP(Optimizer): + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): + defaults = dict( + lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, + nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) + super(SGDP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['momentum'] = torch.zeros_like(p) + + # SGD + buf = state['momentum'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if weight_decay != 0: + p.mul_(1. 
- group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) + + # Step + p.add_(d_p, alpha=-group['lr']) + + return loss diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1961b88fc3c37cdd8c73f9fddd4bfa1ada95f23 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/__init__.py @@ -0,0 +1,8 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + +from .scheduler_factory import create_scheduler diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/cosine_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/cosine_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..84ee349ec281f89e331be3643b613e158bb3c194 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/cosine_lr.py @@ -0,0 +1,119 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. + + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** 
i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/multistep_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/multistep_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..8b0ca920307fa4ee6e63340d76ca278b729091e3 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/multistep_lr.py @@ -0,0 +1,65 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. +""" +import torch +import bisect +from custom_timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t+1) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/plateau_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/plateau_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..cacfab3ce7f073c9a99037ed85259fa3286f51ad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/plateau_lr.py @@ -0,0 +1,103 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows 
application of noise, warmup. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__(self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__( + optimizer, + 'lr', + noise_range_t=noise_range_t, + noise_type=noise_type, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self._is_apply_noise(epoch): + self._apply_noise(epoch) + + def _apply_noise(self, epoch): + noise = self._calculate_noise(epoch) + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/poly_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/poly_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..9c351be6ed56f8fe130cd391df0a7a7f89c7a96c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/poly_lr.py @@ -0,0 +1,116 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. 
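+
+Within each cycle the LR decays from the (cycle-scaled) base value toward lr_min as
+lr = lr_min + (lr_max - lr_min) * (1 - (t / t_i) ** k) ** power, where k is the
+k-decay exponent (k = 1 gives plain polynomial decay).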
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..af20be9b59d2fecfd813785ea6bc06093f57858d --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler.py @@ -0,0 +1,117 @@ +from typing import Dict, Any + +import torch + + +class 
Scheduler: + """ Parameter Scheduler Base Class + A scheduler base class that can be used to schedule any optimizer parameter groups. + + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
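+        # Optional noise: when noise_range_t covers the current step, the scheduled
+        # values are perturbed multiplicatively (see _add_noise / _calculate_noise below).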
+ self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + if 'lr_scale' in param_group: + param_group[self.param_group_field] = value * param_group['lr_scale'] + else: + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self._is_apply_noise(t): + noise = self._calculate_noise(t) + lrs = [v + v * noise for v in lrs] + return lrs + + def _is_apply_noise(self, t) -> bool: + """Return True if scheduler in noise range.""" + apply_noise = False + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + return apply_noise + + def _calculate_noise(self, t) -> float: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + return noise + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + return noise diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler_factory.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..3e100fe029c3bc2405d3cae0695376603dd78618 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/scheduler_factory.py @@ -0,0 +1,107 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if getattr(args, 'lr_noise', None) is not None: + lr_noise = getattr(args, 'lr_noise') + if isinstance(lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = lr_noise * num_epochs + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + 
noise_std=getattr(args, 'lr_noise_std', 1.), + noise_seed=getattr(args, 'seed', 42), + ) + cycle_args = dict( + cycle_mul=getattr(args, 'lr_cycle_mul', 1.), + cycle_decay=getattr(args, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + ) + + lr_scheduler = None + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'tanh': + lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + t_in_epochs=True, + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=args.decay_milestones, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'plateau': + mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=args.decay_rate, + patience_t=args.patience_epochs, + lr_min=args.min_lr, + mode=mode, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cooldown_t=0, + **noise_args, + ) + elif args.sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=args.decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + + return lr_scheduler, num_epochs diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/step_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/step_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..f797e1a8cf35999531dd5f1ccbbe09a9d0cf30a9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. 
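+
+After warmup, lr = base_lr * decay_rate ** (t // decay_t).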
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import math
+import torch
+
+from .scheduler import Scheduler
+
+
+class StepLRScheduler(Scheduler):
+    """ Step LR decay by decay_rate every decay_t steps, with warmup and optional noise.
+    """
+
+    def __init__(self,
+                 optimizer: torch.optim.Optimizer,
+                 decay_t: float,
+                 decay_rate: float = 1.,
+                 warmup_t=0,
+                 warmup_lr_init=0,
+                 t_in_epochs=True,
+                 noise_range_t=None,
+                 noise_pct=0.67,
+                 noise_std=1.0,
+                 noise_seed=42,
+                 initialize=True,
+                 ) -> None:
+        super().__init__(
+            optimizer, param_group_field="lr",
+            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
+            initialize=initialize)
+
+        self.decay_t = decay_t
+        self.decay_rate = decay_rate
+        self.warmup_t = warmup_t
+        self.warmup_lr_init = warmup_lr_init
+        self.t_in_epochs = t_in_epochs
+        if self.warmup_t:
+            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
+            super().update_groups(self.warmup_lr_init)
+        else:
+            self.warmup_steps = [1 for _ in self.base_values]
+
+    def _get_lr(self, t):
+        if t < self.warmup_t:
+            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
+        else:
+            lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values]
+        return lrs
+
+    def get_epoch_values(self, epoch: int):
+        if self.t_in_epochs:
+            return self._get_lr(epoch)
+        else:
+            return None
+
+    def get_update_values(self, num_updates: int):
+        if not self.t_in_epochs:
+            return self._get_lr(num_updates)
+        else:
+            return None
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/tanh_lr.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/tanh_lr.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2d3c9cdb11ad31766062f1a8d3e69d3f845edc1
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/scheduler/tanh_lr.py
@@ -0,0 +1,117 @@
+""" TanH Scheduler
+
+TanH schedule with warmup, cycle/restarts, noise.
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import logging
+import math
+import numpy as np
+import torch
+
+from .scheduler import Scheduler
+
+
+_logger = logging.getLogger(__name__)
+
+
+class TanhLRScheduler(Scheduler):
+    """
+    Hyperbolic-Tangent decay with restarts.
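+    Within a cycle, with tr = t_curr / t_i, the LR follows
+    lr = lr_min + 0.5 * (lr_max - lr_min) * (1 - tanh(lb * (1 - tr) + ub * tr)).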
+ This is described in the paper https://arxiv.org/abs/1806.01593 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lb: float = -7., + ub: float = 3., + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + assert lb < ub + assert cycle_limit >= 0 + assert warmup_t >= 0 + assert warmup_lr_init >= 0 + self.lb = lb + self.ub = ub + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + if i < self.cycle_limit: + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + + tr = t_curr / t_i + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. 
- tr) + self.ub * tr)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/__init__.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7b139852d79644f97de7cf373a1a4c3dbd17f050 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/__init__.py @@ -0,0 +1,14 @@ +from .agc import adaptive_clip_grad +from .checkpoint_saver import CheckpointSaver +from .clip_grad import dispatch_clip_grad +from .cuda import ApexScaler, NativeScaler +from .decay_batch import decay_batch_step, check_batch_size_retry +from .distributed import distribute_bn, reduce_tensor +from .jit import set_jit_legacy, set_jit_fuser +from .log import setup_default_logging, FormatterNoInfo +from .metrics import AverageMeter, accuracy +from .misc import natural_key, add_bool_arg +from .model import unwrap_model, get_state_dict, freeze, unfreeze +from .model_ema import ModelEma, ModelEmaV2 +from .random import random_seed +from .summary import update_summary, get_outdir diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/agc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/agc.py new file mode 100644 index 0000000000000000000000000000000000000000..f51401726ff6810d97d0fa567f4e31b474325a59 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/agc.py @@ -0,0 +1,42 @@ +""" Adaptive Gradient Clipping + +An impl of AGC, as per (https://arxiv.org/abs/2102.06171): + +@article{brock2021high, + author={Andrew Brock and Soham De and Samuel L. 
Smith and Karen Simonyan},
+    title={High-Performance Large-Scale Image Recognition Without Normalization},
+    journal={arXiv preprint arXiv:2102.06171},
+    year={2021}
+}
+
+Code references:
+  * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets
+  * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import torch
+
+
+def unitwise_norm(x, norm_type=2.0):
+    if x.ndim <= 1:
+        return x.norm(norm_type)
+    else:
+        # works for nn.ConvNd and nn.Linear where output dim is first in the kernel/weight tensor
+        # might need special cases for other weights (possibly MHA) where this may not be true
+        return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True)
+
+
+def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0):
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    for p in parameters:
+        if p.grad is None:
+            continue
+        p_data = p.detach()
+        g_data = p.grad.detach()
+        max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
+        grad_norm = unitwise_norm(g_data, norm_type=norm_type)
+        clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6))
+        new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad)
+        p.grad.detach().copy_(new_grads)
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/checkpoint_saver.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/checkpoint_saver.py
new file mode 100644
index 0000000000000000000000000000000000000000..6aad74ee52655f68220f799efaffcbccdd0748ad
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/checkpoint_saver.py
@@ -0,0 +1,150 @@
+""" Checkpoint Saver
+
+Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
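+
+Typical use (a sketch; `model`, `optimizer`, `epoch` and `eval_metric` stand in for
+objects from the surrounding training loop and are not defined in this file):
+
+    saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', max_history=5)
+    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=eval_metric)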
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import glob +import operator +import os +import logging + +import torch + +from .model import unwrap_model, get_state_dict + + +_logger = logging.getLogger(__name__) + + +class CheckpointSaver: + def __init__( + self, + model, + optimizer, + args=None, + model_ema=None, + amp_scaler=None, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10, + unwrap_fn=unwrap_model): + + # objects to save state_dicts of + self.model = model + self.optimizer = optimizer + self.args = args + self.model_ema = model_ema + self.amp_scaler = amp_scaler + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + self.unwrap_fn = unwrap_fn + assert self.max_history >= 1 + + def save_checkpoint(self, epoch, metric=None): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, epoch, metric) + if os.path.exists(last_save_path): + os.unlink(last_save_path) # required for Windows support. + os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + 
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/clip_grad.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/clip_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..73671d3a5d2ad856630ce2b2d7b0d6e6e627c59a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from custom_timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." 
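+
+
+# Usage sketch (hypothetical `model`/`optimizer`/`loss` from a training loop):
+#     loss.backward()
+#     dispatch_clip_grad(model.parameters(), value=1.0, mode='norm')
+#     optimizer.step()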
+ diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/cuda.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..9e7bddf30463a7be7186c7def47c4e4dfb9993aa --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/cuda.py @@ -0,0 +1,55 @@ +""" CUDA / AMP utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +try: + from apex import amp + has_apex = True +except ImportError: + amp = None + has_apex = False + +from .clip_grad import dispatch_clip_grad + + +class ApexScaler: + state_dict_key = "amp" + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward(create_graph=create_graph) + if clip_grad is not None: + dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) + optimizer.step() + + def state_dict(self): + if 'state_dict' in amp.__dict__: + return amp.state_dict() + + def load_state_dict(self, state_dict): + if 'load_state_dict' in amp.__dict__: + amp.load_state_dict(state_dict) + + +class NativeScaler: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + self._scaler.scale(loss).backward(create_graph=create_graph) + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) + self._scaler.step(optimizer) + self._scaler.update() + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/decay_batch.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/decay_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..852fa4b8dc3d46932b67ed3e42170a5de92415d9 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/decay_batch.py @@ -0,0 +1,43 @@ +""" Batch size decay and retry helpers. 
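+
+decay_batch_step() steps a failing batch size down between powers of two and returns 0
+once it cannot shrink further; check_batch_size_retry() inspects the error string to
+decide whether retrying at the smaller size is worthwhile.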
+ +Copyright 2022 Ross Wightman +""" +import math + + +def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False): + """ power of two batch-size decay with intra steps + + Decay by stepping between powers of 2: + * determine power-of-2 floor of current batch size (base batch size) + * divide above value by num_intra_steps to determine step size + * floor batch_size to nearest multiple of step_size (from base batch size) + Examples: + num_steps == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1 + num_steps (no_odd=True) == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 6, 4, 2 + num_steps == 2 --> 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1 + num_steps == 1 --> 64, 32, 16, 8, 4, 2, 1 + """ + if batch_size <= 1: + # return 0 for stopping value so easy to use in loop + return 0 + base_batch_size = int(2 ** (math.log(batch_size - 1) // math.log(2))) + step_size = max(base_batch_size // num_intra_steps, 1) + batch_size = base_batch_size + ((batch_size - base_batch_size - 1) // step_size) * step_size + if no_odd and batch_size % 2: + batch_size -= 1 + return batch_size + + +def check_batch_size_retry(error_str): + """ check failure error string for conditions where batch decay retry should not be attempted + """ + error_str = error_str.lower() + if 'required rank' in error_str: + # Errors involving phrase 'required rank' typically happen when a conv is used that's + # not compatible with channels_last memory format. + return False + if 'illegal' in error_str: + # 'Illegal memory access' errors in CUDA typically leave process in unusable state + return False + return True diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/distributed.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..3c5dba8c1de5a6ff53638207521377fdfbc4f239 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/distributed.py @@ -0,0 +1,28 @@ +""" Distributed training/validation utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import distributed as dist + +from .model import unwrap_model + + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + +def distribute_bn(model, world_size, reduce=False): + # ensure every node has the same running bn stats + for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' in bn_name): + if reduce: + # average bn stats across whole group + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= float(world_size) + else: + # broadcast bn stats from rank 0 to whole group + torch.distributed.broadcast(bn_buf, 0) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/jit.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/jit.py new file mode 100644 index 0000000000000000000000000000000000000000..d527411fd3e1985639bb0b161bd484142a3619dd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/jit.py @@ -0,0 +1,58 @@ +""" JIT scripting/tracing utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os + +import torch + + +def set_jit_legacy(): + """ Set JIT executor to legacy w/ support for op fusion + This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes + in the JIT exectutor. These API are not supported so could change. 
+ """ + # + assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!" + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + torch._C._jit_override_can_fuse_on_gpu(True) + #torch._C._jit_set_texpr_fuser_enabled(True) + + +def set_jit_fuser(fuser): + if fuser == "te": + # default fuser should be == 'te' + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(True) + try: + torch._C._jit_set_nvfuser_enabled(False) + except Exception: + pass + elif fuser == "old" or fuser == "legacy": + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + torch._C._jit_override_can_fuse_on_gpu(True) + torch._C._jit_set_texpr_fuser_enabled(False) + try: + torch._C._jit_set_nvfuser_enabled(False) + except Exception: + pass + elif fuser == "nvfuser" or fuser == "nvf": + os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1' + #os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1' + #os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' + torch._C._jit_set_texpr_fuser_enabled(False) + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(True) + torch._C._jit_can_fuse_on_cpu() + torch._C._jit_can_fuse_on_gpu() + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_override_can_fuse_on_gpu(False) + torch._C._jit_set_nvfuser_guard_mode(True) + torch._C._jit_set_nvfuser_enabled(True) + else: + assert False, f"Invalid jit fuser ({fuser})" diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/log.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..c99469e0884f3e45905ef7c7f0d1e491092697ad --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/log.py @@ -0,0 +1,28 @@ +""" Logging helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import logging.handlers + + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO, log_path=''): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) + if log_path: + file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) + file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") + file_handler.setFormatter(file_formatter) + logging.root.addHandler(file_handler) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/metrics.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9fdbe13ef15c541679906239374ff8a7eedf5181 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/metrics.py @@ -0,0 +1,32 @@ +""" Eval metrics and related + +Hacked together by / Copyright 2020 Ross Wightman +""" + + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 
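+        # val holds the most recent sample; sum/count/avg accumulate in update()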
+ self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = min(max(topk), output.size()[1]) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk] diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/misc.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..39c0097c60ed602547f832f1f8dafbe37f156064 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/misc.py @@ -0,0 +1,18 @@ +""" Misc utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import re + + +def natural_key(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def add_bool_arg(parser, name, default=False, help=''): + dest_name = name.replace('-', '_') + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) + group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) + parser.set_defaults(**{dest_name: default}) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b95c45392bfb551f52bc8b8dca1aaf8c8b1940b1 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model.py @@ -0,0 +1,273 @@ +""" Model / state_dict utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import fnmatch + +import torch +from torchvision.ops.misc import FrozenBatchNorm2d + +from .model_ema import ModelEma + + +def unwrap_model(model): + if isinstance(model, ModelEma): + return unwrap_model(model.ema) + else: + return model.module if hasattr(model, 'module') else model + + +def get_state_dict(model, unwrap_fn=unwrap_model): + return unwrap_fn(model).state_dict() + + +def avg_sq_ch_mean(model, input, output): + """ calculate average channel square mean of output activations + """ + return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() + + +def avg_ch_var(model, input, output): + """ calculate average channel variance of output activations + """ + return torch.mean(output.var(axis=[0, 2, 3])).item() + + +def avg_ch_var_residual(model, input, output): + """ calculate average channel variance of output activations + """ + return torch.mean(output.var(axis=[0, 2, 3])).item() + + +class ActivationStatsHook: + """Iterates through each of `model`'s modules and matches modules using unix pattern + matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is + a match. + + Arguments: + model (nn.Module): model from which we will extract the activation stats + hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string + matching with the name of model's modules. + hook_fns (List[Callable]): List of hook functions to be registered at every + module in `layer_names`. + + Inspiration from https://docs.fast.ai/callback.hook.html. 
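+
+    Usage sketch (names are illustrative; any nn.Module whose submodule names match
+    the patterns would do):
+
+        hook = ActivationStatsHook(model, hook_fn_locs=['layer1.*'], hook_fns=[avg_sq_ch_mean])
+        _ = model(torch.randn(8, 3, 224, 224))
+        stats = hook.stats['avg_sq_ch_mean']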
+ + Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example + on how to plot Signal Propogation Plots using `ActivationStatsHook`. + """ + + def __init__(self, model, hook_fn_locs, hook_fns): + self.model = model + self.hook_fn_locs = hook_fn_locs + self.hook_fns = hook_fns + if len(hook_fn_locs) != len(hook_fns): + raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ + their lengths are different.") + self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) + for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): + self.register_hook(hook_fn_loc, hook_fn) + + def _create_hook(self, hook_fn): + def append_activation_stats(module, input, output): + out = hook_fn(module, input, output) + self.stats[hook_fn.__name__].append(out) + + return append_activation_stats + + def register_hook(self, hook_fn_loc, hook_fn): + for name, module in self.model.named_modules(): + if not fnmatch.fnmatch(name, hook_fn_loc): + continue + module.register_forward_hook(self._create_hook(hook_fn)) + + +def extract_spp_stats( + model, + hook_fn_locs, + hook_fns, + input_shape=[8, 3, 224, 224]): + """Extract average square channel mean and variance of activations during + forward pass to plot Signal Propogation Plots (SPP). + + Paper: https://arxiv.org/abs/2101.08692 + + Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 + """ + x = torch.normal(0., 1., input_shape) + hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) + _ = model(x) + return hook.stats + + +def freeze_batch_norm_2d(module): + """ + Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is + itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and + returned. Otherwise, the module is walked recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. + + Returns: + torch.nn.Module: Resulting module + + Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 + """ + res = module + if isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): + res = FrozenBatchNorm2d(module.num_features) + res.num_features = module.num_features + res.affine = module.affine + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = freeze_batch_norm_2d(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def unfreeze_batch_norm_2d(module): + """ + Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance + of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked + recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. 
+
+    Returns:
+        torch.nn.Module: Resulting module
+
+    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
+    """
+    res = module
+    if isinstance(module, FrozenBatchNorm2d):
+        res = torch.nn.BatchNorm2d(module.num_features)
+        if module.affine:
+            res.weight.data = module.weight.data.clone().detach()
+            res.bias.data = module.bias.data.clone().detach()
+        res.running_mean.data = module.running_mean.data
+        res.running_var.data = module.running_var.data
+        res.eps = module.eps
+    else:
+        for name, child in module.named_children():
+            new_child = unfreeze_batch_norm_2d(child)
+            if new_child is not child:
+                res.add_module(name, new_child)
+    return res
+
+
+def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'):
+    """
+    Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is
+    done in place.
+    Args:
+        root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced.
+        submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as
+            named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
+            means that the whole root module will be (un)frozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers.
+            Defaults to `True`.
+        mode (str): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`.
+    """
+    assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"'
+
+    if isinstance(root_module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+        # Raise assertion here because we can't convert it in place
+        raise AssertionError(
+            "You have provided a batch norm layer as the `root module`. Please use "
+            "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.")
+
+    if isinstance(submodules, str):
+        submodules = [submodules]
+
+    named_modules = submodules
+    submodules = [root_module.get_submodule(m) for m in submodules]
+
+    if not len(submodules):
+        named_modules, submodules = list(zip(*root_module.named_children()))
+
+    for n, m in zip(named_modules, submodules):
+        # (Un)freeze parameters
+        for p in m.parameters():
+            p.requires_grad = False if mode == 'freeze' else True
+        if include_bn_running_stats:
+            # Helper to add a submodule specified as a named module
+            def _add_submodule(module, name, submodule):
+                split = name.rsplit('.', 1)
+                if len(split) > 1:
+                    module.get_submodule(split[0]).add_module(split[1], submodule)
+                else:
+                    module.add_module(name, submodule)
+
+            # Freeze batch norm
+            if mode == 'freeze':
+                res = freeze_batch_norm_2d(m)
+                # It's possible that `m` is a type of BatchNorm in itself, in which case `freeze_batch_norm_2d` won't
+                # convert it in place, but will return the converted result. In this case `res` holds the converted
+                # result and we may try to re-assign the named module
+                if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+                    _add_submodule(root_module, n, res)
+            # Unfreeze batch norm
+            else:
+                res = unfreeze_batch_norm_2d(m)
+                # Ditto. See note above in the mode == 'freeze' branch.
+                if isinstance(m, FrozenBatchNorm2d):
+                    _add_submodule(root_module, n, res)
+
+
+def freeze(root_module, submodules=[], include_bn_running_stats=True):
+    """
+    Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
+    Args:
+        root_module (nn.Module): Root module relative to which `submodules` are referenced.
+        submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as
+            named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
+            means that the whole root module will be frozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and
+            `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning,
+            it's good practice to freeze batch norm stats. And note that these are different to the affine parameters
+            which are just normal PyTorch parameters. Defaults to `True`.
+
+    Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`.
+
+    Examples::
+
+        >>> model = timm.create_model('resnet18')
+        >>> # Freeze up to and including layer2
+        >>> submodules = [n for n, _ in model.named_children()]
+        >>> print(submodules)
+        ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc']
+        >>> freeze(model, submodules[:submodules.index('layer2') + 1])
+        >>> # Check for yourself that it works as expected
+        >>> print(model.layer2[0].conv1.weight.requires_grad)
+        False
+        >>> print(model.layer3[0].conv1.weight.requires_grad)
+        True
+        >>> # Unfreeze
+        >>> unfreeze(model)
+    """
+    _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze")
+
+
+def unfreeze(root_module, submodules=[], include_bn_running_stats=True):
+    """
+    Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
+    Args:
+        root_module (nn.Module): Root module relative to which `submodules` are referenced.
+        submodules (list[str]): List of submodules for which the parameters will be unfrozen. They are to be provided
+            as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty
+            list means that the whole root module will be unfrozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers.
+            These will be converted to `BatchNorm2d` in place. Defaults to `True`.
+
+    See example in docstring for `freeze`.
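+
+    A minimal illustrative sketch (an editorial addition, assuming the model from the
+    `freeze` example above)::
+
+        >>> unfreeze(model)
+        >>> print(model.layer2[0].conv1.weight.requires_grad)
+        True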
+
+    """
+    _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model_ema.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model_ema.py
new file mode 100644
index 0000000000000000000000000000000000000000..073d5c5ea1a4afc5aa3817b6354b2566f8cc2cf5
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/model_ema.py
@@ -0,0 +1,126 @@
+""" Exponential Moving Average (EMA) of model updates
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import logging
+from collections import OrderedDict
+from copy import deepcopy
+
+import torch
+import torch.nn as nn
+
+_logger = logging.getLogger(__name__)
+
+
+class ModelEma:
+    """ Model Exponential Moving Average (DEPRECATED)
+
+    Keep a moving average of everything in the model state_dict (parameters and buffers).
+    This version is deprecated; it does not work with scripted models and will be removed eventually.
+
+    This is intended to allow functionality like
+    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+
+    A smoothed version of the weights is necessary for some training schemes to perform well.
+    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
+    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
+    smoothing of weights to match results. Pay attention to the decay constant you are using
+    relative to your update count per epoch.
+
+    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
+    disable validation of the EMA weights. Validation will have to be done manually in a separate
+    process, or after the training stops converging.
+
+    This class is sensitive to where it is initialized in the sequence of model init,
+    GPU assignment and distributed training wrappers.
+    """
+    def __init__(self, model, decay=0.9999, device='', resume=''):
+        # make a copy of the model for accumulating moving average of weights
+        self.ema = deepcopy(model)
+        self.ema.eval()
+        self.decay = decay
+        self.device = device  # perform ema on different device from model if set
+        if device:
+            self.ema.to(device=device)
+        self.ema_has_module = hasattr(self.ema, 'module')
+        if resume:
+            self._load_checkpoint(resume)
+        for p in self.ema.parameters():
+            p.requires_grad_(False)
+
+    def _load_checkpoint(self, checkpoint_path):
+        checkpoint = torch.load(checkpoint_path, map_location='cpu')
+        assert isinstance(checkpoint, dict)
+        if 'state_dict_ema' in checkpoint:
+            new_state_dict = OrderedDict()
+            for k, v in checkpoint['state_dict_ema'].items():
+                # ema model may have been wrapped by DataParallel, and need module prefix
+                if self.ema_has_module:
+                    name = 'module.' + k if not k.startswith('module') else k
+                else:
+                    name = k
+                new_state_dict[name] = v
+            self.ema.load_state_dict(new_state_dict)
+            _logger.info("Loaded state_dict_ema")
+        else:
+            _logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
+
+    def update(self, model):
+        # correct a mismatch in state dict keys
+        needs_module = hasattr(model, 'module') and not self.ema_has_module
+        with torch.no_grad():
+            msd = model.state_dict()
+            for k, ema_v in self.ema.state_dict().items():
+                if needs_module:
+                    k = 'module.' + k
+                model_v = msd[k].detach()
+                if self.device:
+                    model_v = model_v.to(device=self.device)
+                ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
+
+
+class ModelEmaV2(nn.Module):
+    """ Model Exponential Moving Average V2
+
+    Keep a moving average of everything in the model state_dict (parameters and buffers).
+    V2 of this module is simpler; it does not match params/buffers based on name but simply
+    iterates in order. It works with torchscript (JIT of full model).
+
+    This is intended to allow functionality like
+    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+
+    A smoothed version of the weights is necessary for some training schemes to perform well.
+    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
+    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
+    smoothing of weights to match results. Pay attention to the decay constant you are using
+    relative to your update count per epoch.
+
+    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
+    disable validation of the EMA weights. Validation will have to be done manually in a separate
+    process, or after the training stops converging.
+
+    This class is sensitive to where it is initialized in the sequence of model init,
+    GPU assignment and distributed training wrappers.
+    """
+    def __init__(self, model, decay=0.9999, device=None):
+        super(ModelEmaV2, self).__init__()
+        # make a copy of the model for accumulating moving average of weights
+        self.module = deepcopy(model)
+        self.module.eval()
+        self.decay = decay
+        self.device = device  # perform ema on different device from model if set
+        if self.device is not None:
+            self.module.to(device=device)
+
+    def _update(self, model, update_fn):
+        with torch.no_grad():
+            for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
+                if self.device is not None:
+                    model_v = model_v.to(device=self.device)
+                ema_v.copy_(update_fn(ema_v, model_v))
+
+    def update(self, model):
+        self._update(model, update_fn=lambda e, m: self.decay * e + (1.
- self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/random.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..a9679983e96a9a6634c0b77aaf7b996e70eff50b --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/random.py @@ -0,0 +1,9 @@ +import random +import numpy as np +import torch + + +def random_seed(seed=42, rank=0): + torch.manual_seed(seed + rank) + np.random.seed(seed + rank) + random.seed(seed + rank) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/summary.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/summary.py new file mode 100644 index 0000000000000000000000000000000000000000..9f5af9a08598556c3fed136f258f88bd578c1e1c --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/utils/summary.py @@ -0,0 +1,39 @@ +""" Summary utilities + +Hacked together by / Copyright 2020 Ross Wightman +""" +import csv +import os +from collections import OrderedDict +try: + import wandb +except ImportError: + pass + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + if log_wandb: + wandb.log(rowd) + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) diff --git a/custom_nodes/comfyui_controlnet_aux/src/custom_timm/version.py b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/version.py new file mode 100644 index 0000000000000000000000000000000000000000..70039a4cbb99f75059bfe30c5e56c2295a73a5cd --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/src/custom_timm/version.py @@ -0,0 +1 @@ +__version__ = '0.6.13' diff --git a/custom_nodes/comfyui_controlnet_aux/tests/pose.png b/custom_nodes/comfyui_controlnet_aux/tests/pose.png new file mode 100644 index 0000000000000000000000000000000000000000..e566939b171a8babae471125c753446b2368bf9c Binary files /dev/null and b/custom_nodes/comfyui_controlnet_aux/tests/pose.png differ diff --git a/custom_nodes/comfyui_controlnet_aux/tests/test_cn_aux_full.json b/custom_nodes/comfyui_controlnet_aux/tests/test_cn_aux_full.json new file mode 100644 index 0000000000000000000000000000000000000000..b6445f36944ab43f99aa7640cfedc3968eb2f6d7 --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/tests/test_cn_aux_full.json @@ -0,0 +1,1737 @@ +{ + "last_node_id": 45, + "last_link_id": 44, + "nodes": [ + { + "id": 24, + "type": "PreviewImage", + "pos": [ + 843, + -430 + ], + "size": [ + 210, + 246 + ], + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 23 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 25, + "type": "PreviewImage", + "pos": [ + 1127, + -346 + ], + "size": { 
+ "0": 210, + "1": 26 + }, + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 24 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 26, + "type": "PreviewImage", + "pos": [ + 832, + -222 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 25 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 27, + "type": "PreviewImage", + "pos": [ + 1144, + -123 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 26 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 28, + "type": "PreviewImage", + "pos": [ + 825, + 56 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 27 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 29, + "type": "PreviewImage", + "pos": [ + 1240, + 246 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 28 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 30, + "type": "PreviewImage", + "pos": [ + 855, + 381 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 29 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 31, + "type": "PreviewImage", + "pos": [ + 1248, + 471 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 29, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 30 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 32, + "type": "PreviewImage", + "pos": [ + 823, + 632 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 30, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 31 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 33, + "type": "PreviewImage", + "pos": [ + 1240, + 737 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 31, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 32 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 34, + "type": "PreviewImage", + "pos": [ + 844, + 833 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 32, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 33 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 35, + "type": "PreviewImage", + "pos": [ + 1216, + 1023 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 33, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 34 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 36, + "type": "PreviewImage", + "pos": [ + 838, + 1175 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 34, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 35 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 37, + "type": "PreviewImage", + "pos": [ + 1282, + 
1355 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 35, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 36 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 38, + "type": "PreviewImage", + "pos": [ + 897, + 1532 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 36, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 37 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 39, + "type": "PreviewImage", + "pos": [ + 1336, + 1704 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 37, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 38 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 40, + "type": "PreviewImage", + "pos": [ + 859, + 1840 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 38, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 39 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 41, + "type": "PreviewImage", + "pos": [ + 1329, + 1939 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 39, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 40 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 42, + "type": "PreviewImage", + "pos": [ + 888, + 2056 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 40, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 42 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 43, + "type": "PreviewImage", + "pos": [ + 1278, + 2191 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 41, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 41 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 2, + "type": "PiDiNetPreprocessor", + "pos": [ + 420, + -446 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 1 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PiDiNetPreprocessor" + }, + "widgets_values": [ + "enable" + ] + }, + { + "id": 3, + "type": "ColorPreprocessor", + "pos": [ + 426, + -332 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 2 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 24 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ColorPreprocessor" + } + }, + { + "id": 4, + "type": "CannyEdgePreprocessor", + "pos": [ + 433, + -245 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 3 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 25 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CannyEdgePreprocessor" + }, + "widgets_values": [ + 100, + 200 + ] + }, + { + "id": 5, + "type": "SAMPreprocessor", + "pos": [ + 427, + -108 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 4, + "mode": 0, + 
"inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 4 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 26 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "SAMPreprocessor" + } + }, + { + "id": 7, + "type": "DWPreprocessor", + "pos": [ + 440, + 95 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 6 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 27 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DWPreprocessor" + }, + "widgets_values": [ + "enable", + "enable", + "enable" + ] + }, + { + "id": 8, + "type": "BinaryPreprocessor", + "pos": [ + 432, + 266 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 28 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BinaryPreprocessor" + }, + "widgets_values": [ + 100 + ] + }, + { + "id": 9, + "type": "ScribblePreprocessor", + "pos": [ + 462, + 376 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 29 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ScribblePreprocessor" + } + }, + { + "id": 10, + "type": "M-LSDPreprocessor", + "pos": [ + 453, + 497 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 9 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 30 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "M-LSDPreprocessor" + }, + "widgets_values": [ + 0.1, + 0.1 + ] + }, + { + "id": 11, + "type": "UniFormer-SemSegPreprocessor", + "pos": [ + 479, + 651 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 10 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 31 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "UniFormer-SemSegPreprocessor" + } + }, + { + "id": 12, + "type": "Zoe-DepthMapPreprocessor", + "pos": [ + 483, + 740 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 11 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 32 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "Zoe-DepthMapPreprocessor" + } + }, + { + "id": 13, + "type": "MiDaS-NormalMapPreprocessor", + "pos": [ + 463, + 821 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 12 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 33 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MiDaS-NormalMapPreprocessor" + }, + "widgets_values": [ + 6.283185307179586, + 0.1 + ] + }, + { + "id": 14, + "type": "MiDaS-DepthMapPreprocessor", + "pos": [ 
+ 451, + 1009 + ], + "size": { + "0": 315, + "1": 82 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 13 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 34 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MiDaS-DepthMapPreprocessor" + }, + "widgets_values": [ + 6.283185307179586, + 0.1 + ] + }, + { + "id": 15, + "type": "OpenposePreprocessor", + "pos": [ + 466, + 1177 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 14 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 35 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "OpenposePreprocessor" + }, + "widgets_values": [ + "enable", + "enable", + "enable" + ] + }, + { + "id": 17, + "type": "LeReS-DepthMapPreprocessor", + "pos": [ + 484, + 1533 + ], + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 16 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 37 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LeReS-DepthMapPreprocessor" + }, + "widgets_values": [ + 0, + 0, + "enable" + ] + }, + { + "id": 18, + "type": "BAE-NormalMapPreprocessor", + "pos": [ + 510, + 1729 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 17 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 38 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BAE-NormalMapPreprocessor" + } + }, + { + "id": 19, + "type": "OneFormer-COCO-SemSegPreprocessor", + "pos": [ + 488, + 1843 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 18 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 39 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "OneFormer-COCO-SemSegPreprocessor" + } + }, + { + "id": 20, + "type": "OneFormer-ADE20K-SemSegPreprocessor", + "pos": [ + 470, + 1941 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 19 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 40 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "OneFormer-ADE20K-SemSegPreprocessor" + } + }, + { + "id": 22, + "type": "FakeScribblePreprocessor", + "pos": [ + 426, + 2193 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 21 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 41 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "FakeScribblePreprocessor" + }, + "widgets_values": [ + "enable" + ] + }, + { + "id": 21, + "type": "HEDPreprocessor", + "pos": [ + 460, + 2053 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 20 + } + ], + "outputs": [ + { + "name": "IMAGE", + 
"type": "IMAGE", + "links": [ + 42 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "HEDPreprocessor" + }, + "widgets_values": [ + "enable" + ] + }, + { + "id": 16, + "type": "LineArtPreprocessor", + "pos": [ + 450, + 1363 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 15 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 36 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LineArtPreprocessor" + }, + "widgets_values": [ + "enable" + ] + }, + { + "id": 45, + "type": "PreviewImage", + "pos": [ + 886, + 2316 + ], + "size": { + "0": 210, + "1": 26 + }, + "flags": {}, + "order": 42, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 43 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 44, + "type": "TilePreprocessor", + "pos": [ + 419, + 2320 + ], + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 44 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 43 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "TilePreprocessor" + }, + "widgets_values": [ + 3 + ] + }, + { + "id": 1, + "type": "LoadImage", + "pos": [ + 19, + 298 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1, + 2, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 44 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "pose.png", + "image" + ] + } + ], + "links": [ + [ + 1, + 1, + 0, + 2, + 0, + "IMAGE" + ], + [ + 2, + 1, + 0, + 3, + 0, + "IMAGE" + ], + [ + 3, + 1, + 0, + 4, + 0, + "IMAGE" + ], + [ + 4, + 1, + 0, + 5, + 0, + "IMAGE" + ], + [ + 6, + 1, + 0, + 7, + 0, + "IMAGE" + ], + [ + 7, + 1, + 0, + 8, + 0, + "IMAGE" + ], + [ + 8, + 1, + 0, + 9, + 0, + "IMAGE" + ], + [ + 9, + 1, + 0, + 10, + 0, + "IMAGE" + ], + [ + 10, + 1, + 0, + 11, + 0, + "IMAGE" + ], + [ + 11, + 1, + 0, + 12, + 0, + "IMAGE" + ], + [ + 12, + 1, + 0, + 13, + 0, + "IMAGE" + ], + [ + 13, + 1, + 0, + 14, + 0, + "IMAGE" + ], + [ + 14, + 1, + 0, + 15, + 0, + "IMAGE" + ], + [ + 15, + 1, + 0, + 16, + 0, + "IMAGE" + ], + [ + 16, + 1, + 0, + 17, + 0, + "IMAGE" + ], + [ + 17, + 1, + 0, + 18, + 0, + "IMAGE" + ], + [ + 18, + 1, + 0, + 19, + 0, + "IMAGE" + ], + [ + 19, + 1, + 0, + 20, + 0, + "IMAGE" + ], + [ + 20, + 1, + 0, + 21, + 0, + "IMAGE" + ], + [ + 21, + 1, + 0, + 22, + 0, + "IMAGE" + ], + [ + 23, + 2, + 0, + 24, + 0, + "IMAGE" + ], + [ + 24, + 3, + 0, + 25, + 0, + "IMAGE" + ], + [ + 25, + 4, + 0, + 26, + 0, + "IMAGE" + ], + [ + 26, + 5, + 0, + 27, + 0, + "IMAGE" + ], + [ + 27, + 7, + 0, + 28, + 0, + "IMAGE" + ], + [ + 28, + 8, + 0, + 29, + 0, + "IMAGE" + ], + [ + 29, + 9, + 0, + 30, + 0, + "IMAGE" + ], + [ + 30, + 10, + 0, + 31, + 0, + "IMAGE" + ], + [ + 31, + 11, + 0, + 32, + 0, + "IMAGE" + ], + [ + 32, + 12, + 0, + 33, + 0, + "IMAGE" + ], + [ + 33, + 13, + 0, + 34, + 0, + "IMAGE" + ], + [ + 34, + 14, + 0, + 35, + 0, + "IMAGE" + ], + [ + 35, + 15, + 0, + 36, + 0, + "IMAGE" + ], + [ + 36, + 16, + 0, + 37, + 0, + "IMAGE" + ], 
+ [ + 37, + 17, + 0, + 38, + 0, + "IMAGE" + ], + [ + 38, + 18, + 0, + 39, + 0, + "IMAGE" + ], + [ + 39, + 19, + 0, + 40, + 0, + "IMAGE" + ], + [ + 40, + 20, + 0, + 41, + 0, + "IMAGE" + ], + [ + 41, + 22, + 0, + 43, + 0, + "IMAGE" + ], + [ + 42, + 21, + 0, + 42, + 0, + "IMAGE" + ], + [ + 43, + 44, + 0, + 45, + 0, + "IMAGE" + ], + [ + 44, + 1, + 0, + 44, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/comfyui_controlnet_aux/tests/test_controlnet_aux.py b/custom_nodes/comfyui_controlnet_aux/tests/test_controlnet_aux.py new file mode 100644 index 0000000000000000000000000000000000000000..a57b32f3cda931789a6458f3d56c03c05aabdf5a --- /dev/null +++ b/custom_nodes/comfyui_controlnet_aux/tests/test_controlnet_aux.py @@ -0,0 +1,126 @@ +import os +import shutil +from io import BytesIO + +import numpy as np +import pytest +import requests +from PIL import Image + +from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector, + LeresDetector, LineartAnimeDetector, + LineartDetector, MediapipeFaceDetector, + MidasDetector, MLSDdetector, NormalBaeDetector, + OpenposeDetector, PidiNetDetector, SamDetector, + ZoeDetector, TileDetector) + +OUTPUT_DIR = "tests/outputs" + +def output(name, img): + img.save(os.path.join(OUTPUT_DIR, "{:s}.png".format(name))) + +def common(name, processor, img): + output(name, processor(img)) + output(name + "_pil_np", Image.fromarray(processor(img, output_type="np"))) + output(name + "_np_np", Image.fromarray(processor(np.array(img, dtype=np.uint8), output_type="np"))) + output(name + "_np_pil", processor(np.array(img, dtype=np.uint8), output_type="pil")) + output(name + "_scaled", processor(img, detect_resolution=640, image_resolution=768)) + +def return_pil(name, processor, img): + output(name + "_pil_false", Image.fromarray(processor(img, return_pil=False))) + output(name + "_pil_true", processor(img, return_pil=True)) + +@pytest.fixture(scope="module") +def img(): + if os.path.exists(OUTPUT_DIR): + shutil.rmtree(OUTPUT_DIR) + os.mkdir(OUTPUT_DIR) + url = "https://huggingface.co/lllyasviel/sd-controlnet-openpose/resolve/main/images/pose.png" + response = requests.get(url) + img = Image.open(BytesIO(response.content)).convert("RGB").resize((512, 512)) + return img + +def test_canny(img): + canny = CannyDetector() + common("canny", canny, img) + output("canny_img", canny(img=img)) + +def test_hed(img): + hed = HEDdetector.from_pretrained("lllyasviel/Annotators") + common("hed", hed, img) + return_pil("hed", hed, img) + output("hed_safe", hed(img, safe=True)) + output("hed_scribble", hed(img, scribble=True)) + +def test_leres(img): + leres = LeresDetector.from_pretrained("lllyasviel/Annotators") + common("leres", leres, img) + output("leres_boost", leres(img, boost=True)) + +def test_lineart(img): + lineart = LineartDetector.from_pretrained("lllyasviel/Annotators") + common("lineart", lineart, img) + return_pil("lineart", lineart, img) + output("lineart_coarse", lineart(img, coarse=True)) + +def test_lineart_anime(img): + lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators") + common("lineart_anime", lineart_anime, img) + return_pil("lineart_anime", lineart_anime, img) + +def test_mediapipe_face(img): + mediapipe = MediapipeFaceDetector() + common("mediapipe", mediapipe, img) + output("mediapipe_image", mediapipe(image=img)) + +def test_midas(img): + midas = MidasDetector.from_pretrained("lllyasviel/Annotators") + common("midas", midas, img) 
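+    # Editorial note: with depth_and_normal=True the detector returns a (depth, normal)
+    # pair, so the index [1] below picks out the normal-map image.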
+    output("midas_normal", midas(img, depth_and_normal=True)[1])
+
+def test_mlsd(img):
+    mlsd = MLSDdetector.from_pretrained("lllyasviel/Annotators")
+    common("mlsd", mlsd, img)
+    return_pil("mlsd", mlsd, img)
+
+def test_normalbae(img):
+    normal_bae = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+    common("normal_bae", normal_bae, img)
+    return_pil("normal_bae", normal_bae, img)
+
+def test_openpose(img):
+    openpose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
+    common("openpose", openpose, img)
+    return_pil("openpose", openpose, img)
+    output("openpose_hand_and_face_false", openpose(img, hand_and_face=False))
+    output("openpose_hand_and_face_true", openpose(img, hand_and_face=True))
+    output("openpose_face", openpose(img, include_body=True, include_hand=False, include_face=True))
+    output("openpose_faceonly", openpose(img, include_body=False, include_hand=False, include_face=True))
+    output("openpose_full", openpose(img, include_body=True, include_hand=True, include_face=True))
+    output("openpose_hand", openpose(img, include_body=True, include_hand=True, include_face=False))
+
+def test_pidi(img):
+    pidi = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
+    common("pidi", pidi, img)
+    return_pil("pidi", pidi, img)
+    output("pidi_safe", pidi(img, safe=True))
+    output("pidi_scribble", pidi(img, scribble=True))
+
+def test_sam(img):
+    sam = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
+    common("sam", sam, img)
+    output("sam_image", sam(image=img))
+
+def test_shuffle(img):
+    shuffle = ContentShuffleDetector()
+    common("shuffle", shuffle, img)
+    return_pil("shuffle", shuffle, img)
+
+def test_zoe(img):
+    zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
+    common("zoe", zoe, img)
+
+def test_tile(img):
+    tile = TileDetector()
+    common("tile", tile, img)
+    output("tile_img", tile(img))
\ No newline at end of file
diff --git a/custom_nodes/comfyui_controlnet_aux/utils.py b/custom_nodes/comfyui_controlnet_aux/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aebf89a9e2e18fe20aa860ca324678a4735c80e
--- /dev/null
+++ b/custom_nodes/comfyui_controlnet_aux/utils.py
@@ -0,0 +1,172 @@
+import torch
+import numpy as np
+import os
+import cv2
+import yaml
+from pathlib import Path
+from enum import Enum
+from .log import log
+
+here = Path(__file__).parent.resolve()
+
+config_path = Path(here, "config.yaml")
+
+if os.path.exists(config_path):
+    config = yaml.load(open(config_path, "r"), Loader=yaml.FullLoader)
+
+    annotator_ckpts_path = str(Path(here, config["annotator_ckpts_path"]))
+    USE_SYMLINKS = config["USE_SYMLINKS"]
+    ORT_PROVIDERS = config["EP_list"]
+
+    if USE_SYMLINKS is None or type(USE_SYMLINKS) != bool:
+        log.error("USE_SYMLINKS must be a boolean. Using False by default.")
+        USE_SYMLINKS = False
+
+    if not os.path.isdir(annotator_ckpts_path):
+        try:
+            os.makedirs(annotator_ckpts_path)
+        except Exception:
+            log.error("Failed to create config ckpts directory. Using default.")
+            annotator_ckpts_path = str(Path(here, "./ckpts"))
+else:
+    annotator_ckpts_path = str(Path(here, "./ckpts"))
+    USE_SYMLINKS = False
+    ORT_PROVIDERS = ["CUDAExecutionProvider", "DirectMLExecutionProvider", "OpenVINOExecutionProvider", "ROCMExecutionProvider", "CPUExecutionProvider"]
+
+os.environ['AUX_USE_SYMLINKS'] = str(USE_SYMLINKS)
+os.environ['AUX_ANNOTATOR_CKPTS_PATH'] = annotator_ckpts_path
+os.environ['AUX_ORT_PROVIDERS'] = str(",".join(ORT_PROVIDERS))
+
+log.info(f"Using ckpts path: {annotator_ckpts_path}")
+log.info(f"Using symlinks: {USE_SYMLINKS}")
+log.info(f"Using ort providers: {ORT_PROVIDERS}")
+
+MAX_RESOLUTION=2048 #Who the hell feeds 4k images to ControlNet?
+HF_MODEL_NAME = "lllyasviel/Annotators"
+DWPOSE_MODEL_NAME = "yzd-v/DWPose"
+ANIFACESEG_MODEL_NAME = "bdsqlsz/qinglong_controlnet-lllite"
+
+
+def common_annotator_call(model, tensor_image, input_batch=False, **kwargs):
+    if "detect_resolution" in kwargs:
+        del kwargs["detect_resolution"] #Drop any stray detect_resolution; it is derived from `resolution` below
+
+    if "resolution" in kwargs:
+        detect_resolution = kwargs["resolution"] if type(kwargs["resolution"]) == int and kwargs["resolution"] >= 64 else 512
+        del kwargs["resolution"]
+    else:
+        detect_resolution = 512
+
+    if input_batch:
+        np_images = np.asarray(tensor_image * 255., dtype=np.uint8)
+        np_results = model(np_images, output_type="np", detect_resolution=detect_resolution, **kwargs)
+        return torch.from_numpy(np_results.astype(np.float32) / 255.0)
+
+    out_list = []
+    for image in tensor_image:
+        np_image = np.asarray(image * 255., dtype=np.uint8)
+        np_result = model(np_image, output_type="np", detect_resolution=detect_resolution, **kwargs)
+        out_list.append(torch.from_numpy(np_result.astype(np.float32) / 255.0))
+    return torch.stack(out_list, dim=0)
+
+def create_node_input_types(**extra_kwargs):
+    return {
+        "required": {
+            "image": ("IMAGE",)
+        },
+        "optional": {
+            **extra_kwargs,
+            "resolution": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64})
+        }
+    }
+
+class ResizeMode(Enum):
+    """
+    Resize modes for ControlNet input images.
+    """
+
+    RESIZE = "Just Resize"
+    INNER_FIT = "Crop and Resize"
+    OUTER_FIT = "Resize and Fill"
+
+    def int_value(self):
+        if self == ResizeMode.RESIZE:
+            return 0
+        elif self == ResizeMode.INNER_FIT:
+            return 1
+        elif self == ResizeMode.OUTER_FIT:
+            return 2
+        assert False, "NOTREACHED"
+
+#https://github.com/Mikubill/sd-webui-controlnet/blob/e67e017731aad05796b9615dc6eadce911298ea1/internal_controlnet/external_code.py#L89
+#Replaced logger with internal log
+def pixel_perfect_resolution(
+    image: np.ndarray,
+    target_H: int,
+    target_W: int,
+    resize_mode: ResizeMode,
+) -> int:
+    """
+    Calculate the estimated resolution for resizing an image while preserving aspect ratio.
+
+    The function first calculates scaling factors for the height and width of the image based on the target
+    height and width. Then, based on the chosen resize mode, it either takes the smaller or the larger
+    scaling factor to estimate the new resolution.
+
+    If the resize mode is OUTER_FIT, the function uses the smaller scaling factor, ensuring the whole image
+    fits within the target dimensions, potentially leaving some empty space.
+
+    If the resize mode is not OUTER_FIT, the function uses the larger scaling factor, ensuring the target
+    dimensions are fully filled, potentially cropping the image.
+
+    After calculating the estimated resolution, the function logs some debugging information.
+
+    Args:
+        image (np.ndarray): A 3D numpy array representing an image.
+            The dimensions represent [height, width, channels].
+        target_H (int): The target height for the image.
+        target_W (int): The target width for the image.
+        resize_mode (ResizeMode): The mode for resizing.
+
+    Returns:
+        int: The estimated resolution after resizing.
+    """
+    raw_H, raw_W, _ = image.shape
+
+    k0 = float(target_H) / float(raw_H)
+    k1 = float(target_W) / float(raw_W)
+
+    if resize_mode == ResizeMode.OUTER_FIT:
+        estimation = min(k0, k1) * float(min(raw_H, raw_W))
+    else:
+        estimation = max(k0, k1) * float(min(raw_H, raw_W))
+
+    log.debug("Pixel Perfect Computation:")
+    log.debug(f"resize_mode = {resize_mode}")
+    log.debug(f"raw_H = {raw_H}")
+    log.debug(f"raw_W = {raw_W}")
+    log.debug(f"target_H = {target_H}")
+    log.debug(f"target_W = {target_W}")
+    log.debug(f"estimation = {estimation}")
+
+    return int(np.round(estimation))
+
+#https://github.com/Mikubill/sd-webui-controlnet/blob/e67e017731aad05796b9615dc6eadce911298ea1/scripts/controlnet.py#L404
+def safe_numpy(x):
+    # A very safe method to make sure that Apple/Mac works
+    y = x
+
+    # below is very boring but do not change these. If you change these Apple or Mac may fail.
+    y = y.copy()
+    y = np.ascontiguousarray(y)
+    y = y.copy()
+    return y
+
+#https://github.com/Mikubill/sd-webui-controlnet/blob/e67e017731aad05796b9615dc6eadce911298ea1/scripts/utils.py#L140
+def get_unique_axis0(data):
+    arr = np.asanyarray(data)
+    idxs = np.lexsort(arr.T)
+    arr = arr[idxs]
+    unique_idxs = np.empty(len(arr), dtype=np.bool_)
+    unique_idxs[:1] = True
+    unique_idxs[1:] = np.any(arr[:-1, :] != arr[1:, :], axis=-1)
+    return arr[unique_idxs]
diff --git a/custom_nodes/example_node.py.example b/custom_nodes/example_node.py.example
new file mode 100644
index 0000000000000000000000000000000000000000..733014f3c7d393a81130b41810e3e1d574dd256b
--- /dev/null
+++ b/custom_nodes/example_node.py.example
@@ -0,0 +1,102 @@
+class Example:
+    """
+    An example node
+
+    Class methods
+    -------------
+    INPUT_TYPES (dict):
+        Tell the main program the input parameters of the node.
+
+    Attributes
+    ----------
+    RETURN_TYPES (`tuple`):
+        The type of each element in the output tuple.
+    RETURN_NAMES (`tuple`):
+        Optional: The name of each output in the output tuple.
+    FUNCTION (`str`):
+        The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute()
+    OUTPUT_NODE ([`bool`]):
+        If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example.
+        The backend iterates on these output nodes and tries to execute all their parents if their parent graph is properly connected.
+        Assumed to be False if not present.
+    CATEGORY (`str`):
+        The category the node should appear in the UI.
+    execute(s) -> tuple || None:
+        The entry point method. The name of this method must be the same as the value of property `FUNCTION`.
+        For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`.
+    """
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(s):
+        """
+        Return a dictionary which contains config for all input fields.
+        Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT".
+        Input types "INT", "STRING" or "FLOAT" are special values for fields on the node.
+        The type can be a list for selection.
+
+        Returns: `dict`:
+            - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required`
+            - Value input_fields (`dict`): Contains input fields config:
+                * Key field_name (`string`): Name of an entry-point method's argument
+                * Value field_config (`tuple`):
+                    + First value is a string indicating the type of the field, or a list for selection.
+                    + Second value is a config for type "INT", "STRING" or "FLOAT".
+        """
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "int_field": ("INT", {
+                    "default": 0,
+                    "min": 0, #Minimum value
+                    "max": 4096, #Maximum value
+                    "step": 64, #Slider's step
+                    "display": "number" # Cosmetic only: display as "number" or "slider"
+                }),
+                "float_field": ("FLOAT", {
+                    "default": 1.0,
+                    "min": 0.0,
+                    "max": 10.0,
+                    "step": 0.01,
+                    "round": 0.001, #The value representing the precision to round to; it will be set to the step value by default. Can be set to False to disable rounding.
+                    "display": "number"}),
+                "print_to_screen": (["enable", "disable"],),
+                "string_field": ("STRING", {
+                    "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node
+                    "default": "Hello World!"
+                }),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    #RETURN_NAMES = ("image_output_name",)
+
+    FUNCTION = "test"
+
+    #OUTPUT_NODE = False
+
+    CATEGORY = "Example"
+
+    def test(self, image, string_field, int_field, float_field, print_to_screen):
+        if print_to_screen == "enable":
+            print(f"""Your input contains:
+                string_field aka input text: {string_field}
+                int_field: {int_field}
+                float_field: {float_field}
+            """)
+        # do some processing on the image; in this example we just invert it
+        image = 1.0 - image
+        return (image,)
+
+
+# A dictionary that contains all nodes you want to export with their names
+# NOTE: names should be globally unique
+NODE_CLASS_MAPPINGS = {
+    "Example": Example
+}
+
+# A dictionary that contains the friendly/human-readable titles for the nodes
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "Example": "Example Node"
+}
diff --git a/custom_nodes/was-node-suite-comfyui/.gitignore b/custom_nodes/was-node-suite-comfyui/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..739a6495be7104f9a9de710c8d633df34a8bc500
--- /dev/null
+++ b/custom_nodes/was-node-suite-comfyui/.gitignore
@@ -0,0 +1,165 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ +*.pyc + +# Custom +was_suite_settings.json +styles.json +was_suite_config.json +workflows/ +was_history.json +nsp_pantry.json +cache/ +*.latent +*.image +*.conditioning diff --git a/custom_nodes/was-node-suite-comfyui/ComfyUI_+_WAS_Node_Suite_and_ComfyUI_Manager.ipynb b/custom_nodes/was-node-suite-comfyui/ComfyUI_+_WAS_Node_Suite_and_ComfyUI_Manager.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..f9bf91e790a250b0b33e927c4a21446db7d6e686 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/ComfyUI_+_WAS_Node_Suite_and_ComfyUI_Manager.ipynb @@ -0,0 +1,429 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aaaaaaaaaa" + }, + "source": [ + "# **Comfy**UI + **WAS** Node Suite   [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fcolab.research.google.com%2Fgithub%2FWASasquatch%2Fcomfyui-colab-was-node-suite%2Fblob%2Fmain%2FComfyUI_%252B_WAS_Node_Suite.ipynb&count_bg=%23EAAC00&title_bg=%233092C6&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com)\n", + "A version of ComfyUI Colab with WAS Node Suite installatoin." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bbbbbbbbbb", + "cellView": "form" + }, + "outputs": [], + "source": [ + "#@title Environment Setup\n", + "#@markdown Download and install ComfyUI + WAS Node Suite. You can run this cell again with the `UPDATE_COMFY_UI` or `UPDATE_WAS_NS` options selected to update.\n", + "\n", + "from pathlib import Path\n", + "import time\n", + "\n", + "OPTIONS = {}\n", + "\n", + "#@markdown Store ComfyUI on Google Drive instead of Colab\n", + "USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n", + "#markdown Update ComfyUI\n", + "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n", + "#@markdown Update WAS Node Suite\n", + "UPDATE_WAS_NS = True #@param {type:\"boolean\"}\n", + "#@markdown Update Pillow for WAS NS:\n", + "UPDATE_PILLOW = False #@param {type:\"boolean\"}\n", + "#@markdown ComfyUI Manager:\n", + "USE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n", + "UPDATE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n", + "\n", + "WORKSPACE = '/content/ComfyUI'\n", + "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n", + "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n", + "\n", + "if USE_GOOGLE_DRIVE:\n", + " !echo \"Mounting Google Drive...\"\n", + " %cd /\n", + " from google.colab import drive\n", + " drive.mount('/content/drive')\n", + " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n", + " %cd /content/drive/MyDrive\n", + "\n", + "![ ! 
-d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI $WORKSPACE\n", + "%cd $WORKSPACE\n", + "\n", + "if UPDATE_COMFY_UI:\n", + " !echo -= Updating ComfyUI =-\n", + " !git pull\n", + "\n", + "!echo -= Install dependencies =-\n", + "!pip install xformers -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118\n", + "!git clone https://github.com/WASasquatch/was-node-suite-comfyui $WORKSPACE/custom_nodes/was-node-suite-comfyui\n", + "\n", + "if USE_COMFYUI_MANAGER:\n", + " !git clone https://github.com/ltdrdata/ComfyUI-Manager.git $WORKSPACE/custom_nodes/ComfyUI-Manager\n", + "\n", + "if UPDATE_WAS_NS:\n", + " %cd $WORKSPACE/custom_nodes/was-node-suite-comfyui\n", + " !git pull\n", + " %cd $WORKSPACE\n", + "\n", + "if UPDATE_COMFYUI_MANAGER:\n", + " %cd $WORKSPACE/custom_nodes/ComfyUI-Manager\n", + " !git pull\n", + " %cd $WORKSPACE\n", + "\n", + "if UPDATE_PILLOW:\n", + " !pip install --upgrade --force-reinstall pillow\n", + " print('\\n\\033[91m\\033[1mRestarting runtime for Pillow Update. Run this cell again without `UPDATE_PILLOW` selected!\\033[0m')\n", + " time.sleep(5)\n", + "\n", + " import os\n", + " os.kill(os.getpid(), 9)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cccccccccc" + }, + "source": [ + "Download some models/checkpoints/vae or custom comfyui nodes (uncomment the commands for the ones you want)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dddddddddd", + "cellView": "form" + }, + "outputs": [], + "source": [ + "#@title Download Models\n", + "#@markdown Download models and other resources to use in ComfyUI. Select your options and run the this cell. Can run multiple times with different options.\n", + "\n", + "#@markdown ---\n", + "\n", + "# Checkpoints\n", + "\n", + "MODEL_OPTION = 'stable-diffusion-xl-base-1.0.safetensors' #@param['None', 'stable-diffusion-xl-base-1.0.safetensors', 'sd_xl_base_1.0_0.9vae.safetensors', 'sd_xl_base_0.9.safetensors', 'v1-5-pruned-emaonly.ckpt', 'v2-1_512-ema-pruned.safetensors', 'v2-1_768-ema-pruned.safetensors', 'AbyssOrangeMix2_hard.safetensors', 'AOM3A1_orangemixs.safetensors', 'AOM3A3_orangemixs.safetensors', 'wd-1-5-beta2-fp16.safetensors']\n", + "VAE_OPTION = 'vae-ft-mse-840000-ema-pruned.safetensors' #@param['None', 'vae-ft-mse-840000-ema-pruned.safetensors', 'orangemix.vae.pt', 'kl-f8-anime2.ckpt']\n", + "UPSCALE_MODEL_OPTION = 'None' #@param['None', 'RealESRGAN_x2.pth', 'RealESRGAN_x4.pth', '4x-UltraSharp', '4x_RealisticRescaler_100000_G.pth', 'BSRGAN.pth', 'BSRGANx2.pth']\n", + "LORA_OPTION = 'None' #@param['None', 'theovercomer8sContrastFix_sd21768.safetensors', 'theovercomer8sContrastFix_sd15.safetensors']\n", + "T2I_OPTION = 'None' #@param['None', 't2iadapter_depth_sd14v1.pth', 't2iadapter_seg_sd14v1.pth', 't2iadapter_sketch_sd14v1.pth', 't2iadapter_keypose_sd14v1.pth', 't2iadapter_openpose_sd14v1.pth', 't2iadapter_color_sd14v1.pth', 't2iadapter_canny_sd14v1.pth', '/t2iadapter_style_sd14v1.pth']\n", + "CONTROLNET_OPTION = 'None' #@param['None', 'control_depth-fp16.safetensors', 'control_scribble-fp16.safetensors', 'control_openpose-fp16.safetensors']\n", + "\n", + "#@markdown ---\n", + "\n", + "#@markdown **Download and instlal CLIPVision**:\n", + "DOWNLOAD_CLIPVISION = False #@param {type:\"boolean\"}\n", + "#@markdown **ControlNet Preprocessor Nodes** by Fannovel16:\n", + "INSTALL_CONTROLNET_NODES = False #@param {type:\"boolean\"}\n", + "\n", + "# SDXL\n", + "if MODEL_OPTION == 
'stable-diffusion-xl-base-1.0.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'sd_xl_base_1.0_0.9vae.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'sd_xl_base_0.9.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/sd_xl_base_0.9.safetensors -P $WORKSPACE/models/checkpoints/\n", + "\n", + "\n", + "# SD1.5\n", + "if MODEL_OPTION == 'v1-5-pruned-emaonly.ckpt':\n", + " !wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P $WORKSPACE/models/checkpoints/\n", + "\n", + "# SD2\n", + "if MODEL_OPTION == 'v2-1_512-ema-pruned.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'v2-1_768-ema-pruned.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n", + "\n", + "# Some SD1.5 anime style\n", + "if MODEL_OPTION == 'AbyssOrangeMix2_hard.safetensors':\n", + " !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'AOM3A1_orangemixs.safetensors':\n", + " !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'AOM3A3_orangemixs.safetensors':\n", + " !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors -P $WORKSPACE/models/checkpoints/\n", + "if MODEL_OPTION == 'anything-v3-fp16-pruned.safetensors':\n", + " !wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P $WORKSPACE/models/checkpoints/\n", + "\n", + "# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n", + "if MODEL_OPTION == 'wd-1-5-beta2-fp16.safetensors':\n", + " !wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta2/resolve/main/checkpoints/wd-1-5-beta2-fp16.safetensors -P $WORKSPACE/models/checkpoints/\n", + "\n", + "\n", + "# VAE\n", + "if VAE_OPTION == 'vae-ft-mse-840000-ema-pruned.safetensors':\n", + " !wget -c https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors -P $WORKSPACE/models/vae/\n", + "if VAE_OPTION == 'orangemix.vae.pt':\n", + " !wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt -P $WORKSPACE/models/vae/'\n", + "if VAE_OPTION == 'kl-f8-anime2.ckpt':\n", + " !wget -c https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt -P $WORKSPACE/models/vae/\n", + "\n", + "\n", + "# Loras\n", + "if LORA_OPTION == 'theovercomer8sContrastFix_sd21768.safetensors':\n", + " !wget -c https://civitai.com/api/download/models/10350 -O $WORKSPACE/models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n", + "if LORA_OPTION == 'theovercomer8sContrastFix_sd15.safetensors':\n", + " !wget -c https://civitai.com/api/download/models/10638 -O 
$WORKSPACE/models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n", + "\n", + "\n", + "# T2I-Adapter\n", + "if T2I_OPTION == 't2iadapter_depth_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_seg_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_sketch_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_keypose_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_openpose_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_color_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "if T2I_OPTION == 't2iadapter_canny_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P $WORKSPACE/models/controlnet/\n", + "\n", + "# T2I Styles Model\n", + "if T2I_OPTION == '/t2iadapter_style_sd14v1.pth':\n", + " !wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P $WORKSPACE/models/style_models/\n", + "\n", + "# CLIPVision model (needed for styles model)\n", + "if DOWNLOAD_CLIPVISION:\n", + " !wget -c https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin -O $WORKSPACE/models/clip_vision/clip_vit14.bin\n", + "\n", + "\n", + "# ControlNet\n", + "if CONTROLNET_OPTION == 'control_depth-fp16.safetensors':\n", + " !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors -P $WORKSPACE/models/controlnet/\n", + "if CONTROLNET_OPTION == 'control_scribble-fp16.safetensors':\n", + " !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors -P $WORKSPACE/models/controlnet/\n", + "if CONTROLNET_OPTION == 'control_openpose-fp16.safetensors':\n", + " !wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors -P $WORKSPACE/models/controlnet/\n", + "\n", + "\n", + "# Controlnet Preprocessor nodes by Fannovel16\n", + "if INSTALL_CONTROLNET_NODES:\n", + " !cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n", + "\n", + "# ESRGAN upscale model\n", + "if UPSCALE_MODEL_OPTION == 'RealESRGAN_x2.pth':\n", + " !wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P $WORKSPACE/models/upscale_models/\n", + "if UPSCALE_MODEL_OPTION == 'RealESRGAN_x4.pth':\n", + " !wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P $WORKSPACE/models/upscale_models/\n", + "if UPSCALE_MODEL_OPTION == '4x-UltraSharp':\n", + " !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x-UltraSharp.pth -P 
$WORKSPACE/models/upscale_models/\n", + "if UPSCALE_MODEL_OPTION == '4x_RealisticRescaler_100000_G.pth':\n", + " !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x_RealisticRescaler_100000_G.pth -P $WORKSPACE/models/upscale_models/\n", + "if UPSCALE_MODEL_OPTION == 'BSRGAN.pth':\n", + " !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/BSRGAN.pth -P $WORKSPACE/models/upscale_models/\n", + "if UPSCALE_MODEL_OPTION == 'BSRGANx2.pth':\n", + " !wget -c https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/BSRGANx2.pth -P $WORKSPACE/models/upscale_models/\n", + "\n" + ] + }, + { + "cell_type": "code", + "source": [ + "#@title Direct Download Models\n", + "\n", + "import os\n", + "\n", + "types = {\n", + " 'CHECKPOINTS': os.path.join(WORKSPACE, 'models/checkpoints'),\n", + " 'CLIP': os.path.join(WORKSPACE, 'models/clip'),\n", + " 'CLIP_VISION': os.path.join(WORKSPACE, 'models/clip_vision'),\n", + " 'CONFIGS': os.path.join(WORKSPACE, 'models/configs'),\n", + " 'CONTROLNET': os.path.join(WORKSPACE, 'models/controlnet'),\n", + " 'DIFFUSERS': os.path.join(WORKSPACE, 'models/diffusers'),\n", + " 'EMBEDDINGS': os.path.join(WORKSPACE, 'models/embeddings'),\n", + " 'GLIGEN': os.path.join(WORKSPACE, 'models/gligen'),\n", + " 'HYPERNETWORKS': os.path.join(WORKSPACE, 'models/hypernetworks'),\n", + " 'LORAS': os.path.join(WORKSPACE, 'models/loras'),\n", + " 'STYLE_MODEL': os.path.join(WORKSPACE, 'models/style_models'),\n", + " 'UNET': os.path.join(WORKSPACE, 'models/unet'),\n", + " 'UPSCALE_MODELS': os.path.join(WORKSPACE, 'models/upscale_models'),\n", + " 'VAE': os.path.join(WORKSPACE, 'models/vae'),\n", + " 'VAE_APPROX': os.path.join(WORKSPACE, 'models/vae_approx')\n", + "}\n", + "\n", + "TYPE = 'CHECKPOINTS' #@param ['CHECKPOINTS', 'CLIP', 'CLIP_VISION', 'CONFIGS', 'CONTROLNET', 'DIFFUSERS', 'EMBEDDINGS', 'GLIGEN', 'HYPERNETWORKS', 'LORAS', 'STYLE_MODEL', 'UNET', 'UPSCALE_MODELS', 'VAE', 'VAE_APPROX']\n", + "DIRECT_URL = 'https://civitai.com/api/download/models/141627' #@param {type:\"string\"}\n", + "SAVE_AS = 'Differentia_V1.safetensors' #@param {type: 'string'}\n", + "#@markdown Direct link to the download. 
The example URL is Differentia (https://civitai.com/models/129232)\n",
+        "\n",
+        "if TYPE and DIRECT_URL:\n",
+        "  target = os.path.join(types[TYPE], SAVE_AS)\n",
+        "  !wget -c $DIRECT_URL -O $target"
+      ],
+      "metadata": {
+        "cellView": "form",
+        "id": "Bo0pf4So3tCK"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title ComfyUI Cloudflare (Recommended)\n",
+        "#@markdown Running ComfyUI with Cloudflare is now the recommended method.\n",
+        "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n",
+        "!dpkg -i cloudflared-linux-amd64.deb\n",
+        "\n",
+        "import subprocess\n",
+        "import threading\n",
+        "import time\n",
+        "import socket\n",
+        "import urllib.request\n",
+        "\n",
+        "def iframe_thread(port):\n",
+        "  while True:\n",
+        "      time.sleep(0.5)\n",
+        "      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+        "      result = sock.connect_ex(('127.0.0.1', port))\n",
+        "      if result == 0:\n",
+        "        break\n",
+        "      sock.close()\n",
+        "  print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n",
+        "\n",
+        "  p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n",
+        "  for line in p.stderr:\n",
+        "    l = line.decode()\n",
+        "    if \"trycloudflare.com \" in l:\n",
+        "      print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n",
+        "    #print(l, end='')\n",
+        "\n",
+        "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+        "\n",
+        "!python main.py --dont-print-server"
+      ],
+      "metadata": {
+        "cellView": "form",
+        "id": "StSynv5tp2nL"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "jjjjjjjjjjjjj",
+        "cellView": "form"
+      },
+      "outputs": [],
+      "source": [
+        "#@title ComfyUI Localtunnel\n",
+        "#@markdown Run this cell to start ComfyUI. You'll see a link similar to `your url is: https://slow-yaks-jog-34-72-173-3.loca.lt` (example)\n",
+        "#@markdown
*If you have trouble with the red screen of death \"reminder\" not letting you generate, use the iFrame version below.*\n",
+        "!npm install -g localtunnel\n",
+        "\n",
+        "import subprocess\n",
+        "import threading\n",
+        "import time\n",
+        "import socket\n",
+        "import urllib.request\n",
+        "\n",
+        "def iframe_thread(port):\n",
+        "  while True:\n",
+        "      time.sleep(0.5)\n",
+        "      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+        "      result = sock.connect_ex(('127.0.0.1', port))\n",
+        "      if result == 0:\n",
+        "        break\n",
+        "      sock.close()\n",
+        "  print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\\n\")\n",
+        "\n",
+        "  print(\"The password/endpoint ip for localtunnel is:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
+        "  p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
+        "  for line in p.stdout:\n",
+        "    print(line.decode(), end='')\n",
+        "\n",
+        "\n",
+        "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+        "\n",
+        "!python main.py --dont-print-server"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "gggggggggg"
+      },
+      "source": [
+        "### Run ComfyUI with colab iframe ***(use only in case the previous ways don't work)***\n",
+        "\n",
+        "You should see the UI appear in an iframe. If you get a 403 error, it's your Firefox settings or an extension that's messing things up.\n",
+        "\n",
+        "If you want to open it in another window use the link.\n",
+        "\n",
+        "Note that some UI features like live image previews won't work because the colab iframe blocks websockets."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "hhhhhhhhhh",
+        "cellView": "form"
+      },
+      "outputs": [],
+      "source": [
+        "#@title ComfyUI iFrame\n",
+        "import threading\n",
+        "import time\n",
+        "import socket\n",
+        "def iframe_thread(port):\n",
+        "  while True:\n",
+        "      time.sleep(0.5)\n",
+        "      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+        "      result = sock.connect_ex(('127.0.0.1', port))\n",
+        "      if result == 0:\n",
+        "        break\n",
+        "      sock.close()\n",
+        "  from google.colab import output\n",
+        "  output.serve_kernel_port_as_iframe(port, height=1024)\n",
+        "  print(\"to open it in a window you can open this link here:\")\n",
+        "  output.serve_kernel_port_as_window(port)\n",
+        "\n",
+        "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+        "\n",
+        "!python main.py --dont-print-server"
+      ]
+    }
+  ],
+  "metadata": {
+    "accelerator": "GPU",
+    "colab": {
+      "provenance": [],
+      "private_outputs": true,
+      "gpuType": "T4",
+      "include_colab_link": true
+    },
+    "kernelspec": {
+      "display_name": "Python 3",
+      "name": "python3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/custom_nodes/was-node-suite-comfyui/LICENSE b/custom_nodes/was-node-suite-comfyui/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..cda3c409482e31c2a47d349f7ce01818ca903419
--- /dev/null
+++ b/custom_nodes/was-node-suite-comfyui/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Jordan Thompson (WASasquatch)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/custom_nodes/was-node-suite-comfyui/README.md b/custom_nodes/was-node-suite-comfyui/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ce8c477335074d2ae14565071b2dd4dba6f20864 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/README.md @@ -0,0 +1,397 @@ +# **WAS** Node Suite   [![Colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/github/WASasquatch/was-node-suite-comfyui/blob/main/ComfyUI_%2B_WAS_Node_Suite_and_ComfyUI_Manager.ipynb) [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2FWASasquatch%2Fwas-node-suite-comfyui&count_bg=%233D9CC8&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com) [![Donate](https://img.shields.io/badge/Donate-PayPal-blue.svg)](https://paypal.me/ThompsonJordan?country.x=US&locale.x=en_US) + +

+ +

+
+### A node suite for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) with many new nodes, such as image processing, text processing, and more.
+
+#### [Share Workflows](https://github.com/WASasquatch/was-node-suite-comfyui/wiki/Workflow-Examples) to the workflows wiki. Preferably embedded PNGs with workflows, but JSON is OK too. [You can use this tool to add a workflow to a PNG file easily](https://colab.research.google.com/drive/1hQMjNUdhMQ3rw1Wcm3_umvmOMeS_K4s8?usp=sharing).
+#### Consider [donating to the project](https://paypal.me/ThompsonJordan?country.x=US&locale.x=en_US) to help its continued development.
+
+# Important Updates
+
+- **[Updated 10/8/2023]** BLIP is now a shipped module of WAS-NS and no longer requires the BLIP Repo
+  - **[Updated 5/29/2023]** `ASCII` **is deprecated**. The new preferred method of text node output is `STRING`. This is a change from `ASCII` so that it is more clear what data is being passed.
+    - The `was_suite_config.json` will automatically set `use_legacy_ascii_text` to `false`.
+  - [Video Nodes](https://github.com/WASasquatch/was-node-suite-comfyui#video-nodes) - There are two new video nodes, `Write to Video` and `Create Video from Path`. These are experimental nodes.
+
+# Current Nodes:
+
+ $\Large\color{orange}{Expand\ Node\ List}$ + +
+
+ - BLIP Model Loader: Load a BLIP model to input into the BLIP Analyze node
+ - BLIP Analyze Image: Get a text caption from an image, or interrogate the image with a question.
+   - Model will download automatically from default URL, but you can point the download to another location/caption model in `was_suite_config`
+   - Models will be stored in `ComfyUI/models/blip/checkpoints/`
+ - SAM Model Loader: Load a SAM Segmentation model
+ - SAM Parameters: Define your SAM parameters for segmentation of an image
+ - SAM Parameters Combine: Combine SAM parameters
+ - SAM Image Mask: SAM image masking
+ - Image Bounds: Bounds an image
+ - Inset Image Bounds: Inset an image's bounds
+ - Bounded Image Blend: Blend bounds image
+ - Bounded Image Blend with Mask: Blend a bounds image by mask
+ - Bounded Image Crop: Crop a bounds image
+ - Bounded Image Crop with Mask: Crop a bounds image by mask
+ - Bus Node: condense the 5 common connectors into one, keep your workspace tidy (Model, Clip, VAE, Positive Conditioning, Negative Conditioning)
+ - Cache Node: Cache Latent, Tensor Batches (Image), and Conditioning to disk to use later.
+ - CLIPTextEncode (NSP): Parse noodle soups from the NSP pantry, or parse wildcards from a directory containing A1111 style wildcards (see the sketch after this list).
+   - Wildcards are in the style of `__filename__`, which also includes subdirectories like `__appearance/haircolour__` (if your noodle_key is set to `__`)
+   - You can set a custom wildcards path in `was_suite_config.json` file with key:
+     - ` "wildcards_path": "E:\\python\\automatic\\webui3\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards"`
+     - If no path is set the wildcards dir is located at the root of WAS Node Suite as `/wildcards`
+ - CLIP Input Switch: Switch between two CLIP inputs based on a boolean switch.
+ - CLIP Vision Input Switch: Switch between two CLIP Vision inputs based on a boolean switch.
+ - Conditioning Input Switch: Switch between two conditioning inputs.
+ - Constant Number
+ - Control Net Model Input Switch: Switch between two Control Net Model inputs based on a boolean switch.
+ - Create Grid Image: Create an image grid from images at a destination with customizable glob pattern. Optional border size and color.
+ - Create Grid Image from Batch: Create a grid image from a batch tensor of images.
+ - Create Morph Image: Create a GIF/APNG animation from two images, fading between them.
+ - Create Morph Image by Path: Create a GIF/APNG animation from a path to a directory containing images, with optional pattern.
+ - Create Video from Path: Create video from images from a specified path.
+ - CLIPSeg Masking: Mask an image with CLIPSeg and return a raw mask
+ - CLIPSeg Masking Batch: Create a batch image (from image inputs) and batch mask with CLIPSeg
+ - Dictionary to Console: Print a dictionary input to the console
+ - Image Analyze
+   - Black White Levels
+   - RGB Levels
+     - Depends on `matplotlib`, will attempt to install on first run
+ - Diffusers Hub Down-Loader: Download a diffusers model from the HuggingFace Hub and load it
+ - Image SSAO (Ambient Occlusion): [Experimental Beta Node] Create Screen Space Ambient Occlusion with an image and MiDaS depth approximation (or provided depth map).
+ - Image SSDO (Direct Occlusion): [Experimental Beta Node] Create a Screen Space Direct Occlusion with an image input. Direct Occlusion presents you with direct lighting highlights, similar to how Ambient Occlusion finds the crevices and shadowy areas around objects. 
+ - Image Aspect Ratio: Fetch image aspect ratio in float format, common format (e.g. 16:9), and whether the image is portrait, landscape, or square.
+ - Image Batch: Create one batch out of multiple batched tensors.
+ - Image Blank: Create a blank image in any color
+ - Image Blend by Mask: Blend two images by a mask
+ - Image Blend: Blend two images by opacity
+ - Image Blending Mode: Blend two images by various blending modes
+ - Image Bloom Filter: Apply a high-pass based bloom filter
+ - Image Canny Filter: Apply a canny filter to an image
+ - Image Chromatic Aberration: Apply a chromatic aberration lens effect to an image, like in sci-fi films, movie theaters, and video games
+ - Image Color Palette
+   - Generate a color palette based on the input image.
+     - Depends on `scikit-learn`, will attempt to install on first run.
+   - Supports color range of 8-256
+   - Utilizes the font in `./res/` unless unavailable, in which case it falls back to an internal better-than-nothing font.
+ - Image Crop Face: Crop a face out of an image
+   - **Limitations:**
+     - Sometimes no faces are found in badly generated images, or faces at angles
+     - Sometimes face crop is black, this is because the padding is too large and intersected with the image edge. Use a smaller padding size.
+     - face_recognition mode sometimes finds random things as faces. It also requires a [CUDA] GPU.
+     - Only detects one face. This is a design choice to make its use easy.
+   - **Notes:**
+     - Detection runs in succession. If nothing is found with the selected detection cascades, it will try the next available cascades file.
+ - Image Crop Location: Crop an image to a specified location in top, left, right, and bottom locations relating to the pixel dimensions of the image in X and Y coordinates.
+ - Image Crop Square Location: Crop a location by X/Y center, creating a square crop around that point.
+ - Image Displacement Warp: Warp an image by a displacement map image by a given amplitude.
+ - Image Dragan Photography Filter: Apply an Andrzej Dragan photography style to an image
+ - Image Edge Detection Filter: Detect edges in an image
+ - Image Film Grain: Apply film grain to an image
+ - Image Filter Adjustments: Apply various image adjustments to an image
+ - Image Flip: Flip an image horizontally, or vertically
+ - Image Gradient Map: Apply a gradient map to an image
+ - Image Generate Gradient: Generate a gradient map with desired stops and colors
+ - Image High Pass Filter: Apply a high frequency pass to the image returning the details
+ - Image History Loader: Load images from history based on the Load Image Batch node. Can define max history in config file. 
*(requires restart to show last session's files at this time)*
+ - Image Input Switch: Switch between two image inputs based on a boolean switch
+ - Image Levels Adjustment: Adjust the levels of an image
+ - Image Load: Load an *image* from any path on the system, or a url starting with `http`
+ - Image Median Filter: Apply a median filter to an image, such as to smooth out details in surfaces
+ - Image Mix RGB Channels: Mix together RGB channels into a single image
+ - Image Monitor Effects Filter: Apply various monitor effects to an image
+   - Digital Distortion
+     - A digital breakup distortion effect
+   - Signal Distortion
+     - An analog signal distortion effect on vertical bands like a CRT monitor
+   - TV Distortion
+     - A TV scanline and bleed distortion effect
+ - Image Nova Filter: A filter that uses a sinus frequency to break apart an image into RGB frequencies
+ - Image Perlin Noise: Generate perlin noise
+ - Image Perlin Power Fractal: Generate a perlin power fractal
+ - Image Paste Face Crop: Paste face crop back onto an image at its original location and size
+   - Features a better blending function than GFPGAN/CodeFormer so there shouldn't be visible seams, and coupled with Diffusion Result, looks better than GFPGAN/CodeFormer.
+ - Image Paste Crop: Paste a crop (such as from Image Crop Location) at its original location and size utilizing the `crop_data` node input. This uses a different blending algorithm than Image Paste Face Crop, which may be desired in certain instances.
+ - Image Power Noise: Generate power-law noise
+   - frequency: The frequency parameter controls the distribution of the noise across different frequencies. In the context of Fourier analysis, higher frequencies represent fine details or high-frequency components, while lower frequencies represent coarse details or low-frequency components. Adjusting the frequency parameter can result in different textures and levels of detail in the generated noise. The specific range and meaning of the frequency parameter may vary depending on the noise type.
+   - attenuation: The attenuation parameter determines the strength or intensity of the noise. It controls how much the noise values deviate from the mean or central value. Higher values of attenuation lead to more significant variations and a stronger presence of noise, while lower values result in a smoother and less noticeable noise. The specific range and interpretation of the attenuation parameter may vary depending on the noise type.
+   - noise_type: The type of Power-Law noise to generate (white, grey, pink, green, blue)
+ - Image Paste Crop by Location: Paste a crop to a custom location. This uses the same blending algorithm as Image Paste Crop.
+ - Image Pixelate: Turn an image into pixel art! Define the max number of colors, the pixelation mode, the random state, and max iterations, and make those sprites shine.
+ - Image Remove Background (Alpha): Remove the background from an image by threshold and tolerance.
+ - Image Remove Color: Remove a color from an image and replace it with another
+ - Image Resize
+ - Image Rotate: Rotate an image
+ - Image Rotate Hue: Rotate the hue of an image. A hue_shift of `0.0` would represent no change, and `1.0` would represent a full circle of the hue, and also exhibit no change.
+ - Image Save: A save image node with format support and path support.
+   - `show_history` will show previously saved images with the WAS Save Image node. 
ComfyUI unfortunately resizes displayed images to the same size, however, so if images are different sizes they will be forced into a uniform display size.
+   - Doesn't display images saved outside `/ComfyUI/output/`
+   - You can save as `webp` if you have webp available to your system. On windows you can get that support with this [precompiled library](https://storage.googleapis.com/downloads.webmproject.org/releases/webp/libwebp-1.3.0-windows-x64.zip) from the [webp project](https://developers.google.com/speed/webp/download). On linux you can run `apt-get install webp`.
+ - Image Seamless Texture: Create a seamless texture out of an image with optional tiling
+ - Image Select Channel: Select a single channel of an RGB image
+ - Image Select Color: Return the select image only on a black canvas
+ - Image Shadows and Highlights: Adjust the shadows and highlights of an image
+ - Image Size to Number: Get the `width` and `height` of an input image to use with **Number** nodes.
+ - Image Stitch: Stitch images together on different sides with optional feathering blending between them.
+ - Image Style Filter: Style an image with Pilgram Instagram-like filters
+   - Depends on `pilgram` module
+ - Image Threshold: Return the desired threshold range of an image
+ - Image Tile: Split an image up into an image batch of tiles. Can be used with Tensor Batch to Image to select an individual tile from the batch.
+ - Image Transpose
+ - Image fDOF Filter: Apply a fake depth of field effect to an image
+ - Image to Latent Mask: Convert an image into a latent mask
+ - Image to Noise: Convert an image into noise, useful for init blending or init input to theme a diffusion.
+ - Images to RGB: Convert a tensor image batch to RGB if they are RGBA or some other mode.
+ - Image to Seed: Convert an image to a reproducible seed
+ - Image Voronoi Noise Filter
+   - A custom implementation of the Worley voronoi noise diagram
+ - Input Switch (Disable until `*` wildcard fix)
+ - KSampler (WAS): A sampler that accepts a seed as a node input
+ - KSampler Cycle: A KSampler able to do HR pass loops, you can specify an upscale factor, and how many steps to achieve that factor. Accepts an upscale_model, as well as a 1x processor model. A secondary diffusion model can also be used.
+ - Load Cache: Load cached Latent, Tensor Batch (image), and Conditioning files.
+ - Load Text File
+   - Now supports outputting a dictionary named after the file, or custom input.
+   - The dictionary contains a list of all lines in the file.
+ - Load Batch Images
+   - Increment images in a folder, or fetch a single image out of a batch.
+   - Will reset its place if the path, or pattern is changed.
+   - pattern is a glob that allows you to do things like `**/*` to get all files in the directory and subdirectory
+     or things like `*.jpg` to select only JPEG images in the directory specified.
+ - Mask to Image: Convert `MASK` to `IMAGE`
+ - Mask Batch to Mask: Return a single mask from a batch of masks
+ - Mask Invert: Invert a mask.
+ - Mask Add: Add masks together.
+ - Mask Subtract: Subtract from a mask by another.
+ - Mask Dominant Region: Return the dominant region in a mask (the largest area)
+ - Mask Minority Region: Return the smallest region in a mask (the smallest area)
+ - Mask Crop Dominant Region: Crop mask to the dominant region with optional padding in pixels
+ - Mask Crop Minority Region: Crop mask to the minority region with optional padding in pixels
+ - Mask Crop Region: Crop to dominant or minority region and return `crop_data` for pasting back. 
Additionally outputs region location and size for other nodes like Crop Image Location.
+ - Mask Arbitrary Region: Return a region that most closely matches the size input (size is not a direct representation of pixels, but approximate)
+ - Mask Smooth Region: Smooth the boundaries of a mask
+ - Mask Erode Region: Erode the boundaries of a mask
+ - Mask Dilate Region: Dilate the boundaries of a mask
+ - Mask Fill Region: Fill holes within the mask's regions
+ - Mask Ceiling Region: Return only white pixels within an offset range.
+ - Mask Floor Region: Return the lowermost pixel values as white (255)
+ - Mask Threshold Region: Apply a thresholded image between a black value and white value
+ - Mask Gaussian Region: Apply a Gaussian blur to the mask
+ - Masks Combine Masks: Combine 2 or more masks into one mask.
+ - Masks Combine Batch: Combine batched masks into one mask.
+ - Model Input Switch: Switch between two model inputs based on a boolean switch
+ - ComfyUI Loaders: A set of ComfyUI loaders that also output a string that contains the name of the model being loaded.
+ - Latent Noise Injection: Inject latent noise into a latent image
+ - Latent Size to Number: Latent sizes in tensor width/height
+ - Latent Upscale by Factor: Upscale a latent image by a factor
+ - Latent Input Switch: Switch between two latent inputs based on a boolean switch
+ - Logic Boolean: A simple `1` or `0` output to use with logic
+ - Lora Input Switch: Switch between two LORAs based on a boolean switch
+ - MiDaS Model Loader: Load a MiDaS model as an optional input for MiDaS Depth Approximation
+ - MiDaS Depth Approximation: Produce a depth approximation of a single image input
+ - MiDaS Mask Image: Mask an input image using MiDaS with a desired color
+ - Number Operation
+ - Number to Seed
+ - Number to Float
+ - Number Input Switch: Switch between two number inputs based on a boolean switch
+ - Number Input Condition: Compare between two inputs or against the A input
+ - Number to Int
+ - Number to String
+ - Number to Text
+ - Perlin Power Fractal Latent: Create a power fractal based latent image. Doesn't work with all samplers (unless you add noise).
+ - Random Number
+   - Random integer between min and max (inclusive), uniformly distributed random number
+   - Random float between min and max (inclusive), uniformly distributed random number
+   - Random number from 0 to 1 inclusive, this will be a 0 or 1 boolean if you use the 'int' output
+   - Random shuffled list of integers between min and max inclusive. E.g. if min=0 and max=3, a possible outcome would be the string '3,1,2,0'
+ - Save Text File: Save a text string to a file
+ - Samples Passthrough (Stat System): Logs RAM, VRAM, and Disk usage to the console.
+ - Seed: Return a seed
+ - Tensor Batch to Image: Select a single image out of a latent batch for post processing with filters
+ - Text Add Tokens: Add custom tokens to parse in filenames or other text.
+ - Text Add Token by Input: Add custom token by inputs representing a **single line** name and value of the token
+ - Text Compare: Compare two strings. Returns a boolean if they are the same, a score of similarity, and the similarity or difference text. 
+ - Text Concatenate: Merge two strings
+ - Text Dictionary Update: Merge two dictionaries
+ - Text File History: Show previously opened text files *(requires restart to show last session's files at this time)*
+ - Text Find and Replace: Find and replace a substring in a string
+ - Text Find and Replace by Dictionary: Replace substrings in an ASCII text input with a dictionary.
+   - The dictionary keys are used as the key to replace, and the list of lines it contains chosen at random based on the seed.
+ - Text Input Switch: Switch between two text inputs
+ - Text List: Create a list of text strings
+ - Text Load Line From File: Load lines from a file sequentially each *batch prompt* run, or select a line index.
+ - Text Concatenate: Merge lists of strings
+ - Text Multiline: Write a multiline text string
+ - Text Parse A1111 Embeddings: Convert embeddings filenames in your prompts to `embedding:[filename]` format based on your `/ComfyUI/models/embeddings/` files.
+ - Text Parse Noodle Soup Prompts: Parse NSP in a text input
+ - Text Parse Tokens: Parse custom tokens in text.
+ - Text Random Line: Select a random line from a text input string
+ - Text Random Prompt: Feeling lucky? Get a random prompt based on a search seed, such as "superhero"
+ - Text String: Write a single line text string value
+ - Text String Truncate: Truncate a string from the beginning or end by characters or words.
+ - Text to Conditioning: Convert a text string to conditioning.
+ - True Random.org Number Generator: Generate a truly random number online from atmospheric noise with [Random.org](https://random.org/)
+   - [Get your API key from your account page](https://accounts.random.org/)
+ - Upscale Model Input Switch: Switch between two Upscale Models inputs based on a boolean switch.
+ - Write to Morph GIF: Write a new frame to an existing GIF (or create new one) with interpolation between frames.
+ - Write to Video: Write a frame as you generate to a video (Best used with FFV1 for lossless images)
+ - VAE Input Switch: Switch between two VAE inputs based on boolean input
+ + +
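+ A minimal sketch of the `__wildcard__` substitution described in the list above (an illustration only, not the suite's exact `replace_wildcards` implementation; the `.txt` extension and the `wildcards/` layout are assumptions for the example):
+
+ ```
+ import os, random, re
+
+ def expand_wildcards(text, wildcard_dir="wildcards", noodle_key="__", seed=0):
+     # __appearance/haircolour__ -> a random non-comment line from wildcards/appearance/haircolour.txt
+     random.seed(seed)
+     def pick(match):
+         path = os.path.join(wildcard_dir, match.group(1) + ".txt")  # assumed .txt wildcard files
+         if not os.path.isfile(path):
+             return match.group(0)  # leave unknown wildcards untouched
+         with open(path, "r", encoding="utf-8") as f:
+             lines = [l.strip() for l in f if l.strip() and not l.lstrip().startswith(("#", "//"))]
+         return random.choice(lines) if lines else match.group(0)
+     return re.sub(f"{noodle_key}(.+?){noodle_key}", pick, text)
+
+ print(expand_wildcards("a portrait with __haircolour__ hair", seed=42))
+ ```
+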
+
+ ### Extra Nodes
+
+ - CLIPTextEncode (BlenderNeko Advanced + NSP): Only available if you have BlenderNeko's [Advanced CLIP Text Encode](https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb). Allows for NSP and Wildcard use with their advanced CLIPTextEncode.
+
+
+ ### Notes:
+
+ - **CLIPTextEncode (NSP)** and **CLIPTextEncode (BlenderNeko Advanced + NSP)**: Accept dynamic prompts in `<option1|option2|option3>` format. This will respect the node's input seed to yield reproducible results like NSP and Wildcards.
+ - **CLIPTextEncode (NSP)** and **CLIPTextEncode (BlenderNeko Advanced + NSP)**: Assign variables with `$|prompt words|$` format. You can then print this word again within the prompt with the number corresponding to the order you created them. So the first prompt var would be printed with `$1` and the second with `$2` and so on.
+
+---
+
+
+## Video Nodes
+
+### Codecs
+You can use codecs that are available to your ffmpeg binaries by adding their fourcc ID (in one string), and appropriate container extension to the `was_suite_config.json`
+
+Example [H264 Codecs](https://github.com/cisco/openh264/releases/tag/v1.8.0) (Defaults)
+```
+    "ffmpeg_extra_codecs": {
+        "avc1": ".mp4",
+        "h264": ".mkv"
+    }
+```
+
+### Notes
+  - For now I am only supporting **Windows** installations for video nodes.
+    - I do not have access to Mac or a stand-alone linux distro. If you get them working and want to PR a patch/directions, feel free.
+  - Video nodes require [FFMPEG](https://ffmpeg.org/download.html). You should download the proper FFMPEG binaries for your system and set the FFMPEG path in the config file.
+  - Additionally, if you want to use the H264 codec, you need to [download OpenH264 1.8.0](https://github.com/cisco/openh264/releases/tag/v1.8.0) and place it in the root of ComfyUI (Example: `C:\ComfyUI_windows_portable`).
+  - FFV1 will complain about an invalid container. You can ignore this. The resulting MKV file is readable. I have not figured out what this issue is about. Documentation tells me to use MKV, but it's telling me it's unsupported.
+    - If you know how to resolve this, I'd love a PR
+  - The `Write to Video` node should use a lossless video codec; otherwise, when it copies frames and reapplies compression, it will exponentially degrade the starting frames run to run.
+
+---
+
+# Text Tokens
+Text tokens can be used in the Save Text File and Save Image nodes. You can also add your own custom tokens with the Text Add Tokens node.
+
+The token name can be anything excluding the `:` character to define your token. It can also be simple Regular Expressions.
+
+## Built-in Tokens
+  - [time]
+    - The current system microtime
+  - [time(`format_code`)]
+    - The current system time in human readable format. Utilizing [datetime](https://docs.python.org/3/library/datetime.html) formatting
+    - Example: `[hostname]_[time]__[time(%Y-%m-%d__%I-%M%p)]` would output: **SKYNET-MASTER_1680897261__2023-04-07__07-54PM**
+  - [hostname]
+    - The hostname of the system executing ComfyUI
+  - [cuda_device]
+    - The cuda device from `comfy.model_management.get_cuda_device()`
+  - [cuda_name]
+    - The cuda name from `comfy.model_management.get_cuda_device_name()`
+
+ +
+ $\color{orange}{Expand\ Date\ Code\ List}$ + +
+ +| Directive | Meaning | Example | Notes | +| --- | --- | --- | --- | +| %a | Weekday as locale’s abbreviated name. | Sun, Mon, …, Sat (en_US); So, Mo, …, Sa (de_DE) | (1) | +| %A | Weekday as locale’s full name. | Sunday, Monday, …, Saturday (en_US); Sonntag, Montag, …, Samstag (de_DE) | (1) | +| %w | Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. | 0, 1, …, 6 | | +| %d | Day of the month as a zero-padded decimal number. | 01, 02, …, 31 | (9) | +| %b | Month as locale’s abbreviated name. | Jan, Feb, …, Dec (en_US); Jan, Feb, …, Dez (de_DE) | (1) | +| %B | Month as locale’s full name. | January, February, …, December (en_US); Januar, Februar, …, Dezember (de_DE) | (1) | +| %m | Month as a zero-padded decimal number. | 01, 02, …, 12 | (9) | +| %y | Year without century as a zero-padded decimal number. | 00, 01, …, 99 | (9) | +| %Y | Year with century as a decimal number. | 0001, 0002, …, 2013, 2014, …, 9998, 9999 | (2) | +| %H | Hour (24-hour clock) as a zero-padded decimal number. | 00, 01, …, 23 | (9) | +| %I | Hour (12-hour clock) as a zero-padded decimal number. | 01, 02, …, 12 | (9) | +| %p | Locale’s equivalent of either AM or PM. | AM, PM (en_US); am, pm (de_DE) | (1), (3) | +| %M | Minute as a zero-padded decimal number. | 00, 01, …, 59 | (9) | +| %S | Second as a zero-padded decimal number. | 00, 01, …, 59 | (4), (9) | +| %f | Microsecond as a decimal number, zero-padded to 6 digits. | 000000, 000001, …, 999999 | (5) | +| %z | UTC offset in the form ±HHMM[SS[.ffffff]] (empty string if the object is naive). | (empty), +0000, -0400, +1030, +063415, -030712.345216 | (6) | +| %Z | Time zone name (empty string if the object is naive). | (empty), UTC, GMT | (6) | +| %j | Day of the year as a zero-padded decimal number. | 001, 002, …, 366 | (9) | +| %U | Week number of the year (Sunday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0. | 00, 01, …, 53 | (7), (9) | +| %W | Week number of the year (Monday as the first day of the week) as a zero-padded decimal number. All days in a new year preceding the first Monday are considered to be in week 0. | 00, 01, …, 53 | (7), (9) | +| %c | Locale’s appropriate date and time representation. | Tue Aug 16 21:30:00 1988 (en_US); Di 16 Aug 21:30:00 1988 (de_DE) | (1) | +| %x | Locale’s appropriate date representation. | 08/16/88 (None); 08/16/1988 (en_US); 16.08.1988 (de_DE) | (1) | +| %X | Locale’s appropriate time representation. | 21:30:00 (en_US); 21:30:00 (de_DE) | (1) | +| %% | A literal '%' character. | % | | + +
+ +
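+As a quick illustration of the `[time(format_code)]` token and the date codes above, here is a minimal sketch of how such a token could be expanded with Python's `datetime` (`expand_time_tokens` is a hypothetical helper name, not the suite's actual parser):
+
+```
+import re
+import time
+from datetime import datetime
+
+def expand_time_tokens(text):
+    now = datetime.now()
+    # [time(%Y-%m-%d)] -> strftime with the given format code
+    text = re.sub(r"\[time\((.*?)\)\]", lambda m: now.strftime(m.group(1)), text)
+    # bare [time] -> system microtime
+    return text.replace("[time]", str(time.time()))
+
+print(expand_time_tokens("ComfyUI_[time]__[time(%Y-%m-%d__%I-%M%p)]"))
+```
+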
+
+---
+
+# Other Features
+
+### Import AUTOMATIC1111 WebUI Styles
+When using the latest builds of WAS Node Suite a `was_suite_config.json` file will be generated (if it doesn't exist). In this file you can set up an A1111 styles import.
+
+  - Run ComfyUI to generate the new `/custom_nodes/was-node-suite-comfyui/was_suite_config.json` file.
+  - Open the `was_suite_config.json` file with a text editor.
+  - Change the `webui_styles` value from `None` to the path of your A1111 styles file called **styles.csv**. Be sure to use double backslashes for Windows paths.
+    - Example `C:\\python\\stable-diffusion-webui\\styles.csv`
+  - Restart ComfyUI
+  - Select a style with the `Prompt Styles Node`.
+    - The first ASCII output is your positive prompt, and the second ASCII output is your negative prompt.
+
+You can set `webui_styles_persistent_update` to `true` to update the WAS Node Suite styles from WebUI every start of ComfyUI
+
+# Recommended Installation:
+If you're running on Linux, or a non-admin account on Windows, you'll want to ensure `/ComfyUI/custom_nodes`, `was-node-suite-comfyui`, and `WAS_Node_Suite.py` have write permissions.
+
+There is now an **install.bat** you can run to install to portable if detected. Otherwise it will default to system and assume you followed ComfyUI's manual installation steps.
+
+  - Navigate to your `/ComfyUI/custom_nodes/` folder
+  - Run `git clone https://github.com/WASasquatch/was-node-suite-comfyui/`
+  - Navigate to your `was-node-suite-comfyui` folder
+    - Portable/venv:
+      - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
+    - With system python
+      - Run `pip install -r requirements.txt`
+  - Start ComfyUI
+    - WAS Suite should uninstall legacy nodes automatically for you.
+    - Tools will be located in the WAS Suite menu.
+
+## Alternate [Legacy] Installation:
+If you're running on Linux, or a non-admin account on Windows, you'll want to ensure `/ComfyUI/custom_nodes`, and `WAS_Node_Suite.py` have write permissions.
+
+  - Download `WAS_Node_Suite.py`
+  - Move the file to your `/ComfyUI/custom_nodes/` folder
+  - WAS Node Suite will attempt to install dependencies on its own, but you may need to manually do so. The dependencies required are in the `requirements.txt` on this repo. See installation steps above.
+  - If this process fails attempt the following:
+    - Navigate to your `was-node-suite-comfyui` folder
+      - Portable/venv:
+        - Run `path/to/ComfyUI/python_embeded/python.exe -s -m pip install -r requirements.txt`
+      - With system python
+        - Run `pip install -r requirements.txt`
+  - Start, or Restart ComfyUI
+    - WAS Suite should uninstall legacy nodes automatically for you.
+    - Tools will be located in the WAS Suite menu.
+
+This method will not install the resources required for the Image Crop Face node, and you'll have to download the [./res/](https://github.com/WASasquatch/was-node-suite-comfyui/tree/main/res) folder yourself.
+
+## Installing on Colab
+Create a new cell and add the following code, then run the cell. You may need to edit the path to your `custom_nodes` folder. 
You can also use the [colab hosted here](https://colab.research.google.com/github/WASasquatch/comfyui-colab-was-node-suite/blob/main/ComfyUI_%2B_WAS_Node_Suite.ipynb) + + - `!git clone https://github.com/WASasquatch/was-node-suite-comfyui /content/ComfyUI/custom_nodes/was-node-suite-comfyui` + - `!pip install -r /content/ComfyUI/custom_nodes/was-node-suite-comfyui/requirements.txt` + - Restart Colab Runtime (don't disconnect) + - Tools will be located in the WAS Suite menu. diff --git a/custom_nodes/was-node-suite-comfyui/WAS_Node_Suite.py b/custom_nodes/was-node-suite-comfyui/WAS_Node_Suite.py new file mode 100644 index 0000000000000000000000000000000000000000..bfbf230f6205204df2bb1d0a19fdcbbf29da71f9 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/WAS_Node_Suite.py @@ -0,0 +1,13862 @@ +# By WASasquatch (Discord: WAS#0263) +# +# Copyright 2023 Jordan Thompson (WASasquatch) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to +# deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + + +from PIL import Image, ImageFilter, ImageEnhance, ImageOps, ImageDraw, ImageChops, ImageFont +from PIL.PngImagePlugin import PngInfo +from io import BytesIO +from typing import Optional, Union, List +from urllib.request import urlopen +import comfy.diffusers_convert +import comfy.samplers +import comfy.sd +import comfy.utils +import comfy.clip_vision +import comfy.model_management +import folder_paths as comfy_paths +from comfy_extras.chainner_models import model_loading +import glob +import hashlib +import json +import nodes +import math +import numpy as np +from numba import jit +import os +import random +import re +import requests +import socket +import subprocess +import sys +import datetime +import time +import torch +from tqdm import tqdm + +p310_plus = (sys.version_info >= (3, 10)) + +MANIFEST = { + "name": "WAS Node Suite", + "version": (2,2,2), + "author": "WASasquatch", + "project": "https://github.com/WASasquatch/was-node-suite-comfyui", + "description": "An extensive node suite for ComfyUI with over 180 new nodes", +} + +sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "was_node_suite_comfyui")) +sys.path.append(comfy_paths.base_path) + +#! 
SYSTEM HOOKS + +class cstr(str): + class color: + END = '\33[0m' + BOLD = '\33[1m' + ITALIC = '\33[3m' + UNDERLINE = '\33[4m' + BLINK = '\33[5m' + BLINK2 = '\33[6m' + SELECTED = '\33[7m' + + BLACK = '\33[30m' + RED = '\33[31m' + GREEN = '\33[32m' + YELLOW = '\33[33m' + BLUE = '\33[34m' + VIOLET = '\33[35m' + BEIGE = '\33[36m' + WHITE = '\33[37m' + + BLACKBG = '\33[40m' + REDBG = '\33[41m' + GREENBG = '\33[42m' + YELLOWBG = '\33[43m' + BLUEBG = '\33[44m' + VIOLETBG = '\33[45m' + BEIGEBG = '\33[46m' + WHITEBG = '\33[47m' + + GREY = '\33[90m' + LIGHTRED = '\33[91m' + LIGHTGREEN = '\33[92m' + LIGHTYELLOW = '\33[93m' + LIGHTBLUE = '\33[94m' + LIGHTVIOLET = '\33[95m' + LIGHTBEIGE = '\33[96m' + LIGHTWHITE = '\33[97m' + + GREYBG = '\33[100m' + LIGHTREDBG = '\33[101m' + LIGHTGREENBG = '\33[102m' + LIGHTYELLOWBG = '\33[103m' + LIGHTBLUEBG = '\33[104m' + LIGHTVIOLETBG = '\33[105m' + LIGHTBEIGEBG = '\33[106m' + LIGHTWHITEBG = '\33[107m' + + @staticmethod + def add_code(name, code): + if not hasattr(cstr.color, name.upper()): + setattr(cstr.color, name.upper(), code) + else: + raise ValueError(f"'cstr' object already contains a code with the name '{name}'.") + + def __new__(cls, text): + return super().__new__(cls, text) + + def __getattr__(self, attr): + if attr.lower().startswith("_cstr"): + code = getattr(self.color, attr.upper().lstrip("_cstr")) + modified_text = self.replace(f"__{attr[1:]}__", f"{code}") + return cstr(modified_text) + elif attr.upper() in dir(self.color): + code = getattr(self.color, attr.upper()) + modified_text = f"{code}{self}{self.color.END}" + return cstr(modified_text) + elif attr.lower() in dir(cstr): + return getattr(cstr, attr.lower()) + else: + raise AttributeError(f"'cstr' object has no attribute '{attr}'") + + def print(self, **kwargs): + print(self, **kwargs) + +#! MESSAGE TEMPLATES +cstr.color.add_code("msg", f"{cstr.color.BLUE}WAS Node Suite: {cstr.color.END}") +cstr.color.add_code("warning", f"{cstr.color.BLUE}WAS Node Suite {cstr.color.LIGHTYELLOW}Warning: {cstr.color.END}") +cstr.color.add_code("error", f"{cstr.color.RED}WAS Node Suite {cstr.color.END}Error: {cstr.color.END}") + +#! GLOBALS +NODE_FILE = os.path.abspath(__file__) +MIDAS_INSTALLED = False +CUSTOM_NODES_DIR = comfy_paths.folder_names_and_paths["custom_nodes"][0][0] +MODELS_DIR = comfy_paths.models_dir +WAS_SUITE_ROOT = os.path.dirname(NODE_FILE) +WAS_DATABASE = os.path.join(WAS_SUITE_ROOT, 'was_suite_settings.json') +WAS_HISTORY_DATABASE = os.path.join(WAS_SUITE_ROOT, 'was_history.json') +WAS_CONFIG_FILE = os.path.join(WAS_SUITE_ROOT, 'was_suite_config.json') +STYLES_PATH = os.path.join(WAS_SUITE_ROOT, 'styles.json') +ALLOWED_EXT = ('.jpeg', '.jpg', '.png', + '.tiff', '.gif', '.bmp', '.webp') + + +#! 
INSTALLATION CLEANUP
+
+# Delete legacy nodes
+legacy_was_nodes = ['fDOF_WAS.py', 'Image_Blank_WAS.py', 'Image_Blend_WAS.py', 'Image_Canny_Filter_WAS.py', 'Canny_Filter_WAS.py', 'Image_Combine_WAS.py', 'Image_Edge_Detection_WAS.py', 'Image_Film_Grain_WAS.py', 'Image_Filters_WAS.py',
+                    'Image_Flip_WAS.py', 'Image_Nova_Filter_WAS.py', 'Image_Rotate_WAS.py', 'Image_Style_Filter_WAS.py', 'Latent_Noise_Injection_WAS.py', 'Latent_Upscale_WAS.py', 'MiDaS_Depth_Approx_WAS.py', 'NSP_CLIPTextEncoder.py', 'Samplers_WAS.py']
+legacy_was_nodes_found = []
+
+if os.path.basename(CUSTOM_NODES_DIR) == 'was-node-suite-comfyui':
+    legacy_was_nodes.append('WAS_Node_Suite.py')
+
+f_disp = False
+node_path_dir = os.getcwd()+os.sep+'ComfyUI'+os.sep+'custom_nodes'+os.sep
+for f in legacy_was_nodes:
+    file = f'{node_path_dir}{f}'
+    if os.path.exists(file):
+        if not f_disp:
+            cstr("Found legacy nodes. Archiving legacy nodes...").msg.print()
+            f_disp = True
+        legacy_was_nodes_found.append(file)
+if legacy_was_nodes_found:
+    import zipfile
+    from os.path import basename
+    archive = zipfile.ZipFile(
+        f'{node_path_dir}WAS_Legacy_Nodes_Backup_{round(time.time())}.zip', "w")
+    for f in legacy_was_nodes_found:
+        archive.write(f, basename(f))
+        try:
+            os.remove(f)
+        except OSError:
+            pass
+    archive.close()
+if f_disp:
+    cstr("Legacy cleanup complete.").msg.print()
+
+#! WAS SUITE CONFIG
+
+was_conf_template = {
+                    "run_requirements": True,
+                    "suppress_uncomfy_warnings": True,
+                    "show_startup_junk": True,
+                    "show_inspiration_quote": True,
+                    "text_nodes_type": "STRING",
+                    "webui_styles": None,
+                    "webui_styles_persistent_update": True,
+                    "blip_model_url": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth",
+                    "blip_model_vqa_url": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth",
+                    "sam_model_vith_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
+                    "sam_model_vitl_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
+                    "sam_model_vitb_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
+                    "history_display_limit": 36,
+                    "use_legacy_ascii_text": False,
+                    "ffmpeg_bin_path": "/path/to/ffmpeg",
+                    "ffmpeg_extra_codecs": {
+                        "avc1": ".mp4",
+                        "h264": ".mkv",
+                    },
+                    "wildcards_path": os.path.join(WAS_SUITE_ROOT, "wildcards"),
+                    "wildcard_api": True,
+                }
+
+# Create, Load, or Update Config
+
+def getSuiteConfig():
+    global was_conf_template
+    try:
+        with open(WAS_CONFIG_FILE, "r") as f:
+            was_config = json.load(f)
+    except OSError as e:
+        cstr(f"Unable to load conf file at `{WAS_CONFIG_FILE}`. Using internal config template.").error.print()
+        return was_conf_template
+    except Exception as e:
+        cstr(f"Unable to load conf file at `{WAS_CONFIG_FILE}`. Using internal config template.").error.print()
+        return was_conf_template
+    return was_config
+
+def updateSuiteConfig(conf):
+    try:
+        with open(WAS_CONFIG_FILE, "w", encoding='utf-8') as f:
+            json.dump(conf, f, indent=4)
+    except OSError as e:
+        print(e)
+        return False
+    except Exception as e:
+        print(e)
+        return False
+    return True
+
+if not os.path.exists(WAS_CONFIG_FILE):
+    if updateSuiteConfig(was_conf_template):
+        cstr(f'Created default conf file at `{WAS_CONFIG_FILE}`.').msg.print()
+        was_config = getSuiteConfig()
+    else:
+        cstr(f"Unable to create default conf file at `{WAS_CONFIG_FILE}`. 
Using internal config template.").error.print() + was_config = was_conf_template + +else: + was_config = getSuiteConfig() + + update_config = False + for sett_ in was_conf_template.keys(): + if not was_config.__contains__(sett_): + was_config.update({sett_: was_conf_template[sett_]}) + update_config = True + + if update_config: + updateSuiteConfig(was_config) + +# WAS Suite Locations Debug +if was_config.__contains__('show_startup_junk'): + if was_config['show_startup_junk']: + cstr(f"Running At: {NODE_FILE}") + cstr(f"Running From: {WAS_SUITE_ROOT}") + +# Check Write Access +if not os.access(WAS_SUITE_ROOT, os.W_OK) or not os.access(MODELS_DIR, os.W_OK): + cstr(f"There is no write access to `{WAS_SUITE_ROOT}` or `{MODELS_DIR}`. Write access is required!").error.print() + exit + +# SET TEXT TYPE +TEXT_TYPE = "STRING" +if was_config and was_config.__contains__('text_nodes_type'): + if was_config['text_nodes_type'].strip() != '': + TEXT_TYPE = was_config['text_nodes_type'].strip() +if was_config and was_config.__contains__('use_legacy_ascii_text'): + if was_config['use_legacy_ascii_text']: + TEXT_TYPE = "ASCII" + cstr("use_legacy_ascii_text is `True` in `was_suite_config.json`. `ASCII` type is deprecated and the default will be `STRING` in the future.").warning.print() + +# Convert WebUI Styles - TODO: Convert to PromptStyles class +if was_config.__contains__('webui_styles'): + + if was_config['webui_styles'] not in [None, 'None', 'none', '']: + + webui_styles_file = was_config['webui_styles'] + + if was_config.__contains__('webui_styles_persistent_update'): + styles_persist = was_config['webui_styles_persistent_update'] + else: + styles_persist = True + + if webui_styles_file not in [None, 'none', 'None', ''] and os.path.exists(webui_styles_file): + + cstr(f"Importing styles from `{webui_styles_file}`.").msg.print() + + import csv + + styles = {} + with open(webui_styles_file, "r", encoding="utf-8-sig", newline='') as file: + reader = csv.DictReader(file) + for row in reader: + prompt = row.get("prompt") or row.get("text", "") # Old files + negative_prompt = row.get("negative_prompt", "") + styles[row["name"]] = { + "prompt": prompt, + "negative_prompt": negative_prompt + } + + if styles: + if not os.path.exists(STYLES_PATH) or styles_persist: + with open(STYLES_PATH, "w", encoding='utf-8') as f: + json.dump(styles, f, indent=4) + + del styles + + cstr(f"Styles import complete.").msg.print() + + else: + cstr(f"Styles file `{webui_styles_file}` does not exist.").error.print() + + +#! SUITE SPECIFIC CLASSES & FUNCTIONS + +# Freeze PIP modules +def packages(versions=False): + import sys + import subprocess + return [( r.decode().split('==')[0] if not versions else r.decode() ) for r in subprocess.check_output([sys.executable, '-s', '-m', 'pip', 'freeze']).split()] + +def install_package(package, uninstall_first: Union[List[str], str] = None): + if os.getenv("WAS_BLOCK_AUTO_INSTALL", 'False').lower() in ('true', '1', 't'): + cstr(f"Preventing package install of '{package}' due to WAS_BLOCK_INSTALL env").msg.print() + else: + if uninstall_first is None: + return + + if isinstance(uninstall_first, str): + uninstall_first = [uninstall_first] + + cstr(f"Uninstalling {', '.join(uninstall_first)}..") + subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'uninstall', *uninstall_first]) + cstr("Installing package...").msg.print() + subprocess.check_call([sys.executable, '-s', '-m', 'pip', '-q', 'install', package]) + +# Tensor to PIL +def tensor2pil(image): + return Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+# PIL to Tensor
+def pil2tensor(image):
+    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+# PIL Hex
+def pil2hex(image):
+    return hashlib.sha256(np.array(tensor2pil(image)).astype(np.uint16).tobytes()).hexdigest()
+
+# PIL to Mask
+def pil2mask(image):
+    image_np = np.array(image.convert("L")).astype(np.float32) / 255.0
+    mask = torch.from_numpy(image_np)
+    return 1.0 - mask
+
+# Mask to PIL
+def mask2pil(mask):
+    if mask.ndim > 2:
+        mask = mask.squeeze(0)
+    mask_np = mask.cpu().numpy().astype('uint8')
+    mask_pil = Image.fromarray(mask_np, mode="L")
+    return mask_pil
+
+# Tensor to SAM-compatible NumPy
+def tensor2sam(image):
+    # Convert tensor to numpy array in HWC uint8 format with pixel values in [0, 255]
+    sam_image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+    # Transpose the image to HWC format if it's in CHW format
+    if sam_image.shape[0] == 3:
+        sam_image = np.transpose(sam_image, (1, 2, 0))
+    return sam_image
+
+# SAM-compatible NumPy to tensor
+def sam2tensor(image):
+    # Convert the image to float32 and normalize the pixel values to [0, 1]
+    float_image = image.astype(np.float32) / 255.0
+    # Transpose the image from HWC format to CHW format
+    chw_image = np.transpose(float_image, (2, 0, 1))
+    # Convert the numpy array to a tensor
+    tensor_image = torch.from_numpy(chw_image)
+    return tensor_image
+
+# Median Filter
+def medianFilter(img, diameter, sigmaColor, sigmaSpace):
+    import cv2 as cv
+    diameter = int(diameter)
+    sigmaColor = int(sigmaColor)
+    sigmaSpace = int(sigmaSpace)
+    img = img.convert('RGB')
+    img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR)
+    img = cv.bilateralFilter(img, diameter, sigmaColor, sigmaSpace)
+    img = cv.cvtColor(np.array(img), cv.COLOR_BGR2RGB)
+    return Image.fromarray(img).convert('RGB')
+
+# Resize Image
+def resizeImage(image, max_size):
+    width, height = image.size
+    # Default to the original dimensions so images already within max_size are returned unchanged
+    new_width, new_height = width, height
+    if width > height:
+        if width > max_size:
+            new_width = max_size
+            new_height = int(height * (max_size / width))
+    else:
+        if height > max_size:
+            new_height = max_size
+            new_width = int(width * (max_size / height))
+    resized_image = image.resize((new_width, new_height))
+    return resized_image
+
+# Image Seed
+def image2seed(image):
+    image_data = image.tobytes()
+    hash_object = hashlib.sha256(image_data)
+    hash_digest = hash_object.digest()
+    seed = int.from_bytes(hash_digest[:4], byteorder='big')
+    return seed
+
+
+# SHA-256 Hash
+def get_sha256(file_path):
+    sha256_hash = hashlib.sha256()
+    with open(file_path, 'rb') as file:
+        for chunk in iter(lambda: file.read(4096), b''):
+            sha256_hash.update(chunk)
+    return sha256_hash.hexdigest()
+
+# Batch Seed Generator
+def seed_batch(seed, batches, seeds):
+    rng = np.random.default_rng(seed)
+    btch = [rng.choice(2**32 - 1, seeds, replace=False).tolist() for _ in range(batches)]
+    return btch
+
+# Download File
+def download_file(url, filename=None, path=None):
+    if not filename:
+        filename = url.split('/')[-1]
+    if not path:
+        path = '.' 
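+    # Build the destination path, then stream the download in 1 KiB chunks behind a tqdm progress bar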
+ save_path = os.path.join(path, filename) + response = requests.get(url, stream=True) + if response.status_code == requests.codes.ok: + file_size = int(response.headers.get('Content-Length', 0)) + with open(save_path, 'wb') as file: + with tqdm(total=file_size, unit='B', unit_scale=True, unit_divisor=1024) as progress: + for chunk in response.iter_content(chunk_size=1024): + file.write(chunk) + progress.update(len(chunk)) + print(f"Downloaded file saved at: {save_path}") + return True + elif response.status_code == requests.codes.not_found: + cstr("Error: File not found.").error.print() + else: + cstr(f"Error: Failed to download file. Status code: {response.status_code}").error.print() + return False + +# NSP Function + +def nsp_parse(text, seed=0, noodle_key='__', nspterminology=None, pantry_path=None): + if nspterminology is None: + # Fetch the NSP Pantry + if pantry_path is None: + pantry_path = os.path.join(WAS_SUITE_ROOT, 'nsp_pantry.json') + if not os.path.exists(pantry_path): + response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') + tmp_pantry = json.loads(response.read()) + # Dump JSON locally + pantry_serialized = json.dumps(tmp_pantry, indent=4) + with open(pantry_path, "w") as f: + f.write(pantry_serialized) + del response, tmp_pantry + + # Load local pantry + with open(pantry_path, 'r') as f: + nspterminology = json.load(f) + + if seed > 0 or seed < 0: + random.seed(seed) + + # Parse Text + new_text = text + for term in nspterminology: + # Target Noodle + tkey = f'{noodle_key}{term}{noodle_key}' + # How many occurrences? + tcount = new_text.count(tkey) + # Apply random results for each noodle counted + for _ in range(tcount): + new_text = new_text.replace( + tkey, random.choice(nspterminology[term]), 1) + seed += 1 + random.seed(seed) + + return new_text + +# Simple wildcard parser: + +def replace_wildcards(text, seed=None, noodle_key='__'): + + def replace_nested(text, key_path_dict): + if re.findall(f"{noodle_key}(.+?){noodle_key}", text): + for key, file_path in key_path_dict.items(): + with open(file_path, "r", encoding="utf-8") as file: + lines = file.readlines() + if lines: + random_line = None + while not random_line: + line = random.choice(lines).strip() + if not line.startswith('#') and not line.startswith('//'): + random_line = line + text = text.replace(key, random_line) + return text + + conf = getSuiteConfig() + wildcard_dir = os.path.join(WAS_SUITE_ROOT, 'wildcards') + if not os.path.exists(wildcard_dir): + os.makedirs(wildcard_dir, exist_ok=True) + if conf.__contains__('wildcards_path'): + if conf['wildcards_path'] not in [None, ""]: + wildcard_dir = conf['wildcards_path'] + + cstr(f"Wildcard Path: {wildcard_dir}").msg.print() + + # Set the random seed for reproducibility + if seed: + random.seed(seed) + + # Create a dictionary of key to file path pairs + key_path_dict = {} + for root, dirs, files in os.walk(wildcard_dir): + for file in files: + file_path = os.path.join(root, file) + key = os.path.relpath(file_path, wildcard_dir).replace(os.path.sep, "/").rsplit(".", 1)[0] + key_path_dict[f"{noodle_key}{key}{noodle_key}"] = os.path.abspath(file_path) + + # Replace keys in text with random lines from corresponding files + for key, file_path in key_path_dict.items(): + with open(file_path, "r", encoding="utf-8") as file: + lines = file.readlines() + if lines: + random_line = None + while not random_line: + line = random.choice(lines).strip() + if not line.startswith('#') and not line.startswith('//'): + 
random_line = line
+                text = text.replace(key, random_line)
+
+    # Replace nested sub-wildcards in the result
+    text = replace_nested(text, key_path_dict)
+
+    return text
+
+# Parse Prompt Variables
+
+def parse_prompt_vars(input_string, optional_vars=None):
+    variables = optional_vars or {}
+    pattern = r"\$\|(.*?)\|\$"
+    variable_count = len(variables) + 1
+
+    def replace_variable(match):
+        nonlocal variable_count
+        variable_name = f"${variable_count}"
+        variables[variable_name] = match.group(1)
+        variable_count += 1
+        return variable_name
+
+    output_string = re.sub(pattern, replace_variable, input_string)
+
+    for variable_name, phrase in variables.items():
+        variable_pattern = re.escape(variable_name)
+        output_string = re.sub(variable_pattern, phrase, output_string)
+
+    return output_string, variables
+
+# Parse Dynamic Prompts
+
+def parse_dynamic_prompt(prompt, seed):
+    random.seed(seed)
+
+    def replace_match(match):
+        options = match.group(1).split('|')
+        return random.choice(options)
+
+    parse_prompt = re.sub(r'\<(.*?)\>', replace_match, prompt)
+    # Keep resolving until no nested <a|b> groups remain
+    while re.search(r'\<(.*?)\>', parse_prompt):
+        parse_prompt = re.sub(r'\<(.*?)\>', replace_match, parse_prompt)
+
+    return parse_prompt
+
+# Ambient Occlusion Factor
+
+@jit(nopython=True)
+def calculate_ambient_occlusion_factor(rgb_normalized, depth_normalized, height, width, radius):
+    occlusion_array = np.zeros((height, width), dtype=np.uint8)
+
+    for y in range(height):
+        for x in range(width):
+            if radius == 0:
+                occlusion_factor = 0
+            else:
+                y_min = max(y - radius, 0)
+                y_max = min(y + radius + 1, height)
+                x_min = max(x - radius, 0)
+                x_max = min(x + radius + 1, width)
+
+                neighborhood_depth = depth_normalized[y_min:y_max, x_min:x_max]
+                neighborhood_rgb = rgb_normalized[y_min:y_max, x_min:x_max, :]
+
+                depth_diff = depth_normalized[y, x] - neighborhood_depth
+                rgb_diff = np.abs(rgb_normalized[y, x] - neighborhood_rgb)
+                occlusion_factor = np.maximum(0, depth_diff).mean() + np.maximum(0, np.sum(rgb_diff, axis=2)).mean()
+
+            occlusion_value = int(255 - occlusion_factor * 255)
+            occlusion_array[y, x] = occlusion_value
+
+    return occlusion_array
+
+# Direct Occlusion Factor
+
+@jit(nopython=True)
+def calculate_direct_occlusion_factor(rgb_normalized, depth_normalized, height, width, radius):
+    occlusion_array = np.empty((int(height), int(width)), dtype=np.uint8)
+    depth_normalized = depth_normalized[:, :, 0]
+
+    for y in range(int(height)):
+        for x in range(int(width)):
+            if radius == 0:
+                occlusion_factor = 0
+            else:
+                y_min = max(int(y - radius), 0)
+                y_max = min(int(y + radius + 1), int(height))
+                x_min = max(int(x - radius), 0)
+                x_max = min(int(x + radius + 1), int(width))
+
+                neighborhood_depth = np.zeros((y_max - y_min, x_max - x_min), dtype=np.float64)
+                neighborhood_rgb = np.empty((y_max - y_min, x_max - x_min, 3))
+
+                for i in range(y_min, y_max):
+                    for j in range(x_min, x_max):
+                        neighborhood_depth[i - y_min, j - x_min] = depth_normalized[i, j]
+                        neighborhood_rgb[i - y_min, j - x_min, :] = rgb_normalized[i, j, :]
+
+                depth_diff = neighborhood_depth - depth_normalized[y, x]
+                rgb_diff = np.abs(neighborhood_rgb - rgb_normalized[y, x])
+                occlusion_factor = np.maximum(0, depth_diff).mean() + np.maximum(0, np.sum(np.abs(rgb_diff), axis=2)).mean()
+
+            occlusion_value = int(occlusion_factor * 255)
+            occlusion_array[y, x] = occlusion_value
+
+    occlusion_min = np.min(occlusion_array)
+    occlusion_max = np.max(occlusion_array)
+    occlusion_scaled = ((occlusion_array - occlusion_min) / (occlusion_max - occlusion_min) * 255).astype(np.uint8)
+
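+    # Stretch the raw occlusion values to the full 0-255 range before returning.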
return occlusion_scaled + + +class PromptStyles: + def __init__(self, styles_file, preview_length = 32): + self.styles_file = styles_file + self.styles = {} + self.preview_length = preview_length + + if os.path.exists(self.styles_file): + with open(self.styles_file, 'r') as f: + self.styles = json.load(f) + + def add_style(self, prompt="", negative_prompt="", auto=False, name=None): + if auto: + date_format = '%A, %d %B %Y %I:%M %p' + date_str = datetime.datetime.now().strftime(date_format) + key = None + if prompt.strip() != "": + if len(prompt) > self.preview_length: + length = self.preview_length + else: + length = len(prompt) + key = f"[{date_str}] Positive: {prompt[:length]} ..." + elif negative_prompt.strip() != "": + if len(negative_prompt) > self.preview_length: + length = self.preview_length + else: + length = len(negative_prompt) + key = f"[{date_str}] Negative: {negative_prompt[:length]} ..." + else: + cstr("At least a `prompt`, or `negative_prompt` input is required!").error.print() + return + else: + if name == None or str(name).strip() == "": + cstr("A `name` input is required when not using `auto=True`").error.print() + return + key = str(name) + + + for k, v in self.styles.items(): + if v['prompt'] == prompt and v['negative_prompt'] == negative_prompt: + return + + self.styles[key] = {"prompt": prompt, "negative_prompt": negative_prompt} + + with open(self.styles_file, "w", encoding='utf-8') as f: + json.dump(self.styles, f, indent=4) + + def get_prompts(self): + return self.styles + + def get_prompt(self, prompt_key): + if prompt_key in self.styles: + return self.styles[prompt_key]['prompt'], self.styles[prompt_key]['negative_prompt'] + else: + cstr(f"Prompt style `{prompt_key}` was not found!").error.print() + return None, None + + + +# WAS SETTINGS MANAGER + +class WASDatabase: + """ + The WAS Suite Database Class provides a simple key-value database that stores + data in a flatfile using the JSON format. Each key-value pair is associated with + a category. + + Attributes: + filepath (str): The path to the JSON file where the data is stored. + data (dict): The dictionary that holds the data read from the JSON file. + + Methods: + insert(category, key, value): Inserts a key-value pair into the database + under the specified category. + get(category, key): Retrieves the value associated with the specified + key and category from the database. + update(category, key): Update a value associated with the specified + key and category from the database. + delete(category, key): Deletes the key-value pair associated with the + specified key and category from the database. + _save(): Saves the current state of the database to the JSON file. 
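+        catExists(category): Returns whether the specified category exists.
+        keyExists(category, key): Returns whether the key exists within the
+            specified category.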
+ """ + def __init__(self, filepath): + self.filepath = filepath + try: + with open(filepath, 'r') as f: + self.data = json.load(f) + except FileNotFoundError: + self.data = {} + + def catExists(self, category): + return category in self.data + + def keyExists(self, category, key): + return category in self.data and key in self.data[category] + + def insert(self, category, key, value): + if not isinstance(category, str) or not isinstance(key, str): + cstr("Category and key must be strings").error.print() + return + + if category not in self.data: + self.data[category] = {} + self.data[category][key] = value + self._save() + + def update(self, category, key, value): + if category in self.data and key in self.data[category]: + self.data[category][key] = value + self._save() + + def updateCat(self, category, dictionary): + self.data[category].update(dictionary) + self._save() + + def get(self, category, key): + return self.data.get(category, {}).get(key, None) + + def getDB(self): + return self.data + + def insertCat(self, category): + if not isinstance(category, str): + cstr("Category must be a string").error.print() + return + + if category in self.data: + cstr(f"The database category '{category}' already exists!").error.print() + return + self.data[category] = {} + self._save() + + def getDict(self, category): + if category not in self.data: + cstr(f"The database category '{category}' does not exist!").error.print() + return {} + return self.data[category] + + def delete(self, category, key): + if category in self.data and key in self.data[category]: + del self.data[category][key] + self._save() + + def _save(self): + try: + with open(self.filepath, 'w') as f: + json.dump(self.data, f, indent=4) + except FileNotFoundError: + cstr(f"Cannot save database to file '{self.filepath}'. " + "Storing the data in the object instead. Does the folder and node file have write permissions?").warning.print() + except Exception as e: + cstr(f"Error while saving JSON data: {e}").error.print() + +# Initialize the settings database +WDB = WASDatabase(WAS_DATABASE) + +# WAS Token Class + +class TextTokens: + def __init__(self): + self.WDB = WDB + if not self.WDB.getDB().__contains__('custom_tokens'): + self.WDB.insertCat('custom_tokens') + self.custom_tokens = self.WDB.getDict('custom_tokens') + + self.tokens = { + '[time]': str(time.time()).replace('.','_'), + '[hostname]': socket.gethostname(), + '[cuda_device]': str(comfy.model_management.get_torch_device()), + '[cuda_name]': str(comfy.model_management.get_torch_device_name(device=comfy.model_management.get_torch_device())), + } + + if '.' in self.tokens['[time]']: + self.tokens['[time]'] = self.tokens['[time]'].split('.')[0] + + try: + self.tokens['[user]'] = os.getlogin() if os.getlogin() else 'null' + except Exception: + self.tokens['[user]'] = 'null' + + def addToken(self, name, value): + self.custom_tokens.update({name: value}) + self._update() + + def removeToken(self, name): + self.custom_tokens.pop(name) + self._update() + + def format_time(self, format_code): + return time.strftime(format_code, time.localtime(time.time())) + + def parseTokens(self, text): + tokens = self.tokens.copy() + if self.custom_tokens: + tokens.update(self.custom_tokens) + + # Update time + tokens['[time]'] = str(time.time()) + if '.' 
in tokens['[time]']:
+            tokens['[time]'] = tokens['[time]'].split('.')[0]
+
+        for token, value in tokens.items():
+            if token.startswith('[time('):
+                continue
+            pattern = re.compile(re.escape(token))
+            text = pattern.sub(value, text)
+
+        def replace_custom_time(match):
+            format_code = match.group(1)
+            return self.format_time(format_code)
+
+        text = re.sub(r'\[time\((.*?)\)\]', replace_custom_time, text)
+
+        return text
+
+    def _update(self):
+        self.WDB.updateCat('custom_tokens', self.custom_tokens)
+
+
+# Shared history updater; the wrappers below keep their original names because
+# other nodes in the suite call them directly.
+def _update_history_paths(category, new_paths):
+    HDB = WASDatabase(WAS_HISTORY_DATABASE)
+    if isinstance(new_paths, str):
+        new_paths = [new_paths]
+    if HDB.catExists("History") and HDB.keyExists("History", category):
+        saved_paths = HDB.get("History", category)
+        # Filter with a comprehension rather than removing items while
+        # iterating over the same list, which silently skips entries.
+        saved_paths = [path_ for path_ in saved_paths if os.path.exists(path_)]
+        for path_ in new_paths:
+            if path_ in saved_paths:
+                saved_paths.remove(path_)
+            saved_paths.append(path_)
+        HDB.update("History", category, saved_paths)
+    else:
+        if not HDB.catExists("History"):
+            HDB.insertCat("History")
+        HDB.insert("History", category, new_paths)
+
+# Update image history
+
+def update_history_images(new_paths):
+    _update_history_paths("Images", new_paths)
+
+# Update output image history
+
+def update_history_output_images(new_paths):
+    _update_history_paths("Output_Images", new_paths)
+
+# Update text file history
+
+def update_history_text_files(new_paths):
+    _update_history_paths("TextFiles", new_paths)
+
+# WAS Filter Class
+
+class WAS_Tools_Class():
+    """
+    Contains various tools and filters for WAS Node Suite
+    """
+    # TOOLS
+
+    def fig2img(self, plot):
+        import io
+        buf = io.BytesIO()
+        plot.savefig(buf)
+        buf.seek(0)
+        img = Image.open(buf)
+        return img
+
+    def stitch_image(self, image_a, image_b, mode='right', fuzzy_zone=50):
+
+        def linear_gradient(start_color, end_color, size, start,
end, mode='horizontal'): + width, height = size + gradient = Image.new('RGB', (width, height), end_color) + draw = ImageDraw.Draw(gradient) + + for i in range(0, start): + if mode == "horizontal": + draw.line((i, 0, i, height-1), start_color) + elif mode == "vertical": + draw.line((0, i, width-1, i), start_color) + + for i in range(start, end): + if mode == "horizontal": + curr_color = ( + int(start_color[0] + (float(i - start) / (end - start)) * (end_color[0] - start_color[0])), + int(start_color[1] + (float(i - start) / (end - start)) * (end_color[1] - start_color[1])), + int(start_color[2] + (float(i - start) / (end - start)) * (end_color[2] - start_color[2])) + ) + draw.line((i, 0, i, height-1), curr_color) + elif mode == "vertical": + curr_color = ( + int(start_color[0] + (float(i - start) / (end - start)) * (end_color[0] - start_color[0])), + int(start_color[1] + (float(i - start) / (end - start)) * (end_color[1] - start_color[1])), + int(start_color[2] + (float(i - start) / (end - start)) * (end_color[2] - start_color[2])) + ) + draw.line((0, i, width-1, i), curr_color) + + for i in range(end, width if mode == 'horizontal' else height): + if mode == "horizontal": + draw.line((i, 0, i, height-1), end_color) + elif mode == "vertical": + draw.line((0, i, width-1, i), end_color) + + return gradient + + image_a = image_a.convert('RGB') + image_b = image_b.convert('RGB') + + offset = int(fuzzy_zone / 2) + canvas_width = int(image_a.size[0] + image_b.size[0] - fuzzy_zone) if mode == 'right' or mode == 'left' else image_a.size[0] + canvas_height = int(image_a.size[1] + image_b.size[1] - fuzzy_zone) if mode == 'top' or mode == 'bottom' else image_a.size[1] + canvas = Image.new('RGB', (canvas_width, canvas_height), (0,0,0)) + + im_ax = 0 + im_ay = 0 + im_bx = 0 + im_by = 0 + + image_a_mask = None + image_b_mask = None + + if mode == 'top': + + image_a_mask = linear_gradient((0,0,0), (255,255,255), image_a.size, 0, fuzzy_zone, 'vertical') + image_b_mask = linear_gradient((255,255,255), (0,0,0), image_b.size, int(image_b.size[1] - fuzzy_zone), image_b.size[1], 'vertical') + im_ay = image_b.size[1] - fuzzy_zone + + elif mode == 'bottom': + + image_a_mask = linear_gradient((255,255,255), (0,0,0), image_a.size, int(image_a.size[1] - fuzzy_zone), image_a.size[1], 'vertical') + image_b_mask = linear_gradient((0,0,0), (255,255,255), image_b.size, 0, fuzzy_zone, 'vertical').convert('L') + im_by = image_a.size[1] - fuzzy_zone + + elif mode == 'left': + + image_a_mask = linear_gradient((0,0,0), (255,255,255), image_a.size, 0, fuzzy_zone, 'horizontal') + image_b_mask = linear_gradient((255,255,255), (0,0,0), image_b.size, int(image_b.size[0] - fuzzy_zone), image_b.size[0], 'horizontal') + im_ax = image_b.size[0] - fuzzy_zone + + elif mode == 'right': + + image_a_mask = linear_gradient((255,255,255), (0,0,0), image_a.size, int(image_a.size[0] - fuzzy_zone), image_a.size[0], 'horizontal') + image_b_mask = linear_gradient((0,0,0), (255,255,255), image_b.size, 0, fuzzy_zone, 'horizontal') + im_bx = image_a.size[0] - fuzzy_zone + + Image.Image.paste(canvas, image_a, (im_ax, im_ay), image_a_mask.convert('L')) + Image.Image.paste(canvas, image_b, (im_bx, im_by), image_b_mask.convert('L')) + + + return canvas + + + def morph_images(self, images, steps=10, max_size=512, loop=None, still_duration=30, duration=0.1, output_path='output', filename="morph", filetype="GIF"): + + import cv2 + import imageio + + output_file = os.path.abspath(os.path.join(os.path.join(*output_path.split('/')), filename)) + output_file += 
( '.png' if filetype == 'APNG' else '.gif' ) + + max_width = max(im.size[0] for im in images) + max_height = max(im.size[1] for im in images) + max_aspect_ratio = max_width / max_height + + def padded_images(): + for im in images: + aspect_ratio = im.size[0] / im.size[1] + if aspect_ratio > max_aspect_ratio: + new_height = int(max_width / aspect_ratio) + padding = (max_height - new_height) // 2 + padded_im = Image.new('RGB', (max_width, max_height), color=(0, 0, 0)) + padded_im.paste(im.resize((max_width, new_height)), (0, padding)) + else: + new_width = int(max_height * aspect_ratio) + padding = (max_width - new_width) // 2 + padded_im = Image.new('RGB', (max_width, max_height), color=(0, 0, 0)) + padded_im.paste(im.resize((new_width, max_height)), (padding, 0)) + yield np.array(padded_im) + + padded_images = list(padded_images()) + padded_images.append(padded_images[0].copy()) + images = padded_images + frames = [] + durations = [] + + for i in range(len(images)-1): + frames.append(Image.fromarray(images[i]).convert('RGB')) + durations.append(still_duration) + + for j in range(steps): + alpha = j / float(steps) + morph = cv2.addWeighted(images[i], 1 - alpha, images[i+1], alpha, 0) + frames.append(Image.fromarray(morph).convert('RGB')) + durations.append(duration) + + frames.append(Image.fromarray(images[-1]).convert('RGB')) + durations.insert(0, still_duration) + + if loop is not None: + for i in range(loop): + durations.insert(0, still_duration) + durations.append(still_duration) + + try: + imageio.mimsave(output_file, frames, filetype, duration=durations, loop=loop) + except OSError as e: + cstr(f"Unable to save output to {output_file} due to the following error:").error.print() + print(e) + return + except Exception as e: + cstr(f"\033[34mWAS NS\033[0m Error: Unable to generate GIF due to the following error:").error.print() + print(e) + + cstr(f"Morphing completed. 
Output saved as {output_file}").msg.print() + + return output_file + + class GifMorphWriter: + def __init__(self, transition_frames=30, duration_ms=100, still_image_delay_ms=2500, loop=0): + self.transition_frames = transition_frames + self.duration_ms = duration_ms + self.still_image_delay_ms = still_image_delay_ms + self.loop = loop + + def write(self, image, gif_path): + + import cv2 + + if not os.path.isfile(gif_path): + with Image.new("RGBA", image.size) as new_gif: + new_gif.paste(image.convert("RGBA")) + new_gif.info["duration"] = self.still_image_delay_ms + new_gif.save(gif_path, format="GIF", save_all=True, append_images=[], duration=self.still_image_delay_ms, loop=0) + cstr(f"Created new GIF animation at: {gif_path}").msg.print() + else: + with Image.open(gif_path) as gif: + n_frames = gif.n_frames + if n_frames > 0: + gif.seek(n_frames - 1) + last_frame = gif.copy() + else: + last_frame = None + + end_image = image + steps = self.transition_frames - 1 if last_frame is not None else self.transition_frames + + if last_frame is not None: + image = self.pad_to_size(image, last_frame.size) + + frames = self.generate_transition_frames(last_frame, image, steps) + + still_frame = end_image.copy() + + gif_frames = [] + for i in range(n_frames): + gif.seek(i) + gif_frame = gif.copy() + gif_frames.append(gif_frame) + + for frame in frames: + frame.info["duration"] = self.duration_ms + gif_frames.append(frame) + + still_frame.info['duration'] = self.still_image_delay_ms + gif_frames.append(still_frame) + + gif_frames[0].save( + gif_path, + format="GIF", + save_all=True, + append_images=gif_frames[1:], + optimize=True, + loop=self.loop, + ) + + cstr(f"Edited existing GIF animation at: {gif_path}").msg.print() + + + def pad_to_size(self, image, size): + new_image = Image.new("RGBA", size, color=(0, 0, 0, 0)) + x_offset = (size[0] - image.width) // 2 + y_offset = (size[1] - image.height) // 2 + new_image.paste(image, (x_offset, y_offset)) + return new_image + + def generate_transition_frames(self, start_frame, end_image, num_frames): + + if start_frame is None: + return [] + + start_frame = start_frame.convert("RGBA") + end_image = end_image.convert("RGBA") + + frames = [] + for i in range(1, num_frames + 1): + weight = i / (num_frames + 1) + frame = Image.blend(start_frame, end_image, weight) + frames.append(frame) + return frames + + class VideoWriter: + def __init__(self, transition_frames=30, fps=25, still_image_delay_sec=2, + max_size=512, codec="mp4v"): + conf = getSuiteConfig() + self.transition_frames = transition_frames + self.fps = fps + self.still_image_delay_frames = round(still_image_delay_sec * fps) + self.max_size = int(max_size) + self.valid_codecs = ["ffv1","mp4v"] + self.extensions = {"ffv1":".mkv","mp4v":".mp4"} + if conf.__contains__('ffmpeg_extra_codecs'): + self.add_codecs(conf['ffmpeg_extra_codecs']) + self.codec = codec.lower() if codec.lower() in self.valid_codecs else "mp4v" + + def write(self, image, video_path): + video_path += self.extensions[self.codec] + end_image = self.rescale(self.pil2cv(image), self.max_size) + + if os.path.isfile(video_path): + cap = cv2.VideoCapture(video_path) + + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = int(cap.get(cv2.CAP_PROP_FPS)) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + if width <= 0 or height <= 0: + raise ValueError("Invalid video dimensions") + + temp_file_path = video_path.replace(self.extensions[self.codec], '_temp' + self.extensions[self.codec]) 
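+            # Write frames to a sibling temp file first and swap it in at the
+            # end, so a failure partway through cannot corrupt the original.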
+ fourcc = cv2.VideoWriter_fourcc(*self.codec) + out = cv2.VideoWriter(temp_file_path, fourcc, fps, (width, height), isColor=True) + + for i in tqdm(range(total_frames), desc="Copying original frames"): + ret, frame = cap.read() + if not ret: + break + out.write(frame) + + if self.transition_frames > 0: + cap.set(cv2.CAP_PROP_POS_FRAMES, total_frames - 1) + ret, last_frame = cap.read() + if ret: + transition_frames = self.generate_transition_frames(last_frame, end_image, self.transition_frames) + for i, transition_frame in tqdm(enumerate(transition_frames), desc="Generating transition frames", total=self.transition_frames): + try: + transition_frame_resized = cv2.resize(transition_frame, (width, height)) + out.write(transition_frame_resized) + except cv2.error as e: + print(f"Error resizing frame {i}: {e}") + continue + + for i in tqdm(range(self.still_image_delay_frames), desc="Adding new frames"): + out.write(end_image) + + cap.release() + out.release() + + os.remove(video_path) + os.rename(temp_file_path, video_path) + + cstr(f"Edited video at: {video_path}").msg.print() + + return video_path + + else: + fourcc = cv2.VideoWriter_fourcc(*self.codec) + height, width, _ = end_image.shape + if width <= 0 or height <= 0: + raise ValueError("Invalid image dimensions") + + out = cv2.VideoWriter(video_path, fourcc, self.fps, (width, height), isColor=True) + + for i in tqdm(range(self.still_image_delay_frames), desc="Adding new frames"): + out.write(end_image) + + out.release() + + cstr(f"Created new video at: {video_path}").msg.print() + + return video_path + + return "" + + def create_video(self, image_folder, video_path): + import cv2 + from tqdm import tqdm + + image_paths = sorted([os.path.join(image_folder, f) for f in os.listdir(image_folder) + if os.path.isfile(os.path.join(image_folder, f)) + and os.path.join(image_folder, f).lower().endswith(ALLOWED_EXT)]) + + if len(image_paths) == 0: + cstr(f"No valid image files found in `{image_folder}` directory.").error.print() + cstr(f"The valid formats are: {', '.join(sorted(ALLOWED_EXT))}").error.print() + return + + output_file = video_path + self.extensions[self.codec] + image = self.rescale(cv2.imread(image_paths[0]), self.max_size) + height, width = image.shape[:2] + fourcc = cv2.VideoWriter_fourcc(*self.codec) + out = cv2.VideoWriter(output_file, fourcc, self.fps, (width, height), isColor=True) + out.write(image) + for _ in range(self.still_image_delay_frames - 1): + out.write(image) + + for i in tqdm(range(len(image_paths)), desc="Writing video frames"): + start_frame = cv2.imread(image_paths[i]) + end_frame = None + if i+1 <= len(image_paths)-1: + end_frame = self.rescale(cv2.imread(image_paths[i+1]), self.max_size) + + if isinstance(end_frame, np.ndarray): + transition_frames = self.generate_transition_frames(start_frame, end_frame, self.transition_frames) + transition_frames = [cv2.resize(frame, (width, height)) for frame in transition_frames] + for _, frame in enumerate(transition_frames): + out.write(frame) + + for _ in range(self.still_image_delay_frames - self.transition_frames): + out.write(end_frame) + + else: + out.write(start_frame) + for _ in range(self.still_image_delay_frames - 1): + out.write(start_frame) + + out.release() + + if os.path.exists(output_file): + cstr(f"Created video at: {output_file}").msg.print() + return output_file + else: + cstr(f"Unable to create video at: {output_file}").error.print() + return "" + + def extract(self, video_file, output_folder, prefix='frame_', extension="png", zero_padding_digits=-1): 
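+        """
+        Dump every frame of `video_file` into `output_folder` as
+        `<prefix><frame_number>.<extension>` images; frame numbers are
+        zero-padded to `zero_padding_digits` digits when that value is > 0.
+
+        Hypothetical usage:
+            WAS_Tools_Class.VideoWriter().extract('clip.mp4', 'frames/', zero_padding_digits=5)
+        """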
+ os.makedirs(output_folder, exist_ok=True) + + video = cv2.VideoCapture(video_file) + + fps = video.get(cv2.CAP_PROP_FPS) + frame_number = 0 + + while True: + success, frame = video.read() + + if success: + if zero_padding_digits > 0: + frame_path = os.path.join(output_folder, f"{prefix}{frame_number:0{zero_padding_digits}}.{extension}") + else: + frame_path = os.path.join(output_folder, f"{prefix}{frame_number}.{extension}") + + cv2.imwrite(frame_path, frame) + print(f"Saved frame {frame_number} to {frame_path}") + frame_number += 1 + else: + break + + video.release() + + def rescale(self, image, max_size): + f1 = max_size / image.shape[1] + f2 = max_size / image.shape[0] + f = min(f1, f2) + dim = (int(image.shape[1] * f), int(image.shape[0] * f)) + resized = cv2.resize(image, dim) + return resized + + def generate_transition_frames(self, img1, img2, num_frames): + import cv2 + if img1 is None and img2 is None: + return [] + + if img1 is not None and img2 is not None: + if img1.shape != img2.shape: + img2 = cv2.resize(img2, img1.shape[:2][::-1]) + elif img1 is not None: + img2 = np.zeros_like(img1) + else: + img1 = np.zeros_like(img2) + + height, width, _ = img2.shape + + frame_sequence = [] + for i in range(num_frames): + alpha = i / float(num_frames) + blended = cv2.addWeighted(img1, 1 - alpha, img2, alpha, + gamma=0.0, dtype=cv2.CV_8U) + frame_sequence.append(blended) + + return frame_sequence + + def pil2cv(self, img): + import cv2 + img = np.array(img) + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + return img + + def add_codecs(self, codecs): + if isinstance(codecs, dict): + codec_forcc_codes = codecs.keys() + self.valid_codecs.extend(codec_forcc_codes) + self.extensions.update(codecs) + + def get_codecs(self): + return self.valid_codecs + + + # FILTERS + + class Masking: + + @staticmethod + def crop_dominant_region(image, padding=0): + from scipy.ndimage import label + grayscale_image = image.convert("L") + binary_image = grayscale_image.point(lambda x: 255 if x > 128 else 0, mode="1") + labeled_image, num_labels = label(np.array(binary_image)) + largest_label = max(range(1, num_labels + 1), key=lambda i: np.sum(labeled_image == i)) + largest_region_mask = (labeled_image == largest_label).astype(np.uint8) * 255 + bbox = Image.fromarray(largest_region_mask, mode="L").getbbox() + cropped_image = image.crop(bbox) + size = max(cropped_image.size) + padded_size = size + 2 * padding + centered_crop = Image.new("L", (padded_size, padded_size), color="black") + left = (padded_size - cropped_image.width) // 2 + top = (padded_size - cropped_image.height) // 2 + centered_crop.paste(cropped_image, (left, top), mask=cropped_image) + + return ImageOps.invert(centered_crop) + + @staticmethod + def crop_minority_region(image, padding=0): + from scipy.ndimage import label + grayscale_image = image.convert("L") + binary_image = grayscale_image.point(lambda x: 255 if x > 128 else 0, mode="1") + labeled_image, num_labels = label(np.array(binary_image)) + smallest_label = min(range(1, num_labels + 1), key=lambda i: np.sum(labeled_image == i)) + smallest_region_mask = (labeled_image == smallest_label).astype(np.uint8) * 255 + bbox = Image.fromarray(smallest_region_mask, mode="L").getbbox() + cropped_image = image.crop(bbox) + size = max(cropped_image.size) + padded_size = size + 2 * padding + centered_crop = Image.new("L", (padded_size, padded_size), color="black") + left = (padded_size - cropped_image.width) // 2 + top = (padded_size - cropped_image.height) // 2 + centered_crop.paste(cropped_image, 
(left, top), mask=cropped_image) + + return ImageOps.invert(centered_crop) + + @staticmethod + def crop_region(mask, region_type, padding=0): + from scipy.ndimage import label, find_objects + binary_mask = np.array(mask.convert("L")) > 0 + bbox = mask.getbbox() + if bbox is None: + return mask, (mask.size, (0, 0, 0, 0)) + + bbox_width = bbox[2] - bbox[0] + bbox_height = bbox[3] - bbox[1] + + side_length = max(bbox_width, bbox_height) + 2 * padding + + center_x = (bbox[2] + bbox[0]) // 2 + center_y = (bbox[3] + bbox[1]) // 2 + + crop_x = center_x - side_length // 2 + crop_y = center_y - side_length // 2 + + crop_x = max(crop_x, 0) + crop_y = max(crop_y, 0) + crop_x2 = min(crop_x + side_length, mask.width) + crop_y2 = min(crop_y + side_length, mask.height) + + cropped_mask = mask.crop((crop_x, crop_y, crop_x2, crop_y2)) + crop_data = (cropped_mask.size, (crop_x, crop_y, crop_x2, crop_y2)) + + return cropped_mask, crop_data + + @staticmethod + def dominant_region(image, threshold=128): + from scipy.ndimage import label + image = ImageOps.invert(image.convert("L")) + binary_image = image.point(lambda x: 255 if x > threshold else 0, mode="1") + l, n = label(np.array(binary_image)) + sizes = np.bincount(l.flatten()) + dominant = 0 + try: + dominant = np.argmax(sizes[1:]) + 1 + except ValueError: + pass + dominant_region_mask = (l == dominant).astype(np.uint8) * 255 + result = Image.fromarray(dominant_region_mask, mode="L") + return result.convert("RGB") + + @staticmethod + def minority_region(image, threshold=128): + from scipy.ndimage import label + image = image.convert("L") + binary_image = image.point(lambda x: 255 if x > threshold else 0, mode="1") + labeled_array, num_features = label(np.array(binary_image)) + sizes = np.bincount(labeled_array.flatten()) + smallest_region = 0 + try: + smallest_region = np.argmin(sizes[1:]) + 1 + except ValueError: + pass + smallest_region_mask = (labeled_array == smallest_region).astype(np.uint8) * 255 + inverted_mask = Image.fromarray(smallest_region_mask, mode="L") + rgb_image = Image.merge("RGB", [inverted_mask, inverted_mask, inverted_mask]) + + return rgb_image + + @staticmethod + def arbitrary_region(image, size, threshold=128): + from skimage.measure import label, regionprops + image = image.convert("L") + binary_image = image.point(lambda x: 255 if x > threshold else 0, mode="1") + labeled_image = label(np.array(binary_image)) + regions = regionprops(labeled_image) + + image_area = binary_image.size[0] * binary_image.size[1] + scaled_size = size * image_area / 10000 + + filtered_regions = [region for region in regions if region.area >= scaled_size] + if len(filtered_regions) > 0: + filtered_regions.sort(key=lambda region: region.area) + smallest_region = filtered_regions[0] + region_mask = (labeled_image == smallest_region.label).astype(np.uint8) * 255 + result = Image.fromarray(region_mask, mode="L") + return result + + return image + + @staticmethod + def smooth_region(image, tolerance): + from scipy.ndimage import gaussian_filter + image = image.convert("L") + mask_array = np.array(image) + smoothed_array = gaussian_filter(mask_array, sigma=tolerance) + threshold = np.max(smoothed_array) / 2 + smoothed_mask = np.where(smoothed_array >= threshold, 255, 0).astype(np.uint8) + smoothed_image = Image.fromarray(smoothed_mask, mode="L") + return ImageOps.invert(smoothed_image.convert("RGB")) + + @staticmethod + def erode_region(image, iterations=1): + from scipy.ndimage import binary_erosion + image = image.convert("L") + binary_mask = np.array(image) 
> 0
+        eroded_mask = binary_erosion(binary_mask, iterations=iterations)
+        eroded_image = Image.fromarray(eroded_mask.astype(np.uint8) * 255, mode="L")
+        return ImageOps.invert(eroded_image.convert("RGB"))
+
+    @staticmethod
+    def dilate_region(image, iterations=1):
+        from scipy.ndimage import binary_dilation
+        image = image.convert("L")
+        binary_mask = np.array(image) > 0
+        dilated_mask = binary_dilation(binary_mask, iterations=iterations)
+        dilated_image = Image.fromarray(dilated_mask.astype(np.uint8) * 255, mode="L")
+        return ImageOps.invert(dilated_image.convert("RGB"))
+
+    @staticmethod
+    def fill_region(image):
+        from scipy.ndimage import binary_fill_holes
+        image = image.convert("L")
+        binary_mask = np.array(image) > 0
+        filled_mask = binary_fill_holes(binary_mask)
+        filled_image = Image.fromarray(filled_mask.astype(np.uint8) * 255, mode="L")
+        return ImageOps.invert(filled_image.convert("RGB"))
+
+    @staticmethod
+    def combine_masks(*masks):
+        if len(masks) < 1:
+            raise ValueError("\033[34mWAS NS\033[0m Error: At least one mask must be provided.")
+        dimensions = masks[0].size
+        for mask in masks:
+            if mask.size != dimensions:
+                raise ValueError("\033[34mWAS NS\033[0m Error: All masks must have the same dimensions.")
+
+        inverted_masks = [mask.convert("L") for mask in masks]
+        combined_mask = Image.new("L", dimensions, 255)
+        for mask in inverted_masks:
+            combined_mask = Image.fromarray(np.minimum(np.array(combined_mask), np.array(mask)), mode="L")
+
+        return combined_mask
+
+    @staticmethod
+    def threshold_region(image, black_threshold=0, white_threshold=255):
+        gray_image = image.convert("L")
+        mask_array = np.array(gray_image)
+        mask_array[mask_array < black_threshold] = 0
+        mask_array[mask_array > white_threshold] = 255
+        thresholded_image = Image.fromarray(mask_array, mode="L")
+        return ImageOps.invert(thresholded_image)
+
+    @staticmethod
+    def floor_region(image):
+        gray_image = image.convert("L")
+        mask_array = np.array(gray_image)
+        non_black_pixels = mask_array[mask_array > 0]
+
+        if non_black_pixels.size > 0:
+            threshold_value = non_black_pixels.min()
+            mask_array[mask_array > threshold_value] = 255  # Set whites to 255
+            mask_array[mask_array <= threshold_value] = 0  # Set blacks to 0
+
+        thresholded_image = Image.fromarray(mask_array, mode="L")
+        return ImageOps.invert(thresholded_image)
+
+    @staticmethod
+    def ceiling_region(image, offset=30):
+        if offset < 0:
+            offset = 0
+        elif offset > 255:
+            offset = 255
+        grayscale_image = image.convert("L")
+        mask_array = np.array(grayscale_image)
+        mask_array[mask_array < 255 - offset] = 0
+        mask_array[mask_array >= 250] = 255
+        filtered_image = Image.fromarray(mask_array, mode="L")
+        return ImageOps.invert(filtered_image)
+
+    @staticmethod
+    def gaussian_region(image, radius=5.0):
+        image = ImageOps.invert(image.convert("L"))
+        image = image.filter(ImageFilter.GaussianBlur(radius=int(radius)))
+        return image.convert("RGB")
+
+    # SHADOWS AND HIGHLIGHTS ADJUSTMENTS
+
+    def shadows_and_highlights(self, image, shadow_thresh=30, highlight_thresh=220, shadow_factor=0.5, highlight_factor=1.5, shadow_smooth=None, highlight_smooth=None, simplify_masks=None):
+
+        if 'pilgram' not in packages():
+            install_package('pilgram')
+
+        import pilgram
+
+        alpha = None
+        if image.mode.endswith('A'):
+            alpha = image.getchannel('A')
+            image = image.convert('RGB')
+
+        grays = image.convert('L')
+
+        # Parenthesized so the `and` does not bind only to the second test,
+        # which would pass a None `simplify_masks` to float() below.
+        if (shadow_smooth is not None or highlight_smooth is not None) and simplify_masks is not None:
+            simplify = float(simplify_masks)
+            grays = grays.filter(ImageFilter.GaussianBlur(radius=simplify))
+
+        shadow_mask = Image.eval(grays, lambda x: 255 if x < shadow_thresh else 0)
+        highlight_mask = Image.eval(grays, lambda x: 255 if x > highlight_thresh else 0)
+
+        image_shadow = image.copy()
+        image_highlight = image.copy()
+
+        # Smooth each mask once; the original applied the same blur twice in a row.
+        if shadow_smooth is not None:
+            shadow_mask = shadow_mask.filter(ImageFilter.GaussianBlur(radius=shadow_smooth))
+        if highlight_smooth is not None:
+            highlight_mask = highlight_mask.filter(ImageFilter.GaussianBlur(radius=highlight_smooth))
+
+        image_shadow = Image.eval(image_shadow, lambda x: x * shadow_factor)
+        image_highlight = Image.eval(image_highlight, lambda x: x * highlight_factor)
+
+        result = image.copy()
+        result.paste(image_shadow, shadow_mask)
+        result.paste(image_highlight, highlight_mask)
+        result = pilgram.css.blending.color(result, image)
+
+        if alpha:
+            result.putalpha(alpha)
+
+        return (result, shadow_mask, highlight_mask)
+
+    # DRAGAN PHOTOGRAPHY FILTER
+
+    def dragan_filter(self, image, saturation=1, contrast=1, sharpness=1, brightness=1, highpass_radius=3, highpass_samples=1, highpass_strength=1, colorize=True):
+
+        if 'pilgram' not in packages():
+            install_package('pilgram')
+
+        import pilgram
+
+        alpha = None
+        if image.mode == 'RGBA':
+            alpha = image.getchannel('A')
+
+        grayscale_image = image if image.mode == 'L' else image.convert('L')
+
+        contrast_enhancer = ImageEnhance.Contrast(grayscale_image)
+        contrast_image = contrast_enhancer.enhance(contrast)
+
+        saturation_enhancer = ImageEnhance.Color(contrast_image) if image.mode != 'L' else None
+        saturation_image = contrast_image if saturation_enhancer is None else saturation_enhancer.enhance(saturation)
+
+        sharpness_enhancer = ImageEnhance.Sharpness(saturation_image)
+        sharpness_image = sharpness_enhancer.enhance(sharpness)
+
+        brightness_enhancer = ImageEnhance.Brightness(sharpness_image)
+        brightness_image = brightness_enhancer.enhance(brightness)
+
+        # GaussianBlur requires a non-negative radius; the blurred copy is
+        # subtracted from the source to build the high-pass layer.
+        blurred_image = brightness_image.filter(ImageFilter.GaussianBlur(radius=highpass_radius))
+        highpass_filter = ImageChops.subtract(image, blurred_image.convert('RGB'))
+        blank_image = Image.new('RGB', image.size, (127, 127, 127))
+        highpass_image = ImageChops.screen(blank_image, highpass_filter.resize(image.size))
+        if not colorize:
+            highpass_image = highpass_image.convert('L').convert('RGB')
+        highpassed_image = pilgram.css.blending.overlay(brightness_image.convert('RGB'), highpass_image)
+        for _ in range((highpass_samples if highpass_samples > 0 else 1)):
+            highpassed_image = pilgram.css.blending.overlay(highpassed_image, highpass_image)
+
+        final_image = ImageChops.blend(brightness_image.convert('RGB'), highpassed_image, highpass_strength)
+
+        if colorize:
+            final_image = pilgram.css.blending.color(final_image, image)
+
+        if alpha:
+            final_image.putalpha(alpha)
+
+        return final_image
+
+    def sparkle(self, image):
+
+        if 'pilgram' not in packages():
+            install_package('pilgram')
+
+        import pilgram
+
+        image = image.convert('RGBA')
+        contrast_enhancer = ImageEnhance.Contrast(image)
+        image = contrast_enhancer.enhance(1.25)
+        saturation_enhancer = ImageEnhance.Color(image)
+        image = saturation_enhancer.enhance(1.5)
+
+        bloom = image.filter(ImageFilter.GaussianBlur(radius=20))
+        bloom = ImageEnhance.Brightness(bloom).enhance(1.2)
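+        # Composite the blurred bloom layer back over the base image at 50% alpha.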
bloom.putalpha(128) + bloom = bloom.convert(image.mode) + image = Image.alpha_composite(image, bloom) + + width, height = image.size + + particles = Image.new('RGBA', (width, height), (0, 0, 0, 0)) + draw = ImageDraw.Draw(particles) + for i in range(5000): + x = random.randint(0, width) + y = random.randint(0, height) + r = random.randint(0, 255) + g = random.randint(0, 255) + b = random.randint(0, 255) + draw.point((x, y), fill=(r, g, b, 255)) + particles = particles.filter(ImageFilter.GaussianBlur(radius=1)) + particles.putalpha(128) + + particles2 = Image.new('RGBA', (width, height), (0, 0, 0, 0)) + draw = ImageDraw.Draw(particles2) + for i in range(5000): + x = random.randint(0, width) + y = random.randint(0, height) + r = random.randint(0, 255) + g = random.randint(0, 255) + b = random.randint(0, 255) + draw.point((x, y), fill=(r, g, b, 255)) + particles2 = particles2.filter(ImageFilter.GaussianBlur(radius=1)) + particles2.putalpha(128) + + image = pilgram.css.blending.color_dodge(image, particles) + image = pilgram.css.blending.lighten(image, particles2) + + return image + + def digital_distortion(self, image, amplitude=5, line_width=2): + + im = np.array(image) + + x, y, z = im.shape + sine_wave = amplitude * np.sin(np.linspace(-np.pi, np.pi, y)) + sine_wave = sine_wave.astype(int) + + left_distortion = np.zeros((x, y, z), dtype=np.uint8) + right_distortion = np.zeros((x, y, z), dtype=np.uint8) + for i in range(y): + left_distortion[:, i, :] = np.roll(im[:, i, :], -sine_wave[i], axis=0) + right_distortion[:, i, :] = np.roll(im[:, i, :], sine_wave[i], axis=0) + + distorted_image = np.maximum(left_distortion, right_distortion) + scan_lines = np.zeros((x, y), dtype=np.float32) + scan_lines[::line_width, :] = 1 + scan_lines = np.minimum(scan_lines * amplitude*50.0, 1) # Scale scan line values + scan_lines = np.tile(scan_lines[:, :, np.newaxis], (1, 1, z)) # Add channel dimension + distorted_image = np.where(scan_lines > 0, np.random.permutation(im), distorted_image) + distorted_image = np.roll(distorted_image, np.random.randint(0, y), axis=1) + + distorted_image = Image.fromarray(distorted_image) + + return distorted_image + + def signal_distortion(self, image, amplitude): + + img_array = np.array(image) + row_shifts = np.random.randint(-amplitude, amplitude + 1, size=img_array.shape[0]) + distorted_array = np.zeros_like(img_array) + + for y in range(img_array.shape[0]): + x_shift = row_shifts[y] + x_shift = x_shift + y % (amplitude * 2) - amplitude + distorted_array[y,:] = np.roll(img_array[y,:], x_shift, axis=0) + + distorted_image = Image.fromarray(distorted_array) + + return distorted_image + + def tv_vhs_distortion(self, image, amplitude=10): + np_image = np.array(image) + offset_variance = int(image.height / amplitude) + row_shifts = np.random.randint(-offset_variance, offset_variance + 1, size=image.height) + distorted_array = np.zeros_like(np_image) + + for y in range(np_image.shape[0]): + x_shift = row_shifts[y] + x_shift = x_shift + y % (offset_variance * 2) - offset_variance + distorted_array[y,:] = np.roll(np_image[y,:], x_shift, axis=0) + + h, w, c = distorted_array.shape + x_scale = np.linspace(0, 1, w) + y_scale = np.linspace(0, 1, h) + x_idx = np.broadcast_to(x_scale, (h, w)) + y_idx = np.broadcast_to(y_scale.reshape(h, 1), (h, w)) + noise = np.random.rand(h, w, c) * 0.1 + distortion = np.sin(x_idx * 50) * 0.5 + np.sin(y_idx * 50) * 0.5 + distorted_array = distorted_array + distortion[:, :, np.newaxis] + noise + + distorted_image = 
Image.fromarray(np.uint8(distorted_array)) + distorted_image = distorted_image.resize((image.width, image.height)) + + image_enhance = ImageEnhance.Color(image) + image = image_enhance.enhance(0.5) + + effect_image = ImageChops.overlay(image, distorted_image) + result_image = ImageChops.overlay(image, effect_image) + result_image = ImageChops.blend(image, result_image, 0.25) + + return result_image + + def gradient(self, size, mode='horizontal', colors=None, tolerance=0): + + if isinstance(colors, str): + colors = json.loads(colors) + + if colors is None: + colors = {0: [255, 0, 0], 50: [0, 255, 0], 100: [0, 0, 255]} + + colors = {int(k): [int(c) for c in v] for k, v in colors.items()} + + colors[0] = colors[min(colors.keys())] + colors[255] = colors[max(colors.keys())] + + img = Image.new('RGB', size, color=(0, 0, 0)) + + color_stop_positions = sorted(colors.keys()) + color_stop_count = len(color_stop_positions) + spectrum = [] + for i in range(256): + start_pos = max(p for p in color_stop_positions if p <= i) + end_pos = min(p for p in color_stop_positions if p >= i) + start = colors[start_pos] + end = colors[end_pos] + + if start_pos == end_pos: + factor = 0 + else: + factor = (i - start_pos) / (end_pos - start_pos) + + r = round(start[0] + (end[0] - start[0]) * factor) + g = round(start[1] + (end[1] - start[1]) * factor) + b = round(start[2] + (end[2] - start[2]) * factor) + spectrum.append((r, g, b)) + + draw = ImageDraw.Draw(img) + if mode == 'horizontal': + for x in range(size[0]): + pos = int(x * 100 / (size[0] - 1)) + color = spectrum[pos] + if tolerance > 0: + color = tuple([round(c / tolerance) * tolerance for c in color]) + draw.line((x, 0, x, size[1]), fill=color) + elif mode == 'vertical': + for y in range(size[1]): + pos = int(y * 100 / (size[1] - 1)) + color = spectrum[pos] + if tolerance > 0: + color = tuple([round(c / tolerance) * tolerance for c in color]) + draw.line((0, y, size[0], y), fill=color) + + blur = 1.5 + if size[0] > 512 or size[1] > 512: + multiplier = max(size[0], size[1]) / 512 + if multiplier < 1.5: + multiplier = 1.5 + blur = blur * multiplier + + img = img.filter(ImageFilter.GaussianBlur(radius=blur)) + + return img + + + + + # Version 2 optimized based on Mark Setchell's ideas + def gradient_map(self, image, gradient_map, reverse=False): + + # Reverse the image + if reverse: + gradient_map = gradient_map.transpose(Image.FLIP_LEFT_RIGHT) + + # Convert image to Numpy array and average RGB channels + na = np.array(image) + grey = np.mean(na, axis=2).astype(np.uint8) + + # Convert gradient map to Numpy array + cmap = np.array(gradient_map.convert('RGB')) + + # Make output image, same height and width as grey image, but 3-channel RGB + result = np.zeros((*grey.shape, 3), dtype=np.uint8) + + # Reshape grey to match the shape of result + grey_reshaped = grey.reshape(-1) + + # Take entries from RGB gradient map according to grayscale values in image + np.take(cmap.reshape(-1, 3), grey_reshaped, axis=0, out=result.reshape(-1, 3)) + + # Convert result to PIL image + result_image = Image.fromarray(result) + + return result_image + + + # Generate Perlin Noise (Finally in house version) + + def perlin_noise(self, width, height, octaves, persistence, scale, seed=None): + + @jit(nopython=True) + def fade(t): + return 6 * t**5 - 15 * t**4 + 10 * t**3 + + + @jit(nopython=True) + def lerp(t, a, b): + return a + t * (b - a) + + + @jit(nopython=True) + def grad(hash, x, y, z): + h = hash & 15 + u = x if h < 8 else y + v = y if h < 4 else (x if h == 12 or h == 14 else z) + 
return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v) + + + @jit(nopython=True) + def noise(x, y, z, p): + X = np.int32(np.floor(x)) & 255 + Y = np.int32(np.floor(y)) & 255 + Z = np.int32(np.floor(z)) & 255 + + x -= np.floor(x) + y -= np.floor(y) + z -= np.floor(z) + + u = fade(x) + v = fade(y) + w = fade(z) + + A = p[X] + Y + AA = p[A] + Z + AB = p[A + 1] + Z + B = p[X + 1] + Y + BA = p[B] + Z + BB = p[B + 1] + Z + + return lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z), grad(p[BA], x - 1, y, z)), + lerp(u, grad(p[AB], x, y - 1, z), grad(p[BB], x - 1, y - 1, z))), + lerp(v, lerp(u, grad(p[AA + 1], x, y, z - 1), grad(p[BA + 1], x - 1, y, z - 1)), + lerp(u, grad(p[AB + 1], x, y - 1, z - 1), grad(p[BB + 1], x - 1, y - 1, z - 1)))) + + if seed: + random.seed(seed) + + p = np.arange(256, dtype=np.int32) + random.shuffle(p) + p = np.concatenate((p, p)) + + noise_map = np.zeros((height, width)) + amplitude = 1.0 + total_amplitude = 0.0 + + for octave in range(octaves): + frequency = 2 ** octave + total_amplitude += amplitude + + for y in range(height): + for x in range(width): + nx = x / scale * frequency + ny = y / scale * frequency + noise_value = noise(nx, ny, 0, p) * amplitude + current_value = noise_map[y, x] + noise_map[y, x] = current_value + noise_value + + amplitude *= persistence + + min_value = np.min(noise_map) + max_value = np.max(noise_map) + noise_map = np.interp(noise_map, (min_value, max_value), (0, 255)).astype(np.uint8) + image = Image.fromarray(noise_map, mode='L').convert("RGB") + + return image + + + # Generate Perlin Power Fractal (Based on in-house perlin noise) + + def perlin_power_fractal(self, width, height, octaves, persistence, lacunarity, exponent, scale, seed=None): + + @jit(nopython=True) + def fade(t): + return 6 * t**5 - 15 * t**4 + 10 * t**3 + + @jit(nopython=True) + def lerp(t, a, b): + return a + t * (b - a) + + @jit(nopython=True) + def grad(hash, x, y, z): + h = hash & 15 + u = x if h < 8 else y + v = y if h < 4 else (x if h == 12 or h == 14 else z) + return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v) + + @jit(nopython=True) + def noise(x, y, z, p): + X = np.int32(np.floor(x)) & 255 + Y = np.int32(np.floor(y)) & 255 + Z = np.int32(np.floor(z)) & 255 + + x -= np.floor(x) + y -= np.floor(y) + z -= np.floor(z) + + u = fade(x) + v = fade(y) + w = fade(z) + + A = p[X] + Y + AA = p[A] + Z + AB = p[A + 1] + Z + B = p[X + 1] + Y + BA = p[B] + Z + BB = p[B + 1] + Z + + return lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z), grad(p[BA], x - 1, y, z)), + lerp(u, grad(p[AB], x, y - 1, z), grad(p[BB], x - 1, y - 1, z))), + lerp(v, lerp(u, grad(p[AA + 1], x, y, z - 1), grad(p[BA + 1], x - 1, y, z - 1)), + lerp(u, grad(p[AB + 1], x, y - 1, z - 1), grad(p[BB + 1], x - 1, y - 1, z - 1)))) + + if seed: + random.seed(seed) + + p = np.arange(256, dtype=np.int32) + random.shuffle(p) + p = np.concatenate((p, p)) + + noise_map = np.zeros((height, width)) + amplitude = 1.0 + total_amplitude = 0.0 + + for octave in range(octaves): + frequency = lacunarity ** octave + amplitude *= persistence + total_amplitude += amplitude + + for y in range(height): + for x in range(width): + nx = x / scale * frequency + ny = y / scale * frequency + noise_value = noise(nx, ny, 0, p) * amplitude ** exponent + current_value = noise_map[y, x] + noise_map[y, x] = current_value + noise_value + + min_value = np.min(noise_map) + max_value = np.max(noise_map) + noise_map = np.interp(noise_map, (min_value, max_value), (0, 255)).astype(np.uint8) + image = Image.fromarray(noise_map, 
mode='L').convert("RGB") + + return image + + # Worley Noise Generator + class worley_noise: + + def __init__(self, height=512, width=512, density=50, option=0, use_broadcast_ops=True, flat=False, seed=None): + + self.height = height + self.width = width + self.density = density + self.use_broadcast_ops = use_broadcast_ops + self.seed = seed + self.generate_points_and_colors() + self.calculate_noise(option) + self.image = self.generateImage(option, flat_mode=flat) + + def generate_points_and_colors(self): + rng = np.random.default_rng(self.seed) + self.points = rng.integers(0, self.width, (self.density, 2)) + self.colors = rng.integers(0, 256, (self.density, 3)) + + def calculate_noise(self, option): + self.data = np.zeros((self.height, self.width)) + for h in range(self.height): + for w in range(self.width): + distances = np.sqrt(np.sum((self.points - np.array([w, h])) ** 2, axis=1)) + self.data[h, w] = np.sort(distances)[option] + + def broadcast_calculate_noise(self, option): + xs = np.arange(self.width) + ys = np.arange(self.height) + x_dist = np.power(self.points[:, 0, np.newaxis] - xs, 2) + y_dist = np.power(self.points[:, 1, np.newaxis] - ys, 2) + d = np.sqrt(x_dist[:, :, np.newaxis] + y_dist[:, np.newaxis, :]) + distances = np.sort(d, axis=0) + self.data = distances[option] + + def generateImage(self, option, flat_mode=False): + if flat_mode: + flat_color_data = np.zeros((self.height, self.width, 3), dtype=np.uint8) + for h in range(self.height): + for w in range(self.width): + closest_point_idx = np.argmin(np.sum((self.points - np.array([w, h])) ** 2, axis=1)) + flat_color_data[h, w, :] = self.colors[closest_point_idx] + return Image.fromarray(flat_color_data, 'RGB') + else: + min_val, max_val = np.min(self.data), np.max(self.data) + data_scaled = (self.data - min_val) / (max_val - min_val) * 255 + data_scaled = data_scaled.astype(np.uint8) + return Image.fromarray(data_scaled, 'L') + + # Make Image Seamless + + def make_seamless(self, image, blending=0.5, tiled=False, tiles=2): + + if 'img2texture' not in packages(): + install_package('git+https://github.com/WASasquatch/img2texture.git') + + from img2texture import img2tex + from img2texture._tiling import tile + + texture = img2tex(src=image, dst=None, pct=blending, return_result=True) + if tiled: + texture = tile(source=texture, target=None, horizontal=tiles, vertical=tiles, return_result=True) + + return texture + + # Image Displacement Warp + + def displace_image(self, image, displacement_map, amplitude): + + image = image.convert('RGB') + displacement_map = displacement_map.convert('L') + width, height = image.size + result = Image.new('RGB', (width, height)) + + for y in range(height): + for x in range(width): + + # Calculate the displacements n' stuff + displacement = displacement_map.getpixel((x, y)) + displacement_amount = amplitude * (displacement / 255) + new_x = x + int(displacement_amount) + new_y = y + int(displacement_amount) + + # Apply mirror reflection at edges and corners + if new_x < 0: + new_x = abs(new_x) + elif new_x >= width: + new_x = 2 * width - new_x - 1 + + if new_y < 0: + new_y = abs(new_y) + elif new_y >= height: + new_y = 2 * height - new_y - 1 + + if new_x < 0: + new_x = abs(new_x) + if new_y < 0: + new_y = abs(new_y) + + if new_x >= width: + new_x = 2 * width - new_x - 1 + if new_y >= height: + new_y = 2 * height - new_y - 1 + + # Consider original image color at new location for RGB results, oops + pixel = image.getpixel((new_x, new_y)) + result.putpixel((x, y), pixel) + + return result + + # 
Analyze Filters + + def black_white_levels(self, image): + + if 'matplotlib' not in packages(): + install_package('matplotlib') + + import matplotlib.pyplot as plt + + # convert to grayscale + image = image.convert('L') + + # Calculate the histogram of grayscale intensities + hist = image.histogram() + + # Find the minimum and maximum grayscale intensity values + min_val = 0 + max_val = 255 + for i in range(256): + if hist[i] > 0: + min_val = i + break + for i in range(255, -1, -1): + if hist[i] > 0: + max_val = i + break + + # Create a graph of the grayscale histogram + plt.figure(figsize=(16, 8)) + plt.hist(image.getdata(), bins=256, range=(0, 256), color='black', alpha=0.7) + plt.xlim([0, 256]) + plt.ylim([0, max(hist)]) + plt.axvline(min_val, color='red', linestyle='dashed') + plt.axvline(max_val, color='red', linestyle='dashed') + plt.title('Black and White Levels') + plt.xlabel('Intensity') + plt.ylabel('Frequency') + + return self.fig2img(plt) + + def channel_frequency(self, image): + + if 'matplotlib' not in packages(): + install_package('matplotlib') + + import matplotlib.pyplot as plt + + # Split the image into its RGB channels + r, g, b = image.split() + + # Calculate the frequency of each color in each channel + r_freq = r.histogram() + g_freq = g.histogram() + b_freq = b.histogram() + + # Create a graph to hold the frequency maps + fig, axs = plt.subplots(1, 3, figsize=(16, 4)) + axs[0].set_title('Red Channel') + axs[1].set_title('Green Channel') + axs[2].set_title('Blue Channel') + + # Plot the frequency of each color in each channel + axs[0].plot(range(256), r_freq, color='red') + axs[1].plot(range(256), g_freq, color='green') + axs[2].plot(range(256), b_freq, color='blue') + + # Set the axis limits and labels + for ax in axs: + ax.set_xlim([0, 255]) + ax.set_xlabel('Color Intensity') + ax.set_ylabel('Frequency') + + return self.fig2img(plt) + + def generate_palette(self, img, n_colors=16, cell_size=128, padding=0, font_path=None, font_size=15, mode='chart'): + if 'scikit-learn' not in packages(): + install_package('scikit-learn') + + from sklearn.cluster import KMeans + + img = img.resize((img.width // 2, img.height // 2), resample=Image.BILINEAR) + pixels = np.array(img) + pixels = pixels.reshape((-1, 3)) + kmeans = KMeans(n_clusters=n_colors, random_state=0, n_init='auto').fit(pixels) + cluster_centers = np.uint8(kmeans.cluster_centers_) + + # Get the sorted indices based on luminance + luminance = np.sqrt(np.dot(cluster_centers, [0.299, 0.587, 0.114])) + sorted_indices = np.argsort(luminance) + + # Rearrange the cluster centers and luminance based on sorted indices + cluster_centers = cluster_centers[sorted_indices] + luminance = luminance[sorted_indices] + + # Group colors by their individual types + reds = [] + greens = [] + blues = [] + others = [] + + for i in range(n_colors): + color = cluster_centers[i] + color_type = np.argmax(color) # Find the dominant color component + + if color_type == 0: + reds.append((color, luminance[i])) + elif color_type == 1: + greens.append((color, luminance[i])) + elif color_type == 2: + blues.append((color, luminance[i])) + else: + others.append((color, luminance[i])) + + # Sort each color group by luminance + reds.sort(key=lambda x: x[1]) + greens.sort(key=lambda x: x[1]) + blues.sort(key=lambda x: x[1]) + others.sort(key=lambda x: x[1]) + + # Combine the sorted color groups + sorted_colors = reds + greens + blues + others + + if mode == 'back_to_back': + # Calculate the size of the palette image based on the number of colors + 
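# 'back_to_back' lays every swatch out in a single row, one cell per color;
+                # the 'chart' branch below packs swatches into a near-square grid with labels
+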
palette_width = n_colors * cell_size
+                palette_height = cell_size
+            else:
+                # Calculate the number of rows and columns based on the number of colors
+                num_rows = int(np.sqrt(n_colors))
+                num_cols = int(np.ceil(n_colors / num_rows))
+
+                # Calculate the size of the palette image based on the number of rows and columns
+                palette_width = num_cols * cell_size
+                palette_height = num_rows * cell_size
+
+            palette_size = (palette_width, palette_height)
+
+            palette = Image.new('RGB', palette_size, color='white')
+            draw = ImageDraw.Draw(palette)
+            if font_path:
+                font = ImageFont.truetype(font_path, font_size)
+            else:
+                font = ImageFont.load_default()
+
+            hex_palette = []
+            for i, (color, _) in enumerate(sorted_colors):
+                if mode == 'back_to_back':
+                    cell_x = i * cell_size
+                    cell_y = 0
+                else:
+                    row = i % num_rows
+                    col = i // num_rows
+                    cell_x = col * cell_size
+                    cell_y = row * cell_size
+
+                cell_width = cell_size
+                cell_height = cell_size
+
+                color = tuple(color)
+
+                cell = Image.new('RGB', (cell_width, cell_height), color=color)
+                palette.paste(cell, (cell_x, cell_y))
+
+                if mode != 'back_to_back':
+                    text_x = cell_x + (cell_width / 2)
+                    text_y = cell_y + cell_height + padding
+
+                    draw.text((text_x + 1, text_y + 1), f"R: {color[0]} G: {color[1]} B: {color[2]}", font=font, fill='black', anchor='ms')
+                    draw.text((text_x, text_y), f"R: {color[0]} G: {color[1]} B: {color[2]}", font=font, fill='white', anchor='ms')
+
+                hex_palette.append('#%02x%02x%02x' % color)
+
+            return palette, '\n'.join(hex_palette)
+
+#! IMAGE FILTER NODES
+
+# IMAGE ADJUSTMENTS NODES
+
+# IMAGE SHADOW AND HIGHLIGHT ADJUSTMENTS
+
+class WAS_Shadow_And_Highlight_Adjustment:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "shadow_threshold": ("FLOAT", {"default": 75, "min": 0.0, "max": 255.0, "step": 0.1}),
+                "shadow_factor": ("FLOAT", {"default": 1.5, "min": -12.0, "max": 12.0, "step": 0.1}),
+                "shadow_smoothing": ("FLOAT", {"default": 0.25, "min": -255.0, "max": 255.0, "step": 0.1}),
+                "highlight_threshold": ("FLOAT", {"default": 175, "min": 0.0, "max": 255.0, "step": 0.1}),
+                "highlight_factor": ("FLOAT", {"default": 0.5, "min": -12.0, "max": 12.0, "step": 0.1}),
+                "highlight_smoothing": ("FLOAT", {"default": 0.25, "min": -255.0, "max": 255.0, "step": 0.1}),
+                "simplify_isolation": ("FLOAT", {"default": 0, "min": -255.0, "max": 255.0, "step": 0.1}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE","IMAGE","IMAGE")
+    RETURN_NAMES = ("image","shadow_map","highlight_map")
+    FUNCTION = "apply_shadow_and_highlight"
+
+    CATEGORY = "WAS Suite/Image/Adjustment"
+
+    def apply_shadow_and_highlight(self, image, shadow_threshold=30, highlight_threshold=220, shadow_factor=1.5, highlight_factor=0.5, shadow_smoothing=0, highlight_smoothing=0, simplify_isolation=0):
+
+        WTools = WAS_Tools_Class()
+
+        result, shadows, highlights = WTools.shadows_and_highlights(tensor2pil(image), shadow_threshold, highlight_threshold, shadow_factor, highlight_factor, shadow_smoothing, highlight_smoothing, simplify_isolation)
+
+        return (pil2tensor(result), pil2tensor(shadows), pil2tensor(highlights))
+
+
+# IMAGE PIXELATE
+
+class WAS_Image_Pixelate:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "images": ("IMAGE",),
+                "pixelation_size":
("FLOAT", {"default": 164, "min": 16, "max": 480, "step": 1}), + "num_colors": ("FLOAT", {"default": 16, "min": 2, "max": 256, "step": 1}), + "init_mode": (["k-means++", "random", "none"],), + "max_iterations": ("FLOAT", {"default": 100, "min": 1, "max": 256, "step": 1}), + "dither": (["False", "True"],), + "dither_mode": (["FloydSteinberg", "Ordered"],), + }, + "optional": { + "color_palettes": ("LIST", {"forceInput": True}), + "color_palette_mode": (["Brightness", "BrightnessAndTonal", "Linear", "Tonal"],), + "reverse_palette":(["False","True"],), + } + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "image_pixelate" + + CATEGORY = "WAS Suite/Image/Process" + + def image_pixelate(self, images, pixelation_size=164, num_colors=16, init_mode='random', max_iterations=100, + color_palettes=None, color_palette_mode="Linear", reverse_palette='False', dither='False', dither_mode='FloydSteinberg'): + + if 'scikit-learn' not in packages(): + install_package('scikit-learn') + + pixelation_size = int(pixelation_size) + num_colors = int(num_colors) + max_iterations = int(max_iterations) + color_palette_mode = color_palette_mode + dither = (dither == 'True') + + color_palettes_list = [] + if color_palettes: + for palette in color_palettes: + color_palettes_list.append([color.strip() for color in palette.splitlines() if not color.startswith('//') or not color.startswith(';')]) + + reverse_palette = (True if reverse_palette == 'True' else False) + + return ( self.pixel_art_batch(images, pixelation_size, num_colors, init_mode, max_iterations, 42, + (color_palettes_list if color_palettes_list else None), color_palette_mode, reverse_palette, dither, dither_mode), ) + + def pixel_art_batch(self, batch, min_size, num_colors=16, init_mode='random', max_iter=100, random_state=42, + palette=None, palette_mode="Linear", reverse_palette=False, dither=False, dither_mode='FloydSteinberg'): + + from sklearn.cluster import KMeans + + hex_palette_to_rgb = lambda hex: tuple(int(hex[i:i+2], 16) for i in (0, 2, 4)) + + def flatten_colors(image, num_colors, init_mode='random', max_iter=100, random_state=42): + np_image = np.array(image) + pixels = np_image.reshape(-1, 3) + kmeans = KMeans(n_clusters=num_colors, init=init_mode, max_iter=max_iter, tol=1e-3, random_state=random_state, n_init='auto') + labels = kmeans.fit_predict(pixels) + colors = kmeans.cluster_centers_.astype(np.uint8) + flattened_pixels = colors[labels] + flattened_image = flattened_pixels.reshape(np_image.shape) + return Image.fromarray(flattened_image) + + def dither_image(image, mode, nc): + + def clamp(value, min_value=0, max_value=255): + return max(min(value, max_value), min_value) + + def get_new_val(old_val, nc): + return np.round(old_val * (nc - 1)) / (nc - 1) + + def fs_dither(img, nc): + arr = np.array(img, dtype=float) / 255 + new_width, new_height = img.size + + for ir in range(new_height): + for ic in range(new_width): + old_val = arr[ir, ic].copy() + new_val = get_new_val(old_val, nc) + arr[ir, ic] = new_val + err = old_val - new_val + + if ic < new_width - 1: + arr[ir, ic + 1] += err * 7/16 + if ir < new_height - 1: + if ic > 0: + arr[ir + 1, ic - 1] += err * 3/16 + arr[ir + 1, ic] += err * 5/16 + if ic < new_width - 1: + arr[ir + 1, ic + 1] += err / 16 + + carr = np.array(arr * 255, dtype=np.uint8) + return Image.fromarray(carr) + + def ordered_dither(img, nc): + width, height = img.size + dither_matrix = [ + [0, 8, 2, 10], + [12, 4, 14, 6], + [3, 11, 1, 9], + [15, 7, 13, 5] + ] + dithered_image = 
Image.new('RGB', (width, height)) + num_colors = min(2 ** int(np.log2(nc)), 16) + + for y in range(height): + for x in range(width): + old_pixel = img.getpixel((x, y)) + threshold = dither_matrix[x % 4][y % 4] * num_colors + new_pixel = tuple(int(c * num_colors / 256) * (256 // num_colors) for c in old_pixel) + error = tuple(old - new for old, new in zip(old_pixel, new_pixel)) + dithered_image.putpixel((x, y), new_pixel) + + if x < width - 1: + neighboring_pixel = img.getpixel((x + 1, y)) + neighboring_pixel = tuple(int(c * num_colors / 256) * (256 // num_colors) for c in neighboring_pixel) + neighboring_error = tuple(neighboring - new for neighboring, new in zip(neighboring_pixel, new_pixel)) + neighboring_pixel = tuple(int(clamp(pixel + error * 7 / 16)) for pixel, error in zip(neighboring_pixel, neighboring_error)) + img.putpixel((x + 1, y), neighboring_pixel) + + if x < width - 1 and y < height - 1: + neighboring_pixel = img.getpixel((x + 1, y + 1)) + neighboring_pixel = tuple(int(c * num_colors / 256) * (256 // num_colors) for c in neighboring_pixel) + neighboring_error = tuple(neighboring - new for neighboring, new in zip(neighboring_pixel, new_pixel)) + neighboring_pixel = tuple(int(clamp(pixel + error * 1 / 16)) for pixel, error in zip(neighboring_pixel, neighboring_error)) + img.putpixel((x + 1, y + 1), neighboring_pixel) + + if y < height - 1: + neighboring_pixel = img.getpixel((x, y + 1)) + neighboring_pixel = tuple(int(c * num_colors / 256) * (256 // num_colors) for c in neighboring_pixel) + neighboring_error = tuple(neighboring - new for neighboring, new in zip(neighboring_pixel, new_pixel)) + neighboring_pixel = tuple(int(clamp(pixel + error * 5 / 16)) for pixel, error in zip(neighboring_pixel, neighboring_error)) + img.putpixel((x, y + 1), neighboring_pixel) + + if x > 0 and y < height - 1: + neighboring_pixel = img.getpixel((x - 1, y + 1)) + neighboring_pixel = tuple(int(c * num_colors / 256) * (256 // num_colors) for c in neighboring_pixel) + neighboring_error = tuple(neighboring - new for neighboring, new in zip(neighboring_pixel, new_pixel)) + neighboring_pixel = tuple(int(clamp(pixel + error * 3 / 16)) for pixel, error in zip(neighboring_pixel, neighboring_error)) + img.putpixel((x - 1, y + 1), neighboring_pixel) + + return dithered_image + + if mode == 'FloydSteinberg': + return fs_dither(image, nc) + elif mode == 'Ordered': + return ordered_dither(image, nc) + else: + cstr(f"Inavlid dithering mode `{mode}` selected.").error.print() + return image + + return image + + def color_palette_from_hex_lines(image, colors, palette_mode='Linear', reverse_palette=False): + + def color_distance(color1, color2): + r1, g1, b1 = color1 + r2, g2, b2 = color2 + return np.sqrt((r1 - r2)**2 + (g1 - g2)**2 + (b1 - b2)**2) + + def find_nearest_color_index(color, palette): + distances = [color_distance(color, palette_color) for palette_color in palette] + return distances.index(min(distances)) + + def find_nearest_color_index_tonal(color, palette): + distances = [color_distance_tonal(color, palette_color) for palette_color in palette] + return distances.index(min(distances)) + + def find_nearest_color_index_both(color, palette): + distances = [color_distance_both(color, palette_color) for palette_color in palette] + return distances.index(min(distances)) + + def color_distance_tonal(color1, color2): + r1, g1, b1 = color1 + r2, g2, b2 = color2 + l1 = 0.299 * r1 + 0.587 * g1 + 0.114 * b1 + l2 = 0.299 * r2 + 0.587 * g2 + 0.114 * b2 + return abs(l1 - l2) + + def color_distance_both(color1, 
color2): + r1, g1, b1 = color1 + r2, g2, b2 = color2 + l1 = 0.299 * r1 + 0.587 * g1 + 0.114 * b1 + l2 = 0.299 * r2 + 0.587 * g2 + 0.114 * b2 + return abs(l1 - l2) + sum(abs(c1 - c2) for c1, c2 in zip(color1, color2)) + + def color_distance(color1, color2): + return sum(abs(c1 - c2) for c1, c2 in zip(color1, color2)) + + color_palette = [hex_palette_to_rgb(color.lstrip('#')) for color in colors] + + if reverse_palette: + color_palette = color_palette[::-1] + + np_image = np.array(image) + labels = np_image.reshape(image.size[1], image.size[0], -1) + width, height = image.size + new_image = Image.new("RGB", image.size) + + if palette_mode == 'Linear': + color_palette_indices = list(range(len(color_palette))) + elif palette_mode == 'Brightness': + color_palette_indices = sorted(range(len(color_palette)), key=lambda i: sum(color_palette[i]) / 3) + elif palette_mode == 'Tonal': + color_palette_indices = sorted(range(len(color_palette)), key=lambda i: color_distance(color_palette[i], (128, 128, 128))) + elif palette_mode == 'BrightnessAndTonal': + color_palette_indices = sorted(range(len(color_palette)), key=lambda i: (sum(color_palette[i]) / 3, color_distance(color_palette[i], (128, 128, 128)))) + else: + raise ValueError(f"Unsupported mapping mode: {palette_mode}") + + for x in range(width): + for y in range(height): + pixel_color = labels[y, x, :] + + if palette_mode == 'Linear': + color_index = pixel_color[0] % len(color_palette) + elif palette_mode == 'Brightness': + color_index = find_nearest_color_index(pixel_color, [color_palette[i] for i in color_palette_indices]) + elif palette_mode == 'Tonal': + color_index = find_nearest_color_index_tonal(pixel_color, [color_palette[i] for i in color_palette_indices]) + elif palette_mode == 'BrightnessAndTonal': + color_index = find_nearest_color_index_both(pixel_color, [color_palette[i] for i in color_palette_indices]) + else: + raise ValueError(f"Unsupported mapping mode: {palette_mode}") + + color = color_palette[color_palette_indices[color_index]] + new_image.putpixel((x, y), color) + + return new_image + + pil_images = [tensor2pil(image) for image in batch] + pixel_art_images = [] + original_sizes = [] + total_images = len(pil_images) + for image in pil_images: + width, height = image.size + original_sizes.append((width, height)) + if max(width, height) > min_size: + if width > height: + new_width = min_size + new_height = int(height * (min_size / width)) + else: + new_height = min_size + new_width = int(width * (min_size / height)) + pixel_art_images.append(image.resize((new_width, int(new_height)), Image.NEAREST)) + else: + pixel_art_images.append(image) + if init_mode != 'none': + pixel_art_images = [flatten_colors(image, num_colors, init_mode) for image in pixel_art_images] + if dither: + pixel_art_images = [dither_image(image, dither_mode, num_colors) for image in pixel_art_images] + if palette: + pixel_art_images = [color_palette_from_hex_lines(pixel_art_image, palette[i], palette_mode, reverse_palette) for i, pixel_art_image in enumerate(pixel_art_images)] + else: + pixel_art_images = pixel_art_images + pixel_art_images = [image.resize(size, Image.NEAREST) for image, size in zip(pixel_art_images, original_sizes)] + + tensor_images = [pil2tensor(image) for image in pixel_art_images] + + batch_tensor = torch.cat(tensor_images, dim=0) + return batch_tensor + +# SIMPLE IMAGE ADJUST + +class WAS_Image_Filters: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "brightness": 
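# brightness and contrast below are applied as fast NumPy ops in image_filters;
+                # saturation, sharpness, blur, and edge enhancement fall back to PIL
+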
("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}), + "contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}), + "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}), + "sharpness": ("FLOAT", {"default": 1.0, "min": -5.0, "max": 5.0, "step": 0.01}), + "blur": ("INT", {"default": 0, "min": 0, "max": 16, "step": 1}), + "gaussian_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1024.0, "step": 0.1}), + "edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "detail_enhance": (["false", "true"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_filters" + + CATEGORY = "WAS Suite/Image/Filter" + + def image_filters(self, image, brightness, contrast, saturation, sharpness, blur, gaussian_blur, edge_enhance, detail_enhance): + + + tensors = [] + if len(image) > 1: + for img in image: + + pil_image = None + + # Apply NP Adjustments + if brightness > 0.0 or brightness < 0.0: + # Apply brightness + img = np.clip(img + brightness, 0.0, 1.0) + + if contrast > 1.0 or contrast < 1.0: + # Apply contrast + img = np.clip(img * contrast, 0.0, 1.0) + + # Apply PIL Adjustments + if saturation > 1.0 or saturation < 1.0: + # PIL Image + pil_image = tensor2pil(img) + # Apply saturation + pil_image = ImageEnhance.Color(pil_image).enhance(saturation) + + if sharpness > 1.0 or sharpness < 1.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Apply sharpness + pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness) + + if blur > 0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Apply blur + for _ in range(blur): + pil_image = pil_image.filter(ImageFilter.BLUR) + + if gaussian_blur > 0.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Apply Gaussian blur + pil_image = pil_image.filter( + ImageFilter.GaussianBlur(radius=gaussian_blur)) + + if edge_enhance > 0.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Edge Enhancement + edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE) + # Blend Mask + blend_mask = Image.new( + mode="L", size=pil_image.size, color=(round(edge_enhance * 255))) + # Composite Original and Enhanced Version + pil_image = Image.composite( + edge_enhanced_img, pil_image, blend_mask) + # Clean-up + del blend_mask, edge_enhanced_img + + if detail_enhance == "true": + pil_image = pil_image if pil_image else tensor2pil(img) + pil_image = pil_image.filter(ImageFilter.DETAIL) + + # Output image + out_image = (pil2tensor(pil_image) if pil_image else img) + + tensors.append(out_image) + + tensors = torch.cat(tensors, dim=0) + + else: + + pil_image = None + img = image + + # Apply NP Adjustments + if brightness > 0.0 or brightness < 0.0: + # Apply brightness + img = np.clip(img + brightness, 0.0, 1.0) + + if contrast > 1.0 or contrast < 1.0: + # Apply contrast + img = np.clip(img * contrast, 0.0, 1.0) + + # Apply PIL Adjustments + if saturation > 1.0 or saturation < 1.0: + # PIL Image + pil_image = tensor2pil(img) + # Apply saturation + pil_image = ImageEnhance.Color(pil_image).enhance(saturation) + + if sharpness > 1.0 or sharpness < 1.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Apply sharpness + pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness) + + if blur > 0: + # Assign or create PIL Image + pil_image = pil_image if pil_image 
else tensor2pil(img) + # Apply blur + for _ in range(blur): + pil_image = pil_image.filter(ImageFilter.BLUR) + + if gaussian_blur > 0.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Apply Gaussian blur + pil_image = pil_image.filter( + ImageFilter.GaussianBlur(radius=gaussian_blur)) + + if edge_enhance > 0.0: + # Assign or create PIL Image + pil_image = pil_image if pil_image else tensor2pil(img) + # Edge Enhancement + edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE) + # Blend Mask + blend_mask = Image.new( + mode="L", size=pil_image.size, color=(round(edge_enhance * 255))) + # Composite Original and Enhanced Version + pil_image = Image.composite( + edge_enhanced_img, pil_image, blend_mask) + # Clean-up + del blend_mask, edge_enhanced_img + + if detail_enhance == "true": + pil_image = pil_image if pil_image else tensor2pil(img) + pil_image = pil_image.filter(ImageFilter.DETAIL) + + # Output image + out_image = (pil2tensor(pil_image) if pil_image else img) + + tensors = out_image + + return (tensors, ) + +# RICHARDSON LUCY SHARPEN + +class WAS_Lucy_Sharpen: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "iterations": ("INT", {"default": 2, "min": 1, "max": 12, "step": 1}), + "kernel_size": ("INT", {"default": 3, "min": 1, "max": 16, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "sharpen" + + CATEGORY = "WAS Suite/Image/Filter" + + def sharpen(self, images, iterations, kernel_size): + + tensors = [] + if len(images) > 1: + for img in images: + tensors.append(pil2tensor(self.lucy_sharpen(tensor2pil(img), iterations, kernel_size))) + tensors = torch.cat(tensors, dim=0) + else: + return (pil2tensor(self.lucy_sharpen(tensor2pil(images), iterations, kernel_size)),) + + return (tensors,) + + + def lucy_sharpen(self, image, iterations=10, kernel_size=3): + + from scipy.signal import convolve2d + + image_array = np.array(image, dtype=np.float32) / 255.0 + kernel = np.ones((kernel_size, kernel_size), dtype=np.float32) / (kernel_size ** 2) + sharpened_channels = [] + + padded_image_array = np.pad(image_array, ((kernel_size, kernel_size), (kernel_size, kernel_size), (0, 0)), mode='edge') + + for channel in range(3): + channel_array = padded_image_array[:, :, channel] + + for _ in range(iterations): + blurred_channel = convolve2d(channel_array, kernel, mode='same') + ratio = channel_array / (blurred_channel + 1e-6) + channel_array *= convolve2d(ratio, kernel, mode='same') + + sharpened_channels.append(channel_array) + + cropped_sharpened_image_array = np.stack(sharpened_channels, axis=-1)[kernel_size:-kernel_size, kernel_size:-kernel_size, :] + sharpened_image_array = np.clip(cropped_sharpened_image_array * 255.0, 0, 255).astype(np.uint8) + sharpened_image = Image.fromarray(sharpened_image_array) + return sharpened_image + +# IMAGE STYLE FILTER + +class WAS_Image_Style_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "style": ([ + "1977", + "aden", + "brannan", + "brooklyn", + "clarendon", + "earlybird", + "fairy tale", + "gingham", + "hudson", + "inkwell", + "kelvin", + "lark", + "lofi", + "maven", + "mayfair", + "moon", + "nashville", + "perpetua", + "reyes", + "rise", + "slumber", + "stinson", + "toaster", + "valencia", + "walden", + "willow", + "xpro2" + ],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_style_filter" + + CATEGORY = "WAS 
Suite/Image/Filter" + + def image_style_filter(self, image, style): + + # Install Pilgram + if 'pilgram' not in packages(): + install_package('pilgram') + + # Import Pilgram module + import pilgram + + # WAS Filters + WTools = WAS_Tools_Class() + + # Apply blending + tensors = [] + for img in image: + if style == "1977": + tensors.append(pil2tensor(pilgram._1977(tensor2pil(img)))) + elif style == "aden": + tensors.append(pil2tensor(pilgram.aden(tensor2pil(img)))) + elif style == "brannan": + tensors.append(pil2tensor(pilgram.brannan(tensor2pil(img)))) + elif style == "brooklyn": + tensors.append(pil2tensor(pilgram.brooklyn(tensor2pil(img)))) + elif style == "clarendon": + tensors.append(pil2tensor(pilgram.clarendon(tensor2pil(img)))) + elif style == "earlybird": + tensors.append(pil2tensor(pilgram.earlybird(tensor2pil(img)))) + elif style == "fairy tale": + tensors.append(pil2tensor(WTools.sparkle(tensor2pil(img)))) + elif style == "gingham": + tensors.append(pil2tensor(pilgram.gingham(tensor2pil(img)))) + elif style == "hudson": + tensors.append(pil2tensor(pilgram.hudson(tensor2pil(img)))) + elif style == "inkwell": + tensors.append(pil2tensor(pilgram.inkwell(tensor2pil(img)))) + elif style == "kelvin": + tensors.append(pil2tensor(pilgram.kelvin(tensor2pil(img)))) + elif style == "lark": + tensors.append(pil2tensor(pilgram.lark(tensor2pil(img)))) + elif style == "lofi": + tensors.append(pil2tensor(pilgram.lofi(tensor2pil(img)))) + elif style == "maven": + tensors.append(pil2tensor(pilgram.maven(tensor2pil(img)))) + elif style == "mayfair": + tensors.append(pil2tensor(pilgram.mayfair(tensor2pil(img)))) + elif style == "moon": + tensors.append(pil2tensor(pilgram.moon(tensor2pil(img)))) + elif style == "nashville": + tensors.append(pil2tensor(pilgram.nashville(tensor2pil(img)))) + elif style == "perpetua": + tensors.append(pil2tensor(pilgram.perpetua(tensor2pil(img)))) + elif style == "reyes": + tensors.append(pil2tensor(pilgram.reyes(tensor2pil(img)))) + elif style == "rise": + tensors.append(pil2tensor(pilgram.rise(tensor2pil(img)))) + elif style == "slumber": + tensors.append(pil2tensor(pilgram.slumber(tensor2pil(img)))) + elif style == "stinson": + tensors.append(pil2tensor(pilgram.stinson(tensor2pil(img)))) + elif style == "toaster": + tensors.append(pil2tensor(pilgram.toaster(tensor2pil(img)))) + elif style == "valencia": + tensors.append(pil2tensor(pilgram.valencia(tensor2pil(img)))) + elif style == "walden": + tensors.append(pil2tensor(pilgram.walden(tensor2pil(img)))) + elif style == "willow": + tensors.append(pil2tensor(pilgram.willow(tensor2pil(img)))) + elif style == "xpro2": + tensors.append(pil2tensor(pilgram.xpro2(tensor2pil(img)))) + else: + tensors.append(img) + + tensors = torch.cat(tensors, dim=0) + + return (tensors, ) + + +# IMAGE CROP FACE + +class WAS_Image_Crop_Face: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "crop_padding_factor": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 2.0, "step": 0.01}), + "cascade_xml": ([ + "lbpcascade_animeface.xml", + "haarcascade_frontalface_default.xml", + "haarcascade_frontalface_alt.xml", + "haarcascade_frontalface_alt2.xml", + "haarcascade_frontalface_alt_tree.xml", + "haarcascade_profileface.xml", + "haarcascade_upperbody.xml", + "haarcascade_eye.xml" + ],), + } + } + + RETURN_TYPES = ("IMAGE", "CROP_DATA") + FUNCTION = "image_crop_face" + + CATEGORY = "WAS Suite/Image/Process" + + def image_crop_face(self, image, cascade_xml=None, 
crop_padding_factor=0.25): + return self.crop_face(tensor2pil(image), cascade_xml, crop_padding_factor) + + def crop_face(self, image, cascade_name=None, padding=0.25): + + import cv2 + + img = np.array(image.convert('RGB')) + + face_location = None + + cascades = [ os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'lbpcascade_animeface.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_frontalface_default.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_frontalface_alt.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_frontalface_alt2.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_frontalface_alt_tree.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_profileface.xml'), + os.path.join(os.path.join(WAS_SUITE_ROOT, 'res'), 'haarcascade_upperbody.xml') ] + + if cascade_name: + for cascade in cascades: + if os.path.basename(cascade) == cascade_name: + cascades.remove(cascade) + cascades.insert(0, cascade) + break + + faces = None + if not face_location: + for cascade in cascades: + if not os.path.exists(cascade): + cstr(f"Unable to find cascade XML file at `{cascade}`. Did you pull the latest files from https://github.com/WASasquatch/was-node-suite-comfyui repo?").error.print() + return (pil2tensor(Image.new("RGB", (512,512), (0,0,0))), False) + face_cascade = cv2.CascadeClassifier(cascade) + gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5) + if len(faces) != 0: + cstr(f"Face found with: {os.path.basename(cascade)}").msg.print() + break + if len(faces) == 0: + cstr("No faces found in the image!").warning.print() + return (pil2tensor(Image.new("RGB", (512,512), (0,0,0))), False) + else: + cstr("Face found with: face_recognition model").warning.print() + faces = face_location + + # Assume there is only one face in the image + x, y, w, h = faces[0] + + # Check if the face region aligns with the edges of the original image + left_adjust = max(0, -x) + right_adjust = max(0, x + w - img.shape[1]) + top_adjust = max(0, -y) + bottom_adjust = max(0, y + h - img.shape[0]) + + # Check if the face region is near any edges, and if so, pad in the opposite direction + if left_adjust < w: + x += right_adjust + elif right_adjust < w: + x -= left_adjust + if top_adjust < h: + y += bottom_adjust + elif bottom_adjust < h: + y -= top_adjust + + w -= left_adjust + right_adjust + h -= top_adjust + bottom_adjust + + # Calculate padding around face + face_size = min(h, w) + y_pad = int(face_size * padding) + x_pad = int(face_size * padding) + + # Calculate square coordinates around face + center_x = x + w // 2 + center_y = y + h // 2 + half_size = (face_size + max(x_pad, y_pad)) // 2 + top = max(0, center_y - half_size) + bottom = min(img.shape[0], center_y + half_size) + left = max(0, center_x - half_size) + right = min(img.shape[1], center_x + half_size) + + # Ensure square crop of the original image + crop_size = min(right - left, bottom - top) + left = center_x - crop_size // 2 + right = center_x + crop_size // 2 + top = center_y - crop_size // 2 + bottom = center_y + crop_size // 2 + + # Crop face from original image + face_img = img[top:bottom, left:right, :] + + # Resize image + size = max(face_img.copy().shape[:2]) + pad_h = (size - face_img.shape[0]) // 2 + pad_w = (size - face_img.shape[1]) // 2 + face_img = cv2.copyMakeBorder(face_img, pad_h, pad_h, pad_w, pad_w, cv2.BORDER_CONSTANT, value=[0,0,0]) + min_size = 64 # Set 
minimum size for padded image
+        if size < min_size:
+            size = min_size
+        face_img = cv2.resize(face_img, (size, size))
+
+        # Convert numpy array back to PIL image
+        face_img = Image.fromarray(face_img)
+
+        # Resize image to a multiple of 64 (resize returns a new image, it is not in-place)
+        original_size = face_img.size
+        face_img = face_img.resize((((face_img.size[0] // 64) * 64 + 64), ((face_img.size[1] // 64) * 64 + 64)))
+
+        # Return face image and coordinates
+        return (pil2tensor(face_img.convert('RGB')), (original_size, (left, top, right, bottom)))
+
+
+# IMAGE PASTE FACE CROP
+
+class WAS_Image_Paste_Face_Crop:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "crop_image": ("IMAGE",),
+                "crop_data": ("CROP_DATA",),
+                "crop_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}),
+                "crop_sharpening": ("INT", {"default": 0, "min": 0, "max": 3, "step": 1}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE", "IMAGE")
+    RETURN_NAMES = ("IMAGE", "MASK_IMAGE")
+    FUNCTION = "image_paste_face"
+
+    CATEGORY = "WAS Suite/Image/Process"
+
+    def image_paste_face(self, image, crop_image, crop_data=None, crop_blending=0.25, crop_sharpening=0):
+
+        if crop_data is False:
+            cstr("No valid crop data found!").error.print()
+            return (image, pil2tensor(Image.new("RGB", tensor2pil(image).size, (0,0,0))))
+
+        result_image, result_mask = self.paste_image(tensor2pil(image), tensor2pil(crop_image), crop_data, crop_blending, crop_sharpening)
+        return (result_image, result_mask)
+
+    def paste_image(self, image, crop_image, crop_data, blend_amount=0.25, sharpen_amount=1):
+
+        def lingrad(size, direction, white_ratio):
+            image = Image.new('RGB', size)
+            draw = ImageDraw.Draw(image)
+            if direction == 'vertical':
+                black_end = int(size[1] * (1 - white_ratio))
+                range_start = 0
+                range_end = size[1]
+                range_step = 1
+                for y in range(range_start, range_end, range_step):
+                    color_ratio = y / size[1]
+                    if y <= black_end:
+                        color = (0, 0, 0)
+                    else:
+                        color_value = int(((y - black_end) / (size[1] - black_end)) * 255)
+                        color = (color_value, color_value, color_value)
+                    draw.line([(0, y), (size[0], y)], fill=color)
+            elif direction == 'horizontal':
+                black_end = int(size[0] * (1 - white_ratio))
+                range_start = 0
+                range_end = size[0]
+                range_step = 1
+                for x in range(range_start, range_end, range_step):
+                    color_ratio = x / size[0]
+                    if x <= black_end:
+                        color = (0, 0, 0)
+                    else:
+                        color_value = int(((x - black_end) / (size[0] - black_end)) * 255)
+                        color = (color_value, color_value, color_value)
+                    draw.line([(x, 0), (x, size[1])], fill=color)
+
+            return image.convert("L")
+
+        # crop_data holds (size, (left, top, right, bottom)); the unpack below binds
+        # the left coordinate to `top` and the top coordinate to `left`, so the
+        # edge tests that follow use the transposed names accordingly.
+        crop_size, (top, left, right, bottom) = crop_data
+        crop_image = crop_image.resize(crop_size)
+
+        if sharpen_amount > 0:
+            for _ in range(int(sharpen_amount)):
+                crop_image = crop_image.filter(ImageFilter.SHARPEN)
+
+        blended_image = Image.new('RGBA', image.size, (0, 0, 0, 255))
+        blended_mask = Image.new('L', image.size, 0)
+        crop_padded = Image.new('RGBA', image.size, (0, 0, 0, 0))
+        blended_image.paste(image, (0, 0))
+        crop_padded.paste(crop_image, (top, left))
+        crop_mask = Image.new('L', crop_image.size, 0)
+
+        if left > 0:
+            gradient_image = ImageOps.flip(lingrad(crop_image.size, 'vertical', blend_amount))
+            crop_mask = ImageChops.screen(crop_mask, gradient_image)
+
+        if top > 0:
+            gradient_image = ImageOps.mirror(lingrad(crop_image.size, 'horizontal', blend_amount))
+            crop_mask = ImageChops.screen(crop_mask, gradient_image)
+
+        if right < image.width:
+            gradient_image = lingrad(crop_image.size, 'horizontal', blend_amount)
+
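# each edge of the crop that falls inside the target image gets a linear
+        # gradient screened into the mask so the pasted region feathers smoothly
+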
crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if bottom < image.height: + gradient_image = lingrad(crop_image.size, 'vertical', blend_amount) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + crop_mask = ImageOps.invert(crop_mask) + blended_mask.paste(crop_mask, (top, left)) + blended_mask = blended_mask.convert("L") + blended_image.paste(crop_padded, (0, 0), blended_mask) + + return (pil2tensor(blended_image.convert("RGB")), pil2tensor(blended_mask.convert("RGB"))) + + +# IMAGE CROP LOCATION + +class WAS_Image_Crop_Location: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "top": ("INT", {"default":0, "max": 10000000, "min":0, "step":1}), + "left": ("INT", {"default":0, "max": 10000000, "min":0, "step":1}), + "right": ("INT", {"default":256, "max": 10000000, "min":0, "step":1}), + "bottom": ("INT", {"default":256, "max": 10000000, "min":0, "step":1}), + } + } + + RETURN_TYPES = ("IMAGE", "CROP_DATA") + FUNCTION = "image_crop_location" + + CATEGORY = "WAS Suite/Image/Process" + + def image_crop_location(self, image, top=0, left=0, right=256, bottom=256): + image = tensor2pil(image) + img_width, img_height = image.size + + # Calculate the final coordinates for cropping + crop_top = max(top, 0) + crop_left = max(left, 0) + crop_bottom = min(bottom, img_height) + crop_right = min(right, img_width) + + # Ensure that the cropping region has non-zero width and height + crop_width = crop_right - crop_left + crop_height = crop_bottom - crop_top + if crop_width <= 0 or crop_height <= 0: + raise ValueError("Invalid crop dimensions. Please check the values for top, left, right, and bottom.") + + # Crop the image and resize + crop = image.crop((crop_left, crop_top, crop_right, crop_bottom)) + crop_data = (crop.size, (crop_left, crop_top, crop_right, crop_bottom)) + crop = crop.resize((((crop.size[0] // 8) * 8), ((crop.size[1] // 8) * 8))) + + return (pil2tensor(crop), crop_data) + + +# IMAGE SQUARE CROP LOCATION + +class WAS_Image_Crop_Square_Location: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "x": ("INT", {"default":0, "max": 24576, "min":0, "step":1}), + "y": ("INT", {"default":0, "max": 24576, "min":0, "step":1}), + "size": ("INT", {"default":256, "max": 4096, "min":5, "step":1}), + } + } + + RETURN_TYPES = ("IMAGE", "CROP_DATA") + FUNCTION = "image_crop_location" + + CATEGORY = "WAS Suite/Image/Process" + + def image_crop_location(self, image, x=256, y=256, size=512): + + image = tensor2pil(image) + img_width, img_height = image.size + exp_size = size // 2 + left = max(x - exp_size, 0) + top = max(y - exp_size, 0) + right = min(x + exp_size, img_width) + bottom = min(y + exp_size, img_height) + + if right - left < size: + if right < img_width: + right = min(right + size - (right - left), img_width) + elif left > 0: + left = max(left - (size - (right - left)), 0) + if bottom - top < size: + if bottom < img_height: + bottom = min(bottom + size - (bottom - top), img_height) + elif top > 0: + top = max(top - (size - (bottom - top)), 0) + + crop = image.crop((left, top, right, bottom)) + + # Original Crop Data + crop_data = (crop.size, (left, top, right, bottom)) + + # Output resize + crop = crop.resize((((crop.size[0] // 8) * 8), ((crop.size[1] // 8) * 8))) + + return (pil2tensor(crop), crop_data) + + +# IMAGE SQUARE CROP LOCATION + +class WAS_Image_Tile_Batch: + def __init__(self): + pass + + @classmethod + def 
INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "num_tiles": ("INT", {"default":4, "max": 64, "min":2, "step":1}), + } + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("IMAGES",) + FUNCTION = "tile_image" + + CATEGORY = "WAS Suite/Image/Process" + + def tile_image(self, image, num_tiles=6): + image = tensor2pil(image.squeeze(0)) + img_width, img_height = image.size + + num_rows = int(num_tiles ** 0.5) + num_cols = (num_tiles + num_rows - 1) // num_rows + tile_width = img_width // num_cols + tile_height = img_height // num_rows + + tiles = [] + for y in range(0, img_height, tile_height): + for x in range(0, img_width, tile_width): + tile = image.crop((x, y, x + tile_width, y + tile_height)) + tiles.append(pil2tensor(tile)) + + tiles = torch.stack(tiles, dim=0).squeeze(1) + + return (tiles, ) + + +# IMAGE PASTE CROP + +class WAS_Image_Paste_Crop: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "crop_image": ("IMAGE",), + "crop_data": ("CROP_DATA",), + "crop_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}), + "crop_sharpening": ("INT", {"default": 0, "min": 0, "max": 3, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE") + FUNCTION = "image_paste_crop" + + CATEGORY = "WAS Suite/Image/Process" + + def image_paste_crop(self, image, crop_image, crop_data=None, crop_blending=0.25, crop_sharpening=0): + + if crop_data == False: + cstr("No valid crop data found!").error.print() + return (image, pil2tensor(Image.new("RGB", tensor2pil(image).size, (0,0,0)))) + + result_image, result_mask = self.paste_image(tensor2pil(image), tensor2pil(crop_image), crop_data, crop_blending, crop_sharpening) + + return (result_image, result_mask) + + def paste_image(self, image, crop_image, crop_data, blend_amount=0.25, sharpen_amount=1): + + def lingrad(size, direction, white_ratio): + image = Image.new('RGB', size) + draw = ImageDraw.Draw(image) + if direction == 'vertical': + black_end = int(size[1] * (1 - white_ratio)) + range_start = 0 + range_end = size[1] + range_step = 1 + for y in range(range_start, range_end, range_step): + color_ratio = y / size[1] + if y <= black_end: + color = (0, 0, 0) + else: + color_value = int(((y - black_end) / (size[1] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(0, y), (size[0], y)], fill=color) + elif direction == 'horizontal': + black_end = int(size[0] * (1 - white_ratio)) + range_start = 0 + range_end = size[0] + range_step = 1 + for x in range(range_start, range_end, range_step): + color_ratio = x / size[0] + if x <= black_end: + color = (0, 0, 0) + else: + color_value = int(((x - black_end) / (size[0] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(x, 0), (x, size[1])], fill=color) + + return image.convert("L") + + crop_size, (left, top, right, bottom) = crop_data + crop_image = crop_image.resize(crop_size) + + if sharpen_amount > 0: + for _ in range(int(sharpen_amount)): + crop_image = crop_image.filter(ImageFilter.SHARPEN) + + blended_image = Image.new('RGBA', image.size, (0, 0, 0, 255)) + blended_mask = Image.new('L', image.size, 0) + crop_padded = Image.new('RGBA', image.size, (0, 0, 0, 0)) + blended_image.paste(image, (0, 0)) + crop_padded.paste(crop_image, (left, top)) + crop_mask = Image.new('L', crop_image.size, 0) + + if top > 0: + gradient_image = ImageOps.flip(lingrad(crop_image.size, 'vertical', blend_amount)) + crop_mask = ImageChops.screen(crop_mask, 
gradient_image) + + if left > 0: + gradient_image = ImageOps.mirror(lingrad(crop_image.size, 'horizontal', blend_amount)) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if right < image.width: + gradient_image = lingrad(crop_image.size, 'horizontal', blend_amount) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if bottom < image.height: + gradient_image = lingrad(crop_image.size, 'vertical', blend_amount) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + crop_mask = ImageOps.invert(crop_mask) + blended_mask.paste(crop_mask, (left, top)) + blended_mask = blended_mask.convert("L") + blended_image.paste(crop_padded, (0, 0), blended_mask) + + return (pil2tensor(blended_image.convert("RGB")), pil2tensor(blended_mask.convert("RGB"))) + + +# IMAGE PASTE CROP BY LOCATION + +class WAS_Image_Paste_Crop_Location: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "crop_image": ("IMAGE",), + "top": ("INT", {"default":0, "max": 10000000, "min":0, "step":1}), + "left": ("INT", {"default":0, "max": 10000000, "min":0, "step":1}), + "right": ("INT", {"default":256, "max": 10000000, "min":0, "step":1}), + "bottom": ("INT", {"default":256, "max": 10000000, "min":0, "step":1}), + "crop_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}), + "crop_sharpening": ("INT", {"default": 0, "min": 0, "max": 3, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE") + FUNCTION = "image_paste_crop_location" + + CATEGORY = "WAS Suite/Image/Process" + + def image_paste_crop_location(self, image, crop_image, top=0, left=0, right=256, bottom=256, crop_blending=0.25, crop_sharpening=0): + result_image, result_mask = self.paste_image(tensor2pil(image), tensor2pil(crop_image), top, left, right, bottom, crop_blending, crop_sharpening) + return (result_image, result_mask) + + def paste_image(self, image, crop_image, top=0, left=0, right=256, bottom=256, blend_amount=0.25, sharpen_amount=1): + + image = image.convert("RGBA") + crop_image = crop_image.convert("RGBA") + + def inset_border(image, border_width=20, border_color=(0)): + width, height = image.size + bordered_image = Image.new(image.mode, (width, height), border_color) + bordered_image.paste(image, (0, 0)) + draw = ImageDraw.Draw(bordered_image) + draw.rectangle((0, 0, width-1, height-1), outline=border_color, width=border_width) + return bordered_image + + img_width, img_height = image.size + + # Ensure that the coordinates are within the image bounds + top = min(max(top, 0), img_height) + left = min(max(left, 0), img_width) + bottom = min(max(bottom, 0), img_height) + right = min(max(right, 0), img_width) + + crop_size = (right - left, bottom - top) + crop_img = crop_image.resize(crop_size) + crop_img = crop_img.convert("RGBA") + + if sharpen_amount > 0: + for _ in range(sharpen_amount): + crop_img = crop_img.filter(ImageFilter.SHARPEN) + + if blend_amount > 1.0: + blend_amount = 1.0 + elif blend_amount < 0.0: + blend_amount = 0.0 + blend_ratio = (max(crop_size) / 2) * float(blend_amount) + + blend = image.copy() + mask = Image.new("L", image.size, 0) + + mask_block = Image.new("L", crop_size, 255) + mask_block = inset_border(mask_block, int(blend_ratio/2), (0)) + + Image.Image.paste(mask, mask_block, (left, top)) + blend.paste(crop_img, (left, top), crop_img) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio/4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio/4)) + + blend.putalpha(mask) + image = 
Image.alpha_composite(image, blend) + + return (pil2tensor(image), pil2tensor(mask.convert('RGB'))) + + +# IMAGE GRID IMAGE + +class WAS_Image_Grid_Image_Batch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "border_width": ("INT", {"default":3, "min": 0, "max": 100, "step":1}), + "number_of_columns": ("INT", {"default":6, "min": 1, "max": 24, "step":1}), + "max_cell_size": ("INT", {"default":256, "min":32, "max":2048, "step":1}), + "border_red": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + "border_green": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + "border_blue": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "smart_grid_image" + + CATEGORY = "WAS Suite/Image/Process" + + def smart_grid_image(self, images, number_of_columns=6, max_cell_size=256, add_border=False, border_red=255, border_green=255, border_blue=255, border_width=3): + + cols = number_of_columns + border_color = (border_red, border_green, border_blue) + + images_resized = [] + max_row_height = 0 + + for tensor_img in images: + img = tensor2pil(tensor_img) + img_w, img_h = img.size + aspect_ratio = img_w / img_h + + if img_w > img_h: + cell_w = min(img_w, max_cell_size) + cell_h = int(cell_w / aspect_ratio) + else: + cell_h = min(img_h, max_cell_size) + cell_w = int(cell_h * aspect_ratio) + + img_resized = img.resize((cell_w, cell_h)) + + if add_border: + img_resized = ImageOps.expand(img_resized, border=border_width // 2, fill=border_color) + + images_resized.append(img_resized) + max_row_height = max(max_row_height, cell_h) + + max_row_height = int(max_row_height) + total_images = len(images_resized) + rows = math.ceil(total_images / cols) + + grid_width = cols * max_cell_size + (cols - 1) * border_width + grid_height = rows * max_row_height + (rows - 1) * border_width + + new_image = Image.new('RGB', (grid_width, grid_height), border_color) + + for i, img in enumerate(images_resized): + x = (i % cols) * (max_cell_size + border_width) + y = (i // cols) * (max_row_height + border_width) + + img_w, img_h = img.size + paste_x = x + (max_cell_size - img_w) // 2 + paste_y = y + (max_row_height - img_h) // 2 + + new_image.paste(img, (paste_x, paste_y, paste_x + img_w, paste_y + img_h)) + + if add_border: + new_image = ImageOps.expand(new_image, border=border_width, fill=border_color) + + return (pil2tensor(new_image), ) + + +# IMAGE GRID IMAGE FROM PATH + +class WAS_Image_Grid_Image: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images_path": ("STRING", {"default":"./ComfyUI/input/", "multiline": False}), + "pattern_glob": ("STRING", {"default":"*", "multiline": False}), + "include_subfolders": (["false", "true"],), + "border_width": ("INT", {"default":3, "min": 0, "max": 100, "step":1}), + "number_of_columns": ("INT", {"default":6, "min": 1, "max": 24, "step":1}), + "max_cell_size": ("INT", {"default":256, "min":32, "max":1280, "step":1}), + "border_red": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + "border_green": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + "border_blue": ("INT", {"default":0, "min": 0, "max": 255, "step":1}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "create_grid_image" + + CATEGORY = "WAS Suite/Image/Process" + + def create_grid_image(self, images_path, pattern_glob="*", include_subfolders="false", number_of_columns=6, + max_cell_size=256, border_width=3, 
border_red=0, border_green=0, border_blue=0): + + if not os.path.exists(images_path): + cstr(f"The grid image path `{images_path}` does not exist!").error.print() + return (pil2tensor(Image.new("RGB", (512,512), (0,0,0))),) + + paths = glob.glob(os.path.join(images_path, pattern_glob), recursive=(False if include_subfolders == "false" else True)) + image_paths = [] + for path in paths: + if path.lower().endswith(ALLOWED_EXT) and os.path.exists(path): + image_paths.append(path) + + grid_image = self.smart_grid_image(image_paths, int(number_of_columns), (int(max_cell_size), int(max_cell_size)), + (False if border_width <= 0 else True), (int(border_red), + int(border_green), int(border_blue)), int(border_width)) + + return (pil2tensor(grid_image),) + + def smart_grid_image(self, images, cols=6, size=(256,256), add_border=False, border_color=(0,0,0), border_width=3): + + # calculate row height + max_width, max_height = size + row_height = 0 + images_resized = [] + for image in images: + img = Image.open(image).convert('RGB') + + img_w, img_h = img.size + aspect_ratio = img_w / img_h + if aspect_ratio > 1: # landscape + thumb_w = min(max_width, img_w-border_width) + thumb_h = thumb_w / aspect_ratio + else: # portrait + thumb_h = min(max_height, img_h-border_width) + thumb_w = thumb_h * aspect_ratio + + # pad the image to match the maximum size and center it within the cell + pad_w = max_width - int(thumb_w) + pad_h = max_height - int(thumb_h) + left = pad_w // 2 + top = pad_h // 2 + right = pad_w - left + bottom = pad_h - top + padding = (left, top, right, bottom) # left, top, right, bottom + img_resized = ImageOps.expand(img.resize((int(thumb_w), int(thumb_h))), padding) + + if add_border: + img_resized_bordered = ImageOps.expand(img_resized, border=border_width//2, fill=border_color) + + images_resized.append(img_resized) + row_height = max(row_height, img_resized.size[1]) + row_height = int(row_height) + + # calculate the number of rows + total_images = len(images_resized) + rows = math.ceil(total_images / cols) + + # create empty image to put thumbnails + new_image = Image.new('RGB', (cols*size[0]+(cols-1)*border_width, rows*row_height+(rows-1)*border_width), border_color) + + for i, img in enumerate(images_resized): + if add_border: + border_img = ImageOps.expand(img, border=border_width//2, fill=border_color) + x = (i % cols) * (size[0]+border_width) + y = (i // cols) * (row_height+border_width) + if border_img.size == (size[0], size[1]): + new_image.paste(border_img, (x, y, x+size[0], y+size[1])) + else: + # Resize image to match size parameter + border_img = border_img.resize((size[0], size[1])) + new_image.paste(border_img, (x, y, x+size[0], y+size[1])) + else: + x = (i % cols) * (size[0]+border_width) + y = (i // cols) * (row_height+border_width) + if img.size == (size[0], size[1]): + new_image.paste(img, (x, y, x+img.size[0], y+img.size[1])) + else: + # Resize image to match size parameter + img = img.resize((size[0], size[1])) + new_image.paste(img, (x, y, x+size[0], y+size[1])) + + new_image = ImageOps.expand(new_image, border=border_width, fill=border_color) + + return new_image + +# IMAGE MORPH GIF + +class WAS_Image_Morph_GIF: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_a": ("IMAGE",), + "image_b": ("IMAGE",), + "transition_frames": ("INT", {"default":30, "min":2, "max":60, "step":1}), + "still_image_delay_ms": ("FLOAT", {"default":2500.0, "min":0.1, "max":60000.0, "step":0.1}), + "duration_ms": ("FLOAT", 
{"default":0.1, "min":0.1, "max":60000.0, "step":0.1}), + "loops": ("INT", {"default":0, "min":0, "max":100, "step":1}), + "max_size": ("INT", {"default":512, "min":128, "max":1280, "step":1}), + "output_path": ("STRING", {"default": "./ComfyUI/output", "multiline": False}), + "filename": ("STRING", {"default": "morph", "multiline": False}), + "filetype": (["GIF", "APNG"],), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = ("IMAGE","IMAGE",TEXT_TYPE,TEXT_TYPE) + RETURN_NAMES = ("image_a_pass","image_b_pass","filepath_text","filename_text") + FUNCTION = "create_morph_gif" + + CATEGORY = "WAS Suite/Animation" + + def create_morph_gif(self, image_a, image_b, transition_frames=10, still_image_delay_ms=10, duration_ms=0.1, loops=0, max_size=512, + output_path="./ComfyUI/output", filename="morph", filetype="GIF"): + + tokens = TextTokens() + WTools = WAS_Tools_Class() + + if 'imageio' not in packages(): + install_package('imageio') + + if filetype not in ["APNG", "GIF"]: + filetype = "GIF" + if output_path.strip() in [None, "", "."]: + output_path = "./ComfyUI/output" + output_path = tokens.parseTokens(os.path.join(*output_path.split('/'))) + if not os.path.exists(output_path): + os.makedirs(output_path, exist_ok=True) + + if image_a == None: + image_a = pil2tensor(Image.new("RGB", (512,512), (0,0,0))) + if image_b == None: + image_b = pil2tensor(Image.new("RGB", (512,512), (255,255,255))) + + if transition_frames < 2: + transition_frames = 2 + elif transition_frames > 60: + transition_frames = 60 + + if duration_ms < 0.1: + duration_ms = 0.1 + elif duration_ms > 60000.0: + duration_ms = 60000.0 + + output_file = WTools.morph_images([tensor2pil(image_a), tensor2pil(image_b)], steps=int(transition_frames), max_size=int(max_size), loop=int(loops), + still_duration=int(still_image_delay_ms), duration=int(duration_ms), output_path=output_path, + filename=tokens.parseTokens(filename), filetype=filetype) + + return (image_a, image_b, output_file) + + +# IMAGE MORPH GIF WRITER + +class WAS_Image_Morph_GIF_Writer: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "transition_frames": ("INT", {"default":30, "min":2, "max":60, "step":1}), + "image_delay_ms": ("FLOAT", {"default":2500.0, "min":0.1, "max":60000.0, "step":0.1}), + "duration_ms": ("FLOAT", {"default":0.1, "min":0.1, "max":60000.0, "step":0.1}), + "loops": ("INT", {"default":0, "min":0, "max":100, "step":1}), + "max_size": ("INT", {"default":512, "min":128, "max":1280, "step":1}), + "output_path": ("STRING", {"default": comfy_paths.output_directory, "multiline": False}), + "filename": ("STRING", {"default": "morph_writer", "multiline": False}), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = ("IMAGE",TEXT_TYPE,TEXT_TYPE) + RETURN_NAMES = ("image_pass","filepath_text","filename_text") + FUNCTION = "write_to_morph_gif" + + CATEGORY = "WAS Suite/Animation/Writer" + + def write_to_morph_gif(self, image, transition_frames=10, image_delay_ms=10, duration_ms=0.1, loops=0, max_size=512, + output_path="./ComfyUI/output", filename="morph"): + + if 'imageio' not in packages(): + install_package("imageio") + + if output_path.strip() in [None, "", "."]: + output_path = "./ComfyUI/output" + + if image is None: + image = pil2tensor(Image.new("RGB", (512, 512), (0, 0, 0))).unsqueeze(0) + + if transition_frames < 2: + transition_frames = 2 + elif transition_frames > 60: + transition_frames 
= 60 + + if duration_ms < 0.1: + duration_ms = 0.1 + elif duration_ms > 60000.0: + duration_ms = 60000.0 + + tokens = TextTokens() + output_path = os.path.abspath(os.path.join(*tokens.parseTokens(output_path).split('/'))) + output_file = os.path.join(output_path, tokens.parseTokens(filename) + '.gif') + + if not os.path.exists(output_path): + os.makedirs(output_path, exist_ok=True) + + WTools = WAS_Tools_Class() + GifMorph = WTools.GifMorphWriter(int(transition_frames), int(duration_ms), int(image_delay_ms)) + + for img in image: + pil_img = tensor2pil(img) + GifMorph.write(pil_img, output_file) + + return (image, output_file, filename) + +# IMAGE MORPH GIF BY PATH + +class WAS_Image_Morph_GIF_By_Path: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "transition_frames": ("INT", {"default":30, "min":2, "max":60, "step":1}), + "still_image_delay_ms": ("FLOAT", {"default":2500.0, "min":0.1, "max":60000.0, "step":0.1}), + "duration_ms": ("FLOAT", {"default":0.1, "min":0.1, "max":60000.0, "step":0.1}), + "loops": ("INT", {"default":0, "min":0, "max":100, "step":1}), + "max_size": ("INT", {"default":512, "min":128, "max":1280, "step":1}), + "input_path": ("STRING",{"default":"./ComfyUI", "multiline": False}), + "input_pattern": ("STRING",{"default":"*", "multiline": False}), + "output_path": ("STRING", {"default": "./ComfyUI/output", "multiline": False}), + "filename": ("STRING", {"default": "morph", "multiline": False}), + "filetype": (["GIF", "APNG"],), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE) + RETURN_NAMES = ("filepath_text","filename_text") + FUNCTION = "create_morph_gif" + + CATEGORY = "WAS Suite/Animation" + + def create_morph_gif(self, transition_frames=30, still_image_delay_ms=2500, duration_ms=0.1, loops=0, max_size=512, + input_path="./ComfyUI/output", input_pattern="*", output_path="./ComfyUI/output", filename="morph", filetype="GIF"): + + if 'imageio' not in packages(): + install_package("imageio") + + if not os.path.exists(input_path): + cstr(f"The input_path `{input_path}` does not exist!").error.print() + return ("",) + + images = self.load_images(input_path, input_pattern) + if not images: + cstr(f"The input_path `{input_path}` does not contain any valid images!").msg.print() + return ("",) + + if filetype not in ["APNG", "GIF"]: + filetype = "GIF" + if output_path.strip() in [None, "", "."]: + output_path = "./ComfyUI/output" + + if transition_frames < 2: + transition_frames = 2 + elif transition_frames > 60: + transition_frames = 60 + + if duration_ms < 0.1: + duration_ms = 0.1 + elif duration_ms > 60000.0: + duration_ms = 60000.0 + + tokens = TextTokens() + WTools = WAS_Tools_Class() + + output_file = WTools.morph_images(images, steps=int(transition_frames), max_size=int(max_size), loop=int(loops), still_duration=int(still_image_delay_ms), + duration=int(duration_ms), output_path=tokens.parseTokens(os.path.join(*output_path.split('/'))), + filename=tokens.parseTokens(filename), filetype=filetype) + + return (output_file,filename) + + + def load_images(self, directory_path, pattern): + images = [] + for file_name in glob.glob(os.path.join(directory_path, pattern), recursive=False): + if file_name.lower().endswith(ALLOWED_EXT): + images.append(Image.open(file_name).convert("RGB")) + return images + + +# COMBINE NODE + +class WAS_Image_Blending_Mode: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + 
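# image_b is composited onto image_a with the selected CSS-style blend mode,
+                # then the result is mixed back toward image_a by blend_percentage
+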
"image_a": ("IMAGE",), + "image_b": ("IMAGE",), + "mode": ([ + "add", + "color", + "color_burn", + "color_dodge", + "darken", + "difference", + "exclusion", + "hard_light", + "hue", + "lighten", + "multiply", + "overlay", + "screen", + "soft_light" + ],), + "blend_percentage": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "image_blending_mode" + + CATEGORY = "WAS Suite/Image" + + def image_blending_mode(self, image_a, image_b, mode='add', blend_percentage=1.0): + + # Install Pilgram + if 'pilgram' not in packages(): + install_package("pilgram") + + # Import Pilgram module + import pilgram + + # Convert images to PIL + img_a = tensor2pil(image_a) + img_b = tensor2pil(image_b) + + # Apply blending + if mode: + if mode == "color": + out_image = pilgram.css.blending.color(img_a, img_b) + elif mode == "color_burn": + out_image = pilgram.css.blending.color_burn(img_a, img_b) + elif mode == "color_dodge": + out_image = pilgram.css.blending.color_dodge(img_a, img_b) + elif mode == "darken": + out_image = pilgram.css.blending.darken(img_a, img_b) + elif mode == "difference": + out_image = pilgram.css.blending.difference(img_a, img_b) + elif mode == "exclusion": + out_image = pilgram.css.blending.exclusion(img_a, img_b) + elif mode == "hard_light": + out_image = pilgram.css.blending.hard_light(img_a, img_b) + elif mode == "hue": + out_image = pilgram.css.blending.hue(img_a, img_b) + elif mode == "lighten": + out_image = pilgram.css.blending.lighten(img_a, img_b) + elif mode == "multiply": + out_image = pilgram.css.blending.multiply(img_a, img_b) + elif mode == "add": + out_image = pilgram.css.blending.normal(img_a, img_b) + elif mode == "overlay": + out_image = pilgram.css.blending.overlay(img_a, img_b) + elif mode == "screen": + out_image = pilgram.css.blending.screen(img_a, img_b) + elif mode == "soft_light": + out_image = pilgram.css.blending.soft_light(img_a, img_b) + else: + out_image = img_a + + out_image = out_image.convert("RGB") + + # Blend image + blend_mask = Image.new(mode="L", size=img_a.size, + color=(round(blend_percentage * 255))) + blend_mask = ImageOps.invert(blend_mask) + out_image = Image.composite(img_a, out_image, blend_mask) + + return (pil2tensor(out_image), ) + + +# IMAGE BLEND NODE + +class WAS_Image_Blend: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_a": ("IMAGE",), + "image_b": ("IMAGE",), + "blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "image_blend" + + CATEGORY = "WAS Suite/Image" + + def image_blend(self, image_a, image_b, blend_percentage): + + # Convert images to PIL + img_a = tensor2pil(image_a) + img_b = tensor2pil(image_b) + + # Blend image + blend_mask = Image.new(mode="L", size=img_a.size, + color=(round(blend_percentage * 255))) + blend_mask = ImageOps.invert(blend_mask) + img_result = Image.composite(img_a, img_b, blend_mask) + + del img_a, img_b, blend_mask + + return (pil2tensor(img_result), ) + + + +# IMAGE MONITOR DISTORTION FILTER + +class WAS_Image_Monitor_Distortion_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "mode": (["Digital Distortion", "Signal Distortion", "TV Distortion"],), + "amplitude": ("INT", {"default": 5, "min": 1, "max": 255, "step": 1}), + "offset": ("INT", {"default": 10, 
"min": 1, "max": 255, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "image_monitor_filters" + + CATEGORY = "WAS Suite/Image/Filter" + + def image_monitor_filters(self, image, mode="Digital Distortion", amplitude=5, offset=5): + + # Convert images to PIL + image = tensor2pil(image) + + # WAS Filters + WTools = WAS_Tools_Class() + + # Apply image effect + if mode: + if mode == 'Digital Distortion': + image = WTools.digital_distortion(image, amplitude, offset) + elif mode == 'Signal Distortion': + image = WTools.signal_distortion(image, amplitude) + elif mode == 'TV Distortion': + image = WTools.tv_vhs_distortion(image, amplitude) + else: + image = image + + return (pil2tensor(image), ) + + + +# IMAGE PERLIN NOISE + +class WAS_Image_Perlin_Noise: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "width": ("INT", {"default": 512, "max": 2048, "min": 64, "step": 1}), + "height": ("INT", {"default": 512, "max": 2048, "min": 64, "step": 1}), + "scale": ("INT", {"default": 100, "max": 2048, "min": 2, "step": 1}), + "octaves": ("INT", {"default": 4, "max": 8, "min": 0, "step": 1}), + "persistence": ("FLOAT", {"default": 0.5, "max": 100.0, "min": 0.01, "step": 0.01}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "perlin_noise" + + CATEGORY = "WAS Suite/Image/Generate/Noise" + + def perlin_noise(self, width, height, scale, octaves, persistence, seed): + + WTools = WAS_Tools_Class() + + image = WTools.perlin_noise(width, height, octaves, persistence, scale, seed) + + return (pil2tensor(image), ) + + +# IMAGE PERLIN POWER FRACTAL + +class WAS_Image_Perlin_Power_Fractal: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "width": ("INT", {"default": 512, "max": 8192, "min": 64, "step": 1}), + "height": ("INT", {"default": 512, "max": 8192, "min": 64, "step": 1}), + "scale": ("INT", {"default": 100, "max": 2048, "min": 2, "step": 1}), + "octaves": ("INT", {"default": 4, "max": 8, "min": 0, "step": 1}), + "persistence": ("FLOAT", {"default": 0.5, "max": 100.0, "min": 0.01, "step": 0.01}), + "lacunarity": ("FLOAT", {"default": 2.0, "max": 100.0, "min": 0.01, "step": 0.01}), + "exponent": ("FLOAT", {"default": 2.0, "max": 100.0, "min": 0.01, "step": 0.01}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "perlin_power_fractal" + + CATEGORY = "WAS Suite/Image/Generate/Noise" + + def perlin_power_fractal(self, width, height, scale, octaves, persistence, lacunarity, exponent, seed): + + WTools = WAS_Tools_Class() + + image = WTools.perlin_power_fractal(width, height, octaves, persistence, lacunarity, exponent, scale, seed) + + return (pil2tensor(image), ) + + +# IMAGE VORONOI NOISE FILTER + +class WAS_Image_Voronoi_Noise_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "width": ("INT", {"default": 512, "max": 4096, "min": 64, "step": 1}), + "height": ("INT", {"default": 512, "max": 4096, "min": 64, "step": 1}), + "density": ("INT", {"default": 50, "max": 256, "min": 10, "step": 2}), + "modulator": ("INT", {"default": 0, "max": 8, "min": 0, "step": 1}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + "optional": { + "flat": (["False", "True"],), + "RGB_output": (["True", 
"False"],), + } + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "voronoi_noise_filter" + + CATEGORY = "WAS Suite/Image/Generate/Noise" + + def voronoi_noise_filter(self, width, height, density, modulator, seed, flat="False", RGB_output="True"): + + WTools = WAS_Tools_Class() + + image = WTools.worley_noise(height=height, width=width, density=density, option=modulator, use_broadcast_ops=True, seed=seed, flat=(flat == "True")).image + + if RGB_output == "True": + image = image.convert("RGB") + else: + image = image.convert("L") + + return (pil2tensor(image), ) + +# IMAGE POWER NOISE + +class WAS_Image_Power_Noise: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "width": ("INT", {"default": 512, "max": 4096, "min": 64, "step": 1}), + "height": ("INT", {"default": 512, "max": 4096, "min": 64, "step": 1}), + "frequency": ("FLOAT", {"default": 0.5, "max": 10.0, "min": 0.0, "step": 0.01}), + "attenuation": ("FLOAT", {"default": 0.5, "max": 10.0, "min": 0.0, "step": 0.01}), + "noise_type": (["grey", "white", "pink", "blue", "green", "mix"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "power_noise" + + CATEGORY = "WAS Suite/Image/Generate/Noise" + + def power_noise(self, width, height, frequency, attenuation, noise_type, seed): + + noise_image = self.generate_power_noise(width, height, frequency, attenuation, noise_type, seed) + + return (pil2tensor(noise_image), ) + + def generate_power_noise(self, width, height, frequency=None, attenuation=None, noise_type="white", seed=None): + def white_noise(width, height): + noise = np.random.random((height, width)) + return noise + + def grey_noise(width, height, attenuation): + noise = np.random.normal(0, attenuation, (height, width)) + return noise + + def blue_noise(width, height, frequency, attenuation): + noise = grey_noise(width, height, attenuation) + scale = 1.0 / (width * height) + fy = np.fft.fftfreq(height)[:, np.newaxis] ** 2 + fx = np.fft.fftfreq(width) ** 2 + f = fy + fx + power = np.sqrt(f) + power[0, 0] = 1 + noise = np.fft.ifft2(np.fft.fft2(noise) / power) + noise *= scale / noise.std() + return np.real(noise) + + def green_noise(width, height, frequency, attenuation): + noise = grey_noise(width, height, attenuation) + scale = 1.0 / (width * height) + fy = np.fft.fftfreq(height)[:, np.newaxis] ** 2 + fx = np.fft.fftfreq(width) ** 2 + f = fy + fx + power = np.sqrt(f) + power[0, 0] = 1 + noise = np.fft.ifft2(np.fft.fft2(noise) / np.sqrt(power)) + noise *= scale / noise.std() + return np.real(noise) + + def pink_noise(width, height, frequency, attenuation): + noise = grey_noise(width, height, attenuation) + scale = 1.0 / (width * height) + fy = np.fft.fftfreq(height)[:, np.newaxis] ** 2 + fx = np.fft.fftfreq(width) ** 2 + f = fy + fx + power = np.sqrt(f) + power[0, 0] = 1 + noise = np.fft.ifft2(np.fft.fft2(noise) * power) + noise *= scale / noise.std() + return np.real(noise) + + def blue_noise_mask(width, height, frequency, attenuation, seed, num_masks=3): + masks = [] + for i in range(num_masks): + mask_seed = seed + i + np.random.seed(mask_seed) + mask = blue_noise(width, height, frequency, attenuation) + masks.append(mask) + return masks + + def blend_noise(width, height, masks, noise_types, attenuations): + blended_image = Image.new("L", (width, height), color=0) + fy = np.fft.fftfreq(height)[:, np.newaxis] ** 2 + fx = np.fft.fftfreq(width) ** 2 + f = fy 
+ fx + i = 0 + for mask, noise_type, attenuation in zip(masks, noise_types, attenuations): + mask = Image.fromarray((255 * (mask - np.min(mask)) / (np.max(mask) - np.min(mask))).astype(np.uint8).real) + if noise_type == "white": + noise = white_noise(width, height) + noise = Image.fromarray((255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise))).astype(np.uint8).real) + elif noise_type == "grey": + noise = grey_noise(width, height, attenuation) + noise = Image.fromarray((255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise))).astype(np.uint8).real) + elif noise_type == "pink": + noise = pink_noise(width, height, frequency, attenuation) + noise = Image.fromarray((255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise))).astype(np.uint8).real) + elif noise_type == "green": + noise = green_noise(width, height, frequency, attenuation) + noise = Image.fromarray((255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise))).astype(np.uint8).real) + elif noise_type == "blue": + noise = blue_noise(width, height, frequency, attenuation) + noise = Image.fromarray((255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise))).astype(np.uint8).real) + + blended_image = Image.composite(blended_image, noise, mask) + i += 1 + + return np.asarray(blended_image) + + def shorten_to_range(value, min_value, max_value): + range_length = max_value - min_value + 1 + return ((value - min_value) % range_length) + min_value + + if seed is not None: + if seed > 4294967294: + seed = shorten_to_range(seed, 0, 4294967293) + cstr(f"Seed too large for power noise; rescaled to: {seed}").warning.print() + + np.random.seed(seed) + + if noise_type == "white": + noise = white_noise(width, height) + elif noise_type == "grey": + noise = grey_noise(width, height, attenuation) + elif noise_type == "pink": + if frequency is None: + cstr("Pink noise requires a frequency value.").error.print() + return None + noise = pink_noise(width, height, frequency, attenuation) + elif noise_type == "green": + if frequency is None: + cstr("Green noise requires a frequency value.").error.print() + return None + noise = green_noise(width, height, frequency, attenuation) + elif noise_type == "blue": + if frequency is None: + cstr("Blue noise requires a frequency value.").error.print() + return None + noise = blue_noise(width, height, frequency, attenuation) + elif noise_type == "mix": + if frequency is None: + cstr("Mix noise requires a frequency value.").error.print() + return None + if seed is None: + cstr("Mix noise requires a seed value.").error.print() + return None + + blue_noise_masks = blue_noise_mask(width, height, frequency, attenuation, seed=seed, num_masks=3) + noise_types = ["white", "grey", "pink", "green", "blue"] + attenuations = [attenuation] * len(noise_types) + noise = blend_noise(width, height, blue_noise_masks, noise_types, attenuations) + else: + cstr(f"Unsupported noise type `{noise_type}`").error.print() + return None + if noise_type != 'mix': + noise = 255 * (noise - np.min(noise)) / (np.max(noise) - np.min(noise)) + noise_image = Image.fromarray(noise.astype(np.uint8).real) + + return noise_image.convert("RGB") + +# IMAGE TO NOISE + +class WAS_Image_To_Noise: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "num_colors": ("INT", {"default": 16, "max": 256, "min": 2, "step": 2}), + "black_mix": ("INT", {"default": 0, "max": 20, "min": 0, "step": 1}), + "gaussian_mix": ("FLOAT", {"default": 0.0, "max": 1024, "min": 0, 
"step": 0.1}), + "brightness": ("FLOAT", {"default": 1.0, "max": 2.0, "min": 0.0, "step": 0.01}), + "output_mode": (["batch","list"],), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + OUTPUT_IS_LIST = (False,) + FUNCTION = "image_to_noise" + + CATEGORY = "WAS Suite/Image/Generate/Noise" + + def image_to_noise(self, images, num_colors, black_mix, gaussian_mix, brightness, output_mode, seed): + + noise_images = [] + for image in images: + noise_images.append(pil2tensor(self.image2noise(tensor2pil(image), num_colors, black_mix, brightness, gaussian_mix, seed))) + if output_mode == "list": + self.OUTPUT_IS_LIST = (True,) + else: + noise_images = torch.cat(noise_images, dim=0) + return (noise_images, ) + + def image2noise(self, image, num_colors=16, black_mix=0, brightness=1.0, gaussian_mix=0, seed=0): + + random.seed(int(seed)) + image = image.quantize(colors=num_colors) + image = image.convert("RGBA") + pixel_data = list(image.getdata()) + random.shuffle(pixel_data) + randomized_image = Image.new("RGBA", image.size) + randomized_image.putdata(pixel_data) + + width, height = image.size + black_noise = Image.new("RGBA", (width, height), (0, 0, 0, 0)) + + for _ in range(black_mix): + for x in range(width): + for y in range(height): + if random.randint(0,1) == 1: + black_noise.putpixel((x, y), (0, 0, 0, 255)) + + randomized_image = Image.alpha_composite(randomized_image, black_noise) + enhancer = ImageEnhance.Brightness(randomized_image) + randomized_image = enhancer.enhance(brightness) + + if gaussian_mix > 0: + original_noise = randomized_image.copy() + randomized_gaussian = randomized_image.filter(ImageFilter.GaussianBlur(radius=gaussian_mix)) + randomized_image = Image.blend(randomized_image, randomized_gaussian, 0.65) + randomized_image = Image.blend(randomized_image, original_noise, 0.25) + + return randomized_image + +# IMAGE MAKE SEAMLESS + +class WAS_Image_Make_Seamless: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "blending": ("FLOAT", {"default": 0.4, "max": 1.0, "min": 0.0, "step": 0.01}), + "tiled": (["true", "false"],), + "tiles": ("INT", {"default": 2, "max": 6, "min": 2, "step": 2}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "make_seamless" + + CATEGORY = "WAS Suite/Image/Process" + + def make_seamless(self, images, blending, tiled, tiles): + + WTools = WAS_Tools_Class() + + seamless_images = [] + for image in images: + seamless_images.append(pil2tensor(WTools.make_seamless(tensor2pil(image), blending, tiled, tiles))) + + seamless_images = torch.cat(seamless_images, dim=0) + + return (seamless_images, ) + + +# IMAGE DISPLACEMENT WARP + +class WAS_Image_Displacement_Warp: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "displacement_maps": ("IMAGE",), + "amplitude": ("FLOAT", {"default": 25.0, "min": -4096, "max": 4096, "step": 0.1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "displace_image" + + CATEGORY = "WAS Suite/Image/Transform" + + def displace_image(self, images, displacement_maps, amplitude): + + WTools = WAS_Tools_Class() + + displaced_images = [] + for i in range(len(images)): + img = tensor2pil(images[i]) + if i < len(displacement_maps): + disp = tensor2pil(displacement_maps[i]) + else: + disp = tensor2pil(displacement_maps[-1]) + disp = 
self.resize_and_crop(disp, img.size) + displaced_images.append(pil2tensor(WTools.displace_image(img, disp, amplitude))) + + displaced_images = torch.cat(displaced_images, dim=0) + + return (displaced_images, ) + + + def resize_and_crop(self, image, target_size): + width, height = image.size + target_width, target_height = target_size + aspect_ratio = width / height + target_aspect_ratio = target_width / target_height + + if aspect_ratio > target_aspect_ratio: + new_height = target_height + new_width = int(new_height * aspect_ratio) + else: + new_width = target_width + new_height = int(new_width / aspect_ratio) + + image = image.resize((new_width, new_height)) + left = (new_width - target_width) // 2 + top = (new_height - target_height) // 2 + right = left + target_width + bottom = top + target_height + image = image.crop((left, top, right, bottom)) + + return image + +# IMAGE TO BATCH + +class WAS_Image_Batch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + }, + "optional": { + "images_a": ("IMAGE",), + "images_b": ("IMAGE",), + "images_c": ("IMAGE",), + "images_d": ("IMAGE",), + # "images_e": ("IMAGE",), + # "images_f": ("IMAGE",), + # Theoretically, an infinite number of image input parameters can be added. + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "image_batch" + CATEGORY = "WAS Suite/Image" + + def _check_image_dimensions(self, tensors, names): + dimensions = [tensor.shape for tensor in tensors] + if len(set(dimensions)) > 1: + mismatched_indices = [i for i, dim in enumerate(dimensions) if dim[1:] != dimensions[0][1:]] + mismatched_images = [names[i] for i in mismatched_indices] + if mismatched_images: + raise ValueError(f"WAS Image Batch Warning: Input image dimensions do not match for images: {mismatched_images}") + + def image_batch(self, **kwargs): + batched_tensors = [kwargs[key] for key in kwargs if kwargs[key] is not None] + image_names = [key for key in kwargs if kwargs[key] is not None] + + if not batched_tensors: + raise ValueError("At least one input image must be provided.") + + self._check_image_dimensions(batched_tensors, image_names) + batched_tensors = torch.cat(batched_tensors, dim=0) + return (batched_tensors,) + + +# MASK TO BATCH + +class WAS_Mask_Batch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "optional": { + "masks_a": ("MASK",), + "masks_b": ("MASK",), + "masks_c": ("MASK",), + "masks_d": ("MASK",), + # "masks_e": ("MASK",), + # "masks_f": ("MASK",), + # Theoretically, an infinite number of mask input parameters can be added. 
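+                # NOTE: mask_batch() gathers these slots via **kwargs, keeps the
+                # non-None inputs in declaration order, and stacks them into a
+                # single batch, so all connected masks must share the same
+                # height and width.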
+ }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("masks",) + FUNCTION = "mask_batch" + CATEGORY = "WAS Suite/Image/Masking" + + def _check_mask_dimensions(self, tensors, names): + dimensions = [tensor.shape[1:] for tensor in tensors] # Exclude the batch dimension (if present) + if len(set(dimensions)) > 1: + mismatched_indices = [i for i, dim in enumerate(dimensions) if dim != dimensions[0]] + mismatched_masks = [names[i] for i in mismatched_indices] + raise ValueError(f"WAS Mask Batch Warning: Input mask dimensions do not match for masks: {mismatched_masks}") + + def mask_batch(self, **kwargs): + batched_tensors = [kwargs[key] for key in kwargs if kwargs[key] is not None] + mask_names = [key for key in kwargs if kwargs[key] is not None] + + if not batched_tensors: + raise ValueError("At least one input mask must be provided.") + + self._check_mask_dimensions(batched_tensors, mask_names) + batched_tensors = torch.stack(batched_tensors, dim=0) + batched_tensors = batched_tensors.unsqueeze(1) # Add a channel dimension + return (batched_tensors,) + +# IMAGE GENERATE COLOR PALETTE + +class WAS_Image_Color_Palette: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "colors": ("INT", {"default": 16, "min": 8, "max": 256, "step": 1}), + "mode": (["Chart", "back_to_back"],), + }, + } + + RETURN_TYPES = ("IMAGE","LIST") + RETURN_NAMES = ("image","color_palettes") + FUNCTION = "image_generate_palette" + + CATEGORY = "WAS Suite/Image/Analyze" + + def image_generate_palette(self, image, colors=16, mode="chart"): + + # WAS Filters + WTools = WAS_Tools_Class() + + res_dir = os.path.join(WAS_SUITE_ROOT, 'res') + font = os.path.join(res_dir, 'font.ttf') + + if not os.path.exists(font): + font = None + else: + if mode == "Chart": + cstr(f'Found font at `{font}`').msg.print() + + if len(image) > 1: + palette_strings = [] + palette_images = [] + for img in image: + img = tensor2pil(img) + palette_image, palette = WTools.generate_palette(img, colors, 128, 10, font, 15, mode.lower()) + palette_images.append(pil2tensor(palette_image)) + palette_strings.append(palette) + palette_images = torch.cat(palette_images, dim=0) + return (palette_images, palette_strings) + else: + image = tensor2pil(image) + palette_image, palette = WTools.generate_palette(image, colors, 128, 10, font, 15, mode.lower()) + return (pil2tensor(palette_image), [palette,]) + + +# IMAGE ANALYZE + +class WAS_Image_Analyze: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "mode": (["Black White Levels", "RGB Levels"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_analyze" + + CATEGORY = "WAS Suite/Image/Analyze" + + def image_analyze(self, image, mode='Black White Levels'): + + # Convert images to PIL + image = tensor2pil(image) + + # WAS Filters + WTools = WAS_Tools_Class() + + # Analye Image + if mode: + if mode == 'Black White Levels': + image = WTools.black_white_levels(image) + elif mode == 'RGB Levels': + image = WTools.channel_frequency(image) + else: + image = image + + return (pil2tensor(image), ) + + +# IMAGE GENERATE GRADIENT + +class WAS_Image_Generate_Gradient: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + gradient_stops = '''0:255,0,0 +25:255,255,255 +50:0,255,0 +75:0,0,255''' + return { + "required": { + "width": ("INT", {"default":512, "max": 4096, "min": 64, "step":1}), + "height": ("INT", {"default":512, "max": 4096, "min": 64, "step":1}), 
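+                # Each line of gradient_stops is one "position:R,G,B" stop; the
+                # position appears to be on a 0-100 scale judging by the default
+                # defined at the top of INPUT_TYPES, and image_gradient() below
+                # simply splits on ':' and ',' without further validation.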
+ "direction": (["horizontal", "vertical"],), + "tolerance": ("INT", {"default":0, "max": 255, "min": 0, "step":1}), + "gradient_stops": ("STRING", {"default": gradient_stops, "multiline": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_gradient" + + CATEGORY = "WAS Suite/Image/Generate" + + def image_gradient(self, gradient_stops, width=512, height=512, direction='horizontal', tolerance=0): + + import io + + # WAS Filters + WTools = WAS_Tools_Class() + + colors_dict = {} + stops = io.StringIO(gradient_stops.strip().replace(' ','')) + for stop in stops: + parts = stop.split(':') + colors = parts[1].replace('\n','').split(',') + colors_dict[parts[0].replace('\n','')] = colors + + image = WTools.gradient((width, height), direction, colors_dict, tolerance) + + return (pil2tensor(image), ) + +# IMAGE GRADIENT MAP + +class WAS_Image_Gradient_Map: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "gradient_image": ("IMAGE",), + "flip_left_right": (["false", "true"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_gradient_map" + + CATEGORY = "WAS Suite/Image/Filter" + + def image_gradient_map(self, image, gradient_image, flip_left_right='false'): + + # Convert images to PIL + image = tensor2pil(image) + gradient_image = tensor2pil(gradient_image) + + # WAS Filters + WTools = WAS_Tools_Class() + + image = WTools.gradient_map(image, gradient_image, (True if flip_left_right == 'true' else False)) + + return (pil2tensor(image), ) + + +# IMAGE TRANSPOSE + +class WAS_Image_Transpose: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "image_overlay": ("IMAGE",), + "width": ("INT", {"default": 512, "min": -48000, "max": 48000, "step": 1}), + "height": ("INT", {"default": 512, "min": -48000, "max": 48000, "step": 1}), + "X": ("INT", {"default": 0, "min": -48000, "max": 48000, "step": 1}), + "Y": ("INT", {"default": 0, "min": -48000, "max": 48000, "step": 1}), + "rotation": ("INT", {"default": 0, "min": -360, "max": 360, "step": 1}), + "feathering": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_transpose" + + CATEGORY = "WAS Suite/Image/Transform" + + def image_transpose(self, image: torch.Tensor, image_overlay: torch.Tensor, width: int, height: int, X: int, Y: int, rotation: int, feathering: int = 0): + return (pil2tensor(self.apply_transpose_image(tensor2pil(image), tensor2pil(image_overlay), (width, height), (X, Y), rotation, feathering)), ) + + def apply_transpose_image(self, image_bg, image_element, size, loc, rotate=0, feathering=0): + + # Apply transformations to the element image + image_element = image_element.rotate(rotate, expand=True) + image_element = image_element.resize(size) + + # Create a mask for the image with the faded border + if feathering > 0: + mask = Image.new('L', image_element.size, 255) # Initialize with 255 instead of 0 + draw = ImageDraw.Draw(mask) + for i in range(feathering): + alpha_value = int(255 * (i + 1) / feathering) # Invert the calculation for alpha value + draw.rectangle((i, i, image_element.size[0] - i, image_element.size[1] - i), fill=alpha_value) + alpha_mask = Image.merge('RGBA', (mask, mask, mask, mask)) + image_element = Image.composite(image_element, Image.new('RGBA', image_element.size, (0, 0, 0, 0)), alpha_mask) + + # Create a new image of the same size as the base image with an alpha channel + new_image = 
Image.new('RGBA', image_bg.size, (0, 0, 0, 0)) + new_image.paste(image_element, loc) + + # Paste the new image onto the base image + image_bg = image_bg.convert('RGBA') + image_bg.paste(new_image, (0, 0), new_image) + + return image_bg + + + +# IMAGE RESCALE + +class WAS_Image_Rescale: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "mode": (["rescale", "resize"],), + "supersample": (["true", "false"],), + "resampling": (["lanczos", "nearest", "bilinear", "bicubic"],), + "rescale_factor": ("FLOAT", {"default": 2, "min": 0.01, "max": 16.0, "step": 0.01}), + "resize_width": ("INT", {"default": 1024, "min": 1, "max": 48000, "step": 1}), + "resize_height": ("INT", {"default": 1536, "min": 1, "max": 48000, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_rescale" + + CATEGORY = "WAS Suite/Image/Transform" + + def image_rescale(self, image, mode="rescale", supersample='true', resampling="lanczos", rescale_factor=2, resize_width=1024, resize_height=1024): + scaled_images = [] + for img in image: + scaled_images.append(pil2tensor(self.apply_resize_image(tensor2pil(img), mode, supersample, rescale_factor, resize_width, resize_height, resampling))) + scaled_images = torch.cat(scaled_images, dim=0) + + return (scaled_images, ) + + def apply_resize_image(self, image: Image.Image, mode='scale', supersample='true', factor: int = 2, width: int = 1024, height: int = 1024, resample='bicubic'): + + # Get the current width and height of the image + current_width, current_height = image.size + + # Calculate the new width and height based on the given mode and parameters + if mode == 'rescale': + new_width, new_height = int( + current_width * factor), int(current_height * factor) + else: + new_width = width if width % 8 == 0 else width + (8 - width % 8) + new_height = height if height % 8 == 0 else height + \ + (8 - height % 8) + + # Define a dictionary of resampling filters + resample_filters = { + 'nearest': 0, + 'bilinear': 2, + 'bicubic': 3, + 'lanczos': 1 + } + + # Apply supersample + if supersample == 'true': + image = image.resize((new_width * 8, new_height * 8), resample=Image.Resampling(resample_filters[resample])) + + # Resize the image using the given resampling filter + resized_image = image.resize((new_width, new_height), resample=Image.Resampling(resample_filters[resample])) + + return resized_image + + +# LOAD IMAGE BATCH + +class WAS_Load_Image_Batch: + def __init__(self): + self.HDB = WASDatabase(WAS_HISTORY_DATABASE) + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mode": (["single_image", "incremental_image"],), + "index": ("INT", {"default": 0, "min": 0, "max": 150000, "step": 1}), + "label": ("STRING", {"default": 'Batch 001', "multiline": False}), + "path": ("STRING", {"default": '', "multiline": False}), + "pattern": ("STRING", {"default": '*', "multiline": False}), + "allow_RGBA_output": (["false","true"],), + }, + "optional": { + "filename_text_extension": (["true", "false"],), + } + } + + RETURN_TYPES = ("IMAGE",TEXT_TYPE) + RETURN_NAMES = ("image","filename_text") + FUNCTION = "load_batch_images" + + CATEGORY = "WAS Suite/IO" + + def load_batch_images(self, path, pattern='*', index=0, mode="single_image", label='Batch 001', allow_RGBA_output='false', filename_text_extension='true'): + + allow_RGBA_output = (allow_RGBA_output == 'true') + + if not os.path.exists(path): + return (None, ) + fl = self.BatchImageLoader(path, label, pattern) + new_paths = fl.image_paths + 
if mode == 'single_image':
+            image, filename = fl.get_image_by_id(index)
+            if image is None:
+                cstr(f"No valid image was found for the index `{index}`").error.print()
+                return (None, None)
+        else:
+            image, filename = fl.get_next_image()
+            if image is None:
+                cstr(f"No valid image was found for the next ID. Did you remove images from the source directory?").error.print()
+                return (None, None)
+
+        # Update history
+        update_history_images(new_paths)
+
+        if not allow_RGBA_output:
+            image = image.convert("RGB")
+
+        if filename_text_extension == "false":
+            filename = os.path.splitext(filename)[0]
+
+        return (pil2tensor(image), filename)
+
+    class BatchImageLoader:
+        def __init__(self, directory_path, label, pattern):
+            self.WDB = WDB
+            self.image_paths = []
+            self.load_images(directory_path, pattern)
+            self.image_paths.sort()
+            stored_directory_path = self.WDB.get('Batch Paths', label)
+            stored_pattern = self.WDB.get('Batch Patterns', label)
+            if stored_directory_path != directory_path or stored_pattern != pattern:
+                self.index = 0
+                self.WDB.insert('Batch Counters', label, 0)
+                self.WDB.insert('Batch Paths', label, directory_path)
+                self.WDB.insert('Batch Patterns', label, pattern)
+            else:
+                self.index = self.WDB.get('Batch Counters', label)
+            self.label = label
+
+        def load_images(self, directory_path, pattern):
+            for file_name in glob.glob(os.path.join(glob.escape(directory_path), pattern), recursive=True):
+                if file_name.lower().endswith(ALLOWED_EXT):
+                    abs_file_path = os.path.abspath(file_name)
+                    self.image_paths.append(abs_file_path)
+
+        def get_image_by_id(self, image_id):
+            if image_id < 0 or image_id >= len(self.image_paths):
+                cstr(f"Invalid image index `{image_id}`").error.print()
+                # Keep the (image, filename) shape so the caller can unpack safely.
+                return (None, None)
+            i = Image.open(self.image_paths[image_id])
+            i = ImageOps.exif_transpose(i)
+            return (i, os.path.basename(self.image_paths[image_id]))
+
+        def get_next_image(self):
+            if self.index >= len(self.image_paths):
+                self.index = 0
+            image_path = self.image_paths[self.index]
+            self.index += 1
+            if self.index == len(self.image_paths):
+                self.index = 0
+            cstr(f'{cstr.color.YELLOW}{self.label}{cstr.color.END} Index: {self.index}').msg.print()
+            self.WDB.insert('Batch Counters', self.label, self.index)
+            i = Image.open(image_path)
+            i = ImageOps.exif_transpose(i)
+            return (i, os.path.basename(image_path))
+
+        def get_current_image(self):
+            if self.index >= len(self.image_paths):
+                self.index = 0
+            image_path = self.image_paths[self.index]
+            return os.path.basename(image_path)
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        if kwargs['mode'] != 'single_image':
+            return float("NaN")
+        else:
+            fl = WAS_Load_Image_Batch.BatchImageLoader(kwargs['path'], kwargs['label'], kwargs['pattern'])
+            filename = fl.get_current_image()
+            image = os.path.join(kwargs['path'], filename)
+            sha = get_sha256(image)
+            return sha
+
+
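+# NOTE: IS_CHANGED above only content-hashes the currently indexed file in
+# "single_image" mode, letting ComfyUI reuse its cached result until that file
+# changes; any other mode returns float("NaN"), which never compares equal, so
+# the node re-executes on every prompt and the stored batch counter advances.
+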
+# IMAGE HISTORY NODE
+
+class WAS_Image_History:
+    def __init__(self):
+        self.HDB = WASDatabase(WAS_HISTORY_DATABASE)
+        self.conf = getSuiteConfig()
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        HDB = WASDatabase(WAS_HISTORY_DATABASE)
+        conf = getSuiteConfig()
+        paths = ['No History']
+        if HDB.catExists("History") and HDB.keyExists("History", "Images"):
+            history_paths = HDB.get("History", "Images")
+            if conf.__contains__('history_display_limit'):
+                history_paths = history_paths[-conf['history_display_limit']:]
+            paths = []
+            for path_ in history_paths:
+                paths.append(os.path.join('...'+os.sep+os.path.basename(os.path.dirname(path_)), os.path.basename(path_)))
+
+        return {
+            "required": {
+                "image": (paths,),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",TEXT_TYPE)
+    RETURN_NAMES = ("image","filename_text")
+    FUNCTION = "image_history"
+
+    CATEGORY = "WAS Suite/History"
+
+    def image_history(self, image):
+        self.HDB = WASDatabase(WAS_HISTORY_DATABASE)
+        paths = {}
+        if self.HDB.catExists("History") and self.HDB.keyExists("History", "Images"):
+            history_paths = self.HDB.get("History", "Images")
+            for path_ in history_paths:
+                paths.update({os.path.join('...'+os.sep+os.path.basename(os.path.dirname(path_)), os.path.basename(path_)): path_})
+        # Check membership before touching the filesystem so a stale selection
+        # cannot raise a KeyError.
+        if paths.__contains__(image) and os.path.exists(paths[image]):
+            return (pil2tensor(Image.open(paths[image]).convert('RGB')), os.path.basename(paths[image]))
+        else:
+            cstr(f"The image `{image}` does not exist!").error.print()
+            return (pil2tensor(Image.new('RGB', (512,512), (0, 0, 0))), 'null')
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+
+# IMAGE STITCH
+
+class WAS_Image_Stitch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image_a": ("IMAGE",),
+                "image_b": ("IMAGE",),
+                "stitch": (["top", "left", "bottom", "right"],),
+                "feathering": ("INT", {"default": 50, "min": 0, "max": 2048, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "image_stitching"
+
+    CATEGORY = "WAS Suite/Image/Transform"
+
+    def image_stitching(self, image_a, image_b, stitch="right", feathering=50):
+
+        valid_stitches = ["top", "left", "bottom", "right"]
+        if stitch not in valid_stitches:
+            cstr(f"The stitch mode `{stitch}` is not valid. Valid stitch modes are {', '.join(valid_stitches)}").error.print()
+        if feathering > 2048:
+            cstr(f"The stitch feathering of `{feathering}` is too high. Please choose a value between `0` and `2048`").error.print()
+
+        WTools = WAS_Tools_Class()
+
+        stitched_image = WTools.stitch_image(tensor2pil(image_a), tensor2pil(image_b), stitch, feathering)
+
+        return (pil2tensor(stitched_image), )
+
+
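+# NOTE: the padding node below builds its feather by drawing black bands of
+# width 2*feather_radius along each edge of a white 'L' mask and Gaussian-
+# blurring them; the optional second pass repeats this at a quarter of the
+# radius to tighten the falloff right at the border.
+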
+# IMAGE PADDING
+
+class WAS_Image_Padding:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "feathering": ("INT", {"default": 120, "min": 0, "max": 2048, "step": 1}),
+                "feather_second_pass": (["true", "false"],),
+                "left_padding": ("INT", {"default": 512, "min": 8, "max": 48000, "step": 1}),
+                "right_padding": ("INT", {"default": 512, "min": 8, "max": 48000, "step": 1}),
+                "top_padding": ("INT", {"default": 512, "min": 8, "max": 48000, "step": 1}),
+                "bottom_padding": ("INT", {"default": 512, "min": 8, "max": 48000, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE", "IMAGE")
+    FUNCTION = "image_padding"
+
+    CATEGORY = "WAS Suite/Image/Transform"
+
+    def image_padding(self, image, feathering, left_padding, right_padding, top_padding, bottom_padding, feather_second_pass=True):
+        # The widget delivers "true"/"false" strings; honor the user's choice
+        # instead of hard-coding the second pass on.
+        second_pass = feather_second_pass in (True, "true")
+        padding = self.apply_image_padding(tensor2pil(image), left_padding, right_padding, top_padding,
+                                           bottom_padding, feathering, second_pass=second_pass)
+        return (pil2tensor(padding[0]), pil2tensor(padding[1]))
+
+    def apply_image_padding(self, image, left_pad=100, right_pad=100, top_pad=100, bottom_pad=100, feather_radius=50, second_pass=True):
+        # Create a mask for the feathered edge
+        mask = Image.new('L', image.size, 255)
+        draw = ImageDraw.Draw(mask)
+
+        # Draw black rectangles at each edge of the image with the specified feather radius
+        draw.rectangle((0, 0, feather_radius*2, image.height), fill=0)
+        draw.rectangle((image.width-feather_radius*2, 0, image.width, image.height), fill=0)
+        draw.rectangle((0, 0, image.width, feather_radius*2), fill=0)
+        draw.rectangle((0, image.height-feather_radius*2, image.width, image.height), fill=0)
+
+        # Blur the mask to create a smooth gradient between the black shapes and the white background
+        mask = mask.filter(ImageFilter.GaussianBlur(radius=feather_radius))
+
+        # Apply mask if second_pass is False, apply both masks if second_pass is True
+        if second_pass:
+
+            # Create a second mask for the additional feathering pass
+            mask2 = Image.new('L', image.size, 255)
+            draw2 = ImageDraw.Draw(mask2)
+
+            # Draw black rectangles at each edge of the image with a smaller feather radius
+            feather_radius2 = int(feather_radius / 4)
+            draw2.rectangle((0, 0, feather_radius2*2, image.height), fill=0)
+            draw2.rectangle((image.width-feather_radius2*2, 0, image.width, image.height), fill=0)
+            draw2.rectangle((0, 0, image.width, feather_radius2*2), fill=0)
+            draw2.rectangle((0, image.height-feather_radius2*2, image.width, image.height), fill=0)
+
+            # Blur the mask to create a smooth gradient between the black shapes and the white background
+            mask2 = mask2.filter(ImageFilter.GaussianBlur(radius=feather_radius2))
+
+            # Paste twice so the feathered alpha accumulates and the edge steepens
+            feathered_im = Image.new('RGBA', image.size, (0, 0, 0, 0))
+            feathered_im.paste(image, (0, 0), mask)
+            feathered_im.paste(image, (0, 0), mask)
+
+            # Apply the second mask to the feathered image
+            feathered_im.paste(image, (0, 0), mask2)
+            feathered_im.paste(image, (0, 0), mask2)
+
+        else:
+
+            # Apply the first mask
+            feathered_im = Image.new('RGBA', image.size, (0, 0, 0, 0))
+            feathered_im.paste(image, (0, 0), mask)
+
+        # Calculate the new size of the image with padding added
+        new_size = (feathered_im.width + left_pad + right_pad,
+                    feathered_im.height + top_pad + bottom_pad)
+
+        # Create a new transparent image with the new size
+        new_im = Image.new('RGBA', new_size, (0, 0, 0, 0))
+
+        # Paste the feathered image onto the new image with the padding
+        new_im.paste(feathered_im, (left_pad, top_pad))
+
+        # Create Padding Mask
+        padding_mask = Image.new('L', new_size, 0)
+
+        # Create a mask where the transparent pixels have a gradient
+        gradient = [(int(255 * (1 - p[3] / 255)) if p[3] != 0 else 255)
+                    for p in new_im.getdata()]
+        padding_mask.putdata(gradient)
+
+        # Return the padded image and its padding mask
+        return (new_im, padding_mask.convert('RGB'))
+
+
+# IMAGE THRESHOLD NODE
+
+class WAS_Image_Threshold:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "image_threshold"
+
+    CATEGORY = "WAS Suite/Image/Process"
+
+    def image_threshold(self, image, threshold=0.5):
+        return (pil2tensor(self.apply_threshold(tensor2pil(image), threshold)), )
+
+    def apply_threshold(self, input_image, threshold=0.5):
+        # Convert the input image to grayscale
+        grayscale_image = input_image.convert('L')
+
+        # Apply the threshold to the grayscale image
+        threshold_value = int(threshold * 255)
+        thresholded_image = grayscale_image.point(
+            lambda x: 255 if x >= threshold_value else 0, mode='L')
+
+        return thresholded_image
+
+
+# IMAGE CHROMATIC ABERRATION NODE
+
+class WAS_Image_Chromatic_Aberration:
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "red_offset": ("INT", {"default": 2, "min": -255, "max": 255, "step": 1}),
+                "green_offset": ("INT", {"default": -1, "min": -255, "max": 255, 
"step": 1}), + "blue_offset": ("INT", {"default": 1, "min": -255, "max": 255, "step": 1}), + "intensity": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "fade_radius": ("INT", {"default": 12, "min": 0, "max": 1024, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_chromatic_aberration" + + CATEGORY = "WAS Suite/Image/Filter" + + def image_chromatic_aberration(self, image, red_offset=4, green_offset=2, blue_offset=0, intensity=1, fade_radius=12): + return (pil2tensor(self.apply_chromatic_aberration(tensor2pil(image), red_offset, green_offset, blue_offset, intensity, fade_radius)), ) + + def apply_chromatic_aberration(self, img, r_offset, g_offset, b_offset, intensity, fade_radius): + + def lingrad(size, direction, white_ratio): + image = Image.new('RGB', size) + draw = ImageDraw.Draw(image) + if direction == 'vertical': + black_end = size[1] - white_ratio + range_start = 0 + range_end = size[1] + range_step = 1 + for y in range(range_start, range_end, range_step): + color_ratio = y / size[1] + if y <= black_end: + color = (0, 0, 0) + else: + color_value = int(((y - black_end) / (size[1] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(0, y), (size[0], y)], fill=color) + elif direction == 'horizontal': + black_end = size[0] - white_ratio + range_start = 0 + range_end = size[0] + range_step = 1 + for x in range(range_start, range_end, range_step): + color_ratio = x / size[0] + if x <= black_end: + color = (0, 0, 0) + else: + color_value = int(((x - black_end) / (size[0] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(x, 0), (x, size[1])], fill=color) + + return image.convert("L") + + def create_fade_mask(size, fade_radius): + mask = Image.new("L", size, 255) + + left = ImageOps.invert(lingrad(size, 'horizontal', int(fade_radius * 2))) + right = left.copy().transpose(Image.FLIP_LEFT_RIGHT) + top = ImageOps.invert(lingrad(size, 'vertical', int(fade_radius *2))) + bottom = top.copy().transpose(Image.FLIP_TOP_BOTTOM) + + # Multiply masks with the original mask image + mask = ImageChops.multiply(mask, left) + mask = ImageChops.multiply(mask, right) + mask = ImageChops.multiply(mask, top) + mask = ImageChops.multiply(mask, bottom) + mask = ImageChops.multiply(mask, mask) + + return mask + + # split the channels of the image + r, g, b = img.split() + + # apply the offset to each channel + r_offset_img = ImageChops.offset(r, r_offset, 0) + g_offset_img = ImageChops.offset(g, 0, g_offset) + b_offset_img = ImageChops.offset(b, 0, b_offset) + + # merge the channels with the offsets + merged = Image.merge("RGB", (r_offset_img, g_offset_img, b_offset_img)) + + # create fade masks for blending + fade_mask = create_fade_mask(img.size, fade_radius) + + # merge the blended channels back into an RGB image + result = Image.composite(merged, img, fade_mask).convert("RGB") + + return result + + +# IMAGE BLOOM FILTER + +class WAS_Image_Bloom_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "radius": ("FLOAT", {"default": 10, "min": 0.0, "max": 1024, "step": 0.1}), + "intensity": ("FLOAT", {"default": 1, "min": 0.0, "max": 1.0, "step": 0.1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_bloom" + + CATEGORY = "WAS Suite/Image/Filter" + + def image_bloom(self, image, radius=0.5, intensity=1.0): + return (pil2tensor(self.apply_bloom_filter(tensor2pil(image), radius, intensity)), ) + + def 
apply_bloom_filter(self, input_image, radius, bloom_factor): + # Apply a blur filter to the input image + blurred_image = input_image.filter( + ImageFilter.GaussianBlur(radius=radius)) + + # Subtract the blurred image from the input image to create a high-pass filter + high_pass_filter = ImageChops.subtract(input_image, blurred_image) + + # Create a blurred version of the bloom filter + bloom_filter = high_pass_filter.filter( + ImageFilter.GaussianBlur(radius=radius*2)) + + # Adjust brightness and levels of bloom filter + bloom_filter = ImageEnhance.Brightness(bloom_filter).enhance(2.0) + + # Multiply the bloom image with the bloom factor + bloom_filter = ImageChops.multiply(bloom_filter, Image.new('RGB', input_image.size, (int( + 255 * bloom_factor), int(255 * bloom_factor), int(255 * bloom_factor)))) + + # Multiply the bloom filter with the original image using the bloom factor + blended_image = ImageChops.screen(input_image, bloom_filter) + + return blended_image + + +# IMAGE ROTATE HUE + +class WAS_Image_Rotate_Hue: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "hue_shift": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "rotate_hue" + + CATEGORY = "WAS Suite/Image/Adjustment" + + def rotate_hue(self, image, hue_shift=0.0): + if hue_shift > 1.0 or hue_shift < 0.0: + cstr(f"The hue_shift `{cstr.color.LIGHTYELLOW}{hue_shift}{cstr.color.END}` is out of range. Valid range is {cstr.color.BOLD}0.0 - 1.0{cstr.color.END}").error.print() + hue_shift = 0.0 + shifted_hue = pil2tensor(self.hue_rotation(image, hue_shift)) + return (shifted_hue, ) + + def hue_rotation(self, image, hue_shift=0.0): + import colorsys + if hue_shift > 1.0 or hue_shift < 0.0: + print(f"The hue_shift '{hue_shift}' is out of range. 
Valid range is 0.0 - 1.0") + hue_shift = 0.0 + + pil_image = tensor2pil(image) + width, height = pil_image.size + rotated_image = Image.new("RGB", (width, height)) + + for x in range(width): + for y in range(height): + r, g, b = pil_image.getpixel((x, y)) + h, l, s = colorsys.rgb_to_hls(r / 255, g / 255, b / 255) + h = (h + hue_shift) % 1.0 + r, g, b = colorsys.hls_to_rgb(h, l, s) + r, g, b = int(r * 255), int(g * 255), int(b * 255) + rotated_image.putpixel((x, y), (r, g, b)) + + return rotated_image + + +# IMAGE REMOVE COLOR + +class WAS_Image_Remove_Color: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "target_red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "target_green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "target_blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "replace_red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "replace_green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "replace_blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "clip_threshold": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_remove_color" + + CATEGORY = "WAS Suite/Image/Process" + + def image_remove_color(self, image, clip_threshold=10, target_red=255, target_green=255, target_blue=255, replace_red=255, replace_green=255, replace_blue=255): + return (pil2tensor(self.apply_remove_color(tensor2pil(image), clip_threshold, (target_red, target_green, target_blue), (replace_red, replace_green, replace_blue))), ) + + def apply_remove_color(self, image, threshold=10, color=(255, 255, 255), rep_color=(0, 0, 0)): + # Create a color image with the same size as the input image + color_image = Image.new('RGB', image.size, color) + + # Calculate the difference between the input image and the color image + diff_image = ImageChops.difference(image, color_image) + + # Convert the difference image to grayscale + gray_image = diff_image.convert('L') + + # Apply a threshold to the grayscale difference image + mask_image = gray_image.point(lambda x: 255 if x > threshold else 0) + + # Invert the mask image + mask_image = ImageOps.invert(mask_image) + + # Apply the mask to the original image + result_image = Image.composite( + Image.new('RGB', image.size, rep_color), image, mask_image) + + return result_image + + +# IMAGE REMOVE BACKGROUND + +class WAS_Remove_Background: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "mode": (["background", "foreground"],), + "threshold": ("INT", {"default": 127, "min": 0, "max": 255, "step": 1}), + "threshold_tolerance": ("INT", {"default": 2, "min": 1, "max": 24, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "image_remove_background" + + CATEGORY = "WAS Suite/Image/Process" + + def image_remove_background(self, images, mode='background', threshold=127, threshold_tolerance=2): + return (self.remove_background(images, mode, threshold, threshold_tolerance), ) + + def remove_background(self, image, mode, threshold, threshold_tolerance): + images = [] + image = [tensor2pil(img) for img in image] + for img in image: + grayscale_image = img.convert('L') + if mode == 'background': + grayscale_image = ImageOps.invert(grayscale_image) + threshold = 255 - threshold # adjust the threshold for "background" mode + 
blurred_image = grayscale_image.filter( + ImageFilter.GaussianBlur(radius=threshold_tolerance)) + binary_image = blurred_image.point( + lambda x: 0 if x < threshold else 255, '1') + mask = binary_image.convert('L') + inverted_mask = ImageOps.invert(mask) + transparent_image = img.copy() + transparent_image.putalpha(inverted_mask) + images.append(pil2tensor(transparent_image)) + batch = torch.cat(images, dim=0) + + return batch + +# IMAGE REMBG +# Sam model needs additional input, may need to be new node entirely +# See: https://github.com/danielgatis/rembg/blob/main/USAGE.md#using-input-points +# u2net_cloth_seg model needs additional inputs, may create a new node +# An undocumented feature "putaplha" changes how alpha is applied, but does not appear to make a difference + +class WAS_Remove_Rembg: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "transparency": ("BOOLEAN", {"default": True},), + "model": (["u2net", "u2netp", "u2net_human_seg", "silueta", "isnet-general-use", "isnet-anime"],), + "post_processing": ("BOOLEAN", {"default": False}), + "only_mask": ("BOOLEAN", {"default": False},), + "alpha_matting": ("BOOLEAN", {"default": False},), + "alpha_matting_foreground_threshold": ("INT", {"default": 240, "min": 0, "max": 255}), + "alpha_matting_background_threshold": ("INT", {"default": 10, "min": 0, "max": 255}), + "alpha_matting_erode_size": ("INT", {"default": 10, "min": 0, "max": 255}), + "background_color": (["none", "black", "white", "magenta", "chroma green", "chroma blue"],), + # "putalpha": ("BOOLEAN", {"default": True},), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "image_rembg" + + CATEGORY = "WAS Suite/Image/AI" + + # A helper function to convert from strings to logical boolean + # Conforms to https://docs.python.org/3/library/stdtypes.html#truth-value-testing + # With the addition of evaluating string representations of Falsey types + def __convertToBool(self, x): + + # Evaluate string representation of False types + if type(x) == str: + x = x.strip() + if (x.lower() == 'false' + or x.lower() == 'none' + or x == '0' + or x == '0.0' + or x == '0j' + or x == "''" + or x == '""' + or x == "()" + or x == "[]" + or x == "{}" + or x.lower() == "decimal(0)" + or x.lower() == "fraction(0,1)" + or x.lower() == "set()" + or x.lower() == "range(0)" + ): + return False + else: + return True + + # Anything else will be evaluated by the bool function + return bool(x) + + def image_rembg( + self, + images, + transparency=True, + model="u2net", + alpha_matting=False, + alpha_matting_foreground_threshold=240, + alpha_matting_background_threshold=10, + alpha_matting_erode_size=10, + post_processing=False, + only_mask=False, + background_color="none", + # putalpha = False, + ): + + # ComfyUI will allow strings in place of booleans, validate the input. 
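+        # NOTE: rembg resolves its models through the U2NET_HOME environment
+        # variable (pointed below at MODELS_DIR/rembg), so the first run with a
+        # newly selected model downloads its ONNX weights there.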
+ transparency = transparency if type(transparency) is bool else self.__convertToBool(transparency) + alpha_matting = alpha_matting if type(alpha_matting) is bool else self.__convertToBool(alpha_matting) + post_processing = post_processing if type(post_processing) is bool else self.__convertToBool(post_processing) + only_mask = only_mask if type(only_mask) is bool else self.__convertToBool(only_mask) + + if "rembg" not in packages(): + install_package("rembg") + + from rembg import remove, new_session + + os.environ['U2NET_HOME'] = os.path.join(MODELS_DIR, 'rembg') + os.makedirs(os.environ['U2NET_HOME'], exist_ok=True) + + # Set bgcolor + bgrgba = None + if background_color == "black": + bgrgba = [0, 0, 0, 255] + elif background_color == "white": + bgrgba = [255, 255, 255, 255] + elif background_color == "magenta": + bgrgba = [255, 0, 255, 255] + elif background_color == "chroma green": + bgrgba = [0, 177, 64, 255] + elif background_color == "chroma blue": + bgrgba = [0, 71, 187, 255] + else: + bgrgba = None + + if transparency and bgrgba is not None: + bgrgba[3] = 0 + + batch_tensor = [] + for image in images: + image = tensor2pil(image) + batch_tensor.append(pil2tensor( + remove( + image, + session=new_session(model), + post_process_mask=post_processing, + alpha_matting=alpha_matting, + alpha_matting_foreground_threshold=alpha_matting_foreground_threshold, + alpha_matting_background_threshold=alpha_matting_background_threshold, + alpha_matting_erode_size=alpha_matting_erode_size, + only_mask=only_mask, + bgcolor=bgrgba, + # putalpha = putalpha, + ) + .convert(('RGBA' if transparency else 'RGB')))) + batch_tensor = torch.cat(batch_tensor, dim=0) + + return (batch_tensor,) + + +# IMAGE BLEND MASK NODE + +class WAS_Image_Blend_Mask: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_a": ("IMAGE",), + "image_b": ("IMAGE",), + "mask": ("IMAGE",), + "blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_blend_mask" + + CATEGORY = "WAS Suite/Image" + + def image_blend_mask(self, image_a, image_b, mask, blend_percentage): + + # Convert images to PIL + img_a = tensor2pil(image_a) + img_b = tensor2pil(image_b) + mask = ImageOps.invert(tensor2pil(mask).convert('L')) + + # Mask image + masked_img = Image.composite(img_a, img_b, mask.resize(img_a.size)) + + # Blend image + blend_mask = Image.new(mode="L", size=img_a.size, + color=(round(blend_percentage * 255))) + blend_mask = ImageOps.invert(blend_mask) + img_result = Image.composite(img_a, masked_img, blend_mask) + + del img_a, img_b, blend_mask, mask + + return (pil2tensor(img_result), ) + + +# IMAGE BLANK NOE + + +class WAS_Image_Blank: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}), + "red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + "blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "blank_image" + + CATEGORY = "WAS Suite/Image" + + def blank_image(self, width, height, red, green, blue): + + # Ensure multiples + width = (width // 8) * 8 + height = (height // 8) * 8 + + # Blend image + blank = Image.new(mode="RGB", size=(width, height), + color=(red, green, 
blue)) + + return (pil2tensor(blank), ) + + +# IMAGE HIGH PASS + +class WAS_Image_High_Pass_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "radius": ("INT", {"default": 10, "min": 1, "max": 500, "step": 1}), + "strength": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 255.0, "step": 0.1}), + "color_output": (["true", "false"],), + "neutral_background": (["true", "false"],), + } + } + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "high_pass" + + CATEGORY = "WAS Suite/Image/Filter" + + def high_pass(self, images, radius=10, strength=1.5, color_output="true", neutral_background="true"): + batch_tensor = [] + for image in images: + transformed_image = self.apply_hpf(tensor2pil(image), radius, strength, color_output, neutral_background) + batch_tensor.append(pil2tensor(transformed_image)) + batch_tensor = torch.cat(batch_tensor, dim=0) + return (batch_tensor, ) + + def apply_hpf(self, img, radius=10, strength=1.5, color_output="true", neutral_background="true"): + img_arr = np.array(img).astype('float') + blurred_arr = np.array(img.filter(ImageFilter.GaussianBlur(radius=radius))).astype('float') + hpf_arr = img_arr - blurred_arr + hpf_arr = np.clip(hpf_arr * strength, 0, 255).astype('uint8') + + if color_output == "true": + high_pass = Image.fromarray(hpf_arr, mode='RGB') + else: + grayscale_arr = np.mean(hpf_arr, axis=2).astype('uint8') + high_pass = Image.fromarray(grayscale_arr, mode='L') + + if neutral_background == "true": + neutral_color = (128, 128, 128) if high_pass.mode == 'RGB' else 128 + neutral_bg = Image.new(high_pass.mode, high_pass.size, neutral_color) + high_pass = ImageChops.screen(neutral_bg, high_pass) + + return high_pass.convert("RGB") + + +# IMAGE LEVELS NODE + +class WAS_Image_Levels: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "black_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 255.0, "step": 0.1}), + "mid_level": ("FLOAT", {"default": 127.5, "min": 0.0, "max": 255.0, "step": 0.1}), + "white_level": ("FLOAT", {"default": 255, "min": 0.0, "max": 255.0, "step": 0.1}), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "apply_image_levels" + + CATEGORY = "WAS Suite/Image/Adjustment" + + def apply_image_levels(self, image, black_level, mid_level, white_level): + + # Convert image to PIL + tensor_images = [] + for img in image: + img = tensor2pil(img) + levels = self.AdjustLevels(black_level, mid_level, white_level) + tensor_images.append(pil2tensor(levels.adjust(img))) + tensor_images = torch.cat(tensor_images, dim=0) + + # Return adjust image tensor + return (tensor_images, ) + + + class AdjustLevels: + def __init__(self, min_level, mid_level, max_level): + self.min_level = min_level + self.mid_level = mid_level + self.max_level = max_level + + def adjust(self, im): + + im_arr = np.array(im) + im_arr[im_arr < self.min_level] = self.min_level + im_arr = (im_arr - self.min_level) * \ + (255 / (self.max_level - self.min_level)) + im_arr[im_arr < 0] = 0 + im_arr[im_arr > 255] = 255 + im_arr = im_arr.astype(np.uint8) + + im = Image.fromarray(im_arr) + im = ImageOps.autocontrast(im, cutoff=self.max_level) + + return im + + +# FILM GRAIN NODE + +class WAS_Film_Grain: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "density": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}), + "intensity": ("FLOAT", 
{"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}), + "highlights": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 255.0, "step": 0.01}), + "supersample_factor": ("INT", {"default": 4, "min": 1, "max": 8, "step": 1}) + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "film_grain" + + CATEGORY = "WAS Suite/Image/Filter" + + def film_grain(self, image, density, intensity, highlights, supersample_factor): + return (pil2tensor(self.apply_film_grain(tensor2pil(image), density, intensity, highlights, supersample_factor)), ) + + def apply_film_grain(self, img, density=0.1, intensity=1.0, highlights=1.0, supersample_factor=4): + """ + Apply grayscale noise with specified density, intensity, and highlights to a PIL image. + """ + img_gray = img.convert('L') + original_size = img.size + img_gray = img_gray.resize( + ((img.size[0] * supersample_factor), (img.size[1] * supersample_factor)), Image.Resampling(2)) + num_pixels = int(density * img_gray.size[0] * img_gray.size[1]) + + noise_pixels = [] + for i in range(num_pixels): + x = random.randint(0, img_gray.size[0]-1) + y = random.randint(0, img_gray.size[1]-1) + noise_pixels.append((x, y)) + + for x, y in noise_pixels: + value = random.randint(0, 255) + img_gray.putpixel((x, y), value) + + img_noise = img_gray.convert('RGB') + img_noise = img_noise.filter(ImageFilter.GaussianBlur(radius=0.125)) + img_noise = img_noise.resize(original_size, Image.Resampling(1)) + img_noise = img_noise.filter(ImageFilter.EDGE_ENHANCE_MORE) + img_final = Image.blend(img, img_noise, intensity) + enhancer = ImageEnhance.Brightness(img_final) + img_highlights = enhancer.enhance(highlights) + + # Return the final image + return img_highlights + + +# IMAGE FLIP NODE + +class WAS_Image_Flip: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "mode": (["horizontal", "vertical",],), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "image_flip" + + CATEGORY = "WAS Suite/Image/Transform" + + def image_flip(self, images, mode): + + batch_tensor = [] + for image in images: + image = tensor2pil(image) + if mode == 'horizontal': + image = image.transpose(0) + if mode == 'vertical': + image = image.transpose(1) + batch_tensor.append(pil2tensor(image)) + batch_tensor = torch.cat(batch_tensor, dim=0) + + return (batch_tensor, ) + + +class WAS_Image_Rotate: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "mode": (["transpose", "internal",],), + "rotation": ("INT", {"default": 0, "min": 0, "max": 360, "step": 90}), + "sampler": (["nearest", "bilinear", "bicubic"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "image_rotate" + + CATEGORY = "WAS Suite/Image/Transform" + + def image_rotate(self, images, mode, rotation, sampler): + + batch_tensor = [] + for image in images: + # PIL Image + image = tensor2pil(image) + + # Check rotation + if rotation > 360: + rotation = int(360) + if (rotation % 90 != 0): + rotation = int((rotation//90)*90) + + # Set Sampler + if sampler: + if sampler == 'nearest': + sampler = Image.NEAREST + elif sampler == 'bicubic': + sampler = Image.BICUBIC + elif sampler == 'bilinear': + sampler = Image.BILINEAR + else: + sampler == Image.BILINEAR + + # Rotate Image + if mode == 'internal': + image = image.rotate(rotation, sampler) + else: + rot = int(rotation / 90) + for _ in range(rot): + image = image.transpose(2) + + 
batch_tensor.append(pil2tensor(image)) + + batch_tensor = torch.cat(batch_tensor, dim=0) + + return (batch_tensor, ) + + +# IMAGE NOVA SINE FILTER + +class WAS_Image_Nova_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "amplitude": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.001}), + "frequency": ("FLOAT", {"default": 3.14, "min": 0.0, "max": 100.0, "step": 0.001}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "nova_sine" + + CATEGORY = "WAS Suite/Image/Filter" + + def nova_sine(self, image, amplitude, frequency): + + # Convert image to numpy + img = tensor2pil(image) + + # Convert the image to a numpy array + img_array = np.array(img) + + # Define a sine wave function + def sine(x, freq, amp): + return amp * np.sin(2 * np.pi * freq * x) + + # Calculate the sampling frequency of the image + resolution = img.info.get('dpi') # PPI + physical_size = img.size # pixels + + if resolution is not None: + # Convert PPI to pixels per millimeter (PPM) + ppm = 25.4 / resolution + physical_size = tuple(int(pix * ppm) for pix in physical_size) + + # Set the maximum frequency for the sine wave + max_freq = img.width / 2 + + # Ensure frequency isn't outside visual representable range + if frequency > max_freq: + frequency = max_freq + + # Apply levels to the image using the sine function + for i in range(img_array.shape[0]): + for j in range(img_array.shape[1]): + for k in range(img_array.shape[2]): + img_array[i, j, k] = int( + sine(img_array[i, j, k]/255, frequency, amplitude) * 255) + + return (torch.from_numpy(img_array.astype(np.float32) / 255.0).unsqueeze(0), ) + + +# IMAGE CANNY FILTER + + +class WAS_Canny_Filter: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "enable_threshold": (['false', 'true'],), + "threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "canny_filter" + + CATEGORY = "WAS Suite/Image/Filter" + + def canny_filter(self, images, threshold_low, threshold_high, enable_threshold): + + if enable_threshold == 'false': + threshold_low = None + threshold_high = None + + batch_tensor = [] + for image in images: + + image_canny = Image.fromarray(self.Canny_detector( + 255. 
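+                # `image` is a 0..1 float tensor laid out [H, W, C]; the 255.
+                # factor rescales it to the 0..255 range Canny_detector expects.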
                * image.cpu().numpy().squeeze(), threshold_low, threshold_high)).convert('RGB')
+
+            batch_tensor.append(pil2tensor(image_canny))
+
+        batch_tensor = torch.cat(batch_tensor, dim=0)
+
+        return (batch_tensor, )
+
+    def Canny_detector(self, img, weak_th=None, strong_th=None):
+
+        import cv2
+
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        img = cv2.GaussianBlur(img, (5, 5), 1.4)
+        gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)  # type: ignore
+        gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)  # type: ignore
+
+        mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees=True)
+
+        mag_max = np.max(mag)
+        if not weak_th:
+            weak_th = mag_max * 0.1
+        if not strong_th:
+            strong_th = mag_max * 0.5
+
+        height, width = img.shape
+
+        # Non-maximum suppression: keep a pixel only if it is the local
+        # maximum along its gradient direction
+        for i_x in range(width):
+            for i_y in range(height):
+
+                grad_ang = ang[i_y, i_x]
+                grad_ang = abs(
+                    grad_ang-180) if abs(grad_ang) > 180 else abs(grad_ang)
+
+                neighb_1_x, neighb_1_y = -1, -1
+                neighb_2_x, neighb_2_y = -1, -1
+
+                if grad_ang <= 22.5:
+                    neighb_1_x, neighb_1_y = i_x-1, i_y
+                    neighb_2_x, neighb_2_y = i_x + 1, i_y
+
+                elif grad_ang > 22.5 and grad_ang <= (22.5 + 45):
+                    neighb_1_x, neighb_1_y = i_x-1, i_y-1
+                    neighb_2_x, neighb_2_y = i_x + 1, i_y + 1
+                elif grad_ang > (22.5 + 45) and grad_ang <= (22.5 + 90):
+                    neighb_1_x, neighb_1_y = i_x, i_y-1
+                    neighb_2_x, neighb_2_y = i_x, i_y + 1
+                elif grad_ang > (22.5 + 90) and grad_ang <= (22.5 + 135):
+                    neighb_1_x, neighb_1_y = i_x-1, i_y + 1
+                    neighb_2_x, neighb_2_y = i_x + 1, i_y-1
+                elif grad_ang > (22.5 + 135) and grad_ang <= (22.5 + 180):
+                    neighb_1_x, neighb_1_y = i_x-1, i_y
+                    neighb_2_x, neighb_2_y = i_x + 1, i_y
+                if width > neighb_1_x >= 0 and height > neighb_1_y >= 0:
+                    if mag[i_y, i_x] < mag[neighb_1_y, neighb_1_x]:
+                        mag[i_y, i_x] = 0
+                        continue
+
+                if width > neighb_2_x >= 0 and height > neighb_2_y >= 0:
+                    if mag[i_y, i_x] < mag[neighb_2_y, neighb_2_x]:
+                        mag[i_y, i_x] = 0
+
+        weak_ids = np.zeros_like(img)
+        strong_ids = np.zeros_like(img)
+        ids = np.zeros_like(img)
+
+        # Double thresholding; the weak/strong classifications are recorded
+        # but no hysteresis edge-linking pass follows
+        for i_x in range(width):
+            for i_y in range(height):
+
+                grad_mag = mag[i_y, i_x]
+
+                if grad_mag < weak_th:
+                    mag[i_y, i_x] = 0
+                elif strong_th > grad_mag >= weak_th:
+                    ids[i_y, i_x] = 1
+                else:
+                    ids[i_y, i_x] = 2
+
+        return mag
+
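+# The detector above reimplements Canny by hand and stops short of the final
+# hysteresis pass (`weak_ids`/`strong_ids` are classified but never linked).
+# For comparison, a minimal sketch of the same edge map using OpenCV's
+# built-in implementation (assumes a uint8 grayscale array; not wired to any
+# node):
+
+def _canny_builtin_sketch(gray_uint8, weak_th=50, strong_th=150):
+    """Hypothetical helper: cv2.Canny performs Sobel gradients, non-maximum
+    suppression and full hysteresis thresholding in a single call."""
+    import cv2
+    return cv2.Canny(gray_uint8, weak_th, strong_th)
+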
+# IMAGE EDGE DETECTION
+
+class WAS_Image_Edge:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "mode": (["normal", "laplacian"],),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "image_edges"
+
+    CATEGORY = "WAS Suite/Image/Filter"
+
+    def image_edges(self, image, mode):
+
+        # Convert image to PIL
+        image = tensor2pil(image)
+
+        # Detect edges
+        if mode:
+            if mode == "normal":
+                image = image.filter(ImageFilter.FIND_EDGES)
+            elif mode == "laplacian":
+                image = image.filter(ImageFilter.Kernel((3, 3), (-1, -1, -1, -1, 8,
+                                                                 -1, -1, -1, -1), 1, 0))
+            else:
+                image = image
+
+        return (torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), )
+
+
+# IMAGE FDOF NODE
+
+class WAS_Image_fDOF:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "depth": ("IMAGE",),
+                "mode": (["mock", "gaussian", "box"],),
+                "radius": ("INT", {"default": 8, "min": 1, "max": 128, "step": 1}),
+                "samples": ("INT", {"default": 1, "min": 1, "max": 3, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "fdof_composite"
+
+    CATEGORY = "WAS Suite/Image/Filter"
+
+    def fdof_composite(self, image, depth, radius, samples, mode):
+
+        import cv2 as cv
+
+        # Convert tensor to a PIL Image; fall back to the last frame when the
+        # depth batch is shorter than the image batch
+        tensor_images = []
+        for i in range(len(image)):
+            if i < len(image):
+                img = tensor2pil(image[i])
+            else:
+                img = tensor2pil(image[-1])
+            if i < len(depth):
+                dpth = tensor2pil(depth[i])
+            else:
+                dpth = tensor2pil(depth[-1])
+            tensor_images.append(pil2tensor(self.portraitBlur(img, dpth, radius, samples, mode)))
+        tensor_images = torch.cat(tensor_images, dim=0)
+
+        return (tensor_images, )
+
+    def portraitBlur(self, img, mask, radius, samples, mode='mock'):
+        mask = mask.resize(img.size).convert('L')
+        bimg: Optional[Image.Image] = None
+        if mode == 'mock':
+            bimg = medianFilter(img, radius, (radius * 1500), 75)
+        elif mode == 'gaussian':
+            bimg = img.filter(ImageFilter.GaussianBlur(radius=radius))
+        elif mode == 'box':
+            bimg = img.filter(ImageFilter.BoxBlur(radius))
+        else:
+            return
+        bimg = bimg.convert(img.mode)
+        rimg: Optional[Image.Image] = None
+        if samples > 1:
+            for i in range(samples):
+                if not rimg:
+                    rimg = Image.composite(img, bimg, mask)
+                else:
+                    rimg = Image.composite(rimg, bimg, mask)
+        else:
+            rimg = Image.composite(img, bimg, mask).convert('RGB')
+
+        return rimg
+
+
+# IMAGE DRAGAN PHOTOGRAPHY FILTER
+
+class WAS_Dragon_Filter:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 16.0, "step": 0.01}),
+                "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 16.0, "step": 0.01}),
+                "brightness": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 16.0, "step": 0.01}),
+                "sharpness": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 6.0, "step": 0.01}),
+                "highpass_radius": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 255.0, "step": 0.01}),
+                "highpass_samples": ("INT", {"default": 1, "min": 0, "max": 6, "step": 1}),
+                "highpass_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.01}),
+                "colorize": (["true","false"],),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "apply_dragan_filter"
+
+    CATEGORY = "WAS Suite/Image/Filter"
+
+    def apply_dragan_filter(self, image, saturation, contrast, sharpness, brightness, highpass_radius, highpass_samples, highpass_strength, colorize):
+
+        WTools = WAS_Tools_Class()
+
+        tensor_images = []
+        for img in image:
+            tensor_images.append(pil2tensor(WTools.dragan_filter(tensor2pil(img), saturation, contrast, sharpness, brightness, highpass_radius, highpass_samples, highpass_strength, colorize)))
+        tensor_images = torch.cat(tensor_images, dim=0)
+
+        return (tensor_images, )
+
+
+# IMAGE MEDIAN FILTER NODE
+
+class WAS_Image_Median_Filter:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "diameter": ("INT", {"default": 2, "min": 1, "max": 255, "step": 1}),
+                "sigma_color": ("FLOAT", {"default": 10.0, "min": -255.0, "max": 255.0, "step": 0.1}),
+                "sigma_space": ("FLOAT", {"default": 10.0, "min": -255.0, "max": 255.0, "step": 0.1}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "apply_median_filter"
+
+    CATEGORY = "WAS Suite/Image/Filter"
+
+    def apply_median_filter(self, image, diameter, sigma_color, sigma_space):
+
+        tensor_images = []
+        for img in image:
+            img = tensor2pil(img)
+            # Apply Median Filter effect
+            tensor_images.append(pil2tensor(medianFilter(img, diameter, sigma_color, sigma_space)))
+        tensor_images = torch.cat(tensor_images, dim=0)
+
+        return (tensor_images, )
+
+# IMAGE SELECT COLOR
+
+
+class WAS_Image_Select_Color:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def
INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "red": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), + "green": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), + "blue": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), + "variance": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "select_color" + + CATEGORY = "WAS Suite/Image/Process" + + def select_color(self, image, red=255, green=255, blue=255, variance=10): + + image = self.color_pick(tensor2pil(image), red, green, blue, variance) + + return (pil2tensor(image), ) + + def color_pick(self, image, red=255, green=255, blue=255, variance=10): + # Convert image to RGB mode + image = image.convert('RGB') + + # Create a new black image of the same size as the input image + selected_color = Image.new('RGB', image.size, (0, 0, 0)) + + # Get the width and height of the image + width, height = image.size + + # Loop through every pixel in the image + for x in range(width): + for y in range(height): + # Get the color of the pixel + pixel = image.getpixel((x, y)) + r, g, b = pixel + + # Check if the pixel is within the specified color range + if ((r >= red-variance) and (r <= red+variance) and + (g >= green-variance) and (g <= green+variance) and + (b >= blue-variance) and (b <= blue+variance)): + # Set the pixel in the selected_color image to the RGB value of the pixel + selected_color.putpixel((x, y), (r, g, b)) + + # Return the selected color image + return selected_color + +# IMAGE CONVERT TO CHANNEL + + +class WAS_Image_Select_Channel: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "channel": (['red', 'green', 'blue'],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "select_channel" + + CATEGORY = "WAS Suite/Image/Process" + + def select_channel(self, image, channel='red'): + + image = self.convert_to_single_channel(tensor2pil(image), channel) + + return (pil2tensor(image), ) + + def convert_to_single_channel(self, image, channel='red'): + + # Convert to RGB mode to access individual channels + image = image.convert('RGB') + + # Extract the desired channel and convert to greyscale + if channel == 'red': + channel_img = image.split()[0].convert('L') + elif channel == 'green': + channel_img = image.split()[1].convert('L') + elif channel == 'blue': + channel_img = image.split()[2].convert('L') + else: + raise ValueError( + "Invalid channel option. 
Please choose 'red', 'green', or 'blue'.") + + # Convert the greyscale channel back to RGB mode + channel_img = Image.merge( + 'RGB', (channel_img, channel_img, channel_img)) + + return channel_img + +# IMAGES TO RGB + +class WAS_Images_To_RGB: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_to_rgb" + + CATEGORY = "WAS Suite/Image" + + def image_to_rgb(self, images): + + if len(images) > 1: + tensors = [] + for image in images: + tensors.append(pil2tensor(tensor2pil(image).convert('RGB'))) + tensors = torch.cat(tensors, dim=0) + return (tensors, ) + else: + return (pil2tensor(tensor2pil(images).convert("RGB")), ) + +# IMAGES TO LINEAR + +class WAS_Images_To_Linear: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "image_to_linear" + + CATEGORY = "WAS Suite/Image" + + def image_to_linear(self, images): + + if len(images) > 1: + tensors = [] + for image in images: + tensors.append(pil2tensor(tensor2pil(image).convert('L'))) + tensors = torch.cat(tensors, dim=0) + return (tensors, ) + else: + return (pil2tensor(tensor2pil(images).convert("L")), ) + + +# IMAGE MERGE RGB CHANNELS + +class WAS_Image_RGB_Merge: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "red_channel": ("IMAGE",), + "green_channel": ("IMAGE",), + "blue_channel": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "merge_channels" + + CATEGORY = "WAS Suite/Image/Process" + + def merge_channels(self, red_channel, green_channel, blue_channel): + + # Apply mix rgb channels + image = self.mix_rgb_channels(tensor2pil(red_channel).convert('L'), tensor2pil( + green_channel).convert('L'), tensor2pil(blue_channel).convert('L')) + + return (pil2tensor(image), ) + + def mix_rgb_channels(self, red, green, blue): + # Create an empty image with the same size as the channels + width, height = red.size + merged_img = Image.new('RGB', (width, height)) + + # Merge the channels into the new image + merged_img = Image.merge('RGB', (red, green, blue)) + + return merged_img + +# IMAGE Ambient Occlusion + +class WAS_Image_Ambient_Occlusion: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "depth_images": ("IMAGE",), + "strength": ("FLOAT", {"min": 0.0, "max": 5.0, "default": 1.0, "step": 0.01}), + "radius": ("FLOAT", {"min": 0.01, "max": 1024, "default": 30, "step": 0.01}), + "ao_blur": ("FLOAT", {"min": 0.01, "max": 1024, "default": 2.5, "step": 0.01}), + "specular_threshold": ("INT", {"min":0, "max": 255, "default": 25, "step": 1}), + "enable_specular_masking": (["True", "False"],), + "tile_size": ("INT", {"min": 1, "max": 512, "default": 1, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE","IMAGE","IMAGE") + RETURN_NAMES = ("composited_images", "ssao_images", "specular_mask_images") + FUNCTION = "ambient_occlusion" + + CATEGORY = "WAS Suite/Image/Filter" + + def ambient_occlusion(self, images, depth_images, strength, radius, ao_blur, specular_threshold, enable_specular_masking, tile_size): + + enable_specular_masking = (enable_specular_masking == 'True') + composited = [] + occlusions = [] + speculars = [] + for i, image in enumerate(images): + cstr(f"Processing SSAO image {i+1}/{len(images)} ...").msg.print() + composited_image, occlusion_image, specular_mask 
            = self.create_ambient_occlusion(
+                tensor2pil(image),
+                tensor2pil(depth_images[(i if len(depth_images) > i else -1)]),
+                strength=strength,
+                radius=radius,
+                ao_blur=ao_blur,
+                spec_threshold=specular_threshold,
+                enable_specular_masking=enable_specular_masking,
+                tile_size=tile_size
+            )
+            composited.append(pil2tensor(composited_image))
+            occlusions.append(pil2tensor(occlusion_image))
+            speculars.append(pil2tensor(specular_mask))
+
+        composited = torch.cat(composited, dim=0)
+        occlusions = torch.cat(occlusions, dim=0)
+        speculars = torch.cat(speculars, dim=0)
+
+        return ( composited, occlusions, speculars )
+
+    def process_tile(self, tile_rgb, tile_depth, tile_x, tile_y, radius):
+        tile_occlusion = calculate_ambient_occlusion_factor(tile_rgb, tile_depth, tile_rgb.shape[0], tile_rgb.shape[1], radius)
+        return tile_x, tile_y, tile_occlusion
+
+    def create_ambient_occlusion(self, rgb_image, depth_image, strength=1.0, radius=30, ao_blur=5, spec_threshold=200, enable_specular_masking=False, tile_size=1):
+
+        import concurrent.futures
+
+        if depth_image.size != rgb_image.size:
+            depth_image = depth_image.resize(rgb_image.size)
+        rgb_normalized = np.array(rgb_image, dtype=np.float32) / 255.0
+        depth_normalized = np.array(depth_image, dtype=np.float32) / 255.0
+
+        height, width, _ = rgb_normalized.shape
+
+        if tile_size <= 1:
+            print("Processing single-threaded AO (highest quality) ...")
+            occlusion_array = calculate_ambient_occlusion_factor(rgb_normalized, depth_normalized, height, width, radius)
+        else:
+            tile_size = ((tile_size if tile_size <= 8 else 8) if tile_size > 1 else 1)
+            num_tiles_x = (width - 1) // tile_size + 1
+            num_tiles_y = (height - 1) // tile_size + 1
+
+            occlusion_array = np.zeros((height, width), dtype=np.uint8)
+
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                futures = []
+
+                with tqdm(total=num_tiles_y * num_tiles_x) as pbar:
+                    for tile_y in range(num_tiles_y):
+                        for tile_x in range(num_tiles_x):
+                            tile_left = tile_x * tile_size
+                            tile_upper = tile_y * tile_size
+                            tile_right = min(tile_left + tile_size, width)
+                            tile_lower = min(tile_upper + tile_size, height)
+
+                            tile_rgb = rgb_normalized[tile_upper:tile_lower, tile_left:tile_right]
+                            tile_depth = depth_normalized[tile_upper:tile_lower, tile_left:tile_right]
+
+                            future = executor.submit(self.process_tile, tile_rgb, tile_depth, tile_x, tile_y, radius)
+                            futures.append(future)
+
+                    for future in concurrent.futures.as_completed(futures):
+                        tile_x, tile_y, tile_occlusion = future.result()
+                        tile_left = tile_x * tile_size
+                        tile_upper = tile_y * tile_size
+                        tile_right = min(tile_left + tile_size, width)
+                        tile_lower = min(tile_upper + tile_size, height)
+
+                        occlusion_array[tile_upper:tile_lower, tile_left:tile_right] = tile_occlusion
+
+                        pbar.update(1)
+
+        occlusion_array = (occlusion_array * strength).clip(0, 255).astype(np.uint8)
+
+        occlusion_image = Image.fromarray(occlusion_array, mode='L')
+        occlusion_image = occlusion_image.filter(ImageFilter.GaussianBlur(radius=ao_blur))
+        occlusion_image = occlusion_image.filter(ImageFilter.SMOOTH)
+        occlusion_image = ImageChops.multiply(occlusion_image, ImageChops.multiply(occlusion_image, occlusion_image))
+
+        mask = rgb_image.convert('L')
+        mask = mask.point(lambda x: x > spec_threshold, mode='1')
+        mask = mask.convert("RGB")
+        mask = mask.filter(ImageFilter.GaussianBlur(radius=2.5)).convert("L")
+
+        if enable_specular_masking:
+            occlusion_image = Image.composite(Image.new("L", rgb_image.size, 255), occlusion_image, mask)
+        occlusion_result = ImageChops.multiply(rgb_image, occlusion_image.convert("RGB"))
+
+        return occlusion_result, occlusion_image, mask
+
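+# The compositing step above is a plain multiply blend: ImageChops.multiply
+# computes out = a * b / 255 per channel, so white in the (blurred, cubed)
+# occlusion map leaves the RGB untouched and darker values shade it toward
+# black. The blend in isolation, as a stand-alone sketch:
+
+def _ao_multiply_sketch(rgb_image, occlusion_l):
+    """Hypothetical helper: darken an RGB PIL image by a grayscale AO map."""
+    from PIL import ImageChops
+    return ImageChops.multiply(rgb_image, occlusion_l.convert("RGB"))
+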
+# IMAGE Direct Occlusion
+
+class WAS_Image_Direct_Occlusion:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "images": ("IMAGE",),
+                "depth_images": ("IMAGE",),
+                "strength": ("FLOAT", {"min": 0.0, "max": 5.0, "default": 1.0, "step": 0.01}),
+                "radius": ("FLOAT", {"min": 0.01, "max": 1024, "default": 30, "step": 0.01}),
+                "specular_threshold": ("INT", {"min":0, "max": 255, "default": 128, "step": 1}),
+                "colored_occlusion": (["True", "False"],),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE","IMAGE","IMAGE", "IMAGE")
+    RETURN_NAMES = ("composited_images", "ssdo_images", "ssdo_image_masks", "light_source_image_masks")
+    FUNCTION = "direct_occlusion"
+
+    CATEGORY = "WAS Suite/Image/Filter"
+
+    def direct_occlusion(self, images, depth_images, strength, radius, specular_threshold, colored_occlusion):
+
+        colored_occlusion = (colored_occlusion == 'True')
+        composited = []
+        occlusions = []
+        occlusion_masks = []
+        light_sources = []
+        for i, image in enumerate(images):
+            cstr(f"Processing SSDO image {i+1}/{len(images)} ...").msg.print()
+            composited_image, occlusion_image, occlusion_mask, light_source = self.create_direct_occlusion(
+                tensor2pil(image),
+                tensor2pil(depth_images[(i if len(depth_images) > i else -1)]),
+                strength=strength,
+                radius=radius,
+                threshold=specular_threshold,
+                colored=colored_occlusion
+            )
+            composited.append(pil2tensor(composited_image))
+            occlusions.append(pil2tensor(occlusion_image))
+            occlusion_masks.append(pil2tensor(occlusion_mask))
+            light_sources.append(pil2tensor(light_source))
+
+        composited = torch.cat(composited, dim=0)
+        occlusions = torch.cat(occlusions, dim=0)
+        occlusion_masks = torch.cat(occlusion_masks, dim=0)
+        light_sources = torch.cat(light_sources, dim=0)
+
+        return ( composited, occlusions, occlusion_masks, light_sources )
+
+    def find_light_source(self, rgb_normalized, threshold):
+        from skimage.measure import regionprops
+        from skimage import measure
+        rgb_uint8 = (rgb_normalized * 255).astype(np.uint8)
+        rgb_to_grey = Image.fromarray(rgb_uint8, mode="RGB")
+        dominant = self.dominant_region(rgb_to_grey, threshold)
+        grayscale_image = np.array(dominant.convert("L"), dtype=np.float32) / 255.0
+        regions = measure.label(grayscale_image > 0)
+
+        if np.max(regions) > 0:
+            region_sums = measure.regionprops(regions, intensity_image=grayscale_image)
+            brightest_region = max(region_sums, key=lambda r: r.mean_intensity)
+            light_y, light_x = brightest_region.centroid
+            light_mask = (regions == brightest_region.label).astype(np.uint8)
+            light_mask_cluster = light_mask
+        else:
+            light_x, light_y = np.nan, np.nan
+            light_mask_cluster = np.zeros_like(dominant, dtype=np.uint8)
+        return light_mask_cluster, light_x, light_y
+
+    def dominant_region(self, image, threshold=128):
+        from scipy.ndimage import label
+        image = ImageOps.invert(image.convert("L"))
+        binary_image = image.point(lambda x: 255 if x > threshold else 0, mode="1")
+        l, n = label(np.array(binary_image))
+        sizes = np.bincount(l.flatten())
+        dominant = 0
+        try:
+            dominant = np.argmax(sizes[1:]) + 1
+        except ValueError:
+            pass
+        dominant_region_mask = (l == dominant).astype(np.uint8) * 255
+        result = Image.fromarray(dominant_region_mask, mode="L")
+        return result.convert("RGB")
+
+    def create_direct_occlusion(self, rgb_image, depth_image, strength=1.0, radius=10, threshold=200, colored=False):
+        rgb_normalized = np.array(rgb_image, dtype=np.float32) / 255.0
+        depth_normalized = np.array(depth_image, dtype=np.float32) / 255.0
+        height, width, _ = rgb_normalized.shape
+        light_mask, light_x, light_y = self.find_light_source(rgb_normalized, threshold)
+        occlusion_array = calculate_direct_occlusion_factor(rgb_normalized, depth_normalized, height, width, radius)
+        #occlusion_scaled = (occlusion_array / np.max(occlusion_array) * 255).astype(np.uint8)
+        occlusion_scaled = ((occlusion_array - np.min(occlusion_array)) / (np.max(occlusion_array) - np.min(occlusion_array)) * 255).astype(np.uint8)
+        occlusion_image = Image.fromarray(occlusion_scaled, mode="L")
+        occlusion_image = occlusion_image.filter(ImageFilter.GaussianBlur(radius=0.5))
+        occlusion_image = occlusion_image.filter(ImageFilter.SMOOTH_MORE)
+
+        if colored:
+            occlusion_result = Image.composite(
+                Image.new("RGB", rgb_image.size, (0, 0, 0)),
+                rgb_image,
+                occlusion_image
+            )
+            occlusion_result = ImageOps.autocontrast(occlusion_result, cutoff=(0, strength))
+        else:
+            # Note: blending an image with itself is a no-op, so the grayscale
+            # map passes through unchanged here regardless of strength
+            occlusion_result = Image.blend(occlusion_image, occlusion_image, strength)
+
+        light_image = ImageOps.invert(Image.fromarray(light_mask * 255, mode="L"))
+
+        direct_occlusion_image = ImageChops.screen(rgb_image, occlusion_result.convert("RGB"))
+
+        return direct_occlusion_image, occlusion_result, occlusion_image, light_image
+
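+# `create_direct_occlusion` min-max scales the raw occlusion array; when the
+# array is flat, np.max - np.min is zero and the division produces NaNs. A
+# guarded version of that normalization (stand-alone sketch):
+
+def _normalize_to_uint8_sketch(arr):
+    """Hypothetical helper: rescale any array to 0..255, tolerating flat input."""
+    import numpy as np
+    lo, hi = float(np.min(arr)), float(np.max(arr))
+    if hi == lo:
+        return np.zeros_like(arr, dtype=np.uint8)
+    return ((arr - lo) / (hi - lo) * 255).astype(np.uint8)
+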
+# EXPORT API
+
+class WAS_Export_API:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "save_prompt_api": (["true","false"],),
+                "output_path": ("STRING", {"default": "./ComfyUI/output/", "multiline": False}),
+                "filename_prefix": ("STRING", {"default": "ComfyUI_Prompt"}),
+                "filename_delimiter": ("STRING", {"default":"_"}),
+                "filename_number_padding": ("INT", {"default":4, "min":2, "max":9, "step":1}),
+                "parse_text_tokens": ("BOOLEAN", {"default": False})
+            },
+            "hidden": {
+                "prompt": "PROMPT"
+            }
+        }
+
+    OUTPUT_NODE = True
+    RETURN_TYPES = ()
+    FUNCTION = "export_api"
+
+    CATEGORY = "WAS Suite/Debug"
+
+    def export_api(self, output_path=None, filename_prefix="ComfyUI", filename_number_padding=4,
+                   filename_delimiter='_', prompt=None, save_prompt_api="true", parse_text_tokens=False):
+        delimiter = filename_delimiter
+        number_padding = filename_number_padding if filename_number_padding > 1 else 4
+
+        tokens = TextTokens()
+
+        if output_path in [None, '', "none", "."]:
+            output_path = comfy_paths.output_directory
+        else:
+            output_path = tokens.parseTokens(output_path)
+
+        pattern = f"{re.escape(filename_prefix)}{re.escape(filename_delimiter)}(\\d{{{number_padding}}})"
+        existing_counters = [
+            int(re.search(pattern, filename).group(1))
+            for filename in os.listdir(output_path)
+            if re.match(pattern, filename)
+        ]
+        existing_counters.sort(reverse=True)
+
+        if existing_counters:
+            counter = existing_counters[0] + 1
+        else:
+            counter = 1
+
+        file = f"{filename_prefix}{filename_delimiter}{counter:0{number_padding}}.json"
+        output_file = os.path.abspath(os.path.join(output_path, file))
+
+        prompt_json = ''
+        if prompt:
+
+            if parse_text_tokens:
+                prompt = self.parse_prompt(prompt, tokens, keys_to_parse)
+
+            prompt_json = json.dumps(prompt, indent=4)
+            cstr("Prompt API JSON").msg.print()
+            print(prompt_json)
+
+            if save_prompt_api == "true":
+
+                with open(output_file, 'w') as f:
+                    f.write(prompt_json)
+
+                cstr(f"Output file path: {output_file}").msg.print()
+
+        return {"ui": {"string": prompt_json}}
+
+    def parse_prompt(self, obj, tokens, keys_to_parse):
+        if isinstance(obj, dict):
+            return {
+                key: self.parse_prompt(value, tokens, keys_to_parse)
+                if key in keys_to_parse else value
+                for key, value in obj.items()
+            }
+        elif isinstance(obj, list):
+            return [self.parse_prompt(element, tokens, keys_to_parse) for element in obj]
+        elif isinstance(obj, str):
+            return tokens.parseTokens(obj)
+        else:
+            return obj
+
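+# Both the node above and Image Save below pick the next file number by
+# scanning existing filenames with a regex. The scheme in isolation
+# (stand-alone sketch, hypothetical prefix and directory):
+
+def _next_counter_sketch(output_path, prefix="ComfyUI_Prompt", delimiter="_", padding=4):
+    """Hypothetical helper: given files like 'ComfyUI_Prompt_0007.json',
+    return the next free counter (8), or 1 for an empty directory."""
+    import os
+    import re
+    pattern = f"{re.escape(prefix)}{re.escape(delimiter)}(\\d{{{padding}}})"
+    counters = [
+        int(m.group(1))
+        for f in os.listdir(output_path)
+        if (m := re.match(pattern, f))
+    ]
+    return max(counters) + 1 if counters else 1
+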
+# Image Save (NSP Compatible)
+# Originally From ComfyUI/nodes.py
+
+class WAS_Image_Save:
+    def __init__(self):
+        self.output_dir = comfy_paths.output_directory
+        self.type = 'output'
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "images": ("IMAGE", ),
+                "output_path": ("STRING", {"default": '[time(%Y-%m-%d)]', "multiline": False}),
+                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+                "filename_delimiter": ("STRING", {"default":"_"}),
+                "filename_number_padding": ("INT", {"default":4, "min":1, "max":9, "step":1}),
+                "filename_number_start": (["false", "true"],),
+                "extension": (['png', 'jpg', 'jpeg', 'gif', 'tiff', 'webp', 'bmp'], ),
+                "quality": ("INT", {"default": 100, "min": 1, "max": 100, "step": 1}),
+                "lossless_webp": (["false", "true"],),
+                "overwrite_mode": (["false", "prefix_as_filename"],),
+                "show_history": (["false", "true"],),
+                "show_history_by_prefix": (["true", "false"],),
+                "embed_workflow": (["true", "false"],),
+                "show_previews": (["true", "false"],),
+            },
+            "hidden": {
+                "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"
+            },
+        }
+
+    RETURN_TYPES = ()
+    FUNCTION = "was_save_images"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "WAS Suite/IO"
+
+    def was_save_images(self, images, output_path='', filename_prefix="ComfyUI", filename_delimiter='_',
+                        extension='png', quality=100, lossless_webp="false", prompt=None, extra_pnginfo=None,
+                        overwrite_mode='false', filename_number_padding=4, filename_number_start='false',
+                        show_history='false', show_history_by_prefix="true", embed_workflow="true",
+                        show_previews="true"):
+
+        delimiter = filename_delimiter
+        number_padding = filename_number_padding
+        lossless_webp = (lossless_webp == "true")
+
+        # Define token system
+        tokens = TextTokens()
+
+        original_output = self.output_dir
+        # Parse prefix tokens
+        filename_prefix = tokens.parseTokens(filename_prefix)
+
+        # Setup output path
+        if output_path in [None, '', "none", "."]:
+            output_path = self.output_dir
+        else:
+            output_path = tokens.parseTokens(output_path)
+        if not os.path.isabs(output_path):
+            output_path = os.path.join(self.output_dir, output_path)
+        base_output = os.path.basename(output_path)
+        if output_path.endswith("ComfyUI/output") or output_path.endswith("ComfyUI\\output"):
+            base_output = ""
+
+        # Check output destination
+        if output_path.strip() != '':
+            if not os.path.isabs(output_path):
+                output_path = os.path.join(comfy_paths.output_directory, output_path)
+            if not os.path.exists(output_path.strip()):
+                cstr(f'The path `{output_path.strip()}` specified doesn\'t exist! Creating directory.').warning.print()
+                os.makedirs(output_path, exist_ok=True)
+
+        # Find existing counter values
+        if filename_number_start == 'true':
+            pattern = f"(\\d+){re.escape(delimiter)}{re.escape(filename_prefix)}"
+        else:
+            pattern = f"{re.escape(filename_prefix)}{re.escape(delimiter)}(\\d+)"
+        existing_counters = [
+            int(re.search(pattern, filename).group(1))
+            for filename in os.listdir(output_path)
+            if re.match(pattern, os.path.basename(filename))
+        ]
+        existing_counters.sort(reverse=True)
+
+        # Set initial counter value
+        if existing_counters:
+            counter = existing_counters[0] + 1
+        else:
+            counter = 1
+
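+        # When `filename_number_start` is 'true' the counter leads the name
+        # (e.g. 0001_ComfyUI.png); otherwise it trails (ComfyUI_0001.png), as
+        # reflected in the two regex patterns above.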
+        # Set Extension
+        file_extension = '.' + extension
+        if file_extension not in ALLOWED_EXT:
+            cstr(f"The extension `{extension}` is not valid. The valid formats are: {', '.join(sorted(ALLOWED_EXT))}").error.print()
+            file_extension = ".png"
+
+        results = list()
+        for image in images:
+            i = 255. * image.cpu().numpy()
+            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
+
+            # Delegate metadata/pnginfo
+            if extension == 'webp':
+                img_exif = img.getexif()
+                workflow_metadata = ''
+                prompt_str = ''
+                if prompt is not None:
+                    prompt_str = json.dumps(prompt)
+                    img_exif[0x010f] = "Prompt:" + prompt_str
+                if extra_pnginfo is not None:
+                    for x in extra_pnginfo:
+                        workflow_metadata += json.dumps(extra_pnginfo[x])
+                    img_exif[0x010e] = "Workflow:" + workflow_metadata
+                exif_data = img_exif.tobytes()
+            else:
+                metadata = PngInfo()
+                if embed_workflow == 'true':
+                    if prompt is not None:
+                        metadata.add_text("prompt", json.dumps(prompt))
+                    if extra_pnginfo is not None:
+                        for x in extra_pnginfo:
+                            metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+                exif_data = metadata
+
+            # Delegate the filename stuffs
+            if overwrite_mode == 'prefix_as_filename':
+                file = f"{filename_prefix}{file_extension}"
+            else:
+                if filename_number_start == 'true':
+                    file = f"{counter:0{number_padding}}{delimiter}{filename_prefix}{file_extension}"
+                else:
+                    file = f"{filename_prefix}{delimiter}{counter:0{number_padding}}{file_extension}"
+                if os.path.exists(os.path.join(output_path, file)):
+                    counter += 1
+
+            # Save the images
+            try:
+                output_file = os.path.abspath(os.path.join(output_path, file))
+                if extension in ["jpg", "jpeg"]:
+                    img.save(output_file,
+                             quality=quality, optimize=True)
+                elif extension == 'webp':
+                    img.save(output_file,
+                             quality=quality, lossless=lossless_webp, exif=exif_data)
+                elif extension == 'png':
+                    img.save(output_file,
+                             pnginfo=exif_data, optimize=True)
+                elif extension == 'bmp':
+                    img.save(output_file)
+                elif extension == 'tiff':
+                    img.save(output_file,
+                             quality=quality, optimize=True)
+                else:
+                    img.save(output_file,
+                             pnginfo=exif_data, optimize=True)
+
+                cstr(f"Image file saved to: {output_file}").msg.print()
+
+                if show_history != 'true' and show_previews == 'true':
+                    subfolder = self.get_subfolder_path(output_file, original_output)
+                    results.append({
+                        "filename": file,
+                        "subfolder": subfolder,
+                        "type": self.type
+                    })
+
+                # Update the output image history
+                update_history_output_images(output_file)
+
+            except OSError as e:
+                cstr(f'Unable to save file to: {output_file}').error.print()
+                print(e)
+            except Exception as e:
+                cstr('Unable to save file due to the following error:').error.print()
+                print(e)
+
+            if overwrite_mode == 'false':
+                counter += 1
+
+        filtered_paths = []
+        if show_history == 'true' and show_previews == 'true':
+            HDB = WASDatabase(WAS_HISTORY_DATABASE)
+            conf = getSuiteConfig()
+            if HDB.catExists("History") and HDB.keyExists("History", "Output_Images"):
+                history_paths = HDB.get("History", "Output_Images")
+            else:
+                history_paths = None
+
+            if history_paths:
+
+                for image_path in history_paths:
+                    image_subdir = self.get_subfolder_path(image_path, self.output_dir)
+                    current_subdir = self.get_subfolder_path(output_file, self.output_dir)
+                    if not os.path.exists(image_path):
+                        continue
+                    if show_history_by_prefix == 'true' and image_subdir != current_subdir:
+                        continue
+                    if show_history_by_prefix == 'true' and not os.path.basename(image_path).startswith(filename_prefix):
+                        continue
+                    filtered_paths.append(image_path)
+
+                if conf.__contains__('history_display_limit'):
+                    filtered_paths =
filtered_paths[-conf['history_display_limit']:] + + filtered_paths.reverse() + + if filtered_paths: + for image_path in filtered_paths: + subfolder = self.get_subfolder_path(image_path, self.output_dir) + image_data = { + "filename": os.path.basename(image_path), + "subfolder": subfolder, + "type": self.type + } + results.append(image_data) + + if show_previews == 'true': + return {"ui": {"images": results}} + else: + return {"ui": {"images": []}} + + def get_subfolder_path(self, image_path, output_path): + output_parts = output_path.strip(os.sep).split(os.sep) + image_parts = image_path.strip(os.sep).split(os.sep) + common_parts = os.path.commonprefix([output_parts, image_parts]) + subfolder_parts = image_parts[len(common_parts):] + subfolder_path = os.sep.join(subfolder_parts[:-1]) + return subfolder_path + + +# LOAD IMAGE NODE +class WAS_Load_Image: + + def __init__(self): + self.input_dir = comfy_paths.input_directory + self.HDB = WASDatabase(WAS_HISTORY_DATABASE) + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_path": ("STRING", {"default": './ComfyUI/input/example.png', "multiline": False}), + "RGBA": (["false","true"],), + }, + "optional": { + "filename_text_extension": (["true", "false"],), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", TEXT_TYPE) + RETURN_NAMES = ("image", "mask", "filename_text") + FUNCTION = "load_image" + + CATEGORY = "WAS Suite/IO" + + def load_image(self, image_path, RGBA='false', filename_text_extension="true"): + + RGBA = (RGBA == 'true') + + if image_path.startswith('http'): + from io import BytesIO + i = self.download_image(image_path) + else: + try: + i = Image.open(image_path) + except OSError: + cstr(f"The image `{image_path.strip()}` specified doesn't exist!").error.print() + i = Image.new(mode='RGB', size=(512, 512), color=(0, 0, 0)) + if not i: + return + + # Update history + update_history_images(image_path) + + image = i + if not RGBA: + image = image.convert('RGB') + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. 
                - torch.from_numpy(mask)
+        else:
+            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
+
+        if filename_text_extension == "true":
+            filename = os.path.basename(image_path)
+        else:
+            filename = os.path.splitext(os.path.basename(image_path))[0]
+
+        return (image, mask, filename)
+
+    def download_image(self, url):
+        from io import BytesIO  # the import in load_image() is local to that method
+        try:
+            response = requests.get(url)
+            response.raise_for_status()
+            img = Image.open(BytesIO(response.content))
+            return img
+        except requests.exceptions.HTTPError as errh:
+            cstr(f"HTTP Error: ({url}): {errh}").error.print()
+        except requests.exceptions.ConnectionError as errc:
+            cstr(f"Connection Error: ({url}): {errc}").error.print()
+        except requests.exceptions.Timeout as errt:
+            cstr(f"Timeout Error: ({url}): {errt}").error.print()
+        except requests.exceptions.RequestException as err:
+            cstr(f"Request Exception: ({url}): {err}").error.print()
+
+    @classmethod
+    def IS_CHANGED(cls, image_path):
+        if image_path.startswith('http'):
+            return float("NaN")
+        m = hashlib.sha256()
+        with open(image_path, 'rb') as f:
+            m.update(f.read())
+        return m.digest().hex()
+
+# MASK BATCH TO MASK
+
+class WAS_Mask_Batch_to_Single_Mask:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "masks": ("MASK",),
+                "batch_number": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("MASK",)
+    FUNCTION = "mask_batch_to_mask"
+
+    CATEGORY = "WAS Suite/Image/Masking"
+
+    def mask_batch_to_mask(self, masks=[], batch_number=0):
+        count = 0
+        for _ in masks:
+            if batch_number == count:
+                tensor = masks[batch_number][0]
+                return (tensor,)
+            count += 1
+
+        cstr(f"Batch number `{batch_number}` is not defined, returning last image").error.print()
+        last_tensor = masks[-1][0]
+        return (last_tensor,)
+
+# TENSOR BATCH TO IMAGE
+
+class WAS_Tensor_Batch_to_Image:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "images_batch": ("IMAGE",),
+                "batch_image_number": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "tensor_batch_to_image"
+
+    CATEGORY = "WAS Suite/Latent/Transform"
+
+    def tensor_batch_to_image(self, images_batch=[], batch_image_number=0):
+
+        count = 0
+        for _ in images_batch:
+            if batch_image_number == count:
+                return (images_batch[batch_image_number].unsqueeze(0), )
+            count = count+1
+
+        cstr(f"Batch number `{batch_image_number}` is not defined, returning last image").error.print()
+        return (images_batch[-1].unsqueeze(0), )
+
+
+#!
LATENT NODES + +# IMAGE TO MASK + +class WAS_Image_To_Mask: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "channel": (["alpha", "red", "green", "blue"], ), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "image_to_mask" + + def image_to_mask(self, images, channel): + mask_images = [] + for image in images: + + image = tensor2pil(image).convert("RGBA") + r, g, b, a = image.split() + if channel == "red": + channel_image = r + elif channel == "green": + channel_image = g + elif channel == "blue": + channel_image = b + elif channel == "alpha": + channel_image = a + + mask = torch.from_numpy(np.array(channel_image.convert("L")).astype(np.float32) / 255.0) + mask_images.append(mask) + + return (torch.cat(mask_images, dim=0), ) + + +# MASK TO IMAGE + +class WAS_Mask_To_Image: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("IMAGES",) + + FUNCTION = "mask_to_image" + + def mask_to_image(self, masks): + if masks.ndim == 4: + # If input has shape [N, C, H, W] + tensor = masks.permute(0, 2, 3, 1) + tensor_rgb = torch.cat([tensor] * 3, dim=-1) + return (tensor_rgb,) + elif masks.ndim == 3: + # If Input has shape [N, H, W] + tensor = masks.unsqueeze(-1) + tensor_rgb = torch.cat([tensor] * 3, dim=-1) + return (tensor_rgb, ) + elif masks.ndim == 2: + # If input has shape [H, W] + tensor = masks.unsqueeze(0).unsqueeze(-1) + tensor_rgb = torch.cat([tensor] * 3, dim=-1) + return (tensor_rgb,) + else: + cstr("Invalid input shape. Expected [N, C, H, W] or [H, W].").error.print() + return masks + + +# MASK CROP DOMINANT REGION + +class WAS_Mask_Crop_Dominant_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "padding": ("INT", {"default": 24, "min": 0, "max": 4096, "step": 1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "crop_dominant_region" + + def crop_dominant_region(self, masks, padding=24): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_pil = Image.fromarray(np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.crop_dominant_region(mask_pil, padding) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_pil = Image.fromarray(np.clip(255. 
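+                # The same recipe repeats across the masking nodes below:
+                # scale the 0..1 mask tensor to 0..255, squeeze the batch
+                # axis, cast to uint8, and wrap it in a PIL 'L' image before
+                # handing it to WAS_Tools_Class.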
* masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.crop_dominant_region(mask_pil, padding) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK CROP MINORITY REGION + +class WAS_Mask_Crop_Minority_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "padding": ("INT", {"default": 24, "min": 0, "max": 4096, "step": 1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "crop_minority_region" + + def crop_minority_region(self, masks, padding=24): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_pil = Image.fromarray(np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.crop_minority_region(mask_pil, padding) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_pil = Image.fromarray(np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.crop_minority_region(mask_pil, padding) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK CROP REGION + +class WAS_Mask_Crop_Region: + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "padding": ("INT",{"default": 24, "min": 0, "max": 4096, "step": 1}), + "region_type": (["dominant", "minority"],), + } + } + + RETURN_TYPES = ("MASK", "CROP_DATA", "INT", "INT", "INT", "INT", "INT", "INT") + RETURN_NAMES = ("cropped_mask", "crop_data", "top_int", "left_int", "right_int", "bottom_int", "width_int", "height_int") + FUNCTION = "mask_crop_region" + + CATEGORY = "WAS Suite/Image/Masking" + + def mask_crop_region(self, mask, padding=24, region_type="dominant"): + + mask_pil = Image.fromarray(np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask, crop_data = self.WT.Masking.crop_region(mask_pil, region_type, padding) + region_tensor = pil2mask(ImageOps.invert(region_mask)).unsqueeze(0).unsqueeze(1) + + (width, height), (left, top, right, bottom) = crop_data + + return (region_tensor, crop_data, top, left, right, bottom, width, height) + + +# IMAGE PASTE CROP + +class WAS_Mask_Paste_Region: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "crop_mask": ("MASK",), + "crop_data": ("CROP_DATA",), + "crop_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}), + "crop_sharpening": ("INT", {"default": 0, "min": 0, "max": 3, "step": 1}), + } + } + + RETURN_TYPES = ("MASK", "MASK") + FUNCTION = "mask_paste_region" + + CATEGORY = "WAS Suite/Image/Masking" + + def mask_paste_region(self, mask, crop_mask, crop_data=None, crop_blending=0.25, crop_sharpening=0): + + if crop_data == False: + cstr("No valid crop data found!").error.print() + return( pil2mask(Image.new("L", (512, 512), 0)).unsqueeze(0).unsqueeze(1), + pil2mask(Image.new("L", (512, 512), 0)).unsqueeze(0).unsqueeze(1) ) + + mask_pil = Image.fromarray(np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + mask_crop_pil = Image.fromarray(np.clip(255. 
* crop_mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + + result_mask, result_crop_mask = self.paste_image(mask_pil, mask_crop_pil, crop_data, crop_blending, crop_sharpening) + + return (pil2mask(result_mask).unsqueeze(0).unsqueeze(1), pil2mask(result_crop_mask).unsqueeze(0).unsqueeze(1)) + + def paste_image(self, image, crop_image, crop_data, blend_amount=0.25, sharpen_amount=1): + + def lingrad(size, direction, white_ratio): + image = Image.new('RGB', size) + draw = ImageDraw.Draw(image) + if direction == 'vertical': + black_end = int(size[1] * (1 - white_ratio)) + range_start = 0 + range_end = size[1] + range_step = 1 + for y in range(range_start, range_end, range_step): + color_ratio = y / size[1] + if y <= black_end: + color = (0, 0, 0) + else: + color_value = int(((y - black_end) / (size[1] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(0, y), (size[0], y)], fill=color) + elif direction == 'horizontal': + black_end = int(size[0] * (1 - white_ratio)) + range_start = 0 + range_end = size[0] + range_step = 1 + for x in range(range_start, range_end, range_step): + color_ratio = x / size[0] + if x <= black_end: + color = (0, 0, 0) + else: + color_value = int(((x - black_end) / (size[0] - black_end)) * 255) + color = (color_value, color_value, color_value) + draw.line([(x, 0), (x, size[1])], fill=color) + + return image.convert("L") + + crop_size, (left, top, right, bottom) = crop_data + crop_image = crop_image.resize(crop_size) + + if sharpen_amount > 0: + for _ in range(int(sharpen_amount)): + crop_image = crop_image.filter(ImageFilter.SHARPEN) + + blended_image = Image.new('RGBA', image.size, (0, 0, 0, 255)) + blended_mask = Image.new('L', image.size, 0) # Update to 'L' mode for MASK image + crop_padded = Image.new('RGBA', image.size, (0, 0, 0, 0)) + blended_image.paste(image, (0, 0)) + crop_padded.paste(crop_image, (left, top)) + crop_mask = Image.new('L', crop_image.size, 0) + + if top > 0: + gradient_image = ImageOps.flip(lingrad(crop_image.size, 'vertical', blend_amount)) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if left > 0: + gradient_image = ImageOps.mirror(lingrad(crop_image.size, 'horizontal', blend_amount)) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if right < image.width: + gradient_image = lingrad(crop_image.size, 'horizontal', blend_amount) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + if bottom < image.height: + gradient_image = lingrad(crop_image.size, 'vertical', blend_amount) + crop_mask = ImageChops.screen(crop_mask, gradient_image) + + crop_mask = ImageOps.invert(crop_mask) + blended_mask.paste(crop_mask, (left, top)) + blended_mask = blended_mask.convert("L") + blended_image.paste(crop_padded, (0, 0), blended_mask) + + return (ImageOps.invert(blended_image.convert("RGB")).convert("L"), ImageOps.invert(blended_mask.convert("RGB")).convert("L")) + + + + +# MASK DOMINANT REGION + +class WAS_Mask_Dominant_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "threshold": ("INT", {"default":128, "min":0, "max":255, "step":1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "dominant_region" + + def dominant_region(self, masks, threshold=128): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_pil = Image.fromarray(np.clip(255. 
* mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.dominant_region(mask_pil, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_pil = Image.fromarray(np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) + region_mask = self.WT.Masking.dominant_region(mask_pil, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK MINORITY REGION + +class WAS_Mask_Minority_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "threshold": ("INT", {"default":128, "min":0, "max":255, "step":1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "minority_region" + + def minority_region(self, masks, threshold=128): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.minority_region(pil_image, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.minority_region(pil_image, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + + +# MASK ARBITRARY REGION + +class WAS_Mask_Arbitrary_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "size": ("INT", {"default":256, "min":1, "max":4096, "step":1}), + "threshold": ("INT", {"default":128, "min":0, "max":255, "step":1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "arbitrary_region" + + def arbitrary_region(self, masks, size=256, threshold=128): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.arbitrary_region(pil_image, size, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. 
* masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.arbitrary_region(pil_image, size, threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + +# MASK SMOOTH REGION + +class WAS_Mask_Smooth_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "sigma": ("FLOAT", {"default":5.0, "min":0.0, "max":128.0, "step":0.1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "smooth_region" + + def smooth_region(self, masks, sigma=128): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.smooth_region(pil_image, sigma) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.smooth_region(pil_image, sigma) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK ERODE REGION + +class WAS_Mask_Erode_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "iterations": ("INT", {"default":5, "min":1, "max":64, "step":1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "erode_region" + + def erode_region(self, masks, iterations=5): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.erode_region(pil_image, iterations) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. 
* masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.erode_region(pil_image, iterations) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + +# MASKS SUBTRACT + +class WAS_Mask_Subtract: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks_a": ("MASK",), + "masks_b": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "subtract_masks" + + def subtract_masks(self, masks_a, masks_b): + subtracted_masks = torch.clamp(masks_a - masks_b, 0, 255) + return (subtracted_masks,) + +# MASKS ADD + +class WAS_Mask_Add: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks_a": ("MASK",), + "masks_b": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "add_masks" + + def add_masks(self, masks_a, masks_b): + if masks_a.ndim > 2 and masks_b.ndim > 2: + added_masks = masks_a + masks_b + else: + added_masks = torch.clamp(masks_a.unsqueeze(1) + masks_b.unsqueeze(1), 0, 255) + added_masks = added_masks.squeeze(1) + return (added_masks,) + +# MASKS ADD + +class WAS_Mask_Invert: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "add_masks" + + def add_masks(self, masks): + return (1. - masks,) + +# MASK DILATE REGION + +class WAS_Mask_Dilate_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "iterations": ("INT", {"default":5, "min":1, "max":64, "step":1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "dilate_region" + + def dilate_region(self, masks, iterations=5): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.dilate_region(pil_image, iterations) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.dilate_region(pil_image, iterations) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK FILL REGION + +class WAS_Mask_Fill_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "fill_region" + + def fill_region(self, masks): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. 
* mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.fill_region(pil_image) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.fill_region(pil_image) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK THRESHOLD + +class WAS_Mask_Threshold_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + "black_threshold": ("INT",{"default":75, "min":0, "max": 255, "step": 1}), + "white_threshold": ("INT",{"default":175, "min":0, "max": 255, "step": 1}), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "threshold_region" + + def threshold_region(self, masks, black_threshold=75, white_threshold=255): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.threshold_region(pil_image, black_threshold, white_threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.threshold_region(pil_image, black_threshold, white_threshold) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK FLOOR REGION + +class WAS_Mask_Floor_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "floor_region" + + def floor_region(self, masks): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.floor_region(pil_image) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + regions.append(region_tensor) + regions_tensor = torch.cat(regions, dim=0) + return (regions_tensor,) + else: + mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + pil_image = Image.fromarray(mask_np, mode="L") + region_mask = self.WT.Masking.floor_region(pil_image) + region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1) + return (region_tensor,) + + +# MASK CEILING REGION + +class WAS_Mask_Ceiling_Region: + + def __init__(self): + self.WT = WAS_Tools_Class() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + } + } + + CATEGORY = "WAS Suite/Image/Masking" + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("MASKS",) + + FUNCTION = "ceiling_region" + + def ceiling_region(self, masks): + if masks.ndim > 3: + regions = [] + for mask in masks: + mask_np = np.clip(255. 
* mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+                pil_image = Image.fromarray(mask_np, mode="L")
+                region_mask = self.WT.Masking.ceiling_region(pil_image)
+                region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
+                regions.append(region_tensor)
+            regions_tensor = torch.cat(regions, dim=0)
+            return (regions_tensor,)
+        else:
+            mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+            pil_image = Image.fromarray(mask_np, mode="L")
+            region_mask = self.WT.Masking.ceiling_region(pil_image)
+            region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
+            return (region_tensor,)
+
+
+# MASK GAUSSIAN REGION
+
+class WAS_Mask_Gaussian_Region:
+
+    def __init__(self):
+        self.WT = WAS_Tools_Class()
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "masks": ("MASK",),
+                "radius": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 1024, "step": 0.1}),
+            }
+        }
+
+    CATEGORY = "WAS Suite/Image/Masking"
+
+    RETURN_TYPES = ("MASK",)
+    RETURN_NAMES = ("MASKS",)
+
+    FUNCTION = "gaussian_region"
+
+    def gaussian_region(self, masks, radius=5.0):
+        if masks.ndim > 3:
+            regions = []
+            for mask in masks:
+                mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+                pil_image = Image.fromarray(mask_np, mode="L")
+                region_mask = self.WT.Masking.gaussian_region(pil_image, radius)
+                region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
+                regions.append(region_tensor)
+            regions_tensor = torch.cat(regions, dim=0)
+            return (regions_tensor,)
+        else:
+            mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
+            pil_image = Image.fromarray(mask_np, mode="L")
+            region_mask = self.WT.Masking.gaussian_region(pil_image, radius)
+            region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
+            return (region_tensor,)
+
+
+# MASK COMBINE
+
+class WAS_Mask_Combine:
+
+    def __init__(self):
+        self.WT = WAS_Tools_Class()
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "mask_a": ("MASK",),
+                "mask_b": ("MASK",),
+            },
+            "optional": {
+                "mask_c": ("MASK",),
+                "mask_d": ("MASK",),
+                "mask_e": ("MASK",),
+                "mask_f": ("MASK",),
+            }
+        }
+
+    CATEGORY = "WAS Suite/Image/Masking"
+
+    RETURN_TYPES = ("MASK",)
+
+    FUNCTION = "combine_masks"
+
+    def combine_masks(self, mask_a, mask_b, mask_c=None, mask_d=None, mask_e=None, mask_f=None):
+        # `if mask_c:` on a multi-element tensor raises "Boolean value of Tensor is
+        # ambiguous"; the optional inputs must be tested against None instead
+        masks = [mask_a, mask_b]
+        if mask_c is not None:
+            masks.append(mask_c)
+        if mask_d is not None:
+            masks.append(mask_d)
+        if mask_e is not None:
+            masks.append(mask_e)
+        if mask_f is not None:
+            masks.append(mask_f)
+        combined_mask = torch.sum(torch.stack(masks, dim=0), dim=0)
+        combined_mask = torch.clamp(combined_mask, 0, 1)  # Ensure values are between 0 and 1
+        return (combined_mask, )
+
+class WAS_Mask_Combine_Batch:
+
+    def __init__(self):
+        self.WT = WAS_Tools_Class()
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "masks": ("MASK",),
+            },
+        }
+
+    CATEGORY = "WAS Suite/Image/Masking"
+
+    RETURN_TYPES = ("MASK",)
+
+    FUNCTION = "combine_masks"
+
+    def combine_masks(self, masks):
+        combined_mask = torch.sum(torch.stack([mask.unsqueeze(0) for mask in masks], dim=0), dim=0)
+        combined_mask = torch.clamp(combined_mask, 0, 1)  # Ensure values are between 0 and 1
+        return (combined_mask, )
+
+# LATENT UPSCALE NODE
+
+class WAS_Latent_Upscale:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {"samples": ("LATENT",), "mode": (["area", "bicubic", "bilinear", "nearest"],),
+                             "factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 8.0, "step": 0.01}),
+                             "align": (["true", "false"], )}}
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "latent_upscale"
+
+    CATEGORY = "WAS Suite/Latent/Transform"
+
+    def latent_upscale(self, samples, mode, factor, align):
+        s = samples.copy()  # copy first; the error paths below previously returned `s` before it existed
+        valid_modes = ["area", "bicubic", "bilinear", "nearest"]
+        if mode not in valid_modes:
+            cstr(f"Invalid interpolation mode `{mode}` selected. Valid modes are: {', '.join(valid_modes)}").error.print()
+            return (s, )
+        align = True if align == 'true' else False
+        if not isinstance(factor, float) or factor <= 0:
+            cstr(f"The input `factor` is `{factor}`, but should be a positive float.").error.print()
+            return (s, )
+        shape = s['samples'].shape
+        size = tuple(int(round(dim * factor)) for dim in shape[-2:])
+        if mode in ['bilinear', 'bicubic']:
+            # align_corners is only accepted by the linear-family modes
+            s["samples"] = torch.nn.functional.interpolate(
+                s['samples'], size=size, mode=mode, align_corners=align)
+        else:
+            s["samples"] = torch.nn.functional.interpolate(s['samples'], size=size, mode=mode)
+        return (s,)
+
+# LATENT NOISE INJECTION NODE
+
+
+class WAS_Latent_Noise:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "samples": ("LATENT",),
+                "noise_std": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}),
+            }
+        }
+
+    RETURN_TYPES = ("LATENT",)
+    FUNCTION = "inject_noise"
+
+    CATEGORY = "WAS Suite/Latent/Generate"
+
+    def inject_noise(self, samples, noise_std):
+        s = samples.copy()
+        noise = torch.randn_like(s["samples"]) * noise_std
+        s["samples"] = s["samples"] + noise
+        return (s,)
+
+
+
+# MIDAS MODEL LOADER NODE
+
+class MiDaS_Model_Loader:
+    def __init__(self):
+        self.midas_dir = os.path.join(MODELS_DIR, 'midas')
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "midas_model": (["DPT_Large", "DPT_Hybrid"],),
+            },
+        }
+
+    RETURN_TYPES = ("MIDAS_MODEL",)
+    RETURN_NAMES = ("midas_model",)
+    FUNCTION = "load_midas_model"
+
+    CATEGORY = "WAS Suite/Loaders"
+
+    def load_midas_model(self, midas_model):
+
+        global MIDAS_INSTALLED
+
+        if not MIDAS_INSTALLED:
+            self.install_midas()
+
+        if midas_model == 'DPT_Large':
+            model_name = 'dpt_large_384.pt'
+        elif midas_model == 'DPT_Hybrid':
+            model_name = 'dpt_hybrid_384.pt'
+        else:
+            model_name = 'dpt_large_384.pt'
+
+        model_path = os.path.join(self.midas_dir, 'checkpoints'+os.sep+model_name)
+
+        torch.hub.set_dir(self.midas_dir)
+        if os.path.exists(model_path):
+            cstr(f"Loading MiDaS Model from `{model_path}`").msg.print()
+        else:
+            cstr("Downloading and loading MiDaS Model...").msg.print()
+        # load unconditionally: torch.hub serves the cached checkpoint when present,
+        # and the previous branch left `midas` undefined on the cached path
+        midas = torch.hub.load("intel-isl/MiDaS", midas_model, trust_repo=True)
+        device = torch.device("cpu")
+
+        cstr(f"MiDaS is using passive device `{device}` until in use.").msg.print()
+
+        midas.to(device)
+        midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
+        transform = midas_transforms.dpt_transform
+
+        return ( (midas, transform), )
+
+    def install_midas(self):
+        global MIDAS_INSTALLED
+        if 'timm' not in packages():
+            install_package("timm")
+        MIDAS_INSTALLED = True
+
+
+# MIDAS DEPTH APPROXIMATION NODE
+
+class MiDaS_Depth_Approx:
+    def __init__(self):
+        self.midas_dir = os.path.join(MODELS_DIR, 'midas')
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "use_cpu": (["false", "true"],),
+                "midas_type": (["DPT_Large", "DPT_Hybrid"],),
+                "invert_depth": (["false", "true"],),
+            },
+            "optional": {
+                "midas_model": ("MIDAS_MODEL",),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("images",)
FUNCTION = "midas_approx" + + CATEGORY = "WAS Suite/Image/AI" + + def midas_approx(self, image, use_cpu, midas_type, invert_depth, midas_model=None): + + global MIDAS_INSTALLED + + if not MIDAS_INSTALLED: + self.install_midas() + + import cv2 as cv + + if midas_model: + + midas = midas_model[0] + transform = midas_model[1] + device = torch.device("cuda") if torch.cuda.is_available() and use_cpu == 'false' else torch.device("cpu") + cstr(f"MiDaS is using device: {device}").msg.print() + midas.to(device).eval() + + else: + + if midas_model == 'DPT_Large': + model_name = 'dpt_large_384.pt' + elif midas_model == 'DPT_Hybrid': + model_name = 'dpt_hybrid_384.pt' + else: + model_name = 'dpt_large_384.pt' + + model_path = os.path.join(self.midas_dir, 'checkpoints'+os.sep+model_name) + + torch.hub.set_dir(self.midas_dir) + if os.path.exists(model_path): + cstr(f"Loading MiDaS Model from `{model_path}`").msg.print() + midas_type = model_path + else: + cstr("Downloading and loading MiDaS Model...").msg.print() + midas = torch.hub.load("intel-isl/MiDaS", midas_type, trust_repo=True) + + cstr(f"MiDaS is using device: {device}").msg.print() + + midas.to(device).eval() + midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") + + transform = midas_transforms.dpt_transform + + tensor_images = [] + for i, img in enumerate(image): + + img = np.array(tensor2pil(img)) + + img = cv.cvtColor(img, cv.COLOR_BGR2RGB) + input_batch = transform(img).to(device) + + cstr(f"Approximating depth for image {i+1}/{len(image)}").msg.print() + + with torch.no_grad(): + prediction = midas(input_batch) + prediction = torch.nn.functional.interpolate( + prediction.unsqueeze(1), + size=img.shape[:2], + mode="bicubic", + align_corners=False, + ).squeeze() + + + # Normalize and convert to uint8 + min_val = torch.min(prediction) + max_val = torch.max(prediction) + prediction = (prediction - min_val) / (max_val - min_val) + prediction = (prediction * 255).clamp(0, 255).round().cpu().numpy().astype(np.uint8) + + depth = Image.fromarray(prediction) + + # Invert depth map + if invert_depth == 'true': + depth = ImageOps.invert(depth) + + tensor_images.append(pil2tensor(depth.convert("RGB"))) + + tensor_images = torch.cat(tensor_images, dim=0) + if not midas_model: + del midas, device, midas_transforms + del midas, transform, img, input_batch, prediction + + return (tensor_images, ) + + def install_midas(self): + global MIDAS_INSTALLED + if 'timm' not in packages(): + install_package("timm") + MIDAS_INSTALLED = True + +# MIDAS REMOVE BACKGROUND/FOREGROUND NODE + + +class MiDaS_Background_Foreground_Removal: + def __init__(self): + self.midas_dir = os.path.join(MODELS_DIR, 'midas') + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "use_cpu": (["false", "true"],), + "midas_model": (["DPT_Large", "DPT_Hybrid", "DPT_Small"],), + "remove": (["background", "foregroud"],), + "threshold": (["false", "true"],), + "threshold_low": ("FLOAT", {"default": 10, "min": 0, "max": 255, "step": 1}), + "threshold_mid": ("FLOAT", {"default": 200, "min": 0, "max": 255, "step": 1}), + "threshold_high": ("FLOAT", {"default": 210, "min": 0, "max": 255, "step": 1}), + "smoothing": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 16.0, "step": 0.01}), + "background_red": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + "background_green": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + "background_blue": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), + }, + } + + RETURN_TYPES = 
("IMAGE", "IMAGE") + FUNCTION = "midas_remove" + + CATEGORY = "WAS Suite/Image/AI" + + def midas_remove(self, + image, + midas_model, + use_cpu='false', + remove='background', + threshold='false', + threshold_low=0, + threshold_mid=127, + threshold_high=255, + smoothing=0.25, + background_red=0, + background_green=0, + background_blue=0): + + global MIDAS_INSTALLED + + if not MIDAS_INSTALLED: + self.install_midas() + + import cv2 as cv + + # Convert the input image tensor to a numpy and PIL Image + i = 255. * image.cpu().numpy().squeeze() + img = i + # Original image + img_original = tensor2pil(image).convert('RGB') + + cstr("Downloading and loading MiDaS Model...").msg.print() + torch.hub.set_dir(self.midas_dir) + midas = torch.hub.load("intel-isl/MiDaS", midas_model, trust_repo=True) + device = torch.device("cuda") if torch.cuda.is_available( + ) and use_cpu == 'false' else torch.device("cpu") + + cstr(f"MiDaS is using device: {device}").msg.print() + + midas.to(device).eval() + midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") + + if midas_model == "DPT_Large" or midas_model == "DPT_Hybrid": + transform = midas_transforms.dpt_transform + else: + transform = midas_transforms.small_transform + + img = cv.cvtColor(img, cv.COLOR_BGR2RGB) + input_batch = transform(img).to(device) + + cstr("Approximating depth from image.").msg.print() + + with torch.no_grad(): + prediction = midas(input_batch) + prediction = torch.nn.functional.interpolate( + prediction.unsqueeze(1), + size=img.shape[:2], + mode="bicubic", + align_corners=False, + ).squeeze() + + # Invert depth map + if remove == 'foreground': + depth = (255 - prediction.cpu().numpy().astype(np.uint8)) + depth = depth.astype(np.float32) + else: + depth = prediction.cpu().numpy().astype(np.float32) + depth = depth * 255 / (np.max(depth)) / 255 + depth = Image.fromarray(np.uint8(depth * 255)) + + # Threshold depth mask + if threshold == 'true': + levels = self.AdjustLevels( + threshold_low, threshold_mid, threshold_high) + depth = levels.adjust(depth.convert('RGB')).convert('L') + if smoothing > 0: + depth = depth.filter(ImageFilter.GaussianBlur(radius=smoothing)) + depth = depth.resize(img_original.size).convert('L') + + # Validate background color arguments + background_red = int(background_red) if isinstance( + background_red, (int, float)) else 0 + background_green = int(background_green) if isinstance( + background_green, (int, float)) else 0 + background_blue = int(background_blue) if isinstance( + background_blue, (int, float)) else 0 + + # Create background color tuple + background_color = (background_red, background_green, background_blue) + + # Create background image + background = Image.new( + mode="RGB", size=img_original.size, color=background_color) + + # Composite final image + result_img = Image.composite(img_original, background, depth) + + del midas, device, midas_transforms + del transform, img, img_original, input_batch, prediction + + return (pil2tensor(result_img), pil2tensor(depth.convert('RGB'))) + + class AdjustLevels: + def __init__(self, min_level, mid_level, max_level): + self.min_level = min_level + self.mid_level = mid_level + self.max_level = max_level + + def adjust(self, im): + # load the image + + # convert the image to a numpy array + im_arr = np.array(im) + + # apply the min level adjustment + im_arr[im_arr < self.min_level] = self.min_level + + # apply the mid level adjustment + im_arr = (im_arr - self.min_level) * \ + (255 / (self.max_level - self.min_level)) + im_arr[im_arr < 0] = 0 + 
im_arr[im_arr > 255] = 255 + im_arr = im_arr.astype(np.uint8) + + # apply the max level adjustment + im = Image.fromarray(im_arr) + im = ImageOps.autocontrast(im, cutoff=self.max_level) + + return im + + def install_midas(self): + global MIDAS_INSTALLED + if 'timm' not in packages(): + install_package("timm") + MIDAS_INSTALLED = True + + +#! CONDITIONING NODES + + +# NSP CLIPTextEncode NODE + +class WAS_NSP_CLIPTextEncoder: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mode": (["Noodle Soup Prompts", "Wildcards"],), + "noodle_key": ("STRING", {"default": '__', "multiline": False}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "text": ("STRING", {"multiline": True}), + "clip": ("CLIP",), + } + } + + OUTPUT_NODE = True + RETURN_TYPES = ("CONDITIONING", TEXT_TYPE, TEXT_TYPE) + RETURN_NAMES = ("conditioning", "parsed_text", "raw_text") + FUNCTION = "nsp_encode" + + CATEGORY = "WAS Suite/Conditioning" + + def nsp_encode(self, clip, text, mode="Noodle Soup Prompts", noodle_key='__', seed=0): + + if mode == "Noodle Soup Prompts": + new_text = nsp_parse(text, seed, noodle_key) + else: + new_text = replace_wildcards(text, (None if seed == 0 else seed), noodle_key) + + new_text = parse_dynamic_prompt(new_text, seed) + new_text, text_vars = parse_prompt_vars(new_text) + cstr(f"CLIPTextEncode Prased Prompt:\n {new_text}").msg.print() + CLIPTextEncode = nodes.CLIPTextEncode() + encoded = CLIPTextEncode.encode(clip=clip, text=new_text) + + return (encoded[0], new_text, text, { "ui": { "string": new_text } }) + + +#! SAMPLING NODES + +# KSAMPLER + +class WAS_KSampler: + @classmethod + def INPUT_TYPES(cls): + return {"required": + + {"model": ("MODEL", ), + "seed": ("SEED", ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "sample" + + CATEGORY = "WAS Suite/Sampling" + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0): + return nodes.common_ksampler(model, seed['seed'], steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) + +# KSampler Cycle + +class WAS_KSampler_Cycle: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + "tiled_vae": (["disable", "enable"], ), + "latent_upscale": (["disable","nearest-exact", "bilinear", "area", "bicubic", "bislerp"],), + "upscale_factor": ("FLOAT", {"default":2.0, "min": 0.1, "max": 8.0, "step": 0.1}), + "upscale_cycles": ("INT", {"default": 2, "min": 2, "max": 12, "step": 1}), + "starting_denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "cycle_denoise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 
0.01}), + "scale_denoise": (["enable", "disable"],), + "scale_sampling": (["bilinear", "bicubic", "nearest", "lanczos"],), + "vae": ("VAE",), + }, + "optional": { + "secondary_model": ("MODEL",), + "secondary_start_cycle": ("INT", {"default": 2, "min": 2, "max": 16, "step": 1}), + "upscale_model": ("UPSCALE_MODEL",), + "processor_model": ("UPSCALE_MODEL",), + "pos_additive": ("CONDITIONING",), + "neg_additive": ("CONDITIONING",), + "pos_add_mode": (["increment", "decrement"],), + "pos_add_strength": ("FLOAT", {"default": 0.25, "min": 0.01, "max": 1.0, "step": 0.01}), + "pos_add_strength_scaling": (["enable", "disable"],), + "pos_add_strength_cutoff": ("FLOAT", {"default": 2.0, "min": 0.01, "max": 10.0, "step": 0.01}), + "neg_add_mode": (["increment", "decrement"],), + "neg_add_strength": ("FLOAT", {"default": 0.25, "min": 0.01, "max": 1.0, "step": 0.01}), + "neg_add_strength_scaling": (["enable", "disable"],), + "neg_add_strength_cutoff": ("FLOAT", {"default": 2.0, "min": 0.01, "max": 10.0, "step": 0.01}), + "sharpen_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "sharpen_radius": ("INT", {"default": 2, "min": 1, "max": 12, "step": 1}), + "steps_scaling": (["enable", "disable"],), + "steps_control": (["decrement", "increment"],), + "steps_scaling_value": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}), + "steps_cutoff": ("INT", {"default": 20, "min": 4, "max": 1000, "step": 1}), + "denoise_cutoff": ("FLOAT", {"default": 0.25, "min": 0.01, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("latent(s)",) + FUNCTION = "sample" + + CATEGORY = "WAS Suite/Sampling" + + def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, tiled_vae, latent_upscale, upscale_factor, + upscale_cycles, starting_denoise, cycle_denoise, scale_denoise, scale_sampling, vae, secondary_model=None, secondary_start_cycle=None, + pos_additive=None, pos_add_mode=None, pos_add_strength=None, pos_add_strength_scaling=None, pos_add_strength_cutoff=None, + neg_additive=None, neg_add_mode=None, neg_add_strength=None, neg_add_strength_scaling=None, neg_add_strength_cutoff=None, + upscale_model=None, processor_model=None, sharpen_strength=0, sharpen_radius=2, steps_scaling=None, steps_control=None, + steps_scaling_value=None, steps_cutoff=None, denoise_cutoff=0.25): + + upscale_steps = upscale_cycles + division_factor = upscale_steps if steps >= upscale_steps else steps + current_upscale_factor = upscale_factor ** (1 / (division_factor - 1)) + tiled_vae = (tiled_vae == "enable") + scale_denoise = (scale_denoise == "enable") + pos_add_strength_scaling = (pos_add_strength_scaling == "enable") + neg_add_strength_scaling = (neg_add_strength_scaling == "enable") + steps_scaling = (steps_scaling == "enable") + run_model = model + secondary_switched = False + + for i in range(division_factor): + + cstr(f"Cycle Pass {i+1}/{division_factor}").msg.print() + + if scale_denoise: + denoise = ( + ( round(cycle_denoise * (2 ** (-(i-1))), 2) if i > 0 else cycle_denoise ) + if i > 0 else round(starting_denoise, 2) + ) + else: + denoise = round((cycle_denoise if i > 0 else starting_denoise), 2) + + if denoise < denoise_cutoff and scale_denoise: + denoise = denoise_cutoff + + if i >= (secondary_start_cycle - 1) and secondary_model and not secondary_switched: + run_model = secondary_model + denoise = cycle_denoise + model = None + secondary_switched = True + + if steps_scaling and i > 0: + + steps = ( + steps + steps_scaling_value 
+                    if steps_control == 'increment'
+                    else steps - steps_scaling_value
+                )
+                steps = (
+                    ( steps
+                      if steps <= steps_cutoff
+                      else steps_cutoff )
+                    if steps_control == 'increment'
+                    else ( steps
+                           if steps >= steps_cutoff
+                           else steps_cutoff )
+                )
+
+            print("Steps:", steps)
+            print("Denoise:", denoise)
+
+            if pos_additive:
+
+                pos_strength = 0.0 if i <= 0 else pos_add_strength
+
+                if pos_add_mode == 'increment':
+                    pos_strength = (
+                        ( round(pos_add_strength * (2 ** (i-1)), 2)
+                          if i > 0
+                          else pos_add_strength )
+                        if pos_add_strength_scaling
+                        else pos_add_strength
+                    )
+                    pos_strength = (
+                        pos_add_strength_cutoff
+                        if pos_strength > pos_add_strength_cutoff
+                        else pos_strength
+                    )
+                else:
+                    pos_strength = (
+                        ( round(pos_add_strength / (2 ** (i-1)), 2)
+                          if i > 0
+                          else pos_add_strength )
+                        if pos_add_strength_scaling
+                        else pos_add_strength
+                    )
+                    pos_strength = (
+                        pos_add_strength_cutoff
+                        if pos_strength < pos_add_strength_cutoff
+                        else pos_strength
+                    )
+                comb = nodes.ConditioningAverage()
+                positive = comb.addWeighted(pos_additive, positive, pos_strength)[0]
+                print("Positive Additive Strength:", pos_strength)
+
+            if neg_additive:
+
+                # base the negative strength on neg_add_strength; the original
+                # seeded it from pos_add_strength by copy-paste mistake
+                neg_strength = 0.0 if i <= 0 else neg_add_strength
+
+                if neg_add_mode == 'increment':
+                    neg_strength = (
+                        ( round(neg_add_strength * (2 ** (i-1)), 2)
+                          if i > 0
+                          else neg_add_strength )
+                        if neg_add_strength_scaling
+                        else neg_add_strength
+                    )
+                    neg_strength = (
+                        neg_add_strength_cutoff
+                        if neg_strength > neg_add_strength_cutoff
+                        else neg_strength
+                    )
+                else:
+                    neg_strength = (
+                        ( round(neg_add_strength / (2 ** (i-1)), 2)
+                          if i > 0
+                          else neg_add_strength )
+                        if neg_add_strength_scaling
+                        else neg_add_strength
+                    )
+                    neg_strength = (
+                        neg_add_strength_cutoff
+                        if neg_strength < neg_add_strength_cutoff
+                        else neg_strength
+                    )
+
+                comb = nodes.ConditioningAverage()
+                negative = comb.addWeighted(neg_additive, negative, neg_strength)[0]
+                print("Negative Additive Strength:", neg_strength)
+
+            if i != 0:
+                latent_image = latent_image_result
+
+            samples = nodes.common_ksampler(
+                run_model,
+                seed,
+                steps,
+                cfg,
+                sampler_name,
+                scheduler,
+                positive,
+                negative,
+                latent_image,
+                denoise=denoise,
+            )
+
+            # Upscale
+            if i < division_factor - 1:
+
+                tensors = None
+                upscaler = None
+
+                resample_filters = {
+                    'nearest': 0,
+                    'bilinear': 2,
+                    'bicubic': 3,
+                    'lanczos': 1
+                }
+
+                if latent_upscale == 'disable':
+
+                    if tiled_vae:
+                        tensors = vae.decode_tiled(samples[0]['samples'])
+                    else:
+                        tensors = vae.decode(samples[0]['samples'])
+
+                    if processor_model or upscale_model:
+
+                        from comfy_extras import nodes_upscale_model
+                        upscaler = nodes_upscale_model.ImageUpscaleWithModel()
+
+                    if processor_model:
+
+                        original_size = tensor2pil(tensors[0]).size
+                        upscaled_tensors = upscaler.upscale(processor_model, tensors)
+                        tensor_images = []
+                        for tensor in upscaled_tensors[0]:
+                            pil = tensor2pil(tensor)
+                            if pil.size[0] != original_size[0] or pil.size[1] != original_size[1]:
+                                pil = pil.resize((original_size[0], original_size[1]), Image.Resampling(resample_filters[scale_sampling]))
+                            if sharpen_strength != 0.0:
+                                pil = self.unsharp_filter(pil, sharpen_radius, sharpen_strength)
+                            tensor_images.append(pil2tensor(pil))
+
+                        tensor_images = torch.cat(tensor_images, dim=0)
+
+                    if upscale_model:
+
+                        if processor_model:
+                            tensors = tensor_images
+                            del tensor_images
+
+                        original_size = tensor2pil(tensors[0]).size
+                        new_width = round(original_size[0] * current_upscale_factor)
+                        new_height = round(original_size[1] * current_upscale_factor)
+                        new_width = int(round(new_width /
32) * 32) + new_height = int(round(new_height / 32) * 32) + upscaled_tensors = upscaler.upscale(upscale_model, tensors) + tensor_images = [] + for tensor in upscaled_tensors[0]: + tensor = pil2tensor(tensor2pil(tensor).resize((new_width, new_height), Image.Resampling(resample_filters[scale_sampling]))) + size = max(tensor2pil(tensor).size) + if sharpen_strength != 0.0: + tensor = pil2tensor(self.unsharp_filter(tensor2pil(tensor), sharpen_radius, sharpen_strength)) + tensor_images.append(tensor) + + tensor_images = torch.cat(tensor_images, dim=0) + + else: + + tensor_images = [] + scale = WAS_Image_Rescale() + for tensor in tensors: + tensor = scale.image_rescale(tensor.unsqueeze(0), "rescale", "true", scale_sampling, current_upscale_factor, 0, 0)[0] + size = max(tensor2pil(tensor).size) + if sharpen_strength > 0.0: + tensor = pil2tensor(self.unsharp_filter(tensor2pil(tensor), sharpen_radius, sharpen_strength)) + tensor_images.append(tensor) + tensor_images = torch.cat(tensor_images, dim=0) + + if tiled_vae: + latent_image_result = {"samples": vae.encode_tiled(self.vae_encode_crop_pixels(tensor_images)[:,:,:,:3])} + else: + latent_image_result = {"samples": vae.encode(self.vae_encode_crop_pixels(tensor_images)[:,:,:,:3])} + + else: + + upscaler = nodes.LatentUpscaleBy() + latent_image_result = upscaler.upscale(samples[0], latent_upscale, current_upscale_factor)[0] + + else: + + latent_image_result = samples[0] + + return (latent_image_result, ) + + @staticmethod + def vae_encode_crop_pixels(pixels): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = (pixels.shape[2] % 8) // 2 + pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :] + return pixels + + @staticmethod + def unsharp_filter(image, radius=2, amount=1.0): + from skimage.filters import unsharp_mask + img_array = np.array(image) + img_array = img_array / 255.0 + sharpened = unsharp_mask(img_array, radius=radius, amount=amount, channel_axis=2) + sharpened = (sharpened * 255.0).astype(np.uint8) + sharpened_pil = Image.fromarray(sharpened) + + return sharpened_pil + + +# Latent Blend + +class WAS_Blend_Latents: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent_a": ("LATENT",), + "latent_b": ("LATENT",), + "operation": (["add", "multiply", "divide", "subtract", "overlay", "hard_light", "soft_light", "screen", "linear_dodge", "difference", "exclusion", "random"],), + "blend": ("FLOAT", {"default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "latent_blend" + + CATEGORY = "WAS Suite/Latent" + + def latent_blend(self, latent_a, latent_b, operation, blend): + return ( {"samples": self.blend_latents(latent_a['samples'], latent_b['samples'], operation, blend)}, ) + + def blend_latents(self, latent1, latent2, mode='add', blend_percentage=0.5): + + def overlay_blend(latent1, latent2, blend_factor): + low = 2 * latent1 * latent2 + high = 1 - 2 * (1 - latent1) * (1 - latent2) + blended_latent = (latent1 * blend_factor) * low + (latent2 * blend_factor) * high + return blended_latent + + def screen_blend(latent1, latent2, blend_factor): + inverted_latent1 = 1 - latent1 + inverted_latent2 = 1 - latent2 + blended_latent = 1 - (inverted_latent1 * inverted_latent2 * (1 - blend_factor)) + return blended_latent + + def difference_blend(latent1, latent2, blend_factor): + blended_latent = abs(latent1 - latent2) * blend_factor + return blended_latent 
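+
+        # These helpers mirror Photoshop-style blend modes applied directly to
+        # latent tensors. Worked example for screen_blend above: with values
+        # a=0.4, b=0.5 and blend_factor=1.0 the result is
+        # 1 - (1-0.4)*(1-0.5)*(1-1.0) = 1.0 (a full-strength screen); at
+        # blend_factor=0.5 it is 1 - 0.6*0.5*0.5 = 0.85.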
+
+        def exclusion_blend(latent1, latent2, blend_factor):
+            blended_latent = (latent1 + latent2 - 2 * latent1 * latent2) * blend_factor
+            return blended_latent
+
+        def hard_light_blend(latent1, latent2, blend_factor):
+            blended_latent = torch.where(latent2 < 0.5, 2 * latent1 * latent2, 1 - 2 * (1 - latent1) * (1 - latent2)) * blend_factor
+            return blended_latent
+
+        def linear_dodge_blend(latent1, latent2, blend_factor):
+            blended_latent = torch.clamp(latent1 + latent2, 0, 1) * blend_factor
+            return blended_latent
+
+        def soft_light_blend(latent1, latent2, blend_factor):
+            low = 2 * latent1 * latent2 + latent1 ** 2 - 2 * latent1 * latent2 * latent1
+            # latents are not confined to [0, 1], so torch.sqrt can produce NaNs
+            # for negative values; the final normalize() pass hides most of this
+            high = 2 * latent1 * (1 - latent2) + torch.sqrt(latent1) * (2 * latent2 - 1)
+            blended_latent = (latent1 * blend_factor) * low + (latent2 * blend_factor) * high
+            return blended_latent
+
+        def random_noise(latent1, latent2, blend_factor):
+            noise1 = torch.randn_like(latent1)
+            noise2 = torch.randn_like(latent2)
+            noise1 = (noise1 - noise1.min()) / (noise1.max() - noise1.min())
+            noise2 = (noise2 - noise2.min()) / (noise2.max() - noise2.min())
+            blended_noise = (latent1 * blend_factor) * noise1 + (latent2 * blend_factor) * noise2
+            blended_noise = torch.clamp(blended_noise, 0, 1)
+            return blended_noise
+
+        blend_factor1 = blend_percentage
+        blend_factor2 = 1 - blend_percentage
+
+        if mode == 'add':
+            blended_latent = (latent1 * blend_factor1) + (latent2 * blend_factor2)
+        elif mode == 'multiply':
+            blended_latent = (latent1 * blend_factor1) * (latent2 * blend_factor2)
+        elif mode == 'divide':
+            blended_latent = (latent1 * blend_factor1) / (latent2 * blend_factor2)
+        elif mode == 'subtract':
+            blended_latent = (latent1 * blend_factor1) - (latent2 * blend_factor2)
+        elif mode == 'overlay':
+            blended_latent = overlay_blend(latent1, latent2, blend_factor1)
+        elif mode == 'screen':
+            blended_latent = screen_blend(latent1, latent2, blend_factor1)
+        elif mode == 'difference':
+            blended_latent = difference_blend(latent1, latent2, blend_factor1)
+        elif mode == 'exclusion':
+            blended_latent = exclusion_blend(latent1, latent2, blend_factor1)
+        elif mode == 'hard_light':
+            blended_latent = hard_light_blend(latent1, latent2, blend_factor1)
+        elif mode == 'linear_dodge':
+            blended_latent = linear_dodge_blend(latent1, latent2, blend_factor1)
+        elif mode == 'soft_light':
+            blended_latent = soft_light_blend(latent1, latent2, blend_factor1)
+        elif mode == 'random':
+            blended_latent = random_noise(latent1, latent2, blend_factor1)
+        else:
+            # list the actual mode names; the original message advertised
+            # 'custom_noise', which is not a valid choice
+            raise ValueError("Unsupported blending mode. Please choose from 'add', 'multiply', 'divide', 'subtract', 'overlay', 'screen', 'difference', 'exclusion', 'hard_light', 'linear_dodge', 'soft_light', 'random'.")
+
+        blended_latent = self.normalize(blended_latent)
+        return blended_latent
+
+    def normalize(self, latent):
+        return (latent - latent.min()) / (latent.max() - latent.min())
+
+
+
+# SEED NODE
+
+class WAS_Seed:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required":
+                {"seed": ("INT", {"default": 0, "min": 0,
+                                  "max": 0xffffffffffffffff})}
+                }
+
+    RETURN_TYPES = ("SEED", "NUMBER", "FLOAT", "INT")
+    RETURN_NAMES = ("seed", "number", "float", "int")
+    FUNCTION = "seed"
+
+    CATEGORY = "WAS Suite/Number"
+
+    def seed(self, seed):
+        return ({"seed": seed, }, seed, float(seed), int(seed) )
+
+
+# IMAGE SEED
+
+class WAS_Image_To_Seed:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {"required": {
+                    "images": ("IMAGE",),
+                }
+        }
+
+    RETURN_TYPES = ("INT",)
+    OUTPUT_IS_LIST = (True,)
+
+    FUNCTION = "image_to_seed"
+    CATEGORY = "WAS Suite/Image/Analyze"
+
+    def image_to_seed(self, images):
+
+        seeds = []
+        for image in images:
+            image = tensor2pil(image)
+            seeds.append(image2seed(image))
+
+        return (seeds, )
+
+
+#! TEXT NODES
+
+class WAS_Prompt_Styles_Selector:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        style_list = []
+        if os.path.exists(STYLES_PATH):
+            with open(STYLES_PATH, "r") as f:
+                if len(f.readlines()) != 0:
+                    f.seek(0)
+                    data = f.read()
+                    styles = json.loads(data)
+                    for style in styles.keys():
+                        style_list.append(style)
+        if not style_list:
+            style_list.append("None")
+        return {
+            "required": {
+                "style": (style_list,),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE)
+    RETURN_NAMES = ("positive_string", "negative_string")
+    FUNCTION = "load_style"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def load_style(self, style):
+
+        styles = {}
+        if os.path.exists(STYLES_PATH):
+            with open(STYLES_PATH, 'r') as data:
+                styles = json.load(data)
+        else:
+            cstr(f"The styles file does not exist at `{STYLES_PATH}`. Unable to load styles! Have you imported your AUTOMATIC1111 WebUI styles?").error.print()
+
+        # the original test `styles and style != None or style != 'None'` binds as
+        # `(styles and style != None) or (style != 'None')`, which passes even when
+        # no styles were loaded; require the key to actually exist instead
+        if styles and style in styles and style != 'None':
+            prompt = styles[style]['prompt']
+            negative_prompt = styles[style]['negative_prompt']
+        else:
+            prompt = ''
+            negative_prompt = ''
+
+        return (prompt, negative_prompt)
+
+class WAS_Prompt_Multiple_Styles_Selector:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        style_list = []
+        if os.path.exists(STYLES_PATH):
+            with open(STYLES_PATH, "r") as f:
+                if len(f.readlines()) != 0:
+                    f.seek(0)
+                    data = f.read()
+                    styles = json.loads(data)
+                    for style in styles.keys():
+                        style_list.append(style)
+        if not style_list:
+            style_list.append("None")
+        return {
+            "required": {
+                "style1": (style_list,),
+                "style2": (style_list,),
+                "style3": (style_list,),
+                "style4": (style_list,),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE)
+    RETURN_NAMES = ("positive_string", "negative_string")
+    FUNCTION = "load_style"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def load_style(self, style1, style2, style3, style4):
+        styles = {}
+        if os.path.exists(STYLES_PATH):
+            with open(STYLES_PATH, 'r') as data:
+                styles = json.load(data)
+        else:
+            cstr(f"The styles file does not exist at `{STYLES_PATH}`. Unable to load styles! Have you imported your AUTOMATIC1111 WebUI styles?").error.print()
+            return ('', '')
+
+        # Check if the selected styles exist in the loaded styles dictionary
+        selected_styles = [style1, style2, style3, style4]
+        for style in selected_styles:
+            if style not in styles:
+                print(f"Style '{style}' was not found in the styles file.")
+                return ('', '')
+
+        prompt = ""
+        negative_prompt = ""
+
+        # Concatenate the prompts and negative prompts of the selected styles
+        for style in selected_styles:
+            prompt += styles[style]['prompt'] + " "
+            negative_prompt += styles[style]['negative_prompt'] + " "
+
+        return (prompt.strip(), negative_prompt.strip())
+
+# Text Multiline Node
+
+class WAS_Text_Multiline:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": ("STRING", {"default": '', "multiline": True}),
+            }
+        }
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "text_multiline"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def text_multiline(self, text):
+        import io
+        new_text = []
+        for line in io.StringIO(text):
+            if not line.strip().startswith('#'):
+                if not line.strip().startswith("\n"):
+                    line = line.replace("\n", '')
+                new_text.append(line)
+        new_text = "\n".join(new_text)
+
+        tokens = TextTokens()
+        new_text = tokens.parseTokens(new_text)
+
+        return (new_text, )
+
+# Text List Concatenate Node
+
+class WAS_Text_List_Concatenate:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "list_a": ("LIST", {"forceInput": True}),
+                "list_b": ("LIST", {"forceInput": True}),
+                "delimiter": ("STRING", {"forceInput": True}),
+            },
+            "optional": {
+                "list_c": ("LIST", {"forceInput": True}),
+                "list_d": ("LIST", {"forceInput": True}),
+            }
+        }
+    RETURN_TYPES = ("LIST",)
+    FUNCTION = "text_concatenate_list"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def text_concatenate_list(self, list_a, list_b, delimiter='', list_c=None, list_d=None):
+        # `delimiter` is accepted so the required input maps to a parameter (the
+        # original signature omitted it); the lists themselves are joined as-is
+
+        text_list = list_a + list_b
+
+        # the original `text_list + list_c` discarded its result; extend in place
+        if list_c:
+            text_list += list_c
+        if list_d:
+            text_list += list_d
+
+        return (text_list,)
+
+# Text List Node
+
+class WAS_Text_List:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text_a": ("STRING", {"forceInput": True}),
+            },
+            "optional": {
+                "text_b": ("STRING", {"forceInput": True}),
+                "text_c": ("STRING", {"forceInput": True}),
+                "text_d": ("STRING", {"forceInput": True}),
+                "text_e": ("STRING", {"forceInput": True}),
+                "text_f": ("STRING", {"forceInput": True}),
+                "text_g": ("STRING", {"forceInput": True}),
+            }
+        }
+    RETURN_TYPES = ("LIST",)
+    FUNCTION = "text_as_list"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def text_as_list(self, text_a, text_b=None, text_c=None, text_d=None, text_e=None, text_f=None, text_g=None):
+
+        text_list = [text_a,]
+
+        if text_b:
+            text_list.append(text_b)
+        if text_c:
+            text_list.append(text_c)
+        if text_d:
+            text_list.append(text_d)
+        if text_e:
+            text_list.append(text_e)
+        if text_f:
+            text_list.append(text_f)
+        if text_g:
+            text_list.append(text_g)
+
+        return (text_list,)
+
+# Text Parse Embeddings
+
+class WAS_Text_Parse_Embeddings_By_Name:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+            }
+        }
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "text_parse_embeddings"
+
+    CATEGORY = "WAS Suite/Text/Parse"
+
+    def text_parse_embeddings(self, text):
+        return (self.convert_a1111_embeddings(text), )
+
+    def convert_a1111_embeddings(self, text):
+        for embeddings_path in
comfy_paths.folder_names_and_paths["embeddings"][0]: + for filename in os.listdir(embeddings_path): + basename, ext = os.path.splitext(filename) + pattern = re.compile(r'\b{}\b'.format(re.escape(basename))) + replacement = 'embedding:{}'.format(basename) + text = re.sub(pattern, replacement, text) + + return text + + +# Text Dictionary Concatenate + +class WAS_Dictionary_Update: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "dictionary_a": ("DICT", ), + "dictionary_b": ("DICT", ), + }, + "optional": { + "dictionary_c": ("DICT", ), + "dictionary_d": ("DICT", ), + } + } + RETURN_TYPES = ("DICT",) + FUNCTION = "dictionary_update" + + CATEGORY = "WAS Suite/Text" + + def dictionary_update(self, dictionary_a, dictionary_b, dictionary_c=None, dictionary_d=None): + return_dictionary = {**dictionary_a, **dictionary_b} + if dictionary_c is not None: + return_dictionary = {**return_dictionary, **dictionary_c} + if dictionary_d is not None: + return_dictionary = {**return_dictionary, **dictionary_d} + return (return_dictionary, ) + + +# Text String Node + +class WAS_Text_String: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": ("STRING", {"default": '', "multiline": False}), + }, + "optional": { + "text_b": ("STRING", {"default": '', "multiline": False}), + "text_c": ("STRING", {"default": '', "multiline": False}), + "text_d": ("STRING", {"default": '', "multiline": False}), + } + } + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE,TEXT_TYPE,TEXT_TYPE) + FUNCTION = "text_string" + + CATEGORY = "WAS Suite/Text" + + def text_string(self, text='', text_b='', text_c='', text_d=''): + + tokens = TextTokens() + + text = tokens.parseTokens(text) + text_b = tokens.parseTokens(text_b) + text_c = tokens.parseTokens(text_c) + text_d = tokens.parseTokens(text_d) + + return (text, text_b, text_c, text_d) + + +# Text String Truncation + +class WAS_Text_String_Truncate: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": ("STRING", {"forceInput": True}), + "truncate_by": (["characters", "words"],), + "truncate_from": (["end", "beginning"],), + "truncate_to": ("INT", {"default": 10, "min": -99999999, "max": 99999999, "step": 1}), + }, + "optional": { + "text_b": ("STRING", {"forceInput": True}), + "text_c": ("STRING", {"forceInput": True}), + "text_d": ("STRING", {"forceInput": True}), + } + } + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE,TEXT_TYPE,TEXT_TYPE) + FUNCTION = "truncate_string" + + CATEGORY = "WAS Suite/Text/Operations" + + def truncate_string(self, text, truncate_by, truncate_from, truncate_to, text_b='', text_c='', text_d=''): + return ( + self.truncate(text, truncate_to, truncate_from, truncate_by), + self.truncate(text_b, truncate_to, truncate_from, truncate_by), + self.truncate(text_c, truncate_to, truncate_from, truncate_by), + self.truncate(text_d, truncate_to, truncate_from, truncate_by), + ) + + def truncate(self, string, max_length, mode='end', truncate_by='characters'): + if mode not in ['beginning', 'end']: + cstr("Invalid mode. 'mode' must be either 'beginning' or 'end'.").error.print() + mode = "end" + if truncate_by not in ['characters', 'words']: + cstr("Invalid truncate_by. 
'truncate_by' must be either 'characters' or 'words'.").error.print() + if truncate_by == 'characters': + if mode == 'beginning': + return string[:max_length] if max_length >= 0 else string[max_length:] + else: + return string[-max_length:] if max_length >= 0 else string[:max_length] + words = string.split() + if mode == 'beginning': + return ' '.join(words[:max_length]) if max_length >= 0 else ' '.join(words[max_length:]) + else: + return ' '.join(words[-max_length:]) if max_length >= 0 else ' '.join(words[:max_length]) + + + + +# Text Compare Strings + +class WAS_Text_Compare: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text_a": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "text_b": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "mode": (["similarity","difference"],), + "tolerance": ("FLOAT", {"default":0.0,"min":0.0,"max":1.0,"step":0.01}), + } + } + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE,"NUMBER","NUMBER",TEXT_TYPE) + RETURN_NAMES = ("TEXT_A_PASS","TEXT_B_PASS","BOOL_NUMBER","SCORE_NUMBER","COMPARISON_TEXT") + FUNCTION = "text_compare" + + CATEGORY = "WAS Suite/Text/Search" + + def text_compare(self, text_a='', text_b='', mode='similarity', tolerance=0.0): + + boolean = ( 1 if text_a == text_b else 0 ) + sim = self.string_compare(text_a, text_b, tolerance, ( True if mode == 'difference' else False )) + score = float(sim[0]) + sim_result = ' '.join(sim[1][::-1]) + sim_result = ' '.join(sim_result.split()) + + return (text_a, text_b, boolean, score, sim_result) + + def string_compare(self, str1, str2, threshold=1.0, difference_mode=False): + m = len(str1) + n = len(str2) + if difference_mode: + dp = [[0 for x in range(n+1)] for x in range(m+1)] + for i in range(m+1): + for j in range(n+1): + if i == 0: + dp[i][j] = j + elif j == 0: + dp[i][j] = i + elif str1[i-1] == str2[j-1]: + dp[i][j] = dp[i-1][j-1] + else: + dp[i][j] = 1 + min(dp[i][j-1], # Insert + dp[i-1][j], # Remove + dp[i-1][j-1]) # Replace + diff_indices = [] + i, j = m, n + while i > 0 and j > 0: + if str1[i-1] == str2[j-1]: + i -= 1 + j -= 1 + else: + diff_indices.append(i-1) + i, j = min((i, j-1), (i-1, j)) + diff_indices.reverse() + words = [] + start_idx = 0 + for i in diff_indices: + if str1[i] == " ": + words.append(str1[start_idx:i]) + start_idx = i+1 + words.append(str1[start_idx:m]) + difference_score = 1 - ((dp[m][n] - len(words)) / max(m, n)) + return (difference_score, words[::-1]) + else: + dp = [[0 for x in range(n+1)] for x in range(m+1)] + similar_words = set() + for i in range(m+1): + for j in range(n+1): + if i == 0: + dp[i][j] = j + elif j == 0: + dp[i][j] = i + elif str1[i-1] == str2[j-1]: + dp[i][j] = dp[i-1][j-1] + if i > 1 and j > 1 and str1[i-2] == ' ' and str2[j-2] == ' ': + word1_start = i-2 + word2_start = j-2 + while word1_start > 0 and str1[word1_start-1] != " ": + word1_start -= 1 + while word2_start > 0 and str2[word2_start-1] != " ": + word2_start -= 1 + word1 = str1[word1_start:i-1] + word2 = str2[word2_start:j-1] + if word1 in str2 or word2 in str1: + if word1 not in similar_words: + similar_words.add(word1) + if word2 not in similar_words: + similar_words.add(word2) + else: + dp[i][j] = 1 + min(dp[i][j-1], # Insert + dp[i-1][j], # Remove + dp[i-1][j-1]) # Replace + if dp[i][j] <= threshold and i > 0 and j > 0: + word1_start = max(0, i-dp[i][j]) + word2_start = max(0, j-dp[i][j]) + word1_end = i + word2_end = j + while word1_start > 0 and str1[word1_start-1] != " ": + word1_start -= 1 
+ while word2_start > 0 and str2[word2_start-1] != " ": + word2_start -= 1 + while word1_end < m and str1[word1_end] != " ": + word1_end += 1 + while word2_end < n and str2[word2_end] != " ": + word2_end += 1 + word1 = str1[word1_start:word1_end] + word2 = str2[word2_start:word2_end] + if word1 in str2 or word2 in str1: + if word1 not in similar_words: + similar_words.add(word1) + if word2 not in similar_words: + similar_words.add(word2) + if(max(m,n) == 0): + similarity_score = 1 + else: + similarity_score = 1 - (dp[m][n]/max(m,n)) + return (similarity_score, list(similar_words)) + + +# Text Random Line + +class WAS_Text_Random_Line: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "text_random_line" + + CATEGORY = "WAS Suite/Text" + + def text_random_line(self, text, seed): + lines = text.split("\n") + random.seed(seed) + choice = random.choice(lines) + return (choice, ) + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + +# Text Concatenate + +class WAS_Text_Concatenate: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text_a": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "text_b": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "linebreak_addition": (['false', 'true'],), + }, + "optional": { + "text_c": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "text_d": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "delimiter": ('STRING', {"forceInput": False}), + } + } + + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "text_concatenate" + + CATEGORY = "WAS Suite/Text" + + def text_concatenate(self, text_a, text_b, text_c=None, text_d=None, linebreak_addition='false', delimiter=''): + # Initialize return_text with text_a + return_text = text_a + + def append_text(base_text, new_text): + if linebreak_addition == 'true': + return base_text + "\n" + new_text + else: + return base_text + delimiter + new_text + + return_text = append_text(return_text, text_b) + + if text_c: + return_text = append_text(return_text, text_c) + + if text_d: + return_text = append_text(return_text, text_d) + + return (return_text, ) + + + +# Text Search and Replace + +class WAS_Search_and_Replace: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "find": ("STRING", {"default": '', "multiline": False}), + "replace": ("STRING", {"default": '', "multiline": False}), + } + } + + RETURN_TYPES = (TEXT_TYPE, "NUMBER", "FLOAT", "INT") + RETURN_NAMES = ("result_text", "replacement_count_number", "replacement_count_float", "replacement_count_int") + FUNCTION = "text_search_and_replace" + + CATEGORY = "WAS Suite/Text/Search" + + def text_search_and_replace(self, text, find, replace): + modified_text, count = self.replace_substring(text, find, replace) + return (modified_text, count, float(count), int(count)) + + def replace_substring(self, text, find, replace): + modified_text, count = re.subn(find, replace, text) + return (modified_text, count) + + +# Text Shuffle + +class WAS_Text_Shuffle: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + 
"required": { + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "separator": ("STRING", {"default": ',', "multiline": False}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "shuffle" + + CATEGORY = "WAS Suite/Text/Operations" + + def shuffle(self, text, separator, seed): + + if seed is not None: + random.seed(seed) + + text_list = text.split(separator) + random.shuffle(text_list) + new_text = separator.join(text_list) + + return (new_text, ) + + + +# Text Search and Replace + +class WAS_Search_and_Replace_Input: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "find": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "replace": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + } + } + + RETURN_TYPES = (TEXT_TYPE, "NUMBER", "FLOAT", "INT") + RETURN_NAMES = ("result_text", "replacement_count_number", "replacement_count_float", "replacement_count_int") + FUNCTION = "text_search_and_replace" + + CATEGORY = "WAS Suite/Text/Search" + + def text_search_and_replace(self, text, find, replace): + count = 0 + new_text = text + while find in new_text: + new_text = new_text.replace(find, replace, 1) + count += 1 + return (new_text, count, float(count), int(count)) + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + + +# Text Search and Replace By Dictionary + +class WAS_Search_and_Replace_Dictionary: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + "dictionary": ("DICT",), + "replacement_key": ("STRING", {"default": "__", "multiline": False}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "text_search_and_replace_dict" + + CATEGORY = "WAS Suite/Text/Search" + + def text_search_and_replace_dict(self, text, dictionary, replacement_key, seed): + + random.seed(seed) + + # Parse Text + new_text = text + + for term in dictionary.keys(): + tkey = f'{replacement_key}{term}{replacement_key}' + tcount = new_text.count(tkey) + for _ in range(tcount): + new_text = new_text.replace(tkey, random.choice(dictionary[term]), 1) + if seed > 0 or seed < 0: + seed = seed + 1 + random.seed(seed) + + return (new_text, ) + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + +# Text Parse NSP + +class WAS_Text_Parse_NSP: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mode": (["Noodle Soup Prompts", "Wildcards"],), + "noodle_key": ("STRING", {"default": '__', "multiline": False}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + } + } + + OUTPUT_NODE = True + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "text_parse_nsp" + + CATEGORY = "WAS Suite/Text/Parse" + + def text_parse_nsp(self, text, mode="Noodle Soup Prompts", noodle_key='__', seed=0): + + if mode == "Noodle Soup Prompts": + + new_text = nsp_parse(text, seed, noodle_key) + cstr(f"Text Parse NSP:\n{new_text}").msg.print() + + else: + + new_text = replace_wildcards(text, (None if seed == 0 else seed), noodle_key) + cstr(f"CLIPTextEncode Wildcards:\n{new_text}").msg.print() 
+
+        return (new_text, )
+
+
+# TEXT SAVE
+
+class WAS_Text_Save:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": ("STRING", {"forceInput": True}),
+                "path": ("STRING", {"default": './ComfyUI/output/[time(%Y-%m-%d)]', "multiline": False}),
+                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+                "filename_delimiter": ("STRING", {"default": "_"}),
+                "filename_number_padding": ("INT", {"default": 4, "min": 2, "max": 9, "step": 1}),
+            }
+        }
+
+    OUTPUT_NODE = True
+    RETURN_TYPES = ()
+    FUNCTION = "save_text_file"
+    CATEGORY = "WAS Suite/IO"
+
+    def save_text_file(self, text, path, filename_prefix='ComfyUI', filename_delimiter='_', filename_number_padding=4):
+
+        tokens = TextTokens()
+        path = tokens.parseTokens(path)
+        filename_prefix = tokens.parseTokens(filename_prefix)
+
+        if not os.path.exists(path):
+            cstr(f"The path `{path}` doesn't exist! Creating it...").warning.print()
+            try:
+                os.makedirs(path, exist_ok=True)
+            except OSError as e:
+                cstr(f"The path `{path}` could not be created! Is there write access?\n{e}").error.print()
+
+        if text.strip() == '':
+            cstr("There is no text specified to save! Text is empty.").error.print()
+
+        delimiter = filename_delimiter
+        number_padding = int(filename_number_padding)
+        file_extension = '.txt'
+        filename = self.generate_filename(path, filename_prefix, delimiter, number_padding, file_extension)
+        file_path = os.path.join(path, filename)
+
+        self.writeTextFile(file_path, text)
+
+        update_history_text_files(file_path)
+
+        return (text, { "ui": { "string": text } } )
+
+    def generate_filename(self, path, prefix, delimiter, number_padding, extension):
+        pattern = f"{re.escape(prefix)}{re.escape(delimiter)}(\\d{{{number_padding}}})"
+        existing_counters = [
+            int(re.search(pattern, filename).group(1))
+            for filename in os.listdir(path)
+            if re.match(pattern, filename)
+        ]
+        existing_counters.sort(reverse=True)
+
+        if existing_counters:
+            counter = existing_counters[0] + 1
+        else:
+            counter = 1
+
+        filename = f"{prefix}{delimiter}{counter:0{number_padding}}{extension}"
+        while os.path.exists(os.path.join(path, filename)):
+            counter += 1
+            filename = f"{prefix}{delimiter}{counter:0{number_padding}}{extension}"
+
+        return filename
+
+    def writeTextFile(self, file, content):
+        try:
+            with open(file, 'w', encoding='utf-8', newline='\n') as f:
+                f.write(content)
+        except OSError:
+            cstr(f"Unable to save file `{file}`").error.print()
+
+
+
+# TEXT FILE HISTORY NODE
+
+class WAS_Text_File_History:
+    def __init__(self):
+        self.HDB = WASDatabase(WAS_HISTORY_DATABASE)
+        self.conf = getSuiteConfig()
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        HDB = WASDatabase(WAS_HISTORY_DATABASE)
+        conf = getSuiteConfig()
+        paths = ['No History',]
+        if HDB.catExists("History") and HDB.keyExists("History", "TextFiles"):
+            history_paths = HDB.get("History", "TextFiles")
+            if conf.__contains__('history_display_limit'):
+                history_paths = history_paths[-conf['history_display_limit']:]
+            paths = []
+            for path_ in history_paths:
+                paths.append(os.path.join('...'+os.sep+os.path.basename(os.path.dirname(path_)), os.path.basename(path_)))
+
+        return {
+            "required": {
+                "file": (paths,),
+                "dictionary_name": ("STRING", {"default": '[filename]', "multiline": True}),
+            },
+        }
+
+    RETURN_TYPES = (TEXT_TYPE, "DICT")
+    FUNCTION = "text_file_history"
+
+    CATEGORY = "WAS Suite/History"
+
+    def text_file_history(self, file=None, dictionary_name='[filename]'):
+        file_path = file.strip()
+        filename = ( os.path.basename(file_path).split('.', 1)[0]
+                     if '.' in os.path.basename(file_path) else os.path.basename(file_path) )
+        # Use the custom dictionary name unless it is the `[filename]` token or blank.
+        if dictionary_name != '[filename]' and dictionary_name.strip() != '':
+            filename = dictionary_name
+        if not os.path.exists(file_path):
+            cstr(f"The path `{file_path}` specified cannot be found.").error.print()
+            return ('', {filename: []})
+        with open(file_path, 'r', encoding="utf-8", newline='\n') as file:
+            text = file.read()
+
+        # Write to file history
+        update_history_text_files(file_path)
+
+        import io
+        lines = []
+        for line in io.StringIO(text):
+            if not line.strip().startswith('#'):
+                lines.append(line.replace("\n", ''))
+        dictionary = {filename: lines}
+
+        return ("\n".join(lines), dictionary)
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+# TEXT TO CONDITIONING
+
+class WAS_Text_to_Conditioning:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "clip": ("CLIP",),
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+            }
+        }
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "text_to_conditioning"
+
+    CATEGORY = "WAS Suite/Text/Operations"
+
+    def text_to_conditioning(self, clip, text):
+        encoder = nodes.CLIPTextEncode()
+        encoded = encoder.encode(clip=clip, text=text)
+        return (encoded[0], { "ui": { "string": text } })
+
+
+
+# TEXT PARSE TOKENS
+
+class WAS_Text_Parse_Tokens:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "text_parse_tokens"
+
+    CATEGORY = "WAS Suite/Text/Tokens"
+
+    def text_parse_tokens(self, text):
+        # Token Parser
+        tokens = TextTokens()
+        return (tokens.parseTokens(text), )
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+# TEXT ADD TOKENS
+
+
+class WAS_Text_Add_Tokens:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "tokens": ("STRING", {"default": "[hello]: world", "multiline": True}),
+                "print_current_tokens": (["false", "true"],),
+            }
+        }
+
+    RETURN_TYPES = ()
+    FUNCTION = "text_add_tokens"
+    OUTPUT_NODE = True
+    CATEGORY = "WAS Suite/Text/Tokens"
+
+    def text_add_tokens(self, tokens, print_current_tokens="false"):
+
+        import io
+
+        # Token Parser
+        tk = TextTokens()
+
+        # Parse out Tokens; split on the first ':' only so values may
+        # themselves contain colons (e.g. URLs), and skip malformed lines.
+        for line in io.StringIO(tokens):
+            parts = line.split(':', 1)
+            if len(parts) < 2:
+                continue
+            token = parts[0].strip()
+            token_value = parts[1].strip()
+            tk.addToken(token, token_value)
+
+        # Current Tokens
+        if print_current_tokens == "true":
+            cstr('Current Custom Tokens:').msg.print()
+            print(json.dumps(tk.custom_tokens, indent=4))
+
+        return tokens
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
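+
+# A minimal standalone sketch of the token-line format parsed above: each
+# line splits on the first ':' only, so values may contain their own colons.
+# This helper is purely illustrative and is not used by the suite itself.
+def _example_parse_token_lines(token_text):
+    pairs = {}
+    for line in token_text.splitlines():
+        if ':' not in line:
+            continue
+        name, value = line.split(':', 1)
+        pairs[name.strip()] = value.strip()
+    return pairs
+
+# e.g. _example_parse_token_lines("[hello]: world") == {'[hello]': 'world'}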
+
+# TEXT ADD TOKEN BY INPUT
+
+
+class WAS_Text_Add_Token_Input:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "token_name": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+                "token_value": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+                "print_current_tokens": (["false", "true"],),
+            }
+        }
+
+    RETURN_TYPES = ()
+    FUNCTION = "text_add_token"
+    OUTPUT_NODE = True
+    CATEGORY = "WAS Suite/Text/Tokens"
+
+    def text_add_token(self, token_name, token_value, print_current_tokens="false"):
+
+        if token_name.strip() == '':
+            cstr('A `token_name` is required for a token; token name provided is empty.').error.print()
+            # Bail out rather than registering a token with an empty name.
+            return (token_name, token_value)
+
+        # Token Parser
+        tk = TextTokens()
+
+        # Add Tokens
+        tk.addToken(token_name, token_value)
+
+        # Current Tokens
+        if print_current_tokens == "true":
+            cstr('Current Custom Tokens:').msg.print()
+            print(json.dumps(tk.custom_tokens, indent=4))
+
+        return (token_name, token_value)
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+
+
+# TEXT TO CONSOLE
+
+class WAS_Text_to_Console:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+                "label": ("STRING", {"default": 'Text Output', "multiline": False}),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    OUTPUT_NODE = True
+    FUNCTION = "text_to_console"
+
+    CATEGORY = "WAS Suite/Debug"
+
+    def text_to_console(self, text, label):
+        if label.strip() != '':
+            cstr(f'\033[33m{label}\033[0m:\n{text}\n').msg.print()
+        else:
+            cstr(f"\033[33mText to Console\033[0m:\n{text}\n").msg.print()
+        return (text, )
+
+# DICT TO CONSOLE
+
+class WAS_Dictionary_To_Console:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "dictionary": ("DICT",),
+                "label": ("STRING", {"default": 'Dictionary Output', "multiline": False}),
+            }
+        }
+
+    RETURN_TYPES = ("DICT",)
+    OUTPUT_NODE = True
+    FUNCTION = "text_to_console"
+
+    CATEGORY = "WAS Suite/Debug"
+
+    def text_to_console(self, dictionary, label):
+        # Import once so both branches can use it.
+        from pprint import pprint
+        if label.strip() != '':
+            print(f'\033[34mWAS Node Suite \033[33m{label}\033[0m:\n')
+        else:
+            print('\033[34mWAS Node Suite \033[33mDictionary to Console\033[0m:\n')
+        pprint(dictionary, indent=4)
+        print('')
+        return (dictionary, )
+
+
+# LOAD TEXT FILE
+
+class WAS_Text_Load_From_File:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "file_path": ("STRING", {"default": '', "multiline": False}),
+                "dictionary_name": ("STRING", {"default": '[filename]', "multiline": False}),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE, "DICT")
+    FUNCTION = "load_file"
+
+    CATEGORY = "WAS Suite/IO"
+
+    def load_file(self, file_path='', dictionary_name='[filename]'):
+
+        filename = ( os.path.basename(file_path).split('.', 1)[0]
+                     if '.'
in os.path.basename(file_path) else os.path.basename(file_path) ) + if dictionary_name != '[filename]': + filename = dictionary_name + if not os.path.exists(file_path): + cstr(f"The path `{file_path}` specified cannot be found.").error.print() + return ('', {filename: []}) + with open(file_path, 'r', encoding="utf-8", newline='\n') as file: + text = file.read() + + # Write to file history + update_history_text_files(file_path) + + import io + lines = [] + for line in io.StringIO(text): + if not line.strip().startswith('#'): + if ( not line.strip().startswith("\n") + or not line.strip().startswith("\r") + or not line.strip().startswith("\r\n") ): + line = line.replace("\n", '').replace("\r",'').replace("\r\n",'') + lines.append(line.replace("\n",'').replace("\r",'').replace("\r\n",'')) + dictionary = {filename: lines} + + return ("\n".join(lines), dictionary) + +# TEXT LOAD FROM FILE + +class WAS_Text_Load_Line_From_File: + def __init__(self): + self.HDB = WASDatabase(WAS_HISTORY_DATABASE) + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "file_path": ("STRING", {"default": '', "multiline": False}), + "dictionary_name": ("STRING", {"default": '[filename]', "multiline": False}), + "label": ("STRING", {"default": 'TextBatch', "multiline": False}), + "mode": (["automatic", "index"],), + "index": ("INT", {"default": 0, "min": 0, "step": 1}), + }, + "optional": { + "multiline_text": (TEXT_TYPE, {"forceInput": True}), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + if kwargs['mode'] != 'index': + return float("NaN") + else: + m = hashlib.sha256() + if os.path.exists(kwargs['file_path']): + with open(kwargs['file_path'], 'rb') as f: + m.update(f.read()) + return m.digest().hex() + else: + return False + + RETURN_TYPES = (TEXT_TYPE, "DICT") + RETURN_NAMES = ("line_text", "dictionary") + FUNCTION = "load_file" + + CATEGORY = "WAS Suite/Text" + + def load_file(self, file_path='', dictionary_name='[filename]', label='TextBatch', + mode='automatic', index=0, multiline_text=None): + if multiline_text is not None: + lines = multiline_text.strip().split('\n') + if mode == 'index': + if index < 0 or index >= len(lines): + cstr(f"Invalid line index `{index}`").error.print() + return ('', {dictionary_name: []}) + line = lines[index] + else: + line_index = self.HDB.get('TextBatch Counters', label) + if line_index is None: + line_index = 0 + line = lines[line_index % len(lines)] + self.HDB.insert('TextBatch Counters', label, line_index + 1) + return (line, {dictionary_name: lines}) + + if file_path == '': + cstr("No file path specified.").error.print() + return ('', {dictionary_name: []}) + + if not os.path.exists(file_path): + cstr(f"The path `{file_path}` specified cannot be found.").error.print() + return ('', {dictionary_name: []}) + + file_list = self.TextFileLoader(file_path, label) + line, lines = None, [] + if mode == 'automatic': + line, lines = file_list.get_next_line() + elif mode == 'index': + if index >= len(file_list.lines): + index = index % len(file_list.lines) + line, lines = file_list.get_line_by_index(index) + if line is None: + cstr("No valid line was found. 
The file may be empty or all lines have been read.").error.print()
+                return ('', {dictionary_name: []})
+        file_list.store_index()
+        update_history_text_files(file_path)
+
+        return (line, {dictionary_name: lines})
+
+    class TextFileLoader:
+        def __init__(self, file_path, label):
+            self.WDB = WDB
+            self.file_path = file_path
+            self.lines = []
+            self.index = 0
+            self.load_file(file_path, label)
+
+        def load_file(self, file_path, label):
+            stored_file_path = self.WDB.get('TextBatch Paths', label)
+            stored_index = self.WDB.get('TextBatch Counters', label)
+            if stored_file_path != file_path:
+                self.index = 0
+                self.WDB.insert('TextBatch Counters', label, 0)
+                self.WDB.insert('TextBatch Paths', label, file_path)
+            else:
+                # Guard against a missing counter entry for an existing path.
+                self.index = stored_index if stored_index is not None else 0
+            with open(file_path, 'r', encoding="utf-8", newline='\n') as file:
+                self.lines = [line.strip() for line in file]
+
+        def get_line_index(self):
+            return self.index
+
+        def set_line_index(self, index):
+            self.index = index
+            self.WDB.insert('TextBatch Counters', 'TextBatch', self.index)
+
+        def get_next_line(self):
+            if self.index >= len(self.lines):
+                self.index = 0
+            line = self.lines[self.index]
+            self.index += 1
+            if self.index == len(self.lines):
+                self.index = 0
+            cstr(f'{cstr.color.YELLOW}TextBatch{cstr.color.END} Index: {self.index}').msg.print()
+            return line, self.lines
+
+        def get_line_by_index(self, index):
+            if index < 0 or index >= len(self.lines):
+                cstr(f"Invalid line index `{index}`").error.print()
+                return None, []
+            self.index = index
+            line = self.lines[self.index]
+            cstr(f'{cstr.color.YELLOW}TextBatch{cstr.color.END} Index: {self.index}').msg.print()
+            return line, self.lines
+
+        def store_index(self):
+            self.WDB.insert('TextBatch Counters', 'TextBatch', self.index)
+
+
+class WAS_Text_To_String:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+            }
+        }
+
+    RETURN_TYPES = ("STRING",)
+    FUNCTION = "text_to_string"
+
+    CATEGORY = "WAS Suite/Text/Operations"
+
+    def text_to_string(self, text):
+        return (text, )
+
+class WAS_Text_To_Number:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER",)
+    FUNCTION = "text_to_number"
+
+    CATEGORY = "WAS Suite/Text/Operations"
+
+    def text_to_number(self, text):
+        # Parse as a float when a decimal point is present, otherwise as an int.
+        if "." in text:
+            number = float(text)
+        else:
+            number = int(text)
+        return (number, )
+
+
+class WAS_String_To_Text:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "string": ("STRING", {}),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "string_to_text"
+
+    CATEGORY = "WAS Suite/Text/Operations"
+
+    def string_to_text(self, string):
+        return (string, )
+
+# Random Prompt
+
+class WAS_Text_Random_Prompt:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "search_seed": ("STRING", {"multiline": False}),
+            }
+        }
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "random_prompt"
+
+    CATEGORY = "WAS Suite/Text"
+
+    def random_prompt(self, search_seed=None):
+        if search_seed in ['', ' ']:
+            search_seed = None
+        return (self.search_lexica_art(search_seed), )
+
+    def search_lexica_art(self, query=None):
+        if not query:
+            query = random.choice(["portrait","landscape","anime","superhero","animal","nature","scenery"])
+        url = f"https://lexica.art/api/v1/search?q={query}"
+        try:
+            response = requests.get(url)
+            data = response.json()
+            images = data.get("images", [])
+            if not images:
+                return "404 not found error"
+            random_image = random.choice(images)
+            prompt = random_image.get("prompt")
+        except Exception:
+            cstr("Unable to establish connection to Lexica API.").error.print()
+            prompt = "404 not found error"
+
+        return prompt
+
+# BLIP Model Loader
+
+class WAS_BLIP_Model_Loader:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "blip_model": (["caption", "interrogate"], ),
+            }
+        }
+
+    RETURN_TYPES = ("BLIP_MODEL",)
+    FUNCTION = "blip_model"
+
+    CATEGORY = "WAS Suite/Loaders"
+
+    def blip_model(self, blip_model):
+
+        if ( 'timm' not in packages()
+             or 'transformers' not in packages()
+             or 'fairscale' not in packages() ):
+            cstr(f"Modules or packages are missing to use BLIP models. Please run the `{os.path.join(WAS_SUITE_ROOT, 'requirements.txt')}` through ComfyUI's python executable.").error.print()
+            # Abort instead of falling through with missing dependencies
+            # (a bare `exit` statement is a no-op).
+            raise ModuleNotFoundError("Missing BLIP dependencies (timm/transformers/fairscale).")
+
+        if 'transformers==4.26.1' not in packages(True):
+            cstr(f"`transformers==4.26.1` is required for BLIP models. Please run the `{os.path.join(WAS_SUITE_ROOT, 'requirements.txt')}` through ComfyUI's python executable.").error.print()
+            raise ModuleNotFoundError("BLIP models require `transformers==4.26.1`.")
+
+        device = 'cpu'
+        conf = getSuiteConfig()
+        size = 384
+
+        if blip_model == 'caption':
+
+            from .modules.BLIP.blip_module import blip_decoder
+
+            blip_dir = os.path.join(MODELS_DIR, 'blip')
+            if not os.path.exists(blip_dir):
+                os.makedirs(blip_dir, exist_ok=True)
+
+            torch.hub.set_dir(blip_dir)
+
+            if conf.__contains__('blip_model_url'):
+                model_url = conf['blip_model_url']
+            else:
+                model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
+
+            model = blip_decoder(pretrained=model_url, image_size=size, vit='base')
+            model.eval()
+            model = model.to(device)
+
+        elif blip_model == 'interrogate':
+
+            from .modules.BLIP.blip_module import blip_vqa
+
+            blip_dir = os.path.join(MODELS_DIR, 'blip')
+            if not os.path.exists(blip_dir):
+                os.makedirs(blip_dir, exist_ok=True)
+
+            torch.hub.set_dir(blip_dir)
+
+            if conf.__contains__('blip_model_vqa_url'):
+                model_url = conf['blip_model_vqa_url']
+            else:
+                model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
+
+            model = blip_vqa(pretrained=model_url, image_size=size, vit='base')
+            model.eval()
+            model = model.to(device)
+
+        result = ( model, blip_model )
+
+        return ( result, )
+
+
+
+# BLIP CAPTION IMAGE
+
+class WAS_BLIP_Analyze_Image:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "mode": (["caption", "interrogate"], ),
+                "question": ("STRING", {"default": "What does the background consist of?", "multiline": True}),
+            },
+            "optional": {
+                "blip_model": ("BLIP_MODEL",)
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "blip_caption_image"
+
+    CATEGORY = "WAS Suite/Text/AI"
+
+    def blip_caption_image(self, image, mode, question, blip_model=None):
+
+        def transformImage(input_image, image_size, device):
+            raw_image = input_image.convert('RGB')
+            raw_image = raw_image.resize((image_size, image_size))
+            transform = transforms.Compose([
+                transforms.Resize(raw_image.size, interpolation=InterpolationMode.BICUBIC),
+                transforms.ToTensor(),
+                transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)) + ]) + image = transform(raw_image).unsqueeze(0).to(device) + return image.view(1, -1, image_size, image_size) # Change the shape of the output tensor + + from torchvision import transforms + from torchvision.transforms.functional import InterpolationMode + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + conf = getSuiteConfig() + image = tensor2pil(image) + size = 384 + tensor = transformImage(image, size, device) + + if blip_model: + mode = blip_model[1] + + if mode == 'caption': + + if blip_model: + model = blip_model[0].to(device) + else: + from .modules.BLIP.blip_module import blip_decoder + + blip_dir = os.path.join(MODELS_DIR, 'blip') + if not os.path.exists(blip_dir): + os.makedirs(blip_dir, exist_ok=True) + + torch.hub.set_dir(blip_dir) + + if conf.__contains__('blip_model_url'): + model_url = conf['blip_model_url'] + else: + model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' + + model = blip_decoder(pretrained=model_url, image_size=size, vit='base') + model.eval() + model = model.to(device) + + with torch.no_grad(): + caption = model.generate(tensor, sample=False, num_beams=6, max_length=74, min_length=20) + # nucleus sampling + #caption = model.generate(tensor, sample=True, top_p=0.9, max_length=75, min_length=10) + cstr(f"\033[33mBLIP Caption:\033[0m {caption[0]}").msg.print() + return (caption[0], ) + + elif mode == 'interrogate': + + if blip_model: + model = blip_model[0].to(device) + else: + from .modules.BLIP.blip_module import blip_vqa + + blip_dir = os.path.join(MODELS_DIR, 'blip') + if not os.path.exists(blip_dir): + os.makedirs(blip_dir, exist_ok=True) + + torch.hub.set_dir(blip_dir) + + if conf.__contains__('blip_model_vqa_url'): + model_url = conf['blip_model_vqa_url'] + else: + model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' + + model = blip_vqa(pretrained=model_url, image_size=size, vit='base') + model.eval() + model = model.to(device) + + with torch.no_grad(): + answer = model(tensor, question, train=False, inference='generate') + cstr(f"\033[33m BLIP Answer:\033[0m {answer[0]}").msg.print() + return (answer[0], ) + + else: + cstr(f"The selected mode `{mode}` is not a valid selection!").error.print() + return ('Invalid BLIP mode!', ) + + +# CLIPSeg Model Loader + +class WAS_CLIPSeg_Model_Loader: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model": ("STRING", {"default": "CIDAS/clipseg-rd64-refined", "multiline": False}), + }, + } + + RETURN_TYPES = ("CLIPSEG_MODEL",) + RETURN_NAMES = ("clipseg_model",) + FUNCTION = "clipseg_model" + + CATEGORY = "WAS Suite/Loaders" + + def clipseg_model(self, model): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + + cache = os.path.join(MODELS_DIR, 'clipseg') + + inputs = CLIPSegProcessor.from_pretrained(model, cache_dir=cache) + model = CLIPSegForImageSegmentation.from_pretrained(model, cache_dir=cache) + + return ( (inputs, model), ) + +# CLIPSeg Node + +class WAS_CLIPSeg: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "text": ("STRING", {"default":"", "multiline": False}), + }, + "optional": { + "clipseg_model": ("CLIPSEG_MODEL",), + } + } + + RETURN_TYPES = ("MASK", "IMAGE") + RETURN_NAMES = ("MASK", "MASK_IMAGE") + FUNCTION = "CLIPSeg_image" + + CATEGORY = "WAS 
Suite/Image/Masking" + + def CLIPSeg_image(self, image, text=None, clipseg_model=None): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + + image = tensor2pil(image) + cache = os.path.join(MODELS_DIR, 'clipseg') + + if clipseg_model: + inputs = clipseg_model[0] + model = clipseg_model[1] + else: + inputs = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir=cache) + model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir=cache) + + with torch.no_grad(): + result = model(**inputs(text=text, images=image, padding=True, return_tensors="pt")) + + tensor = torch.sigmoid(result[0]) + mask = 1. - (tensor - tensor.min()) / tensor.max() + mask = mask.unsqueeze(0) + mask = tensor2pil(mask).convert("L") + mask = mask.resize(image.size) + + return (pil2mask(mask), pil2tensor(ImageOps.invert(mask.convert("RGB")))) + +# CLIPSeg Node + +class WAS_CLIPSeg_Batch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_a": ("IMAGE",), + "image_b": ("IMAGE",), + "text_a": ("STRING", {"default":"", "multiline": False}), + "text_b": ("STRING", {"default":"", "multiline": False}), + }, + "optional": { + "image_c": ("IMAGE",), + "image_d": ("IMAGE",), + "image_e": ("IMAGE",), + "image_f": ("IMAGE",), + "text_c": ("STRING", {"default":"", "multiline": False}), + "text_d": ("STRING", {"default":"", "multiline": False}), + "text_e": ("STRING", {"default":"", "multiline": False}), + "text_f": ("STRING", {"default":"", "multiline": False}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", "IMAGE") + RETURN_NAMES = ("IMAGES_BATCH", "MASKS_BATCH", "MASK_IMAGES_BATCH") + FUNCTION = "CLIPSeg_images" + + CATEGORY = "WAS Suite/Image/Masking" + + def CLIPSeg_images(self, image_a, image_b, text_a, text_b, image_c=None, image_d=None, + image_e=None, image_f=None, text_c=None, text_d=None, text_e=None, text_f=None): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + import torch.nn.functional as F + + images_pil = [tensor2pil(image_a), tensor2pil(image_b)] + + if image_c is not None: + if image_c.shape[-2:] != image_a.shape[-2:]: + cstr("Size of image_c is different from image_a.").error.print() + return + images_pil.append(tensor2pil(image_c)) + if image_d is not None: + if image_d.shape[-2:] != image_a.shape[-2:]: + cstr("Size of image_d is different from image_a.").error.print() + return + images_pil.append(tensor2pil(image_d)) + if image_e is not None: + if image_e.shape[-2:] != image_a.shape[-2:]: + cstr("Size of image_e is different from image_a.").error.print() + return + images_pil.append(tensor2pil(image_e)) + if image_f is not None: + if image_f.shape[-2:] != image_a.shape[-2:]: + cstr("Size of image_f is different from image_a.").error.print() + return + images_pil.append(tensor2pil(image_f)) + + images_tensor = [torch.from_numpy(np.array(img.convert("RGB")).astype(np.float32) / 255.0).unsqueeze(0) for img in images_pil] + images_tensor = torch.cat(images_tensor, dim=0) + + prompts = [text_a, text_b] + if text_c: + prompts.append(text_c) + if text_d: + prompts.append(text_d) + if text_e: + prompts.append(text_e) + if text_f: + prompts.append(text_f) + + cache = os.path.join(MODELS_DIR, 'clipseg') + + inputs = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir=cache) + model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined", cache_dir=cache) + + with torch.no_grad(): + result = model(**inputs(text=prompts, 
images=images_pil, padding=True, return_tensors="pt")) + + masks = [] + mask_images = [] + for i, res in enumerate(result.logits): + tensor = torch.sigmoid(res) + mask = 1. - (tensor - tensor.min()) / tensor.max() + mask = mask.unsqueeze(0) + mask = tensor2pil(mask).convert("L") + mask = mask.resize(images_pil[0].size) + mask_batch = pil2mask(mask) + + masks.append(mask_batch.unsqueeze(0).unsqueeze(1)) + mask_images.append(pil2tensor(ImageOps.invert(mask.convert("RGB"))).squeeze(0)) + + masks_tensor = torch.cat(masks, dim=0) + mask_images_tensor = torch.stack(mask_images, dim=0) + + del inputs, model, result, tensor, masks, mask_images, images_pil + + return (images_tensor, masks_tensor, mask_images_tensor) + + +# SAM MODEL LOADER + +class WAS_SAM_Model_Loader: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "model_size": (["ViT-H", "ViT-L", "ViT-B"], ), + } + } + + RETURN_TYPES = ("SAM_MODEL",) + FUNCTION = "sam_load_model" + + CATEGORY = "WAS Suite/Image/Masking" + + def sam_load_model(self, model_size): + conf = getSuiteConfig() + + model_filename_mapping = { + "ViT-H": "sam_vit_h_4b8939.pth", + "ViT-L": "sam_vit_l_0b3195.pth", + "ViT-B": "sam_vit_b_01ec64.pth", + } + + model_url_mapping = { + "ViT-H": conf['sam_model_vith_url'] if conf.__contains__('sam_model_vith_url') else r"https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", + "ViT-L": conf['sam_model_vitl_url'] if conf.__contains__('sam_model_vitl_url') else r"https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth", + "ViT-B": conf['sam_model_vitb_url'] if conf.__contains__('sam_model_vitb_url') else r"https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", + } + + model_url = model_url_mapping[model_size] + model_filename = model_filename_mapping[model_size] + + if 'GitPython' not in packages(): + install_package("gitpython") + + if not os.path.exists(os.path.join(WAS_SUITE_ROOT, 'repos'+os.sep+'SAM')): + from git.repo.base import Repo + cstr("Installing SAM...").msg.print() + Repo.clone_from('https://github.com/facebookresearch/segment-anything', os.path.join(WAS_SUITE_ROOT, 'repos'+os.sep+'SAM')) + + sys.path.append(os.path.join(WAS_SUITE_ROOT, 'repos'+os.sep+'SAM')) + + sam_dir = os.path.join(MODELS_DIR, 'sam') + if not os.path.exists(sam_dir): + os.makedirs(sam_dir, exist_ok=True) + + sam_file = os.path.join(sam_dir, model_filename) + if not os.path.exists(sam_file): + cstr("Selected SAM model not found. 
Downloading...").msg.print() + r = requests.get(model_url, allow_redirects=True) + open(sam_file, 'wb').write(r.content) + + from segment_anything import build_sam_vit_h, build_sam_vit_l, build_sam_vit_b + + if model_size == 'ViT-H': + sam_model = build_sam_vit_h(sam_file) + elif model_size == 'ViT-L': + sam_model = build_sam_vit_l(sam_file) + elif model_size == 'ViT-B': + sam_model = build_sam_vit_b(sam_file) + else: + raise ValueError(f"SAM model does not match the model_size: '{model_size}'.") + + return (sam_model, ) + + +# SAM PARAMETERS +class WAS_SAM_Parameters: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "points": ("STRING", {"default": "[128, 128]; [0, 0]", "multiline": False}), + "labels": ("STRING", {"default": "[1, 0]", "multiline": False}), + } + } + + RETURN_TYPES = ("SAM_PARAMETERS",) + FUNCTION = "sam_parameters" + + CATEGORY = "WAS Suite/Image/Masking" + + def sam_parameters(self, points, labels): + parameters = { + "points": np.asarray(np.matrix(points)), + "labels": np.array(np.matrix(labels))[0] + } + + return (parameters,) + + +# SAM COMBINE PARAMETERS +class WAS_SAM_Combine_Parameters: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "sam_parameters_a": ("SAM_PARAMETERS",), + "sam_parameters_b": ("SAM_PARAMETERS",), + } + } + + RETURN_TYPES = ("SAM_PARAMETERS",) + FUNCTION = "sam_combine_parameters" + + CATEGORY = "WAS Suite/Image/Masking" + + def sam_combine_parameters(self, sam_parameters_a, sam_parameters_b): + parameters = { + "points": np.concatenate( + (sam_parameters_a["points"], + sam_parameters_b["points"]), + axis=0 + ), + "labels": np.concatenate( + (sam_parameters_a["labels"], + sam_parameters_b["labels"]) + ) + } + + return (parameters,) + + +# SAM IMAGE MASK +class WAS_SAM_Image_Mask: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "sam_model": ("SAM_MODEL",), + "sam_parameters": ("SAM_PARAMETERS",), + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "sam_image_mask" + + CATEGORY = "WAS Suite/Image/Masking" + + def sam_image_mask(self, sam_model, sam_parameters, image): + image = tensor2sam(image) + points = sam_parameters["points"] + labels = sam_parameters["labels"] + + from segment_anything import SamPredictor + + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + sam_model.to(device=device) + + predictor = SamPredictor(sam_model) + predictor.set_image(image) + + masks, scores, logits = predictor.predict( + point_coords=points, + point_labels=labels, + multimask_output=False + ) + + sam_model.to(device='cpu') + + mask = np.expand_dims(masks, axis=-1) + + image = np.repeat(mask, 3, axis=-1) + image = torch.from_numpy(image) + + mask = torch.from_numpy(mask) + mask = mask.squeeze(2) + mask = mask.squeeze().to(torch.float32) + + return (image, mask, ) + +#! 
BOUNDED IMAGES + +# IMAGE BOUNDS + +class WAS_Image_Bounds: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE_BOUNDS",) + FUNCTION = "image_bounds" + + CATEGORY = "WAS Suite/Image/Bound" + + def image_bounds(self, image): + # Ensure we are working with batches + image = image.unsqueeze(0) if image.dim() == 3 else image + + return([(0, img.shape[0]-1 , 0, img.shape[1]-1) for img in image],) + +# INSET IMAGE BOUNDS + +class WAS_Inset_Image_Bounds: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "image_bounds": ("IMAGE_BOUNDS",), + "inset_left": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "inset_right": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "inset_top": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "inset_bottom": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("IMAGE_BOUNDS",) + FUNCTION = "inset_image_bounds" + + CATEGORY = "WAS Suite/Image/Bound" + + def inset_image_bounds(self, image_bounds, inset_left, inset_right, inset_top, inset_bottom): + inset_bounds = [] + for rmin, rmax, cmin, cmax in image_bounds: + rmin += inset_top + rmax -= inset_bottom + cmin += inset_left + cmax -= inset_right + + if rmin > rmax or cmin > cmax: + raise ValueError("Invalid insets provided. Please make sure the insets do not exceed the image bounds.") + + inset_bounds.append((rmin, rmax, cmin, cmax)) + return (inset_bounds,) + +# BOUNDED IMAGE BLEND + +class WAS_Bounded_Image_Blend: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "target": ("IMAGE",), + "target_bounds": ("IMAGE_BOUNDS",), + "source": ("IMAGE",), + "blend_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}), + "feathering": ("INT", {"default": 16, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "bounded_image_blend" + + CATEGORY = "WAS Suite/Image/Bound" + + def bounded_image_blend(self, target, target_bounds, source, blend_factor, feathering): + # Ensure we are working with batches + target = target.unsqueeze(0) if target.dim() == 3 else target + source = source.unsqueeze(0) if source.dim() == 3 else source + + # If number of target images and source images don't match then all source images + # will be applied only to the first target image, otherwise they will be applied + # 1 to 1 + # If the number of target bounds and source images don't match then all sourcess will + # use the first target bounds for scaling and placing the source images, otherwise they + # will be applied 1 to 1 + tgt_len = 1 if len(target) != len(source) else len(source) + bounds_len = 1 if len(target_bounds) != len(source) else len(source) + + # Convert target PyTorch tensors to PIL images + tgt_arr = [tensor2pil(tgt) for tgt in target[:tgt_len]] + src_arr = [tensor2pil(src) for src in source] + + result_tensors = [] + for idx in range(len(src_arr)): + src = src_arr[idx] + # If only one target image, then ensure it is the only one used + if (tgt_len == 1 and idx == 0) or tgt_len > 1: + tgt = tgt_arr[idx] + + # If only one bounds object, no need to extract and calculate more than once. 
+ # Additionally, if only one bounds obuect, then the mask only needs created once + if (bounds_len == 1 and idx == 0) or bounds_len > 1: + # Extract the target bounds + rmin, rmax, cmin, cmax = target_bounds[idx] + + # Calculate the dimensions of the target bounds + height, width = (rmax - rmin + 1, cmax - cmin + 1) + + # Create the feathered mask portion the size of the target bounds + if feathering > 0: + inner_mask = Image.new('L', (width - (2 * feathering), height - (2 * feathering)), 255) + inner_mask = ImageOps.expand(inner_mask, border=feathering, fill=0) + inner_mask = inner_mask.filter(ImageFilter.GaussianBlur(radius=feathering)) + else: + inner_mask = Image.new('L', (width, height), 255) + + # Create a blend mask using the inner_mask and blend factor + inner_mask = inner_mask.point(lambda p: p * blend_factor) + + # Create the blend mask with the same size as the target image + tgt_mask = Image.new('L', tgt.size, 0) + # Paste the feathered mask portion into the blend mask at the target bounds position + tgt_mask.paste(inner_mask, (cmin, rmin)) + + # Resize the source image to match the dimensions of the target bounds + src_resized = src.resize((width, height), Image.Resampling.LANCZOS) + + # Create a blank image with the same size and mode as the target + src_positioned = Image.new(tgt.mode, tgt.size) + + # Paste the source image onto the blank image using the target bounds + src_positioned.paste(src_resized, (cmin, rmin)) + + # Blend the source and target images using the blend mask + result = Image.composite(src_positioned, tgt, tgt_mask) + + # Convert the result back to a PyTorch tensor + result_tensors.append(pil2tensor(result)) + + return (torch.cat(result_tensors, dim=0),) + +# BOUNDED IMAGE CROP + +class WAS_Bounded_Image_Crop: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "image": ("IMAGE",), + "image_bounds": ("IMAGE_BOUNDS",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "bounded_image_crop" + + CATEGORY = "WAS Suite/Image/Bound" + + def bounded_image_crop(self, image, image_bounds): + # Ensure we are working with batches + image = image.unsqueeze(0) if image.dim() == 3 else image + + # If number of images and bounds don't match, then only the first bounds will be used + # to crop the images, otherwise, each bounds will be used for each image 1 to 1 + bounds_len = 1 if len(image_bounds) != len(image) else len(image) + + cropped_images = [] + for idx in range(len(image)): + # If only one bounds object, no need to extract and calculate more than once. + if (bounds_len == 1 and idx == 0) or bounds_len > 1: + rmin, rmax, cmin, cmax = image_bounds[idx] + + # Check if the provided bounds are valid + if rmin > rmax or cmin > cmax: + raise ValueError("Invalid bounds provided. 
Please make sure the bounds are within the image dimensions.") + + cropped_images.append(image[idx][rmin:rmax+1, cmin:cmax+1, :]) + + return (torch.stack(cropped_images, dim=0),) + + +# BOUNDED IMAGE BLEND WITH MASK + +class WAS_Bounded_Image_Blend_With_Mask: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "target": ("IMAGE",), + "target_mask": ("MASK",), + "target_bounds": ("IMAGE_BOUNDS",), + "source": ("IMAGE",), + "blend_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}), + "feathering": ("INT", {"default": 16, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "bounded_image_blend_with_mask" + + CATEGORY = "WAS Suite/Image/Bound" + + def bounded_image_blend_with_mask(self, target, target_mask, target_bounds, source, blend_factor, feathering): + # Ensure we are working with batches + target = target.unsqueeze(0) if target.dim() == 3 else target + source = source.unsqueeze(0) if source.dim() == 3 else source + target_mask = target_mask.unsqueeze(0) if target_mask.dim() == 2 else target_mask + + # If number of target masks and source images don't match, then only the first mask will be used on + # the source images, otherwise, each mask will be used for each source image 1 to 1 + # Simarly, if the number of target images and source images don't match then + # all source images will be applied only to the first target, otherwise they will be applied + # 1 to 1 + tgt_mask_len = 1 if len(target_mask) != len(source) else len(source) + tgt_len = 1 if len(target) != len(source) else len(source) + bounds_len = 1 if len(target_bounds) != len(source) else len(source) + + tgt_arr = [tensor2pil(tgt) for tgt in target[:tgt_len]] + src_arr = [tensor2pil(src) for src in source] + tgt_mask_arr=[] + + # Convert Target Mask(s) to grayscale image format + for m_idx in range(tgt_mask_len): + np_array = np.clip((target_mask[m_idx].cpu().numpy().squeeze() * 255.0), 0, 255) + tgt_mask_arr.append(Image.fromarray((np_array).astype(np.uint8), mode='L')) + + result_tensors = [] + for idx in range(len(src_arr)): + src = src_arr[idx] + # If only one target image, then ensure it is the only one used + if (tgt_len == 1 and idx == 0) or tgt_len > 1: + tgt = tgt_arr[idx] + + # If only one bounds, no need to extract and calculate more than once + if (bounds_len == 1 and idx == 0) or bounds_len > 1: + # Extract the target bounds + rmin, rmax, cmin, cmax = target_bounds[idx] + + # Calculate the dimensions of the target bounds + height, width = (rmax - rmin + 1, cmax - cmin + 1) + + # If only one mask, then ensure that is the only the first is used + if (tgt_mask_len == 1 and idx == 0) or tgt_mask_len > 1: + tgt_mask = tgt_mask_arr[idx] + + # If only one mask and one bounds, then mask only needs to + # be extended once because all targets will be the same size + if (tgt_mask_len == 1 and bounds_len == 1 and idx == 0) or \ + (tgt_mask_len > 1 or bounds_len > 1): + + # This is an imperfect, but easy way to determine if the mask based on the + # target image or source image. If not target, assume source. 
If neither, + # then it's not going to look right regardless + if (tgt_mask.size != tgt.size): + # Create the blend mask with the same size as the target image + mask_extended_canvas = Image.new('L', tgt.size, 0) + + # Paste the mask portion into the extended mask at the target bounds position + mask_extended_canvas.paste(tgt_mask, (cmin, rmin)) + + tgt_mask = mask_extended_canvas + + # Apply feathering (Gaussian blur) to the blend mask if feather_amount is greater than 0 + if feathering > 0: + tgt_mask = tgt_mask.filter(ImageFilter.GaussianBlur(radius=feathering)) + + # Apply blending factor to the tgt mask now that it has been extended + tgt_mask = tgt_mask.point(lambda p: p * blend_factor) + + # Resize the source image to match the dimensions of the target bounds + src_resized = src.resize((width, height), Image.Resampling.LANCZOS) + + # Create a blank image with the same size and mode as the target + src_positioned = Image.new(tgt.mode, tgt.size) + + # Paste the source image onto the blank image using the target + src_positioned.paste(src_resized, (cmin, rmin)) + + # Blend the source and target images using the blend mask + result = Image.composite(src_positioned, tgt, tgt_mask) + + # Convert the result back to a PyTorch tensor + result_tensors.append(pil2tensor(result)) + + return (torch.cat(result_tensors, dim=0),) + +# BOUNDED IMAGE CROP WITH MASK + +class WAS_Bounded_Image_Crop_With_Mask: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(self): + return { + "required": { + "image": ("IMAGE",), + "mask": ("MASK",), + "padding_left": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "padding_right": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "padding_top": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + "padding_bottom": ("INT", {"default": 64, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("IMAGE", "IMAGE_BOUNDS",) + FUNCTION = "bounded_image_crop_with_mask" + + CATEGORY = "WAS Suite/Image/Bound" + + def bounded_image_crop_with_mask(self, image, mask, padding_left, padding_right, padding_top, padding_bottom): + # Ensure we are working with batches + image = image.unsqueeze(0) if image.dim() == 3 else image + mask = mask.unsqueeze(0) if mask.dim() == 2 else mask + + # If number of masks and images don't match, then only the first mask will be used on + # the images, otherwise, each mask will be used for each image 1 to 1 + mask_len = 1 if len(image) != len(mask) else len(image) + + cropped_images = [] + all_bounds = [] + for i in range(len(image)): + # Single mask or multiple? 
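+            # How the crop box is found below: torch.any collapses the mask
+            # along each axis, so `rows`/`cols` flag which rows/columns hold
+            # any masked pixel; torch.where(...)[0][[0, -1]] then takes the
+            # first and last flagged indices as a tight bounding box, which
+            # the padding inputs expand (clamped to the image edges).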
+ if (mask_len == 1 and i == 0) or mask_len > 0: + rows = torch.any(mask[i], dim=1) + cols = torch.any(mask[i], dim=0) + rmin, rmax = torch.where(rows)[0][[0, -1]] + cmin, cmax = torch.where(cols)[0][[0, -1]] + + rmin = max(rmin - padding_top, 0) + rmax = min(rmax + padding_bottom, mask[i].shape[0] - 1) + cmin = max(cmin - padding_left, 0) + cmax = min(cmax + padding_right, mask[i].shape[1] - 1) + + # Even if only a single mask, create a bounds for each cropped image + all_bounds.append([rmin, rmax, cmin, cmax]) + cropped_images.append(image[i][rmin:rmax+1, cmin:cmax+1, :]) + + return torch.stack(cropped_images), all_bounds + +# DEBUG IMAGE BOUNDS TO CONSOLE + +class WAS_Image_Bounds_to_Console: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_bounds": ("IMAGE_BOUNDS",), + "label": ("STRING", {"default": 'Debug to Console', "multiline": False}), + } + } + + RETURN_TYPES = ("IMAGE_BOUNDS",) + OUTPUT_NODE = True + FUNCTION = "debug_to_console" + + CATEGORY = "WAS Suite/Debug" + + def debug_to_console(self, image_bounds, label): + label_out = 'Debug to Console' + if label.strip() == '': + lable_out = label + + bounds_out = 'Empty' + if len(bounds_out) > 0: + bounds_out = ', \n '.join('\t(rmin={}, rmax={}, cmin={}, cmax={})' + .format(a, b, c, d) for a, b, c, d in image_bounds) + + cstr(f'\033[33m{label_out}\033[0m:\n[\n{bounds_out}\n]\n').msg.print() + return (image_bounds, ) + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + +#! NUMBERS + +# RANDOM NUMBER + +class WAS_Random_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number_type": (["integer", "float", "bool"],), + "minimum": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}), + "maximum": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + } + } + + RETURN_TYPES = ("NUMBER", "FLOAT", "INT") + FUNCTION = "return_randm_number" + + CATEGORY = "WAS Suite/Number" + + def return_randm_number(self, minimum, maximum, seed, number_type='integer'): + + # Set Generator Seed + random.seed(seed) + + # Return random number + if number_type: + if number_type == 'integer': + number = random.randint(minimum, maximum) + elif number_type == 'float': + number = random.uniform(minimum, maximum) + elif number_type == 'bool': + number = random.random() + else: + return + + # Return number + return (number, float(number), round(number)) + + @classmethod + def IS_CHANGED(cls, seed, **kwargs): + m = hashlib.sha256() + m.update(seed) + return m.digest().hex() + +# TRUE RANDOM NUMBER + +class WAS_True_Random_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "api_key": ("STRING",{"default":"00000000-0000-0000-0000-000000000000", "multiline": False}), + "minimum": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}), + "maximum": ("FLOAT", {"default": 10000000, "min": -18446744073709551615, "max": 18446744073709551615}), + "mode": (["random", "fixed"],), + } + } + + RETURN_TYPES = ("NUMBER", "FLOAT", "INT") + FUNCTION = "return_true_randm_number" + + CATEGORY = "WAS Suite/Number" + + def return_true_randm_number(self, api_key=None, minimum=0, maximum=10): + + # Get Random Number + number = self.get_random_numbers(api_key=api_key, minimum=minimum, maximum=maximum)[0] + + # Return number + return 
(number, ) + + def get_random_numbers(self, api_key=None, amount=1, minimum=0, maximum=10, mode="random"): + '''Get random number(s) from random.org''' + if api_key in [None, '00000000-0000-0000-0000-000000000000', '']: + cstr("No API key provided! A valid RANDOM.ORG API key is required to use `True Random.org Number Generator`").error.print() + return [0] + + url = "https://api.random.org/json-rpc/2/invoke" + headers = {"Content-Type": "application/json"} + payload = { + "jsonrpc": "2.0", + "method": "generateIntegers", + "params": { + "apiKey": api_key, + "n": amount, + "min": minimum, + "max": maximum, + "replacement": True, + "base": 10 + }, + "id": 1 + } + + response = requests.post(url, headers=headers, data=json.dumps(payload)) + if response.status_code == 200: + data = response.json() + if "result" in data: + return data["result"]["random"]["data"], float(data["result"]["random"]["data"]), int(data["result"]["random"]["data"]) + + return [0] + + @classmethod + def IS_CHANGED(cls, api_key, mode, **kwargs): + m = hashlib.sha256() + m.update(api_key) + if mode == 'fixed': + return m.digest().hex() + return float("NaN") + + +# CONSTANT NUMBER + +class WAS_Constant_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number_type": (["integer", "float", "bool"],), + "number": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615}), + }, + "optional": { + "number_as_text": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}), + } + } + + RETURN_TYPES = ("NUMBER", "FLOAT", "INT") + FUNCTION = "return_constant_number" + + CATEGORY = "WAS Suite/Number" + + def return_constant_number(self, number_type, number, number_as_text=None): + + if number_as_text: + if number_type == "integer": + number = int(number_as_text) + elif number_type == "float": + number = float(number_as_text) + else: + number = bool(number_as_text) + + # Return number + if number_type: + if number_type == 'integer': + return (int(number), float(number), int(number) ) + elif number_type == 'integer': + return (float(number), float(number), int(number) ) + elif number_type == 'bool': + boolean = (1 if float(number) > 0.5 else 0) + return (int(boolean), float(boolean), int(boolean) ) + else: + return (number, float(number), int(number) ) + +# INCREMENT NUMBER + +class WAS_Number_Counter: + def __init__(self): + self.counters = {} + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number_type": (["integer", "float"],), + "mode": (["increment", "decrement", "increment_to_stop", "decrement_to_stop"],), + "start": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 0.01}), + "stop": ("FLOAT", {"default": 0, "min": -18446744073709551615, "max": 18446744073709551615, "step": 0.01}), + "step": ("FLOAT", {"default": 1, "min": 0, "max": 99999, "step": 0.01}), + }, + "optional": { + "reset_bool": ("NUMBER",), + }, + "hidden": { + "unique_id": "UNIQUE_ID", + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = ("NUMBER", "FLOAT", "INT") + RETURN_NAMES = ("number", "float", "int") + FUNCTION = "increment_number" + + CATEGORY = "WAS Suite/Number" + + def increment_number(self, number_type, mode, start, stop, step, unique_id, reset_bool=0): + + counter = int(start) if mode == 'integer' else start + if self.counters.__contains__(unique_id): + counter = self.counters[unique_id] + + if round(reset_bool) >= 1: + counter = start + + if mode 
== 'increment': + counter += step + elif mode == 'deccrement': + counter -= step + elif mode == 'increment_to_stop': + counter = counter + step if counter < stop else counter + elif mode == 'decrement_to_stop': + counter = counter - step if counter > stop else counter + + self.counters[unique_id] = counter + + result = int(counter) if number_type == 'integer' else float(counter) + + return ( result, float(counter), int(counter) ) + + +# NUMBER TO SEED + +class WAS_Number_To_Seed: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number": ("NUMBER",), + } + } + + RETURN_TYPES = ("SEED",) + FUNCTION = "number_to_seed" + + CATEGORY = "WAS Suite/Number/Operations" + + def number_to_seed(self, number): + return ({"seed": number, }, ) + + +# NUMBER TO INT + +class WAS_Number_To_Int: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number": ("NUMBER",), + } + } + + RETURN_TYPES = ("INT",) + FUNCTION = "number_to_int" + + CATEGORY = "WAS Suite/Number/Operations" + + def number_to_int(self, number): + return (int(number), ) + + + +# NUMBER TO FLOAT + +class WAS_Number_To_Float: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number": ("NUMBER",), + } + } + + RETURN_TYPES = ("FLOAT",) + FUNCTION = "number_to_float" + + CATEGORY = "WAS Suite/Number/Operations" + + def number_to_float(self, number): + return (float(number), ) + + + +# INT TO NUMBER + +class WAS_Int_To_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "int_input": ("INT",), + } + } + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "int_to_number" + + CATEGORY = "WAS Suite/Number/Operations" + + def int_to_number(self, int_input): + return (int(int_input), ) + + + +# NUMBER TO FLOAT + +class WAS_Float_To_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "float_input": ("FLOAT",), + } + } + + RETURN_TYPES = ("NUMBER",) + FUNCTION = "float_to_number" + + CATEGORY = "WAS Suite/Number/Operations" + + def float_to_number(self, float_input): + return ( float(float_input), ) + + +# NUMBER TO STRING + +class WAS_Number_To_String: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number": ("NUMBER",), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "number_to_string" + + CATEGORY = "WAS Suite/Number/Operations" + + def number_to_string(self, number): + return ( str(number), ) + +# NUMBER TO STRING + +class WAS_Number_To_Text: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number": ("NUMBER",), + } + } + + RETURN_TYPES = (TEXT_TYPE,) + FUNCTION = "number_to_text" + + CATEGORY = "WAS Suite/Number/Operations" + + def number_to_text(self, number): + return ( str(number), ) + + +# NUMBER PI + +class WAS_Number_PI: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": {} + } + + RETURN_TYPES = ("NUMBER", "FLOAT") + FUNCTION = "number_pi" + + CATEGORY = "WAS Suite/Number" + + def number_pi(self): + return (math.pi, math.pi) + +# Boolean + +class WAS_Boolean: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "boolean_number": ("FLOAT", {"default":1, "min":0, "max":1, "step":1}), + } + } + + RETURN_TYPES = ("NUMBER","INT") + FUNCTION = "return_boolean" + + CATEGORY = "WAS Suite/Logic" + + def 
+
+# Boolean
+
+class WAS_Boolean:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "boolean_number": ("FLOAT", {"default":1, "min":0, "max":1, "step":1}),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER","INT")
+    FUNCTION = "return_boolean"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def return_boolean(self, boolean_number=1):
+        return (int(round(boolean_number)), int(round(boolean_number)))
+
+# NUMBER OPERATIONS
+
+
+class WAS_Number_Operation:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "number_a": ("NUMBER",),
+                "number_b": ("NUMBER",),
+                "operation": (["addition", "subtraction", "division", "floor division", "multiplication", "exponentiation", "modulus", "greater-than", "greater-than or equals", "less-than", "less-than or equals", "equals", "does not equal"],),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER", "FLOAT", "INT")
+    FUNCTION = "math_operations"
+
+    CATEGORY = "WAS Suite/Number/Operations"
+
+    def math_operations(self, number_a, number_b, operation="addition"):
+
+        # Perform the selected operation; comparison operations return 1 or 0
+        operations = {
+            'addition': lambda a, b: a + b,
+            'subtraction': lambda a, b: a - b,
+            'division': lambda a, b: a / b,
+            'floor division': lambda a, b: a // b,
+            'multiplication': lambda a, b: a * b,
+            'exponentiation': lambda a, b: a ** b,
+            'modulus': lambda a, b: a % b,
+            'greater-than': lambda a, b: int(a > b),
+            'greater-than or equals': lambda a, b: int(a >= b),
+            'less-than': lambda a, b: int(a < b),
+            'less-than or equals': lambda a, b: int(a <= b),
+            'equals': lambda a, b: int(a == b),
+            'does not equal': lambda a, b: int(a != b),
+        }
+
+        if operation in operations:
+            result = operations[operation](number_a, number_b)
+            return result, result, int(result)
+
+        cstr("Invalid number operation selected.").error.print()
+        return (number_a, number_a, int(number_a))
+
+# NUMBER MULTIPLE OF
+
+class WAS_Number_Multiple_Of:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "number": ("NUMBER",),
+                "multiple": ("INT", {"default": 8, "min": -18446744073709551615, "max": 18446744073709551615}),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER", "FLOAT", "INT")
+    FUNCTION = "number_multiple_of"
+
+    CATEGORY = "WAS Suite/Number/Functions"
+
+    def number_multiple_of(self, number, multiple=8):
+        if number % multiple != 0:
+            # Round up to the next multiple, returning the full 3-tuple the
+            # RETURN_TYPES declare
+            result = (number // multiple) * multiple + multiple
+            return (result, float(result), int(result))
+        return (number, float(number), int(number))
+
+
+#! MISC
+
+
+# Bus. Converts the 5 main connectors into one, and back again. You can provide a bus as input
+# or the 5 separate inputs, or a combination. If you provide a bus input and a separate
+# input (e.g. a model), the model will take precedence.
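+#
+# A minimal usage sketch (hypothetical; assumes model/clip/vae were already
+# produced by the usual loader nodes):
+#
+#   bus_node = WAS_Bus()
+#   bus, *_ = bus_node.bus_fn(model=model, clip=clip, vae=vae)
+#   # Later, unpack the bus and override only the model:
+#   _, m, c, v, pos, neg = bus_node.bus_fn(bus=bus, model=other_model)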
+# +# The term 'bus' comes from computer hardware, see https://en.wikipedia.org/wiki/Bus_(computing) +class WAS_Bus: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required":{}, + "optional": { + "bus" : ("BUS",), + "model": ("MODEL",), + "clip": ("CLIP",), + "vae": ("VAE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + } + } + RETURN_TYPES = ("BUS", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING",) + RETURN_NAMES = ("bus", "model", "clip", "vae", "positive", "negative") + FUNCTION = "bus_fn" + CATEGORY = "WAS Suite/Utilities" + + def bus_fn(self, bus=(None,None,None,None,None), model=None, clip=None, vae=None, positive=None, negative=None): + + # Unpack the 5 constituents of the bus from the bus tuple. + (bus_model, bus_clip, bus_vae, bus_positive, bus_negative) = bus + + # If you pass in specific inputs, they override what comes from the bus. + out_model = model or bus_model + out_clip = clip or bus_clip + out_vae = vae or bus_vae + out_positive = positive or bus_positive + out_negative = negative or bus_negative + + # Squash all 5 inputs into the output bus tuple. + out_bus = (out_model, out_clip, out_vae, out_positive, out_negative) + + if not out_model: + raise ValueError('Either model or bus containing a model should be supplied') + if not out_clip: + raise ValueError('Either clip or bus containing a clip should be supplied') + if not out_vae: + raise ValueError('Either vae or bus containing a vae should be supplied') + # We don't insist that a bus contains conditioning. + + return (out_bus, out_model, out_clip, out_vae, out_positive, out_negative) + + +# Image Width and Height to Number + +class WAS_Image_Size_To_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("NUMBER", "NUMBER", "FLOAT", "FLOAT", "INT", "INT") + RETURN_NAMES = ("width_num", "height_num", "width_float", "height_float", "width_int", "height_int") + FUNCTION = "image_width_height" + + CATEGORY = "WAS Suite/Number/Operations" + + def image_width_height(self, image): + image = tensor2pil(image) + if image.size: + return( image.size[0], image.size[1], float(image.size[0]), float(image.size[1]), image.size[0], image.size[1] ) + return ( 0, 0, 0, 0, 0, 0) + + +# Latent Width and Height to Number + +class WAS_Latent_Size_To_Number: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "samples": ("LATENT",), + } + } + + RETURN_TYPES = ("NUMBER", "NUMBER", "FLOAT", "FLOAT", "INT", "INT") + RETURN_NAMES = ("tensor_w_num","tensor_h_num") + FUNCTION = "latent_width_height" + + CATEGORY = "WAS Suite/Number/Operations" + + def latent_width_height(self, samples): + size_dict = {} + i = 0 + for tensor in samples['samples'][0]: + if not isinstance(tensor, torch.Tensor): + cstr(f'Input should be a torch.Tensor').error.print() + shape = tensor.shape + tensor_height = shape[-2] + tensor_width = shape[-1] + size_dict.update({i:[tensor_width, tensor_height]}) + return ( size_dict[0][0], size_dict[0][1], float(size_dict[0][0]), float(size_dict[0][1]), size_dict[0][0], size_dict[0][1] ) + + +# LATENT INPUT SWITCH + +class WAS_Latent_Input_Switch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent_a": ("LATENT",), + "latent_b": ("LATENT",), + "boolean_number": ("NUMBER",), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "latent_input_switch" + + CATEGORY = "WAS 
Suite/Logic" + + def latent_input_switch(self, latent_a, latent_b, boolean_number=1): + + if int(round(boolean_number)) == 1: + return (latent_a, ) + else: + return (latent_b, ) + +# NUMBER INPUT CONDITION + +class WAS_Number_Input_Condition: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "number_a": ("NUMBER",), + "number_b": ("NUMBER",), + "return_boolean": (["false", "true"],), + "comparison": (["and", "or", "greater-than", "greater-than or equals", "less-than", "less-than or equals", "equals", "does not equal", "divisible by", "if A odd", "if A even", "if A prime", "factor of"],), + } + } + + RETURN_TYPES = ("NUMBER", "FLOAT", "INT") + FUNCTION = "number_input_condition" + + CATEGORY = "WAS Suite/Logic" + + def number_input_condition(self, number_a, number_b, return_boolean="false", comparison="greater-than"): + + if comparison: + if return_boolean == 'true': + if comparison == 'and': + result = 1 if number_a != 0 and number_b != 0 else 0 + elif comparison == 'or': + result = 1 if number_a != 0 or number_b != 0 else 0 + elif comparison == 'greater-than': + result = 1 if number_a > number_b else 0 + elif comparison == 'greater-than or equals': + result = 1 if number_a >= number_b else 0 + elif comparison == 'less-than': + result = 1 if number_a < number_b else 0 + elif comparison == 'less-than or equals': + result = 1 if number_a <= number_b else 0 + elif comparison == 'equals': + result = 1 if number_a == number_b else 0 + elif comparison == 'does not equal': + result = 1 if number_a != number_b else 0 + elif comparison == 'divisible by': + result = 1 if number_b % number_a == 0 else 0 + elif comparison == 'if A odd': + result = 1 if number_a % 2 != 0 else 0 + elif comparison == 'if A even': + result = 1 if number_a % 2 == 0 else 0 + elif comparison == 'if A prime': + result = 1 if self.is_prime(number_a) else 0 + elif comparison == 'factor of': + result = 1 if number_b % number_a == 0 else 0 + else: + result = 0 + else: + if comparison == 'and': + result = number_a if number_a != 0 and number_b != 0 else number_b + elif comparison == 'or': + result = number_a if number_a != 0 or number_b != 0 else number_b + elif comparison == 'greater-than': + result = number_a if number_a > number_b else number_b + elif comparison == 'greater-than or equals': + result = number_a if number_a >= number_b else number_b + elif comparison == 'less-than': + result = number_a if number_a < number_b else number_b + elif comparison == 'less-than or equals': + result = number_a if number_a <= number_b else number_b + elif comparison == 'equals': + result = number_a if number_a == number_b else number_b + elif comparison == 'does not equal': + result = number_a if number_a != number_b else number_b + elif comparison == 'divisible by': + result = number_a if number_b % number_a == 0 else number_b + elif comparison == 'if A odd': + result = number_a if number_a % 2 != 0 else number_b + elif comparison == 'if A even': + result = number_a if number_a % 2 == 0 else number_b + elif comparison == 'if A prime': + result = number_a if self.is_prime(number_a) else number_b + elif comparison == 'factor of': + result = number_a if number_b % number_a == 0 else number_b + else: + result = number_a + + return (result, float(result), int(result)) + + def is_prime(self, n): + if n <= 1: + return False + elif n <= 3: + return True + elif n % 2 == 0 or n % 3 == 0: + return False + i = 5 + while i * i <= n: + if n % i == 0 or n % (i + 2) == 0: + return False + i += 6 + return 
True
+
+# ASPECT RATIO
+
+class WAS_Image_Aspect_Ratio:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {},
+            "optional": {
+                "image": ("IMAGE",),
+                "width": ("NUMBER",),
+                "height": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER", "FLOAT", "NUMBER", TEXT_TYPE, TEXT_TYPE)
+    RETURN_NAMES = ("aspect_number", "aspect_float", "is_landscape_bool", "aspect_ratio_common", "aspect_type")
+    FUNCTION = "aspect"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def aspect(self, image=None, width=None, height=None):
+
+        if width and height:
+            # math.gcd below requires integers
+            width, height = int(width), int(height)
+        elif image is not None:
+            width, height = tensor2pil(image).size
+        else:
+            raise Exception("WAS_Image_Aspect_Ratio must have width and height provided if no image tensor is supplied.")
+
+        aspect_ratio = width / height
+        aspect_type = "landscape" if aspect_ratio > 1 else "portrait" if aspect_ratio < 1 else "square"
+
+        landscape_bool = 0
+        if aspect_type == "landscape":
+            landscape_bool = 1
+
+        gcd = math.gcd(width, height)
+        gcd_w = width // gcd
+        gcd_h = height // gcd
+        aspect_ratio_common = f"{gcd_w}:{gcd_h}"
+
+        return aspect_ratio, aspect_ratio, landscape_bool, aspect_ratio_common, aspect_type
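+
+# A minimal usage sketch for the node above (hypothetical dimensions):
+#
+#   node = WAS_Image_Aspect_Ratio()
+#   node.aspect(width=1920, height=1080)
+#   # -> (1.777..., 1.777..., 1, '16:9', 'landscape')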
+
+
+# NUMBER INPUT SWITCH
+
+class WAS_Number_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "number_a": ("NUMBER",),
+                "number_b": ("NUMBER",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER", "FLOAT", "INT")
+    FUNCTION = "number_input_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def number_input_switch(self, number_a, number_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (number_a, float(number_a), int(number_a))
+        else:
+            return (number_b, float(number_b), int(number_b))
+
+
+# IMAGE INPUT SWITCH
+
+class WAS_Image_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image_a": ("IMAGE",),
+                "image_b": ("IMAGE",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "image_input_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def image_input_switch(self, image_a, image_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (image_a, )
+        else:
+            return (image_b, )
+
+# CONDITIONING INPUT SWITCH
+
+class WAS_Conditioning_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "conditioning_a": ("CONDITIONING",),
+                "conditioning_b": ("CONDITIONING",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "conditioning_input_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def conditioning_input_switch(self, conditioning_a, conditioning_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (conditioning_a, )
+        else:
+            return (conditioning_b, )
+
+# MODEL INPUT SWITCH
+
+class WAS_Model_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "model_a": ("MODEL",),
+                "model_b": ("MODEL",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "model_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def model_switch(self, model_a, model_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (model_a, )
+        else:
+            return (model_b, )
+
+# VAE INPUT SWITCH
+
+class WAS_VAE_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "vae_a": ("VAE",),
+                "vae_b": ("VAE",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("VAE",)
+    FUNCTION = "vae_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def vae_switch(self, vae_a, vae_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (vae_a, )
+        else:
+            return (vae_b, )
+
+# CLIP INPUT SWITCH
+
+class WAS_CLIP_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "clip_a": ("CLIP",),
+                "clip_b": ("CLIP",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "clip_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def clip_switch(self, clip_a, clip_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (clip_a, )
+        else:
+            return (clip_b, )
+
+# UPSCALE MODEL INPUT SWITCH
+
+class WAS_Upscale_Model_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "upscale_model_a": ("UPSCALE_MODEL",),
+                "upscale_model_b": ("UPSCALE_MODEL",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("UPSCALE_MODEL",)
+    FUNCTION = "upscale_model_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def upscale_model_switch(self, upscale_model_a, upscale_model_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (upscale_model_a, )
+        else:
+            return (upscale_model_b, )
+
+
+# CONTROL NET INPUT SWITCH
+
+class WAS_Control_Net_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "control_net_a": ("CONTROL_NET",),
+                "control_net_b": ("CONTROL_NET",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("CONTROL_NET",)
+    FUNCTION = "control_net_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def control_net_switch(self, control_net_a, control_net_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (control_net_a, )
+        else:
+            return (control_net_b, )
+
+# CLIP VISION INPUT SWITCH
+
+class WAS_CLIP_Vision_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "clip_vision_a": ("CLIP_VISION",),
+                "clip_vision_b": ("CLIP_VISION",),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = ("CLIP_VISION",)
+    FUNCTION = "clip_vision_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def clip_vision_switch(self, clip_vision_a, clip_vision_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (clip_vision_a, )
+        else:
+            # Return a tuple here too, to match the declared RETURN_TYPES
+            return (clip_vision_b, )
+
+# TEXT INPUT SWITCH
+
+class WAS_Text_Input_Switch:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "text_a": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+                "text_b": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
+                "boolean_number": ("NUMBER",),
+            }
+        }
+
+    RETURN_TYPES = (TEXT_TYPE,)
+    FUNCTION = "text_input_switch"
+
+    CATEGORY = "WAS Suite/Logic"
+
+    def text_input_switch(self, text_a, text_b, boolean_number=1):
+
+        if int(round(boolean_number)) == 1:
+            return (text_a, )
+        else:
+            return (text_b, )
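+
+# All of the *_Input_Switch nodes above share the same contract (a sketch with
+# hypothetical inputs): `boolean_number` is rounded, 1 selects input A, and
+# anything else selects input B.
+#
+#   switch = WAS_Text_Input_Switch()
+#   switch.text_input_switch("hello", "world", 1)  # -> ("hello",)
+#   switch.text_input_switch("hello", "world", 0)  # -> ("world",)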
+
+
+# DEBUG INPUT TO CONSOLE
+
+
+class WAS_Debug_Number_to_Console:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "number": ("NUMBER",),
+                "label": ("STRING", {"default": 'Debug to Console', "multiline": False}),
+            }
+        }
+
+    RETURN_TYPES = ("NUMBER",)
+    OUTPUT_NODE = True
+    FUNCTION = "debug_to_console"
+
+    CATEGORY = "WAS Suite/Debug"
+
+    def debug_to_console(self, number, label):
+        if label.strip() != '':
+            cstr(f'\033[33m{label}\033[0m:\n{number}\n').msg.print()
+        else:
+            cstr(f'\033[33mDebug to Console\033[0m:\n{number}\n').msg.print()
+        return (number, )
+
+    @classmethod
+    def IS_CHANGED(cls, **kwargs):
+        return float("NaN")
+
+
+# CUSTOM COMFYUI NODES
+
+class WAS_Checkpoint_Loader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "config_name": (comfy_paths.get_filename_list("configs"), ),
+                              "ckpt_name": (comfy_paths.get_filename_list("checkpoints"), )}}
+    RETURN_TYPES = ("MODEL", "CLIP", "VAE", TEXT_TYPE)
+    RETURN_NAMES = ("MODEL", "CLIP", "VAE", "NAME_STRING")
+    FUNCTION = "load_checkpoint"
+
+    CATEGORY = "WAS Suite/Loaders/Advanced"
+
+    def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
+        config_path = comfy_paths.get_full_path("configs", config_name)
+        ckpt_path = comfy_paths.get_full_path("checkpoints", ckpt_name)
+        out = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=comfy_paths.get_folder_paths("embeddings"))
+        return (out[0], out[1], out[2], os.path.splitext(os.path.basename(ckpt_name))[0])
+
+class WAS_Diffusers_Hub_Model_Loader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "repo_id": ("STRING", {"multiline":False}),
+                              "revision": ("STRING", {"default": "None", "multiline":False})}}
+    RETURN_TYPES = ("MODEL", "CLIP", "VAE", TEXT_TYPE)
+    RETURN_NAMES = ("MODEL", "CLIP", "VAE", "NAME_STRING")
+    FUNCTION = "load_hub_checkpoint"
+
+    CATEGORY = "WAS Suite/Loaders/Advanced"
+
+    def load_hub_checkpoint(self, repo_id=None, revision=None):
+        if revision in ["", "None", "none", None]:
+            revision = None
+        model_path = comfy_paths.get_folder_paths("diffusers")[0]
+        self.download_diffusers_model(repo_id, model_path, revision)
+        diffusersLoader = nodes.DiffusersLoader()
+        model, clip, vae = diffusersLoader.load_checkpoint(os.path.join(model_path, repo_id))
+        return (model, clip, vae, repo_id)
+
+    def download_diffusers_model(self, repo_id, local_dir, revision=None):
+        if 'huggingface-hub' not in packages():
+            install_package("huggingface_hub")
+
+        from huggingface_hub import snapshot_download
+        model_path = os.path.join(local_dir, repo_id)
+        ignore_patterns = ["*.ckpt","*.safetensors","*.onnx"]
+        snapshot_download(repo_id=repo_id, repo_type="model", local_dir=model_path, revision=revision, use_auth_token=False, ignore_patterns=ignore_patterns)
+
+class WAS_Checkpoint_Loader_Simple:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "ckpt_name": (comfy_paths.get_filename_list("checkpoints"), ),
+                             }}
+    RETURN_TYPES = ("MODEL", "CLIP", "VAE", 
TEXT_TYPE) + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "NAME_STRING") + FUNCTION = "load_checkpoint" + + CATEGORY = "WAS Suite/Loaders" + + def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = comfy_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=comfy_paths.get_folder_paths("embeddings")) + return (out[0], out[1], out[2], os.path.splitext(os.path.basename(ckpt_name))[0]) + +class WAS_Diffusers_Loader: + @classmethod + def INPUT_TYPES(cls): + paths = [] + for search_path in comfy_paths.get_folder_paths("diffusers"): + if os.path.exists(search_path): + paths += next(os.walk(search_path))[1] + return {"required": {"model_path": (paths,), }} + RETURN_TYPES = ("MODEL", "CLIP", "VAE", TEXT_TYPE) + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "NAME_STRING") + FUNCTION = "load_checkpoint" + + CATEGORY = "WAS Suite/Loaders/Advanced" + + def load_checkpoint(self, model_path, output_vae=True, output_clip=True): + for search_path in comfy_paths.get_folder_paths("diffusers"): + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] + if model_path in paths: + model_path = os.path.join(search_path, model_path) + break + + out = comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=comfy_paths.get_folder_paths("embeddings")) + return (out[0], out[1], out[2], os.path.basename(model_path)) + + +class WAS_unCLIP_Checkpoint_Loader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "ckpt_name": (comfy_paths.get_filename_list("checkpoints"), ), + }} + RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION", "STRING") + RETURN_NAMES = ("MODEL", "CLIP", "VAE", "CLIP_VISION", "NAME_STRING") + FUNCTION = "load_checkpoint" + + CATEGORY = "WAS Suite/Loaders" + + def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = comfy_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=comfy_paths.get_folder_paths("embeddings")) + return (out[0], out[1], out[2], out[3], os.path.splitext(os.path.basename(ckpt_name))[0]) + + +class WAS_Lora_Input_Switch: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "model_a": ("MODEL",), + "clip_a": ("CLIP",), + "model_b": ("MODEL",), + "clip_b": ("CLIP",), + "boolean_number": ("NUMBER",), + } + } + RETURN_TYPES = ("MODEL", "CLIP") + FUNCTION = "lora_input_switch" + + CATEGORY = "WAS Suite/Logic" + + def lora_input_switch(self, model_a, clip_a, model_b, clip_b, boolean_number=1): + if int(round(boolean_number)) == 1: + return (model_a, clip_a) + else: + return (model_b, clip_b) + + +class WAS_Lora_Loader: + def __init__(self): + self.loaded_lora = None; + + @classmethod + def INPUT_TYPES(s): + file_list = comfy_paths.get_filename_list("loras") + file_list.insert(0, "None") + return {"required": { "model": ("MODEL",), + "clip": ("CLIP", ), + "lora_name": (file_list, ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + "strength_clip": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL", "CLIP", TEXT_TYPE) + RETURN_NAMES = ("MODEL", "CLIP", "NAME_STRING") + FUNCTION = "load_lora" + + CATEGORY = "WAS Suite/Loaders" + + def 
load_lora(self, model, clip, lora_name, strength_model, strength_clip):
+        if lora_name == "None":
+            # "None" is the placeholder entry prepended to the lora list; pass through untouched
+            return (model, clip, "None")
+        if strength_model == 0 and strength_clip == 0:
+            # Return the name string too, so the result matches the declared RETURN_TYPES
+            return (model, clip, os.path.splitext(os.path.basename(lora_name))[0])
+
+        lora_path = comfy_paths.get_full_path("loras", lora_name)
+        lora = None
+        if self.loaded_lora is not None:
+            if self.loaded_lora[0] == lora_path:
+                lora = self.loaded_lora[1]
+            else:
+                temp = self.loaded_lora
+                self.loaded_lora = None
+                del temp
+
+        if lora is None:
+            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+            self.loaded_lora = (lora_path, lora)
+
+        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
+        return (model_lora, clip_lora, os.path.splitext(os.path.basename(lora_name))[0])
+
+class WAS_Upscale_Model_Loader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model_name": (comfy_paths.get_filename_list("upscale_models"), ),
+                             }}
+    RETURN_TYPES = ("UPSCALE_MODEL",TEXT_TYPE)
+    RETURN_NAMES = ("UPSCALE_MODEL","MODEL_NAME_TEXT")
+    FUNCTION = "load_model"
+
+    CATEGORY = "WAS Suite/Loaders"
+
+    def load_model(self, model_name):
+        model_path = comfy_paths.get_full_path("upscale_models", model_name)
+        sd = comfy.utils.load_torch_file(model_path)
+        out = model_loading.load_state_dict(sd).eval()
+        return (out,model_name)
+
+# VIDEO WRITER
+
+class WAS_Video_Writer:
+    def __init__(self):
+        pass
+
+    @classmethod
+    def INPUT_TYPES(cls):
+        WTools = WAS_Tools_Class()
+        v = WTools.VideoWriter()
+        codecs = []
+        for codec in v.get_codecs():
+            codecs.append(codec.upper())
+        codecs = sorted(codecs)
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "transition_frames": ("INT", {"default":30, "min":0, "max":120, "step":1}),
+                "image_delay_sec": ("FLOAT", {"default":2.5, "min":0.1, "max":60000.0, "step":0.1}),
+                "fps": ("INT", {"default":30, "min":1, "max":60.0, "step":1}),
+                "max_size": ("INT", {"default":512, "min":128, "max":1920, "step":1}),
+                "output_path": ("STRING", {"default": "./ComfyUI/output", "multiline": False}),
+                "filename": ("STRING", {"default": "comfy_writer", "multiline": False}),
+                "codec": (codecs,),
+            }
+        }
+
+    #@classmethod
+    #def IS_CHANGED(cls, **kwargs):
+    #    return float("NaN")
+
+    RETURN_TYPES = ("IMAGE",TEXT_TYPE,TEXT_TYPE)
+    RETURN_NAMES = ("IMAGE_PASS","filepath_text","filename_text")
+    FUNCTION = "write_video"
+
+    CATEGORY = "WAS Suite/Animation/Writer"
+
+    def write_video(self, image, transition_frames=10, image_delay_sec=10, fps=30, max_size=512,
+                    output_path="./ComfyUI/output", filename="morph", codec="H264"):
+
+        conf = getSuiteConfig()
+        if not conf.__contains__('ffmpeg_bin_path'):
+            cstr(f"Unable to use MP4 Writer because the `ffmpeg_bin_path` is not set in `{WAS_CONFIG_FILE}`").error.print()
+            return (image,"","")
+
+        if conf.__contains__('ffmpeg_bin_path'):
+            if conf['ffmpeg_bin_path'] != "/path/to/ffmpeg":
+                sys.path.append(conf['ffmpeg_bin_path'])
+                os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
+                os.environ['OPENCV_FFMPEG_BINARY'] = conf['ffmpeg_bin_path']
+
+        if output_path.strip() in [None, "", "."]:
+            output_path = "./ComfyUI/output"
+
+        if image is None:
+            image = pil2tensor(Image.new("RGB", (512,512), (0,0,0)))
+
+        if transition_frames < 0:
+            transition_frames = 0
+        elif transition_frames > 60:
+            transition_frames = 60
+
+        if fps < 1:
+            fps = 1
+        elif fps > 60:
+            fps = 60
+
+        results = []
+        for img in image:
+            print(img.shape)
+            new_image = self.rescale_image(tensor2pil(img), max_size)
+            print(new_image.size)
+
+            tokens = TextTokens()
+            output_path = 
os.path.abspath(os.path.join(*tokens.parseTokens(output_path).split('/'))) + output_file = os.path.join(output_path, tokens.parseTokens(filename)) + + if not os.path.exists(output_path): + os.makedirs(output_path, exist_ok=True) + + WTools = WAS_Tools_Class() + MP4Writer = WTools.VideoWriter(int(transition_frames), int(fps), int(image_delay_sec), max_size=max_size, codec=codec) + path = MP4Writer.write(new_image, output_file) + + results.append(img) + + return (torch.cat(results, dim=0), path, filename) + + def rescale_image(self, image, max_dimension): + width, height = image.size + if width > max_dimension or height > max_dimension: + scaling_factor = max(width, height) / max_dimension + new_width = int(width / scaling_factor) + new_height = int(height / scaling_factor) + image = image.resize((new_width, new_height), Image.Resampling(1)) + return image + +# VIDEO CREATOR + +class WAS_Create_Video_From_Path: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + WTools = WAS_Tools_Class() + v = WTools.VideoWriter() + codecs = [] + for codec in v.get_codecs(): + codecs.append(codec.upper()) + codecs = sorted(codecs) + return { + "required": { + "transition_frames": ("INT", {"default":30, "min":0, "max":120, "step":1}), + "image_delay_sec": ("FLOAT", {"default":2.5, "min":0.01, "max":60000.0, "step":0.01}), + "fps": ("INT", {"default":30, "min":1, "max":60.0, "step":1}), + "max_size": ("INT", {"default":512, "min":128, "max":1920, "step":1}), + "input_path": ("STRING", {"default": "./ComfyUI/input", "multiline": False}), + "output_path": ("STRING", {"default": "./ComfyUI/output", "multiline": False}), + "filename": ("STRING", {"default": "comfy_video", "multiline": False}), + "codec": (codecs,), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE) + RETURN_NAMES = ("filepath_text","filename_text") + FUNCTION = "create_video_from_path" + + CATEGORY = "WAS Suite/Animation" + + def create_video_from_path(self, transition_frames=10, image_delay_sec=10, fps=30, max_size=512, + input_path="./ComfyUI/input", output_path="./ComfyUI/output", filename="morph", codec="H264"): + + conf = getSuiteConfig() + if not conf.__contains__('ffmpeg_bin_path'): + cstr(f"Unable to use MP4 Writer because the `ffmpeg_bin_path` is not set in `{WAS_CONFIG_FILE}`").error.print() + return ("","") + + if conf.__contains__('ffmpeg_bin_path'): + if conf['ffmpeg_bin_path'] != "/path/to/ffmpeg": + sys.path.append(conf['ffmpeg_bin_path']) + os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp" + os.environ['OPENCV_FFMPEG_BINARY'] = conf['ffmpeg_bin_path'] + + if output_path.strip() in [None, "", "."]: + output_path = "./ComfyUI/output" + + if transition_frames < 0: + transition_frames = 0 + elif transition_frames > 60: + transition_frames = 60 + + if fps < 1: + fps = 1 + elif fps > 60: + fps = 60 + + tokens = TextTokens() + + # Check if output_path is an absolute path + if not os.path.isabs(output_path): + output_path = os.path.abspath(os.path.join(*tokens.parseTokens(output_path).split('/'))) + + output_file = os.path.join(output_path, tokens.parseTokens(filename)) + + if not os.path.exists(output_path): + os.makedirs(output_path, exist_ok=True) + + WTools = WAS_Tools_Class() + MP4Writer = WTools.VideoWriter(int(transition_frames), int(fps), int(image_delay_sec), max_size, codec) + path = MP4Writer.create_video(input_path, output_file) + + return (path, filename) + +# VIDEO FRAME DUMP + +class WAS_Video_Frame_Dump: + def 
__init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "video_path": ("STRING", {"default":"./ComfyUI/input/MyVideo.mp4", "multiline":False}), + "output_path": ("STRING", {"default": "./ComfyUI/input/MyVideo", "multiline": False}), + "prefix": ("STRING", {"default": "frame_", "multiline": False}), + "filenumber_digits": ("INT", {"default":4, "min":-1, "max":8, "step":1}), + "extension": (["png","jpg","gif","tiff"],), + } + } + + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + RETURN_TYPES = (TEXT_TYPE,"NUMBER") + RETURN_NAMES = ("output_path","processed_count") + FUNCTION = "dump_video_frames" + + CATEGORY = "WAS Suite/Animation" + + def dump_video_frames(self, video_path, output_path, prefix="fame_", extension="png",filenumber_digits=-1): + + conf = getSuiteConfig() + if not conf.__contains__('ffmpeg_bin_path'): + cstr(f"Unable to use dump frames because the `ffmpeg_bin_path` is not set in `{WAS_CONFIG_FILE}`").error.print() + return ("",0) + + if conf.__contains__('ffmpeg_bin_path'): + if conf['ffmpeg_bin_path'] != "/path/to/ffmpeg": + sys.path.append(conf['ffmpeg_bin_path']) + os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp" + os.environ['OPENCV_FFMPEG_BINARY'] = conf['ffmpeg_bin_path'] + + if output_path.strip() in [None, "", "."]: + output_path = "./ComfyUI/input/frames" + + tokens = TextTokens() + output_path = os.path.abspath(os.path.join(*tokens.parseTokens(output_path).split('/'))) + prefix = tokens.parseTokens(prefix) + + WTools = WAS_Tools_Class() + MP4Writer = WTools.VideoWriter() + processed = MP4Writer.extract(video_path, output_path, prefix, extension,filenumber_digits) + + return (output_path, processed) + +# CACHING + +class WAS_Cache: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), + "image_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), + "conditioning_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), + }, + "optional": { + "output_path": ("STRING", {"default": os.path.join(WAS_SUITE_ROOT, 'cache'), "multiline": False}), + "latent": ("LATENT",), + "image": ("IMAGE",), + "conditioning": ("CONDITIONING",), + } + } + + RETURN_TYPES = (TEXT_TYPE,TEXT_TYPE,TEXT_TYPE) + RETURN_NAMES = ("latent_filename","image_filename","conditioning_filename") + FUNCTION = "cache_input" + OUTPUT_NODE = True + + CATEGORY = "WAS Suite/IO" + + def cache_input(self, latent_suffix="_cache", image_suffix="_cache", conditioning_suffix="_cache", output_path=None, latent=None, image=None, conditioning=None): + + if 'joblib' not in packages(): + install_package('joblib') + + import joblib + + output = os.path.join(WAS_SUITE_ROOT, 'cache') + if output_path: + if output_path.strip() not in ['', 'none', 'None']: + output = output_path + if not os.path.isabs(output): + output = os.path.abspath(output) + if not os.path.exists(output): + os.makedirs(output, exist_ok=True) + + l_filename = "" + i_filename = "" + c_filename = "" + + tokens = TextTokens() + output = tokens.parseTokens(output) + + if latent != None: + l_filename = f'{tokens.parseTokens(latent_suffix)}.latent' + out_file = os.path.join(output, l_filename) + joblib.dump(latent, out_file) + cstr(f"Latent saved to: {out_file}").msg.print() + + if image != None: + i_filename = 
f'{tokens.parseTokens(image_suffix)}.image' + out_file = os.path.join(output, i_filename) + joblib.dump(image, out_file) + cstr(f"Tensor batch saved to: {out_file}").msg.print() + + if conditioning != None: + c_filename = f'{tokens.parseTokens(conditioning_suffix)}.conditioning' + out_file = os.path.join(output, c_filename) + joblib.dump(conditioning, os.path.join(output, out_file)) + cstr(f"Conditioning saved to: {out_file}").msg.print() + + return (l_filename, i_filename, c_filename) + + +class WAS_Load_Cache: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent_path": ("STRING", {"default": "", "multiline":False}), + "image_path": ("STRING", {"default": "", "multiline":False}), + "conditioning_path": ("STRING", {"default": "", "multiline":False}), + } + } + + RETURN_TYPES = ("LATENT","IMAGE","CONDITIONING") + RETURN_NAMES = ("LATENT","IMAGE","CONDITIONING") + FUNCTION = "load_cache" + + CATEGORY = "WAS Suite/IO" + + def load_cache(self, latent_path=None, image_path=None, conditioning_path=None): + + if 'joblib' not in packages(): + install_package('joblib') + + import joblib + + input_path = os.path.join(WAS_SUITE_ROOT, 'cache') + + latent = None + image = None + conditioning = None + + if latent_path not in ["",None]: + if os.path.exists(latent_path): + latent = joblib.load(latent_path) + else: + cstr(f"Unable to locate cache file {latent_path}").error.print() + + if image_path not in ["",None]: + if os.path.exists(image_path): + image = joblib.load(image_path) + else: + cstr(f"Unable to locate cache file {image_path}").msg.print() + + if conditioning_path not in ["",None]: + if os.path.exists(conditioning_path): + conditioning = joblib.load(conditioning_path) + else: + cstr(f"Unable to locate cache file {conditioning_path}").error.print() + + return (latent, image, conditioning) + + +# SAMPLES PASS STAT SYSTEM + +class WAS_Samples_Passthrough_Stat_System: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "samples": ("LATENT",), + } + } + + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("samples",) + FUNCTION = "stat_system" + + CATEGORY = "WAS Suite/Debug" + + def stat_system(self, samples): + + log = "" + for stat in self.get_system_stats(): + log += stat + "\n" + + cstr("\n"+log).msg.print() + + return (samples,) + + def get_system_stats(self): + + import psutil + + # RAM + ram = psutil.virtual_memory() + ram_used = ram.used / (1024 ** 3) + ram_total = ram.total / (1024 ** 3) + ram_stats = f"Used RAM: {ram_used:.2f} GB / Total RAM: {ram_total:.2f} GB" + + # VRAM (with PyTorch) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + vram_used = torch.cuda.memory_allocated(device) / (1024 ** 3) + vram_total = torch.cuda.get_device_properties(device).total_memory / (1024 ** 3) + vram_stats = f"Used VRAM: {vram_used:.2f} GB / Total VRAM: {vram_total:.2f} GB" + + # Hard Drive Space + hard_drive = psutil.disk_usage("/") + used_space = hard_drive.used / (1024 ** 3) + total_space = hard_drive.total / (1024 ** 3) + hard_drive_stats = f"Used Space: {used_space:.2f} GB / Total Space: {total_space:.2f} GB" + + return [ram_stats, vram_stats, hard_drive_stats] + +# Class to count the number of places on an integer + +class WAS_Integer_Place_Counter: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "int_input": ("INT", {"default": 0, "min": 0, "max": 10000000, "step": 1}), + } + } + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("INT_PLACES",) + FUNCTION = 
"count_places" + + CATEGORY = "WAS Suite/Integer" + + def count_places(self, int_input): + output = len(str(int_input)) + cstr("\nInteger Places Count: "+str(output)).msg.print() + return (output,) + + +# NODE MAPPING +NODE_CLASS_MAPPINGS = { + "BLIP Model Loader": WAS_BLIP_Model_Loader, + "Blend Latents": WAS_Blend_Latents, + "Bus Node": WAS_Bus, + "Cache Node": WAS_Cache, + "Checkpoint Loader": WAS_Checkpoint_Loader, + "Checkpoint Loader (Simple)": WAS_Checkpoint_Loader_Simple, + "CLIPTextEncode (NSP)": WAS_NSP_CLIPTextEncoder, + "CLIP Input Switch": WAS_CLIP_Input_Switch, + "CLIP Vision Input Switch": WAS_CLIP_Vision_Input_Switch, + "Conditioning Input Switch": WAS_Conditioning_Input_Switch, + "Constant Number": WAS_Constant_Number, + "Create Grid Image": WAS_Image_Grid_Image, + "Create Grid Image from Batch": WAS_Image_Grid_Image_Batch, + "Create Morph Image": WAS_Image_Morph_GIF, + "Create Morph Image from Path": WAS_Image_Morph_GIF_By_Path, + "Create Video from Path": WAS_Create_Video_From_Path, + "CLIPSeg Masking": WAS_CLIPSeg, + "CLIPSeg Model Loader": WAS_CLIPSeg_Model_Loader, + "CLIPSeg Batch Masking": WAS_CLIPSeg_Batch, + "Convert Masks to Images": WAS_Mask_To_Image, + "Control Net Model Input Switch": WAS_Control_Net_Input_Switch, + "Debug Number to Console": WAS_Debug_Number_to_Console, + "Dictionary to Console": WAS_Dictionary_To_Console, + "Diffusers Model Loader": WAS_Diffusers_Loader, + "Diffusers Hub Model Down-Loader": WAS_Diffusers_Hub_Model_Loader, + "Export API": WAS_Export_API, + "Latent Input Switch": WAS_Latent_Input_Switch, + "Load Cache": WAS_Load_Cache, + "Logic Boolean": WAS_Boolean, + "Lora Loader": WAS_Lora_Loader, + "Image SSAO (Ambient Occlusion)": WAS_Image_Ambient_Occlusion, + "Image SSDO (Direct Occlusion)": WAS_Image_Direct_Occlusion, + "Image Analyze": WAS_Image_Analyze, + "Image Aspect Ratio": WAS_Image_Aspect_Ratio, + "Image Batch": WAS_Image_Batch, + "Image Blank": WAS_Image_Blank, + "Image Blend by Mask": WAS_Image_Blend_Mask, + "Image Blend": WAS_Image_Blend, + "Image Blending Mode": WAS_Image_Blending_Mode, + "Image Bloom Filter": WAS_Image_Bloom_Filter, + "Image Canny Filter": WAS_Canny_Filter, + "Image Chromatic Aberration": WAS_Image_Chromatic_Aberration, + "Image Color Palette": WAS_Image_Color_Palette, + "Image Crop Face": WAS_Image_Crop_Face, + "Image Crop Location": WAS_Image_Crop_Location, + "Image Crop Square Location": WAS_Image_Crop_Square_Location, + "Image Displacement Warp": WAS_Image_Displacement_Warp, + "Image Lucy Sharpen": WAS_Lucy_Sharpen, + "Image Paste Face": WAS_Image_Paste_Face_Crop, + "Image Paste Crop": WAS_Image_Paste_Crop, + "Image Paste Crop by Location": WAS_Image_Paste_Crop_Location, + "Image Pixelate": WAS_Image_Pixelate, + "Image Power Noise": WAS_Image_Power_Noise, + "Image Dragan Photography Filter": WAS_Dragon_Filter, + "Image Edge Detection Filter": WAS_Image_Edge, + "Image Film Grain": WAS_Film_Grain, + "Image Filter Adjustments": WAS_Image_Filters, + "Image Flip": WAS_Image_Flip, + "Image Gradient Map": WAS_Image_Gradient_Map, + "Image Generate Gradient": WAS_Image_Generate_Gradient, + "Image High Pass Filter": WAS_Image_High_Pass_Filter, + "Image History Loader": WAS_Image_History, + "Image Input Switch": WAS_Image_Input_Switch, + "Image Levels Adjustment": WAS_Image_Levels, + "Image Load": WAS_Load_Image, + "Image Median Filter": WAS_Image_Median_Filter, + "Image Mix RGB Channels": WAS_Image_RGB_Merge, + "Image Monitor Effects Filter": WAS_Image_Monitor_Distortion_Filter, + "Image Nova Filter": 
WAS_Image_Nova_Filter, + "Image Padding": WAS_Image_Padding, + "Image Perlin Noise": WAS_Image_Perlin_Noise, + "Image Rembg (Remove Background)": WAS_Remove_Rembg, + "Image Perlin Power Fractal": WAS_Image_Perlin_Power_Fractal, + "Image Remove Background (Alpha)": WAS_Remove_Background, + "Image Remove Color": WAS_Image_Remove_Color, + "Image Resize": WAS_Image_Rescale, + "Image Rotate": WAS_Image_Rotate, + "Image Rotate Hue": WAS_Image_Rotate_Hue, + "Image Save": WAS_Image_Save, + "Image Seamless Texture": WAS_Image_Make_Seamless, + "Image Select Channel": WAS_Image_Select_Channel, + "Image Select Color": WAS_Image_Select_Color, + "Image Shadows and Highlights": WAS_Shadow_And_Highlight_Adjustment, + "Image Size to Number": WAS_Image_Size_To_Number, + "Image Stitch": WAS_Image_Stitch, + "Image Style Filter": WAS_Image_Style_Filter, + "Image Threshold": WAS_Image_Threshold, + "Image Tiled": WAS_Image_Tile_Batch, + "Image Transpose": WAS_Image_Transpose, + "Image fDOF Filter": WAS_Image_fDOF, + "Image to Latent Mask": WAS_Image_To_Mask, + "Image to Noise": WAS_Image_To_Noise, + "Image to Seed": WAS_Image_To_Seed, + "Images to RGB": WAS_Images_To_RGB, + "Images to Linear": WAS_Images_To_Linear, + "Integer place counter": WAS_Integer_Place_Counter, + "Image Voronoi Noise Filter": WAS_Image_Voronoi_Noise_Filter, + "KSampler (WAS)": WAS_KSampler, + "KSampler Cycle": WAS_KSampler_Cycle, + "Latent Noise Injection": WAS_Latent_Noise, + "Latent Size to Number": WAS_Latent_Size_To_Number, + "Latent Upscale by Factor (WAS)": WAS_Latent_Upscale, + "Load Image Batch": WAS_Load_Image_Batch, + "Load Text File": WAS_Text_Load_From_File, + "Load Lora": WAS_Lora_Loader, + "Lora Input Switch": WAS_Lora_Input_Switch, + "Masks Add": WAS_Mask_Add, + "Masks Subtract": WAS_Mask_Subtract, + "Mask Arbitrary Region": WAS_Mask_Arbitrary_Region, + "Mask Batch to Mask": WAS_Mask_Batch_to_Single_Mask, + "Mask Batch": WAS_Mask_Batch, + "Mask Ceiling Region": WAS_Mask_Ceiling_Region, + "Mask Crop Dominant Region": WAS_Mask_Crop_Dominant_Region, + "Mask Crop Minority Region": WAS_Mask_Crop_Minority_Region, + "Mask Crop Region": WAS_Mask_Crop_Region, + "Mask Paste Region": WAS_Mask_Paste_Region, + "Mask Dilate Region": WAS_Mask_Dilate_Region, + "Mask Dominant Region": WAS_Mask_Dominant_Region, + "Mask Erode Region": WAS_Mask_Erode_Region, + "Mask Fill Holes": WAS_Mask_Fill_Region, + "Mask Floor Region": WAS_Mask_Floor_Region, + "Mask Gaussian Region": WAS_Mask_Gaussian_Region, + "Mask Invert": WAS_Mask_Invert, + "Mask Minority Region": WAS_Mask_Minority_Region, + "Mask Smooth Region": WAS_Mask_Smooth_Region, + "Mask Threshold Region": WAS_Mask_Threshold_Region, + "Masks Combine Regions": WAS_Mask_Combine, + "Masks Combine Batch": WAS_Mask_Combine_Batch, + "MiDaS Model Loader": MiDaS_Model_Loader, + "MiDaS Depth Approximation": MiDaS_Depth_Approx, + "MiDaS Mask Image": MiDaS_Background_Foreground_Removal, + "Model Input Switch": WAS_Model_Input_Switch, + "Number Counter": WAS_Number_Counter, + "Number Operation": WAS_Number_Operation, + "Number to Float": WAS_Number_To_Float, + "Number Input Switch": WAS_Number_Input_Switch, + "Number Input Condition": WAS_Number_Input_Condition, + "Number Multiple Of": WAS_Number_Multiple_Of, + "Number PI": WAS_Number_PI, + "Number to Int": WAS_Number_To_Int, + "Number to Seed": WAS_Number_To_Seed, + "Number to String": WAS_Number_To_String, + "Number to Text": WAS_Number_To_Text, + "Prompt Styles Selector": WAS_Prompt_Styles_Selector, + "Prompt Multiple Styles Selector": 
WAS_Prompt_Multiple_Styles_Selector, + "Random Number": WAS_Random_Number, + "Save Text File": WAS_Text_Save, + "Seed": WAS_Seed, + "Tensor Batch to Image": WAS_Tensor_Batch_to_Image, + "BLIP Analyze Image": WAS_BLIP_Analyze_Image, + "SAM Model Loader": WAS_SAM_Model_Loader, + "SAM Parameters": WAS_SAM_Parameters, + "SAM Parameters Combine": WAS_SAM_Combine_Parameters, + "SAM Image Mask": WAS_SAM_Image_Mask, + "Samples Passthrough (Stat System)": WAS_Samples_Passthrough_Stat_System, + "String to Text": WAS_String_To_Text, + "Image Bounds": WAS_Image_Bounds, + "Inset Image Bounds": WAS_Inset_Image_Bounds, + "Bounded Image Blend": WAS_Bounded_Image_Blend, + "Bounded Image Blend with Mask": WAS_Bounded_Image_Blend_With_Mask, + "Bounded Image Crop": WAS_Bounded_Image_Crop, + "Bounded Image Crop with Mask": WAS_Bounded_Image_Crop_With_Mask, + "Image Bounds to Console": WAS_Image_Bounds_to_Console, + "Text Dictionary Update": WAS_Dictionary_Update, + "Text Add Tokens": WAS_Text_Add_Tokens, + "Text Add Token by Input": WAS_Text_Add_Token_Input, + "Text Compare": WAS_Text_Compare, + "Text Concatenate": WAS_Text_Concatenate, + "Text File History Loader": WAS_Text_File_History, + "Text Find and Replace by Dictionary": WAS_Search_and_Replace_Dictionary, + "Text Find and Replace Input": WAS_Search_and_Replace_Input, + "Text Find and Replace": WAS_Search_and_Replace, + "Text Input Switch": WAS_Text_Input_Switch, + "Text List": WAS_Text_List, + "Text List Concatenate": WAS_Text_List_Concatenate, + "Text Load Line From File": WAS_Text_Load_Line_From_File, + "Text Multiline": WAS_Text_Multiline, + "Text Parse A1111 Embeddings": WAS_Text_Parse_Embeddings_By_Name, + "Text Parse Noodle Soup Prompts": WAS_Text_Parse_NSP, + "Text Parse Tokens": WAS_Text_Parse_Tokens, + "Text Random Line": WAS_Text_Random_Line, + "Text Random Prompt": WAS_Text_Random_Prompt, + "Text String": WAS_Text_String, + "Text Shuffle": WAS_Text_Shuffle, + "Text to Conditioning": WAS_Text_to_Conditioning, + "Text to Console": WAS_Text_to_Console, + "Text to Number": WAS_Text_To_Number, + "Text to String": WAS_Text_To_String, + "Text String Truncate": WAS_Text_String_Truncate, + "True Random.org Number Generator": WAS_True_Random_Number, + "unCLIP Checkpoint Loader": WAS_unCLIP_Checkpoint_Loader, + "Upscale Model Loader": WAS_Upscale_Model_Loader, + "Upscale Model Switch": WAS_Upscale_Model_Input_Switch, + "Write to GIF": WAS_Image_Morph_GIF_Writer, + "Write to Video": WAS_Video_Writer, + "VAE Input Switch": WAS_VAE_Input_Switch, + "Video Dump Frames": WAS_Video_Frame_Dump, +} + +#! 
EXTRA NODES
+
+# Check for BlenderNeko's Advanced CLIP Text Encode repo
+BKAdvCLIP_dir = os.path.join(CUSTOM_NODES_DIR, "ComfyUI_ADV_CLIP_emb")
+if os.path.exists(BKAdvCLIP_dir):
+
+    cstr(f"BlenderNeko\'s Advanced CLIP Text Encode found, attempting to enable `CLIPTextEncode` support.").msg.print()
+
+    class WAS_AdvancedCLIPTextEncode:
+        @classmethod
+        def INPUT_TYPES(s):
+            return {
+                "required": {
+                    "mode": (["Noodle Soup Prompts", "Wildcards"],),
+                    "noodle_key": ("STRING", {"default": '__', "multiline": False}),
+                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+                    "clip": ("CLIP", ),
+                    "token_normalization": (["none", "mean", "length", "length+mean"],),
+                    "weight_interpretation": (["comfy", "A1111", "compel", "comfy++"],),
+                    "text": ("STRING", {"multiline": True}),
+                }
+            }
+
+        RETURN_TYPES = ("CONDITIONING", TEXT_TYPE, TEXT_TYPE)
+        RETURN_NAMES = ("conditioning", "parsed_text", "raw_text")
+        OUTPUT_NODE = True
+        FUNCTION = "encode"
+        CATEGORY = "WAS Suite/Conditioning"
+
+        DESCRIPTION = "A node based on Blenderneko's Advanced CLIP Text Encode. This version adds the ability to use Noodle Soup Prompts and Wildcards. Wildcards are stored in WAS Node Suite root under the folder 'wildcards'. You can create the folder if it doesn't exist and move your wildcards into it."
+        URL = {
+            "Example Workflow": "https://github.com/WASasquatch/was-node-suite-comfyui",
+        }
+        IMAGES = [
+            "https://i.postimg.cc/Jh4N2h5r/CLIPText-Encode-BLK-plus-NSP.png",
+        ]
+
+        def encode(self, clip, text, token_normalization, weight_interpretation, seed=0, mode="Noodle Soup Prompts", noodle_key="__"):
+
+            BKAdvCLIP_dir = os.path.join(CUSTOM_NODES_DIR, "ComfyUI_ADV_CLIP_emb")
+            sys.path.append(BKAdvCLIP_dir)
+
+            from ComfyUI_ADV_CLIP_emb.nodes import AdvancedCLIPTextEncode
+
+            if mode == "Noodle Soup Prompts":
+                new_text = nsp_parse(text, int(seed), noodle_key)
+            else:
+                new_text = replace_wildcards(text, (None if seed == 0 else seed), noodle_key)
+
+            new_text = parse_dynamic_prompt(new_text, seed)
+            new_text, text_vars = parse_prompt_vars(new_text)
+            cstr(f"CLIPTextEncode Parsed Prompt:\n {new_text}").msg.print()
+
+            encode = AdvancedCLIPTextEncode().encode(clip, new_text, token_normalization, weight_interpretation)
+
+            sys.path.remove(BKAdvCLIP_dir)
+
+            return ([[encode[0][0][0], encode[0][0][1]]], new_text, text, { "ui": { "string": new_text } } )
+
+
+    NODE_CLASS_MAPPINGS.update({"CLIPTextEncode (BlenderNeko Advanced + NSP)": WAS_AdvancedCLIPTextEncode})
+
+    if NODE_CLASS_MAPPINGS.__contains__("CLIPTextEncode (BlenderNeko Advanced + NSP)"):
+        cstr('`CLIPTextEncode (BlenderNeko Advanced + NSP)` node enabled under `WAS Suite/Conditioning` menu.').msg.print()
+
+# opencv-python-headless handling
+if 'opencv-python' in packages() or 'opencv-python-headless' in packages():
+    try:
+        import cv2
+        build_info = ' '.join(cv2.getBuildInformation().split())
+        if "FFMPEG: YES" in build_info:
+            if was_config.__contains__('show_startup_junk'):
+                if was_config['show_startup_junk']:
+                    cstr("OpenCV Python FFMPEG support is enabled").msg.print()
+            if was_config.__contains__('ffmpeg_bin_path'):
+                if was_config['ffmpeg_bin_path'] == "/path/to/ffmpeg":
+                    cstr(f"`ffmpeg_bin_path` is not set in `{WAS_CONFIG_FILE}` config file. 
Will attempt to use system ffmpeg binaries if available.").warning.print() + else: + if was_config.__contains__('show_startup_junk'): + if was_config['show_startup_junk']: + cstr(f"`ffmpeg_bin_path` is set to: {was_config['ffmpeg_bin_path']}").msg.print() + else: + cstr(f"OpenCV Python FFMPEG support is not enabled\033[0m. OpenCV Python FFMPEG support, and FFMPEG binaries is required for video writing.").warning.print() + except ImportError: + cstr("OpenCV Python module cannot be found. Attempting install...").warning.print() + install_package( + package='opencv-python-headless[ffmpeg]', + uninstall_first=['opencv-python', 'opencv-python-headless[ffmpeg]'] + ) + try: + import cv2 + cstr("OpenCV Python installed.").msg.print() + except ImportError: + cstr("OpenCV Python module still cannot be imported. There is a system conflict.").error.print() +else: + install_package('opencv-python-headless[ffmpeg]') + try: + import cv2 + cstr("OpenCV Python installed.").msg.print() + except ImportError: + cstr("OpenCV Python module still cannot be imported. There is a system conflict.").error.print() + +# scipy handling +if 'scipy' not in packages(): + install_package('scipy') + try: + import scipy + except ImportError as e: + cstr("Unable to import tools for certain masking procedures.").msg.print() + print(e) + +# scikit-image handling +try: + import skimage +except ImportError as e: + install_package( + package='scikit-image', + uninstall_first=['scikit-image'] + ) + import skimage + +was_conf = getSuiteConfig() + +# Suppress warnings +if was_conf.__contains__('suppress_uncomfy_warnings'): + if was_conf['suppress_uncomfy_warnings']: + import warnings + warnings.filterwarnings("ignore", category=UserWarning, module="safetensors") + warnings.filterwarnings("ignore", category=UserWarning, module="torch") + warnings.filterwarnings("ignore", category=UserWarning, module="transformers") + +# Well we got here, we're as loaded as we're gonna get. +print(" ".join([cstr("Finished.").msg, cstr("Loaded").green, cstr(len(NODE_CLASS_MAPPINGS.keys())).end, cstr("nodes successfully.").green])) + +show_quotes = True +if was_conf.__contains__('show_inspiration_quote'): + if was_conf['show_inspiration_quote'] == False: + show_quotes = False +if show_quotes: + art_quotes = [ + # ARTISTIC INSPIRATION QUOTES + '\033[93m"Every artist was first an amateur."\033[0m\033[3m - Ralph Waldo Emerson', + '\033[93m"Art is not freedom from discipline, but disciplined freedom."\033[0m\033[3m - John F. Kennedy', + '\033[93m"Art enables us to find ourselves and lose ourselves at the same time."\033[0m\033[3m - Thomas Merton', + '\033[93m"Art is the most intense mode of individualism that the world has known."\033[0m\033[3m - Oscar Wilde', + '\033[93m"The purpose of art is washing the dust of daily life off our souls."\033[0m\033[3m - Pablo Picasso', + '\033[93m"Art is the lie that enables us to realize the truth."\033[0m\033[3m - Pablo Picasso', + '\033[93m"Art is not what you see, but what you make others see."\033[0m\033[3m - Edgar Degas', + '\033[93m"Every artist dips his brush in his own soul, and paints his own nature into his pictures."\033[0m\033[3m - Henry Ward Beecher', + '\033[93m"Art is the stored honey of the human soul."\033[0m\033[3m - Theodore Dreiser', + '\033[93m"Creativity takes courage."\033[0m\033[3m - Henri Matisse', + '\033[93m"Art should disturb the comfortable and comfort the disturbed." 
- Cesar Cruz', + '\033[93m"Art is the most beautiful of all lies."\033[0m\033[3m - Claude Debussy', + '\033[93m"Art is the journey of a free soul."\033[0m\033[3m - Alev Oguz', + '\033[93m"The artist\'s world is limitless. It can be found anywhere, far from where he lives or a few feet away. It is always on his doorstep."\033[0m\033[3m - Paul Strand', + '\033[93m"Art is not a thing; it is a way."\033[0m\033[3m - Elbert Hubbard', + '\033[93m"Art is the lie that enables us to recognize the truth."\033[0m\033[3m - Friedrich Nietzsche', + '\033[93m"Art is the triumph over chaos."\033[0m\033[3m - John Cheever', + '\033[93m"Art is the lie that enables us to realize the truth."\033[0m\033[3m - Pablo Picasso', + '\033[93m"Art is the only way to run away without leaving home."\033[0m\033[3m - Twyla Tharp', + '\033[93m"Art is the most powerful tool we have to connect with the world and express our individuality."\033[0m\033[3m - Unknown', + '\033[93m"Art is not about making something perfect, it\'s about making something meaningful."\033[0m\033[3m - Unknown', + '\033[93m"Art is the voice of the soul, expressing what words cannot."\033[0m\033[3m - Unknown', + '\033[93m"Art is the bridge that connects imagination to reality."\033[0m\033[3m - Unknown', + '\033[93m"Art is the language of the heart and the window to the soul."\033[0m\033[3m - Unknown', + '\033[93m"Art is the magic that brings beauty into the world."\033[0m\033[3m - Unknown', + '\033[93m"Art is the freedom to create, explore, and inspire."\033[0m\033[3m - Unknown', + '\033[93m"Art is the mirror that reflects the beauty within us."\033[0m\033[3m - Unknown', + '\033[93m"Art is the universal language that transcends boundaries and speaks to all."\033[0m\033[3m - Unknown', + '\033[93m"Art is the light that shines even in the darkest corners."\033[0m\033[3m - Unknown', + '\033[93m"Art is the soul made visible."\033[0m\033[3m - George Crook', + '\033[93m"Art is the breath of life."\033[0m\033[3m - Liza Donnelly', + '\033[93m"Art is a harmony parallel with nature."\033[0m\033[3m - Paul Cézanne', + '\033[93m"Art is the daughter of freedom."\033[0m\033[3m - Friedrich Schiller', + # GENERAL INSPIRATION QUOTES + '\033[93m"Believe you can and you\'re halfway there."\033[0m\033[3m - Theodore Roosevelt', + '\033[93m"The only way to do great work is to love what you do."\033[0m\033[3m - Steve Jobs', + '\033[93m"Success is not final, failure is not fatal: It is the courage to continue that counts."\033[0m\033[3m - Winston Churchill', + '\033[93m"Your time is limited, don\'t waste it living someone else\'s life."\033[0m\033[3m - Steve Jobs', + '\033[93m"The future belongs to those who believe in the beauty of their dreams."\033[0m\033[3m - Eleanor Roosevelt', + '\033[93m"Success is not the key to happiness. Happiness is the key to success."\033[0m\033[3m - Albert Schweitzer', + '\033[93m"The best way to predict the future is to create it."\033[0m\033[3m - Peter Drucker', + '\033[93m"Don\'t watch the clock; do what it does. Keep going."\033[0m\033[3m - Sam Levenson', + '\033[93m"Believe in yourself, take on your challenges, and dig deep within yourself to conquer fears."\033[0m\033[3m - Chantal Sutherland', + '\033[93m"Challenges are what make life interesting and overcoming them is what makes life meaningful."\033[0m\033[3m - Joshua J. Marine', + '\033[93m"Opportunities don\'t happen. 
You create them."\033[0m\033[3m - Chris Grosser', + '\033[93m"Your work is going to fill a large part of your life, and the only way to be truly satisfied is to do what you believe is great work."\033[0m\033[3m - Steve Jobs', + '\033[93m"The harder I work, the luckier I get."\033[0m\033[3m - Samuel Goldwyn', + '\033[93m"Don\'t be pushed around by the fears in your mind. Be led by the dreams in your heart."\033[0m\033[3m - Roy T. Bennett', + '\033[93m"Believe in yourself, and the rest will fall into place."\033[0m\033[3m - Unknown', + '\033[93m"Life is 10% what happens to us and 90% how we react to it."\033[0m\033[3m - Charles R. Swindoll', + '\033[93m"Success is not just about making money. It\'s about making a difference."\033[0m\033[3m - Unknown', + '\033[93m"The only limit to our realization of tomorrow will be our doubts of today."\033[0m\033[3m - Franklin D. Roosevelt', + '\033[93m"Great minds discuss ideas; average minds discuss events; small minds discuss people."\033[0m\033[3m - Eleanor Roosevelt', + '\033[93m"The future depends on what you do today."\033[0m\033[3m - Mahatma Gandhi', + '\033[93m"Don\'t be afraid to give up the good to go for the great."\033[0m\033[3m - John D. Rockefeller', + '\033[93m"Success usually comes to those who are too busy to be looking for it."\033[0m\033[3m - Henry David Thoreau', + '\033[93m"The secret to getting ahead is getting started."\033[0m\033[3m - Mark Twain', + '\033[93m"Every great dream begins with a dreamer."\033[0m\033[3m - Harriet Tubman', + '\033[93m"Do not wait for the opportunity. Create it."\033[0m\033[3m - George Bernard Shaw', + '\033[93m"Your time is now. Start where you are and never stop."\033[0m\033[3m - Roy T. Bennett', + '\033[93m"The only person you should try to be better than is the person you were yesterday."\033[0m\033[3m - Unknown', + '\033[93m"Success is not in what you have, but who you are."\033[0m\033[3m - Bo Bennett', + '\033[93m"Do one thing every day that scares you."\033[0m\033[3m - Eleanor Roosevelt', + '\033[93m"Failure is the opportunity to begin again more intelligently."\033[0m\033[3m - Henry Ford', + '\033[93m"Dream big and dare to fail."\033[0m\033[3m - Norman Vaughan', + '\033[93m"Everything you\'ve ever wanted is on the other side of fear."\033[0m\033[3m - George Addair', + '\033[93m"Believe you deserve it and the universe will serve it."\033[0m\033[3m - Unknown', + '\033[93m"Don\'t wait. The time will never be just right."\033[0m\033[3m - Napoleon Hill', + '\033[93m"The distance between insanity and genius is measured only by success."\033[0m\033[3m - Bruce Feirstein', + '\033[93m"Be the change that you wish to see in the world."\033[0m\033[3m - Mahatma Gandhi', + '\033[93m"Success is not about being better than someone else. It\'s about being better than you used to be."\033[0m\033[3m - Unknown', + '\033[93m"The best revenge is massive success."\033[0m\033[3m - Frank Sinatra', + '\033[93m"You have within you right now, everything you need to deal with whatever the world can throw at you."\033[0m\033[3m - Brian Tracy', + '\033[93m"Don\'t let yesterday take up too much of today."\033[0m\033[3m - Will Rogers', + '\033[93m"The biggest risk is not taking any risk. 
In a world that is changing quickly, the only strategy that is guaranteed to fail is not taking risks."\033[0m\033[3m - Mark Zuckerberg', + '\033[93m"The journey of a thousand miles begins with one step."\033[0m\033[3m - Lao Tzu', + '\033[93m"Every strike brings me closer to the next home run."\033[0m\033[3m - Babe Ruth', + ] + print(f'\n\t\033[3m{random.choice(art_quotes)}\033[0m\n') diff --git a/custom_nodes/was-node-suite-comfyui/__init__.py b/custom_nodes/was-node-suite-comfyui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b09dfa1a17a4d612937d01f7324e02e564a11806 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/__init__.py @@ -0,0 +1,3 @@ +from .WAS_Node_Suite import NODE_CLASS_MAPPINGS + +__all__ = ['NODE_CLASS_MAPPINGS'] diff --git a/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-310.pyc b/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db15e4815d068c886a2b40d0722a9c5890cb2549 Binary files /dev/null and b/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-310.pyc differ diff --git a/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-311.pyc b/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77709b061ab09802ab7699d817d1db90ebda2aa3 Binary files /dev/null and b/custom_nodes/was-node-suite-comfyui/__pycache__/WAS_Node_Suite.cpython-311.pyc differ diff --git a/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-310.pyc b/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2889873158ec67086dcac36ec2e1ece35b466777 Binary files /dev/null and b/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-310.pyc differ diff --git a/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-311.pyc b/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06bdf5a12b23303b7dd1c21a971ad39b10e85ea4 Binary files /dev/null and b/custom_nodes/was-node-suite-comfyui/__pycache__/__init__.cpython-311.pyc differ diff --git a/custom_nodes/was-node-suite-comfyui/install.bat b/custom_nodes/was-node-suite-comfyui/install.bat new file mode 100644 index 0000000000000000000000000000000000000000..5b4ac24c55f93cd1e3797b08574c957332a557f4 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/install.bat @@ -0,0 +1,16 @@ +@echo off + +set "requirements_txt=%~dp0\requirements.txt" +set "python_exec=..\..\..\python_embeded\python.exe" + +echo Installing WAS-NS ... + +if exist "%python_exec%" ( + echo Installing with ComfyUI Portable + "%python_exec%" -s -m pip install -r "%requirements_txt%" +) else ( + echo Installing with system Python + pip install -r "%requirements_txt%" +) + +pause \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/install_alt.bat b/custom_nodes/was-node-suite-comfyui/install_alt.bat new file mode 100644 index 0000000000000000000000000000000000000000..23add63bc6dc8504320a24322657e24386807961 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/install_alt.bat @@ -0,0 +1,20 @@ +@echo off + +set "requirements_txt=%~dp0\requirements.txt" +set "python_exec=..\..\..\python_embeded\python.exe" + +echo Installing WAS-NS ... 
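+REM NOTE (annotation): unlike install.bat, this alternate installer reads
+REM requirements.txt line by line and runs a separate pip call per dependency,
+REM so a single failing package does not abort the remaining installs; this
+REM appears to be the point of the "alt" variant.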
+ +if exist "%python_exec%" ( + echo Installing with ComfyUI Portable + for /f "delims=" %%i in (%requirements_txt%) do ( + %python_exec% -s -m pip install "%%i" + ) +) else ( + echo Installing with system Python + for /f "delims=" %%i in (%requirements_txt%) do ( + pip install "%%i" + ) +) + +pause \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/__init__.py b/custom_nodes/was-node-suite-comfyui/modules/BLIP/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/__init__.py b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/__init__.py @@ -0,0 +1 @@ + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/bert_config.json b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/bert_config.json new file mode 100644 index 0000000000000000000000000000000000000000..3ef38aabc7f966b53079e9d559dc59e459cc0051 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/bert_config.json @@ -0,0 +1,21 @@ +{ + "architectures": [ + "BertModel" + ], + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-12, + "max_position_embeddings": 512, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 0, + "type_vocab_size": 2, + "vocab_size": 30522, + "encoder_width": 768, + "add_cross_attention": true +} diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/caption_coco.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/caption_coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..42eab7030c0310ba2f265baf36fa1400aa6e5846 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/caption_coco.yaml @@ -0,0 +1,33 @@ +image_root: '/export/share/datasets/vision/coco/images/' +ann_root: 'annotation' +coco_gt_root: 'annotation/coco_gt' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth' + +# size of vit model; base or large +vit: 'base' +vit_grad_ckpt: False +vit_ckpt_layer: 0 +batch_size: 32 +init_lr: 1e-5 + +# vit: 'large' +# vit_grad_ckpt: True +# vit_ckpt_layer: 5 +# batch_size: 16 +# init_lr: 2e-6 + +image_size: 384 + +# generation configs +max_length: 20 +min_length: 5 +num_beams: 3 +prompt: 'a picture of ' + +# optimizer +weight_decay: 0.05 +min_lr: 0 +max_epoch: 5 + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/med_config.json b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/med_config.json new file mode 100644 index 0000000000000000000000000000000000000000..0ffad0a6f3c2f9f11b8faa84529d9860bb70327a --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/med_config.json @@ -0,0 +1,21 @@ +{ + "architectures": [ + "BertModel" + ], + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 768, + "initializer_range": 0.02, + "intermediate_size": 3072, + 
"layer_norm_eps": 1e-12, + "max_position_embeddings": 512, + "model_type": "bert", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 0, + "type_vocab_size": 2, + "vocab_size": 30524, + "encoder_width": 768, + "add_cross_attention": true +} diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nlvr.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nlvr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d1122aadb1a776bd347068233096b0c984f648b --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nlvr.yaml @@ -0,0 +1,21 @@ +image_root: '/export/share/datasets/vision/NLVR2/' +ann_root: 'annotation' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth' + +#size of vit model; base or large +vit: 'base' +batch_size_train: 16 +batch_size_test: 64 +vit_grad_ckpt: False +vit_ckpt_layer: 0 +max_epoch: 15 + +image_size: 384 + +# optimizer +weight_decay: 0.05 +init_lr: 3e-5 +min_lr: 0 + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nocaps.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nocaps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9028135859b94aef5324c85c80e376c609d8a089 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/nocaps.yaml @@ -0,0 +1,15 @@ +image_root: '/export/share/datasets/vision/nocaps/' +ann_root: 'annotation' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth' + +vit: 'base' +batch_size: 32 + +image_size: 384 + +max_length: 20 +min_length: 5 +num_beams: 3 +prompt: 'a picture of ' \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/pretrain.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/pretrain.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02355ee0228932803c661616485bf315e862b826 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/pretrain.yaml @@ -0,0 +1,27 @@ +train_file: ['/export/share/junnan-li/VL_pretrain/annotation/coco_karpathy_train.json', + '/export/share/junnan-li/VL_pretrain/annotation/vg_caption.json', + ] +laion_path: '' + +# size of vit model; base or large +vit: 'base' +vit_grad_ckpt: False +vit_ckpt_layer: 0 + +image_size: 224 +batch_size: 75 + +queue_size: 57600 +alpha: 0.4 + +# optimizer +weight_decay: 0.05 +init_lr: 3e-4 +min_lr: 1e-6 +warmup_lr: 1e-6 +lr_decay_rate: 0.9 +max_epoch: 20 +warmup_steps: 3000 + + + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_coco.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_coco.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8569e9b67112fe3605ac25e4fdc0231f7975378 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_coco.yaml @@ -0,0 +1,34 @@ +image_root: '/export/share/datasets/vision/coco/images/' +ann_root: 'annotation' +dataset: 'coco' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' + +# size of vit model; base or large + +vit: 'base' +batch_size_train: 32 +batch_size_test: 64 +vit_grad_ckpt: True +vit_ckpt_layer: 4 
+init_lr: 1e-5 + +# vit: 'large' +# batch_size_train: 16 +# batch_size_test: 32 +# vit_grad_ckpt: True +# vit_ckpt_layer: 12 +# init_lr: 5e-6 + +image_size: 384 +queue_size: 57600 +alpha: 0.4 +k_test: 256 +negative_all_rank: True + +# optimizer +weight_decay: 0.05 +min_lr: 0 +max_epoch: 6 + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_flickr.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_flickr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d75ea4eed87c9a001523c5e5914998c5e737594d --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_flickr.yaml @@ -0,0 +1,34 @@ +image_root: '/export/share/datasets/vision/flickr30k/' +ann_root: 'annotation' +dataset: 'flickr' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_flickr.pth' + +# size of vit model; base or large + +vit: 'base' +batch_size_train: 32 +batch_size_test: 64 +vit_grad_ckpt: True +vit_ckpt_layer: 4 +init_lr: 1e-5 + +# vit: 'large' +# batch_size_train: 16 +# batch_size_test: 32 +# vit_grad_ckpt: True +# vit_ckpt_layer: 10 +# init_lr: 5e-6 + +image_size: 384 +queue_size: 57600 +alpha: 0.4 +k_test: 128 +negative_all_rank: False + +# optimizer +weight_decay: 0.05 +min_lr: 0 +max_epoch: 6 + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_msrvtt.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_msrvtt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..395f62542bb22d706b8e19e2455d2c7298984d0b --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/retrieval_msrvtt.yaml @@ -0,0 +1,12 @@ +video_root: '/export/share/dongxuli/data/msrvtt_retrieval/videos' +ann_root: 'annotation' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' + +# size of vit model; base or large +vit: 'base' +batch_size: 64 +k_test: 128 +image_size: 384 +num_frm_test: 8 \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/vqa.yaml b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/vqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74327e6d0a34672023b44569558fe8beeb052548 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_configs/vqa.yaml @@ -0,0 +1,25 @@ +vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/' #followed by train2014/ +vg_root: '/export/share/datasets/vision/visual-genome/' #followed by image/ +train_files: ['vqa_train','vqa_val','vg_qa'] +ann_root: 'annotation' + +# set pretrained as a file path or an url +pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' + +# size of vit model; base or large +vit: 'base' +batch_size_train: 16 +batch_size_test: 32 +vit_grad_ckpt: False +vit_ckpt_layer: 0 +init_lr: 2e-5 + +image_size: 480 + +k_test: 128 +inference: 'rank' + +# optimizer +weight_decay: 0.05 +min_lr: 0 +max_epoch: 10 \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_med.py b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_med.py new file mode 100644 index 0000000000000000000000000000000000000000..7b00a35450b736180a805d4f4664b4fb95aeba01 --- /dev/null +++ 
b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_med.py @@ -0,0 +1,955 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +''' + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % 
(config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
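+        # NOTE (annotation): after transpose_for_scores the shapes are
+        #     query_layer: (batch, num_heads, query_len, head_dim)
+        #     key_layer:   (batch, num_heads, key_len, head_dim)
+        # so the matmul below yields raw scores of shape (batch, num_heads, query_len, key_len).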
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
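+        # NOTE (annotation): dropout here zeroes whole normalized attention weights,
+        # not hidden features, so individual query-key links are randomly severed.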
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode=='multimodal': + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
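+        # NOTE (annotation): the extended mask is additive and broadcastable to
+        # (batch_size, num_heads, seq_length, seq_length): 0.0 where attention is
+        # allowed and -10000.0 where it is masked, as built in get_extended_attention_mask.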
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False,
+        is_decoder=True,
+        reduction='mean',
+        mode='multimodal',
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
+            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + if reduction=='none': + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module.py b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module.py new file mode 100644 index 0000000000000000000000000000000000000000..db91d7b99b10634ab59489c29c5a0c32e8338e33 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module.py @@ -0,0 +1,423 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li +''' +import warnings +warnings.filterwarnings("ignore") + +from .blip_vit import VisionTransformer, interpolate_pos_embed +from .blip_med import BertConfig, BertModel, BertLMHeadModel +from transformers import BertTokenizer + +import torch +from torch import nn +import torch.nn.functional as F + +import os +from urllib.parse import urlparse +from timm.models.hub import download_cached_file +import numpy as np + +from pathlib import Path +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# BLIP + +class BLIP_Base(nn.Module): + def __init__(self, + med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'), + image_size = 224, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) + + + def forward(self, image, caption, mode): + + assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" + text = self.tokenizer(caption, return_tensors="pt").to(image.device) + + if mode=='image': + # return image features + image_embeds = self.visual_encoder(image) + return image_embeds + + elif mode=='text': + # return text features + text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, + return_dict = True, mode = 'text') + return text_output.last_hidden_state + + elif mode=='multimodal': + # return multimodel features + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + text.input_ids[:,0] = self.tokenizer.enc_token_id + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + return output.last_hidden_state + + + +class BLIP_Decoder(nn.Module): + def __init__(self, + med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'), + image_size = 384, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + prompt = 'a picture of ', + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_decoder = BertLMHeadModel(config=med_config) + + self.prompt = prompt + self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1 + + + def forward(self, image, caption): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device) + + 
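+        # NOTE (annotation): BertTokenizer prepends [CLS]; it is overwritten here with
+        # the decoder's BOS token ([DEC], registered in init_tokenizer) so the text
+        # decoder is conditioned like a generator rather than an encoder.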
text.input_ids[:,0] = self.tokenizer.bos_token_id + + decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100) + decoder_targets[:,:self.prompt_length] = -100 + + decoder_output = self.text_decoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + labels = decoder_targets, + return_dict = True, + ) + loss_lm = decoder_output.loss + + return loss_lm + + def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0): + image_embeds = self.visual_encoder(image) + + if not sample: + image_embeds = image_embeds.repeat_interleave(num_beams,dim=0) + + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts} + + prompt = [self.prompt] * image.size(0) + input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device) + input_ids[:,0] = self.tokenizer.bos_token_id + input_ids = input_ids[:, :-1] + + if sample: + #nucleus sampling + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + do_sample=True, + top_p=top_p, + num_return_sequences=1, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=1.1, + **model_kwargs) + else: + #beam search + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=repetition_penalty, + **model_kwargs) + + captions = [] + for output in outputs: + caption = self.tokenizer.decode(output, skip_special_tokens=True) + captions.append(caption[len(self.prompt):]) + return captions + + +def blip_decoder(pretrained='',**kwargs): + model = BLIP_Decoder(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def blip_feature_extractor(pretrained='',**kwargs): + model = BLIP_Base(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def init_tokenizer(): + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + tokenizer.add_special_tokens({'bos_token':'[DEC]'}) + tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) + tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] + return tokenizer + + +def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): + + assert vit in ['base', 'large'], "vit parameter must be base or large" + if vit=='base': + vision_width = 768 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, + num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0 or drop_path_rate + ) + elif vit=='large': + vision_width = 1024 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, + num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0.1 or drop_path_rate + ) + return visual_encoder, vision_width + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + +def 
load_checkpoint(model,url_or_filename): + if is_url(url_or_filename): + cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) + checkpoint = torch.load(cached_file, map_location='cpu') + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location='cpu') + else: + raise RuntimeError('checkpoint url or path is invalid') + + state_dict = checkpoint['model'] + + state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): + state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], + model.visual_encoder_m) + for key in model.state_dict().keys(): + if key in state_dict.keys(): + if state_dict[key].shape!=model.state_dict()[key].shape: + del state_dict[key] + + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from %s'%url_or_filename) + return model,msg + +# BLIP VQA + +class BLIP_VQA(nn.Module): + def __init__(self, + med_config = Path(LOCAL_PATH, 'blip_configs/med_config.json'), + image_size = 480, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1) + self.tokenizer = init_tokenizer() + + encoder_config = BertConfig.from_json_file(med_config) + encoder_config.encoder_width = vision_width + self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False) + + decoder_config = BertConfig.from_json_file(med_config) + self.text_decoder = BertLMHeadModel(config=decoder_config) + + + def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + question = self.tokenizer(question, padding='longest', truncation=True, max_length=35, + return_tensors="pt").to(image.device) + question.input_ids[:,0] = self.tokenizer.enc_token_id + + if train: + ''' + n: number of answers for each question + weights: weight for each answer + ''' + answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device) + answer.input_ids[:,0] = self.tokenizer.bos_token_id + answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100) + + question_output = self.text_encoder(question.input_ids, + attention_mask = question.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True) + + question_states = [] + question_atts = [] + for b, n in enumerate(n): + question_states += [question_output.last_hidden_state[b]]*n + question_atts += [question.attention_mask[b]]*n + question_states = torch.stack(question_states,0) + question_atts = torch.stack(question_atts,0) + + answer_output = self.text_decoder(answer.input_ids, + attention_mask = answer.attention_mask, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + labels = answer_targets, + return_dict = True, + reduction = 'none', + ) + + loss = weights * answer_output.loss + loss = loss.sum()/image.size(0) + + return loss + + + else: + 
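+            # inference path: encode the question over the image features once,
+            # then either decode an answer with beam search (inference='generate')
+            # or score a fixed candidate answer list (inference='rank', which keeps
+            # the k_test most likely first tokens and ranks candidates by LM loss)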
question_output = self.text_encoder(question.input_ids, + attention_mask = question.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True) + + if inference=='generate': + num_beams = 3 + question_states = question_output.last_hidden_state.repeat_interleave(num_beams,dim=0) + question_atts = torch.ones(question_states.size()[:-1],dtype=torch.long).to(question_states.device) + model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask":question_atts} + + bos_ids = torch.full((image.size(0),1),fill_value=self.tokenizer.bos_token_id,device=image.device) + + outputs = self.text_decoder.generate(input_ids=bos_ids, + max_length=10, + min_length=1, + num_beams=num_beams, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + **model_kwargs) + + answers = [] + for output in outputs: + answer = self.tokenizer.decode(output, skip_special_tokens=True) + answers.append(answer) + return answers + + elif inference=='rank': + max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask, + answer.input_ids, answer.attention_mask, k_test) + return max_ids + + + + def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k): + + num_ques = question_states.size(0) + start_ids = answer_ids[0,0].repeat(num_ques,1) # bos token + + start_output = self.text_decoder(start_ids, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + return_dict = True, + reduction = 'none') + logits = start_output.logits[:,0,:] # first token's logit + + # topk_probs: top-k probability + # topk_ids: [num_question, k] + answer_first_token = answer_ids[:,1] + prob_first_token = F.softmax(logits,dim=1).index_select(dim=1, index=answer_first_token) + topk_probs, topk_ids = prob_first_token.topk(k,dim=1) + + # answer input: [num_question*k, answer_len] + input_ids = [] + input_atts = [] + for b, topk_id in enumerate(topk_ids): + input_ids.append(answer_ids.index_select(dim=0, index=topk_id)) + input_atts.append(answer_atts.index_select(dim=0, index=topk_id)) + input_ids = torch.cat(input_ids,dim=0) + input_atts = torch.cat(input_atts,dim=0) + + targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100) + + # repeat encoder's output for top-k answers + question_states = tile(question_states, 0, k) + question_atts = tile(question_atts, 0, k) + + output = self.text_decoder(input_ids, + attention_mask = input_atts, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + labels = targets_ids, + return_dict = True, + reduction = 'none') + + log_probs_sum = -output.loss + log_probs_sum = log_probs_sum.view(num_ques,k) + + max_topk_ids = log_probs_sum.argmax(dim=1) + max_ids = topk_ids[max_topk_ids>=0,max_topk_ids] + + return max_ids + + +def blip_vqa(pretrained='',**kwargs): + model = BLIP_VQA(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) +# assert(len(msg.missing_keys)==0) + return model + + +def tile(x, dim, n_tile): + init_dim = x.size(dim) + repeat_idx = [1] * x.dim() + repeat_idx[dim] = n_tile + x = x.repeat(*(repeat_idx)) + order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])) + return torch.index_select(x, dim, order_index.to(x.device)) + + diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module_license.txt b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module_license.txt new file 
mode 100644 index 0000000000000000000000000000000000000000..a63e87f4e1e90c96861648a16a7304d97d3c3f7b --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_module_license.txt @@ -0,0 +1,12 @@ +Copyright (c) 2022, Salesforce.com, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_vit.py b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..cec3d8e08ed4451d65392feb2e9f4848d1ef3899 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/modules/BLIP/blip_vit.py @@ -0,0 +1,305 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on timm code base + * https://github.com/rwightman/pytorch-image-models/tree/master/timm +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath +from timm.models.helpers import named_apply, adapt_input_conv + +from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.attn_gradients = None + self.attention_map = None + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def forward(self, x, register_hook=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + if register_hook: + self.save_attention_map(attn) + attn.register_hook(self.save_attn_gradients) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if use_grad_checkpointing: + self.attn = checkpoint_wrapper(self.attn) + self.mlp = checkpoint_wrapper(self.mlp) + + def forward(self, x, register_hook=False): + x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, + use_grad_checkpointing=False, ckpt_layer=0): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward(self, x, register_blk=-1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + self.pos_embed[:,:x.size(1),:] + x 
= self.pos_drop(x) + + for i,blk in enumerate(self.blocks): + x = blk(x, register_blk==i) + x = self.norm(x) + + return x + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) +# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: +# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) +# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) +# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: +# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) +# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + 
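+        # _n2p converts Flax kernels to PyTorch layout (transposing as needed);
+        # the separate query/key/value kernels below are flattened and
+        # concatenated to fill each block's fused qkv projection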
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): + # interpolate position embedding + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = visual_encoder.patch_embed.num_patches + num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + + if orig_size!=new_size: + # class_token and dist_token are kept unchanged + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) + + return new_pos_embed + else: + return pos_embed_checkpoint \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/modules/__init__.py b/custom_nodes/was-node-suite-comfyui/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/was-node-suite-comfyui/requirements.txt b/custom_nodes/was-node-suite-comfyui/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae843ca1876947d2cf69aa2ebd60f7445566cd38 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/requirements.txt @@ -0,0 +1,20 @@ +cmake +fairscale>=0.4.4 +git+https://github.com/WASasquatch/img2texture.git +git+https://github.com/WASasquatch/cstr +gitpython +imageio +joblib +matplotlib +numba +numpy>=1.18.5, <1.25.0 +opencv-python-headless[ffmpeg]<=4.7.0.72 +pilgram +git+https://github.com/WASasquatch/ffmpy.git +rembg +scikit-image==0.20.0 +scikit-learn +scipy +timm>=0.4.12 +tqdm +transformers==4.26.1 diff --git a/custom_nodes/was-node-suite-comfyui/res/font.ttf b/custom_nodes/was-node-suite-comfyui/res/font.ttf new file mode 100644 index 0000000000000000000000000000000000000000..5eaf3c73aa73f0a84d1fd524470951dcb1a47f9a Binary files /dev/null and b/custom_nodes/was-node-suite-comfyui/res/font.ttf differ diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_eye.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_eye.xml new file mode 100644 index 
0000000000000000000000000000000000000000..b21e3b93d74b5130b5a1323be9fc46017ab0e8c7 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_eye.xml @@ -0,0 +1,12213 @@ + + + +BOOST + HAAR + 20 + 20 + + 93 + + 0 + 24 + + <_> + 6 + -1.4562760591506958e+00 + + <_> + + 0 -1 0 1.2963959574699402e-01 + + -7.7304208278656006e-01 6.8350148200988770e-01 + <_> + + 0 -1 1 -4.6326808631420135e-02 + + 5.7352751493453979e-01 -4.9097689986228943e-01 + <_> + + 0 -1 2 -1.6173090785741806e-02 + + 6.0254341363906860e-01 -3.1610709428787231e-01 + <_> + + 0 -1 3 -4.5828841626644135e-02 + + 6.4177548885345459e-01 -1.5545040369033813e-01 + <_> + + 0 -1 4 -5.3759619593620300e-02 + + 5.4219317436218262e-01 -2.0480829477310181e-01 + <_> + + 0 -1 5 3.4171190112829208e-02 + + -2.3388190567493439e-01 4.8410901427268982e-01 + <_> + 12 + -1.2550230026245117e+00 + + <_> + + 0 -1 6 -2.1727620065212250e-01 + + 7.1098899841308594e-01 -5.9360730648040771e-01 + <_> + + 0 -1 7 1.2071969918906689e-02 + + -2.8240481019020081e-01 5.9013551473617554e-01 + <_> + + 0 -1 8 -1.7854139208793640e-02 + + 5.3137522935867310e-01 -2.2758960723876953e-01 + <_> + + 0 -1 9 2.2333610802888870e-02 + + -1.7556099593639374e-01 6.3356137275695801e-01 + <_> + + 0 -1 10 -9.1420017182826996e-02 + + 6.1563092470169067e-01 -1.6899530589580536e-01 + <_> + + 0 -1 11 2.8973650187253952e-02 + + -1.2250079959630966e-01 7.4401170015335083e-01 + <_> + + 0 -1 12 7.8203463926911354e-03 + + 1.6974370181560516e-01 -6.5441650152206421e-01 + <_> + + 0 -1 13 2.0340489223599434e-02 + + -1.2556649744510651e-01 8.2710450887680054e-01 + <_> + + 0 -1 14 -1.1926149949431419e-02 + + 3.8605681061744690e-01 -2.0992340147495270e-01 + <_> + + 0 -1 15 -9.7281101625412703e-04 + + -6.3761192560195923e-01 1.2952390313148499e-01 + <_> + + 0 -1 16 1.8322050891583785e-05 + + -3.4631478786468506e-01 2.2924269735813141e-01 + <_> + + 0 -1 17 -8.0854417756199837e-03 + + -6.3665801286697388e-01 1.3078659772872925e-01 + <_> + 9 + -1.3728189468383789e+00 + + <_> + + 0 -1 18 -1.1812269687652588e-01 + + 6.7844521999359131e-01 -5.0045782327651978e-01 + <_> + + 0 -1 19 -3.4332759678363800e-02 + + 6.7186361551284790e-01 -3.5744878649711609e-01 + <_> + + 0 -1 20 -2.1530799567699432e-02 + + 7.2220700979232788e-01 -1.8192419409751892e-01 + <_> + + 0 -1 21 -2.1909970790147781e-02 + + 6.6529387235641479e-01 -2.7510228753089905e-01 + <_> + + 0 -1 22 -2.8713539242744446e-02 + + 6.9955700635910034e-01 -1.9615580141544342e-01 + <_> + + 0 -1 23 -1.1467480100691319e-02 + + 5.9267348051071167e-01 -2.2097350656986237e-01 + <_> + + 0 -1 24 -2.2611169144511223e-02 + + 3.4483069181442261e-01 -3.8379558920860291e-01 + <_> + + 0 -1 25 -1.9308089977130294e-03 + + -7.9445719718933105e-01 1.5628659725189209e-01 + <_> + + 0 -1 26 5.6419910833938047e-05 + + -3.0896010994911194e-01 3.5431089997291565e-01 + <_> + 16 + -1.2879480123519897e+00 + + <_> + + 0 -1 27 1.9886520504951477e-01 + + -5.2860701084136963e-01 3.5536721348762512e-01 + <_> + + 0 -1 28 -3.6008939146995544e-02 + + 4.2109689116477966e-01 -3.9348980784416199e-01 + <_> + + 0 -1 29 -7.7569849789142609e-02 + + 4.7991541028022766e-01 -2.5122168660163879e-01 + <_> + + 0 -1 30 8.2630853285081685e-05 + + -3.8475489616394043e-01 3.1849220395088196e-01 + <_> + + 0 -1 31 3.2773229759186506e-04 + + -2.6427319645881653e-01 3.2547241449356079e-01 + <_> + + 0 -1 32 -1.8574850633740425e-02 + + 4.6736589074134827e-01 -1.5067270398139954e-01 + <_> + + 0 -1 33 -7.0008762122597545e-05 + + 2.9313150048255920e-01 -2.5365099310874939e-01 + <_> + + 0 -1 34 
-1.8552130088210106e-02 + + 4.6273660659790039e-01 -1.3148050010204315e-01 + <_> + + 0 -1 35 -1.3030420057475567e-02 + + 4.1627219319343567e-01 -1.7751489579677582e-01 + <_> + + 0 -1 36 6.5694141085259616e-05 + + -2.8035101294517517e-01 2.6680740714073181e-01 + <_> + + 0 -1 37 1.7005260451696813e-04 + + -2.7027249336242676e-01 2.3981650173664093e-01 + <_> + + 0 -1 38 -3.3129199873656034e-03 + + 4.4411438703536987e-01 -1.4428889751434326e-01 + <_> + + 0 -1 39 1.7583490116521716e-03 + + -1.6126190125942230e-01 4.2940768599510193e-01 + <_> + + 0 -1 40 -2.5194749236106873e-02 + + 4.0687298774719238e-01 -1.8202580511569977e-01 + <_> + + 0 -1 41 1.4031709870323539e-03 + + 8.4759786725044250e-02 -8.0018568038940430e-01 + <_> + + 0 -1 42 -7.3991729877889156e-03 + + 5.5766099691390991e-01 -1.1843159794807434e-01 + <_> + 23 + -1.2179850339889526e+00 + + <_> + + 0 -1 43 -2.9943080618977547e-02 + + 3.5810810327529907e-01 -3.8487631082534790e-01 + <_> + + 0 -1 44 -1.2567380070686340e-01 + + 3.9316931366920471e-01 -3.0012258887290955e-01 + <_> + + 0 -1 45 5.3635272197425365e-03 + + -4.3908619880676270e-01 1.9257010519504547e-01 + <_> + + 0 -1 46 -8.0971820279955864e-03 + + 3.9906668663024902e-01 -2.3407870531082153e-01 + <_> + + 0 -1 47 -1.6597909852862358e-02 + + 4.2095288634300232e-01 -2.2674840688705444e-01 + <_> + + 0 -1 48 -2.0199299324303865e-03 + + -7.4156731367111206e-01 1.2601189315319061e-01 + <_> + + 0 -1 49 -1.5202340437099338e-03 + + -7.6154601573944092e-01 8.6373612284660339e-02 + <_> + + 0 -1 50 -4.9663940444588661e-03 + + 4.2182239890098572e-01 -1.7904919385910034e-01 + <_> + + 0 -1 51 -1.9207600504159927e-02 + + 4.6894899010658264e-01 -1.4378750324249268e-01 + <_> + + 0 -1 52 -1.2222680263221264e-02 + + 3.2842078804969788e-01 -2.1802149713039398e-01 + <_> + + 0 -1 53 5.7548668235540390e-02 + + -3.6768808960914612e-01 2.4357110261917114e-01 + <_> + + 0 -1 54 -9.5794079825282097e-03 + + -7.2245067358016968e-01 6.3664563000202179e-02 + <_> + + 0 -1 55 -2.9545740690082312e-03 + + 3.5846439003944397e-01 -1.6696329414844513e-01 + <_> + + 0 -1 56 -4.2017991654574871e-03 + + 3.9094808697700500e-01 -1.2041790038347244e-01 + <_> + + 0 -1 57 -1.3624990358948708e-02 + + -5.8767718076705933e-01 8.8404729962348938e-02 + <_> + + 0 -1 58 6.2853112467564642e-05 + + -2.6348459720611572e-01 2.1419279277324677e-01 + <_> + + 0 -1 59 -2.6782939676195383e-03 + + -7.8390169143676758e-01 8.0526962876319885e-02 + <_> + + 0 -1 60 -7.0597179234027863e-02 + + 4.1469261050224304e-01 -1.3989959657192230e-01 + <_> + + 0 -1 61 9.2093646526336670e-02 + + -1.3055180013179779e-01 5.0435781478881836e-01 + <_> + + 0 -1 62 -8.8004386052489281e-03 + + 3.6609750986099243e-01 -1.4036649465560913e-01 + <_> + + 0 -1 63 7.5080977694597095e-05 + + -2.9704439640045166e-01 2.0702940225601196e-01 + <_> + + 0 -1 64 -2.9870450962334871e-03 + + 3.5615700483322144e-01 -1.5445969998836517e-01 + <_> + + 0 -1 65 -2.6441509835422039e-03 + + -5.4353517293930054e-01 1.0295110195875168e-01 + <_> + 27 + -1.2905240058898926e+00 + + <_> + + 0 -1 66 -4.7862470149993896e-02 + + 4.1528239846229553e-01 -3.4185820817947388e-01 + <_> + + 0 -1 67 8.7350532412528992e-02 + + -3.8749781250953674e-01 2.4204200506210327e-01 + <_> + + 0 -1 68 -1.6849499195814133e-02 + + 5.3082478046417236e-01 -1.7282910645008087e-01 + <_> + + 0 -1 69 -2.8870029374957085e-02 + + 3.5843509435653687e-01 -2.2402590513229370e-01 + <_> + + 0 -1 70 2.5679389946162701e-03 + + 1.4990499615669250e-01 -6.5609407424926758e-01 + <_> + + 0 -1 71 -2.4116659536957741e-02 + + 
5.5889678001403809e-01 -1.4810280501842499e-01 + <_> + + 0 -1 72 -3.2826658338308334e-02 + + 4.6468681097030640e-01 -1.0785529762506485e-01 + <_> + + 0 -1 73 -1.5233060345053673e-02 + + -7.3954427242279053e-01 5.6236881762742996e-02 + <_> + + 0 -1 74 -3.0209511169232428e-04 + + -4.5548820495605469e-01 9.7069837152957916e-02 + <_> + + 0 -1 75 7.5365108205005527e-04 + + 9.5147296786308289e-02 -5.4895019531250000e-01 + <_> + + 0 -1 76 -1.0638950392603874e-02 + + 4.0912970900535583e-01 -1.2308409810066223e-01 + <_> + + 0 -1 77 -7.5217830017209053e-03 + + 4.0289148688316345e-01 -1.6048780083656311e-01 + <_> + + 0 -1 78 -1.0677099972963333e-01 + + 6.1759322881698608e-01 -7.3091186583042145e-02 + <_> + + 0 -1 79 1.6256919130682945e-02 + + -1.3103680312633514e-01 3.7453651428222656e-01 + <_> + + 0 -1 80 -2.0679360255599022e-02 + + -7.1402907371520996e-01 5.2390009164810181e-02 + <_> + + 0 -1 81 1.7052369192242622e-02 + + 1.2822860479354858e-01 -3.1080681085586548e-01 + <_> + + 0 -1 82 -5.7122060097754002e-03 + + -6.0556507110595703e-01 8.1884756684303284e-02 + <_> + + 0 -1 83 2.0851430235779844e-05 + + -2.6812988519668579e-01 1.4453840255737305e-01 + <_> + + 0 -1 84 7.9284431412816048e-03 + + -7.8795351088047028e-02 5.6762582063674927e-01 + <_> + + 0 -1 85 -2.5217379443347454e-03 + + 3.7068629264831543e-01 -1.3620570302009583e-01 + <_> + + 0 -1 86 -2.2426199167966843e-02 + + -6.8704998493194580e-01 5.1062859594821930e-02 + <_> + + 0 -1 87 -7.6451441273093224e-03 + + 2.3492220044136047e-01 -1.7905959486961365e-01 + <_> + + 0 -1 88 -1.1175329564139247e-03 + + -5.9869050979614258e-01 7.4324436485767365e-02 + <_> + + 0 -1 89 1.9212789833545685e-02 + + -1.5702550113201141e-01 2.9737469553947449e-01 + <_> + + 0 -1 90 5.6293429806828499e-03 + + -9.9769018590450287e-02 4.2130270600318909e-01 + <_> + + 0 -1 91 -9.5671862363815308e-03 + + -6.0858798027038574e-01 7.3506258428096771e-02 + <_> + + 0 -1 92 1.1217960156500340e-02 + + -1.0320810228586197e-01 4.1909849643707275e-01 + <_> + 28 + -1.1600480079650879e+00 + + <_> + + 0 -1 93 -1.7486440017819405e-02 + + 3.1307280063629150e-01 -3.3681181073188782e-01 + <_> + + 0 -1 94 3.0714649707078934e-02 + + -1.8766190111637115e-01 5.3780800104141235e-01 + <_> + + 0 -1 95 -2.2188719362020493e-02 + + 3.6637881398200989e-01 -1.6124810278415680e-01 + <_> + + 0 -1 96 -5.0700771680567414e-05 + + 2.1245710551738739e-01 -2.8444620966911316e-01 + <_> + + 0 -1 97 -7.0170420221984386e-03 + + 3.9543110132217407e-01 -1.3173590600490570e-01 + <_> + + 0 -1 98 -6.8563609384000301e-03 + + 3.0373859405517578e-01 -2.0657819509506226e-01 + <_> + + 0 -1 99 -1.4129259623587132e-02 + + -7.6503008604049683e-01 9.8213188350200653e-02 + <_> + + 0 -1 100 -4.7915481030941010e-02 + + 4.8307389020919800e-01 -1.3006809353828430e-01 + <_> + + 0 -1 101 4.7032979637151584e-05 + + -2.5216570496559143e-01 2.4386680126190186e-01 + <_> + + 0 -1 102 1.0221180273219943e-03 + + 6.8857602775096893e-02 -6.5861141681671143e-01 + <_> + + 0 -1 103 -2.6056109927594662e-03 + + 4.2942029237747192e-01 -1.3022460043430328e-01 + <_> + + 0 -1 104 5.4505340813193470e-05 + + -1.9288620352745056e-01 2.8958499431610107e-01 + <_> + + 0 -1 105 -6.6721157054416835e-05 + + 3.0290710926055908e-01 -1.9854369759559631e-01 + <_> + + 0 -1 106 2.6281431317329407e-01 + + -2.3293940722942352e-01 2.3692460358142853e-01 + <_> + + 0 -1 107 -2.3569669574499130e-02 + + 1.9401040673255920e-01 -2.8484618663787842e-01 + <_> + + 0 -1 108 -3.9120172150433064e-03 + + 5.5378979444503784e-01 -9.5665678381919861e-02 + <_> + + 0 -1 109 
5.0788799853762612e-05 + + -2.3912659287452698e-01 2.1799489855766296e-01 + <_> + + 0 -1 110 -7.8732017427682877e-03 + + 4.0697428584098816e-01 -1.2768040597438812e-01 + <_> + + 0 -1 111 -1.6778609715402126e-03 + + -5.7744657993316650e-01 9.7324788570404053e-02 + <_> + + 0 -1 112 -2.6832430739887059e-04 + + 2.9021880030632019e-01 -1.6831269860267639e-01 + <_> + + 0 -1 113 7.8687182394787669e-05 + + -1.9551570713520050e-01 2.7720969915390015e-01 + <_> + + 0 -1 114 1.2953500263392925e-02 + + -9.6838317811489105e-02 4.0323871374130249e-01 + <_> + + 0 -1 115 -1.3043959625065327e-02 + + 4.7198569774627686e-01 -8.9287549257278442e-02 + <_> + + 0 -1 116 3.0261781066656113e-03 + + -1.3623380661010742e-01 3.0686271190643311e-01 + <_> + + 0 -1 117 -6.0438038781285286e-03 + + -7.7954101562500000e-01 5.7316310703754425e-02 + <_> + + 0 -1 118 -2.2507249377667904e-03 + + 3.0877059698104858e-01 -1.5006309747695923e-01 + <_> + + 0 -1 119 1.5826810151338577e-02 + + 6.4551889896392822e-02 -7.2455567121505737e-01 + <_> + + 0 -1 120 6.5864507632795721e-05 + + -1.7598840594291687e-01 2.3210389912128448e-01 + <_> + 36 + -1.2257250547409058e+00 + + <_> + + 0 -1 121 -2.7854869142174721e-02 + + 4.5518448948860168e-01 -1.8099910020828247e-01 + <_> + + 0 -1 122 1.2895040214061737e-01 + + -5.2565532922744751e-01 1.6188900172710419e-01 + <_> + + 0 -1 123 2.4403180927038193e-02 + + -1.4974960684776306e-01 4.2357379198074341e-01 + <_> + + 0 -1 124 -2.4458570405840874e-03 + + 3.2948669791221619e-01 -1.7447690665721893e-01 + <_> + + 0 -1 125 -3.5336529836058617e-03 + + 4.7426640987396240e-01 -7.3618359863758087e-02 + <_> + + 0 -1 126 5.1358150813030079e-05 + + -3.0421930551528931e-01 1.5633270144462585e-01 + <_> + + 0 -1 127 -1.6225680708885193e-02 + + 2.3002180457115173e-01 -2.0359820127487183e-01 + <_> + + 0 -1 128 -4.6007009223103523e-03 + + 4.0459269285202026e-01 -1.3485440611839294e-01 + <_> + + 0 -1 129 -2.1928999572992325e-02 + + -6.8724489212036133e-01 8.0684266984462738e-02 + <_> + + 0 -1 130 -2.8971210122108459e-03 + + -6.9619607925415039e-01 4.8545219004154205e-02 + <_> + + 0 -1 131 -4.4074649922549725e-03 + + 2.5166261196136475e-01 -1.6236649453639984e-01 + <_> + + 0 -1 132 2.8437169268727303e-02 + + 6.0394261032342911e-02 -6.6744458675384521e-01 + <_> + + 0 -1 133 8.3212882280349731e-02 + + 6.4357921481132507e-02 -5.3626042604446411e-01 + <_> + + 0 -1 134 -1.2419329956173897e-02 + + -7.0816862583160400e-01 5.7526610791683197e-02 + <_> + + 0 -1 135 -4.6992599964141846e-03 + + 5.1254332065582275e-01 -8.7350800633430481e-02 + <_> + + 0 -1 136 -7.8025809489190578e-04 + + 2.6687660813331604e-01 -1.7961509525775909e-01 + <_> + + 0 -1 137 -1.9724339246749878e-02 + + -6.7563730478286743e-01 7.2941906750202179e-02 + <_> + + 0 -1 138 1.0269250487908721e-03 + + 5.3919319063425064e-02 -5.5540180206298828e-01 + <_> + + 0 -1 139 -2.5957189500331879e-02 + + 5.6362527608871460e-01 -7.1898393332958221e-02 + <_> + + 0 -1 140 -1.2552699772641063e-03 + + -5.0346630811691284e-01 8.9691452682018280e-02 + <_> + + 0 -1 141 -4.9970578402280807e-02 + + 1.7685119807720184e-01 -2.2301959991455078e-01 + <_> + + 0 -1 142 -2.9899610672146082e-03 + + 3.9122420549392700e-01 -1.0149750113487244e-01 + <_> + + 0 -1 143 4.8546842299401760e-03 + + -1.1770179867744446e-01 4.2190939188003540e-01 + <_> + + 0 -1 144 1.0448860120959580e-04 + + -1.7333979904651642e-01 2.2344440221786499e-01 + <_> + + 0 -1 145 5.9689260524464771e-05 + + -2.3409630358219147e-01 1.6558240354061127e-01 + <_> + + 0 -1 146 -1.3423919677734375e-02 + + 4.3023818731307983e-01 
-9.9723652005195618e-02 + <_> + + 0 -1 147 2.2581999655812979e-03 + + 7.2720989584922791e-02 -5.7501018047332764e-01 + <_> + + 0 -1 148 -1.2546280398964882e-02 + + 3.6184579133987427e-01 -1.1457010358572006e-01 + <_> + + 0 -1 149 -2.8705769218504429e-03 + + 2.8210538625717163e-01 -1.2367550283670425e-01 + <_> + + 0 -1 150 1.9785640761256218e-02 + + 4.7876749187707901e-02 -8.0666238069534302e-01 + <_> + + 0 -1 151 4.7588930465281010e-03 + + -1.0925389826297760e-01 3.3746978640556335e-01 + <_> + + 0 -1 152 -6.9974269717931747e-03 + + -8.0295938253402710e-01 4.5706700533628464e-02 + <_> + + 0 -1 153 -1.3033480383455753e-02 + + 1.8680439889431000e-01 -1.7688910663127899e-01 + <_> + + 0 -1 154 -1.3742579612880945e-03 + + 2.7725479006767273e-01 -1.2809009850025177e-01 + <_> + + 0 -1 155 2.7657810132950544e-03 + + 9.0758942067623138e-02 -4.2594739794731140e-01 + <_> + + 0 -1 156 2.8941841446794569e-04 + + -3.8816329836845398e-01 8.9267797768115997e-02 + <_> + 47 + -1.2863140106201172e+00 + + <_> + + 0 -1 157 -1.4469229616224766e-02 + + 3.7507829070091248e-01 -2.4928289651870728e-01 + <_> + + 0 -1 158 -1.3317629694938660e-01 + + 3.0166378617286682e-01 -2.2414070367813110e-01 + <_> + + 0 -1 159 -1.0132160037755966e-02 + + 3.6985591053962708e-01 -1.7850010097026825e-01 + <_> + + 0 -1 160 -7.8511182218790054e-03 + + 4.6086761355400085e-01 -1.2931390106678009e-01 + <_> + + 0 -1 161 -1.4295839704573154e-02 + + 4.4841429591178894e-01 -1.0226240009069443e-01 + <_> + + 0 -1 162 -5.9606940485537052e-03 + + 2.7927988767623901e-01 -1.5323829650878906e-01 + <_> + + 0 -1 163 1.0932769626379013e-02 + + -1.5141740441322327e-01 3.9889648556709290e-01 + <_> + + 0 -1 164 5.0430990086169913e-05 + + -2.2681570053100586e-01 2.1644389629364014e-01 + <_> + + 0 -1 165 -5.8431681245565414e-03 + + 4.5420148968696594e-01 -1.2587159872055054e-01 + <_> + + 0 -1 166 -2.2346209734678268e-02 + + -6.2690192461013794e-01 8.2403123378753662e-02 + <_> + + 0 -1 167 -4.8836669884622097e-03 + + 2.6359251141548157e-01 -1.4686630666255951e-01 + <_> + + 0 -1 168 7.5506002758629620e-05 + + -2.4507020413875580e-01 1.6678880155086517e-01 + <_> + + 0 -1 169 -4.9026997294276953e-04 + + -4.2649960517883301e-01 8.9973561465740204e-02 + <_> + + 0 -1 170 1.4861579984426498e-03 + + -1.2040250003337860e-01 3.0097651481628418e-01 + <_> + + 0 -1 171 -1.1988339945673943e-02 + + 2.7852478623390198e-01 -1.2244340032339096e-01 + <_> + + 0 -1 172 1.0502239689230919e-02 + + 4.0452759712934494e-02 -7.4050408601760864e-01 + <_> + + 0 -1 173 -3.0963009223341942e-02 + + -6.2842690944671631e-01 4.8013761639595032e-02 + <_> + + 0 -1 174 1.1414520442485809e-02 + + 3.9405211806297302e-02 -7.1674120426177979e-01 + <_> + + 0 -1 175 -1.2337000109255314e-02 + + 1.9941329956054688e-01 -1.9274300336837769e-01 + <_> + + 0 -1 176 -5.9942267835140228e-03 + + 5.1318162679672241e-01 -6.1658058315515518e-02 + <_> + + 0 -1 177 -1.1923230485990644e-03 + + -7.2605299949645996e-01 5.0652720034122467e-02 + <_> + + 0 -1 178 -7.4582789093255997e-03 + + 2.9603078961372375e-01 -1.1754789948463440e-01 + <_> + + 0 -1 179 2.7877509128302336e-03 + + 4.5068711042404175e-02 -6.9535410404205322e-01 + <_> + + 0 -1 180 -2.2503209766000509e-04 + + 2.0047250390052795e-01 -1.5775249898433685e-01 + <_> + + 0 -1 181 -5.0367889925837517e-03 + + 2.9299819469451904e-01 -1.1700499802827835e-01 + <_> + + 0 -1 182 7.4742160737514496e-02 + + -1.1392319947481155e-01 3.0256620049476624e-01 + <_> + + 0 -1 183 2.0255519077181816e-02 + + -1.0515890270471573e-01 4.0670460462570190e-01 + <_> + + 0 -1 184 
4.4214509427547455e-02 + + -2.7631640434265137e-01 1.2363869696855545e-01 + <_> + + 0 -1 185 -8.7259558495134115e-04 + + 2.4355030059814453e-01 -1.3300949335098267e-01 + <_> + + 0 -1 186 -2.4453739169985056e-03 + + -5.3866171836853027e-01 6.2510646879673004e-02 + <_> + + 0 -1 187 8.2725353422574699e-05 + + -2.0772209763526917e-01 1.6270439326763153e-01 + <_> + + 0 -1 188 -3.6627110093832016e-02 + + 3.6568409204483032e-01 -9.0330280363559723e-02 + <_> + + 0 -1 189 3.0996399000287056e-03 + + -1.3183020055294037e-01 2.5354298949241638e-01 + <_> + + 0 -1 190 -2.4709280114620924e-03 + + -5.6853497028350830e-01 5.3505431860685349e-02 + <_> + + 0 -1 191 -1.4114670455455780e-02 + + -4.8599010705947876e-01 5.8485250920057297e-02 + <_> + + 0 -1 192 8.4537261864170432e-04 + + -8.0093637108802795e-02 4.0265649557113647e-01 + <_> + + 0 -1 193 -7.1098632179200649e-03 + + 4.4703239202499390e-01 -6.2947437167167664e-02 + <_> + + 0 -1 194 -1.9125960767269135e-02 + + -6.6422867774963379e-01 4.9822770059108734e-02 + <_> + + 0 -1 195 -5.0773010589182377e-03 + + 1.7379400134086609e-01 -1.6850599646568298e-01 + <_> + + 0 -1 196 -2.9198289848864079e-03 + + -6.0110282897949219e-01 5.7427939027547836e-02 + <_> + + 0 -1 197 -2.4902150034904480e-02 + + 2.3397980630397797e-01 -1.1818459630012512e-01 + <_> + + 0 -1 198 2.0147779956459999e-02 + + -8.9459821581840515e-02 3.6024400591850281e-01 + <_> + + 0 -1 199 1.7597640398889780e-03 + + 4.9458440393209457e-02 -6.3102620840072632e-01 + <_> + + 0 -1 200 1.3812039978802204e-03 + + -1.5218059718608856e-01 1.8971739709377289e-01 + <_> + + 0 -1 201 -1.0904540307819843e-02 + + -5.8097380399703979e-01 4.4862728565931320e-02 + <_> + + 0 -1 202 7.5157178798690438e-05 + + -1.3777349889278412e-01 1.9543160498142242e-01 + <_> + + 0 -1 203 3.8649770431220531e-03 + + -1.0302229970693588e-01 2.5374969840049744e-01 + <_> + 48 + -1.1189440488815308e+00 + + <_> + + 0 -1 204 -1.0215889662504196e-01 + + 4.1681259870529175e-01 -1.6655629873275757e-01 + <_> + + 0 -1 205 -5.1939819008111954e-02 + + 3.3023950457572937e-01 -2.0715710520744324e-01 + <_> + + 0 -1 206 -4.2717780917882919e-02 + + 2.6093730330467224e-01 -1.6013890504837036e-01 + <_> + + 0 -1 207 4.3890418601222336e-04 + + -3.4750530123710632e-01 1.3918919861316681e-01 + <_> + + 0 -1 208 2.4264389649033546e-02 + + -4.2552059888839722e-01 1.3578380644321442e-01 + <_> + + 0 -1 209 -2.3820599541068077e-02 + + 3.1749808788299561e-01 -1.6652040183544159e-01 + <_> + + 0 -1 210 -7.0518180727958679e-03 + + 3.0947178602218628e-01 -1.3338300585746765e-01 + <_> + + 0 -1 211 -6.8517157342284918e-04 + + -6.0082262754440308e-01 8.7747000157833099e-02 + <_> + + 0 -1 212 5.3705149330198765e-03 + + -1.2311449646949768e-01 3.8333550095558167e-01 + <_> + + 0 -1 213 -1.3403539545834064e-02 + + 3.3877369761466980e-01 -1.0140489786863327e-01 + <_> + + 0 -1 214 -6.6856360062956810e-03 + + -6.1193597316741943e-01 4.7740221023559570e-02 + <_> + + 0 -1 215 -4.2887418530881405e-03 + + 2.5275790691375732e-01 -1.4434510469436646e-01 + <_> + + 0 -1 216 -1.0876749642193317e-02 + + 5.4775732755661011e-01 -5.9455480426549911e-02 + <_> + + 0 -1 217 3.7882640026509762e-04 + + 8.3410300314426422e-02 -4.4226369261741638e-01 + <_> + + 0 -1 218 -2.4550149682909250e-03 + + 2.3330999910831451e-01 -1.3964480161666870e-01 + <_> + + 0 -1 219 1.2721839593723416e-03 + + 6.0480289161205292e-02 -4.9456089735031128e-01 + <_> + + 0 -1 220 -4.8933159559965134e-03 + + -6.6833269596099854e-01 4.6218499541282654e-02 + <_> + + 0 -1 221 2.6449989527463913e-02 + + -7.3235362768173218e-02 
4.4425961375236511e-01 + <_> + + 0 -1 222 -3.3706070389598608e-03 + + -4.2464339733123779e-01 6.8676561117172241e-02 + <_> + + 0 -1 223 -2.9559480026364326e-03 + + 1.6218039393424988e-01 -1.8222999572753906e-01 + <_> + + 0 -1 224 3.0619909986853600e-02 + + -5.8643341064453125e-02 5.3263628482818604e-01 + <_> + + 0 -1 225 -9.5765907317399979e-03 + + -6.0562682151794434e-01 5.3345989435911179e-02 + <_> + + 0 -1 226 6.6372493165545166e-05 + + -1.6680839657783508e-01 1.9284160435199738e-01 + <_> + + 0 -1 227 5.0975950434803963e-03 + + 4.4119510799646378e-02 -5.7458841800689697e-01 + <_> + + 0 -1 228 3.7112718564458191e-04 + + -1.1086399853229523e-01 2.3105390369892120e-01 + <_> + + 0 -1 229 -8.6607588455080986e-03 + + 4.0456289052963257e-01 -6.2446091324090958e-02 + <_> + + 0 -1 230 8.7489158613607287e-04 + + 6.4875148236751556e-02 -4.4871041178703308e-01 + <_> + + 0 -1 231 1.1120870476588607e-03 + + -9.3861460685729980e-02 3.0453911423683167e-01 + <_> + + 0 -1 232 -2.3837819695472717e-02 + + -5.8887428045272827e-01 4.6659421175718307e-02 + <_> + + 0 -1 233 2.2272899514064193e-04 + + -1.4898599684238434e-01 1.7701950669288635e-01 + <_> + + 0 -1 234 2.4467470124363899e-02 + + -5.5789601057767868e-02 4.9208301305770874e-01 + <_> + + 0 -1 235 -1.4239320158958435e-01 + + 1.5192000567913055e-01 -1.8778899312019348e-01 + <_> + + 0 -1 236 -2.0123120397329330e-02 + + 2.1780100464820862e-01 -1.2081900238990784e-01 + <_> + + 0 -1 237 1.1513679783092812e-04 + + -1.6856589913368225e-01 1.6451929509639740e-01 + <_> + + 0 -1 238 -2.7556740678846836e-03 + + -6.9442039728164673e-01 3.9449468255043030e-02 + <_> + + 0 -1 239 -7.5843912782147527e-05 + + 1.8941369652748108e-01 -1.5183840692043304e-01 + <_> + + 0 -1 240 -7.0697711780667305e-03 + + 4.7064599394798279e-01 -5.7927619665861130e-02 + <_> + + 0 -1 241 -3.7393178790807724e-02 + + -7.5892448425292969e-01 3.4116048365831375e-02 + <_> + + 0 -1 242 -1.5995610505342484e-02 + + 3.0670469999313354e-01 -8.7525576353073120e-02 + <_> + + 0 -1 243 -3.1183990649878979e-03 + + 2.6195371150970459e-01 -9.1214887797832489e-02 + <_> + + 0 -1 244 1.0651360498741269e-03 + + -1.7427560687065125e-01 1.5277640521526337e-01 + <_> + + 0 -1 245 -1.6029420075938106e-03 + + 3.5612630844116211e-01 -7.6629996299743652e-02 + <_> + + 0 -1 246 4.3619908392429352e-03 + + 4.9356970936059952e-02 -5.9228771924972534e-01 + <_> + + 0 -1 247 -1.0779909789562225e-02 + + -6.3922178745269775e-01 3.3204540610313416e-02 + <_> + + 0 -1 248 -4.3590869754552841e-03 + + 1.6107389330863953e-01 -1.5221320092678070e-01 + <_> + + 0 -1 249 7.4596069753170013e-03 + + 3.3172961324453354e-02 -7.5007742643356323e-01 + <_> + + 0 -1 250 8.1385448575019836e-03 + + 2.6325279846787453e-02 -7.1731162071228027e-01 + <_> + + 0 -1 251 -3.3338490873575211e-02 + + 3.3536610007286072e-01 -7.0803590118885040e-02 + <_> + 55 + -1.1418989896774292e+00 + + <_> + + 0 -1 252 1.9553979858756065e-02 + + -1.0439720004796982e-01 5.3128951787948608e-01 + <_> + + 0 -1 253 2.2122919559478760e-02 + + -2.4747270345687866e-01 2.0847250521183014e-01 + <_> + + 0 -1 254 -4.1829389519989491e-03 + + 3.8289439678192139e-01 -1.4711579680442810e-01 + <_> + + 0 -1 255 -8.6381728760898113e-04 + + -6.2632888555526733e-01 1.1993259936571121e-01 + <_> + + 0 -1 256 7.9958612332120538e-04 + + 9.2573471367359161e-02 -5.5168831348419189e-01 + <_> + + 0 -1 257 9.1527570039033890e-03 + + -7.2929807007312775e-02 5.5512511730194092e-01 + <_> + + 0 -1 258 -3.9388681761920452e-03 + + 2.0196039974689484e-01 -2.0912039279937744e-01 + <_> + + 0 -1 259 
1.4613410166930407e-04 + + -2.7861818671226501e-01 1.3817410171031952e-01 + <_> + + 0 -1 260 -3.1691689509898424e-03 + + 3.6685898900032043e-01 -7.6308242976665497e-02 + <_> + + 0 -1 261 -2.2189389914274216e-02 + + 3.9096599817276001e-01 -1.0971540212631226e-01 + <_> + + 0 -1 262 -7.4523608200252056e-03 + + 1.2838590145111084e-01 -2.4159869551658630e-01 + <_> + + 0 -1 263 7.7997002517804503e-04 + + 7.1978069841861725e-02 -4.3976500630378723e-01 + <_> + + 0 -1 264 -4.6783639118075371e-03 + + 2.1569849550724030e-01 -1.4205920696258545e-01 + <_> + + 0 -1 265 -1.5188639983534813e-02 + + 3.6458781361579895e-01 -8.2675926387310028e-02 + <_> + + 0 -1 266 5.0619798712432384e-03 + + -3.4380409121513367e-01 9.2068232595920563e-02 + <_> + + 0 -1 267 -1.7351920250803232e-03 + + -6.1725497245788574e-01 4.9214478582143784e-02 + <_> + + 0 -1 268 -1.2423450127243996e-02 + + -5.8558952808380127e-01 4.6112600713968277e-02 + <_> + + 0 -1 269 -1.3031429611146450e-02 + + -5.9710788726806641e-01 4.0672458708286285e-02 + <_> + + 0 -1 270 -1.2369629694148898e-03 + + -6.8334168195724487e-01 3.3156178891658783e-02 + <_> + + 0 -1 271 6.1022108420729637e-03 + + -9.4729237258434296e-02 3.0102241039276123e-01 + <_> + + 0 -1 272 6.6952849738299847e-04 + + 8.1816866993904114e-02 -3.5196030139923096e-01 + <_> + + 0 -1 273 -1.7970580374822021e-03 + + 2.3718979954719543e-01 -1.1768709868192673e-01 + <_> + + 0 -1 274 -7.1074528386816382e-04 + + -4.4763788580894470e-01 5.7682480663061142e-02 + <_> + + 0 -1 275 -5.9126471169292927e-03 + + 4.3425410985946655e-01 -6.6868573427200317e-02 + <_> + + 0 -1 276 -3.3132149837911129e-03 + + 1.8150010704994202e-01 -1.4180320501327515e-01 + <_> + + 0 -1 277 -6.0814660042524338e-02 + + 4.7221711277961731e-01 -6.1410639435052872e-02 + <_> + + 0 -1 278 -9.6714183688163757e-02 + + 2.7683168649673462e-01 -9.4490036368370056e-02 + <_> + + 0 -1 279 3.9073550142347813e-03 + + -1.2278530001640320e-01 2.1057400107383728e-01 + <_> + + 0 -1 280 -9.0431869029998779e-03 + + 3.5641568899154663e-01 -7.7806226909160614e-02 + <_> + + 0 -1 281 -4.8800031654536724e-03 + + -4.1034790873527527e-01 6.9694377481937408e-02 + <_> + + 0 -1 282 -4.3547428213059902e-03 + + -7.3017889261245728e-01 3.6655150353908539e-02 + <_> + + 0 -1 283 -9.6500627696514130e-03 + + 5.5181127786636353e-01 -5.3168080747127533e-02 + <_> + + 0 -1 284 -1.7397310584783554e-02 + + -5.7084232568740845e-01 5.0214089453220367e-02 + <_> + + 0 -1 285 -6.8304329179227352e-03 + + -4.6180281043052673e-01 5.0202690064907074e-02 + <_> + + 0 -1 286 3.3255619928240776e-04 + + -9.5362730324268341e-02 2.5983759760856628e-01 + <_> + + 0 -1 287 -2.3100529797375202e-03 + + 2.2872470319271088e-01 -1.0533530265092850e-01 + <_> + + 0 -1 288 -7.5426651164889336e-03 + + -5.6990510225296021e-01 4.8863459378480911e-02 + <_> + + 0 -1 289 -5.2723060362040997e-03 + + 3.5145181417465210e-01 -8.2390107214450836e-02 + <_> + + 0 -1 290 -4.8578968271613121e-03 + + -6.0417622327804565e-01 4.4539440423250198e-02 + <_> + + 0 -1 291 1.5867310576140881e-03 + + -1.0340909659862518e-01 2.3282019793987274e-01 + <_> + + 0 -1 292 -4.7427811659872532e-03 + + 2.8490281105041504e-01 -9.8090499639511108e-02 + <_> + + 0 -1 293 -1.3515240279957652e-03 + + 2.3096430301666260e-01 -1.1361840367317200e-01 + <_> + + 0 -1 294 2.2526069078594446e-03 + + 6.4478322863578796e-02 -4.2205891013145447e-01 + <_> + + 0 -1 295 -3.8038659840822220e-04 + + -3.8076201081275940e-01 6.0043290257453918e-02 + <_> + + 0 -1 296 4.9043921753764153e-03 + + -7.6104998588562012e-02 3.3232170343399048e-01 + <_> + + 0 
[... OpenCV cascade-classifier stage data elided for readability: the original diff lines were flattened together during extraction. Each `<_>` entry encodes one boosted weak classifier as `0 -1 <feature index> <split threshold>` followed by two leaf values, and each stage opens with its weak-classifier count and decision threshold (e.g. `32` / `-1.1255199909210205e+00`). The stages in this span contain between 30 and 93 weak classifiers each; no other human-readable information is recoverable from the numeric payload ...]
<_> + + 0 -1 1043 -7.9523242311552167e-04 + + 1.7204670608043671e-01 -8.6001992225646973e-02 + <_> + + 0 -1 1044 -3.1029590172693133e-04 + + -2.8433170914649963e-01 5.1817119121551514e-02 + <_> + + 0 -1 1045 -1.7053710296750069e-02 + + 3.9242428541183472e-01 -4.0143270045518875e-02 + <_> + + 0 -1 1046 4.6504959464073181e-03 + + -3.1837560236454010e-02 4.1237699985504150e-01 + <_> + + 0 -1 1047 -1.0358760133385658e-02 + + -5.6993198394775391e-01 2.9248379170894623e-02 + <_> + + 0 -1 1048 -2.2196240723133087e-02 + + -4.5605289936065674e-01 2.6285989210009575e-02 + <_> + + 0 -1 1049 -7.0536029525101185e-03 + + 1.5998320281505585e-01 -9.1594859957695007e-02 + <_> + + 0 -1 1050 -5.7094299700111151e-04 + + -1.4076329767704010e-01 1.0287419706583023e-01 + <_> + + 0 -1 1051 -2.2152599412947893e-03 + + 1.6593599319458008e-01 -8.5273988544940948e-02 + <_> + + 0 -1 1052 -2.8084890916943550e-02 + + 2.7022340893745422e-01 -5.5873811244964600e-02 + <_> + + 0 -1 1053 2.1515151020139456e-03 + + 4.2472891509532928e-02 -3.2005849480628967e-01 + <_> + + 0 -1 1054 -2.9733829433098435e-04 + + 1.6177169978618622e-01 -8.5115589201450348e-02 + <_> + + 0 -1 1055 -1.6694780439138412e-02 + + -4.2858770489692688e-01 3.0541609972715378e-02 + <_> + + 0 -1 1056 1.1982990056276321e-01 + + -1.6277290880680084e-02 7.9846781492233276e-01 + <_> + + 0 -1 1057 -3.5499420482665300e-04 + + 1.5935939550399780e-01 -8.3272881805896759e-02 + <_> + + 0 -1 1058 -1.8226269632577896e-02 + + 1.9527280330657959e-01 -7.3939889669418335e-02 + <_> + + 0 -1 1059 -4.0238600922748446e-04 + + 7.9101808369159698e-02 -2.0806129276752472e-01 + <_> + + 0 -1 1060 4.0892060496844351e-04 + + 1.0036630183458328e-01 -1.5128210186958313e-01 + <_> + + 0 -1 1061 9.5368112670257688e-04 + + -7.3011666536331177e-02 2.1752020716667175e-01 + <_> + + 0 -1 1062 4.3081799149513245e-01 + + -2.7450699359178543e-02 5.7061582803726196e-01 + <_> + + 0 -1 1063 5.3564831614494324e-04 + + 1.1587540060281754e-01 -1.2790560722351074e-01 + <_> + + 0 -1 1064 2.4430730263702571e-05 + + -1.6816629469394684e-01 8.0449983477592468e-02 + <_> + + 0 -1 1065 -5.5345650762319565e-02 + + 4.5338949561119080e-01 -3.1222779303789139e-02 + + <_> + + <_> + 0 8 20 12 -1. + <_> + 0 14 20 6 2. + <_> + + <_> + 9 1 4 15 -1. + <_> + 9 6 4 5 3. + <_> + + <_> + 6 10 9 2 -1. + <_> + 9 10 3 2 3. + <_> + + <_> + 7 0 10 9 -1. + <_> + 7 3 10 3 3. + <_> + + <_> + 12 2 2 18 -1. + <_> + 12 8 2 6 3. + <_> + + <_> + 8 6 8 6 -1. + <_> + 8 9 8 3 2. + <_> + + <_> + 2 0 17 18 -1. + <_> + 2 6 17 6 3. + <_> + + <_> + 10 10 1 8 -1. + <_> + 10 14 1 4 2. + <_> + + <_> + 7 10 9 2 -1. + <_> + 10 10 3 2 3. + <_> + + <_> + 5 1 6 6 -1. + <_> + 5 3 6 2 3. + <_> + + <_> + 3 1 15 9 -1. + <_> + 3 4 15 3 3. + <_> + + <_> + 6 3 9 6 -1. + <_> + 6 5 9 2 3. + <_> + + <_> + 8 17 6 3 -1. + <_> + 10 17 2 3 3. + <_> + + <_> + 9 10 9 1 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 1 7 6 11 -1. + <_> + 3 7 2 11 3. + <_> + + <_> + 9 18 3 1 -1. + <_> + 10 18 1 1 3. + <_> + + <_> + 16 16 1 2 -1. + <_> + 16 17 1 1 2. + <_> + + <_> + 9 17 6 3 -1. + <_> + 11 17 2 3 3. + <_> + + <_> + 8 0 5 18 -1. + <_> + 8 6 5 6 3. + <_> + + <_> + 6 7 9 7 -1. + <_> + 9 7 3 7 3. + <_> + + <_> + 14 6 6 10 -1. + <_> + 16 6 2 10 3. + <_> + + <_> + 9 8 9 5 -1. + <_> + 12 8 3 5 3. + <_> + + <_> + 3 7 9 6 -1. + <_> + 6 7 3 6 3. + <_> + + <_> + 1 7 6 6 -1. + <_> + 3 7 2 6 3. + <_> + + <_> + 16 0 4 18 -1. + <_> + 16 6 4 6 3. + <_> + + <_> + 0 17 3 3 -1. + <_> + 0 18 3 1 3. + <_> + + <_> + 16 0 2 1 -1. + <_> + 17 0 1 1 2. + <_> + + <_> + 0 8 20 12 -1. + <_> + 0 14 20 6 2. 
+ <_> + + <_> + 6 6 9 8 -1. + <_> + 9 6 3 8 3. + <_> + + <_> + 5 3 12 9 -1. + <_> + 5 6 12 3 3. + <_> + + <_> + 4 16 1 2 -1. + <_> + 4 17 1 1 2. + <_> + + <_> + 18 10 2 1 -1. + <_> + 19 10 1 1 2. + <_> + + <_> + 9 8 6 5 -1. + <_> + 11 8 2 5 3. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 6 8 6 6 -1. + <_> + 8 8 2 6 3. + <_> + + <_> + 11 7 6 7 -1. + <_> + 13 7 2 7 3. + <_> + + <_> + 19 14 1 2 -1. + <_> + 19 15 1 1 2. + <_> + + <_> + 6 17 1 2 -1. + <_> + 6 18 1 1 2. + <_> + + <_> + 14 7 2 7 -1. + <_> + 15 7 1 7 2. + <_> + + <_> + 6 8 2 4 -1. + <_> + 7 8 1 4 2. + <_> + + <_> + 5 8 12 6 -1. + <_> + 5 10 12 2 3. + <_> + + <_> + 2 17 1 3 -1. + <_> + 2 18 1 1 3. + <_> + + <_> + 6 7 3 6 -1. + <_> + 7 7 1 6 3. + <_> + + <_> + 6 7 9 12 -1. + <_> + 9 7 3 12 3. + <_> + + <_> + 6 2 11 12 -1. + <_> + 6 6 11 4 3. + <_> + + <_> + 1 12 5 8 -1. + <_> + 1 16 5 4 2. + <_> + + <_> + 14 7 6 7 -1. + <_> + 16 7 2 7 3. + <_> + + <_> + 10 8 6 6 -1. + <_> + 12 8 2 6 3. + <_> + + <_> + 16 18 4 2 -1. + <_> + 16 19 4 1 2. + <_> + + <_> + 18 17 2 3 -1. + <_> + 18 18 2 1 3. + <_> + + <_> + 9 7 3 7 -1. + <_> + 10 7 1 7 3. + <_> + + <_> + 5 6 6 8 -1. + <_> + 7 6 2 8 3. + <_> + + <_> + 2 6 6 11 -1. + <_> + 4 6 2 11 3. + <_> + + <_> + 8 10 12 8 -1. + <_> + 8 14 12 4 2. + <_> + + <_> + 7 17 6 3 -1. + <_> + 9 17 2 3 3. + <_> + + <_> + 10 9 3 3 -1. + <_> + 11 9 1 3 3. + <_> + + <_> + 8 8 3 6 -1. + <_> + 9 8 1 6 3. + <_> + + <_> + 7 0 6 5 -1. + <_> + 9 0 2 5 3. + <_> + + <_> + 6 17 1 3 -1. + <_> + 6 18 1 1 3. + <_> + + <_> + 0 18 4 2 -1. + <_> + 0 19 4 1 2. + <_> + + <_> + 4 1 11 9 -1. + <_> + 4 4 11 3 3. + <_> + + <_> + 3 1 14 9 -1. + <_> + 3 4 14 3 3. + <_> + + <_> + 0 9 6 4 -1. + <_> + 2 9 2 4 3. + <_> + + <_> + 18 13 1 2 -1. + <_> + 18 14 1 1 2. + <_> + + <_> + 13 5 3 11 -1. + <_> + 14 5 1 11 3. + <_> + + <_> + 0 18 8 2 -1. + <_> + 0 18 4 1 2. + <_> + 4 19 4 1 2. + <_> + + <_> + 5 8 12 5 -1. + <_> + 9 8 4 5 3. + <_> + + <_> + 4 7 11 10 -1. + <_> + 4 12 11 5 2. + <_> + + <_> + 14 9 6 4 -1. + <_> + 16 9 2 4 3. + <_> + + <_> + 0 7 6 8 -1. + <_> + 3 7 3 8 2. + <_> + + <_> + 0 16 3 3 -1. + <_> + 0 17 3 1 3. + <_> + + <_> + 7 11 12 1 -1. + <_> + 11 11 4 1 3. + <_> + + <_> + 4 8 9 4 -1. + <_> + 7 8 3 4 3. + <_> + + <_> + 5 16 6 4 -1. + <_> + 7 16 2 4 3. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 4 9 4 10 -1. + <_> + 4 9 2 5 2. + <_> + 6 14 2 5 2. + <_> + + <_> + 4 8 6 4 -1. + <_> + 6 8 2 4 3. + <_> + + <_> + 10 2 2 18 -1. + <_> + 10 8 2 6 3. + <_> + + <_> + 0 5 8 6 -1. + <_> + 0 5 4 3 2. + <_> + 4 8 4 3 2. + <_> + + <_> + 6 0 6 5 -1. + <_> + 8 0 2 5 3. + <_> + + <_> + 18 0 2 14 -1. + <_> + 18 7 2 7 2. + <_> + + <_> + 8 18 4 2 -1. + <_> + 10 18 2 2 2. + <_> + + <_> + 1 17 6 3 -1. + <_> + 1 18 6 1 3. + <_> + + <_> + 11 8 3 5 -1. + <_> + 12 8 1 5 3. + <_> + + <_> + 11 8 3 4 -1. + <_> + 12 8 1 4 3. + <_> + + <_> + 11 0 6 5 -1. + <_> + 13 0 2 5 3. + <_> + + <_> + 1 7 6 7 -1. + <_> + 3 7 2 7 3. + <_> + + <_> + 0 13 1 3 -1. + <_> + 0 14 1 1 3. + <_> + + <_> + 3 2 9 6 -1. + <_> + 3 4 9 2 3. + <_> + + <_> + 8 6 9 2 -1. + <_> + 8 7 9 1 2. + <_> + + <_> + 0 14 3 6 -1. + <_> + 0 16 3 2 3. + <_> + + <_> + 1 11 6 4 -1. + <_> + 3 11 2 4 3. + <_> + + <_> + 6 9 9 3 -1. + <_> + 9 9 3 3 3. + <_> + + <_> + 6 0 9 6 -1. + <_> + 6 2 9 2 3. + <_> + + <_> + 8 5 6 6 -1. + <_> + 8 7 6 2 3. + <_> + + <_> + 1 12 2 1 -1. + <_> + 2 12 1 1 2. + <_> + + <_> + 10 10 6 2 -1. + <_> + 12 10 2 2 3. + <_> + + <_> + 13 8 6 6 -1. + <_> + 15 8 2 6 3. + <_> + + <_> + 6 16 6 4 -1. 
+ <_> + 8 16 2 4 3. + <_> + + <_> + 8 0 9 9 -1. + <_> + 8 3 9 3 3. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 7 10 3 3 -1. + <_> + 8 10 1 3 3. + <_> + + <_> + 9 14 2 2 -1. + <_> + 9 14 1 1 2. + <_> + 10 15 1 1 2. + <_> + + <_> + 9 14 2 2 -1. + <_> + 9 14 1 1 2. + <_> + 10 15 1 1 2. + <_> + + <_> + 0 8 19 12 -1. + <_> + 0 14 19 6 2. + <_> + + <_> + 7 6 9 14 -1. + <_> + 10 6 3 14 3. + <_> + + <_> + 13 8 3 4 -1. + <_> + 14 8 1 4 3. + <_> + + <_> + 4 17 1 3 -1. + <_> + 4 18 1 1 3. + <_> + + <_> + 4 9 6 3 -1. + <_> + 6 9 2 3 3. + <_> + + <_> + 2 18 5 2 -1. + <_> + 2 19 5 1 2. + <_> + + <_> + 7 8 2 2 -1. + <_> + 7 8 1 1 2. + <_> + 8 9 1 1 2. + <_> + + <_> + 7 8 2 2 -1. + <_> + 7 8 1 1 2. + <_> + 8 9 1 1 2. + <_> + + <_> + 5 10 13 2 -1. + <_> + 5 11 13 1 2. + <_> + + <_> + 10 8 1 9 -1. + <_> + 10 11 1 3 3. + <_> + + <_> + 15 8 2 12 -1. + <_> + 15 8 1 6 2. + <_> + 16 14 1 6 2. + <_> + + <_> + 4 0 3 5 -1. + <_> + 5 0 1 5 3. + <_> + + <_> + 12 6 3 7 -1. + <_> + 13 6 1 7 3. + <_> + + <_> + 7 16 6 4 -1. + <_> + 9 16 2 4 3. + <_> + + <_> + 9 16 2 1 -1. + <_> + 10 16 1 1 2. + <_> + + <_> + 6 10 9 2 -1. + <_> + 9 10 3 2 3. + <_> + + <_> + 0 6 15 14 -1. + <_> + 0 13 15 7 2. + <_> + + <_> + 9 1 5 6 -1. + <_> + 9 3 5 2 3. + <_> + + <_> + 3 9 3 4 -1. + <_> + 4 9 1 4 3. + <_> + + <_> + 5 7 3 6 -1. + <_> + 6 7 1 6 3. + <_> + + <_> + 17 16 1 2 -1. + <_> + 17 17 1 1 2. + <_> + + <_> + 9 8 6 12 -1. + <_> + 11 8 2 12 3. + <_> + + <_> + 6 10 6 1 -1. + <_> + 8 10 2 1 3. + <_> + + <_> + 7 17 9 3 -1. + <_> + 10 17 3 3 3. + <_> + + <_> + 14 18 6 2 -1. + <_> + 14 19 6 1 2. + <_> + + <_> + 9 5 3 14 -1. + <_> + 10 5 1 14 3. + <_> + + <_> + 8 16 9 4 -1. + <_> + 11 16 3 4 3. + <_> + + <_> + 0 0 4 14 -1. + <_> + 0 7 4 7 2. + <_> + + <_> + 8 1 6 3 -1. + <_> + 10 1 2 3 3. + <_> + + <_> + 6 8 3 4 -1. + <_> + 7 8 1 4 3. + <_> + + <_> + 4 8 3 4 -1. + <_> + 5 8 1 4 3. + <_> + + <_> + 5 1 6 5 -1. + <_> + 7 1 2 5 3. + <_> + + <_> + 1 18 1 2 -1. + <_> + 1 19 1 1 2. + <_> + + <_> + 7 0 6 6 -1. + <_> + 7 2 6 2 3. + <_> + + <_> + 0 18 4 2 -1. + <_> + 0 19 4 1 2. + <_> + + <_> + 12 3 8 12 -1. + <_> + 12 7 8 4 3. + <_> + + <_> + 12 9 3 4 -1. + <_> + 13 9 1 4 3. + <_> + + <_> + 12 8 3 5 -1. + <_> + 13 8 1 5 3. + <_> + + <_> + 16 0 2 1 -1. + <_> + 17 0 1 1 2. + <_> + + <_> + 5 17 1 3 -1. + <_> + 5 18 1 1 3. + <_> + + <_> + 10 2 3 6 -1. + <_> + 10 4 3 2 3. + <_> + + <_> + 4 17 2 3 -1. + <_> + 4 18 2 1 3. + <_> + + <_> + 12 7 1 9 -1. + <_> + 12 10 1 3 3. + <_> + + <_> + 7 6 3 9 -1. + <_> + 8 6 1 9 3. + <_> + + <_> + 17 13 3 6 -1. + <_> + 17 15 3 2 3. + <_> + + <_> + 7 7 3 8 -1. + <_> + 8 7 1 8 3. + <_> + + <_> + 5 0 3 5 -1. + <_> + 6 0 1 5 3. + <_> + + <_> + 4 6 9 8 -1. + <_> + 7 6 3 8 3. + <_> + + <_> + 2 9 3 3 -1. + <_> + 3 9 1 3 3. + <_> + + <_> + 16 18 4 2 -1. + <_> + 16 19 4 1 2. + <_> + + <_> + 17 10 3 10 -1. + <_> + 17 15 3 5 2. + <_> + + <_> + 8 9 6 4 -1. + <_> + 10 9 2 4 3. + <_> + + <_> + 5 2 10 12 -1. + <_> + 5 6 10 4 3. + <_> + + <_> + 6 9 6 3 -1. + <_> + 8 9 2 3 3. + <_> + + <_> + 11 7 3 7 -1. + <_> + 12 7 1 7 3. + <_> + + <_> + 12 8 6 4 -1. + <_> + 14 8 2 4 3. + <_> + + <_> + 14 8 6 5 -1. + <_> + 16 8 2 5 3. + <_> + + <_> + 12 12 2 4 -1. + <_> + 12 14 2 2 2. + <_> + + <_> + 3 15 1 2 -1. + <_> + 3 16 1 1 2. + <_> + + <_> + 12 7 3 4 -1. + <_> + 13 7 1 4 3. + <_> + + <_> + 10 0 6 6 -1. + <_> + 12 0 2 6 3. + <_> + + <_> + 10 6 3 8 -1. + <_> + 11 6 1 8 3. + <_> + + <_> + 16 17 1 2 -1. + <_> + 16 18 1 1 2. + <_> + + <_> + 16 16 1 3 -1. + <_> + 16 17 1 1 3. 
+ <_> + + <_> + 11 11 1 2 -1. + <_> + 11 12 1 1 2. + <_> + + <_> + 3 7 6 9 -1. + <_> + 5 7 2 9 3. + <_> + + <_> + 4 18 9 1 -1. + <_> + 7 18 3 1 3. + <_> + + <_> + 0 11 4 9 -1. + <_> + 0 14 4 3 3. + <_> + + <_> + 9 17 6 3 -1. + <_> + 11 17 2 3 3. + <_> + + <_> + 7 8 6 12 -1. + <_> + 9 8 2 12 3. + <_> + + <_> + 6 8 3 4 -1. + <_> + 7 8 1 4 3. + <_> + + <_> + 3 17 1 3 -1. + <_> + 3 18 1 1 3. + <_> + + <_> + 11 9 6 4 -1. + <_> + 13 9 2 4 3. + <_> + + <_> + 6 1 3 2 -1. + <_> + 7 1 1 2 3. + <_> + + <_> + 1 0 2 1 -1. + <_> + 2 0 1 1 2. + <_> + + <_> + 1 0 2 14 -1. + <_> + 1 0 1 7 2. + <_> + 2 7 1 7 2. + <_> + + <_> + 5 5 11 8 -1. + <_> + 5 9 11 4 2. + <_> + + <_> + 9 3 5 6 -1. + <_> + 9 5 5 2 3. + <_> + + <_> + 7 9 5 10 -1. + <_> + 7 14 5 5 2. + <_> + + <_> + 15 10 2 2 -1. + <_> + 16 10 1 2 2. + <_> + + <_> + 0 18 8 2 -1. + <_> + 0 19 8 1 2. + <_> + + <_> + 7 17 1 3 -1. + <_> + 7 18 1 1 3. + <_> + + <_> + 7 2 11 6 -1. + <_> + 7 4 11 2 3. + <_> + + <_> + 8 3 9 3 -1. + <_> + 8 4 9 1 3. + <_> + + <_> + 0 9 2 2 -1. + <_> + 0 10 2 1 2. + <_> + + <_> + 0 5 3 6 -1. + <_> + 0 7 3 2 3. + <_> + + <_> + 6 7 2 2 -1. + <_> + 6 7 1 1 2. + <_> + 7 8 1 1 2. + <_> + + <_> + 7 6 3 6 -1. + <_> + 8 6 1 6 3. + <_> + + <_> + 12 1 6 4 -1. + <_> + 14 1 2 4 3. + <_> + + <_> + 9 11 6 8 -1. + <_> + 11 11 2 8 3. + <_> + + <_> + 17 15 3 3 -1. + <_> + 17 16 3 1 3. + <_> + + <_> + 6 6 3 9 -1. + <_> + 6 9 3 3 3. + <_> + + <_> + 0 5 8 6 -1. + <_> + 0 5 4 3 2. + <_> + 4 8 4 3 2. + <_> + + <_> + 0 6 1 3 -1. + <_> + 0 7 1 1 3. + <_> + + <_> + 17 0 2 6 -1. + <_> + 18 0 1 6 2. + <_> + + <_> + 10 17 6 3 -1. + <_> + 12 17 2 3 3. + <_> + + <_> + 13 15 2 2 -1. + <_> + 13 15 1 1 2. + <_> + 14 16 1 1 2. + <_> + + <_> + 4 0 12 3 -1. + <_> + 4 1 12 1 3. + <_> + + <_> + 5 3 10 9 -1. + <_> + 5 6 10 3 3. + <_> + + <_> + 7 7 9 7 -1. + <_> + 10 7 3 7 3. + <_> + + <_> + 5 8 9 6 -1. + <_> + 8 8 3 6 3. + <_> + + <_> + 0 16 6 2 -1. + <_> + 0 17 6 1 2. + <_> + + <_> + 12 6 7 14 -1. + <_> + 12 13 7 7 2. + <_> + + <_> + 13 7 6 8 -1. + <_> + 15 7 2 8 3. + <_> + + <_> + 2 10 6 3 -1. + <_> + 4 10 2 3 3. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 7 1 6 2 -1. + <_> + 7 2 6 1 2. + <_> + + <_> + 6 0 6 4 -1. + <_> + 6 2 6 2 2. + <_> + + <_> + 8 18 6 2 -1. + <_> + 10 18 2 2 3. + <_> + + <_> + 7 6 5 2 -1. + <_> + 7 7 5 1 2. + <_> + + <_> + 6 7 3 6 -1. + <_> + 7 7 1 6 3. + <_> + + <_> + 18 18 2 2 -1. + <_> + 18 18 1 1 2. + <_> + 19 19 1 1 2. + <_> + + <_> + 16 8 3 7 -1. + <_> + 17 8 1 7 3. + <_> + + <_> + 0 16 2 3 -1. + <_> + 0 17 2 1 3. + <_> + + <_> + 5 19 6 1 -1. + <_> + 7 19 2 1 3. + <_> + + <_> + 9 5 6 6 -1. + <_> + 9 7 6 2 3. + <_> + + <_> + 0 10 2 4 -1. + <_> + 0 12 2 2 2. + <_> + + <_> + 0 9 4 3 -1. + <_> + 2 9 2 3 2. + <_> + + <_> + 1 10 6 9 -1. + <_> + 3 10 2 9 3. + <_> + + <_> + 9 0 6 2 -1. + <_> + 11 0 2 2 3. + <_> + + <_> + 14 1 2 1 -1. + <_> + 15 1 1 1 2. + <_> + + <_> + 0 8 1 4 -1. + <_> + 0 10 1 2 2. + <_> + + <_> + 15 6 2 2 -1. + <_> + 15 6 1 1 2. + <_> + 16 7 1 1 2. + <_> + + <_> + 7 5 3 6 -1. + <_> + 8 5 1 6 3. + <_> + + <_> + 19 17 1 3 -1. + <_> + 19 18 1 1 3. + <_> + + <_> + 7 10 3 1 -1. + <_> + 8 10 1 1 3. + <_> + + <_> + 12 1 6 6 -1. + <_> + 14 1 2 6 3. + <_> + + <_> + 15 5 2 1 -1. + <_> + 16 5 1 1 2. + <_> + + <_> + 8 2 7 4 -1. + <_> + 8 4 7 2 2. + <_> + + <_> + 4 0 14 15 -1. + <_> + 4 5 14 5 3. + <_> + + <_> + 7 8 6 6 -1. + <_> + 9 8 2 6 3. + <_> + + <_> + 11 17 1 3 -1. + <_> + 11 18 1 1 3. + <_> + + <_> + 12 16 2 4 -1. + <_> + 12 16 1 2 2. + <_> + 13 18 1 2 2. + <_> + + <_> + 10 13 2 1 -1. + <_> + 11 13 1 1 2. 
+ <_> + + <_> + 11 8 3 3 -1. + <_> + 12 8 1 3 3. + <_> + + <_> + 2 0 6 8 -1. + <_> + 4 0 2 8 3. + <_> + + <_> + 3 5 6 6 -1. + <_> + 3 5 3 3 2. + <_> + 6 8 3 3 2. + <_> + + <_> + 10 8 3 3 -1. + <_> + 11 8 1 3 3. + <_> + + <_> + 5 17 4 2 -1. + <_> + 5 18 4 1 2. + <_> + + <_> + 8 16 5 2 -1. + <_> + 8 17 5 1 2. + <_> + + <_> + 0 4 3 3 -1. + <_> + 0 5 3 1 3. + <_> + + <_> + 6 3 6 2 -1. + <_> + 8 3 2 2 3. + <_> + + <_> + 4 4 9 3 -1. + <_> + 7 4 3 3 3. + <_> + + <_> + 0 13 1 4 -1. + <_> + 0 15 1 2 2. + <_> + + <_> + 0 17 8 3 -1. + <_> + 0 18 8 1 3. + <_> + + <_> + 6 1 11 6 -1. + <_> + 6 3 11 2 3. + <_> + + <_> + 4 10 6 2 -1. + <_> + 6 10 2 2 3. + <_> + + <_> + 10 8 1 12 -1. + <_> + 10 14 1 6 2. + <_> + + <_> + 5 8 3 4 -1. + <_> + 6 8 1 4 3. + <_> + + <_> + 0 17 1 3 -1. + <_> + 0 18 1 1 3. + <_> + + <_> + 0 17 1 3 -1. + <_> + 0 18 1 1 3. + <_> + + <_> + 13 8 3 4 -1. + <_> + 14 8 1 4 3. + <_> + + <_> + 1 5 5 4 -1. + <_> + 1 7 5 2 2. + <_> + + <_> + 18 14 1 2 -1. + <_> + 18 15 1 1 2. + <_> + + <_> + 13 8 2 4 -1. + <_> + 14 8 1 4 2. + <_> + + <_> + 10 6 6 8 -1. + <_> + 12 6 2 8 3. + <_> + + <_> + 8 6 6 10 -1. + <_> + 10 6 2 10 3. + <_> + + <_> + 17 16 1 3 -1. + <_> + 17 17 1 1 3. + <_> + + <_> + 1 7 2 10 -1. + <_> + 2 7 1 10 2. + <_> + + <_> + 5 9 6 3 -1. + <_> + 7 9 2 3 3. + <_> + + <_> + 0 8 5 12 -1. + <_> + 0 14 5 6 2. + <_> + + <_> + 0 11 1 3 -1. + <_> + 0 12 1 1 3. + <_> + + <_> + 6 16 6 4 -1. + <_> + 8 16 2 4 3. + <_> + + <_> + 0 6 2 6 -1. + <_> + 0 8 2 2 3. + <_> + + <_> + 11 18 2 1 -1. + <_> + 12 18 1 1 2. + <_> + + <_> + 5 1 9 2 -1. + <_> + 5 2 9 1 2. + <_> + + <_> + 0 0 1 2 -1. + <_> + 0 1 1 1 2. + <_> + + <_> + 15 9 3 3 -1. + <_> + 16 9 1 3 3. + <_> + + <_> + 18 16 1 3 -1. + <_> + 18 17 1 1 3. + <_> + + <_> + 11 10 6 1 -1. + <_> + 13 10 2 1 3. + <_> + + <_> + 1 3 4 4 -1. + <_> + 3 3 2 4 2. + <_> + + <_> + 11 2 1 18 -1. + <_> + 11 8 1 6 3. + <_> + + <_> + 9 1 5 12 -1. + <_> + 9 5 5 4 3. + <_> + + <_> + 12 0 8 1 -1. + <_> + 16 0 4 1 2. + <_> + + <_> + 8 6 3 10 -1. + <_> + 9 6 1 10 3. + <_> + + <_> + 19 2 1 6 -1. + <_> + 19 4 1 2 3. + <_> + + <_> + 18 6 2 2 -1. + <_> + 18 7 2 1 2. + <_> + + <_> + 7 7 3 4 -1. + <_> + 8 7 1 4 3. + <_> + + <_> + 5 0 6 5 -1. + <_> + 7 0 2 5 3. + <_> + + <_> + 0 3 7 3 -1. + <_> + 0 4 7 1 3. + <_> + + <_> + 1 6 2 1 -1. + <_> + 2 6 1 1 2. + <_> + + <_> + 4 8 2 10 -1. + <_> + 4 8 1 5 2. + <_> + 5 13 1 5 2. + <_> + + <_> + 2 18 18 2 -1. + <_> + 2 18 9 1 2. + <_> + 11 19 9 1 2. + <_> + + <_> + 2 7 4 4 -1. + <_> + 2 7 2 2 2. + <_> + 4 9 2 2 2. + <_> + + <_> + 17 3 3 4 -1. + <_> + 18 3 1 4 3. + <_> + + <_> + 16 9 2 8 -1. + <_> + 16 9 1 4 2. + <_> + 17 13 1 4 2. + <_> + + <_> + 15 7 1 6 -1. + <_> + 15 9 1 2 3. + <_> + + <_> + 14 2 2 2 -1. + <_> + 14 3 2 1 2. + <_> + + <_> + 17 0 2 3 -1. + <_> + 17 1 2 1 3. + <_> + + <_> + 16 18 2 2 -1. + <_> + 16 18 1 1 2. + <_> + 17 19 1 1 2. + <_> + + <_> + 10 4 4 3 -1. + <_> + 10 5 4 1 3. + <_> + + <_> + 0 2 8 6 -1. + <_> + 4 2 4 6 2. + <_> + + <_> + 7 14 6 6 -1. + <_> + 7 16 6 2 3. + <_> + + <_> + 11 15 2 2 -1. + <_> + 11 16 2 1 2. + <_> + + <_> + 7 1 9 4 -1. + <_> + 10 1 3 4 3. + <_> + + <_> + 9 7 3 7 -1. + <_> + 10 7 1 7 3. + <_> + + <_> + 6 17 2 2 -1. + <_> + 6 17 1 1 2. + <_> + 7 18 1 1 2. + <_> + + <_> + 4 6 3 9 -1. + <_> + 5 6 1 9 3. + <_> + + <_> + 0 10 19 10 -1. + <_> + 0 15 19 5 2. + <_> + + <_> + 5 17 6 1 -1. + <_> + 7 17 2 1 3. + <_> + + <_> + 0 12 6 3 -1. + <_> + 3 12 3 3 2. + <_> + + <_> + 2 5 18 5 -1. + <_> + 8 5 6 5 3. + <_> + + <_> + 1 15 6 4 -1. + <_> + 1 17 6 2 2. + <_> + + <_> + 14 10 6 6 -1. + <_> + 16 10 2 6 3. 
+ <_> + + <_> + 0 14 4 3 -1. + <_> + 0 15 4 1 3. + <_> + + <_> + 1 7 6 11 -1. + <_> + 3 7 2 11 3. + <_> + + <_> + 13 17 7 2 -1. + <_> + 13 18 7 1 2. + <_> + + <_> + 0 14 2 3 -1. + <_> + 0 15 2 1 3. + <_> + + <_> + 0 0 6 2 -1. + <_> + 3 0 3 2 2. + <_> + + <_> + 0 1 6 3 -1. + <_> + 3 1 3 3 2. + <_> + + <_> + 0 8 2 6 -1. + <_> + 0 10 2 2 3. + <_> + + <_> + 1 2 6 14 -1. + <_> + 1 2 3 7 2. + <_> + 4 9 3 7 2. + <_> + + <_> + 17 5 2 2 -1. + <_> + 17 5 1 1 2. + <_> + 18 6 1 1 2. + <_> + + <_> + 11 10 9 4 -1. + <_> + 14 10 3 4 3. + <_> + + <_> + 2 9 12 4 -1. + <_> + 6 9 4 4 3. + <_> + + <_> + 7 10 12 2 -1. + <_> + 11 10 4 2 3. + <_> + + <_> + 2 13 1 2 -1. + <_> + 2 14 1 1 2. + <_> + + <_> + 16 7 4 3 -1. + <_> + 16 8 4 1 3. + <_> + + <_> + 19 16 1 3 -1. + <_> + 19 17 1 1 3. + <_> + + <_> + 18 11 1 2 -1. + <_> + 18 12 1 1 2. + <_> + + <_> + 12 7 8 2 -1. + <_> + 12 7 4 1 2. + <_> + 16 8 4 1 2. + <_> + + <_> + 14 9 2 4 -1. + <_> + 15 9 1 4 2. + <_> + + <_> + 14 2 6 4 -1. + <_> + 14 2 3 2 2. + <_> + 17 4 3 2 2. + <_> + + <_> + 14 0 6 1 -1. + <_> + 17 0 3 1 2. + <_> + + <_> + 3 12 2 1 -1. + <_> + 4 12 1 1 2. + <_> + + <_> + 17 2 3 1 -1. + <_> + 18 2 1 1 3. + <_> + + <_> + 1 16 18 2 -1. + <_> + 7 16 6 2 3. + <_> + + <_> + 2 19 8 1 -1. + <_> + 6 19 4 1 2. + <_> + + <_> + 1 17 4 3 -1. + <_> + 1 18 4 1 3. + <_> + + <_> + 19 13 1 2 -1. + <_> + 19 14 1 1 2. + <_> + + <_> + 9 16 10 4 -1. + <_> + 9 16 5 2 2. + <_> + 14 18 5 2 2. + <_> + + <_> + 12 9 2 4 -1. + <_> + 12 9 1 2 2. + <_> + 13 11 1 2 2. + <_> + + <_> + 19 11 1 9 -1. + <_> + 19 14 1 3 3. + <_> + + <_> + 6 6 14 14 -1. + <_> + 6 13 14 7 2. + <_> + + <_> + 2 17 4 2 -1. + <_> + 2 18 4 1 2. + <_> + + <_> + 0 2 1 3 -1. + <_> + 0 3 1 1 3. + <_> + + <_> + 0 12 1 3 -1. + <_> + 0 13 1 1 3. + <_> + + <_> + 15 15 4 4 -1. + <_> + 15 17 4 2 2. + <_> + + <_> + 2 5 18 7 -1. + <_> + 8 5 6 7 3. + <_> + + <_> + 1 16 5 3 -1. + <_> + 1 17 5 1 3. + <_> + + <_> + 0 4 2 3 -1. + <_> + 0 5 2 1 3. + <_> + + <_> + 0 6 2 6 -1. + <_> + 1 6 1 6 2. + <_> + + <_> + 16 14 4 3 -1. + <_> + 16 15 4 1 3. + <_> + + <_> + 0 0 10 6 -1. + <_> + 0 0 5 3 2. + <_> + 5 3 5 3 2. + <_> + + <_> + 2 2 3 6 -1. + <_> + 3 2 1 6 3. + <_> + + <_> + 2 0 3 10 -1. + <_> + 3 0 1 10 3. + <_> + + <_> + 5 5 2 2 -1. + <_> + 5 6 2 1 2. + <_> + + <_> + 12 6 4 4 -1. + <_> + 12 8 4 2 2. + <_> + + <_> + 13 5 7 3 -1. + <_> + 13 6 7 1 3. + <_> + + <_> + 10 13 1 2 -1. + <_> + 10 14 1 1 2. + <_> + + <_> + 16 16 4 2 -1. + <_> + 18 16 2 2 2. + <_> + + <_> + 16 12 4 7 -1. + <_> + 18 12 2 7 2. + <_> + + <_> + 16 17 1 3 -1. + <_> + 16 18 1 1 3. + <_> + + <_> + 19 9 1 3 -1. + <_> + 19 10 1 1 3. + <_> + + <_> + 18 7 2 6 -1. + <_> + 19 7 1 6 2. + <_> + + <_> + 8 1 3 4 -1. + <_> + 9 1 1 4 3. + <_> + + <_> + 14 0 6 9 -1. + <_> + 16 0 2 9 3. + <_> + + <_> + 4 2 10 2 -1. + <_> + 9 2 5 2 2. + <_> + + <_> + 2 12 8 4 -1. + <_> + 2 12 4 2 2. + <_> + 6 14 4 2 2. + <_> + + <_> + 0 4 7 3 -1. + <_> + 0 5 7 1 3. + <_> + + <_> + 14 14 3 3 -1. + <_> + 15 14 1 3 3. + <_> + + <_> + 0 3 4 3 -1. + <_> + 2 3 2 3 2. + <_> + + <_> + 1 0 2 7 -1. + <_> + 2 0 1 7 2. + <_> + + <_> + 15 16 4 4 -1. + <_> + 15 18 4 2 2. + <_> + + <_> + 5 8 12 4 -1. + <_> + 5 10 12 2 2. + <_> + + <_> + 3 17 1 2 -1. + <_> + 3 18 1 1 2. + <_> + + <_> + 6 1 3 4 -1. + <_> + 7 1 1 4 3. + <_> + + <_> + 6 2 3 4 -1. + <_> + 7 2 1 4 3. + <_> + + <_> + 6 8 9 12 -1. + <_> + 9 8 3 12 3. + <_> + + <_> + 8 1 8 6 -1. + <_> + 8 3 8 2 3. + <_> + + <_> + 14 2 6 3 -1. + <_> + 17 2 3 3 2. + <_> + + <_> + 0 6 1 3 -1. + <_> + 0 7 1 1 3. + <_> + + <_> + 10 0 10 2 -1. + <_> + 15 0 5 2 2. 
+ <_> + + <_> + 11 0 3 2 -1. + <_> + 12 0 1 2 3. + <_> + + <_> + 3 19 10 1 -1. + <_> + 8 19 5 1 2. + <_> + + <_> + 0 4 7 16 -1. + <_> + 0 12 7 8 2. + <_> + + <_> + 2 16 1 3 -1. + <_> + 2 17 1 1 3. + <_> + + <_> + 7 8 12 6 -1. + <_> + 11 8 4 6 3. + <_> + + <_> + 14 9 6 7 -1. + <_> + 16 9 2 7 3. + <_> + + <_> + 12 17 6 1 -1. + <_> + 14 17 2 1 3. + <_> + + <_> + 16 1 3 1 -1. + <_> + 17 1 1 1 3. + <_> + + <_> + 0 17 8 2 -1. + <_> + 0 17 4 1 2. + <_> + 4 18 4 1 2. + <_> + + <_> + 17 0 2 1 -1. + <_> + 18 0 1 1 2. + <_> + + <_> + 4 15 6 5 -1. + <_> + 6 15 2 5 3. + <_> + + <_> + 7 2 8 2 -1. + <_> + 7 3 8 1 2. + <_> + + <_> + 4 1 8 4 -1. + <_> + 4 3 8 2 2. + <_> + + <_> + 5 19 2 1 -1. + <_> + 6 19 1 1 2. + <_> + + <_> + 5 19 2 1 -1. + <_> + 6 19 1 1 2. + <_> + + <_> + 16 17 1 3 -1. + <_> + 16 18 1 1 3. + <_> + + <_> + 0 11 2 3 -1. + <_> + 1 11 1 3 2. + <_> + + <_> + 0 19 4 1 -1. + <_> + 2 19 2 1 2. + <_> + + <_> + 0 18 4 2 -1. + <_> + 2 18 2 2 2. + <_> + + <_> + 2 17 1 3 -1. + <_> + 2 18 1 1 3. + <_> + + <_> + 5 7 11 2 -1. + <_> + 5 8 11 1 2. + <_> + + <_> + 9 2 4 10 -1. + <_> + 9 7 4 5 2. + <_> + + <_> + 0 2 4 3 -1. + <_> + 0 3 4 1 3. + <_> + + <_> + 10 19 10 1 -1. + <_> + 15 19 5 1 2. + <_> + + <_> + 11 17 8 3 -1. + <_> + 15 17 4 3 2. + <_> + + <_> + 8 19 3 1 -1. + <_> + 9 19 1 1 3. + <_> + + <_> + 14 0 3 4 -1. + <_> + 15 0 1 4 3. + <_> + + <_> + 10 6 4 3 -1. + <_> + 10 7 4 1 3. + <_> + + <_> + 0 8 3 2 -1. + <_> + 0 9 3 1 2. + <_> + + <_> + 7 12 3 6 -1. + <_> + 7 14 3 2 3. + <_> + + <_> + 1 18 1 2 -1. + <_> + 1 19 1 1 2. + <_> + + <_> + 0 12 4 4 -1. + <_> + 2 12 2 4 2. + <_> + + <_> + 1 8 6 7 -1. + <_> + 3 8 2 7 3. + <_> + + <_> + 0 8 4 5 -1. + <_> + 2 8 2 5 2. + <_> + + <_> + 19 16 1 3 -1. + <_> + 19 17 1 1 3. + <_> + + <_> + 1 5 18 6 -1. + <_> + 7 5 6 6 3. + <_> + + <_> + 2 15 4 2 -1. + <_> + 2 16 4 1 2. + <_> + + <_> + 18 6 2 11 -1. + <_> + 19 6 1 11 2. + <_> + + <_> + 0 12 2 6 -1. + <_> + 0 14 2 2 3. + <_> + + <_> + 12 5 3 2 -1. + <_> + 12 6 3 1 2. + <_> + + <_> + 1 3 2 3 -1. + <_> + 1 4 2 1 3. + <_> + + <_> + 16 14 4 4 -1. + <_> + 16 16 4 2 2. + <_> + + <_> + 6 8 12 5 -1. + <_> + 10 8 4 5 3. + <_> + + <_> + 13 7 2 7 -1. + <_> + 14 7 1 7 2. + <_> + + <_> + 1 8 2 6 -1. + <_> + 2 8 1 6 2. + <_> + + <_> + 15 0 3 7 -1. + <_> + 16 0 1 7 3. + <_> + + <_> + 4 2 6 2 -1. + <_> + 6 2 2 2 3. + <_> + + <_> + 0 9 20 9 -1. + <_> + 0 12 20 3 3. + <_> + + <_> + 10 14 2 2 -1. + <_> + 10 15 2 1 2. + <_> + + <_> + 6 5 10 4 -1. + <_> + 6 7 10 2 2. + <_> + + <_> + 6 1 5 9 -1. + <_> + 6 4 5 3 3. + <_> + + <_> + 16 18 2 2 -1. + <_> + 16 18 1 1 2. + <_> + 17 19 1 1 2. + <_> + + <_> + 0 14 2 4 -1. + <_> + 0 16 2 2 2. + <_> + + <_> + 10 8 2 5 -1. + <_> + 11 8 1 5 2. + <_> + + <_> + 3 7 12 7 -1. + <_> + 7 7 4 7 3. + <_> + + <_> + 0 0 6 6 -1. + <_> + 3 0 3 6 2. + <_> + + <_> + 1 0 4 4 -1. + <_> + 3 0 2 4 2. + <_> + + <_> + 0 0 6 8 -1. + <_> + 2 0 2 8 3. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 0 0 3 3 -1. + <_> + 0 1 3 1 3. + <_> + + <_> + 5 4 2 4 -1. + <_> + 5 6 2 2 2. + <_> + + <_> + 2 10 9 1 -1. + <_> + 5 10 3 1 3. + <_> + + <_> + 1 17 1 3 -1. + <_> + 1 18 1 1 3. + <_> + + <_> + 0 17 2 3 -1. + <_> + 0 18 2 1 3. + <_> + + <_> + 0 15 16 3 -1. + <_> + 8 15 8 3 2. + <_> + + <_> + 0 5 4 1 -1. + <_> + 2 5 2 1 2. + <_> + + <_> + 1 0 6 20 -1. + <_> + 3 0 2 20 3. + <_> + + <_> + 2 5 4 6 -1. + <_> + 2 5 2 3 2. + <_> + 4 8 2 3 2. + <_> + + <_> + 9 16 6 3 -1. + <_> + 11 16 2 3 3. + <_> + + <_> + 11 17 6 1 -1. + <_> + 14 17 3 1 2. + <_> + + <_> + 3 17 15 2 -1. + <_> + 8 17 5 2 3. + <_> + + <_> + 18 0 2 3 -1. 
+ <_> + 18 1 2 1 3. + <_> + + <_> + 13 1 7 4 -1. + <_> + 13 3 7 2 2. + <_> + + <_> + 13 6 4 4 -1. + <_> + 13 6 2 2 2. + <_> + 15 8 2 2 2. + <_> + + <_> + 17 6 3 4 -1. + <_> + 17 8 3 2 2. + <_> + + <_> + 14 9 2 2 -1. + <_> + 15 9 1 2 2. + <_> + + <_> + 17 17 1 3 -1. + <_> + 17 18 1 1 3. + <_> + + <_> + 3 19 8 1 -1. + <_> + 7 19 4 1 2. + <_> + + <_> + 0 9 3 6 -1. + <_> + 0 12 3 3 2. + <_> + + <_> + 4 7 15 5 -1. + <_> + 9 7 5 5 3. + <_> + + <_> + 6 9 9 5 -1. + <_> + 9 9 3 5 3. + <_> + + <_> + 8 1 6 2 -1. + <_> + 10 1 2 2 3. + <_> + + <_> + 4 0 12 2 -1. + <_> + 10 0 6 2 2. + <_> + + <_> + 7 0 10 3 -1. + <_> + 12 0 5 3 2. + <_> + + <_> + 5 0 9 6 -1. + <_> + 5 2 9 2 3. + <_> + + <_> + 8 3 6 4 -1. + <_> + 8 5 6 2 2. + <_> + + <_> + 17 4 2 3 -1. + <_> + 17 5 2 1 3. + <_> + + <_> + 5 2 4 3 -1. + <_> + 5 3 4 1 3. + <_> + + <_> + 5 9 2 6 -1. + <_> + 6 9 1 6 2. + <_> + + <_> + 14 10 2 6 -1. + <_> + 15 10 1 6 2. + <_> + + <_> + 7 4 3 3 -1. + <_> + 7 5 3 1 3. + <_> + + <_> + 12 4 8 2 -1. + <_> + 12 4 4 1 2. + <_> + 16 5 4 1 2. + <_> + + <_> + 15 8 1 6 -1. + <_> + 15 10 1 2 3. + <_> + + <_> + 4 17 11 3 -1. + <_> + 4 18 11 1 3. + <_> + + <_> + 3 0 16 20 -1. + <_> + 3 10 16 10 2. + <_> + + <_> + 12 4 4 6 -1. + <_> + 12 6 4 2 3. + <_> + + <_> + 11 0 6 6 -1. + <_> + 13 0 2 6 3. + <_> + + <_> + 13 1 6 4 -1. + <_> + 13 1 3 2 2. + <_> + 16 3 3 2 2. + <_> + + <_> + 11 0 6 4 -1. + <_> + 13 0 2 4 3. + <_> + + <_> + 8 6 6 9 -1. + <_> + 10 6 2 9 3. + <_> + + <_> + 7 0 3 4 -1. + <_> + 8 0 1 4 3. + <_> + + <_> + 0 17 14 2 -1. + <_> + 0 17 7 1 2. + <_> + 7 18 7 1 2. + <_> + + <_> + 6 18 2 2 -1. + <_> + 6 18 1 1 2. + <_> + 7 19 1 1 2. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 17 18 2 2 -1. + <_> + 17 18 1 1 2. + <_> + 18 19 1 1 2. + <_> + + <_> + 5 7 1 9 -1. + <_> + 5 10 1 3 3. + <_> + + <_> + 5 3 6 4 -1. + <_> + 7 3 2 4 3. + <_> + + <_> + 1 9 6 2 -1. + <_> + 1 9 3 1 2. + <_> + 4 10 3 1 2. + <_> + + <_> + 6 9 2 3 -1. + <_> + 7 9 1 3 2. + <_> + + <_> + 6 8 6 12 -1. + <_> + 8 8 2 12 3. + <_> + + <_> + 4 18 2 2 -1. + <_> + 4 18 1 1 2. + <_> + 5 19 1 1 2. + <_> + + <_> + 9 1 6 6 -1. + <_> + 9 3 6 2 3. + <_> + + <_> + 6 17 6 2 -1. + <_> + 6 18 6 1 2. + <_> + + <_> + 3 18 16 2 -1. + <_> + 3 19 16 1 2. + <_> + + <_> + 3 0 3 11 -1. + <_> + 4 0 1 11 3. + <_> + + <_> + 13 18 3 1 -1. + <_> + 14 18 1 1 3. + <_> + + <_> + 6 0 9 6 -1. + <_> + 6 2 9 2 3. + <_> + + <_> + 1 2 12 4 -1. + <_> + 1 2 6 2 2. + <_> + 7 4 6 2 2. + <_> + + <_> + 3 3 6 4 -1. + <_> + 5 3 2 4 3. + <_> + + <_> + 12 0 8 1 -1. + <_> + 16 0 4 1 2. + <_> + + <_> + 9 0 6 2 -1. + <_> + 11 0 2 2 3. + <_> + + <_> + 3 3 12 1 -1. + <_> + 9 3 6 1 2. + <_> + + <_> + 2 7 6 2 -1. + <_> + 2 7 3 1 2. + <_> + 5 8 3 1 2. + <_> + + <_> + 0 8 4 6 -1. + <_> + 0 10 4 2 3. + <_> + + <_> + 9 6 3 7 -1. + <_> + 10 6 1 7 3. + <_> + + <_> + 9 6 6 13 -1. + <_> + 11 6 2 13 3. + <_> + + <_> + 11 12 6 1 -1. + <_> + 13 12 2 1 3. + <_> + + <_> + 18 9 2 6 -1. + <_> + 18 12 2 3 2. + <_> + + <_> + 17 2 3 9 -1. + <_> + 18 2 1 9 3. + <_> + + <_> + 13 8 4 6 -1. + <_> + 13 8 2 3 2. + <_> + 15 11 2 3 2. + <_> + + <_> + 4 2 12 6 -1. + <_> + 10 2 6 6 2. + <_> + + <_> + 4 14 16 6 -1. + <_> + 12 14 8 6 2. + <_> + + <_> + 6 19 10 1 -1. + <_> + 11 19 5 1 2. + <_> + + <_> + 6 17 1 3 -1. + <_> + 6 18 1 1 3. + <_> + + <_> + 4 14 10 3 -1. + <_> + 4 15 10 1 3. + <_> + + <_> + 6 0 12 12 -1. + <_> + 6 4 12 4 3. + <_> + + <_> + 5 7 4 2 -1. + <_> + 5 7 2 1 2. + <_> + 7 8 2 1 2. + <_> + + <_> + 17 5 3 2 -1. + <_> + 18 5 1 2 3. + <_> + + <_> + 8 13 6 3 -1. + <_> + 8 14 6 1 3. 
+ <_> + + <_> + 8 13 5 3 -1. + <_> + 8 14 5 1 3. + <_> + + <_> + 13 2 1 18 -1. + <_> + 13 11 1 9 2. + <_> + + <_> + 6 10 9 2 -1. + <_> + 9 10 3 2 3. + <_> + + <_> + 11 0 7 4 -1. + <_> + 11 2 7 2 2. + <_> + + <_> + 1 0 6 8 -1. + <_> + 3 0 2 8 3. + <_> + + <_> + 9 15 3 3 -1. + <_> + 9 16 3 1 3. + <_> + + <_> + 9 17 9 3 -1. + <_> + 9 18 9 1 3. + <_> + + <_> + 12 12 3 3 -1. + <_> + 12 13 3 1 3. + <_> + + <_> + 4 1 3 5 -1. + <_> + 5 1 1 5 3. + <_> + + <_> + 10 14 2 3 -1. + <_> + 10 15 2 1 3. + <_> + + <_> + 18 17 2 2 -1. + <_> + 18 17 1 1 2. + <_> + 19 18 1 1 2. + <_> + + <_> + 18 18 2 2 -1. + <_> + 18 18 1 1 2. + <_> + 19 19 1 1 2. + <_> + + <_> + 18 18 2 2 -1. + <_> + 18 18 1 1 2. + <_> + 19 19 1 1 2. + <_> + + <_> + 4 10 9 1 -1. + <_> + 7 10 3 1 3. + <_> + + <_> + 3 9 6 5 -1. + <_> + 5 9 2 5 3. + <_> + + <_> + 18 8 1 12 -1. + <_> + 18 14 1 6 2. + <_> + + <_> + 0 2 8 6 -1. + <_> + 0 2 4 3 2. + <_> + 4 5 4 3 2. + <_> + + <_> + 9 4 3 3 -1. + <_> + 9 5 3 1 3. + <_> + + <_> + 3 18 2 2 -1. + <_> + 3 18 1 1 2. + <_> + 4 19 1 1 2. + <_> + + <_> + 6 4 4 3 -1. + <_> + 6 5 4 1 3. + <_> + + <_> + 16 7 4 2 -1. + <_> + 16 7 2 1 2. + <_> + 18 8 2 1 2. + <_> + + <_> + 5 17 1 3 -1. + <_> + 5 18 1 1 3. + <_> + + <_> + 2 0 15 20 -1. + <_> + 2 10 15 10 2. + <_> + + <_> + 8 11 6 4 -1. + <_> + 8 11 3 2 2. + <_> + 11 13 3 2 2. + <_> + + <_> + 8 16 4 3 -1. + <_> + 8 17 4 1 3. + <_> + + <_> + 8 18 2 2 -1. + <_> + 8 18 1 1 2. + <_> + 9 19 1 1 2. + <_> + + <_> + 2 16 13 3 -1. + <_> + 2 17 13 1 3. + <_> + + <_> + 16 16 2 2 -1. + <_> + 16 16 1 1 2. + <_> + 17 17 1 1 2. + <_> + + <_> + 8 1 6 3 -1. + <_> + 10 1 2 3 3. + <_> + + <_> + 16 7 2 2 -1. + <_> + 16 7 1 1 2. + <_> + 17 8 1 1 2. + <_> + + <_> + 14 7 4 2 -1. + <_> + 14 7 2 1 2. + <_> + 16 8 2 1 2. + <_> + + <_> + 4 0 14 1 -1. + <_> + 11 0 7 1 2. + <_> + + <_> + 10 4 8 2 -1. + <_> + 10 4 4 1 2. + <_> + 14 5 4 1 2. + <_> + + <_> + 8 2 3 2 -1. + <_> + 9 2 1 2 3. + <_> + + <_> + 12 11 6 3 -1. + <_> + 12 12 6 1 3. + <_> + + <_> + 1 5 1 4 -1. + <_> + 1 7 1 2 2. + <_> + + <_> + 1 1 1 18 -1. + <_> + 1 7 1 6 3. + <_> + + <_> + 11 13 3 2 -1. + <_> + 11 14 3 1 2. + <_> + + <_> + 0 1 12 2 -1. + <_> + 0 1 6 1 2. + <_> + 6 2 6 1 2. + <_> + + <_> + 10 18 2 2 -1. + <_> + 10 18 1 1 2. + <_> + 11 19 1 1 2. + <_> + + <_> + 4 5 4 4 -1. + <_> + 4 5 2 2 2. + <_> + 6 7 2 2 2. + <_> + + <_> + 6 7 1 3 -1. + <_> + 6 8 1 1 3. + <_> + + <_> + 14 10 6 2 -1. + <_> + 16 10 2 2 3. + <_> + + <_> + 16 8 3 6 -1. + <_> + 17 8 1 6 3. + <_> + + <_> + 4 10 6 2 -1. + <_> + 6 10 2 2 3. + <_> + + <_> + 6 5 3 7 -1. + <_> + 7 5 1 7 3. + <_> + + <_> + 0 13 6 6 -1. + <_> + 0 16 6 3 2. + <_> + + <_> + 12 5 1 9 -1. + <_> + 12 8 1 3 3. + <_> + + <_> + 5 9 3 3 -1. + <_> + 6 9 1 3 3. + <_> + + <_> + 7 5 6 13 -1. + <_> + 9 5 2 13 3. + <_> + + <_> + 19 8 1 10 -1. + <_> + 19 13 1 5 2. + <_> + + <_> + 11 18 6 1 -1. + <_> + 13 18 2 1 3. + <_> + + <_> + 9 7 6 12 -1. + <_> + 11 7 2 12 3. + <_> + + <_> + 12 7 6 6 -1. + <_> + 14 7 2 6 3. + <_> + + <_> + 15 8 3 4 -1. + <_> + 16 8 1 4 3. + <_> + + <_> + 6 11 4 2 -1. + <_> + 6 12 4 1 2. + <_> + + <_> + 1 6 6 8 -1. + <_> + 3 6 2 8 3. + <_> + + <_> + 11 15 6 5 -1. + <_> + 13 15 2 5 3. + <_> + + <_> + 15 17 4 2 -1. + <_> + 15 18 4 1 2. + <_> + + <_> + 13 11 6 1 -1. + <_> + 15 11 2 1 3. + <_> + + <_> + 5 18 2 2 -1. + <_> + 5 18 1 1 2. + <_> + 6 19 1 1 2. + <_> + + <_> + 4 8 4 4 -1. + <_> + 4 8 2 2 2. + <_> + 6 10 2 2 2. + <_> + + <_> + 11 7 9 3 -1. + <_> + 11 8 9 1 3. + <_> + + <_> + 0 3 10 4 -1. + <_> + 0 3 5 2 2. + <_> + 5 5 5 2 2. + <_> + + <_> + 7 18 6 1 -1. + <_> + 9 18 2 1 3. 
+ <_> + + <_> + 0 8 3 3 -1. + <_> + 0 9 3 1 3. + <_> + + <_> + 0 0 6 8 -1. + <_> + 0 0 3 4 2. + <_> + 3 4 3 4 2. + <_> + + <_> + 7 6 3 8 -1. + <_> + 8 6 1 8 3. + <_> + + <_> + 13 7 7 3 -1. + <_> + 13 8 7 1 3. + <_> + + <_> + 3 3 2 2 -1. + <_> + 3 4 2 1 2. + <_> + + <_> + 0 3 3 3 -1. + <_> + 0 4 3 1 3. + <_> + + <_> + 9 3 5 2 -1. + <_> + 9 4 5 1 2. + <_> + + <_> + 6 5 9 4 -1. + <_> + 9 5 3 4 3. + <_> + + <_> + 3 10 12 3 -1. + <_> + 7 10 4 3 3. + <_> + + <_> + 8 7 3 6 -1. + <_> + 9 7 1 6 3. + <_> + + <_> + 5 5 6 5 -1. + <_> + 8 5 3 5 2. + <_> + + <_> + 0 5 2 3 -1. + <_> + 0 6 2 1 3. + <_> + + <_> + 9 7 3 4 -1. + <_> + 10 7 1 4 3. + <_> + + <_> + 1 0 6 15 -1. + <_> + 3 0 2 15 3. + <_> + + <_> + 15 1 3 5 -1. + <_> + 16 1 1 5 3. + <_> + + <_> + 9 2 3 10 -1. + <_> + 10 2 1 10 3. + <_> + + <_> + 8 8 6 12 -1. + <_> + 10 8 2 12 3. + <_> + + <_> + 16 4 3 4 -1. + <_> + 16 6 3 2 2. + <_> + + <_> + 16 7 2 2 -1. + <_> + 16 7 1 1 2. + <_> + 17 8 1 1 2. + <_> + + <_> + 13 0 6 9 -1. + <_> + 13 3 6 3 3. + <_> + + <_> + 7 17 1 3 -1. + <_> + 7 18 1 1 3. + <_> + + <_> + 12 1 4 2 -1. + <_> + 12 2 4 1 2. + <_> + + <_> + 17 3 1 3 -1. + <_> + 17 4 1 1 3. + <_> + + <_> + 0 16 9 3 -1. + <_> + 0 17 9 1 3. + <_> + + <_> + 3 6 2 4 -1. + <_> + 3 6 1 2 2. + <_> + 4 8 1 2 2. + <_> + + <_> + 13 18 3 1 -1. + <_> + 14 18 1 1 3. + <_> + + <_> + 0 18 4 2 -1. + <_> + 2 18 2 2 2. + <_> + + <_> + 1 19 2 1 -1. + <_> + 2 19 1 1 2. + <_> + + <_> + 0 18 4 2 -1. + <_> + 0 19 4 1 2. + <_> + + <_> + 2 17 1 3 -1. + <_> + 2 18 1 1 3. + <_> + + <_> + 4 8 3 5 -1. + <_> + 5 8 1 5 3. + <_> + + <_> + 2 1 6 7 -1. + <_> + 4 1 2 7 3. + <_> + + <_> + 3 6 2 8 -1. + <_> + 3 6 1 4 2. + <_> + 4 10 1 4 2. + <_> + + <_> + 4 5 11 10 -1. + <_> + 4 10 11 5 2. + <_> + + <_> + 0 13 20 2 -1. + <_> + 10 13 10 2 2. + <_> + + <_> + 1 13 16 3 -1. + <_> + 9 13 8 3 2. + <_> + + <_> + 16 4 4 4 -1. + <_> + 16 4 2 2 2. + <_> + 18 6 2 2 2. + <_> + + <_> + 16 0 4 12 -1. + <_> + 16 0 2 6 2. + <_> + 18 6 2 6 2. + <_> + + <_> + 14 15 3 1 -1. + <_> + 15 15 1 1 3. + <_> + + <_> + 3 4 12 10 -1. + <_> + 3 9 12 5 2. + <_> + + <_> + 9 18 2 2 -1. + <_> + 9 18 1 1 2. + <_> + 10 19 1 1 2. + <_> + + <_> + 9 18 2 2 -1. + <_> + 9 18 1 1 2. + <_> + 10 19 1 1 2. + <_> + + <_> + 13 4 2 14 -1. + <_> + 13 4 1 7 2. + <_> + 14 11 1 7 2. + <_> + + <_> + 4 2 6 4 -1. + <_> + 7 2 3 4 2. + <_> + + <_> + 0 0 18 20 -1. + <_> + 0 0 9 10 2. + <_> + 9 10 9 10 2. + <_> + + <_> + 15 11 1 2 -1. + <_> + 15 12 1 1 2. + <_> + + <_> + 16 10 2 4 -1. + <_> + 16 10 1 2 2. + <_> + 17 12 1 2 2. + <_> + + <_> + 18 17 2 2 -1. + <_> + 18 17 1 1 2. + <_> + 19 18 1 1 2. + <_> + + <_> + 9 17 1 2 -1. + <_> + 9 18 1 1 2. + <_> + + <_> + 8 4 9 6 -1. + <_> + 11 4 3 6 3. + <_> + + <_> + 6 9 9 10 -1. + <_> + 9 9 3 10 3. + <_> + + <_> + 5 0 5 4 -1. + <_> + 5 2 5 2 2. + <_> + + <_> + 5 7 11 4 -1. + <_> + 5 9 11 2 2. + <_> + + <_> + 2 4 2 14 -1. + <_> + 3 4 1 14 2. + <_> + + <_> + 8 6 3 5 -1. + <_> + 9 6 1 5 3. + <_> + + <_> + 8 4 3 9 -1. + <_> + 9 4 1 9 3. + <_> + + <_> + 0 8 20 6 -1. + <_> + 0 10 20 2 3. + <_> + + <_> + 14 16 6 1 -1. + <_> + 17 16 3 1 2. + <_> + + <_> + 17 18 2 2 -1. + <_> + 17 19 2 1 2. + <_> + + <_> + 8 17 6 3 -1. + <_> + 10 17 2 3 3. + <_> + + <_> + 4 1 9 15 -1. + <_> + 7 1 3 15 3. + <_> + + <_> + 11 5 3 12 -1. + <_> + 12 5 1 12 3. + <_> + + <_> + 0 15 4 3 -1. + <_> + 0 16 4 1 3. + <_> + + <_> + 0 0 15 1 -1. + <_> + 5 0 5 1 3. + <_> + + <_> + 6 0 6 4 -1. + <_> + 8 0 2 4 3. + <_> + + <_> + 2 0 9 3 -1. + <_> + 5 0 3 3 3. + <_> + + <_> + 13 6 3 7 -1. + <_> + 14 6 1 7 3. + <_> + + <_> + 7 6 4 2 -1. 
+ <_> + 7 7 4 1 2. + <_> + + <_> + 6 18 6 1 -1. + <_> + 8 18 2 1 3. + <_> + + <_> + 18 6 2 2 -1. + <_> + 18 7 2 1 2. + <_> + + <_> + 6 4 7 3 -1. + <_> + 6 5 7 1 3. + <_> + + <_> + 12 7 3 1 -1. + <_> + 13 7 1 1 3. + <_> + + <_> + 15 1 2 10 -1. + <_> + 15 1 1 5 2. + <_> + 16 6 1 5 2. + <_> + + <_> + 0 18 2 2 -1. + <_> + 0 19 2 1 2. + <_> + + <_> + 19 4 1 8 -1. + <_> + 19 8 1 4 2. + <_> + + <_> + 1 17 1 3 -1. + <_> + 1 18 1 1 3. + <_> + + <_> + 0 15 6 4 -1. + <_> + 0 15 3 2 2. + <_> + 3 17 3 2 2. + <_> + + <_> + 19 0 1 18 -1. + <_> + 19 6 1 6 3. + <_> + + <_> + 10 2 6 2 -1. + <_> + 12 2 2 2 3. + <_> + + <_> + 2 8 12 2 -1. + <_> + 6 8 4 2 3. + <_> + + <_> + 16 0 4 1 -1. + <_> + 18 0 2 1 2. + <_> + + <_> + 8 4 2 6 -1. + <_> + 8 7 2 3 2. + <_> + + <_> + 14 5 2 10 -1. + <_> + 15 5 1 10 2. + <_> + + <_> + 13 4 2 2 -1. + <_> + 13 5 2 1 2. + <_> + + <_> + 11 1 3 6 -1. + <_> + 11 3 3 2 3. + <_> + + <_> + 6 9 12 2 -1. + <_> + 10 9 4 2 3. + <_> + + <_> + 9 16 4 2 -1. + <_> + 9 17 4 1 2. + <_> + + <_> + 5 14 15 4 -1. + <_> + 5 16 15 2 2. + <_> + + <_> + 18 16 2 2 -1. + <_> + 18 17 2 1 2. + <_> + + <_> + 16 18 2 2 -1. + <_> + 16 18 1 1 2. + <_> + 17 19 1 1 2. + <_> + + <_> + 6 4 3 8 -1. + <_> + 7 4 1 8 3. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 0 8 1 6 -1. + <_> + 0 10 1 2 3. + <_> + + <_> + 11 2 9 6 -1. + <_> + 14 2 3 6 3. + <_> + + <_> + 12 2 6 4 -1. + <_> + 14 2 2 4 3. + <_> + + <_> + 1 7 2 4 -1. + <_> + 1 9 2 2 2. + <_> + + <_> + 13 1 6 4 -1. + <_> + 13 3 6 2 2. + <_> + + <_> + 4 10 2 10 -1. + <_> + 4 10 1 5 2. + <_> + 5 15 1 5 2. + <_> + + <_> + 2 16 9 3 -1. + <_> + 5 16 3 3 3. + <_> + + <_> + 1 2 3 9 -1. + <_> + 2 2 1 9 3. + <_> + + <_> + 19 7 1 4 -1. + <_> + 19 9 1 2 2. + <_> + + <_> + 14 11 6 8 -1. + <_> + 14 11 3 4 2. + <_> + 17 15 3 4 2. + <_> + + <_> + 15 12 4 6 -1. + <_> + 15 12 2 3 2. + <_> + 17 15 2 3 2. + <_> + + <_> + 16 15 2 2 -1. + <_> + 16 15 1 1 2. + <_> + 17 16 1 1 2. + <_> + + <_> + 17 16 2 2 -1. + <_> + 17 16 1 1 2. + <_> + 18 17 1 1 2. + <_> + + <_> + 17 16 2 2 -1. + <_> + 17 16 1 1 2. + <_> + 18 17 1 1 2. + <_> + + <_> + 2 3 2 2 -1. + <_> + 2 3 1 1 2. + <_> + 3 4 1 1 2. + <_> + + <_> + 10 10 3 3 -1. + <_> + 11 10 1 3 3. + <_> + + <_> + 5 9 7 8 -1. + <_> + 5 13 7 4 2. + <_> + + <_> + 7 16 2 2 -1. + <_> + 7 16 1 1 2. + <_> + 8 17 1 1 2. + <_> + + <_> + 7 16 2 2 -1. + <_> + 7 16 1 1 2. + <_> + 8 17 1 1 2. + <_> + + <_> + 9 8 10 3 -1. + <_> + 14 8 5 3 2. + <_> + + <_> + 6 7 4 8 -1. + <_> + 6 7 2 4 2. + <_> + 8 11 2 4 2. + <_> + + <_> + 1 6 4 3 -1. + <_> + 1 7 4 1 3. + <_> + + <_> + 6 10 6 10 -1. + <_> + 8 10 2 10 3. + <_> + + <_> + 4 6 3 6 -1. + <_> + 5 6 1 6 3. + <_> + + <_> + 3 10 4 4 -1. + <_> + 3 10 2 2 2. + <_> + 5 12 2 2 2. + <_> + + <_> + 3 10 4 4 -1. + <_> + 3 10 2 2 2. + <_> + 5 12 2 2 2. + <_> + + <_> + 3 10 4 4 -1. + <_> + 3 10 2 2 2. + <_> + 5 12 2 2 2. + <_> + + <_> + 14 8 2 6 -1. + <_> + 15 8 1 6 2. + <_> + + <_> + 3 10 4 4 -1. + <_> + 3 10 2 2 2. + <_> + 5 12 2 2 2. + <_> + + <_> + 3 10 4 4 -1. + <_> + 3 10 2 2 2. + <_> + 5 12 2 2 2. + <_> + + <_> + 12 4 3 9 -1. + <_> + 13 4 1 9 3. + <_> + + <_> + 12 3 1 12 -1. + <_> + 12 7 1 4 3. + <_> + + <_> + 2 0 18 1 -1. + <_> + 8 0 6 1 3. + <_> + + <_> + 10 0 10 6 -1. + <_> + 10 0 5 3 2. + <_> + 15 3 5 3 2. + <_> + + <_> + 18 16 2 2 -1. + <_> + 18 17 2 1 2. + <_> + + <_> + 3 5 4 2 -1. + <_> + 3 5 2 1 2. + <_> + 5 6 2 1 2. + <_> + + <_> + 11 8 3 3 -1. + <_> + 12 8 1 3 3. + <_> + + <_> + 11 7 3 5 -1. + <_> + 12 7 1 5 3. + <_> + + <_> + 3 19 15 1 -1. + <_> + 8 19 5 1 3. + <_> + + <_> + 8 13 3 2 -1. 
+ <_> + 8 14 3 1 2. + <_> + + <_> + 2 12 8 4 -1. + <_> + 2 12 4 2 2. + <_> + 6 14 4 2 2. + <_> + + <_> + 16 16 2 2 -1. + <_> + 16 16 1 1 2. + <_> + 17 17 1 1 2. + <_> + + <_> + 7 0 3 2 -1. + <_> + 8 0 1 2 3. + <_> + + <_> + 6 7 2 5 -1. + <_> + 7 7 1 5 2. + <_> + + <_> + 18 0 2 17 -1. + <_> + 19 0 1 17 2. + <_> + + <_> + 16 16 1 3 -1. + <_> + 16 17 1 1 3. + <_> + + <_> + 14 8 3 7 -1. + <_> + 15 8 1 7 3. + <_> + + <_> + 10 17 2 2 -1. + <_> + 10 17 1 1 2. + <_> + 11 18 1 1 2. + <_> + + <_> + 4 9 1 3 -1. + <_> + 4 10 1 1 3. + <_> + + <_> + 18 10 2 3 -1. + <_> + 18 11 2 1 3. + <_> + + <_> + 12 1 3 10 -1. + <_> + 13 1 1 10 3. + <_> + + <_> + 8 12 9 1 -1. + <_> + 11 12 3 1 3. + <_> + + <_> + 5 18 2 2 -1. + <_> + 5 18 1 1 2. + <_> + 6 19 1 1 2. + <_> + + <_> + 19 6 1 9 -1. + <_> + 19 9 1 3 3. + <_> + + <_> + 4 7 2 4 -1. + <_> + 4 7 1 2 2. + <_> + 5 9 1 2 2. + <_> + + <_> + 1 4 6 14 -1. + <_> + 3 4 2 14 3. + <_> + + <_> + 10 5 9 3 -1. + <_> + 13 5 3 3 3. + <_> + + <_> + 18 7 2 6 -1. + <_> + 18 9 2 2 3. + <_> + + <_> + 5 6 2 7 -1. + <_> + 6 6 1 7 2. + <_> + + <_> + 10 4 6 8 -1. + <_> + 13 4 3 8 2. + <_> + + <_> + 0 8 2 9 -1. + <_> + 0 11 2 3 3. + <_> + + <_> + 0 7 5 3 -1. + <_> + 0 8 5 1 3. + <_> + + <_> + 8 1 7 2 -1. + <_> + 8 2 7 1 2. + <_> + + <_> + 7 5 3 5 -1. + <_> + 8 5 1 5 3. + <_> + + <_> + 19 2 1 2 -1. + <_> + 19 3 1 1 2. + <_> + + <_> + 6 7 10 11 -1. + <_> + 11 7 5 11 2. + <_> + + <_> + 9 19 6 1 -1. + <_> + 11 19 2 1 3. + <_> + + <_> + 3 0 12 1 -1. + <_> + 7 0 4 1 3. + <_> + + <_> + 4 1 6 5 -1. + <_> + 6 1 2 5 3. + <_> + + <_> + 6 12 12 6 -1. + <_> + 10 12 4 6 3. + <_> + + <_> + 16 13 2 3 -1. + <_> + 16 14 2 1 3. + <_> + + <_> + 7 14 4 2 -1. + <_> + 7 15 4 1 2. + <_> + + <_> + 7 14 2 2 -1. + <_> + 7 15 2 1 2. + <_> + + <_> + 3 10 2 4 -1. + <_> + 3 10 1 2 2. + <_> + 4 12 1 2 2. + <_> + + <_> + 0 3 2 6 -1. + <_> + 0 5 2 2 3. + <_> + + <_> + 1 10 2 2 -1. + <_> + 1 10 1 1 2. + <_> + 2 11 1 1 2. + <_> + + <_> + 16 4 4 3 -1. + <_> + 16 5 4 1 3. + <_> + + <_> + 5 10 2 4 -1. + <_> + 5 10 1 2 2. + <_> + 6 12 1 2 2. + <_> + + <_> + 5 11 13 2 -1. + <_> + 5 12 13 1 2. + <_> + + <_> + 10 2 3 11 -1. + <_> + 11 2 1 11 3. + <_> + + <_> + 10 2 4 4 -1. + <_> + 10 4 4 2 2. + <_> + + <_> + 8 8 6 2 -1. + <_> + 10 8 2 2 3. + <_> + + <_> + 11 2 3 3 -1. + <_> + 12 2 1 3 3. + <_> + + <_> + 6 18 14 2 -1. + <_> + 6 18 7 1 2. + <_> + 13 19 7 1 2. + <_> + + <_> + 17 7 1 12 -1. + <_> + 17 11 1 4 3. + <_> + + <_> + 10 5 10 3 -1. + <_> + 10 6 10 1 3. + <_> + + <_> + 6 1 3 3 -1. + <_> + 7 1 1 3 3. + <_> + + <_> + 13 8 3 1 -1. + <_> + 14 8 1 1 3. + <_> + + <_> + 10 14 2 6 -1. + <_> + 10 16 2 2 3. + <_> + + <_> + 4 1 12 14 -1. + <_> + 8 1 4 14 3. + <_> + + <_> + 14 1 6 14 -1. + <_> + 16 1 2 14 3. + <_> + + <_> + 3 16 2 2 -1. + <_> + 3 16 1 1 2. + <_> + 4 17 1 1 2. + <_> + + <_> + 0 16 2 2 -1. + <_> + 0 17 2 1 2. + <_> + + <_> + 15 6 4 6 -1. + <_> + 15 6 2 3 2. + <_> + 17 9 2 3 2. + <_> + + <_> + 12 5 2 2 -1. + <_> + 12 6 2 1 2. + <_> + + <_> + 7 6 6 13 -1. + <_> + 9 6 2 13 3. + <_> + + <_> + 1 9 6 5 -1. + <_> + 3 9 2 5 3. + <_> + + <_> + 0 5 3 4 -1. + <_> + 0 7 3 2 2. + <_> + + <_> + 4 1 16 2 -1. + <_> + 4 1 8 1 2. + <_> + 12 2 8 1 2. + <_> + + <_> + 1 18 4 2 -1. + <_> + 1 18 2 1 2. + <_> + 3 19 2 1 2. + <_> + + <_> + 7 7 3 4 -1. + <_> + 8 7 1 4 3. + <_> + + <_> + 3 4 9 3 -1. + <_> + 6 4 3 3 3. + <_> + + <_> + 4 6 6 10 -1. + <_> + 6 6 2 10 3. + <_> + + <_> + 9 0 8 10 -1. + <_> + 13 0 4 10 2. + <_> + + <_> + 8 0 8 1 -1. + <_> + 12 0 4 1 2. + <_> + + <_> + 6 2 8 16 -1. + <_> + 6 2 4 8 2. + <_> + 10 10 4 8 2. 
+ <_> + + <_> + 14 10 2 10 -1. + <_> + 14 10 1 5 2. + <_> + 15 15 1 5 2. + <_> + + <_> + 12 11 1 2 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 16 0 3 8 -1. + <_> + 17 0 1 8 3. + <_> + + <_> + 14 0 6 10 -1. + <_> + 17 0 3 10 2. + <_> + + <_> + 16 0 3 5 -1. + <_> + 17 0 1 5 3. + <_> + + <_> + 4 5 11 2 -1. + <_> + 4 6 11 1 2. + <_> + + <_> + 1 0 2 1 -1. + <_> + 2 0 1 1 2. + <_> + + <_> + 0 0 2 3 -1. + <_> + 0 1 2 1 3. + <_> + + <_> + 11 6 6 11 -1. + <_> + 13 6 2 11 3. + <_> + + <_> + 14 0 3 1 -1. + <_> + 15 0 1 1 3. + <_> + + <_> + 19 7 1 2 -1. + <_> + 19 8 1 1 2. + <_> + + <_> + 17 0 3 9 -1. + <_> + 18 0 1 9 3. + <_> + + <_> + 12 7 3 4 -1. + <_> + 13 7 1 4 3. + <_> + + <_> + 0 1 14 2 -1. + <_> + 0 1 7 1 2. + <_> + 7 2 7 1 2. + <_> + + <_> + 3 1 3 2 -1. + <_> + 4 1 1 2 3. + <_> + + <_> + 4 0 15 2 -1. + <_> + 9 0 5 2 3. + <_> + + <_> + 10 2 6 1 -1. + <_> + 12 2 2 1 3. + <_> + + <_> + 9 4 6 11 -1. + <_> + 11 4 2 11 3. + <_> + + <_> + 2 16 2 4 -1. + <_> + 2 18 2 2 2. + <_> + + <_> + 6 17 6 3 -1. + <_> + 8 17 2 3 3. + <_> + + <_> + 7 9 6 2 -1. + <_> + 9 9 2 2 3. + <_> + + <_> + 6 8 9 2 -1. + <_> + 9 8 3 2 3. + <_> + + <_> + 6 6 2 10 -1. + <_> + 6 6 1 5 2. + <_> + 7 11 1 5 2. + <_> + + <_> + 0 11 2 3 -1. + <_> + 0 12 2 1 3. + <_> + + <_> + 11 15 4 1 -1. + <_> + 13 15 2 1 2. + <_> + + <_> + 6 17 1 2 -1. + <_> + 6 18 1 1 2. + <_> + + <_> + 0 0 6 20 -1. + <_> + 2 0 2 20 3. + <_> + + <_> + 3 10 2 2 -1. + <_> + 4 10 1 2 2. + <_> + + <_> + 4 7 3 5 -1. + <_> + 5 7 1 5 3. + <_> + + <_> + 3 12 6 2 -1. + <_> + 5 12 2 2 3. + <_> + + <_> + 6 15 7 4 -1. + <_> + 6 17 7 2 2. + <_> + + <_> + 17 16 2 2 -1. + <_> + 17 16 1 1 2. + <_> + 18 17 1 1 2. + <_> + + <_> + 15 1 3 16 -1. + <_> + 16 1 1 16 3. + <_> + + <_> + 6 16 6 3 -1. + <_> + 8 16 2 3 3. + <_> + + <_> + 15 14 3 2 -1. + <_> + 15 15 3 1 2. + <_> + + <_> + 12 16 1 2 -1. + <_> + 12 17 1 1 2. + <_> + + <_> + 0 2 4 4 -1. + <_> + 0 2 2 2 2. + <_> + 2 4 2 2 2. + <_> + + <_> + 1 1 6 4 -1. + <_> + 1 1 3 2 2. + <_> + 4 3 3 2 2. + <_> + + <_> + 1 18 1 2 -1. + <_> + 1 19 1 1 2. + <_> + + <_> + 4 7 2 3 -1. + <_> + 4 8 2 1 3. + <_> + + <_> + 1 0 9 14 -1. + <_> + 1 7 9 7 2. + <_> + + <_> + 4 9 2 6 -1. + <_> + 4 9 1 3 2. + <_> + 5 12 1 3 2. + <_> + + <_> + 3 9 4 3 -1. + <_> + 5 9 2 3 2. + <_> + + <_> + 0 9 2 4 -1. + <_> + 0 11 2 2 2. + <_> + + <_> + 16 6 3 10 -1. + <_> + 17 6 1 10 3. + <_> + + <_> + 16 11 2 1 -1. + <_> + 17 11 1 1 2. + <_> + + <_> + 5 7 4 4 -1. + <_> + 5 9 4 2 2. + <_> + + <_> + 10 11 9 2 -1. + <_> + 13 11 3 2 3. + <_> + + <_> + 15 10 2 2 -1. + <_> + 15 10 1 1 2. + <_> + 16 11 1 1 2. + <_> + + <_> + 10 6 6 14 -1. + <_> + 10 13 6 7 2. + <_> + + <_> + 14 7 3 5 -1. + <_> + 15 7 1 5 3. + <_> + + <_> + 6 11 12 3 -1. + <_> + 10 11 4 3 3. + <_> + + <_> + 17 16 1 2 -1. + <_> + 17 17 1 1 2. + <_> + + <_> + 8 5 5 4 -1. + <_> + 8 7 5 2 2. + <_> + + <_> + 11 6 4 2 -1. + <_> + 11 7 4 1 2. + <_> + + <_> + 3 4 8 2 -1. + <_> + 3 4 4 1 2. + <_> + 7 5 4 1 2. + <_> + + <_> + 0 8 6 6 -1. + <_> + 2 8 2 6 3. + <_> + + <_> + 7 4 6 2 -1. + <_> + 7 5 6 1 2. + <_> + + <_> + 7 3 6 3 -1. + <_> + 9 3 2 3 3. + <_> + + <_> + 2 17 3 3 -1. + <_> + 2 18 3 1 3. + <_> + + <_> + 3 10 6 1 -1. + <_> + 5 10 2 1 3. + <_> + + <_> + 7 2 6 2 -1. + <_> + 9 2 2 2 3. + <_> + + <_> + 4 11 9 1 -1. + <_> + 7 11 3 1 3. + <_> + + <_> + 7 7 11 12 -1. + <_> + 7 13 11 6 2. + <_> + + <_> + 3 2 3 4 -1. + <_> + 4 2 1 4 3. + <_> + + <_> + 9 7 9 3 -1. + <_> + 12 7 3 3 3. + <_> + + <_> + 15 11 2 6 -1. + <_> + 15 11 1 3 2. + <_> + 16 14 1 3 2. + <_> + + <_> + 0 5 5 3 -1. + <_> + 0 6 5 1 3. + <_> + + <_> + 8 1 6 12 -1. 
+ <_> + 10 1 2 12 3. + <_> + + <_> + 3 7 15 13 -1. + <_> + 8 7 5 13 3. + <_> + + <_> + 0 9 9 9 -1. + <_> + 0 12 9 3 3. + <_> + + <_> + 16 0 3 8 -1. + <_> + 17 0 1 8 3. + <_> + + <_> + 16 2 4 2 -1. + <_> + 18 2 2 2 2. + <_> + + <_> + 13 0 6 5 -1. + <_> + 16 0 3 5 2. + <_> + + <_> + 15 1 3 2 -1. + <_> + 16 1 1 2 3. + <_> + + <_> + 11 8 3 2 -1. + <_> + 12 8 1 2 3. + <_> + + <_> + 1 8 2 12 -1. + <_> + 1 8 1 6 2. + <_> + 2 14 1 6 2. + <_> + + <_> + 0 1 6 12 -1. + <_> + 2 1 2 12 3. + <_> + + <_> + 19 17 1 3 -1. + <_> + 19 18 1 1 3. + <_> + + <_> + 11 3 3 10 -1. + <_> + 12 3 1 10 3. + <_> + + <_> + 8 1 9 8 -1. + <_> + 11 1 3 8 3. + <_> + + <_> + 18 16 2 2 -1. + <_> + 18 16 1 1 2. + <_> + 19 17 1 1 2. + <_> + + <_> + 18 16 2 2 -1. + <_> + 18 16 1 1 2. + <_> + 19 17 1 1 2. + <_> + + <_> + 6 13 2 6 -1. + <_> + 6 15 2 2 3. + <_> + + <_> + 9 14 2 2 -1. + <_> + 9 15 2 1 2. + <_> + + <_> + 14 10 2 4 -1. + <_> + 14 10 1 2 2. + <_> + 15 12 1 2 2. + <_> + + <_> + 0 15 2 2 -1. + <_> + 0 15 1 1 2. + <_> + 1 16 1 1 2. + <_> + + <_> + 6 7 2 2 -1. + <_> + 6 7 1 1 2. + <_> + 7 8 1 1 2. + <_> + + <_> + 11 18 2 2 -1. + <_> + 11 18 1 1 2. + <_> + 12 19 1 1 2. + <_> + + <_> + 0 0 6 4 -1. + <_> + 0 0 3 2 2. + <_> + 3 2 3 2 2. + <_> + + <_> + 4 1 6 6 -1. + <_> + 6 1 2 6 3. + <_> + + <_> + 15 13 5 4 -1. + <_> + 15 15 5 2 2. + <_> + + <_> + 7 17 6 1 -1. + <_> + 9 17 2 1 3. + <_> + + <_> + 16 19 4 1 -1. + <_> + 18 19 2 1 2. + <_> + + <_> + 16 16 4 4 -1. + <_> + 18 16 2 4 2. + <_> + + <_> + 7 8 9 4 -1. + <_> + 10 8 3 4 3. + <_> + + <_> + 16 18 2 2 -1. + <_> + 16 18 1 1 2. + <_> + 17 19 1 1 2. + <_> + + <_> + 2 9 2 4 -1. + <_> + 2 9 1 2 2. + <_> + 3 11 1 2 2. + <_> + + <_> + 0 3 8 4 -1. + <_> + 0 3 4 2 2. + <_> + 4 5 4 2 2. + <_> + + <_> + 0 1 8 1 -1. + <_> + 4 1 4 1 2. + <_> + + <_> + 0 5 8 9 -1. + <_> + 4 5 4 9 2. + <_> + + <_> + 7 18 6 2 -1. + <_> + 9 18 2 2 3. + <_> + + <_> + 0 4 1 12 -1. + <_> + 0 8 1 4 3. + <_> + + <_> + 19 13 1 6 -1. + <_> + 19 15 1 2 3. + <_> + + <_> + 2 8 6 8 -1. + <_> + 4 8 2 8 3. + <_> + + <_> + 0 0 9 17 -1. + <_> + 3 0 3 17 3. + <_> + + <_> + 7 9 6 8 -1. + <_> + 9 9 2 8 3. + <_> + + <_> + 5 10 9 4 -1. + <_> + 8 10 3 4 3. + <_> + + <_> + 5 0 8 3 -1. + <_> + 5 1 8 1 3. + <_> + + <_> + 16 6 4 4 -1. + <_> + 16 6 2 2 2. + <_> + 18 8 2 2 2. + <_> + + <_> + 17 4 2 8 -1. + <_> + 17 4 1 4 2. + <_> + 18 8 1 4 2. + <_> + + <_> + 2 16 1 3 -1. + <_> + 2 17 1 1 3. + <_> + + <_> + 2 16 1 3 -1. + <_> + 2 17 1 1 3. + <_> + + <_> + 11 0 1 3 -1. + <_> + 11 1 1 1 3. + <_> + + <_> + 11 2 9 7 -1. + <_> + 14 2 3 7 3. + <_> + + <_> + 10 2 3 6 -1. + <_> + 11 2 1 6 3. + <_> + + <_> + 5 9 15 2 -1. + <_> + 5 10 15 1 2. + <_> + + <_> + 8 16 6 2 -1. + <_> + 8 17 6 1 2. + <_> + + <_> + 9 16 10 2 -1. + <_> + 9 16 5 1 2. + <_> + 14 17 5 1 2. + <_> + + <_> + 9 17 2 2 -1. + <_> + 9 17 1 1 2. + <_> + 10 18 1 1 2. + <_> + + <_> + 10 15 6 4 -1. + <_> + 10 15 3 2 2. + <_> + 13 17 3 2 2. + <_> + + <_> + 4 5 15 12 -1. + <_> + 9 5 5 12 3. + <_> + + <_> + 11 13 2 3 -1. + <_> + 11 14 2 1 3. + <_> + + <_> + 8 13 7 3 -1. + <_> + 8 14 7 1 3. + <_> + + <_> + 1 12 1 2 -1. + <_> + 1 13 1 1 2. + <_> + + <_> + 16 18 2 2 -1. + <_> + 16 18 1 1 2. + <_> + 17 19 1 1 2. + <_> + + <_> + 1 19 18 1 -1. + <_> + 7 19 6 1 3. + <_> + + <_> + 1 17 6 1 -1. + <_> + 4 17 3 1 2. + <_> + + <_> + 1 3 1 12 -1. + <_> + 1 9 1 6 2. + <_> + + <_> + 0 9 3 6 -1. + <_> + 0 11 3 2 3. + <_> + + <_> + 5 4 3 10 -1. + <_> + 6 4 1 10 3. + <_> + + <_> + 6 17 2 1 -1. + <_> + 7 17 1 1 2. + <_> + + <_> + 1 0 6 12 -1. + <_> + 3 0 2 12 3. + <_> + + <_> + 4 7 9 2 -1. 
+ <_> + 7 7 3 2 3. + <_> + + <_> + 6 11 9 1 -1. + <_> + 9 11 3 1 3. + <_> + + <_> + 17 10 2 10 -1. + <_> + 17 15 2 5 2. + <_> + + <_> + 4 10 2 10 -1. + <_> + 4 10 1 5 2. + <_> + 5 15 1 5 2. + <_> + + <_> + 12 3 3 12 -1. + <_> + 13 3 1 12 3. + <_> + + <_> + 15 3 4 6 -1. + <_> + 15 3 2 3 2. + <_> + 17 6 2 3 2. + <_> + + <_> + 12 8 3 3 -1. + <_> + 13 8 1 3 3. + <_> + + <_> + 4 14 2 4 -1. + <_> + 4 16 2 2 2. + <_> + + <_> + 6 16 1 3 -1. + <_> + 6 17 1 1 3. + <_> + + <_> + 1 1 2 3 -1. + <_> + 2 1 1 3 2. + <_> + + <_> + 0 2 4 1 -1. + <_> + 2 2 2 1 2. + <_> + + <_> + 8 17 12 3 -1. + <_> + 12 17 4 3 3. + <_> + + <_> + 9 16 6 4 -1. + <_> + 11 16 2 4 3. + <_> + + <_> + 4 6 3 6 -1. + <_> + 4 9 3 3 2. + <_> + + <_> + 6 2 12 9 -1. + <_> + 6 5 12 3 3. + <_> + + <_> + 6 0 14 20 -1. + <_> + 6 0 7 10 2. + <_> + 13 10 7 10 2. + <_> + + <_> + 15 16 2 2 -1. + <_> + 15 16 1 1 2. + <_> + 16 17 1 1 2. + <_> + + <_> + 15 16 2 2 -1. + <_> + 15 16 1 1 2. + <_> + 16 17 1 1 2. + <_> + + <_> + 19 8 1 3 -1. + <_> + 19 9 1 1 3. + <_> + + <_> + 13 4 1 2 -1. + <_> + 13 5 1 1 2. + <_> + + <_> + 0 4 4 2 -1. + <_> + 0 5 4 1 2. + <_> + + <_> + 19 5 1 6 -1. + <_> + 19 7 1 2 3. + <_> + + <_> + 16 0 2 1 -1. + <_> + 17 0 1 1 2. + <_> + + <_> + 13 1 1 3 -1. + <_> + 13 2 1 1 3. + <_> + + <_> + 17 17 1 3 -1. + <_> + 17 18 1 1 3. + <_> + + <_> + 5 4 8 8 -1. + <_> + 5 4 4 4 2. + <_> + 9 8 4 4 2. + <_> + + <_> + 1 2 2 2 -1. + <_> + 1 2 1 1 2. + <_> + 2 3 1 1 2. + <_> + + <_> + 0 0 8 6 -1. + <_> + 0 0 4 3 2. + <_> + 4 3 4 3 2. + <_> + + <_> + 6 3 4 2 -1. + <_> + 6 4 4 1 2. + <_> + + <_> + 1 0 3 3 -1. + <_> + 1 1 3 1 3. + <_> + + <_> + 6 1 7 2 -1. + <_> + 6 2 7 1 2. + <_> + + <_> + 2 6 12 6 -1. + <_> + 6 6 4 6 3. + <_> + + <_> + 1 16 9 2 -1. + <_> + 4 16 3 2 3. + <_> + + <_> + 7 15 6 4 -1. + <_> + 9 15 2 4 3. + <_> + + <_> + 6 15 12 1 -1. + <_> + 12 15 6 1 2. + <_> + + <_> + 17 17 1 3 -1. + <_> + 17 18 1 1 3. + <_> + + <_> + 17 15 2 2 -1. + <_> + 17 15 1 1 2. + <_> + 18 16 1 1 2. + <_> + + <_> + 3 13 3 3 -1. + <_> + 3 14 3 1 3. + <_> + + <_> + 10 17 1 3 -1. + <_> + 10 18 1 1 3. + <_> + + <_> + 4 0 14 8 -1. + <_> + 11 0 7 8 2. + <_> + + <_> + 2 0 12 2 -1. + <_> + 6 0 4 2 3. + <_> + + <_> + 2 0 4 3 -1. + <_> + 4 0 2 3 2. + <_> + + <_> + 13 1 1 2 -1. + <_> + 13 2 1 1 2. + <_> + + <_> + 7 5 3 6 -1. + <_> + 8 5 1 6 3. + <_> + + <_> + 18 2 2 2 -1. + <_> + 18 2 1 1 2. + <_> + 19 3 1 1 2. + <_> + + <_> + 15 1 2 14 -1. + <_> + 16 1 1 14 2. + <_> + + <_> + 15 6 2 2 -1. + <_> + 15 6 1 1 2. + <_> + 16 7 1 1 2. + <_> + + <_> + 3 1 6 3 -1. + <_> + 5 1 2 3 3. + <_> + + <_> + 7 16 2 2 -1. + <_> + 7 16 1 1 2. + <_> + 8 17 1 1 2. + <_> + + <_> + 5 17 2 2 -1. + <_> + 5 17 1 1 2. + <_> + 6 18 1 1 2. + <_> + + <_> + 9 10 6 10 -1. + <_> + 11 10 2 10 3. + <_> + + <_> + 10 17 6 3 -1. + <_> + 12 17 2 3 3. + <_> + + <_> + 14 5 2 10 -1. + <_> + 14 10 2 5 2. + <_> + + <_> + 11 12 6 2 -1. + <_> + 11 13 6 1 2. + <_> + + <_> + 8 1 1 3 -1. + <_> + 8 2 1 1 3. + <_> + + <_> + 12 15 2 2 -1. + <_> + 12 15 1 1 2. + <_> + 13 16 1 1 2. + <_> + + <_> + 6 8 6 4 -1. + <_> + 6 8 3 2 2. + <_> + 9 10 3 2 2. + <_> + + <_> + 7 5 3 5 -1. + <_> + 8 5 1 5 3. + <_> + + <_> + 0 5 7 3 -1. + <_> + 0 6 7 1 3. + <_> + + <_> + 7 9 6 6 -1. + <_> + 9 9 2 6 3. + <_> + + <_> + 5 7 8 8 -1. + <_> + 5 11 8 4 2. + <_> + + <_> + 4 9 2 6 -1. + <_> + 4 9 1 3 2. + <_> + 5 12 1 3 2. + <_> + + <_> + 10 11 6 1 -1. + <_> + 12 11 2 1 3. + <_> + + <_> + 13 6 6 11 -1. + <_> + 15 6 2 11 3. + <_> + + <_> + 8 17 2 2 -1. + <_> + 8 17 1 1 2. + <_> + 9 18 1 1 2. + <_> + + <_> + 4 12 12 1 -1. + <_> + 8 12 4 1 3. 
+ <_> + + <_> + 11 17 3 2 -1. + <_> + 11 18 3 1 2. + <_> + + <_> + 8 17 6 1 -1. + <_> + 10 17 2 1 3. + <_> + + <_> + 4 1 14 6 -1. + <_> + 4 3 14 2 3. + <_> + + <_> + 14 2 2 12 -1. + <_> + 14 8 2 6 2. + <_> + + <_> + 12 13 3 2 -1. + <_> + 12 14 3 1 2. + <_> + + <_> + 6 1 6 1 -1. + <_> + 8 1 2 1 3. + <_> + + <_> + 10 6 6 1 -1. + <_> + 12 6 2 1 3. + <_> + + <_> + 3 19 2 1 -1. + <_> + 4 19 1 1 2. + <_> + + <_> + 18 16 2 2 -1. + <_> + 18 16 1 1 2. + <_> + 19 17 1 1 2. + <_> + + <_> + 16 11 3 7 -1. + <_> + 17 11 1 7 3. + <_> + + <_> + 19 5 1 6 -1. + <_> + 19 8 1 3 2. + <_> + + <_> + 9 8 4 3 -1. + <_> + 9 9 4 1 3. + <_> + + <_> + 16 8 4 4 -1. + <_> + 16 8 2 2 2. + <_> + 18 10 2 2 2. + <_> + + <_> + 2 8 2 2 -1. + <_> + 2 8 1 1 2. + <_> + 3 9 1 1 2. + <_> + + <_> + 3 5 6 4 -1. + <_> + 3 5 3 2 2. + <_> + 6 7 3 2 2. + <_> + + <_> + 2 3 8 16 -1. + <_> + 2 3 4 8 2. + <_> + 6 11 4 8 2. + <_> + + <_> + 17 17 1 3 -1. + <_> + 17 18 1 1 3. + <_> + + <_> + 7 2 8 11 -1. + <_> + 11 2 4 11 2. + <_> + + <_> + 13 3 6 14 -1. + <_> + 16 3 3 14 2. + <_> + + <_> + 0 9 18 2 -1. + <_> + 6 9 6 2 3. + <_> + + <_> + 6 10 14 3 -1. + <_> + 6 11 14 1 3. + <_> + + <_> + 10 9 9 3 -1. + <_> + 13 9 3 3 3. + <_> + + <_> + 3 5 4 6 -1. + <_> + 3 5 2 3 2. + <_> + 5 8 2 3 2. + <_> + + <_> + 3 7 3 7 -1. + <_> + 4 7 1 7 3. + <_> + + <_> + 2 8 11 6 -1. + <_> + 2 10 11 2 3. + <_> + + <_> + 8 9 6 3 -1. + <_> + 8 10 6 1 3. + <_> + + <_> + 3 3 3 11 -1. + <_> + 4 3 1 11 3. + <_> + + <_> + 0 19 6 1 -1. + <_> + 3 19 3 1 2. + <_> + + <_> + 18 18 1 2 -1. + <_> + 18 19 1 1 2. + <_> + + <_> + 8 0 12 6 -1. + <_> + 8 0 6 3 2. + <_> + 14 3 6 3 2. + <_> + + <_> + 19 5 1 3 -1. + <_> + 19 6 1 1 3. + <_> + + <_> + 5 8 2 1 -1. + <_> + 6 8 1 1 2. + <_> + + <_> + 13 11 2 1 -1. + <_> + 14 11 1 1 2. + <_> + + <_> + 3 6 15 13 -1. + <_> + 8 6 5 13 3. + <_> + + <_> + 4 3 6 2 -1. + <_> + 6 3 2 2 3. + <_> + + <_> + 0 18 1 2 -1. + <_> + 0 19 1 1 2. + <_> + + <_> + 7 8 2 6 -1. + <_> + 8 8 1 6 2. + <_> + + <_> + 3 0 6 19 -1. + <_> + 5 0 2 19 3. + <_> + + <_> + 3 1 6 5 -1. + <_> + 5 1 2 5 3. + <_> + + <_> + 17 14 3 6 -1. + <_> + 17 16 3 2 3. + <_> + + <_> + 17 13 2 6 -1. + <_> + 18 13 1 6 2. + <_> + + <_> + 17 18 2 2 -1. + <_> + 18 18 1 2 2. + <_> + + <_> + 11 14 9 4 -1. + <_> + 14 14 3 4 3. + <_> + + <_> + 15 8 4 6 -1. + <_> + 15 8 2 3 2. + <_> + 17 11 2 3 2. + <_> + + <_> + 1 16 1 3 -1. + <_> + 1 17 1 1 3. + <_> + + <_> + 7 0 3 14 -1. + <_> + 8 0 1 14 3. + <_> + + <_> + 12 0 2 1 -1. + <_> + 13 0 1 1 2. + <_> + + <_> + 7 9 6 5 -1. + <_> + 10 9 3 5 2. + <_> + + <_> + 15 5 4 9 -1. + <_> + 17 5 2 9 2. + <_> + + <_> + 11 0 6 6 -1. + <_> + 13 0 2 6 3. + <_> + + <_> + 16 15 2 2 -1. + <_> + 16 15 1 1 2. + <_> + 17 16 1 1 2. + <_> + + <_> + 16 15 2 2 -1. + <_> + 16 15 1 1 2. + <_> + 17 16 1 1 2. + <_> + + <_> + 13 2 2 18 -1. + <_> + 13 11 2 9 2. + <_> + + <_> + 8 4 8 10 -1. + <_> + 8 9 8 5 2. + <_> + + <_> + 8 3 2 3 -1. + <_> + 8 4 2 1 3. + <_> + + <_> + 11 1 6 9 -1. + <_> + 11 4 6 3 3. + <_> + + <_> + 15 4 5 6 -1. + <_> + 15 6 5 2 3. + <_> + + <_> + 12 18 2 2 -1. + <_> + 12 18 1 1 2. + <_> + 13 19 1 1 2. + <_> + + <_> + 1 17 1 3 -1. + <_> + 1 18 1 1 3. + <_> + + <_> + 12 19 2 1 -1. + <_> + 13 19 1 1 2. + <_> + + <_> + 8 10 6 6 -1. + <_> + 10 10 2 6 3. + <_> + + <_> + 14 2 6 5 -1. + <_> + 16 2 2 5 3. + <_> + + <_> + 9 5 2 6 -1. + <_> + 9 7 2 2 3. + <_> + + <_> + 1 15 2 2 -1. + <_> + 2 15 1 2 2. + <_> + + <_> + 18 17 1 3 -1. + <_> + 18 18 1 1 3. + <_> + + <_> + 10 14 4 6 -1. + <_> + 10 16 4 2 3. + <_> + + <_> + 9 7 3 2 -1. + <_> + 10 7 1 2 3. + <_> + + <_> + 6 9 6 2 -1. 
+ <_> + 6 9 3 1 2. + <_> + 9 10 3 1 2. + <_> + + <_> + 0 2 1 12 -1. + <_> + 0 6 1 4 3. + <_> + + <_> + 4 0 15 1 -1. + <_> + 9 0 5 1 3. + <_> + + <_> + 9 0 8 2 -1. + <_> + 9 0 4 1 2. + <_> + 13 1 4 1 2. + <_> + + <_> + 12 2 8 1 -1. + <_> + 16 2 4 1 2. + <_> + + <_> + 7 1 10 6 -1. + <_> + 7 3 10 2 3. + <_> + + <_> + 18 6 2 3 -1. + <_> + 18 7 2 1 3. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 12 1 1 2. + <_> + 5 13 1 1 2. + <_> + + <_> + 6 6 6 2 -1. + <_> + 8 6 2 2 3. + <_> + + <_> + 0 9 9 6 -1. + <_> + 3 9 3 6 3. + <_> + + <_> + 17 18 2 2 -1. + <_> + 18 18 1 2 2. + <_> + + <_> + 11 2 6 16 -1. + <_> + 13 2 2 16 3. + <_> + + <_> + 2 4 15 13 -1. + <_> + 7 4 5 13 3. + <_> + + <_> + 16 2 3 10 -1. + <_> + 17 2 1 10 3. + <_> + + <_> + 6 10 2 1 -1. + <_> + 7 10 1 1 2. + <_> + + <_> + 1 1 18 16 -1. + <_> + 10 1 9 16 2. + <_> + + <_> + 14 4 3 15 -1. + <_> + 15 4 1 15 3. + <_> + + <_> + 19 13 1 2 -1. + <_> + 19 14 1 1 2. + <_> + + <_> + 2 6 5 8 -1. + <_> + 2 10 5 4 2. + diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt.xml new file mode 100644 index 0000000000000000000000000000000000000000..ade4b2121a68e6967cc558f4393dc8d828cee60e --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt.xml @@ -0,0 +1,24350 @@ + + + +BOOST + HAAR + 20 + 20 + + 213 + + 0 + 22 + + <_> + 3 + 8.2268941402435303e-01 + + <_> + + 0 -1 0 4.0141958743333817e-03 + + 3.3794190734624863e-02 8.3781069517135620e-01 + <_> + + 0 -1 1 1.5151339583098888e-02 + + 1.5141320228576660e-01 7.4888122081756592e-01 + <_> + + 0 -1 2 4.2109931819140911e-03 + + 9.0049281716346741e-02 6.3748198747634888e-01 + <_> + 16 + 6.9566087722778320e+00 + + <_> + + 0 -1 3 1.6227109590545297e-03 + + 6.9308586418628693e-02 7.1109461784362793e-01 + <_> + + 0 -1 4 2.2906649392098188e-03 + + 1.7958030104637146e-01 6.6686922311782837e-01 + <_> + + 0 -1 5 5.0025708042085171e-03 + + 1.6936729848384857e-01 6.5540069341659546e-01 + <_> + + 0 -1 6 7.9659894108772278e-03 + + 5.8663320541381836e-01 9.1414518654346466e-02 + <_> + + 0 -1 7 -3.5227010957896709e-03 + + 1.4131669700145721e-01 6.0318958759307861e-01 + <_> + + 0 -1 8 3.6667689681053162e-02 + + 3.6756721138954163e-01 7.9203182458877563e-01 + <_> + + 0 -1 9 9.3361474573612213e-03 + + 6.1613857746124268e-01 2.0885099470615387e-01 + <_> + + 0 -1 10 8.6961314082145691e-03 + + 2.8362309932708740e-01 6.3602739572525024e-01 + <_> + + 0 -1 11 1.1488880263641477e-03 + + 2.2235809266567230e-01 5.8007007837295532e-01 + <_> + + 0 -1 12 -2.1484689787030220e-03 + + 2.4064640700817108e-01 5.7870548963546753e-01 + <_> + + 0 -1 13 2.1219060290604830e-03 + + 5.5596548318862915e-01 1.3622370362281799e-01 + <_> + + 0 -1 14 -9.3949146568775177e-02 + + 8.5027372837066650e-01 4.7177401185035706e-01 + <_> + + 0 -1 15 1.3777789426967502e-03 + + 5.9936738014221191e-01 2.8345298767089844e-01 + <_> + + 0 -1 16 7.3063157498836517e-02 + + 4.3418860435485840e-01 7.0600342750549316e-01 + <_> + + 0 -1 17 3.6767389974556863e-04 + + 3.0278879404067993e-01 6.0515749454498291e-01 + <_> + + 0 -1 18 -6.0479710809886456e-03 + + 1.7984339594841003e-01 5.6752568483352661e-01 + <_> + 21 + 9.4985427856445312e+00 + + <_> + + 0 -1 19 -1.6510689631104469e-02 + + 6.6442251205444336e-01 1.4248579740524292e-01 + <_> + + 0 -1 20 2.7052499353885651e-03 + + 6.3253521919250488e-01 1.2884770333766937e-01 + <_> + + 0 -1 21 2.8069869149476290e-03 + + 1.2402880191802979e-01 6.1931931972503662e-01 + <_> + + 0 -1 22 -1.5402400167658925e-03 + + 
1.4321430027484894e-01 5.6700158119201660e-01 + <_> + + 0 -1 23 -5.6386279175058007e-04 + + 1.6574330627918243e-01 5.9052079916000366e-01 + <_> + + 0 -1 24 1.9253729842603207e-03 + + 2.6955071091651917e-01 5.7388240098953247e-01 + <_> + + 0 -1 25 -5.0214841030538082e-03 + + 1.8935389816761017e-01 5.7827740907669067e-01 + <_> + + 0 -1 26 2.6365420781075954e-03 + + 2.3093290627002716e-01 5.6954258680343628e-01 + <_> + + 0 -1 27 -1.5127769438549876e-03 + + 2.7596020698547363e-01 5.9566420316696167e-01 + <_> + + 0 -1 28 -1.0157439857721329e-02 + + 1.7325380444526672e-01 5.5220472812652588e-01 + <_> + + 0 -1 29 -1.1953660286962986e-02 + + 1.3394099473953247e-01 5.5590140819549561e-01 + <_> + + 0 -1 30 4.8859491944313049e-03 + + 3.6287039518356323e-01 6.1888492107391357e-01 + <_> + + 0 -1 31 -8.0132916569709778e-02 + + 9.1211050748825073e-02 5.4759448766708374e-01 + <_> + + 0 -1 32 1.0643280111253262e-03 + + 3.7151429057121277e-01 5.7113999128341675e-01 + <_> + + 0 -1 33 -1.3419450260698795e-03 + + 5.9533137083053589e-01 3.3180978894233704e-01 + <_> + + 0 -1 34 -5.4601140320301056e-02 + + 1.8440659344196320e-01 5.6028461456298828e-01 + <_> + + 0 -1 35 2.9071690514683723e-03 + + 3.5942441225051880e-01 6.1317151784896851e-01 + <_> + + 0 -1 36 7.4718717951327562e-04 + + 5.9943532943725586e-01 3.4595629572868347e-01 + <_> + + 0 -1 37 4.3013808317482471e-03 + + 4.1726520657539368e-01 6.9908452033996582e-01 + <_> + + 0 -1 38 4.5017572119832039e-03 + + 4.5097151398658752e-01 7.8014570474624634e-01 + <_> + + 0 -1 39 2.4138500913977623e-02 + + 5.4382127523422241e-01 1.3198269903659821e-01 + <_> + 39 + 1.8412969589233398e+01 + + <_> + + 0 -1 40 1.9212230108678341e-03 + + 1.4152669906616211e-01 6.1998707056045532e-01 + <_> + + 0 -1 41 -1.2748669541906565e-04 + + 6.1910742521286011e-01 1.8849289417266846e-01 + <_> + + 0 -1 42 5.1409931620582938e-04 + + 1.4873969554901123e-01 5.8579277992248535e-01 + <_> + + 0 -1 43 4.1878609918057919e-03 + + 2.7469098567962646e-01 6.3592398166656494e-01 + <_> + + 0 -1 44 5.1015717908740044e-03 + + 5.8708512783050537e-01 2.1756289899349213e-01 + <_> + + 0 -1 45 -2.1448440384119749e-03 + + 5.8809447288513184e-01 2.9795908927917480e-01 + <_> + + 0 -1 46 -2.8977119363844395e-03 + + 2.3733270168304443e-01 5.8766472339630127e-01 + <_> + + 0 -1 47 -2.1610679104924202e-02 + + 1.2206549942493439e-01 5.1942020654678345e-01 + <_> + + 0 -1 48 -4.6299318782985210e-03 + + 2.6312309503555298e-01 5.8174091577529907e-01 + <_> + + 0 -1 49 5.9393711853772402e-04 + + 3.6386200785636902e-01 5.6985449790954590e-01 + <_> + + 0 -1 50 5.3878661245107651e-02 + + 4.3035310506820679e-01 7.5593662261962891e-01 + <_> + + 0 -1 51 1.8887349870055914e-03 + + 2.1226030588150024e-01 5.6134271621704102e-01 + <_> + + 0 -1 52 -2.3635339457541704e-03 + + 5.6318491697311401e-01 2.6427671313285828e-01 + <_> + + 0 -1 53 2.4017799645662308e-02 + + 5.7971078157424927e-01 2.7517059445381165e-01 + <_> + + 0 -1 54 2.0543030404951423e-04 + + 2.7052420377731323e-01 5.7525688409805298e-01 + <_> + + 0 -1 55 8.4790197433903813e-04 + + 5.4356247186660767e-01 2.3348769545555115e-01 + <_> + + 0 -1 56 1.4091329649090767e-03 + + 5.3194248676300049e-01 2.0631550252437592e-01 + <_> + + 0 -1 57 1.4642629539594054e-03 + + 5.4189807176589966e-01 3.0688610672950745e-01 + <_> + + 0 -1 58 1.6352549428120255e-03 + + 3.6953729391098022e-01 6.1128681898117065e-01 + <_> + + 0 -1 59 8.3172752056270838e-04 + + 3.5650369524955750e-01 6.0252362489700317e-01 + <_> + + 0 -1 60 -2.0998890977352858e-03 + + 1.9139820337295532e-01 
5.3628271818161011e-01 + <_> + + 0 -1 61 -7.4213981861248612e-04 + + 3.8355550169944763e-01 5.5293101072311401e-01 + <_> + + 0 -1 62 3.2655049581080675e-03 + + 4.3128961324691772e-01 7.1018958091735840e-01 + <_> + + 0 -1 63 8.9134991867467761e-04 + + 3.9848309755325317e-01 6.3919639587402344e-01 + <_> + + 0 -1 64 -1.5284179709851742e-02 + + 2.3667329549789429e-01 5.4337137937545776e-01 + <_> + + 0 -1 65 4.8381411470472813e-03 + + 5.8175009489059448e-01 3.2391890883445740e-01 + <_> + + 0 -1 66 -9.1093179071322083e-04 + + 5.5405938625335693e-01 2.9118689894676208e-01 + <_> + + 0 -1 67 -6.1275060288608074e-03 + + 1.7752550542354584e-01 5.1966291666030884e-01 + <_> + + 0 -1 68 -4.4576259097084403e-04 + + 3.0241701006889343e-01 5.5335938930511475e-01 + <_> + + 0 -1 69 2.2646540775895119e-02 + + 4.4149309396743774e-01 6.9753772020339966e-01 + <_> + + 0 -1 70 -1.8804960418492556e-03 + + 2.7913948893547058e-01 5.4979521036148071e-01 + <_> + + 0 -1 71 7.0889107882976532e-03 + + 5.2631992101669312e-01 2.3855470120906830e-01 + <_> + + 0 -1 72 1.7318050377070904e-03 + + 4.3193790316581726e-01 6.9836008548736572e-01 + <_> + + 0 -1 73 -6.8482700735330582e-03 + + 3.0820429325103760e-01 5.3909200429916382e-01 + <_> + + 0 -1 74 -1.5062530110299122e-05 + + 5.5219221115112305e-01 3.1203660368919373e-01 + <_> + + 0 -1 75 2.9475569725036621e-02 + + 5.4013228416442871e-01 1.7706030607223511e-01 + <_> + + 0 -1 76 8.1387329846620560e-03 + + 5.1786178350448608e-01 1.2110190093517303e-01 + <_> + + 0 -1 77 2.0942950621247292e-02 + + 5.2902942895889282e-01 3.3112218976020813e-01 + <_> + + 0 -1 78 -9.5665529370307922e-03 + + 7.4719941616058350e-01 4.4519689679145813e-01 + <_> + 33 + 1.5324139595031738e+01 + + <_> + + 0 -1 79 -2.8206960996612906e-04 + + 2.0640860497951508e-01 6.0767322778701782e-01 + <_> + + 0 -1 80 1.6790600493550301e-03 + + 5.8519971370697021e-01 1.2553839385509491e-01 + <_> + + 0 -1 81 6.9827912375330925e-04 + + 9.4018429517745972e-02 5.7289612293243408e-01 + <_> + + 0 -1 82 7.8959012171253562e-04 + + 1.7819879949092865e-01 5.6943088769912720e-01 + <_> + + 0 -1 83 -2.8560499195009470e-03 + + 1.6383990645408630e-01 5.7886648178100586e-01 + <_> + + 0 -1 84 -3.8122469559311867e-03 + + 2.0854400098323822e-01 5.5085647106170654e-01 + <_> + + 0 -1 85 1.5896620461717248e-03 + + 5.7027608156204224e-01 1.8572150170803070e-01 + <_> + + 0 -1 86 1.0078339837491512e-02 + + 5.1169431209564209e-01 2.1897700428962708e-01 + <_> + + 0 -1 87 -6.3526302576065063e-02 + + 7.1313798427581787e-01 4.0438130497932434e-01 + <_> + + 0 -1 88 -9.1031491756439209e-03 + + 2.5671818852424622e-01 5.4639732837677002e-01 + <_> + + 0 -1 89 -2.4035000242292881e-03 + + 1.7006659507751465e-01 5.5909740924835205e-01 + <_> + + 0 -1 90 1.5226360410451889e-03 + + 5.4105567932128906e-01 2.6190540194511414e-01 + <_> + + 0 -1 91 1.7997439950704575e-02 + + 3.7324368953704834e-01 6.5352207422256470e-01 + <_> + + 0 -1 92 -6.4538191072642803e-03 + + 2.6264819502830505e-01 5.5374461412429810e-01 + <_> + + 0 -1 93 -1.1880760081112385e-02 + + 2.0037539303302765e-01 5.5447459220886230e-01 + <_> + + 0 -1 94 1.2713660253211856e-03 + + 5.5919027328491211e-01 3.0319759249687195e-01 + <_> + + 0 -1 95 1.1376109905540943e-03 + + 2.7304071187973022e-01 5.6465089321136475e-01 + <_> + + 0 -1 96 -4.2651998810470104e-03 + + 1.4059090614318848e-01 5.4618209600448608e-01 + <_> + + 0 -1 97 -2.9602861031889915e-03 + + 1.7950350046157837e-01 5.4592901468276978e-01 + <_> + + 0 -1 98 -8.8448226451873779e-03 + + 5.7367831468582153e-01 2.8092199563980103e-01 + <_> + + 0 -1 
99 -6.6430689767003059e-03 + + 2.3706759512424469e-01 5.5038261413574219e-01 + <_> + + 0 -1 100 3.9997808635234833e-03 + + 5.6081998348236084e-01 3.3042821288108826e-01 + <_> + + 0 -1 101 -4.1221720166504383e-03 + + 1.6401059925556183e-01 5.3789931535720825e-01 + <_> + + 0 -1 102 1.5624909661710262e-02 + + 5.2276492118835449e-01 2.2886039316654205e-01 + <_> + + 0 -1 103 -1.0356419719755650e-02 + + 7.0161938667297363e-01 4.2529278993606567e-01 + <_> + + 0 -1 104 -8.7960809469223022e-03 + + 2.7673470973968506e-01 5.3558301925659180e-01 + <_> + + 0 -1 105 1.6226939857006073e-01 + + 4.3422400951385498e-01 7.4425792694091797e-01 + <_> + + 0 -1 106 4.5542530715465546e-03 + + 5.7264858484268188e-01 2.5821250677108765e-01 + <_> + + 0 -1 107 -2.1309209987521172e-03 + + 2.1068480610847473e-01 5.3610187768936157e-01 + <_> + + 0 -1 108 -1.3208420015871525e-02 + + 7.5937908887863159e-01 4.5524680614471436e-01 + <_> + + 0 -1 109 -6.5996676683425903e-02 + + 1.2524759769439697e-01 5.3440397977828979e-01 + <_> + + 0 -1 110 7.9142656177282333e-03 + + 3.3153840899467468e-01 5.6010431051254272e-01 + <_> + + 0 -1 111 2.0894279703497887e-02 + + 5.5060499906539917e-01 2.7688381075859070e-01 + <_> + 44 + 2.1010639190673828e+01 + + <_> + + 0 -1 112 1.1961159761995077e-03 + + 1.7626909911632538e-01 6.1562412977218628e-01 + <_> + + 0 -1 113 -1.8679830245673656e-03 + + 6.1181068420410156e-01 1.8323999643325806e-01 + <_> + + 0 -1 114 -1.9579799845814705e-04 + + 9.9044263362884521e-02 5.7238161563873291e-01 + <_> + + 0 -1 115 -8.0255657667294145e-04 + + 5.5798798799514771e-01 2.3772829771041870e-01 + <_> + + 0 -1 116 -2.4510810617357492e-03 + + 2.2314579784870148e-01 5.8589351177215576e-01 + <_> + + 0 -1 117 5.0361850298941135e-04 + + 2.6539939641952515e-01 5.7941037416458130e-01 + <_> + + 0 -1 118 4.0293349884450436e-03 + + 5.8038270473480225e-01 2.4848650395870209e-01 + <_> + + 0 -1 119 -1.4451709575951099e-02 + + 1.8303519487380981e-01 5.4842048883438110e-01 + <_> + + 0 -1 120 2.0380979403853416e-03 + + 3.3635589480400085e-01 6.0510927438735962e-01 + <_> + + 0 -1 121 -1.6155190533027053e-03 + + 2.2866420447826385e-01 5.4412460327148438e-01 + <_> + + 0 -1 122 3.3458340913057327e-03 + + 5.6259131431579590e-01 2.3923380672931671e-01 + <_> + + 0 -1 123 1.6379579901695251e-03 + + 3.9069938659667969e-01 5.9646219015121460e-01 + <_> + + 0 -1 124 3.0251210555434227e-02 + + 5.2484822273254395e-01 1.5757469832897186e-01 + <_> + + 0 -1 125 3.7251990288496017e-02 + + 4.1943109035491943e-01 6.7484188079833984e-01 + <_> + + 0 -1 126 -2.5109790265560150e-02 + + 1.8825499713420868e-01 5.4734510183334351e-01 + <_> + + 0 -1 127 -5.3099058568477631e-03 + + 1.3399730622768402e-01 5.2271109819412231e-01 + <_> + + 0 -1 128 1.2086479691788554e-03 + + 3.7620881199836731e-01 6.1096358299255371e-01 + <_> + + 0 -1 129 -2.1907679736614227e-02 + + 2.6631429791450500e-01 5.4040068387985229e-01 + <_> + + 0 -1 130 5.4116579703986645e-03 + + 5.3635787963867188e-01 2.2322730720043182e-01 + <_> + + 0 -1 131 6.9946326315402985e-02 + + 5.3582328557968140e-01 2.4536980688571930e-01 + <_> + + 0 -1 132 3.4520021290518343e-04 + + 2.4096719920635223e-01 5.3769302368164062e-01 + <_> + + 0 -1 133 1.2627709656953812e-03 + + 5.4258567094802856e-01 3.1556931138038635e-01 + <_> + + 0 -1 134 2.2719509899616241e-02 + + 4.1584059596061707e-01 6.5978652238845825e-01 + <_> + + 0 -1 135 -1.8111000536009669e-03 + + 2.8112530708312988e-01 5.5052447319030762e-01 + <_> + + 0 -1 136 3.3469670452177525e-03 + + 5.2600282430648804e-01 1.8914650380611420e-01 + <_> + + 0 -1 137 
4.0791751234792173e-04 + + 5.6735092401504517e-01 3.3442100882530212e-01 + <_> + + 0 -1 138 1.2734799645841122e-02 + + 5.3435921669006348e-01 2.3956120014190674e-01 + <_> + + 0 -1 139 -7.3119727894663811e-03 + + 6.0108900070190430e-01 4.0222078561782837e-01 + <_> + + 0 -1 140 -5.6948751211166382e-02 + + 8.1991511583328247e-01 4.5431908965110779e-01 + <_> + + 0 -1 141 -5.0116591155529022e-03 + + 2.2002810239791870e-01 5.3577107191085815e-01 + <_> + + 0 -1 142 6.0334368608891964e-03 + + 4.4130811095237732e-01 7.1817511320114136e-01 + <_> + + 0 -1 143 3.9437441155314445e-03 + + 5.4788607358932495e-01 2.7917331457138062e-01 + <_> + + 0 -1 144 -3.6591119132936001e-03 + + 6.3578677177429199e-01 3.9897239208221436e-01 + <_> + + 0 -1 145 -3.8456181064248085e-03 + + 3.4936860203742981e-01 5.3006649017333984e-01 + <_> + + 0 -1 146 -7.1926261298358440e-03 + + 1.1196149885654449e-01 5.2296727895736694e-01 + <_> + + 0 -1 147 -5.2798941731452942e-02 + + 2.3871029913425446e-01 5.4534512758255005e-01 + <_> + + 0 -1 148 -7.9537667334079742e-03 + + 7.5869178771972656e-01 4.4393768906593323e-01 + <_> + + 0 -1 149 -2.7344180271029472e-03 + + 2.5654768943786621e-01 5.4893219470977783e-01 + <_> + + 0 -1 150 -1.8507939530536532e-03 + + 6.7343479394912720e-01 4.2524749040603638e-01 + <_> + + 0 -1 151 1.5918919816613197e-02 + + 5.4883527755737305e-01 2.2926619648933411e-01 + <_> + + 0 -1 152 -1.2687679845839739e-03 + + 6.1043310165405273e-01 4.0223899483680725e-01 + <_> + + 0 -1 153 6.2883910723030567e-03 + + 5.3108531236648560e-01 1.5361930429935455e-01 + <_> + + 0 -1 154 -6.2259892001748085e-03 + + 1.7291119694709778e-01 5.2416062355041504e-01 + <_> + + 0 -1 155 -1.2132599949836731e-02 + + 6.5977597236633301e-01 4.3251821398735046e-01 + <_> + 50 + 2.3918790817260742e+01 + + <_> + + 0 -1 156 -3.9184908382594585e-03 + + 6.1034351587295532e-01 1.4693309366703033e-01 + <_> + + 0 -1 157 1.5971299726516008e-03 + + 2.6323631405830383e-01 5.8964669704437256e-01 + <_> + + 0 -1 158 1.7780110239982605e-02 + + 5.8728742599487305e-01 1.7603619396686554e-01 + <_> + + 0 -1 159 6.5334769897162914e-04 + + 1.5678019821643829e-01 5.5960661172866821e-01 + <_> + + 0 -1 160 -2.8353091329336166e-04 + + 1.9131539762020111e-01 5.7320362329483032e-01 + <_> + + 0 -1 161 1.6104689566418529e-03 + + 2.9149138927459717e-01 5.6230807304382324e-01 + <_> + + 0 -1 162 -9.7750619053840637e-02 + + 1.9434769451618195e-01 5.6482332944869995e-01 + <_> + + 0 -1 163 5.5182358482852578e-04 + + 3.1346169114112854e-01 5.5046397447586060e-01 + <_> + + 0 -1 164 -1.2858220376074314e-02 + + 2.5364819169044495e-01 5.7601428031921387e-01 + <_> + + 0 -1 165 4.1530239395797253e-03 + + 5.7677221298217773e-01 3.6597740650177002e-01 + <_> + + 0 -1 166 1.7092459602281451e-03 + + 2.8431910276412964e-01 5.9189391136169434e-01 + <_> + + 0 -1 167 7.5217359699308872e-03 + + 4.0524271130561829e-01 6.1831092834472656e-01 + <_> + + 0 -1 168 2.2479810286313295e-03 + + 5.7837551832199097e-01 3.1354010105133057e-01 + <_> + + 0 -1 169 5.2006211131811142e-02 + + 5.5413120985031128e-01 1.9166369736194611e-01 + <_> + + 0 -1 170 1.2085529975593090e-02 + + 4.0326559543609619e-01 6.6445910930633545e-01 + <_> + + 0 -1 171 1.4687820112158079e-05 + + 3.5359779000282288e-01 5.7093828916549683e-01 + <_> + + 0 -1 172 7.1395188570022583e-06 + + 3.0374449491500854e-01 5.6102699041366577e-01 + <_> + + 0 -1 173 -4.6001640148460865e-03 + + 7.1810871362686157e-01 4.5803260803222656e-01 + <_> + + 0 -1 174 2.0058949012309313e-03 + + 5.6219518184661865e-01 2.9536840319633484e-01 + <_> + + 0 -1 175 
4.5050270855426788e-03 + + 4.6153879165649414e-01 7.6190179586410522e-01 + <_> + + 0 -1 176 1.1746830306947231e-02 + + 5.3438371419906616e-01 1.7725290358066559e-01 + <_> + + 0 -1 177 -5.8316338807344437e-02 + + 1.6862459480762482e-01 5.3407722711563110e-01 + <_> + + 0 -1 178 2.3629379575140774e-04 + + 3.7920561432838440e-01 6.0268038511276245e-01 + <_> + + 0 -1 179 -7.8156180679798126e-03 + + 1.5128670632839203e-01 5.3243237733840942e-01 + <_> + + 0 -1 180 -1.0876160115003586e-02 + + 2.0818220078945160e-01 5.3199452161788940e-01 + <_> + + 0 -1 181 -2.7745519764721394e-03 + + 4.0982469916343689e-01 5.2103281021118164e-01 + <_> + + 0 -1 182 -7.8276381827890873e-04 + + 5.6932741403579712e-01 3.4788420796394348e-01 + <_> + + 0 -1 183 1.3870409689843655e-02 + + 5.3267508745193481e-01 2.2576980292797089e-01 + <_> + + 0 -1 184 -2.3674910888075829e-02 + + 1.5513050556182861e-01 5.2007079124450684e-01 + <_> + + 0 -1 185 -1.4879409718560055e-05 + + 5.5005669593811035e-01 3.8201761245727539e-01 + <_> + + 0 -1 186 3.6190641112625599e-03 + + 4.2386838793754578e-01 6.6397482156753540e-01 + <_> + + 0 -1 187 -1.9817110151052475e-02 + + 2.1500380337238312e-01 5.3823578357696533e-01 + <_> + + 0 -1 188 -3.8154039066284895e-03 + + 6.6757112741470337e-01 4.2152971029281616e-01 + <_> + + 0 -1 189 -4.9775829538702965e-03 + + 2.2672890126705170e-01 5.3863281011581421e-01 + <_> + + 0 -1 190 2.2441020701080561e-03 + + 4.3086910247802734e-01 6.8557357788085938e-01 + <_> + + 0 -1 191 1.2282459996640682e-02 + + 5.8366149663925171e-01 3.4674790501594543e-01 + <_> + + 0 -1 192 -2.8548699337989092e-03 + + 7.0169448852539062e-01 4.3114539980888367e-01 + <_> + + 0 -1 193 -3.7875669077038765e-03 + + 2.8953450918197632e-01 5.2249461412429810e-01 + <_> + + 0 -1 194 -1.2201230274513364e-03 + + 2.9755708575248718e-01 5.4816448688507080e-01 + <_> + + 0 -1 195 1.0160599835216999e-02 + + 4.8888179659843445e-01 8.1826978921890259e-01 + <_> + + 0 -1 196 -1.6174569725990295e-02 + + 1.4814929664134979e-01 5.2399927377700806e-01 + <_> + + 0 -1 197 1.9292460754513741e-02 + + 4.7863098978996277e-01 7.3781907558441162e-01 + <_> + + 0 -1 198 -3.2479539513587952e-03 + + 7.3742228746414185e-01 4.4706439971923828e-01 + <_> + + 0 -1 199 -9.3803480267524719e-03 + + 3.4891548752784729e-01 5.5379962921142578e-01 + <_> + + 0 -1 200 -1.2606129981577396e-02 + + 2.3796869814395905e-01 5.3154432773590088e-01 + <_> + + 0 -1 201 -2.5621930137276649e-02 + + 1.9646880030632019e-01 5.1387697458267212e-01 + <_> + + 0 -1 202 -7.5741496402770281e-05 + + 5.5905228853225708e-01 3.3658531308174133e-01 + <_> + + 0 -1 203 -8.9210882782936096e-02 + + 6.3404656946659088e-02 5.1626348495483398e-01 + <_> + + 0 -1 204 -2.7670480776578188e-03 + + 7.3234677314758301e-01 4.4907060265541077e-01 + <_> + + 0 -1 205 2.7152578695677221e-04 + + 4.1148349642753601e-01 5.9855180978775024e-01 + <_> + 51 + 2.4527879714965820e+01 + + <_> + + 0 -1 206 1.4786219689995050e-03 + + 2.6635450124740601e-01 6.6433167457580566e-01 + <_> + + 0 -1 207 -1.8741659587249160e-03 + + 6.1438488960266113e-01 2.5185129046440125e-01 + <_> + + 0 -1 208 -1.7151009524241090e-03 + + 5.7663410902023315e-01 2.3974630236625671e-01 + <_> + + 0 -1 209 -1.8939269939437509e-03 + + 5.6820458173751831e-01 2.5291448831558228e-01 + <_> + + 0 -1 210 -5.3006052039563656e-03 + + 1.6406759619712830e-01 5.5560797452926636e-01 + <_> + + 0 -1 211 -4.6662531793117523e-02 + + 6.1231541633605957e-01 4.7628301382064819e-01 + <_> + + 0 -1 212 -7.9431332414969802e-04 + + 5.7078588008880615e-01 2.8394040465354919e-01 + <_> + + 0 
-1 213 1.4891670085489750e-02 + + 4.0896728634834290e-01 6.0063672065734863e-01 + <_> + + 0 -1 214 -1.2046529445797205e-03 + + 5.7124507427215576e-01 2.7052891254425049e-01 + <_> + + 0 -1 215 6.0619381256401539e-03 + + 5.2625042200088501e-01 3.2622259855270386e-01 + <_> + + 0 -1 216 -2.5286648888140917e-03 + + 6.8538308143615723e-01 4.1992568969726562e-01 + <_> + + 0 -1 217 -5.9010218828916550e-03 + + 3.2662820816040039e-01 5.4348129034042358e-01 + <_> + + 0 -1 218 5.6702760048210621e-03 + + 5.4684108495712280e-01 2.3190039396286011e-01 + <_> + + 0 -1 219 -3.0304100364446640e-03 + + 5.5706679821014404e-01 2.7082380652427673e-01 + <_> + + 0 -1 220 2.9803649522364140e-03 + + 3.7005689740180969e-01 5.8906257152557373e-01 + <_> + + 0 -1 221 -7.5840510427951813e-02 + + 2.1400700509548187e-01 5.4199481010437012e-01 + <_> + + 0 -1 222 1.9262539222836494e-02 + + 5.5267721414566040e-01 2.7265900373458862e-01 + <_> + + 0 -1 223 1.8888259364757687e-04 + + 3.9580118656158447e-01 6.0172098875045776e-01 + <_> + + 0 -1 224 2.9369549825787544e-02 + + 5.2413737773895264e-01 1.4357580244541168e-01 + <_> + + 0 -1 225 1.0417619487270713e-03 + + 3.3854091167449951e-01 5.9299832582473755e-01 + <_> + + 0 -1 226 2.6125640142709017e-03 + + 5.4853779077529907e-01 3.0215978622436523e-01 + <_> + + 0 -1 227 9.6977467183023691e-04 + + 3.3752760291099548e-01 5.5320328474044800e-01 + <_> + + 0 -1 228 5.9512659208849072e-04 + + 5.6317430734634399e-01 3.3593991398811340e-01 + <_> + + 0 -1 229 -1.0156559944152832e-01 + + 6.3735038042068481e-02 5.2304250001907349e-01 + <_> + + 0 -1 230 3.6156699061393738e-02 + + 5.1369631290435791e-01 1.0295289754867554e-01 + <_> + + 0 -1 231 3.4624140243977308e-03 + + 3.8793200254440308e-01 5.5582892894744873e-01 + <_> + + 0 -1 232 1.9554980099201202e-02 + + 5.2500867843627930e-01 1.8758599460124969e-01 + <_> + + 0 -1 233 -2.3121440317481756e-03 + + 6.6720288991928101e-01 4.6796411275863647e-01 + <_> + + 0 -1 234 -1.8605289515107870e-03 + + 7.1633791923522949e-01 4.3346709012985229e-01 + <_> + + 0 -1 235 -9.4026362057775259e-04 + + 3.0213609337806702e-01 5.6502032279968262e-01 + <_> + + 0 -1 236 -5.2418331615626812e-03 + + 1.8200090527534485e-01 5.2502560615539551e-01 + <_> + + 0 -1 237 1.1729019752237946e-04 + + 3.3891880512237549e-01 5.4459732770919800e-01 + <_> + + 0 -1 238 1.1878840159624815e-03 + + 4.0853491425514221e-01 6.2535631656646729e-01 + <_> + + 0 -1 239 -1.0881359688937664e-02 + + 3.3783990144729614e-01 5.7000827789306641e-01 + <_> + + 0 -1 240 1.7354859737679362e-03 + + 4.2046359181404114e-01 6.5230387449264526e-01 + <_> + + 0 -1 241 -6.5119052305817604e-03 + + 2.5952160358428955e-01 5.4281437397003174e-01 + <_> + + 0 -1 242 -1.2136430013924837e-03 + + 6.1651438474655151e-01 3.9778938889503479e-01 + <_> + + 0 -1 243 -1.0354240424931049e-02 + + 1.6280280053615570e-01 5.2195048332214355e-01 + <_> + + 0 -1 244 5.5858830455690622e-04 + + 3.1996509432792664e-01 5.5035740137100220e-01 + <_> + + 0 -1 245 1.5299649909138680e-02 + + 4.1039940714836121e-01 6.1223882436752319e-01 + <_> + + 0 -1 246 -2.1588210016489029e-02 + + 1.0349129885435104e-01 5.1973849534988403e-01 + <_> + + 0 -1 247 -1.2834629416465759e-01 + + 8.4938651323318481e-01 4.8931029438972473e-01 + <_> + + 0 -1 248 -2.2927189711481333e-03 + + 3.1301578879356384e-01 5.4715752601623535e-01 + <_> + + 0 -1 249 7.9915106296539307e-02 + + 4.8563209176063538e-01 6.0739892721176147e-01 + <_> + + 0 -1 250 -7.9441092908382416e-02 + + 8.3946740627288818e-01 4.6245330572128296e-01 + <_> + + 0 -1 251 -5.2800010889768600e-03 + + 
1.8816959857940674e-01 5.3066980838775635e-01 + <_> + + 0 -1 252 1.0463109938427806e-03 + + 5.2712291479110718e-01 2.5830659270286560e-01 + <_> + + 0 -1 253 2.6317298761568964e-04 + + 4.2353048920631409e-01 5.7354408502578735e-01 + <_> + + 0 -1 254 -3.6173160187900066e-03 + + 6.9343960285186768e-01 4.4954448938369751e-01 + <_> + + 0 -1 255 1.1421879753470421e-02 + + 5.9009212255477905e-01 4.1381931304931641e-01 + <_> + + 0 -1 256 -1.9963278900831938e-03 + + 6.4663827419281006e-01 4.3272399902343750e-01 + <_> + 56 + 2.7153350830078125e+01 + + <_> + + 0 -1 257 -9.9691245704889297e-03 + + 6.1423242092132568e-01 2.4822120368480682e-01 + <_> + + 0 -1 258 7.3073059320449829e-04 + + 5.7049518823623657e-01 2.3219659924507141e-01 + <_> + + 0 -1 259 6.4045301405712962e-04 + + 2.1122519671916962e-01 5.8149331808090210e-01 + <_> + + 0 -1 260 4.5424019917845726e-03 + + 2.9504820704460144e-01 5.8663117885589600e-01 + <_> + + 0 -1 261 9.2477443104144186e-05 + + 2.9909908771514893e-01 5.7913267612457275e-01 + <_> + + 0 -1 262 -8.6603146046400070e-03 + + 2.8130298852920532e-01 5.6355422735214233e-01 + <_> + + 0 -1 263 8.0515816807746887e-03 + + 3.5353690385818481e-01 6.0547572374343872e-01 + <_> + + 0 -1 264 4.3835240649059415e-04 + + 5.5965322256088257e-01 2.7315109968185425e-01 + <_> + + 0 -1 265 -9.8168973636347800e-05 + + 5.9780317544937134e-01 3.6385610699653625e-01 + <_> + + 0 -1 266 -1.1298790341243148e-03 + + 2.7552521228790283e-01 5.4327291250228882e-01 + <_> + + 0 -1 267 6.4356150105595589e-03 + + 4.3056419491767883e-01 7.0698332786560059e-01 + <_> + + 0 -1 268 -5.6829329580068588e-02 + + 2.4952429533004761e-01 5.2949970960617065e-01 + <_> + + 0 -1 269 4.0668169967830181e-03 + + 5.4785531759262085e-01 2.4977239966392517e-01 + <_> + + 0 -1 270 4.8164798499783501e-05 + + 3.9386010169982910e-01 5.7063561677932739e-01 + <_> + + 0 -1 271 6.1795017682015896e-03 + + 4.4076061248779297e-01 7.3947668075561523e-01 + <_> + + 0 -1 272 6.4985752105712891e-03 + + 5.4452431201934814e-01 2.4791529774665833e-01 + <_> + + 0 -1 273 -1.0211090557277203e-03 + + 2.5447669625282288e-01 5.3389710187911987e-01 + <_> + + 0 -1 274 -5.4247528314590454e-03 + + 2.7188581228256226e-01 5.3240692615509033e-01 + <_> + + 0 -1 275 -1.0559899965301156e-03 + + 3.1782880425453186e-01 5.5345088243484497e-01 + <_> + + 0 -1 276 6.6465808777138591e-04 + + 4.2842191457748413e-01 6.5581941604614258e-01 + <_> + + 0 -1 277 -2.7524109464138746e-04 + + 5.9028607606887817e-01 3.8102629780769348e-01 + <_> + + 0 -1 278 4.2293202131986618e-03 + + 3.8164898753166199e-01 5.7093858718872070e-01 + <_> + + 0 -1 279 -3.2868210691958666e-03 + + 1.7477439343929291e-01 5.2595442533493042e-01 + <_> + + 0 -1 280 1.5611879643984139e-04 + + 3.6017221212387085e-01 5.7256120443344116e-01 + <_> + + 0 -1 281 -7.3621381488919724e-06 + + 5.4018580913543701e-01 3.0444970726966858e-01 + <_> + + 0 -1 282 -1.4767250046133995e-02 + + 3.2207700610160828e-01 5.5734348297119141e-01 + <_> + + 0 -1 283 2.4489590898156166e-02 + + 4.3015280365943909e-01 6.5188127756118774e-01 + <_> + + 0 -1 284 -3.7652091123163700e-04 + + 3.5645830631256104e-01 5.5982369184494019e-01 + <_> + + 0 -1 285 7.3657688517414499e-06 + + 3.4907829761505127e-01 5.5618977546691895e-01 + <_> + + 0 -1 286 -1.5099939890205860e-02 + + 1.7762720584869385e-01 5.3352999687194824e-01 + <_> + + 0 -1 287 -3.8316650316119194e-03 + + 6.1496877670288086e-01 4.2213940620422363e-01 + <_> + + 0 -1 288 1.6925400123000145e-02 + + 5.4130148887634277e-01 2.1665850281715393e-01 + <_> + + 0 -1 289 -3.0477850232273340e-03 + + 
6.4494907855987549e-01 4.3546178936958313e-01 + <_> + + 0 -1 290 3.2140589319169521e-03 + + 5.4001551866531372e-01 3.5232171416282654e-01 + <_> + + 0 -1 291 -4.0023201145231724e-03 + + 2.7745240926742554e-01 5.3384172916412354e-01 + <_> + + 0 -1 292 7.4182129465043545e-03 + + 5.6767392158508301e-01 3.7028178572654724e-01 + <_> + + 0 -1 293 -8.8764587417244911e-03 + + 7.7492219209671021e-01 4.5836889743804932e-01 + <_> + + 0 -1 294 2.7311739977449179e-03 + + 5.3387218713760376e-01 3.9966610074043274e-01 + <_> + + 0 -1 295 -2.5082379579544067e-03 + + 5.6119632720947266e-01 3.7774989008903503e-01 + <_> + + 0 -1 296 -8.0541074275970459e-03 + + 2.9152289032936096e-01 5.1791828870773315e-01 + <_> + + 0 -1 297 -9.7938813269138336e-04 + + 5.5364328622817993e-01 3.7001928687095642e-01 + <_> + + 0 -1 298 -5.8745909482240677e-03 + + 3.7543910741806030e-01 5.6793761253356934e-01 + <_> + + 0 -1 299 -4.4936719350516796e-03 + + 7.0196992158889771e-01 4.4809499382972717e-01 + <_> + + 0 -1 300 -5.4389229044318199e-03 + + 2.3103649914264679e-01 5.3133869171142578e-01 + <_> + + 0 -1 301 -7.5094640487805009e-04 + + 5.8648687601089478e-01 4.1293430328369141e-01 + <_> + + 0 -1 302 1.4528800420521293e-05 + + 3.7324070930480957e-01 5.6196212768554688e-01 + <_> + + 0 -1 303 4.0758069604635239e-02 + + 5.3120911121368408e-01 2.7205219864845276e-01 + <_> + + 0 -1 304 6.6505931317806244e-03 + + 4.7100159525871277e-01 6.6934937238693237e-01 + <_> + + 0 -1 305 4.5759351924061775e-03 + + 5.1678192615509033e-01 1.6372759640216827e-01 + <_> + + 0 -1 306 6.5269311890006065e-03 + + 5.3976088762283325e-01 2.9385319352149963e-01 + <_> + + 0 -1 307 -1.3660379685461521e-02 + + 7.0864880084991455e-01 4.5322000980377197e-01 + <_> + + 0 -1 308 2.7358869090676308e-02 + + 5.2064812183380127e-01 3.5892319679260254e-01 + <_> + + 0 -1 309 6.2197551596909761e-04 + + 3.5070759057998657e-01 5.4411232471466064e-01 + <_> + + 0 -1 310 -3.3077080734074116e-03 + + 5.8595228195190430e-01 4.0248918533325195e-01 + <_> + + 0 -1 311 -1.0631109587848186e-02 + + 6.7432671785354614e-01 4.4226029515266418e-01 + <_> + + 0 -1 312 1.9441649317741394e-02 + + 5.2827161550521851e-01 1.7979049682617188e-01 + <_> + 71 + 3.4554111480712891e+01 + + <_> + + 0 -1 313 -5.5052167735993862e-03 + + 5.9147310256958008e-01 2.6265591382980347e-01 + <_> + + 0 -1 314 1.9562279339879751e-03 + + 2.3125819861888885e-01 5.7416272163391113e-01 + <_> + + 0 -1 315 -8.8924784213304520e-03 + + 1.6565300524234772e-01 5.6266540288925171e-01 + <_> + + 0 -1 316 8.3638377487659454e-02 + + 5.4234498739242554e-01 1.9572949409484863e-01 + <_> + + 0 -1 317 1.2282270472496748e-03 + + 3.4179040789604187e-01 5.9925037622451782e-01 + <_> + + 0 -1 318 5.7629169896245003e-03 + + 3.7195819616317749e-01 6.0799038410186768e-01 + <_> + + 0 -1 319 -1.6417410224676132e-03 + + 2.5774860382080078e-01 5.5769157409667969e-01 + <_> + + 0 -1 320 3.4113149158656597e-03 + + 2.9507490992546082e-01 5.5141717195510864e-01 + <_> + + 0 -1 321 -1.1069320142269135e-02 + + 7.5693589448928833e-01 4.4770789146423340e-01 + <_> + + 0 -1 322 3.4865971654653549e-02 + + 5.5837088823318481e-01 2.6696211099624634e-01 + <_> + + 0 -1 323 6.5701099811121821e-04 + + 5.6273132562637329e-01 2.9888901114463806e-01 + <_> + + 0 -1 324 -2.4339130148291588e-02 + + 2.7711850404739380e-01 5.1088631153106689e-01 + <_> + + 0 -1 325 5.9435202274471521e-04 + + 5.5806517601013184e-01 3.1203418970108032e-01 + <_> + + 0 -1 326 2.2971509024500847e-03 + + 3.3302500844001770e-01 5.6790757179260254e-01 + <_> + + 0 -1 327 -3.7801829166710377e-03 + + 
2.9905349016189575e-01 5.3448081016540527e-01 + <_> + + 0 -1 328 -1.3420669734477997e-01 + + 1.4638589322566986e-01 5.3925681114196777e-01 + <_> + + 0 -1 329 7.5224548345431685e-04 + + 3.7469539046287537e-01 5.6927347183227539e-01 + <_> + + 0 -1 330 -4.0545541793107986e-02 + + 2.7547478675842285e-01 5.4842978715896606e-01 + <_> + + 0 -1 331 1.2572970008477569e-03 + + 3.7445840239524841e-01 5.7560759782791138e-01 + <_> + + 0 -1 332 -7.4249948374927044e-03 + + 7.5138592720031738e-01 4.7282311320304871e-01 + <_> + + 0 -1 333 5.0908129196614027e-04 + + 5.4048967361450195e-01 2.9323211312294006e-01 + <_> + + 0 -1 334 -1.2808450264856219e-03 + + 6.1697798967361450e-01 4.2733490467071533e-01 + <_> + + 0 -1 335 -1.8348860321566463e-03 + + 2.0484960079193115e-01 5.2064722776412964e-01 + <_> + + 0 -1 336 2.7484869584441185e-02 + + 5.2529847621917725e-01 1.6755220293998718e-01 + <_> + + 0 -1 337 2.2372419480234385e-03 + + 5.2677828073501587e-01 2.7776581048965454e-01 + <_> + + 0 -1 338 -8.8635291904211044e-03 + + 6.9545578956604004e-01 4.8120489716529846e-01 + <_> + + 0 -1 339 4.1753971017897129e-03 + + 4.2918878793716431e-01 6.3491958379745483e-01 + <_> + + 0 -1 340 -1.7098189564421773e-03 + + 2.9305368661880493e-01 5.3612488508224487e-01 + <_> + + 0 -1 341 6.5328548662364483e-03 + + 4.4953250885009766e-01 7.4096941947937012e-01 + <_> + + 0 -1 342 -9.5372907817363739e-03 + + 3.1491199135780334e-01 5.4165017604827881e-01 + <_> + + 0 -1 343 2.5310989469289780e-02 + + 5.1218920946121216e-01 1.3117079436779022e-01 + <_> + + 0 -1 344 3.6460969597101212e-02 + + 5.1759117841720581e-01 2.5913399457931519e-01 + <_> + + 0 -1 345 2.0854329690337181e-02 + + 5.1371401548385620e-01 1.5823160111904144e-01 + <_> + + 0 -1 346 -8.7207747856155038e-04 + + 5.5743098258972168e-01 4.3989789485931396e-01 + <_> + + 0 -1 347 -1.5227000403683633e-05 + + 5.5489408969879150e-01 3.7080699205398560e-01 + <_> + + 0 -1 348 -8.4316509310156107e-04 + + 3.3874198794364929e-01 5.5542111396789551e-01 + <_> + + 0 -1 349 3.6037859972566366e-03 + + 5.3580617904663086e-01 3.4111711382865906e-01 + <_> + + 0 -1 350 -6.8057891912758350e-03 + + 6.1252027750015259e-01 4.3458628654479980e-01 + <_> + + 0 -1 351 -4.7021660953760147e-02 + + 2.3581659793853760e-01 5.1937389373779297e-01 + <_> + + 0 -1 352 -3.6954108625650406e-02 + + 7.3231112957000732e-01 4.7609439492225647e-01 + <_> + + 0 -1 353 1.0439479956403375e-03 + + 5.4194551706314087e-01 3.4113308787345886e-01 + <_> + + 0 -1 354 -2.1050689974799752e-04 + + 2.8216940164566040e-01 5.5549472570419312e-01 + <_> + + 0 -1 355 -8.0831587314605713e-02 + + 9.1299301385879517e-01 4.6974349021911621e-01 + <_> + + 0 -1 356 -3.6579059087671340e-04 + + 6.0226702690124512e-01 3.9782929420471191e-01 + <_> + + 0 -1 357 -1.2545920617412776e-04 + + 5.6132131814956665e-01 3.8455399870872498e-01 + <_> + + 0 -1 358 -6.8786486983299255e-02 + + 2.2616119682788849e-01 5.3004968166351318e-01 + <_> + + 0 -1 359 1.2415789999067783e-02 + + 4.0756919980049133e-01 5.8288121223449707e-01 + <_> + + 0 -1 360 -4.7174817882478237e-03 + + 2.8272539377212524e-01 5.2677577733993530e-01 + <_> + + 0 -1 361 3.8136858493089676e-02 + + 5.0747412443161011e-01 1.0236159712076187e-01 + <_> + + 0 -1 362 -2.8168049175292253e-03 + + 6.1690068244934082e-01 4.3596929311752319e-01 + <_> + + 0 -1 363 8.1303603947162628e-03 + + 4.5244330167770386e-01 7.6060950756072998e-01 + <_> + + 0 -1 364 6.0056019574403763e-03 + + 5.2404087781906128e-01 1.8597120046615601e-01 + <_> + + 0 -1 365 1.9139319658279419e-02 + + 5.2093791961669922e-01 
2.3320719599723816e-01 + <_> + + 0 -1 366 1.6445759683847427e-02 + + 5.4507029056549072e-01 3.2642349600791931e-01 + <_> + + 0 -1 367 -3.7356890738010406e-02 + + 6.9990468025207520e-01 4.5332419872283936e-01 + <_> + + 0 -1 368 -1.9727900624275208e-02 + + 2.6536649465560913e-01 5.4128098487854004e-01 + <_> + + 0 -1 369 6.6972579807043076e-03 + + 4.4805660843849182e-01 7.1386522054672241e-01 + <_> + + 0 -1 370 7.4457528535276651e-04 + + 4.2313501238822937e-01 5.4713201522827148e-01 + <_> + + 0 -1 371 1.1790640419349074e-03 + + 5.3417021036148071e-01 3.1304550170898438e-01 + <_> + + 0 -1 372 3.4980610013008118e-02 + + 5.1186597347259521e-01 3.4305301308631897e-01 + <_> + + 0 -1 373 5.6859792675822973e-04 + + 3.5321870446205139e-01 5.4686397314071655e-01 + <_> + + 0 -1 374 -1.1340649798512459e-02 + + 2.8423538804054260e-01 5.3487008810043335e-01 + <_> + + 0 -1 375 -6.6228108480572701e-03 + + 6.8836402893066406e-01 4.4926649332046509e-01 + <_> + + 0 -1 376 -8.0160330981016159e-03 + + 1.7098939418792725e-01 5.2243089675903320e-01 + <_> + + 0 -1 377 1.4206819469109178e-03 + + 5.2908462285995483e-01 2.9933831095695496e-01 + <_> + + 0 -1 378 -2.7801711112260818e-03 + + 6.4988541603088379e-01 4.4604998826980591e-01 + <_> + + 0 -1 379 -1.4747589593753219e-03 + + 3.2604381442070007e-01 5.3881132602691650e-01 + <_> + + 0 -1 380 -2.3830339312553406e-02 + + 7.5289410352706909e-01 4.8012199997901917e-01 + <_> + + 0 -1 381 6.9369790144264698e-03 + + 5.3351658582687378e-01 3.2614278793334961e-01 + <_> + + 0 -1 382 8.2806255668401718e-03 + + 4.5803940296173096e-01 5.7378298044204712e-01 + <_> + + 0 -1 383 -1.0439500212669373e-02 + + 2.5923201441764832e-01 5.2338278293609619e-01 + <_> + 80 + 3.9107288360595703e+01 + + <_> + + 0 -1 384 7.2006587870419025e-03 + + 3.2588860392570496e-01 6.8498080968856812e-01 + <_> + + 0 -1 385 -2.8593589086085558e-03 + + 5.8388811349868774e-01 2.5378298759460449e-01 + <_> + + 0 -1 386 6.8580528022721410e-04 + + 5.7080817222595215e-01 2.8124240040779114e-01 + <_> + + 0 -1 387 7.9580191522836685e-03 + + 2.5010511279106140e-01 5.5442607402801514e-01 + <_> + + 0 -1 388 -1.2124150525778532e-03 + + 2.3853680491447449e-01 5.4333502054214478e-01 + <_> + + 0 -1 389 7.9426132142543793e-03 + + 3.9550709724426270e-01 6.2207579612731934e-01 + <_> + + 0 -1 390 2.4630590341985226e-03 + + 5.6397080421447754e-01 2.9923579096794128e-01 + <_> + + 0 -1 391 -6.0396599583327770e-03 + + 2.1865129470825195e-01 5.4116767644882202e-01 + <_> + + 0 -1 392 -1.2988339876756072e-03 + + 2.3507060110569000e-01 5.3645849227905273e-01 + <_> + + 0 -1 393 2.2299369447864592e-04 + + 3.8041129708290100e-01 5.7296061515808105e-01 + <_> + + 0 -1 394 1.4654280385002494e-03 + + 2.5101679563522339e-01 5.2582687139511108e-01 + <_> + + 0 -1 395 -8.1210042117163539e-04 + + 5.9928238391876221e-01 3.8511589169502258e-01 + <_> + + 0 -1 396 -1.3836020370945334e-03 + + 5.6813961267471313e-01 3.6365869641304016e-01 + <_> + + 0 -1 397 -2.7936449274420738e-02 + + 1.4913170039653778e-01 5.3775602579116821e-01 + <_> + + 0 -1 398 -4.6919551095925272e-04 + + 3.6924299597740173e-01 5.5724847316741943e-01 + <_> + + 0 -1 399 -4.9829659983515739e-03 + + 6.7585092782974243e-01 4.5325040817260742e-01 + <_> + + 0 -1 400 1.8815309740602970e-03 + + 5.3680229187011719e-01 2.9325398802757263e-01 + <_> + + 0 -1 401 -1.9067550078034401e-02 + + 1.6493770480155945e-01 5.3300672769546509e-01 + <_> + + 0 -1 402 -4.6906559728085995e-03 + + 1.9639259576797485e-01 5.1193618774414062e-01 + <_> + + 0 -1 403 5.9777139686048031e-03 + + 4.6711719036102295e-01 
7.0083981752395630e-01 + <_> + + 0 -1 404 -3.3303130418062210e-02 + + 1.1554169654846191e-01 5.1041620969772339e-01 + <_> + + 0 -1 405 9.0744107961654663e-02 + + 5.1496601104736328e-01 1.3061730563640594e-01 + <_> + + 0 -1 406 9.3555898638442159e-04 + + 3.6054810881614685e-01 5.4398590326309204e-01 + <_> + + 0 -1 407 1.4901650138199329e-02 + + 4.8862120509147644e-01 7.6875698566436768e-01 + <_> + + 0 -1 408 6.1594118596985936e-04 + + 5.3568130731582642e-01 3.2409390807151794e-01 + <_> + + 0 -1 409 -5.0670988857746124e-02 + + 1.8486219644546509e-01 5.2304041385650635e-01 + <_> + + 0 -1 410 6.8665749859064817e-04 + + 3.8405799865722656e-01 5.5179458856582642e-01 + <_> + + 0 -1 411 8.3712432533502579e-03 + + 4.2885640263557434e-01 6.1317539215087891e-01 + <_> + + 0 -1 412 -1.2953069526702166e-03 + + 2.9136741161346436e-01 5.2807378768920898e-01 + <_> + + 0 -1 413 -4.1941680014133453e-02 + + 7.5547999143600464e-01 4.8560309410095215e-01 + <_> + + 0 -1 414 -2.3529380559921265e-02 + + 2.8382799029350281e-01 5.2560812234878540e-01 + <_> + + 0 -1 415 4.0857449173927307e-02 + + 4.8709350824356079e-01 6.2772971391677856e-01 + <_> + + 0 -1 416 -2.5406869128346443e-02 + + 7.0997077226638794e-01 4.5750290155410767e-01 + <_> + + 0 -1 417 -4.1415440500713885e-04 + + 4.0308868885040283e-01 5.4694122076034546e-01 + <_> + + 0 -1 418 2.1824119612574577e-02 + + 4.5020240545272827e-01 6.7687010765075684e-01 + <_> + + 0 -1 419 1.4114039950072765e-02 + + 5.4428607225418091e-01 3.7917000055313110e-01 + <_> + + 0 -1 420 6.7214590671937913e-05 + + 4.2004638910293579e-01 5.8734762668609619e-01 + <_> + + 0 -1 421 -7.9417638480663300e-03 + + 3.7925618886947632e-01 5.5852657556533813e-01 + <_> + + 0 -1 422 -7.2144409641623497e-03 + + 7.2531038522720337e-01 4.6035489439964294e-01 + <_> + + 0 -1 423 2.5817339774221182e-03 + + 4.6933019161224365e-01 5.9002387523651123e-01 + <_> + + 0 -1 424 1.3409319519996643e-01 + + 5.1492130756378174e-01 1.8088449537754059e-01 + <_> + + 0 -1 425 2.2962710354477167e-03 + + 5.3997439146041870e-01 3.7178671360015869e-01 + <_> + + 0 -1 426 -2.1575849968940020e-03 + + 2.4084959924221039e-01 5.1488637924194336e-01 + <_> + + 0 -1 427 -4.9196188338100910e-03 + + 6.5735882520675659e-01 4.7387400269508362e-01 + <_> + + 0 -1 428 1.6267469618469477e-03 + + 4.1928219795227051e-01 6.3031142950057983e-01 + <_> + + 0 -1 429 3.3413388882763684e-04 + + 5.5402982234954834e-01 3.7021011114120483e-01 + <_> + + 0 -1 430 -2.6698080822825432e-02 + + 1.7109179496765137e-01 5.1014107465744019e-01 + <_> + + 0 -1 431 -3.0561879277229309e-02 + + 1.9042180478572845e-01 5.1687937974929810e-01 + <_> + + 0 -1 432 2.8511548880487680e-03 + + 4.4475069642066956e-01 6.3138538599014282e-01 + <_> + + 0 -1 433 -3.6211479455232620e-02 + + 2.4907270073890686e-01 5.3773492574691772e-01 + <_> + + 0 -1 434 -2.4115189444273710e-03 + + 5.3812432289123535e-01 3.6642369627952576e-01 + <_> + + 0 -1 435 -7.7253201743587852e-04 + + 5.5302321910858154e-01 3.5415500402450562e-01 + <_> + + 0 -1 436 2.9481729143299162e-04 + + 4.1326990723609924e-01 5.6672430038452148e-01 + <_> + + 0 -1 437 -6.2334560789167881e-03 + + 9.8787233233451843e-02 5.1986688375473022e-01 + <_> + + 0 -1 438 -2.6274729520082474e-02 + + 9.1127492487430573e-02 5.0281071662902832e-01 + <_> + + 0 -1 439 5.3212260827422142e-03 + + 4.7266489267349243e-01 6.2227207422256470e-01 + <_> + + 0 -1 440 -4.1129058226943016e-03 + + 2.1574570238590240e-01 5.1378047466278076e-01 + <_> + + 0 -1 441 3.2457809429615736e-03 + + 5.4107707738876343e-01 3.7217769026756287e-01 + <_> + + 0 -1 
442 -1.6359709203243256e-02 + + 7.7878749370574951e-01 4.6852919459342957e-01 + <_> + + 0 -1 443 3.2166109303943813e-04 + + 5.4789870977401733e-01 4.2403739690780640e-01 + <_> + + 0 -1 444 6.4452440710738301e-04 + + 5.3305608034133911e-01 3.5013249516487122e-01 + <_> + + 0 -1 445 -7.8909732401371002e-03 + + 6.9235211610794067e-01 4.7265690565109253e-01 + <_> + + 0 -1 446 4.8336211591959000e-02 + + 5.0559002161026001e-01 7.5749203562736511e-02 + <_> + + 0 -1 447 -7.5178127735853195e-04 + + 3.7837418913841248e-01 5.5385738611221313e-01 + <_> + + 0 -1 448 -2.4953910615295172e-03 + + 3.0816510319709778e-01 5.3596121072769165e-01 + <_> + + 0 -1 449 -2.2385010961443186e-03 + + 6.6339588165283203e-01 4.6493428945541382e-01 + <_> + + 0 -1 450 -1.7988430336117744e-03 + + 6.5968447923660278e-01 4.3471878767013550e-01 + <_> + + 0 -1 451 8.7860915809869766e-03 + + 5.2318328619003296e-01 2.3155799508094788e-01 + <_> + + 0 -1 452 3.6715380847454071e-03 + + 5.2042502164840698e-01 2.9773768782615662e-01 + <_> + + 0 -1 453 -3.5336449742317200e-02 + + 7.2388780117034912e-01 4.8615050315856934e-01 + <_> + + 0 -1 454 -6.9189240457490087e-04 + + 3.1050220131874084e-01 5.2298247814178467e-01 + <_> + + 0 -1 455 -3.3946109469980001e-03 + + 3.1389680504798889e-01 5.2101737260818481e-01 + <_> + + 0 -1 456 9.8569283727556467e-04 + + 4.5365801453590393e-01 6.5850979089736938e-01 + <_> + + 0 -1 457 -5.0163101404905319e-02 + + 1.8044540286064148e-01 5.1989167928695679e-01 + <_> + + 0 -1 458 -2.2367259953171015e-03 + + 7.2557020187377930e-01 4.6513590216636658e-01 + <_> + + 0 -1 459 7.4326287722215056e-04 + + 4.4129210710525513e-01 5.8985459804534912e-01 + <_> + + 0 -1 460 -9.3485182151198387e-04 + + 3.5000529885292053e-01 5.3660178184509277e-01 + <_> + + 0 -1 461 1.7497939988970757e-02 + + 4.9121949076652527e-01 8.3152848482131958e-01 + <_> + + 0 -1 462 -1.5200000489130616e-03 + + 3.5702759027481079e-01 5.3705602884292603e-01 + <_> + + 0 -1 463 7.8003940870985389e-04 + + 4.3537721037864685e-01 5.9673351049423218e-01 + <_> + 103 + 5.0610481262207031e+01 + + <_> + + 0 -1 464 -9.9945552647113800e-03 + + 6.1625832319259644e-01 3.0545330047607422e-01 + <_> + + 0 -1 465 -1.1085229925811291e-03 + + 5.8182948827743530e-01 3.1555780768394470e-01 + <_> + + 0 -1 466 1.0364380432292819e-03 + + 2.5520521402359009e-01 5.6929117441177368e-01 + <_> + + 0 -1 467 6.8211311008781195e-04 + + 3.6850899457931519e-01 5.9349310398101807e-01 + <_> + + 0 -1 468 -6.8057340104132891e-04 + + 2.3323920369148254e-01 5.4747921228408813e-01 + <_> + + 0 -1 469 2.6068789884448051e-04 + + 3.2574570178985596e-01 5.6675457954406738e-01 + <_> + + 0 -1 470 5.1607372006401420e-04 + + 3.7447169423103333e-01 5.8454728126525879e-01 + <_> + + 0 -1 471 8.5007521556690335e-04 + + 3.4203711152076721e-01 5.5228072404861450e-01 + <_> + + 0 -1 472 -1.8607829697430134e-03 + + 2.8044199943542480e-01 5.3754240274429321e-01 + <_> + + 0 -1 473 -1.5033970121294260e-03 + + 2.5790509581565857e-01 5.4989522695541382e-01 + <_> + + 0 -1 474 2.3478909861296415e-03 + + 4.1751560568809509e-01 6.3137108087539673e-01 + <_> + + 0 -1 475 -2.8880240279249847e-04 + + 5.8651697635650635e-01 4.0526661276817322e-01 + <_> + + 0 -1 476 8.9405477046966553e-03 + + 5.2111411094665527e-01 2.3186540603637695e-01 + <_> + + 0 -1 477 -1.9327739253640175e-02 + + 2.7534329891204834e-01 5.2415257692337036e-01 + <_> + + 0 -1 478 -2.0202060113660991e-04 + + 5.7229787111282349e-01 3.6771959066390991e-01 + <_> + + 0 -1 479 2.1179069299250841e-03 + + 4.4661080837249756e-01 5.5424308776855469e-01 + <_> + + 0 
-1 480 -1.7743760254234076e-03 + + 2.8132531046867371e-01 5.3009599447250366e-01 + <_> + + 0 -1 481 4.2234458960592747e-03 + + 4.3997099995613098e-01 5.7954281568527222e-01 + <_> + + 0 -1 482 -1.4375220052897930e-02 + + 2.9811179637908936e-01 5.2920591831207275e-01 + <_> + + 0 -1 483 -1.5349180437624454e-02 + + 7.7052152156829834e-01 4.7481718659400940e-01 + <_> + + 0 -1 484 1.5152279956964776e-05 + + 3.7188440561294556e-01 5.5768972635269165e-01 + <_> + + 0 -1 485 -9.1293919831514359e-03 + + 3.6151960492134094e-01 5.2867668867111206e-01 + <_> + + 0 -1 486 2.2512159775942564e-03 + + 5.3647047281265259e-01 3.4862980246543884e-01 + <_> + + 0 -1 487 -4.9696918576955795e-03 + + 6.9276517629623413e-01 4.6768361330032349e-01 + <_> + + 0 -1 488 -1.2829010374844074e-02 + + 7.7121537923812866e-01 4.6607351303100586e-01 + <_> + + 0 -1 489 -9.3660065904259682e-03 + + 3.3749839663505554e-01 5.3512877225875854e-01 + <_> + + 0 -1 490 3.2452319283038378e-03 + + 5.3251898288726807e-01 3.2896101474761963e-01 + <_> + + 0 -1 491 -1.1723560281097889e-02 + + 6.8376529216766357e-01 4.7543001174926758e-01 + <_> + + 0 -1 492 2.9257940695970319e-05 + + 3.5720878839492798e-01 5.3605020046234131e-01 + <_> + + 0 -1 493 -2.2244219508138485e-05 + + 5.5414271354675293e-01 3.5520640015602112e-01 + <_> + + 0 -1 494 5.0881509669125080e-03 + + 5.0708442926406860e-01 1.2564620375633240e-01 + <_> + + 0 -1 495 2.7429679408669472e-02 + + 5.2695602178573608e-01 1.6258180141448975e-01 + <_> + + 0 -1 496 -6.4142867922782898e-03 + + 7.1455889940261841e-01 4.5841971039772034e-01 + <_> + + 0 -1 497 3.3479959238320589e-03 + + 5.3986120223999023e-01 3.4946969151496887e-01 + <_> + + 0 -1 498 -8.2635492086410522e-02 + + 2.4391929805278778e-01 5.1602262258529663e-01 + <_> + + 0 -1 499 1.0261740535497665e-03 + + 3.8868919014930725e-01 5.7679080963134766e-01 + <_> + + 0 -1 500 -1.6307090409100056e-03 + + 3.3894580602645874e-01 5.3477007150650024e-01 + <_> + + 0 -1 501 2.4546680506318808e-03 + + 4.6014139056205750e-01 6.3872468471527100e-01 + <_> + + 0 -1 502 -9.9476519972085953e-04 + + 5.7698792219161987e-01 4.1203960776329041e-01 + <_> + + 0 -1 503 1.5409190207719803e-02 + + 4.8787090182304382e-01 7.0898222923278809e-01 + <_> + + 0 -1 504 1.1784400558099151e-03 + + 5.2635532617568970e-01 2.8952449560165405e-01 + <_> + + 0 -1 505 -2.7701919898390770e-02 + + 1.4988289773464203e-01 5.2196067571640015e-01 + <_> + + 0 -1 506 -2.9505399987101555e-02 + + 2.4893319234251976e-02 4.9998161196708679e-01 + <_> + + 0 -1 507 4.5159430010244250e-04 + + 5.4646229743957520e-01 4.0296629071235657e-01 + <_> + + 0 -1 508 7.1772639639675617e-03 + + 4.2710569500923157e-01 5.8662968873977661e-01 + <_> + + 0 -1 509 -7.4182048439979553e-02 + + 6.8741792440414429e-01 4.9190279841423035e-01 + <_> + + 0 -1 510 -1.7254160717129707e-02 + + 3.3706760406494141e-01 5.3487390279769897e-01 + <_> + + 0 -1 511 1.4851559884846210e-02 + + 4.6267929673194885e-01 6.1299049854278564e-01 + <_> + + 0 -1 512 1.0002000257372856e-02 + + 5.3461229801177979e-01 3.4234538674354553e-01 + <_> + + 0 -1 513 2.0138120744377375e-03 + + 4.6438300609588623e-01 5.8243042230606079e-01 + <_> + + 0 -1 514 1.5135470312088728e-03 + + 5.1963961124420166e-01 2.8561499714851379e-01 + <_> + + 0 -1 515 3.1381431035697460e-03 + + 4.8381629586219788e-01 5.9585297107696533e-01 + <_> + + 0 -1 516 -5.1450440660119057e-03 + + 8.9203029870986938e-01 4.7414121031761169e-01 + <_> + + 0 -1 517 -4.4736708514392376e-03 + + 2.0339429378509521e-01 5.3372788429260254e-01 + <_> + + 0 -1 518 1.9628470763564110e-03 + + 
4.5716339349746704e-01 6.7258632183074951e-01 + <_> + + 0 -1 519 5.4260450415313244e-03 + + 5.2711081504821777e-01 2.8456708788871765e-01 + <_> + + 0 -1 520 4.9611460417509079e-04 + + 4.1383129358291626e-01 5.7185977697372437e-01 + <_> + + 0 -1 521 9.3728788197040558e-03 + + 5.2251511812210083e-01 2.8048470616340637e-01 + <_> + + 0 -1 522 6.0500897234305739e-04 + + 5.2367687225341797e-01 3.3145239949226379e-01 + <_> + + 0 -1 523 5.6792551185935736e-04 + + 4.5310598611831665e-01 6.2769711017608643e-01 + <_> + + 0 -1 524 2.4644339457154274e-02 + + 5.1308518648147583e-01 2.0171439647674561e-01 + <_> + + 0 -1 525 -1.0290450416505337e-02 + + 7.7865952253341675e-01 4.8766410350799561e-01 + <_> + + 0 -1 526 2.0629419013857841e-03 + + 4.2885988950729370e-01 5.8812642097473145e-01 + <_> + + 0 -1 527 -5.0519481301307678e-03 + + 3.5239779949188232e-01 5.2860087156295776e-01 + <_> + + 0 -1 528 -5.7692620903253555e-03 + + 6.8410861492156982e-01 4.5880940556526184e-01 + <_> + + 0 -1 529 -4.5789941214025021e-04 + + 3.5655200481414795e-01 5.4859781265258789e-01 + <_> + + 0 -1 530 -7.5918837683275342e-04 + + 3.3687931299209595e-01 5.2541971206665039e-01 + <_> + + 0 -1 531 -1.7737259622663260e-03 + + 3.4221610426902771e-01 5.4540151357650757e-01 + <_> + + 0 -1 532 -8.5610467940568924e-03 + + 6.5336120128631592e-01 4.4858568906784058e-01 + <_> + + 0 -1 533 1.7277270089834929e-03 + + 5.3075802326202393e-01 3.9253529906272888e-01 + <_> + + 0 -1 534 -2.8199609369039536e-02 + + 6.8574589490890503e-01 4.5885840058326721e-01 + <_> + + 0 -1 535 -1.7781109781935811e-03 + + 4.0378510951995850e-01 5.3698569536209106e-01 + <_> + + 0 -1 536 3.3177141449414194e-04 + + 5.3997987508773804e-01 3.7057501077651978e-01 + <_> + + 0 -1 537 2.6385399978607893e-03 + + 4.6654370427131653e-01 6.4527308940887451e-01 + <_> + + 0 -1 538 -2.1183069329708815e-03 + + 5.9147810935974121e-01 4.0646770596504211e-01 + <_> + + 0 -1 539 -1.4773289673030376e-02 + + 3.6420381069183350e-01 5.2947628498077393e-01 + <_> + + 0 -1 540 -1.6815440729260445e-02 + + 2.6642319560050964e-01 5.1449728012084961e-01 + <_> + + 0 -1 541 -6.3370140269398689e-03 + + 6.7795312404632568e-01 4.8520979285240173e-01 + <_> + + 0 -1 542 -4.4560048991115764e-05 + + 5.6139647960662842e-01 4.1530540585517883e-01 + <_> + + 0 -1 543 -1.0240620467811823e-03 + + 5.9644782543182373e-01 4.5663040876388550e-01 + <_> + + 0 -1 544 -2.3161689750850201e-03 + + 2.9761150479316711e-01 5.1881599426269531e-01 + <_> + + 0 -1 545 5.3217571973800659e-01 + + 5.1878392696380615e-01 2.2026319801807404e-01 + <_> + + 0 -1 546 -1.6643050312995911e-01 + + 1.8660229444503784e-01 5.0603431463241577e-01 + <_> + + 0 -1 547 1.1253529787063599e-01 + + 5.2121251821517944e-01 1.1850229650735855e-01 + <_> + + 0 -1 548 9.3046864494681358e-03 + + 4.5899370312690735e-01 6.8261492252349854e-01 + <_> + + 0 -1 549 -4.6255099587142467e-03 + + 3.0799409747123718e-01 5.2250087261199951e-01 + <_> + + 0 -1 550 -1.1116469651460648e-01 + + 2.1010440587997437e-01 5.0808018445968628e-01 + <_> + + 0 -1 551 -1.0888439603149891e-02 + + 5.7653552293777466e-01 4.7904640436172485e-01 + <_> + + 0 -1 552 5.8564301580190659e-03 + + 5.0651001930236816e-01 1.5635989606380463e-01 + <_> + + 0 -1 553 5.4854389280080795e-02 + + 4.9669149518013000e-01 7.2305107116699219e-01 + <_> + + 0 -1 554 -1.1197339743375778e-02 + + 2.1949790418148041e-01 5.0987982749938965e-01 + <_> + + 0 -1 555 4.4069071300327778e-03 + + 4.7784018516540527e-01 6.7709028720855713e-01 + <_> + + 0 -1 556 -6.3665293157100677e-02 + + 1.9363629817962646e-01 
+        <!-- OpenCV Haar cascade stage data: weak-classifier nodes of the form
+             "0 -1 <feature index> <node threshold>" followed by two leaf values,
+             for features 557 through 1279. Includes complete stages of 111, 102,
+             135, 137, 140, and 160 trees with stage thresholds
+             5.4620071411132812e+01, 5.0169731140136719e+01, 6.6669120788574219e+01,
+             6.7698921203613281e+01, 6.9229873657226562e+01, and
+             7.9249076843261719e+01 respectively. -->
-1 1280 2.4273579474538565e-03 + + 5.1362240314483643e-01 3.4973978996276855e-01 + <_> + + 0 -1 1281 1.9558030180633068e-03 + + 4.8261928558349609e-01 6.4083808660507202e-01 + <_> + + 0 -1 1282 -1.7494610510766506e-03 + + 3.9228358864784241e-01 5.2726852893829346e-01 + <_> + + 0 -1 1283 1.3955079950392246e-02 + + 5.0782018899917603e-01 8.4165048599243164e-01 + <_> + + 0 -1 1284 -2.1896739781368524e-04 + + 5.5204898118972778e-01 4.3142348527908325e-01 + <_> + + 0 -1 1285 -1.5131309628486633e-03 + + 3.9346051216125488e-01 5.3825712203979492e-01 + <_> + + 0 -1 1286 -4.3622800149023533e-03 + + 7.3706287145614624e-01 4.7364759445190430e-01 + <_> + + 0 -1 1287 6.5160587430000305e-02 + + 5.1592797040939331e-01 3.2815951108932495e-01 + <_> + + 0 -1 1288 -2.3567399475723505e-03 + + 3.6728268861770630e-01 5.1728862524032593e-01 + <_> + + 0 -1 1289 1.5146659687161446e-02 + + 5.0314939022064209e-01 6.6876041889190674e-01 + <_> + + 0 -1 1290 -2.2850960493087769e-02 + + 6.7675197124481201e-01 4.7095969319343567e-01 + <_> + + 0 -1 1291 4.8867650330066681e-03 + + 5.2579981088638306e-01 4.0598788857460022e-01 + <_> + + 0 -1 1292 1.7619599821045995e-03 + + 4.6962729096412659e-01 6.6882789134979248e-01 + <_> + + 0 -1 1293 -1.2942519970238209e-03 + + 4.3207129836082458e-01 5.3442817926406860e-01 + <_> + + 0 -1 1294 1.0929949581623077e-02 + + 4.9977061152458191e-01 1.6374860703945160e-01 + <_> + + 0 -1 1295 2.9958489903947338e-05 + + 4.2824178934097290e-01 5.6332242488861084e-01 + <_> + + 0 -1 1296 -6.5884361974895000e-03 + + 6.7721211910247803e-01 4.7005268931388855e-01 + <_> + + 0 -1 1297 3.2527779694646597e-03 + + 5.3133970499038696e-01 4.5361489057540894e-01 + <_> + + 0 -1 1298 -4.0435739792883396e-03 + + 5.6600618362426758e-01 4.4133889675140381e-01 + <_> + + 0 -1 1299 -1.2523540062829852e-03 + + 3.7319138646125793e-01 5.3564518690109253e-01 + <_> + + 0 -1 1300 1.9246719602961093e-04 + + 5.1899862289428711e-01 3.7388110160827637e-01 + <_> + + 0 -1 1301 -3.8589671254158020e-02 + + 2.9563739895820618e-01 5.1888108253479004e-01 + <_> + + 0 -1 1302 1.5489870565943420e-04 + + 4.3471351265907288e-01 5.5095332860946655e-01 + <_> + + 0 -1 1303 -3.3763848245143890e-02 + + 3.2303300499916077e-01 5.1954758167266846e-01 + <_> + + 0 -1 1304 -8.2657067105174065e-03 + + 5.9754890203475952e-01 4.5521140098571777e-01 + <_> + + 0 -1 1305 1.4481440302915871e-05 + + 4.7456780076026917e-01 5.4974269866943359e-01 + <_> + + 0 -1 1306 1.4951299817766994e-05 + + 4.3244731426239014e-01 5.4806441068649292e-01 + <_> + + 0 -1 1307 -1.8741799518465996e-02 + + 1.5800529718399048e-01 5.1785331964492798e-01 + <_> + + 0 -1 1308 1.7572239739820361e-03 + + 4.5176368951797485e-01 5.7737642526626587e-01 + <_> + + 0 -1 1309 -3.1391119118779898e-03 + + 4.1496479511260986e-01 5.4608422517776489e-01 + <_> + + 0 -1 1310 6.6656779381446540e-05 + + 4.0390908718109131e-01 5.2930849790573120e-01 + <_> + + 0 -1 1311 6.7743421532213688e-03 + + 4.7676518559455872e-01 6.1219561100006104e-01 + <_> + + 0 -1 1312 -7.3868161998689175e-03 + + 3.5862588882446289e-01 5.1872807741165161e-01 + <_> + + 0 -1 1313 1.4040930196642876e-02 + + 4.7121399641036987e-01 5.5761557817459106e-01 + <_> + + 0 -1 1314 -5.5258329957723618e-03 + + 2.6610270142555237e-01 5.0392812490463257e-01 + <_> + + 0 -1 1315 3.8684239983558655e-01 + + 5.1443397998809814e-01 2.5258991122245789e-01 + <_> + + 0 -1 1316 1.1459240340627730e-04 + + 4.2849949002265930e-01 5.4233711957931519e-01 + <_> + + 0 -1 1317 -1.8467569723725319e-02 + + 3.8858351111412048e-01 5.2130621671676636e-01 + <_> + + 0 -1 
1318 -4.5907011372037232e-04 + + 5.4125630855560303e-01 4.2359098792076111e-01 + <_> + + 0 -1 1319 1.2527540093287826e-03 + + 4.8993051052093506e-01 6.6240912675857544e-01 + <_> + + 0 -1 1320 1.4910609461367130e-03 + + 5.2867782115936279e-01 4.0400519967079163e-01 + <_> + + 0 -1 1321 -7.5435562757775187e-04 + + 6.0329902172088623e-01 4.7951200604438782e-01 + <_> + + 0 -1 1322 -6.9478838704526424e-03 + + 4.0844011306762695e-01 5.3735041618347168e-01 + <_> + + 0 -1 1323 2.8092920547351241e-04 + + 4.8460629582405090e-01 5.7593822479248047e-01 + <_> + + 0 -1 1324 9.6073717577382922e-04 + + 5.1647412776947021e-01 3.5549798607826233e-01 + <_> + + 0 -1 1325 -2.6883929967880249e-04 + + 5.6775820255279541e-01 4.7317659854888916e-01 + <_> + + 0 -1 1326 2.1599370520561934e-03 + + 4.7314870357513428e-01 7.0705670118331909e-01 + <_> + + 0 -1 1327 5.6235301308333874e-03 + + 5.2402430772781372e-01 2.7817919850349426e-01 + <_> + + 0 -1 1328 -5.0243991427123547e-03 + + 2.8370139002799988e-01 5.0623041391372681e-01 + <_> + + 0 -1 1329 -9.7611639648675919e-03 + + 7.4007177352905273e-01 4.9345690011978149e-01 + <_> + + 0 -1 1330 4.1515100747346878e-03 + + 5.1191312074661255e-01 3.4070080518722534e-01 + <_> + + 0 -1 1331 6.2465080991387367e-03 + + 4.9237880110740662e-01 6.5790587663650513e-01 + <_> + + 0 -1 1332 -7.0597478188574314e-03 + + 2.4347110092639923e-01 5.0328421592712402e-01 + <_> + + 0 -1 1333 -2.0587709732353687e-03 + + 5.9003108739852905e-01 4.6950870752334595e-01 + <_> + + 0 -1 1334 -2.4146060459315777e-03 + + 3.6473178863525391e-01 5.1892018318176270e-01 + <_> + + 0 -1 1335 -1.4817609917372465e-03 + + 6.0349482297897339e-01 4.9401280283927917e-01 + <_> + + 0 -1 1336 -6.3016400672495365e-03 + + 5.8189898729324341e-01 4.5604279637336731e-01 + <_> + + 0 -1 1337 3.4763428848236799e-03 + + 5.2174758911132812e-01 3.4839931130409241e-01 + <_> + + 0 -1 1338 -2.2250870242714882e-02 + + 2.3607000708580017e-01 5.0320827960968018e-01 + <_> + + 0 -1 1339 -3.0612550675868988e-02 + + 6.4991867542266846e-01 4.9149191379547119e-01 + <_> + + 0 -1 1340 1.3057479634881020e-02 + + 4.4133231043815613e-01 5.6837642192840576e-01 + <_> + + 0 -1 1341 -6.0095742810517550e-04 + + 4.3597310781478882e-01 5.3334832191467285e-01 + <_> + + 0 -1 1342 -4.1514250915497541e-04 + + 5.5040627717971802e-01 4.3260601162910461e-01 + <_> + + 0 -1 1343 -1.3776290230453014e-02 + + 4.0641129016876221e-01 5.2015489339828491e-01 + <_> + + 0 -1 1344 -3.2296508550643921e-02 + + 4.7351971268653870e-02 4.9771949648857117e-01 + <_> + + 0 -1 1345 5.3556978702545166e-02 + + 4.8817330598831177e-01 6.6669392585754395e-01 + <_> + + 0 -1 1346 8.1889545544981956e-03 + + 5.4000371694564819e-01 4.2408201098442078e-01 + <_> + + 0 -1 1347 2.1055320394225419e-04 + + 4.8020479083061218e-01 5.5638527870178223e-01 + <_> + + 0 -1 1348 -2.4382730480283499e-03 + + 7.3877930641174316e-01 4.7736850380897522e-01 + <_> + + 0 -1 1349 3.2835570164024830e-03 + + 5.2885460853576660e-01 3.1712919473648071e-01 + <_> + + 0 -1 1350 2.3729570675641298e-03 + + 4.7508129477500916e-01 7.0601707696914673e-01 + <_> + + 0 -1 1351 -1.4541699783876538e-03 + + 3.8117301464080811e-01 5.3307390213012695e-01 + <_> + 177 + 8.7696029663085938e+01 + + <_> + + 0 -1 1352 5.5755238980054855e-02 + + 4.0191569924354553e-01 6.8060368299484253e-01 + <_> + + 0 -1 1353 2.4730248842388391e-03 + + 3.3511489629745483e-01 5.9657198190689087e-01 + <_> + + 0 -1 1354 -3.5031698644161224e-04 + + 5.5577081441879272e-01 3.4822869300842285e-01 + <_> + + 0 -1 1355 5.4167630150914192e-04 + + 4.2608588933944702e-01 
5.6933808326721191e-01 + <_> + + 0 -1 1356 7.7193678589537740e-04 + + 3.4942400455474854e-01 5.4336887598037720e-01 + <_> + + 0 -1 1357 -1.5999219613149762e-03 + + 4.0284991264343262e-01 5.4843592643737793e-01 + <_> + + 0 -1 1358 -1.1832080053864047e-04 + + 3.8069018721580505e-01 5.4254651069641113e-01 + <_> + + 0 -1 1359 3.2909031142480671e-04 + + 2.6201000809669495e-01 5.4295217990875244e-01 + <_> + + 0 -1 1360 2.9518108931370080e-04 + + 3.7997689843177795e-01 5.3992640972137451e-01 + <_> + + 0 -1 1361 9.0466710389591753e-05 + + 4.4336450099945068e-01 5.4402261972427368e-01 + <_> + + 0 -1 1362 1.5007190086180344e-05 + + 3.7196549773216248e-01 5.4091197252273560e-01 + <_> + + 0 -1 1363 1.3935610651969910e-01 + + 5.5253958702087402e-01 4.4790428876876831e-01 + <_> + + 0 -1 1364 1.6461990308016539e-03 + + 4.2645010352134705e-01 5.7721698284149170e-01 + <_> + + 0 -1 1365 4.9984431825578213e-04 + + 4.3595260381698608e-01 5.6858712434768677e-01 + <_> + + 0 -1 1366 -1.0971280280500650e-03 + + 3.3901369571685791e-01 5.2054089307785034e-01 + <_> + + 0 -1 1367 6.6919892560690641e-04 + + 4.5574560761451721e-01 5.9806597232818604e-01 + <_> + + 0 -1 1368 8.6471042595803738e-04 + + 5.1348412036895752e-01 2.9440331459045410e-01 + <_> + + 0 -1 1369 -2.7182599296793342e-04 + + 3.9065781235694885e-01 5.3771811723709106e-01 + <_> + + 0 -1 1370 3.0249499104684219e-05 + + 3.6796098947525024e-01 5.2256888151168823e-01 + <_> + + 0 -1 1371 -8.5225896909832954e-03 + + 7.2931021451950073e-01 4.8923650383949280e-01 + <_> + + 0 -1 1372 1.6705560265108943e-03 + + 4.3453249335289001e-01 5.6961381435394287e-01 + <_> + + 0 -1 1373 -7.1433838456869125e-03 + + 2.5912800431251526e-01 5.2256238460540771e-01 + <_> + + 0 -1 1374 -1.6319369897246361e-02 + + 6.9222790002822876e-01 4.6515759825706482e-01 + <_> + + 0 -1 1375 4.8034260980784893e-03 + + 5.3522628545761108e-01 3.2863029837608337e-01 + <_> + + 0 -1 1376 -7.5421929359436035e-03 + + 2.0405440032482147e-01 5.0345462560653687e-01 + <_> + + 0 -1 1377 -1.4363110065460205e-02 + + 6.8048888444900513e-01 4.8890590667724609e-01 + <_> + + 0 -1 1378 8.9063588529825211e-04 + + 5.3106957674026489e-01 3.8954809308052063e-01 + <_> + + 0 -1 1379 -4.4060191139578819e-03 + + 5.7415628433227539e-01 4.3724268674850464e-01 + <_> + + 0 -1 1380 -1.8862540309783071e-04 + + 2.8317859768867493e-01 5.0982052087783813e-01 + <_> + + 0 -1 1381 -3.7979281041771173e-03 + + 3.3725079894065857e-01 5.2465802431106567e-01 + <_> + + 0 -1 1382 1.4627049677073956e-04 + + 5.3066742420196533e-01 3.9117100834846497e-01 + <_> + + 0 -1 1383 -4.9164638767251745e-05 + + 5.4624962806701660e-01 3.9427208900451660e-01 + <_> + + 0 -1 1384 -3.3582501113414764e-02 + + 2.1578240394592285e-01 5.0482118129730225e-01 + <_> + + 0 -1 1385 -3.5339309833943844e-03 + + 6.4653122425079346e-01 4.8726969957351685e-01 + <_> + + 0 -1 1386 5.0144111737608910e-03 + + 4.6176680922508240e-01 6.2480747699737549e-01 + <_> + + 0 -1 1387 1.8817370757460594e-02 + + 5.2206891775131226e-01 2.0000520348548889e-01 + <_> + + 0 -1 1388 -1.3434339780360460e-03 + + 4.0145379304885864e-01 5.3016197681427002e-01 + <_> + + 0 -1 1389 1.7557960236445069e-03 + + 4.7940391302108765e-01 5.6531697511672974e-01 + <_> + + 0 -1 1390 -9.5637463033199310e-02 + + 2.0341950654983521e-01 5.0067067146301270e-01 + <_> + + 0 -1 1391 -2.2241229191422462e-02 + + 7.6724731922149658e-01 5.0463402271270752e-01 + <_> + + 0 -1 1392 -1.5575819648802280e-02 + + 7.4903422594070435e-01 4.7558510303497314e-01 + <_> + + 0 -1 1393 5.3599118255078793e-03 + + 5.3653037548065186e-01 
4.0046709775924683e-01 + <_> + + 0 -1 1394 -2.1763499826192856e-02 + + 7.4015498161315918e-02 4.9641749262809753e-01 + <_> + + 0 -1 1395 -1.6561590135097504e-01 + + 2.8591030836105347e-01 5.2180862426757812e-01 + <_> + + 0 -1 1396 1.6461320046801120e-04 + + 4.1916158795356750e-01 5.3807932138442993e-01 + <_> + + 0 -1 1397 -8.9077502489089966e-03 + + 6.2731927633285522e-01 4.8774048686027527e-01 + <_> + + 0 -1 1398 8.6346449097618461e-04 + + 5.1599407196044922e-01 3.6710259318351746e-01 + <_> + + 0 -1 1399 -1.3751760125160217e-03 + + 5.8843767642974854e-01 4.5790839195251465e-01 + <_> + + 0 -1 1400 -1.4081239933148026e-03 + + 3.5605099797248840e-01 5.1399451494216919e-01 + <_> + + 0 -1 1401 -3.9342888630926609e-03 + + 5.9942889213562012e-01 4.6642720699310303e-01 + <_> + + 0 -1 1402 -3.1966928392648697e-02 + + 3.3454620838165283e-01 5.1441830396652222e-01 + <_> + + 0 -1 1403 -1.5089280168467667e-05 + + 5.5826562643051147e-01 4.4140571355819702e-01 + <_> + + 0 -1 1404 5.1994470413774252e-04 + + 4.6236801147460938e-01 6.1689937114715576e-01 + <_> + + 0 -1 1405 -3.4220460802316666e-03 + + 6.5570747852325439e-01 4.9748051166534424e-01 + <_> + + 0 -1 1406 1.7723299970384687e-04 + + 5.2695018053054810e-01 3.9019080996513367e-01 + <_> + + 0 -1 1407 1.5716759953647852e-03 + + 4.6333730220794678e-01 5.7904577255249023e-01 + <_> + + 0 -1 1408 -8.9041329920291901e-03 + + 2.6896080374717712e-01 5.0535911321640015e-01 + <_> + + 0 -1 1409 4.0677518700249493e-04 + + 5.4566031694412231e-01 4.3298989534378052e-01 + <_> + + 0 -1 1410 6.7604780197143555e-03 + + 4.6489939093589783e-01 6.6897618770599365e-01 + <_> + + 0 -1 1411 2.9100088868290186e-03 + + 5.3097039461135864e-01 3.3778399229049683e-01 + <_> + + 0 -1 1412 1.3885459629818797e-03 + + 4.0747389197349548e-01 5.3491330146789551e-01 + <_> + + 0 -1 1413 -7.6764263212680817e-02 + + 1.9921760261058807e-01 5.2282422780990601e-01 + <_> + + 0 -1 1414 -2.2688310127705336e-04 + + 5.4385018348693848e-01 4.2530721426010132e-01 + <_> + + 0 -1 1415 -6.3094152137637138e-03 + + 4.2591789364814758e-01 5.3789097070693970e-01 + <_> + + 0 -1 1416 -1.1007279902696609e-01 + + 6.9041568040847778e-01 4.7217491269111633e-01 + <_> + + 0 -1 1417 2.8619659133255482e-04 + + 4.5249149203300476e-01 5.5483061075210571e-01 + <_> + + 0 -1 1418 2.9425329557852820e-05 + + 5.3703737258911133e-01 4.2364639043807983e-01 + <_> + + 0 -1 1419 -2.4886570870876312e-02 + + 6.4235579967498779e-01 4.9693039059638977e-01 + <_> + + 0 -1 1420 3.3148851245641708e-02 + + 4.9884751439094543e-01 1.6138119995594025e-01 + <_> + + 0 -1 1421 7.8491691965609789e-04 + + 5.4160261154174805e-01 4.2230090498924255e-01 + <_> + + 0 -1 1422 4.7087189741432667e-03 + + 4.5763289928436279e-01 6.0275578498840332e-01 + <_> + + 0 -1 1423 2.4144479539245367e-03 + + 5.3089731931686401e-01 4.4224989414215088e-01 + <_> + + 0 -1 1424 1.9523180089890957e-03 + + 4.7056341171264648e-01 6.6633248329162598e-01 + <_> + + 0 -1 1425 1.3031980488449335e-03 + + 4.4061261415481567e-01 5.5269622802734375e-01 + <_> + + 0 -1 1426 4.4735497795045376e-03 + + 5.1290237903594971e-01 3.3014988899230957e-01 + <_> + + 0 -1 1427 -2.6652868837118149e-03 + + 3.1354710459709167e-01 5.1750361919403076e-01 + <_> + + 0 -1 1428 1.3666770246345550e-04 + + 4.1193708777427673e-01 5.3068768978118896e-01 + <_> + + 0 -1 1429 -1.7126450315117836e-02 + + 6.1778062582015991e-01 4.8365789651870728e-01 + <_> + + 0 -1 1430 -2.6601430727168918e-04 + + 3.6543309688568115e-01 5.1697367429733276e-01 + <_> + + 0 -1 1431 -2.2932380437850952e-02 + + 3.4909150004386902e-01 
5.1639920473098755e-01 + <_> + + 0 -1 1432 2.3316550068557262e-03 + + 5.1662999391555786e-01 3.7093898653984070e-01 + <_> + + 0 -1 1433 1.6925660893321037e-02 + + 5.0147360563278198e-01 8.0539882183074951e-01 + <_> + + 0 -1 1434 -8.9858826249837875e-03 + + 6.4707887172698975e-01 4.6570208668708801e-01 + <_> + + 0 -1 1435 -1.1874699965119362e-02 + + 3.2463788986206055e-01 5.2587550878524780e-01 + <_> + + 0 -1 1436 1.9350569345988333e-04 + + 5.1919418573379517e-01 3.8396438956260681e-01 + <_> + + 0 -1 1437 5.8713490143418312e-03 + + 4.9181339144706726e-01 6.1870431900024414e-01 + <_> + + 0 -1 1438 -2.4838790297508240e-01 + + 1.8368029594421387e-01 4.9881500005722046e-01 + <_> + + 0 -1 1439 1.2256000190973282e-02 + + 5.2270537614822388e-01 3.6320298910140991e-01 + <_> + + 0 -1 1440 8.3990179700776935e-04 + + 4.4902500510215759e-01 5.7741481065750122e-01 + <_> + + 0 -1 1441 2.5407369248569012e-03 + + 4.8047870397567749e-01 5.8582991361618042e-01 + <_> + + 0 -1 1442 -1.4822429977357388e-02 + + 2.5210499763488770e-01 5.0235372781753540e-01 + <_> + + 0 -1 1443 -5.7973959483206272e-03 + + 5.9966957569122314e-01 4.8537150025367737e-01 + <_> + + 0 -1 1444 7.2662148158997297e-04 + + 5.1537168025970459e-01 3.6717799305915833e-01 + <_> + + 0 -1 1445 -1.7232580110430717e-02 + + 6.6217190027236938e-01 4.9946561455726624e-01 + <_> + + 0 -1 1446 7.8624086454510689e-03 + + 4.6333950757980347e-01 6.2561017274856567e-01 + <_> + + 0 -1 1447 -4.7343620099127293e-03 + + 3.6155730485916138e-01 5.2818852663040161e-01 + <_> + + 0 -1 1448 8.3048478700220585e-04 + + 4.4428890943527222e-01 5.5509579181671143e-01 + <_> + + 0 -1 1449 7.6602199114859104e-03 + + 5.1629352569580078e-01 2.6133549213409424e-01 + <_> + + 0 -1 1450 -4.1048377752304077e-03 + + 2.7896320819854736e-01 5.0190317630767822e-01 + <_> + + 0 -1 1451 4.8512578941881657e-03 + + 4.9689841270446777e-01 5.6616681814193726e-01 + <_> + + 0 -1 1452 9.9896453320980072e-04 + + 4.4456079602241516e-01 5.5518132448196411e-01 + <_> + + 0 -1 1453 -2.7023631334304810e-01 + + 2.9388209804892540e-02 5.1513141393661499e-01 + <_> + + 0 -1 1454 -1.3090680353343487e-02 + + 5.6993997097015381e-01 4.4474598765373230e-01 + <_> + + 0 -1 1455 -9.4342790544033051e-03 + + 4.3054661154747009e-01 5.4878950119018555e-01 + <_> + + 0 -1 1456 -1.5482039889320731e-03 + + 3.6803171038627625e-01 5.1280808448791504e-01 + <_> + + 0 -1 1457 5.3746132180094719e-03 + + 4.8389169573783875e-01 6.1015558242797852e-01 + <_> + + 0 -1 1458 1.5786769799888134e-03 + + 5.3252232074737549e-01 4.1185480356216431e-01 + <_> + + 0 -1 1459 3.6856050137430429e-03 + + 4.8109480738639832e-01 6.2523031234741211e-01 + <_> + + 0 -1 1460 9.3887019902467728e-03 + + 5.2002298831939697e-01 3.6294108629226685e-01 + <_> + + 0 -1 1461 1.2792630121111870e-02 + + 4.9617099761962891e-01 6.7380160093307495e-01 + <_> + + 0 -1 1462 -3.3661040943115950e-03 + + 4.0602791309356689e-01 5.2835988998413086e-01 + <_> + + 0 -1 1463 3.9771420415490866e-04 + + 4.6741139888763428e-01 5.9007751941680908e-01 + <_> + + 0 -1 1464 1.4868030557408929e-03 + + 4.5191168785095215e-01 6.0820537805557251e-01 + <_> + + 0 -1 1465 -8.8686749339103699e-02 + + 2.8078991174697876e-01 5.1809918880462646e-01 + <_> + + 0 -1 1466 -7.4296112870797515e-05 + + 5.2955842018127441e-01 4.0876251459121704e-01 + <_> + + 0 -1 1467 -1.4932939848222304e-05 + + 5.4614001512527466e-01 4.5385429263114929e-01 + <_> + + 0 -1 1468 5.9162238612771034e-03 + + 5.3291612863540649e-01 4.1921341419219971e-01 + <_> + + 0 -1 1469 1.1141640134155750e-03 + + 4.5120179653167725e-01 
5.7062172889709473e-01 + <_> + + 0 -1 1470 8.9249362645205110e-05 + + 4.5778059959411621e-01 5.8976382017135620e-01 + <_> + + 0 -1 1471 2.5319510605186224e-03 + + 5.2996039390563965e-01 3.3576390147209167e-01 + <_> + + 0 -1 1472 1.2426200322806835e-02 + + 4.9590590596199036e-01 1.3466019928455353e-01 + <_> + + 0 -1 1473 2.8335750102996826e-02 + + 5.1170790195465088e-01 6.1043637106195092e-04 + <_> + + 0 -1 1474 6.6165882162749767e-03 + + 4.7363498806953430e-01 7.0116281509399414e-01 + <_> + + 0 -1 1475 8.0468766391277313e-03 + + 5.2164179086685181e-01 3.2828199863433838e-01 + <_> + + 0 -1 1476 -1.1193980462849140e-03 + + 5.8098608255386353e-01 4.5637390017509460e-01 + <_> + + 0 -1 1477 1.3277590274810791e-02 + + 5.3983622789382935e-01 4.1039010882377625e-01 + <_> + + 0 -1 1478 4.8794739996083081e-04 + + 4.2492860555648804e-01 5.4105907678604126e-01 + <_> + + 0 -1 1479 1.1243170127272606e-02 + + 5.2699637413024902e-01 3.4382158517837524e-01 + <_> + + 0 -1 1480 -8.9896668214350939e-04 + + 5.6330758333206177e-01 4.4566130638122559e-01 + <_> + + 0 -1 1481 6.6677159629762173e-03 + + 5.3128892183303833e-01 4.3626791238784790e-01 + <_> + + 0 -1 1482 2.8947299346327782e-02 + + 4.7017949819564819e-01 6.5757977962493896e-01 + <_> + + 0 -1 1483 -2.3400049656629562e-02 + + 0. 5.1373988389968872e-01 + <_> + + 0 -1 1484 -8.9117050170898438e-02 + + 2.3745279759168625e-02 4.9424308538436890e-01 + <_> + + 0 -1 1485 -1.4054600149393082e-02 + + 3.1273230910301208e-01 5.1175111532211304e-01 + <_> + + 0 -1 1486 8.1239398568868637e-03 + + 5.0090491771697998e-01 2.5200259685516357e-01 + <_> + + 0 -1 1487 -4.9964650534093380e-03 + + 6.3871437311172485e-01 4.9278119206428528e-01 + <_> + + 0 -1 1488 3.1253970228135586e-03 + + 5.1368498802185059e-01 3.6804521083831787e-01 + <_> + + 0 -1 1489 6.7669642157852650e-03 + + 5.5098438262939453e-01 4.3636319041252136e-01 + <_> + + 0 -1 1490 -2.3711440153419971e-03 + + 6.1623352766036987e-01 4.5869469642639160e-01 + <_> + + 0 -1 1491 -5.3522791713476181e-03 + + 6.1854577064514160e-01 4.9204909801483154e-01 + <_> + + 0 -1 1492 -1.5968859195709229e-02 + + 1.3826179504394531e-01 4.9832528829574585e-01 + <_> + + 0 -1 1493 4.7676060348749161e-03 + + 4.6880578994750977e-01 5.4900461435317993e-01 + <_> + + 0 -1 1494 -2.4714691098779440e-03 + + 2.3685149848461151e-01 5.0039529800415039e-01 + <_> + + 0 -1 1495 -7.1033788844943047e-04 + + 5.8563941717147827e-01 4.7215330600738525e-01 + <_> + + 0 -1 1496 -1.4117559790611267e-01 + + 8.6900062859058380e-02 4.9615910649299622e-01 + <_> + + 0 -1 1497 1.0651809722185135e-01 + + 5.1388370990753174e-01 1.7410050332546234e-01 + <_> + + 0 -1 1498 -5.2744749933481216e-02 + + 7.3536360263824463e-01 4.7728818655014038e-01 + <_> + + 0 -1 1499 -4.7431760467588902e-03 + + 3.8844060897827148e-01 5.2927017211914062e-01 + <_> + + 0 -1 1500 9.9676765967160463e-04 + + 5.2234929800033569e-01 4.0034240484237671e-01 + <_> + + 0 -1 1501 8.0284131690859795e-03 + + 4.9591061472892761e-01 7.2129642963409424e-01 + <_> + + 0 -1 1502 8.6025858763605356e-04 + + 4.4448840618133545e-01 5.5384761095046997e-01 + <_> + + 0 -1 1503 9.3191501218825579e-04 + + 5.3983712196350098e-01 4.1632440686225891e-01 + <_> + + 0 -1 1504 -2.5082060601562262e-03 + + 5.8542650938034058e-01 4.5625001192092896e-01 + <_> + + 0 -1 1505 -2.1378761157393456e-03 + + 4.6080690622329712e-01 5.2802592515945435e-01 + <_> + + 0 -1 1506 -2.1546049974858761e-03 + + 3.7911269068717957e-01 5.2559971809387207e-01 + <_> + + 0 -1 1507 -7.6214009895920753e-03 + + 5.9986090660095215e-01 4.9520739912986755e-01 
+ <_> + + 0 -1 1508 2.2055360022932291e-03 + + 4.4842061400413513e-01 5.5885308980941772e-01 + <_> + + 0 -1 1509 1.2586950324475765e-03 + + 5.4507470130920410e-01 4.4238409399986267e-01 + <_> + + 0 -1 1510 -5.0926720723509789e-03 + + 4.1182750463485718e-01 5.2630358934402466e-01 + <_> + + 0 -1 1511 -2.5095739401876926e-03 + + 5.7879078388214111e-01 4.9984949827194214e-01 + <_> + + 0 -1 1512 -7.7327556908130646e-02 + + 8.3978658914566040e-01 4.8111200332641602e-01 + <_> + + 0 -1 1513 -4.1485819965600967e-02 + + 2.4086110293865204e-01 5.1769930124282837e-01 + <_> + + 0 -1 1514 1.0355669655837119e-04 + + 4.3553608655929565e-01 5.4170542955398560e-01 + <_> + + 0 -1 1515 1.3255809899419546e-03 + + 5.4539710283279419e-01 4.8940950632095337e-01 + <_> + + 0 -1 1516 -8.0598732456564903e-03 + + 5.7710242271423340e-01 4.5779189467430115e-01 + <_> + + 0 -1 1517 1.9058620557188988e-02 + + 5.1698678731918335e-01 3.4004750847816467e-01 + <_> + + 0 -1 1518 -3.5057891160249710e-02 + + 2.2032439708709717e-01 5.0005030632019043e-01 + <_> + + 0 -1 1519 5.7296059094369411e-03 + + 5.0434082746505737e-01 6.5975707769393921e-01 + <_> + + 0 -1 1520 -1.1648329906165600e-02 + + 2.1862849593162537e-01 4.9966529011726379e-01 + <_> + + 0 -1 1521 1.4544479781761765e-03 + + 5.0076818466186523e-01 5.5037277936935425e-01 + <_> + + 0 -1 1522 -2.5030909455381334e-04 + + 4.1298410296440125e-01 5.2416700124740601e-01 + <_> + + 0 -1 1523 -8.2907272735610604e-04 + + 5.4128682613372803e-01 4.9744960665702820e-01 + <_> + + 0 -1 1524 1.0862209601327777e-03 + + 4.6055299043655396e-01 5.8792287111282349e-01 + <_> + + 0 -1 1525 2.0000500080641359e-04 + + 5.2788549661636353e-01 4.7052091360092163e-01 + <_> + + 0 -1 1526 2.9212920926511288e-03 + + 5.1296097040176392e-01 3.7555369734764099e-01 + <_> + + 0 -1 1527 2.5387400761246681e-02 + + 4.8226919770240784e-01 5.7907682657241821e-01 + <_> + + 0 -1 1528 -3.1968469265848398e-03 + + 5.2483952045440674e-01 3.9628401398658752e-01 + <_> + 182 + 9.0253349304199219e+01 + + <_> + + 0 -1 1529 5.8031738735735416e-03 + + 3.4989839792251587e-01 5.9619832038879395e-01 + <_> + + 0 -1 1530 -9.0003069490194321e-03 + + 6.8166369199752808e-01 4.4785520434379578e-01 + <_> + + 0 -1 1531 -1.1549659539014101e-03 + + 5.5857062339782715e-01 3.5782510042190552e-01 + <_> + + 0 -1 1532 -1.1069850297644734e-03 + + 5.3650361299514771e-01 3.0504280328750610e-01 + <_> + + 0 -1 1533 1.0308309720130637e-04 + + 3.6390951275825500e-01 5.3446358442306519e-01 + <_> + + 0 -1 1534 -5.0984839908778667e-03 + + 2.8591570258140564e-01 5.5042648315429688e-01 + <_> + + 0 -1 1535 8.2572200335562229e-04 + + 5.2365237474441528e-01 3.4760418534278870e-01 + <_> + + 0 -1 1536 9.9783325567841530e-03 + + 4.7503221035003662e-01 6.2196469306945801e-01 + <_> + + 0 -1 1537 -3.7402529269456863e-02 + + 3.3433759212493896e-01 5.2780628204345703e-01 + <_> + + 0 -1 1538 4.8548257909715176e-03 + + 5.1921808719635010e-01 3.7004441022872925e-01 + <_> + + 0 -1 1539 -1.8664470408111811e-03 + + 2.9298439621925354e-01 5.0919449329376221e-01 + <_> + + 0 -1 1540 1.6888890415430069e-02 + + 3.6868458986282349e-01 5.4312258958816528e-01 + <_> + + 0 -1 1541 -5.8372621424496174e-03 + + 3.6321839690208435e-01 5.2213358879089355e-01 + <_> + + 0 -1 1542 -1.4713739510625601e-03 + + 5.8706837892532349e-01 4.7006508708000183e-01 + <_> + + 0 -1 1543 -1.1522950371727347e-03 + + 3.1958949565887451e-01 5.1409542560577393e-01 + <_> + + 0 -1 1544 -4.2560300789773464e-03 + + 6.3018590211868286e-01 4.8149210214614868e-01 + <_> + + 0 -1 1545 -6.7378291860222816e-03 + + 
1.9770480692386627e-01 5.0258082151412964e-01 + <_> + + 0 -1 1546 1.1382670141756535e-02 + + 4.9541321396827698e-01 6.8670457601547241e-01 + <_> + + 0 -1 1547 5.1794708706438541e-03 + + 5.1644277572631836e-01 3.3506479859352112e-01 + <_> + + 0 -1 1548 -1.1743789911270142e-01 + + 2.3152460157871246e-01 5.2344137430191040e-01 + <_> + + 0 -1 1549 2.8703449293971062e-02 + + 4.6642971038818359e-01 6.7225211858749390e-01 + <_> + + 0 -1 1550 4.8231030814349651e-03 + + 5.2208751440048218e-01 2.7235329151153564e-01 + <_> + + 0 -1 1551 2.6798530016094446e-03 + + 5.0792771577835083e-01 2.9069489240646362e-01 + <_> + + 0 -1 1552 8.0504082143306732e-03 + + 4.8859509825706482e-01 6.3950210809707642e-01 + <_> + + 0 -1 1553 4.8054959625005722e-03 + + 5.1972568035125732e-01 3.6566638946533203e-01 + <_> + + 0 -1 1554 -2.2420159075409174e-03 + + 6.1534678936004639e-01 4.7637018561363220e-01 + <_> + + 0 -1 1555 -1.3757710345089436e-02 + + 2.6373448967933655e-01 5.0309032201766968e-01 + <_> + + 0 -1 1556 -1.0338299721479416e-01 + + 2.2875219583511353e-01 5.1824611425399780e-01 + <_> + + 0 -1 1557 -9.4432085752487183e-03 + + 6.9533038139343262e-01 4.6949490904808044e-01 + <_> + + 0 -1 1558 8.0271181650459766e-04 + + 5.4506552219390869e-01 4.2687839269638062e-01 + <_> + + 0 -1 1559 -4.1945669800043106e-03 + + 6.0913878679275513e-01 4.5716428756713867e-01 + <_> + + 0 -1 1560 1.0942210443317890e-02 + + 5.2410632371902466e-01 3.2845470309257507e-01 + <_> + + 0 -1 1561 -5.7841069065034389e-04 + + 5.3879290819168091e-01 4.1793689131736755e-01 + <_> + + 0 -1 1562 -2.0888620056211948e-03 + + 4.2926910519599915e-01 5.3017157316207886e-01 + <_> + + 0 -1 1563 3.2383969519287348e-03 + + 3.7923479080200195e-01 5.2207440137863159e-01 + <_> + + 0 -1 1564 4.9075027927756310e-03 + + 5.2372831106185913e-01 4.1267579793930054e-01 + <_> + + 0 -1 1565 -3.2277941703796387e-02 + + 1.9476559758186340e-01 4.9945020675659180e-01 + <_> + + 0 -1 1566 -8.9711230248212814e-03 + + 6.0112851858139038e-01 4.9290320277214050e-01 + <_> + + 0 -1 1567 1.5321089886128902e-02 + + 5.0097537040710449e-01 2.0398220419883728e-01 + <_> + + 0 -1 1568 2.0855569746345282e-03 + + 4.8621898889541626e-01 5.7216948270797729e-01 + <_> + + 0 -1 1569 5.0615021027624607e-03 + + 5.0002187490463257e-01 1.8018059432506561e-01 + <_> + + 0 -1 1570 -3.7174751050770283e-03 + + 5.5301171541213989e-01 4.8975929617881775e-01 + <_> + + 0 -1 1571 -1.2170500122010708e-02 + + 4.1786059737205505e-01 5.3837239742279053e-01 + <_> + + 0 -1 1572 4.6248398721218109e-03 + + 4.9971699714660645e-01 5.7613271474838257e-01 + <_> + + 0 -1 1573 -2.1040429419372231e-04 + + 5.3318071365356445e-01 4.0976810455322266e-01 + <_> + + 0 -1 1574 -1.4641780406236649e-02 + + 5.7559251785278320e-01 5.0517761707305908e-01 + <_> + + 0 -1 1575 3.3199489116668701e-03 + + 4.5769768953323364e-01 6.0318058729171753e-01 + <_> + + 0 -1 1576 3.7236879579722881e-03 + + 4.3803969025611877e-01 5.4158830642700195e-01 + <_> + + 0 -1 1577 8.2951161311939359e-04 + + 5.1630318164825439e-01 3.7022191286087036e-01 + <_> + + 0 -1 1578 -1.1408490128815174e-02 + + 6.0729467868804932e-01 4.8625651001930237e-01 + <_> + + 0 -1 1579 -4.5320121571421623e-03 + + 3.2924759387969971e-01 5.0889629125595093e-01 + <_> + + 0 -1 1580 5.1276017911732197e-03 + + 4.8297679424285889e-01 6.1227089166641235e-01 + <_> + + 0 -1 1581 9.8583158105611801e-03 + + 4.6606799960136414e-01 6.5561771392822266e-01 + <_> + + 0 -1 1582 3.6985918879508972e-02 + + 5.2048492431640625e-01 1.6904720664024353e-01 + <_> + + 0 -1 1583 4.6491161920130253e-03 + + 
5.1673221588134766e-01 3.7252250313758850e-01 + <_> + + 0 -1 1584 -4.2664702050387859e-03 + + 6.4064931869506836e-01 4.9873429536819458e-01 + <_> + + 0 -1 1585 -4.7956590424291790e-04 + + 5.8972930908203125e-01 4.4648739695549011e-01 + <_> + + 0 -1 1586 3.6827160511165857e-03 + + 5.4415607452392578e-01 3.4726628661155701e-01 + <_> + + 0 -1 1587 -1.0059880092740059e-02 + + 2.1431629359722137e-01 5.0048297643661499e-01 + <_> + + 0 -1 1588 -3.0361840617842972e-04 + + 5.3864240646362305e-01 4.5903238654136658e-01 + <_> + + 0 -1 1589 -1.4545479789376259e-03 + + 5.7511842250823975e-01 4.4970950484275818e-01 + <_> + + 0 -1 1590 1.6515209572389722e-03 + + 5.4219377040863037e-01 4.2385208606719971e-01 + <_> + + 0 -1 1591 -7.8468639403581619e-03 + + 4.0779209136962891e-01 5.2581572532653809e-01 + <_> + + 0 -1 1592 -5.1259850151836872e-03 + + 4.2292758822441101e-01 5.4794532060623169e-01 + <_> + + 0 -1 1593 -3.6890961229801178e-02 + + 6.5963757038116455e-01 4.6746781468391418e-01 + <_> + + 0 -1 1594 2.4035639944486320e-04 + + 4.2511358857154846e-01 5.5732029676437378e-01 + <_> + + 0 -1 1595 -1.5150169929256663e-05 + + 5.2592468261718750e-01 4.0741148591041565e-01 + <_> + + 0 -1 1596 2.2108471021056175e-03 + + 4.6717229485511780e-01 5.8863520622253418e-01 + <_> + + 0 -1 1597 -1.1568620102480054e-03 + + 5.7110661268234253e-01 4.4871619343757629e-01 + <_> + + 0 -1 1598 4.9996292218565941e-03 + + 5.2641981840133667e-01 2.8983271121978760e-01 + <_> + + 0 -1 1599 -1.4656189596280456e-03 + + 3.8917380571365356e-01 5.1978719234466553e-01 + <_> + + 0 -1 1600 -1.1975039960816503e-03 + + 5.7958728075027466e-01 4.9279558658599854e-01 + <_> + + 0 -1 1601 -4.4954330660402775e-03 + + 2.3776030540466309e-01 5.0125551223754883e-01 + <_> + + 0 -1 1602 1.4997160178609192e-04 + + 4.8766261339187622e-01 5.6176078319549561e-01 + <_> + + 0 -1 1603 2.6391509454697371e-03 + + 5.1680880784988403e-01 3.7655091285705566e-01 + <_> + + 0 -1 1604 -2.9368131072260439e-04 + + 5.4466491937637329e-01 4.8746308684349060e-01 + <_> + + 0 -1 1605 1.4211760135367513e-03 + + 4.6878978610038757e-01 6.6913318634033203e-01 + <_> + + 0 -1 1606 7.9427637159824371e-02 + + 5.1934438943862915e-01 2.7329459786415100e-01 + <_> + + 0 -1 1607 7.9937502741813660e-02 + + 4.9717310070991516e-01 1.7820839583873749e-01 + <_> + + 0 -1 1608 1.1089259758591652e-02 + + 5.1659947633743286e-01 3.2094758749008179e-01 + <_> + + 0 -1 1609 1.6560709627810866e-04 + + 4.0584719181060791e-01 5.3072762489318848e-01 + <_> + + 0 -1 1610 -5.3354292176663876e-03 + + 3.4450569748878479e-01 5.1581299304962158e-01 + <_> + + 0 -1 1611 1.1287260567769408e-03 + + 4.5948630571365356e-01 6.0755330324172974e-01 + <_> + + 0 -1 1612 -2.1969219669699669e-02 + + 1.6804009675979614e-01 5.2285957336425781e-01 + <_> + + 0 -1 1613 -2.1775320055894554e-04 + + 3.8615968823432922e-01 5.2156728506088257e-01 + <_> + + 0 -1 1614 2.0200149447191507e-04 + + 5.5179792642593384e-01 4.3630391359329224e-01 + <_> + + 0 -1 1615 -2.1733149886131287e-02 + + 7.9994601011276245e-01 4.7898510098457336e-01 + <_> + + 0 -1 1616 -8.4399932529777288e-04 + + 4.0859758853912354e-01 5.3747731447219849e-01 + <_> + + 0 -1 1617 -4.3895249837078154e-04 + + 5.4704052209854126e-01 4.3661430478096008e-01 + <_> + + 0 -1 1618 1.5092400135472417e-03 + + 4.9889969825744629e-01 5.8421492576599121e-01 + <_> + + 0 -1 1619 -3.5547839943319559e-03 + + 6.7536902427673340e-01 4.7210058569908142e-01 + <_> + + 0 -1 1620 4.8191400128416717e-04 + + 5.4158538579940796e-01 4.3571090698242188e-01 + <_> + + 0 -1 1621 -6.0264398343861103e-03 + 
+ 2.2585099935531616e-01 4.9918809533119202e-01 + <_> + + 0 -1 1622 -1.1668140068650246e-02 + + 6.2565547227859497e-01 4.9274989962577820e-01 + <_> + + 0 -1 1623 -2.8718370012938976e-03 + + 3.9477849006652832e-01 5.2458018064498901e-01 + <_> + + 0 -1 1624 1.7051169648766518e-02 + + 4.7525110840797424e-01 5.7942241430282593e-01 + <_> + + 0 -1 1625 -1.3352080248296261e-02 + + 6.0411047935485840e-01 4.5445358753204346e-01 + <_> + + 0 -1 1626 -3.9301801007241011e-04 + + 4.2582759261131287e-01 5.5449050664901733e-01 + <_> + + 0 -1 1627 3.0483349692076445e-03 + + 5.2334201335906982e-01 3.7802729010581970e-01 + <_> + + 0 -1 1628 -4.3579288758337498e-03 + + 6.3718891143798828e-01 4.8386740684509277e-01 + <_> + + 0 -1 1629 5.6661018170416355e-03 + + 5.3747057914733887e-01 4.1636660695075989e-01 + <_> + + 0 -1 1630 6.0677339206449687e-05 + + 4.6387958526611328e-01 5.3116250038146973e-01 + <_> + + 0 -1 1631 3.6738160997629166e-02 + + 4.6886560320854187e-01 6.4665240049362183e-01 + <_> + + 0 -1 1632 8.6528137326240540e-03 + + 5.2043187618255615e-01 2.1886579692363739e-01 + <_> + + 0 -1 1633 -1.5371359884738922e-01 + + 1.6303719580173492e-01 4.9588400125503540e-01 + <_> + + 0 -1 1634 -4.1560421232134104e-04 + + 5.7744592428207397e-01 4.6964588761329651e-01 + <_> + + 0 -1 1635 -1.2640169588848948e-03 + + 3.9771759510040283e-01 5.2171981334686279e-01 + <_> + + 0 -1 1636 -3.5473341122269630e-03 + + 6.0465282201766968e-01 4.8083150386810303e-01 + <_> + + 0 -1 1637 3.0019069527043030e-05 + + 3.9967238903045654e-01 5.2282011508941650e-01 + <_> + + 0 -1 1638 1.3113019522279501e-03 + + 4.7121581435203552e-01 5.7659977674484253e-01 + <_> + + 0 -1 1639 -1.3374709524214268e-03 + + 4.1095849871635437e-01 5.2531701326370239e-01 + <_> + + 0 -1 1640 2.0876709371805191e-02 + + 5.2029937505722046e-01 1.7579819262027740e-01 + <_> + + 0 -1 1641 -7.5497948564589024e-03 + + 6.5666097402572632e-01 4.6949750185012817e-01 + <_> + + 0 -1 1642 2.4188550189137459e-02 + + 5.1286739110946655e-01 3.3702209591865540e-01 + <_> + + 0 -1 1643 -2.9358828905969858e-03 + + 6.5807867050170898e-01 4.6945410966873169e-01 + <_> + + 0 -1 1644 5.7557929307222366e-02 + + 5.1464450359344482e-01 2.7752599120140076e-01 + <_> + + 0 -1 1645 -1.1343370424583554e-03 + + 3.8366019725799561e-01 5.1926672458648682e-01 + <_> + + 0 -1 1646 1.6816999763250351e-02 + + 5.0855928659439087e-01 6.1772608757019043e-01 + <_> + + 0 -1 1647 5.0535178743302822e-03 + + 5.1387631893157959e-01 3.6847919225692749e-01 + <_> + + 0 -1 1648 -4.5874710194766521e-03 + + 5.9896552562713623e-01 4.8352020978927612e-01 + <_> + + 0 -1 1649 1.6882460331544280e-03 + + 4.5094868540763855e-01 5.7230567932128906e-01 + <_> + + 0 -1 1650 -1.6554000321775675e-03 + + 3.4967708587646484e-01 5.2433192729949951e-01 + <_> + + 0 -1 1651 -1.9373800605535507e-02 + + 1.1205369979143143e-01 4.9687129259109497e-01 + <_> + + 0 -1 1652 1.0374450124800205e-02 + + 5.1481968164443970e-01 4.3952131271362305e-01 + <_> + + 0 -1 1653 1.4973050565458834e-04 + + 4.0849998593330383e-01 5.2698868513107300e-01 + <_> + + 0 -1 1654 -4.2981930077075958e-02 + + 6.3941049575805664e-01 5.0185042619705200e-01 + <_> + + 0 -1 1655 8.3065936341881752e-03 + + 4.7075539827346802e-01 6.6983532905578613e-01 + <_> + + 0 -1 1656 -4.1285790503025055e-03 + + 4.5413690805435181e-01 5.3236472606658936e-01 + <_> + + 0 -1 1657 1.7399420030415058e-03 + + 4.3339619040489197e-01 5.4398661851882935e-01 + <_> + + 0 -1 1658 1.1739750334527344e-04 + + 4.5796871185302734e-01 5.5434262752532959e-01 + <_> + + 0 -1 1659 1.8585780344437808e-04 + + 
4.3246439099311829e-01 5.4267549514770508e-01 + <_> + + 0 -1 1660 5.5587692186236382e-03 + + 5.2572208642959595e-01 3.5506111383438110e-01 + <_> + + 0 -1 1661 -7.9851560294628143e-03 + + 6.0430181026458740e-01 4.6306359767913818e-01 + <_> + + 0 -1 1662 6.0594122624024749e-04 + + 4.5982548594474792e-01 5.5331951379776001e-01 + <_> + + 0 -1 1663 -2.2983040253166109e-04 + + 4.1307520866394043e-01 5.3224611282348633e-01 + <_> + + 0 -1 1664 4.3740210821852088e-04 + + 4.0430399775505066e-01 5.4092890024185181e-01 + <_> + + 0 -1 1665 2.9482020181603730e-04 + + 4.4949638843536377e-01 5.6288522481918335e-01 + <_> + + 0 -1 1666 1.0312659665942192e-02 + + 5.1775109767913818e-01 2.7043169736862183e-01 + <_> + + 0 -1 1667 -7.7241109684109688e-03 + + 1.9880190491676331e-01 4.9805539846420288e-01 + <_> + + 0 -1 1668 -4.6797208487987518e-03 + + 6.6447502374649048e-01 5.0182962417602539e-01 + <_> + + 0 -1 1669 -5.0755459815263748e-03 + + 3.8983049988746643e-01 5.1852691173553467e-01 + <_> + + 0 -1 1670 2.2479740437120199e-03 + + 4.8018088936805725e-01 5.6603360176086426e-01 + <_> + + 0 -1 1671 8.3327008178457618e-04 + + 5.2109199762344360e-01 3.9571881294250488e-01 + <_> + + 0 -1 1672 -4.1279330849647522e-02 + + 6.1545419692993164e-01 5.0070542097091675e-01 + <_> + + 0 -1 1673 -5.0930189900100231e-04 + + 3.9759421348571777e-01 5.2284038066864014e-01 + <_> + + 0 -1 1674 1.2568780221045017e-03 + + 4.9791380763053894e-01 5.9391832351684570e-01 + <_> + + 0 -1 1675 8.0048497766256332e-03 + + 4.9844971299171448e-01 1.6333660483360291e-01 + <_> + + 0 -1 1676 -1.1879300000146031e-03 + + 5.9049648046493530e-01 4.9426248669624329e-01 + <_> + + 0 -1 1677 6.1948952497914433e-04 + + 4.1995579004287720e-01 5.3287261724472046e-01 + <_> + + 0 -1 1678 6.6829859279096127e-03 + + 5.4186028242111206e-01 4.9058890342712402e-01 + <_> + + 0 -1 1679 -3.7062340416014194e-03 + + 3.7259390950202942e-01 5.1380002498626709e-01 + <_> + + 0 -1 1680 -3.9739411324262619e-02 + + 6.4789611101150513e-01 5.0503468513488770e-01 + <_> + + 0 -1 1681 1.4085009461268783e-03 + + 4.6823391318321228e-01 6.3778841495513916e-01 + <_> + + 0 -1 1682 3.9322688826359808e-04 + + 5.4585301876068115e-01 4.1504821181297302e-01 + <_> + + 0 -1 1683 -1.8979819724336267e-03 + + 3.6901599168777466e-01 5.1497042179107666e-01 + <_> + + 0 -1 1684 -1.3970440253615379e-02 + + 6.0505628585815430e-01 4.8113578557968140e-01 + <_> + + 0 -1 1685 -1.0100819915533066e-01 + + 2.0170800387859344e-01 4.9923619627952576e-01 + <_> + + 0 -1 1686 -1.7346920445561409e-02 + + 5.7131487131118774e-01 4.8994860053062439e-01 + <_> + + 0 -1 1687 1.5619759506080300e-04 + + 4.2153888940811157e-01 5.3926420211791992e-01 + <_> + + 0 -1 1688 1.3438929617404938e-01 + + 5.1361519098281860e-01 3.7676128745079041e-01 + <_> + + 0 -1 1689 -2.4582240730524063e-02 + + 7.0273578166961670e-01 4.7479069232940674e-01 + <_> + + 0 -1 1690 -3.8553720805794001e-03 + + 4.3174090981483459e-01 5.4277169704437256e-01 + <_> + + 0 -1 1691 -2.3165249731391668e-03 + + 5.9426987171173096e-01 4.6186479926109314e-01 + <_> + + 0 -1 1692 -4.8518120311200619e-03 + + 6.1915689706802368e-01 4.8848950862884521e-01 + <_> + + 0 -1 1693 2.4699938949197531e-03 + + 5.2566647529602051e-01 4.0171998739242554e-01 + <_> + + 0 -1 1694 4.5496959239244461e-02 + + 5.2378678321838379e-01 2.6857739686965942e-01 + <_> + + 0 -1 1695 -2.0319599658250809e-02 + + 2.1304459869861603e-01 4.9797388911247253e-01 + <_> + + 0 -1 1696 2.6994998916052282e-04 + + 4.8140418529510498e-01 5.5431222915649414e-01 + <_> + + 0 -1 1697 -1.8232699949294329e-03 + + 
6.4825797080993652e-01 4.7099891304969788e-01 + <_> + + 0 -1 1698 -6.3015790656208992e-03 + + 4.5819279551506042e-01 5.3062361478805542e-01 + <_> + + 0 -1 1699 -2.4139499873854220e-04 + + 5.2320867776870728e-01 4.0517631173133850e-01 + <_> + + 0 -1 1700 -1.0330369696021080e-03 + + 5.5562019348144531e-01 4.7891938686370850e-01 + <_> + + 0 -1 1701 1.8041160365100950e-04 + + 5.2294427156448364e-01 4.0118101239204407e-01 + <_> + + 0 -1 1702 -6.1407860368490219e-02 + + 6.2986820936203003e-01 5.0107032060623169e-01 + <_> + + 0 -1 1703 -6.9543913006782532e-02 + + 7.2282809019088745e-01 4.7731840610504150e-01 + <_> + + 0 -1 1704 -7.0542663335800171e-02 + + 2.2695130109786987e-01 5.1825290918350220e-01 + <_> + + 0 -1 1705 2.4423799477517605e-03 + + 5.2370971441268921e-01 4.0981510281562805e-01 + <_> + + 0 -1 1706 1.5494349645450711e-03 + + 4.7737509012222290e-01 5.4680430889129639e-01 + <_> + + 0 -1 1707 -2.3914219811558723e-02 + + 7.1469759941101074e-01 4.7838249802589417e-01 + <_> + + 0 -1 1708 -1.2453690171241760e-02 + + 2.6352968811988831e-01 5.2411228418350220e-01 + <_> + + 0 -1 1709 -2.0760179904755205e-04 + + 3.6237570643424988e-01 5.1136088371276855e-01 + <_> + + 0 -1 1710 2.9781080229440704e-05 + + 4.7059321403503418e-01 5.4328018426895142e-01 + <_> + 211 + 1.0474919891357422e+02 + + <_> + + 0 -1 1711 1.1772749945521355e-02 + + 3.8605189323425293e-01 6.4211672544479370e-01 + <_> + + 0 -1 1712 2.7037570253014565e-02 + + 4.3856549263000488e-01 6.7540389299392700e-01 + <_> + + 0 -1 1713 -3.6419500247575343e-05 + + 5.4871010780334473e-01 3.4233158826828003e-01 + <_> + + 0 -1 1714 1.9995409529656172e-03 + + 3.2305321097373962e-01 5.4003179073333740e-01 + <_> + + 0 -1 1715 4.5278300531208515e-03 + + 5.0916397571563721e-01 2.9350438714027405e-01 + <_> + + 0 -1 1716 4.7890920541249216e-04 + + 4.1781538724899292e-01 5.3440642356872559e-01 + <_> + + 0 -1 1717 1.1720920447260141e-03 + + 2.8991821408271790e-01 5.1320707798004150e-01 + <_> + + 0 -1 1718 9.5305702416226268e-04 + + 4.2801249027252197e-01 5.5608451366424561e-01 + <_> + + 0 -1 1719 1.5099150004971307e-05 + + 4.0448719263076782e-01 5.4047602415084839e-01 + <_> + + 0 -1 1720 -6.0817901976406574e-04 + + 4.2717689275741577e-01 5.5034661293029785e-01 + <_> + + 0 -1 1721 3.3224520739167929e-03 + + 3.9627239108085632e-01 5.3697347640991211e-01 + <_> + + 0 -1 1722 -1.1037490330636501e-03 + + 4.7271779179573059e-01 5.2377498149871826e-01 + <_> + + 0 -1 1723 -1.4350269921123981e-03 + + 5.6030082702636719e-01 4.2235091328620911e-01 + <_> + + 0 -1 1724 2.0767399109899998e-03 + + 5.2259171009063721e-01 4.7327259182929993e-01 + <_> + + 0 -1 1725 -1.6412809782195836e-04 + + 3.9990758895874023e-01 5.4327398538589478e-01 + <_> + + 0 -1 1726 8.8302437216043472e-03 + + 4.6783858537673950e-01 6.0273271799087524e-01 + <_> + + 0 -1 1727 -1.0552070103585720e-02 + + 3.4939670562744141e-01 5.2139747142791748e-01 + <_> + + 0 -1 1728 -2.2731600329279900e-03 + + 6.1858189105987549e-01 4.7490629553794861e-01 + <_> + + 0 -1 1729 -8.4786332445219159e-04 + + 5.2853411436080933e-01 3.8434821367263794e-01 + <_> + + 0 -1 1730 1.2081359745934606e-03 + + 5.3606408834457397e-01 3.4473359584808350e-01 + <_> + + 0 -1 1731 2.6512730401009321e-03 + + 4.5582920312881470e-01 6.1939620971679688e-01 + <_> + + 0 -1 1732 -1.1012479662895203e-03 + + 3.6802300810813904e-01 5.3276282548904419e-01 + <_> + + 0 -1 1733 4.9561518244445324e-04 + + 3.9605951309204102e-01 5.2749407291412354e-01 + <_> + + 0 -1 1734 -4.3901771306991577e-02 + + 7.0204448699951172e-01 4.9928390979766846e-01 + <_> + + 
0 -1 1735 3.4690350294113159e-02 + + 5.0491642951965332e-01 2.7666029334068298e-01 + <_> + + 0 -1 1736 -2.7442190330475569e-03 + + 2.6726329326629639e-01 5.2749711275100708e-01 + <_> + + 0 -1 1737 3.3316588960587978e-03 + + 4.5794829726219177e-01 6.0011017322540283e-01 + <_> + + 0 -1 1738 -2.0044570788741112e-02 + + 3.1715941429138184e-01 5.2357178926467896e-01 + <_> + + 0 -1 1739 1.3492030557245016e-03 + + 5.2653628587722778e-01 4.0343248844146729e-01 + <_> + + 0 -1 1740 2.9702018946409225e-03 + + 5.3324568271636963e-01 4.5719841122627258e-01 + <_> + + 0 -1 1741 6.3039981760084629e-03 + + 4.5933109521865845e-01 6.0346359014511108e-01 + <_> + + 0 -1 1742 -1.2936590239405632e-02 + + 4.4379639625549316e-01 5.3729712963104248e-01 + <_> + + 0 -1 1743 4.0148729458451271e-03 + + 4.6803238987922668e-01 6.4378339052200317e-01 + <_> + + 0 -1 1744 -2.6401679497212172e-03 + + 3.7096318602561951e-01 5.3143328428268433e-01 + <_> + + 0 -1 1745 1.3918439857661724e-02 + + 4.7235551476478577e-01 7.1308088302612305e-01 + <_> + + 0 -1 1746 -4.5087869511917233e-04 + + 4.4923940300941467e-01 5.3704041242599487e-01 + <_> + + 0 -1 1747 2.5384349282830954e-04 + + 4.4068640470504761e-01 5.5144029855728149e-01 + <_> + + 0 -1 1748 2.2710000630468130e-03 + + 4.6824169158935547e-01 5.9679841995239258e-01 + <_> + + 0 -1 1749 2.4120779708027840e-03 + + 5.0793921947479248e-01 3.0185988545417786e-01 + <_> + + 0 -1 1750 -3.6025670851813629e-05 + + 5.6010371446609497e-01 4.4710969924926758e-01 + <_> + + 0 -1 1751 -7.4905529618263245e-03 + + 2.2075350582599640e-01 4.9899441003799438e-01 + <_> + + 0 -1 1752 -1.7513120546936989e-02 + + 6.5312159061431885e-01 5.0176489353179932e-01 + <_> + + 0 -1 1753 1.4281630516052246e-01 + + 4.9679630994796753e-01 1.4820620417594910e-01 + <_> + + 0 -1 1754 5.5345268920063972e-03 + + 4.8989468812942505e-01 5.9542238712310791e-01 + <_> + + 0 -1 1755 -9.6323591424152255e-04 + + 3.9271169900894165e-01 5.1960742473602295e-01 + <_> + + 0 -1 1756 -2.0370010752230883e-03 + + 5.6133252382278442e-01 4.8848581314086914e-01 + <_> + + 0 -1 1757 1.6614829655736685e-03 + + 4.4728800654411316e-01 5.5788809061050415e-01 + <_> + + 0 -1 1758 -3.1188090797513723e-03 + + 3.8405328989028931e-01 5.3974777460098267e-01 + <_> + + 0 -1 1759 -6.4000617712736130e-03 + + 5.8439838886260986e-01 4.5332181453704834e-01 + <_> + + 0 -1 1760 3.1319601112045348e-04 + + 5.4392218589782715e-01 4.2347279191017151e-01 + <_> + + 0 -1 1761 -1.8222099170088768e-02 + + 1.2884649634361267e-01 4.9584048986434937e-01 + <_> + + 0 -1 1762 8.7969247251749039e-03 + + 4.9512979388237000e-01 7.1534800529479980e-01 + <_> + + 0 -1 1763 -4.2395070195198059e-03 + + 3.9465999603271484e-01 5.1949369907379150e-01 + <_> + + 0 -1 1764 9.7086271271109581e-03 + + 4.8975038528442383e-01 6.0649001598358154e-01 + <_> + + 0 -1 1765 -3.9934171363711357e-03 + + 3.2454401254653931e-01 5.0608289241790771e-01 + <_> + + 0 -1 1766 -1.6785059124231339e-02 + + 1.5819530189037323e-01 5.2037787437438965e-01 + <_> + + 0 -1 1767 1.8272090703248978e-02 + + 4.6809351444244385e-01 6.6269791126251221e-01 + <_> + + 0 -1 1768 5.6872838176786900e-03 + + 5.2116978168487549e-01 3.5121849179267883e-01 + <_> + + 0 -1 1769 -1.0739039862528443e-03 + + 5.7683861255645752e-01 4.5298451185226440e-01 + <_> + + 0 -1 1770 -3.7093870341777802e-03 + + 4.5077630877494812e-01 5.3135812282562256e-01 + <_> + + 0 -1 1771 -2.1110709349159151e-04 + + 5.4608201980590820e-01 4.3333768844604492e-01 + <_> + + 0 -1 1772 1.0670139454305172e-03 + + 5.3718560934066772e-01 4.0783908963203430e-01 + <_> + + 0 
-1 1773 3.5943021066486835e-03 + + 4.4712871313095093e-01 5.6438362598419189e-01 + <_> + + 0 -1 1774 -5.1776031032204628e-03 + + 4.4993931055068970e-01 5.2803301811218262e-01 + <_> + + 0 -1 1775 -2.5414369883947074e-04 + + 5.5161732435226440e-01 4.4077080488204956e-01 + <_> + + 0 -1 1776 6.3522560521960258e-03 + + 5.1941901445388794e-01 2.4652279913425446e-01 + <_> + + 0 -1 1777 -4.4205080484971404e-04 + + 3.8307058811187744e-01 5.1396822929382324e-01 + <_> + + 0 -1 1778 7.4488727841526270e-04 + + 4.8910909891128540e-01 5.9747868776321411e-01 + <_> + + 0 -1 1779 -3.5116379149258137e-03 + + 7.4136817455291748e-01 4.7687649726867676e-01 + <_> + + 0 -1 1780 -1.2540910392999649e-02 + + 3.6488190293312073e-01 5.2528268098831177e-01 + <_> + + 0 -1 1781 9.4931852072477341e-03 + + 5.1004928350448608e-01 3.6295869946479797e-01 + <_> + + 0 -1 1782 1.2961150147020817e-02 + + 5.2324420213699341e-01 4.3335610628128052e-01 + <_> + + 0 -1 1783 4.7209449112415314e-03 + + 4.6481490135192871e-01 6.3310527801513672e-01 + <_> + + 0 -1 1784 -2.3119079414755106e-03 + + 5.9303098917007446e-01 4.5310580730438232e-01 + <_> + + 0 -1 1785 -2.8262299019843340e-03 + + 3.8704779744148254e-01 5.2571010589599609e-01 + <_> + + 0 -1 1786 -1.4311339473351836e-03 + + 5.5225032567977905e-01 4.5618548989295959e-01 + <_> + + 0 -1 1787 1.9378310535103083e-03 + + 4.5462208986282349e-01 5.7369667291641235e-01 + <_> + + 0 -1 1788 2.6343559147790074e-04 + + 5.3457391262054443e-01 4.5718750357627869e-01 + <_> + + 0 -1 1789 7.8257522545754910e-04 + + 3.9678159356117249e-01 5.2201879024505615e-01 + <_> + + 0 -1 1790 -1.9550440832972527e-02 + + 2.8296428918838501e-01 5.2435082197189331e-01 + <_> + + 0 -1 1791 4.3914958951063454e-04 + + 4.5900669693946838e-01 5.8990901708602905e-01 + <_> + + 0 -1 1792 2.1452000364661217e-02 + + 5.2314108610153198e-01 2.8553789854049683e-01 + <_> + + 0 -1 1793 5.8973580598831177e-04 + + 4.3972569704055786e-01 5.5064219236373901e-01 + <_> + + 0 -1 1794 -2.6157610118389130e-02 + + 3.1350791454315186e-01 5.1891750097274780e-01 + <_> + + 0 -1 1795 -1.3959860429167747e-02 + + 3.2132729887962341e-01 5.0407177209854126e-01 + <_> + + 0 -1 1796 -6.3699018210172653e-03 + + 6.3875448703765869e-01 4.8495069146156311e-01 + <_> + + 0 -1 1797 -8.5613820701837540e-03 + + 2.7591320872306824e-01 5.0320190191268921e-01 + <_> + + 0 -1 1798 9.6622901037335396e-04 + + 4.6856409311294556e-01 5.8348792791366577e-01 + <_> + + 0 -1 1799 7.6550268568098545e-04 + + 5.1752072572708130e-01 3.8964220881462097e-01 + <_> + + 0 -1 1800 -8.1833340227603912e-03 + + 2.0691369473934174e-01 5.2081221342086792e-01 + <_> + + 0 -1 1801 -9.3976939097046852e-03 + + 6.1340910196304321e-01 4.6412229537963867e-01 + <_> + + 0 -1 1802 4.8028980381786823e-03 + + 5.4541081190109253e-01 4.3952199816703796e-01 + <_> + + 0 -1 1803 -3.5680569708347321e-03 + + 6.3444852828979492e-01 4.6810939908027649e-01 + <_> + + 0 -1 1804 4.0733120404183865e-03 + + 5.2926832437515259e-01 4.0156200528144836e-01 + <_> + + 0 -1 1805 1.2568129459396005e-03 + + 4.3929880857467651e-01 5.4528248310089111e-01 + <_> + + 0 -1 1806 -2.9065010603517294e-03 + + 5.8988320827484131e-01 4.8633798956871033e-01 + <_> + + 0 -1 1807 -2.4409340694546700e-03 + + 4.0693649649620056e-01 5.2474218606948853e-01 + <_> + + 0 -1 1808 2.4830700829625130e-02 + + 5.1827257871627808e-01 3.6825248599052429e-01 + <_> + + 0 -1 1809 -4.8854008316993713e-02 + + 1.3075779378414154e-01 4.9612811207771301e-01 + <_> + + 0 -1 1810 -1.6110379947349429e-03 + + 6.4210057258605957e-01 4.8726621270179749e-01 + <_> + + 0 
-1 1811 -9.7009479999542236e-02 + + 4.7769349068403244e-02 4.9509888887405396e-01 + <_> + + 0 -1 1812 1.1209240183234215e-03 + + 4.6162670850753784e-01 5.3547459840774536e-01 + <_> + + 0 -1 1813 -1.3064090162515640e-03 + + 6.2618541717529297e-01 4.6388059854507446e-01 + <_> + + 0 -1 1814 4.5771620352752507e-04 + + 5.3844177722930908e-01 4.6466401219367981e-01 + <_> + + 0 -1 1815 -6.3149951165542006e-04 + + 3.8040471076965332e-01 5.1302570104598999e-01 + <_> + + 0 -1 1816 1.4505970466416329e-04 + + 4.5543101429939270e-01 5.6644618511199951e-01 + <_> + + 0 -1 1817 -1.6474550589919090e-02 + + 6.5969580411911011e-01 4.7158598899841309e-01 + <_> + + 0 -1 1818 1.3369579799473286e-02 + + 5.1954662799835205e-01 3.0359649658203125e-01 + <_> + + 0 -1 1819 1.0271780047332868e-04 + + 5.2291762828826904e-01 4.1070660948753357e-01 + <_> + + 0 -1 1820 -5.5311559699475765e-03 + + 6.3528877496719360e-01 4.9609071016311646e-01 + <_> + + 0 -1 1821 -2.6187049224972725e-03 + + 3.8245460391044617e-01 5.1409840583801270e-01 + <_> + + 0 -1 1822 5.0834268331527710e-03 + + 4.9504399299621582e-01 6.2208187580108643e-01 + <_> + + 0 -1 1823 7.9818159341812134e-02 + + 4.9523359537124634e-01 1.3224759697914124e-01 + <_> + + 0 -1 1824 -9.9226586520671844e-02 + + 7.5427287817001343e-01 5.0084167718887329e-01 + <_> + + 0 -1 1825 -6.5174017800018191e-04 + + 3.6993029713630676e-01 5.1301211118698120e-01 + <_> + + 0 -1 1826 -1.8996849656105042e-02 + + 6.6891789436340332e-01 4.9212029576301575e-01 + <_> + + 0 -1 1827 1.7346899956464767e-02 + + 4.9833008646965027e-01 1.8591980636119843e-01 + <_> + + 0 -1 1828 5.5082101607695222e-04 + + 4.5744240283966064e-01 5.5221217870712280e-01 + <_> + + 0 -1 1829 2.0056050270795822e-03 + + 5.1317447423934937e-01 3.8564699888229370e-01 + <_> + + 0 -1 1830 -7.7688191086053848e-03 + + 4.3617001175880432e-01 5.4343092441558838e-01 + <_> + + 0 -1 1831 5.0878278911113739e-02 + + 4.6827208995819092e-01 6.8406397104263306e-01 + <_> + + 0 -1 1832 -2.2901780903339386e-03 + + 4.3292450904846191e-01 5.3060990571975708e-01 + <_> + + 0 -1 1833 -1.5715380141045898e-04 + + 5.3700572252273560e-01 4.3781641125679016e-01 + <_> + + 0 -1 1834 1.0519240051507950e-01 + + 5.1372742652893066e-01 6.7361466586589813e-02 + <_> + + 0 -1 1835 2.7198919560760260e-03 + + 4.1120609641075134e-01 5.2556651830673218e-01 + <_> + + 0 -1 1836 4.8337779939174652e-02 + + 5.4046237468719482e-01 4.4389671087265015e-01 + <_> + + 0 -1 1837 9.5703761326149106e-04 + + 4.3559691309928894e-01 5.3995108604431152e-01 + <_> + + 0 -1 1838 -2.5371259078383446e-02 + + 5.9951752424240112e-01 5.0310248136520386e-01 + <_> + + 0 -1 1839 5.2457951009273529e-02 + + 4.9502879381179810e-01 1.3983510434627533e-01 + <_> + + 0 -1 1840 -1.2365629896521568e-02 + + 6.3972991704940796e-01 4.9641060829162598e-01 + <_> + + 0 -1 1841 -1.4589719474315643e-01 + + 1.0016699880361557e-01 4.9463221430778503e-01 + <_> + + 0 -1 1842 -1.5908600762486458e-02 + + 3.3123299479484558e-01 5.2083408832550049e-01 + <_> + + 0 -1 1843 3.9486068999394774e-04 + + 4.4063639640808105e-01 5.4261028766632080e-01 + <_> + + 0 -1 1844 -5.2454001270234585e-03 + + 2.7995899319648743e-01 5.1899671554565430e-01 + <_> + + 0 -1 1845 -5.0421799533069134e-03 + + 6.9875800609588623e-01 4.7521421313285828e-01 + <_> + + 0 -1 1846 2.9812189750373363e-03 + + 4.9832889437675476e-01 6.3074797391891479e-01 + <_> + + 0 -1 1847 -7.2884308174252510e-03 + + 2.9823330044746399e-01 5.0268697738647461e-01 + <_> + + 0 -1 1848 1.5094350092113018e-03 + + 5.3084421157836914e-01 3.8329708576202393e-01 + <_> + + 0 
-1 1849 -9.3340799212455750e-03 + + 2.0379640161991119e-01 4.9698171019554138e-01 + <_> + + 0 -1 1850 2.8667140752077103e-02 + + 5.0256967544555664e-01 6.9280272722244263e-01 + <_> + + 0 -1 1851 1.7019680142402649e-01 + + 4.9600529670715332e-01 1.4764429628849030e-01 + <_> + + 0 -1 1852 -3.2614478841423988e-03 + + 5.6030637025833130e-01 4.8260560631752014e-01 + <_> + + 0 -1 1853 5.5769277969375253e-04 + + 5.2055621147155762e-01 4.1296330094337463e-01 + <_> + + 0 -1 1854 3.6258339881896973e-01 + + 5.2216529846191406e-01 3.7686121463775635e-01 + <_> + + 0 -1 1855 -1.1615130119025707e-02 + + 6.0226827859878540e-01 4.6374899148941040e-01 + <_> + + 0 -1 1856 -4.0795197710394859e-03 + + 4.0704470872879028e-01 5.3374791145324707e-01 + <_> + + 0 -1 1857 5.7204300537705421e-04 + + 4.6018350124359131e-01 5.9003931283950806e-01 + <_> + + 0 -1 1858 6.7543348995968699e-04 + + 5.3982520103454590e-01 4.3454289436340332e-01 + <_> + + 0 -1 1859 6.3295697327703238e-04 + + 5.2015632390975952e-01 4.0513589978218079e-01 + <_> + + 0 -1 1860 1.2435320531949401e-03 + + 4.6423879265785217e-01 5.5474412441253662e-01 + <_> + + 0 -1 1861 -4.7363857738673687e-03 + + 6.1985671520233154e-01 4.6725520491600037e-01 + <_> + + 0 -1 1862 -6.4658462069928646e-03 + + 6.8373328447341919e-01 5.0190007686614990e-01 + <_> + + 0 -1 1863 3.5017321351915598e-04 + + 4.3448030948638916e-01 5.3636229038238525e-01 + <_> + + 0 -1 1864 1.5754920605104417e-04 + + 4.7600790858268738e-01 5.7320207357406616e-01 + <_> + + 0 -1 1865 9.9774366244673729e-03 + + 5.0909858942031860e-01 3.6350399255752563e-01 + <_> + + 0 -1 1866 -4.1464529931545258e-04 + + 5.5700647830963135e-01 4.5938020944595337e-01 + <_> + + 0 -1 1867 -3.5888899583369493e-04 + + 5.3568458557128906e-01 4.3391349911689758e-01 + <_> + + 0 -1 1868 4.0463250479660928e-04 + + 4.4398030638694763e-01 5.4367768764495850e-01 + <_> + + 0 -1 1869 -8.2184787606820464e-04 + + 4.0422949194908142e-01 5.1762992143630981e-01 + <_> + + 0 -1 1870 5.9467419050633907e-03 + + 4.9276518821716309e-01 5.6337797641754150e-01 + <_> + + 0 -1 1871 -2.1753389388322830e-02 + + 8.0062937736511230e-01 4.8008409142494202e-01 + <_> + + 0 -1 1872 -1.4540379866957664e-02 + + 3.9460548758506775e-01 5.1822227239608765e-01 + <_> + + 0 -1 1873 -4.0510769933462143e-02 + + 2.1324990317225456e-02 4.9357929825782776e-01 + <_> + + 0 -1 1874 -5.8458268176764250e-04 + + 4.0127959847450256e-01 5.3140252828598022e-01 + <_> + + 0 -1 1875 5.5151800625026226e-03 + + 4.6424189209938049e-01 5.8962607383728027e-01 + <_> + + 0 -1 1876 -6.0626221820712090e-03 + + 6.5021592378616333e-01 5.0164777040481567e-01 + <_> + + 0 -1 1877 9.4535842537879944e-02 + + 5.2647089958190918e-01 4.1268271207809448e-01 + <_> + + 0 -1 1878 4.7315051779150963e-03 + + 4.8791998624801636e-01 5.8924478292465210e-01 + <_> + + 0 -1 1879 -5.2571471314877272e-04 + + 3.9172801375389099e-01 5.1894128322601318e-01 + <_> + + 0 -1 1880 -2.5464049540460110e-03 + + 5.8375990390777588e-01 4.9857059121131897e-01 + <_> + + 0 -1 1881 -2.6075689122080803e-02 + + 1.2619839608669281e-01 4.9558219313621521e-01 + <_> + + 0 -1 1882 -5.4779709316790104e-03 + + 5.7225137948989868e-01 5.0102657079696655e-01 + <_> + + 0 -1 1883 5.1337741315364838e-03 + + 5.2732622623443604e-01 4.2263761162757874e-01 + <_> + + 0 -1 1884 4.7944980906322598e-04 + + 4.4500669836997986e-01 5.8195871114730835e-01 + <_> + + 0 -1 1885 -2.1114079281687737e-03 + + 5.7576531171798706e-01 4.5117148756980896e-01 + <_> + + 0 -1 1886 -1.3179990462958813e-02 + + 1.8843810260295868e-01 5.1607340574264526e-01 + <_> + + 0 
-1 1887 -4.7968099825084209e-03 + + 6.5897899866104126e-01 4.7361189126968384e-01 + <_> + + 0 -1 1888 6.7483168095350266e-03 + + 5.2594298124313354e-01 3.3563950657844543e-01 + <_> + + 0 -1 1889 1.4623369788751006e-03 + + 5.3552711009979248e-01 4.2640921473503113e-01 + <_> + + 0 -1 1890 4.7645159065723419e-03 + + 5.0344067811965942e-01 5.7868278026580811e-01 + <_> + + 0 -1 1891 6.8066660314798355e-03 + + 4.7566050291061401e-01 6.6778290271759033e-01 + <_> + + 0 -1 1892 3.6608621012419462e-03 + + 5.3696119785308838e-01 4.3115469813346863e-01 + <_> + + 0 -1 1893 2.1449640393257141e-02 + + 4.9686419963836670e-01 1.8888160586357117e-01 + <_> + + 0 -1 1894 4.1678901761770248e-03 + + 4.9307331442832947e-01 5.8153688907623291e-01 + <_> + + 0 -1 1895 8.6467564105987549e-03 + + 5.2052050828933716e-01 4.1325950622558594e-01 + <_> + + 0 -1 1896 -3.6114078829996288e-04 + + 5.4835551977157593e-01 4.8009279370307922e-01 + <_> + + 0 -1 1897 1.0808729566633701e-03 + + 4.6899020671844482e-01 6.0414212942123413e-01 + <_> + + 0 -1 1898 5.7719959877431393e-03 + + 5.1711422204971313e-01 3.0532771348953247e-01 + <_> + + 0 -1 1899 1.5720770461484790e-03 + + 5.2199780941009521e-01 4.1788038611412048e-01 + <_> + + 0 -1 1900 -1.9307859474793077e-03 + + 5.8603698015213013e-01 4.8129200935363770e-01 + <_> + + 0 -1 1901 -7.8926272690296173e-03 + + 1.7492769658565521e-01 4.9717339873313904e-01 + <_> + + 0 -1 1902 -2.2224679123610258e-03 + + 4.3425890803337097e-01 5.2128481864929199e-01 + <_> + + 0 -1 1903 1.9011989934369922e-03 + + 4.7651869058609009e-01 6.8920552730560303e-01 + <_> + + 0 -1 1904 2.7576119173318148e-03 + + 5.2621912956237793e-01 4.3374860286712646e-01 + <_> + + 0 -1 1905 5.1787449046969414e-03 + + 4.8040691018104553e-01 7.8437292575836182e-01 + <_> + + 0 -1 1906 -9.0273341629654169e-04 + + 4.1208469867706299e-01 5.3534239530563354e-01 + <_> + + 0 -1 1907 5.1797959022223949e-03 + + 4.7403728961944580e-01 6.4259600639343262e-01 + <_> + + 0 -1 1908 -1.0114000178873539e-02 + + 2.4687920510768890e-01 5.1750177145004272e-01 + <_> + + 0 -1 1909 -1.8617060035467148e-02 + + 5.7562941312789917e-01 4.6289789676666260e-01 + <_> + + 0 -1 1910 5.9225959703326225e-03 + + 5.1696258783340454e-01 3.2142710685729980e-01 + <_> + + 0 -1 1911 -6.2945079989731312e-03 + + 3.8720148801803589e-01 5.1416367292404175e-01 + <_> + + 0 -1 1912 6.5353019163012505e-03 + + 4.8530489206314087e-01 6.3104897737503052e-01 + <_> + + 0 -1 1913 1.0878399480134249e-03 + + 5.1173150539398193e-01 3.7232589721679688e-01 + <_> + + 0 -1 1914 -2.2542240098118782e-02 + + 5.6927400827407837e-01 4.8871129751205444e-01 + <_> + + 0 -1 1915 -3.0065660830587149e-03 + + 2.5560128688812256e-01 5.0039929151535034e-01 + <_> + + 0 -1 1916 7.4741272255778313e-03 + + 4.8108729720115662e-01 5.6759268045425415e-01 + <_> + + 0 -1 1917 2.6162320747971535e-02 + + 4.9711948633193970e-01 1.7772370576858521e-01 + <_> + + 0 -1 1918 9.4352738233283162e-04 + + 4.9400109052658081e-01 5.4912507534027100e-01 + <_> + + 0 -1 1919 3.3363241702318192e-02 + + 5.0076121091842651e-01 2.7907240390777588e-01 + <_> + + 0 -1 1920 -1.5118650160729885e-02 + + 7.0595788955688477e-01 4.9730318784713745e-01 + <_> + + 0 -1 1921 9.8648946732282639e-04 + + 5.1286202669143677e-01 3.7767618894577026e-01 + <_> + 213 + 1.0576110076904297e+02 + + <_> + + 0 -1 1922 -9.5150798559188843e-02 + + 6.4707571268081665e-01 4.0172868967056274e-01 + <_> + + 0 -1 1923 6.2702340073883533e-03 + + 3.9998221397399902e-01 5.7464492321014404e-01 + <_> + + 0 -1 1924 3.0018089455552399e-04 + + 3.5587701201438904e-01 
5.5388098955154419e-01 + <_> + + 0 -1 1925 1.1757409665733576e-03 + + 4.2565348744392395e-01 5.3826177120208740e-01 + <_> + + 0 -1 1926 4.4235268433112651e-05 + + 3.6829081177711487e-01 5.5899268388748169e-01 + <_> + + 0 -1 1927 -2.9936920327600092e-05 + + 5.4524701833724976e-01 4.0203678607940674e-01 + <_> + + 0 -1 1928 3.0073199886828661e-03 + + 5.2390581369400024e-01 3.3178439736366272e-01 + <_> + + 0 -1 1929 -1.0513889603316784e-02 + + 4.3206891417503357e-01 5.3079837560653687e-01 + <_> + + 0 -1 1930 8.3476826548576355e-03 + + 4.5046371221542358e-01 6.4532989263534546e-01 + <_> + + 0 -1 1931 -3.1492270063608885e-03 + + 4.3134251236915588e-01 5.3705251216888428e-01 + <_> + + 0 -1 1932 -1.4435649973165710e-05 + + 5.3266030550003052e-01 3.8179719448089600e-01 + <_> + + 0 -1 1933 -4.2855090578086674e-04 + + 4.3051639199256897e-01 5.3820097446441650e-01 + <_> + + 0 -1 1934 1.5062429883982986e-04 + + 4.2359709739685059e-01 5.5449652671813965e-01 + <_> + + 0 -1 1935 7.1559831500053406e-02 + + 5.3030598163604736e-01 2.6788029074668884e-01 + <_> + + 0 -1 1936 8.4095180500298738e-04 + + 3.5571089386940002e-01 5.2054339647293091e-01 + <_> + + 0 -1 1937 6.2986500561237335e-02 + + 5.2253627777099609e-01 2.8613761067390442e-01 + <_> + + 0 -1 1938 -3.3798629883676767e-03 + + 3.6241859197616577e-01 5.2016979455947876e-01 + <_> + + 0 -1 1939 -1.1810739670181647e-04 + + 5.4744768142700195e-01 3.9598938822746277e-01 + <_> + + 0 -1 1940 -5.4505601292476058e-04 + + 3.7404221296310425e-01 5.2157157659530640e-01 + <_> + + 0 -1 1941 -1.8454910023137927e-03 + + 5.8930522203445435e-01 4.5844489336013794e-01 + <_> + + 0 -1 1942 -4.3832371011376381e-04 + + 4.0845820307731628e-01 5.3853511810302734e-01 + <_> + + 0 -1 1943 -2.4000830017030239e-03 + + 3.7774550914764404e-01 5.2935802936553955e-01 + <_> + + 0 -1 1944 -9.8795741796493530e-02 + + 2.9636120796203613e-01 5.0700891017913818e-01 + <_> + + 0 -1 1945 3.1798239797353745e-03 + + 4.8776328563690186e-01 6.7264437675476074e-01 + <_> + + 0 -1 1946 3.2406419632025063e-04 + + 4.3669110536575317e-01 5.5611097812652588e-01 + <_> + + 0 -1 1947 -3.2547250390052795e-02 + + 3.1281578540802002e-01 5.3086161613464355e-01 + <_> + + 0 -1 1948 -7.7561130747199059e-03 + + 6.5602248907089233e-01 4.6398720145225525e-01 + <_> + + 0 -1 1949 1.6027249395847321e-02 + + 5.1726800203323364e-01 3.1418979167938232e-01 + <_> + + 0 -1 1950 7.1002350523485802e-06 + + 4.0844461321830750e-01 5.3362947702407837e-01 + <_> + + 0 -1 1951 7.3422808200120926e-03 + + 4.9669221043586731e-01 6.6034650802612305e-01 + <_> + + 0 -1 1952 -1.6970280557870865e-03 + + 5.9082370996475220e-01 4.5001828670501709e-01 + <_> + + 0 -1 1953 2.4118260480463505e-03 + + 5.3151607513427734e-01 3.5997208952903748e-01 + <_> + + 0 -1 1954 -5.5300937965512276e-03 + + 2.3340409994125366e-01 4.9968141317367554e-01 + <_> + + 0 -1 1955 -2.6478730142116547e-03 + + 5.8809357881546021e-01 4.6847340464591980e-01 + <_> + + 0 -1 1956 1.1295629665255547e-02 + + 4.9837771058082581e-01 1.8845909833908081e-01 + <_> + + 0 -1 1957 -6.6952878842130303e-04 + + 5.8721381425857544e-01 4.7990199923515320e-01 + <_> + + 0 -1 1958 1.4410680159926414e-03 + + 5.1311892271041870e-01 3.5010111331939697e-01 + <_> + + 0 -1 1959 2.4637870956212282e-03 + + 5.3393721580505371e-01 4.1176390647888184e-01 + <_> + + 0 -1 1960 3.3114518737420440e-04 + + 4.3133831024169922e-01 5.3982460498809814e-01 + <_> + + 0 -1 1961 -3.3557269722223282e-02 + + 2.6753368973731995e-01 5.1791548728942871e-01 + <_> + + 0 -1 1962 1.8539419397711754e-02 + + 4.9738699197769165e-01 
2.3171770572662354e-01 + <_> + + 0 -1 1963 -2.9698139405809343e-04 + + 5.5297082662582397e-01 4.6436640620231628e-01 + <_> + + 0 -1 1964 -4.5577259152196348e-04 + + 5.6295841932296753e-01 4.4691911339759827e-01 + <_> + + 0 -1 1965 -1.0158980265259743e-02 + + 6.7062127590179443e-01 4.9259188771247864e-01 + <_> + + 0 -1 1966 -2.2413829356082715e-05 + + 5.2394217252731323e-01 3.9129018783569336e-01 + <_> + + 0 -1 1967 7.2034963523037732e-05 + + 4.7994381189346313e-01 5.5017888545989990e-01 + <_> + + 0 -1 1968 -6.9267209619283676e-03 + + 6.9300097227096558e-01 4.6980848908424377e-01 + <_> + + 0 -1 1969 -7.6997838914394379e-03 + + 4.0996238589286804e-01 5.4808831214904785e-01 + <_> + + 0 -1 1970 -7.3130549862980843e-03 + + 3.2834759354591370e-01 5.0578862428665161e-01 + <_> + + 0 -1 1971 1.9650589674711227e-03 + + 4.9780470132827759e-01 6.3982498645782471e-01 + <_> + + 0 -1 1972 7.1647600270807743e-03 + + 4.6611601114273071e-01 6.2221372127532959e-01 + <_> + + 0 -1 1973 -2.4078639224171638e-02 + + 2.3346449434757233e-01 5.2221620082855225e-01 + <_> + + 0 -1 1974 -2.1027969196438789e-02 + + 1.1836539953947067e-01 4.9382260441780090e-01 + <_> + + 0 -1 1975 3.6017020465806127e-04 + + 5.3250199556350708e-01 4.1167110204696655e-01 + <_> + + 0 -1 1976 -1.7219729721546173e-02 + + 6.2787622213363647e-01 4.6642690896987915e-01 + <_> + + 0 -1 1977 -7.8672142699360847e-03 + + 3.4034150838851929e-01 5.2497369050979614e-01 + <_> + + 0 -1 1978 -4.4777389848604798e-04 + + 3.6104118824005127e-01 5.0862592458724976e-01 + <_> + + 0 -1 1979 5.5486010387539864e-03 + + 4.8842659592628479e-01 6.2034982442855835e-01 + <_> + + 0 -1 1980 -6.9461148232221603e-03 + + 2.6259300112724304e-01 5.0110971927642822e-01 + <_> + + 0 -1 1981 1.3569870498031378e-04 + + 4.3407949805259705e-01 5.6283122301101685e-01 + <_> + + 0 -1 1982 -4.5880250632762909e-02 + + 6.5079987049102783e-01 4.6962749958038330e-01 + <_> + + 0 -1 1983 -2.1582560613751411e-02 + + 3.8265028595924377e-01 5.2876168489456177e-01 + <_> + + 0 -1 1984 -2.0209539681673050e-02 + + 3.2333680987358093e-01 5.0744771957397461e-01 + <_> + + 0 -1 1985 5.8496710844337940e-03 + + 5.1776039600372314e-01 4.4896709918975830e-01 + <_> + + 0 -1 1986 -5.7476379879517481e-05 + + 4.0208509564399719e-01 5.2463638782501221e-01 + <_> + + 0 -1 1987 -1.1513100471347570e-03 + + 6.3150721788406372e-01 4.9051541090011597e-01 + <_> + + 0 -1 1988 1.9862831104546785e-03 + + 4.7024598717689514e-01 6.4971512556076050e-01 + <_> + + 0 -1 1989 -5.2719512023031712e-03 + + 3.6503839492797852e-01 5.2276527881622314e-01 + <_> + + 0 -1 1990 1.2662699446082115e-03 + + 5.1661008596420288e-01 3.8776180148124695e-01 + <_> + + 0 -1 1991 -6.2919440679252148e-03 + + 7.3758941888809204e-01 5.0238478183746338e-01 + <_> + + 0 -1 1992 6.7360111279413104e-04 + + 4.4232261180877686e-01 5.4955857992172241e-01 + <_> + + 0 -1 1993 -1.0523450328037143e-03 + + 5.9763962030410767e-01 4.8595830798149109e-01 + <_> + + 0 -1 1994 -4.4216238893568516e-04 + + 5.9559392929077148e-01 4.3989309668540955e-01 + <_> + + 0 -1 1995 1.1747940443456173e-03 + + 5.3498882055282593e-01 4.6050581336021423e-01 + <_> + + 0 -1 1996 5.2457437850534916e-03 + + 5.0491911172866821e-01 2.9415771365165710e-01 + <_> + + 0 -1 1997 -2.4539720267057419e-02 + + 2.5501778721809387e-01 5.2185869216918945e-01 + <_> + + 0 -1 1998 7.3793041519820690e-04 + + 4.4248610734939575e-01 5.4908162355422974e-01 + <_> + + 0 -1 1999 1.4233799884095788e-03 + + 5.3195142745971680e-01 4.0813559293746948e-01 + <_> + + 0 -1 2000 -2.4149110540747643e-03 + + 
4.0876591205596924e-01 5.2389502525329590e-01 + <_> + + 0 -1 2001 -1.2165299849584699e-03 + + 5.6745791435241699e-01 4.9080529808998108e-01 + <_> + + 0 -1 2002 -1.2438809499144554e-03 + + 4.1294258832931519e-01 5.2561181783676147e-01 + <_> + + 0 -1 2003 6.1942739412188530e-03 + + 5.0601941347122192e-01 7.3136532306671143e-01 + <_> + + 0 -1 2004 -1.6607169527560472e-03 + + 5.9796321392059326e-01 4.5963698625564575e-01 + <_> + + 0 -1 2005 -2.7316259220242500e-02 + + 4.1743651032447815e-01 5.3088420629501343e-01 + <_> + + 0 -1 2006 -1.5845570014789701e-03 + + 5.6158047914505005e-01 4.5194861292839050e-01 + <_> + + 0 -1 2007 -1.5514739789068699e-03 + + 4.0761870145797729e-01 5.3607851266860962e-01 + <_> + + 0 -1 2008 3.8446558755822480e-04 + + 4.3472939729690552e-01 5.4304420948028564e-01 + <_> + + 0 -1 2009 -1.4672259800136089e-02 + + 1.6593049466609955e-01 5.1460939645767212e-01 + <_> + + 0 -1 2010 8.1608882173895836e-03 + + 4.9618190526962280e-01 1.8847459554672241e-01 + <_> + + 0 -1 2011 1.1121659772470593e-03 + + 4.8682639002799988e-01 6.0938161611557007e-01 + <_> + + 0 -1 2012 -7.2603770531713963e-03 + + 6.2843251228332520e-01 4.6903759241104126e-01 + <_> + + 0 -1 2013 -2.4046430189628154e-04 + + 5.5750000476837158e-01 4.0460440516471863e-01 + <_> + + 0 -1 2014 -2.3348190006799996e-04 + + 4.1157621145248413e-01 5.2528482675552368e-01 + <_> + + 0 -1 2015 5.5736480280756950e-03 + + 4.7300729155540466e-01 5.6901007890701294e-01 + <_> + + 0 -1 2016 3.0623769387602806e-02 + + 4.9718868732452393e-01 1.7400950193405151e-01 + <_> + + 0 -1 2017 9.2074798885732889e-04 + + 5.3721177577972412e-01 4.3548721075057983e-01 + <_> + + 0 -1 2018 -4.3550739064812660e-05 + + 5.3668838739395142e-01 4.3473169207572937e-01 + <_> + + 0 -1 2019 -6.6452710889279842e-03 + + 3.4355181455612183e-01 5.1605331897735596e-01 + <_> + + 0 -1 2020 4.3221998959779739e-02 + + 4.7667920589447021e-01 7.2936528921127319e-01 + <_> + + 0 -1 2021 2.2331769578158855e-03 + + 5.0293159484863281e-01 5.6331712007522583e-01 + <_> + + 0 -1 2022 3.1829739455133677e-03 + + 4.0160921216011047e-01 5.1921367645263672e-01 + <_> + + 0 -1 2023 -1.8027749320026487e-04 + + 4.0883159637451172e-01 5.4179197549819946e-01 + <_> + + 0 -1 2024 -5.2934689447283745e-03 + + 4.0756770968437195e-01 5.2435618638992310e-01 + <_> + + 0 -1 2025 1.2750959722325206e-03 + + 4.9132829904556274e-01 6.3870108127593994e-01 + <_> + + 0 -1 2026 4.3385322205722332e-03 + + 5.0316721200942993e-01 2.9473468661308289e-01 + <_> + + 0 -1 2027 8.5250744596123695e-03 + + 4.9497890472412109e-01 6.3088691234588623e-01 + <_> + + 0 -1 2028 -9.4266352243721485e-04 + + 5.3283667564392090e-01 4.2856499552726746e-01 + <_> + + 0 -1 2029 1.3609660090878606e-03 + + 4.9915251135826111e-01 5.9415012598037720e-01 + <_> + + 0 -1 2030 4.4782509212382138e-04 + + 4.5735040307044983e-01 5.8544808626174927e-01 + <_> + + 0 -1 2031 1.3360050506889820e-03 + + 4.6043589711189270e-01 5.8490520715713501e-01 + <_> + + 0 -1 2032 -6.0967548051849008e-04 + + 3.9693889021873474e-01 5.2294230461120605e-01 + <_> + + 0 -1 2033 -2.3656780831515789e-03 + + 5.8083200454711914e-01 4.8983570933341980e-01 + <_> + + 0 -1 2034 1.0734340175986290e-03 + + 4.3512108922004700e-01 5.4700392484664917e-01 + <_> + + 0 -1 2035 2.1923359017819166e-03 + + 5.3550601005554199e-01 3.8429039716720581e-01 + <_> + + 0 -1 2036 5.4968618787825108e-03 + + 5.0181388854980469e-01 2.8271919488906860e-01 + <_> + + 0 -1 2037 -7.5368821620941162e-02 + + 1.2250760197639465e-01 5.1488268375396729e-01 + <_> + + 0 -1 2038 2.5134470313787460e-02 + + 
4.7317668795585632e-01 7.0254462957382202e-01 + <_> + + 0 -1 2039 -2.9358599931583740e-05 + + 5.4305320978164673e-01 4.6560868620872498e-01 + <_> + + 0 -1 2040 -5.8355910005047917e-04 + + 4.0310400724411011e-01 5.1901197433471680e-01 + <_> + + 0 -1 2041 -2.6639450807124376e-03 + + 4.3081268668174744e-01 5.1617711782455444e-01 + <_> + + 0 -1 2042 -1.3804089976474643e-03 + + 6.2198299169540405e-01 4.6955159306526184e-01 + <_> + + 0 -1 2043 1.2313219485804439e-03 + + 5.3793638944625854e-01 4.4258311390876770e-01 + <_> + + 0 -1 2044 -1.4644179827882908e-05 + + 5.2816402912139893e-01 4.2225030064582825e-01 + <_> + + 0 -1 2045 -1.2818809598684311e-02 + + 2.5820928812026978e-01 5.1799327135086060e-01 + <_> + + 0 -1 2046 2.2852189838886261e-02 + + 4.7786930203437805e-01 7.6092642545700073e-01 + <_> + + 0 -1 2047 8.2305970136076212e-04 + + 5.3409922122955322e-01 4.6717241406440735e-01 + <_> + + 0 -1 2048 1.2770120054483414e-02 + + 4.9657610058784485e-01 1.4723660051822662e-01 + <_> + + 0 -1 2049 -5.0051510334014893e-02 + + 6.4149940013885498e-01 5.0165921449661255e-01 + <_> + + 0 -1 2050 1.5775270760059357e-02 + + 4.5223200321197510e-01 5.6853622198104858e-01 + <_> + + 0 -1 2051 -1.8501620739698410e-02 + + 2.7647489309310913e-01 5.1379591226577759e-01 + <_> + + 0 -1 2052 2.4626250378787518e-03 + + 5.1419419050216675e-01 3.7954080104827881e-01 + <_> + + 0 -1 2053 6.2916167080402374e-02 + + 5.0606489181518555e-01 6.5804338455200195e-01 + <_> + + 0 -1 2054 -2.1648500478477217e-05 + + 5.1953881978988647e-01 4.0198868513107300e-01 + <_> + + 0 -1 2055 2.1180990152060986e-03 + + 4.9623650312423706e-01 5.9544587135314941e-01 + <_> + + 0 -1 2056 -1.6634890809655190e-02 + + 3.7579330801963806e-01 5.1754468679428101e-01 + <_> + + 0 -1 2057 -2.8899470344185829e-03 + + 6.6240137815475464e-01 5.0571787357330322e-01 + <_> + + 0 -1 2058 7.6783262193202972e-02 + + 4.7957968711853027e-01 8.0477148294448853e-01 + <_> + + 0 -1 2059 3.9170677773654461e-03 + + 4.9378821253776550e-01 5.7199418544769287e-01 + <_> + + 0 -1 2060 -7.2670601308345795e-02 + + 5.3894560784101486e-02 4.9439039826393127e-01 + <_> + + 0 -1 2061 5.4039502143859863e-01 + + 5.1297742128372192e-01 1.1433389782905579e-01 + <_> + + 0 -1 2062 2.9510019812732935e-03 + + 4.5283439755439758e-01 5.6985741853713989e-01 + <_> + + 0 -1 2063 3.4508369863033295e-03 + + 5.3577268123626709e-01 4.2187309265136719e-01 + <_> + + 0 -1 2064 -4.2077939724549651e-04 + + 5.9161728620529175e-01 4.6379259228706360e-01 + <_> + + 0 -1 2065 3.3051050268113613e-03 + + 5.2733850479125977e-01 4.3820428848266602e-01 + <_> + + 0 -1 2066 4.7735060798004270e-04 + + 4.0465280413627625e-01 5.1818847656250000e-01 + <_> + + 0 -1 2067 -2.5928510352969170e-02 + + 7.4522358179092407e-01 5.0893861055374146e-01 + <_> + + 0 -1 2068 -2.9729790985584259e-03 + + 3.2954359054565430e-01 5.0587952136993408e-01 + <_> + + 0 -1 2069 5.8508329093456268e-03 + + 4.8571440577507019e-01 5.7930248975753784e-01 + <_> + + 0 -1 2070 -4.5967519283294678e-02 + + 4.3127310276031494e-01 5.3806531429290771e-01 + <_> + + 0 -1 2071 1.5585960447788239e-01 + + 5.1961702108383179e-01 1.6847139596939087e-01 + <_> + + 0 -1 2072 1.5164829790592194e-02 + + 4.7357571125030518e-01 6.7350268363952637e-01 + <_> + + 0 -1 2073 -1.0604249546304345e-03 + + 5.8229267597198486e-01 4.7757029533386230e-01 + <_> + + 0 -1 2074 6.6476291976869106e-03 + + 4.9991989135742188e-01 2.3195350170135498e-01 + <_> + + 0 -1 2075 -1.2231130152940750e-02 + + 4.7508931159973145e-01 5.2629822492599487e-01 + <_> + + 0 -1 2076 5.6528882123529911e-03 + + 
5.0697678327560425e-01 3.5618188977241516e-01 + <_> + + 0 -1 2077 1.2977829901501536e-03 + + 4.8756939172744751e-01 5.6190627813339233e-01 + <_> + + 0 -1 2078 1.0781589895486832e-02 + + 4.7507700324058533e-01 6.7823082208633423e-01 + <_> + + 0 -1 2079 2.8654779307544231e-03 + + 5.3054618835449219e-01 4.2907360196113586e-01 + <_> + + 0 -1 2080 2.8663428965955973e-03 + + 4.5184791088104248e-01 5.5393511056900024e-01 + <_> + + 0 -1 2081 -5.1983320154249668e-03 + + 4.1491198539733887e-01 5.4341888427734375e-01 + <_> + + 0 -1 2082 5.3739990107715130e-03 + + 4.7178968787193298e-01 6.5076571702957153e-01 + <_> + + 0 -1 2083 -1.4641529880464077e-02 + + 2.1721640229225159e-01 5.1617771387100220e-01 + <_> + + 0 -1 2084 -1.5042580344015732e-05 + + 5.3373837471008301e-01 4.2988368868827820e-01 + <_> + + 0 -1 2085 -1.1875660129589960e-04 + + 4.6045941114425659e-01 5.5824470520019531e-01 + <_> + + 0 -1 2086 1.6995530575513840e-02 + + 4.9458950757980347e-01 7.3880076408386230e-02 + <_> + + 0 -1 2087 -3.5095941275358200e-02 + + 7.0055091381072998e-01 4.9775910377502441e-01 + <_> + + 0 -1 2088 2.4217350874096155e-03 + + 4.4662651419639587e-01 5.4776942729949951e-01 + <_> + + 0 -1 2089 -9.6340337768197060e-04 + + 4.7140988707542419e-01 5.3133380413055420e-01 + <_> + + 0 -1 2090 1.6391130338888615e-04 + + 4.3315461277961731e-01 5.3422421216964722e-01 + <_> + + 0 -1 2091 -2.1141460165381432e-02 + + 2.6447001099586487e-01 5.2044987678527832e-01 + <_> + + 0 -1 2092 8.7775202700868249e-04 + + 5.2083498239517212e-01 4.1527429223060608e-01 + <_> + + 0 -1 2093 -2.7943920344114304e-02 + + 6.3441252708435059e-01 5.0188118219375610e-01 + <_> + + 0 -1 2094 6.7297378554940224e-03 + + 5.0504380464553833e-01 3.5008639097213745e-01 + <_> + + 0 -1 2095 2.3281039670109749e-02 + + 4.9663180112838745e-01 6.9686770439147949e-01 + <_> + + 0 -1 2096 -1.1644979938864708e-02 + + 3.3002600073814392e-01 5.0496298074722290e-01 + <_> + + 0 -1 2097 1.5764309093356133e-02 + + 4.9915981292724609e-01 7.3211538791656494e-01 + <_> + + 0 -1 2098 -1.3611479662358761e-03 + + 3.9117351174354553e-01 5.1606708765029907e-01 + <_> + + 0 -1 2099 -8.1522337859496474e-04 + + 5.6289112567901611e-01 4.9497190117835999e-01 + <_> + + 0 -1 2100 -6.0066272271797061e-04 + + 5.8535951375961304e-01 4.5505958795547485e-01 + <_> + + 0 -1 2101 4.9715518252924085e-04 + + 4.2714700102806091e-01 5.4435992240905762e-01 + <_> + + 0 -1 2102 2.3475370835512877e-03 + + 5.1431107521057129e-01 3.8876569271087646e-01 + <_> + + 0 -1 2103 -8.9261569082736969e-03 + + 6.0445022583007812e-01 4.9717208743095398e-01 + <_> + + 0 -1 2104 -1.3919910416007042e-02 + + 2.5831609964370728e-01 5.0003677606582642e-01 + <_> + + 0 -1 2105 1.0209949687123299e-03 + + 4.8573741316795349e-01 5.5603581666946411e-01 + <_> + + 0 -1 2106 -2.7441629208624363e-03 + + 5.9368848800659180e-01 4.6457770466804504e-01 + <_> + + 0 -1 2107 -1.6200130805373192e-02 + + 3.1630149483680725e-01 5.1934951543807983e-01 + <_> + + 0 -1 2108 4.3331980705261230e-03 + + 5.0612241029739380e-01 3.4588789939880371e-01 + <_> + + 0 -1 2109 5.8497930876910686e-04 + + 4.7790178656578064e-01 5.8701777458190918e-01 + <_> + + 0 -1 2110 -2.2466450463980436e-03 + + 4.2978510260581970e-01 5.3747731447219849e-01 + <_> + + 0 -1 2111 2.3146099410951138e-03 + + 5.4386717081069946e-01 4.6409699320793152e-01 + <_> + + 0 -1 2112 8.7679121643304825e-03 + + 4.7268930077552795e-01 6.7717897891998291e-01 + <_> + + 0 -1 2113 -2.2448020172305405e-04 + + 4.2291730642318726e-01 5.4280489683151245e-01 + <_> + + 0 -1 2114 -7.4336021207273006e-03 + + 
6.0988807678222656e-01 4.6836739778518677e-01 + <_> + + 0 -1 2115 -2.3189240600913763e-03 + + 5.6894367933273315e-01 4.4242420792579651e-01 + <_> + + 0 -1 2116 -2.1042178850620985e-03 + + 3.7622210383415222e-01 5.1870870590209961e-01 + <_> + + 0 -1 2117 4.6034841216169298e-04 + + 4.6994051337242126e-01 5.7712072134017944e-01 + <_> + + 0 -1 2118 1.0547629790380597e-03 + + 4.4652169942855835e-01 5.6017017364501953e-01 + <_> + + 0 -1 2119 8.7148818420246243e-04 + + 5.4498052597045898e-01 3.9147090911865234e-01 + <_> + + 0 -1 2120 3.3364820410497487e-04 + + 4.5640090107917786e-01 5.6457388401031494e-01 + <_> + + 0 -1 2121 -1.4853250468149781e-03 + + 5.7473778724670410e-01 4.6927788853645325e-01 + <_> + + 0 -1 2122 3.0251620337367058e-03 + + 5.1661968231201172e-01 3.7628141045570374e-01 + <_> + + 0 -1 2123 5.0280741415917873e-03 + + 5.0021117925643921e-01 6.1515271663665771e-01 + <_> + + 0 -1 2124 -5.8164511574432254e-04 + + 5.3945982456207275e-01 4.3907511234283447e-01 + <_> + + 0 -1 2125 4.5141529291868210e-02 + + 5.1883268356323242e-01 2.0630359649658203e-01 + <_> + + 0 -1 2126 -1.0795620037242770e-03 + + 3.9046850800514221e-01 5.1379072666168213e-01 + <_> + + 0 -1 2127 1.5995999274309725e-04 + + 4.8953229188919067e-01 5.4275041818618774e-01 + <_> + + 0 -1 2128 -1.9359270110726357e-02 + + 6.9752287864685059e-01 4.7735071182250977e-01 + <_> + + 0 -1 2129 2.0725509524345398e-01 + + 5.2336359024047852e-01 3.0349919199943542e-01 + <_> + + 0 -1 2130 -4.1953290929086506e-04 + + 5.4193967580795288e-01 4.4601860642433167e-01 + <_> + + 0 -1 2131 2.2582069505006075e-03 + + 4.8157641291618347e-01 6.0274088382720947e-01 + <_> + + 0 -1 2132 -6.7811207845807076e-03 + + 3.9802789688110352e-01 5.1833057403564453e-01 + <_> + + 0 -1 2133 1.1154309846460819e-02 + + 5.4312318563461304e-01 4.1887599229812622e-01 + <_> + + 0 -1 2134 4.3162431567907333e-02 + + 4.7382280230522156e-01 6.5229612588882446e-01 + + <_> + + <_> + 3 7 14 4 -1. + <_> + 3 9 14 2 2. + <_> + + <_> + 1 2 18 4 -1. + <_> + 7 2 6 4 3. + <_> + + <_> + 1 7 15 9 -1. + <_> + 1 10 15 3 3. + <_> + + <_> + 5 6 2 6 -1. + <_> + 5 9 2 3 2. + <_> + + <_> + 7 5 6 3 -1. + <_> + 9 5 2 3 3. + <_> + + <_> + 4 0 12 9 -1. + <_> + 4 3 12 3 3. + <_> + + <_> + 6 9 10 8 -1. + <_> + 6 13 10 4 2. + <_> + + <_> + 3 6 14 8 -1. + <_> + 3 10 14 4 2. + <_> + + <_> + 14 1 6 10 -1. + <_> + 14 1 3 10 2. + <_> + + <_> + 7 8 5 12 -1. + <_> + 7 12 5 4 3. + <_> + + <_> + 1 1 18 3 -1. + <_> + 7 1 6 3 3. + <_> + + <_> + 1 8 17 2 -1. + <_> + 1 9 17 1 2. + <_> + + <_> + 16 6 4 2 -1. + <_> + 16 7 4 1 2. + <_> + + <_> + 5 17 2 2 -1. + <_> + 5 18 2 1 2. + <_> + + <_> + 14 2 6 12 -1. + <_> + 14 2 3 12 2. + <_> + + <_> + 4 0 4 12 -1. + <_> + 4 0 2 6 2. + <_> + 6 6 2 6 2. + <_> + + <_> + 2 11 18 8 -1. + <_> + 8 11 6 8 3. + <_> + + <_> + 5 7 10 2 -1. + <_> + 5 8 10 1 2. + <_> + + <_> + 15 11 5 3 -1. + <_> + 15 12 5 1 3. + <_> + + <_> + 5 3 10 9 -1. + <_> + 5 6 10 3 3. + <_> + + <_> + 9 4 2 14 -1. + <_> + 9 11 2 7 2. + <_> + + <_> + 3 5 4 12 -1. + <_> + 3 9 4 4 3. + <_> + + <_> + 4 5 12 5 -1. + <_> + 8 5 4 5 3. + <_> + + <_> + 5 6 10 8 -1. + <_> + 5 10 10 4 2. + <_> + + <_> + 8 0 6 9 -1. + <_> + 8 3 6 3 3. + <_> + + <_> + 9 12 1 8 -1. + <_> + 9 16 1 4 2. + <_> + + <_> + 0 7 20 6 -1. + <_> + 0 9 20 2 3. + <_> + + <_> + 7 0 6 17 -1. + <_> + 9 0 2 17 3. + <_> + + <_> + 9 0 6 4 -1. + <_> + 11 0 2 4 3. + <_> + + <_> + 5 1 6 4 -1. + <_> + 7 1 2 4 3. + <_> + + <_> + 12 1 6 16 -1. + <_> + 14 1 2 16 3. + <_> + + <_> + 0 5 18 8 -1. + <_> + 0 5 9 4 2. + <_> + 9 9 9 4 2. + <_> + + <_> + 8 15 10 4 -1. 
+ <_> + 13 15 5 2 2. + <_> + 8 17 5 2 2. + <_> + + <_> + 3 1 4 8 -1. + <_> + 3 1 2 4 2. + <_> + 5 5 2 4 2. + <_> + + <_> + 3 6 14 10 -1. + <_> + 10 6 7 5 2. + <_> + 3 11 7 5 2. + <_> + + <_> + 2 1 6 16 -1. + <_> + 4 1 2 16 3. + <_> + + <_> + 0 18 20 2 -1. + <_> + 0 19 20 1 2. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 0 12 9 6 -1. + <_> + 0 14 9 2 3. + <_> + + <_> + 5 7 3 4 -1. + <_> + 5 9 3 2 2. + <_> + + <_> + 9 3 2 16 -1. + <_> + 9 11 2 8 2. + <_> + + <_> + 3 6 13 8 -1. + <_> + 3 10 13 4 2. + <_> + + <_> + 12 3 8 2 -1. + <_> + 12 3 4 2 2. + <_> + + <_> + 8 8 4 12 -1. + <_> + 8 12 4 4 3. + <_> + + <_> + 11 3 8 6 -1. + <_> + 15 3 4 3 2. + <_> + 11 6 4 3 2. + <_> + + <_> + 7 1 6 19 -1. + <_> + 9 1 2 19 3. + <_> + + <_> + 9 0 6 4 -1. + <_> + 11 0 2 4 3. + <_> + + <_> + 3 1 9 3 -1. + <_> + 6 1 3 3 3. + <_> + + <_> + 8 15 10 4 -1. + <_> + 13 15 5 2 2. + <_> + 8 17 5 2 2. + <_> + + <_> + 0 3 6 10 -1. + <_> + 3 3 3 10 2. + <_> + + <_> + 3 4 15 15 -1. + <_> + 3 9 15 5 3. + <_> + + <_> + 6 5 8 6 -1. + <_> + 6 7 8 2 3. + <_> + + <_> + 4 4 12 10 -1. + <_> + 10 4 6 5 2. + <_> + 4 9 6 5 2. + <_> + + <_> + 6 4 4 4 -1. + <_> + 8 4 2 4 2. + <_> + + <_> + 15 11 1 2 -1. + <_> + 15 12 1 1 2. + <_> + + <_> + 3 11 2 2 -1. + <_> + 3 12 2 1 2. + <_> + + <_> + 16 11 1 3 -1. + <_> + 16 12 1 1 3. + <_> + + <_> + 3 15 6 4 -1. + <_> + 3 15 3 2 2. + <_> + 6 17 3 2 2. + <_> + + <_> + 6 7 8 2 -1. + <_> + 6 8 8 1 2. + <_> + + <_> + 3 11 1 3 -1. + <_> + 3 12 1 1 3. + <_> + + <_> + 6 0 12 2 -1. + <_> + 6 1 12 1 2. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 7 15 6 2 -1. + <_> + 7 16 6 1 2. + <_> + + <_> + 0 5 4 6 -1. + <_> + 0 7 4 2 3. + <_> + + <_> + 4 12 12 2 -1. + <_> + 8 12 4 2 3. + <_> + + <_> + 6 3 1 9 -1. + <_> + 6 6 1 3 3. + <_> + + <_> + 10 17 3 2 -1. + <_> + 11 17 1 2 3. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 7 6 6 4 -1. + <_> + 9 6 2 4 3. + <_> + + <_> + 7 17 3 2 -1. + <_> + 8 17 1 2 3. + <_> + + <_> + 10 17 3 3 -1. + <_> + 11 17 1 3 3. + <_> + + <_> + 8 12 3 2 -1. + <_> + 8 13 3 1 2. + <_> + + <_> + 9 3 6 2 -1. + <_> + 11 3 2 2 3. + <_> + + <_> + 3 11 14 4 -1. + <_> + 3 13 14 2 2. + <_> + + <_> + 1 10 18 4 -1. + <_> + 10 10 9 2 2. + <_> + 1 12 9 2 2. + <_> + + <_> + 0 10 3 3 -1. + <_> + 0 11 3 1 3. + <_> + + <_> + 9 1 6 6 -1. + <_> + 11 1 2 6 3. + <_> + + <_> + 8 7 3 6 -1. + <_> + 9 7 1 6 3. + <_> + + <_> + 1 0 18 9 -1. + <_> + 1 3 18 3 3. + <_> + + <_> + 12 10 2 6 -1. + <_> + 12 13 2 3 2. + <_> + + <_> + 0 5 19 8 -1. + <_> + 0 9 19 4 2. + <_> + + <_> + 7 0 6 9 -1. + <_> + 9 0 2 9 3. + <_> + + <_> + 5 3 6 1 -1. + <_> + 7 3 2 1 3. + <_> + + <_> + 11 3 6 1 -1. + <_> + 13 3 2 1 3. + <_> + + <_> + 5 10 4 6 -1. + <_> + 5 13 4 3 2. + <_> + + <_> + 11 3 6 1 -1. + <_> + 13 3 2 1 3. + <_> + + <_> + 4 4 12 6 -1. + <_> + 4 6 12 2 3. + <_> + + <_> + 15 12 2 6 -1. + <_> + 15 14 2 2 3. + <_> + + <_> + 9 3 2 2 -1. + <_> + 10 3 1 2 2. + <_> + + <_> + 9 3 3 1 -1. + <_> + 10 3 1 1 3. + <_> + + <_> + 1 1 4 14 -1. + <_> + 3 1 2 14 2. + <_> + + <_> + 9 0 4 4 -1. + <_> + 11 0 2 2 2. + <_> + 9 2 2 2 2. + <_> + + <_> + 7 5 1 14 -1. + <_> + 7 12 1 7 2. + <_> + + <_> + 19 0 1 4 -1. + <_> + 19 2 1 2 2. + <_> + + <_> + 5 5 6 4 -1. + <_> + 8 5 3 4 2. + <_> + + <_> + 9 18 3 2 -1. + <_> + 10 18 1 2 3. + <_> + + <_> + 8 18 3 2 -1. + <_> + 9 18 1 2 3. + <_> + + <_> + 4 5 12 6 -1. + <_> + 4 7 12 2 3. + <_> + + <_> + 3 12 2 6 -1. + <_> + 3 14 2 2 3. + <_> + + <_> + 10 8 2 12 -1. + <_> + 10 12 2 4 3. + <_> + + <_> + 7 18 3 2 -1. 
+ <_> + 8 18 1 2 3. + <_> + + <_> + 9 0 6 2 -1. + <_> + 11 0 2 2 3. + <_> + + <_> + 5 11 9 3 -1. + <_> + 5 12 9 1 3. + <_> + + <_> + 9 0 6 2 -1. + <_> + 11 0 2 2 3. + <_> + + <_> + 1 1 18 5 -1. + <_> + 7 1 6 5 3. + <_> + + <_> + 8 0 4 4 -1. + <_> + 10 0 2 2 2. + <_> + 8 2 2 2 2. + <_> + + <_> + 3 12 1 3 -1. + <_> + 3 13 1 1 3. + <_> + + <_> + 8 14 5 3 -1. + <_> + 8 15 5 1 3. + <_> + + <_> + 5 4 10 12 -1. + <_> + 5 4 5 6 2. + <_> + 10 10 5 6 2. + <_> + + <_> + 9 6 9 12 -1. + <_> + 9 10 9 4 3. + <_> + + <_> + 2 2 12 14 -1. + <_> + 2 2 6 7 2. + <_> + 8 9 6 7 2. + <_> + + <_> + 4 7 12 2 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 7 4 6 4 -1. + <_> + 7 6 6 2 2. + <_> + + <_> + 4 5 11 8 -1. + <_> + 4 9 11 4 2. + <_> + + <_> + 3 10 16 4 -1. + <_> + 3 12 16 2 2. + <_> + + <_> + 0 0 16 2 -1. + <_> + 0 1 16 1 2. + <_> + + <_> + 7 5 6 2 -1. + <_> + 9 5 2 2 3. + <_> + + <_> + 3 2 6 10 -1. + <_> + 3 2 3 5 2. + <_> + 6 7 3 5 2. + <_> + + <_> + 10 5 8 15 -1. + <_> + 10 10 8 5 3. + <_> + + <_> + 3 14 8 6 -1. + <_> + 3 14 4 3 2. + <_> + 7 17 4 3 2. + <_> + + <_> + 14 2 2 2 -1. + <_> + 14 3 2 1 2. + <_> + + <_> + 1 10 7 6 -1. + <_> + 1 13 7 3 2. + <_> + + <_> + 15 4 4 3 -1. + <_> + 15 4 2 3 2. + <_> + + <_> + 2 9 14 6 -1. + <_> + 2 9 7 3 2. + <_> + 9 12 7 3 2. + <_> + + <_> + 5 7 10 4 -1. + <_> + 5 9 10 2 2. + <_> + + <_> + 6 9 8 8 -1. + <_> + 6 9 4 4 2. + <_> + 10 13 4 4 2. + <_> + + <_> + 14 1 3 2 -1. + <_> + 14 2 3 1 2. + <_> + + <_> + 1 4 4 2 -1. + <_> + 3 4 2 2 2. + <_> + + <_> + 11 10 2 8 -1. + <_> + 11 14 2 4 2. + <_> + + <_> + 0 0 5 3 -1. + <_> + 0 1 5 1 3. + <_> + + <_> + 2 5 18 8 -1. + <_> + 11 5 9 4 2. + <_> + 2 9 9 4 2. + <_> + + <_> + 6 6 1 6 -1. + <_> + 6 9 1 3 2. + <_> + + <_> + 19 1 1 3 -1. + <_> + 19 2 1 1 3. + <_> + + <_> + 7 6 6 6 -1. + <_> + 9 6 2 6 3. + <_> + + <_> + 19 1 1 3 -1. + <_> + 19 2 1 1 3. + <_> + + <_> + 3 13 2 3 -1. + <_> + 3 14 2 1 3. + <_> + + <_> + 8 4 8 12 -1. + <_> + 12 4 4 6 2. + <_> + 8 10 4 6 2. + <_> + + <_> + 5 2 6 3 -1. + <_> + 7 2 2 3 3. + <_> + + <_> + 6 1 9 10 -1. + <_> + 6 6 9 5 2. + <_> + + <_> + 0 4 6 12 -1. + <_> + 2 4 2 12 3. + <_> + + <_> + 15 13 2 3 -1. + <_> + 15 14 2 1 3. + <_> + + <_> + 7 14 5 3 -1. + <_> + 7 15 5 1 3. + <_> + + <_> + 15 13 3 3 -1. + <_> + 15 14 3 1 3. + <_> + + <_> + 6 14 8 3 -1. + <_> + 6 15 8 1 3. + <_> + + <_> + 15 13 3 3 -1. + <_> + 15 14 3 1 3. + <_> + + <_> + 2 13 3 3 -1. + <_> + 2 14 3 1 3. + <_> + + <_> + 4 7 12 12 -1. + <_> + 10 7 6 6 2. + <_> + 4 13 6 6 2. + <_> + + <_> + 9 7 2 6 -1. + <_> + 10 7 1 6 2. + <_> + + <_> + 8 9 5 2 -1. + <_> + 8 10 5 1 2. + <_> + + <_> + 8 6 3 4 -1. + <_> + 9 6 1 4 3. + <_> + + <_> + 9 6 2 8 -1. + <_> + 9 10 2 4 2. + <_> + + <_> + 7 7 3 6 -1. + <_> + 8 7 1 6 3. + <_> + + <_> + 11 3 3 3 -1. + <_> + 12 3 1 3 3. + <_> + + <_> + 5 4 6 1 -1. + <_> + 7 4 2 1 3. + <_> + + <_> + 5 6 10 3 -1. + <_> + 5 7 10 1 3. + <_> + + <_> + 7 3 6 9 -1. + <_> + 7 6 6 3 3. + <_> + + <_> + 6 7 9 1 -1. + <_> + 9 7 3 1 3. + <_> + + <_> + 2 8 16 8 -1. + <_> + 2 12 16 4 2. + <_> + + <_> + 14 6 2 6 -1. + <_> + 14 9 2 3 2. + <_> + + <_> + 1 5 6 15 -1. + <_> + 1 10 6 5 3. + <_> + + <_> + 10 0 6 9 -1. + <_> + 10 3 6 3 3. + <_> + + <_> + 6 6 7 14 -1. + <_> + 6 13 7 7 2. + <_> + + <_> + 13 7 3 6 -1. + <_> + 13 9 3 2 3. + <_> + + <_> + 1 8 15 4 -1. + <_> + 6 8 5 4 3. + <_> + + <_> + 11 2 3 10 -1. + <_> + 11 7 3 5 2. + <_> + + <_> + 3 7 4 6 -1. + <_> + 3 9 4 2 3. + <_> + + <_> + 13 3 6 10 -1. + <_> + 15 3 2 10 3. + <_> + + <_> + 5 7 8 10 -1. + <_> + 5 7 4 5 2. + <_> + 9 12 4 5 2. + <_> + + <_> + 4 4 12 12 -1. + <_> + 10 4 6 6 2. 
+ <_> + 4 10 6 6 2. + <_> + + <_> + 1 4 6 9 -1. + <_> + 3 4 2 9 3. + <_> + + <_> + 11 3 2 5 -1. + <_> + 11 3 1 5 2. + <_> + + <_> + 7 3 2 5 -1. + <_> + 8 3 1 5 2. + <_> + + <_> + 10 14 2 3 -1. + <_> + 10 15 2 1 3. + <_> + + <_> + 5 12 6 2 -1. + <_> + 8 12 3 2 2. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 4 11 12 6 -1. + <_> + 4 14 12 3 2. + <_> + + <_> + 11 11 5 9 -1. + <_> + 11 14 5 3 3. + <_> + + <_> + 6 15 3 2 -1. + <_> + 6 16 3 1 2. + <_> + + <_> + 11 0 3 5 -1. + <_> + 12 0 1 5 3. + <_> + + <_> + 5 5 6 7 -1. + <_> + 8 5 3 7 2. + <_> + + <_> + 13 0 1 9 -1. + <_> + 13 3 1 3 3. + <_> + + <_> + 3 2 4 8 -1. + <_> + 3 2 2 4 2. + <_> + 5 6 2 4 2. + <_> + + <_> + 13 12 4 6 -1. + <_> + 13 14 4 2 3. + <_> + + <_> + 3 12 4 6 -1. + <_> + 3 14 4 2 3. + <_> + + <_> + 13 11 3 4 -1. + <_> + 13 13 3 2 2. + <_> + + <_> + 4 4 4 3 -1. + <_> + 4 5 4 1 3. + <_> + + <_> + 7 5 11 8 -1. + <_> + 7 9 11 4 2. + <_> + + <_> + 7 8 3 4 -1. + <_> + 8 8 1 4 3. + <_> + + <_> + 9 1 6 1 -1. + <_> + 11 1 2 1 3. + <_> + + <_> + 5 5 3 3 -1. + <_> + 5 6 3 1 3. + <_> + + <_> + 0 9 20 6 -1. + <_> + 10 9 10 3 2. + <_> + 0 12 10 3 2. + <_> + + <_> + 8 6 3 5 -1. + <_> + 9 6 1 5 3. + <_> + + <_> + 11 0 1 3 -1. + <_> + 11 1 1 1 3. + <_> + + <_> + 4 2 4 2 -1. + <_> + 4 3 4 1 2. + <_> + + <_> + 12 6 4 3 -1. + <_> + 12 7 4 1 3. + <_> + + <_> + 5 0 6 4 -1. + <_> + 7 0 2 4 3. + <_> + + <_> + 9 7 3 8 -1. + <_> + 10 7 1 8 3. + <_> + + <_> + 9 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 6 7 14 4 -1. + <_> + 13 7 7 2 2. + <_> + 6 9 7 2 2. + <_> + + <_> + 0 5 3 6 -1. + <_> + 0 7 3 2 3. + <_> + + <_> + 13 11 3 4 -1. + <_> + 13 13 3 2 2. + <_> + + <_> + 4 11 3 4 -1. + <_> + 4 13 3 2 2. + <_> + + <_> + 5 9 12 8 -1. + <_> + 11 9 6 4 2. + <_> + 5 13 6 4 2. + <_> + + <_> + 9 12 1 3 -1. + <_> + 9 13 1 1 3. + <_> + + <_> + 10 15 2 4 -1. + <_> + 10 17 2 2 2. + <_> + + <_> + 7 7 6 1 -1. + <_> + 9 7 2 1 3. + <_> + + <_> + 12 3 6 6 -1. + <_> + 15 3 3 3 2. + <_> + 12 6 3 3 2. + <_> + + <_> + 0 4 10 6 -1. + <_> + 0 6 10 2 3. + <_> + + <_> + 8 3 8 14 -1. + <_> + 12 3 4 7 2. + <_> + 8 10 4 7 2. + <_> + + <_> + 4 4 7 15 -1. + <_> + 4 9 7 5 3. + <_> + + <_> + 12 2 6 8 -1. + <_> + 15 2 3 4 2. + <_> + 12 6 3 4 2. + <_> + + <_> + 2 2 6 8 -1. + <_> + 2 2 3 4 2. + <_> + 5 6 3 4 2. + <_> + + <_> + 2 13 18 7 -1. + <_> + 8 13 6 7 3. + <_> + + <_> + 4 3 8 14 -1. + <_> + 4 3 4 7 2. + <_> + 8 10 4 7 2. + <_> + + <_> + 18 1 2 6 -1. + <_> + 18 3 2 2 3. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 18 1 2 6 -1. + <_> + 18 3 2 2 3. + <_> + + <_> + 0 1 2 6 -1. + <_> + 0 3 2 2 3. + <_> + + <_> + 1 5 18 6 -1. + <_> + 1 7 18 2 3. + <_> + + <_> + 0 2 6 7 -1. + <_> + 3 2 3 7 2. + <_> + + <_> + 7 3 6 14 -1. + <_> + 7 10 6 7 2. + <_> + + <_> + 3 7 13 10 -1. + <_> + 3 12 13 5 2. + <_> + + <_> + 11 15 2 2 -1. + <_> + 11 16 2 1 2. + <_> + + <_> + 2 11 16 4 -1. + <_> + 2 11 8 2 2. + <_> + 10 13 8 2 2. + <_> + + <_> + 13 7 6 4 -1. + <_> + 16 7 3 2 2. + <_> + 13 9 3 2 2. + <_> + + <_> + 6 10 3 9 -1. + <_> + 6 13 3 3 3. + <_> + + <_> + 14 6 1 6 -1. + <_> + 14 9 1 3 2. + <_> + + <_> + 5 10 4 1 -1. + <_> + 7 10 2 1 2. + <_> + + <_> + 3 8 15 5 -1. + <_> + 8 8 5 5 3. + <_> + + <_> + 1 6 5 4 -1. + <_> + 1 8 5 2 2. + <_> + + <_> + 3 1 17 6 -1. + <_> + 3 3 17 2 3. + <_> + + <_> + 6 7 8 2 -1. + <_> + 10 7 4 2 2. + <_> + + <_> + 9 7 3 2 -1. + <_> + 10 7 1 2 3. + <_> + + <_> + 8 7 3 2 -1. + <_> + 9 7 1 2 3. + <_> + + <_> + 8 9 4 2 -1. + <_> + 8 10 4 1 2. + <_> + + <_> + 8 8 4 3 -1. + <_> + 8 9 4 1 3. + <_> + + <_> + 9 5 6 4 -1. + <_> + 9 5 3 4 2. 
+ <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 4 7 12 6 -1. + <_> + 10 7 6 3 2. + <_> + 4 10 6 3 2. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 9 8 3 1 3. + <_> + + <_> + 7 4 3 8 -1. + <_> + 8 4 1 8 3. + <_> + + <_> + 10 0 3 6 -1. + <_> + 11 0 1 6 3. + <_> + + <_> + 6 3 4 8 -1. + <_> + 8 3 2 8 2. + <_> + + <_> + 14 3 6 13 -1. + <_> + 14 3 3 13 2. + <_> + + <_> + 8 13 3 6 -1. + <_> + 8 16 3 3 2. + <_> + + <_> + 14 3 6 13 -1. + <_> + 14 3 3 13 2. + <_> + + <_> + 0 7 10 4 -1. + <_> + 0 7 5 2 2. + <_> + 5 9 5 2 2. + <_> + + <_> + 14 3 6 13 -1. + <_> + 14 3 3 13 2. + <_> + + <_> + 0 3 6 13 -1. + <_> + 3 3 3 13 2. + <_> + + <_> + 9 1 4 1 -1. + <_> + 9 1 2 1 2. + <_> + + <_> + 8 0 2 1 -1. + <_> + 9 0 1 1 2. + <_> + + <_> + 10 16 4 4 -1. + <_> + 12 16 2 2 2. + <_> + 10 18 2 2 2. + <_> + + <_> + 9 6 2 3 -1. + <_> + 10 6 1 3 2. + <_> + + <_> + 4 5 12 2 -1. + <_> + 8 5 4 2 3. + <_> + + <_> + 8 7 3 5 -1. + <_> + 9 7 1 5 3. + <_> + + <_> + 6 4 8 6 -1. + <_> + 6 6 8 2 3. + <_> + + <_> + 9 5 2 12 -1. + <_> + 9 11 2 6 2. + <_> + + <_> + 4 6 6 8 -1. + <_> + 4 10 6 4 2. + <_> + + <_> + 12 2 8 5 -1. + <_> + 12 2 4 5 2. + <_> + + <_> + 0 8 18 3 -1. + <_> + 0 9 18 1 3. + <_> + + <_> + 8 12 4 8 -1. + <_> + 8 16 4 4 2. + <_> + + <_> + 0 2 8 5 -1. + <_> + 4 2 4 5 2. + <_> + + <_> + 13 11 3 4 -1. + <_> + 13 13 3 2 2. + <_> + + <_> + 5 11 6 1 -1. + <_> + 7 11 2 1 3. + <_> + + <_> + 11 3 3 1 -1. + <_> + 12 3 1 1 3. + <_> + + <_> + 7 13 5 3 -1. + <_> + 7 14 5 1 3. + <_> + + <_> + 11 11 7 6 -1. + <_> + 11 14 7 3 2. + <_> + + <_> + 2 11 7 6 -1. + <_> + 2 14 7 3 2. + <_> + + <_> + 12 14 2 6 -1. + <_> + 12 16 2 2 3. + <_> + + <_> + 8 14 3 3 -1. + <_> + 8 15 3 1 3. + <_> + + <_> + 11 0 3 5 -1. + <_> + 12 0 1 5 3. + <_> + + <_> + 6 1 4 9 -1. + <_> + 8 1 2 9 2. + <_> + + <_> + 10 3 6 1 -1. + <_> + 12 3 2 1 3. + <_> + + <_> + 8 8 3 4 -1. + <_> + 8 10 3 2 2. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 5 18 4 2 -1. + <_> + 5 19 4 1 2. + <_> + + <_> + 2 1 18 6 -1. + <_> + 2 3 18 2 3. + <_> + + <_> + 6 0 3 2 -1. + <_> + 7 0 1 2 3. + <_> + + <_> + 13 8 6 2 -1. + <_> + 16 8 3 1 2. + <_> + 13 9 3 1 2. + <_> + + <_> + 6 10 3 6 -1. + <_> + 6 13 3 3 2. + <_> + + <_> + 0 13 20 4 -1. + <_> + 10 13 10 2 2. + <_> + 0 15 10 2 2. + <_> + + <_> + 7 7 6 5 -1. + <_> + 9 7 2 5 3. + <_> + + <_> + 11 0 2 2 -1. + <_> + 11 1 2 1 2. + <_> + + <_> + 1 8 6 2 -1. + <_> + 1 8 3 1 2. + <_> + 4 9 3 1 2. + <_> + + <_> + 0 2 20 2 -1. + <_> + 10 2 10 1 2. + <_> + 0 3 10 1 2. + <_> + + <_> + 7 14 5 3 -1. + <_> + 7 15 5 1 3. + <_> + + <_> + 7 13 6 6 -1. + <_> + 10 13 3 3 2. + <_> + 7 16 3 3 2. + <_> + + <_> + 9 12 2 3 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 16 11 1 6 -1. + <_> + 16 13 1 2 3. + <_> + + <_> + 3 11 1 6 -1. + <_> + 3 13 1 2 3. + <_> + + <_> + 4 4 14 12 -1. + <_> + 11 4 7 6 2. + <_> + 4 10 7 6 2. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 12 3 3 3 -1. + <_> + 13 3 1 3 3. + <_> + + <_> + 6 6 8 3 -1. + <_> + 6 7 8 1 3. + <_> + + <_> + 12 3 3 3 -1. + <_> + 13 3 1 3 3. + <_> + + <_> + 3 1 4 10 -1. + <_> + 3 1 2 5 2. + <_> + 5 6 2 5 2. + <_> + + <_> + 5 7 10 2 -1. + <_> + 5 7 5 2 2. + <_> + + <_> + 8 7 3 3 -1. + <_> + 9 7 1 3 3. + <_> + + <_> + 15 12 2 3 -1. + <_> + 15 13 2 1 3. + <_> + + <_> + 7 8 3 4 -1. + <_> + 8 8 1 4 3. + <_> + + <_> + 13 4 1 12 -1. + <_> + 13 10 1 6 2. + <_> + + <_> + 4 5 12 12 -1. + <_> + 4 5 6 6 2. + <_> + 10 11 6 6 2. + <_> + + <_> + 7 14 7 3 -1. + <_> + 7 15 7 1 3. + <_> + + <_> + 3 12 2 3 -1. + <_> + 3 13 2 1 3. 
+ <_> + + <_> + 3 2 14 2 -1. + <_> + 10 2 7 1 2. + <_> + 3 3 7 1 2. + <_> + + <_> + 0 1 3 10 -1. + <_> + 1 1 1 10 3. + <_> + + <_> + 9 0 6 5 -1. + <_> + 11 0 2 5 3. + <_> + + <_> + 5 7 6 2 -1. + <_> + 8 7 3 2 2. + <_> + + <_> + 7 1 6 10 -1. + <_> + 7 6 6 5 2. + <_> + + <_> + 1 1 18 3 -1. + <_> + 7 1 6 3 3. + <_> + + <_> + 16 3 3 6 -1. + <_> + 16 5 3 2 3. + <_> + + <_> + 6 3 7 6 -1. + <_> + 6 6 7 3 2. + <_> + + <_> + 4 7 12 2 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 0 4 17 10 -1. + <_> + 0 9 17 5 2. + <_> + + <_> + 3 4 15 16 -1. + <_> + 3 12 15 8 2. + <_> + + <_> + 7 15 6 4 -1. + <_> + 7 17 6 2 2. + <_> + + <_> + 15 2 4 9 -1. + <_> + 15 2 2 9 2. + <_> + + <_> + 2 3 3 2 -1. + <_> + 2 4 3 1 2. + <_> + + <_> + 13 6 7 9 -1. + <_> + 13 9 7 3 3. + <_> + + <_> + 8 11 4 3 -1. + <_> + 8 12 4 1 3. + <_> + + <_> + 0 2 20 6 -1. + <_> + 10 2 10 3 2. + <_> + 0 5 10 3 2. + <_> + + <_> + 3 2 6 10 -1. + <_> + 3 2 3 5 2. + <_> + 6 7 3 5 2. + <_> + + <_> + 13 10 3 4 -1. + <_> + 13 12 3 2 2. + <_> + + <_> + 4 10 3 4 -1. + <_> + 4 12 3 2 2. + <_> + + <_> + 7 5 6 3 -1. + <_> + 9 5 2 3 3. + <_> + + <_> + 7 6 6 8 -1. + <_> + 7 10 6 4 2. + <_> + + <_> + 0 11 20 6 -1. + <_> + 0 14 20 3 2. + <_> + + <_> + 4 13 4 6 -1. + <_> + 4 13 2 3 2. + <_> + 6 16 2 3 2. + <_> + + <_> + 6 0 8 12 -1. + <_> + 10 0 4 6 2. + <_> + 6 6 4 6 2. + <_> + + <_> + 2 0 15 2 -1. + <_> + 2 1 15 1 2. + <_> + + <_> + 9 12 2 3 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 3 12 1 2 -1. + <_> + 3 13 1 1 2. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 7 3 3 1 -1. + <_> + 8 3 1 1 3. + <_> + + <_> + 17 7 3 6 -1. + <_> + 17 9 3 2 3. + <_> + + <_> + 7 2 3 2 -1. + <_> + 8 2 1 2 3. + <_> + + <_> + 11 4 5 3 -1. + <_> + 11 5 5 1 3. + <_> + + <_> + 4 4 5 3 -1. + <_> + 4 5 5 1 3. + <_> + + <_> + 19 3 1 2 -1. + <_> + 19 4 1 1 2. + <_> + + <_> + 5 5 4 3 -1. + <_> + 5 6 4 1 3. + <_> + + <_> + 17 7 3 6 -1. + <_> + 17 9 3 2 3. + <_> + + <_> + 0 7 3 6 -1. + <_> + 0 9 3 2 3. + <_> + + <_> + 14 2 6 9 -1. + <_> + 14 5 6 3 3. + <_> + + <_> + 0 4 5 6 -1. + <_> + 0 6 5 2 3. + <_> + + <_> + 10 5 6 2 -1. + <_> + 12 5 2 2 3. + <_> + + <_> + 4 5 6 2 -1. + <_> + 6 5 2 2 3. + <_> + + <_> + 8 1 4 6 -1. + <_> + 8 3 4 2 3. + <_> + + <_> + 0 2 3 6 -1. + <_> + 0 4 3 2 3. + <_> + + <_> + 6 6 8 3 -1. + <_> + 6 7 8 1 3. + <_> + + <_> + 0 1 5 9 -1. + <_> + 0 4 5 3 3. + <_> + + <_> + 16 0 4 15 -1. + <_> + 16 0 2 15 2. + <_> + + <_> + 1 10 3 2 -1. + <_> + 1 11 3 1 2. + <_> + + <_> + 14 4 1 10 -1. + <_> + 14 9 1 5 2. + <_> + + <_> + 0 1 4 12 -1. + <_> + 2 1 2 12 2. + <_> + + <_> + 11 11 4 2 -1. + <_> + 11 11 2 2 2. + <_> + + <_> + 5 11 4 2 -1. + <_> + 7 11 2 2 2. + <_> + + <_> + 3 8 15 5 -1. + <_> + 8 8 5 5 3. + <_> + + <_> + 0 0 6 10 -1. + <_> + 3 0 3 10 2. + <_> + + <_> + 11 4 3 2 -1. + <_> + 12 4 1 2 3. + <_> + + <_> + 8 12 3 8 -1. + <_> + 8 16 3 4 2. + <_> + + <_> + 8 14 5 3 -1. + <_> + 8 15 5 1 3. + <_> + + <_> + 7 14 4 3 -1. + <_> + 7 15 4 1 3. + <_> + + <_> + 11 4 3 2 -1. + <_> + 12 4 1 2 3. + <_> + + <_> + 3 15 14 4 -1. + <_> + 3 15 7 2 2. + <_> + 10 17 7 2 2. + <_> + + <_> + 2 2 16 4 -1. + <_> + 10 2 8 2 2. + <_> + 2 4 8 2 2. + <_> + + <_> + 0 8 6 12 -1. + <_> + 3 8 3 12 2. + <_> + + <_> + 5 7 10 2 -1. + <_> + 5 7 5 2 2. + <_> + + <_> + 9 7 2 5 -1. + <_> + 10 7 1 5 2. + <_> + + <_> + 13 7 6 4 -1. + <_> + 16 7 3 2 2. + <_> + 13 9 3 2 2. + <_> + + <_> + 0 13 8 2 -1. + <_> + 0 14 8 1 2. + <_> + + <_> + 13 7 6 4 -1. + <_> + 16 7 3 2 2. + <_> + 13 9 3 2 2. + <_> + + <_> + 1 7 6 4 -1. + <_> + 1 7 3 2 2. + <_> + 4 9 3 2 2. + <_> + + <_> + 12 6 1 12 -1. + <_> + 12 12 1 6 2. 
+ <_> + + <_> + 9 5 2 6 -1. + <_> + 10 5 1 6 2. + <_> + + <_> + 14 12 2 3 -1. + <_> + 14 13 2 1 3. + <_> + + <_> + 4 12 2 3 -1. + <_> + 4 13 2 1 3. + <_> + + <_> + 8 12 4 3 -1. + <_> + 8 13 4 1 3. + <_> + + <_> + 5 2 2 4 -1. + <_> + 5 2 1 2 2. + <_> + 6 4 1 2 2. + <_> + + <_> + 5 5 11 3 -1. + <_> + 5 6 11 1 3. + <_> + + <_> + 7 6 4 12 -1. + <_> + 7 12 4 6 2. + <_> + + <_> + 12 13 8 5 -1. + <_> + 12 13 4 5 2. + <_> + + <_> + 7 6 1 12 -1. + <_> + 7 12 1 6 2. + <_> + + <_> + 1 2 6 3 -1. + <_> + 4 2 3 3 2. + <_> + + <_> + 9 5 6 10 -1. + <_> + 12 5 3 5 2. + <_> + 9 10 3 5 2. + <_> + + <_> + 5 5 8 12 -1. + <_> + 5 5 4 6 2. + <_> + 9 11 4 6 2. + <_> + + <_> + 0 7 20 6 -1. + <_> + 0 9 20 2 3. + <_> + + <_> + 4 2 2 2 -1. + <_> + 4 3 2 1 2. + <_> + + <_> + 4 18 12 2 -1. + <_> + 8 18 4 2 3. + <_> + + <_> + 7 4 4 16 -1. + <_> + 7 12 4 8 2. + <_> + + <_> + 7 6 7 8 -1. + <_> + 7 10 7 4 2. + <_> + + <_> + 6 3 3 1 -1. + <_> + 7 3 1 1 3. + <_> + + <_> + 11 15 2 4 -1. + <_> + 11 17 2 2 2. + <_> + + <_> + 3 5 4 8 -1. + <_> + 3 9 4 4 2. + <_> + + <_> + 7 1 6 12 -1. + <_> + 7 7 6 6 2. + <_> + + <_> + 4 6 6 2 -1. + <_> + 6 6 2 2 3. + <_> + + <_> + 16 4 4 6 -1. + <_> + 16 6 4 2 3. + <_> + + <_> + 3 3 5 2 -1. + <_> + 3 4 5 1 2. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 2 16 4 2 -1. + <_> + 2 17 4 1 2. + <_> + + <_> + 7 13 6 6 -1. + <_> + 10 13 3 3 2. + <_> + 7 16 3 3 2. + <_> + + <_> + 7 0 3 4 -1. + <_> + 8 0 1 4 3. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 0 4 4 6 -1. + <_> + 0 6 4 2 3. + <_> + + <_> + 5 6 12 3 -1. + <_> + 9 6 4 3 3. + <_> + + <_> + 7 6 6 14 -1. + <_> + 9 6 2 14 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 6 12 2 4 -1. + <_> + 6 14 2 2 2. + <_> + + <_> + 10 12 7 6 -1. + <_> + 10 14 7 2 3. + <_> + + <_> + 1 0 15 2 -1. + <_> + 1 1 15 1 2. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 0 3 6 2. + <_> + + <_> + 5 3 3 1 -1. + <_> + 6 3 1 1 3. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 0 3 6 2. + <_> + + <_> + 0 3 20 10 -1. + <_> + 0 8 20 5 2. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 0 3 6 2. + <_> + + <_> + 0 0 6 6 -1. + <_> + 3 0 3 6 2. + <_> + + <_> + 19 15 1 2 -1. + <_> + 19 16 1 1 2. + <_> + + <_> + 0 2 4 8 -1. + <_> + 2 2 2 8 2. + <_> + + <_> + 2 1 18 4 -1. + <_> + 11 1 9 2 2. + <_> + 2 3 9 2 2. + <_> + + <_> + 8 12 1 2 -1. + <_> + 8 13 1 1 2. + <_> + + <_> + 5 2 10 6 -1. + <_> + 10 2 5 3 2. + <_> + 5 5 5 3 2. + <_> + + <_> + 9 7 2 4 -1. + <_> + 10 7 1 4 2. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 4 5 12 8 -1. + <_> + 8 5 4 8 3. + <_> + + <_> + 15 15 4 3 -1. + <_> + 15 16 4 1 3. + <_> + + <_> + 8 18 3 1 -1. + <_> + 9 18 1 1 3. + <_> + + <_> + 9 13 4 3 -1. + <_> + 9 14 4 1 3. + <_> + + <_> + 7 13 4 3 -1. + <_> + 7 14 4 1 3. + <_> + + <_> + 19 15 1 2 -1. + <_> + 19 16 1 1 2. + <_> + + <_> + 0 15 8 4 -1. + <_> + 0 17 8 2 2. + <_> + + <_> + 9 3 6 4 -1. + <_> + 11 3 2 4 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 3 14 14 6 -1. + <_> + 3 16 14 2 3. + <_> + + <_> + 6 3 6 6 -1. + <_> + 6 6 6 3 2. + <_> + + <_> + 5 11 10 6 -1. + <_> + 5 14 10 3 2. + <_> + + <_> + 3 10 3 4 -1. + <_> + 4 10 1 4 3. + <_> + + <_> + 13 9 2 2 -1. + <_> + 13 9 1 2 2. + <_> + + <_> + 5 3 6 4 -1. + <_> + 7 3 2 4 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 2 12 2 3 -1. + <_> + 2 13 2 1 3. + <_> + + <_> + 9 8 3 12 -1. + <_> + 9 12 3 4 3. + <_> + + <_> + 3 14 4 6 -1. + <_> + 3 14 2 3 2. + <_> + 5 17 2 3 2. + <_> + + <_> + 16 15 2 2 -1. + <_> + 16 16 2 1 2. + <_> + + <_> + 2 15 2 2 -1. + <_> + 2 16 2 1 2. 
+ <_> + + <_> + 8 12 4 3 -1. + <_> + 8 13 4 1 3. + <_> + + <_> + 0 7 20 1 -1. + <_> + 10 7 10 1 2. + <_> + + <_> + 7 6 8 3 -1. + <_> + 7 6 4 3 2. + <_> + + <_> + 5 7 8 2 -1. + <_> + 9 7 4 2 2. + <_> + + <_> + 9 7 3 5 -1. + <_> + 10 7 1 5 3. + <_> + + <_> + 8 7 3 5 -1. + <_> + 9 7 1 5 3. + <_> + + <_> + 11 1 3 5 -1. + <_> + 12 1 1 5 3. + <_> + + <_> + 6 2 3 6 -1. + <_> + 7 2 1 6 3. + <_> + + <_> + 14 14 6 5 -1. + <_> + 14 14 3 5 2. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 10 7 1 3 -1. + <_> + 10 8 1 1 3. + <_> + + <_> + 6 6 2 2 -1. + <_> + 6 6 1 1 2. + <_> + 7 7 1 1 2. + <_> + + <_> + 2 11 18 4 -1. + <_> + 11 11 9 2 2. + <_> + 2 13 9 2 2. + <_> + + <_> + 6 6 2 2 -1. + <_> + 6 6 1 1 2. + <_> + 7 7 1 1 2. + <_> + + <_> + 0 15 20 2 -1. + <_> + 0 16 20 1 2. + <_> + + <_> + 4 14 2 3 -1. + <_> + 4 15 2 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 8 7 2 3 -1. + <_> + 8 8 2 1 3. + <_> + + <_> + 9 10 2 3 -1. + <_> + 9 11 2 1 3. + <_> + + <_> + 5 4 10 4 -1. + <_> + 5 6 10 2 2. + <_> + + <_> + 9 7 6 4 -1. + <_> + 12 7 3 2 2. + <_> + 9 9 3 2 2. + <_> + + <_> + 4 7 3 6 -1. + <_> + 4 9 3 2 3. + <_> + + <_> + 11 15 4 4 -1. + <_> + 13 15 2 2 2. + <_> + 11 17 2 2 2. + <_> + + <_> + 7 8 4 2 -1. + <_> + 7 9 4 1 2. + <_> + + <_> + 13 1 4 3 -1. + <_> + 13 1 2 3 2. + <_> + + <_> + 5 15 4 4 -1. + <_> + 5 15 2 2 2. + <_> + 7 17 2 2 2. + <_> + + <_> + 9 5 4 7 -1. + <_> + 9 5 2 7 2. + <_> + + <_> + 5 6 8 3 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 7 15 5 3 -1. + <_> + 7 16 5 1 3. + <_> + + <_> + 11 10 4 3 -1. + <_> + 11 10 2 3 2. + <_> + + <_> + 6 9 8 10 -1. + <_> + 6 14 8 5 2. + <_> + + <_> + 10 11 6 2 -1. + <_> + 10 11 3 2 2. + <_> + + <_> + 4 11 6 2 -1. + <_> + 7 11 3 2 2. + <_> + + <_> + 11 3 8 1 -1. + <_> + 11 3 4 1 2. + <_> + + <_> + 6 3 3 2 -1. + <_> + 7 3 1 2 3. + <_> + + <_> + 14 5 6 5 -1. + <_> + 14 5 3 5 2. + <_> + + <_> + 7 5 2 12 -1. + <_> + 7 11 2 6 2. + <_> + + <_> + 8 11 4 3 -1. + <_> + 8 12 4 1 3. + <_> + + <_> + 4 1 2 3 -1. + <_> + 5 1 1 3 2. + <_> + + <_> + 18 3 2 6 -1. + <_> + 18 5 2 2 3. + <_> + + <_> + 0 3 2 6 -1. + <_> + 0 5 2 2 3. + <_> + + <_> + 9 12 2 3 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 7 13 4 3 -1. + <_> + 7 14 4 1 3. + <_> + + <_> + 18 0 2 6 -1. + <_> + 18 2 2 2 3. + <_> + + <_> + 0 0 2 6 -1. + <_> + 0 2 2 2 3. + <_> + + <_> + 8 14 6 3 -1. + <_> + 8 15 6 1 3. + <_> + + <_> + 7 4 2 4 -1. + <_> + 8 4 1 4 2. + <_> + + <_> + 8 5 4 6 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 6 4 2 2 -1. + <_> + 7 4 1 2 2. + <_> + + <_> + 3 14 14 4 -1. + <_> + 10 14 7 2 2. + <_> + 3 16 7 2 2. + <_> + + <_> + 6 15 6 2 -1. + <_> + 6 15 3 1 2. + <_> + 9 16 3 1 2. + <_> + + <_> + 14 15 6 2 -1. + <_> + 14 16 6 1 2. + <_> + + <_> + 2 12 12 8 -1. + <_> + 2 16 12 4 2. + <_> + + <_> + 7 7 7 2 -1. + <_> + 7 8 7 1 2. + <_> + + <_> + 0 2 18 2 -1. + <_> + 0 3 18 1 2. + <_> + + <_> + 9 6 2 5 -1. + <_> + 9 6 1 5 2. + <_> + + <_> + 7 5 3 8 -1. + <_> + 8 5 1 8 3. + <_> + + <_> + 9 6 3 4 -1. + <_> + 10 6 1 4 3. + <_> + + <_> + 4 13 3 2 -1. + <_> + 4 14 3 1 2. + <_> + + <_> + 9 4 6 3 -1. + <_> + 11 4 2 3 3. + <_> + + <_> + 5 4 6 3 -1. + <_> + 7 4 2 3 3. + <_> + + <_> + 14 11 5 2 -1. + <_> + 14 12 5 1 2. + <_> + + <_> + 1 2 6 9 -1. + <_> + 3 2 2 9 3. + <_> + + <_> + 14 6 6 13 -1. + <_> + 14 6 3 13 2. + <_> + + <_> + 3 6 14 8 -1. + <_> + 3 6 7 4 2. + <_> + 10 10 7 4 2. + <_> + + <_> + 16 0 4 11 -1. + <_> + 16 0 2 11 2. + <_> + + <_> + 3 4 12 12 -1. + <_> + 3 4 6 6 2. + <_> + 9 10 6 6 2. + <_> + + <_> + 11 4 5 3 -1. + <_> + 11 5 5 1 3. 
+ <_> + + <_> + 4 11 4 2 -1. + <_> + 4 12 4 1 2. + <_> + + <_> + 10 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 8 7 2 2 -1. + <_> + 9 7 1 2 2. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 5 6 3 3 -1. + <_> + 5 7 3 1 3. + <_> + + <_> + 10 0 3 3 -1. + <_> + 11 0 1 3 3. + <_> + + <_> + 5 6 6 2 -1. + <_> + 5 6 3 1 2. + <_> + 8 7 3 1 2. + <_> + + <_> + 12 16 4 3 -1. + <_> + 12 17 4 1 3. + <_> + + <_> + 3 12 3 2 -1. + <_> + 3 13 3 1 2. + <_> + + <_> + 9 12 3 2 -1. + <_> + 9 13 3 1 2. + <_> + + <_> + 1 11 16 4 -1. + <_> + 1 11 8 2 2. + <_> + 9 13 8 2 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 4 4 5 3 -1. + <_> + 4 5 5 1 3. + <_> + + <_> + 12 16 4 3 -1. + <_> + 12 17 4 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 9 0 2 2 -1. + <_> + 9 1 2 1 2. + <_> + + <_> + 8 9 4 2 -1. + <_> + 8 10 4 1 2. + <_> + + <_> + 8 8 4 3 -1. + <_> + 8 9 4 1 3. + <_> + + <_> + 0 13 6 3 -1. + <_> + 2 13 2 3 3. + <_> + + <_> + 16 14 3 2 -1. + <_> + 16 15 3 1 2. + <_> + + <_> + 1 18 18 2 -1. + <_> + 7 18 6 2 3. + <_> + + <_> + 16 14 3 2 -1. + <_> + 16 15 3 1 2. + <_> + + <_> + 1 14 3 2 -1. + <_> + 1 15 3 1 2. + <_> + + <_> + 7 14 6 3 -1. + <_> + 7 15 6 1 3. + <_> + + <_> + 5 14 8 3 -1. + <_> + 5 15 8 1 3. + <_> + + <_> + 10 6 4 14 -1. + <_> + 10 6 2 14 2. + <_> + + <_> + 6 6 4 14 -1. + <_> + 8 6 2 14 2. + <_> + + <_> + 13 5 2 3 -1. + <_> + 13 6 2 1 3. + <_> + + <_> + 7 16 6 1 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 9 12 3 3 -1. + <_> + 9 13 3 1 3. + <_> + + <_> + 7 0 3 3 -1. + <_> + 8 0 1 3 3. + <_> + + <_> + 4 0 16 18 -1. + <_> + 4 9 16 9 2. + <_> + + <_> + 1 1 16 14 -1. + <_> + 1 8 16 7 2. + <_> + + <_> + 3 9 15 4 -1. + <_> + 8 9 5 4 3. + <_> + + <_> + 6 12 7 3 -1. + <_> + 6 13 7 1 3. + <_> + + <_> + 14 15 2 3 -1. + <_> + 14 16 2 1 3. + <_> + + <_> + 2 3 16 14 -1. + <_> + 2 3 8 7 2. + <_> + 10 10 8 7 2. + <_> + + <_> + 16 2 4 18 -1. + <_> + 18 2 2 9 2. + <_> + 16 11 2 9 2. + <_> + + <_> + 4 15 2 3 -1. + <_> + 4 16 2 1 3. + <_> + + <_> + 16 2 4 18 -1. + <_> + 18 2 2 9 2. + <_> + 16 11 2 9 2. + <_> + + <_> + 1 1 8 3 -1. + <_> + 1 2 8 1 3. + <_> + + <_> + 8 11 4 3 -1. + <_> + 8 12 4 1 3. + <_> + + <_> + 5 11 5 9 -1. + <_> + 5 14 5 3 3. + <_> + + <_> + 16 0 4 11 -1. + <_> + 16 0 2 11 2. + <_> + + <_> + 7 0 6 1 -1. + <_> + 9 0 2 1 3. + <_> + + <_> + 16 3 3 7 -1. + <_> + 17 3 1 7 3. + <_> + + <_> + 1 3 3 7 -1. + <_> + 2 3 1 7 3. + <_> + + <_> + 7 8 6 12 -1. + <_> + 7 12 6 4 3. + <_> + + <_> + 0 0 4 11 -1. + <_> + 2 0 2 11 2. + <_> + + <_> + 14 0 6 20 -1. + <_> + 14 0 3 20 2. + <_> + + <_> + 0 3 1 2 -1. + <_> + 0 4 1 1 2. + <_> + + <_> + 5 5 10 8 -1. + <_> + 10 5 5 4 2. + <_> + 5 9 5 4 2. + <_> + + <_> + 4 7 12 4 -1. + <_> + 4 7 6 2 2. + <_> + 10 9 6 2 2. + <_> + + <_> + 2 1 6 4 -1. + <_> + 5 1 3 4 2. + <_> + + <_> + 9 7 6 4 -1. + <_> + 12 7 3 2 2. + <_> + 9 9 3 2 2. + <_> + + <_> + 5 6 2 6 -1. + <_> + 5 9 2 3 2. + <_> + + <_> + 9 16 6 4 -1. + <_> + 12 16 3 2 2. + <_> + 9 18 3 2 2. + <_> + + <_> + 9 4 2 12 -1. + <_> + 9 10 2 6 2. + <_> + + <_> + 7 1 6 18 -1. + <_> + 9 1 2 18 3. + <_> + + <_> + 4 12 12 2 -1. + <_> + 8 12 4 2 3. + <_> + + <_> + 8 8 6 2 -1. + <_> + 8 9 6 1 2. + <_> + + <_> + 8 0 3 6 -1. + <_> + 9 0 1 6 3. + <_> + + <_> + 11 18 3 2 -1. + <_> + 11 19 3 1 2. + <_> + + <_> + 1 1 17 4 -1. + <_> + 1 3 17 2 2. + <_> + + <_> + 11 8 4 12 -1. + <_> + 11 8 2 12 2. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 12 3 2 17 -1. + <_> + 12 3 1 17 2. + <_> + + <_> + 4 7 6 1 -1. + <_> + 6 7 2 1 3. + <_> + + <_> + 18 3 2 3 -1. + <_> + 18 4 2 1 3. 
+ <_> + + <_> + 8 4 3 4 -1. + <_> + 8 6 3 2 2. + <_> + + <_> + 4 5 12 10 -1. + <_> + 4 10 12 5 2. + <_> + + <_> + 5 18 4 2 -1. + <_> + 7 18 2 2 2. + <_> + + <_> + 17 2 3 6 -1. + <_> + 17 4 3 2 3. + <_> + + <_> + 7 7 6 6 -1. + <_> + 9 7 2 6 3. + <_> + + <_> + 17 2 3 6 -1. + <_> + 17 4 3 2 3. + <_> + + <_> + 8 0 3 4 -1. + <_> + 9 0 1 4 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 0 12 6 3 -1. + <_> + 0 13 6 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 3 12 2 3 -1. + <_> + 3 13 2 1 3. + <_> + + <_> + 5 6 12 7 -1. + <_> + 9 6 4 7 3. + <_> + + <_> + 0 2 3 6 -1. + <_> + 0 4 3 2 3. + <_> + + <_> + 14 6 1 3 -1. + <_> + 14 7 1 1 3. + <_> + + <_> + 2 0 3 14 -1. + <_> + 3 0 1 14 3. + <_> + + <_> + 12 14 5 6 -1. + <_> + 12 16 5 2 3. + <_> + + <_> + 4 14 5 6 -1. + <_> + 4 16 5 2 3. + <_> + + <_> + 11 10 2 2 -1. + <_> + 12 10 1 1 2. + <_> + 11 11 1 1 2. + <_> + + <_> + 5 0 3 14 -1. + <_> + 6 0 1 14 3. + <_> + + <_> + 10 15 2 3 -1. + <_> + 10 16 2 1 3. + <_> + + <_> + 0 2 2 3 -1. + <_> + 0 3 2 1 3. + <_> + + <_> + 5 11 12 6 -1. + <_> + 5 14 12 3 2. + <_> + + <_> + 6 11 3 9 -1. + <_> + 6 14 3 3 3. + <_> + + <_> + 11 10 2 2 -1. + <_> + 12 10 1 1 2. + <_> + 11 11 1 1 2. + <_> + + <_> + 5 6 1 3 -1. + <_> + 5 7 1 1 3. + <_> + + <_> + 4 9 13 3 -1. + <_> + 4 10 13 1 3. + <_> + + <_> + 1 7 15 6 -1. + <_> + 6 7 5 6 3. + <_> + + <_> + 4 5 12 6 -1. + <_> + 8 5 4 6 3. + <_> + + <_> + 8 10 4 3 -1. + <_> + 8 11 4 1 3. + <_> + + <_> + 15 14 1 3 -1. + <_> + 15 15 1 1 3. + <_> + + <_> + 1 11 5 3 -1. + <_> + 1 12 5 1 3. + <_> + + <_> + 7 1 7 12 -1. + <_> + 7 7 7 6 2. + <_> + + <_> + 0 1 6 10 -1. + <_> + 0 1 3 5 2. + <_> + 3 6 3 5 2. + <_> + + <_> + 16 1 4 3 -1. + <_> + 16 2 4 1 3. + <_> + + <_> + 5 5 2 3 -1. + <_> + 5 6 2 1 3. + <_> + + <_> + 12 2 3 5 -1. + <_> + 13 2 1 5 3. + <_> + + <_> + 0 3 4 6 -1. + <_> + 0 5 4 2 3. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 8 18 3 1 -1. + <_> + 9 18 1 1 3. + <_> + + <_> + 11 10 2 2 -1. + <_> + 12 10 1 1 2. + <_> + 11 11 1 1 2. + <_> + + <_> + 7 10 2 2 -1. + <_> + 7 10 1 1 2. + <_> + 8 11 1 1 2. + <_> + + <_> + 11 11 4 4 -1. + <_> + 11 13 4 2 2. + <_> + + <_> + 8 12 3 8 -1. + <_> + 9 12 1 8 3. + <_> + + <_> + 13 0 6 3 -1. + <_> + 13 1 6 1 3. + <_> + + <_> + 8 8 3 4 -1. + <_> + 9 8 1 4 3. + <_> + + <_> + 5 7 10 10 -1. + <_> + 10 7 5 5 2. + <_> + 5 12 5 5 2. + <_> + + <_> + 3 18 8 2 -1. + <_> + 3 18 4 1 2. + <_> + 7 19 4 1 2. + <_> + + <_> + 10 2 6 8 -1. + <_> + 12 2 2 8 3. + <_> + + <_> + 4 2 6 8 -1. + <_> + 6 2 2 8 3. + <_> + + <_> + 11 0 3 7 -1. + <_> + 12 0 1 7 3. + <_> + + <_> + 7 11 2 1 -1. + <_> + 8 11 1 1 2. + <_> + + <_> + 15 14 1 3 -1. + <_> + 15 15 1 1 3. + <_> + + <_> + 7 15 2 2 -1. + <_> + 7 15 1 1 2. + <_> + 8 16 1 1 2. + <_> + + <_> + 15 14 1 3 -1. + <_> + 15 15 1 1 3. + <_> + + <_> + 6 0 3 7 -1. + <_> + 7 0 1 7 3. + <_> + + <_> + 18 1 2 7 -1. + <_> + 18 1 1 7 2. + <_> + + <_> + 2 0 8 20 -1. + <_> + 2 10 8 10 2. + <_> + + <_> + 3 0 15 6 -1. + <_> + 3 2 15 2 3. + <_> + + <_> + 4 3 12 2 -1. + <_> + 4 4 12 1 2. + <_> + + <_> + 16 0 4 5 -1. + <_> + 16 0 2 5 2. + <_> + + <_> + 7 0 3 4 -1. + <_> + 8 0 1 4 3. + <_> + + <_> + 16 0 4 5 -1. + <_> + 16 0 2 5 2. + <_> + + <_> + 1 7 6 13 -1. + <_> + 3 7 2 13 3. + <_> + + <_> + 16 0 4 5 -1. + <_> + 16 0 2 5 2. + <_> + + <_> + 0 0 4 5 -1. + <_> + 2 0 2 5 2. + <_> + + <_> + 14 12 3 6 -1. + <_> + 14 14 3 2 3. + <_> + + <_> + 3 12 3 6 -1. + <_> + 3 14 3 2 3. + <_> + + <_> + 16 1 4 3 -1. + <_> + 16 2 4 1 3. + <_> + + <_> + 8 7 2 10 -1. + <_> + 8 7 1 5 2. 
+ <_> + 9 12 1 5 2. + <_> + + <_> + 11 11 4 4 -1. + <_> + 11 13 4 2 2. + <_> + + <_> + 0 1 4 3 -1. + <_> + 0 2 4 1 3. + <_> + + <_> + 13 4 1 3 -1. + <_> + 13 5 1 1 3. + <_> + + <_> + 7 15 3 5 -1. + <_> + 8 15 1 5 3. + <_> + + <_> + 9 7 3 5 -1. + <_> + 10 7 1 5 3. + <_> + + <_> + 8 7 3 5 -1. + <_> + 9 7 1 5 3. + <_> + + <_> + 10 6 4 14 -1. + <_> + 10 6 2 14 2. + <_> + + <_> + 0 5 5 6 -1. + <_> + 0 7 5 2 3. + <_> + + <_> + 9 5 6 4 -1. + <_> + 9 5 3 4 2. + <_> + + <_> + 0 0 18 10 -1. + <_> + 6 0 6 10 3. + <_> + + <_> + 10 6 4 14 -1. + <_> + 10 6 2 14 2. + <_> + + <_> + 6 6 4 14 -1. + <_> + 8 6 2 14 2. + <_> + + <_> + 13 4 1 3 -1. + <_> + 13 5 1 1 3. + <_> + + <_> + 5 1 2 3 -1. + <_> + 6 1 1 3 2. + <_> + + <_> + 18 1 2 18 -1. + <_> + 19 1 1 9 2. + <_> + 18 10 1 9 2. + <_> + + <_> + 2 1 4 3 -1. + <_> + 2 2 4 1 3. + <_> + + <_> + 18 1 2 18 -1. + <_> + 19 1 1 9 2. + <_> + 18 10 1 9 2. + <_> + + <_> + 1 14 4 6 -1. + <_> + 1 14 2 3 2. + <_> + 3 17 2 3 2. + <_> + + <_> + 10 11 7 6 -1. + <_> + 10 13 7 2 3. + <_> + + <_> + 0 10 6 10 -1. + <_> + 0 10 3 5 2. + <_> + 3 15 3 5 2. + <_> + + <_> + 11 0 3 4 -1. + <_> + 12 0 1 4 3. + <_> + + <_> + 5 10 5 6 -1. + <_> + 5 13 5 3 2. + <_> + + <_> + 14 6 1 8 -1. + <_> + 14 10 1 4 2. + <_> + + <_> + 1 7 18 6 -1. + <_> + 1 7 9 3 2. + <_> + 10 10 9 3 2. + <_> + + <_> + 9 7 2 2 -1. + <_> + 9 7 1 2 2. + <_> + + <_> + 5 9 4 5 -1. + <_> + 7 9 2 5 2. + <_> + + <_> + 7 6 6 3 -1. + <_> + 9 6 2 3 3. + <_> + + <_> + 1 0 18 4 -1. + <_> + 7 0 6 4 3. + <_> + + <_> + 7 15 2 4 -1. + <_> + 7 17 2 2 2. + <_> + + <_> + 1 0 19 9 -1. + <_> + 1 3 19 3 3. + <_> + + <_> + 3 7 3 6 -1. + <_> + 3 9 3 2 3. + <_> + + <_> + 13 7 4 4 -1. + <_> + 15 7 2 2 2. + <_> + 13 9 2 2 2. + <_> + + <_> + 3 7 4 4 -1. + <_> + 3 7 2 2 2. + <_> + 5 9 2 2 2. + <_> + + <_> + 9 6 10 8 -1. + <_> + 9 10 10 4 2. + <_> + + <_> + 3 8 14 12 -1. + <_> + 3 14 14 6 2. + <_> + + <_> + 6 5 10 12 -1. + <_> + 11 5 5 6 2. + <_> + 6 11 5 6 2. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 9 5 6 5 -1. + <_> + 9 5 3 5 2. + <_> + + <_> + 9 4 2 4 -1. + <_> + 9 6 2 2 2. + <_> + + <_> + 9 5 6 5 -1. + <_> + 9 5 3 5 2. + <_> + + <_> + 5 5 6 5 -1. + <_> + 8 5 3 5 2. + <_> + + <_> + 11 2 6 1 -1. + <_> + 13 2 2 1 3. + <_> + + <_> + 3 2 6 1 -1. + <_> + 5 2 2 1 3. + <_> + + <_> + 13 5 2 3 -1. + <_> + 13 6 2 1 3. + <_> + + <_> + 0 10 1 4 -1. + <_> + 0 12 1 2 2. + <_> + + <_> + 13 5 2 3 -1. + <_> + 13 6 2 1 3. + <_> + + <_> + 8 18 3 2 -1. + <_> + 9 18 1 2 3. + <_> + + <_> + 6 15 9 2 -1. + <_> + 6 16 9 1 2. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 18 4 2 4 -1. + <_> + 18 6 2 2 2. + <_> + + <_> + 5 5 2 3 -1. + <_> + 5 6 2 1 3. + <_> + + <_> + 15 16 3 2 -1. + <_> + 15 17 3 1 2. + <_> + + <_> + 0 0 3 9 -1. + <_> + 0 3 3 3 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 9 8 3 1 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 8 8 3 1 3. + <_> + + <_> + 9 5 2 6 -1. + <_> + 9 5 1 6 2. + <_> + + <_> + 8 6 3 4 -1. + <_> + 9 6 1 4 3. + <_> + + <_> + 7 6 8 12 -1. + <_> + 11 6 4 6 2. + <_> + 7 12 4 6 2. + <_> + + <_> + 5 6 8 12 -1. + <_> + 5 6 4 6 2. + <_> + 9 12 4 6 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 2 16 3 2 -1. + <_> + 2 17 3 1 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 2 12 6 6 -1. + <_> + 2 14 6 2 3. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 6 14 6 3 -1. + <_> + 6 15 6 1 3. + <_> + + <_> + 14 15 5 3 -1. + <_> + 14 16 5 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 14 15 5 3 -1. + <_> + 14 16 5 1 3. + <_> + + <_> + 5 3 6 2 -1. 
+ <_> + 7 3 2 2 3. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 1 15 5 3 -1. + <_> + 1 16 5 1 3. + <_> + + <_> + 8 13 4 6 -1. + <_> + 10 13 2 3 2. + <_> + 8 16 2 3 2. + <_> + + <_> + 7 8 3 3 -1. + <_> + 8 8 1 3 3. + <_> + + <_> + 12 0 5 4 -1. + <_> + 12 2 5 2 2. + <_> + + <_> + 0 2 20 2 -1. + <_> + 0 2 10 1 2. + <_> + 10 3 10 1 2. + <_> + + <_> + 1 0 18 4 -1. + <_> + 7 0 6 4 3. + <_> + + <_> + 4 3 6 1 -1. + <_> + 6 3 2 1 3. + <_> + + <_> + 4 18 13 2 -1. + <_> + 4 19 13 1 2. + <_> + + <_> + 2 10 3 6 -1. + <_> + 2 12 3 2 3. + <_> + + <_> + 14 12 6 8 -1. + <_> + 17 12 3 4 2. + <_> + 14 16 3 4 2. + <_> + + <_> + 4 13 10 6 -1. + <_> + 4 13 5 3 2. + <_> + 9 16 5 3 2. + <_> + + <_> + 14 12 1 2 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 14 12 2 2 -1. + <_> + 14 13 2 1 2. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 13 2 1 2. + <_> + + <_> + 8 12 9 2 -1. + <_> + 8 13 9 1 2. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 11 10 3 6 -1. + <_> + 11 13 3 3 2. + <_> + + <_> + 5 6 9 12 -1. + <_> + 5 12 9 6 2. + <_> + + <_> + 11 10 3 6 -1. + <_> + 11 13 3 3 2. + <_> + + <_> + 6 10 3 6 -1. + <_> + 6 13 3 3 2. + <_> + + <_> + 5 4 11 3 -1. + <_> + 5 5 11 1 3. + <_> + + <_> + 7 1 5 10 -1. + <_> + 7 6 5 5 2. + <_> + + <_> + 2 8 18 2 -1. + <_> + 2 9 18 1 2. + <_> + + <_> + 7 17 5 3 -1. + <_> + 7 18 5 1 3. + <_> + + <_> + 5 9 12 1 -1. + <_> + 9 9 4 1 3. + <_> + + <_> + 0 14 6 6 -1. + <_> + 0 14 3 3 2. + <_> + 3 17 3 3 2. + <_> + + <_> + 5 9 12 1 -1. + <_> + 9 9 4 1 3. + <_> + + <_> + 3 9 12 1 -1. + <_> + 7 9 4 1 3. + <_> + + <_> + 14 10 6 7 -1. + <_> + 14 10 3 7 2. + <_> + + <_> + 1 0 16 2 -1. + <_> + 1 1 16 1 2. + <_> + + <_> + 10 9 10 9 -1. + <_> + 10 12 10 3 3. + <_> + + <_> + 0 1 10 2 -1. + <_> + 5 1 5 2 2. + <_> + + <_> + 17 3 2 3 -1. + <_> + 17 4 2 1 3. + <_> + + <_> + 1 3 2 3 -1. + <_> + 1 4 2 1 3. + <_> + + <_> + 9 7 3 6 -1. + <_> + 10 7 1 6 3. + <_> + + <_> + 6 5 4 3 -1. + <_> + 8 5 2 3 2. + <_> + + <_> + 7 5 6 6 -1. + <_> + 9 5 2 6 3. + <_> + + <_> + 3 4 12 12 -1. + <_> + 3 4 6 6 2. + <_> + 9 10 6 6 2. + <_> + + <_> + 9 2 6 15 -1. + <_> + 11 2 2 15 3. + <_> + + <_> + 2 2 6 17 -1. + <_> + 4 2 2 17 3. + <_> + + <_> + 14 10 6 7 -1. + <_> + 14 10 3 7 2. + <_> + + <_> + 0 10 6 7 -1. + <_> + 3 10 3 7 2. + <_> + + <_> + 9 2 6 15 -1. + <_> + 11 2 2 15 3. + <_> + + <_> + 5 2 6 15 -1. + <_> + 7 2 2 15 3. + <_> + + <_> + 17 9 3 6 -1. + <_> + 17 11 3 2 3. + <_> + + <_> + 6 7 6 6 -1. + <_> + 8 7 2 6 3. + <_> + + <_> + 1 10 18 6 -1. + <_> + 10 10 9 3 2. + <_> + 1 13 9 3 2. + <_> + + <_> + 0 9 10 9 -1. + <_> + 0 12 10 3 3. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 5 12 3 4 -1. + <_> + 5 14 3 2 2. + <_> + + <_> + 3 3 16 12 -1. + <_> + 3 9 16 6 2. + <_> + + <_> + 1 1 12 12 -1. + <_> + 1 1 6 6 2. + <_> + 7 7 6 6 2. + <_> + + <_> + 10 4 2 4 -1. + <_> + 11 4 1 2 2. + <_> + 10 6 1 2 2. + <_> + + <_> + 0 9 10 2 -1. + <_> + 0 9 5 1 2. + <_> + 5 10 5 1 2. + <_> + + <_> + 9 11 3 3 -1. + <_> + 9 12 3 1 3. + <_> + + <_> + 3 12 9 2 -1. + <_> + 3 13 9 1 2. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 3 4 13 6 -1. + <_> + 3 6 13 2 3. + <_> + + <_> + 9 7 6 4 -1. + <_> + 12 7 3 2 2. + <_> + 9 9 3 2 2. + <_> + + <_> + 1 0 6 8 -1. + <_> + 4 0 3 8 2. + <_> + + <_> + 9 5 2 12 -1. + <_> + 9 11 2 6 2. + <_> + + <_> + 4 4 3 10 -1. + <_> + 4 9 3 5 2. + <_> + + <_> + 6 17 8 3 -1. + <_> + 6 18 8 1 3. + <_> + + <_> + 0 5 10 6 -1. + <_> + 0 7 10 2 3. + <_> + + <_> + 13 2 3 2 -1. + <_> + 13 3 3 1 2. + <_> + + <_> + 7 5 4 5 -1. 
+ <_> + 9 5 2 5 2. + <_> + + <_> + 12 14 3 6 -1. + <_> + 12 16 3 2 3. + <_> + + <_> + 1 11 8 2 -1. + <_> + 1 12 8 1 2. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 0 5 3 6 -1. + <_> + 0 7 3 2 3. + <_> + + <_> + 13 2 3 2 -1. + <_> + 13 3 3 1 2. + <_> + + <_> + 4 14 4 6 -1. + <_> + 4 14 2 3 2. + <_> + 6 17 2 3 2. + <_> + + <_> + 13 2 3 2 -1. + <_> + 13 3 3 1 2. + <_> + + <_> + 8 2 4 12 -1. + <_> + 8 6 4 4 3. + <_> + + <_> + 14 0 6 8 -1. + <_> + 17 0 3 4 2. + <_> + 14 4 3 4 2. + <_> + + <_> + 7 17 3 2 -1. + <_> + 8 17 1 2 3. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 6 0 8 12 -1. + <_> + 6 0 4 6 2. + <_> + 10 6 4 6 2. + <_> + + <_> + 14 0 2 10 -1. + <_> + 15 0 1 5 2. + <_> + 14 5 1 5 2. + <_> + + <_> + 5 3 8 6 -1. + <_> + 5 3 4 3 2. + <_> + 9 6 4 3 2. + <_> + + <_> + 14 0 6 10 -1. + <_> + 17 0 3 5 2. + <_> + 14 5 3 5 2. + <_> + + <_> + 9 14 1 2 -1. + <_> + 9 15 1 1 2. + <_> + + <_> + 15 10 4 3 -1. + <_> + 15 11 4 1 3. + <_> + + <_> + 8 14 2 3 -1. + <_> + 8 15 2 1 3. + <_> + + <_> + 3 13 14 4 -1. + <_> + 10 13 7 2 2. + <_> + 3 15 7 2 2. + <_> + + <_> + 1 10 4 3 -1. + <_> + 1 11 4 1 3. + <_> + + <_> + 9 11 6 1 -1. + <_> + 11 11 2 1 3. + <_> + + <_> + 5 11 6 1 -1. + <_> + 7 11 2 1 3. + <_> + + <_> + 3 5 16 15 -1. + <_> + 3 10 16 5 3. + <_> + + <_> + 6 12 4 2 -1. + <_> + 8 12 2 2 2. + <_> + + <_> + 4 4 12 10 -1. + <_> + 10 4 6 5 2. + <_> + 4 9 6 5 2. + <_> + + <_> + 8 6 3 4 -1. + <_> + 9 6 1 4 3. + <_> + + <_> + 8 12 4 8 -1. + <_> + 10 12 2 4 2. + <_> + 8 16 2 4 2. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 12 2 3 2 -1. + <_> + 13 2 1 2 3. + <_> + + <_> + 8 15 3 2 -1. + <_> + 8 16 3 1 2. + <_> + + <_> + 6 0 9 14 -1. + <_> + 9 0 3 14 3. + <_> + + <_> + 9 6 2 3 -1. + <_> + 10 6 1 3 2. + <_> + + <_> + 10 8 2 3 -1. + <_> + 10 9 2 1 3. + <_> + + <_> + 0 9 4 6 -1. + <_> + 0 11 4 2 3. + <_> + + <_> + 6 0 8 2 -1. + <_> + 6 1 8 1 2. + <_> + + <_> + 6 14 7 3 -1. + <_> + 6 15 7 1 3. + <_> + + <_> + 8 10 8 9 -1. + <_> + 8 13 8 3 3. + <_> + + <_> + 5 2 3 2 -1. + <_> + 6 2 1 2 3. + <_> + + <_> + 14 1 6 8 -1. + <_> + 17 1 3 4 2. + <_> + 14 5 3 4 2. + <_> + + <_> + 0 1 6 8 -1. + <_> + 0 1 3 4 2. + <_> + 3 5 3 4 2. + <_> + + <_> + 1 2 18 6 -1. + <_> + 10 2 9 3 2. + <_> + 1 5 9 3 2. + <_> + + <_> + 9 3 2 1 -1. + <_> + 10 3 1 1 2. + <_> + + <_> + 13 2 4 6 -1. + <_> + 15 2 2 3 2. + <_> + 13 5 2 3 2. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 13 5 1 3 -1. + <_> + 13 6 1 1 3. + <_> + + <_> + 2 16 5 3 -1. + <_> + 2 17 5 1 3. + <_> + + <_> + 13 2 4 6 -1. + <_> + 15 2 2 3 2. + <_> + 13 5 2 3 2. + <_> + + <_> + 3 2 4 6 -1. + <_> + 3 2 2 3 2. + <_> + 5 5 2 3 2. + <_> + + <_> + 13 5 1 2 -1. + <_> + 13 6 1 1 2. + <_> + + <_> + 5 5 2 2 -1. + <_> + 5 6 2 1 2. + <_> + + <_> + 13 9 2 2 -1. + <_> + 13 9 1 2 2. + <_> + + <_> + 5 9 2 2 -1. + <_> + 6 9 1 2 2. + <_> + + <_> + 13 17 3 2 -1. + <_> + 13 18 3 1 2. + <_> + + <_> + 6 16 4 4 -1. + <_> + 6 16 2 2 2. + <_> + 8 18 2 2 2. + <_> + + <_> + 9 16 2 3 -1. + <_> + 9 17 2 1 3. + <_> + + <_> + 0 13 9 6 -1. + <_> + 0 15 9 2 3. + <_> + + <_> + 9 14 2 6 -1. + <_> + 9 17 2 3 2. + <_> + + <_> + 9 15 2 3 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 1 10 18 6 -1. + <_> + 1 12 18 2 3. + <_> + + <_> + 8 11 4 2 -1. + <_> + 8 12 4 1 2. + <_> + + <_> + 7 9 6 2 -1. + <_> + 7 10 6 1 2. + <_> + + <_> + 8 8 2 3 -1. + <_> + 8 9 2 1 3. + <_> + + <_> + 17 5 3 4 -1. + <_> + 18 5 1 4 3. + <_> + + <_> + 1 19 18 1 -1. + <_> + 7 19 6 1 3. + <_> + + <_> + 9 0 3 2 -1. + <_> + 10 0 1 2 3. + <_> + + <_> + 1 8 1 6 -1. 
+ <_> + 1 10 1 2 3. + <_> + + <_> + 12 17 8 3 -1. + <_> + 12 17 4 3 2. + <_> + + <_> + 0 5 3 4 -1. + <_> + 1 5 1 4 3. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 7 11 2 2 -1. + <_> + 7 11 1 1 2. + <_> + 8 12 1 1 2. + <_> + + <_> + 11 3 2 5 -1. + <_> + 11 3 1 5 2. + <_> + + <_> + 7 3 2 5 -1. + <_> + 8 3 1 5 2. + <_> + + <_> + 15 13 2 3 -1. + <_> + 15 14 2 1 3. + <_> + + <_> + 5 6 2 3 -1. + <_> + 5 7 2 1 3. + <_> + + <_> + 4 19 15 1 -1. + <_> + 9 19 5 1 3. + <_> + + <_> + 1 19 15 1 -1. + <_> + 6 19 5 1 3. + <_> + + <_> + 15 13 2 3 -1. + <_> + 15 14 2 1 3. + <_> + + <_> + 5 0 4 15 -1. + <_> + 7 0 2 15 2. + <_> + + <_> + 9 6 2 5 -1. + <_> + 9 6 1 5 2. + <_> + + <_> + 9 5 2 7 -1. + <_> + 10 5 1 7 2. + <_> + + <_> + 16 11 3 3 -1. + <_> + 16 12 3 1 3. + <_> + + <_> + 1 11 3 3 -1. + <_> + 1 12 3 1 3. + <_> + + <_> + 6 6 8 3 -1. + <_> + 6 7 8 1 3. + <_> + + <_> + 0 15 6 2 -1. + <_> + 0 16 6 1 2. + <_> + + <_> + 1 0 18 6 -1. + <_> + 7 0 6 6 3. + <_> + + <_> + 6 0 3 4 -1. + <_> + 7 0 1 4 3. + <_> + + <_> + 14 10 4 10 -1. + <_> + 16 10 2 5 2. + <_> + 14 15 2 5 2. + <_> + + <_> + 3 2 3 2 -1. + <_> + 4 2 1 2 3. + <_> + + <_> + 11 2 2 2 -1. + <_> + 11 3 2 1 2. + <_> + + <_> + 2 10 4 10 -1. + <_> + 2 10 2 5 2. + <_> + 4 15 2 5 2. + <_> + + <_> + 0 13 20 6 -1. + <_> + 10 13 10 3 2. + <_> + 0 16 10 3 2. + <_> + + <_> + 0 5 2 15 -1. + <_> + 1 5 1 15 2. + <_> + + <_> + 1 7 18 4 -1. + <_> + 10 7 9 2 2. + <_> + 1 9 9 2 2. + <_> + + <_> + 0 0 2 17 -1. + <_> + 1 0 1 17 2. + <_> + + <_> + 2 6 16 6 -1. + <_> + 10 6 8 3 2. + <_> + 2 9 8 3 2. + <_> + + <_> + 8 14 1 3 -1. + <_> + 8 15 1 1 3. + <_> + + <_> + 8 15 4 2 -1. + <_> + 8 16 4 1 2. + <_> + + <_> + 5 2 8 2 -1. + <_> + 5 2 4 1 2. + <_> + 9 3 4 1 2. + <_> + + <_> + 6 11 8 6 -1. + <_> + 6 14 8 3 2. + <_> + + <_> + 9 13 2 2 -1. + <_> + 9 14 2 1 2. + <_> + + <_> + 18 4 2 6 -1. + <_> + 18 6 2 2 3. + <_> + + <_> + 9 12 2 2 -1. + <_> + 9 13 2 1 2. + <_> + + <_> + 18 4 2 6 -1. + <_> + 18 6 2 2 3. + <_> + + <_> + 9 13 1 3 -1. + <_> + 9 14 1 1 3. + <_> + + <_> + 18 4 2 6 -1. + <_> + 18 6 2 2 3. + <_> + + <_> + 0 4 2 6 -1. + <_> + 0 6 2 2 3. + <_> + + <_> + 9 12 3 3 -1. + <_> + 9 13 3 1 3. + <_> + + <_> + 3 13 2 3 -1. + <_> + 3 14 2 1 3. + <_> + + <_> + 13 13 4 3 -1. + <_> + 13 14 4 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 5 2 10 6 -1. + <_> + 5 4 10 2 3. + <_> + + <_> + 3 13 4 3 -1. + <_> + 3 14 4 1 3. + <_> + + <_> + 3 7 15 5 -1. + <_> + 8 7 5 5 3. + <_> + + <_> + 3 7 12 2 -1. + <_> + 7 7 4 2 3. + <_> + + <_> + 10 3 3 9 -1. + <_> + 11 3 1 9 3. + <_> + + <_> + 8 6 4 6 -1. + <_> + 10 6 2 6 2. + <_> + + <_> + 9 7 4 3 -1. + <_> + 9 8 4 1 3. + <_> + + <_> + 0 9 4 9 -1. + <_> + 2 9 2 9 2. + <_> + + <_> + 9 13 3 5 -1. + <_> + 10 13 1 5 3. + <_> + + <_> + 7 7 6 3 -1. + <_> + 9 7 2 3 3. + <_> + + <_> + 9 7 3 5 -1. + <_> + 10 7 1 5 3. + <_> + + <_> + 5 7 8 2 -1. + <_> + 9 7 4 2 2. + <_> + + <_> + 5 9 12 2 -1. + <_> + 9 9 4 2 3. + <_> + + <_> + 5 6 10 3 -1. + <_> + 10 6 5 3 2. + <_> + + <_> + 10 12 3 1 -1. + <_> + 11 12 1 1 3. + <_> + + <_> + 0 1 11 15 -1. + <_> + 0 6 11 5 3. + <_> + + <_> + 1 0 18 6 -1. + <_> + 7 0 6 6 3. + <_> + + <_> + 7 7 6 1 -1. + <_> + 9 7 2 1 3. + <_> + + <_> + 5 16 6 4 -1. + <_> + 5 16 3 2 2. + <_> + 8 18 3 2 2. + <_> + + <_> + 6 5 9 8 -1. + <_> + 6 9 9 4 2. + <_> + + <_> + 5 10 2 6 -1. + <_> + 5 13 2 3 2. + <_> + + <_> + 7 6 8 10 -1. + <_> + 11 6 4 5 2. + <_> + 7 11 4 5 2. + <_> + + <_> + 5 6 8 10 -1. + <_> + 5 6 4 5 2. + <_> + 9 11 4 5 2. + <_> + + <_> + 9 5 2 2 -1. + <_> + 9 6 2 1 2. 
+ <_> + + <_> + 5 12 8 2 -1. + <_> + 5 13 8 1 2. + <_> + + <_> + 10 2 8 2 -1. + <_> + 10 3 8 1 2. + <_> + + <_> + 4 0 2 10 -1. + <_> + 4 0 1 5 2. + <_> + 5 5 1 5 2. + <_> + + <_> + 9 10 2 2 -1. + <_> + 9 11 2 1 2. + <_> + + <_> + 2 8 15 3 -1. + <_> + 2 9 15 1 3. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 7 2 3 2 -1. + <_> + 8 2 1 2 3. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 17 2 3 6 -1. + <_> + 17 4 3 2 3. + <_> + + <_> + 1 5 3 4 -1. + <_> + 2 5 1 4 3. + <_> + + <_> + 14 8 4 6 -1. + <_> + 14 10 4 2 3. + <_> + + <_> + 1 4 3 8 -1. + <_> + 2 4 1 8 3. + <_> + + <_> + 8 13 4 6 -1. + <_> + 8 16 4 3 2. + <_> + + <_> + 3 14 2 2 -1. + <_> + 3 15 2 1 2. + <_> + + <_> + 14 8 4 6 -1. + <_> + 14 10 4 2 3. + <_> + + <_> + 2 8 4 6 -1. + <_> + 2 10 4 2 3. + <_> + + <_> + 10 14 1 6 -1. + <_> + 10 17 1 3 2. + <_> + + <_> + 7 5 3 6 -1. + <_> + 8 5 1 6 3. + <_> + + <_> + 11 2 2 6 -1. + <_> + 12 2 1 3 2. + <_> + 11 5 1 3 2. + <_> + + <_> + 6 6 6 5 -1. + <_> + 8 6 2 5 3. + <_> + + <_> + 17 1 3 6 -1. + <_> + 17 3 3 2 3. + <_> + + <_> + 8 7 3 5 -1. + <_> + 9 7 1 5 3. + <_> + + <_> + 9 18 3 2 -1. + <_> + 10 18 1 2 3. + <_> + + <_> + 8 18 3 2 -1. + <_> + 9 18 1 2 3. + <_> + + <_> + 12 3 5 2 -1. + <_> + 12 4 5 1 2. + <_> + + <_> + 7 1 5 12 -1. + <_> + 7 7 5 6 2. + <_> + + <_> + 1 0 18 4 -1. + <_> + 7 0 6 4 3. + <_> + + <_> + 4 2 2 2 -1. + <_> + 4 3 2 1 2. + <_> + + <_> + 11 14 4 2 -1. + <_> + 13 14 2 1 2. + <_> + 11 15 2 1 2. + <_> + + <_> + 0 2 3 6 -1. + <_> + 0 4 3 2 3. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 5 5 1 3 -1. + <_> + 5 6 1 1 3. + <_> + + <_> + 10 10 6 1 -1. + <_> + 10 10 3 1 2. + <_> + + <_> + 4 10 6 1 -1. + <_> + 7 10 3 1 2. + <_> + + <_> + 9 17 3 3 -1. + <_> + 9 18 3 1 3. + <_> + + <_> + 4 14 1 3 -1. + <_> + 4 15 1 1 3. + <_> + + <_> + 12 5 3 3 -1. + <_> + 12 6 3 1 3. + <_> + + <_> + 4 5 12 3 -1. + <_> + 4 6 12 1 3. + <_> + + <_> + 9 8 2 3 -1. + <_> + 9 9 2 1 3. + <_> + + <_> + 4 9 3 3 -1. + <_> + 5 9 1 3 3. + <_> + + <_> + 6 0 9 17 -1. + <_> + 9 0 3 17 3. + <_> + + <_> + 9 12 1 3 -1. + <_> + 9 13 1 1 3. + <_> + + <_> + 9 5 2 15 -1. + <_> + 9 10 2 5 3. + <_> + + <_> + 8 14 2 3 -1. + <_> + 8 15 2 1 3. + <_> + + <_> + 10 14 1 3 -1. + <_> + 10 15 1 1 3. + <_> + + <_> + 7 1 6 5 -1. + <_> + 9 1 2 5 3. + <_> + + <_> + 0 0 20 2 -1. + <_> + 0 0 10 2 2. + <_> + + <_> + 2 13 5 3 -1. + <_> + 2 14 5 1 3. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 2 5 9 15 -1. + <_> + 2 10 9 5 3. + <_> + + <_> + 5 0 12 10 -1. + <_> + 11 0 6 5 2. + <_> + 5 5 6 5 2. + <_> + + <_> + 5 1 2 3 -1. + <_> + 6 1 1 3 2. + <_> + + <_> + 10 7 6 1 -1. + <_> + 12 7 2 1 3. + <_> + + <_> + 3 1 2 10 -1. + <_> + 3 1 1 5 2. + <_> + 4 6 1 5 2. + <_> + + <_> + 13 7 2 1 -1. + <_> + 13 7 1 1 2. + <_> + + <_> + 4 13 4 6 -1. + <_> + 4 15 4 2 3. + <_> + + <_> + 13 7 2 1 -1. + <_> + 13 7 1 1 2. + <_> + + <_> + 5 7 2 1 -1. + <_> + 6 7 1 1 2. + <_> + + <_> + 2 12 18 4 -1. + <_> + 11 12 9 2 2. + <_> + 2 14 9 2 2. + <_> + + <_> + 5 7 2 2 -1. + <_> + 5 7 1 1 2. + <_> + 6 8 1 1 2. + <_> + + <_> + 16 3 4 2 -1. + <_> + 16 4 4 1 2. + <_> + + <_> + 0 2 2 18 -1. + <_> + 0 2 1 9 2. + <_> + 1 11 1 9 2. + <_> + + <_> + 1 2 18 4 -1. + <_> + 10 2 9 2 2. + <_> + 1 4 9 2 2. + <_> + + <_> + 9 14 1 3 -1. + <_> + 9 15 1 1 3. + <_> + + <_> + 2 12 18 4 -1. + <_> + 11 12 9 2 2. + <_> + 2 14 9 2 2. + <_> + + <_> + 0 12 18 4 -1. + <_> + 0 12 9 2 2. + <_> + 9 14 9 2 2. + <_> + + <_> + 11 4 5 3 -1. + <_> + 11 5 5 1 3. + <_> + + <_> + 6 4 7 3 -1. 
+ <_> + 6 5 7 1 3. + <_> + + <_> + 13 17 3 3 -1. + <_> + 13 18 3 1 3. + <_> + + <_> + 8 1 3 4 -1. + <_> + 9 1 1 4 3. + <_> + + <_> + 11 4 2 4 -1. + <_> + 11 4 1 4 2. + <_> + + <_> + 0 17 9 3 -1. + <_> + 3 17 3 3 3. + <_> + + <_> + 11 0 2 8 -1. + <_> + 12 0 1 4 2. + <_> + 11 4 1 4 2. + <_> + + <_> + 0 8 6 12 -1. + <_> + 0 8 3 6 2. + <_> + 3 14 3 6 2. + <_> + + <_> + 10 7 4 12 -1. + <_> + 10 13 4 6 2. + <_> + + <_> + 5 3 8 14 -1. + <_> + 5 10 8 7 2. + <_> + + <_> + 14 10 6 1 -1. + <_> + 14 10 3 1 2. + <_> + + <_> + 0 4 10 4 -1. + <_> + 0 6 10 2 2. + <_> + + <_> + 10 0 5 8 -1. + <_> + 10 4 5 4 2. + <_> + + <_> + 8 1 4 8 -1. + <_> + 8 1 2 4 2. + <_> + 10 5 2 4 2. + <_> + + <_> + 9 11 6 1 -1. + <_> + 11 11 2 1 3. + <_> + + <_> + 8 9 3 4 -1. + <_> + 9 9 1 4 3. + <_> + + <_> + 18 4 2 6 -1. + <_> + 18 6 2 2 3. + <_> + + <_> + 8 8 3 4 -1. + <_> + 9 8 1 4 3. + <_> + + <_> + 7 1 13 3 -1. + <_> + 7 2 13 1 3. + <_> + + <_> + 7 13 6 1 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 12 11 3 6 -1. + <_> + 12 13 3 2 3. + <_> + + <_> + 5 11 6 1 -1. + <_> + 7 11 2 1 3. + <_> + + <_> + 1 4 18 10 -1. + <_> + 10 4 9 5 2. + <_> + 1 9 9 5 2. + <_> + + <_> + 8 6 4 9 -1. + <_> + 8 9 4 3 3. + <_> + + <_> + 8 6 4 3 -1. + <_> + 8 7 4 1 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 9 7 1 3 3. + <_> + + <_> + 14 15 4 3 -1. + <_> + 14 16 4 1 3. + <_> + + <_> + 5 10 3 10 -1. + <_> + 6 10 1 10 3. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 0 8 1 6 -1. + <_> + 0 10 1 2 3. + <_> + + <_> + 10 15 1 3 -1. + <_> + 10 16 1 1 3. + <_> + + <_> + 2 15 4 3 -1. + <_> + 2 16 4 1 3. + <_> + + <_> + 18 3 2 8 -1. + <_> + 19 3 1 4 2. + <_> + 18 7 1 4 2. + <_> + + <_> + 0 3 2 8 -1. + <_> + 0 3 1 4 2. + <_> + 1 7 1 4 2. + <_> + + <_> + 3 7 14 10 -1. + <_> + 10 7 7 5 2. + <_> + 3 12 7 5 2. + <_> + + <_> + 0 7 19 3 -1. + <_> + 0 8 19 1 3. + <_> + + <_> + 12 6 3 3 -1. + <_> + 12 7 3 1 3. + <_> + + <_> + 0 6 1 3 -1. + <_> + 0 7 1 1 3. + <_> + + <_> + 12 6 3 3 -1. + <_> + 12 7 3 1 3. + <_> + + <_> + 5 6 3 3 -1. + <_> + 5 7 3 1 3. + <_> + + <_> + 8 2 4 2 -1. + <_> + 8 3 4 1 2. + <_> + + <_> + 6 3 4 12 -1. + <_> + 8 3 2 12 2. + <_> + + <_> + 13 6 2 3 -1. + <_> + 13 7 2 1 3. + <_> + + <_> + 0 10 20 4 -1. + <_> + 0 12 20 2 2. + <_> + + <_> + 2 0 17 14 -1. + <_> + 2 7 17 7 2. + <_> + + <_> + 0 0 6 10 -1. + <_> + 0 0 3 5 2. + <_> + 3 5 3 5 2. + <_> + + <_> + 14 6 6 4 -1. + <_> + 14 6 3 4 2. + <_> + + <_> + 0 6 6 4 -1. + <_> + 3 6 3 4 2. + <_> + + <_> + 13 2 7 2 -1. + <_> + 13 3 7 1 2. + <_> + + <_> + 0 2 7 2 -1. + <_> + 0 3 7 1 2. + <_> + + <_> + 6 11 14 2 -1. + <_> + 13 11 7 1 2. + <_> + 6 12 7 1 2. + <_> + + <_> + 8 5 2 2 -1. + <_> + 8 5 1 1 2. + <_> + 9 6 1 1 2. + <_> + + <_> + 13 9 2 3 -1. + <_> + 13 9 1 3 2. + <_> + + <_> + 1 1 3 12 -1. + <_> + 2 1 1 12 3. + <_> + + <_> + 17 4 1 3 -1. + <_> + 17 5 1 1 3. + <_> + + <_> + 2 4 1 3 -1. + <_> + 2 5 1 1 3. + <_> + + <_> + 14 5 1 3 -1. + <_> + 14 6 1 1 3. + <_> + + <_> + 7 16 2 3 -1. + <_> + 7 17 2 1 3. + <_> + + <_> + 8 13 4 6 -1. + <_> + 10 13 2 3 2. + <_> + 8 16 2 3 2. + <_> + + <_> + 5 5 1 3 -1. + <_> + 5 6 1 1 3. + <_> + + <_> + 16 0 4 20 -1. + <_> + 16 0 2 20 2. + <_> + + <_> + 5 1 2 6 -1. + <_> + 5 1 1 3 2. + <_> + 6 4 1 3 2. + <_> + + <_> + 5 4 10 4 -1. + <_> + 5 6 10 2 2. + <_> + + <_> + 15 2 4 12 -1. + <_> + 15 2 2 12 2. + <_> + + <_> + 7 6 4 12 -1. + <_> + 7 12 4 6 2. + <_> + + <_> + 14 5 1 8 -1. + <_> + 14 9 1 4 2. + <_> + + <_> + 1 4 14 10 -1. + <_> + 1 4 7 5 2. + <_> + 8 9 7 5 2. + <_> + + <_> + 11 6 6 14 -1. + <_> + 14 6 3 7 2. + <_> + 11 13 3 7 2. + <_> + + <_> + 3 6 6 14 -1. 
+ <_> + 3 6 3 7 2. + <_> + 6 13 3 7 2. + <_> + + <_> + 4 9 15 2 -1. + <_> + 9 9 5 2 3. + <_> + + <_> + 7 14 6 3 -1. + <_> + 7 15 6 1 3. + <_> + + <_> + 6 3 14 4 -1. + <_> + 13 3 7 2 2. + <_> + 6 5 7 2 2. + <_> + + <_> + 1 9 15 2 -1. + <_> + 6 9 5 2 3. + <_> + + <_> + 6 11 8 9 -1. + <_> + 6 14 8 3 3. + <_> + + <_> + 7 4 3 8 -1. + <_> + 8 4 1 8 3. + <_> + + <_> + 14 6 2 6 -1. + <_> + 14 9 2 3 2. + <_> + + <_> + 5 7 6 4 -1. + <_> + 5 7 3 2 2. + <_> + 8 9 3 2 2. + <_> + + <_> + 1 1 18 19 -1. + <_> + 7 1 6 19 3. + <_> + + <_> + 1 2 6 5 -1. + <_> + 4 2 3 5 2. + <_> + + <_> + 12 17 6 2 -1. + <_> + 12 18 6 1 2. + <_> + + <_> + 2 17 6 2 -1. + <_> + 2 18 6 1 2. + <_> + + <_> + 17 3 3 6 -1. + <_> + 17 5 3 2 3. + <_> + + <_> + 8 17 3 3 -1. + <_> + 8 18 3 1 3. + <_> + + <_> + 10 13 2 6 -1. + <_> + 10 16 2 3 2. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 17 3 3 6 -1. + <_> + 17 5 3 2 3. + <_> + + <_> + 8 13 2 3 -1. + <_> + 8 14 2 1 3. + <_> + + <_> + 9 3 6 2 -1. + <_> + 11 3 2 2 3. + <_> + + <_> + 0 3 3 6 -1. + <_> + 0 5 3 2 3. + <_> + + <_> + 8 5 4 6 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 5 5 3 2 -1. + <_> + 5 6 3 1 2. + <_> + + <_> + 10 1 3 4 -1. + <_> + 11 1 1 4 3. + <_> + + <_> + 1 2 5 9 -1. + <_> + 1 5 5 3 3. + <_> + + <_> + 13 6 2 3 -1. + <_> + 13 7 2 1 3. + <_> + + <_> + 0 6 14 3 -1. + <_> + 7 6 7 3 2. + <_> + + <_> + 2 11 18 8 -1. + <_> + 2 15 18 4 2. + <_> + + <_> + 5 6 2 3 -1. + <_> + 5 7 2 1 3. + <_> + + <_> + 10 6 4 2 -1. + <_> + 12 6 2 1 2. + <_> + 10 7 2 1 2. + <_> + + <_> + 6 6 4 2 -1. + <_> + 6 6 2 1 2. + <_> + 8 7 2 1 2. + <_> + + <_> + 10 1 3 4 -1. + <_> + 11 1 1 4 3. + <_> + + <_> + 7 1 2 7 -1. + <_> + 8 1 1 7 2. + <_> + + <_> + 4 2 15 14 -1. + <_> + 4 9 15 7 2. + <_> + + <_> + 8 7 3 2 -1. + <_> + 9 7 1 2 3. + <_> + + <_> + 2 3 18 4 -1. + <_> + 11 3 9 2 2. + <_> + 2 5 9 2 2. + <_> + + <_> + 9 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 13 9 2 3 -1. + <_> + 13 9 1 3 2. + <_> + + <_> + 5 2 6 2 -1. + <_> + 7 2 2 2 3. + <_> + + <_> + 9 5 2 7 -1. + <_> + 9 5 1 7 2. + <_> + + <_> + 5 9 2 3 -1. + <_> + 6 9 1 3 2. + <_> + + <_> + 6 0 14 18 -1. + <_> + 6 9 14 9 2. + <_> + + <_> + 2 16 6 3 -1. + <_> + 2 17 6 1 3. + <_> + + <_> + 9 7 3 6 -1. + <_> + 10 7 1 6 3. + <_> + + <_> + 7 8 4 3 -1. + <_> + 7 9 4 1 3. + <_> + + <_> + 7 12 6 3 -1. + <_> + 7 13 6 1 3. + <_> + + <_> + 9 12 2 3 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 7 12 6 2 -1. + <_> + 9 12 2 2 3. + <_> + + <_> + 5 11 4 6 -1. + <_> + 5 14 4 3 2. + <_> + + <_> + 11 12 7 2 -1. + <_> + 11 13 7 1 2. + <_> + + <_> + 6 10 8 6 -1. + <_> + 6 10 4 3 2. + <_> + 10 13 4 3 2. + <_> + + <_> + 11 10 3 4 -1. + <_> + 11 12 3 2 2. + <_> + + <_> + 9 16 2 3 -1. + <_> + 9 17 2 1 3. + <_> + + <_> + 13 3 1 9 -1. + <_> + 13 6 1 3 3. + <_> + + <_> + 1 13 14 6 -1. + <_> + 1 15 14 2 3. + <_> + + <_> + 13 6 1 6 -1. + <_> + 13 9 1 3 2. + <_> + + <_> + 0 4 3 8 -1. + <_> + 1 4 1 8 3. + <_> + + <_> + 18 0 2 18 -1. + <_> + 18 0 1 18 2. + <_> + + <_> + 2 3 6 2 -1. + <_> + 2 4 6 1 2. + <_> + + <_> + 9 0 8 6 -1. + <_> + 9 2 8 2 3. + <_> + + <_> + 6 6 1 6 -1. + <_> + 6 9 1 3 2. + <_> + + <_> + 14 8 6 3 -1. + <_> + 14 9 6 1 3. + <_> + + <_> + 0 0 2 18 -1. + <_> + 1 0 1 18 2. + <_> + + <_> + 1 18 18 2 -1. + <_> + 10 18 9 1 2. + <_> + 1 19 9 1 2. + <_> + + <_> + 3 15 2 2 -1. + <_> + 3 16 2 1 2. + <_> + + <_> + 8 14 5 3 -1. + <_> + 8 15 5 1 3. + <_> + + <_> + 8 14 2 3 -1. + <_> + 8 15 2 1 3. + <_> + + <_> + 12 3 3 3 -1. + <_> + 13 3 1 3 3. + <_> + + <_> + 7 5 6 2 -1. + <_> + 9 5 2 2 3. + <_> + + <_> + 15 5 5 2 -1. + <_> + 15 6 5 1 2. 
+ <_> + + <_> + 0 5 5 2 -1. + <_> + 0 6 5 1 2. + <_> + + <_> + 17 14 1 6 -1. + <_> + 17 17 1 3 2. + <_> + + <_> + 2 9 9 3 -1. + <_> + 5 9 3 3 3. + <_> + + <_> + 12 3 3 3 -1. + <_> + 13 3 1 3 3. + <_> + + <_> + 0 0 4 18 -1. + <_> + 2 0 2 18 2. + <_> + + <_> + 17 6 1 3 -1. + <_> + 17 7 1 1 3. + <_> + + <_> + 2 14 1 6 -1. + <_> + 2 17 1 3 2. + <_> + + <_> + 19 8 1 2 -1. + <_> + 19 9 1 1 2. + <_> + + <_> + 5 3 3 3 -1. + <_> + 6 3 1 3 3. + <_> + + <_> + 9 16 2 3 -1. + <_> + 9 17 2 1 3. + <_> + + <_> + 2 6 1 3 -1. + <_> + 2 7 1 1 3. + <_> + + <_> + 12 4 8 2 -1. + <_> + 16 4 4 1 2. + <_> + 12 5 4 1 2. + <_> + + <_> + 0 4 8 2 -1. + <_> + 0 4 4 1 2. + <_> + 4 5 4 1 2. + <_> + + <_> + 2 16 18 4 -1. + <_> + 2 18 18 2 2. + <_> + + <_> + 7 15 2 4 -1. + <_> + 7 17 2 2 2. + <_> + + <_> + 4 0 14 3 -1. + <_> + 4 1 14 1 3. + <_> + + <_> + 0 0 4 20 -1. + <_> + 2 0 2 20 2. + <_> + + <_> + 12 4 4 8 -1. + <_> + 14 4 2 4 2. + <_> + 12 8 2 4 2. + <_> + + <_> + 6 7 2 2 -1. + <_> + 6 7 1 1 2. + <_> + 7 8 1 1 2. + <_> + + <_> + 10 6 2 3 -1. + <_> + 10 7 2 1 3. + <_> + + <_> + 8 7 3 2 -1. + <_> + 8 8 3 1 2. + <_> + + <_> + 8 2 6 12 -1. + <_> + 8 8 6 6 2. + <_> + + <_> + 4 0 11 12 -1. + <_> + 4 4 11 4 3. + <_> + + <_> + 14 9 6 11 -1. + <_> + 16 9 2 11 3. + <_> + + <_> + 0 14 4 3 -1. + <_> + 0 15 4 1 3. + <_> + + <_> + 9 10 2 3 -1. + <_> + 9 11 2 1 3. + <_> + + <_> + 5 11 3 2 -1. + <_> + 5 12 3 1 2. + <_> + + <_> + 9 15 3 3 -1. + <_> + 10 15 1 3 3. + <_> + + <_> + 8 8 3 4 -1. + <_> + 9 8 1 4 3. + <_> + + <_> + 9 15 3 3 -1. + <_> + 10 15 1 3 3. + <_> + + <_> + 7 7 3 2 -1. + <_> + 8 7 1 2 3. + <_> + + <_> + 2 10 16 4 -1. + <_> + 10 10 8 2 2. + <_> + 2 12 8 2 2. + <_> + + <_> + 2 3 4 17 -1. + <_> + 4 3 2 17 2. + <_> + + <_> + 15 13 2 7 -1. + <_> + 15 13 1 7 2. + <_> + + <_> + 2 2 6 1 -1. + <_> + 5 2 3 1 2. + <_> + + <_> + 5 2 12 4 -1. + <_> + 9 2 4 4 3. + <_> + + <_> + 6 0 8 12 -1. + <_> + 6 0 4 6 2. + <_> + 10 6 4 6 2. + <_> + + <_> + 13 7 2 2 -1. + <_> + 14 7 1 1 2. + <_> + 13 8 1 1 2. + <_> + + <_> + 0 12 20 6 -1. + <_> + 0 14 20 2 3. + <_> + + <_> + 14 7 2 3 -1. + <_> + 14 7 1 3 2. + <_> + + <_> + 0 8 9 12 -1. + <_> + 3 8 3 12 3. + <_> + + <_> + 3 0 16 2 -1. + <_> + 3 0 8 2 2. + <_> + + <_> + 6 15 3 3 -1. + <_> + 6 16 3 1 3. + <_> + + <_> + 8 15 6 3 -1. + <_> + 8 16 6 1 3. + <_> + + <_> + 0 10 1 6 -1. + <_> + 0 12 1 2 3. + <_> + + <_> + 10 9 4 3 -1. + <_> + 10 10 4 1 3. + <_> + + <_> + 9 15 2 3 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 5 7 10 1 -1. + <_> + 5 7 5 1 2. + <_> + + <_> + 4 0 12 19 -1. + <_> + 10 0 6 19 2. + <_> + + <_> + 0 6 20 6 -1. + <_> + 10 6 10 3 2. + <_> + 0 9 10 3 2. + <_> + + <_> + 3 6 2 2 -1. + <_> + 3 6 1 1 2. + <_> + 4 7 1 1 2. + <_> + + <_> + 15 6 2 2 -1. + <_> + 16 6 1 1 2. + <_> + 15 7 1 1 2. + <_> + + <_> + 3 6 2 2 -1. + <_> + 3 6 1 1 2. + <_> + 4 7 1 1 2. + <_> + + <_> + 14 4 1 12 -1. + <_> + 14 10 1 6 2. + <_> + + <_> + 2 5 16 10 -1. + <_> + 2 5 8 5 2. + <_> + 10 10 8 5 2. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 1 4 2 2 -1. + <_> + 1 5 2 1 2. + <_> + + <_> + 5 0 15 5 -1. + <_> + 10 0 5 5 3. + <_> + + <_> + 0 0 15 5 -1. + <_> + 5 0 5 5 3. + <_> + + <_> + 11 2 2 17 -1. + <_> + 11 2 1 17 2. + <_> + + <_> + 7 2 2 17 -1. + <_> + 8 2 1 17 2. + <_> + + <_> + 15 11 2 9 -1. + <_> + 15 11 1 9 2. + <_> + + <_> + 3 11 2 9 -1. + <_> + 4 11 1 9 2. + <_> + + <_> + 5 16 14 4 -1. + <_> + 5 16 7 4 2. + <_> + + <_> + 1 4 18 1 -1. + <_> + 7 4 6 1 3. + <_> + + <_> + 13 7 6 4 -1. + <_> + 16 7 3 2 2. + <_> + 13 9 3 2 2. + <_> + + <_> + 9 8 2 12 -1. + <_> + 9 12 2 4 3. 
+ <_> + + <_> + 12 1 6 6 -1. + <_> + 12 3 6 2 3. + <_> + + <_> + 5 2 6 6 -1. + <_> + 5 2 3 3 2. + <_> + 8 5 3 3 2. + <_> + + <_> + 9 16 6 4 -1. + <_> + 12 16 3 2 2. + <_> + 9 18 3 2 2. + <_> + + <_> + 1 2 18 3 -1. + <_> + 7 2 6 3 3. + <_> + + <_> + 7 4 9 10 -1. + <_> + 7 9 9 5 2. + <_> + + <_> + 5 9 4 4 -1. + <_> + 7 9 2 4 2. + <_> + + <_> + 11 10 3 6 -1. + <_> + 11 13 3 3 2. + <_> + + <_> + 7 11 5 3 -1. + <_> + 7 12 5 1 3. + <_> + + <_> + 7 11 6 6 -1. + <_> + 10 11 3 3 2. + <_> + 7 14 3 3 2. + <_> + + <_> + 0 0 10 9 -1. + <_> + 0 3 10 3 3. + <_> + + <_> + 13 14 1 6 -1. + <_> + 13 16 1 2 3. + <_> + + <_> + 0 2 3 6 -1. + <_> + 0 4 3 2 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 6 14 1 6 -1. + <_> + 6 16 1 2 3. + <_> + + <_> + 9 15 2 3 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 6 4 3 3 -1. + <_> + 7 4 1 3 3. + <_> + + <_> + 9 0 11 3 -1. + <_> + 9 1 11 1 3. + <_> + + <_> + 0 6 20 3 -1. + <_> + 0 7 20 1 3. + <_> + + <_> + 10 1 1 2 -1. + <_> + 10 2 1 1 2. + <_> + + <_> + 9 6 2 6 -1. + <_> + 10 6 1 6 2. + <_> + + <_> + 5 8 12 1 -1. + <_> + 9 8 4 1 3. + <_> + + <_> + 3 8 12 1 -1. + <_> + 7 8 4 1 3. + <_> + + <_> + 9 7 3 5 -1. + <_> + 10 7 1 5 3. + <_> + + <_> + 3 9 6 2 -1. + <_> + 6 9 3 2 2. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 7 0 6 1 -1. + <_> + 9 0 2 1 3. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 7 10 2 1 -1. + <_> + 8 10 1 1 2. + <_> + + <_> + 6 4 9 13 -1. + <_> + 9 4 3 13 3. + <_> + + <_> + 6 8 4 2 -1. + <_> + 6 9 4 1 2. + <_> + + <_> + 16 2 4 6 -1. + <_> + 16 2 2 6 2. + <_> + + <_> + 0 17 6 3 -1. + <_> + 0 18 6 1 3. + <_> + + <_> + 10 10 3 10 -1. + <_> + 10 15 3 5 2. + <_> + + <_> + 8 7 3 5 -1. + <_> + 9 7 1 5 3. + <_> + + <_> + 10 4 4 3 -1. + <_> + 10 4 2 3 2. + <_> + + <_> + 8 4 3 8 -1. + <_> + 9 4 1 8 3. + <_> + + <_> + 6 6 9 13 -1. + <_> + 9 6 3 13 3. + <_> + + <_> + 6 0 8 12 -1. + <_> + 6 0 4 6 2. + <_> + 10 6 4 6 2. + <_> + + <_> + 14 2 6 8 -1. + <_> + 16 2 2 8 3. + <_> + + <_> + 6 0 3 6 -1. + <_> + 7 0 1 6 3. + <_> + + <_> + 14 2 6 8 -1. + <_> + 16 2 2 8 3. + <_> + + <_> + 0 5 6 6 -1. + <_> + 0 8 6 3 2. + <_> + + <_> + 9 12 6 2 -1. + <_> + 12 12 3 1 2. + <_> + 9 13 3 1 2. + <_> + + <_> + 8 17 3 2 -1. + <_> + 9 17 1 2 3. + <_> + + <_> + 11 6 2 2 -1. + <_> + 12 6 1 1 2. + <_> + 11 7 1 1 2. + <_> + + <_> + 1 9 18 2 -1. + <_> + 7 9 6 2 3. + <_> + + <_> + 11 6 2 2 -1. + <_> + 12 6 1 1 2. + <_> + 11 7 1 1 2. + <_> + + <_> + 3 4 12 8 -1. + <_> + 7 4 4 8 3. + <_> + + <_> + 13 11 5 3 -1. + <_> + 13 12 5 1 3. + <_> + + <_> + 9 10 2 3 -1. + <_> + 9 11 2 1 3. + <_> + + <_> + 14 7 2 3 -1. + <_> + 14 7 1 3 2. + <_> + + <_> + 5 4 1 3 -1. + <_> + 5 5 1 1 3. + <_> + + <_> + 13 4 2 3 -1. + <_> + 13 5 2 1 3. + <_> + + <_> + 5 4 2 3 -1. + <_> + 5 5 2 1 3. + <_> + + <_> + 9 8 2 3 -1. + <_> + 9 9 2 1 3. + <_> + + <_> + 8 9 2 2 -1. + <_> + 8 10 2 1 2. + <_> + + <_> + 15 14 1 4 -1. + <_> + 15 16 1 2 2. + <_> + + <_> + 3 12 2 2 -1. + <_> + 3 13 2 1 2. + <_> + + <_> + 12 15 2 2 -1. + <_> + 13 15 1 1 2. + <_> + 12 16 1 1 2. + <_> + + <_> + 9 13 2 2 -1. + <_> + 9 14 2 1 2. + <_> + + <_> + 4 11 14 9 -1. + <_> + 4 14 14 3 3. + <_> + + <_> + 7 13 4 3 -1. + <_> + 7 14 4 1 3. + <_> + + <_> + 15 14 1 4 -1. + <_> + 15 16 1 2 2. + <_> + + <_> + 4 14 1 4 -1. + <_> + 4 16 1 2 2. + <_> + + <_> + 14 0 6 13 -1. + <_> + 16 0 2 13 3. + <_> + + <_> + 4 1 2 12 -1. + <_> + 4 1 1 6 2. + <_> + 5 7 1 6 2. + <_> + + <_> + 11 14 6 6 -1. + <_> + 14 14 3 3 2. + <_> + 11 17 3 3 2. + <_> + + <_> + 3 14 6 6 -1. + <_> + 3 14 3 3 2. + <_> + 6 17 3 3 2. 
+ <_> + + <_> + 14 17 3 2 -1. + <_> + 14 18 3 1 2. + <_> + + <_> + 3 17 3 2 -1. + <_> + 3 18 3 1 2. + <_> + + <_> + 14 0 6 13 -1. + <_> + 16 0 2 13 3. + <_> + + <_> + 0 0 6 13 -1. + <_> + 2 0 2 13 3. + <_> + + <_> + 10 10 7 6 -1. + <_> + 10 12 7 2 3. + <_> + + <_> + 6 15 2 2 -1. + <_> + 6 15 1 1 2. + <_> + 7 16 1 1 2. + <_> + + <_> + 6 11 8 6 -1. + <_> + 10 11 4 3 2. + <_> + 6 14 4 3 2. + <_> + + <_> + 7 6 2 2 -1. + <_> + 7 6 1 1 2. + <_> + 8 7 1 1 2. + <_> + + <_> + 2 2 16 6 -1. + <_> + 10 2 8 3 2. + <_> + 2 5 8 3 2. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 11 7 3 10 -1. + <_> + 11 12 3 5 2. + <_> + + <_> + 6 7 3 10 -1. + <_> + 6 12 3 5 2. + <_> + + <_> + 10 7 3 2 -1. + <_> + 11 7 1 2 3. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 10 1 1 3 -1. + <_> + 10 2 1 1 3. + <_> + + <_> + 1 2 4 18 -1. + <_> + 1 2 2 9 2. + <_> + 3 11 2 9 2. + <_> + + <_> + 12 4 4 12 -1. + <_> + 12 10 4 6 2. + <_> + + <_> + 0 0 1 6 -1. + <_> + 0 2 1 2 3. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 8 7 4 3 -1. + <_> + 8 8 4 1 3. + <_> + + <_> + 10 7 3 2 -1. + <_> + 11 7 1 2 3. + <_> + + <_> + 7 7 3 2 -1. + <_> + 8 7 1 2 3. + <_> + + <_> + 9 4 6 1 -1. + <_> + 11 4 2 1 3. + <_> + + <_> + 8 7 2 3 -1. + <_> + 9 7 1 3 2. + <_> + + <_> + 12 7 8 6 -1. + <_> + 16 7 4 3 2. + <_> + 12 10 4 3 2. + <_> + + <_> + 0 7 8 6 -1. + <_> + 0 7 4 3 2. + <_> + 4 10 4 3 2. + <_> + + <_> + 18 2 2 10 -1. + <_> + 19 2 1 5 2. + <_> + 18 7 1 5 2. + <_> + + <_> + 0 2 6 4 -1. + <_> + 3 2 3 4 2. + <_> + + <_> + 9 4 6 1 -1. + <_> + 11 4 2 1 3. + <_> + + <_> + 7 15 2 2 -1. + <_> + 7 15 1 1 2. + <_> + 8 16 1 1 2. + <_> + + <_> + 11 13 1 6 -1. + <_> + 11 16 1 3 2. + <_> + + <_> + 8 13 1 6 -1. + <_> + 8 16 1 3 2. + <_> + + <_> + 14 3 2 1 -1. + <_> + 14 3 1 1 2. + <_> + + <_> + 8 15 2 3 -1. + <_> + 8 16 2 1 3. + <_> + + <_> + 12 15 7 4 -1. + <_> + 12 17 7 2 2. + <_> + + <_> + 4 14 12 3 -1. + <_> + 4 15 12 1 3. + <_> + + <_> + 10 3 3 2 -1. + <_> + 11 3 1 2 3. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 13 2 1 2. + <_> + + <_> + 10 11 4 6 -1. + <_> + 10 14 4 3 2. + <_> + + <_> + 7 13 2 2 -1. + <_> + 7 13 1 1 2. + <_> + 8 14 1 1 2. + <_> + + <_> + 4 11 14 4 -1. + <_> + 11 11 7 2 2. + <_> + 4 13 7 2 2. + <_> + + <_> + 1 18 18 2 -1. + <_> + 7 18 6 2 3. + <_> + + <_> + 11 18 2 2 -1. + <_> + 12 18 1 1 2. + <_> + 11 19 1 1 2. + <_> + + <_> + 7 18 2 2 -1. + <_> + 7 18 1 1 2. + <_> + 8 19 1 1 2. + <_> + + <_> + 12 18 8 2 -1. + <_> + 12 19 8 1 2. + <_> + + <_> + 7 14 6 2 -1. + <_> + 7 15 6 1 2. + <_> + + <_> + 8 12 4 8 -1. + <_> + 10 12 2 4 2. + <_> + 8 16 2 4 2. + <_> + + <_> + 4 9 3 3 -1. + <_> + 4 10 3 1 3. + <_> + + <_> + 7 10 6 2 -1. + <_> + 9 10 2 2 3. + <_> + + <_> + 5 0 4 15 -1. + <_> + 7 0 2 15 2. + <_> + + <_> + 8 6 12 14 -1. + <_> + 12 6 4 14 3. + <_> + + <_> + 5 16 3 3 -1. + <_> + 5 17 3 1 3. + <_> + + <_> + 8 1 12 19 -1. + <_> + 12 1 4 19 3. + <_> + + <_> + 3 0 3 2 -1. + <_> + 3 1 3 1 2. + <_> + + <_> + 10 12 4 5 -1. + <_> + 10 12 2 5 2. + <_> + + <_> + 6 12 4 5 -1. + <_> + 8 12 2 5 2. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 0 2 3 6 -1. + <_> + 0 4 3 2 3. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 7 6 4 10 -1. + <_> + 7 11 4 5 2. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 2 13 5 2 -1. + <_> + 2 14 5 1 2. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 7 11 2 2 -1. + <_> + 7 11 1 1 2. + <_> + 8 12 1 1 2. 
+ <_> + + <_> + 14 13 3 3 -1. + <_> + 14 14 3 1 3. + <_> + + <_> + 3 13 3 3 -1. + <_> + 3 14 3 1 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 8 8 3 1 3. + <_> + + <_> + 13 5 3 3 -1. + <_> + 13 6 3 1 3. + <_> + + <_> + 0 9 5 3 -1. + <_> + 0 10 5 1 3. + <_> + + <_> + 13 5 3 3 -1. + <_> + 13 6 3 1 3. + <_> + + <_> + 9 12 2 8 -1. + <_> + 9 12 1 4 2. + <_> + 10 16 1 4 2. + <_> + + <_> + 11 7 2 2 -1. + <_> + 12 7 1 1 2. + <_> + 11 8 1 1 2. + <_> + + <_> + 0 16 6 4 -1. + <_> + 3 16 3 4 2. + <_> + + <_> + 10 6 2 3 -1. + <_> + 10 7 2 1 3. + <_> + + <_> + 9 5 2 6 -1. + <_> + 9 7 2 2 3. + <_> + + <_> + 12 15 8 4 -1. + <_> + 12 15 4 4 2. + <_> + + <_> + 0 14 8 6 -1. + <_> + 4 14 4 6 2. + <_> + + <_> + 9 0 3 2 -1. + <_> + 10 0 1 2 3. + <_> + + <_> + 4 15 4 2 -1. + <_> + 6 15 2 2 2. + <_> + + <_> + 12 7 3 13 -1. + <_> + 13 7 1 13 3. + <_> + + <_> + 5 7 3 13 -1. + <_> + 6 7 1 13 3. + <_> + + <_> + 9 6 3 9 -1. + <_> + 9 9 3 3 3. + <_> + + <_> + 4 4 7 12 -1. + <_> + 4 10 7 6 2. + <_> + + <_> + 12 12 2 2 -1. + <_> + 13 12 1 1 2. + <_> + 12 13 1 1 2. + <_> + + <_> + 6 12 2 2 -1. + <_> + 6 12 1 1 2. + <_> + 7 13 1 1 2. + <_> + + <_> + 8 9 4 2 -1. + <_> + 10 9 2 1 2. + <_> + 8 10 2 1 2. + <_> + + <_> + 3 6 2 2 -1. + <_> + 3 6 1 1 2. + <_> + 4 7 1 1 2. + <_> + + <_> + 16 6 3 2 -1. + <_> + 16 7 3 1 2. + <_> + + <_> + 0 7 19 4 -1. + <_> + 0 9 19 2 2. + <_> + + <_> + 10 2 10 1 -1. + <_> + 10 2 5 1 2. + <_> + + <_> + 9 4 2 12 -1. + <_> + 9 10 2 6 2. + <_> + + <_> + 12 18 4 1 -1. + <_> + 12 18 2 1 2. + <_> + + <_> + 1 7 6 4 -1. + <_> + 1 7 3 2 2. + <_> + 4 9 3 2 2. + <_> + + <_> + 12 0 6 13 -1. + <_> + 14 0 2 13 3. + <_> + + <_> + 2 0 6 13 -1. + <_> + 4 0 2 13 3. + <_> + + <_> + 10 5 8 8 -1. + <_> + 10 9 8 4 2. + <_> + + <_> + 8 3 2 5 -1. + <_> + 9 3 1 5 2. + <_> + + <_> + 8 4 9 1 -1. + <_> + 11 4 3 1 3. + <_> + + <_> + 3 4 9 1 -1. + <_> + 6 4 3 1 3. + <_> + + <_> + 1 0 18 10 -1. + <_> + 7 0 6 10 3. + <_> + + <_> + 7 17 5 3 -1. + <_> + 7 18 5 1 3. + <_> + + <_> + 7 11 6 1 -1. + <_> + 9 11 2 1 3. + <_> + + <_> + 2 2 3 2 -1. + <_> + 2 3 3 1 2. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 6 10 3 6 -1. + <_> + 6 13 3 3 2. + <_> + + <_> + 11 4 2 4 -1. + <_> + 11 4 1 4 2. + <_> + + <_> + 7 4 2 4 -1. + <_> + 8 4 1 4 2. + <_> + + <_> + 9 6 2 4 -1. + <_> + 9 6 1 4 2. + <_> + + <_> + 6 13 8 3 -1. + <_> + 6 14 8 1 3. + <_> + + <_> + 9 15 3 4 -1. + <_> + 10 15 1 4 3. + <_> + + <_> + 9 2 2 17 -1. + <_> + 10 2 1 17 2. + <_> + + <_> + 7 0 6 1 -1. + <_> + 9 0 2 1 3. + <_> + + <_> + 8 15 3 4 -1. + <_> + 9 15 1 4 3. + <_> + + <_> + 7 13 7 3 -1. + <_> + 7 14 7 1 3. + <_> + + <_> + 8 16 3 3 -1. + <_> + 9 16 1 3 3. + <_> + + <_> + 6 2 8 10 -1. + <_> + 6 7 8 5 2. + <_> + + <_> + 2 5 8 8 -1. + <_> + 2 9 8 4 2. + <_> + + <_> + 14 16 2 2 -1. + <_> + 14 17 2 1 2. + <_> + + <_> + 4 16 2 2 -1. + <_> + 4 17 2 1 2. + <_> + + <_> + 10 11 4 6 -1. + <_> + 10 14 4 3 2. + <_> + + <_> + 6 11 4 6 -1. + <_> + 6 14 4 3 2. + <_> + + <_> + 10 14 1 3 -1. + <_> + 10 15 1 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 10 0 4 6 -1. + <_> + 12 0 2 3 2. + <_> + 10 3 2 3 2. + <_> + + <_> + 0 3 20 2 -1. + <_> + 0 4 20 1 2. + <_> + + <_> + 12 0 8 2 -1. + <_> + 16 0 4 1 2. + <_> + 12 1 4 1 2. + <_> + + <_> + 2 12 10 8 -1. + <_> + 2 16 10 4 2. + <_> + + <_> + 17 7 2 10 -1. + <_> + 18 7 1 5 2. + <_> + 17 12 1 5 2. + <_> + + <_> + 1 7 2 10 -1. + <_> + 1 7 1 5 2. + <_> + 2 12 1 5 2. + <_> + + <_> + 15 10 3 6 -1. + <_> + 15 12 3 2 3. + <_> + + <_> + 4 4 6 2 -1. + <_> + 6 4 2 2 3. 
+ <_> + + <_> + 0 5 20 6 -1. + <_> + 0 7 20 2 3. + <_> + + <_> + 0 0 8 2 -1. + <_> + 0 0 4 1 2. + <_> + 4 1 4 1 2. + <_> + + <_> + 1 0 18 4 -1. + <_> + 7 0 6 4 3. + <_> + + <_> + 1 13 6 2 -1. + <_> + 1 14 6 1 2. + <_> + + <_> + 10 8 3 4 -1. + <_> + 11 8 1 4 3. + <_> + + <_> + 6 1 6 1 -1. + <_> + 8 1 2 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 1 6 18 2 -1. + <_> + 10 6 9 2 2. + <_> + + <_> + 15 11 1 2 -1. + <_> + 15 12 1 1 2. + <_> + + <_> + 6 5 1 2 -1. + <_> + 6 6 1 1 2. + <_> + + <_> + 13 4 1 3 -1. + <_> + 13 5 1 1 3. + <_> + + <_> + 2 15 1 2 -1. + <_> + 2 16 1 1 2. + <_> + + <_> + 12 4 4 3 -1. + <_> + 12 5 4 1 3. + <_> + + <_> + 0 0 7 3 -1. + <_> + 0 1 7 1 3. + <_> + + <_> + 9 12 6 2 -1. + <_> + 9 12 3 2 2. + <_> + + <_> + 5 4 2 3 -1. + <_> + 5 5 2 1 3. + <_> + + <_> + 18 4 2 3 -1. + <_> + 18 5 2 1 3. + <_> + + <_> + 3 0 8 6 -1. + <_> + 3 2 8 2 3. + <_> + + <_> + 0 2 20 6 -1. + <_> + 10 2 10 3 2. + <_> + 0 5 10 3 2. + <_> + + <_> + 4 7 2 4 -1. + <_> + 5 7 1 4 2. + <_> + + <_> + 3 10 15 2 -1. + <_> + 8 10 5 2 3. + <_> + + <_> + 3 0 12 11 -1. + <_> + 9 0 6 11 2. + <_> + + <_> + 13 0 2 6 -1. + <_> + 13 0 1 6 2. + <_> + + <_> + 0 19 2 1 -1. + <_> + 1 19 1 1 2. + <_> + + <_> + 16 10 4 10 -1. + <_> + 18 10 2 5 2. + <_> + 16 15 2 5 2. + <_> + + <_> + 4 8 10 3 -1. + <_> + 4 9 10 1 3. + <_> + + <_> + 14 12 3 3 -1. + <_> + 14 13 3 1 3. + <_> + + <_> + 0 10 4 10 -1. + <_> + 0 10 2 5 2. + <_> + 2 15 2 5 2. + <_> + + <_> + 18 3 2 6 -1. + <_> + 18 5 2 2 3. + <_> + + <_> + 6 6 1 3 -1. + <_> + 6 7 1 1 3. + <_> + + <_> + 7 7 7 2 -1. + <_> + 7 8 7 1 2. + <_> + + <_> + 0 3 2 6 -1. + <_> + 0 5 2 2 3. + <_> + + <_> + 11 1 3 1 -1. + <_> + 12 1 1 1 3. + <_> + + <_> + 5 0 2 6 -1. + <_> + 6 0 1 6 2. + <_> + + <_> + 1 1 18 14 -1. + <_> + 7 1 6 14 3. + <_> + + <_> + 4 6 8 3 -1. + <_> + 8 6 4 3 2. + <_> + + <_> + 9 12 6 2 -1. + <_> + 9 12 3 2 2. + <_> + + <_> + 5 12 6 2 -1. + <_> + 8 12 3 2 2. + <_> + + <_> + 10 7 3 5 -1. + <_> + 11 7 1 5 3. + <_> + + <_> + 7 7 3 5 -1. + <_> + 8 7 1 5 3. + <_> + + <_> + 13 0 3 10 -1. + <_> + 14 0 1 10 3. + <_> + + <_> + 4 11 3 2 -1. + <_> + 4 12 3 1 2. + <_> + + <_> + 17 3 3 6 -1. + <_> + 18 3 1 6 3. + <_> + + <_> + 1 8 18 10 -1. + <_> + 1 13 18 5 2. + <_> + + <_> + 13 0 3 10 -1. + <_> + 14 0 1 10 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 16 3 3 7 -1. + <_> + 17 3 1 7 3. + <_> + + <_> + 4 0 3 10 -1. + <_> + 5 0 1 10 3. + <_> + + <_> + 16 3 3 7 -1. + <_> + 17 3 1 7 3. + <_> + + <_> + 0 9 1 2 -1. + <_> + 0 10 1 1 2. + <_> + + <_> + 18 1 2 10 -1. + <_> + 18 1 1 10 2. + <_> + + <_> + 0 1 2 10 -1. + <_> + 1 1 1 10 2. + <_> + + <_> + 10 16 3 4 -1. + <_> + 11 16 1 4 3. + <_> + + <_> + 2 8 3 3 -1. + <_> + 3 8 1 3 3. + <_> + + <_> + 11 0 2 6 -1. + <_> + 12 0 1 3 2. + <_> + 11 3 1 3 2. + <_> + + <_> + 7 0 2 6 -1. + <_> + 7 0 1 3 2. + <_> + 8 3 1 3 2. + <_> + + <_> + 16 3 3 7 -1. + <_> + 17 3 1 7 3. + <_> + + <_> + 1 3 3 7 -1. + <_> + 2 3 1 7 3. + <_> + + <_> + 14 1 6 16 -1. + <_> + 16 1 2 16 3. + <_> + + <_> + 0 1 6 16 -1. + <_> + 2 1 2 16 3. + <_> + + <_> + 2 0 16 8 -1. + <_> + 10 0 8 4 2. + <_> + 2 4 8 4 2. + <_> + + <_> + 6 8 5 3 -1. + <_> + 6 9 5 1 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 8 8 4 3 -1. + <_> + 8 9 4 1 3. + <_> + + <_> + 9 6 2 4 -1. + <_> + 9 6 1 4 2. + <_> + + <_> + 0 7 15 1 -1. + <_> + 5 7 5 1 3. + <_> + + <_> + 8 2 7 9 -1. + <_> + 8 5 7 3 3. + <_> + + <_> + 1 7 16 4 -1. + <_> + 1 7 8 2 2. + <_> + 9 9 8 2 2. + <_> + + <_> + 6 12 8 2 -1. + <_> + 6 13 8 1 2. + <_> + + <_> + 8 11 3 3 -1. 
+ <_> + 8 12 3 1 3. + <_> + + <_> + 4 5 14 10 -1. + <_> + 11 5 7 5 2. + <_> + 4 10 7 5 2. + <_> + + <_> + 4 12 3 2 -1. + <_> + 4 13 3 1 2. + <_> + + <_> + 9 11 6 1 -1. + <_> + 11 11 2 1 3. + <_> + + <_> + 4 9 7 6 -1. + <_> + 4 11 7 2 3. + <_> + + <_> + 7 10 6 3 -1. + <_> + 7 11 6 1 3. + <_> + + <_> + 9 11 2 2 -1. + <_> + 9 12 2 1 2. + <_> + + <_> + 0 5 20 6 -1. + <_> + 0 7 20 2 3. + <_> + + <_> + 6 4 6 1 -1. + <_> + 8 4 2 1 3. + <_> + + <_> + 9 11 6 1 -1. + <_> + 11 11 2 1 3. + <_> + + <_> + 5 11 6 1 -1. + <_> + 7 11 2 1 3. + <_> + + <_> + 10 16 3 4 -1. + <_> + 11 16 1 4 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 9 7 1 3 3. + <_> + + <_> + 2 12 16 8 -1. + <_> + 2 16 16 4 2. + <_> + + <_> + 0 15 15 2 -1. + <_> + 0 16 15 1 2. + <_> + + <_> + 15 4 5 6 -1. + <_> + 15 6 5 2 3. + <_> + + <_> + 9 5 2 4 -1. + <_> + 10 5 1 4 2. + <_> + + <_> + 8 10 9 6 -1. + <_> + 8 12 9 2 3. + <_> + + <_> + 2 19 15 1 -1. + <_> + 7 19 5 1 3. + <_> + + <_> + 10 16 3 4 -1. + <_> + 11 16 1 4 3. + <_> + + <_> + 0 15 20 4 -1. + <_> + 0 17 20 2 2. + <_> + + <_> + 10 16 3 4 -1. + <_> + 11 16 1 4 3. + <_> + + <_> + 7 16 3 4 -1. + <_> + 8 16 1 4 3. + <_> + + <_> + 9 16 3 3 -1. + <_> + 9 17 3 1 3. + <_> + + <_> + 8 11 4 6 -1. + <_> + 8 14 4 3 2. + <_> + + <_> + 9 6 2 12 -1. + <_> + 9 10 2 4 3. + <_> + + <_> + 8 17 4 3 -1. + <_> + 8 18 4 1 3. + <_> + + <_> + 9 18 8 2 -1. + <_> + 13 18 4 1 2. + <_> + 9 19 4 1 2. + <_> + + <_> + 1 18 8 2 -1. + <_> + 1 19 8 1 2. + <_> + + <_> + 13 5 6 15 -1. + <_> + 15 5 2 15 3. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 9 5 2 3 -1. + <_> + 9 5 1 3 2. + <_> + + <_> + 1 5 6 15 -1. + <_> + 3 5 2 15 3. + <_> + + <_> + 4 1 14 8 -1. + <_> + 11 1 7 4 2. + <_> + 4 5 7 4 2. + <_> + + <_> + 2 4 4 16 -1. + <_> + 2 4 2 8 2. + <_> + 4 12 2 8 2. + <_> + + <_> + 12 4 3 12 -1. + <_> + 12 10 3 6 2. + <_> + + <_> + 4 5 10 12 -1. + <_> + 4 5 5 6 2. + <_> + 9 11 5 6 2. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 5 4 2 3 -1. + <_> + 5 5 2 1 3. + <_> + + <_> + 12 2 4 10 -1. + <_> + 14 2 2 5 2. + <_> + 12 7 2 5 2. + <_> + + <_> + 6 4 7 3 -1. + <_> + 6 5 7 1 3. + <_> + + <_> + 2 0 18 2 -1. + <_> + 11 0 9 1 2. + <_> + 2 1 9 1 2. + <_> + + <_> + 0 0 18 2 -1. + <_> + 0 0 9 1 2. + <_> + 9 1 9 1 2. + <_> + + <_> + 13 13 4 6 -1. + <_> + 15 13 2 3 2. + <_> + 13 16 2 3 2. + <_> + + <_> + 3 13 4 6 -1. + <_> + 3 13 2 3 2. + <_> + 5 16 2 3 2. + <_> + + <_> + 10 12 2 6 -1. + <_> + 10 15 2 3 2. + <_> + + <_> + 5 9 10 10 -1. + <_> + 5 9 5 5 2. + <_> + 10 14 5 5 2. + <_> + + <_> + 11 4 4 2 -1. + <_> + 13 4 2 1 2. + <_> + 11 5 2 1 2. + <_> + + <_> + 7 12 6 8 -1. + <_> + 10 12 3 8 2. + <_> + + <_> + 12 2 4 10 -1. + <_> + 14 2 2 5 2. + <_> + 12 7 2 5 2. + <_> + + <_> + 8 11 2 1 -1. + <_> + 9 11 1 1 2. + <_> + + <_> + 10 5 1 12 -1. + <_> + 10 9 1 4 3. + <_> + + <_> + 0 11 6 9 -1. + <_> + 3 11 3 9 2. + <_> + + <_> + 12 2 4 10 -1. + <_> + 14 2 2 5 2. + <_> + 12 7 2 5 2. + <_> + + <_> + 4 2 4 10 -1. + <_> + 4 2 2 5 2. + <_> + 6 7 2 5 2. + <_> + + <_> + 11 4 4 2 -1. + <_> + 13 4 2 1 2. + <_> + 11 5 2 1 2. + <_> + + <_> + 0 14 6 3 -1. + <_> + 0 15 6 1 3. + <_> + + <_> + 11 4 4 2 -1. + <_> + 13 4 2 1 2. + <_> + 11 5 2 1 2. + <_> + + <_> + 6 1 3 2 -1. + <_> + 7 1 1 2 3. + <_> + + <_> + 11 4 4 2 -1. + <_> + 13 4 2 1 2. + <_> + 11 5 2 1 2. + <_> + + <_> + 5 4 4 2 -1. + <_> + 5 4 2 1 2. + <_> + 7 5 2 1 2. + <_> + + <_> + 13 0 2 12 -1. + <_> + 14 0 1 6 2. + <_> + 13 6 1 6 2. + <_> + + <_> + 6 0 3 10 -1. + <_> + 7 0 1 10 3. + <_> + + <_> + 3 0 17 8 -1. + <_> + 3 4 17 4 2. + <_> + + <_> + 0 4 20 4 -1. 
+ <_> + 0 6 20 2 2. + <_> + + <_> + 0 3 8 2 -1. + <_> + 4 3 4 2 2. + <_> + + <_> + 8 11 4 3 -1. + <_> + 8 12 4 1 3. + <_> + + <_> + 5 7 6 4 -1. + <_> + 5 7 3 2 2. + <_> + 8 9 3 2 2. + <_> + + <_> + 8 3 4 9 -1. + <_> + 8 6 4 3 3. + <_> + + <_> + 8 15 1 4 -1. + <_> + 8 17 1 2 2. + <_> + + <_> + 4 5 12 7 -1. + <_> + 8 5 4 7 3. + <_> + + <_> + 4 2 4 10 -1. + <_> + 4 2 2 5 2. + <_> + 6 7 2 5 2. + <_> + + <_> + 3 0 17 2 -1. + <_> + 3 1 17 1 2. + <_> + + <_> + 2 2 16 15 -1. + <_> + 2 7 16 5 3. + <_> + + <_> + 15 2 5 2 -1. + <_> + 15 3 5 1 2. + <_> + + <_> + 9 3 2 2 -1. + <_> + 10 3 1 2 2. + <_> + + <_> + 4 5 16 15 -1. + <_> + 4 10 16 5 3. + <_> + + <_> + 7 13 5 6 -1. + <_> + 7 16 5 3 2. + <_> + + <_> + 10 7 3 2 -1. + <_> + 11 7 1 2 3. + <_> + + <_> + 8 3 3 1 -1. + <_> + 9 3 1 1 3. + <_> + + <_> + 9 16 3 3 -1. + <_> + 9 17 3 1 3. + <_> + + <_> + 0 2 5 2 -1. + <_> + 0 3 5 1 2. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 1 7 12 1 -1. + <_> + 5 7 4 1 3. + <_> + + <_> + 7 5 6 14 -1. + <_> + 7 12 6 7 2. + <_> + + <_> + 0 0 8 10 -1. + <_> + 0 0 4 5 2. + <_> + 4 5 4 5 2. + <_> + + <_> + 9 1 3 2 -1. + <_> + 10 1 1 2 3. + <_> + + <_> + 8 1 3 2 -1. + <_> + 9 1 1 2 3. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 7 4 6 16 -1. + <_> + 7 12 6 8 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 2 3 2 6 -1. + <_> + 2 5 2 2 3. + <_> + + <_> + 14 2 6 9 -1. + <_> + 14 5 6 3 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 5 5 2 3 -1. + <_> + 5 6 2 1 3. + <_> + + <_> + 13 11 3 6 -1. + <_> + 13 13 3 2 3. + <_> + + <_> + 3 14 2 6 -1. + <_> + 3 17 2 3 2. + <_> + + <_> + 14 3 6 2 -1. + <_> + 14 4 6 1 2. + <_> + + <_> + 0 8 16 2 -1. + <_> + 0 9 16 1 2. + <_> + + <_> + 14 3 6 2 -1. + <_> + 14 4 6 1 2. + <_> + + <_> + 0 0 5 6 -1. + <_> + 0 2 5 2 3. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 4 11 3 6 -1. + <_> + 4 13 3 2 3. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 9 5 1 3 -1. + <_> + 9 6 1 1 3. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 6 6 8 12 -1. + <_> + 6 12 8 6 2. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 5 12 9 2 -1. + <_> + 8 12 3 2 3. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 4 5 4 3 -1. + <_> + 4 6 4 1 3. + <_> + + <_> + 6 6 9 2 -1. + <_> + 9 6 3 2 3. + <_> + + <_> + 4 11 1 3 -1. + <_> + 4 12 1 1 3. + <_> + + <_> + 14 12 6 6 -1. + <_> + 14 12 3 6 2. + <_> + + <_> + 7 0 3 7 -1. + <_> + 8 0 1 7 3. + <_> + + <_> + 9 8 3 3 -1. + <_> + 10 8 1 3 3. + <_> + + <_> + 8 8 3 3 -1. + <_> + 9 8 1 3 3. + <_> + + <_> + 5 10 11 3 -1. + <_> + 5 11 11 1 3. + <_> + + <_> + 5 7 10 1 -1. + <_> + 10 7 5 1 2. + <_> + + <_> + 9 7 3 2 -1. + <_> + 10 7 1 2 3. + <_> + + <_> + 8 7 3 2 -1. + <_> + 9 7 1 2 3. + <_> + + <_> + 11 9 4 2 -1. + <_> + 11 9 2 2 2. + <_> + + <_> + 5 9 4 2 -1. + <_> + 7 9 2 2 2. + <_> + + <_> + 14 10 2 4 -1. + <_> + 14 12 2 2 2. + <_> + + <_> + 7 7 3 2 -1. + <_> + 8 7 1 2 3. + <_> + + <_> + 14 17 6 3 -1. + <_> + 14 18 6 1 3. + <_> + + <_> + 4 5 12 12 -1. + <_> + 4 5 6 6 2. + <_> + 10 11 6 6 2. + <_> + + <_> + 6 9 8 8 -1. + <_> + 10 9 4 4 2. + <_> + 6 13 4 4 2. + <_> + + <_> + 0 4 15 4 -1. + <_> + 5 4 5 4 3. + <_> + + <_> + 13 2 4 1 -1. + <_> + 13 2 2 1 2. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 13 2 1 2. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 9 13 2 3 -1. + <_> + 9 14 2 1 3. + <_> + + <_> + 13 11 2 3 -1. + <_> + 13 12 2 1 3. + <_> + + <_> + 7 12 4 4 -1. 
+ <_> + 7 12 2 2 2. + <_> + 9 14 2 2 2. + <_> + + <_> + 10 11 2 2 -1. + <_> + 11 11 1 1 2. + <_> + 10 12 1 1 2. + <_> + + <_> + 8 17 3 2 -1. + <_> + 9 17 1 2 3. + <_> + + <_> + 10 11 2 2 -1. + <_> + 11 11 1 1 2. + <_> + 10 12 1 1 2. + <_> + + <_> + 0 17 6 3 -1. + <_> + 0 18 6 1 3. + <_> + + <_> + 10 11 2 2 -1. + <_> + 11 11 1 1 2. + <_> + 10 12 1 1 2. + <_> + + <_> + 8 11 2 2 -1. + <_> + 8 11 1 1 2. + <_> + 9 12 1 1 2. + <_> + + <_> + 12 5 8 4 -1. + <_> + 12 5 4 4 2. + <_> + + <_> + 0 5 8 4 -1. + <_> + 4 5 4 4 2. + <_> + + <_> + 13 2 4 1 -1. + <_> + 13 2 2 1 2. + <_> + + <_> + 3 2 4 1 -1. + <_> + 5 2 2 1 2. + <_> + + <_> + 10 0 4 2 -1. + <_> + 12 0 2 1 2. + <_> + 10 1 2 1 2. + <_> + + <_> + 7 12 3 1 -1. + <_> + 8 12 1 1 3. + <_> + + <_> + 8 11 4 8 -1. + <_> + 10 11 2 4 2. + <_> + 8 15 2 4 2. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 3 18 15 2 -1. + <_> + 3 19 15 1 2. + <_> + + <_> + 2 6 2 12 -1. + <_> + 2 6 1 6 2. + <_> + 3 12 1 6 2. + <_> + + <_> + 9 8 2 3 -1. + <_> + 9 9 2 1 3. + <_> + + <_> + 7 10 3 2 -1. + <_> + 8 10 1 2 3. + <_> + + <_> + 11 11 3 1 -1. + <_> + 12 11 1 1 3. + <_> + + <_> + 6 11 3 1 -1. + <_> + 7 11 1 1 3. + <_> + + <_> + 9 2 4 2 -1. + <_> + 11 2 2 1 2. + <_> + 9 3 2 1 2. + <_> + + <_> + 4 12 2 3 -1. + <_> + 4 13 2 1 3. + <_> + + <_> + 2 1 18 3 -1. + <_> + 8 1 6 3 3. + <_> + + <_> + 5 1 4 14 -1. + <_> + 7 1 2 14 2. + <_> + + <_> + 8 16 12 3 -1. + <_> + 8 16 6 3 2. + <_> + + <_> + 1 17 18 3 -1. + <_> + 7 17 6 3 3. + <_> + + <_> + 9 14 2 6 -1. + <_> + 9 17 2 3 2. + <_> + + <_> + 9 12 1 8 -1. + <_> + 9 16 1 4 2. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 9 6 2 12 -1. + <_> + 9 10 2 4 3. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 0 1 4 8 -1. + <_> + 2 1 2 8 2. + <_> + + <_> + 9 1 6 2 -1. + <_> + 12 1 3 1 2. + <_> + 9 2 3 1 2. + <_> + + <_> + 1 3 12 14 -1. + <_> + 1 10 12 7 2. + <_> + + <_> + 8 12 4 2 -1. + <_> + 10 12 2 1 2. + <_> + 8 13 2 1 2. + <_> + + <_> + 1 9 10 2 -1. + <_> + 1 9 5 1 2. + <_> + 6 10 5 1 2. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 6 8 8 3 -1. + <_> + 6 9 8 1 3. + <_> + + <_> + 9 15 5 3 -1. + <_> + 9 16 5 1 3. + <_> + + <_> + 8 7 4 3 -1. + <_> + 8 8 4 1 3. + <_> + + <_> + 7 7 6 2 -1. + <_> + 7 8 6 1 2. + <_> + + <_> + 5 7 8 2 -1. + <_> + 5 7 4 1 2. + <_> + 9 8 4 1 2. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 4 7 4 2 -1. + <_> + 4 8 4 1 2. + <_> + + <_> + 14 2 6 9 -1. + <_> + 14 5 6 3 3. + <_> + + <_> + 4 9 3 3 -1. + <_> + 5 9 1 3 3. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 0 2 6 9 -1. + <_> + 0 5 6 3 3. + <_> + + <_> + 17 3 3 6 -1. + <_> + 18 3 1 6 3. + <_> + + <_> + 0 3 3 6 -1. + <_> + 1 3 1 6 3. + <_> + + <_> + 17 14 1 2 -1. + <_> + 17 15 1 1 2. + <_> + + <_> + 4 9 4 3 -1. + <_> + 6 9 2 3 2. + <_> + + <_> + 12 9 3 3 -1. + <_> + 12 10 3 1 3. + <_> + + <_> + 5 9 3 3 -1. + <_> + 5 10 3 1 3. + <_> + + <_> + 9 5 6 8 -1. + <_> + 12 5 3 4 2. + <_> + 9 9 3 4 2. + <_> + + <_> + 5 5 6 8 -1. + <_> + 5 5 3 4 2. + <_> + 8 9 3 4 2. + <_> + + <_> + 16 1 4 6 -1. + <_> + 16 4 4 3 2. + <_> + + <_> + 1 0 6 20 -1. + <_> + 3 0 2 20 3. + <_> + + <_> + 12 11 3 2 -1. + <_> + 13 11 1 2 3. + <_> + + <_> + 5 11 3 2 -1. + <_> + 6 11 1 2 3. + <_> + + <_> + 9 4 6 1 -1. + <_> + 11 4 2 1 3. + <_> + + <_> + 0 0 8 3 -1. + <_> + 4 0 4 3 2. + <_> + + <_> + 15 0 2 5 -1. + <_> + 15 0 1 5 2. + <_> + + <_> + 4 1 3 2 -1. + <_> + 5 1 1 2 3. + <_> + + <_> + 7 0 6 15 -1. + <_> + 9 0 2 15 3. + <_> + + <_> + 6 11 3 1 -1. + <_> + 7 11 1 1 3. 
+ <_> + + <_> + 12 0 3 4 -1. + <_> + 13 0 1 4 3. + <_> + + <_> + 5 4 6 1 -1. + <_> + 7 4 2 1 3. + <_> + + <_> + 12 7 3 2 -1. + <_> + 12 8 3 1 2. + <_> + + <_> + 0 1 4 6 -1. + <_> + 0 4 4 3 2. + <_> + + <_> + 12 7 3 2 -1. + <_> + 12 8 3 1 2. + <_> + + <_> + 2 16 3 3 -1. + <_> + 2 17 3 1 3. + <_> + + <_> + 13 8 6 10 -1. + <_> + 16 8 3 5 2. + <_> + 13 13 3 5 2. + <_> + + <_> + 0 9 5 2 -1. + <_> + 0 10 5 1 2. + <_> + + <_> + 12 11 2 2 -1. + <_> + 13 11 1 1 2. + <_> + 12 12 1 1 2. + <_> + + <_> + 3 15 3 3 -1. + <_> + 3 16 3 1 3. + <_> + + <_> + 12 7 3 2 -1. + <_> + 12 8 3 1 2. + <_> + + <_> + 5 7 3 2 -1. + <_> + 5 8 3 1 2. + <_> + + <_> + 9 5 9 9 -1. + <_> + 9 8 9 3 3. + <_> + + <_> + 5 0 3 7 -1. + <_> + 6 0 1 7 3. + <_> + + <_> + 5 2 12 5 -1. + <_> + 9 2 4 5 3. + <_> + + <_> + 6 11 2 2 -1. + <_> + 6 11 1 1 2. + <_> + 7 12 1 1 2. + <_> + + <_> + 15 15 3 2 -1. + <_> + 15 16 3 1 2. + <_> + + <_> + 2 15 3 2 -1. + <_> + 2 16 3 1 2. + <_> + + <_> + 14 12 6 8 -1. + <_> + 17 12 3 4 2. + <_> + 14 16 3 4 2. + <_> + + <_> + 2 8 15 6 -1. + <_> + 7 8 5 6 3. + <_> + + <_> + 2 2 18 17 -1. + <_> + 8 2 6 17 3. + <_> + + <_> + 5 1 4 1 -1. + <_> + 7 1 2 1 2. + <_> + + <_> + 5 2 12 5 -1. + <_> + 9 2 4 5 3. + <_> + + <_> + 3 2 12 5 -1. + <_> + 7 2 4 5 3. + <_> + + <_> + 4 9 12 4 -1. + <_> + 10 9 6 2 2. + <_> + 4 11 6 2 2. + <_> + + <_> + 5 15 6 2 -1. + <_> + 5 15 3 1 2. + <_> + 8 16 3 1 2. + <_> + + <_> + 10 14 2 3 -1. + <_> + 10 15 2 1 3. + <_> + + <_> + 0 13 20 2 -1. + <_> + 0 13 10 1 2. + <_> + 10 14 10 1 2. + <_> + + <_> + 4 9 12 8 -1. + <_> + 10 9 6 4 2. + <_> + 4 13 6 4 2. + <_> + + <_> + 8 13 3 6 -1. + <_> + 8 16 3 3 2. + <_> + + <_> + 10 12 2 2 -1. + <_> + 10 13 2 1 2. + <_> + + <_> + 9 12 2 2 -1. + <_> + 9 12 1 1 2. + <_> + 10 13 1 1 2. + <_> + + <_> + 4 11 14 4 -1. + <_> + 11 11 7 2 2. + <_> + 4 13 7 2 2. + <_> + + <_> + 8 5 4 2 -1. + <_> + 8 6 4 1 2. + <_> + + <_> + 10 10 6 3 -1. + <_> + 12 10 2 3 3. + <_> + + <_> + 2 14 1 2 -1. + <_> + 2 15 1 1 2. + <_> + + <_> + 13 8 6 12 -1. + <_> + 16 8 3 6 2. + <_> + 13 14 3 6 2. + <_> + + <_> + 1 8 6 12 -1. + <_> + 1 8 3 6 2. + <_> + 4 14 3 6 2. + <_> + + <_> + 10 0 6 10 -1. + <_> + 12 0 2 10 3. + <_> + + <_> + 5 11 8 4 -1. + <_> + 5 11 4 2 2. + <_> + 9 13 4 2 2. + <_> + + <_> + 10 16 8 4 -1. + <_> + 14 16 4 2 2. + <_> + 10 18 4 2 2. + <_> + + <_> + 7 7 6 6 -1. + <_> + 9 7 2 6 3. + <_> + + <_> + 10 2 4 10 -1. + <_> + 10 2 2 10 2. + <_> + + <_> + 6 1 4 9 -1. + <_> + 8 1 2 9 2. + <_> + + <_> + 12 19 2 1 -1. + <_> + 12 19 1 1 2. + <_> + + <_> + 1 2 4 9 -1. + <_> + 3 2 2 9 2. + <_> + + <_> + 7 5 6 4 -1. + <_> + 9 5 2 4 3. + <_> + + <_> + 9 4 2 4 -1. + <_> + 9 6 2 2 2. + <_> + + <_> + 14 5 2 8 -1. + <_> + 14 9 2 4 2. + <_> + + <_> + 7 6 5 12 -1. + <_> + 7 12 5 6 2. + <_> + + <_> + 14 6 2 6 -1. + <_> + 14 9 2 3 2. + <_> + + <_> + 4 6 2 6 -1. + <_> + 4 9 2 3 2. + <_> + + <_> + 8 15 10 4 -1. + <_> + 13 15 5 2 2. + <_> + 8 17 5 2 2. + <_> + + <_> + 6 18 2 2 -1. + <_> + 7 18 1 2 2. + <_> + + <_> + 11 3 6 2 -1. + <_> + 11 4 6 1 2. + <_> + + <_> + 2 0 16 6 -1. + <_> + 2 2 16 2 3. + <_> + + <_> + 11 3 6 2 -1. + <_> + 11 4 6 1 2. + <_> + + <_> + 4 11 10 3 -1. + <_> + 4 12 10 1 3. + <_> + + <_> + 11 3 6 2 -1. + <_> + 11 4 6 1 2. + <_> + + <_> + 3 3 6 2 -1. + <_> + 3 4 6 1 2. + <_> + + <_> + 16 0 4 7 -1. + <_> + 16 0 2 7 2. + <_> + + <_> + 0 14 9 6 -1. + <_> + 0 16 9 2 3. + <_> + + <_> + 9 16 3 3 -1. + <_> + 9 17 3 1 3. + <_> + + <_> + 4 6 6 2 -1. + <_> + 6 6 2 2 3. + <_> + + <_> + 15 11 1 3 -1. + <_> + 15 12 1 1 3. + <_> + + <_> + 5 5 2 3 -1. + <_> + 5 6 2 1 3. 
+ <_> + + <_> + 10 9 2 2 -1. + <_> + 10 10 2 1 2. + <_> + + <_> + 3 1 4 3 -1. + <_> + 5 1 2 3 2. + <_> + + <_> + 16 0 4 7 -1. + <_> + 16 0 2 7 2. + <_> + + <_> + 0 0 20 1 -1. + <_> + 10 0 10 1 2. + <_> + + <_> + 15 11 1 3 -1. + <_> + 15 12 1 1 3. + <_> + + <_> + 0 4 3 4 -1. + <_> + 1 4 1 4 3. + <_> + + <_> + 16 3 3 6 -1. + <_> + 16 5 3 2 3. + <_> + + <_> + 1 3 3 6 -1. + <_> + 1 5 3 2 3. + <_> + + <_> + 6 2 12 6 -1. + <_> + 12 2 6 3 2. + <_> + 6 5 6 3 2. + <_> + + <_> + 8 10 4 3 -1. + <_> + 8 11 4 1 3. + <_> + + <_> + 4 2 14 6 -1. + <_> + 11 2 7 3 2. + <_> + 4 5 7 3 2. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 15 13 2 3 -1. + <_> + 15 14 2 1 3. + <_> + + <_> + 8 12 4 3 -1. + <_> + 8 13 4 1 3. + <_> + + <_> + 15 11 1 3 -1. + <_> + 15 12 1 1 3. + <_> + + <_> + 7 13 5 2 -1. + <_> + 7 14 5 1 2. + <_> + + <_> + 7 12 6 3 -1. + <_> + 7 13 6 1 3. + <_> + + <_> + 5 11 4 4 -1. + <_> + 5 13 4 2 2. + <_> + + <_> + 11 4 3 3 -1. + <_> + 12 4 1 3 3. + <_> + + <_> + 6 4 3 3 -1. + <_> + 7 4 1 3 3. + <_> + + <_> + 16 5 3 6 -1. + <_> + 17 5 1 6 3. + <_> + + <_> + 3 6 12 7 -1. + <_> + 7 6 4 7 3. + <_> + + <_> + 16 5 3 6 -1. + <_> + 17 5 1 6 3. + <_> + + <_> + 3 13 2 3 -1. + <_> + 3 14 2 1 3. + <_> + + <_> + 16 5 3 6 -1. + <_> + 17 5 1 6 3. + <_> + + <_> + 1 5 3 6 -1. + <_> + 2 5 1 6 3. + <_> + + <_> + 1 9 18 1 -1. + <_> + 7 9 6 1 3. + <_> + + <_> + 0 9 8 7 -1. + <_> + 4 9 4 7 2. + <_> + + <_> + 12 11 8 2 -1. + <_> + 12 12 8 1 2. + <_> + + <_> + 0 11 8 2 -1. + <_> + 0 12 8 1 2. + <_> + + <_> + 9 13 2 3 -1. + <_> + 9 14 2 1 3. + <_> + + <_> + 4 10 12 4 -1. + <_> + 4 10 6 2 2. + <_> + 10 12 6 2 2. + <_> + + <_> + 9 3 3 7 -1. + <_> + 10 3 1 7 3. + <_> + + <_> + 7 2 3 5 -1. + <_> + 8 2 1 5 3. + <_> + + <_> + 9 12 4 6 -1. + <_> + 11 12 2 3 2. + <_> + 9 15 2 3 2. + <_> + + <_> + 8 7 3 6 -1. + <_> + 9 7 1 6 3. + <_> + + <_> + 15 4 4 2 -1. + <_> + 15 5 4 1 2. + <_> + + <_> + 8 7 3 3 -1. + <_> + 9 7 1 3 3. + <_> + + <_> + 14 2 6 4 -1. + <_> + 14 4 6 2 2. + <_> + + <_> + 7 16 6 1 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 15 13 2 3 -1. + <_> + 15 14 2 1 3. + <_> + + <_> + 8 7 3 10 -1. + <_> + 9 7 1 10 3. + <_> + + <_> + 11 10 2 6 -1. + <_> + 11 12 2 2 3. + <_> + + <_> + 6 10 4 1 -1. + <_> + 8 10 2 1 2. + <_> + + <_> + 10 9 2 2 -1. + <_> + 10 10 2 1 2. + <_> + + <_> + 8 9 2 2 -1. + <_> + 8 10 2 1 2. + <_> + + <_> + 12 7 2 2 -1. + <_> + 13 7 1 1 2. + <_> + 12 8 1 1 2. + <_> + + <_> + 5 7 2 2 -1. + <_> + 5 7 1 1 2. + <_> + 6 8 1 1 2. + <_> + + <_> + 13 0 3 14 -1. + <_> + 14 0 1 14 3. + <_> + + <_> + 4 0 3 14 -1. + <_> + 5 0 1 14 3. + <_> + + <_> + 13 4 3 14 -1. + <_> + 14 4 1 14 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 4 2 3 16 -1. + <_> + 5 2 1 16 3. + <_> + + <_> + 7 2 8 10 -1. + <_> + 7 7 8 5 2. + <_> + + <_> + 6 14 7 3 -1. + <_> + 6 15 7 1 3. + <_> + + <_> + 9 2 10 12 -1. + <_> + 14 2 5 6 2. + <_> + 9 8 5 6 2. + <_> + + <_> + 6 7 8 2 -1. + <_> + 6 8 8 1 2. + <_> + + <_> + 8 13 4 6 -1. + <_> + 8 16 4 3 2. + <_> + + <_> + 6 6 1 3 -1. + <_> + 6 7 1 1 3. + <_> + + <_> + 16 2 4 6 -1. + <_> + 16 4 4 2 3. + <_> + + <_> + 6 6 4 2 -1. + <_> + 6 6 2 1 2. + <_> + 8 7 2 1 2. + <_> + + <_> + 16 2 4 6 -1. + <_> + 16 4 4 2 3. + <_> + + <_> + 0 2 4 6 -1. + <_> + 0 4 4 2 3. + <_> + + <_> + 9 6 2 6 -1. + <_> + 9 6 1 6 2. + <_> + + <_> + 3 4 6 10 -1. + <_> + 3 9 6 5 2. + <_> + + <_> + 9 5 2 6 -1. + <_> + 9 5 1 6 2. + <_> + + <_> + 3 13 2 3 -1. + <_> + 3 14 2 1 3. + <_> + + <_> + 13 13 3 2 -1. + <_> + 13 14 3 1 2. + <_> + + <_> + 2 16 10 4 -1. 
+ <_> + 2 16 5 2 2. + <_> + 7 18 5 2 2. + <_> + + <_> + 5 6 10 6 -1. + <_> + 10 6 5 3 2. + <_> + 5 9 5 3 2. + <_> + + <_> + 7 14 1 3 -1. + <_> + 7 15 1 1 3. + <_> + + <_> + 14 16 6 3 -1. + <_> + 14 17 6 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 7 4 10 3 -1. + <_> + 7 5 10 1 3. + <_> + + <_> + 0 4 5 4 -1. + <_> + 0 6 5 2 2. + <_> + + <_> + 13 11 3 9 -1. + <_> + 13 14 3 3 3. + <_> + + <_> + 4 11 3 9 -1. + <_> + 4 14 3 3 3. + <_> + + <_> + 9 7 2 1 -1. + <_> + 9 7 1 1 2. + <_> + + <_> + 5 0 6 17 -1. + <_> + 7 0 2 17 3. + <_> + + <_> + 10 3 6 3 -1. + <_> + 10 3 3 3 2. + <_> + + <_> + 2 2 15 4 -1. + <_> + 7 2 5 4 3. + <_> + + <_> + 8 2 8 2 -1. + <_> + 12 2 4 1 2. + <_> + 8 3 4 1 2. + <_> + + <_> + 8 1 3 6 -1. + <_> + 8 3 3 2 3. + <_> + + <_> + 9 17 2 2 -1. + <_> + 9 18 2 1 2. + <_> + + <_> + 0 0 2 14 -1. + <_> + 1 0 1 14 2. + <_> + + <_> + 12 0 7 3 -1. + <_> + 12 1 7 1 3. + <_> + + <_> + 1 14 1 2 -1. + <_> + 1 15 1 1 2. + <_> + + <_> + 14 12 2 8 -1. + <_> + 15 12 1 4 2. + <_> + 14 16 1 4 2. + <_> + + <_> + 1 0 7 3 -1. + <_> + 1 1 7 1 3. + <_> + + <_> + 14 12 2 8 -1. + <_> + 15 12 1 4 2. + <_> + 14 16 1 4 2. + <_> + + <_> + 6 0 8 12 -1. + <_> + 6 0 4 6 2. + <_> + 10 6 4 6 2. + <_> + + <_> + 6 1 8 9 -1. + <_> + 6 4 8 3 3. + <_> + + <_> + 5 2 2 2 -1. + <_> + 5 3 2 1 2. + <_> + + <_> + 13 14 6 6 -1. + <_> + 16 14 3 3 2. + <_> + 13 17 3 3 2. + <_> + + <_> + 0 17 20 2 -1. + <_> + 0 17 10 1 2. + <_> + 10 18 10 1 2. + <_> + + <_> + 10 3 2 6 -1. + <_> + 11 3 1 3 2. + <_> + 10 6 1 3 2. + <_> + + <_> + 5 12 6 2 -1. + <_> + 8 12 3 2 2. + <_> + + <_> + 10 7 6 13 -1. + <_> + 10 7 3 13 2. + <_> + + <_> + 5 15 10 5 -1. + <_> + 10 15 5 5 2. + <_> + + <_> + 10 4 4 10 -1. + <_> + 10 4 2 10 2. + <_> + + <_> + 5 7 2 1 -1. + <_> + 6 7 1 1 2. + <_> + + <_> + 10 3 6 7 -1. + <_> + 10 3 3 7 2. + <_> + + <_> + 4 3 6 7 -1. + <_> + 7 3 3 7 2. + <_> + + <_> + 1 7 18 5 -1. + <_> + 7 7 6 5 3. + <_> + + <_> + 3 17 4 3 -1. + <_> + 5 17 2 3 2. + <_> + + <_> + 8 14 12 6 -1. + <_> + 14 14 6 3 2. + <_> + 8 17 6 3 2. + <_> + + <_> + 0 13 20 4 -1. + <_> + 0 13 10 2 2. + <_> + 10 15 10 2 2. + <_> + + <_> + 4 5 14 2 -1. + <_> + 11 5 7 1 2. + <_> + 4 6 7 1 2. + <_> + + <_> + 1 2 10 12 -1. + <_> + 1 2 5 6 2. + <_> + 6 8 5 6 2. + <_> + + <_> + 6 1 14 3 -1. + <_> + 6 2 14 1 3. + <_> + + <_> + 8 16 2 3 -1. + <_> + 8 17 2 1 3. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 5 15 4 2 -1. + <_> + 5 15 2 1 2. + <_> + 7 16 2 1 2. + <_> + + <_> + 10 15 1 3 -1. + <_> + 10 16 1 1 3. + <_> + + <_> + 8 16 4 4 -1. + <_> + 8 16 2 2 2. + <_> + 10 18 2 2 2. + <_> + + <_> + 6 11 8 6 -1. + <_> + 6 14 8 3 2. + <_> + + <_> + 2 13 5 2 -1. + <_> + 2 14 5 1 2. + <_> + + <_> + 13 14 6 6 -1. + <_> + 16 14 3 3 2. + <_> + 13 17 3 3 2. + <_> + + <_> + 1 9 18 4 -1. + <_> + 7 9 6 4 3. + <_> + + <_> + 13 14 6 6 -1. + <_> + 16 14 3 3 2. + <_> + 13 17 3 3 2. + <_> + + <_> + 0 2 1 6 -1. + <_> + 0 4 1 2 3. + <_> + + <_> + 5 0 15 20 -1. + <_> + 5 10 15 10 2. + <_> + + <_> + 1 14 6 6 -1. + <_> + 1 14 3 3 2. + <_> + 4 17 3 3 2. + <_> + + <_> + 8 14 4 6 -1. + <_> + 10 14 2 3 2. + <_> + 8 17 2 3 2. + <_> + + <_> + 7 11 2 1 -1. + <_> + 8 11 1 1 2. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 8 17 3 2 -1. + <_> + 9 17 1 2 3. + <_> + + <_> + 12 14 4 6 -1. + <_> + 14 14 2 3 2. + <_> + 12 17 2 3 2. + <_> + + <_> + 4 14 4 6 -1. + <_> + 4 14 2 3 2. + <_> + 6 17 2 3 2. + <_> + + <_> + 13 14 2 6 -1. + <_> + 14 14 1 3 2. + <_> + 13 17 1 3 2. + <_> + + <_> + 5 14 2 6 -1. + <_> + 5 14 1 3 2. + <_> + 6 17 1 3 2. 
+ <_> + + <_> + 7 0 6 12 -1. + <_> + 7 4 6 4 3. + <_> + + <_> + 0 7 12 2 -1. + <_> + 4 7 4 2 3. + <_> + + <_> + 10 3 3 13 -1. + <_> + 11 3 1 13 3. + <_> + + <_> + 7 3 3 13 -1. + <_> + 8 3 1 13 3. + <_> + + <_> + 10 8 6 3 -1. + <_> + 10 9 6 1 3. + <_> + + <_> + 3 11 3 2 -1. + <_> + 4 11 1 2 3. + <_> + + <_> + 13 12 6 8 -1. + <_> + 16 12 3 4 2. + <_> + 13 16 3 4 2. + <_> + + <_> + 7 6 6 5 -1. + <_> + 9 6 2 5 3. + <_> + + <_> + 17 11 2 7 -1. + <_> + 17 11 1 7 2. + <_> + + <_> + 3 13 8 2 -1. + <_> + 7 13 4 2 2. + <_> + + <_> + 6 9 8 3 -1. + <_> + 6 10 8 1 3. + <_> + + <_> + 4 3 4 3 -1. + <_> + 4 4 4 1 3. + <_> + + <_> + 11 3 4 3 -1. + <_> + 11 4 4 1 3. + <_> + + <_> + 1 4 17 12 -1. + <_> + 1 8 17 4 3. + <_> + + <_> + 11 3 4 3 -1. + <_> + 11 4 4 1 3. + <_> + + <_> + 4 8 6 3 -1. + <_> + 4 9 6 1 3. + <_> + + <_> + 12 3 5 3 -1. + <_> + 12 4 5 1 3. + <_> + + <_> + 1 11 2 7 -1. + <_> + 2 11 1 7 2. + <_> + + <_> + 15 12 2 8 -1. + <_> + 16 12 1 4 2. + <_> + 15 16 1 4 2. + <_> + + <_> + 4 8 11 3 -1. + <_> + 4 9 11 1 3. + <_> + + <_> + 9 13 6 2 -1. + <_> + 12 13 3 1 2. + <_> + 9 14 3 1 2. + <_> + + <_> + 6 13 4 3 -1. + <_> + 6 14 4 1 3. + <_> + + <_> + 9 12 3 3 -1. + <_> + 10 12 1 3 3. + <_> + + <_> + 5 3 3 3 -1. + <_> + 5 4 3 1 3. + <_> + + <_> + 9 4 2 3 -1. + <_> + 9 5 2 1 3. + <_> + + <_> + 0 2 16 3 -1. + <_> + 0 3 16 1 3. + <_> + + <_> + 15 12 2 8 -1. + <_> + 16 12 1 4 2. + <_> + 15 16 1 4 2. + <_> + + <_> + 3 12 2 8 -1. + <_> + 3 12 1 4 2. + <_> + 4 16 1 4 2. + <_> + + <_> + 14 13 3 6 -1. + <_> + 14 15 3 2 3. + <_> + + <_> + 3 13 3 6 -1. + <_> + 3 15 3 2 3. + <_> + + <_> + 6 5 10 2 -1. + <_> + 11 5 5 1 2. + <_> + 6 6 5 1 2. + <_> + + <_> + 2 14 14 6 -1. + <_> + 2 17 14 3 2. + <_> + + <_> + 10 14 1 3 -1. + <_> + 10 15 1 1 3. + <_> + + <_> + 4 16 2 2 -1. + <_> + 4 16 1 1 2. + <_> + 5 17 1 1 2. + <_> + + <_> + 10 6 2 3 -1. + <_> + 10 7 2 1 3. + <_> + + <_> + 0 17 20 2 -1. + <_> + 0 17 10 1 2. + <_> + 10 18 10 1 2. + <_> + + <_> + 13 6 1 3 -1. + <_> + 13 7 1 1 3. + <_> + + <_> + 8 13 3 2 -1. + <_> + 9 13 1 2 3. + <_> + + <_> + 12 2 3 3 -1. + <_> + 13 2 1 3 3. + <_> + + <_> + 3 18 2 2 -1. + <_> + 3 18 1 1 2. + <_> + 4 19 1 1 2. + <_> + + <_> + 9 16 3 4 -1. + <_> + 10 16 1 4 3. + <_> + + <_> + 6 6 1 3 -1. + <_> + 6 7 1 1 3. + <_> + + <_> + 13 1 5 2 -1. + <_> + 13 2 5 1 2. + <_> + + <_> + 7 14 6 2 -1. + <_> + 7 14 3 1 2. + <_> + 10 15 3 1 2. + <_> + + <_> + 11 3 3 4 -1. + <_> + 12 3 1 4 3. + <_> + + <_> + 1 13 12 6 -1. + <_> + 5 13 4 6 3. + <_> + + <_> + 14 11 5 2 -1. + <_> + 14 12 5 1 2. + <_> + + <_> + 2 15 14 4 -1. + <_> + 2 15 7 2 2. + <_> + 9 17 7 2 2. + <_> + + <_> + 3 7 14 2 -1. + <_> + 10 7 7 1 2. + <_> + 3 8 7 1 2. + <_> + + <_> + 1 11 4 2 -1. + <_> + 1 12 4 1 2. + <_> + + <_> + 14 0 6 14 -1. + <_> + 16 0 2 14 3. + <_> + + <_> + 4 11 1 3 -1. + <_> + 4 12 1 1 3. + <_> + + <_> + 14 0 6 14 -1. + <_> + 16 0 2 14 3. + <_> + + <_> + 1 10 3 7 -1. + <_> + 2 10 1 7 3. + <_> + + <_> + 8 12 9 2 -1. + <_> + 8 13 9 1 2. + <_> + + <_> + 0 6 20 1 -1. + <_> + 10 6 10 1 2. + <_> + + <_> + 8 4 4 4 -1. + <_> + 8 4 2 4 2. + <_> + + <_> + 0 0 2 2 -1. + <_> + 0 1 2 1 2. + <_> + + <_> + 5 3 10 9 -1. + <_> + 5 6 10 3 3. + <_> + + <_> + 15 2 4 10 -1. + <_> + 15 2 2 10 2. + <_> + + <_> + 8 2 2 7 -1. + <_> + 9 2 1 7 2. + <_> + + <_> + 7 4 12 1 -1. + <_> + 11 4 4 1 3. + <_> + + <_> + 3 4 9 1 -1. + <_> + 6 4 3 1 3. + <_> + + <_> + 15 10 1 4 -1. + <_> + 15 12 1 2 2. + <_> + + <_> + 4 10 6 4 -1. + <_> + 7 10 3 4 2. + <_> + + <_> + 15 9 1 6 -1. + <_> + 15 12 1 3 2. + <_> + + <_> + 7 17 6 3 -1. + <_> + 7 18 6 1 3. 
+ <_> + + <_> + 14 3 2 16 -1. + <_> + 15 3 1 8 2. + <_> + 14 11 1 8 2. + <_> + + <_> + 4 9 1 6 -1. + <_> + 4 12 1 3 2. + <_> + + <_> + 12 1 5 2 -1. + <_> + 12 2 5 1 2. + <_> + + <_> + 6 18 4 2 -1. + <_> + 6 18 2 1 2. + <_> + 8 19 2 1 2. + <_> + + <_> + 2 4 16 10 -1. + <_> + 10 4 8 5 2. + <_> + 2 9 8 5 2. + <_> + + <_> + 6 5 1 10 -1. + <_> + 6 10 1 5 2. + <_> + + <_> + 4 8 15 2 -1. + <_> + 9 8 5 2 3. + <_> + + <_> + 1 8 15 2 -1. + <_> + 6 8 5 2 3. + <_> + + <_> + 9 5 3 6 -1. + <_> + 9 7 3 2 3. + <_> + + <_> + 5 7 8 2 -1. + <_> + 9 7 4 2 2. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 1 0 16 3 -1. + <_> + 1 1 16 1 3. + <_> + + <_> + 11 2 7 2 -1. + <_> + 11 3 7 1 2. + <_> + + <_> + 5 1 10 18 -1. + <_> + 5 7 10 6 3. + <_> + + <_> + 17 4 3 2 -1. + <_> + 18 4 1 2 3. + <_> + + <_> + 8 13 1 3 -1. + <_> + 8 14 1 1 3. + <_> + + <_> + 3 14 14 6 -1. + <_> + 3 16 14 2 3. + <_> + + <_> + 0 2 3 4 -1. + <_> + 1 2 1 4 3. + <_> + + <_> + 12 1 5 2 -1. + <_> + 12 2 5 1 2. + <_> + + <_> + 3 1 5 2 -1. + <_> + 3 2 5 1 2. + <_> + + <_> + 10 13 2 3 -1. + <_> + 10 14 2 1 3. + <_> + + <_> + 8 13 2 3 -1. + <_> + 8 14 2 1 3. + <_> + + <_> + 14 12 2 3 -1. + <_> + 14 13 2 1 3. + <_> + + <_> + 7 2 2 3 -1. + <_> + 7 3 2 1 3. + <_> + + <_> + 5 6 10 4 -1. + <_> + 10 6 5 2 2. + <_> + 5 8 5 2 2. + <_> + + <_> + 9 13 1 6 -1. + <_> + 9 16 1 3 2. + <_> + + <_> + 10 12 2 2 -1. + <_> + 11 12 1 1 2. + <_> + 10 13 1 1 2. + <_> + + <_> + 4 12 2 3 -1. + <_> + 4 13 2 1 3. + <_> + + <_> + 14 4 6 6 -1. + <_> + 14 6 6 2 3. + <_> + + <_> + 8 17 2 3 -1. + <_> + 8 18 2 1 3. + <_> + + <_> + 16 4 4 6 -1. + <_> + 16 6 4 2 3. + <_> + + <_> + 0 4 4 6 -1. + <_> + 0 6 4 2 3. + <_> + + <_> + 14 6 2 3 -1. + <_> + 14 6 1 3 2. + <_> + + <_> + 4 9 8 1 -1. + <_> + 8 9 4 1 2. + <_> + + <_> + 8 12 4 3 -1. + <_> + 8 13 4 1 3. + <_> + + <_> + 5 12 10 6 -1. + <_> + 5 14 10 2 3. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 8 15 4 2 -1. + <_> + 8 16 4 1 2. + <_> + + <_> + 6 9 8 8 -1. + <_> + 10 9 4 4 2. + <_> + 6 13 4 4 2. + <_> + + <_> + 7 12 4 6 -1. + <_> + 7 12 2 3 2. + <_> + 9 15 2 3 2. + <_> + + <_> + 10 11 3 1 -1. + <_> + 11 11 1 1 3. + <_> + + <_> + 9 7 2 10 -1. + <_> + 9 7 1 5 2. + <_> + 10 12 1 5 2. + <_> + + <_> + 8 0 6 6 -1. + <_> + 10 0 2 6 3. + <_> + + <_> + 3 11 2 6 -1. + <_> + 3 13 2 2 3. + <_> + + <_> + 16 12 1 2 -1. + <_> + 16 13 1 1 2. + <_> + + <_> + 1 14 6 6 -1. + <_> + 1 14 3 3 2. + <_> + 4 17 3 3 2. + <_> + + <_> + 13 1 3 6 -1. + <_> + 14 1 1 6 3. + <_> + + <_> + 8 8 2 2 -1. + <_> + 8 9 2 1 2. + <_> + + <_> + 9 9 3 3 -1. + <_> + 10 9 1 3 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 8 8 3 1 3. + <_> + + <_> + 14 0 2 3 -1. + <_> + 14 0 1 3 2. + <_> + + <_> + 1 0 18 9 -1. + <_> + 7 0 6 9 3. + <_> + + <_> + 11 5 4 15 -1. + <_> + 11 5 2 15 2. + <_> + + <_> + 5 5 4 15 -1. + <_> + 7 5 2 15 2. + <_> + + <_> + 14 0 2 3 -1. + <_> + 14 0 1 3 2. + <_> + + <_> + 4 0 2 3 -1. + <_> + 5 0 1 3 2. + <_> + + <_> + 11 12 2 2 -1. + <_> + 12 12 1 1 2. + <_> + 11 13 1 1 2. + <_> + + <_> + 7 12 2 2 -1. + <_> + 7 12 1 1 2. + <_> + 8 13 1 1 2. + <_> + + <_> + 12 0 3 4 -1. + <_> + 13 0 1 4 3. + <_> + + <_> + 4 11 3 3 -1. + <_> + 4 12 3 1 3. + <_> + + <_> + 12 7 4 2 -1. + <_> + 12 8 4 1 2. + <_> + + <_> + 8 10 3 2 -1. + <_> + 9 10 1 2 3. + <_> + + <_> + 9 9 3 2 -1. + <_> + 10 9 1 2 3. + <_> + + <_> + 8 9 3 2 -1. + <_> + 9 9 1 2 3. + <_> + + <_> + 12 0 3 4 -1. + <_> + 13 0 1 4 3. + <_> + + <_> + 5 0 3 4 -1. + <_> + 6 0 1 4 3. + <_> + + <_> + 4 14 12 4 -1. + <_> + 10 14 6 2 2. + <_> + 4 16 6 2 2. + <_> + + <_> + 8 13 2 3 -1. 
+ <_> + 8 14 2 1 3. + <_> + + <_> + 10 10 3 8 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 8 10 4 8 -1. + <_> + 8 10 2 4 2. + <_> + 10 14 2 4 2. + <_> + + <_> + 10 8 3 1 -1. + <_> + 11 8 1 1 3. + <_> + + <_> + 9 12 1 6 -1. + <_> + 9 15 1 3 2. + <_> + + <_> + 10 8 3 1 -1. + <_> + 11 8 1 1 3. + <_> + + <_> + 7 8 3 1 -1. + <_> + 8 8 1 1 3. + <_> + + <_> + 5 2 15 14 -1. + <_> + 5 9 15 7 2. + <_> + + <_> + 2 1 2 10 -1. + <_> + 2 1 1 5 2. + <_> + 3 6 1 5 2. + <_> + + <_> + 14 14 2 3 -1. + <_> + 14 15 2 1 3. + <_> + + <_> + 2 7 3 3 -1. + <_> + 3 7 1 3 3. + <_> + + <_> + 17 4 3 3 -1. + <_> + 17 5 3 1 3. + <_> + + <_> + 0 4 3 3 -1. + <_> + 0 5 3 1 3. + <_> + + <_> + 13 5 6 2 -1. + <_> + 16 5 3 1 2. + <_> + 13 6 3 1 2. + <_> + + <_> + 4 19 12 1 -1. + <_> + 8 19 4 1 3. + <_> + + <_> + 12 12 2 4 -1. + <_> + 12 14 2 2 2. + <_> + + <_> + 3 15 1 3 -1. + <_> + 3 16 1 1 3. + <_> + + <_> + 11 16 6 4 -1. + <_> + 11 16 3 4 2. + <_> + + <_> + 2 10 3 10 -1. + <_> + 3 10 1 10 3. + <_> + + <_> + 12 8 2 4 -1. + <_> + 12 8 1 4 2. + <_> + + <_> + 6 8 2 4 -1. + <_> + 7 8 1 4 2. + <_> + + <_> + 10 14 2 3 -1. + <_> + 10 14 1 3 2. + <_> + + <_> + 5 1 10 3 -1. + <_> + 10 1 5 3 2. + <_> + + <_> + 10 7 3 2 -1. + <_> + 11 7 1 2 3. + <_> + + <_> + 5 6 9 2 -1. + <_> + 8 6 3 2 3. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 2 11 16 6 -1. + <_> + 2 11 8 3 2. + <_> + 10 14 8 3 2. + <_> + + <_> + 12 7 2 2 -1. + <_> + 13 7 1 1 2. + <_> + 12 8 1 1 2. + <_> + + <_> + 9 5 2 3 -1. + <_> + 9 6 2 1 3. + <_> + + <_> + 9 7 3 2 -1. + <_> + 10 7 1 2 3. + <_> + + <_> + 5 1 8 12 -1. + <_> + 5 7 8 6 2. + <_> + + <_> + 13 5 2 2 -1. + <_> + 13 6 2 1 2. + <_> + + <_> + 5 5 2 2 -1. + <_> + 5 6 2 1 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 4 14 2 3 -1. + <_> + 4 15 2 1 3. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 9 14 2 6 -1. + <_> + 10 14 1 3 2. + <_> + 9 17 1 3 2. + <_> + + <_> + 8 14 3 2 -1. + <_> + 9 14 1 2 3. + <_> + + <_> + 9 5 6 6 -1. + <_> + 11 5 2 6 3. + <_> + + <_> + 5 5 6 6 -1. + <_> + 7 5 2 6 3. + <_> + + <_> + 13 13 1 2 -1. + <_> + 13 14 1 1 2. + <_> + + <_> + 0 2 10 2 -1. + <_> + 0 3 10 1 2. + <_> + + <_> + 13 13 1 2 -1. + <_> + 13 14 1 1 2. + <_> + + <_> + 5 7 2 2 -1. + <_> + 5 7 1 1 2. + <_> + 6 8 1 1 2. + <_> + + <_> + 13 5 2 7 -1. + <_> + 13 5 1 7 2. + <_> + + <_> + 6 13 1 2 -1. + <_> + 6 14 1 1 2. + <_> + + <_> + 11 0 3 7 -1. + <_> + 12 0 1 7 3. + <_> + + <_> + 0 3 2 16 -1. + <_> + 0 3 1 8 2. + <_> + 1 11 1 8 2. + <_> + + <_> + 11 0 3 7 -1. + <_> + 12 0 1 7 3. + <_> + + <_> + 6 0 3 7 -1. + <_> + 7 0 1 7 3. + <_> + + <_> + 11 16 8 4 -1. + <_> + 11 16 4 4 2. + <_> + + <_> + 1 16 8 4 -1. + <_> + 5 16 4 4 2. + <_> + + <_> + 13 5 2 7 -1. + <_> + 13 5 1 7 2. + <_> + + <_> + 5 5 2 7 -1. + <_> + 6 5 1 7 2. + <_> + + <_> + 18 6 2 14 -1. + <_> + 18 13 2 7 2. + <_> + + <_> + 6 10 3 4 -1. + <_> + 6 12 3 2 2. + <_> + + <_> + 14 7 1 2 -1. + <_> + 14 8 1 1 2. + <_> + + <_> + 0 1 18 6 -1. + <_> + 0 1 9 3 2. + <_> + 9 4 9 3 2. + <_> + + <_> + 14 7 1 2 -1. + <_> + 14 8 1 1 2. + <_> + + <_> + 0 6 2 14 -1. + <_> + 0 13 2 7 2. + <_> + + <_> + 17 0 3 12 -1. + <_> + 18 0 1 12 3. + <_> + + <_> + 0 6 18 3 -1. + <_> + 0 7 18 1 3. + <_> + + <_> + 6 0 14 16 -1. + <_> + 6 8 14 8 2. + <_> + + <_> + 0 0 3 12 -1. + <_> + 1 0 1 12 3. + <_> + + <_> + 13 0 3 7 -1. + <_> + 14 0 1 7 3. + <_> + + <_> + 5 7 1 2 -1. + <_> + 5 8 1 1 2. + <_> + + <_> + 14 4 6 6 -1. + <_> + 14 6 6 2 3. + <_> + + <_> + 5 7 7 2 -1. + <_> + 5 8 7 1 2. + <_> + + <_> + 8 6 6 9 -1. 
+ <_> + 8 9 6 3 3. + <_> + + <_> + 5 4 6 1 -1. + <_> + 7 4 2 1 3. + <_> + + <_> + 13 0 6 4 -1. + <_> + 16 0 3 2 2. + <_> + 13 2 3 2 2. + <_> + + <_> + 1 2 18 12 -1. + <_> + 1 6 18 4 3. + <_> + + <_> + 3 2 17 12 -1. + <_> + 3 6 17 4 3. + <_> + + <_> + 5 14 7 3 -1. + <_> + 5 15 7 1 3. + <_> + + <_> + 10 14 1 3 -1. + <_> + 10 15 1 1 3. + <_> + + <_> + 3 14 3 3 -1. + <_> + 3 15 3 1 3. + <_> + + <_> + 14 4 6 6 -1. + <_> + 14 6 6 2 3. + <_> + + <_> + 0 4 6 6 -1. + <_> + 0 6 6 2 3. + <_> + + <_> + 12 5 4 3 -1. + <_> + 12 6 4 1 3. + <_> + + <_> + 4 5 4 3 -1. + <_> + 4 6 4 1 3. + <_> + + <_> + 18 0 2 6 -1. + <_> + 18 2 2 2 3. + <_> + + <_> + 8 1 4 9 -1. + <_> + 10 1 2 9 2. + <_> + + <_> + 6 6 8 2 -1. + <_> + 6 6 4 2 2. + <_> + + <_> + 6 5 4 2 -1. + <_> + 6 5 2 1 2. + <_> + 8 6 2 1 2. + <_> + + <_> + 10 5 2 3 -1. + <_> + 10 6 2 1 3. + <_> + + <_> + 9 5 1 3 -1. + <_> + 9 6 1 1 3. + <_> + + <_> + 9 10 2 2 -1. + <_> + 9 11 2 1 2. + <_> + + <_> + 0 8 4 3 -1. + <_> + 0 9 4 1 3. + <_> + + <_> + 6 0 8 6 -1. + <_> + 6 3 8 3 2. + <_> + + <_> + 1 0 6 4 -1. + <_> + 1 0 3 2 2. + <_> + 4 2 3 2 2. + <_> + + <_> + 13 0 3 7 -1. + <_> + 14 0 1 7 3. + <_> + + <_> + 9 16 2 2 -1. + <_> + 9 17 2 1 2. + <_> + + <_> + 11 4 6 10 -1. + <_> + 11 9 6 5 2. + <_> + + <_> + 0 10 19 2 -1. + <_> + 0 11 19 1 2. + <_> + + <_> + 9 5 8 9 -1. + <_> + 9 8 8 3 3. + <_> + + <_> + 4 0 3 7 -1. + <_> + 5 0 1 7 3. + <_> + + <_> + 8 6 4 12 -1. + <_> + 10 6 2 6 2. + <_> + 8 12 2 6 2. + <_> + + <_> + 0 2 6 4 -1. + <_> + 0 4 6 2 2. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 8 0 3 7 -1. + <_> + 9 0 1 7 3. + <_> + + <_> + 9 5 3 4 -1. + <_> + 10 5 1 4 3. + <_> + + <_> + 8 5 3 4 -1. + <_> + 9 5 1 4 3. + <_> + + <_> + 7 6 6 1 -1. + <_> + 9 6 2 1 3. + <_> + + <_> + 7 14 4 4 -1. + <_> + 7 14 2 2 2. + <_> + 9 16 2 2 2. + <_> + + <_> + 13 14 4 6 -1. + <_> + 15 14 2 3 2. + <_> + 13 17 2 3 2. + <_> + + <_> + 7 8 1 8 -1. + <_> + 7 12 1 4 2. + <_> + + <_> + 16 0 2 8 -1. + <_> + 17 0 1 4 2. + <_> + 16 4 1 4 2. + <_> + + <_> + 2 0 2 8 -1. + <_> + 2 0 1 4 2. + <_> + 3 4 1 4 2. + <_> + + <_> + 6 1 14 3 -1. + <_> + 6 2 14 1 3. + <_> + + <_> + 7 9 3 10 -1. + <_> + 7 14 3 5 2. + <_> + + <_> + 9 14 2 2 -1. + <_> + 9 15 2 1 2. + <_> + + <_> + 7 7 6 8 -1. + <_> + 7 11 6 4 2. + <_> + + <_> + 9 7 3 6 -1. + <_> + 9 10 3 3 2. + <_> + + <_> + 7 13 3 3 -1. + <_> + 7 14 3 1 3. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 0 1 18 2 -1. + <_> + 6 1 6 2 3. + <_> + + <_> + 7 1 6 14 -1. + <_> + 7 8 6 7 2. + <_> + + <_> + 1 9 18 1 -1. + <_> + 7 9 6 1 3. + <_> + + <_> + 9 7 2 2 -1. + <_> + 9 7 1 2 2. + <_> + + <_> + 9 3 2 9 -1. + <_> + 10 3 1 9 2. + <_> + + <_> + 18 14 2 3 -1. + <_> + 18 15 2 1 3. + <_> + + <_> + 7 11 3 1 -1. + <_> + 8 11 1 1 3. + <_> + + <_> + 10 8 3 4 -1. + <_> + 11 8 1 4 3. + <_> + + <_> + 7 14 3 6 -1. + <_> + 8 14 1 6 3. + <_> + + <_> + 10 8 3 4 -1. + <_> + 11 8 1 4 3. + <_> + + <_> + 7 8 3 4 -1. + <_> + 8 8 1 4 3. + <_> + + <_> + 7 9 6 9 -1. + <_> + 7 12 6 3 3. + <_> + + <_> + 0 14 2 3 -1. + <_> + 0 15 2 1 3. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 4 3 8 3 -1. + <_> + 8 3 4 3 2. + <_> + + <_> + 0 4 20 6 -1. + <_> + 0 4 10 6 2. + <_> + + <_> + 9 14 1 3 -1. + <_> + 9 15 1 1 3. + <_> + + <_> + 8 14 4 3 -1. + <_> + 8 15 4 1 3. + <_> + + <_> + 0 15 14 4 -1. + <_> + 0 17 14 2 2. + <_> + + <_> + 1 14 18 6 -1. + <_> + 1 17 18 3 2. + <_> + + <_> + 0 0 10 6 -1. + <_> + 0 0 5 3 2. + <_> + 5 3 5 3 2. 
+ diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt2.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt2.xml new file mode 100644 index 0000000000000000000000000000000000000000..b49cf5df3b0c561c9b3887d5e63a93b813847e18 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt2.xml @@ -0,0 +1,20719 @@ + + + +BOOST + HAAR + 20 + 20 + + 109 + + 0 + 20 + + <_> + 3 + 3.5069230198860168e-01 + + <_> + + 0 1 0 4.3272329494357109e-03 -1 -2 1 1.3076160103082657e-02 + + 3.8381900638341904e-02 8.9652568101882935e-01 + 2.6293140649795532e-01 + <_> + + 0 1 2 5.2434601821005344e-04 -1 -2 3 4.4573000632226467e-03 + + 1.0216630250215530e-01 1.2384019792079926e-01 + 6.9103831052780151e-01 + <_> + + 1 0 4 -9.2708261217921972e-04 -1 -2 5 3.3989109215326607e-04 + + 1.9536970555782318e-01 2.1014410257339478e-01 + 8.2586747407913208e-01 + <_> + 9 + 3.4721779823303223e+00 + + <_> + + 0 1 6 2.3025739938020706e-03 -1 -2 7 4.4174338690936565e-03 + + 1.0183759778738022e-01 8.2190579175949097e-01 + 1.9565549492835999e-01 + <_> + + 0 1 8 2.2203210741281509e-02 -1 -2 9 -1.7283110355492681e-04 + + 2.2054070234298706e-01 7.3263257741928101e-02 + 5.9314841032028198e-01 + <_> + + 0 1 10 4.3567270040512085e-03 -1 -2 11 + -2.6032889727503061e-03 + + 1.8441149592399597e-01 4.0322139859199524e-01 + 8.0665212869644165e-01 + <_> + + 0 1 12 1.7309630056843162e-03 -1 -2 13 + -7.8146401792764664e-03 + + 2.5483280420303345e-01 6.0570698976516724e-01 + 2.7790638804435730e-01 + <_> + + 0 1 14 -8.7343417108058929e-03 -1 -2 15 + 9.4522320432588458e-04 + + 2.8899800777435303e-01 7.6165872812271118e-01 + 3.4956431388854980e-01 + <_> + + 1 0 16 4.9414858222007751e-02 -1 -2 17 + 4.4891750440001488e-03 + + 8.1516528129577637e-01 2.8087830543518066e-01 + 6.0277748107910156e-01 + <_> + + 1 0 18 6.0313619673252106e-02 -1 -2 19 + -1.0762850288301706e-03 + + 7.6075017452239990e-01 4.4440358877182007e-01 + 1.4373120665550232e-01 + <_> + + 1 0 20 -9.5083238556981087e-03 -1 -2 21 + 7.6601309701800346e-03 + + 5.3181701898574829e-01 5.4110521078109741e-01 + 2.1806870400905609e-01 + <_> + + 1 0 22 7.6467678882181644e-03 -1 -2 23 + -8.4662932204082608e-04 + + 1.1589600145816803e-01 2.3406790196895599e-01 + 5.9903818368911743e-01 + <_> + 14 + 5.9844889640808105e+00 + + <_> + + 1 0 24 -4.8506218008697033e-03 -1 -2 25 + -4.6141650527715683e-03 + + 1.8054960668087006e-01 2.1778939664363861e-01 + 8.0182367563247681e-01 + <_> + + 0 1 26 -2.4301309604197741e-03 -1 -2 27 + 4.1787960799410939e-04 + + 1.1413549631834030e-01 1.2030939757823944e-01 + 6.1085307598114014e-01 + <_> + + 0 1 28 1.0010929545387626e-03 -1 -2 29 + 1.0577100329101086e-03 + + 2.0799599587917328e-01 3.3020541071891785e-01 + 7.5110942125320435e-01 + <_> + + 1 0 30 1.2376549420878291e-03 -1 -2 31 + 3.5315038985572755e-04 + + 2.7682220935821533e-01 1.6682930290699005e-01 + 5.8294767141342163e-01 + <_> + + 0 1 32 -1.1953660286962986e-02 -1 -2 33 + 1.4182999730110168e-03 + + 1.5087880194187164e-01 4.3912279605865479e-01 + 7.6465952396392822e-01 + <_> + + 1 0 34 3.4642980899661779e-03 -1 -2 35 + -1.4948950149118900e-02 + + 2.6515561342239380e-01 2.2980530560016632e-01 + 5.4421657323837280e-01 + <_> + + 1 0 36 -1.0506849503144622e-03 -1 -2 37 + -4.0782918222248554e-03 + + 3.6228439211845398e-01 2.6012599468231201e-01 + 7.2336578369140625e-01 + <_> + + 0 1 38 5.4242828628048301e-04 -1 -2 39 + -7.3204059153795242e-03 + + 3.8496789336204529e-01 2.9655128717422485e-01 + 5.4803091287612915e-01 + <_> + + 0 1 40 
1.1421289527788758e-03 -1 -2 41 + 1.1783400550484657e-03 + + 4.1047701239585876e-01 7.2390240430831909e-01 + 2.7872839570045471e-01 + <_> + + 0 1 42 4.4077109545469284e-02 -1 -2 43 + 3.7900090683251619e-03 + + 5.6405162811279297e-01 5.9475481510162354e-01 + 3.3120200037956238e-01 + <_> + + 0 1 44 -2.4291418958455324e-03 -1 -2 45 + 9.4262324273586273e-03 + + 6.6032320261001587e-01 4.6806651353836060e-01 + 2.0643380284309387e-01 + <_> + + 0 1 46 8.0630257725715637e-03 -1 -2 47 + 5.2240812219679356e-03 + + 5.2988511323928833e-01 5.2816027402877808e-01 + 1.9095499813556671e-01 + <_> + + 0 1 48 -7.0630568079650402e-03 -1 -2 49 + 5.6897541508078575e-03 + + 1.3806459307670593e-01 5.4906368255615234e-01 + 1.2602810561656952e-01 + <_> + + 0 1 50 1.2472929665818810e-03 -1 -2 51 + 4.9543488770723343e-02 + + 2.3726630210876465e-01 5.2401661872863770e-01 + 1.7692160606384277e-01 + <_> + 19 + 8.5117864608764648e+00 + + <_> + + 1 0 52 -4.9326149746775627e-03 -1 -2 53 + 2.7918140403926373e-05 + + 1.9980649650096893e-01 2.2993800044059753e-01 + 7.3932111263275146e-01 + <_> + + 1 0 54 3.0876200180500746e-03 -1 -2 55 + 7.4669660534709692e-06 + + 1.5338400006294250e-01 2.0368589460849762e-01 + 5.8549159765243530e-01 + <_> + + 0 1 56 1.8739729421213269e-03 -1 -2 57 + 9.3380251200869679e-04 + + 2.0498959720134735e-01 3.2341998815536499e-01 + 7.3230141401290894e-01 + <_> + + 0 1 58 1.9151850137859583e-03 -1 -2 59 + -5.9683797881007195e-03 + + 3.0451491475105286e-01 2.9321339726448059e-01 + 5.6212961673736572e-01 + <_> + + 0 1 60 -7.2115601506084204e-04 -1 -2 61 + -5.9663117863237858e-03 + + 3.6580368876457214e-01 2.7121558785438538e-01 + 7.2263348102569580e-01 + <_> + + 0 1 62 3.0874179676175117e-02 -1 -2 63 + -1.1099710129201412e-02 + + 4.4198378920555115e-01 3.6129769682884216e-01 + 5.2514511346817017e-01 + <_> + + 0 1 64 2.1164179779589176e-03 -1 -2 65 + -9.4317439943552017e-03 + + 3.6286169290542603e-01 1.6010950505733490e-01 + 7.0522767305374146e-01 + <_> + + 0 1 66 -3.5266019403934479e-03 -1 -2 67 + -1.6907559474930167e-03 + + 1.3012880086898804e-01 1.7863239347934723e-01 + 5.5215299129486084e-01 + <_> + + 0 1 68 4.6470930101349950e-04 -1 -2 69 + -1.0215570218861103e-02 + + 3.4873831272125244e-01 2.6739910244941711e-01 + 6.6679191589355469e-01 + <_> + + 1 0 70 1.2634709710255265e-03 -1 -2 71 + -1.1875299736857414e-02 + + 3.4378638863563538e-01 5.9953361749649048e-01 + 3.4977179765701294e-01 + <_> + + 0 1 72 -1.0732339695096016e-02 -1 -2 73 + 7.1836481802165508e-03 + + 2.1504899859428406e-01 6.2714362144470215e-01 + 2.5195419788360596e-01 + <_> + + 0 1 74 -2.8340889140963554e-02 -1 -2 75 + -4.5813230099156499e-04 + + 8.2411892712116241e-02 5.9100568294525146e-01 + 3.7052011489868164e-01 + <_> + + 1 0 76 4.2940340936183929e-03 -1 -2 77 + 1.0751079767942429e-02 + + 1.5947279334068298e-01 5.9804809093475342e-01 + 2.8325080871582031e-01 + <_> + + 1 0 78 2.2465119138360023e-02 -1 -2 79 + -5.7988539338111877e-02 + + 7.8770911693572998e-01 1.5557409822940826e-01 + 5.2396571636199951e-01 + <_> + + 1 0 80 7.2110891342163086e-03 -1 -2 81 + -4.8367571085691452e-02 + + 6.6203659772872925e-01 1.4247199892997742e-01 + 4.4298338890075684e-01 + <_> + + 0 1 82 -1.4418059960007668e-02 -1 -2 83 + -2.3156389594078064e-02 + + 1.5885409712791443e-01 2.3757989704608917e-01 + 5.2171349525451660e-01 + <_> + + 1 0 84 7.6985340565443039e-03 -1 -2 85 + -5.6248619221150875e-03 + + 1.9417250156402588e-01 6.2784057855606079e-01 + 3.7460449337959290e-01 + <_> + + 1 0 86 -7.2936748620122671e-04 -1 -2 87 + 6.1783898854628205e-04 + + 
3.8409221172332764e-01 3.1064930558204651e-01 + 5.5378472805023193e-01 + <_> + + 1 0 88 -4.5803939428878948e-05 -1 -2 89 + -1.4719359569426160e-05 + + 3.4444490075111389e-01 2.7295520901679993e-01 + 6.4289510250091553e-01 + <_> + 19 + 8.4680156707763672e+00 + + <_> + + 0 1 90 -1.3469370314851403e-03 -1 -2 91 + -2.4774789344519377e-03 + + 1.6570860147476196e-01 2.2738510370254517e-01 + 6.9893497228622437e-01 + <_> + + 0 1 92 5.2632777951657772e-03 -1 -2 93 + 4.9075339920818806e-03 + + 1.5120740234851837e-01 5.5644702911376953e-01 + 1.6054420173168182e-01 + <_> + + 0 1 94 -2.3254349362105131e-03 -1 -2 95 + -1.4665479538962245e-03 + + 1.8802590668201447e-01 3.1224989891052246e-01 + 7.1653962135314941e-01 + <_> + + 1 0 96 -1.2311690300703049e-01 -1 -2 97 + 2.2108340635895729e-03 + + 3.8595831394195557e-01 2.4552939832210541e-01 + 5.6957101821899414e-01 + <_> + + 0 1 98 2.0661531016230583e-03 -1 -2 99 + 3.6130280932411551e-04 + + 2.7165201306343079e-01 2.2933620214462280e-01 + 7.2086298465728760e-01 + <_> + + 1 0 100 7.9957872629165649e-02 -1 -2 101 + 2.6064720004796982e-03 + + 7.8336209058761597e-01 5.5452322959899902e-01 + 2.5506898760795593e-01 + <_> + + 1 0 102 6.5699010156095028e-03 -1 -2 103 + 1.6259610420092940e-03 + + 1.8193900585174561e-01 3.5298758745193481e-01 + 6.5528190135955811e-01 + <_> + + 0 1 104 3.6204981151968241e-03 -1 -2 105 + -4.4391951523721218e-03 + + 5.4623097181320190e-01 1.3598430156707764e-01 + 5.4158151149749756e-01 + <_> + + 0 1 106 -9.0540945529937744e-03 -1 -2 107 + -4.6067481162026525e-04 + + 1.1151199787855148e-01 5.8467197418212891e-01 + 2.5983488559722900e-01 + <_> + + 0 1 108 -5.6621041148900986e-03 -1 -2 109 + 5.1165837794542313e-03 + + 1.6105690598487854e-01 5.3766787052154541e-01 + 1.7394550144672394e-01 + <_> + + 0 1 110 -2.1362339612096548e-03 -1 -2 111 + -5.4809921421110630e-03 + + 1.9020730257034302e-01 3.2720080018043518e-01 + 6.3648408651351929e-01 + <_> + + 0 1 112 -8.1061907112598419e-03 -1 -2 113 + 6.0048708692193031e-03 + + 6.9148528575897217e-01 4.3273261189460754e-01 + 6.9638431072235107e-01 + <_> + + 0 1 114 -8.7028548121452332e-02 -1 -2 115 + -4.7809639945626259e-03 + + 8.5941338539123535e-01 9.7394466400146484e-02 + 4.5870301127433777e-01 + <_> + + 0 1 116 -2.2166660055518150e-03 -1 -2 117 + 1.3642730191349983e-03 + + 2.5546258687973022e-01 3.3190909028053284e-01 + 5.9641027450561523e-01 + <_> + + 0 1 118 -9.0077864006161690e-03 -1 -2 119 + -1.5494120307266712e-02 + + 2.6665949821472168e-01 1.8481859564781189e-01 + 6.2459707260131836e-01 + <_> + + 1 0 120 -4.2165028862655163e-03 -1 -2 121 + 4.3249759823083878e-02 + + 5.3799271583557129e-01 5.1830291748046875e-01 + 2.1704199910163879e-01 + <_> + + 1 0 122 2.8786511393263936e-04 -1 -2 123 + 1.2373150093480945e-03 + + 2.6133841276168823e-01 2.7865320444107056e-01 + 5.9089881181716919e-01 + <_> + + 1 0 124 1.9528300035744905e-03 -1 -2 125 + -1.4947060262784362e-03 + + 2.6128691434860229e-01 5.9154129028320312e-01 + 3.4557819366455078e-01 + <_> + + 1 0 126 3.5878680646419525e-03 -1 -2 127 + -2.5938691105693579e-03 + + 1.5870520472526550e-01 1.2704110145568848e-01 + 5.9794288873672485e-01 + <_> + 27 + 1.2578499794006348e+01 + + <_> + + 0 1 128 3.5810680128633976e-03 -1 -2 129 + -2.8552350122481585e-03 + + 1.9951049983501434e-01 7.3730701208114624e-01 + 2.9217371344566345e-01 + <_> + + 0 1 130 1.9758539274334908e-03 -1 -2 131 + 3.2583118882030249e-03 + + 1.9564199447631836e-01 5.6920468807220459e-01 + 1.8390649557113647e-01 + <_> + + 0 1 132 2.3711679386906326e-04 -1 -2 133 + 
2.5942500215023756e-03 + + 2.1716670691967010e-01 2.7199891209602356e-01 + 7.1502441167831421e-01 + <_> + + 0 1 134 -2.5032449513673782e-02 -1 -2 135 + 6.3087949529290199e-03 + + 1.8251839280128479e-01 5.6998378038406372e-01 + 3.5098528861999512e-01 + <_> + + 1 0 136 -3.2494920305907726e-03 -1 -2 137 + -1.4885730110108852e-02 + + 4.0239268541336060e-01 3.6040958762168884e-01 + 7.2919952869415283e-01 + <_> + + 1 0 138 8.0623216927051544e-03 -1 -2 139 + 2.7405679225921631e-02 + + 6.4914900064468384e-01 5.5189931392669678e-01 + 2.6596811413764954e-01 + <_> + + 1 0 140 3.4368600696325302e-02 -1 -2 141 + -2.7292970567941666e-02 + + 6.7125129699707031e-01 1.6913780570030212e-01 + 4.3262779712677002e-01 + <_> + + 0 1 142 7.4452121043577790e-04 -1 -2 143 + 7.0336280623450875e-04 + + 3.4051001071929932e-01 5.5167931318283081e-01 + 3.3113878965377808e-01 + <_> + + 0 1 144 -1.2275460362434387e-01 -1 -2 145 + 3.2559928949922323e-03 + + 1.6753150522708893e-01 3.6157518625259399e-01 + 6.4207828044891357e-01 + <_> + + 0 1 146 -3.2090399414300919e-02 -1 -2 147 + 3.2957999501377344e-03 + + 2.9210790991783142e-01 5.6130319833755493e-01 + 3.3578601479530334e-01 + <_> + + 0 1 148 -3.2273170072585344e-03 -1 -2 149 + 1.1171669466421008e-03 + + 6.9706428050994873e-01 3.5411500930786133e-01 + 6.1440062522888184e-01 + <_> + + 1 0 150 -1.7279950901865959e-02 -1 -2 151 + 1.1741200461983681e-02 + + 5.5371809005737305e-01 5.3419572114944458e-01 + 2.7571049332618713e-01 + <_> + + 1 0 152 4.6405228786170483e-03 -1 -2 153 + -1.6913030296564102e-02 + + 2.4895210564136505e-01 1.7119289934635162e-01 + 5.5239528417587280e-01 + <_> + + 1 0 154 1.0060169734060764e-02 -1 -2 155 + -6.0715491417795420e-04 + + 8.2734507322311401e-01 3.7793910503387451e-01 + 5.4762518405914307e-01 + <_> + + 1 0 156 -1.0865400545299053e-03 -1 -2 157 + 8.9362077414989471e-03 + + 3.2965409755706787e-01 6.0628837347030640e-01 + 2.4342200160026550e-01 + <_> + + 1 0 158 -2.6372660067863762e-04 -1 -2 159 + 1.3110050000250340e-02 + + 3.8140949606895447e-01 5.5176162719726562e-01 + 3.7268930673599243e-01 + <_> + + 0 1 160 -2.9806280508637428e-03 -1 -2 161 + -4.1619571857154369e-03 + + 1.2296640127897263e-01 7.2522747516632080e-01 + 4.9734550714492798e-01 + <_> + + 0 1 162 3.3842328935861588e-02 -1 -2 163 + -1.2564560165628791e-03 + + 5.3483128547668457e-01 5.8519148826599121e-01 + 4.3841668963432312e-01 + <_> + + 0 1 164 -1.9635230302810669e-02 -1 -2 165 + -9.9625496659427881e-04 + + 2.2978340089321136e-01 6.2959378957748413e-01 + 4.1315990686416626e-01 + <_> + + 0 1 166 -2.3127110674977303e-02 -1 -2 167 + 2.3525709286332130e-02 + + 1.6954590380191803e-01 5.1741302013397217e-01 + 5.9519391506910324e-02 + <_> + + 0 1 168 -1.9356520846486092e-02 -1 -2 169 + -4.1787112131714821e-03 + + 1.3572479784488678e-01 2.9966288805007935e-01 + 5.7916951179504395e-01 + <_> + + 1 0 170 3.1488779932260513e-03 -1 -2 171 + 7.3972279205918312e-03 + + 6.5925890207290649e-01 5.3071719408035278e-01 + 3.7951210141181946e-01 + <_> + + 0 1 172 7.1955118983169086e-06 -1 -2 173 + 4.7114409506320953e-02 + + 3.1283149123191833e-01 5.5378931760787964e-01 + 1.0273090004920959e-01 + <_> + + 0 1 174 7.2878710925579071e-03 -1 -2 175 + -6.1887511983513832e-03 + + 4.6608591079711914e-01 7.1588581800460815e-01 + 4.7244489192962646e-01 + <_> + + 1 0 176 2.9757320880889893e-03 -1 -2 177 + -1.8449809867888689e-03 + + 5.9345688670873642e-02 7.0273017883300781e-01 + 4.7187310457229614e-01 + <_> + + 0 1 178 1.0239540279144421e-04 -1 -2 179 + 2.4277009069919586e-03 + + 5.8947342634201050e-01 
4.8623558878898621e-01 + 5.2475881576538086e-01 + <_> + + 0 1 180 -6.4751312136650085e-02 -1 -2 181 + 3.9380151429213583e-04 + + 6.9174712896347046e-01 4.6696171164512634e-01 + 2.3824059963226318e-01 + <_> + 31 + 1.4546750068664551e+01 + + <_> + + 0 1 182 1.4397440245375037e-03 -1 -2 183 + -5.4068560712039471e-04 + + 2.7734708786010742e-01 7.4271547794342041e-01 + 2.4797350168228149e-01 + <_> + + 1 0 184 -7.1237959673453588e-06 -1 -2 185 + -2.3661039303988218e-03 + + 2.1995030343532562e-01 5.8899897336959839e-01 + 2.5957161188125610e-01 + <_> + + 0 1 186 1.7343269428238273e-03 -1 -2 187 + 1.5874590026214719e-03 + + 1.8601259589195251e-01 4.1518709063529968e-01 + 7.1034741401672363e-01 + <_> + + 1 0 188 3.7285638973116875e-03 -1 -2 189 + -1.2883819639682770e-01 + + 2.5279670953750610e-01 1.3930009305477142e-01 + 5.2545148134231567e-01 + <_> + + 1 0 190 7.9412180930376053e-03 -1 -2 191 + -1.2661729939281940e-02 + + 2.4877290427684784e-01 2.7107000350952148e-01 + 6.6188377141952515e-01 + <_> + + 0 1 192 3.0146789868013002e-05 -1 -2 193 + -1.6330160200595856e-02 + + 3.8128259778022766e-01 2.3264320194721222e-01 + 5.2630108594894409e-01 + <_> + + 0 1 194 1.4622770322603174e-05 -1 -2 195 + -2.0858660340309143e-02 + + 4.2933320999145508e-01 1.6004039347171783e-01 + 6.7823147773742676e-01 + <_> + + 1 0 196 2.8194559272378683e-03 -1 -2 197 + 3.7899368908256292e-03 + + 6.6792941093444824e-01 4.5877051353454590e-01 + 7.1762388944625854e-01 + <_> + + 1 0 198 3.5344641655683517e-02 -1 -2 199 + -1.1571600334718823e-03 + + 1.8640750646591187e-01 5.5382597446441650e-01 + 3.1504508852958679e-01 + <_> + + 0 1 200 -5.8742752298712730e-03 -1 -2 201 + -1.5201780115603469e-05 + + 2.8287911415100098e-01 5.8702242374420166e-01 + 3.7048238515853882e-01 + <_> + + 1 0 202 -2.2681879636365920e-04 -1 -2 203 + 3.7845689803361893e-03 + + 4.2189309000968933e-01 6.6670012474060059e-01 + 2.4611820280551910e-01 + <_> + + 1 0 204 -8.5295992903411388e-05 -1 -2 205 + -4.4394891709089279e-02 + + 3.5575878620147705e-01 1.6655470430850983e-01 + 5.2348488569259644e-01 + <_> + + 0 1 206 1.0126030538231134e-03 -1 -2 207 + -7.6327780261635780e-03 + + 2.8846129775047302e-01 2.9693400859832764e-01 + 6.0801112651824951e-01 + <_> + + 0 1 208 4.0330411866307259e-03 -1 -2 209 + 1.3676689565181732e-01 + + 4.5363900065422058e-01 5.1772642135620117e-01 + 1.4491820335388184e-01 + <_> + + 0 1 210 -5.0060478970408440e-03 -1 -2 211 + -1.2475839816033840e-02 + + 7.6169097423553467e-01 2.1597060561180115e-01 + 5.4601877927780151e-01 + <_> + + 1 0 212 -9.4012258341535926e-04 -1 -2 213 + -1.2191980145871639e-02 + + 3.9262959361076355e-01 3.4788811206817627e-01 + 5.5426627397537231e-01 + <_> + + 0 1 214 -5.4959481349214911e-04 -1 -2 215 + -2.1802430273965001e-04 + + 6.0642760992050171e-01 5.6974071264266968e-01 + 1.7797139286994934e-01 + <_> + + 0 1 216 6.9115799851715565e-03 -1 -2 217 + -9.7631698008626699e-04 + + 5.3793722391128540e-01 3.3278390765190125e-01 + 5.4615312814712524e-01 + <_> + + 0 1 218 -8.7870173156261444e-03 -1 -2 219 + -1.6761029837653041e-03 + + 2.1161609888076782e-01 6.6358232498168945e-01 + 4.3658590316772461e-01 + <_> + + 1 0 220 -5.5694948881864548e-02 -1 -2 221 + -1.9844379276037216e-02 + + 5.3874248266220093e-01 1.6028049588203430e-01 + 5.3304588794708252e-01 + <_> + + 0 1 222 -7.4751611100509763e-04 -1 -2 223 + 2.3032890632748604e-02 + + 2.9174768924713135e-01 5.6081241369247437e-01 + 1.9979810714721680e-01 + <_> + + 1 0 224 -3.0700280331075191e-03 -1 -2 225 + -1.1636839481070638e-03 + + 3.9383140206336975e-01 
5.7574361562728882e-01 + 4.2394569516181946e-01 + <_> + + 1 0 226 2.2464339435100555e-01 -1 -2 227 + 1.4412109740078449e-03 + + 7.6765531301498413e-01 5.3538662195205688e-01 + 2.5147768855094910e-01 + <_> + + 0 1 228 -3.0011249706149101e-02 -1 -2 229 + -5.3078960627317429e-02 + + 2.3649039864540100e-01 2.3858639597892761e-01 + 5.4146647453308105e-01 + <_> + + 1 0 230 2.0800929050892591e-03 -1 -2 231 + -4.0738182142376900e-03 + + 6.5116149187088013e-01 6.0304141044616699e-01 + 3.5877010226249695e-01 + <_> + + 1 0 232 -1.9529370591044426e-02 -1 -2 233 + -5.3309470415115356e-02 + + 5.4235929250717163e-01 2.3609539866447449e-01 + 5.4017579555511475e-01 + <_> + + 0 1 234 -3.4849561750888824e-02 -1 -2 235 + -1.2658450007438660e-01 + + 2.8369858860969543e-01 1.8135160207748413e-01 + 5.4210460186004639e-01 + <_> + + 0 1 236 7.3325118137290701e-06 -1 -2 237 + -1.1843870393931866e-02 + + 3.9803659915924072e-01 2.6163849234580994e-01 + 5.2377301454544067e-01 + <_> + + 0 1 238 -4.8470678739249706e-03 -1 -2 239 + 8.1693977117538452e-03 + + 2.4381080269813538e-01 5.3271460533142090e-01 + 8.1903767585754395e-01 + <_> + + 1 0 240 -6.4716790802776814e-03 -1 -2 241 + -1.5188479665084742e-05 + + 4.6796938776969910e-01 5.5639117956161499e-01 + 4.3675860762596130e-01 + <_> + + 1 0 242 3.0696711037307978e-03 -1 -2 243 + -1.6296720423270017e-04 + + 6.6643488407135010e-01 5.5946111679077148e-01 + 3.0427119135856628e-01 + <_> + 39 + 1.8572250366210938e+01 + + <_> + + 1 0 244 -9.8275858908891678e-03 -1 -2 245 + -4.1693858802318573e-03 + + 2.1160189807415009e-01 6.9246852397918701e-01 + 3.0437770485877991e-01 + <_> + + 0 1 246 3.5341319744475186e-04 -1 -2 247 + 4.8054549843072891e-03 + + 3.1832858920097351e-01 5.4565590620040894e-01 + 2.5222688913345337e-01 + <_> + + 0 1 248 2.1071180526632816e-04 -1 -2 249 + -2.8318869881331921e-03 + + 2.9026180505752563e-01 3.1304559111595154e-01 + 6.8849372863769531e-01 + <_> + + 1 0 250 -7.5633679443853907e-06 -1 -2 251 + -8.2888139877468348e-04 + + 2.9624658823013306e-01 3.0996260046958923e-01 + 5.7525151968002319e-01 + <_> + + 0 1 252 1.6209259629249573e-03 -1 -2 253 + 9.1338958591222763e-03 + + 3.9931958913803101e-01 4.8273721337318420e-01 + 7.5378328561782837e-01 + <_> + + 0 1 254 -4.1212290525436401e-03 -1 -2 255 + -2.5447290390729904e-03 + + 2.6169270277023315e-01 3.1087028980255127e-01 + 5.4912358522415161e-01 + <_> + + 0 1 256 -6.2652782071381807e-04 -1 -2 257 + -3.6596331483451650e-05 + + 3.2396918535232544e-01 6.5174108743667603e-01 + 4.1789120435714722e-01 + <_> + + 1 0 258 1.3882719911634922e-02 -1 -2 259 + 1.0493700392544270e-03 + + 6.7712038755416870e-01 4.1595110297203064e-01 + 5.6528919935226440e-01 + <_> + + 1 0 260 1.8215360119938850e-02 -1 -2 261 + -1.1334580369293690e-02 + + 7.6896011829376221e-01 2.8733238577842712e-01 + 4.9889329075813293e-01 + <_> + + 1 0 262 -4.1097560897469521e-03 -1 -2 263 + 4.2612891411408782e-04 + + 5.4630082845687866e-01 3.6312350630760193e-01 + 5.5125522613525391e-01 + <_> + + 1 0 264 6.0301548801362514e-03 -1 -2 265 + 3.3587709185667336e-04 + + 1.1437670141458511e-01 2.8910788893699646e-01 + 5.4473417997360229e-01 + <_> + + 1 0 266 6.2279507983475924e-04 -1 -2 267 + -2.5837119668722153e-02 + + 3.0234318971633911e-01 2.1670059859752655e-01 + 5.2781528234481812e-01 + <_> + + 1 0 268 2.1774910390377045e-02 -1 -2 269 + 1.7682299949228764e-03 + + 3.2548341155052185e-01 5.2630507946014404e-01 + 7.5263291597366333e-01 + <_> + + 0 1 270 -1.3793810270726681e-02 -1 -2 271 + -5.0852829590439796e-03 + + 7.4103301763534546e-01 
6.8366098403930664e-01 + 4.5790711045265198e-01 + <_> + + 1 0 272 6.1795017682015896e-03 -1 -2 273 + 1.0030319914221764e-02 + + 7.4499362707138062e-01 4.8607799410820007e-01 + 2.3614570498466492e-01 + <_> + + 0 1 274 -6.4201927743852139e-03 -1 -2 275 + -5.6961281225085258e-03 + + 1.4673270285129547e-01 2.3478199541568756e-01 + 5.3233772516250610e-01 + <_> + + 0 1 276 -7.1498160250484943e-03 -1 -2 277 + 2.4450740311294794e-03 + + 1.4770570397377014e-01 3.4985339641571045e-01 + 5.8035618066787720e-01 + <_> + + 1 0 278 -3.7503410130739212e-02 -1 -2 279 + 4.7799441381357610e-04 + + 5.2595508098602295e-01 4.3628829717636108e-01 + 6.2089228630065918e-01 + <_> + + 0 1 280 -7.0806080475449562e-03 -1 -2 281 + 3.2818000763654709e-02 + + 2.0394609868526459e-01 5.1983588933944702e-01 + 1.3711960613727570e-01 + <_> + + 1 0 282 6.5188988810405135e-04 -1 -2 283 + 4.6485587954521179e-03 + + 6.3234299421310425e-01 4.7201630473136902e-01 + 6.5670871734619141e-01 + <_> + + 0 1 284 -1.9827929791063070e-03 -1 -2 285 + -1.6011310508474708e-03 + + 6.0530602931976318e-01 5.0905191898345947e-01 + 3.1169331073760986e-01 + <_> + + 0 1 286 -3.0539939180016518e-03 -1 -2 287 + 4.3212040327489376e-04 + + 3.4298041462898254e-01 3.8384029269218445e-01 + 5.7755982875823975e-01 + <_> + + 0 1 288 -2.7452120557427406e-02 -1 -2 289 + 9.3099439982324839e-04 + + 2.1434690058231354e-01 5.9529662132263184e-01 + 3.7601581215858459e-01 + <_> + + 0 1 290 6.7144189961254597e-03 -1 -2 291 + -3.3701690845191479e-03 + + 5.6926268339157104e-01 5.7843041419982910e-01 + 3.9742821455001831e-01 + <_> + + 0 1 292 -1.8903959542512894e-02 -1 -2 293 + -6.5850871615111828e-03 + + 1.8188929557800293e-01 6.8491101264953613e-01 + 4.3515840172767639e-01 + <_> + + 1 0 294 5.8810501359403133e-03 -1 -2 295 + 8.0092082498595119e-04 + + 2.7266609668731689e-01 4.2364311218261719e-01 + 5.8446758985519409e-01 + <_> + + 1 0 296 1.8510579830035567e-03 -1 -2 297 + 6.3273650594055653e-03 + + 3.3713209629058838e-01 5.2702218294143677e-01 + 8.0536508560180664e-01 + <_> + + 0 1 298 -3.3820930402725935e-03 -1 -2 299 + -1.9292969955131412e-03 + + 2.8660181164741516e-01 5.8889460563659668e-01 + 3.8957870006561279e-01 + <_> + + 1 0 300 1.4995220117270947e-02 -1 -2 301 + -2.6330750435590744e-02 + + 2.1778169274330139e-01 1.7753170430660248e-01 + 5.6714701652526855e-01 + <_> + + 1 0 302 -4.1734222322702408e-03 -1 -2 303 + 2.7268350124359131e-02 + + 4.6529620885848999e-01 4.7683110833168030e-01 + 5.6952387094497681e-01 + <_> + + 1 0 304 9.8880263976752758e-04 -1 -2 305 + -1.0528849670663476e-03 + + 3.3974018692970276e-01 6.2500411272048950e-01 + 4.2884120345115662e-01 + <_> + + 0 1 306 5.2288072183728218e-03 -1 -2 307 + 3.0395459383726120e-02 + + 5.3477621078491211e-01 4.1155189275741577e-01 + 5.6607538461685181e-01 + <_> + + 0 1 308 -7.9113930463790894e-02 -1 -2 309 + 1.8231669440865517e-02 + + 7.8813230991363525e-01 3.6043399572372437e-01 + 5.5695050954818726e-01 + <_> + + 0 1 310 5.2288072183728218e-03 -1 -2 311 + 4.3922828626818955e-04 + + 5.4166442155838013e-01 5.5071568489074707e-01 + 3.8822770118713379e-01 + <_> + + 0 1 312 -8.6501962505280972e-04 -1 -2 313 + 1.0326979681849480e-03 + + 3.1858509778976440e-01 5.5783641338348389e-01 + 3.2192459702491760e-01 + <_> + + 0 1 314 -7.2997747920453548e-03 -1 -2 315 + -9.3629042385146022e-04 + + 7.0732331275939941e-01 5.5580157041549683e-01 + 4.6138420701026917e-01 + <_> + + 0 1 316 -6.0483231209218502e-03 -1 -2 317 + 6.7529221996665001e-03 + + 6.8692898750305176e-01 4.8703178763389587e-01 + 2.6503708958625793e-01 + <_> + + 0 
1 318 5.3078029304742813e-02 -1 -2 319 + -1.0225810110569000e-03 + + 5.2815151214599609e-01 6.0858821868896484e-01 + 4.3048679828643799e-01 + <_> + + 1 0 320 3.1270649284124374e-02 -1 -2 321 + -6.3522169366478920e-03 + + 5.4458320140838623e-01 5.3283357620239258e-01 + 2.3643240332603455e-01 + <_> + 45 + 2.1578119277954102e+01 + + <_> + + 1 0 322 -6.2215630896389484e-03 -1 -2 323 + 2.1097389981150627e-03 + + 2.6255810260772705e-01 1.5649929642677307e-01 + 6.7928832769393921e-01 + <_> + + 0 1 324 1.0845859535038471e-02 -1 -2 325 + 6.4230401767417789e-04 + + 3.4858089685440063e-01 3.6982551217079163e-01 + 5.9216582775115967e-01 + <_> + + 1 0 326 7.3311722371727228e-04 -1 -2 327 + 1.0134200565516949e-03 + + 3.0070841312408447e-01 3.6249229311943054e-01 + 7.0724260807037354e-01 + <_> + + 0 1 328 1.1093559674918652e-02 -1 -2 329 + -7.9127531498670578e-03 + + 4.4167020916938782e-01 3.0287081003189087e-01 + 5.4173761606216431e-01 + <_> + + 0 1 330 1.2905309908092022e-02 -1 -2 331 + -4.2430912144482136e-03 + + 4.3745040893554688e-01 4.4015899300575256e-01 + 7.5651907920837402e-01 + <_> + + 0 1 332 -2.1304309484548867e-04 -1 -2 333 + -2.2308640182018280e-03 + + 2.3107869923114777e-01 3.5681959986686707e-01 + 5.7499992847442627e-01 + <_> + + 0 1 334 2.6400520000606775e-03 -1 -2 335 + 7.5101032853126526e-02 + + 3.5936889052391052e-01 6.3635677099227905e-01 + 2.3270289599895477e-01 + <_> + + 0 1 336 -7.7012968249619007e-03 -1 -2 337 + 1.5588370151817799e-03 + + 7.0746237039566040e-01 5.7002371549606323e-01 + 3.5904508829116821e-01 + <_> + + 0 1 338 -4.7687938786111772e-04 -1 -2 339 + 8.4234727546572685e-04 + + 2.8054410219192505e-01 4.1254189610481262e-01 + 6.1779958009719849e-01 + <_> + + 1 0 340 -1.2825109995901585e-02 -1 -2 341 + -6.5156567143276334e-04 + + 5.4030781984329224e-01 5.6336438655853271e-01 + 3.3565390110015869e-01 + <_> + + 0 1 342 -1.2006159871816635e-02 -1 -2 343 + 1.3213419588282704e-03 + + 7.1095108985900879e-01 4.9038508534431458e-01 + 2.8245830535888672e-01 + <_> + + 0 1 344 -2.0307440310716629e-02 -1 -2 345 + 4.0180929936468601e-03 + + 1.8913699686527252e-01 5.3779661655426025e-01 + 3.1194949150085449e-01 + <_> + + 1 0 346 4.5315311290323734e-03 -1 -2 347 + -4.4381739571690559e-03 + + 7.2067582607269287e-01 1.8546679615974426e-01 + 4.9817329645156860e-01 + <_> + + 1 0 348 1.5692010056227446e-03 -1 -2 349 + -4.9516442231833935e-03 + + 2.6382741332054138e-01 6.8710672855377197e-01 + 4.7146868705749512e-01 + <_> + + 0 1 350 -2.7429679408669472e-02 -1 -2 351 + 1.4181969454512000e-03 + + 1.5482850372791290e-01 4.3768429756164551e-01 + 6.3273680210113525e-01 + <_> + + 0 1 352 -1.3078940100967884e-02 -1 -2 353 + -3.5092779435217381e-03 + + 3.1668141484260559e-01 6.1997437477111816e-01 + 4.3796870112419128e-01 + <_> + + 1 0 354 1.8920730799436569e-02 -1 -2 355 + 2.1683350205421448e-03 + + 1.4707140624523163e-01 5.8094590902328491e-01 + 3.4319490194320679e-01 + <_> + + 0 1 356 1.6401590546593070e-03 -1 -2 357 + 1.4005920093040913e-04 + + 3.9594578742980957e-01 3.2400250434875488e-01 + 5.6466472148895264e-01 + <_> + + 1 0 358 -3.3137591090053320e-03 -1 -2 359 + -2.9459029901772738e-03 + + 4.2745280265808105e-01 3.3416679501533508e-01 + 6.6279602050781250e-01 + <_> + + 0 1 360 1.3612229668069631e-04 -1 -2 361 + 6.0512032359838486e-04 + + 4.0469279885292053e-01 5.4840582609176636e-01 + 3.5699409246444702e-01 + <_> + + 0 1 362 -1.7513990402221680e-02 -1 -2 363 + -1.8735030665993690e-02 + + 1.8241509795188904e-01 7.9718202352523804e-01 + 5.0685691833496094e-01 + <_> + + 1 0 364 
1.2065649963915348e-02 -1 -2 365 + -2.6544178836047649e-03 + + 2.1670070290565491e-01 6.5841788053512573e-01 + 4.6282431483268738e-01 + <_> + + 1 0 366 1.4501289697363973e-03 -1 -2 367 + 1.0954019613564014e-02 + + 2.0902520418167114e-01 5.1123052835464478e-01 + 7.7845758199691772e-01 + <_> + + 0 1 368 1.5771709382534027e-02 -1 -2 369 + -1.4252689667046070e-02 + + 5.1323592662811279e-01 1.7424149811267853e-01 + 5.2671480178833008e-01 + <_> + + 0 1 370 3.0411860279855318e-05 -1 -2 371 + 2.3486299440264702e-02 + + 3.4184479713439941e-01 5.6312650442123413e-01 + 2.0063939690589905e-01 + <_> + + 1 0 372 5.2205449901521206e-03 -1 -2 373 + -2.5812430307269096e-02 + + 6.2496489286422729e-01 3.2032281160354614e-01 + 5.1993298530578613e-01 + <_> + + 0 1 374 -1.9526650430634618e-03 -1 -2 375 + -8.1470049917697906e-03 + + 6.1407059431076050e-01 6.5928959846496582e-01 + 3.7111249566078186e-01 + <_> + + 1 0 376 3.2962448894977570e-03 -1 -2 377 + -1.3961310032755136e-03 + + 2.9521119594573975e-01 3.3208039402961731e-01 + 5.5284148454666138e-01 + <_> + + 0 1 378 -4.1055441834032536e-03 -1 -2 379 + -1.0888779535889626e-02 + + 1.7105500400066376e-01 3.3594349026679993e-01 + 5.6749051809310913e-01 + <_> + + 1 0 380 -7.6768421567976475e-03 -1 -2 381 + -9.7729787230491638e-03 + + 4.7732418775558472e-01 8.0810451507568359e-01 + 4.8458281159400940e-01 + <_> + + 1 0 382 6.0439710505306721e-03 -1 -2 383 + -4.6134641161188483e-04 + + 6.7840021848678589e-01 5.5146390199661255e-01 + 3.6423599720001221e-01 + <_> + + 1 0 384 5.7992361485958099e-02 -1 -2 385 + 5.9384980704635382e-04 + + 1.2544350326061249e-01 4.4248789548873901e-01 + 5.7284617424011230e-01 + <_> + + 0 1 386 -6.2353480607271194e-03 -1 -2 387 + -1.2784929946064949e-02 + + 2.8050419688224792e-01 1.9509120285511017e-01 + 5.6529247760772705e-01 + <_> + + 1 0 388 4.1973669431172311e-04 -1 -2 389 + 8.0646801507100463e-04 + + 6.1664837598800659e-01 4.5265799760818481e-01 + 5.9444868564605713e-01 + <_> + + 1 0 390 -1.6339010326191783e-03 -1 -2 391 + -4.8299999907612801e-03 + + 4.0869420766830444e-01 2.7935269474983215e-01 + 6.4449352025985718e-01 + <_> + + 1 0 392 -6.3992068171501160e-03 -1 -2 393 + 1.0819199681282043e-01 + + 5.6716561317443848e-01 5.3118121623992920e-01 + 2.6143568754196167e-01 + <_> + + 1 0 394 6.5056560561060905e-04 -1 -2 395 + 2.0611250773072243e-02 + + 2.9967740178108215e-01 4.4899430871009827e-01 + 6.8882799148559570e-01 + <_> + + 1 0 396 -2.5129050016403198e-02 -1 -2 397 + 1.7922939732670784e-03 + + 5.1968640089035034e-01 3.4669959545135498e-01 + 5.5335879325866699e-01 + <_> + + 1 0 398 1.5626220265403390e-03 -1 -2 399 + -6.1898730928078294e-04 + + 3.0814400315284729e-01 2.6938709616661072e-01 + 5.5444890260696411e-01 + <_> + + 0 1 400 4.8111421056091785e-03 -1 -2 401 + 2.2484229411929846e-03 + + 5.5878478288650513e-01 4.6721130609512329e-01 + 6.0908252000808716e-01 + <_> + + 0 1 402 -3.0147239565849304e-02 -1 -2 403 + 2.7548679709434509e-01 + + 9.0275919437408447e-01 4.7198349237442017e-01 + 2.1969200670719147e-01 + <_> + + 1 0 404 3.6894630175083876e-03 -1 -2 405 + 7.2957701049745083e-03 + + 6.2730091810226440e-01 4.8392179608345032e-01 + 6.9090622663497925e-01 + <_> + + 0 1 406 -5.6211069226264954e-02 -1 -2 407 + -2.6478560175746679e-03 + + 1.7384879291057587e-01 6.3041448593139648e-01 + 4.4743019342422485e-01 + <_> + + 1 0 408 -1.4534000074490905e-03 -1 -2 409 + 2.8540920466184616e-03 + + 5.3025382757186890e-01 5.3383970260620117e-01 + 3.7968829274177551e-01 + <_> + + 1 0 410 5.8243022067472339e-04 -1 -2 411 + 9.2509482055902481e-04 + 
+ 3.2698369026184082e-01 4.5548120141029358e-01 + 6.3583481311798096e-01 + <_> + 47 + 2.2585290908813477e+01 + + <_> + + 0 1 412 1.9806440919637680e-02 -1 -2 413 + 7.0395611692219973e-04 + + 2.8097251057624817e-01 3.1198260188102722e-01 + 7.0903062820434570e-01 + <_> + + 0 1 414 2.5563780218362808e-03 -1 -2 415 + 1.0824160417541862e-03 + + 2.9819479584693909e-01 3.0205601453781128e-01 + 5.8088111877441406e-01 + <_> + + 1 0 416 -9.2893769033253193e-04 -1 -2 417 + -1.8009729683399200e-02 + + 3.7381029129028320e-01 2.1631260216236115e-01 + 6.6192537546157837e-01 + <_> + + 1 0 418 2.3500190582126379e-03 -1 -2 419 + 8.1822491483762860e-04 + + 2.9104039072990417e-01 5.5786228179931641e-01 + 3.3666279911994934e-01 + <_> + + 0 1 420 6.2095321482047439e-04 -1 -2 421 + 9.6780969761312008e-04 + + 4.0724259614944458e-01 6.8595957756042480e-01 + 3.1054618954658508e-01 + <_> + + 1 0 422 4.8000211245380342e-04 -1 -2 423 + 9.0538640506565571e-05 + + 3.3373329043388367e-01 3.3709588646888733e-01 + 5.4512107372283936e-01 + <_> + + 0 1 424 -4.3914798647165298e-02 -1 -2 425 + -5.6501338258385658e-03 + + 2.6256701350212097e-01 6.0504627227783203e-01 + 3.2324150204658508e-01 + <_> + + 1 0 426 3.8661491125822067e-03 -1 -2 427 + -6.3069426687434316e-05 + + 3.2626131176948547e-01 5.8173078298568726e-01 + 4.1643899679183960e-01 + <_> + + 1 0 428 5.2533738315105438e-02 -1 -2 429 + 1.3818660518154502e-03 + + 7.0953989028930664e-01 5.2928757667541504e-01 + 2.5413888692855835e-01 + <_> + + 1 0 430 -8.9264067355543375e-04 -1 -2 431 + 8.5579507052898407e-02 + + 4.0853410959243774e-01 5.2632361650466919e-01 + 3.0032029747962952e-01 + <_> + + 1 0 432 -1.8343339615967125e-04 -1 -2 433 + -9.7924815490841866e-03 + + 4.0292051434516907e-01 3.5213199257850647e-01 + 6.6640049219131470e-01 + <_> + + 0 1 434 1.4428620226681232e-02 -1 -2 435 + -4.5687001198530197e-02 + + 4.5935660600662231e-01 1.4747560024261475e-01 + 5.1786321401596069e-01 + <_> + + 0 1 436 -2.5763090234249830e-03 -1 -2 437 + -3.8301859050989151e-02 + + 1.8372780084609985e-01 8.0826580524444580e-01 + 5.1666879653930664e-01 + <_> + + 0 1 438 2.8978290501981974e-03 -1 -2 439 + -2.5165060069411993e-03 + + 4.7980138659477234e-01 3.3462959527969360e-01 + 5.4444491863250732e-01 + <_> + + 0 1 440 5.6281982688233256e-04 -1 -2 441 + 3.6684391088783741e-03 + + 3.5890269279479980e-01 5.9831297397613525e-01 + 2.9839640855789185e-01 + <_> + + 1 0 442 2.1319789811968803e-03 -1 -2 443 + 7.6037310063838959e-03 + + 6.1632239818572998e-01 5.2171301841735840e-01 + 2.0541590452194214e-01 + <_> + + 1 0 444 -1.1668079969240353e-04 -1 -2 445 + 3.1659509986639023e-03 + + 3.4466689825057983e-01 5.5974847078323364e-01 + 2.6737868785858154e-01 + <_> + + 0 1 446 -2.2569499909877777e-02 -1 -2 447 + 2.7129601221531630e-04 + + 6.9002681970596313e-01 4.4866389036178589e-01 + 5.5087852478027344e-01 + <_> + + 0 1 448 -1.5434459783136845e-02 -1 -2 449 + -8.4861656650900841e-03 + + 2.0483230054378510e-01 1.2549529969692230e-01 + 5.0603562593460083e-01 + <_> + + 0 1 450 -1.1807470023632050e-01 -1 -2 451 + -1.2300079688429832e-03 + + 6.7633062601089478e-02 5.6607007980346680e-01 + 4.2922011017799377e-01 + <_> + + 0 1 452 -7.0290351286530495e-03 -1 -2 453 + 8.9325206354260445e-03 + + 7.1364039182662964e-01 4.3388760089874268e-01 + 7.0608752965927124e-01 + <_> + + 1 0 454 -4.7735981643199921e-02 -1 -2 455 + -4.4155579060316086e-02 + + 5.2686852216720581e-01 2.5805801153182983e-01 + 5.4069608449935913e-01 + <_> + + 0 1 456 -2.5983480736613274e-02 -1 -2 457 + -4.7885831445455551e-03 + + 
1.9050540030002594e-01 2.5518929958343506e-01 + 5.3390771150588989e-01 + <_> + + 0 1 458 6.7423451691865921e-03 -1 -2 459 + 1.1654750443994999e-02 + + 4.6933099627494812e-01 5.2619642019271851e-01 + 3.1454348564147949e-01 + <_> + + 0 1 460 -5.6982729583978653e-03 -1 -2 461 + -7.2983349673449993e-03 + + 1.7568530142307281e-01 7.7747297286987305e-01 + 5.1242929697036743e-01 + <_> + + 0 1 462 7.9091778025031090e-03 -1 -2 463 + -1.5874979726504534e-04 + + 5.2845597267150879e-01 3.8878020644187927e-01 + 5.5011737346649170e-01 + <_> + + 0 1 464 -6.2235877849161625e-03 -1 -2 465 + 1.3308860361576080e-03 + + 2.4898290634155273e-01 4.2621460556983948e-01 + 5.9350621700286865e-01 + <_> + + 1 0 466 5.2055278792977333e-03 -1 -2 467 + 1.4065169729292393e-02 + + 2.5452229380607605e-01 4.8519900441169739e-01 + 7.0214188098907471e-01 + <_> + + 0 1 468 -6.7384149879217148e-03 -1 -2 469 + 3.3406780567020178e-03 + + 7.1432709693908691e-01 5.1757252216339111e-01 + 2.8086438775062561e-01 + <_> + + 1 0 470 -1.1880699545145035e-02 -1 -2 471 + 1.4226379571482539e-03 + + 5.1732218265533447e-01 4.5028659701347351e-01 + 5.7956951856613159e-01 + <_> + + 1 0 472 2.9858129564672709e-03 -1 -2 473 + -2.0481580868363380e-03 + + 1.9151160120964050e-01 6.5024322271347046e-01 + 4.5593151450157166e-01 + <_> + + 0 1 474 1.7122729914262891e-03 -1 -2 475 + -1.6980869695544243e-02 + + 5.3762471675872803e-01 7.0562332868576050e-01 + 4.9146059155464172e-01 + <_> + + 0 1 476 -1.1290470138192177e-03 -1 -2 477 + 2.8620059601962566e-03 + + 2.6787060499191284e-01 4.4108539819717407e-01 + 6.3683199882507324e-01 + <_> + + 0 1 478 -3.8065758999437094e-03 -1 -2 479 + 5.9090270660817623e-03 + + 2.7635639905929565e-01 4.8673018813133240e-01 + 6.7287760972976685e-01 + <_> + + 0 1 480 1.1004370171576738e-03 -1 -2 481 + -2.3396299220621586e-03 + + 4.0705141425132751e-01 2.6049488782882690e-01 + 6.1548602581024170e-01 + <_> + + 0 1 482 -3.6068160552531481e-03 -1 -2 483 + 4.0831189602613449e-02 + + 5.7319998741149902e-01 4.9733769893646240e-01 + 7.3870068788528442e-01 + <_> + + 0 1 484 -7.1082250215113163e-03 -1 -2 485 + -9.3759730225428939e-04 + + 6.9847512245178223e-01 2.6911678910255432e-01 + 4.7417798638343811e-01 + <_> + + 0 1 486 -1.6740820137783885e-03 -1 -2 487 + 8.8287703692913055e-02 + + 3.5510140657424927e-01 5.2446138858795166e-01 + 2.0966500043869019e-01 + <_> + + 0 1 488 8.2009629113599658e-04 -1 -2 489 + -7.6624617213383317e-04 + + 4.1310968995094299e-01 4.6202930808067322e-01 + 6.7754101753234863e-01 + <_> + + 1 0 490 6.5769668435677886e-04 -1 -2 491 + -2.1304790861904621e-03 + + 5.6282752752304077e-01 5.5768597126007080e-01 + 4.5776501297950745e-01 + <_> + + 1 0 492 -3.7317050737328827e-04 -1 -2 493 + -1.1172230355441570e-02 + + 4.9592560529708862e-01 5.6256359815597534e-01 + 2.0471079647541046e-01 + <_> + + 1 0 494 4.3435219675302505e-02 -1 -2 495 + 9.6736161503940821e-04 + + 2.2421480715274811e-01 4.5333439111709595e-01 + 6.1999320983886719e-01 + <_> + + 0 1 496 -3.1452889088541269e-03 -1 -2 497 + 1.5233129961416125e-03 + + 6.6627562046051025e-01 5.0079882144927979e-01 + 2.3849929869174957e-01 + <_> + + 1 0 498 2.0854279864579439e-03 -1 -2 499 + 3.6098200827836990e-02 + + 3.7535008788108826e-01 5.1771712303161621e-01 + 1.6344930231571198e-01 + <_> + + 1 0 500 1.6179570229724050e-03 -1 -2 501 + -6.2132300809025764e-04 + + 2.5873818993568420e-01 6.2995338439941406e-01 + 4.6587899327278137e-01 + <_> + + 1 0 502 7.1878539165481925e-04 -1 -2 503 + -3.9339520037174225e-02 + + 3.3540761470794678e-01 2.1541289985179901e-01 + 
5.2357137203216553e-01 + <_> + + 0 1 504 -1.0988829890266061e-03 -1 -2 505 + 2.1191420964896679e-03 + + 6.4688968658447266e-01 2.8930890560150146e-01 + 5.2548158168792725e-01 + <_> + 53 + 2.5609300613403320e+01 + + <_> + + 0 1 506 5.2359891124069691e-03 -1 -2 507 + -2.2169889416545630e-03 + + 3.2997110486030579e-01 7.0415931940078735e-01 + 3.2354658842086792e-01 + <_> + + 1 0 508 -8.2303592935204506e-03 -1 -2 509 + -8.2303592935204506e-03 + + 4.9611708521842957e-01 7.1280431747436523e-01 + 4.9611708521842957e-01 + <_> + + 0 1 510 4.5343261444941163e-04 -1 -2 511 + -4.1777061414904892e-04 + + 3.2084721326828003e-01 6.6139167547225952e-01 + 3.5513329505920410e-01 + <_> + + 0 1 512 2.7823769487440586e-03 -1 -2 513 + -6.0361868236213923e-05 + + 3.7101349234580994e-01 5.7463937997817993e-01 + 3.8948801159858704e-01 + <_> + + 1 0 514 3.5061789676547050e-03 -1 -2 515 + 1.7013119941111654e-04 + + 3.0541029572486877e-01 2.8855779767036438e-01 + 6.4877450466156006e-01 + <_> + + 1 0 516 -2.3378930054605007e-03 -1 -2 517 + -2.1369170863181353e-03 + + 3.1744310259819031e-01 3.8209199905395508e-01 + 5.2328932285308838e-01 + <_> + + 0 1 518 1.0250400518998504e-03 -1 -2 519 + -4.4726220949087292e-05 + + 3.6227950453758240e-01 6.5389591455459595e-01 + 4.0036809444427490e-01 + <_> + + 1 0 520 5.7102291611954570e-04 -1 -2 521 + 5.7743012439459562e-04 + + 3.8931730389595032e-01 5.6145328283309937e-01 + 3.6876440048217773e-01 + <_> + + 1 0 522 7.9692091094329953e-04 -1 -2 523 + 3.5945948911830783e-04 + + 6.4430278539657593e-01 3.3808529376983643e-01 + 5.8246481418609619e-01 + <_> + + 1 0 524 4.3973900028504431e-04 -1 -2 525 + -8.9061429025605321e-04 + + 3.9387670159339905e-01 3.4279710054397583e-01 + 5.5156987905502319e-01 + <_> + + 1 0 526 5.4110242053866386e-03 -1 -2 527 + -8.5764907998964190e-04 + + 3.8035380840301514e-01 6.4395052194595337e-01 + 4.1683459281921387e-01 + <_> + + 0 1 528 -2.2000649943947792e-02 -1 -2 529 + -7.8731682151556015e-03 + + 6.6546010971069336e-01 4.1827228665351868e-01 + 5.6047242879867554e-01 + <_> + + 0 1 530 -2.7444459497928619e-02 -1 -2 531 + 1.9792269449681044e-03 + + 6.5868628025054932e-01 3.2449120283126831e-01 + 4.8828700184822083e-01 + <_> + + 0 1 532 -5.6783691979944706e-03 -1 -2 533 + 1.5057219570735469e-05 + + 2.2290790081024170e-01 4.1072851419448853e-01 + 5.7475912570953369e-01 + <_> + + 0 1 534 -5.4136710241436958e-03 -1 -2 535 + 5.3679239936172962e-03 + + 2.0657970011234283e-01 4.9264231324195862e-01 + 7.1394848823547363e-01 + <_> + + 0 1 536 -3.1426660716533661e-03 -1 -2 537 + 1.0907390154898167e-02 + + 6.7800867557525635e-01 5.2149301767349243e-01 + 1.1439959704875946e-01 + <_> + + 1 0 538 5.8436761610209942e-03 -1 -2 539 + 9.0507230197545141e-05 + + 1.9375260174274445e-01 3.8125771284103394e-01 + 5.5141878128051758e-01 + <_> + + 0 1 540 -1.6345789656043053e-02 -1 -2 541 + 1.5987500082701445e-03 + + 2.4740239977836609e-01 4.8177829384803772e-01 + 5.9230798482894897e-01 + <_> + + 0 1 542 -4.0257978253066540e-03 -1 -2 543 + -6.7750471644103527e-03 + + 7.5082087516784668e-01 2.8798109292984009e-01 + 5.1996952295303345e-01 + <_> + + 0 1 544 -3.2470689620822668e-03 -1 -2 545 + 1.5409620245918632e-03 + + 3.0449101328849792e-01 4.0634828805923462e-01 + 5.6765627861022949e-01 + <_> + + 0 1 546 -1.2858119793236256e-02 -1 -2 547 + -1.4824670506641269e-04 + + 9.6717558801174164e-02 4.5378330349922180e-01 + 6.1153751611709595e-01 + <_> + + 1 0 548 -9.0210810303688049e-03 -1 -2 549 + -2.8795029968023300e-02 + + 4.8077508807182312e-01 3.4037950634956360e-01 + 
5.2555292844772339e-01 + <_> + + 1 0 550 9.0210810303688049e-03 -1 -2 551 + 7.4121179059147835e-03 + + 7.5058358907699585e-01 5.4554468393325806e-01 + 3.2260689139366150e-01 + <_> + + 0 1 552 -3.7217529024928808e-03 -1 -2 553 + 1.9865889847278595e-01 + + 2.3118489980697632e-01 5.2710479497909546e-01 + 1.4699299633502960e-01 + <_> + + 0 1 554 1.5208719560177997e-05 -1 -2 555 + -3.9089918136596680e-03 + + 3.6781388521194458e-01 7.1319299936294556e-01 + 4.9938669800758362e-01 + <_> + + 0 1 556 2.5106288958340883e-03 -1 -2 557 + 2.3921660613268614e-04 + + 5.3120541572570801e-01 4.6893781423568726e-01 + 5.7140219211578369e-01 + <_> + + 1 0 558 6.9443131797015667e-03 -1 -2 559 + 1.2065629707649350e-03 + + 6.9487977027893066e-01 4.0045049786567688e-01 + 5.8748817443847656e-01 + <_> + + 0 1 560 2.5106288958340883e-03 -1 -2 561 + 1.7514040227979422e-03 + + 5.3295719623565674e-01 5.5458492040634155e-01 + 3.4495818614959717e-01 + <_> + + 0 1 562 -4.1978210210800171e-03 -1 -2 563 + 1.3092850567772985e-03 + + 1.2171830236911774e-01 5.3750497102737427e-01 + 3.4156250953674316e-01 + <_> + + 0 1 564 6.7396182566881180e-04 -1 -2 565 + -1.0530710220336914e-02 + + 4.1951790452003479e-01 3.4607538580894470e-01 + 5.1558601856231689e-01 + <_> + + 0 1 566 -4.0672299265861511e-01 -1 -2 567 + -2.6314549148082733e-02 + + 5.8065678924322128e-02 1.4734490215778351e-01 + 5.5593782663345337e-01 + <_> + + 1 0 568 2.2557149641215801e-03 -1 -2 569 + 1.2154860422015190e-02 + + 5.4777151346206665e-01 4.2077910900115967e-01 + 5.6218808889389038e-01 + <_> + + 0 1 570 -1.8436539918184280e-02 -1 -2 571 + 5.3676147945225239e-04 + + 6.4471471309661865e-01 2.7651271224021912e-01 + 4.8885959386825562e-01 + <_> + + 1 0 572 -2.6265541091561317e-03 -1 -2 573 + -5.1119807176291943e-04 + + 5.2646911144256592e-01 5.7853102684020996e-01 + 4.2911028861999512e-01 + <_> + + 1 0 574 4.1454841266386211e-04 -1 -2 575 + -5.5028748465701938e-04 + + 3.4554108977317810e-01 6.0269188880920410e-01 + 4.1438931226730347e-01 + <_> + + 0 1 576 -1.0347720235586166e-03 -1 -2 577 + -3.3966631162911654e-03 + + 6.0952937602996826e-01 6.1082822084426880e-01 + 4.7077208757400513e-01 + <_> + + 1 0 578 3.1795909162610769e-03 -1 -2 579 + -1.6528950072824955e-04 + + 3.2443669438362122e-01 3.8307571411132812e-01 + 5.7343262434005737e-01 + <_> + + 1 0 580 8.3725210279226303e-03 -1 -2 581 + -2.5799809955060482e-03 + + 6.6109192371368408e-01 6.1393070220947266e-01 + 4.6861499547958374e-01 + <_> + + 1 0 582 9.0194388758391142e-04 -1 -2 583 + 3.6952210939489305e-04 + + 3.5200220346450806e-01 2.5787541270256042e-01 + 5.4672420024871826e-01 + <_> + + 0 1 584 9.9746137857437134e-04 -1 -2 585 + -3.6688039544969797e-03 + + 4.8201468586921692e-01 5.7101500034332275e-01 + 4.8319110274314880e-01 + <_> + + 0 1 586 -8.9501030743122101e-04 -1 -2 587 + 5.1904921419918537e-03 + + 6.1336791515350342e-01 4.9285829067230225e-01 + 2.5813090801239014e-01 + <_> + + 0 1 588 4.2274440056644380e-04 -1 -2 589 + 8.5176713764667511e-03 + + 4.4711241126060486e-01 5.1610249280929565e-01 + 3.3165338635444641e-01 + <_> + + 0 1 590 -3.6623608320951462e-02 -1 -2 591 + -4.1103712283074856e-03 + + 9.2606216669082642e-02 8.5221147537231445e-01 + 5.1379078626632690e-01 + <_> + + 1 0 592 -6.6017331555485725e-03 -1 -2 593 + 2.5578640401363373e-02 + + 5.4590600728988647e-01 5.2193528413772583e-01 + 1.9271859526634216e-01 + <_> + + 1 0 594 1.1447439901530743e-02 -1 -2 595 + 7.2427501436322927e-04 + + 1.9160020351409912e-01 5.2315711975097656e-01 + 3.5353401303291321e-01 + <_> + + 1 0 596 
9.7127500921487808e-03 -1 -2 597 + -1.1337569914758205e-02 + + 6.4641010761260986e-01 7.3830378055572510e-01 + 4.9647438526153564e-01 + <_> + + 0 1 598 -8.1453882157802582e-03 -1 -2 599 + -8.5570756345987320e-03 + + 3.6117058992385864e-01 3.4219071269035339e-01 + 5.9435117244720459e-01 + <_> + + 0 1 600 2.2993308957666159e-03 -1 -2 601 + 3.8430930580943823e-03 + + 4.5501041412353516e-01 4.7168621420860291e-01 + 6.6561907529830933e-01 + <_> + + 1 0 602 -9.9116540513932705e-04 -1 -2 603 + 2.5496469810605049e-02 + + 4.5927169919013977e-01 6.5634012222290039e-01 + 1.2588350474834442e-01 + <_> + + 1 0 604 -1.5748359262943268e-02 -1 -2 605 + -1.8046120181679726e-02 + + 5.2395021915435791e-01 8.0158519744873047e-01 + 5.0079578161239624e-01 + <_> + + 1 0 606 1.0323390364646912e-02 -1 -2 607 + 1.6452240524813533e-03 + + 2.2748200595378876e-01 4.3519461154937744e-01 + 5.8676278591156006e-01 + <_> + + 0 1 608 1.5881149098277092e-02 -1 -2 609 + 1.0586519725620747e-02 + + 4.4650518894195557e-01 4.5444580912590027e-01 + 5.7071107625961304e-01 + <_> + + 0 1 610 -2.1531689912080765e-02 -1 -2 611 + 5.2480469457805157e-03 + + 6.5276437997817993e-01 3.4447279572486877e-01 + 5.3246361017227173e-01 + <_> + 67 + 3.2647129058837891e+01 + + <_> + + 0 1 612 1.8219340126961470e-03 -1 -2 613 + 8.1313941627740860e-03 + + 3.1087881326675415e-01 3.1332370638847351e-01 + 6.6458672285079956e-01 + <_> + + 0 1 614 1.7055979697033763e-03 -1 -2 615 + -7.4483548814896494e-05 + + 2.6401311159133911e-01 5.6472051143646240e-01 + 3.4853729605674744e-01 + <_> + + 1 0 616 3.8342390325851738e-04 -1 -2 617 + 3.1868910882622004e-03 + + 3.1406548619270325e-01 6.4891988039016724e-01 + 3.8877290487289429e-01 + <_> + + 1 0 618 1.6044320166110992e-01 -1 -2 619 + -6.7285560071468353e-03 + + 7.2165298461914062e-01 1.6531379520893097e-01 + 5.1398259401321411e-01 + <_> + + 0 1 620 7.2638481469766703e-06 -1 -2 621 + 5.5551197146996856e-04 + + 3.1406199932098389e-01 5.9936988353729248e-01 + 3.3173981308937073e-01 + <_> + + 0 1 622 -1.0822320356965065e-02 -1 -2 623 + -4.5834020711481571e-03 + + 2.6529380679130554e-01 1.8495689332485199e-01 + 5.3139579296112061e-01 + <_> + + 1 0 624 -3.0205070506781340e-03 -1 -2 625 + 7.7864617109298706e-02 + + 4.0400999784469604e-01 6.1581897735595703e-01 + 1.7864869534969330e-01 + <_> + + 0 1 626 2.6494380086660385e-02 -1 -2 627 + 3.6912109702825546e-02 + + 4.5110899209976196e-01 4.5282199978828430e-01 + 5.9722828865051270e-01 + <_> + + 1 0 628 5.7857790961861610e-03 -1 -2 629 + 9.3849771656095982e-04 + + 2.5338920950889587e-01 3.4104120731353760e-01 + 5.9236437082290649e-01 + <_> + + 0 1 630 -1.1003199964761734e-02 -1 -2 631 + -1.1737640015780926e-03 + + 6.9580441713333130e-01 3.8510841131210327e-01 + 5.4081892967224121e-01 + <_> + + 0 1 632 -3.6596669815480709e-03 -1 -2 633 + -2.4822750128805637e-03 + + 2.0093089342117310e-01 6.2953931093215942e-01 + 4.3950408697128296e-01 + <_> + + 0 1 634 -4.4606071896851063e-03 -1 -2 635 + -3.5969649907201529e-03 + + 2.4052999913692474e-01 5.4501742124557495e-01 + 3.7823578715324402e-01 + <_> + + 0 1 636 -3.6222559865564108e-03 -1 -2 637 + 1.2059339787811041e-03 + + 3.0338969826698303e-01 4.6337789297103882e-01 + 6.3359522819519043e-01 + <_> + + 1 0 638 4.3124938383698463e-03 -1 -2 639 + -4.4961250387132168e-03 + + 6.5988260507583618e-01 6.6216969490051270e-01 + 4.7552469372749329e-01 + <_> + + 0 1 640 -1.3860689941793680e-03 -1 -2 641 + -5.1588460337370634e-04 + + 2.8012010455131531e-01 3.8294890522956848e-01 + 5.6236267089843750e-01 + <_> + + 0 1 642 
7.0330002927221358e-05 -1 -2 643 + -2.0976549421902746e-04 + + 4.5363429188728333e-01 5.6081390380859375e-01 + 4.2657798528671265e-01 + <_> + + 1 0 644 1.3642259873449802e-03 -1 -2 645 + 1.5483660390600562e-03 + + 2.6370918750762939e-01 4.1707509756088257e-01 + 5.9329879283905029e-01 + <_> + + 0 1 646 1.9179609417915344e-01 -1 -2 647 + -4.4776909053325653e-03 + + 5.2567642927169800e-01 6.6326218843460083e-01 + 4.8925888538360596e-01 + <_> + + 0 1 648 -1.2649179995059967e-01 -1 -2 649 + 6.5253327193204314e-05 + + 1.4997789263725281e-01 4.2333200573921204e-01 + 5.7560402154922485e-01 + <_> + + 0 1 650 4.1856421157717705e-03 -1 -2 651 + 2.7478230185806751e-04 + + 5.2888268232345581e-01 4.5240178704261780e-01 + 5.6041252613067627e-01 + <_> + + 0 1 652 -2.2906810045242310e-03 -1 -2 653 + 1.6744500026106834e-03 + + 5.5782741308212280e-01 3.3230578899383545e-01 + 5.5587881803512573e-01 + <_> + + 1 0 654 1.2349759927019477e-03 -1 -2 655 + -8.7158754467964172e-03 + + 3.6539471149444580e-01 1.9245339930057526e-01 + 5.3136497735977173e-01 + <_> + + 1 0 656 4.6613621525466442e-03 -1 -2 657 + -8.5815992206335068e-03 + + 2.0277309417724609e-01 7.6360601186752319e-01 + 5.1408261060714722e-01 + <_> + + 0 1 658 1.4352120459079742e-02 -1 -2 659 + -7.7948719263076782e-03 + + 5.2529758214950562e-01 2.6329371333122253e-01 + 5.3286892175674438e-01 + <_> + + 0 1 660 -3.4155680332332850e-03 -1 -2 661 + -4.2639090679585934e-03 + + 2.4160879850387573e-01 3.9365449547767639e-01 + 5.4787421226501465e-01 + <_> + + 0 1 662 8.7177697569131851e-03 -1 -2 663 + -3.2232629600912333e-03 + + 4.7881990671157837e-01 3.6316120624542236e-01 + 5.2883160114288330e-01 + <_> + + 0 1 664 -4.2188368737697601e-02 -1 -2 665 + 1.9875749945640564e-02 + + 6.9311392307281494e-01 4.5201000571250916e-01 + 6.8550550937652588e-01 + <_> + + 1 0 666 -3.1134510412812233e-02 -1 -2 667 + 5.7032387703657150e-03 + + 5.3004240989685059e-01 5.6068921089172363e-01 + 4.2306229472160339e-01 + <_> + + 1 0 668 5.2733682096004486e-03 -1 -2 669 + -3.1231069006025791e-03 + + 3.2472288608551025e-01 1.9856959581375122e-01 + 5.3498727083206177e-01 + <_> + + 0 1 670 4.6453849063254893e-04 -1 -2 671 + 3.0355889350175858e-02 + + 4.2075088620185852e-01 5.1534587144851685e-01 + 3.1181010603904724e-01 + <_> + + 0 1 672 -4.2992769740521908e-03 -1 -2 673 + 1.9509199773892760e-04 + + 3.2745069265365601e-01 5.9530782699584961e-01 + 4.2255210876464844e-01 + <_> + + 0 1 674 -7.7784480527043343e-03 -1 -2 675 + 1.6917599365115166e-02 + + 7.2111797332763672e-01 4.9365919828414917e-01 + 7.0302772521972656e-01 + <_> + + 0 1 676 -5.1948569715023041e-02 -1 -2 677 + -5.4751220159232616e-03 + + 1.4255349338054657e-01 6.0593318939208984e-01 + 4.3939951062202454e-01 + <_> + + 0 1 678 1.5210839592327829e-05 -1 -2 679 + 1.0235579684376717e-03 + + 4.4888499379158020e-01 4.2565500736236572e-01 + 5.7954382896423340e-01 + <_> + + 0 1 680 -1.0427719826111570e-04 -1 -2 681 + 8.7853781878948212e-03 + + 4.2460399866104126e-01 4.9580091238021851e-01 + 6.7594307661056519e-01 + <_> + + 0 1 682 3.4012699034065008e-03 -1 -2 683 + 5.8582378551363945e-04 + + 5.4234808683395386e-01 3.6365428566932678e-01 + 5.4643487930297852e-01 + <_> + + 0 1 684 -2.2973360028117895e-03 -1 -2 685 + -1.4330189675092697e-02 + + 2.5488188862800598e-01 6.5876567363739014e-01 + 4.5328021049499512e-01 + <_> + + 0 1 686 9.8565965890884399e-04 -1 -2 687 + -4.6640761196613312e-02 + + 3.8227710127830505e-01 3.0773219466209412e-01 + 5.2441328763961792e-01 + <_> + + 0 1 688 -1.1907300353050232e-01 -1 -2 689 + 1.9333280622959137e-02 + 
+ 1.0338629782199860e-01 5.5547451972961426e-01 + 3.2213169336318970e-01 + <_> + + 0 1 690 3.1427849084138870e-02 -1 -2 691 + 2.0082130504306406e-04 + + 4.6823790669441223e-01 5.3730702400207520e-01 + 3.8006669282913208e-01 + <_> + + 0 1 692 -6.2584900297224522e-03 -1 -2 693 + 8.2861045375466347e-03 + + 1.7992070317268372e-01 5.0950688123703003e-01 + 7.5446051359176636e-01 + <_> + + 0 1 694 2.0529709290713072e-03 -1 -2 695 + 3.2524869311600924e-03 + + 5.6286448240280151e-01 4.8016890883445740e-01 + 5.8021020889282227e-01 + <_> + + 0 1 696 -3.1884901225566864e-02 -1 -2 697 + 1.8379340181127191e-03 + + 1.7427450418472290e-01 3.4665969014167786e-01 + 5.1071548461914062e-01 + <_> + + 1 0 698 -4.8512680223211646e-04 -1 -2 699 + -2.5407879147678614e-03 + + 5.3260862827301025e-01 6.3427752256393433e-01 + 4.9926930665969849e-01 + <_> + + 0 1 700 -5.1559060811996460e-03 -1 -2 701 + -4.4968750327825546e-02 + + 3.4334290027618408e-01 1.8681369721889496e-01 + 5.2154648303985596e-01 + <_> + + 1 0 702 5.8984281495213509e-03 -1 -2 703 + 3.2763120252639055e-03 + + 6.2293052673339844e-01 4.9357721209526062e-01 + 7.2179448604583740e-01 + <_> + + 1 0 704 -1.0161520185647532e-04 -1 -2 705 + -1.6290300118271261e-04 + + 5.0079762935638428e-01 6.0241490602493286e-01 + 2.3295080661773682e-01 + <_> + + 0 1 706 9.0541364625096321e-03 -1 -2 707 + 3.5398490726947784e-02 + + 4.5104169845581055e-01 5.1419967412948608e-01 + 2.8602918982505798e-01 + <_> + + 0 1 708 5.6469351984560490e-03 -1 -2 709 + -2.4807190056890249e-03 + + 4.7049251198768616e-01 4.1798511147499084e-01 + 6.7266470193862915e-01 + <_> + + 0 1 710 -4.1088787838816643e-03 -1 -2 711 + -2.0714469719678164e-03 + + 5.8098018169403076e-01 6.0747838020324707e-01 + 4.5240598917007446e-01 + <_> + + 0 1 712 -2.8939060866832733e-03 -1 -2 713 + 1.3467279495671391e-03 + + 3.3835199475288391e-01 5.6969100236892700e-01 + 3.9708450436592102e-01 + <_> + + 0 1 714 -9.0779133141040802e-02 -1 -2 715 + -8.3171762526035309e-02 + + 1.5027019381523132e-01 7.5736707448959351e-01 + 4.9364370107650757e-01 + <_> + + 0 1 716 -1.4107000315561891e-03 -1 -2 717 + 5.5668760091066360e-02 + + 3.3909329771995544e-01 5.0250971317291260e-01 + 7.4220830202102661e-01 + <_> + + 0 1 718 5.7701539248228073e-02 -1 -2 719 + -4.2503291368484497e-01 + + 5.1973718404769897e-01 9.7346916794776917e-02 + 5.1857399940490723e-01 + <_> + + 0 1 720 -4.4380719191394746e-04 -1 -2 721 + 1.7924769781529903e-04 + + 3.6493501067161560e-01 5.6192791461944580e-01 + 3.7602970004081726e-01 + <_> + + 1 0 722 5.0382469780743122e-03 -1 -2 723 + 1.5191170386970043e-02 + + 6.3284450769424438e-01 4.9360820651054382e-01 + 7.4265247583389282e-01 + <_> + + 0 1 724 -1.2300389818847179e-02 -1 -2 725 + 1.5168030513450503e-03 + + 1.3893499970436096e-01 5.0919622182846069e-01 + 3.4826481342315674e-01 + <_> + + 1 0 726 9.5754547510296106e-04 -1 -2 727 + -1.8962200731039047e-02 + + 6.0363167524337769e-01 2.3191730678081512e-01 + 5.1166528463363647e-01 + <_> + + 0 1 728 -2.2272260859608650e-02 -1 -2 729 + -2.5145230814814568e-02 + + 6.5550220012664795e-01 1.3260710239410400e-01 + 4.6740341186523438e-01 + <_> + + 0 1 730 1.9533900544047356e-02 -1 -2 731 + -1.1231349781155586e-03 + + 5.1820272207260132e-01 6.3182431459426880e-01 + 4.8255190253257751e-01 + <_> + + 0 1 732 -1.4861139934509993e-03 -1 -2 733 + 3.5002888762392104e-04 + + 2.9186710715293884e-01 5.6213712692260742e-01 + 4.2492130398750305e-01 + <_> + + 1 0 734 -1.1231349781155586e-03 -1 -2 735 + 1.0409739799797535e-02 + + 4.8137450218200684e-01 5.1840060949325562e-01 + 
2.0512230694293976e-01 + <_> + + 0 1 736 -8.7832562625408173e-02 -1 -2 737 + 1.6584879485890269e-03 + + 1.1799219995737076e-01 4.9878111481666565e-01 + 6.9737559556961060e-01 + <_> + + 1 0 738 -2.3008750285953283e-03 -1 -2 739 + 3.3026169985532761e-02 + + 5.3398311138153076e-01 5.0332891941070557e-01 + 6.8519067764282227e-01 + <_> + + 0 1 740 -1.3585069682449102e-03 -1 -2 741 + 7.8067491995170712e-04 + + 3.0028221011161804e-01 4.5930838584899902e-01 + 6.4400452375411987e-01 + <_> + + 1 0 742 -1.8025759607553482e-02 -1 -2 743 + 1.2354910140857100e-03 + + 5.3112912178039551e-01 4.7291061282157898e-01 + 5.7214611768722534e-01 + <_> + + 0 1 744 -9.2583027435466647e-04 -1 -2 745 + 8.0123997759073973e-04 + + 3.6623328924179077e-01 5.3619897365570068e-01 + 3.0086329579353333e-01 + <_> + 63 + 3.0672130584716797e+01 + + <_> + + 0 1 746 2.4914839304983616e-03 -1 -2 747 + -5.0488598644733429e-02 + + 3.4223890304565430e-01 7.7034580707550049e-01 + 4.5163908600807190e-01 + <_> + + 1 0 748 -7.7838351717218757e-04 -1 -2 749 + 2.3572890495415777e-04 + + 3.2563421130180359e-01 3.4065559506416321e-01 + 5.8970272541046143e-01 + <_> + + 0 1 750 4.5575071126222610e-03 -1 -2 751 + 8.1241987645626068e-03 + + 4.3065789341926575e-01 7.1495872735977173e-01 + 4.3456849455833435e-01 + <_> + + 0 1 752 -4.4612158671952784e-04 -1 -2 753 + -2.8972938889637589e-04 + + 3.2959741353988647e-01 5.8456200361251831e-01 + 3.5266879200935364e-01 + <_> + + 0 1 754 7.1604831646254752e-06 -1 -2 755 + -3.8497708737850189e-04 + + 4.0819549560546875e-01 4.2031130194664001e-01 + 6.6341269016265869e-01 + <_> + + 0 1 756 1.9489860278554261e-04 -1 -2 757 + -1.7083849757909775e-02 + + 3.9424669742584229e-01 2.2940720617771149e-01 + 5.2389609813690186e-01 + <_> + + 0 1 758 8.3513697609305382e-04 -1 -2 759 + 7.5499608647078276e-04 + + 3.0260318517684937e-01 6.0321962833404541e-01 + 3.4124588966369629e-01 + <_> + + 1 0 760 8.0216713249683380e-03 -1 -2 761 + -3.8930509239435196e-02 + + 7.3062407970428467e-01 3.5993251204490662e-01 + 5.2343809604644775e-01 + <_> + + 1 0 762 -7.0348767621908337e-05 -1 -2 763 + -8.5350573062896729e-03 + + 3.4937581419944763e-01 2.7461090683937073e-01 + 5.6265860795974731e-01 + <_> + + 0 1 764 1.0854450054466724e-02 -1 -2 765 + 4.5329501153901219e-04 + + 5.2822262048721313e-01 4.5220491290092468e-01 + 6.0543018579483032e-01 + <_> + + 0 1 766 1.8117150466423482e-04 -1 -2 767 + 4.6641560038551688e-04 + + 3.3068621158599854e-01 1.4550000429153442e-01 + 5.3849279880523682e-01 + <_> + + 1 0 768 -8.4854792803525925e-03 -1 -2 769 + -1.8934309482574463e-02 + + 4.8141559958457947e-01 3.5637411475181580e-01 + 5.4051452875137329e-01 + <_> + + 1 0 770 4.9814549274742603e-03 -1 -2 771 + 3.4286780282855034e-03 + + 6.9577431678771973e-01 5.0508928298950195e-01 + 2.3169949650764465e-01 + <_> + + 1 0 772 4.4203791185282171e-04 -1 -2 773 + 2.3822550429031253e-04 + + 6.0185819864273071e-01 4.7550821304321289e-01 + 5.5852377414703369e-01 + <_> + + 0 1 774 -6.4261639490723610e-03 -1 -2 775 + 9.9637769162654877e-03 + + 2.2824659943580627e-01 4.0405881404876709e-01 + 5.6501698493957520e-01 + <_> + + 0 1 776 1.3654050417244434e-02 -1 -2 777 + -9.9892877042293549e-03 + + 5.2677392959594727e-01 6.7940497398376465e-01 + 4.7970339655876160e-01 + <_> + + 1 0 778 3.6558631807565689e-02 -1 -2 779 + 4.8999379941960797e-05 + + 8.8425733149051666e-02 4.0207880735397339e-01 + 5.4573321342468262e-01 + <_> + + 0 1 780 1.3654050417244434e-02 -1 -2 781 + 1.8802779959514737e-03 + + 5.2676129341125488e-01 4.8060521483421326e-01 + 6.3943648338317871e-01 + 
<_> + + 0 1 782 -1.3654050417244434e-02 -1 -2 783 + 1.2778700329363346e-03 + + 1.7248100042343140e-01 4.4798240065574646e-01 + 6.3100087642669678e-01 + <_> + + 1 0 784 9.8843395244330168e-04 -1 -2 785 + 1.4511500012304168e-05 + + 5.9481692314147949e-01 4.8541748523712158e-01 + 5.3093612194061279e-01 + <_> + + 0 1 786 -2.2775429533794522e-04 -1 -2 787 + -1.4753740280866623e-02 + + 3.1836318969726562e-01 3.0849760770797729e-01 + 5.3520262241363525e-01 + <_> + + 0 1 788 -3.4148250706493855e-03 -1 -2 789 + 7.5806681998074055e-03 + + 6.1153268814086914e-01 4.9516460299491882e-01 + 7.0613312721252441e-01 + <_> + + 1 0 790 -5.7734688743948936e-03 -1 -2 791 + 7.4033669079653919e-05 + + 3.7542209029197693e-01 4.1155171394348145e-01 + 5.8894449472427368e-01 + <_> + + 0 1 792 -8.2278084009885788e-03 -1 -2 793 + 5.3380909375846386e-03 + + 9.5610566437244415e-02 5.3005087375640869e-01 + 3.9618980884552002e-01 + <_> + + 0 1 794 -2.7049109339714050e-03 -1 -2 795 + 7.7341338619589806e-03 + + 6.4818692207336426e-01 5.1104402542114258e-01 + 3.1215190887451172e-01 + <_> + + 0 1 796 1.0886609554290771e-02 -1 -2 797 + 1.1038660071790218e-02 + + 4.8014289140701294e-01 5.4297101497650146e-01 + 4.1623631119728088e-01 + <_> + + 0 1 798 -1.0054199956357479e-02 -1 -2 799 + 7.7072880230844021e-03 + + 7.3293352127075195e-01 5.3568720817565918e-01 + 3.4555470943450928e-01 + <_> + + 0 1 800 -5.8278098003938794e-04 -1 -2 801 + -2.5739220436662436e-03 + + 3.6550220847129822e-01 3.7767601013183594e-01 + 5.3917747735977173e-01 + <_> + + 0 1 802 -7.0167761296033859e-03 -1 -2 803 + -1.7727289814502001e-03 + + 4.0393048524856567e-01 6.9504439830780029e-01 + 4.9811169505119324e-01 + <_> + + 1 0 804 -1.6318289563059807e-02 -1 -2 805 + -1.1663000099360943e-02 + + 5.2967327833175659e-01 5.8426398038864136e-01 + 4.7895029187202454e-01 + <_> + + 1 0 806 2.5881489273160696e-03 -1 -2 807 + -3.7328999023884535e-03 + + 6.0921788215637207e-01 6.7217427492141724e-01 + 4.0668940544128418e-01 + <_> + + 0 1 808 -1.4355930034071207e-03 -1 -2 809 + 1.8340899841859937e-03 + + 3.5850879549980164e-01 5.3711581230163574e-01 + 4.0335071086883545e-01 + <_> + + 1 0 810 1.2280289828777313e-01 -1 -2 811 + 5.0228700041770935e-02 + + 1.5475720167160034e-01 5.4338437318801880e-01 + 8.4292672574520111e-02 + <_> + + 1 0 812 -2.1437000483274460e-02 -1 -2 813 + -3.1009620055556297e-02 + + 4.8600539565086365e-01 1.8330100178718567e-01 + 5.2075541019439697e-01 + <_> + + 0 1 814 -1.2973720207810402e-02 -1 -2 815 + 1.5818020328879356e-03 + + 7.0482409000396729e-01 4.1705870628356934e-01 + 5.8651638031005859e-01 + <_> + + 1 0 816 -9.7806248813867569e-03 -1 -2 817 + 1.1735740117728710e-03 + + 5.3079181909561157e-01 5.5224531888961792e-01 + 3.5071650147438049e-01 + <_> + + 1 0 818 1.4651629608124495e-03 -1 -2 819 + 2.3532148916274309e-03 + + 3.0426511168479919e-01 5.3393232822418213e-01 + 2.8062361478805542e-01 + <_> + + 0 1 820 -6.1809681355953217e-03 -1 -2 821 + 6.5688649192452431e-04 + + 6.4101332426071167e-01 5.6208711862564087e-01 + 4.3903189897537231e-01 + <_> + + 1 0 822 2.6228010654449463e-02 -1 -2 823 + -1.7958110198378563e-02 + + 6.4455568790435791e-01 2.0027139782905579e-01 + 4.6246650815010071e-01 + <_> + + 1 0 824 -7.6468721963465214e-03 -1 -2 825 + -2.7482809964567423e-03 + + 5.2632009983062744e-01 5.8739811182022095e-01 + 4.8366001248359680e-01 + <_> + + 1 0 826 1.3851850293576717e-02 -1 -2 827 + 2.6369190309196711e-03 + + 1.5661309659481049e-01 4.2701789736747742e-01 + 5.8066600561141968e-01 + <_> + + 0 1 828 -3.1513599678874016e-03 -1 -2 829 + 
-1.4788460248382762e-05 + + 6.2158662080764771e-01 5.5766427516937256e-01 + 4.1220021247863770e-01 + <_> + + 0 1 830 -7.3676988482475281e-02 -1 -2 831 + -3.0912780202925205e-03 + + 1.5367099642753601e-01 6.3442689180374146e-01 + 4.5074120163917542e-01 + <_> + + 0 1 832 7.9240966588258743e-03 -1 -2 833 + 8.5778040811419487e-03 + + 5.4579752683639526e-01 5.4016572237014771e-01 + 3.8907998800277710e-01 + <_> + + 1 0 834 5.5403169244527817e-03 -1 -2 835 + -1.1886510037584230e-04 + + 3.5556110739707947e-01 5.8367502689361572e-01 + 4.2743161320686340e-01 + <_> + + 0 1 836 -1.8408369272947311e-02 -1 -2 837 + -2.3490579333156347e-03 + + 5.8604401350021362e-01 4.4989579916000366e-01 + 5.4981988668441772e-01 + <_> + + 1 0 838 -7.6157399453222752e-03 -1 -2 839 + -3.3190969843417406e-03 + + 4.1009929776191711e-01 6.7013788223266602e-01 + 4.3530011177062988e-01 + <_> + + 1 0 840 -9.4642979092895985e-04 -1 -2 841 + 8.7858550250530243e-03 + + 5.3911769390106201e-01 5.5040502548217773e-01 + 3.9909350872039795e-01 + <_> + + 1 0 842 1.6395459533669055e-04 -1 -2 843 + -2.3508940357714891e-03 + + 3.5929331183433533e-01 4.0341728925704956e-01 + 5.8060771226882935e-01 + <_> + + 1 0 844 7.5449963333085179e-05 -1 -2 845 + 2.7018489316105843e-02 + + 5.4123848676681519e-01 4.9449229240417480e-01 + 5.5894362926483154e-01 + <_> + + 1 0 846 8.4561208495870233e-04 -1 -2 847 + -1.1687109945341945e-03 + + 5.8092182874679565e-01 4.7469571232795715e-01 + 2.8458958864212036e-01 + <_> + + 1 0 848 2.2897500544786453e-02 -1 -2 849 + 7.0879262685775757e-01 + + 2.4144110083580017e-01 5.1957648992538452e-01 + 1.0300920158624649e-01 + <_> + + 1 0 850 3.7483830004930496e-02 -1 -2 851 + 1.2827500468119979e-03 + + 1.8146389722824097e-01 4.2460718750953674e-01 + 5.7079732418060303e-01 + <_> + + 0 1 852 -5.1718312315642834e-03 -1 -2 853 + 2.7545939665287733e-03 + + 6.1433231830596924e-01 5.2056711912155151e-01 + 4.2204418778419495e-01 + <_> + + 0 1 854 -3.6072919610887766e-03 -1 -2 855 + -2.5258748792111874e-04 + + 3.1825920939445496e-01 5.7104682922363281e-01 + 4.2260938882827759e-01 + <_> + + 1 0 856 -7.0514748804271221e-03 -1 -2 857 + -5.4323761723935604e-03 + + 5.1628297567367554e-01 2.6662889122962952e-01 + 5.2146798372268677e-01 + <_> + + 1 0 858 -1.4652940080850385e-05 -1 -2 859 + -1.8556920113041997e-03 + + 3.9817610383033752e-01 3.3227631449699402e-01 + 5.7058340311050415e-01 + <_> + + 1 0 860 4.7609540633857250e-03 -1 -2 861 + 1.5676260227337480e-03 + + 6.6365581750869751e-01 5.5055677890777588e-01 + 4.4206619262695312e-01 + <_> + + 1 0 862 5.4239919409155846e-03 -1 -2 863 + -6.4692399464547634e-03 + + 5.9599381685256958e-01 5.3695940971374512e-01 + 3.7443399429321289e-01 + <_> + + 0 1 864 -7.8038539504632354e-04 -1 -2 865 + 4.5086450874805450e-02 + + 4.1035950183868408e-01 5.1775068044662476e-01 + 1.8781000375747681e-01 + <_> + + 0 1 866 -5.1405387930572033e-03 -1 -2 867 + -2.1236129105091095e-02 + + 2.3528920114040375e-01 1.7087510228157043e-01 + 5.4249739646911621e-01 + <_> + + 0 1 868 -2.3763340432196856e-03 -1 -2 869 + 5.4122589528560638e-02 + + 5.8365309238433838e-01 5.1174330711364746e-01 + 1.8659310042858124e-01 + <_> + + 0 1 870 -5.3492980077862740e-04 -1 -2 871 + -5.8454048121348023e-04 + + 5.1086932420730591e-01 4.7754910588264465e-01 + 2.4398539960384369e-01 + <_> + 71 + 3.4677078247070312e+01 + + <_> + + 0 1 872 3.0031939968466759e-03 -1 -2 873 + 6.9161207647994161e-04 + + 3.3496499061584473e-01 4.5183679461479187e-01 + 7.2893542051315308e-01 + <_> + + 0 1 874 1.1212790384888649e-02 -1 -2 875 + 
-7.6108198845759034e-04 + + 2.9508009552955627e-01 5.6690549850463867e-01 + 2.8308510780334473e-01 + <_> + + 0 1 876 1.1984579759882763e-04 -1 -2 877 + -1.9725349557120353e-04 + + 4.0905779600143433e-01 6.9514942169189453e-01 + 4.6378681063652039e-01 + <_> + + 1 0 878 -5.5180420167744160e-03 -1 -2 879 + 1.2148249661549926e-03 + + 3.1676751375198364e-01 3.3167061209678650e-01 + 5.3963977098464966e-01 + <_> + + 0 1 880 -4.2497441172599792e-03 -1 -2 881 + -9.4915721565485001e-03 + + 2.6005738973617554e-01 7.4842947721481323e-01 + 5.0731921195983887e-01 + <_> + + 1 0 882 6.5378600265830755e-04 -1 -2 883 + -4.9741100519895554e-04 + + 3.9520108699798584e-01 5.8802747726440430e-01 + 3.5521200299263000e-01 + <_> + + 0 1 884 -4.3079249560832977e-02 -1 -2 885 + -5.1999092102050781e-04 + + 2.4348780512809753e-01 3.1955629587173462e-01 + 5.5854547023773193e-01 + <_> + + 1 0 886 -4.5451628975570202e-03 -1 -2 887 + -7.9610403627157211e-03 + + 4.8452898859977722e-01 3.8011810183525085e-01 + 5.3585118055343628e-01 + <_> + + 1 0 888 -3.1919340835884213e-04 -1 -2 889 + -1.9223889335989952e-02 + + 4.3563291430473328e-01 2.6130661368370056e-01 + 6.1554962396621704e-01 + <_> + + 0 1 890 -1.3076990144327283e-03 -1 -2 891 + 1.9825039431452751e-02 + + 5.9420621395111084e-01 4.9454280734062195e-01 + 7.3848551511764526e-01 + <_> + + 0 1 892 -2.2013280540704727e-03 -1 -2 893 + -7.8596705570816994e-03 + + 2.2144819796085358e-01 3.6009770631790161e-01 + 5.2985501289367676e-01 + <_> + + 1 0 894 1.4142199652269483e-03 -1 -2 895 + -1.1232759803533554e-02 + + 5.7765662670135498e-01 6.9344568252563477e-01 + 4.8272070288658142e-01 + <_> + + 1 0 896 2.9746301006525755e-03 -1 -2 897 + 5.3283828310668468e-04 + + 3.2166770100593567e-01 3.9625000953674316e-01 + 5.6803637742996216e-01 + <_> + + 1 0 898 1.0105259716510773e-02 -1 -2 899 + -1.1653699912130833e-02 + + 7.5674182176589966e-01 6.5235567092895508e-01 + 5.0270539522171021e-01 + <_> + + 0 1 900 -7.0609981194138527e-03 -1 -2 901 + 2.2343141026794910e-03 + + 2.5387701392173767e-01 4.3872770667076111e-01 + 6.1776322126388550e-01 + <_> + + 1 0 902 -2.9802279546856880e-02 -1 -2 903 + 1.1611840454861522e-03 + + 5.2011400461196899e-01 4.6479099988937378e-01 + 6.1842548847198486e-01 + <_> + + 1 0 904 9.4824447296559811e-04 -1 -2 905 + 4.1284630424343050e-04 + + 3.0409941077232361e-01 4.5188081264495850e-01 + 6.2457829713821411e-01 + <_> + + 0 1 906 -3.1203540042042732e-02 -1 -2 907 + 2.7652881108224392e-03 + + 2.7889358997344971e-01 4.6985000371932983e-01 + 6.5024542808532715e-01 + <_> + + 1 0 908 2.5644779205322266e-02 -1 -2 909 + -7.5331530533730984e-03 + + 1.8051710724830627e-01 3.2080689072608948e-01 + 5.5220228433609009e-01 + <_> + + 1 0 910 3.2047149725258350e-03 -1 -2 911 + -2.4282479716930538e-04 + + 6.4369338750839233e-01 5.6767052412033081e-01 + 4.5091038942337036e-01 + <_> + + 0 1 912 -6.1979342717677355e-04 -1 -2 913 + -8.0101029016077518e-04 + + 3.1221461296081543e-01 2.9651939868927002e-01 + 5.2304947376251221e-01 + <_> + + 1 0 914 -9.1816839994862676e-04 -1 -2 915 + 1.2239529751241207e-03 + + 5.4647117853164673e-01 4.6185028553009033e-01 + 5.6795489788055420e-01 + <_> + + 0 1 916 -6.8743730662390590e-04 -1 -2 917 + -1.8252469599246979e-03 + + 5.4308801889419556e-01 5.4336231946945190e-01 + 3.3852210640907288e-01 + <_> + + 1 0 918 -7.4570789001882076e-03 -1 -2 919 + 5.3775748237967491e-03 + + 5.2655947208404541e-01 4.8572158813476562e-01 + 6.8151241540908813e-01 + <_> + + 1 0 920 3.7602309603244066e-03 -1 -2 921 + 8.7752222316339612e-04 + + 2.8321608901023865e-01 
3.9668309688568115e-01 + 5.5124807357788086e-01 + <_> + + 1 0 922 5.5084479972720146e-03 -1 -2 923 + -7.5949047459289432e-04 + + 6.7846202850341797e-01 3.9065030217170715e-01 + 5.4572027921676636e-01 + <_> + + 1 0 924 1.6352660022675991e-03 -1 -2 925 + -1.2750849418807775e-04 + + 3.6402040719985962e-01 5.8297240734100342e-01 + 4.1949799656867981e-01 + <_> + + 0 1 926 2.2067610174417496e-02 -1 -2 927 + -1.9203789532184601e-02 + + 4.6067029237747192e-01 3.2614830136299133e-01 + 5.2360808849334717e-01 + <_> + + 0 1 928 -1.2998109683394432e-02 -1 -2 929 + -3.1332690268754959e-03 + + 7.0221120119094849e-01 2.8704708814620972e-01 + 5.0764769315719604e-01 + <_> + + 1 0 930 -5.2937557920813560e-03 -1 -2 931 + 2.1857069805264473e-03 + + 4.7095209360122681e-01 4.7082918882369995e-01 + 6.1698418855667114e-01 + <_> + + 0 1 932 -4.5750709250569344e-03 -1 -2 933 + -4.5152138918638229e-02 + + 3.1142529845237732e-01 1.8514350056648254e-01 + 5.5048149824142456e-01 + <_> + + 1 0 934 -2.7783559635281563e-03 -1 -2 935 + -2.5752480141818523e-03 + + 4.9373480677604675e-01 6.1529481410980225e-01 + 4.7354999184608459e-01 + <_> + + 1 0 936 1.1614130344241858e-03 -1 -2 937 + 2.3350189439952374e-03 + + 6.5105718374252319e-01 4.0883418917655945e-01 + 5.6841522455215454e-01 + <_> + + 1 0 938 3.8499289657920599e-03 -1 -2 939 + 2.4529630318284035e-03 + + 3.0258288979530334e-01 5.2325028181076050e-01 + 2.0176209509372711e-01 + <_> + + 1 0 940 3.6731390282511711e-03 -1 -2 941 + 2.1937100682407618e-03 + + 6.4284259080886841e-01 4.3288651108741760e-01 + 6.4205098152160645e-01 + <_> + + 1 0 942 -6.4666871912777424e-03 -1 -2 943 + -5.7186251506209373e-03 + + 5.2540659904479980e-01 2.4909840524196625e-01 + 5.2876192331314087e-01 + <_> + + 1 0 944 9.9941878579556942e-04 -1 -2 945 + -7.8276498243212700e-04 + + 3.3297958970069885e-01 3.5983449220657349e-01 + 5.4983407258987427e-01 + <_> + + 0 1 946 4.3231188319623470e-03 -1 -2 947 + 4.0838290005922318e-03 + + 4.8187050223350525e-01 5.2663302421569824e-01 + 3.1057891249656677e-01 + <_> + + 1 0 948 3.0515898833982646e-04 -1 -2 949 + 1.2640280183404684e-03 + + 3.9952918887138367e-01 3.2284379005432129e-01 + 5.8192151784896851e-01 + <_> + + 0 1 950 -1.0152660310268402e-02 -1 -2 951 + -2.6863690000027418e-03 + + 8.0260711908340454e-01 3.8756170868873596e-01 + 5.4665708541870117e-01 + <_> + + 1 0 952 -9.0515613555908203e-03 -1 -2 953 + -6.3204211182892323e-03 + + 4.3720579147338867e-01 1.1265510320663452e-01 + 6.3954162597656250e-01 + <_> + + 0 1 954 2.6117300149053335e-03 -1 -2 955 + 1.4339019544422626e-02 + + 5.4239892959594727e-01 4.9792730808258057e-01 + 6.0422360897064209e-01 + <_> + + 1 0 956 2.8452780097723007e-03 -1 -2 957 + 1.4783289771003183e-05 + + 3.4910920262336731e-01 4.1950678825378418e-01 + 5.7759660482406616e-01 + <_> + + 0 1 958 8.1814555451273918e-03 -1 -2 959 + 6.6321990452706814e-03 + + 4.8859870433807373e-01 5.4444682598114014e-01 + 4.4209951162338257e-01 + <_> + + 0 1 960 -2.2483461070805788e-03 -1 -2 961 + 1.2374560348689556e-02 + + 6.6997921466827393e-01 4.4786059856414795e-01 + 6.5648937225341797e-01 + <_> + + 1 0 962 -6.6516688093543053e-03 -1 -2 963 + -8.5750613361597061e-03 + + 5.5118787288665771e-01 4.0174451470375061e-01 + 5.4055362939834595e-01 + <_> + + 1 0 964 6.5078441984951496e-03 -1 -2 965 + 2.8675209730863571e-02 + + 2.2943930327892303e-01 5.1779001951217651e-01 + 3.5677561163902283e-01 + <_> + + 0 1 966 7.0673860609531403e-03 -1 -2 967 + 1.2367829913273454e-03 + + 5.5646997690200806e-01 3.6276981234550476e-01 + 5.5724138021469116e-01 + <_> + + 1 0 
968 7.4818679131567478e-03 -1 -2 969 + 4.7109839506447315e-03 + + 6.7849111557006836e-01 4.1212528944015503e-01 + 6.0722357034683228e-01 + <_> + + 1 0 970 -6.9405790418386459e-03 -1 -2 971 + 3.3302098512649536e-02 + + 5.4597669839859009e-01 5.2767068147659302e-01 + 2.3749159276485443e-01 + <_> + + 1 0 972 3.6104630678892136e-02 -1 -2 973 + 1.9674649462103844e-02 + + 7.2492793202400208e-02 4.6263459324836731e-01 + 8.2089632749557495e-01 + <_> + + 0 1 974 3.4766150638461113e-03 -1 -2 975 + 1.3987369602546096e-03 + + 5.2087318897247314e-01 5.4844141006469727e-01 + 4.2300349473953247e-01 + <_> + + 1 0 976 4.0974249131977558e-03 -1 -2 977 + 2.6973790954798460e-03 + + 2.7805531024932861e-01 5.4038310050964355e-01 + 3.7909889221191406e-01 + <_> + + 1 0 978 -5.6591699831187725e-03 -1 -2 979 + 3.9460969856008887e-04 + + 4.7983360290527344e-01 3.7669500708580017e-01 + 5.4292291402816772e-01 + <_> + + 1 0 980 2.1750570740550756e-03 -1 -2 981 + 1.4614439569413662e-03 + + 6.2071627378463745e-01 3.3579450845718384e-01 + 5.1426321268081665e-01 + <_> + + 1 0 982 -5.3006567759439349e-04 -1 -2 983 + 1.4869309961795807e-01 + + 5.3446400165557861e-01 5.1596081256866455e-01 + 2.5618231296539307e-01 + <_> + + 1 0 984 -5.8816498494707048e-05 -1 -2 985 + -1.6275369562208652e-03 + + 5.1230919361114502e-01 6.0176461935043335e-01 + 3.1093719601631165e-01 + <_> + + 0 1 986 -1.2881809845566750e-02 -1 -2 987 + 9.4982917653396726e-04 + + 2.7122870087623596e-01 5.4424422979354858e-01 + 4.0288880467414856e-01 + <_> + + 1 0 988 -1.2315999716520309e-02 -1 -2 989 + 9.0286601334810257e-03 + + 4.7360658645629883e-01 7.4514347314834595e-01 + 3.4879919886589050e-01 + <_> + + 0 1 990 -8.6876116693019867e-02 -1 -2 991 + -1.5107560102478601e-05 + + 2.2903330624103546e-01 5.5178898572921753e-01 + 4.3931490182876587e-01 + <_> + + 0 1 992 -1.7457660287618637e-02 -1 -2 993 + -2.5219470262527466e-03 + + 9.0167902410030365e-02 6.2335401773452759e-01 + 4.7894591093063354e-01 + <_> + + 0 1 994 1.0656520025804639e-03 -1 -2 995 + -4.2540300637483597e-03 + + 5.4896962642669678e-01 5.5798089504241943e-01 + 4.3758779764175415e-01 + <_> + + 0 1 996 -9.0349102392792702e-03 -1 -2 997 + -1.5230999561026692e-03 + + 3.5791561007499695e-01 5.6136602163314819e-01 + 3.9390438795089722e-01 + <_> + + 1 0 998 2.8441150207072496e-03 -1 -2 999 + -3.2824429217725992e-03 + + 3.9015549421310425e-01 4.5286190509796143e-01 + 5.4413431882858276e-01 + <_> + + 1 0 1000 3.2161718991119415e-05 -1 -2 1001 + 3.0118400900391862e-05 + + 5.8031117916107178e-01 3.3368501067161560e-01 + 5.5048561096191406e-01 + <_> + + 0 1 1002 -5.6150099262595177e-03 -1 -2 1003 + -1.7389209941029549e-02 + + 6.1247891187667847e-01 8.7271630764007568e-02 + 5.2045881748199463e-01 + <_> + + 0 1 1004 -4.4361080654198304e-05 -1 -2 1005 + 1.0354899859521538e-04 + + 3.9353290200233459e-01 5.9188538789749146e-01 + 4.1196140646934509e-01 + <_> + + 0 1 1006 1.5939630102366209e-03 -1 -2 1007 + 2.5440789759159088e-03 + + 4.8396238684654236e-01 4.7873649001121521e-01 + 6.3606631755828857e-01 + <_> + + 0 1 1008 1.5083180187502876e-05 -1 -2 1009 + -9.9282202427275479e-05 + + 4.2311170697212219e-01 4.2745891213417053e-01 + 6.0940480232238770e-01 + <_> + + 1 0 1010 5.5371708003804088e-04 -1 -2 1011 + 1.9186759600415826e-03 + + 4.2719879746437073e-01 4.4971078634262085e-01 + 5.5491220951080322e-01 + <_> + + 1 0 1012 -5.0764222396537662e-04 -1 -2 1013 + 1.7236480489373207e-03 + + 5.4771959781646729e-01 2.8829228878021240e-01 + 5.6151270866394043e-01 + <_> + 75 + 3.6726501464843750e+01 + + <_> + + 0 1 1014 
1.3092169538140297e-02 -1 -2 1015 + 4.1446479735895991e-04 + + 3.3388701081275940e-01 3.0993521213531494e-01 + 6.6774922609329224e-01 + <_> + + 0 1 1016 2.1835729479789734e-02 -1 -2 1017 + 4.8323940485715866e-02 + + 4.3690490722656250e-01 4.3017241358757019e-01 + 6.1538851261138916e-01 + <_> + + 0 1 1018 1.6091950237751007e-03 -1 -2 1019 + 1.3469760306179523e-03 + + 3.3873260021209717e-01 6.2487137317657471e-01 + 3.5941308736801147e-01 + <_> + + 0 1 1020 1.7729059618432075e-04 -1 -2 1021 + 3.6743620876222849e-04 + + 3.8684248924255371e-01 4.4093450903892517e-01 + 5.4764741659164429e-01 + <_> + + 0 1 1022 -1.2352119665592909e-03 -1 -2 1023 + 1.1705530341714621e-03 + + 3.2601711153984070e-01 4.1113489866256714e-01 + 6.0881638526916504e-01 + <_> + + 1 0 1024 -2.9695429475395940e-05 -1 -2 1025 + 2.7050738572143018e-04 + + 4.2694228887557983e-01 4.3064668774604797e-01 + 5.8105140924453735e-01 + <_> + + 1 0 1026 -7.9626210208516568e-05 -1 -2 1027 + 3.3152441028505564e-04 + + 3.6691430211067200e-01 4.6106639504432678e-01 + 6.2905901670455933e-01 + <_> + + 1 0 1028 -5.2305828779935837e-02 -1 -2 1029 + 2.6880469173192978e-02 + + 5.3286898136138916e-01 5.2132612466812134e-01 + 3.2312199473381042e-01 + <_> + + 1 0 1030 -2.4203000066336244e-04 -1 -2 1031 + -1.6424639616161585e-03 + + 3.5685700178146362e-01 3.4406611323356628e-01 + 5.6256049871444702e-01 + <_> + + 1 0 1032 -2.6830288697965443e-04 -1 -2 1033 + -2.2649629972875118e-03 + + 4.5611730217933655e-01 5.3213518857955933e-01 + 3.6741548776626587e-01 + <_> + + 1 0 1034 1.5627209097146988e-02 -1 -2 1035 + 1.6211320459842682e-01 + + 2.0293539762496948e-01 5.5630332231521606e-01 + 2.6188498735427856e-01 + <_> + + 0 1 1036 -3.7391691002994776e-03 -1 -2 1037 + -2.0878419745713472e-03 + + 6.0621947050094604e-01 5.9507638216018677e-01 + 4.5451170206069946e-01 + <_> + + 1 0 1038 2.3334210272878408e-03 -1 -2 1039 + 6.5116386394947767e-05 + + 6.4355242252349854e-01 3.5207340121269226e-01 + 5.1797789335250854e-01 + <_> + + 0 1 1040 7.4625718407332897e-03 -1 -2 1041 + -2.2032689303159714e-02 + + 5.3266882896423340e-01 3.4919810295104980e-01 + 5.4292368888854980e-01 + <_> + + 0 1 1042 -8.3081610500812531e-03 -1 -2 1043 + -4.3259368976578116e-04 + + 2.0840230584144592e-01 3.9652720093727112e-01 + 5.4254537820816040e-01 + <_> + + 1 0 1044 -3.2209228724241257e-02 -1 -2 1045 + -9.0424838708713651e-04 + + 5.3064119815826416e-01 5.4503858089447021e-01 + 4.2566969990730286e-01 + <_> + + 1 0 1046 2.2727500181645155e-03 -1 -2 1047 + 5.9820008464157581e-03 + + 5.9686112403869629e-01 4.7581401467323303e-01 + 3.1509441137313843e-01 + <_> + + 1 0 1048 -5.8856618124991655e-04 -1 -2 1049 + -8.8227191008627415e-04 + + 4.8477488756179810e-01 5.4263162612915039e-01 + 4.3383410573005676e-01 + <_> + + 1 0 1050 -7.4473457061685622e-05 -1 -2 1051 + 3.9148979703895748e-04 + + 4.2875099182128906e-01 6.3451850414276123e-01 + 4.1018518805503845e-01 + <_> + + 1 0 1052 -3.6939629353582859e-03 -1 -2 1053 + -1.1207849718630314e-02 + + 4.8491048812866211e-01 4.1463369131088257e-01 + 5.4712641239166260e-01 + <_> + + 0 1 1054 -1.0337409563362598e-02 -1 -2 1055 + 3.6883640568703413e-03 + + 2.8771838545799255e-01 5.1019018888473511e-01 + 7.2169512510299683e-01 + <_> + + 1 0 1056 -3.8984280545264482e-03 -1 -2 1057 + -5.9986729174852371e-03 + + 5.2761822938919067e-01 6.6184598207473755e-01 + 4.8416310548782349e-01 + <_> + + 1 0 1058 4.5043681748211384e-03 -1 -2 1059 + 1.7799530178308487e-02 + + 1.8741579353809357e-01 4.6169349551200867e-01 + 7.0889657735824585e-01 + <_> + + 0 1 1060 
-1.8462570384144783e-02 -1 -2 1061 + 1.4931300029275008e-05 + + 3.0019798874855042e-01 4.5618081092834473e-01 + 5.6107878684997559e-01 + <_> + + 0 1 1062 -8.6021229624748230e-02 -1 -2 1063 + -6.0818758356617764e-05 + + 2.3417009413242340e-01 5.6722861528396606e-01 + 4.1999641060829163e-01 + <_> + + 1 0 1064 1.2670679716393352e-03 -1 -2 1065 + 1.3699879636988044e-03 + + 6.2074822187423706e-01 5.3949588537216187e-01 + 3.8238629698753357e-01 + <_> + + 1 0 1066 3.3162781037390232e-03 -1 -2 1067 + -1.4532039640471339e-03 + + 7.0616811513900757e-01 3.0655130743980408e-01 + 4.8273730278015137e-01 + <_> + + 1 0 1068 -7.1492061018943787e-02 -1 -2 1069 + 1.9857978913933039e-03 + + 5.1931220293045044e-01 4.6424350142478943e-01 + 5.8076947927474976e-01 + <_> + + 1 0 1070 6.2516499310731888e-03 -1 -2 1071 + 2.7005500160157681e-03 + + 2.9498139023780823e-01 4.5858868956565857e-01 + 6.0223537683486938e-01 + <_> + + 0 1 1072 1.1130389757454395e-02 -1 -2 1073 + 1.5092849731445312e-02 + + 4.3578410148620605e-01 4.5615398883819580e-01 + 6.1190617084503174e-01 + <_> + + 0 1 1074 -2.7943300083279610e-02 -1 -2 1075 + 4.4036991312168539e-05 + + 6.5371441841125488e-01 3.4747231006622314e-01 + 5.3369677066802979e-01 + <_> + + 0 1 1076 -1.2232770211994648e-02 -1 -2 1077 + -6.8591412855312228e-04 + + 3.7316760420799255e-01 5.7172292470932007e-01 + 4.7933790087699890e-01 + <_> + + 0 1 1078 -3.8992990739643574e-03 -1 -2 1079 + 4.9113907152786851e-04 + + 4.0564361214637756e-01 6.1740481853485107e-01 + 4.4717541337013245e-01 + <_> + + 1 0 1080 8.2117747515439987e-03 -1 -2 1081 + -4.5564480125904083e-02 + + 6.1796981096267700e-01 2.2854949533939362e-01 + 5.2495658397674561e-01 + <_> + + 0 1 1082 -5.3631910122931004e-03 -1 -2 1083 + -1.2274970300495625e-02 + + 1.7849500477313995e-01 7.2619527578353882e-01 + 4.5503988862037659e-01 + <_> + + 0 1 1084 5.4185991175472736e-03 -1 -2 1085 + 8.1846961984410882e-04 + + 5.2529907226562500e-01 5.4452222585678101e-01 + 3.2722181081771851e-01 + <_> + + 1 0 1086 4.1358140297234058e-03 -1 -2 1087 + 3.9578010910190642e-04 + + 7.0138317346572876e-01 4.9659439921379089e-01 + 3.2955980300903320e-01 + <_> + + 0 1 1088 4.6887691132724285e-03 -1 -2 1089 + -1.8255440518260002e-02 + + 5.3626418113708496e-01 6.4961087703704834e-01 + 4.7571370005607605e-01 + <_> + + 0 1 1090 -6.2736468389630318e-03 -1 -2 1091 + 2.4320168886333704e-03 + + 2.3437410593032837e-01 4.6201181411743164e-01 + 6.8984192609786987e-01 + <_> + + 0 1 1092 -4.9617629498243332e-02 -1 -2 1093 + 1.1701210169121623e-03 + + 2.1007199585437775e-01 4.6215289831161499e-01 + 5.7971358299255371e-01 + <_> + + 0 1 1094 -4.5237291604280472e-02 -1 -2 1095 + 4.7563421539962292e-03 + + 2.1182620525360107e-01 4.8846149444580078e-01 + 6.8724989891052246e-01 + <_> + + 1 0 1096 -1.4835969544947147e-02 -1 -2 1097 + 7.7436608262360096e-04 + + 5.2751058340072632e-01 4.1723209619522095e-01 + 5.4911398887634277e-01 + <_> + + 1 0 1098 1.4835969544947147e-02 -1 -2 1099 + -8.0892542609944940e-04 + + 2.1248769760131836e-01 5.4952150583267212e-01 + 4.2077958583831787e-01 + <_> + + 0 1 1100 7.7517668250948191e-04 -1 -2 1101 + -6.7618978209793568e-03 + + 3.3219420909881592e-01 2.2129580378532410e-01 + 5.2326530218124390e-01 + <_> + + 0 1 1102 -4.0135860443115234e-02 -1 -2 1103 + -3.3651469275355339e-03 + + 1.1017960309982300e-01 3.8101008534431458e-01 + 5.6172919273376465e-01 + <_> + + 1 0 1104 7.4713007779791951e-04 -1 -2 1105 + -4.2727389372885227e-03 + + 5.7950568199157715e-01 6.3922691345214844e-01 + 4.7114381194114685e-01 + <_> + + 1 0 1106 
3.6202510818839073e-03 -1 -2 1107 + 4.7307618660852313e-04 + + 3.4098839759826660e-01 3.6593028903007507e-01 + 5.3881710767745972e-01 + <_> + + 1 0 1108 3.3094909042119980e-02 -1 -2 1109 + -1.1544119566679001e-02 + + 7.1703857183456421e-01 6.3868182897567749e-01 + 4.6813040971755981e-01 + <_> + + 0 1 1110 -7.4234469793736935e-03 -1 -2 1111 + -4.2252950370311737e-03 + + 3.2637009024620056e-01 5.7678192853927612e-01 + 4.3464180827140808e-01 + <_> + + 0 1 1112 1.8133109435439110e-02 -1 -2 1113 + 7.0903049781918526e-03 + + 4.6978279948234558e-01 4.4373890757560730e-01 + 6.0616689920425415e-01 + <_> + + 0 1 1114 -1.3272940181195736e-02 -1 -2 1115 + 1.4632199599873275e-04 + + 6.5585112571716309e-01 3.3763539791107178e-01 + 5.0916552543640137e-01 + <_> + + 0 1 1116 -3.5790191031992435e-03 -1 -2 1117 + -4.6997101162560284e-04 + + 2.9478839039802551e-01 5.5569821596145630e-01 + 4.6654561161994934e-01 + <_> + + 0 1 1118 -4.8179440200328827e-02 -1 -2 1119 + -9.2581362696364522e-04 + + 7.3383557796478271e-01 3.5438719391822815e-01 + 5.2851498126983643e-01 + <_> + + 0 1 1120 -1.4780730009078979e-02 -1 -2 1121 + -1.0027450323104858e-01 + + 1.9444419443607330e-01 9.9049292504787445e-02 + 5.1398539543151855e-01 + <_> + + 0 1 1122 -9.3848101096227765e-04 -1 -2 1123 + -2.8861360624432564e-03 + + 5.8271098136901855e-01 3.4414279460906982e-01 + 5.1488387584686279e-01 + <_> + + 1 0 1124 -4.3682761490345001e-02 -1 -2 1125 + 2.6115700602531433e-03 + + 5.2079981565475464e-01 4.8355031013488770e-01 + 6.3222199678421021e-01 + <_> + + 1 0 1126 4.3682761490345001e-02 -1 -2 1127 + 1.7179530113935471e-03 + + 1.3645380735397339e-01 4.5373201370239258e-01 + 6.0667508840560913e-01 + <_> + + 1 0 1128 -3.3964909613132477e-02 -1 -2 1129 + -1.0993590112775564e-03 + + 4.9683749675750732e-01 5.8316808938980103e-01 + 4.6882399916648865e-01 + <_> + + 1 0 1130 5.4301079362630844e-02 -1 -2 1131 + 1.0993590112775564e-03 + + 7.5682890415191650e-01 4.3301481008529663e-01 + 5.7684689760208130e-01 + <_> + + 1 0 1132 -1.4954120160837192e-05 -1 -2 1133 + 3.1415868550539017e-02 + + 4.4432818889617920e-01 5.2744728326797485e-01 + 3.0378559231758118e-01 + <_> + + 1 0 1134 1.0831849649548531e-02 -1 -2 1135 + 8.6545711383223534e-04 + + 3.5817208886146545e-01 5.9375840425491333e-01 + 4.2946299910545349e-01 + <_> + + 1 0 1136 2.2743160370737314e-03 -1 -2 1137 + 3.9340821094810963e-03 + + 5.9545767307281494e-01 4.7922229766845703e-01 + 5.8561331033706665e-01 + <_> + + 1 0 1138 8.1451907753944397e-03 -1 -2 1139 + -5.2763288840651512e-03 + + 3.5734778642654419e-01 4.0260228514671326e-01 + 5.7647430896759033e-01 + <_> + + 1 0 1140 -8.3787851035594940e-03 -1 -2 1141 + 1.5621910570189357e-03 + + 4.9813330173492432e-01 4.7365880012512207e-01 + 5.5836081504821777e-01 + <_> + + 1 0 1142 3.2318739686161280e-03 -1 -2 1143 + 6.6804019734263420e-03 + + 6.1674368381500244e-01 4.1314241290092468e-01 + 6.2806951999664307e-01 + <_> + + 0 1 1144 -3.3396480139344931e-03 -1 -2 1145 + -2.0933480560779572e-01 + + 3.4463581442832947e-01 1.0386580228805542e-01 + 5.2044892311096191e-01 + <_> + + 1 0 1146 6.3805822283029556e-03 -1 -2 1147 + -6.0137799009680748e-03 + + 2.1674020588397980e-01 6.7383992671966553e-01 + 4.8966509103775024e-01 + <_> + + 1 0 1148 -8.1756077706813812e-03 -1 -2 1149 + 6.3951779156923294e-04 + + 5.1779150962829590e-01 4.8196458816528320e-01 + 5.4644381999969482e-01 + <_> + + 1 0 1150 1.0127760469913483e-03 -1 -2 1151 + 4.9784599104896188e-04 + + 3.4235960245132446e-01 4.4884610176086426e-01 + 5.9126710891723633e-01 + <_> + + 1 0 1152 
1.3596490316558629e-04 -1 -2 1153 + 1.3571660034358501e-02 + + 5.5688631534576416e-01 5.1610678434371948e-01 + 1.7130009829998016e-01 + <_> + + 1 0 1154 3.0259079721872695e-05 -1 -2 1155 + -3.2625840976834297e-03 + + 4.9162039160728455e-01 6.4046627283096313e-01 + 2.8590849041938782e-01 + <_> + + 1 0 1156 -1.9217010412830859e-04 -1 -2 1157 + 2.1993879228830338e-02 + + 5.4592829942703247e-01 4.7157138586044312e-01 + 5.6900751590728760e-01 + <_> + + 1 0 1158 7.8907777788117528e-04 -1 -2 1159 + 5.0893891602754593e-04 + + 3.2798269391059875e-01 4.3020078539848328e-01 + 5.6960451602935791e-01 + <_> + + 1 0 1160 1.1662710312521085e-04 -1 -2 1161 + 8.0604078248143196e-03 + + 5.3872352838516235e-01 5.0214231014251709e-01 + 5.9653222560882568e-01 + <_> + + 1 0 1162 9.5925969071686268e-04 -1 -2 1163 + -1.9526129588484764e-02 + + 3.4734940528869629e-01 6.4755451679229736e-01 + 4.6437820792198181e-01 + <_> + 78 + 3.8236038208007812e+01 + + <_> + + 0 1 1164 4.1242439299821854e-02 -1 -2 1165 + 1.5626709908246994e-02 + + 3.3933150768280029e-01 5.1041001081466675e-01 + 7.7728152275085449e-01 + <_> + + 0 1 1166 2.9947189614176750e-04 -1 -2 1167 + -1.0037609608843923e-03 + + 3.6646738648414612e-01 5.4056507349014282e-01 + 3.9262050390243530e-01 + <_> + + 0 1 1168 6.8128242855891585e-04 -1 -2 1169 + 1.3098999625071883e-04 + + 4.2515191435813904e-01 4.1351449489593506e-01 + 6.9257462024688721e-01 + <_> + + 1 0 1170 3.1696720980107784e-03 -1 -2 1171 + -2.0587369799613953e-03 + + 3.4558731317520142e-01 2.2341939806938171e-01 + 5.2861189842224121e-01 + <_> + + 1 0 1172 -4.6395038953050971e-04 -1 -2 1173 + 3.5089480224996805e-03 + + 4.2065200209617615e-01 6.5029817819595337e-01 + 4.1175979375839233e-01 + <_> + + 1 0 1174 -2.3975980002433062e-03 -1 -2 1175 + 1.0901279747486115e-03 + + 3.6733010411262512e-01 2.9062381386756897e-01 + 5.4451119899749756e-01 + <_> + + 0 1 1176 -1.6524370585102588e-04 -1 -2 1177 + -4.1602319106459618e-04 + + 4.2335158586502075e-01 3.8863611221313477e-01 + 6.2691658735275269e-01 + <_> + + 0 1 1178 -2.3739910102449358e-04 -1 -2 1179 + 2.4739760905504227e-02 + + 5.5244511365890503e-01 4.9600958824157715e-01 + 5.3734910488128662e-01 + <_> + + 0 1 1180 -1.5342839993536472e-02 -1 -2 1181 + 1.1540469713509083e-02 + + 6.8494051694869995e-01 4.0372350811958313e-01 + 6.7869400978088379e-01 + <_> + + 1 0 1182 6.4230621792376041e-03 -1 -2 1183 + 1.2977809645235538e-02 + + 3.8146761059761047e-01 5.5270588397979736e-01 + 3.7449559569358826e-01 + <_> + + 0 1 1184 1.1063399724662304e-03 -1 -2 1185 + 1.3743690215051174e-03 + + 3.5209289193153381e-01 5.6419032812118530e-01 + 3.0750259757041931e-01 + <_> + + 0 1 1186 1.6233779489994049e-02 -1 -2 1187 + -8.1519351806491613e-04 + + 4.8888280987739563e-01 5.4563212394714355e-01 + 4.7435501217842102e-01 + <_> + + 0 1 1188 -9.0782493352890015e-02 -1 -2 1189 + 1.1665210127830505e-02 + + 2.9252481460571289e-01 4.6884548664093018e-01 + 6.2303477525711060e-01 + <_> + + 0 1 1190 -2.3286409676074982e-02 -1 -2 1191 + 2.1559339947998524e-03 + + 6.8958431482315063e-01 5.3558021783828735e-01 + 3.4234660863876343e-01 + <_> + + 0 1 1192 -4.3167220428586006e-03 -1 -2 1193 + 1.5610599657520652e-03 + + 5.9370762109756470e-01 4.7086599469184875e-01 + 2.7369970083236694e-01 + <_> + + 0 1 1194 1.4076639898121357e-02 -1 -2 1195 + 7.1018589660525322e-03 + + 5.2871561050415039e-01 5.3361928462982178e-01 + 3.2248139381408691e-01 + <_> + + 0 1 1196 -4.8221647739410400e-03 -1 -2 1197 + -5.3852899000048637e-03 + + 2.9839101433753967e-01 5.6239992380142212e-01 + 4.2959120869636536e-01 
+ <_> + + 1 0 1198 7.3483278974890709e-03 -1 -2 1199 + -3.5707519855350256e-03 + + 6.8139612674713135e-01 5.8579689264297485e-01 + 4.6034291386604309e-01 + <_> + + 1 0 1200 2.3340100888162851e-03 -1 -2 1201 + 4.7432780265808105e-03 + + 2.7448511123657227e-01 5.0475269556045532e-01 + 2.3627419769763947e-01 + <_> + + 0 1 1202 6.5055489540100098e-03 -1 -2 1203 + 1.2589249759912491e-02 + + 5.2422481775283813e-01 4.8236909508705139e-01 + 6.7525368928909302e-01 + <_> + + 0 1 1204 -6.3358368352055550e-03 -1 -2 1205 + -5.7639651931822300e-03 + + 1.7346349358558655e-01 6.3543808460235596e-01 + 4.5874750614166260e-01 + <_> + + 0 1 1206 1.3599749654531479e-03 -1 -2 1207 + 2.8404260054230690e-02 + + 4.5803809165954590e-01 5.1763808727264404e-01 + 1.2043850123882294e-01 + <_> + + 0 1 1208 -9.2958156019449234e-03 -1 -2 1209 + -1.1800320353358984e-03 + + 2.3379570245742798e-01 3.9028140902519226e-01 + 5.6529301404953003e-01 + <_> + + 0 1 1210 -2.0948140881955624e-03 -1 -2 1211 + 4.1679958812892437e-03 + + 5.5120289325714111e-01 5.4559761285781860e-01 + 4.7989490628242493e-01 + <_> + + 1 0 1212 5.4458891972899437e-03 -1 -2 1213 + -1.2766510481014848e-03 + + 6.1270868778228760e-01 5.3171318769454956e-01 + 3.8509321212768555e-01 + <_> + + 0 1 1214 5.9404270723462105e-04 -1 -2 1215 + 4.2309608310461044e-02 + + 5.4464370012283325e-01 5.2346438169479370e-01 + 2.2130440175533295e-01 + <_> + + 0 1 1216 5.6189671158790588e-03 -1 -2 1217 + 7.2401198558509350e-03 + + 4.9161979556083679e-01 1.4714759588241577e-01 + 4.8528939485549927e-01 + <_> + + 0 1 1218 -4.5610670931637287e-03 -1 -2 1219 + 4.5506159949582070e-05 + + 2.7737739682197571e-01 4.6264618635177612e-01 + 5.7680791616439819e-01 + <_> + + 0 1 1220 -6.1903791502118111e-03 -1 -2 1221 + 8.1186462193727493e-04 + + 1.6442899405956268e-01 4.7785910964012146e-01 + 6.2618649005889893e-01 + <_> + + 0 1 1222 1.3779809698462486e-02 -1 -2 1223 + 1.1290319962427020e-03 + + 5.2573078870773315e-01 5.4980480670928955e-01 + 3.9831069111824036e-01 + <_> + + 0 1 1224 -1.0610350000206381e-04 -1 -2 1225 + 1.6695790691301227e-04 + + 4.0335190296173096e-01 4.1493400931358337e-01 + 5.7953411340713501e-01 + <_> + + 1 0 1226 1.1290319962427020e-03 -1 -2 1227 + -1.2019349634647369e-01 + + 3.9341148734092712e-01 7.3400482535362244e-02 + 5.2025860548019409e-01 + <_> + + 0 1 1228 -1.5230740420520306e-02 -1 -2 1229 + 3.5759829916059971e-03 + + 3.7495058774948120e-01 5.0781500339508057e-01 + 6.6060662269592285e-01 + <_> + + 0 1 1230 1.3479460030794144e-02 -1 -2 1231 + -2.1162950433790684e-03 + + 4.5477110147476196e-01 3.3110061287879944e-01 + 5.3842592239379883e-01 + <_> + + 0 1 1232 -1.7877709120512009e-02 -1 -2 1233 + 1.0931970318779349e-03 + + 6.5132528543472290e-01 5.2647650241851807e-01 + 3.4569910168647766e-01 + <_> + + 0 1 1234 -3.0553159303963184e-03 -1 -2 1235 + 3.6365049891173840e-03 + + 6.2686139345169067e-01 5.3992128372192383e-01 + 4.3453970551490784e-01 + <_> + + 0 1 1236 9.7896481747739017e-05 -1 -2 1237 + -3.2714448752813041e-04 + + 3.8356059789657593e-01 3.3376678824424744e-01 + 5.5391657352447510e-01 + <_> + + 1 0 1238 4.3425030889920890e-04 -1 -2 1239 + 1.4005579985678196e-02 + + 5.7882702350616455e-01 5.2750778198242188e-01 + 2.7011251449584961e-01 + <_> + + 0 1 1240 -9.2654931358993053e-04 -1 -2 1241 + 3.9504268206655979e-03 + + 5.8522802591323853e-01 4.7283369302749634e-01 + 3.3139181137084961e-01 + <_> + + 1 0 1242 -5.8086868375539780e-04 -1 -2 1243 + -1.2018020264804363e-02 + + 4.2588108777999878e-01 5.6097871065139771e-01 + 4.8951920866966248e-01 + <_> + + 0 1 
1244 -1.4521540701389313e-01 -1 -2 1245 + -6.6049019806087017e-03 + + 4.3894480913877487e-02 4.2291709780693054e-01 + 5.6162929534912109e-01 + <_> + + 1 0 1246 -3.4909751266241074e-02 -1 -2 1247 + 3.7478420417755842e-03 + + 4.7881281375885010e-01 4.8002821207046509e-01 + 5.8013892173767090e-01 + <_> + + 1 0 1248 3.3038031309843063e-02 -1 -2 1249 + 3.6872599739581347e-03 + + 7.0781761407852173e-01 4.4496241211891174e-01 + 5.9577310085296631e-01 + <_> + + 0 1 1250 -4.5311939902603626e-03 -1 -2 1251 + 4.1058510541915894e-03 + + 4.1770470142364502e-01 5.3729480504989624e-01 + 3.7369269132614136e-01 + <_> + + 0 1 1252 -8.7599847465753555e-03 -1 -2 1253 + -2.3003309965133667e-02 + + 6.6588079929351807e-01 2.6479220390319824e-01 + 5.1018178462982178e-01 + <_> + + 0 1 1254 5.3664818406105042e-03 -1 -2 1255 + 3.8971770554780960e-02 + + 4.5486348867416382e-01 5.1570618152618408e-01 + 3.4364390373229980e-01 + <_> + + 0 1 1256 -2.7767190709710121e-02 -1 -2 1257 + -9.8894089460372925e-03 + + 2.3543910682201385e-01 6.8877410888671875e-01 + 5.1110517978668213e-01 + <_> + + 0 1 1258 -3.2073140610009432e-03 -1 -2 1259 + -6.7484978353604674e-04 + + 5.4388678073883057e-01 5.4511487483978271e-01 + 4.8313531279563904e-01 + <_> + + 0 1 1260 -5.1947520114481449e-03 -1 -2 1261 + -2.6169899501837790e-04 + + 2.1134190261363983e-01 5.2736818790435791e-01 + 3.9925870299339294e-01 + <_> + + 0 1 1262 2.2421479225158691e-03 -1 -2 1263 + -1.2139769969508052e-03 + + 4.6882608532905579e-01 5.5042350292205811e-01 + 4.3848711252212524e-01 + <_> + + 0 1 1264 -2.9469770379364491e-03 -1 -2 1265 + -3.9291830034926534e-04 + + 3.8928470015525818e-01 6.0017228126525879e-01 + 4.5616629719734192e-01 + <_> + + 1 0 1266 6.2550729513168335e-01 -1 -2 1267 + 9.7744520753622055e-03 + + 6.8125613033771515e-02 4.8130258917808533e-01 + 5.6206572055816650e-01 + <_> + + 1 0 1268 9.4378247857093811e-02 -1 -2 1269 + -1.9560910295695066e-03 + + 6.6632293164730072e-02 3.5882329940795898e-01 + 5.2954071760177612e-01 + <_> + + 0 1 1270 9.0652769431471825e-03 -1 -2 1271 + 4.2138071148656309e-04 + + 4.8226881027221680e-01 4.6703329682350159e-01 + 5.6831127405166626e-01 + <_> + + 1 0 1272 -4.4220191193744540e-04 -1 -2 1273 + -4.7313501127064228e-03 + + 5.3607952594757080e-01 6.1372458934783936e-01 + 3.1880891323089600e-01 + <_> + + 0 1 1274 1.5395509544759989e-03 -1 -2 1275 + 2.4315000046044588e-03 + + 4.4877201318740845e-01 4.8941668868064880e-01 + 6.7166537046432495e-01 + <_> + + 0 1 1276 -1.5581619925796986e-02 -1 -2 1277 + 1.0816920548677444e-03 + + 3.3367419242858887e-01 4.7182199358940125e-01 + 5.9606271982192993e-01 + <_> + + 0 1 1278 -2.2197659127414227e-03 -1 -2 1279 + -9.3048671260476112e-04 + + 3.5885548591613770e-01 6.2187129259109497e-01 + 4.8173001408576965e-01 + <_> + + 0 1 1280 -4.7418707981705666e-03 -1 -2 1281 + -6.2950369901955128e-03 + + 2.5500270724296570e-01 6.7280787229537964e-01 + 5.0510638952255249e-01 + <_> + + 0 1 1282 3.5216049291193485e-03 -1 -2 1283 + -2.4289379362016916e-03 + + 5.4019099473953247e-01 5.4194617271423340e-01 + 4.3471428751945496e-01 + <_> + + 0 1 1284 -2.5261470582336187e-03 -1 -2 1285 + -1.4817339833825827e-03 + + 6.9706249237060547e-01 3.2634168863296509e-01 + 4.9178731441497803e-01 + <_> + + 0 1 1286 -2.2474530339241028e-01 -1 -2 1287 + 2.8342509176582098e-03 + + 7.2937291115522385e-03 4.5792299509048462e-01 + 5.3798812627792358e-01 + <_> + + 0 1 1288 -2.0821610465645790e-02 -1 -2 1289 + 1.4896340144332498e-04 + + 6.0240888595581055e-01 3.3361440896987915e-01 + 4.9628159403800964e-01 + <_> + + 0 1 1290 
-3.3524499740451574e-03 -1 -2 1291 + -3.7279881536960602e-02 + + 3.5587510466575623e-01 1.6985629498958588e-01 + 5.2089858055114746e-01 + <_> + + 1 0 1292 1.3896770542487502e-04 -1 -2 1293 + -3.1912620761431754e-04 + + 5.5906862020492554e-01 5.8487337827682495e-01 + 3.7958368659019470e-01 + <_> + + 1 0 1294 5.4003461264073849e-04 -1 -2 1295 + 3.8956850767135620e-03 + + 5.6702882051467896e-01 5.1826947927474976e-01 + 3.3277091383934021e-01 + <_> + + 1 0 1296 1.6084529925137758e-03 -1 -2 1297 + -5.7474587811157107e-04 + + 5.4104858636856079e-01 6.0226422548294067e-01 + 3.6446440219879150e-01 + <_> + + 1 0 1298 1.3435039669275284e-02 -1 -2 1299 + 2.1368139423429966e-03 + + 3.4412819147109985e-01 5.2924340963363647e-01 + 2.7470758557319641e-01 + <_> + + 1 0 1300 1.4157629571855068e-02 -1 -2 1301 + 5.3884391672909260e-03 + + 8.0278682708740234e-01 5.2223151922225952e-01 + 3.5867279767990112e-01 + <_> + + 0 1 1302 8.8013410568237305e-03 -1 -2 1303 + 3.8858849438838661e-04 + + 4.9003869295120239e-01 4.6810561418533325e-01 + 5.7219529151916504e-01 + <_> + + 0 1 1304 -2.2143588867038488e-03 -1 -2 1305 + -8.4642972797155380e-03 + + 5.3888058662414551e-01 6.6755378246307373e-01 + 3.4484419226646423e-01 + <_> + + 1 0 1306 1.5044390223920345e-02 -1 -2 1307 + 7.6346402056515217e-03 + + 9.2396140098571777e-01 4.8848968744277954e-01 + 6.3060528039932251e-01 + <_> + + 1 0 1308 3.3895121305249631e-04 -1 -2 1309 + 2.1157610171940178e-04 + + 3.9974310994148254e-01 5.6639820337295532e-01 + 3.9729809761047363e-01 + <_> + + 1 0 1310 -2.7514949440956116e-02 -1 -2 1311 + 5.1603060215711594e-02 + + 5.2010637521743774e-01 5.1407301425933838e-01 + 1.2451309710741043e-01 + <_> + + 1 0 1312 3.7510651163756847e-03 -1 -2 1313 + -2.1457639522850513e-03 + + 3.8020950555801392e-01 3.3094480633735657e-01 + 5.4745388031005859e-01 + <_> + + 1 0 1314 -5.8178009930998087e-04 -1 -2 1315 + -9.3638541875407100e-04 + + 4.8926019668579102e-01 5.9373992681503296e-01 + 4.6646690368652344e-01 + <_> + + 1 0 1316 4.1667491197586060e-02 -1 -2 1317 + -6.7763780243694782e-03 + + 7.0213532447814941e-01 3.2227510213851929e-01 + 5.0683951377868652e-01 + <_> + + 1 0 1318 -2.9170580673962831e-03 -1 -2 1319 + 3.2789530814625323e-04 + + 4.7177010774612427e-01 4.5093831419944763e-01 + 5.6511628627777100e-01 + <_> + 91 + 4.4682968139648438e+01 + + <_> + + 0 1 1320 1.1729800142347813e-02 -1 -2 1321 + 1.1712179984897375e-03 + + 3.8052248954772949e-01 3.1400179862976074e-01 + 6.8581461906433105e-01 + <_> + + 1 0 1322 9.3555096536874771e-03 -1 -2 1323 + 1.6570610459893942e-03 + + 6.8346732854843140e-01 2.9924729466438293e-01 + 5.4756778478622437e-01 + <_> + + 1 0 1324 -1.3387809740379453e-03 -1 -2 1325 + 1.7580550047568977e-04 + + 2.9414069652557373e-01 3.8969779014587402e-01 + 5.8729708194732666e-01 + <_> + + 0 1 1326 -2.9473248869180679e-03 -1 -2 1327 + 8.3220899105072021e-03 + + 3.5765719413757324e-01 5.2324008941650391e-01 + 3.2310879230499268e-01 + <_> + + 1 0 1328 7.4366689659655094e-03 -1 -2 1329 + -2.1322889369912446e-04 + + 6.7156732082366943e-01 5.4705417156219482e-01 + 3.8633960485458374e-01 + <_> + + 0 1 1330 -7.8024631366133690e-03 -1 -2 1331 + 5.6611228501424193e-04 + + 2.7714601159095764e-01 4.6891361474990845e-01 + 5.8519637584686279e-01 + <_> + + 0 1 1332 -9.2346500605344772e-03 -1 -2 1333 + -1.4676499631605111e-05 + + 2.7043971419334412e-01 5.6225502490997314e-01 + 3.5793170332908630e-01 + <_> + + 0 1 1334 9.7007937729358673e-03 -1 -2 1335 + -3.5320650786161423e-03 + + 4.1738718748092651e-01 4.1950130462646484e-01 + 
5.5494689941406250e-01 + <_> + + 1 0 1336 2.1616410464048386e-02 -1 -2 1337 + 3.4567608963698149e-03 + + 2.8573909401893616e-01 6.0245329141616821e-01 + 4.3775078654289246e-01 + <_> + + 0 1 1338 2.2914320230484009e-02 -1 -2 1339 + 3.4328910987824202e-03 + + 4.6893501281738281e-01 4.6646049618721008e-01 + 5.7625621557235718e-01 + <_> + + 0 1 1340 -8.6510833352804184e-03 -1 -2 1341 + 1.4510039472952485e-03 + + 6.3817399740219116e-01 3.7114879488945007e-01 + 5.5307507514953613e-01 + <_> + + 0 1 1342 7.8191719949245453e-03 -1 -2 1343 + 2.0798550394829363e-04 + + 5.2643620967864990e-01 3.7305128574371338e-01 + 5.4457312822341919e-01 + <_> + + 0 1 1344 -3.9962218143045902e-03 -1 -2 1345 + -1.5010139577498194e-05 + + 2.4381700158119202e-01 5.3246712684631348e-01 + 3.6829888820648193e-01 + <_> + + 0 1 1346 -4.2428788729012012e-03 -1 -2 1347 + 9.1374982148408890e-03 + + 6.4814740419387817e-01 4.8961588740348816e-01 + 6.5588432550430298e-01 + <_> + + 1 0 1348 8.8254585862159729e-03 -1 -2 1349 + 9.4092212384566665e-04 + + 3.6138701438903809e-01 5.5028957128524780e-01 + 3.6325180530548096e-01 + <_> + + 0 1 1350 -1.2503350153565407e-02 -1 -2 1351 + 8.6759645491838455e-03 + + 2.2611320018768311e-01 4.9878901243209839e-01 + 6.8471962213516235e-01 + <_> + + 0 1 1352 -1.0416760109364986e-02 -1 -2 1353 + 2.7432460337877274e-03 + + 2.4462990462779999e-01 3.5115250945091248e-01 + 5.3998267650604248e-01 + <_> + + 0 1 1354 -4.2385691776871681e-03 -1 -2 1355 + 1.8325870856642723e-02 + + 6.8236732482910156e-01 4.8915800452232361e-01 + 7.1356189250946045e-01 + <_> + + 0 1 1356 -2.4334540590643883e-02 -1 -2 1357 + 4.6469361404888332e-04 + + 3.5225218534469604e-01 4.0498688817024231e-01 + 5.5158257484436035e-01 + <_> + + 1 0 1358 3.4260009415447712e-03 -1 -2 1359 + -2.5827318895608187e-03 + + 4.1267699003219604e-01 2.8994289040565491e-01 + 5.3864318132400513e-01 + <_> + + 1 0 1360 1.0545699624344707e-03 -1 -2 1361 + -9.1257691383361816e-04 + + 3.7713441252708435e-01 5.8273869752883911e-01 + 4.2675569653511047e-01 + <_> + + 0 1 1362 2.6589010376483202e-03 -1 -2 1363 + 4.8598358407616615e-03 + + 4.6881249547004700e-01 4.8539221286773682e-01 + 6.1636447906494141e-01 + <_> + + 1 0 1364 8.0638676881790161e-03 -1 -2 1365 + -7.5898370705544949e-03 + + 1.7491950094699860e-01 6.8261897563934326e-01 + 4.8940700292587280e-01 + <_> + + 0 1 1366 3.6368070868775249e-04 -1 -2 1367 + 6.2594950199127197e-02 + + 4.6145960688591003e-01 5.1830172538757324e-01 + 2.6866960525512695e-01 + <_> + + 0 1 1368 -4.9753207713365555e-03 -1 -2 1369 + -2.0880119409412146e-03 + + 1.7584669589996338e-01 6.3693821430206299e-01 + 4.9300441145896912e-01 + <_> + + 1 0 1370 9.5644511748105288e-04 -1 -2 1371 + -3.1721461564302444e-02 + + 4.1393989324569702e-01 6.0455572605133057e-01 + 4.8163640499114990e-01 + <_> + + 0 1 1372 1.2898689601570368e-03 -1 -2 1373 + 9.8405163735151291e-03 + + 5.4508107900619507e-01 2.9240009188652039e-01 + 6.6996061801910400e-01 + <_> + + 1 0 1374 1.2237089686095715e-03 -1 -2 1375 + -8.4232585504651070e-03 + + 6.2828367948532104e-01 5.9865701198577881e-01 + 4.8525801301002502e-01 + <_> + + 0 1 1376 -7.2726322105154395e-04 -1 -2 1377 + 4.6842931769788265e-03 + + 3.3400490880012512e-01 5.1689237356185913e-01 + 2.6794800162315369e-01 + <_> + + 0 1 1378 -1.0379579616710544e-03 -1 -2 1379 + 9.1342730447649956e-03 + + 5.9257918596267700e-01 5.4377281665802002e-01 + 4.3468001484870911e-01 + <_> + + 0 1 1380 1.4971119817346334e-03 -1 -2 1381 + 1.5762320253998041e-03 + + 4.1295009851455688e-01 4.5228740572929382e-01 + 
6.5562921762466431e-01 + <_> + + 0 1 1382 8.7496247142553329e-03 -1 -2 1383 + -8.5103599121794105e-04 + + 4.5320340991020203e-01 3.7859839200973511e-01 + 5.4169750213623047e-01 + <_> + + 0 1 1384 -1.7325570806860924e-02 -1 -2 1385 + -8.3266440778970718e-03 + + 6.8842482566833496e-01 3.0913260579109192e-01 + 5.2436548471450806e-01 + <_> + + 0 1 1386 1.5157909729168750e-05 -1 -2 1387 + 1.8041470320895314e-03 + + 4.7657939791679382e-01 4.7253859043121338e-01 + 5.7165551185607910e-01 + <_> + + 1 0 1388 3.0691560823470354e-03 -1 -2 1389 + -5.2225510444259271e-05 + + 2.1433599293231964e-01 5.6532102823257446e-01 + 4.3851110339164734e-01 + <_> + + 1 0 1390 1.0072169970953837e-04 -1 -2 1391 + 1.3573700562119484e-04 + + 5.9247761964797974e-01 4.5734488964080811e-01 + 5.7693827152252197e-01 + <_> + + 1 0 1392 9.2137878527864814e-04 -1 -2 1393 + 3.0316581251099706e-04 + + 5.9926092624664307e-01 3.6100810766220093e-01 + 5.0493258237838745e-01 + <_> + + 1 0 1394 3.9582479745149612e-02 -1 -2 1395 + 4.7519680112600327e-02 + + 1.5384890139102936e-01 5.2161407470703125e-01 + 1.4283910393714905e-01 + <_> + + 1 0 1396 1.8871759995818138e-02 -1 -2 1397 + -3.9876459049992263e-04 + + 2.8255069255828857e-01 4.0350168943405151e-01 + 5.4377931356430054e-01 + <_> + + 0 1 1398 4.6556600136682391e-04 -1 -2 1399 + 6.7090610973536968e-03 + + 4.6689969301223755e-01 5.3313547372817993e-01 + 4.1365718841552734e-01 + <_> + + 0 1 1400 -1.8931160448119044e-03 -1 -2 1401 + -1.3056949712336063e-02 + + 7.1551632881164551e-01 3.1178998947143555e-01 + 5.2084398269653320e-01 + <_> + + 1 0 1402 -1.9484119547996670e-04 -1 -2 1403 + 1.5093220099515747e-05 + + 4.6376588940620422e-01 4.5616531372070312e-01 + 5.4452341794967651e-01 + <_> + + 1 0 1404 -7.1617960202274844e-06 -1 -2 1405 + 3.0164679628796875e-04 + + 4.1931080818176270e-01 5.9662377834320068e-01 + 4.1005000472068787e-01 + <_> + + 0 1 1406 4.4195181690156460e-03 -1 -2 1407 + -7.3984181508421898e-03 + + 4.8450559377670288e-01 6.2068462371826172e-01 + 4.9312090873718262e-01 + <_> + + 1 0 1408 -7.8031201846897602e-03 -1 -2 1409 + -1.0731429792940617e-02 + + 5.2824628353118896e-01 9.1048341989517212e-01 + 3.4559220075607300e-01 + <_> + + 0 1 1410 1.4246780192479491e-03 -1 -2 1411 + -8.2717568147927523e-05 + + 4.7085541486740112e-01 5.6516230106353760e-01 + 4.7310239076614380e-01 + <_> + + 1 0 1412 4.4803409837186337e-03 -1 -2 1413 + 3.0789140146225691e-03 + + 6.1758869886398315e-01 5.1395332813262939e-01 + 3.4230878949165344e-01 + <_> + + 1 0 1414 -1.1310289846733212e-03 -1 -2 1415 + -1.0410690447315574e-03 + + 4.9182820320129395e-01 5.9420871734619141e-01 + 4.9230429530143738e-01 + <_> + + 1 0 1416 1.1648540385067463e-03 -1 -2 1417 + 9.0057362103834748e-04 + + 6.4052718877792358e-01 4.5043969154357910e-01 + 6.1920768022537231e-01 + <_> + + 0 1 1418 6.8781538866460323e-03 -1 -2 1419 + -3.5283900797367096e-02 + + 5.3748130798339844e-01 2.2471010684967041e-01 + 5.2171707153320312e-01 + <_> + + 0 1 1420 -1.3320200378075242e-03 -1 -2 1421 + -2.3177571129053831e-03 + + 2.5547030568122864e-01 3.7925159931182861e-01 + 5.2432268857955933e-01 + <_> + + 0 1 1422 2.1332940377760679e-04 -1 -2 1423 + 1.3467900454998016e-02 + + 3.8603371381759644e-01 5.3806877136230469e-01 + 4.1783639788627625e-01 + <_> + + 0 1 1424 -1.2829169863834977e-03 -1 -2 1425 + 5.1571638323366642e-04 + + 6.1336231231689453e-01 4.0285378694534302e-01 + 5.5368518829345703e-01 + <_> + + 0 1 1426 3.9254198782145977e-03 -1 -2 1427 + -3.3780589699745178e-02 + + 5.2799212932586670e-01 2.3346750438213348e-01 + 
5.1759117841720581e-01 + <_> + + 0 1 1428 -3.7853721529245377e-02 -1 -2 1429 + -4.0752900531515479e-04 + + 1.0748530179262161e-01 5.3459298610687256e-01 + 4.1989380121231079e-01 + <_> + + 0 1 1430 -3.1193809118121862e-03 -1 -2 1431 + -1.5714969485998154e-02 + + 3.8558250665664673e-01 3.3351901173591614e-01 + 5.2632021903991699e-01 + <_> + + 0 1 1432 -7.8525702701881528e-04 -1 -2 1433 + -2.8750501223839819e-04 + + 5.8603972196578979e-01 5.4377847909927368e-01 + 3.7161049246788025e-01 + <_> + + 1 0 1434 2.8016859665513039e-02 -1 -2 1435 + -1.9018839811906219e-03 + + 3.3307549357414246e-01 5.3665977716445923e-01 + 4.6937939524650574e-01 + <_> + + 1 0 1436 2.0647559314966202e-02 -1 -2 1437 + 4.3002571910619736e-03 + + 1.0069560259580612e-01 4.8160359263420105e-01 + 6.2156772613525391e-01 + <_> + + 0 1 1438 1.3459140434861183e-02 -1 -2 1439 + -1.0320040397346020e-02 + + 5.4619538784027100e-01 4.5784530043601990e-01 + 5.4193097352981567e-01 + <_> + + 1 0 1440 3.1990748643875122e-01 -1 -2 1441 + 9.2198798665776849e-04 + + 2.0080469548702240e-01 5.1932811737060547e-01 + 3.9121940732002258e-01 + <_> + + 0 1 1442 4.1852539288811386e-04 -1 -2 1443 + 3.5891108564101160e-04 + + 4.2997440695762634e-01 4.3445029854774475e-01 + 5.5319738388061523e-01 + <_> + + 0 1 1444 -2.0992439985275269e-01 -1 -2 1445 + -4.9328152090311050e-03 + + 1.0757210105657578e-01 5.7627969980239868e-01 + 4.5746439695358276e-01 + <_> + + 1 0 1446 2.3409130517393351e-03 -1 -2 1447 + 4.7120270319283009e-03 + + 7.4768078327178955e-01 5.2617651224136353e-01 + 4.5055508613586426e-01 + <_> + + 0 1 1448 2.8713190928101540e-02 -1 -2 1449 + -2.6156550738960505e-03 + + 4.4071030616760254e-01 4.2442709207534790e-01 + 6.8929767608642578e-01 + <_> + + 0 1 1450 -1.3558969832956791e-02 -1 -2 1451 + -3.0331799644045532e-04 + + 1.2522679567337036e-01 4.0777918696403503e-01 + 5.4428178071975708e-01 + <_> + + 0 1 1452 -5.5601762142032385e-04 -1 -2 1453 + 2.4025330785661936e-03 + + 5.3780037164688110e-01 3.1665799021720886e-01 + 5.2857381105422974e-01 + <_> + + 1 0 1454 -3.4089901018887758e-03 -1 -2 1455 + 8.0019602319225669e-04 + + 4.9052149057388306e-01 4.5227360725402832e-01 + 5.5806142091751099e-01 + <_> + + 1 0 1456 2.1901070140302181e-03 -1 -2 1457 + 3.3745369873940945e-03 + + 6.6126817464828491e-01 5.1077651977539062e-01 + 3.3869299292564392e-01 + <_> + + 1 0 1458 8.0019602319225669e-04 -1 -2 1459 + 1.7346069216728210e-02 + + 5.7075601816177368e-01 5.0160211324691772e-01 + 6.3064599037170410e-01 + <_> + + 0 1 1460 -1.9568449351936579e-03 -1 -2 1461 + -1.1229019612073898e-02 + + 3.0178061127662659e-01 6.2938511371612549e-01 + 4.5204889774322510e-01 + <_> + + 0 1 1462 -2.6608388870954514e-03 -1 -2 1463 + -1.1615100316703320e-02 + + 3.3440071344375610e-01 2.8253790736198425e-01 + 5.1509708166122437e-01 + <_> + + 0 1 1464 -9.5248602330684662e-02 -1 -2 1465 + 7.3701781220734119e-03 + + 1.3982650637626648e-01 5.2939987182617188e-01 + 2.3317280411720276e-01 + <_> + + 1 0 1466 -1.4953900128602982e-02 -1 -2 1467 + 5.7038792874664068e-04 + + 4.9404659867286682e-01 5.4665708541870117e-01 + 4.6267679333686829e-01 + <_> + + 1 0 1468 5.8516198769211769e-03 -1 -2 1469 + 2.1150549582671374e-04 + + 6.2700408697128296e-01 5.5081409215927124e-01 + 4.0618729591369629e-01 + <_> + + 1 0 1470 -6.9679190346505493e-06 -1 -2 1471 + -7.9677387839183211e-04 + + 4.0965679287910461e-01 5.6155568361282349e-01 + 4.6668860316276550e-01 + <_> + + 1 0 1472 1.9459480419754982e-02 -1 -2 1473 + -1.1160830035805702e-02 + + 2.3114809393882751e-01 3.0870118737220764e-01 + 
5.5146622657775879e-01 + <_> + + 1 0 1474 1.4056149870157242e-02 -1 -2 1475 + -3.2958350493572652e-04 + + 7.0050561428070068e-01 5.7974857091903687e-01 + 4.6916508674621582e-01 + <_> + + 0 1 1476 -5.4636420682072639e-03 -1 -2 1477 + 5.8881669247057289e-05 + + 5.9285950660705566e-01 3.7413978576660156e-01 + 5.1701688766479492e-01 + <_> + + 0 1 1478 6.6343429498374462e-03 -1 -2 1479 + 4.5263409614562988e-02 + + 5.4149878025054932e-01 5.1803272962570190e-01 + 1.5296840667724609e-01 + <_> + + 0 1 1480 -8.0646127462387085e-03 -1 -2 1481 + 4.7389548853971064e-04 + + 2.5154680013656616e-01 5.1219987869262695e-01 + 3.7259489297866821e-01 + <_> + + 1 0 1482 1.4877359717502259e-05 -1 -2 1483 + 2.4321159347891808e-02 + + 5.5324357748031616e-01 4.9607661366462708e-01 + 5.9833151102066040e-01 + <_> + + 0 1 1484 6.9931396865285933e-05 -1 -2 1485 + 2.6287760119885206e-03 + + 4.1639530658721924e-01 5.8801448345184326e-01 + 3.3996629714965820e-01 + <_> + + 1 0 1486 3.8190539926290512e-03 -1 -2 1487 + -2.5989150628447533e-02 + + 7.8466212749481201e-01 3.2881140708923340e-01 + 5.1550877094268799e-01 + <_> + + 0 1 1488 1.2062400346621871e-03 -1 -2 1489 + -1.5557400183752179e-03 + + 4.5960599184036255e-01 3.1269869208335876e-01 + 7.1833992004394531e-01 + <_> + + 1 0 1490 -2.2691930644214153e-03 -1 -2 1491 + 2.3287249496206641e-04 + + 5.2740061283111572e-01 4.8786661028862000e-01 + 5.6151527166366577e-01 + <_> + + 1 0 1492 -5.5999699980020523e-03 -1 -2 1493 + -1.0496189817786217e-02 + + 5.1608121395111084e-01 5.7016140222549438e-01 + 3.2048508524894714e-01 + <_> + + 0 1 1494 -1.4814930182183161e-05 -1 -2 1495 + -6.4287078566849232e-04 + + 5.5388379096984863e-01 5.3494292497634888e-01 + 4.4721511006355286e-01 + <_> + + 0 1 1496 -1.8891949730459601e-04 -1 -2 1497 + -9.0413521975278854e-03 + + 5.0128370523452759e-01 2.5629359483718872e-01 + 4.5033830404281616e-01 + <_> + + 1 0 1498 7.9534705728292465e-03 -1 -2 1499 + -2.7908999472856522e-03 + + 2.6304998993873596e-01 5.7565087080001831e-01 + 4.8548638820648193e-01 + <_> + + 1 0 1500 3.2857100013643503e-03 -1 -2 1501 + 7.7063008211553097e-04 + + 4.0847519040107727e-01 4.0733560919761658e-01 + 5.9202408790588379e-01 + <_> + 97 + 4.7763450622558594e+01 + + <_> + + 0 1 1502 6.3021942973136902e-02 -1 -2 1503 + -2.8374609537422657e-03 + + 3.4193828701972961e-01 6.8295639753341675e-01 + 4.4045230746269226e-01 + <_> + + 0 1 1504 4.6461950987577438e-02 -1 -2 1505 + 2.9152540490031242e-02 + + 4.3917450308799744e-01 4.6010631322860718e-01 + 6.3579368591308594e-01 + <_> + + 1 0 1506 -1.4000290320836939e-05 -1 -2 1507 + -1.2757079675793648e-03 + + 3.7300100922584534e-01 3.0938240885734558e-01 + 5.9013700485229492e-01 + <_> + + 0 1 1508 1.3596529606729746e-03 -1 -2 1509 + 1.7991929780691862e-04 + + 4.3375650048255920e-01 4.2175039649009705e-01 + 5.8468478918075562e-01 + <_> + + 1 0 1510 -1.4166639630275313e-05 -1 -2 1511 + 6.0252390539972112e-05 + + 4.0846911072731018e-01 5.0872868299484253e-01 + 7.2771841287612915e-01 + <_> + + 1 0 1512 6.4320368692278862e-03 -1 -2 1513 + 4.6682319953106344e-04 + + 2.9679030179977417e-01 4.1104629635810852e-01 + 5.5812197923660278e-01 + <_> + + 0 1 1514 5.7436279021203518e-03 -1 -2 1515 + 3.2019240316003561e-03 + + 4.2873099446296692e-01 4.2661958932876587e-01 + 6.4440459012985229e-01 + <_> + + 1 0 1516 -5.7637941790744662e-04 -1 -2 1517 + -3.7901920732110739e-03 + + 4.0848249197006226e-01 3.1819209456443787e-01 + 5.2306932210922241e-01 + <_> + + 1 0 1518 4.8914109356701374e-03 -1 -2 1519 + 4.6459292061626911e-03 + + 3.5483568906784058e-01 
5.6105977296829224e-01 + 2.6938489079475403e-01 + <_> + + 0 1 1520 -6.8799369037151337e-03 -1 -2 1521 + -1.8147470429539680e-02 + + 6.2354081869125366e-01 2.8619819879531860e-01 + 5.2268481254577637e-01 + <_> + + 1 0 1522 1.1409220314817503e-04 -1 -2 1523 + -5.4334272863343358e-04 + + 3.2578331232070923e-01 3.8829690217971802e-01 + 5.3411662578582764e-01 + <_> + + 0 1 1524 -2.7602489572018385e-03 -1 -2 1525 + -1.9730569329112768e-03 + + 6.3539659976959229e-01 5.8807611465454102e-01 + 4.5930901169776917e-01 + <_> + + 1 0 1526 2.4565239436924458e-03 -1 -2 1527 + 1.9392010290175676e-04 + + 3.1340101361274719e-01 5.2771317958831787e-01 + 3.6041069030761719e-01 + <_> + + 0 1 1528 7.8643016517162323e-02 -1 -2 1529 + 6.5276869572699070e-03 + + 5.2903419733047485e-01 4.6544799208641052e-01 + 6.0449051856994629e-01 + <_> + + 0 1 1530 -7.8716799616813660e-02 -1 -2 1531 + 5.7298499159514904e-03 + + 2.5411269068717957e-01 4.3669191002845764e-01 + 5.8228862285614014e-01 + <_> + + 1 0 1532 6.2386557692661881e-04 -1 -2 1533 + -8.5267230868339539e-02 + + 5.4726922512054443e-01 1.4616079628467560e-01 + 5.1818108558654785e-01 + <_> + + 1 0 1534 4.0981110185384750e-02 -1 -2 1535 + 7.7135749161243439e-03 + + 1.2701350450515747e-01 4.8326849937438965e-01 + 2.2235789895057678e-01 + <_> + + 0 1 1536 -6.8663940764963627e-03 -1 -2 1537 + 1.4559639617800713e-02 + + 5.9189289808273315e-01 4.7615069150924683e-01 + 5.7272237539291382e-01 + <_> + + 0 1 1538 -1.0064310394227505e-02 -1 -2 1539 + 3.6274080630391836e-03 + + 3.6367309093475342e-01 5.2717310190200806e-01 + 2.7405250072479248e-01 + <_> + + 0 1 1540 -2.3421540390700102e-03 -1 -2 1541 + -2.4686409160494804e-02 + + 5.4977840185165405e-01 6.0598951578140259e-01 + 4.9603140354156494e-01 + <_> + + 1 0 1542 1.9456120207905769e-04 -1 -2 1543 + 3.1714211218059063e-04 + + 3.7694650888442993e-01 4.0623620152473450e-01 + 5.6682151556015015e-01 + <_> + + 0 1 1544 2.0793990697711706e-03 -1 -2 1545 + 1.7982709687203169e-03 + + 4.6186569333076477e-01 4.8675051331520081e-01 + 6.5184497833251953e-01 + <_> + + 0 1 1546 -2.2287059982772917e-04 -1 -2 1547 + 3.2623921288177371e-04 + + 5.6775957345962524e-01 3.7107339501380920e-01 + 5.6766051054000854e-01 + <_> + + 0 1 1548 -6.6792681813240051e-02 -1 -2 1549 + -1.4869889710098505e-03 + + 2.5115218758583069e-01 3.8867509365081787e-01 + 5.2622538805007935e-01 + <_> + + 0 1 1550 -5.0454870797693729e-03 -1 -2 1551 + -4.8297587782144547e-03 + + 6.5574729442596436e-01 5.9341061115264893e-01 + 4.2859220504760742e-01 + <_> + + 1 0 1552 -1.0722599690780044e-03 -1 -2 1553 + 8.7901195511221886e-03 + + 5.4260587692260742e-01 5.3513032197952271e-01 + 4.8342779278755188e-01 + <_> + + 0 1 1554 -7.1750381030142307e-03 -1 -2 1555 + 1.1251230025663972e-03 + + 2.0671689510345459e-01 5.1122522354125977e-01 + 3.4687140583992004e-01 + <_> + + 0 1 1556 1.0634710080921650e-02 -1 -2 1557 + -1.1763219721615314e-02 + + 4.4790080189704895e-01 6.2539017200469971e-01 + 4.9689871072769165e-01 + <_> + + 1 0 1558 9.2324063181877136e-02 -1 -2 1559 + 1.8991080578416586e-03 + + 2.0313039422035217e-01 5.6187218427658081e-01 + 4.0465721487998962e-01 + <_> + + 1 0 1560 -1.0510340332984924e-02 -1 -2 1561 + -7.4531312566250563e-04 + + 4.9432641267776489e-01 5.6134277582168579e-01 + 3.8453319668769836e-01 + <_> + + 1 0 1562 8.0041000619530678e-03 -1 -2 1563 + 5.8110528625547886e-03 + + 7.7598422765731812e-01 4.6247330307960510e-01 + 6.2862771749496460e-01 + <_> + + 0 1 1564 -2.7918580919504166e-02 -1 -2 1565 + 2.1739399526268244e-03 + + 2.4093140661716461e-01 
5.3455048799514771e-01 + 3.5079580545425415e-01 + <_> + + 0 1 1566 -4.0639587678015232e-03 -1 -2 1567 + 6.0017139185220003e-04 + + 6.6471010446548462e-01 4.9985098838806152e-01 + 3.0221650004386902e-01 + <_> + + 1 0 1568 1.9214770291000605e-03 -1 -2 1569 + -1.3860830105841160e-02 + + 5.9191507101058960e-01 6.3517677783966064e-01 + 4.9933108687400818e-01 + <_> + + 1 0 1570 2.3006850853562355e-02 -1 -2 1571 + -1.3857929734513164e-03 + + 1.9023360311985016e-01 5.2533692121505737e-01 + 3.9858600497245789e-01 + <_> + + 0 1 1572 1.2637410545721650e-03 -1 -2 1573 + -1.4675210230052471e-02 + + 4.6661040186882019e-01 3.8231649994850159e-01 + 5.3266328573226929e-01 + <_> + + 0 1 1574 -2.9535070061683655e-03 -1 -2 1575 + -1.7189770005643368e-03 + + 7.0636558532714844e-01 3.8134628534317017e-01 + 5.2467352151870728e-01 + <_> + + 1 0 1576 -4.2484089499339461e-04 -1 -2 1577 + -8.5248658433556557e-04 + + 4.7916388511657715e-01 4.4912180304527283e-01 + 5.3709012269973755e-01 + <_> + + 1 0 1578 8.9034568518400192e-03 -1 -2 1579 + 1.4895649655954912e-05 + + 2.0764739811420441e-01 4.4476351141929626e-01 + 5.6671631336212158e-01 + <_> + + 0 1 1580 -4.7091601300053298e-04 -1 -2 1581 + 4.3084810022264719e-04 + + 5.4650712013244629e-01 5.4932618141174316e-01 + 4.5807081460952759e-01 + <_> + + 0 1 1582 -6.3893961487337947e-04 -1 -2 1583 + -7.3733746830839664e-05 + + 5.5015718936920166e-01 5.0857907533645630e-01 + 3.3056980371475220e-01 + <_> + + 0 1 1584 -8.8991485536098480e-03 -1 -2 1585 + -1.0253350250422955e-02 + + 4.2764690518379211e-01 1.1232180148363113e-01 + 5.1527231931686401e-01 + <_> + + 0 1 1586 -5.9637490659952164e-02 -1 -2 1587 + 2.1707199513912201e-02 + + 7.3867720365524292e-01 4.9962919950485229e-01 + 1.3394139707088470e-01 + <_> + + 0 1 1588 9.9107045680284500e-03 -1 -2 1589 + -1.0998300276696682e-02 + + 4.6790120005607605e-01 6.9286561012268066e-01 + 5.0120681524276733e-01 + <_> + + 1 0 1590 7.4608891736716032e-04 -1 -2 1591 + 2.9539171373471618e-04 + + 5.8335822820663452e-01 3.8263911008834839e-01 + 5.5663508176803589e-01 + <_> + + 1 0 1592 5.0054129213094711e-02 -1 -2 1593 + -7.2330660186707973e-03 + + 3.0027210712432861e-01 5.9080427885055542e-01 + 5.0008708238601685e-01 + <_> + + 0 1 1594 -2.6863380335271358e-03 -1 -2 1595 + -1.0195849463343620e-03 + + 3.9750349521636963e-01 3.6976858973503113e-01 + 5.7561928033828735e-01 + <_> + + 0 1 1596 -2.0204920321702957e-02 -1 -2 1597 + 2.1340379025787115e-03 + + 6.3752681016921997e-01 5.3632658720016479e-01 + 4.4331708550453186e-01 + <_> + + 0 1 1598 -1.8348889425396919e-03 -1 -2 1599 + -5.9489468112587929e-03 + + 5.8289992809295654e-01 2.6806709170341492e-01 + 4.6428859233856201e-01 + <_> + + 0 1 1600 -2.3030120064504445e-04 -1 -2 1601 + 5.0581009127199650e-03 + + 5.4753202199935913e-01 5.3208339214324951e-01 + 4.6464928984642029e-01 + <_> + + 0 1 1602 -5.1950011402368546e-04 -1 -2 1603 + -6.8620947422459722e-04 + + 5.2327448129653931e-01 4.9350860714912415e-01 + 3.1031179428100586e-01 + <_> + + 0 1 1604 -7.4936267919838428e-03 -1 -2 1605 + -1.5682930126786232e-02 + + 2.8830468654632568e-01 3.6403131484985352e-01 + 5.3687548637390137e-01 + <_> + + 0 1 1606 -3.2649750355631113e-03 -1 -2 1607 + 3.8463930832222104e-04 + + 6.4686310291290283e-01 5.2596598863601685e-01 + 3.8314279913902283e-01 + <_> + + 1 0 1608 4.4492390006780624e-03 -1 -2 1609 + 2.3118320852518082e-02 + + 2.0868189632892609e-01 4.9785330891609192e-01 + 5.9612572193145752e-01 + <_> + + 1 0 1610 2.0835159812122583e-03 -1 -2 1611 + 1.1513150529935956e-03 + + 5.7464218139648438e-01 
3.5868450999259949e-01 + 5.3634738922119141e-01 + <_> + + 1 0 1612 3.6104708909988403e-02 -1 -2 1613 + 3.6256198654882610e-04 + + 2.8331369161605835e-01 5.4777222871780396e-01 + 4.1105321049690247e-01 + <_> + + 0 1 1614 -3.4635469783097506e-03 -1 -2 1615 + -2.8796829283237457e-03 + + 5.9903860092163086e-01 5.7252532243728638e-01 + 4.1495120525360107e-01 + <_> + + 1 0 1616 -8.1119500100612640e-03 -1 -2 1617 + 4.5932079665362835e-03 + + 5.3963518142700195e-01 5.3797042369842529e-01 + 3.8913029432296753e-01 + <_> + + 1 0 1618 7.0014740340411663e-03 -1 -2 1619 + 8.0169539432972670e-04 + + 3.7146711349487305e-01 5.5295670032501221e-01 + 3.7558048963546753e-01 + <_> + + 1 0 1620 -8.6652329191565514e-03 -1 -2 1621 + -2.7315050829201937e-03 + + 5.0257730484008789e-01 5.8503222465515137e-01 + 4.6175739169120789e-01 + <_> + + 1 0 1622 1.3301590224727988e-03 -1 -2 1623 + -4.2648240923881531e-03 + + 5.9377008676528931e-01 5.6453680992126465e-01 + 3.9376249909400940e-01 + <_> + + 0 1 1624 6.3251499086618423e-03 -1 -2 1625 + -3.0753740575164557e-03 + + 5.1821058988571167e-01 3.0074161291122437e-01 + 5.1964038610458374e-01 + <_> + + 0 1 1626 -7.3622138006612659e-04 -1 -2 1627 + 3.0082479497650638e-05 + + 3.6975800991058350e-01 4.3275931477546692e-01 + 5.7158088684082031e-01 + <_> + + 0 1 1628 -3.8722730241715908e-03 -1 -2 1629 + 6.2879058532416821e-04 + + 3.4737130999565125e-01 5.4382592439651489e-01 + 4.4539061188697815e-01 + <_> + + 1 0 1630 1.3411579420790076e-03 -1 -2 1631 + -8.3681922405958176e-03 + + 6.5117138624191284e-01 1.4432950317859650e-01 + 4.8881998658180237e-01 + <_> + + 1 0 1632 9.3305751215666533e-04 -1 -2 1633 + -1.0746510233730078e-03 + + 3.9511090517044067e-01 3.9102658629417419e-01 + 5.3495037555694580e-01 + <_> + + 0 1 1634 -1.8610050901770592e-02 -1 -2 1635 + 1.3651419430971146e-03 + + 1.2757439911365509e-01 5.0382888317108154e-01 + 6.9513040781021118e-01 + <_> + + 0 1 1636 7.3744421824812889e-03 -1 -2 1637 + 8.4163323044776917e-03 + + 5.2534431219100952e-01 5.0112438201904297e-01 + 7.3113328218460083e-01 + <_> + + 0 1 1638 5.1413988694548607e-03 -1 -2 1639 + 4.5847031287848949e-03 + + 4.9535360932350159e-01 2.5355559587478638e-01 + 6.4624428749084473e-01 + <_> + + 1 0 1640 2.8565239161252975e-02 -1 -2 1641 + 4.3958800961263478e-04 + + 2.3307220637798309e-01 4.7022441029548645e-01 + 5.5445492267608643e-01 + <_> + + 1 0 1642 3.1459458172321320e-02 -1 -2 1643 + 5.6011630222201347e-03 + + 3.3689688891172409e-02 4.7871211171150208e-01 + 6.3383519649505615e-01 + <_> + + 0 1 1644 7.1835669223219156e-04 -1 -2 1645 + -5.5303089320659637e-03 + + 5.4314869642257690e-01 4.1058328747749329e-01 + 5.4039907455444336e-01 + <_> + + 1 0 1646 1.4129279879853129e-03 -1 -2 1647 + 2.5530709535814822e-04 + + 3.1055399775505066e-01 4.2544719576835632e-01 + 5.4471540451049805e-01 + <_> + + 1 0 1648 3.1966410460881889e-04 -1 -2 1649 + 5.0411392003297806e-03 + + 6.1183619499206543e-01 5.2900421619415283e-01 + 4.2247870564460754e-01 + <_> + + 0 1 1650 7.7617880888283253e-03 -1 -2 1651 + 2.9374631121754646e-03 + + 4.3153458833694458e-01 6.6292631626129150e-01 + 3.0289649963378906e-01 + <_> + + 1 0 1652 -1.6497720498591661e-03 -1 -2 1653 + -5.8834417723119259e-03 + + 5.4918527603149414e-01 3.1885540485382080e-01 + 5.1842892169952393e-01 + <_> + + 1 0 1654 8.7459187489002943e-04 -1 -2 1655 + -1.5308779664337635e-02 + + 3.3288308978080750e-01 3.9236080646514893e-01 + 5.2351391315460205e-01 + <_> + + 1 0 1656 3.2292451709508896e-02 -1 -2 1657 + -4.3842519517056644e-04 + + 5.9776467084884644e-01 
4.5416879653930664e-01 + 5.3694289922714233e-01 + <_> + + 1 0 1658 1.5429529594257474e-03 -1 -2 1659 + -2.4733028840273619e-03 + + 6.3181412220001221e-01 3.4906330704689026e-01 + 4.7590249776840210e-01 + <_> + + 1 0 1660 2.0994939841330051e-03 -1 -2 1661 + -5.7541108690202236e-03 + + 5.8871978521347046e-01 5.9613317251205444e-01 + 4.8419830203056335e-01 + <_> + + 0 1 1662 -1.0233130306005478e-02 -1 -2 1663 + 2.2554509341716766e-01 + + 1.7054040729999542e-01 4.7793799638748169e-01 + 9.7879663109779358e-02 + <_> + + 1 0 1664 2.9666559770703316e-02 -1 -2 1665 + -2.8518449980765581e-03 + + 5.8222240209579468e-01 5.4596269130706787e-01 + 4.6100661158561707e-01 + <_> + + 1 0 1666 9.7465328872203827e-04 -1 -2 1667 + 1.4044740055396687e-05 + + 3.6703228950500488e-01 4.3023860454559326e-01 + 5.6917107105255127e-01 + <_> + + 0 1 1668 -1.7579430714249611e-02 -1 -2 1669 + -5.2381679415702820e-02 + + 6.9173210859298706e-01 7.1100401878356934e-01 + 5.0601547956466675e-01 + <_> + + 0 1 1670 -1.1242110282182693e-02 -1 -2 1671 + -3.6728400737047195e-03 + + 8.7691891193389893e-01 6.5191918611526489e-01 + 4.5460689067840576e-01 + <_> + + 0 1 1672 3.5082760732620955e-03 -1 -2 1673 + 6.1679710634052753e-03 + + 5.3298658132553101e-01 5.2204591035842896e-01 + 2.9535189270973206e-01 + <_> + + 1 0 1674 -9.7009900491684675e-04 -1 -2 1675 + -1.0957010090351105e-02 + + 5.0486332178115845e-01 5.8373582363128662e-01 + 3.0200859904289246e-01 + <_> + + 0 1 1676 -8.3272513002157211e-03 -1 -2 1677 + 2.9798380637657829e-05 + + 3.1580638885498047e-01 4.3863898515701294e-01 + 5.4432111978530884e-01 + <_> + + 1 0 1678 2.8244039276614785e-04 -1 -2 1679 + -8.1364117795601487e-04 + + 5.6253957748413086e-01 5.2811980247497559e-01 + 3.4014078974723816e-01 + <_> + + 1 0 1680 1.8008040497079492e-03 -1 -2 1681 + -6.9944779388606548e-03 + + 3.4716591238975525e-01 4.4816970825195312e-01 + 5.3857702016830444e-01 + <_> + + 0 1 1682 4.5625398342963308e-05 -1 -2 1683 + -7.3189922841265798e-04 + + 4.4925129413604736e-01 4.1673120856285095e-01 + 6.0211020708084106e-01 + <_> + + 0 1 1684 -2.9980219551362097e-04 -1 -2 1685 + -2.9060940505587496e-05 + + 4.1484281420707703e-01 5.5920898914337158e-01 + 4.0732109546661377e-01 + <_> + + 0 1 1686 -5.9742690064013004e-04 -1 -2 1687 + 1.4831830048933625e-04 + + 6.0889142751693726e-01 5.2983051538467407e-01 + 3.7619501352310181e-01 + <_> + + 1 0 1688 -2.9441029764711857e-03 -1 -2 1689 + 1.3741210103034973e-01 + + 4.7160848975181580e-01 5.1013368368148804e-01 + 4.6746801584959030e-02 + <_> + + 0 1 1690 -8.8414177298545837e-02 -1 -2 1691 + 7.0610277354717255e-02 + + 1.1818689852952957e-01 5.1190632581710815e-01 + 7.7784419059753418e-01 + <_> + + 0 1 1692 -7.7188978902995586e-03 -1 -2 1693 + 1.5115399844944477e-02 + + 1.8741349875926971e-01 4.9800279736518860e-01 + 7.0058178901672363e-01 + <_> + + 0 1 1694 1.0671879863366485e-03 -1 -2 1695 + 7.0487911580130458e-04 + + 4.4822388887405396e-01 6.2657529115676880e-01 + 4.4026550650596619e-01 + <_> + 90 + 4.4251281738281250e+01 + + <_> + + 1 0 1696 -9.8690733313560486e-02 -1 -2 1697 + 6.2373418360948563e-02 + + 3.9994749426841736e-01 5.2477848529815674e-01 + 8.1935757398605347e-01 + <_> + + 0 1 1698 1.9496519817039371e-03 -1 -2 1699 + -8.9139147894456983e-04 + + 3.5298168659210205e-01 5.8527278900146484e-01 + 3.2459780573844910e-01 + <_> + + 0 1 1700 -5.5150408297777176e-04 -1 -2 1701 + -1.1721949558705091e-03 + + 3.8928169012069702e-01 4.3350520730018616e-01 + 6.5206241607666016e-01 + <_> + + 1 0 1702 -7.4480642797425389e-04 -1 -2 1703 + 
-2.6264840271323919e-03 + + 4.0411350131034851e-01 5.6249821186065674e-01 + 3.9675250649452209e-01 + <_> + + 0 1 1704 -3.9712688885629177e-04 -1 -2 1705 + 3.5984949208796024e-03 + + 3.8561120629310608e-01 5.9978890419006348e-01 + 4.2416140437126160e-01 + <_> + + 1 0 1706 5.3080618381500244e-03 -1 -2 1707 + 9.6319877775385976e-04 + + 6.6601687669754028e-01 4.4813790917396545e-01 + 5.5834877490997314e-01 + <_> + + 0 1 1708 5.0776469288393855e-04 -1 -2 1709 + 3.6223160568624735e-03 + + 3.5354590415954590e-01 3.4098070859909058e-01 + 5.4206877946853638e-01 + <_> + + 0 1 1710 -6.2061410397291183e-02 -1 -2 1711 + 6.4387189922854304e-04 + + 1.9340839982032776e-01 4.0836268663406372e-01 + 5.4902219772338867e-01 + <_> + + 1 0 1712 2.6239909231662750e-02 -1 -2 1713 + 8.1940297968685627e-04 + + 2.2857080399990082e-01 4.6486678719520569e-01 + 6.0173559188842773e-01 + <_> + + 1 0 1714 2.3833119485061616e-04 -1 -2 1715 + -1.5869759954512119e-03 + + 3.5980388522148132e-01 4.2596510052680969e-01 + 5.4764348268508911e-01 + <_> + + 0 1 1716 -6.7263417877256870e-03 -1 -2 1717 + 1.1006110347807407e-02 + + 6.5072381496429443e-01 5.1494097709655762e-01 + 3.3629849553108215e-01 + <_> + + 1 0 1718 7.1445819921791553e-03 -1 -2 1719 + -4.7233798541128635e-03 + + 2.6729300618171692e-01 5.6521821022033691e-01 + 4.2981448769569397e-01 + <_> + + 1 0 1720 9.8437406122684479e-03 -1 -2 1721 + 1.5124640412977897e-05 + + 1.1518859863281250e-01 4.3735980987548828e-01 + 5.6121289730072021e-01 + <_> + + 0 1 1722 3.9908871054649353e-02 -1 -2 1723 + 5.3903679363429546e-03 + + 5.2046489715576172e-01 4.8134678602218628e-01 + 6.3612091541290283e-01 + <_> + + 0 1 1724 -3.9908871054649353e-02 -1 -2 1725 + 5.3903679363429546e-03 + + 1.5068709850311279e-01 4.5816949009895325e-01 + 6.2002408504486084e-01 + <_> + + 1 0 1726 6.7005190066993237e-03 -1 -2 1727 + -1.2623789720237255e-02 + + 3.4322351217269897e-01 3.0882269144058228e-01 + 5.2267378568649292e-01 + <_> + + 1 0 1728 1.1806610040366650e-02 -1 -2 1729 + -3.4257229417562485e-03 + + 7.1879392862319946e-01 3.1208148598670959e-01 + 5.0658440589904785e-01 + <_> + + 0 1 1730 3.9385299896821380e-04 -1 -2 1731 + 3.4388188272714615e-02 + + 4.7545841336250305e-01 5.2616578340530396e-01 + 3.3501741290092468e-01 + <_> + + 0 1 1732 -7.5009986758232117e-02 -1 -2 1733 + 4.9022492021322250e-04 + + 1.7134809494018555e-01 4.7258019447326660e-01 + 5.9564691781997681e-01 + <_> + + 0 1 1734 -8.5525289177894592e-03 -1 -2 1735 + 1.3135520566720515e-04 + + 6.5582227706909180e-01 4.8354008793830872e-01 + 5.5869138240814209e-01 + <_> + + 1 0 1736 4.7948658466339111e-03 -1 -2 1737 + 2.0124691072851419e-03 + + 2.6457059383392334e-01 3.6579450964927673e-01 + 5.1247721910476685e-01 + <_> + + 0 1 1738 -1.1785479635000229e-01 -1 -2 1739 + 1.5575019642710686e-03 + + 2.3856540024280548e-01 5.4904741048812866e-01 + 4.2747479677200317e-01 + <_> + + 0 1 1740 -1.5573759563267231e-02 -1 -2 1741 + -2.1854790393263102e-03 + + 6.9389009475708008e-01 3.6459881067276001e-01 + 5.0925260782241821e-01 + <_> + + 0 1 1742 2.9272339306771755e-03 -1 -2 1743 + 6.4663668163120747e-03 + + 4.6858081221580505e-01 4.9734100699424744e-01 + 7.7260971069335938e-01 + <_> + + 0 1 1744 -7.6140360906720161e-03 -1 -2 1745 + 4.1512572206556797e-03 + + 6.8774658441543579e-01 4.7885251045227051e-01 + 6.9216579198837280e-01 + <_> + + 0 1 1746 2.7711640577763319e-03 -1 -2 1747 + -1.2836109846830368e-02 + + 5.4818397760391235e-01 3.8001629710197449e-01 + 5.2044928073883057e-01 + <_> + + 0 1 1748 -2.4380050599575043e-03 -1 -2 1749 + 
2.1713329479098320e-03 + + 2.5824350118637085e-01 4.9611631035804749e-01 + 3.2152029871940613e-01 + <_> + + 1 0 1750 6.2800728483125567e-04 -1 -2 1751 + -9.7982389852404594e-03 + + 5.4604238271713257e-01 6.0465437173843384e-01 + 4.9399220943450928e-01 + <_> + + 1 0 1752 7.3543828912079334e-03 -1 -2 1753 + -1.4665040187537670e-02 + + 5.2910941839218140e-01 5.4461228847503662e-01 + 3.5673621296882629e-01 + <_> + + 0 1 1754 3.0244510620832443e-02 -1 -2 1755 + -5.6660208851099014e-02 + + 5.5183291435241699e-01 6.9309788942337036e-01 + 5.0933879613876343e-01 + <_> + + 0 1 1756 -5.6967479176819324e-03 -1 -2 1757 + 3.0806770548224449e-02 + + 3.2015261054039001e-01 4.9892461299896240e-01 + 2.2770540416240692e-01 + <_> + + 0 1 1758 2.2748769260942936e-03 -1 -2 1759 + 2.0436900667846203e-03 + + 4.8109310865402222e-01 5.2838671207427979e-01 + 3.2559248805046082e-01 + <_> + + 0 1 1760 -8.6277956143021584e-03 -1 -2 1761 + 6.5113382879644632e-04 + + 6.2665361166000366e-01 5.0971370935440063e-01 + 3.1919100880622864e-01 + <_> + + 0 1 1762 8.8188261725008488e-04 -1 -2 1763 + -1.4594909735023975e-02 + + 4.5495858788490295e-01 2.6450389623641968e-01 + 5.1538681983947754e-01 + <_> + + 0 1 1764 -1.2304580304771662e-03 -1 -2 1765 + -2.1867299801670015e-04 + + 6.1975848674774170e-01 5.4691988229751587e-01 + 4.2068558931350708e-01 + <_> + + 0 1 1766 -1.0909959673881531e-03 -1 -2 1767 + 3.5210378700867295e-04 + + 4.1407600045204163e-01 5.4766088724136353e-01 + 4.1550210118293762e-01 + <_> + + 0 1 1768 -7.2563779540359974e-03 -1 -2 1769 + 1.4701850013807416e-03 + + 7.1604692935943604e-01 5.2408081293106079e-01 + 3.7296628952026367e-01 + <_> + + 0 1 1770 1.1472719779703766e-04 -1 -2 1771 + 3.0506469774991274e-03 + + 4.0337988734245300e-01 5.2639859914779663e-01 + 3.5600930452346802e-01 + <_> + + 0 1 1772 2.6269949739798903e-04 -1 -2 1773 + -3.6365550477057695e-03 + + 4.5697999000549316e-01 3.0425709486007690e-01 + 5.8682537078857422e-01 + <_> + + 1 0 1774 -8.4893293678760529e-03 -1 -2 1775 + 5.8107408694922924e-03 + + 4.9141570925712585e-01 4.9185299873352051e-01 + 6.2669628858566284e-01 + <_> + + 1 0 1776 7.5583951547741890e-04 -1 -2 1777 + -2.2017690353095531e-03 + + 5.6332361698150635e-01 5.5539160966873169e-01 + 3.8276460766792297e-01 + <_> + + 0 1 1778 2.7908938936889172e-03 -1 -2 1779 + -1.8228569533675909e-03 + + 5.4986977577209473e-01 4.3822830915451050e-01 + 5.4240328073501587e-01 + <_> + + 0 1 1780 -7.2495508939027786e-03 -1 -2 1781 + -6.8744522286579013e-04 + + 2.8881219029426575e-01 3.4726551175117493e-01 + 5.0763708353042603e-01 + <_> + + 0 1 1782 2.5174440816044807e-03 -1 -2 1783 + -1.0151379741728306e-02 + + 4.6612051129341125e-01 3.7447750568389893e-01 + 5.2940011024475098e-01 + <_> + + 1 0 1784 -4.1399952024221420e-03 -1 -2 1785 + -4.7078551724553108e-03 + + 4.6604850888252258e-01 4.1750618815422058e-01 + 6.9163060188293457e-01 + <_> + + 1 0 1786 4.1981041431427002e-02 -1 -2 1787 + -1.4272999949753284e-02 + + 2.0182150602340698e-01 7.5111979246139526e-01 + 5.0320839881896973e-01 + <_> + + 1 0 1788 4.0869521908462048e-03 -1 -2 1789 + 1.7606799956411123e-03 + + 2.5045138597488403e-01 3.3014011383056641e-01 + 5.2183371782302856e-01 + <_> + + 0 1 1790 1.2550549581646919e-04 -1 -2 1791 + -2.9503209516406059e-03 + + 4.6144428849220276e-01 4.6199500560760498e-01 + 5.2470302581787109e-01 + <_> + + 0 1 1792 -1.1312420247122645e-03 -1 -2 1793 + -1.6983180539682508e-03 + + 6.3143682479858398e-01 3.4013068675994873e-01 + 5.0555270910263062e-01 + <_> + + 1 0 1794 -1.1457820422947407e-02 -1 -2 1795 + 
-8.4962565451860428e-03 + + 4.9399960041046143e-01 2.9654508829116821e-01 + 5.1943677663803101e-01 + <_> + + 1 0 1796 1.1919089592993259e-02 -1 -2 1797 + 6.4416420646011829e-03 + + 7.8869980573654175e-01 5.1069867610931396e-01 + 2.9671460390090942e-01 + <_> + + 0 1 1798 -8.7857811013236642e-04 -1 -2 1799 + -2.0312711130827665e-03 + + 5.7143712043762207e-01 4.4812008738517761e-01 + 5.3849118947982788e-01 + <_> + + 0 1 1800 -1.5262430533766747e-03 -1 -2 1801 + 4.2860880494117737e-03 + + 6.1935687065124512e-01 4.3398851156234741e-01 + 7.6972991228103638e-01 + <_> + + 1 0 1802 3.5010920837521553e-03 -1 -2 1803 + 1.2587670236825943e-02 + + 3.1713891029357910e-01 5.2466988563537598e-01 + 4.2412081360816956e-01 + <_> + + 0 1 1804 2.6207490009255707e-04 -1 -2 1805 + 4.4701730075757951e-05 + + 4.2318999767303467e-01 4.1741389036178589e-01 + 5.9196037054061890e-01 + <_> + + 0 1 1806 7.8084698179736733e-04 -1 -2 1807 + 8.8851212058216333e-04 + + 4.2773890495300293e-01 3.7201610207557678e-01 + 5.2268189191818237e-01 + <_> + + 0 1 1808 2.3369069676846266e-03 -1 -2 1809 + 1.6688359901309013e-03 + + 5.4780668020248413e-01 3.6286789178848267e-01 + 6.1500048637390137e-01 + <_> + + 0 1 1810 3.0844469438306987e-04 -1 -2 1811 + 3.4617560449987650e-03 + + 4.7470751404762268e-01 4.5801380276679993e-01 + 5.5856817960739136e-01 + <_> + + 0 1 1812 1.8961310386657715e-02 -1 -2 1813 + 1.7347310483455658e-01 + + 5.2988010644912720e-01 3.6983850598335266e-01 + 8.4986197948455811e-01 + <_> + + 1 0 1814 2.0020549709443003e-04 -1 -2 1815 + 1.0967060225084424e-03 + + 5.5656617879867554e-01 4.7957131266593933e-01 + 6.2862598896026611e-01 + <_> + + 0 1 1816 1.5107099898159504e-04 -1 -2 1817 + -3.4463501069694757e-03 + + 4.0524059534072876e-01 6.1730152368545532e-01 + 4.4142639636993408e-01 + <_> + + 1 0 1818 8.5176620632410049e-03 -1 -2 1819 + -3.5812109708786011e-02 + + 3.5705709457397461e-01 3.1513288617134094e-01 + 5.2527028322219849e-01 + <_> + + 0 1 1820 -2.1155400201678276e-02 -1 -2 1821 + 8.9890940580517054e-04 + + 6.1247211694717407e-01 5.1699757575988770e-01 + 3.5962718725204468e-01 + <_> + + 1 0 1822 -1.5613760333508253e-03 -1 -2 1823 + 6.7120860330760479e-04 + + 4.9149879813194275e-01 4.5462110638618469e-01 + 5.3958117961883545e-01 + <_> + + 0 1 1824 -2.1597029641270638e-02 -1 -2 1825 + -2.4947229772806168e-02 + + 1.9031339883804321e-01 6.9740772247314453e-01 + 4.9677160382270813e-01 + <_> + + 0 1 1826 1.8725979607552290e-03 -1 -2 1827 + 6.3912719488143921e-03 + + 4.7489479184150696e-01 5.1801782846450806e-01 + 2.9243218898773193e-01 + <_> + + 0 1 1828 -9.1552399098873138e-03 -1 -2 1829 + 2.1715660113841295e-03 + + 7.6658701896667480e-01 5.2155512571334839e-01 + 3.3657190203666687e-01 + <_> + + 1 0 1830 1.2330369791015983e-03 -1 -2 1831 + -4.0785901364870369e-04 + + 6.2609577178955078e-01 4.5335099101066589e-01 + 5.3864890336990356e-01 + <_> + + 0 1 1832 4.6437609125860035e-04 -1 -2 1833 + -1.1600199650274590e-04 + + 4.1034960746765137e-01 5.8303910493850708e-01 + 4.3041059374809265e-01 + <_> + + 0 1 1834 -1.2718720361590385e-02 -1 -2 1835 + 8.9431880041956902e-05 + + 2.1325829625129700e-01 4.8728910088539124e-01 + 5.4589152336120605e-01 + <_> + + 0 1 1836 -3.3913689549081028e-04 -1 -2 1837 + -1.8026340752840042e-02 + + 3.9743649959564209e-01 7.5685507059097290e-01 + 5.0456118583679199e-01 + <_> + + 1 0 1838 6.9179181009531021e-03 -1 -2 1839 + -1.1839679791592062e-04 + + 3.9662998914718628e-01 4.1980829834938049e-01 + 5.4358041286468506e-01 + <_> + + 0 1 1840 -3.9474181830883026e-03 -1 -2 1841 + 
6.0050919273635373e-05 + + 6.3694578409194946e-01 5.2695667743682861e-01 + 3.8122430443763733e-01 + <_> + + 1 0 1842 9.1423643752932549e-03 -1 -2 1843 + 2.1305440168362111e-04 + + 4.1567629575729370e-01 3.5235330462455750e-01 + 5.3494542837142944e-01 + <_> + + 1 0 1844 -2.0855850016232580e-04 -1 -2 1845 + 1.3130389852449298e-03 + + 4.4033220410346985e-01 6.0581612586975098e-01 + 4.4682189822196960e-01 + <_> + + 1 0 1846 -2.9134768992662430e-03 -1 -2 1847 + 2.9645769391208887e-03 + + 4.8257058858871460e-01 4.8359981179237366e-01 + 6.0392779111862183e-01 + <_> + + 1 0 1848 1.7772549763321877e-03 -1 -2 1849 + -7.7136349864304066e-03 + + 6.8718272447586060e-01 2.8422209620475769e-01 + 5.1454281806945801e-01 + <_> + + 1 0 1850 5.1027478184551001e-04 -1 -2 1851 + 1.7460630042478442e-03 + + 6.0244262218475342e-01 4.7566100955009460e-01 + 5.7211542129516602e-01 + <_> + + 1 0 1852 3.8068278809078038e-04 -1 -2 1853 + 2.8228890150785446e-03 + + 4.9310690164566040e-01 3.3116981387138367e-01 + 6.2275981903076172e-01 + <_> + + 1 0 1854 -5.3000478073954582e-03 -1 -2 1855 + 4.4951299059903249e-05 + + 5.2320927381515503e-01 3.9952319860458374e-01 + 5.3147977590560913e-01 + <_> + + 0 1 1856 3.2752458937466145e-03 -1 -2 1857 + -2.8162579983472824e-03 + + 4.4816198945045471e-01 3.9079719781875610e-01 + 6.6716408729553223e-01 + <_> + + 0 1 1858 1.4112279750406742e-03 -1 -2 1859 + 8.3062034100294113e-03 + + 5.3570109605789185e-01 4.7709658741950989e-01 + 5.5700999498367310e-01 + <_> + + 0 1 1860 2.2164839319884777e-03 -1 -2 1861 + -4.9868631176650524e-03 + + 4.9471241235733032e-01 5.2413070201873779e-01 + 2.5126549601554871e-01 + <_> + + 1 0 1862 -3.6664260551333427e-03 -1 -2 1863 + -1.0581229813396931e-02 + + 4.6195539832115173e-01 6.3017189502716064e-01 + 4.9730318784713745e-01 + <_> + + 1 0 1864 7.3366491124033928e-03 -1 -2 1865 + -3.9318940252996981e-04 + + 2.8709700703620911e-01 4.2528051137924194e-01 + 5.5792468786239624e-01 + <_> + + 0 1 1866 -8.1375334411859512e-03 -1 -2 1867 + 2.4809150490909815e-03 + + 5.7473158836364746e-01 5.2033740282058716e-01 + 3.9035668969154358e-01 + <_> + + 1 0 1868 8.8749779388308525e-04 -1 -2 1869 + -4.2194919660687447e-04 + + 5.5343210697174072e-01 5.3380441665649414e-01 + 3.9258408546447754e-01 + <_> + + 0 1 1870 -7.9790111631155014e-03 -1 -2 1871 + 1.1439629597589374e-03 + + 4.1443160176277161e-01 4.7013729810714722e-01 + 5.2817362546920776e-01 + <_> + + 1 0 1872 7.5542130507528782e-03 -1 -2 1873 + 1.0288399644196033e-03 + + 2.5272560119628906e-01 5.6051462888717651e-01 + 4.2978560924530029e-01 + <_> + + 1 0 1874 -1.7234670231118798e-03 -1 -2 1875 + 5.7586699724197388e-01 + + 4.8396828770637512e-01 5.1105028390884399e-01 + 8.0489329993724823e-02 + <_> + 109 + 5.3755569458007812e+01 + + <_> + + 0 1 1876 6.6640521399676800e-03 -1 -2 1877 + 8.9905522763729095e-03 + + 3.8289201259613037e-01 4.8584291338920593e-01 + 7.3549592494964600e-01 + <_> + + 1 0 1878 5.7154200039803982e-03 -1 -2 1879 + 1.1257929727435112e-03 + + 6.7232239246368408e-01 4.4295778870582581e-01 + 6.0707777738571167e-01 + <_> + + 1 0 1880 -9.1789010912179947e-04 -1 -2 1881 + -1.0492859873920679e-03 + + 3.0763450264930725e-01 5.5936437845230103e-01 + 3.6510229110717773e-01 + <_> + + 0 1 1882 3.5453929740469903e-05 -1 -2 1883 + 2.9015709878876805e-04 + + 4.2779681086540222e-01 4.5835450291633606e-01 + 5.2846831083297729e-01 + <_> + + 1 0 1884 1.6071660502348095e-04 -1 -2 1885 + -5.2961107576265931e-04 + + 3.7981921434402466e-01 3.8504371047019958e-01 + 5.9396880865097046e-01 + <_> + + 0 1 1886 
2.6682569296099246e-04 -1 -2 1887 + -1.3492540165316314e-04 + + 4.1230249404907227e-01 5.7605999708175659e-01 + 4.2376458644866943e-01 + <_> + + 0 1 1888 -1.0841679759323597e-02 -1 -2 1889 + 1.2077829800546169e-02 + + 3.9299210906028748e-01 5.7619231939315796e-01 + 2.7804449200630188e-01 + <_> + + 0 1 1890 2.2128869313746691e-03 -1 -2 1891 + -1.5266190283000469e-02 + + 4.7945070266723633e-01 7.4055880308151245e-02 + 5.1535779237747192e-01 + <_> + + 1 0 1892 6.7929533543065190e-05 -1 -2 1893 + 1.7633590323384851e-04 + + 5.8587378263473511e-01 3.5676109790802002e-01 + 5.5989629030227661e-01 + <_> + + 1 0 1894 8.1311381654813886e-04 -1 -2 1895 + 3.2630451023578644e-03 + + 5.3468507528305054e-01 4.7825369238853455e-01 + 5.4567539691925049e-01 + <_> + + 0 1 1896 -3.9503918960690498e-03 -1 -2 1897 + -3.9864578866399825e-04 + + 2.8318119049072266e-01 5.4852157831192017e-01 + 4.1596978902816772e-01 + <_> + + 0 1 1898 -1.1432520113885403e-02 -1 -2 1899 + 5.3339172154664993e-03 + + 5.6391012668609619e-01 4.5969840884208679e-01 + 5.9312427043914795e-01 + <_> + + 1 0 1900 8.3193257451057434e-03 -1 -2 1901 + -4.2479918920435011e-04 + + 3.2306200265884399e-01 3.7952938675880432e-01 + 5.4086112976074219e-01 + <_> + + 0 1 1902 -1.1189430207014084e-01 -1 -2 1903 + -7.5553781352937222e-03 + + 1.1322979629039764e-01 6.3393700122833252e-01 + 4.8387709259986877e-01 + <_> + + 0 1 1904 -7.0337029173970222e-03 -1 -2 1905 + -1.4833680354058743e-02 + + 5.6652551889419556e-01 6.7514181137084961e-01 + 4.1409450769424438e-01 + <_> + + 1 0 1906 8.7506724521517754e-03 -1 -2 1907 + 1.6645010327920318e-03 + + 3.5612589120864868e-01 5.3472799062728882e-01 + 3.6497798562049866e-01 + <_> + + 1 0 1908 9.4900820404291153e-03 -1 -2 1909 + 1.1133110383525491e-03 + + 2.7546560764312744e-01 4.2259928584098816e-01 + 5.6291788816452026e-01 + <_> + + 0 1 1910 9.4940755516290665e-03 -1 -2 1911 + -1.5396620146930218e-03 + + 4.9060368537902832e-01 4.0070518851280212e-01 + 5.3807091712951660e-01 + <_> + + 1 0 1912 1.3434959948062897e-01 -1 -2 1913 + -9.4940755516290665e-03 + + 2.2146719694137573e-01 7.3531562089920044e-01 + 5.0050330162048340e-01 + <_> + + 1 0 1914 2.0011790096759796e-02 -1 -2 1915 + -1.8875009845942259e-03 + + 3.3279061317443848e-01 3.9152890443801880e-01 + 5.4018497467041016e-01 + <_> + + 1 0 1916 7.1842782199382782e-03 -1 -2 1917 + 1.6976969782263041e-03 + + 7.1766048669815063e-01 4.5269781351089478e-01 + 6.0769128799438477e-01 + <_> + + 1 0 1918 4.9219978973269463e-03 -1 -2 1919 + 1.1803199537098408e-02 + + 2.5698339939117432e-01 4.9996379017829895e-01 + 5.9582281112670898e-01 + <_> + + 0 1 1920 -9.7703449428081512e-03 -1 -2 1921 + 2.1174899302423000e-03 + + 3.4590938687324524e-01 4.5151269435882568e-01 + 5.8297157287597656e-01 + <_> + + 0 1 1922 9.4801411032676697e-03 -1 -2 1923 + -2.6078789960592985e-03 + + 4.8073920607566833e-01 3.4622168540954590e-01 + 5.2015948295593262e-01 + <_> + + 0 1 1924 -5.7252747938036919e-03 -1 -2 1925 + -8.2325618714094162e-03 + + 6.5998530387878418e-01 2.8218281269073486e-01 + 5.1252847909927368e-01 + <_> + + 0 1 1926 8.9571950957179070e-04 -1 -2 1927 + -1.5021569561213255e-04 + + 4.8838189244270325e-01 4.8299181461334229e-01 + 5.4287171363830566e-01 + <_> + + 0 1 1928 4.8489659093320370e-04 -1 -2 1929 + -9.6192650496959686e-02 + + 4.4345989823341370e-01 2.2566360235214233e-01 + 5.9562277793884277e-01 + <_> + + 0 1 1930 -1.1053519556298852e-03 -1 -2 1931 + -1.0215040296316147e-01 + + 4.5272240042686462e-01 2.8443491458892822e-01 + 5.1864528656005859e-01 + <_> + + 1 0 1932 
3.0147889629006386e-03 -1 -2 1933 + 7.6131648384034634e-03 + + 3.8089990615844727e-01 5.7186990976333618e-01 + 4.2625638842582703e-01 + <_> + + 1 0 1934 1.5197630273178220e-03 -1 -2 1935 + -1.4197279699146748e-02 + + 5.9427189826965332e-01 7.7311038970947266e-01 + 4.9976539611816406e-01 + <_> + + 0 1 1936 -1.3818879611790180e-02 -1 -2 1937 + -5.0701329018920660e-04 + + 6.6811382770538330e-01 3.3056080341339111e-01 + 4.7499749064445496e-01 + <_> + + 0 1 1938 -9.3537531793117523e-03 -1 -2 1939 + -9.4771059229969978e-03 + + 2.8609329462051392e-01 6.1888831853866577e-01 + 4.8421001434326172e-01 + <_> + + 1 0 1940 1.6923650400713086e-03 -1 -2 1941 + 5.8652542065829039e-04 + + 6.0702490806579590e-01 3.7826898694038391e-01 + 5.3681969642639160e-01 + <_> + + 0 1 1942 -2.5826620403677225e-03 -1 -2 1943 + -2.7307639829814434e-03 + + 3.6902099847793579e-01 3.8571149110794067e-01 + 5.3181087970733643e-01 + <_> + + 1 0 1944 2.1871570497751236e-02 -1 -2 1945 + -1.5010299648565706e-05 + + 2.3270089924335480e-01 5.5607229471206665e-01 + 4.3014100193977356e-01 + <_> + + 1 0 1946 5.3583700209856033e-03 -1 -2 1947 + 5.0057549960911274e-03 + + 6.7676377296447754e-01 5.1949042081832886e-01 + 3.6128538846969604e-01 + <_> + + 0 1 1948 -1.9030070398002863e-03 -1 -2 1949 + -7.8506693243980408e-03 + + 3.2378450036048889e-01 1.1948519945144653e-01 + 4.9917238950729370e-01 + <_> + + 1 0 1950 -2.7093670796602964e-03 -1 -2 1951 + 1.4138079714030027e-03 + + 4.8549601435661316e-01 4.8723229765892029e-01 + 5.9035778045654297e-01 + <_> + + 1 0 1952 9.0300198644399643e-03 -1 -2 1953 + -9.7925681620836258e-04 + + 6.5473157167434692e-01 5.8492732048034668e-01 + 4.5542308688163757e-01 + <_> + + 1 0 1954 1.3984439428895712e-03 -1 -2 1955 + 8.3372107474133372e-04 + + 4.0646260976791382e-01 5.3995430469512939e-01 + 4.1528099775314331e-01 + <_> + + 1 0 1956 1.0551059618592262e-02 -1 -2 1957 + 8.8344102550763637e-05 + + 1.7966809868812561e-01 4.2518630623817444e-01 + 5.4135227203369141e-01 + <_> + + 1 0 1958 -4.1022308170795441e-02 -1 -2 1959 + 7.5065628625452518e-03 + + 5.2281248569488525e-01 4.8537430167198181e-01 + 6.0934442281723022e-01 + <_> + + 1 0 1960 4.1022308170795441e-02 -1 -2 1961 + -5.3961377125233412e-04 + + 2.2050240635871887e-01 5.6927317380905151e-01 + 4.4687569141387939e-01 + <_> + + 0 1 1962 -6.8696036934852600e-02 -1 -2 1963 + -1.8447940237820148e-03 + + 1.4833140373229980e-01 6.2112838029861450e-01 + 4.9666011333465576e-01 + <_> + + 0 1 1964 -6.0959919355809689e-03 -1 -2 1965 + -4.2068301700055599e-03 + + 2.2946719825267792e-01 6.4070910215377808e-01 + 4.7485628724098206e-01 + <_> + + 1 0 1966 -7.1332789957523346e-04 -1 -2 1967 + 1.1756779998540878e-01 + + 5.3549361228942871e-01 5.1369780302047729e-01 + 1.0595739819109440e-02 + <_> + + 0 1 1968 5.9354289987822995e-05 -1 -2 1969 + -6.3173691742122173e-03 + + 3.7118038535118103e-01 1.7120739817619324e-01 + 5.0617581605911255e-01 + <_> + + 1 0 1970 1.4941499568521976e-02 -1 -2 1971 + -2.0789399277418852e-03 + + 6.7291188240051270e-01 4.4106459617614746e-01 + 5.4440277814865112e-01 + <_> + + 0 1 1972 -7.0736219640821218e-04 -1 -2 1973 + -3.1247111037373543e-03 + + 5.5689108371734619e-01 5.0238692760467529e-01 + 3.5624051094055176e-01 + <_> + + 1 0 1974 -7.8919378574937582e-04 -1 -2 1975 + 1.0179580189287663e-02 + + 5.4567861557006836e-01 5.5451387166976929e-01 + 4.6223109960556030e-01 + <_> + + 1 0 1976 -2.7506109327077866e-03 -1 -2 1977 + 1.0601329617202282e-02 + + 4.9425360560417175e-01 2.9612338542938232e-01 + 5.9643387794494629e-01 + <_> + + 0 1 1978 
5.1466780714690685e-03 -1 -2 1979 + 7.6321147382259369e-02 + + 5.4952287673950195e-01 5.1739591360092163e-01 + 2.9402169585227966e-01 + <_> + + 0 1 1980 -1.5027689514681697e-03 -1 -2 1981 + 1.2266670353710651e-02 + + 3.1062999367713928e-01 4.6511501073837280e-01 + 6.8466138839721680e-01 + <_> + + 1 0 1982 -3.1118579208850861e-02 -1 -2 1983 + 2.8905589133501053e-02 + + 5.2260571718215942e-01 5.1822441816329956e-01 + 2.7054280042648315e-01 + <_> + + 1 0 1984 4.7598380595445633e-02 -1 -2 1985 + 3.0808549374341965e-02 + + 1.1095120012760162e-01 4.9386250972747803e-01 + 1.4041109383106232e-01 + <_> + + 1 0 1986 -2.1277810446918011e-04 -1 -2 1987 + 7.8969962894916534e-02 + + 4.3923568725585938e-01 5.2165520191192627e-01 + 2.2941139340400696e-01 + <_> + + 0 1 1988 -1.0257950052618980e-02 -1 -2 1989 + 1.2604889925569296e-03 + + 6.1766529083251953e-01 5.2362227439880371e-01 + 3.3289659023284912e-01 + <_> + + 1 0 1990 -3.3490460366010666e-02 -1 -2 1991 + -5.9202767442911863e-04 + + 4.8661869764328003e-01 4.1164070367813110e-01 + 5.3956401348114014e-01 + <_> + + 1 0 1992 3.0320750738610514e-05 -1 -2 1993 + -5.4369680583477020e-04 + + 5.6107360124588013e-01 5.6213891506195068e-01 + 3.4612038731575012e-01 + <_> + + 1 0 1994 -3.3490460366010666e-02 -1 -2 1995 + -5.9202767442911863e-04 + + 4.8967620730400085e-01 4.3054041266441345e-01 + 5.3407138586044312e-01 + <_> + + 0 1 1996 2.0550889894366264e-03 -1 -2 1997 + -4.4353571720421314e-03 + + 5.5449998378753662e-01 6.0385400056838989e-01 + 3.7465929985046387e-01 + <_> + + 1 0 1998 -8.4170423448085785e-02 -1 -2 1999 + 6.7419027909636497e-03 + + 5.0073480606079102e-01 5.2980971336364746e-01 + 4.7161450982093811e-01 + <_> + + 1 0 2000 1.0278150439262390e-02 -1 -2 2001 + 5.8800862170755863e-03 + + 6.2693750858306885e-01 5.1548278331756592e-01 + 3.8130408525466919e-01 + <_> + + 1 0 2002 -6.9679190346505493e-06 -1 -2 2003 + 8.2419527461752295e-04 + + 4.4402399659156799e-01 4.6975341439247131e-01 + 5.4855042695999146e-01 + <_> + + 0 1 2004 -5.5268318392336369e-03 -1 -2 2005 + 9.6128671430051327e-04 + + 5.5136048793792725e-01 3.6186391115188599e-01 + 5.8384567499160767e-01 + <_> + + 1 0 2006 2.4810510221868753e-03 -1 -2 2007 + -1.0480589699000120e-03 + + 2.5232228636741638e-01 4.1172578930854797e-01 + 5.3929960727691650e-01 + <_> + + 0 1 2008 -6.1287907883524895e-03 -1 -2 2009 + 1.1682329932227731e-04 + + 6.7263299226760864e-01 5.0411927700042725e-01 + 3.6077290773391724e-01 + <_> + + 0 1 2010 -3.9909478276968002e-02 -1 -2 2011 + 1.5859459526836872e-03 + + 1.5637390315532684e-01 4.8919808864593506e-01 + 5.7798451185226440e-01 + <_> + + 0 1 2012 -2.2690229117870331e-02 -1 -2 2013 + 2.0916070789098740e-03 + + 2.1868790686130524e-01 4.7715771198272705e-01 + 6.0992312431335449e-01 + <_> + + 0 1 2014 -2.4715419858694077e-02 -1 -2 2015 + -1.3419450260698795e-02 + + 3.4639969468116760e-01 3.6306929588317871e-01 + 5.2521961927413940e-01 + <_> + + 0 1 2016 -6.0629472136497498e-03 -1 -2 2017 + -2.0921030081808567e-03 + + 6.6663217544555664e-01 3.3995470404624939e-01 + 5.0356978178024292e-01 + <_> + + 0 1 2018 2.5961859151721001e-02 -1 -2 2019 + 1.7908669542521238e-04 + + 5.0368028879165649e-01 5.4185307025909424e-01 + 4.3189769983291626e-01 + <_> + + 0 1 2020 -3.1546850223094225e-03 -1 -2 2021 + -1.1397759662941098e-03 + + 7.2210252285003662e-01 3.3209729194641113e-01 + 5.0244337320327759e-01 + <_> + + 0 1 2022 -4.7840211540460587e-02 -1 -2 2023 + 4.1577088995836675e-04 + + 1.9387650489807129e-01 4.8021888732910156e-01 + 5.7307147979736328e-01 + <_> + + 0 1 2024 
-4.4247039477340877e-04 -1 -2 2025 + 1.4479350065812469e-03 + + 4.2625150084495544e-01 5.7191711664199829e-01 + 4.0641531348228455e-01 + <_> + + 0 1 2026 1.5701510012149811e-02 -1 -2 2027 + 2.7805729769170284e-04 + + 4.9957260489463806e-01 5.2892869710922241e-01 + 4.5817288756370544e-01 + <_> + + 0 1 2028 -2.9010509606450796e-03 -1 -2 2029 + 2.0830519497394562e-04 + + 6.0121482610702515e-01 5.0579768419265747e-01 + 3.5994321107864380e-01 + <_> + + 1 0 2030 -5.1530029624700546e-02 -1 -2 2031 + 1.7163449956569821e-04 + + 4.9917969107627869e-01 4.6754699945449829e-01 + 5.3747731447219849e-01 + <_> + + 1 0 2032 2.3614279925823212e-02 -1 -2 2033 + -5.6427798699587584e-04 + + 6.5864789485931396e-01 3.8532960414886475e-01 + 5.1960402727127075e-01 + <_> + + 1 0 2034 6.6903959959745407e-03 -1 -2 2035 + -4.8789530992507935e-03 + + 6.0042357444763184e-01 3.2932278513908386e-01 + 5.2452367544174194e-01 + <_> + + 0 1 2036 -6.8537332117557526e-03 -1 -2 2037 + 9.9893810693174601e-04 + + 2.5659140944480896e-01 4.6154940128326416e-01 + 5.9424322843551636e-01 + <_> + + 0 1 2038 -1.3354700058698654e-04 -1 -2 2039 + 1.0165109997615218e-03 + + 5.4873758554458618e-01 4.5783591270446777e-01 + 5.4269278049468994e-01 + <_> + + 1 0 2040 9.1216771397739649e-04 -1 -2 2041 + 1.0080259526148438e-03 + + 3.9394611120223999e-01 4.0497899055480957e-01 + 5.5207037925720215e-01 + <_> + + 1 0 2042 -1.3102490629535168e-04 -1 -2 2043 + 5.5228749988600612e-04 + + 4.8790889978408813e-01 4.8449438810348511e-01 + 5.5128258466720581e-01 + <_> + + 1 0 2044 -1.2130969844292849e-04 -1 -2 2045 + -1.5112989785848185e-05 + + 4.3679711222648621e-01 6.4259552955627441e-01 + 4.8818269371986389e-01 + <_> + + 1 0 2046 -4.0125829400494695e-04 -1 -2 2047 + -6.5766851184889674e-04 + + 5.3720992803573608e-01 5.8345532417297363e-01 + 4.8690780997276306e-01 + <_> + + 1 0 2048 6.2220421386882663e-04 -1 -2 2049 + 1.4663359615951777e-03 + + 3.8246369361877441e-01 4.8134881258010864e-01 + 6.9667392969131470e-01 + <_> + + 0 1 2050 -4.9547709524631500e-02 -1 -2 2051 + 1.3017569435760379e-03 + + 5.3927659988403320e-02 5.3374558687210083e-01 + 4.1607481241226196e-01 + <_> + + 0 1 2052 -4.4914530590176582e-03 -1 -2 2053 + 1.6592369647696614e-03 + + 5.9974372386932373e-01 3.7271851301193237e-01 + 5.1156342029571533e-01 + <_> + + 0 1 2054 6.4695458859205246e-03 -1 -2 2055 + 4.9810269847512245e-03 + + 5.2520352602005005e-01 5.2567178010940552e-01 + 3.9344060420989990e-01 + <_> + + 0 1 2056 -3.8536980748176575e-02 -1 -2 2057 + -2.8275650739669800e-01 + + 2.0619249343872070e-01 6.1883211135864258e-02 + 4.9250578880310059e-01 + <_> + + 0 1 2058 -9.0301828458905220e-03 -1 -2 2059 + -4.3866269290447235e-02 + + 3.1575900316238403e-01 2.0336820185184479e-01 + 5.1647698879241943e-01 + <_> + + 0 1 2060 -4.5701069757342339e-03 -1 -2 2061 + -2.3362410720437765e-03 + + 6.6111832857131958e-01 2.8077891469001770e-01 + 4.9628761410713196e-01 + <_> + + 0 1 2062 5.3960331715643406e-03 -1 -2 2063 + -2.6297608856111765e-03 + + 5.1463878154754639e-01 6.2844878435134888e-01 + 4.9555888772010803e-01 + <_> + + 0 1 2064 -3.8577478844672441e-03 -1 -2 2065 + 1.3963800156489015e-03 + + 1.4867480099201202e-01 4.7013381123542786e-01 + 6.3209718465805054e-01 + <_> + + 1 0 2066 -8.8699469342827797e-03 -1 -2 2067 + -7.0626288652420044e-04 + + 5.2868181467056274e-01 4.6483701467514038e-01 + 5.3332102298736572e-01 + <_> + + 0 1 2068 4.2645810171961784e-03 -1 -2 2069 + 6.1572100967168808e-02 + + 5.0848782062530518e-01 3.6296251416206360e-01 + 8.7571567296981812e-01 + <_> + + 1 0 2070 
+ [... machine-generated Haar cascade data elided: the remaining
+ decision-tree node records (left/right child flags, feature index,
+ threshold) with their leaf values, followed by the <rects> feature
+ blocks, each a list of "x y w h weight" rectangles referenced by the
+ feature indices ...]
+ <_> + 0 2 9 1 2. + <_> + 9 3 9 1 2. + <_> + + <_> + 6 10 3 6 -1. + <_> + 6 13 3 3 2. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 0 3 6 2. + <_> + + <_> + 15 0 5 8 -1. + <_> + 15 4 5 4 2. + <_> + + <_> + 7 16 6 4 -1. + <_> + 9 16 2 4 3. + <_> + + <_> + 2 11 14 4 -1. + <_> + 2 11 7 2 2. + <_> + 9 13 7 2 2. + <_> + + <_> + 14 10 6 10 -1. + <_> + 14 10 3 10 2. + <_> + + <_> + 9 8 10 12 -1. + <_> + 14 8 5 6 2. + <_> + 9 14 5 6 2. + <_> + + <_> + 0 10 6 10 -1. + <_> + 3 10 3 10 2. + <_> + + <_> + 1 8 10 12 -1. + <_> + 1 8 5 6 2. + <_> + 6 14 5 6 2. + <_> + + <_> + 9 3 6 1 -1. + <_> + 11 3 2 1 3. + <_> + + <_> + 7 4 6 3 -1. + <_> + 9 4 2 3 3. + <_> + + <_> + 5 3 6 1 -1. + <_> + 7 3 2 1 3. + <_> + + <_> + 4 5 6 3 -1. + <_> + 6 5 2 3 3. + <_> + + <_> + 9 16 3 3 -1. + <_> + 9 17 3 1 3. + <_> + + <_> + 8 14 6 3 -1. + <_> + 8 15 6 1 3. + <_> + + <_> + 6 0 8 12 -1. + <_> + 6 0 4 6 2. + <_> + 10 6 4 6 2. + <_> + + <_> + 4 12 2 3 -1. + <_> + 4 13 2 1 3. + <_> + + <_> + 12 16 6 3 -1. + <_> + 12 17 6 1 3. + <_> + + <_> + 7 12 7 2 -1. + <_> + 7 13 7 1 2. + <_> + + <_> + 2 16 6 3 -1. + <_> + 2 17 6 1 3. + <_> + + <_> + 0 7 16 6 -1. + <_> + 0 10 16 3 2. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 9 7 3 5 -1. + <_> + 10 7 1 5 3. + <_> + + <_> + 0 5 20 10 -1. + <_> + 0 5 10 5 2. + <_> + 10 10 10 5 2. + <_> + + <_> + 3 1 4 2 -1. + <_> + 5 1 2 2 2. + <_> + + <_> + 7 6 8 10 -1. + <_> + 11 6 4 5 2. + <_> + 7 11 4 5 2. + <_> + + <_> + 17 6 3 2 -1. + <_> + 17 7 3 1 2. + <_> + + <_> + 5 6 8 10 -1. + <_> + 5 6 4 5 2. + <_> + 9 11 4 5 2. + <_> + + <_> + 5 12 10 6 -1. + <_> + 5 14 10 2 3. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 10 3 2 6 -1. + <_> + 11 3 1 3 2. + <_> + 10 6 1 3 2. + <_> + + <_> + 0 4 3 3 -1. + <_> + 0 5 3 1 3. + <_> + + <_> + 3 16 8 4 -1. + <_> + 3 16 4 2 2. + <_> + 7 18 4 2 2. + <_> + + <_> + 8 13 5 2 -1. + <_> + 8 14 5 1 2. + <_> + + <_> + 8 7 4 12 -1. + <_> + 8 11 4 4 3. + <_> + + <_> + 5 9 2 2 -1. + <_> + 6 9 1 2 2. + <_> + + <_> + 9 15 2 3 -1. + <_> + 9 16 2 1 3. + <_> + + <_> + 13 9 2 3 -1. + <_> + 13 9 1 3 2. + <_> + + <_> + 14 0 6 17 -1. + <_> + 16 0 2 17 3. + <_> + + <_> + 5 10 2 2 -1. + <_> + 6 10 1 2 2. + <_> + + <_> + 2 9 9 1 -1. + <_> + 5 9 3 1 3. + <_> + + <_> + 9 11 2 3 -1. + <_> + 9 12 2 1 3. + <_> + + <_> + 7 11 6 3 -1. + <_> + 7 12 6 1 3. + <_> + + <_> + 0 6 3 2 -1. + <_> + 0 7 3 1 2. + <_> + + <_> + 7 0 6 1 -1. + <_> + 9 0 2 1 3. + <_> + + <_> + 9 16 3 3 -1. + <_> + 9 17 3 1 3. + <_> + + <_> + 2 13 17 6 -1. + <_> + 2 16 17 3 2. + <_> + + <_> + 1 3 3 7 -1. + <_> + 2 3 1 7 3. + <_> + + <_> + 1 1 6 4 -1. + <_> + 3 1 2 4 3. + <_> + + <_> + 14 1 6 5 -1. + <_> + 14 1 3 5 2. + <_> + + <_> + 13 2 3 2 -1. + <_> + 13 3 3 1 2. + <_> + + <_> + 0 1 6 5 -1. + <_> + 3 1 3 5 2. + <_> + + <_> + 2 3 2 6 -1. + <_> + 2 5 2 2 3. + <_> + + <_> + 9 10 3 2 -1. + <_> + 9 11 3 1 2. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 6 3 3 1 -1. + <_> + 7 3 1 1 3. + <_> + + <_> + 8 2 3 12 -1. + <_> + 8 6 3 4 3. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 11 12 2 2 -1. + <_> + 12 12 1 1 2. + <_> + 11 13 1 1 2. + <_> + + <_> + 5 5 2 2 -1. + <_> + 5 6 2 1 2. + <_> + + <_> + 5 4 1 3 -1. + <_> + 5 5 1 1 3. + <_> + + <_> + 3 11 16 4 -1. + <_> + 11 11 8 2 2. + <_> + 3 13 8 2 2. + <_> + + <_> + 0 10 20 3 -1. + <_> + 0 11 20 1 3. + <_> + + <_> + 1 11 16 4 -1. + <_> + 1 11 8 2 2. + <_> + 9 13 8 2 2. + <_> + + <_> + 4 2 4 2 -1. + <_> + 4 3 4 1 2. + <_> + + <_> + 12 6 2 2 -1. + <_> + 13 6 1 1 2. + <_> + 12 7 1 1 2. + <_> + + <_> + 12 11 6 6 -1. 
+ <_> + 12 13 6 2 3. + <_> + + <_> + 6 6 2 2 -1. + <_> + 6 6 1 1 2. + <_> + 7 7 1 1 2. + <_> + + <_> + 6 4 4 16 -1. + <_> + 8 4 2 16 2. + <_> + + <_> + 11 18 3 2 -1. + <_> + 11 19 3 1 2. + <_> + + <_> + 9 17 6 2 -1. + <_> + 12 17 3 1 2. + <_> + 9 18 3 1 2. + <_> + + <_> + 2 13 5 2 -1. + <_> + 2 14 5 1 2. + <_> + + <_> + 3 15 2 2 -1. + <_> + 3 16 2 1 2. + <_> + + <_> + 9 7 3 3 -1. + <_> + 10 7 1 3 3. + <_> + + <_> + 9 6 2 6 -1. + <_> + 9 6 1 6 2. + <_> + + <_> + 1 14 7 6 -1. + <_> + 1 16 7 2 3. + <_> + + <_> + 8 1 2 11 -1. + <_> + 9 1 1 11 2. + <_> + + <_> + 9 7 2 4 -1. + <_> + 9 7 1 4 2. + <_> + + <_> + 11 10 2 1 -1. + <_> + 11 10 1 1 2. + <_> + + <_> + 0 3 3 9 -1. + <_> + 1 3 1 9 3. + <_> + + <_> + 0 3 3 6 -1. + <_> + 0 5 3 2 3. + <_> + + <_> + 11 15 2 2 -1. + <_> + 12 15 1 1 2. + <_> + 11 16 1 1 2. + <_> + + <_> + 11 14 2 2 -1. + <_> + 12 14 1 1 2. + <_> + 11 15 1 1 2. + <_> + + <_> + 7 15 2 2 -1. + <_> + 7 15 1 1 2. + <_> + 8 16 1 1 2. + <_> + + <_> + 7 14 2 2 -1. + <_> + 7 14 1 1 2. + <_> + 8 15 1 1 2. + <_> + + <_> + 8 13 4 6 -1. + <_> + 10 13 2 3 2. + <_> + 8 16 2 3 2. + <_> + + <_> + 2 14 16 4 -1. + <_> + 10 14 8 2 2. + <_> + 2 16 8 2 2. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 7 7 5 3 -1. + <_> + 7 8 5 1 3. + <_> + + <_> + 7 5 6 2 -1. + <_> + 9 5 2 2 3. + <_> + + <_> + 9 1 6 18 -1. + <_> + 11 1 2 18 3. + <_> + + <_> + 8 6 3 4 -1. + <_> + 9 6 1 4 3. + <_> + + <_> + 8 5 2 4 -1. + <_> + 8 5 1 2 2. + <_> + 9 7 1 2 2. + <_> + + <_> + 9 13 2 6 -1. + <_> + 10 13 1 3 2. + <_> + 9 16 1 3 2. + <_> + + <_> + 11 0 3 18 -1. + <_> + 12 0 1 18 3. + <_> + + <_> + 6 0 3 18 -1. + <_> + 7 0 1 18 3. + <_> + + <_> + 5 15 4 2 -1. + <_> + 7 15 2 2 2. + <_> + + <_> + 1 9 18 1 -1. + <_> + 7 9 6 1 3. + <_> + + <_> + 0 0 20 3 -1. + <_> + 0 1 20 1 3. + <_> + + <_> + 9 6 2 4 -1. + <_> + 10 6 1 4 2. + <_> + + <_> + 6 10 6 2 -1. + <_> + 8 10 2 2 3. + <_> + + <_> + 0 7 20 1 -1. + <_> + 0 7 10 1 2. + <_> + + <_> + 11 3 5 4 -1. + <_> + 11 5 5 2 2. + <_> + + <_> + 5 7 10 1 -1. + <_> + 10 7 5 1 2. + <_> + + <_> + 8 10 3 3 -1. + <_> + 8 11 3 1 3. + <_> + + <_> + 2 0 16 8 -1. + <_> + 10 0 8 4 2. + <_> + 2 4 8 4 2. + <_> + + <_> + 11 0 9 10 -1. + <_> + 11 5 9 5 2. + <_> + + <_> + 0 2 8 18 -1. + <_> + 4 2 4 18 2. + <_> + + <_> + 0 0 2 6 -1. + <_> + 0 2 2 2 3. + <_> + + <_> + 6 0 9 2 -1. + <_> + 6 1 9 1 2. + <_> + + <_> + 4 1 12 2 -1. + <_> + 4 2 12 1 2. + <_> + + <_> + 2 1 16 14 -1. + <_> + 2 8 16 7 2. + <_> + + <_> + 5 1 8 12 -1. + <_> + 5 7 8 6 2. + <_> + + <_> + 9 11 2 2 -1. + <_> + 9 12 2 1 2. + <_> + + <_> + 9 10 5 6 -1. + <_> + 9 12 5 2 3. + <_> + + <_> + 3 0 13 8 -1. + <_> + 3 4 13 4 2. + <_> + + <_> + 6 7 5 8 -1. + <_> + 6 11 5 4 2. + <_> + + <_> + 9 5 2 3 -1. + <_> + 9 6 2 1 3. + <_> + + <_> + 6 8 8 3 -1. + <_> + 6 9 8 1 3. + <_> + + <_> + 2 2 7 6 -1. + <_> + 2 5 7 3 2. + <_> + + <_> + 2 1 14 4 -1. + <_> + 2 1 7 2 2. + <_> + 9 3 7 2 2. + <_> + + <_> + 11 14 1 3 -1. + <_> + 11 15 1 1 3. + <_> + + <_> + 6 15 8 2 -1. + <_> + 6 16 8 1 2. + <_> + + <_> + 8 14 1 3 -1. + <_> + 8 15 1 1 3. + <_> + + <_> + 8 11 2 8 -1. + <_> + 8 15 2 4 2. + <_> + + <_> + 6 15 8 2 -1. + <_> + 6 16 8 1 2. + <_> + + <_> + 7 16 8 3 -1. + <_> + 7 17 8 1 3. + <_> + + <_> + 0 16 2 2 -1. + <_> + 0 17 2 1 2. + <_> + + <_> + 1 16 8 4 -1. + <_> + 1 16 4 2 2. + <_> + 5 18 4 2 2. + <_> + + <_> + 2 9 16 3 -1. + <_> + 2 10 16 1 3. + <_> + + <_> + 13 11 2 4 -1. + <_> + 13 11 1 4 2. + <_> + + <_> + 0 13 16 6 -1. + <_> + 0 15 16 2 3. + <_> + + <_> + 5 11 2 4 -1. + <_> + 6 11 1 4 2. + <_> + + <_> + 18 2 2 18 -1. + <_> + 19 2 1 9 2. 
+ <_> + 18 11 1 9 2. + <_> + + <_> + 19 7 1 9 -1. + <_> + 19 10 1 3 3. + <_> + + <_> + 0 2 2 18 -1. + <_> + 0 2 1 9 2. + <_> + 1 11 1 9 2. + <_> + + <_> + 0 7 1 9 -1. + <_> + 0 10 1 3 3. + <_> + + <_> + 14 12 2 2 -1. + <_> + 14 13 2 1 2. + <_> + + <_> + 11 14 2 3 -1. + <_> + 11 15 2 1 3. + <_> + + <_> + 7 8 6 2 -1. + <_> + 7 9 6 1 2. + <_> + + <_> + 7 12 4 6 -1. + <_> + 7 12 2 3 2. + <_> + 9 15 2 3 2. + <_> + + <_> + 8 13 5 3 -1. + <_> + 8 14 5 1 3. + <_> + + <_> + 12 14 2 2 -1. + <_> + 13 14 1 1 2. + <_> + 12 15 1 1 2. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 7 13 5 2 -1. + <_> + 7 14 5 1 2. + <_> + + <_> + 2 10 16 4 -1. + <_> + 10 10 8 2 2. + <_> + 2 12 8 2 2. + <_> + + <_> + 7 0 6 6 -1. + <_> + 9 0 2 6 3. + <_> + + <_> + 7 1 6 3 -1. + <_> + 7 2 6 1 3. + <_> + + <_> + 0 12 6 2 -1. + <_> + 0 13 6 1 2. + <_> + + <_> + 6 3 11 2 -1. + <_> + 6 4 11 1 2. + <_> + + <_> + 12 0 8 6 -1. + <_> + 16 0 4 3 2. + <_> + 12 3 4 3 2. + <_> + + <_> + 8 12 1 2 -1. + <_> + 8 13 1 1 2. + <_> + + <_> + 8 8 1 12 -1. + <_> + 8 12 1 4 3. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 12 7 3 13 -1. + <_> + 13 7 1 13 3. + <_> + + <_> + 7 11 2 2 -1. + <_> + 7 11 1 1 2. + <_> + 8 12 1 1 2. + <_> + + <_> + 3 13 1 3 -1. + <_> + 3 14 1 1 3. + <_> + + <_> + 10 18 3 2 -1. + <_> + 11 18 1 2 3. + <_> + + <_> + 11 11 2 1 -1. + <_> + 11 11 1 1 2. + <_> + + <_> + 1 10 5 9 -1. + <_> + 1 13 5 3 3. + <_> + + <_> + 4 8 6 4 -1. + <_> + 6 8 2 4 3. + <_> + + <_> + 13 12 1 4 -1. + <_> + 13 14 1 2 2. + <_> + + <_> + 11 3 4 14 -1. + <_> + 13 3 2 7 2. + <_> + 11 10 2 7 2. + <_> + + <_> + 6 12 1 4 -1. + <_> + 6 14 1 2 2. + <_> + + <_> + 5 3 4 14 -1. + <_> + 5 3 2 7 2. + <_> + 7 10 2 7 2. + <_> + + <_> + 10 18 3 2 -1. + <_> + 11 18 1 2 3. + <_> + + <_> + 9 12 3 3 -1. + <_> + 9 13 3 1 3. + <_> + + <_> + 2 2 12 6 -1. + <_> + 2 2 6 3 2. + <_> + 8 5 6 3 2. + <_> + + <_> + 6 6 6 2 -1. + <_> + 9 6 3 2 2. + <_> + + <_> + 1 0 18 12 -1. + <_> + 7 0 6 12 3. + <_> + + <_> + 5 7 6 4 -1. + <_> + 5 7 3 2 2. + <_> + 8 9 3 2 2. + <_> + + <_> + 5 7 10 4 -1. + <_> + 5 9 10 2 2. + <_> + + <_> + 7 7 6 4 -1. + <_> + 9 7 2 4 3. + <_> + + <_> + 9 5 2 2 -1. + <_> + 9 6 2 1 2. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 6 17 8 3 -1. + <_> + 6 18 8 1 3. + <_> + + <_> + 9 17 6 2 -1. + <_> + 12 17 3 1 2. + <_> + 9 18 3 1 2. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 13 2 1 2. + <_> + + <_> + 3 12 9 2 -1. + <_> + 3 13 9 1 2. + <_> + + <_> + 8 3 6 1 -1. + <_> + 10 3 2 1 3. + <_> + + <_> + 9 3 4 6 -1. + <_> + 11 3 2 3 2. + <_> + 9 6 2 3 2. + <_> + + <_> + 0 3 6 5 -1. + <_> + 3 3 3 5 2. + <_> + + <_> + 2 0 2 18 -1. + <_> + 2 6 2 6 3. + <_> + + <_> + 14 2 4 9 -1. + <_> + 14 5 4 3 3. + <_> + + <_> + 10 18 3 2 -1. + <_> + 11 18 1 2 3. + <_> + + <_> + 2 2 4 9 -1. + <_> + 2 5 4 3 3. + <_> + + <_> + 7 18 3 2 -1. + <_> + 8 18 1 2 3. + <_> + + <_> + 10 14 3 3 -1. + <_> + 10 15 3 1 3. + <_> + + <_> + 10 12 2 6 -1. + <_> + 10 15 2 3 2. + <_> + + <_> + 7 5 3 6 -1. + <_> + 7 7 3 2 3. + <_> + + <_> + 3 3 6 2 -1. + <_> + 3 4 6 1 2. + <_> + + <_> + 8 4 7 3 -1. + <_> + 8 5 7 1 3. + <_> + + <_> + 13 6 2 3 -1. + <_> + 13 7 2 1 3. + <_> + + <_> + 8 8 2 12 -1. + <_> + 8 12 2 4 3. + <_> + + <_> + 5 4 8 14 -1. + <_> + 5 4 4 7 2. + <_> + 9 11 4 7 2. + <_> + + <_> + 0 1 20 8 -1. + <_> + 10 1 10 4 2. + <_> + 0 5 10 4 2. + <_> + + <_> + 4 0 12 2 -1. + <_> + 4 1 12 1 2. + <_> + + <_> + 0 1 20 8 -1. + <_> + 0 1 10 4 2. + <_> + 10 5 10 4 2. + <_> + + <_> + 4 0 12 2 -1. + <_> + 4 1 12 1 2. + <_> + + <_> + 9 5 6 3 -1. 
+ <_> + 9 5 3 3 2. + <_> + + <_> + 8 13 10 6 -1. + <_> + 8 15 10 2 3. + <_> + + <_> + 5 5 6 3 -1. + <_> + 8 5 3 3 2. + <_> + + <_> + 6 3 6 1 -1. + <_> + 8 3 2 1 3. + <_> + + <_> + 11 18 9 2 -1. + <_> + 14 18 3 2 3. + <_> + + <_> + 13 11 6 7 -1. + <_> + 13 11 3 7 2. + <_> + + <_> + 4 6 12 10 -1. + <_> + 4 6 6 5 2. + <_> + 10 11 6 5 2. + <_> + + <_> + 8 17 3 3 -1. + <_> + 9 17 1 3 3. + <_> + + <_> + 11 18 9 2 -1. + <_> + 14 18 3 2 3. + <_> + + <_> + 13 11 6 8 -1. + <_> + 13 11 3 8 2. + <_> + + <_> + 4 16 2 2 -1. + <_> + 4 17 2 1 2. + <_> + + <_> + 7 15 4 4 -1. + <_> + 7 17 4 2 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 13 6 2 3 -1. + <_> + 13 7 2 1 3. + <_> + + <_> + 5 11 6 1 -1. + <_> + 7 11 2 1 3. + <_> + + <_> + 7 10 3 1 -1. + <_> + 8 10 1 1 3. + <_> + + <_> + 0 12 20 4 -1. + <_> + 0 14 20 2 2. + <_> + + <_> + 10 2 3 2 -1. + <_> + 10 3 3 1 2. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 5 5 4 3 -1. + <_> + 5 6 4 1 3. + <_> + + <_> + 8 8 4 3 -1. + <_> + 8 9 4 1 3. + <_> + + <_> + 10 4 2 12 -1. + <_> + 10 8 2 4 3. + <_> + + <_> + 0 3 4 3 -1. + <_> + 0 4 4 1 3. + <_> + + <_> + 1 3 2 3 -1. + <_> + 1 4 2 1 3. + <_> + + <_> + 16 1 4 11 -1. + <_> + 16 1 2 11 2. + <_> + + <_> + 18 2 2 16 -1. + <_> + 19 2 1 8 2. + <_> + 18 10 1 8 2. + <_> + + <_> + 1 8 6 12 -1. + <_> + 3 8 2 12 3. + <_> + + <_> + 7 2 6 2 -1. + <_> + 7 2 3 1 2. + <_> + 10 3 3 1 2. + <_> + + <_> + 12 4 8 2 -1. + <_> + 16 4 4 1 2. + <_> + 12 5 4 1 2. + <_> + + <_> + 10 6 6 2 -1. + <_> + 12 6 2 2 3. + <_> + + <_> + 0 4 8 2 -1. + <_> + 0 4 4 1 2. + <_> + 4 5 4 1 2. + <_> + + <_> + 1 3 3 5 -1. + <_> + 2 3 1 5 3. + <_> + + <_> + 16 3 4 6 -1. + <_> + 16 5 4 2 3. + <_> + + <_> + 8 6 4 3 -1. + <_> + 8 7 4 1 3. + <_> + + <_> + 8 14 1 3 -1. + <_> + 8 15 1 1 3. + <_> + + <_> + 4 11 1 2 -1. + <_> + 4 12 1 1 2. + <_> + + <_> + 8 14 6 3 -1. + <_> + 8 15 6 1 3. + <_> + + <_> + 7 15 7 3 -1. + <_> + 7 16 7 1 3. + <_> + + <_> + 9 12 2 8 -1. + <_> + 9 16 2 4 2. + <_> + + <_> + 4 6 6 2 -1. + <_> + 6 6 2 2 3. + <_> + + <_> + 12 7 4 2 -1. + <_> + 12 8 4 1 2. + <_> + + <_> + 5 3 13 10 -1. + <_> + 5 8 13 5 2. + <_> + + <_> + 4 7 4 2 -1. + <_> + 4 8 4 1 2. + <_> + + <_> + 0 8 16 2 -1. + <_> + 0 8 8 1 2. + <_> + 8 9 8 1 2. + <_> + + <_> + 11 8 2 5 -1. + <_> + 11 8 1 5 2. + <_> + + <_> + 10 0 6 13 -1. + <_> + 10 0 3 13 2. + <_> + + <_> + 1 6 4 2 -1. + <_> + 1 7 4 1 2. + <_> + + <_> + 4 3 2 1 -1. + <_> + 5 3 1 1 2. + <_> + + <_> + 11 8 2 5 -1. + <_> + 11 8 1 5 2. + <_> + + <_> + 12 10 4 8 -1. + <_> + 12 10 2 8 2. + <_> + + <_> + 7 8 2 5 -1. + <_> + 8 8 1 5 2. + <_> + + <_> + 4 10 4 8 -1. + <_> + 6 10 2 8 2. + <_> + + <_> + 6 7 9 12 -1. + <_> + 9 7 3 12 3. + <_> + + <_> + 11 13 2 3 -1. + <_> + 11 13 1 3 2. + <_> + + <_> + 7 10 6 10 -1. + <_> + 10 10 3 10 2. + <_> + + <_> + 8 11 4 8 -1. + <_> + 8 11 2 4 2. + <_> + 10 15 2 4 2. + <_> + + <_> + 16 1 4 11 -1. + <_> + 16 1 2 11 2. + <_> + + <_> + 18 2 2 4 -1. + <_> + 18 2 1 4 2. + <_> + + <_> + 5 6 6 2 -1. + <_> + 5 6 3 1 2. + <_> + 8 7 3 1 2. + <_> + + <_> + 5 4 1 3 -1. + <_> + 5 5 1 1 3. + <_> + + <_> + 11 1 4 14 -1. + <_> + 11 1 2 14 2. + <_> + + <_> + 4 2 12 3 -1. + <_> + 8 2 4 3 3. + <_> + + <_> + 5 1 4 14 -1. + <_> + 7 1 2 14 2. + <_> + + <_> + 7 3 6 2 -1. + <_> + 9 3 2 2 3. + <_> + + <_> + 2 0 18 4 -1. + <_> + 8 0 6 4 3. + <_> + + <_> + 9 5 2 10 -1. + <_> + 9 10 2 5 2. + <_> + + <_> + 8 6 3 4 -1. + <_> + 9 6 1 4 3. + <_> + + <_> + 5 5 9 11 -1. + <_> + 8 5 3 11 3. + <_> + + <_> + 10 6 3 5 -1. + <_> + 11 6 1 5 3. + <_> + + <_> + 8 9 6 5 -1. + <_> + 8 9 3 5 2. 
+ <_> + + <_> + 7 6 3 5 -1. + <_> + 8 6 1 5 3. + <_> + + <_> + 6 10 6 3 -1. + <_> + 9 10 3 3 2. + <_> + + <_> + 10 0 3 7 -1. + <_> + 11 0 1 7 3. + <_> + + <_> + 0 3 20 12 -1. + <_> + 0 9 20 6 2. + <_> + + <_> + 9 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 5 9 4 1 -1. + <_> + 7 9 2 1 2. + <_> + + <_> + 13 13 3 2 -1. + <_> + 13 14 3 1 2. + <_> + + <_> + 16 9 4 6 -1. + <_> + 16 9 2 6 2. + <_> + + <_> + 7 15 6 3 -1. + <_> + 7 16 6 1 3. + <_> + + <_> + 6 16 7 3 -1. + <_> + 6 17 7 1 3. + <_> + + <_> + 11 14 9 6 -1. + <_> + 11 16 9 2 3. + <_> + + <_> + 19 14 1 3 -1. + <_> + 19 15 1 1 3. + <_> + + <_> + 0 9 6 6 -1. + <_> + 3 9 3 6 2. + <_> + + <_> + 0 19 9 1 -1. + <_> + 3 19 3 1 3. + <_> + + <_> + 11 14 9 6 -1. + <_> + 11 16 9 2 3. + <_> + + <_> + 12 12 6 6 -1. + <_> + 12 14 6 2 3. + <_> + + <_> + 1 14 8 6 -1. + <_> + 1 16 8 2 3. + <_> + + <_> + 8 1 3 2 -1. + <_> + 9 1 1 2 3. + <_> + + <_> + 18 2 2 4 -1. + <_> + 18 2 1 4 2. + <_> + + <_> + 14 0 6 3 -1. + <_> + 16 0 2 3 3. + <_> + + <_> + 0 2 2 4 -1. + <_> + 1 2 1 4 2. + <_> + + <_> + 0 0 6 3 -1. + <_> + 2 0 2 3 3. + <_> + + <_> + 9 0 3 2 -1. + <_> + 10 0 1 2 3. + <_> + + <_> + 12 1 2 2 -1. + <_> + 12 1 1 2 2. + <_> + + <_> + 8 0 3 2 -1. + <_> + 9 0 1 2 3. + <_> + + <_> + 6 1 2 2 -1. + <_> + 7 1 1 2 2. + <_> + + <_> + 10 8 2 3 -1. + <_> + 10 9 2 1 3. + <_> + + <_> + 13 15 6 2 -1. + <_> + 13 16 6 1 2. + <_> + + <_> + 8 12 2 2 -1. + <_> + 8 12 1 1 2. + <_> + 9 13 1 1 2. + <_> + + <_> + 8 15 3 5 -1. + <_> + 9 15 1 5 3. + <_> + + <_> + 8 6 4 12 -1. + <_> + 8 12 4 6 2. + <_> + + <_> + 7 6 7 8 -1. + <_> + 7 10 7 4 2. + <_> + + <_> + 0 11 8 2 -1. + <_> + 0 12 8 1 2. + <_> + + <_> + 8 11 2 2 -1. + <_> + 8 11 1 1 2. + <_> + 9 12 1 1 2. + <_> + + <_> + 7 7 12 1 -1. + <_> + 11 7 4 1 3. + <_> + + <_> + 10 8 3 2 -1. + <_> + 11 8 1 2 3. + <_> + + <_> + 1 7 12 1 -1. + <_> + 5 7 4 1 3. + <_> + + <_> + 6 5 8 2 -1. + <_> + 6 5 4 1 2. + <_> + 10 6 4 1 2. + <_> + + <_> + 9 10 3 10 -1. + <_> + 10 10 1 10 3. + <_> + + <_> + 16 0 2 4 -1. + <_> + 16 0 1 4 2. + <_> + + <_> + 8 10 3 10 -1. + <_> + 9 10 1 10 3. + <_> + + <_> + 9 10 2 3 -1. + <_> + 9 11 2 1 3. + <_> + + <_> + 8 9 4 2 -1. + <_> + 10 9 2 1 2. + <_> + 8 10 2 1 2. + <_> + + <_> + 12 14 7 6 -1. + <_> + 12 16 7 2 3. + <_> + + <_> + 6 1 3 1 -1. + <_> + 7 1 1 1 3. + <_> + + <_> + 2 0 2 4 -1. + <_> + 3 0 1 4 2. + <_> + + <_> + 11 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 11 12 1 1 2. + <_> + + <_> + 12 12 6 6 -1. + <_> + 12 14 6 2 3. + <_> + + <_> + 1 0 6 10 -1. + <_> + 1 0 3 5 2. + <_> + 4 5 3 5 2. + <_> + + <_> + 3 0 2 9 -1. + <_> + 3 3 2 3 3. + <_> + + <_> + 14 13 3 2 -1. + <_> + 14 14 3 1 2. + <_> + + <_> + 15 2 3 2 -1. + <_> + 15 3 3 1 2. + <_> + + <_> + 2 13 5 2 -1. + <_> + 2 14 5 1 2. + <_> + + <_> + 3 4 12 10 -1. + <_> + 3 4 6 5 2. + <_> + 9 9 6 5 2. + <_> + + <_> + 5 1 14 6 -1. + <_> + 5 3 14 2 3. + <_> + + <_> + 15 3 3 2 -1. + <_> + 15 4 3 1 2. + <_> + + <_> + 7 11 2 2 -1. + <_> + 7 11 1 1 2. + <_> + 8 12 1 1 2. + <_> + + <_> + 2 14 6 6 -1. + <_> + 2 16 6 2 3. + <_> + + <_> + 6 13 8 3 -1. + <_> + 6 14 8 1 3. + <_> + + <_> + 1 19 18 1 -1. + <_> + 7 19 6 1 3. + <_> + + <_> + 8 12 1 6 -1. + <_> + 8 15 1 3 2. + <_> + + <_> + 0 0 14 15 -1. + <_> + 0 5 14 5 3. + <_> + + <_> + 3 0 16 8 -1. + <_> + 3 4 16 4 2. + <_> + + <_> + 6 1 8 12 -1. + <_> + 6 7 8 6 2. + <_> + + <_> + 5 3 3 3 -1. + <_> + 6 3 1 3 3. + <_> + + <_> + 5 1 3 4 -1. + <_> + 6 1 1 4 3. + <_> + + <_> + 15 14 4 6 -1. + <_> + 17 14 2 3 2. + <_> + 15 17 2 3 2. + <_> + + <_> + 12 11 6 8 -1. + <_> + 15 11 3 4 2. + <_> + 12 15 3 4 2. 
+ <_> + + <_> + 8 7 2 4 -1. + <_> + 9 7 1 4 2. + <_> + + <_> + 6 11 3 1 -1. + <_> + 7 11 1 1 3. + <_> + + <_> + 12 3 2 14 -1. + <_> + 12 3 1 14 2. + <_> + + <_> + 12 11 6 2 -1. + <_> + 15 11 3 1 2. + <_> + 12 12 3 1 2. + <_> + + <_> + 0 2 5 2 -1. + <_> + 0 3 5 1 2. + <_> + + <_> + 0 0 15 1 -1. + <_> + 5 0 5 1 3. + <_> + + <_> + 12 11 6 2 -1. + <_> + 15 11 3 1 2. + <_> + 12 12 3 1 2. + <_> + + <_> + 10 5 2 2 -1. + <_> + 10 5 1 2 2. + <_> + + <_> + 9 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 9 0 2 10 -1. + <_> + 9 0 1 5 2. + <_> + 10 5 1 5 2. + <_> + + <_> + 18 14 2 2 -1. + <_> + 18 15 2 1 2. + <_> + + <_> + 13 11 4 9 -1. + <_> + 13 14 4 3 3. + <_> + + <_> + 8 13 2 2 -1. + <_> + 8 13 1 1 2. + <_> + 9 14 1 1 2. + <_> + + <_> + 7 8 4 3 -1. + <_> + 7 9 4 1 3. + <_> + + <_> + 8 9 4 2 -1. + <_> + 8 10 4 1 2. + <_> + + <_> + 13 12 4 2 -1. + <_> + 13 13 4 1 2. + <_> + + <_> + 6 14 2 2 -1. + <_> + 6 14 1 1 2. + <_> + 7 15 1 1 2. + <_> + + <_> + 0 14 2 2 -1. + <_> + 0 15 2 1 2. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 7 9 10 6 -1. + <_> + 7 11 10 2 3. + <_> + + <_> + 2 9 12 4 -1. + <_> + 6 9 4 4 3. + <_> + + <_> + 7 9 6 11 -1. + <_> + 10 9 3 11 2. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 9 14 4 3 -1. + <_> + 9 15 4 1 3. + <_> + + <_> + 2 3 3 17 -1. + <_> + 3 3 1 17 3. + <_> + + <_> + 0 11 6 3 -1. + <_> + 0 12 6 1 3. + <_> + + <_> + 4 3 11 9 -1. + <_> + 4 6 11 3 3. + <_> + + <_> + 0 2 6 11 -1. + <_> + 3 2 3 11 2. + <_> + + <_> + 13 0 4 5 -1. + <_> + 13 0 2 5 2. + <_> + + <_> + 9 7 6 4 -1. + <_> + 12 7 3 2 2. + <_> + 9 9 3 2 2. + <_> + + <_> + 5 7 8 2 -1. + <_> + 9 7 4 2 2. + <_> + + <_> + 1 8 15 1 -1. + <_> + 6 8 5 1 3. + <_> + + <_> + 4 12 12 2 -1. + <_> + 8 12 4 2 3. + <_> + + <_> + 13 0 4 10 -1. + <_> + 15 0 2 5 2. + <_> + 13 5 2 5 2. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 3 9 6 2 -1. + <_> + 6 9 3 2 2. + <_> + + <_> + 8 17 4 3 -1. + <_> + 8 18 4 1 3. + <_> + + <_> + 8 3 9 2 -1. + <_> + 11 3 3 2 3. + <_> + + <_> + 3 3 9 2 -1. + <_> + 6 3 3 2 3. + <_> + + <_> + 5 0 9 14 -1. + <_> + 8 0 3 14 3. + <_> + + <_> + 7 3 7 10 -1. + <_> + 7 8 7 5 2. + <_> + + <_> + 4 8 13 3 -1. + <_> + 4 9 13 1 3. + <_> + + <_> + 3 12 14 4 -1. + <_> + 3 12 7 2 2. + <_> + 10 14 7 2 2. + <_> + + <_> + 8 12 4 2 -1. + <_> + 8 13 4 1 2. + <_> + + <_> + 6 10 9 8 -1. + <_> + 6 14 9 4 2. + <_> + + <_> + 9 12 2 8 -1. + <_> + 9 16 2 4 2. + <_> + + <_> + 8 12 3 3 -1. + <_> + 8 13 3 1 3. + <_> + + <_> + 5 5 4 10 -1. + <_> + 7 5 2 10 2. + <_> + + <_> + 14 15 3 3 -1. + <_> + 14 16 3 1 3. + <_> + + <_> + 4 6 13 3 -1. + <_> + 4 7 13 1 3. + <_> + + <_> + 3 15 3 3 -1. + <_> + 3 16 3 1 3. + <_> + + <_> + 3 9 4 2 -1. + <_> + 3 9 2 1 2. + <_> + 5 10 2 1 2. + <_> + + <_> + 0 11 20 4 -1. + <_> + 10 11 10 2 2. + <_> + 0 13 10 2 2. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 0 11 20 4 -1. + <_> + 0 11 10 2 2. + <_> + 10 13 10 2 2. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 10 13 1 6 -1. + <_> + 10 16 1 3 2. + <_> + + <_> + 2 1 18 2 -1. + <_> + 11 1 9 1 2. + <_> + 2 2 9 1 2. + <_> + + <_> + 8 14 3 3 -1. + <_> + 8 15 3 1 3. + <_> + + <_> + 4 1 6 1 -1. + <_> + 6 1 2 1 3. + <_> + + <_> + 11 13 1 3 -1. + <_> + 11 14 1 1 3. + <_> + + <_> + 13 5 2 12 -1. + <_> + 13 11 2 6 2. + <_> + + <_> + 1 14 18 6 -1. + <_> + 1 16 18 2 3. + <_> + + <_> + 8 13 1 3 -1. + <_> + 8 14 1 1 3. + <_> + + <_> + 7 13 6 3 -1. + <_> + 7 14 6 1 3. + <_> + + <_> + 9 10 3 2 -1. + <_> + 9 11 3 1 2. + <_> + + <_> + 5 1 3 3 -1. + <_> + 6 1 1 3 3. 
+ <_> + + <_> + 5 5 6 5 -1. + <_> + 8 5 3 5 2. + <_> + + <_> + 7 5 6 14 -1. + <_> + 7 12 6 7 2. + <_> + + <_> + 7 16 6 2 -1. + <_> + 9 16 2 2 3. + <_> + + <_> + 0 2 2 12 -1. + <_> + 1 2 1 12 2. + <_> + + <_> + 1 0 5 3 -1. + <_> + 1 1 5 1 3. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 12 6 3 3 -1. + <_> + 12 7 3 1 3. + <_> + + <_> + 5 4 3 3 -1. + <_> + 5 5 3 1 3. + <_> + + <_> + 5 6 3 3 -1. + <_> + 5 7 3 1 3. + <_> + + <_> + 8 12 4 8 -1. + <_> + 10 12 2 4 2. + <_> + 8 16 2 4 2. + <_> + + <_> + 2 17 18 2 -1. + <_> + 11 17 9 1 2. + <_> + 2 18 9 1 2. + <_> + + <_> + 9 3 2 2 -1. + <_> + 9 4 2 1 2. + <_> + + <_> + 8 5 4 6 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 9 0 8 6 -1. + <_> + 9 2 8 2 3. + <_> + + <_> + 1 0 18 4 -1. + <_> + 7 0 6 4 3. + <_> + + <_> + 0 0 4 8 -1. + <_> + 2 0 2 8 2. + <_> + + <_> + 0 4 6 9 -1. + <_> + 2 4 2 9 3. + <_> + + <_> + 1 4 18 2 -1. + <_> + 7 4 6 2 3. + <_> + + <_> + 8 16 12 4 -1. + <_> + 14 16 6 2 2. + <_> + 8 18 6 2 2. + <_> + + <_> + 0 0 18 2 -1. + <_> + 0 0 9 1 2. + <_> + 9 1 9 1 2. + <_> + + <_> + 3 0 3 18 -1. + <_> + 4 0 1 18 3. + <_> + + <_> + 14 9 4 7 -1. + <_> + 14 9 2 7 2. + <_> + + <_> + 15 14 2 2 -1. + <_> + 15 15 2 1 2. + <_> + + <_> + 2 9 4 7 -1. + <_> + 4 9 2 7 2. + <_> + + <_> + 3 14 2 2 -1. + <_> + 3 15 2 1 2. + <_> + + <_> + 11 0 6 6 -1. + <_> + 11 2 6 2 3. + <_> + + <_> + 14 0 2 6 -1. + <_> + 15 0 1 3 2. + <_> + 14 3 1 3 2. + <_> + + <_> + 7 11 2 2 -1. + <_> + 7 11 1 1 2. + <_> + 8 12 1 1 2. + <_> + + <_> + 7 10 2 2 -1. + <_> + 8 10 1 2 2. + <_> + + <_> + 9 14 2 6 -1. + <_> + 9 17 2 3 2. + <_> + + <_> + 12 18 4 2 -1. + <_> + 12 19 4 1 2. + <_> + + <_> + 8 17 4 3 -1. + <_> + 8 18 4 1 3. + <_> + + <_> + 2 18 8 2 -1. + <_> + 2 19 8 1 2. + <_> + + <_> + 2 9 16 3 -1. + <_> + 2 10 16 1 3. + <_> + + <_> + 9 9 2 2 -1. + <_> + 9 10 2 1 2. + <_> + + <_> + 5 14 2 4 -1. + <_> + 5 14 1 2 2. + <_> + 6 16 1 2 2. + <_> + + <_> + 8 9 4 2 -1. + <_> + 8 9 2 1 2. + <_> + 10 10 2 1 2. + <_> + + <_> + 9 5 2 5 -1. + <_> + 9 5 1 5 2. + <_> + + <_> + 9 9 3 2 -1. + <_> + 10 9 1 2 3. + <_> + + <_> + 8 9 3 2 -1. + <_> + 9 9 1 2 3. + <_> + + <_> + 8 8 3 6 -1. + <_> + 9 8 1 6 3. + <_> + + <_> + 8 12 4 8 -1. + <_> + 10 12 2 4 2. + <_> + 8 16 2 4 2. + <_> + + <_> + 2 17 16 2 -1. + <_> + 10 17 8 1 2. + <_> + 2 18 8 1 2. + <_> + + <_> + 8 12 3 8 -1. + <_> + 9 12 1 8 3. + <_> + + <_> + 3 10 1 3 -1. + <_> + 3 11 1 1 3. + <_> + + <_> + 9 14 10 6 -1. + <_> + 14 14 5 3 2. + <_> + 9 17 5 3 2. + <_> + + <_> + 14 13 3 6 -1. + <_> + 14 15 3 2 3. + <_> + + <_> + 1 19 18 1 -1. + <_> + 7 19 6 1 3. + <_> + + <_> + 2 10 15 2 -1. + <_> + 7 10 5 2 3. + <_> + + <_> + 4 17 16 3 -1. + <_> + 4 18 16 1 3. + <_> + + <_> + 8 6 4 9 -1. + <_> + 8 9 4 3 3. + <_> + + <_> + 9 16 2 4 -1. + <_> + 9 16 1 2 2. + <_> + 10 18 1 2 2. + <_> + + <_> + 5 5 10 8 -1. + <_> + 5 9 10 4 2. + <_> + + <_> + 13 1 4 2 -1. + <_> + 13 1 2 2 2. + <_> + + <_> + 14 0 3 6 -1. + <_> + 14 2 3 2 3. + <_> + + <_> + 6 7 2 2 -1. + <_> + 6 7 1 1 2. + <_> + 7 8 1 1 2. + <_> + + <_> + 7 1 6 1 -1. + <_> + 9 1 2 1 3. + <_> + + <_> + 9 11 3 3 -1. + <_> + 9 12 3 1 3. + <_> + + <_> + 12 9 3 3 -1. + <_> + 13 9 1 3 3. + <_> + + <_> + 8 11 3 3 -1. + <_> + 8 12 3 1 3. + <_> + + <_> + 5 9 3 3 -1. + <_> + 6 9 1 3 3. + <_> + + <_> + 10 11 1 3 -1. + <_> + 10 12 1 1 3. + <_> + + <_> + 7 9 6 4 -1. + <_> + 10 9 3 2 2. + <_> + 7 11 3 2 2. + <_> + + <_> + 4 7 2 2 -1. + <_> + 4 7 1 1 2. + <_> + 5 8 1 1 2. + <_> + + <_> + 5 7 3 1 -1. + <_> + 6 7 1 1 3. + <_> + + <_> + 18 3 2 3 -1. + <_> + 18 4 2 1 3. + <_> + + <_> + 13 1 4 2 -1. 
+ <_> + 13 1 2 2 2. + <_> + + <_> + 3 1 4 2 -1. + <_> + 5 1 2 2 2. + <_> + + <_> + 3 0 5 2 -1. + <_> + 3 1 5 1 2. + <_> + + <_> + 14 7 6 4 -1. + <_> + 17 7 3 2 2. + <_> + 14 9 3 2 2. + <_> + + <_> + 4 8 16 2 -1. + <_> + 4 9 16 1 2. + <_> + + <_> + 2 11 5 6 -1. + <_> + 2 13 5 2 3. + <_> + + <_> + 5 16 2 4 -1. + <_> + 5 16 1 2 2. + <_> + 6 18 1 2 2. + <_> + + <_> + 15 6 2 12 -1. + <_> + 16 6 1 6 2. + <_> + 15 12 1 6 2. + <_> + + <_> + 13 3 6 16 -1. + <_> + 15 3 2 16 3. + <_> + + <_> + 4 5 12 12 -1. + <_> + 4 5 6 6 2. + <_> + 10 11 6 6 2. + <_> + + <_> + 5 1 10 13 -1. + <_> + 10 1 5 13 2. + <_> + + <_> + 11 5 2 2 -1. + <_> + 12 5 1 1 2. + <_> + 11 6 1 1 2. + <_> + + <_> + 13 5 1 3 -1. + <_> + 13 6 1 1 3. + <_> + + <_> + 7 4 2 4 -1. + <_> + 7 4 1 2 2. + <_> + 8 6 1 2 2. + <_> + + <_> + 7 5 6 4 -1. + <_> + 10 5 3 4 2. + <_> + + <_> + 12 4 4 6 -1. + <_> + 14 4 2 3 2. + <_> + 12 7 2 3 2. + <_> + + <_> + 12 11 7 6 -1. + <_> + 12 13 7 2 3. + <_> + + <_> + 5 6 6 6 -1. + <_> + 7 6 2 6 3. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 15 6 2 2 -1. + <_> + 16 6 1 1 2. + <_> + 15 7 1 1 2. + <_> + + <_> + 14 7 4 4 -1. + <_> + 16 7 2 2 2. + <_> + 14 9 2 2 2. + <_> + + <_> + 5 5 6 2 -1. + <_> + 7 5 2 2 3. + <_> + + <_> + 1 19 18 1 -1. + <_> + 7 19 6 1 3. + <_> + + <_> + 12 3 3 3 -1. + <_> + 12 4 3 1 3. + <_> + + <_> + 16 0 2 3 -1. + <_> + 16 1 2 1 3. + <_> + + <_> + 5 3 3 3 -1. + <_> + 5 4 3 1 3. + <_> + + <_> + 2 0 2 3 -1. + <_> + 2 1 2 1 3. + <_> + + <_> + 15 6 2 2 -1. + <_> + 16 6 1 1 2. + <_> + 15 7 1 1 2. + <_> + + <_> + 10 13 1 6 -1. + <_> + 10 16 1 3 2. + <_> + + <_> + 0 7 10 2 -1. + <_> + 0 7 5 1 2. + <_> + 5 8 5 1 2. + <_> + + <_> + 3 10 6 2 -1. + <_> + 3 11 6 1 2. + <_> + + <_> + 12 18 4 2 -1. + <_> + 12 19 4 1 2. + <_> + + <_> + 12 18 2 2 -1. + <_> + 13 18 1 1 2. + <_> + 12 19 1 1 2. + <_> + + <_> + 6 19 2 1 -1. + <_> + 7 19 1 1 2. + <_> + + <_> + 0 4 2 16 -1. + <_> + 0 4 1 8 2. + <_> + 1 12 1 8 2. + <_> + + <_> + 16 1 4 9 -1. + <_> + 16 4 4 3 3. + <_> + + <_> + 10 2 1 2 -1. + <_> + 10 3 1 1 2. + <_> + + <_> + 4 14 4 6 -1. + <_> + 4 14 2 3 2. + <_> + 6 17 2 3 2. + <_> + + <_> + 4 15 1 4 -1. + <_> + 4 17 1 2 2. + <_> + + <_> + 0 2 20 4 -1. + <_> + 10 2 10 2 2. + <_> + 0 4 10 2 2. + <_> + + <_> + 14 5 2 8 -1. + <_> + 14 9 2 4 2. + <_> + + <_> + 5 12 4 5 -1. + <_> + 7 12 2 5 2. + <_> + + <_> + 0 13 9 6 -1. + <_> + 0 15 9 2 3. + <_> + + <_> + 9 14 11 3 -1. + <_> + 9 15 11 1 3. + <_> + + <_> + 7 14 7 3 -1. + <_> + 7 15 7 1 3. + <_> + + <_> + 3 6 2 2 -1. + <_> + 3 6 1 1 2. + <_> + 4 7 1 1 2. + <_> + + <_> + 6 7 2 7 -1. + <_> + 7 7 1 7 2. + <_> + + <_> + 14 5 1 3 -1. + <_> + 14 6 1 1 3. + <_> + + <_> + 13 4 4 3 -1. + <_> + 13 5 4 1 3. + <_> + + <_> + 2 7 4 4 -1. + <_> + 2 7 2 2 2. + <_> + 4 9 2 2 2. + <_> + + <_> + 2 9 13 6 -1. + <_> + 2 12 13 3 2. + <_> + + <_> + 10 1 3 4 -1. + <_> + 11 1 1 4 3. + <_> + + <_> + 9 8 5 2 -1. + <_> + 9 9 5 1 2. + <_> + + <_> + 0 14 11 3 -1. + <_> + 0 15 11 1 3. + <_> + + <_> + 8 11 2 8 -1. + <_> + 8 15 2 4 2. + <_> + + <_> + 5 11 10 6 -1. + <_> + 5 14 10 3 2. + <_> + + <_> + 5 13 15 5 -1. + <_> + 10 13 5 5 3. + <_> + + <_> + 8 10 1 10 -1. + <_> + 8 15 1 5 2. + <_> + + <_> + 4 14 6 2 -1. + <_> + 6 14 2 2 3. + <_> + + <_> + 7 14 7 3 -1. + <_> + 7 15 7 1 3. + <_> + + <_> + 7 16 9 3 -1. + <_> + 7 17 9 1 3. + <_> + + <_> + 8 7 3 3 -1. + <_> + 8 8 3 1 3. + <_> + + <_> + 3 5 1 6 -1. + <_> + 3 8 1 3 2. + <_> + + <_> + 6 5 11 2 -1. + <_> + 6 6 11 1 2. + <_> + + <_> + 9 0 3 2 -1. + <_> + 10 0 1 2 3. + <_> + + <_> + 5 5 1 3 -1. + <_> + 5 6 1 1 3. 
+ <_> + + <_> + 8 7 3 2 -1. + <_> + 9 7 1 2 3. + <_> + + <_> + 5 2 10 6 -1. + <_> + 10 2 5 3 2. + <_> + 5 5 5 3 2. + <_> + + <_> + 8 4 6 4 -1. + <_> + 8 4 3 4 2. + <_> + + <_> + 8 16 3 4 -1. + <_> + 9 16 1 4 3. + <_> + + <_> + 9 13 2 6 -1. + <_> + 9 13 1 3 2. + <_> + 10 16 1 3 2. + <_> + + <_> + 9 8 3 1 -1. + <_> + 10 8 1 1 3. + <_> + + <_> + 2 5 18 15 -1. + <_> + 2 10 18 5 3. + <_> + + <_> + 1 3 6 2 -1. + <_> + 4 3 3 2 2. + <_> + + <_> + 7 6 6 2 -1. + <_> + 9 6 2 2 3. + <_> + + <_> + 8 17 4 3 -1. + <_> + 8 18 4 1 3. + <_> + + <_> + 10 13 2 3 -1. + <_> + 10 14 2 1 3. + <_> + + <_> + 0 10 20 4 -1. + <_> + 0 12 20 2 2. + <_> + + <_> + 5 7 6 4 -1. + <_> + 5 7 3 2 2. + <_> + 8 9 3 2 2. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 10 10 2 3 -1. + <_> + 10 11 2 1 3. + <_> + + <_> + 9 5 2 2 -1. + <_> + 9 6 2 1 2. + <_> + + <_> + 4 4 1 10 -1. + <_> + 4 9 1 5 2. + <_> + + <_> + 11 18 4 2 -1. + <_> + 11 18 2 2 2. + <_> + + <_> + 12 18 3 2 -1. + <_> + 12 19 3 1 2. + <_> + + <_> + 0 6 16 6 -1. + <_> + 0 6 8 3 2. + <_> + 8 9 8 3 2. + <_> + + <_> + 7 6 4 12 -1. + <_> + 7 12 4 6 2. + <_> + + <_> + 11 18 4 2 -1. + <_> + 11 18 2 2 2. + <_> + + <_> + 12 18 3 2 -1. + <_> + 12 19 3 1 2. + <_> + + <_> + 8 12 1 2 -1. + <_> + 8 13 1 1 2. + <_> + + <_> + 8 13 1 3 -1. + <_> + 8 14 1 1 3. + <_> + + <_> + 11 18 4 2 -1. + <_> + 11 18 2 2 2. + <_> + + <_> + 14 12 4 6 -1. + <_> + 14 12 2 6 2. + <_> + + <_> + 6 0 3 4 -1. + <_> + 7 0 1 4 3. + <_> + + <_> + 4 0 2 8 -1. + <_> + 4 0 1 4 2. + <_> + 5 4 1 4 2. + <_> + + <_> + 11 17 9 3 -1. + <_> + 14 17 3 3 3. + <_> + + <_> + 16 2 4 5 -1. + <_> + 16 2 2 5 2. + <_> + + <_> + 0 2 5 9 -1. + <_> + 0 5 5 3 3. + <_> + + <_> + 7 2 3 2 -1. + <_> + 8 2 1 2 3. + <_> + + <_> + 11 17 9 3 -1. + <_> + 14 17 3 3 3. + <_> + + <_> + 16 2 4 5 -1. + <_> + 16 2 2 5 2. + <_> + + <_> + 0 17 9 3 -1. + <_> + 3 17 3 3 3. + <_> + + <_> + 0 2 4 5 -1. + <_> + 2 2 2 5 2. + <_> + + <_> + 5 11 10 9 -1. + <_> + 5 14 10 3 3. + <_> + + <_> + 9 6 3 3 -1. + <_> + 9 7 3 1 3. + <_> + + <_> + 3 17 5 3 -1. + <_> + 3 18 5 1 3. + <_> + + <_> + 7 5 4 7 -1. + <_> + 9 5 2 7 2. + <_> + + <_> + 9 8 2 5 -1. + <_> + 9 8 1 5 2. + <_> + + <_> + 2 2 18 2 -1. + <_> + 2 3 18 1 2. + <_> + + <_> + 2 8 15 6 -1. + <_> + 7 8 5 6 3. + <_> + + <_> + 9 8 2 5 -1. + <_> + 10 8 1 5 2. + <_> + + <_> + 12 10 4 6 -1. + <_> + 12 12 4 2 3. + <_> + + <_> + 14 3 6 2 -1. + <_> + 14 4 6 1 2. + <_> + + <_> + 5 5 2 3 -1. + <_> + 5 6 2 1 3. + <_> + + <_> + 4 6 3 3 -1. + <_> + 4 7 3 1 3. + <_> + + <_> + 14 12 3 3 -1. + <_> + 14 13 3 1 3. + <_> + + <_> + 6 12 11 3 -1. + <_> + 6 13 11 1 3. + <_> + + <_> + 1 2 3 6 -1. + <_> + 1 4 3 2 3. + <_> + + <_> + 1 0 4 7 -1. + <_> + 3 0 2 7 2. + <_> + + <_> + 9 8 3 4 -1. + <_> + 10 8 1 4 3. + <_> + + <_> + 10 9 2 2 -1. + <_> + 10 10 2 1 2. + <_> + + <_> + 8 8 3 4 -1. + <_> + 9 8 1 4 3. + <_> + + <_> + 4 4 10 10 -1. + <_> + 4 9 10 5 2. + <_> + + <_> + 9 10 3 2 -1. + <_> + 10 10 1 2 3. + <_> + + <_> + 9 10 3 2 -1. + <_> + 9 11 3 1 2. + <_> + + <_> + 8 10 3 2 -1. + <_> + 9 10 1 2 3. + <_> + + <_> + 2 4 14 12 -1. + <_> + 2 4 7 6 2. + <_> + 9 10 7 6 2. + <_> + + <_> + 10 12 1 6 -1. + <_> + 10 15 1 3 2. + <_> + + <_> + 7 3 8 16 -1. + <_> + 11 3 4 8 2. + <_> + 7 11 4 8 2. + <_> + + <_> + 5 6 8 10 -1. + <_> + 5 6 4 5 2. + <_> + 9 11 4 5 2. + <_> + + <_> + 6 2 8 8 -1. + <_> + 6 2 4 4 2. + <_> + 10 6 4 4 2. + <_> + + <_> + 10 5 4 2 -1. + <_> + 12 5 2 1 2. + <_> + 10 6 2 1 2. + <_> + + <_> + 12 4 3 3 -1. + <_> + 12 5 3 1 3. + <_> + + <_> + 4 19 12 1 -1. + <_> + 8 19 4 1 3. + <_> + + <_> + 8 2 3 1 -1. 
+ <_> + 9 2 1 1 3. + <_> + + <_> + 13 17 4 3 -1. + <_> + 13 18 4 1 3. + <_> + + <_> + 7 14 6 3 -1. + <_> + 7 15 6 1 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 7 15 6 3 -1. + <_> + 7 16 6 1 3. + <_> + + <_> + 10 18 3 2 -1. + <_> + 11 18 1 2 3. + <_> + + <_> + 14 12 2 3 -1. + <_> + 14 13 2 1 3. + <_> + + <_> + 4 10 4 6 -1. + <_> + 4 12 4 2 3. + <_> + + <_> + 4 13 3 2 -1. + <_> + 4 14 3 1 2. + <_> + + <_> + 9 16 2 3 -1. + <_> + 9 17 2 1 3. + <_> + + <_> + 10 18 3 2 -1. + <_> + 11 18 1 2 3. + <_> + + <_> + 7 18 3 2 -1. + <_> + 8 18 1 2 3. + <_> + + <_> + 1 10 4 2 -1. + <_> + 1 11 4 1 2. + <_> + + <_> + 12 4 6 3 -1. + <_> + 12 5 6 1 3. + <_> + + <_> + 14 4 1 3 -1. + <_> + 14 5 1 1 3. + <_> + + <_> + 2 4 6 3 -1. + <_> + 2 5 6 1 3. + <_> + + <_> + 5 4 1 3 -1. + <_> + 5 5 1 1 3. + <_> + + <_> + 14 12 3 3 -1. + <_> + 14 13 3 1 3. + <_> + + <_> + 15 12 2 3 -1. + <_> + 15 13 2 1 3. + <_> + + <_> + 3 16 4 3 -1. + <_> + 3 17 4 1 3. + <_> + + <_> + 8 0 4 2 -1. + <_> + 8 1 4 1 2. + <_> + + <_> + 0 0 20 1 -1. + <_> + 0 0 10 1 2. + <_> + + <_> + 9 7 3 4 -1. + <_> + 10 7 1 4 3. + <_> + + <_> + 0 0 20 1 -1. + <_> + 10 0 10 1 2. + <_> + + <_> + 8 7 3 4 -1. + <_> + 9 7 1 4 3. + <_> + + <_> + 1 6 19 3 -1. + <_> + 1 7 19 1 3. + <_> + + <_> + 12 7 4 2 -1. + <_> + 12 8 4 1 2. + <_> + + <_> + 7 8 3 3 -1. + <_> + 7 9 3 1 3. + <_> + + <_> + 7 7 3 3 -1. + <_> + 8 7 1 3 3. + <_> + + <_> + 2 9 16 3 -1. + <_> + 2 10 16 1 3. + <_> + + <_> + 9 4 2 12 -1. + <_> + 9 8 2 4 3. + <_> + + <_> + 7 3 2 5 -1. + <_> + 8 3 1 5 2. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 9 14 4 3 -1. + <_> + 9 15 4 1 3. + <_> + + <_> + 7 8 6 4 -1. + <_> + 10 8 3 2 2. + <_> + 7 10 3 2 2. + <_> + + <_> + 9 7 2 2 -1. + <_> + 10 7 1 2 2. + <_> + + <_> + 5 5 6 6 -1. + <_> + 7 5 2 6 3. + <_> + + <_> + 9 1 3 6 -1. + <_> + 10 1 1 6 3. + <_> + + <_> + 4 5 12 2 -1. + <_> + 8 5 4 2 3. + <_> + + <_> + 4 2 6 4 -1. + <_> + 6 2 2 4 3. + <_> + + <_> + 4 7 8 2 -1. + <_> + 4 8 8 1 2. + <_> + + <_> + 3 6 14 6 -1. + <_> + 10 6 7 3 2. + <_> + 3 9 7 3 2. + <_> + + <_> + 3 6 14 3 -1. + <_> + 3 6 7 3 2. + <_> + + <_> + 0 5 2 2 -1. + <_> + 0 6 2 1 2. + <_> + + <_> + 8 13 4 3 -1. + <_> + 8 14 4 1 3. + <_> + + <_> + 13 0 3 20 -1. + <_> + 14 0 1 20 3. + <_> + + <_> + 10 8 10 3 -1. + <_> + 10 9 10 1 3. + <_> + + <_> + 4 0 3 20 -1. + <_> + 5 0 1 20 3. + <_> + + <_> + 0 8 10 3 -1. + <_> + 0 9 10 1 3. + <_> + + <_> + 12 5 3 4 -1. + <_> + 13 5 1 4 3. + <_> + + <_> + 6 7 12 4 -1. + <_> + 10 7 4 4 3. + <_> + + <_> + 1 14 6 6 -1. + <_> + 1 14 3 3 2. + <_> + 4 17 3 3 2. + <_> + + <_> + 1 17 6 2 -1. + <_> + 1 18 6 1 2. + <_> + + <_> + 14 8 6 12 -1. + <_> + 17 8 3 6 2. + <_> + 14 14 3 6 2. + <_> + + <_> + 18 5 2 2 -1. + <_> + 18 6 2 1 2. + <_> + + <_> + 3 16 4 2 -1. + <_> + 3 16 2 1 2. + <_> + 5 17 2 1 2. + <_> + + <_> + 2 16 6 2 -1. + <_> + 4 16 2 2 3. + <_> + + <_> + 14 8 6 12 -1. + <_> + 17 8 3 6 2. + <_> + 14 14 3 6 2. + <_> + + <_> + 18 5 2 2 -1. + <_> + 18 6 2 1 2. + <_> + + <_> + 5 16 9 2 -1. + <_> + 8 16 3 2 3. + <_> + + <_> + 3 14 6 6 -1. + <_> + 3 14 3 3 2. + <_> + 6 17 3 3 2. + <_> + + <_> + 14 8 6 12 -1. + <_> + 17 8 3 6 2. + <_> + 14 14 3 6 2. + <_> + + <_> + 11 7 2 12 -1. + <_> + 11 11 2 4 3. + <_> + + <_> + 0 8 6 12 -1. + <_> + 0 8 3 6 2. + <_> + 3 14 3 6 2. + <_> + + <_> + 7 7 2 12 -1. + <_> + 7 11 2 4 3. + <_> + + <_> + 14 12 1 2 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 12 13 8 1 -1. + <_> + 12 13 4 1 2. + <_> + + <_> + 0 3 16 6 -1. + <_> + 0 6 16 3 2. + <_> + + <_> + 1 4 8 2 -1. + <_> + 1 4 4 1 2. + <_> + 5 5 4 1 2. 
+ <_> + + <_> + 14 12 1 2 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 15 12 2 3 -1. + <_> + 15 13 2 1 3. + <_> + + <_> + 8 16 3 3 -1. + <_> + 8 17 3 1 3. + <_> + + <_> + 5 12 1 2 -1. + <_> + 5 13 1 1 2. + <_> + + <_> + 13 4 3 15 -1. + <_> + 14 4 1 15 3. + <_> + + <_> + 17 3 2 6 -1. + <_> + 18 3 1 3 2. + <_> + 17 6 1 3 2. + <_> + + <_> + 4 4 3 15 -1. + <_> + 5 4 1 15 3. + <_> + + <_> + 1 3 2 6 -1. + <_> + 1 3 1 3 2. + <_> + 2 6 1 3 2. + <_> + + <_> + 7 15 12 4 -1. + <_> + 7 17 12 2 2. + <_> + + <_> + 1 0 19 3 -1. + <_> + 1 1 19 1 3. + <_> + + <_> + 3 17 10 2 -1. + <_> + 3 17 5 1 2. + <_> + 8 18 5 1 2. + <_> + + <_> + 2 5 10 15 -1. + <_> + 2 10 10 5 3. + <_> + + <_> + 13 8 3 4 -1. + <_> + 13 10 3 2 2. + <_> + + <_> + 19 13 1 2 -1. + <_> + 19 14 1 1 2. + <_> + + <_> + 4 8 3 4 -1. + <_> + 4 10 3 2 2. + <_> + + <_> + 0 13 1 2 -1. + <_> + 0 14 1 1 2. + <_> + + <_> + 12 7 2 12 -1. + <_> + 12 13 2 6 2. + <_> + + <_> + 14 7 2 2 -1. + <_> + 15 7 1 1 2. + <_> + 14 8 1 1 2. + <_> + + <_> + 5 3 8 2 -1. + <_> + 5 4 8 1 2. + <_> + + <_> + 0 2 2 6 -1. + <_> + 0 4 2 2 3. + <_> + + <_> + 18 2 2 12 -1. + <_> + 19 2 1 6 2. + <_> + 18 8 1 6 2. + <_> + + <_> + 18 1 1 2 -1. + <_> + 18 2 1 1 2. + <_> + + <_> + 0 2 2 12 -1. + <_> + 0 2 1 6 2. + <_> + 1 8 1 6 2. + <_> + + <_> + 1 1 1 2 -1. + <_> + 1 2 1 1 2. + <_> + + <_> + 16 4 4 14 -1. + <_> + 18 4 2 7 2. + <_> + 16 11 2 7 2. + <_> + + <_> + 10 14 1 6 -1. + <_> + 10 17 1 3 2. + <_> + + <_> + 0 4 4 14 -1. + <_> + 0 4 2 7 2. + <_> + 2 11 2 7 2. + <_> + + <_> + 9 14 1 6 -1. + <_> + 9 17 1 3 2. + <_> + + <_> + 9 14 4 3 -1. + <_> + 9 15 4 1 3. + <_> + + <_> + 4 7 12 2 -1. + <_> + 8 7 4 2 3. + <_> + + <_> + 0 8 4 3 -1. + <_> + 0 9 4 1 3. + <_> + + <_> + 4 7 2 2 -1. + <_> + 4 7 1 1 2. + <_> + 5 8 1 1 2. + <_> + + <_> + 13 7 2 1 -1. + <_> + 13 7 1 1 2. + <_> + + <_> + 11 4 4 5 -1. + <_> + 11 4 2 5 2. + <_> + + <_> + 4 8 3 3 -1. + <_> + 5 8 1 3 3. + <_> + + <_> + 0 3 8 1 -1. + <_> + 4 3 4 1 2. + <_> + + <_> + 13 7 2 1 -1. + <_> + 13 7 1 1 2. + <_> + + <_> + 14 7 3 2 -1. + <_> + 15 7 1 2 3. + <_> + + <_> + 5 7 2 1 -1. + <_> + 6 7 1 1 2. + <_> + + <_> + 3 7 3 2 -1. + <_> + 4 7 1 2 3. + <_> + + <_> + 18 5 2 2 -1. + <_> + 18 6 2 1 2. + <_> + + <_> + 12 14 2 2 -1. + <_> + 13 14 1 1 2. + <_> + 12 15 1 1 2. + <_> + + <_> + 0 5 2 2 -1. + <_> + 0 6 2 1 2. + <_> + + <_> + 6 14 2 2 -1. + <_> + 6 14 1 1 2. + <_> + 7 15 1 1 2. + <_> + + <_> + 7 12 6 5 -1. + <_> + 9 12 2 5 3. + <_> + + <_> + 12 17 5 2 -1. + <_> + 12 18 5 1 2. + <_> + + <_> + 1 11 6 3 -1. + <_> + 4 11 3 3 2. + <_> + + <_> + 1 9 6 3 -1. + <_> + 4 9 3 3 2. + <_> + + <_> + 12 7 2 12 -1. + <_> + 12 13 2 6 2. + <_> + + <_> + 8 7 5 3 -1. + <_> + 8 8 5 1 3. + <_> + + <_> + 6 7 2 12 -1. + <_> + 6 13 2 6 2. + <_> + + <_> + 1 2 9 18 -1. + <_> + 4 2 3 18 3. + <_> + + <_> + 12 17 5 2 -1. + <_> + 12 18 5 1 2. + <_> + + <_> + 4 7 12 2 -1. + <_> + 4 7 6 2 2. + <_> + + <_> + 6 7 6 1 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 7 3 3 2 -1. + <_> + 8 3 1 2 3. + <_> + + <_> + 9 4 3 1 -1. + <_> + 10 4 1 1 3. + <_> + + <_> + 11 11 3 1 -1. + <_> + 12 11 1 1 3. + <_> + + <_> + 8 4 3 1 -1. + <_> + 9 4 1 1 3. + <_> + + <_> + 6 11 3 1 -1. + <_> + 7 11 1 1 3. + <_> + + <_> + 12 13 6 6 -1. + <_> + 12 15 6 2 3. + <_> + + <_> + 14 13 1 6 -1. + <_> + 14 15 1 2 3. + <_> + + <_> + 2 13 6 6 -1. + <_> + 2 15 6 2 3. + <_> + + <_> + 1 5 18 1 -1. + <_> + 7 5 6 1 3. + <_> + + <_> + 4 7 12 2 -1. + <_> + 10 7 6 1 2. + <_> + 4 8 6 1 2. + <_> + + <_> + 6 1 8 10 -1. + <_> + 10 1 4 5 2. + <_> + 6 6 4 5 2. + <_> + + <_> + 3 13 4 3 -1. + <_> + 3 14 4 1 3. 
+ <_> + + <_> + 6 13 4 3 -1. + <_> + 6 14 4 1 3. + <_> + + <_> + 9 14 4 3 -1. + <_> + 9 15 4 1 3. + <_> + + <_> + 12 9 2 3 -1. + <_> + 12 10 2 1 3. + <_> + + <_> + 7 14 4 3 -1. + <_> + 7 15 4 1 3. + <_> + + <_> + 9 0 2 1 -1. + <_> + 10 0 1 1 2. + <_> + + <_> + 5 0 10 5 -1. + <_> + 5 0 5 5 2. + <_> + + <_> + 6 6 8 7 -1. + <_> + 6 6 4 7 2. + <_> + + <_> + 5 0 10 5 -1. + <_> + 10 0 5 5 2. + <_> + + <_> + 6 6 8 7 -1. + <_> + 10 6 4 7 2. + <_> + + <_> + 5 9 10 8 -1. + <_> + 10 9 5 4 2. + <_> + 5 13 5 4 2. + <_> + + <_> + 10 0 4 10 -1. + <_> + 12 0 2 5 2. + <_> + 10 5 2 5 2. + <_> + + <_> + 1 4 8 3 -1. + <_> + 1 5 8 1 3. + <_> + + <_> + 4 4 8 3 -1. + <_> + 4 5 8 1 3. + <_> + + <_> + 9 7 4 3 -1. + <_> + 9 8 4 1 3. + <_> + + <_> + 12 8 3 12 -1. + <_> + 12 14 3 6 2. + <_> + + <_> + 7 7 4 3 -1. + <_> + 7 8 4 1 3. + <_> + + <_> + 5 8 3 12 -1. + <_> + 5 14 3 6 2. + <_> + + <_> + 10 0 7 6 -1. + <_> + 10 2 7 2 3. + <_> + + <_> + 2 1 18 1 -1. + <_> + 8 1 6 1 3. + <_> + + <_> + 5 0 3 8 -1. + <_> + 6 0 1 8 3. + <_> + + <_> + 4 7 4 2 -1. + <_> + 4 8 4 1 2. + diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt_tree.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt_tree.xml new file mode 100644 index 0000000000000000000000000000000000000000..ff638d5b19d7bc938321749aaee2ce8727dbeb5a --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_alt_tree.xml @@ -0,0 +1,103493 @@ + + + + + 20 20 + + <_> + + + <_> + + <_> + + + + <_>2 7 14 4 -1. + <_>2 9 14 2 2. + 0 + 3.7895569112151861e-003 + -0.9294580221176148 + 0.6411985158920288 + <_> + + <_> + + + + <_>1 2 18 4 -1. + <_>7 2 6 4 3. + 0 + 0.0120981102809310 + -0.7181009054183960 + 0.4714100956916809 + <_> + + <_> + + + + <_>5 5 9 5 -1. + <_>8 5 3 5 3. + 0 + 1.2138449819758534e-003 + -0.7283161282539368 + 0.3033069074153900 + -1.3442519903182983 + -1 + -1 + <_> + + + <_> + + <_> + + + + <_>3 6 14 9 -1. + <_>3 9 14 3 3. + 0 + 8.7510552257299423e-003 + -0.8594707250595093 + 0.3688138127326965 + <_> + + <_> + + + + <_>1 1 18 5 -1. + <_>7 1 6 5 3. + 0 + 0.0219867005944252 + -0.6018015146255493 + 0.3289783000946045 + <_> + + <_> + + + + <_>4 6 12 8 -1. + <_>4 10 12 4 2. + 0 + 6.4913398819044232e-004 + -0.7943195104598999 + 0.2549329996109009 + <_> + + <_> + + + + <_>9 5 6 10 -1. + <_>12 5 3 5 2. + <_>9 10 3 5 2. + 0 + -1.0192029876634479e-003 + 0.2272932976484299 + -0.6362798213958740 + <_> + + <_> + + + + <_>4 0 11 9 -1. + <_>4 3 11 3 3. + 0 + 1.3674780493602157e-003 + -0.6001418232917786 + 0.2411836981773377 + <_> + + <_> + + + + <_>12 5 4 8 -1. + <_>12 9 4 4 2. + 0 + 1.0245250305160880e-003 + -0.5854247212409973 + 0.1255010962486267 + <_> + + <_> + + + + <_>4 5 10 10 -1. + <_>4 5 5 5 2. + <_>9 10 5 5 2. + 0 + 0.0184658598154783 + 0.1956356018781662 + -0.6763023138046265 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 4.0901508182287216e-003 + -0.4491649866104126 + 0.2667768895626068 + <_> + + <_> + + + + <_>3 8 5 12 -1. + <_>3 14 5 6 2. + 0 + 0.0113580999895930 + 0.1878322958946228 + -0.6137936115264893 + -1.6378560066223145 + 0 + -1 + <_> + + + <_> + + <_> + + + + <_>5 3 9 9 -1. + <_>5 6 9 3 3. + 0 + -0.0115889497101307 + 0.3456704020500183 + -0.7647898197174072 + <_> + + <_> + + + + <_>8 5 4 12 -1. + <_>8 11 4 6 2. + 0 + 5.1809530705213547e-003 + 0.2410492002964020 + -0.6962355971336365 + <_> + + <_> + + + + <_>3 6 5 6 -1. + <_>3 9 5 3 2. + 0 + 2.1468549966812134e-003 + -0.8055366277694702 + 0.1983861029148102 + <_> + + <_> + + + + <_>4 5 12 5 -1. + <_>8 5 4 5 3. 
+ 0 + -3.6556499544531107e-003 + -0.7183313965797424 + 0.1230567991733551 + <_> + + <_> + + + + <_>1 2 8 8 -1. + <_>1 2 4 4 2. + <_>5 6 4 4 2. + 0 + -1.9701640121638775e-003 + 0.2277768999338150 + -0.4752016961574554 + <_> + + <_> + + + + <_>8 12 10 8 -1. + <_>13 12 5 4 2. + <_>8 16 5 4 2. + 0 + -3.3645539078861475e-003 + -0.4609504938125610 + 0.2039465010166168 + <_> + + <_> + + + + <_>4 9 3 10 -1. + <_>4 14 3 5 2. + 0 + -7.4126059189438820e-005 + 0.1821323931217194 + -0.4782927036285400 + <_> + + <_> + + + + <_>0 4 20 10 -1. + <_>0 9 20 5 2. + 0 + -0.0175711102783680 + -0.7173755168914795 + 0.1131113022565842 + <_> + + <_> + + + + <_>3 0 13 9 -1. + <_>3 3 13 3 3. + 0 + 6.3840472139418125e-003 + -0.4020568132400513 + 0.2073028981685638 + <_> + + <_> + + + + <_>10 1 4 11 -1. + <_>10 1 2 11 2. + 0 + -0.0147233996540308 + -0.6755877137184143 + 0.0689730867743492 + <_> + + <_> + + + + <_>6 1 4 11 -1. + <_>8 1 2 11 2. + 0 + -5.2889222279191017e-003 + -0.6210517287254334 + 0.1334936022758484 + <_> + + <_> + + + + <_>4 6 12 8 -1. + <_>10 6 6 4 2. + <_>4 10 6 4 2. + 0 + 0.0277436301112175 + 0.1176085025072098 + -0.5464112162590027 + <_> + + <_> + + + + <_>4 7 12 4 -1. + <_>4 9 12 2 2. + 0 + 0.0394275598227978 + -0.2113427966833115 + 0.3945299983024597 + <_> + + <_> + + + + <_>11 9 4 7 -1. + <_>11 9 2 7 2. + 0 + 8.6949411779642105e-003 + 0.1258095055818558 + -0.4798910021781921 + <_> + + <_> + + + + <_>5 9 4 7 -1. + <_>7 9 2 7 2. + 0 + 2.8245279099792242e-003 + 0.1965314000844955 + -0.4025667905807495 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0289151892066002 + -0.8061652779579163 + 0.0818822607398033 + -1.7317579984664917 + 1 + -1 + <_> + + + <_> + + <_> + + + + <_>0 7 20 6 -1. + <_>0 9 20 2 3. + 0 + 8.0171944573521614e-003 + -0.6898155212402344 + 0.2413686066865921 + <_> + + <_> + + + + <_>6 3 8 6 -1. + <_>6 6 8 3 2. + 0 + -2.4478728882968426e-003 + 0.2135320007801056 + -0.6414669156074524 + <_> + + <_> + + + + <_>7 2 6 7 -1. + <_>9 2 2 7 3. + 0 + 1.7917619552463293e-003 + -0.6144546866416931 + 0.1923692971467972 + <_> + + <_> + + + + <_>11 7 5 9 -1. + <_>11 10 5 3 3. + 0 + 4.3905500206165016e-004 + -0.7536042928695679 + 0.1569689065217972 + <_> + + <_> + + + + <_>4 6 8 8 -1. + <_>4 6 4 4 2. + <_>8 10 4 4 2. + 0 + -3.6769549478776753e-004 + 0.1738051027059555 + -0.5840449929237366 + <_> + + <_> + + + + <_>9 5 6 8 -1. + <_>9 9 6 4 2. + 0 + -4.2802388779819012e-003 + -0.6696898937225342 + 0.1128972992300987 + <_> + + <_> + + + + <_>4 10 5 6 -1. + <_>4 13 5 3 2. + 0 + 3.5238768905401230e-003 + 0.1250194013118744 + -0.7329921722412109 + <_> + + <_> + + + + <_>12 0 6 5 -1. + <_>12 0 3 5 2. + 0 + 7.9299701610580087e-004 + -0.4496619999408722 + 0.2159093022346497 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 4.4371088733896613e-004 + -0.3890976905822754 + 0.2118114978075028 + <_> + + <_> + + + + <_>3 2 17 2 -1. + <_>3 3 17 1 2. + 0 + -2.7145470958203077e-003 + -0.4671686887741089 + 0.1503839939832687 + <_> + + <_> + + + + <_>5 6 4 8 -1. + <_>5 10 4 4 2. + 0 + -6.9272058317437768e-004 + -0.5859655141830444 + 0.1171438023447990 + <_> + + <_> + + + + <_>14 3 6 9 -1. + <_>14 3 3 9 2. + 0 + 0.0492618083953857 + -0.1380015015602112 + 0.4936623871326447 + <_> + + <_> + + + + <_>3 0 9 5 -1. + <_>6 0 3 5 3. + 0 + -0.0228375196456909 + -0.6374350786209106 + 0.1232409030199051 + <_> + + <_> + + + + <_>15 2 4 9 -1. + <_>15 2 2 9 2. + 0 + 4.8372112214565277e-003 + -0.1239162981510162 + 0.1062088981270790 + <_> + + <_> + + + + <_>1 2 4 9 -1. + <_>3 2 2 9 2. 
+ 0 + 0.0102562597021461 + -0.1876704990863800 + 0.2982417047023773 + <_> + + <_> + + + + <_>8 8 6 12 -1. + <_>8 12 6 4 3. + 0 + 0.0106186801567674 + 0.1061246022582054 + -0.3324488103389740 + <_> + + <_> + + + + <_>2 13 16 4 -1. + <_>2 13 8 2 2. + <_>10 15 8 2 2. + 0 + 0.0241131391376257 + 0.0872006118297577 + -0.6684662103652954 + <_> + + <_> + + + + <_>6 5 8 6 -1. + <_>6 7 8 2 3. + 0 + -3.6754710599780083e-003 + 0.1104328036308289 + -0.4458195865154266 + <_> + + <_> + + + + <_>0 11 8 6 -1. + <_>0 13 8 2 3. + 0 + -0.0389962010085583 + -0.7022811174392700 + 0.0818094909191132 + <_> + + <_> + + + + <_>0 18 20 2 -1. + <_>0 19 20 1 2. + 0 + 1.5777100343257189e-003 + 0.1595419943332672 + -0.3286077082157135 + <_> + + <_> + + + + <_>1 12 7 6 -1. + <_>1 14 7 2 3. + 0 + 9.1089410707354546e-003 + 0.1032636985182762 + -0.4440256059169769 + <_> + + <_> + + + + <_>3 1 17 3 -1. + <_>3 2 17 1 3. + 0 + -0.0170516092330217 + -0.5585334897041321 + 0.0627114996314049 + <_> + + <_> + + + + <_>3 6 5 6 -1. + <_>3 9 5 3 2. + 0 + 1.3652660418301821e-003 + -0.5393446087837219 + 0.0708398967981339 + <_> + + <_> + + + + <_>4 5 12 7 -1. + <_>8 5 4 7 3. + 0 + -0.0111861499026418 + -0.4726018011569977 + 0.0810194164514542 + <_> + + <_> + + + + <_>0 4 14 4 -1. + <_>0 4 7 2 2. + <_>7 6 7 2 2. + 0 + -0.0117052700370550 + 0.2475008964538574 + -0.1777898967266083 + <_> + + <_> + + + + <_>4 11 12 9 -1. + <_>4 14 12 3 3. + 0 + -0.0977369323372841 + -0.5617750883102417 + 0.0809218212962151 + <_> + + <_> + + + + <_>3 2 14 16 -1. + <_>3 2 7 8 2. + <_>10 10 7 8 2. + 0 + -0.0852280631661415 + -0.5223324894905090 + 0.0728213936090469 + <_> + + <_> + + + + <_>1 0 18 4 -1. + <_>7 0 6 4 3. + 0 + -0.0367334596812725 + 0.4362357854843140 + -0.0993395075201988 + <_> + + <_> + + + + <_>3 1 10 16 -1. + <_>3 1 5 8 2. + <_>8 9 5 8 2. + 0 + -3.6704430822283030e-003 + 0.1483422070741653 + -0.2711966931819916 + -1.9308480024337769 + 2 + -1 + <_> + + + <_> + + <_> + + + + <_>1 0 16 2 -1. + <_>1 1 16 1 2. + 0 + -1.1610370129346848e-003 + -0.5637788772583008 + 0.2356878072023392 + <_> + + <_> + + + + <_>2 10 16 4 -1. + <_>2 12 16 2 2. + 0 + 1.1830299627035856e-003 + 0.1572428047657013 + -0.6772817969322205 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>9 0 2 8 3. + 0 + -2.1273950114846230e-003 + -0.6615015268325806 + 0.1494313925504684 + <_> + + <_> + + + + <_>5 3 10 9 -1. + <_>5 6 10 3 3. + 0 + -0.1189346984028816 + 0.5322582125663757 + -0.2296836972236633 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0136248702183366 + -0.6063550114631653 + 0.1700108945369721 + <_> + + <_> + + + + <_>10 6 8 12 -1. + <_>10 10 8 4 3. + 0 + -6.3198682619258761e-004 + -0.6897224187850952 + 0.1158462986350060 + <_> + + <_> + + + + <_>2 8 15 3 -1. + <_>2 9 15 1 3. + 0 + -4.4108428992331028e-003 + -0.6296700239181519 + 0.1243060007691383 + <_> + + <_> + + + + <_>10 6 9 12 -1. + <_>10 10 9 4 3. + 0 + -0.0229822397232056 + -0.5049725174903870 + 0.0166361201554537 + <_> + + <_> + + + + <_>4 6 6 8 -1. + <_>4 10 6 4 2. + 0 + -2.3721898905932903e-003 + -0.6246224045753479 + 0.1379375010728836 + <_> + + <_> + + + + <_>9 8 4 12 -1. + <_>9 12 4 4 3. + 0 + 8.7364763021469116e-003 + 0.1399662047624588 + -0.5482295155525208 + <_> + + <_> + + + + <_>1 0 6 18 -1. + <_>4 0 3 18 2. + 0 + 0.0677370727062225 + -0.1917248070240021 + 0.5470048785209656 + <_> + + <_> + + + + <_>5 2 13 2 -1. + <_>5 3 13 1 2. + 0 + -4.0138149634003639e-003 + -0.5542911887168884 + 0.1451705992221832 + <_> + + <_> + + + + <_>5 5 6 5 -1. + <_>8 5 3 5 2. 
+ 0 + 1.2857170077040792e-004 + -0.5103123784065247 + 0.1102394014596939 + <_> + + <_> + + + + <_>6 0 8 12 -1. + <_>10 0 4 6 2. + <_>6 6 4 6 2. + 0 + -0.0396889485418797 + -0.6183072924613953 + 0.0966760963201523 + <_> + + <_> + + + + <_>2 1 6 10 -1. + <_>2 1 3 5 2. + <_>5 6 3 5 2. + 0 + -1.6646150033921003e-003 + 0.1644988954067230 + -0.3718631863594055 + <_> + + <_> + + + + <_>11 12 7 6 -1. + <_>11 14 7 2 3. + 0 + 5.3499247878789902e-003 + 0.1114505007863045 + -0.3744102120399475 + <_> + + <_> + + + + <_>0 12 18 4 -1. + <_>0 12 9 2 2. + <_>9 14 9 2 2. + 0 + -0.0229040104895830 + -0.5809758901596069 + 0.1107726022601128 + <_> + + <_> + + + + <_>5 5 15 6 -1. + <_>5 7 15 2 3. + 0 + 0.0107034500688314 + 0.0447332598268986 + -0.5811663269996643 + <_> + + <_> + + + + <_>2 6 5 9 -1. + <_>2 9 5 3 3. + 0 + -4.2331559234298766e-004 + -0.5442379117012024 + 0.0870892927050591 + <_> + + <_> + + + + <_>9 8 10 6 -1. + <_>14 8 5 3 2. + <_>9 11 5 3 2. + 0 + 0.0155544299632311 + 0.0568843409419060 + -0.3764517009258270 + <_> + + <_> + + + + <_>5 6 10 10 -1. + <_>5 6 5 5 2. + <_>10 11 5 5 2. + 0 + -0.0205394495278597 + -0.3871456980705261 + 0.1183383986353874 + <_> + + <_> + + + + <_>7 4 12 4 -1. + <_>7 6 12 2 2. + 0 + -3.1234358903020620e-003 + 0.0836354270577431 + -0.1986238956451416 + <_> + + <_> + + + + <_>1 10 16 4 -1. + <_>1 10 8 2 2. + <_>9 12 8 2 2. + 0 + 0.0239328294992447 + 0.0796005427837372 + -0.6537010073661804 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>7 17 6 3 3. + 0 + 0.0839204564690590 + -0.1065312996506691 + 0.4877282083034515 + <_> + + <_> + + + + <_>6 0 3 17 -1. + <_>7 0 1 17 3. + 0 + 0.0160031598061323 + 0.0836432129144669 + -0.5920773148536682 + <_> + + <_> + + + + <_>9 4 4 16 -1. + <_>11 4 2 8 2. + <_>9 12 2 8 2. + 0 + 5.8071441017091274e-003 + 0.0879975035786629 + -0.3327913880348206 + <_> + + <_> + + + + <_>0 0 4 20 -1. + <_>2 0 2 20 2. + 0 + -0.0811044275760651 + 0.6377518773078919 + -0.0676923617720604 + <_> + + <_> + + + + <_>13 2 6 13 -1. + <_>15 2 2 13 3. + 0 + 0.0454030297696590 + -0.0515103898942471 + 0.3022567033767700 + <_> + + <_> + + + + <_>6 1 6 18 -1. + <_>6 1 3 9 2. + <_>9 10 3 9 2. + 0 + 0.0138772297650576 + 0.0999676287174225 + -0.4652090966701508 + <_> + + <_> + + + + <_>15 0 4 13 -1. + <_>15 0 2 13 2. + 0 + 0.0345907099545002 + -0.0976144373416901 + 0.3467875123023987 + <_> + + <_> + + + + <_>5 6 3 14 -1. + <_>6 6 1 14 3. + 0 + 0.0157045498490334 + 0.0763441175222397 + -0.5335631966590881 + <_> + + <_> + + + + <_>14 2 6 13 -1. + <_>14 2 3 13 2. + 0 + -0.1042054966092110 + 0.6189097166061401 + -0.0442597605288029 + <_> + + <_> + + + + <_>1 2 18 3 -1. + <_>7 2 6 3 3. + 0 + 0.1344318985939026 + -0.0598530210554600 + 0.6363571286201477 + <_> + + <_> + + + + <_>5 5 11 8 -1. + <_>5 9 11 4 2. + 0 + -2.5646309368312359e-003 + -0.5360047221183777 + 0.0731160268187523 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 0.0186470896005630 + 0.0698561519384384 + -0.5687832236289978 + <_> + + <_> + + + + <_>11 4 7 4 -1. + <_>11 6 7 2 2. + 0 + 0.0151595398783684 + 0.0182063393294811 + -0.2766315937042236 + -2.0711259841918945 + 3 + -1 + <_> + + + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.1477842926979065 + -0.8993312120437622 + 0.5703592896461487 + <_> + + <_> + + + + <_>1 0 18 20 -1. + <_>7 0 6 20 3. + 0 + 0.2998467087745667 + -0.6539415121078491 + 0.3505445122718811 + <_> + + <_> + + + + <_>5 3 10 9 -1. + <_>5 6 10 3 3. + 0 + -0.0790617167949677 + 0.4408529102802277 + -0.6508756875991821 + <_> + + <_> + + + + <_>14 3 6 11 -1. + <_>14 3 3 11 2. 
+ 0 + 0.0584289617836475 + -0.4266535937786102 + 0.5841056704521179 + <_> + + <_> + + + + <_>3 9 4 10 -1. + <_>3 14 4 5 2. + 0 + -0.0146642802283168 + 0.3243524134159088 + -0.5965961813926697 + <_> + + <_> + + + + <_>8 1 12 19 -1. + <_>8 1 6 19 2. + 0 + 0.3951719999313355 + -0.0757983475923538 + 0.4865995049476624 + <_> + + <_> + + + + <_>0 1 12 19 -1. + <_>6 1 6 19 2. + 0 + 0.1104058995842934 + -0.8455610275268555 + 0.2137456983327866 + -2.1360809803009033 + 4 + 6 + <_> + + + <_> + + <_> + + + + <_>8 4 4 16 -1. + <_>8 12 4 8 2. + 0 + 3.7777079269289970e-003 + 0.1874440014362335 + -0.6535406112670898 + <_> + + <_> + + + + <_>9 8 4 12 -1. + <_>9 12 4 4 3. + 0 + 5.3003188222646713e-003 + 0.0939518436789513 + -0.5691788792610169 + <_> + + <_> + + + + <_>6 2 8 12 -1. + <_>6 6 8 4 3. + 0 + -5.5426009930670261e-003 + 0.1603170931339264 + -0.5182223916053772 + <_> + + <_> + + + + <_>7 7 6 13 -1. + <_>9 7 2 13 3. + 0 + -9.1971885412931442e-003 + -0.5742046236991882 + 0.1479140073060989 + <_> + + <_> + + + + <_>0 6 7 6 -1. + <_>0 9 7 3 2. + 0 + 5.3701602155342698e-004 + -0.7044969797134399 + 0.1075214967131615 + <_> + + <_> + + + + <_>1 8 19 3 -1. + <_>1 9 19 1 3. + 0 + -2.2125479299575090e-003 + -0.5087742805480957 + 0.1136718988418579 + <_> + + <_> + + + + <_>5 0 3 14 -1. + <_>6 0 1 14 3. + 0 + 0.0116757303476334 + 0.0842586830258369 + -0.6738470196723938 + <_> + + <_> + + + + <_>10 3 10 6 -1. + <_>15 3 5 3 2. + <_>10 6 5 3 2. + 0 + -2.0404369570314884e-003 + 0.1625111997127533 + -0.4143564999103546 + <_> + + <_> + + + + <_>5 1 8 8 -1. + <_>5 1 4 4 2. + <_>9 5 4 4 2. + 0 + -7.6540438458323479e-003 + -0.4283317923545837 + 0.1306070983409882 + <_> + + <_> + + + + <_>6 7 14 4 -1. + <_>13 7 7 2 2. + <_>6 9 7 2 2. + 0 + 0.0293704792857170 + 0.0546510517597198 + -0.3479537963867188 + <_> + + <_> + + + + <_>0 7 14 4 -1. + <_>0 7 7 2 2. + <_>7 9 7 2 2. + 0 + -9.5828901976346970e-003 + -0.4862071871757507 + 0.1170689016580582 + <_> + + <_> + + + + <_>10 6 9 12 -1. + <_>10 10 9 4 3. + 0 + 6.0666278004646301e-003 + -0.3655388057231903 + 0.0878136008977890 + <_> + + <_> + + + + <_>4 10 8 4 -1. + <_>8 10 4 4 2. + 0 + 1.7992249922826886e-003 + 0.1603599041700363 + -0.3085910975933075 + <_> + + <_> + + + + <_>11 14 8 6 -1. + <_>11 16 8 2 3. + 0 + -0.0100923096761107 + -0.3950586915016174 + 0.1151477992534638 + <_> + + <_> + + + + <_>2 7 13 2 -1. + <_>2 8 13 1 2. + 0 + 2.5171819142997265e-003 + -0.3004311025142670 + 0.1825605034828186 + <_> + + <_> + + + + <_>3 14 14 4 -1. + <_>10 14 7 2 2. + <_>3 16 7 2 2. + 0 + -0.0170892402529716 + -0.5217359066009522 + 0.0974572673439980 + <_> + + <_> + + + + <_>0 11 6 9 -1. + <_>3 11 3 9 2. + 0 + -0.0558562688529491 + 0.5354002118110657 + -0.0892215520143509 + <_> + + <_> + + + + <_>5 9 13 2 -1. + <_>5 10 13 1 2. + 0 + -2.3930610623210669e-003 + -0.4701243937015533 + 0.0861414074897766 + <_> + + <_> + + + + <_>3 0 7 9 -1. + <_>3 3 7 3 3. + 0 + 3.6918919067829847e-003 + -0.2775559127330780 + 0.1518609970808029 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 2.1945969201624393e-003 + -0.1686706990003586 + 0.1195252016186714 + <_> + + <_> + + + + <_>5 4 9 5 -1. + <_>8 4 3 5 3. + 0 + 2.9675459954887629e-003 + -0.3894068002700806 + 0.1038891002535820 + <_> + + <_> + + + + <_>11 10 7 4 -1. + <_>11 12 7 2 2. + 0 + 1.9976729527115822e-003 + 0.0911413431167603 + -0.4105004966259003 + <_> + + <_> + + + + <_>2 5 8 15 -1. + <_>2 10 8 5 3. + 0 + -0.0203696992248297 + -0.5996876955032349 + 0.0693018063902855 + <_> + + <_> + + + + <_>10 11 5 6 -1. 
+ <_>10 14 5 3 2. + 0 + 2.3318571038544178e-003 + 0.0618925504386425 + -0.3288680016994476 + <_> + + <_> + + + + <_>5 11 5 6 -1. + <_>5 14 5 3 2. + 0 + -0.0428635887801647 + -0.7384496927261353 + 0.0570716597139835 + <_> + + <_> + + + + <_>4 8 13 2 -1. + <_>4 9 13 1 2. + 0 + 1.1471749749034643e-003 + -0.5137962102890015 + 0.0711964964866638 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + -0.0137356696650386 + -0.5378550887107849 + 0.0655420422554016 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0471655912697315 + 0.0453893616795540 + -0.6894479990005493 + <_> + + <_> + + + + <_>0 1 14 12 -1. + <_>0 1 7 6 2. + <_>7 7 7 6 2. + 0 + -0.0112048797309399 + 0.1693263947963715 + -0.2306171953678131 + <_> + + <_> + + + + <_>10 10 10 9 -1. + <_>10 13 10 3 3. + 0 + -0.1547842025756836 + -0.7770537137985230 + 0.0121424701064825 + <_> + + <_> + + + + <_>0 10 10 9 -1. + <_>0 13 10 3 3. + 0 + 5.8086342178285122e-003 + 0.1131810024380684 + -0.3320631980895996 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0285295695066452 + -0.5674728155136108 + 0.0487345606088638 + <_> + + <_> + + + + <_>8 5 4 10 -1. + <_>10 5 2 10 2. + 0 + -0.0387589484453201 + 0.5942310094833374 + -0.0751393362879753 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0310378093272448 + 0.0519735403358936 + -0.5855265259742737 + <_> + + <_> + + + + <_>7 1 4 14 -1. + <_>9 1 2 14 2. + 0 + 7.4786080404010136e-006 + -0.2762320041656494 + 0.1408849060535431 + <_> + + <_> + + + + <_>13 12 7 6 -1. + <_>13 14 7 2 3. + 0 + 0.0310002602636814 + 0.0313317291438580 + -0.5686017274856567 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -0.0498606599867344 + -0.8292462229728699 + 0.0388015806674957 + <_> + + <_> + + + + <_>3 8 15 3 -1. + <_>8 8 5 3 3. + 0 + -0.0423232801258564 + -0.4306210875511169 + 0.0165794808417559 + <_> + + <_> + + + + <_>6 15 8 4 -1. + <_>6 17 8 2 2. + 0 + 9.1987219639122486e-004 + -0.2115444988012314 + 0.1551752984523773 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.2055986970663071 + -0.0624031797051430 + 0.3222961127758026 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.2911841869354248 + 0.0392284691333771 + -0.9412822127342224 + <_> + + <_> + + + + <_>15 0 4 11 -1. + <_>15 0 2 11 2. + 0 + 7.8337509185075760e-003 + -0.1480659991502762 + 0.1784920990467072 + <_> + + <_> + + + + <_>7 0 4 18 -1. + <_>7 0 2 9 2. + <_>9 9 2 9 2. + 0 + 0.0113933198153973 + 0.0779877230525017 + -0.4242425858974457 + <_> + + <_> + + + + <_>12 2 8 18 -1. + <_>16 2 4 9 2. + <_>12 11 4 9 2. + 0 + -0.0918070226907730 + 0.3368948101997376 + -0.0561741292476654 + <_> + + <_> + + + + <_>4 2 12 18 -1. + <_>4 2 6 9 2. + <_>10 11 6 9 2. + 0 + -0.0160382501780987 + -0.2495401054620743 + 0.1457086950540543 + <_> + + <_> + + + + <_>4 6 12 6 -1. + <_>4 9 12 3 2. + 0 + 0.0548302903771400 + -0.1549600064754486 + 0.2032960057258606 + <_> + + <_> + + + + <_>0 9 18 4 -1. + <_>0 9 9 2 2. + <_>9 11 9 2 2. + 0 + 0.0244497004896402 + 0.0609743781387806 + -0.6307234168052673 + <_> + + <_> + + + + <_>2 0 18 4 -1. + <_>11 0 9 2 2. + <_>2 2 9 2 2. + 0 + 0.0292606707662344 + 0.0468336082994938 + -0.3798538148403168 + <_> + + <_> + + + + <_>1 0 4 11 -1. + <_>3 0 2 11 2. + 0 + 3.9965552277863026e-003 + -0.1692730039358139 + 0.1910032033920288 + <_> + + <_> + + + + <_>16 0 4 15 -1. + <_>16 0 2 15 2. + 0 + -0.0699388533830643 + 0.5465558767318726 + -0.0549657493829727 + -1.8755869865417480 + 4 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>3 2 3 11 2. 
+ 0 + 0.0458356216549873 + -0.4998284876346588 + 0.4096108078956604 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0263631008565426 + -0.3919320106506348 + 0.5156775712966919 + <_> + + <_> + + + + <_>2 17 15 3 -1. + <_>7 17 5 3 3. + 0 + 0.0151898302137852 + -0.5221636295318604 + 0.3136821985244751 + <_> + + <_> + + + + <_>5 4 10 4 -1. + <_>5 6 10 2 2. + 0 + -0.0208052806556225 + 0.3761447966098785 + -0.4737553894519806 + <_> + + <_> + + + + <_>3 9 14 8 -1. + <_>3 13 14 4 2. + 0 + -7.4902721680700779e-003 + 0.1628348976373673 + -0.7038447260856628 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.2771936953067780 + -0.1640412062406540 + 0.3348158001899719 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.0641884431242943 + -0.8017662167549133 + 0.1276382952928543 + <_> + + <_> + + + + <_>3 7 14 6 -1. + <_>3 9 14 2 3. + 0 + 0.0406681708991528 + -0.3338693082332611 + 0.2845618128776550 + <_> + + <_> + + + + <_>3 10 6 8 -1. + <_>5 10 2 8 3. + 0 + 7.4888020753860474e-003 + -0.3718892037868500 + 0.2593226134777069 + <_> + + <_> + + + + <_>0 5 20 8 -1. + <_>10 5 10 4 2. + <_>0 9 10 4 2. + 0 + 0.0649426728487015 + 0.1037290990352631 + -0.7167106866836548 + <_> + + <_> + + + + <_>0 5 16 8 -1. + <_>0 9 16 4 2. + 0 + -2.1149769891053438e-003 + -0.7568392753601074 + 0.0790195912122726 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + -4.8293141298927367e-004 + -0.4985207915306091 + 0.0811113268136978 + <_> + + <_> + + + + <_>2 6 15 5 -1. + <_>7 6 5 5 3. + 0 + 0.1399645954370499 + 0.0874975994229317 + -0.7638937234878540 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 0.0522119887173176 + 0.0316404812037945 + -0.5328137278556824 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 3.0680459458380938e-003 + -0.6245852708816528 + 0.1386954039335251 + <_> + + <_> + + + + <_>6 8 8 12 -1. + <_>10 8 4 6 2. + <_>6 14 4 6 2. + 0 + 0.0504788607358933 + 0.0790634974837303 + -0.7401704192161560 + <_> + + <_> + + + + <_>1 1 7 4 -1. + <_>1 3 7 2 2. + 0 + -8.5122063755989075e-003 + -0.4997166097164154 + 0.1113225966691971 + <_> + + <_> + + + + <_>0 0 20 8 -1. + <_>10 0 10 4 2. + <_>0 4 10 4 2. + 0 + 0.0700918063521385 + 0.0970819070935249 + -0.6187918782234192 + <_> + + <_> + + + + <_>5 3 5 9 -1. + <_>5 6 5 3 3. + 0 + -2.7261190116405487e-003 + 0.0975466296076775 + -0.5776004195213318 + <_> + + <_> + + + + <_>11 3 8 4 -1. + <_>11 3 4 4 2. + 0 + 0.0106765599921346 + -0.2905812859535217 + 0.1842612028121948 + <_> + + <_> + + + + <_>1 10 7 4 -1. + <_>1 12 7 2 2. + 0 + 6.3848652644082904e-004 + 0.1386975049972534 + -0.4254654049873352 + <_> + + <_> + + + + <_>5 10 12 6 -1. + <_>11 10 6 3 2. + <_>5 13 6 3 2. + 0 + -0.0479572601616383 + -0.7324913740158081 + 0.0411881096661091 + <_> + + <_> + + + + <_>1 3 8 4 -1. + <_>5 3 4 4 2. + 0 + 0.0171400494873524 + -0.3197345137596130 + 0.1684008985757828 + <_> + + <_> + + + + <_>6 0 9 5 -1. + <_>9 0 3 5 3. + 0 + 0.0785445421934128 + 0.0500532314181328 + -0.7141004800796509 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0113428495824337 + -0.3881097137928009 + 0.1297640949487686 + -1.9646480083465576 + 5 + -1 + <_> + + + <_> + + <_> + + + + <_>9 4 2 14 -1. + <_>9 11 2 7 2. + 0 + -8.6751781054772437e-005 + 0.2517991065979004 + -0.6772311925888062 + <_> + + <_> + + + + <_>8 1 12 19 -1. + <_>8 1 6 19 2. + 0 + 0.2055017948150635 + 0.0202171504497528 + -0.3361819982528687 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. 
+ 0 + 0.1389326006174088 + 0.1067826971411705 + -0.8671011924743652 + <_> + + <_> + + + + <_>7 6 6 10 -1. + <_>9 6 2 10 3. + 0 + 2.6432450395077467e-003 + -0.4105708897113800 + 0.2560392022132874 + <_> + + <_> + + + + <_>0 3 10 6 -1. + <_>0 3 5 3 2. + <_>5 6 5 3 2. + 0 + -1.6145260306075215e-003 + 0.1744816005229950 + -0.5029013156890869 + <_> + + <_> + + + + <_>6 5 8 8 -1. + <_>6 9 8 4 2. + 0 + -4.6492749825119972e-003 + -0.8396093249320984 + 0.1040996983647347 + <_> + + <_> + + + + <_>7 13 5 6 -1. + <_>7 16 5 3 2. + 0 + -5.5983918718993664e-003 + -0.5267335772514343 + 0.1211448982357979 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 14 4 4 2. + 0 + 2.1482799202203751e-003 + 0.0868319272994995 + -0.5238474011421204 + <_> + + <_> + + + + <_>4 6 8 8 -1. + <_>4 6 4 4 2. + <_>8 10 4 4 2. + 0 + -2.2942349314689636e-003 + 0.1566673070192337 + -0.3938758075237274 + <_> + + <_> + + + + <_>2 5 16 6 -1. + <_>2 7 16 2 3. + 0 + -1.0809659725055099e-003 + 0.0947775468230248 + -0.5796759724617004 + <_> + + <_> + + + + <_>5 7 10 12 -1. + <_>5 7 5 6 2. + <_>10 13 5 6 2. + 0 + -0.0187398791313171 + -0.4378077089786530 + 0.1275431960821152 + <_> + + <_> + + + + <_>6 11 13 3 -1. + <_>6 12 13 1 3. + 0 + -2.0956669468432665e-003 + 0.2127586007118225 + -0.1764553934335709 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0613701194524765 + -0.6700798869132996 + 0.0852911770343781 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 14 7 3 2. + 0 + -0.0450749695301056 + -0.4761415123939514 + 0.0383843891322613 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 4.5961341820657253e-003 + 0.0907766968011856 + -0.5364217758178711 + <_> + + <_> + + + + <_>3 10 16 6 -1. + <_>11 10 8 3 2. + <_>3 13 8 3 2. + 0 + -0.0562051795423031 + -0.4412812888622284 + 0.0263406392186880 + <_> + + <_> + + + + <_>0 8 6 12 -1. + <_>3 8 3 12 2. + 0 + -0.0170700307935476 + 0.3196252882480621 + -0.1569907963275909 + <_> + + <_> + + + + <_>0 5 20 15 -1. + <_>0 10 20 5 3. + 0 + 0.0137785403057933 + -0.4146823883056641 + 0.1083204001188278 + <_> + + <_> + + + + <_>1 11 16 4 -1. + <_>1 11 8 2 2. + <_>9 13 8 2 2. + 0 + 5.6932470761239529e-003 + 0.1097327023744583 + -0.4142096936702728 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 1.1573060182854533e-003 + -0.4699645936489105 + 0.1408822983503342 + <_> + + <_> + + + + <_>3 6 5 9 -1. + <_>3 9 5 3 3. + 0 + -4.3259391532046720e-005 + -0.5911747813224793 + 0.0722088366746902 + <_> + + <_> + + + + <_>10 10 6 5 -1. + <_>10 10 3 5 2. + 0 + -1.4467669825535268e-004 + 0.1434050053358078 + -0.2080902010202408 + <_> + + <_> + + + + <_>4 10 6 5 -1. + <_>7 10 3 5 2. + 0 + -0.0306675396859646 + -0.6418172717094421 + 0.0763162225484848 + <_> + + <_> + + + + <_>13 4 6 9 -1. + <_>15 4 2 9 3. + 0 + 6.4002368599176407e-003 + -0.1542620062828064 + 0.2061882019042969 + <_> + + <_> + + + + <_>1 4 6 7 -1. + <_>3 4 2 7 3. + 0 + 2.7318780776113272e-003 + -0.1842913031578064 + 0.2204626947641373 + <_> + + <_> + + + + <_>16 0 4 8 -1. + <_>16 0 2 8 2. + 0 + -0.0417598597705364 + 0.5128465890884399 + -0.0430972203612328 + <_> + + <_> + + + + <_>2 5 12 12 -1. + <_>2 11 12 6 2. + 0 + -0.0301744192838669 + -0.3613480925559998 + 0.1163339018821716 + <_> + + <_> + + + + <_>3 1 14 6 -1. + <_>3 3 14 2 3. + 0 + 6.8081771023571491e-003 + -0.2595328092575073 + 0.1492739021778107 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + 0.0434303693473339 + 0.0686012431979179 + -0.5822119116783142 + <_> + + <_> + + + + <_>10 2 10 18 -1. + <_>10 2 5 18 2. 
+ 0 + 0.0211213007569313 + -0.0853729173541069 + 0.0804985836148262 + <_> + + <_> + + + + <_>0 3 10 17 -1. + <_>5 3 5 17 2. + 0 + 0.0998402833938599 + 0.0532925203442574 + -0.7181965708732605 + <_> + + <_> + + + + <_>16 0 4 8 -1. + <_>16 0 2 8 2. + 0 + 5.6953770108520985e-003 + -0.0889761075377464 + 0.1348394006490707 + <_> + + <_> + + + + <_>0 0 4 8 -1. + <_>2 0 2 8 2. + 0 + -0.0599845685064793 + 0.6832429170608521 + -0.0519162714481354 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>10 12 10 2 3. + 0 + 5.9353262186050415e-003 + 0.1030519008636475 + -0.2536143958568573 + <_> + + <_> + + + + <_>5 11 5 9 -1. + <_>5 14 5 3 3. + 0 + -7.4867930379696190e-005 + 0.1334072947502136 + -0.2932355999946594 + <_> + + <_> + + + + <_>5 18 13 2 -1. + <_>5 19 13 1 2. + 0 + -2.5437519070692360e-004 + 0.1533578038215637 + -0.1938757002353668 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 7.7576987678185105e-004 + -0.3115557134151459 + 0.1063250973820686 + <_> + + <_> + + + + <_>5 6 14 2 -1. + <_>5 6 7 2 2. + 0 + 0.0544785000383854 + 0.0262774806469679 + -0.6668741106987000 + <_> + + <_> + + + + <_>1 6 14 2 -1. + <_>8 6 7 2 2. + 0 + 0.0126928500831127 + 0.0936130434274673 + -0.3915219008922577 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>10 10 4 4 2. + <_>6 14 4 4 2. + 0 + -0.0307669602334499 + -0.5923808813095093 + 0.0483149997889996 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + -0.0193661507219076 + 0.4366160929203033 + -0.0886729434132576 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -2.8705620206892490e-003 + 0.1524478048086166 + -0.1386117041110992 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0400036983191967 + 0.0587480515241623 + -0.6911970973014832 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -0.0811304673552513 + -0.7868431806564331 + 2.0421498920768499e-003 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -2.1017501130700111e-003 + 0.1910044997930527 + -0.1965968012809753 + <_> + + <_> + + + + <_>9 6 4 14 -1. + <_>9 13 4 7 2. + 0 + 8.6481617763638496e-003 + 0.0886892899870873 + -0.3741415143013001 + <_> + + <_> + + + + <_>3 7 12 5 -1. + <_>7 7 4 5 3. + 0 + -0.0524290204048157 + -0.7261599898338318 + 0.0394656881690025 + <_> + + <_> + + + + <_>3 13 14 3 -1. + <_>3 14 14 1 3. + 0 + 3.4464800264686346e-003 + -0.1164089962840080 + 0.2738626897335053 + <_> + + <_> + + + + <_>1 0 16 4 -1. + <_>1 2 16 2 2. + 0 + -7.0581152103841305e-003 + -0.3628394007682800 + 0.0920236781239510 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + -0.0574122592806816 + -0.8883938193321228 + 0.0266477596014738 + <_> + + <_> + + + + <_>0 1 6 8 -1. + <_>3 1 3 8 2. + 0 + 3.3479030244052410e-003 + -0.1488405019044876 + 0.1836643069982529 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>14 0 3 9 2. + 0 + -0.0539584197103977 + 0.3809813857078552 + -0.0440465807914734 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>3 0 3 9 2. + 0 + -0.0257196892052889 + 0.3257082104682922 + -0.1007822006940842 + -2.1222629547119141 + 6 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.1244122013449669 + -0.3857372999191284 + 0.3927366137504578 + <_> + + <_> + + + + <_>4 7 12 4 -1. + <_>4 9 12 2 2. + 0 + 0.0378028787672520 + -0.4702867865562439 + 0.3578683137893677 + <_> + + <_> + + + + <_>1 9 9 8 -1. + <_>4 9 3 8 3. + 0 + 0.0304414294660091 + -0.3946039974689484 + 0.3251850008964539 + <_> + + <_> + + + + <_>2 0 16 2 -1. + <_>2 1 16 1 2. + 0 + 3.9223438943736255e-004 + -0.4516651034355164 + 0.1967238038778305 + <_> + + <_> + + + + <_>7 5 6 7 -1. 
+ <_>9 5 2 7 3. + 0 + 0.0390777103602886 + -0.2107332944869995 + 0.4386476874351502 + <_> + + <_> + + + + <_>12 10 5 6 -1. + <_>12 13 5 3 2. + 0 + -8.9118082541972399e-005 + 0.1519695967435837 + -0.5956351757049561 + <_> + + <_> + + + + <_>1 16 10 3 -1. + <_>6 16 5 3 2. + 0 + 8.8415127247571945e-003 + -0.4929248988628388 + 0.1740657985210419 + <_> + + <_> + + + + <_>9 5 3 12 -1. + <_>9 11 3 6 2. + 0 + 0.0136660598218441 + 0.0928617492318153 + -0.5518230795860291 + <_> + + <_> + + + + <_>3 4 14 12 -1. + <_>3 4 7 6 2. + <_>10 10 7 6 2. + 0 + -0.0612033009529114 + -0.6798529028892517 + 0.1004908010363579 + <_> + + <_> + + + + <_>6 6 9 8 -1. + <_>6 10 9 4 2. + 0 + 5.7719892356544733e-004 + -0.5830199718475342 + 0.1108962967991829 + <_> + + <_> + + + + <_>0 7 7 4 -1. + <_>0 9 7 2 2. + 0 + 2.8370460495352745e-004 + -0.5979334115982056 + 0.0938983783125877 + <_> + + <_> + + + + <_>16 3 4 8 -1. + <_>16 3 2 8 2. + 0 + 0.0176659803837538 + -0.2201547026634216 + 0.3453308939933777 + <_> + + <_> + + + + <_>0 3 6 10 -1. + <_>3 3 3 10 2. + 0 + 0.0256973300129175 + -0.3619570136070252 + 0.1687735021114349 + <_> + + <_> + + + + <_>5 4 10 6 -1. + <_>5 6 10 2 3. + 0 + -0.0403166897594929 + 0.2296440005302429 + -0.2930144071578980 + <_> + + <_> + + + + <_>4 5 12 4 -1. + <_>8 5 4 4 3. + 0 + 4.6522719785571098e-003 + -0.5899596810340881 + 0.1046691015362740 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -0.0134060001000762 + -0.3957209885120392 + 0.0835281163454056 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0361272804439068 + 0.0941658020019531 + -0.5409718155860901 + <_> + + <_> + + + + <_>11 2 6 10 -1. + <_>14 2 3 5 2. + <_>11 7 3 5 2. + 0 + 2.2792080417275429e-003 + 0.1281906962394714 + -0.3651453852653503 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 1.4454070478677750e-003 + -0.2328159958124161 + 0.1982991993427277 + <_> + + <_> + + + + <_>3 14 15 6 -1. + <_>3 17 15 3 2. + 0 + 0.0574825294315815 + 0.0750423967838287 + -0.5770497918128967 + <_> + + <_> + + + + <_>0 11 7 4 -1. + <_>0 13 7 2 2. + 0 + 3.3360819797962904e-003 + 0.0880120173096657 + -0.4677925109863281 + <_> + + <_> + + + + <_>5 9 12 6 -1. + <_>11 9 6 3 2. + <_>5 12 6 3 2. + 0 + 0.0372257493436337 + 0.0321551114320755 + -0.6634662151336670 + <_> + + <_> + + + + <_>2 10 14 4 -1. + <_>2 10 7 2 2. + <_>9 12 7 2 2. + 0 + 0.0166127607226372 + 0.0916898399591446 + -0.5212817192077637 + <_> + + <_> + + + + <_>1 0 19 9 -1. + <_>1 3 19 3 3. + 0 + 0.0205432493239641 + -0.2875337898731232 + 0.1426130980253220 + <_> + + <_> + + + + <_>1 11 16 3 -1. + <_>1 12 16 1 3. + 0 + -1.5633470320608467e-004 + 0.2024673074483872 + -0.2242446988821030 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 0.1218881011009216 + -0.1646130979061127 + 0.1758392006158829 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.0464134402573109 + -0.6897801756858826 + 0.0643499270081520 + <_> + + <_> + + + + <_>3 6 15 5 -1. + <_>8 6 5 5 3. + 0 + 0.1494643986225128 + 0.0398058407008648 + -0.7017732858657837 + <_> + + <_> + + + + <_>4 2 6 7 -1. + <_>6 2 2 7 3. + 0 + 0.0143468696624041 + 0.0926287770271301 + -0.4631417095661163 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0361587181687355 + 0.0644129365682602 + -0.6527721285820007 + <_> + + <_> + + + + <_>2 11 7 6 -1. + <_>2 14 7 3 2. + 0 + -0.0550982281565666 + -0.6102198958396912 + 0.0660342872142792 + <_> + + <_> + + + + <_>12 11 5 6 -1. + <_>12 14 5 3 2. 
+ 0 + -3.2978600356727839e-003 + 0.0865798667073250 + -0.2184482067823410 + <_> + + <_> + + + + <_>4 5 3 15 -1. + <_>4 10 3 5 3. + 0 + 4.1257790289819241e-003 + -0.4498029947280884 + 0.0932512506842613 + <_> + + <_> + + + + <_>11 2 6 10 -1. + <_>14 2 3 5 2. + <_>11 7 3 5 2. + 0 + 0.0334652699530125 + 0.0145244998857379 + -0.4020000100135803 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0225846301764250 + -0.6006761789321899 + 0.0644167214632034 + <_> + + <_> + + + + <_>7 10 10 9 -1. + <_>7 13 10 3 3. + 0 + -7.1505038067698479e-003 + 0.0671394690871239 + -0.1294730007648468 + <_> + + <_> + + + + <_>2 6 16 10 -1. + <_>2 6 8 5 2. + <_>10 11 8 5 2. + 0 + -0.0514400415122509 + -0.4846647977828980 + 0.0820937529206276 + <_> + + <_> + + + + <_>0 9 20 4 -1. + <_>10 9 10 2 2. + <_>0 11 10 2 2. + 0 + -0.0191009491682053 + -0.3539437949657440 + 0.1085169017314911 + <_> + + <_> + + + + <_>4 6 4 7 -1. + <_>6 6 2 7 2. + 0 + 6.9468282163143158e-003 + 0.1540756970643997 + -0.2304019033908844 + <_> + + <_> + + + + <_>18 0 2 20 -1. + <_>18 0 1 20 2. + 0 + -0.0238866005092859 + 0.4900797903537750 + -0.0596504285931587 + <_> + + <_> + + + + <_>3 1 13 2 -1. + <_>3 2 13 1 2. + 0 + -1.3964619720354676e-003 + -0.3370470106601715 + 0.1156945973634720 + <_> + + <_> + + + + <_>17 0 3 18 -1. + <_>18 0 1 18 3. + 0 + 0.0263206008821726 + -0.0391326807439327 + 0.3761535882949829 + <_> + + <_> + + + + <_>1 7 15 5 -1. + <_>6 7 5 5 3. + 0 + 5.0336541607975960e-003 + -0.3545702099800110 + 0.1078672036528587 + <_> + + <_> + + + + <_>9 3 2 15 -1. + <_>9 3 1 15 2. + 0 + -0.0115239601582289 + 0.3514864146709442 + -0.1137370988726616 + -2.1038460731506348 + 7 + -1 + <_> + + + <_> + + <_> + + + + <_>5 3 10 6 -1. + <_>5 6 10 3 2. + 0 + -5.6698019616305828e-003 + 0.2529909014701843 + -0.5537719726562500 + <_> + + <_> + + + + <_>10 9 4 8 -1. + <_>10 13 4 4 2. + 0 + 1.2186550302430987e-003 + 0.0917235389351845 + -0.6566165089607239 + <_> + + <_> + + + + <_>7 8 4 12 -1. + <_>7 12 4 4 3. + 0 + 3.1903409399092197e-003 + 0.1211680993437767 + -0.5440536141395569 + <_> + + <_> + + + + <_>5 5 15 10 -1. + <_>5 10 15 5 2. + 0 + -0.0121176801621914 + -0.6821125149726868 + 0.1117822006344795 + <_> + + <_> + + + + <_>4 7 7 4 -1. + <_>4 9 7 2 2. + 0 + 2.2634069900959730e-003 + -0.5631396174430847 + 0.0996292605996132 + <_> + + <_> + + + + <_>4 5 12 4 -1. + <_>8 5 4 4 3. + 0 + 2.2871519904583693e-003 + -0.5022724270820618 + 0.1128802970051765 + <_> + + <_> + + + + <_>1 1 7 4 -1. + <_>1 3 7 2 2. + 0 + -7.4018500745296478e-003 + -0.5062230825424194 + 0.1032527014613152 + <_> + + <_> + + + + <_>11 9 4 8 -1. + <_>11 13 4 4 2. + 0 + 6.5725757740437984e-003 + 0.0316036716103554 + -0.4587934911251068 + <_> + + <_> + + + + <_>4 6 12 12 -1. + <_>4 6 6 6 2. + <_>10 12 6 6 2. + 0 + -0.0172370690852404 + -0.3655610084533691 + 0.1412204951047897 + <_> + + <_> + + + + <_>11 1 6 10 -1. + <_>14 1 3 5 2. + <_>11 6 3 5 2. + 0 + -1.7646619817242026e-003 + 0.1896221041679382 + -0.3434976041316986 + <_> + + <_> + + + + <_>1 5 16 12 -1. + <_>1 5 8 6 2. + <_>9 11 8 6 2. + 0 + 0.0260859504342079 + 0.0873692333698273 + -0.5333216190338135 + <_> + + <_> + + + + <_>4 7 12 6 -1. + <_>4 9 12 2 3. + 0 + 8.5357967764139175e-003 + -0.3736073076725006 + 0.1450852006673813 + <_> + + <_> + + + + <_>6 0 6 10 -1. + <_>6 0 3 5 2. + <_>9 5 3 5 2. + 0 + -6.2934341840445995e-003 + -0.4577507972717285 + 0.1001626998186112 + <_> + + <_> + + + + <_>7 1 12 8 -1. + <_>13 1 6 4 2. + <_>7 5 6 4 2. 
+ 0 + 0.0970815494656563 + 3.3761640079319477e-003 + -0.8467985987663269 + <_> + + <_> + + + + <_>0 1 4 18 -1. + <_>2 1 2 18 2. + 0 + -0.0994557216763496 + 0.7789235711097717 + -0.0544560886919498 + <_> + + <_> + + + + <_>15 9 5 9 -1. + <_>15 12 5 3 3. + 0 + 0.0391285493969917 + 0.0394799299538136 + -0.4662021100521088 + <_> + + <_> + + + + <_>0 12 20 6 -1. + <_>0 12 10 3 2. + <_>10 15 10 3 2. + 0 + 0.0684237629175186 + 0.0481634102761745 + -0.8191074132919312 + <_> + + <_> + + + + <_>10 4 4 15 -1. + <_>10 9 4 5 3. + 0 + -0.0173045508563519 + -0.4600183069705963 + 0.0217813402414322 + <_> + + <_> + + + + <_>1 1 12 8 -1. + <_>1 1 6 4 2. + <_>7 5 6 4 2. + 0 + 4.5203989429865032e-005 + 0.1559097021818161 + -0.2573460042476654 + <_> + + <_> + + + + <_>11 11 5 6 -1. + <_>11 14 5 3 2. + 0 + -0.0537207499146461 + -0.7398458719253540 + 0.0236581396311522 + <_> + + <_> + + + + <_>4 11 5 6 -1. + <_>4 14 5 3 2. + 0 + -2.1576840663328767e-004 + 0.1180372014641762 + -0.3538045883178711 + <_> + + <_> + + + + <_>4 14 13 6 -1. + <_>4 16 13 2 3. + 0 + 1.2613219441846013e-003 + -0.1831308007240295 + 0.1630696058273315 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>2 0 2 9 3. + 0 + 0.0227140299975872 + -0.0956473425030708 + 0.3806278109550476 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0209583304822445 + 0.0611855983734131 + -0.5264493823051453 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 0.0154584497213364 + 0.0644667893648148 + -0.4744128882884979 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>5 7 10 2 3. + 0 + -5.0828810781240463e-003 + 0.1001883000135422 + -0.3639725148677826 + <_> + + <_> + + + + <_>2 0 16 2 -1. + <_>2 1 16 1 2. + 0 + 1.1842510430142283e-003 + -0.2060351967811585 + 0.1712958961725235 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.0501877702772617 + -0.0709249675273895 + 0.1043531969189644 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1753520071506500 + 0.0377662107348442 + -0.8080273866653442 + <_> + + <_> + + + + <_>1 2 18 10 -1. + <_>10 2 9 5 2. + <_>1 7 9 5 2. + 0 + -0.0684255585074425 + -0.5021489858627319 + 0.0546711198985577 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 2.2496099118143320e-003 + -0.2801350951194763 + 0.1095009967684746 + <_> + + <_> + + + + <_>5 4 10 14 -1. + <_>10 4 5 7 2. + <_>5 11 5 7 2. + 0 + 0.0853556320071220 + 0.0333769805729389 + -0.7367684245109558 + <_> + + <_> + + + + <_>0 11 5 6 -1. + <_>0 14 5 3 2. + 0 + -0.0288259796798229 + -0.4852809906005859 + 0.0495960786938667 + <_> + + <_> + + + + <_>7 11 13 3 -1. + <_>7 12 13 1 3. + 0 + -1.3562700478360057e-003 + 0.1849309056997299 + -0.1654148995876312 + <_> + + <_> + + + + <_>0 11 8 4 -1. + <_>0 13 8 2 2. + 0 + 1.5731659950688481e-003 + 0.0904318168759346 + -0.3019388020038605 + <_> + + <_> + + + + <_>5 6 14 8 -1. + <_>5 10 14 4 2. + 0 + -5.2912188693881035e-003 + -0.4396361112594605 + 0.0468806996941566 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0422001406550407 + -0.0753480121493340 + 0.3771280944347382 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0310307703912258 + 0.0660533681511879 + -0.4737842082977295 + <_> + + <_> + + + + <_>1 13 18 3 -1. + <_>1 14 18 1 3. + 0 + 8.0451928079128265e-003 + -0.0773269832134247 + 0.3489888906478882 + <_> + + <_> + + + + <_>3 15 14 4 -1. + <_>10 15 7 2 2. + <_>3 17 7 2 2. + 0 + 0.0237911809235811 + 0.0486299283802509 + -0.5815547704696655 + <_> + + <_> + + + + <_>0 2 2 13 -1. + <_>1 2 1 13 2. 
+ 0 + -0.0268846806138754 + 0.7385225892066956 + -0.0400251187384129 + <_> + + <_> + + + + <_>4 9 12 8 -1. + <_>8 9 4 8 3. + 0 + -1.7013859469443560e-003 + 0.1411640942096710 + -0.1830507963895798 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + -0.0322589799761772 + -0.6459869742393494 + 0.0417741797864437 + <_> + + <_> + + + + <_>16 0 4 20 -1. + <_>16 0 2 20 2. + 0 + -0.0917195528745651 + 0.6365169286727905 + -0.0444062799215317 + <_> + + <_> + + + + <_>0 0 4 20 -1. + <_>2 0 2 20 2. + 0 + 0.0112532200291753 + -0.1039896979928017 + 0.2438649982213974 + <_> + + <_> + + + + <_>16 1 4 19 -1. + <_>16 1 2 19 2. + 0 + 9.1702006757259369e-003 + -0.1014230027794838 + 0.1732572019100189 + <_> + + <_> + + + + <_>1 0 16 4 -1. + <_>1 0 8 2 2. + <_>9 2 8 2 2. + 0 + -0.0375844314694405 + -0.6599904894828796 + 0.0353572592139244 + <_> + + <_> + + + + <_>12 6 4 14 -1. + <_>14 6 2 7 2. + <_>12 13 2 7 2. + 0 + 1.4904039562679827e-004 + -0.1250495016574860 + 0.1016137972474098 + <_> + + <_> + + + + <_>2 8 15 3 -1. + <_>2 9 15 1 3. + 0 + 5.6240631965920329e-004 + -0.2151121944189072 + 0.1053744032979012 + <_> + + <_> + + + + <_>7 6 8 10 -1. + <_>11 6 4 5 2. + <_>7 11 4 5 2. + 0 + -0.0173142701387405 + -0.1679829061031342 + 0.0612074993550777 + <_> + + <_> + + + + <_>0 0 4 20 -1. + <_>2 0 2 20 2. + 0 + -0.0154298702254891 + 0.2567448019981384 + -0.0971934869885445 + <_> + + <_> + + + + <_>5 5 10 3 -1. + <_>5 5 5 3 2. + 0 + -0.0156120797619224 + -0.3579750061035156 + 0.0692600682377815 + <_> + + <_> + + + + <_>1 17 14 3 -1. + <_>1 18 14 1 3. + 0 + 7.4424187187105417e-004 + -0.1574046015739441 + 0.1492107063531876 + <_> + + <_> + + + + <_>15 6 5 9 -1. + <_>15 9 5 3 3. + 0 + 0.0790083408355713 + 0.0359247289597988 + -0.6490759253501892 + <_> + + <_> + + + + <_>7 6 4 10 -1. + <_>9 6 2 10 2. + 0 + -3.3477540127933025e-003 + -0.2579470872879028 + 0.0816268622875214 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + 0.0355894193053246 + -0.0468700490891933 + 0.5394526720046997 + <_> + + <_> + + + + <_>5 4 8 14 -1. + <_>5 4 4 7 2. + <_>9 11 4 7 2. + 0 + 7.6168961822986603e-004 + 0.0804098695516586 + -0.2804597020149231 + <_> + + <_> + + + + <_>4 6 12 8 -1. + <_>10 6 6 4 2. + <_>4 10 6 4 2. + 0 + 9.6126887947320938e-003 + 0.0927157774567604 + -0.2275521010160446 + <_> + + <_> + + + + <_>3 2 13 6 -1. + <_>3 4 13 2 3. + 0 + 0.0345827899873257 + -0.0954955071210861 + 0.2811649143695831 + <_> + + <_> + + + + <_>10 4 7 10 -1. + <_>10 9 7 5 2. + 0 + -8.2031842321157455e-003 + -0.3316228985786438 + 0.0406297110021114 + <_> + + <_> + + + + <_>3 4 14 10 -1. + <_>3 4 7 5 2. + <_>10 9 7 5 2. + 0 + 0.0255401097238064 + 0.0704589337110519 + -0.3279935121536255 + <_> + + <_> + + + + <_>16 4 3 13 -1. + <_>17 4 1 13 3. + 0 + -3.1389920040965080e-003 + 0.1252934932708740 + -0.0607668012380600 + <_> + + <_> + + + + <_>1 4 3 13 -1. + <_>2 4 1 13 3. + 0 + 4.5892409980297089e-003 + -0.0953354462981224 + 0.2473867982625961 + <_> + + <_> + + + + <_>11 10 8 6 -1. + <_>11 12 8 2 3. + 0 + -0.0232600308954716 + -0.2382315993309021 + 0.0335029698908329 + <_> + + <_> + + + + <_>0 10 9 4 -1. + <_>0 12 9 2 2. + 0 + 1.7964519793167710e-003 + 0.0898438617587090 + -0.2804915904998779 + <_> + + <_> + + + + <_>7 8 12 8 -1. + <_>13 8 6 4 2. + <_>7 12 6 4 2. + 0 + -0.1095291003584862 + -0.4620654881000519 + 7.4333418160676956e-003 + <_> + + <_> + + + + <_>1 8 12 8 -1. + <_>1 8 6 4 2. + <_>7 12 6 4 2. + 0 + 6.8442770279943943e-003 + 0.0735201090574265 + -0.3619070053100586 + <_> + + <_> + + + + <_>1 0 18 10 -1. + <_>7 0 6 10 3. 
+ 0 + -0.0737198516726494 + 0.4113180041313171 + -0.0682930573821068 + <_> + + <_> + + + + <_>0 2 12 12 -1. + <_>4 2 4 12 3. + 0 + 9.4485012814402580e-003 + -0.1213229969143868 + 0.2149195969104767 + <_> + + <_> + + + + <_>8 11 12 9 -1. + <_>12 11 4 9 3. + 0 + -0.0746860578656197 + 0.2429201006889343 + -0.0385207198560238 + <_> + + <_> + + + + <_>5 10 4 9 -1. + <_>7 10 2 9 2. + 0 + -0.0189582295715809 + -0.3726381957530975 + 0.0683819502592087 + <_> + + <_> + + + + <_>10 2 3 10 -1. + <_>10 7 3 5 2. + 0 + -8.3170487778261304e-004 + 0.0957854464650154 + -0.1016902029514313 + -1.9109580516815186 + 8 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.1523323059082031 + -0.3180535137653351 + 0.4703998863697052 + <_> + + <_> + + + + <_>9 12 8 8 -1. + <_>13 12 4 4 2. + <_>9 16 4 4 2. + 0 + 8.8482722640037537e-003 + -0.3613426983356476 + 0.2733295857906342 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0297884102910757 + -0.2805927991867065 + 0.3627023994922638 + <_> + + <_> + + + + <_>10 2 9 15 -1. + <_>13 2 3 15 3. + 0 + 0.0527256391942501 + -0.1932056993246079 + 0.3550725877285004 + <_> + + <_> + + + + <_>1 1 9 15 -1. + <_>4 1 3 15 3. + 0 + 0.0260774195194244 + -0.3712019920349121 + 0.2703844010829926 + <_> + + <_> + + + + <_>5 4 10 6 -1. + <_>5 6 10 2 3. + 0 + -0.0448785200715065 + 0.2911930084228516 + -0.3517824113368988 + <_> + + <_> + + + + <_>5 6 5 8 -1. + <_>5 10 5 4 2. + 0 + -9.3984341947361827e-004 + -0.6014366149902344 + 0.1181579008698463 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 3.1817350536584854e-003 + -0.6163272261619568 + 0.1058147028088570 + <_> + + <_> + + + + <_>3 9 5 8 -1. + <_>3 13 5 4 2. + 0 + -6.2214181525632739e-004 + 0.1170104965567589 + -0.6187378168106079 + <_> + + <_> + + + + <_>11 1 6 12 -1. + <_>14 1 3 6 2. + <_>11 7 3 6 2. + 0 + 5.4993429221212864e-003 + 0.0717406421899796 + -0.3212271034717560 + <_> + + <_> + + + + <_>3 12 8 8 -1. + <_>3 12 4 4 2. + <_>7 16 4 4 2. + 0 + 7.0621701888740063e-003 + -0.3081459999084473 + 0.1829912960529327 + <_> + + <_> + + + + <_>15 0 3 15 -1. + <_>15 5 3 5 3. + 0 + -0.0344922989606857 + -0.3695257008075714 + 0.1114277988672257 + <_> + + <_> + + + + <_>2 5 14 8 -1. + <_>2 5 7 4 2. + <_>9 9 7 4 2. + 0 + -0.0537834316492081 + -0.6668996214866638 + 0.0848636403679848 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -0.0201949104666710 + -0.4230006933212280 + 0.0563254691660404 + <_> + + <_> + + + + <_>3 1 6 10 -1. + <_>3 1 3 5 2. + <_>6 6 3 5 2. + 0 + -7.6839578105136752e-004 + 0.1354745030403137 + -0.3569628894329071 + <_> + + <_> + + + + <_>4 8 13 2 -1. + <_>4 9 13 1 2. + 0 + 6.6877179779112339e-003 + -0.3437983095645905 + 0.1330209970474243 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1114740967750549 + -0.4952355027198792 + 0.0973030030727386 + <_> + + <_> + + + + <_>1 2 19 2 -1. + <_>1 3 19 1 2. + 0 + -8.5021732375025749e-003 + -0.5177899003028870 + 0.0671889036893845 + <_> + + <_> + + + + <_>1 14 7 6 -1. + <_>1 16 7 2 3. + 0 + -0.0188970193266869 + -0.4706476926803589 + 0.0908737778663635 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + 5.7387170381844044e-003 + -0.1486068964004517 + 0.3097684085369110 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 0.0326040498912334 + 0.0786777064204216 + -0.5471382737159729 + <_> + + <_> + + + + <_>7 0 13 2 -1. + <_>7 1 13 1 2. + 0 + 1.8975350030814297e-005 + -0.2435985058546066 + 0.0989089310169220 + <_> + + <_> + + + + <_>6 6 8 12 -1. + <_>6 10 8 4 3. 
+ 0 + -1.9267159514129162e-003 + -0.5052297711372376 + 0.0751193314790726 + <_> + + <_> + + + + <_>7 1 8 8 -1. + <_>11 1 4 4 2. + <_>7 5 4 4 2. + 0 + -7.7145430259406567e-003 + -0.2501496076583862 + 0.1021149978041649 + <_> + + <_> + + + + <_>5 1 8 8 -1. + <_>5 1 4 4 2. + <_>9 5 4 4 2. + 0 + -0.0188066493719816 + -0.4326916933059692 + 0.1114768013358116 + <_> + + <_> + + + + <_>10 10 8 6 -1. + <_>10 12 8 2 3. + 0 + 0.0299121998250484 + 0.0467484481632710 + -0.5881829261779785 + <_> + + <_> + + + + <_>8 2 3 12 -1. + <_>8 8 3 6 2. + 0 + -7.4260600376874208e-004 + 0.1838930994272232 + -0.2013826072216034 + <_> + + <_> + + + + <_>12 5 7 8 -1. + <_>12 9 7 4 2. + 0 + 4.0662181563675404e-003 + -0.4494845867156982 + 0.0868813768029213 + <_> + + <_> + + + + <_>1 2 6 14 -1. + <_>3 2 2 14 3. + 0 + 0.0186816696077585 + -0.1710352003574371 + 0.2293123006820679 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 0.0465806908905506 + 0.0438743792474270 + -0.6670460104942322 + <_> + + <_> + + + + <_>1 5 7 8 -1. + <_>1 9 7 4 2. + 0 + -0.0150307398289442 + -0.7656944990158081 + 0.0425244905054569 + <_> + + <_> + + + + <_>8 4 4 16 -1. + <_>8 12 4 8 2. + 0 + 0.0636028200387955 + 0.0336294881999493 + -0.8677732944488525 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -0.0336131006479263 + -0.6746404767036438 + 0.0451969206333160 + <_> + + <_> + + + + <_>11 10 7 6 -1. + <_>11 12 7 2 3. + 0 + -0.0443145297467709 + -0.4705643057823181 + 0.0209879502654076 + <_> + + <_> + + + + <_>2 10 7 6 -1. + <_>2 12 7 2 3. + 0 + 0.0291758198291063 + 0.0560364909470081 + -0.6574596166610718 + <_> + + <_> + + + + <_>5 12 13 3 -1. + <_>5 13 13 1 3. + 0 + 8.4737781435251236e-003 + -0.1231212988495827 + 0.3603718876838684 + <_> + + <_> + + + + <_>1 15 7 4 -1. + <_>1 17 7 2 2. + 0 + -0.0269307401031256 + -0.6525511741638184 + 0.0607266202569008 + <_> + + <_> + + + + <_>2 2 17 6 -1. + <_>2 4 17 2 3. + 0 + 0.0379301384091377 + -0.1549136042594910 + 0.2177045047283173 + <_> + + <_> + + + + <_>1 15 8 4 -1. + <_>5 15 4 4 2. + 0 + 0.0164300501346588 + -0.2525069117546082 + 0.1545823067426682 + <_> + + <_> + + + + <_>10 1 4 8 -1. + <_>10 1 2 8 2. + 0 + 0.0510798096656799 + 0.0307734999805689 + -0.6492931246757507 + <_> + + <_> + + + + <_>6 1 4 8 -1. + <_>8 1 2 8 2. + 0 + 1.6663300339132547e-003 + -0.3742555975914002 + 0.0813921764492989 + <_> + + <_> + + + + <_>10 3 3 14 -1. + <_>11 3 1 14 3. + 0 + -9.0896980836987495e-003 + 0.1785404980182648 + -0.0765780806541443 + <_> + + <_> + + + + <_>0 11 18 4 -1. + <_>0 11 9 2 2. + <_>9 13 9 2 2. + 0 + 0.0206291992217302 + 0.0723732635378838 + -0.4205057919025421 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + 8.2410024479031563e-003 + 0.0328966788947582 + -0.3732526898384094 + <_> + + <_> + + + + <_>2 7 12 12 -1. + <_>2 7 6 6 2. + <_>8 13 6 6 2. + 0 + -0.0461264997720718 + -0.3735642135143280 + 0.0773367807269096 + <_> + + <_> + + + + <_>4 11 13 2 -1. + <_>4 12 13 1 2. + 0 + -8.3484929054975510e-003 + 0.1869013011455536 + -0.1512683928012848 + <_> + + <_> + + + + <_>0 4 15 12 -1. + <_>0 10 15 6 2. + 0 + -0.0476890802383423 + -0.4073002040386200 + 0.0875983685255051 + <_> + + <_> + + + + <_>5 2 11 8 -1. + <_>5 6 11 4 2. + 0 + -5.0166220171377063e-004 + 0.1203676983714104 + -0.2471766024827957 + <_> + + <_> + + + + <_>2 8 13 3 -1. + <_>2 9 13 1 3. + 0 + 2.1794239728478715e-005 + -0.2980081140995026 + 0.1206500008702278 + <_> + + <_> + + + + <_>15 3 5 9 -1. + <_>15 6 5 3 3. 
+ 0 + -0.0705972909927368 + -0.6811661124229431 + 0.0641989484429359 + <_> + + <_> + + + + <_>7 3 3 13 -1. + <_>8 3 1 13 3. + 0 + -6.4999358728528023e-003 + 0.2621915936470032 + -0.1401500999927521 + <_> + + <_> + + + + <_>1 9 18 3 -1. + <_>7 9 6 3 3. + 0 + 5.3664338774979115e-003 + -0.3427318036556244 + 0.0920485705137253 + <_> + + <_> + + + + <_>8 1 3 13 -1. + <_>9 1 1 13 3. + 0 + -0.0133419502526522 + 0.4025807976722717 + -0.0720523074269295 + <_> + + <_> + + + + <_>9 3 2 13 -1. + <_>9 3 1 13 2. + 0 + 0.0122430901974440 + -0.0824268311262131 + 0.3836919963359833 + -2.0048389434814453 + 9 + -1 + <_> + + + <_> + + <_> + + + + <_>1 2 8 8 -1. + <_>1 2 4 4 2. + <_>5 6 4 4 2. + 0 + -2.8617910575121641e-003 + 0.2144317030906677 + -0.5153213739395142 + <_> + + <_> + + + + <_>9 5 3 12 -1. + <_>9 11 3 6 2. + 0 + 1.9125089747831225e-003 + 0.1448303014039993 + -0.6117541193962097 + <_> + + <_> + + + + <_>5 4 9 5 -1. + <_>8 4 3 5 3. + 0 + 4.8059499822556973e-003 + -0.4423562884330750 + 0.1346658021211624 + <_> + + <_> + + + + <_>0 3 20 16 -1. + <_>0 11 20 8 2. + 0 + -0.0957776233553886 + -0.4891478121280670 + 0.1316964030265808 + <_> + + <_> + + + + <_>0 4 16 6 -1. + <_>0 6 16 2 3. + 0 + -8.9395968243479729e-003 + 0.1479054987430573 + -0.4669628143310547 + <_> + + <_> + + + + <_>9 6 5 12 -1. + <_>9 12 5 6 2. + 0 + 8.1128235906362534e-003 + 0.0506713315844536 + -0.4022750854492188 + <_> + + <_> + + + + <_>5 6 10 8 -1. + <_>5 10 10 4 2. + 0 + 2.2638900554738939e-004 + -0.5092825293540955 + 0.0821132063865662 + <_> + + <_> + + + + <_>2 8 16 3 -1. + <_>2 9 16 1 3. + 0 + -6.1516009736806154e-004 + -0.3813680112361908 + 0.1015795022249222 + <_> + + <_> + + + + <_>2 9 16 3 -1. + <_>2 10 16 1 3. + 0 + -3.2050691079348326e-003 + -0.5835245847702026 + 0.0623853988945484 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + 5.4250762332230806e-004 + -0.2554849982261658 + 0.1483220010995865 + <_> + + <_> + + + + <_>6 1 7 6 -1. + <_>6 3 7 2 3. + 0 + 1.0713520459830761e-003 + -0.3533431887626648 + 0.1179158985614777 + <_> + + <_> + + + + <_>3 10 14 3 -1. + <_>3 11 14 1 3. + 0 + -1.7755989683791995e-003 + -0.3408727943897247 + 0.0947401076555252 + <_> + + <_> + + + + <_>1 4 6 16 -1. + <_>1 4 3 8 2. + <_>4 12 3 8 2. + 0 + -0.0930142030119896 + 0.7468546032905579 + -0.0524433404207230 + <_> + + <_> + + + + <_>1 14 19 6 -1. + <_>1 16 19 2 3. + 0 + -0.0141921304166317 + -0.3143399953842163 + 0.0904521867632866 + <_> + + <_> + + + + <_>5 9 4 8 -1. + <_>7 9 2 8 2. + 0 + -5.3375191055238247e-004 + 0.1411971002817154 + -0.2029671072959900 + <_> + + <_> + + + + <_>5 7 12 4 -1. + <_>9 7 4 4 3. + 0 + 0.0948446094989777 + 0.0146256797015667 + -0.6221520900726318 + <_> + + <_> + + + + <_>3 6 12 4 -1. + <_>7 6 4 4 3. + 0 + 1.1853160103783011e-003 + -0.2598401010036469 + 0.1215312033891678 + <_> + + <_> + + + + <_>6 5 8 6 -1. + <_>6 7 8 2 3. + 0 + -2.4541220627725124e-003 + 0.0718945935368538 + -0.3980351984500885 + <_> + + <_> + + + + <_>4 0 6 10 -1. + <_>6 0 2 10 3. + 0 + 6.8703000433743000e-003 + 0.0686260983347893 + -0.3856580853462219 + <_> + + <_> + + + + <_>11 9 4 8 -1. + <_>11 13 4 4 2. + 0 + -0.0604112707078457 + -0.4848239123821259 + 0.0207060202956200 + <_> + + <_> + + + + <_>5 9 4 8 -1. + <_>5 13 4 4 2. + 0 + -4.6826168545521796e-004 + 0.0958562418818474 + -0.3123035132884979 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -3.3507338957861066e-004 + 0.0781286582350731 + -0.0947510004043579 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. 
+ 0 + 0.0363130606710911 + 0.0448244214057922 + -0.6369314789772034 + <_> + + <_> + + + + <_>4 0 13 2 -1. + <_>4 1 13 1 2. + 0 + 3.8052719901315868e-004 + -0.2193126976490021 + 0.1178051978349686 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + -0.0509646311402321 + 0.5578337907791138 + -0.0438696891069412 + <_> + + <_> + + + + <_>14 3 6 7 -1. + <_>16 3 2 7 3. + 0 + -0.0761987566947937 + 0.6778960824012756 + -0.0179358907043934 + <_> + + <_> + + + + <_>5 4 5 10 -1. + <_>5 9 5 5 2. + 0 + -0.0126770203933120 + -0.6073101162910461 + 0.0490861907601357 + <_> + + <_> + + + + <_>8 1 5 10 -1. + <_>8 6 5 5 2. + 0 + -3.6766629200428724e-003 + 0.1522663980722427 + -0.1995368003845215 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0388467386364937 + -0.7704523801803589 + 0.0337324701249599 + <_> + + <_> + + + + <_>14 3 6 9 -1. + <_>16 3 2 9 3. + 0 + 9.4217229634523392e-003 + -0.0699294880032539 + 0.1366914063692093 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>2 3 2 9 3. + 0 + 7.3391180485486984e-003 + -0.1213333979249001 + 0.2117549926042557 + <_> + + <_> + + + + <_>1 1 19 3 -1. + <_>1 2 19 1 3. + 0 + 0.0122113795951009 + 0.0676368474960327 + -0.4335371851921082 + <_> + + <_> + + + + <_>6 6 4 14 -1. + <_>8 6 2 14 2. + 0 + -9.3064550310373306e-003 + -0.3468249142169952 + 0.0640623122453690 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + 0.0521113090217113 + -0.0341469906270504 + 0.3890474140644074 + <_> + + <_> + + + + <_>4 10 12 4 -1. + <_>8 10 4 4 3. + 0 + -4.3582019861787558e-004 + 0.1395650953054428 + -0.1828942000865936 + <_> + + <_> + + + + <_>0 6 20 2 -1. + <_>0 6 10 2 2. + 0 + -0.0105753596872091 + -0.2778246104717255 + 0.0856670662760735 + <_> + + <_> + + + + <_>6 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 1.4794029993936419e-003 + -0.2315472066402435 + 0.1176588982343674 + <_> + + <_> + + + + <_>13 2 6 11 -1. + <_>13 2 3 11 2. + 0 + 9.4746891409158707e-003 + -0.1334528028964996 + 0.1806696951389313 + <_> + + <_> + + + + <_>0 6 5 9 -1. + <_>0 9 5 3 3. + 0 + 0.0833551883697510 + 0.0335639603435993 + -0.7286074161529541 + <_> + + <_> + + + + <_>13 2 6 8 -1. + <_>13 2 3 8 2. + 0 + -0.0666290074586868 + 0.3805825114250183 + -0.0334907509386539 + <_> + + <_> + + + + <_>1 2 6 8 -1. + <_>4 2 3 8 2. + 0 + 5.0287488847970963e-003 + -0.1141801029443741 + 0.2153498977422714 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.5122200250625610 + 7.6377480290830135e-003 + -0.6506755948066711 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1230005994439125 + 0.0388790816068649 + -0.5942044258117676 + <_> + + <_> + + + + <_>7 11 13 3 -1. + <_>7 12 13 1 3. + 0 + -1.1227129725739360e-003 + 0.1023541018366814 + -0.1120750978589058 + <_> + + <_> + + + + <_>0 2 20 6 -1. + <_>0 2 10 3 2. + <_>10 5 10 3 2. + 0 + -0.0622209496796131 + -0.5117347240447998 + 0.0418797992169857 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -0.0263233892619610 + 0.3400599062442780 + -0.0506244711577892 + <_> + + <_> + + + + <_>5 0 3 13 -1. + <_>6 0 1 13 3. + 0 + -0.0188750196248293 + -0.5455083847045898 + 0.0415249206125736 + <_> + + <_> + + + + <_>0 1 20 10 -1. + <_>0 6 20 5 2. + 0 + -0.3403478860855103 + -0.9154180288314819 + 0.0165613200515509 + <_> + + <_> + + + + <_>7 1 3 13 -1. + <_>8 1 1 13 3. + 0 + -8.0456008436158299e-004 + 0.1427077054977417 + -0.1290145069360733 + <_> + + <_> + + + + <_>11 0 2 16 -1. + <_>11 0 1 16 2. + 0 + -3.9579509757459164e-003 + -0.3340837061405182 + 0.0586375482380390 + <_> + + <_> + + + + <_>0 0 2 13 -1. + <_>1 0 1 13 2. 
+ 0 + 0.0183365494012833 + -0.0456322208046913 + 0.5269632935523987 + <_> + + <_> + + + + <_>0 13 20 6 -1. + <_>10 13 10 3 2. + <_>0 16 10 3 2. + 0 + -0.0576861016452312 + -0.5760436058044434 + 0.0395500995218754 + <_> + + <_> + + + + <_>0 7 4 13 -1. + <_>2 7 2 13 2. + 0 + -8.6881890892982483e-003 + 0.2092967927455902 + -0.1030900031328201 + <_> + + <_> + + + + <_>5 10 15 10 -1. + <_>5 15 15 5 2. + 0 + 0.2031854987144470 + 9.4080818817019463e-003 + -0.9938954710960388 + <_> + + <_> + + + + <_>0 10 15 10 -1. + <_>0 15 15 5 2. + 0 + 0.0200977995991707 + 0.0565773993730545 + -0.3781901895999908 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 0.0132171399891377 + -0.0743221268057823 + 0.1787465065717697 + <_> + + <_> + + + + <_>7 0 2 16 -1. + <_>8 0 1 16 2. + 0 + -9.1346688568592072e-003 + -0.4935688078403473 + 0.0377993695437908 + <_> + + <_> + + + + <_>6 14 9 4 -1. + <_>6 16 9 2 2. + 0 + 8.7239191634580493e-004 + -0.1384868025779724 + 0.1151691973209381 + <_> + + <_> + + + + <_>1 3 15 2 -1. + <_>1 4 15 1 2. + 0 + -3.4609009162522852e-004 + -0.1637182980775833 + 0.1194979026913643 + <_> + + <_> + + + + <_>6 5 13 8 -1. + <_>6 9 13 4 2. + 0 + -9.8570866975933313e-004 + -0.5464289784431458 + 0.0446892790496349 + <_> + + <_> + + + + <_>4 0 11 6 -1. + <_>4 2 11 2 3. + 0 + 0.0102185597643256 + -0.1157016977667809 + 0.1672383993864059 + <_> + + <_> + + + + <_>1 9 18 4 -1. + <_>10 9 9 2 2. + <_>1 11 9 2 2. + 0 + 0.0267026796936989 + 0.0439220406115055 + -0.4512043893337250 + <_> + + <_> + + + + <_>3 9 6 8 -1. + <_>6 9 3 8 2. + 0 + -2.0299260504543781e-003 + 0.1193227991461754 + -0.1697949022054672 + <_> + + <_> + + + + <_>5 8 12 4 -1. + <_>9 8 4 4 3. + 0 + -0.0880236029624939 + -0.8027979135513306 + 9.4295190647244453e-003 + <_> + + <_> + + + + <_>3 8 12 4 -1. + <_>7 8 4 4 3. + 0 + -0.0131091102957726 + -0.3086530864238739 + 0.0608020499348640 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -9.9501870572566986e-003 + 0.1840061992406845 + -0.0464654788374901 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -3.4293539356440306e-003 + 0.2668299973011017 + -0.0993386432528496 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0547291412949562 + 0.0287311300635338 + -0.7774584889411926 + <_> + + <_> + + + + <_>5 7 8 8 -1. + <_>5 7 4 4 2. + <_>9 11 4 4 2. + 0 + 7.2012972086668015e-003 + 0.0448924787342548 + -0.3828934133052826 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 0.0420471206307411 + -0.0225623399019241 + 0.4064665138721466 + <_> + + <_> + + + + <_>4 6 12 3 -1. + <_>10 6 6 3 2. + 0 + 4.4444389641284943e-003 + 0.0912041068077087 + -0.1874821037054062 + <_> + + <_> + + + + <_>0 0 20 4 -1. + <_>10 0 10 2 2. + <_>0 2 10 2 2. + 0 + 0.0284418407827616 + 0.0406680405139923 + -0.4055212140083313 + <_> + + <_> + + + + <_>3 6 13 3 -1. + <_>3 7 13 1 3. + 0 + -0.0151418298482895 + 0.2479986995458603 + -0.0836073383688927 + <_> + + <_> + + + + <_>11 2 4 7 -1. + <_>11 2 2 7 2. + 0 + 0.0393880903720856 + 0.0242792796343565 + -0.7682729959487915 + <_> + + <_> + + + + <_>5 2 4 7 -1. + <_>7 2 2 7 2. + 0 + 6.1649468261748552e-004 + -0.1724991053342819 + 0.1031161025166512 + <_> + + <_> + + + + <_>1 16 18 2 -1. + <_>1 17 18 1 2. + 0 + 0.0260016508400440 + 0.0228253491222858 + -0.7754545211791992 + <_> + + <_> + + + + <_>0 13 14 3 -1. + <_>0 14 14 1 3. + 0 + 1.4940380351617932e-003 + -0.1102840974926949 + 0.1696674972772598 + <_> + + <_> + + + + <_>13 0 3 13 -1. + <_>14 0 1 13 3. 
+ 0 + -0.0137771498411894 + -0.3842472136020660 + 0.0303202699869871 + <_> + + <_> + + + + <_>3 14 13 3 -1. + <_>3 15 13 1 3. + 0 + 9.9619822576642036e-003 + -0.0537646599113941 + 0.3788712918758392 + <_> + + <_> + + + + <_>11 12 7 6 -1. + <_>11 14 7 2 3. + 0 + 3.2952039036899805e-003 + 0.0943841636180878 + -0.3276272118091583 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + 5.7747410610318184e-003 + 0.0571149401366711 + -0.3071976900100708 + <_> + + <_> + + + + <_>2 10 18 10 -1. + <_>8 10 6 10 3. + 0 + -0.0483925901353359 + 0.1702105998992920 + -0.0870455130934715 + <_> + + <_> + + + + <_>0 12 13 2 -1. + <_>0 13 13 1 2. + 0 + 5.6376052089035511e-004 + -0.0938163027167320 + 0.2064231038093567 + <_> + + <_> + + + + <_>5 7 14 4 -1. + <_>12 7 7 2 2. + <_>5 9 7 2 2. + 0 + -0.0238738097250462 + -0.3008235096931458 + 0.0174777191132307 + <_> + + <_> + + + + <_>1 7 14 4 -1. + <_>1 7 7 2 2. + <_>8 9 7 2 2. + 0 + -0.0105269001796842 + -0.3441892862319946 + 0.0579956397414207 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 0.0222886707633734 + -0.0571798495948315 + 0.1973951011896133 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -0.0145890703424811 + -0.4516879916191101 + 0.0414904095232487 + <_> + + <_> + + + + <_>9 7 9 9 -1. + <_>12 7 3 9 3. + 0 + -0.0469363704323769 + 0.2045795023441315 + -0.0517691895365715 + <_> + + <_> + + + + <_>0 8 15 2 -1. + <_>0 9 15 1 2. + 0 + 5.3777720313519239e-004 + -0.3948144912719727 + 0.0450766906142235 + <_> + + <_> + + + + <_>15 4 5 6 -1. + <_>15 7 5 3 2. + 0 + -2.2181039676070213e-003 + -0.2457561939954758 + 0.1026121973991394 + <_> + + <_> + + + + <_>4 0 9 18 -1. + <_>4 9 9 9 2. + 0 + 0.3507654964923859 + 0.0197911299765110 + -0.9516146779060364 + <_> + + <_> + + + + <_>14 15 6 5 -1. + <_>14 15 3 5 2. + 0 + -0.0267120599746704 + 0.2239314019680023 + -0.0455801002681255 + <_> + + <_> + + + + <_>0 4 5 6 -1. + <_>0 7 5 3 2. + 0 + -3.9627091027796268e-003 + -0.2420701980590820 + 0.0765885934233665 + <_> + + <_> + + + + <_>9 1 5 10 -1. + <_>9 6 5 5 2. + 0 + -4.7878702171146870e-003 + 0.1265527009963989 + -0.1196471005678177 + <_> + + <_> + + + + <_>0 11 6 8 -1. + <_>3 11 3 8 2. + 0 + 7.1042939089238644e-003 + -0.0921304225921631 + 0.2151913940906525 + <_> + + <_> + + + + <_>9 7 6 10 -1. + <_>12 7 3 5 2. + <_>9 12 3 5 2. + 0 + -2.2581929442822002e-005 + 0.0606346093118191 + -0.1584898978471756 + <_> + + <_> + + + + <_>1 5 9 10 -1. + <_>4 5 3 10 3. + 0 + -0.0780606418848038 + 0.3482210934162140 + -0.0531737096607685 + <_> + + <_> + + + + <_>6 2 9 16 -1. + <_>9 2 3 16 3. + 0 + 0.2755585014820099 + 7.4112107977271080e-003 + -1.0000040531158447 + <_> + + <_> + + + + <_>5 2 9 16 -1. + <_>8 2 3 16 3. + 0 + 0.1965232938528061 + 0.0201311092823744 + -0.8532667160034180 + <_> + + <_> + + + + <_>5 10 10 10 -1. + <_>5 15 10 5 2. + 0 + -1.6801860183477402e-003 + 0.0770821794867516 + -0.2262036949396133 + -1.8743180036544800 + 10 + -1 + <_> + + + <_> + + <_> + + + + <_>5 4 6 10 -1. + <_>5 4 3 5 2. + <_>8 9 3 5 2. + 0 + -0.0188147109001875 + 0.3774428963661194 + -0.4077064096927643 + <_> + + <_> + + + + <_>11 2 8 8 -1. + <_>15 2 4 4 2. + <_>11 6 4 4 2. + 0 + -0.0231910496950150 + 0.3404903113842011 + -0.3614461123943329 + <_> + + <_> + + + + <_>0 2 6 10 -1. + <_>3 2 3 10 2. + 0 + 0.0313330888748169 + -0.4361351132392883 + 0.1966868937015533 + <_> + + <_> + + + + <_>4 10 13 8 -1. + <_>4 14 13 4 2. + 0 + -0.0113187003880739 + 0.1168517023324966 + -0.5635979175567627 + <_> + + <_> + + + + <_>5 6 8 4 -1. + <_>9 6 4 4 2. 
+ 0 + -3.1084290822036564e-004 + -0.4339633882045746 + 0.1426406949758530 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>7 17 6 3 3. + 0 + 0.0873500630259514 + -0.1995280981063843 + 0.3304361104965210 + <_> + + <_> + + + + <_>1 2 8 8 -1. + <_>1 2 4 4 2. + <_>5 6 4 4 2. + 0 + -0.0290185194462538 + 0.3231520950794220 + -0.2170704007148743 + <_> + + <_> + + + + <_>4 7 12 6 -1. + <_>4 9 12 2 3. + 0 + 0.0598606802523136 + -0.1876475065946579 + 0.2765103876590729 + <_> + + <_> + + + + <_>4 5 12 10 -1. + <_>4 5 6 5 2. + <_>10 10 6 5 2. + 0 + -0.0296821705996990 + -0.4643633067607880 + 0.1112900972366333 + <_> + + <_> + + + + <_>8 12 8 8 -1. + <_>12 12 4 4 2. + <_>8 16 4 4 2. + 0 + -2.2648361045867205e-003 + -0.2716302871704102 + 0.0869167596101761 + <_> + + <_> + + + + <_>3 14 5 6 -1. + <_>3 17 5 3 2. + 0 + -1.6869819955900311e-003 + 0.1799899041652679 + -0.2715292870998383 + <_> + + <_> + + + + <_>7 4 6 8 -1. + <_>9 4 2 8 3. + 0 + 1.0256370296701789e-003 + -0.4324820935726166 + 0.1025668978691101 + <_> + + <_> + + + + <_>4 0 6 8 -1. + <_>6 0 2 8 3. + 0 + -0.0317629203200340 + -0.6441916823387146 + 0.0675051063299179 + <_> + + <_> + + + + <_>7 0 13 3 -1. + <_>7 1 13 1 3. + 0 + -8.5913296788930893e-003 + -0.3767251074314117 + 0.0729007571935654 + <_> + + <_> + + + + <_>3 1 14 2 -1. + <_>3 2 14 1 2. + 0 + -2.1636451128870249e-003 + -0.4220950901508331 + 0.1072463020682335 + <_> + + <_> + + + + <_>12 10 5 6 -1. + <_>12 13 5 3 2. + 0 + 6.0111237689852715e-004 + 0.0613021105527878 + -0.3800497949123383 + <_> + + <_> + + + + <_>0 5 20 6 -1. + <_>0 7 20 2 3. + 0 + -6.1244412790983915e-005 + 0.0747657865285873 + -0.5264449119567871 + <_> + + <_> + + + + <_>13 1 3 18 -1. + <_>14 1 1 18 3. + 0 + -0.0236664302647114 + -0.5680130124092102 + 0.0363775417208672 + <_> + + <_> + + + + <_>4 1 3 15 -1. + <_>5 1 1 15 3. + 0 + -0.0142566096037626 + -0.5344669222831726 + 0.0627688691020012 + <_> + + <_> + + + + <_>6 14 13 3 -1. + <_>6 15 13 1 3. + 0 + -0.0157139096409082 + 0.3189856112003326 + -0.1154123991727829 + <_> + + <_> + + + + <_>0 12 20 4 -1. + <_>0 14 20 2 2. + 0 + -0.0592860206961632 + -0.5713595747947693 + 0.0817756801843643 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + -0.0441229082643986 + -0.7059100866317749 + 0.0208330992609262 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + -7.2728260420262814e-004 + 0.1081985011696816 + -0.3807745873928070 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0666537284851074 + -0.6082463860511780 + 0.0432488210499287 + <_> + + <_> + + + + <_>0 7 20 2 -1. + <_>0 8 20 1 2. + 0 + 2.3679709993302822e-003 + -0.2979309856891632 + 0.1209193989634514 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 0.0335661806166172 + 0.0364646203815937 + -0.5576698780059815 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0531388111412525 + -0.5624539256095886 + 0.0652962774038315 + <_> + + <_> + + + + <_>5 5 10 8 -1. + <_>5 9 10 4 2. + 0 + -2.9401908977888525e-004 + -0.5841795206069946 + 0.0500055104494095 + <_> + + <_> + + + + <_>7 1 3 10 -1. + <_>7 6 3 5 2. + 0 + -4.8085048911161721e-004 + 0.1401866972446442 + -0.2479272037744522 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0477770604193211 + 0.0556727983057499 + -0.5954074263572693 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0334238708019257 + -0.1437038928270340 + 0.2330098003149033 + <_> + + <_> + + + + <_>4 9 12 11 -1. + <_>8 9 4 11 3. 
+ 0 + 0.2043281048536301 + 0.0453270487487316 + -0.7416430711746216 + <_> + + <_> + + + + <_>1 0 18 20 -1. + <_>7 0 6 20 3. + 0 + 0.1410606056451798 + -0.3967429101467133 + 0.0816928669810295 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + 1.0005939839174971e-004 + -0.2231793999671936 + 0.1391762942075729 + <_> + + <_> + + + + <_>2 15 16 4 -1. + <_>2 17 16 2 2. + 0 + 0.0606893897056580 + 0.0343249887228012 + -0.8279684782028198 + <_> + + <_> + + + + <_>5 18 13 2 -1. + <_>5 19 13 1 2. + 0 + -3.6456179805099964e-003 + 0.1528643965721130 + -0.1400597989559174 + <_> + + <_> + + + + <_>3 0 6 8 -1. + <_>5 0 2 8 3. + 0 + 0.0319453403353691 + 0.0653436928987503 + -0.4429608881473541 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + 0.0234283804893494 + 0.0255273096263409 + -0.6327065825462341 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 0.0460679493844509 + 0.0435791015625000 + -0.6492987275123596 + <_> + + <_> + + + + <_>10 14 9 6 -1. + <_>10 16 9 2 3. + 0 + -0.0580551512539387 + -0.6395754218101502 + 0.0140287503600121 + <_> + + <_> + + + + <_>1 14 9 6 -1. + <_>1 16 9 2 3. + 0 + 0.0387837402522564 + 0.0512335188686848 + -0.5414438843727112 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + -0.0127655202522874 + 0.2708289027214050 + -0.0919277667999268 + <_> + + <_> + + + + <_>3 2 13 2 -1. + <_>3 3 13 1 2. + 0 + -3.1400551088154316e-003 + -0.3467982113361359 + 0.0839736685156822 + <_> + + <_> + + + + <_>4 6 16 3 -1. + <_>4 6 8 3 2. + 0 + -0.0197199992835522 + -0.2047695964574814 + 0.0632321983575821 + <_> + + <_> + + + + <_>0 10 17 2 -1. + <_>0 11 17 1 2. + 0 + 3.2241051085293293e-003 + 0.0962597131729126 + -0.2809821963310242 + <_> + + <_> + + + + <_>11 6 6 12 -1. + <_>11 12 6 6 2. + 0 + -0.0592718608677387 + -0.2668690979480743 + 0.0329072587192059 + <_> + + <_> + + + + <_>0 10 16 4 -1. + <_>0 10 8 2 2. + <_>8 12 8 2 2. + 0 + 0.0156366396695375 + 0.0691880732774735 + -0.4176171123981476 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -8.8900122791528702e-003 + 0.1960355043411255 + -0.1124975010752678 + <_> + + <_> + + + + <_>3 14 14 4 -1. + <_>3 14 7 2 2. + <_>10 16 7 2 2. + 0 + 0.0244589094072580 + 0.0569889694452286 + -0.5102502107620239 + <_> + + <_> + + + + <_>6 6 14 3 -1. + <_>6 6 7 3 2. + 0 + 0.1010131984949112 + 9.4210049137473106e-003 + -0.3669132888317108 + <_> + + <_> + + + + <_>0 6 14 3 -1. + <_>7 6 7 3 2. + 0 + 0.0907398313283920 + 0.0539998784661293 + -0.5118147730827332 + <_> + + <_> + + + + <_>5 8 10 8 -1. + <_>10 8 5 4 2. + <_>5 12 5 4 2. + 0 + -0.0495578683912754 + -0.6246703863143921 + 0.0409882701933384 + <_> + + <_> + + + + <_>1 2 18 7 -1. + <_>7 2 6 7 3. + 0 + 0.2655834853649139 + -0.0861365497112274 + 0.3243843913078308 + <_> + + <_> + + + + <_>12 6 5 6 -1. + <_>12 9 5 3 2. + 0 + 1.8632459687069058e-003 + -0.5456336140632629 + 0.0586840510368347 + <_> + + <_> + + + + <_>1 10 4 7 -1. + <_>3 10 2 7 2. + 0 + 0.0118049401789904 + -0.2060389965772629 + 0.1416734009981155 + <_> + + <_> + + + + <_>4 0 14 2 -1. + <_>4 1 14 1 2. + 0 + 6.8137067137286067e-004 + -0.2080647051334381 + 0.0926273763179779 + <_> + + <_> + + + + <_>0 6 7 9 -1. + <_>0 9 7 3 3. + 0 + 5.7278381427749991e-004 + -0.4317088127136231 + 0.0633603632450104 + <_> + + <_> + + + + <_>9 6 3 14 -1. + <_>10 6 1 14 3. + 0 + -0.0110419997945428 + 0.1814437955617905 + -0.0417078398168087 + <_> + + <_> + + + + <_>3 4 13 3 -1. + <_>3 5 13 1 3. + 0 + 9.5696747303009033e-003 + -0.1209833994507790 + 0.2160761952400208 + <_> + + <_> + + + + <_>13 2 7 6 -1. 
+ <_>13 4 7 2 3. + 0 + 0.0742741972208023 + 0.0263995490968227 + -0.7760186791419983 + <_> + + <_> + + + + <_>0 1 18 5 -1. + <_>6 1 6 5 3. + 0 + -0.0258158296346664 + 0.5349736809730530 + -0.0520251505076885 + <_> + + <_> + + + + <_>12 10 6 10 -1. + <_>15 10 3 5 2. + <_>12 15 3 5 2. + 0 + -0.0633146911859512 + 0.5190032124519348 + -0.0193295907229185 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>2 10 3 5 2. + <_>5 15 3 5 2. + 0 + -0.0664324909448624 + 0.7214093208312988 + -0.0328820310533047 + <_> + + <_> + + + + <_>4 3 12 6 -1. + <_>4 5 12 2 3. + 0 + -0.0757490396499634 + 0.4148524999618530 + -0.0554517284035683 + <_> + + <_> + + + + <_>0 2 18 4 -1. + <_>0 2 9 2 2. + <_>9 4 9 2 2. + 0 + -0.0202960409224033 + -0.3325068950653076 + 0.0823978930711746 + <_> + + <_> + + + + <_>7 6 6 10 -1. + <_>9 6 2 10 3. + 0 + 0.0221726503223181 + -0.1441915035247803 + 0.1728086024522781 + <_> + + <_> + + + + <_>3 0 6 5 -1. + <_>6 0 3 5 2. + 0 + 4.2085880413651466e-003 + -0.3023748993873596 + 0.0866990834474564 + <_> + + <_> + + + + <_>10 10 6 10 -1. + <_>13 10 3 5 2. + <_>10 15 3 5 2. + 0 + 0.0682673305273056 + 8.7291244417428970e-003 + -0.3695572912693024 + <_> + + <_> + + + + <_>4 10 6 10 -1. + <_>4 10 3 5 2. + <_>7 15 3 5 2. + 0 + 5.1220320165157318e-003 + -0.2082498073577881 + 0.1453005969524384 + <_> + + <_> + + + + <_>6 0 8 10 -1. + <_>10 0 4 5 2. + <_>6 5 4 5 2. + 0 + -0.0531143285334110 + -0.5514230132102966 + 0.0434211902320385 + <_> + + <_> + + + + <_>1 0 6 10 -1. + <_>1 0 3 5 2. + <_>4 5 3 5 2. + 0 + -0.0497399792075157 + 0.4407710134983063 + -0.0643496736884117 + -1.9982930421829224 + 11 + -1 + <_> + + + <_> + + <_> + + + + <_>9 3 2 14 -1. + <_>9 10 2 7 2. + 0 + -3.3883380820043385e-004 + 0.1899784952402115 + -0.4618484973907471 + <_> + + <_> + + + + <_>12 1 6 10 -1. + <_>15 1 3 5 2. + <_>12 6 3 5 2. + 0 + -1.5632030554115772e-003 + 0.1938140988349915 + -0.4351884126663208 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 1.5552520053461194e-003 + -0.4742031097412109 + 0.1213762983679771 + <_> + + <_> + + + + <_>11 1 9 18 -1. + <_>11 10 9 9 2. + 0 + -0.0314171202480793 + -0.3909668922424316 + 0.1095193028450012 + <_> + + <_> + + + + <_>2 1 6 10 -1. + <_>2 1 3 5 2. + <_>5 6 3 5 2. + 0 + -3.2835190650075674e-003 + 0.1642895042896271 + -0.3275192975997925 + <_> + + <_> + + + + <_>4 10 16 4 -1. + <_>12 10 8 2 2. + <_>4 12 8 2 2. + 0 + 5.8749080635607243e-003 + 0.0762259736657143 + -0.4347071051597595 + <_> + + <_> + + + + <_>0 10 18 4 -1. + <_>0 10 9 2 2. + <_>9 12 9 2 2. + 0 + 4.4846539385616779e-003 + 0.1219756007194519 + -0.4487237930297852 + <_> + + <_> + + + + <_>12 5 4 8 -1. + <_>12 9 4 4 2. + 0 + 1.9835829734802246e-003 + -0.6291102170944214 + 0.1012253016233444 + <_> + + <_> + + + + <_>0 4 18 10 -1. + <_>0 4 9 5 2. + <_>9 9 9 5 2. + 0 + 0.0126094697043300 + 0.1043825000524521 + -0.3501549959182739 + <_> + + <_> + + + + <_>2 11 18 2 -1. + <_>2 12 18 1 2. + 0 + -4.7475768951699138e-004 + 0.1100815981626511 + -0.3042953908443451 + <_> + + <_> + + + + <_>4 0 5 9 -1. + <_>4 3 5 3 3. + 0 + 3.2356760930269957e-003 + -0.2705790102481842 + 0.1274618059396744 + <_> + + <_> + + + + <_>10 2 6 8 -1. + <_>12 2 2 8 3. + 0 + 9.9898613989353180e-003 + 0.0639069825410843 + -0.4711843132972717 + <_> + + <_> + + + + <_>1 7 13 2 -1. + <_>1 8 13 1 2. + 0 + 5.6069239508360624e-004 + -0.3178333044052124 + 0.1040434017777443 + <_> + + <_> + + + + <_>10 2 6 8 -1. + <_>12 2 2 8 3. + 0 + -0.0576946996152401 + -0.5134257078170776 + 0.0263949800282717 + <_> + + <_> + + + + <_>4 2 6 8 -1. + <_>6 2 2 8 3. 
+ 0 + 5.5947788059711456e-003 + 0.0767747536301613 + -0.4337426126003265 + <_> + + <_> + + + + <_>8 5 8 8 -1. + <_>12 5 4 4 2. + <_>8 9 4 4 2. + 0 + -3.8770840037614107e-003 + 0.1398819983005524 + -0.2022155970335007 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0478742010891438 + -0.4792838990688324 + 0.0680430307984352 + <_> + + <_> + + + + <_>13 0 6 10 -1. + <_>16 0 3 5 2. + <_>13 5 3 5 2. + 0 + 0.0258175507187843 + -0.0455241985619068 + 0.3945290148258209 + <_> + + <_> + + + + <_>3 9 13 3 -1. + <_>3 10 13 1 3. + 0 + 1.6696650709491223e-004 + -0.3088071942329407 + 0.1087523996829987 + <_> + + <_> + + + + <_>5 11 11 6 -1. + <_>5 14 11 3 2. + 0 + 9.8888948559761047e-004 + 0.0686990320682526 + -0.4181300997734070 + <_> + + <_> + + + + <_>1 14 7 6 -1. + <_>1 16 7 2 3. + 0 + -3.4260770771652460e-003 + -0.2892970144748688 + 0.1147964969277382 + <_> + + <_> + + + + <_>13 5 4 8 -1. + <_>13 9 4 4 2. + 0 + 0.0660443678498268 + 0.0168092697858810 + -0.3353480100631714 + <_> + + <_> + + + + <_>3 5 4 8 -1. + <_>3 9 4 4 2. + 0 + 2.8318059630692005e-003 + -0.3948217034339905 + 0.0855987221002579 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 0.4268054962158203 + 5.0977780483663082e-003 + -0.5933117866516113 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.1196065023541451 + 0.0274377707391977 + -0.7661628127098084 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0195713192224503 + -0.1196618005633354 + 0.2396223992109299 + <_> + + <_> + + + + <_>0 1 16 3 -1. + <_>0 2 16 1 3. + 0 + -0.0174324698746204 + -0.5853034853935242 + 0.0564003400504589 + <_> + + <_> + + + + <_>8 9 6 10 -1. + <_>8 14 6 5 2. + 0 + -0.1119662970304489 + -0.6724832057952881 + 0.0291506592184305 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + -4.5747519470751286e-003 + -0.4773026108741760 + 0.0566129982471466 + <_> + + <_> + + + + <_>13 0 6 10 -1. + <_>16 0 3 5 2. + <_>13 5 3 5 2. + 0 + -5.1501519046723843e-003 + 0.1151062995195389 + -0.1073232963681221 + <_> + + <_> + + + + <_>1 0 6 10 -1. + <_>1 0 3 5 2. + <_>4 5 3 5 2. + 0 + 0.0290342494845390 + -0.0533687099814415 + 0.6422646045684815 + <_> + + <_> + + + + <_>7 1 8 12 -1. + <_>7 7 8 6 2. + 0 + -1.8050910439342260e-003 + 0.1279534995555878 + -0.1232938989996910 + <_> + + <_> + + + + <_>1 2 17 2 -1. + <_>1 3 17 1 2. + 0 + -2.4374839849770069e-003 + -0.3531234860420227 + 0.0877031534910202 + <_> + + <_> + + + + <_>11 0 3 18 -1. + <_>12 0 1 18 3. + 0 + -0.0190700795501471 + -0.4066244065761566 + 0.0432731881737709 + <_> + + <_> + + + + <_>0 13 8 6 -1. + <_>0 15 8 2 3. + 0 + -0.0504542402923107 + -0.8119810223579407 + 0.0282891094684601 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + 1.6544000245630741e-003 + -0.1696404069662094 + 0.1219474002718926 + <_> + + <_> + + + + <_>0 6 6 14 -1. + <_>0 6 3 7 2. + <_>3 13 3 7 2. + 0 + -0.0467913113534451 + 0.4061444103717804 + -0.0611748583614826 + <_> + + <_> + + + + <_>12 11 8 6 -1. + <_>12 13 8 2 3. + 0 + -0.0559538491070271 + -0.8266291022300720 + 0.0277747493237257 + <_> + + <_> + + + + <_>2 16 12 4 -1. + <_>6 16 4 4 3. + 0 + 1.4469559537246823e-003 + -0.1495386958122253 + 0.1596699059009552 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + -0.0125290500000119 + -0.4250465035438538 + 0.0216580796986818 + <_> + + <_> + + + + <_>5 6 4 8 -1. + <_>5 10 4 4 2. + 0 + 1.1086500016972423e-003 + -0.3600699007511139 + 0.0644150972366333 + <_> + + <_> + + + + <_>3 11 16 4 -1. + <_>11 11 8 2 2. + <_>3 13 8 2 2. 
+ 0 + 0.0393617786467075 + 8.2419048994779587e-003 + -0.7530307173728943 + <_> + + <_> + + + + <_>1 11 16 4 -1. + <_>1 11 8 2 2. + <_>9 13 8 2 2. + 0 + 0.0188239291310310 + 0.0448211207985878 + -0.5060411095619202 + <_> + + <_> + + + + <_>16 3 4 8 -1. + <_>16 3 2 8 2. + 0 + -0.0320830009877682 + 0.3143131136894226 + -0.0391818694770336 + <_> + + <_> + + + + <_>6 0 3 18 -1. + <_>7 0 1 18 3. + 0 + -0.0310819298028946 + -0.7690374255180359 + 0.0307429600507021 + <_> + + <_> + + + + <_>16 3 4 8 -1. + <_>16 3 2 8 2. + 0 + 0.0232182107865810 + -0.0577487498521805 + 0.2895534932613373 + <_> + + <_> + + + + <_>4 12 12 4 -1. + <_>8 12 4 4 3. + 0 + -1.1492100311443210e-003 + 0.1150140985846520 + -0.1931069046258926 + <_> + + <_> + + + + <_>4 0 16 3 -1. + <_>4 1 16 1 3. + 0 + -0.0165939405560493 + -0.4229854047298431 + 0.0437389798462391 + <_> + + <_> + + + + <_>0 3 4 8 -1. + <_>2 3 2 8 2. + 0 + -0.0101465703919530 + 0.2557984888553619 + -0.0919662415981293 + <_> + + <_> + + + + <_>16 0 4 7 -1. + <_>16 0 2 7 2. + 0 + -0.0130540197715163 + 0.1833952963352203 + -0.0401608310639858 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + 3.7463540211319923e-003 + -0.1258676946163178 + 0.2224701941013336 + <_> + + <_> + + + + <_>9 6 6 12 -1. + <_>9 6 3 12 2. + 0 + -0.0484635904431343 + -0.5815590023994446 + 0.0297133903950453 + <_> + + <_> + + + + <_>0 10 10 6 -1. + <_>0 12 10 2 3. + 0 + 6.4649381674826145e-003 + 0.0931691080331802 + -0.2904658019542694 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 0.0156078096479177 + 0.0473319701850414 + -0.4480555951595306 + <_> + + <_> + + + + <_>4 10 12 10 -1. + <_>4 15 12 5 2. + 0 + -5.8314641937613487e-003 + 0.0989417582750320 + -0.2205685973167419 + <_> + + <_> + + + + <_>10 4 4 16 -1. + <_>10 4 2 16 2. + 0 + 0.0736078023910522 + 0.0167804602533579 + -0.5495312213897705 + <_> + + <_> + + + + <_>6 4 4 16 -1. + <_>8 4 2 16 2. + 0 + -6.4223129302263260e-003 + -0.2964796125888825 + 0.0735399127006531 + <_> + + <_> + + + + <_>7 8 13 2 -1. + <_>7 9 13 1 2. + 0 + 2.2267029635258950e-005 + -0.3421182036399841 + 0.0418582707643509 + <_> + + <_> + + + + <_>0 8 13 2 -1. + <_>0 9 13 1 2. + 0 + 0.0372736304998398 + 0.0274580791592598 + -0.7855197191238403 + <_> + + <_> + + + + <_>8 0 9 5 -1. + <_>11 0 3 5 3. + 0 + 4.2738770134747028e-003 + -0.0825145170092583 + 0.1040488034486771 + <_> + + <_> + + + + <_>3 0 9 5 -1. + <_>6 0 3 5 3. + 0 + 1.1906049912795424e-003 + -0.1630043983459473 + 0.1530064940452576 + <_> + + <_> + + + + <_>14 6 6 10 -1. + <_>14 6 3 10 2. + 0 + 8.7800435721874237e-003 + -0.0928859487175941 + 0.1314751058816910 + <_> + + <_> + + + + <_>1 5 17 6 -1. + <_>1 7 17 2 3. + 0 + 2.4151368997991085e-003 + 0.0475985594093800 + -0.4482966959476471 + <_> + + <_> + + + + <_>14 6 6 10 -1. + <_>14 6 3 10 2. + 0 + -0.0274283401668072 + 0.1981106996536255 + -0.0559796988964081 + <_> + + <_> + + + + <_>0 17 14 3 -1. + <_>0 18 14 1 3. + 0 + -1.4117059763520956e-003 + -0.2113897055387497 + 0.1040974035859108 + <_> + + <_> + + + + <_>14 6 6 10 -1. + <_>14 6 3 10 2. + 0 + -0.2021020054817200 + -0.7712023258209229 + 7.0582218468189240e-003 + <_> + + <_> + + + + <_>0 6 6 10 -1. + <_>3 6 3 10 2. + 0 + -0.0414513200521469 + 0.2829514145851135 + -0.0713235288858414 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + 4.8561887815594673e-003 + 0.0866938978433609 + -0.2354182004928589 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + -4.4662880100077018e-005 + 0.1325713992118835 + -0.2016859948635101 + <_> + + <_> + + + + <_>7 6 6 7 -1. 
+ <_>9 6 2 7 3. + 0 + 0.0376715809106827 + -0.0749522894620895 + 0.3384338021278381 + <_> + + <_> + + + + <_>2 7 6 13 -1. + <_>4 7 2 13 3. + 0 + 0.0743432566523552 + 0.0329050309956074 + -0.7353677749633789 + <_> + + <_> + + + + <_>13 3 3 15 -1. + <_>14 3 1 15 3. + 0 + -0.0101864198222756 + -0.3127708137035370 + 0.0441639907658100 + <_> + + <_> + + + + <_>4 3 3 15 -1. + <_>5 3 1 15 3. + 0 + -0.0245068799704313 + -0.6134651899337769 + 0.0296921394765377 + <_> + + <_> + + + + <_>3 2 15 5 -1. + <_>8 2 5 5 3. + 0 + -0.0382381491363049 + 0.3558354079723358 + -0.0483886189758778 + <_> + + <_> + + + + <_>5 4 9 14 -1. + <_>5 11 9 7 2. + 0 + 0.1798366010189056 + 0.0195015892386436 + -0.9848588109016419 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 8.4765878273174167e-004 + -0.2796033024787903 + 0.0783230364322662 + <_> + + <_> + + + + <_>4 6 10 12 -1. + <_>4 6 5 6 2. + <_>9 12 5 6 2. + 0 + 3.7178809288889170e-003 + 0.0725254416465759 + -0.2406740933656693 + <_> + + <_> + + + + <_>5 5 12 10 -1. + <_>11 5 6 5 2. + <_>5 10 6 5 2. + 0 + -0.0909323170781136 + -0.7153915166854858 + 8.8080493733286858e-003 + <_> + + <_> + + + + <_>3 5 12 10 -1. + <_>3 5 6 5 2. + <_>9 10 6 5 2. + 0 + -0.0800878107547760 + -0.6783071756362915 + 0.0249043200165033 + <_> + + <_> + + + + <_>12 0 8 12 -1. + <_>16 0 4 6 2. + <_>12 6 4 6 2. + 0 + 7.6924148015677929e-003 + -0.0509674996137619 + 0.1195252984762192 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + 0.0414852313697338 + -0.0494939200580120 + 0.3538686037063599 + <_> + + <_> + + + + <_>0 2 20 4 -1. + <_>10 2 10 2 2. + <_>0 4 10 2 2. + 0 + 0.0340516082942486 + 0.0422009788453579 + -0.5011072158813477 + <_> + + <_> + + + + <_>6 6 6 8 -1. + <_>8 6 2 8 3. + 0 + -0.0262358300387859 + 0.4493483901023865 + -0.0418512001633644 + <_> + + <_> + + + + <_>10 0 3 20 -1. + <_>11 0 1 20 3. + 0 + -0.0513739585876465 + -0.9594280123710632 + 0.0171927902847528 + <_> + + <_> + + + + <_>7 0 3 20 -1. + <_>8 0 1 20 3. + 0 + -0.0267427396029234 + -0.6563224196434021 + 0.0217780806124210 + <_> + + <_> + + + + <_>10 0 2 13 -1. + <_>10 0 1 13 2. + 0 + -1.3730529462918639e-003 + -0.1863850951194763 + 0.0411393493413925 + <_> + + <_> + + + + <_>8 0 2 13 -1. + <_>9 0 1 13 2. + 0 + 1.0963230160996318e-003 + -0.1421937048435211 + 0.1383201926946640 + <_> + + <_> + + + + <_>0 15 20 4 -1. + <_>10 15 10 2 2. + <_>0 17 10 2 2. + 0 + -4.5011811889708042e-003 + -0.1846860051155090 + 0.0910241901874542 + <_> + + <_> + + + + <_>2 3 3 13 -1. + <_>3 3 1 13 3. + 0 + 4.4253250234760344e-004 + -0.1273694038391113 + 0.1365536004304886 + <_> + + <_> + + + + <_>7 2 7 6 -1. + <_>7 4 7 2 3. + 0 + 0.0305007100105286 + -0.0581461489200592 + 0.2418991029262543 + <_> + + <_> + + + + <_>0 2 15 14 -1. + <_>0 9 15 7 2. + 0 + -0.1169191971421242 + -0.5546640753746033 + 0.0302490293979645 + <_> + + <_> + + + + <_>12 10 4 8 -1. + <_>12 14 4 4 2. + 0 + -9.5684931147843599e-004 + 0.0518998689949512 + -0.1415279954671860 + <_> + + <_> + + + + <_>4 14 12 6 -1. + <_>4 16 12 2 3. + 0 + 1.3096149777993560e-003 + -0.1424822956323624 + 0.1222778037190437 + <_> + + <_> + + + + <_>1 13 18 4 -1. + <_>10 13 9 2 2. + <_>1 15 9 2 2. + 0 + 0.0349888801574707 + 0.0276531297713518 + -0.6173881292343140 + -1.8377989530563354 + 12 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.1648942977190018 + -0.2565720975399017 + 0.4127771854400635 + <_> + + <_> + + + + <_>5 7 11 4 -1. + <_>5 9 11 2 2. + 0 + 0.0205848608165979 + -0.5244221091270447 + 0.1491083055734634 + <_> + + <_> + + + + <_>9 4 2 14 -1. 
+ <_>9 11 2 7 2. + 0 + 8.8764587417244911e-004 + 0.1333470046520233 + -0.5225952267646790 + <_> + + <_> + + + + <_>11 6 6 14 -1. + <_>14 6 3 7 2. + <_>11 13 3 7 2. + 0 + -1.3320889556780457e-003 + -0.3656874895095825 + 0.2048227936029434 + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>3 2 3 11 2. + 0 + 0.0779161974787712 + -0.2155715972185135 + 0.3106957972049713 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 2.4321360979229212e-003 + -0.4474255144596100 + 0.1063833981752396 + <_> + + <_> + + + + <_>3 7 6 12 -1. + <_>3 7 3 6 2. + <_>6 13 3 6 2. + 0 + -5.8699389919638634e-003 + -0.3880077898502350 + 0.1441058963537216 + <_> + + <_> + + + + <_>7 6 10 3 -1. + <_>7 6 5 3 2. + 0 + 0.0697543025016785 + 0.0132249100133777 + -0.8009663224220276 + <_> + + <_> + + + + <_>3 6 10 3 -1. + <_>8 6 5 3 2. + 0 + 3.8338101003319025e-003 + -0.4313930869102478 + 0.1425399035215378 + <_> + + <_> + + + + <_>6 14 13 3 -1. + <_>6 15 13 1 3. + 0 + -0.0158290304243565 + 0.3095479905605316 + -0.1223272010684013 + <_> + + <_> + + + + <_>3 0 14 9 -1. + <_>3 3 14 3 3. + 0 + 0.0661982968449593 + -0.2055824995040894 + 0.1953122019767761 + <_> + + <_> + + + + <_>3 1 14 4 -1. + <_>10 1 7 2 2. + <_>3 3 7 2 2. + 0 + 0.0176395196467638 + 0.1077058985829353 + -0.4348832070827484 + <_> + + <_> + + + + <_>1 14 7 6 -1. + <_>1 16 7 2 3. + 0 + -0.0110826296731830 + -0.3614957034587860 + 0.1132721006870270 + <_> + + <_> + + + + <_>6 9 10 10 -1. + <_>11 9 5 5 2. + <_>6 14 5 5 2. + 0 + -0.0365152992308140 + -0.4391221106052399 + 0.0552794486284256 + <_> + + <_> + + + + <_>4 9 10 10 -1. + <_>4 9 5 5 2. + <_>9 14 5 5 2. + 0 + -0.0333732999861240 + -0.5686920881271362 + 0.0840439572930336 + <_> + + <_> + + + + <_>5 6 10 6 -1. + <_>5 9 10 3 2. + 0 + 0.0813955590128899 + -0.1423501074314117 + 0.2874828875064850 + <_> + + <_> + + + + <_>1 1 7 4 -1. + <_>1 3 7 2 2. + 0 + -4.3892292305827141e-003 + -0.3485983014106751 + 0.1165034025907517 + <_> + + <_> + + + + <_>3 0 14 3 -1. + <_>3 1 14 1 3. + 0 + -6.3558202236890793e-003 + -0.3382304906845093 + 0.1100549027323723 + <_> + + <_> + + + + <_>6 7 7 10 -1. + <_>6 12 7 5 2. + 0 + 0.0209124591201544 + 0.0781978294253349 + -0.4633755087852478 + <_> + + <_> + + + + <_>10 1 10 19 -1. + <_>10 1 5 19 2. + 0 + 0.1160036027431488 + -0.2052866965532303 + 0.1592338979244232 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>9 6 1 14 3. + 0 + 0.0163166001439095 + -0.1063399985432625 + 0.3345352113246918 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + -0.2848814129829407 + 0.5163800120353699 + -3.9357859641313553e-003 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.0241554304957390 + -0.7167022824287415 + 0.0500315502285957 + <_> + + <_> + + + + <_>12 0 2 13 -1. + <_>12 0 1 13 2. + 0 + 0.0114132603630424 + 0.0592360310256481 + -0.3814190030097961 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -0.0243041999638081 + 0.4347585141658783 + -0.0865741595625877 + <_> + + <_> + + + + <_>5 5 12 8 -1. + <_>5 9 12 4 2. + 0 + -1.5267609851434827e-003 + -0.6430760025978088 + 0.0516427792608738 + <_> + + <_> + + + + <_>1 14 7 4 -1. + <_>1 16 7 2 2. + 0 + 0.0100733498111367 + 0.0757430270314217 + -0.4290296137332916 + <_> + + <_> + + + + <_>7 12 11 8 -1. + <_>7 16 11 4 2. + 0 + -0.0812248811125755 + -0.4082733094692230 + 0.0554446317255497 + <_> + + <_> + + + + <_>6 0 2 13 -1. + <_>7 0 1 13 2. + 0 + 0.0151490103453398 + 0.0530848614871502 + -0.5449541211128235 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. 
+ 0 + -0.0534907393157482 + -0.4742214977741242 + 0.0394207797944546 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0408842712640762 + -0.8855779767036438 + 0.0320427082479000 + <_> + + <_> + + + + <_>10 3 4 7 -1. + <_>10 3 2 7 2. + 0 + -4.2768509592860937e-004 + -0.3055447041988373 + 0.0514328815042973 + <_> + + <_> + + + + <_>2 15 16 4 -1. + <_>2 15 8 2 2. + <_>10 17 8 2 2. + 0 + 0.0184412691742182 + 0.0806880891323090 + -0.3588404953479767 + <_> + + <_> + + + + <_>1 1 18 6 -1. + <_>10 1 9 3 2. + <_>1 4 9 3 2. + 0 + -0.0476307906210423 + -0.4613190889358521 + 0.0605927705764771 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 8.2442145794630051e-003 + 0.0897936075925827 + -0.3760578036308289 + <_> + + <_> + + + + <_>3 0 14 6 -1. + <_>3 3 14 3 2. + 0 + 0.1000375971198082 + -0.0837603807449341 + 0.3922181129455566 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -0.0284205507487059 + -0.6948354840278626 + 0.0491004101932049 + <_> + + <_> + + + + <_>10 3 4 7 -1. + <_>10 3 2 7 2. + 0 + 0.0564859993755817 + 4.4795661233365536e-003 + -0.7537339925765991 + <_> + + <_> + + + + <_>6 3 4 7 -1. + <_>8 3 2 7 2. + 0 + 1.0085420217365026e-003 + -0.3788126111030579 + 0.0783769935369492 + <_> + + <_> + + + + <_>4 2 13 12 -1. + <_>4 6 13 4 3. + 0 + -1.2643639929592609e-003 + 0.0754860267043114 + -0.3101564049720764 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 0.0141463400796056 + -0.0818050205707550 + 0.3731384873390198 + <_> + + <_> + + + + <_>15 4 5 6 -1. + <_>15 7 5 3 2. + 0 + -3.1549399718642235e-003 + -0.2124166041612625 + 0.0891297906637192 + <_> + + <_> + + + + <_>3 10 13 3 -1. + <_>3 11 13 1 3. + 0 + 1.4796239556744695e-003 + -0.2147904038429260 + 0.1354327946901321 + <_> + + <_> + + + + <_>5 10 10 6 -1. + <_>10 10 5 3 2. + <_>5 13 5 3 2. + 0 + -0.0313436090946198 + -0.5811458826065064 + 0.0485763289034367 + <_> + + <_> + + + + <_>3 5 12 12 -1. + <_>3 5 6 6 2. + <_>9 11 6 6 2. + 0 + -0.0761497616767883 + -0.5377451777458191 + 0.0483390688896179 + <_> + + <_> + + + + <_>15 4 5 6 -1. + <_>15 7 5 3 2. + 0 + -0.0616689398884773 + -0.8452566266059876 + 1.7448999278713018e-004 + <_> + + <_> + + + + <_>1 3 4 8 -1. + <_>1 7 4 4 2. + 0 + -0.0270849205553532 + -0.5065913796424866 + 0.0477094203233719 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -0.0242409296333790 + -0.3853445053100586 + 0.0503007806837559 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0419793985784054 + -0.1037800982594490 + 0.2623626887798309 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + 0.0237176902592182 + 0.0568972714245319 + -0.2895944118499756 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -0.0186697896569967 + -0.3992452919483185 + 0.0734422132372856 + <_> + + <_> + + + + <_>2 1 18 3 -1. + <_>2 2 18 1 3. + 0 + -0.0149870002642274 + -0.3229691982269287 + 0.0416767485439777 + <_> + + <_> + + + + <_>4 11 6 6 -1. + <_>7 11 3 6 2. + 0 + 8.7209865450859070e-003 + 0.1352138966321945 + -0.1822458058595657 + <_> + + <_> + + + + <_>4 6 13 3 -1. + <_>4 7 13 1 3. + 0 + -0.0122392196208239 + 0.1554080992937088 + -0.1520806998014450 + <_> + + <_> + + + + <_>1 12 18 4 -1. + <_>1 14 18 2 2. + 0 + -0.0487449802458286 + -0.3660675883293152 + 0.0631525665521622 + <_> + + <_> + + + + <_>12 10 5 6 -1. + <_>12 13 5 3 2. + 0 + -3.8249569479376078e-003 + 0.0834729894995689 + -0.2418632954359055 + <_> + + <_> + + + + <_>0 1 5 14 -1. + <_>0 8 5 7 2. 
+ 0 + 0.1558165997266769 + 0.0319539606571198 + -0.6781318187713623 + <_> + + <_> + + + + <_>2 12 18 6 -1. + <_>11 12 9 3 2. + <_>2 15 9 3 2. + 0 + 0.0682415813207626 + 0.0154784396290779 + -0.4202975034713745 + <_> + + <_> + + + + <_>5 2 6 13 -1. + <_>7 2 2 13 3. + 0 + -0.0959746465086937 + -0.9564784169197083 + 0.0214445907622576 + <_> + + <_> + + + + <_>13 8 7 6 -1. + <_>13 10 7 2 3. + 0 + -0.0126184299588203 + -0.5054485797882080 + 0.0308752600103617 + <_> + + <_> + + + + <_>2 5 16 10 -1. + <_>2 5 8 5 2. + <_>10 10 8 5 2. + 0 + 0.0727276429533958 + 0.0472153499722481 + -0.4507515132427216 + <_> + + <_> + + + + <_>14 4 6 7 -1. + <_>16 4 2 7 3. + 0 + 0.0299232192337513 + -0.0814443528652191 + 0.3165622949600220 + <_> + + <_> + + + + <_>4 1 6 7 -1. + <_>6 1 2 7 3. + 0 + 0.0191380903124809 + 0.0681874006986618 + -0.3487679064273834 + <_> + + <_> + + + + <_>13 10 7 4 -1. + <_>13 12 7 2 2. + 0 + -0.0343147218227386 + -0.5522037148475647 + 0.0373250097036362 + <_> + + <_> + + + + <_>0 10 7 4 -1. + <_>0 12 7 2 2. + 0 + 5.2559198811650276e-003 + 0.0647869780659676 + -0.3636350929737091 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + 0.0140923997387290 + -0.0487043596804142 + 0.2767783105373383 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + -9.0101473033428192e-003 + 0.2345259934663773 + -0.1314035058021545 + <_> + + <_> + + + + <_>2 14 17 6 -1. + <_>2 16 17 2 3. + 0 + 0.0967202186584473 + 0.0266613606363535 + -0.7742279767990112 + <_> + + <_> + + + + <_>7 7 5 12 -1. + <_>7 11 5 4 3. + 0 + 0.0853650718927383 + 0.0235299095511436 + -0.7071086168289185 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>8 6 2 7 2. + 0 + 0.0243844296783209 + -0.0626484826207161 + 0.3725188076496124 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>7 10 3 5 2. + <_>10 15 3 5 2. + 0 + 0.0363807789981365 + 0.0433587394654751 + -0.6022241711616516 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0537802688777447 + -0.3344100117683411 + 0.0357005782425404 + <_> + + <_> + + + + <_>0 13 15 3 -1. + <_>0 14 15 1 3. + 0 + -0.0147871002554893 + 0.2913616895675659 + -0.0740752965211868 + <_> + + <_> + + + + <_>13 12 5 8 -1. + <_>13 16 5 4 2. + 0 + 1.2491010129451752e-003 + 0.0416542403399944 + -0.0937588363885880 + <_> + + <_> + + + + <_>0 12 18 6 -1. + <_>0 12 9 3 2. + <_>9 15 9 3 2. + 0 + -0.0275729093700647 + -0.3139821887016296 + 0.0724119991064072 + <_> + + <_> + + + + <_>12 10 6 10 -1. + <_>15 10 3 5 2. + <_>12 15 3 5 2. + 0 + -0.0788664519786835 + 0.6065583825111389 + -0.0238380506634712 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>2 10 3 5 2. + <_>5 15 3 5 2. + 0 + -0.0693393126130104 + 0.7113773226737976 + -0.0298142693936825 + <_> + + <_> + + + + <_>4 7 15 3 -1. + <_>9 7 5 3 3. + 0 + 0.0943725928664207 + 0.0335794389247894 + -0.5977404117584229 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -0.0260486491024494 + -0.4057491123676300 + 0.0556035302579403 + <_> + + <_> + + + + <_>5 7 15 3 -1. + <_>10 7 5 3 3. + 0 + -0.0736302062869072 + -0.6078035235404968 + 0.0252516493201256 + <_> + + <_> + + + + <_>2 5 16 3 -1. + <_>2 6 16 1 3. + 0 + -0.0186104495078325 + 0.2401355952024460 + -0.0953897833824158 + <_> + + <_> + + + + <_>8 8 12 12 -1. + <_>8 8 6 12 2. + 0 + 0.1332962960004807 + -0.0697423815727234 + 0.1332300007343292 + -1.9031070470809937 + 13 + -1 + <_> + + + <_> + + <_> + + + + <_>6 3 7 6 -1. + <_>6 6 7 3 2. + 0 + -4.1724857874214649e-003 + 0.1931089013814926 + -0.4963074028491974 + <_> + + <_> + + + + <_>9 5 6 5 -1. + <_>9 5 3 5 2. 
+ 0 + 9.6606701845303178e-004 + -0.5434030294418335 + 0.1243411973118782 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 1.0261629940941930e-003 + -0.4632157981395721 + 0.1116029024124146 + <_> + + <_> + + + + <_>9 8 5 12 -1. + <_>9 12 5 4 3. + 0 + 3.6368470173329115e-003 + 0.0829189494252205 + -0.3666251003742218 + <_> + + <_> + + + + <_>6 5 8 8 -1. + <_>6 9 8 4 2. + 0 + -2.8364539612084627e-003 + -0.6736599206924439 + 0.0655460134148598 + <_> + + <_> + + + + <_>11 0 6 12 -1. + <_>14 0 3 6 2. + <_>11 6 3 6 2. + 0 + -1.0111520532518625e-003 + 0.1405518949031830 + -0.3527033030986786 + <_> + + <_> + + + + <_>3 0 6 12 -1. + <_>3 0 3 6 2. + <_>6 6 3 6 2. + 0 + -2.5434889830648899e-003 + 0.1419118046760559 + -0.2835082113742828 + <_> + + <_> + + + + <_>10 10 4 8 -1. + <_>10 14 4 4 2. + 0 + 3.3014779910445213e-003 + 0.0465538911521435 + -0.4853729009628296 + <_> + + <_> + + + + <_>5 9 10 8 -1. + <_>5 9 5 4 2. + <_>10 13 5 4 2. + 0 + -0.0118029303848743 + -0.3795883059501648 + 0.0920719131827354 + <_> + + <_> + + + + <_>4 11 13 3 -1. + <_>4 12 13 1 3. + 0 + -1.3293370138853788e-003 + 0.1731142997741699 + -0.1689043939113617 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1495845019817352 + 0.0376266017556190 + -0.8001688122749329 + <_> + + <_> + + + + <_>14 0 4 7 -1. + <_>14 0 2 7 2. + 0 + 1.6352189704775810e-003 + -0.2085812985897064 + 0.1598542928695679 + <_> + + <_> + + + + <_>2 0 4 7 -1. + <_>4 0 2 7 2. + 0 + 1.5483440365642309e-003 + -0.1757826954126358 + 0.1756010055541992 + <_> + + <_> + + + + <_>6 5 14 6 -1. + <_>13 5 7 3 2. + <_>6 8 7 3 2. + 0 + -0.0356742590665817 + -0.4605753123760223 + 0.0439837910234928 + <_> + + <_> + + + + <_>0 6 16 6 -1. + <_>0 6 8 3 2. + <_>8 9 8 3 2. + 0 + -0.0145586999133229 + -0.3358741104602814 + 0.0839654803276062 + <_> + + <_> + + + + <_>12 6 5 9 -1. + <_>12 9 5 3 3. + 0 + 5.2891410887241364e-003 + -0.3563517928123474 + 0.0941019728779793 + <_> + + <_> + + + + <_>1 6 9 8 -1. + <_>1 10 9 4 2. + 0 + -9.8066125065088272e-004 + -0.4430184066295624 + 0.0643682107329369 + <_> + + <_> + + + + <_>13 10 7 6 -1. + <_>13 12 7 2 3. + 0 + -0.0407049991190434 + -0.5970032215118408 + 0.0178467705845833 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + 0.0296820402145386 + 0.0381270200014114 + -0.6679514050483704 + <_> + + <_> + + + + <_>9 5 2 14 -1. + <_>9 12 2 7 2. + 0 + -1.7841320368461311e-004 + 0.0741185769438744 + -0.3212124109268189 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 1.0050840210169554e-003 + -0.2064224928617477 + 0.1219410970807076 + <_> + + <_> + + + + <_>1 2 19 2 -1. + <_>1 3 19 1 2. + 0 + -1.6711819916963577e-003 + -0.2658641934394836 + 0.0718826875090599 + <_> + + <_> + + + + <_>0 0 4 13 -1. + <_>2 0 2 13 2. + 0 + -0.0699553191661835 + 0.5009706020355225 + -0.0521725490689278 + <_> + + <_> + + + + <_>14 1 6 9 -1. + <_>16 1 2 9 3. + 0 + 8.3406828343868256e-003 + -0.0695461109280586 + 0.1694944053888321 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>2 1 2 9 3. + 0 + 0.0154831595718861 + -0.0958656221628189 + 0.2873673141002655 + <_> + + <_> + + + + <_>0 11 20 9 -1. + <_>0 14 20 3 3. + 0 + -0.0426219888031483 + -0.2516076862812042 + 0.1138179004192352 + <_> + + <_> + + + + <_>0 11 8 4 -1. + <_>0 13 8 2 2. + 0 + 3.6459038965404034e-003 + 0.0701384693384171 + -0.4037627875804901 + <_> + + <_> + + + + <_>9 3 6 10 -1. + <_>11 3 2 10 3. + 0 + -1.8889949424192309e-003 + 0.1469555050134659 + -0.1787984967231751 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. 
+ 0 + -3.4749018959701061e-003 + -0.2498586028814316 + 0.1034967973828316 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + -0.0377922095358372 + -0.6575605869293213 + 0.0230075996369123 + <_> + + <_> + + + + <_>5 9 9 5 -1. + <_>8 9 3 5 3. + 0 + -4.0167139377444983e-004 + 0.1498796045780182 + -0.1452760994434357 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + 0.0348909907042980 + -0.0452078282833099 + 0.5129585266113281 + <_> + + <_> + + + + <_>5 3 6 9 -1. + <_>7 3 2 9 3. + 0 + -9.5964537467807531e-004 + 0.1468829065561295 + -0.1724454015493393 + <_> + + <_> + + + + <_>1 0 18 8 -1. + <_>10 0 9 4 2. + <_>1 4 9 4 2. + 0 + -0.0964613333344460 + -0.7181431055068970 + 0.0325879193842411 + <_> + + <_> + + + + <_>3 18 14 2 -1. + <_>3 19 14 1 2. + 0 + -1.1924919672310352e-003 + 0.1380531042814255 + -0.1416230946779251 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -0.0164200700819492 + -0.4195474088191986 + 0.0430406890809536 + <_> + + <_> + + + + <_>0 4 6 16 -1. + <_>0 4 3 8 2. + <_>3 12 3 8 2. + 0 + -0.0611122697591782 + 0.3776139020919800 + -0.0562647692859173 + <_> + + <_> + + + + <_>14 6 6 13 -1. + <_>14 6 3 13 2. + 0 + -0.0316821709275246 + 0.2103880941867828 + -0.0544750094413757 + <_> + + <_> + + + + <_>6 7 3 12 -1. + <_>6 13 3 6 2. + 0 + -7.4058552272617817e-003 + -0.1870995014905930 + 0.1087614968419075 + <_> + + <_> + + + + <_>11 11 5 6 -1. + <_>11 14 5 3 2. + 0 + -2.8892440604977310e-004 + 0.0697343721985817 + -0.2451675981283188 + <_> + + <_> + + + + <_>1 8 15 4 -1. + <_>6 8 5 4 3. + 0 + -7.9921782016754150e-003 + -0.2406989932060242 + 0.0880122706294060 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -6.4670671708881855e-003 + 0.2081995010375977 + -0.0690622106194496 + <_> + + <_> + + + + <_>6 4 6 7 -1. + <_>8 4 2 7 3. + 0 + -5.3345328196883202e-003 + 0.3246938884258270 + -0.0740588083863258 + <_> + + <_> + + + + <_>9 0 6 10 -1. + <_>12 0 3 5 2. + <_>9 5 3 5 2. + 0 + -6.7914440296590328e-003 + -0.1701446026563644 + 0.0373784489929676 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.1633761972188950 + 0.0196821000427008 + -0.9165204167366028 + <_> + + <_> + + + + <_>15 10 4 10 -1. + <_>15 10 2 10 2. + 0 + 0.1175965964794159 + 8.8446342851966619e-004 + -0.7805082798004150 + <_> + + <_> + + + + <_>1 10 4 10 -1. + <_>3 10 2 10 2. + 0 + -0.1168228015303612 + -0.9600989818572998 + 0.0170702803879976 + <_> + + <_> + + + + <_>5 0 10 16 -1. + <_>10 0 5 8 2. + <_>5 8 5 8 2. + 0 + 0.0468992516398430 + 0.0478918999433517 + -0.3204477131366730 + <_> + + <_> + + + + <_>3 6 13 3 -1. + <_>3 7 13 1 3. + 0 + -4.0058898739516735e-003 + 0.1141439005732536 + -0.1571146994829178 + <_> + + <_> + + + + <_>8 6 5 9 -1. + <_>8 9 5 3 3. + 0 + -4.4986438297200948e-005 + 0.2900809943675995 + -0.0424133315682411 + <_> + + <_> + + + + <_>4 6 6 12 -1. + <_>4 10 6 4 3. + 0 + 2.1421080455183983e-003 + -0.3313758075237274 + 0.0539436899125576 + <_> + + <_> + + + + <_>8 13 9 6 -1. + <_>8 16 9 3 2. + 0 + -0.0714087635278702 + -0.8851947188377380 + 9.3488330021500587e-003 + <_> + + <_> + + + + <_>0 5 12 6 -1. + <_>0 7 12 2 3. + 0 + -0.1373367011547089 + -0.8324189782142639 + 0.0178003292530775 + <_> + + <_> + + + + <_>4 8 13 3 -1. + <_>4 9 13 1 3. + 0 + 6.1765720602124929e-004 + -0.1941922008991242 + 0.0680346190929413 + <_> + + <_> + + + + <_>6 6 4 12 -1. + <_>6 12 4 6 2. + 0 + -0.0671707987785339 + -0.5724321007728577 + 0.0303336307406425 + <_> + + <_> + + + + <_>4 15 13 3 -1. + <_>4 16 13 1 3. 
+ 0 + 2.4611391127109528e-003 + -0.1057017967104912 + 0.1880190074443817 + <_> + + <_> + + + + <_>2 14 13 3 -1. + <_>2 15 13 1 3. + 0 + 5.0573959015309811e-003 + -0.0659217536449432 + 0.2986895143985748 + <_> + + <_> + + + + <_>11 1 4 14 -1. + <_>11 1 2 14 2. + 0 + 0.0142137799412012 + 0.0637678802013397 + -0.2121724933385849 + <_> + + <_> + + + + <_>3 6 12 4 -1. + <_>7 6 4 4 3. + 0 + -2.0629619248211384e-003 + -0.2671405076980591 + 0.0768175721168518 + <_> + + <_> + + + + <_>8 0 4 7 -1. + <_>8 0 2 7 2. + 0 + 0.0337877795100212 + 0.0217741504311562 + -0.7493813037872315 + <_> + + <_> + + + + <_>0 0 4 8 -1. + <_>2 0 2 8 2. + 0 + -0.0273718703538179 + 0.3200806081295013 + -0.0596225112676620 + <_> + + <_> + + + + <_>2 11 16 9 -1. + <_>2 14 16 3 3. + 0 + 0.0283103492110968 + 0.0441506095230579 + -0.4427869915962219 + <_> + + <_> + + + + <_>0 4 6 7 -1. + <_>2 4 2 7 3. + 0 + 3.7205279804766178e-003 + -0.1313648968935013 + 0.1544770002365112 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + 2.3320990148931742e-003 + -0.1084922999143601 + 0.2268289029598236 + <_> + + <_> + + + + <_>0 10 16 4 -1. + <_>0 10 8 2 2. + <_>8 12 8 2 2. + 0 + 7.6775359921157360e-003 + 0.0495203882455826 + -0.3885476887226105 + <_> + + <_> + + + + <_>3 1 14 2 -1. + <_>3 2 14 1 2. + 0 + -2.9863099916838109e-004 + -0.1963256001472473 + 0.0834489315748215 + <_> + + <_> + + + + <_>4 10 5 9 -1. + <_>4 13 5 3 3. + 0 + 6.1346050351858139e-003 + 0.0514332503080368 + -0.3083161115646362 + <_> + + <_> + + + + <_>2 14 16 4 -1. + <_>10 14 8 2 2. + <_>2 16 8 2 2. + 0 + 0.0310907792299986 + 0.0241807997226715 + -0.6018446087837219 + <_> + + <_> + + + + <_>0 0 19 8 -1. + <_>0 4 19 4 2. + 0 + 0.2932040095329285 + 0.0118110300973058 + -0.9625393152236939 + <_> + + <_> + + + + <_>10 10 6 5 -1. + <_>10 10 3 5 2. + 0 + -6.6321907797828317e-004 + 0.1024527028203011 + -0.1420076042413712 + <_> + + <_> + + + + <_>1 1 18 15 -1. + <_>7 1 6 15 3. + 0 + 0.0447363592684269 + -0.1123879998922348 + 0.1739203929901123 + <_> + + <_> + + + + <_>10 10 6 5 -1. + <_>10 10 3 5 2. + 0 + -0.0151533903554082 + -0.1610036045312882 + 0.0311169493943453 + <_> + + <_> + + + + <_>4 7 4 8 -1. + <_>6 7 2 8 2. + 0 + -1.1029309825971723e-003 + 0.1212851032614708 + -0.1618229001760483 + <_> + + <_> + + + + <_>17 3 3 14 -1. + <_>18 3 1 14 3. + 0 + -2.8973959852010012e-003 + 0.1082762032747269 + -0.0536213107407093 + <_> + + <_> + + + + <_>4 6 12 12 -1. + <_>4 6 6 6 2. + <_>10 12 6 6 2. + 0 + -9.5785204321146011e-003 + -0.1680832058191299 + 0.0850536227226257 + <_> + + <_> + + + + <_>12 6 8 14 -1. + <_>16 6 4 7 2. + <_>12 13 4 7 2. + 0 + 0.0990923866629601 + -0.0154698798432946 + 0.4113850891590118 + <_> + + <_> + + + + <_>0 6 8 14 -1. + <_>0 6 4 7 2. + <_>4 13 4 7 2. + 0 + 0.0372297801077366 + -0.0528659708797932 + 0.3180429935455322 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0247160494327545 + -0.4033941030502319 + 0.0299648400396109 + <_> + + <_> + + + + <_>2 4 6 16 -1. + <_>2 4 3 8 2. + <_>5 12 3 8 2. + 0 + -0.0989653021097183 + 0.5851048231124878 + -0.0269241705536842 + <_> + + <_> + + + + <_>14 11 5 9 -1. + <_>14 14 5 3 3. + 0 + -9.6337851136922836e-003 + -0.1746747046709061 + 0.0751268714666367 + <_> + + <_> + + + + <_>3 3 14 3 -1. + <_>3 4 14 1 3. + 0 + 1.0483879595994949e-003 + -0.1372846961021423 + 0.1068458035588265 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0425238497555256 + 0.0165786296129227 + -0.5633273720741272 + <_> + + <_> + + + + <_>5 1 6 16 -1. + <_>5 1 3 8 2. + <_>8 9 3 8 2. 
+ 0 + -3.0866260640323162e-003 + 0.0752648934721947 + -0.1947654038667679 + <_> + + <_> + + + + <_>7 7 6 10 -1. + <_>9 7 2 10 3. + 0 + 0.0286433994770050 + -0.0675781369209290 + 0.2576622068881989 + <_> + + <_> + + + + <_>5 9 4 11 -1. + <_>7 9 2 11 2. + 0 + -0.0106273395940661 + -0.2238461971282959 + 0.0721724480390549 + <_> + + <_> + + + + <_>10 9 6 6 -1. + <_>10 9 3 6 2. + 0 + 4.6080970205366611e-003 + 0.0508760809898376 + -0.1407632976770401 + <_> + + <_> + + + + <_>0 3 3 14 -1. + <_>1 3 1 14 3. + 0 + 2.9914160259068012e-003 + -0.0973379835486412 + 0.1766595989465714 + <_> + + <_> + + + + <_>10 9 6 6 -1. + <_>10 9 3 6 2. + 0 + -7.7902628108859062e-003 + -0.0980082377791405 + 0.0374030694365501 + <_> + + <_> + + + + <_>5 10 4 7 -1. + <_>7 10 2 7 2. + 0 + -6.1339238891378045e-004 + 0.0990360230207443 + -0.1626594960689545 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -0.0102343196049333 + 0.2365497946739197 + -0.0378171317279339 + <_> + + <_> + + + + <_>2 5 16 8 -1. + <_>2 9 16 4 2. + 0 + -0.0118674095720053 + -0.8503506779670715 + 0.0190632995218039 + <_> + + <_> + + + + <_>6 2 12 10 -1. + <_>6 7 12 5 2. + 0 + 4.1437768377363682e-003 + 0.0878783464431763 + -0.0944046303629875 + <_> + + <_> + + + + <_>0 7 7 6 -1. + <_>0 9 7 2 3. + 0 + -5.1355729810893536e-003 + -0.3569979965686798 + 0.0415464900434017 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -1.5296200290322304e-003 + 0.0776945725083351 + -0.0431865788996220 + <_> + + <_> + + + + <_>0 7 2 13 -1. + <_>1 7 1 13 2. + 0 + -2.7581020258367062e-003 + 0.1906588971614838 + -0.0806799009442329 + <_> + + <_> + + + + <_>12 3 8 12 -1. + <_>12 3 4 12 2. + 0 + 0.2837516963481903 + 6.2291761860251427e-003 + -0.8857815265655518 + <_> + + <_> + + + + <_>0 3 8 12 -1. + <_>4 3 4 12 2. + 0 + -0.2461249977350235 + -0.7054811120033264 + 0.0217989608645439 + <_> + + <_> + + + + <_>15 3 5 6 -1. + <_>15 6 5 3 2. + 0 + -3.9965631440281868e-003 + -0.1971096992492676 + 0.0803006067872047 + <_> + + <_> + + + + <_>1 0 18 4 -1. + <_>7 0 6 4 3. + 0 + -8.4951231256127357e-003 + 0.2129660993814468 + -0.0829746276140213 + <_> + + <_> + + + + <_>2 11 18 4 -1. + <_>11 11 9 2 2. + <_>2 13 9 2 2. + 0 + 0.0472064800560474 + 9.7466083243489265e-003 + -0.7006629705429077 + <_> + + <_> + + + + <_>0 11 18 4 -1. + <_>0 11 9 2 2. + <_>9 13 9 2 2. + 0 + 3.7802560254931450e-003 + 0.0774788931012154 + -0.2337200045585632 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 0.0446316711604595 + -0.0214647706598043 + 0.3213633894920349 + <_> + + <_> + + + + <_>4 1 9 12 -1. + <_>4 7 9 6 2. + 0 + 6.8157288478687406e-004 + 0.1217707023024559 + -0.1206320002675057 + <_> + + <_> + + + + <_>15 3 5 6 -1. + <_>15 6 5 3 2. + 0 + -0.0697124525904655 + -0.9482805132865906 + 0.0120174400508404 + <_> + + <_> + + + + <_>0 3 5 6 -1. + <_>0 6 5 3 2. + 0 + -4.8821792006492615e-003 + -0.2177484035491943 + 0.0771133229136467 + <_> + + <_> + + + + <_>6 6 8 4 -1. + <_>6 8 8 2 2. + 0 + 3.4387600608170033e-003 + -0.1809356957674027 + 0.0935955569148064 + <_> + + <_> + + + + <_>0 9 7 6 -1. + <_>0 11 7 2 3. + 0 + -0.0252157002687454 + -0.5571495890617371 + 0.0274208206683397 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + 7.4309771880507469e-003 + -0.0466304905712605 + 0.2102489024400711 + <_> + + <_> + + + + <_>5 2 4 13 -1. + <_>7 2 2 13 2. + 0 + -0.0157899595797062 + -0.3344314098358154 + 0.0462916903197765 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. 
+ 0 + 3.5080160014331341e-003 + -0.0646126121282578 + 0.2273766994476318 + <_> + + <_> + + + + <_>0 7 20 2 -1. + <_>0 8 20 1 2. + 0 + 0.0442912615835667 + 0.0226427298039198 + -0.7068312168121338 + <_> + + <_> + + + + <_>11 0 9 5 -1. + <_>14 0 3 5 3. + 0 + 0.0191081892699003 + -0.0358933210372925 + 0.1461369991302490 + -1.6909840106964111 + 14 + -1 + <_> + + + <_> + + <_> + + + + <_>0 3 10 6 -1. + <_>0 3 5 3 2. + <_>5 6 5 3 2. + 0 + -0.0166366696357727 + 0.2596651911735535 + -0.4116224944591522 + <_> + + <_> + + + + <_>6 4 9 5 -1. + <_>9 4 3 5 3. + 0 + 0.0298658106476069 + -0.3318266868591309 + 0.2054599970579147 + <_> + + <_> + + + + <_>3 12 8 8 -1. + <_>3 12 4 4 2. + <_>7 16 4 4 2. + 0 + 9.1892024502158165e-003 + -0.3448179960250855 + 0.1814869046211243 + <_> + + <_> + + + + <_>4 7 15 3 -1. + <_>9 7 5 3 3. + 0 + 2.8450509998947382e-003 + -0.3290483057498932 + 0.0943922922015190 + <_> + + <_> + + + + <_>0 4 6 9 -1. + <_>3 4 3 9 2. + 0 + 0.0342576391994953 + -0.3221279978752136 + 0.1733205020427704 + <_> + + <_> + + + + <_>4 7 12 4 -1. + <_>4 9 12 2 2. + 0 + 0.0343677103519440 + -0.3259381055831909 + 0.1747326999902725 + <_> + + <_> + + + + <_>7 6 4 12 -1. + <_>7 12 4 6 2. + 0 + 9.0881884098052979e-003 + 0.1052701026201248 + -0.4813137054443359 + <_> + + <_> + + + + <_>6 1 8 15 -1. + <_>6 6 8 5 3. + 0 + -5.0939731299877167e-003 + 0.1737498939037323 + -0.2788312137126923 + <_> + + <_> + + + + <_>1 7 15 3 -1. + <_>6 7 5 3 3. + 0 + 1.1773620499297976e-003 + -0.4221720099449158 + 0.1023176014423370 + <_> + + <_> + + + + <_>4 9 12 5 -1. + <_>8 9 4 5 3. + 0 + 0.0367976091802120 + 0.1122936978936195 + -0.3840919137001038 + <_> + + <_> + + + + <_>6 6 8 8 -1. + <_>6 10 8 4 2. + 0 + -7.2484882548451424e-004 + -0.4479512870311737 + 0.0850795879960060 + <_> + + <_> + + + + <_>12 12 8 6 -1. + <_>12 14 8 2 3. + 0 + 0.0126032102853060 + 0.0604750402271748 + -0.3532750904560089 + <_> + + <_> + + + + <_>2 0 14 2 -1. + <_>2 1 14 1 2. + 0 + 5.1925552543252707e-004 + -0.3191638886928558 + 0.1190337017178536 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0132441800087690 + 0.2197573035955429 + -0.0950255915522575 + <_> + + <_> + + + + <_>1 3 16 2 -1. + <_>1 4 16 1 2. + 0 + -2.7882310096174479e-003 + -0.2729480862617493 + 0.1241976991295815 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 0.0265914704650640 + 0.0604520104825497 + -0.3963702917098999 + <_> + + <_> + + + + <_>0 12 8 6 -1. + <_>0 14 8 2 3. + 0 + 0.0125052100047469 + 0.0786311030387878 + -0.4030388891696930 + <_> + + <_> + + + + <_>5 11 13 3 -1. + <_>5 12 13 1 3. + 0 + -0.0138573404401541 + 0.2575975060462952 + -0.1035145968198776 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.0720997527241707 + -0.5519378185272217 + 0.0600208006799221 + <_> + + <_> + + + + <_>2 8 17 3 -1. + <_>2 9 17 1 3. + 0 + -9.8338630050420761e-004 + -0.3191519975662231 + 0.0879776477813721 + <_> + + <_> + + + + <_>1 1 18 6 -1. + <_>1 1 9 3 2. + <_>10 4 9 3 2. + 0 + -0.0583901703357697 + -0.5598897933959961 + 0.0529901906847954 + <_> + + <_> + + + + <_>1 1 19 6 -1. + <_>1 3 19 2 3. + 0 + 4.2504342272877693e-003 + -0.2889725863933563 + 0.0928165167570114 + <_> + + <_> + + + + <_>4 6 12 6 -1. + <_>4 6 6 3 2. + <_>10 9 6 3 2. + 0 + -0.0323325209319592 + -0.4871352016925812 + 0.0607876293361187 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0473656393587589 + -0.1011155024170876 + 0.3259778022766113 + <_> + + <_> + + + + <_>3 18 13 2 -1. + <_>3 19 13 1 2. 
+ 0 + -3.8943330291658640e-003 + 0.1917316019535065 + -0.1672938019037247 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0577291995286942 + 0.0363432914018631 + -0.7316113114356995 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + -0.0189255401492119 + 0.3247149884700775 + -0.0861880630254745 + <_> + + <_> + + + + <_>10 12 8 6 -1. + <_>10 14 8 2 3. + 0 + -0.0396796017885208 + -0.4182668030261993 + 0.0533542111515999 + <_> + + <_> + + + + <_>0 0 18 4 -1. + <_>0 0 9 2 2. + <_>9 2 9 2 2. + 0 + -0.0207336507737637 + -0.4120518863201141 + 0.0635968521237373 + <_> + + <_> + + + + <_>4 6 15 5 -1. + <_>9 6 5 5 3. + 0 + 0.1538791060447693 + 0.0199541505426168 + -0.5764328837394714 + <_> + + <_> + + + + <_>0 7 15 4 -1. + <_>5 7 5 4 3. + 0 + 0.1213126033544540 + 0.0445164591073990 + -0.5909324288368225 + <_> + + <_> + + + + <_>12 4 4 10 -1. + <_>12 9 4 5 2. + 0 + 2.7478559786686674e-005 + -0.4068849980831146 + 0.0528280995786190 + <_> + + <_> + + + + <_>0 6 18 12 -1. + <_>0 6 9 6 2. + <_>9 12 9 6 2. + 0 + 0.0888936817646027 + 0.0519852414727211 + -0.5022898912429810 + <_> + + <_> + + + + <_>16 5 2 14 -1. + <_>16 12 2 7 2. + 0 + 2.8169099241495132e-003 + 0.0677264332771301 + -0.1358204931020737 + <_> + + <_> + + + + <_>2 9 5 6 -1. + <_>2 12 5 3 2. + 0 + -1.7215269326698035e-004 + 0.0896169170737267 + -0.2958936989307404 + <_> + + <_> + + + + <_>12 0 3 19 -1. + <_>13 0 1 19 3. + 0 + -0.0318306200206280 + -0.5643360018730164 + 0.0228222496807575 + <_> + + <_> + + + + <_>0 10 9 6 -1. + <_>0 12 9 2 3. + 0 + -0.0633343309164047 + -0.8237169981002808 + 0.0275761205703020 + <_> + + <_> + + + + <_>11 12 7 6 -1. + <_>11 14 7 2 3. + 0 + -0.0690328180789948 + -0.6978821754455566 + 3.3770920708775520e-003 + <_> + + <_> + + + + <_>5 0 4 7 -1. + <_>7 0 2 7 2. + 0 + 2.1021519787609577e-003 + -0.2724404931068420 + 0.0869228914380074 + <_> + + <_> + + + + <_>12 0 3 19 -1. + <_>13 0 1 19 3. + 0 + 0.0340657792985439 + 0.0176705792546272 + -0.4300132095813751 + <_> + + <_> + + + + <_>0 15 14 4 -1. + <_>0 15 7 2 2. + <_>7 17 7 2 2. + 0 + 8.1215314567089081e-003 + -0.1594267040491104 + 0.1625607013702393 + <_> + + <_> + + + + <_>4 5 14 6 -1. + <_>4 7 14 2 3. + 0 + -1.6329119680449367e-003 + 0.0420095883309841 + -0.3292345106601715 + <_> + + <_> + + + + <_>3 1 6 7 -1. + <_>5 1 2 7 3. + 0 + -0.0391103290021420 + -0.6066625118255615 + 0.0412488505244255 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + -0.0231888704001904 + -0.5536541938781738 + 0.0173155106604099 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>0 4 6 3 3. + 0 + -0.0629441589117050 + -0.5385370850563049 + 0.0417583510279655 + <_> + + <_> + + + + <_>11 12 7 6 -1. + <_>11 14 7 2 3. + 0 + -0.0854143723845482 + -0.9312245249748230 + -9.1123272432014346e-004 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + -0.0419633388519287 + -0.5672069787979126 + 0.0391757003962994 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 0.0111656198278070 + -0.0678158104419708 + 0.2900384068489075 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0137307699769735 + 0.3232809901237488 + -0.1059283986687660 + <_> + + <_> + + + + <_>8 9 6 5 -1. + <_>8 9 3 5 2. + 0 + -0.0757930502295494 + 0.5554572939872742 + -3.2934208866208792e-003 + <_> + + <_> + + + + <_>6 9 6 5 -1. + <_>9 9 3 5 2. + 0 + 2.7008100878447294e-003 + 0.1531118005514145 + -0.1660418063402176 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + 0.0101646604016423 + 0.0764046311378479 + -0.2874574959278107 + <_> + + <_> + + + + <_>5 8 10 10 -1. 
+ <_>5 8 5 5 2. + <_>10 13 5 5 2. + 0 + -0.0598081499338150 + -0.7348673939704895 + 0.0303708203136921 + <_> + + <_> + + + + <_>1 5 18 10 -1. + <_>10 5 9 5 2. + <_>1 10 9 5 2. + 0 + 0.0964476168155670 + 0.0261988397687674 + -0.6600142717361450 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + 0.0323502197861671 + 0.0414077192544937 + -0.4744249880313873 + <_> + + <_> + + + + <_>1 0 18 6 -1. + <_>7 0 6 6 3. + 0 + 0.2371727973222733 + -0.0959410816431046 + 0.2407049983739853 + <_> + + <_> + + + + <_>4 3 5 14 -1. + <_>4 10 5 7 2. + 0 + -0.0409424714744091 + -0.4058212041854858 + 0.0643275603652000 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0344091616570950 + -0.7484955191612244 + 0.0225207600742579 + <_> + + <_> + + + + <_>0 0 8 10 -1. + <_>0 5 8 5 2. + 0 + 0.1384737938642502 + 0.0284723099321127 + -0.7061212062835693 + <_> + + <_> + + + + <_>7 2 6 6 -1. + <_>7 5 6 3 2. + 0 + 0.0465671606361866 + -0.0411681197583675 + 0.6996256709098816 + <_> + + <_> + + + + <_>0 0 19 3 -1. + <_>0 1 19 1 3. + 0 + -0.0304926391690969 + -0.6511697769165039 + 0.0399952791631222 + <_> + + <_> + + + + <_>8 0 8 6 -1. + <_>8 2 8 2 3. + 0 + 8.6345896124839783e-003 + -0.1120797023177147 + 0.0772416964173317 + <_> + + <_> + + + + <_>7 5 6 11 -1. + <_>9 5 2 11 3. + 0 + 0.0318459682166576 + -0.1155207976698875 + 0.1753938943147659 + <_> + + <_> + + + + <_>4 3 12 10 -1. + <_>8 3 4 10 3. + 0 + 0.1712459027767181 + 0.0506879799067974 + -0.4704223871231079 + <_> + + <_> + + + + <_>0 4 18 4 -1. + <_>0 6 18 2 2. + 0 + 5.2879499271512032e-003 + 0.0650414973497391 + -0.2889401912689209 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + 0.0100607797503471 + 0.0636892169713974 + -0.2608188986778259 + <_> + + <_> + + + + <_>0 13 7 6 -1. + <_>0 15 7 2 3. + 0 + 0.0333307683467865 + 0.0348092988133430 + -0.5784546732902527 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + -0.0528022795915604 + -0.6852104067802429 + 0.0175837799906731 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. + 0 + -0.0154521996155381 + 0.3139589130878449 + -0.0776115432381630 + <_> + + <_> + + + + <_>10 4 4 14 -1. + <_>12 4 2 7 2. + <_>10 11 2 7 2. + 0 + -6.5528601408004761e-004 + 0.0561813600361347 + -0.1518439054489136 + <_> + + <_> + + + + <_>2 10 7 6 -1. + <_>2 12 7 2 3. + 0 + 0.0370621494948864 + 0.0289285499602556 + -0.7048760056495667 + <_> + + <_> + + + + <_>10 4 4 14 -1. + <_>12 4 2 7 2. + <_>10 11 2 7 2. + 0 + -0.0577280893921852 + -0.4319241046905518 + 9.2153800651431084e-003 + <_> + + <_> + + + + <_>6 4 4 14 -1. + <_>6 4 2 7 2. + <_>8 11 2 7 2. + 0 + -2.2813139948993921e-003 + 0.1020030006766319 + -0.2165704071521759 + <_> + + <_> + + + + <_>14 3 6 7 -1. + <_>16 3 2 7 3. + 0 + 0.0265132300555706 + -0.0836509466171265 + 0.3074035942554474 + <_> + + <_> + + + + <_>6 6 8 4 -1. + <_>6 8 8 2 2. + 0 + 0.0736221969127655 + 0.0306830499321222 + -0.7191023230552673 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + -0.0130223501473665 + -0.3638656139373779 + 0.0253672096878290 + <_> + + <_> + + + + <_>6 0 2 15 -1. + <_>7 0 1 15 2. + 0 + -0.0133198201656342 + -0.5188406109809876 + 0.0359350293874741 + <_> + + <_> + + + + <_>12 1 3 17 -1. + <_>13 1 1 17 3. + 0 + 2.3190369829535484e-003 + -0.0615152008831501 + 0.0711004510521889 + <_> + + <_> + + + + <_>5 1 3 17 -1. + <_>6 1 1 17 3. + 0 + -0.0213728304952383 + -0.5024757981300354 + 0.0398448109626770 + <_> + + <_> + + + + <_>9 4 3 13 -1. + <_>10 4 1 13 3. 
+ 0 + 0.0244745891541243 + -0.0479608587920666 + 0.2693111002445221 + <_> + + <_> + + + + <_>9 3 2 14 -1. + <_>10 3 1 14 2. + 0 + -0.0106798699125648 + 0.3147428035736084 + -0.0847589522600174 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0489617995917797 + 0.0273580998182297 + -0.3822936117649078 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + 0.0323763489723206 + -0.0470909997820854 + 0.4598523080348969 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0109952203929424 + -0.1854424029588699 + 0.0360069796442986 + <_> + + <_> + + + + <_>0 5 20 4 -1. + <_>10 5 10 4 2. + 0 + 0.1762603074312210 + 0.0243751592934132 + -0.7768660187721252 + <_> + + <_> + + + + <_>13 2 7 6 -1. + <_>13 4 7 2 3. + 0 + 0.0797784924507141 + 3.3787339925765991e-003 + -0.7292888760566711 + <_> + + <_> + + + + <_>0 2 19 2 -1. + <_>0 3 19 1 2. + 0 + -0.0113292103633285 + -0.4639767110347748 + 0.0393808297812939 + <_> + + <_> + + + + <_>10 9 10 11 -1. + <_>10 9 5 11 2. + 0 + 0.0634313002228737 + -0.0970740616321564 + 0.1011886969208717 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + -0.0126918498426676 + 0.2814230024814606 + -0.0721057131886482 + <_> + + <_> + + + + <_>3 0 15 9 -1. + <_>8 0 5 9 3. + 0 + -0.0782384127378464 + 0.5740063786506653 + -0.0184005498886108 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 0.0395325198769569 + 0.0431549884378910 + -0.5232784152030945 + <_> + + <_> + + + + <_>3 4 14 2 -1. + <_>3 5 14 1 2. + 0 + 0.0153557797893882 + -0.0473161786794662 + 0.4692577123641968 + <_> + + <_> + + + + <_>0 11 6 7 -1. + <_>2 11 2 7 3. + 0 + -6.4018620178103447e-003 + 0.1329723000526428 + -0.1436561942100525 + <_> + + <_> + + + + <_>10 9 10 11 -1. + <_>10 9 5 11 2. + 0 + -0.1056734025478363 + 0.2020632028579712 + -0.0144064603373408 + <_> + + <_> + + + + <_>3 13 6 7 -1. + <_>5 13 2 7 3. + 0 + 0.0281638391315937 + 0.0711809918284416 + -0.3103423118591309 + <_> + + <_> + + + + <_>3 8 15 3 -1. + <_>8 8 5 3 3. + 0 + 0.1170298010110855 + 0.0116199301555753 + -0.7153096199035645 + <_> + + <_> + + + + <_>0 1 8 8 -1. + <_>0 1 4 4 2. + <_>4 5 4 4 2. + 0 + -0.0389215685427189 + 0.2441267967224121 + -0.0822448506951332 + <_> + + <_> + + + + <_>9 8 10 4 -1. + <_>9 8 5 4 2. + 0 + -0.0284354891628027 + -0.3678517043590546 + 0.0384888201951981 + <_> + + <_> + + + + <_>0 0 18 6 -1. + <_>6 0 6 6 3. + 0 + -0.0363935492932796 + 0.5220673084259033 + -0.0470793806016445 + -1.8724700212478638 + 15 + -1 + <_> + + + <_> + + <_> + + + + <_>4 3 12 9 -1. + <_>4 6 12 3 3. + 0 + -0.0214285105466843 + 0.1901407986879349 + -0.5061274170875549 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.0205961298197508 + -0.2928322851657867 + 0.2465517967939377 + <_> + + <_> + + + + <_>8 6 4 10 -1. + <_>8 11 4 5 2. + 0 + 2.7893469668924809e-003 + 0.1108592003583908 + -0.4690982997417450 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 4.4722640886902809e-003 + -0.2825078070163727 + 0.1456467062234879 + <_> + + <_> + + + + <_>0 3 17 2 -1. + <_>0 4 17 1 2. + 0 + -1.0463190264999866e-003 + -0.2660326957702637 + 0.1281591951847076 + <_> + + <_> + + + + <_>12 6 5 6 -1. + <_>12 9 5 3 2. + 0 + 1.5831940108910203e-003 + -0.6346729993820190 + 0.0710038319230080 + <_> + + <_> + + + + <_>5 6 8 8 -1. + <_>5 6 4 4 2. + <_>9 10 4 4 2. + 0 + -7.3153319135599304e-006 + 0.1024893000721932 + -0.3481596112251282 + <_> + + <_> + + + + <_>9 10 7 6 -1. + <_>9 12 7 2 3. + 0 + 5.4208859801292419e-003 + 0.0598305314779282 + -0.3138777911663055 + <_> + + <_> + + + + <_>2 14 10 6 -1. 
+ <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 1.2645759852603078e-003 + -0.2270915061235428 + 0.1316000968217850 + <_> + + <_> + + + + <_>13 6 5 9 -1. + <_>13 9 5 3 3. + 0 + 3.0235300073400140e-005 + -0.2641330957412720 + 0.0289180800318718 + <_> + + <_> + + + + <_>2 6 5 9 -1. + <_>2 9 5 3 3. + 0 + 1.5345469582825899e-003 + -0.4071195125579834 + 0.0697878375649452 + <_> + + <_> + + + + <_>14 2 6 5 -1. + <_>14 2 3 5 2. + 0 + 6.8222070112824440e-003 + -0.1506972014904022 + 0.2188841998577118 + <_> + + <_> + + + + <_>5 6 6 11 -1. + <_>8 6 3 11 2. + 0 + -9.8558319732546806e-003 + -0.3544136881828308 + 0.0860263928771019 + <_> + + <_> + + + + <_>14 2 6 5 -1. + <_>14 2 3 5 2. + 0 + -0.0298904292285442 + 0.2211744040250778 + -0.0286110099405050 + <_> + + <_> + + + + <_>0 3 10 6 -1. + <_>0 3 5 3 2. + <_>5 6 5 3 2. + 0 + -2.6285760104656219e-003 + 0.0982041805982590 + -0.2714973986148834 + <_> + + <_> + + + + <_>6 12 13 2 -1. + <_>6 13 13 1 2. + 0 + 3.2039839425124228e-004 + -0.0985404625535011 + 0.1878553926944733 + <_> + + <_> + + + + <_>5 11 10 6 -1. + <_>5 14 10 3 2. + 0 + 1.1079469695687294e-003 + 0.0640345364809036 + -0.4308266937732697 + <_> + + <_> + + + + <_>12 11 8 8 -1. + <_>12 15 8 4 2. + 0 + -0.0915383696556091 + -0.5244092941284180 + 0.0122504895552993 + <_> + + <_> + + + + <_>4 0 12 7 -1. + <_>8 0 4 7 3. + 0 + 0.0432058982551098 + 0.0966558679938316 + -0.2680931091308594 + <_> + + <_> + + + + <_>5 15 13 2 -1. + <_>5 16 13 1 2. + 0 + 9.1920839622616768e-004 + -0.1326016038656235 + 0.1235831975936890 + <_> + + <_> + + + + <_>0 12 20 6 -1. + <_>0 12 10 3 2. + <_>10 15 10 3 2. + 0 + 8.9521165937185287e-003 + 0.0864454209804535 + -0.2321943044662476 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 5.6190020404756069e-003 + -0.0603040494024754 + 0.1507066935300827 + <_> + + <_> + + + + <_>4 0 12 6 -1. + <_>4 2 12 2 3. + 0 + 3.7380240391939878e-003 + -0.1865254044532776 + 0.1301178038120270 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + -0.0444169603288174 + 0.1903675943613052 + -0.0175271593034267 + <_> + + <_> + + + + <_>0 0 6 10 -1. + <_>0 0 3 5 2. + <_>3 5 3 5 2. + 0 + 0.0198327396064997 + -0.0535276308655739 + 0.4023813009262085 + <_> + + <_> + + + + <_>5 6 10 12 -1. + <_>10 6 5 6 2. + <_>5 12 5 6 2. + 0 + 0.0121556101366878 + 0.0912885665893555 + -0.2686276137828827 + <_> + + <_> + + + + <_>1 15 15 4 -1. + <_>1 17 15 2 2. + 0 + 0.0505323410034180 + 0.0312951803207397 + -0.6283653974533081 + <_> + + <_> + + + + <_>10 5 9 6 -1. + <_>10 7 9 2 3. + 0 + -1.7635909607633948e-003 + 0.0561852194368839 + -0.2186100929975510 + <_> + + <_> + + + + <_>4 2 6 7 -1. + <_>6 2 2 7 3. + 0 + 4.9412921071052551e-003 + 0.0559158995747566 + -0.3595438897609711 + <_> + + <_> + + + + <_>10 5 10 6 -1. + <_>10 7 10 2 3. + 0 + -0.1153611987829208 + -0.5316873788833618 + 7.9654296860098839e-003 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -2.0473708864301443e-003 + 0.0796330124139786 + -0.2538990080356598 + <_> + + <_> + + + + <_>8 9 12 4 -1. + <_>12 9 4 4 3. + 0 + 4.7814860008656979e-003 + -0.0941498801112175 + 0.1163100972771645 + <_> + + <_> + + + + <_>0 0 4 8 -1. + <_>2 0 2 8 2. + 0 + 0.0212749391794205 + -0.0474866107106209 + 0.3756451904773712 + <_> + + <_> + + + + <_>0 1 20 3 -1. + <_>0 2 20 1 3. + 0 + 5.1177050918340683e-003 + 0.0749366432428360 + -0.2610535025596619 + <_> + + <_> + + + + <_>0 0 4 17 -1. + <_>2 0 2 17 2. + 0 + -0.0139520000666380 + 0.2396017014980316 + -0.0968367680907249 + <_> + + <_> + + + + <_>5 6 12 3 -1. 
+ <_>5 6 6 3 2. + 0 + -0.0138281797990203 + -0.3960526883602142 + 0.0586397498846054 + <_> + + <_> + + + + <_>6 7 3 12 -1. + <_>6 13 3 6 2. + 0 + -0.0471170209348202 + -0.5571753978729248 + 0.0316786505281925 + <_> + + <_> + + + + <_>14 2 6 5 -1. + <_>14 2 3 5 2. + 0 + 0.0105155901983380 + -0.0439305305480957 + 0.0852779597043991 + <_> + + <_> + + + + <_>0 2 6 5 -1. + <_>3 2 3 5 2. + 0 + 4.0591089054942131e-003 + -0.1077421978116036 + 0.1628309935331345 + <_> + + <_> + + + + <_>1 3 18 16 -1. + <_>7 3 6 16 3. + 0 + -0.0303762108087540 + 0.2099737972021103 + -0.0994177907705307 + <_> + + <_> + + + + <_>4 4 11 10 -1. + <_>4 9 11 5 2. + 0 + -6.6932791378349066e-004 + -0.3486334085464478 + 0.0591480210423470 + <_> + + <_> + + + + <_>6 1 13 3 -1. + <_>6 2 13 1 3. + 0 + -0.0146650895476341 + -0.4378654062747955 + 0.0280081797391176 + <_> + + <_> + + + + <_>3 4 8 10 -1. + <_>3 4 4 5 2. + <_>7 9 4 5 2. + 0 + -3.5847770050168037e-003 + 0.0966115370392799 + -0.1794831007719040 + <_> + + <_> + + + + <_>6 7 14 4 -1. + <_>13 7 7 2 2. + <_>6 9 7 2 2. + 0 + -5.5043050087988377e-003 + -0.3354665935039520 + 0.0750578492879868 + <_> + + <_> + + + + <_>1 1 8 6 -1. + <_>1 3 8 2 3. + 0 + 1.0141800157725811e-003 + -0.1860285997390747 + 0.0868800505995750 + <_> + + <_> + + + + <_>15 3 5 9 -1. + <_>15 6 5 3 3. + 0 + 0.0146423997357488 + 0.0266520902514458 + -0.2600268125534058 + <_> + + <_> + + + + <_>0 3 5 9 -1. + <_>0 6 5 3 3. + 0 + -5.8538499288260937e-003 + -0.1499318927526474 + 0.1268464028835297 + <_> + + <_> + + + + <_>14 6 4 14 -1. + <_>16 6 2 7 2. + <_>14 13 2 7 2. + 0 + -0.0534721687436104 + 0.5213112235069275 + -0.0203757490962744 + <_> + + <_> + + + + <_>0 1 6 12 -1. + <_>2 1 2 12 3. + 0 + -0.0766959264874458 + 0.4581707119941711 + -0.0348769500851631 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + -5.9094227617606521e-004 + 0.1157049983739853 + -0.1296696960926056 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + -0.0435433611273766 + -0.8213273286819458 + 0.0205355994403362 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + 0.0506917014718056 + -0.0362806394696236 + 0.4021244943141937 + <_> + + <_> + + + + <_>0 13 18 7 -1. + <_>6 13 6 7 3. + 0 + 0.0131246699020267 + -0.0836142674088478 + 0.2044152021408081 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 0.3544504940509796 + 0.0145805096253753 + -0.5688369870185852 + <_> + + <_> + + + + <_>0 9 10 11 -1. + <_>5 9 5 11 2. + 0 + -0.0219299104064703 + 0.1636828035116196 + -0.1001854017376900 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0381687395274639 + 0.0353313907980919 + -0.5378261208534241 + <_> + + <_> + + + + <_>5 8 10 12 -1. + <_>5 14 10 6 2. + 0 + 6.3126571476459503e-003 + 0.0561457611620426 + -0.2815802991390228 + <_> + + <_> + + + + <_>12 13 7 6 -1. + <_>12 15 7 2 3. + 0 + -0.0430026687681675 + -0.6480454206466675 + 0.0174780208617449 + <_> + + <_> + + + + <_>1 10 6 7 -1. + <_>3 10 2 7 3. + 0 + 2.4681850336492062e-003 + -0.1171970963478088 + 0.1369305998086929 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 0.0452612899243832 + 0.0159277506172657 + -0.7191559076309204 + <_> + + <_> + + + + <_>0 7 20 3 -1. + <_>0 8 20 1 3. + 0 + -0.0420671105384827 + -0.6420187950134277 + 0.0201964993029833 + <_> + + <_> + + + + <_>10 3 4 7 -1. + <_>10 3 2 7 2. + 0 + 3.9601750904694200e-004 + -0.3177456855773926 + 0.0768434777855873 + <_> + + <_> + + + + <_>0 6 6 14 -1. + <_>0 6 3 7 2. + <_>3 13 3 7 2. 
+ 0 + -0.0124693196266890 + 0.1953141987323761 + -0.0787992328405380 + <_> + + <_> + + + + <_>12 13 7 6 -1. + <_>12 15 7 2 3. + 0 + 7.9188523814082146e-003 + 0.0567210800945759 + -0.2690643966197968 + <_> + + <_> + + + + <_>2 1 11 12 -1. + <_>2 7 11 6 2. + 0 + -6.2929331324994564e-003 + 0.1568834036588669 + -0.0992870107293129 + <_> + + <_> + + + + <_>5 0 10 8 -1. + <_>5 4 10 4 2. + 0 + 0.0229741204530001 + -0.0669302269816399 + 0.2442709952592850 + <_> + + <_> + + + + <_>1 5 8 8 -1. + <_>1 5 4 4 2. + <_>5 9 4 4 2. + 0 + -9.1710267588496208e-003 + -0.2907853126525879 + 0.0593120194971561 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0958922728896141 + -0.6370087862014771 + 0.0132787600159645 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 5.6696119718253613e-003 + 0.0561310015618801 + -0.2953512072563171 + <_> + + <_> + + + + <_>16 4 4 16 -1. + <_>18 4 2 8 2. + <_>16 12 2 8 2. + 0 + -0.0134953297674656 + 0.2020577937364578 + -0.0631285831332207 + <_> + + <_> + + + + <_>0 13 7 6 -1. + <_>0 15 7 2 3. + 0 + 0.0161082390695810 + 0.0450920611619949 + -0.3616381883621216 + <_> + + <_> + + + + <_>6 15 14 4 -1. + <_>13 15 7 2 2. + <_>6 17 7 2 2. + 0 + 1.1768710101023316e-003 + -0.1987991929054260 + 0.1307854056358337 + <_> + + <_> + + + + <_>6 3 4 7 -1. + <_>8 3 2 7 2. + 0 + 1.4128970215097070e-003 + -0.2085608989000320 + 0.0814737081527710 + <_> + + <_> + + + + <_>10 11 5 9 -1. + <_>10 14 5 3 3. + 0 + -0.0430280603468418 + -0.2868754863739014 + 0.0297046601772308 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + -0.0109614096581936 + 0.4884619116783142 + -0.0350027792155743 + <_> + + <_> + + + + <_>10 0 3 14 -1. + <_>11 0 1 14 3. + 0 + -4.5575079275295138e-004 + 0.1064456999301910 + -0.1050634011626244 + <_> + + <_> + + + + <_>6 13 6 7 -1. + <_>8 13 2 7 3. + 0 + -0.0500133298337460 + -0.8203945755958557 + 0.0186044704169035 + <_> + + <_> + + + + <_>10 1 3 13 -1. + <_>11 1 1 13 3. + 0 + -0.0468412004411221 + -0.8697211146354675 + 3.9388639852404594e-003 + <_> + + <_> + + + + <_>7 1 3 13 -1. + <_>8 1 1 13 3. + 0 + -8.0362131120637059e-004 + 0.1419689953327179 + -0.1218411996960640 + <_> + + <_> + + + + <_>5 14 10 6 -1. + <_>10 14 5 3 2. + <_>5 17 5 3 2. + 0 + 0.0198024008423090 + 0.0408579483628273 + -0.3611642122268677 + <_> + + <_> + + + + <_>6 8 8 4 -1. + <_>6 10 8 2 2. + 0 + 0.0218740291893482 + -0.0582306012511253 + 0.2449093014001846 + <_> + + <_> + + + + <_>11 14 8 6 -1. + <_>11 16 8 2 3. + 0 + 0.0323718488216400 + 0.0261722598224878 + -0.4080356955528259 + <_> + + <_> + + + + <_>1 14 8 6 -1. + <_>1 16 8 2 3. + 0 + -7.0319771766662598e-003 + -0.2517513036727905 + 0.0600908100605011 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + 2.6019799988716841e-003 + -0.0708278864622116 + 0.2073512971401215 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + -3.1531439162790775e-003 + 0.1726828962564468 + -0.1132690012454987 + <_> + + <_> + + + + <_>7 9 13 3 -1. + <_>7 10 13 1 3. + 0 + 0.0583575516939163 + 0.0146687701344490 + -0.9290723800659180 + <_> + + <_> + + + + <_>2 2 12 6 -1. + <_>2 2 6 3 2. + <_>8 5 6 3 2. + 0 + 3.6941959988325834e-003 + 0.0668120086193085 + -0.2045454978942871 + <_> + + <_> + + + + <_>16 4 4 16 -1. + <_>18 4 2 8 2. + <_>16 12 2 8 2. + 0 + 0.0181837398558855 + -0.0359216593205929 + 0.2376513034105301 + <_> + + <_> + + + + <_>0 8 5 12 -1. + <_>0 12 5 4 3. + 0 + -4.4514648616313934e-003 + -0.1815667003393173 + 0.0800729691982269 + <_> + + <_> + + + + <_>10 10 9 6 -1. + <_>10 12 9 2 3. 
+ 0 + 0.0355540104210377 + 0.0114133097231388 + -0.3950318098068237 + <_> + + <_> + + + + <_>5 2 6 10 -1. + <_>5 2 3 5 2. + <_>8 7 3 5 2. + 0 + 0.0160674992948771 + -0.0491470098495483 + 0.3030670881271362 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0363721884787083 + 0.0236751604825258 + -0.6806926131248474 + <_> + + <_> + + + + <_>6 5 6 8 -1. + <_>8 5 2 8 3. + 0 + -7.4834008701145649e-003 + 0.2414668053388596 + -0.0583017282187939 + <_> + + <_> + + + + <_>11 0 4 14 -1. + <_>11 0 2 14 2. + 0 + -7.2762509807944298e-003 + -0.2237306982278824 + 0.0502845905721188 + <_> + + <_> + + + + <_>5 0 4 14 -1. + <_>7 0 2 14 2. + 0 + -4.7946218401193619e-003 + -0.2192271053791046 + 0.0666982010006905 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0130664398893714 + 0.2260453999042511 + -0.0370374284684658 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 2.3257338907569647e-003 + -0.0815092399716377 + 0.2327075004577637 + <_> + + <_> + + + + <_>8 3 12 17 -1. + <_>8 3 6 17 2. + 0 + -0.0114362398162484 + 0.0677326917648315 + -0.0330696515738964 + <_> + + <_> + + + + <_>4 6 10 4 -1. + <_>9 6 5 4 2. + 0 + 6.7957569845020771e-003 + 0.0931888595223427 + -0.1854241937398911 + <_> + + <_> + + + + <_>16 4 4 16 -1. + <_>18 4 2 8 2. + <_>16 12 2 8 2. + 0 + -0.0527059286832809 + 0.4070782959461212 + -0.0258465595543385 + <_> + + <_> + + + + <_>0 6 12 14 -1. + <_>6 6 6 14 2. + 0 + 0.1277426928281784 + 0.0172073394060135 + -0.8895267248153687 + <_> + + <_> + + + + <_>12 9 8 10 -1. + <_>12 9 4 10 2. + 0 + -0.2799988090991974 + -0.9196342229843140 + 2.5054879370145500e-004 + <_> + + <_> + + + + <_>0 9 8 10 -1. + <_>4 9 4 10 2. + 0 + 0.0126690203323960 + -0.0731523931026459 + 0.2087228000164032 + <_> + + <_> + + + + <_>13 2 6 18 -1. + <_>13 2 3 18 2. + 0 + -0.0158945992588997 + 0.1126642003655434 + -0.0401405617594719 + <_> + + <_> + + + + <_>1 2 6 18 -1. + <_>4 2 3 18 2. + 0 + 0.0539381690323353 + 0.0301373898983002 + -0.5045430064201355 + <_> + + <_> + + + + <_>4 8 13 2 -1. + <_>4 9 13 1 2. + 0 + 7.3805922875180840e-004 + -0.3592377901077271 + 0.0334184803068638 + <_> + + <_> + + + + <_>0 6 18 9 -1. + <_>0 9 18 3 3. + 0 + 4.7065159305930138e-003 + 0.4419519007205963 + -0.0393960885703564 + <_> + + <_> + + + + <_>5 4 15 3 -1. + <_>5 5 15 1 3. + 0 + 3.0945870094001293e-003 + -0.0712243765592575 + 0.1230626031756401 + <_> + + <_> + + + + <_>0 3 19 15 -1. + <_>0 8 19 5 3. + 0 + -0.0326400399208069 + -0.4464471936225891 + 0.0345098301768303 + <_> + + <_> + + + + <_>10 10 9 6 -1. + <_>10 12 9 2 3. + 0 + -7.8390557318925858e-003 + -0.0998955965042114 + 0.0334918797016144 + <_> + + <_> + + + + <_>1 10 9 6 -1. + <_>1 12 9 2 3. + 0 + 7.6504289172589779e-003 + 0.0551073402166367 + -0.2400210946798325 + <_> + + <_> + + + + <_>5 12 13 3 -1. + <_>5 13 13 1 3. + 0 + 3.8153179921209812e-003 + -0.0571435205638409 + 0.1712068021297455 + <_> + + <_> + + + + <_>0 4 4 16 -1. + <_>0 4 2 8 2. + <_>2 12 2 8 2. + 0 + 0.0142953498288989 + -0.0557476617395878 + 0.2671900987625122 + <_> + + <_> + + + + <_>10 10 5 6 -1. + <_>10 13 5 3 2. + 0 + -1.8241480574943125e-004 + 0.0473623797297478 + -0.2147321999073029 + <_> + + <_> + + + + <_>0 10 20 8 -1. + <_>0 14 20 4 2. + 0 + -0.0319164805114269 + -0.1439830064773560 + 0.0925263091921806 + <_> + + <_> + + + + <_>14 0 6 7 -1. + <_>16 0 2 7 3. + 0 + -7.6755490154027939e-003 + 0.1251308023929596 + -0.0528555810451508 + <_> + + <_> + + + + <_>0 0 6 7 -1. + <_>2 0 2 7 3. 
+ 0 + 0.0141521096229553 + -0.0581989996135235 + 0.2444438040256500 + <_> + + <_> + + + + <_>13 0 3 19 -1. + <_>14 0 1 19 3. + 0 + -0.0167010594159365 + -0.3026933968067169 + 0.0257134698331356 + <_> + + <_> + + + + <_>0 2 8 4 -1. + <_>4 2 4 4 2. + 0 + 3.5869849380105734e-003 + -0.1199979037046433 + 0.1246884018182755 + <_> + + <_> + + + + <_>12 12 7 6 -1. + <_>12 14 7 2 3. + 0 + 3.7683059927076101e-003 + 0.0502713508903980 + -0.2047702968120575 + <_> + + <_> + + + + <_>6 11 7 6 -1. + <_>6 13 7 2 3. + 0 + 9.9043175578117371e-004 + -0.0854138508439064 + 0.1631623953580856 + <_> + + <_> + + + + <_>10 10 5 6 -1. + <_>10 13 5 3 2. + 0 + 9.3151312321424484e-003 + 9.4177378341555595e-003 + -0.3520910143852234 + <_> + + <_> + + + + <_>3 10 6 9 -1. + <_>3 13 6 3 3. + 0 + -1.5002860163804144e-004 + 0.0834809765219688 + -0.1704777926206589 + <_> + + <_> + + + + <_>13 5 4 14 -1. + <_>15 5 2 7 2. + <_>13 12 2 7 2. + 0 + 8.7790598627179861e-004 + -0.1105471998453140 + 0.1175082027912140 + <_> + + <_> + + + + <_>3 5 10 9 -1. + <_>3 8 10 3 3. + 0 + -0.0376302711665630 + 0.5032584071159363 + -0.0261650606989861 + <_> + + <_> + + + + <_>2 15 18 4 -1. + <_>2 17 18 2 2. + 0 + 5.6488867849111557e-003 + 0.0747132375836372 + -0.1405851989984512 + <_> + + <_> + + + + <_>0 4 8 6 -1. + <_>0 6 8 2 3. + 0 + -1.4621330192312598e-003 + 0.0674653276801109 + -0.2014323025941849 + <_> + + <_> + + + + <_>4 5 13 2 -1. + <_>4 6 13 1 2. + 0 + 5.3189881145954132e-003 + -0.0359979383647442 + 0.3737648129463196 + <_> + + <_> + + + + <_>1 12 7 6 -1. + <_>1 14 7 2 3. + 0 + 0.0210195202380419 + 0.0270638093352318 + -0.5019965767860413 + <_> + + <_> + + + + <_>0 12 20 6 -1. + <_>0 14 20 2 3. + 0 + -0.1132896989583969 + -0.7439544200897217 + 0.0137780895456672 + <_> + + <_> + + + + <_>0 10 6 8 -1. + <_>3 10 3 8 2. + 0 + -6.1144838109612465e-003 + 0.1404484063386917 + -0.0879396721720696 + <_> + + <_> + + + + <_>4 8 15 3 -1. + <_>9 8 5 3 3. + 0 + -7.7648349106311798e-003 + -0.1434164047241211 + 0.0430610999464989 + <_> + + <_> + + + + <_>1 9 9 6 -1. + <_>4 9 3 6 3. + 0 + -0.0913359969854355 + -0.6324607133865356 + 0.0209029503166676 + <_> + + <_> + + + + <_>2 0 16 14 -1. + <_>10 0 8 7 2. + <_>2 7 8 7 2. + 0 + -0.1633961051702499 + -0.7707108855247498 + 0.0136276902630925 + <_> + + <_> + + + + <_>3 0 14 18 -1. + <_>3 9 14 9 2. + 0 + 0.5300452113151550 + 0.0122928302735090 + -0.7970852255821228 + <_> + + <_> + + + + <_>9 7 6 10 -1. + <_>12 7 3 5 2. + <_>9 12 3 5 2. + 0 + -3.0609068926423788e-003 + 0.0574785284698009 + -0.0886268168687820 + <_> + + <_> + + + + <_>3 4 4 16 -1. + <_>3 4 2 8 2. + <_>5 12 2 8 2. + 0 + 1.3204859569668770e-003 + -0.1047393977642059 + 0.1241632029414177 + <_> + + <_> + + + + <_>12 14 8 6 -1. + <_>12 16 8 2 3. + 0 + -0.0660451278090477 + -0.7040370106697083 + 7.2672651149332523e-003 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + 5.2080051973462105e-003 + 0.0732894167304039 + -0.1610578000545502 + -1.7121059894561768 + 16 + -1 + <_> + + + <_> + + <_> + + + + <_>5 4 10 4 -1. + <_>5 6 10 2 2. + 0 + -0.0240407008677721 + 0.2431855946779251 + -0.3818928897380829 + <_> + + <_> + + + + <_>1 0 18 10 -1. + <_>7 0 6 10 3. + 0 + 0.2637419104576111 + -0.2509114146232605 + 0.2723194062709808 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 3.3161949831992388e-003 + -0.2811537086963654 + 0.2297758013010025 + <_> + + <_> + + + + <_>13 4 3 15 -1. + <_>13 9 3 5 3. + 0 + 2.5751669891178608e-003 + -0.6481587886810303 + 0.0830493271350861 + <_> + + <_> + + + + <_>4 4 3 15 -1. + <_>4 9 3 5 3. 
+ 0 + 0.0128431497141719 + -0.5438807010650635 + 0.0863045528531075 + <_> + + <_> + + + + <_>14 3 6 5 -1. + <_>14 3 3 5 2. + 0 + 0.0130053600296378 + -0.2641158998012543 + 0.2210787981748581 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0263040605932474 + -0.2227616012096405 + 0.2245862931013107 + <_> + + <_> + + + + <_>14 2 6 7 -1. + <_>14 2 3 7 2. + 0 + -0.0688879936933517 + 0.4467779099941254 + -0.0183987505733967 + <_> + + <_> + + + + <_>0 2 6 7 -1. + <_>3 2 3 7 2. + 0 + 0.0158644001930952 + -0.3353232145309448 + 0.1638062000274658 + <_> + + <_> + + + + <_>11 6 8 8 -1. + <_>15 6 4 4 2. + <_>11 10 4 4 2. + 0 + -7.1481592021882534e-003 + -0.3599945902824402 + 0.1067965030670166 + <_> + + <_> + + + + <_>2 14 7 6 -1. + <_>2 16 7 2 3. + 0 + -0.0120021300390363 + -0.3749858140945435 + 0.0967593491077423 + <_> + + <_> + + + + <_>5 9 13 3 -1. + <_>5 10 13 1 3. + 0 + -2.6663220487535000e-003 + -0.3894163966178894 + 0.0597763918340206 + <_> + + <_> + + + + <_>0 8 15 3 -1. + <_>0 9 15 1 3. + 0 + 5.2618351764976978e-004 + -0.3055751025676727 + 0.1077807024121285 + <_> + + <_> + + + + <_>11 5 4 12 -1. + <_>11 11 4 6 2. + 0 + -0.0407057218253613 + -0.5857294797897339 + 0.0406608581542969 + <_> + + <_> + + + + <_>2 11 13 3 -1. + <_>2 12 13 1 3. + 0 + -8.7929163128137589e-003 + 0.2369941025972366 + -0.1382753998041153 + <_> + + <_> + + + + <_>2 1 16 2 -1. + <_>2 2 16 1 2. + 0 + -2.2475840523838997e-003 + -0.3547531962394714 + 0.0890797823667526 + <_> + + <_> + + + + <_>5 6 8 10 -1. + <_>5 6 4 5 2. + <_>9 11 4 5 2. + 0 + 5.8501982130110264e-003 + 0.0916956365108490 + -0.3332979977130890 + <_> + + <_> + + + + <_>8 8 10 12 -1. + <_>13 8 5 6 2. + <_>8 14 5 6 2. + 0 + -3.9623910561203957e-003 + -0.1984574049711227 + 0.1236386969685555 + <_> + + <_> + + + + <_>3 10 6 6 -1. + <_>3 13 6 3 2. + 0 + -1.7685770289972425e-003 + 0.0736848115921021 + -0.4586252868175507 + <_> + + <_> + + + + <_>1 5 18 8 -1. + <_>10 5 9 4 2. + <_>1 9 9 4 2. + 0 + 0.0633038803935051 + 0.0486901514232159 + -0.5730131864547730 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 7.9875197261571884e-003 + -0.8107230067253113 + 0.0270544104278088 + <_> + + <_> + + + + <_>4 6 13 3 -1. + <_>4 7 13 1 3. + 0 + -0.0135204000398517 + 0.1627480983734131 + -0.1684186011552811 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0481396093964577 + 0.0452342182397842 + -0.5730023980140686 + <_> + + <_> + + + + <_>11 2 6 10 -1. + <_>14 2 3 5 2. + <_>11 7 3 5 2. + 0 + 5.0355647690594196e-003 + 0.0652255117893219 + -0.2585661113262177 + <_> + + <_> + + + + <_>5 9 4 7 -1. + <_>7 9 2 7 2. + 0 + 1.9625260028988123e-004 + 0.1422155052423477 + -0.1848151981830597 + <_> + + <_> + + + + <_>1 9 18 3 -1. + <_>7 9 6 3 3. + 0 + 2.5747891049832106e-003 + -0.3590430021286011 + 0.0756635069847107 + <_> + + <_> + + + + <_>2 6 8 14 -1. + <_>2 6 4 7 2. + <_>6 13 4 7 2. + 0 + -4.0524629876017570e-003 + -0.2121212929487228 + 0.1184021010994911 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + 0.0569202601909637 + -0.0436572991311550 + 0.3877460062503815 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + 0.0379869900643826 + -0.0817063301801682 + 0.3952980041503906 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0227315295487642 + -0.3469341993331909 + 0.0684385672211647 + <_> + + <_> + + + + <_>6 0 4 9 -1. + <_>8 0 2 9 2. + 0 + 9.9069473799318075e-004 + -0.3668186962604523 + 0.0610366500914097 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. 
+ 0 + -4.3086782097816467e-003 + 0.1436198055744171 + -0.0961600765585899 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -0.0252022091299295 + -0.4610934853553772 + 0.0594206601381302 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -0.0335977189242840 + -0.4712752103805542 + 9.6356319263577461e-003 + <_> + + <_> + + + + <_>7 0 3 13 -1. + <_>8 0 1 13 3. + 0 + -4.6891071833670139e-003 + 0.1967620998620987 + -0.1185335963964462 + <_> + + <_> + + + + <_>9 1 3 13 -1. + <_>10 1 1 13 3. + 0 + 0.0245499201118946 + -0.0455425903201103 + 0.2871705889701843 + <_> + + <_> + + + + <_>0 9 13 2 -1. + <_>0 10 13 1 2. + 0 + -1.8802500562742352e-003 + -0.2989243865013123 + 0.0801998898386955 + <_> + + <_> + + + + <_>7 3 13 16 -1. + <_>7 11 13 8 2. + 0 + 0.2016099989414215 + 0.0305025801062584 + -0.4841420948505402 + <_> + + <_> + + + + <_>0 3 5 9 -1. + <_>0 6 5 3 3. + 0 + -0.0698039531707764 + -0.6238281130790710 + 0.0351806618273258 + <_> + + <_> + + + + <_>11 1 7 6 -1. + <_>11 3 7 2 3. + 0 + 9.1318902559578419e-004 + -0.1993506997823715 + 0.0682703480124474 + <_> + + <_> + + + + <_>1 1 16 4 -1. + <_>1 1 8 2 2. + <_>9 3 8 2 2. + 0 + 0.0145789599046111 + 0.1006335988640785 + -0.2535313069820404 + <_> + + <_> + + + + <_>0 2 20 6 -1. + <_>10 2 10 3 2. + <_>0 5 10 3 2. + 0 + 0.0501303486526012 + 0.0571921095252037 + -0.4162805974483490 + <_> + + <_> + + + + <_>0 4 19 10 -1. + <_>0 9 19 5 2. + 0 + -0.0180481094866991 + -0.4457265138626099 + 0.0503994897007942 + <_> + + <_> + + + + <_>4 6 15 5 -1. + <_>9 6 5 5 3. + 0 + 0.1481816023588181 + 0.0167796108871698 + -0.4581047892570496 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -0.0262859500944614 + 0.3544262051582336 + -0.0611844286322594 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0184141099452972 + -0.3213210999965668 + 0.0761481523513794 + <_> + + <_> + + + + <_>1 12 9 8 -1. + <_>1 16 9 4 2. + 0 + 6.1610070988535881e-003 + 0.0879460796713829 + -0.2591320872306824 + <_> + + <_> + + + + <_>3 5 14 3 -1. + <_>3 6 14 1 3. + 0 + -0.0259001608937979 + 0.3068143129348755 + -0.0656004101037979 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 0.0150148998945951 + -0.0560769699513912 + 0.3866142928600311 + <_> + + <_> + + + + <_>16 0 4 7 -1. + <_>16 0 2 7 2. + 0 + -0.0431121587753296 + 0.5592610836029053 + -0.0392326302826405 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>3 11 7 2 2. + <_>10 13 7 2 2. + 0 + -0.0214851703494787 + -0.4638487100601196 + 0.0482646189630032 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>10 11 7 2 2. + <_>3 13 7 2 2. + 0 + -0.0251317899674177 + -0.4809173941612244 + 0.0413461700081825 + <_> + + <_> + + + + <_>2 5 14 6 -1. + <_>2 7 14 2 3. + 0 + 4.1451459401287138e-004 + 0.0446918308734894 + -0.4217401146888733 + <_> + + <_> + + + + <_>11 15 9 4 -1. + <_>11 17 9 2 2. + 0 + 0.0102185700088739 + 0.0537444800138474 + -0.1939547955989838 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + -0.0203427001833916 + 0.2972249984741211 + -0.0712975636124611 + <_> + + <_> + + + + <_>12 13 7 6 -1. + <_>12 15 7 2 3. + 0 + -0.0306660495698452 + -0.3992078006267548 + 0.0455109812319279 + <_> + + <_> + + + + <_>1 13 7 6 -1. + <_>1 15 7 2 3. + 0 + -0.0327674411237240 + -0.5024853944778442 + 0.0448886081576347 + <_> + + <_> + + + + <_>0 16 20 4 -1. + <_>0 18 20 2 2. + 0 + -0.0543650016188622 + -0.4775117039680481 + 0.0418824702501297 + <_> + + <_> + + + + <_>0 14 12 6 -1. + <_>0 14 6 3 2. + <_>6 17 6 3 2. 
+ 0 + -0.0299163591116667 + 0.3579361140727997 + -0.0618319399654865 + <_> + + <_> + + + + <_>4 6 15 5 -1. + <_>9 6 5 5 3. + 0 + 0.0101441796869040 + -0.1579091995954514 + 0.0573733597993851 + <_> + + <_> + + + + <_>1 6 15 5 -1. + <_>6 6 5 5 3. + 0 + 0.1563901007175446 + 0.0329497009515762 + -0.6446223258972168 + <_> + + <_> + + + + <_>11 5 6 9 -1. + <_>11 8 6 3 3. + 0 + 0.0544479787349701 + -0.0415080599486828 + 0.1286668926477432 + <_> + + <_> + + + + <_>5 0 6 8 -1. + <_>7 0 2 8 3. + 0 + -0.0397727191448212 + -0.6896231770515442 + 0.0290465708822012 + <_> + + <_> + + + + <_>5 17 13 3 -1. + <_>5 18 13 1 3. + 0 + 6.9650667719542980e-003 + -0.0947616770863533 + 0.1825713068246841 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0516174286603928 + -0.4490728974342346 + 0.0439131408929825 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -0.0268146097660065 + -0.2256883978843689 + 0.0549280717968941 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 0.0131819201633334 + 0.0801019072532654 + -0.2867330014705658 + <_> + + <_> + + + + <_>5 3 14 3 -1. + <_>5 4 14 1 3. + 0 + 0.0142415901646018 + -0.0842644125223160 + 0.2100073993206024 + <_> + + <_> + + + + <_>6 9 6 5 -1. + <_>9 9 3 5 2. + 0 + 3.1410539522767067e-003 + 0.1325756013393402 + -0.1561053991317749 + <_> + + <_> + + + + <_>12 6 8 5 -1. + <_>12 6 4 5 2. + 0 + 0.1099515035748482 + 0.0123882703483105 + -0.4030236899852753 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + 0.0178458504378796 + 0.0528702288866043 + -0.3793024122714996 + <_> + + <_> + + + + <_>4 14 13 2 -1. + <_>4 15 13 1 2. + 0 + 0.0108519904315472 + -0.0540712587535381 + 0.3518624007701874 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + -0.0259582009166479 + 0.4197835028171539 + -0.0404774285852909 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>6 13 8 4 2. + 0 + 4.0990379638969898e-003 + 0.0509112887084484 + -0.3597494959831238 + <_> + + <_> + + + + <_>3 13 13 3 -1. + <_>3 14 13 1 3. + 0 + 0.0149098401889205 + -0.0614372305572033 + 0.2894755005836487 + <_> + + <_> + + + + <_>9 3 3 10 -1. + <_>9 8 3 5 2. + 0 + 4.0265037678182125e-003 + 0.1068639978766441 + -0.1297968029975891 + <_> + + <_> + + + + <_>4 0 12 20 -1. + <_>10 0 6 20 2. + 0 + 0.3949568867683411 + -0.0289205592125654 + 0.6353526711463928 + <_> + + <_> + + + + <_>13 12 6 6 -1. + <_>13 12 3 6 2. + 0 + 0.0128743797540665 + -0.1191041022539139 + 0.1206843033432961 + <_> + + <_> + + + + <_>3 2 12 4 -1. + <_>9 2 6 4 2. + 0 + -0.0485981814563274 + 0.4688569009304047 + -0.0427972897887230 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + 1.5357979573309422e-003 + -0.3088226914405823 + 0.0631548315286636 + <_> + + <_> + + + + <_>6 4 2 13 -1. + <_>7 4 1 13 2. + 0 + 3.5379750188440084e-003 + 0.1013244986534119 + -0.1772640049457550 + <_> + + <_> + + + + <_>13 4 4 12 -1. + <_>13 4 2 12 2. + 0 + -0.0194412209093571 + 0.2325439006090164 + -0.0537322685122490 + <_> + + <_> + + + + <_>0 9 12 3 -1. + <_>6 9 6 3 2. + 0 + 2.5940369814634323e-003 + -0.3568229973316193 + 0.0505988597869873 + <_> + + <_> + + + + <_>13 4 4 12 -1. + <_>13 4 2 12 2. + 0 + 0.0599103793501854 + -0.0240308698266745 + 0.1700322031974793 + <_> + + <_> + + + + <_>3 4 4 12 -1. + <_>5 4 2 12 2. + 0 + -0.0111817596480250 + 0.3486950099468231 + -0.0628124177455902 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 4.9201812362298369e-004 + -0.1264290958642960 + 0.0365038998425007 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. 
+ 0 + 0.0679021775722504 + -0.4288708865642548 + 0.0463369116187096 + <_> + + <_> + + + + <_>5 4 13 3 -1. + <_>5 5 13 1 3. + 0 + 0.0157288294285536 + -0.0630289465188980 + 0.1627576947212219 + <_> + + <_> + + + + <_>0 8 13 3 -1. + <_>0 9 13 1 3. + 0 + -0.0148243904113770 + -0.5339167714118958 + 0.0321326218545437 + <_> + + <_> + + + + <_>7 15 13 3 -1. + <_>7 16 13 1 3. + 0 + -0.0197062604129314 + 0.2545562982559204 + -0.0308166500180960 + <_> + + <_> + + + + <_>0 2 18 4 -1. + <_>0 2 9 2 2. + <_>9 4 9 2 2. + 0 + 9.6607124432921410e-003 + 0.0926743522286415 + -0.1794023960828781 + <_> + + <_> + + + + <_>12 6 8 5 -1. + <_>12 6 4 5 2. + 0 + -0.0499294213950634 + 0.2674334049224854 + -0.0255951192229986 + <_> + + <_> + + + + <_>5 0 10 8 -1. + <_>5 4 10 4 2. + 0 + 0.0734596401453018 + -0.0586989596486092 + 0.2889882922172546 + <_> + + <_> + + + + <_>9 0 10 6 -1. + <_>9 2 10 2 3. + 0 + -8.6538150208070874e-004 + -0.1431846022605896 + 0.0653861835598946 + <_> + + <_> + + + + <_>3 0 14 3 -1. + <_>3 1 14 1 3. + 0 + -0.0104622198268771 + -0.3249850869178772 + 0.0549553185701370 + <_> + + <_> + + + + <_>12 6 8 5 -1. + <_>12 6 4 5 2. + 0 + -6.3478751108050346e-003 + -0.1039637029170990 + 0.0403214097023010 + <_> + + <_> + + + + <_>0 6 8 5 -1. + <_>4 6 4 5 2. + 0 + 0.1140640005469322 + 0.0261920392513275 + -0.6617791056632996 + <_> + + <_> + + + + <_>11 15 7 4 -1. + <_>11 17 7 2 2. + 0 + -0.0268937703222036 + -0.3533869981765747 + 0.0197535902261734 + <_> + + <_> + + + + <_>4 2 9 5 -1. + <_>7 2 3 5 3. + 0 + 0.0806009620428085 + 0.0288784801959991 + -0.5497518777847290 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + -0.0746769607067108 + -0.3441605865955353 + 0.0269907191395760 + <_> + + <_> + + + + <_>5 3 10 6 -1. + <_>5 5 10 2 3. + 0 + -0.0770040899515152 + 0.4004569947719574 + -0.0453402698040009 + <_> + + <_> + + + + <_>8 4 6 14 -1. + <_>8 11 6 7 2. + 0 + -0.0869204774498940 + -0.3468702137470245 + 0.0391959808766842 + <_> + + <_> + + + + <_>1 5 9 6 -1. + <_>1 7 9 2 3. + 0 + -4.3200692161917686e-003 + 0.0759325698018074 + -0.2372065037488937 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0341277606785297 + -0.4199472069740295 + 0.0436338707804680 + <_> + + <_> + + + + <_>8 3 4 7 -1. + <_>10 3 2 7 2. + 0 + 0.0218453705310822 + -0.0586817003786564 + 0.3297267854213715 + <_> + + <_> + + + + <_>0 4 20 12 -1. + <_>10 4 10 6 2. + <_>0 10 10 6 2. + 0 + 0.1003722995519638 + 0.0425072088837624 + -0.4336608052253723 + -1.8098859786987305 + 17 + -1 + <_> + + + <_> + + <_> + + + + <_>5 4 7 4 -1. + <_>5 6 7 2 2. + 0 + -2.8922120109200478e-003 + 0.1438132971525192 + -0.4089652001857758 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -3.2057950738817453e-003 + -0.3347241878509522 + 0.1283469051122665 + <_> + + <_> + + + + <_>8 6 3 12 -1. + <_>8 12 3 6 2. + 0 + -1.4795559764024802e-005 + 0.1013917028903961 + -0.4468091130256653 + <_> + + <_> + + + + <_>3 0 14 2 -1. + <_>3 1 14 1 2. + 0 + 3.7529919063672423e-004 + -0.2860493063926697 + 0.1535784006118774 + <_> + + <_> + + + + <_>7 7 6 13 -1. + <_>9 7 2 13 3. + 0 + 4.9170467536896467e-004 + -0.2840496003627777 + 0.1316390037536621 + <_> + + <_> + + + + <_>3 4 16 12 -1. + <_>11 4 8 6 2. + <_>3 10 8 6 2. + 0 + 0.0164173804223537 + 0.0799011066555977 + -0.2809281945228577 + <_> + + <_> + + + + <_>1 4 16 12 -1. + <_>1 4 8 6 2. + <_>9 10 8 6 2. + 0 + 0.0101198600605130 + 0.1002686992287636 + -0.4093256890773773 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>7 10 6 5 2. 
+ 0 + -6.5251751802861691e-003 + -0.3310171067714691 + 0.0960446298122406 + <_> + + <_> + + + + <_>3 6 5 9 -1. + <_>3 9 5 3 3. + 0 + 6.1215078458189964e-003 + -0.3548310101032257 + 0.0843099206686020 + <_> + + <_> + + + + <_>6 3 14 4 -1. + <_>13 3 7 2 2. + <_>6 5 7 2 2. + 0 + 2.5817379355430603e-003 + 0.0833843573927879 + -0.2803170979022980 + <_> + + <_> + + + + <_>3 18 13 2 -1. + <_>3 19 13 1 2. + 0 + -1.3406439684331417e-003 + 0.1508380025625229 + -0.1494652032852173 + <_> + + <_> + + + + <_>4 10 16 4 -1. + <_>12 10 8 2 2. + <_>4 12 8 2 2. + 0 + 3.3681320492178202e-003 + 0.0421127006411552 + -0.2230971008539200 + <_> + + <_> + + + + <_>2 10 14 4 -1. + <_>2 10 7 2 2. + <_>9 12 7 2 2. + 0 + 2.8937528841197491e-003 + 0.0829538106918335 + -0.2915230989456177 + <_> + + <_> + + + + <_>12 12 7 6 -1. + <_>12 14 7 2 3. + 0 + 3.3696501050144434e-003 + 0.0485485494136810 + -0.1954278051853180 + <_> + + <_> + + + + <_>0 0 4 11 -1. + <_>2 0 2 11 2. + 0 + -0.0715388804674149 + 0.5200868248939514 + -0.0426444411277771 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>14 0 3 9 2. + 0 + 7.6072360388934612e-003 + -0.0852086618542671 + 0.1152331009507179 + <_> + + <_> + + + + <_>1 12 7 6 -1. + <_>1 14 7 2 3. + 0 + 1.9313229713588953e-003 + 0.0893573984503746 + -0.2361434996128082 + <_> + + <_> + + + + <_>6 12 13 2 -1. + <_>6 13 13 1 2. + 0 + 9.0475968318060040e-004 + -0.0774085894227028 + 0.1682958006858826 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>3 0 3 9 2. + 0 + 0.0111036701127887 + -0.0959639772772789 + 0.2039172053337097 + <_> + + <_> + + + + <_>0 9 20 3 -1. + <_>0 10 20 1 3. + 0 + -3.1021970789879560e-003 + -0.3860571980476379 + 0.0463297218084335 + <_> + + <_> + + + + <_>5 5 3 10 -1. + <_>5 10 3 5 2. + 0 + 1.1446890421211720e-003 + -0.2830668985843658 + 0.0589782111346722 + <_> + + <_> + + + + <_>1 5 18 8 -1. + <_>10 5 9 4 2. + <_>1 9 9 4 2. + 0 + 7.7077788300812244e-003 + 0.1047424972057343 + -0.1714607030153275 + <_> + + <_> + + + + <_>4 2 10 6 -1. + <_>4 4 10 2 3. + 0 + 0.0498937107622623 + -0.0646926015615463 + 0.3014095127582550 + <_> + + <_> + + + + <_>6 0 8 12 -1. + <_>10 0 4 6 2. + <_>6 6 4 6 2. + 0 + -0.0149378199130297 + -0.2785437107086182 + 0.0708954706788063 + <_> + + <_> + + + + <_>5 6 6 7 -1. + <_>7 6 2 7 3. + 0 + -2.5303829461336136e-003 + 0.1210851967334747 + -0.1463529020547867 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + 0.0286112595349550 + -0.0503575317561626 + 0.4065187871456146 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0362440608441830 + 0.0445772185921669 + -0.5623428821563721 + <_> + + <_> + + + + <_>9 4 6 10 -1. + <_>12 4 3 5 2. + <_>9 9 3 5 2. + 0 + -3.0544339679181576e-003 + 0.1152698993682861 + -0.2737109065055847 + <_> + + <_> + + + + <_>0 8 19 3 -1. + <_>0 9 19 1 3. + 0 + -1.3101019430905581e-003 + -0.2679800093173981 + 0.0597266517579556 + <_> + + <_> + + + + <_>1 10 18 3 -1. + <_>1 11 18 1 3. + 0 + 1.0702989529818296e-003 + -0.1543941050767899 + 0.1120698973536491 + <_> + + <_> + + + + <_>5 1 3 13 -1. + <_>6 1 1 13 3. + 0 + -0.0234671607613564 + -0.6242492198944092 + 0.0260104797780514 + <_> + + <_> + + + + <_>12 11 8 9 -1. + <_>12 11 4 9 2. + 0 + -0.0227877497673035 + 0.1790398955345154 + -0.0682308524847031 + <_> + + <_> + + + + <_>5 0 3 20 -1. + <_>6 0 1 20 3. + 0 + 7.5017688795924187e-003 + 0.0526371784508228 + -0.3333347141742706 + <_> + + <_> + + + + <_>15 0 5 9 -1. + <_>15 3 5 3 3. + 0 + 0.0138810900971293 + 0.0651188865303993 + -0.2415271997451782 + <_> + + <_> + + + + <_>0 1 4 14 -1. + <_>2 1 2 14 2. 
+ 0 + -8.7769115343689919e-003 + 0.1992519050836563 + -0.0880632326006889 + <_> + + <_> + + + + <_>0 1 20 4 -1. + <_>10 1 10 2 2. + <_>0 3 10 2 2. + 0 + 0.0265235602855682 + 0.0465747788548470 + -0.3655050992965698 + <_> + + <_> + + + + <_>0 1 6 12 -1. + <_>2 1 2 12 3. + 0 + 7.2263809852302074e-003 + -0.1080685034394264 + 0.1513179987668991 + <_> + + <_> + + + + <_>11 0 6 6 -1. + <_>11 0 3 6 2. + 0 + 2.3426050320267677e-003 + -0.1507292985916138 + 0.0999450236558914 + <_> + + <_> + + + + <_>6 10 4 8 -1. + <_>6 14 4 4 2. + 0 + -2.8811080483137630e-005 + 0.0614130385220051 + -0.2434443980455399 + <_> + + <_> + + + + <_>7 0 13 3 -1. + <_>7 1 13 1 3. + 0 + -0.0139119001105428 + -0.3101083934307098 + 0.0248958505690098 + <_> + + <_> + + + + <_>0 0 13 3 -1. + <_>0 1 13 1 3. + 0 + 0.0247687809169292 + 0.0232180301100016 + -0.6507102847099304 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>5 7 10 2 3. + 0 + -6.0916407965123653e-003 + 0.0597684904932976 + -0.2536034882068634 + <_> + + <_> + + + + <_>4 5 4 14 -1. + <_>4 5 2 7 2. + <_>6 12 2 7 2. + 0 + -9.7264908254146576e-003 + -0.2558444142341614 + 0.0555546209216118 + <_> + + <_> + + + + <_>11 0 6 6 -1. + <_>11 0 3 6 2. + 0 + 0.0974990427494049 + 5.3867488168179989e-003 + -0.7356767058372498 + <_> + + <_> + + + + <_>3 0 6 6 -1. + <_>6 0 3 6 2. + 0 + 3.0411418993026018e-003 + -0.1375921070575714 + 0.1214364990592003 + <_> + + <_> + + + + <_>1 0 18 7 -1. + <_>7 0 6 7 3. + 0 + 2.7967148926109076e-003 + 0.1804866045713425 + -0.0845270007848740 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. + 0 + 0.0107072796672583 + -0.0439708605408669 + 0.3104200959205627 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + 1.7561139538884163e-003 + 0.0518668405711651 + -0.2276871055364609 + <_> + + <_> + + + + <_>2 6 14 9 -1. + <_>2 9 14 3 3. + 0 + -3.0384738929569721e-003 + 0.7165204286575317 + -0.0224659293889999 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + -0.0941614806652069 + -0.7933856248855591 + 0.0131174903362989 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. + 0 + -0.0238690096884966 + 0.4933817982673645 + -0.0321690216660500 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + -0.0399585887789726 + -0.1891476958990097 + 0.0285007003694773 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 6.9391070865094662e-003 + 0.0397772118449211 + -0.3910590112209320 + <_> + + <_> + + + + <_>1 14 18 4 -1. + <_>10 14 9 2 2. + <_>1 16 9 2 2. + 0 + -0.0335967801511288 + -0.5683007240295410 + 0.0216185096651316 + <_> + + <_> + + + + <_>2 8 15 6 -1. + <_>7 8 5 6 3. + 0 + -0.1407984048128128 + -0.7901437282562256 + 0.0148846097290516 + <_> + + <_> + + + + <_>16 2 4 8 -1. + <_>16 6 4 4 2. + 0 + -5.7346289977431297e-003 + -0.1551263928413391 + 0.0428795702755451 + <_> + + <_> + + + + <_>0 1 8 8 -1. + <_>0 1 4 4 2. + <_>4 5 4 4 2. + 0 + -0.0528418309986591 + 0.3082383871078491 + -0.0507096908986568 + <_> + + <_> + + + + <_>7 3 8 4 -1. + <_>7 5 8 2 2. + 0 + 0.0152070997282863 + -0.0257897693663836 + 0.3329232037067413 + <_> + + <_> + + + + <_>0 3 14 4 -1. + <_>0 3 7 2 2. + <_>7 5 7 2 2. + 0 + -5.8392022037878633e-004 + 0.0889003872871399 + -0.1629794985055924 + <_> + + <_> + + + + <_>3 12 14 4 -1. + <_>10 12 7 2 2. + <_>3 14 7 2 2. + 0 + -3.3715530298650265e-003 + -0.1789022982120514 + 0.0753766074776649 + <_> + + <_> + + + + <_>4 9 8 5 -1. + <_>8 9 4 5 2. + 0 + -1.2047060299664736e-003 + 0.1049197018146515 + -0.1297073960304260 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. 
+ 0 + 0.0552764795720577 + -0.0431975089013577 + 0.3721202909946442 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0393306091427803 + 0.0304163992404938 + -0.4907610118389130 + <_> + + <_> + + + + <_>8 5 8 4 -1. + <_>8 5 4 4 2. + 0 + -9.7229599487036467e-004 + -0.2189545929431915 + 0.0390327088534832 + <_> + + <_> + + + + <_>2 2 15 7 -1. + <_>7 2 5 7 3. + 0 + -0.0560480691492558 + 0.4163256883621216 + -0.0337473116815090 + <_> + + <_> + + + + <_>8 5 8 4 -1. + <_>8 5 4 4 2. + 0 + 0.0713767409324646 + 0.0121292099356651 + -0.6481407880783081 + <_> + + <_> + + + + <_>4 5 8 4 -1. + <_>8 5 4 4 2. + 0 + 1.4940260443836451e-003 + -0.2139361053705216 + 0.0848872214555740 + <_> + + <_> + + + + <_>7 1 7 12 -1. + <_>7 7 7 6 2. + 0 + -3.2299170270562172e-003 + 0.0907924324274063 + -0.0958160534501076 + <_> + + <_> + + + + <_>4 0 12 10 -1. + <_>4 5 12 5 2. + 0 + 0.0421828702092171 + -0.0669144019484520 + 0.2521761953830719 + <_> + + <_> + + + + <_>6 6 14 4 -1. + <_>13 6 7 2 2. + <_>6 8 7 2 2. + 0 + -6.5001910552382469e-003 + -0.1214955970644951 + 0.0373679883778095 + <_> + + <_> + + + + <_>0 1 5 6 -1. + <_>0 4 5 3 2. + 0 + 0.0194571297615767 + 0.0501637794077396 + -0.2870037853717804 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0372913889586926 + 0.0296084396541119 + -0.5722249746322632 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -0.0255715195089579 + 0.4394184947013855 + -0.0365323089063168 + <_> + + <_> + + + + <_>12 2 2 14 -1. + <_>12 2 1 14 2. + 0 + -7.9122912138700485e-003 + -0.2961851060390472 + 0.0354832708835602 + <_> + + <_> + + + + <_>0 15 14 4 -1. + <_>0 15 7 2 2. + <_>7 17 7 2 2. + 0 + 3.0267490074038506e-003 + -0.1211377978324890 + 0.1127142012119293 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0210358202457428 + 0.2920606136322022 + -0.0310014896094799 + <_> + + <_> + + + + <_>6 2 2 14 -1. + <_>7 2 1 14 2. + 0 + -0.0129114203155041 + -0.5419433116912842 + 0.0267562400549650 + <_> + + <_> + + + + <_>6 6 14 4 -1. + <_>13 6 7 2 2. + <_>6 8 7 2 2. + 0 + 0.0550960712134838 + 8.4169982001185417e-003 + -0.6287345886230469 + <_> + + <_> + + + + <_>0 6 14 4 -1. + <_>0 6 7 2 2. + <_>7 8 7 2 2. + 0 + -6.3893562182784081e-003 + -0.2078483998775482 + 0.0604367889463902 + <_> + + <_> + + + + <_>12 11 8 9 -1. + <_>12 11 4 9 2. + 0 + 0.0108587602153420 + -0.0784972533583641 + 0.1295799016952515 + <_> + + <_> + + + + <_>0 11 8 9 -1. + <_>4 11 4 9 2. + 0 + -0.0158596206456423 + 0.1577291041612625 + -0.1014351025223732 + <_> + + <_> + + + + <_>7 1 12 18 -1. + <_>11 1 4 18 3. + 0 + 0.1520387977361679 + 0.0217213202267885 + -0.3171314001083374 + <_> + + <_> + + + + <_>1 1 12 18 -1. + <_>5 1 4 18 3. + 0 + 0.0179420392960310 + -0.0848169326782227 + 0.1769730001688004 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 8.8212518021464348e-003 + 0.0518006011843681 + -0.2144360989332199 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 0.0157152898609638 + 0.0425258204340935 + -0.3227834105491638 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>8 6 4 5 2. + 0 + -2.4744209367781878e-003 + 0.1082855015993118 + -0.1295306980609894 + <_> + + <_> + + + + <_>6 3 7 6 -1. + <_>6 5 7 2 3. + 0 + 0.0125975301489234 + -0.0602517016232014 + 0.2751215100288391 + <_> + + <_> + + + + <_>5 5 13 8 -1. + <_>5 9 13 4 2. + 0 + -1.0955630568787456e-003 + -0.5424407124519348 + 0.0281664393842220 + <_> + + <_> + + + + <_>1 2 14 2 -1. + <_>1 3 14 1 2. 
+ 0 + -1.4035019557923079e-003 + -0.2362516969442368 + 0.0618872493505478 + <_> + + <_> + + + + <_>15 4 5 9 -1. + <_>15 7 5 3 3. + 0 + -0.0772945433855057 + -0.5214198231697083 + 0.0118441497907043 + <_> + + <_> + + + + <_>0 4 5 9 -1. + <_>0 7 5 3 3. + 0 + -0.0754421576857567 + -0.7158880233764648 + 0.0171514190733433 + <_> + + <_> + + + + <_>7 1 8 8 -1. + <_>7 5 8 4 2. + 0 + -0.0651483386754990 + 0.2409984022378922 + -0.0502787381410599 + <_> + + <_> + + + + <_>2 5 12 12 -1. + <_>2 5 6 6 2. + <_>8 11 6 6 2. + 0 + -1.0481229983270168e-003 + 0.0654616281390190 + -0.1919842064380646 + <_> + + <_> + + + + <_>12 11 8 4 -1. + <_>12 13 8 2 2. + 0 + 2.0919230300933123e-003 + 0.0487021617591381 + -0.2006254941225052 + <_> + + <_> + + + + <_>5 7 10 10 -1. + <_>5 7 5 5 2. + <_>10 12 5 5 2. + 0 + -0.0428493693470955 + -0.4615420997142792 + 0.0291370395570993 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -4.5563629828393459e-003 + 0.1373217999935150 + -0.0738710165023804 + <_> + + <_> + + + + <_>2 14 16 3 -1. + <_>2 15 16 1 3. + 0 + 6.7648440599441528e-003 + -0.0638660266995430 + 0.2757869958877564 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + 0.0422520712018013 + 0.0135830100625753 + -0.6271442174911499 + <_> + + <_> + + + + <_>0 13 18 4 -1. + <_>0 13 9 2 2. + <_>9 15 9 2 2. + 0 + -0.0354382209479809 + -0.5243613123893738 + 0.0210475306957960 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + -5.3693209774792194e-003 + 0.1836670935153961 + -0.0664324536919594 + <_> + + <_> + + + + <_>0 11 8 4 -1. + <_>0 13 8 2 2. + 0 + 1.3521539513021708e-003 + 0.0588343217968941 + -0.2245510071516037 + <_> + + <_> + + + + <_>6 12 13 2 -1. + <_>6 13 13 1 2. + 0 + -0.0322040282189846 + -0.4801704883575440 + 9.2976661399006844e-003 + <_> + + <_> + + + + <_>1 12 13 2 -1. + <_>1 13 13 1 2. + 0 + 4.0550291305407882e-004 + -0.0859484076499939 + 0.2010037004947662 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -3.8419410120695829e-003 + 0.2059556990861893 + -0.0668637081980705 + <_> + + <_> + + + + <_>0 7 14 4 -1. + <_>0 7 7 2 2. + <_>7 9 7 2 2. + 0 + -4.5518199913203716e-003 + -0.2290892004966736 + 0.0589543990790844 + <_> + + <_> + + + + <_>13 3 7 6 -1. + <_>13 5 7 2 3. + 0 + -0.0493403710424900 + -0.3899571895599365 + 0.0167140793055296 + <_> + + <_> + + + + <_>0 4 3 16 -1. + <_>0 12 3 8 2. + 0 + 0.0864564925432205 + -0.0322788283228874 + 0.3637163937091827 + <_> + + <_> + + + + <_>13 5 5 15 -1. + <_>13 10 5 5 3. + 0 + 5.1636258140206337e-003 + -0.1739903986454010 + 0.0560171492397785 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>2 10 3 5 2. + <_>5 15 3 5 2. + 0 + 3.5364869982004166e-003 + -0.0796309486031532 + 0.1631346046924591 + <_> + + <_> + + + + <_>11 11 9 6 -1. + <_>11 13 9 2 3. + 0 + -0.0431708395481110 + -0.3703685998916626 + 0.0198411308228970 + <_> + + <_> + + + + <_>0 11 9 6 -1. + <_>0 13 9 2 3. + 0 + 6.1772209592163563e-003 + 0.0590521693229675 + -0.2370197027921677 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0222447700798512 + 0.2576271891593933 + -0.0229684505611658 + <_> + + <_> + + + + <_>1 3 18 4 -1. + <_>1 3 9 2 2. + <_>10 5 9 2 2. + 0 + 0.0501637309789658 + 0.0174684002995491 + -0.6812874078750610 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>15 10 5 3 2. + <_>10 13 5 3 2. + 0 + -3.0043811420910060e-004 + 0.0557814016938210 + -0.1268578022718430 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1978355050086975 + 0.0122114196419716 + -0.8606426715850830 + <_> + + <_> + + + + <_>8 4 6 5 -1. + <_>8 4 3 5 2. 
+ 0 + 0.0653624683618546 + 4.1287927888333797e-003 + -0.6294823884963989 + <_> + + <_> + + + + <_>3 6 12 7 -1. + <_>7 6 4 7 3. + 0 + -0.0186849907040596 + -0.2437735944986343 + 0.0432324893772602 + <_> + + <_> + + + + <_>8 4 6 5 -1. + <_>8 4 3 5 2. + 0 + -7.5593511573970318e-003 + 0.1725444048643112 + -0.0168717801570892 + <_> + + <_> + + + + <_>6 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 1.4699660241603851e-003 + -0.1556148976087570 + 0.0692318528890610 + <_> + + <_> + + + + <_>7 1 6 19 -1. + <_>7 1 3 19 2. + 0 + 0.1192594021558762 + -0.0263411905616522 + 0.4484722912311554 + <_> + + <_> + + + + <_>6 0 3 20 -1. + <_>7 0 1 20 3. + 0 + 0.0137634798884392 + 0.0318527109920979 + -0.3818455040454865 + <_> + + <_> + + + + <_>9 1 3 13 -1. + <_>10 1 1 13 3. + 0 + 0.0129664400592446 + -0.0393913686275482 + 0.1909269988536835 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0110414195805788 + -0.2730937898159027 + 0.0477778203785419 + <_> + + <_> + + + + <_>2 0 18 16 -1. + <_>2 8 18 8 2. + 0 + 0.6836441159248352 + 9.6240043640136719e-003 + -0.9744750261306763 + <_> + + <_> + + + + <_>1 5 6 15 -1. + <_>1 10 6 5 3. + 0 + -2.4255160242319107e-003 + -0.2543956935405731 + 0.0407325513660908 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + 6.4529682276770473e-004 + -0.1382417976856232 + 0.0746600478887558 + <_> + + <_> + + + + <_>1 14 12 6 -1. + <_>1 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0223861802369356 + 0.3940477967262268 + -0.0425919517874718 + <_> + + <_> + + + + <_>6 13 10 6 -1. + <_>11 13 5 3 2. + <_>6 16 5 3 2. + 0 + -0.0643251612782478 + -0.9685335755348206 + 5.4289568215608597e-003 + <_> + + <_> + + + + <_>0 10 14 3 -1. + <_>0 11 14 1 3. + 0 + 0.0408037118613720 + 0.0147799802944064 + -0.7544596791267395 + <_> + + <_> + + + + <_>11 9 6 8 -1. + <_>11 9 3 8 2. + 0 + -2.4066439364105463e-003 + 0.0762139186263084 + -0.0813253372907639 + <_> + + <_> + + + + <_>1 13 7 6 -1. + <_>1 15 7 2 3. + 0 + -0.0498650595545769 + -0.7844797968864441 + 0.0151301501318812 + <_> + + <_> + + + + <_>9 0 3 12 -1. + <_>9 6 3 6 2. + 0 + -0.0897499918937683 + -0.9007651805877686 + 4.0898341685533524e-003 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 2.1489290520548820e-003 + -0.0778734087944031 + 0.1453898996114731 + <_> + + <_> + + + + <_>4 14 13 2 -1. + <_>4 15 13 1 2. + 0 + 1.8653910374268889e-003 + -0.0512646399438381 + 0.1451420933008194 + <_> + + <_> + + + + <_>6 13 6 7 -1. + <_>8 13 2 7 3. + 0 + 0.0541899502277374 + 0.0167405698448420 + -0.7296484708786011 + <_> + + <_> + + + + <_>16 10 4 7 -1. + <_>16 10 2 7 2. + 0 + -3.7668810691684484e-003 + 0.1534599959850311 + -0.0598672106862068 + <_> + + <_> + + + + <_>0 6 4 13 -1. + <_>2 6 2 13 2. + 0 + -0.1515194028615952 + -0.8261219859123230 + 0.0144882798194885 + <_> + + <_> + + + + <_>1 15 18 3 -1. + <_>7 15 6 3 3. + 0 + 0.0102466596290469 + -0.0631456896662712 + 0.1899479031562805 + <_> + + <_> + + + + <_>0 1 16 4 -1. + <_>0 1 8 2 2. + <_>8 3 8 2 2. + 0 + 0.0105782700702548 + 0.0597267486155033 + -0.1916207969188690 + <_> + + <_> + + + + <_>3 0 14 4 -1. + <_>3 2 14 2 2. + 0 + 0.0150329703465104 + -0.0738685205578804 + 0.1551170945167542 + <_> + + <_> + + + + <_>3 13 12 6 -1. + <_>3 13 6 3 2. + <_>9 16 6 3 2. + 0 + -0.0421362891793251 + -0.6873332262039185 + 0.0166046302765608 + <_> + + <_> + + + + <_>6 8 8 9 -1. + <_>6 11 8 3 3. + 0 + 1.8628799589350820e-003 + -0.1573285013437271 + 0.0757149085402489 + <_> + + <_> + + + + <_>0 8 18 9 -1. + <_>0 11 18 3 3. 
+ 0 + 0.0246596392244101 + 0.0970811396837235 + -0.1604579985141754 + <_> + + <_> + + + + <_>10 13 10 7 -1. + <_>10 13 5 7 2. + 0 + 0.1914573013782501 + 7.1056559681892395e-003 + -0.7553734183311462 + <_> + + <_> + + + + <_>0 13 10 7 -1. + <_>5 13 5 7 2. + 0 + -0.0301671605557203 + 0.1700260937213898 + -0.0861638262867928 + <_> + + <_> + + + + <_>12 10 8 6 -1. + <_>12 12 8 2 3. + 0 + 9.2923697084188461e-003 + 0.0433526113629341 + -0.1953348070383072 + <_> + + <_> + + + + <_>0 12 17 6 -1. + <_>0 15 17 3 2. + 0 + -1.9069829722866416e-003 + 0.0824215188622475 + -0.1464408934116364 + <_> + + <_> + + + + <_>5 14 10 4 -1. + <_>5 16 10 2 2. + 0 + 3.1027841032482684e-004 + -0.1187931969761848 + 0.0946357622742653 + <_> + + <_> + + + + <_>1 8 13 3 -1. + <_>1 9 13 1 3. + 0 + 4.4492271263152361e-004 + -0.1564576029777527 + 0.0685128122568130 + <_> + + <_> + + + + <_>11 10 9 4 -1. + <_>11 12 9 2 2. + 0 + -0.0120954699814320 + -0.0901441276073456 + 0.0300506204366684 + <_> + + <_> + + + + <_>0 2 2 18 -1. + <_>1 2 1 18 2. + 0 + -2.0358909387141466e-003 + 0.1358647048473358 + -0.0726312622427940 + <_> + + <_> + + + + <_>14 12 6 7 -1. + <_>14 12 3 7 2. + 0 + -9.3594277277588844e-003 + 0.1137612015008926 + -0.0396327190101147 + <_> + + <_> + + + + <_>0 12 6 7 -1. + <_>3 12 3 7 2. + 0 + 4.2418478988111019e-003 + -0.0815194398164749 + 0.1576620936393738 + <_> + + <_> + + + + <_>8 2 8 14 -1. + <_>8 9 8 7 2. + 0 + -0.0599637590348721 + -0.2327315062284470 + 0.0208368804305792 + <_> + + <_> + + + + <_>4 2 8 14 -1. + <_>4 9 8 7 2. + 0 + 4.6651167795062065e-003 + 0.1313533037900925 + -0.1239491030573845 + <_> + + <_> + + + + <_>7 9 13 3 -1. + <_>7 10 13 1 3. + 0 + 6.2358117429539561e-004 + -0.1292017996311188 + 0.0652205571532249 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 2.0561330020427704e-003 + -0.0629108771681786 + 0.1628800034523010 + -1.5512030124664307 + 18 + -1 + <_> + + + <_> + + <_> + + + + <_>1 2 18 3 -1. + <_>7 2 6 3 3. + 0 + 0.1121644005179405 + -0.2906509041786194 + 0.3151021003723145 + <_> + + <_> + + + + <_>12 6 5 9 -1. + <_>12 9 5 3 3. + 0 + 0.0278506092727184 + -0.3997235000133514 + 0.1789499074220657 + <_> + + <_> + + + + <_>0 4 9 12 -1. + <_>3 4 3 12 3. + 0 + 0.0408042408525944 + -0.2417106032371521 + 0.2237673997879028 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 1.3134710025042295e-003 + -0.4223076105117798 + 0.0690668374300003 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 3.9736120961606503e-003 + -0.5524399280548096 + 0.1036207973957062 + <_> + + <_> + + + + <_>13 9 4 10 -1. + <_>13 14 4 5 2. + 0 + -9.7877913503907621e-005 + 0.0703004598617554 + -0.4197031855583191 + <_> + + <_> + + + + <_>3 12 10 8 -1. + <_>3 12 5 4 2. + <_>8 16 5 4 2. + 0 + 6.2921550124883652e-003 + -0.3062996864318848 + 0.1307204067707062 + <_> + + <_> + + + + <_>12 1 7 4 -1. + <_>12 3 7 2 2. + 0 + -8.7216142565011978e-003 + -0.4126763045787811 + 0.0727381482720375 + <_> + + <_> + + + + <_>2 4 12 6 -1. + <_>2 6 12 2 3. + 0 + -0.0586111098527908 + 0.1949152052402496 + -0.1973744928836823 + <_> + + <_> + + + + <_>13 10 5 6 -1. + <_>13 13 5 3 2. + 0 + -0.0461044684052467 + -0.2627475857734680 + 0.0243621896952391 + <_> + + <_> + + + + <_>2 10 5 6 -1. + <_>2 13 5 3 2. + 0 + -5.2685278933495283e-004 + 0.0798763111233711 + -0.4435858130455017 + <_> + + <_> + + + + <_>12 1 7 4 -1. + <_>12 3 7 2 2. + 0 + -0.0255219396203756 + -0.4418368935585022 + 0.0107056600973010 + <_> + + <_> + + + + <_>5 5 9 10 -1. + <_>5 10 9 5 2. 
+ 0 + -6.8350387737154961e-003 + -0.3950119018554688 + 0.0784419924020767 + <_> + + <_> + + + + <_>12 1 7 4 -1. + <_>12 3 7 2 2. + 0 + 0.0610552094876766 + 3.5330320242792368e-003 + -0.6067745089530945 + <_> + + <_> + + + + <_>0 0 17 2 -1. + <_>0 1 17 1 2. + 0 + 4.7110877931118011e-003 + -0.1931038051843643 + 0.1525941044092178 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0375524982810020 + 0.0695726871490479 + -0.4158819019794464 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0408874303102493 + -0.1359692960977554 + 0.2489430010318756 + <_> + + <_> + + + + <_>11 10 6 8 -1. + <_>13 10 2 8 3. + 0 + 2.6306639483664185e-005 + -0.2560321092605591 + 0.1100158989429474 + <_> + + <_> + + + + <_>3 10 6 8 -1. + <_>5 10 2 8 3. + 0 + 9.4716809689998627e-003 + -0.2219702005386353 + 0.1364049017429352 + <_> + + <_> + + + + <_>5 1 10 12 -1. + <_>5 7 10 6 2. + 0 + 3.4596489276736975e-003 + 0.1556897014379501 + -0.1845435053110123 + <_> + + <_> + + + + <_>1 1 7 4 -1. + <_>1 3 7 2 2. + 0 + -8.1670414656400681e-003 + -0.3734661042690277 + 0.0822064206004143 + <_> + + <_> + + + + <_>10 10 8 6 -1. + <_>10 12 8 2 3. + 0 + 0.0470451787114143 + 0.0126555804163218 + -0.6916750073432922 + <_> + + <_> + + + + <_>0 7 8 6 -1. + <_>0 9 8 2 3. + 0 + -1.9954189192503691e-003 + -0.4287165105342865 + 0.0601198486983776 + <_> + + <_> + + + + <_>5 11 10 6 -1. + <_>10 11 5 3 2. + <_>5 14 5 3 2. + 0 + -0.0327976793050766 + -0.5851371884346008 + 0.0397392101585865 + <_> + + <_> + + + + <_>0 8 20 3 -1. + <_>0 9 20 1 3. + 0 + 0.0435161218047142 + 0.0363112390041351 + -0.5855696797370911 + <_> + + <_> + + + + <_>7 11 13 3 -1. + <_>7 12 13 1 3. + 0 + -0.0132136000320315 + 0.2116038054227829 + -0.0896183624863625 + <_> + + <_> + + + + <_>2 7 15 5 -1. + <_>7 7 5 5 3. + 0 + -0.0385740809142590 + -0.5937594771385193 + 0.0372978709638119 + <_> + + <_> + + + + <_>2 9 16 6 -1. + <_>2 9 8 6 2. + 0 + -0.1535183936357498 + 0.4411644041538239 + -0.0590583682060242 + <_> + + <_> + + + + <_>0 4 5 6 -1. + <_>0 7 5 3 2. + 0 + -0.0141332400962710 + -0.3404521048069000 + 0.0662774965167046 + <_> + + <_> + + + + <_>4 12 12 5 -1. + <_>8 12 4 5 3. + 0 + 0.0140610104426742 + 0.1131246015429497 + -0.1900123953819275 + <_> + + <_> + + + + <_>2 16 16 4 -1. + <_>2 16 8 2 2. + <_>10 18 8 2 2. + 0 + 0.0354574695229530 + 0.0372978188097477 + -0.5356817841529846 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -0.0129310395568609 + -0.2859332859516144 + 0.0583418011665344 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -0.0119869997724891 + -0.4021627008914948 + 0.0478411912918091 + <_> + + <_> + + + + <_>6 14 13 3 -1. + <_>6 15 13 1 3. + 0 + -0.0137232895940542 + 0.2023843973875046 + -0.0892904922366142 + <_> + + <_> + + + + <_>2 14 13 3 -1. + <_>2 15 13 1 3. + 0 + 0.0159908104687929 + -0.0617425516247749 + 0.3938700854778290 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + -0.0145057598128915 + -0.3582904934883118 + 0.0437899082899094 + <_> + + <_> + + + + <_>0 4 6 7 -1. + <_>2 4 2 7 3. + 0 + 0.0314435288310051 + -0.0673745274543762 + 0.2877972126007080 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0342873409390450 + 0.0563902594149113 + -0.3340716063976288 + <_> + + <_> + + + + <_>7 0 6 20 -1. + <_>9 0 2 20 3. + 0 + 8.8674569269642234e-005 + -0.2865560054779053 + 0.0703185573220253 + <_> + + <_> + + + + <_>9 5 3 13 -1. + <_>10 5 1 13 3. + 0 + 0.0182664692401886 + -0.0522215701639652 + 0.1702639013528824 + <_> + + <_> + + + + <_>5 1 10 9 -1. + <_>5 4 10 3 3. 
+ 0 + 0.0617696307599545 + -0.0688005834817886 + 0.2748331129550934 + <_> + + <_> + + + + <_>12 5 8 8 -1. + <_>16 5 4 4 2. + <_>12 9 4 4 2. + 0 + -0.0233833100646734 + -0.2784563004970551 + 0.0241313595324755 + <_> + + <_> + + + + <_>6 0 8 8 -1. + <_>6 4 8 4 2. + 0 + -0.1118286028504372 + 0.4568716883659363 + -0.0432179495692253 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + -0.0643868967890739 + -0.3422875106334686 + 0.0640637129545212 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.2176343053579330 + -0.0605644993484020 + 0.3635270893573761 + <_> + + <_> + + + + <_>10 6 10 6 -1. + <_>15 6 5 3 2. + <_>10 9 5 3 2. + 0 + -4.9456087872385979e-003 + -0.1652639061212540 + 0.0460355803370476 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -1.2704910477623343e-003 + -0.2503579854965210 + 0.0823364406824112 + <_> + + <_> + + + + <_>13 2 6 6 -1. + <_>13 2 3 6 2. + 0 + 0.0265367291867733 + -0.1391904950141907 + 0.1952400058507919 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -0.0200274400413036 + -0.3747282922267914 + 0.0539810210466385 + <_> + + <_> + + + + <_>7 6 10 14 -1. + <_>12 6 5 7 2. + <_>7 13 5 7 2. + 0 + -0.0619875490665436 + -0.1443642973899841 + 0.0158632900565863 + <_> + + <_> + + + + <_>1 1 18 3 -1. + <_>1 2 18 1 3. + 0 + 0.0230370592325926 + 0.0384292304515839 + -0.4847930967807770 + <_> + + <_> + + + + <_>0 9 20 3 -1. + <_>0 10 20 1 3. + 0 + 0.0579582713544369 + 0.0207501407712698 + -0.7677661776542664 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + 5.4419268853962421e-003 + 0.0720744132995605 + -0.2425422072410584 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + 7.2400430217385292e-003 + -0.0824329480528831 + 0.1846349984407425 + <_> + + <_> + + + + <_>2 15 7 4 -1. + <_>2 17 7 2 2. + 0 + 0.0148477796465158 + 0.0562454089522362 + -0.3629705905914307 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>9 0 1 13 2. + 0 + 0.0120848799124360 + -0.0635362565517426 + 0.2861422896385193 + <_> + + <_> + + + + <_>4 0 9 6 -1. + <_>7 0 3 6 3. + 0 + 0.0808313563466072 + 0.0471439585089684 + -0.4996809065341950 + <_> + + <_> + + + + <_>11 6 5 6 -1. + <_>11 9 5 3 2. + 0 + 1.9218639936298132e-003 + -0.4046914875507355 + 0.0220930408686399 + <_> + + <_> + + + + <_>3 6 10 14 -1. + <_>3 6 5 7 2. + <_>8 13 5 7 2. + 0 + -0.0141796795651317 + -0.1852028071880341 + 0.0868239179253578 + <_> + + <_> + + + + <_>6 4 12 12 -1. + <_>12 4 6 6 2. + <_>6 10 6 6 2. + 0 + -2.9600440029753372e-005 + 0.0740548297762871 + -0.1933135986328125 + <_> + + <_> + + + + <_>4 6 5 6 -1. + <_>4 9 5 3 2. + 0 + 1.7121590208262205e-003 + -0.4995464980602264 + 0.0382737405598164 + <_> + + <_> + + + + <_>5 1 14 5 -1. + <_>5 1 7 5 2. + 0 + -0.1320794969797134 + 0.5296478867530823 + -0.0103634996339679 + <_> + + <_> + + + + <_>9 4 2 16 -1. + <_>9 12 2 8 2. + 0 + 0.0369220711290836 + 0.0195874702185392 + -0.8895406723022461 + <_> + + <_> + + + + <_>13 12 7 4 -1. + <_>13 14 7 2 2. + 0 + -7.3079409048659727e-006 + 0.0649930536746979 + -0.1733129024505615 + <_> + + <_> + + + + <_>3 12 5 6 -1. + <_>3 15 5 3 2. + 0 + -0.0352227091789246 + -0.3684993088245392 + 0.0505657382309437 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + -0.0555311106145382 + 0.3155569136142731 + -0.0450157299637794 + <_> + + <_> + + + + <_>1 3 8 4 -1. + <_>5 3 4 4 2. + 0 + 0.0187628697603941 + -0.1935907006263733 + 0.0790935307741165 + <_> + + <_> + + + + <_>9 14 10 6 -1. + <_>14 14 5 3 2. + <_>9 17 5 3 2. 
+ 0 + 0.0249717608094215 + -0.0818621963262558 + 0.2101489007472992 + <_> + + <_> + + + + <_>3 0 3 13 -1. + <_>4 0 1 13 3. + 0 + -2.0817129407078028e-003 + -0.1772366017103195 + 0.0917572826147079 + <_> + + <_> + + + + <_>10 10 10 10 -1. + <_>15 10 5 5 2. + <_>10 15 5 5 2. + 0 + -0.1149986013770104 + 0.5086256265640259 + -0.0182674508541822 + <_> + + <_> + + + + <_>0 6 8 14 -1. + <_>4 6 4 14 2. + 0 + 0.3206895887851715 + 0.0216510090976954 + -0.7668547034263611 + <_> + + <_> + + + + <_>4 3 12 12 -1. + <_>10 3 6 6 2. + <_>4 9 6 6 2. + 0 + -0.0814512968063354 + -0.4633176028728485 + 0.0293835792690516 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + -0.0150079401209950 + -0.3930864930152893 + 0.0368675589561462 + <_> + + <_> + + + + <_>9 6 3 13 -1. + <_>10 6 1 13 3. + 0 + 0.0237958207726479 + -0.0324823111295700 + 0.1676425039768219 + <_> + + <_> + + + + <_>4 1 10 5 -1. + <_>9 1 5 5 2. + 0 + -0.0885088071227074 + 0.7210345864295960 + -0.0211402103304863 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 0.0450111217796803 + -0.0253261309117079 + 0.2806276082992554 + <_> + + <_> + + + + <_>3 2 12 6 -1. + <_>3 2 6 3 2. + <_>9 5 6 3 2. + 0 + 0.0192869901657104 + 0.0657711625099182 + -0.2569778859615326 + <_> + + <_> + + + + <_>2 2 18 4 -1. + <_>11 2 9 2 2. + <_>2 4 9 2 2. + 0 + 0.0221376195549965 + 0.0391549915075302 + -0.1914563030004501 + <_> + + <_> + + + + <_>3 2 11 6 -1. + <_>3 4 11 2 3. + 0 + 0.0298479795455933 + -0.1252101957798004 + 0.1486787050962448 + <_> + + <_> + + + + <_>12 0 8 12 -1. + <_>16 0 4 6 2. + <_>12 6 4 6 2. + 0 + -0.0683920234441757 + 0.2602387070655823 + -0.0475253015756607 + <_> + + <_> + + + + <_>0 0 8 12 -1. + <_>0 0 4 6 2. + <_>4 6 4 6 2. + 0 + 0.0680033713579178 + -0.0458985604345798 + 0.4010710120201111 + <_> + + <_> + + + + <_>7 1 6 10 -1. + <_>10 1 3 5 2. + <_>7 6 3 5 2. + 0 + 0.0560981594026089 + 0.0232777893543243 + -0.8445712924003601 + <_> + + <_> + + + + <_>0 0 13 3 -1. + <_>0 1 13 1 3. + 0 + -0.0130240898579359 + -0.3834899067878723 + 0.0383141897618771 + <_> + + <_> + + + + <_>4 5 13 3 -1. + <_>4 6 13 1 3. + 0 + 0.0125946803018451 + -0.0676168426871300 + 0.2985244095325470 + <_> + + <_> + + + + <_>3 12 7 6 -1. + <_>3 14 7 2 3. + 0 + -0.0490638799965382 + -0.5586265921592712 + 0.0285116191953421 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -0.0157341696321964 + 0.2561193108558655 + -0.0594071410596371 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 0.0146748498082161 + -0.0630010217428207 + 0.2785499989986420 + <_> + + <_> + + + + <_>8 1 7 6 -1. + <_>8 3 7 2 3. + 0 + 0.0250680297613144 + -0.0788613483309746 + 0.1057737022638321 + <_> + + <_> + + + + <_>0 8 12 7 -1. + <_>6 8 6 7 2. + 0 + 7.4170758016407490e-003 + -0.3577589988708496 + 0.0487077012658119 + <_> + + <_> + + + + <_>0 1 20 4 -1. + <_>10 1 10 2 2. + <_>0 3 10 2 2. + 0 + -7.7149281278252602e-003 + -0.1804956048727036 + 0.0975316017866135 + <_> + + <_> + + + + <_>0 10 20 3 -1. + <_>0 11 20 1 3. + 0 + 0.0499820709228516 + 0.0210093203932047 + -0.7653753757476807 + <_> + + <_> + + + + <_>12 1 2 14 -1. + <_>12 1 1 14 2. + 0 + -0.0167596302926540 + -0.5904538035392761 + 0.0269480496644974 + <_> + + <_> + + + + <_>1 7 18 10 -1. + <_>7 7 6 10 3. + 0 + 0.3763282895088196 + 0.0219898503273726 + -0.6146131157875061 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + 0.0527208298444748 + -0.0390741601586342 + 0.2660067081451416 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. 
+ 0 + 0.0262701995670795 + -0.0938639864325523 + 0.2228026986122131 + <_> + + <_> + + + + <_>14 1 3 14 -1. + <_>15 1 1 14 3. + 0 + -2.5664661079645157e-003 + -0.1862180978059769 + 0.0985197126865387 + <_> + + <_> + + + + <_>5 8 6 5 -1. + <_>8 8 3 5 2. + 0 + 5.3800269961357117e-003 + 0.1281605958938599 + -0.1367170065641403 + <_> + + <_> + + + + <_>14 1 3 14 -1. + <_>15 1 1 14 3. + 0 + 0.0252000503242016 + 0.0308755896985531 + -0.2968142032623291 + <_> + + <_> + + + + <_>3 1 3 14 -1. + <_>4 1 1 14 3. + 0 + 0.0254440605640411 + 0.0439784117043018 + -0.4050532877445221 + <_> + + <_> + + + + <_>0 16 20 2 -1. + <_>0 17 20 1 2. + 0 + -0.0247158091515303 + -0.5849229097366333 + 0.0231797602027655 + <_> + + <_> + + + + <_>6 6 4 14 -1. + <_>8 6 2 14 2. + 0 + -0.0161596499383450 + -0.3195050060749054 + 0.0446035303175449 + <_> + + <_> + + + + <_>9 6 3 13 -1. + <_>10 6 1 13 3. + 0 + 6.5401610918343067e-003 + -0.0585759915411472 + 0.0740167871117592 + <_> + + <_> + + + + <_>1 9 7 6 -1. + <_>1 11 7 2 3. + 0 + -0.0439406484365463 + -0.7721183896064758 + 0.0193529799580574 + <_> + + <_> + + + + <_>9 3 5 9 -1. + <_>9 6 5 3 3. + 0 + -4.5612620306201279e-004 + 0.0303974207490683 + -0.2698299884796143 + <_> + + <_> + + + + <_>2 13 9 6 -1. + <_>5 13 3 6 3. + 0 + 2.8633379843086004e-003 + -0.1687434017658234 + 0.0888862684369087 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0594884604215622 + -0.3405894935131073 + 0.0246258806437254 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 0.0307144708931446 + 0.0317963995039463 + -0.4157277047634125 + <_> + + <_> + + + + <_>9 14 10 6 -1. + <_>14 14 5 3 2. + <_>9 17 5 3 2. + 0 + -0.0223303791135550 + 0.1289605051279068 + -0.0242325700819492 + <_> + + <_> + + + + <_>1 14 10 6 -1. + <_>1 14 5 3 2. + <_>6 17 5 3 2. + 0 + 0.0239716097712517 + -0.0768580585718155 + 0.2036072015762329 + <_> + + <_> + + + + <_>11 13 7 6 -1. + <_>11 15 7 2 3. + 0 + -0.0606967806816101 + -0.7206013202667236 + 0.0116178803145885 + <_> + + <_> + + + + <_>1 8 8 12 -1. + <_>1 8 4 6 2. + <_>5 14 4 6 2. + 0 + -0.0683622434735298 + 0.3582518100738525 + -0.0448078997433186 + <_> + + <_> + + + + <_>5 7 15 5 -1. + <_>10 7 5 5 3. + 0 + 0.1345103979110718 + 0.0260080695152283 + -0.2507762014865875 + <_> + + <_> + + + + <_>0 7 15 5 -1. + <_>5 7 5 5 3. + 0 + 0.1334117054939270 + 0.0471381805837154 + -0.3966158032417297 + <_> + + <_> + + + + <_>12 13 8 6 -1. + <_>12 15 8 2 3. + 0 + 0.0205243304371834 + 0.0438941717147827 + -0.2850196957588196 + <_> + + <_> + + + + <_>8 10 4 10 -1. + <_>8 15 4 5 2. + 0 + 0.0415436103940010 + 0.0254522208124399 + -0.5937765836715698 + <_> + + <_> + + + + <_>1 6 19 3 -1. + <_>1 7 19 1 3. + 0 + -0.0715734437108040 + -0.7874376177787781 + 0.0139793204143643 + <_> + + <_> + + + + <_>7 8 6 9 -1. + <_>7 11 6 3 3. + 0 + 0.0662646293640137 + 0.0229391306638718 + -0.5430498123168945 + <_> + + <_> + + + + <_>11 2 8 8 -1. + <_>15 2 4 4 2. + <_>11 6 4 4 2. + 0 + 4.4609569013118744e-003 + 0.0506881400942802 + -0.2059900015592575 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>9 6 1 14 3. + 0 + 0.0148595403879881 + -0.0734084621071815 + 0.1990225017070770 + <_> + + <_> + + + + <_>9 2 3 13 -1. + <_>10 2 1 13 3. + 0 + -0.0396253392100334 + -0.5352293252944946 + 9.3211038038134575e-003 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + -9.6143726259469986e-003 + 0.2766486108303070 + -0.0630875229835510 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0545898303389549 + 0.0249628592282534 + -0.5817118883132935 + <_> + + <_> + + + + <_>3 2 3 18 -1. 
+ <_>3 8 3 6 3. + 0 + 0.0137708997353911 + -0.2289174944162369 + 0.0699636712670326 + <_> + + <_> + + + + <_>1 5 18 10 -1. + <_>10 5 9 5 2. + <_>1 10 9 5 2. + 0 + 0.0868623405694962 + 0.0240580104291439 + -0.5864248275756836 + <_> + + <_> + + + + <_>6 1 2 13 -1. + <_>7 1 1 13 2. + 0 + -0.0224330108612776 + -0.9216936230659485 + 0.0132817998528481 + <_> + + <_> + + + + <_>11 0 8 6 -1. + <_>11 2 8 2 3. + 0 + -0.0737795978784561 + 0.3846378922462463 + -8.5962712764739990e-003 + <_> + + <_> + + + + <_>4 0 7 6 -1. + <_>4 2 7 2 3. + 0 + 2.9300490859895945e-004 + -0.1717057973146439 + 0.0885201096534729 + -1.7598799467086792 + 19 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 10 3 -1. + <_>5 2 5 3 2. + 0 + 5.3288340568542480e-003 + -0.2661677002906799 + 0.1776044964790344 + <_> + + <_> + + + + <_>1 4 19 4 -1. + <_>1 6 19 2 2. + 0 + -4.0987450629472733e-003 + 0.1235842034220696 + -0.3080511093139648 + <_> + + <_> + + + + <_>5 7 6 5 -1. + <_>8 7 3 5 2. + 0 + -5.5853058584034443e-003 + -0.5053399205207825 + 0.0620501190423965 + <_> + + <_> + + + + <_>11 10 5 6 -1. + <_>11 13 5 3 2. + 0 + -5.1797390915453434e-004 + 0.0691780671477318 + -0.3483135998249054 + <_> + + <_> + + + + <_>7 8 4 12 -1. + <_>7 12 4 4 3. + 0 + 5.3605018183588982e-003 + 0.0651586726307869 + -0.4626223146915436 + <_> + + <_> + + + + <_>10 1 10 19 -1. + <_>10 1 5 19 2. + 0 + 0.0301142707467079 + -0.0641323626041412 + 0.0710700601339340 + <_> + + <_> + + + + <_>0 1 10 19 -1. + <_>5 1 5 19 2. + 0 + 0.0890142917633057 + 0.0429871305823326 + -0.6017789840698242 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + 1.5248140553012490e-003 + -0.3307178914546967 + 0.0714083015918732 + <_> + + <_> + + + + <_>2 7 7 6 -1. + <_>2 9 7 2 3. + 0 + 1.8556410213932395e-003 + -0.3472712039947510 + 0.0706306770443916 + <_> + + <_> + + + + <_>10 5 10 12 -1. + <_>10 11 10 6 2. + 0 + -0.0161516200751066 + -0.2561177015304565 + 0.0712556988000870 + <_> + + <_> + + + + <_>5 10 4 8 -1. + <_>5 14 4 4 2. + 0 + -3.1278008827939630e-004 + 0.0734203308820724 + -0.2959462106227875 + <_> + + <_> + + + + <_>7 5 8 12 -1. + <_>11 5 4 6 2. + <_>7 11 4 6 2. + 0 + -6.0263078921707347e-005 + 0.0665661916136742 + -0.2180245071649551 + <_> + + <_> + + + + <_>5 5 8 12 -1. + <_>5 5 4 6 2. + <_>9 11 4 6 2. + 0 + 7.6520902803167701e-004 + 0.0755371972918510 + -0.3767788112163544 + <_> + + <_> + + + + <_>14 1 6 8 -1. + <_>16 1 2 8 3. + 0 + -0.0695890709757805 + 0.3981064856052399 + -0.0258418191224337 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>2 1 2 9 3. + 0 + -0.0985295772552490 + 0.6732196807861328 + -0.0339254699647427 + <_> + + <_> + + + + <_>1 6 18 4 -1. + <_>7 6 6 4 3. + 0 + 0.0499500595033169 + 0.0616605691611767 + -0.3785111010074616 + <_> + + <_> + + + + <_>3 12 13 2 -1. + <_>3 13 13 1 2. + 0 + 3.9009240572340786e-004 + -0.0964286103844643 + 0.2170020043849945 + <_> + + <_> + + + + <_>3 3 14 2 -1. + <_>3 4 14 1 2. + 0 + -7.1598717477172613e-004 + -0.1835810989141464 + 0.1058740019798279 + <_> + + <_> + + + + <_>2 0 13 6 -1. + <_>2 2 13 2 3. + 0 + 3.8064830005168915e-003 + -0.1752761006355286 + 0.1143039986491203 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + 6.5288757905364037e-003 + 0.0679945275187492 + -0.3072611987590790 + <_> + + <_> + + + + <_>3 8 13 2 -1. + <_>3 9 13 1 2. + 0 + 2.2182099055498838e-003 + -0.2793523073196411 + 0.0587907209992409 + <_> + + <_> + + + + <_>12 0 4 14 -1. + <_>14 0 2 7 2. + <_>12 7 2 7 2. + 0 + 1.7800349451135844e-004 + 0.0994891077280045 + -0.2661688029766083 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. 
+ 0 + -0.0326566807925701 + 0.5873476266860962 + -0.0265458803623915 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 0.0267733503133059 + 0.0364144109189510 + -0.3718883097171783 + <_> + + <_> + + + + <_>1 0 6 12 -1. + <_>4 0 3 12 2. + 0 + 0.0127803096547723 + -0.0845405235886574 + 0.1785326004028320 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + 5.5374070070683956e-003 + -0.1089204996824265 + 0.1440391987562180 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -7.1258977986872196e-003 + 0.1985002011060715 + -0.0833593979477882 + <_> + + <_> + + + + <_>5 1 15 3 -1. + <_>5 2 15 1 3. + 0 + 8.0109452828764915e-003 + 0.0488443486392498 + -0.2859002947807312 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0272311307489872 + -0.6855816245079041 + 0.0218777693808079 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + -0.0209289491176605 + -0.2082023024559021 + 0.0265852306038141 + <_> + + <_> + + + + <_>1 11 7 6 -1. + <_>1 13 7 2 3. + 0 + 3.9801741950213909e-003 + 0.0670047774910927 + -0.2301581054925919 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 2.1598068997263908e-003 + -0.0931090191006660 + 0.1723553985357285 + <_> + + <_> + + + + <_>2 14 13 3 -1. + <_>2 15 13 1 3. + 0 + 9.9411439150571823e-003 + -0.0449998192489147 + 0.3183049857616425 + <_> + + <_> + + + + <_>0 5 20 10 -1. + <_>10 5 10 5 2. + <_>0 10 10 5 2. + 0 + -0.0179388597607613 + -0.2151595950126648 + 0.0724629163742065 + <_> + + <_> + + + + <_>4 6 4 7 -1. + <_>6 6 2 7 2. + 0 + -1.5030350368760992e-005 + 0.0914379730820656 + -0.1670629978179932 + <_> + + <_> + + + + <_>4 6 14 6 -1. + <_>11 6 7 3 2. + <_>4 9 7 3 2. + 0 + 4.2446260340511799e-003 + 0.0648107603192329 + -0.1055627018213272 + <_> + + <_> + + + + <_>5 6 6 8 -1. + <_>5 10 6 4 2. + 0 + 7.4575991675374098e-006 + -0.2630968987941742 + 0.0565884001553059 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>14 10 3 10 2. + 0 + -0.0104572102427483 + 0.1607888042926788 + -0.0727080330252647 + <_> + + <_> + + + + <_>2 18 13 2 -1. + <_>2 19 13 1 2. + 0 + -1.2225599493831396e-003 + 0.1155833005905151 + -0.1223348975181580 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 14 16 2 2. + 0 + 0.0160616301000118 + 0.0282017905265093 + -0.5099617838859558 + <_> + + <_> + + + + <_>1 6 10 6 -1. + <_>1 6 5 3 2. + <_>6 9 5 3 2. + 0 + -0.0161620303988457 + -0.3385752141475678 + 0.0359247811138630 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>14 10 3 10 2. + 0 + 7.2181350551545620e-003 + -0.0727062001824379 + 0.1062465980648994 + <_> + + <_> + + + + <_>0 10 6 10 -1. + <_>3 10 3 10 2. + 0 + -0.0104166604578495 + 0.1620581001043320 + -0.0945677608251572 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + 0.0139466002583504 + 0.0541696399450302 + -0.3206804096698761 + <_> + + <_> + + + + <_>0 0 4 17 -1. + <_>2 0 2 17 2. + 0 + 0.0127341197803617 + -0.0860661119222641 + 0.1964863985776901 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0278583709150553 + -0.2840923964977264 + 0.0267065502703190 + <_> + + <_> + + + + <_>2 4 6 16 -1. + <_>2 4 3 8 2. + <_>5 12 3 8 2. + 0 + -0.0989315211772919 + 0.5845760703086853 + -0.0219555106014013 + <_> + + <_> + + + + <_>5 6 10 8 -1. + <_>10 6 5 4 2. + <_>5 10 5 4 2. + 0 + 2.3434299509972334e-003 + 0.0964754670858383 + -0.1209534034132958 + <_> + + <_> + + + + <_>4 6 8 8 -1. + <_>4 6 4 4 2. + <_>8 10 4 4 2. + 0 + -2.3025700356811285e-003 + 0.0732979699969292 + -0.2230906933546066 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. 
+ 0 + 0.0307910796254873 + 0.0114638796076179 + -0.2403407990932465 + <_> + + <_> + + + + <_>4 2 12 5 -1. + <_>8 2 4 5 3. + 0 + -8.4339501336216927e-003 + 0.2961153984069824 + -0.0426636897027493 + <_> + + <_> + + + + <_>11 2 2 18 -1. + <_>11 2 1 18 2. + 0 + -3.4617669880390167e-003 + -0.2125786989927292 + 0.0427094586193562 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>10 6 2 7 2. + 0 + -0.0333719290792942 + 0.3529927134513855 + -0.0355705693364143 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>10 9 4 4 2. + <_>6 13 4 4 2. + 0 + -0.0372381284832954 + -0.5917713046073914 + 0.0267758406698704 + <_> + + <_> + + + + <_>0 5 20 5 -1. + <_>10 5 10 5 2. + 0 + -0.2086006999015808 + -0.5759524106979370 + 0.0197635591030121 + <_> + + <_> + + + + <_>4 4 12 4 -1. + <_>4 6 12 2 2. + 0 + -0.0682798177003860 + 0.3458260893821716 + -0.0378611795604229 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 0.0116003202274442 + 0.0576855801045895 + -0.2600820958614349 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -0.0672189593315125 + -0.4504827857017517 + 0.0124951899051666 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -5.1632397808134556e-003 + 0.1614670008420944 + -0.0769757702946663 + <_> + + <_> + + + + <_>3 15 16 4 -1. + <_>11 15 8 2 2. + <_>3 17 8 2 2. + 0 + 0.0401133112609386 + 0.0131312301382422 + -0.4573144912719727 + <_> + + <_> + + + + <_>1 15 16 4 -1. + <_>1 15 8 2 2. + <_>9 17 8 2 2. + 0 + 0.0378377400338650 + 0.0230019204318523 + -0.5363628864288330 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + 2.6023429818451405e-003 + -0.0610074400901794 + 0.1708422005176544 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + -0.0718416422605515 + -0.5833038091659546 + 0.0200752504169941 + <_> + + <_> + + + + <_>6 11 8 9 -1. + <_>6 14 8 3 3. + 0 + -8.2885712618008256e-004 + 0.0534653402864933 + -0.1909226030111313 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + -8.1979477545246482e-004 + -0.2377593070268631 + 0.0458449088037014 + <_> + + <_> + + + + <_>4 15 13 3 -1. + <_>4 16 13 1 3. + 0 + 0.0104748597368598 + -0.0401034206151962 + 0.2494840025901794 + <_> + + <_> + + + + <_>0 10 5 9 -1. + <_>0 13 5 3 3. + 0 + -6.3726361840963364e-003 + -0.1708784997463226 + 0.0728946030139923 + <_> + + <_> + + + + <_>12 10 8 4 -1. + <_>12 12 8 2 2. + 0 + -0.0361134894192219 + -0.3687992990016937 + 0.0183317307382822 + <_> + + <_> + + + + <_>0 10 8 4 -1. + <_>0 12 8 2 2. + 0 + 5.4730800911784172e-004 + 0.0720730572938919 + -0.1889377981424332 + <_> + + <_> + + + + <_>5 1 10 6 -1. + <_>5 3 10 2 3. + 0 + 0.0175476595759392 + -0.0944525972008705 + 0.1331100016832352 + <_> + + <_> + + + + <_>0 0 7 6 -1. + <_>0 2 7 2 3. + 0 + 6.3078789971768856e-003 + 0.0762234702706337 + -0.1666823029518127 + <_> + + <_> + + + + <_>3 6 14 9 -1. + <_>3 9 14 3 3. + 0 + 2.5120719801634550e-003 + 0.5037552714347839 + -0.0226243492215872 + <_> + + <_> + + + + <_>7 6 6 10 -1. + <_>9 6 2 10 3. + 0 + 4.5274170115590096e-003 + -0.1344659030437470 + 0.0991675779223442 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -1.4772829308640212e-004 + 0.0396751798689365 + -0.0600154884159565 + <_> + + <_> + + + + <_>3 0 6 9 -1. + <_>5 0 2 9 3. + 0 + 0.0147287398576736 + 0.0392089188098907 + -0.3056001961231232 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -5.6161261163651943e-003 + -0.1084505021572113 + 0.0477546602487564 + <_> + + <_> + + + + <_>0 0 4 17 -1. + <_>2 0 2 17 2. 
+ 0 + -9.8265614360570908e-003 + 0.1672933995723724 + -0.0767566934227943 + <_> + + <_> + + + + <_>8 0 12 16 -1. + <_>12 0 4 16 3. + 0 + 0.0179723296314478 + -0.0591479688882828 + 0.1277327984571457 + <_> + + <_> + + + + <_>0 0 12 16 -1. + <_>4 0 4 16 3. + 0 + 0.0112331397831440 + -0.0926260203123093 + 0.1573573946952820 + <_> + + <_> + + + + <_>5 6 10 6 -1. + <_>5 9 10 3 2. + 0 + 1.3678249670192599e-003 + -0.5615676045417786 + 0.0218007508665323 + <_> + + <_> + + + + <_>7 4 2 14 -1. + <_>8 4 1 14 2. + 0 + -4.1535100899636745e-003 + -0.2695116996765137 + 0.0412134788930416 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>18 5 2 7 2. + <_>16 12 2 7 2. + 0 + -0.0671946927905083 + 0.5600836277008057 + -0.0209737401455641 + <_> + + <_> + + + + <_>4 4 6 8 -1. + <_>6 4 2 8 3. + 0 + -0.0805724114179611 + -0.7584664225578308 + 0.0166143104434013 + <_> + + <_> + + + + <_>5 4 14 3 -1. + <_>5 5 14 1 3. + 0 + -9.7504993900656700e-003 + 0.2278127968311310 + -0.0402463302016258 + <_> + + <_> + + + + <_>3 4 13 3 -1. + <_>3 5 13 1 3. + 0 + 5.6034037843346596e-003 + -0.0755198523402214 + 0.1637201011180878 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + -0.0102320602163672 + -0.3580319881439209 + 0.0463310889899731 + <_> + + <_> + + + + <_>0 13 9 6 -1. + <_>0 15 9 2 3. + 0 + 2.8616760391741991e-003 + 0.0677462369203568 + -0.1642912030220032 + <_> + + <_> + + + + <_>8 10 10 6 -1. + <_>8 12 10 2 3. + 0 + 7.7214869670569897e-003 + 0.0344948209822178 + -0.1776258051395416 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + -7.0147789083421230e-003 + 0.1728224009275436 + -0.0651763230562210 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + 0.0504708699882030 + -0.0270719602704048 + 0.3550944030284882 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -5.7124681770801544e-003 + -0.1590107977390289 + 0.0795591101050377 + <_> + + <_> + + + + <_>13 0 3 19 -1. + <_>14 0 1 19 3. + 0 + 8.7470682337880135e-003 + 0.0377898588776588 + -0.1915664970874786 + <_> + + <_> + + + + <_>4 0 3 19 -1. + <_>5 0 1 19 3. + 0 + 0.0200589299201965 + 0.0274152997881174 + -0.3807010948657990 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -1.8094859551638365e-003 + 0.1053837984800339 + -0.1499654948711395 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -7.3339277878403664e-003 + 0.2920326888561249 + -0.0612181909382343 + <_> + + <_> + + + + <_>7 7 6 9 -1. + <_>7 10 6 3 3. + 0 + 4.4179419055581093e-003 + 0.1886862069368362 + -0.0581327416002750 + <_> + + <_> + + + + <_>6 4 4 15 -1. + <_>6 9 4 5 3. + 0 + -0.0135433096438646 + -0.4940955936908722 + 0.0228559300303459 + <_> + + <_> + + + + <_>14 0 6 7 -1. + <_>16 0 2 7 3. + 0 + 0.0361972711980343 + -0.0260891206562519 + 0.3089025020599365 + <_> + + <_> + + + + <_>2 4 14 12 -1. + <_>2 4 7 6 2. + <_>9 10 7 6 2. + 0 + -0.1183184012770653 + -0.5909466147422791 + 0.0182152800261974 + <_> + + <_> + + + + <_>4 15 12 5 -1. + <_>4 15 6 5 2. + 0 + 0.0756560713052750 + -0.0359655804932117 + 0.3038612008094788 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -0.0131345195695758 + -0.2630613148212433 + 0.0422629192471504 + <_> + + <_> + + + + <_>16 6 4 14 -1. + <_>18 6 2 7 2. + <_>16 13 2 7 2. + 0 + 0.0189811605960131 + -0.0264836307615042 + 0.1937198936939240 + <_> + + <_> + + + + <_>0 6 4 14 -1. + <_>0 6 2 7 2. + <_>2 13 2 7 2. + 0 + -0.0460032299160957 + 0.4051350057125092 + -0.0244542006403208 + <_> + + <_> + + + + <_>11 14 8 6 -1. + <_>11 16 8 2 3. 
+ 0 + -0.0132327303290367 + -0.2972126901149750 + 0.0479592196643353 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1958685070276260 + 0.0105403997004032 + -0.8664792776107788 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 9.6459556370973587e-003 + -0.0713349431753159 + 0.1146951019763947 + <_> + + <_> + + + + <_>7 1 5 12 -1. + <_>7 7 5 6 2. + 0 + -3.9044579025357962e-003 + 0.1074031963944435 + -0.0985149964690208 + <_> + + <_> + + + + <_>5 0 10 8 -1. + <_>5 4 10 4 2. + 0 + 0.0168963707983494 + -0.0768050700426102 + 0.1953320056200028 + <_> + + <_> + + + + <_>0 1 15 12 -1. + <_>0 5 15 4 3. + 0 + -5.5025662295520306e-003 + 0.0506431907415390 + -0.2089843004941940 + <_> + + <_> + + + + <_>7 3 6 10 -1. + <_>7 8 6 5 2. + 0 + -0.0196215696632862 + -0.2965135872364044 + 0.0329550504684448 + <_> + + <_> + + + + <_>6 4 4 16 -1. + <_>6 4 2 8 2. + <_>8 12 2 8 2. + 0 + 7.7158107887953520e-004 + 0.0460170991718769 + -0.1998299956321716 + <_> + + <_> + + + + <_>1 4 18 4 -1. + <_>7 4 6 4 3. + 0 + -0.1110284030437470 + 0.5757871270179749 + -0.0177415292710066 + <_> + + <_> + + + + <_>0 3 12 6 -1. + <_>0 3 6 3 2. + <_>6 6 6 3 2. + 0 + 1.4945500297471881e-003 + 0.0473357290029526 + -0.2089890986680985 + <_> + + <_> + + + + <_>12 1 8 10 -1. + <_>16 1 4 5 2. + <_>12 6 4 5 2. + 0 + 0.0506679192185402 + -0.0186576191335917 + 0.3407045900821686 + <_> + + <_> + + + + <_>0 1 8 10 -1. + <_>0 1 4 5 2. + <_>4 6 4 5 2. + 0 + 0.0160731691867113 + -0.0364494882524014 + 0.2656807899475098 + <_> + + <_> + + + + <_>6 12 8 8 -1. + <_>10 12 4 4 2. + <_>6 16 4 4 2. + 0 + -0.0265367403626442 + -0.3614169061183929 + 0.0297342706471682 + <_> + + <_> + + + + <_>5 8 8 12 -1. + <_>5 8 4 6 2. + <_>9 14 4 6 2. + 0 + -5.2550169639289379e-003 + -0.1310449987649918 + 0.0821535289287567 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -0.0166785605251789 + 0.3132489025592804 + -0.0450525283813477 + <_> + + <_> + + + + <_>3 11 14 6 -1. + <_>3 11 7 3 2. + <_>10 14 7 3 2. + 0 + 3.4808400087058544e-003 + 0.0829457789659500 + -0.1575350016355515 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -0.0808890536427498 + -0.6431419849395752 + 7.1740332059562206e-003 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -5.4260632023215294e-003 + 0.1353313028812408 + -0.1054790988564491 + <_> + + <_> + + + + <_>11 4 4 12 -1. + <_>11 4 2 12 2. + 0 + 0.0166308395564556 + 0.0416021011769772 + -0.2666820883750916 + <_> + + <_> + + + + <_>7 4 5 14 -1. + <_>7 11 5 7 2. + 0 + 1.7991060158237815e-003 + 0.0595310889184475 + -0.1835530996322632 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 0.0272199697792530 + -0.0265868306159973 + 0.2272228002548218 + <_> + + <_> + + + + <_>5 4 4 12 -1. + <_>7 4 2 12 2. + 0 + -9.6450755372643471e-003 + -0.2142816931009293 + 0.0495157316327095 + <_> + + <_> + + + + <_>4 11 12 7 -1. + <_>4 11 6 7 2. + 0 + 0.0831238031387329 + -0.0421768911182880 + 0.3079341948032379 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 0.0144064500927925 + -0.0295000206679106 + 0.3214437961578369 + <_> + + <_> + + + + <_>5 6 12 6 -1. + <_>11 6 6 3 2. + <_>5 9 6 3 2. + 0 + 4.7938730567693710e-003 + 0.0512440912425518 + -0.1093185022473335 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>3 11 7 2 2. + <_>10 13 7 2 2. + 0 + -2.8978011105209589e-003 + -0.1434437036514282 + 0.0665972232818604 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. 
+ 0 + -0.0458876900374889 + 0.1800383031368256 + -0.0156427901238203 + <_> + + <_> + + + + <_>4 0 12 10 -1. + <_>4 0 6 5 2. + <_>10 5 6 5 2. + 0 + -0.0547177009284496 + -0.3511080145835877 + 0.0304388906806707 + <_> + + <_> + + + + <_>8 5 12 15 -1. + <_>8 5 6 15 2. + 0 + -0.0197873692959547 + 0.0933853313326836 + -0.0493825711309910 + <_> + + <_> + + + + <_>1 12 14 3 -1. + <_>1 13 14 1 3. + 0 + 2.5110379792749882e-003 + -0.0666726008057594 + 0.1440619975328445 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0536601506173611 + 0.0144688403233886 + -0.6700747013092041 + <_> + + <_> + + + + <_>2 17 16 3 -1. + <_>10 17 8 3 2. + 0 + -8.1825470551848412e-003 + 0.1151012033224106 + -0.0809326171875000 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -3.5225939936935902e-003 + -0.1418114006519318 + 0.0613306201994419 + <_> + + <_> + + + + <_>7 8 4 9 -1. + <_>9 8 2 9 2. + 0 + 0.0282715503126383 + -0.0283538904041052 + 0.3704513013362885 + <_> + + <_> + + + + <_>4 3 12 12 -1. + <_>10 3 6 6 2. + <_>4 9 6 6 2. + 0 + -0.0649230182170868 + -0.4648115932941437 + 0.0228072591125965 + <_> + + <_> + + + + <_>0 0 6 20 -1. + <_>3 0 3 20 2. + 0 + -0.3506585061550140 + -0.8252905011177063 + 0.0110314600169659 + <_> + + <_> + + + + <_>11 12 7 6 -1. + <_>11 14 7 2 3. + 0 + 5.1821782253682613e-003 + 0.0365832708775997 + -0.2456717938184738 + <_> + + <_> + + + + <_>3 13 14 2 -1. + <_>3 14 14 1 2. + 0 + 9.2609220882877707e-004 + -0.0618987381458282 + 0.1930757015943527 + <_> + + <_> + + + + <_>13 11 7 4 -1. + <_>13 13 7 2 2. + 0 + 2.5952830910682678e-003 + 0.0430157184600830 + -0.1977027058601379 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + 3.4880579914897680e-003 + -0.0682965368032455 + 0.1572528034448624 + <_> + + <_> + + + + <_>13 1 6 12 -1. + <_>15 1 2 12 3. + 0 + 2.4002529680728912e-003 + -0.0686181783676147 + 0.0685519874095917 + <_> + + <_> + + + + <_>1 1 6 12 -1. + <_>3 1 2 12 3. + 0 + 1.2020230060443282e-003 + -0.1207313984632492 + 0.0950265228748322 + <_> + + <_> + + + + <_>4 8 14 12 -1. + <_>4 12 14 4 3. + 0 + -0.0204703602939844 + -0.1289163976907730 + 0.0793865993618965 + <_> + + <_> + + + + <_>0 6 6 12 -1. + <_>3 6 3 12 2. + 0 + -0.0595161803066731 + 0.2486968934535980 + -0.0497291609644890 + <_> + + <_> + + + + <_>13 1 3 13 -1. + <_>14 1 1 13 3. + 0 + -0.0105689503252506 + -0.1858384013175964 + 0.0207003206014633 + <_> + + <_> + + + + <_>4 1 3 13 -1. + <_>5 1 1 13 3. + 0 + -0.0141929201781750 + -0.3813742995262146 + 0.0298792794346809 + <_> + + <_> + + + + <_>16 2 3 14 -1. + <_>17 2 1 14 3. + 0 + -2.4968578945845366e-003 + 0.0915166810154915 + -0.0501783117651939 + <_> + + <_> + + + + <_>1 2 3 14 -1. + <_>2 2 1 14 3. + 0 + 1.7714010027702898e-004 + -0.1147001981735230 + 0.0992456972599030 + <_> + + <_> + + + + <_>6 9 14 3 -1. + <_>6 10 14 1 3. + 0 + 0.0783186703920364 + 3.6057420074939728e-003 + -0.9999607205390930 + <_> + + <_> + + + + <_>0 9 14 3 -1. + <_>0 10 14 1 3. + 0 + 1.5502399764955044e-003 + -0.1288861036300659 + 0.0798220112919807 + <_> + + <_> + + + + <_>4 6 14 6 -1. + <_>11 6 7 3 2. + <_>4 9 7 3 2. + 0 + -6.6678877919912338e-003 + -0.0882445573806763 + 0.0281025990843773 + <_> + + <_> + + + + <_>2 6 14 6 -1. + <_>2 6 7 3 2. + <_>9 9 7 3 2. + 0 + -4.0497239679098129e-003 + -0.1442718058824539 + 0.0871263965964317 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + -0.0354815311729908 + -0.4468117058277130 + 0.0148082701489329 + <_> + + <_> + + + + <_>3 1 10 16 -1. + <_>3 1 5 8 2. + <_>8 9 5 8 2. 
+ 0 + -0.0125977201387286 + 0.0893241912126541 + -0.1251814067363739 + <_> + + <_> + + + + <_>3 7 14 12 -1. + <_>10 7 7 6 2. + <_>3 13 7 6 2. + 0 + 7.4662449769675732e-003 + 0.0748881995677948 + -0.1358778029680252 + <_> + + <_> + + + + <_>2 2 13 6 -1. + <_>2 5 13 3 2. + 0 + -0.0675369873642921 + 0.2341682016849518 + -0.0409522689878941 + <_> + + <_> + + + + <_>14 1 6 6 -1. + <_>14 4 6 3 2. + 0 + 0.0827041715383530 + 7.6422439888119698e-003 + -0.8517755270004273 + <_> + + <_> + + + + <_>0 1 6 6 -1. + <_>0 4 6 3 2. + 0 + -7.1595138870179653e-003 + -0.1873801052570343 + 0.0552884191274643 + <_> + + <_> + + + + <_>1 0 18 4 -1. + <_>7 0 6 4 3. + 0 + -0.0104810697957873 + 0.1827110946178436 + -0.0596419684588909 + <_> + + <_> + + + + <_>2 0 4 14 -1. + <_>4 0 2 14 2. + 0 + 4.5238467864692211e-003 + -0.0838176012039185 + 0.1482218056917191 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + -2.6731120306067169e-004 + -0.2089677006006241 + 0.0458357296884060 + <_> + + <_> + + + + <_>1 8 18 3 -1. + <_>7 8 6 3 3. + 0 + 0.0338385812938213 + 0.0425828695297241 + -0.2188381999731064 + <_> + + <_> + + + + <_>4 7 13 2 -1. + <_>4 8 13 1 2. + 0 + 2.2287720348685980e-003 + -0.1328423023223877 + 0.0817953199148178 + <_> + + <_> + + + + <_>2 1 16 6 -1. + <_>2 1 8 3 2. + <_>10 4 8 3 2. + 0 + -5.4200361482799053e-003 + -0.1389651000499725 + 0.0711547136306763 + <_> + + <_> + + + + <_>9 5 7 9 -1. + <_>9 8 7 3 3. + 0 + -0.0496429689228535 + 0.4890164136886597 + -0.0115569597110152 + <_> + + <_> + + + + <_>2 9 8 8 -1. + <_>2 9 4 4 2. + <_>6 13 4 4 2. + 0 + 3.3323399256914854e-003 + 0.0514261610805988 + -0.1826944053173065 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 0.0243439394980669 + -0.0318395607173443 + 0.1275885999202728 + <_> + + <_> + + + + <_>1 14 12 6 -1. + <_>1 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0237744897603989 + 0.3277355134487152 + -0.0272167604416609 + <_> + + <_> + + + + <_>13 10 7 6 -1. + <_>13 12 7 2 3. + 0 + 3.6809889134019613e-003 + 0.0529220402240753 + -0.1288072019815445 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + -3.2609070185571909e-003 + -0.1494812071323395 + 0.0657335370779037 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 0.0107938898727298 + -0.0329699516296387 + 0.3295542001724243 + <_> + + <_> + + + + <_>1 15 14 2 -1. + <_>1 16 14 1 2. + 0 + 5.4287910461425781e-004 + -0.1067868024110794 + 0.0985642299056053 + <_> + + <_> + + + + <_>13 12 7 6 -1. + <_>13 14 7 2 3. + 0 + 0.0119027597829700 + 0.0356829203665257 + -0.3131744861602783 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 2.4277849588543177e-003 + -0.0620806589722633 + 0.1759850978851318 + <_> + + <_> + + + + <_>7 13 7 6 -1. + <_>7 15 7 2 3. + 0 + -4.4930889271199703e-003 + 0.1179085001349449 + -0.1059319972991943 + -1.5360039472579956 + 20 + -1 + <_> + + + <_> + + <_> + + + + <_>5 5 6 10 -1. + <_>5 5 3 5 2. + <_>8 10 3 5 2. + 0 + -0.0206564702093601 + 0.2536514997482300 + -0.3104461133480072 + <_> + + <_> + + + + <_>5 4 10 4 -1. + <_>5 6 10 2 2. + 0 + -0.0365183502435684 + 0.2448413074016571 + -0.2322119027376175 + <_> + + <_> + + + + <_>1 0 18 20 -1. + <_>7 0 6 20 3. + 0 + 0.4931235015392304 + -0.1627524048089981 + 0.2811619043350220 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + 2.0970099285477772e-005 + -0.3084000945091248 + 0.1731754988431931 + <_> + + <_> + + + + <_>0 5 9 7 -1. + <_>3 5 3 7 3. + 0 + 0.0130829298868775 + -0.2598322033882141 + 0.1567586958408356 + <_> + + <_> + + + + <_>11 9 4 8 -1. + <_>11 13 4 4 2. 
+ 0 + -4.3061940232291818e-004 + 0.0785436034202576 + -0.3901607096195221 + <_> + + <_> + + + + <_>0 3 20 10 -1. + <_>0 8 20 5 2. + 0 + -0.0163674000650644 + -0.4300003945827484 + 0.0741416364908218 + <_> + + <_> + + + + <_>7 0 6 12 -1. + <_>9 0 2 12 3. + 0 + 0.0362693890929222 + -0.1707320064306259 + 0.1804596930742264 + <_> + + <_> + + + + <_>3 16 14 4 -1. + <_>3 18 14 2 2. + 0 + 0.0123402699828148 + 0.0887753814458847 + -0.3440265953540802 + <_> + + <_> + + + + <_>11 9 4 8 -1. + <_>11 13 4 4 2. + 0 + -0.0735162869095802 + -0.4162347912788391 + -2.9528199229389429e-003 + <_> + + <_> + + + + <_>5 9 4 8 -1. + <_>5 13 4 4 2. + 0 + 4.6191830188035965e-004 + 0.0656298995018005 + -0.4101825058460236 + <_> + + <_> + + + + <_>6 11 13 3 -1. + <_>6 12 13 1 3. + 0 + -0.0147440396249294 + 0.2277503013610840 + -0.0791848674416542 + <_> + + <_> + + + + <_>0 0 19 6 -1. + <_>0 2 19 2 3. + 0 + 4.2559150606393814e-003 + -0.2400496006011963 + 0.1132109016180039 + <_> + + <_> + + + + <_>2 3 16 2 -1. + <_>2 4 16 1 2. + 0 + -3.6180280148983002e-003 + -0.2761206924915314 + 0.1011805012822151 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0460129193961620 + 0.0457635894417763 + -0.5471364855766296 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0161818098276854 + 0.1948966979980469 + -0.0739553421735764 + <_> + + <_> + + + + <_>7 1 3 12 -1. + <_>7 7 3 6 2. + 0 + -2.3682719984208234e-005 + 0.1172968000173569 + -0.1939682960510254 + <_> + + <_> + + + + <_>12 4 4 10 -1. + <_>12 9 4 5 2. + 0 + -2.1599140018224716e-003 + -0.4565455019474030 + 0.0426995307207108 + <_> + + <_> + + + + <_>0 2 13 2 -1. + <_>0 3 13 1 2. + 0 + -7.9827345907688141e-003 + -0.5410720109939575 + 0.0400361306965351 + <_> + + <_> + + + + <_>7 6 8 4 -1. + <_>7 6 4 4 2. + 0 + -8.1530469469726086e-004 + -0.2064051926136017 + 0.0667950734496117 + <_> + + <_> + + + + <_>5 6 8 4 -1. + <_>9 6 4 4 2. + 0 + -4.7501060180366039e-003 + -0.3657212853431702 + 0.0756657496094704 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>10 11 7 2 2. + <_>3 13 7 2 2. + 0 + -0.0348701402544975 + -0.8009381294250488 + 0.0223565399646759 + <_> + + <_> + + + + <_>3 10 14 4 -1. + <_>3 10 7 2 2. + <_>10 12 7 2 2. + 0 + -0.0199495591223240 + -0.3911063075065613 + 0.0468446500599384 + <_> + + <_> + + + + <_>6 6 14 3 -1. + <_>6 7 14 1 3. + 0 + -5.9008211828768253e-003 + 0.0907564982771873 + -0.1760028004646301 + <_> + + <_> + + + + <_>0 9 20 3 -1. + <_>0 10 20 1 3. + 0 + -1.4019970549270511e-003 + -0.2926093041896820 + 0.0648941099643707 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0228869393467903 + -0.4839186966419220 + 0.0505149587988853 + <_> + + <_> + + + + <_>9 3 2 13 -1. + <_>10 3 1 13 2. + 0 + -0.0100392904132605 + 0.2692166864871979 + -0.0752743706107140 + <_> + + <_> + + + + <_>9 0 3 13 -1. + <_>10 0 1 13 3. + 0 + 0.0167291890829802 + -0.0732175335288048 + 0.2204515933990479 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0204239096492529 + -0.4516198039054871 + 0.0458581112325192 + <_> + + <_> + + + + <_>12 12 7 4 -1. + <_>12 14 7 2 2. + 0 + -0.0351046808063984 + -0.5516998171806335 + 0.0231183003634214 + <_> + + <_> + + + + <_>1 12 7 4 -1. + <_>1 14 7 2 2. + 0 + 0.0106979999691248 + 0.0335165895521641 + -0.5248265266418457 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>10 10 3 5 2. + <_>7 15 3 5 2. + 0 + -0.0389782413840294 + -0.6233118772506714 + 0.0268384199589491 + <_> + + <_> + + + + <_>1 17 13 3 -1. + <_>1 18 13 1 3. 
+ 0 + 4.8226700164377689e-003 + -0.1121554970741272 + 0.1561378985643387 + <_> + + <_> + + + + <_>4 0 16 9 -1. + <_>4 0 8 9 2. + 0 + 0.3687823116779327 + 0.0198579803109169 + -0.6126074790954590 + <_> + + <_> + + + + <_>0 8 13 3 -1. + <_>0 9 13 1 3. + 0 + -7.7059920877218246e-003 + -0.3737111091613770 + 0.0437242388725281 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0668433234095573 + -0.5077208876609802 + 0.0244010891765356 + <_> + + <_> + + + + <_>0 3 20 4 -1. + <_>0 3 10 2 2. + <_>10 5 10 2 2. + 0 + 0.0372730493545532 + 0.0365228801965714 + -0.4373561143875122 + <_> + + <_> + + + + <_>12 13 8 6 -1. + <_>12 15 8 2 3. + 0 + -0.0331052094697952 + -0.3443898856639862 + 0.0324401482939720 + <_> + + <_> + + + + <_>6 1 2 16 -1. + <_>7 1 1 16 2. + 0 + 5.3402669727802277e-003 + 0.0923857614398003 + -0.1782377958297730 + <_> + + <_> + + + + <_>10 0 10 19 -1. + <_>10 0 5 19 2. + 0 + 0.0215424392372370 + -0.1984867006540299 + 0.0519532002508640 + <_> + + <_> + + + + <_>2 0 14 18 -1. + <_>9 0 7 18 2. + 0 + 0.3328931033611298 + -0.0607502683997154 + 0.2892509996891022 + <_> + + <_> + + + + <_>9 3 5 9 -1. + <_>9 6 5 3 3. + 0 + -6.6301261540502310e-004 + 0.0336367189884186 + -0.2851041853427887 + <_> + + <_> + + + + <_>0 0 10 19 -1. + <_>5 0 5 19 2. + 0 + 0.0466867610812187 + -0.4988366961479187 + 0.0337760783731937 + <_> + + <_> + + + + <_>14 0 3 14 -1. + <_>15 0 1 14 3. + 0 + -2.2452229168266058e-003 + -0.1968539059162140 + 0.0951611772179604 + <_> + + <_> + + + + <_>3 0 3 14 -1. + <_>4 0 1 14 3. + 0 + -0.0114990202710032 + -0.3242388963699341 + 0.0524683594703674 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 0.0131345298141241 + -0.0675384923815727 + 0.2760593891143799 + <_> + + <_> + + + + <_>3 13 13 3 -1. + <_>3 14 13 1 3. + 0 + -0.0159789808094502 + 0.3149605095386505 + -0.0766573920845985 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 0.0241997502744198 + 0.0558365210890770 + -0.3660989999771118 + <_> + + <_> + + + + <_>0 10 13 3 -1. + <_>0 11 13 1 3. + 0 + 4.0229028090834618e-003 + -0.1305347979068756 + 0.1347011029720306 + <_> + + <_> + + + + <_>12 11 5 9 -1. + <_>12 14 5 3 3. + 0 + -0.0141725903376937 + -0.0886165425181389 + 0.0550532788038254 + <_> + + <_> + + + + <_>0 14 7 6 -1. + <_>0 16 7 2 3. + 0 + 0.0189673993736506 + 0.0513485483825207 + -0.3143992125988007 + <_> + + <_> + + + + <_>12 5 4 8 -1. + <_>12 9 4 4 2. + 0 + 0.0265029706060886 + -0.1106597036123276 + 0.0880809277296066 + <_> + + <_> + + + + <_>0 13 8 6 -1. + <_>0 15 8 2 3. + 0 + -0.0396544896066189 + -0.5074297189712524 + 0.0329994410276413 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -8.9988503605127335e-003 + 0.1283013969659805 + -0.0730641335248947 + <_> + + <_> + + + + <_>2 5 16 8 -1. + <_>2 5 8 4 2. + <_>10 9 8 4 2. + 0 + 0.0746132880449295 + 0.0317298099398613 + -0.5389965772628784 + <_> + + <_> + + + + <_>14 3 6 8 -1. + <_>16 3 2 8 3. + 0 + 0.0334148705005646 + -0.0611305907368660 + 0.2466990053653717 + <_> + + <_> + + + + <_>8 4 3 10 -1. + <_>8 9 3 5 2. + 0 + 9.6071150619536638e-004 + 0.1252817958593369 + -0.1430419981479645 + <_> + + <_> + + + + <_>9 6 4 8 -1. + <_>9 10 4 4 2. + 0 + -8.6224973201751709e-003 + -0.2208179980516434 + 0.0475694388151169 + <_> + + <_> + + + + <_>0 4 6 7 -1. + <_>2 4 2 7 3. + 0 + 0.0398930087685585 + -0.0517743602395058 + 0.3173567950725555 + <_> + + <_> + + + + <_>5 1 10 6 -1. + <_>5 4 10 3 2. + 0 + 0.0853881165385246 + -0.0355843901634216 + 0.4197419881820679 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. 
+ 0 + 6.3205747865140438e-003 + 0.0694125369191170 + -0.2997998893260956 + <_> + + <_> + + + + <_>10 4 8 8 -1. + <_>14 4 4 4 2. + <_>10 8 4 4 2. + 0 + -0.0589323118329048 + -0.4619421958923340 + 0.0222905408591032 + <_> + + <_> + + + + <_>0 15 13 3 -1. + <_>0 16 13 1 3. + 0 + -0.0100544197484851 + 0.2364912927150726 + -0.0668119266629219 + <_> + + <_> + + + + <_>10 4 8 8 -1. + <_>14 4 4 4 2. + <_>10 8 4 4 2. + 0 + -2.5194720365107059e-005 + 0.0788154527544975 + -0.1158548966050148 + <_> + + <_> + + + + <_>2 4 8 8 -1. + <_>2 4 4 4 2. + <_>6 8 4 4 2. + 0 + -0.0593466497957706 + -0.5879974961280823 + 0.0304864197969437 + <_> + + <_> + + + + <_>13 0 2 20 -1. + <_>13 0 1 20 2. + 0 + 0.0204216595739126 + 0.0391840413212776 + -0.2698679864406586 + <_> + + <_> + + + + <_>3 14 7 6 -1. + <_>3 16 7 2 3. + 0 + -0.0403816401958466 + -0.6160110235214233 + 0.0253531001508236 + <_> + + <_> + + + + <_>2 2 18 4 -1. + <_>8 2 6 4 3. + 0 + 0.1787765026092529 + -0.0571357607841492 + 0.1736157983541489 + <_> + + <_> + + + + <_>6 0 6 10 -1. + <_>6 0 3 5 2. + <_>9 5 3 5 2. + 0 + -0.0221207402646542 + -0.3769758939743042 + 0.0426900498569012 + <_> + + <_> + + + + <_>4 6 16 3 -1. + <_>4 6 8 3 2. + 0 + 0.1158502027392387 + 9.8102567717432976e-003 + -0.6138088703155518 + <_> + + <_> + + + + <_>0 6 16 3 -1. + <_>8 6 8 3 2. + 0 + 0.0979448109865189 + 0.0363295599818230 + -0.4524078071117401 + <_> + + <_> + + + + <_>13 0 2 20 -1. + <_>13 0 1 20 2. + 0 + -0.0291230306029320 + -0.6560735702514648 + 8.4500880911946297e-003 + <_> + + <_> + + + + <_>2 1 16 3 -1. + <_>2 2 16 1 3. + 0 + -0.0130535997450352 + -0.3468565046787262 + 0.0465116798877716 + <_> + + <_> + + + + <_>13 0 2 20 -1. + <_>13 0 1 20 2. + 0 + 0.0134514896199107 + 0.0344204306602478 + -0.1016886979341507 + <_> + + <_> + + + + <_>5 0 2 20 -1. + <_>6 0 1 20 2. + 0 + -0.0239571407437325 + -0.8418948054313660 + 0.0193173196166754 + <_> + + <_> + + + + <_>5 0 15 8 -1. + <_>10 0 5 8 3. + 0 + -0.1345019042491913 + 0.3913233876228333 + -0.0219012591987848 + <_> + + <_> + + + + <_>0 0 15 8 -1. + <_>5 0 5 8 3. + 0 + -0.1034243032336235 + 0.6079022288322449 + -0.0258698798716068 + <_> + + <_> + + + + <_>11 3 6 7 -1. + <_>13 3 2 7 3. + 0 + -0.0414644293487072 + -0.3963131904602051 + 0.0377719812095165 + <_> + + <_> + + + + <_>3 3 6 7 -1. + <_>5 3 2 7 3. + 0 + -0.0349457487463951 + -0.4574693143367767 + 0.0329135693609715 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>9 0 1 13 2. + 0 + 0.0142899099737406 + -0.0507575310766697 + 0.3177290856838226 + <_> + + <_> + + + + <_>8 4 3 13 -1. + <_>9 4 1 13 3. + 0 + -5.4311589337885380e-003 + 0.2470868974924088 + -0.0785266235470772 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 2.6972589548677206e-003 + -0.3406186103820801 + 0.0509485192596912 + <_> + + <_> + + + + <_>3 1 6 10 -1. + <_>3 1 3 5 2. + <_>6 6 3 5 2. + 0 + -4.3831961229443550e-003 + 0.0800957977771759 + -0.2090218961238861 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0159583296626806 + -0.2462559044361115 + 0.0583482310175896 + <_> + + <_> + + + + <_>4 6 9 12 -1. + <_>4 12 9 6 2. + 0 + 0.0452523715794086 + 0.0416301414370537 + -0.3555093109607697 + <_> + + <_> + + + + <_>4 4 13 3 -1. + <_>4 5 13 1 3. + 0 + -0.0182781498879194 + 0.3080492913722992 + -0.0471848398447037 + <_> + + <_> + + + + <_>1 7 18 3 -1. + <_>1 8 18 1 3. + 0 + 0.0252776294946671 + 0.0296986494213343 + -0.5377609729766846 + <_> + + <_> + + + + <_>6 7 13 2 -1. + <_>6 8 13 1 2. 
+ 0 + 7.2078350931406021e-003 + -0.1282051056623459 + 0.1175319030880928 + <_> + + <_> + + + + <_>6 3 7 16 -1. + <_>6 11 7 8 2. + 0 + -0.1401470005512238 + -0.4502086937427521 + 0.0327537916600704 + <_> + + <_> + + + + <_>8 11 6 9 -1. + <_>10 11 2 9 3. + 0 + -0.0458323694765568 + -0.4200083911418915 + 0.0241149291396141 + <_> + + <_> + + + + <_>6 11 6 9 -1. + <_>8 11 2 9 3. + 0 + -0.0439768992364407 + -0.4597324132919312 + 0.0336047410964966 + <_> + + <_> + + + + <_>10 5 3 13 -1. + <_>11 5 1 13 3. + 0 + -0.0101248202845454 + 0.1626081019639969 + -0.0664491578936577 + <_> + + <_> + + + + <_>7 4 3 13 -1. + <_>8 4 1 13 3. + 0 + -1.3071260182186961e-003 + 0.1160831004381180 + -0.1316865980625153 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + 0.0452848896384239 + 0.0357517600059509 + -0.4479573965072632 + <_> + + <_> + + + + <_>0 14 12 6 -1. + <_>0 14 6 3 2. + <_>6 17 6 3 2. + 0 + -0.0208510793745518 + 0.2466531991958618 + -0.0658545419573784 + <_> + + <_> + + + + <_>14 13 5 6 -1. + <_>14 16 5 3 2. + 0 + 2.6742550544440746e-003 + 0.0516831092536449 + -0.1369938999414444 + <_> + + <_> + + + + <_>1 13 5 6 -1. + <_>1 16 5 3 2. + 0 + 1.3148089637979865e-003 + 0.0777988731861115 + -0.2106450945138931 + <_> + + <_> + + + + <_>4 5 13 2 -1. + <_>4 6 13 1 2. + 0 + -0.0181747395545244 + 0.1735503971576691 + -0.0724171921610832 + <_> + + <_> + + + + <_>0 10 20 6 -1. + <_>0 10 10 3 2. + <_>10 13 10 3 2. + 0 + 0.0143143199384212 + 0.0817569866776466 + -0.1711145043373108 + <_> + + <_> + + + + <_>8 5 4 14 -1. + <_>10 5 2 7 2. + <_>8 12 2 7 2. + 0 + -0.0164864305406809 + 0.2280950993299484 + -0.0659063681960106 + <_> + + <_> + + + + <_>6 8 8 8 -1. + <_>6 8 4 4 2. + <_>10 12 4 4 2. + 0 + 0.0307560600340366 + 0.0387171395123005 + -0.4050514101982117 + <_> + + <_> + + + + <_>13 10 5 9 -1. + <_>13 13 5 3 3. + 0 + 0.0261060893535614 + 0.0308501999825239 + -0.2775925099849701 + <_> + + <_> + + + + <_>5 0 10 12 -1. + <_>5 0 5 6 2. + <_>10 6 5 6 2. + 0 + 0.0804011076688766 + 0.0297925006598234 + -0.4474256932735443 + <_> + + <_> + + + + <_>10 10 6 7 -1. + <_>12 10 2 7 3. + 0 + -0.0183507893234491 + 0.1151541993021965 + -0.0287443194538355 + <_> + + <_> + + + + <_>2 10 5 9 -1. + <_>2 13 5 3 3. + 0 + 0.0348270498216152 + 0.0287381391972303 + -0.4840180873870850 + <_> + + <_> + + + + <_>0 0 20 2 -1. + <_>0 0 10 2 2. + 0 + -0.0882501825690269 + -0.4263553917407990 + 0.0301734898239374 + <_> + + <_> + + + + <_>1 0 4 18 -1. + <_>3 0 2 18 2. + 0 + 0.1483698934316635 + 0.0220897495746613 + -0.5536422729492188 + <_> + + <_> + + + + <_>15 2 5 6 -1. + <_>15 5 5 3 2. + 0 + -0.0189496092498302 + -0.2302016019821167 + 0.0392673015594482 + <_> + + <_> + + + + <_>2 4 14 6 -1. + <_>2 4 7 3 2. + <_>9 7 7 3 2. + 0 + -0.0567759498953819 + 0.3501352965831757 + -0.0408628284931183 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 0.0622865408658981 + 0.0223445408046246 + -0.7108234167098999 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + -0.0386295504868031 + -0.3293349146842957 + 0.0385080687701702 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 0.0281543303281069 + -0.0736909136176109 + 0.1882437020540237 + <_> + + <_> + + + + <_>3 3 5 12 -1. + <_>3 9 5 6 2. + 0 + -0.0105701796710491 + -0.2780688107013702 + 0.0476791895925999 + <_> + + <_> + + + + <_>2 4 17 15 -1. + <_>2 9 17 5 3. + 0 + 0.0566045716404915 + 0.2476761043071747 + -0.0568309389054775 + <_> + + <_> + + + + <_>3 0 13 12 -1. + <_>3 4 13 4 3. 
+ 0 + -0.2852267026901245 + 0.5234540104866028 + -0.0236528292298317 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>2 18 18 1 3. + 0 + 0.0348071381449699 + 0.0248199105262756 + -0.4320527017116547 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0232187993824482 + 0.2992916107177734 + -0.0447126701474190 + <_> + + <_> + + + + <_>2 0 18 6 -1. + <_>8 0 6 6 3. + 0 + -0.0630943924188614 + 0.3327926099300385 + -0.0160754993557930 + <_> + + <_> + + + + <_>0 0 18 9 -1. + <_>6 0 6 9 3. + 0 + 0.3018243014812470 + -0.0751969069242477 + 0.1913980990648270 + <_> + + <_> + + + + <_>10 2 6 7 -1. + <_>12 2 2 7 3. + 0 + 0.0230778697878122 + 0.0368449799716473 + -0.2876125872135162 + <_> + + <_> + + + + <_>1 6 15 4 -1. + <_>6 6 5 4 3. + 0 + 0.1096414998173714 + 0.0375481210649014 + -0.4176355898380280 + <_> + + <_> + + + + <_>5 1 12 9 -1. + <_>5 4 12 3 3. + 0 + 0.0296720396727324 + -0.0784098207950592 + 0.1306421011686325 + <_> + + <_> + + + + <_>6 7 4 12 -1. + <_>6 13 4 6 2. + 0 + 6.3356538303196430e-003 + 0.0670143216848373 + -0.2048150002956390 + <_> + + <_> + + + + <_>10 6 6 10 -1. + <_>12 6 2 10 3. + 0 + -0.0199409499764442 + 0.0846636369824409 + -0.0420694090425968 + <_> + + <_> + + + + <_>3 12 9 4 -1. + <_>3 14 9 2 2. + 0 + -0.0479880012571812 + -0.6109951734542847 + 0.0228422600775957 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + 0.0482800193130970 + 7.4727279134094715e-003 + -0.7515329718589783 + <_> + + <_> + + + + <_>4 3 5 9 -1. + <_>4 6 5 3 3. + 0 + -2.5825301418080926e-004 + 0.0355170890688896 + -0.3268606960773468 + <_> + + <_> + + + + <_>1 7 18 5 -1. + <_>7 7 6 5 3. + 0 + -0.0481753088533878 + -0.5809946060180664 + 0.0197607595473528 + <_> + + <_> + + + + <_>6 4 6 8 -1. + <_>8 4 2 8 3. + 0 + -0.0286063402891159 + 0.3209697008132935 + -0.0407343208789825 + <_> + + <_> + + + + <_>10 1 6 8 -1. + <_>12 1 2 8 3. + 0 + -0.0433285310864449 + -0.3302142918109894 + 0.0315272398293018 + <_> + + <_> + + + + <_>4 2 6 7 -1. + <_>6 2 2 7 3. + 0 + 0.0227534100413322 + 0.0373278297483921 + -0.3629173934459686 + <_> + + <_> + + + + <_>7 0 13 2 -1. + <_>7 1 13 1 2. + 0 + 1.8975350030814297e-005 + -0.1150334998965263 + 0.0418166406452656 + <_> + + <_> + + + + <_>0 4 18 5 -1. + <_>6 4 6 5 3. + 0 + 0.1807754039764404 + -0.0557518713176250 + 0.2242483049631119 + <_> + + <_> + + + + <_>10 5 6 11 -1. + <_>12 5 2 11 3. + 0 + -0.1253914982080460 + -0.8809840083122253 + 3.8788339588791132e-003 + <_> + + <_> + + + + <_>3 5 4 11 -1. + <_>5 5 2 11 2. + 0 + -8.0908974632620811e-003 + 0.2621070146560669 + -0.0537066496908665 + <_> + + <_> + + + + <_>9 9 9 10 -1. + <_>12 9 3 10 3. + 0 + 9.9102966487407684e-003 + -0.1297809928655624 + 0.0836358070373535 + <_> + + <_> + + + + <_>2 9 9 10 -1. + <_>5 9 3 10 3. + 0 + 0.0247929207980633 + -0.1458443999290466 + 0.0923056602478027 + <_> + + <_> + + + + <_>7 7 6 9 -1. + <_>9 7 2 9 3. + 0 + 0.0450748801231384 + -0.0723754987120628 + 0.2605743110179901 + <_> + + <_> + + + + <_>5 0 6 15 -1. + <_>7 0 2 15 3. + 0 + -0.0792055130004883 + -0.6207352280616760 + 0.0213233493268490 + <_> + + <_> + + + + <_>6 12 10 6 -1. + <_>11 12 5 3 2. + <_>6 15 5 3 2. + 0 + -0.0447252504527569 + -0.6424819827079773 + 9.5317112281918526e-003 + <_> + + <_> + + + + <_>0 17 15 3 -1. + <_>5 17 5 3 3. + 0 + -0.0340657792985439 + 0.3075971007347107 + -0.0422969907522202 + <_> + + <_> + + + + <_>11 10 6 10 -1. + <_>14 10 3 5 2. + <_>11 15 3 5 2. + 0 + -0.0297567397356033 + 0.2521165013313294 + -0.0311830304563046 + <_> + + <_> + + + + <_>4 12 10 6 -1. + <_>4 12 5 3 2. + <_>9 15 5 3 2. 
+ 0 + -0.0320269502699375 + -0.5530080199241638 + 0.0280215702950954 + -1.7262409925460815 + 21 + -1 + <_> + + + <_> + + <_> + + + + <_>0 0 18 5 -1. + <_>6 0 6 5 3. + 0 + 0.0286526195704937 + -0.2182213962078095 + 0.2267557978630066 + <_> + + <_> + + + + <_>2 1 18 6 -1. + <_>2 3 18 2 3. + 0 + 4.3320041149854660e-003 + -0.2859787940979004 + 0.1058920994400978 + <_> + + <_> + + + + <_>2 10 9 6 -1. + <_>2 12 9 2 3. + 0 + 5.6604119017720222e-003 + 0.0882954522967339 + -0.3892048001289368 + <_> + + <_> + + + + <_>9 4 6 5 -1. + <_>9 4 3 5 2. + 0 + 2.4440148845314980e-003 + -0.3548268079757690 + 0.0993623733520508 + <_> + + <_> + + + + <_>5 4 6 5 -1. + <_>8 4 3 5 2. + 0 + 2.2643520496785641e-003 + -0.2885844111442566 + 0.0883678570389748 + <_> + + <_> + + + + <_>7 12 6 7 -1. + <_>9 12 2 7 3. + 0 + 5.3952648304402828e-003 + 0.0855373814702034 + -0.3036639988422394 + <_> + + <_> + + + + <_>4 10 5 6 -1. + <_>4 13 5 3 2. + 0 + -7.2699488373473287e-004 + 0.0748402401804924 + -0.3403978049755096 + <_> + + <_> + + + + <_>12 2 6 10 -1. + <_>15 2 3 5 2. + <_>12 7 3 5 2. + 0 + -9.7503658616915345e-004 + 0.1200862973928452 + -0.2563441097736359 + <_> + + <_> + + + + <_>2 10 14 4 -1. + <_>2 10 7 2 2. + <_>9 12 7 2 2. + 0 + 4.0540988557040691e-003 + 0.0672660320997238 + -0.3570193946361542 + <_> + + <_> + + + + <_>4 6 12 8 -1. + <_>4 10 12 4 2. + 0 + 2.5258921086788177e-003 + -0.4196647107601166 + 0.0556657984852791 + <_> + + <_> + + + + <_>2 2 6 10 -1. + <_>2 2 3 5 2. + <_>5 7 3 5 2. + 0 + -1.2021360453218222e-003 + 0.1000448018312454 + -0.2193232029676437 + <_> + + <_> + + + + <_>6 15 14 2 -1. + <_>6 16 14 1 2. + 0 + 7.7549100387841463e-004 + -0.1356272995471954 + 0.1197365969419479 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -0.0506998486816883 + 0.4541828930377960 + -0.0390303507447243 + <_> + + <_> + + + + <_>6 2 9 5 -1. + <_>9 2 3 5 3. + 0 + 0.0133644901216030 + 0.1116603985428810 + -0.1793878972530365 + <_> + + <_> + + + + <_>1 14 8 6 -1. + <_>1 16 8 2 3. + 0 + -0.0154189802706242 + -0.3518005907535553 + 0.0473549999296665 + <_> + + <_> + + + + <_>15 4 4 16 -1. + <_>17 4 2 8 2. + <_>15 12 2 8 2. + 0 + -0.0429810993373394 + 0.3923279941082001 + -0.0453370288014412 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 6.2867929227650166e-003 + 0.0643318220973015 + -0.2223951071500778 + <_> + + <_> + + + + <_>4 6 13 3 -1. + <_>4 7 13 1 3. + 0 + -3.5951940808445215e-003 + 0.0954042971134186 + -0.1533828973770142 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0767609179019928 + -0.6509981751441956 + 0.0172836501151323 + <_> + + <_> + + + + <_>2 8 18 2 -1. + <_>2 9 18 1 2. + 0 + 4.6225200640037656e-004 + -0.4341560900211334 + 0.0252418592572212 + <_> + + <_> + + + + <_>2 0 14 2 -1. + <_>2 1 14 1 2. + 0 + 7.5868278509005904e-004 + -0.1462433040142059 + 0.0963190719485283 + <_> + + <_> + + + + <_>11 10 4 7 -1. + <_>11 10 2 7 2. + 0 + -5.0252641085535288e-004 + 0.1358402073383331 + -0.2318104058504105 + <_> + + <_> + + + + <_>4 14 12 6 -1. + <_>8 14 4 6 3. + 0 + 9.7315143793821335e-003 + -0.0851555913686752 + 0.2015698999166489 + <_> + + <_> + + + + <_>11 10 4 7 -1. + <_>11 10 2 7 2. + 0 + -0.0264322292059660 + -0.3700251877307892 + 0.0246166307479143 + <_> + + <_> + + + + <_>5 10 4 7 -1. + <_>7 10 2 7 2. + 0 + -4.4683468877337873e-004 + 0.1004896014928818 + -0.1858860999345779 + <_> + + <_> + + + + <_>9 6 2 14 -1. + <_>9 13 2 7 2. + 0 + 1.9872789271175861e-003 + 0.0532239191234112 + -0.3160380125045776 + <_> + + <_> + + + + <_>2 17 15 3 -1. + <_>2 18 15 1 3. 
+ 0 + 3.1368629424832761e-004 + -0.1321319043636322 + 0.0957717671990395 + <_> + + <_> + + + + <_>16 1 4 7 -1. + <_>16 1 2 7 2. + 0 + 5.9834700077772141e-003 + -0.0756818130612373 + 0.1523095071315765 + <_> + + <_> + + + + <_>5 13 4 7 -1. + <_>7 13 2 7 2. + 0 + -5.0965389236807823e-003 + -0.1847781985998154 + 0.0760221406817436 + <_> + + <_> + + + + <_>14 1 6 7 -1. + <_>16 1 2 7 3. + 0 + -0.0191876105964184 + 0.2143180966377258 + -0.0497642196714878 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>2 1 2 7 3. + 0 + 0.0233204793184996 + -0.0486893206834793 + 0.2657899856567383 + <_> + + <_> + + + + <_>4 3 13 2 -1. + <_>4 4 13 1 2. + 0 + -6.9449091097339988e-004 + -0.1543335020542145 + 0.0874106511473656 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 4.8893648199737072e-003 + 0.0513427890837193 + -0.2616536021232605 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + -0.0274288691580296 + -0.3797203898429871 + 0.0318211615085602 + <_> + + <_> + + + + <_>0 2 6 14 -1. + <_>2 2 2 14 3. + 0 + -0.0177345499396324 + 0.1997662037611008 + -0.0623180493712425 + <_> + + <_> + + + + <_>13 0 6 13 -1. + <_>15 0 2 13 3. + 0 + 0.1514825969934464 + 7.4510741978883743e-003 + -0.5803133249282837 + <_> + + <_> + + + + <_>1 0 6 13 -1. + <_>3 0 2 13 3. + 0 + 1.5324390260502696e-003 + -0.1251055002212524 + 0.1043189987540245 + <_> + + <_> + + + + <_>0 3 20 4 -1. + <_>10 3 10 2 2. + <_>0 5 10 2 2. + 0 + -0.0123108103871346 + -0.2353972941637039 + 0.0536462105810642 + <_> + + <_> + + + + <_>0 7 12 11 -1. + <_>6 7 6 11 2. + 0 + -0.0112108001485467 + 0.1075923964381218 + -0.1205523014068604 + <_> + + <_> + + + + <_>7 11 7 6 -1. + <_>7 13 7 2 3. + 0 + 2.7532500680536032e-003 + -0.0664799064397812 + 0.1732115000486374 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -8.4678819403052330e-003 + -0.3185068070888519 + 0.0422808192670345 + <_> + + <_> + + + + <_>10 7 4 12 -1. + <_>10 7 2 12 2. + 0 + -7.3283319361507893e-003 + -0.1636925935745239 + 0.0317723490297794 + <_> + + <_> + + + + <_>4 7 11 4 -1. + <_>4 9 11 2 2. + 0 + 0.0471565499901772 + -0.0616670995950699 + 0.1741099059581757 + <_> + + <_> + + + + <_>5 7 10 6 -1. + <_>10 7 5 3 2. + <_>5 10 5 3 2. + 0 + 8.2125868648290634e-003 + 0.0670697987079620 + -0.2203007042407990 + <_> + + <_> + + + + <_>0 5 18 10 -1. + <_>0 5 9 5 2. + <_>9 10 9 5 2. + 0 + 7.6550841331481934e-003 + 0.0614223107695580 + -0.1935762017965317 + <_> + + <_> + + + + <_>0 0 20 4 -1. + <_>10 0 10 2 2. + <_>0 2 10 2 2. + 0 + -0.0453728511929512 + -0.4756565988063812 + 0.0228694695979357 + <_> + + <_> + + + + <_>2 4 13 3 -1. + <_>2 5 13 1 3. + 0 + 3.7434820551425219e-003 + -0.0909409224987030 + 0.1384121030569077 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 2.3490150924772024e-003 + 0.0632914975285530 + -0.1550638973712921 + <_> + + <_> + + + + <_>2 4 13 2 -1. + <_>2 5 13 1 2. + 0 + -0.0241497494280338 + 0.3458844125270844 + -0.0315258204936981 + <_> + + <_> + + + + <_>7 0 13 3 -1. + <_>7 1 13 1 3. + 0 + 0.0148783503100276 + 0.0242150593549013 + -0.3238762915134430 + <_> + + <_> + + + + <_>1 0 6 10 -1. + <_>1 0 3 5 2. + <_>4 5 3 5 2. + 0 + 0.0298431608825922 + -0.0278176907449961 + 0.4093947112560272 + <_> + + <_> + + + + <_>10 6 9 14 -1. + <_>13 6 3 14 3. + 0 + 7.1600051596760750e-003 + -0.0465962402522564 + 0.0745470672845840 + <_> + + <_> + + + + <_>1 6 9 14 -1. + <_>4 6 3 14 3. + 0 + 0.0562672093510628 + 0.0295518506318331 + -0.4009805917739868 + <_> + + <_> + + + + <_>8 1 5 10 -1. + <_>8 6 5 5 2. 
+ 0 + -4.5356149785220623e-003 + 0.0818205773830414 + -0.1061929985880852 + <_> + + <_> + + + + <_>0 3 20 8 -1. + <_>0 7 20 4 2. + 0 + -0.0136973597109318 + -0.1935908943414688 + 0.0709177479147911 + <_> + + <_> + + + + <_>4 9 14 2 -1. + <_>4 10 14 1 2. + 0 + -1.5458730049431324e-003 + -0.2198767960071564 + 0.0283964890986681 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 2.9332858975976706e-003 + -0.0761532336473465 + 0.1646018028259277 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 3.4973609726876020e-003 + -0.0681960806250572 + 0.1671735048294067 + <_> + + <_> + + + + <_>3 13 14 6 -1. + <_>3 15 14 2 3. + 0 + -0.0183070693165064 + -0.1886709928512573 + 0.0699327364563942 + <_> + + <_> + + + + <_>6 11 13 9 -1. + <_>6 14 13 3 3. + 0 + -0.1709208041429520 + -0.5006777048110962 + 7.8164357692003250e-003 + <_> + + <_> + + + + <_>1 11 13 9 -1. + <_>1 14 13 3 3. + 0 + 4.1620130650699139e-003 + 0.0559000410139561 + -0.2297254949808121 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + -0.0197243094444275 + 0.3299855887889862 + -0.0366024002432823 + <_> + + <_> + + + + <_>3 5 12 4 -1. + <_>7 5 4 4 3. + 0 + 5.3331600502133369e-003 + -0.1413425952196121 + 0.0882776379585266 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0421822182834148 + -0.6671878099441528 + 0.0157705098390579 + <_> + + <_> + + + + <_>0 0 18 4 -1. + <_>6 0 6 4 3. + 0 + -5.2826730534434319e-003 + 0.1702563017606735 + -0.0684913173317909 + <_> + + <_> + + + + <_>9 1 4 10 -1. + <_>9 6 4 5 2. + 0 + -2.3227441124618053e-003 + 0.0723785907030106 + -0.1006670966744423 + <_> + + <_> + + + + <_>0 2 13 2 -1. + <_>0 3 13 1 2. + 0 + -1.6239390242844820e-003 + -0.2250131964683533 + 0.0558984987437725 + <_> + + <_> + + + + <_>7 1 8 8 -1. + <_>11 1 4 4 2. + <_>7 5 4 4 2. + 0 + 0.0560834109783173 + 0.0136461695656180 + -0.4930678904056549 + <_> + + <_> + + + + <_>5 7 6 12 -1. + <_>5 7 3 6 2. + <_>8 13 3 6 2. + 0 + -0.0301999300718308 + 0.2307083010673523 + -0.0536459386348724 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + 0.0191576704382896 + 0.0368303209543228 + -0.3952297866344452 + <_> + + <_> + + + + <_>0 14 20 3 -1. + <_>0 15 20 1 3. + 0 + 3.5853029694408178e-003 + -0.0618932209908962 + 0.1758320927619934 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -0.0287753306329250 + -0.3183844089508057 + 0.0231037400662899 + <_> + + <_> + + + + <_>3 0 9 5 -1. + <_>6 0 3 5 3. + 0 + 2.5611401069909334e-003 + -0.1048441976308823 + 0.0971525683999062 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -0.0315544903278351 + 0.2936651110649109 + -0.0241890698671341 + <_> + + <_> + + + + <_>5 6 6 7 -1. + <_>7 6 2 7 3. + 0 + -7.3520588921383023e-004 + 0.0977110415697098 + -0.1524803936481476 + <_> + + <_> + + + + <_>9 1 3 19 -1. + <_>10 1 1 19 3. + 0 + -0.0479938797652721 + -0.9458782076835632 + 9.0406481176614761e-003 + <_> + + <_> + + + + <_>0 11 7 4 -1. + <_>0 13 7 2 2. + 0 + 5.2936570718884468e-003 + 0.0333203710615635 + -0.3126893937587738 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 0.0169032495468855 + -0.0241327099502087 + 0.2848340868949890 + <_> + + <_> + + + + <_>0 11 5 6 -1. + <_>0 14 5 3 2. + 0 + -7.0723611861467361e-003 + -0.1752420067787170 + 0.0727138817310333 + <_> + + <_> + + + + <_>9 2 10 18 -1. + <_>14 2 5 9 2. + <_>9 11 5 9 2. + 0 + 0.0641916170716286 + -0.0209696702659130 + 0.3540262877941132 + <_> + + <_> + + + + <_>2 16 8 4 -1. + <_>6 16 4 4 2. 
+ 0 + 2.9694940894842148e-003 + -0.0750869363546371 + 0.1432134956121445 + <_> + + <_> + + + + <_>7 4 6 8 -1. + <_>9 4 2 8 3. + 0 + -0.0201052594929934 + 0.6078401207923889 + -0.0181044992059469 + <_> + + <_> + + + + <_>7 0 2 19 -1. + <_>8 0 1 19 2. + 0 + -0.0131698697805405 + -0.5467836856842041 + 0.0247422400861979 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0142267299816012 + -0.4672259092330933 + 0.0314896292984486 + <_> + + <_> + + + + <_>0 0 9 5 -1. + <_>3 0 3 5 3. + 0 + 0.0377461910247803 + -0.0384958311915398 + 0.3533348143100739 + <_> + + <_> + + + + <_>18 2 2 18 -1. + <_>18 2 1 18 2. + 0 + -3.8704369217157364e-003 + 0.1498429030179977 + -0.0565497688949108 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0115654403343797 + -0.1522793024778366 + 0.0760629624128342 + <_> + + <_> + + + + <_>18 2 2 18 -1. + <_>18 2 1 18 2. + 0 + -0.0888544768095016 + -0.7296792864799500 + 4.8231678083539009e-003 + <_> + + <_> + + + + <_>0 2 2 18 -1. + <_>1 2 1 18 2. + 0 + -2.0447981078177691e-003 + 0.1414818018674851 + -0.0832003578543663 + <_> + + <_> + + + + <_>7 4 7 15 -1. + <_>7 9 7 5 3. + 0 + -0.0117628602311015 + -0.4020051956176758 + 0.0266794394701719 + <_> + + <_> + + + + <_>7 13 6 6 -1. + <_>7 16 6 3 2. + 0 + -0.0175390299409628 + -0.3731625974178314 + 0.0301719792187214 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 3.8314110133796930e-003 + -0.0934099480509758 + 0.0795034989714623 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + -0.0144723597913980 + 0.3433358073234558 + -0.0436570607125759 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + -0.0265166908502579 + -0.4823023080825806 + 0.0168116502463818 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -0.0331947915256023 + -0.4358026087284088 + 0.0226448904722929 + <_> + + <_> + + + + <_>17 0 3 16 -1. + <_>18 0 1 16 3. + 0 + 4.4987560249865055e-003 + -0.0322815403342247 + 0.0899463072419167 + <_> + + <_> + + + + <_>0 4 3 14 -1. + <_>1 4 1 14 3. + 0 + 3.6823831032961607e-003 + -0.0687554627656937 + 0.1433981060981751 + <_> + + <_> + + + + <_>14 8 6 5 -1. + <_>14 8 3 5 2. + 0 + -0.1118414029479027 + -0.7775676250457764 + 5.2246451377868652e-003 + <_> + + <_> + + + + <_>0 8 6 5 -1. + <_>3 8 3 5 2. + 0 + -0.0732550397515297 + -0.5563074946403503 + 0.0191271491348743 + <_> + + <_> + + + + <_>1 13 18 4 -1. + <_>10 13 9 2 2. + <_>1 15 9 2 2. + 0 + 0.0298557691276073 + 0.0211788304150105 + -0.4085004031658173 + <_> + + <_> + + + + <_>7 0 5 9 -1. + <_>7 3 5 3 3. + 0 + -0.0734722316265106 + 0.8282048702239990 + -0.0124529097229242 + <_> + + <_> + + + + <_>6 9 9 5 -1. + <_>9 9 3 5 3. + 0 + -7.2046648710966110e-004 + 0.0996305271983147 + -0.0952788591384888 + <_> + + <_> + + + + <_>7 3 3 13 -1. + <_>8 3 1 13 3. + 0 + -3.8003330701030791e-004 + 0.1023110970854759 + -0.1035138964653015 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0454531088471413 + -0.6488506197929382 + 0.0119660003110766 + <_> + + <_> + + + + <_>5 0 6 10 -1. + <_>5 0 3 5 2. + <_>8 5 3 5 2. + 0 + -5.1456969231367111e-004 + -0.1508329957723618 + 0.0665444731712341 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0279491804540157 + 0.0171863995492458 + -0.3750118911266327 + <_> + + <_> + + + + <_>3 0 14 8 -1. + <_>3 4 14 4 2. + 0 + 0.0630398765206337 + -0.0438215881586075 + 0.2478944063186646 + <_> + + <_> + + + + <_>8 1 5 10 -1. + <_>8 6 5 5 2. 
[Several thousand added (+) lines of OpenCV Haar cascade classifier data continue here, serialised in the legacy haartraining XML layout. Each weak classifier is a decision stump over two or three weighted rectangles (<_>x y w h weight.), followed by a tilted flag (0 throughout this span), the node threshold, and the left and right leaf values. Three stage boundaries fall inside this span, each closing a stage with its stage_threshold / parent / next triplet: -1.4976780414581299 / 22 / -1, -1.5337220430374146 / 23 / -1, and -1.4604519605636597 / 24 / -1. The rectangle coordinates (up to 0 0 20 20) indicate a 20x20 detection window.]
+ 0 + 0.0817870497703552 + -0.0569666698575020 + 0.2063311934471130 + <_> + + <_> + + + + <_>6 1 8 4 -1. + <_>6 3 8 2 2. + 0 + 0.0122906398028135 + 0.1043353006243706 + -0.1412999033927918 + <_> + + <_> + + + + <_>0 7 7 6 -1. + <_>0 9 7 2 3. + 0 + 3.2738980371505022e-003 + -0.1992916017770767 + 0.0579004995524883 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 3.1915940344333649e-003 + -0.2864956855773926 + 0.0384459383785725 + <_> + + <_> + + + + <_>0 0 15 7 -1. + <_>5 0 5 7 3. + 0 + -0.0694291368126869 + 0.3999530076980591 + -0.0292284209281206 + <_> + + <_> + + + + <_>10 0 10 8 -1. + <_>10 0 5 8 2. + 0 + 0.3089629113674164 + 4.5684990473091602e-003 + -0.9759358167648315 + <_> + + <_> + + + + <_>0 0 10 8 -1. + <_>5 0 5 8 2. + 0 + 0.0605471692979336 + -0.1722735017538071 + 0.0733677595853806 + <_> + + <_> + + + + <_>5 6 12 4 -1. + <_>5 6 6 4 2. + 0 + 0.0802967473864555 + 0.0127908904105425 + -0.2963644862174988 + <_> + + <_> + + + + <_>3 6 12 4 -1. + <_>9 6 6 4 2. + 0 + 0.0983090475201607 + 0.0174215305596590 + -0.7342811226844788 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + -0.0606510788202286 + -0.8926808834075928 + 9.2950398102402687e-003 + <_> + + <_> + + + + <_>2 0 15 9 -1. + <_>7 0 5 9 3. + 0 + -0.0110678300261498 + 0.3694047033786774 + -0.0322818607091904 + <_> + + <_> + + + + <_>6 14 13 2 -1. + <_>6 15 13 1 2. + 0 + -0.0172526892274618 + 0.2016368955373764 + -0.0306496098637581 + <_> + + <_> + + + + <_>4 0 12 8 -1. + <_>8 0 4 8 3. + 0 + 0.1141714975237846 + -0.0725674405694008 + 0.1458079963922501 + <_> + + <_> + + + + <_>12 1 4 14 -1. + <_>14 1 2 7 2. + <_>12 8 2 7 2. + 0 + -1.1878489749506116e-004 + 0.0667036697268486 + -0.1204411014914513 + <_> + + <_> + + + + <_>0 5 18 3 -1. + <_>6 5 6 3 3. + 0 + 0.0425388216972351 + 0.1423566937446594 + -0.0931281968951225 + <_> + + <_> + + + + <_>7 1 7 6 -1. + <_>7 4 7 3 2. + 0 + 0.0462207905948162 + -0.0453481189906597 + 0.2666769027709961 + <_> + + <_> + + + + <_>6 6 5 14 -1. + <_>6 13 5 7 2. + 0 + -0.1259886026382446 + -0.6219599843025208 + 0.0193617902696133 + <_> + + <_> + + + + <_>4 7 15 5 -1. + <_>9 7 5 5 3. + 0 + 0.1433641016483307 + 0.0156024601310492 + -0.3426972925662994 + <_> + + <_> + + + + <_>1 7 15 5 -1. + <_>6 7 5 5 3. + 0 + 0.0148534001782537 + -0.1939989030361176 + 0.0593650490045547 + <_> + + <_> + + + + <_>6 9 9 5 -1. + <_>9 9 3 5 3. + 0 + 0.0296072997152805 + 0.0293708592653275 + -0.1184056028723717 + <_> + + <_> + + + + <_>7 6 4 7 -1. + <_>9 6 2 7 2. + 0 + 0.0451512001454830 + -0.0310253705829382 + 0.4233565032482147 + <_> + + <_> + + + + <_>7 1 10 6 -1. + <_>12 1 5 3 2. + <_>7 4 5 3 2. + 0 + 0.0173470508307219 + 0.0524686612188816 + -0.1707188934087753 + <_> + + <_> + + + + <_>2 8 13 2 -1. + <_>2 9 13 1 2. + 0 + 0.0486967898905277 + 0.0137575902044773 + -0.7385389208793640 + <_> + + <_> + + + + <_>1 2 18 4 -1. + <_>10 2 9 2 2. + <_>1 4 9 2 2. + 0 + -0.0251209400594234 + -0.2607721984386444 + 0.0362490005791187 + <_> + + <_> + + + + <_>5 8 9 5 -1. + <_>8 8 3 5 3. + 0 + -0.0144120398908854 + 0.1843540072441101 + -0.0553760491311550 + <_> + + <_> + + + + <_>15 2 4 18 -1. + <_>17 2 2 9 2. + <_>15 11 2 9 2. + 0 + 0.0160111300647259 + -0.0338221900165081 + 0.0984909906983376 + <_> + + <_> + + + + <_>1 2 4 18 -1. + <_>1 2 2 9 2. + <_>3 11 2 9 2. + 0 + -0.0637788772583008 + 0.3959665894508362 + -0.0266052894294262 + <_> + + <_> + + + + <_>10 7 10 6 -1. + <_>15 7 5 3 2. + <_>10 10 5 3 2. + 0 + -0.0124317901208997 + -0.2710328102111816 + 0.0511539094150066 + <_> + + <_> + + + + <_>1 7 17 6 -1. 
+ <_>1 9 17 2 3. + 0 + 0.1543028950691223 + -0.0297420695424080 + 0.3622387945652008 + <_> + + <_> + + + + <_>7 6 7 4 -1. + <_>7 8 7 2 2. + 0 + 0.0689536184072495 + 0.0145605402067304 + -0.7130876183509827 + <_> + + <_> + + + + <_>1 8 10 6 -1. + <_>1 8 5 3 2. + <_>6 11 5 3 2. + 0 + 0.0268093906342983 + 0.0309030208736658 + -0.3145376145839691 + <_> + + <_> + + + + <_>10 7 10 6 -1. + <_>15 7 5 3 2. + <_>10 10 5 3 2. + 0 + -0.0543396398425102 + -0.5708159208297730 + 6.3606691546738148e-003 + <_> + + <_> + + + + <_>0 7 10 6 -1. + <_>0 7 5 3 2. + <_>5 10 5 3 2. + 0 + -7.4291341006755829e-003 + -0.2116782069206238 + 0.0547284111380577 + <_> + + <_> + + + + <_>8 1 12 19 -1. + <_>8 1 6 19 2. + 0 + 0.0150047196075320 + -0.1357697993516922 + 0.0366726182401180 + <_> + + <_> + + + + <_>0 1 12 19 -1. + <_>6 1 6 19 2. + 0 + 0.0234388597309589 + -0.6209517717361450 + 0.0174513701349497 + <_> + + <_> + + + + <_>5 1 12 13 -1. + <_>5 1 6 13 2. + 0 + 0.2186942994594574 + -0.0251758191734552 + 0.2425673007965088 + <_> + + <_> + + + + <_>5 1 9 5 -1. + <_>8 1 3 5 3. + 0 + 0.0725549012422562 + 0.0303783100098372 + -0.3531683981418610 + <_> + + <_> + + + + <_>16 0 4 8 -1. + <_>16 0 2 8 2. + 0 + -0.0607751905918121 + 0.6123114228248596 + -0.0293977502733469 + <_> + + <_> + + + + <_>0 12 13 3 -1. + <_>0 13 13 1 3. + 0 + 0.0104053597897291 + -0.0489253513514996 + 0.2004220038652420 + <_> + + <_> + + + + <_>10 0 4 16 -1. + <_>10 0 2 16 2. + 0 + -4.4559161178767681e-003 + -0.1817599982023239 + 0.0514601096510887 + <_> + + <_> + + + + <_>4 12 12 5 -1. + <_>8 12 4 5 3. + 0 + 5.3141661919653416e-003 + 0.1083642989397049 + -0.1146437004208565 + <_> + + <_> + + + + <_>10 0 4 16 -1. + <_>10 0 2 16 2. + 0 + 0.0281299091875553 + 0.0484524592757225 + -0.1058814972639084 + <_> + + <_> + + + + <_>6 0 4 16 -1. + <_>8 0 2 16 2. + 0 + -0.0100290300324559 + -0.2885420024394989 + 0.0465093813836575 + <_> + + <_> + + + + <_>6 1 8 7 -1. + <_>6 1 4 7 2. + 0 + 0.0416237600147724 + -0.0524241812527180 + 0.2463805973529816 + <_> + + <_> + + + + <_>8 4 4 7 -1. + <_>10 4 2 7 2. + 0 + 0.0174070298671722 + -0.0595117993652821 + 0.2248900979757309 + <_> + + <_> + + + + <_>11 8 9 9 -1. + <_>14 8 3 9 3. + 0 + -0.0910129174590111 + 0.3843485116958618 + -0.0267760790884495 + <_> + + <_> + + + + <_>0 8 9 9 -1. + <_>3 8 3 9 3. + 0 + -0.0559645593166351 + 0.3351255953311920 + -0.0370866693556309 + <_> + + <_> + + + + <_>0 4 20 5 -1. + <_>0 4 10 5 2. + 0 + -0.2319160997867584 + -0.7993714213371277 + 0.0161577109247446 + <_> + + <_> + + + + <_>1 12 18 2 -1. + <_>1 13 18 1 2. + 0 + 0.0150957796722651 + 0.0195627398788929 + -0.4758878052234650 + <_> + + <_> + + + + <_>11 5 5 9 -1. + <_>11 8 5 3 3. + 0 + -0.0635372027754784 + 0.5510386228561401 + -9.9191991612315178e-003 + <_> + + <_> + + + + <_>4 5 5 9 -1. + <_>4 8 5 3 3. + 0 + 0.0507804714143276 + -0.0507661215960979 + 0.1985673010349274 + <_> + + <_> + + + + <_>11 2 6 10 -1. + <_>14 2 3 5 2. + <_>11 7 3 5 2. + 0 + 0.0334357097744942 + 0.0171000305563211 + -0.3910605013370514 + <_> + + <_> + + + + <_>2 10 14 4 -1. + <_>2 10 7 2 2. + <_>9 12 7 2 2. + 0 + 0.0272363107651472 + 0.0194911304861307 + -0.4995582103729248 + <_> + + <_> + + + + <_>0 11 20 4 -1. + <_>10 11 10 2 2. + <_>0 13 10 2 2. + 0 + 0.0361444614827633 + 0.0197128094732761 + -0.4771480858325958 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0371108986437321 + -0.7108097076416016 + 0.0132972402498126 + <_> + + <_> + + + + <_>14 0 3 15 -1. + <_>15 0 1 15 3. 
+ 0 + -1.6986919799819589e-003 + -0.1145403981208801 + 0.0538331903517246 + <_> + + <_> + + + + <_>3 0 3 15 -1. + <_>4 0 1 15 3. + 0 + 7.0956937270238996e-004 + -0.1185242980718613 + 0.0861461535096169 + <_> + + <_> + + + + <_>9 12 7 4 -1. + <_>9 14 7 2 2. + 0 + -0.0398544594645500 + -0.2178416997194290 + 7.9314615577459335e-003 + <_> + + <_> + + + + <_>5 1 6 5 -1. + <_>8 1 3 5 2. + 0 + -0.0262653008103371 + 0.5182827711105347 + -0.0195025391876698 + <_> + + <_> + + + + <_>14 0 4 9 -1. + <_>14 0 2 9 2. + 0 + 1.5767179429531097e-003 + -0.0900251492857933 + 0.0436141490936279 + <_> + + <_> + + + + <_>2 0 4 9 -1. + <_>4 0 2 9 2. + 0 + 0.0845008492469788 + 0.0191088002175093 + -0.5804942846298218 + <_> + + <_> + + + + <_>9 1 8 8 -1. + <_>13 1 4 4 2. + <_>9 5 4 4 2. + 0 + 0.0580610297620296 + 5.1128780469298363e-003 + -0.3662971854209900 + <_> + + <_> + + + + <_>7 0 3 13 -1. + <_>8 0 1 13 3. + 0 + -8.6446420755237341e-004 + 0.0985512211918831 + -0.0992868766188622 + <_> + + <_> + + + + <_>3 15 14 4 -1. + <_>10 15 7 2 2. + <_>3 17 7 2 2. + 0 + -0.0163587797433138 + -0.2235393971204758 + 0.0451000109314919 + <_> + + <_> + + + + <_>4 12 7 4 -1. + <_>4 14 7 2 2. + 0 + 0.0120695000514388 + -0.0308855809271336 + 0.3593367040157318 + <_> + + <_> + + + + <_>9 12 4 8 -1. + <_>9 16 4 4 2. + 0 + 0.0649325922131538 + 8.9946594089269638e-003 + -0.6550527215003967 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + -0.0163847208023071 + 0.1837438046932221 + -0.0583197288215160 + <_> + + <_> + + + + <_>5 7 10 10 -1. + <_>5 12 10 5 2. + 0 + 0.0364678315818310 + 0.0330538004636765 + -0.3117660880088806 + <_> + + <_> + + + + <_>5 7 6 8 -1. + <_>5 11 6 4 2. + 0 + -4.8026088625192642e-003 + -0.1309693008661270 + 0.0888154208660126 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -9.7134411334991455e-003 + 0.1248589009046555 + -0.0458519198000431 + <_> + + <_> + + + + <_>8 4 3 10 -1. + <_>8 9 3 5 2. + 0 + -3.6871319753117859e-004 + 0.1079858019948006 + -0.1079533025622368 + -1.6477719545364380 + 25 + -1 + <_> + + + <_> + + <_> + + + + <_>2 0 6 5 -1. + <_>5 0 3 5 2. + 0 + 4.8573319800198078e-003 + -0.2216591984033585 + 0.2066199034452438 + <_> + + <_> + + + + <_>8 4 4 14 -1. + <_>8 11 4 7 2. + 0 + -9.0601091505959630e-004 + 0.0926842167973518 + -0.3469268977642059 + <_> + + <_> + + + + <_>3 6 5 6 -1. + <_>3 9 5 3 2. + 0 + 3.8109601009637117e-003 + -0.4769397974014282 + 0.0722088664770126 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -1.9349349895492196e-003 + -0.2347428947687149 + 0.1030836999416351 + <_> + + <_> + + + + <_>1 2 17 6 -1. + <_>1 4 17 2 3. + 0 + 4.6932199038565159e-003 + -0.2175559997558594 + 0.1029777005314827 + <_> + + <_> + + + + <_>9 5 6 10 -1. + <_>9 5 3 10 2. + 0 + -4.5681721530854702e-003 + -0.3297953903675079 + 0.0621086992323399 + <_> + + <_> + + + + <_>5 4 6 6 -1. + <_>8 4 3 6 2. + 0 + 2.0976159721612930e-003 + -0.2758555114269257 + 0.0744477882981300 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>12 6 7 3 2. + <_>5 9 7 3 2. + 0 + -0.0234344601631165 + -0.2451709061861038 + 0.0208883006125689 + <_> + + <_> + + + + <_>1 6 14 6 -1. + <_>1 6 7 3 2. + <_>8 9 7 3 2. + 0 + -7.5489659793674946e-003 + -0.2353949993848801 + 0.0805947929620743 + <_> + + <_> + + + + <_>4 9 12 5 -1. + <_>8 9 4 5 3. + 0 + -1.3637889642268419e-003 + 0.1246228963136673 + -0.1438398063182831 + <_> + + <_> + + + + <_>0 5 20 15 -1. + <_>0 10 20 5 3. + 0 + 0.0208817701786757 + -0.2548697888851166 + 0.0704801306128502 + <_> + + <_> + + + + <_>12 5 4 14 -1. + <_>14 5 2 7 2. + <_>12 12 2 7 2. 
+ 0 + -1.6712560318410397e-003 + -0.1474708020687103 + 0.0935977473855019 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>2 0 2 9 3. + 0 + -0.0585527084767818 + 0.3792966008186340 + -0.0378922410309315 + <_> + + <_> + + + + <_>16 0 4 8 -1. + <_>16 0 2 8 2. + 0 + -0.0475916415452957 + 0.3476938903331757 + -0.0294844098389149 + <_> + + <_> + + + + <_>6 0 3 13 -1. + <_>7 0 1 13 3. + 0 + 5.7788072153925896e-003 + 0.0416271798312664 + -0.3801231086254120 + <_> + + <_> + + + + <_>16 0 4 8 -1. + <_>16 0 2 8 2. + 0 + 6.1923051252961159e-003 + -0.0798542425036430 + 0.1466230005025864 + <_> + + <_> + + + + <_>0 0 4 8 -1. + <_>2 0 2 8 2. + 0 + 8.6211357265710831e-003 + -0.0790525972843170 + 0.1970718055963516 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 0.3878768980503082 + 9.9500510841608047e-003 + -0.5495527982711792 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.1218483000993729 + 0.0215608794242144 + -0.7118219137191773 + <_> + + <_> + + + + <_>11 10 8 6 -1. + <_>11 12 8 2 3. + 0 + 5.6779510341584682e-003 + 0.0507787317037582 + -0.1981754004955292 + <_> + + <_> + + + + <_>6 0 3 20 -1. + <_>7 0 1 20 3. + 0 + -0.0324072688817978 + -0.6577636003494263 + 0.0189302302896976 + <_> + + <_> + + + + <_>7 5 8 12 -1. + <_>11 5 4 6 2. + <_>7 11 4 6 2. + 0 + 2.3834649473428726e-003 + 0.0359106212854385 + -0.1938607990741730 + <_> + + <_> + + + + <_>4 5 10 12 -1. + <_>4 5 5 6 2. + <_>9 11 5 6 2. + 0 + 4.4861159403808415e-004 + 0.0630491897463799 + -0.2306728065013886 + <_> + + <_> + + + + <_>12 5 4 14 -1. + <_>14 5 2 7 2. + <_>12 12 2 7 2. + 0 + 0.0283813606947660 + 0.0137987695634365 + -0.2028799057006836 + <_> + + <_> + + + + <_>4 5 4 14 -1. + <_>4 5 2 7 2. + <_>6 12 2 7 2. + 0 + -2.7084869798272848e-003 + -0.1645527034997940 + 0.0811827331781387 + <_> + + <_> + + + + <_>14 10 6 9 -1. + <_>14 10 3 9 2. + 0 + -0.0132185798138380 + 0.1292906999588013 + -0.0494105815887451 + <_> + + <_> + + + + <_>3 8 14 2 -1. + <_>3 9 14 1 2. + 0 + 1.8623949727043509e-003 + -0.2739819884300232 + 0.0457460992038250 + <_> + + <_> + + + + <_>11 10 8 6 -1. + <_>11 12 8 2 3. + 0 + -6.6727721132338047e-003 + -0.1516754031181335 + 0.0555876195430756 + <_> + + <_> + + + + <_>0 15 14 3 -1. + <_>0 16 14 1 3. + 0 + 1.9492399878799915e-003 + -0.0855471268296242 + 0.1371261030435562 + <_> + + <_> + + + + <_>11 10 8 6 -1. + <_>11 12 8 2 3. + 0 + -0.0709788128733635 + -0.7742931842803955 + 5.5506629869341850e-003 + <_> + + <_> + + + + <_>1 10 8 6 -1. + <_>1 12 8 2 3. + 0 + 5.7003321126103401e-003 + 0.0602996610105038 + -0.2300011068582535 + <_> + + <_> + + + + <_>1 0 18 19 -1. + <_>7 0 6 19 3. + 0 + 0.0663107782602310 + -0.0856906995177269 + 0.1516992002725601 + <_> + + <_> + + + + <_>0 9 6 10 -1. + <_>3 9 3 10 2. + 0 + -8.5291899740695953e-003 + 0.1429758965969086 + -0.0918055474758148 + <_> + + <_> + + + + <_>11 15 9 4 -1. + <_>11 17 9 2 2. + 0 + 5.1141469739377499e-003 + 0.0469179898500443 + -0.1331984996795654 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 1.9523530500009656e-003 + -0.1417748928070068 + 0.1052417010068893 + <_> + + <_> + + + + <_>4 3 12 10 -1. + <_>8 3 4 10 3. + 0 + 0.1955831050872803 + 0.0144788604229689 + -0.7998542785644531 + <_> + + <_> + + + + <_>7 10 3 10 -1. + <_>7 15 3 5 2. + 0 + 5.3029200062155724e-003 + 0.0372377000749111 + -0.2613134980201721 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + 6.4814360812306404e-003 + -0.0490926988422871 + 0.2568177878856659 + <_> + + <_> + + + + <_>0 15 9 4 -1. + <_>0 17 9 2 2. 
+ 0 + -6.1802868731319904e-003 + -0.2131792008876801 + 0.0613900311291218 + <_> + + <_> + + + + <_>6 12 14 3 -1. + <_>6 13 14 1 3. + 0 + 1.9895739387720823e-003 + -0.0713353827595711 + 0.1300242990255356 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + -4.2928531183861196e-004 + 0.0723834782838821 + -0.1564379930496216 + <_> + + <_> + + + + <_>11 10 6 6 -1. + <_>11 10 3 6 2. + 0 + -4.5690318802371621e-004 + 0.0757323578000069 + -0.1093285977840424 + <_> + + <_> + + + + <_>7 0 5 15 -1. + <_>7 5 5 5 3. + 0 + -0.1333373934030533 + -0.5488920807838440 + 0.0194945503026247 + <_> + + <_> + + + + <_>4 7 13 2 -1. + <_>4 8 13 1 2. + 0 + 8.2705507520586252e-004 + -0.1873998939990997 + 0.0574982613325119 + <_> + + <_> + + + + <_>2 8 4 12 -1. + <_>2 12 4 4 3. + 0 + -1.6954699531197548e-003 + -0.1410070061683655 + 0.0865483880043030 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + 9.8944529891014099e-003 + 0.0178981591016054 + -0.3139568865299225 + <_> + + <_> + + + + <_>7 5 6 15 -1. + <_>9 5 2 15 3. + 0 + 6.0766572132706642e-003 + -0.1312011033296585 + 0.0915785282850266 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + -0.0356802791357040 + -0.3888098895549774 + 0.0113778095692396 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + 8.7540567619726062e-004 + 0.0530229285359383 + -0.2150994986295700 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 1.9438719609752297e-003 + -0.0810357034206390 + 0.1338230967521668 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0563981384038925 + 0.0148579301312566 + -0.6955115199089050 + <_> + + <_> + + + + <_>3 2 14 2 -1. + <_>3 3 14 1 2. + 0 + -1.0274930391460657e-003 + -0.1919634938240051 + 0.0475960299372673 + <_> + + <_> + + + + <_>8 1 4 14 -1. + <_>8 8 4 7 2. + 0 + -3.3568819053471088e-003 + 0.1046605035662651 + -0.1017097979784012 + <_> + + <_> + + + + <_>2 6 17 6 -1. + <_>2 9 17 3 2. + 0 + 0.1173404008150101 + -0.0465654395520687 + 0.2087873965501785 + <_> + + <_> + + + + <_>0 7 5 9 -1. + <_>0 10 5 3 3. + 0 + 8.8005866855382919e-003 + 0.0917546525597572 + -0.1222150027751923 + <_> + + <_> + + + + <_>4 5 13 2 -1. + <_>4 6 13 1 2. + 0 + 2.4095149710774422e-003 + -0.0367521606385708 + 0.2344343960285187 + <_> + + <_> + + + + <_>2 9 14 2 -1. + <_>2 10 14 1 2. + 0 + -2.8434590785764158e-004 + -0.1999672949314117 + 0.0473531596362591 + <_> + + <_> + + + + <_>5 15 13 3 -1. + <_>5 16 13 1 3. + 0 + 0.0176237095147371 + -0.0227655190974474 + 0.2564666867256165 + <_> + + <_> + + + + <_>5 0 3 14 -1. + <_>6 0 1 14 3. + 0 + 0.0141217401251197 + 0.0226599890738726 + -0.4244908094406128 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -0.0152906496077776 + 0.2444576025009155 + -0.0431456305086613 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -0.0254268795251846 + 0.4128093123435974 + -0.0250028204172850 + <_> + + <_> + + + + <_>4 0 14 4 -1. + <_>11 0 7 2 2. + <_>4 2 7 2 2. + 0 + 8.7438793852925301e-003 + 0.0419315397739410 + -0.1243304014205933 + <_> + + <_> + + + + <_>0 1 20 4 -1. + <_>0 1 10 2 2. + <_>10 3 10 2 2. + 0 + 0.0416429601609707 + 0.0215358696877956 + -0.4906223118305206 + <_> + + <_> + + + + <_>7 0 7 6 -1. + <_>7 3 7 3 2. + 0 + 0.0706923305988312 + -0.0243070907890797 + 0.3360632956027985 + <_> + + <_> + + + + <_>5 2 6 10 -1. + <_>7 2 2 10 3. + 0 + -0.0776903480291367 + -0.7388399839401245 + 0.0135768298059702 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. 
+ 0 + 3.7781539140269160e-004 + -0.0966977328062058 + 0.0946905091404915 + <_> + + <_> + + + + <_>1 8 13 3 -1. + <_>1 9 13 1 3. + 0 + -1.1192850070074201e-003 + -0.2163182049989700 + 0.0442351996898651 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0597722493112087 + -0.0320242606103420 + 0.3060266077518463 + <_> + + <_> + + + + <_>0 1 20 3 -1. + <_>0 2 20 1 3. + 0 + -0.0154171204194427 + -0.3408783972263336 + 0.0280979797244072 + <_> + + <_> + + + + <_>18 3 2 17 -1. + <_>18 3 1 17 2. + 0 + -6.3111339695751667e-003 + 0.1532768011093140 + -0.0479014590382576 + <_> + + <_> + + + + <_>0 0 20 10 -1. + <_>0 0 10 5 2. + <_>10 5 10 5 2. + 0 + -0.0188264995813370 + -0.1526959985494614 + 0.0609556287527084 + <_> + + <_> + + + + <_>4 8 14 4 -1. + <_>11 8 7 2 2. + <_>4 10 7 2 2. + 0 + -0.0392238385975361 + 0.2662413120269775 + -7.6400930993258953e-003 + <_> + + <_> + + + + <_>0 3 7 6 -1. + <_>0 5 7 2 3. + 0 + -0.0486531592905521 + -0.4548850059509277 + 0.0198530498892069 + <_> + + <_> + + + + <_>4 8 14 4 -1. + <_>11 8 7 2 2. + <_>4 10 7 2 2. + 0 + 0.0672605186700821 + 1.0999150108546019e-003 + -0.7527347803115845 + <_> + + <_> + + + + <_>2 8 14 4 -1. + <_>2 8 7 2 2. + <_>9 10 7 2 2. + 0 + 1.2728190049529076e-003 + -0.0781212970614433 + 0.1181655004620552 + <_> + + <_> + + + + <_>3 4 16 10 -1. + <_>11 4 8 5 2. + <_>3 9 8 5 2. + 0 + -0.0941470265388489 + -0.5215358734130859 + 0.0149731701239944 + <_> + + <_> + + + + <_>6 3 8 6 -1. + <_>6 5 8 2 3. + 0 + -0.0474544614553452 + 0.2654714882373810 + -0.0305874105542898 + <_> + + <_> + + + + <_>5 3 13 2 -1. + <_>5 4 13 1 2. + 0 + -5.6014367146417499e-004 + -0.1050644963979721 + 0.0601612813770771 + <_> + + <_> + + + + <_>4 10 6 7 -1. + <_>7 10 3 7 2. + 0 + -2.9601220740005374e-004 + 0.0622574500739574 + -0.1312654018402100 + <_> + + <_> + + + + <_>11 7 4 13 -1. + <_>11 7 2 13 2. + 0 + -0.0209184903651476 + -0.2083151042461395 + 0.0268431194126606 + <_> + + <_> + + + + <_>5 7 4 13 -1. + <_>7 7 2 13 2. + 0 + -7.2696260176599026e-003 + -0.1622764021158218 + 0.0619370490312576 + <_> + + <_> + + + + <_>5 10 14 3 -1. + <_>5 11 14 1 3. + 0 + 7.2555372025817633e-004 + -0.1031593978404999 + 0.0680408775806427 + <_> + + <_> + + + + <_>2 6 3 14 -1. + <_>2 13 3 7 2. + 0 + 0.0208288393914700 + -0.0445576906204224 + 0.2216746956110001 + <_> + + <_> + + + + <_>3 9 15 3 -1. + <_>8 9 5 3 3. + 0 + 0.0872011929750443 + 9.5432223752140999e-003 + -0.5870642066001892 + <_> + + <_> + + + + <_>2 4 6 16 -1. + <_>2 4 3 8 2. + <_>5 12 3 8 2. + 0 + 0.0415966287255287 + -0.0307745393365622 + 0.2880901992321014 + <_> + + <_> + + + + <_>12 0 3 13 -1. + <_>13 0 1 13 3. + 0 + -0.0261548794806004 + -0.5935354232788086 + 0.0143884103745222 + <_> + + <_> + + + + <_>4 0 8 20 -1. + <_>4 10 8 10 2. + 0 + 0.2717542946338654 + 0.0137177202850580 + -0.5461906790733337 + <_> + + <_> + + + + <_>8 2 7 9 -1. + <_>8 5 7 3 3. + 0 + 0.0218116994947195 + -0.0167981106787920 + 0.2906233072280884 + <_> + + <_> + + + + <_>5 0 3 13 -1. + <_>6 0 1 13 3. + 0 + -0.0199659299105406 + -0.4305211901664734 + 0.0189177598804235 + <_> + + <_> + + + + <_>11 2 6 10 -1. + <_>14 2 3 5 2. + <_>11 7 3 5 2. + 0 + -1.1561929713934660e-003 + 0.0880315378308296 + -0.1959020942449570 + <_> + + <_> + + + + <_>5 9 6 7 -1. + <_>7 9 2 7 3. + 0 + -1.6627550357952714e-003 + 0.0891115590929985 + -0.0909596532583237 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + -1.7325150547549129e-003 + -0.1154083013534546 + 0.0536366701126099 + <_> + + <_> + + + + <_>7 7 6 7 -1. + <_>9 7 2 7 3. 
+ 0 + -0.0392314083874226 + 0.6247127056121826 + -0.0136669203639030 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + 0.0104235801845789 + 0.0247111301869154 + -0.1675174981355667 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + 2.2725639864802361e-003 + -0.0551267787814140 + 0.1478146016597748 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -3.9644641801714897e-003 + 0.1133799031376839 + -0.0686720535159111 + <_> + + <_> + + + + <_>0 12 9 4 -1. + <_>0 14 9 2 2. + 0 + 4.0544760413467884e-003 + 0.0401802100241184 + -0.2383735030889511 + <_> + + <_> + + + + <_>7 7 8 10 -1. + <_>11 7 4 5 2. + <_>7 12 4 5 2. + 0 + 2.0538640674203634e-003 + 0.0328636914491653 + -0.1249582991003990 + <_> + + <_> + + + + <_>5 7 8 10 -1. + <_>5 7 4 5 2. + <_>9 12 4 5 2. + 0 + 2.9705381020903587e-003 + 0.0418100617825985 + -0.2053965926170349 + <_> + + <_> + + + + <_>14 15 6 5 -1. + <_>14 15 3 5 2. + 0 + -8.3381328731775284e-003 + 0.0922587364912033 + -0.0384351797401905 + <_> + + <_> + + + + <_>3 14 13 6 -1. + <_>3 16 13 2 3. + 0 + 1.5640279743820429e-003 + -0.0966615676879883 + 0.0855948179960251 + <_> + + <_> + + + + <_>3 12 14 4 -1. + <_>10 12 7 2 2. + <_>3 14 7 2 2. + 0 + -0.0370529703795910 + -0.7791547179222107 + 0.0104182902723551 + <_> + + <_> + + + + <_>0 15 6 5 -1. + <_>3 15 3 5 2. + 0 + -0.0101099302992225 + 0.1249905973672867 + -0.0644378364086151 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + -0.0793359801173210 + 0.7078437209129334 + -3.1601081136614084e-003 + <_> + + <_> + + + + <_>3 0 3 13 -1. + <_>4 0 1 13 3. + 0 + -2.5811919476836920e-003 + -0.1680275946855545 + 0.0672576129436493 + <_> + + <_> + + + + <_>2 11 18 8 -1. + <_>8 11 6 8 3. + 0 + 0.0188635401427746 + -0.0527492985129356 + 0.1457815021276474 + <_> + + <_> + + + + <_>2 3 3 15 -1. + <_>3 3 1 15 3. + 0 + 6.1697891214862466e-004 + -0.0965271666646004 + 0.0930772423744202 + <_> + + <_> + + + + <_>16 0 3 13 -1. + <_>17 0 1 13 3. + 0 + -9.9242655560374260e-003 + 0.1216444000601769 + -0.0264398306608200 + <_> + + <_> + + + + <_>3 3 6 7 -1. + <_>5 3 2 7 3. + 0 + -0.0473820082843304 + -0.3719424009323120 + 0.0248844493180513 + <_> + + <_> + + + + <_>16 0 3 13 -1. + <_>17 0 1 13 3. + 0 + 3.8585590664297342e-003 + -0.0424208305776119 + 0.1199790015816689 + <_> + + <_> + + + + <_>1 0 3 13 -1. + <_>2 0 1 13 3. + 0 + 2.3721279576420784e-003 + -0.0727690532803535 + 0.1302762925624847 + <_> + + <_> + + + + <_>8 1 4 16 -1. + <_>10 1 2 8 2. + <_>8 9 2 8 2. + 0 + -0.0319685712456703 + -0.4708814918994904 + 0.0188630390912294 + <_> + + <_> + + + + <_>7 6 5 9 -1. + <_>7 9 5 3 3. + 0 + -7.2849751450121403e-004 + 0.2812831997871399 + -0.0307851396501064 + <_> + + <_> + + + + <_>6 5 8 8 -1. + <_>6 9 8 4 2. + 0 + -0.0120968800038099 + -0.7016307115554810 + 0.0133367097005248 + <_> + + <_> + + + + <_>0 1 6 5 -1. + <_>3 1 3 5 2. + 0 + -0.0176583696156740 + 0.1919316053390503 + -0.0479510016739368 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0109740598127246 + -0.2730732858181000 + 0.0287844892591238 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0185601804405451 + -0.4430676102638245 + 0.0204720199108124 + <_> + + <_> + + + + <_>11 0 9 7 -1. + <_>14 0 3 7 3. + 0 + 0.0138611001893878 + -0.0374713391065598 + 0.1092984974384308 + <_> + + <_> + + + + <_>0 11 12 7 -1. + <_>6 11 6 7 2. + 0 + 0.0562431700527668 + 0.0133221298456192 + -0.6197215914726257 + <_> + + <_> + + + + <_>7 5 9 5 -1. + <_>10 5 3 5 3. 
+ 0 + -0.0137467999011278 + 0.1898090988397598 + -0.0438101515173912 + <_> + + <_> + + + + <_>2 1 15 2 -1. + <_>2 2 15 1 2. + 0 + -2.0494889758992940e-004 + -0.1480952054262161 + 0.0594585500657558 + <_> + + <_> + + + + <_>15 0 5 9 -1. + <_>15 3 5 3 3. + 0 + 0.0114160301163793 + 0.0451118014752865 + -0.1727721989154816 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + 0.0411697886884212 + -0.0234428402036428 + 0.3341323137283325 + <_> + + <_> + + + + <_>6 8 14 4 -1. + <_>13 8 7 2 2. + <_>6 10 7 2 2. + 0 + -9.6223354339599609e-003 + -0.1608631014823914 + 0.0331831499934196 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>10 0 1 13 2. + 0 + 1.5951909590512514e-003 + -0.0635905116796494 + 0.1339666992425919 + <_> + + <_> + + + + <_>4 6 12 3 -1. + <_>4 6 6 3 2. + 0 + -6.3169049099087715e-003 + -0.1636531949043274 + 0.0515520498156548 + <_> + + <_> + + + + <_>4 2 6 7 -1. + <_>7 2 3 7 2. + 0 + 0.0464673787355423 + -0.0256277099251747 + 0.3809756934642792 + <_> + + <_> + + + + <_>9 5 4 11 -1. + <_>9 5 2 11 2. + 0 + 0.0915985926985741 + 4.2748241685330868e-003 + -0.5974013209342957 + <_> + + <_> + + + + <_>7 5 4 11 -1. + <_>9 5 2 11 2. + 0 + -1.0416290024295449e-003 + -0.1473388969898224 + 0.0551059506833553 + <_> + + <_> + + + + <_>5 12 15 8 -1. + <_>10 12 5 8 3. + 0 + -0.0233344696462154 + 0.0922664627432823 + -0.0536538809537888 + <_> + + <_> + + + + <_>5 7 4 9 -1. + <_>7 7 2 9 2. + 0 + -6.3067381270229816e-003 + -0.1697469949722290 + 0.0600464791059494 + <_> + + <_> + + + + <_>6 6 10 4 -1. + <_>6 8 10 2 2. + 0 + 5.2549671381711960e-003 + -0.0889894068241119 + 0.0473065488040447 + <_> + + <_> + + + + <_>0 4 5 9 -1. + <_>0 7 5 3 3. + 0 + -0.0106994602829218 + -0.1582352072000504 + 0.0511008314788342 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -5.4387808777391911e-003 + 0.1252456009387970 + -0.0394726991653442 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 3.4613600000739098e-003 + -0.0688926801085472 + 0.1792038977146149 + <_> + + <_> + + + + <_>7 8 10 12 -1. + <_>7 12 10 4 3. + 0 + -0.0178943593055010 + -0.0945996567606926 + 0.0623227283358574 + <_> + + <_> + + + + <_>2 8 9 12 -1. + <_>5 8 3 12 3. + 0 + -0.2114790976047516 + -0.8627576828002930 + 9.4653964042663574e-003 + <_> + + <_> + + + + <_>11 0 9 9 -1. + <_>11 3 9 3 3. + 0 + 1.4149859780445695e-003 + -0.0862147882580757 + 0.0406359210610390 + <_> + + <_> + + + + <_>5 9 9 5 -1. + <_>8 9 3 5 3. + 0 + -1.5357299707829952e-003 + 0.0995254367589951 + -0.0775581598281860 + <_> + + <_> + + + + <_>9 2 6 10 -1. + <_>12 2 3 5 2. + <_>9 7 3 5 2. + 0 + 2.8714749496430159e-003 + -0.0637787729501724 + 0.1125103011727333 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 0.0184000693261623 + 0.0237006694078445 + -0.3595368862152100 + <_> + + <_> + + + + <_>6 3 10 6 -1. + <_>11 3 5 3 2. + <_>6 6 5 3 2. + 0 + -0.0730780065059662 + -0.8383663892745972 + 2.1687510889023542e-003 + <_> + + <_> + + + + <_>3 4 14 6 -1. + <_>3 4 7 3 2. + <_>10 7 7 3 2. + 0 + 9.8323542624711990e-003 + -0.0538999699056149 + 0.1618697047233582 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + 0.0229879599064589 + 0.0159551594406366 + -0.3307431042194367 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -5.4363980889320374e-003 + -0.1337265074253082 + 0.0581624507904053 + <_> + + <_> + + + + <_>5 3 12 6 -1. + <_>9 3 4 6 3. + 0 + 0.0101777398958802 + -0.0579019486904144 + 0.0407890602946281 + <_> + + <_> + + + + <_>3 3 12 6 -1. + <_>7 3 4 6 3. 
+ 0 + -0.0516903698444366 + 0.4788129031658173 + -0.0200511794537306 + <_> + + <_> + + + + <_>8 4 6 9 -1. + <_>10 4 2 9 3. + 0 + -0.0463953316211700 + 0.3542290031909943 + -0.0166928898543119 + <_> + + <_> + + + + <_>2 12 13 2 -1. + <_>2 13 13 1 2. + 0 + 4.0920148603618145e-004 + -0.0588727891445160 + 0.1361768990755081 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>3 13 14 2 2. + 0 + 3.0743801034986973e-003 + 0.0318927317857742 + -0.2939678132534027 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1343895941972733 + 0.0150188403204083 + -0.5155730843544006 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. + 0 + -0.0449545904994011 + -0.6540431976318359 + 5.8901738375425339e-003 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -0.0414790511131287 + -0.5692554116249085 + 0.0130122201517224 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 0.0291170999407768 + -0.0191480293869972 + 0.1831838041543961 + <_> + + <_> + + + + <_>0 0 7 6 -1. + <_>0 2 7 2 3. + 0 + 0.0510732494294643 + 0.0152603099122643 + -0.4948062896728516 + <_> + + <_> + + + + <_>3 1 15 12 -1. + <_>3 7 15 6 2. + 0 + 7.0886377943679690e-004 + 0.0876986533403397 + -0.0733336731791496 + <_> + + <_> + + + + <_>0 0 10 10 -1. + <_>0 0 5 5 2. + <_>5 5 5 5 2. + 0 + 0.0118353897705674 + -0.0391898788511753 + 0.2083484977483749 + <_> + + <_> + + + + <_>16 1 4 8 -1. + <_>16 5 4 4 2. + 0 + -4.2260489426553249e-003 + -0.1873376965522766 + 0.0746668502688408 + <_> + + <_> + + + + <_>0 14 12 5 -1. + <_>4 14 4 5 3. + 0 + 0.0348477996885777 + -0.0305729601532221 + 0.2651110887527466 + <_> + + <_> + + + + <_>11 5 2 15 -1. + <_>11 5 1 15 2. + 0 + 0.0129329804331064 + 0.0222243499010801 + -0.2320410013198853 + <_> + + <_> + + + + <_>6 2 7 6 -1. + <_>6 5 7 3 2. + 0 + -3.4806900657713413e-003 + 0.0605482384562492 + -0.1303485035896301 + <_> + + <_> + + + + <_>10 2 6 9 -1. + <_>10 5 6 3 3. + 0 + 0.0172250792384148 + -6.7219920456409454e-003 + 0.1112814992666245 + <_> + + <_> + + + + <_>7 5 2 15 -1. + <_>8 5 1 15 2. + 0 + -2.4316289927810431e-003 + -0.1872065961360931 + 0.0412841401994228 + <_> + + <_> + + + + <_>18 0 2 18 -1. + <_>18 0 1 18 2. + 0 + -0.0117866899818182 + 0.1591742038726807 + -0.0307634007185698 + <_> + + <_> + + + + <_>0 8 4 8 -1. + <_>0 12 4 4 2. + 0 + -5.3132520988583565e-003 + -0.1378607004880905 + 0.0542466305196285 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>8 6 2 7 2. + 0 + -0.0200120396912098 + 0.2935963869094849 + -0.0268663503229618 + <_> + + <_> + + + + <_>6 7 8 4 -1. + <_>10 7 4 4 2. + 0 + 2.0955558866262436e-003 + 0.0679630637168884 + -0.1252086013555527 + <_> + + <_> + + + + <_>5 9 10 6 -1. + <_>10 9 5 3 2. + <_>5 12 5 3 2. + 0 + -0.0396486409008503 + -0.5819538831710815 + 0.0131466900929809 + <_> + + <_> + + + + <_>4 7 5 8 -1. + <_>4 11 5 4 2. + 0 + -0.0344858504831791 + 0.4555915892124176 + -0.0186594296246767 + <_> + + <_> + + + + <_>13 8 7 6 -1. + <_>13 10 7 2 3. + 0 + -0.0445695407688618 + -0.9206756949424744 + 5.3931041620671749e-003 + <_> + + <_> + + + + <_>0 8 7 6 -1. + <_>0 10 7 2 3. + 0 + -1.1394550092518330e-003 + -0.2193243950605393 + 0.0362493805587292 + <_> + + <_> + + + + <_>4 0 12 19 -1. + <_>4 0 6 19 2. + 0 + -0.0370440818369389 + 0.1619254946708679 + -0.0476619191467762 + <_> + + <_> + + + + <_>0 12 15 8 -1. + <_>5 12 5 8 3. + 0 + 0.0193004906177521 + -0.0544328317046165 + 0.1443210989236832 + <_> + + <_> + + + + <_>6 8 14 4 -1. + <_>13 8 7 2 2. + <_>6 10 7 2 2. 
+ 0 + -1.4382150257006288e-003 + -0.0673439800739288 + 0.0425113812088966 + <_> + + <_> + + + + <_>1 9 13 3 -1. + <_>1 10 13 1 3. + 0 + 0.0387610085308552 + 0.0141719300299883 + -0.5338264703750610 + <_> + + <_> + + + + <_>18 0 2 18 -1. + <_>18 0 1 18 2. + 0 + -0.1526580005884171 + -0.9153332710266113 + 2.1413750946521759e-003 + <_> + + <_> + + + + <_>0 0 2 18 -1. + <_>1 0 1 18 2. + 0 + -8.4089813753962517e-003 + 0.1770524978637695 + -0.0437534302473068 + <_> + + <_> + + + + <_>16 6 4 14 -1. + <_>16 6 2 14 2. + 0 + -0.1667317003011704 + -0.5639045238494873 + 7.5904577970504761e-003 + <_> + + <_> + + + + <_>4 0 8 8 -1. + <_>4 0 4 4 2. + <_>8 4 4 4 2. + 0 + -7.3619261384010315e-003 + -0.1969183981418610 + 0.0396985001862049 + <_> + + <_> + + + + <_>2 0 16 10 -1. + <_>10 0 8 5 2. + <_>2 5 8 5 2. + 0 + -9.9920090287923813e-003 + -0.1341951042413712 + 0.0634891986846924 + <_> + + <_> + + + + <_>3 10 6 7 -1. + <_>6 10 3 7 2. + 0 + -2.2656610235571861e-003 + 0.0796760618686676 + -0.1068596020340920 + <_> + + <_> + + + + <_>1 9 18 5 -1. + <_>7 9 6 5 3. + 0 + -0.1386882066726685 + -0.4730693101882935 + 0.0153541304171085 + <_> + + <_> + + + + <_>0 7 4 9 -1. + <_>2 7 2 9 2. + 0 + -0.1328424066305161 + -0.8798437118530273 + 7.0595988072454929e-003 + <_> + + <_> + + + + <_>14 0 6 16 -1. + <_>14 0 3 16 2. + 0 + -0.0248822998255491 + 0.1333352029323578 + -0.0409336015582085 + <_> + + <_> + + + + <_>0 3 5 9 -1. + <_>0 6 5 3 3. + 0 + -6.6814320161938667e-003 + -0.1029554009437561 + 0.0748700425028801 + <_> + + <_> + + + + <_>11 2 9 12 -1. + <_>11 6 9 4 3. + 0 + 0.0603266991674900 + 0.0133558399975300 + -0.3760299980640411 + <_> + + <_> + + + + <_>0 2 9 12 -1. + <_>0 6 9 4 3. + 0 + -0.0855823010206223 + 0.2120077013969421 + -0.0387420281767845 + <_> + + <_> + + + + <_>8 2 5 12 -1. + <_>8 6 5 4 3. + 0 + -0.0120764002203941 + -0.0824575200676918 + 0.0677804425358772 + <_> + + <_> + + + + <_>5 6 9 9 -1. + <_>5 9 9 3 3. + 0 + 0.0203110892325640 + -0.1181799024343491 + 0.0648305788636208 + <_> + + <_> + + + + <_>0 17 20 2 -1. + <_>0 18 20 1 2. + 0 + -3.9900741539895535e-003 + -0.1572359949350357 + 0.0530339293181896 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + -1.4961370034143329e-003 + 0.2439212948083878 + -0.0311708394438028 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>9 0 2 8 3. + 0 + 1.8568099767435342e-004 + -0.1940955072641373 + 0.0454902090132236 + <_> + + <_> + + + + <_>6 5 8 14 -1. + <_>6 12 8 7 2. + 0 + 0.1479648053646088 + 6.2650348991155624e-003 + -0.9998729825019836 + <_> + + <_> + + + + <_>11 0 9 9 -1. + <_>11 3 9 3 3. + 0 + 0.1691866964101791 + 4.2962608858942986e-004 + -0.3549610078334808 + <_> + + <_> + + + + <_>0 0 9 9 -1. + <_>0 3 9 3 3. + 0 + -1.9380000594537705e-004 + -0.1305679976940155 + 0.0548771694302559 + <_> + + <_> + + + + <_>11 0 4 14 -1. + <_>13 0 2 7 2. + <_>11 7 2 7 2. + 0 + -6.2729098135605454e-004 + 0.0410535708069801 + -0.0831749886274338 + <_> + + <_> + + + + <_>0 1 18 4 -1. + <_>6 1 6 4 3. + 0 + -2.6877908967435360e-003 + 0.1551398932933807 + -0.0555738992989063 + <_> + + <_> + + + + <_>11 0 4 14 -1. + <_>13 0 2 7 2. + <_>11 7 2 7 2. + 0 + -0.0768852531909943 + -0.6144021153450012 + 3.2789220567792654e-003 + <_> + + <_> + + + + <_>4 0 4 14 -1. + <_>4 0 2 7 2. + <_>6 7 2 7 2. + 0 + -1.6956549370661378e-004 + 0.0609341487288475 + -0.1471709012985230 + <_> + + <_> + + + + <_>6 13 10 6 -1. + <_>11 13 5 3 2. + <_>6 16 5 3 2. + 0 + 0.0373908504843712 + 8.8595114648342133e-003 + -0.2384341061115265 + <_> + + <_> + + + + <_>1 8 14 4 -1. + <_>1 8 7 2 2. + <_>8 10 7 2 2. 
+ 0 + -3.7611280567944050e-003 + -0.1189605966210365 + 0.0545266792178154 + <_> + + <_> + + + + <_>11 1 4 9 -1. + <_>11 1 2 9 2. + 0 + -0.0755386725068092 + 1. + -2.8170819859951735e-003 + <_> + + <_> + + + + <_>5 1 4 9 -1. + <_>7 1 2 9 2. + 0 + 5.1163119496777654e-004 + -0.1133382990956307 + 0.0682932510972023 + <_> + + <_> + + + + <_>9 0 6 6 -1. + <_>9 0 3 6 2. + 0 + -0.0543735213577747 + 0.5677248835563660 + -5.5303489789366722e-003 + <_> + + <_> + + + + <_>5 0 6 6 -1. + <_>8 0 3 6 2. + 0 + -0.0122007597237825 + 0.2631076872348785 + -0.0353340692818165 + <_> + + <_> + + + + <_>6 5 8 4 -1. + <_>6 5 4 4 2. + 0 + 0.0653407573699951 + 8.2145677879452705e-003 + -0.9791451096534729 + <_> + + <_> + + + + <_>2 9 12 4 -1. + <_>6 9 4 4 3. + 0 + -0.0970281064510345 + -0.7584530711174011 + 6.8704010918736458e-003 + <_> + + <_> + + + + <_>10 4 3 14 -1. + <_>11 4 1 14 3. + 0 + -0.0497682802379131 + -0.8078631758689880 + 1.3162019895389676e-003 + <_> + + <_> + + + + <_>7 4 3 14 -1. + <_>8 4 1 14 3. + 0 + -2.9802118660882115e-004 + 0.0850996226072311 + -0.0910548269748688 + <_> + + <_> + + + + <_>0 0 20 14 -1. + <_>0 0 10 14 2. + 0 + 0.0101245697587729 + -0.0891725793480873 + 0.0774021893739700 + <_> + + <_> + + + + <_>2 9 16 10 -1. + <_>10 9 8 10 2. + 0 + 8.1574246287345886e-003 + -0.0640160292387009 + 0.1246282979846001 + <_> + + <_> + + + + <_>2 5 16 8 -1. + <_>10 5 8 4 2. + <_>2 9 8 4 2. + 0 + -0.0120939202606678 + -0.1843356043100357 + 0.0496591888368130 + <_> + + <_> + + + + <_>4 2 10 6 -1. + <_>4 4 10 2 3. + 0 + -0.0119069097563624 + 0.2627781033515930 + -0.0299211591482162 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0814384222030640 + -0.6438925266265869 + 0.0172327104955912 + <_> + + <_> + + + + <_>0 9 18 3 -1. + <_>0 10 18 1 3. + 0 + 1.4961180277168751e-003 + -0.1222866028547287 + 0.0577638708055019 + <_> + + <_> + + + + <_>3 11 14 9 -1. + <_>3 14 14 3 3. + 0 + -0.0226512495428324 + -0.1109075993299484 + 0.0703856423497200 + <_> + + <_> + + + + <_>1 14 12 6 -1. + <_>1 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0237897709012032 + 0.2964445054531097 + -0.0259977392852306 + <_> + + <_> + + + + <_>6 15 14 4 -1. + <_>13 15 7 2 2. + <_>6 17 7 2 2. + 0 + 1.4299990143626928e-003 + -0.0897168517112732 + 0.0560308210551739 + <_> + + <_> + + + + <_>3 13 10 6 -1. + <_>3 13 5 3 2. + <_>8 16 5 3 2. + 0 + -0.0415934585034847 + -0.5816047191619873 + 0.0115999300032854 + <_> + + <_> + + + + <_>0 6 20 3 -1. + <_>0 7 20 1 3. + 0 + -2.5586199481040239e-003 + 0.0622414089739323 + -0.1132832989096642 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.1025229021906853 + -0.8518571853637695 + 8.2774916663765907e-003 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -3.1799520365893841e-003 + -0.1391806006431580 + 0.0537192188203335 + <_> + + <_> + + + + <_>0 15 16 3 -1. + <_>0 16 16 1 3. + 0 + -3.9835860952734947e-003 + 0.1553149074316025 + -0.0533990003168583 + <_> + + <_> + + + + <_>2 16 16 4 -1. + <_>10 16 8 2 2. + <_>2 18 8 2 2. + 0 + 0.0108959600329399 + 0.0390849001705647 + -0.2126895934343338 + <_> + + <_> + + + + <_>1 15 13 3 -1. + <_>1 16 13 1 3. + 0 + 0.0178651008754969 + -0.0251462105661631 + 0.3358156085014343 + <_> + + <_> + + + + <_>5 10 12 6 -1. + <_>11 10 6 3 2. + <_>5 13 6 3 2. + 0 + 5.5075511336326599e-003 + 0.0233143102377653 + -0.0936663076281548 + <_> + + <_> + + + + <_>3 10 12 6 -1. + <_>3 10 6 3 2. + <_>9 13 6 3 2. + 0 + 2.0092551130801439e-003 + 0.0572313107550144 + -0.1409174948930740 + <_> + + <_> + + + + <_>7 14 10 6 -1. + <_>12 14 5 3 2. 
+ <_>7 17 5 3 2. + 0 + -0.0122186997905374 + 0.1924355030059815 + -0.0246311090886593 + <_> + + <_> + + + + <_>2 13 7 6 -1. + <_>2 15 7 2 3. + 0 + 1.8039119895547628e-003 + 0.0557931996881962 + -0.1294033974409103 + <_> + + <_> + + + + <_>5 14 14 2 -1. + <_>5 15 14 1 2. + 0 + 0.0221598409116268 + -9.0001197531819344e-003 + 0.5215622186660767 + <_> + + <_> + + + + <_>1 16 18 3 -1. + <_>1 17 18 1 3. + 0 + -0.0358272902667522 + -0.6290597915649414 + 0.0117123899981380 + <_> + + <_> + + + + <_>16 1 4 14 -1. + <_>18 1 2 7 2. + <_>16 8 2 7 2. + 0 + 8.9478418231010437e-003 + -0.0374555811285973 + 0.1090630963444710 + <_> + + <_> + + + + <_>6 5 8 14 -1. + <_>6 12 8 7 2. + 0 + -0.1286190003156662 + -0.3952718079090118 + 0.0181515291333199 + <_> + + <_> + + + + <_>5 14 14 2 -1. + <_>5 15 14 1 2. + 0 + 1.8464029999449849e-003 + -0.0339525304734707 + 0.0965961888432503 + <_> + + <_> + + + + <_>4 10 6 8 -1. + <_>6 10 2 8 3. + 0 + 2.8246780857443810e-003 + -0.0626332610845566 + 0.1119887977838516 + <_> + + <_> + + + + <_>5 4 10 12 -1. + <_>10 4 5 6 2. + <_>5 10 5 6 2. + 0 + 0.0690758526325226 + 0.0135905602946877 + -0.5259826183319092 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + -8.0794151872396469e-003 + 0.1308156996965408 + -0.0501007288694382 + <_> + + <_> + + + + <_>3 13 14 4 -1. + <_>10 13 7 2 2. + <_>3 15 7 2 2. + 0 + -3.7193649914115667e-003 + -0.1488758027553558 + 0.0518234893679619 + <_> + + <_> + + + + <_>5 9 6 10 -1. + <_>5 9 3 5 2. + <_>8 14 3 5 2. + 0 + 2.0610638894140720e-003 + -0.0655459389090538 + 0.1134513020515442 + <_> + + <_> + + + + <_>9 7 6 7 -1. + <_>9 7 3 7 2. + 0 + -0.0607952810823917 + -0.7821925878524780 + 4.5540397986769676e-003 + <_> + + <_> + + + + <_>5 7 6 7 -1. + <_>8 7 3 7 2. + 0 + -7.3096780106425285e-003 + -0.1958681046962738 + 0.0355918705463409 + <_> + + <_> + + + + <_>7 13 8 6 -1. + <_>7 15 8 2 3. + 0 + -2.3796008899807930e-003 + 0.0433299206197262 + -0.0601194202899933 + <_> + + <_> + + + + <_>0 0 8 12 -1. + <_>0 0 4 6 2. + <_>4 6 4 6 2. + 0 + -0.0378744788467884 + 0.1670041978359222 + -0.0410824716091156 + <_> + + <_> + + + + <_>6 8 12 5 -1. + <_>10 8 4 5 3. + 0 + -0.0110115502029657 + -0.0797158032655716 + 0.0322470404207706 + <_> + + <_> + + + + <_>5 9 8 5 -1. + <_>9 9 4 5 2. + 0 + -1.5278880018740892e-003 + 0.0975419133901596 + -0.0946948304772377 + <_> + + <_> + + + + <_>7 5 13 3 -1. + <_>7 6 13 1 3. + 0 + 0.0371444188058376 + -4.4054100289940834e-003 + 0.4415973126888275 + <_> + + <_> + + + + <_>0 5 13 3 -1. + <_>0 6 13 1 3. + 0 + -0.0499489493668079 + -0.8040006160736084 + 9.0302517637610435e-003 + <_> + + <_> + + + + <_>4 0 13 6 -1. + <_>4 2 13 2 3. + 0 + -0.0185588598251343 + 0.1855690032243729 + -0.0266484804451466 + -1.3472950458526611 + 26 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 8 4 -1. + <_>4 2 4 4 2. + 0 + 0.0591064691543579 + -0.1939579993486404 + 0.2727208137512207 + <_> + + <_> + + + + <_>4 7 12 4 -1. + <_>4 9 12 2 2. + 0 + 0.0267840195447207 + -0.4209322929382324 + 0.1233024001121521 + <_> + + <_> + + + + <_>2 12 10 8 -1. + <_>2 12 5 4 2. + <_>7 16 5 4 2. + 0 + 8.6407009512186050e-003 + -0.3023687005043030 + 0.1315350979566574 + <_> + + <_> + + + + <_>9 4 2 14 -1. + <_>9 11 2 7 2. + 0 + -1.1792869772762060e-003 + 0.0827135369181633 + -0.3514054119586945 + <_> + + <_> + + + + <_>4 4 3 10 -1. + <_>4 9 3 5 2. + 0 + -2.2481461055576801e-003 + -0.5132396817207336 + 0.0546146109700203 + <_> + + <_> + + + + <_>3 0 15 2 -1. + <_>3 1 15 1 2. 
+ 0 + 5.7527530007064342e-003 + -0.1924300938844681 + 0.1387203037738800 + <_> + + <_> + + + + <_>0 12 8 6 -1. + <_>0 14 8 2 3. + 0 + 0.0100340200588107 + 0.0607736818492413 + -0.3163137137889862 + <_> + + <_> + + + + <_>4 18 13 2 -1. + <_>4 19 13 1 2. + 0 + -3.2057110220193863e-003 + 0.1347106993198395 + -0.1633301973342896 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 12 8 2 2. + <_>10 14 8 2 2. + 0 + 0.0138036301359534 + 0.0745902881026268 + -0.2775141894817352 + <_> + + <_> + + + + <_>5 3 11 9 -1. + <_>5 6 11 3 3. + 0 + -0.1921301037073135 + 0.2689034044742584 + -0.0665529072284698 + <_> + + <_> + + + + <_>0 2 20 10 -1. + <_>0 7 20 5 2. + 0 + -0.0702798217535019 + -0.3287015855312347 + 0.0499120391905308 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>10 9 4 4 2. + <_>6 13 4 4 2. + 0 + 0.0315196700394154 + 0.0358657017350197 + -0.5048919916152954 + <_> + + <_> + + + + <_>3 8 6 10 -1. + <_>3 8 3 5 2. + <_>6 13 3 5 2. + 0 + -0.0111644202843308 + -0.2742295861244202 + 0.0739491730928421 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 6.1416681855916977e-003 + -0.0879447981715202 + 0.1549274027347565 + <_> + + <_> + + + + <_>1 0 18 6 -1. + <_>7 0 6 6 3. + 0 + 0.2518314123153687 + -0.0936058536171913 + 0.1882757991552353 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0195243991911411 + -0.2873350083827972 + 0.0491477698087692 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -0.0216894894838333 + -0.3341565132141113 + 0.0484509915113449 + <_> + + <_> + + + + <_>6 5 9 5 -1. + <_>9 5 3 5 3. + 0 + 0.0340999104082584 + -0.1477680057287216 + 0.1132235974073410 + <_> + + <_> + + + + <_>1 7 18 4 -1. + <_>1 7 9 2 2. + <_>10 9 9 2 2. + 0 + -0.0203775502741337 + -0.2977840900421143 + 0.0567955411970615 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + 0.0239865407347679 + -0.0551398396492004 + 0.3567248880863190 + <_> + + <_> + + + + <_>7 6 5 8 -1. + <_>7 10 5 4 2. + 0 + -0.0145788900554180 + -0.3359586894512177 + 0.0497763305902481 + <_> + + <_> + + + + <_>4 9 12 4 -1. + <_>8 9 4 4 3. + 0 + -5.4530607303604484e-004 + 0.1490631997585297 + -0.1267461925745010 + <_> + + <_> + + + + <_>4 4 12 8 -1. + <_>8 4 4 8 3. + 0 + 3.0076410621404648e-003 + -0.3865425884723663 + 0.0373385101556778 + <_> + + <_> + + + + <_>12 10 7 4 -1. + <_>12 12 7 2 2. + 0 + 6.1654142336919904e-004 + 0.0703506171703339 + -0.2776953876018524 + <_> + + <_> + + + + <_>4 0 8 8 -1. + <_>4 0 4 4 2. + <_>8 4 4 4 2. + 0 + 0.0514610782265663 + 0.0276138596236706 + -0.4910759031772614 + <_> + + <_> + + + + <_>13 8 7 6 -1. + <_>13 10 7 2 3. + 0 + 0.0556076392531395 + 0.0276269391179085 + -0.2961547970771790 + <_> + + <_> + + + + <_>1 5 12 4 -1. + <_>5 5 4 4 3. + 0 + 0.0297090299427509 + 0.0659616366028786 + -0.2050871998071671 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 0.0340468287467957 + -0.0389025807380676 + 0.2468100041151047 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 0.0248078498989344 + 0.0350155197083950 + -0.4140163958072662 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0407481603324413 + 0.0429677292704582 + -0.3204385936260223 + <_> + + <_> + + + + <_>1 3 14 4 -1. + <_>1 3 7 2 2. + <_>8 5 7 2 2. + 0 + 0.0106646595522761 + 0.0569528900086880 + -0.2474599927663803 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + -0.0630903691053391 + 0.1689924001693726 + -0.0186929106712341 + <_> + + <_> + + + + <_>0 0 8 8 -1. + <_>0 0 4 4 2. + <_>4 4 4 4 2. 
+ 0 + 0.0343711897730827 + -0.0475467517971992 + 0.3278163969516754 + <_> + + <_> + + + + <_>7 12 10 8 -1. + <_>7 16 10 4 2. + 0 + -0.1251811981201172 + -0.5628297924995422 + 0.0137214595451951 + <_> + + <_> + + + + <_>0 11 13 3 -1. + <_>0 12 13 1 3. + 0 + -0.0222737099975348 + 0.2845293879508972 + -0.0473347418010235 + <_> + + <_> + + + + <_>10 12 7 8 -1. + <_>10 16 7 4 2. + 0 + 3.1560619827359915e-003 + 0.0670930668711662 + -0.1577761024236679 + <_> + + <_> + + + + <_>1 2 13 2 -1. + <_>1 3 13 1 2. + 0 + -8.5235182195901871e-003 + -0.4540449082851410 + 0.0302389003336430 + <_> + + <_> + + + + <_>6 15 13 3 -1. + <_>6 16 13 1 3. + 0 + 9.4529008492827415e-003 + -0.0550230406224728 + 0.1402536034584045 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + -0.0152680901810527 + -0.4103938937187195 + 0.0331609100103378 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 0.0106658302247524 + -0.1171678006649017 + 0.0959433987736702 + <_> + + <_> + + + + <_>0 15 20 4 -1. + <_>0 15 10 2 2. + <_>10 17 10 2 2. + 0 + -0.0182115696370602 + -0.2485010027885437 + 0.0677136331796646 + <_> + + <_> + + + + <_>4 4 16 4 -1. + <_>4 6 16 2 2. + 0 + 2.9094598721712828e-004 + 0.0499810092151165 + -0.2229803949594498 + <_> + + <_> + + + + <_>7 5 6 11 -1. + <_>9 5 2 11 3. + 0 + 1.2524049961939454e-003 + -0.2356739044189453 + 0.0600581392645836 + <_> + + <_> + + + + <_>11 10 8 10 -1. + <_>15 10 4 5 2. + <_>11 15 4 5 2. + 0 + -0.1020013019442558 + 0.4681766927242279 + -0.0140468701720238 + <_> + + <_> + + + + <_>1 4 10 6 -1. + <_>1 4 5 3 2. + <_>6 7 5 3 2. + 0 + -0.0538033209741116 + -0.3887513875961304 + 0.0385331511497498 + <_> + + <_> + + + + <_>7 7 13 2 -1. + <_>7 8 13 1 2. + 0 + 0.0359198190271854 + 0.0176877491176128 + -0.6314917206764221 + <_> + + <_> + + + + <_>8 6 3 13 -1. + <_>9 6 1 13 3. + 0 + -9.9846003577113152e-003 + 0.2391439974308014 + -0.0584900006651878 + <_> + + <_> + + + + <_>5 4 13 3 -1. + <_>5 5 13 1 3. + 0 + 0.0221579093486071 + -0.0448142215609550 + 0.1942324042320252 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0142407398670912 + -0.3767049908638001 + 0.0349290482699871 + <_> + + <_> + + + + <_>12 10 8 10 -1. + <_>16 10 4 5 2. + <_>12 15 4 5 2. + 0 + -0.0591504797339439 + 0.1681668013334274 + -0.0352320000529289 + <_> + + <_> + + + + <_>0 11 18 4 -1. + <_>0 11 9 2 2. + <_>9 13 9 2 2. + 0 + 0.0360742285847664 + 0.0228684898465872 + -0.5782889723777771 + <_> + + <_> + + + + <_>12 10 8 10 -1. + <_>16 10 4 5 2. + <_>12 15 4 5 2. + 0 + 0.0576923005282879 + -0.0210031792521477 + 0.3075096905231476 + <_> + + <_> + + + + <_>0 10 8 10 -1. + <_>0 10 4 5 2. + <_>4 15 4 5 2. + 0 + -0.0566193982958794 + 0.2338367998600006 + -0.0550032481551170 + <_> + + <_> + + + + <_>7 6 12 14 -1. + <_>13 6 6 7 2. + <_>7 13 6 7 2. + 0 + -0.0106975696980953 + -0.1323641985654831 + 0.0915368273854256 + <_> + + <_> + + + + <_>1 10 7 4 -1. + <_>1 12 7 2 2. + 0 + 4.2940411367453635e-004 + 0.0523620583117008 + -0.2347017973661423 + <_> + + <_> + + + + <_>12 10 4 7 -1. + <_>12 10 2 7 2. + 0 + 3.9490307681262493e-003 + 0.0585836209356785 + -0.0825335979461670 + <_> + + <_> + + + + <_>0 0 20 2 -1. + <_>10 0 10 2 2. + 0 + 0.0298104304820299 + 0.0716840475797653 + -0.1693128049373627 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -0.0114629101008177 + -0.2641035914421082 + 0.0446875803172588 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 0.0229963902384043 + 0.0329921804368496 + -0.3435899019241333 + <_> + + <_> + + + + <_>12 10 4 7 -1. 
+ <_>12 10 2 7 2. + 0 + -0.0567926093935966 + -0.7576050758361816 + 2.4003670550882816e-003 + <_> + + <_> + + + + <_>4 10 4 7 -1. + <_>6 10 2 7 2. + 0 + -4.4709402136504650e-003 + 0.1627760976552963 + -0.0681930631399155 + <_> + + <_> + + + + <_>12 0 2 14 -1. + <_>12 0 1 14 2. + 0 + -0.0123949898406863 + -0.4360333085060120 + 0.0284161400049925 + <_> + + <_> + + + + <_>4 2 12 17 -1. + <_>10 2 6 17 2. + 0 + 0.2918559014797211 + -0.0333005301654339 + 0.3986696898937225 + <_> + + <_> + + + + <_>12 12 6 7 -1. + <_>12 12 3 7 2. + 0 + 3.3633329439908266e-003 + -0.1097209006547928 + 0.0569312497973442 + <_> + + <_> + + + + <_>1 9 10 10 -1. + <_>6 9 5 10 2. + 0 + -0.0351752601563931 + -0.5721371769905090 + 0.0209034904837608 + <_> + + <_> + + + + <_>4 6 13 3 -1. + <_>4 7 13 1 3. + 0 + -0.0120448395609856 + 0.0910905227065086 + -0.1194794997572899 + <_> + + <_> + + + + <_>7 6 5 9 -1. + <_>7 9 5 3 3. + 0 + 6.5466752275824547e-003 + 0.2251234054565430 + -0.0583094507455826 + <_> + + <_> + + + + <_>9 5 4 14 -1. + <_>11 5 2 7 2. + <_>9 12 2 7 2. + 0 + -3.3635019790381193e-003 + 0.0831234529614449 + -0.1614429950714111 + <_> + + <_> + + + + <_>8 5 4 14 -1. + <_>8 5 2 7 2. + <_>10 12 2 7 2. + 0 + -0.0234512500464916 + 0.2511880993843079 + -0.0480303317308426 + <_> + + <_> + + + + <_>9 3 6 12 -1. + <_>11 3 2 12 3. + 0 + 0.0193560998886824 + 0.0581345893442631 + -0.2079125046730042 + <_> + + <_> + + + + <_>5 3 6 12 -1. + <_>7 3 2 12 3. + 0 + -0.0899949520826340 + -0.7506849169731140 + 0.0141698596999049 + <_> + + <_> + + + + <_>4 10 14 4 -1. + <_>11 10 7 2 2. + <_>4 12 7 2 2. + 0 + 0.0128882601857185 + 0.0337525717914104 + -0.2571501135826111 + <_> + + <_> + + + + <_>2 10 14 4 -1. + <_>2 10 7 2 2. + <_>9 12 7 2 2. + 0 + 0.0189611706882715 + 0.0347173810005188 + -0.3602784872055054 + <_> + + <_> + + + + <_>7 4 6 7 -1. + <_>9 4 2 7 3. + 0 + -0.0208355505019426 + 0.5785130858421326 + -0.0221113096922636 + <_> + + <_> + + + + <_>1 14 13 2 -1. + <_>1 15 13 1 2. + 0 + 0.0100187798961997 + -0.0397758483886719 + 0.2681483924388886 + <_> + + <_> + + + + <_>7 14 13 2 -1. + <_>7 15 13 1 2. + 0 + -8.7516820058226585e-003 + 0.1125781983137131 + -0.0485382787883282 + <_> + + <_> + + + + <_>4 13 12 4 -1. + <_>4 15 12 2 2. + 0 + -0.0623667500913143 + -0.6608911156654358 + 0.0168521404266357 + <_> + + <_> + + + + <_>12 13 7 4 -1. + <_>12 15 7 2 2. + 0 + -0.0195821803063154 + -0.2118254005908966 + 0.0357029885053635 + <_> + + <_> + + + + <_>1 13 7 4 -1. + <_>1 15 7 2 2. + 0 + 2.2675599902868271e-003 + 0.0612129196524620 + -0.2004884928464890 + <_> + + <_> + + + + <_>10 6 3 14 -1. + <_>11 6 1 14 3. + 0 + -0.0465584583580494 + -0.5645493865013123 + 9.2866625636816025e-003 + <_> + + <_> + + + + <_>7 6 3 14 -1. + <_>8 6 1 14 3. + 0 + -7.7152079902589321e-003 + 0.1503991931676865 + -0.0833281502127647 + <_> + + <_> + + + + <_>8 13 6 7 -1. + <_>10 13 2 7 3. + 0 + 0.0415516681969166 + 0.0262477397918701 + -0.3234752118587494 + <_> + + <_> + + + + <_>2 5 6 10 -1. + <_>2 5 3 5 2. + <_>5 10 3 5 2. + 0 + -0.0217890795320272 + -0.3237582147121429 + 0.0317261889576912 + <_> + + <_> + + + + <_>15 3 3 16 -1. + <_>16 3 1 16 3. + 0 + 1.9698198884725571e-003 + -0.0925642475485802 + 0.1082341000437737 + <_> + + <_> + + + + <_>2 3 3 16 -1. + <_>3 3 1 16 3. + 0 + -5.2744988352060318e-003 + -0.1399033069610596 + 0.0771208778023720 + <_> + + <_> + + + + <_>14 0 6 13 -1. + <_>14 0 3 13 2. + 0 + 0.0560076609253883 + -0.1032849997282028 + 0.1145555973052979 + <_> + + <_> + + + + <_>0 0 6 13 -1. + <_>3 0 3 13 2. 
+ 0 + 0.2274103015661240 + 0.0160284508019686 + -0.6814510822296143 + <_> + + <_> + + + + <_>17 6 3 14 -1. + <_>17 13 3 7 2. + 0 + 0.0513623803853989 + -0.0230258107185364 + 0.1544602960348129 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -0.0130170695483685 + -0.3260639905929565 + 0.0328926108777523 + <_> + + <_> + + + + <_>17 6 3 14 -1. + <_>17 13 3 7 2. + 0 + 0.1578202992677689 + -3.9765262044966221e-003 + 0.7776526212692261 + <_> + + <_> + + + + <_>1 10 10 10 -1. + <_>1 10 5 5 2. + <_>6 15 5 5 2. + 0 + -0.0998050868511200 + 0.6860954165458679 + -0.0146481804549694 + <_> + + <_> + + + + <_>0 0 20 10 -1. + <_>0 5 20 5 2. + 0 + 0.3750635087490082 + 0.0149258002638817 + -0.8310546875000000 + <_> + + <_> + + + + <_>2 8 13 3 -1. + <_>2 9 13 1 3. + 0 + -7.9828302841633558e-004 + -0.2016189992427826 + 0.0478976890444756 + <_> + + <_> + + + + <_>7 6 10 14 -1. + <_>7 13 10 7 2. + 0 + -0.2124160975217819 + -0.3440945148468018 + 0.0109504302963614 + <_> + + <_> + + + + <_>0 7 13 2 -1. + <_>0 8 13 1 2. + 0 + 0.0394516810774803 + 0.0139669599011540 + -0.7216311097145081 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0291855093091726 + -0.2746245861053467 + 0.0354969203472137 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + 0.0270556006580591 + -0.0469957403838634 + 0.2928943037986755 + <_> + + <_> + + + + <_>8 6 6 10 -1. + <_>10 6 2 10 3. + 0 + -0.0260523501783609 + 0.2075203955173492 + -0.0363530814647675 + <_> + + <_> + + + + <_>3 13 14 6 -1. + <_>3 13 7 3 2. + <_>10 16 7 3 2. + 0 + 0.0572162196040154 + 0.0188957396894693 + -0.5714390873908997 + <_> + + <_> + + + + <_>10 1 4 19 -1. + <_>10 1 2 19 2. + 0 + -0.0171518400311470 + -0.3300957083702087 + 0.0385286286473274 + <_> + + <_> + + + + <_>1 10 18 6 -1. + <_>1 12 18 2 3. + 0 + -0.1230439990758896 + -0.7831639051437378 + 0.0116793904453516 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. + 0 + 0.0567861609160900 + 0.0110638197511435 + -0.5352609753608704 + <_> + + <_> + + + + <_>6 1 4 19 -1. + <_>8 1 2 19 2. + 0 + 0.1194284036755562 + 9.5137851312756538e-003 + -0.9063721895217896 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0677071437239647 + -0.0392275191843510 + 0.2817656099796295 + <_> + + <_> + + + + <_>0 5 20 2 -1. + <_>0 6 20 1 2. + 0 + -0.0549188815057278 + -0.6206169128417969 + 0.0160722695291042 + <_> + + <_> + + + + <_>6 14 13 3 -1. + <_>6 15 13 1 3. + 0 + 9.2878006398677826e-003 + -0.0503394901752472 + 0.1904010027647018 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0131414895877242 + 0.1862982958555222 + -0.0755285471677780 + <_> + + <_> + + + + <_>12 0 4 7 -1. + <_>12 0 2 7 2. + 0 + 2.9876120970584452e-004 + -0.1616346985101700 + 0.0535895004868507 + <_> + + <_> + + + + <_>0 2 18 8 -1. + <_>6 2 6 8 3. + 0 + 0.1015359982848167 + 0.1845827996730804 + -0.0625706166028976 + <_> + + <_> + + + + <_>10 0 10 9 -1. + <_>10 0 5 9 2. + 0 + 0.2720572948455811 + 0.0137624796479940 + -0.4936406016349793 + <_> + + <_> + + + + <_>0 0 10 9 -1. + <_>5 0 5 9 2. + 0 + 0.0587302111089230 + -0.2393368035554886 + 0.0791668072342873 + <_> + + <_> + + + + <_>12 13 7 6 -1. + <_>12 15 7 2 3. + 0 + 0.0196942593902349 + 0.0371952801942825 + -0.2610926032066345 + <_> + + <_> + + + + <_>1 13 5 6 -1. + <_>1 16 5 3 2. + 0 + -1.0566900164121762e-004 + 0.0670529976487160 + -0.1651581972837448 + <_> + + <_> + + + + <_>8 4 6 10 -1. + <_>11 4 3 5 2. + <_>8 9 3 5 2. + 0 + -0.0197612792253494 + 0.0864436924457550 + -0.0686579719185829 + <_> + + <_> + + + + <_>4 5 12 10 -1. + <_>4 5 6 5 2. 
+ <_>10 10 6 5 2. + 0 + 0.0531685091555119 + 0.0297677908092737 + -0.3522577881813049 + <_> + + <_> + + + + <_>13 9 5 9 -1. + <_>13 12 5 3 3. + 0 + 0.0260710697621107 + 0.0252163596451283 + -0.1415936946868897 + <_> + + <_> + + + + <_>0 0 2 18 -1. + <_>1 0 1 18 2. + 0 + -0.0287206899374723 + 0.3594140112400055 + -0.0291996207088232 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 0.0129892500117421 + 0.0400097705423832 + -0.1997303962707520 + <_> + + <_> + + + + <_>1 12 18 8 -1. + <_>1 12 9 4 2. + <_>10 16 9 4 2. + 0 + -0.0581760406494141 + 0.2934589982032776 + -0.0439675301313400 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>10 10 3 5 2. + <_>7 15 3 5 2. + 0 + 0.0282851401716471 + 0.0374574288725853 + -0.3136174976825714 + <_> + + <_> + + + + <_>4 3 10 4 -1. + <_>4 5 10 2 2. + 0 + 0.0427012182772160 + -0.0209877695888281 + 0.5084577798843384 + <_> + + <_> + + + + <_>6 1 9 6 -1. + <_>6 3 9 2 3. + 0 + 0.0247636009007692 + -0.1186925023794174 + 0.0944573506712914 + <_> + + <_> + + + + <_>5 4 10 10 -1. + <_>5 9 10 5 2. + 0 + -2.8076129965484142e-003 + -0.2324977964162827 + 0.0452227182686329 + <_> + + <_> + + + + <_>8 10 5 8 -1. + <_>8 14 5 4 2. + 0 + -0.0755839198827744 + -0.4590702950954437 + 0.0129322800785303 + <_> + + <_> + + + + <_>3 8 13 10 -1. + <_>3 13 13 5 2. + 0 + 0.0837968215346336 + -0.0158016309142113 + 0.6867048144340515 + <_> + + <_> + + + + <_>12 8 5 12 -1. + <_>12 14 5 6 2. + 0 + -0.0370724014937878 + 0.0541460290551186 + -0.0422074496746063 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 0.0246910694986582 + 0.0260976795107126 + -0.3776040077209473 + <_> + + <_> + + + + <_>12 0 2 17 -1. + <_>12 0 1 17 2. + 0 + -0.0277439299970865 + -0.7863150835037231 + 4.7534159384667873e-003 + <_> + + <_> + + + + <_>6 0 2 17 -1. + <_>7 0 1 17 2. + 0 + 0.0191199705004692 + 0.0264977607876062 + -0.3648996949195862 + <_> + + <_> + + + + <_>0 5 20 6 -1. + <_>0 7 20 2 3. + 0 + 3.3773269969969988e-003 + 0.0319660902023315 + -0.3234676122665405 + <_> + + <_> + + + + <_>0 1 2 13 -1. + <_>1 1 1 13 2. + 0 + 0.0198768191039562 + -0.0351284183561802 + 0.2907829880714417 + <_> + + <_> + + + + <_>12 0 5 15 -1. + <_>12 5 5 5 3. + 0 + 0.1003564000129700 + 0.0146078402176499 + -0.5281224250793457 + <_> + + <_> + + + + <_>3 0 5 15 -1. + <_>3 5 5 5 3. + 0 + -0.0161632895469666 + -0.1015814021229744 + 0.1179649978876114 + <_> + + <_> + + + + <_>10 3 9 4 -1. + <_>10 5 9 2 2. + 0 + 0.0102533800527453 + 0.0360244102776051 + -0.1652078032493591 + <_> + + <_> + + + + <_>3 5 14 2 -1. + <_>3 6 14 1 2. + 0 + 9.0665705502033234e-003 + -0.0347317010164261 + 0.3732720017433167 + <_> + + <_> + + + + <_>3 2 14 6 -1. + <_>10 2 7 3 2. + <_>3 5 7 3 2. + 0 + 0.0301249008625746 + 0.0517584793269634 + -0.2358216047286987 + <_> + + <_> + + + + <_>6 4 8 6 -1. + <_>6 6 8 2 3. + 0 + -6.6870311275124550e-003 + 0.0433942414820194 + -0.2520298957824707 + <_> + + <_> + + + + <_>11 3 4 8 -1. + <_>11 3 2 8 2. + 0 + -2.0257479045540094e-003 + -0.1247901022434235 + 0.0393095314502716 + <_> + + <_> + + + + <_>8 5 3 13 -1. + <_>9 5 1 13 3. + 0 + 0.0232540704309940 + -0.0474469102919102 + 0.2328770011663437 + <_> + + <_> + + + + <_>9 0 3 13 -1. + <_>10 0 1 13 3. + 0 + 0.0238671991974115 + -0.0274216700345278 + 0.1463097035884857 + <_> + + <_> + + + + <_>5 3 6 7 -1. + <_>7 3 2 7 3. + 0 + -0.0405230000615120 + -0.4047296047210693 + 0.0304159596562386 + <_> + + <_> + + + + <_>2 6 18 5 -1. + <_>8 6 6 5 3. + 0 + 0.1995820999145508 + 0.0220494698733091 + -0.4655848145484924 + <_> + + <_> + + + + <_>6 8 8 4 -1. 
+ <_>10 8 4 4 2. + 0 + -0.0129905901849270 + -0.1797062009572983 + 0.0588749386370182 + <_> + + <_> + + + + <_>8 4 6 10 -1. + <_>11 4 3 5 2. + <_>8 9 3 5 2. + 0 + 0.0256239492446184 + 9.9402610212564468e-003 + -0.2657527923583984 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>4 5 6 3 2. + <_>10 8 6 3 2. + 0 + -0.0320048704743385 + 0.2508738040924072 + -0.0462914705276489 + <_> + + <_> + + + + <_>9 2 6 10 -1. + <_>12 2 3 5 2. + <_>9 7 3 5 2. + 0 + 0.0187584199011326 + -0.0220382306724787 + 0.0944074317812920 + <_> + + <_> + + + + <_>3 12 7 8 -1. + <_>3 16 7 4 2. + 0 + 0.0454256683588028 + 0.0233715698122978 + -0.4839339852333069 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + 0.0156705807894468 + -0.0551098585128784 + 0.1990783065557480 + <_> + + <_> + + + + <_>6 13 6 7 -1. + <_>8 13 2 7 3. + 0 + 0.0513369813561440 + 0.0264254193753004 + -0.4408279061317444 + <_> + + <_> + + + + <_>2 6 18 9 -1. + <_>2 9 18 3 3. + 0 + 0.0408841706812382 + 0.2007120996713638 + -0.0348877795040607 + <_> + + <_> + + + + <_>1 8 16 2 -1. + <_>9 8 8 2 2. + 0 + 0.0691655576229095 + -0.0293033104389906 + 0.3493682146072388 + <_> + + <_> + + + + <_>5 2 11 4 -1. + <_>5 4 11 2 2. + 0 + 0.0479671582579613 + -0.0244169607758522 + 0.2701865136623383 + <_> + + <_> + + + + <_>0 12 10 8 -1. + <_>0 12 5 4 2. + <_>5 16 5 4 2. + 0 + 0.0440684407949448 + -0.0404972694814205 + 0.2438226938247681 + <_> + + <_> + + + + <_>3 1 15 8 -1. + <_>8 1 5 8 3. + 0 + -0.1028755009174347 + 0.7110528945922852 + -9.9055245518684387e-003 + <_> + + <_> + + + + <_>2 1 15 8 -1. + <_>7 1 5 8 3. + 0 + 0.2240774035453796 + -0.0549469999969006 + 0.1985343992710114 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -9.6570551395416260e-003 + -0.2505022883415222 + 0.0374109894037247 + <_> + + <_> + + + + <_>3 4 14 9 -1. + <_>3 7 14 3 3. + 0 + 0.0791997015476227 + -0.0221475698053837 + 0.4877107143402100 + <_> + + <_> + + + + <_>4 3 15 5 -1. + <_>9 3 5 5 3. + 0 + 0.0459831990301609 + 0.0822297334671021 + -0.0393357500433922 + <_> + + <_> + + + + <_>0 2 20 12 -1. + <_>0 8 20 6 2. + 0 + 0.4267044961452484 + 0.0171328000724316 + -0.5399625897407532 + <_> + + <_> + + + + <_>4 1 12 4 -1. + <_>8 1 4 4 3. + 0 + 0.1541399061679840 + 0.0119023500010371 + -0.6853371858596802 + <_> + + <_> + + + + <_>0 2 20 12 -1. + <_>0 8 20 6 2. + 0 + -0.1769988983869553 + -0.6311383247375488 + 0.0125452000647783 + <_> + + <_> + + + + <_>10 11 4 9 -1. + <_>10 11 2 9 2. + 0 + -0.0237698294222355 + -0.1428142935037613 + 0.0142843499779701 + <_> + + <_> + + + + <_>2 1 12 15 -1. + <_>6 1 4 15 3. + 0 + -0.0832902863621712 + 0.3643339872360230 + -0.0252874307334423 + <_> + + <_> + + + + <_>10 9 10 3 -1. + <_>10 9 5 3 2. + 0 + -3.0276349280029535e-003 + -0.1750126034021378 + 0.0355286002159119 + <_> + + <_> + + + + <_>0 9 10 3 -1. + <_>5 9 5 3 2. + 0 + 9.3518232461065054e-004 + -0.3431726992130280 + 0.0281960200518370 + <_> + + <_> + + + + <_>6 1 8 14 -1. + <_>6 8 8 7 2. + 0 + 8.6792530491948128e-003 + 0.0918547883629799 + -0.1134980022907257 + <_> + + <_> + + + + <_>6 6 6 12 -1. + <_>6 6 3 6 2. + <_>9 12 3 6 2. + 0 + -4.3289531022310257e-003 + 0.0765605270862579 + -0.1285037994384766 + <_> + + <_> + + + + <_>10 11 4 9 -1. + <_>10 11 2 9 2. + 0 + 0.0614850893616676 + 4.0065501816570759e-003 + -0.4279873073101044 + <_> + + <_> + + + + <_>6 11 4 9 -1. + <_>8 11 2 9 2. + 0 + -0.0231085699051619 + -0.3299978971481323 + 0.0312281008809805 + <_> + + <_> + + + + <_>8 9 6 5 -1. + <_>8 9 3 5 2. 
+ 0 + -6.3490739557892084e-004 + 0.0533187612891197 + -0.0603079386055470 + <_> + + <_> + + + + <_>6 9 6 5 -1. + <_>9 9 3 5 2. + 0 + -4.1278889402747154e-003 + 0.1502967029809952 + -0.0898057967424393 + <_> + + <_> + + + + <_>6 11 9 6 -1. + <_>9 11 3 6 3. + 0 + 0.1540897041559219 + -2.3309229873120785e-003 + 0.9694647789001465 + <_> + + <_> + + + + <_>5 2 6 10 -1. + <_>5 2 3 5 2. + <_>8 7 3 5 2. + 0 + 0.0180837400257587 + -0.0466745197772980 + 0.2194194942712784 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + -0.0600229687988758 + 0.3728309869766235 + -0.0136379403993487 + <_> + + <_> + + + + <_>0 1 9 18 -1. + <_>3 1 3 18 3. + 0 + -0.1602504998445511 + 0.3944236040115356 + -0.0248086098581553 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -0.0232202000916004 + -0.2835206985473633 + 0.0384564697742462 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + 0.0323538295924664 + 0.0301975402981043 + -0.3537169992923737 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -0.0129307499155402 + -0.1827528029680252 + 0.0402194298803806 + <_> + + <_> + + + + <_>3 1 6 10 -1. + <_>3 1 3 5 2. + <_>6 6 3 5 2. + 0 + -2.9022840317338705e-003 + 0.0575834400951862 + -0.1817508041858673 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + 0.0370424091815948 + 0.0234715696424246 + -0.3722204864025116 + <_> + + <_> + + + + <_>1 3 18 12 -1. + <_>1 3 9 6 2. + <_>10 9 9 6 2. + 0 + -0.1437146067619324 + -0.6735327839851379 + 0.0137684596702456 + <_> + + <_> + + + + <_>7 15 13 3 -1. + <_>7 16 13 1 3. + 0 + -0.0107140997424722 + 0.2307460010051727 + -0.0598985813558102 + <_> + + <_> + + + + <_>1 15 13 3 -1. + <_>1 16 13 1 3. + 0 + 0.0113706998527050 + -0.0558591000735760 + 0.2160415947437286 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -0.0338293500244617 + -0.3286856114864349 + 0.0167437195777893 + <_> + + <_> + + + + <_>2 14 7 6 -1. + <_>2 16 7 2 3. + 0 + 0.0364060588181019 + 0.0235128104686737 + -0.4799953997135162 + <_> + + <_> + + + + <_>6 16 14 4 -1. + <_>13 16 7 2 2. + <_>6 18 7 2 2. + 0 + -0.0398533083498478 + 0.3038840889930725 + -0.0223882105201483 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + 0.0238576401025057 + -0.0439601391553879 + 0.2502183020114899 + <_> + + <_> + + + + <_>10 4 6 8 -1. + <_>12 4 2 8 3. + 0 + -0.0861493274569511 + -0.9264122247695923 + 0.0101808495819569 + <_> + + <_> + + + + <_>6 0 3 13 -1. + <_>7 0 1 13 3. + 0 + -0.0273604597896338 + -0.4533107876777649 + 0.0185172501951456 + <_> + + <_> + + + + <_>11 9 3 10 -1. + <_>11 14 3 5 2. + 0 + 4.6891667880117893e-003 + 0.0149831101298332 + -0.0986908674240112 + <_> + + <_> + + + + <_>1 8 14 3 -1. + <_>1 9 14 1 3. + 0 + 0.0361409597098827 + 0.0212403293699026 + -0.4227561056613922 + <_> + + <_> + + + + <_>4 7 12 6 -1. + <_>4 9 12 2 3. + 0 + 0.1071441993117333 + -0.0415921695530415 + 0.2488086968660355 + <_> + + <_> + + + + <_>6 8 8 9 -1. + <_>6 11 8 3 3. + 0 + -0.0120244501158595 + -0.1890603005886078 + 0.0552909001708031 + <_> + + <_> + + + + <_>4 13 12 4 -1. + <_>4 15 12 2 2. + 0 + 0.0216710902750492 + -0.0371640883386135 + 0.2989633083343506 + <_> + + <_> + + + + <_>1 12 18 2 -1. + <_>1 13 18 1 2. + 0 + -3.3205719664692879e-003 + -0.0918376892805099 + 0.1181083992123604 + <_> + + <_> + + + + <_>11 9 4 8 -1. + <_>11 13 4 4 2. + 0 + -0.0842564031481743 + -0.5493528246879578 + 4.6934271231293678e-003 + <_> + + <_> + + + + <_>5 9 4 8 -1. + <_>5 13 4 4 2. 
+ 0 + -2.7107410132884979e-003 + 0.0523011796176434 + -0.2193256020545960 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>12 6 5 3 2. + <_>7 9 5 3 2. + 0 + -1.9661630503833294e-003 + 0.0695228502154350 + -0.1236959993839264 + <_> + + <_> + + + + <_>5 11 9 6 -1. + <_>8 11 3 6 3. + 0 + 0.1083585992455483 + -0.0160284396260977 + 0.6753829717636108 + <_> + + <_> + + + + <_>4 3 14 2 -1. + <_>4 3 7 2 2. + 0 + -0.0406615696847439 + 0.2823987007141113 + -0.0186430793255568 + <_> + + <_> + + + + <_>2 12 9 6 -1. + <_>5 12 3 6 3. + 0 + 9.4869043678045273e-003 + -0.1420473009347916 + 0.0742181763052940 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>17 1 3 6 2. + <_>14 7 3 6 2. + 0 + -8.1196203827857971e-003 + 0.1273310929536820 + -0.0753254294395447 + <_> + + <_> + + + + <_>0 1 6 12 -1. + <_>0 1 3 6 2. + <_>3 7 3 6 2. + 0 + -0.0367189086973667 + 0.2520970106124878 + -0.0386423617601395 + <_> + + <_> + + + + <_>12 0 8 6 -1. + <_>12 2 8 2 3. + 0 + 0.0425158515572548 + 0.0346135087311268 + -0.3140614926815033 + <_> + + <_> + + + + <_>0 16 18 2 -1. + <_>0 17 18 1 2. + 0 + -0.0164842493832111 + -0.3462293148040772 + 0.0264703407883644 + <_> + + <_> + + + + <_>5 16 11 4 -1. + <_>5 18 11 2 2. + 0 + 0.0186085999011993 + 0.0311258397996426 + -0.2383791953325272 + <_> + + <_> + + + + <_>2 16 13 3 -1. + <_>2 17 13 1 3. + 0 + -0.0108720604330301 + 0.2306122034788132 + -0.0434693805873394 + <_> + + <_> + + + + <_>14 9 6 11 -1. + <_>16 9 2 11 3. + 0 + -0.0407280810177326 + 0.1325888037681580 + -0.0388332903385162 + -1.5900419950485229 + 27 + -1 + <_> + + + <_> + + <_> + + + + <_>1 0 18 4 -1. + <_>7 0 6 4 3. + 0 + 0.0278026703745127 + -0.1853515952825546 + 0.2377786040306091 + <_> + + <_> + + + + <_>11 1 8 6 -1. + <_>11 3 8 2 3. + 0 + 1.6392730176448822e-003 + -0.2678762972354889 + 0.1173330992460251 + <_> + + <_> + + + + <_>0 11 13 3 -1. + <_>0 12 13 1 3. + 0 + -3.0419689137488604e-003 + 0.1955285966396332 + -0.1324001997709274 + <_> + + <_> + + + + <_>10 10 4 8 -1. + <_>10 14 4 4 2. + 0 + -2.7744288672693074e-004 + 0.0607018209993839 + -0.3046542108058929 + <_> + + <_> + + + + <_>5 5 9 15 -1. + <_>8 5 3 15 3. + 0 + -2.7942769229412079e-003 + -0.2537094056606293 + 0.0761478468775749 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + 7.4005699716508389e-003 + 0.0656234920024872 + -0.3012852072715759 + <_> + + <_> + + + + <_>0 15 13 2 -1. + <_>0 16 13 1 2. + 0 + 1.1316470336169004e-003 + -0.1323293000459671 + 0.1362251937389374 + <_> + + <_> + + + + <_>11 1 8 6 -1. + <_>11 3 8 2 3. + 0 + -8.7306648492813110e-003 + -0.1024622991681099 + 0.0106498803943396 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -6.4327879808843136e-003 + -0.2130178958177567 + 0.0774253979325294 + <_> + + <_> + + + + <_>9 6 6 7 -1. + <_>11 6 2 7 3. + 0 + -1.3303949963301420e-003 + 0.0962342470884323 + -0.1708600968122482 + <_> + + <_> + + + + <_>5 6 6 7 -1. + <_>7 6 2 7 3. + 0 + -2.3770590778440237e-003 + 0.1165708974003792 + -0.1513576954603195 + <_> + + <_> + + + + <_>6 11 10 6 -1. + <_>11 11 5 3 2. + <_>6 14 5 3 2. + 0 + -5.3865360096096992e-003 + -0.1685196012258530 + 0.0443245582282543 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 10 4 4 2. + <_>10 14 4 4 2. + 0 + -5.6973858736455441e-003 + -0.2470239996910095 + 0.0777353420853615 + <_> + + <_> + + + + <_>11 1 8 6 -1. + <_>11 3 8 2 3. + 0 + 0.0456545203924179 + -0.0166876707226038 + 0.1422211974859238 + <_> + + <_> + + + + <_>4 4 11 10 -1. + <_>4 9 11 5 2. + 0 + -1.4929420103726443e-005 + -0.3272539079189301 + 0.0481421016156673 + <_> + + <_> + + + + <_>11 1 8 6 -1. 
+ <_>11 3 8 2 3. + 0 + -1.7635900294408202e-003 + 0.0701158493757248 + -0.0168644990772009 + <_> + + <_> + + + + <_>1 1 8 6 -1. + <_>1 3 8 2 3. + 0 + 1.9133860478177667e-003 + -0.1957082003355026 + 0.0901691317558289 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 8 4 6 2. + 0 + -1.9309469498693943e-003 + 0.1182428970932961 + -0.1214670985937119 + <_> + + <_> + + + + <_>2 2 16 3 -1. + <_>2 3 16 1 3. + 0 + 9.7775761969387531e-004 + 0.1165720000863075 + -0.1277084946632385 + <_> + + <_> + + + + <_>18 1 2 13 -1. + <_>18 1 1 13 2. + 0 + -5.2643800154328346e-003 + 0.1995836049318314 + -0.0629286766052246 + <_> + + <_> + + + + <_>0 4 5 6 -1. + <_>0 7 5 3 2. + 0 + -2.2730689961463213e-003 + -0.2180469930171967 + 0.0665652900934219 + <_> + + <_> + + + + <_>5 6 13 3 -1. + <_>5 7 13 1 3. + 0 + -3.5128789022564888e-003 + 0.0811142474412918 + -0.1423033028841019 + <_> + + <_> + + + + <_>4 1 6 7 -1. + <_>6 1 2 7 3. + 0 + 2.8102330397814512e-003 + 0.0608847104012966 + -0.2200842946767807 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + -0.0232113599777222 + 0.2318225950002670 + -0.0340142808854580 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -8.7068388238549232e-003 + -0.2069126963615418 + 0.0680041164159775 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + 7.0584798231720924e-003 + -0.1050079986453056 + 0.1261018961668015 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0688782408833504 + 0.4268761873245239 + -0.0313056185841560 + <_> + + <_> + + + + <_>7 2 8 8 -1. + <_>11 2 4 4 2. + <_>7 6 4 4 2. + 0 + -0.0127851497381926 + -0.2026803046464920 + 0.0320057906210423 + <_> + + <_> + + + + <_>5 2 8 8 -1. + <_>5 2 4 4 2. + <_>9 6 4 4 2. + 0 + -4.2242300696671009e-003 + -0.2161968946456909 + 0.0756608322262764 + <_> + + <_> + + + + <_>15 3 4 16 -1. + <_>17 3 2 8 2. + <_>15 11 2 8 2. + 0 + -0.0416606403887272 + 0.3560138046741486 + -0.0365009009838104 + <_> + + <_> + + + + <_>1 12 7 6 -1. + <_>1 14 7 2 3. + 0 + 0.0149832395836711 + 0.0336635597050190 + -0.4301668107509613 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + 1.8940219888463616e-003 + -0.0777856409549713 + 0.1413003951311112 + <_> + + <_> + + + + <_>4 10 4 8 -1. + <_>4 14 4 4 2. + 0 + -1.0271830251440406e-003 + 0.0612920485436916 + -0.1856912970542908 + <_> + + <_> + + + + <_>4 14 13 6 -1. + <_>4 16 13 2 3. + 0 + -0.0104917604476213 + -0.2128003984689713 + 0.0466415695846081 + <_> + + <_> + + + + <_>1 14 14 3 -1. + <_>1 15 14 1 3. + 0 + 4.1263508610427380e-003 + -0.0631134733557701 + 0.2168339937925339 + <_> + + <_> + + + + <_>18 1 2 13 -1. + <_>18 1 1 13 2. + 0 + 0.0212845299392939 + -0.0195413809269667 + 0.4055550098419190 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>1 1 9 2 2. + <_>10 3 9 2 2. + 0 + 6.0370927676558495e-003 + 0.0613228008151054 + -0.1755875051021576 + <_> + + <_> + + + + <_>18 1 2 13 -1. + <_>18 1 1 13 2. + 0 + 2.8550080023705959e-003 + -0.0374029688537121 + 0.0867943763732910 + <_> + + <_> + + + + <_>0 1 2 13 -1. + <_>1 1 1 13 2. + 0 + -0.0308392997831106 + 0.4582639932632446 + -0.0228243190795183 + <_> + + <_> + + + + <_>2 0 18 2 -1. + <_>2 0 9 2 2. + 0 + -0.0126646403223276 + -0.1517917960882187 + 0.0383259095251560 + <_> + + <_> + + + + <_>0 0 6 12 -1. + <_>2 0 2 12 3. + 0 + 8.4788333624601364e-003 + -0.0791644528508186 + 0.1382130980491638 + <_> + + <_> + + + + <_>16 0 4 7 -1. + <_>16 0 2 7 2. + 0 + -9.0271160006523132e-003 + 0.2048342972993851 + -0.0584282390773296 + <_> + + <_> + + + + <_>0 2 5 6 -1. + <_>0 5 5 3 2. 
+ 0 + -5.3999028168618679e-003 + -0.1956387013196945 + 0.0628818199038506 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 4.8698568716645241e-003 + 0.0472694486379623 + -0.2035723030567169 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + -5.6715728715062141e-003 + 0.1623262017965317 + -0.0724731832742691 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + -6.3621107256039977e-004 + -0.1764882951974869 + 0.0615539290010929 + <_> + + <_> + + + + <_>1 0 13 3 -1. + <_>1 1 13 1 3. + 0 + -5.7404721155762672e-003 + -0.2377389073371887 + 0.0484930910170078 + <_> + + <_> + + + + <_>9 0 9 6 -1. + <_>9 2 9 2 3. + 0 + 2.3313059937208891e-003 + -0.0980874672532082 + 0.0767057314515114 + <_> + + <_> + + + + <_>0 4 14 3 -1. + <_>0 5 14 1 3. + 0 + 2.6579289697110653e-003 + -0.1042959019541740 + 0.1327544003725052 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -0.0124264899641275 + -0.1768611967563629 + 0.0787978619337082 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 3.7596069741994143e-003 + 0.0580285005271435 + -0.2023569941520691 + <_> + + <_> + + + + <_>5 4 13 3 -1. + <_>5 5 13 1 3. + 0 + -0.0139418197795749 + 0.2936562895774841 + -0.0310690291225910 + <_> + + <_> + + + + <_>5 0 8 8 -1. + <_>5 4 8 4 2. + 0 + 0.0246055293828249 + -0.0497678406536579 + 0.2044660001993179 + <_> + + <_> + + + + <_>9 0 9 6 -1. + <_>9 2 9 2 3. + 0 + 0.1157227978110313 + 5.7542040012776852e-003 + -0.5578920841217041 + <_> + + <_> + + + + <_>2 0 9 6 -1. + <_>2 2 9 2 3. + 0 + 1.4880299568176270e-003 + -0.1287049949169159 + 0.0861913636326790 + <_> + + <_> + + + + <_>6 6 14 4 -1. + <_>13 6 7 2 2. + <_>6 8 7 2 2. + 0 + -0.0100858695805073 + -0.1871802955865860 + 0.0271437894552946 + <_> + + <_> + + + + <_>1 7 16 4 -1. + <_>1 7 8 2 2. + <_>9 9 8 2 2. + 0 + -4.0125781670212746e-003 + -0.1484356969594955 + 0.0614823512732983 + <_> + + <_> + + + + <_>8 7 4 7 -1. + <_>8 7 2 7 2. + 0 + 0.0452412888407707 + -0.0221871994435787 + 0.4902274906635284 + <_> + + <_> + + + + <_>5 9 9 5 -1. + <_>8 9 3 5 3. + 0 + -5.4588477360084653e-004 + 0.1074075028300285 + -0.0947847515344620 + <_> + + <_> + + + + <_>4 5 12 4 -1. + <_>8 5 4 4 3. + 0 + 0.0108221098780632 + -0.1182013973593712 + 0.0840096473693848 + <_> + + <_> + + + + <_>1 0 6 13 -1. + <_>3 0 2 13 3. + 0 + 6.4339267555624247e-004 + -0.1107214987277985 + 0.0841263979673386 + <_> + + <_> + + + + <_>16 7 4 11 -1. + <_>16 7 2 11 2. + 0 + 0.0935449898242950 + 6.1726439744234085e-003 + -0.3812153041362763 + <_> + + <_> + + + + <_>0 7 4 11 -1. + <_>2 7 2 11 2. + 0 + -3.9214221760630608e-003 + 0.1296992003917694 + -0.0755300298333168 + <_> + + <_> + + + + <_>8 6 4 8 -1. + <_>8 10 4 4 2. + 0 + -4.5141312293708324e-003 + -0.2122250944375992 + 0.0509413518011570 + <_> + + <_> + + + + <_>0 10 20 3 -1. + <_>0 11 20 1 3. + 0 + 0.0515638701617718 + 0.0112159997224808 + -0.8412504792213440 + <_> + + <_> + + + + <_>11 13 8 6 -1. + <_>11 15 8 2 3. + 0 + -0.0370868295431137 + -0.3344379067420960 + 0.0121983503922820 + <_> + + <_> + + + + <_>8 0 3 13 -1. + <_>9 0 1 13 3. + 0 + -1.5274320030584931e-003 + 0.1702284961938858 + -0.0531711094081402 + <_> + + <_> + + + + <_>9 6 3 13 -1. + <_>10 6 1 13 3. + 0 + -3.3183719497174025e-003 + 0.1497268974781036 + -0.0395227000117302 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>9 10 2 10 3. + 0 + -0.0106951398774982 + -0.2076769024133682 + 0.0482235401868820 + <_> + + <_> + + + + <_>16 0 4 18 -1. + <_>16 0 2 18 2. 
+ 0 + 8.0909933894872665e-003 + -0.0555725693702698 + 0.0813619419932365 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + 8.9193560415878892e-004 + -0.1488822996616364 + 0.0569740198552608 + <_> + + <_> + + + + <_>4 9 13 3 -1. + <_>4 10 13 1 3. + 0 + 2.1180939802434295e-004 + -0.1877689063549042 + 0.0450870804488659 + <_> + + <_> + + + + <_>0 0 4 19 -1. + <_>2 0 2 19 2. + 0 + 6.8865409120917320e-003 + -0.0746515393257141 + 0.1180645972490311 + <_> + + <_> + + + + <_>10 0 10 20 -1. + <_>10 0 5 20 2. + 0 + 0.3800981938838959 + 9.6241412684321404e-003 + -0.5025712847709656 + <_> + + <_> + + + + <_>0 0 10 20 -1. + <_>5 0 5 20 2. + 0 + 0.0948449000716209 + 0.0202841106802225 + -0.3947888016700745 + <_> + + <_> + + + + <_>8 5 10 10 -1. + <_>13 5 5 5 2. + <_>8 10 5 5 2. + 0 + -1.1133160296594724e-004 + 0.0537170283496380 + -0.1543323993682861 + <_> + + <_> + + + + <_>1 8 6 12 -1. + <_>1 8 3 6 2. + <_>4 14 3 6 2. + 0 + 0.0359116308391094 + -0.0243740491569042 + 0.3507775962352753 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0292917806655169 + -0.4900273978710175 + 0.0216948408633471 + <_> + + <_> + + + + <_>4 1 3 13 -1. + <_>5 1 1 13 3. + 0 + -0.0242771897464991 + -0.5020691156387329 + 0.0158074200153351 + <_> + + <_> + + + + <_>4 4 13 3 -1. + <_>4 5 13 1 3. + 0 + 0.0126201100647449 + -0.0486378483474255 + 0.2137005031108856 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -4.1045118123292923e-003 + -0.1675793975591660 + 0.0626759231090546 + <_> + + <_> + + + + <_>5 1 10 19 -1. + <_>5 1 5 19 2. + 0 + -0.2347716987133026 + 0.6220551133155823 + -0.0139493197202683 + <_> + + <_> + + + + <_>1 13 8 6 -1. + <_>1 15 8 2 3. + 0 + -0.0679142475128174 + -0.9701414108276367 + 0.0104904603213072 + <_> + + <_> + + + + <_>4 5 13 3 -1. + <_>4 6 13 1 3. + 0 + 1.4207609929144382e-003 + -0.0608011186122894 + 0.1350073963403702 + <_> + + <_> + + + + <_>0 6 14 4 -1. + <_>0 6 7 2 2. + <_>7 8 7 2 2. + 0 + -5.0894408486783504e-003 + -0.1699216961860657 + 0.0507956705987453 + <_> + + <_> + + + + <_>14 3 6 16 -1. + <_>17 3 3 8 2. + <_>14 11 3 8 2. + 0 + -0.0192268006503582 + 0.0988611727952957 + -0.0336862206459045 + <_> + + <_> + + + + <_>1 4 18 10 -1. + <_>1 4 9 5 2. + <_>10 9 9 5 2. + 0 + 0.0105905402451754 + 0.0596169009804726 + -0.1649544984102249 + <_> + + <_> + + + + <_>14 2 6 16 -1. + <_>17 2 3 8 2. + <_>14 10 3 8 2. + 0 + 3.3726880792528391e-003 + -0.0386523418128490 + 0.0554005689918995 + <_> + + <_> + + + + <_>0 2 6 16 -1. + <_>0 2 3 8 2. + <_>3 10 3 8 2. + 0 + -0.0890128016471863 + 0.4075050950050354 + -0.0241503305733204 + <_> + + <_> + + + + <_>14 8 6 12 -1. + <_>14 8 3 12 2. + 0 + -0.2335907965898514 + -0.7264190912246704 + 6.5185138955712318e-003 + <_> + + <_> + + + + <_>0 8 6 12 -1. + <_>3 8 3 12 2. + 0 + -0.2273225933313370 + -0.8997700810432434 + 9.1146891936659813e-003 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0296017695218325 + -0.4327085018157959 + 0.0160211902111769 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -6.9494689814746380e-003 + 0.1521899998188019 + -0.0618968307971954 + <_> + + <_> + + + + <_>8 2 4 8 -1. + <_>8 6 4 4 2. + 0 + -1.9150479929521680e-003 + 0.0725705474615097 + -0.1312108933925629 + <_> + + <_> + + + + <_>0 12 8 8 -1. + <_>4 12 4 8 2. + 0 + 8.5106380283832550e-003 + -0.0573260895907879 + 0.1574310064315796 + <_> + + <_> + + + + <_>2 4 18 16 -1. + <_>8 4 6 16 3. + 0 + -0.0243631396442652 + 0.0957008227705956 + -0.0583644285798073 + <_> + + <_> + + + + <_>5 7 4 7 -1. + <_>7 7 2 7 2. 
+ 0 + -0.0225226599723101 + -0.4694313108921051 + 0.0202413592487574 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -4.4660381972789764e-003 + 0.0762111097574234 + -0.0818446576595306 + <_> + + <_> + + + + <_>5 6 8 4 -1. + <_>9 6 4 4 2. + 0 + -4.2101819999516010e-003 + -0.2208358943462372 + 0.0470101982355118 + <_> + + <_> + + + + <_>7 2 10 4 -1. + <_>7 2 5 4 2. + 0 + 5.7130381464958191e-003 + -0.0622540004551411 + 0.0527058206498623 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + -5.6021669879555702e-003 + -0.1898576021194458 + 0.0501148216426373 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -0.0220420695841312 + 0.0876837521791458 + -0.0247771795839071 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -2.1817081142216921e-003 + 0.1676660031080246 + -0.0667717605829239 + <_> + + <_> + + + + <_>1 14 18 6 -1. + <_>1 16 18 2 3. + 0 + 0.0245453007519245 + 0.0492051206529140 + -0.2250372022390366 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>10 0 1 13 2. + 0 + -2.4728688877075911e-003 + 0.1353967040777206 + -0.0623301304876804 + <_> + + <_> + + + + <_>1 1 19 3 -1. + <_>1 2 19 1 3. + 0 + 2.3717728909105062e-003 + 0.0579260587692261 + -0.1332525014877319 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -0.0389996618032455 + 0.2987548112869263 + -0.0302572399377823 + <_> + + <_> + + + + <_>4 18 13 2 -1. + <_>4 19 13 1 2. + 0 + -1.7835620092228055e-003 + 0.0926802828907967 + -0.0743505880236626 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 0.0199844501912594 + 0.0224093496799469 + -0.4150193929672241 + <_> + + <_> + + + + <_>13 10 7 6 -1. + <_>13 12 7 2 3. + 0 + 4.1170548647642136e-003 + 0.0534322783350945 + -0.1509225964546204 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + 0.0439956001937389 + 0.0113898897543550 + -0.6649451851844788 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + -3.5350578837096691e-003 + 0.1100559011101723 + -0.0763770565390587 + <_> + + <_> + + + + <_>3 14 13 3 -1. + <_>3 15 13 1 3. + 0 + 1.4632029924541712e-003 + -0.0569621510803699 + 0.1318459957838059 + <_> + + <_> + + + + <_>1 15 18 4 -1. + <_>10 15 9 2 2. + <_>1 17 9 2 2. + 0 + -4.9925539642572403e-003 + -0.1467507034540176 + 0.0551299788057804 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>4 10 2 10 3. + 0 + -0.0786464288830757 + -0.5276818275451660 + 0.0136627396568656 + <_> + + <_> + + + + <_>11 14 9 6 -1. + <_>14 14 3 6 3. + 0 + -4.3559111654758453e-003 + 0.0917981192469597 + -0.0575981698930264 + <_> + + <_> + + + + <_>4 10 12 10 -1. + <_>10 10 6 10 2. + 0 + 8.2531487569212914e-003 + -0.0656139776110649 + 0.1308307051658630 + <_> + + <_> + + + + <_>6 6 8 7 -1. + <_>6 6 4 7 2. + 0 + -3.5033349413424730e-003 + -0.1274259984493256 + 0.0608751699328423 + <_> + + <_> + + + + <_>8 4 4 7 -1. + <_>10 4 2 7 2. + 0 + 3.9662471972405910e-003 + -0.0557151511311531 + 0.1478324979543686 + <_> + + <_> + + + + <_>9 0 3 15 -1. + <_>9 5 3 5 3. + 0 + -0.0102602196857333 + -0.1347229033708572 + 0.0445143505930901 + <_> + + <_> + + + + <_>2 7 12 12 -1. + <_>2 11 12 4 3. + 0 + 3.6724930396303535e-004 + -0.1372770071029663 + 0.0611796490848064 + <_> + + <_> + + + + <_>4 0 12 6 -1. + <_>4 2 12 2 3. + 0 + 0.0195001997053623 + -0.0590333305299282 + 0.1558932065963745 + <_> + + <_> + + + + <_>5 10 9 9 -1. + <_>5 13 9 3 3. + 0 + 0.0140414200723171 + 0.0221404395997524 + -0.4283109009265900 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0384597405791283 + 0.0168757308274508 + -0.5242574214935303 + <_> + + <_> + + + + <_>0 12 8 8 -1. 
+ <_>0 12 4 4 2. + <_>4 16 4 4 2. + 0 + -0.0259015392512083 + 0.2516309916973114 + -0.0325795114040375 + <_> + + <_> + + + + <_>14 11 6 9 -1. + <_>14 14 6 3 3. + 0 + 0.0282644797116518 + 0.0212977193295956 + -0.2397830933332443 + <_> + + <_> + + + + <_>5 1 7 6 -1. + <_>5 3 7 2 3. + 0 + -0.0530678816139698 + 0.7659469246864319 + -0.0101632401347160 + <_> + + <_> + + + + <_>9 5 3 14 -1. + <_>9 12 3 7 2. + 0 + 1.6842440236359835e-003 + 0.0401687286794186 + -0.2181098014116287 + <_> + + <_> + + + + <_>8 9 4 8 -1. + <_>8 13 4 4 2. + 0 + 6.5255112713202834e-004 + -0.0321552492678165 + 0.2602804899215698 + <_> + + <_> + + + + <_>7 5 6 14 -1. + <_>7 12 6 7 2. + 0 + -0.1538109928369522 + -0.7957018017768860 + 9.9420538172125816e-003 + <_> + + <_> + + + + <_>4 9 4 8 -1. + <_>6 9 2 8 2. + 0 + -1.7530319746583700e-004 + 0.0612571612000465 + -0.1183089017868042 + <_> + + <_> + + + + <_>12 9 6 9 -1. + <_>14 9 2 9 3. + 0 + 1.1829809518530965e-003 + -0.0825895294547081 + 0.0582347586750984 + <_> + + <_> + + + + <_>2 9 6 9 -1. + <_>4 9 2 9 3. + 0 + 0.0147538902238011 + 0.0467287786304951 + -0.1987434029579163 + <_> + + <_> + + + + <_>4 16 15 4 -1. + <_>9 16 5 4 3. + 0 + 0.0105925798416138 + -0.0571571588516235 + 0.1226172968745232 + <_> + + <_> + + + + <_>3 2 10 4 -1. + <_>8 2 5 4 2. + 0 + -0.0466389693319798 + 0.3922199904918671 + -0.0187704507261515 + <_> + + <_> + + + + <_>10 0 4 12 -1. + <_>10 0 2 12 2. + 0 + -2.2761020809412003e-003 + -0.1981981992721558 + 0.0326699502766132 + <_> + + <_> + + + + <_>6 0 4 12 -1. + <_>8 0 2 12 2. + 0 + -8.9252636826131493e-005 + -0.1779569983482361 + 0.0450881607830524 + <_> + + <_> + + + + <_>7 4 6 7 -1. + <_>9 4 2 7 3. + 0 + -4.8888921737670898e-003 + 0.3797332942485809 + -0.0256225001066923 + <_> + + <_> + + + + <_>5 2 3 13 -1. + <_>6 2 1 13 3. + 0 + -4.7039450146257877e-003 + -0.1407544016838074 + 0.0518858693540096 + <_> + + <_> + + + + <_>12 5 5 9 -1. + <_>12 8 5 3 3. + 0 + 6.8887867964804173e-003 + -0.0607079006731510 + 0.0673187822103500 + <_> + + <_> + + + + <_>5 6 9 12 -1. + <_>5 10 9 4 3. + 0 + 0.0944499671459198 + -0.0439751595258713 + 0.1688583046197891 + <_> + + <_> + + + + <_>9 0 4 20 -1. + <_>11 0 2 10 2. + <_>9 10 2 10 2. + 0 + 0.0515206716954708 + 3.8239071145653725e-003 + -0.6307771205902100 + <_> + + <_> + + + + <_>8 0 4 16 -1. + <_>8 0 2 8 2. + <_>10 8 2 8 2. + 0 + 6.3957129605114460e-003 + 0.0440943092107773 + -0.1815602034330368 + <_> + + <_> + + + + <_>2 9 18 11 -1. + <_>8 9 6 11 3. + 0 + -0.0496592707931995 + 0.1117423996329308 + -0.0558212101459503 + <_> + + <_> + + + + <_>0 11 6 9 -1. + <_>0 14 6 3 3. + 0 + -6.9081829860806465e-003 + -0.1403895020484924 + 0.0595357604324818 + <_> + + <_> + + + + <_>13 6 6 12 -1. + <_>13 6 3 12 2. + 0 + 9.2546567320823669e-003 + -0.0335879102349281 + 0.0585931017994881 + <_> + + <_> + + + + <_>6 12 8 8 -1. + <_>6 12 4 4 2. + <_>10 16 4 4 2. + 0 + 5.0454521551728249e-003 + 0.0537776611745358 + -0.1362603008747101 + <_> + + <_> + + + + <_>1 9 18 8 -1. + <_>10 9 9 4 2. + <_>1 13 9 4 2. + 0 + -0.0333334207534790 + 0.2464126944541931 + -0.0318886786699295 + <_> + + <_> + + + + <_>2 8 12 4 -1. + <_>6 8 4 4 3. + 0 + 0.0612010806798935 + 0.0200130306184292 + -0.3932656943798065 + <_> + + <_> + + + + <_>13 6 6 12 -1. + <_>13 6 3 12 2. + 0 + -0.0101751200854778 + 0.0753246024250984 + -0.0396225489675999 + <_> + + <_> + + + + <_>1 6 6 12 -1. + <_>4 6 3 12 2. + 0 + 0.0102713704109192 + -0.0522345192730427 + 0.1793947070837021 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. 
+ 0 + -0.0513378605246544 + -0.3109723925590515 + 0.0216564703732729 + <_> + + <_> + + + + <_>3 15 13 3 -1. + <_>3 16 13 1 3. + 0 + 2.3615739773958921e-003 + -0.0648433193564415 + 0.1177197992801666 + <_> + + <_> + + + + <_>7 15 13 3 -1. + <_>7 16 13 1 3. + 0 + -2.7691819705069065e-003 + 0.1468258947134018 + -0.0577945187687874 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 0.0214578099548817 + 0.0252693500369787 + -0.3340482115745544 + <_> + + <_> + + + + <_>17 0 3 14 -1. + <_>18 0 1 14 3. + 0 + -5.9619098901748657e-003 + 0.0992413386702538 + -0.0353719592094421 + <_> + + <_> + + + + <_>0 0 20 16 -1. + <_>0 8 20 8 2. + 0 + 0.7521739006042481 + 7.7095897868275642e-003 + -0.8643410801887512 + <_> + + <_> + + + + <_>6 3 14 4 -1. + <_>13 3 7 2 2. + <_>6 5 7 2 2. + 0 + -9.2514551943168044e-004 + 0.0382519103586674 + -0.0755976289510727 + <_> + + <_> + + + + <_>0 2 20 6 -1. + <_>0 2 10 3 2. + <_>10 5 10 3 2. + 0 + 4.0818289853632450e-003 + 0.0666991397738457 + -0.1128949970006943 + <_> + + <_> + + + + <_>17 0 3 14 -1. + <_>18 0 1 14 3. + 0 + 0.0162560101598501 + -0.0187829006463289 + 0.1887574940919876 + <_> + + <_> + + + + <_>5 9 4 9 -1. + <_>7 9 2 9 2. + 0 + -9.3405954539775848e-003 + -0.1646234989166260 + 0.0468597188591957 + <_> + + <_> + + + + <_>11 11 4 7 -1. + <_>11 11 2 7 2. + 0 + -3.8136378861963749e-004 + 0.0604981705546379 + -0.1008936017751694 + <_> + + <_> + + + + <_>5 7 6 10 -1. + <_>7 7 2 10 3. + 0 + -0.0234709605574608 + 0.1854676008224487 + -0.0395773015916348 + <_> + + <_> + + + + <_>0 7 20 2 -1. + <_>0 7 10 2 2. + 0 + -0.0786843523383141 + -0.6054000854492188 + 0.0131629798561335 + <_> + + <_> + + + + <_>3 4 14 12 -1. + <_>3 4 7 6 2. + <_>10 10 7 6 2. + 0 + 0.1061614006757736 + 9.4080185517668724e-003 + -0.7241687774658203 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + -0.0692113786935806 + -0.9281964898109436 + 5.4140980355441570e-003 + <_> + + <_> + + + + <_>6 5 6 8 -1. + <_>8 5 2 8 3. + 0 + -0.0438282899558544 + 0.5493376851081848 + -0.0155168296769261 + <_> + + <_> + + + + <_>11 5 4 10 -1. + <_>11 5 2 10 2. + 0 + 5.6881271302700043e-003 + 0.0373288616538048 + -0.1201948001980782 + <_> + + <_> + + + + <_>1 2 18 14 -1. + <_>7 2 6 14 3. + 0 + 0.3693388104438782 + -9.9545158445835114e-003 + 0.8160753846168518 + <_> + + <_> + + + + <_>3 3 14 8 -1. + <_>10 3 7 4 2. + <_>3 7 7 4 2. + 0 + -0.0104475198313594 + 0.1419049948453903 + -0.0497983992099762 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + 0.0151513200253248 + 0.0227053202688694 + -0.3452369868755341 + <_> + + <_> + + + + <_>5 9 10 11 -1. + <_>5 9 5 11 2. + 0 + 0.1250385046005249 + -0.0271509103477001 + 0.3037905097007752 + <_> + + <_> + + + + <_>5 7 10 8 -1. + <_>5 7 5 4 2. + <_>10 11 5 4 2. + 0 + -9.1995187103748322e-003 + -0.1702055931091309 + 0.0443142987787724 + <_> + + <_> + + + + <_>16 0 4 16 -1. + <_>16 8 4 8 2. + 0 + 7.1795531548559666e-003 + -0.0789717882871628 + 0.0639191567897797 + <_> + + <_> + + + + <_>1 4 18 4 -1. + <_>10 4 9 4 2. + 0 + -0.1821783035993576 + -0.9759889245033264 + 7.1003441698849201e-003 + <_> + + <_> + + + + <_>4 10 14 3 -1. + <_>4 11 14 1 3. + 0 + 1.5047369743115269e-005 + -0.0989603772759438 + 0.0393710993230343 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0387634001672268 + -0.5909513831138611 + 0.0104290395975113 + <_> + + <_> + + + + <_>17 0 3 14 -1. + <_>18 0 1 14 3. + 0 + -0.0437998808920383 + 0.2529020905494690 + -9.5704924315214157e-003 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. 
+ 0 + -0.0567055195569992 + -0.7246677279472351 + 9.0332692489027977e-003 + <_> + + <_> + + + + <_>13 1 6 10 -1. + <_>16 1 3 5 2. + <_>13 6 3 5 2. + 0 + 0.0751839280128479 + -6.7565650679171085e-003 + 0.7307543754577637 + <_> + + <_> + + + + <_>1 1 6 10 -1. + <_>1 1 3 5 2. + <_>4 6 3 5 2. + 0 + -6.4183590002357960e-003 + 0.0854218304157257 + -0.0760568827390671 + <_> + + <_> + + + + <_>3 2 14 3 -1. + <_>3 3 14 1 3. + 0 + 1.3349299551919103e-003 + 0.0699776634573936 + -0.0921879187226295 + <_> + + <_> + + + + <_>3 12 13 3 -1. + <_>3 13 13 1 3. + 0 + 2.8028399683535099e-003 + -0.0509531982243061 + 0.1293468028306961 + <_> + + <_> + + + + <_>11 4 8 8 -1. + <_>15 4 4 4 2. + <_>11 8 4 4 2. + 0 + -0.0641968995332718 + -0.6175134181976318 + 8.7323756888508797e-003 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. + 0 + 1.7879910301417112e-003 + -0.0594454295933247 + 0.1132500991225243 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + 2.3370790295302868e-003 + 0.0226433202624321 + -0.1742707043886185 + <_> + + <_> + + + + <_>0 14 14 2 -1. + <_>0 15 14 1 2. + 0 + 2.1500359289348125e-003 + -0.0518462583422661 + 0.1502798944711685 + <_> + + <_> + + + + <_>11 4 8 8 -1. + <_>15 4 4 4 2. + <_>11 8 4 4 2. + 0 + -0.0297449491918087 + -0.1723556071519852 + 0.0161605402827263 + <_> + + <_> + + + + <_>0 9 5 9 -1. + <_>0 12 5 3 3. + 0 + -2.9182229191064835e-003 + -0.1164601966738701 + 0.0533809401094913 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + -5.2581899799406528e-003 + -0.0842621028423309 + 0.0368803516030312 + <_> + + <_> + + + + <_>3 5 5 9 -1. + <_>3 8 5 3 3. + 0 + 0.0203024893999100 + -0.0532972291111946 + 0.1694989055395126 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 3.1120770145207644e-003 + 0.0446304306387901 + -0.1405466049909592 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + -0.0775247365236282 + -0.6503828167915344 + 0.0104688899591565 + <_> + + <_> + + + + <_>10 0 10 8 -1. + <_>15 0 5 4 2. + <_>10 4 5 4 2. + 0 + 0.0209784507751465 + -0.0300015695393085 + 0.1923335045576096 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 2.0581670105457306e-003 + 0.0515354312956333 + -0.1311402022838593 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + -7.8407032415270805e-003 + -0.1388293951749802 + 0.0506579317152500 + <_> + + <_> + + + + <_>0 4 12 9 -1. + <_>0 7 12 3 3. + 0 + -0.0718947499990463 + 0.2186698019504547 + -0.0336151905357838 + <_> + + <_> + + + + <_>0 7 20 4 -1. + <_>0 9 20 2 2. + 0 + 0.1421850025653839 + 0.0128802200779319 + -0.5885351896286011 + <_> + + <_> + + + + <_>5 2 10 4 -1. + <_>10 2 5 4 2. + 0 + 4.4800378382205963e-003 + -0.0555220395326614 + 0.1197623014450073 + <_> + + <_> + + + + <_>11 11 4 7 -1. + <_>11 11 2 7 2. + 0 + -9.4673000276088715e-003 + -0.1203638017177582 + 0.0302323605865240 + <_> + + <_> + + + + <_>6 12 4 7 -1. + <_>8 12 2 7 2. + 0 + -1.2275399640202522e-003 + 0.0835638269782066 + -0.0870467200875282 + <_> + + <_> + + + + <_>11 13 9 7 -1. + <_>14 13 3 7 3. + 0 + -6.2556960619986057e-003 + 0.0693551376461983 + -0.0351463407278061 + <_> + + <_> + + + + <_>4 15 12 5 -1. + <_>10 15 6 5 2. + 0 + 0.0649539008736610 + -0.0192965101450682 + 0.3489815890789032 + <_> + + <_> + + + + <_>8 9 4 8 -1. + <_>8 9 2 8 2. + 0 + -3.2067541033029556e-003 + -0.1520569026470184 + 0.0558979287743568 + <_> + + <_> + + + + <_>5 11 6 7 -1. + <_>7 11 2 7 3. + 0 + -0.0482600890100002 + -0.6030963063240051 + 0.0104638598859310 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>8 5 3 7 2. 
+ 0 + -4.2638331651687622e-003 + -0.1527829021215439 + 0.0184243191033602 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>9 5 3 7 2. + 0 + 0.0493636913597584 + -0.0254420097917318 + 0.3922775983810425 + <_> + + <_> + + + + <_>2 6 16 9 -1. + <_>2 9 16 3 3. + 0 + 2.3624610621482134e-003 + 0.3851962089538574 + -0.0170713607221842 + <_> + + <_> + + + + <_>3 8 14 2 -1. + <_>3 9 14 1 2. + 0 + 2.5921489577740431e-003 + -0.1545972973108292 + 0.0439757890999317 + <_> + + <_> + + + + <_>9 4 3 15 -1. + <_>9 9 3 5 3. + 0 + 0.0115101700648665 + 0.0607402101159096 + -0.0986718907952309 + <_> + + <_> + + + + <_>7 10 4 8 -1. + <_>7 14 4 4 2. + 0 + 3.9182868786156178e-003 + 0.0261657498776913 + -0.2969762980937958 + <_> + + <_> + + + + <_>16 9 4 11 -1. + <_>16 9 2 11 2. + 0 + 0.0732656419277191 + 5.5715530179440975e-003 + -0.3047415912151337 + <_> + + <_> + + + + <_>0 9 4 11 -1. + <_>2 9 2 11 2. + 0 + -4.8912810161709785e-003 + 0.1275378018617630 + -0.0662368386983871 + <_> + + <_> + + + + <_>7 3 8 10 -1. + <_>7 8 8 5 2. + 0 + -0.0131870303303003 + -0.2025769054889679 + 0.0303698293864727 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + 1.8196239834651351e-003 + 0.0491981394588947 + -0.1378270983695984 + <_> + + <_> + + + + <_>7 16 8 4 -1. + <_>7 16 4 4 2. + 0 + -0.0102994004264474 + 0.1353435963392258 + -0.0291934702545404 + <_> + + <_> + + + + <_>1 0 10 20 -1. + <_>1 0 5 10 2. + <_>6 10 5 10 2. + 0 + 0.1715707927942276 + -9.5548974350094795e-003 + 0.7139971852302551 + <_> + + <_> + + + + <_>10 1 4 10 -1. + <_>10 6 4 5 2. + 0 + -3.4571110736578703e-003 + 0.0610946305096149 + -0.0768169984221458 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 3.3349241130053997e-004 + -0.1876861006021500 + 0.0394117198884487 + <_> + + <_> + + + + <_>1 7 18 4 -1. + <_>10 7 9 2 2. + <_>1 9 9 2 2. + 0 + 0.0560192093253136 + 8.5914824157953262e-003 + -0.7357705831527710 + <_> + + <_> + + + + <_>5 14 10 6 -1. + <_>5 16 10 2 3. + 0 + 6.2299368437379599e-004 + -0.0940620005130768 + 0.0679658874869347 + <_> + + <_> + + + + <_>7 12 13 3 -1. + <_>7 13 13 1 3. + 0 + -0.0142886796966195 + 0.2414492964744568 + -0.0270254593342543 + <_> + + <_> + + + + <_>2 11 7 6 -1. + <_>2 13 7 2 3. + 0 + -9.9114552140235901e-003 + -0.1534602940082550 + 0.0532433614134789 + <_> + + <_> + + + + <_>11 12 5 8 -1. + <_>11 16 5 4 2. + 0 + -0.0707279667258263 + -0.7124310135841370 + 7.4889077804982662e-003 + <_> + + <_> + + + + <_>4 12 5 8 -1. + <_>4 16 5 4 2. + 0 + 0.0161121692508459 + -0.0354375094175339 + 0.2202602028846741 + <_> + + <_> + + + + <_>10 10 10 4 -1. + <_>10 12 10 2 2. + 0 + 2.9938609804958105e-003 + 0.0115308202803135 + -0.0920172408223152 + <_> + + <_> + + + + <_>4 12 9 6 -1. + <_>4 15 9 3 2. + 0 + 1.4030840247869492e-003 + 0.0543021410703659 + -0.1177761033177376 + <_> + + <_> + + + + <_>10 10 10 4 -1. + <_>10 12 10 2 2. + 0 + -0.0898949131369591 + -0.6765859127044678 + 1.5741019742563367e-003 + <_> + + <_> + + + + <_>0 10 10 4 -1. + <_>0 12 10 2 2. + 0 + 2.7459259144961834e-003 + 0.0298608001321554 + -0.2209143042564392 + <_> + + <_> + + + + <_>16 0 4 16 -1. + <_>16 8 4 8 2. + 0 + 0.0222259406000376 + -0.0465929098427296 + 0.0804186910390854 + <_> + + <_> + + + + <_>7 4 3 15 -1. + <_>7 9 3 5 3. + 0 + 4.4512529857456684e-003 + 0.1070649996399880 + -0.0651014968752861 + <_> + + <_> + + + + <_>9 10 10 6 -1. + <_>14 10 5 3 2. + <_>9 13 5 3 2. + 0 + -2.1191150881350040e-003 + 0.0398718602955341 + -0.0525559596717358 + <_> + + <_> + + + + <_>3 1 14 14 -1. + <_>3 1 7 7 2. + <_>10 8 7 7 2. 
+ 0 + 0.1022958979010582 + 0.0133862700313330 + -0.4554656147956848 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>18 5 2 7 2. + <_>16 12 2 7 2. + 0 + -6.8260570988059044e-003 + 0.1269534975290299 + -0.0597040317952633 + <_> + + <_> + + + + <_>0 5 4 14 -1. + <_>0 5 2 7 2. + <_>2 12 2 7 2. + 0 + -0.0568905808031559 + 0.4018079936504364 + -0.0160482693463564 + <_> + + <_> + + + + <_>5 2 13 3 -1. + <_>5 3 13 1 3. + 0 + -0.0185900293290615 + -0.4037410914897919 + 0.0135025801137090 + <_> + + <_> + + + + <_>0 16 17 2 -1. + <_>0 17 17 1 2. + 0 + 0.0338822007179260 + 7.8824451193213463e-003 + -0.7926862239837647 + <_> + + <_> + + + + <_>2 9 16 6 -1. + <_>2 12 16 3 2. + 0 + 1.8759339582175016e-003 + -0.0345212407410145 + 0.1817788034677506 + <_> + + <_> + + + + <_>1 10 18 2 -1. + <_>1 11 18 1 2. + 0 + 1.5652549918740988e-003 + 0.0484198890626431 + -0.1518516987562180 + <_> + + <_> + + + + <_>5 10 13 3 -1. + <_>5 11 13 1 3. + 0 + 3.9563868194818497e-003 + -0.0421620905399323 + 0.0789437219500542 + -1.3404430150985718 + 28 + -1 + <_> + + + <_> + + <_> + + + + <_>3 0 13 9 -1. + <_>3 3 13 3 3. + 0 + 0.0884874910116196 + -0.2293592989444733 + 0.2400110960006714 + <_> + + <_> + + + + <_>6 4 9 5 -1. + <_>9 4 3 5 3. + 0 + 0.0433443598449230 + -0.1992744952440262 + 0.2029874026775360 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 0.0159850791096687 + -0.1989088952541351 + 0.1923387944698334 + <_> + + <_> + + + + <_>10 1 10 4 -1. + <_>10 1 5 4 2. + 0 + 0.0984112322330475 + -0.0948308929800987 + 0.2447405010461807 + <_> + + <_> + + + + <_>1 3 18 15 -1. + <_>1 8 18 5 3. + 0 + 0.0100799798965454 + -0.4800091087818146 + 0.0598084516823292 + <_> + + <_> + + + + <_>14 2 6 12 -1. + <_>14 2 3 12 2. + 0 + 0.0626299381256104 + -0.1590265929698944 + 0.1516306996345520 + <_> + + <_> + + + + <_>1 2 6 5 -1. + <_>4 2 3 5 2. + 0 + 0.0136238699778914 + -0.2745133936405182 + 0.0904333665966988 + <_> + + <_> + + + + <_>12 5 8 8 -1. + <_>16 5 4 4 2. + <_>12 9 4 4 2. + 0 + -3.8067731074988842e-003 + -0.2934218049049377 + 0.0730208307504654 + <_> + + <_> + + + + <_>0 11 13 3 -1. + <_>0 12 13 1 3. + 0 + -0.0146496100351214 + 0.2605907917022705 + -0.0952483788132668 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + -4.9288192531093955e-004 + 0.0593522191047668 + -0.2808147072792053 + <_> + + <_> + + + + <_>0 0 17 3 -1. + <_>0 1 17 1 3. + 0 + -5.1220930181443691e-003 + -0.2421803027391434 + 0.0817015096545219 + <_> + + <_> + + + + <_>6 5 9 8 -1. + <_>6 9 9 4 2. + 0 + 3.3120220177806914e-004 + -0.4009391069412231 + 0.0340260900557041 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + -7.4724480509757996e-004 + 0.0605607889592648 + -0.2912786900997162 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0488296709954739 + -0.0722984224557877 + 0.2613297104835510 + <_> + + <_> + + + + <_>4 9 12 5 -1. + <_>8 9 4 5 3. + 0 + 0.0269940104335546 + 0.0954571291804314 + -0.2675864994525909 + <_> + + <_> + + + + <_>4 8 15 3 -1. + <_>9 8 5 3 3. + 0 + -2.1151660475879908e-003 + -0.2577306926250458 + 0.0532478690147400 + <_> + + <_> + + + + <_>1 8 15 3 -1. + <_>6 8 5 3 3. + 0 + 2.2652999177807942e-005 + -0.3009231090545654 + 0.0590967908501625 + <_> + + <_> + + + + <_>4 13 13 3 -1. + <_>4 14 13 1 3. + 0 + 0.0110349301248789 + -0.0742779374122620 + 0.1904879063367844 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + -0.0102752195671201 + -0.3283599913120270 + 0.0492186881601810 + <_> + + <_> + + + + <_>10 1 7 4 -1. + <_>10 3 7 2 2. 
+ 0 + -8.3319991827011108e-003 + -0.2965146899223328 + 0.0394287891685963 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + 0.0508086718618870 + -0.0476612411439419 + 0.3740425109863281 + <_> + + <_> + + + + <_>10 9 6 10 -1. + <_>13 9 3 5 2. + <_>10 14 3 5 2. + 0 + -1.2126479996368289e-003 + -0.1214888989925385 + 0.0650594383478165 + <_> + + <_> + + + + <_>0 10 20 5 -1. + <_>10 10 10 5 2. + 0 + 4.1254470124840736e-003 + -0.1491204053163528 + 0.1114611998200417 + <_> + + <_> + + + + <_>2 1 16 4 -1. + <_>10 1 8 2 2. + <_>2 3 8 2 2. + 0 + -0.0182843599468470 + -0.2857351899147034 + 0.0592681318521500 + <_> + + <_> + + + + <_>4 0 12 6 -1. + <_>4 3 12 3 2. + 0 + 0.1415628045797348 + -0.0344361513853073 + 0.4637441933155060 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + -0.0369824208319187 + -0.5085319876670837 + 0.0250870808959007 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + 5.0303530879318714e-003 + 0.0946269035339355 + -0.1612031012773514 + <_> + + <_> + + + + <_>2 3 16 17 -1. + <_>2 3 8 17 2. + 0 + -0.4614908099174500 + 0.4509657025337219 + -0.0312092900276184 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0197946894913912 + -0.4104653000831604 + 0.0387902893126011 + <_> + + <_> + + + + <_>12 5 8 8 -1. + <_>16 5 4 4 2. + <_>12 9 4 4 2. + 0 + -0.0238720308989286 + -0.1525274068117142 + 9.2825219035148621e-003 + <_> + + <_> + + + + <_>0 5 8 8 -1. + <_>0 5 4 4 2. + <_>4 9 4 4 2. + 0 + 1.8736299825832248e-003 + -0.1918659955263138 + 0.0690484866499901 + <_> + + <_> + + + + <_>18 4 2 16 -1. + <_>18 12 2 8 2. + 0 + 0.0582442991435528 + -0.0226122308522463 + 0.2197508066892624 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 0.0152811501175165 + 0.0563797503709793 + -0.2417110055685043 + <_> + + <_> + + + + <_>2 0 18 3 -1. + <_>8 0 6 3 3. + 0 + 0.1334712058305740 + -0.0418463498353958 + 0.1364179998636246 + <_> + + <_> + + + + <_>2 6 15 3 -1. + <_>2 7 15 1 3. + 0 + -0.0183592401444912 + 0.1365070044994354 + -0.1053709015250206 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0112365297973156 + -0.2104516029357910 + 0.0618727616965771 + <_> + + <_> + + + + <_>2 12 16 6 -1. + <_>2 14 16 2 3. + 0 + -0.0720137432217598 + -0.3848884999752045 + 0.0367311798036098 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0198934208601713 + 0.1991371959447861 + -0.0544709488749504 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -8.1342989578843117e-003 + -0.2752938866615295 + 0.0471528209745884 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + -0.0136144598945975 + 0.1924871057271957 + -0.0600259304046631 + <_> + + <_> + + + + <_>3 0 3 13 -1. + <_>4 0 1 13 3. + 0 + -6.4553669653832912e-003 + -0.2148008048534393 + 0.0626549199223518 + <_> + + <_> + + + + <_>5 5 10 12 -1. + <_>10 5 5 6 2. + <_>5 11 5 6 2. + 0 + -0.0722887068986893 + -0.5320072770118713 + 0.0221324805170298 + <_> + + <_> + + + + <_>2 4 14 12 -1. + <_>2 4 7 6 2. + <_>9 10 7 6 2. + 0 + -0.0704259797930717 + -0.3258849084377289 + 0.0371509008109570 + <_> + + <_> + + + + <_>18 4 2 16 -1. + <_>18 12 2 8 2. + 0 + -0.0122196702286601 + -0.0659457221627235 + 0.0287281107157469 + <_> + + <_> + + + + <_>5 4 9 5 -1. + <_>8 4 3 5 3. + 0 + 6.9816941395401955e-003 + -0.2850838899612427 + 0.0425124689936638 + <_> + + <_> + + + + <_>15 0 3 15 -1. + <_>16 0 1 15 3. + 0 + -2.1437550894916058e-003 + -0.1001932024955750 + 0.0711989998817444 + <_> + + <_> + + + + <_>2 0 3 15 -1. + <_>3 0 1 15 3. 
+ [... OpenCV Haar cascade classifier XML data elided: per-node rectangle features (e.g. <_>8 6 6 8 -1.), tilted flags, node thresholds, and left/right leaf values, spanning the boundaries of detector stages 29 and 30 (stage thresholds -1.4275209903717041 and -1.3290590047836304); the original XML element tags were lost in extraction ...]
+ 0 + -0.0795436725020409 + 0.4904421865940094 + -7.8059309162199497e-003 + <_> + + <_> + + + + <_>2 13 6 7 -1. + <_>5 13 3 7 2. + 0 + 0.0710572004318237 + 0.0442194305360317 + -0.2107701003551483 + <_> + + <_> + + + + <_>14 2 2 13 -1. + <_>14 2 1 13 2. + 0 + 1.2412209762260318e-003 + 0.0997598469257355 + -0.0740651413798332 + <_> + + <_> + + + + <_>7 12 4 8 -1. + <_>7 16 4 4 2. + 0 + 0.0439005605876446 + 0.0202453397214413 + -0.4780013859272003 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 0.1381482928991318 + -0.0341697297990322 + 0.2066240012645721 + <_> + + <_> + + + + <_>5 15 6 5 -1. + <_>8 15 3 5 2. + 0 + 0.0640267133712769 + 0.0173969306051731 + -0.5774987936019898 + <_> + + <_> + + + + <_>14 2 2 13 -1. + <_>14 2 1 13 2. + 0 + -0.0124567700549960 + -0.1671086996793747 + 0.0121063804253936 + <_> + + <_> + + + + <_>4 2 2 13 -1. + <_>5 2 1 13 2. + 0 + 0.0371836088597775 + -0.0190242994576693 + 0.4447616934776306 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + -0.0349052511155605 + -0.1464806050062180 + 0.0208957791328430 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + 0.0616895593702793 + 0.0124286497011781 + -0.7173764109611511 + <_> + + <_> + + + + <_>13 11 7 4 -1. + <_>13 13 7 2 2. + 0 + -0.0273584891110659 + -0.2431146949529648 + 0.0261387303471565 + <_> + + <_> + + + + <_>0 10 13 3 -1. + <_>0 11 13 1 3. + 0 + 6.3740741461515427e-003 + -0.0825930163264275 + 0.1135658025741577 + <_> + + <_> + + + + <_>6 7 9 12 -1. + <_>6 11 9 4 3. + 0 + -0.1029983982443810 + 0.4539861083030701 + -0.0163155291229486 + <_> + + <_> + + + + <_>2 2 14 4 -1. + <_>2 2 7 2 2. + <_>9 4 7 2 2. + 0 + -0.0146950203925371 + -0.1805031001567841 + 0.0480617806315422 + <_> + + <_> + + + + <_>10 0 2 13 -1. + <_>10 0 1 13 2. + 0 + 6.0288330132607371e-005 + -0.0989745035767555 + 0.0381056703627110 + <_> + + <_> + + + + <_>8 0 2 13 -1. + <_>9 0 1 13 2. + 0 + -0.0137636503204703 + 0.4568940103054047 + -0.0208085998892784 + <_> + + <_> + + + + <_>13 11 7 4 -1. + <_>13 13 7 2 2. + 0 + 5.1598600111901760e-003 + 0.0284798201173544 + -0.1977865993976593 + <_> + + <_> + + + + <_>6 11 7 6 -1. + <_>6 13 7 2 3. + 0 + 6.6321617923676968e-003 + -0.0615603588521481 + 0.1404590010643005 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -0.0110735902562737 + 0.1127232983708382 + -0.0384230390191078 + <_> + + <_> + + + + <_>0 11 7 4 -1. + <_>0 13 7 2 2. + 0 + 7.3836948722600937e-003 + 0.0245752800256014 + -0.3399445116519928 + <_> + + <_> + + + + <_>4 12 12 6 -1. + <_>8 12 4 6 3. + 0 + -0.0192776899784803 + 0.1573224961757660 + -0.0583822205662727 + <_> + + <_> + + + + <_>5 6 6 10 -1. + <_>8 6 3 10 2. + 0 + -0.0262091998010874 + -0.3257543146610260 + 0.0352961495518684 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + 0.0138720795512199 + 0.0275046899914742 + -0.2051005065441132 + <_> + + <_> + + + + <_>2 2 14 6 -1. + <_>2 2 7 3 2. + <_>9 5 7 3 2. + 0 + 2.5171930901706219e-003 + 0.0698056370019913 + -0.1151866018772125 + <_> + + <_> + + + + <_>5 0 10 7 -1. + <_>5 0 5 7 2. + 0 + 0.0677532926201820 + -0.0372681394219399 + 0.2336308062076569 + <_> + + <_> + + + + <_>6 6 8 5 -1. + <_>10 6 4 5 2. + 0 + -0.0243521798402071 + -0.2119124978780747 + 0.0429715812206268 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -0.0150854503735900 + 0.1474328041076660 + -0.0385891310870647 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0300520602613688 + 0.0438824892044067 + -0.2040134072303772 + <_> + + <_> + + + + <_>8 0 10 18 -1. + <_>13 0 5 9 2. + <_>8 9 5 9 2. 
+ 0 + -0.0798785835504532 + 0.0713558271527290 + -0.0358063094317913 + <_> + + <_> + + + + <_>2 5 14 6 -1. + <_>2 5 7 3 2. + <_>9 8 7 3 2. + 0 + -0.0498456507921219 + 0.2899102866649628 + -0.0291932094842196 + <_> + + <_> + + + + <_>7 1 6 10 -1. + <_>10 1 3 5 2. + <_>7 6 3 5 2. + 0 + 0.0609835498034954 + 0.0110780904069543 + -0.8054903745651245 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -0.0241872295737267 + 0.2081667035818100 + -0.0403329916298389 + <_> + + <_> + + + + <_>9 9 10 6 -1. + <_>14 9 5 3 2. + <_>9 12 5 3 2. + 0 + 0.0295819099992514 + 0.0171898808330297 + -0.3017424941062927 + <_> + + <_> + + + + <_>2 8 6 10 -1. + <_>2 13 6 5 2. + 0 + -0.0961589366197586 + -0.3611518144607544 + 0.0214518792927265 + <_> + + <_> + + + + <_>1 10 19 2 -1. + <_>1 11 19 1 2. + 0 + 1.1087789898738265e-003 + 0.0607112683355808 + -0.1299573034048080 + <_> + + <_> + + + + <_>4 9 12 6 -1. + <_>4 12 12 3 2. + 0 + 0.0365770198404789 + -0.0157576892524958 + 0.6156833171844482 + <_> + + <_> + + + + <_>9 7 4 12 -1. + <_>9 11 4 4 3. + 0 + 0.0898875668644905 + 7.5012152083218098e-003 + -0.8463991880416870 + <_> + + <_> + + + + <_>0 11 13 3 -1. + <_>0 12 13 1 3. + 0 + 5.2048689685761929e-003 + -0.0504089109599590 + 0.1561879962682724 + <_> + + <_> + + + + <_>10 14 7 6 -1. + <_>10 16 7 2 3. + 0 + 0.0347273610532284 + 0.0210347902029753 + -0.2183419018983841 + <_> + + <_> + + + + <_>3 14 7 6 -1. + <_>3 16 7 2 3. + 0 + -0.0546950511634350 + -0.8312628269195557 + 8.9029762893915176e-003 + <_> + + <_> + + + + <_>15 5 4 15 -1. + <_>15 5 2 15 2. + 0 + 0.1598773002624512 + 8.5425339639186859e-003 + -0.6928086280822754 + <_> + + <_> + + + + <_>0 3 17 10 -1. + <_>0 8 17 5 2. + 0 + -0.0385586917400360 + -0.2707824110984802 + 0.0270253699272871 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -0.0718663707375526 + -0.3904461860656738 + 0.0109232803806663 + <_> + + <_> + + + + <_>0 0 20 4 -1. + <_>10 0 10 4 2. + 0 + 0.1959034055471420 + 0.0134233701974154 + -0.5426052212715149 + <_> + + <_> + + + + <_>6 1 10 6 -1. + <_>11 1 5 3 2. + <_>6 4 5 3 2. + 0 + -0.0223300792276859 + -0.1727523952722549 + 0.0290585104376078 + <_> + + <_> + + + + <_>0 9 18 11 -1. + <_>6 9 6 11 3. + 0 + 0.5101855993270874 + 0.0114186396822333 + -0.6787652969360352 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + -0.0112399095669389 + 0.1146249994635582 + -0.0568676292896271 + <_> + + <_> + + + + <_>0 10 20 6 -1. + <_>0 12 20 2 3. + 0 + 0.0174861606210470 + 0.0526418685913086 + -0.1619517952203751 + <_> + + <_> + + + + <_>10 9 6 10 -1. + <_>13 9 3 5 2. + <_>10 14 3 5 2. + 0 + -1.4517609961330891e-003 + -0.1087746992707253 + 0.0569604001939297 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>7 10 3 5 2. + <_>10 15 3 5 2. + 0 + 0.0370165593922138 + 0.0174600891768932 + -0.4650532007217407 + <_> + + <_> + + + + <_>6 1 8 15 -1. + <_>6 6 8 5 3. + 0 + -8.6366441100835800e-003 + 0.0730762705206871 + -0.1061659008264542 + <_> + + <_> + + + + <_>0 8 18 3 -1. + <_>0 9 18 1 3. + 0 + 1.9361129961907864e-003 + -0.1458536982536316 + 0.0593944899737835 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -0.0231195501983166 + -0.0948762372136116 + 0.0303874798119068 + <_> + + <_> + + + + <_>3 10 6 10 -1. + <_>3 10 3 5 2. + <_>6 15 3 5 2. + 0 + 6.3178739510476589e-003 + -0.1053709983825684 + 0.0778928473591805 + <_> + + <_> + + + + <_>11 8 8 12 -1. + <_>15 8 4 6 2. + <_>11 14 4 6 2. + 0 + 0.0109619498252869 + -0.0660419836640358 + 0.1056633964180946 + <_> + + <_> + + + + <_>1 8 8 12 -1. + <_>1 8 4 6 2. 
+ <_>5 14 4 6 2. + 0 + -0.0421295203268528 + 0.2434408068656921 + -0.0515736788511276 + <_> + + <_> + + + + <_>13 7 3 13 -1. + <_>14 7 1 13 3. + 0 + 0.0451328195631504 + 0.0107720503583550 + -0.7615677714347839 + <_> + + <_> + + + + <_>6 11 5 9 -1. + <_>6 14 5 3 3. + 0 + 9.4924736768007278e-003 + 0.0452733784914017 + -0.1877003014087677 + <_> + + <_> + + + + <_>7 14 12 5 -1. + <_>7 14 6 5 2. + 0 + -0.1157386004924774 + 0.4483172893524170 + -8.6225848644971848e-003 + <_> + + <_> + + + + <_>2 0 4 8 -1. + <_>2 4 4 4 2. + 0 + 1.5801179688423872e-003 + -0.1093140989542007 + 0.0793912187218666 + <_> + + <_> + + + + <_>5 0 10 6 -1. + <_>5 3 10 3 2. + 0 + -0.0444422811269760 + 0.3382704854011536 + -0.0266497191041708 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + -0.0659930929541588 + -0.5310649275779724 + 0.0175430104136467 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>10 9 4 4 2. + <_>6 13 4 4 2. + 0 + -0.0109688201919198 + -0.1661282032728195 + 0.0494883507490158 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + 0.0381490215659142 + -0.0415099002420902 + 0.2061666995286942 + <_> + + <_> + + + + <_>13 5 2 13 -1. + <_>13 5 1 13 2. + 0 + 4.0625538676977158e-003 + 0.0489250496029854 + -0.0848661810159683 + <_> + + <_> + + + + <_>5 9 6 10 -1. + <_>5 9 3 5 2. + <_>8 14 3 5 2. + 0 + 3.2693019602447748e-003 + -0.1188301965594292 + 0.0868031382560730 + <_> + + <_> + + + + <_>2 9 18 3 -1. + <_>8 9 6 3 3. + 0 + -1.2488859938457608e-003 + -0.1435472965240479 + 0.0214229691773653 + <_> + + <_> + + + + <_>5 5 2 13 -1. + <_>6 5 1 13 2. + 0 + -0.0170648898929358 + -0.5231634974479675 + 0.0165290404111147 + <_> + + <_> + + + + <_>11 10 4 10 -1. + <_>11 10 2 10 2. + 0 + -0.0233546998351812 + -0.1969852000474930 + 0.0219723004847765 + <_> + + <_> + + + + <_>5 10 4 10 -1. + <_>7 10 2 10 2. + 0 + 0.0278995297849178 + 0.0380332283675671 + -0.2232320010662079 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -0.0678694024682045 + -0.4207612872123718 + 0.0105596398934722 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0575420595705509 + -0.0421114303171635 + 0.2351571023464203 + <_> + + <_> + + + + <_>4 2 15 14 -1. + <_>9 2 5 14 3. + 0 + -0.2187730967998505 + 0.6955335140228272 + -9.9031934514641762e-003 + <_> + + <_> + + + + <_>1 2 15 14 -1. + <_>6 2 5 14 3. + 0 + 0.3777629137039185 + -0.0247218292206526 + 0.3036738932132721 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + 0.0410299003124237 + 0.0219992808997631 + -0.2470708936452866 + <_> + + <_> + + + + <_>3 0 6 9 -1. + <_>5 0 2 9 3. + 0 + 0.0255870707333088 + 0.0420451797544956 + -0.2233310043811798 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + 0.0672007724642754 + -0.0166483893990517 + 0.2426566034555435 + <_> + + <_> + + + + <_>1 3 10 8 -1. + <_>1 3 5 4 2. + <_>6 7 5 4 2. + 0 + 0.0282303895801306 + 0.0295722596347332 + -0.3012884855270386 + <_> + + <_> + + + + <_>5 13 14 6 -1. + <_>5 13 7 6 2. + 0 + 0.2458868026733398 + 1.9440819742158055e-003 + -0.4215391874313355 + <_> + + <_> + + + + <_>1 13 14 6 -1. + <_>8 13 7 6 2. + 0 + -0.0957524478435516 + -0.6471139788627625 + 0.0131804496049881 + <_> + + <_> + + + + <_>7 2 13 3 -1. + <_>7 3 13 1 3. + 0 + -0.0105965798720717 + -0.2048497051000595 + 0.0280544403940439 + <_> + + <_> + + + + <_>0 7 20 2 -1. + <_>10 7 10 2 2. + 0 + 0.0671039670705795 + 0.0290539897978306 + -0.2677051126956940 + <_> + + <_> + + + + <_>5 0 15 6 -1. + <_>10 0 5 6 3. + 0 + -0.0792808383703232 + 0.2191110998392105 + -0.0156840104609728 + <_> + + <_> + + + + <_>0 0 15 6 -1. 
+ <_>5 0 5 6 3. + 0 + -4.0710358880460262e-003 + 0.2203157991170883 + -0.0405812896788120 + <_> + + <_> + + + + <_>12 1 8 13 -1. + <_>12 1 4 13 2. + 0 + 0.0376903600990772 + -0.1294624060392380 + 0.0619215890765190 + <_> + + <_> + + + + <_>0 1 8 13 -1. + <_>4 1 4 13 2. + 0 + 0.0184539295732975 + -0.3280088901519775 + 0.0297459699213505 + <_> + + <_> + + + + <_>15 0 4 18 -1. + <_>15 0 2 18 2. + 0 + 0.1521836966276169 + 0.0119288703426719 + -0.4367868900299072 + <_> + + <_> + + + + <_>4 0 12 4 -1. + <_>8 0 4 4 3. + 0 + 0.1094895973801613 + 0.0246637798845768 + -0.3156718015670776 + <_> + + <_> + + + + <_>15 0 4 18 -1. + <_>15 0 2 18 2. + 0 + -0.0449067093431950 + 0.2308275997638702 + -0.0221633892506361 + <_> + + <_> + + + + <_>1 0 4 18 -1. + <_>3 0 2 18 2. + 0 + 0.1466861963272095 + 0.0184906590729952 + -0.4666948020458221 + <_> + + <_> + + + + <_>4 12 12 6 -1. + <_>8 12 4 6 3. + 0 + -0.0405975803732872 + 0.2069137990474701 + -0.0414120890200138 + -1.4597640037536621 + 31 + -1 + <_> + + + <_> + + <_> + + + + <_>2 0 6 5 -1. + <_>5 0 3 5 2. + 0 + 2.5723339058458805e-003 + -0.2409705966711044 + 0.1565973013639450 + <_> + + <_> + + + + <_>12 5 4 12 -1. + <_>12 9 4 4 3. + 0 + 5.7603712193667889e-003 + -0.4360102117061615 + 0.0805160328745842 + <_> + + <_> + + + + <_>4 4 11 6 -1. + <_>4 6 11 2 3. + 0 + -0.1013860031962395 + 0.3970403075218201 + -0.0657615363597870 + <_> + + <_> + + + + <_>11 6 5 6 -1. + <_>11 9 5 3 2. + 0 + 1.3221249682828784e-003 + -0.4238297939300537 + 0.0286596808582544 + <_> + + <_> + + + + <_>5 6 8 8 -1. + <_>5 6 4 4 2. + <_>9 10 4 4 2. + 0 + 5.4164527682587504e-004 + 0.0674186870455742 + -0.3101926147937775 + <_> + + <_> + + + + <_>10 9 4 8 -1. + <_>10 13 4 4 2. + 0 + 2.4447739124298096e-003 + 0.0139284199103713 + -0.2448893934488297 + <_> + + <_> + + + + <_>6 14 8 4 -1. + <_>6 16 8 2 2. + 0 + 1.4049450401216745e-003 + -0.1504099965095520 + 0.1263857930898666 + <_> + + <_> + + + + <_>10 2 4 7 -1. + <_>10 2 2 7 2. + 0 + 1.1241709580644965e-003 + -0.2743634879589081 + 0.0711756572127342 + <_> + + <_> + + + + <_>1 9 13 2 -1. + <_>1 10 13 1 2. + 0 + -1.3413740089163184e-003 + -0.3768543899059296 + 0.0500381588935852 + <_> + + <_> + + + + <_>10 2 4 7 -1. + <_>10 2 2 7 2. + 0 + 0.0417145602405071 + 0.0117330001667142 + -0.5450943708419800 + <_> + + <_> + + + + <_>6 2 4 7 -1. + <_>8 2 2 7 2. + 0 + 2.1810019388794899e-003 + -0.2084711045026779 + 0.0849292278289795 + <_> + + <_> + + + + <_>9 5 7 14 -1. + <_>9 12 7 7 2. + 0 + 0.0196557007730007 + 0.0295681897550821 + -0.2484049052000046 + <_> + + <_> + + + + <_>0 0 17 2 -1. + <_>0 1 17 1 2. + 0 + 4.9905799096450210e-004 + -0.1722225993871689 + 0.0939105227589607 + <_> + + <_> + + + + <_>5 9 10 8 -1. + <_>10 9 5 4 2. + <_>5 13 5 4 2. + 0 + 3.3110571093857288e-003 + 0.0794808268547058 + -0.1824993938207626 + <_> + + <_> + + + + <_>3 10 8 6 -1. + <_>3 12 8 2 3. + 0 + 3.4921199548989534e-003 + 0.0601597093045712 + -0.2304109036922455 + <_> + + <_> + + + + <_>7 11 7 6 -1. + <_>7 13 7 2 3. + 0 + 1.3379369629547000e-003 + -0.0783470198512077 + 0.1581453979015350 + <_> + + <_> + + + + <_>3 3 13 2 -1. + <_>3 4 13 1 2. + 0 + -3.4234288614243269e-004 + -0.1512158066034317 + 0.0959981828927994 + <_> + + <_> + + + + <_>10 2 5 6 -1. + <_>10 5 5 3 2. + 0 + -7.2008459828794003e-003 + 0.1071621030569077 + -0.1208669990301132 + <_> + + <_> + + + + <_>6 5 2 14 -1. + <_>6 12 2 7 2. + 0 + -3.3037480898201466e-003 + -0.1914276927709580 + 0.0713471099734306 + <_> + + <_> + + + + <_>12 9 4 8 -1. + <_>12 13 4 4 2. 
+ 0 + -0.0819097235798836 + -0.8508651852607727 + 6.6832960583269596e-003 + <_> + + <_> + + + + <_>4 9 4 8 -1. + <_>4 13 4 4 2. + 0 + -5.2563002100214362e-004 + 0.0718547031283379 + -0.2316266000270844 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + -0.0214773193001747 + 0.2239914983510971 + -0.0329822786152363 + <_> + + <_> + + + + <_>1 4 4 14 -1. + <_>1 4 2 7 2. + <_>3 11 2 7 2. + 0 + -0.0567004308104515 + 0.5147553086280823 + -0.0233782306313515 + <_> + + <_> + + + + <_>11 0 3 20 -1. + <_>12 0 1 20 3. + 0 + 0.0184196997433901 + 0.0188533607870340 + -0.4470109045505524 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -8.8926553726196289e-003 + 0.1849759966135025 + -0.0669785067439079 + <_> + + <_> + + + + <_>6 2 9 5 -1. + <_>9 2 3 5 3. + 0 + 0.0126423696056008 + 0.0865711495280266 + -0.1423393040895462 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + 8.0502573400735855e-003 + -0.0770524218678474 + 0.2134090065956116 + <_> + + <_> + + + + <_>11 0 3 20 -1. + <_>12 0 1 20 3. + 0 + -6.9165248423814774e-003 + -0.1784826964139938 + 0.0564155988395214 + <_> + + <_> + + + + <_>0 0 4 14 -1. + <_>2 0 2 14 2. + 0 + -0.0141944400966167 + 0.1876329928636551 + -0.0675882175564766 + <_> + + <_> + + + + <_>11 0 3 20 -1. + <_>12 0 1 20 3. + 0 + 3.5530389286577702e-003 + 0.0389252491295338 + -0.1498124003410339 + <_> + + <_> + + + + <_>6 0 3 20 -1. + <_>7 0 1 20 3. + 0 + 4.8001301474869251e-003 + 0.0449633114039898 + -0.2459513992071152 + <_> + + <_> + + + + <_>14 2 6 7 -1. + <_>16 2 2 7 3. + 0 + 9.0420730412006378e-003 + -0.0536144003272057 + 0.1382469981908798 + <_> + + <_> + + + + <_>0 2 6 7 -1. + <_>2 2 2 7 3. + 0 + 4.3342178687453270e-003 + -0.0861664414405823 + 0.1279340982437134 + <_> + + <_> + + + + <_>13 0 3 13 -1. + <_>14 0 1 13 3. + 0 + 0.0122646996751428 + 0.0362030602991581 + -0.3749409914016724 + <_> + + <_> + + + + <_>1 1 18 14 -1. + <_>7 1 6 14 3. + 0 + 0.0491555295884609 + -0.0913192629814148 + 0.1258798986673355 + <_> + + <_> + + + + <_>10 1 3 13 -1. + <_>11 1 1 13 3. + 0 + -5.8642931981012225e-004 + 0.0937025919556618 + -0.1073611974716187 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0329710505902767 + 0.0272385291755199 + -0.4500569999217987 + <_> + + <_> + + + + <_>4 10 16 4 -1. + <_>12 10 8 2 2. + <_>4 12 8 2 2. + 0 + 1.6174600459635258e-003 + 0.0328630097210407 + -0.1424130946397781 + <_> + + <_> + + + + <_>0 10 18 4 -1. + <_>0 10 9 2 2. + <_>9 12 9 2 2. + 0 + 1.0178020456805825e-003 + 0.0698985382914543 + -0.1750721037387848 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 3.4081579651683569e-003 + -0.0779706165194511 + 0.0584236904978752 + <_> + + <_> + + + + <_>1 4 14 6 -1. + <_>1 4 7 3 2. + <_>8 7 7 3 2. + 0 + -6.9078300148248672e-003 + 0.1171109005808830 + -0.0953809991478920 + <_> + + <_> + + + + <_>11 2 3 10 -1. + <_>11 7 3 5 2. + 0 + -7.8317627776414156e-004 + 0.0637309402227402 + -0.0881908833980560 + <_> + + <_> + + + + <_>5 3 9 10 -1. + <_>5 8 9 5 2. + 0 + -0.0135788703337312 + -0.2716825008392334 + 0.0396881587803364 + <_> + + <_> + + + + <_>11 2 3 10 -1. + <_>11 7 3 5 2. + 0 + -0.0800215303897858 + 0.6011552214622498 + -2.4968839716166258e-003 + <_> + + <_> + + + + <_>6 2 3 10 -1. + <_>6 7 3 5 2. + 0 + -1.7085570143535733e-003 + 0.1088868007063866 + -0.1052035987377167 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 8.5700387135148048e-003 + -0.0417846217751503 + 0.1485798060894013 + <_> + + <_> + + + + <_>5 0 3 20 -1. + <_>6 0 1 20 3. 
+ 0 + 0.0155185600742698 + 0.0218551605939865 + -0.4570878148078919 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 10 2 8 2. + 0 + -1.5739940572530031e-003 + 0.0506554618477821 + -0.0696584731340408 + <_> + + <_> + + + + <_>5 10 4 8 -1. + <_>7 10 2 8 2. + 0 + -1.0979890357702971e-003 + 0.0799175873398781 + -0.1189505979418755 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + -0.0262480191886425 + 0.7061498761177063 + -0.0136607801541686 + <_> + + <_> + + + + <_>4 7 12 8 -1. + <_>8 7 4 8 3. + 0 + -0.0102814603596926 + -0.1841211020946503 + 0.0664423406124115 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + -3.6530280485749245e-003 + 0.1299555003643036 + -0.0583515614271164 + <_> + + <_> + + + + <_>0 11 8 4 -1. + <_>0 13 8 2 2. + 0 + 7.8363716602325439e-003 + 0.0270732305943966 + -0.3360190987586975 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -0.0152837103232741 + 0.2556239962577820 + -0.0359409712255001 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -6.7279259674251080e-003 + 0.2466115951538086 + -0.0486734993755817 + <_> + + <_> + + + + <_>3 0 16 10 -1. + <_>11 0 8 5 2. + <_>3 5 8 5 2. + 0 + 0.1780785024166107 + 6.0471030883491039e-003 + -0.7256615161895752 + <_> + + <_> + + + + <_>0 2 18 2 -1. + <_>0 3 18 1 2. + 0 + -1.0486179962754250e-003 + -0.1933594048023224 + 0.0509406998753548 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 8.9163314551115036e-003 + 0.0330247916281223 + -0.1698628962039948 + <_> + + <_> + + + + <_>8 0 2 13 -1. + <_>9 0 1 13 2. + 0 + 4.0643039392307401e-004 + -0.1311711966991425 + 0.0668182820081711 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + -0.4749904870986939 + -0.4015274941921234 + 6.3146720640361309e-003 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1043004989624023 + 0.0240249708294868 + -0.3269580006599426 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + -0.0516501218080521 + 0.1693482995033264 + -0.0155392000451684 + <_> + + <_> + + + + <_>0 0 8 8 -1. + <_>0 0 4 4 2. + <_>4 4 4 4 2. + 0 + 0.0405062697827816 + -0.0220829807221890 + 0.3969472944736481 + <_> + + <_> + + + + <_>3 15 14 4 -1. + <_>10 15 7 2 2. + <_>3 17 7 2 2. + 0 + 0.0241797491908073 + 0.0219267792999744 + -0.4346067011356354 + <_> + + <_> + + + + <_>4 1 8 8 -1. + <_>4 1 4 4 2. + <_>8 5 4 4 2. + 0 + -3.0531319789588451e-003 + -0.1410803049802780 + 0.0561751797795296 + <_> + + <_> + + + + <_>7 9 13 3 -1. + <_>7 10 13 1 3. + 0 + -0.0171236507594585 + -0.6334189772605896 + 9.8466947674751282e-003 + <_> + + <_> + + + + <_>0 9 13 3 -1. + <_>0 10 13 1 3. + 0 + 0.0417059697210789 + 0.0109776295721531 + -0.6768128275871277 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 4.3895491398870945e-003 + -0.0577812902629375 + 0.1550164073705673 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + -4.4786250218749046e-003 + -0.1670601963996887 + 0.0465729385614395 + <_> + + <_> + + + + <_>6 7 13 2 -1. + <_>6 8 13 1 2. + 0 + 4.8733421135693789e-004 + -0.1503714025020599 + 0.0469204410910606 + <_> + + <_> + + + + <_>4 11 5 9 -1. + <_>4 14 5 3 3. + 0 + 0.0155306402593851 + 0.0225560106337070 + -0.3237045109272003 + <_> + + <_> + + + + <_>7 11 7 6 -1. + <_>7 13 7 2 3. + 0 + 0.0454431809484959 + -9.8806591704487801e-003 + 0.6081532239913940 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>2 1 2 7 3. + 0 + -0.0779602974653244 + 0.4074381887912750 + -0.0183915290981531 + <_> + + <_> + + + + <_>5 8 13 2 -1. + <_>5 9 13 1 2. 
+ 0 + -4.5014719944447279e-004 + -0.3831973075866699 + 0.0134208202362061 + <_> + + <_> + + + + <_>0 7 14 4 -1. + <_>0 7 7 2 2. + <_>7 9 7 2 2. + 0 + -0.0218527801334858 + -0.4469765126705170 + 0.0153793301433325 + <_> + + <_> + + + + <_>15 4 4 16 -1. + <_>17 4 2 8 2. + <_>15 12 2 8 2. + 0 + -0.0634108781814575 + 0.3992672860622406 + -0.0221688207238913 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -6.6417120397090912e-003 + -0.1459449976682663 + 0.0515417307615280 + <_> + + <_> + + + + <_>14 7 6 12 -1. + <_>17 7 3 6 2. + <_>14 13 3 6 2. + 0 + 0.0203554108738899 + -0.0231136791408062 + 0.1879265010356903 + <_> + + <_> + + + + <_>3 16 12 4 -1. + <_>7 16 4 4 3. + 0 + 9.2754261568188667e-003 + -0.0558089315891266 + 0.1350426971912384 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + -0.0640752837061882 + 0.2625977098941803 + -0.0319132506847382 + <_> + + <_> + + + + <_>2 7 15 5 -1. + <_>7 7 5 5 3. + 0 + 0.0575378984212875 + 0.0347036905586720 + -0.2720398902893066 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0133699998259544 + -0.1025179028511047 + 0.0207198299467564 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 2.9637520201504230e-003 + -0.0575798191130161 + 0.1334629952907562 + <_> + + <_> + + + + <_>7 0 6 12 -1. + <_>10 0 3 6 2. + <_>7 6 3 6 2. + 0 + -4.7313207760453224e-003 + -0.1422922015190125 + 0.0531062483787537 + <_> + + <_> + + + + <_>4 3 12 10 -1. + <_>8 3 4 10 3. + 0 + 0.1296754032373428 + -0.0219264701008797 + 0.3358376920223236 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>8 6 4 5 2. + 0 + -2.8757948894053698e-003 + 0.0749709308147430 + -0.1018306016921997 + <_> + + <_> + + + + <_>0 3 20 8 -1. + <_>0 7 20 4 2. + 0 + -0.0135463597252965 + -0.1531372070312500 + 0.0522473901510239 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0635321736335754 + 9.1543495655059814e-003 + -0.7486910820007324 + <_> + + <_> + + + + <_>0 7 6 12 -1. + <_>0 7 3 6 2. + <_>3 13 3 6 2. + 0 + -0.0102614099159837 + 0.1274251937866211 + -0.0567860715091228 + <_> + + <_> + + + + <_>12 5 2 14 -1. + <_>12 12 2 7 2. + 0 + -0.0433319285511971 + -0.6182907223701477 + 8.0406935885548592e-003 + <_> + + <_> + + + + <_>0 10 6 10 -1. + <_>0 10 3 5 2. + <_>3 15 3 5 2. + 0 + 4.0195342153310776e-003 + -0.0541303083300591 + 0.1486448049545288 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 6.7003332078456879e-003 + 0.0375072993338108 + -0.1998623013496399 + <_> + + <_> + + + + <_>2 0 16 8 -1. + <_>2 0 8 4 2. + <_>10 4 8 4 2. + 0 + -0.0112082399427891 + -0.1470471024513245 + 0.0571894012391567 + <_> + + <_> + + + + <_>9 5 7 9 -1. + <_>9 8 7 3 3. + 0 + -3.7890970706939697e-003 + 0.1552940011024475 + -0.0379304885864258 + <_> + + <_> + + + + <_>0 12 8 8 -1. + <_>0 12 4 4 2. + <_>4 16 4 4 2. + 0 + -0.0110984798520803 + 0.1785044074058533 + -0.0456896498799324 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + -7.3761218227446079e-003 + -0.1089164018630981 + 0.0744255930185318 + <_> + + <_> + + + + <_>0 10 16 4 -1. + <_>0 10 8 2 2. + <_>8 12 8 2 2. + 0 + -3.2149269245564938e-003 + 0.0906417071819305 + -0.0943770334124565 + <_> + + <_> + + + + <_>0 2 20 4 -1. + <_>10 2 10 2 2. + <_>0 4 10 2 2. + 0 + -3.5010059364140034e-003 + -0.1349819004535675 + 0.0666527226567268 + <_> + + <_> + + + + <_>3 5 4 14 -1. + <_>3 5 2 7 2. + <_>5 12 2 7 2. + 0 + -1.4920319699740503e-005 + -0.1050548031926155 + 0.0845831707119942 + <_> + + <_> + + + + <_>5 10 11 9 -1. + <_>5 13 11 3 3. 
+ 0 + 9.5882397145032883e-003 + 0.0194214992225170 + -0.2473284006118774 + <_> + + <_> + + + + <_>2 9 4 9 -1. + <_>4 9 2 9 2. + 0 + 0.0572749599814415 + 8.1852423027157784e-003 + -0.7950854897499085 + <_> + + <_> + + + + <_>3 14 14 3 -1. + <_>3 15 14 1 3. + 0 + 0.0245496407151222 + -0.0155159803107381 + 0.4899547994136810 + <_> + + <_> + + + + <_>3 4 4 15 -1. + <_>3 9 4 5 3. + 0 + -0.0467925593256950 + -0.8472008705139160 + 9.0526090934872627e-003 + <_> + + <_> + + + + <_>7 4 13 3 -1. + <_>7 5 13 1 3. + 0 + 3.1038739252835512e-003 + -0.0532710291445255 + 0.0788155570626259 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0342410318553448 + -0.4816122055053711 + 0.0136543400585651 + <_> + + <_> + + + + <_>11 0 9 7 -1. + <_>14 0 3 7 3. + 0 + 4.4056270271539688e-003 + -0.0492804385721684 + 0.0787091627717018 + <_> + + <_> + + + + <_>1 10 6 7 -1. + <_>3 10 2 7 3. + 0 + 2.3878510110080242e-003 + -0.0768876597285271 + 0.0846145823597908 + <_> + + <_> + + + + <_>13 0 3 17 -1. + <_>14 0 1 17 3. + 0 + -0.0116212302818894 + -0.2308605015277863 + 0.0225848108530045 + <_> + + <_> + + + + <_>9 4 2 13 -1. + <_>10 4 1 13 2. + 0 + 2.5225759018212557e-003 + -0.0508131310343742 + 0.1381040066480637 + <_> + + <_> + + + + <_>6 6 12 9 -1. + <_>10 6 4 9 3. + 0 + 0.1350747048854828 + 7.5730998069047928e-003 + -0.4795505106449127 + <_> + + <_> + + + + <_>2 6 12 9 -1. + <_>6 6 4 9 3. + 0 + -2.2317951079457998e-003 + -0.0902587920427322 + 0.0831187665462494 + <_> + + <_> + + + + <_>3 14 14 4 -1. + <_>10 14 7 2 2. + <_>3 16 7 2 2. + 0 + -0.0300617106258869 + -0.5179914236068726 + 0.0128817101940513 + <_> + + <_> + + + + <_>3 3 13 4 -1. + <_>3 5 13 2 2. + 0 + -0.0454643517732620 + 0.2066098004579544 + -0.0348603986203671 + <_> + + <_> + + + + <_>10 14 10 6 -1. + <_>10 16 10 2 3. + 0 + -9.2374589294195175e-003 + -0.1469502002000809 + 0.0313202589750290 + <_> + + <_> + + + + <_>0 14 11 6 -1. + <_>0 16 11 2 3. + 0 + 6.0185948386788368e-003 + 0.0638856217265129 + -0.1177961975336075 + <_> + + <_> + + + + <_>1 0 18 4 -1. + <_>7 0 6 4 3. + 0 + -0.0103228101506829 + 0.1795835047960281 + -0.0468300282955170 + <_> + + <_> + + + + <_>4 0 3 17 -1. + <_>5 0 1 17 3. + 0 + -1.7961780540645123e-003 + -0.1137404963374138 + 0.0617303811013699 + <_> + + <_> + + + + <_>13 3 3 17 -1. + <_>14 3 1 17 3. + 0 + 7.1363700553774834e-003 + 0.0335745215415955 + -0.1547258943319321 + <_> + + <_> + + + + <_>1 0 18 9 -1. + <_>7 0 6 9 3. + 0 + 0.0694877728819847 + -0.0591620095074177 + 0.1384111046791077 + <_> + + <_> + + + + <_>9 7 9 6 -1. + <_>12 7 3 6 3. + 0 + -0.0383218713104725 + 0.1562871932983398 + -0.0318156518042088 + <_> + + <_> + + + + <_>4 3 3 17 -1. + <_>5 3 1 17 3. + 0 + 3.9706169627606869e-003 + 0.0512525290250778 + -0.1761599928140640 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + -3.9275288581848145e-003 + 0.0789479985833168 + -0.0514867305755615 + <_> + + <_> + + + + <_>2 14 13 3 -1. + <_>2 15 13 1 3. + 0 + 1.9882800988852978e-003 + -0.0504746511578560 + 0.1336632966995239 + <_> + + <_> + + + + <_>5 14 15 3 -1. + <_>5 15 15 1 3. + 0 + -1.6472870483994484e-003 + 0.0491801984608173 + -0.0534374900162220 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0115801095962524 + -0.1322430968284607 + 0.0583215095102787 + <_> + + <_> + + + + <_>7 7 9 6 -1. + <_>7 10 9 3 2. + 0 + 0.0434967912733555 + -0.0235273800790310 + 0.1217914000153542 + <_> + + <_> + + + + <_>8 5 3 10 -1. + <_>8 10 3 5 2. 
+ 0 + 1.8956169951707125e-003 + 0.0560729391872883 + -0.1199728995561600 + <_> + + <_> + + + + <_>5 8 14 2 -1. + <_>5 9 14 1 2. + 0 + 2.4906420148909092e-003 + -0.1279992014169693 + 0.0352185703814030 + <_> + + <_> + + + + <_>0 6 13 3 -1. + <_>0 7 13 1 3. + 0 + -0.0602531507611275 + -0.7870790958404541 + 7.7965850941836834e-003 + <_> + + <_> + + + + <_>3 13 17 6 -1. + <_>3 15 17 2 3. + 0 + -0.0153068099170923 + -0.1227606013417244 + 0.0425373911857605 + <_> + + <_> + + + + <_>6 15 8 4 -1. + <_>6 17 8 2 2. + 0 + 3.6899570841342211e-004 + -0.1219256967306137 + 0.0596502311527729 + <_> + + <_> + + + + <_>6 7 14 2 -1. + <_>6 8 14 1 2. + 0 + 3.0398070812225342e-003 + -0.0630238428711891 + 0.0509180910885334 + <_> + + <_> + + + + <_>6 7 6 8 -1. + <_>6 11 6 4 2. + 0 + -3.5760499304160476e-004 + -0.0768593326210976 + 0.0866243168711662 + <_> + + <_> + + + + <_>5 4 13 3 -1. + <_>5 5 13 1 3. + 0 + -2.7939230203628540e-003 + 0.1307436972856522 + -0.0469127111136913 + <_> + + <_> + + + + <_>0 0 6 10 -1. + <_>0 0 3 5 2. + <_>3 5 3 5 2. + 0 + 4.2060539126396179e-003 + -0.0531197190284729 + 0.1286624073982239 + <_> + + <_> + + + + <_>8 5 12 4 -1. + <_>12 5 4 4 3. + 0 + 0.0514486990869045 + 0.0110803702846169 + -0.4143421053886414 + <_> + + <_> + + + + <_>6 5 2 14 -1. + <_>6 12 2 7 2. + 0 + 0.0328598804771900 + 0.0174953099340200 + -0.3753879070281982 + <_> + + <_> + + + + <_>11 0 9 7 -1. + <_>14 0 3 7 3. + 0 + -0.0484080612659454 + 0.1701187938451767 + -0.0237264502793550 + <_> + + <_> + + + + <_>0 5 12 4 -1. + <_>4 5 4 4 3. + 0 + 0.0140613401308656 + 0.0259813908487558 + -0.2763577103614807 + <_> + + <_> + + + + <_>11 0 9 7 -1. + <_>14 0 3 7 3. + 0 + 0.0521964393556118 + -9.5534622669219971e-003 + 0.1097346991300583 + <_> + + <_> + + + + <_>0 0 9 7 -1. + <_>3 0 3 7 3. + 0 + 0.0447802618145943 + -0.0270329304039478 + 0.2743470966815949 + <_> + + <_> + + + + <_>2 13 16 4 -1. + <_>10 13 8 2 2. + <_>2 15 8 2 2. + 0 + -3.7703409325331450e-003 + -0.1441286951303482 + 0.0523424707353115 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + -4.1479258798062801e-003 + -0.1370683014392853 + 0.0496210902929306 + <_> + + <_> + + + + <_>5 0 10 8 -1. + <_>5 4 10 4 2. + 0 + 0.0146851502358913 + -0.0499496683478355 + 0.1365865021944046 + <_> + + <_> + + + + <_>5 2 10 14 -1. + <_>5 9 10 7 2. + 0 + 0.0103258499875665 + 0.0836594626307487 + -0.1037800982594490 + <_> + + <_> + + + + <_>7 7 13 2 -1. + <_>7 8 13 1 2. + 0 + -1.7972270143218338e-004 + -0.0866589173674583 + 0.0225923694670200 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 0.0200810004025698 + -0.0195899493992329 + 0.3435873985290527 + <_> + + <_> + + + + <_>4 0 13 3 -1. + <_>4 1 13 1 3. + 0 + -0.0229055806994438 + -0.4248282015323639 + 0.0154167702421546 + <_> + + <_> + + + + <_>5 0 10 4 -1. + <_>5 2 10 2 2. + 0 + -0.0555060282349586 + 0.7314381003379822 + -9.4347409904003143e-003 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + -1.7899540252983570e-003 + -0.0819517821073532 + 0.0358237884938717 + <_> + + <_> + + + + <_>0 0 18 3 -1. + <_>9 0 9 3 2. + 0 + -8.0740358680486679e-004 + 0.0866209790110588 + -0.0787586122751236 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>8 17 6 3 3. + 0 + 0.0244450196623802 + -0.0220042504370213 + 0.0941588431596756 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>6 17 6 3 3. + 0 + -7.5640110298991203e-003 + 0.1201172992587090 + -0.0723497718572617 + <_> + + <_> + + + + <_>11 16 8 4 -1. + <_>11 16 4 4 2. 
+ 0 + 2.3397218901664019e-003 + -0.0810343474149704 + 0.0981736183166504 + <_> + + <_> + + + + <_>0 3 18 15 -1. + <_>0 8 18 5 3. + 0 + -0.0318176113069057 + -0.3573046922683716 + 0.0196013096719980 + <_> + + <_> + + + + <_>2 9 16 8 -1. + <_>2 13 16 4 2. + 0 + 0.0100280800834298 + -0.0241604596376419 + 0.3134033977985382 + <_> + + <_> + + + + <_>0 10 7 4 -1. + <_>0 12 7 2 2. + 0 + 9.0504523541312665e-005 + 0.0580506287515163 + -0.1176043972373009 + <_> + + <_> + + + + <_>4 5 12 12 -1. + <_>10 5 6 6 2. + <_>4 11 6 6 2. + 0 + -0.0210107509046793 + -0.2034603953361511 + 0.0341454111039639 + <_> + + <_> + + + + <_>5 12 9 5 -1. + <_>8 12 3 5 3. + 0 + -7.1200268575921655e-004 + 0.0633031502366066 + -0.1049738973379135 + <_> + + <_> + + + + <_>18 0 2 16 -1. + <_>18 8 2 8 2. + 0 + -7.6272932346910238e-004 + -0.0744325667619705 + 0.0349122285842896 + <_> + + <_> + + + + <_>0 0 2 16 -1. + <_>0 8 2 8 2. + 0 + -0.0585063286125660 + 0.5575838088989258 + -0.0126664899289608 + <_> + + <_> + + + + <_>7 1 13 3 -1. + <_>7 2 13 1 3. + 0 + 2.4057500995695591e-003 + 0.0446050688624382 + -0.1158159002661705 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + -0.0197295192629099 + -0.4755010902881622 + 0.0155485598370433 + <_> + + <_> + + + + <_>14 7 6 10 -1. + <_>17 7 3 5 2. + <_>14 12 3 5 2. + 0 + -0.0226451307535172 + 0.1182895004749298 + -0.0221709292382002 + <_> + + <_> + + + + <_>0 2 12 6 -1. + <_>0 2 6 3 2. + <_>6 5 6 3 2. + 0 + -1.3123790267854929e-003 + 0.0506355389952660 + -0.1342331022024155 + <_> + + <_> + + + + <_>10 0 10 10 -1. + <_>15 0 5 5 2. + <_>10 5 5 5 2. + 0 + -5.9856739826500416e-003 + 0.0542738214135170 + -0.0696390569210052 + <_> + + <_> + + + + <_>0 0 10 10 -1. + <_>0 0 5 5 2. + <_>5 5 5 5 2. + 0 + 0.0522454492747784 + -0.0183413606137037 + 0.4168938100337982 + <_> + + <_> + + + + <_>2 7 18 4 -1. + <_>11 7 9 2 2. + <_>2 9 9 2 2. + 0 + -4.6837949194014072e-003 + -0.1212126016616821 + 0.0391879193484783 + <_> + + <_> + + + + <_>5 3 6 14 -1. + <_>5 3 3 7 2. + <_>8 10 3 7 2. + 0 + -0.0152083998546004 + -0.0964878425002098 + 0.0653250217437744 + <_> + + <_> + + + + <_>9 2 3 13 -1. + <_>10 2 1 13 3. + 0 + -5.7328920811414719e-003 + 0.2102347016334534 + -0.0317212603986263 + <_> + + <_> + + + + <_>0 7 6 10 -1. + <_>0 7 3 5 2. + <_>3 12 3 5 2. + 0 + -3.7612610030919313e-003 + 0.1008588001132011 + -0.0613929517567158 + <_> + + <_> + + + + <_>13 4 3 13 -1. + <_>14 4 1 13 3. + 0 + -0.0109805203974247 + -0.1834243983030319 + 0.0171212498098612 + <_> + + <_> + + + + <_>1 16 8 4 -1. + <_>5 16 4 4 2. + 0 + 2.7213071007281542e-003 + -0.0584041401743889 + 0.1072904989123344 + <_> + + <_> + + + + <_>5 15 15 5 -1. + <_>10 15 5 5 3. + 0 + -0.0189692694693804 + 0.0747647285461426 + -0.0340562015771866 + <_> + + <_> + + + + <_>7 3 4 13 -1. + <_>9 3 2 13 2. + 0 + -7.1104627568274736e-004 + -0.1474957019090653 + 0.0524471588432789 + <_> + + <_> + + + + <_>7 4 13 3 -1. + <_>7 5 13 1 3. + 0 + 9.4774961471557617e-003 + -0.0252324901521206 + 0.1067759990692139 + <_> + + <_> + + + + <_>2 0 16 8 -1. + <_>2 0 8 4 2. + <_>10 4 8 4 2. + 0 + 0.1027588024735451 + 0.0100393602624536 + -0.6463056802749634 + <_> + + <_> + + + + <_>13 7 6 11 -1. + <_>15 7 2 11 3. + 0 + -0.1122817993164063 + -0.5724760890007019 + 6.3971187919378281e-003 + <_> + + <_> + + + + <_>7 9 6 10 -1. + <_>7 9 3 5 2. + <_>10 14 3 5 2. + 0 + -0.0256835799664259 + -0.3200407922267914 + 0.0172394495457411 + <_> + + <_> + + + + <_>7 5 9 8 -1. + <_>10 5 3 8 3. 
+ 0 + 0.0254942998290062 + -0.0221277792006731 + 0.1183812022209168 + <_> + + <_> + + + + <_>4 5 3 13 -1. + <_>5 5 1 13 3. + 0 + -0.0304587893188000 + -0.5874788165092468 + 9.8222652450203896e-003 + <_> + + <_> + + + + <_>10 4 6 12 -1. + <_>10 8 6 4 3. + 0 + -0.0278161205351353 + 0.3678570985794067 + -0.0122603401541710 + <_> + + <_> + + + + <_>7 4 6 7 -1. + <_>9 4 2 7 3. + 0 + -1.2768269516527653e-003 + 0.2415042966604233 + -0.0245034098625183 + <_> + + <_> + + + + <_>5 6 12 4 -1. + <_>9 6 4 4 3. + 0 + -0.0764358267188072 + -0.6347172260284424 + 2.7080429717898369e-003 + <_> + + <_> + + + + <_>3 6 12 4 -1. + <_>7 6 4 4 3. + 0 + 3.7574430461972952e-004 + -0.1331682056188583 + 0.0461895912885666 + <_> + + <_> + + + + <_>16 4 4 8 -1. + <_>16 8 4 4 2. + 0 + 0.0131938103586435 + 0.0265014804899693 + -0.0685159787535667 + <_> + + <_> + + + + <_>4 5 9 8 -1. + <_>7 5 3 8 3. + 0 + -0.0636896193027496 + 0.4112663865089417 + -0.0156471207737923 + <_> + + <_> + + + + <_>16 4 4 8 -1. + <_>16 8 4 4 2. + 0 + -8.0426287604495883e-004 + -0.0940060988068581 + 0.0310020707547665 + <_> + + <_> + + + + <_>4 5 8 15 -1. + <_>4 10 8 5 3. + 0 + 8.2476891111582518e-004 + -0.1592881977558136 + 0.0370967909693718 + <_> + + <_> + + + + <_>5 14 13 2 -1. + <_>5 15 13 1 2. + 0 + 4.8443409614264965e-003 + -0.0256988797336817 + 0.1507900953292847 + <_> + + <_> + + + + <_>1 7 4 13 -1. + <_>3 7 2 13 2. + 0 + 0.0229413192719221 + 0.0229411497712135 + -0.2775906920433044 + <_> + + <_> + + + + <_>11 9 6 8 -1. + <_>11 9 3 8 2. + 0 + 5.6285588070750237e-003 + 0.0201216191053391 + -0.0635844171047211 + <_> + + <_> + + + + <_>3 9 6 8 -1. + <_>6 9 3 8 2. + 0 + -8.1927451537922025e-004 + 0.0559341385960579 + -0.1077606007456780 + <_> + + <_> + + + + <_>8 1 9 15 -1. + <_>11 1 3 15 3. + 0 + 5.1910132169723511e-003 + -0.0267819706350565 + 0.0550941713154316 + <_> + + <_> + + + + <_>3 1 9 15 -1. + <_>6 1 3 15 3. + 0 + -0.0202204994857311 + -0.1250178068876267 + 0.0592748299241066 + <_> + + <_> + + + + <_>9 7 9 6 -1. + <_>12 7 3 6 3. + 0 + -3.6798599176108837e-003 + 0.0604743212461472 + -0.0596323497593403 + <_> + + <_> + + + + <_>0 5 6 7 -1. + <_>2 5 2 7 3. + 0 + 0.0104838600382209 + -0.0536522604525089 + 0.1290611028671265 + <_> + + <_> + + + + <_>11 2 2 16 -1. + <_>11 2 1 16 2. + 0 + 0.0179044604301453 + 0.0143182901665568 + -0.2734973132610321 + <_> + + <_> + + + + <_>1 1 18 10 -1. + <_>7 1 6 10 3. + 0 + 0.3369382023811340 + -8.6311781778931618e-003 + 0.7328857183456421 + <_> + + <_> + + + + <_>10 8 10 8 -1. + <_>15 8 5 4 2. + <_>10 12 5 4 2. + 0 + -0.1080747991800308 + -0.5070748925209045 + 6.7152627743780613e-003 + <_> + + <_> + + + + <_>0 8 10 8 -1. + <_>0 8 5 4 2. + <_>5 12 5 4 2. + 0 + -0.1221961006522179 + -0.7935271859169006 + 7.4890498071908951e-003 + <_> + + <_> + + + + <_>11 2 2 16 -1. + <_>11 2 1 16 2. + 0 + -3.7357630208134651e-003 + -0.1543643027544022 + 0.0199333596974611 + <_> + + <_> + + + + <_>3 9 12 11 -1. + <_>9 9 6 11 2. + 0 + 0.0472835302352905 + -0.0321807414293289 + 0.2233242988586426 + <_> + + <_> + + + + <_>6 7 10 3 -1. + <_>6 7 5 3 2. + 0 + -4.8949089832603931e-003 + -0.1444084942340851 + 0.0276874192059040 + <_> + + <_> + + + + <_>3 1 10 16 -1. + <_>3 1 5 8 2. + <_>8 9 5 8 2. + 0 + -4.6767960302531719e-003 + 0.0425895191729069 + -0.1318124979734421 + <_> + + <_> + + + + <_>8 3 8 10 -1. + <_>12 3 4 5 2. + <_>8 8 4 5 2. + 0 + -0.0405265688896179 + 0.1515536010265350 + -0.0131374001502991 + <_> + + <_> + + + + <_>4 3 8 10 -1. + <_>4 3 4 5 2. + <_>8 8 4 5 2. 
+ 0 + 5.1309340633451939e-003 + -0.0424363985657692 + 0.1942812949419022 + <_> + + <_> + + + + <_>10 11 9 6 -1. + <_>10 14 9 3 2. + 0 + 4.9947341904044151e-003 + 0.0206563007086515 + -0.1833256036043167 + <_> + + <_> + + + + <_>1 11 9 6 -1. + <_>1 14 9 3 2. + 0 + -0.0109464498236775 + -0.1157637014985085 + 0.0619641989469528 + <_> + + <_> + + + + <_>6 16 14 4 -1. + <_>13 16 7 2 2. + <_>6 18 7 2 2. + 0 + -6.7135482095181942e-003 + 0.1579674929380417 + -0.0353996194899082 + <_> + + <_> + + + + <_>1 0 9 18 -1. + <_>1 6 9 6 3. + 0 + -0.0309906303882599 + -0.1727104932069778 + 0.0379165709018707 + <_> + + <_> + + + + <_>8 3 12 4 -1. + <_>8 5 12 2 2. + 0 + -2.7503890451043844e-003 + 0.0414951592683792 + -0.0551527887582779 + <_> + + <_> + + + + <_>1 5 7 9 -1. + <_>1 8 7 3 3. + 0 + -0.0247004292905331 + 0.2907611131668091 + -0.0205526407808065 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + -0.0176072698086500 + -0.0986715033650398 + 0.0328004509210587 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 8.7928329594433308e-004 + 0.0364424213767052 + -0.1751804053783417 + <_> + + <_> + + + + <_>9 2 6 7 -1. + <_>11 2 2 7 3. + 0 + 6.9036949425935745e-003 + 0.0214442703872919 + -0.1199729964137077 + <_> + + <_> + + + + <_>5 2 6 7 -1. + <_>7 2 2 7 3. + 0 + -2.2592858877032995e-003 + 0.0959442481398582 + -0.0812644809484482 + <_> + + <_> + + + + <_>4 16 15 4 -1. + <_>9 16 5 4 3. + 0 + 0.0158859398216009 + -0.0314941108226776 + 0.0875319465994835 + <_> + + <_> + + + + <_>0 17 15 3 -1. + <_>5 17 5 3 3. + 0 + 0.0193797107785940 + -0.0350754894316196 + 0.1619918942451477 + <_> + + <_> + + + + <_>2 2 18 18 -1. + <_>8 2 6 18 3. + 0 + -0.0235653296113014 + 0.0993678122758865 + -0.0504099614918232 + <_> + + <_> + + + + <_>5 4 4 16 -1. + <_>7 4 2 16 2. + 0 + -6.2582190148532391e-003 + -0.1596260964870453 + 0.0568719506263733 + <_> + + <_> + + + + <_>6 9 9 6 -1. + <_>9 9 3 6 3. + 0 + 0.0102890403941274 + 0.0324222594499588 + -0.1182584017515183 + <_> + + <_> + + + + <_>1 14 10 6 -1. + <_>1 14 5 3 2. + <_>6 17 5 3 2. + 0 + -5.8485912159085274e-003 + 0.1910745948553085 + -0.0370847396552563 + <_> + + <_> + + + + <_>6 7 12 5 -1. + <_>10 7 4 5 3. + 0 + -0.0858051627874374 + -0.4087724983692169 + 0.0127811003476381 + <_> + + <_> + + + + <_>0 10 5 9 -1. + <_>0 13 5 3 3. + 0 + -2.4852859787642956e-003 + -0.1011639982461929 + 0.0563114807009697 + <_> + + <_> + + + + <_>13 10 6 9 -1. + <_>13 13 6 3 3. + 0 + -7.1535720489919186e-003 + -0.0441186092793942 + 0.0222171694040298 + <_> + + <_> + + + + <_>1 10 6 9 -1. + <_>1 13 6 3 3. + 0 + 1.2644700473174453e-003 + 0.0653055980801582 + -0.1227300018072128 + <_> + + <_> + + + + <_>5 7 10 4 -1. + <_>5 9 10 2 2. + 0 + 0.0398256890475750 + -0.0504029802978039 + 0.1442425996065140 + <_> + + <_> + + + + <_>1 5 18 12 -1. + <_>1 9 18 4 3. + 0 + 0.0133226700127125 + 0.2323541939258575 + -0.0281981695443392 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 0.0210173502564430 + -0.0196532607078552 + 0.1043256968259811 + <_> + + <_> + + + + <_>2 4 13 14 -1. + <_>2 11 13 7 2. + 0 + 0.2451521009206772 + 8.4479590877890587e-003 + -0.7483342289924622 + <_> + + <_> + + + + <_>10 8 6 6 -1. + <_>10 8 3 6 2. + 0 + 4.3030278757214546e-003 + 0.0311724804341793 + -0.0941835865378380 + <_> + + <_> + + + + <_>2 1 16 8 -1. + <_>2 5 16 4 2. + 0 + 0.0222244802862406 + -0.0396029204130173 + 0.1561487019062042 + <_> + + <_> + + + + <_>10 8 6 6 -1. + <_>10 8 3 6 2. 
+ 0 + -8.5019748657941818e-003 + -0.1085231974720955 + 0.0280456002801657 + <_> + + <_> + + + + <_>4 0 11 6 -1. + <_>4 2 11 2 3. + 0 + 0.0108455400913954 + -0.0655941590666771 + 0.1021739989519119 + <_> + + <_> + + + + <_>2 2 16 2 -1. + <_>2 3 16 1 2. + 0 + 1.7696369905024767e-003 + 0.0753691419959068 + -0.0952988266944885 + <_> + + <_> + + + + <_>4 15 12 5 -1. + <_>10 15 6 5 2. + 0 + 0.1028904989361763 + -0.0117672299966216 + 0.4816721081733704 + <_> + + <_> + + + + <_>10 8 6 6 -1. + <_>10 8 3 6 2. + 0 + -0.0350741706788540 + -0.2629905045032501 + 0.0100027797743678 + <_> + + <_> + + + + <_>0 14 12 4 -1. + <_>6 14 6 4 2. + 0 + 0.0383029989898205 + 0.0108839496970177 + -0.5809292793273926 + <_> + + <_> + + + + <_>12 7 6 6 -1. + <_>12 10 6 3 2. + 0 + 0.0121831195428967 + 0.0310989990830421 + -0.0542579293251038 + <_> + + <_> + + + + <_>1 5 6 14 -1. + <_>1 5 3 7 2. + <_>4 12 3 7 2. + 0 + 0.0203881394118071 + -0.0373795405030251 + 0.1872545033693314 + <_> + + <_> + + + + <_>10 2 9 13 -1. + <_>13 2 3 13 3. + 0 + 6.5857400186359882e-003 + -0.0441947802901268 + 0.0600337907671928 + <_> + + <_> + + + + <_>4 8 6 6 -1. + <_>7 8 3 6 2. + 0 + 5.8739529922604561e-003 + 0.0392197109758854 + -0.1585793942213059 + <_> + + <_> + + + + <_>12 5 6 9 -1. + <_>12 5 3 9 2. + 0 + -0.0782790333032608 + 0.2178917974233627 + -0.0100944200530648 + <_> + + <_> + + + + <_>2 5 6 9 -1. + <_>5 5 3 9 2. + 0 + 0.0153365796431899 + -0.0312195196747780 + 0.2245240062475205 + <_> + + <_> + + + + <_>5 8 15 2 -1. + <_>5 9 15 1 2. + 0 + 1.4171670190989971e-003 + -0.1662545055150986 + 0.0276841092854738 + <_> + + <_> + + + + <_>2 9 16 3 -1. + <_>2 10 16 1 3. + 0 + -3.4021309111267328e-003 + -0.2845237851142883 + 0.0226610600948334 + <_> + + <_> + + + + <_>12 7 5 6 -1. + <_>12 10 5 3 2. + 0 + -0.0193403400480747 + 0.5230051875114441 + -5.0734821707010269e-003 + <_> + + <_> + + + + <_>3 7 5 6 -1. + <_>3 10 5 3 2. + 0 + -0.0165143199265003 + 0.7061938047409058 + -8.2714930176734924e-003 + <_> + + <_> + + + + <_>15 9 5 9 -1. + <_>15 12 5 3 3. + 0 + -6.4589809626340866e-003 + -0.1210433021187782 + 0.0387184210121632 + <_> + + <_> + + + + <_>0 13 20 4 -1. + <_>0 13 10 2 2. + <_>10 15 10 2 2. + 0 + -4.3003219179809093e-003 + -0.1210365965962410 + 0.0553358905017376 + <_> + + <_> + + + + <_>5 10 13 3 -1. + <_>5 11 13 1 3. + 0 + 0.0107842003926635 + -0.0389758199453354 + 0.1987051963806152 + <_> + + <_> + + + + <_>2 12 10 6 -1. + <_>2 12 5 3 2. + <_>7 15 5 3 2. + 0 + -1.1527650058269501e-003 + 0.0935961008071899 + -0.0642488896846771 + <_> + + <_> + + + + <_>9 10 3 10 -1. + <_>9 15 3 5 2. + 0 + -0.0421012602746487 + -0.3003219068050385 + 0.0159092992544174 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + 3.0202090274542570e-003 + -0.0653104782104492 + 0.0947547629475594 + <_> + + <_> + + + + <_>15 9 5 9 -1. + <_>15 12 5 3 3. + 0 + 0.0299999900162220 + 0.0176732297986746 + -0.2245714962482452 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + -1.3678170507773757e-003 + 0.1339491009712219 + -0.0500865504145622 + <_> + + <_> + + + + <_>15 9 5 9 -1. + <_>15 12 5 3 3. + 0 + -0.0231519509106874 + -0.1831011027097702 + 0.0191035792231560 + <_> + + <_> + + + + <_>1 6 3 13 -1. + <_>2 6 1 13 3. + 0 + 0.0638263225555420 + 7.5651248916983604e-003 + -0.8311659097671509 + <_> + + <_> + + + + <_>10 4 6 16 -1. + <_>12 4 2 16 3. + 0 + -0.1483162045478821 + -1. + 3.4445689525455236e-003 + <_> + + <_> + + + + <_>4 4 6 16 -1. + <_>6 4 2 16 3. 
+ 0 + 1.3207890151534230e-004 + 0.0511358194053173 + -0.1186320036649704 + <_> + + <_> + + + + <_>7 15 9 5 -1. + <_>10 15 3 5 3. + 0 + 0.0660787075757980 + 7.1528651751577854e-003 + -0.4290638864040375 + <_> + + <_> + + + + <_>4 16 12 4 -1. + <_>8 16 4 4 3. + 0 + 6.1758249066770077e-003 + -0.0590105801820755 + 0.1078130975365639 + <_> + + <_> + + + + <_>5 3 10 6 -1. + <_>10 3 5 3 2. + <_>5 6 5 3 2. + 0 + -0.0335061103105545 + -0.3763673901557922 + 0.0170377995818853 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + -9.7032980993390083e-003 + 0.1382033973932266 + -0.0439222007989883 + <_> + + <_> + + + + <_>6 2 14 2 -1. + <_>6 3 14 1 2. + 0 + -7.2475131601095200e-003 + -0.2219274938106537 + 0.0128019396215677 + <_> + + <_> + + + + <_>3 11 8 4 -1. + <_>7 11 4 4 2. + 0 + -0.0533093288540840 + -0.4559476077556610 + 0.0124950101599097 + <_> + + <_> + + + + <_>4 2 12 4 -1. + <_>4 2 6 4 2. + 0 + 0.0103870695456862 + -0.0516241304576397 + 0.1223623976111412 + <_> + + <_> + + + + <_>0 2 6 15 -1. + <_>0 7 6 5 3. + 0 + 0.0672085732221603 + 0.0316551215946674 + -0.2108618021011353 + <_> + + <_> + + + + <_>3 0 17 6 -1. + <_>3 2 17 2 3. + 0 + -0.0151433199644089 + 0.1722407042980194 + -0.0292099397629499 + <_> + + <_> + + + + <_>0 4 7 4 -1. + <_>0 6 7 2 2. + 0 + -0.0392849706113338 + -0.4822677969932556 + 0.0143662001937628 + <_> + + <_> + + + + <_>3 9 14 2 -1. + <_>3 9 7 2 2. + 0 + -5.1000402309000492e-003 + 0.1370041072368622 + -0.0435415916144848 + <_> + + <_> + + + + <_>4 7 10 3 -1. + <_>9 7 5 3 2. + 0 + 4.7284159809350967e-003 + 0.0654955208301544 + -0.1291383951902390 + <_> + + <_> + + + + <_>4 4 13 3 -1. + <_>4 5 13 1 3. + 0 + -0.0118776299059391 + 0.2014613002538681 + -0.0236400496214628 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -4.5396368950605392e-003 + -0.1687245070934296 + 0.0448811799287796 + <_> + + <_> + + + + <_>4 12 16 8 -1. + <_>4 12 8 8 2. + 0 + -8.0548608675599098e-003 + 0.0659163221716881 + -0.0451842285692692 + <_> + + <_> + + + + <_>0 12 16 8 -1. + <_>8 12 8 8 2. + 0 + -0.0430377312004566 + 0.1281743049621582 + -0.0630217194557190 + <_> + + <_> + + + + <_>14 9 6 10 -1. + <_>16 9 2 10 3. + 0 + 0.1095227971673012 + 6.0560060665011406e-003 + -0.5161451101303101 + <_> + + <_> + + + + <_>2 7 11 12 -1. + <_>2 11 11 4 3. + 0 + -7.0019549457356334e-004 + -0.1284541040658951 + 0.0499361008405685 + <_> + + <_> + + + + <_>9 3 3 12 -1. + <_>9 9 3 6 2. + 0 + -2.9595570595120080e-005 + 0.0670763328671455 + -0.0903971195220947 + <_> + + <_> + + + + <_>2 1 6 15 -1. + <_>2 6 6 5 3. + 0 + 0.1774964034557343 + -7.6472861692309380e-003 + 0.8971657156944275 + <_> + + <_> + + + + <_>17 7 2 13 -1. + <_>17 7 1 13 2. + 0 + -0.0553644485771656 + -0.6551393866539002 + 6.7208600230515003e-003 + <_> + + <_> + + + + <_>1 7 2 13 -1. + <_>2 7 1 13 2. + 0 + -0.0514614097774029 + -0.6533753275871277 + 8.9703118428587914e-003 + <_> + + <_> + + + + <_>0 1 20 4 -1. + <_>10 1 10 2 2. + <_>0 3 10 2 2. + 0 + -0.0265817195177078 + -0.2811642885208130 + 0.0177660901099443 + <_> + + <_> + + + + <_>6 1 7 6 -1. + <_>6 3 7 2 3. + 0 + -0.0690343379974365 + 0.9258397817611694 + -6.2460578046739101e-003 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -0.0302057303488255 + 0.2378429025411606 + -0.0162954591214657 + <_> + + <_> + + + + <_>5 10 6 6 -1. + <_>8 10 3 6 2. + 0 + -9.1226873919367790e-003 + -0.1456989049911499 + 0.0456543900072575 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>12 0 4 20 3. 
+ 0 + -0.2123378068208695 + 0.1647219955921173 + -0.0147588299587369 + <_> + + <_> + + + + <_>6 7 6 8 -1. + <_>8 7 2 8 3. + 0 + -0.0262546893209219 + 0.3038162887096405 + -0.0201085302978754 + <_> + + <_> + + + + <_>12 5 4 8 -1. + <_>12 9 4 4 2. + 0 + 3.0262209475040436e-003 + -0.1529828011989594 + 0.0268785394728184 + <_> + + <_> + + + + <_>5 2 9 5 -1. + <_>8 2 3 5 3. + 0 + 0.0838385969400406 + 0.0100423498079181 + -0.5934510231018066 + <_> + + <_> + + + + <_>8 10 12 9 -1. + <_>12 10 4 9 3. + 0 + 0.0188457593321800 + -0.0452605411410332 + 0.0842202007770538 + <_> + + <_> + + + + <_>4 15 9 5 -1. + <_>7 15 3 5 3. + 0 + -4.8671411350369453e-003 + -0.1123484000563622 + 0.0566763989627361 + -1.3393770456314087 + 32 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 3 -1. + <_>7 1 6 3 3. + 0 + 0.1190086975693703 + -0.2018668055534363 + 0.2441760003566742 + <_> + + <_> + + + + <_>12 7 8 4 -1. + <_>12 9 8 2 2. + 0 + 0.0212774891406298 + -0.2345439940690994 + 0.1630306988954544 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 3.7066950462758541e-003 + -0.2055990993976593 + 0.1498205959796906 + <_> + + <_> + + + + <_>3 4 15 16 -1. + <_>3 12 15 8 2. + 0 + 0.0329295508563519 + 0.0788030773401260 + -0.3368844091892242 + <_> + + <_> + + + + <_>0 7 8 4 -1. + <_>0 9 8 2 2. + 0 + 0.0250579603016377 + -0.1593209058046341 + 0.1640505045652390 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>9 6 2 9 3. + 0 + 6.5863109193742275e-004 + -0.2780422866344452 + 0.0830289199948311 + <_> + + <_> + + + + <_>4 11 8 9 -1. + <_>4 14 8 3 3. + 0 + -0.0662109106779099 + -0.3640215098857880 + 0.0600673481822014 + <_> + + <_> + + + + <_>11 3 9 8 -1. + <_>14 3 3 8 3. + 0 + 4.2186300270259380e-003 + -0.1855151057243347 + 0.1282822042703629 + <_> + + <_> + + + + <_>0 4 9 8 -1. + <_>3 4 3 8 3. + 0 + 1.7119459807872772e-003 + -0.2157250940799713 + 0.0868794992566109 + <_> + + <_> + + + + <_>9 4 6 10 -1. + <_>12 4 3 5 2. + <_>9 9 3 5 2. + 0 + -0.0213904809206724 + 0.1112473979592323 + -0.1448650956153870 + <_> + + <_> + + + + <_>0 4 20 4 -1. + <_>0 6 20 2 2. + 0 + 5.5712480098009109e-003 + 0.0625468790531158 + -0.3159820139408112 + <_> + + <_> + + + + <_>2 9 18 3 -1. + <_>8 9 6 3 3. + 0 + 4.5709838159382343e-003 + -0.2364789992570877 + 0.0383995696902275 + <_> + + <_> + + + + <_>3 14 13 3 -1. + <_>3 15 13 1 3. + 0 + -0.0170860309153795 + 0.2065355926752091 + -0.0864056125283241 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + -0.0306409504264593 + 0.4152300059795380 + -0.0256018508225679 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 10 4 4 2. + <_>10 14 4 4 2. + 0 + 0.0258034691214561 + 0.0401562303304672 + -0.3744401037693024 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>10 9 4 4 2. + <_>6 13 4 4 2. + 0 + 0.0264259204268456 + 0.0426257811486721 + -0.4188891053199768 + <_> + + <_> + + + + <_>0 7 10 6 -1. + <_>0 7 5 3 2. + <_>5 10 5 3 2. + 0 + -0.0118497302755713 + -0.3061988055706024 + 0.0515059493482113 + <_> + + <_> + + + + <_>7 1 8 8 -1. + <_>11 1 4 4 2. + <_>7 5 4 4 2. + 0 + -0.0162698496133089 + -0.1987849026918411 + 0.0426832400262356 + <_> + + <_> + + + + <_>5 1 8 8 -1. + <_>5 1 4 4 2. + <_>9 5 4 4 2. + 0 + -0.0240361597388983 + -0.3321199119091034 + 0.0460914187133312 + <_> + + <_> + + + + <_>10 0 8 4 -1. + <_>10 2 8 2 2. + 0 + 7.3583971243351698e-004 + -0.2067741006612778 + 0.0574182607233524 + <_> + + <_> + + + + <_>0 13 7 6 -1. + <_>0 15 7 2 3. + 0 + -0.0204231608659029 + -0.2692205905914307 + 0.0448937192559242 + <_> + + <_> + + + + <_>15 11 5 6 -1. + <_>15 14 5 3 2. 
+ 0 + 1.9533000886440277e-003 + 0.0434818491339684 + -0.1429585069417954 + <_> + + <_> + + + + <_>1 6 18 8 -1. + <_>1 6 9 4 2. + <_>10 10 9 4 2. + 0 + 0.0332025401294231 + 0.0611127205193043 + -0.2077313959598541 + <_> + + <_> + + + + <_>4 3 13 3 -1. + <_>4 4 13 1 3. + 0 + 0.0210495498031378 + -0.0551963299512863 + 0.1727333068847656 + <_> + + <_> + + + + <_>1 9 13 2 -1. + <_>1 10 13 1 2. + 0 + -4.2487941682338715e-003 + -0.3120211064815521 + 0.0357145518064499 + <_> + + <_> + + + + <_>9 12 8 8 -1. + <_>13 12 4 4 2. + <_>9 16 4 4 2. + 0 + 0.0145448902621865 + -0.1289152055978775 + 0.1087460964918137 + <_> + + <_> + + + + <_>0 11 5 6 -1. + <_>0 14 5 3 2. + 0 + 4.4858800247311592e-003 + 0.0502648502588272 + -0.2272962033748627 + <_> + + <_> + + + + <_>15 3 5 9 -1. + <_>15 6 5 3 3. + 0 + -0.0720195174217224 + -0.5035715103149414 + 0.0249091703444719 + <_> + + <_> + + + + <_>0 4 2 16 -1. + <_>0 12 2 8 2. + 0 + 0.0740883126854897 + -0.0261101797223091 + 0.4690495133399963 + <_> + + <_> + + + + <_>15 3 5 9 -1. + <_>15 6 5 3 3. + 0 + -0.0193762108683586 + -0.0877423286437988 + 0.0526968091726303 + <_> + + <_> + + + + <_>2 5 16 10 -1. + <_>2 5 8 5 2. + <_>10 10 8 5 2. + 0 + -0.0151920598000288 + -0.1647035032510757 + 0.0748419165611267 + <_> + + <_> + + + + <_>6 7 14 2 -1. + <_>6 8 14 1 2. + 0 + 6.7975218407809734e-003 + -0.1251268982887268 + 0.0820929929614067 + <_> + + <_> + + + + <_>3 2 6 10 -1. + <_>3 2 3 5 2. + <_>6 7 3 5 2. + 0 + -1.9816169515252113e-003 + 0.0612598806619644 + -0.1913881003856659 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + -0.0403438396751881 + -0.3463464081287384 + 0.0338140912353992 + <_> + + <_> + + + + <_>8 0 3 13 -1. + <_>9 0 1 13 3. + 0 + -9.7851715981960297e-003 + 0.2477196007966995 + -0.0510314293205738 + <_> + + <_> + + + + <_>9 6 3 14 -1. + <_>10 6 1 14 3. + 0 + 0.0130610503256321 + -0.0593781694769859 + 0.1429872065782547 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>9 6 1 14 3. + 0 + 0.0125199696049094 + -0.1008744016289711 + 0.2061744928359985 + <_> + + <_> + + + + <_>9 13 6 7 -1. + <_>11 13 2 7 3. + 0 + 0.0616200491786003 + 0.0108506204560399 + -0.4997675120830536 + <_> + + <_> + + + + <_>6 0 2 13 -1. + <_>7 0 1 13 2. + 0 + 0.0153516102582216 + 0.0304591804742813 + -0.4024853110313416 + <_> + + <_> + + + + <_>3 7 15 3 -1. + <_>8 7 5 3 3. + 0 + 6.7390319891273975e-003 + -0.1523087024688721 + 0.0347637310624123 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + 0.0271660406142473 + 0.0324651785194874 + -0.3790565133094788 + <_> + + <_> + + + + <_>12 11 8 6 -1. + <_>12 13 8 2 3. + 0 + -0.0494436509907246 + -0.4104248881340027 + 0.0152657004073262 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + 0.0329997092485428 + 0.0289222393184900 + -0.4311968088150024 + <_> + + <_> + + + + <_>9 1 6 7 -1. + <_>11 1 2 7 3. + 0 + 0.0376041494309902 + 0.0209206994622946 + -0.3547154068946838 + <_> + + <_> + + + + <_>2 9 9 10 -1. + <_>5 9 3 10 3. + 0 + 0.0173116400837898 + -0.1549087017774582 + 0.0735432282090187 + <_> + + <_> + + + + <_>14 0 3 18 -1. + <_>15 0 1 18 3. + 0 + -1.7037079669535160e-003 + -0.0953469201922417 + 0.0515172891318798 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -0.0150087904185057 + 0.2105749994516373 + -0.0521971695125103 + <_> + + <_> + + + + <_>9 1 6 7 -1. + <_>11 1 2 7 3. + 0 + -0.0412833616137505 + -0.4872767925262451 + 0.0166863705962896 + <_> + + <_> + + + + <_>8 2 4 8 -1. + <_>10 2 2 8 2. + 0 + -0.0171902999281883 + 0.2307074964046478 + -0.0570944398641586 + <_> + + <_> + + + + <_>14 0 3 18 -1. 
+ <_>15 0 1 18 3. + 0 + 0.0397070087492466 + 0.0170162301510572 + -0.3823386132717133 + <_> + + <_> + + + + <_>0 5 12 4 -1. + <_>4 5 4 4 3. + 0 + 0.0470514707267284 + 0.0422392487525940 + -0.2805036902427673 + <_> + + <_> + + + + <_>6 0 13 3 -1. + <_>6 1 13 1 3. + 0 + -0.0119489496573806 + -0.2305649071931839 + 0.0265321899205446 + <_> + + <_> + + + + <_>0 6 20 3 -1. + <_>0 7 20 1 3. + 0 + -0.0798574090003967 + -0.8496391773223877 + 0.0125821800902486 + <_> + + <_> + + + + <_>10 8 8 8 -1. + <_>14 8 4 4 2. + <_>10 12 4 4 2. + 0 + 0.0256276391446590 + 0.0233112405985594 + -0.2492381930351257 + <_> + + <_> + + + + <_>1 1 5 9 -1. + <_>1 4 5 3 3. + 0 + -0.0310943704098463 + -0.2376987040042877 + 0.0461161285638809 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 0.0465732216835022 + 0.0287702903151512 + -0.5373960137367249 + <_> + + <_> + + + + <_>1 4 16 6 -1. + <_>1 4 8 3 2. + <_>9 7 8 3 2. + 0 + -0.0540669299662113 + 0.2779476046562195 + -0.0477707684040070 + <_> + + <_> + + + + <_>9 0 10 6 -1. + <_>9 2 10 2 3. + 0 + 1.8918470013886690e-003 + -0.0982548296451569 + 0.0478564202785492 + <_> + + <_> + + + + <_>4 3 12 6 -1. + <_>4 5 12 2 3. + 0 + 0.0332293286919594 + -0.0525953508913517 + 0.2356410026550293 + <_> + + <_> + + + + <_>9 5 8 8 -1. + <_>9 9 8 4 2. + 0 + 1.1775200255215168e-003 + -0.2340148985385895 + 0.0261420700699091 + <_> + + <_> + + + + <_>1 0 9 6 -1. + <_>1 2 9 2 3. + 0 + 1.9482020288705826e-003 + -0.1522361934185028 + 0.0787514671683311 + <_> + + <_> + + + + <_>8 3 9 5 -1. + <_>11 3 3 5 3. + 0 + 0.0559455081820488 + 0.0115406997501850 + -0.1988953948020935 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 0.0294553693383932 + 0.0333157703280449 + -0.3285048902034760 + <_> + + <_> + + + + <_>5 4 13 3 -1. + <_>5 5 13 1 3. + 0 + 4.0880320593714714e-003 + -0.0861784070730209 + 0.0795757994055748 + <_> + + <_> + + + + <_>3 3 3 16 -1. + <_>4 3 1 16 3. + 0 + -5.9127728454768658e-003 + -0.1773830056190491 + 0.0606489405035973 + <_> + + <_> + + + + <_>14 0 3 17 -1. + <_>15 0 1 17 3. + 0 + -0.0624196790158749 + 0.2439669966697693 + -3.3243889920413494e-003 + <_> + + <_> + + + + <_>0 10 9 7 -1. + <_>3 10 3 7 3. + 0 + -0.0371951200067997 + 0.2680704891681671 + -0.0399792715907097 + <_> + + <_> + + + + <_>8 0 7 12 -1. + <_>8 4 7 4 3. + 0 + -0.1432476043701172 + 0.2933282852172852 + -0.0268972907215357 + <_> + + <_> + + + + <_>0 3 5 9 -1. + <_>0 6 5 3 3. + 0 + -0.0428452193737030 + -0.2528375089168549 + 0.0412320494651794 + <_> + + <_> + + + + <_>9 9 10 5 -1. + <_>9 9 5 5 2. + 0 + 0.1156008988618851 + -0.0149658499285579 + 0.2418725043535233 + <_> + + <_> + + + + <_>1 9 10 5 -1. + <_>6 9 5 5 2. + 0 + 0.0501694716513157 + 0.0885905474424362 + -0.1244257017970085 + <_> + + <_> + + + + <_>4 8 15 3 -1. + <_>9 8 5 3 3. + 0 + 0.1020011007785797 + 0.0123963197693229 + -0.3698217868804932 + <_> + + <_> + + + + <_>1 8 15 3 -1. + <_>6 8 5 3 3. + 0 + -5.2397060208022594e-003 + -0.2591294944286346 + 0.0405502989888191 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>10 5 5 3 2. + <_>5 8 5 3 2. + 0 + -0.0192278102040291 + 0.2006423026323319 + -0.0652235820889473 + <_> + + <_> + + + + <_>3 5 8 8 -1. + <_>3 9 8 4 2. + 0 + -0.0111331203952432 + -0.4626218974590302 + 0.0244280304759741 + <_> + + <_> + + + + <_>0 1 20 2 -1. + <_>0 1 10 2 2. + 0 + 0.0975510105490685 + 0.0129011897370219 + -0.7402247190475464 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + 0.0460717417299747 + 0.0184539891779423 + -0.4841982126235962 + <_> + + <_> + + + + <_>8 12 8 8 -1. + <_>12 12 4 4 2. 
+ <_>8 16 4 4 2. + 0 + -0.0835335329174995 + -0.8843476772308350 + 1.6764779575169086e-003 + <_> + + <_> + + + + <_>4 12 8 8 -1. + <_>4 12 4 4 2. + <_>8 16 4 4 2. + 0 + 6.0535832308232784e-003 + -0.1586564034223557 + 0.0677586719393730 + <_> + + <_> + + + + <_>7 15 13 4 -1. + <_>7 17 13 2 2. + 0 + -1.3178240042179823e-003 + -0.0879431292414665 + 0.0665913596749306 + <_> + + <_> + + + + <_>0 14 12 6 -1. + <_>0 14 6 3 2. + <_>6 17 6 3 2. + 0 + -0.0209397301077843 + 0.2335896939039230 + -0.0521456710994244 + <_> + + <_> + + + + <_>12 11 8 8 -1. + <_>16 11 4 4 2. + <_>12 15 4 4 2. + 0 + -0.0881454199552536 + 0.4808130860328674 + -0.0119176404550672 + <_> + + <_> + + + + <_>0 11 8 8 -1. + <_>0 11 4 4 2. + <_>4 15 4 4 2. + 0 + 0.0163445994257927 + -0.0538380593061447 + 0.2234991043806076 + <_> + + <_> + + + + <_>6 0 10 19 -1. + <_>6 0 5 19 2. + 0 + -0.2283399999141693 + 0.3601382076740265 + -0.0187279097735882 + <_> + + <_> + + + + <_>0 12 13 3 -1. + <_>0 13 13 1 3. + 0 + 8.4737362340092659e-003 + -0.0562071315944195 + 0.1608947068452835 + <_> + + <_> + + + + <_>7 2 6 12 -1. + <_>7 8 6 6 2. + 0 + -9.8505034111440182e-004 + 0.1010883003473282 + -0.1045522987842560 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 4.9648447893559933e-003 + -0.0793593674898148 + 0.1314024031162262 + <_> + + <_> + + + + <_>11 14 9 4 -1. + <_>11 16 9 2 2. + 0 + -0.0131716104224324 + -0.1209981963038445 + 0.0377301312983036 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 8.2112876698374748e-003 + -0.0535974092781544 + 0.2215657979249954 + <_> + + <_> + + + + <_>11 12 8 6 -1. + <_>11 14 8 2 3. + 0 + -0.0489305593073368 + -0.3934924900531769 + 0.0198503099381924 + <_> + + <_> + + + + <_>1 12 8 6 -1. + <_>1 14 8 2 3. + 0 + 7.4527352117002010e-003 + 0.0582184381783009 + -0.2531755864620209 + <_> + + <_> + + + + <_>4 0 13 8 -1. + <_>4 4 13 4 2. + 0 + 0.0773886516690254 + -0.0577246807515621 + 0.2015454024076462 + <_> + + <_> + + + + <_>8 0 4 15 -1. + <_>8 5 4 5 3. + 0 + 4.9968929961323738e-003 + 0.0892606303095818 + -0.1308245956897736 + <_> + + <_> + + + + <_>10 8 8 8 -1. + <_>14 8 4 4 2. + <_>10 12 4 4 2. + 0 + -0.0409772694110870 + -0.1719042956829071 + 0.0220514498651028 + <_> + + <_> + + + + <_>8 7 3 10 -1. + <_>8 12 3 5 2. + 0 + 3.0041709542274475e-003 + 0.0453798696398735 + -0.2413036972284317 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>7 17 6 3 3. + 0 + 0.1543570011854172 + -0.0329164713621140 + 0.3209039866924286 + <_> + + <_> + + + + <_>2 9 5 9 -1. + <_>2 12 5 3 3. + 0 + 0.0151535095646977 + 0.0535764582455158 + -0.1627317965030670 + <_> + + <_> + + + + <_>3 6 16 3 -1. + <_>3 6 8 3 2. + 0 + 0.0952092930674553 + 0.0131325302645564 + -0.4338963031768799 + <_> + + <_> + + + + <_>3 13 12 7 -1. + <_>9 13 6 7 2. + 0 + -0.0220660194754601 + 0.1835885047912598 + -0.0539956800639629 + <_> + + <_> + + + + <_>10 2 3 15 -1. + <_>11 2 1 15 3. + 0 + -0.0406234301626682 + -0.4568724930286408 + 0.0111194001510739 + <_> + + <_> + + + + <_>7 2 3 15 -1. + <_>8 2 1 15 3. + 0 + -2.1428579930216074e-003 + 0.0952214673161507 + -0.1043168976902962 + <_> + + <_> + + + + <_>10 1 7 4 -1. + <_>10 3 7 2 2. + 0 + -9.6598910167813301e-003 + -0.2812178134918213 + 0.0313871800899506 + <_> + + <_> + + + + <_>5 0 7 12 -1. + <_>5 4 7 4 3. + 0 + -0.1786002069711685 + 0.4667539000511169 + -0.0222962908446789 + <_> + + <_> + + + + <_>10 1 7 4 -1. + <_>10 3 7 2 2. + 0 + -2.0536049269139767e-003 + -0.0884601101279259 + 0.0258634798228741 + <_> + + <_> + + + + <_>3 12 4 8 -1. + <_>3 16 4 4 2. 
+ 0 + -4.6333461068570614e-003 + 0.0607207790017128 + -0.1656270027160645 + <_> + + <_> + + + + <_>6 7 9 5 -1. + <_>9 7 3 5 3. + 0 + 0.0468479916453362 + -0.0406967587769032 + 0.1059897020459175 + <_> + + <_> + + + + <_>5 0 6 16 -1. + <_>7 0 2 16 3. + 0 + -0.0905382335186005 + -0.6336705088615418 + 0.0162777006626129 + <_> + + <_> + + + + <_>10 8 8 8 -1. + <_>14 8 4 4 2. + <_>10 12 4 4 2. + 0 + -0.0662609264254570 + -0.2879275977611542 + 6.1133177950978279e-003 + <_> + + <_> + + + + <_>2 8 8 8 -1. + <_>2 8 4 4 2. + <_>6 12 4 4 2. + 0 + 0.0247317291796207 + 0.0400579310953617 + -0.2327253073453903 + <_> + + <_> + + + + <_>4 8 16 8 -1. + <_>12 8 8 4 2. + <_>4 12 8 4 2. + 0 + -0.1373658031225205 + 0.4725002944469452 + -8.2997139543294907e-003 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>2 10 3 5 2. + <_>5 15 3 5 2. + 0 + -0.0634149014949799 + 0.4303930103778839 + -0.0210490003228188 + <_> + + <_> + + + + <_>10 10 4 8 -1. + <_>10 14 4 4 2. + 0 + -0.0330718196928501 + -0.1107349991798401 + 0.0337187312543392 + <_> + + <_> + + + + <_>1 6 16 3 -1. + <_>9 6 8 3 2. + 0 + 0.1093479022383690 + 0.0135084995999932 + -0.6550201773643494 + <_> + + <_> + + + + <_>10 1 7 4 -1. + <_>10 3 7 2 2. + 0 + 0.0159258805215359 + 0.0336726903915405 + -0.0707790628075600 + <_> + + <_> + + + + <_>3 1 7 4 -1. + <_>3 3 7 2 2. + 0 + -7.4891438707709312e-003 + -0.2647283971309662 + 0.0381838604807854 + <_> + + <_> + + + + <_>10 2 4 7 -1. + <_>10 2 2 7 2. + 0 + 9.8611623980104923e-004 + -0.1614990979433060 + 0.0294753909111023 + <_> + + <_> + + + + <_>4 0 10 19 -1. + <_>9 0 5 19 2. + 0 + 0.2520647943019867 + -0.0323824882507324 + 0.3106861114501953 + <_> + + <_> + + + + <_>12 0 3 13 -1. + <_>13 0 1 13 3. + 0 + -0.0288927294313908 + -0.4911664128303528 + 0.0149231497198343 + <_> + + <_> + + + + <_>1 4 18 5 -1. + <_>7 4 6 5 3. + 0 + -0.0553898811340332 + 0.5754340887069702 + -0.0185828395187855 + <_> + + <_> + + + + <_>10 2 4 7 -1. + <_>10 2 2 7 2. + 0 + 0.0314145982265472 + 0.0207207594066858 + -0.0947296470403671 + <_> + + <_> + + + + <_>6 2 4 7 -1. + <_>8 2 2 7 2. + 0 + 2.8307519387453794e-003 + -0.2251935005187988 + 0.0415641590952873 + <_> + + <_> + + + + <_>2 1 16 3 -1. + <_>2 1 8 3 2. + 0 + -0.0337512604892254 + -0.1664658039808273 + 0.0726936236023903 + <_> + + <_> + + + + <_>5 7 7 9 -1. + <_>5 10 7 3 3. + 0 + -0.0382902882993221 + 0.7921373248100281 + -0.0114345299080014 + <_> + + <_> + + + + <_>4 5 14 3 -1. + <_>4 6 14 1 3. + 0 + -0.0179894808679819 + 0.1136166974902153 + -0.0440325103700161 + <_> + + <_> + + + + <_>2 13 7 6 -1. + <_>2 15 7 2 3. + 0 + 0.0181465297937393 + 0.0342195406556129 + -0.2504163086414337 + <_> + + <_> + + + + <_>10 10 4 8 -1. + <_>10 14 4 4 2. + 0 + -0.0691331923007965 + -0.2979319989681244 + 4.9929767847061157e-003 + <_> + + <_> + + + + <_>5 0 3 18 -1. + <_>5 6 3 6 3. + 0 + 0.1252592056989670 + 0.0107090799137950 + -0.7634230852127075 + <_> + + <_> + + + + <_>10 0 10 10 -1. + <_>15 0 5 5 2. + <_>10 5 5 5 2. + 0 + 0.0376835614442825 + -0.0348669104278088 + 0.1953237950801849 + <_> + + <_> + + + + <_>0 4 14 3 -1. + <_>0 5 14 1 3. + 0 + -7.6676071621477604e-003 + 0.1711481958627701 + -0.0511017814278603 + <_> + + <_> + + + + <_>6 4 13 3 -1. + <_>6 5 13 1 3. + 0 + 3.5654550883919001e-003 + -0.0690719112753868 + 0.0657246932387352 + <_> + + <_> + + + + <_>5 0 3 13 -1. + <_>6 0 1 13 3. + 0 + -0.0189686007797718 + -0.4097692966461182 + 0.0205602291971445 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. 
+ 0 + -0.0202113706618547 + 0.3350892066955566 + -0.0279074106365442 + <_> + + <_> + + + + <_>4 9 6 7 -1. + <_>6 9 2 7 3. + 0 + -0.0190645996481180 + 0.1936192959547043 + -0.0486482195556164 + <_> + + <_> + + + + <_>2 9 18 3 -1. + <_>8 9 6 3 3. + 0 + 0.1031334027647972 + 0.0193824600428343 + -0.1119868010282517 + <_> + + <_> + + + + <_>0 9 18 3 -1. + <_>6 9 6 3 3. + 0 + 9.8863355815410614e-003 + -0.2404316067695618 + 0.0443056002259254 + <_> + + <_> + + + + <_>2 17 17 3 -1. + <_>2 18 17 1 3. + 0 + 0.0432936996221542 + 0.0107287801802158 + -0.6466053724288940 + <_> + + <_> + + + + <_>8 1 3 19 -1. + <_>9 1 1 19 3. + 0 + 0.0618783310055733 + 0.0102918995544314 + -0.7296711206436157 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 9.7703160718083382e-003 + 0.0313111804425716 + -0.1560508012771606 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + -0.0831750631332397 + -0.3304534852504730 + 0.0239973906427622 + <_> + + <_> + + + + <_>4 2 12 12 -1. + <_>4 6 12 4 3. + 0 + -0.3172465860843658 + 0.5476077198982239 + -0.0178533792495728 + <_> + + <_> + + + + <_>0 17 13 3 -1. + <_>0 18 13 1 3. + 0 + 6.7434520460665226e-003 + -0.0669694393873215 + 0.1265795975923538 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 0.0408868901431561 + 4.3191551230847836e-003 + -0.2203239947557449 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 6.4959921874105930e-003 + 0.0540977418422699 + -0.1550489962100983 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0328323505818844 + 0.3077057898044586 + -0.0243469104170799 + <_> + + <_> + + + + <_>4 8 11 12 -1. + <_>4 12 11 4 3. + 0 + -0.0161279607564211 + -0.1047791987657547 + 0.0912674665451050 + <_> + + <_> + + + + <_>12 8 5 6 -1. + <_>12 11 5 3 2. + 0 + 0.0346466712653637 + 0.0140302302315831 + -0.1820760071277618 + <_> + + <_> + + + + <_>3 8 5 6 -1. + <_>3 11 5 3 2. + 0 + -0.0330054089426994 + 0.3869892954826355 + -0.0218596290796995 + <_> + + <_> + + + + <_>13 3 7 6 -1. + <_>13 5 7 2 3. + 0 + -0.0439083389937878 + -0.3062177896499634 + 0.0227748006582260 + <_> + + <_> + + + + <_>3 0 3 17 -1. + <_>4 0 1 17 3. + 0 + 0.0248428992927074 + 0.0320772416889668 + -0.2527902126312256 + <_> + + <_> + + + + <_>5 10 13 3 -1. + <_>5 11 13 1 3. + 0 + 0.0103312600404024 + -0.0605512000620365 + 0.1211913004517555 + <_> + + <_> + + + + <_>5 9 4 8 -1. + <_>5 13 4 4 2. + 0 + -0.0678322464227676 + -0.5583338737487793 + 0.0153369996696711 + <_> + + <_> + + + + <_>13 3 7 6 -1. + <_>13 5 7 2 3. + 0 + 0.0349478684365749 + 0.0116471797227860 + -0.2556365132331848 + <_> + + <_> + + + + <_>0 0 2 13 -1. + <_>1 0 1 13 2. + 0 + -0.0252617895603180 + 0.3283202052116394 + -0.0233572106808424 + <_> + + <_> + + + + <_>7 1 7 14 -1. + <_>7 8 7 7 2. + 0 + 7.5701558962464333e-003 + 0.0711838826537132 + -0.0838781818747520 + <_> + + <_> + + + + <_>2 0 15 8 -1. + <_>2 4 15 4 2. + 0 + 0.1180910021066666 + -0.0418099910020828 + 0.2208334952592850 + <_> + + <_> + + + + <_>1 4 18 3 -1. + <_>7 4 6 3 3. + 0 + 0.0363322310149670 + 0.1741527020931244 + -0.0517880804836750 + <_> + + <_> + + + + <_>0 2 10 16 -1. + <_>5 2 5 16 2. + 0 + 0.0132168503478169 + -0.4769985079765320 + 0.0188783891499043 + <_> + + <_> + + + + <_>5 2 15 12 -1. + <_>5 6 15 4 3. + 0 + 0.0143251102417707 + 0.0218347609043121 + -0.1396169066429138 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>9 0 2 8 3. + 0 + 1.3779220171272755e-003 + -0.2015677988529205 + 0.0399253815412521 + <_> + + <_> + + + + <_>5 1 15 5 -1. + <_>10 1 5 5 3. 
+ 0 + 0.1449285000562668 + -0.0339473113417625 + 0.1480593979358673 + <_> + + <_> + + + + <_>0 8 12 9 -1. + <_>4 8 4 9 3. + 0 + 0.2033672034740448 + -0.0282801594585180 + 0.3046959936618805 + <_> + + <_> + + + + <_>6 5 10 6 -1. + <_>11 5 5 3 2. + <_>6 8 5 3 2. + 0 + -0.0305505208671093 + 0.1575158983469009 + -0.0343396589159966 + <_> + + <_> + + + + <_>3 4 4 12 -1. + <_>5 4 2 12 2. + 0 + -0.0110678598284721 + 0.2468834966421127 + -0.0375544913113117 + <_> + + <_> + + + + <_>13 0 7 4 -1. + <_>13 2 7 2 2. + 0 + 0.0259812101721764 + 0.0219940301030874 + -0.1476574987173080 + <_> + + <_> + + + + <_>0 2 10 12 -1. + <_>0 8 10 6 2. + 0 + -0.0483319386839867 + -0.2558029890060425 + 0.0328578688204288 + <_> + + <_> + + + + <_>4 8 16 3 -1. + <_>4 8 8 3 2. + 0 + 0.0152682801708579 + 0.0621620416641235 + -0.0518118105828762 + <_> + + <_> + + + + <_>4 8 11 12 -1. + <_>4 14 11 6 2. + 0 + -0.2439073026180267 + 0.5033984780311585 + -0.0168641693890095 + <_> + + <_> + + + + <_>2 1 16 3 -1. + <_>2 2 16 1 3. + 0 + -3.2398870680481195e-003 + -0.1385017037391663 + 0.0637383162975311 + <_> + + <_> + + + + <_>4 2 11 6 -1. + <_>4 4 11 2 3. + 0 + 0.0614509284496307 + -0.0569628290832043 + 0.1470678001642227 + <_> + + <_> + + + + <_>11 9 8 6 -1. + <_>11 11 8 2 3. + 0 + 0.0431614890694618 + 0.0234411004930735 + -0.2692278027534485 + <_> + + <_> + + + + <_>0 0 13 3 -1. + <_>0 1 13 1 3. + 0 + -0.0113708600401878 + -0.2613599896430969 + 0.0336247608065605 + <_> + + <_> + + + + <_>2 4 16 3 -1. + <_>2 5 16 1 3. + 0 + -0.0154185499995947 + 0.2215317934751511 + -0.0408664904534817 + <_> + + <_> + + + + <_>0 0 10 10 -1. + <_>0 0 5 5 2. + <_>5 5 5 5 2. + 0 + 0.0454872287809849 + -0.0315987505018711 + 0.2568730115890503 + <_> + + <_> + + + + <_>6 2 13 3 -1. + <_>6 3 13 1 3. + 0 + -0.0158796198666096 + -0.2998133897781372 + 0.0270061995834112 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + 0.0570124983787537 + 0.0151795800775290 + -0.5207880735397339 + <_> + + <_> + + + + <_>2 7 16 7 -1. + <_>2 7 8 7 2. + 0 + -0.1503849029541016 + 0.2516432106494904 + -0.0407965108752251 + <_> + + <_> + + + + <_>6 13 6 7 -1. + <_>8 13 2 7 3. + 0 + -0.0422460399568081 + -0.4830358028411865 + 0.0192220397293568 + <_> + + <_> + + + + <_>6 6 10 6 -1. + <_>11 6 5 3 2. + <_>6 9 5 3 2. + 0 + -0.0749284699559212 + -0.9545899033546448 + 4.4229729101061821e-003 + <_> + + <_> + + + + <_>0 15 13 3 -1. + <_>0 16 13 1 3. + 0 + -0.0212518405169249 + 0.3185069859027863 + -0.0280219707638025 + <_> + + <_> + + + + <_>4 10 12 4 -1. + <_>8 10 4 4 3. + 0 + 0.0539837814867496 + 0.0270374808460474 + -0.3443068861961365 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>9 6 2 9 3. + 0 + 0.0335725806653500 + -0.0765458792448044 + 0.1425555050373077 + <_> + + <_> + + + + <_>9 1 3 13 -1. + <_>10 1 1 13 3. + 0 + -6.7975879646837711e-003 + 0.1774832010269165 + -0.0431553386151791 + <_> + + <_> + + + + <_>8 1 3 13 -1. + <_>9 1 1 13 3. + 0 + -1.3311849907040596e-003 + 0.1549810022115707 + -0.0762618333101273 + <_> + + <_> + + + + <_>6 1 8 12 -1. + <_>10 1 4 6 2. + <_>6 7 4 6 2. + 0 + 0.0393646992743015 + 0.0369915887713432 + -0.2424355000257492 + <_> + + <_> + + + + <_>4 5 10 6 -1. + <_>4 5 5 3 2. + <_>9 8 5 3 2. + 0 + -6.8364520557224751e-003 + 0.1074364036321640 + -0.0930581763386726 + <_> + + <_> + + + + <_>9 3 6 10 -1. + <_>12 3 3 5 2. + <_>9 8 3 5 2. + 0 + 0.0161180105060339 + -0.0356909111142159 + 0.2418579012155533 + <_> + + <_> + + + + <_>2 1 15 6 -1. + <_>2 3 15 2 3. 
+ 0 + -0.0706200897693634 + 0.6336339116096497 + -0.0124382898211479 + <_> + + <_> + + + + <_>2 1 18 16 -1. + <_>8 1 6 16 3. + 0 + 0.4436163008213043 + -0.0372217893600464 + 0.1189270019531250 + <_> + + <_> + + + + <_>2 1 14 6 -1. + <_>9 1 7 6 2. + 0 + -0.0818992331624031 + 0.3485333919525147 + -0.0252110194414854 + <_> + + <_> + + + + <_>7 9 13 3 -1. + <_>7 10 13 1 3. + 0 + -8.2997446879744530e-003 + -0.3089908957481384 + 0.0257782395929098 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0297303907573223 + -0.3075981140136719 + 0.0255308207124472 + <_> + + <_> + + + + <_>8 1 12 14 -1. + <_>8 1 6 14 2. + 0 + -0.0260144900530577 + -0.1216239035129547 + 0.0183383505791426 + <_> + + <_> + + + + <_>0 1 12 14 -1. + <_>6 1 6 14 2. + 0 + 4.5121149742044508e-004 + -0.5473784804344177 + 0.0135647496208549 + <_> + + <_> + + + + <_>2 3 18 13 -1. + <_>8 3 6 13 3. + 0 + 0.1867994070053101 + 0.0780398473143578 + -0.0581372715532780 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 3.1894310377538204e-003 + -0.2497601956129074 + 0.0308658406138420 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + -0.0294490698724985 + 0.1048920005559921 + -0.0488691292703152 + <_> + + <_> + + + + <_>0 10 6 10 -1. + <_>0 10 3 5 2. + <_>3 15 3 5 2. + 0 + 0.0296149700880051 + -0.0222617201507092 + 0.3499243855476379 + <_> + + <_> + + + + <_>7 7 13 2 -1. + <_>7 8 13 1 2. + 0 + 0.0398820601403713 + 9.6727507188916206e-003 + -0.6791443228721619 + <_> + + <_> + + + + <_>5 13 10 6 -1. + <_>5 13 5 3 2. + <_>10 16 5 3 2. + 0 + -0.0244044195860624 + -0.2674382925033569 + 0.0303603708744049 + <_> + + <_> + + + + <_>16 2 4 18 -1. + <_>18 2 2 9 2. + <_>16 11 2 9 2. + 0 + 0.0434818491339684 + -0.0233721993863583 + 0.2135642021894455 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + -0.0481283701956272 + -0.3689002990722656 + 0.0228328201919794 + <_> + + <_> + + + + <_>7 2 12 6 -1. + <_>13 2 6 3 2. + <_>7 5 6 3 2. + 0 + -1.3142440002411604e-003 + 0.0567646883428097 + -0.1379531025886536 + <_> + + <_> + + + + <_>4 2 12 6 -1. + <_>4 2 6 3 2. + <_>10 5 6 3 2. + 0 + 2.1767991129308939e-003 + 0.0824462622404099 + -0.1051168963313103 + <_> + + <_> + + + + <_>12 9 4 8 -1. + <_>12 13 4 4 2. + 0 + -0.0274710506200790 + 0.0964383408427238 + -0.0515207797288895 + <_> + + <_> + + + + <_>0 8 16 8 -1. + <_>0 8 8 4 2. + <_>8 12 8 4 2. + 0 + 0.0520031712949276 + -0.0232407599687576 + 0.3590059876441956 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>15 10 5 3 2. + <_>10 13 5 3 2. + 0 + 0.0296817403286695 + 0.0146415596827865 + -0.2150088995695114 + <_> + + <_> + + + + <_>0 8 4 8 -1. + <_>0 12 4 4 2. + 0 + -0.0475459508597851 + -0.3883490860462189 + 0.0220626406371593 + <_> + + <_> + + + + <_>10 2 6 12 -1. + <_>13 2 3 6 2. + <_>10 8 3 6 2. + 0 + -0.0969008132815361 + -0.4341281056404114 + 6.4087379723787308e-003 + <_> + + <_> + + + + <_>0 0 20 14 -1. + <_>0 7 20 7 2. + 0 + -0.3821898996829987 + -0.9017667174339294 + 7.9825157299637794e-003 + <_> + + <_> + + + + <_>11 9 7 6 -1. + <_>11 11 7 2 3. + 0 + -0.0343893095850945 + -0.3185026943683624 + 9.1135511174798012e-003 + <_> + + <_> + + + + <_>1 9 8 6 -1. + <_>1 11 8 2 3. + 0 + 0.0390687882900238 + 0.0284209605306387 + -0.2657074928283691 + <_> + + <_> + + + + <_>13 1 7 15 -1. + <_>13 6 7 5 3. + 0 + 0.1003170013427734 + -0.0161553993821144 + 0.1221268996596336 + <_> + + <_> + + + + <_>0 1 7 15 -1. + <_>0 6 7 5 3. + 0 + -0.1085721030831337 + 0.3774287104606628 + -0.0240144208073616 + <_> + + <_> + + + + <_>12 10 5 6 -1. + <_>12 13 5 3 2. 
+ 0 + -4.3303978600306436e-005 + 0.0203080605715513 + -0.1306051015853882 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + -0.0387572795152664 + -0.1582642048597336 + 0.0491292290389538 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + 0.0686680898070335 + 5.5041261948645115e-003 + -0.7222251892089844 + <_> + + <_> + + + + <_>4 6 10 6 -1. + <_>4 6 5 3 2. + <_>9 9 5 3 2. + 0 + -4.4268090277910233e-003 + 0.0822630599141121 + -0.1035472974181175 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -3.1016240245662630e-004 + 0.0904322564601898 + -0.1034862995147705 + <_> + + <_> + + + + <_>1 7 12 4 -1. + <_>5 7 4 4 3. + 0 + 0.0377030707895756 + 0.0601263381540775 + -0.1611139029264450 + <_> + + <_> + + + + <_>14 1 2 19 -1. + <_>14 1 1 19 2. + 0 + 0.0416721291840076 + 8.5145309567451477e-003 + -0.2421742975711823 + <_> + + <_> + + + + <_>4 1 2 19 -1. + <_>5 1 1 19 2. + 0 + -6.6434321925044060e-003 + -0.2717247903347015 + 0.0314632914960384 + <_> + + <_> + + + + <_>12 10 5 6 -1. + <_>12 13 5 3 2. + 0 + -0.0406586490571499 + -0.1167362034320831 + 0.0148495901376009 + <_> + + <_> + + + + <_>3 10 5 6 -1. + <_>3 13 5 3 2. + 0 + -3.0082110315561295e-003 + 0.0400285683572292 + -0.2307904958724976 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>12 6 5 3 2. + <_>7 9 5 3 2. + 0 + -0.0441877692937851 + -0.1788810938596726 + 0.0173136200755835 + <_> + + <_> + + + + <_>3 11 9 5 -1. + <_>6 11 3 5 3. + 0 + -0.0118137197569013 + 0.1563335955142975 + -0.0547516308724880 + <_> + + <_> + + + + <_>2 1 18 16 -1. + <_>8 1 6 16 3. + 0 + -0.2443345040082932 + 0.4071688950061798 + -3.8216509856283665e-003 + <_> + + <_> + + + + <_>0 1 18 16 -1. + <_>6 1 6 16 3. + 0 + 0.4723018109798431 + -0.0554546192288399 + 0.1641063988208771 + <_> + + <_> + + + + <_>6 12 9 5 -1. + <_>9 12 3 5 3. + 0 + 1.7955109942704439e-003 + 0.0952280014753342 + -0.1293476969003677 + <_> + + <_> + + + + <_>2 10 16 10 -1. + <_>2 10 8 5 2. + <_>10 15 8 5 2. + 0 + -0.0509340390563011 + 0.2215344011783600 + -0.0379755608737469 + <_> + + <_> + + + + <_>12 0 4 14 -1. + <_>14 0 2 7 2. + <_>12 7 2 7 2. + 0 + -0.0595317184925079 + -0.4297493994235992 + 0.0131964096799493 + <_> + + <_> + + + + <_>4 0 4 14 -1. + <_>4 0 2 7 2. + <_>6 7 2 7 2. + 0 + -0.0351493991911411 + -0.2123250961303711 + 0.0368725396692753 + <_> + + <_> + + + + <_>12 7 4 9 -1. + <_>12 7 2 9 2. + 0 + -8.2134327385574579e-004 + 0.0748902410268784 + -0.0697017312049866 + <_> + + <_> + + + + <_>4 7 4 9 -1. + <_>6 7 2 9 2. + 0 + 6.3945869915187359e-003 + 0.0806021094322205 + -0.1048861965537071 + <_> + + <_> + + + + <_>16 0 2 20 -1. + <_>16 0 1 20 2. + 0 + 0.0637358278036118 + 0.0119886603206396 + -0.5950837135314941 + <_> + + <_> + + + + <_>2 0 2 20 -1. + <_>3 0 1 20 2. + 0 + 0.0669420212507248 + 0.0107118599116802 + -0.7024027705192566 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + 0.0354453586041927 + 8.8395569473505020e-003 + -0.2058853954076767 + <_> + + <_> + + + + <_>5 1 2 14 -1. + <_>5 8 2 7 2. + 0 + 0.0820254236459732 + 0.0115113602951169 + -0.6708133816719055 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + -0.1215184032917023 + 0.3912476897239685 + -6.0432488098740578e-003 + <_> + + <_> + + + + <_>0 13 18 3 -1. + <_>6 13 6 3 3. + 0 + 0.1373285949230194 + -0.0161360204219818 + 0.4618254899978638 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + -0.1607525944709778 + -1. + 2.4232869036495686e-003 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. 
+ 0 + 6.3080438412725925e-003 + 0.0430266894400120 + -0.1907224953174591 + <_> + + <_> + + + + <_>0 6 20 2 -1. + <_>0 6 10 2 2. + 0 + -0.0857729688286781 + -0.5332754850387573 + 0.0141979996114969 + <_> + + <_> + + + + <_>3 14 9 6 -1. + <_>6 14 3 6 3. + 0 + 0.0558534488081932 + 0.0405352599918842 + -0.2081681936979294 + -1.4994510412216187 + 33 + -1 + <_> + + + <_> + + <_> + + + + <_>5 2 9 6 -1. + <_>5 5 9 3 2. + 0 + -0.0110099604353309 + 0.1610680073499680 + -0.2327049970626831 + <_> + + <_> + + + + <_>10 3 10 3 -1. + <_>10 3 5 3 2. + 0 + 5.6892321445047855e-003 + -0.2223366051912308 + 0.1225773990154266 + <_> + + <_> + + + + <_>0 3 8 4 -1. + <_>4 3 4 4 2. + 0 + 4.3932348489761353e-003 + -0.1529338061809540 + 0.1588848978281021 + <_> + + <_> + + + + <_>10 10 7 4 -1. + <_>10 12 7 2 2. + 0 + -5.0024059601128101e-004 + 0.0617161802947521 + -0.2317554056644440 + <_> + + <_> + + + + <_>6 2 4 7 -1. + <_>8 2 2 7 2. + 0 + 4.2015648796223104e-004 + -0.3025949895381928 + 0.0610939487814903 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -4.2626978829503059e-003 + -0.2438767999410629 + 0.0695137828588486 + <_> + + <_> + + + + <_>6 6 4 12 -1. + <_>6 10 4 4 3. + 0 + 6.5330968936905265e-004 + -0.3711237907409668 + 0.0461697801947594 + <_> + + <_> + + + + <_>14 1 6 8 -1. + <_>16 1 2 8 3. + 0 + -0.1016353964805603 + 0.4508996009826660 + -0.0144245103001595 + <_> + + <_> + + + + <_>3 2 6 10 -1. + <_>3 2 3 5 2. + <_>6 7 3 5 2. + 0 + -1.3200199464336038e-003 + 0.0757651329040527 + -0.1946184933185577 + <_> + + <_> + + + + <_>9 0 3 18 -1. + <_>9 6 3 6 3. + 0 + -9.8261423408985138e-003 + -0.2744089066982269 + 0.0523732192814350 + <_> + + <_> + + + + <_>0 1 6 8 -1. + <_>2 1 2 8 3. + 0 + -0.0665745511651039 + 0.4280484914779663 + -0.0326409488916397 + <_> + + <_> + + + + <_>9 5 10 6 -1. + <_>14 5 5 3 2. + <_>9 8 5 3 2. + 0 + -9.1772843152284622e-003 + -0.2587639093399048 + 0.0615967884659767 + <_> + + <_> + + + + <_>0 14 14 3 -1. + <_>0 15 14 1 3. + 0 + -2.5353950913995504e-003 + 0.1147368997335434 + -0.1009797975420952 + <_> + + <_> + + + + <_>10 10 7 6 -1. + <_>10 12 7 2 3. + 0 + 4.9194418825209141e-003 + 0.0400274693965912 + -0.1637817025184631 + <_> + + <_> + + + + <_>3 10 14 4 -1. + <_>3 10 7 2 2. + <_>10 12 7 2 2. + 0 + -1.6810640227049589e-003 + -0.1370667070150375 + 0.0803217291831970 + <_> + + <_> + + + + <_>3 8 17 2 -1. + <_>3 9 17 1 2. + 0 + 2.1476070396602154e-003 + -0.2340860068798065 + 0.0431139506399632 + <_> + + <_> + + + + <_>0 5 14 12 -1. + <_>0 11 14 6 2. + 0 + -0.0335024408996105 + -0.2420428991317749 + 0.0491002090275288 + <_> + + <_> + + + + <_>3 7 14 6 -1. + <_>3 9 14 2 3. + 0 + 0.1424178928136826 + -0.0286809802055359 + 0.4780705869197846 + <_> + + <_> + + + + <_>7 1 6 7 -1. + <_>9 1 2 7 3. + 0 + 5.8733951300382614e-004 + -0.2168561071157455 + 0.0485301092267036 + <_> + + <_> + + + + <_>4 18 13 2 -1. + <_>4 19 13 1 2. + 0 + -1.2295519700273871e-003 + 0.0931802466511726 + -0.1015821024775505 + <_> + + <_> + + + + <_>1 6 14 2 -1. + <_>8 6 7 2 2. + 0 + 0.0112106697633863 + 0.0362101793289185 + -0.2310644984245300 + <_> + + <_> + + + + <_>2 5 18 15 -1. + <_>8 5 6 15 3. + 0 + -0.0252359900623560 + 0.0857476219534874 + -0.0544151589274406 + <_> + + <_> + + + + <_>5 6 6 14 -1. + <_>8 6 3 14 2. + 0 + -0.0100140301510692 + -0.1936244070529938 + 0.0502747297286987 + <_> + + <_> + + + + <_>8 5 8 8 -1. + <_>12 5 4 4 2. + <_>8 9 4 4 2. + 0 + -4.5554949901998043e-003 + 0.0886749923229218 + -0.1423750966787338 + <_> + + <_> + + + + <_>5 1 6 5 -1. + <_>8 1 3 5 2. 
+ 0 + -9.5264799892902374e-003 + 0.2675423920154572 + -0.0376324504613876 + <_> + + <_> + + + + <_>6 5 10 12 -1. + <_>11 5 5 6 2. + <_>6 11 5 6 2. + 0 + 2.3753349669277668e-003 + 0.0392619185149670 + -0.1419990956783295 + <_> + + <_> + + + + <_>3 5 12 14 -1. + <_>3 5 6 7 2. + <_>9 12 6 7 2. + 0 + 1.2389000039547682e-003 + 0.0686439126729965 + -0.1806087046861649 + <_> + + <_> + + + + <_>7 0 13 3 -1. + <_>7 1 13 1 3. + 0 + -1.5835729427635670e-003 + -0.1368415951728821 + 0.0578756891191006 + <_> + + <_> + + + + <_>5 7 9 12 -1. + <_>5 11 9 4 3. + 0 + 0.0652025863528252 + -0.0344483889639378 + 0.2531813979148865 + <_> + + <_> + + + + <_>11 6 4 14 -1. + <_>13 6 2 7 2. + <_>11 13 2 7 2. + 0 + 6.6306376538705081e-005 + -0.0846016332507133 + 0.0916575863957405 + <_> + + <_> + + + + <_>5 6 4 14 -1. + <_>5 6 2 7 2. + <_>7 13 2 7 2. + 0 + 1.5117590010049753e-005 + -0.0933438166975975 + 0.1107939034700394 + <_> + + <_> + + + + <_>3 1 17 2 -1. + <_>3 2 17 1 2. + 0 + -2.2637350484728813e-003 + -0.1953119933605194 + 0.0382635109126568 + <_> + + <_> + + + + <_>7 4 6 16 -1. + <_>7 12 6 8 2. + 0 + 6.5463641658425331e-004 + 0.0478608794510365 + -0.1635490059852600 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>8 6 2 7 2. + 0 + 0.0503452904522419 + -0.0156183699145913 + 0.5266051292419434 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + 8.5375197231769562e-003 + 0.0338947288691998 + -0.2704094052314758 + <_> + + <_> + + + + <_>2 5 18 15 -1. + <_>8 5 6 15 3. + 0 + -0.6162161827087402 + -0.9315608143806458 + 2.6866910047829151e-003 + <_> + + <_> + + + + <_>0 5 18 15 -1. + <_>6 5 6 15 3. + 0 + -0.0267428401857615 + 0.1241556033492088 + -0.0815768614411354 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0147567400708795 + -0.4422414898872376 + 0.0244187396019697 + <_> + + <_> + + + + <_>2 0 12 19 -1. + <_>6 0 4 19 3. + 0 + 0.0120458099991083 + -0.0845528766512871 + 0.0927352979779243 + <_> + + <_> + + + + <_>9 12 11 4 -1. + <_>9 14 11 2 2. + 0 + -0.0401319004595280 + -0.2573471963405609 + 0.0106921102851629 + <_> + + <_> + + + + <_>0 4 20 6 -1. + <_>0 6 20 2 3. + 0 + -1.0760580189526081e-003 + 0.0280271805822849 + -0.2680596113204956 + <_> + + <_> + + + + <_>5 3 10 4 -1. + <_>5 5 10 2 2. + 0 + 7.7456878498196602e-003 + -0.0364016890525818 + 0.2616504132747650 + <_> + + <_> + + + + <_>1 6 12 4 -1. + <_>5 6 4 4 3. + 0 + 0.0135398497804999 + 0.0289459191262722 + -0.2800337970256805 + <_> + + <_> + + + + <_>6 8 14 3 -1. + <_>6 9 14 1 3. + 0 + -0.0124647803604603 + -0.3625848889350891 + 0.0130060398951173 + <_> + + <_> + + + + <_>0 8 14 3 -1. + <_>0 9 14 1 3. + 0 + 0.0352978296577930 + 0.0129187498241663 + -0.5646079778671265 + <_> + + <_> + + + + <_>5 3 13 6 -1. + <_>5 6 13 3 2. + 0 + -0.0557105503976345 + 0.1279485970735550 + -0.0382571183145046 + <_> + + <_> + + + + <_>0 12 11 4 -1. + <_>0 14 11 2 2. + 0 + -4.5230439864099026e-003 + -0.0994105637073517 + 0.0789975225925446 + <_> + + <_> + + + + <_>5 12 13 3 -1. + <_>5 13 13 1 3. + 0 + 2.9874469619244337e-003 + -0.0485091395676136 + 0.1129868030548096 + <_> + + <_> + + + + <_>0 2 20 4 -1. + <_>0 2 10 2 2. + <_>10 4 10 2 2. + 0 + -0.0636133104562759 + -0.6664727926254273 + 0.0112211704254150 + <_> + + <_> + + + + <_>14 1 6 5 -1. + <_>14 1 3 5 2. + 0 + 0.0132444901391864 + -0.0619768686592579 + 0.1312289983034134 + <_> + + <_> + + + + <_>4 11 5 6 -1. + <_>4 14 5 3 2. + 0 + -3.6382430698722601e-004 + 0.0430542416870594 + -0.1699635982513428 + <_> + + <_> + + + + <_>6 1 10 18 -1. + <_>6 10 10 9 2. 
+ 0 + -0.2150018960237503 + -0.4678407907485962 + 0.0122863203287125 + <_> + + <_> + + + + <_>0 8 6 12 -1. + <_>0 8 3 6 2. + <_>3 14 3 6 2. + 0 + 6.0248938389122486e-003 + -0.0514759197831154 + 0.1523485928773880 + <_> + + <_> + + + + <_>9 9 10 6 -1. + <_>14 9 5 3 2. + <_>9 12 5 3 2. + 0 + 0.0430005714297295 + 3.8120739627629519e-003 + -0.7534918785095215 + <_> + + <_> + + + + <_>1 9 10 6 -1. + <_>1 9 5 3 2. + <_>6 12 5 3 2. + 0 + 8.5592586547136307e-003 + 0.0244704391807318 + -0.3279660940170288 + <_> + + <_> + + + + <_>15 0 3 13 -1. + <_>16 0 1 13 3. + 0 + 2.9510160675272346e-004 + -0.0764569267630577 + 0.0680100470781326 + <_> + + <_> + + + + <_>2 0 3 13 -1. + <_>3 0 1 13 3. + 0 + 9.9761411547660828e-004 + -0.0846806615591049 + 0.0963161364197731 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 5.0175599753856659e-003 + -0.0390481017529964 + 0.1098378971219063 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 5.5693010799586773e-003 + 0.0407193005084991 + -0.1839596033096314 + <_> + + <_> + + + + <_>17 3 3 13 -1. + <_>18 3 1 13 3. + 0 + 1.0486049577593803e-003 + -0.0446220487356186 + 0.0709181129932404 + <_> + + <_> + + + + <_>0 3 3 13 -1. + <_>1 3 1 13 3. + 0 + 3.2043100800365210e-003 + -0.0588391087949276 + 0.1277731060981751 + <_> + + <_> + + + + <_>13 4 6 16 -1. + <_>16 4 3 8 2. + <_>13 12 3 8 2. + 0 + -0.1064466014504433 + 0.4333994984626770 + -0.0124499695375562 + <_> + + <_> + + + + <_>3 2 3 14 -1. + <_>4 2 1 14 3. + 0 + -8.9908082736656070e-004 + -0.1151050031185150 + 0.0633065626025200 + <_> + + <_> + + + + <_>16 1 3 13 -1. + <_>17 1 1 13 3. + 0 + 2.9652470257133245e-003 + -0.0312906801700592 + 0.0728456601500511 + <_> + + <_> + + + + <_>1 1 3 13 -1. + <_>2 1 1 13 3. + 0 + 8.9800870046019554e-004 + -0.0868405029177666 + 0.1002272963523865 + <_> + + <_> + + + + <_>8 6 9 9 -1. + <_>8 9 9 3 3. + 0 + -0.0218740291893482 + 0.7614316940307617 + -4.5735938474535942e-003 + <_> + + <_> + + + + <_>0 2 14 2 -1. + <_>0 3 14 1 2. + 0 + 1.4919589739292860e-003 + 0.0827241688966751 + -0.0968378931283951 + <_> + + <_> + + + + <_>12 5 6 6 -1. + <_>12 5 3 6 2. + 0 + -2.4136069696396589e-003 + 0.0624809414148331 + -0.0505495592951775 + <_> + + <_> + + + + <_>2 5 6 6 -1. + <_>5 5 3 6 2. + 0 + 0.0128938304260373 + -0.0339019894599915 + 0.2803659141063690 + <_> + + <_> + + + + <_>10 1 9 6 -1. + <_>10 3 9 2 3. + 0 + -1.9992720335721970e-003 + -0.1715281009674072 + 0.0400841496884823 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 1.3713949592784047e-003 + -0.1221671998500824 + 0.0621221810579300 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>9 10 2 10 3. + 0 + -8.9740045368671417e-003 + -0.1709423065185547 + 0.0440320000052452 + <_> + + <_> + + + + <_>0 0 2 20 -1. + <_>1 0 1 20 2. + 0 + -2.9300691094249487e-003 + 0.1236404031515122 + -0.0637657269835472 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>16 5 2 14 2. + 0 + -8.0555928871035576e-003 + 0.1155256032943726 + -0.0444588698446751 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 6.4662001095712185e-003 + 0.0751474276185036 + -0.1128100976347923 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>16 5 2 14 2. + 0 + -0.1954178959131241 + -0.8649423122406006 + 3.1826570630073547e-003 + <_> + + <_> + + + + <_>0 5 4 14 -1. + <_>2 5 2 14 2. + 0 + -0.1574075967073441 + -0.7240580916404724 + 9.4235781580209732e-003 + <_> + + <_> + + + + <_>0 11 20 4 -1. + <_>10 11 10 2 2. + <_>0 13 10 2 2. 
+ 0 + -0.0315264612436295 + -0.3821895122528076 + 0.0163867902010679 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + 0.0504390485584736 + -0.0276230406016111 + 0.2730627954006195 + <_> + + <_> + + + + <_>6 9 9 5 -1. + <_>9 9 3 5 3. + 0 + -5.5078428704291582e-004 + 0.0496235489845276 + -0.0544628016650677 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>10 0 1 13 2. + 0 + 1.5047970227897167e-003 + -0.0620589405298233 + 0.1220401003956795 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0457968413829803 + -0.9331477284431458 + 6.8162381649017334e-003 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -9.3235643580555916e-003 + -0.2743670046329498 + 0.0278207492083311 + <_> + + <_> + + + + <_>10 1 9 6 -1. + <_>10 3 9 2 3. + 0 + 0.1068912968039513 + 4.7212988138198853e-003 + -0.4403704106807709 + <_> + + <_> + + + + <_>1 1 9 6 -1. + <_>1 3 9 2 3. + 0 + 1.1234519770368934e-003 + -0.1416224986314774 + 0.0475113689899445 + <_> + + <_> + + + + <_>11 0 5 8 -1. + <_>11 4 5 4 2. + 0 + 6.7312899045646191e-003 + -0.0458814799785614 + 0.1134274005889893 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + 0.0412641502916813 + 0.0114067802205682 + -0.6289417147636414 + <_> + + <_> + + + + <_>9 2 6 11 -1. + <_>11 2 2 11 3. + 0 + -0.0737887993454933 + -0.4192483127117157 + 7.9344836995005608e-003 + <_> + + <_> + + + + <_>5 2 6 11 -1. + <_>7 2 2 11 3. + 0 + -0.0326695293188095 + 0.2222491055727005 + -0.0308459792286158 + <_> + + <_> + + + + <_>7 1 6 10 -1. + <_>10 1 3 5 2. + <_>7 6 3 5 2. + 0 + -5.9001590125262737e-003 + -0.1500352025032044 + 0.0458197109401226 + <_> + + <_> + + + + <_>3 2 10 5 -1. + <_>8 2 5 5 2. + 0 + -0.0741418674588203 + 0.5623661279678345 + -0.0111841196194291 + <_> + + <_> + + + + <_>2 17 17 3 -1. + <_>2 18 17 1 3. + 0 + -0.0171105898916721 + -0.3088833093643189 + 0.0173403508961201 + <_> + + <_> + + + + <_>0 13 14 3 -1. + <_>0 14 14 1 3. + 0 + 2.4508470669388771e-003 + -0.0570740811526775 + 0.1130689010024071 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + -0.0211579799652100 + 0.2026463001966476 + -0.0147051699459553 + <_> + + <_> + + + + <_>7 10 4 10 -1. + <_>7 15 4 5 2. + 0 + 7.1819419972598553e-003 + 0.0297881998121738 + -0.2230837047100067 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 5.0557879731059074e-003 + -0.0262572802603245 + 0.1202829033136368 + <_> + + <_> + + + + <_>2 12 16 6 -1. + <_>2 14 16 2 3. + 0 + 0.0126106599345803 + 0.0259652994573116 + -0.2575523853302002 + <_> + + <_> + + + + <_>5 9 13 3 -1. + <_>5 10 13 1 3. + 0 + 3.0165250791469589e-005 + -0.1199491992592812 + 0.0289165005087852 + <_> + + <_> + + + + <_>8 5 4 12 -1. + <_>8 9 4 4 3. + 0 + -1.3415860012173653e-003 + 0.2059284001588821 + -0.0328030399978161 + <_> + + <_> + + + + <_>6 1 14 6 -1. + <_>13 1 7 3 2. + <_>6 4 7 3 2. + 0 + 5.9342157328501344e-004 + 0.0497886911034584 + -0.0709985271096230 + <_> + + <_> + + + + <_>3 1 12 6 -1. + <_>3 3 12 2 3. + 0 + -0.0154289295896888 + 0.3273377120494843 + -0.0202394891530275 + <_> + + <_> + + + + <_>9 5 11 6 -1. + <_>9 7 11 2 3. + 0 + -1.1928460298804566e-004 + 0.0264050103724003 + -0.1466607004404068 + <_> + + <_> + + + + <_>5 2 3 13 -1. + <_>6 2 1 13 3. + 0 + -0.0217268802225590 + -0.4401434957981110 + 0.0142646497115493 + <_> + + <_> + + + + <_>15 5 4 14 -1. + <_>17 5 2 7 2. + <_>15 12 2 7 2. + 0 + -0.0307107698172331 + 0.1354915052652359 + -0.0175862107425928 + <_> + + <_> + + + + <_>0 14 7 6 -1. + <_>0 16 7 2 3. 
+ 0 + 4.3861479498445988e-003 + 0.0544237904250622 + -0.1123457998037338 + <_> + + <_> + + + + <_>5 15 13 3 -1. + <_>5 16 13 1 3. + 0 + 4.7966800630092621e-003 + -0.0434940792620182 + 0.1310887038707733 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 10 4 4 2. + <_>10 14 4 4 2. + 0 + 2.2497470490634441e-003 + 0.0594898089766502 + -0.1095547974109650 + <_> + + <_> + + + + <_>3 10 14 6 -1. + <_>10 10 7 3 2. + <_>3 13 7 3 2. + 0 + 4.3578739278018475e-003 + 0.0591861791908741 + -0.1302604973316193 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 2.0433720201253891e-003 + -0.0516254901885986 + 0.1378781050443649 + <_> + + <_> + + + + <_>5 14 15 3 -1. + <_>5 15 15 1 3. + 0 + -2.0268680527806282e-003 + 0.0881051272153854 + -0.0858675613999367 + <_> + + <_> + + + + <_>0 1 14 6 -1. + <_>0 1 7 3 2. + <_>7 4 7 3 2. + 0 + -6.5703789005056024e-004 + 0.0710449889302254 + -0.0907515436410904 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 0.0443099699914455 + -0.0115222902968526 + 0.2273374050855637 + <_> + + <_> + + + + <_>0 0 8 8 -1. + <_>0 0 4 4 2. + <_>4 4 4 4 2. + 0 + 4.6578957699239254e-003 + -0.0461235493421555 + 0.1527702957391739 + <_> + + <_> + + + + <_>3 16 14 4 -1. + <_>10 16 7 2 2. + <_>3 18 7 2 2. + 0 + -0.0409600585699081 + -0.5598890185356140 + 0.0120647400617599 + <_> + + <_> + + + + <_>0 1 6 10 -1. + <_>0 1 3 5 2. + <_>3 6 3 5 2. + 0 + -6.7416871897876263e-003 + 0.1048407033085823 + -0.0651528015732765 + <_> + + <_> + + + + <_>10 3 8 8 -1. + <_>14 3 4 4 2. + <_>10 7 4 4 2. + 0 + -2.9713090043514967e-004 + 0.0322212018072605 + -0.0847099795937538 + <_> + + <_> + + + + <_>1 5 10 6 -1. + <_>1 5 5 3 2. + <_>6 8 5 3 2. + 0 + -8.0926045775413513e-003 + -0.1647664010524750 + 0.0457001216709614 + <_> + + <_> + + + + <_>14 2 2 14 -1. + <_>14 9 2 7 2. + 0 + 0.0407103486359119 + 0.0100992601364851 + -0.1089332997798920 + <_> + + <_> + + + + <_>4 2 2 14 -1. + <_>4 9 2 7 2. + 0 + -1.1402929667383432e-003 + -0.1926981955766678 + 0.0445908308029175 + <_> + + <_> + + + + <_>4 8 12 4 -1. + <_>4 10 12 2 2. + 0 + -0.0203064307570457 + 0.6866806149482727 + -9.8533723503351212e-003 + <_> + + <_> + + + + <_>2 3 8 8 -1. + <_>2 3 4 4 2. + <_>6 7 4 4 2. + 0 + 0.0486313700675964 + 0.0119915902614594 + -0.6477090716362000 + <_> + + <_> + + + + <_>17 0 2 16 -1. + <_>17 8 2 8 2. + 0 + -0.0544149503111839 + 0.3473069965839386 + -0.0119405901059508 + <_> + + <_> + + + + <_>1 5 4 14 -1. + <_>1 5 2 7 2. + <_>3 12 2 7 2. + 0 + -0.0595325306057930 + 0.3641026914119721 + -0.0160508193075657 + <_> + + <_> + + + + <_>8 6 5 10 -1. + <_>8 11 5 5 2. + 0 + -0.0350894518196583 + -0.1925289928913117 + 0.0235986299812794 + <_> + + <_> + + + + <_>4 2 8 10 -1. + <_>4 2 4 5 2. + <_>8 7 4 5 2. + 0 + 5.7658711448311806e-003 + -0.0462938509881496 + 0.1528797000646591 + <_> + + <_> + + + + <_>8 5 10 8 -1. + <_>13 5 5 4 2. + <_>8 9 5 4 2. + 0 + -2.3687579669058323e-003 + 0.0573452301323414 + -0.0881954729557037 + <_> + + <_> + + + + <_>0 7 7 6 -1. + <_>0 9 7 2 3. + 0 + -2.7341600507497787e-003 + -0.2389616072177887 + 0.0257618092000484 + <_> + + <_> + + + + <_>16 1 4 7 -1. + <_>16 1 2 7 2. + 0 + -9.1599775478243828e-003 + 0.1003749966621399 + -0.0267319791018963 + <_> + + <_> + + + + <_>1 0 2 16 -1. + <_>1 8 2 8 2. + 0 + -0.0506231710314751 + 0.4690837860107422 + -0.0138804297894239 + <_> + + <_> + + + + <_>15 3 5 6 -1. + <_>15 6 5 3 2. + 0 + -4.3487590737640858e-003 + -0.1481294035911560 + 0.0521153584122658 + <_> + + <_> + + + + <_>0 0 20 12 -1. + <_>0 6 20 6 2. 
+ 0 + 0.4085980057716370 + 0.0154545297846198 + -0.4649426937103272 + <_> + + <_> + + + + <_>15 3 5 6 -1. + <_>15 6 5 3 2. + 0 + 0.0531040094792843 + 7.8609427437186241e-003 + -0.5355514287948608 + <_> + + <_> + + + + <_>0 3 5 6 -1. + <_>0 6 5 3 2. + 0 + -4.1035288013517857e-003 + -0.1377788037061691 + 0.0468478091061115 + <_> + + <_> + + + + <_>9 10 7 4 -1. + <_>9 12 7 2 2. + 0 + -2.7622529305517673e-003 + 0.0523039400577545 + -0.0949708372354507 + <_> + + <_> + + + + <_>2 9 13 6 -1. + <_>2 12 13 3 2. + 0 + 9.3903020024299622e-003 + -0.0234937295317650 + 0.3625979125499725 + <_> + + <_> + + + + <_>2 2 16 14 -1. + <_>2 9 16 7 2. + 0 + 0.0237716306000948 + 0.0807461664080620 + -0.0828936025500298 + <_> + + <_> + + + + <_>4 5 10 8 -1. + <_>4 9 10 4 2. + 0 + 2.8008709196001291e-003 + -0.2659569978713989 + 0.0285346806049347 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + -6.3013769686222076e-003 + 0.0804816335439682 + -0.0290161799639463 + <_> + + <_> + + + + <_>8 0 3 15 -1. + <_>8 5 3 5 3. + 0 + -5.1433448679745197e-003 + -0.1147350966930389 + 0.0584486313164234 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + 1.0679479455575347e-003 + -0.0316618904471397 + 0.0545227788388729 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + 1.5213950537145138e-003 + -0.0621725507080555 + 0.0976013168692589 + <_> + + <_> + + + + <_>1 14 18 4 -1. + <_>10 14 9 2 2. + <_>1 16 9 2 2. + 0 + -0.0337799116969109 + -0.4958269894123077 + 0.0120933195576072 + <_> + + <_> + + + + <_>1 8 6 5 -1. + <_>4 8 3 5 2. + 0 + -0.1050537005066872 + -0.9873880147933960 + 5.1499558612704277e-003 + <_> + + <_> + + + + <_>13 1 6 19 -1. + <_>13 1 3 19 2. + 0 + 0.0196858402341604 + -0.0561894290149212 + 0.0912605375051498 + <_> + + <_> + + + + <_>1 1 6 19 -1. + <_>4 1 3 19 2. + 0 + 0.0664703994989395 + 0.0140978898853064 + -0.4573164880275726 + <_> + + <_> + + + + <_>6 0 14 3 -1. + <_>6 1 14 1 3. + 0 + -0.0158980991691351 + -0.2331776022911072 + 0.0113696204498410 + <_> + + <_> + + + + <_>0 0 14 3 -1. + <_>0 1 14 1 3. + 0 + 4.0450799278914928e-003 + 0.0433450490236282 + -0.1590802073478699 + <_> + + <_> + + + + <_>8 2 7 6 -1. + <_>8 5 7 3 2. + 0 + -0.0334865488111973 + 0.1308659017086029 + -0.0343275591731071 + <_> + + <_> + + + + <_>0 3 9 14 -1. + <_>3 3 3 14 3. + 0 + 0.0214584805071354 + -0.0502133518457413 + 0.1146700978279114 + <_> + + <_> + + + + <_>10 8 9 6 -1. + <_>10 10 9 2 3. + 0 + 0.1167273968458176 + -3.4590030554682016e-003 + 0.4415673017501831 + <_> + + <_> + + + + <_>0 1 16 4 -1. + <_>0 1 8 2 2. + <_>8 3 8 2 2. + 0 + -5.0386278890073299e-003 + -0.1399540007114410 + 0.0408543981611729 + <_> + + <_> + + + + <_>16 2 4 7 -1. + <_>16 2 2 7 2. + 0 + 0.0372611209750175 + -0.0163991898298264 + 0.2362785041332245 + <_> + + <_> + + + + <_>0 8 10 6 -1. + <_>0 10 10 2 3. + 0 + -0.0179914608597755 + -0.5670362710952759 + 0.0101850796490908 + <_> + + <_> + + + + <_>16 2 4 7 -1. + <_>16 2 2 7 2. + 0 + 0.1074803993105888 + 1.8287489656358957e-003 + -0.7870578169822693 + <_> + + <_> + + + + <_>0 2 4 7 -1. + <_>2 2 2 7 2. + 0 + -0.0214396193623543 + 0.1834709048271179 + -0.0324107892811298 + <_> + + <_> + + + + <_>5 3 12 14 -1. + <_>11 3 6 7 2. + <_>5 10 6 7 2. + 0 + 6.8095367169007659e-004 + 0.0416750684380531 + -0.0893016383051872 + <_> + + <_> + + + + <_>7 6 3 10 -1. + <_>7 11 3 5 2. + 0 + -6.8581351079046726e-003 + -0.1451186984777451 + 0.0515854991972446 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>16 2 2 9 3. 
+ 0 + 0.1531828045845032 + 3.1881679315119982e-003 + -0.4419009089469910 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>2 2 2 9 3. + 0 + 0.0227773692458868 + -0.0432341210544109 + 0.1747722029685974 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 6.6160550341010094e-003 + 0.0431408211588860 + -0.1718851029872894 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>4 5 6 3 2. + <_>10 8 6 3 2. + 0 + -8.8224448263645172e-003 + 0.1320316940546036 + -0.0475092008709908 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -5.1209977827966213e-003 + -0.1897916048765183 + 0.0576573088765144 + <_> + + <_> + + + + <_>7 5 6 8 -1. + <_>9 5 2 8 3. + 0 + -0.0103118801489472 + 0.3228681981563568 + -0.0197250191122293 + <_> + + <_> + + + + <_>4 6 12 6 -1. + <_>8 6 4 6 3. + 0 + -0.0250657591968775 + -0.3657239973545075 + 0.0183448698371649 + <_> + + <_> + + + + <_>1 4 4 14 -1. + <_>1 4 2 7 2. + <_>3 11 2 7 2. + 0 + -0.0143184298649430 + 0.1579546928405762 + -0.0382769182324409 + <_> + + <_> + + + + <_>0 1 20 6 -1. + <_>10 1 10 3 2. + <_>0 4 10 3 2. + 0 + -0.0573839396238327 + -0.3683528900146484 + 0.0169002097100019 + <_> + + <_> + + + + <_>5 2 10 6 -1. + <_>5 4 10 2 3. + 0 + -0.0436802990734577 + 0.4476679861545563 + -0.0137104596942663 + <_> + + <_> + + + + <_>0 2 20 6 -1. + <_>0 5 20 3 2. + 0 + -0.2428909987211227 + -0.7549092769622803 + 8.9195184409618378e-003 + <_> + + <_> + + + + <_>3 10 6 8 -1. + <_>5 10 2 8 3. + 0 + 3.8089449517428875e-003 + -0.0629167184233665 + 0.0942829027771950 + <_> + + <_> + + + + <_>13 4 4 16 -1. + <_>15 4 2 8 2. + <_>13 12 2 8 2. + 0 + 8.9389752247370780e-005 + -0.1125340014696121 + 0.0994479134678841 + <_> + + <_> + + + + <_>6 2 2 18 -1. + <_>6 11 2 9 2. + 0 + 2.7378369122743607e-003 + 0.0748805105686188 + -0.0992576107382774 + <_> + + <_> + + + + <_>13 4 4 16 -1. + <_>15 4 2 8 2. + <_>13 12 2 8 2. + 0 + 0.0236805602908134 + 0.0121058700606227 + -0.1178075000643730 + <_> + + <_> + + + + <_>3 4 4 16 -1. + <_>3 4 2 8 2. + <_>5 12 2 8 2. + 0 + -0.0460600703954697 + 0.3979974091053009 + -0.0171293690800667 + <_> + + <_> + + + + <_>6 15 9 4 -1. + <_>6 17 9 2 2. + 0 + 2.1130219101905823e-003 + -0.0609068498015404 + 0.0499742813408375 + <_> + + <_> + + + + <_>4 2 6 7 -1. + <_>6 2 2 7 3. + 0 + 0.0147531498223543 + 0.0166297294199467 + -0.3780666887760162 + <_> + + <_> + + + + <_>8 0 4 12 -1. + <_>8 0 2 12 2. + 0 + 0.0354309082031250 + -0.0238443706184626 + 0.2635455131530762 + <_> + + <_> + + + + <_>0 6 20 2 -1. + <_>10 6 10 2 2. + 0 + -0.0507450997829437 + -0.2314130961894989 + 0.0283203497529030 + <_> + + <_> + + + + <_>14 2 6 18 -1. + <_>17 2 3 9 2. + <_>14 11 3 9 2. + 0 + 0.0898740589618683 + -0.0101912496611476 + 0.2627770006656647 + <_> + + <_> + + + + <_>0 7 14 4 -1. + <_>0 7 7 2 2. + <_>7 9 7 2 2. + 0 + -2.7411670889705420e-003 + -0.1382844001054764 + 0.0469662807881832 + <_> + + <_> + + + + <_>8 5 10 8 -1. + <_>13 5 5 4 2. + <_>8 9 5 4 2. + 0 + 0.0873859375715256 + 1.7351199639961123e-003 + -0.8081040978431702 + <_> + + <_> + + + + <_>2 5 10 8 -1. + <_>2 5 5 4 2. + <_>7 9 5 4 2. + 0 + -2.9055110644549131e-003 + 0.0661932677030563 + -0.0959811881184578 + <_> + + <_> + + + + <_>4 2 16 12 -1. + <_>4 2 8 12 2. + 0 + -0.5125557780265808 + -1. + 8.6886010831221938e-004 + <_> + + <_> + + + + <_>0 2 16 12 -1. + <_>8 2 8 12 2. + 0 + -0.0132812596857548 + 0.1013427004218102 + -0.0643442794680595 + <_> + + <_> + + + + <_>11 2 4 7 -1. + <_>11 2 2 7 2. + 0 + 0.0536609403789043 + 3.2843649387359619e-003 + -0.8001198768615723 + <_> + + <_> + + + + <_>5 2 4 7 -1. + <_>7 2 2 7 2. 
[Elided: OpenCV Haar-cascade classifier data added wholesale by this diff (every line `+`-prefixed), flattened by extraction. Each weak classifier is serialized as its feature rectangles (x y w h weight, e.g. `<_>6 5 8 4 -1.` / `<_>6 5 4 4 2.`), a tilted flag (`0`), a decision threshold, and left/right leaf values. This run closes one stage with the record `-1.3171190023422241 34 -1` (stage threshold, parent, next) and continues into the next stage's weak classifiers. Full numeric payload omitted.]
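Nothing in the diff itself explains the record layout, so as a reading aid here is a minimal sketch of how a detection window is scored against one such stage, assuming the standard Viola-Jones interpretation of these fields. All names (`rect_sum`, `passes_stage`, `stumps`, `norm`) are illustrative, not taken from the file; `norm` stands in for OpenCV's per-window variance normalization, which this sketch does not reproduce.

```python
# Conceptual sketch, assuming each stump = (rects, threshold, left_val,
# right_val) and each rect = (x, y, w, h, weight), as in the records above.
import numpy as np

def rect_sum(ii, x, y, w, h):
    # O(1) rectangle sum via an integral image `ii` padded with a
    # leading row/column of zeros.
    return ii[y + h, x + w] - ii[y, x + w] - ii[y + h, x] + ii[y, x]

def passes_stage(ii, stumps, stage_threshold, norm=1.0):
    total = 0.0
    for rects, thr, left_val, right_val in stumps:
        # Feature value: weighted sum of rectangle sums.
        feature = sum(w8 * rect_sum(ii, x, y, w, h) for x, y, w, h, w8 in rects)
        # Decision stump: contribute the left or right leaf value.
        total += left_val if feature < thr * norm else right_val
    # The window survives only if the summed leaf values clear the stage
    # threshold (e.g. -1.3171190023422241 above); otherwise the cascade
    # rejects it immediately and no later stage runs.
    return total >= stage_threshold

# Example (hypothetical): score a 20x20 window of a grayscale image `img`.
# win = img[ty:ty + 20, tx:tx + 20].astype(float)
# ii = np.pad(win.cumsum(0).cumsum(1), ((1, 0), (1, 0)))
# ok = passes_stage(ii, stumps, -1.3171190023422241)
```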
[Elided: the cascade data continues in the same form, crossing a further stage boundary `-1.4526200294494629 35 -1` before breaking off mid-record at the end of this excerpt.]
+ 0 + -6.6485297866165638e-003 + 0.1478358060121536 + -0.0469883307814598 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -5.5821631103754044e-003 + -0.1356212049722672 + 0.0553083904087543 + <_> + + <_> + + + + <_>1 14 13 3 -1. + <_>1 15 13 1 3. + 0 + -0.0302247591316700 + 0.3476066887378693 + -0.0166988391429186 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -0.0275069493800402 + 0.2803105115890503 + -0.0101234903559089 + <_> + + <_> + + + + <_>5 2 3 18 -1. + <_>6 2 1 18 3. + 0 + 0.0150439301505685 + 0.0152790797874331 + -0.3950695991516113 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + 9.2139653861522675e-003 + 0.0266784094274044 + -0.1425559073686600 + <_> + + <_> + + + + <_>0 10 20 3 -1. + <_>0 11 20 1 3. + 0 + 0.0639555826783180 + 6.2569188885390759e-003 + -0.8807666897773743 + <_> + + <_> + + + + <_>7 10 13 3 -1. + <_>7 11 13 1 3. + 0 + 3.0171850085025653e-005 + -0.1104791983962059 + 0.0519368499517441 + <_> + + <_> + + + + <_>0 15 13 2 -1. + <_>0 16 13 1 2. + 0 + -5.1049161702394485e-003 + 0.2135072946548462 + -0.0278892703354359 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -9.1436346992850304e-003 + -0.1919710934162140 + 0.0303414594382048 + <_> + + <_> + + + + <_>3 7 12 5 -1. + <_>7 7 4 5 3. + 0 + -0.0767460465431213 + -0.7246891260147095 + 7.1879802271723747e-003 + <_> + + <_> + + + + <_>2 11 16 8 -1. + <_>10 11 8 4 2. + <_>2 15 8 4 2. + 0 + 0.0487805604934692 + -0.0214477796107531 + 0.3036446869373322 + <_> + + <_> + + + + <_>2 0 14 12 -1. + <_>2 6 14 6 2. + 0 + 0.4255141019821167 + 6.3504311256110668e-003 + -0.9478399157524109 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 14 4 4 2. + 0 + 2.2590209264308214e-003 + 0.0188931692391634 + -0.1944386959075928 + <_> + + <_> + + + + <_>5 11 10 6 -1. + <_>5 11 5 3 2. + <_>10 14 5 3 2. + 0 + -3.8309961091727018e-003 + -0.1281321942806244 + 0.0477487295866013 + <_> + + <_> + + + + <_>10 1 7 6 -1. + <_>10 3 7 2 3. + 0 + 7.5495108030736446e-003 + -0.0679828226566315 + 0.0764707997441292 + <_> + + <_> + + + + <_>5 3 10 6 -1. + <_>5 5 10 2 3. + 0 + 0.0147847300395370 + -0.0348850414156914 + 0.1793683022260666 + <_> + + <_> + + + + <_>4 6 12 3 -1. + <_>4 6 6 3 2. + 0 + 0.0567626394331455 + 0.0128167895600200 + -0.4810582995414734 + <_> + + <_> + + + + <_>1 4 14 3 -1. + <_>1 5 14 1 3. + 0 + -2.5854599662125111e-003 + 0.1265397071838379 + -0.0477618500590324 + <_> + + <_> + + + + <_>12 12 8 4 -1. + <_>12 12 4 4 2. + 0 + -5.5542518384754658e-003 + 0.0721269026398659 + -0.0386576615273952 + <_> + + <_> + + + + <_>0 12 8 4 -1. + <_>4 12 4 4 2. + 0 + 2.6672501116991043e-003 + -0.0614852607250214 + 0.1264784038066864 + <_> + + <_> + + + + <_>10 9 10 8 -1. + <_>10 9 5 8 2. + 0 + -0.2287995964288712 + -0.4843535125255585 + 4.5618140138685703e-003 + <_> + + <_> + + + + <_>0 9 10 8 -1. + <_>5 9 5 8 2. + 0 + 0.0378513298928738 + 0.0187695603817701 + -0.3080694973468781 + <_> + + <_> + + + + <_>3 4 14 3 -1. + <_>3 5 14 1 3. + 0 + 2.4275709874927998e-003 + -0.0715891718864441 + 0.0816945433616638 + <_> + + <_> + + + + <_>0 5 12 4 -1. + <_>0 7 12 2 2. + 0 + -7.9000797122716904e-003 + -0.1258932054042816 + 0.0474213100969791 + <_> + + <_> + + + + <_>7 1 8 12 -1. + <_>7 7 8 6 2. + 0 + -6.7925411276519299e-003 + 0.0617587305605412 + -0.0538035593926907 + <_> + + <_> + + + + <_>5 0 10 15 -1. + <_>10 0 5 15 2. + 0 + -0.1752236038446426 + 0.3372611105442047 + -0.0179619602859020 + <_> + + <_> + + + + <_>6 1 10 6 -1. + <_>11 1 5 3 2. + <_>6 4 5 3 2. 
+ 0 + 0.0660339593887329 + 4.4206557795405388e-003 + -0.5581914782524109 + <_> + + <_> + + + + <_>4 1 10 6 -1. + <_>4 1 5 3 2. + <_>9 4 5 3 2. + 0 + 5.1699979230761528e-003 + 0.0533493012189865 + -0.1224528998136520 + <_> + + <_> + + + + <_>1 5 18 3 -1. + <_>7 5 6 3 3. + 0 + 0.1204798966646195 + -6.9788158871233463e-003 + 0.7934191226959229 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -4.2617730796337128e-003 + 0.0780141204595566 + -0.0682603865861893 + <_> + + <_> + + + + <_>11 8 4 12 -1. + <_>11 8 2 12 2. + 0 + 0.0306853707879782 + 9.3320813030004501e-003 + -0.2742024958133698 + <_> + + <_> + + + + <_>5 8 4 12 -1. + <_>7 8 2 12 2. + 0 + -6.8651121109724045e-003 + -0.1308497935533524 + 0.0472734086215496 + <_> + + <_> + + + + <_>8 4 4 16 -1. + <_>10 4 2 8 2. + <_>8 12 2 8 2. + 0 + -3.9284229278564453e-003 + 0.1155371963977814 + -0.0550442896783352 + <_> + + <_> + + + + <_>8 6 4 14 -1. + <_>8 6 2 7 2. + <_>10 13 2 7 2. + 0 + -4.2112590745091438e-003 + 0.1373077929019928 + -0.0525143891572952 + <_> + + <_> + + + + <_>3 2 14 2 -1. + <_>3 3 14 1 2. + 0 + -7.6999869197607040e-003 + -0.3401119112968445 + 0.0174786802381277 + <_> + + <_> + + + + <_>3 0 13 9 -1. + <_>3 3 13 3 3. + 0 + -0.0118679096922278 + 0.2573117911815643 + -0.0256917700171471 + <_> + + <_> + + + + <_>3 5 17 6 -1. + <_>3 7 17 2 3. + 0 + 5.3619472309947014e-003 + 0.0119367800652981 + -0.2893005013465881 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -2.3130229674279690e-003 + -0.1082130968570709 + 0.0536407493054867 + <_> + + <_> + + + + <_>3 1 15 19 -1. + <_>8 1 5 19 3. + 0 + -0.2222287058830261 + 0.3165431022644043 + -0.0145423198118806 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 6.2593920156359673e-003 + 0.0377951711416245 + -0.1510069966316223 + <_> + + <_> + + + + <_>3 2 14 3 -1. + <_>3 2 7 3 2. + 0 + 3.4754760563373566e-003 + -0.0630474686622620 + 0.0850256830453873 + <_> + + <_> + + + + <_>3 6 10 3 -1. + <_>8 6 5 3 2. + 0 + -2.8249478782527149e-004 + -0.1144286990165710 + 0.0560414008796215 + <_> + + <_> + + + + <_>6 7 14 2 -1. + <_>6 8 14 1 2. + 0 + 4.8107700422406197e-004 + -0.0968984663486481 + 0.0283470507711172 + <_> + + <_> + + + + <_>2 4 15 3 -1. + <_>2 5 15 1 3. + 0 + 0.0241789594292641 + -0.0210330598056316 + 0.2562944889068604 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + 0.0295269601047039 + 0.0161225795745850 + -0.3447209000587463 + <_> + + <_> + + + + <_>2 14 7 6 -1. + <_>2 16 7 2 3. + 0 + -3.0501780565828085e-003 + -0.1363352984189987 + 0.0409837886691093 + <_> + + <_> + + + + <_>8 15 7 4 -1. + <_>8 17 7 2 2. + 0 + 1.0082300286740065e-003 + -0.0609270296990871 + 0.0407171994447708 + <_> + + <_> + + + + <_>0 12 20 6 -1. + <_>0 15 20 3 2. + 0 + -3.0384280253201723e-003 + 0.0618832781910896 + -0.0978871211409569 + <_> + + <_> + + + + <_>6 3 13 3 -1. + <_>6 4 13 1 3. + 0 + 3.2816259190440178e-003 + -0.0479506216943264 + 0.0626754015684128 + <_> + + <_> + + + + <_>1 5 17 12 -1. + <_>1 9 17 4 3. + 0 + 0.0131826102733612 + 0.2247623950242996 + -0.0256491694599390 + <_> + + <_> + + + + <_>6 11 13 3 -1. + <_>6 12 13 1 3. + 0 + -2.3278119042515755e-003 + 0.0737356022000313 + -0.0510238893330097 + <_> + + <_> + + + + <_>2 5 16 8 -1. + <_>2 9 16 4 2. + 0 + -0.0106955096125603 + -0.7562553882598877 + 7.3301601223647594e-003 + <_> + + <_> + + + + <_>9 5 5 14 -1. + <_>9 12 5 7 2. + 0 + 0.0780467465519905 + 1.8139410531148314e-003 + -0.6206793189048767 + <_> + + <_> + + + + <_>8 4 3 16 -1. + <_>9 4 1 16 3. 
+ 0 + 0.0566783398389816 + 6.2128840945661068e-003 + -0.7820093035697937 + <_> + + <_> + + + + <_>3 4 14 6 -1. + <_>10 4 7 3 2. + <_>3 7 7 3 2. + 0 + 7.2442921809852123e-003 + -0.0488524697721004 + 0.1064454987645149 + <_> + + <_> + + + + <_>0 3 7 6 -1. + <_>0 5 7 2 3. + 0 + -0.0667543336749077 + -0.6479606032371521 + 8.7654050439596176e-003 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>10 5 6 3 2. + <_>4 8 6 3 2. + 0 + -0.0346626304090023 + 0.3329395949840546 + -0.0172860696911812 + <_> + + <_> + + + + <_>0 13 19 6 -1. + <_>0 15 19 2 3. + 0 + -0.0150847500190139 + -0.1269658058881760 + 0.0455076992511749 + <_> + + <_> + + + + <_>13 13 7 6 -1. + <_>13 15 7 2 3. + 0 + -0.0234217308461666 + -0.2527934014797211 + 0.0158189702779055 + <_> + + <_> + + + + <_>3 1 7 6 -1. + <_>3 3 7 2 3. + 0 + 0.0256893206387758 + -0.0371946282684803 + 0.1622316986322403 + <_> + + <_> + + + + <_>13 13 7 6 -1. + <_>13 15 7 2 3. + 0 + 6.3883140683174133e-003 + 0.0306170098483562 + -0.1369500011205673 + <_> + + <_> + + + + <_>1 3 8 10 -1. + <_>1 3 4 5 2. + <_>5 8 4 5 2. + 0 + -0.1051959022879601 + -0.8445348143577576 + 6.6635669209063053e-003 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 12 4 6 2. + 0 + 0.0187736693769693 + 4.6610347926616669e-003 + -0.1711551994085312 + <_> + + <_> + + + + <_>4 10 4 7 -1. + <_>6 10 2 7 2. + 0 + -1.3318320270627737e-003 + 0.0657804235816002 + -0.0872415676712990 + <_> + + <_> + + + + <_>8 0 9 14 -1. + <_>11 0 3 14 3. + 0 + -0.2141733020544052 + 0.4786663949489594 + -3.0801231041550636e-003 + <_> + + <_> + + + + <_>1 1 18 19 -1. + <_>7 1 6 19 3. + 0 + -0.5509787201881409 + -0.6363369822502136 + 8.8994754478335381e-003 + <_> + + <_> + + + + <_>8 5 8 9 -1. + <_>8 8 8 3 3. + 0 + -3.3415539655834436e-003 + 0.1284604072570801 + -0.0323170796036720 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + 1.0858159512281418e-003 + -0.1143805012106895 + 0.0470908693969250 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + 4.2784498073160648e-003 + 0.0438426993787289 + -0.0808566883206367 + <_> + + <_> + + + + <_>2 10 6 8 -1. + <_>4 10 2 8 3. + 0 + -2.0054390188306570e-003 + 0.1053237020969391 + -0.0508663281798363 + <_> + + <_> + + + + <_>12 13 7 6 -1. + <_>12 15 7 2 3. + 0 + -3.4336079843342304e-003 + -0.0799860432744026 + 0.0425702705979347 + <_> + + <_> + + + + <_>6 10 4 8 -1. + <_>6 14 4 4 2. + 0 + -1.2204749509692192e-003 + 0.0411629416048527 + -0.1337811052799225 + <_> + + <_> + + + + <_>10 9 6 10 -1. + <_>10 14 6 5 2. + 0 + -0.1344037950038910 + -0.5204458832740784 + 2.9635489918291569e-003 + <_> + + <_> + + + + <_>4 9 6 10 -1. + <_>4 14 6 5 2. + 0 + 0.0145818199962378 + -0.0190679691731930 + 0.4006566107273102 + <_> + + <_> + + + + <_>13 13 7 6 -1. + <_>13 15 7 2 3. + 0 + -2.8450360987335443e-003 + -0.0589987114071846 + 0.0317977517843246 + <_> + + <_> + + + + <_>1 13 7 6 -1. + <_>1 15 7 2 3. + 0 + 4.8618339933454990e-003 + 0.0397547595202923 + -0.1474187970161438 + <_> + + <_> + + + + <_>13 1 6 13 -1. + <_>13 1 3 13 2. + 0 + 5.6295008398592472e-003 + -0.0420948788523674 + 0.0413941293954849 + <_> + + <_> + + + + <_>3 3 13 3 -1. + <_>3 4 13 1 3. + 0 + -4.5936359092593193e-003 + 0.2075109928846359 + -0.0279093794524670 + <_> + + <_> + + + + <_>7 3 13 2 -1. + <_>7 4 13 1 2. + 0 + -0.0306937396526337 + -0.3402904868125916 + 5.0333337858319283e-003 + <_> + + <_> + + + + <_>4 14 10 6 -1. + <_>4 14 5 3 2. + <_>9 17 5 3 2. + 0 + 3.1476689036935568e-004 + -0.0881188735365868 + 0.0633542910218239 + <_> + + <_> + + + + <_>11 1 4 14 -1. + <_>13 1 2 7 2. + <_>11 8 2 7 2. 
+ 0 + -3.4313879441469908e-003 + 0.0590887703001499 + -0.0677735805511475 + <_> + + <_> + + + + <_>0 3 14 2 -1. + <_>0 4 14 1 2. + 0 + -3.4075058647431433e-004 + -0.0982687622308731 + 0.0587836988270283 + <_> + + <_> + + + + <_>7 0 6 6 -1. + <_>7 3 6 3 2. + 0 + -3.7829359062016010e-003 + 0.1784172058105469 + -0.0469121783971787 + <_> + + <_> + + + + <_>0 0 16 18 -1. + <_>0 6 16 6 3. + 0 + -0.0463220588862896 + -0.1630741059780121 + 0.0391919314861298 + <_> + + <_> + + + + <_>14 2 5 9 -1. + <_>14 5 5 3 3. + 0 + 0.0184713806957006 + 0.0159750394523144 + -0.2880870103836060 + <_> + + <_> + + + + <_>1 10 4 10 -1. + <_>1 15 4 5 2. + 0 + 9.0416809543967247e-003 + -0.0318158306181431 + 0.1639292985200882 + <_> + + <_> + + + + <_>16 6 2 14 -1. + <_>16 13 2 7 2. + 0 + -0.0313879400491714 + 0.1569631993770599 + -0.0153331495821476 + <_> + + <_> + + + + <_>2 6 2 14 -1. + <_>2 13 2 7 2. + 0 + -7.5614887464325875e-005 + 0.0745913535356522 + -0.0843595415353775 + <_> + + <_> + + + + <_>14 2 5 9 -1. + <_>14 5 5 3 3. + 0 + -0.0239393003284931 + -0.1160458996891975 + 0.0308687891811132 + <_> + + <_> + + + + <_>1 2 5 9 -1. + <_>1 5 5 3 3. + 0 + 2.2537580225616693e-003 + 0.0402619093656540 + -0.1660403013229370 + <_> + + <_> + + + + <_>8 4 9 9 -1. + <_>8 7 9 3 3. + 0 + -0.0533898100256920 + 0.1031889021396637 + -0.0208772402256727 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>4 5 6 3 2. + <_>10 8 6 3 2. + 0 + 5.6420508772134781e-003 + -0.0468395203351974 + 0.1163408979773521 + <_> + + <_> + + + + <_>13 4 3 16 -1. + <_>14 4 1 16 3. + 0 + 4.2355400510132313e-003 + 0.0256312508136034 + -0.0931935831904411 + <_> + + <_> + + + + <_>4 4 3 16 -1. + <_>5 4 1 16 3. + 0 + -0.0219292603433132 + -0.3514122068881989 + 0.0157040208578110 + <_> + + <_> + + + + <_>12 2 4 12 -1. + <_>12 6 4 4 3. + 0 + 0.0130507899448276 + -7.6834131032228470e-003 + 0.1309593021869659 + <_> + + <_> + + + + <_>6 0 2 14 -1. + <_>7 0 1 14 2. + 0 + 0.0224261097609997 + 6.3964631408452988e-003 + -0.8051313161849976 + <_> + + <_> + + + + <_>15 0 4 16 -1. + <_>15 8 4 8 2. + 0 + -0.0887556523084641 + 0.3932324945926666 + -0.0103654200211167 + <_> + + <_> + + + + <_>1 0 4 16 -1. + <_>1 8 4 8 2. + 0 + 0.0117682702839375 + -0.0752705633640289 + 0.0711832270026207 + <_> + + <_> + + + + <_>12 9 8 6 -1. + <_>12 11 8 2 3. + 0 + 0.0212215706706047 + 0.0240827705711126 + -0.1629267036914825 + <_> + + <_> + + + + <_>0 6 14 2 -1. + <_>7 6 7 2 2. + 0 + -0.0528876110911369 + 0.3323107957839966 + -0.0155480401590467 + <_> + + <_> + + + + <_>0 0 20 5 -1. + <_>0 0 10 5 2. + 0 + 0.2584776878356934 + 9.5278248190879822e-003 + -0.6377344727516174 + <_> + + <_> + + + + <_>4 0 12 6 -1. + <_>4 0 6 3 2. + <_>10 3 6 3 2. + 0 + -2.8695159126073122e-003 + -0.0987199917435646 + 0.0552446506917477 + <_> + + <_> + + + + <_>12 0 8 8 -1. + <_>16 0 4 4 2. + <_>12 4 4 4 2. + 0 + 0.1249269023537636 + 1.9365450134500861e-003 + -0.9999927282333374 + <_> + + <_> + + + + <_>0 0 8 8 -1. + <_>0 0 4 4 2. + <_>4 4 4 4 2. + 0 + 0.0439007207751274 + -0.0163855701684952 + 0.3718385100364685 + <_> + + <_> + + + + <_>14 1 5 9 -1. + <_>14 4 5 3 3. + 0 + 5.2520469762384892e-003 + 0.0477582700550556 + -0.1346182972192764 + <_> + + <_> + + + + <_>1 6 18 2 -1. + <_>1 7 18 1 2. + 0 + -2.0031959284096956e-003 + 0.0835871025919914 + -0.0677505806088448 + <_> + + <_> + + + + <_>7 1 7 6 -1. + <_>7 3 7 2 3. + 0 + 4.4535310007631779e-003 + -0.0892024636268616 + 0.0467482581734657 + <_> + + <_> + + + + <_>1 2 18 10 -1. + <_>1 2 9 5 2. + <_>10 7 9 5 2. 
+ 0 + 0.1517463028430939 + 5.6481529027223587e-003 + -0.8245043754577637 + <_> + + <_> + + + + <_>9 3 8 8 -1. + <_>13 3 4 4 2. + <_>9 7 4 4 2. + 0 + -0.0619922094047070 + -0.4333459138870239 + 5.3922580555081367e-003 + <_> + + <_> + + + + <_>3 1 12 4 -1. + <_>9 1 6 4 2. + 0 + -0.0930853486061096 + 0.5216910243034363 + -9.9382782354950905e-003 + <_> + + <_> + + + + <_>4 5 12 7 -1. + <_>8 5 4 7 3. + 0 + -4.9394429661333561e-003 + -0.2000413984060288 + 0.0277109798043966 + <_> + + <_> + + + + <_>5 9 9 5 -1. + <_>8 9 3 5 3. + 0 + -1.3681269483640790e-003 + 0.0850654169917107 + -0.0745429694652557 + <_> + + <_> + + + + <_>7 10 6 7 -1. + <_>9 10 2 7 3. + 0 + -2.7988219517283142e-004 + -0.0769876316189766 + 0.0689129382371902 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + -3.2129848841577768e-003 + 0.1594099998474121 + -0.0342215895652771 + <_> + + <_> + + + + <_>11 2 2 16 -1. + <_>11 2 1 16 2. + 0 + 0.0395333692431450 + 3.1095379963517189e-003 + -0.8546090722084045 + <_> + + <_> + + + + <_>2 13 9 7 -1. + <_>5 13 3 7 3. + 0 + 2.0442719105631113e-003 + -0.0640745535492897 + 0.0786447599530220 + <_> + + <_> + + + + <_>11 2 2 16 -1. + <_>11 2 1 16 2. + 0 + -0.0207707602530718 + -0.3112941086292267 + 4.3864948675036430e-003 + <_> + + <_> + + + + <_>0 9 18 11 -1. + <_>6 9 6 11 3. + 0 + -0.0472003817558289 + 0.1052689030766487 + -0.0514561310410500 + <_> + + <_> + + + + <_>11 2 2 16 -1. + <_>11 2 1 16 2. + 0 + 0.0130968699231744 + 9.9430568516254425e-003 + -0.1425368040800095 + <_> + + <_> + + + + <_>3 7 12 6 -1. + <_>7 7 4 6 3. + 0 + -0.0109353903681040 + -0.1675661057233810 + 0.0358635485172272 + <_> + + <_> + + + + <_>11 4 5 9 -1. + <_>11 7 5 3 3. + 0 + -0.1635434925556183 + -0.8212932944297791 + 1.9741130527108908e-003 + <_> + + <_> + + + + <_>4 4 5 9 -1. + <_>4 7 5 3 3. + 0 + 0.0386687181890011 + -0.0113296797499061 + 0.4753246009349823 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + 0.0609499588608742 + 0.0115165300667286 + -0.5747207999229431 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0121016902849078 + 0.1550561040639877 + -0.0326291583478451 + <_> + + <_> + + + + <_>14 1 5 9 -1. + <_>14 4 5 3 3. + 0 + -0.0100642703473568 + -0.0923895314335823 + 0.0323180593550205 + <_> + + <_> + + + + <_>7 2 2 16 -1. + <_>8 2 1 16 2. + 0 + -5.8900681324303150e-003 + -0.2650313079357147 + 0.0191271398216486 + <_> + + <_> + + + + <_>3 15 14 3 -1. + <_>3 16 14 1 3. + 0 + -0.0313610397279263 + 0.5673077106475830 + -9.6010044217109680e-003 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -0.0477773211896420 + 0.5903866291046143 + -7.4091539718210697e-003 + <_> + + <_> + + + + <_>0 1 20 6 -1. + <_>10 1 10 3 2. + <_>0 4 10 3 2. + 0 + -0.0107922703027725 + -0.1281493008136749 + 0.0402649492025375 + <_> + + <_> + + + + <_>4 0 8 5 -1. + <_>8 0 4 5 2. + 0 + -0.0143741201609373 + 0.2077254056930542 + -0.0298549905419350 + <_> + + <_> + + + + <_>13 1 3 14 -1. + <_>14 1 1 14 3. + 0 + 0.0520798116922379 + -3.8335260469466448e-003 + 0.7581862807273865 + <_> + + <_> + + + + <_>4 1 3 14 -1. + <_>5 1 1 14 3. + 0 + 6.1354418285191059e-003 + 0.0304764509201050 + -0.1728169023990631 + <_> + + <_> + + + + <_>13 0 6 10 -1. + <_>16 0 3 5 2. + <_>13 5 3 5 2. + 0 + -3.0654598958790302e-003 + 0.0580253005027771 + -0.0796170383691788 + <_> + + <_> + + + + <_>1 0 6 10 -1. + <_>1 0 3 5 2. + <_>4 5 3 5 2. + 0 + 5.7721929624676704e-003 + -0.0367475189268589 + 0.1631979048252106 + <_> + + <_> + + + + <_>2 0 18 5 -1. + <_>8 0 6 5 3. 
+ 0 + 0.2702847123146057 + -3.9847781881690025e-003 + 0.4947654008865356 + <_> + + <_> + + + + <_>0 0 18 5 -1. + <_>6 0 6 5 3. + 0 + -0.1503452956676483 + -0.5262491106987000 + 0.0105679100379348 + <_> + + <_> + + + + <_>11 1 4 14 -1. + <_>13 1 2 7 2. + <_>11 8 2 7 2. + 0 + 0.0761016011238098 + -2.3525250144302845e-003 + 0.9181998968124390 + <_> + + <_> + + + + <_>5 1 4 14 -1. + <_>5 1 2 7 2. + <_>7 8 2 7 2. + 0 + -0.0559538118541241 + -0.7832127213478088 + 6.8363421596586704e-003 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + -0.0243209507316351 + 0.2273961007595062 + -0.0116222901269794 + <_> + + <_> + + + + <_>0 7 13 3 -1. + <_>0 8 13 1 3. + 0 + 0.0162743199616671 + 0.0140241701155901 + -0.3422223925590515 + <_> + + <_> + + + + <_>16 1 3 13 -1. + <_>17 1 1 13 3. + 0 + 7.7015208080410957e-004 + -0.0447687096893787 + 0.0574122294783592 + <_> + + <_> + + + + <_>1 1 3 13 -1. + <_>2 1 1 13 3. + 0 + 1.3995269546285272e-003 + -0.0606142394244671 + 0.0843989998102188 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0205447692424059 + -0.1816041022539139 + 0.0207951199263334 + <_> + + <_> + + + + <_>2 12 5 8 -1. + <_>2 16 5 4 2. + 0 + -0.0368725508451462 + 0.2681722939014435 + -0.0199212692677975 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + -2.5466610677540302e-003 + -0.1336192935705185 + 0.0191919393837452 + <_> + + <_> + + + + <_>0 8 13 3 -1. + <_>0 9 13 1 3. + 0 + 0.0335135906934738 + 9.8206587135791779e-003 + -0.5265988707542419 + <_> + + <_> + + + + <_>6 9 9 4 -1. + <_>6 11 9 2 2. + 0 + -0.0554376617074013 + 0.4529249072074890 + -9.3475803732872009e-003 + <_> + + <_> + + + + <_>0 7 10 6 -1. + <_>0 7 5 3 2. + <_>5 10 5 3 2. + 0 + -5.3564338013529778e-003 + -0.1478758007287979 + 0.0336179509758949 + <_> + + <_> + + + + <_>15 4 5 16 -1. + <_>15 12 5 8 2. + 0 + 0.0115512004122138 + -0.0328510589897633 + 0.0637165978550911 + <_> + + <_> + + + + <_>4 0 9 9 -1. + <_>7 0 3 9 3. + 0 + 0.0729178264737129 + -0.0163887199014425 + 0.3158080875873566 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0895630121231079 + 0.7536656260490418 + -2.0717559382319450e-003 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -2.2225419525057077e-003 + -0.0927338525652885 + 0.0603958517313004 + <_> + + <_> + + + + <_>3 12 14 8 -1. + <_>3 12 7 8 2. + 0 + -0.1784711033105850 + 0.4798853099346161 + -0.0104815103113651 + <_> + + <_> + + + + <_>2 10 16 10 -1. + <_>2 10 8 5 2. + <_>10 15 8 5 2. + 0 + 6.7723011597990990e-003 + 0.0526608303189278 + -0.1047129034996033 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>10 5 6 3 2. + <_>4 8 6 3 2. + 0 + 0.0283991303294897 + -0.0228620003908873 + 0.2534813880920410 + <_> + + <_> + + + + <_>5 5 10 8 -1. + <_>5 5 5 4 2. + <_>10 9 5 4 2. + 0 + -7.0053818635642529e-003 + -0.1301700025796890 + 0.0434489212930202 + <_> + + <_> + + + + <_>5 6 10 6 -1. + <_>10 6 5 3 2. + <_>5 9 5 3 2. + 0 + -5.1440461538732052e-003 + -0.1480010002851486 + 0.0451716296374798 + <_> + + <_> + + + + <_>1 15 12 5 -1. + <_>5 15 4 5 3. + 0 + -0.0112690599635243 + 0.1118535995483398 + -0.0548670887947083 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + 0.0228661093860865 + -0.0155636901035905 + 0.2170549035072327 + <_> + + <_> + + + + <_>5 9 10 8 -1. + <_>5 9 5 4 2. + <_>10 13 5 4 2. + 0 + 0.0515592284500599 + 0.0104218097403646 + -0.5323324799537659 + <_> + + <_> + + + + <_>2 7 18 13 -1. + <_>8 7 6 13 3. + 0 + 0.0189020596444607 + -0.0308788698166609 + 0.0555744990706444 + <_> + + <_> + + + + <_>4 6 10 5 -1. 
+ <_>9 6 5 5 2. + 0 + 5.5700382217764854e-003 + 0.0536613613367081 + -0.0948764979839325 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + -0.0230217296630144 + 0.1276624053716660 + -0.0223079100251198 + <_> + + <_> + + + + <_>3 0 6 8 -1. + <_>5 0 2 8 3. + 0 + 7.1334750391542912e-003 + 0.0310896895825863 + -0.1629343032836914 + <_> + + <_> + + + + <_>3 14 16 6 -1. + <_>3 14 8 6 2. + 0 + -0.0293352603912354 + 0.1050309017300606 + -0.0260085500776768 + <_> + + <_> + + + + <_>6 2 4 7 -1. + <_>8 2 2 7 2. + 0 + 0.0462532788515091 + 7.8362170606851578e-003 + -0.6622666120529175 + <_> + + <_> + + + + <_>4 9 14 3 -1. + <_>4 10 14 1 3. + 0 + 3.9622580516152084e-004 + -0.0945671275258064 + 0.0267968997359276 + <_> + + <_> + + + + <_>3 6 13 9 -1. + <_>3 9 13 3 3. + 0 + -0.0113237500190735 + 0.7431365251541138 + -6.7432140931487083e-003 + <_> + + <_> + + + + <_>7 0 6 18 -1. + <_>7 9 6 9 2. + 0 + -0.1721720993518829 + -0.7148349881172180 + 8.1747565418481827e-003 + <_> + + <_> + + + + <_>8 5 3 10 -1. + <_>8 10 3 5 2. + 0 + 1.8156579462811351e-003 + 0.0481357201933861 + -0.1067847013473511 + <_> + + <_> + + + + <_>3 3 16 4 -1. + <_>3 5 16 2 2. + 0 + 0.0580224916338921 + -7.4218288064002991e-003 + 0.3822644054889679 + <_> + + <_> + + + + <_>5 6 5 6 -1. + <_>5 9 5 3 2. + 0 + 1.4357370091602206e-003 + -0.2254288047552109 + 0.0215767193585634 + <_> + + <_> + + + + <_>4 6 12 6 -1. + <_>4 9 12 3 2. + 0 + 5.5960440076887608e-003 + 0.2573193013668060 + -0.0212465096265078 + <_> + + <_> + + + + <_>4 7 12 4 -1. + <_>4 9 12 2 2. + 0 + 2.5314849335700274e-003 + -0.3622772097587585 + 0.0151382600888610 + <_> + + <_> + + + + <_>8 9 9 4 -1. + <_>8 11 9 2 2. + 0 + -4.2207110673189163e-003 + -0.0466389916837215 + 0.0261255390942097 + <_> + + <_> + + + + <_>1 5 16 3 -1. + <_>1 6 16 1 3. + 0 + -5.4260431788861752e-003 + 0.1011037975549698 + -0.0520661212503910 + <_> + + <_> + + + + <_>5 5 13 3 -1. + <_>5 6 13 1 3. + 0 + 1.6170790186151862e-003 + -0.0416805408895016 + 0.0964593514800072 + <_> + + <_> + + + + <_>0 1 18 3 -1. + <_>0 2 18 1 3. + 0 + -3.2414530869573355e-003 + -0.1263868063688278 + 0.0391692109405994 + <_> + + <_> + + + + <_>9 2 6 10 -1. + <_>12 2 3 5 2. + <_>9 7 3 5 2. + 0 + 4.5421482063829899e-003 + -0.0291498806327581 + 0.0699488893151283 + <_> + + <_> + + + + <_>3 1 12 4 -1. + <_>7 1 4 4 3. + 0 + 5.3024510852992535e-003 + -0.0791290625929832 + 0.0611118599772453 + <_> + + <_> + + + + <_>9 2 6 10 -1. + <_>12 2 3 5 2. + <_>9 7 3 5 2. + 0 + -0.0464120805263519 + 0.3112744987010956 + -6.2580788508057594e-003 + <_> + + <_> + + + + <_>8 2 2 18 -1. + <_>8 11 2 9 2. + 0 + -6.2991487793624401e-003 + -0.0839281305670738 + 0.0667615309357643 + <_> + + <_> + + + + <_>9 2 6 10 -1. + <_>12 2 3 5 2. + <_>9 7 3 5 2. + 0 + 0.0799480900168419 + 2.6887101121246815e-003 + -0.5655370950698853 + <_> + + <_> + + + + <_>5 2 6 10 -1. + <_>5 2 3 5 2. + <_>8 7 3 5 2. + 0 + 9.9693494848906994e-004 + -0.0720510035753250 + 0.0922608971595764 + <_> + + <_> + + + + <_>4 9 12 4 -1. + <_>8 9 4 4 3. + 0 + -2.1847949828952551e-003 + 0.0838645175099373 + -0.0660996064543724 + <_> + + <_> + + + + <_>4 9 9 8 -1. + <_>4 13 9 4 2. + 0 + -0.1528684049844742 + 0.6170576810836792 + -8.1674018874764442e-003 + <_> + + <_> + + + + <_>1 15 19 4 -1. + <_>1 17 19 2 2. + 0 + 0.0171211306005716 + 0.0266764406114817 + -0.1415830999612808 + <_> + + <_> + + + + <_>5 15 7 4 -1. + <_>5 17 7 2 2. + 0 + 1.8799189710989594e-003 + -0.0778655633330345 + 0.0679552182555199 + <_> + + <_> + + + + <_>7 6 6 10 -1. + <_>9 6 2 10 3. 
+ 0 + 5.5029629729688168e-003 + -0.0799798592925072 + 0.0640559569001198 + <_> + + <_> + + + + <_>0 7 20 6 -1. + <_>0 10 20 3 2. + 0 + 0.0274745505303144 + 0.0604827217757702 + -0.0889575481414795 + <_> + + <_> + + + + <_>7 0 12 10 -1. + <_>7 5 12 5 2. + 0 + 0.2770887911319733 + 4.4098719954490662e-003 + -1.0000040531158447 + <_> + + <_> + + + + <_>0 14 10 6 -1. + <_>0 14 5 3 2. + <_>5 17 5 3 2. + 0 + -4.9538668245077133e-003 + 0.1472094058990479 + -0.0356715694069862 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + 0.0470953695476055 + -6.0950522311031818e-003 + 0.2431958019733429 + <_> + + <_> + + + + <_>0 8 5 9 -1. + <_>0 11 5 3 3. + 0 + -3.1939700711518526e-003 + -0.1341758072376251 + 0.0393355116248131 + <_> + + <_> + + + + <_>15 11 5 9 -1. + <_>15 14 5 3 3. + 0 + 3.5586568992584944e-003 + 0.0213994700461626 + -0.0436098016798496 + <_> + + <_> + + + + <_>1 11 13 3 -1. + <_>1 12 13 1 3. + 0 + -0.0100286398082972 + 0.1628888994455338 + -0.0314484499394894 + <_> + + <_> + + + + <_>15 11 5 9 -1. + <_>15 14 5 3 3. + 0 + -2.9802629724144936e-003 + -0.0702208578586578 + 0.0379107892513275 + <_> + + <_> + + + + <_>0 12 20 2 -1. + <_>0 13 20 1 2. + 0 + 0.0173475295305252 + 0.0110539598390460 + -0.4510779082775116 + <_> + + <_> + + + + <_>15 11 5 9 -1. + <_>15 14 5 3 3. + 0 + -0.0442071296274662 + 0.1411532014608383 + -6.2362072058022022e-003 + <_> + + <_> + + + + <_>0 11 5 9 -1. + <_>0 14 5 3 3. + 0 + -3.2249989453703165e-003 + -0.1030576005578041 + 0.0496478490531445 + <_> + + <_> + + + + <_>13 0 3 10 -1. + <_>13 5 3 5 2. + 0 + 7.5196991674602032e-003 + -0.0286043900996447 + 0.0983678027987480 + <_> + + <_> + + + + <_>3 0 13 18 -1. + <_>3 9 13 9 2. + 0 + -0.0612094588577747 + 0.2211385965347290 + -0.0298354905098677 + <_> + + <_> + + + + <_>12 5 3 14 -1. + <_>12 12 3 7 2. + 0 + 0.0201072506606579 + 0.0164124798029661 + -0.1231682971119881 + <_> + + <_> + + + + <_>5 5 3 14 -1. + <_>5 12 3 7 2. + 0 + -0.0165786799043417 + -0.2339563071727753 + 0.0302506908774376 + <_> + + <_> + + + + <_>2 8 16 10 -1. + <_>10 8 8 5 2. + <_>2 13 8 5 2. + 0 + -0.0609008707106113 + 0.3168857097625732 + -0.0184332001954317 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>10 5 2 7 2. + 0 + 4.2772209271788597e-003 + -0.0438594482839108 + 0.1285876035690308 + <_> + + <_> + + + + <_>6 3 12 9 -1. + <_>10 3 4 9 3. + 0 + 0.0661306977272034 + 0.0209411904215813 + -0.2054910063743591 + <_> + + <_> + + + + <_>4 5 6 5 -1. + <_>7 5 3 5 2. + 0 + 2.5896991137415171e-003 + -0.0825973227620125 + 0.0770487263798714 + <_> + + <_> + + + + <_>5 1 12 8 -1. + <_>11 1 6 4 2. + <_>5 5 6 4 2. + 0 + -0.0171137005090714 + -0.0995602011680603 + 0.0201742798089981 + <_> + + <_> + + + + <_>5 6 6 10 -1. + <_>5 6 3 5 2. + <_>8 11 3 5 2. + 0 + 6.2078679911792278e-003 + -0.0150742400437593 + 0.3539369106292725 + <_> + + <_> + + + + <_>2 10 18 9 -1. + <_>2 10 9 9 2. + 0 + -0.3367694914340973 + -0.4983867108821869 + 7.4067250825464725e-003 + <_> + + <_> + + + + <_>5 0 10 4 -1. + <_>5 2 10 2 2. + 0 + 0.0502393804490566 + -0.0185892395675182 + 0.2822335064411163 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + 0.0110363001003861 + 0.0296239592134953 + -0.2007879018783569 + <_> + + <_> + + + + <_>0 12 18 3 -1. + <_>6 12 6 3 3. + 0 + 0.0609650202095509 + -0.0110364602878690 + 0.5033451914787293 + <_> + + <_> + + + + <_>4 1 14 3 -1. + <_>4 2 14 1 3. + 0 + 0.0159665904939175 + 0.0139418700709939 + -0.2474247068166733 + -1.3073990345001221 + 36 + -1 + <_> + + + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. 
+ <_>8 9 4 4 2. + 0 + -0.0388294197618961 + 0.3182382881641388 + -0.1406200975179672 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>4 7 12 2 3. + 0 + -0.0677713006734848 + 0.2052696943283081 + -0.1786746978759766 + <_> + + <_> + + + + <_>0 1 10 4 -1. + <_>5 1 5 4 2. + 0 + 0.0931529402732849 + -0.1329381018877029 + 0.2325212061405182 + <_> + + <_> + + + + <_>4 18 13 2 -1. + <_>4 19 13 1 2. + 0 + -6.0846367850899696e-003 + 0.1981765031814575 + -0.1553514003753662 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0172301493585110 + 0.2578431069850922 + -0.0903873667120934 + <_> + + <_> + + + + <_>2 4 16 10 -1. + <_>10 4 8 5 2. + <_>2 9 8 5 2. + 0 + 0.0419077984988689 + 0.0620661489665508 + -0.3230313956737518 + <_> + + <_> + + + + <_>0 2 16 2 -1. + <_>0 3 16 1 2. + 0 + -3.4084350336343050e-003 + -0.3166790902614594 + 0.0602750405669212 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0349092893302441 + -0.1245630979537964 + 0.1609985977411270 + <_> + + <_> + + + + <_>3 11 6 7 -1. + <_>5 11 2 7 3. + 0 + 0.0116769000887871 + -0.1802566051483154 + 0.1223443001508713 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + -1.2773449998348951e-003 + -0.2473558038473129 + 0.0621297396719456 + <_> + + <_> + + + + <_>3 1 10 6 -1. + <_>3 1 5 3 2. + <_>8 4 5 3 2. + 0 + 0.0169172994792461 + 0.0696710422635078 + -0.2529258131980896 + <_> + + <_> + + + + <_>12 9 5 9 -1. + <_>12 12 5 3 3. + 0 + 0.0256566405296326 + 0.0262125805020332 + -0.1634899973869324 + <_> + + <_> + + + + <_>6 3 4 7 -1. + <_>8 3 2 7 2. + 0 + 1.9884048961102962e-003 + -0.3101851046085358 + 0.0502592511475086 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + 0.0425484888255596 + 0.0170658193528652 + -0.4783062040805817 + <_> + + <_> + + + + <_>1 4 4 12 -1. + <_>1 8 4 4 3. + 0 + 6.0466718859970570e-003 + -0.2211804986000061 + 0.0728424116969109 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -8.0229081213474274e-003 + -0.1453005969524384 + 0.0499062612652779 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>10 6 2 7 2. + 0 + 0.0379372611641884 + -0.0340077802538872 + 0.4371533095836639 + <_> + + <_> + + + + <_>10 8 8 8 -1. + <_>14 8 4 4 2. + <_>10 12 4 4 2. + 0 + -0.0529602989554405 + -0.2885659039020538 + 0.0184572096914053 + <_> + + <_> + + + + <_>1 7 15 3 -1. + <_>6 7 5 3 3. + 0 + 7.5578060932457447e-003 + -0.2353460043668747 + 0.0603025704622269 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0155549803748727 + -0.2656773030757904 + 0.0552793703973293 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 14 8 4 2. + 0 + 3.4035260323435068e-003 + 0.0461758896708488 + -0.3365189135074616 + <_> + + <_> + + + + <_>3 5 14 3 -1. + <_>3 6 14 1 3. + 0 + -0.0193702708929777 + 0.1960383951663971 + -0.0801868289709091 + <_> + + <_> + + + + <_>2 10 7 6 -1. + <_>2 12 7 2 3. + 0 + 0.0217195693403482 + 0.0419320799410343 + -0.3432759046554565 + <_> + + <_> + + + + <_>8 6 7 8 -1. + <_>8 10 7 4 2. + 0 + -3.8787510129623115e-004 + -0.2538223862648010 + 0.0452007800340652 + <_> + + <_> + + + + <_>0 2 4 7 -1. + <_>2 2 2 7 2. + 0 + 0.0337945595383644 + -0.0649015605449677 + 0.2123865932226181 + <_> + + <_> + + + + <_>4 1 14 3 -1. + <_>4 2 14 1 3. + 0 + -9.1701336205005646e-003 + -0.2387458980083466 + 0.0407963804900646 + <_> + + <_> + + + + <_>2 3 13 2 -1. + <_>2 4 13 1 2. + 0 + -1.3741330476477742e-003 + -0.1643002033233643 + 0.0814962834119797 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. 
+ 0 + -0.0123527199029922 + 0.1680507063865662 + -0.0578839704394341 + <_> + + <_> + + + + <_>2 1 16 4 -1. + <_>2 1 8 2 2. + <_>10 3 8 2 2. + 0 + -0.0111777000129223 + -0.1977586001157761 + 0.0634087026119232 + <_> + + <_> + + + + <_>9 0 8 6 -1. + <_>9 2 8 2 3. + 0 + 2.5044390931725502e-003 + -0.1290045976638794 + 0.0589736104011536 + <_> + + <_> + + + + <_>3 9 6 8 -1. + <_>6 9 3 8 2. + 0 + 2.1939110010862350e-003 + 0.1493715941905975 + -0.0798972919583321 + <_> + + <_> + + + + <_>12 10 8 6 -1. + <_>12 12 8 2 3. + 0 + -0.0464434996247292 + -0.4433234930038452 + 0.0206913594156504 + <_> + + <_> + + + + <_>4 10 6 5 -1. + <_>7 10 3 5 2. + 0 + -0.0388673096895218 + -0.5345087051391602 + 0.0214356500655413 + <_> + + <_> + + + + <_>7 6 8 8 -1. + <_>11 6 4 4 2. + <_>7 10 4 4 2. + 0 + -2.0838780328631401e-003 + 0.0538762398064137 + -0.1667453050613403 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>7 5 3 5 2. + <_>10 10 3 5 2. + 0 + -0.0177849698811769 + 0.2589834928512573 + -0.0657944232225418 + <_> + + <_> + + + + <_>10 4 10 4 -1. + <_>10 6 10 2 2. + 0 + -0.0994784608483315 + -0.7233209013938904 + 6.1601991765201092e-003 + <_> + + <_> + + + + <_>0 4 10 4 -1. + <_>0 6 10 2 2. + 0 + -2.5733250658959150e-003 + 0.0720276534557343 + -0.1752230972051621 + <_> + + <_> + + + + <_>4 2 14 6 -1. + <_>4 5 14 3 2. + 0 + 0.0699774399399757 + -0.0302383303642273 + 0.3980937898159027 + <_> + + <_> + + + + <_>0 2 13 3 -1. + <_>0 3 13 1 3. + 0 + -0.0108807804062963 + -0.3060626983642578 + 0.0452105589210987 + <_> + + <_> + + + + <_>4 9 12 5 -1. + <_>8 9 4 5 3. + 0 + 0.0480814017355442 + 0.0439110994338989 + -0.2568621933460236 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + 0.0796882435679436 + -0.0337416008114815 + 0.3653270006179810 + <_> + + <_> + + + + <_>12 9 7 6 -1. + <_>12 11 7 2 3. + 0 + -0.0154040204361081 + -0.1773145943880081 + 0.0238007307052612 + <_> + + <_> + + + + <_>1 9 7 6 -1. + <_>1 11 7 2 3. + 0 + -0.0366438999772072 + -0.6393110752105713 + 0.0175186302512884 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0130725000053644 + -0.2411936074495316 + 0.0588769502937794 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -2.5379280559718609e-003 + -0.2050921022891998 + 0.0589157603681087 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + 0.0474912784993649 + 0.0228427797555923 + -0.3945347964763641 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -0.0214896406978369 + -0.3109112083911896 + 0.0380208715796471 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + 0.0138413300737739 + -0.0560395196080208 + 0.2130897939205170 + <_> + + <_> + + + + <_>4 15 12 5 -1. + <_>8 15 4 5 3. + 0 + 4.9399589188396931e-003 + -0.1883863061666489 + 0.0621718391776085 + <_> + + <_> + + + + <_>12 12 7 6 -1. + <_>12 14 7 2 3. + 0 + 0.0134834395721555 + 0.0368753299117088 + -0.2495236992835999 + <_> + + <_> + + + + <_>0 6 17 3 -1. + <_>0 7 17 1 3. + 0 + -8.4225656464695930e-003 + 0.0715010911226273 + -0.1399662047624588 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + -0.0437869913876057 + 0.2012841999530792 + -0.0537442602217197 + <_> + + <_> + + + + <_>0 12 18 4 -1. + <_>0 12 9 2 2. + <_>9 14 9 2 2. + 0 + -0.0100684398785234 + -0.1670701950788498 + 0.0613450892269611 + <_> + + <_> + + + + <_>11 0 4 7 -1. + <_>11 0 2 7 2. + 0 + 2.4383061099797487e-003 + -0.1210545971989632 + 0.0498077012598515 + <_> + + <_> + + + + <_>0 12 14 2 -1. + <_>0 13 14 1 2. 
+ 0 + 3.2083820551633835e-003 + -0.0560453608632088 + 0.1795570999383926 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0203895196318626 + -0.3198359012603760 + 0.0341416187584400 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0229144208133221 + -0.3945465087890625 + 0.0238389708101749 + <_> + + <_> + + + + <_>10 1 6 7 -1. + <_>12 1 2 7 3. + 0 + 0.0185669008642435 + 0.0384325608611107 + -0.2299199998378754 + <_> + + <_> + + + + <_>8 6 3 13 -1. + <_>9 6 1 13 3. + 0 + -0.0102770300582051 + 0.2255744934082031 + -0.0492232292890549 + <_> + + <_> + + + + <_>9 1 3 14 -1. + <_>10 1 1 14 3. + 0 + -9.7914133220911026e-003 + 0.1932788044214249 + -0.0361390598118305 + <_> + + <_> + + + + <_>4 1 6 7 -1. + <_>6 1 2 7 3. + 0 + 0.0126998396590352 + 0.0562979914247990 + -0.2098159939050674 + <_> + + <_> + + + + <_>11 11 7 6 -1. + <_>11 13 7 2 3. + 0 + 0.0398674681782722 + 9.4982674345374107e-003 + -0.4768620133399963 + <_> + + <_> + + + + <_>2 11 7 6 -1. + <_>2 13 7 2 3. + 0 + 0.0337045192718506 + 0.0188484601676464 + -0.5370798110961914 + <_> + + <_> + + + + <_>0 3 20 12 -1. + <_>0 9 20 6 2. + 0 + -0.0336952693760395 + -0.2700335085391998 + 0.0389563404023647 + <_> + + <_> + + + + <_>7 6 6 11 -1. + <_>9 6 2 11 3. + 0 + 0.0239612497389317 + -0.0950004309415817 + 0.1028281971812248 + <_> + + <_> + + + + <_>4 6 12 4 -1. + <_>8 6 4 4 3. + 0 + 0.0829902291297913 + 0.0378285683691502 + -0.3026775121688843 + <_> + + <_> + + + + <_>0 1 6 11 -1. + <_>3 1 3 11 2. + 0 + 0.1653721034526825 + 0.0239121504127979 + -0.4121440947055817 + <_> + + <_> + + + + <_>9 4 5 12 -1. + <_>9 10 5 6 2. + 0 + 0.0182025693356991 + 0.0261274594813585 + -0.0692270100116730 + <_> + + <_> + + + + <_>0 3 20 4 -1. + <_>0 3 10 2 2. + <_>10 5 10 2 2. + 0 + -0.0453223809599876 + -0.4443764984607697 + 0.0212795697152615 + <_> + + <_> + + + + <_>10 0 10 6 -1. + <_>15 0 5 3 2. + <_>10 3 5 3 2. + 0 + 0.0476206094026566 + -0.0340700000524521 + 0.2106568068265915 + <_> + + <_> + + + + <_>4 0 10 6 -1. + <_>4 0 5 3 2. + <_>9 3 5 3 2. + 0 + 1.0596530046314001e-003 + 0.0983478203415871 + -0.0927325934171677 + <_> + + <_> + + + + <_>7 8 13 3 -1. + <_>7 9 13 1 3. + 0 + 0.0320280492305756 + 0.0238339491188526 + -0.4327659010887146 + <_> + + <_> + + + + <_>0 8 13 3 -1. + <_>0 9 13 1 3. + 0 + -0.0137643702328205 + -0.4172661900520325 + 0.0218833591789007 + <_> + + <_> + + + + <_>10 6 7 4 -1. + <_>10 8 7 2 2. + 0 + 0.0366521589457989 + -0.0268514100462198 + 0.1005123034119606 + <_> + + <_> + + + + <_>3 6 7 4 -1. + <_>3 8 7 2 2. + 0 + -0.0155077604576945 + 0.4851926863193512 + -0.0249007102102041 + <_> + + <_> + + + + <_>11 9 7 6 -1. + <_>11 11 7 2 3. + 0 + 7.1460101753473282e-003 + 0.0579064711928368 + -0.0516139715909958 + <_> + + <_> + + + + <_>2 8 14 4 -1. + <_>2 8 7 2 2. + <_>9 10 7 2 2. + 0 + 0.0242802295833826 + -0.0373418293893337 + 0.2920179963111877 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>15 10 5 3 2. + <_>10 13 5 3 2. + 0 + -0.0835223197937012 + 0.3744797110557556 + -3.4602559171617031e-003 + <_> + + <_> + + + + <_>0 10 10 6 -1. + <_>0 10 5 3 2. + <_>5 13 5 3 2. + 0 + 0.0314857214689255 + 0.0240920092910528 + -0.3959487974643707 + <_> + + <_> + + + + <_>14 5 4 14 -1. + <_>16 5 2 7 2. + <_>14 12 2 7 2. + 0 + 9.4820279628038406e-003 + -0.0737146735191345 + 0.1306633055210114 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + 0.0401169583201408 + 0.0304537191987038 + -0.3064115941524506 + <_> + + <_> + + + + <_>14 5 4 14 -1. + <_>16 5 2 7 2. + <_>14 12 2 7 2. 
+ 0 + -0.0528154782950878 + 0.4579240977764130 + -0.0239062309265137 + <_> + + <_> + + + + <_>2 5 4 14 -1. + <_>2 5 2 7 2. + <_>4 12 2 7 2. + 0 + 4.6821571886539459e-003 + -0.0883959308266640 + 0.1285813003778458 + <_> + + <_> + + + + <_>2 5 18 12 -1. + <_>11 5 9 6 2. + <_>2 11 9 6 2. + 0 + -0.1344828009605408 + -0.2747175097465515 + 0.0159703101962805 + <_> + + <_> + + + + <_>3 0 6 5 -1. + <_>6 0 3 5 2. + 0 + 5.4646627977490425e-003 + -0.2162843942642212 + 0.0430353209376335 + <_> + + <_> + + + + <_>9 0 3 20 -1. + <_>10 0 1 20 3. + 0 + -0.0359963588416576 + -0.4852409064769745 + 0.0105637498199940 + <_> + + <_> + + + + <_>1 0 6 16 -1. + <_>1 8 6 8 2. + 0 + 0.2523599863052368 + 9.3745701014995575e-003 + -0.8861339092254639 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0250672698020935 + -0.2236464023590088 + 0.0371466018259525 + <_> + + <_> + + + + <_>1 3 15 4 -1. + <_>6 3 5 4 3. + 0 + -0.0141503298655152 + 0.3785665035247803 + -0.0278174895793200 + <_> + + <_> + + + + <_>8 4 5 16 -1. + <_>8 12 5 8 2. + 0 + 0.1004957035183907 + 0.0112448399886489 + -0.7186952233314514 + <_> + + <_> + + + + <_>1 12 7 6 -1. + <_>1 14 7 2 3. + 0 + 0.0199890807271004 + 0.0260568093508482 + -0.3214780092239380 + <_> + + <_> + + + + <_>17 5 3 12 -1. + <_>17 11 3 6 2. + 0 + -0.0491605587303638 + -0.2316488027572632 + 0.0163175594061613 + <_> + + <_> + + + + <_>1 3 15 3 -1. + <_>1 4 15 1 3. + 0 + 0.0221187900751829 + -0.0505694784224033 + 0.1757258027791977 + <_> + + <_> + + + + <_>8 5 4 12 -1. + <_>8 9 4 4 3. + 0 + -7.6390360482037067e-003 + 0.2226431965827942 + -0.0436853915452957 + <_> + + <_> + + + + <_>8 7 3 10 -1. + <_>8 12 3 5 2. + 0 + -1.6813250258564949e-003 + 0.0555824413895607 + -0.1773931980133057 + <_> + + <_> + + + + <_>4 1 14 3 -1. + <_>4 2 14 1 3. + 0 + -0.0166190005838871 + -0.2781296968460083 + 0.0197378303855658 + <_> + + <_> + + + + <_>0 5 3 12 -1. + <_>0 11 3 6 2. + 0 + -0.0328016206622124 + -0.2332518994808197 + 0.0366638191044331 + <_> + + <_> + + + + <_>1 13 18 6 -1. + <_>7 13 6 6 3. + 0 + 0.2452659010887146 + -0.0297389402985573 + 0.3133840858936310 + <_> + + <_> + + + + <_>7 3 4 7 -1. + <_>9 3 2 7 2. + 0 + -0.0172717701643705 + 0.5281891822814941 + -0.0141517799347639 + <_> + + <_> + + + + <_>8 7 9 5 -1. + <_>11 7 3 5 3. + 0 + 0.0201119091361761 + 0.0271735806018114 + -0.0831227228045464 + <_> + + <_> + + + + <_>3 7 9 5 -1. + <_>6 7 3 5 3. + 0 + 0.0160767491906881 + 0.0563466399908066 + -0.1589314043521881 + <_> + + <_> + + + + <_>10 10 8 10 -1. + <_>14 10 4 5 2. + <_>10 15 4 5 2. + 0 + -0.1017976999282837 + 0.6044800877571106 + -7.6062050648033619e-003 + <_> + + <_> + + + + <_>2 10 8 10 -1. + <_>2 10 4 5 2. + <_>6 15 4 5 2. + 0 + -0.0448656491935253 + 0.3307703137397766 + -0.0253291893750429 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 0.0270949807018042 + -0.0692517235875130 + 0.1535059958696365 + <_> + + <_> + + + + <_>3 12 7 6 -1. + <_>3 14 7 2 3. + 0 + -0.0376758910715580 + -0.3194983899593353 + 0.0299096796661615 + <_> + + <_> + + + + <_>8 3 5 8 -1. + <_>8 7 5 4 2. + 0 + -8.2310457946732640e-004 + 0.0606129691004753 + -0.1053157970309258 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>7 4 6 4 2. + 0 + 0.0556860491633415 + -0.0409203507006168 + 0.2295964956283569 + <_> + + <_> + + + + <_>10 0 7 6 -1. + <_>10 2 7 2 3. + 0 + -1.6866069927345961e-004 + -0.0776435881853104 + 0.0295492708683014 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. 
+ 0 + -0.0238732099533081 + 0.2794407904148102 + -0.0318884588778019 + <_> + + <_> + + + + <_>7 12 13 3 -1. + <_>7 13 13 1 3. + 0 + -0.0150036001577973 + 0.2507739067077637 + -0.0459327884018421 + <_> + + <_> + + + + <_>1 3 18 4 -1. + <_>1 3 9 2 2. + <_>10 5 9 2 2. + 0 + -0.0145223196595907 + -0.1645354032516480 + 0.0551809109747410 + <_> + + <_> + + + + <_>6 1 8 8 -1. + <_>10 1 4 4 2. + <_>6 5 4 4 2. + 0 + -7.4650160968303680e-003 + -0.1269046962261200 + 0.0715431123971939 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>10 6 2 7 2. + 0 + 0.0549846403300762 + -0.0137307997792959 + 0.6511964201927185 + <_> + + <_> + + + + <_>2 4 18 6 -1. + <_>11 4 9 3 2. + <_>2 7 9 3 2. + 0 + -0.0880307629704475 + 0.2541649043560028 + -0.0122338701039553 + <_> + + <_> + + + + <_>1 5 8 8 -1. + <_>1 5 4 4 2. + <_>5 9 4 4 2. + 0 + -0.0361955016851425 + -0.4491730928421021 + 0.0210937708616257 + <_> + + <_> + + + + <_>14 0 2 13 -1. + <_>14 0 1 13 2. + 0 + 0.0370632112026215 + -6.6644148901104927e-003 + 0.2494017034769058 + <_> + + <_> + + + + <_>4 0 2 13 -1. + <_>5 0 1 13 2. + 0 + -0.0105683803558350 + -0.4106157124042511 + 0.0213980898261070 + <_> + + <_> + + + + <_>7 3 12 3 -1. + <_>7 3 6 3 2. + 0 + 0.1266278028488159 + 5.2506178617477417e-003 + -0.3324024975299835 + <_> + + <_> + + + + <_>1 3 12 3 -1. + <_>7 3 6 3 2. + 0 + -8.7341770995408297e-004 + 0.3268721997737885 + -0.0277048293501139 + <_> + + <_> + + + + <_>7 1 6 7 -1. + <_>9 1 2 7 3. + 0 + -1.0967969428747892e-003 + -0.2771083116531372 + 0.0363528281450272 + <_> + + <_> + + + + <_>5 2 6 12 -1. + <_>7 2 2 12 3. + 0 + -0.0797380208969116 + -0.5832915902137756 + 0.0140617797151208 + <_> + + <_> + + + + <_>9 5 6 12 -1. + <_>12 5 3 6 2. + <_>9 11 3 6 2. + 0 + -3.8278030697256327e-003 + 0.0354594513773918 + -0.1399680972099304 + <_> + + <_> + + + + <_>5 5 6 12 -1. + <_>5 5 3 6 2. + <_>8 11 3 6 2. + 0 + 0.0203339997678995 + -0.0214213505387306 + 0.5161038041114807 + <_> + + <_> + + + + <_>5 9 14 3 -1. + <_>5 10 14 1 3. + 0 + 7.5564032886177301e-004 + -0.1080347001552582 + 0.0335382893681526 + <_> + + <_> + + + + <_>1 3 18 12 -1. + <_>1 3 9 6 2. + <_>10 9 9 6 2. + 0 + 0.1785584986209869 + 9.4842249527573586e-003 + -0.8185818791389465 + <_> + + <_> + + + + <_>3 11 14 4 -1. + <_>10 11 7 2 2. + <_>3 13 7 2 2. + 0 + -0.0347450710833073 + -0.5817219018936157 + 0.0113155497238040 + <_> + + <_> + + + + <_>4 6 4 14 -1. + <_>4 6 2 7 2. + <_>6 13 2 7 2. + 0 + 5.1304209046065807e-003 + -0.1065986007452011 + 0.0744408965110779 + <_> + + <_> + + + + <_>11 11 4 7 -1. + <_>11 11 2 7 2. + 0 + -0.0339361988008022 + -0.4599775969982147 + 0.0152644198387861 + <_> + + <_> + + + + <_>5 11 4 7 -1. + <_>7 11 2 7 2. + 0 + -1.0171560570597649e-003 + 0.1030130982398987 + -0.0898429602384567 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. + 0 + 0.0634890198707581 + 6.8669100292026997e-003 + -0.7602251768112183 + <_> + + <_> + + + + <_>1 3 18 4 -1. + <_>7 3 6 4 3. + 0 + 0.2407793998718262 + -0.0215714797377586 + 0.4111303091049194 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + -0.0519634410738945 + -0.2851732075214386 + 0.0409430600702763 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>10 8 7 3 2. + 0 + 0.0364081710577011 + -0.0504609607160091 + 0.1667181998491287 + <_> + + <_> + + + + <_>9 4 2 13 -1. + <_>9 4 1 13 2. + 0 + 9.6712149679660797e-003 + -0.0489151105284691 + 0.1822443008422852 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + 0.0222681500017643 + 0.0613909810781479 + -0.1544584929943085 + <_> + + <_> + + + + <_>10 0 7 6 -1. + <_>10 2 7 2 3. 
+ 0 + -0.0709292814135551 + 0.5001016855239868 + -3.9896317757666111e-003 + <_> + + <_> + + + + <_>3 0 7 6 -1. + <_>3 2 7 2 3. + 0 + 2.0806699467357248e-004 + -0.1447563022375107 + 0.0636075288057327 + <_> + + <_> + + + + <_>2 0 16 3 -1. + <_>2 1 16 1 3. + 0 + -9.2365043237805367e-003 + -0.2181728929281235 + 0.0388562604784966 + <_> + + <_> + + + + <_>2 9 7 4 -1. + <_>2 11 7 2 2. + 0 + 0.0227819904685020 + 0.0201086197048426 + -0.3845236003398895 + <_> + + <_> + + + + <_>4 7 16 8 -1. + <_>12 7 8 4 2. + <_>4 11 8 4 2. + 0 + -7.0844120346009731e-003 + -0.0488854907453060 + 0.0463673397898674 + <_> + + <_> + + + + <_>0 7 16 8 -1. + <_>0 7 8 4 2. + <_>8 11 8 4 2. + 0 + -0.0840062797069550 + 0.3592166900634766 + -0.0224618893116713 + <_> + + <_> + + + + <_>7 12 10 6 -1. + <_>12 12 5 3 2. + <_>7 15 5 3 2. + 0 + -0.0704465806484222 + -0.8839532136917114 + 2.9730550013482571e-003 + <_> + + <_> + + + + <_>3 12 10 6 -1. + <_>3 12 5 3 2. + <_>8 15 5 3 2. + 0 + 0.0488998107612133 + 0.0239362195134163 + -0.3677014112472534 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 0.0296773295849562 + 0.0166081208735704 + -0.2297268956899643 + <_> + + <_> + + + + <_>4 5 4 8 -1. + <_>4 9 4 4 2. + 0 + 2.5721399579197168e-003 + -0.3257220983505249 + 0.0241460092365742 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 1.6117929480969906e-003 + 0.0293553005903959 + -0.0375415794551373 + <_> + + <_> + + + + <_>2 4 14 3 -1. + <_>2 5 14 1 3. + 0 + 0.0175466407090425 + -0.0508792400360107 + 0.1528313010931015 + <_> + + <_> + + + + <_>2 3 18 4 -1. + <_>11 3 9 2 2. + <_>2 5 9 2 2. + 0 + -0.0463263988494873 + -0.2284332066774368 + 0.0144425304606557 + <_> + + <_> + + + + <_>5 0 10 18 -1. + <_>5 6 10 6 3. + 0 + -0.3320567011833191 + 0.7445781826972961 + -0.0108568798750639 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + -0.0423178300261498 + -0.1466601938009262 + 0.0577992312610149 + <_> + + <_> + + + + <_>0 3 14 4 -1. + <_>0 3 7 2 2. + <_>7 5 7 2 2. + 0 + 3.2436659093946218e-003 + 0.0540214516222477 + -0.1702941060066223 + <_> + + <_> + + + + <_>13 4 3 15 -1. + <_>14 4 1 15 3. + 0 + -0.0209008902311325 + -0.4078929126262665 + 0.0253348108381033 + <_> + + <_> + + + + <_>4 4 3 15 -1. + <_>5 4 1 15 3. + 0 + 0.0203250106424093 + 0.0330159291625023 + -0.2450339049100876 + <_> + + <_> + + + + <_>14 4 6 10 -1. + <_>16 4 2 10 3. + 0 + -0.0463419295847416 + 0.1597664952278137 + -0.0411779396235943 + <_> + + <_> + + + + <_>0 4 6 10 -1. + <_>2 4 2 10 3. + 0 + -0.0343563295900822 + 0.1602140963077545 + -0.0625009536743164 + <_> + + <_> + + + + <_>8 5 4 14 -1. + <_>10 5 2 7 2. + <_>8 12 2 7 2. + 0 + 0.0244659706950188 + -0.0374875999987125 + 0.2280728071928024 + <_> + + <_> + + + + <_>4 6 12 12 -1. + <_>4 6 6 6 2. + <_>10 12 6 6 2. + 0 + -0.0181395392864943 + -0.1590958982706070 + 0.0605398118495941 + <_> + + <_> + + + + <_>9 1 3 19 -1. + <_>10 1 1 19 3. + 0 + 0.0643941611051559 + 6.6441670060157776e-003 + -0.7486022710800171 + <_> + + <_> + + + + <_>2 1 3 17 -1. + <_>3 1 1 17 3. + 0 + 9.6367759397253394e-004 + -0.0906208083033562 + 0.0941181331872940 + <_> + + <_> + + + + <_>2 7 18 4 -1. + <_>8 7 6 4 3. + 0 + 0.2002449035644531 + 5.9731658548116684e-003 + -0.8252168893814087 + <_> + + <_> + + + + <_>1 10 8 6 -1. + <_>1 12 8 2 3. + 0 + -0.0634986683726311 + -0.6963583827018738 + 9.3487137928605080e-003 + <_> + + <_> + + + + <_>9 9 9 8 -1. + <_>12 9 3 8 3. + 0 + -0.0192323997616768 + 0.1123668029904366 + -0.0291997399181128 + <_> + + <_> + + + + <_>0 0 20 15 -1. + <_>0 5 20 5 3. 
+ 0 + 0.2541874945163727 + 0.0139590399339795 + -0.5158494710922241 + <_> + + <_> + + + + <_>3 1 14 6 -1. + <_>3 4 14 3 2. + 0 + 0.1043746024370194 + -0.0277430303394794 + 0.2737343013286591 + <_> + + <_> + + + + <_>0 2 7 4 -1. + <_>0 4 7 2 2. + 0 + 8.5034370422363281e-003 + 0.0541446506977081 + -0.1302950978279114 + <_> + + <_> + + + + <_>16 2 3 15 -1. + <_>17 2 1 15 3. + 0 + 5.2647730335593224e-003 + -0.0480775013566017 + 0.1037138029932976 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -0.0241935197263956 + 0.1993298977613449 + -0.0371110402047634 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -4.6968772076070309e-003 + -0.0657970905303955 + 0.0338373482227325 + <_> + + <_> + + + + <_>2 16 14 4 -1. + <_>2 16 7 2 2. + <_>9 18 7 2 2. + 0 + -0.0234645791351795 + -0.2604303061962128 + 0.0309330895543098 + <_> + + <_> + + + + <_>16 2 3 15 -1. + <_>17 2 1 15 3. + 0 + -0.0290298406034708 + 0.2068361937999725 + -0.0276286508888006 + <_> + + <_> + + + + <_>3 0 8 8 -1. + <_>3 0 4 4 2. + <_>7 4 4 4 2. + 0 + 0.0791002362966537 + 7.7356752008199692e-003 + -0.9181671142578125 + <_> + + <_> + + + + <_>5 10 14 3 -1. + <_>5 11 14 1 3. + 0 + 6.2152887694537640e-003 + -0.0739880278706551 + 0.0877274125814438 + <_> + + <_> + + + + <_>1 9 16 4 -1. + <_>1 11 16 2 2. + 0 + -0.0670132786035538 + 0.3762829899787903 + -0.0208927094936371 + <_> + + <_> + + + + <_>8 7 5 8 -1. + <_>8 11 5 4 2. + 0 + -7.9359989613294601e-003 + -0.0895327031612396 + 0.0665593072772026 + <_> + + <_> + + + + <_>1 2 3 15 -1. + <_>2 2 1 15 3. + 0 + 1.3035970041528344e-003 + -0.0666571408510208 + 0.1139909997582436 + <_> + + <_> + + + + <_>14 11 6 8 -1. + <_>16 11 2 8 3. + 0 + -0.1196431964635849 + -0.6065618991851807 + 7.3508038185536861e-003 + <_> + + <_> + + + + <_>0 11 6 8 -1. + <_>2 11 2 8 3. + 0 + -2.2869240492582321e-003 + 0.0733368173241615 + -0.1188957020640373 + <_> + + <_> + + + + <_>14 8 6 12 -1. + <_>17 8 3 6 2. + <_>14 14 3 6 2. + 0 + -0.1146256998181343 + 0.2928853034973145 + -6.7763519473373890e-003 + <_> + + <_> + + + + <_>0 8 6 12 -1. + <_>0 8 3 6 2. + <_>3 14 3 6 2. + 0 + 0.0484774895012379 + -0.0170629508793354 + 0.4295321106910706 + <_> + + <_> + + + + <_>15 0 3 20 -1. + <_>16 0 1 20 3. + 0 + -1.3129960279911757e-003 + -0.0743196383118629 + 0.0621497891843319 + <_> + + <_> + + + + <_>2 0 3 20 -1. + <_>3 0 1 20 3. + 0 + -0.0663447827100754 + -0.5894566774368286 + 0.0132258199155331 + <_> + + <_> + + + + <_>8 9 8 4 -1. + <_>8 9 4 4 2. + 0 + -4.6543189091607928e-004 + 0.0578865483403206 + -0.0642952993512154 + <_> + + <_> + + + + <_>6 9 6 10 -1. + <_>9 9 3 10 2. + 0 + -0.0132865402847528 + 0.1412332952022553 + -0.0615064688026905 + <_> + + <_> + + + + <_>9 9 9 8 -1. + <_>12 9 3 8 3. + 0 + 7.3928399942815304e-003 + -0.0727199912071228 + 0.0421791411936283 + <_> + + <_> + + + + <_>2 9 9 8 -1. + <_>5 9 3 8 3. + 0 + -0.0474341697990894 + 0.3267227113246918 + -0.0290015302598476 + <_> + + <_> + + + + <_>12 5 6 15 -1. + <_>14 5 2 15 3. + 0 + 0.1354679018259049 + 0.0103935701772571 + -0.4535447955131531 + <_> + + <_> + + + + <_>1 2 9 5 -1. + <_>4 2 3 5 3. + 0 + -0.0252168104052544 + -0.1907597929239273 + 0.0415227413177490 + <_> + + <_> + + + + <_>9 1 3 19 -1. + <_>10 1 1 19 3. + 0 + -0.0494313985109329 + -0.9419217109680176 + 3.5473550669848919e-003 + <_> + + <_> + + + + <_>8 1 3 19 -1. + <_>9 1 1 19 3. + 0 + -0.0483751818537712 + -0.8302866816520691 + 7.2369067929685116e-003 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. 
+ 0 + -0.0143485097214580 + -0.2186049968004227 + 0.0314864292740822 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -5.5373171344399452e-003 + -0.2152103036642075 + 0.0442358888685703 + <_> + + <_> + + + + <_>6 3 10 10 -1. + <_>6 3 5 10 2. + 0 + 0.2177180051803589 + -5.0501842051744461e-003 + 0.4902552068233490 + <_> + + <_> + + + + <_>3 0 12 5 -1. + <_>9 0 6 5 2. + 0 + 0.1744139939546585 + -9.7074145451188087e-003 + 0.7419623136520386 + <_> + + <_> + + + + <_>8 1 10 16 -1. + <_>13 1 5 8 2. + <_>8 9 5 8 2. + 0 + 0.0888404995203018 + -5.8005251921713352e-003 + 0.3340322077274323 + <_> + + <_> + + + + <_>4 8 8 4 -1. + <_>8 8 4 4 2. + 0 + -0.0380127914249897 + 0.5067759156227112 + -0.0138094304129481 + <_> + + <_> + + + + <_>9 16 9 4 -1. + <_>9 18 9 2 2. + 0 + -0.0636113882064819 + -0.5669682025909424 + 7.9266652464866638e-003 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>4 14 4 6 2. + 0 + 0.0983584821224213 + 0.0346348993480206 + -0.1965176016092300 + <_> + + <_> + + + + <_>12 5 6 15 -1. + <_>14 5 2 15 3. + 0 + 0.0229296106845140 + -0.0446826405823231 + 0.0600624196231365 + <_> + + <_> + + + + <_>2 5 6 15 -1. + <_>4 5 2 15 3. + 0 + -0.0397636517882347 + -0.2831034958362579 + 0.0260870698839426 + <_> + + <_> + + + + <_>11 0 9 17 -1. + <_>14 0 3 17 3. + 0 + 0.1121568977832794 + -0.0432257093489170 + 0.1550564020872116 + <_> + + <_> + + + + <_>0 0 9 17 -1. + <_>3 0 3 17 3. + 0 + -0.1495794057846069 + 0.4147608876228333 + -0.0251126699149609 + <_> + + <_> + + + + <_>3 8 17 2 -1. + <_>3 9 17 1 2. + 0 + 1.4239370357245207e-003 + -0.2281333059072495 + 0.0224146191030741 + <_> + + <_> + + + + <_>6 1 7 4 -1. + <_>6 3 7 2 2. + 0 + -0.0113461399450898 + -0.2608393132686615 + 0.0264564808458090 + <_> + + <_> + + + + <_>4 2 12 4 -1. + <_>4 4 12 2 2. + 0 + -0.0905184075236321 + 0.6006718277931213 + -0.0125591596588492 + <_> + + <_> + + + + <_>1 8 14 3 -1. + <_>1 9 14 1 3. + 0 + 0.0360974818468094 + 0.0194510091096163 + -0.4099824130535126 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + -0.0256574694067240 + 0.2345308065414429 + -0.0323545187711716 + <_> + + <_> + + + + <_>3 13 13 3 -1. + <_>3 14 13 1 3. + 0 + -9.2462729662656784e-003 + 0.1445856988430023 + -0.0572801418602467 + <_> + + <_> + + + + <_>1 5 19 12 -1. + <_>1 9 19 4 3. + 0 + 0.0610067397356033 + 0.1996331959962845 + -0.0350187905132771 + <_> + + <_> + + + + <_>2 3 13 15 -1. + <_>2 8 13 5 3. + 0 + -2.2736669052392244e-003 + -0.2718046009540558 + 0.0353243090212345 + <_> + + <_> + + + + <_>5 1 15 6 -1. + <_>10 1 5 6 3. + 0 + -0.1117335036396980 + 0.2601088881492615 + -8.4183625876903534e-003 + <_> + + <_> + + + + <_>0 0 18 3 -1. + <_>6 0 6 3 3. + 0 + 0.1460158973932266 + -0.0437078587710857 + 0.1934380978345871 + <_> + + <_> + + + + <_>15 9 5 9 -1. + <_>15 12 5 3 3. + 0 + -0.0390085987746716 + -0.2402154952287674 + 0.0193248093128204 + <_> + + <_> + + + + <_>3 12 14 4 -1. + <_>3 14 14 2 2. + 0 + -0.0320651493966579 + -0.1461603045463562 + 0.0504104383289814 + <_> + + <_> + + + + <_>7 14 13 2 -1. + <_>7 15 13 1 2. + 0 + -3.9755292236804962e-003 + 0.0867860615253448 + -0.0751010030508041 + <_> + + <_> + + + + <_>0 9 5 9 -1. + <_>0 12 5 3 3. + 0 + -0.0222646091133356 + -0.1782020926475525 + 0.0422218814492226 + <_> + + <_> + + + + <_>14 5 5 15 -1. + <_>14 10 5 5 3. + 0 + -0.0600966513156891 + 0.3306227028369904 + -0.0133472196757793 + <_> + + <_> + + + + <_>1 5 5 15 -1. + <_>1 10 5 5 3. + 0 + -0.0831704065203667 + 0.6986327171325684 + -0.0110143097117543 + <_> + + <_> + + + + <_>8 3 6 17 -1. + <_>10 3 2 17 3. 
+ 0 + -0.0771823972463608 + -0.2563033103942871 + 8.8049499318003654e-003 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + 0.0689021721482277 + 0.0109964404255152 + -0.6352006793022156 + <_> + + <_> + + + + <_>4 7 12 8 -1. + <_>4 11 12 4 2. + 0 + -0.0503532811999321 + 0.2292789071798325 + -0.0327637195587158 + <_> + + <_> + + + + <_>5 3 2 14 -1. + <_>5 10 2 7 2. + 0 + 2.4320879019796848e-003 + -0.1321305930614471 + 0.0710885822772980 + <_> + + <_> + + + + <_>9 3 4 8 -1. + <_>9 7 4 4 2. + 0 + -0.0141964601352811 + 0.0718450695276260 + -0.0452636592090130 + <_> + + <_> + + + + <_>3 5 9 15 -1. + <_>3 10 9 5 3. + 0 + -4.5774779282510281e-003 + -0.2583228051662445 + 0.0294190403074026 + <_> + + <_> + + + + <_>9 5 3 12 -1. + <_>9 11 3 6 2. + 0 + -1.4008210273459554e-003 + 0.0446365214884281 + -0.1231015026569367 + <_> + + <_> + + + + <_>4 3 6 14 -1. + <_>4 3 3 7 2. + <_>7 10 3 7 2. + 0 + 0.0350627116858959 + -0.0187225006520748 + 0.4553366899490356 + <_> + + <_> + + + + <_>9 8 3 10 -1. + <_>9 13 3 5 2. + 0 + 0.0393649190664291 + -3.8776830770075321e-003 + 0.4822939038276672 + <_> + + <_> + + + + <_>0 4 20 8 -1. + <_>0 4 10 4 2. + <_>10 8 10 4 2. + 0 + 0.0294302906841040 + -0.0566326901316643 + 0.1360445022583008 + <_> + + <_> + + + + <_>6 11 10 6 -1. + <_>11 11 5 3 2. + <_>6 14 5 3 2. + 0 + 0.0793208405375481 + -4.0827351622283459e-003 + 0.9999855160713196 + <_> + + <_> + + + + <_>2 9 8 8 -1. + <_>2 9 4 4 2. + <_>6 13 4 4 2. + 0 + 0.0426963306963444 + 0.0235833395272493 + -0.3779887855052948 + <_> + + <_> + + + + <_>6 9 14 2 -1. + <_>6 9 7 2 2. + 0 + 0.0259377192705870 + 0.0502833388745785 + -0.0672493427991867 + <_> + + <_> + + + + <_>0 9 14 2 -1. + <_>7 9 7 2 2. + 0 + 0.0270536597818136 + 0.1040683984756470 + -0.1006971001625061 + <_> + + <_> + + + + <_>2 4 18 12 -1. + <_>8 4 6 12 3. + 0 + 0.3032230138778687 + -0.0516154095530510 + 0.1239866986870766 + <_> + + <_> + + + + <_>7 4 6 8 -1. + <_>9 4 2 8 3. + 0 + 0.0743731930851936 + -0.0299796499311924 + 0.2594498097896576 + <_> + + <_> + + + + <_>9 3 6 12 -1. + <_>12 3 3 6 2. + <_>9 9 3 6 2. + 0 + 0.0460597686469555 + 6.1678960919380188e-003 + -0.7088791131973267 + <_> + + <_> + + + + <_>6 9 5 9 -1. + <_>6 12 5 3 3. + 0 + 0.0368835106492043 + 0.0159850195050240 + -0.4443601965904236 + <_> + + <_> + + + + <_>0 1 20 8 -1. + <_>10 1 10 4 2. + <_>0 5 10 4 2. + 0 + 0.1349337995052338 + 8.8313389569520950e-003 + -0.7342693805694580 + <_> + + <_> + + + + <_>6 3 6 17 -1. + <_>8 3 2 17 3. + 0 + 0.1479919999837875 + 6.9719799794256687e-003 + -0.8207845091819763 + <_> + + <_> + + + + <_>14 10 6 10 -1. + <_>17 10 3 5 2. + <_>14 15 3 5 2. + 0 + 0.0396903790533543 + -0.0182477999478579 + 0.2695592045783997 + <_> + + <_> + + + + <_>0 10 6 10 -1. + <_>0 10 3 5 2. + <_>3 15 3 5 2. + 0 + -0.0535112805664539 + 0.2000025063753128 + -0.0391367003321648 + <_> + + <_> + + + + <_>16 12 4 8 -1. + <_>16 12 2 8 2. + 0 + 0.0637957006692886 + 0.0116161303594708 + -0.2531512081623077 + <_> + + <_> + + + + <_>0 12 4 8 -1. + <_>2 12 2 8 2. + 0 + -0.0810789167881012 + -0.7758278846740723 + 9.7084697335958481e-003 + <_> + + <_> + + + + <_>9 3 6 7 -1. + <_>11 3 2 7 3. + 0 + -0.0482726581394672 + -0.3073430955410004 + 0.0112980101257563 + <_> + + <_> + + + + <_>6 6 6 11 -1. + <_>8 6 2 11 3. + 0 + 0.0439125709235668 + -0.0394033007323742 + 0.1921695023775101 + -1.4138590097427368 + 37 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.0191887393593788 + -0.2115039974451065 + 0.1328652948141098 + <_> + + <_> + + + + <_>5 4 15 4 -1. + <_>5 6 15 2 2. 
+ 0 + -8.1222038716077805e-003 + 0.0924910828471184 + -0.1758511960506439 + <_> + + <_> + + + + <_>5 5 6 5 -1. + <_>8 5 3 5 2. + 0 + 1.5851219650357962e-003 + -0.2856569886207581 + 0.0667105689644814 + <_> + + <_> + + + + <_>12 1 6 11 -1. + <_>14 1 2 11 3. + 0 + -4.3140850029885769e-003 + -0.1388522982597351 + 0.0526946894824505 + <_> + + <_> + + + + <_>0 11 20 3 -1. + <_>0 12 20 1 3. + 0 + -1.7131429631263018e-003 + 0.1313561052083969 + -0.1314910948276520 + <_> + + <_> + + + + <_>12 1 6 11 -1. + <_>14 1 2 11 3. + 0 + 0.0684473663568497 + 9.3052154406905174e-003 + -0.2506326138973236 + <_> + + <_> + + + + <_>2 1 6 11 -1. + <_>4 1 2 11 3. + 0 + -2.4445978924632072e-003 + -0.1720553040504456 + 0.0983228236436844 + <_> + + <_> + + + + <_>10 9 4 8 -1. + <_>10 13 4 4 2. + 0 + 1.0310600046068430e-003 + 0.0230391602963209 + -0.2752762138843536 + <_> + + <_> + + + + <_>0 7 7 6 -1. + <_>0 9 7 2 3. + 0 + 7.4603251414373517e-004 + -0.2327678054571152 + 0.0526930093765259 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -6.6399492789059877e-004 + 0.0689907819032669 + -0.0846877098083496 + <_> + + <_> + + + + <_>7 0 3 13 -1. + <_>8 0 1 13 3. + 0 + -4.0997468749992549e-004 + 0.1050138026475906 + -0.1081900969147682 + <_> + + <_> + + + + <_>15 7 5 6 -1. + <_>15 10 5 3 2. + 0 + -1.8094549886882305e-003 + -0.1817883998155594 + 0.0441841408610344 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 9.3385757645592093e-004 + -0.1462268978357315 + 0.0727264434099197 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 14 4 4 2. + 0 + -3.8197741378098726e-004 + 0.0240099392831326 + -0.1729580014944077 + <_> + + <_> + + + + <_>0 7 5 6 -1. + <_>0 10 5 3 2. + 0 + -1.4950280310586095e-003 + -0.1940338015556335 + 0.0488079190254211 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + -0.0101591004058719 + 0.1917389929294586 + -0.0527490712702274 + <_> + + <_> + + + + <_>2 0 14 3 -1. + <_>2 1 14 1 3. + 0 + 5.9903519286308438e-005 + -0.1079154983162880 + 0.0909881666302681 + <_> + + <_> + + + + <_>4 4 13 2 -1. + <_>4 5 13 1 2. + 0 + -0.0319675505161285 + 0.4110988974571228 + -0.0226506404578686 + <_> + + <_> + + + + <_>0 18 20 2 -1. + <_>0 19 20 1 2. + 0 + 0.0143432701006532 + 0.0243155397474766 + -0.4268015027046204 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + 0.0110395299270749 + -0.0627170130610466 + 0.1133053004741669 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -8.4228850901126862e-003 + -0.2136930972337723 + 0.0420592017471790 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + -0.0205498393625021 + 0.1516163051128388 + -0.0245941393077374 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -6.5411031246185303e-003 + 0.1488362997770309 + -0.0611793398857117 + <_> + + <_> + + + + <_>6 0 8 14 -1. + <_>10 0 4 7 2. + <_>6 7 4 7 2. + 0 + -0.0133244004100561 + -0.2079197019338608 + 0.0483333095908165 + <_> + + <_> + + + + <_>0 2 6 12 -1. + <_>2 2 2 12 3. + 0 + 0.0701112672686577 + -0.0268632192164660 + 0.3632225990295410 + <_> + + <_> + + + + <_>6 12 9 6 -1. + <_>9 12 3 6 3. + 0 + -2.6973750209435821e-004 + 0.0608766600489616 + -0.1127237007021904 + <_> + + <_> + + + + <_>2 0 7 4 -1. + <_>2 2 7 2 2. + 0 + -1.3509000418707728e-003 + -0.1855207979679108 + 0.0521549582481384 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + -0.0280831903219223 + 0.3511188030242920 + -0.0235963296145201 + <_> + + <_> + + + + <_>5 0 6 10 -1. + <_>5 0 3 5 2. + <_>8 5 3 5 2. 
+ 0 + -0.0100032901391387 + -0.2905848026275635 + 0.0321256890892982 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -1.6111029544845223e-003 + 0.0981136709451675 + -0.0522037111222744 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0184119008481503 + -0.1808266937732697 + 0.0545367002487183 + <_> + + <_> + + + + <_>18 6 2 13 -1. + <_>18 6 1 13 2. + 0 + -0.0717388167977333 + -0.7665498852729797 + 3.3518690615892410e-003 + <_> + + <_> + + + + <_>0 6 2 13 -1. + <_>1 6 1 13 2. + 0 + -2.7943260502070189e-003 + 0.1587136983871460 + -0.0642718002200127 + <_> + + <_> + + + + <_>16 7 4 13 -1. + <_>16 7 2 13 2. + 0 + -0.1687474995851517 + -0.6995618939399719 + 4.8861699178814888e-003 + <_> + + <_> + + + + <_>6 5 7 6 -1. + <_>6 7 7 2 3. + 0 + -1.2672400334849954e-003 + 0.0316160395741463 + -0.2495326995849609 + <_> + + <_> + + + + <_>6 11 10 6 -1. + <_>11 11 5 3 2. + <_>6 14 5 3 2. + 0 + 0.0208077505230904 + 0.0170534104108810 + -0.2433141022920609 + <_> + + <_> + + + + <_>5 9 6 5 -1. + <_>8 9 3 5 2. + 0 + -1.5869849594309926e-003 + 0.0931710898876190 + -0.0813619270920753 + <_> + + <_> + + + + <_>10 3 4 15 -1. + <_>10 3 2 15 2. + 0 + -0.0100146904587746 + -0.2778961956501007 + 0.0265692397952080 + <_> + + <_> + + + + <_>6 3 4 15 -1. + <_>8 3 2 15 2. + 0 + -5.7948171161115170e-003 + -0.2228773981332779 + 0.0359756611287594 + <_> + + <_> + + + + <_>6 7 13 2 -1. + <_>6 8 13 1 2. + 0 + 2.7189950924366713e-003 + -0.0906319096684456 + 0.0568204000592232 + <_> + + <_> + + + + <_>2 15 16 4 -1. + <_>2 15 8 2 2. + <_>10 17 8 2 2. + 0 + 0.0388451591134071 + 0.0122808599844575 + -0.5852134823799133 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + -0.0141586801037192 + 0.1815387010574341 + -0.0311094298958778 + <_> + + <_> + + + + <_>0 7 4 13 -1. + <_>2 7 2 13 2. + 0 + -0.1827860027551651 + -0.9001380801200867 + 7.6544750481843948e-003 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + 0.0275884196162224 + -0.0124600399285555 + 0.2006936967372894 + <_> + + <_> + + + + <_>5 11 10 9 -1. + <_>5 14 10 3 3. + 0 + -0.0147844301536679 + -0.0899104923009872 + 0.0816486775875092 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + 0.1162571981549263 + 2.3692469112575054e-003 + -0.9999806880950928 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + 3.5341090988367796e-003 + -0.0617605410516262 + 0.1349063962697983 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 12 4 6 2. + 0 + 5.1878788508474827e-003 + 0.0187458600848913 + -0.1744917035102844 + <_> + + <_> + + + + <_>0 3 2 16 -1. + <_>0 11 2 8 2. + 0 + 0.0794573575258255 + -0.0234029907733202 + 0.3350220024585724 + <_> + + <_> + + + + <_>0 15 20 4 -1. + <_>10 15 10 2 2. + <_>0 17 10 2 2. + 0 + 0.0276843793690205 + 0.0236639101058245 + -0.3325636088848114 + <_> + + <_> + + + + <_>0 15 9 4 -1. + <_>0 17 9 2 2. + 0 + -4.4806320220232010e-003 + -0.1465875059366226 + 0.0473768115043640 + <_> + + <_> + + + + <_>9 14 10 6 -1. + <_>14 14 5 3 2. + <_>9 17 5 3 2. + 0 + 5.6939688511192799e-003 + -0.0567761212587357 + 0.0675808563828468 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 7.7299480326473713e-003 + -0.0311566498130560 + 0.2310259044170380 + <_> + + <_> + + + + <_>4 15 13 3 -1. + <_>4 16 13 1 3. + 0 + 3.9786100387573242e-003 + -0.0568824410438538 + 0.1327152997255325 + <_> + + <_> + + + + <_>0 0 18 4 -1. + <_>0 0 9 2 2. + <_>9 2 9 2 2. + 0 + -0.0112758800387383 + -0.2093864977359772 + 0.0352914594113827 + <_> + + <_> + + + + <_>6 5 8 15 -1. + <_>6 10 8 5 3. 
+ 0 + -2.4308220017701387e-003 + -0.2017636001110077 + 0.0345139317214489 + <_> + + <_> + + + + <_>0 0 6 7 -1. + <_>2 0 2 7 3. + 0 + 5.7369591668248177e-003 + -0.0556071586906910 + 0.1153208985924721 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>16 1 2 12 3. + 0 + 4.6170800924301147e-003 + -0.0560835003852844 + 0.0817629173398018 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -4.7089671716094017e-003 + -0.1335121989250183 + 0.0562960803508759 + <_> + + <_> + + + + <_>18 1 2 13 -1. + <_>18 1 1 13 2. + 0 + -0.0326880700886250 + 0.2792238891124725 + -0.0108676599338651 + <_> + + <_> + + + + <_>0 1 10 19 -1. + <_>5 1 5 19 2. + 0 + 0.0886861979961395 + 0.0182682201266289 + -0.3563739061355591 + <_> + + <_> + + + + <_>14 2 4 10 -1. + <_>14 2 2 10 2. + 0 + 4.5751677826046944e-003 + -0.0515584610402584 + 0.0639488101005554 + <_> + + <_> + + + + <_>0 3 4 16 -1. + <_>0 3 2 8 2. + <_>2 11 2 8 2. + 0 + 4.9765850417315960e-003 + -0.0546845905482769 + 0.1190711036324501 + <_> + + <_> + + + + <_>6 0 10 6 -1. + <_>11 0 5 3 2. + <_>6 3 5 3 2. + 0 + -6.4881290309131145e-003 + -0.0991211235523224 + 0.0265088491141796 + <_> + + <_> + + + + <_>1 14 10 6 -1. + <_>1 14 5 3 2. + <_>6 17 5 3 2. + 0 + 2.4523450993001461e-003 + -0.0950459465384483 + 0.0668029263615608 + <_> + + <_> + + + + <_>8 7 5 9 -1. + <_>8 10 5 3 3. + 0 + 7.0354789495468140e-003 + 0.1070559024810791 + -0.0623950995504856 + <_> + + <_> + + + + <_>2 2 4 10 -1. + <_>4 2 2 10 2. + 0 + 0.0427467897534370 + -0.0160921793431044 + 0.4325619935989380 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + -4.5301730278879404e-004 + 0.0364205688238144 + -0.0993228927254677 + <_> + + <_> + + + + <_>5 6 10 12 -1. + <_>5 6 5 6 2. + <_>10 12 5 6 2. + 0 + -5.2631930448114872e-003 + -0.1141674965620041 + 0.0572602190077305 + <_> + + <_> + + + + <_>9 2 4 12 -1. + <_>9 6 4 4 3. + 0 + 1.0581909446045756e-003 + 0.0332204885780811 + -0.1183122023940086 + <_> + + <_> + + + + <_>2 0 15 6 -1. + <_>2 3 15 3 2. + 0 + 0.0250889491289854 + -0.0606550201773643 + 0.1260174065828323 + <_> + + <_> + + + + <_>6 0 13 8 -1. + <_>6 4 13 4 2. + 0 + 0.2425215989351273 + 2.2060840856283903e-003 + -1.0000120401382446 + <_> + + <_> + + + + <_>1 0 13 8 -1. + <_>1 4 13 4 2. + 0 + -0.1439307928085327 + 0.3741979897022247 + -0.0222521107643843 + <_> + + <_> + + + + <_>11 4 2 14 -1. + <_>11 11 2 7 2. + 0 + -6.0972762294113636e-003 + -0.1103809997439385 + 0.0459969602525234 + <_> + + <_> + + + + <_>0 1 20 3 -1. + <_>0 2 20 1 3. + 0 + 6.1375470831990242e-003 + 0.0383078083395958 + -0.1808677017688751 + <_> + + <_> + + + + <_>8 5 6 10 -1. + <_>11 5 3 5 2. + <_>8 10 3 5 2. + 0 + -3.6617079749703407e-003 + 0.0384399183094502 + -0.0625407919287682 + <_> + + <_> + + + + <_>4 8 10 12 -1. + <_>9 8 5 12 2. + 0 + -0.1585485041141510 + 0.3446939885616303 + -0.0198375005275011 + <_> + + <_> + + + + <_>8 5 6 5 -1. + <_>8 5 3 5 2. + 0 + 0.0672192871570587 + 9.5165139064192772e-003 + -0.5020645856857300 + <_> + + <_> + + + + <_>6 5 6 5 -1. + <_>9 5 3 5 2. + 0 + 2.2499680053442717e-003 + -0.1306392997503281 + 0.0648329332470894 + <_> + + <_> + + + + <_>13 0 6 7 -1. + <_>15 0 2 7 3. + 0 + 0.0846267864108086 + 5.9339799918234348e-003 + -0.4151659011840820 + <_> + + <_> + + + + <_>1 0 6 7 -1. + <_>3 0 2 7 3. + 0 + -9.5411221263930202e-004 + -0.0937907472252846 + 0.0754866078495979 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -7.6813949272036552e-003 + -0.1482196003198624 + 0.0290105808526278 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>6 17 6 3 3. 
+ 0 + -0.0255933199077845 + 0.1485957950353622 + -0.0471959300339222 + <_> + + <_> + + + + <_>6 7 12 8 -1. + <_>10 7 4 8 3. + 0 + 0.0215083695948124 + 0.0237826202064753 + -0.0966592878103256 + <_> + + <_> + + + + <_>0 14 18 5 -1. + <_>6 14 6 5 3. + 0 + 0.0344631001353264 + -0.0374100692570210 + 0.2201530039310455 + <_> + + <_> + + + + <_>0 13 20 4 -1. + <_>10 13 10 2 2. + <_>0 15 10 2 2. + 0 + -0.0378603003919125 + -0.5004746913909912 + 0.0140598695725203 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 1.2028450146317482e-003 + -0.0650870576500893 + 0.0895834863185883 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + 0.0167535208165646 + 4.9179811030626297e-003 + -0.4303090870380402 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + 1.6640779795125127e-003 + 0.0408074297010899 + -0.1446996033191681 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 3.4473428968340158e-003 + -0.0399101786315441 + 0.1527296006679535 + <_> + + <_> + + + + <_>0 8 8 6 -1. + <_>0 10 8 2 3. + 0 + 8.9918142184615135e-003 + 0.0710712671279907 + -0.0861699134111404 + <_> + + <_> + + + + <_>4 8 15 2 -1. + <_>4 9 15 1 2. + 0 + 8.3185202674940228e-004 + -0.2573918998241425 + 0.0179410893470049 + <_> + + <_> + + + + <_>0 9 6 5 -1. + <_>3 9 3 5 2. + 0 + -6.8142730742692947e-003 + 0.1382316052913666 + -0.0539945401251316 + <_> + + <_> + + + + <_>13 9 6 5 -1. + <_>13 9 3 5 2. + 0 + 2.9746210202574730e-003 + -0.0415502600371838 + 0.0398397706449032 + <_> + + <_> + + + + <_>1 9 6 5 -1. + <_>4 9 3 5 2. + 0 + 2.5836620479822159e-003 + -0.0706564933061600 + 0.0950455069541931 + <_> + + <_> + + + + <_>13 0 4 14 -1. + <_>15 0 2 7 2. + <_>13 7 2 7 2. + 0 + 2.7143809711560607e-004 + 0.0580700710415840 + -0.1278176009654999 + <_> + + <_> + + + + <_>0 0 14 19 -1. + <_>7 0 7 19 2. + 0 + 0.3541829884052277 + 5.4909070022404194e-003 + -0.9796069860458374 + <_> + + <_> + + + + <_>13 0 4 14 -1. + <_>15 0 2 7 2. + <_>13 7 2 7 2. + 0 + 0.0253186505287886 + -0.0144109698012471 + 0.2621912956237793 + <_> + + <_> + + + + <_>3 0 4 14 -1. + <_>3 0 2 7 2. + <_>5 7 2 7 2. + 0 + -2.2658439411316067e-004 + 0.0529978498816490 + -0.1162934973835945 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + 6.8859090097248554e-003 + 0.0164373107254505 + -0.2034949064254761 + <_> + + <_> + + + + <_>2 4 14 3 -1. + <_>2 5 14 1 3. + 0 + 0.0116074597463012 + -0.0366510115563869 + 0.1518401056528091 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + -4.8253959976136684e-003 + -0.2347615063190460 + 0.0379140116274357 + <_> + + <_> + + + + <_>7 6 4 12 -1. + <_>7 12 4 6 2. + 0 + 2.5656020734459162e-003 + 0.0351856388151646 + -0.1854071021080017 + <_> + + <_> + + + + <_>6 2 14 18 -1. + <_>13 2 7 9 2. + <_>6 11 7 9 2. + 0 + 0.1260139942169190 + -9.8542850464582443e-003 + 0.2552069127559662 + <_> + + <_> + + + + <_>5 9 9 6 -1. + <_>5 12 9 3 2. + 0 + 2.7164958883076906e-003 + -0.0217484403401613 + 0.2546752989292145 + <_> + + <_> + + + + <_>0 1 20 18 -1. + <_>10 1 10 9 2. + <_>0 10 10 9 2. + 0 + 0.3235602974891663 + 8.8657345622777939e-003 + -0.7038357257843018 + <_> + + <_> + + + + <_>4 10 7 4 -1. + <_>4 12 7 2 2. + 0 + -8.4016058826819062e-004 + 0.0368313603103161 + -0.1495326012372971 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 3.3291990403085947e-003 + 0.0481858402490616 + -0.1229047030210495 + <_> + + <_> + + + + <_>1 0 14 12 -1. + <_>1 4 14 4 3. + 0 + 0.2113053947687149 + 6.5245870500802994e-003 + -0.8829386234283447 + <_> + + <_> + + + + <_>9 0 6 8 -1. + <_>9 0 3 8 2. 
+ 0 + 5.0388509407639503e-003 + -0.0670799463987350 + 0.0378497093915939 + <_> + + <_> + + + + <_>4 2 12 5 -1. + <_>8 2 4 5 3. + 0 + -0.0278623998165131 + 0.3346948921680450 + -0.0188165009021759 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + 3.8636629469692707e-003 + 0.0436447300016880 + -0.1748148947954178 + <_> + + <_> + + + + <_>4 0 8 10 -1. + <_>8 0 4 10 2. + 0 + 0.1048030033707619 + -0.0157375298440456 + 0.4209423959255219 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -3.4130848944187164e-003 + -0.1083557009696960 + 0.0437177903950214 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0463969707489014 + -0.7568007707595825 + 8.6701400578022003e-003 + <_> + + <_> + + + + <_>9 2 2 13 -1. + <_>9 2 1 13 2. + 0 + 5.3708078339695930e-003 + -0.0417978018522263 + 0.1482471972703934 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -6.1126388609409332e-003 + 0.1867371946573257 + -0.0433874912559986 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0425093211233616 + 0.0116906799376011 + -0.4374065995216370 + <_> + + <_> + + + + <_>0 4 18 10 -1. + <_>0 4 9 5 2. + <_>9 9 9 5 2. + 0 + 0.0104730203747749 + 0.0431436300277710 + -0.1565439999103546 + <_> + + <_> + + + + <_>12 9 7 6 -1. + <_>12 11 7 2 3. + 0 + -0.0472239591181278 + -0.7448353767395020 + 3.4918629098683596e-003 + <_> + + <_> + + + + <_>1 9 7 6 -1. + <_>1 11 7 2 3. + 0 + 0.0530903600156307 + 0.0104081500321627 + -0.5349944829940796 + <_> + + <_> + + + + <_>4 3 16 6 -1. + <_>12 3 8 3 2. + <_>4 6 8 3 2. + 0 + -7.0432561915367842e-004 + 0.0333841703832150 + -0.0737060308456421 + <_> + + <_> + + + + <_>3 4 5 9 -1. + <_>3 7 5 3 3. + 0 + 7.5942431576550007e-003 + -0.0291070491075516 + 0.1946886032819748 + <_> + + <_> + + + + <_>8 4 12 5 -1. + <_>12 4 4 5 3. + 0 + 0.0226769894361496 + 0.0338038206100464 + -0.2762761116027832 + <_> + + <_> + + + + <_>3 9 8 4 -1. + <_>3 11 8 2 2. + 0 + 6.6533521749079227e-003 + -0.0265782400965691 + 0.2428331971168518 + <_> + + <_> + + + + <_>11 0 2 15 -1. + <_>11 0 1 15 2. + 0 + 3.7712270859628916e-003 + 0.0265542995184660 + -0.0649529173970222 + <_> + + <_> + + + + <_>7 0 2 15 -1. + <_>8 0 1 15 2. + 0 + -2.0740530453622341e-003 + -0.1796897053718567 + 0.0315321609377861 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -1.5632519498467445e-003 + 0.0531096793711185 + -0.0874156281352043 + <_> + + <_> + + + + <_>8 3 4 8 -1. + <_>10 3 2 8 2. + 0 + 0.0125408899039030 + -0.0341364592313766 + 0.2209753990173340 + <_> + + <_> + + + + <_>9 13 6 7 -1. + <_>11 13 2 7 3. + 0 + -3.2660199794918299e-003 + -0.0552616082131863 + 0.0326695591211319 + <_> + + <_> + + + + <_>4 14 9 5 -1. + <_>7 14 3 5 3. + 0 + -8.2185603678226471e-003 + -0.1447837948799133 + 0.0557439289987087 + <_> + + <_> + + + + <_>15 3 4 17 -1. + <_>15 3 2 17 2. + 0 + -0.0558110401034355 + 0.1723794043064117 + -0.0144565198570490 + <_> + + <_> + + + + <_>1 6 4 13 -1. + <_>3 6 2 13 2. + 0 + -0.1472315937280655 + -0.8139231204986572 + 7.4356291443109512e-003 + <_> + + <_> + + + + <_>11 12 4 7 -1. + <_>11 12 2 7 2. + 0 + -5.8468529023230076e-003 + -0.0690434426069260 + 0.0194567907601595 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>2 1 2 7 3. + 0 + 0.0194622203707695 + -0.0354722291231155 + 0.1666630059480667 + <_> + + <_> + + + + <_>9 12 6 7 -1. + <_>11 12 2 7 3. + 0 + 0.0583534687757492 + 3.0551329255104065e-003 + -0.3928912878036499 + <_> + + <_> + + + + <_>5 12 6 7 -1. + <_>7 12 2 7 3. 
+ 0 + 0.0437858290970325 + 0.0135746300220490 + -0.4615235924720764 + <_> + + <_> + + + + <_>7 7 6 8 -1. + <_>9 7 2 8 3. + 0 + -0.0519043505191803 + 0.6380243897438049 + -9.6664745360612869e-003 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + -7.7811058145016432e-004 + -0.0993032231926918 + 0.0560946017503738 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 4.9657518975436687e-003 + 0.0414193682372570 + -0.1127481982111931 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -5.4516079835593700e-003 + 0.1739906072616577 + -0.0411477312445641 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + 5.0428751856088638e-003 + -0.0412552207708359 + 0.1379422992467880 + <_> + + <_> + + + + <_>2 9 14 3 -1. + <_>2 10 14 1 3. + 0 + -1.6985220136120915e-003 + -0.2287479043006897 + 0.0252749808132648 + <_> + + <_> + + + + <_>8 7 7 4 -1. + <_>8 9 7 2 2. + 0 + 0.0827642381191254 + 3.3066510222852230e-003 + -0.6911343932151794 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 3.9285849779844284e-003 + -0.0790433585643768 + 0.0662188529968262 + <_> + + <_> + + + + <_>13 12 5 6 -1. + <_>13 15 5 3 2. + 0 + -0.0306012406945229 + -0.2651745080947876 + 0.0164678506553173 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0199411604553461 + 0.1543180942535400 + -0.0361006893217564 + <_> + + <_> + + + + <_>4 5 16 3 -1. + <_>4 5 8 3 2. + 0 + 0.0805200636386871 + 0.0170159190893173 + -0.3344888091087341 + <_> + + <_> + + + + <_>5 3 4 14 -1. + <_>5 10 4 7 2. + 0 + 0.0703238472342491 + 0.0171224400401115 + -0.3330214023590088 + <_> + + <_> + + + + <_>4 13 15 5 -1. + <_>9 13 5 5 3. + 0 + -0.0528509393334389 + 0.0624214000999928 + -0.0146901998668909 + <_> + + <_> + + + + <_>0 3 14 2 -1. + <_>0 4 14 1 2. + 0 + -7.1594159817323089e-004 + -0.1133515015244484 + 0.0522607900202274 + <_> + + <_> + + + + <_>4 13 15 5 -1. + <_>9 13 5 5 3. + 0 + 0.2146997004747391 + 9.9299731664359570e-004 + -0.9999758005142212 + <_> + + <_> + + + + <_>1 13 15 5 -1. + <_>6 13 5 5 3. + 0 + 0.0870425924658775 + -0.0123297600075603 + 0.5026066899299622 + <_> + + <_> + + + + <_>12 0 8 6 -1. + <_>12 2 8 2 3. + 0 + -5.8731262106448412e-004 + -0.0993464663624763 + 0.0517056100070477 + <_> + + <_> + + + + <_>3 10 6 5 -1. + <_>6 10 3 5 2. + 0 + -0.0442152209579945 + -0.3936890065670013 + 0.0139208501204848 + <_> + + <_> + + + + <_>4 7 14 8 -1. + <_>11 7 7 4 2. + <_>4 11 7 4 2. + 0 + -0.0876762270927429 + 0.3015744090080261 + -6.8702381104230881e-003 + <_> + + <_> + + + + <_>2 7 14 8 -1. + <_>2 7 7 4 2. + <_>9 11 7 4 2. + 0 + -0.0484539903700352 + 0.2547787129878998 + -0.0224577505141497 + <_> + + <_> + + + + <_>11 0 2 20 -1. + <_>11 0 1 20 2. + 0 + -2.1567570511251688e-003 + -0.1356289982795715 + 0.0317253991961479 + <_> + + <_> + + + + <_>7 0 2 20 -1. + <_>8 0 1 20 2. + 0 + 3.9050900377333164e-003 + 0.0491008907556534 + -0.1186105981469154 + <_> + + <_> + + + + <_>10 5 6 8 -1. + <_>12 5 2 8 3. + 0 + -3.9808028377592564e-003 + 0.0483339093625546 + -0.0558970794081688 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + 2.9744929634034634e-003 + -0.0648024529218674 + 0.0935835018754005 + <_> + + <_> + + + + <_>3 2 14 4 -1. + <_>10 2 7 2 2. + <_>3 4 7 2 2. + 0 + 0.0258752293884754 + 0.0184876099228859 + -0.3343634903430939 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + -1.9373580580577254e-003 + 0.2200064957141876 + -0.0254049804061651 + <_> + + <_> + + + + <_>8 4 9 16 -1. + <_>11 4 3 16 3. 
+ 0 + -0.0201716292649508 + -0.0782283097505569 + 0.0454627908766270 + <_> + + <_> + + + + <_>4 5 6 8 -1. + <_>6 5 2 8 3. + 0 + -0.0260881409049034 + 0.1763706952333450 + -0.0450972989201546 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>10 10 3 5 2. + <_>7 15 3 5 2. + 0 + -0.0268683005124331 + -0.3265641927719116 + 0.0179942306131125 + <_> + + <_> + + + + <_>5 11 5 6 -1. + <_>5 14 5 3 2. + 0 + -7.0211151614785194e-004 + 0.0396719984710217 + -0.1453354060649872 + <_> + + <_> + + + + <_>4 8 13 8 -1. + <_>4 12 13 4 2. + 0 + 8.3507681265473366e-003 + -0.0230517294257879 + 0.1885076016187668 + <_> + + <_> + + + + <_>0 9 10 6 -1. + <_>0 9 5 3 2. + <_>5 12 5 3 2. + 0 + 4.6823569573462009e-003 + 0.0299965608865023 + -0.2070102989673615 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 3.3109660726040602e-003 + 0.0565367303788662 + -0.1683558970689774 + <_> + + <_> + + + + <_>4 0 5 8 -1. + <_>4 4 5 4 2. + 0 + 7.6425541192293167e-003 + -0.0414239503443241 + 0.1255751997232437 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>8 6 4 5 2. + 0 + -2.4713110178709030e-003 + 0.0721561536192894 + -0.1076773032546043 + <_> + + <_> + + + + <_>6 3 7 10 -1. + <_>6 8 7 5 2. + 0 + -9.9495360627770424e-003 + -0.1818761974573135 + 0.0335672311484814 + <_> + + <_> + + + + <_>6 12 13 3 -1. + <_>6 13 13 1 3. + 0 + 1.9820800516754389e-003 + -0.0564887188374996 + 0.1074149012565613 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. + 0 + 0.0232544392347336 + -0.0165433492511511 + 0.3646667897701263 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + -0.0541779212653637 + -1. + 3.3418419770896435e-003 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + 6.1567849479615688e-004 + 0.0401593297719955 + -0.1646022051572800 + <_> + + <_> + + + + <_>9 12 9 4 -1. + <_>9 14 9 2 2. + 0 + -4.2699510231614113e-003 + -0.0569786205887794 + 0.0444809012115002 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 12 8 2 2. + <_>10 14 8 2 2. + 0 + 1.9749389030039310e-003 + 0.0592836812138557 + -0.1079126000404358 + <_> + + <_> + + + + <_>10 14 10 6 -1. + <_>15 14 5 3 2. + <_>10 17 5 3 2. + 0 + -5.8583128266036510e-003 + 0.1373405009508133 + -0.0342315211892128 + <_> + + <_> + + + + <_>4 1 8 8 -1. + <_>4 1 4 4 2. + <_>8 5 4 4 2. + 0 + -7.2995189111679792e-004 + -0.1007506027817726 + 0.0547331608831882 + <_> + + <_> + + + + <_>2 12 18 7 -1. + <_>8 12 6 7 3. + 0 + -0.0299307405948639 + 0.0638825595378876 + -0.0410270206630230 + <_> + + <_> + + + + <_>3 13 12 6 -1. + <_>3 13 6 3 2. + <_>9 16 6 3 2. + 0 + -0.0517387501895428 + -0.7271345853805542 + 7.4993381276726723e-003 + <_> + + <_> + + + + <_>4 12 13 4 -1. + <_>4 14 13 2 2. + 0 + 0.0240211896598339 + 7.8491801396012306e-003 + -0.5579447150230408 + <_> + + <_> + + + + <_>6 0 2 15 -1. + <_>7 0 1 15 2. + 0 + -3.7574321031570435e-003 + -0.1608687937259674 + 0.0310159903019667 + <_> + + <_> + + + + <_>4 2 16 18 -1. + <_>12 2 8 9 2. + <_>4 11 8 9 2. + 0 + -0.0626356825232506 + 0.0905778631567955 + -0.0290337707847357 + <_> + + <_> + + + + <_>1 16 18 4 -1. + <_>7 16 6 4 3. + 0 + 0.0193634293973446 + -0.0499205887317657 + 0.1283577978610992 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0350728891789913 + 0.2139184027910233 + -8.8168960064649582e-003 + <_> + + <_> + + + + <_>4 0 12 9 -1. + <_>8 0 4 9 3. + 0 + -0.0132433101534843 + 0.2334969937801361 + -0.0230880193412304 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + -0.0312908291816711 + -0.6949509978294373 + 9.3020889908075333e-003 + <_> + + <_> + + + + <_>4 9 6 6 -1. 
+ <_>7 9 3 6 2. + 0 + 7.2391419671475887e-003 + 0.0284858494997025 + -0.1831077039241791 + <_> + + <_> + + + + <_>7 12 12 8 -1. + <_>13 12 6 4 2. + <_>7 16 6 4 2. + 0 + 6.6785318776965141e-003 + -0.0491329506039619 + 0.0541816912591457 + <_> + + <_> + + + + <_>1 12 12 8 -1. + <_>1 12 6 4 2. + <_>7 16 6 4 2. + 0 + -0.0368255712091923 + 0.3312020897865295 + -0.0213599298149347 + <_> + + <_> + + + + <_>0 10 20 9 -1. + <_>0 13 20 3 3. + 0 + -0.0455073416233063 + -0.1289349049329758 + 0.0495459884405136 + <_> + + <_> + + + + <_>4 5 10 6 -1. + <_>4 5 5 3 2. + <_>9 8 5 3 2. + 0 + 7.7639957889914513e-003 + -0.0362556204199791 + 0.1532140970230103 + <_> + + <_> + + + + <_>13 3 7 6 -1. + <_>13 5 7 2 3. + 0 + 0.0604176111519337 + 4.5740022324025631e-003 + -0.6754109263420105 + <_> + + <_> + + + + <_>8 1 4 14 -1. + <_>8 1 2 7 2. + <_>10 8 2 7 2. + 0 + 2.4624960497021675e-003 + 0.0536741614341736 + -0.1132654026150703 + <_> + + <_> + + + + <_>12 8 5 6 -1. + <_>12 11 5 3 2. + 0 + 7.3594506829977036e-005 + -0.0356489308178425 + 0.0254589691758156 + <_> + + <_> + + + + <_>3 8 5 6 -1. + <_>3 11 5 3 2. + 0 + -4.0958370082080364e-003 + 0.1556290984153748 + -0.0393906012177467 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 2.8689370083156973e-005 + -0.0848233029246330 + 0.0382542386651039 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -4.6220528893172741e-003 + -0.1899452954530716 + 0.0335087589919567 + <_> + + <_> + + + + <_>2 0 18 4 -1. + <_>8 0 6 4 3. + 0 + -8.5343196988105774e-003 + 0.1121253967285156 + -0.0339684896171093 + <_> + + <_> + + + + <_>6 5 3 14 -1. + <_>6 12 3 7 2. + 0 + -0.0588038489222527 + -0.5124431252479553 + 0.0107895499095321 + <_> + + <_> + + + + <_>5 17 15 3 -1. + <_>10 17 5 3 3. + 0 + 0.0607199296355248 + -0.0125550301745534 + 0.2250975966453552 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>6 0 3 7 2. + 0 + 1.1038020020350814e-003 + -0.0962944924831390 + 0.0567274801433086 + <_> + + <_> + + + + <_>8 3 12 17 -1. + <_>8 3 6 17 2. + 0 + -3.8484560791403055e-003 + 0.0405734591186047 + -0.0253268592059612 + <_> + + <_> + + + + <_>0 2 16 12 -1. + <_>8 2 8 12 2. + 0 + -0.0107710501179099 + 0.0887356325984001 + -0.0556286796927452 + <_> + + <_> + + + + <_>7 6 6 12 -1. + <_>7 12 6 6 2. + 0 + 0.0120168095454574 + 0.0235662795603275 + -0.2459058016538620 + <_> + + <_> + + + + <_>8 8 4 8 -1. + <_>8 12 4 4 2. + 0 + -1.1656560236588120e-003 + -0.0374173000454903 + 0.1650328934192658 + <_> + + <_> + + + + <_>8 7 12 10 -1. + <_>14 7 6 5 2. + <_>8 12 6 5 2. + 0 + 0.0321376286447048 + 0.0142459701746702 + -0.2648085057735443 + <_> + + <_> + + + + <_>4 1 12 5 -1. + <_>10 1 6 5 2. + 0 + 0.0233316700905561 + -0.0352887213230133 + 0.1844782978296280 + <_> + + <_> + + + + <_>7 2 8 8 -1. + <_>11 2 4 4 2. + <_>7 6 4 4 2. + 0 + -0.0126853203400970 + -0.1175730973482132 + 0.0164369102567434 + <_> + + <_> + + + + <_>5 2 8 8 -1. + <_>5 2 4 4 2. + <_>9 6 4 4 2. + 0 + 7.3903938755393028e-005 + -0.1027147993445396 + 0.0743014365434647 + <_> + + <_> + + + + <_>3 14 14 6 -1. + <_>3 17 14 3 2. + 0 + -0.1092547029256821 + -0.8316531777381897 + 5.6438110768795013e-003 + <_> + + <_> + + + + <_>3 3 5 12 -1. + <_>3 7 5 4 3. + 0 + -0.1332435011863709 + 0.7772982120513916 + -8.3403270691633224e-003 + <_> + + <_> + + + + <_>15 4 5 6 -1. + <_>15 7 5 3 2. + 0 + 8.9381448924541473e-004 + -0.0595243014395237 + 0.0411730892956257 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 0.0103186499327421 + 0.0159264300018549 + -0.3163779079914093 + <_> + + <_> + + + + <_>15 4 5 9 -1. + <_>15 7 5 3 3. 
+ 0 + -5.2297548390924931e-003 + -0.0711665600538254 + 0.0334892906248569 + <_> + + <_> + + + + <_>8 6 4 14 -1. + <_>8 6 2 7 2. + <_>10 13 2 7 2. + 0 + 0.0164096206426620 + -0.0264541208744049 + 0.1958996951580048 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + 0.0140687096863985 + -0.0393641404807568 + 0.1397742033004761 + <_> + + <_> + + + + <_>5 0 8 10 -1. + <_>5 0 4 5 2. + <_>9 5 4 5 2. + 0 + 6.6486410796642303e-003 + 0.0640708282589912 + -0.1049339994788170 + <_> + + <_> + + + + <_>9 12 6 7 -1. + <_>11 12 2 7 3. + 0 + -0.0180306192487478 + 0.0839429125189781 + -0.0133991595357656 + <_> + + <_> + + + + <_>5 12 6 7 -1. + <_>7 12 2 7 3. + 0 + -0.0440343692898750 + -0.5582545995712280 + 9.7633162513375282e-003 + <_> + + <_> + + + + <_>13 9 7 6 -1. + <_>13 11 7 2 3. + 0 + -8.0966893583536148e-003 + -0.2048978954553604 + 0.0265202000737190 + <_> + + <_> + + + + <_>1 1 16 6 -1. + <_>1 3 16 2 3. + 0 + 5.0180461257696152e-003 + -0.1166120991110802 + 0.0457916706800461 + <_> + + <_> + + + + <_>2 1 17 6 -1. + <_>2 3 17 2 3. + 0 + -0.0170646291226149 + 0.2628273069858551 + -0.0203906390815973 + <_> + + <_> + + + + <_>4 4 2 16 -1. + <_>4 12 2 8 2. + 0 + 0.0718501731753349 + -6.9503681734204292e-003 + 0.6703253984451294 + <_> + + <_> + + + + <_>7 6 10 14 -1. + <_>12 6 5 7 2. + <_>7 13 5 7 2. + 0 + -0.0569143705070019 + -0.1347790062427521 + 0.0183990802615881 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -3.2365729566663504e-003 + 0.0696738511323929 + -0.0723145306110382 + <_> + + <_> + + + + <_>4 9 12 6 -1. + <_>10 9 6 3 2. + <_>4 12 6 3 2. + 0 + 0.0418189093470573 + 0.0111514599993825 + -0.5168011188507080 + <_> + + <_> + + + + <_>1 8 18 3 -1. + <_>7 8 6 3 3. + 0 + -6.1106588691473007e-003 + -0.1316394060850143 + 0.0437965095043182 + <_> + + <_> + + + + <_>2 13 18 7 -1. + <_>8 13 6 7 3. + 0 + -0.0355609096586704 + 0.0680055022239685 + -0.0363310202956200 + <_> + + <_> + + + + <_>1 8 15 3 -1. + <_>6 8 5 3 3. + 0 + 0.0687891691923141 + 0.0146989598870277 + -0.3821229934692383 + <_> + + <_> + + + + <_>6 0 12 7 -1. + <_>10 0 4 7 3. + 0 + -0.0783133730292320 + 0.2029606997966766 + -8.6810020729899406e-003 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 3.9626220241189003e-003 + -0.0357978902757168 + 0.1390551030635834 + <_> + + <_> + + + + <_>12 11 8 4 -1. + <_>12 13 8 2 2. + 0 + -0.0338740386068821 + -0.2225342988967896 + 7.5455638580024242e-003 + <_> + + <_> + + + + <_>6 7 6 8 -1. + <_>6 11 6 4 2. + 0 + -0.0647558569908142 + 0.4752154946327210 + -0.0109706800431013 + <_> + + <_> + + + + <_>9 2 4 12 -1. + <_>9 6 4 4 3. + 0 + 0.0266479402780533 + 0.0154453096911311 + -0.2678577899932861 + <_> + + <_> + + + + <_>0 9 7 6 -1. + <_>0 11 7 2 3. + 0 + -0.0307311099022627 + -0.4766868948936462 + 9.6429884433746338e-003 + <_> + + <_> + + + + <_>15 4 5 9 -1. + <_>15 7 5 3 3. + 0 + -0.0240227002650499 + -0.1063396036624908 + 0.0128490403294563 + <_> + + <_> + + + + <_>2 18 13 2 -1. + <_>2 19 13 1 2. + 0 + -1.3036349555477500e-003 + 0.0735241770744324 + -0.0680749192833900 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -9.8344050347805023e-003 + -0.1184355020523071 + 0.0428666993975639 + <_> + + <_> + + + + <_>6 6 8 12 -1. + <_>6 10 8 4 3. + 0 + 0.0871021971106529 + -0.0400882586836815 + 0.1780454069375992 + <_> + + <_> + + + + <_>7 9 6 9 -1. + <_>7 12 6 3 3. + 0 + 0.0204115696251392 + 0.0168499890714884 + -0.3895365893840790 + <_> + + <_> + + + + <_>0 7 11 4 -1. + <_>0 9 11 2 2. 
+ 0 + 0.0958752632141113 + 5.9905550442636013e-003 + -0.8152565956115723 + <_> + + <_> + + + + <_>8 12 10 6 -1. + <_>13 12 5 3 2. + <_>8 15 5 3 2. + 0 + 6.4893220551311970e-003 + -0.0240392293781042 + 0.0538711696863174 + <_> + + <_> + + + + <_>2 12 10 6 -1. + <_>2 12 5 3 2. + <_>7 15 5 3 2. + 0 + -9.6279237186536193e-004 + 0.0942991897463799 + -0.0644360184669495 + <_> + + <_> + + + + <_>12 14 8 6 -1. + <_>12 16 8 2 3. + 0 + -3.7659960798919201e-004 + -0.0622968785464764 + 0.0412518493831158 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + 6.5272641368210316e-003 + 0.0513251312077045 + -0.1303779035806656 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + 0.0214291103184223 + -0.0119896596297622 + 0.2628045976161957 + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. + <_>8 9 4 4 2. + 0 + -5.0938720814883709e-003 + 0.0634189471602440 + -0.0905663371086121 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + -2.5309680495411158e-003 + 0.0602977611124516 + -0.0250494703650475 + <_> + + <_> + + + + <_>7 6 4 8 -1. + <_>7 10 4 4 2. + 0 + -1.5915350522845984e-003 + -0.1217119023203850 + 0.0377379916608334 + <_> + + <_> + + + + <_>9 8 11 4 -1. + <_>9 10 11 2 2. + 0 + -0.0340307094156742 + 0.4641343057155609 + -3.5409750416874886e-003 + <_> + + <_> + + + + <_>6 6 5 10 -1. + <_>6 11 5 5 2. + 0 + 5.1074200309813023e-003 + 0.0398238301277161 + -0.1264553964138031 + <_> + + <_> + + + + <_>4 7 14 6 -1. + <_>4 9 14 2 3. + 0 + -9.6449116244912148e-003 + 0.3346425890922546 + -6.6040740348398685e-003 + <_> + + <_> + + + + <_>4 4 12 8 -1. + <_>4 4 6 4 2. + <_>10 8 6 4 2. + 0 + 0.0114228604361415 + -0.0360804200172424 + 0.1371455043554306 + <_> + + <_> + + + + <_>5 5 12 5 -1. + <_>5 5 6 5 2. + 0 + -5.1042139530181885e-003 + -0.0939868092536926 + 0.0288447793573141 + <_> + + <_> + + + + <_>1 3 15 12 -1. + <_>6 3 5 12 3. + 0 + -0.2633227109909058 + 0.4998092949390411 + -0.0101732499897480 + <_> + + <_> + + + + <_>13 3 6 17 -1. + <_>13 3 3 17 2. + 0 + -0.2455663979053497 + -0.8177834749221802 + 6.9596339017152786e-003 + <_> + + <_> + + + + <_>1 3 6 17 -1. + <_>4 3 3 17 2. + 0 + -0.2141932994127274 + -0.5104051828384399 + 9.4540230929851532e-003 + <_> + + <_> + + + + <_>14 1 6 9 -1. + <_>14 4 6 3 3. + 0 + -0.0143632199615240 + -0.0910009816288948 + 0.0246466696262360 + <_> + + <_> + + + + <_>4 0 8 6 -1. + <_>4 3 8 3 2. + 0 + -1.2388969771564007e-003 + 0.1154457032680512 + -0.0495656207203865 + <_> + + <_> + + + + <_>5 4 15 3 -1. + <_>5 5 15 1 3. + 0 + 0.0210151206701994 + -0.0177658796310425 + 0.1957785934209824 + <_> + + <_> + + + + <_>0 5 8 4 -1. + <_>0 7 8 2 2. + 0 + -4.1783051565289497e-003 + -0.1117286011576653 + 0.0446254499256611 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + 2.0896939095109701e-003 + -0.0339887291193008 + 0.0655395016074181 + <_> + + <_> + + + + <_>0 2 2 13 -1. + <_>1 2 1 13 2. + 0 + 0.0164100602269173 + -0.0203732699155808 + 0.2533153891563416 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -0.0642668828368187 + -0.6588014960289002 + 3.4550630953162909e-003 + <_> + + <_> + + + + <_>0 7 2 13 -1. + <_>1 7 1 13 2. + 0 + 6.8898178869858384e-004 + 0.0676432475447655 + -0.0875562429428101 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 5.6662331335246563e-003 + 0.0306383091956377 + -0.1189554035663605 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + -0.0437781214714050 + -0.2830913066864014 + 0.0177136305719614 + <_> + + <_> + + + + <_>4 8 13 2 -1. + <_>4 9 13 1 2. 
+ 0 + 3.4748481120914221e-003 + -0.0957871228456497 + 0.0426304005086422 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 14 16 2 2. + 0 + -0.0116739403456450 + -0.1050257012248039 + 0.0509038902819157 + <_> + + <_> + + + + <_>6 12 13 3 -1. + <_>6 13 13 1 3. + 0 + -3.4004659391939640e-003 + 0.1047071963548660 + -0.0409391410648823 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 2.7091780211776495e-003 + -0.0605246014893055 + 0.1397895067930222 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. + 0 + -0.0174393001943827 + -0.3239116966724396 + 0.0146302497014403 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -0.0125983301550150 + -0.2068262994289398 + 0.0255018696188927 + <_> + + <_> + + + + <_>6 2 8 6 -1. + <_>6 4 8 2 3. + 0 + 0.0187558699399233 + -0.0479259602725506 + 0.1086438000202179 + <_> + + <_> + + + + <_>6 5 7 4 -1. + <_>6 7 7 2 2. + 0 + -4.2074159719049931e-003 + -0.0820778086781502 + 0.0636477693915367 + <_> + + <_> + + + + <_>9 5 10 9 -1. + <_>9 8 10 3 3. + 0 + -1.6427719674538821e-004 + 0.1012039035558701 + -0.0340679287910461 + <_> + + <_> + + + + <_>0 10 18 4 -1. + <_>0 10 9 2 2. + <_>9 12 9 2 2. + 0 + 0.0438476912677288 + 6.0980222187936306e-003 + -0.8368598222732544 + <_> + + <_> + + + + <_>8 7 6 9 -1. + <_>10 7 2 9 3. + 0 + -0.0392846800386906 + 0.2825056016445160 + -0.0223892591893673 + <_> + + <_> + + + + <_>6 4 4 7 -1. + <_>8 4 2 7 2. + 0 + 0.0385509096086025 + 0.0155704896897078 + -0.3397862017154694 + <_> + + <_> + + + + <_>9 6 9 10 -1. + <_>12 6 3 10 3. + 0 + -0.0691770315170288 + 0.1225832030177116 + -0.0178501792252064 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -1.9251030171290040e-003 + -0.1068774983286858 + 0.0463795103132725 + <_> + + <_> + + + + <_>10 14 10 6 -1. + <_>15 14 5 3 2. + <_>10 17 5 3 2. + 0 + -8.6635202169418335e-003 + 0.0964127480983734 + -0.0175632499158382 + <_> + + <_> + + + + <_>0 6 5 12 -1. + <_>0 10 5 4 3. + 0 + 0.1339350938796997 + 6.3692941330373287e-003 + -0.7017058730125427 + <_> + + <_> + + + + <_>9 6 9 10 -1. + <_>12 6 3 10 3. + 0 + 0.0410823486745358 + -0.0110775697976351 + 0.1346375048160553 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1491145044565201 + 9.5263421535491943e-003 + -0.5087255239486694 + <_> + + <_> + + + + <_>6 13 10 7 -1. + <_>6 13 5 7 2. + 0 + -5.2500818856060505e-003 + 0.0700255781412125 + -0.0428802706301212 + <_> + + <_> + + + + <_>0 2 6 17 -1. + <_>3 2 3 17 2. + 0 + 0.0228235702961683 + -0.0418840497732162 + 0.1177031993865967 + <_> + + <_> + + + + <_>10 14 9 5 -1. + <_>13 14 3 5 3. + 0 + -8.5306530818343163e-003 + 0.0612221397459507 + -0.0249445494264364 + <_> + + <_> + + + + <_>1 14 9 5 -1. + <_>4 14 3 5 3. + 0 + 0.0119717298075557 + 0.0396627709269524 + -0.1626774072647095 + <_> + + <_> + + + + <_>7 13 7 6 -1. + <_>7 15 7 2 3. + 0 + -0.0389382690191269 + 0.2574352025985718 + -0.0163562390953302 + <_> + + <_> + + + + <_>1 14 7 6 -1. + <_>1 16 7 2 3. + 0 + -0.0217063892632723 + -0.3199867904186249 + 0.0171352904289961 + <_> + + <_> + + + + <_>12 10 8 6 -1. + <_>12 12 8 2 3. + 0 + 6.6900630481541157e-003 + 0.0261018499732018 + -0.1098072975873947 + <_> + + <_> + + + + <_>2 6 9 9 -1. + <_>5 6 3 9 3. + 0 + -0.0722708329558373 + 0.1943113058805466 + -0.0260443594306707 + <_> + + <_> + + + + <_>12 10 7 6 -1. + <_>12 12 7 2 3. + 0 + -6.7073688842356205e-003 + -0.1774785071611404 + 0.0458629988133907 + <_> + + <_> + + + + <_>3 2 4 12 -1. + <_>5 2 2 12 2. 
+ 0 + 0.0550193600356579 + -8.3471573889255524e-003 + 0.6051154136657715 + <_> + + <_> + + + + <_>9 1 7 15 -1. + <_>9 6 7 5 3. + 0 + 0.1314264982938767 + -5.7535418309271336e-003 + 0.2916753888130188 + <_> + + <_> + + + + <_>6 10 4 7 -1. + <_>8 10 2 7 2. + 0 + -1.6564460238441825e-003 + 0.0700030326843262 + -0.0626908764243126 + <_> + + <_> + + + + <_>5 0 10 20 -1. + <_>10 0 5 10 2. + <_>5 10 5 10 2. + 0 + 0.1544540971517563 + 6.1896732077002525e-003 + -0.7432330250740051 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>9 10 2 10 3. + 0 + -5.0357519648969173e-003 + -0.1133328974246979 + 0.0387417711317539 + <_> + + <_> + + + + <_>12 7 7 4 -1. + <_>12 9 7 2 2. + 0 + 2.2772569209337234e-003 + -0.1134053021669388 + 0.0213194005191326 + <_> + + <_> + + + + <_>2 7 16 4 -1. + <_>2 7 8 2 2. + <_>10 9 8 2 2. + 0 + 3.3173530828207731e-003 + 0.0442733317613602 + -0.1045982986688614 + <_> + + <_> + + + + <_>5 10 12 10 -1. + <_>5 10 6 10 2. + 0 + -0.0296928007155657 + 0.0924837663769722 + -0.0233426094055176 + <_> + + <_> + + + + <_>6 1 2 16 -1. + <_>6 9 2 8 2. + 0 + 0.0629378408193588 + -0.0129982801154256 + 0.3888793885707855 + <_> + + <_> + + + + <_>6 2 12 10 -1. + <_>6 7 12 5 2. + 0 + 3.6641359329223633e-003 + 0.0320998206734657 + -0.0396479889750481 + <_> + + <_> + + + + <_>2 4 14 6 -1. + <_>2 4 7 3 2. + <_>9 7 7 3 2. + 0 + 4.4782999902963638e-003 + -0.0457013286650181 + 0.1069701015949249 + <_> + + <_> + + + + <_>5 0 11 12 -1. + <_>5 4 11 4 3. + 0 + 1.8147319788113236e-003 + -0.0328718200325966 + 0.1064793989062309 + <_> + + <_> + + + + <_>7 1 6 12 -1. + <_>7 5 6 4 3. + 0 + 4.8941639252007008e-003 + 0.0279110092669725 + -0.2172559052705765 + <_> + + <_> + + + + <_>9 8 11 4 -1. + <_>9 10 11 2 2. + 0 + -4.4425828382372856e-003 + -0.1347015053033829 + 0.0107814101502299 + <_> + + <_> + + + + <_>0 8 11 4 -1. + <_>0 10 11 2 2. + 0 + -0.0254934001713991 + 0.6837146878242493 + -7.7452720142900944e-003 + <_> + + <_> + + + + <_>1 8 19 6 -1. + <_>1 11 19 3 2. + 0 + 0.0278354492038488 + 0.0241442993283272 + -0.1517059952020645 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>7 4 6 4 2. + 0 + 7.5548859313130379e-003 + -0.0476434007287025 + 0.1192577034235001 + <_> + + <_> + + + + <_>5 3 15 2 -1. + <_>5 4 15 1 2. + 0 + 0.0103296097368002 + 0.0186468102037907 + -0.1612257063388825 + <_> + + <_> + + + + <_>2 7 14 6 -1. + <_>2 9 14 2 3. + 0 + -0.0123933898285031 + 0.6030492186546326 + -7.7566630207002163e-003 + <_> + + <_> + + + + <_>3 0 17 6 -1. + <_>3 2 17 2 3. + 0 + 0.0138337695971131 + -0.0276172999292612 + 0.0512668788433075 + <_> + + <_> + + + + <_>0 0 17 6 -1. + <_>0 2 17 2 3. + 0 + -0.0256693195551634 + 0.2380135953426361 + -0.0239719096571207 + <_> + + <_> + + + + <_>13 2 7 4 -1. + <_>13 4 7 2 2. + 0 + -5.2043660543859005e-003 + -0.1072179004549980 + 0.0266450494527817 + <_> + + <_> + + + + <_>0 2 7 4 -1. + <_>0 4 7 2 2. + 0 + 3.4628969151526690e-003 + 0.0543134100735188 + -0.1345832049846649 + <_> + + <_> + + + + <_>8 1 12 10 -1. + <_>14 1 6 5 2. + <_>8 6 6 5 2. + 0 + -0.0192206799983978 + 0.0729963928461075 + -0.0406521111726761 + <_> + + <_> + + + + <_>2 1 4 8 -1. + <_>2 5 4 4 2. + 0 + -2.5009829550981522e-003 + -0.0776712968945503 + 0.0590965412557125 + <_> + + <_> + + + + <_>5 1 11 10 -1. + <_>5 6 11 5 2. + 0 + -8.5285156965255737e-003 + 0.0490508116781712 + -0.0640783533453941 + <_> + + <_> + + + + <_>3 9 10 6 -1. + <_>3 9 5 3 2. + <_>8 12 5 3 2. + 0 + 4.3327538296580315e-003 + 0.0252210106700659 + -0.1935898065567017 + <_> + + <_> + + + + <_>12 7 7 4 -1. + <_>12 9 7 2 2. 
+ 0 + 0.0365959703922272 + -0.0162625908851624 + 0.1565123945474625 + <_> + + <_> + + + + <_>2 7 12 8 -1. + <_>6 7 4 8 3. + 0 + -1.1795730097219348e-003 + -0.0724680721759796 + 0.0704494863748550 + <_> + + <_> + + + + <_>10 10 8 4 -1. + <_>10 10 4 4 2. + 0 + -0.0139758298173547 + -0.1178947016596794 + 0.0212920494377613 + <_> + + <_> + + + + <_>2 10 8 4 -1. + <_>6 10 4 4 2. + 0 + -1.3828700175508857e-003 + 0.0792835429310799 + -0.0951041206717491 + <_> + + <_> + + + + <_>3 10 16 3 -1. + <_>3 10 8 3 2. + 0 + -2.9435830656439066e-003 + 0.0703684315085411 + -0.0332179106771946 + <_> + + <_> + + + + <_>1 11 6 5 -1. + <_>4 11 3 5 2. + 0 + 9.5262555405497551e-003 + -0.0297336205840111 + 0.1667045950889587 + <_> + + <_> + + + + <_>10 7 9 9 -1. + <_>13 7 3 9 3. + 0 + -0.0901142731308937 + -0.1662537008523941 + 8.6199166253209114e-003 + <_> + + <_> + + + + <_>1 7 9 9 -1. + <_>4 7 3 9 3. + 0 + -1.2089919764548540e-003 + 0.0810838565230370 + -0.0730291232466698 + <_> + + <_> + + + + <_>5 5 12 5 -1. + <_>5 5 6 5 2. + 0 + -0.1419996023178101 + -1. + 2.2284830920398235e-003 + <_> + + <_> + + + + <_>3 5 12 5 -1. + <_>9 5 6 5 2. + 0 + 8.0690719187259674e-003 + 0.0474122203886509 + -0.1017893031239510 + <_> + + <_> + + + + <_>2 3 16 2 -1. + <_>2 3 8 2 2. + 0 + -4.7410889528691769e-003 + 0.1205111965537071 + -0.0499574802815914 + <_> + + <_> + + + + <_>2 8 7 6 -1. + <_>2 10 7 2 3. + 0 + -1.6977200284600258e-003 + -0.2417144030332565 + 0.0195343699306250 + <_> + + <_> + + + + <_>7 8 9 6 -1. + <_>7 10 9 2 3. + 0 + -2.8892089612782001e-003 + 0.2572799026966095 + -0.0116250598803163 + <_> + + <_> + + + + <_>3 0 3 15 -1. + <_>4 0 1 15 3. + 0 + -1.5177440363913774e-003 + -0.0987841933965683 + 0.0467061288654804 + <_> + + <_> + + + + <_>3 10 16 3 -1. + <_>3 10 8 3 2. + 0 + 0.1419731974601746 + -2.5096370372921228e-003 + 0.7545061111450195 + <_> + + <_> + + + + <_>1 10 16 3 -1. + <_>9 10 8 3 2. + 0 + 0.0975179374217987 + -6.9059049710631371e-003 + 0.6518443226814270 + <_> + + <_> + + + + <_>12 0 8 19 -1. + <_>12 0 4 19 2. + 0 + 0.0135673796758056 + -0.0763251930475235 + 0.0880545824766159 + <_> + + <_> + + + + <_>0 0 8 19 -1. + <_>4 0 4 19 2. + 0 + 0.0809814631938934 + 0.0155581096187234 + -0.3460162878036499 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + -4.7192731872200966e-003 + 0.0816200226545334 + -0.0460722893476486 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 2.0368969999253750e-003 + -0.0448176302015781 + 0.1286139041185379 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + -1.7878509825095534e-003 + 0.0437313318252563 + -0.0449959486722946 + <_> + + <_> + + + + <_>0 12 16 4 -1. + <_>0 12 8 2 2. + <_>8 14 8 2 2. + 0 + -7.1685528382658958e-003 + -0.1359799951314926 + 0.0387969911098480 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0674608871340752 + -0.2926574051380158 + 3.5135280340909958e-003 + <_> + + <_> + + + + <_>1 14 12 6 -1. + <_>1 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0155985001474619 + 0.2310566008090973 + -0.0224050693213940 + <_> + + <_> + + + + <_>3 3 14 14 -1. + <_>10 3 7 7 2. + <_>3 10 7 7 2. + 0 + -0.0210264790803194 + -0.1528383046388626 + 0.0315314494073391 + <_> + + <_> + + + + <_>3 6 6 12 -1. + <_>5 6 2 12 3. + 0 + -0.1055836006999016 + -0.6836603879928589 + 6.8997950293123722e-003 + <_> + + <_> + + + + <_>5 12 12 6 -1. + <_>9 12 4 6 3. + 0 + -3.6966579500585794e-003 + 0.0343151502311230 + -0.0489227995276451 + <_> + + <_> + + + + <_>1 8 14 6 -1. + <_>1 8 7 3 2. + <_>8 11 7 3 2. 
+ 0 + -6.0826627304777503e-004 + -0.0526384301483631 + 0.0895469486713409 + <_> + + <_> + + + + <_>8 7 12 10 -1. + <_>14 7 6 5 2. + <_>8 12 6 5 2. + 0 + -0.0289365407079458 + 0.0418184809386730 + -0.0138181699439883 + <_> + + <_> + + + + <_>0 7 12 10 -1. + <_>0 7 6 5 2. + <_>6 12 6 5 2. + 0 + -5.8082528412342072e-003 + 0.0678747966885567 + -0.0855787992477417 + <_> + + <_> + + + + <_>9 2 6 18 -1. + <_>12 2 3 9 2. + <_>9 11 3 9 2. + 0 + -0.0460953786969185 + -0.1258478015661240 + 0.0204669702798128 + <_> + + <_> + + + + <_>1 10 8 10 -1. + <_>1 10 4 5 2. + <_>5 15 4 5 2. + 0 + 0.0529729202389717 + -0.0124532599002123 + 0.3456504940986633 + <_> + + <_> + + + + <_>4 14 12 4 -1. + <_>4 16 12 2 2. + 0 + 0.0493515990674496 + 0.0109012397006154 + -0.4850698113441467 + <_> + + <_> + + + + <_>5 13 6 7 -1. + <_>7 13 2 7 3. + 0 + 0.0443778000771999 + 9.9294837564229965e-003 + -0.4387789964675903 + <_> + + <_> + + + + <_>5 2 15 5 -1. + <_>10 2 5 5 3. + 0 + -0.1146489009261131 + 0.2687459886074066 + -9.2000560835003853e-003 + <_> + + <_> + + + + <_>5 4 9 14 -1. + <_>5 11 9 7 2. + 0 + 0.1688783019781113 + 5.7101310230791569e-003 + -0.8597288131713867 + <_> + + <_> + + + + <_>8 0 11 4 -1. + <_>8 2 11 2 2. + 0 + 0.0511980988085270 + -8.5723921656608582e-003 + 0.1339516937732697 + <_> + + <_> + + + + <_>0 14 16 6 -1. + <_>0 16 16 2 3. + 0 + -3.0789880547672510e-003 + -0.1033876016736031 + 0.0434594787657261 + <_> + + <_> + + + + <_>10 14 8 6 -1. + <_>10 16 8 2 3. + 0 + 0.0472231283783913 + 8.1934239715337753e-003 + -0.4380340874195099 + <_> + + <_> + + + + <_>0 15 13 3 -1. + <_>0 16 13 1 3. + 0 + -7.6270569115877151e-003 + 0.1871389001607895 + -0.0246602501720190 + <_> + + <_> + + + + <_>5 8 15 3 -1. + <_>5 9 15 1 3. + 0 + 5.4106907919049263e-003 + 0.0410998314619064 + -0.0788682326674461 + <_> + + <_> + + + + <_>0 8 19 3 -1. + <_>0 9 19 1 3. + 0 + -1.4900229871273041e-003 + -0.2011504024267197 + 0.0318981595337391 + <_> + + <_> + + + + <_>8 16 8 4 -1. + <_>8 16 4 4 2. + 0 + -0.0838316082954407 + 0.5801793932914734 + -5.2973427809774876e-003 + <_> + + <_> + + + + <_>4 16 8 4 -1. + <_>8 16 4 4 2. + 0 + 6.2233800999820232e-003 + -0.0397860594093800 + 0.1228395029902458 + <_> + + <_> + + + + <_>9 5 10 9 -1. + <_>9 8 10 3 3. + 0 + 0.1147508025169373 + -0.0119754197075963 + 0.2158671021461487 + <_> + + <_> + + + + <_>1 5 10 9 -1. + <_>1 8 10 3 3. + 0 + -1.5253260498866439e-003 + 0.1380452960729599 + -0.0399418808519840 + <_> + + <_> + + + + <_>4 7 14 2 -1. + <_>4 7 7 2 2. + 0 + -5.2878521382808685e-003 + -0.1279065012931824 + 0.0328935608267784 + <_> + + <_> + + + + <_>2 7 13 2 -1. + <_>2 8 13 1 2. + 0 + 8.9670647867023945e-004 + -0.1248105987906456 + 0.0445442497730255 + <_> + + <_> + + + + <_>6 5 8 4 -1. + <_>6 7 8 2 2. + 0 + 0.0384216606616974 + 7.7155791223049164e-003 + -0.6557546854019165 + <_> + + <_> + + + + <_>5 12 9 5 -1. + <_>8 12 3 5 3. + 0 + -9.3785318313166499e-004 + 0.0556085109710693 + -0.0898769125342369 + <_> + + <_> + + + + <_>3 6 14 3 -1. + <_>3 7 14 1 3. + 0 + 1.9965849351137877e-003 + -0.0252976100891829 + 0.1941318064928055 + <_> + + <_> + + + + <_>7 2 4 12 -1. + <_>7 6 4 4 3. + 0 + 4.5782068627886474e-004 + 0.0390891991555691 + -0.1290857046842575 + <_> + + <_> + + + + <_>2 4 16 4 -1. + <_>2 6 16 2 2. + 0 + 3.8373940624296665e-003 + -0.0287488698959351 + 0.1942975074052811 + <_> + + <_> + + + + <_>1 4 9 4 -1. + <_>1 6 9 2 2. + 0 + 3.7142829387448728e-004 + 0.0382723584771156 + -0.1375918984413147 + <_> + + <_> + + + + <_>9 4 11 4 -1. + <_>9 6 11 2 2. 
+ 0 + 7.5116259977221489e-003 + -0.0144611299037933 + 0.1265694946050644 + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. + <_>8 9 4 4 2. + 0 + -0.0503628402948380 + 0.3518357872962952 + -0.0140518601983786 + <_> + + <_> + + + + <_>1 5 18 3 -1. + <_>7 5 6 3 3. + 0 + 0.0399216413497925 + 0.0272804293781519 + -0.1995819956064224 + <_> + + <_> + + + + <_>1 0 15 7 -1. + <_>6 0 5 7 3. + 0 + 0.2260525971651077 + -6.8001961335539818e-003 + 0.7300689816474915 + <_> + + <_> + + + + <_>12 0 5 15 -1. + <_>12 5 5 5 3. + 0 + 0.1108177974820137 + 4.3370737694203854e-003 + -0.8682916164398193 + <_> + + <_> + + + + <_>3 0 5 15 -1. + <_>3 5 5 5 3. + 0 + -9.7494889050722122e-003 + -0.0637406632304192 + 0.0845379978418350 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + -2.2887689992785454e-003 + 0.0996540188789368 + -0.0415654182434082 + <_> + + <_> + + + + <_>8 3 4 7 -1. + <_>10 3 2 7 2. + 0 + 2.0008319988846779e-003 + -0.0556506998836994 + 0.1070986986160278 + <_> + + <_> + + + + <_>4 6 12 11 -1. + <_>8 6 4 11 3. + 0 + -0.0151600502431393 + -0.1409876048564911 + 0.0387415997684002 + <_> + + <_> + + + + <_>1 7 18 4 -1. + <_>1 9 18 2 2. + 0 + -6.3132969662547112e-003 + -1. + 4.4605308212339878e-003 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + -0.0139700099825859 + 0.1248108968138695 + -0.0214258302003145 + <_> + + <_> + + + + <_>7 2 6 5 -1. + <_>10 2 3 5 2. + 0 + -0.0443212799727917 + -0.5334007143974304 + 0.0101652396842837 + <_> + + <_> + + + + <_>9 0 4 7 -1. + <_>9 0 2 7 2. + 0 + 1.4885979471728206e-003 + -0.0488686002790928 + 0.0360779017210007 + <_> + + <_> + + + + <_>0 0 7 6 -1. + <_>0 2 7 2 3. + 0 + 0.0651396811008453 + 7.6331058517098427e-003 + -0.5878164172172546 + <_> + + <_> + + + + <_>13 0 7 6 -1. + <_>13 2 7 2 3. + 0 + -0.0207414105534554 + -0.2965827882289887 + 0.0186228007078171 + -1.2940989732742310 + 38 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.0191887393593788 + -0.2115039974451065 + 0.1328652948141098 + <_> + + <_> + + + + <_>5 4 15 4 -1. + <_>5 6 15 2 2. + 0 + -8.1222038716077805e-003 + 0.0924910828471184 + -0.1758511960506439 + <_> + + <_> + + + + <_>5 5 6 5 -1. + <_>8 5 3 5 2. + 0 + 1.5851219650357962e-003 + -0.2856569886207581 + 0.0667105689644814 + <_> + + <_> + + + + <_>12 1 6 11 -1. + <_>14 1 2 11 3. + 0 + -4.3140850029885769e-003 + -0.1388522982597351 + 0.0526946894824505 + <_> + + <_> + + + + <_>0 11 20 3 -1. + <_>0 12 20 1 3. + 0 + -1.7131429631263018e-003 + 0.1313561052083969 + -0.1314910948276520 + <_> + + <_> + + + + <_>12 1 6 11 -1. + <_>14 1 2 11 3. + 0 + 0.0684473663568497 + 9.3052154406905174e-003 + -0.2506326138973236 + <_> + + <_> + + + + <_>2 1 6 11 -1. + <_>4 1 2 11 3. + 0 + -2.4445978924632072e-003 + -0.1720553040504456 + 0.0983228236436844 + <_> + + <_> + + + + <_>10 9 4 8 -1. + <_>10 13 4 4 2. + 0 + 1.0310600046068430e-003 + 0.0230391602963209 + -0.2752762138843536 + <_> + + <_> + + + + <_>0 7 7 6 -1. + <_>0 9 7 2 3. + 0 + 7.4603251414373517e-004 + -0.2327678054571152 + 0.0526930093765259 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -6.6399492789059877e-004 + 0.0689907819032669 + -0.0846877098083496 + <_> + + <_> + + + + <_>7 0 3 13 -1. + <_>8 0 1 13 3. + 0 + -4.0997468749992549e-004 + 0.1050138026475906 + -0.1081900969147682 + <_> + + <_> + + + + <_>15 7 5 6 -1. + <_>15 10 5 3 2. + 0 + -1.8094549886882305e-003 + -0.1817883998155594 + 0.0441841408610344 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. 
+ 0 + 9.3385757645592093e-004 + -0.1462268978357315 + 0.0727264434099197 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 14 4 4 2. + 0 + -3.8197741378098726e-004 + 0.0240099392831326 + -0.1729580014944077 + <_> + + <_> + + + + <_>0 7 5 6 -1. + <_>0 10 5 3 2. + 0 + -1.4950280310586095e-003 + -0.1940338015556335 + 0.0488079190254211 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + -0.0101591004058719 + 0.1917389929294586 + -0.0527490712702274 + <_> + + <_> + + + + <_>2 0 14 3 -1. + <_>2 1 14 1 3. + 0 + 5.9903519286308438e-005 + -0.1079154983162880 + 0.0909881666302681 + <_> + + <_> + + + + <_>4 4 13 2 -1. + <_>4 5 13 1 2. + 0 + -0.0319675505161285 + 0.4110988974571228 + -0.0226506404578686 + <_> + + <_> + + + + <_>0 18 20 2 -1. + <_>0 19 20 1 2. + 0 + 0.0143432701006532 + 0.0243155397474766 + -0.4268015027046204 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + 0.0110395299270749 + -0.0627170130610466 + 0.1133053004741669 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -8.4228850901126862e-003 + -0.2136930972337723 + 0.0420592017471790 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + -0.0205498393625021 + 0.1516163051128388 + -0.0245941393077374 + <_> + + <_> + + + + <_>0 0 6 5 -1. + <_>3 0 3 5 2. + 0 + -6.5411031246185303e-003 + 0.1488362997770309 + -0.0611793398857117 + <_> + + <_> + + + + <_>6 0 8 14 -1. + <_>10 0 4 7 2. + <_>6 7 4 7 2. + 0 + -0.0133244004100561 + -0.2079197019338608 + 0.0483333095908165 + <_> + + <_> + + + + <_>0 2 6 12 -1. + <_>2 2 2 12 3. + 0 + 0.0701112672686577 + -0.0268632192164660 + 0.3632225990295410 + <_> + + <_> + + + + <_>6 12 9 6 -1. + <_>9 12 3 6 3. + 0 + -2.6973750209435821e-004 + 0.0608766600489616 + -0.1127237007021904 + <_> + + <_> + + + + <_>2 0 7 4 -1. + <_>2 2 7 2 2. + 0 + -1.3509000418707728e-003 + -0.1855207979679108 + 0.0521549582481384 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + -0.0280831903219223 + 0.3511188030242920 + -0.0235963296145201 + <_> + + <_> + + + + <_>5 0 6 10 -1. + <_>5 0 3 5 2. + <_>8 5 3 5 2. + 0 + -0.0100032901391387 + -0.2905848026275635 + 0.0321256890892982 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -1.6111029544845223e-003 + 0.0981136709451675 + -0.0522037111222744 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + -0.0184119008481503 + -0.1808266937732697 + 0.0545367002487183 + <_> + + <_> + + + + <_>18 6 2 13 -1. + <_>18 6 1 13 2. + 0 + -0.0717388167977333 + -0.7665498852729797 + 3.3518690615892410e-003 + <_> + + <_> + + + + <_>0 6 2 13 -1. + <_>1 6 1 13 2. + 0 + -2.7943260502070189e-003 + 0.1587136983871460 + -0.0642718002200127 + <_> + + <_> + + + + <_>16 7 4 13 -1. + <_>16 7 2 13 2. + 0 + -0.1687474995851517 + -0.6995618939399719 + 4.8861699178814888e-003 + <_> + + <_> + + + + <_>6 5 7 6 -1. + <_>6 7 7 2 3. + 0 + -1.2672400334849954e-003 + 0.0316160395741463 + -0.2495326995849609 + <_> + + <_> + + + + <_>6 11 10 6 -1. + <_>11 11 5 3 2. + <_>6 14 5 3 2. + 0 + 0.0208077505230904 + 0.0170534104108810 + -0.2433141022920609 + <_> + + <_> + + + + <_>5 9 6 5 -1. + <_>8 9 3 5 2. + 0 + -1.5869849594309926e-003 + 0.0931710898876190 + -0.0813619270920753 + <_> + + <_> + + + + <_>10 3 4 15 -1. + <_>10 3 2 15 2. + 0 + -0.0100146904587746 + -0.2778961956501007 + 0.0265692397952080 + <_> + + <_> + + + + <_>6 3 4 15 -1. + <_>8 3 2 15 2. + 0 + -5.7948171161115170e-003 + -0.2228773981332779 + 0.0359756611287594 + <_> + + <_> + + + + <_>6 7 13 2 -1. + <_>6 8 13 1 2. 
+ 0 + 2.7189950924366713e-003 + -0.0906319096684456 + 0.0568204000592232 + <_> + + <_> + + + + <_>2 15 16 4 -1. + <_>2 15 8 2 2. + <_>10 17 8 2 2. + 0 + 0.0388451591134071 + 0.0122808599844575 + -0.5852134823799133 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + -0.0141586801037192 + 0.1815387010574341 + -0.0311094298958778 + <_> + + <_> + + + + <_>0 7 4 13 -1. + <_>2 7 2 13 2. + 0 + -0.1827860027551651 + -0.9001380801200867 + 7.6544750481843948e-003 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + 0.0275884196162224 + -0.0124600399285555 + 0.2006936967372894 + <_> + + <_> + + + + <_>5 11 10 9 -1. + <_>5 14 10 3 3. + 0 + -0.0147844301536679 + -0.0899104923009872 + 0.0816486775875092 + <_> + + <_> + + + + <_>17 0 3 13 -1. + <_>18 0 1 13 3. + 0 + 0.1162571981549263 + 2.3692469112575054e-003 + -0.9999806880950928 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + 3.5341090988367796e-003 + -0.0617605410516262 + 0.1349063962697983 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 12 4 6 2. + 0 + 5.1878788508474827e-003 + 0.0187458600848913 + -0.1744917035102844 + <_> + + <_> + + + + <_>0 3 2 16 -1. + <_>0 11 2 8 2. + 0 + 0.0794573575258255 + -0.0234029907733202 + 0.3350220024585724 + <_> + + <_> + + + + <_>0 15 20 4 -1. + <_>10 15 10 2 2. + <_>0 17 10 2 2. + 0 + 0.0276843793690205 + 0.0236639101058245 + -0.3325636088848114 + <_> + + <_> + + + + <_>0 15 9 4 -1. + <_>0 17 9 2 2. + 0 + -4.4806320220232010e-003 + -0.1465875059366226 + 0.0473768115043640 + <_> + + <_> + + + + <_>9 14 10 6 -1. + <_>14 14 5 3 2. + <_>9 17 5 3 2. + 0 + 5.6939688511192799e-003 + -0.0567761212587357 + 0.0675808563828468 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 7.7299480326473713e-003 + -0.0311566498130560 + 0.2310259044170380 + <_> + + <_> + + + + <_>4 15 13 3 -1. + <_>4 16 13 1 3. + 0 + 3.9786100387573242e-003 + -0.0568824410438538 + 0.1327152997255325 + <_> + + <_> + + + + <_>0 0 18 4 -1. + <_>0 0 9 2 2. + <_>9 2 9 2 2. + 0 + -0.0112758800387383 + -0.2093864977359772 + 0.0352914594113827 + <_> + + <_> + + + + <_>6 5 8 15 -1. + <_>6 10 8 5 3. + 0 + -2.4308220017701387e-003 + -0.2017636001110077 + 0.0345139317214489 + <_> + + <_> + + + + <_>0 0 6 7 -1. + <_>2 0 2 7 3. + 0 + 5.7369591668248177e-003 + -0.0556071586906910 + 0.1153208985924721 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>16 1 2 12 3. + 0 + 4.6170800924301147e-003 + -0.0560835003852844 + 0.0817629173398018 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -4.7089671716094017e-003 + -0.1335121989250183 + 0.0562960803508759 + <_> + + <_> + + + + <_>18 1 2 13 -1. + <_>18 1 1 13 2. + 0 + -0.0326880700886250 + 0.2792238891124725 + -0.0108676599338651 + <_> + + <_> + + + + <_>0 1 10 19 -1. + <_>5 1 5 19 2. + 0 + 0.0886861979961395 + 0.0182682201266289 + -0.3563739061355591 + <_> + + <_> + + + + <_>14 2 4 10 -1. + <_>14 2 2 10 2. + 0 + 4.5751677826046944e-003 + -0.0515584610402584 + 0.0639488101005554 + <_> + + <_> + + + + <_>0 3 4 16 -1. + <_>0 3 2 8 2. + <_>2 11 2 8 2. + 0 + 4.9765850417315960e-003 + -0.0546845905482769 + 0.1190711036324501 + <_> + + <_> + + + + <_>6 0 10 6 -1. + <_>11 0 5 3 2. + <_>6 3 5 3 2. + 0 + -6.4881290309131145e-003 + -0.0991211235523224 + 0.0265088491141796 + <_> + + <_> + + + + <_>1 14 10 6 -1. + <_>1 14 5 3 2. + <_>6 17 5 3 2. + 0 + 2.4523450993001461e-003 + -0.0950459465384483 + 0.0668029263615608 + <_> + + <_> + + + + <_>8 7 5 9 -1. + <_>8 10 5 3 3. + 0 + 7.0354789495468140e-003 + 0.1070559024810791 + -0.0623950995504856 + <_> + + <_> + + + + <_>2 2 4 10 -1. 
+ <_>4 2 2 10 2. + 0 + 0.0427467897534370 + -0.0160921793431044 + 0.4325619935989380 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + -4.5301730278879404e-004 + 0.0364205688238144 + -0.0993228927254677 + <_> + + <_> + + + + <_>5 6 10 12 -1. + <_>5 6 5 6 2. + <_>10 12 5 6 2. + 0 + -5.2631930448114872e-003 + -0.1141674965620041 + 0.0572602190077305 + <_> + + <_> + + + + <_>9 2 4 12 -1. + <_>9 6 4 4 3. + 0 + 1.0581909446045756e-003 + 0.0332204885780811 + -0.1183122023940086 + <_> + + <_> + + + + <_>2 0 15 6 -1. + <_>2 3 15 3 2. + 0 + 0.0250889491289854 + -0.0606550201773643 + 0.1260174065828323 + <_> + + <_> + + + + <_>6 0 13 8 -1. + <_>6 4 13 4 2. + 0 + 0.2425215989351273 + 2.2060840856283903e-003 + -1.0000120401382446 + <_> + + <_> + + + + <_>1 0 13 8 -1. + <_>1 4 13 4 2. + 0 + -0.1439307928085327 + 0.3741979897022247 + -0.0222521107643843 + <_> + + <_> + + + + <_>11 4 2 14 -1. + <_>11 11 2 7 2. + 0 + -6.0972762294113636e-003 + -0.1103809997439385 + 0.0459969602525234 + <_> + + <_> + + + + <_>0 1 20 3 -1. + <_>0 2 20 1 3. + 0 + 6.1375470831990242e-003 + 0.0383078083395958 + -0.1808677017688751 + <_> + + <_> + + + + <_>8 5 6 10 -1. + <_>11 5 3 5 2. + <_>8 10 3 5 2. + 0 + -3.6617079749703407e-003 + 0.0384399183094502 + -0.0625407919287682 + <_> + + <_> + + + + <_>4 8 10 12 -1. + <_>9 8 5 12 2. + 0 + -0.1585485041141510 + 0.3446939885616303 + -0.0198375005275011 + <_> + + <_> + + + + <_>8 5 6 5 -1. + <_>8 5 3 5 2. + 0 + 0.0672192871570587 + 9.5165139064192772e-003 + -0.5020645856857300 + <_> + + <_> + + + + <_>6 5 6 5 -1. + <_>9 5 3 5 2. + 0 + 2.2499680053442717e-003 + -0.1306392997503281 + 0.0648329332470894 + <_> + + <_> + + + + <_>13 0 6 7 -1. + <_>15 0 2 7 3. + 0 + 0.0846267864108086 + 5.9339799918234348e-003 + -0.4151659011840820 + <_> + + <_> + + + + <_>1 0 6 7 -1. + <_>3 0 2 7 3. + 0 + -9.5411221263930202e-004 + -0.0937907472252846 + 0.0754866078495979 + <_> + + <_> + + + + <_>12 14 7 6 -1. + <_>12 16 7 2 3. + 0 + -7.6813949272036552e-003 + -0.1482196003198624 + 0.0290105808526278 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>6 17 6 3 3. + 0 + -0.0255933199077845 + 0.1485957950353622 + -0.0471959300339222 + <_> + + <_> + + + + <_>6 7 12 8 -1. + <_>10 7 4 8 3. + 0 + 0.0215083695948124 + 0.0237826202064753 + -0.0966592878103256 + <_> + + <_> + + + + <_>0 14 18 5 -1. + <_>6 14 6 5 3. + 0 + 0.0344631001353264 + -0.0374100692570210 + 0.2201530039310455 + <_> + + <_> + + + + <_>0 13 20 4 -1. + <_>10 13 10 2 2. + <_>0 15 10 2 2. + 0 + -0.0378603003919125 + -0.5004746913909912 + 0.0140598695725203 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 1.2028450146317482e-003 + -0.0650870576500893 + 0.0895834863185883 + <_> + + <_> + + + + <_>11 11 7 4 -1. + <_>11 13 7 2 2. + 0 + 0.0167535208165646 + 4.9179811030626297e-003 + -0.4303090870380402 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + 1.6640779795125127e-003 + 0.0408074297010899 + -0.1446996033191681 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 3.4473428968340158e-003 + -0.0399101786315441 + 0.1527296006679535 + <_> + + <_> + + + + <_>0 8 8 6 -1. + <_>0 10 8 2 3. + 0 + 8.9918142184615135e-003 + 0.0710712671279907 + -0.0861699134111404 + <_> + + <_> + + + + <_>4 8 15 2 -1. + <_>4 9 15 1 2. + 0 + 8.3185202674940228e-004 + -0.2573918998241425 + 0.0179410893470049 + <_> + + <_> + + + + <_>0 9 6 5 -1. + <_>3 9 3 5 2. + 0 + -6.8142730742692947e-003 + 0.1382316052913666 + -0.0539945401251316 + <_> + + <_> + + + + <_>13 9 6 5 -1. + <_>13 9 3 5 2. 
+ 0 + 2.9746210202574730e-003 + -0.0415502600371838 + 0.0398397706449032 + <_> + + <_> + + + + <_>1 9 6 5 -1. + <_>4 9 3 5 2. + 0 + 2.5836620479822159e-003 + -0.0706564933061600 + 0.0950455069541931 + <_> + + <_> + + + + <_>13 0 4 14 -1. + <_>15 0 2 7 2. + <_>13 7 2 7 2. + 0 + 2.7143809711560607e-004 + 0.0580700710415840 + -0.1278176009654999 + <_> + + <_> + + + + <_>0 0 14 19 -1. + <_>7 0 7 19 2. + 0 + 0.3541829884052277 + 5.4909070022404194e-003 + -0.9796069860458374 + <_> + + <_> + + + + <_>13 0 4 14 -1. + <_>15 0 2 7 2. + <_>13 7 2 7 2. + 0 + 0.0253186505287886 + -0.0144109698012471 + 0.2621912956237793 + <_> + + <_> + + + + <_>3 0 4 14 -1. + <_>3 0 2 7 2. + <_>5 7 2 7 2. + 0 + -2.2658439411316067e-004 + 0.0529978498816490 + -0.1162934973835945 + <_> + + <_> + + + + <_>13 4 7 6 -1. + <_>13 6 7 2 3. + 0 + 6.8859090097248554e-003 + 0.0164373107254505 + -0.2034949064254761 + <_> + + <_> + + + + <_>2 4 14 3 -1. + <_>2 5 14 1 3. + 0 + 0.0116074597463012 + -0.0366510115563869 + 0.1518401056528091 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + -4.8253959976136684e-003 + -0.2347615063190460 + 0.0379140116274357 + <_> + + <_> + + + + <_>7 6 4 12 -1. + <_>7 12 4 6 2. + 0 + 2.5656020734459162e-003 + 0.0351856388151646 + -0.1854071021080017 + <_> + + <_> + + + + <_>6 2 14 18 -1. + <_>13 2 7 9 2. + <_>6 11 7 9 2. + 0 + 0.1260139942169190 + -9.8542850464582443e-003 + 0.2552069127559662 + <_> + + <_> + + + + <_>5 9 9 6 -1. + <_>5 12 9 3 2. + 0 + 2.7164958883076906e-003 + -0.0217484403401613 + 0.2546752989292145 + <_> + + <_> + + + + <_>0 1 20 18 -1. + <_>10 1 10 9 2. + <_>0 10 10 9 2. + 0 + 0.3235602974891663 + 8.8657345622777939e-003 + -0.7038357257843018 + <_> + + <_> + + + + <_>4 10 7 4 -1. + <_>4 12 7 2 2. + 0 + -8.4016058826819062e-004 + 0.0368313603103161 + -0.1495326012372971 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + 3.3291990403085947e-003 + 0.0481858402490616 + -0.1229047030210495 + <_> + + <_> + + + + <_>1 0 14 12 -1. + <_>1 4 14 4 3. + 0 + 0.2113053947687149 + 6.5245870500802994e-003 + -0.8829386234283447 + <_> + + <_> + + + + <_>9 0 6 8 -1. + <_>9 0 3 8 2. + 0 + 5.0388509407639503e-003 + -0.0670799463987350 + 0.0378497093915939 + <_> + + <_> + + + + <_>4 2 12 5 -1. + <_>8 2 4 5 3. + 0 + -0.0278623998165131 + 0.3346948921680450 + -0.0188165009021759 + <_> + + <_> + + + + <_>12 0 2 15 -1. + <_>12 0 1 15 2. + 0 + 3.8636629469692707e-003 + 0.0436447300016880 + -0.1748148947954178 + <_> + + <_> + + + + <_>4 0 8 10 -1. + <_>8 0 4 10 2. + 0 + 0.1048030033707619 + -0.0157375298440456 + 0.4209423959255219 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -3.4130848944187164e-003 + -0.1083557009696960 + 0.0437177903950214 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0463969707489014 + -0.7568007707595825 + 8.6701400578022003e-003 + <_> + + <_> + + + + <_>9 2 2 13 -1. + <_>9 2 1 13 2. + 0 + 5.3708078339695930e-003 + -0.0417978018522263 + 0.1482471972703934 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -6.1126388609409332e-003 + 0.1867371946573257 + -0.0433874912559986 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0425093211233616 + 0.0116906799376011 + -0.4374065995216370 + <_> + + <_> + + + + <_>0 4 18 10 -1. + <_>0 4 9 5 2. + <_>9 9 9 5 2. + 0 + 0.0104730203747749 + 0.0431436300277710 + -0.1565439999103546 + <_> + + <_> + + + + <_>12 9 7 6 -1. + <_>12 11 7 2 3. + 0 + -0.0472239591181278 + -0.7448353767395020 + 3.4918629098683596e-003 + <_> + + <_> + + + + <_>1 9 7 6 -1. + <_>1 11 7 2 3. 
+ 0 + 0.0530903600156307 + 0.0104081500321627 + -0.5349944829940796 + <_> + + <_> + + + + <_>4 3 16 6 -1. + <_>12 3 8 3 2. + <_>4 6 8 3 2. + 0 + -7.0432561915367842e-004 + 0.0333841703832150 + -0.0737060308456421 + <_> + + <_> + + + + <_>3 4 5 9 -1. + <_>3 7 5 3 3. + 0 + 7.5942431576550007e-003 + -0.0291070491075516 + 0.1946886032819748 + <_> + + <_> + + + + <_>8 4 12 5 -1. + <_>12 4 4 5 3. + 0 + 0.0226769894361496 + 0.0338038206100464 + -0.2762761116027832 + <_> + + <_> + + + + <_>3 9 8 4 -1. + <_>3 11 8 2 2. + 0 + 6.6533521749079227e-003 + -0.0265782400965691 + 0.2428331971168518 + <_> + + <_> + + + + <_>11 0 2 15 -1. + <_>11 0 1 15 2. + 0 + 3.7712270859628916e-003 + 0.0265542995184660 + -0.0649529173970222 + <_> + + <_> + + + + <_>7 0 2 15 -1. + <_>8 0 1 15 2. + 0 + -2.0740530453622341e-003 + -0.1796897053718567 + 0.0315321609377861 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -1.5632519498467445e-003 + 0.0531096793711185 + -0.0874156281352043 + <_> + + <_> + + + + <_>8 3 4 8 -1. + <_>10 3 2 8 2. + 0 + 0.0125408899039030 + -0.0341364592313766 + 0.2209753990173340 + <_> + + <_> + + + + <_>9 13 6 7 -1. + <_>11 13 2 7 3. + 0 + -3.2660199794918299e-003 + -0.0552616082131863 + 0.0326695591211319 + <_> + + <_> + + + + <_>4 14 9 5 -1. + <_>7 14 3 5 3. + 0 + -8.2185603678226471e-003 + -0.1447837948799133 + 0.0557439289987087 + <_> + + <_> + + + + <_>15 3 4 17 -1. + <_>15 3 2 17 2. + 0 + -0.0558110401034355 + 0.1723794043064117 + -0.0144565198570490 + <_> + + <_> + + + + <_>1 6 4 13 -1. + <_>3 6 2 13 2. + 0 + -0.1472315937280655 + -0.8139231204986572 + 7.4356291443109512e-003 + <_> + + <_> + + + + <_>11 12 4 7 -1. + <_>11 12 2 7 2. + 0 + -5.8468529023230076e-003 + -0.0690434426069260 + 0.0194567907601595 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>2 1 2 7 3. + 0 + 0.0194622203707695 + -0.0354722291231155 + 0.1666630059480667 + <_> + + <_> + + + + <_>9 12 6 7 -1. + <_>11 12 2 7 3. + 0 + 0.0583534687757492 + 3.0551329255104065e-003 + -0.3928912878036499 + <_> + + <_> + + + + <_>5 12 6 7 -1. + <_>7 12 2 7 3. + 0 + 0.0437858290970325 + 0.0135746300220490 + -0.4615235924720764 + <_> + + <_> + + + + <_>7 7 6 8 -1. + <_>9 7 2 8 3. + 0 + -0.0519043505191803 + 0.6380243897438049 + -9.6664745360612869e-003 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + -7.7811058145016432e-004 + -0.0993032231926918 + 0.0560946017503738 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 4.9657518975436687e-003 + 0.0414193682372570 + -0.1127481982111931 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -5.4516079835593700e-003 + 0.1739906072616577 + -0.0411477312445641 + <_> + + <_> + + + + <_>5 13 13 3 -1. + <_>5 14 13 1 3. + 0 + 5.0428751856088638e-003 + -0.0412552207708359 + 0.1379422992467880 + <_> + + <_> + + + + <_>2 9 14 3 -1. + <_>2 10 14 1 3. + 0 + -1.6985220136120915e-003 + -0.2287479043006897 + 0.0252749808132648 + <_> + + <_> + + + + <_>8 7 7 4 -1. + <_>8 9 7 2 2. + 0 + 0.0827642381191254 + 3.3066510222852230e-003 + -0.6911343932151794 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 3.9285849779844284e-003 + -0.0790433585643768 + 0.0662188529968262 + <_> + + <_> + + + + <_>13 12 5 6 -1. + <_>13 15 5 3 2. + 0 + -0.0306012406945229 + -0.2651745080947876 + 0.0164678506553173 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0199411604553461 + 0.1543180942535400 + -0.0361006893217564 + <_> + + <_> + + + + <_>4 5 16 3 -1. + <_>4 5 8 3 2. 
+ 0 + 0.0805200636386871 + 0.0170159190893173 + -0.3344888091087341 + <_> + + <_> + + + + <_>5 3 4 14 -1. + <_>5 10 4 7 2. + 0 + 0.0703238472342491 + 0.0171224400401115 + -0.3330214023590088 + <_> + + <_> + + + + <_>4 13 15 5 -1. + <_>9 13 5 5 3. + 0 + -0.0528509393334389 + 0.0624214000999928 + -0.0146901998668909 + <_> + + <_> + + + + <_>0 3 14 2 -1. + <_>0 4 14 1 2. + 0 + -7.1594159817323089e-004 + -0.1133515015244484 + 0.0522607900202274 + <_> + + <_> + + + + <_>4 13 15 5 -1. + <_>9 13 5 5 3. + 0 + 0.2146997004747391 + 9.9299731664359570e-004 + -0.9999758005142212 + <_> + + <_> + + + + <_>1 13 15 5 -1. + <_>6 13 5 5 3. + 0 + 0.0870425924658775 + -0.0123297600075603 + 0.5026066899299622 + <_> + + <_> + + + + <_>12 0 8 6 -1. + <_>12 2 8 2 3. + 0 + -5.8731262106448412e-004 + -0.0993464663624763 + 0.0517056100070477 + <_> + + <_> + + + + <_>3 10 6 5 -1. + <_>6 10 3 5 2. + 0 + -0.0442152209579945 + -0.3936890065670013 + 0.0139208501204848 + <_> + + <_> + + + + <_>4 7 14 8 -1. + <_>11 7 7 4 2. + <_>4 11 7 4 2. + 0 + -0.0876762270927429 + 0.3015744090080261 + -6.8702381104230881e-003 + <_> + + <_> + + + + <_>2 7 14 8 -1. + <_>2 7 7 4 2. + <_>9 11 7 4 2. + 0 + -0.0484539903700352 + 0.2547787129878998 + -0.0224577505141497 + <_> + + <_> + + + + <_>11 0 2 20 -1. + <_>11 0 1 20 2. + 0 + -2.1567570511251688e-003 + -0.1356289982795715 + 0.0317253991961479 + <_> + + <_> + + + + <_>7 0 2 20 -1. + <_>8 0 1 20 2. + 0 + 3.9050900377333164e-003 + 0.0491008907556534 + -0.1186105981469154 + <_> + + <_> + + + + <_>10 5 6 8 -1. + <_>12 5 2 8 3. + 0 + -3.9808028377592564e-003 + 0.0483339093625546 + -0.0558970794081688 + <_> + + <_> + + + + <_>8 2 3 13 -1. + <_>9 2 1 13 3. + 0 + 2.9744929634034634e-003 + -0.0648024529218674 + 0.0935835018754005 + <_> + + <_> + + + + <_>3 2 14 4 -1. + <_>10 2 7 2 2. + <_>3 4 7 2 2. + 0 + 0.0258752293884754 + 0.0184876099228859 + -0.3343634903430939 + <_> + + <_> + + + + <_>7 5 6 7 -1. + <_>9 5 2 7 3. + 0 + -1.9373580580577254e-003 + 0.2200064957141876 + -0.0254049804061651 + <_> + + <_> + + + + <_>8 4 9 16 -1. + <_>11 4 3 16 3. + 0 + -0.0201716292649508 + -0.0782283097505569 + 0.0454627908766270 + <_> + + <_> + + + + <_>4 5 6 8 -1. + <_>6 5 2 8 3. + 0 + -0.0260881409049034 + 0.1763706952333450 + -0.0450972989201546 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>10 10 3 5 2. + <_>7 15 3 5 2. + 0 + -0.0268683005124331 + -0.3265641927719116 + 0.0179942306131125 + <_> + + <_> + + + + <_>5 11 5 6 -1. + <_>5 14 5 3 2. + 0 + -7.0211151614785194e-004 + 0.0396719984710217 + -0.1453354060649872 + <_> + + <_> + + + + <_>4 8 13 8 -1. + <_>4 12 13 4 2. + 0 + 8.3507681265473366e-003 + -0.0230517294257879 + 0.1885076016187668 + <_> + + <_> + + + + <_>0 9 10 6 -1. + <_>0 9 5 3 2. + <_>5 12 5 3 2. + 0 + 4.6823569573462009e-003 + 0.0299965608865023 + -0.2070102989673615 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 3.3109660726040602e-003 + 0.0565367303788662 + -0.1683558970689774 + <_> + + <_> + + + + <_>4 0 5 8 -1. + <_>4 4 5 4 2. + 0 + 7.6425541192293167e-003 + -0.0414239503443241 + 0.1255751997232437 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>8 6 4 5 2. + 0 + -2.4713110178709030e-003 + 0.0721561536192894 + -0.1076773032546043 + <_> + + <_> + + + + <_>6 3 7 10 -1. + <_>6 8 7 5 2. + 0 + -9.9495360627770424e-003 + -0.1818761974573135 + 0.0335672311484814 + <_> + + <_> + + + + <_>6 12 13 3 -1. + <_>6 13 13 1 3. + 0 + 1.9820800516754389e-003 + -0.0564887188374996 + 0.1074149012565613 + <_> + + <_> + + + + <_>2 13 13 3 -1. + <_>2 14 13 1 3. 
+ 0 + 0.0232544392347336 + -0.0165433492511511 + 0.3646667897701263 + <_> + + <_> + + + + <_>12 11 7 4 -1. + <_>12 13 7 2 2. + 0 + -0.0541779212653637 + -1. + 3.3418419770896435e-003 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + 6.1567849479615688e-004 + 0.0401593297719955 + -0.1646022051572800 + <_> + + <_> + + + + <_>9 12 9 4 -1. + <_>9 14 9 2 2. + 0 + -4.2699510231614113e-003 + -0.0569786205887794 + 0.0444809012115002 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 12 8 2 2. + <_>10 14 8 2 2. + 0 + 1.9749389030039310e-003 + 0.0592836812138557 + -0.1079126000404358 + <_> + + <_> + + + + <_>10 14 10 6 -1. + <_>15 14 5 3 2. + <_>10 17 5 3 2. + 0 + -5.8583128266036510e-003 + 0.1373405009508133 + -0.0342315211892128 + <_> + + <_> + + + + <_>4 1 8 8 -1. + <_>4 1 4 4 2. + <_>8 5 4 4 2. + 0 + -7.2995189111679792e-004 + -0.1007506027817726 + 0.0547331608831882 + <_> + + <_> + + + + <_>2 12 18 7 -1. + <_>8 12 6 7 3. + 0 + -0.0299307405948639 + 0.0638825595378876 + -0.0410270206630230 + <_> + + <_> + + + + <_>3 13 12 6 -1. + <_>3 13 6 3 2. + <_>9 16 6 3 2. + 0 + -0.0517387501895428 + -0.7271345853805542 + 7.4993381276726723e-003 + <_> + + <_> + + + + <_>4 12 13 4 -1. + <_>4 14 13 2 2. + 0 + 0.0240211896598339 + 7.8491801396012306e-003 + -0.5579447150230408 + <_> + + <_> + + + + <_>6 0 2 15 -1. + <_>7 0 1 15 2. + 0 + -3.7574321031570435e-003 + -0.1608687937259674 + 0.0310159903019667 + <_> + + <_> + + + + <_>4 2 16 18 -1. + <_>12 2 8 9 2. + <_>4 11 8 9 2. + 0 + -0.0626356825232506 + 0.0905778631567955 + -0.0290337707847357 + <_> + + <_> + + + + <_>1 16 18 4 -1. + <_>7 16 6 4 3. + 0 + 0.0193634293973446 + -0.0499205887317657 + 0.1283577978610992 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0350728891789913 + 0.2139184027910233 + -8.8168960064649582e-003 + <_> + + <_> + + + + <_>4 0 12 9 -1. + <_>8 0 4 9 3. + 0 + -0.0132433101534843 + 0.2334969937801361 + -0.0230880193412304 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + -0.0312908291816711 + -0.6949509978294373 + 9.3020889908075333e-003 + <_> + + <_> + + + + <_>4 9 6 6 -1. + <_>7 9 3 6 2. + 0 + 7.2391419671475887e-003 + 0.0284858494997025 + -0.1831077039241791 + <_> + + <_> + + + + <_>7 12 12 8 -1. + <_>13 12 6 4 2. + <_>7 16 6 4 2. + 0 + 6.6785318776965141e-003 + -0.0491329506039619 + 0.0541816912591457 + <_> + + <_> + + + + <_>1 12 12 8 -1. + <_>1 12 6 4 2. + <_>7 16 6 4 2. + 0 + -0.0368255712091923 + 0.3312020897865295 + -0.0213599298149347 + <_> + + <_> + + + + <_>0 10 20 9 -1. + <_>0 13 20 3 3. + 0 + -0.0455073416233063 + -0.1289349049329758 + 0.0495459884405136 + <_> + + <_> + + + + <_>4 5 10 6 -1. + <_>4 5 5 3 2. + <_>9 8 5 3 2. + 0 + 7.7639957889914513e-003 + -0.0362556204199791 + 0.1532140970230103 + <_> + + <_> + + + + <_>13 3 7 6 -1. + <_>13 5 7 2 3. + 0 + 0.0604176111519337 + 4.5740022324025631e-003 + -0.6754109263420105 + <_> + + <_> + + + + <_>8 1 4 14 -1. + <_>8 1 2 7 2. + <_>10 8 2 7 2. + 0 + 2.4624960497021675e-003 + 0.0536741614341736 + -0.1132654026150703 + <_> + + <_> + + + + <_>12 8 5 6 -1. + <_>12 11 5 3 2. + 0 + 7.3594506829977036e-005 + -0.0356489308178425 + 0.0254589691758156 + <_> + + <_> + + + + <_>3 8 5 6 -1. + <_>3 11 5 3 2. + 0 + -4.0958370082080364e-003 + 0.1556290984153748 + -0.0393906012177467 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 2.8689370083156973e-005 + -0.0848233029246330 + 0.0382542386651039 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. 
+ 0 + -4.6220528893172741e-003 + -0.1899452954530716 + 0.0335087589919567 + <_> + + <_> + + + + <_>2 0 18 4 -1. + <_>8 0 6 4 3. + 0 + -8.5343196988105774e-003 + 0.1121253967285156 + -0.0339684896171093 + <_> + + <_> + + + + <_>6 5 3 14 -1. + <_>6 12 3 7 2. + 0 + -0.0588038489222527 + -0.5124431252479553 + 0.0107895499095321 + <_> + + <_> + + + + <_>5 17 15 3 -1. + <_>10 17 5 3 3. + 0 + 0.0607199296355248 + -0.0125550301745534 + 0.2250975966453552 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>6 0 3 7 2. + 0 + 1.1038020020350814e-003 + -0.0962944924831390 + 0.0567274801433086 + <_> + + <_> + + + + <_>8 3 12 17 -1. + <_>8 3 6 17 2. + 0 + -3.8484560791403055e-003 + 0.0405734591186047 + -0.0253268592059612 + <_> + + <_> + + + + <_>0 2 16 12 -1. + <_>8 2 8 12 2. + 0 + -0.0107710501179099 + 0.0887356325984001 + -0.0556286796927452 + <_> + + <_> + + + + <_>7 6 6 12 -1. + <_>7 12 6 6 2. + 0 + 0.0120168095454574 + 0.0235662795603275 + -0.2459058016538620 + <_> + + <_> + + + + <_>8 8 4 8 -1. + <_>8 12 4 4 2. + 0 + -1.1656560236588120e-003 + -0.0374173000454903 + 0.1650328934192658 + <_> + + <_> + + + + <_>8 7 12 10 -1. + <_>14 7 6 5 2. + <_>8 12 6 5 2. + 0 + 0.0321376286447048 + 0.0142459701746702 + -0.2648085057735443 + <_> + + <_> + + + + <_>4 1 12 5 -1. + <_>10 1 6 5 2. + 0 + 0.0233316700905561 + -0.0352887213230133 + 0.1844782978296280 + <_> + + <_> + + + + <_>7 2 8 8 -1. + <_>11 2 4 4 2. + <_>7 6 4 4 2. + 0 + -0.0126853203400970 + -0.1175730973482132 + 0.0164369102567434 + <_> + + <_> + + + + <_>5 2 8 8 -1. + <_>5 2 4 4 2. + <_>9 6 4 4 2. + 0 + 7.3903938755393028e-005 + -0.1027147993445396 + 0.0743014365434647 + <_> + + <_> + + + + <_>3 14 14 6 -1. + <_>3 17 14 3 2. + 0 + -0.1092547029256821 + -0.8316531777381897 + 5.6438110768795013e-003 + <_> + + <_> + + + + <_>3 3 5 12 -1. + <_>3 7 5 4 3. + 0 + -0.1332435011863709 + 0.7772982120513916 + -8.3403270691633224e-003 + <_> + + <_> + + + + <_>15 4 5 6 -1. + <_>15 7 5 3 2. + 0 + 8.9381448924541473e-004 + -0.0595243014395237 + 0.0411730892956257 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 0.0103186499327421 + 0.0159264300018549 + -0.3163779079914093 + <_> + + <_> + + + + <_>15 4 5 9 -1. + <_>15 7 5 3 3. + 0 + -5.2297548390924931e-003 + -0.0711665600538254 + 0.0334892906248569 + <_> + + <_> + + + + <_>8 6 4 14 -1. + <_>8 6 2 7 2. + <_>10 13 2 7 2. + 0 + 0.0164096206426620 + -0.0264541208744049 + 0.1958996951580048 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + 0.0140687096863985 + -0.0393641404807568 + 0.1397742033004761 + <_> + + <_> + + + + <_>5 0 8 10 -1. + <_>5 0 4 5 2. + <_>9 5 4 5 2. + 0 + 6.6486410796642303e-003 + 0.0640708282589912 + -0.1049339994788170 + <_> + + <_> + + + + <_>9 12 6 7 -1. + <_>11 12 2 7 3. + 0 + -0.0180306192487478 + 0.0839429125189781 + -0.0133991595357656 + <_> + + <_> + + + + <_>5 12 6 7 -1. + <_>7 12 2 7 3. + 0 + -0.0440343692898750 + -0.5582545995712280 + 9.7633162513375282e-003 + <_> + + <_> + + + + <_>13 9 7 6 -1. + <_>13 11 7 2 3. + 0 + -8.0966893583536148e-003 + -0.2048978954553604 + 0.0265202000737190 + <_> + + <_> + + + + <_>1 1 16 6 -1. + <_>1 3 16 2 3. + 0 + 5.0180461257696152e-003 + -0.1166120991110802 + 0.0457916706800461 + <_> + + <_> + + + + <_>2 1 17 6 -1. + <_>2 3 17 2 3. + 0 + -0.0170646291226149 + 0.2628273069858551 + -0.0203906390815973 + <_> + + <_> + + + + <_>4 4 2 16 -1. + <_>4 12 2 8 2. + 0 + 0.0718501731753349 + -6.9503681734204292e-003 + 0.6703253984451294 + <_> + + <_> + + + + <_>7 6 10 14 -1. + <_>12 6 5 7 2. + <_>7 13 5 7 2. 
+ 0 + -0.0569143705070019 + -0.1347790062427521 + 0.0183990802615881 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -3.2365729566663504e-003 + 0.0696738511323929 + -0.0723145306110382 + <_> + + <_> + + + + <_>4 9 12 6 -1. + <_>10 9 6 3 2. + <_>4 12 6 3 2. + 0 + 0.0418189093470573 + 0.0111514599993825 + -0.5168011188507080 + <_> + + <_> + + + + <_>1 8 18 3 -1. + <_>7 8 6 3 3. + 0 + -6.1106588691473007e-003 + -0.1316394060850143 + 0.0437965095043182 + <_> + + <_> + + + + <_>2 13 18 7 -1. + <_>8 13 6 7 3. + 0 + -0.0355609096586704 + 0.0680055022239685 + -0.0363310202956200 + <_> + + <_> + + + + <_>1 8 15 3 -1. + <_>6 8 5 3 3. + 0 + 0.0687891691923141 + 0.0146989598870277 + -0.3821229934692383 + <_> + + <_> + + + + <_>6 0 12 7 -1. + <_>10 0 4 7 3. + 0 + -0.0783133730292320 + 0.2029606997966766 + -8.6810020729899406e-003 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 3.9626220241189003e-003 + -0.0357978902757168 + 0.1390551030635834 + <_> + + <_> + + + + <_>12 11 8 4 -1. + <_>12 13 8 2 2. + 0 + -0.0338740386068821 + -0.2225342988967896 + 7.5455638580024242e-003 + <_> + + <_> + + + + <_>6 7 6 8 -1. + <_>6 11 6 4 2. + 0 + -0.0647558569908142 + 0.4752154946327210 + -0.0109706800431013 + <_> + + <_> + + + + <_>9 2 4 12 -1. + <_>9 6 4 4 3. + 0 + 0.0266479402780533 + 0.0154453096911311 + -0.2678577899932861 + <_> + + <_> + + + + <_>0 9 7 6 -1. + <_>0 11 7 2 3. + 0 + -0.0307311099022627 + -0.4766868948936462 + 9.6429884433746338e-003 + <_> + + <_> + + + + <_>15 4 5 9 -1. + <_>15 7 5 3 3. + 0 + -0.0240227002650499 + -0.1063396036624908 + 0.0128490403294563 + <_> + + <_> + + + + <_>2 18 13 2 -1. + <_>2 19 13 1 2. + 0 + -1.3036349555477500e-003 + 0.0735241770744324 + -0.0680749192833900 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + -9.8344050347805023e-003 + -0.1184355020523071 + 0.0428666993975639 + <_> + + <_> + + + + <_>6 6 8 12 -1. + <_>6 10 8 4 3. + 0 + 0.0871021971106529 + -0.0400882586836815 + 0.1780454069375992 + <_> + + <_> + + + + <_>7 9 6 9 -1. + <_>7 12 6 3 3. + 0 + 0.0204115696251392 + 0.0168499890714884 + -0.3895365893840790 + <_> + + <_> + + + + <_>0 7 11 4 -1. + <_>0 9 11 2 2. + 0 + 0.0958752632141113 + 5.9905550442636013e-003 + -0.8152565956115723 + <_> + + <_> + + + + <_>8 12 10 6 -1. + <_>13 12 5 3 2. + <_>8 15 5 3 2. + 0 + 6.4893220551311970e-003 + -0.0240392293781042 + 0.0538711696863174 + <_> + + <_> + + + + <_>2 12 10 6 -1. + <_>2 12 5 3 2. + <_>7 15 5 3 2. + 0 + -9.6279237186536193e-004 + 0.0942991897463799 + -0.0644360184669495 + <_> + + <_> + + + + <_>12 14 8 6 -1. + <_>12 16 8 2 3. + 0 + -3.7659960798919201e-004 + -0.0622968785464764 + 0.0412518493831158 + <_> + + <_> + + + + <_>0 14 8 6 -1. + <_>0 16 8 2 3. + 0 + 6.5272641368210316e-003 + 0.0513251312077045 + -0.1303779035806656 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + 0.0214291103184223 + -0.0119896596297622 + 0.2628045976161957 + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. + <_>8 9 4 4 2. + 0 + -5.0938720814883709e-003 + 0.0634189471602440 + -0.0905663371086121 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + -2.5309680495411158e-003 + 0.0602977611124516 + -0.0250494703650475 + <_> + + <_> + + + + <_>7 6 4 8 -1. + <_>7 10 4 4 2. + 0 + -1.5915350522845984e-003 + -0.1217119023203850 + 0.0377379916608334 + <_> + + <_> + + + + <_>9 8 11 4 -1. + <_>9 10 11 2 2. + 0 + -0.0340307094156742 + 0.4641343057155609 + -3.5409750416874886e-003 + <_> + + <_> + + + + <_>6 6 5 10 -1. + <_>6 11 5 5 2. 
+ 0 + 5.1074200309813023e-003 + 0.0398238301277161 + -0.1264553964138031 + <_> + + <_> + + + + <_>4 7 14 6 -1. + <_>4 9 14 2 3. + 0 + -9.6449116244912148e-003 + 0.3346425890922546 + -6.6040740348398685e-003 + <_> + + <_> + + + + <_>4 4 12 8 -1. + <_>4 4 6 4 2. + <_>10 8 6 4 2. + 0 + 0.0114228604361415 + -0.0360804200172424 + 0.1371455043554306 + <_> + + <_> + + + + <_>5 5 12 5 -1. + <_>5 5 6 5 2. + 0 + -5.1042139530181885e-003 + -0.0939868092536926 + 0.0288447793573141 + <_> + + <_> + + + + <_>1 3 15 12 -1. + <_>6 3 5 12 3. + 0 + -0.2633227109909058 + 0.4998092949390411 + -0.0101732499897480 + <_> + + <_> + + + + <_>13 3 6 17 -1. + <_>13 3 3 17 2. + 0 + -0.2455663979053497 + -0.8177834749221802 + 6.9596339017152786e-003 + <_> + + <_> + + + + <_>1 3 6 17 -1. + <_>4 3 3 17 2. + 0 + -0.2141932994127274 + -0.5104051828384399 + 9.4540230929851532e-003 + <_> + + <_> + + + + <_>14 1 6 9 -1. + <_>14 4 6 3 3. + 0 + -0.0143632199615240 + -0.0910009816288948 + 0.0246466696262360 + <_> + + <_> + + + + <_>4 0 8 6 -1. + <_>4 3 8 3 2. + 0 + -1.2388969771564007e-003 + 0.1154457032680512 + -0.0495656207203865 + <_> + + <_> + + + + <_>5 4 15 3 -1. + <_>5 5 15 1 3. + 0 + 0.0210151206701994 + -0.0177658796310425 + 0.1957785934209824 + <_> + + <_> + + + + <_>0 5 8 4 -1. + <_>0 7 8 2 2. + 0 + -4.1783051565289497e-003 + -0.1117286011576653 + 0.0446254499256611 + <_> + + <_> + + + + <_>18 2 2 13 -1. + <_>18 2 1 13 2. + 0 + 2.0896939095109701e-003 + -0.0339887291193008 + 0.0655395016074181 + <_> + + <_> + + + + <_>0 2 2 13 -1. + <_>1 2 1 13 2. + 0 + 0.0164100602269173 + -0.0203732699155808 + 0.2533153891563416 + <_> + + <_> + + + + <_>18 7 2 13 -1. + <_>18 7 1 13 2. + 0 + -0.0642668828368187 + -0.6588014960289002 + 3.4550630953162909e-003 + <_> + + <_> + + + + <_>0 7 2 13 -1. + <_>1 7 1 13 2. + 0 + 6.8898178869858384e-004 + 0.0676432475447655 + -0.0875562429428101 + <_> + + <_> + + + + <_>15 1 5 9 -1. + <_>15 4 5 3 3. + 0 + 5.6662331335246563e-003 + 0.0306383091956377 + -0.1189554035663605 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + -0.0437781214714050 + -0.2830913066864014 + 0.0177136305719614 + <_> + + <_> + + + + <_>4 8 13 2 -1. + <_>4 9 13 1 2. + 0 + 3.4748481120914221e-003 + -0.0957871228456497 + 0.0426304005086422 + <_> + + <_> + + + + <_>2 12 16 4 -1. + <_>2 14 16 2 2. + 0 + -0.0116739403456450 + -0.1050257012248039 + 0.0509038902819157 + <_> + + <_> + + + + <_>6 12 13 3 -1. + <_>6 13 13 1 3. + 0 + -3.4004659391939640e-003 + 0.1047071963548660 + -0.0409391410648823 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 2.7091780211776495e-003 + -0.0605246014893055 + 0.1397895067930222 + <_> + + <_> + + + + <_>6 6 10 3 -1. + <_>6 6 5 3 2. + 0 + -0.0174393001943827 + -0.3239116966724396 + 0.0146302497014403 + <_> + + <_> + + + + <_>4 6 10 3 -1. + <_>9 6 5 3 2. + 0 + -0.0125983301550150 + -0.2068262994289398 + 0.0255018696188927 + <_> + + <_> + + + + <_>6 2 8 6 -1. + <_>6 4 8 2 3. + 0 + 0.0187558699399233 + -0.0479259602725506 + 0.1086438000202179 + <_> + + <_> + + + + <_>6 5 7 4 -1. + <_>6 7 7 2 2. + 0 + -4.2074159719049931e-003 + -0.0820778086781502 + 0.0636477693915367 + <_> + + <_> + + + + <_>9 5 10 9 -1. + <_>9 8 10 3 3. + 0 + -1.6427719674538821e-004 + 0.1012039035558701 + -0.0340679287910461 + <_> + + <_> + + + + <_>0 10 18 4 -1. + <_>0 10 9 2 2. + <_>9 12 9 2 2. + 0 + 0.0438476912677288 + 6.0980222187936306e-003 + -0.8368598222732544 + <_> + + <_> + + + + <_>8 7 6 9 -1. + <_>10 7 2 9 3. 
+ 0 + -0.0392846800386906 + 0.2825056016445160 + -0.0223892591893673 + <_> + + <_> + + + + <_>6 4 4 7 -1. + <_>8 4 2 7 2. + 0 + 0.0385509096086025 + 0.0155704896897078 + -0.3397862017154694 + <_> + + <_> + + + + <_>9 6 9 10 -1. + <_>12 6 3 10 3. + 0 + -0.0691770315170288 + 0.1225832030177116 + -0.0178501792252064 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -1.9251030171290040e-003 + -0.1068774983286858 + 0.0463795103132725 + <_> + + <_> + + + + <_>10 14 10 6 -1. + <_>15 14 5 3 2. + <_>10 17 5 3 2. + 0 + -8.6635202169418335e-003 + 0.0964127480983734 + -0.0175632499158382 + <_> + + <_> + + + + <_>0 6 5 12 -1. + <_>0 10 5 4 3. + 0 + 0.1339350938796997 + 6.3692941330373287e-003 + -0.7017058730125427 + <_> + + <_> + + + + <_>9 6 9 10 -1. + <_>12 6 3 10 3. + 0 + 0.0410823486745358 + -0.0110775697976351 + 0.1346375048160553 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1491145044565201 + 9.5263421535491943e-003 + -0.5087255239486694 + <_> + + <_> + + + + <_>6 13 10 7 -1. + <_>6 13 5 7 2. + 0 + -5.2500818856060505e-003 + 0.0700255781412125 + -0.0428802706301212 + <_> + + <_> + + + + <_>0 2 6 17 -1. + <_>3 2 3 17 2. + 0 + 0.0228235702961683 + -0.0418840497732162 + 0.1177031993865967 + <_> + + <_> + + + + <_>10 14 9 5 -1. + <_>13 14 3 5 3. + 0 + -8.5306530818343163e-003 + 0.0612221397459507 + -0.0249445494264364 + <_> + + <_> + + + + <_>1 14 9 5 -1. + <_>4 14 3 5 3. + 0 + 0.0119717298075557 + 0.0396627709269524 + -0.1626774072647095 + <_> + + <_> + + + + <_>7 13 7 6 -1. + <_>7 15 7 2 3. + 0 + -0.0389382690191269 + 0.2574352025985718 + -0.0163562390953302 + <_> + + <_> + + + + <_>1 14 7 6 -1. + <_>1 16 7 2 3. + 0 + -0.0217063892632723 + -0.3199867904186249 + 0.0171352904289961 + <_> + + <_> + + + + <_>12 10 8 6 -1. + <_>12 12 8 2 3. + 0 + 6.6900630481541157e-003 + 0.0261018499732018 + -0.1098072975873947 + <_> + + <_> + + + + <_>2 6 9 9 -1. + <_>5 6 3 9 3. + 0 + -0.0722708329558373 + 0.1943113058805466 + -0.0260443594306707 + <_> + + <_> + + + + <_>12 10 7 6 -1. + <_>12 12 7 2 3. + 0 + -6.7073688842356205e-003 + -0.1774785071611404 + 0.0458629988133907 + <_> + + <_> + + + + <_>3 2 4 12 -1. + <_>5 2 2 12 2. + 0 + 0.0550193600356579 + -8.3471573889255524e-003 + 0.6051154136657715 + <_> + + <_> + + + + <_>9 1 7 15 -1. + <_>9 6 7 5 3. + 0 + 0.1314264982938767 + -5.7535418309271336e-003 + 0.2916753888130188 + <_> + + <_> + + + + <_>6 10 4 7 -1. + <_>8 10 2 7 2. + 0 + -1.6564460238441825e-003 + 0.0700030326843262 + -0.0626908764243126 + <_> + + <_> + + + + <_>5 0 10 20 -1. + <_>10 0 5 10 2. + <_>5 10 5 10 2. + 0 + 0.1544540971517563 + 6.1896732077002525e-003 + -0.7432330250740051 + <_> + + <_> + + + + <_>7 10 6 10 -1. + <_>9 10 2 10 3. + 0 + -5.0357519648969173e-003 + -0.1133328974246979 + 0.0387417711317539 + <_> + + <_> + + + + <_>12 7 7 4 -1. + <_>12 9 7 2 2. + 0 + 2.2772569209337234e-003 + -0.1134053021669388 + 0.0213194005191326 + <_> + + <_> + + + + <_>2 7 16 4 -1. + <_>2 7 8 2 2. + <_>10 9 8 2 2. + 0 + 3.3173530828207731e-003 + 0.0442733317613602 + -0.1045982986688614 + <_> + + <_> + + + + <_>5 10 12 10 -1. + <_>5 10 6 10 2. + 0 + -0.0296928007155657 + 0.0924837663769722 + -0.0233426094055176 + <_> + + <_> + + + + <_>6 1 2 16 -1. + <_>6 9 2 8 2. + 0 + 0.0629378408193588 + -0.0129982801154256 + 0.3888793885707855 + <_> + + <_> + + + + <_>6 2 12 10 -1. + <_>6 7 12 5 2. + 0 + 3.6641359329223633e-003 + 0.0320998206734657 + -0.0396479889750481 + <_> + + <_> + + + + <_>2 4 14 6 -1. + <_>2 4 7 3 2. + <_>9 7 7 3 2. 
+ 0 + 4.4782999902963638e-003 + -0.0457013286650181 + 0.1069701015949249 + <_> + + <_> + + + + <_>5 0 11 12 -1. + <_>5 4 11 4 3. + 0 + 1.8147319788113236e-003 + -0.0328718200325966 + 0.1064793989062309 + <_> + + <_> + + + + <_>7 1 6 12 -1. + <_>7 5 6 4 3. + 0 + 4.8941639252007008e-003 + 0.0279110092669725 + -0.2172559052705765 + <_> + + <_> + + + + <_>9 8 11 4 -1. + <_>9 10 11 2 2. + 0 + -4.4425828382372856e-003 + -0.1347015053033829 + 0.0107814101502299 + <_> + + <_> + + + + <_>0 8 11 4 -1. + <_>0 10 11 2 2. + 0 + -0.0254934001713991 + 0.6837146878242493 + -7.7452720142900944e-003 + <_> + + <_> + + + + <_>1 8 19 6 -1. + <_>1 11 19 3 2. + 0 + 0.0278354492038488 + 0.0241442993283272 + -0.1517059952020645 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>7 4 6 4 2. + 0 + 7.5548859313130379e-003 + -0.0476434007287025 + 0.1192577034235001 + <_> + + <_> + + + + <_>5 3 15 2 -1. + <_>5 4 15 1 2. + 0 + 0.0103296097368002 + 0.0186468102037907 + -0.1612257063388825 + <_> + + <_> + + + + <_>2 7 14 6 -1. + <_>2 9 14 2 3. + 0 + -0.0123933898285031 + 0.6030492186546326 + -7.7566630207002163e-003 + <_> + + <_> + + + + <_>3 0 17 6 -1. + <_>3 2 17 2 3. + 0 + 0.0138337695971131 + -0.0276172999292612 + 0.0512668788433075 + <_> + + <_> + + + + <_>0 0 17 6 -1. + <_>0 2 17 2 3. + 0 + -0.0256693195551634 + 0.2380135953426361 + -0.0239719096571207 + <_> + + <_> + + + + <_>13 2 7 4 -1. + <_>13 4 7 2 2. + 0 + -5.2043660543859005e-003 + -0.1072179004549980 + 0.0266450494527817 + <_> + + <_> + + + + <_>0 2 7 4 -1. + <_>0 4 7 2 2. + 0 + 3.4628969151526690e-003 + 0.0543134100735188 + -0.1345832049846649 + <_> + + <_> + + + + <_>8 1 12 10 -1. + <_>14 1 6 5 2. + <_>8 6 6 5 2. + 0 + -0.0192206799983978 + 0.0729963928461075 + -0.0406521111726761 + <_> + + <_> + + + + <_>2 1 4 8 -1. + <_>2 5 4 4 2. + 0 + -2.5009829550981522e-003 + -0.0776712968945503 + 0.0590965412557125 + <_> + + <_> + + + + <_>5 1 11 10 -1. + <_>5 6 11 5 2. + 0 + -8.5285156965255737e-003 + 0.0490508116781712 + -0.0640783533453941 + <_> + + <_> + + + + <_>3 9 10 6 -1. + <_>3 9 5 3 2. + <_>8 12 5 3 2. + 0 + 4.3327538296580315e-003 + 0.0252210106700659 + -0.1935898065567017 + <_> + + <_> + + + + <_>12 7 7 4 -1. + <_>12 9 7 2 2. + 0 + 0.0365959703922272 + -0.0162625908851624 + 0.1565123945474625 + <_> + + <_> + + + + <_>2 7 12 8 -1. + <_>6 7 4 8 3. + 0 + -1.1795730097219348e-003 + -0.0724680721759796 + 0.0704494863748550 + <_> + + <_> + + + + <_>10 10 8 4 -1. + <_>10 10 4 4 2. + 0 + -0.0139758298173547 + -0.1178947016596794 + 0.0212920494377613 + <_> + + <_> + + + + <_>2 10 8 4 -1. + <_>6 10 4 4 2. + 0 + -1.3828700175508857e-003 + 0.0792835429310799 + -0.0951041206717491 + <_> + + <_> + + + + <_>3 10 16 3 -1. + <_>3 10 8 3 2. + 0 + -2.9435830656439066e-003 + 0.0703684315085411 + -0.0332179106771946 + <_> + + <_> + + + + <_>1 11 6 5 -1. + <_>4 11 3 5 2. + 0 + 9.5262555405497551e-003 + -0.0297336205840111 + 0.1667045950889587 + <_> + + <_> + + + + <_>10 7 9 9 -1. + <_>13 7 3 9 3. + 0 + -0.0901142731308937 + -0.1662537008523941 + 8.6199166253209114e-003 + <_> + + <_> + + + + <_>1 7 9 9 -1. + <_>4 7 3 9 3. + 0 + -1.2089919764548540e-003 + 0.0810838565230370 + -0.0730291232466698 + <_> + + <_> + + + + <_>5 5 12 5 -1. + <_>5 5 6 5 2. + 0 + -0.1419996023178101 + -1. + 2.2284830920398235e-003 + <_> + + <_> + + + + <_>3 5 12 5 -1. + <_>9 5 6 5 2. + 0 + 8.0690719187259674e-003 + 0.0474122203886509 + -0.1017893031239510 + <_> + + <_> + + + + <_>2 3 16 2 -1. + <_>2 3 8 2 2. 
+ 0 + -4.7410889528691769e-003 + 0.1205111965537071 + -0.0499574802815914 + <_> + + <_> + + + + <_>2 8 7 6 -1. + <_>2 10 7 2 3. + 0 + -1.6977200284600258e-003 + -0.2417144030332565 + 0.0195343699306250 + <_> + + <_> + + + + <_>7 8 9 6 -1. + <_>7 10 9 2 3. + 0 + -2.8892089612782001e-003 + 0.2572799026966095 + -0.0116250598803163 + <_> + + <_> + + + + <_>3 0 3 15 -1. + <_>4 0 1 15 3. + 0 + -1.5177440363913774e-003 + -0.0987841933965683 + 0.0467061288654804 + <_> + + <_> + + + + <_>3 10 16 3 -1. + <_>3 10 8 3 2. + 0 + 0.1419731974601746 + -2.5096370372921228e-003 + 0.7545061111450195 + <_> + + <_> + + + + <_>1 10 16 3 -1. + <_>9 10 8 3 2. + 0 + 0.0975179374217987 + -6.9059049710631371e-003 + 0.6518443226814270 + <_> + + <_> + + + + <_>12 0 8 19 -1. + <_>12 0 4 19 2. + 0 + 0.0135673796758056 + -0.0763251930475235 + 0.0880545824766159 + <_> + + <_> + + + + <_>0 0 8 19 -1. + <_>4 0 4 19 2. + 0 + 0.0809814631938934 + 0.0155581096187234 + -0.3460162878036499 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + -4.7192731872200966e-003 + 0.0816200226545334 + -0.0460722893476486 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + 2.0368969999253750e-003 + -0.0448176302015781 + 0.1286139041185379 + <_> + + <_> + + + + <_>6 14 14 3 -1. + <_>6 15 14 1 3. + 0 + -1.7878509825095534e-003 + 0.0437313318252563 + -0.0449959486722946 + <_> + + <_> + + + + <_>0 12 16 4 -1. + <_>0 12 8 2 2. + <_>8 14 8 2 2. + 0 + -7.1685528382658958e-003 + -0.1359799951314926 + 0.0387969911098480 + <_> + + <_> + + + + <_>7 14 12 6 -1. + <_>13 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0674608871340752 + -0.2926574051380158 + 3.5135280340909958e-003 + <_> + + <_> + + + + <_>1 14 12 6 -1. + <_>1 14 6 3 2. + <_>7 17 6 3 2. + 0 + -0.0155985001474619 + 0.2310566008090973 + -0.0224050693213940 + <_> + + <_> + + + + <_>3 3 14 14 -1. + <_>10 3 7 7 2. + <_>3 10 7 7 2. + 0 + -0.0210264790803194 + -0.1528383046388626 + 0.0315314494073391 + <_> + + <_> + + + + <_>3 6 6 12 -1. + <_>5 6 2 12 3. + 0 + -0.1055836006999016 + -0.6836603879928589 + 6.8997950293123722e-003 + <_> + + <_> + + + + <_>5 12 12 6 -1. + <_>9 12 4 6 3. + 0 + -3.6966579500585794e-003 + 0.0343151502311230 + -0.0489227995276451 + <_> + + <_> + + + + <_>1 8 14 6 -1. + <_>1 8 7 3 2. + <_>8 11 7 3 2. + 0 + -6.0826627304777503e-004 + -0.0526384301483631 + 0.0895469486713409 + <_> + + <_> + + + + <_>8 7 12 10 -1. + <_>14 7 6 5 2. + <_>8 12 6 5 2. + 0 + -0.0289365407079458 + 0.0418184809386730 + -0.0138181699439883 + <_> + + <_> + + + + <_>0 7 12 10 -1. + <_>0 7 6 5 2. + <_>6 12 6 5 2. + 0 + -5.8082528412342072e-003 + 0.0678747966885567 + -0.0855787992477417 + <_> + + <_> + + + + <_>9 2 6 18 -1. + <_>12 2 3 9 2. + <_>9 11 3 9 2. + 0 + -0.0460953786969185 + -0.1258478015661240 + 0.0204669702798128 + <_> + + <_> + + + + <_>1 10 8 10 -1. + <_>1 10 4 5 2. + <_>5 15 4 5 2. + 0 + 0.0529729202389717 + -0.0124532599002123 + 0.3456504940986633 + <_> + + <_> + + + + <_>4 14 12 4 -1. + <_>4 16 12 2 2. + 0 + 0.0493515990674496 + 0.0109012397006154 + -0.4850698113441467 + <_> + + <_> + + + + <_>5 13 6 7 -1. + <_>7 13 2 7 3. + 0 + 0.0443778000771999 + 9.9294837564229965e-003 + -0.4387789964675903 + <_> + + <_> + + + + <_>5 2 15 5 -1. + <_>10 2 5 5 3. + 0 + -0.1146489009261131 + 0.2687459886074066 + -9.2000560835003853e-003 + <_> + + <_> + + + + <_>5 4 9 14 -1. + <_>5 11 9 7 2. + 0 + 0.1688783019781113 + 5.7101310230791569e-003 + -0.8597288131713867 + <_> + + <_> + + + + <_>8 0 11 4 -1. + <_>8 2 11 2 2. 
+ 0 + 0.0511980988085270 + -8.5723921656608582e-003 + 0.1339516937732697 + <_> + + <_> + + + + <_>0 14 16 6 -1. + <_>0 16 16 2 3. + 0 + -3.0789880547672510e-003 + -0.1033876016736031 + 0.0434594787657261 + <_> + + <_> + + + + <_>10 14 8 6 -1. + <_>10 16 8 2 3. + 0 + 0.0472231283783913 + 8.1934239715337753e-003 + -0.4380340874195099 + <_> + + <_> + + + + <_>0 15 13 3 -1. + <_>0 16 13 1 3. + 0 + -7.6270569115877151e-003 + 0.1871389001607895 + -0.0246602501720190 + <_> + + <_> + + + + <_>5 8 15 3 -1. + <_>5 9 15 1 3. + 0 + 5.4106907919049263e-003 + 0.0410998314619064 + -0.0788682326674461 + <_> + + <_> + + + + <_>0 8 19 3 -1. + <_>0 9 19 1 3. + 0 + -1.4900229871273041e-003 + -0.2011504024267197 + 0.0318981595337391 + <_> + + <_> + + + + <_>8 16 8 4 -1. + <_>8 16 4 4 2. + 0 + -0.0838316082954407 + 0.5801793932914734 + -5.2973427809774876e-003 + <_> + + <_> + + + + <_>4 16 8 4 -1. + <_>8 16 4 4 2. + 0 + 6.2233800999820232e-003 + -0.0397860594093800 + 0.1228395029902458 + <_> + + <_> + + + + <_>9 5 10 9 -1. + <_>9 8 10 3 3. + 0 + 0.1147508025169373 + -0.0119754197075963 + 0.2158671021461487 + <_> + + <_> + + + + <_>1 5 10 9 -1. + <_>1 8 10 3 3. + 0 + -1.5253260498866439e-003 + 0.1380452960729599 + -0.0399418808519840 + <_> + + <_> + + + + <_>4 7 14 2 -1. + <_>4 7 7 2 2. + 0 + -5.2878521382808685e-003 + -0.1279065012931824 + 0.0328935608267784 + <_> + + <_> + + + + <_>2 7 13 2 -1. + <_>2 8 13 1 2. + 0 + 8.9670647867023945e-004 + -0.1248105987906456 + 0.0445442497730255 + <_> + + <_> + + + + <_>6 5 8 4 -1. + <_>6 7 8 2 2. + 0 + 0.0384216606616974 + 7.7155791223049164e-003 + -0.6557546854019165 + <_> + + <_> + + + + <_>5 12 9 5 -1. + <_>8 12 3 5 3. + 0 + -9.3785318313166499e-004 + 0.0556085109710693 + -0.0898769125342369 + <_> + + <_> + + + + <_>3 6 14 3 -1. + <_>3 7 14 1 3. + 0 + 1.9965849351137877e-003 + -0.0252976100891829 + 0.1941318064928055 + <_> + + <_> + + + + <_>7 2 4 12 -1. + <_>7 6 4 4 3. + 0 + 4.5782068627886474e-004 + 0.0390891991555691 + -0.1290857046842575 + <_> + + <_> + + + + <_>2 4 16 4 -1. + <_>2 6 16 2 2. + 0 + 3.8373940624296665e-003 + -0.0287488698959351 + 0.1942975074052811 + <_> + + <_> + + + + <_>1 4 9 4 -1. + <_>1 6 9 2 2. + 0 + 3.7142829387448728e-004 + 0.0382723584771156 + -0.1375918984413147 + <_> + + <_> + + + + <_>9 4 11 4 -1. + <_>9 6 11 2 2. + 0 + 7.5116259977221489e-003 + -0.0144611299037933 + 0.1265694946050644 + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. + <_>8 9 4 4 2. + 0 + -0.0503628402948380 + 0.3518357872962952 + -0.0140518601983786 + <_> + + <_> + + + + <_>1 5 18 3 -1. + <_>7 5 6 3 3. + 0 + 0.0399216413497925 + 0.0272804293781519 + -0.1995819956064224 + <_> + + <_> + + + + <_>1 0 15 7 -1. + <_>6 0 5 7 3. + 0 + 0.2260525971651077 + -6.8001961335539818e-003 + 0.7300689816474915 + <_> + + <_> + + + + <_>12 0 5 15 -1. + <_>12 5 5 5 3. + 0 + 0.1108177974820137 + 4.3370737694203854e-003 + -0.8682916164398193 + <_> + + <_> + + + + <_>3 0 5 15 -1. + <_>3 5 5 5 3. + 0 + -9.7494889050722122e-003 + -0.0637406632304192 + 0.0845379978418350 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + -2.2887689992785454e-003 + 0.0996540188789368 + -0.0415654182434082 + <_> + + <_> + + + + <_>8 3 4 7 -1. + <_>10 3 2 7 2. + 0 + 2.0008319988846779e-003 + -0.0556506998836994 + 0.1070986986160278 + <_> + + <_> + + + + <_>4 6 12 11 -1. + <_>8 6 4 11 3. + 0 + -0.0151600502431393 + -0.1409876048564911 + 0.0387415997684002 + <_> + + <_> + + + + <_>1 7 18 4 -1. + <_>1 9 18 2 2. + 0 + -6.3132969662547112e-003 + -1. 
+ 4.4605308212339878e-003 + <_> + + <_> + + + + <_>8 5 6 8 -1. + <_>10 5 2 8 3. + 0 + -0.0139700099825859 + 0.1248108968138695 + -0.0214258302003145 + <_> + + <_> + + + + <_>7 2 6 5 -1. + <_>10 2 3 5 2. + 0 + -0.0443212799727917 + -0.5334007143974304 + 0.0101652396842837 + <_> + + <_> + + + + <_>9 0 4 7 -1. + <_>9 0 2 7 2. + 0 + 1.4885979471728206e-003 + -0.0488686002790928 + 0.0360779017210007 + <_> + + <_> + + + + <_>0 0 7 6 -1. + <_>0 2 7 2 3. + 0 + 0.0651396811008453 + 7.6331058517098427e-003 + -0.5878164172172546 + <_> + + <_> + + + + <_>13 0 7 6 -1. + <_>13 2 7 2 3. + 0 + -0.0207414105534554 + -0.2965827882289887 + 0.0186228007078171 + -1.2940989732742310 + 40 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 8 4 -1. + <_>5 1 4 4 2. + 0 + 0.0135756898671389 + -0.1424959003925324 + 0.2333762049674988 + <_> + + <_> + + + + <_>7 4 7 6 -1. + <_>7 6 7 2 3. + 0 + -7.5882389210164547e-003 + 0.0864644795656204 + -0.2395431995391846 + <_> + + <_> + + + + <_>4 5 10 12 -1. + <_>4 5 5 6 2. + <_>9 11 5 6 2. + 0 + 4.2986529879271984e-003 + 0.0502820909023285 + -0.3525012135505676 + <_> + + <_> + + + + <_>8 12 11 8 -1. + <_>8 16 11 4 2. + 0 + -0.0197931192815304 + -0.1682747006416321 + 0.0437127202749252 + <_> + + <_> + + + + <_>5 5 9 5 -1. + <_>8 5 3 5 3. + 0 + 6.6613829694688320e-003 + -0.2037153989076614 + 0.0712257474660873 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 3.2715050037950277e-003 + 0.0545367188751698 + -0.2242882996797562 + <_> + + <_> + + + + <_>0 0 2 14 -1. + <_>1 0 1 14 2. + 0 + -0.0361433215439320 + 0.5504488945007324 + -0.0235972106456757 + <_> + + <_> + + + + <_>11 9 3 10 -1. + <_>11 14 3 5 2. + 0 + 3.1145319808274508e-003 + 0.0220494307577610 + -0.3010942935943604 + <_> + + <_> + + + + <_>3 17 13 3 -1. + <_>3 18 13 1 3. + 0 + 8.9540961198508739e-004 + -0.1227985024452210 + 0.1075142025947571 + <_> + + <_> + + + + <_>6 10 13 3 -1. + <_>6 11 13 1 3. + 0 + 8.0573331797495484e-004 + -0.0875877812504768 + 0.0546320490539074 + <_> + + <_> + + + + <_>1 2 18 6 -1. + <_>1 2 9 3 2. + <_>10 5 9 3 2. + 0 + -6.5726130269467831e-003 + -0.1564987003803253 + 0.0765607580542564 + <_> + + <_> + + + + <_>6 1 12 8 -1. + <_>12 1 6 4 2. + <_>6 5 6 4 2. + 0 + 2.2269350010901690e-003 + 0.0294907800853252 + -0.0592101998627186 + <_> + + <_> + + + + <_>4 1 12 8 -1. + <_>4 1 6 4 2. + <_>10 5 6 4 2. + 0 + 6.2076752074062824e-003 + 0.0757273435592651 + -0.1767532974481583 + <_> + + <_> + + + + <_>4 3 13 3 -1. + <_>4 4 13 1 3. + 0 + 6.0021011158823967e-003 + -0.0783538073301315 + 0.1449289023876190 + <_> + + <_> + + + + <_>1 6 12 4 -1. + <_>5 6 4 4 3. + 0 + 0.0119963400065899 + 0.0286440309137106 + -0.3198246955871582 + <_> + + <_> + + + + <_>14 2 6 5 -1. + <_>14 2 3 5 2. + 0 + 6.7174229770898819e-003 + -0.1073990017175674 + 0.1310632973909378 + <_> + + <_> + + + + <_>3 12 13 2 -1. + <_>3 13 13 1 2. + 0 + 5.7567027397453785e-004 + -0.0641267970204353 + 0.1629354059696198 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + 3.9552329108119011e-003 + 0.0373474210500717 + -0.1525357067584992 + <_> + + <_> + + + + <_>1 0 4 7 -1. + <_>3 0 2 7 2. + 0 + 1.5598450554534793e-003 + -0.0986873134970665 + 0.0987182036042213 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + -8.4324590861797333e-003 + 0.2090564966201782 + -0.0604840181767941 + <_> + + <_> + + + + <_>0 1 5 9 -1. + <_>0 4 5 3 3. + 0 + 8.7580326944589615e-003 + 0.0506034307181835 + -0.2184547036886215 + <_> + + <_> + + + + <_>11 0 9 6 -1. + <_>14 0 3 6 3. 
+ 0 + -0.1196575015783310 + 0.2671158909797669 + -7.4574039317667484e-003 + <_> + + <_> + + + + <_>6 9 3 10 -1. + <_>6 14 3 5 2. + 0 + 2.0653149113059044e-003 + 0.0351948104798794 + -0.2523075044155121 + <_> + + <_> + + + + <_>10 9 6 5 -1. + <_>10 9 3 5 2. + 0 + -5.7491107145324349e-004 + 0.0824242234230042 + -0.1083047986030579 + <_> + + <_> + + + + <_>6 7 3 12 -1. + <_>6 13 3 6 2. + 0 + -6.7591401748359203e-003 + -0.1370418965816498 + 0.0701543688774109 + <_> + + <_> + + + + <_>11 0 9 6 -1. + <_>14 0 3 6 3. + 0 + 0.0182107407599688 + -0.0254077706485987 + 0.1012372970581055 + <_> + + <_> + + + + <_>0 0 9 6 -1. + <_>3 0 3 6 3. + 0 + -0.0880068466067314 + 0.3663871884346008 + -0.0308931805193424 + <_> + + <_> + + + + <_>4 6 12 3 -1. + <_>4 6 6 3 2. + 0 + -4.4944360852241516e-003 + -0.1575381010770798 + 0.0600706301629543 + <_> + + <_> + + + + <_>6 4 6 8 -1. + <_>8 4 2 8 3. + 0 + -6.3741360791027546e-003 + 0.2118988931179047 + -0.0395679995417595 + <_> + + <_> + + + + <_>11 0 3 13 -1. + <_>12 0 1 13 3. + 0 + -0.0310974400490522 + -0.5996552109718323 + 9.9493442103266716e-003 + <_> + + <_> + + + + <_>6 0 3 13 -1. + <_>7 0 1 13 3. + 0 + 5.8496380224823952e-003 + 0.0282446891069412 + -0.2977800071239471 + <_> + + <_> + + + + <_>4 14 13 2 -1. + <_>4 15 13 1 2. + 0 + -2.2763800807297230e-003 + 0.1027041971683502 + -0.0737119913101196 + <_> + + <_> + + + + <_>1 11 7 6 -1. + <_>1 13 7 2 3. + 0 + 3.9103049784898758e-003 + 0.0524456687271595 + -0.2012391984462738 + <_> + + <_> + + + + <_>13 5 4 8 -1. + <_>13 9 4 4 2. + 0 + 2.8906730003654957e-003 + -0.2169228047132492 + 0.0372945703566074 + <_> + + <_> + + + + <_>4 16 12 4 -1. + <_>8 16 4 4 3. + 0 + 4.5904931612312794e-003 + -0.0812765806913376 + 0.1101315990090370 + <_> + + <_> + + + + <_>11 9 6 8 -1. + <_>11 9 3 8 2. + 0 + -0.0342458002269268 + -0.1154173016548157 + 0.0143840499222279 + <_> + + <_> + + + + <_>3 9 6 8 -1. + <_>6 9 3 8 2. + 0 + -1.7881620442494750e-004 + 0.0628859773278236 + -0.1326712965965271 + <_> + + <_> + + + + <_>10 6 10 6 -1. + <_>15 6 5 3 2. + <_>10 9 5 3 2. + 0 + -4.0114559233188629e-003 + -0.1896172016859055 + 0.0367017686367035 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 3.1429999507963657e-003 + -0.0499151200056076 + 0.1729976981878281 + <_> + + <_> + + + + <_>10 6 10 6 -1. + <_>15 6 5 3 2. + <_>10 9 5 3 2. + 0 + 0.0780823528766632 + 4.7195390798151493e-003 + -0.3401587903499603 + <_> + + <_> + + + + <_>2 0 14 9 -1. + <_>2 3 14 3 3. + 0 + 0.2037094980478287 + -0.0217331405729055 + 0.3742265105247498 + <_> + + <_> + + + + <_>10 6 10 6 -1. + <_>15 6 5 3 2. + <_>10 9 5 3 2. + 0 + 0.0974248200654984 + -6.8117439514026046e-004 + 0.4963915944099426 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -2.6366419624537230e-003 + -0.1853210031986237 + 0.0437688305974007 + <_> + + <_> + + + + <_>9 5 4 12 -1. + <_>9 11 4 6 2. + 0 + 4.1020149365067482e-004 + 0.0278029106557369 + -0.0877069681882858 + <_> + + <_> + + + + <_>2 4 10 6 -1. + <_>2 4 5 3 2. + <_>7 7 5 3 2. + 0 + -0.0596665591001511 + -0.5687270760536194 + 0.0133886402472854 + <_> + + <_> + + + + <_>9 1 8 16 -1. + <_>13 1 4 8 2. + <_>9 9 4 8 2. + 0 + -5.1892381161451340e-003 + 0.0504994988441467 + -0.1446586996316910 + <_> + + <_> + + + + <_>2 1 14 8 -1. + <_>2 5 14 4 2. + 0 + 0.1037714034318924 + -0.0189520604908466 + 0.4110797941684723 + <_> + + <_> + + + + <_>12 10 7 6 -1. + <_>12 12 7 2 3. + 0 + -0.0140757597982883 + -0.2036736011505127 + 0.0325132794678211 + <_> + + <_> + + + + <_>0 8 6 9 -1. + <_>3 8 3 9 2. 
+ 0 + -6.8877148441970348e-003 + 0.1240172982215881 + -0.0766171291470528 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0293458495289087 + 8.4471162408590317e-003 + -0.3465698063373566 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -8.3123557269573212e-003 + -0.1918011009693146 + 0.0385856293141842 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0644932687282562 + -0.0271588806062937 + 0.3021799921989441 + <_> + + <_> + + + + <_>0 0 16 2 -1. + <_>0 1 16 1 2. + 0 + 8.0413377145305276e-004 + -0.1044417023658752 + 0.0647219792008400 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -6.5569980069994926e-003 + -0.1065860018134117 + 0.0252384897321463 + <_> + + <_> + + + + <_>2 13 14 4 -1. + <_>2 13 7 2 2. + <_>9 15 7 2 2. + 0 + -0.0383269302546978 + -0.6850638985633850 + 9.6486946567893028e-003 + <_> + + <_> + + + + <_>7 5 9 7 -1. + <_>10 5 3 7 3. + 0 + -0.0403273291885853 + 0.1975985020399094 + -0.0251841694116592 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 6.1981407925486565e-003 + 0.0464157909154892 + -0.1717167049646378 + <_> + + <_> + + + + <_>13 5 4 8 -1. + <_>13 9 4 4 2. + 0 + 0.0374655015766621 + -0.0150102796033025 + 0.0869622528553009 + <_> + + <_> + + + + <_>6 1 6 10 -1. + <_>6 6 6 5 2. + 0 + -6.0584479942917824e-003 + 0.0692427530884743 + -0.0945942029356956 + <_> + + <_> + + + + <_>0 3 20 8 -1. + <_>0 7 20 4 2. + 0 + -0.0149916997179389 + -0.1496981978416443 + 0.0465794503688812 + <_> + + <_> + + + + <_>4 0 12 8 -1. + <_>10 0 6 8 2. + 0 + 0.0647603571414948 + -0.0260891690850258 + 0.2707200944423676 + <_> + + <_> + + + + <_>2 1 18 19 -1. + <_>8 1 6 19 3. + 0 + 0.5902032852172852 + 3.9715780876576900e-003 + -0.6391807198524475 + <_> + + <_> + + + + <_>0 1 18 19 -1. + <_>6 1 6 19 3. + 0 + 0.0738922134041786 + -0.0625063329935074 + 0.1310071945190430 + <_> + + <_> + + + + <_>8 1 12 19 -1. + <_>8 1 6 19 2. + 0 + 0.4392817020416260 + 5.0452877767384052e-003 + -0.3762843906879425 + <_> + + <_> + + + + <_>0 1 12 19 -1. + <_>6 1 6 19 2. + 0 + 0.1019204035401344 + 0.0220532901585102 + -0.3340820074081421 + <_> + + <_> + + + + <_>0 0 20 10 -1. + <_>10 0 10 5 2. + <_>0 5 10 5 2. + 0 + 0.1108421981334686 + 0.0162155404686928 + -0.3490070104598999 + <_> + + <_> + + + + <_>0 4 13 3 -1. + <_>0 5 13 1 3. + 0 + 5.5628088302910328e-003 + -0.0521967113018036 + 0.1179637014865875 + <_> + + <_> + + + + <_>3 2 14 2 -1. + <_>3 3 14 1 2. + 0 + -6.3897081417962909e-004 + -0.1565970033407211 + 0.0447444505989552 + <_> + + <_> + + + + <_>1 4 13 3 -1. + <_>1 5 13 1 3. + 0 + -3.5426639951765537e-003 + 0.1449057012796402 + -0.0425187088549137 + <_> + + <_> + + + + <_>13 12 7 4 -1. + <_>13 14 7 2 2. + 0 + -0.0330161601305008 + -0.3694294095039368 + 7.6470980420708656e-003 + <_> + + <_> + + + + <_>2 1 4 19 -1. + <_>4 1 2 19 2. + 0 + 0.0960508584976196 + 6.5154801122844219e-003 + -0.8782703876495361 + <_> + + <_> + + + + <_>12 10 7 6 -1. + <_>12 12 7 2 3. + 0 + -0.0495720095932484 + -0.4272302091121674 + 3.1567770056426525e-003 + <_> + + <_> + + + + <_>3 9 13 3 -1. + <_>3 10 13 1 3. + 0 + 2.5885479408316314e-004 + -0.1568966954946518 + 0.0380518287420273 + <_> + + <_> + + + + <_>4 8 14 3 -1. + <_>4 9 14 1 3. + 0 + -1.5898289857432246e-003 + -0.1884572058916092 + 0.0246300492435694 + <_> + + <_> + + + + <_>4 5 12 9 -1. + <_>4 8 12 3 3. + 0 + -1.3463890354614705e-004 + 0.1445270031690598 + -0.0441722609102726 + <_> + + <_> + + + + <_>6 15 13 3 -1. + <_>6 16 13 1 3. 
+ 0 + 0.0116742495447397 + -0.0256763808429241 + 0.1952770948410034 + <_> + + <_> + + + + <_>0 12 7 4 -1. + <_>0 14 7 2 2. + 0 + -0.0235070008784533 + -0.3227188885211945 + 0.0185148399323225 + <_> + + <_> + + + + <_>5 2 14 18 -1. + <_>12 2 7 9 2. + <_>5 11 7 9 2. + 0 + 0.0312258005142212 + -0.0196222998201847 + 0.1457010060548782 + <_> + + <_> + + + + <_>7 5 4 12 -1. + <_>7 11 4 6 2. + 0 + 8.0607319250702858e-004 + 0.0443799905478954 + -0.1363562047481537 + <_> + + <_> + + + + <_>5 2 14 18 -1. + <_>12 2 7 9 2. + <_>5 11 7 9 2. + 0 + -0.2644588053226471 + 0.4177120029926300 + -6.3821650110185146e-003 + <_> + + <_> + + + + <_>1 2 14 18 -1. + <_>1 2 7 9 2. + <_>8 11 7 9 2. + 0 + 0.0354793816804886 + -0.0227584801614285 + 0.2694610059261322 + <_> + + <_> + + + + <_>6 10 8 10 -1. + <_>10 10 4 5 2. + <_>6 15 4 5 2. + 0 + -0.0381375998258591 + -0.3671990931034088 + 0.0187220592051744 + <_> + + <_> + + + + <_>3 5 4 8 -1. + <_>3 9 4 4 2. + 0 + 3.9108810015022755e-003 + -0.1817681938409805 + 0.0390549488365650 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>10 10 4 4 2. + <_>6 14 4 4 2. + 0 + 4.1834539733827114e-003 + 0.0486762486398220 + -0.1355886012315750 + <_> + + <_> + + + + <_>1 10 7 6 -1. + <_>1 12 7 2 3. + 0 + -0.0466414205729961 + -0.5874168276786804 + 9.8590552806854248e-003 + <_> + + <_> + + + + <_>4 14 13 3 -1. + <_>4 15 13 1 3. + 0 + 0.0119501398876309 + -0.0255060493946075 + 0.2797119915485382 + <_> + + <_> + + + + <_>6 11 6 9 -1. + <_>8 11 2 9 3. + 0 + -0.0635850727558136 + -0.7094069719314575 + 8.8691459968686104e-003 + <_> + + <_> + + + + <_>7 5 9 7 -1. + <_>10 5 3 7 3. + 0 + 9.7221415489912033e-003 + -0.0278850290924311 + 0.0546266809105873 + <_> + + <_> + + + + <_>0 10 19 6 -1. + <_>0 13 19 3 2. + 0 + -0.0161114595830441 + -0.0682654827833176 + 0.0809329673647881 + <_> + + <_> + + + + <_>4 1 12 10 -1. + <_>4 6 12 5 2. + 0 + -0.0799505114555359 + 0.2042568027973175 + -0.0343068502843380 + <_> + + <_> + + + + <_>0 12 8 6 -1. + <_>0 14 8 2 3. + 0 + 3.1421340536326170e-003 + 0.0421968810260296 + -0.1536691039800644 + <_> + + <_> + + + + <_>5 10 13 3 -1. + <_>5 11 13 1 3. + 0 + 2.9253180400701240e-005 + -0.0763822570443153 + 0.0317488797008991 + <_> + + <_> + + + + <_>0 5 20 2 -1. + <_>0 6 20 1 2. + 0 + -0.0545870885252953 + -0.6489148736000061 + 9.1545386239886284e-003 + <_> + + <_> + + + + <_>2 0 17 6 -1. + <_>2 2 17 2 3. + 0 + -0.0210834201425314 + 0.1905899941921234 + -0.0246866401284933 + <_> + + <_> + + + + <_>3 14 10 6 -1. + <_>3 14 5 3 2. + <_>8 17 5 3 2. + 0 + 3.9170900708995759e-004 + -0.1057088971138001 + 0.0529467687010765 + <_> + + <_> + + + + <_>6 0 9 11 -1. + <_>9 0 3 11 3. + 0 + 0.2258882969617844 + 2.3077470250427723e-003 + -0.9260604977607727 + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>2 2 2 11 3. + 0 + -0.0188999790698290 + 0.1450397074222565 + -0.0385066196322441 + <_> + + <_> + + + + <_>14 0 6 7 -1. + <_>16 0 2 7 3. + 0 + -8.7533425539731979e-003 + 0.0839588269591331 + -0.0374790988862515 + <_> + + <_> + + + + <_>0 8 9 12 -1. + <_>3 8 3 12 3. + 0 + -0.2082125991582871 + -0.6794853806495667 + 9.8609952256083488e-003 + <_> + + <_> + + + + <_>13 10 7 6 -1. + <_>13 12 7 2 3. + 0 + 0.0162700600922108 + 0.0141155803576112 + -0.1821835935115814 + <_> + + <_> + + + + <_>0 10 7 6 -1. + <_>0 12 7 2 3. + 0 + 3.0145489145070314e-003 + 0.0520137399435043 + -0.1145019009709358 + <_> + + <_> + + + + <_>14 0 6 7 -1. + <_>16 0 2 7 3. + 0 + 0.0185474492609501 + -0.0256816204637289 + 0.1645638048648834 + <_> + + <_> + + + + <_>0 0 6 7 -1. + <_>2 0 2 7 3. 
+ 0 + 4.2732958681881428e-003 + -0.0595732405781746 + 0.1039028018712997 + <_> + + <_> + + + + <_>8 0 9 15 -1. + <_>11 0 3 15 3. + 0 + -0.0282496307045221 + -0.0781615898013115 + 0.0290642306208611 + <_> + + <_> + + + + <_>3 5 12 11 -1. + <_>7 5 4 11 3. + 0 + -0.0155386002734303 + -0.1448138058185577 + 0.0384340584278107 + <_> + + <_> + + + + <_>6 15 13 3 -1. + <_>6 16 13 1 3. + 0 + 3.8620950654149055e-003 + -0.0387453809380531 + 0.0981835275888443 + <_> + + <_> + + + + <_>0 1 13 3 -1. + <_>0 2 13 1 3. + 0 + 0.0152533696964383 + 0.0179465003311634 + -0.3094803094863892 + <_> + + <_> + + + + <_>7 5 9 7 -1. + <_>10 5 3 7 3. + 0 + -4.2140888981521130e-003 + 0.0575215704739094 + -0.0277824308723211 + <_> + + <_> + + + + <_>7 6 3 14 -1. + <_>8 6 1 14 3. + 0 + -2.1610679104924202e-003 + 0.1061744987964630 + -0.0594112500548363 + <_> + + <_> + + + + <_>5 1 13 3 -1. + <_>5 2 13 1 3. + 0 + -1.8687519477680326e-003 + -0.1280768960714340 + 0.0477816388010979 + <_> + + <_> + + + + <_>8 1 3 13 -1. + <_>9 1 1 13 3. + 0 + -6.2083022203296423e-004 + 0.1172534972429276 + -0.0478611998260021 + <_> + + <_> + + + + <_>9 6 4 14 -1. + <_>11 6 2 7 2. + <_>9 13 2 7 2. + 0 + -2.5575871113687754e-003 + 0.0579006485641003 + -0.0840368568897247 + <_> + + <_> + + + + <_>6 9 8 10 -1. + <_>6 9 4 5 2. + <_>10 14 4 5 2. + 0 + 4.1207410395145416e-003 + 0.0542397797107697 + -0.1261114031076431 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 0.0175257790833712 + 0.0287927500903606 + -0.1979317069053650 + <_> + + <_> + + + + <_>0 15 14 5 -1. + <_>7 15 7 5 2. + 0 + -0.0190124902874231 + 0.1144431978464127 + -0.0668130517005920 + <_> + + <_> + + + + <_>12 12 8 5 -1. + <_>12 12 4 5 2. + 0 + 9.5198452472686768e-003 + -0.0391056388616562 + 0.0885889828205109 + <_> + + <_> + + + + <_>0 14 10 6 -1. + <_>0 16 10 2 3. + 0 + 7.7857482247054577e-003 + 0.0479038506746292 + -0.1194128021597862 + <_> + + <_> + + + + <_>4 16 14 4 -1. + <_>4 18 14 2 2. + 0 + -2.5355129037052393e-003 + 0.0613774992525578 + -0.0515763908624649 + <_> + + <_> + + + + <_>6 1 6 18 -1. + <_>8 1 2 18 3. + 0 + 0.1388667970895767 + 7.1258218958973885e-003 + -0.7507606148719788 + <_> + + <_> + + + + <_>6 14 14 2 -1. + <_>6 15 14 1 2. + 0 + -3.0958889983594418e-003 + 0.0734322667121887 + -0.0404091812670231 + <_> + + <_> + + + + <_>0 4 7 6 -1. + <_>0 6 7 2 3. + 0 + 4.7118910588324070e-003 + 0.0223742704838514 + -0.2388508021831513 + <_> + + <_> + + + + <_>15 0 5 9 -1. + <_>15 3 5 3 3. + 0 + 6.3587618060410023e-003 + 0.0536843799054623 + -0.1339824050664902 + <_> + + <_> + + + + <_>3 0 14 20 -1. + <_>10 0 7 20 2. + 0 + 0.0683670118451118 + -0.0361039191484451 + 0.1741008013486862 + <_> + + <_> + + + + <_>8 10 4 7 -1. + <_>8 10 2 7 2. + 0 + -3.2802459318190813e-003 + -0.1460307985544205 + 0.0482151396572590 + <_> + + <_> + + + + <_>4 5 9 7 -1. + <_>7 5 3 7 3. + 0 + -0.0664302706718445 + 0.4673899114131928 + -0.0131403803825378 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0422740690410137 + -0.6325333118438721 + 0.0103594399988651 + <_> + + <_> + + + + <_>5 3 6 13 -1. + <_>8 3 3 13 2. + 0 + -1.0691370116546750e-003 + -0.1146982982754707 + 0.0450481213629246 + <_> + + <_> + + + + <_>7 12 6 8 -1. + <_>7 12 3 8 2. + 0 + 0.0542354695498943 + -0.0198096092790365 + 0.3143073022365570 + <_> + + <_> + + + + <_>4 9 6 5 -1. + <_>7 9 3 5 2. + 0 + -7.2852471930673346e-006 + 0.0580512508749962 + -0.1024617031216621 + <_> + + <_> + + + + <_>11 4 4 10 -1. + <_>11 4 2 10 2. 
+ 0 + 0.0208933092653751 + 0.0156088098883629 + -0.2154573947191238 + <_> + + <_> + + + + <_>0 11 12 6 -1. + <_>4 11 4 6 3. + 0 + -0.0537651814520359 + 0.2055923938751221 + -0.0325259193778038 + <_> + + <_> + + + + <_>11 4 4 10 -1. + <_>11 4 2 10 2. + 0 + -0.0159726701676846 + -0.1711989045143127 + 0.0147738298401237 + <_> + + <_> + + + + <_>5 4 4 10 -1. + <_>7 4 2 10 2. + 0 + -0.0145914098247886 + -0.2304601967334747 + 0.0233450103551149 + <_> + + <_> + + + + <_>6 14 14 2 -1. + <_>6 15 14 1 2. + 0 + 2.4016639217734337e-003 + -0.0282724294811487 + 0.0951242372393608 + <_> + + <_> + + + + <_>0 14 14 2 -1. + <_>0 15 14 1 2. + 0 + -0.0204306896775961 + 0.4065555930137634 + -0.0162125397473574 + <_> + + <_> + + + + <_>15 2 5 12 -1. + <_>15 6 5 4 3. + 0 + 0.0819267928600311 + 8.7937163189053535e-003 + -0.4021030068397522 + <_> + + <_> + + + + <_>0 2 5 12 -1. + <_>0 6 5 4 3. + 0 + -0.0128928497433662 + -0.1194692999124527 + 0.0450221300125122 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>16 12 4 7 2. + 0 + 0.0947126820683479 + -0.0107600800693035 + 0.2169398069381714 + <_> + + <_> + + + + <_>0 14 12 6 -1. + <_>0 14 6 3 2. + <_>6 17 6 3 2. + 0 + 4.0901689790189266e-003 + -0.0845926031470299 + 0.0704576969146729 + <_> + + <_> + + + + <_>16 5 4 14 -1. + <_>16 12 4 7 2. + 0 + -0.1249653995037079 + 0.2827695012092590 + -4.2760102078318596e-003 + <_> + + <_> + + + + <_>0 5 4 14 -1. + <_>0 12 4 7 2. + 0 + 0.0157581698149443 + -0.0489265881478786 + 0.1238022968173027 + <_> + + <_> + + + + <_>12 12 8 5 -1. + <_>12 12 4 5 2. + 0 + -5.2818129770457745e-003 + 0.0618364401161671 + -0.0367129407823086 + <_> + + <_> + + + + <_>0 12 8 5 -1. + <_>4 12 4 5 2. + 0 + 8.6735859513282776e-003 + -0.0473722405731678 + 0.1580915004014969 + <_> + + <_> + + + + <_>12 0 3 14 -1. + <_>13 0 1 14 3. + 0 + -5.2273580804467201e-003 + -0.1169456988573074 + 0.0291564408689737 + <_> + + <_> + + + + <_>5 12 5 8 -1. + <_>5 16 5 4 2. + 0 + 0.0618318282067776 + 8.0447606742382050e-003 + -0.6853052973747253 + <_> + + <_> + + + + <_>18 2 2 14 -1. + <_>18 9 2 7 2. + 0 + 0.0668156072497368 + -8.4813889116048813e-003 + 0.1452376991510391 + <_> + + <_> + + + + <_>6 0 8 6 -1. + <_>6 3 8 3 2. + 0 + -0.1006200015544891 + 0.7460582852363586 + -6.8016690202057362e-003 + <_> + + <_> + + + + <_>14 1 6 9 -1. + <_>14 4 6 3 3. + 0 + -0.0147515395656228 + -0.1489351987838745 + 0.0395791903138161 + <_> + + <_> + + + + <_>3 4 14 6 -1. + <_>3 4 7 3 2. + <_>10 7 7 3 2. + 0 + 0.0346165895462036 + -0.0207490995526314 + 0.2854982018470764 + <_> + + <_> + + + + <_>10 5 9 6 -1. + <_>10 7 9 2 3. + 0 + -0.1296638995409012 + -0.5544648766517639 + 4.6082548797130585e-003 + <_> + + <_> + + + + <_>0 13 8 5 -1. + <_>4 13 4 5 2. + 0 + 0.0740355104207993 + 5.3174998611211777e-003 + -0.8414952754974365 + <_> + + <_> + + + + <_>12 0 6 18 -1. + <_>15 0 3 9 2. + <_>12 9 3 9 2. + 0 + 0.1017711013555527 + -7.6451660133898258e-003 + 0.3544222116470337 + <_> + + <_> + + + + <_>2 0 6 18 -1. + <_>2 0 3 9 2. + <_>5 9 3 9 2. + 0 + 0.0896587371826172 + -9.3901483342051506e-003 + 0.5057793855667114 + <_> + + <_> + + + + <_>2 0 16 14 -1. + <_>10 0 8 7 2. + <_>2 7 8 7 2. + 0 + -0.1618074029684067 + -0.6545178294181824 + 8.7116202339529991e-003 + <_> + + <_> + + + + <_>2 0 4 16 -1. + <_>2 0 2 8 2. + <_>4 8 2 8 2. + 0 + 1.8784119747579098e-003 + 0.0520644187927246 + -0.0907419472932816 + <_> + + <_> + + + + <_>12 0 8 4 -1. + <_>12 0 4 4 2. + 0 + 1.9505689851939678e-003 + -0.0540916211903095 + 0.0355062000453472 + <_> + + <_> + + + + <_>0 0 8 4 -1. + <_>4 0 4 4 2. 
+ 0 + -6.0789179988205433e-003 + 0.1223851963877678 + -0.0468037389218807 + <_> + + <_> + + + + <_>6 12 14 5 -1. + <_>6 12 7 5 2. + 0 + -0.2240325063467026 + -0.7772849202156067 + 2.3639709688723087e-003 + <_> + + <_> + + + + <_>0 12 14 5 -1. + <_>7 12 7 5 2. + 0 + -0.1303959041833878 + -0.2769264876842499 + 0.0215482898056507 + <_> + + <_> + + + + <_>8 1 12 5 -1. + <_>12 1 4 5 3. + 0 + 0.0725874230265617 + 0.0106212999671698 + -0.1627078056335449 + <_> + + <_> + + + + <_>0 1 12 5 -1. + <_>4 1 4 5 3. + 0 + 0.0731800422072411 + -0.0175192598253489 + 0.3369787037372589 + <_> + + <_> + + + + <_>3 10 14 4 -1. + <_>10 10 7 2 2. + <_>3 12 7 2 2. + 0 + -0.0345259793102741 + -0.5353869795799255 + 0.0103977099061012 + <_> + + <_> + + + + <_>0 14 20 4 -1. + <_>0 14 10 2 2. + <_>10 16 10 2 2. + 0 + 2.3753559216856956e-003 + 0.0519108287990093 + -0.0969595164060593 + <_> + + <_> + + + + <_>10 9 9 5 -1. + <_>13 9 3 5 3. + 0 + -6.8947779946029186e-003 + 0.0824099779129028 + -0.0230989996343851 + <_> + + <_> + + + + <_>1 9 9 5 -1. + <_>4 9 3 5 3. + 0 + -0.0947732925415039 + -0.7051069140434265 + 7.7322297729551792e-003 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 5.6327427737414837e-003 + 0.0179606806486845 + -0.0723070427775383 + <_> + + <_> + + + + <_>6 16 8 4 -1. + <_>10 16 4 4 2. + 0 + 6.6090249456465244e-003 + -0.0367010794579983 + 0.1370633989572525 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0249783992767334 + -0.1628139019012451 + 7.6992698013782501e-003 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>4 5 6 3 2. + <_>10 8 6 3 2. + 0 + -6.0882410034537315e-003 + 0.1055561974644661 + -0.0485074110329151 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + 0.0611615888774395 + 1.1127579491585493e-003 + -0.5665788054466248 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0387228094041348 + -0.5979735851287842 + 8.4153199568390846e-003 + <_> + + <_> + + + + <_>10 0 6 8 -1. + <_>12 0 2 8 3. + 0 + 6.2335198745131493e-003 + 0.0315630212426186 + -0.1876924037933350 + <_> + + <_> + + + + <_>1 1 18 5 -1. + <_>7 1 6 5 3. + 0 + 0.1693951040506363 + -0.0171837396919727 + 0.3144004940986633 + <_> + + <_> + + + + <_>9 2 10 10 -1. + <_>14 2 5 5 2. + <_>9 7 5 5 2. + 0 + 0.0858513414859772 + 5.7081878185272217e-003 + -0.4996680915355682 + <_> + + <_> + + + + <_>1 2 10 10 -1. + <_>1 2 5 5 2. + <_>6 7 5 5 2. + 0 + -0.0203150101006031 + -0.1235990002751350 + 0.0447048395872116 + <_> + + <_> + + + + <_>8 3 12 6 -1. + <_>14 3 6 3 2. + <_>8 6 6 3 2. + 0 + -4.0276069194078445e-003 + 0.0479572191834450 + -0.0971370562911034 + <_> + + <_> + + + + <_>1 5 8 4 -1. + <_>5 5 4 4 2. + 0 + -0.0392745099961758 + 0.1880427002906799 + -0.0297541990876198 + <_> + + <_> + + + + <_>0 3 20 12 -1. + <_>10 3 10 6 2. + <_>0 9 10 6 2. + 0 + -0.0211636293679476 + -0.1572490036487579 + 0.0396365299820900 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>5 5 5 3 2. + <_>10 8 5 3 2. + 0 + 4.0783579461276531e-003 + -0.0475628189742565 + 0.1097624972462654 + <_> + + <_> + + + + <_>9 8 6 12 -1. + <_>12 8 3 6 2. + <_>9 14 3 6 2. + 0 + 1.0180410463362932e-003 + -0.0663060918450356 + 0.0987730771303177 + <_> + + <_> + + + + <_>0 8 18 4 -1. + <_>0 8 9 2 2. + <_>9 10 9 2 2. + 0 + 2.8516049496829510e-003 + -0.0511017404496670 + 0.0969949588179588 + <_> + + <_> + + + + <_>3 14 14 4 -1. + <_>10 14 7 2 2. + <_>3 16 7 2 2. + 0 + 4.8373742029070854e-003 + 0.0408665500581265 + -0.1248036026954651 + <_> + + <_> + + + + <_>5 11 5 9 -1. + <_>5 14 5 3 3. 
+ 0 + -3.4715479705482721e-004 + 0.0417786911129951 + -0.1257454007863998 + <_> + + <_> + + + + <_>6 16 8 4 -1. + <_>6 18 8 2 2. + 0 + -6.3760261982679367e-003 + 0.1575423032045364 + -0.0416927784681320 + <_> + + <_> + + + + <_>7 4 6 12 -1. + <_>7 10 6 6 2. + 0 + -0.0125340698286891 + -0.1356544047594070 + 0.0412955693900585 + <_> + + <_> + + + + <_>9 7 7 12 -1. + <_>9 11 7 4 3. + 0 + -0.0233215503394604 + 0.1251834928989410 + -0.0134272603318095 + <_> + + <_> + + + + <_>7 7 5 9 -1. + <_>7 10 5 3 3. + 0 + 2.1691620349884033e-003 + 0.1433120071887970 + -0.0351203493773937 + <_> + + <_> + + + + <_>4 13 12 5 -1. + <_>8 13 4 5 3. + 0 + -0.0500055402517319 + 0.2150021940469742 + -0.0276284199208021 + <_> + + <_> + + + + <_>4 9 7 9 -1. + <_>4 12 7 3 3. + 0 + 0.0138181699439883 + 0.0222085006535053 + -0.2604855895042419 + <_> + + <_> + + + + <_>2 1 18 4 -1. + <_>8 1 6 4 3. + 0 + -0.1138937994837761 + -0.2643468081951141 + 5.8247619308531284e-003 + <_> + + <_> + + + + <_>7 9 6 7 -1. + <_>9 9 2 7 3. + 0 + 1.4204699546098709e-003 + -0.0715462863445282 + 0.0703791826963425 + <_> + + <_> + + + + <_>0 13 20 4 -1. + <_>0 15 20 2 2. + 0 + 0.0123296100646257 + 0.0294751301407814 + -0.1922408938407898 + <_> + + <_> + + + + <_>2 4 13 3 -1. + <_>2 5 13 1 3. + 0 + 3.4679430536925793e-003 + -0.0619209408760071 + 0.0908930897712708 + <_> + + <_> + + + + <_>9 7 7 12 -1. + <_>9 11 7 4 3. + 0 + -0.1208847984671593 + 0.4662685990333557 + -2.7361230459064245e-003 + <_> + + <_> + + + + <_>3 1 9 17 -1. + <_>6 1 3 17 3. + 0 + -0.0158275198191404 + -0.0953428372740746 + 0.0550031699240208 + <_> + + <_> + + + + <_>8 5 4 7 -1. + <_>8 5 2 7 2. + 0 + -5.3695850074291229e-003 + 0.1689102053642273 + -0.0467009507119656 + <_> + + <_> + + + + <_>6 9 4 8 -1. + <_>8 9 2 8 2. + 0 + 0.0526950806379318 + -5.6889699772000313e-003 + 0.9048786163330078 + <_> + + <_> + + + + <_>5 4 14 12 -1. + <_>12 4 7 6 2. + <_>5 10 7 6 2. + 0 + -1.1397979687899351e-003 + 0.0343166813254356 + -0.0757879018783569 + <_> + + <_> + + + + <_>0 16 18 2 -1. + <_>9 16 9 2 2. + 0 + -2.8946578968316317e-003 + 0.0754823908209801 + -0.0764665529131889 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -5.1091420464217663e-003 + -0.1229495033621788 + 0.0499727502465248 + <_> + + <_> + + + + <_>4 0 6 8 -1. + <_>6 0 2 8 3. + 0 + 1.8837359966710210e-003 + 0.0434064008295536 + -0.1257223039865494 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + 0.0154229197651148 + 0.0158312898129225 + -0.2091739028692246 + <_> + + <_> + + + + <_>7 5 6 12 -1. + <_>7 5 3 6 2. + <_>10 11 3 6 2. + 0 + 0.0216660406440496 + -0.0247134007513523 + 0.2417166978120804 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -0.0943364128470421 + 0.8038954734802246 + -2.6913180481642485e-003 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -6.0154758393764496e-003 + -0.1323174983263016 + 0.0496137104928494 + <_> + + <_> + + + + <_>6 10 14 3 -1. + <_>6 11 14 1 3. + 0 + 0.0437753200531006 + 4.5396219938993454e-003 + -0.5873274803161621 + <_> + + <_> + + + + <_>0 10 14 3 -1. + <_>0 11 14 1 3. + 0 + 1.0561950039118528e-003 + -0.0880575627088547 + 0.0712941065430641 + <_> + + <_> + + + + <_>4 4 14 3 -1. + <_>4 5 14 1 3. + 0 + -1.6394529957324266e-003 + 0.0908108428120613 + -0.0377607010304928 + <_> + + <_> + + + + <_>0 2 15 12 -1. + <_>5 2 5 12 3. + 0 + 0.2674216032028198 + 9.4182817265391350e-003 + -0.5274013876914978 + <_> + + <_> + + + + <_>14 5 6 12 -1. + <_>14 5 3 12 2. 
+ 0 + -0.2162933051586151 + -0.6112818717956543 + 5.2118571475148201e-003 + <_> + + <_> + + + + <_>2 1 16 16 -1. + <_>2 9 16 8 2. + 0 + -0.2697457075119019 + -0.7339445948600769 + 6.0041057877242565e-003 + <_> + + <_> + + + + <_>7 16 13 3 -1. + <_>7 17 13 1 3. + 0 + -6.0050850734114647e-003 + 0.1106709018349648 + -0.0206141993403435 + <_> + + <_> + + + + <_>3 5 13 4 -1. + <_>3 7 13 2 2. + 0 + 0.0492479391396046 + 0.0102871898561716 + -0.4958139061927795 + <_> + + <_> + + + + <_>9 9 7 4 -1. + <_>9 11 7 2 2. + 0 + 4.9235569313168526e-003 + 0.0148803601041436 + -0.1128747016191483 + <_> + + <_> + + + + <_>3 7 14 6 -1. + <_>3 9 14 2 3. + 0 + -8.2946997135877609e-003 + 0.5647606253623962 + -0.0104421498253942 + <_> + + <_> + + + + <_>9 9 7 4 -1. + <_>9 11 7 2 2. + 0 + 0.0235673300921917 + -2.9235871043056250e-003 + 0.2497925013303757 + <_> + + <_> + + + + <_>4 9 7 4 -1. + <_>4 11 7 2 2. + 0 + -0.0410409197211266 + 0.4003049135208130 + -0.0133126201108098 + <_> + + <_> + + + + <_>1 9 18 3 -1. + <_>1 10 18 1 3. + 0 + -5.3690220229327679e-003 + -0.2918637096881867 + 0.0167816001921892 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + 3.6616099532693624e-003 + -0.0479209609329700 + 0.1089833974838257 + <_> + + <_> + + + + <_>14 5 6 12 -1. + <_>14 5 3 12 2. + 0 + -0.0247357897460461 + 0.0672709196805954 + -0.0162079706788063 + <_> + + <_> + + + + <_>0 5 6 12 -1. + <_>3 5 3 12 2. + 0 + 8.6064152419567108e-003 + -0.0602502003312111 + 0.1067432016134262 + <_> + + <_> + + + + <_>11 8 3 10 -1. + <_>11 13 3 5 2. + 0 + -0.0338926091790199 + -0.1979532986879349 + 0.0190149694681168 + <_> + + <_> + + + + <_>0 0 3 20 -1. + <_>1 0 1 20 3. + 0 + 0.1052203029394150 + 6.0530952177941799e-003 + -0.7523800730705261 + <_> + + <_> + + + + <_>2 0 18 11 -1. + <_>8 0 6 11 3. + 0 + -5.9583578258752823e-003 + 0.0990943834185600 + -0.0355706401169300 + <_> + + <_> + + + + <_>4 4 6 5 -1. + <_>7 4 3 5 2. + 0 + 2.7306210249662399e-003 + -0.0888798087835312 + 0.0648439899086952 + <_> + + <_> + + + + <_>6 3 14 4 -1. + <_>13 3 7 2 2. + <_>6 5 7 2 2. + 0 + 4.3243571417406201e-004 + 0.0325284898281097 + -0.0914790704846382 + <_> + + <_> + + + + <_>4 4 9 6 -1. + <_>7 4 3 6 3. + 0 + -5.2608880214393139e-003 + 0.1389617025852203 + -0.0406248196959496 + <_> + + <_> + + + + <_>8 9 9 8 -1. + <_>11 9 3 8 3. + 0 + -0.1560512930154800 + -0.7317007184028626 + 2.5103189982473850e-003 + <_> + + <_> + + + + <_>3 9 9 8 -1. + <_>6 9 3 8 3. + 0 + -0.0112459901720285 + -0.1183411031961441 + 0.0522617213428020 + <_> + + <_> + + + + <_>10 6 6 10 -1. + <_>12 6 2 10 3. + 0 + -9.2654878972098231e-004 + 0.0433507785201073 + -0.0765213593840599 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>9 6 1 14 3. + 0 + 1.5148459933698177e-003 + -0.0714858397841454 + 0.0732069164514542 + <_> + + <_> + + + + <_>6 9 9 9 -1. + <_>9 9 3 9 3. + 0 + 4.6230577863752842e-003 + 0.0202118791639805 + -0.0465659610927105 + <_> + + <_> + + + + <_>4 3 9 9 -1. + <_>7 3 3 9 3. + 0 + 0.1255514025688171 + 9.2135155573487282e-003 + -0.5483170747756958 + <_> + + <_> + + + + <_>2 2 18 9 -1. + <_>8 2 6 9 3. + 0 + 0.0407516807317734 + -0.0457712486386299 + 0.0569909997284412 + <_> + + <_> + + + + <_>0 2 16 3 -1. + <_>0 3 16 1 3. + 0 + -0.0220743492245674 + -0.3907549977302551 + 0.0116547103971243 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>10 10 5 6 2. + 0 + 0.1241291984915733 + -6.0688108205795288e-003 + 0.2637670934200287 + <_> + + <_> + + + + <_>0 0 18 9 -1. + <_>6 0 6 9 3. 
+ 0 + 6.0741119086742401e-003 + 0.1076852008700371 + -0.0501398704946041 + <_> + + <_> + + + + <_>5 4 14 12 -1. + <_>12 4 7 6 2. + <_>5 10 7 6 2. + 0 + -0.1469414979219437 + -0.4345254898071289 + 5.5836569517850876e-003 + <_> + + <_> + + + + <_>0 1 18 4 -1. + <_>6 1 6 4 3. + 0 + -0.1204646006226540 + -0.5406827926635742 + 9.8318615928292274e-003 + <_> + + <_> + + + + <_>7 3 13 2 -1. + <_>7 4 13 1 2. + 0 + -9.0990159660577774e-003 + -0.1362525969743729 + 9.5357475802302361e-003 + <_> + + <_> + + + + <_>1 10 6 10 -1. + <_>1 10 3 5 2. + <_>4 15 3 5 2. + 0 + 0.0109664499759674 + -0.0313442982733250 + 0.1706863045692444 + <_> + + <_> + + + + <_>12 10 4 8 -1. + <_>12 14 4 4 2. + 0 + -0.0217633806169033 + 0.0739181786775589 + -0.0178464204072952 + <_> + + <_> + + + + <_>4 14 12 6 -1. + <_>4 14 6 3 2. + <_>10 17 6 3 2. + 0 + -0.0495787896215916 + -0.5803403258323669 + 0.0100632095709443 + <_> + + <_> + + + + <_>12 10 4 8 -1. + <_>12 14 4 4 2. + 0 + -6.6796392202377319e-003 + -0.0472803004086018 + 0.0386680699884892 + <_> + + <_> + + + + <_>4 10 4 8 -1. + <_>4 14 4 4 2. + 0 + -1.0112039744853973e-003 + 0.0454120300710201 + -0.1460335999727249 + <_> + + <_> + + + + <_>4 11 14 4 -1. + <_>11 11 7 2 2. + <_>4 13 7 2 2. + 0 + 2.5813570246100426e-003 + 0.0311124809086323 + -0.1000149995088577 + <_> + + <_> + + + + <_>2 11 14 4 -1. + <_>2 11 7 2 2. + <_>9 13 7 2 2. + 0 + 2.0418369676917791e-003 + 0.0483780615031719 + -0.1472270935773850 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>12 6 5 3 2. + <_>7 9 5 3 2. + 0 + 0.0562460683286190 + 3.7779449485242367e-003 + -0.6101362705230713 + <_> + + <_> + + + + <_>3 6 10 6 -1. + <_>3 6 5 3 2. + <_>8 9 5 3 2. + 0 + -0.0261307507753372 + 0.2624058127403259 + -0.0243136007338762 + <_> + + <_> + + + + <_>9 0 6 19 -1. + <_>11 0 2 19 3. + 0 + -0.0121510298922658 + -0.0561141297221184 + 0.0297391600906849 + <_> + + <_> + + + + <_>5 0 6 19 -1. + <_>7 0 2 19 3. + 0 + -0.0510364696383476 + 0.2795574069023132 + -0.0216835103929043 + <_> + + <_> + + + + <_>4 18 14 2 -1. + <_>4 18 7 2 2. + 0 + 0.0874446183443069 + -3.7635879125446081e-003 + 0.5271136164665222 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + 3.4982790239155293e-003 + 0.0566732287406921 + -0.0925546362996101 + <_> + + <_> + + + + <_>13 1 7 9 -1. + <_>13 4 7 3 3. + 0 + 0.0978617221117020 + 3.7442990578711033e-003 + -0.5423772931098938 + <_> + + <_> + + + + <_>0 1 7 9 -1. + <_>0 4 7 3 3. + 0 + -6.3886200077831745e-003 + -0.0974681675434113 + 0.0602992996573448 + <_> + + <_> + + + + <_>9 11 11 6 -1. + <_>9 13 11 2 3. + 0 + -0.1012831032276154 + -0.6517366766929627 + 3.4321940038353205e-003 + <_> + + <_> + + + + <_>0 11 11 6 -1. + <_>0 13 11 2 3. + 0 + -0.0393122285604477 + 0.2647699117660523 + -0.0269813109189272 + <_> + + <_> + + + + <_>2 5 16 10 -1. + <_>10 5 8 5 2. + <_>2 10 8 5 2. + 0 + 0.1141799017786980 + 7.5375889427959919e-003 + -0.6855363845825195 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 8.4078265354037285e-003 + -0.0309730898588896 + 0.1720042973756790 + <_> + + <_> + + + + <_>11 3 8 4 -1. + <_>11 5 8 2 2. + 0 + -1.5489499783143401e-003 + 0.0464548096060753 + -0.0692617669701576 + <_> + + <_> + + + + <_>1 4 14 12 -1. + <_>1 4 7 6 2. + <_>8 10 7 6 2. + 0 + 2.9730569804087281e-004 + 0.0377727001905441 + -0.1376706957817078 + <_> + + <_> + + + + <_>6 12 13 3 -1. + <_>6 13 13 1 3. + 0 + 2.8460770845413208e-003 + -0.0431823208928108 + 0.0996346101164818 + <_> + + <_> + + + + <_>2 17 13 3 -1. + <_>2 18 13 1 3. 
+ 0 + 0.0491444207727909 + 5.9465290978550911e-003 + -0.8236659765243530 + <_> + + <_> + + + + <_>1 11 18 6 -1. + <_>1 13 18 2 3. + 0 + 0.0102860201150179 + 0.0285910908132792 + -0.1594199985265732 + <_> + + <_> + + + + <_>6 2 7 18 -1. + <_>6 11 7 9 2. + 0 + 0.0199762806296349 + -0.0296170301735401 + 0.1594306975603104 + <_> + + <_> + + + + <_>11 3 8 4 -1. + <_>11 5 8 2 2. + 0 + 0.0235334094613791 + 7.5594270601868629e-003 + -0.2304113060235977 + <_> + + <_> + + + + <_>1 1 16 6 -1. + <_>1 1 8 3 2. + <_>9 4 8 3 2. + 0 + -9.0482197701931000e-003 + -0.1240869984030724 + 0.0416150018572807 + <_> + + <_> + + + + <_>16 1 4 14 -1. + <_>18 1 2 7 2. + <_>16 8 2 7 2. + 0 + -3.8635660894215107e-003 + 0.0878112167119980 + -0.0415111817419529 + <_> + + <_> + + + + <_>0 1 4 14 -1. + <_>0 1 2 7 2. + <_>2 8 2 7 2. + 0 + -2.7298410423099995e-003 + 0.0947126671671867 + -0.0528389587998390 + <_> + + <_> + + + + <_>6 7 14 4 -1. + <_>13 7 7 2 2. + <_>6 9 7 2 2. + 0 + -4.5442068949341774e-003 + -0.1074846014380455 + 0.0177447702735662 + <_> + + <_> + + + + <_>3 0 6 5 -1. + <_>6 0 3 5 2. + 0 + 2.3271010722965002e-003 + -0.0838262364268303 + 0.0572107098996639 + <_> + + <_> + + + + <_>4 2 12 6 -1. + <_>4 4 12 2 3. + 0 + -0.0124095501378179 + 0.2310030013322830 + -0.0221104193478823 + <_> + + <_> + + + + <_>0 7 14 4 -1. + <_>0 7 7 2 2. + <_>7 9 7 2 2. + 0 + -4.5268908143043518e-003 + -0.1624415069818497 + 0.0325643494725227 + <_> + + <_> + + + + <_>8 6 5 9 -1. + <_>8 9 5 3 3. + 0 + -4.4666860048891976e-005 + 0.2434111982584000 + -0.0267028007656336 + <_> + + <_> + + + + <_>2 7 13 2 -1. + <_>2 8 13 1 2. + 0 + 7.7015289571136236e-004 + -0.1285865008831024 + 0.0423081517219543 + <_> + + <_> + + + + <_>9 12 10 6 -1. + <_>14 12 5 3 2. + <_>9 15 5 3 2. + 0 + 0.0448630489408970 + 0.0107819996774197 + -0.3581424057483673 + <_> + + <_> + + + + <_>5 6 6 10 -1. + <_>7 6 2 10 3. + 0 + 0.0378694906830788 + -0.0149663602933288 + 0.3419500887393951 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + -8.3092376589775085e-003 + -0.2751466035842896 + 0.0201395396143198 + <_> + + <_> + + + + <_>2 2 15 5 -1. + <_>7 2 5 5 3. + 0 + -0.0432901196181774 + 0.3003655970096588 + -0.0194930192083120 + <_> + + <_> + + + + <_>7 3 13 2 -1. + <_>7 4 13 1 2. + 0 + -0.0100756296887994 + -0.1226257979869843 + 9.1246366500854492e-003 + <_> + + <_> + + + + <_>0 0 2 13 -1. + <_>1 0 1 13 2. + 0 + -3.3486529719084501e-003 + 0.1179025992751122 + -0.0410501882433891 + <_> + + <_> + + + + <_>14 1 3 17 -1. + <_>15 1 1 17 3. + 0 + -6.4645247766748071e-004 + -0.0781549364328384 + 0.0469905696809292 + <_> + + <_> + + + + <_>3 1 3 17 -1. + <_>4 1 1 17 3. + 0 + 0.0352473706007004 + 0.0103652700781822 + -0.5150712728500366 + <_> + + <_> + + + + <_>12 1 7 6 -1. + <_>12 3 7 2 3. + 0 + 3.5965928691439331e-004 + -0.0779368132352829 + 0.0302752405405045 + <_> + + <_> + + + + <_>3 2 3 17 -1. + <_>4 2 1 17 3. + 0 + -1.5898740384727716e-003 + -0.1059432029724121 + 0.0500361509621143 + <_> + + <_> + + + + <_>14 0 6 18 -1. + <_>16 0 2 18 3. + 0 + -0.0214083008468151 + 0.1164933964610100 + -0.0375407002866268 + <_> + + <_> + + + + <_>3 5 7 6 -1. + <_>3 7 7 2 3. + 0 + -2.7612380217760801e-003 + 0.0347518101334572 + -0.1371853053569794 + <_> + + <_> + + + + <_>8 4 6 12 -1. + <_>11 4 3 6 2. + <_>8 10 3 6 2. + 0 + 6.4307968132197857e-003 + -0.0136674297973514 + 0.1493856012821198 + <_> + + <_> + + + + <_>4 4 12 10 -1. + <_>4 4 6 5 2. + <_>10 9 6 5 2. + 0 + -6.9555612280964851e-003 + -0.1217145994305611 + 0.0561001896858215 + <_> + + <_> + + + + <_>14 0 6 18 -1. 
+ <_>16 0 2 18 3. + 0 + -0.2765496969223023 + -0.8507738709449768 + 3.8885050453245640e-003 + <_> + + <_> + + + + <_>0 0 6 18 -1. + <_>2 0 2 18 3. + 0 + 4.7567309811711311e-003 + -0.0655944272875786 + 0.0759470611810684 + <_> + + <_> + + + + <_>9 0 3 18 -1. + <_>9 9 3 9 2. + 0 + 0.0892180502414703 + 6.5016360022127628e-003 + -0.3203299045562744 + <_> + + <_> + + + + <_>3 2 12 6 -1. + <_>3 5 12 3 2. + 0 + 0.0677481517195702 + -0.0118788704276085 + 0.4495449066162109 + <_> + + <_> + + + + <_>15 2 5 9 -1. + <_>15 5 5 3 3. + 0 + 0.0453361906111240 + 7.4317739345133305e-003 + -0.4314487874507904 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 0.0109658502042294 + 0.0251350104808807 + -0.2035907059907913 + <_> + + <_> + + + + <_>17 3 3 12 -1. + <_>17 9 3 6 2. + 0 + -0.0659385621547699 + 0.4552414119243622 + -7.5815711170434952e-003 + <_> + + <_> + + + + <_>0 3 3 12 -1. + <_>0 9 3 6 2. + 0 + -0.0422701090574265 + 0.3847005069255829 + -0.0116722797974944 + <_> + + <_> + + + + <_>14 10 5 9 -1. + <_>14 13 5 3 3. + 0 + -6.3518402166664600e-003 + -0.0870101675391197 + 0.0341599211096764 + <_> + + <_> + + + + <_>1 0 18 8 -1. + <_>1 4 18 4 2. + 0 + 0.0322698801755905 + -0.0407114401459694 + 0.1246946975588799 + <_> + + <_> + + + + <_>11 3 8 4 -1. + <_>11 5 8 2 2. + 0 + -0.0390683114528656 + -0.1040311977267265 + 6.7032999359071255e-003 + <_> + + <_> + + + + <_>1 3 8 4 -1. + <_>1 5 8 2 2. + 0 + -1.0384949855506420e-003 + 0.0584225282073021 + -0.1015489026904106 + <_> + + <_> + + + + <_>15 0 5 9 -1. + <_>15 3 5 3 3. + 0 + 0.0297406502068043 + 0.0125960595905781 + -0.1517045050859451 + <_> + + <_> + + + + <_>4 3 12 3 -1. + <_>10 3 6 3 2. + 0 + 5.3193639032542706e-003 + -0.0468430891633034 + 0.1100525036454201 + <_> + + <_> + + + + <_>5 7 10 5 -1. + <_>5 7 5 5 2. + 0 + -3.2385820522904396e-003 + -0.1030983999371529 + 0.0506860613822937 + <_> + + <_> + + + + <_>2 6 16 4 -1. + <_>2 6 8 2 2. + <_>10 8 8 2 2. + 0 + 4.2344750836491585e-003 + -0.0495824292302132 + 0.1209215000271797 + <_> + + <_> + + + + <_>15 0 5 9 -1. + <_>15 3 5 3 3. + 0 + -0.0747866630554199 + -0.4689513146877289 + 3.8582859560847282e-003 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + 8.5299033671617508e-003 + 0.0388061590492725 + -0.1202204972505570 + <_> + + <_> + + + + <_>11 0 3 12 -1. + <_>11 6 3 6 2. + 0 + -0.0486625693738461 + 0.1611399054527283 + -0.0117171304300427 + <_> + + <_> + + + + <_>0 1 6 6 -1. + <_>0 4 6 3 2. + 0 + -1.3677199603989720e-003 + -0.0853037163615227 + 0.0553941093385220 + <_> + + <_> + + + + <_>7 1 7 18 -1. + <_>7 10 7 9 2. + 0 + -5.8111362159252167e-003 + 0.0470392704010010 + -0.0517368689179420 + <_> + + <_> + + + + <_>0 2 18 6 -1. + <_>0 2 9 3 2. + <_>9 5 9 3 2. + 0 + -3.9951619692146778e-003 + -0.0781671628355980 + 0.0639193430542946 + <_> + + <_> + + + + <_>5 8 13 2 -1. + <_>5 9 13 1 2. + 0 + 3.0817699152976274e-003 + -0.0692898333072662 + 0.0282425396144390 + <_> + + <_> + + + + <_>6 8 3 10 -1. + <_>6 13 3 5 2. + 0 + -0.0462794713675976 + -0.3476049005985260 + 0.0138789098709822 + <_> + + <_> + + + + <_>6 11 13 2 -1. + <_>6 12 13 1 2. + 0 + -0.0187257807701826 + 0.1522226929664612 + -0.0157240908592939 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>1 18 18 1 3. + 0 + -0.0214453693479300 + -0.3596273064613342 + 0.0127642601728439 + <_> + + <_> + + + + <_>1 3 18 2 -1. + <_>1 3 9 2 2. + 0 + -0.0910034775733948 + -0.7961595058441162 + 4.9090441316366196e-003 + <_> + + <_> + + + + <_>3 17 10 3 -1. + <_>8 17 5 3 2. 
+ 0 + 2.5607119314372540e-003 + -0.0545516908168793 + 0.0844034105539322 + <_> + + <_> + + + + <_>1 15 18 4 -1. + <_>7 15 6 4 3. + 0 + -0.0136620998382568 + 0.0949872508645058 + -0.0620368197560310 + <_> + + <_> + + + + <_>5 5 6 9 -1. + <_>8 5 3 9 2. + 0 + 9.2437807470560074e-003 + 0.0538223311305046 + -0.0992365106940269 + <_> + + <_> + + + + <_>4 6 12 11 -1. + <_>8 6 4 11 3. + 0 + -0.0146121401339769 + -0.1524866074323654 + 0.0429055504500866 + <_> + + <_> + + + + <_>0 0 8 10 -1. + <_>0 0 4 5 2. + <_>4 5 4 5 2. + 0 + -0.0395846590399742 + 0.1588324010372162 + -0.0354844294488430 + <_> + + <_> + + + + <_>2 0 18 3 -1. + <_>8 0 6 3 3. + 0 + -6.7460699938237667e-003 + 0.1174926012754440 + -0.0379344411194324 + <_> + + <_> + + + + <_>5 9 9 9 -1. + <_>8 9 3 9 3. + 0 + 2.0449559669941664e-003 + 0.0616261884570122 + -0.0944093465805054 + <_> + + <_> + + + + <_>11 3 2 17 -1. + <_>11 3 1 17 2. + 0 + -0.0151465600356460 + -0.3388757109642029 + 6.8320450372993946e-003 + <_> + + <_> + + + + <_>7 0 2 20 -1. + <_>8 0 1 20 2. + 0 + -2.0916219800710678e-003 + -0.1482957005500794 + 0.0333583503961563 + <_> + + <_> + + + + <_>10 1 8 18 -1. + <_>10 1 4 18 2. + 0 + 0.0132743902504444 + -0.0381690002977848 + 0.0463796295225620 + <_> + + <_> + + + + <_>4 5 8 8 -1. + <_>4 5 4 4 2. + <_>8 9 4 4 2. + 0 + 0.0124043300747871 + -0.0184986796230078 + 0.2795296013355255 + <_> + + <_> + + + + <_>6 1 12 14 -1. + <_>12 1 6 7 2. + <_>6 8 6 7 2. + 0 + -0.0236782599240541 + -0.0471428595483303 + 0.0231413394212723 + <_> + + <_> + + + + <_>2 1 8 18 -1. + <_>6 1 4 18 2. + 0 + 0.0675759837031364 + -0.0185984000563622 + 0.2748115062713623 + <_> + + <_> + + + + <_>1 5 18 7 -1. + <_>7 5 6 7 3. + 0 + 0.0763591229915619 + 0.0291781295090914 + -0.2057282030582428 + <_> + + <_> + + + + <_>3 4 6 16 -1. + <_>3 4 3 8 2. + <_>6 12 3 8 2. + 0 + -0.1091888993978500 + 0.6257721185684204 + -9.8246810957789421e-003 + <_> + + <_> + + + + <_>12 3 4 14 -1. + <_>14 3 2 7 2. + <_>12 10 2 7 2. + 0 + 1.2964319903403521e-003 + -0.0317764990031719 + 0.0678339302539825 + <_> + + <_> + + + + <_>4 3 4 14 -1. + <_>4 3 2 7 2. + <_>6 10 2 7 2. + 0 + 0.0412186793982983 + 8.5701625794172287e-003 + -0.5837911963462830 + <_> + + <_> + + + + <_>8 12 6 6 -1. + <_>8 12 3 6 2. + 0 + -1.8773629562929273e-003 + 0.0532635413110256 + -0.0417027883231640 + <_> + + <_> + + + + <_>6 12 6 6 -1. + <_>9 12 3 6 2. + 0 + -2.9402649961411953e-003 + 0.0869319215416908 + -0.0713440701365471 + <_> + + <_> + + + + <_>4 1 14 3 -1. + <_>4 2 14 1 3. + 0 + -0.0308337491005659 + -0.3943957090377808 + 6.0907239094376564e-003 + <_> + + <_> + + + + <_>3 5 10 6 -1. + <_>3 5 5 3 2. + <_>8 8 5 3 2. + 0 + -3.7960989866405725e-003 + 0.0741505324840546 + -0.0618812814354897 + <_> + + <_> + + + + <_>6 6 14 4 -1. + <_>13 6 7 2 2. + <_>6 8 7 2 2. + 0 + -6.3087488524615765e-003 + -0.1166246980428696 + 0.0250167604535818 + <_> + + <_> + + + + <_>0 4 20 8 -1. + <_>0 4 10 4 2. + <_>10 8 10 4 2. + 0 + 4.0001370944082737e-003 + -0.0572367310523987 + 0.0975897014141083 + <_> + + <_> + + + + <_>12 5 8 8 -1. + <_>16 5 4 4 2. + <_>12 9 4 4 2. + 0 + 0.0677529573440552 + 9.5101362094283104e-003 + -0.3377701938152313 + <_> + + <_> + + + + <_>1 1 15 6 -1. + <_>1 3 15 2 3. + 0 + -0.0923537835478783 + 0.7901524901390076 + -6.2939748167991638e-003 + <_> + + <_> + + + + <_>3 6 16 3 -1. + <_>3 6 8 3 2. + 0 + -0.0240508392453194 + -0.1558571010828018 + 0.0180999301373959 + <_> + + <_> + + + + <_>7 3 6 5 -1. + <_>10 3 3 5 2. 
+ 0 + 3.2272089738398790e-003 + -0.0479367412626743 + 0.1073589995503426 + <_> + + <_> + + + + <_>7 4 9 5 -1. + <_>10 4 3 5 3. + 0 + -7.2444709949195385e-003 + 0.0967755392193794 + -0.0240959003567696 + <_> + + <_> + + + + <_>1 6 16 3 -1. + <_>9 6 8 3 2. + 0 + -0.1088825985789299 + -0.8125579953193665 + 6.0875630006194115e-003 + <_> + + <_> + + + + <_>9 0 3 15 -1. + <_>9 5 3 5 3. + 0 + -0.0140772303566337 + -0.1335898935794830 + 0.0254211407154799 + <_> + + <_> + + + + <_>0 1 2 14 -1. + <_>1 1 1 14 2. + 0 + -0.0300713703036308 + 0.3542703986167908 + -0.0135534303262830 + <_> + + <_> + + + + <_>12 5 3 13 -1. + <_>13 5 1 13 3. + 0 + 0.0349857993423939 + -3.0686240643262863e-003 + 0.4631117880344391 + <_> + + <_> + + + + <_>5 5 3 13 -1. + <_>6 5 1 13 3. + 0 + 0.0183547697961330 + 0.0112180197611451 + -0.4614357948303223 + <_> + + <_> + + + + <_>4 6 16 8 -1. + <_>4 10 16 4 2. + 0 + -0.0643064081668854 + -0.6120715141296387 + 1.9155009649693966e-003 + <_> + + <_> + + + + <_>3 7 7 6 -1. + <_>3 10 7 3 2. + 0 + 0.0820961296558380 + -8.8210906833410263e-003 + 0.5488597750663757 + <_> + + <_> + + + + <_>0 3 20 10 -1. + <_>0 8 20 5 2. + 0 + 7.7698810491710901e-004 + 0.1324795037508011 + -0.0339151285588741 + <_> + + <_> + + + + <_>0 3 7 6 -1. + <_>0 5 7 2 3. + 0 + 0.0645689815282822 + 6.4043831080198288e-003 + -0.7715017795562744 + <_> + + <_> + + + + <_>11 1 8 4 -1. + <_>11 3 8 2 2. + 0 + -0.0158334895968437 + -0.1949895024299622 + 7.5541301630437374e-003 + <_> + + <_> + + + + <_>1 1 8 4 -1. + <_>1 3 8 2 2. + 0 + 0.0341256186366081 + -0.0159152895212173 + 0.2971644103527069 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -0.0126150501891971 + -0.2465070933103561 + 0.0226997993886471 + <_> + + <_> + + + + <_>5 0 10 6 -1. + <_>5 2 10 2 3. + 0 + 0.0182726792991161 + -0.0405939593911171 + 0.1169349029660225 + <_> + + <_> + + + + <_>6 3 8 10 -1. + <_>6 8 8 5 2. + 0 + -6.6374349407851696e-003 + -0.1455710977315903 + 0.0353539101779461 + <_> + + <_> + + + + <_>7 2 5 12 -1. + <_>7 8 5 6 2. + 0 + -2.6520919054746628e-003 + 0.0763825923204422 + -0.0666886270046234 + <_> + + <_> + + + + <_>7 7 6 12 -1. + <_>9 7 2 12 3. + 0 + 2.2452129051089287e-003 + -0.0897598788142204 + 0.0550913698971272 + <_> + + <_> + + + + <_>7 3 6 8 -1. + <_>9 3 2 8 3. + 0 + -4.4775419519282877e-004 + 0.2126415967941284 + -0.0266206394881010 + <_> + + <_> + + + + <_>10 0 4 16 -1. + <_>10 8 4 8 2. + 0 + -0.1111525967717171 + -0.4313994944095612 + 4.6484731137752533e-003 + <_> + + <_> + + + + <_>0 6 16 8 -1. + <_>0 10 16 4 2. + 0 + -0.0115787703543901 + -0.3529626131057739 + 0.0127505399286747 + <_> + + <_> + + + + <_>3 8 16 4 -1. + <_>3 10 16 2 2. + 0 + -0.0252901706844568 + 0.5138598084449768 + -6.7363809794187546e-003 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0322323404252529 + -0.5769019126892090 + 7.7741048298776150e-003 + <_> + + <_> + + + + <_>10 8 9 4 -1. + <_>10 10 9 2 2. + 0 + -4.1698799468576908e-003 + -0.1751931011676788 + 0.0110186999663711 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>7 10 6 5 2. + 0 + -0.0206645000725985 + 0.2582195103168488 + -0.0179202891886234 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 10 4 4 3. + 0 + -1.0834420099854469e-003 + -0.1317851990461350 + 0.0254197493195534 + <_> + + <_> + + + + <_>0 7 13 9 -1. + <_>0 10 13 3 3. + 0 + -9.5458701252937317e-003 + 0.4496468901634216 + -0.0113150300458074 + <_> + + <_> + + + + <_>6 11 8 8 -1. + <_>10 11 4 4 2. + <_>6 15 4 4 2. 
+ 0 + 0.0532321818172932 + 7.4498020112514496e-003 + -0.6812205910682678 + <_> + + <_> + + + + <_>0 15 10 4 -1. + <_>5 15 5 4 2. + 0 + -0.1385252028703690 + -0.6011788249015808 + 6.5434179268777370e-003 + <_> + + <_> + + + + <_>4 18 16 2 -1. + <_>4 18 8 2 2. + 0 + 0.0171734392642975 + -0.0251205097883940 + 0.0865166336297989 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 14 8 4 2. + 0 + 0.0399471893906593 + 5.8647249825298786e-003 + -0.7465305924415588 + <_> + + <_> + + + + <_>8 13 7 6 -1. + <_>8 15 7 2 3. + 0 + 0.0206470098346472 + -0.0102260001003742 + 0.1722760945558548 + <_> + + <_> + + + + <_>7 7 5 8 -1. + <_>7 11 5 4 2. + 0 + -1.8602909985929728e-003 + -0.0657679736614227 + 0.0692484900355339 + <_> + + <_> + + + + <_>6 7 10 12 -1. + <_>6 11 10 4 3. + 0 + -0.0341060683131218 + 0.1590873003005981 + -0.0132416300475597 + <_> + + <_> + + + + <_>6 13 6 7 -1. + <_>8 13 2 7 3. + 0 + 6.3425069674849510e-003 + 0.0351191498339176 + -0.1343608051538467 + <_> + + <_> + + + + <_>14 11 4 7 -1. + <_>14 11 2 7 2. + 0 + 1.6866199439391494e-003 + -0.0434017702937126 + 0.0506066307425499 + <_> + + <_> + + + + <_>4 6 6 10 -1. + <_>6 6 2 10 3. + 0 + -3.0595089774578810e-003 + 0.0569767095148563 + -0.0810745283961296 + <_> + + <_> + + + + <_>13 4 2 16 -1. + <_>13 4 1 16 2. + 0 + 2.7664829976856709e-003 + 0.0204970091581345 + -0.0809638276696205 + <_> + + <_> + + + + <_>5 4 2 16 -1. + <_>6 4 1 16 2. + 0 + -3.2909188885241747e-003 + -0.1080378964543343 + 0.0462379604578018 + <_> + + <_> + + + + <_>8 3 4 16 -1. + <_>10 3 2 8 2. + <_>8 11 2 8 2. + 0 + 0.0172444004565477 + -0.0251270607113838 + 0.2459103018045425 + <_> + + <_> + + + + <_>8 0 3 18 -1. + <_>8 9 3 9 2. + 0 + 0.0911615863442421 + 0.0101749803870916 + -0.4698387980461121 + <_> + + <_> + + + + <_>4 4 13 2 -1. + <_>4 5 13 1 2. + 0 + 2.5459621101617813e-003 + -0.0300037506967783 + 0.1480046957731247 + <_> + + <_> + + + + <_>0 2 14 2 -1. + <_>0 3 14 1 2. + 0 + 1.7582690343260765e-003 + 0.0544006898999214 + -0.0774442702531815 + <_> + + <_> + + + + <_>14 11 4 7 -1. + <_>14 11 2 7 2. + 0 + -1.6833960544317961e-003 + 0.0818381235003471 + -0.0437511987984180 + <_> + + <_> + + + + <_>0 2 13 2 -1. + <_>0 3 13 1 2. + 0 + -7.6617579907178879e-004 + -0.1356440037488937 + 0.0360419489443302 + <_> + + <_> + + + + <_>14 11 4 7 -1. + <_>14 11 2 7 2. + 0 + 1.1155450483784080e-003 + -0.0482638888061047 + 0.0502734482288361 + <_> + + <_> + + + + <_>2 11 4 7 -1. + <_>4 11 2 7 2. + 0 + -2.6005289983004332e-003 + 0.0887934863567352 + -0.0545542091131210 + <_> + + <_> + + + + <_>13 0 3 13 -1. + <_>14 0 1 13 3. + 0 + -3.2424980308860540e-003 + -0.1315919011831284 + 0.0342485085129738 + <_> + + <_> + + + + <_>2 10 5 6 -1. + <_>2 13 5 3 2. + 0 + -1.4817930059507489e-004 + 0.0378754287958145 + -0.1222522035241127 + <_> + + <_> + + + + <_>14 10 5 9 -1. + <_>14 13 5 3 3. + 0 + 0.0115466397255659 + 0.0153709696605802 + -0.1028624027967453 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 2.4446300230920315e-003 + -0.0517830513417721 + 0.1073507964611054 + <_> + + <_> + + + + <_>5 12 13 3 -1. + <_>5 13 13 1 3. + 0 + 4.5723789371550083e-003 + -0.0363621003925800 + 0.1328985989093781 + <_> + + <_> + + + + <_>0 13 17 6 -1. + <_>0 15 17 2 3. + 0 + -0.0119383400306106 + -0.1088235005736351 + 0.0476989001035690 + <_> + + <_> + + + + <_>5 15 13 3 -1. + <_>5 16 13 1 3. + 0 + -4.1671381331980228e-003 + 0.1163709983229637 + -0.0306387804448605 + -1.2181390523910522 + 41 + -1 + <_> + + + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. 
+ 0 + 0.0336596183478832 + -0.1557604074478149 + 0.1910901069641113 + <_> + + <_> + + + + <_>9 4 2 14 -1. + <_>9 11 2 7 2. + 0 + -1.5392389614135027e-003 + 0.0725277364253998 + -0.2880895137786865 + <_> + + <_> + + + + <_>1 15 13 3 -1. + <_>1 16 13 1 3. + 0 + 1.5648789703845978e-003 + -0.1132922023534775 + 0.1505738943815231 + <_> + + <_> + + + + <_>13 5 4 8 -1. + <_>13 9 4 4 2. + 0 + 5.6565739214420319e-004 + -0.4050228893756867 + 0.0302351005375385 + <_> + + <_> + + + + <_>4 5 4 14 -1. + <_>4 5 2 7 2. + <_>6 12 2 7 2. + 0 + -2.9683491447940469e-004 + -0.1259232014417648 + 0.1035299971699715 + <_> + + <_> + + + + <_>13 5 4 8 -1. + <_>13 9 4 4 2. + 0 + 4.3946141377091408e-003 + -0.1058242022991180 + 0.0231637507677078 + <_> + + <_> + + + + <_>2 8 8 8 -1. + <_>2 8 4 4 2. + <_>6 12 4 4 2. + 0 + 3.2444300595670938e-003 + 0.0501885600388050 + -0.2547726035118103 + <_> + + <_> + + + + <_>13 6 6 9 -1. + <_>13 9 6 3 3. + 0 + 3.8864749949425459e-003 + -0.1433265954256058 + 0.0298710707575083 + <_> + + <_> + + + + <_>4 0 5 9 -1. + <_>4 3 5 3 3. + 0 + 3.3563380129635334e-003 + -0.1873977035284042 + 0.0613545216619968 + <_> + + <_> + + + + <_>13 4 3 10 -1. + <_>13 9 3 5 2. + 0 + 0.0197976995259523 + 0.0275679193437099 + -0.0731898769736290 + <_> + + <_> + + + + <_>3 5 4 8 -1. + <_>3 9 4 4 2. + 0 + 3.3829871099442244e-003 + -0.2691569030284882 + 0.0475612208247185 + <_> + + <_> + + + + <_>10 10 8 6 -1. + <_>10 12 8 2 3. + 0 + 5.0223460420966148e-003 + 0.0425726696848869 + -0.2009748965501785 + <_> + + <_> + + + + <_>1 17 13 3 -1. + <_>1 18 13 1 3. + 0 + 1.4903279952704906e-003 + -0.1016063988208771 + 0.1129127964377403 + <_> + + <_> + + + + <_>10 6 10 6 -1. + <_>15 6 5 3 2. + <_>10 9 5 3 2. + 0 + -5.5050072260200977e-003 + -0.2176041007041931 + 0.0250673796981573 + <_> + + <_> + + + + <_>7 5 6 11 -1. + <_>9 5 2 11 3. + 0 + 4.1127130389213562e-003 + -0.1370330005884171 + 0.0665366873145103 + <_> + + <_> + + + + <_>6 1 9 6 -1. + <_>9 1 3 6 3. + 0 + 0.0194422602653503 + 0.0422539114952087 + -0.1173110008239746 + <_> + + <_> + + + + <_>1 11 13 3 -1. + <_>1 12 13 1 3. + 0 + -0.0194458700716496 + 0.2861663103103638 + -0.0304230898618698 + <_> + + <_> + + + + <_>4 0 13 3 -1. + <_>4 1 13 1 3. + 0 + -1.5500449808314443e-003 + -0.1515711992979050 + 0.0637232363224030 + <_> + + <_> + + + + <_>1 2 14 12 -1. + <_>1 2 7 6 2. + <_>8 8 7 6 2. + 0 + -3.2575910445302725e-003 + 0.0610639490187168 + -0.1300669014453888 + <_> + + <_> + + + + <_>13 4 4 14 -1. + <_>15 4 2 7 2. + <_>13 11 2 7 2. + 0 + 8.5774611216038465e-004 + -0.0620512887835503 + 0.0548092909157276 + <_> + + <_> + + + + <_>3 4 4 14 -1. + <_>3 4 2 7 2. + <_>5 11 2 7 2. + 0 + 6.8592262687161565e-004 + -0.0928287133574486 + 0.0922878533601761 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 0.0489056594669819 + -0.0120980404317379 + 0.2467487007379532 + <_> + + <_> + + + + <_>1 15 7 4 -1. + <_>1 17 7 2 2. + 0 + -4.6415459364652634e-003 + -0.1710343956947327 + 0.0519001483917236 + <_> + + <_> + + + + <_>16 0 4 7 -1. + <_>16 0 2 7 2. + 0 + -9.9253775551915169e-003 + 0.1682472974061966 + -0.0437427312135696 + <_> + + <_> + + + + <_>1 2 18 2 -1. + <_>1 3 18 1 2. + 0 + -7.2820088826119900e-004 + -0.1576201021671295 + 0.0492832399904728 + <_> + + <_> + + + + <_>16 0 4 7 -1. + <_>16 0 2 7 2. + 0 + 7.1829417720437050e-003 + -0.0750838518142700 + 0.1567766070365906 + <_> + + <_> + + + + <_>3 2 14 3 -1. + <_>3 3 14 1 3. + 0 + 7.4819842120632529e-004 + 0.0943036824464798 + -0.0944104865193367 + <_> + + <_> + + + + <_>11 13 6 7 -1. 
+ <_>13 13 2 7 3. + 0 + 0.0138563197106123 + 0.0422500297427177 + -0.2404627948999405 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -5.0514908507466316e-003 + 0.2017091959714890 + -0.0449724793434143 + <_> + + <_> + + + + <_>1 7 19 12 -1. + <_>1 11 19 4 3. + 0 + -2.5696419179439545e-003 + -0.1400468945503235 + 0.0417545102536678 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + 0.0542757511138916 + -0.0260947998613119 + 0.2837474048137665 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + -0.0372994691133499 + -0.5828117728233337 + 0.0135019496083260 + <_> + + <_> + + + + <_>7 9 6 10 -1. + <_>7 9 3 5 2. + <_>10 14 3 5 2. + 0 + 3.0674990266561508e-003 + 0.0562241785228252 + -0.1199505031108856 + <_> + + <_> + + + + <_>4 6 13 3 -1. + <_>4 7 13 1 3. + 0 + -3.5402809735387564e-003 + 0.0665154680609703 + -0.1183426976203919 + <_> + + <_> + + + + <_>3 11 7 4 -1. + <_>3 13 7 2 2. + 0 + 4.1401982307434082e-003 + 0.0209880191832781 + -0.3180744051933289 + <_> + + <_> + + + + <_>16 0 4 15 -1. + <_>16 0 2 15 2. + 0 + -0.0111835598945618 + 0.1246713995933533 + -0.0417979098856449 + <_> + + <_> + + + + <_>0 3 14 4 -1. + <_>0 3 7 2 2. + <_>7 5 7 2 2. + 0 + 1.0800679447129369e-003 + 0.0455484911799431 + -0.1585731059312820 + <_> + + <_> + + + + <_>7 0 8 10 -1. + <_>11 0 4 5 2. + <_>7 5 4 5 2. + 0 + -7.7602718956768513e-003 + -0.1703172028064728 + 0.0339895300567150 + <_> + + <_> + + + + <_>0 2 20 2 -1. + <_>10 2 10 2 2. + 0 + -3.1192360911518335e-003 + 0.0968178808689117 + -0.0860225334763527 + <_> + + <_> + + + + <_>7 6 10 3 -1. + <_>7 6 5 3 2. + 0 + -0.0136733800172806 + -0.2253659963607788 + 0.0155871696770191 + <_> + + <_> + + + + <_>3 6 10 3 -1. + <_>8 6 5 3 2. + 0 + -2.0611209329217672e-003 + -0.1526986062526703 + 0.0502276793122292 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + 2.2635459899902344e-003 + -0.0428894609212875 + 0.0768185630440712 + <_> + + <_> + + + + <_>0 4 18 16 -1. + <_>6 4 6 16 3. + 0 + -0.0345300808548927 + 0.1287443935871124 + -0.0676603168249130 + <_> + + <_> + + + + <_>15 0 4 19 -1. + <_>15 0 2 19 2. + 0 + 6.1309239827096462e-003 + -0.0634560585021973 + 0.0642376467585564 + <_> + + <_> + + + + <_>0 6 10 6 -1. + <_>0 6 5 3 2. + <_>5 9 5 3 2. + 0 + -0.0101712802425027 + -0.2919202148914337 + 0.0266455095261335 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>17 0 3 5 2. + <_>14 5 3 5 2. + 0 + -0.1306065022945404 + -0.9629706740379334 + 1.5367489540949464e-003 + <_> + + <_> + + + + <_>0 0 6 10 -1. + <_>0 0 3 5 2. + <_>3 5 3 5 2. + 0 + 6.8621779792010784e-003 + -0.0472395196557045 + 0.1544039994478226 + <_> + + <_> + + + + <_>9 0 9 5 -1. + <_>12 0 3 5 3. + 0 + 1.2950079981237650e-003 + -0.0711223483085632 + 0.0586972385644913 + <_> + + <_> + + + + <_>5 0 8 10 -1. + <_>5 0 4 5 2. + <_>9 5 4 5 2. + 0 + -5.6443549692630768e-003 + -0.1726133972406387 + 0.0447693094611168 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.1634611040353775 + -0.0215368308126926 + 0.3682580888271332 + <_> + + <_> + + + + <_>0 0 14 3 -1. + <_>0 1 14 1 3. + 0 + 0.0141706001013517 + 0.0234620198607445 + -0.3049874901771545 + <_> + + <_> + + + + <_>16 0 4 12 -1. + <_>16 0 2 12 2. + 0 + -0.1067991033196449 + 0.3148567974567413 + -9.1049326583743095e-003 + <_> + + <_> + + + + <_>1 0 4 19 -1. + <_>3 0 2 19 2. + 0 + 7.0258649066090584e-003 + -0.0654181912541389 + 0.1020023971796036 + <_> + + <_> + + + + <_>14 10 6 7 -1. + <_>14 10 3 7 2. 
+ 0 + -4.3358937837183475e-003 + 0.1160119995474815 + -0.0550410598516464 + <_> + + <_> + + + + <_>1 6 9 14 -1. + <_>4 6 3 14 3. + 0 + 0.0353942401707172 + 0.0277954805642366 + -0.2553454935550690 + <_> + + <_> + + + + <_>9 2 6 9 -1. + <_>9 5 6 3 3. + 0 + 0.0215996801853180 + -0.0105139603838325 + 0.2608759105205536 + <_> + + <_> + + + + <_>0 10 6 10 -1. + <_>0 10 3 5 2. + <_>3 15 3 5 2. + 0 + 4.3032150715589523e-003 + -0.0467454008758068 + 0.1331862062215805 + <_> + + <_> + + + + <_>4 8 12 6 -1. + <_>8 8 4 6 3. + 0 + 7.8372862190008163e-003 + 0.0618998110294342 + -0.1240516975522041 + <_> + + <_> + + + + <_>2 5 12 9 -1. + <_>6 5 4 9 3. + 0 + -1.6856989823281765e-003 + -0.0956963077187538 + 0.0776673108339310 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -4.1602249257266521e-003 + 0.0658505335450172 + -0.0768375918269157 + <_> + + <_> + + + + <_>4 5 9 5 -1. + <_>7 5 3 5 3. + 0 + -0.0508648194372654 + 0.5241906046867371 + -0.0173424296081066 + <_> + + <_> + + + + <_>10 3 6 7 -1. + <_>12 3 2 7 3. + 0 + -0.0644778832793236 + -0.4197225868701935 + 0.0122311003506184 + <_> + + <_> + + + + <_>6 13 7 6 -1. + <_>6 15 7 2 3. + 0 + -2.4949579965323210e-003 + 0.0642422065138817 + -0.0974573120474815 + <_> + + <_> + + + + <_>11 6 4 14 -1. + <_>13 6 2 7 2. + <_>11 13 2 7 2. + 0 + 3.2167730387300253e-003 + -0.0379022881388664 + 0.0821970924735069 + <_> + + <_> + + + + <_>5 6 4 14 -1. + <_>5 6 2 7 2. + <_>7 13 2 7 2. + 0 + -2.3393060546368361e-003 + -0.1060846000909805 + 0.0720048993825912 + <_> + + <_> + + + + <_>13 13 7 4 -1. + <_>13 15 7 2 2. + 0 + -8.0535542219877243e-003 + -0.1099186986684799 + 0.0256432797759771 + <_> + + <_> + + + + <_>1 5 4 14 -1. + <_>1 5 2 7 2. + <_>3 12 2 7 2. + 0 + 0.0150077398866415 + -0.0312671288847923 + 0.2050703018903732 + <_> + + <_> + + + + <_>1 13 18 4 -1. + <_>10 13 9 2 2. + <_>1 15 9 2 2. + 0 + -4.7144708223640919e-003 + -0.1405889987945557 + 0.0486872494220734 + <_> + + <_> + + + + <_>0 1 18 12 -1. + <_>0 7 18 6 2. + 0 + -0.2718858122825623 + -0.7708619236946106 + 8.2119107246398926e-003 + <_> + + <_> + + + + <_>4 1 14 18 -1. + <_>4 10 14 9 2. + 0 + -3.7261729594320059e-003 + 0.0783864185214043 + -0.0611103214323521 + <_> + + <_> + + + + <_>4 0 6 10 -1. + <_>6 0 2 10 3. + 0 + 8.1726117059588432e-003 + 0.0258723907172680 + -0.2420330047607422 + <_> + + <_> + + + + <_>16 10 4 9 -1. + <_>16 10 2 9 2. + 0 + -0.1538413017988205 + -0.8368161916732788 + 1.0526239639148116e-003 + <_> + + <_> + + + + <_>0 10 4 9 -1. + <_>2 10 2 9 2. + 0 + -4.2209690436720848e-003 + 0.1098781973123550 + -0.0609731301665306 + <_> + + <_> + + + + <_>10 3 6 7 -1. + <_>12 3 2 7 3. + 0 + 0.0346411801874638 + 5.9377611614763737e-003 + -0.7302142977714539 + <_> + + <_> + + + + <_>4 10 4 7 -1. + <_>6 10 2 7 2. + 0 + -1.0757029522210360e-003 + 0.0632532313466072 + -0.0939545333385468 + <_> + + <_> + + + + <_>4 9 15 3 -1. + <_>9 9 5 3 3. + 0 + 6.0506182489916682e-004 + -0.0726337432861328 + 0.0548477917909622 + <_> + + <_> + + + + <_>1 9 15 3 -1. + <_>6 9 5 3 3. + 0 + -4.9192002043128014e-003 + -0.1461798995733261 + 0.0498548895120621 + <_> + + <_> + + + + <_>16 0 4 12 -1. + <_>16 0 2 12 2. + 0 + 0.0586413405835629 + -0.0144878895953298 + 0.2194927930831909 + <_> + + <_> + + + + <_>7 8 4 12 -1. + <_>7 12 4 4 3. + 0 + -0.0959936380386353 + -0.4245699048042297 + 0.0156111698597670 + <_> + + <_> + + + + <_>16 0 4 12 -1. + <_>16 0 2 12 2. + 0 + -0.1754675060510635 + -0.5715453028678894 + 2.7310380246490240e-003 + <_> + + <_> + + + + <_>0 0 4 12 -1. + <_>2 0 2 12 2. 
+ <!-- Omitted: machine-generated OpenCV Haar cascade classifier data, flattened
+      here by extraction. It consists of per-feature rectangles, node thresholds,
+      and left/right leaf values, grouped into boosted stages (each stage closing
+      with its stage threshold). The full XML is preserved in the repository. -->
+ <_>11 0 1 13 3. + 0 + 3.4715738729573786e-004 + 0.0432145111262798 + -0.0652092397212982 + <_> + + <_> + + + + <_>5 1 6 19 -1. + <_>7 1 2 19 3. + 0 + 6.5575069747865200e-003 + 0.0410329811275005 + -0.1220093965530396 + <_> + + <_> + + + + <_>3 0 15 20 -1. + <_>8 0 5 20 3. + 0 + 0.0922872126102448 + -0.0219333898276091 + 0.0899531766772270 + <_> + + <_> + + + + <_>0 4 14 3 -1. + <_>7 4 7 3 2. + 0 + 0.0526855997741222 + 0.0164393503218889 + -0.2784793078899384 + <_> + + <_> + + + + <_>4 4 14 6 -1. + <_>11 4 7 3 2. + <_>4 7 7 3 2. + 0 + 7.2394758462905884e-003 + -0.0332179106771946 + 0.0972440615296364 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -2.2218099329620600e-003 + 0.0358609184622765 + -0.1387619972229004 + <_> + + <_> + + + + <_>6 7 14 3 -1. + <_>6 8 14 1 3. + 0 + -0.0233093798160553 + -0.2791394889354706 + 0.0163622293621302 + <_> + + <_> + + + + <_>2 2 5 12 -1. + <_>2 6 5 4 3. + 0 + 1.4036920038051903e-004 + -0.0400968715548515 + 0.1237995997071266 + <_> + + <_> + + + + <_>9 9 7 4 -1. + <_>9 11 7 2 2. + 0 + 0.0537028498947620 + 1.4607049524784088e-003 + -0.8643640875816345 + <_> + + <_> + + + + <_>4 9 7 4 -1. + <_>4 11 7 2 2. + 0 + 4.1926259291358292e-004 + -0.0493428297340870 + 0.1028954982757568 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + -1.6786300111562014e-003 + -0.1906508058309555 + 0.0251450594514608 + <_> + + <_> + + + + <_>3 13 13 3 -1. + <_>3 14 13 1 3. + 0 + 0.0166032407432795 + -0.0181257091462612 + 0.2688744962215424 + <_> + + <_> + + + + <_>9 1 4 10 -1. + <_>9 6 4 5 2. + 0 + -0.0226217899471521 + 0.1314570009708405 + -0.0252885594964027 + <_> + + <_> + + + + <_>0 8 13 3 -1. + <_>0 9 13 1 3. + 0 + 4.4634779915213585e-003 + 0.0565682090818882 + -0.1030642986297607 + <_> + + <_> + + + + <_>3 10 17 2 -1. + <_>3 11 17 1 2. + 0 + 3.3281201031059027e-003 + 0.0215178094804287 + -0.1408663988113403 + <_> + + <_> + + + + <_>0 0 6 17 -1. + <_>3 0 3 17 2. + 0 + -0.0253118406981230 + 0.1123747006058693 + -0.0417844988405705 + <_> + + <_> + + + + <_>14 0 6 12 -1. + <_>14 0 3 12 2. + 0 + -0.0261198803782463 + 0.1270370036363602 + -0.0235303100198507 + <_> + + <_> + + + + <_>2 0 4 16 -1. + <_>4 0 2 16 2. + 0 + -0.0726086422801018 + -0.3305288851261139 + 0.0217411592602730 + <_> + + <_> + + + + <_>14 1 6 7 -1. + <_>16 1 2 7 3. + 0 + 5.8377808891236782e-003 + -0.0281706806272268 + 0.0613000318408012 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>2 1 2 7 3. + 0 + 1.7830949509516358e-003 + -0.0761407166719437 + 0.0843913033604622 + <_> + + <_> + + + + <_>9 1 9 12 -1. + <_>12 1 3 12 3. + 0 + -0.1450258940458298 + -0.2888636887073517 + 9.4371382147073746e-003 + <_> + + <_> + + + + <_>2 1 9 12 -1. + <_>5 1 3 12 3. + 0 + -2.4291570298373699e-003 + -0.0636451691389084 + 0.0900570079684258 + <_> + + <_> + + + + <_>13 5 4 12 -1. + <_>13 5 2 12 2. + 0 + 0.1097790002822876 + -1.4906959841027856e-003 + 0.8971021771430969 + <_> + + <_> + + + + <_>3 5 4 12 -1. + <_>5 5 2 12 2. + 0 + -3.8412429857999086e-003 + 0.0739800110459328 + -0.0693783834576607 + <_> + + <_> + + + + <_>6 8 12 4 -1. + <_>10 8 4 4 3. + 0 + 3.9507250767201185e-004 + -0.0711664110422134 + 0.0631507411599159 + <_> + + <_> + + + + <_>2 8 12 4 -1. + <_>6 8 4 4 3. + 0 + -6.6879019141197205e-003 + -0.1421196013689041 + 0.0510072000324726 + <_> + + <_> + + + + <_>2 9 18 11 -1. + <_>8 9 6 11 3. + 0 + -0.2127815932035446 + 0.1747954934835434 + -0.0168664995580912 + <_> + + <_> + + + + <_>6 11 6 6 -1. + <_>9 11 3 6 2. 
+ 0 + 0.0439136102795601 + -7.9228030517697334e-003 + 0.5999451875686646 + <_> + + <_> + + + + <_>1 12 19 2 -1. + <_>1 13 19 1 2. + 0 + 3.0486818868666887e-003 + 0.0278801005333662 + -0.1499668955802918 + <_> + + <_> + + + + <_>0 12 13 3 -1. + <_>0 13 13 1 3. + 0 + 1.7128599574789405e-003 + -0.0615758895874023 + 0.1079311966896057 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + -0.0130615895614028 + -0.3586418926715851 + 0.0123326899483800 + <_> + + <_> + + + + <_>0 8 16 4 -1. + <_>0 8 8 2 2. + <_>8 10 8 2 2. + 0 + 1.4779239427298307e-003 + -0.0552806183695793 + 0.0764003396034241 + <_> + + <_> + + + + <_>8 6 8 8 -1. + <_>12 6 4 4 2. + <_>8 10 4 4 2. + 0 + -0.0741171836853027 + 0.3305566012859345 + -5.4406579583883286e-003 + <_> + + <_> + + + + <_>3 13 14 6 -1. + <_>3 15 14 2 3. + 0 + 0.0415327884256840 + 0.0127627495676279 + -0.3409101068973541 + <_> + + <_> + + + + <_>4 13 15 6 -1. + <_>4 15 15 2 3. + 0 + -0.0164743103086948 + -0.1193590015172958 + 0.0359978713095188 + <_> + + <_> + + + + <_>0 0 14 4 -1. + <_>7 0 7 4 2. + 0 + -0.0133844502270222 + 0.1492701023817062 + -0.0371512509882450 + <_> + + <_> + + + + <_>14 3 4 10 -1. + <_>14 8 4 5 2. + 0 + -4.3293130584061146e-003 + -0.1525720953941345 + 0.0200080294162035 + <_> + + <_> + + + + <_>2 4 14 12 -1. + <_>2 4 7 6 2. + <_>9 10 7 6 2. + 0 + 3.7254339549690485e-003 + 0.0382492803037167 + -0.1356284022331238 + <_> + + <_> + + + + <_>7 4 6 10 -1. + <_>10 4 3 5 2. + <_>7 9 3 5 2. + 0 + -3.5788780078291893e-003 + 0.1195114031434059 + -0.0513569712638855 + <_> + + <_> + + + + <_>1 0 3 15 -1. + <_>1 5 3 5 3. + 0 + 0.0909365415573120 + -9.6294376999139786e-003 + 0.5058292746543884 + <_> + + <_> + + + + <_>1 1 19 12 -1. + <_>1 5 19 4 3. + 0 + -3.1301870476454496e-003 + 0.0245875306427479 + -0.1575251966714859 + <_> + + <_> + + + + <_>5 13 6 7 -1. + <_>7 13 2 7 3. + 0 + -3.0295769684016705e-003 + -0.0966699570417404 + 0.0474024601280689 + <_> + + <_> + + + + <_>10 0 4 16 -1. + <_>12 0 2 8 2. + <_>10 8 2 8 2. + 0 + -3.1865050550550222e-003 + 0.0350353196263313 + -0.0408417098224163 + <_> + + <_> + + + + <_>6 0 4 16 -1. + <_>6 0 2 8 2. + <_>8 8 2 8 2. + 0 + 0.0448362603783607 + -7.4580628424882889e-003 + 0.6519020795822144 + <_> + + <_> + + + + <_>8 1 4 11 -1. + <_>8 1 2 11 2. + 0 + -6.4811948686838150e-003 + 0.1316393017768860 + -0.0360601283609867 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -2.0486880093812943e-003 + -0.1109751015901566 + 0.0510119087994099 + <_> + + <_> + + + + <_>0 11 20 3 -1. + <_>0 12 20 1 3. + 0 + 0.0491756200790405 + 5.1457029767334461e-003 + -0.8914859890937805 + <_> + + <_> + + + + <_>6 15 7 4 -1. + <_>6 17 7 2 2. + 0 + 8.4772880654782057e-004 + -0.0907417908310890 + 0.0448530204594135 + <_> + + <_> + + + + <_>7 16 7 4 -1. + <_>7 18 7 2 2. + 0 + -0.0165457092225552 + 0.2532956898212433 + -0.0169970802962780 + <_> + + <_> + + + + <_>1 14 16 4 -1. + <_>1 14 8 2 2. + <_>9 16 8 2 2. + 0 + 6.9274050183594227e-003 + 0.0389414615929127 + -0.1396130025386810 + <_> + + <_> + + + + <_>7 16 13 3 -1. + <_>7 17 13 1 3. + 0 + -6.5109939314424992e-003 + 0.1561030000448227 + -0.0244938805699348 + <_> + + <_> + + + + <_>1 12 18 8 -1. + <_>1 12 9 4 2. + <_>10 16 9 4 2. + 0 + -4.9708629958331585e-003 + -0.0982985869050026 + 0.0579038411378860 + <_> + + <_> + + + + <_>14 3 4 10 -1. + <_>14 8 4 5 2. + 0 + 0.1307460963726044 + -2.7071859221905470e-004 + 1.0000669956207275 + <_> + + <_> + + + + <_>2 3 4 10 -1. + <_>2 8 4 5 2. 
+ 0 + -0.0267059206962585 + -0.4257703125476837 + 0.0107059702277184 + <_> + + <_> + + + + <_>2 1 16 12 -1. + <_>2 7 16 6 2. + 0 + -0.1032906025648117 + 0.2589618861675263 + -0.0184145905077457 + <_> + + <_> + + + + <_>7 0 6 16 -1. + <_>7 8 6 8 2. + 0 + -0.0201661307364702 + -0.1145585030317307 + 0.0404395684599876 + <_> + + <_> + + + + <_>7 1 8 12 -1. + <_>7 7 8 6 2. + 0 + -4.2215920984745026e-003 + 0.0430392585694790 + -0.0487358607351780 + <_> + + <_> + + + + <_>2 12 15 8 -1. + <_>7 12 5 8 3. + 0 + -0.0100388396531343 + 0.0716087371110916 + -0.0662046074867249 + <_> + + <_> + + + + <_>4 16 15 4 -1. + <_>9 16 5 4 3. + 0 + 0.0158330593258142 + -0.0320668593049049 + 0.0899508967995644 + <_> + + <_> + + + + <_>6 7 8 6 -1. + <_>10 7 4 6 2. + 0 + 3.4065160434693098e-003 + 0.0472160093486309 + -0.1089878976345062 + <_> + + <_> + + + + <_>1 8 18 12 -1. + <_>1 8 9 12 2. + 0 + -9.8251160234212875e-003 + 0.1021322980523109 + -0.0529021099209785 + <_> + + <_> + + + + <_>0 17 15 3 -1. + <_>5 17 5 3 3. + 0 + 0.0168046299368143 + -0.0371899902820587 + 0.1378764957189560 + <_> + + <_> + + + + <_>9 2 6 17 -1. + <_>11 2 2 17 3. + 0 + 8.5175316780805588e-003 + 0.0271414406597614 + -0.1356956064701080 + <_> + + <_> + + + + <_>5 2 6 17 -1. + <_>7 2 2 17 3. + 0 + -6.3797592883929610e-004 + 0.0692171901464462 + -0.0906967371702194 + <_> + + <_> + + + + <_>7 4 6 7 -1. + <_>9 4 2 7 3. + 0 + -9.6052087610587478e-004 + 0.2247247993946075 + -0.0240326393395662 + <_> + + <_> + + + + <_>0 11 15 3 -1. + <_>0 12 15 1 3. + 0 + 7.2245922638103366e-004 + -0.0467312000691891 + 0.0969055071473122 + <_> + + <_> + + + + <_>9 10 11 6 -1. + <_>9 12 11 2 3. + 0 + 1.0769399814307690e-003 + 0.0382594913244247 + -0.0666741579771042 + <_> + + <_> + + + + <_>8 0 3 18 -1. + <_>9 0 1 18 3. + 0 + 0.0416201911866665 + 9.3473913148045540e-003 + -0.4904668927192688 + <_> + + <_> + + + + <_>14 11 4 8 -1. + <_>14 15 4 4 2. + 0 + -8.1712089013308287e-004 + 0.0527974404394627 + -0.0964580923318863 + <_> + + <_> + + + + <_>1 11 15 8 -1. + <_>1 15 15 4 2. + 0 + 6.2240879051387310e-003 + -0.0353507883846760 + 0.1648416072130203 + <_> + + <_> + + + + <_>9 10 3 10 -1. + <_>9 15 3 5 2. + 0 + 2.0862540695816278e-003 + 0.0339587107300758 + -0.1311400979757309 + <_> + + <_> + + + + <_>1 6 18 9 -1. + <_>1 9 18 3 3. + 0 + 4.2804637923836708e-003 + 0.3010404109954834 + -0.0162454508244991 + <_> + + <_> + + + + <_>3 1 14 2 -1. + <_>3 2 14 1 2. + 0 + -3.3040030393749475e-004 + -0.1166545972228050 + 0.0381462089717388 + <_> + + <_> + + + + <_>0 1 20 3 -1. + <_>0 2 20 1 3. + 0 + 2.8100309427827597e-003 + 0.0419405102729797 + -0.1118030026555061 + <_> + + <_> + + + + <_>5 0 14 2 -1. + <_>5 1 14 1 2. + 0 + 0.0198327396064997 + -0.0117015698924661 + 0.2012213021516800 + <_> + + <_> + + + + <_>3 8 12 10 -1. + <_>7 8 4 10 3. + 0 + 0.0708796828985214 + -0.0181978195905685 + 0.2542958855628967 + <_> + + <_> + + + + <_>8 2 4 12 -1. + <_>8 6 4 4 3. + 0 + -0.0838939696550369 + -0.3871923089027405 + 0.0117272902280092 + <_> + + <_> + + + + <_>6 2 8 12 -1. + <_>6 6 8 4 3. + 0 + 0.0284776203334332 + 0.0137015199288726 + -0.3249661922454834 + <_> + + <_> + + + + <_>4 3 12 4 -1. + <_>4 5 12 2 2. + 0 + 0.0120773101225495 + -0.0239758901298046 + 0.2523278892040253 + <_> + + <_> + + + + <_>0 0 5 9 -1. + <_>0 3 5 3 3. + 0 + -0.0756134092807770 + -0.6086645126342773 + 8.2847801968455315e-003 + <_> + + <_> + + + + <_>7 1 9 6 -1. + <_>7 4 9 3 2. + 0 + -0.0175638608634472 + 0.1081158965826035 + -0.0286227595061064 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>4 10 2 10 3. 
+ 0 + 0.0118091097101569 + 0.0347582697868347 + -0.1444471031427383 + <_> + + <_> + + + + <_>2 5 17 14 -1. + <_>2 12 17 7 2. + 0 + 0.3345921933650971 + 3.5104870330542326e-003 + -0.9150757789611816 + <_> + + <_> + + + + <_>0 7 10 8 -1. + <_>0 11 10 4 2. + 0 + 0.0984478369355202 + -0.0102903302758932 + 0.4794301986694336 + <_> + + <_> + + + + <_>12 4 3 15 -1. + <_>13 4 1 15 3. + 0 + -0.0402778387069702 + -0.7379382848739624 + 4.8832078464329243e-003 + <_> + + <_> + + + + <_>5 4 3 15 -1. + <_>6 4 1 15 3. + 0 + 4.6712718904018402e-003 + 0.0250373091548681 + -0.1700375974178314 + <_> + + <_> + + + + <_>8 7 12 5 -1. + <_>12 7 4 5 3. + 0 + 0.1395848989486694 + 1.9962170626968145e-003 + -0.7154716849327087 + <_> + + <_> + + + + <_>0 7 12 5 -1. + <_>4 7 4 5 3. + 0 + 0.0697427168488503 + -8.4846932440996170e-003 + 0.5537828207015991 + <_> + + <_> + + + + <_>3 6 14 3 -1. + <_>3 7 14 1 3. + 0 + 4.0283710695803165e-003 + -0.0167180299758911 + 0.2391424030065537 + <_> + + <_> + + + + <_>6 1 2 18 -1. + <_>7 1 1 18 2. + 0 + 0.0109117096289992 + 0.0157816596329212 + -0.2681370973587036 + <_> + + <_> + + + + <_>6 16 9 4 -1. + <_>6 18 9 2 2. + 0 + -6.7120362073183060e-003 + 0.1108765974640846 + -0.0313658788800240 + <_> + + <_> + + + + <_>3 15 14 4 -1. + <_>3 17 14 2 2. + 0 + -0.0134678203612566 + -0.2074151933193207 + 0.0234590806066990 + <_> + + <_> + + + + <_>7 16 13 3 -1. + <_>7 17 13 1 3. + 0 + -2.1431609056890011e-003 + 0.0782745927572250 + -0.0279594305902720 + <_> + + <_> + + + + <_>0 4 12 4 -1. + <_>4 4 4 4 3. + 0 + 0.0151633704081178 + 0.0217278301715851 + -0.1899544000625610 + <_> + + <_> + + + + <_>6 4 14 4 -1. + <_>13 4 7 2 2. + <_>6 6 7 2 2. + 0 + -0.0185519494116306 + 0.1116416007280350 + -0.0303740296512842 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -0.1108345985412598 + -0.5637990832328796 + 7.6859779655933380e-003 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 5.6210728362202644e-003 + 0.0329302586615086 + -0.1033701002597809 + <_> + + <_> + + + + <_>3 12 10 8 -1. + <_>3 12 5 4 2. + <_>8 16 5 4 2. + 0 + 3.0593289993703365e-003 + -0.0688718035817146 + 0.0603897199034691 + <_> + + <_> + + + + <_>12 10 5 9 -1. + <_>12 13 5 3 3. + 0 + -6.9845258258283138e-004 + 0.0380809083580971 + -0.0701129287481308 + <_> + + <_> + + + + <_>0 13 14 4 -1. + <_>0 13 7 2 2. + <_>7 15 7 2 2. + 0 + -1.3236569939181209e-003 + 0.0750040933489800 + -0.0639500468969345 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + -1.6736539546400309e-003 + -0.1058039963245392 + 0.0494763888418674 + <_> + + <_> + + + + <_>2 10 6 10 -1. + <_>2 10 3 5 2. + <_>5 15 3 5 2. + 0 + 7.0728380233049393e-003 + -0.0365821197628975 + 0.1312654018402100 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 1.8164990469813347e-003 + 0.0399538315832615 + -0.0515895783901215 + <_> + + <_> + + + + <_>0 14 7 6 -1. + <_>0 16 7 2 3. + 0 + 4.1909920983016491e-003 + 0.0486651994287968 + -0.1059850975871086 + <_> + + <_> + + + + <_>0 12 20 6 -1. + <_>0 15 20 3 2. + 0 + 0.1194002032279968 + -6.7811049520969391e-003 + 0.7452349066734314 + <_> + + <_> + + + + <_>1 16 16 4 -1. + <_>1 18 16 2 2. + 0 + -1.4965030131861567e-003 + 0.0668059363961220 + -0.0677984729409218 + <_> + + <_> + + + + <_>12 10 5 9 -1. + <_>12 13 5 3 3. + 0 + -0.1172299981117249 + -0.8786048889160156 + 1.8648250261321664e-003 + <_> + + <_> + + + + <_>3 10 5 9 -1. + <_>3 13 5 3 3. + 0 + 3.2925528939813375e-003 + 0.0356349013745785 + -0.1503078937530518 + <_> + + <_> + + + + <_>5 8 13 12 -1. + <_>5 12 13 4 3. 
+ 0 + 0.0684935674071312 + -9.8042488098144531e-003 + 0.3016194105148315 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>5 5 5 3 2. + <_>10 8 5 3 2. + 0 + 2.1837449166923761e-003 + -0.0534208491444588 + 0.0856263265013695 + <_> + + <_> + + + + <_>5 5 10 6 -1. + <_>10 5 5 3 2. + <_>5 8 5 3 2. + 0 + 6.9181360304355621e-003 + -0.0436855182051659 + 0.1270675957202911 + <_> + + <_> + + + + <_>0 3 13 2 -1. + <_>0 4 13 1 2. + 0 + -1.5878600534051657e-003 + -0.1264044046401978 + 0.0390260890126228 + <_> + + <_> + + + + <_>8 2 12 4 -1. + <_>8 4 12 2 2. + 0 + 3.8289129734039307e-003 + 0.0390253812074661 + -0.0796756893396378 + <_> + + <_> + + + + <_>5 0 8 6 -1. + <_>5 2 8 2 3. + 0 + 0.0122532602399588 + -0.0448096282780170 + 0.0977727100253105 + <_> + + <_> + + + + <_>5 2 14 4 -1. + <_>12 2 7 2 2. + <_>5 4 7 2 2. + 0 + 6.4031239598989487e-003 + 0.0335796102881432 + -0.1330029964447022 + <_> + + <_> + + + + <_>5 0 10 8 -1. + <_>5 4 10 4 2. + 0 + 7.0500532165169716e-003 + -0.0511214099824429 + 0.1177240014076233 + <_> + + <_> + + + + <_>12 0 8 4 -1. + <_>12 2 8 2 2. + 0 + 0.0132167302072048 + 0.0264540091156960 + -0.1319022029638290 + <_> + + <_> + + + + <_>8 9 4 8 -1. + <_>8 13 4 4 2. + 0 + 6.7367991432547569e-003 + -0.0101531995460391 + 0.4157046973705292 + <_> + + <_> + + + + <_>9 10 5 8 -1. + <_>9 14 5 4 2. + 0 + 2.4951510131359100e-003 + 0.0146310199052095 + -0.1656035929918289 + <_> + + <_> + + + + <_>0 14 12 4 -1. + <_>6 14 6 4 2. + 0 + 0.0383029989898205 + 7.2940620593726635e-003 + -0.6074460744857788 + <_> + + <_> + + + + <_>4 6 14 4 -1. + <_>11 6 7 2 2. + <_>4 8 7 2 2. + 0 + -0.0164910592138767 + 0.1678835004568100 + -0.0150621701031923 + <_> + + <_> + + + + <_>4 4 11 10 -1. + <_>4 9 11 5 2. + 0 + -0.0270716398954391 + -0.4638155102729797 + 0.0103350598365068 + <_> + + <_> + + + + <_>7 1 9 12 -1. + <_>7 7 9 6 2. + 0 + -0.0587149597704411 + 0.1486099958419800 + -0.0166637301445007 + <_> + + <_> + + + + <_>8 5 3 15 -1. + <_>8 10 3 5 3. + 0 + 9.2380512505769730e-003 + 0.0438303388655186 + -0.1061268970370293 + <_> + + <_> + + + + <_>7 13 13 3 -1. + <_>7 14 13 1 3. + 0 + 3.0808299779891968e-003 + -0.0367814898490906 + 0.0895591974258423 + <_> + + <_> + + + + <_>0 4 20 6 -1. + <_>0 6 20 2 3. + 0 + 2.9910521116107702e-003 + 0.0160191897302866 + -0.2917783856391907 + <_> + + <_> + + + + <_>5 3 12 4 -1. + <_>5 5 12 2 2. + 0 + 0.0447866097092628 + -6.7814979702234268e-003 + 0.3669516146183014 + <_> + + <_> + + + + <_>6 11 8 8 -1. + <_>6 11 4 4 2. + <_>10 15 4 4 2. + 0 + -2.9985690489411354e-003 + -0.0903160721063614 + 0.0480480417609215 + <_> + + <_> + + + + <_>5 15 13 3 -1. + <_>5 16 13 1 3. + 0 + -8.9135952293872833e-003 + 0.1690360009670258 + -0.0218804609030485 + <_> + + <_> + + + + <_>0 13 18 4 -1. + <_>0 13 9 2 2. + <_>9 15 9 2 2. + 0 + -0.0395982004702091 + -0.4488484859466553 + 0.0100272195413709 + <_> + + <_> + + + + <_>10 0 3 13 -1. + <_>11 0 1 13 3. + 0 + -0.0370648093521595 + -0.4418356120586395 + 2.2891450207680464e-003 + <_> + + <_> + + + + <_>7 0 3 13 -1. + <_>8 0 1 13 3. + 0 + -9.3376229051500559e-004 + 0.0736330598592758 + -0.0589016899466515 + <_> + + <_> + + + + <_>2 0 18 18 -1. + <_>8 0 6 18 3. + 0 + 0.0808877572417259 + -0.0249635800719261 + 0.0603037588298321 + <_> + + <_> + + + + <_>2 2 12 15 -1. + <_>2 7 12 5 3. + 0 + -0.0306975692510605 + -0.1781900972127914 + 0.0260902903974056 + <_> + + <_> + + + + <_>7 1 11 18 -1. + <_>7 7 11 6 3. + 0 + -0.1849526017904282 + 0.3490122854709625 + -3.8219890557229519e-003 + <_> + + <_> + + + + <_>8 5 4 14 -1. + <_>8 5 2 7 2. + <_>10 12 2 7 2. 
+ 0 + 0.0112183196470141 + -0.0267815496772528 + 0.1743142008781433 + <_> + + <_> + + + + <_>10 5 3 14 -1. + <_>10 12 3 7 2. + 0 + 6.2761609442532063e-003 + 0.0145324403420091 + -0.1186456978321075 + <_> + + <_> + + + + <_>7 5 3 14 -1. + <_>7 12 3 7 2. + 0 + -8.8509358465671539e-003 + -0.1051568984985352 + 0.0576556809246540 + <_> + + <_> + + + + <_>3 4 14 4 -1. + <_>3 6 14 2 2. + 0 + -0.0385757982730865 + 0.1500456035137177 + -0.0360802002251148 + <_> + + <_> + + + + <_>0 5 20 4 -1. + <_>0 5 10 2 2. + <_>10 7 10 2 2. + 0 + -0.0527202114462852 + -0.4755679070949554 + 0.0111260702833533 + -1.1474020481109619 + 43 + -1 + <_> + + + <_> + + <_> + + + + <_>8 4 4 14 -1. + <_>8 11 4 7 2. + 0 + -3.8506588898599148e-003 + 0.1120956987142563 + -0.2733029127120972 + <_> + + <_> + + + + <_>15 3 4 16 -1. + <_>17 3 2 8 2. + <_>15 11 2 8 2. + 0 + -0.0494272597134113 + 0.3927012085914612 + -0.0398718491196632 + <_> + + <_> + + + + <_>2 0 4 7 -1. + <_>4 0 2 7 2. + 0 + 1.3538210187107325e-003 + -0.1596504002809525 + 0.1252105981111527 + <_> + + <_> + + + + <_>12 6 5 9 -1. + <_>12 9 5 3 3. + 0 + 3.9328690618276596e-003 + -0.3404383957386017 + 0.0474374890327454 + <_> + + <_> + + + + <_>2 1 8 6 -1. + <_>2 3 8 2 3. + 0 + 2.3011169396340847e-003 + -0.2082774937152863 + 0.0748917013406754 + <_> + + <_> + + + + <_>10 1 4 8 -1. + <_>10 1 2 8 2. + 0 + 5.9128052089363337e-004 + -0.2084272056818008 + 0.0377987809479237 + <_> + + <_> + + + + <_>6 1 4 8 -1. + <_>8 1 2 8 2. + 0 + 1.7478190129622817e-003 + -0.1963517963886261 + 0.0645820274949074 + <_> + + <_> + + + + <_>10 10 7 6 -1. + <_>10 12 7 2 3. + 0 + 5.8316658250987530e-003 + 0.0315820388495922 + -0.1908458024263382 + <_> + + <_> + + + + <_>4 6 5 6 -1. + <_>4 9 5 3 2. + 0 + 1.2435190146788955e-003 + -0.5321357846260071 + 0.0221622306853533 + <_> + + <_> + + + + <_>7 15 7 4 -1. + <_>7 17 7 2 2. + 0 + 1.6247769817709923e-003 + -0.1327618062496185 + 0.0801356732845306 + <_> + + <_> + + + + <_>8 6 4 8 -1. + <_>8 10 4 4 2. + 0 + -2.2734089288860559e-003 + -0.1734469980001450 + 0.0547829903662205 + <_> + + <_> + + + + <_>10 10 7 6 -1. + <_>10 12 7 2 3. + 0 + 0.0578590594232082 + -1.5829589683562517e-003 + -0.6636794209480286 + <_> + + <_> + + + + <_>3 10 7 6 -1. + <_>3 12 7 2 3. + 0 + 5.7728560641407967e-003 + 0.0398151688277721 + -0.2291924953460693 + <_> + + <_> + + + + <_>8 6 6 12 -1. + <_>11 6 3 6 2. + <_>8 12 3 6 2. + 0 + -0.0440396107733250 + 0.2179328054189682 + -0.0235340092331171 + <_> + + <_> + + + + <_>5 6 4 14 -1. + <_>5 6 2 7 2. + <_>7 13 2 7 2. + 0 + 3.0226248782128096e-004 + -0.0894195809960365 + 0.1104286983609200 + <_> + + <_> + + + + <_>0 15 20 2 -1. + <_>0 15 10 2 2. + 0 + -0.0344708599150181 + -0.3666667938232422 + 0.0278582796454430 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + 0.0324603989720345 + 0.0157338809221983 + -0.4973374903202057 + <_> + + <_> + + + + <_>6 15 13 2 -1. + <_>6 16 13 1 2. + 0 + 9.9335552658885717e-004 + -0.0918009430170059 + 0.0840039774775505 + <_> + + <_> + + + + <_>0 17 19 3 -1. + <_>0 18 19 1 3. + 0 + -0.0234738308936358 + -0.4437566995620728 + 0.0151480101048946 + <_> + + <_> + + + + <_>9 5 6 10 -1. + <_>12 5 3 5 2. + <_>9 10 3 5 2. + 0 + -2.9013049788773060e-003 + 0.0546423494815826 + -0.2015652954578400 + <_> + + <_> + + + + <_>3 3 13 2 -1. + <_>3 4 13 1 2. + 0 + -6.5832951804623008e-004 + -0.1228576973080635 + 0.0567078888416290 + <_> + + <_> + + + + <_>2 0 17 6 -1. + <_>2 2 17 2 3. + 0 + 2.0407158881425858e-003 + -0.1089906990528107 + 0.0599336996674538 + <_> + + <_> + + + + <_>1 3 4 16 -1. 
+ <_>1 3 2 8 2. + <_>3 11 2 8 2. + 0 + -0.0131614999845624 + 0.1409195959568024 + -0.0473962016403675 + <_> + + <_> + + + + <_>12 10 8 6 -1. + <_>12 12 8 2 3. + 0 + -4.2273551225662231e-003 + -0.1249826997518539 + 0.0511246584355831 + <_> + + <_> + + + + <_>1 7 12 4 -1. + <_>5 7 4 4 3. + 0 + 7.6580629684031010e-003 + 0.0387734808027744 + -0.1809569001197815 + <_> + + <_> + + + + <_>14 0 6 5 -1. + <_>14 0 3 5 2. + 0 + -5.1912548951804638e-003 + 0.1254525929689407 + -0.0440125800669193 + <_> + + <_> + + + + <_>3 0 14 6 -1. + <_>10 0 7 6 2. + 0 + 0.1187459006905556 + -0.0148014798760414 + 0.4007121026515961 + <_> + + <_> + + + + <_>7 9 6 10 -1. + <_>10 9 3 5 2. + <_>7 14 3 5 2. + 0 + 4.5105828903615475e-003 + 0.0533368512988091 + -0.1570904999971390 + <_> + + <_> + + + + <_>0 14 18 6 -1. + <_>6 14 6 6 3. + 0 + 0.0450153797864914 + -0.0332787781953812 + 0.2053513973951340 + <_> + + <_> + + + + <_>11 0 6 16 -1. + <_>14 0 3 8 2. + <_>11 8 3 8 2. + 0 + -2.0866969134658575e-003 + 0.0421035289764404 + -0.1036178991198540 + <_> + + <_> + + + + <_>5 10 4 7 -1. + <_>7 10 2 7 2. + 0 + -1.3008449459448457e-003 + 0.0644244700670242 + -0.0978970602154732 + <_> + + <_> + + + + <_>11 10 4 8 -1. + <_>11 10 2 8 2. + 0 + -1.3591230381280184e-003 + 0.0729873478412628 + -0.0944510027766228 + <_> + + <_> + + + + <_>5 10 4 8 -1. + <_>7 10 2 8 2. + 0 + -7.4056759476661682e-003 + -0.1532036066055298 + 0.0532420016825199 + <_> + + <_> + + + + <_>16 0 3 13 -1. + <_>17 0 1 13 3. + 0 + 2.0208859350532293e-003 + -0.0332455299794674 + 0.0603197105228901 + <_> + + <_> + + + + <_>1 14 16 6 -1. + <_>9 14 8 6 2. + 0 + -0.0103421499952674 + 0.0855105593800545 + -0.0839208289980888 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + 0.0248658601194620 + 0.0126394601538777 + -0.3475719988346100 + <_> + + <_> + + + + <_>5 0 10 6 -1. + <_>5 3 10 3 2. + 0 + 0.0997986570000649 + -0.0188239701092243 + 0.3446500003337860 + <_> + + <_> + + + + <_>6 4 14 15 -1. + <_>6 9 14 5 3. + 0 + 0.0212013907730579 + -0.1046779975295067 + 0.0314945094287395 + <_> + + <_> + + + + <_>3 1 14 4 -1. + <_>3 1 7 2 2. + <_>10 3 7 2 2. + 0 + -5.1909908652305603e-003 + -0.1579234004020691 + 0.0502699613571167 + <_> + + <_> + + + + <_>8 3 6 10 -1. + <_>11 3 3 5 2. + <_>8 8 3 5 2. + 0 + 0.0669612288475037 + 3.2651789952069521e-003 + -0.5604916810989380 + <_> + + <_> + + + + <_>6 3 6 10 -1. + <_>6 3 3 5 2. + <_>9 8 3 5 2. + 0 + 0.0118091097101569 + -0.0285137891769409 + 0.2122631967067719 + <_> + + <_> + + + + <_>12 4 3 10 -1. + <_>12 9 3 5 2. + 0 + -0.0176456607878208 + -0.4450336098670960 + 5.0029670819640160e-003 + <_> + + <_> + + + + <_>5 4 3 10 -1. + <_>5 9 3 5 2. + 0 + -6.8918941542506218e-003 + -0.4219962060451508 + 0.0148130403831601 + <_> + + <_> + + + + <_>11 0 6 5 -1. + <_>11 0 3 5 2. + 0 + 2.1675550378859043e-003 + -0.1312519013881683 + 0.0671404227614403 + <_> + + <_> + + + + <_>5 7 10 6 -1. + <_>5 7 5 3 2. + <_>10 10 5 3 2. + 0 + -3.3283489756286144e-003 + -0.1076532974839211 + 0.0536107681691647 + <_> + + <_> + + + + <_>1 10 19 3 -1. + <_>1 11 19 1 3. + 0 + 0.0488696210086346 + 6.4427889883518219e-003 + -0.6456328034400940 + <_> + + <_> + + + + <_>1 0 3 13 -1. + <_>2 0 1 13 3. + 0 + 7.2693959809839725e-003 + -0.0396036207675934 + 0.1536964029073715 + <_> + + <_> + + + + <_>14 1 6 16 -1. + <_>16 1 2 16 3. + 0 + 0.0888499915599823 + -0.0132344001904130 + 0.2855528891086578 + <_> + + <_> + + + + <_>3 5 14 12 -1. + <_>3 5 7 6 2. + <_>10 11 7 6 2. 
+ 0 + 0.0154559500515461 + 0.0396941006183624 + -0.1720626950263977 + <_> + + <_> + + + + <_>14 1 6 16 -1. + <_>16 1 2 16 3. + 0 + -0.0137472003698349 + 0.1007926985621452 + -0.0438120290637016 + <_> + + <_> + + + + <_>0 1 6 16 -1. + <_>2 1 2 16 3. + 0 + -0.0228057503700256 + 0.1501417011022568 + -0.0437677986919880 + <_> + + <_> + + + + <_>4 2 12 4 -1. + <_>8 2 4 4 3. + 0 + 0.0238380394876003 + 0.0539012812077999 + -0.1461029052734375 + <_> + + <_> + + + + <_>3 9 12 6 -1. + <_>3 12 12 3 2. + 0 + -0.1018162965774536 + 0.3190504014492035 + -0.0200115907937288 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + 7.1074268780648708e-003 + 0.0562441796064377 + -0.1258756071329117 + <_> + + <_> + + + + <_>8 0 2 13 -1. + <_>9 0 1 13 2. + 0 + 7.6678092591464520e-004 + -0.1070419028401375 + 0.0664362981915474 + <_> + + <_> + + + + <_>8 9 6 10 -1. + <_>11 9 3 5 2. + <_>8 14 3 5 2. + 0 + 3.7424071342684329e-004 + -0.0378262996673584 + 0.0472349897027016 + <_> + + <_> + + + + <_>6 9 6 10 -1. + <_>6 9 3 5 2. + <_>9 14 3 5 2. + 0 + -2.0078169181942940e-003 + -0.0933162868022919 + 0.0676416084170341 + <_> + + <_> + + + + <_>5 17 10 3 -1. + <_>5 17 5 3 2. + 0 + 0.0334690511226654 + -0.0279261507093906 + 0.2529337108135223 + <_> + + <_> + + + + <_>7 2 2 18 -1. + <_>8 2 1 18 2. + 0 + -0.0155070303007960 + -0.5514515042304993 + 0.0128211602568626 + <_> + + <_> + + + + <_>5 14 15 6 -1. + <_>10 14 5 6 3. + 0 + -0.0192487090826035 + 0.0526886284351349 + -0.0303649902343750 + <_> + + <_> + + + + <_>0 9 7 6 -1. + <_>0 11 7 2 3. + 0 + -0.0175560303032398 + -0.3324734866619110 + 0.0187803804874420 + <_> + + <_> + + + + <_>5 14 15 6 -1. + <_>10 14 5 6 3. + 0 + 0.0193243809044361 + -0.0324584618210793 + 0.0949869975447655 + <_> + + <_> + + + + <_>0 14 15 6 -1. + <_>5 14 5 6 3. + 0 + -0.0203671604394913 + 0.1134840026497841 + -0.0584348216652870 + <_> + + <_> + + + + <_>12 9 7 6 -1. + <_>12 11 7 2 3. + 0 + 5.1770661957561970e-003 + 0.0470305606722832 + -0.0849603265523911 + <_> + + <_> + + + + <_>2 4 4 14 -1. + <_>2 4 2 7 2. + <_>4 11 2 7 2. + 0 + 1.9768481142818928e-003 + -0.0707941427826881 + 0.1037515029311180 + <_> + + <_> + + + + <_>11 1 6 12 -1. + <_>14 1 3 6 2. + <_>11 7 3 6 2. + 0 + -7.0216279709711671e-004 + 0.0307817291468382 + -0.1017082035541534 + <_> + + <_> + + + + <_>3 1 6 12 -1. + <_>3 1 3 6 2. + <_>6 7 3 6 2. + 0 + -2.4710369762033224e-003 + 0.0515776202082634 + -0.1192080974578857 + <_> + + <_> + + + + <_>4 7 15 6 -1. + <_>9 7 5 6 3. + 0 + 0.0232785400003195 + 0.0301915705204010 + -0.0939378887414932 + <_> + + <_> + + + + <_>1 0 6 10 -1. + <_>1 0 3 5 2. + <_>4 5 3 5 2. + 0 + 0.0136738196015358 + -0.0267589595168829 + 0.2401420027017593 + <_> + + <_> + + + + <_>8 13 9 5 -1. + <_>11 13 3 5 3. + 0 + -8.3967903628945351e-003 + -0.0504037700593472 + 0.0223681107163429 + <_> + + <_> + + + + <_>0 0 9 7 -1. + <_>3 0 3 7 3. + 0 + 0.0478784702718258 + -0.0237580500543118 + 0.2648639082908630 + <_> + + <_> + + + + <_>9 7 8 5 -1. + <_>9 7 4 5 2. + 0 + -0.0224835202097893 + -0.2304278016090393 + 0.0128406798467040 + <_> + + <_> + + + + <_>3 7 8 5 -1. + <_>7 7 4 5 2. + 0 + -0.0108839897438884 + -0.1838018000125885 + 0.0326397083699703 + <_> + + <_> + + + + <_>4 0 12 19 -1. + <_>8 0 4 19 3. + 0 + -0.0449019894003868 + 0.2419596016407013 + -0.0265072807669640 + <_> + + <_> + + + + <_>3 8 8 6 -1. + <_>7 8 4 6 2. + 0 + -0.0830429270863533 + -0.8049132823944092 + 7.5420029461383820e-003 + <_> + + <_> + + + + <_>15 2 5 6 -1. + <_>15 5 5 3 2. 
+ 0 + -3.7240530364215374e-003 + -0.0802282392978668 + 0.0315844714641571 + <_> + + <_> + + + + <_>3 1 13 10 -1. + <_>3 6 13 5 2. + 0 + -7.3502189479768276e-003 + 0.0689622312784195 + -0.0973912477493286 + <_> + + <_> + + + + <_>14 0 3 10 -1. + <_>14 5 3 5 2. + 0 + 5.5313981138169765e-003 + -0.0301807206124067 + 0.0601748004555702 + <_> + + <_> + + + + <_>0 1 20 8 -1. + <_>0 1 10 4 2. + <_>10 5 10 4 2. + 0 + 0.0172930806875229 + 0.0407321006059647 + -0.1560066044330597 + <_> + + <_> + + + + <_>8 6 6 12 -1. + <_>11 6 3 6 2. + <_>8 12 3 6 2. + 0 + -3.3298740163445473e-003 + 0.0410010889172554 + -0.0769090279936790 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + -4.9308240413665771e-003 + 0.1703153997659683 + -0.0405822396278381 + <_> + + <_> + + + + <_>8 10 6 10 -1. + <_>10 10 2 10 3. + 0 + 8.6011141538619995e-003 + 0.0316569209098816 + -0.1405003964900971 + <_> + + <_> + + + + <_>9 3 2 14 -1. + <_>9 10 2 7 2. + 0 + 0.0136743402108550 + -0.0218457095324993 + 0.3012866079807282 + <_> + + <_> + + + + <_>11 1 4 18 -1. + <_>11 1 2 18 2. + 0 + -0.0113754197955132 + -0.1568734049797058 + 0.0282560195773840 + <_> + + <_> + + + + <_>5 1 4 18 -1. + <_>7 1 2 18 2. + 0 + -4.2750681750476360e-003 + -0.1215597018599510 + 0.0501467995345593 + <_> + + <_> + + + + <_>7 1 8 5 -1. + <_>7 1 4 5 2. + 0 + 0.0164847597479820 + -0.0365578904747963 + 0.1258372962474823 + <_> + + <_> + + + + <_>5 5 6 8 -1. + <_>7 5 2 8 3. + 0 + -0.0390569008886814 + 0.2405312955379486 + -0.0269838906824589 + <_> + + <_> + + + + <_>12 9 7 6 -1. + <_>12 11 7 2 3. + 0 + -5.7546719908714294e-003 + -0.1333768069744110 + 0.0202660206705332 + <_> + + <_> + + + + <_>1 9 7 6 -1. + <_>1 11 7 2 3. + 0 + 5.1583289168775082e-003 + 0.0646663904190063 + -0.1142849996685982 + <_> + + <_> + + + + <_>9 10 7 4 -1. + <_>9 12 7 2 2. + 0 + -3.0463270377367735e-003 + 0.0450186803936958 + -0.0815735906362534 + <_> + + <_> + + + + <_>0 2 5 9 -1. + <_>0 5 5 3 3. + 0 + 7.4743861332535744e-003 + 0.0312467105686665 + -0.1892973035573959 + <_> + + <_> + + + + <_>10 2 6 9 -1. + <_>10 5 6 3 3. + 0 + 1.6480450285598636e-003 + -0.0258950404822826 + 0.1865288019180298 + <_> + + <_> + + + + <_>0 1 18 6 -1. + <_>0 1 9 3 2. + <_>9 4 9 3 2. + 0 + 4.5184311456978321e-003 + 0.0548034682869911 + -0.1044400036334992 + <_> + + <_> + + + + <_>5 6 14 3 -1. + <_>5 7 14 1 3. + 0 + -3.3209871035069227e-003 + 0.0439594015479088 + -0.0812404826283455 + <_> + + <_> + + + + <_>0 12 6 5 -1. + <_>3 12 3 5 2. + 0 + 5.2665979601442814e-003 + -0.0448534712195396 + 0.1134390980005264 + <_> + + <_> + + + + <_>10 10 9 6 -1. + <_>13 10 3 6 3. + 0 + -4.7867707908153534e-003 + 0.0763190090656281 + -0.0285511706024408 + <_> + + <_> + + + + <_>0 9 5 9 -1. + <_>0 12 5 3 3. + 0 + -0.0447101183235645 + -0.3479571938514710 + 0.0149282300844789 + <_> + + <_> + + + + <_>8 0 8 19 -1. + <_>8 0 4 19 2. + 0 + 4.3861730955541134e-003 + 0.0745409503579140 + -0.0462980717420578 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + 9.2240851372480392e-003 + -0.0586261786520481 + 0.0986934080719948 + <_> + + <_> + + + + <_>9 0 2 13 -1. + <_>9 0 1 13 2. + 0 + -1.1849260190501809e-003 + 0.1002314016222954 + -0.0567296408116817 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -0.0185465402901173 + -0.3823617100715637 + 0.0151415299624205 + <_> + + <_> + + + + <_>12 12 8 6 -1. + <_>12 14 8 2 3. + 0 + 3.4743950236588717e-003 + 0.0265239104628563 + -0.1128982976078987 + <_> + + <_> + + + + <_>3 9 9 6 -1. + <_>6 9 3 6 3. 
+ 0 + 0.1027401983737946 + -6.6097700037062168e-003 + 0.7756177783012390 + <_> + + <_> + + + + <_>6 4 12 14 -1. + <_>10 4 4 14 3. + 0 + 0.2047939002513886 + 6.9657550193369389e-003 + -0.3598898053169251 + <_> + + <_> + + + + <_>2 4 12 14 -1. + <_>6 4 4 14 3. + 0 + 0.1209406033158302 + 0.0181744508445263 + -0.3353117108345032 + <_> + + <_> + + + + <_>7 1 8 5 -1. + <_>7 1 4 5 2. + 0 + 0.0122242299839854 + -0.0314540490508080 + 0.0790049731731415 + <_> + + <_> + + + + <_>4 0 8 19 -1. + <_>8 0 4 19 2. + 0 + 0.1517646014690399 + -0.0108266696333885 + 0.4558309018611908 + <_> + + <_> + + + + <_>8 13 9 5 -1. + <_>11 13 3 5 3. + 0 + -0.0996921509504318 + -0.3542217910289764 + 3.1256359070539474e-003 + <_> + + <_> + + + + <_>3 13 9 5 -1. + <_>6 13 3 5 3. + 0 + -6.3465638086199760e-003 + -0.1109881997108460 + 0.0537353083491325 + <_> + + <_> + + + + <_>4 1 12 4 -1. + <_>8 1 4 4 3. + 0 + -6.7007602192461491e-003 + 0.1891009062528610 + -0.0309301596134901 + <_> + + <_> + + + + <_>1 2 8 18 -1. + <_>1 2 4 9 2. + <_>5 11 4 9 2. + 0 + -0.1010119989514351 + 0.2376350015401840 + -0.0222139693796635 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.0461110211908817 + -0.0375433303415775 + 0.0487337596714497 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1414680927991867 + 0.0111480196937919 + -0.5147436261177063 + <_> + + <_> + + + + <_>11 11 6 9 -1. + <_>11 14 6 3 3. + 0 + -0.0113944998010993 + -0.0708243027329445 + 0.0317593701183796 + <_> + + <_> + + + + <_>3 11 6 9 -1. + <_>3 14 6 3 3. + 0 + 3.1667309813201427e-003 + 0.0411772802472115 + -0.1490058004856110 + <_> + + <_> + + + + <_>8 14 10 6 -1. + <_>13 14 5 3 2. + <_>8 17 5 3 2. + 0 + 8.9959725737571716e-003 + -0.0411865115165710 + 0.0728167816996574 + <_> + + <_> + + + + <_>7 13 6 7 -1. + <_>9 13 2 7 3. + 0 + -0.0615592710673809 + -0.7393764257431030 + 6.6859079524874687e-003 + <_> + + <_> + + + + <_>9 5 7 6 -1. + <_>9 7 7 2 3. + 0 + -3.5607949830591679e-003 + 0.0132605098187923 + -0.0611508190631866 + <_> + + <_> + + + + <_>4 5 7 6 -1. + <_>4 7 7 2 3. + 0 + -0.1247633993625641 + -0.7858049869537354 + 6.2701301649212837e-003 + <_> + + <_> + + + + <_>3 0 17 16 -1. + <_>3 8 17 8 2. + 0 + 0.6273918747901917 + 3.5465341061353683e-003 + -0.7336381077766419 + <_> + + <_> + + + + <_>0 0 19 3 -1. + <_>0 1 19 1 3. + 0 + 0.0342191606760025 + 8.2031572237610817e-003 + -0.5333021283149719 + <_> + + <_> + + + + <_>11 1 5 9 -1. + <_>11 4 5 3 3. + 0 + 1.0574149928288534e-004 + -0.0503547005355358 + 0.0470194891095161 + <_> + + <_> + + + + <_>4 1 10 6 -1. + <_>4 4 10 3 2. + 0 + -0.0321122892200947 + 0.1708530038595200 + -0.0347341410815716 + <_> + + <_> + + + + <_>7 10 12 9 -1. + <_>7 13 12 3 3. + 0 + -0.0161408390849829 + -0.0647530928254128 + 0.0569431111216545 + <_> + + <_> + + + + <_>1 10 12 3 -1. + <_>7 10 6 3 2. + 0 + 0.0197372809052467 + -0.0180651806294918 + 0.2618342041969299 + <_> + + <_> + + + + <_>7 8 6 12 -1. + <_>10 8 3 6 2. + <_>7 14 3 6 2. + 0 + 0.0278954505920410 + 0.0176410600543022 + -0.3095115125179291 + <_> + + <_> + + + + <_>2 14 10 6 -1. + <_>2 14 5 3 2. + <_>7 17 5 3 2. + 0 + 3.5123159177601337e-003 + -0.0834470689296722 + 0.0650159716606140 + <_> + + <_> + + + + <_>6 9 8 8 -1. + <_>10 9 4 4 2. + <_>6 13 4 4 2. + 0 + -4.4775637798011303e-003 + -0.1242344975471497 + 0.0470611192286015 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>7 17 6 3 3. + 0 + -6.1348858289420605e-003 + 0.1024826988577843 + -0.0597009584307671 + <_> + + <_> + + + + <_>6 6 10 6 -1. + <_>11 6 5 3 2. + <_>6 9 5 3 2. 
+ 0 + 0.0140479598194361 + 0.0148333795368671 + -0.1122959032654762 + <_> + + <_> + + + + <_>4 6 10 6 -1. + <_>4 6 5 3 2. + <_>9 9 5 3 2. + 0 + 1.1907520238310099e-003 + 0.0499866902828217 + -0.1169629022479057 + <_> + + <_> + + + + <_>6 14 9 5 -1. + <_>9 14 3 5 3. + 0 + 0.0176173895597458 + -0.0176877006888390 + 0.1541609019041061 + <_> + + <_> + + + + <_>6 10 6 10 -1. + <_>8 10 2 10 3. + 0 + -4.9166870303452015e-003 + -0.1022718027234078 + 0.0469943918287754 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -3.9010820910334587e-003 + 0.1422944962978363 + -0.0453127995133400 + <_> + + <_> + + + + <_>8 8 4 7 -1. + <_>10 8 2 7 2. + 0 + -1.7458139918744564e-003 + -0.1085309013724327 + 0.0756895616650581 + <_> + + <_> + + + + <_>8 10 8 4 -1. + <_>8 12 8 2 2. + 0 + -1.2748650042340159e-003 + 0.0223845206201077 + -0.0751505270600319 + <_> + + <_> + + + + <_>0 0 10 9 -1. + <_>0 3 10 3 3. + 0 + -0.0791095569729805 + 0.4877392947673798 + -9.6941655501723289e-003 + <_> + + <_> + + + + <_>9 1 8 4 -1. + <_>9 3 8 2 2. + 0 + -0.0141032701358199 + -0.2326368987560272 + 0.0150915598496795 + <_> + + <_> + + + + <_>4 5 5 6 -1. + <_>4 8 5 3 2. + 0 + -2.2076119203120470e-003 + 0.1926839947700501 + -0.0254290606826544 + <_> + + <_> + + + + <_>8 6 9 4 -1. + <_>8 8 9 2 2. + 0 + 0.0396260581910610 + -0.0156307592988014 + 0.1227002963423729 + <_> + + <_> + + + + <_>0 0 3 13 -1. + <_>1 0 1 13 3. + 0 + -7.8973636846058071e-005 + -0.0732576474547386 + 0.0658486932516098 + <_> + + <_> + + + + <_>13 1 6 11 -1. + <_>15 1 2 11 3. + 0 + 5.1964947488158941e-004 + -0.1136638000607491 + 0.0811334922909737 + <_> + + <_> + + + + <_>1 1 6 11 -1. + <_>3 1 2 11 3. + 0 + -1.1722079943865538e-003 + -0.0976026430726051 + 0.0598395690321922 + <_> + + <_> + + + + <_>11 0 6 5 -1. + <_>11 0 3 5 2. + 0 + 3.9326730184257030e-003 + -0.0570261515676975 + 0.0422261282801628 + <_> + + <_> + + + + <_>4 2 6 17 -1. + <_>6 2 2 17 3. + 0 + -0.0873861536383629 + -0.3789604902267456 + 0.0128692798316479 + <_> + + <_> + + + + <_>8 12 8 8 -1. + <_>12 12 4 4 2. + <_>8 16 4 4 2. + 0 + -0.0213240403681993 + 0.3088644146919251 + -0.0177342407405376 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -2.3385910317301750e-003 + -0.1132232025265694 + 0.0439149402081966 + <_> + + <_> + + + + <_>6 6 8 4 -1. + <_>6 8 8 2 2. + 0 + 1.5183660434558988e-003 + -0.1433762013912201 + 0.0394417084753513 + <_> + + <_> + + + + <_>2 10 9 6 -1. + <_>2 13 9 3 2. + 0 + -0.1108551993966103 + 0.7403758764266968 + -6.7982021719217300e-003 + <_> + + <_> + + + + <_>9 11 11 6 -1. + <_>9 14 11 3 2. + 0 + -0.0100091202184558 + -0.0392032302916050 + 0.0317492112517357 + <_> + + <_> + + + + <_>3 11 14 8 -1. + <_>3 11 7 4 2. + <_>10 15 7 4 2. + 0 + -0.0209164302796125 + 0.1892773061990738 + -0.0304902307689190 + <_> + + <_> + + + + <_>8 4 4 10 -1. + <_>8 9 4 5 2. + 0 + 7.4165337719023228e-003 + 0.0467974506318569 + -0.1111361011862755 + <_> + + <_> + + + + <_>1 12 13 3 -1. + <_>1 13 13 1 3. + 0 + 3.3599510788917542e-003 + -0.0452549904584885 + 0.1150840967893601 + <_> + + <_> + + + + <_>9 7 4 12 -1. + <_>9 11 4 4 3. + 0 + -5.7189498329535127e-004 + -0.0634720772504807 + 0.0520499497652054 + <_> + + <_> + + + + <_>0 14 7 6 -1. + <_>0 17 7 3 2. + 0 + -0.0681202933192253 + 0.5080602765083313 + -9.5091843977570534e-003 + <_> + + <_> + + + + <_>13 11 7 6 -1. + <_>13 13 7 2 3. + 0 + 2.5180799420922995e-003 + 0.0553053207695484 + -0.1440276950597763 + <_> + + <_> + + + + <_>4 4 12 16 -1. + <_>4 12 12 8 2. 
+ 0 + 0.0560552515089512 + -0.0233591701835394 + 0.2193540036678314 + <_> + + <_> + + + + <_>11 10 9 4 -1. + <_>11 12 9 2 2. + 0 + -0.0403867103159428 + -0.1918344050645828 + 7.8779058530926704e-003 + <_> + + <_> + + + + <_>0 10 9 4 -1. + <_>0 12 9 2 2. + 0 + 3.1857648864388466e-003 + 0.0276057794690132 + -0.2008430957794190 + <_> + + <_> + + + + <_>2 11 16 6 -1. + <_>2 14 16 3 2. + 0 + 0.0251595508307219 + 0.0112656997516751 + -0.4362818002700806 + <_> + + <_> + + + + <_>0 7 2 13 -1. + <_>1 7 1 13 2. + 0 + -2.7010419871658087e-003 + 0.1133650019764900 + -0.0469042696058750 + <_> + + <_> + + + + <_>7 0 6 7 -1. + <_>9 0 2 7 3. + 0 + -0.0300568901002407 + -0.6236873269081116 + 7.3214052245020866e-003 + <_> + + <_> + + + + <_>0 11 12 4 -1. + <_>4 11 4 4 3. + 0 + -0.1208802014589310 + -0.8642836809158325 + 4.3813590891659260e-003 + <_> + + <_> + + + + <_>11 9 6 8 -1. + <_>13 9 2 8 3. + 0 + 4.0104859508574009e-003 + -0.0534716509282589 + 0.0711138024926186 + <_> + + <_> + + + + <_>3 9 6 8 -1. + <_>5 9 2 8 3. + 0 + -2.9688570648431778e-003 + 0.1007663011550903 + -0.0492339283227921 + <_> + + <_> + + + + <_>11 0 2 19 -1. + <_>11 0 1 19 2. + 0 + -3.7600689101964235e-003 + -0.2092870026826859 + 0.0265496801584959 + <_> + + <_> + + + + <_>5 10 4 8 -1. + <_>7 10 2 8 2. + 0 + -1.5982619952410460e-003 + 0.0610701888799667 + -0.0796235725283623 + <_> + + <_> + + + + <_>13 14 7 6 -1. + <_>13 16 7 2 3. + 0 + 5.4285880178213120e-003 + 0.0397665798664093 + -0.1174684986472130 + <_> + + <_> + + + + <_>1 15 13 3 -1. + <_>1 16 13 1 3. + 0 + 1.0872900020331144e-003 + -0.0645962283015251 + 0.0749644264578819 + <_> + + <_> + + + + <_>5 15 13 3 -1. + <_>5 16 13 1 3. + 0 + -2.8442030306905508e-003 + 0.1173835024237633 + -0.0401594005525112 + <_> + + <_> + + + + <_>4 16 9 4 -1. + <_>4 18 9 2 2. + 0 + 0.0355461016297340 + 0.0121949696913362 + -0.4218482077121735 + <_> + + <_> + + + + <_>7 13 7 6 -1. + <_>7 15 7 2 3. + 0 + -0.0485429503023624 + 0.3129276931285858 + -0.0127738304436207 + <_> + + <_> + + + + <_>3 14 14 4 -1. + <_>3 14 7 2 2. + <_>10 16 7 2 2. + 0 + -0.0307321008294821 + -0.5063123703002930 + 0.0106007298454642 + <_> + + <_> + + + + <_>13 0 7 14 -1. + <_>13 7 7 7 2. + 0 + 0.0130669297650456 + -0.0500031188130379 + 0.0440059304237366 + <_> + + <_> + + + + <_>0 0 7 14 -1. + <_>0 7 7 7 2. + 0 + 0.2920064032077789 + 5.3693680092692375e-003 + -0.8903915882110596 + <_> + + <_> + + + + <_>3 2 16 4 -1. + <_>3 2 8 4 2. + 0 + -8.7579451501369476e-003 + 0.0966667309403419 + -0.0313106589019299 + <_> + + <_> + + + + <_>6 2 4 8 -1. + <_>6 6 4 4 2. + 0 + -2.3599369451403618e-003 + 0.0430462807416916 + -0.1099243015050888 + <_> + + <_> + + + + <_>10 0 3 14 -1. + <_>10 7 3 7 2. + 0 + 6.9077489897608757e-003 + -0.0291741602122784 + 0.0891748964786530 + <_> + + <_> + + + + <_>1 7 18 9 -1. + <_>1 10 18 3 3. + 0 + 0.0208496898412704 + 0.1261470019817352 + -0.0443581007421017 + <_> + + <_> + + + + <_>6 5 9 14 -1. + <_>9 5 3 14 3. + 0 + -0.0588464215397835 + 0.2166150063276291 + -8.7285088375210762e-003 + <_> + + <_> + + + + <_>5 5 9 14 -1. + <_>8 5 3 14 3. + 0 + 2.5576311163604259e-003 + -0.1164821013808250 + 0.0547560192644596 + <_> + + <_> + + + + <_>11 2 2 15 -1. + <_>11 2 1 15 2. + 0 + 3.8973900955170393e-003 + 0.0357594899833202 + -0.0978685617446899 + <_> + + <_> + + + + <_>6 8 4 8 -1. + <_>8 8 2 8 2. + 0 + -1.2494160328060389e-003 + 0.0913479626178741 + -0.0578171797096729 + <_> + + <_> + + + + <_>6 10 10 9 -1. + <_>6 13 10 3 3. 
+ 0 + 3.4928850363940001e-003 + 0.0206342209130526 + -0.1449493020772934 + <_> + + <_> + + + + <_>0 16 14 4 -1. + <_>0 16 7 2 2. + <_>7 18 7 2 2. + 0 + -0.0113785099238157 + 0.2120326012372971 + -0.0241508502513170 + <_> + + <_> + + + + <_>9 5 4 13 -1. + <_>9 5 2 13 2. + 0 + -0.0440604500472546 + 0.4226736128330231 + -4.7765900380909443e-003 + <_> + + <_> + + + + <_>4 11 12 4 -1. + <_>8 11 4 4 3. + 0 + -8.3084795624017715e-003 + -0.0849286466836929 + 0.0602280907332897 + <_> + + <_> + + + + <_>6 17 14 2 -1. + <_>6 17 7 2 2. + 0 + -9.1945994645357132e-003 + 0.0723187029361725 + -0.0204722601920366 + <_> + + <_> + + + + <_>0 9 14 2 -1. + <_>7 9 7 2 2. + 0 + 0.0655751079320908 + 5.0813751295208931e-003 + -0.8969318866729736 + <_> + + <_> + + + + <_>16 0 4 15 -1. + <_>16 0 2 15 2. + 0 + 0.1851042062044144 + 2.2485901135951281e-003 + -0.7512516975402832 + <_> + + <_> + + + + <_>0 0 4 10 -1. + <_>2 0 2 10 2. + 0 + -0.1760881990194321 + -0.7896922230720520 + 5.2678477950394154e-003 + <_> + + <_> + + + + <_>16 8 4 12 -1. + <_>16 12 4 4 3. + 0 + 0.0983497127890587 + 2.8081049676984549e-003 + -0.2582851946353912 + <_> + + <_> + + + + <_>0 8 4 12 -1. + <_>0 12 4 4 3. + 0 + -1.8191979324910790e-004 + -0.0862061008810997 + 0.0522947981953621 + <_> + + <_> + + + + <_>12 12 8 6 -1. + <_>12 14 8 2 3. + 0 + -5.2928649820387363e-003 + -0.0546002388000488 + 0.0283046308904886 + <_> + + <_> + + + + <_>0 12 8 6 -1. + <_>0 14 8 2 3. + 0 + 1.1537299724295735e-003 + 0.0466841682791710 + -0.1123477965593338 + <_> + + <_> + + + + <_>9 5 4 14 -1. + <_>11 5 2 7 2. + <_>9 12 2 7 2. + 0 + -3.8274680264294147e-003 + 0.0601455084979534 + -0.0823711007833481 + <_> + + <_> + + + + <_>0 11 11 6 -1. + <_>0 14 11 3 2. + 0 + -0.0869578570127487 + -0.4836303889751434 + 9.2326821759343147e-003 + <_> + + <_> + + + + <_>5 15 12 5 -1. + <_>9 15 4 5 3. + 0 + -2.4195960722863674e-003 + -0.0352211408317089 + 0.0270817093551159 + <_> + + <_> + + + + <_>6 6 6 12 -1. + <_>6 6 3 6 2. + <_>9 12 3 6 2. + 0 + -4.7905668616294861e-003 + 0.0589552000164986 + -0.0787481367588043 + <_> + + <_> + + + + <_>7 7 8 4 -1. + <_>7 7 4 4 2. + 0 + -4.0910490788519382e-003 + -0.1755093932151794 + 0.0264547299593687 + <_> + + <_> + + + + <_>5 8 6 10 -1. + <_>5 8 3 5 2. + <_>8 13 3 5 2. + 0 + 2.5641750544309616e-003 + -0.0368148311972618 + 0.1514022946357727 + <_> + + <_> + + + + <_>7 4 7 14 -1. + <_>7 11 7 7 2. + 0 + 5.4726968519389629e-003 + 0.0312435794621706 + -0.0978909581899643 + <_> + + <_> + + + + <_>7 6 4 8 -1. + <_>7 10 4 4 2. + 0 + -1.0310260113328695e-003 + -0.1242405027151108 + 0.0403650291264057 + <_> + + <_> + + + + <_>9 2 6 9 -1. + <_>9 5 6 3 3. + 0 + -0.1303016990423203 + 0.1710616946220398 + -6.9856629706919193e-003 + <_> + + <_> + + + + <_>5 2 6 9 -1. + <_>5 5 6 3 3. + 0 + 3.5753389820456505e-003 + -0.0254371296614408 + 0.2196757048368454 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + 8.4238024428486824e-003 + 0.0295823998749256 + -0.1739009022712708 + <_> + + <_> + + + + <_>8 1 4 11 -1. + <_>10 1 2 11 2. + 0 + 0.0411546491086483 + -0.0132654998451471 + 0.3628241121768951 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>10 1 9 2 2. + <_>1 3 9 2 2. + 0 + -0.0186207592487335 + -0.2280678004026413 + 0.0215025693178177 + <_> + + <_> + + + + <_>3 4 4 16 -1. + <_>3 4 2 8 2. + <_>5 12 2 8 2. + 0 + 0.0233076196163893 + -0.0230477601289749 + 0.2320867031812668 + <_> + + <_> + + + + <_>8 12 6 8 -1. + <_>10 12 2 8 3. + 0 + 0.0465182997286320 + 0.0105854002758861 + -0.4607670009136200 + <_> + + <_> + + + + <_>0 3 6 7 -1. + <_>2 3 2 7 3. 
+ 0 + -0.0834994018077850 + 0.3784511983394623 + -0.0141057400032878 + <_> + + <_> + + + + <_>14 2 6 9 -1. + <_>14 5 6 3 3. + 0 + -0.0968970134854317 + -0.3299584984779358 + 6.2883920036256313e-003 + <_> + + <_> + + + + <_>0 2 7 9 -1. + <_>0 5 7 3 3. + 0 + 6.9753699935972691e-003 + 0.0245936308056116 + -0.2100367993116379 + <_> + + <_> + + + + <_>16 0 3 13 -1. + <_>17 0 1 13 3. + 0 + -0.0338599495589733 + 0.1892790049314499 + -8.7296841666102409e-003 + <_> + + <_> + + + + <_>1 0 3 13 -1. + <_>2 0 1 13 3. + 0 + 1.0354740079492331e-003 + -0.0644933432340622 + 0.0801922902464867 + <_> + + <_> + + + + <_>6 7 12 7 -1. + <_>6 7 6 7 2. + 0 + 0.0399506613612175 + 0.0250730402767658 + -0.1163693964481354 + <_> + + <_> + + + + <_>5 3 6 10 -1. + <_>5 3 3 5 2. + <_>8 8 3 5 2. + 0 + 3.0460350681096315e-003 + -0.0337549410760403 + 0.1332425028085709 + <_> + + <_> + + + + <_>6 9 9 5 -1. + <_>9 9 3 5 3. + 0 + -1.5341850230470300e-003 + 0.0624428614974022 + -0.0560610704123974 + <_> + + <_> + + + + <_>3 0 6 5 -1. + <_>6 0 3 5 2. + 0 + 2.0531520713120699e-003 + -0.0847900435328484 + 0.0534080490469933 + <_> + + <_> + + + + <_>10 0 6 8 -1. + <_>12 0 2 8 3. + 0 + 2.1295580081641674e-003 + 0.0406503193080425 + -0.1112471967935562 + <_> + + <_> + + + + <_>4 4 12 8 -1. + <_>4 4 6 4 2. + <_>10 8 6 4 2. + 0 + -0.0154620297253132 + 0.1380697935819626 + -0.0339442081749439 + <_> + + <_> + + + + <_>8 8 10 6 -1. + <_>13 8 5 3 2. + <_>8 11 5 3 2. + 0 + -0.0278782397508621 + -0.1002539992332459 + 0.0134448800235987 + <_> + + <_> + + + + <_>2 8 10 6 -1. + <_>2 8 5 3 2. + <_>7 11 5 3 2. + 0 + 0.0172556806355715 + 0.0153617896139622 + -0.3693079948425293 + <_> + + <_> + + + + <_>9 5 8 14 -1. + <_>13 5 4 7 2. + <_>9 12 4 7 2. + 0 + -0.0178705006837845 + 0.0528707988560200 + -0.0251080095767975 + <_> + + <_> + + + + <_>3 0 3 13 -1. + <_>4 0 1 13 3. + 0 + -0.0144439199939370 + -0.2276381999254227 + 0.0203916095197201 + <_> + + <_> + + + + <_>6 14 9 5 -1. + <_>9 14 3 5 3. + 0 + -8.3497241139411926e-003 + -0.0870558172464371 + 0.0327079407870770 + <_> + + <_> + + + + <_>1 6 4 14 -1. + <_>1 6 2 7 2. + <_>3 13 2 7 2. + 0 + 0.0275143198668957 + -0.0206284094601870 + 0.2597712874412537 + <_> + + <_> + + + + <_>9 6 8 8 -1. + <_>13 6 4 4 2. + <_>9 10 4 4 2. + 0 + 0.0186101198196411 + -8.0523788928985596e-003 + 0.1692509055137634 + <_> + + <_> + + + + <_>0 4 4 8 -1. + <_>2 4 2 8 2. + 0 + -0.0957860499620438 + -0.5011662840843201 + 8.7666641920804977e-003 + <_> + + <_> + + + + <_>9 5 8 14 -1. + <_>13 5 4 7 2. + <_>9 12 4 7 2. + 0 + 0.1203697994351387 + 9.8632962908595800e-004 + -1.0000280141830444 + <_> + + <_> + + + + <_>3 6 8 8 -1. + <_>3 6 4 4 2. + <_>7 10 4 4 2. + 0 + 0.0247825793921947 + -0.0125197097659111 + 0.3591960966587067 + <_> + + <_> + + + + <_>11 3 6 10 -1. + <_>14 3 3 5 2. + <_>11 8 3 5 2. + 0 + -0.0503538288176060 + -0.3334051966667175 + 6.9066900759935379e-003 + <_> + + <_> + + + + <_>3 3 6 10 -1. + <_>3 3 3 5 2. + <_>6 8 3 5 2. + 0 + 0.0312980599701405 + 0.0109631195664406 + -0.4064522087574005 + <_> + + <_> + + + + <_>11 0 8 10 -1. + <_>15 0 4 5 2. + <_>11 5 4 5 2. + 0 + 7.4575231410562992e-003 + -0.0212076008319855 + 0.1316742002964020 + <_> + + <_> + + + + <_>3 13 13 3 -1. + <_>3 14 13 1 3. + 0 + 5.5791479535400867e-003 + -0.0340980701148510 + 0.1298383027315140 + <_> + + <_> + + + + <_>5 14 13 3 -1. + <_>5 15 13 1 3. + 0 + 5.9088319540023804e-003 + -0.0269406698644161 + 0.1683945953845978 + <_> + + <_> + + + + <_>0 4 4 12 -1. + <_>0 8 4 4 3. 
+ 0 + 0.0175433605909348 + 0.0423763692378998 + -0.1235039979219437 + <_> + + <_> + + + + <_>4 8 16 6 -1. + <_>12 8 8 3 2. + <_>4 11 8 3 2. + 0 + -9.6103046089410782e-003 + 0.0522239208221436 + -0.0255825594067574 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 2.0607879851013422e-003 + 0.0401741303503513 + -0.1054807975888252 + <_> + + <_> + + + + <_>12 11 7 6 -1. + <_>12 13 7 2 3. + 0 + -5.3874161094427109e-003 + -0.0649955794215202 + 0.0278071407228708 + <_> + + <_> + + + + <_>2 9 9 7 -1. + <_>5 9 3 7 3. + 0 + 0.1110230982303619 + -4.9670711159706116e-003 + 0.8171892166137695 + <_> + + <_> + + + + <_>5 6 15 9 -1. + <_>5 9 15 3 3. + 0 + -0.0373741500079632 + -0.6261141896247864 + 3.0927599873393774e-003 + <_> + + <_> + + + + <_>0 6 15 9 -1. + <_>0 9 15 3 3. + 0 + 5.0286632031202316e-003 + 0.2497866004705429 + -0.0181511007249355 + <_> + + <_> + + + + <_>6 8 14 2 -1. + <_>6 9 14 1 2. + 0 + 2.9225579928606749e-003 + -0.0605768188834190 + 0.0264973398298025 + <_> + + <_> + + + + <_>3 8 10 3 -1. + <_>8 8 5 3 2. + 0 + -0.0542966201901436 + -0.5799043774604797 + 6.5989522263407707e-003 + <_> + + <_> + + + + <_>11 0 9 5 -1. + <_>14 0 3 5 3. + 0 + 0.0129967201501131 + -0.0261282604187727 + 0.0970306098461151 + <_> + + <_> + + + + <_>2 6 16 2 -1. + <_>10 6 8 2 2. + 0 + 0.0330012291669846 + 0.0149604799225926 + -0.3230465948581696 + <_> + + <_> + + + + <_>5 12 12 8 -1. + <_>5 12 6 8 2. + 0 + -0.1166044995188713 + 0.2572514116764069 + -0.0126258302479982 + <_> + + <_> + + + + <_>0 3 18 3 -1. + <_>0 4 18 1 3. + 0 + 0.0707063376903534 + 7.0192231796681881e-003 + -0.6926059126853943 + <_> + + <_> + + + + <_>3 15 14 4 -1. + <_>10 15 7 2 2. + <_>3 17 7 2 2. + 0 + -0.0445499494671822 + -0.7113422155380249 + 4.9668429419398308e-003 + <_> + + <_> + + + + <_>2 7 16 2 -1. + <_>2 8 16 1 2. + 0 + 0.0428738184273243 + 6.7160711623728275e-003 + -0.5266085267066956 + <_> + + <_> + + + + <_>10 2 7 6 -1. + <_>10 4 7 2 3. + 0 + 0.0250252801924944 + -0.0184454098343849 + 0.0787932202219963 + <_> + + <_> + + + + <_>0 10 19 2 -1. + <_>0 11 19 1 2. + 0 + 2.1663550287485123e-003 + 0.0325403101742268 + -0.1311504989862442 + <_> + + <_> + + + + <_>13 0 7 18 -1. + <_>13 9 7 9 2. + 0 + 0.0255400408059359 + -0.0346935689449310 + 0.0414047986268997 + <_> + + <_> + + + + <_>1 9 9 5 -1. + <_>4 9 3 5 3. + 0 + -0.0836275070905685 + -0.5214344263076782 + 7.7060810290277004e-003 + <_> + + <_> + + + + <_>18 0 2 17 -1. + <_>18 0 1 17 2. + 0 + 3.7637550849467516e-003 + -0.0294636301696301 + 0.0744241923093796 + <_> + + <_> + + + + <_>0 0 2 16 -1. + <_>1 0 1 16 2. + 0 + 3.7175719626247883e-003 + -0.0421230010688305 + 0.1028700992465019 + <_> + + <_> + + + + <_>7 1 6 10 -1. + <_>10 1 3 5 2. + <_>7 6 3 5 2. + 0 + -5.2892807871103287e-003 + -0.1234839037060738 + 0.0371527001261711 + <_> + + <_> + + + + <_>0 9 12 11 -1. + <_>4 9 4 11 3. + 0 + -9.1878473758697510e-003 + 0.0902567505836487 + -0.0526740513741970 + <_> + + <_> + + + + <_>10 2 4 16 -1. + <_>10 2 2 16 2. + 0 + -0.0554489195346832 + -0.5363965034484863 + 2.6584670413285494e-003 + <_> + + <_> + + + + <_>6 2 4 16 -1. + <_>8 2 2 16 2. + 0 + 6.4754108898341656e-003 + 0.0553673505783081 + -0.0927226319909096 + <_> + + <_> + + + + <_>9 0 3 13 -1. + <_>10 0 1 13 3. + 0 + -1.5773440245538950e-003 + 0.1357893943786621 + -0.0409117303788662 + <_> + + <_> + + + + <_>7 4 4 12 -1. + <_>9 4 2 12 2. + 0 + -4.9912789836525917e-004 + -0.1472838073968887 + 0.0536036305129528 + <_> + + <_> + + + + <_>7 9 10 9 -1. + <_>7 9 5 9 2. 
+ 0 + 0.1569050997495651 + -7.8873159363865852e-003 + 0.3739778995513916 + <_> + + <_> + + + + <_>0 6 13 3 -1. + <_>0 7 13 1 3. + 0 + 0.0363918505609035 + 4.9765990115702152e-003 + -0.9115753173828125 + <_> + + <_> + + + + <_>10 2 7 6 -1. + <_>10 4 7 2 3. + 0 + -9.5625342801213264e-003 + 0.1276770979166031 + -0.0143946800380945 + <_> + + <_> + + + + <_>4 2 11 6 -1. + <_>4 4 11 2 3. + 0 + 2.4007901083678007e-003 + -0.1310738027095795 + 0.0447314791381359 + <_> + + <_> + + + + <_>9 1 8 4 -1. + <_>9 3 8 2 2. + 0 + 3.2929850276559591e-003 + 0.0404286310076714 + -0.0532235614955425 + <_> + + <_> + + + + <_>5 5 6 10 -1. + <_>5 5 3 5 2. + <_>8 10 3 5 2. + 0 + -3.1314359512180090e-003 + 0.0368261113762856 + -0.1211315989494324 + <_> + + <_> + + + + <_>15 3 3 13 -1. + <_>16 3 1 13 3. + 0 + 0.0520083308219910 + 5.9283021837472916e-003 + -0.4385884106159210 + <_> + + <_> + + + + <_>2 3 3 13 -1. + <_>3 3 1 13 3. + 0 + 5.7681259931996465e-004 + -0.0698517709970474 + 0.0642862915992737 + <_> + + <_> + + + + <_>13 1 3 13 -1. + <_>14 1 1 13 3. + 0 + 6.1443001031875610e-003 + 0.0309080593287945 + -0.1822980940341950 + <_> + + <_> + + + + <_>4 1 10 6 -1. + <_>4 3 10 2 3. + 0 + 0.0359597206115723 + -0.0416809916496277 + 0.1424479037523270 + <_> + + <_> + + + + <_>0 2 20 8 -1. + <_>0 6 20 4 2. + 0 + -0.0212908200919628 + -0.0966623201966286 + 0.0558887496590614 + <_> + + <_> + + + + <_>2 1 13 18 -1. + <_>2 10 13 9 2. + 0 + -6.2724511371925473e-004 + 0.0901505574584007 + -0.0694307535886765 + <_> + + <_> + + + + <_>9 5 3 10 -1. + <_>9 10 3 5 2. + 0 + -2.5145700201392174e-003 + -0.0695260465145111 + 0.0455525815486908 + <_> + + <_> + + + + <_>3 6 12 14 -1. + <_>9 6 6 14 2. + 0 + 0.0578746497631073 + -0.0250365808606148 + 0.2063318043947220 + <_> + + <_> + + + + <_>8 12 6 6 -1. + <_>8 12 3 6 2. + 0 + 0.0158984698355198 + -0.0171333998441696 + 0.1100495979189873 + <_> + + <_> + + + + <_>1 9 18 3 -1. + <_>7 9 6 3 3. + 0 + 0.0278827995061874 + 0.0277131795883179 + -0.1653641015291214 + <_> + + <_> + + + + <_>2 14 18 2 -1. + <_>2 14 9 2 2. + 0 + 8.8283112272620201e-003 + -0.0274972505867481 + 0.0598228909075260 + <_> + + <_> + + + + <_>4 1 3 13 -1. + <_>5 1 1 13 3. + 0 + -0.0156799107789993 + -0.2698498964309692 + 0.0163982398808002 + <_> + + <_> + + + + <_>11 6 6 7 -1. + <_>13 6 2 7 3. + 0 + 0.0419061891734600 + -8.0525986850261688e-003 + 0.3155631124973297 + <_> + + <_> + + + + <_>3 6 6 7 -1. + <_>5 6 2 7 3. + 0 + -0.0410686098039150 + 0.2563756108283997 + -0.0183579102158546 + <_> + + <_> + + + + <_>12 0 3 13 -1. + <_>13 0 1 13 3. + 0 + 3.5570110194385052e-003 + 0.0293438304215670 + -0.1266846954822540 + <_> + + <_> + + + + <_>8 6 3 13 -1. + <_>9 6 1 13 3. + 0 + -2.1371750626713037e-003 + 0.1292326003313065 + -0.0401022098958492 + <_> + + <_> + + + + <_>8 8 5 12 -1. + <_>8 12 5 4 3. + 0 + 0.0336380898952484 + 8.1196166574954987e-003 + -0.4039478003978729 + <_> + + <_> + + + + <_>2 4 8 5 -1. + <_>6 4 4 5 2. + 0 + 0.0101829199120402 + -0.0425661802291870 + 0.1184310019016266 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -7.0302112726494670e-004 + 0.0387219786643982 + -0.0797034204006195 + <_> + + <_> + + + + <_>7 4 6 16 -1. + <_>7 4 3 8 2. + <_>10 12 3 8 2. + 0 + -2.8552680741995573e-003 + 0.0912742763757706 + -0.0616914518177509 + <_> + + <_> + + + + <_>12 0 3 13 -1. + <_>13 0 1 13 3. + 0 + -2.9935541097074747e-003 + -0.1091345027089119 + 0.0387369506061077 + <_> + + <_> + + + + <_>3 7 8 4 -1. + <_>3 9 8 2 2. 
+ 0 + -5.3608341841027141e-004 + -0.4325248897075653 + 0.0109582701697946 + <_> + + <_> + + + + <_>4 8 16 6 -1. + <_>12 8 8 3 2. + <_>4 11 8 3 2. + 0 + 0.0514318905770779 + 4.7060111537575722e-003 + -0.2676590085029602 + <_> + + <_> + + + + <_>5 11 9 8 -1. + <_>5 15 9 4 2. + 0 + -0.0488728918135166 + 0.2014472931623459 + -0.0228445194661617 + <_> + + <_> + + + + <_>10 3 6 17 -1. + <_>12 3 2 17 3. + 0 + -0.1608044952154160 + -1. + 1.9577229395508766e-003 + <_> + + <_> + + + + <_>4 3 6 17 -1. + <_>6 3 2 17 3. + 0 + 0.0185099393129349 + 0.0178086608648300 + -0.2787115871906281 + <_> + + <_> + + + + <_>5 6 10 3 -1. + <_>5 6 5 3 2. + 0 + -0.0421069487929344 + -0.6249315738677979 + 7.0520970039069653e-003 + <_> + + <_> + + + + <_>1 16 16 2 -1. + <_>9 16 8 2 2. + 0 + -0.0970967784523964 + -0.8450583815574646 + 4.4749649241566658e-003 + <_> + + <_> + + + + <_>7 1 6 10 -1. + <_>9 1 2 10 3. + 0 + -9.4244757201522589e-004 + 0.1979676038026810 + -0.0227331202477217 + <_> + + <_> + + + + <_>5 0 3 13 -1. + <_>6 0 1 13 3. + 0 + -0.0180408097803593 + -0.3342410922050476 + 0.0133580397814512 + <_> + + <_> + + + + <_>4 9 13 2 -1. + <_>4 10 13 1 2. + 0 + 6.3626631163060665e-004 + -0.1053074970841408 + 0.0440161600708961 + <_> + + <_> + + + + <_>1 0 13 3 -1. + <_>1 1 13 1 3. + 0 + -3.4530549310147762e-003 + -0.1368706971406937 + 0.0302882809191942 + <_> + + <_> + + + + <_>3 0 14 12 -1. + <_>3 4 14 4 3. + 0 + 0.0175898093730211 + -0.0280312802642584 + 0.1833170056343079 + <_> + + <_> + + + + <_>0 1 10 6 -1. + <_>0 4 10 3 2. + 0 + -1.4289390528574586e-003 + 0.0676161572337151 + -0.0644003599882126 + <_> + + <_> + + + + <_>9 0 11 10 -1. + <_>9 5 11 5 2. + 0 + 0.0145845701918006 + -0.0325488112866879 + 0.0770702213048935 + <_> + + <_> + + + + <_>0 0 20 20 -1. + <_>0 10 20 10 2. + 0 + 0.7457957863807678 + 9.1963959857821465e-003 + -0.4568012058734894 + <_> + + <_> + + + + <_>10 1 10 4 -1. + <_>10 1 5 4 2. + 0 + -0.1228564977645874 + -0.6442360877990723 + 2.0847769919782877e-003 + <_> + + <_> + + + + <_>0 1 10 4 -1. + <_>5 1 5 4 2. + 0 + -0.1161300018429756 + -0.7927427887916565 + 4.9578230828046799e-003 + <_> + + <_> + + + + <_>11 0 8 10 -1. + <_>15 0 4 5 2. + <_>11 5 4 5 2. + 0 + 0.0556448400020599 + -5.7718120515346527e-003 + 0.3083428144454956 + <_> + + <_> + + + + <_>1 0 8 10 -1. + <_>1 0 4 5 2. + <_>5 5 4 5 2. + 0 + 0.0205664299428463 + -0.0154747096821666 + 0.2800293862819672 + <_> + + <_> + + + + <_>6 3 14 4 -1. + <_>13 3 7 2 2. + <_>6 5 7 2 2. + 0 + 3.8393519935198128e-004 + 0.0343902483582497 + -0.1024418994784355 + <_> + + <_> + + + + <_>0 3 20 4 -1. + <_>0 3 10 2 2. + <_>10 5 10 2 2. + 0 + 4.0198508650064468e-003 + 0.0525331385433674 + -0.1149272024631500 + <_> + + <_> + + + + <_>9 5 6 7 -1. + <_>11 5 2 7 3. + 0 + -0.0741244107484818 + -0.3021646142005920 + 4.2779031209647655e-003 + <_> + + <_> + + + + <_>5 5 6 7 -1. + <_>7 5 2 7 3. + 0 + -3.4346429165452719e-003 + 0.0656274929642677 + -0.0699915885925293 + <_> + + <_> + + + + <_>6 6 8 7 -1. + <_>6 6 4 7 2. + 0 + -4.3740049004554749e-003 + -0.1293483972549439 + 0.0512335188686848 + <_> + + <_> + + + + <_>8 6 4 7 -1. + <_>10 6 2 7 2. + 0 + 6.9464151747524738e-003 + -0.0325918495655060 + 0.1509806066751480 + <_> + + <_> + + + + <_>8 0 6 7 -1. + <_>10 0 2 7 3. + 0 + -0.0184341706335545 + -0.3136422038078308 + 9.5867328345775604e-003 + <_> + + <_> + + + + <_>6 0 6 7 -1. + <_>8 0 2 7 3. + 0 + -3.2201830763369799e-003 + -0.1749431937932968 + 0.0335790589451790 + <_> + + <_> + + + + <_>4 0 12 16 -1. + <_>8 0 4 16 3. 
+ 0 + -0.0322732999920845 + 0.2413620054721832 + -0.0243920106440783 + <_> + + <_> + + + + <_>5 6 4 8 -1. + <_>7 6 2 8 2. + 0 + -4.8193791881203651e-003 + -0.1361021995544434 + 0.0411566607654095 + <_> + + <_> + + + + <_>7 12 11 8 -1. + <_>7 16 11 4 2. + 0 + -0.0983476266264915 + -0.5332471728324890 + 8.8729923591017723e-003 + <_> + + <_> + + + + <_>6 0 6 12 -1. + <_>6 0 3 6 2. + <_>9 6 3 6 2. + 0 + 0.0190546195954084 + -0.0325642712414265 + 0.1672970950603485 + <_> + + <_> + + + + <_>4 3 12 12 -1. + <_>10 3 6 6 2. + <_>4 9 6 6 2. + 0 + -0.0817961692810059 + -0.6413124203681946 + 8.7052602320909500e-003 + <_> + + <_> + + + + <_>2 10 6 7 -1. + <_>4 10 2 7 3. + 0 + 3.2996949739754200e-003 + -0.0597654394805431 + 0.0718798562884331 + <_> + + <_> + + + + <_>15 10 4 7 -1. + <_>15 10 2 7 2. + 0 + -0.0759776607155800 + -0.5041542053222656 + 5.6795510463416576e-003 + <_> + + <_> + + + + <_>1 10 4 7 -1. + <_>3 10 2 7 2. + 0 + 0.0305087603628635 + 0.0103173600509763 + -0.4355288147926331 + <_> + + <_> + + + + <_>8 5 6 7 -1. + <_>10 5 2 7 3. + 0 + -0.0376429595053196 + 0.3732442855834961 + -0.0172762293368578 + <_> + + <_> + + + + <_>3 2 13 2 -1. + <_>3 3 13 1 2. + 0 + -9.9801109172403812e-004 + -0.1450877040624619 + 0.0309737008064985 + <_> + + <_> + + + + <_>4 3 14 3 -1. + <_>4 4 14 1 3. + 0 + -2.0703389309346676e-003 + 0.1228592023253441 + -0.0252858996391296 + <_> + + <_> + + + + <_>1 0 7 6 -1. + <_>1 2 7 2 3. + 0 + 0.0718163773417473 + 7.2997398674488068e-003 + -0.6262109279632568 + <_> + + <_> + + + + <_>6 5 13 9 -1. + <_>6 8 13 3 3. + 0 + 0.1678192019462585 + -0.0100940698757768 + 0.2253118008375168 + <_> + + <_> + + + + <_>0 8 16 6 -1. + <_>0 8 8 3 2. + <_>8 11 8 3 2. + 0 + 1.5028619964141399e-004 + -0.0490138381719589 + 0.0956356376409531 + <_> + + <_> + + + + <_>15 1 5 12 -1. + <_>15 5 5 4 3. + 0 + 0.0951396375894547 + -2.3964960128068924e-003 + 0.7897282242774963 + <_> + + <_> + + + + <_>0 1 5 12 -1. + <_>0 5 5 4 3. + 0 + 3.8569360040128231e-003 + 0.0408524312078953 + -0.1197697967290878 + <_> + + <_> + + + + <_>5 14 14 3 -1. + <_>5 15 14 1 3. + 0 + 0.0231727603822947 + -8.1755416467785835e-003 + 0.3489589989185333 + <_> + + <_> + + + + <_>2 10 6 9 -1. + <_>4 10 2 9 3. + 0 + 0.0134179899469018 + 0.0293577294796705 + -0.1447695046663284 + <_> + + <_> + + + + <_>11 13 9 7 -1. + <_>14 13 3 7 3. + 0 + -0.1416577994823456 + 0.3496044874191284 + -3.9633908309042454e-003 + <_> + + <_> + + + + <_>0 15 9 5 -1. + <_>3 15 3 5 3. + 0 + 5.5483141914010048e-003 + -0.0467367693781853 + 0.0876308232545853 + <_> + + <_> + + + + <_>16 9 4 11 -1. + <_>16 9 2 11 2. + 0 + -4.7431029379367828e-003 + 0.0628996789455414 + -0.0269835907965899 + <_> + + <_> + + + + <_>0 11 19 3 -1. + <_>0 12 19 1 3. + 0 + -0.0668627768754959 + -0.9527286887168884 + 3.9776111952960491e-003 + <_> + + <_> + + + + <_>6 15 14 4 -1. + <_>13 15 7 2 2. + <_>6 17 7 2 2. + 0 + 0.0229878406971693 + -0.0178028997033834 + 0.1456494927406311 + <_> + + <_> + + + + <_>0 5 12 6 -1. + <_>0 7 12 2 3. + 0 + -0.0222342796623707 + -0.0933604463934898 + 0.0515370704233646 + <_> + + <_> + + + + <_>16 9 4 11 -1. + <_>16 9 2 11 2. + 0 + 1.5045719919726253e-005 + -0.0302377492189407 + 0.0266546700149775 + <_> + + <_> + + + + <_>0 9 4 11 -1. + <_>2 9 2 11 2. + 0 + -4.7994707711040974e-003 + 0.1010553017258644 + -0.0500839911401272 + <_> + + <_> + + + + <_>2 11 18 5 -1. + <_>8 11 6 5 3. + 0 + -0.2422790974378586 + -0.6839948296546936 + 2.1470880601555109e-003 + <_> + + <_> + + + + <_>1 15 14 4 -1. + <_>1 15 7 2 2. + <_>8 17 7 2 2. 
+ 0 + 0.0469397902488709 + 8.1193735823035240e-003 + -0.4767181873321533 + <_> + + <_> + + + + <_>12 10 7 9 -1. + <_>12 13 7 3 3. + 0 + -0.0609402805566788 + 0.2382732927799225 + -9.5430584624409676e-003 + <_> + + <_> + + + + <_>1 10 7 9 -1. + <_>1 13 7 3 3. + 0 + 0.0241047404706478 + -0.0157990790903568 + 0.2672789990901947 + <_> + + <_> + + + + <_>11 7 8 8 -1. + <_>15 7 4 4 2. + <_>11 11 4 4 2. + 0 + -0.0465675704181194 + -0.3101777136325836 + 8.3353007212281227e-003 + <_> + + <_> + + + + <_>6 14 8 4 -1. + <_>6 16 8 2 2. + 0 + 1.8709240248426795e-003 + -0.0725880712270737 + 0.0656082704663277 + <_> + + <_> + + + + <_>11 1 2 19 -1. + <_>11 1 1 19 2. + 0 + -5.9872400015592575e-003 + -0.1815969049930573 + 0.0140300299972296 + <_> + + <_> + + + + <_>6 10 3 10 -1. + <_>6 15 3 5 2. + 0 + -7.3103660724882502e-006 + 0.0409137904644012 + -0.1065644025802612 + <_> + + <_> + + + + <_>11 9 6 5 -1. + <_>11 9 3 5 2. + 0 + -0.0232445504516363 + -0.1903554052114487 + 0.0159660596400499 + <_> + + <_> + + + + <_>3 9 6 5 -1. + <_>6 9 3 5 2. + 0 + -1.1853489559143782e-003 + 0.0599567107856274 + -0.0766784474253654 + <_> + + <_> + + + + <_>4 12 15 4 -1. + <_>9 12 5 4 3. + 0 + -0.1298182010650635 + 0.4099949896335602 + -5.0850748084485531e-003 + <_> + + <_> + + + + <_>0 5 16 2 -1. + <_>8 5 8 2 2. + 0 + -0.0515126697719097 + -0.3052723109722138 + 0.0141863403841853 + <_> + + <_> + + + + <_>6 6 14 4 -1. + <_>13 6 7 2 2. + <_>6 8 7 2 2. + 0 + -3.9303461089730263e-003 + -0.0797634795308113 + 0.0262488909065723 + <_> + + <_> + + + + <_>3 5 8 14 -1. + <_>3 5 4 7 2. + <_>7 12 4 7 2. + 0 + 0.0158228296786547 + -0.0168493092060089 + 0.2754979133605957 + <_> + + <_> + + + + <_>12 2 7 15 -1. + <_>12 7 7 5 3. + 0 + 0.1156157031655312 + 6.7870649509131908e-003 + -0.1270931959152222 + <_> + + <_> + + + + <_>1 2 7 15 -1. + <_>1 7 7 5 3. + 0 + 1.1260829633101821e-003 + 0.0819085165858269 + -0.0581940487027168 + <_> + + <_> + + + + <_>13 0 6 12 -1. + <_>13 6 6 6 2. + 0 + 0.0155134303495288 + -0.0429897196590900 + 0.0783642977476120 + <_> + + <_> + + + + <_>6 0 8 10 -1. + <_>6 0 4 5 2. + <_>10 5 4 5 2. + 0 + 0.0462687313556671 + 0.0117595503106713 + -0.3994733095169067 + <_> + + <_> + + + + <_>11 0 2 19 -1. + <_>11 0 1 19 2. + 0 + 7.9535972326993942e-003 + 0.0168485399335623 + -0.0885990783572197 + <_> + + <_> + + + + <_>4 12 8 8 -1. + <_>4 12 4 4 2. + <_>8 16 4 4 2. + 0 + -0.0189912207424641 + 0.2481326013803482 + -0.0173208508640528 + <_> + + <_> + + + + <_>4 12 15 4 -1. + <_>9 12 5 4 3. + 0 + 3.7058200687170029e-003 + -0.0217470303177834 + 0.0582760907709599 + <_> + + <_> + + + + <_>7 0 2 19 -1. + <_>8 0 1 19 2. + 0 + 2.5829279329627752e-003 + 0.0505592785775661 + -0.0931939184665680 + <_> + + <_> + + + + <_>8 4 6 9 -1. + <_>10 4 2 9 3. + 0 + -0.0310105606913567 + 0.2211043983697891 + -0.0147864995524287 + <_> + + <_> + + + + <_>5 5 8 4 -1. + <_>9 5 4 4 2. + 0 + 2.5402549654245377e-003 + -0.0867436006665230 + 0.0579324103891850 + <_> + + <_> + + + + <_>4 12 15 4 -1. + <_>9 12 5 4 3. + 0 + -8.9100487530231476e-003 + 0.0538460798561573 + -0.0459319092333317 + <_> + + <_> + + + + <_>2 6 4 12 -1. + <_>2 12 4 6 2. + 0 + 4.0557151660323143e-003 + 0.0592983998358250 + -0.0830072537064552 + <_> + + <_> + + + + <_>6 7 12 6 -1. + <_>10 7 4 6 3. + 0 + 0.0612049400806427 + 9.2248879373073578e-003 + -0.2108236998319626 + <_> + + <_> + + + + <_>3 5 12 4 -1. + <_>7 5 4 4 3. + 0 + 7.7630057930946350e-003 + -0.0759270563721657 + 0.0578657090663910 + <_> + + <_> + + + + <_>8 14 12 4 -1. + <_>8 14 6 4 2. 
+ 0 + 0.1592115014791489 + 8.3040859317407012e-004 + -1.0000480413436890 + <_> + + <_> + + + + <_>0 14 12 4 -1. + <_>6 14 6 4 2. + 0 + 0.0391961894929409 + 7.1930838748812675e-003 + -0.6033862233161926 + <_> + + <_> + + + + <_>4 12 15 4 -1. + <_>9 12 5 4 3. + 0 + 0.1022028997540474 + -3.6227719392627478e-003 + 0.5450075268745422 + <_> + + <_> + + + + <_>1 12 15 4 -1. + <_>6 12 5 4 3. + 0 + -0.1506498008966446 + -0.7045075893402100 + 6.6995541565120220e-003 + <_> + + <_> + + + + <_>6 0 12 18 -1. + <_>10 0 4 18 3. + 0 + 0.1381929963827133 + -0.0111538600176573 + 0.1793290972709656 + <_> + + <_> + + + + <_>0 6 14 4 -1. + <_>0 6 7 2 2. + <_>7 8 7 2 2. + 0 + -3.8313010009005666e-004 + -0.0724423527717590 + 0.0579259805381298 + <_> + + <_> + + + + <_>13 13 7 6 -1. + <_>13 15 7 2 3. + 0 + -2.7796919457614422e-003 + -0.0862803980708122 + 0.0410146005451679 + <_> + + <_> + + + + <_>0 0 6 18 -1. + <_>0 9 6 9 2. + 0 + 0.0393651388585567 + -0.0466293208301067 + 0.0881240069866180 + <_> + + <_> + + + + <_>6 8 14 4 -1. + <_>13 8 7 2 2. + <_>6 10 7 2 2. + 0 + -0.0619338192045689 + 0.7011855244636536 + -2.5661089457571507e-003 + <_> + + <_> + + + + <_>0 8 14 4 -1. + <_>0 8 7 2 2. + <_>7 10 7 2 2. + 0 + -5.9742941521108150e-003 + -0.1651901006698608 + 0.0379470214247704 + <_> + + <_> + + + + <_>3 2 14 10 -1. + <_>3 7 14 5 2. + 0 + 7.5101079419255257e-003 + 0.0541914887726307 + -0.0791666582226753 + <_> + + <_> + + + + <_>3 5 6 7 -1. + <_>5 5 2 7 3. + 0 + -0.0970056727528572 + -0.8810477256774902 + 4.8486101441085339e-003 + <_> + + <_> + + + + <_>4 4 14 6 -1. + <_>11 4 7 3 2. + <_>4 7 7 3 2. + 0 + -6.7751510068774223e-003 + 0.0916011631488800 + -0.0489427708089352 + <_> + + <_> + + + + <_>6 2 4 10 -1. + <_>6 7 4 5 2. + 0 + -9.2599419876933098e-003 + -0.1329811960458756 + 0.0417855009436607 + <_> + + <_> + + + + <_>11 1 3 18 -1. + <_>11 7 3 6 3. + 0 + 1.5215040184557438e-003 + 0.0526335909962654 + -0.0606244392693043 + <_> + + <_> + + + + <_>3 1 3 15 -1. + <_>3 6 3 5 3. + 0 + 5.4703168570995331e-003 + -0.0478251799941063 + 0.1119457036256790 + <_> + + <_> + + + + <_>7 0 8 6 -1. + <_>7 0 4 6 2. + 0 + 0.0250021107494831 + -0.0203549694269896 + 0.1017559021711350 + <_> + + <_> + + + + <_>2 0 9 15 -1. + <_>2 5 9 5 3. + 0 + 0.0325767807662487 + 0.0256296601146460 + -0.1948419064283371 + <_> + + <_> + + + + <_>2 0 18 3 -1. + <_>8 0 6 3 3. + 0 + -7.7732130885124207e-003 + 0.1247740015387535 + -0.0346679985523224 + <_> + + <_> + + + + <_>2 8 12 8 -1. + <_>6 8 4 8 3. + 0 + 0.0177771896123886 + 0.0332618206739426 + -0.1415522992610931 + <_> + + <_> + + + + <_>5 8 15 12 -1. + <_>10 8 5 12 3. + 0 + 0.0104594295844436 + -0.0440398789942265 + 0.0618715584278107 + -1.1210759878158569 + 44 + -1 + <_> + + + <_> + + <_> + + + + <_>0 1 18 3 -1. + <_>6 1 6 3 3. + 0 + 0.0187511891126633 + -0.1777507960796356 + 0.1715743988752365 + <_> + + <_> + + + + <_>9 5 2 14 -1. + <_>9 12 2 7 2. + 0 + -2.1875950042158365e-003 + 0.0753391534090042 + -0.2584212124347687 + <_> + + <_> + + + + <_>5 4 10 6 -1. + <_>5 6 10 2 3. + 0 + -0.1169869005680084 + 0.4264537096023560 + -0.0371216982603073 + <_> + + <_> + + + + <_>9 10 7 6 -1. + <_>9 12 7 2 3. + 0 + 3.8377330638468266e-003 + 0.0350924395024776 + -0.1575728952884674 + <_> + + <_> + + + + <_>3 7 12 4 -1. + <_>7 7 4 4 3. + 0 + -1.2941210297867656e-003 + -0.2006873041391373 + 0.0557048097252846 + <_> + + <_> + + + + <_>4 10 12 4 -1. + <_>8 10 4 4 3. + 0 + 4.3927300721406937e-003 + 0.0574970990419388 + -0.1930274069309235 + <_> + + <_> + + + + <_>0 3 14 4 -1. + <_>0 3 7 2 2. + <_>7 5 7 2 2. 
+ 0 + -1.5021540457382798e-003 + 0.0723789781332016 + -0.1453491002321243 + <_> + + <_> + + + + <_>5 17 15 3 -1. + <_>5 18 15 1 3. + 0 + 1.2381949927657843e-003 + -0.0904137790203094 + 0.0828387886285782 + <_> + + <_> + + + + <_>5 11 10 6 -1. + <_>5 11 5 3 2. + <_>10 14 5 3 2. + 0 + 3.0004729051142931e-003 + 0.0601994097232819 + -0.1555617004632950 + <_> + + <_> + + + + <_>4 4 13 3 -1. + <_>4 5 13 1 3. + 0 + 4.5666601508855820e-003 + -0.0769366398453712 + 0.1376277059316635 + <_> + + <_> + + + + <_>5 11 5 9 -1. + <_>5 14 5 3 3. + 0 + 9.9231943022459745e-004 + 0.0479182116687298 + -0.2047235965728760 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + -3.8909649010747671e-003 + -0.2106703966856003 + 0.0592971891164780 + <_> + + <_> + + + + <_>0 13 13 3 -1. + <_>0 14 13 1 3. + 0 + 2.4324860423803329e-003 + -0.0736118704080582 + 0.1416556984186173 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + -3.3090400975197554e-003 + -0.1648906022310257 + 0.0433108918368816 + <_> + + <_> + + + + <_>2 6 6 9 -1. + <_>2 9 6 3 3. + 0 + 5.9596560895442963e-003 + -0.2138839960098267 + 0.0434729084372520 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + 9.7754271700978279e-003 + 0.0276642907410860 + -0.1911989003419876 + <_> + + <_> + + + + <_>1 3 4 14 -1. + <_>1 3 2 7 2. + <_>3 10 2 7 2. + 0 + -0.0381243005394936 + 0.3165884017944336 + -0.0299726799130440 + <_> + + <_> + + + + <_>13 4 3 12 -1. + <_>13 10 3 6 2. + 0 + 1.4401610242202878e-003 + -0.1660213023424149 + 0.0613009110093117 + <_> + + <_> + + + + <_>7 7 6 13 -1. + <_>9 7 2 13 3. + 0 + 7.5199408456683159e-004 + -0.1356851011514664 + 0.0573457702994347 + <_> + + <_> + + + + <_>7 0 9 5 -1. + <_>10 0 3 5 3. + 0 + 2.4780649691820145e-003 + -0.0772587582468987 + 0.0537812002003193 + <_> + + <_> + + + + <_>5 0 9 5 -1. + <_>8 0 3 5 3. + 0 + 9.2068109661340714e-003 + 0.0743493512272835 + -0.1388649940490723 + <_> + + <_> + + + + <_>9 5 2 13 -1. + <_>9 5 1 13 2. + 0 + 0.0176345407962799 + -0.0268171597272158 + 0.3491244912147522 + <_> + + <_> + + + + <_>7 3 3 12 -1. + <_>7 9 3 6 2. + 0 + 1.0517879854887724e-003 + 0.0834444835782051 + -0.0832714363932610 + <_> + + <_> + + + + <_>2 1 18 3 -1. + <_>8 1 6 3 3. + 0 + -7.2119189426302910e-003 + 0.1414905041456223 + -0.0308531895279884 + <_> + + <_> + + + + <_>4 3 12 16 -1. + <_>4 3 6 8 2. + <_>10 11 6 8 2. + 0 + 8.1929508596658707e-003 + 0.0642498284578323 + -0.1422446072101593 + <_> + + <_> + + + + <_>14 0 3 13 -1. + <_>15 0 1 13 3. + 0 + -5.7932751951739192e-004 + -0.0617689304053783 + 0.0348352305591106 + <_> + + <_> + + + + <_>3 3 14 3 -1. + <_>3 4 14 1 3. + 0 + 4.5172017998993397e-003 + -0.0739256665110588 + 0.0953478664159775 + <_> + + <_> + + + + <_>0 13 20 7 -1. + <_>0 13 10 7 2. + 0 + 0.2228025048971176 + 0.0280794501304626 + -0.2617459893226624 + <_> + + <_> + + + + <_>3 0 3 13 -1. + <_>4 0 1 13 3. + 0 + -8.1560667604207993e-004 + -0.1112871021032333 + 0.0617512613534927 + <_> + + <_> + + + + <_>14 0 6 6 -1. + <_>14 0 3 6 2. + 0 + 0.0190092604607344 + -0.0359148494899273 + 0.0953326970338821 + <_> + + <_> + + + + <_>4 2 2 14 -1. + <_>4 9 2 7 2. + 0 + -1.1708099627867341e-003 + -0.1780942976474762 + 0.0384717583656311 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>16 1 2 12 3. + 0 + -0.0274928398430347 + 0.1567419022321701 + -0.0363074503839016 + <_> + + <_> + + + + <_>0 6 14 4 -1. + <_>0 6 7 2 2. + <_>7 8 7 2 2. + 0 + -5.4139150306582451e-003 + -0.1601458042860031 + 0.0452282987535000 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>16 1 2 12 3. 
+ 0 + 0.0113256704062223 + -0.0526791289448738 + 0.1241158023476601 + <_> + + <_> + + + + <_>0 4 20 6 -1. + <_>0 6 20 2 3. + 0 + -0.1391907930374146 + -0.2857300937175751 + 0.0256421808153391 + <_> + + <_> + + + + <_>14 1 6 12 -1. + <_>16 1 2 12 3. + 0 + -0.0761838108301163 + 0.2039088010787964 + -0.0127019397914410 + <_> + + <_> + + + + <_>0 8 15 3 -1. + <_>0 9 15 1 3. + 0 + 1.3947900151833892e-003 + -0.1132052987813950 + 0.0574193000793457 + <_> + + <_> + + + + <_>2 1 16 6 -1. + <_>10 1 8 3 2. + <_>2 4 8 3 2. + 0 + 4.6532237902283669e-003 + 0.0577959902584553 + -0.1099701002240181 + <_> + + <_> + + + + <_>0 1 6 12 -1. + <_>2 1 2 12 3. + 0 + 0.0450343899428844 + -0.0287619791924953 + 0.2260572016239166 + <_> + + <_> + + + + <_>9 2 9 5 -1. + <_>12 2 3 5 3. + 0 + 0.0168640092015266 + 0.0363180898129940 + -0.2016277015209198 + <_> + + <_> + + + + <_>1 1 18 4 -1. + <_>7 1 6 4 3. + 0 + 0.1925127953290939 + -0.0138699896633625 + 0.5422633886337280 + <_> + + <_> + + + + <_>13 0 3 13 -1. + <_>14 0 1 13 3. + 0 + -1.6758369747549295e-003 + -0.1146278977394104 + 0.0499848499894142 + <_> + + <_> + + + + <_>0 0 4 7 -1. + <_>2 0 2 7 2. + 0 + -4.5270361006259918e-003 + 0.1173190996050835 + -0.0613847002387047 + <_> + + <_> + + + + <_>13 0 3 13 -1. + <_>14 0 1 13 3. + 0 + 5.4975082166492939e-003 + 0.0321948304772377 + -0.1534854024648666 + <_> + + <_> + + + + <_>0 0 9 6 -1. + <_>3 0 3 6 3. + 0 + 3.5562040284276009e-003 + -0.0639379397034645 + 0.1078746989369392 + <_> + + <_> + + + + <_>10 1 6 5 -1. + <_>10 1 3 5 2. + 0 + 2.1489830687642097e-003 + -0.0509767383337021 + 0.0293150003999472 + <_> + + <_> + + + + <_>6 6 6 7 -1. + <_>8 6 2 7 3. + 0 + -0.0104642100632191 + 0.1954874992370606 + -0.0327844098210335 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -0.0297797191888094 + -0.3928653895854950 + 0.0122666200622916 + <_> + + <_> + + + + <_>5 2 4 7 -1. + <_>7 2 2 7 2. + 0 + 9.6993939951062202e-004 + -0.1077279970049858 + 0.0616842508316040 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -0.0404990985989571 + -0.3669664859771729 + 0.0118055399507284 + <_> + + <_> + + + + <_>4 0 3 13 -1. + <_>5 0 1 13 3. + 0 + -2.3762779310345650e-003 + -0.1393374055624008 + 0.0500101707875729 + <_> + + <_> + + + + <_>2 1 18 3 -1. + <_>8 1 6 3 3. + 0 + -5.1528858020901680e-003 + 0.0974240005016327 + -0.0238206908106804 + <_> + + <_> + + + + <_>6 5 4 14 -1. + <_>6 5 2 7 2. + <_>8 12 2 7 2. + 0 + -0.0287269800901413 + 0.2103171944618225 + -0.0360882692039013 + <_> + + <_> + + + + <_>1 5 19 4 -1. + <_>1 7 19 2 2. + 0 + 0.0142153501510620 + 0.0346641317009926 + -0.1581434011459351 + <_> + + <_> + + + + <_>0 11 7 6 -1. + <_>0 13 7 2 3. + 0 + 2.0164670422673225e-003 + 0.0504870712757111 + -0.1270419955253601 + <_> + + <_> + + + + <_>6 12 13 2 -1. + <_>6 13 13 1 2. + 0 + 4.1724709444679320e-004 + -0.0566351898014545 + 0.1078914031386375 + <_> + + <_> + + + + <_>3 0 12 10 -1. + <_>3 0 6 5 2. + <_>9 5 6 5 2. + 0 + 7.3380130343139172e-003 + 0.0508917197585106 + -0.1221043989062309 + <_> + + <_> + + + + <_>2 1 18 3 -1. + <_>8 1 6 3 3. + 0 + -0.0759307667613029 + 0.2262721061706543 + -6.6569480113685131e-003 + <_> + + <_> + + + + <_>0 15 18 3 -1. + <_>9 15 9 3 2. + 0 + -4.2873369529843330e-003 + 0.0721042901277542 + -0.0801061391830444 + <_> + + <_> + + + + <_>6 14 14 6 -1. + <_>6 14 7 6 2. + 0 + -0.0241016708314419 + 0.0913553014397621 + -0.0345919691026211 + <_> + + <_> + + + + <_>0 14 14 6 -1. + <_>7 14 7 6 2. 
+ 0 + 0.0199365504086018 + -0.0377642400562763 + 0.1889691948890686 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.5693989992141724 + 3.1492649577558041e-003 + -0.5984647274017334 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + 0.1035206019878388 + 0.0233232006430626 + -0.3212923109531403 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + 0.0595569908618927 + 4.2170342057943344e-003 + -0.3344213962554932 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -0.0505755394697189 + -0.8479322791099548 + 6.6583030857145786e-003 + <_> + + <_> + + + + <_>11 0 6 7 -1. + <_>13 0 2 7 3. + 0 + -5.5158971808850765e-003 + -0.0705074965953827 + 0.0217167697846890 + <_> + + <_> + + + + <_>1 8 15 4 -1. + <_>6 8 5 4 3. + 0 + 0.0294193103909492 + -0.0363194085657597 + 0.1751094013452530 + <_> + + <_> + + + + <_>13 12 7 6 -1. + <_>13 14 7 2 3. + 0 + 0.0109724402427673 + 0.0182671993970871 + -0.1864134073257446 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -3.8842339999973774e-003 + -0.1073592007160187 + 0.0608490407466888 + <_> + + <_> + + + + <_>12 12 7 4 -1. + <_>12 14 7 2 2. + 0 + -1.1936859664274380e-004 + 0.0523486211895943 + -0.1270153969526291 + <_> + + <_> + + + + <_>1 2 8 8 -1. + <_>1 2 4 4 2. + <_>5 6 4 4 2. + 0 + -5.0230980850756168e-003 + 0.0526827201247215 + -0.1270367950201035 + <_> + + <_> + + + + <_>2 1 18 3 -1. + <_>8 1 6 3 3. + 0 + 0.1898681968450546 + 1.7255579587072134e-003 + -0.3270105123519898 + <_> + + <_> + + + + <_>0 1 18 3 -1. + <_>6 1 6 3 3. + 0 + -2.4319409858435392e-003 + 0.1387514024972916 + -0.0430466011166573 + <_> + + <_> + + + + <_>8 0 12 6 -1. + <_>8 2 12 2 3. + 0 + -2.0888550207018852e-003 + -0.1124100983142853 + 0.0376768596470356 + <_> + + <_> + + + + <_>5 3 4 7 -1. + <_>7 3 2 7 2. + 0 + 0.0421163104474545 + 8.1929191946983337e-003 + -0.6854190826416016 + <_> + + <_> + + + + <_>3 16 16 2 -1. + <_>3 17 16 1 2. + 0 + 0.0273801106959581 + 4.4103930704295635e-003 + -0.5342184901237488 + <_> + + <_> + + + + <_>3 0 13 6 -1. + <_>3 3 13 3 2. + 0 + 0.0213485695421696 + -0.0511603802442551 + 0.1002148017287254 + <_> + + <_> + + + + <_>4 0 13 3 -1. + <_>4 1 13 1 3. + 0 + -0.0172368697822094 + -0.3999573886394501 + 0.0202574897557497 + <_> + + <_> + + + + <_>1 1 5 12 -1. + <_>1 5 5 4 3. + 0 + 7.8617185354232788e-003 + 0.0289962794631720 + -0.1801407039165497 + <_> + + <_> + + + + <_>6 10 13 3 -1. + <_>6 11 13 1 3. + 0 + 8.1942398101091385e-003 + -0.0254980307072401 + 0.0846939310431480 + <_> + + <_> + + + + <_>1 11 7 4 -1. + <_>1 13 7 2 2. + 0 + 6.2367911450564861e-003 + 0.0186592601239681 + -0.2644366025924683 + <_> + + <_> + + + + <_>7 0 6 8 -1. + <_>9 0 2 8 3. + 0 + 2.1872919751331210e-004 + -0.1594302952289581 + 0.0307226497679949 + <_> + + <_> + + + + <_>7 5 6 8 -1. + <_>9 5 2 8 3. + 0 + -6.4004249870777130e-003 + 0.2833105027675629 + -0.0193524900823832 + <_> + + <_> + + + + <_>14 12 6 8 -1. + <_>16 12 2 8 3. + 0 + -0.1000719964504242 + -0.4070405066013336 + 6.1583020724356174e-003 + <_> + + <_> + + + + <_>3 5 13 3 -1. + <_>3 6 13 1 3. + 0 + 0.0156901497393847 + -0.0167723391205072 + 0.2904956936836243 + <_> + + <_> + + + + <_>9 2 9 5 -1. + <_>12 2 3 5 3. + 0 + -7.0421490818262100e-003 + -0.0679851770401001 + 0.0311303697526455 + <_> + + <_> + + + + <_>5 15 7 4 -1. + <_>5 17 7 2 2. + 0 + -0.0153200300410390 + 0.3640008866786957 + -0.0136086996644735 + <_> + + <_> + + + + <_>11 14 7 6 -1. + <_>11 16 7 2 3. + 0 + 0.0584856607019901 + 7.4363988824188709e-003 + -0.7559933066368103 + <_> + + <_> + + + + <_>2 14 7 6 -1. 
+ <_>2 16 7 2 3. + 0 + -3.5200670827180147e-003 + -0.1392329037189484 + 0.0376575514674187 + <_> + + <_> + + + + <_>10 13 9 4 -1. + <_>10 15 9 2 2. + 0 + -8.7158178212121129e-004 + 0.0423398390412331 + -0.0535304583609104 + <_> + + <_> + + + + <_>2 14 13 3 -1. + <_>2 15 13 1 3. + 0 + 2.4548629298806190e-003 + -0.0446670502424240 + 0.1378507018089294 + <_> + + <_> + + + + <_>10 13 10 6 -1. + <_>10 15 10 2 3. + 0 + -0.0617789290845394 + -0.3533807992935181 + 4.5869671739637852e-003 + <_> + + <_> + + + + <_>0 13 10 6 -1. + <_>0 15 10 2 3. + 0 + -3.8533521001227200e-004 + 0.0722780078649521 + -0.1043329983949661 + <_> + + <_> + + + + <_>2 8 16 8 -1. + <_>10 8 8 4 2. + <_>2 12 8 4 2. + 0 + 0.0762277171015739 + -0.0110045503824949 + 0.5002518892288208 + <_> + + <_> + + + + <_>2 0 9 7 -1. + <_>5 0 3 7 3. + 0 + -4.4210380874574184e-003 + -0.0862904265522957 + 0.0587734207510948 + <_> + + <_> + + + + <_>7 6 6 7 -1. + <_>9 6 2 7 3. + 0 + 0.0150682702660561 + -0.0589162707328796 + 0.1002511978149414 + <_> + + <_> + + + + <_>1 7 10 9 -1. + <_>1 10 10 3 3. + 0 + 0.0250079408288002 + 0.0762514770030975 + -0.0887449607253075 + <_> + + <_> + + + + <_>5 3 11 6 -1. + <_>5 5 11 2 3. + 0 + -0.0773281231522560 + 0.2536340057849884 + -0.0157785303890705 + <_> + + <_> + + + + <_>0 7 2 13 -1. + <_>1 7 1 13 2. + 0 + 3.5588641185313463e-004 + 0.0629836991429329 + -0.0771819874644279 + <_> + + <_> + + + + <_>14 1 6 11 -1. + <_>16 1 2 11 3. + 0 + 0.0694005265831947 + -8.9571140706539154e-003 + 0.1510262936353684 + <_> + + <_> + + + + <_>0 6 6 14 -1. + <_>2 6 2 14 3. + 0 + -0.1857770979404450 + -0.6951835155487061 + 7.8398203477263451e-003 + <_> + + <_> + + + + <_>7 8 8 12 -1. + <_>11 8 4 6 2. + <_>7 14 4 6 2. + 0 + -6.6014728508889675e-003 + -0.0560566410422325 + 0.0245579201728106 + <_> + + <_> + + + + <_>2 10 16 8 -1. + <_>2 10 8 4 2. + <_>10 14 8 4 2. + 0 + 0.0404903106391430 + -0.0202025994658470 + 0.2773627042770386 + <_> + + <_> + + + + <_>11 6 7 8 -1. + <_>11 10 7 4 2. + 0 + 1.6997240018099546e-003 + -0.1140346005558968 + 0.0192226804792881 + <_> + + <_> + + + + <_>2 6 7 8 -1. + <_>2 10 7 4 2. + 0 + 0.0847500413656235 + 0.0186075102537870 + -0.3050543069839478 + <_> + + <_> + + + + <_>15 6 4 14 -1. + <_>17 6 2 7 2. + <_>15 13 2 7 2. + 0 + -0.0169758796691895 + 0.1235710978507996 + -0.0290166605263948 + <_> + + <_> + + + + <_>1 6 4 14 -1. + <_>1 6 2 7 2. + <_>3 13 2 7 2. + 0 + 4.6773189678788185e-003 + -0.0458647608757019 + 0.1171884015202522 + <_> + + <_> + + + + <_>15 7 4 8 -1. + <_>15 11 4 4 2. + 0 + -0.0140660200268030 + -0.1367049068212509 + 0.0173626299947500 + <_> + + <_> + + + + <_>4 0 8 8 -1. + <_>4 0 4 4 2. + <_>8 4 4 4 2. + 0 + 0.0509446896612644 + 0.0138656403869390 + -0.3952904045581818 + <_> + + <_> + + + + <_>7 0 7 6 -1. + <_>7 3 7 3 2. + 0 + 0.0982657968997955 + -0.0123391998931766 + 0.3640823960304260 + <_> + + <_> + + + + <_>3 2 14 3 -1. + <_>3 3 14 1 3. + 0 + 1.1730480473488569e-003 + 0.0664005130529404 + -0.0820910930633545 + <_> + + <_> + + + + <_>10 0 10 6 -1. + <_>10 2 10 2 3. + 0 + 0.1097903996706009 + 4.6397978439927101e-003 + -0.6134455800056458 + <_> + + <_> + + + + <_>0 0 10 6 -1. + <_>0 2 10 2 3. + 0 + 4.9452850362285972e-004 + -0.1006267964839935 + 0.0571919903159142 + <_> + + <_> + + + + <_>0 3 20 14 -1. + <_>0 10 20 7 2. + 0 + 0.3567355871200562 + -0.0144829899072647 + 0.3927611112594605 + <_> + + <_> + + + + <_>0 0 4 12 -1. + <_>2 0 2 12 2. + 0 + 8.7493062019348145e-003 + -0.0485512204468250 + 0.1046025007963181 + <_> + + <_> + + + + <_>8 3 12 6 -1. + <_>12 3 4 6 3. 
+ 0 + 0.0224633496254683 + 0.0223960001021624 + -0.1358785033226013 + <_> + + <_> + + + + <_>0 3 12 6 -1. + <_>4 3 4 6 3. + 0 + 0.0185387600213289 + 0.0300294794142246 + -0.2086187005043030 + <_> + + <_> + + + + <_>14 3 4 8 -1. + <_>14 3 2 8 2. + 0 + 0.0342362597584724 + -0.0106440801173449 + 0.1667549014091492 + <_> + + <_> + + + + <_>2 3 4 8 -1. + <_>4 3 2 8 2. + 0 + 0.0409004800021648 + -0.0120569700375199 + 0.4377332031726837 + <_> + + <_> + + + + <_>13 6 6 10 -1. + <_>16 6 3 5 2. + <_>13 11 3 5 2. + 0 + 0.1051257997751236 + -9.4033451750874519e-004 + 0.7806162238121033 + <_> + + <_> + + + + <_>1 6 6 10 -1. + <_>1 6 3 5 2. + <_>4 11 3 5 2. + 0 + 0.0747993662953377 + 7.8805796802043915e-003 + -0.6634296178817749 + <_> + + <_> + + + + <_>7 13 13 2 -1. + <_>7 14 13 1 2. + 0 + 4.3973559513688087e-005 + -0.0581061504781246 + 0.1046651974320412 + <_> + + <_> + + + + <_>3 12 11 4 -1. + <_>3 14 11 2 2. + 0 + 6.6341059282422066e-003 + 0.0197503697127104 + -0.2703348100185394 + <_> + + <_> + + + + <_>13 12 6 8 -1. + <_>13 12 3 8 2. + 0 + 6.9901258684694767e-003 + -0.0322103686630726 + 0.0566778108477592 + <_> + + <_> + + + + <_>1 12 6 8 -1. + <_>4 12 3 8 2. + 0 + -6.9424291141331196e-003 + 0.0834926292300224 + -0.0642367228865623 + <_> + + <_> + + + + <_>12 6 8 8 -1. + <_>16 6 4 4 2. + <_>12 10 4 4 2. + 0 + 0.1252495050430298 + 1.9679870456457138e-003 + -0.8788949251174927 + <_> + + <_> + + + + <_>0 6 8 8 -1. + <_>0 6 4 4 2. + <_>4 10 4 4 2. + 0 + -0.0605558082461357 + -0.6582552790641785 + 7.3593561537563801e-003 + <_> + + <_> + + + + <_>3 8 16 2 -1. + <_>3 9 16 1 2. + 0 + 0.0420927293598652 + 9.0475538745522499e-003 + -0.3767631053924561 + <_> + + <_> + + + + <_>0 7 16 3 -1. + <_>0 8 16 1 3. + 0 + 0.0161900594830513 + 0.0145348403602839 + -0.3408921062946320 + <_> + + <_> + + + + <_>5 11 14 3 -1. + <_>5 12 14 1 3. + 0 + -0.0267569608986378 + 0.1686244010925293 + -0.0107689499855042 + <_> + + <_> + + + + <_>8 0 3 20 -1. + <_>9 0 1 20 3. + 0 + -0.0511635392904282 + -0.9406844973564148 + 4.8503028228878975e-003 + <_> + + <_> + + + + <_>8 10 9 7 -1. + <_>11 10 3 7 3. + 0 + -0.0290930792689323 + 0.1305136978626251 + -0.0272160600870848 + <_> + + <_> + + + + <_>0 6 20 3 -1. + <_>10 6 10 3 2. + 0 + -0.1343380957841873 + -0.5371304750442505 + 0.0106057301163673 + <_> + + <_> + + + + <_>4 7 15 3 -1. + <_>4 8 15 1 3. + 0 + -4.0363678708672523e-003 + -0.0785979479551315 + 0.0456093102693558 + <_> + + <_> + + + + <_>0 5 14 5 -1. + <_>7 5 7 5 2. + 0 + -0.1630388051271439 + 0.6915314793586731 + -6.8249078467488289e-003 + <_> + + <_> + + + + <_>8 10 9 7 -1. + <_>11 10 3 7 3. + 0 + 0.0535272285342216 + -8.2422774285078049e-003 + 0.2364957928657532 + <_> + + <_> + + + + <_>3 10 9 7 -1. + <_>6 10 3 7 3. + 0 + 0.0932096168398857 + -7.0793349295854568e-003 + 0.6398562788963318 + <_> + + <_> + + + + <_>11 7 3 10 -1. + <_>11 12 3 5 2. + 0 + -0.0415833517909050 + -0.4052774906158447 + 0.0119533697143197 + <_> + + <_> + + + + <_>1 7 18 6 -1. + <_>1 9 18 2 3. + 0 + 0.1524126976728439 + -0.0160168893635273 + 0.3708480894565582 + <_> + + <_> + + + + <_>8 0 4 15 -1. + <_>8 5 4 5 3. + 0 + -0.0130174802616239 + -0.1236660033464432 + 0.0445375107228756 + <_> + + <_> + + + + <_>6 1 7 15 -1. + <_>6 6 7 5 3. + 0 + 0.0549465417861938 + 0.0248529296368361 + -0.2195506989955902 + <_> + + <_> + + + + <_>6 9 14 3 -1. + <_>6 10 14 1 3. + 0 + 3.0320021323859692e-004 + -0.1336728930473328 + 0.0402260906994343 + <_> + + <_> + + + + <_>1 10 6 10 -1. + <_>1 10 3 5 2. + <_>4 15 3 5 2. 
+ 0 + 0.0138911800459027 + -0.0269018206745386 + 0.1964741051197052 + <_> + + <_> + + + + <_>9 3 6 13 -1. + <_>11 3 2 13 3. + 0 + 1.0848880046978593e-003 + 0.0364220701158047 + -0.0834306329488754 + <_> + + <_> + + + + <_>8 1 4 9 -1. + <_>10 1 2 9 2. + 0 + 2.3160090204328299e-003 + -0.0612158291041851 + 0.1127784997224808 + <_> + + <_> + + + + <_>9 0 6 7 -1. + <_>11 0 2 7 3. + 0 + -7.1280319243669510e-003 + -0.1464242935180664 + 0.0313001684844494 + <_> + + <_> + + + + <_>7 1 6 8 -1. + <_>10 1 3 8 2. + 0 + -3.5769429523497820e-003 + 0.1015909016132355 + -0.0607895106077194 + <_> + + <_> + + + + <_>3 6 14 2 -1. + <_>3 6 7 2 2. + 0 + 7.6856701634824276e-003 + 0.0422294698655605 + -0.1258313059806824 + <_> + + <_> + + + + <_>1 3 4 8 -1. + <_>3 3 2 8 2. + 0 + 8.4121264517307281e-003 + -0.0468726195394993 + 0.1301138997077942 + <_> + + <_> + + + + <_>18 3 2 14 -1. + <_>18 10 2 7 2. + 0 + 0.0758399292826653 + -9.2988023534417152e-003 + 0.2426081001758575 + <_> + + <_> + + + + <_>0 3 2 14 -1. + <_>0 10 2 7 2. + 0 + 8.6365960305556655e-004 + 0.0911338478326797 + -0.0613235607743263 + <_> + + <_> + + + + <_>3 15 16 2 -1. + <_>3 15 8 2 2. + 0 + -0.0106325699016452 + -0.0678184032440186 + 0.0190364997833967 + <_> + + <_> + + + + <_>2 1 9 6 -1. + <_>2 3 9 2 3. + 0 + -0.0141201401129365 + 0.2912392914295197 + -0.0174822397530079 + <_> + + <_> + + + + <_>11 1 7 6 -1. + <_>11 3 7 2 3. + 0 + 2.0944620482623577e-003 + -0.1174428984522820 + 0.0541295185685158 + <_> + + <_> + + + + <_>1 8 8 8 -1. + <_>1 8 4 4 2. + <_>5 12 4 4 2. + 0 + 4.2378879152238369e-003 + 0.0384955108165741 + -0.1447281986474991 + <_> + + <_> + + + + <_>8 6 5 8 -1. + <_>8 10 5 4 2. + 0 + -2.2818730212748051e-003 + -0.1157623007893562 + 0.0276634991168976 + <_> + + <_> + + + + <_>4 12 8 8 -1. + <_>4 12 4 4 2. + <_>8 16 4 4 2. + 0 + 9.4367301790043712e-004 + -0.0940889269113541 + 0.0533738210797310 + <_> + + <_> + + + + <_>15 12 4 8 -1. + <_>15 16 4 4 2. + 0 + 0.0148901902139187 + -0.0115624200552702 + 0.1094198003411293 + <_> + + <_> + + + + <_>7 11 5 8 -1. + <_>7 15 5 4 2. + 0 + 5.2381302230060101e-003 + 0.0352654308080673 + -0.1521206051111221 + <_> + + <_> + + + + <_>5 14 13 2 -1. + <_>5 15 13 1 2. + 0 + 1.2663690140470862e-003 + -0.0333525687456131 + 0.0798120498657227 + <_> + + <_> + + + + <_>2 4 9 12 -1. + <_>2 8 9 4 3. + 0 + -5.3786882199347019e-003 + 0.2093476951122284 + -0.0240730699151754 + <_> + + <_> + + + + <_>3 8 14 3 -1. + <_>3 9 14 1 3. + 0 + -1.9063480431213975e-003 + -0.2077497988939285 + 0.0254068300127983 + <_> + + <_> + + + + <_>0 15 13 3 -1. + <_>0 16 13 1 3. + 0 + 3.0771149322390556e-003 + -0.0519401803612709 + 0.1047597974538803 + <_> + + <_> + + + + <_>9 14 8 6 -1. + <_>9 16 8 2 3. + 0 + 9.5619028434157372e-003 + 0.0306337904185057 + -0.1075816974043846 + <_> + + <_> + + + + <_>1 12 4 8 -1. + <_>1 16 4 4 2. + 0 + 0.0205408297479153 + -0.0220289193093777 + 0.2357084006071091 + <_> + + <_> + + + + <_>5 16 12 4 -1. + <_>9 16 4 4 3. + 0 + 7.0854742079973221e-003 + -0.0471882484853268 + 0.0841227471828461 + <_> + + <_> + + + + <_>4 13 6 7 -1. + <_>6 13 2 7 3. + 0 + -6.2047559767961502e-003 + -0.1220982000231743 + 0.0451773293316364 + <_> + + <_> + + + + <_>11 1 3 15 -1. + <_>12 1 1 15 3. + 0 + -0.0234741196036339 + -0.2877045869827271 + 0.0108765298500657 + <_> + + <_> + + + + <_>0 0 2 13 -1. + <_>1 0 1 13 2. + 0 + 9.1368835419416428e-003 + -0.0334267504513264 + 0.2068012058734894 + <_> + + <_> + + + + <_>11 1 3 19 -1. + <_>12 1 1 19 3. 
+ 0 + 1.0512090520933270e-003 + 0.0470068007707596 + -0.0950183793902397 + <_> + + <_> + + + + <_>5 10 4 7 -1. + <_>7 10 2 7 2. + 0 + -6.0899247182533145e-004 + 0.0534191988408566 + -0.1044477000832558 + <_> + + <_> + + + + <_>8 11 8 4 -1. + <_>8 11 4 4 2. + 0 + -7.4382261373102665e-003 + -0.0480893291532993 + 0.0192444995045662 + <_> + + <_> + + + + <_>5 12 8 8 -1. + <_>9 12 4 8 2. + 0 + 0.0194959901273251 + -0.0301367007195950 + 0.2038148045539856 + <_> + + <_> + + + + <_>6 4 10 14 -1. + <_>11 4 5 7 2. + <_>6 11 5 7 2. + 0 + 0.0777995064854622 + 4.2237630113959312e-003 + -0.7240787744522095 + <_> + + <_> + + + + <_>4 4 10 14 -1. + <_>4 4 5 7 2. + <_>9 11 5 7 2. + 0 + 3.1717489473521709e-003 + 0.0288189407438040 + -0.1630569994449616 + <_> + + <_> + + + + <_>2 3 18 15 -1. + <_>2 8 18 5 3. + 0 + -0.0390127189457417 + -0.2915115952491760 + 0.0111319404095411 + <_> + + <_> + + + + <_>4 7 6 9 -1. + <_>6 7 2 9 3. + 0 + -3.1845991034060717e-003 + 0.0630722194910049 + -0.0772915631532669 + <_> + + <_> + + + + <_>8 7 9 9 -1. + <_>8 10 9 3 3. + 0 + 0.0178767200559378 + 0.0511965900659561 + -0.0378859303891659 + <_> + + <_> + + + + <_>2 8 14 4 -1. + <_>2 8 7 2 2. + <_>9 10 7 2 2. + 0 + 1.2821210548281670e-003 + -0.0573147088289261 + 0.0870549827814102 + <_> + + <_> + + + + <_>6 10 8 10 -1. + <_>6 10 4 10 2. + 0 + 0.1071055009961128 + -0.0155610004439950 + 0.3152500987052918 + <_> + + <_> + + + + <_>4 15 9 5 -1. + <_>7 15 3 5 3. + 0 + 0.0695771276950836 + 8.9664813131093979e-003 + -0.5858960747718811 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + -4.1071181185543537e-003 + 0.0954722464084625 + -0.0351764708757401 + <_> + + <_> + + + + <_>5 6 8 4 -1. + <_>9 6 4 4 2. + 0 + -2.4557299911975861e-003 + -0.1660528033971787 + 0.0373229198157787 + <_> + + <_> + + + + <_>10 7 6 7 -1. + <_>12 7 2 7 3. + 0 + -0.0209084209054708 + 0.1398988068103790 + -0.0299874506890774 + <_> + + <_> + + + + <_>4 7 6 12 -1. + <_>6 7 2 12 3. + 0 + -8.1008402630686760e-003 + -0.1052922010421753 + 0.0702457875013351 + <_> + + <_> + + + + <_>7 6 6 8 -1. + <_>9 6 2 8 3. + 0 + -0.0256718192249537 + 0.4425472021102905 + -0.0110814599320292 + <_> + + <_> + + + + <_>5 3 6 16 -1. + <_>5 3 3 8 2. + <_>8 11 3 8 2. + 0 + -9.3759642913937569e-003 + -0.0607650317251682 + 0.0813383236527443 + <_> + + <_> + + + + <_>12 10 6 6 -1. + <_>12 10 3 6 2. + 0 + 0.0511406995356083 + -0.0105162495747209 + 0.3404153883457184 + <_> + + <_> + + + + <_>2 10 6 6 -1. + <_>5 10 3 6 2. + 0 + -4.0337219834327698e-003 + 0.0850994735956192 + -0.0634215325117111 + <_> + + <_> + + + + <_>10 0 4 9 -1. + <_>10 0 2 9 2. + 0 + 3.3258409239351749e-003 + -0.0846251398324966 + 0.0473683699965477 + <_> + + <_> + + + + <_>5 0 6 7 -1. + <_>7 0 2 7 3. + 0 + -3.9332117885351181e-003 + -0.1263709962368012 + 0.0424505993723869 + <_> + + <_> + + + + <_>10 0 6 8 -1. + <_>12 0 2 8 3. + 0 + -4.7937841154634953e-003 + -0.0425274111330509 + 0.0251268092542887 + <_> + + <_> + + + + <_>4 0 6 8 -1. + <_>6 0 2 8 3. + 0 + 2.5972370058298111e-003 + 0.0418841205537319 + -0.1437415927648544 + <_> + + <_> + + + + <_>6 6 8 6 -1. + <_>6 8 8 2 3. + 0 + 0.0528075508773327 + -0.0124670201912522 + 0.4022338986396790 + <_> + + <_> + + + + <_>3 0 6 7 -1. + <_>5 0 2 7 3. + 0 + -8.1413555890321732e-003 + -0.1278377026319504 + 0.0389758795499802 + <_> + + <_> + + + + <_>8 10 10 10 -1. + <_>13 10 5 5 2. + <_>8 15 5 5 2. + 0 + 0.0298017393797636 + -0.0167473908513784 + 0.1242422983050346 + <_> + + <_> + + + + <_>2 16 15 4 -1. + <_>7 16 5 4 3. 
+ 0 + -0.0899077206850052 + 0.3141846954822540 + -0.0183604191988707 + <_> + + <_> + + + + <_>9 6 10 13 -1. + <_>9 6 5 13 2. + 0 + 0.1784521043300629 + 0.0104551902040839 + -0.3204891979694367 + <_> + + <_> + + + + <_>1 6 10 13 -1. + <_>6 6 5 13 2. + 0 + 0.0185882207006216 + -0.0385414399206638 + 0.1513532996177673 + <_> + + <_> + + + + <_>4 15 16 2 -1. + <_>4 15 8 2 2. + 0 + -4.5074601075612009e-005 + 0.0504628494381905 + -0.0565748512744904 + <_> + + <_> + + + + <_>1 15 16 2 -1. + <_>9 15 8 2 2. + 0 + 3.8339050952345133e-003 + 0.0475015491247177 + -0.1432714015245438 + <_> + + <_> + + + + <_>15 7 3 12 -1. + <_>15 13 3 6 2. + 0 + 0.0886082500219345 + -3.3567149657756090e-003 + 0.5859820842742920 + <_> + + <_> + + + + <_>2 7 3 12 -1. + <_>2 13 3 6 2. + 0 + -0.0706114694476128 + 0.6029266715049744 + -8.3463769406080246e-003 + <_> + + <_> + + + + <_>2 13 18 7 -1. + <_>8 13 6 7 3. + 0 + -0.1395819932222366 + -0.0916935130953789 + 0.0153119899332523 + <_> + + <_> + + + + <_>2 4 15 3 -1. + <_>2 5 15 1 3. + 0 + 7.6274941675364971e-003 + -0.0408250093460083 + 0.1193772032856941 + <_> + + <_> + + + + <_>16 6 2 13 -1. + <_>16 6 1 13 2. + 0 + -0.0704195871949196 + -0.6653149724006653 + 2.6815559249371290e-003 + <_> + + <_> + + + + <_>4 1 6 5 -1. + <_>7 1 3 5 2. + 0 + 2.2952680010348558e-003 + -0.0794965177774429 + 0.0570342689752579 + <_> + + <_> + + + + <_>14 6 4 14 -1. + <_>16 6 2 7 2. + <_>14 13 2 7 2. + 0 + 3.6756680347025394e-003 + -0.0291802808642387 + 0.0563330389559269 + <_> + + <_> + + + + <_>0 4 12 3 -1. + <_>6 4 6 3 2. + 0 + 0.0460725016891956 + 0.0191001798957586 + -0.2916376888751984 + <_> + + <_> + + + + <_>4 5 13 2 -1. + <_>4 6 13 1 2. + 0 + 2.1738489158451557e-003 + -0.0269121304154396 + 0.2019996047019959 + <_> + + <_> + + + + <_>3 2 13 10 -1. + <_>3 7 13 5 2. + 0 + -5.3164511919021606e-003 + 0.0930229797959328 + -0.0715486407279968 + <_> + + <_> + + + + <_>7 2 6 10 -1. + <_>7 7 6 5 2. + 0 + -0.0111989602446556 + -0.1061891987919807 + 0.0483955815434456 + <_> + + <_> + + + + <_>3 1 7 6 -1. + <_>3 3 7 2 3. + 0 + 1.7013610340654850e-003 + -0.1311120986938477 + 0.0430862195789814 + <_> + + <_> + + + + <_>4 0 13 6 -1. + <_>4 2 13 2 3. + 0 + -0.0116262696683407 + 0.1568453013896942 + -0.0246989503502846 + <_> + + <_> + + + + <_>3 0 12 6 -1. + <_>3 2 12 2 3. + 0 + 0.0938818305730820 + -0.0120585896074772 + 0.3794193863868713 + <_> + + <_> + + + + <_>13 0 7 6 -1. + <_>13 2 7 2 3. + 0 + 0.0120410900563002 + 0.0295691099017859 + -0.1332854926586151 + <_> + + <_> + + + + <_>5 0 4 16 -1. + <_>5 0 2 8 2. + <_>7 8 2 8 2. + 0 + -4.1863098740577698e-003 + 0.0672440230846405 + -0.0722289904952049 + <_> + + <_> + + + + <_>1 14 18 6 -1. + <_>10 14 9 3 2. + <_>1 17 9 3 2. + 0 + 0.0883739069104195 + 7.5915241613984108e-003 + -0.6251279711723328 + <_> + + <_> + + + + <_>2 17 14 3 -1. + <_>9 17 7 3 2. + 0 + -0.0148764103651047 + 0.1176209002733231 + -0.0438402183353901 + <_> + + <_> + + + + <_>16 11 4 7 -1. + <_>16 11 2 7 2. + 0 + 0.0134335299953818 + 0.0196157898753881 + -0.1192376017570496 + <_> + + <_> + + + + <_>4 1 8 15 -1. + <_>8 1 4 15 2. + 0 + 0.1509104073047638 + -9.9040074273943901e-003 + 0.5626248121261597 + <_> + + <_> + + + + <_>13 0 7 6 -1. + <_>13 2 7 2 3. + 0 + -0.0175078399479389 + -0.2343973964452744 + 0.0188283603638411 + <_> + + <_> + + + + <_>1 6 4 13 -1. + <_>3 6 2 13 2. + 0 + -0.1470708996057510 + -0.7453066110610962 + 7.0233740843832493e-003 + <_> + + <_> + + + + <_>12 12 7 4 -1. + <_>12 14 7 2 2. 
+ 0 + 0.0314858891069889 + -3.6193220876157284e-003 + 0.6921570897102356 + <_> + + <_> + + + + <_>1 12 7 4 -1. + <_>1 14 7 2 2. + 0 + -1.6217399388551712e-004 + 0.0464600399136543 + -0.1064255014061928 + <_> + + <_> + + + + <_>7 13 13 2 -1. + <_>7 14 13 1 2. + 0 + 5.6881760247051716e-004 + -0.0288161505013704 + 0.0743787288665771 + <_> + + <_> + + + + <_>0 12 7 6 -1. + <_>0 14 7 2 3. + 0 + -0.0198762007057667 + -0.2099740058183670 + 0.0230188108980656 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + -8.7401196360588074e-003 + 0.1732510030269623 + -0.0357868596911430 + <_> + + <_> + + + + <_>6 11 6 8 -1. + <_>8 11 2 8 3. + 0 + -0.0505792088806629 + -0.5202491879463196 + 9.2388605698943138e-003 + <_> + + <_> + + + + <_>8 10 10 10 -1. + <_>13 10 5 5 2. + <_>8 15 5 5 2. + 0 + 0.0939821526408196 + 3.4048059023916721e-003 + -0.2920742928981781 + <_> + + <_> + + + + <_>2 10 10 10 -1. + <_>2 10 5 5 2. + <_>7 15 5 5 2. + 0 + -0.0133265396580100 + 0.1366183012723923 + -0.0344055593013763 + <_> + + <_> + + + + <_>6 13 10 6 -1. + <_>11 13 5 3 2. + <_>6 16 5 3 2. + 0 + -0.0224726200103760 + -0.2591367959976196 + 0.0112661700695753 + <_> + + <_> + + + + <_>4 13 10 6 -1. + <_>4 13 5 3 2. + <_>9 16 5 3 2. + 0 + -0.0411250405013561 + -0.6692156195640564 + 7.3854308575391769e-003 + <_> + + <_> + + + + <_>7 6 9 12 -1. + <_>7 12 9 6 2. + 0 + 0.0697207674384117 + 5.0764488987624645e-003 + -0.2474718987941742 + <_> + + <_> + + + + <_>1 14 14 4 -1. + <_>1 14 7 2 2. + <_>8 16 7 2 2. + 0 + 0.0251985993236303 + -0.0156600493937731 + 0.2940840125083923 + <_> + + <_> + + + + <_>11 15 7 4 -1. + <_>11 17 7 2 2. + 0 + 4.2568319477140903e-003 + 0.0381121188402176 + -0.1236869022250176 + <_> + + <_> + + + + <_>1 15 16 4 -1. + <_>1 17 16 2 2. + 0 + -0.0126790096983314 + -0.1997618973255158 + 0.0288066398352385 + <_> + + <_> + + + + <_>2 0 18 8 -1. + <_>8 0 6 8 3. + 0 + -0.1608065962791443 + 0.1871045976877213 + -8.2025080919265747e-003 + <_> + + <_> + + + + <_>0 8 18 12 -1. + <_>0 12 18 4 3. + 0 + 0.1218139976263046 + -0.0108559299260378 + 0.4541229009628296 + <_> + + <_> + + + + <_>7 11 13 2 -1. + <_>7 12 13 1 2. + 0 + 2.8687159065157175e-003 + -9.8563097417354584e-003 + 0.1968989074230194 + <_> + + <_> + + + + <_>0 11 13 2 -1. + <_>0 12 13 1 2. + 0 + -3.4924471401609480e-004 + 0.0479552596807480 + -0.1254905015230179 + <_> + + <_> + + + + <_>1 12 19 3 -1. + <_>1 13 19 1 3. + 0 + 0.0437891818583012 + 5.1197651773691177e-003 + -0.6604471206665039 + <_> + + <_> + + + + <_>0 3 13 3 -1. + <_>0 4 13 1 3. + 0 + 0.0494254492223263 + 7.9704420641064644e-003 + -0.5153719186782837 + <_> + + <_> + + + + <_>9 11 6 9 -1. + <_>9 14 6 3 3. + 0 + 0.0122637897729874 + 9.8127601668238640e-003 + -0.1627492010593414 + <_> + + <_> + + + + <_>5 11 6 9 -1. + <_>5 14 6 3 3. + 0 + -6.7564379423856735e-003 + -0.0669927671551704 + 0.0784260928630829 + <_> + + <_> + + + + <_>4 3 13 3 -1. + <_>4 4 13 1 3. + 0 + 0.0195992402732372 + -0.0245084799826145 + 0.1789238005876541 + <_> + + <_> + + + + <_>5 14 9 4 -1. + <_>5 16 9 2 2. + 0 + 1.3520059874281287e-003 + -0.0758534222841263 + 0.0572824701666832 + <_> + + <_> + + + + <_>8 12 4 8 -1. + <_>8 16 4 4 2. + 0 + 5.1610758528113365e-003 + 0.0505926199257374 + -0.0966589227318764 + <_> + + <_> + + + + <_>3 8 14 4 -1. + <_>3 8 7 2 2. + <_>10 10 7 2 2. + 0 + 0.0271245893090963 + -0.0130784995853901 + 0.3389481902122498 + <_> + + <_> + + + + <_>4 5 12 6 -1. + <_>8 5 4 6 3. + 0 + -0.0736590623855591 + -0.9077556133270264 + 5.3760888986289501e-003 + <_> + + <_> + + + + <_>3 5 8 9 -1. 
+ <_>3 8 8 3 3. + 0 + -2.7619479224085808e-003 + 0.1344632059335709 + -0.0344833098351955 + <_> + + <_> + + + + <_>10 5 4 12 -1. + <_>10 9 4 4 3. + 0 + -1.5638889744877815e-003 + -0.1999212056398392 + 0.0140036996454000 + <_> + + <_> + + + + <_>0 6 18 6 -1. + <_>0 6 9 3 2. + <_>9 9 9 3 2. + 0 + 4.0559601038694382e-003 + 0.0531832091510296 + -0.1007082983851433 + <_> + + <_> + + + + <_>3 6 16 4 -1. + <_>11 6 8 2 2. + <_>3 8 8 2 2. + 0 + -3.2189621124416590e-003 + 0.0626243129372597 + -0.0302760899066925 + <_> + + <_> + + + + <_>4 6 7 4 -1. + <_>4 8 7 2 2. + 0 + 4.1666622273623943e-003 + -0.0917611569166183 + 0.0584005005657673 + <_> + + <_> + + + + <_>12 4 7 6 -1. + <_>12 6 7 2 3. + 0 + 0.0203930605202913 + 4.8048538155853748e-003 + -0.3838635087013245 + <_> + + <_> + + + + <_>1 4 7 6 -1. + <_>1 6 7 2 3. + 0 + -9.9844802170991898e-003 + -0.0694732964038849 + 0.0700341910123825 + <_> + + <_> + + + + <_>6 0 10 6 -1. + <_>6 2 10 2 3. + 0 + 0.0195153206586838 + -0.0341065004467964 + 0.1083140969276428 + <_> + + <_> + + + + <_>0 0 7 6 -1. + <_>0 2 7 2 3. + 0 + 8.7807718664407730e-003 + 0.0369900502264500 + -0.1308933049440384 + <_> + + <_> + + + + <_>17 2 3 13 -1. + <_>18 2 1 13 3. + 0 + 1.7314519500359893e-003 + -0.0421234704554081 + 0.0849820971488953 + <_> + + <_> + + + + <_>0 2 3 13 -1. + <_>1 2 1 13 3. + 0 + -0.0267095193266869 + 0.3232682943344116 + -0.0154271600767970 + <_> + + <_> + + + + <_>6 8 13 3 -1. + <_>6 9 13 1 3. + 0 + 7.8696580603718758e-003 + 0.0313611589372158 + -0.1056860983371735 + <_> + + <_> + + + + <_>0 13 10 6 -1. + <_>0 13 5 3 2. + <_>5 16 5 3 2. + 0 + 3.2152980566024780e-003 + -0.0651618018746376 + 0.0761894881725311 + <_> + + <_> + + + + <_>10 12 8 8 -1. + <_>14 12 4 4 2. + <_>10 16 4 4 2. + 0 + -0.0232151206582785 + 0.2252265065908432 + -0.0148387700319290 + <_> + + <_> + + + + <_>6 10 8 8 -1. + <_>6 10 4 4 2. + <_>10 14 4 4 2. + 0 + -4.4935368932783604e-003 + -0.1313146054744721 + 0.0428559407591820 + <_> + + <_> + + + + <_>10 10 6 7 -1. + <_>12 10 2 7 3. + 0 + -0.0118503896519542 + 0.1482574045658112 + -0.0294568501412869 + <_> + + <_> + + + + <_>5 9 9 5 -1. + <_>8 9 3 5 3. + 0 + -9.3039282364770770e-004 + 0.0793299376964569 + -0.0757845267653465 + <_> + + <_> + + + + <_>7 5 7 6 -1. + <_>7 7 7 2 3. + 0 + -7.2138011455535889e-004 + 0.0220424104481936 + -0.2089328020811081 + <_> + + <_> + + + + <_>0 13 18 7 -1. + <_>6 13 6 7 3. + 0 + 0.1307877004146576 + -0.0122144203633070 + 0.4322460889816284 + <_> + + <_> + + + + <_>7 7 12 9 -1. + <_>7 10 12 3 3. + 0 + 0.2786338925361633 + -7.4468360980972648e-004 + 0.9999976158142090 + <_> + + <_> + + + + <_>1 12 18 3 -1. + <_>1 13 18 1 3. + 0 + -0.0408152006566525 + -0.6131027936935425 + 8.2405265420675278e-003 + <_> + + <_> + + + + <_>7 13 13 2 -1. + <_>7 14 13 1 2. + 0 + 1.5054940013214946e-003 + -0.0180533993989229 + 0.0652307271957397 + <_> + + <_> + + + + <_>7 12 6 7 -1. + <_>9 12 2 7 3. + 0 + 6.5729310736060143e-003 + 0.0309676304459572 + -0.1502135992050171 + <_> + + <_> + + + + <_>8 10 12 10 -1. + <_>14 10 6 5 2. + <_>8 15 6 5 2. + 0 + -0.1403317004442215 + -0.4464120864868164 + 5.0997259095311165e-003 + <_> + + <_> + + + + <_>0 10 12 10 -1. + <_>0 10 6 5 2. + <_>6 15 6 5 2. + 0 + -0.0127815604209900 + 0.1257960945367813 + -0.0462587699294090 + <_> + + <_> + + + + <_>7 7 12 9 -1. + <_>7 10 12 3 3. + 0 + 0.0133838197216392 + 0.0752338320016861 + -0.0298584196716547 + <_> + + <_> + + + + <_>3 16 12 4 -1. + <_>7 16 4 4 3. 
+ 0 + 9.5225386321544647e-003 + -0.0441355295479298 + 0.1082296967506409 + <_> + + <_> + + + + <_>7 16 9 4 -1. + <_>7 18 9 2 2. + 0 + -0.0724846869707108 + -1. + 1.3005880173295736e-003 + <_> + + <_> + + + + <_>4 16 9 4 -1. + <_>4 18 9 2 2. + 0 + 3.6246789386495948e-004 + -0.0668785423040390 + 0.0739164799451828 + <_> + + <_> + + + + <_>11 1 3 19 -1. + <_>12 1 1 19 3. + 0 + -0.0155119802802801 + -0.1841454058885574 + 0.0159990396350622 + <_> + + <_> + + + + <_>6 14 7 6 -1. + <_>6 16 7 2 3. + 0 + 0.0511466115713120 + -9.4361994415521622e-003 + 0.5472086071968079 + <_> + + <_> + + + + <_>11 1 3 15 -1. + <_>12 1 1 15 3. + 0 + -8.9448272774461657e-005 + 0.0329708904027939 + -0.0451033897697926 + <_> + + <_> + + + + <_>6 1 3 19 -1. + <_>7 1 1 19 3. + 0 + 1.0151580208912492e-003 + 0.0486031807959080 + -0.0982570499181747 + <_> + + <_> + + + + <_>4 0 14 10 -1. + <_>11 0 7 5 2. + <_>4 5 7 5 2. + 0 + 0.0535709708929062 + 0.0103257000446320 + -0.1430442035198212 + <_> + + <_> + + + + <_>2 0 14 10 -1. + <_>2 0 7 5 2. + <_>9 5 7 5 2. + 0 + 0.1230262964963913 + -5.2219899371266365e-003 + 0.8690345287322998 + <_> + + <_> + + + + <_>10 1 3 13 -1. + <_>11 1 1 13 3. + 0 + -6.0005468549206853e-004 + 0.0535720400512218 + -0.0582032687962055 + <_> + + <_> + + + + <_>6 7 6 8 -1. + <_>8 7 2 8 3. + 0 + -0.0447156988084316 + 0.4498831033706665 + -0.0105494195595384 + <_> + + <_> + + + + <_>11 5 4 10 -1. + <_>11 5 2 10 2. + 0 + 6.3781379722058773e-003 + 0.0261842906475067 + -0.1064003035426140 + <_> + + <_> + + + + <_>3 18 13 2 -1. + <_>3 19 13 1 2. + 0 + -5.6618300732225180e-004 + 0.0572648495435715 + -0.0777502432465553 + <_> + + <_> + + + + <_>11 8 4 8 -1. + <_>11 12 4 4 2. + 0 + -1.5853339573368430e-004 + 0.0253169499337673 + -0.0571899414062500 + <_> + + <_> + + + + <_>5 8 4 8 -1. + <_>5 12 4 4 2. + 0 + -0.0497907698154449 + -0.3712770938873291 + 0.0131251700222492 + <_> + + <_> + + + + <_>4 8 16 6 -1. + <_>12 8 8 3 2. + <_>4 11 8 3 2. + 0 + -0.0104770204052329 + 0.0842459499835968 + -0.0367316082119942 + <_> + + <_> + + + + <_>5 5 4 10 -1. + <_>7 5 2 10 2. + 0 + -9.0497080236673355e-003 + -0.1689444035291672 + 0.0284713692963123 + <_> + + <_> + + + + <_>10 1 3 13 -1. + <_>11 1 1 13 3. + 0 + -0.0352020785212517 + -0.4381084144115448 + 5.8491500094532967e-003 + <_> + + <_> + + + + <_>7 1 3 13 -1. + <_>8 1 1 13 3. + 0 + -2.0730090327560902e-003 + 0.0948908403515816 + -0.0530595891177654 + <_> + + <_> + + + + <_>6 6 8 7 -1. + <_>6 6 4 7 2. + 0 + -5.0727208144962788e-003 + -0.1122173964977264 + 0.0441659912467003 + <_> + + <_> + + + + <_>8 0 4 9 -1. + <_>10 0 2 9 2. + 0 + 2.5876651052385569e-003 + -0.0555578209459782 + 0.1142631992697716 + <_> + + <_> + + + + <_>9 7 4 12 -1. + <_>9 11 4 4 3. + 0 + -2.4757650680840015e-003 + -0.0482131801545620 + 0.0315298996865749 + <_> + + <_> + + + + <_>4 2 12 4 -1. + <_>10 2 6 4 2. + 0 + -0.0129125304520130 + 0.1148665994405747 + -0.0385897606611252 + <_> + + <_> + + + + <_>8 1 10 6 -1. + <_>13 1 5 3 2. + <_>8 4 5 3 2. + 0 + 0.0701943486928940 + 3.5798270255327225e-003 + -0.7300816774368286 + <_> + + <_> + + + + <_>0 2 9 10 -1. + <_>0 7 9 5 2. + 0 + -0.1201630011200905 + -0.6721792221069336 + 5.8088749647140503e-003 + <_> + + <_> + + + + <_>10 1 10 14 -1. + <_>10 8 10 7 2. + 0 + 0.1310949027538300 + 0.0153406998142600 + -0.1291787028312683 + <_> + + <_> + + + + <_>0 1 10 14 -1. + <_>0 8 10 7 2. + 0 + -0.1135049983859062 + 0.4729798138141632 + -0.0105742802843452 + <_> + + <_> + + + + <_>9 0 3 15 -1. + <_>9 5 3 5 3. 
+ 0 + -0.0715335234999657 + -0.3491029143333435 + 9.8157208412885666e-003 + <_> + + <_> + + + + <_>0 2 4 18 -1. + <_>0 2 2 9 2. + <_>2 11 2 9 2. + 0 + 0.0158896706998348 + -0.0301492903381586 + 0.1513480991125107 + <_> + + <_> + + + + <_>8 0 12 20 -1. + <_>8 0 6 20 2. + 0 + 0.2684037089347839 + 9.9974423646926880e-003 + -0.1224374994635582 + <_> + + <_> + + + + <_>0 0 12 20 -1. + <_>6 0 6 20 2. + 0 + -0.1492256969213486 + -0.1577313989400864 + 0.0276825092732906 + <_> + + <_> + + + + <_>10 9 6 7 -1. + <_>12 9 2 7 3. + 0 + -0.0228584893047810 + 0.1734071969985962 + -0.0211247708648443 + <_> + + <_> + + + + <_>3 3 6 7 -1. + <_>5 3 2 7 3. + 0 + -9.0983451809734106e-004 + 0.0552699081599712 + -0.0850529819726944 + <_> + + <_> + + + + <_>13 2 3 17 -1. + <_>14 2 1 17 3. + 0 + -0.0114621603861451 + -0.1439760029315949 + 0.0138097098097205 + <_> + + <_> + + + + <_>2 5 4 8 -1. + <_>2 9 4 4 2. + 0 + 0.0871184319257736 + 6.4688520506024361e-003 + -0.7280907034873962 + <_> + + <_> + + + + <_>6 5 10 10 -1. + <_>6 10 10 5 2. + 0 + 0.0538105890154839 + -0.0282515194267035 + 0.1361580044031143 + <_> + + <_> + + + + <_>4 2 3 17 -1. + <_>5 2 1 17 3. + 0 + -1.6928049735724926e-003 + -0.1011480018496513 + 0.0520966015756130 + <_> + + <_> + + + + <_>6 6 14 5 -1. + <_>6 6 7 5 2. + 0 + -0.0145269203931093 + -0.1061320975422859 + 0.0272180307656527 + <_> + + <_> + + + + <_>0 11 15 3 -1. + <_>5 11 5 3 3. + 0 + -5.9082340449094772e-003 + 0.1125700026750565 + -0.0610327012836933 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + -0.0214214697480202 + -0.1546418964862824 + 0.0118538700044155 + <_> + + <_> + + + + <_>3 0 10 6 -1. + <_>3 0 5 3 2. + <_>8 3 5 3 2. + 0 + 0.0801715701818466 + 5.5826799944043159e-003 + -0.8238909244537354 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 10 4 4 3. + 0 + -1.0931739816442132e-003 + -0.0783939063549042 + 0.0134330997243524 + <_> + + <_> + + + + <_>0 13 13 2 -1. + <_>0 14 13 1 2. + 0 + 4.1605130536481738e-004 + -0.0431861393153667 + 0.1050084009766579 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + -2.8376420959830284e-003 + 0.0789602100849152 + -0.0422472804784775 + <_> + + <_> + + + + <_>1 2 12 15 -1. + <_>5 2 4 15 3. + 0 + -0.0285225193947554 + -0.1072297021746635 + 0.0477891899645329 + <_> + + <_> + + + + <_>2 0 18 16 -1. + <_>8 0 6 16 3. + 0 + 0.4006808102130890 + -5.7991011999547482e-003 + 0.3069550991058350 + <_> + + <_> + + + + <_>0 0 18 16 -1. + <_>6 0 6 16 3. + 0 + -8.1703867763280869e-003 + 0.1085176020860672 + -0.0561534687876701 + <_> + + <_> + + + + <_>14 0 6 13 -1. + <_>14 0 3 13 2. + 0 + 9.3125440180301666e-003 + -0.0445609390735626 + 0.0436340495944023 + <_> + + <_> + + + + <_>4 3 3 17 -1. + <_>5 3 1 17 3. + 0 + 5.8274720795452595e-003 + 0.0313108414411545 + -0.1605342030525208 + <_> + + <_> + + + + <_>13 6 6 10 -1. + <_>13 6 3 10 2. + 0 + -2.9063750989735126e-003 + 0.0371482297778130 + -0.0273105800151825 + <_> + + <_> + + + + <_>1 5 6 11 -1. + <_>4 5 3 11 2. + 0 + 0.0164219699800015 + -0.0316163711249828 + 0.1619547009468079 + <_> + + <_> + + + + <_>16 3 4 12 -1. + <_>16 7 4 4 3. + 0 + -0.0138760600239038 + -0.1784088015556335 + 0.0269252397119999 + <_> + + <_> + + + + <_>5 1 3 10 -1. + <_>5 6 3 5 2. + 0 + -0.0299359802156687 + 0.2006970942020416 + -0.0273727308958769 + <_> + + <_> + + + + <_>16 3 4 12 -1. + <_>16 7 4 4 3. + 0 + 8.1381313502788544e-003 + 0.0409517697989941 + -0.0747569724917412 + <_> + + <_> + + + + <_>0 3 4 12 -1. + <_>0 7 4 4 3. 
+ 0 + -5.8591389097273350e-003 + -0.1233702003955841 + 0.0396418794989586 + <_> + + <_> + + + + <_>6 0 14 6 -1. + <_>13 0 7 3 2. + <_>6 3 7 3 2. + 0 + 0.0715921968221664 + -0.0102937603369355 + 0.2239125967025757 + <_> + + <_> + + + + <_>0 1 6 19 -1. + <_>3 1 3 19 2. + 0 + 0.0501115210354328 + 0.0240729991346598 + -0.2144380956888199 + <_> + + <_> + + + + <_>16 1 3 13 -1. + <_>17 1 1 13 3. + 0 + 4.2603579349815845e-003 + -0.0237120501697063 + 0.0736034065485001 + <_> + + <_> + + + + <_>0 0 6 13 -1. + <_>3 0 3 13 2. + 0 + 6.5065422095358372e-003 + -0.0674027800559998 + 0.0769261419773102 + <_> + + <_> + + + + <_>12 1 6 5 -1. + <_>12 1 3 5 2. + 0 + 2.0325470250099897e-003 + -0.0996646732091904 + 0.0579942315816879 + <_> + + <_> + + + + <_>2 1 6 5 -1. + <_>5 1 3 5 2. + 0 + -9.3465158715844154e-003 + 0.1943292021751404 + -0.0313877090811729 + <_> + + <_> + + + + <_>10 0 6 7 -1. + <_>12 0 2 7 3. + 0 + 9.5768114551901817e-003 + 0.0225949902087450 + -0.1609085053205490 + <_> + + <_> + + + + <_>1 1 10 3 -1. + <_>6 1 5 3 2. + 0 + -0.0467639118432999 + -0.3502027094364166 + 0.0150351496413350 + <_> + + <_> + + + + <_>4 0 16 8 -1. + <_>12 0 8 4 2. + <_>4 4 8 4 2. + 0 + -0.0501648709177971 + 0.1276338994503021 + -0.0110356202349067 + <_> + + <_> + + + + <_>0 0 8 12 -1. + <_>0 0 4 6 2. + <_>4 6 4 6 2. + 0 + 0.0231481492519379 + -0.0246365796774626 + 0.2026434987783432 + <_> + + <_> + + + + <_>11 10 7 6 -1. + <_>11 12 7 2 3. + 0 + -0.0741685628890991 + -0.9485428929328919 + 2.2216918878257275e-003 + <_> + + <_> + + + + <_>2 10 7 6 -1. + <_>2 12 7 2 3. + 0 + -0.0206986293196678 + -0.2458554953336716 + 0.0213708207011223 + <_> + + <_> + + + + <_>3 8 15 9 -1. + <_>3 11 15 3 3. + 0 + -0.0581875406205654 + 0.3053100109100342 + -8.1265745684504509e-003 + <_> + + <_> + + + + <_>4 6 4 10 -1. + <_>6 6 2 10 2. + 0 + -0.0524515882134438 + 0.5056778192520142 + -9.7108660265803337e-003 + <_> + + <_> + + + + <_>15 7 5 6 -1. + <_>15 10 5 3 2. + 0 + -0.0467216409742832 + 0.8089610934257507 + -1.8908439669758081e-003 + <_> + + <_> + + + + <_>0 7 5 6 -1. + <_>0 10 5 3 2. + 0 + -0.0103855095803738 + -0.2836990952491760 + 0.0191662292927504 + <_> + + <_> + + + + <_>8 5 12 4 -1. + <_>12 5 4 4 3. + 0 + 5.4432367905974388e-003 + 0.0414307191967964 + -0.1603327989578247 + <_> + + <_> + + + + <_>2 0 14 6 -1. + <_>2 3 14 3 2. + 0 + 0.0240301601588726 + -0.0437515489757061 + 0.1055302023887634 + <_> + + <_> + + + + <_>8 5 12 4 -1. + <_>12 5 4 4 3. + 0 + -0.0264304205775261 + -0.0874482691287994 + 0.0287698302417994 + <_> + + <_> + + + + <_>0 5 12 4 -1. + <_>4 5 4 4 3. + 0 + 4.8743681982159615e-003 + 0.0350329615175724 + -0.1588167995214462 + <_> + + <_> + + + + <_>7 0 7 6 -1. + <_>7 3 7 3 2. + 0 + -2.5106489192694426e-003 + 0.0881616771221161 + -0.0302055906504393 + <_> + + <_> + + + + <_>4 0 6 7 -1. + <_>6 0 2 7 3. + 0 + -5.2146320231258869e-003 + -0.1135013028979302 + 0.0420010611414909 + <_> + + <_> + + + + <_>13 9 3 10 -1. + <_>13 14 3 5 2. + 0 + -0.0109860096126795 + 0.0844287797808647 + -0.0382728390395641 + <_> + + <_> + + + + <_>2 12 7 6 -1. + <_>2 14 7 2 3. + 0 + -0.0600571297109127 + -0.7924910187721252 + 5.2951448597013950e-003 + <_> + + <_> + + + + <_>7 14 13 3 -1. + <_>7 15 13 1 3. + 0 + 0.0136218098923564 + -0.0174198206514120 + 0.2161206007003784 + <_> + + <_> + + + + <_>0 14 13 3 -1. + <_>0 15 13 1 3. + 0 + -0.0222238004207611 + 0.2672164142131805 + -0.0202071908861399 + <_> + + <_> + + + + <_>9 2 6 12 -1. + <_>9 6 6 4 3. 
+ 0 + 0.0581243596971035 + 6.0539757832884789e-003 + -0.4092710912227631 + <_> + + <_> + + + + <_>5 2 6 12 -1. + <_>5 6 6 4 3. + 0 + -0.0280979704111815 + -0.1121790036559105 + 0.0541446395218372 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>9 10 4 4 3. + 0 + 0.0652783736586571 + -7.4973162263631821e-003 + 0.1238427013158798 + <_> + + <_> + + + + <_>7 6 4 12 -1. + <_>7 10 4 4 3. + 0 + -2.5233640335500240e-003 + -0.1822437942028046 + 0.0245378501713276 + <_> + + <_> + + + + <_>9 2 8 18 -1. + <_>9 8 8 6 3. + 0 + 0.1147859990596771 + 0.0196175798773766 + -0.1190512031316757 + <_> + + <_> + + + + <_>6 5 6 7 -1. + <_>8 5 2 7 3. + 0 + 9.6991509199142456e-003 + -0.0539465509355068 + 0.1118021011352539 + <_> + + <_> + + + + <_>8 6 6 7 -1. + <_>10 6 2 7 3. + 0 + 0.0293591506779194 + -0.0233956091105938 + 0.1853425055742264 + <_> + + <_> + + + + <_>3 7 9 9 -1. + <_>3 10 9 3 3. + 0 + 7.8490097075700760e-003 + 0.1645410954952240 + -0.0421294905245304 + <_> + + <_> + + + + <_>14 4 3 13 -1. + <_>15 4 1 13 3. + 0 + 4.0329899638891220e-003 + 0.0244955904781818 + -0.0659554898738861 + <_> + + <_> + + + + <_>4 1 12 15 -1. + <_>4 6 12 5 3. + 0 + 0.2147139012813568 + -0.0104628801345825 + 0.4743803143501282 + <_> + + <_> + + + + <_>8 2 4 8 -1. + <_>8 6 4 4 2. + 0 + -2.2316209506243467e-003 + 0.0497964397072792 + -0.1032828018069267 + <_> + + <_> + + + + <_>3 0 12 20 -1. + <_>3 10 12 10 2. + 0 + 0.0218333303928375 + -0.0538848489522934 + 0.0932775512337685 + <_> + + <_> + + + + <_>1 17 19 3 -1. + <_>1 18 19 1 3. + 0 + 0.0244307797402143 + 0.0157060995697975 + -0.2824443876743317 + <_> + + <_> + + + + <_>0 18 18 2 -1. + <_>9 18 9 2 2. + 0 + 0.0125325201079249 + -0.0309839006513357 + 0.1559969931840897 + <_> + + <_> + + + + <_>8 10 6 9 -1. + <_>10 10 2 9 3. + 0 + 7.9741179943084717e-003 + 0.0266505405306816 + -0.1368958055973053 + <_> + + <_> + + + + <_>6 10 6 9 -1. + <_>8 10 2 9 3. + 0 + 0.0794445574283600 + 6.4238710328936577e-003 + -0.7848566174507141 + <_> + + <_> + + + + <_>5 11 12 4 -1. + <_>5 13 12 2 2. + 0 + -1.7925030551850796e-003 + 0.0396455898880959 + -0.1149725988507271 + <_> + + <_> + + + + <_>2 5 8 4 -1. + <_>2 7 8 2 2. + 0 + -9.0927572455257177e-004 + 0.0632568895816803 + -0.0752503722906113 + <_> + + <_> + + + + <_>9 10 7 6 -1. + <_>9 12 7 2 3. + 0 + -0.0260400492697954 + 0.1486425995826721 + -0.0185062400996685 + <_> + + <_> + + + + <_>1 0 13 3 -1. + <_>1 1 13 1 3. + 0 + 4.1452320292592049e-003 + 0.0339596197009087 + -0.1435599029064179 + <_> + + <_> + + + + <_>3 0 14 3 -1. + <_>3 1 14 1 3. + 0 + 5.7123368605971336e-004 + -0.0685509666800499 + 0.0699447318911552 + <_> + + <_> + + + + <_>8 6 4 8 -1. + <_>10 6 2 8 2. + 0 + -0.0495777204632759 + 0.3988083899021149 + -0.0113399103283882 + <_> + + <_> + + + + <_>9 3 6 13 -1. + <_>11 3 2 13 3. + 0 + -0.0153348604217172 + -0.0834456235170364 + 0.0322763696312904 + <_> + + <_> + + + + <_>0 0 6 10 -1. + <_>0 0 3 5 2. + <_>3 5 3 5 2. + 0 + -0.0174060892313719 + 0.1356094032526016 + -0.0319455787539482 + <_> + + <_> + + + + <_>8 0 7 18 -1. + <_>8 6 7 6 3. + 0 + -0.0214222595095634 + -0.1105023995041847 + 0.0285360403358936 + <_> + + <_> + + + + <_>5 3 6 13 -1. + <_>7 3 2 13 3. + 0 + 1.9694769289344549e-003 + 0.0438341088593006 + -0.1055186018347740 + <_> + + <_> + + + + <_>7 4 9 5 -1. + <_>10 4 3 5 3. + 0 + -0.0191153790801764 + 0.1469029039144516 + -0.0154053103178740 + <_> + + <_> + + + + <_>8 1 3 18 -1. + <_>9 1 1 18 3. + 0 + 0.0469632595777512 + 8.1654358655214310e-003 + -0.5873488783836365 + <_> + + <_> + + + + <_>9 0 11 15 -1. + <_>9 5 11 5 3. 
+ 0 + 0.2096432000398636 + 3.1721789855509996e-003 + -0.8043789863586426 + <_> + + <_> + + + + <_>0 0 16 8 -1. + <_>0 0 8 4 2. + <_>8 4 8 4 2. + 0 + 0.0625114068388939 + -0.0164227895438671 + 0.3097603917121887 + <_> + + <_> + + + + <_>4 3 12 14 -1. + <_>10 3 6 7 2. + <_>4 10 6 7 2. + 0 + -0.1012618020176888 + -0.6163914799690247 + 7.2699659503996372e-003 + <_> + + <_> + + + + <_>5 6 6 12 -1. + <_>5 6 3 6 2. + <_>8 12 3 6 2. + 0 + 3.3980670850723982e-003 + -0.0196648892015219 + 0.2254192978143692 + <_> + + <_> + + + + <_>6 3 11 9 -1. + <_>6 6 11 3 3. + 0 + -0.0170599501580000 + -0.0171935204416513 + 0.0691145509481430 + <_> + + <_> + + + + <_>0 0 18 8 -1. + <_>0 0 9 4 2. + <_>9 4 9 4 2. + 0 + 3.7455849815160036e-003 + 0.0517374612390995 + -0.0827488228678703 + <_> + + <_> + + + + <_>11 5 9 12 -1. + <_>11 11 9 6 2. + 0 + 0.0877698063850403 + -6.3681108877062798e-003 + 0.0794920027256012 + <_> + + <_> + + + + <_>2 5 14 8 -1. + <_>2 9 14 4 2. + 0 + 2.3725361097604036e-003 + -0.3048743903636932 + 0.0145207699388266 + <_> + + <_> + + + + <_>16 2 4 8 -1. + <_>16 6 4 4 2. + 0 + -0.0192829091101885 + 0.1880698055028915 + -0.0132209295406938 + <_> + + <_> + + + + <_>4 10 7 6 -1. + <_>4 12 7 2 3. + 0 + 3.8580079562962055e-003 + 0.0339784398674965 + -0.1285416930913925 + <_> + + <_> + + + + <_>7 11 7 6 -1. + <_>7 13 7 2 3. + 0 + 2.6525680441409349e-003 + -0.0391469001770020 + 0.0991193577647209 + <_> + + <_> + + + + <_>0 2 4 8 -1. + <_>0 6 4 4 2. + 0 + 0.0991756021976471 + 5.0618657842278481e-003 + -0.8737046122550964 + <_> + + <_> + + + + <_>16 1 3 13 -1. + <_>17 1 1 13 3. + 0 + -7.0648840628564358e-003 + 0.0852192863821983 + -0.0244677904993296 + <_> + + <_> + + + + <_>4 2 10 6 -1. + <_>4 2 5 3 2. + <_>9 5 5 3 2. + 0 + -5.2547529339790344e-003 + -0.1215846985578537 + 0.0372285284101963 + <_> + + <_> + + + + <_>4 4 14 3 -1. + <_>4 5 14 1 3. + 0 + 5.0068609416484833e-003 + -0.0355571918189526 + 0.0785154625773430 + <_> + + <_> + + + + <_>5 5 7 6 -1. + <_>5 7 7 2 3. + 0 + -0.0681181624531746 + -0.2629249989986420 + 0.0183259602636099 + <_> + + <_> + + + + <_>6 13 13 3 -1. + <_>6 14 13 1 3. + 0 + 9.3348289374262094e-004 + -0.0301071796566248 + 0.0448697209358215 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + -2.1996269933879375e-003 + 0.1113670021295548 + -0.0662019327282906 + <_> + + <_> + + + + <_>10 13 7 4 -1. + <_>10 15 7 2 2. + 0 + -6.6485330462455750e-003 + -0.0783986970782280 + 0.0204720702022314 + <_> + + <_> + + + + <_>1 13 13 3 -1. + <_>1 14 13 1 3. + 0 + 1.4126920141279697e-003 + -0.0524286702275276 + 0.0894713997840881 + <_> + + <_> + + + + <_>6 3 11 9 -1. + <_>6 6 11 3 3. + 0 + 0.0514065995812416 + -1.4306739903986454e-003 + 0.6388527154922485 + -1.1700680255889893 + 45 + -1 + diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_default.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_default.xml new file mode 100644 index 0000000000000000000000000000000000000000..8dff079dac798e0b84f26aad876f3323d594c8fa --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_frontalface_default.xml @@ -0,0 +1,35712 @@ + + + + + 24 24 + + <_> + + + <_> + + <_> + + + + <_>6 4 12 9 -1. + <_>6 7 12 3 3. + 0 + -0.0315119996666908 + 2.0875380039215088 + -2.2172100543975830 + <_> + + <_> + + + + <_>6 4 12 7 -1. + <_>10 4 4 7 3. + 0 + 0.0123960003256798 + -1.8633940219879150 + 1.3272049427032471 + <_> + + <_> + + + + <_>3 9 18 9 -1. + <_>3 12 18 3 3. 
+ 0 + 0.0219279993325472 + -1.5105249881744385 + 1.0625729560852051 + <_> + + <_> + + + + <_>8 18 9 6 -1. + <_>8 20 9 2 3. + 0 + 5.7529998011887074e-003 + -0.8746389746665955 + 1.1760339736938477 + <_> + + <_> + + + + <_>3 5 4 19 -1. + <_>5 5 2 19 2. + 0 + 0.0150140002369881 + -0.7794569730758667 + 1.2608419656753540 + <_> + + <_> + + + + <_>6 5 12 16 -1. + <_>6 13 12 8 2. + 0 + 0.0993710011243820 + 0.5575129985809326 + -1.8743000030517578 + <_> + + <_> + + + + <_>5 8 12 6 -1. + <_>5 11 12 3 2. + 0 + 2.7340000960975885e-003 + -1.6911929845809937 + 0.4400970041751862 + <_> + + <_> + + + + <_>11 14 4 10 -1. + <_>11 19 4 5 2. + 0 + -0.0188590008765459 + -1.4769539833068848 + 0.4435009956359863 + <_> + + <_> + + + + <_>4 0 7 6 -1. + <_>4 3 7 3 2. + 0 + 5.9739998541772366e-003 + -0.8590919971466065 + 0.8525559902191162 + -5.0425500869750977 + -1 + -1 + <_> + + + <_> + + <_> + + + + <_>6 6 12 6 -1. + <_>6 8 12 2 3. + 0 + -0.0211100000888109 + 1.2435649633407593 + -1.5713009834289551 + <_> + + <_> + + + + <_>6 4 12 7 -1. + <_>10 4 4 7 3. + 0 + 0.0203559994697571 + -1.6204780340194702 + 1.1817760467529297 + <_> + + <_> + + + + <_>1 8 19 12 -1. + <_>1 12 19 4 3. + 0 + 0.0213089995086193 + -1.9415930509567261 + 0.7006909847259522 + <_> + + <_> + + + + <_>0 2 24 3 -1. + <_>8 2 8 3 3. + 0 + 0.0916600003838539 + -0.5567010045051575 + 1.7284419536590576 + <_> + + <_> + + + + <_>9 9 6 15 -1. + <_>9 14 6 5 3. + 0 + 0.0362880006432533 + 0.2676379978656769 + -2.1831810474395752 + <_> + + <_> + + + + <_>5 6 14 10 -1. + <_>5 11 14 5 2. + 0 + -0.0191099997609854 + -2.6730210781097412 + 0.4567080140113831 + <_> + + <_> + + + + <_>5 0 14 9 -1. + <_>5 3 14 3 3. + 0 + 8.2539999857544899e-003 + -1.0852910280227661 + 0.5356420278549194 + <_> + + <_> + + + + <_>13 11 9 6 -1. + <_>16 11 3 6 3. + 0 + 0.0183550007641315 + -0.3520019948482513 + 0.9333919882774353 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>9 5 2 10 3. + 0 + -7.0569999516010284e-003 + 0.9278209805488586 + -0.6634989976882935 + <_> + + <_> + + + + <_>10 8 6 10 -1. + <_>12 8 2 10 3. + 0 + -9.8770000040531158e-003 + 1.1577470302581787 + -0.2977479994297028 + <_> + + <_> + + + + <_>2 5 4 9 -1. + <_>4 5 2 9 2. + 0 + 0.0158140007406473 + -0.4196060001850128 + 1.3576040267944336 + <_> + + <_> + + + + <_>18 0 6 11 -1. + <_>20 0 2 11 3. + 0 + -0.0207000002264977 + 1.4590020179748535 + -0.1973939985036850 + <_> + + <_> + + + + <_>0 6 24 13 -1. + <_>8 6 8 13 3. + 0 + -0.1376080065965653 + 1.1186759471893311 + -0.5291550159454346 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0143189998343587 + -0.3512719869613648 + 1.1440860033035278 + <_> + + <_> + + + + <_>7 18 10 6 -1. + <_>7 20 10 2 3. + 0 + 0.0102530000731349 + -0.6085060238838196 + 0.7709850072860718 + <_> + + <_> + + + + <_>5 7 14 12 -1. + <_>5 13 14 6 2. + 0 + 0.0915080010890961 + 0.3881779909133911 + -1.5122940540313721 + -4.9842400550842285 + 0 + -1 + <_> + + + <_> + + <_> + + + + <_>0 3 24 3 -1. + <_>8 3 8 3 3. + 0 + 0.0697470009326935 + -1.0130879878997803 + 1.4687349796295166 + <_> + + <_> + + + + <_>5 8 15 6 -1. + <_>5 11 15 3 2. + 0 + 0.0315029993653297 + -1.6463639736175537 + 1.0000629425048828 + <_> + + <_> + + + + <_>9 6 5 14 -1. + <_>9 13 5 7 2. + 0 + 0.0142609998583794 + 0.4648030102252960 + -1.5959889888763428 + <_> + + <_> + + + + <_>9 5 6 10 -1. + <_>11 5 2 10 3. + 0 + 0.0144530003890395 + -0.6551190018653870 + 0.8302180171012878 + <_> + + <_> + + + + <_>6 6 3 12 -1. + <_>6 12 3 6 2. 
+ 0 + -3.0509999487549067e-003 + -1.3982310295104980 + 0.4255059957504273 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>9 21 6 3 3. + 0 + 0.0327229984104633 + -0.5070260167121887 + 1.0526109933853149 + <_> + + <_> + + + + <_>5 6 13 6 -1. + <_>5 8 13 2 3. + 0 + -7.2960001416504383e-003 + 0.3635689914226532 + -1.3464889526367187 + <_> + + <_> + + + + <_>18 1 6 15 -1. + <_>18 1 3 15 2. + 0 + 0.0504250004887581 + -0.3046140074729919 + 1.4504129886627197 + <_> + + <_> + + + + <_>1 1 6 15 -1. + <_>4 1 3 15 2. + 0 + 0.0468790009617805 + -0.4028620123863220 + 1.2145609855651855 + <_> + + <_> + + + + <_>0 8 24 15 -1. + <_>8 8 8 15 3. + 0 + -0.0693589970469475 + 1.0539360046386719 + -0.4571970105171204 + <_> + + <_> + + + + <_>5 6 14 12 -1. + <_>5 6 7 6 2. + <_>12 12 7 6 2. + 0 + -0.0490339994430542 + -1.6253089904785156 + 0.1537899971008301 + <_> + + <_> + + + + <_>2 12 21 12 -1. + <_>2 16 21 4 3. + 0 + 0.0848279967904091 + 0.2840299904346466 + -1.5662059783935547 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>10 1 2 10 2. + 0 + -1.7229999648407102e-003 + -1.0147459506988525 + 0.2329480051994324 + <_> + + <_> + + + + <_>2 13 20 10 -1. + <_>2 13 10 10 2. + 0 + 0.1156219989061356 + -0.1673289984464645 + 1.2804069519042969 + <_> + + <_> + + + + <_>0 1 6 13 -1. + <_>2 1 2 13 3. + 0 + -0.0512799993157387 + 1.5162390470504761 + -0.3027110099792481 + <_> + + <_> + + + + <_>20 2 4 13 -1. + <_>20 2 2 13 2. + 0 + -0.0427069999277592 + 1.7631920576095581 + -0.0518320016562939 + <_> + + <_> + + + + <_>0 5 22 19 -1. + <_>11 5 11 19 2. + 0 + 0.3717809915542603 + -0.3138920068740845 + 1.5357979536056519 + <_> + + <_> + + + + <_>18 4 6 9 -1. + <_>20 4 2 9 3. + 0 + 0.0194129999727011 + -0.1001759991049767 + 0.9365540146827698 + <_> + + <_> + + + + <_>0 3 6 11 -1. + <_>2 3 2 11 3. + 0 + 0.0174390003085136 + -0.4037989974021912 + 0.9629300236701965 + <_> + + <_> + + + + <_>12 1 4 9 -1. + <_>12 1 2 9 2. + 0 + 0.0396389998495579 + 0.1703909933567047 + -2.9602990150451660 + <_> + + <_> + + + + <_>0 6 19 3 -1. + <_>0 7 19 1 3. + 0 + -9.1469995677471161e-003 + 0.8878679871559143 + -0.4381870031356812 + <_> + + <_> + + + + <_>12 1 4 9 -1. + <_>12 1 2 9 2. + 0 + 1.7219999572262168e-003 + -0.3721860051155090 + 0.4001890122890472 + <_> + + <_> + + + + <_>8 1 4 9 -1. + <_>10 1 2 9 2. + 0 + 0.0302310008555651 + 0.0659240037202835 + -2.6469180583953857 + <_> + + <_> + + + + <_>5 5 14 14 -1. + <_>12 5 7 7 2. + <_>5 12 7 7 2. + 0 + -0.0787959992885590 + -1.7491459846496582 + 0.2847529947757721 + <_> + + <_> + + + + <_>1 10 18 2 -1. + <_>1 11 18 1 2. + 0 + 2.1110000088810921e-003 + -0.9390810132026672 + 0.2320519983768463 + <_> + + <_> + + + + <_>17 13 4 11 -1. + <_>17 13 2 11 2. + 0 + 0.0270910002291203 + -0.0526640005409718 + 1.0756820440292358 + <_> + + <_> + + + + <_>0 4 6 9 -1. + <_>0 7 6 3 3. + 0 + -0.0449649989604950 + -1.8294479846954346 + 0.0995619967579842 + -4.6551899909973145 + 1 + -1 + <_> + + + <_> + + <_> + + + + <_>6 4 12 9 -1. + <_>6 7 12 3 3. + 0 + -0.0657010003924370 + 1.1558510065078735 + -1.0716359615325928 + <_> + + <_> + + + + <_>6 5 12 6 -1. + <_>10 5 4 6 3. + 0 + 0.0158399995416403 + -1.5634720325469971 + 0.7687709927558899 + <_> + + <_> + + + + <_>0 1 24 5 -1. + <_>8 1 8 5 3. + 0 + 0.1457089930772781 + -0.5745009779930115 + 1.3808720111846924 + <_> + + <_> + + + + <_>4 10 18 6 -1. + <_>4 12 18 2 3. + 0 + 6.1389999464154243e-003 + -1.4570560455322266 + 0.5161030292510986 + <_> + + <_> + + + + <_>2 17 12 6 -1. + <_>2 17 6 3 2. + <_>8 20 6 3 2. 
+ 0 + 6.7179999314248562e-003 + -0.8353360295295715 + 0.5852220058441162 + <_> + + <_> + + + + <_>19 3 4 13 -1. + <_>19 3 2 13 2. + 0 + 0.0185180008411407 + -0.3131209909915924 + 1.1696679592132568 + <_> + + <_> + + + + <_>1 3 4 13 -1. + <_>3 3 2 13 2. + 0 + 0.0199580006301403 + -0.4344260096549988 + 0.9544690251350403 + <_> + + <_> + + + + <_>0 1 24 23 -1. + <_>8 1 8 23 3. + 0 + -0.2775500118732452 + 1.4906179904937744 + -0.1381590068340302 + <_> + + <_> + + + + <_>1 7 8 12 -1. + <_>1 11 8 4 3. + 0 + 9.1859996318817139e-003 + -0.9636150002479553 + 0.2766549885272980 + <_> + + <_> + + + + <_>14 7 3 14 -1. + <_>14 14 3 7 2. + 0 + -0.0377379991114140 + -2.4464108943939209 + 0.2361959964036942 + <_> + + <_> + + + + <_>3 12 16 6 -1. + <_>3 12 8 3 2. + <_>11 15 8 3 2. + 0 + 0.0184630006551743 + 0.1753920018672943 + -1.3423130512237549 + <_> + + <_> + + + + <_>6 6 12 6 -1. + <_>6 8 12 2 3. + 0 + -0.0111149996519089 + 0.4871079921722412 + -0.8985189795494080 + <_> + + <_> + + + + <_>8 7 6 12 -1. + <_>8 13 6 6 2. + 0 + 0.0339279994368553 + 0.1787420064210892 + -1.6342279911041260 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + -0.0356490015983582 + -1.9607399702072144 + 0.1810249984264374 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>1 18 18 1 3. + 0 + -0.0114380000159144 + 0.9901069998741150 + -0.3810319900512695 + <_> + + <_> + + + + <_>4 4 16 12 -1. + <_>4 10 16 6 2. + 0 + -0.0652360022068024 + -2.5794160366058350 + 0.2475360035896301 + <_> + + <_> + + + + <_>0 1 4 20 -1. + <_>2 1 2 20 2. + 0 + -0.0422720015048981 + 1.4411840438842773 + -0.2950829863548279 + <_> + + <_> + + + + <_>3 0 18 2 -1. + <_>3 1 18 1 2. + 0 + 1.9219999667257071e-003 + -0.4960860013961792 + 0.6317359805107117 + <_> + + <_> + + + + <_>1 5 20 14 -1. + <_>1 5 10 7 2. + <_>11 12 10 7 2. + 0 + -0.1292179971933365 + -2.3314270973205566 + 0.0544969998300076 + <_> + + <_> + + + + <_>5 8 14 12 -1. + <_>5 12 14 4 3. + 0 + 0.0229310002177954 + -0.8444709777832031 + 0.3873809874057770 + <_> + + <_> + + + + <_>3 14 7 9 -1. + <_>3 17 7 3 3. + 0 + -0.0341200008988380 + -1.4431500434875488 + 0.0984229966998100 + <_> + + <_> + + + + <_>14 15 9 6 -1. + <_>14 17 9 2 3. + 0 + 0.0262230001389980 + 0.1822309941053391 + -1.2586519718170166 + <_> + + <_> + + + + <_>1 15 9 6 -1. + <_>1 17 9 2 3. + 0 + 0.0222369991242886 + 0.0698079988360405 + -2.3820950984954834 + <_> + + <_> + + + + <_>11 6 8 10 -1. + <_>15 6 4 5 2. + <_>11 11 4 5 2. + 0 + -5.8240001089870930e-003 + 0.3933250010013580 + -0.2754279971122742 + <_> + + <_> + + + + <_>5 5 14 14 -1. + <_>5 5 7 7 2. + <_>12 12 7 7 2. + 0 + 0.0436530001461506 + 0.1483269929885864 + -1.1368780136108398 + <_> + + <_> + + + + <_>6 0 12 5 -1. + <_>10 0 4 5 3. + 0 + 0.0572669990360737 + 0.2462809979915619 + -1.2687400579452515 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>9 3 6 3 3. + 0 + 2.3409998975694180e-003 + -0.7544890046119690 + 0.2716380059719086 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0129960002377629 + -0.3639490008354187 + 0.7095919847488403 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0265170000493526 + -2.3221859931945801 + 0.0357440002262592 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + -5.8400002308189869e-003 + 0.4219430088996887 + -0.0481849983334541 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + -0.0165689997375011 + 1.1099940538406372 + -0.3484970033168793 + <_> + + <_> + + + + <_>3 8 18 4 -1. + <_>9 8 6 4 3. 
+ 0 + -0.0681570023298264 + -3.3269989490509033 + 0.2129900008440018 + -4.4531588554382324 + 2 + -1 + <_> + + + <_> + + <_> + + + + <_>6 0 12 9 -1. + <_>6 3 12 3 3. + 0 + 0.0399740003049374 + -1.2173449993133545 + 1.0826710462570190 + <_> + + <_> + + + + <_>0 0 24 6 -1. + <_>8 0 8 6 3. + 0 + 0.1881950050592423 + -0.4828940033912659 + 1.4045250415802002 + <_> + + <_> + + + + <_>4 7 16 12 -1. + <_>4 11 16 4 3. + 0 + 0.0780270025134087 + -1.0782150030136108 + 0.7404029965400696 + <_> + + <_> + + + + <_>11 6 6 6 -1. + <_>11 6 3 6 2. + 0 + 1.1899999663000926e-004 + -1.2019979953765869 + 0.3774920105934143 + <_> + + <_> + + + + <_>0 20 24 3 -1. + <_>8 20 8 3 3. + 0 + 0.0850569978356361 + -0.4393909871578217 + 1.2647340297698975 + <_> + + <_> + + + + <_>11 6 4 9 -1. + <_>11 6 2 9 2. + 0 + 8.9720003306865692e-003 + -0.1844049990177155 + 0.4572640061378479 + <_> + + <_> + + + + <_>4 13 15 4 -1. + <_>9 13 5 4 3. + 0 + 8.8120000436902046e-003 + 0.3039669990539551 + -0.9599109888076782 + <_> + + <_> + + + + <_>11 6 4 9 -1. + <_>11 6 2 9 2. + 0 + -0.0235079992562532 + 1.2487529516220093 + 0.0462279990315437 + <_> + + <_> + + + + <_>9 6 4 9 -1. + <_>11 6 2 9 2. + 0 + 7.0039997808635235e-003 + -0.5944210290908814 + 0.5396329760551453 + <_> + + <_> + + + + <_>9 12 6 12 -1. + <_>9 18 6 6 2. + 0 + 0.0338519997894764 + 0.2849609851837158 + -1.4895249605178833 + <_> + + <_> + + + + <_>1 22 18 2 -1. + <_>1 23 18 1 2. + 0 + -3.2530000898987055e-003 + 0.4812079966068268 + -0.5271239876747131 + <_> + + <_> + + + + <_>10 7 4 10 -1. + <_>10 12 4 5 2. + 0 + 0.0290970001369715 + 0.2674390077590942 + -1.6007850170135498 + <_> + + <_> + + + + <_>6 7 8 10 -1. + <_>6 12 8 5 2. + 0 + -8.4790000692009926e-003 + -1.3107639551162720 + 0.1524309962987900 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -0.0107950000092387 + 0.4561359882354736 + -0.7205089926719666 + <_> + + <_> + + + + <_>0 14 10 4 -1. + <_>0 16 10 2 2. + 0 + -0.0246200002729893 + -1.7320619821548462 + 0.0683630034327507 + <_> + + <_> + + + + <_>6 18 18 2 -1. + <_>6 19 18 1 2. + 0 + 3.7380000576376915e-003 + -0.1930329948663712 + 0.6824349761009216 + <_> + + <_> + + + + <_>1 1 22 3 -1. + <_>1 2 22 1 3. + 0 + -0.0122640002518892 + -1.6095290184020996 + 0.0752680003643036 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + -4.8670000396668911e-003 + 0.7428650259971619 + -0.2151020020246506 + <_> + + <_> + + + + <_>2 4 6 15 -1. + <_>5 4 3 15 2. + 0 + 0.0767259970307350 + -0.2683509886264801 + 1.3094140291213989 + <_> + + <_> + + + + <_>20 4 4 10 -1. + <_>20 4 2 10 2. + 0 + 0.0285780001431704 + -0.0587930008769035 + 1.2196329832077026 + <_> + + <_> + + + + <_>0 4 4 10 -1. + <_>2 4 2 10 2. + 0 + 0.0196940004825592 + -0.3514289855957031 + 0.8492699861526489 + <_> + + <_> + + + + <_>2 16 20 6 -1. + <_>12 16 10 3 2. + <_>2 19 10 3 2. + 0 + -0.0290939994156361 + -1.0507299900054932 + 0.2980630099773407 + <_> + + <_> + + + + <_>0 12 8 9 -1. + <_>4 12 4 9 2. + 0 + -0.0291440002620220 + 0.8254780173301697 + -0.3268719911575317 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + 0.0197410006076097 + 0.2045260071754456 + -0.8376020193099976 + <_> + + <_> + + + + <_>5 10 6 6 -1. + <_>8 10 3 6 2. + 0 + 4.3299999088048935e-003 + 0.2057790011167526 + -0.6682980060577393 + <_> + + <_> + + + + <_>11 8 12 6 -1. + <_>17 8 6 3 2. + <_>11 11 6 3 2. + 0 + -0.0355009995400906 + -1.2969900369644165 + 0.1389749944210053 + <_> + + <_> + + + + <_>0 8 12 6 -1. + <_>0 8 6 3 2. + <_>6 11 6 3 2. 
+ 0 + -0.0161729995161295 + -1.3110569715499878 + 0.0757519975304604 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0221510007977486 + -1.0524389743804932 + 0.1924110054969788 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + -0.0227070003747940 + -1.3735309839248657 + 0.0667809993028641 + <_> + + <_> + + + + <_>8 14 9 6 -1. + <_>8 16 9 2 3. + 0 + 0.0166079998016357 + -0.0371359996497631 + 0.7784640192985535 + <_> + + <_> + + + + <_>0 16 9 6 -1. + <_>0 18 9 2 3. + 0 + -0.0133090000599623 + -0.9985070228576660 + 0.1224810034036636 + <_> + + <_> + + + + <_>10 8 6 10 -1. + <_>12 8 2 10 3. + 0 + -0.0337320007383823 + 1.4461359977722168 + 0.0131519995629787 + <_> + + <_> + + + + <_>3 19 12 3 -1. + <_>9 19 6 3 2. + 0 + 0.0169350001960993 + -0.3712129890918732 + 0.5284219980239868 + <_> + + <_> + + + + <_>2 10 20 2 -1. + <_>2 11 20 1 2. + 0 + 3.3259999472647905e-003 + -0.5756850242614746 + 0.3926190137863159 + <_> + + <_> + + + + <_>2 9 18 12 -1. + <_>2 9 9 6 2. + <_>11 15 9 6 2. + 0 + 0.0836440026760101 + 0.0161160007119179 + -2.1173279285430908 + <_> + + <_> + + + + <_>3 0 18 24 -1. + <_>3 0 9 24 2. + 0 + 0.2578519880771637 + -0.0816090032458305 + 0.9878249764442444 + <_> + + <_> + + + + <_>5 6 14 10 -1. + <_>5 6 7 5 2. + <_>12 11 7 5 2. + 0 + -0.0365669988095760 + -1.1512110233306885 + 0.0964590013027191 + <_> + + <_> + + + + <_>9 5 10 12 -1. + <_>14 5 5 6 2. + <_>9 11 5 6 2. + 0 + -0.0164459999650717 + 0.3731549978256226 + -0.1458539962768555 + <_> + + <_> + + + + <_>4 5 12 12 -1. + <_>4 5 6 6 2. + <_>10 11 6 6 2. + 0 + -3.7519999314099550e-003 + 0.2617929875850678 + -0.5815669894218445 + <_> + + <_> + + + + <_>4 14 18 3 -1. + <_>4 15 18 1 3. + 0 + -6.3660000450909138e-003 + 0.7547739744186401 + -0.1705520004034042 + <_> + + <_> + + + + <_>6 13 8 8 -1. + <_>6 17 8 4 2. + 0 + -3.8499999791383743e-003 + 0.2265399992465973 + -0.6387640237808228 + <_> + + <_> + + + + <_>3 16 18 6 -1. + <_>3 19 18 3 2. + 0 + -0.0454940013587475 + -1.2640299797058105 + 0.2526069879531860 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0239410009235144 + 0.8706840276718140 + -0.2710469961166382 + <_> + + <_> + + + + <_>6 6 12 18 -1. + <_>10 6 4 18 3. + 0 + -0.0775580033659935 + -1.3901610374450684 + 0.2361229956150055 + <_> + + <_> + + + + <_>6 1 4 14 -1. + <_>8 1 2 14 2. + 0 + 0.0236140005290508 + 0.0661400035023689 + -1.2645419836044312 + <_> + + <_> + + + + <_>3 2 19 2 -1. + <_>3 3 19 1 2. + 0 + -2.5750000495463610e-003 + -0.5384169816970825 + 0.3037909865379334 + <_> + + <_> + + + + <_>1 8 22 13 -1. + <_>12 8 11 13 2. + 0 + 0.1201080009341240 + -0.3534300029277802 + 0.5286620259284973 + <_> + + <_> + + + + <_>8 9 11 4 -1. + <_>8 11 11 2 2. + 0 + 2.2899999748915434e-003 + -0.5870199799537659 + 0.2406100034713745 + <_> + + <_> + + + + <_>0 12 15 10 -1. + <_>5 12 5 10 3. + 0 + 0.0697169974446297 + -0.3334890007972717 + 0.5191630125045776 + <_> + + <_> + + + + <_>12 16 12 6 -1. + <_>16 16 4 6 3. + 0 + -0.0466700010001659 + 0.6979539990425110 + -0.0148959998041391 + <_> + + <_> + + + + <_>0 16 12 6 -1. + <_>4 16 4 6 3. + 0 + -0.0501290000975132 + 0.8614619970321655 + -0.2598600089550018 + <_> + + <_> + + + + <_>19 1 5 12 -1. + <_>19 5 5 4 3. + 0 + 0.0301479995250702 + 0.1933279931545258 + -0.5913109779357910 + -4.3864588737487793 + 3 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>8 2 8 4 3. + 0 + 0.0910850018262863 + -0.8923310041427612 + 1.0434230566024780 + <_> + + <_> + + + + <_>6 8 12 4 -1. + <_>6 10 12 2 2. 
+ 0 + 0.0128189995884895 + -1.2597670555114746 + 0.5531709790229797 + <_> + + <_> + + + + <_>7 5 9 6 -1. + <_>10 5 3 6 3. + 0 + 0.0159319993108511 + -0.8625440001487732 + 0.6373180150985718 + <_> + + <_> + + + + <_>9 17 6 6 -1. + <_>9 20 6 3 2. + 0 + 2.2780001163482666e-003 + -0.7463920116424561 + 0.5315560102462769 + <_> + + <_> + + + + <_>0 7 22 15 -1. + <_>0 12 22 5 3. + 0 + 0.0318409986793995 + -1.2650489807128906 + 0.3615390062332153 + <_> + + <_> + + + + <_>4 1 17 9 -1. + <_>4 4 17 3 3. + 0 + 2.6960000395774841e-003 + -0.9829040169715881 + 0.3601300120353699 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>9 5 2 10 3. + 0 + -0.0120550002902746 + 0.6406840085983276 + -0.5012500286102295 + <_> + + <_> + + + + <_>18 1 6 8 -1. + <_>18 1 3 8 2. + 0 + 0.0213249996304512 + -0.2403499931097031 + 0.8544800281524658 + <_> + + <_> + + + + <_>0 1 6 7 -1. + <_>3 1 3 7 2. + 0 + 0.0304860007017851 + -0.3427360057830811 + 1.1428849697113037 + <_> + + <_> + + + + <_>18 0 6 22 -1. + <_>18 0 3 22 2. + 0 + -0.0450799986720085 + 1.0976949930191040 + -0.1797460019588471 + <_> + + <_> + + + + <_>0 0 6 22 -1. + <_>3 0 3 22 2. + 0 + -0.0717009976506233 + 1.5735000371932983 + -0.3143349885940552 + <_> + + <_> + + + + <_>16 7 8 16 -1. + <_>16 7 4 16 2. + 0 + 0.0592180006206036 + -0.2758240103721619 + 1.0448570251464844 + <_> + + <_> + + + + <_>2 10 19 6 -1. + <_>2 12 19 2 3. + 0 + 6.7010000348091125e-003 + -1.0974019765853882 + 0.1980119943618774 + <_> + + <_> + + + + <_>9 9 6 12 -1. + <_>9 13 6 4 3. + 0 + 0.0410469993948936 + 0.3054769933223724 + -1.3287999629974365 + <_> + + <_> + + + + <_>2 15 17 6 -1. + <_>2 17 17 2 3. + 0 + -8.5499999113380909e-004 + 0.2580710053443909 + -0.7005289793014526 + <_> + + <_> + + + + <_>14 7 3 14 -1. + <_>14 14 3 7 2. + 0 + -0.0303600002080202 + -1.2306419610977173 + 0.2260939925909042 + <_> + + <_> + + + + <_>5 6 8 10 -1. + <_>5 6 4 5 2. + <_>9 11 4 5 2. + 0 + -0.0129300002008677 + 0.4075860083103180 + -0.5123450160026550 + <_> + + <_> + + + + <_>15 8 9 11 -1. + <_>18 8 3 11 3. + 0 + 0.0373679995536804 + -0.0947550013661385 + 0.6176509857177734 + <_> + + <_> + + + + <_>0 8 9 11 -1. + <_>3 8 3 11 3. + 0 + 0.0244340002536774 + -0.4110060036182404 + 0.4763050079345703 + <_> + + <_> + + + + <_>8 6 10 18 -1. + <_>8 15 10 9 2. + 0 + 0.0570079982280731 + 0.2524929940700531 + -0.6866980195045471 + <_> + + <_> + + + + <_>7 7 3 14 -1. + <_>7 14 3 7 2. + 0 + -0.0163139998912811 + -0.9392840266227722 + 0.1144810020923615 + <_> + + <_> + + + + <_>0 14 24 8 -1. + <_>8 14 8 8 3. + 0 + -0.1764889955520630 + 1.2451089620590210 + -0.0565190017223358 + <_> + + <_> + + + + <_>1 10 18 14 -1. + <_>10 10 9 14 2. + 0 + 0.1761460006237030 + -0.3252820074558258 + 0.8279150128364563 + <_> + + <_> + + + + <_>14 12 6 6 -1. + <_>14 15 6 3 2. + 0 + -7.3910001665353775e-003 + 0.3478370010852814 + -0.1792909950017929 + <_> + + <_> + + + + <_>7 0 10 16 -1. + <_>7 0 5 8 2. + <_>12 8 5 8 2. + 0 + 0.0608909986913204 + 0.0550980009138584 + -1.5480779409408569 + <_> + + <_> + + + + <_>10 0 9 6 -1. + <_>13 0 3 6 3. + 0 + -0.0291230008006096 + -1.0255639553070068 + 0.2410690039396286 + <_> + + <_> + + + + <_>4 3 16 4 -1. + <_>12 3 8 4 2. + 0 + -0.0456489995121956 + 1.0301599502563477 + -0.3167209923267365 + <_> + + <_> + + + + <_>10 0 9 6 -1. + <_>13 0 3 6 3. + 0 + 0.0373330004513264 + 0.2162059992551804 + -0.8258990049362183 + <_> + + <_> + + + + <_>1 1 20 4 -1. + <_>1 1 10 2 2. + <_>11 3 10 2 2. + 0 + -0.0244110003113747 + -1.5957959890365601 + 0.0511390008032322 + <_> + + <_> + + + + <_>10 0 9 6 -1. + <_>13 0 3 6 3. 
+ 0 + -0.0598069988191128 + -1.0312290191650391 + 0.1309230029582977 + <_> + + <_> + + + + <_>5 0 9 6 -1. + <_>8 0 3 6 3. + 0 + -0.0301060006022453 + -1.4781630039215088 + 0.0372119992971420 + <_> + + <_> + + + + <_>8 18 10 6 -1. + <_>8 20 10 2 3. + 0 + 7.4209999293088913e-003 + -0.2402410060167313 + 0.4933399856090546 + <_> + + <_> + + + + <_>6 3 6 9 -1. + <_>8 3 2 9 3. + 0 + -2.1909999195486307e-003 + 0.2894150018692017 + -0.5725960135459900 + <_> + + <_> + + + + <_>7 3 12 6 -1. + <_>7 5 12 2 3. + 0 + 0.0208609998226166 + -0.2314839959144592 + 0.6376590132713318 + <_> + + <_> + + + + <_>0 10 18 3 -1. + <_>0 11 18 1 3. + 0 + -6.6990000195801258e-003 + -1.2107750177383423 + 0.0640180036425591 + <_> + + <_> + + + + <_>1 10 22 3 -1. + <_>1 11 22 1 3. + 0 + 0.0187580008059740 + 0.2446130067110062 + -0.9978669881820679 + <_> + + <_> + + + + <_>5 11 8 8 -1. + <_>9 11 4 8 2. + 0 + -0.0443230010569096 + -1.3699189424514771 + 0.0360519997775555 + <_> + + <_> + + + + <_>12 11 6 6 -1. + <_>12 11 3 6 2. + 0 + 0.0228599999099970 + 0.2128839939832687 + -1.0397620201110840 + <_> + + <_> + + + + <_>6 11 6 6 -1. + <_>9 11 3 6 2. + 0 + -9.8600005730986595e-004 + 0.3244360089302063 + -0.5429180264472961 + <_> + + <_> + + + + <_>7 10 11 6 -1. + <_>7 12 11 2 3. + 0 + 0.0172390006482601 + -0.2832390069961548 + 0.4446820020675659 + <_> + + <_> + + + + <_>0 13 24 4 -1. + <_>0 13 12 2 2. + <_>12 15 12 2 2. + 0 + -0.0345310010015965 + -2.3107020854949951 + -3.1399999279528856e-003 + <_> + + <_> + + + + <_>2 4 22 12 -1. + <_>13 4 11 6 2. + <_>2 10 11 6 2. + 0 + 0.0670069977641106 + 0.2871569991111755 + -0.6448100209236145 + <_> + + <_> + + + + <_>2 0 20 17 -1. + <_>12 0 10 17 2. + 0 + 0.2377689927816391 + -0.2717480063438416 + 0.8021910190582275 + <_> + + <_> + + + + <_>14 0 2 24 -1. + <_>14 0 1 24 2. + 0 + -0.0129030002281070 + -1.5317620038986206 + 0.2142360061407089 + <_> + + <_> + + + + <_>8 0 2 24 -1. + <_>9 0 1 24 2. + 0 + 0.0105149997398257 + 0.0770379975438118 + -1.0581140518188477 + <_> + + <_> + + + + <_>14 1 2 22 -1. + <_>14 1 1 22 2. + 0 + 0.0169690009206533 + 0.1430670022964478 + -0.8582839965820313 + <_> + + <_> + + + + <_>8 1 2 22 -1. + <_>9 1 1 22 2. + 0 + -7.2460002265870571e-003 + -1.1020129919052124 + 0.0649069994688034 + <_> + + <_> + + + + <_>17 6 3 18 -1. + <_>18 6 1 18 3. + 0 + 0.0105569995939732 + 0.0139640001580119 + 0.6360149979591370 + <_> + + <_> + + + + <_>6 14 9 6 -1. + <_>6 16 9 2 3. + 0 + 6.1380001716315746e-003 + -0.3454590141773224 + 0.5629680156707764 + <_> + + <_> + + + + <_>13 14 9 4 -1. + <_>13 16 9 2 2. + 0 + 0.0131580000743270 + 0.1992730051279068 + -1.5040320158004761 + <_> + + <_> + + + + <_>3 18 18 3 -1. + <_>3 19 18 1 3. + 0 + 3.1310000922530890e-003 + -0.4090369939804077 + 0.3779639899730682 + <_> + + <_> + + + + <_>9 4 8 18 -1. + <_>13 4 4 9 2. + <_>9 13 4 9 2. + 0 + -0.1092069968581200 + -2.2227079868316650 + 0.1217819973826408 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + 8.1820003688335419e-003 + -0.2865200042724609 + 0.6789079904556274 + -4.1299300193786621 + 4 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 12 4 -1. + <_>6 2 6 4 2. + 0 + 0.0313469991087914 + -0.8888459801673889 + 0.9493680000305176 + <_> + + <_> + + + + <_>6 8 14 6 -1. + <_>6 11 14 3 2. + 0 + 0.0319180004298687 + -1.1146880388259888 + 0.4888899922370911 + <_> + + <_> + + + + <_>7 5 6 6 -1. + <_>10 5 3 6 2. + 0 + 6.5939999185502529e-003 + -1.0097689628601074 + 0.4972380101680756 + <_> + + <_> + + + + <_>10 5 6 16 -1. + <_>10 13 6 8 2. 
+ 0 + 0.0261480007320642 + 0.2599129974842072 + -1.2537480592727661 + <_> + + <_> + + + + <_>1 4 9 16 -1. + <_>4 4 3 16 3. + 0 + 0.0128450002521276 + -0.5713859796524048 + 0.5965949892997742 + <_> + + <_> + + + + <_>5 0 18 9 -1. + <_>5 3 18 3 3. + 0 + 0.0263449996709824 + -0.5520319938659668 + 0.3021740019321442 + <_> + + <_> + + + + <_>9 15 5 8 -1. + <_>9 19 5 4 2. + 0 + -0.0150830000638962 + -1.2871240377426147 + 0.2235420048236847 + <_> + + <_> + + + + <_>20 0 4 9 -1. + <_>20 0 2 9 2. + 0 + -0.0388870015740395 + 1.7425049543380737 + -0.0997470021247864 + <_> + + <_> + + + + <_>2 0 18 3 -1. + <_>2 1 18 1 3. + 0 + -5.7029998861253262e-003 + -1.0523240566253662 + 0.1836259961128235 + <_> + + <_> + + + + <_>5 22 19 2 -1. + <_>5 23 19 1 2. + 0 + -1.4860000228509307e-003 + 0.5678420066833496 + -0.4674200117588043 + <_> + + <_> + + + + <_>0 0 4 9 -1. + <_>2 0 2 9 2. + 0 + -0.0284860003739595 + 1.3082909584045410 + -0.2646090090274811 + <_> + + <_> + + + + <_>5 6 19 18 -1. + <_>5 12 19 6 3. + 0 + 0.0662249997258186 + -0.4621070027351379 + 0.4174959957599640 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>2 1 2 9 3. + 0 + 8.8569996878504753e-003 + -0.4147489964962006 + 0.5920479893684387 + <_> + + <_> + + + + <_>6 5 14 12 -1. + <_>13 5 7 6 2. + <_>6 11 7 6 2. + 0 + 0.0113559998571873 + 0.3610309958457947 + -0.4578120112419128 + <_> + + <_> + + + + <_>0 1 20 2 -1. + <_>0 2 20 1 2. + 0 + -2.7679998893290758e-003 + -0.8923889994621277 + 0.1419900059700012 + <_> + + <_> + + + + <_>1 2 22 3 -1. + <_>1 3 22 1 3. + 0 + 0.0112469997256994 + 0.2935340106487274 + -0.9733060002326965 + <_> + + <_> + + + + <_>2 8 7 9 -1. + <_>2 11 7 3 3. + 0 + 7.1970000863075256e-003 + -0.7933490276336670 + 0.1831340044736862 + <_> + + <_> + + + + <_>2 12 22 4 -1. + <_>13 12 11 2 2. + <_>2 14 11 2 2. + 0 + 0.0317689999938011 + 0.1552309989929199 + -1.3245639801025391 + <_> + + <_> + + + + <_>0 12 22 4 -1. + <_>0 12 11 2 2. + <_>11 14 11 2 2. + 0 + 0.0251739993691444 + 0.0342149995267391 + -2.0948131084442139 + <_> + + <_> + + + + <_>9 7 6 11 -1. + <_>11 7 2 11 3. + 0 + 7.5360001064836979e-003 + -0.3945060074329376 + 0.5133399963378906 + <_> + + <_> + + + + <_>7 1 9 6 -1. + <_>10 1 3 6 3. + 0 + 0.0328730009496212 + 0.0883729979395866 + -1.2814120054244995 + <_> + + <_> + + + + <_>11 2 4 10 -1. + <_>11 7 4 5 2. + 0 + -2.7379998937249184e-003 + 0.5528650283813477 + -0.4638499915599823 + <_> + + <_> + + + + <_>6 4 12 12 -1. + <_>6 10 12 6 2. + 0 + -0.0380750000476837 + -1.8497270345687866 + 0.0459440015256405 + <_> + + <_> + + + + <_>18 1 6 15 -1. + <_>18 6 6 5 3. + 0 + -0.0389840006828308 + -0.4822370111942291 + 0.3476060032844544 + <_> + + <_> + + + + <_>3 15 18 3 -1. + <_>3 16 18 1 3. + 0 + 2.8029999230057001e-003 + -0.4515469968318939 + 0.4280630052089691 + <_> + + <_> + + + + <_>18 5 6 9 -1. + <_>18 8 6 3 3. + 0 + -0.0541459992527962 + -0.8452079892158508 + 0.1667490005493164 + <_> + + <_> + + + + <_>1 5 16 6 -1. + <_>1 5 8 3 2. + <_>9 8 8 3 2. + 0 + -8.3280000835657120e-003 + 0.3534829914569855 + -0.4716320037841797 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + 0.0337780006229877 + 0.1846310049295425 + -1.6686669588088989 + <_> + + <_> + + + + <_>0 4 24 14 -1. + <_>0 4 12 7 2. + <_>12 11 12 7 2. + 0 + -0.1123809963464737 + -1.2521569728851318 + 0.0359920002520084 + <_> + + <_> + + + + <_>13 0 4 13 -1. + <_>13 0 2 13 2. + 0 + -0.0104080000892282 + -0.8162040114402771 + 0.2342859953641892 + <_> + + <_> + + + + <_>7 0 4 13 -1. + <_>9 0 2 13 2. 
+ 0 + -4.9439999274909496e-003 + -0.9258469939231873 + 0.1003480032086372 + <_> + + <_> + + + + <_>11 6 6 9 -1. + <_>13 6 2 9 3. + 0 + -9.3029998242855072e-003 + 0.5649930238723755 + -0.1888190060853958 + <_> + + <_> + + + + <_>8 7 6 9 -1. + <_>10 7 2 9 3. + 0 + -0.0117499995976686 + 0.8030239939689636 + -0.3827700018882752 + <_> + + <_> + + + + <_>13 17 9 6 -1. + <_>13 19 9 2 3. + 0 + -0.0232170000672340 + -0.8492699861526489 + 0.1967120021581650 + <_> + + <_> + + + + <_>2 18 14 6 -1. + <_>2 18 7 3 2. + <_>9 21 7 3 2. + 0 + 0.0168660003691912 + -0.4059189856052399 + 0.5069530010223389 + <_> + + <_> + + + + <_>3 18 18 4 -1. + <_>12 18 9 2 2. + <_>3 20 9 2 2. + 0 + -0.0240310002118349 + -1.5297520160675049 + 0.2334499955177307 + <_> + + <_> + + + + <_>0 20 15 4 -1. + <_>5 20 5 4 3. + 0 + -0.0369459986686707 + 0.6300770044326782 + -0.3178040087223053 + <_> + + <_> + + + + <_>9 15 15 9 -1. + <_>14 15 5 9 3. + 0 + -0.0615639984607697 + 0.5862789750099182 + -0.0121079999953508 + <_> + + <_> + + + + <_>4 4 16 4 -1. + <_>4 6 16 2 2. + 0 + 0.0216610003262758 + -0.2562370002269745 + 1.0409849882125854 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -3.6710000131279230e-003 + 0.2917110025882721 + -0.8328729867935181 + <_> + + <_> + + + + <_>0 14 15 10 -1. + <_>5 14 5 10 3. + 0 + 0.0448490008711815 + -0.3963319957256317 + 0.4566200077533722 + <_> + + <_> + + + + <_>7 9 10 14 -1. + <_>12 9 5 7 2. + <_>7 16 5 7 2. + 0 + 0.0571950003504753 + 0.2102389931678772 + -1.5004800558090210 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>9 6 2 9 3. + 0 + -0.0113420002162457 + 0.4407129883766174 + -0.3865379989147186 + <_> + + <_> + + + + <_>3 6 18 3 -1. + <_>3 7 18 1 3. + 0 + -0.0120040001347661 + 0.9395459890365601 + -0.1058949977159500 + <_> + + <_> + + + + <_>0 10 18 3 -1. + <_>0 11 18 1 3. + 0 + 0.0225159991532564 + 9.4480002298951149e-003 + -1.6799509525299072 + <_> + + <_> + + + + <_>3 16 18 4 -1. + <_>12 16 9 2 2. + <_>3 18 9 2 2. + 0 + -0.0198090001940727 + -1.0133639574050903 + 0.2414660006761551 + <_> + + <_> + + + + <_>4 6 14 6 -1. + <_>4 6 7 3 2. + <_>11 9 7 3 2. + 0 + 0.0158910006284714 + -0.3750759959220886 + 0.4661409854888916 + <_> + + <_> + + + + <_>13 0 2 18 -1. + <_>13 0 1 18 2. + 0 + -9.1420002281665802e-003 + -0.8048409819602966 + 0.1781699955463409 + <_> + + <_> + + + + <_>9 0 2 18 -1. + <_>10 0 1 18 2. + 0 + -4.4740000739693642e-003 + -1.0562069416046143 + 0.0733050033450127 + <_> + + <_> + + + + <_>5 7 15 10 -1. + <_>10 7 5 10 3. + 0 + 0.1274250000715256 + 0.2016559988260269 + -1.5467929840087891 + <_> + + <_> + + + + <_>1 20 21 4 -1. + <_>8 20 7 4 3. + 0 + 0.0477030016481876 + -0.3793779909610748 + 0.3788599967956543 + <_> + + <_> + + + + <_>10 5 5 18 -1. + <_>10 14 5 9 2. + 0 + 0.0536080002784729 + 0.2122049927711487 + -1.2399710416793823 + <_> + + <_> + + + + <_>0 2 24 6 -1. + <_>0 2 12 3 2. + <_>12 5 12 3 2. + 0 + -0.0396809987723827 + -1.0257550477981567 + 0.0512829981744289 + <_> + + <_> + + + + <_>1 1 22 8 -1. + <_>12 1 11 4 2. + <_>1 5 11 4 2. + 0 + -0.0673270002007484 + -1.0304750204086304 + 0.2300529927015305 + <_> + + <_> + + + + <_>4 0 15 9 -1. + <_>4 3 15 3 3. + 0 + 0.1333760023117065 + -0.2086900025606155 + 1.2272510528564453 + <_> + + <_> + + + + <_>0 0 24 19 -1. + <_>8 0 8 19 3. + 0 + -0.2091930061578751 + 0.8792989850044251 + -0.0442549996078014 + <_> + + <_> + + + + <_>2 21 18 3 -1. + <_>11 21 9 3 2. + 0 + -0.0655890032649040 + 1.0443429946899414 + -0.2168209999799728 + <_> + + <_> + + + + <_>9 7 10 4 -1. + <_>9 7 5 4 2. 
+ 0 + 0.0618829987943172 + 0.1379819959402084 + -1.9009059667587280 + <_> + + <_> + + + + <_>5 7 10 4 -1. + <_>10 7 5 4 2. + 0 + -0.0255789998918772 + -1.6607600450515747 + 5.8439997956156731e-003 + <_> + + <_> + + + + <_>17 8 6 16 -1. + <_>20 8 3 8 2. + <_>17 16 3 8 2. + 0 + -0.0348270013928413 + 0.7994040250778198 + -0.0824069976806641 + <_> + + <_> + + + + <_>1 15 20 4 -1. + <_>1 15 10 2 2. + <_>11 17 10 2 2. + 0 + -0.0182099994271994 + -0.9607399702072144 + 0.0663200020790100 + <_> + + <_> + + + + <_>14 15 10 6 -1. + <_>14 17 10 2 3. + 0 + 0.0150709999725223 + 0.1989939957857132 + -0.7643300294876099 + -4.0218091011047363 + 5 + -1 + <_> + + + <_> + + <_> + + + + <_>3 0 16 9 -1. + <_>3 3 16 3 3. + 0 + 0.0463249981403351 + -1.0362670421600342 + 0.8220149874687195 + <_> + + <_> + + + + <_>15 6 7 15 -1. + <_>15 11 7 5 3. + 0 + 0.0154069997370243 + -1.2327589988708496 + 0.2964769899845123 + <_> + + <_> + + + + <_>9 1 6 13 -1. + <_>11 1 2 13 3. + 0 + 0.0128089999780059 + -0.7585229873657227 + 0.5798550248146057 + <_> + + <_> + + + + <_>17 2 6 14 -1. + <_>17 2 3 14 2. + 0 + 0.0491509996354580 + -0.3898389935493469 + 0.8968030214309692 + <_> + + <_> + + + + <_>3 14 12 10 -1. + <_>3 14 6 5 2. + <_>9 19 6 5 2. + 0 + 0.0126210004091263 + -0.7179930210113525 + 0.5044090151786804 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -0.0187689997255802 + 0.5514760017395020 + -0.7055540084838867 + <_> + + <_> + + + + <_>1 2 6 14 -1. + <_>4 2 3 14 2. + 0 + 0.0419650003314018 + -0.4478209912776947 + 0.7098550200462341 + <_> + + <_> + + + + <_>10 4 5 12 -1. + <_>10 8 5 4 3. + 0 + -0.0514019988477230 + -1.0932120084762573 + 0.2670190036296845 + <_> + + <_> + + + + <_>0 17 24 5 -1. + <_>8 17 8 5 3. + 0 + -0.0709609985351563 + 0.8361840248107910 + -0.3831810057163239 + <_> + + <_> + + + + <_>15 7 5 12 -1. + <_>15 11 5 4 3. + 0 + 0.0167459994554520 + -0.2573310136795044 + 0.2596650123596191 + <_> + + <_> + + + + <_>3 1 6 12 -1. + <_>3 1 3 6 2. + <_>6 7 3 6 2. + 0 + -6.2400000169873238e-003 + 0.3163149952888489 + -0.5879690051078796 + <_> + + <_> + + + + <_>12 13 6 6 -1. + <_>12 16 6 3 2. + 0 + -0.0393979996442795 + -1.0491210222244263 + 0.1682240068912506 + <_> + + <_> + + + + <_>6 13 6 6 -1. + <_>6 16 6 3 2. + 0 + 0. + 0.1614419966936112 + -0.8787689805030823 + <_> + + <_> + + + + <_>14 6 3 16 -1. + <_>14 14 3 8 2. + 0 + -0.0223079994320869 + -0.6905350089073181 + 0.2360700070858002 + <_> + + <_> + + + + <_>1 12 13 6 -1. + <_>1 14 13 2 3. + 0 + 1.8919999711215496e-003 + 0.2498919963836670 + -0.5658329725265503 + <_> + + <_> + + + + <_>13 1 4 9 -1. + <_>13 1 2 9 2. + 0 + 1.0730000212788582e-003 + -0.5041580200195313 + 0.3837450146675110 + <_> + + <_> + + + + <_>7 0 9 6 -1. + <_>10 0 3 6 3. + 0 + 0.0392309986054897 + 0.0426190011203289 + -1.3875889778137207 + <_> + + <_> + + + + <_>12 2 6 9 -1. + <_>12 2 3 9 2. + 0 + 0.0622380003333092 + 0.1411940008401871 + -1.0688860416412354 + <_> + + <_> + + + + <_>6 2 6 9 -1. + <_>9 2 3 9 2. + 0 + 2.1399999968707561e-003 + -0.8962240219116211 + 0.1979639977216721 + <_> + + <_> + + + + <_>6 18 12 6 -1. + <_>6 20 12 2 3. + 0 + 9.1800000518560410e-004 + -0.4533729851245880 + 0.4353269934654236 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>9 6 2 9 3. + 0 + -6.9169998168945313e-003 + 0.3382279872894287 + -0.4479300081729889 + <_> + + <_> + + + + <_>7 7 12 3 -1. + <_>7 7 6 3 2. + 0 + -0.0238669998943806 + -0.7890859842300415 + 0.2251179963350296 + <_> + + <_> + + + + <_>8 3 8 21 -1. + <_>8 10 8 7 3. 
+ 0 + -0.1026280000805855 + -2.2831439971923828 + -5.3960001096129417e-003 + <_> + + <_> + + + + <_>7 4 10 12 -1. + <_>7 8 10 4 3. + 0 + -9.5239998772740364e-003 + 0.3934670090675354 + -0.5224220156669617 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>0 4 6 3 3. + 0 + 0.0398770011961460 + 0.0327990017831326 + -1.5079489946365356 + <_> + + <_> + + + + <_>15 2 2 20 -1. + <_>15 2 1 20 2. + 0 + -0.0131449997425079 + -1.0839990377426147 + 0.1848240047693253 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>0 6 6 3 3. + 0 + -0.0505909994244576 + -1.8822289705276489 + -2.2199999075382948e-003 + <_> + + <_> + + + + <_>15 3 2 21 -1. + <_>15 3 1 21 2. + 0 + 0.0249170009046793 + 0.1459340006113052 + -2.2196519374847412 + <_> + + <_> + + + + <_>7 0 2 23 -1. + <_>8 0 1 23 2. + 0 + -7.6370001770555973e-003 + -1.0164569616317749 + 0.0587970018386841 + <_> + + <_> + + + + <_>15 8 9 4 -1. + <_>15 10 9 2 2. + 0 + 0.0429119989275932 + 0.1544300019741058 + -1.1843889951705933 + <_> + + <_> + + + + <_>0 8 9 4 -1. + <_>0 10 9 2 2. + 0 + 2.3000000510364771e-004 + -0.7730579972267151 + 0.1218990013003349 + <_> + + <_> + + + + <_>8 14 9 6 -1. + <_>8 16 9 2 3. + 0 + 9.0929996222257614e-003 + -0.1145009994506836 + 0.7109130024909973 + <_> + + <_> + + + + <_>0 14 9 6 -1. + <_>0 16 9 2 3. + 0 + 0.0111450003460050 + 0.0700009986758232 + -1.0534820556640625 + <_> + + <_> + + + + <_>3 10 18 4 -1. + <_>9 10 6 4 3. + 0 + -0.0524530000984669 + -1.7594360113143921 + 0.1952379941940308 + <_> + + <_> + + + + <_>0 0 24 19 -1. + <_>8 0 8 19 3. + 0 + -0.2302069962024689 + 0.9584029912948608 + -0.2504569888114929 + <_> + + <_> + + + + <_>9 1 8 12 -1. + <_>9 7 8 6 2. + 0 + -0.0163659993559122 + 0.4673190116882324 + -0.2110839933156967 + <_> + + <_> + + + + <_>10 6 4 10 -1. + <_>12 6 2 10 2. + 0 + -0.0172080006450415 + 0.7083569765090942 + -0.2801829874515533 + <_> + + <_> + + + + <_>7 9 10 12 -1. + <_>12 9 5 6 2. + <_>7 15 5 6 2. + 0 + -0.0366480015218258 + -1.1013339757919312 + 0.2434110045433044 + <_> + + <_> + + + + <_>5 0 3 19 -1. + <_>6 0 1 19 3. + 0 + -0.0103049995377660 + -1.0933129787445068 + 0.0562589988112450 + <_> + + <_> + + + + <_>14 0 6 10 -1. + <_>16 0 2 10 3. + 0 + -0.0137130003422499 + -0.2643809914588928 + 0.1982100009918213 + <_> + + <_> + + + + <_>2 0 6 12 -1. + <_>2 0 3 6 2. + <_>5 6 3 6 2. + 0 + 0.0293080005794764 + -0.2214239984750748 + 1.0525950193405151 + <_> + + <_> + + + + <_>0 11 24 2 -1. + <_>0 12 24 1 2. + 0 + 0.0240770000964403 + 0.1848569959402084 + -1.7203969955444336 + <_> + + <_> + + + + <_>4 9 13 4 -1. + <_>4 11 13 2 2. + 0 + 6.1280000954866409e-003 + -0.9272149801254273 + 0.0587529987096787 + <_> + + <_> + + + + <_>9 8 6 9 -1. + <_>9 11 6 3 3. + 0 + -0.0223779994994402 + 1.9646559953689575 + 0.0277859997004271 + <_> + + <_> + + + + <_>0 12 16 4 -1. + <_>0 14 16 2 2. + 0 + -7.0440000854432583e-003 + 0.2142760008573532 + -0.4840759932994843 + <_> + + <_> + + + + <_>18 12 6 9 -1. + <_>18 15 6 3 3. + 0 + -0.0406030006706715 + -1.1754349470138550 + 0.1606120020151138 + <_> + + <_> + + + + <_>0 12 6 9 -1. + <_>0 15 6 3 3. + 0 + -0.0244660004973412 + -1.1239900588989258 + 0.0411100015044212 + <_> + + <_> + + + + <_>8 7 10 4 -1. + <_>8 7 5 4 2. + 0 + 2.5309999473392963e-003 + -0.1716970056295395 + 0.3217880129814148 + <_> + + <_> + + + + <_>8 7 6 9 -1. + <_>10 7 2 9 3. + 0 + -0.0195889994502068 + 0.8272020220756531 + -0.2637670040130615 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -0.0296359993517399 + -1.1524770259857178 + 0.1499930024147034 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. 
+ 0 + -0.0150300003588200 + -1.0491830110549927 + 0.0401609987020493 + <_> + + <_> + + + + <_>12 3 6 15 -1. + <_>14 3 2 15 3. + 0 + -0.0607150010764599 + -1.0903840065002441 + 0.1533080041408539 + <_> + + <_> + + + + <_>6 3 6 15 -1. + <_>8 3 2 15 3. + 0 + -0.0127900000661612 + 0.4224860072135925 + -0.4239920079708099 + <_> + + <_> + + + + <_>15 2 9 4 -1. + <_>15 4 9 2 2. + 0 + -0.0202479995787144 + -0.9186699986457825 + 0.1848569959402084 + <_> + + <_> + + + + <_>5 10 6 7 -1. + <_>8 10 3 7 2. + 0 + -0.0306839998811483 + -1.5958670377731323 + 2.5760000571608543e-003 + <_> + + <_> + + + + <_>9 14 6 10 -1. + <_>9 19 6 5 2. + 0 + -0.0207180008292198 + -0.6629999876022339 + 0.3103719949722290 + <_> + + <_> + + + + <_>7 13 5 8 -1. + <_>7 17 5 4 2. + 0 + -1.7290000105276704e-003 + 0.1918340027332306 + -0.6508499979972839 + <_> + + <_> + + + + <_>14 5 3 16 -1. + <_>14 13 3 8 2. + 0 + -0.0313940010964870 + -0.6364300251007080 + 0.1540839970111847 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>2 18 18 1 3. + 0 + 0.0190030001103878 + -0.1891939938068390 + 1.5294510126113892 + <_> + + <_> + + + + <_>5 18 19 3 -1. + <_>5 19 19 1 3. + 0 + 6.1769997701048851e-003 + -0.1059790030121803 + 0.6485959887504578 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + -0.0101659996435046 + -1.0802700519561768 + 0.0371760018169880 + <_> + + <_> + + + + <_>12 4 3 18 -1. + <_>13 4 1 18 3. + 0 + -1.4169999631121755e-003 + 0.3415749967098236 + -0.0977379977703094 + <_> + + <_> + + + + <_>9 4 3 18 -1. + <_>10 4 1 18 3. + 0 + -4.0799998678267002e-003 + 0.4762459993362427 + -0.3436630070209503 + <_> + + <_> + + + + <_>3 3 18 9 -1. + <_>9 3 6 9 3. + 0 + -0.0440969988703728 + 0.9763429760932922 + -0.0191730000078678 + <_> + + <_> + + + + <_>6 1 6 14 -1. + <_>8 1 2 14 3. + 0 + -0.0606699995696545 + -2.1752851009368896 + -0.0289259999990463 + <_> + + <_> + + + + <_>12 16 9 6 -1. + <_>12 19 9 3 2. + 0 + -0.0329319983720779 + -0.6438310146331787 + 0.1649409979581833 + <_> + + <_> + + + + <_>1 3 20 16 -1. + <_>1 3 10 8 2. + <_>11 11 10 8 2. + 0 + -0.1472280025482178 + -1.4745830297470093 + 2.5839998852461576e-003 + <_> + + <_> + + + + <_>12 5 6 12 -1. + <_>15 5 3 6 2. + <_>12 11 3 6 2. + 0 + -0.0119300000369549 + 0.4244140088558197 + -0.1771260052919388 + <_> + + <_> + + + + <_>1 2 22 16 -1. + <_>1 2 11 8 2. + <_>12 10 11 8 2. + 0 + 0.1451790034770966 + 0.0254449993371964 + -1.2779400348663330 + <_> + + <_> + + + + <_>10 14 5 10 -1. + <_>10 19 5 5 2. + 0 + 0.0514479987323284 + 0.1567839980125427 + -1.5188430547714233 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>3 22 18 1 3. + 0 + 3.1479999888688326e-003 + -0.4042440056800842 + 0.3242970108985901 + <_> + + <_> + + + + <_>10 14 6 10 -1. + <_>12 14 2 10 3. + 0 + -0.0436000004410744 + -1.9932260513305664 + 0.1501860022544861 + -3.8832089900970459 + 6 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>8 2 8 4 3. + 0 + 0.1289959996938705 + -0.6216199994087219 + 1.1116520166397095 + <_> + + <_> + + + + <_>6 4 12 9 -1. + <_>6 7 12 3 3. + 0 + -0.0912619978189468 + 1.0143059492111206 + -0.6133520007133484 + <_> + + <_> + + + + <_>6 6 12 5 -1. + <_>10 6 4 5 3. + 0 + 0.0142719997093081 + -1.0261659622192383 + 0.3977999985218048 + <_> + + <_> + + + + <_>5 8 14 12 -1. + <_>5 12 14 4 3. + 0 + 0.0328899994492531 + -1.1386079788208008 + 0.2869080007076263 + <_> + + <_> + + + + <_>4 14 8 10 -1. + <_>4 14 4 5 2. + <_>8 19 4 5 2. + 0 + 0.0125900004059076 + -0.5664560198783875 + 0.4517239928245544 + <_> + + <_> + + + + <_>11 6 5 14 -1. + <_>11 13 5 7 2. 
+ 0 + 0.0146610001102090 + 0.3050599992275238 + -0.6812959909439087 + <_> + + <_> + + + + <_>7 6 3 16 -1. + <_>7 14 3 8 2. + 0 + -0.0335559993982315 + -1.7208939790725708 + 0.0614390000700951 + <_> + + <_> + + + + <_>3 7 18 8 -1. + <_>9 7 6 8 3. + 0 + 0.1425269991159439 + 0.2319220006465912 + -1.7297149896621704 + <_> + + <_> + + + + <_>2 3 20 2 -1. + <_>2 4 20 1 2. + 0 + -6.2079997733235359e-003 + -1.2163300514221191 + 0.1216019988059998 + <_> + + <_> + + + + <_>3 12 19 6 -1. + <_>3 14 19 2 3. + 0 + 0.0181789994239807 + 0.3255369961261749 + -0.8100399971008301 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + 0.0250369999557734 + -0.3169879913330078 + 0.6736140251159668 + <_> + + <_> + + + + <_>16 6 6 14 -1. + <_>16 6 3 14 2. + 0 + 0.0465609990060329 + -0.1108980029821396 + 0.8408250212669373 + <_> + + <_> + + + + <_>7 9 6 12 -1. + <_>9 9 2 12 3. + 0 + -8.9999996125698090e-003 + 0.3957450091838837 + -0.4762459993362427 + <_> + + <_> + + + + <_>18 6 6 18 -1. + <_>21 6 3 9 2. + <_>18 15 3 9 2. + 0 + 0.0408059991896153 + -1.8000000272877514e-004 + 0.9457070231437683 + <_> + + <_> + + + + <_>0 6 6 18 -1. + <_>0 6 3 9 2. + <_>3 15 3 9 2. + 0 + -0.0342219993472099 + 0.7520629763603210 + -0.3153150081634522 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>18 5 6 3 3. + 0 + -0.0397160016000271 + -0.8313959836959839 + 0.1774439960718155 + <_> + + <_> + + + + <_>3 18 15 6 -1. + <_>3 20 15 2 3. + 0 + 2.5170000735670328e-003 + -0.5937799811363220 + 0.2465700060129166 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>18 5 6 3 3. + 0 + 0.0274289995431900 + 0.1599839925765991 + -0.4278199970722199 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 0.0349860005080700 + 0.0350559987127781 + -1.5988600254058838 + <_> + + <_> + + + + <_>5 10 18 2 -1. + <_>5 11 18 1 2. + 0 + 4.4970000162720680e-003 + -0.5203430056571960 + 0.3782829940319061 + <_> + + <_> + + + + <_>6 0 12 6 -1. + <_>6 2 12 2 3. + 0 + 2.7699999045580626e-003 + -0.5318260192871094 + 0.2495100051164627 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + 0.0351740010082722 + 0.1998340040445328 + -1.4446129798889160 + <_> + + <_> + + + + <_>8 0 6 9 -1. + <_>10 0 2 9 3. + 0 + 0.0259709991514683 + 0.0444269999861717 + -1.3622980117797852 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + -0.0157839991152287 + -0.9102039933204651 + 0.2719030082225800 + <_> + + <_> + + + + <_>3 6 13 6 -1. + <_>3 8 13 2 3. + 0 + -7.5880000367760658e-003 + 0.0920649990439415 + -0.8162890076637268 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + 0.0207540001720190 + 0.2118570059537888 + -0.7472900152206421 + <_> + + <_> + + + + <_>2 5 6 15 -1. + <_>5 5 3 15 2. + 0 + 0.0598290003836155 + -0.2730109989643097 + 0.8092330098152161 + <_> + + <_> + + + + <_>8 8 9 6 -1. + <_>11 8 3 6 3. + 0 + 0.0390390008687973 + -0.1043229997158051 + 0.8622620105743408 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>8 13 3 7 2. + 0 + 0.0216659996658564 + 0.0627090036869049 + -0.9889429807662964 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + -0.0274969991296530 + -0.9269099831581116 + 0.1558630019426346 + <_> + + <_> + + + + <_>4 12 10 4 -1. + <_>9 12 5 4 2. + 0 + 0.0104620000347495 + 0.1341809928417206 + -0.7038639783859253 + <_> + + <_> + + + + <_>13 1 4 19 -1. + <_>13 1 2 19 2. + 0 + 0.0248709991574287 + 0.1970670074224472 + -0.4026330113410950 + <_> + + <_> + + + + <_>7 1 4 19 -1. + <_>9 1 2 19 2. + 0 + -0.0160360001027584 + -1.1409829854965210 + 0.0739979967474937 + <_> + + <_> + + + + <_>18 9 6 9 -1. + <_>18 12 6 3 3. 
+ 0 + 0.0486270003020763 + 0.1699039936065674 + -0.7215219736099243 + <_> + + <_> + + + + <_>1 21 18 3 -1. + <_>1 22 18 1 3. + 0 + 1.2619999470189214e-003 + -0.4738979935646057 + 0.2625499963760376 + <_> + + <_> + + + + <_>14 13 10 9 -1. + <_>14 16 10 3 3. + 0 + -0.0880350023508072 + -2.1606519222259521 + 0.1455480009317398 + <_> + + <_> + + + + <_>1 13 22 4 -1. + <_>1 13 11 2 2. + <_>12 15 11 2 2. + 0 + 0.0183569993823767 + 0.0447509996592999 + -1.0766370296478271 + <_> + + <_> + + + + <_>4 6 16 6 -1. + <_>12 6 8 3 2. + <_>4 9 8 3 2. + 0 + 0.0352750010788441 + -0.0329190008342266 + 1.2153890132904053 + <_> + + <_> + + + + <_>1 0 18 22 -1. + <_>1 0 9 11 2. + <_>10 11 9 11 2. + 0 + -0.2039290070533752 + -1.3187999725341797 + 0.0155039997771382 + <_> + + <_> + + + + <_>10 7 8 14 -1. + <_>14 7 4 7 2. + <_>10 14 4 7 2. + 0 + -0.0166190005838871 + 0.3685019910335541 + -0.1528369933366776 + <_> + + <_> + + + + <_>0 4 6 20 -1. + <_>0 4 3 10 2. + <_>3 14 3 10 2. + 0 + 0.0377390012145042 + -0.2572779953479767 + 0.7065529823303223 + <_> + + <_> + + + + <_>15 0 6 9 -1. + <_>17 0 2 9 3. + 0 + 2.2720000706613064e-003 + -0.0776029974222183 + 0.3336780071258545 + <_> + + <_> + + + + <_>3 0 6 9 -1. + <_>5 0 2 9 3. + 0 + -0.0148029997944832 + -0.7852479815483093 + 0.0769340023398399 + <_> + + <_> + + + + <_>15 12 6 12 -1. + <_>18 12 3 6 2. + <_>15 18 3 6 2. + 0 + -0.0483190007507801 + 1.7022320032119751 + 0.0497220009565353 + <_> + + <_> + + + + <_>3 12 6 12 -1. + <_>3 12 3 6 2. + <_>6 18 3 6 2. + 0 + -0.0295390002429485 + 0.7767069935798645 + -0.2453429996967316 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + -0.0461690016090870 + -1.4922779798507690 + 0.1234000027179718 + <_> + + <_> + + + + <_>0 12 9 6 -1. + <_>0 14 9 2 3. + 0 + -0.0280649997293949 + -2.1345369815826416 + -0.0257970001548529 + <_> + + <_> + + + + <_>4 14 19 3 -1. + <_>4 15 19 1 3. + 0 + -5.7339998893439770e-003 + 0.5698260068893433 + -0.1205660030245781 + <_> + + <_> + + + + <_>2 13 19 3 -1. + <_>2 14 19 1 3. + 0 + -0.0101110003888607 + 0.6791139841079712 + -0.2663800120353699 + <_> + + <_> + + + + <_>14 15 10 6 -1. + <_>14 17 10 2 3. + 0 + 0.0113599998876452 + 0.2478979974985123 + -0.6449300050735474 + <_> + + <_> + + + + <_>6 0 10 12 -1. + <_>6 0 5 6 2. + <_>11 6 5 6 2. + 0 + 0.0518090017139912 + 0.0147160002961755 + -1.2395579814910889 + <_> + + <_> + + + + <_>17 1 6 12 -1. + <_>20 1 3 6 2. + <_>17 7 3 6 2. + 0 + 0.0332919992506504 + -8.2559995353221893e-003 + 1.0168470144271851 + <_> + + <_> + + + + <_>1 1 6 12 -1. + <_>1 1 3 6 2. + <_>4 7 3 6 2. + 0 + -0.0144940000027418 + 0.4506680071353912 + -0.3625099956989288 + <_> + + <_> + + + + <_>16 14 6 9 -1. + <_>16 17 6 3 3. + 0 + -0.0342219993472099 + -0.9529250264167786 + 0.2068459987640381 + <_> + + <_> + + + + <_>7 3 9 12 -1. + <_>7 9 9 6 2. + 0 + -0.0806540027260780 + -2.0139501094818115 + -0.0230849999934435 + <_> + + <_> + + + + <_>12 1 4 12 -1. + <_>12 7 4 6 2. + 0 + -8.9399999706074595e-004 + 0.3957200050354004 + -0.2935130000114441 + <_> + + <_> + + + + <_>4 0 14 8 -1. + <_>4 4 14 4 2. + 0 + 0.0971620008349419 + -0.2498030066490173 + 1.0859220027923584 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + 0.0366140007972717 + -0.0578440017998219 + 1.2162159681320190 + <_> + + <_> + + + + <_>2 10 18 3 -1. + <_>8 10 6 3 3. + 0 + 0.0516939982771873 + 0.0430629998445511 + -1.0636160373687744 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + -0.0245570000261068 + -0.4894680082798004 + 0.1718290001153946 + <_> + + <_> + + + + <_>0 1 21 23 -1. 
+ <_>7 1 7 23 3. + 0 + 0.3273679912090302 + -0.2968859970569611 + 0.5179830193519592 + <_> + + <_> + + + + <_>6 9 17 4 -1. + <_>6 11 17 2 2. + 0 + 7.6959999278187752e-003 + -0.5980589985847473 + 0.2480320036411285 + <_> + + <_> + + + + <_>1 0 11 18 -1. + <_>1 6 11 6 3. + 0 + 0.1617220044136047 + -0.0296139996498823 + -2.3162529468536377 + <_> + + <_> + + + + <_>6 15 13 6 -1. + <_>6 17 13 2 3. + 0 + -4.7889999113976955e-003 + 0.3745790123939514 + -0.3277919888496399 + <_> + + <_> + + + + <_>0 15 9 6 -1. + <_>0 17 9 2 3. + 0 + -0.0184029992669821 + -0.9969270229339600 + 0.0729480013251305 + <_> + + <_> + + + + <_>8 7 15 4 -1. + <_>13 7 5 4 3. + 0 + 0.0776650011539459 + 0.1417569965124130 + -1.7238730192184448 + <_> + + <_> + + + + <_>9 12 6 9 -1. + <_>9 15 6 3 3. + 0 + 0.0189210008829832 + -0.2127310037612915 + 1.0165189504623413 + <_> + + <_> + + + + <_>6 8 18 3 -1. + <_>12 8 6 3 3. + 0 + -0.0793979987502098 + -1.3164349794387817 + 0.1498199999332428 + <_> + + <_> + + + + <_>0 14 24 4 -1. + <_>8 14 8 4 3. + 0 + -0.0680370032787323 + 0.4942199885845184 + -0.2909100055694580 + <_> + + <_> + + + + <_>16 10 3 12 -1. + <_>16 16 3 6 2. + 0 + -6.1010001227259636e-003 + 0.4243049919605255 + -0.3389930129051209 + <_> + + <_> + + + + <_>0 3 24 3 -1. + <_>0 4 24 1 3. + 0 + 0.0319270007312298 + -0.0310469996184111 + -2.3459999561309814 + <_> + + <_> + + + + <_>14 17 10 6 -1. + <_>14 19 10 2 3. + 0 + -0.0298439990729094 + -0.7898960113525391 + 0.1541769951581955 + <_> + + <_> + + + + <_>1 13 18 3 -1. + <_>7 13 6 3 3. + 0 + -0.0805419981479645 + -2.2509229183197021 + -0.0309069994837046 + <_> + + <_> + + + + <_>5 0 18 9 -1. + <_>5 3 18 3 3. + 0 + 3.8109999150037766e-003 + -0.2557730078697205 + 0.2378550022840500 + <_> + + <_> + + + + <_>4 3 16 9 -1. + <_>4 6 16 3 3. + 0 + 0.0336470007896423 + -0.2254139930009842 + 0.9230740070343018 + <_> + + <_> + + + + <_>16 5 3 12 -1. + <_>16 11 3 6 2. + 0 + 8.2809999585151672e-003 + -0.2889620065689087 + 0.3104619979858398 + <_> + + <_> + + + + <_>0 7 18 4 -1. + <_>6 7 6 4 3. + 0 + 0.1010439991950989 + -0.0348640009760857 + -2.7102620601654053 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + -0.0100090000778437 + 0.5971540212631226 + -0.0338310003280640 + <_> + + <_> + + + + <_>9 8 6 10 -1. + <_>11 8 2 10 3. + 0 + 7.1919998154044151e-003 + -0.4773800075054169 + 0.2268600016832352 + <_> + + <_> + + + + <_>9 15 6 9 -1. + <_>11 15 2 9 3. + 0 + 0.0249690003693104 + 0.2287770062685013 + -1.0435529947280884 + <_> + + <_> + + + + <_>3 1 18 21 -1. + <_>12 1 9 21 2. + 0 + 0.2790800034999847 + -0.2581810057163239 + 0.7678049802780151 + <_> + + <_> + + + + <_>6 8 12 7 -1. + <_>6 8 6 7 2. + 0 + -0.0442130006849766 + -0.5979800224304199 + 0.2803989946842194 + <_> + + <_> + + + + <_>8 5 6 9 -1. + <_>10 5 2 9 3. + 0 + -0.0141369998455048 + 0.7098730206489563 + -0.2564519941806793 + -3.8424909114837646 + 7 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>8 2 8 4 3. + 0 + 0.1377120018005371 + -0.5587059855461121 + 1.0953769683837891 + <_> + + <_> + + + + <_>14 7 5 12 -1. + <_>14 11 5 4 3. + 0 + 0.0344609990715981 + -0.7117189764976502 + 0.5289959907531738 + <_> + + <_> + + + + <_>5 7 5 12 -1. + <_>5 11 5 4 3. + 0 + 0.0185800008475780 + -1.1157519817352295 + 0.4059399962425232 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0250419992953539 + -0.4089249968528748 + 0.7412999868392944 + <_> + + <_> + + + + <_>0 1 6 17 -1. + <_>3 1 3 17 2. + 0 + 0.0571790002286434 + -0.3805429935455322 + 0.7364770174026489 + <_> + + <_> + + + + <_>3 1 19 9 -1. 
+ <_>3 4 19 3 3. + 0 + 0.0149320000782609 + -0.6994550228118897 + 0.3795099854469299 + <_> + + <_> + + + + <_>3 18 12 6 -1. + <_>3 18 6 3 2. + <_>9 21 6 3 2. + 0 + 8.8900001719594002e-003 + -0.5455859899520874 + 0.3633249998092651 + <_> + + <_> + + + + <_>20 4 4 19 -1. + <_>20 4 2 19 2. + 0 + 0.0304359998553991 + -0.1012459993362427 + 0.7958589792251587 + <_> + + <_> + + + + <_>0 16 10 7 -1. + <_>5 16 5 7 2. + 0 + -0.0441600009799004 + 0.8441089987754822 + -0.3297640085220337 + <_> + + <_> + + + + <_>8 7 10 12 -1. + <_>13 7 5 6 2. + <_>8 13 5 6 2. + 0 + 0.0184610001742840 + 0.2632659971714020 + -0.9673650264739990 + <_> + + <_> + + + + <_>6 7 10 12 -1. + <_>6 7 5 6 2. + <_>11 13 5 6 2. + 0 + 0.0106149995699525 + 0.1525190025568008 + -1.0589870214462280 + <_> + + <_> + + + + <_>9 2 9 6 -1. + <_>12 2 3 6 3. + 0 + -0.0459740012884140 + -1.9918340444564819 + 0.1362909972667694 + <_> + + <_> + + + + <_>1 20 21 4 -1. + <_>8 20 7 4 3. + 0 + 0.0829000025987625 + -0.3203719854354858 + 0.6030420064926148 + <_> + + <_> + + + + <_>9 12 9 6 -1. + <_>9 14 9 2 3. + 0 + -8.9130001142621040e-003 + 0.5958660244941711 + -0.2113959938287735 + <_> + + <_> + + + + <_>7 2 9 6 -1. + <_>10 2 3 6 3. + 0 + 0.0428140014410019 + 0.0229250006377697 + -1.4679330587387085 + <_> + + <_> + + + + <_>13 0 4 14 -1. + <_>13 0 2 14 2. + 0 + -8.7139997631311417e-003 + -0.4398950040340424 + 0.2043969929218292 + <_> + + <_> + + + + <_>7 0 4 14 -1. + <_>9 0 2 14 2. + 0 + -4.3390002101659775e-003 + -0.8906679749488831 + 0.1046999990940094 + <_> + + <_> + + + + <_>14 15 9 6 -1. + <_>14 17 9 2 3. + 0 + 8.0749997869133949e-003 + 0.2116419970989227 + -0.4023160040378571 + <_> + + <_> + + + + <_>2 8 18 5 -1. + <_>8 8 6 5 3. + 0 + 0.0967390015721321 + 0.0133199999108911 + -1.6085360050201416 + <_> + + <_> + + + + <_>18 3 6 11 -1. + <_>20 3 2 11 3. + 0 + -0.0305369999259710 + 1.0063740015029907 + -0.1341329962015152 + <_> + + <_> + + + + <_>6 5 11 14 -1. + <_>6 12 11 7 2. + 0 + -0.0608559995889664 + -1.4689979553222656 + 9.4240000471472740e-003 + <_> + + <_> + + + + <_>18 4 6 9 -1. + <_>18 7 6 3 3. + 0 + -0.0381620004773140 + -0.8163639903068543 + 0.2617120146751404 + <_> + + <_> + + + + <_>7 6 9 6 -1. + <_>7 8 9 2 3. + 0 + -9.6960002556443214e-003 + 0.1156169995665550 + -0.7169319987297058 + <_> + + <_> + + + + <_>18 4 6 9 -1. + <_>18 7 6 3 3. + 0 + 0.0489029996097088 + 0.1305049955844879 + -1.6448370218276978 + <_> + + <_> + + + + <_>0 4 6 9 -1. + <_>0 7 6 3 3. + 0 + -0.0416119992733002 + -1.1795840263366699 + 0.0250170007348061 + <_> + + <_> + + + + <_>9 4 9 4 -1. + <_>9 6 9 2 2. + 0 + -0.0201880000531673 + 0.6318820118904114 + -0.1049040034413338 + <_> + + <_> + + + + <_>0 22 19 2 -1. + <_>0 23 19 1 2. + 0 + -9.7900000400841236e-004 + 0.1850779950618744 + -0.5356590151786804 + <_> + + <_> + + + + <_>17 14 6 9 -1. + <_>17 17 6 3 3. + 0 + -0.0336220003664494 + -0.9312760233879089 + 0.2007150053977966 + <_> + + <_> + + + + <_>1 14 6 9 -1. + <_>1 17 6 3 3. + 0 + 0.0194559991359711 + 0.0380290001630783 + -1.0112210512161255 + <_> + + <_> + + + + <_>14 11 4 9 -1. + <_>14 11 2 9 2. + 0 + -3.1800000579096377e-004 + 0.3645769953727722 + -0.2761090099811554 + <_> + + <_> + + + + <_>6 11 4 9 -1. + <_>8 11 2 9 2. + 0 + -3.8899999344721437e-004 + 0.1966589987277985 + -0.5341050028800964 + <_> + + <_> + + + + <_>3 9 18 7 -1. + <_>9 9 6 7 3. + 0 + -0.0934960022568703 + -1.6772350072860718 + 0.2072709947824478 + <_> + + <_> + + + + <_>9 12 6 10 -1. + <_>9 17 6 5 2. 
+ 0 + -0.0778779983520508 + -3.0760629177093506 + -0.0358039997518063 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + 0.0169479995965958 + 0.2144739925861359 + -0.7137629985809326 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + -0.0214590001851320 + -1.1468060016632080 + 0.0158559996634722 + <_> + + <_> + + + + <_>6 17 18 3 -1. + <_>6 18 18 1 3. + 0 + -0.0128659997135401 + 0.8381239771842957 + -0.0659440010786057 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>1 18 18 1 3. + 0 + 7.8220004215836525e-003 + -0.2802680134773254 + 0.7937690019607544 + <_> + + <_> + + + + <_>10 6 11 12 -1. + <_>10 12 11 6 2. + 0 + 0.1029440015554428 + 0.1783230006694794 + -0.6841220259666443 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>5 6 7 3 2. + <_>12 9 7 3 2. + 0 + -0.0374879986047745 + 0.9618999958038330 + -0.2173559963703156 + <_> + + <_> + + + + <_>5 4 15 4 -1. + <_>5 6 15 2 2. + 0 + 0.0255059991031885 + 0.0101039996370673 + 1.2461110353469849 + <_> + + <_> + + + + <_>0 0 22 2 -1. + <_>0 1 22 1 2. + 0 + 6.6700001480057836e-004 + -0.5348820090293884 + 0.1474629938602448 + <_> + + <_> + + + + <_>0 0 24 24 -1. + <_>8 0 8 24 3. + 0 + -0.2886790037155151 + 0.8217279911041260 + -0.0149480002000928 + <_> + + <_> + + + + <_>1 15 18 4 -1. + <_>10 15 9 4 2. + 0 + 0.0912949964404106 + -0.1960539966821671 + 1.0803170204162598 + <_> + + <_> + + + + <_>6 8 12 9 -1. + <_>6 11 12 3 3. + 0 + 0.1205660030245781 + -0.0238489992916584 + 1.1392610073089600 + <_> + + <_> + + + + <_>4 12 7 12 -1. + <_>4 16 7 4 3. + 0 + -0.0737750008702278 + -1.3583840131759644 + -4.2039998807013035e-003 + <_> + + <_> + + + + <_>1 2 22 6 -1. + <_>12 2 11 3 2. + <_>1 5 11 3 2. + 0 + -0.0331280007958412 + -0.6448320150375366 + 0.2414219975471497 + <_> + + <_> + + + + <_>5 20 14 3 -1. + <_>12 20 7 3 2. + 0 + -0.0439370013773441 + 0.8428540229797363 + -0.2062480002641678 + <_> + + <_> + + + + <_>0 0 24 16 -1. + <_>12 0 12 8 2. + <_>0 8 12 8 2. + 0 + 0.1811019927263260 + 0.1921209990978241 + -1.2222139835357666 + <_> + + <_> + + + + <_>3 13 18 4 -1. + <_>3 13 9 2 2. + <_>12 15 9 2 2. + 0 + -0.0118509996682405 + -0.7267739772796631 + 0.0526879988610744 + <_> + + <_> + + + + <_>2 10 22 2 -1. + <_>2 11 22 1 2. + 0 + 4.5920000411570072e-003 + -0.3630520105361939 + 0.2922379970550537 + <_> + + <_> + + + + <_>6 3 11 8 -1. + <_>6 7 11 4 2. + 0 + 7.0620002225041389e-003 + 0.0581160001456738 + -0.6716160178184509 + <_> + + <_> + + + + <_>14 5 6 6 -1. + <_>14 8 6 3 2. + 0 + -0.0237150005996227 + 0.4714210033416748 + 0.0185800008475780 + <_> + + <_> + + + + <_>0 7 24 6 -1. + <_>0 9 24 2 3. + 0 + -0.0671719983220100 + -1.1331889629364014 + 0.0237809997051954 + <_> + + <_> + + + + <_>14 0 10 10 -1. + <_>19 0 5 5 2. + <_>14 5 5 5 2. + 0 + -0.0653100013732910 + 0.9825350046157837 + 0.0283620003610849 + <_> + + <_> + + + + <_>0 0 10 10 -1. + <_>0 0 5 5 2. + <_>5 5 5 5 2. + 0 + 0.0227910000830889 + -0.2821370065212250 + 0.5899339914321899 + <_> + + <_> + + + + <_>0 1 24 4 -1. + <_>12 1 12 2 2. + <_>0 3 12 2 2. + 0 + -0.0190379992127419 + -0.6371150016784668 + 0.2651459872722626 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -6.8689999170601368e-003 + 0.3748730123043060 + -0.3323209881782532 + <_> + + <_> + + + + <_>5 15 16 6 -1. + <_>13 15 8 3 2. + <_>5 18 8 3 2. + 0 + -0.0401460006833076 + -1.3048729896545410 + 0.1572429984807968 + <_> + + <_> + + + + <_>3 15 16 6 -1. + <_>3 15 8 3 2. + <_>11 18 8 3 2. + 0 + -0.0405309982597828 + -2.0458049774169922 + -0.0269259996712208 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. 
+ 0 + -0.0122539997100830 + 0.7764940261840820 + -0.0429710000753403 + <_> + + <_> + + + + <_>0 13 21 10 -1. + <_>0 18 21 5 2. + 0 + -0.0272199995815754 + 0.1742440015077591 + -0.4460090100765228 + <_> + + <_> + + + + <_>13 0 6 24 -1. + <_>15 0 2 24 3. + 0 + -0.0883660018444061 + -1.5036419630050659 + 0.1428990066051483 + <_> + + <_> + + + + <_>7 4 6 11 -1. + <_>9 4 2 11 3. + 0 + -7.9159997403621674e-003 + 0.2866669893264771 + -0.3792369961738586 + <_> + + <_> + + + + <_>9 5 9 6 -1. + <_>12 5 3 6 3. + 0 + -0.0419600009918213 + 1.3846950531005859 + 0.0650269985198975 + <_> + + <_> + + + + <_>1 4 2 20 -1. + <_>1 14 2 10 2. + 0 + 0.0456629991531372 + -0.2245229929685593 + 0.7952100038528442 + <_> + + <_> + + + + <_>13 0 6 24 -1. + <_>15 0 2 24 3. + 0 + -0.1409060060977936 + -1.5879319906234741 + 0.1135900020599365 + <_> + + <_> + + + + <_>5 0 6 24 -1. + <_>7 0 2 24 3. + 0 + -0.0592160001397133 + -1.1945960521697998 + -7.1640000678598881e-003 + <_> + + <_> + + + + <_>16 7 6 14 -1. + <_>19 7 3 7 2. + <_>16 14 3 7 2. + 0 + 4.3390002101659775e-003 + -0.1552869975566864 + 0.4066449999809265 + <_> + + <_> + + + + <_>4 7 4 12 -1. + <_>6 7 2 12 2. + 0 + -2.0369999110698700e-003 + 0.2592790126800537 + -0.3836829960346222 + <_> + + <_> + + + + <_>0 5 24 14 -1. + <_>8 5 8 14 3. + 0 + 0.2751649916172028 + -0.0884979963302612 + 0.7678750157356262 + <_> + + <_> + + + + <_>5 13 10 6 -1. + <_>5 15 10 2 3. + 0 + -0.0266019999980927 + 0.7502449750900269 + -0.2262199968099594 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + 0.0409060008823872 + 0.1215860024094582 + -1.4566910266876221 + <_> + + <_> + + + + <_>2 7 6 14 -1. + <_>2 7 3 7 2. + <_>5 14 3 7 2. + 0 + 5.5320002138614655e-003 + -0.3661150038242340 + 0.2596859931945801 + <_> + + <_> + + + + <_>15 2 9 15 -1. + <_>18 2 3 15 3. + 0 + 0.0318790003657341 + -0.0750190019607544 + 0.4848479926586151 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>2 2 2 9 3. + 0 + -0.0414820015430450 + 0.7822039723396301 + -0.2199220061302185 + <_> + + <_> + + + + <_>12 2 10 14 -1. + <_>17 2 5 7 2. + <_>12 9 5 7 2. + 0 + -0.0961309969425201 + -0.8945630192756653 + 0.1468070000410080 + <_> + + <_> + + + + <_>11 6 2 18 -1. + <_>12 6 1 18 2. + 0 + -0.0115689998492599 + 0.8271409869194031 + -0.2027560025453568 + <_> + + <_> + + + + <_>9 5 15 6 -1. + <_>14 5 5 6 3. + 0 + 0.0183129999786615 + 0.0163679998368025 + 0.2730680108070374 + <_> + + <_> + + + + <_>8 6 6 10 -1. + <_>10 6 2 10 3. + 0 + -0.0341660007834435 + 1.1307320594787598 + -0.1881089955568314 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0244769994169474 + -0.5779129862785339 + 0.1581249982118607 + <_> + + <_> + + + + <_>3 3 9 7 -1. + <_>6 3 3 7 3. + 0 + 0.0489570014178753 + -0.0225649997591972 + -1.6373280286788940 + <_> + + <_> + + + + <_>6 7 14 3 -1. + <_>6 7 7 3 2. + 0 + -0.0207029990851879 + -0.5451210141181946 + 0.2408699989318848 + <_> + + <_> + + + + <_>7 7 8 6 -1. + <_>11 7 4 6 2. + 0 + -0.0230020005255938 + -1.2236540317535400 + -7.3440000414848328e-003 + <_> + + <_> + + + + <_>12 7 7 12 -1. + <_>12 13 7 6 2. + 0 + 0.0645850002765656 + 0.1469559967517853 + -0.4496749937534332 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>10 6 2 9 2. + <_>12 15 2 9 2. + 0 + 0.0126660000532866 + -0.2787390053272247 + 0.4387660026550293 + <_> + + <_> + + + + <_>16 14 6 9 -1. + <_>16 17 6 3 3. + 0 + -0.0120029998943210 + -0.2428909987211227 + 0.2535009980201721 + <_> + + <_> + + + + <_>4 0 6 13 -1. + <_>6 0 2 13 3. 
+ 0 + -0.0264439992606640 + -0.8586480021476746 + 0.0260259993374348 + <_> + + <_> + + + + <_>2 2 21 3 -1. + <_>9 2 7 3 3. + 0 + -0.0255479998886585 + 0.6928790211677551 + -2.1160000469535589e-003 + <_> + + <_> + + + + <_>5 4 5 12 -1. + <_>5 8 5 4 3. + 0 + 0.0391150005161762 + -0.1658910065889359 + 1.5209139585494995 + <_> + + <_> + + + + <_>10 3 4 10 -1. + <_>10 8 4 5 2. + 0 + -6.0330000706017017e-003 + 0.4385690093040466 + -0.2161370068788528 + <_> + + <_> + + + + <_>8 4 5 8 -1. + <_>8 8 5 4 2. + 0 + -0.0339369997382164 + -0.9799839854240418 + 0.0221330001950264 + -3.6478610038757324 + 8 + -1 + <_> + + + <_> + + <_> + + + + <_>6 0 11 9 -1. + <_>6 3 11 3 3. + 0 + 0.0406729988753796 + -0.9047470092773438 + 0.6441059708595276 + <_> + + <_> + + + + <_>6 6 12 5 -1. + <_>10 6 4 5 3. + 0 + 0.0256099998950958 + -0.7921699881553650 + 0.5748999714851379 + <_> + + <_> + + + + <_>0 0 24 5 -1. + <_>8 0 8 5 3. + 0 + 0.1995950043201447 + -0.3009960055351257 + 1.3143850564956665 + <_> + + <_> + + + + <_>1 10 23 6 -1. + <_>1 12 23 2 3. + 0 + 0.0124049996957183 + -0.8988299965858460 + 0.2920579910278320 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>9 21 6 3 3. + 0 + 0.0392079986631870 + -0.4195519983768463 + 0.5346329808235169 + <_> + + <_> + + + + <_>3 6 21 6 -1. + <_>3 8 21 2 3. + 0 + -0.0308439992368221 + 0.4579339921474457 + -0.4462909996509552 + <_> + + <_> + + + + <_>0 5 6 12 -1. + <_>2 5 2 12 3. + 0 + -0.0355230011045933 + 0.9131050109863281 + -0.2737320065498352 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0616500005125999 + -1.4697799682617187 + 0.2036409974098206 + <_> + + <_> + + + + <_>8 7 8 10 -1. + <_>8 12 8 5 2. + 0 + -0.0117399999871850 + -1.0482879877090454 + 0.0678019970655441 + <_> + + <_> + + + + <_>5 7 15 12 -1. + <_>10 7 5 12 3. + 0 + 0.0669339969754219 + 0.2927449941635132 + -0.5228289961814880 + <_> + + <_> + + + + <_>0 17 10 6 -1. + <_>0 19 10 2 3. + 0 + -0.0206310003995895 + -1.2855139970779419 + 0.0445509999990463 + <_> + + <_> + + + + <_>14 18 9 6 -1. + <_>14 20 9 2 3. + 0 + -0.0223570000380278 + -0.8575379848480225 + 0.1843400001525879 + <_> + + <_> + + + + <_>9 6 6 16 -1. + <_>9 14 6 8 2. + 0 + 1.1500000255182385e-003 + 0.1640550047159195 + -0.6912500262260437 + <_> + + <_> + + + + <_>14 18 9 6 -1. + <_>14 20 9 2 3. + 0 + 0.0358729995787144 + 0.1575649976730347 + -0.8426259756088257 + <_> + + <_> + + + + <_>1 18 9 6 -1. + <_>1 20 9 2 3. + 0 + 0.0306599996984005 + 0.0216370001435280 + -1.3634690046310425 + <_> + + <_> + + + + <_>15 9 9 6 -1. + <_>15 11 9 2 3. + 0 + 5.5559999309480190e-003 + -0.1673700064420700 + 0.2588840126991272 + <_> + + <_> + + + + <_>0 9 9 6 -1. + <_>0 11 9 2 3. + 0 + -6.1160000041127205e-003 + -0.9727180004119873 + 0.0661000013351440 + <_> + + <_> + + + + <_>17 3 6 9 -1. + <_>19 3 2 9 3. + 0 + -0.0303169991821051 + 0.9847419857978821 + -0.0164480004459620 + <_> + + <_> + + + + <_>2 17 18 3 -1. + <_>2 18 18 1 3. + 0 + -9.7200004383921623e-003 + 0.4760470092296600 + -0.3251670002937317 + <_> + + <_> + + + + <_>3 15 21 6 -1. + <_>3 17 21 2 3. + 0 + -0.0571269989013672 + -0.9592069983482361 + 0.1993820071220398 + <_> + + <_> + + + + <_>9 17 6 6 -1. + <_>9 20 6 3 2. + 0 + 4.0059997700154781e-003 + -0.5261250138282776 + 0.2242870032787323 + <_> + + <_> + + + + <_>18 3 6 9 -1. + <_>18 6 6 3 3. + 0 + 0.0337340012192726 + 0.1707009971141815 + -1.0737580060958862 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>0 6 6 3 3. + 0 + -0.0346419997513294 + -1.1343129873275757 + 0.0365400016307831 + <_> + + <_> + + + + <_>4 0 16 10 -1. + <_>12 0 8 5 2. 
+ <_>4 5 8 5 2. + 0 + 0.0469230003654957 + 0.2583230137825012 + -0.7153580188751221 + <_> + + <_> + + + + <_>2 0 10 16 -1. + <_>2 0 5 8 2. + <_>7 8 5 8 2. + 0 + -8.7660001590847969e-003 + 0.1964090019464493 + -0.5335509777069092 + <_> + + <_> + + + + <_>14 0 10 5 -1. + <_>14 0 5 5 2. + 0 + 0.0656279996037483 + -0.0511949993669987 + 0.9761070013046265 + <_> + + <_> + + + + <_>0 0 10 5 -1. + <_>5 0 5 5 2. + 0 + -0.0441650003194809 + 1.0631920099258423 + -0.2346259951591492 + <_> + + <_> + + + + <_>18 3 6 10 -1. + <_>18 3 3 10 2. + 0 + 0.0173049997538328 + -0.1858289986848831 + 0.4588989913463593 + <_> + + <_> + + + + <_>5 11 12 6 -1. + <_>5 11 6 3 2. + <_>11 14 6 3 2. + 0 + 0.0331359989941120 + -0.0293819997459650 + -2.6651329994201660 + <_> + + <_> + + + + <_>21 0 3 18 -1. + <_>22 0 1 18 3. + 0 + -0.0210299994796515 + 0.9997990131378174 + 0.0249370001256466 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + 0.0297839995473623 + -0.0296059995889664 + -2.1695868968963623 + <_> + + <_> + + + + <_>8 8 9 7 -1. + <_>11 8 3 7 3. + 0 + 0.0552919991314411 + -7.5599999399855733e-004 + 0.7465199828147888 + <_> + + <_> + + + + <_>7 12 8 10 -1. + <_>7 12 4 5 2. + <_>11 17 4 5 2. + 0 + -0.0335979983210564 + -1.5274159908294678 + 0.0110600003972650 + <_> + + <_> + + + + <_>21 0 3 18 -1. + <_>22 0 1 18 3. + 0 + 0.0196029990911484 + 0.0335749983787537 + 0.9952620267868042 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>12 6 2 9 2. + 0 + -0.0207870006561279 + 0.7661290168762207 + -0.2467080056667328 + <_> + + <_> + + + + <_>15 0 9 6 -1. + <_>15 2 9 2 3. + 0 + 0.0325360000133514 + 0.1626340001821518 + -0.6113430261611939 + <_> + + <_> + + + + <_>0 2 24 3 -1. + <_>0 3 24 1 3. + 0 + -0.0107880001887679 + -0.9783970117568970 + 0.0289699994027615 + <_> + + <_> + + + + <_>11 7 6 9 -1. + <_>13 7 2 9 3. + 0 + -9.9560003727674484e-003 + 0.4614579975605011 + -0.1351049989461899 + <_> + + <_> + + + + <_>7 6 6 10 -1. + <_>9 6 2 10 3. + 0 + -3.7489999085664749e-003 + 0.2545819878578186 + -0.5195559859275818 + <_> + + <_> + + + + <_>12 1 6 12 -1. + <_>14 1 2 12 3. + 0 + -0.0417799986898899 + -0.8056510090827942 + 0.1520850062370300 + <_> + + <_> + + + + <_>6 4 12 12 -1. + <_>6 10 12 6 2. + 0 + -0.0342210009694099 + -1.3137799501419067 + -3.5800000187009573e-003 + <_> + + <_> + + + + <_>14 3 2 21 -1. + <_>14 3 1 21 2. + 0 + 0.0101300003007054 + 0.2017579972743988 + -0.6133959889411926 + <_> + + <_> + + + + <_>6 1 12 8 -1. + <_>6 5 12 4 2. + 0 + -0.0898490026593208 + 0.9763280153274536 + -0.2088479995727539 + <_> + + <_> + + + + <_>3 0 18 8 -1. + <_>3 4 18 4 2. + 0 + 0.0260979998856783 + -0.1880799978971481 + 0.4770579934120178 + <_> + + <_> + + + + <_>3 0 18 3 -1. + <_>3 1 18 1 3. + 0 + -3.7539999466389418e-003 + -0.6798040270805359 + 0.1128880009055138 + <_> + + <_> + + + + <_>0 13 24 4 -1. + <_>12 13 12 2 2. + <_>0 15 12 2 2. + 0 + 0.0319730006158352 + 0.1895170062780380 + -1.4967479705810547 + <_> + + <_> + + + + <_>10 5 4 9 -1. + <_>12 5 2 9 2. + 0 + 0.0193329993635416 + -0.2360990047454834 + 0.8132050037384033 + <_> + + <_> + + + + <_>11 1 6 9 -1. + <_>13 1 2 9 3. + 0 + 1.9490000559017062e-003 + 0.2483039945363998 + -0.0692119970917702 + <_> + + <_> + + + + <_>6 2 6 22 -1. + <_>8 2 2 22 3. + 0 + -0.0441469997167587 + -1.0418920516967773 + 0.0480530001223087 + <_> + + <_> + + + + <_>16 10 8 14 -1. + <_>20 10 4 7 2. + <_>16 17 4 7 2. + 0 + -0.0446819998323917 + 0.5134630203247070 + -7.3799998499453068e-003 + <_> + + <_> + + + + <_>3 4 16 15 -1. + <_>3 9 16 5 3. 
+ 0 + -0.1075749993324280 + 1.6202019453048706 + -0.1866759955883026 + <_> + + <_> + + + + <_>16 10 8 14 -1. + <_>20 10 4 7 2. + <_>16 17 4 7 2. + 0 + -0.1284680068492889 + 2.9869480133056641 + 0.0954279974102974 + <_> + + <_> + + + + <_>0 10 8 14 -1. + <_>0 10 4 7 2. + <_>4 17 4 7 2. + 0 + -0.0447579994797707 + 0.6040530204772949 + -0.2705869972705841 + <_> + + <_> + + + + <_>10 14 11 6 -1. + <_>10 17 11 3 2. + 0 + -0.0439909994602203 + -0.6179050207138062 + 0.1599719971418381 + <_> + + <_> + + + + <_>0 7 24 9 -1. + <_>8 7 8 9 3. + 0 + -0.1226899996399880 + 0.6632720232009888 + -0.2363699972629547 + <_> + + <_> + + + + <_>13 1 4 16 -1. + <_>13 1 2 16 2. + 0 + -0.0199829991906881 + -1.1228660345077515 + 0.1961670070886612 + <_> + + <_> + + + + <_>7 1 4 16 -1. + <_>9 1 2 16 2. + 0 + -0.0155279999598861 + -1.0770269632339478 + 0.0206930004060268 + <_> + + <_> + + + + <_>5 5 16 8 -1. + <_>13 5 8 4 2. + <_>5 9 8 4 2. + 0 + -0.0489710010588169 + 0.8116829991340637 + -0.0172520000487566 + <_> + + <_> + + + + <_>0 9 6 9 -1. + <_>0 12 6 3 3. + 0 + 0.0559759996831417 + -0.0225290004163980 + -1.7356760501861572 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + -9.8580000922083855e-003 + 0.6788139939308167 + -0.0581800006330013 + <_> + + <_> + + + + <_>3 12 6 9 -1. + <_>3 15 6 3 3. + 0 + 0.0134810004383326 + 0.0578479990363121 + -0.7725530266761780 + <_> + + <_> + + + + <_>8 14 9 6 -1. + <_>8 16 9 2 3. + 0 + 6.5609999001026154e-003 + -0.1314689964056015 + 0.6705579757690430 + <_> + + <_> + + + + <_>2 13 8 10 -1. + <_>2 13 4 5 2. + <_>6 18 4 5 2. + 0 + 7.1149999275803566e-003 + -0.3788059949874878 + 0.3097899854183197 + <_> + + <_> + + + + <_>15 5 3 18 -1. + <_>15 11 3 6 3. + 0 + 4.8159998841583729e-003 + -0.5847039818763733 + 0.2560209929943085 + <_> + + <_> + + + + <_>3 5 18 3 -1. + <_>3 6 18 1 3. + 0 + 9.5319999381899834e-003 + -0.3021700084209442 + 0.4125329852104187 + <_> + + <_> + + + + <_>17 5 6 11 -1. + <_>19 5 2 11 3. + 0 + -0.0274749994277954 + 0.5915470123291016 + 0.0179639998823404 + <_> + + <_> + + + + <_>1 5 6 11 -1. + <_>3 5 2 11 3. + 0 + -0.0395199991762638 + 0.9691349864006043 + -0.2102030068635941 + <_> + + <_> + + + + <_>19 1 4 9 -1. + <_>19 1 2 9 2. + 0 + -0.0306589994579554 + 0.9115589857101440 + 0.0405500009655952 + <_> + + <_> + + + + <_>1 1 4 9 -1. + <_>3 1 2 9 2. + 0 + -1.4680000022053719e-003 + -0.6048979759216309 + 0.1696089953184128 + <_> + + <_> + + + + <_>4 15 18 9 -1. + <_>4 15 9 9 2. + 0 + 0.1907760053873062 + 0.0435150004923344 + 0.8189290165901184 + <_> + + <_> + + + + <_>6 9 12 4 -1. + <_>6 11 12 2 2. + 0 + 5.1790000870823860e-003 + -0.9361730217933655 + 0.0249370001256466 + <_> + + <_> + + + + <_>15 2 9 6 -1. + <_>15 4 9 2 3. + 0 + 0.0241260007023811 + 0.1817550063133240 + -0.3418590128421783 + <_> + + <_> + + + + <_>0 2 9 6 -1. + <_>0 4 9 2 3. + 0 + -0.0263839997351170 + -1.2912579774856567 + -3.4280000254511833e-003 + <_> + + <_> + + + + <_>15 0 6 17 -1. + <_>17 0 2 17 3. + 0 + 5.4139997810125351e-003 + -0.0462919995188713 + 0.2526960074901581 + <_> + + <_> + + + + <_>3 0 6 17 -1. + <_>5 0 2 17 3. + 0 + 0.0542160011827946 + -0.0128480000421405 + -1.4304540157318115 + <_> + + <_> + + + + <_>8 17 9 4 -1. + <_>8 19 9 2 2. + 0 + 2.3799999326001853e-004 + -0.2667669951915741 + 0.3358829915523529 + <_> + + <_> + + + + <_>6 5 3 18 -1. + <_>6 11 3 6 3. + 0 + 0.0152169996872544 + -0.5136730074882507 + 0.1300510019063950 + <_> + + <_> + + + + <_>5 2 14 12 -1. + <_>5 8 14 6 2. 
+ 0 + 0.0170079991221428 + 0.4157589972019196 + -0.3124119937419891 + <_> + + <_> + + + + <_>10 2 3 12 -1. + <_>10 8 3 6 2. + 0 + 0.0304969996213913 + -0.2482099980115891 + 0.7082849740982056 + <_> + + <_> + + + + <_>10 7 14 15 -1. + <_>10 12 14 5 3. + 0 + 6.5430002287030220e-003 + -0.2263700067996979 + 0.1918459981679916 + <_> + + <_> + + + + <_>0 7 14 15 -1. + <_>0 12 14 5 3. + 0 + 0.1416399925947189 + 0.0652270019054413 + -0.8880950212478638 + <_> + + <_> + + + + <_>15 0 9 6 -1. + <_>15 2 9 2 3. + 0 + 0.0193380005657673 + 0.1889120042324066 + -0.2739770114421845 + <_> + + <_> + + + + <_>0 0 9 6 -1. + <_>0 2 9 2 3. + 0 + -0.0173240005970001 + -0.9486669898033142 + 0.0241969991475344 + <_> + + <_> + + + + <_>12 6 6 14 -1. + <_>14 6 2 14 3. + 0 + -6.2069999985396862e-003 + 0.3693839907646179 + -0.1749490052461624 + <_> + + <_> + + + + <_>9 7 6 9 -1. + <_>11 7 2 9 3. + 0 + -0.0161090008914471 + 0.9615949988365173 + -0.2000530064105988 + <_> + + <_> + + + + <_>12 6 6 15 -1. + <_>14 6 2 15 3. + 0 + -0.1012250036001205 + -3.0699110031127930 + 0.1136379987001419 + <_> + + <_> + + + + <_>6 6 6 15 -1. + <_>8 6 2 15 3. + 0 + -7.5509999878704548e-003 + 0.2292100042104721 + -0.4564509987831116 + <_> + + <_> + + + + <_>15 3 8 9 -1. + <_>15 3 4 9 2. + 0 + 0.0442479997873306 + -3.1599999056197703e-004 + 0.3922530114650726 + <_> + + <_> + + + + <_>0 0 9 21 -1. + <_>3 0 3 21 3. + 0 + -0.1163600012660027 + 0.9523370265960693 + -0.2020159959793091 + <_> + + <_> + + + + <_>11 9 8 12 -1. + <_>11 13 8 4 3. + 0 + 4.7360002063214779e-003 + -0.0991770029067993 + 0.2037049978971481 + <_> + + <_> + + + + <_>6 7 10 12 -1. + <_>6 7 5 6 2. + <_>11 13 5 6 2. + 0 + 0.0224590003490448 + 8.7280003353953362e-003 + -1.0217070579528809 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>12 6 2 9 2. + <_>10 15 2 9 2. + 0 + -0.0121090002357960 + 0.6481260061264038 + -0.0901490002870560 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>0 3 6 3 3. + 0 + 0.0561200007796288 + -0.0367599986493587 + -1.9275590181350708 + <_> + + <_> + + + + <_>3 14 18 3 -1. + <_>3 15 18 1 3. + 0 + -8.7379999458789825e-003 + 0.6926130056381226 + -0.0683749988675117 + <_> + + <_> + + + + <_>3 14 8 10 -1. + <_>3 14 4 5 2. + <_>7 19 4 5 2. + 0 + 6.6399998031556606e-003 + -0.4056980013847351 + 0.1862570047378540 + <_> + + <_> + + + + <_>0 12 24 4 -1. + <_>12 12 12 2 2. + <_>0 14 12 2 2. + 0 + -0.0181319992989302 + -0.6451820135116577 + 0.2197639942169190 + <_> + + <_> + + + + <_>0 2 3 20 -1. + <_>1 2 1 20 3. + 0 + -0.0227189995348454 + 0.9777619838714600 + -0.1865430027246475 + <_> + + <_> + + + + <_>12 16 10 8 -1. + <_>17 16 5 4 2. + <_>12 20 5 4 2. + 0 + 0.0127050001174212 + -0.1054660007357597 + 0.3740409910678864 + <_> + + <_> + + + + <_>2 16 10 8 -1. + <_>2 16 5 4 2. + <_>7 20 5 4 2. + 0 + -0.0136829996481538 + 0.6106410026550293 + -0.2688109874725342 + -3.8700489997863770 + 9 + -1 + <_> + + + <_> + + <_> + + + + <_>7 0 10 9 -1. + <_>7 3 10 3 3. + 0 + 0.0313579998910427 + -1.0183910131454468 + 0.5752859711647034 + <_> + + <_> + + + + <_>0 0 24 3 -1. + <_>8 0 8 3 3. + 0 + 0.0930500030517578 + -0.4129750132560730 + 1.0091199874877930 + <_> + + <_> + + + + <_>3 8 15 4 -1. + <_>3 10 15 2 2. + 0 + 0.0259499996900558 + -0.5858790278434753 + 0.5660619735717773 + <_> + + <_> + + + + <_>6 5 12 6 -1. + <_>10 5 4 6 3. + 0 + 0.0164720006287098 + -0.9285749793052673 + 0.3092449903488159 + <_> + + <_> + + + + <_>5 13 14 6 -1. + <_>5 16 14 3 2. + 0 + -1.8779999809339643e-003 + 0.1195100024342537 + -1.1180130243301392 + <_> + + <_> + + + + <_>11 14 4 10 -1. + <_>11 19 4 5 2. 
+ 0 + -9.0129999443888664e-003 + -0.5784950256347656 + 0.3315440118312836 + <_> + + <_> + + + + <_>0 6 6 7 -1. + <_>3 6 3 7 2. + 0 + 0.0225479993969202 + -0.3832510113716126 + 0.5246220231056213 + <_> + + <_> + + + + <_>18 0 6 6 -1. + <_>18 0 3 6 2. + 0 + -0.0377800017595291 + 1.1790670156478882 + -0.0341669991612434 + <_> + + <_> + + + + <_>3 1 18 3 -1. + <_>3 2 18 1 3. + 0 + -5.3799999877810478e-003 + -0.8626589775085449 + 0.1186790019273758 + <_> + + <_> + + + + <_>9 6 14 18 -1. + <_>9 12 14 6 3. + 0 + -0.0238930005580187 + -0.7495059967041016 + 0.2101140022277832 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0265219993889332 + 0.9212859869003296 + -0.2825280129909515 + <_> + + <_> + + + + <_>13 11 6 6 -1. + <_>13 11 3 6 2. + 0 + 0.0122800003737211 + 0.2666279971599579 + -0.7001360058784485 + <_> + + <_> + + + + <_>0 20 24 3 -1. + <_>8 20 8 3 3. + 0 + 0.0965949967503548 + -0.2845399975776672 + 0.7316899895668030 + <_> + + <_> + + + + <_>13 11 6 7 -1. + <_>13 11 3 7 2. + 0 + -0.0274149999022484 + -0.6149269938468933 + 0.1557620018720627 + <_> + + <_> + + + + <_>4 12 10 6 -1. + <_>4 14 10 2 3. + 0 + -0.0157670006155968 + 0.5755119919776917 + -0.3436219990253449 + <_> + + <_> + + + + <_>13 11 6 6 -1. + <_>13 11 3 6 2. + 0 + -2.1100000012665987e-003 + 0.3259969949722290 + -0.1300829946994782 + <_> + + <_> + + + + <_>5 11 6 7 -1. + <_>8 11 3 7 2. + 0 + 0.0120069999247789 + 0.0893229991197586 + -0.9602559804916382 + <_> + + <_> + + + + <_>7 4 11 12 -1. + <_>7 8 11 4 3. + 0 + -0.0154219996184111 + 0.3444949984550476 + -0.4671199917793274 + <_> + + <_> + + + + <_>6 15 10 4 -1. + <_>6 17 10 2 2. + 0 + -4.1579999960958958e-003 + 0.2369630038738251 + -0.5256329774856567 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>16 0 2 9 3. + 0 + -0.0211859997361898 + -0.7426769733428955 + 0.2170200049877167 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + -0.0170770008116961 + -0.9047179818153381 + 0.0660120025277138 + <_> + + <_> + + + + <_>11 2 4 15 -1. + <_>11 7 4 5 3. + 0 + -0.0408499985933304 + -0.3444660007953644 + 0.2150370031595230 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -8.1930002197623253e-003 + -0.9338859915733337 + 0.0504710003733635 + <_> + + <_> + + + + <_>13 18 10 6 -1. + <_>13 20 10 2 3. + 0 + -0.0192380007356405 + -0.5320370197296143 + 0.1724060028791428 + <_> + + <_> + + + + <_>2 7 6 11 -1. + <_>5 7 3 11 2. + 0 + -0.0441920012235641 + 0.9207500219345093 + -0.2214850038290024 + <_> + + <_> + + + + <_>10 14 10 9 -1. + <_>10 17 10 3 3. + 0 + -0.0623920001089573 + -0.7105380296707153 + 0.1832389980554581 + <_> + + <_> + + + + <_>8 2 4 9 -1. + <_>10 2 2 9 2. + 0 + -1.0079999919980764e-003 + -0.8706309795379639 + 0.0553300008177757 + <_> + + <_> + + + + <_>14 3 10 4 -1. + <_>14 3 5 4 2. + 0 + 0.0238700006157160 + -0.2285420000553131 + 0.5241559743881226 + <_> + + <_> + + + + <_>6 6 12 6 -1. + <_>6 6 6 3 2. + <_>12 9 6 3 2. + 0 + 0.0213910005986691 + -0.3032589852809906 + 0.5586060285568237 + <_> + + <_> + + + + <_>8 8 8 10 -1. + <_>12 8 4 5 2. + <_>8 13 4 5 2. + 0 + 0.0202549993991852 + 0.2690150141716003 + -0.7026180028915405 + <_> + + <_> + + + + <_>7 4 4 16 -1. + <_>7 12 4 8 2. + 0 + -0.0287720002233982 + -1.1835030317306519 + 0.0465120002627373 + <_> + + <_> + + + + <_>8 8 9 4 -1. + <_>8 10 9 2 2. + 0 + 3.4199999645352364e-003 + -0.5465210080146790 + 0.2596249878406525 + <_> + + <_> + + + + <_>5 2 14 9 -1. + <_>5 5 14 3 3. + 0 + 0.0569830015301704 + -0.2698290050029755 + 0.5817070007324219 + <_> + + <_> + + + + <_>3 16 19 8 -1. + <_>3 20 19 4 2. 
+ 0 + -0.0938920006155968 + -0.9104639887809753 + 0.1967770010232925 + <_> + + <_> + + + + <_>0 0 10 8 -1. + <_>5 0 5 8 2. + 0 + 0.0176999997347593 + -0.4400329887866974 + 0.2134950011968613 + <_> + + <_> + + + + <_>5 2 16 18 -1. + <_>5 2 8 18 2. + 0 + 0.2284419983625412 + 0.0236050002276897 + 0.7717159986495972 + <_> + + <_> + + + + <_>0 11 24 11 -1. + <_>8 11 8 11 3. + 0 + -0.1828750073909760 + 0.7922859787940979 + -0.2464479953050613 + <_> + + <_> + + + + <_>3 3 18 5 -1. + <_>3 3 9 5 2. + 0 + -0.0698919966816902 + 0.8026779890060425 + -0.0360720008611679 + <_> + + <_> + + + + <_>1 16 18 3 -1. + <_>1 17 18 1 3. + 0 + 0.0152970002964139 + -0.2007230073213577 + 1.1030600070953369 + <_> + + <_> + + + + <_>5 17 18 3 -1. + <_>5 18 18 1 3. + 0 + 6.7500001750886440e-003 + -0.0459679998457432 + 0.7209450006484985 + <_> + + <_> + + + + <_>1 13 9 6 -1. + <_>1 15 9 2 3. + 0 + -0.0159830003976822 + -0.9035720229148865 + 0.0449879989027977 + <_> + + <_> + + + + <_>1 9 23 10 -1. + <_>1 14 23 5 2. + 0 + 0.0130880000069737 + 0.3529709875583649 + -0.3771060109138489 + <_> + + <_> + + + + <_>3 7 18 3 -1. + <_>3 8 18 1 3. + 0 + 0.0130610000342131 + -0.1958359926939011 + 1.1198940277099609 + <_> + + <_> + + + + <_>6 8 12 3 -1. + <_>6 8 6 3 2. + 0 + -0.0399070009589195 + -1.3998429775238037 + 0.1914509981870651 + <_> + + <_> + + + + <_>6 2 3 22 -1. + <_>7 2 1 22 3. + 0 + 0.0150269996374846 + 2.3600000422447920e-003 + -1.1611249446868896 + <_> + + <_> + + + + <_>14 17 10 6 -1. + <_>14 19 10 2 3. + 0 + -0.0205179993063211 + -0.4890809953212738 + 0.1674340069293976 + <_> + + <_> + + + + <_>1 18 10 6 -1. + <_>1 20 10 2 3. + 0 + -0.0223590005189180 + -1.2202980518341064 + -0.0119759999215603 + <_> + + <_> + + + + <_>11 3 6 12 -1. + <_>13 3 2 12 3. + 0 + -7.9150004312396049e-003 + 0.3722809851169586 + -0.0850630030035973 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>12 6 2 9 2. + 0 + 0.0152580002322793 + -0.2941260039806366 + 0.5940639972686768 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -0.0316659994423389 + -1.4395569562911987 + 0.1357879936695099 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0307739991694689 + -2.2545371055603027 + -0.0339710004627705 + <_> + + <_> + + + + <_>12 10 9 6 -1. + <_>15 10 3 6 3. + 0 + -0.0154830003157258 + 0.3770070075988770 + 0.0158479996025562 + <_> + + <_> + + + + <_>2 11 6 9 -1. + <_>5 11 3 9 2. + 0 + 0.0351670011878014 + -0.2944610118865967 + 0.5315909981727600 + <_> + + <_> + + + + <_>14 5 3 19 -1. + <_>15 5 1 19 3. + 0 + -0.0179060008376837 + -0.9978820085525513 + 0.1623599976301193 + <_> + + <_> + + + + <_>6 6 9 6 -1. + <_>6 8 9 2 3. + 0 + -3.1799999997019768e-003 + 0.0476570017635822 + -0.7524989843368530 + <_> + + <_> + + + + <_>14 5 3 19 -1. + <_>15 5 1 19 3. + 0 + 0.0157200004905462 + 0.1487379968166351 + -0.6537539958953857 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>0 6 6 3 3. + 0 + 0.0298640001565218 + -0.0149520002305508 + -1.2275190353393555 + <_> + + <_> + + + + <_>5 21 18 3 -1. + <_>5 22 18 1 3. + 0 + 2.9899999499320984e-003 + -0.1426369994878769 + 0.4327279925346375 + <_> + + <_> + + + + <_>1 10 18 4 -1. + <_>7 10 6 4 3. + 0 + 0.0847499966621399 + -0.0192809998989105 + -1.1946409940719604 + <_> + + <_> + + + + <_>13 4 8 10 -1. + <_>17 4 4 5 2. + <_>13 9 4 5 2. + 0 + -0.0587249994277954 + -1.7328219413757324 + 0.1437470018863678 + <_> + + <_> + + + + <_>7 8 9 6 -1. + <_>10 8 3 6 3. + 0 + 0.0447559989988804 + -0.2414059937000275 + 0.5401999950408936 + <_> + + <_> + + + + <_>12 9 9 8 -1. + <_>15 9 3 8 3. 
+ 0 + 0.0403690002858639 + 5.7680001482367516e-003 + 0.5657809972763062 + <_> + + <_> + + + + <_>0 6 5 12 -1. + <_>0 10 5 4 3. + 0 + 0.0377359986305237 + 0.0381809994578362 + -0.7937039732933044 + <_> + + <_> + + + + <_>7 6 14 6 -1. + <_>14 6 7 3 2. + <_>7 9 7 3 2. + 0 + 0.0607529990375042 + 0.0764530003070831 + 1.4813209772109985 + <_> + + <_> + + + + <_>7 5 3 19 -1. + <_>8 5 1 19 3. + 0 + -0.0198320001363754 + -1.6971720457077026 + -0.0273700002580881 + <_> + + <_> + + + + <_>8 4 15 20 -1. + <_>13 4 5 20 3. + 0 + -0.1659269928932190 + 0.6297600269317627 + 0.0317629985511303 + <_> + + <_> + + + + <_>1 4 15 20 -1. + <_>6 4 5 20 3. + 0 + 0.0690149962902069 + -0.3346320092678070 + 0.3007670044898987 + <_> + + <_> + + + + <_>13 10 6 6 -1. + <_>13 10 3 6 2. + 0 + 0.0113580003380775 + 0.2274149954319000 + -0.3822470009326935 + <_> + + <_> + + + + <_>5 10 6 6 -1. + <_>8 10 3 6 2. + 0 + 1.7000000225380063e-003 + 0.1922380030155182 + -0.5273510217666626 + <_> + + <_> + + + + <_>14 2 6 14 -1. + <_>17 2 3 7 2. + <_>14 9 3 7 2. + 0 + 0.0797690004110336 + 0.0914919972419739 + 2.1049048900604248 + <_> + + <_> + + + + <_>4 2 6 14 -1. + <_>4 2 3 7 2. + <_>7 9 3 7 2. + 0 + -0.0571440011262894 + -1.7452130317687988 + -0.0409100018441677 + <_> + + <_> + + + + <_>12 4 6 7 -1. + <_>12 4 3 7 2. + 0 + 7.3830001056194305e-003 + -0.2421479970216751 + 0.3557780086994171 + <_> + + <_> + + + + <_>9 4 6 9 -1. + <_>11 4 2 9 3. + 0 + -0.0180409997701645 + 1.1779999732971191 + -0.1767670065164566 + <_> + + <_> + + + + <_>11 4 8 10 -1. + <_>11 4 4 10 2. + 0 + 0.0945030003786087 + 0.1393609941005707 + -1.2993700504302979 + <_> + + <_> + + + + <_>5 4 8 10 -1. + <_>9 4 4 10 2. + 0 + 5.4210000671446323e-003 + -0.5460860133171082 + 0.1391640007495880 + <_> + + <_> + + + + <_>8 18 10 6 -1. + <_>8 20 10 2 3. + 0 + 7.0290002040565014e-003 + -0.2159720063209534 + 0.3925809860229492 + <_> + + <_> + + + + <_>1 18 21 6 -1. + <_>1 20 21 2 3. + 0 + 0.0345159992575645 + 0.0631889998912811 + -0.7210810184478760 + <_> + + <_> + + + + <_>9 2 12 6 -1. + <_>9 2 6 6 2. + 0 + -0.0519249998033047 + 0.6866760253906250 + 0.0632729977369308 + <_> + + <_> + + + + <_>3 2 12 6 -1. + <_>9 2 6 6 2. + 0 + -0.0691620036959648 + 1.7411810159683228 + -0.1661929935216904 + <_> + + <_> + + + + <_>12 5 12 6 -1. + <_>18 5 6 3 2. + <_>12 8 6 3 2. + 0 + -5.5229999125003815e-003 + 0.3069469928741455 + -0.1666290014982224 + <_> + + <_> + + + + <_>8 8 6 9 -1. + <_>8 11 6 3 3. + 0 + 0.0685999989509583 + -0.2140540033578873 + 0.7318500280380249 + <_> + + <_> + + + + <_>2 7 20 6 -1. + <_>2 9 20 2 3. + 0 + -0.0670389980077744 + -0.7936059832572937 + 0.2052579969167709 + <_> + + <_> + + + + <_>0 5 12 6 -1. + <_>0 5 6 3 2. + <_>6 8 6 3 2. + 0 + -0.0210050009191036 + 0.3734439909458160 + -0.2961860001087189 + <_> + + <_> + + + + <_>14 14 8 10 -1. + <_>18 14 4 5 2. + <_>14 19 4 5 2. + 0 + 0.0202789995819330 + -0.0152000002563000 + 0.4055530130863190 + <_> + + <_> + + + + <_>2 14 8 10 -1. + <_>2 14 4 5 2. + <_>6 19 4 5 2. + 0 + -0.0471079982817173 + 1.2116849422454834 + -0.1746429949998856 + <_> + + <_> + + + + <_>2 11 20 13 -1. + <_>2 11 10 13 2. + 0 + 0.1876849979162216 + -0.0229090005159378 + 0.6964579820632935 + <_> + + <_> + + + + <_>6 9 12 5 -1. + <_>12 9 6 5 2. + 0 + -0.0432289987802505 + -1.0602480173110962 + -5.5599998449906707e-004 + <_> + + <_> + + + + <_>5 6 16 6 -1. + <_>13 6 8 3 2. + <_>5 9 8 3 2. + 0 + 0.0200040005147457 + -0.0327510014176369 + 0.5380510091781616 + <_> + + <_> + + + + <_>1 19 9 4 -1. + <_>1 21 9 2 2. 
+ 0 + 8.0880001187324524e-003 + 0.0375480018556118 + -0.7476890087127686 + <_> + + <_> + + + + <_>7 5 12 5 -1. + <_>11 5 4 5 3. + 0 + 0.0271010007709265 + -0.0817900002002716 + 0.3338710069656372 + <_> + + <_> + + + + <_>3 5 14 12 -1. + <_>3 5 7 6 2. + <_>10 11 7 6 2. + 0 + -0.0917460024356842 + -1.9213509559631348 + -0.0389529988169670 + <_> + + <_> + + + + <_>9 4 9 6 -1. + <_>12 4 3 6 3. + 0 + -0.0124549996107817 + 0.4836060106754303 + 0.0181680005043745 + <_> + + <_> + + + + <_>2 6 19 3 -1. + <_>2 7 19 1 3. + 0 + 0.0146490000188351 + -0.1990669965744019 + 0.7281540036201477 + <_> + + <_> + + + + <_>18 10 6 9 -1. + <_>18 13 6 3 3. + 0 + 0.0291019994765520 + 0.1987109929323196 + -0.4921680092811585 + <_> + + <_> + + + + <_>3 7 18 2 -1. + <_>3 8 18 1 2. + 0 + 8.7799998000264168e-003 + -0.1949959993362427 + 0.7731739878654480 + <_> + + <_> + + + + <_>20 2 4 18 -1. + <_>22 2 2 9 2. + <_>20 11 2 9 2. + 0 + -0.0547400005161762 + 1.8087190389633179 + 0.0683230012655258 + <_> + + <_> + + + + <_>2 18 20 3 -1. + <_>2 19 20 1 3. + 0 + -0.0147980004549026 + 0.7806490063667297 + -0.1870959997177124 + <_> + + <_> + + + + <_>1 9 22 3 -1. + <_>1 10 22 1 3. + 0 + 0.0250129997730255 + 0.1528529971837997 + -1.6021020412445068 + <_> + + <_> + + + + <_>0 2 4 18 -1. + <_>0 2 2 9 2. + <_>2 11 2 9 2. + 0 + 0.0465480014681816 + -0.1673820018768311 + 1.1902060508728027 + <_> + + <_> + + + + <_>19 0 4 23 -1. + <_>19 0 2 23 2. + 0 + 0.0176240000873804 + -0.1028549969196320 + 0.3917590081691742 + <_> + + <_> + + + + <_>0 3 6 19 -1. + <_>3 3 3 19 2. + 0 + 0.1631959974765778 + -0.0356240011751652 + -1.6098170280456543 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>20 2 2 9 3. + 0 + 0.0131379999220371 + -0.0563590005040169 + 0.5415890216827393 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -0.0156650003045797 + 0.2806310057640076 + -0.3170860111713409 + <_> + + <_> + + + + <_>7 0 12 12 -1. + <_>13 0 6 6 2. + <_>7 6 6 6 2. + 0 + 0.0805540010333061 + 0.1264040023088455 + -1.0297529697418213 + <_> + + <_> + + + + <_>0 3 24 6 -1. + <_>0 3 12 3 2. + <_>12 6 12 3 2. + 0 + 0.0353639982640743 + 0.0207529999315739 + -0.7910559773445129 + <_> + + <_> + + + + <_>10 14 4 10 -1. + <_>10 19 4 5 2. + 0 + 0.0329869985580444 + 0.1905709952116013 + -0.8383989930152893 + <_> + + <_> + + + + <_>8 9 4 15 -1. + <_>8 14 4 5 3. + 0 + 0.0121950004249811 + 0.0737290009856224 + -0.6278070211410523 + <_> + + <_> + + + + <_>4 11 17 6 -1. + <_>4 14 17 3 2. + 0 + 0.0430659987032413 + 0.0473849996924400 + 1.5712939500808716 + <_> + + <_> + + + + <_>2 5 18 8 -1. + <_>2 5 9 4 2. + <_>11 9 9 4 2. + 0 + 0.0303269997239113 + -0.2731460034847260 + 0.3857200145721436 + <_> + + <_> + + + + <_>7 6 14 6 -1. + <_>14 6 7 3 2. + <_>7 9 7 3 2. + 0 + 0.0354930013418198 + 0.0545939989387989 + 0.5258340239524841 + <_> + + <_> + + + + <_>3 6 14 6 -1. + <_>3 6 7 3 2. + <_>10 9 7 3 2. + 0 + -0.0145969996228814 + 0.3815259933471680 + -0.2833240032196045 + <_> + + <_> + + + + <_>16 5 3 18 -1. + <_>17 5 1 18 3. + 0 + 0.0126069998368621 + 0.1545509994029999 + -0.3050149977207184 + <_> + + <_> + + + + <_>5 5 3 18 -1. + <_>6 5 1 18 3. + 0 + 0.0101720001548529 + 0.0236370004713535 + -0.8721789717674255 + <_> + + <_> + + + + <_>10 10 14 4 -1. + <_>10 12 14 2 2. + 0 + 0.0288430005311966 + 0.1609099954366684 + -0.2027759999036789 + <_> + + <_> + + + + <_>4 10 9 4 -1. + <_>4 12 9 2 2. + 0 + 5.5100000463426113e-004 + -0.6154540181159973 + 0.0809359997510910 + -3.7160909175872803 + 10 + -1 + <_> + + + <_> + + <_> + + + + <_>2 0 18 9 -1. + <_>2 3 18 3 3. 
+ 0 + 0.0483440011739731 + -0.8490459918975830 + 0.5697439908981323 + <_> + + <_> + + + + <_>6 3 12 8 -1. + <_>10 3 4 8 3. + 0 + 0.0324600003659725 + -0.8141729831695557 + 0.4478169977664948 + <_> + + <_> + + + + <_>1 1 8 5 -1. + <_>5 1 4 5 2. + 0 + 0.0333399996161461 + -0.3642379939556122 + 0.6793739795684815 + <_> + + <_> + + + + <_>12 7 7 8 -1. + <_>12 11 7 4 2. + 0 + 6.4019998535513878e-003 + -1.1885459423065186 + 0.1923869997262955 + <_> + + <_> + + + + <_>0 12 22 4 -1. + <_>0 14 22 2 2. + 0 + -5.6889997795224190e-003 + 0.3308529853820801 + -0.7133409976959229 + <_> + + <_> + + + + <_>15 6 4 15 -1. + <_>15 11 4 5 3. + 0 + 0.0126980002969503 + -0.5099080204963684 + 0.1137629970908165 + <_> + + <_> + + + + <_>5 7 7 8 -1. + <_>5 11 7 4 2. + 0 + 6.0549997724592686e-003 + -1.0470550060272217 + 0.2022259980440140 + <_> + + <_> + + + + <_>8 18 9 4 -1. + <_>8 20 9 2 2. + 0 + 2.6420000940561295e-003 + -0.5055940151214600 + 0.3644120097160339 + <_> + + <_> + + + + <_>1 2 22 4 -1. + <_>1 4 22 2 2. + 0 + -0.0169259998947382 + -0.9954190254211426 + 0.1260219961404800 + <_> + + <_> + + + + <_>17 3 6 17 -1. + <_>19 3 2 17 3. + 0 + 0.0282359998673201 + -0.0941379964351654 + 0.5778040289878845 + <_> + + <_> + + + + <_>8 2 8 18 -1. + <_>8 11 8 9 2. + 0 + 0.0104289995506406 + 0.2327290028333664 + -0.5256969928741455 + <_> + + <_> + + + + <_>17 0 6 12 -1. + <_>20 0 3 6 2. + <_>17 6 3 6 2. + 0 + 9.8860003054141998e-003 + -0.1031629964709282 + 0.4765760004520416 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + 0.0260150004178286 + -1.0920000495389104e-003 + -1.5581729412078857 + <_> + + <_> + + + + <_>15 5 9 12 -1. + <_>15 11 9 6 2. + 0 + -0.0255379993468523 + -0.6545140147209168 + 0.1884319931268692 + <_> + + <_> + + + + <_>2 22 18 2 -1. + <_>2 23 18 1 2. + 0 + -3.5310001112520695e-003 + 0.2814059853553772 + -0.4457530081272125 + <_> + + <_> + + + + <_>10 10 12 6 -1. + <_>16 10 6 3 2. + <_>10 13 6 3 2. + 0 + 9.2449998483061790e-003 + 0.1561200022697449 + -0.2137099951505661 + <_> + + <_> + + + + <_>0 1 4 11 -1. + <_>2 1 2 11 2. + 0 + 0.0210309997200966 + -0.2917029857635498 + 0.5223410129547119 + <_> + + <_> + + + + <_>20 0 4 10 -1. + <_>20 0 2 10 2. + 0 + -0.0510630011558533 + 1.3661290407180786 + 0.0304659996181726 + <_> + + <_> + + + + <_>1 3 6 17 -1. + <_>3 3 2 17 3. + 0 + -0.0623300001025200 + 1.2207020521163940 + -0.2243440002202988 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + -0.0329630002379417 + -0.8201680183410645 + 0.1453189998865128 + <_> + + <_> + + + + <_>0 13 8 9 -1. + <_>0 16 8 3 3. + 0 + -0.0374180004000664 + -1.2218099832534790 + 0.0194489993155003 + <_> + + <_> + + + + <_>16 8 6 12 -1. + <_>16 12 6 4 3. + 0 + 0.1240279972553253 + 0.1208230033516884 + -0.9872930049896240 + <_> + + <_> + + + + <_>2 8 6 12 -1. + <_>2 12 6 4 3. + 0 + -8.9229997247457504e-003 + -1.1688489913940430 + 0.0211050007492304 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0598799996078014 + -1.0689330101013184 + 0.1986020058393478 + <_> + + <_> + + + + <_>1 5 19 3 -1. + <_>1 6 19 1 3. + 0 + 6.2620001845061779e-003 + -0.3622959852218628 + 0.3800080120563507 + <_> + + <_> + + + + <_>11 8 9 7 -1. + <_>14 8 3 7 3. + 0 + -0.0176730006933212 + 0.4909409880638123 + -0.1460669934749603 + <_> + + <_> + + + + <_>3 8 12 9 -1. + <_>3 11 12 3 3. + 0 + 0.0175790004432201 + 0.5872809886932373 + -0.2777439951896668 + <_> + + <_> + + + + <_>3 6 18 3 -1. + <_>3 7 18 1 3. + 0 + 5.1560001447796822e-003 + -0.0751949995756149 + 0.6019309759140015 + <_> + + <_> + + + + <_>10 0 4 12 -1. 
+ <_>10 6 4 6 2. + 0 + -0.0105999996885657 + 0.2763740122318268 + -0.3779430091381073 + <_> + + <_> + + + + <_>3 9 18 14 -1. + <_>3 9 9 14 2. + 0 + 0.2088409960269928 + -5.3599998354911804e-003 + 1.0317809581756592 + <_> + + <_> + + + + <_>0 0 4 9 -1. + <_>2 0 2 9 2. + 0 + -0.0264129992574453 + 0.8233640193939209 + -0.2248059958219528 + <_> + + <_> + + + + <_>12 5 4 18 -1. + <_>12 5 2 18 2. + 0 + 0.0588920004665852 + 0.1309829950332642 + -1.1853699684143066 + <_> + + <_> + + + + <_>8 5 4 18 -1. + <_>10 5 2 18 2. + 0 + -0.0115790003910661 + -0.9066780209541321 + 0.0441269986331463 + <_> + + <_> + + + + <_>10 5 6 10 -1. + <_>12 5 2 10 3. + 0 + 0.0459880009293556 + 0.0101439999416471 + 1.0740900039672852 + <_> + + <_> + + + + <_>9 4 4 11 -1. + <_>11 4 2 11 2. + 0 + -0.0228380002081394 + 1.7791990041732788 + -0.1731549948453903 + <_> + + <_> + + + + <_>4 16 18 3 -1. + <_>4 17 18 1 3. + 0 + -8.1709995865821838e-003 + 0.5738630294799805 + -0.0741060003638268 + <_> + + <_> + + + + <_>0 16 20 3 -1. + <_>0 17 20 1 3. + 0 + 3.5359999164938927e-003 + -0.3207289874553680 + 0.4018250107765198 + <_> + + <_> + + + + <_>9 9 6 12 -1. + <_>9 13 6 4 3. + 0 + 0.0494449995458126 + 0.1928800046443939 + -1.2166700363159180 + <_> + + <_> + + + + <_>8 13 8 8 -1. + <_>8 17 8 4 2. + 0 + 3.5139999818056822e-003 + 0.0695680007338524 + -0.7132369875907898 + <_> + + <_> + + + + <_>13 10 3 12 -1. + <_>13 16 3 6 2. + 0 + -0.0309960003942251 + -0.3886219859123230 + 0.1809879988431931 + <_> + + <_> + + + + <_>5 9 14 14 -1. + <_>5 9 7 7 2. + <_>12 16 7 7 2. + 0 + 0.0864529982209206 + -0.0257929991930723 + -1.5453219413757324 + <_> + + <_> + + + + <_>0 0 24 10 -1. + <_>12 0 12 5 2. + <_>0 5 12 5 2. + 0 + -0.1365260034799576 + -1.9199420213699341 + 0.1661330014467239 + <_> + + <_> + + + + <_>1 11 18 2 -1. + <_>1 12 18 1 2. + 0 + -5.7689999230206013e-003 + -1.2822589874267578 + -0.0159079991281033 + <_> + + <_> + + + + <_>19 5 5 12 -1. + <_>19 9 5 4 3. + 0 + -0.0178999993950129 + -0.4040989875793457 + 0.2359160035848618 + <_> + + <_> + + + + <_>0 5 5 12 -1. + <_>0 9 5 4 3. + 0 + -0.0199699997901917 + -0.7289190292358398 + 0.0562350004911423 + <_> + + <_> + + + + <_>16 6 8 18 -1. + <_>20 6 4 9 2. + <_>16 15 4 9 2. + 0 + -0.0574930012226105 + 0.5783079862594605 + -0.0157960001379251 + <_> + + <_> + + + + <_>0 6 8 18 -1. + <_>0 6 4 9 2. + <_>4 15 4 9 2. + 0 + -0.0830560028553009 + 0.9151160120964050 + -0.2112140059471130 + <_> + + <_> + + + + <_>12 5 12 12 -1. + <_>18 5 6 6 2. + <_>12 11 6 6 2. + 0 + -0.0537710003554821 + -0.5193129777908325 + 0.1857600063085556 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>9 6 2 9 3. + 0 + -8.3670001477003098e-003 + 0.2410970032215118 + -0.3964860141277313 + <_> + + <_> + + + + <_>9 13 6 11 -1. + <_>11 13 2 11 3. + 0 + 0.0554069988429546 + 0.1677120029926300 + -2.5664970874786377 + <_> + + <_> + + + + <_>0 5 12 12 -1. + <_>0 5 6 6 2. + <_>6 11 6 6 2. + 0 + -0.0671809986233711 + -1.3658570051193237 + -0.0142320003360510 + <_> + + <_> + + + + <_>1 2 23 3 -1. + <_>1 3 23 1 3. + 0 + -0.0239000003784895 + -1.7084569931030273 + 0.1650779992341995 + <_> + + <_> + + + + <_>1 15 19 3 -1. + <_>1 16 19 1 3. + 0 + 5.5949999950826168e-003 + -0.3137399852275848 + 0.3283790051937103 + <_> + + <_> + + + + <_>13 17 11 4 -1. + <_>13 19 11 2 2. + 0 + 0.0212949998676777 + 0.1495340019464493 + -0.4857980012893677 + <_> + + <_> + + + + <_>0 13 8 5 -1. + <_>4 13 4 5 2. + 0 + -0.0246130004525185 + 0.7434639930725098 + -0.2230519950389862 + <_> + + <_> + + + + <_>12 10 10 4 -1. + <_>12 10 5 4 2. 
+ 0 + -0.0196260008960962 + -0.4091829955577850 + 0.1889320015907288 + <_> + + <_> + + + + <_>4 6 9 9 -1. + <_>4 9 9 3 3. + 0 + -0.0532660000026226 + 0.8138160109519959 + -0.2085369974374771 + <_> + + <_> + + + + <_>15 14 9 6 -1. + <_>15 16 9 2 3. + 0 + 7.1290000341832638e-003 + 0.3299610018730164 + -0.5993739962577820 + <_> + + <_> + + + + <_>1 12 9 6 -1. + <_>1 14 9 2 3. + 0 + -0.0224869996309280 + -1.2551610469818115 + -0.0204130001366138 + <_> + + <_> + + + + <_>3 10 20 8 -1. + <_>13 10 10 4 2. + <_>3 14 10 4 2. + 0 + -0.0823109969496727 + 1.3821430206298828 + 0.0593089982867241 + <_> + + <_> + + + + <_>2 0 9 18 -1. + <_>5 0 3 18 3. + 0 + 0.1309700012207031 + -0.0358439981937408 + -1.5396369695663452 + <_> + + <_> + + + + <_>13 11 9 10 -1. + <_>16 11 3 10 3. + 0 + 0.0142930001020432 + -0.1847520023584366 + 0.3745500147342682 + <_> + + <_> + + + + <_>1 2 8 5 -1. + <_>5 2 4 5 2. + 0 + 6.3479999080300331e-003 + -0.4490109980106354 + 0.1387699991464615 + <_> + + <_> + + + + <_>3 4 21 6 -1. + <_>10 4 7 6 3. + 0 + -0.0460550002753735 + 0.6783260107040405 + -0.0170719996094704 + <_> + + <_> + + + + <_>7 0 10 14 -1. + <_>7 0 5 7 2. + <_>12 7 5 7 2. + 0 + 0.0576939992606640 + -0.0119559997692704 + -1.2261159420013428 + <_> + + <_> + + + + <_>12 17 12 4 -1. + <_>12 19 12 2 2. + 0 + -6.0609998181462288e-003 + 0.3395859897136688 + 6.2800000887364149e-004 + <_> + + <_> + + + + <_>0 6 23 4 -1. + <_>0 8 23 2 2. + 0 + -0.0521630011498928 + -1.0621069669723511 + -0.0137799996882677 + <_> + + <_> + + + + <_>13 10 8 10 -1. + <_>17 10 4 5 2. + <_>13 15 4 5 2. + 0 + 0.0465729981660843 + 0.1453880071640015 + -1.2384550571441650 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + 7.5309998355805874e-003 + -0.2446770071983337 + 0.5137709975242615 + <_> + + <_> + + + + <_>15 16 9 4 -1. + <_>15 18 9 2 2. + 0 + 0.0216150004416704 + 0.1307259947061539 + -0.7099679708480835 + <_> + + <_> + + + + <_>0 16 9 4 -1. + <_>0 18 9 2 2. + 0 + -0.0178640000522137 + -1.0474660396575928 + 4.9599999329075217e-004 + <_> + + <_> + + + + <_>13 11 6 6 -1. + <_>13 11 3 6 2. + 0 + -0.0371950007975101 + -1.5126730203628540 + 0.1480139940977097 + <_> + + <_> + + + + <_>5 11 6 6 -1. + <_>8 11 3 6 2. + 0 + -3.1100001069717109e-004 + 0.1397150009870529 + -0.4686749875545502 + <_> + + <_> + + + + <_>0 3 24 6 -1. + <_>12 3 12 3 2. + <_>0 6 12 3 2. + 0 + 0.0250429995357990 + 0.2863200008869171 + -0.4179469943046570 + <_> + + <_> + + + + <_>2 4 18 3 -1. + <_>2 5 18 1 3. + 0 + 9.3449996784329414e-003 + -0.2733620107173920 + 0.4344469904899597 + <_> + + <_> + + + + <_>0 0 24 4 -1. + <_>12 0 12 2 2. + <_>0 2 12 2 2. + 0 + 0.0323639996349812 + 0.1843889951705933 + -0.9501929879188538 + <_> + + <_> + + + + <_>1 16 18 3 -1. + <_>1 17 18 1 3. + 0 + -6.2299999408423901e-003 + 0.3258199989795685 + -0.3081560134887695 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + 0.0514889992773533 + 0.1141600012779236 + -1.9795479774475098 + <_> + + <_> + + + + <_>0 15 9 6 -1. + <_>0 17 9 2 3. + 0 + -0.0264490004628897 + -1.1067299842834473 + -8.5519999265670776e-003 + <_> + + <_> + + + + <_>6 17 18 3 -1. + <_>6 18 18 1 3. + 0 + -0.0154200000688434 + 0.8013870120048523 + -0.0320350006222725 + <_> + + <_> + + + + <_>8 8 6 10 -1. + <_>10 8 2 10 3. + 0 + 0.0194569993764162 + -0.2644949853420258 + 0.3875389993190765 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + 0.0336209982633591 + 0.0160520002245903 + 0.5884090065956116 + <_> + + <_> + + + + <_>8 8 5 8 -1. + <_>8 12 5 4 2. 
+ 0 + 0.0289060007780790 + 0.0152160003781319 + -0.9472360014915466 + <_> + + <_> + + + + <_>12 8 6 8 -1. + <_>12 12 6 4 2. + 0 + 2.0300000323913991e-004 + -0.3076600134372711 + 0.2123589962720871 + <_> + + <_> + + + + <_>6 5 6 11 -1. + <_>8 5 2 11 3. + 0 + -0.0491419993340969 + -1.6058609485626221 + -0.0310949999839067 + <_> + + <_> + + + + <_>13 6 8 9 -1. + <_>13 9 8 3 3. + 0 + 0.0764259994029999 + 0.0747589990496635 + 1.1639410257339478 + <_> + + <_> + + + + <_>1 7 21 6 -1. + <_>1 9 21 2 3. + 0 + 0.0238979998975992 + -6.4320000819861889e-003 + -1.1150749921798706 + <_> + + <_> + + + + <_>15 5 3 12 -1. + <_>15 11 3 6 2. + 0 + 3.8970001041889191e-003 + -0.2410569936037064 + 0.2085890024900436 + <_> + + <_> + + + + <_>6 9 11 12 -1. + <_>6 13 11 4 3. + 0 + -0.0894450023770332 + 1.9157789945602417 + -0.1572110056877136 + <_> + + <_> + + + + <_>13 8 10 8 -1. + <_>18 8 5 4 2. + <_>13 12 5 4 2. + 0 + -0.0150089999660850 + -0.2517409920692444 + 0.1817989945411682 + <_> + + <_> + + + + <_>5 8 12 3 -1. + <_>11 8 6 3 2. + 0 + -0.0111459996551275 + -0.6934949755668640 + 0.0449279993772507 + <_> + + <_> + + + + <_>6 11 18 4 -1. + <_>12 11 6 4 3. + 0 + 0.0945789963006973 + 0.1810210049152374 + -0.7497860193252564 + <_> + + <_> + + + + <_>0 0 22 22 -1. + <_>0 11 22 11 2. + 0 + 0.5503889918327332 + -0.0309740006923676 + -1.6746139526367188 + <_> + + <_> + + + + <_>11 2 6 8 -1. + <_>11 6 6 4 2. + 0 + 0.0413810014724731 + 0.0639100000262260 + 0.7656120061874390 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + 0.0247719995677471 + 0.0113800000399351 + -0.8855940103530884 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + 0.0509990006685257 + 0.1489029973745346 + -2.4634211063385010 + <_> + + <_> + + + + <_>8 3 6 14 -1. + <_>8 3 3 7 2. + <_>11 10 3 7 2. + 0 + -0.0168939996510744 + 0.3887099921703339 + -0.2988030016422272 + <_> + + <_> + + + + <_>3 10 18 8 -1. + <_>9 10 6 8 3. + 0 + -0.1216230019927025 + -1.5542800426483154 + 0.1630080044269562 + <_> + + <_> + + + + <_>10 0 3 14 -1. + <_>10 7 3 7 2. + 0 + -3.6049999762326479e-003 + 0.2184280008077622 + -0.3731209933757782 + <_> + + <_> + + + + <_>4 3 16 20 -1. + <_>4 13 16 10 2. + 0 + 0.1157540008425713 + -0.0470610000193119 + 0.5940369963645935 + <_> + + <_> + + + + <_>9 4 6 10 -1. + <_>11 4 2 10 3. + 0 + 0.0369039997458458 + -0.2550860047340393 + 0.5539730191230774 + <_> + + <_> + + + + <_>5 0 16 4 -1. + <_>5 2 16 2 2. + 0 + 0.0114839999005198 + -0.1812949925661087 + 0.4068279862403870 + <_> + + <_> + + + + <_>2 5 18 4 -1. + <_>8 5 6 4 3. + 0 + -0.0202339999377728 + 0.5431119799613953 + -0.2382239997386932 + <_> + + <_> + + + + <_>13 0 6 9 -1. + <_>15 0 2 9 3. + 0 + -0.0287650004029274 + -0.6917229890823364 + 0.1594330072402954 + <_> + + <_> + + + + <_>8 4 8 5 -1. + <_>12 4 4 5 2. + 0 + -5.8320001699030399e-003 + 0.2944779992103577 + -0.3400599956512451 + <_> + + <_> + + + + <_>12 10 10 4 -1. + <_>12 10 5 4 2. + 0 + -0.0554689988493919 + 0.9220079779624939 + 0.0940930023789406 + <_> + + <_> + + + + <_>2 10 10 4 -1. + <_>7 10 5 4 2. + 0 + -0.0148010002449155 + -0.7953969836235046 + 0.0315219983458519 + <_> + + <_> + + + + <_>7 11 12 5 -1. + <_>11 11 4 5 3. + 0 + -7.0940000005066395e-003 + 0.3309600055217743 + -0.0508869998157024 + <_> + + <_> + + + + <_>3 10 8 10 -1. + <_>3 10 4 5 2. + <_>7 15 4 5 2. + 0 + -0.0451240018010139 + -1.3719749450683594 + -0.0214089993387461 + <_> + + <_> + + + + <_>11 12 9 8 -1. + <_>14 12 3 8 3. + 0 + 0.0643770024180412 + 0.0639019981026649 + 0.9147830009460449 + <_> + + <_> + + + + <_>0 21 24 3 -1. 
+ <_>8 21 8 3 3. + 0 + -0.0147270001471043 + 0.3605059981346130 + -0.2861450016498566 + <_> + + <_> + + + + <_>3 20 18 4 -1. + <_>9 20 6 4 3. + 0 + 0.0450070016086102 + -0.1561969965696335 + 0.5316029787063599 + <_> + + <_> + + + + <_>1 15 9 6 -1. + <_>1 17 9 2 3. + 0 + -1.1330000124871731e-003 + 0.1342290043830872 + -0.4435890018939972 + <_> + + <_> + + + + <_>11 17 10 4 -1. + <_>11 19 10 2 2. + 0 + 0.0494510009884834 + 0.1057180017232895 + -2.5589139461517334 + <_> + + <_> + + + + <_>9 12 4 12 -1. + <_>9 18 4 6 2. + 0 + 0.0291029997169971 + -0.0100880004465580 + -1.1073939800262451 + <_> + + <_> + + + + <_>9 6 9 6 -1. + <_>12 6 3 6 3. + 0 + 0.0347860008478165 + -2.7719999197870493e-003 + 0.5670099854469299 + <_> + + <_> + + + + <_>1 13 6 9 -1. + <_>1 16 6 3 3. + 0 + -6.1309998854994774e-003 + -0.4688940048217773 + 0.1263639926910400 + <_> + + <_> + + + + <_>6 16 12 4 -1. + <_>6 18 12 2 2. + 0 + 0.0155250001698732 + -8.4279999136924744e-003 + 0.8746920228004456 + <_> + + <_> + + + + <_>1 5 20 3 -1. + <_>1 6 20 1 3. + 0 + 2.9249999206513166e-003 + -0.3443430066108704 + 0.2085160017013550 + <_> + + <_> + + + + <_>8 1 9 9 -1. + <_>8 4 9 3 3. + 0 + -0.0535710006952286 + 1.4982949495315552 + 0.0573280006647110 + <_> + + <_> + + + + <_>2 19 9 4 -1. + <_>2 21 9 2 2. + 0 + -0.0192179996520281 + -0.9923409819602966 + -9.3919998034834862e-003 + <_> + + <_> + + + + <_>11 1 4 18 -1. + <_>11 7 4 6 3. + 0 + -0.0552829988300800 + -0.5768229961395264 + 0.1686059981584549 + <_> + + <_> + + + + <_>7 2 8 12 -1. + <_>7 2 4 6 2. + <_>11 8 4 6 2. + 0 + 0.0563360005617142 + -0.0337750017642975 + -1.3889650106430054 + <_> + + <_> + + + + <_>11 10 9 8 -1. + <_>14 10 3 8 3. + 0 + -0.0238240007311106 + 0.4018209874629974 + 1.8360000103712082e-003 + <_> + + <_> + + + + <_>5 11 12 5 -1. + <_>9 11 4 5 3. + 0 + 1.7810000572353601e-003 + 0.1814599931240082 + -0.4174340069293976 + <_> + + <_> + + + + <_>11 9 9 6 -1. + <_>14 9 3 6 3. + 0 + -0.0376890003681183 + 0.5468310117721558 + 0.0182199999690056 + <_> + + <_> + + + + <_>5 10 6 9 -1. + <_>7 10 2 9 3. + 0 + -0.0241449996829033 + 0.6835209727287293 + -0.1965020000934601 + -3.5645289421081543 + 11 + -1 + <_> + + + <_> + + <_> + + + + <_>4 7 5 12 -1. + <_>4 11 5 4 3. + 0 + 0.0274449996650219 + -0.8998420238494873 + 0.5187649726867676 + <_> + + <_> + + + + <_>2 0 21 6 -1. + <_>9 0 7 6 3. + 0 + 0.1155410036444664 + -0.5652440190315247 + 0.7055130004882813 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -0.0222970005124807 + 0.3607999980449677 + -0.6686459779739380 + <_> + + <_> + + + + <_>9 0 6 15 -1. + <_>11 0 2 15 3. + 0 + 0.0133250001817942 + -0.5557339787483215 + 0.3578999936580658 + <_> + + <_> + + + + <_>2 2 18 2 -1. + <_>2 3 18 1 2. + 0 + -3.8060001097619534e-003 + -1.0713000297546387 + 0.1885000020265579 + <_> + + <_> + + + + <_>8 17 8 6 -1. + <_>8 20 8 3 2. + 0 + -2.6819999329745770e-003 + -0.7158430218696594 + 0.2634449899196625 + <_> + + <_> + + + + <_>3 0 18 2 -1. + <_>3 1 18 1 2. + 0 + 3.3819999080151320e-003 + -0.4693079888820648 + 0.2665840089321137 + <_> + + <_> + + + + <_>8 0 9 6 -1. + <_>11 0 3 6 3. + 0 + 0.0376430004835129 + 0.2109870016574860 + -1.0804339647293091 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -0.0138619998469949 + 0.6691200137138367 + -0.2794280052185059 + <_> + + <_> + + + + <_>6 7 12 5 -1. + <_>10 7 4 5 3. + 0 + -2.7350001037120819e-003 + -0.9533230066299439 + 0.2405129969120026 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>2 3 2 9 3. 
+ 0 + -0.0383369997143745 + 0.8143280148506165 + -0.2491939961910248 + <_> + + <_> + + + + <_>20 2 4 9 -1. + <_>20 2 2 9 2. + 0 + -0.0346979983150959 + 1.2330100536346436 + 6.8600000813603401e-003 + <_> + + <_> + + + + <_>0 2 4 9 -1. + <_>2 2 2 9 2. + 0 + 0.0233609993010759 + -0.3079470098018646 + 0.7071449756622315 + <_> + + <_> + + + + <_>0 1 24 4 -1. + <_>12 1 12 2 2. + <_>0 3 12 2 2. + 0 + 0.0350579991936684 + 0.2120590060949326 + -1.4399830102920532 + <_> + + <_> + + + + <_>0 16 9 6 -1. + <_>0 18 9 2 3. + 0 + -0.0132569996640086 + -0.9026070237159729 + 0.0486100018024445 + <_> + + <_> + + + + <_>14 13 9 6 -1. + <_>14 15 9 2 3. + 0 + 0.0127400001510978 + 0.2265519946813583 + -0.4464380145072937 + <_> + + <_> + + + + <_>0 15 19 3 -1. + <_>0 16 19 1 3. + 0 + 3.6400000099092722e-003 + -0.3981789946556091 + 0.3466539978981018 + <_> + + <_> + + + + <_>1 5 22 12 -1. + <_>12 5 11 6 2. + <_>1 11 11 6 2. + 0 + 0.1006470024585724 + 0.1838359981775284 + -1.3410769701004028 + <_> + + <_> + + + + <_>5 13 6 6 -1. + <_>8 13 3 6 2. + 0 + 0. + 0.1553640067577362 + -0.5158249735832214 + <_> + + <_> + + + + <_>4 2 20 3 -1. + <_>4 3 20 1 3. + 0 + 0.0117089999839664 + 0.2165140062570572 + -0.7270519733428955 + <_> + + <_> + + + + <_>8 14 6 10 -1. + <_>10 14 2 10 3. + 0 + -0.0359649993479252 + -1.4789500236511230 + -0.0243170000612736 + <_> + + <_> + + + + <_>6 12 16 6 -1. + <_>14 12 8 3 2. + <_>6 15 8 3 2. + 0 + -0.0212360005825758 + -0.1684409976005554 + 0.1952659934759140 + <_> + + <_> + + + + <_>2 13 8 9 -1. + <_>2 16 8 3 3. + 0 + 0.0148740001022816 + 0.0373359993100166 + -0.8755729794502258 + <_> + + <_> + + + + <_>11 8 6 14 -1. + <_>14 8 3 7 2. + <_>11 15 3 7 2. + 0 + -5.1409997977316380e-003 + 0.3346650004386902 + -0.2410970032215118 + <_> + + <_> + + + + <_>2 12 16 6 -1. + <_>2 12 8 3 2. + <_>10 15 8 3 2. + 0 + 0.0234500002115965 + 5.5320002138614655e-003 + -1.2509720325469971 + <_> + + <_> + + + + <_>5 16 16 8 -1. + <_>5 20 16 4 2. + 0 + -0.0250620003789663 + 0.4521239995956421 + -0.0844699963927269 + <_> + + <_> + + + + <_>9 1 4 12 -1. + <_>9 7 4 6 2. + 0 + -7.7400001464411616e-004 + 0.1524990051984787 + -0.4848650097846985 + <_> + + <_> + + + + <_>8 2 8 10 -1. + <_>12 2 4 5 2. + <_>8 7 4 5 2. + 0 + -0.0404839999973774 + -1.3024920225143433 + 0.1798350065946579 + <_> + + <_> + + + + <_>6 6 12 6 -1. + <_>6 6 6 3 2. + <_>12 9 6 3 2. + 0 + 0.0281709991395473 + -0.2441090047359467 + 0.6227110028266907 + <_> + + <_> + + + + <_>10 7 6 9 -1. + <_>12 7 2 9 3. + 0 + 0.0456929989159107 + 0.0281220003962517 + 0.9239439964294434 + <_> + + <_> + + + + <_>0 0 8 12 -1. + <_>0 0 4 6 2. + <_>4 6 4 6 2. + 0 + 0.0397070012986660 + -0.2233279943466187 + 0.7767400145530701 + <_> + + <_> + + + + <_>18 8 6 9 -1. + <_>18 11 6 3 3. + 0 + 0.0505170002579689 + 0.2031999975442886 + -1.0895930528640747 + <_> + + <_> + + + + <_>2 12 6 6 -1. + <_>5 12 3 6 2. + 0 + -0.0172669999301434 + 0.6859840154647827 + -0.2330449968576431 + <_> + + <_> + + + + <_>3 21 21 3 -1. + <_>10 21 7 3 3. + 0 + 0.0801860019564629 + -0.0102920001372695 + 0.6188110113143921 + <_> + + <_> + + + + <_>2 0 16 6 -1. + <_>2 3 16 3 2. + 0 + 0.0976760014891624 + -0.2007029950618744 + 1.0088349580764771 + <_> + + <_> + + + + <_>13 6 7 6 -1. + <_>13 9 7 3 2. + 0 + -0.0155720002949238 + 0.4761529862880707 + 0.0456239990890026 + <_> + + <_> + + + + <_>6 4 4 14 -1. + <_>6 11 4 7 2. + 0 + -0.0153050003573298 + -1.1077369451522827 + 4.5239999890327454e-003 + <_> + + <_> + + + + <_>9 7 6 9 -1. + <_>11 7 2 9 3. 
+ 0 + -0.0164850000292063 + 1.0152939558029175 + 0.0163279995322227 + <_> + + <_> + + + + <_>7 8 6 14 -1. + <_>7 8 3 7 2. + <_>10 15 3 7 2. + 0 + -0.0261419992893934 + 0.4172329902648926 + -0.2864550054073334 + <_> + + <_> + + + + <_>18 8 4 16 -1. + <_>18 16 4 8 2. + 0 + 8.8679995387792587e-003 + 0.2140499949455261 + -0.1677280068397522 + <_> + + <_> + + + + <_>9 14 6 10 -1. + <_>11 14 2 10 3. + 0 + -0.0268869996070862 + -1.1564220190048218 + -0.0103240003809333 + <_> + + <_> + + + + <_>6 11 12 5 -1. + <_>10 11 4 5 3. + 0 + 7.7789998613297939e-003 + 0.3535949885845184 + -0.2961130142211914 + <_> + + <_> + + + + <_>0 12 23 3 -1. + <_>0 13 23 1 3. + 0 + -0.0159740000963211 + -1.5374109745025635 + -0.0299580004066229 + <_> + + <_> + + + + <_>13 0 6 12 -1. + <_>15 0 2 12 3. + 0 + 0.0208669994026423 + 0.2024410068988800 + -0.7127019762992859 + <_> + + <_> + + + + <_>0 10 12 5 -1. + <_>4 10 4 5 3. + 0 + 0.0854820013046265 + -0.0259329993277788 + -1.5156569480895996 + <_> + + <_> + + + + <_>13 2 10 4 -1. + <_>13 4 10 2 2. + 0 + 0.0238729994744062 + 0.1680340021848679 + -0.3880620002746582 + <_> + + <_> + + + + <_>5 0 6 12 -1. + <_>7 0 2 12 3. + 0 + -0.0391050018370152 + -1.1958349943161011 + -0.0203610006719828 + <_> + + <_> + + + + <_>11 6 9 6 -1. + <_>14 6 3 6 3. + 0 + -0.0779469981789589 + -1.0898950099945068 + 0.1453029960393906 + <_> + + <_> + + + + <_>4 6 9 6 -1. + <_>7 6 3 6 3. + 0 + -0.0168760009109974 + 0.2804970145225525 + -0.4133630096912384 + <_> + + <_> + + + + <_>6 11 18 13 -1. + <_>12 11 6 13 3. + 0 + 0.1187560036778450 + -0.0434909984469414 + 0.4126369953155518 + <_> + + <_> + + + + <_>0 11 18 13 -1. + <_>6 11 6 13 3. + 0 + 0.1562419980764389 + -0.2642959952354431 + 0.5512779951095581 + <_> + + <_> + + + + <_>12 16 12 6 -1. + <_>16 16 4 6 3. + 0 + -0.0459080003201962 + 0.6018919944763184 + 0.0189210008829832 + <_> + + <_> + + + + <_>0 6 21 3 -1. + <_>0 7 21 1 3. + 0 + -0.0103099998086691 + 0.3815299868583679 + -0.2950789928436279 + <_> + + <_> + + + + <_>12 16 12 6 -1. + <_>16 16 4 6 3. + 0 + 0.0957690030336380 + 0.1324650049209595 + -0.4626680016517639 + <_> + + <_> + + + + <_>5 7 6 14 -1. + <_>5 14 6 7 2. + 0 + 0.0136869996786118 + 0.1173869967460632 + -0.5166410207748413 + <_> + + <_> + + + + <_>5 10 19 2 -1. + <_>5 11 19 1 2. + 0 + 2.3990001063793898e-003 + -0.3400759994983673 + 0.2095350027084351 + <_> + + <_> + + + + <_>5 4 14 4 -1. + <_>5 6 14 2 2. + 0 + 0.0332649983465672 + -0.1705279946327210 + 1.4366799592971802 + <_> + + <_> + + + + <_>3 18 18 4 -1. + <_>9 18 6 4 3. + 0 + -0.0332060009241104 + 0.6129570007324219 + -0.0415499992668629 + <_> + + <_> + + + + <_>7 0 4 9 -1. + <_>9 0 2 9 2. + 0 + 2.7979998849332333e-003 + -0.4855430126190186 + 0.1337269991636276 + <_> + + <_> + + + + <_>13 3 11 4 -1. + <_>13 5 11 2 2. + 0 + -0.0657920017838478 + -4.0257668495178223 + 0.1087670028209686 + <_> + + <_> + + + + <_>2 0 9 6 -1. + <_>5 0 3 6 3. + 0 + 2.1430000197142363e-003 + -0.3917999863624573 + 0.2242709994316101 + <_> + + <_> + + + + <_>19 1 4 23 -1. + <_>19 1 2 23 2. + 0 + 0.0223639998584986 + -0.0864299982786179 + 0.3778519928455353 + <_> + + <_> + + + + <_>1 1 4 23 -1. + <_>3 1 2 23 2. + 0 + -0.0574100017547607 + 1.1454069614410400 + -0.1973659992218018 + <_> + + <_> + + + + <_>5 16 18 3 -1. + <_>5 17 18 1 3. + 0 + 6.6550001502037048e-003 + -0.0211050007492304 + 0.5845339894294739 + <_> + + <_> + + + + <_>0 3 11 4 -1. + <_>0 5 11 2 2. + 0 + 0.0123269995674491 + 0.0378170013427734 + -0.6698700189590454 + <_> + + <_> + + + + <_>2 16 20 3 -1. + <_>2 17 20 1 3. 
+ 0 + -8.1869997084140778e-003 + 0.5636600255966187 + -0.0768779963254929 + <_> + + <_> + + + + <_>5 3 13 4 -1. + <_>5 5 13 2 2. + 0 + 0.0366810001432896 + -0.1734330058097839 + 1.1670149564743042 + <_> + + <_> + + + + <_>1 9 22 15 -1. + <_>1 9 11 15 2. + 0 + -0.4022040069103241 + 1.2640819549560547 + 0.0433989986777306 + <_> + + <_> + + + + <_>3 4 14 3 -1. + <_>10 4 7 3 2. + 0 + -0.0221260003745556 + 0.6697810292243958 + -0.2160529941320419 + <_> + + <_> + + + + <_>8 7 10 4 -1. + <_>8 7 5 4 2. + 0 + -0.0131569998338819 + -0.4119859933853149 + 0.2021500021219254 + <_> + + <_> + + + + <_>6 7 10 4 -1. + <_>11 7 5 4 2. + 0 + -0.0128600001335144 + -0.9158269762992859 + 0.0392329990863800 + <_> + + <_> + + + + <_>10 4 6 9 -1. + <_>12 4 2 9 3. + 0 + 0.0216279998421669 + 3.8719999138265848e-003 + 0.3566820025444031 + <_> + + <_> + + + + <_>1 12 9 6 -1. + <_>4 12 3 6 3. + 0 + 0.0118960002437234 + -0.3730390071868897 + 0.1923509985208511 + <_> + + <_> + + + + <_>8 3 8 10 -1. + <_>12 3 4 5 2. + <_>8 8 4 5 2. + 0 + -0.0195489991456270 + -0.4237489998340607 + 0.2442959994077683 + <_> + + <_> + + + + <_>3 6 16 6 -1. + <_>3 6 8 3 2. + <_>11 9 8 3 2. + 0 + 0.0644449964165688 + -0.1655890047550201 + 1.2697030305862427 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>5 9 14 3 2. + 0 + 0.1089849993586540 + 0.1489430069923401 + -2.1534640789031982 + <_> + + <_> + + + + <_>4 3 9 6 -1. + <_>4 5 9 2 3. + 0 + -0.0340779982507229 + 1.3779460191726685 + -0.1619849950075150 + <_> + + <_> + + + + <_>6 3 18 2 -1. + <_>6 4 18 1 2. + 0 + -3.7489999085664749e-003 + -0.3382860124111176 + 0.2115290015935898 + <_> + + <_> + + + + <_>7 6 9 6 -1. + <_>10 6 3 6 3. + 0 + -0.0109719997271895 + 0.7651789784431458 + -0.1969259977340698 + <_> + + <_> + + + + <_>0 1 24 3 -1. + <_>0 2 24 1 3. + 0 + -0.0114850001409650 + -0.6927120089530945 + 0.2165710031986237 + <_> + + <_> + + + + <_>0 17 10 6 -1. + <_>0 19 10 2 3. + 0 + 0.0259840004146099 + -0.0119839999824762 + -0.9969729781150818 + <_> + + <_> + + + + <_>3 18 18 3 -1. + <_>3 19 18 1 3. + 0 + 4.2159999720752239e-003 + -0.1020570024847984 + 0.4888440072536469 + <_> + + <_> + + + + <_>2 5 6 16 -1. + <_>2 5 3 8 2. + <_>5 13 3 8 2. + 0 + -0.0476970002055168 + 1.0666010379791260 + -0.1757629960775375 + <_> + + <_> + + + + <_>7 6 11 6 -1. + <_>7 8 11 2 3. + 0 + 4.0300001273863018e-004 + 0.1852480024099350 + -0.7479000091552734 + <_> + + <_> + + + + <_>5 2 12 22 -1. + <_>5 13 12 11 2. + 0 + 0.1153960004448891 + -0.2201970070600510 + 0.5450999736785889 + <_> + + <_> + + + + <_>10 7 4 10 -1. + <_>10 12 4 5 2. + 0 + 0.0160210002213717 + 0.2548750042915344 + -0.5074009895324707 + <_> + + <_> + + + + <_>9 0 4 18 -1. + <_>9 6 4 6 3. + 0 + 0.0566320009529591 + -0.0112560000270605 + -0.9596809744834900 + <_> + + <_> + + + + <_>18 8 6 9 -1. + <_>18 11 6 3 3. + 0 + -0.0107260001823306 + -0.2854470014572144 + 0.1699479967355728 + <_> + + <_> + + + + <_>4 7 15 10 -1. + <_>9 7 5 10 3. + 0 + 0.1242000013589859 + -0.0361399985849857 + -1.3132710456848145 + <_> + + <_> + + + + <_>10 5 6 9 -1. + <_>12 5 2 9 3. + 0 + -5.3799999877810478e-003 + 0.3309270143508911 + 0.0133079998195171 + <_> + + <_> + + + + <_>9 9 6 10 -1. + <_>11 9 2 10 3. + 0 + 0.0119080003350973 + -0.3483029901981354 + 0.2404190003871918 + <_> + + <_> + + + + <_>11 14 6 10 -1. + <_>13 14 2 10 3. + 0 + -0.0430079996585846 + -1.4390469789505005 + 0.1559959948062897 + <_> + + <_> + + + + <_>7 14 6 10 -1. + <_>9 14 2 10 3. + 0 + -0.0331499986350536 + -1.1805850267410278 + -0.0123479999601841 + <_> + + <_> + + + + <_>4 8 16 9 -1. + <_>4 11 16 3 3. 
+ 0 + -0.0213419999927282 + 2.2119441032409668 + 0.0627370029687881 + <_> + + <_> + + + + <_>2 11 20 3 -1. + <_>2 12 20 1 3. + 0 + -0.0122189996764064 + -1.8709750175476074 + -0.0454999990761280 + <_> + + <_> + + + + <_>13 0 4 13 -1. + <_>13 0 2 13 2. + 0 + -0.0168609991669655 + -0.7691270112991333 + 0.1533000022172928 + <_> + + <_> + + + + <_>7 0 4 13 -1. + <_>9 0 2 13 2. + 0 + -2.4999999441206455e-003 + -0.6298739910125732 + 0.0516000017523766 + <_> + + <_> + + + + <_>3 1 18 7 -1. + <_>9 1 6 7 3. + 0 + -0.0450379997491837 + 0.8542889952659607 + 6.2600001692771912e-003 + <_> + + <_> + + + + <_>1 11 6 9 -1. + <_>1 14 6 3 3. + 0 + 0.0390579998493195 + -0.0324589982628822 + -1.3325669765472412 + <_> + + <_> + + + + <_>8 18 9 6 -1. + <_>8 20 9 2 3. + 0 + 6.6720000468194485e-003 + -0.1942359954118729 + 0.3732869923114777 + <_> + + <_> + + + + <_>3 9 15 6 -1. + <_>3 11 15 2 3. + 0 + -0.0163610000163317 + 2.0605869293212891 + -0.1504269987344742 + <_> + + <_> + + + + <_>5 10 19 2 -1. + <_>5 11 19 1 2. + 0 + 6.1719999648630619e-003 + -0.1161099970340729 + 0.2545540034770966 + <_> + + <_> + + + + <_>8 6 7 16 -1. + <_>8 14 7 8 2. + 0 + 0.0457220003008842 + -0.0163400005549192 + -1.0449140071868896 + <_> + + <_> + + + + <_>9 14 9 6 -1. + <_>9 16 9 2 3. + 0 + 4.1209999471902847e-003 + -0.0419979989528656 + 0.3968099951744080 + <_> + + <_> + + + + <_>0 7 8 12 -1. + <_>0 11 8 4 3. + 0 + -1.7800000205170363e-004 + -0.6642259955406189 + 0.0334430001676083 + <_> + + <_> + + + + <_>6 4 18 3 -1. + <_>6 5 18 1 3. + 0 + 7.1109998971223831e-003 + -0.0582319982349873 + 0.3785730004310608 + <_> + + <_> + + + + <_>0 16 12 6 -1. + <_>4 16 4 6 3. + 0 + -0.0498640015721321 + 0.6101940274238586 + -0.2100570052862167 + <_> + + <_> + + + + <_>13 13 9 4 -1. + <_>13 15 9 2 2. + 0 + -0.0250119995325804 + -0.5710009932518005 + 0.1784839928150177 + <_> + + <_> + + + + <_>5 8 14 14 -1. + <_>5 8 7 7 2. + <_>12 15 7 7 2. + 0 + 0.0309399999678135 + 0.0563630014657974 + -0.6473100185394287 + <_> + + <_> + + + + <_>1 16 22 6 -1. + <_>12 16 11 3 2. + <_>1 19 11 3 2. + 0 + 0.0462710000574589 + 0.1748239994049072 + -0.9890940189361572 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + -3.1870000530034304e-003 + -0.6680480241775513 + 0.0322670005261898 + <_> + + <_> + + + + <_>9 5 10 10 -1. + <_>14 5 5 5 2. + <_>9 10 5 5 2. + 0 + -0.0243519991636276 + 0.2944490015506744 + -1.3599999947473407e-003 + <_> + + <_> + + + + <_>5 5 10 10 -1. + <_>5 5 5 5 2. + <_>10 10 5 5 2. + 0 + 0.0119740003719926 + -0.2834509909152985 + 0.4717119932174683 + <_> + + <_> + + + + <_>4 6 16 6 -1. + <_>12 6 8 3 2. + <_>4 9 8 3 2. + 0 + 0.0130700003355742 + -0.1083460003137589 + 0.5719329714775085 + <_> + + <_> + + + + <_>0 7 6 9 -1. + <_>0 10 6 3 3. + 0 + 0.0591630004346371 + -0.0509390011429787 + -1.9059720039367676 + <_> + + <_> + + + + <_>16 10 8 14 -1. + <_>20 10 4 7 2. + <_>16 17 4 7 2. + 0 + -0.0410949997603893 + 0.4510459899902344 + -9.7599998116493225e-003 + <_> + + <_> + + + + <_>9 12 6 12 -1. + <_>9 18 6 6 2. + 0 + -0.0839890018105507 + -2.0349199771881104 + -0.0510190017521381 + <_> + + <_> + + + + <_>8 10 8 12 -1. + <_>12 10 4 6 2. + <_>8 16 4 6 2. + 0 + 0.0446190014481544 + 0.1704110056161881 + -1.2278720140457153 + <_> + + <_> + + + + <_>8 0 4 9 -1. + <_>10 0 2 9 2. + 0 + 0.0244190003722906 + -0.0217969994992018 + -1.0822949409484863 + <_> + + <_> + + + + <_>10 4 8 16 -1. + <_>14 4 4 8 2. + <_>10 12 4 8 2. + 0 + -4.3870001100003719e-003 + 0.3046669960021973 + -0.3706659972667694 + <_> + + <_> + + + + <_>7 10 10 6 -1. + <_>7 12 10 2 3. 
+ 0 + 0.0246079992502928 + -0.3116950094699860 + 0.2365729957818985 + <_> + + <_> + + + + <_>5 6 14 14 -1. + <_>12 6 7 7 2. + <_>5 13 7 7 2. + 0 + -0.0851820036768913 + -1.7982350587844849 + 0.1525429934263229 + <_> + + <_> + + + + <_>2 11 20 2 -1. + <_>2 12 20 1 2. + 0 + 0.0218449998646975 + -0.0518880002200603 + -1.9017189741134644 + <_> + + <_> + + + + <_>18 8 4 16 -1. + <_>18 16 4 8 2. + 0 + -0.0168290007859468 + 0.2102590054273605 + 0.0216569993644953 + <_> + + <_> + + + + <_>1 11 12 10 -1. + <_>1 11 6 5 2. + <_>7 16 6 5 2. + 0 + 0.0325479991734028 + -0.2029259949922562 + 0.6094400286674500 + <_> + + <_> + + + + <_>6 9 12 4 -1. + <_>6 11 12 2 2. + 0 + 2.4709999561309814e-003 + -0.9537119865417481 + 0.1856839954853058 + <_> + + <_> + + + + <_>9 12 6 7 -1. + <_>12 12 3 7 2. + 0 + 0.0554159991443157 + -0.1440529972314835 + 2.1506340503692627 + <_> + + <_> + + + + <_>10 4 8 16 -1. + <_>14 4 4 8 2. + <_>10 12 4 8 2. + 0 + -0.1063549965620041 + -1.0911970138549805 + 0.1322800070047379 + <_> + + <_> + + + + <_>6 4 8 16 -1. + <_>6 4 4 8 2. + <_>10 12 4 8 2. + 0 + -7.9889995977282524e-003 + 0.1025340035557747 + -0.5174490213394165 + <_> + + <_> + + + + <_>8 9 9 6 -1. + <_>11 9 3 6 3. + 0 + 0.0755679979920387 + 0.0589650012552738 + 1.2354209423065186 + <_> + + <_> + + + + <_>1 5 16 12 -1. + <_>1 5 8 6 2. + <_>9 11 8 6 2. + 0 + -0.0928059965372086 + -1.3431650400161743 + -0.0344629995524883 + <_> + + <_> + + + + <_>9 9 6 8 -1. + <_>9 9 3 8 2. + 0 + 0.0494319982826710 + 0.0496019981801510 + 1.6054730415344238 + <_> + + <_> + + + + <_>6 0 3 18 -1. + <_>7 0 1 18 3. + 0 + -0.0117729995399714 + -1.0261050462722778 + -4.1559999808669090e-003 + <_> + + <_> + + + + <_>17 9 5 14 -1. + <_>17 16 5 7 2. + 0 + 0.0858860015869141 + 0.0846429988741875 + 0.9522079825401306 + <_> + + <_> + + + + <_>2 9 5 14 -1. + <_>2 16 5 7 2. + 0 + 0.0810310021042824 + -0.1468710005283356 + 1.9359990358352661 + -3.7025990486145020 + 12 + -1 + <_> + + + <_> + + <_> + + + + <_>7 4 10 6 -1. + <_>7 7 10 3 2. + 0 + -0.0338409990072250 + 0.6588950157165527 + -0.6975529789924622 + <_> + + <_> + + + + <_>1 3 23 18 -1. + <_>1 9 23 6 3. + 0 + 0.0154100004583597 + -0.9072840213775635 + 0.3047859966754913 + <_> + + <_> + + + + <_>1 1 21 3 -1. + <_>8 1 7 3 3. + 0 + 0.0549059994518757 + -0.4977479875087738 + 0.5713260173797607 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0213900003582239 + -0.4256519973278046 + 0.5809680223464966 + <_> + + <_> + + + + <_>3 18 12 6 -1. + <_>3 18 6 3 2. + <_>9 21 6 3 2. + 0 + 7.8849997371435165e-003 + -0.4790599942207336 + 0.4301649928092957 + <_> + + <_> + + + + <_>16 8 8 16 -1. + <_>20 8 4 8 2. + <_>16 16 4 8 2. + 0 + -0.0375449992716312 + 0.5086159706115723 + -0.1998589932918549 + <_> + + <_> + + + + <_>0 19 24 4 -1. + <_>8 19 8 4 3. + 0 + 0.1592579931020737 + -0.2326360046863556 + 1.0993319749832153 + <_> + + <_> + + + + <_>16 8 8 16 -1. + <_>20 8 4 8 2. + <_>16 16 4 8 2. + 0 + -0.0689399987459183 + 0.4056900143623352 + 0.0568550005555153 + <_> + + <_> + + + + <_>0 8 8 16 -1. + <_>0 8 4 8 2. + <_>4 16 4 8 2. + 0 + -0.0336950011551380 + 0.4513280093669891 + -0.3333280086517334 + <_> + + <_> + + + + <_>8 12 8 10 -1. + <_>8 17 8 5 2. + 0 + -0.0633149966597557 + -0.8501570224761963 + 0.2234169989824295 + <_> + + <_> + + + + <_>5 7 5 8 -1. + <_>5 11 5 4 2. + 0 + 7.3699997738003731e-003 + -0.9308220148086548 + 0.0592169985175133 + <_> + + <_> + + + + <_>4 1 19 2 -1. + <_>4 2 19 1 2. + 0 + -9.5969997346401215e-003 + -1.2794899940490723 + 0.1844729930162430 + <_> + + <_> + + + + <_>0 12 24 9 -1. 
+ <_>8 12 8 9 3. + 0 + -0.1306799948215485 + 0.5842689871788025 + -0.2600719928741455 + <_> + + <_> + + + + <_>6 0 13 8 -1. + <_>6 4 13 4 2. + 0 + 0.0574029982089996 + -0.0537890009582043 + 0.7117559909820557 + <_> + + <_> + + + + <_>0 0 24 3 -1. + <_>0 1 24 1 3. + 0 + -7.2340001352131367e-003 + -0.8696219921112061 + 0.0752149969339371 + <_> + + <_> + + + + <_>20 3 4 11 -1. + <_>20 3 2 11 2. + 0 + 0.0310989990830421 + -0.0750069990754128 + 0.9078159928321838 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + 0.0358540005981922 + -0.2479549944400787 + 0.7227209806442261 + <_> + + <_> + + + + <_>6 11 12 8 -1. + <_>12 11 6 4 2. + <_>6 15 6 4 2. + 0 + -0.0315349996089935 + -1.1238329410552979 + 0.2098830044269562 + <_> + + <_> + + + + <_>0 8 12 6 -1. + <_>0 8 6 3 2. + <_>6 11 6 3 2. + 0 + -0.0194370001554489 + -1.4499390125274658 + -0.0151000004261732 + <_> + + <_> + + + + <_>6 17 18 3 -1. + <_>6 18 18 1 3. + 0 + -7.2420001961290836e-003 + 0.5386490225791931 + -0.1137539967894554 + <_> + + <_> + + + + <_>0 14 9 6 -1. + <_>0 16 9 2 3. + 0 + 8.1639997661113739e-003 + 0.0668890029191971 + -0.7687289714813232 + <_> + + <_> + + + + <_>20 3 4 9 -1. + <_>20 3 2 9 2. + 0 + -0.0436530001461506 + 1.1413530111312866 + 0.0402170009911060 + <_> + + <_> + + + + <_>0 3 4 9 -1. + <_>2 3 2 9 2. + 0 + 0.0265699997544289 + -0.2471909970045090 + 0.5929509997367859 + <_> + + <_> + + + + <_>15 0 9 19 -1. + <_>18 0 3 19 3. + 0 + 0.0322169996798038 + -0.0400249995291233 + 0.3268800079822540 + <_> + + <_> + + + + <_>0 0 9 19 -1. + <_>3 0 3 19 3. + 0 + -0.0722360014915466 + 0.5872939825057983 + -0.2539600133895874 + <_> + + <_> + + + + <_>13 11 6 8 -1. + <_>13 11 3 8 2. + 0 + 0.0314249992370605 + 0.1531510055065155 + -0.5604209899902344 + <_> + + <_> + + + + <_>5 11 6 8 -1. + <_>8 11 3 8 2. + 0 + -4.7699999413453043e-004 + 0.1695889979600906 + -0.5262669920921326 + <_> + + <_> + + + + <_>5 11 19 3 -1. + <_>5 12 19 1 3. + 0 + 2.7189999818801880e-003 + -0.1494459956884384 + 0.2965869903564453 + <_> + + <_> + + + + <_>3 20 18 4 -1. + <_>9 20 6 4 3. + 0 + 0.0328750014305115 + -0.3994350135326386 + 0.2515659928321838 + <_> + + <_> + + + + <_>6 6 16 6 -1. + <_>6 8 16 2 3. + 0 + -0.0145530002191663 + 0.2797259986400604 + -0.4720380008220673 + <_> + + <_> + + + + <_>6 0 9 6 -1. + <_>9 0 3 6 3. + 0 + 0.0380179993808270 + -2.9200001154094934e-003 + -1.1300059556961060 + <_> + + <_> + + + + <_>10 3 4 14 -1. + <_>10 10 4 7 2. + 0 + 2.8659999370574951e-003 + 0.4111180007457733 + -0.2622080147266388 + <_> + + <_> + + + + <_>1 5 15 12 -1. + <_>1 11 15 6 2. + 0 + -0.0416069999337196 + -1.4293819665908813 + -0.0191329997032881 + <_> + + <_> + + + + <_>11 12 8 5 -1. + <_>11 12 4 5 2. + 0 + -0.0248029995709658 + -0.2501359879970551 + 0.1597869992256165 + <_> + + <_> + + + + <_>5 0 6 9 -1. + <_>7 0 2 9 3. + 0 + 0.0100980000570416 + 0.0437389984726906 + -0.6998609900474548 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0209470000118017 + -0.9413779973983765 + 0.2320400029420853 + <_> + + <_> + + + + <_>5 5 12 8 -1. + <_>5 5 6 4 2. + <_>11 9 6 4 2. + 0 + 0.0224580001085997 + -0.2718580067157745 + 0.4531919956207275 + <_> + + <_> + + + + <_>13 12 11 6 -1. + <_>13 14 11 2 3. + 0 + -0.0371109992265701 + -1.0314660072326660 + 0.1442179977893829 + <_> + + <_> + + + + <_>0 13 21 3 -1. + <_>0 14 21 1 3. + 0 + -0.0106480000540614 + 0.6310700178146362 + -0.2552079856395721 + <_> + + <_> + + + + <_>8 1 8 12 -1. + <_>12 1 4 6 2. + <_>8 7 4 6 2. 
+ 0 + 0.0554229989647865 + 0.1620659977197647 + -1.7722640037536621 + <_> + + <_> + + + + <_>1 0 6 12 -1. + <_>1 0 3 6 2. + <_>4 6 3 6 2. + 0 + 0.0216019991785288 + -0.2501609921455383 + 0.5411980152130127 + <_> + + <_> + + + + <_>2 2 21 2 -1. + <_>2 3 21 1 2. + 0 + 8.7000000348780304e-005 + -0.2900890111923218 + 0.3350799977779388 + <_> + + <_> + + + + <_>2 2 19 3 -1. + <_>2 3 19 1 3. + 0 + 0.0144060002639890 + -7.8840004280209541e-003 + -1.1677219867706299 + <_> + + <_> + + + + <_>17 10 6 14 -1. + <_>20 10 3 7 2. + <_>17 17 3 7 2. + 0 + 0.1077739968895912 + 0.1129200011491776 + -2.4940319061279297 + <_> + + <_> + + + + <_>1 10 6 14 -1. + <_>1 10 3 7 2. + <_>4 17 3 7 2. + 0 + 0.0359439998865128 + -0.1948059946298599 + 0.9575750231742859 + <_> + + <_> + + + + <_>7 6 14 14 -1. + <_>14 6 7 7 2. + <_>7 13 7 7 2. + 0 + -3.9510000497102737e-003 + 0.3092780113220215 + -0.2553020119667053 + <_> + + <_> + + + + <_>0 12 9 6 -1. + <_>0 14 9 2 3. + 0 + 0.0209420006722212 + -7.6319999061524868e-003 + -1.0086350440979004 + <_> + + <_> + + + + <_>15 14 8 9 -1. + <_>15 17 8 3 3. + 0 + -0.0298779997974634 + -0.4602769911289215 + 0.1950719952583313 + <_> + + <_> + + + + <_>1 1 22 4 -1. + <_>1 1 11 2 2. + <_>12 3 11 2 2. + 0 + 0.0259719993919134 + -0.0121879996731877 + -1.0035500526428223 + <_> + + <_> + + + + <_>9 11 9 6 -1. + <_>9 13 9 2 3. + 0 + 0.0106030004099011 + -0.0759690031409264 + 0.4166989922523499 + <_> + + <_> + + + + <_>0 15 18 3 -1. + <_>0 16 18 1 3. + 0 + 8.5819996893405914e-003 + -0.2664859890937805 + 0.3911150097846985 + <_> + + <_> + + + + <_>16 14 7 9 -1. + <_>16 17 7 3 3. + 0 + 0.0212709996849298 + 0.1827390044927597 + -0.3605229854583740 + <_> + + <_> + + + + <_>4 3 16 4 -1. + <_>12 3 8 4 2. + 0 + 0.0745180025696754 + -0.1893839985132217 + 0.9265800118446350 + <_> + + <_> + + + + <_>7 6 12 5 -1. + <_>7 6 6 5 2. + 0 + 4.6569998376071453e-003 + -0.1450619995594025 + 0.3329460024833679 + <_> + + <_> + + + + <_>9 6 4 9 -1. + <_>11 6 2 9 2. + 0 + 1.7119999974966049e-003 + -0.5246400237083435 + 0.0898799970746040 + <_> + + <_> + + + + <_>12 1 4 10 -1. + <_>12 1 2 10 2. + 0 + 9.8500004969537258e-004 + -0.3838199973106384 + 0.2439299970865250 + <_> + + <_> + + + + <_>8 1 4 10 -1. + <_>10 1 2 10 2. + 0 + 0.0282339993864298 + -5.7879998348653316e-003 + -1.2617139816284180 + <_> + + <_> + + + + <_>15 15 6 9 -1. + <_>15 18 6 3 3. + 0 + -0.0326780006289482 + -0.5795329809188843 + 0.1695529967546463 + <_> + + <_> + + + + <_>3 15 6 9 -1. + <_>3 18 6 3 3. + 0 + 0.0225360002368689 + 0.0222810003906488 + -0.8786960244178772 + <_> + + <_> + + + + <_>15 1 3 19 -1. + <_>16 1 1 19 3. + 0 + -0.0216579996049404 + -0.6510850191116333 + 0.1296689957380295 + <_> + + <_> + + + + <_>1 3 6 9 -1. + <_>3 3 2 9 3. + 0 + 7.6799998059868813e-003 + -0.3396520018577576 + 0.2201330065727234 + <_> + + <_> + + + + <_>15 0 3 19 -1. + <_>16 0 1 19 3. + 0 + 0.0145920002833009 + 0.1507730036973953 + -0.5045239925384522 + <_> + + <_> + + + + <_>6 3 12 4 -1. + <_>12 3 6 4 2. + 0 + 0.0278680007904768 + -0.2504529953002930 + 0.4574199914932251 + <_> + + <_> + + + + <_>10 5 4 9 -1. + <_>10 5 2 9 2. + 0 + 5.6940000504255295e-003 + -0.1094850003719330 + 0.5575780272483826 + <_> + + <_> + + + + <_>6 0 3 19 -1. + <_>7 0 1 19 3. + 0 + -0.0100029995664954 + -0.9736629724502564 + 0.0184679999947548 + <_> + + <_> + + + + <_>11 1 3 12 -1. + <_>11 7 3 6 2. + 0 + -4.0719998069107533e-003 + 0.3822219967842102 + -0.1692110002040863 + <_> + + <_> + + + + <_>6 7 10 5 -1. + <_>11 7 5 5 2. 
+ 0 + -0.0225939992815256 + -1.0391089916229248 + 5.1839998923242092e-003 + <_> + + <_> + + + + <_>11 3 3 18 -1. + <_>12 3 1 18 3. + 0 + -0.0395799987018108 + -5.5109229087829590 + 0.1116399988532066 + <_> + + <_> + + + + <_>9 3 6 12 -1. + <_>11 3 2 12 3. + 0 + -0.0175379998981953 + 0.9548580050468445 + -0.1858450025320053 + <_> + + <_> + + + + <_>3 7 19 3 -1. + <_>3 8 19 1 3. + 0 + 9.0300003066658974e-003 + 0.0104360003024340 + 0.8211479783058167 + <_> + + <_> + + + + <_>2 7 18 3 -1. + <_>2 8 18 1 3. + 0 + -7.9539995640516281e-003 + 0.2263289988040924 + -0.3456819951534271 + <_> + + <_> + + + + <_>3 13 18 4 -1. + <_>12 13 9 2 2. + <_>3 15 9 2 2. + 0 + 0.0270910002291203 + 0.1643009930849075 + -1.3926379680633545 + <_> + + <_> + + + + <_>3 5 6 9 -1. + <_>5 5 2 9 3. + 0 + -0.0206259991973639 + -0.8636609911918640 + 2.3880000226199627e-003 + <_> + + <_> + + + + <_>4 1 20 4 -1. + <_>14 1 10 2 2. + <_>4 3 10 2 2. + 0 + -0.0719899982213974 + -2.8192629814147949 + 0.1157049983739853 + <_> + + <_> + + + + <_>0 1 20 4 -1. + <_>0 1 10 2 2. + <_>10 3 10 2 2. + 0 + -0.0269649997353554 + -1.2946130037307739 + -0.0246610008180141 + <_> + + <_> + + + + <_>10 15 6 6 -1. + <_>10 15 3 6 2. + 0 + -0.0473779998719692 + -0.8130639791488648 + 0.1183139979839325 + <_> + + <_> + + + + <_>0 2 24 8 -1. + <_>8 2 8 8 3. + 0 + -0.1089560016989708 + 0.6593790054321289 + -0.2084390074014664 + <_> + + <_> + + + + <_>5 5 18 3 -1. + <_>5 6 18 1 3. + 0 + 0.0135740004479885 + 7.4240001849830151e-003 + 0.5315219759941101 + <_> + + <_> + + + + <_>8 15 6 6 -1. + <_>11 15 3 6 2. + 0 + -6.6920001991093159e-003 + 0.3065580129623413 + -0.3108429908752441 + <_> + + <_> + + + + <_>11 12 8 5 -1. + <_>11 12 4 5 2. + 0 + -3.9070001803338528e-003 + 0.2557649910449982 + -0.0529320016503334 + <_> + + <_> + + + + <_>5 12 8 5 -1. + <_>9 12 4 5 2. + 0 + -0.0376130007207394 + -1.4350049495697021 + -0.0154480002820492 + <_> + + <_> + + + + <_>5 0 14 6 -1. + <_>5 2 14 2 3. + 0 + 8.6329998448491096e-003 + -0.1688439995050430 + 0.4212490022182465 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0320970006287098 + -0.6497939825057983 + 0.0411100015044212 + <_> + + <_> + + + + <_>10 7 5 12 -1. + <_>10 11 5 4 3. + 0 + 0.0584959983825684 + -0.0529639981687069 + 0.6336830258369446 + <_> + + <_> + + + + <_>7 9 8 14 -1. + <_>7 9 4 7 2. + <_>11 16 4 7 2. + 0 + -0.0409019999206066 + -0.9210109710693359 + 9.0640000998973846e-003 + <_> + + <_> + + + + <_>1 5 22 6 -1. + <_>12 5 11 3 2. + <_>1 8 11 3 2. + 0 + -0.0199250001460314 + 0.5375999808311462 + -0.0629969984292984 + <_> + + <_> + + + + <_>0 5 6 6 -1. + <_>0 8 6 3 2. + 0 + -4.6020001173019409e-003 + -0.5433350205421448 + 0.0841049998998642 + <_> + + <_> + + + + <_>12 17 9 4 -1. + <_>12 19 9 2 2. + 0 + 0.0168249998241663 + 0.1556369960308075 + -0.4017120003700256 + <_> + + <_> + + + + <_>2 18 19 3 -1. + <_>2 19 19 1 3. + 0 + 9.4790002331137657e-003 + -0.2424529939889908 + 0.5150949954986572 + <_> + + <_> + + + + <_>12 17 9 4 -1. + <_>12 19 9 2 2. + 0 + -0.0195349995046854 + -0.5111839771270752 + 0.1383199989795685 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>1 18 18 1 3. + 0 + 0.0107460003346205 + -0.2185499966144562 + 0.6282870173454285 + <_> + + <_> + + + + <_>12 17 9 4 -1. + <_>12 19 9 2 2. + 0 + 0.0379270017147064 + 0.1164029985666275 + -2.7301959991455078 + <_> + + <_> + + + + <_>0 0 24 3 -1. + <_>0 1 24 1 3. + 0 + 0.0163909997791052 + -0.0146359996870160 + -1.0797250270843506 + <_> + + <_> + + + + <_>5 0 14 4 -1. + <_>5 2 14 2 2. 
+ 0 + -0.0197850000113249 + 1.2166420221328735 + 0.0332750007510185 + <_> + + <_> + + + + <_>6 14 9 6 -1. + <_>6 16 9 2 3. + 0 + 0.0110670002177358 + -0.2538830041885376 + 0.4403859972953796 + <_> + + <_> + + + + <_>14 13 6 9 -1. + <_>14 16 6 3 3. + 0 + 5.2479999139904976e-003 + 0.2249680012464523 + -0.2421649992465973 + <_> + + <_> + + + + <_>5 20 13 4 -1. + <_>5 22 13 2 2. + 0 + -0.0111419996246696 + 0.2501809895038605 + -0.3081150054931641 + <_> + + <_> + + + + <_>9 9 6 12 -1. + <_>9 13 6 4 3. + 0 + -0.0106669999659061 + -0.3272910118103027 + 0.2616829872131348 + <_> + + <_> + + + + <_>1 10 21 3 -1. + <_>8 10 7 3 3. + 0 + 0.1054529994726181 + -0.0557500012218952 + -1.9605729579925537 + <_> + + <_> + + + + <_>8 8 9 6 -1. + <_>11 8 3 6 3. + 0 + 0.0548279993236065 + -1.9519999623298645e-003 + 0.7386609911918640 + <_> + + <_> + + + + <_>3 10 9 7 -1. + <_>6 10 3 7 3. + 0 + 0.0177609995007515 + -0.3064720034599304 + 0.2634699940681458 + <_> + + <_> + + + + <_>12 10 10 8 -1. + <_>17 10 5 4 2. + <_>12 14 5 4 2. + 0 + -0.0311859995126724 + -0.2460090070962906 + 0.1708219945430756 + <_> + + <_> + + + + <_>0 15 24 3 -1. + <_>8 15 8 3 3. + 0 + -0.0572960004210472 + 0.4703350067138672 + -0.2604829967021942 + <_> + + <_> + + + + <_>8 5 9 6 -1. + <_>8 7 9 2 3. + 0 + -0.0113120004534721 + 0.3862890005111694 + -0.2881700098514557 + <_> + + <_> + + + + <_>4 13 6 9 -1. + <_>4 16 6 3 3. + 0 + 0.0305920001119375 + -0.0488260015845299 + -1.7638969421386719 + <_> + + <_> + + + + <_>12 17 9 4 -1. + <_>12 19 9 2 2. + 0 + 1.8489999929443002e-003 + 0.2109989970922470 + -0.0259409993886948 + <_> + + <_> + + + + <_>9 12 6 6 -1. + <_>9 15 6 3 2. + 0 + 0.0114190001040697 + -0.1682959944009781 + 1.0278660058975220 + <_> + + <_> + + + + <_>9 9 14 10 -1. + <_>16 9 7 5 2. + <_>9 14 7 5 2. + 0 + 0.0814030021429062 + 0.1153199970722199 + -1.2482399940490723 + <_> + + <_> + + + + <_>1 9 14 10 -1. + <_>1 9 7 5 2. + <_>8 14 7 5 2. + 0 + 0.0534959994256496 + -0.0463039986789227 + -1.7165969610214233 + <_> + + <_> + + + + <_>8 7 9 17 -1. + <_>11 7 3 17 3. + 0 + -0.0239480007439852 + -0.4024659991264343 + 0.2056210041046143 + <_> + + <_> + + + + <_>3 4 6 20 -1. + <_>3 4 3 10 2. + <_>6 14 3 10 2. + 0 + 6.7690000869333744e-003 + -0.3315230011940002 + 0.2068340033292770 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 8 5 4 2. + 0 + -0.0323439985513687 + -0.7263280153274536 + 0.2007350027561188 + <_> + + <_> + + + + <_>10 7 4 9 -1. + <_>12 7 2 9 2. + 0 + 0.0378630012273788 + -0.1563100069761276 + 1.6697460412979126 + <_> + + <_> + + + + <_>10 15 6 9 -1. + <_>12 15 2 9 3. + 0 + 0.0154400002211332 + 0.1948740035295487 + -0.3538419902324677 + <_> + + <_> + + + + <_>3 8 6 16 -1. + <_>3 8 3 8 2. + <_>6 16 3 8 2. + 0 + -0.0443760007619858 + 0.8209360241889954 + -0.1819359958171845 + <_> + + <_> + + + + <_>12 17 9 4 -1. + <_>12 19 9 2 2. + 0 + -0.0231020003557205 + -0.4304409921169281 + 0.1237540021538734 + <_> + + <_> + + + + <_>3 17 9 4 -1. + <_>3 19 9 2 2. + 0 + 0.0194000005722046 + -0.0297260005027056 + -1.1597590446472168 + <_> + + <_> + + + + <_>10 1 9 6 -1. + <_>13 1 3 6 3. + 0 + 0.1038570031523705 + 0.1114989966154099 + -4.6835222244262695 + <_> + + <_> + + + + <_>5 7 4 10 -1. + <_>5 12 4 5 2. + 0 + -0.0189640000462532 + 2.1773819923400879 + -0.1454440057277679 + <_> + + <_> + + + + <_>7 5 12 6 -1. + <_>11 5 4 6 3. + 0 + 0.0387509986758232 + -0.0494460016489029 + 0.3401829898357391 + <_> + + <_> + + + + <_>6 4 9 8 -1. + <_>9 4 3 8 3. 
+ 0 + 0.0227669999003410 + -0.3280299901962280 + 0.3053140044212341 + <_> + + <_> + + + + <_>12 16 10 8 -1. + <_>17 16 5 4 2. + <_>12 20 5 4 2. + 0 + -0.0313570015132427 + 1.1520819664001465 + 0.0273059997707605 + <_> + + <_> + + + + <_>2 16 10 8 -1. + <_>2 16 5 4 2. + <_>7 20 5 4 2. + 0 + 9.6909999847412109e-003 + -0.3879950046539307 + 0.2151259928941727 + <_> + + <_> + + + + <_>0 0 24 4 -1. + <_>12 0 12 2 2. + <_>0 2 12 2 2. + 0 + -0.0492849983274937 + -1.6774909496307373 + 0.1577419936656952 + <_> + + <_> + + + + <_>0 6 9 6 -1. + <_>0 8 9 2 3. + 0 + -0.0395109988749027 + -0.9764789938926697 + -0.0105520002543926 + <_> + + <_> + + + + <_>0 4 24 6 -1. + <_>12 4 12 3 2. + <_>0 7 12 3 2. + 0 + 0.0479979999363422 + 0.2084390074014664 + -0.6899279952049255 + <_> + + <_> + + + + <_>5 0 11 4 -1. + <_>5 2 11 2 2. + 0 + 0.0514229983091354 + -0.1666530072689056 + 1.2149239778518677 + <_> + + <_> + + + + <_>1 1 22 4 -1. + <_>12 1 11 2 2. + <_>1 3 11 2 2. + 0 + 0.0142799997702241 + 0.2362769991159439 + -0.4139679968357086 + <_> + + <_> + + + + <_>9 6 6 18 -1. + <_>9 15 6 9 2. + 0 + -0.0916119962930679 + -0.9283090233802795 + -0.0183450002223253 + <_> + + <_> + + + + <_>2 9 20 4 -1. + <_>2 11 20 2 2. + 0 + 6.5080001950263977e-003 + -0.7364720106124878 + 0.1949709951877594 + <_> + + <_> + + + + <_>5 2 14 14 -1. + <_>5 9 14 7 2. + 0 + 0.0357230007648468 + 0.1419779956340790 + -0.4208930134773254 + <_> + + <_> + + + + <_>4 2 16 6 -1. + <_>4 5 16 3 2. + 0 + 0.0506380014121532 + 0.0116440001875162 + 0.7848659753799439 + <_> + + <_> + + + + <_>2 3 19 3 -1. + <_>2 4 19 1 3. + 0 + -0.0146139999851584 + -1.1909500360488892 + -0.0351280011236668 + <_> + + <_> + + + + <_>7 1 10 4 -1. + <_>7 3 10 2 2. + 0 + -0.0386629998683929 + 2.4314730167388916 + 0.0656479969620705 + <_> + + <_> + + + + <_>0 9 4 15 -1. + <_>0 14 4 5 3. + 0 + -0.0403469987213612 + 0.7175530195236206 + -0.1910829991102219 + <_> + + <_> + + + + <_>2 10 21 3 -1. + <_>2 11 21 1 3. + 0 + 0.0239020008593798 + 0.1564619988203049 + -0.7929480075836182 + -3.4265899658203125 + 13 + -1 + <_> + + + <_> + + <_> + + + + <_>3 0 6 6 -1. + <_>6 0 3 6 2. + 0 + 8.5640000179409981e-003 + -0.8145070075988770 + 0.5887529850006104 + <_> + + <_> + + + + <_>6 4 14 9 -1. + <_>6 7 14 3 3. + 0 + -0.1329260021448135 + 0.9321339726448059 + -0.2936730086803436 + <_> + + <_> + + + + <_>9 1 6 9 -1. + <_>11 1 2 9 3. + 0 + 9.8400004208087921e-003 + -0.5646290183067322 + 0.4164769947528839 + <_> + + <_> + + + + <_>15 8 9 9 -1. + <_>15 11 9 3 3. + 0 + 5.0889998674392700e-003 + -0.7923280000686646 + 0.1697500050067902 + <_> + + <_> + + + + <_>8 0 4 21 -1. + <_>8 7 4 7 3. + 0 + -0.0610390007495880 + -1.4169000387191772 + 0.0250209998339415 + <_> + + <_> + + + + <_>3 22 19 2 -1. + <_>3 23 19 1 2. + 0 + -4.6599999768659472e-004 + 0.3798249959945679 + -0.4156709909439087 + <_> + + <_> + + + + <_>2 15 20 3 -1. + <_>2 16 20 1 3. + 0 + 3.3889999613165855e-003 + -0.4076859951019287 + 0.3554849922657013 + <_> + + <_> + + + + <_>19 0 4 13 -1. + <_>19 0 2 13 2. + 0 + 0.0210069995373487 + -0.2408010065555573 + 0.8611270189285278 + <_> + + <_> + + + + <_>1 7 8 8 -1. + <_>1 11 8 4 2. + 0 + 7.5559997931122780e-003 + -0.8746719956398010 + 0.0985720008611679 + <_> + + <_> + + + + <_>14 14 6 9 -1. + <_>14 17 6 3 3. + 0 + 0.0247799996286631 + 0.1556620001792908 + -0.6922979950904846 + <_> + + <_> + + + + <_>4 14 6 9 -1. + <_>4 17 6 3 3. + 0 + -0.0356200002133846 + -1.1472270488739014 + 0.0363599993288517 + <_> + + <_> + + + + <_>14 5 4 10 -1. + <_>14 5 2 10 2. 
+ 0 + 0.0198100004345179 + 0.1551620066165924 + -0.6952009797096252 + <_> + + <_> + + + + <_>6 5 4 10 -1. + <_>8 5 2 10 2. + 0 + 0.0150199998170137 + 0.0419900007545948 + -0.9662280082702637 + <_> + + <_> + + + + <_>14 5 6 6 -1. + <_>14 8 6 3 2. + 0 + -0.0231379996985197 + 0.4339689910411835 + 2.4160000029951334e-003 + <_> + + <_> + + + + <_>4 5 6 6 -1. + <_>4 8 6 3 2. + 0 + -0.0187430009245873 + 0.4348109960556030 + -0.3252249956130981 + <_> + + <_> + + + + <_>0 2 24 21 -1. + <_>8 2 8 21 3. + 0 + 0.4508000016212463 + -0.0945739969611168 + 0.7242130041122437 + <_> + + <_> + + + + <_>1 2 6 13 -1. + <_>3 2 2 13 3. + 0 + 0.0118549996986985 + -0.3813309967517853 + 0.3009839951992035 + <_> + + <_> + + + + <_>20 0 4 21 -1. + <_>20 0 2 21 2. + 0 + -0.0248300004750490 + 0.8930060267448425 + -0.1029589995741844 + <_> + + <_> + + + + <_>0 4 4 20 -1. + <_>2 4 2 20 2. + 0 + -0.0447430014610291 + 0.8628029823303223 + -0.2171649932861328 + <_> + + <_> + + + + <_>8 16 9 6 -1. + <_>8 18 9 2 3. + 0 + -0.0146000003442168 + 0.6006940007209778 + -0.1590629965066910 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0245270002633333 + -1.5872869491577148 + -0.0218170005828142 + <_> + + <_> + + + + <_>16 12 7 9 -1. + <_>16 15 7 3 3. + 0 + 0.0230240002274513 + 0.1685339957475662 + -0.3810690045356751 + <_> + + <_> + + + + <_>5 21 14 3 -1. + <_>12 21 7 3 2. + 0 + -0.0249170009046793 + 0.5081089735031128 + -0.2727989852428436 + <_> + + <_> + + + + <_>11 5 6 9 -1. + <_>11 5 3 9 2. + 0 + 1.0130000300705433e-003 + -0.4313879907131195 + 0.2643809914588928 + <_> + + <_> + + + + <_>10 5 4 10 -1. + <_>12 5 2 10 2. + 0 + 0.0156030002981424 + -0.3162420094013214 + 0.5571590065956116 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + -0.0266859997063875 + 1.0553920269012451 + 0.0290740001946688 + <_> + + <_> + + + + <_>7 5 6 9 -1. + <_>10 5 3 9 2. + 0 + 1.3940000208094716e-003 + -0.7187380194664002 + 0.0653909966349602 + <_> + + <_> + + + + <_>14 14 10 4 -1. + <_>14 16 10 2 2. + 0 + -6.4799998654052615e-004 + 0.2488439977169037 + -0.2097820043563843 + <_> + + <_> + + + + <_>5 5 14 14 -1. + <_>5 5 7 7 2. + <_>12 12 7 7 2. + 0 + -0.0318880006670952 + -0.6884449720382690 + 0.0635899975895882 + <_> + + <_> + + + + <_>12 8 12 6 -1. + <_>18 8 6 3 2. + <_>12 11 6 3 2. + 0 + -4.9290000461041927e-003 + -0.5915250182151794 + 0.2794359922409058 + <_> + + <_> + + + + <_>6 6 12 12 -1. + <_>6 6 6 6 2. + <_>12 12 6 6 2. + 0 + 0.0311680007725954 + 0.0452239997684956 + -0.8863919973373413 + <_> + + <_> + + + + <_>11 13 6 10 -1. + <_>13 13 2 10 3. + 0 + -0.0336630009114742 + -0.6159020066261292 + 0.1574929952621460 + <_> + + <_> + + + + <_>1 10 20 8 -1. + <_>1 10 10 4 2. + <_>11 14 10 4 2. + 0 + 0.0119669996201992 + -0.3060669898986816 + 0.4229330122470856 + <_> + + <_> + + + + <_>15 13 9 6 -1. + <_>15 15 9 2 3. + 0 + -0.0346800014376640 + -1.3734940290451050 + 0.1590870022773743 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>9 3 6 3 3. + 0 + 9.9290004000067711e-003 + -0.5586019754409790 + 0.1211920008063316 + <_> + + <_> + + + + <_>10 1 5 14 -1. + <_>10 8 5 7 2. + 0 + 0.0595749989151955 + 4.9720001406967640e-003 + 0.8205540180206299 + <_> + + <_> + + + + <_>3 4 16 6 -1. + <_>3 6 16 2 3. + 0 + -0.0654280036687851 + 1.5651429891586304 + -0.1681749969720841 + <_> + + <_> + + + + <_>16 3 8 9 -1. + <_>16 6 8 3 3. + 0 + -0.0928959995508194 + -1.5794529914855957 + 0.1466179937124252 + <_> + + <_> + + + + <_>7 13 6 10 -1. + <_>9 13 2 10 3. 
+ 0 + -0.0411840006709099 + -1.5518720149993896 + -0.0299699995666742 + <_> + + <_> + + + + <_>15 13 9 6 -1. + <_>15 15 9 2 3. + 0 + 0.0214479994028807 + 0.1719630062580109 + -0.6934319734573364 + <_> + + <_> + + + + <_>0 13 9 6 -1. + <_>0 15 9 2 3. + 0 + -0.0255699995905161 + -1.3061310052871704 + -0.0243369992822409 + <_> + + <_> + + + + <_>13 16 9 6 -1. + <_>13 18 9 2 3. + 0 + -0.0412009991705418 + -1.3821059465408325 + 0.1480180025100708 + <_> + + <_> + + + + <_>2 16 9 6 -1. + <_>2 18 9 2 3. + 0 + -0.0176689997315407 + -0.7088999748229981 + 0.0365240015089512 + <_> + + <_> + + + + <_>5 16 18 3 -1. + <_>5 17 18 1 3. + 0 + 9.0060001239180565e-003 + -0.0409139990806580 + 0.8037310242652893 + <_> + + <_> + + + + <_>1 16 18 3 -1. + <_>1 17 18 1 3. + 0 + -0.0116529995575547 + 0.5754680037498474 + -0.2499170005321503 + <_> + + <_> + + + + <_>5 0 18 3 -1. + <_>5 1 18 1 3. + 0 + -7.4780001305043697e-003 + -0.4928089976310730 + 0.1981090009212494 + <_> + + <_> + + + + <_>1 1 19 2 -1. + <_>1 2 19 1 2. + 0 + 8.5499999113380909e-004 + -0.4885810017585754 + 0.1356309950351715 + <_> + + <_> + + + + <_>14 2 6 11 -1. + <_>16 2 2 11 3. + 0 + -0.0305380001664162 + -0.6027839779853821 + 0.1852200031280518 + <_> + + <_> + + + + <_>4 15 15 6 -1. + <_>9 15 5 6 3. + 0 + -0.0188469998538494 + 0.2356559932231903 + -0.3513630032539368 + <_> + + <_> + + + + <_>14 2 6 11 -1. + <_>16 2 2 11 3. + 0 + -8.1129996106028557e-003 + -0.0813049972057343 + 0.2106959968805313 + <_> + + <_> + + + + <_>4 2 6 11 -1. + <_>6 2 2 11 3. + 0 + -0.0348300002515316 + -1.2065670490264893 + -0.0142519995570183 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>18 5 6 3 3. + 0 + 0.0190210007131100 + 0.2334990054368973 + -0.4566490054130554 + <_> + + <_> + + + + <_>1 2 22 4 -1. + <_>1 2 11 2 2. + <_>12 4 11 2 2. + 0 + -0.0190040003508329 + -0.8107579946517944 + 0.0131400004029274 + <_> + + <_> + + + + <_>2 0 21 12 -1. + <_>9 0 7 12 3. + 0 + -0.0890579968690872 + 0.6154239773750305 + 0.0329830013215542 + <_> + + <_> + + + + <_>0 12 18 3 -1. + <_>0 13 18 1 3. + 0 + 6.8620000965893269e-003 + -0.2958309948444367 + 0.2700369954109192 + <_> + + <_> + + + + <_>12 2 6 9 -1. + <_>14 2 2 9 3. + 0 + -0.0282409992069006 + -0.6110270023345947 + 0.1735749989748001 + <_> + + <_> + + + + <_>3 10 18 3 -1. + <_>3 11 18 1 3. + 0 + -3.2099999953061342e-004 + -0.5332289934158325 + 0.0685390010476112 + <_> + + <_> + + + + <_>16 3 8 9 -1. + <_>16 6 8 3 3. + 0 + -0.1082910001277924 + -1.2879559993743896 + 0.1180170029401779 + <_> + + <_> + + + + <_>3 7 18 3 -1. + <_>3 8 18 1 3. + 0 + 0.0158789996057749 + -0.1707260012626648 + 1.1103910207748413 + <_> + + <_> + + + + <_>9 11 6 9 -1. + <_>11 11 2 9 3. + 0 + 8.6859995499253273e-003 + -0.1099509969353676 + 0.4601050019264221 + <_> + + <_> + + + + <_>9 8 6 9 -1. + <_>11 8 2 9 3. + 0 + -0.0252349991351366 + 1.0220669507980347 + -0.1869429945945740 + <_> + + <_> + + + + <_>15 0 2 18 -1. + <_>15 0 1 18 2. + 0 + -0.0135089997202158 + -0.7831659913063049 + 0.1420260071754456 + <_> + + <_> + + + + <_>7 0 2 18 -1. + <_>8 0 1 18 2. + 0 + -7.7149998396635056e-003 + -0.8806070089340210 + 0.0110600003972650 + <_> + + <_> + + + + <_>17 3 7 9 -1. + <_>17 6 7 3 3. + 0 + 0.0715800002217293 + 0.1136939972639084 + -1.1032789945602417 + <_> + + <_> + + + + <_>3 18 9 6 -1. + <_>3 20 9 2 3. + 0 + -0.0135540002956986 + -0.8109650015830994 + 3.4080001059919596e-003 + <_> + + <_> + + + + <_>3 18 21 3 -1. + <_>3 19 21 1 3. + 0 + 2.9450000729411840e-003 + -0.0728799998760223 + 0.3499810099601746 + <_> + + <_> + + + + <_>0 3 7 9 -1. + <_>0 6 7 3 3. 
+ 0 + -0.0508330017328262 + -1.2868590354919434 + -0.0288420002907515 + <_> + + <_> + + + + <_>2 7 22 3 -1. + <_>2 8 22 1 3. + 0 + -8.7989997118711472e-003 + 0.4761359989643097 + -0.1469040066003799 + <_> + + <_> + + + + <_>0 3 24 16 -1. + <_>0 3 12 8 2. + <_>12 11 12 8 2. + 0 + 0.2142439931631088 + -0.0597020015120506 + -2.4802260398864746 + <_> + + <_> + + + + <_>13 17 9 4 -1. + <_>13 19 9 2 2. + 0 + 0.0139629999175668 + 0.1742029935121536 + -0.4391100108623505 + <_> + + <_> + + + + <_>5 5 12 8 -1. + <_>5 5 6 4 2. + <_>11 9 6 4 2. + 0 + 0.0425020009279251 + -0.1996529996395111 + 0.7065479755401611 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>12 6 7 3 2. + <_>5 9 7 3 2. + 0 + 0.0198279991745949 + -0.0691360011696815 + 0.6164339780807495 + <_> + + <_> + + + + <_>5 16 14 6 -1. + <_>5 16 7 3 2. + <_>12 19 7 3 2. + 0 + -0.0335600003600121 + -1.2740780115127563 + -0.0256730001419783 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>18 5 6 3 3. + 0 + 0.0635429993271828 + 0.1240350008010864 + -1.0776289701461792 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 0.0219330005347729 + 0.0149520002305508 + -0.7102349996566773 + <_> + + <_> + + + + <_>3 4 20 10 -1. + <_>13 4 10 5 2. + <_>3 9 10 5 2. + 0 + -0.0784249976277351 + 0.6203399896621704 + 0.0336109995841980 + <_> + + <_> + + + + <_>2 13 9 8 -1. + <_>5 13 3 8 3. + 0 + 0.0143900001421571 + -0.3632459938526154 + 0.1730830073356628 + <_> + + <_> + + + + <_>2 1 21 15 -1. + <_>9 1 7 15 3. + 0 + -0.0673099979758263 + 0.5237410068511963 + 0.0127999996766448 + <_> + + <_> + + + + <_>5 12 14 8 -1. + <_>12 12 7 8 2. + 0 + 0.1304749995470047 + -0.1712249964475632 + 1.1235200166702271 + <_> + + <_> + + + + <_>6 7 12 4 -1. + <_>6 7 6 4 2. + 0 + -0.0462459996342659 + -1.1908329725265503 + 0.1742559969425201 + <_> + + <_> + + + + <_>6 5 9 6 -1. + <_>9 5 3 6 3. + 0 + -0.0298420004546642 + 0.8393059968948364 + -0.1806419938802719 + <_> + + <_> + + + + <_>13 11 6 6 -1. + <_>13 11 3 6 2. + 0 + -3.8099999073892832e-004 + 0.3553279936313629 + -0.2384230047464371 + <_> + + <_> + + + + <_>5 11 6 6 -1. + <_>8 11 3 6 2. + 0 + -0.0223789997398853 + -0.8794389963150024 + -7.8399997437372804e-004 + <_> + + <_> + + + + <_>6 4 18 2 -1. + <_>6 5 18 1 2. + 0 + -1.5569999814033508e-003 + -0.1425330042839050 + 0.2587620019912720 + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>2 2 2 11 3. + 0 + 0.0120130004361272 + -0.2901549935340881 + 0.2605110108852387 + <_> + + <_> + + + + <_>18 0 6 15 -1. + <_>20 0 2 15 3. + 0 + 0.0243849996477365 + -0.0314389988780022 + 0.5869590044021606 + <_> + + <_> + + + + <_>0 0 6 13 -1. + <_>2 0 2 13 3. + 0 + -0.0471809990704060 + 0.6943010091781616 + -0.2181610018014908 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0248939990997314 + -0.6459929943084717 + 0.1561159938573837 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + 0.0219449996948242 + -0.0277420002967119 + -1.1346880197525024 + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>8 2 8 4 3. + 0 + 0.1880989968776703 + -0.0100760003551841 + 1.2429029941558838 + <_> + + <_> + + + + <_>3 13 18 4 -1. + <_>12 13 9 4 2. + 0 + -0.0778720006346703 + 0.8500800132751465 + -0.1901549994945526 + <_> + + <_> + + + + <_>9 7 10 4 -1. + <_>9 7 5 4 2. + 0 + -0.0487690009176731 + -2.0763080120086670 + 0.1217940002679825 + <_> + + <_> + + + + <_>5 8 12 3 -1. + <_>11 8 6 3 2. + 0 + -0.0171150006353855 + -0.8568729758262634 + 7.8760003671050072e-003 + <_> + + <_> + + + + <_>4 14 19 3 -1. + <_>4 15 19 1 3. 
+ 0 + -2.7499999850988388e-003 + 0.3864549994468689 + -0.1139149963855743 + <_> + + <_> + + + + <_>10 0 4 20 -1. + <_>10 10 4 10 2. + 0 + -0.0987939983606339 + -1.7233899831771851 + -0.0560630001127720 + <_> + + <_> + + + + <_>8 15 9 6 -1. + <_>8 17 9 2 3. + 0 + -0.0219369996339083 + 0.5474939942359924 + -0.0424819998443127 + <_> + + <_> + + + + <_>2 9 15 4 -1. + <_>7 9 5 4 3. + 0 + 0.0610969997942448 + -0.0389450006186962 + -1.0807880163192749 + <_> + + <_> + + + + <_>8 4 12 7 -1. + <_>12 4 4 7 3. + 0 + -0.0245639998465776 + 0.5831109881401062 + -9.7599998116493225e-004 + <_> + + <_> + + + + <_>0 10 6 9 -1. + <_>0 13 6 3 3. + 0 + 0.0337520018219948 + -0.0137959998100996 + -0.8473029732704163 + <_> + + <_> + + + + <_>18 5 6 9 -1. + <_>18 8 6 3 3. + 0 + 0.0381990000605583 + 0.1511429995298386 + -0.7947340011596680 + <_> + + <_> + + + + <_>0 18 16 6 -1. + <_>0 18 8 3 2. + <_>8 21 8 3 2. + 0 + -0.0201179999858141 + 0.5157909989356995 + -0.2144539952278137 + <_> + + <_> + + + + <_>9 18 14 6 -1. + <_>16 18 7 3 2. + <_>9 21 7 3 2. + 0 + 0.0247349999845028 + -0.0221050009131432 + 0.4291769862174988 + <_> + + <_> + + + + <_>1 20 20 4 -1. + <_>1 20 10 2 2. + <_>11 22 10 2 2. + 0 + -0.0243570003658533 + -0.8620129823684692 + -3.6760000512003899e-003 + <_> + + <_> + + + + <_>2 8 20 6 -1. + <_>12 8 10 3 2. + <_>2 11 10 3 2. + 0 + -0.0264420006424189 + -0.4539749920368195 + 0.2246280014514923 + <_> + + <_> + + + + <_>7 8 6 9 -1. + <_>9 8 2 9 3. + 0 + -3.4429999068379402e-003 + 0.1307300031185150 + -0.3862270116806030 + <_> + + <_> + + + + <_>8 5 12 8 -1. + <_>12 5 4 8 3. + 0 + 0.1070170029997826 + 0.1315860003232956 + -0.7930690050125122 + <_> + + <_> + + + + <_>4 5 12 8 -1. + <_>8 5 4 8 3. + 0 + 0.0451529994606972 + -0.2529680132865906 + 0.4067240059375763 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + 0.0443499982357025 + 0.0226130001246929 + 0.7961810231208801 + <_> + + <_> + + + + <_>2 0 6 16 -1. + <_>4 0 2 16 3. + 0 + 1.0839999886229634e-003 + -0.3915840089321137 + 0.1163910031318665 + <_> + + <_> + + + + <_>15 4 6 12 -1. + <_>15 8 6 4 3. + 0 + 0.0714330002665520 + 0.0824669972062111 + 1.2530590295791626 + <_> + + <_> + + + + <_>3 4 6 12 -1. + <_>3 8 6 4 3. + 0 + 0.0358380004763603 + -0.1820330023765564 + 0.7707870006561279 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + -0.0208390001207590 + -0.6174439787864685 + 0.1589139997959137 + <_> + + <_> + + + + <_>4 0 15 22 -1. + <_>4 11 15 11 2. + 0 + 0.4252580106258392 + -0.0489780008792877 + -1.8422030210494995 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + 0.0114080002531409 + 0.1791819930076599 + -0.1538349986076355 + <_> + + <_> + + + + <_>0 12 9 6 -1. + <_>0 14 9 2 3. + 0 + -0.0153649998828769 + -0.8401650190353394 + -1.0280000278726220e-003 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + -0.0152120003476739 + -0.1899569928646088 + 0.1713099926710129 + <_> + + <_> + + + + <_>0 15 9 6 -1. + <_>0 17 9 2 3. + 0 + -0.0189720001071692 + -0.7954199910163879 + 6.6800001077353954e-003 + <_> + + <_> + + + + <_>10 0 8 10 -1. + <_>14 0 4 5 2. + <_>10 5 4 5 2. + 0 + -3.3330000005662441e-003 + -0.2353080064058304 + 0.2473009973764420 + <_> + + <_> + + + + <_>1 0 4 16 -1. + <_>3 0 2 16 2. + 0 + 0.0932480022311211 + -0.0547580011188984 + -1.8324300050735474 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -0.0125550003722310 + 0.2638520002365112 + -0.3852640092372894 + <_> + + <_> + + + + <_>10 12 4 10 -1. + <_>10 17 4 5 2. 
+ 0 + -0.0270700007677078 + -0.6692979931831360 + 0.0203409995883703 + <_> + + <_> + + + + <_>8 4 10 6 -1. + <_>8 6 10 2 3. + 0 + -0.0236770007759333 + 0.6726530194282532 + -0.0143440002575517 + <_> + + <_> + + + + <_>3 22 18 2 -1. + <_>12 22 9 2 2. + 0 + -0.0142750004306436 + 0.3018639981746674 + -0.2851440012454987 + <_> + + <_> + + + + <_>7 7 11 6 -1. + <_>7 9 11 2 3. + 0 + 0.0280969999730587 + 0.1476600021123886 + -1.4078520536422729 + <_> + + <_> + + + + <_>0 0 12 10 -1. + <_>0 0 6 5 2. + <_>6 5 6 5 2. + 0 + 0.0508400015532970 + -0.1861360073089600 + 0.7995300292968750 + <_> + + <_> + + + + <_>10 1 12 6 -1. + <_>16 1 6 3 2. + <_>10 4 6 3 2. + 0 + 0.0115059996023774 + 0.1911839991807938 + -0.0850350037217140 + <_> + + <_> + + + + <_>7 16 9 4 -1. + <_>7 18 9 2 2. + 0 + -0.0146610001102090 + 0.4523929953575134 + -0.2220519930124283 + <_> + + <_> + + + + <_>5 7 15 16 -1. + <_>10 7 5 16 3. + 0 + 0.2284249961376190 + 0.1348839998245239 + -1.2894610166549683 + <_> + + <_> + + + + <_>5 10 12 13 -1. + <_>11 10 6 13 2. + 0 + 0.1110690012574196 + -0.2075379937887192 + 0.5456159710884094 + <_> + + <_> + + + + <_>6 2 12 6 -1. + <_>12 2 6 3 2. + <_>6 5 6 3 2. + 0 + 3.2450000289827585e-003 + 0.3205370008945465 + -0.1640350073575974 + <_> + + <_> + + + + <_>3 9 12 9 -1. + <_>3 12 12 3 3. + 0 + 0.0853099972009659 + -0.2021050006151199 + 0.5329679846763611 + <_> + + <_> + + + + <_>16 2 8 6 -1. + <_>16 5 8 3 2. + 0 + 0.0220480002462864 + 0.1569859981536865 + -0.1701409965753555 + <_> + + <_> + + + + <_>0 2 8 6 -1. + <_>0 5 8 3 2. + 0 + -0.0156769994646311 + -0.6286349892616272 + 0.0407619997859001 + <_> + + <_> + + + + <_>0 3 24 11 -1. + <_>0 3 12 11 2. + 0 + 0.3311290144920349 + 0.1660930067300797 + -1.0326379537582397 + <_> + + <_> + + + + <_>0 13 8 10 -1. + <_>0 13 4 5 2. + <_>4 18 4 5 2. + 0 + 8.8470000773668289e-003 + -0.2507619857788086 + 0.3166059851646423 + <_> + + <_> + + + + <_>10 14 4 10 -1. + <_>10 19 4 5 2. + 0 + 0.0460800006985664 + 0.1535210013389587 + -1.6333500146865845 + <_> + + <_> + + + + <_>10 2 4 21 -1. + <_>10 9 4 7 3. + 0 + -0.0377030000090599 + 0.5687379837036133 + -0.2010259926319122 + -3.5125269889831543 + 14 + -1 + <_> + + + <_> + + <_> + + + + <_>4 4 15 9 -1. + <_>4 7 15 3 3. + 0 + -0.0818089991807938 + 0.5712479948997498 + -0.6743879914283752 + <_> + + <_> + + + + <_>0 1 24 6 -1. + <_>8 1 8 6 3. + 0 + 0.2176119983196259 + -0.3861019909381867 + 0.9034399986267090 + <_> + + <_> + + + + <_>9 6 5 16 -1. + <_>9 14 5 8 2. + 0 + 0.0148780001327395 + 0.2224159985780716 + -1.2779350280761719 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>9 21 6 3 3. + 0 + 0.0524349994957447 + -0.2869040071964264 + 0.7574229836463928 + <_> + + <_> + + + + <_>6 5 3 12 -1. + <_>6 11 3 6 2. + 0 + 9.1429995372891426e-003 + -0.6488040089607239 + 0.2226880043745041 + <_> + + <_> + + + + <_>11 6 4 9 -1. + <_>11 6 2 9 2. + 0 + 7.9169999808073044e-003 + -0.2925359904766083 + 0.3103019893169403 + <_> + + <_> + + + + <_>5 6 9 8 -1. + <_>8 6 3 8 3. + 0 + -0.0260840002447367 + 0.4553270041942596 + -0.3850060105323792 + <_> + + <_> + + + + <_>4 3 20 2 -1. + <_>4 4 20 1 2. + 0 + -2.9400000348687172e-003 + -0.5126439929008484 + 0.2743229866027832 + <_> + + <_> + + + + <_>2 10 18 3 -1. + <_>8 10 6 3 3. + 0 + 0.0571300014853477 + 0.0157880000770092 + -1.2133100032806396 + <_> + + <_> + + + + <_>7 15 10 6 -1. + <_>7 17 10 2 3. + 0 + -6.1309998854994774e-003 + 0.3917460143566132 + -0.3086679875850678 + <_> + + <_> + + + + <_>1 4 4 18 -1. + <_>1 4 2 9 2. + <_>3 13 2 9 2. 
+ 0 + -0.0404050014913082 + 1.1901949644088745 + -0.2034710049629211 + <_> + + <_> + + + + <_>13 0 6 9 -1. + <_>15 0 2 9 3. + 0 + -0.0202970001846552 + -0.6823949813842773 + 0.2045869976282120 + <_> + + <_> + + + + <_>5 0 6 9 -1. + <_>7 0 2 9 3. + 0 + -0.0171889998018742 + -0.8493989706039429 + 0.0384330004453659 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -0.0242159999907017 + -1.1039420366287231 + 0.1597509980201721 + <_> + + <_> + + + + <_>6 7 9 6 -1. + <_>9 7 3 6 3. + 0 + 0.0568690001964569 + -0.1959529966115952 + 1.1806850433349609 + <_> + + <_> + + + + <_>3 0 18 2 -1. + <_>3 1 18 1 2. + 0 + 3.6199999158270657e-004 + -0.4084779918193817 + 0.3293859958648682 + <_> + + <_> + + + + <_>0 10 20 4 -1. + <_>0 10 10 2 2. + <_>10 12 10 2 2. + 0 + 9.9790003150701523e-003 + -0.2967300117015839 + 0.4154790043830872 + <_> + + <_> + + + + <_>10 2 4 12 -1. + <_>10 8 4 6 2. + 0 + -0.0526250004768372 + -1.3069299459457397 + 0.1786260008811951 + <_> + + <_> + + + + <_>6 5 6 12 -1. + <_>6 5 3 6 2. + <_>9 11 3 6 2. + 0 + -0.0137489996850491 + 0.2366580069065094 + -0.4453659951686859 + <_> + + <_> + + + + <_>6 0 18 22 -1. + <_>15 0 9 11 2. + <_>6 11 9 11 2. + 0 + -0.0305170007050037 + 0.2901830077171326 + -0.1121010035276413 + <_> + + <_> + + + + <_>0 0 18 22 -1. + <_>0 0 9 11 2. + <_>9 11 9 11 2. + 0 + -0.3003750145435333 + -2.4237680435180664 + -0.0428309999406338 + <_> + + <_> + + + + <_>18 2 6 11 -1. + <_>20 2 2 11 3. + 0 + -0.0359909981489182 + 0.8820649981498718 + -0.0470129996538162 + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>2 2 2 11 3. + 0 + -0.0551120005548000 + 0.8011900186538696 + -0.2049099951982498 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + 0.0337620005011559 + 0.1461759954690933 + -1.1349489688873291 + <_> + + <_> + + + + <_>0 0 20 3 -1. + <_>0 1 20 1 3. + 0 + -8.2710003480315208e-003 + -0.8160489797592163 + 0.0189880002290010 + <_> + + <_> + + + + <_>2 2 20 2 -1. + <_>2 3 20 1 2. + 0 + -5.4399999789893627e-003 + -0.7098090052604675 + 0.2234369963407517 + <_> + + <_> + + + + <_>1 10 18 2 -1. + <_>1 11 18 1 2. + 0 + 3.1059999018907547e-003 + -0.7280859947204590 + 0.0402249991893768 + <_> + + <_> + + + + <_>18 7 6 9 -1. + <_>18 10 6 3 3. + 0 + 0.0536519996821880 + 0.1717090010643005 + -1.1163710355758667 + <_> + + <_> + + + + <_>0 0 22 9 -1. + <_>0 3 22 3 3. + 0 + -0.1254139989614487 + 2.7680370807647705 + -0.1461150050163269 + <_> + + <_> + + + + <_>17 3 6 9 -1. + <_>17 6 6 3 3. + 0 + 0.0925420001149178 + 0.1160980015993118 + -3.9635529518127441 + <_> + + <_> + + + + <_>0 7 6 9 -1. + <_>0 10 6 3 3. + 0 + 0.0385139994323254 + -7.6399999670684338e-003 + -0.9878090023994446 + <_> + + <_> + + + + <_>0 6 24 6 -1. + <_>0 8 24 2 3. + 0 + -2.0200000144541264e-003 + 0.2305999994277954 + -0.7497029900550842 + <_> + + <_> + + + + <_>0 2 6 10 -1. + <_>2 2 2 10 3. + 0 + 9.7599998116493225e-003 + -0.3113799989223480 + 0.3028779923915863 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + 0.0240950006991625 + -0.0495299994945526 + 0.5269010066986084 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0179820004850626 + -1.1610640287399292 + -5.7000000961124897e-003 + <_> + + <_> + + + + <_>15 0 6 9 -1. + <_>17 0 2 9 3. + 0 + -0.0105550000444055 + -0.2718909978866577 + 0.2359769940376282 + <_> + + <_> + + + + <_>3 0 6 9 -1. + <_>5 0 2 9 3. + 0 + -7.2889998555183411e-003 + -0.5421910285949707 + 0.0819140002131462 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. 
+ 0 + 0.0239390004426241 + 0.1797579973936081 + -0.6704949736595154 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -0.0183659996837378 + 0.6266430020332336 + -0.2097010016441345 + <_> + + <_> + + + + <_>15 14 9 6 -1. + <_>15 16 9 2 3. + 0 + 0.0157159995287657 + 0.2419369965791702 + -1.0444309711456299 + <_> + + <_> + + + + <_>0 15 23 6 -1. + <_>0 17 23 2 3. + 0 + -0.0488040000200272 + -0.9406059980392456 + -3.7519999314099550e-003 + <_> + + <_> + + + + <_>5 15 18 3 -1. + <_>5 16 18 1 3. + 0 + 6.7130001261830330e-003 + -0.0754320025444031 + 0.6157529950141907 + <_> + + <_> + + + + <_>0 14 9 6 -1. + <_>0 16 9 2 3. + 0 + 9.7770001739263535e-003 + 0.0392850004136562 + -0.8481029868125916 + <_> + + <_> + + + + <_>9 8 8 10 -1. + <_>13 8 4 5 2. + <_>9 13 4 5 2. + 0 + 0.0147449998185039 + 0.1696899980306625 + -0.5090640187263489 + <_> + + <_> + + + + <_>3 7 15 6 -1. + <_>8 7 5 6 3. + 0 + 0.0970790013670921 + -0.0331030003726482 + -1.2706379890441895 + <_> + + <_> + + + + <_>9 8 8 10 -1. + <_>13 8 4 5 2. + <_>9 13 4 5 2. + 0 + 0.0482859984040260 + 0.0943299978971481 + 2.7203190326690674 + <_> + + <_> + + + + <_>5 0 6 12 -1. + <_>8 0 3 12 2. + 0 + 9.7810002043843269e-003 + -0.3953340053558350 + 0.1536380052566528 + <_> + + <_> + + + + <_>9 8 8 10 -1. + <_>13 8 4 5 2. + <_>9 13 4 5 2. + 0 + -0.0398939996957779 + -0.2276740074157715 + 0.1391399949789047 + <_> + + <_> + + + + <_>8 5 6 9 -1. + <_>10 5 2 9 3. + 0 + 0.0228480007499456 + -0.2739199995994568 + 0.3419950008392334 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>12 6 2 9 2. + <_>10 15 2 9 2. + 0 + 6.7179999314248562e-003 + -0.1087429970502853 + 0.4812540113925934 + <_> + + <_> + + + + <_>5 7 12 4 -1. + <_>11 7 6 4 2. + 0 + 0.0595999993383884 + -0.0495220012962818 + -2.0117089748382568 + <_> + + <_> + + + + <_>9 8 8 10 -1. + <_>13 8 4 5 2. + <_>9 13 4 5 2. + 0 + 6.9340001791715622e-003 + 0.1503749936819077 + -0.1127189993858337 + <_> + + <_> + + + + <_>7 8 8 10 -1. + <_>7 8 4 5 2. + <_>11 13 4 5 2. + 0 + 0.0157570000737906 + -0.0208850000053644 + -1.1651979684829712 + <_> + + <_> + + + + <_>11 10 6 14 -1. + <_>14 10 3 7 2. + <_>11 17 3 7 2. + 0 + -0.0496900007128716 + -0.8021349906921387 + 0.1437229961156845 + <_> + + <_> + + + + <_>9 5 6 19 -1. + <_>12 5 3 19 2. + 0 + 0.0523470006883144 + -0.2083670049905777 + 0.6167759895324707 + <_> + + <_> + + + + <_>6 12 12 6 -1. + <_>12 12 6 3 2. + <_>6 15 6 3 2. + 0 + 0.0224309992045164 + 0.2030590027570725 + -0.7532619833946228 + <_> + + <_> + + + + <_>1 9 18 6 -1. + <_>1 9 9 3 2. + <_>10 12 9 3 2. + 0 + 0.0411420017480850 + -0.1811819970607758 + 1.0033359527587891 + <_> + + <_> + + + + <_>16 14 8 10 -1. + <_>20 14 4 5 2. + <_>16 19 4 5 2. + 0 + -0.0216320008039474 + 0.4999899864196777 + -0.0346629992127419 + <_> + + <_> + + + + <_>0 9 22 8 -1. + <_>0 9 11 4 2. + <_>11 13 11 4 2. + 0 + -0.0828080028295517 + 1.1711900234222412 + -0.1843360066413879 + <_> + + <_> + + + + <_>8 18 12 6 -1. + <_>14 18 6 3 2. + <_>8 21 6 3 2. + 0 + 8.5060000419616699e-003 + -0.0632250010967255 + 0.2902489900588989 + <_> + + <_> + + + + <_>0 6 20 18 -1. + <_>0 6 10 9 2. + <_>10 15 10 9 2. + 0 + 0.0789050012826920 + -0.2327450066804886 + 0.5969579815864563 + <_> + + <_> + + + + <_>3 6 20 12 -1. + <_>13 6 10 6 2. + <_>3 12 10 6 2. + 0 + -0.0902070030570030 + -0.8221189975738525 + 0.1777220070362091 + <_> + + <_> + + + + <_>0 16 10 8 -1. + <_>0 16 5 4 2. + <_>5 20 5 4 2. + 0 + -0.0292690005153418 + 0.6086069941520691 + -0.2146890014410019 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. 
+ 0 + 6.9499998353421688e-003 + -0.0426659993827343 + 0.6051210165023804 + <_> + + <_> + + + + <_>0 11 19 3 -1. + <_>0 12 19 1 3. + 0 + -8.0629996955394745e-003 + -1.1508270502090454 + -0.0272860005497932 + <_> + + <_> + + + + <_>14 6 6 9 -1. + <_>14 9 6 3 3. + 0 + 0.0195959992706776 + -9.1880001127719879e-003 + 0.5685780048370361 + <_> + + <_> + + + + <_>1 7 22 4 -1. + <_>1 7 11 2 2. + <_>12 9 11 2 2. + 0 + -0.0148849999532104 + 0.3765879869461060 + -0.2714950144290924 + <_> + + <_> + + + + <_>13 6 7 12 -1. + <_>13 10 7 4 3. + 0 + 0.0252170003950596 + -0.0999910011887550 + 0.2466470003128052 + <_> + + <_> + + + + <_>4 7 11 9 -1. + <_>4 10 11 3 3. + 0 + -0.0158559996634722 + 0.6682670116424561 + -0.2061470001935959 + <_> + + <_> + + + + <_>12 10 10 8 -1. + <_>17 10 5 4 2. + <_>12 14 5 4 2. + 0 + 0.0294410008937120 + 0.1583220064640045 + -0.7606089711189270 + <_> + + <_> + + + + <_>2 12 9 7 -1. + <_>5 12 3 7 3. + 0 + -8.5279997438192368e-003 + 0.3821229934692383 + -0.2540780007839203 + <_> + + <_> + + + + <_>16 14 6 9 -1. + <_>16 17 6 3 3. + 0 + 0.0244219992309809 + 0.1510509997606278 + -0.2875289916992188 + <_> + + <_> + + + + <_>3 12 6 12 -1. + <_>3 16 6 4 3. + 0 + -0.0338869988918304 + -0.6800280213356018 + 0.0343270003795624 + <_> + + <_> + + + + <_>14 13 6 6 -1. + <_>14 16 6 3 2. + 0 + -2.0810000132769346e-003 + 0.2541390061378479 + -0.2685909867286682 + <_> + + <_> + + + + <_>8 0 6 9 -1. + <_>10 0 2 9 3. + 0 + 0.0303589999675751 + -0.0308420006185770 + -1.1476809978485107 + <_> + + <_> + + + + <_>9 1 6 23 -1. + <_>11 1 2 23 3. + 0 + 4.0210001170635223e-003 + -0.3525379896163940 + 0.2986809909343720 + <_> + + <_> + + + + <_>0 16 9 6 -1. + <_>0 18 9 2 3. + 0 + 0.0276810005307198 + -0.0381489992141724 + -1.3262039422988892 + <_> + + <_> + + + + <_>4 17 18 3 -1. + <_>4 18 18 1 3. + 0 + 7.9039996489882469e-003 + -0.0237370003014803 + 0.7050300240516663 + <_> + + <_> + + + + <_>5 2 13 14 -1. + <_>5 9 13 7 2. + 0 + 0.0440310016274452 + 0.1067489981651306 + -0.4526120126247406 + <_> + + <_> + + + + <_>15 0 8 12 -1. + <_>19 0 4 6 2. + <_>15 6 4 6 2. + 0 + -0.0323709994554520 + 0.4667490124702454 + -0.0615469999611378 + <_> + + <_> + + + + <_>0 0 8 12 -1. + <_>0 0 4 6 2. + <_>4 6 4 6 2. + 0 + 0.0209330003708601 + -0.2844789922237396 + 0.4384559988975525 + <_> + + <_> + + + + <_>8 2 8 7 -1. + <_>8 2 4 7 2. + 0 + 0.0252279993146658 + -0.0225370004773140 + 0.7038909792900085 + <_> + + <_> + + + + <_>1 1 6 9 -1. + <_>3 1 2 9 3. + 0 + 6.5520000644028187e-003 + -0.3255490064620972 + 0.2402369976043701 + <_> + + <_> + + + + <_>14 8 6 12 -1. + <_>17 8 3 6 2. + <_>14 14 3 6 2. + 0 + -0.0585579983890057 + -1.2227720022201538 + 0.1166879981756210 + <_> + + <_> + + + + <_>4 8 6 12 -1. + <_>4 8 3 6 2. + <_>7 14 3 6 2. + 0 + 0.0318999998271465 + -0.0193050000816584 + -1.0973169803619385 + <_> + + <_> + + + + <_>16 5 5 15 -1. + <_>16 10 5 5 3. + 0 + -0.0304450001567602 + 0.6558250188827515 + 0.0750909969210625 + <_> + + <_> + + + + <_>3 5 5 15 -1. + <_>3 10 5 5 3. + 0 + 0.0149330003187060 + -0.5215579867362976 + 0.1152309998869896 + <_> + + <_> + + + + <_>18 4 6 9 -1. + <_>18 7 6 3 3. + 0 + -0.0490080006420612 + -0.7830399870872498 + 0.1665720045566559 + <_> + + <_> + + + + <_>1 7 6 15 -1. + <_>1 12 6 5 3. + 0 + 0.0831589996814728 + -2.6879999786615372e-003 + -0.8528230190277100 + <_> + + <_> + + + + <_>11 15 12 8 -1. + <_>17 15 6 4 2. + <_>11 19 6 4 2. + 0 + 0.0239029992371798 + -0.0510109998285770 + 0.4199909865856171 + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>0 2 12 2 2. + <_>12 4 12 2 2. 
+ 0 + 0.0164289996027946 + 0.0192329995334148 + -0.6504909992218018 + <_> + + <_> + + + + <_>15 1 2 19 -1. + <_>15 1 1 19 2. + 0 + -0.0118380002677441 + -0.6240980029106140 + 0.1541119962930679 + <_> + + <_> + + + + <_>7 1 2 19 -1. + <_>8 1 1 19 2. + 0 + -1.6799999866634607e-004 + 0.1758919954299927 + -0.3433870077133179 + <_> + + <_> + + + + <_>22 1 2 20 -1. + <_>22 1 1 20 2. + 0 + 0.0191939994692802 + 0.0434189997613430 + 0.7906919717788696 + <_> + + <_> + + + + <_>0 1 2 20 -1. + <_>1 1 1 20 2. + 0 + -0.0100320000201464 + 0.4564889967441559 + -0.2249480038881302 + <_> + + <_> + + + + <_>18 11 6 12 -1. + <_>20 11 2 12 3. + 0 + -0.0140040004625916 + 0.3357099890708923 + -4.8799999058246613e-003 + <_> + + <_> + + + + <_>0 11 6 12 -1. + <_>2 11 2 12 3. + 0 + -0.1031989976763725 + -2.3378000259399414 + -0.0589330010116100 + <_> + + <_> + + + + <_>3 6 18 14 -1. + <_>3 13 18 7 2. + 0 + -0.0956970006227493 + -0.6615390181541443 + 0.2009859979152679 + <_> + + <_> + + + + <_>6 10 7 8 -1. + <_>6 14 7 4 2. + 0 + -0.0414809994399548 + 0.4593920111656189 + -0.2231409996747971 + <_> + + <_> + + + + <_>7 9 12 12 -1. + <_>7 13 12 4 3. + 0 + 2.4099999573081732e-003 + -0.2689859867095947 + 0.2492299973964691 + <_> + + <_> + + + + <_>2 18 18 5 -1. + <_>11 18 9 5 2. + 0 + 0.1072499975562096 + -0.1864019930362701 + 0.7276980280876160 + <_> + + <_> + + + + <_>4 21 20 3 -1. + <_>4 22 20 1 3. + 0 + 3.1870000530034304e-003 + -0.0246089994907379 + 0.2864390015602112 + <_> + + <_> + + + + <_>9 12 6 12 -1. + <_>9 12 3 6 2. + <_>12 18 3 6 2. + 0 + 0.0291670002043247 + -0.0346830002963543 + -1.1162580251693726 + <_> + + <_> + + + + <_>4 6 18 3 -1. + <_>4 7 18 1 3. + 0 + 0.0112870000302792 + 6.3760001212358475e-003 + 0.6663209795951843 + <_> + + <_> + + + + <_>3 6 18 3 -1. + <_>3 7 18 1 3. + 0 + -0.0120010003447533 + 0.4242010116577148 + -0.2627980113029480 + <_> + + <_> + + + + <_>18 4 6 9 -1. + <_>18 7 6 3 3. + 0 + -0.0126959998160601 + -0.0219570007175207 + 0.1893679946660996 + <_> + + <_> + + + + <_>2 12 9 6 -1. + <_>2 14 9 2 3. + 0 + 0.0245970003306866 + -0.0349639989435673 + -1.0989320278167725 + <_> + + <_> + + + + <_>4 14 18 4 -1. + <_>13 14 9 2 2. + <_>4 16 9 2 2. + 0 + 0.0459530018270016 + 0.1110979989171028 + -2.9306049346923828 + <_> + + <_> + + + + <_>7 7 6 14 -1. + <_>7 7 3 7 2. + <_>10 14 3 7 2. + 0 + -0.0272410009056330 + 0.2910169959068298 + -0.2740789949893951 + <_> + + <_> + + + + <_>7 13 12 6 -1. + <_>13 13 6 3 2. + <_>7 16 6 3 2. + 0 + 0.0400639995932579 + 0.1187790036201477 + -0.6280180215835571 + <_> + + <_> + + + + <_>6 7 12 9 -1. + <_>10 7 4 9 3. + 0 + 0.0230550002306700 + 0.1481380015611649 + -0.3700749874114990 + <_> + + <_> + + + + <_>12 12 6 6 -1. + <_>12 12 3 6 2. + 0 + -0.0237370003014803 + -0.5372480154037476 + 0.1935819983482361 + <_> + + <_> + + + + <_>0 2 4 10 -1. + <_>0 7 4 5 2. + 0 + 0.0775220021605492 + -0.0601940006017685 + -1.9489669799804688 + <_> + + <_> + + + + <_>8 0 9 6 -1. + <_>11 0 3 6 3. + 0 + -0.0133450003340840 + -0.4522959887981415 + 0.1874150037765503 + <_> + + <_> + + + + <_>2 9 12 6 -1. + <_>2 12 12 3 2. + 0 + -0.0217199996113777 + 1.2144249677658081 + -0.1536580026149750 + <_> + + <_> + + + + <_>13 10 6 9 -1. + <_>13 13 6 3 3. + 0 + -0.0714749991893768 + -2.3047130107879639 + 0.1099990010261536 + <_> + + <_> + + + + <_>5 10 6 9 -1. + <_>5 13 6 3 3. + 0 + -5.4999999701976776e-003 + -0.7185519933700562 + 0.0201009996235371 + <_> + + <_> + + + + <_>9 15 9 6 -1. + <_>9 17 9 2 3. 
+ 0 + 0.0267409998923540 + 0.0735450014472008 + 0.9878600239753723 + <_> + + <_> + + + + <_>5 16 12 6 -1. + <_>5 19 12 3 2. + 0 + -0.0394079983234406 + -1.2227380275726318 + -0.0435069985687733 + <_> + + <_> + + + + <_>3 2 20 3 -1. + <_>3 3 20 1 3. + 0 + 0.0258889999240637 + 0.1340930014848709 + -1.1770780086517334 + <_> + + <_> + + + + <_>2 5 12 6 -1. + <_>6 5 4 6 3. + 0 + 0.0489250011742115 + -0.0308100003749132 + -0.9347950220108032 + <_> + + <_> + + + + <_>11 0 3 24 -1. + <_>12 0 1 24 3. + 0 + 0.0368929989635944 + 0.1333370059728622 + -1.4998290538787842 + <_> + + <_> + + + + <_>3 16 15 4 -1. + <_>8 16 5 4 3. + 0 + 0.0789299979805946 + -0.1453880071640015 + 1.5631790161132813 + <_> + + <_> + + + + <_>9 12 6 12 -1. + <_>9 18 6 6 2. + 0 + 0.0290060006082058 + 0.1938370019197464 + -0.6764280200004578 + <_> + + <_> + + + + <_>1 15 12 8 -1. + <_>1 15 6 4 2. + <_>7 19 6 4 2. + 0 + 6.3089998438954353e-003 + -0.3746539950370789 + 0.1085750013589859 + <_> + + <_> + + + + <_>15 10 8 14 -1. + <_>19 10 4 7 2. + <_>15 17 4 7 2. + 0 + -0.0658309981226921 + 0.8105940222740173 + 0.0302019994705915 + <_> + + <_> + + + + <_>1 9 8 14 -1. + <_>1 9 4 7 2. + <_>5 16 4 7 2. + 0 + -0.0689650028944016 + 0.8377259969711304 + -0.1714099943637848 + <_> + + <_> + + + + <_>9 11 9 10 -1. + <_>9 16 9 5 2. + 0 + -0.1166910007596016 + -0.9464719891548157 + 0.1312319934368134 + <_> + + <_> + + + + <_>6 7 12 6 -1. + <_>6 9 12 2 3. + 0 + -1.3060000492259860e-003 + 0.0460079982876778 + -0.5201159715652466 + <_> + + <_> + + + + <_>10 15 6 9 -1. + <_>12 15 2 9 3. + 0 + -0.0445589981973171 + -1.9423669576644897 + 0.1320070028305054 + <_> + + <_> + + + + <_>7 8 9 7 -1. + <_>10 8 3 7 3. + 0 + 0.0510330013930798 + -0.2148099988698959 + 0.4867390096187592 + <_> + + <_> + + + + <_>10 4 8 10 -1. + <_>14 4 4 5 2. + <_>10 9 4 5 2. + 0 + -0.0315780006349087 + 0.5998979806900024 + 7.9159997403621674e-003 + <_> + + <_> + + + + <_>4 6 6 9 -1. + <_>4 9 6 3 3. + 0 + 0.0210200008004904 + -0.2206950038671494 + 0.5404620170593262 + <_> + + <_> + + + + <_>0 6 24 12 -1. + <_>8 6 8 12 3. + 0 + -0.1382420063018799 + 0.6295750141143799 + -0.0217129997909069 + <_> + + <_> + + + + <_>3 7 6 14 -1. + <_>6 7 3 14 2. + 0 + 0.0522289983928204 + -0.2336090058088303 + 0.4976080060005188 + <_> + + <_> + + + + <_>19 8 5 8 -1. + <_>19 12 5 4 2. + 0 + 0.0258840005844831 + 0.1804199963808060 + -0.2203920036554337 + <_> + + <_> + + + + <_>0 8 5 8 -1. + <_>0 12 5 4 2. + 0 + -0.0121389999985695 + -0.6973189711570740 + 0.0157120004296303 + <_> + + <_> + + + + <_>17 3 6 6 -1. + <_>17 6 6 3 2. + 0 + -0.0242379996925592 + 0.3459329903125763 + 0.0714699998497963 + <_> + + <_> + + + + <_>1 3 6 6 -1. + <_>1 6 6 3 2. + 0 + -0.0252720005810261 + -0.8758329749107361 + -9.8240002989768982e-003 + <_> + + <_> + + + + <_>18 2 6 9 -1. + <_>18 5 6 3 3. + 0 + 0.0125970002263784 + 0.2364999949932098 + -0.2873120009899139 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + 0.0573309995234013 + -0.0615309998393059 + -2.2326040267944336 + <_> + + <_> + + + + <_>3 3 18 6 -1. + <_>3 5 18 2 3. + 0 + 0.0166710000485182 + -0.1985010057687759 + 0.4081070125102997 + <_> + + <_> + + + + <_>2 3 9 6 -1. + <_>2 5 9 2 3. + 0 + -0.0228189993649721 + 0.9648759961128235 + -0.2024569958448410 + <_> + + <_> + + + + <_>9 3 10 8 -1. + <_>14 3 5 4 2. + <_>9 7 5 4 2. + 0 + 3.7000001611886546e-005 + -0.0589089989662170 + 0.2705540060997009 + <_> + + <_> + + + + <_>5 3 10 8 -1. + <_>5 3 5 4 2. + <_>10 7 5 4 2. 
+ 0 + -7.6700001955032349e-003 + -0.4531710147857666 + 0.0896280035376549 + <_> + + <_> + + + + <_>10 11 6 12 -1. + <_>10 11 3 12 2. + 0 + 0.0940859988331795 + 0.1160459965467453 + -1.0951169729232788 + <_> + + <_> + + + + <_>8 11 6 11 -1. + <_>11 11 3 11 2. + 0 + -0.0622670017182827 + 1.8096530437469482 + -0.1477320045232773 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 8 5 4 2. + 0 + 0.0174160003662109 + 0.2306820005178452 + -0.4241760075092316 + <_> + + <_> + + + + <_>9 6 6 7 -1. + <_>12 6 3 7 2. + 0 + -0.0220660008490086 + 0.4927029907703400 + -0.2063090056180954 + <_> + + <_> + + + + <_>5 18 18 3 -1. + <_>5 19 18 1 3. + 0 + -0.0104040000587702 + 0.6092429757118225 + 0.0281300004571676 + <_> + + <_> + + + + <_>8 4 6 9 -1. + <_>10 4 2 9 3. + 0 + -9.3670003116130829e-003 + 0.4017120003700256 + -0.2168170064687729 + <_> + + <_> + + + + <_>8 1 9 7 -1. + <_>11 1 3 7 3. + 0 + -0.0290399994701147 + -0.8487650156021118 + 0.1424680054187775 + <_> + + <_> + + + + <_>6 11 6 6 -1. + <_>9 11 3 6 2. + 0 + -0.0210619997233152 + -0.7919830083847046 + -0.0125959999859333 + <_> + + <_> + + + + <_>14 12 4 11 -1. + <_>14 12 2 11 2. + 0 + -0.0370009988546371 + -0.6748890280723572 + 0.1283040046691895 + <_> + + <_> + + + + <_>6 12 4 11 -1. + <_>8 12 2 11 2. + 0 + 0.0107359997928143 + 0.0367799997329712 + -0.6339300274848938 + <_> + + <_> + + + + <_>8 0 12 18 -1. + <_>12 0 4 18 3. + 0 + 0.1636759936809540 + 0.1380389928817749 + -0.4718900024890900 + <_> + + <_> + + + + <_>2 12 10 5 -1. + <_>7 12 5 5 2. + 0 + 0.0949179977178574 + -0.1385570019483566 + 1.9492419958114624 + <_> + + <_> + + + + <_>2 20 22 3 -1. + <_>2 21 22 1 3. + 0 + 0.0352619998157024 + 0.1372189968824387 + -2.1186530590057373 + <_> + + <_> + + + + <_>0 4 2 20 -1. + <_>1 4 1 20 2. + 0 + 0.0128110004588962 + -0.2000810056924820 + 0.4950779974460602 + -3.5939640998840332 + 15 + -1 + <_> + + + <_> + + <_> + + + + <_>0 2 24 4 -1. + <_>8 2 8 4 3. + 0 + 0.1390440016984940 + -0.4658119976520538 + 0.7643160223960877 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 10 10 2 2. + 0 + 0.0119169997051358 + -0.9439899921417236 + 0.3972629904747009 + <_> + + <_> + + + + <_>6 7 8 10 -1. + <_>6 7 4 5 2. + <_>10 12 4 5 2. + 0 + -0.0100069995969534 + 0.3271879851818085 + -0.6336740255355835 + <_> + + <_> + + + + <_>14 0 6 14 -1. + <_>17 0 3 7 2. + <_>14 7 3 7 2. + 0 + -6.0479999519884586e-003 + 0.2742789983749390 + -0.5744699835777283 + <_> + + <_> + + + + <_>4 11 5 8 -1. + <_>4 15 5 4 2. + 0 + -1.2489999644458294e-003 + 0.2362930029630661 + -0.6859350204467773 + <_> + + <_> + + + + <_>2 0 20 9 -1. + <_>2 3 20 3 3. + 0 + 0.0323820002377033 + -0.5763019919395447 + 0.2749269902706146 + <_> + + <_> + + + + <_>6 7 12 8 -1. + <_>6 7 6 4 2. + <_>12 11 6 4 2. + 0 + -0.0139579996466637 + -0.6106150150299072 + 0.2454160004854202 + <_> + + <_> + + + + <_>9 17 6 6 -1. + <_>9 20 6 3 2. + 0 + 1.1159999994561076e-003 + -0.5653910040855408 + 0.2717930078506470 + <_> + + <_> + + + + <_>7 10 10 4 -1. + <_>7 12 10 2 2. + 0 + 2.7000000045518391e-005 + -0.8023599982261658 + 0.1150910034775734 + <_> + + <_> + + + + <_>6 5 12 9 -1. + <_>10 5 4 9 3. + 0 + -2.5700000696815550e-004 + -0.8120589852333069 + 0.2384469956159592 + <_> + + <_> + + + + <_>5 11 6 8 -1. + <_>8 11 3 8 2. + 0 + 4.0460000745952129e-003 + 0.1390960067510605 + -0.6616320013999939 + <_> + + <_> + + + + <_>18 4 4 17 -1. + <_>18 4 2 17 2. + 0 + 0.0143560003489256 + -0.1648519933223724 + 0.4190169870853424 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. 
+ 0 + -0.0553749985992908 + 1.4425870180130005 + -0.1882019937038422 + <_> + + <_> + + + + <_>18 4 4 17 -1. + <_>18 4 2 17 2. + 0 + 0.0935949981212616 + 0.1354829967021942 + -0.9163609743118286 + <_> + + <_> + + + + <_>2 4 4 17 -1. + <_>4 4 2 17 2. + 0 + 0.0266249999403954 + -0.3374829888343811 + 0.3923360109329224 + <_> + + <_> + + + + <_>5 18 19 3 -1. + <_>5 19 19 1 3. + 0 + 3.7469998933374882e-003 + -0.1161540001630783 + 0.4439930021762848 + <_> + + <_> + + + + <_>11 0 2 18 -1. + <_>11 9 2 9 2. + 0 + -0.0318860001862049 + -0.9949830174446106 + 1.6120000509545207e-003 + <_> + + <_> + + + + <_>15 4 2 18 -1. + <_>15 13 2 9 2. + 0 + -0.0226000007241964 + -0.4806739985942841 + 0.1700730025768280 + <_> + + <_> + + + + <_>7 4 2 18 -1. + <_>7 13 2 9 2. + 0 + 0.0252020005136728 + 0.0355800017714500 + -0.8021540045738220 + <_> + + <_> + + + + <_>7 11 10 8 -1. + <_>12 11 5 4 2. + <_>7 15 5 4 2. + 0 + -0.0310369990766048 + -1.0895340442657471 + 0.1808190047740936 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>12 6 2 9 2. + 0 + -0.0264759995043278 + 0.9567120075225830 + -0.2104939967393875 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + -0.0138539997860789 + -1.0370320081710815 + 0.2216670066118240 + <_> + + <_> + + + + <_>2 9 16 8 -1. + <_>2 9 8 4 2. + <_>10 13 8 4 2. + 0 + -0.0629250034689903 + 0.9019939899444580 + -0.1908529996871948 + <_> + + <_> + + + + <_>14 15 6 9 -1. + <_>14 18 6 3 3. + 0 + -0.0447509996592999 + -1.0119110345840454 + 0.1469119936227799 + <_> + + <_> + + + + <_>8 7 6 9 -1. + <_>10 7 2 9 3. + 0 + -0.0204280000180006 + 0.6162449717521668 + -0.2355269938707352 + <_> + + <_> + + + + <_>14 15 6 9 -1. + <_>14 18 6 3 3. + 0 + -8.0329999327659607e-003 + -0.0832799971103668 + 0.2172870039939880 + <_> + + <_> + + + + <_>3 12 12 6 -1. + <_>3 14 12 2 3. + 0 + 8.7280003353953362e-003 + 0.0654589980840683 + -0.6031870245933533 + <_> + + <_> + + + + <_>14 12 9 6 -1. + <_>14 14 9 2 3. + 0 + -0.0272020008414984 + -0.9344739913940430 + 0.1527000069618225 + <_> + + <_> + + + + <_>1 12 9 6 -1. + <_>1 14 9 2 3. + 0 + -0.0164710003882647 + -0.8417710065841675 + 0.0133320000022650 + <_> + + <_> + + + + <_>3 7 18 3 -1. + <_>3 8 18 1 3. + 0 + -0.0137440003454685 + 0.6056720018386841 + -0.0920210033655167 + <_> + + <_> + + + + <_>1 7 22 6 -1. + <_>1 9 22 2 3. + 0 + 0.0291649997234344 + -0.0281140003353357 + -1.4014569520950317 + <_> + + <_> + + + + <_>18 4 6 6 -1. + <_>18 7 6 3 2. + 0 + 0.0374570004642010 + 0.1308059990406036 + -0.4938249886035919 + <_> + + <_> + + + + <_>0 4 6 6 -1. + <_>0 7 6 3 2. + 0 + -0.0250700004398823 + -1.1289390325546265 + -0.0146000003442168 + <_> + + <_> + + + + <_>5 11 16 6 -1. + <_>5 14 16 3 2. + 0 + -0.0638120025396347 + 0.7587159872055054 + -1.8200000049546361e-003 + <_> + + <_> + + + + <_>6 16 9 4 -1. + <_>6 18 9 2 2. + 0 + -9.3900002539157867e-003 + 0.2993640005588532 + -0.2948780059814453 + <_> + + <_> + + + + <_>14 15 6 9 -1. + <_>14 18 6 3 3. + 0 + -7.6000002445653081e-004 + 0.0197250004857779 + 0.1999389976263046 + <_> + + <_> + + + + <_>4 15 6 9 -1. + <_>4 18 6 3 3. + 0 + -0.0217409990727901 + -0.8524789810180664 + 0.0491699986159801 + <_> + + <_> + + + + <_>15 1 6 23 -1. + <_>17 1 2 23 3. + 0 + -0.0178699996322393 + -0.0599859990179539 + 0.1522250026464462 + <_> + + <_> + + + + <_>0 21 24 3 -1. + <_>8 21 8 3 3. + 0 + -0.0248310007154942 + 0.3560340106487274 + -0.2625989913940430 + <_> + + <_> + + + + <_>0 20 24 4 -1. + <_>8 20 8 4 3. + 0 + 0.1571550071239471 + 1.5599999460391700e-004 + 1.0428730249404907 + <_> + + <_> + + + + <_>3 1 6 23 -1. 
+ <_>5 1 2 23 3. + 0 + 0.0690269991755486 + -0.0330069996416569 + -1.1796669960021973 + <_> + + <_> + + + + <_>3 17 18 3 -1. + <_>3 18 18 1 3. + 0 + -0.0110219996422529 + 0.5898770093917847 + -0.0576479993760586 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + -0.0138349998742342 + 0.5950279831886292 + -0.2441859990358353 + <_> + + <_> + + + + <_>1 16 22 4 -1. + <_>12 16 11 2 2. + <_>1 18 11 2 2. + 0 + -0.0309410002082586 + -1.1723799705505371 + 0.1690700054168701 + <_> + + <_> + + + + <_>0 16 9 6 -1. + <_>0 18 9 2 3. + 0 + 0.0212580002844334 + -0.0189009997993708 + -1.0684759616851807 + <_> + + <_> + + + + <_>2 10 21 3 -1. + <_>9 10 7 3 3. + 0 + 0.0930799990892410 + 0.1630560010671616 + -1.3375270366668701 + <_> + + <_> + + + + <_>2 18 12 6 -1. + <_>2 18 6 3 2. + <_>8 21 6 3 2. + 0 + 0.0296359993517399 + -0.2252479940652847 + 0.4540010094642639 + <_> + + <_> + + + + <_>0 5 24 4 -1. + <_>0 7 24 2 2. + 0 + -1.2199999764561653e-004 + 0.2740910053253174 + -0.3737139999866486 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0420980006456375 + -0.7582880258560181 + 0.0171370003372431 + <_> + + <_> + + + + <_>10 7 6 12 -1. + <_>10 13 6 6 2. + 0 + -0.0225050002336502 + -0.2275930047035217 + 0.2369869947433472 + <_> + + <_> + + + + <_>6 6 6 9 -1. + <_>8 6 2 9 3. + 0 + -0.0128629999235272 + 0.1925240010023117 + -0.3212710022926331 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + 0.0278600007295609 + 0.1672369986772537 + -1.0209059715270996 + <_> + + <_> + + + + <_>9 7 6 9 -1. + <_>11 7 2 9 3. + 0 + -0.0278079994022846 + 1.2824759483337402 + -0.1722529977560043 + <_> + + <_> + + + + <_>2 1 20 3 -1. + <_>2 2 20 1 3. + 0 + -6.1630001291632652e-003 + -0.5407289862632752 + 0.2388570010662079 + <_> + + <_> + + + + <_>1 18 12 6 -1. + <_>1 18 6 3 2. + <_>7 21 6 3 2. + 0 + -0.0204360000789166 + 0.6335539817810059 + -0.2109059989452362 + <_> + + <_> + + + + <_>13 2 4 13 -1. + <_>13 2 2 13 2. + 0 + -0.0123079996556044 + -0.4977819919586182 + 0.1740259975194931 + <_> + + <_> + + + + <_>6 7 12 4 -1. + <_>12 7 6 4 2. + 0 + -0.0404939986765385 + -1.1848740577697754 + -0.0338909998536110 + <_> + + <_> + + + + <_>10 1 4 13 -1. + <_>10 1 2 13 2. + 0 + 0.0296570006757975 + 0.0217409990727901 + 1.0069919824600220 + <_> + + <_> + + + + <_>6 0 3 18 -1. + <_>7 0 1 18 3. + 0 + 6.8379999138414860e-003 + 0.0292179994285107 + -0.5990629792213440 + <_> + + <_> + + + + <_>14 3 10 5 -1. + <_>14 3 5 5 2. + 0 + 0.0161649994552135 + -0.2100079953670502 + 0.3763729929924011 + <_> + + <_> + + + + <_>6 15 12 8 -1. + <_>10 15 4 8 3. + 0 + 0.0501930005848408 + 2.5319999549537897e-003 + -0.7166820168495178 + <_> + + <_> + + + + <_>9 10 6 9 -1. + <_>11 10 2 9 3. + 0 + 1.9680000841617584e-003 + -0.2192140072584152 + 0.3229869902133942 + <_> + + <_> + + + + <_>8 3 4 9 -1. + <_>10 3 2 9 2. + 0 + 0.0249799992889166 + -9.6840001642704010e-003 + -0.7757290005683899 + <_> + + <_> + + + + <_>17 0 6 14 -1. + <_>20 0 3 7 2. + <_>17 7 3 7 2. + 0 + -0.0158099997788668 + 0.4463750123977661 + -0.0617600008845329 + <_> + + <_> + + + + <_>1 0 6 14 -1. + <_>1 0 3 7 2. + <_>4 7 3 7 2. + 0 + 0.0372069999575615 + -0.2049539983272553 + 0.5772219896316528 + <_> + + <_> + + + + <_>14 0 6 16 -1. + <_>17 0 3 8 2. + <_>14 8 3 8 2. + 0 + -0.0792649984359741 + -0.7674540281295776 + 0.1255040019750595 + <_> + + <_> + + + + <_>7 4 4 10 -1. + <_>9 4 2 10 2. + 0 + -0.0171520002186298 + -1.4121830463409424 + -0.0517040006816387 + <_> + + <_> + + + + <_>3 17 18 6 -1. + <_>12 17 9 3 2. + <_>3 20 9 3 2. 
+ 0 + 0.0327400006353855 + 0.1933400034904480 + -0.6363369822502136 + <_> + + <_> + + + + <_>1 20 22 4 -1. + <_>12 20 11 4 2. + 0 + -0.1175699979066849 + 0.8432540297508240 + -0.1801860034465790 + <_> + + <_> + + + + <_>14 3 10 5 -1. + <_>14 3 5 5 2. + 0 + 0.1205720007419586 + 0.1253000050783157 + -2.1213600635528564 + <_> + + <_> + + + + <_>0 3 10 5 -1. + <_>5 3 5 5 2. + 0 + 4.2779999785125256e-003 + -0.4660440087318420 + 0.0896439999341965 + <_> + + <_> + + + + <_>12 6 12 16 -1. + <_>16 6 4 16 3. + 0 + -0.0725449994206429 + 0.5182650089263916 + 0.0168239995837212 + <_> + + <_> + + + + <_>0 6 12 16 -1. + <_>4 6 4 16 3. + 0 + 0.1771059930324554 + -0.0309100002050400 + -1.1046639680862427 + <_> + + <_> + + + + <_>10 9 5 15 -1. + <_>10 14 5 5 3. + 0 + 8.4229996427893639e-003 + 0.2444580048322678 + -0.3861309885978699 + <_> + + <_> + + + + <_>1 18 21 2 -1. + <_>1 19 21 1 2. + 0 + -0.0130350003018975 + 0.9800440073013306 + -0.1701650023460388 + <_> + + <_> + + + + <_>15 0 9 6 -1. + <_>15 2 9 2 3. + 0 + 0.0189120005816221 + 0.2024849951267242 + -0.3854590058326721 + <_> + + <_> + + + + <_>6 1 12 4 -1. + <_>12 1 6 4 2. + 0 + 0.0214479994028807 + -0.2571719884872437 + 0.3518120050430298 + <_> + + <_> + + + + <_>6 0 12 12 -1. + <_>12 0 6 6 2. + <_>6 6 6 6 2. + 0 + 0.0633570030331612 + 0.1699479967355728 + -0.9138380289077759 + <_> + + <_> + + + + <_>8 10 8 12 -1. + <_>8 10 4 6 2. + <_>12 16 4 6 2. + 0 + -0.0324359983205795 + -0.8568159937858582 + -0.0216809995472431 + <_> + + <_> + + + + <_>14 16 10 8 -1. + <_>19 16 5 4 2. + <_>14 20 5 4 2. + 0 + -0.0235649999231100 + 0.5611559748649597 + -2.2400000307243317e-004 + <_> + + <_> + + + + <_>0 16 10 8 -1. + <_>0 16 5 4 2. + <_>5 20 5 4 2. + 0 + 0.0187890008091927 + -0.2545979917049408 + 0.3451290130615234 + <_> + + <_> + + + + <_>10 12 12 5 -1. + <_>14 12 4 5 3. + 0 + 0.0310420002788305 + 7.5719999149441719e-003 + 0.3480019867420197 + <_> + + <_> + + + + <_>6 16 10 8 -1. + <_>6 16 5 4 2. + <_>11 20 5 4 2. + 0 + -0.0112269995734096 + -0.6021980047225952 + 0.0428149998188019 + <_> + + <_> + + + + <_>7 6 12 6 -1. + <_>13 6 6 3 2. + <_>7 9 6 3 2. + 0 + -0.0128459995612502 + 0.4202040135860443 + -0.0538010001182556 + <_> + + <_> + + + + <_>9 6 4 18 -1. + <_>9 6 2 9 2. + <_>11 15 2 9 2. + 0 + -0.0127919996157289 + 0.2272450029850006 + -0.3239800035953522 + <_> + + <_> + + + + <_>10 9 6 14 -1. + <_>13 9 3 7 2. + <_>10 16 3 7 2. + 0 + 0.0686519965529442 + 0.0935320034623146 + 10. + <_> + + <_> + + + + <_>8 9 6 14 -1. + <_>8 9 3 7 2. + <_>11 16 3 7 2. + 0 + 5.2789999172091484e-003 + -0.2692629992961884 + 0.3330320119857788 + <_> + + <_> + + + + <_>7 4 11 12 -1. + <_>7 10 11 6 2. + 0 + -0.0387790016829968 + -0.7236530184745789 + 0.1780650019645691 + <_> + + <_> + + + + <_>4 8 6 16 -1. + <_>4 8 3 8 2. + <_>7 16 3 8 2. + 0 + 6.1820000410079956e-003 + -0.3511939942836762 + 0.1658630073070526 + <_> + + <_> + + + + <_>17 3 4 21 -1. + <_>17 10 4 7 3. + 0 + 0.1751520037651062 + 0.1162310019135475 + -1.5419290065765381 + <_> + + <_> + + + + <_>3 3 4 21 -1. + <_>3 10 4 7 3. + 0 + 0.1162799969315529 + -9.1479998081922531e-003 + -0.9984260201454163 + <_> + + <_> + + + + <_>10 1 8 18 -1. + <_>14 1 4 9 2. + <_>10 10 4 9 2. + 0 + -0.0229640007019043 + 0.2056539952754974 + 0.0154320001602173 + <_> + + <_> + + + + <_>2 5 16 8 -1. + <_>2 5 8 4 2. + <_>10 9 8 4 2. + 0 + -0.0514100007712841 + 0.5807240009307861 + -0.2011840045452118 + <_> + + <_> + + + + <_>3 6 18 12 -1. + <_>3 10 18 4 3. 
+ 0 + 0.2247419953346252 + 0.0187289994210005 + 1.0829299688339233 + <_> + + <_> + + + + <_>4 10 16 12 -1. + <_>4 14 16 4 3. + 0 + 9.4860000535845757e-003 + -0.3317129909992218 + 0.1990299969911575 + <_> + + <_> + + + + <_>15 4 8 20 -1. + <_>19 4 4 10 2. + <_>15 14 4 10 2. + 0 + -0.1184630021452904 + 1.3711010217666626 + 0.0689269974827766 + <_> + + <_> + + + + <_>7 2 9 6 -1. + <_>10 2 3 6 3. + 0 + 0.0378109999001026 + -9.3600002583116293e-004 + -0.8399699926376343 + <_> + + <_> + + + + <_>15 4 8 20 -1. + <_>19 4 4 10 2. + <_>15 14 4 10 2. + 0 + 0.0222020000219345 + -0.0119639998301864 + 0.3667399883270264 + <_> + + <_> + + + + <_>1 4 8 20 -1. + <_>1 4 4 10 2. + <_>5 14 4 10 2. + 0 + -0.0363660007715225 + 0.3786650002002716 + -0.2771480083465576 + <_> + + <_> + + + + <_>11 8 8 14 -1. + <_>15 8 4 7 2. + <_>11 15 4 7 2. + 0 + -0.1318469941616058 + -2.7481179237365723 + 0.1066690012812614 + <_> + + <_> + + + + <_>5 8 8 14 -1. + <_>5 8 4 7 2. + <_>9 15 4 7 2. + 0 + -0.0416559986770153 + 0.4752430021762848 + -0.2324980050325394 + <_> + + <_> + + + + <_>10 13 5 8 -1. + <_>10 17 5 4 2. + 0 + -0.0331519991159439 + -0.5792940258979797 + 0.1743440032005310 + <_> + + <_> + + + + <_>4 13 7 9 -1. + <_>4 16 7 3 3. + 0 + 0.0157699994742870 + -0.0112840002402663 + -0.8370140194892883 + <_> + + <_> + + + + <_>0 13 24 10 -1. + <_>0 18 24 5 2. + 0 + -0.0393630005419254 + 0.3482159972190857 + -0.1745540052652359 + <_> + + <_> + + + + <_>4 2 8 11 -1. + <_>8 2 4 11 2. + 0 + -0.0678490027785301 + 1.4225699901580811 + -0.1476559937000275 + <_> + + <_> + + + + <_>10 2 8 16 -1. + <_>14 2 4 8 2. + <_>10 10 4 8 2. + 0 + -0.0267750006169081 + 0.2394700050354004 + 0.0132719995453954 + <_> + + <_> + + + + <_>0 2 24 6 -1. + <_>0 2 12 3 2. + <_>12 5 12 3 2. + 0 + 0.0399190001189709 + -8.9999996125698090e-003 + -0.7593889832496643 + <_> + + <_> + + + + <_>6 0 12 9 -1. + <_>6 3 12 3 3. + 0 + 0.1006560027599335 + -0.0186850000172853 + 0.7624530196189880 + <_> + + <_> + + + + <_>1 2 12 12 -1. + <_>1 2 6 6 2. + <_>7 8 6 6 2. + 0 + -0.0810220018029213 + -0.9043909907341003 + -8.5880002006888390e-003 + <_> + + <_> + + + + <_>18 5 6 9 -1. + <_>18 8 6 3 3. + 0 + -0.0212580002844334 + -0.2131959944963455 + 0.2191970050334930 + <_> + + <_> + + + + <_>4 3 8 10 -1. + <_>4 3 4 5 2. + <_>8 8 4 5 2. + 0 + -0.0106309996917844 + 0.1959809958934784 + -0.3576810061931610 + <_> + + <_> + + + + <_>6 21 18 3 -1. + <_>6 22 18 1 3. + 0 + 8.1300002057105303e-004 + -0.0927949994802475 + 0.2614589929580689 + <_> + + <_> + + + + <_>1 10 18 2 -1. + <_>1 11 18 1 2. + 0 + 3.4650000743567944e-003 + -0.5533609986305237 + 0.0273860003799200 + <_> + + <_> + + + + <_>1 10 22 3 -1. + <_>1 11 22 1 3. + 0 + 0.0188359990715981 + 0.1844609975814819 + -0.6693429946899414 + <_> + + <_> + + + + <_>2 8 12 9 -1. + <_>2 11 12 3 3. + 0 + -0.0256319995969534 + 1.9382879734039307 + -0.1470890045166016 + <_> + + <_> + + + + <_>12 8 12 6 -1. + <_>18 8 6 3 2. + <_>12 11 6 3 2. + 0 + -4.0939999744296074e-003 + -0.2645159959793091 + 0.2073320001363754 + <_> + + <_> + + + + <_>0 8 12 6 -1. + <_>0 8 6 3 2. + <_>6 11 6 3 2. + 0 + -8.9199998183175921e-004 + -0.5503159761428833 + 0.0503749996423721 + <_> + + <_> + + + + <_>10 15 6 9 -1. + <_>12 15 2 9 3. + 0 + -0.0495180003345013 + -2.5615389347076416 + 0.1314170062541962 + <_> + + <_> + + + + <_>7 13 9 6 -1. + <_>7 15 9 2 3. + 0 + 0.0116809997707605 + -0.2481980025768280 + 0.3998270034790039 + <_> + + <_> + + + + <_>9 8 7 12 -1. + <_>9 14 7 6 2. 
+ 0 + 0.0345639996230602 + 0.1617880016565323 + -0.7141889929771423 + <_> + + <_> + + + + <_>4 13 9 6 -1. + <_>7 13 3 6 3. + 0 + -8.2909995689988136e-003 + 0.2218009978532791 + -0.2918170094490051 + <_> + + <_> + + + + <_>6 15 18 4 -1. + <_>12 15 6 4 3. + 0 + -0.0223580002784729 + 0.3104409873485565 + -2.7280000504106283e-003 + <_> + + <_> + + + + <_>5 4 4 16 -1. + <_>7 4 2 16 2. + 0 + -0.0308010000735521 + -0.9567270278930664 + -8.3400001749396324e-003 + <_> + + <_> + + + + <_>10 15 6 9 -1. + <_>12 15 2 9 3. + 0 + 0.0437790006399155 + 0.1255690008401871 + -1.1759619712829590 + <_> + + <_> + + + + <_>8 15 6 9 -1. + <_>10 15 2 9 3. + 0 + 0.0430460013449192 + -0.0588769987225533 + -1.8568470478057861 + <_> + + <_> + + + + <_>9 11 12 10 -1. + <_>15 11 6 5 2. + <_>9 16 6 5 2. + 0 + 0.0271889995783567 + 0.0428580008447170 + 0.3903670012950897 + <_> + + <_> + + + + <_>3 6 14 6 -1. + <_>3 8 14 2 3. + 0 + 9.4149997457861900e-003 + -0.0435670018196106 + -1.1094470024108887 + <_> + + <_> + + + + <_>4 2 17 8 -1. + <_>4 6 17 4 2. + 0 + 0.0943119972944260 + 0.0402569994330406 + 0.9844229817390442 + <_> + + <_> + + + + <_>6 2 12 21 -1. + <_>6 9 12 7 3. + 0 + 0.1702509969472885 + 0.0295100007206202 + -0.6950929760932922 + <_> + + <_> + + + + <_>8 1 9 9 -1. + <_>8 4 9 3 3. + 0 + -0.0471480004489422 + 1.0338569879531860 + 0.0676020011305809 + <_> + + <_> + + + + <_>0 7 24 3 -1. + <_>12 7 12 3 2. + 0 + 0.1118630021810532 + -0.0686829984188080 + -2.4985830783843994 + <_> + + <_> + + + + <_>11 6 9 10 -1. + <_>11 11 9 5 2. + 0 + -0.0143539998680353 + -0.5948190093040466 + 0.1500169932842255 + <_> + + <_> + + + + <_>2 11 18 3 -1. + <_>2 12 18 1 3. + 0 + 0.0340240001678467 + -0.0648230016231537 + -2.1382639408111572 + <_> + + <_> + + + + <_>8 16 9 4 -1. + <_>8 18 9 2 2. + 0 + 0.0216019991785288 + 0.0553099997341633 + 0.7829290032386780 + <_> + + <_> + + + + <_>0 0 9 6 -1. + <_>0 2 9 2 3. + 0 + 0.0217719990760088 + -7.1279997937381268e-003 + -0.7214810252189636 + <_> + + <_> + + + + <_>0 11 24 6 -1. + <_>0 13 24 2 3. + 0 + 0.0824169963598251 + 0.1460949927568436 + -1.3636670112609863 + <_> + + <_> + + + + <_>2 9 20 6 -1. + <_>2 12 20 3 2. + 0 + 0.0846719965338707 + -0.1778469979763031 + 0.7285770177841187 + <_> + + <_> + + + + <_>4 5 16 12 -1. + <_>12 5 8 6 2. + <_>4 11 8 6 2. + 0 + -0.0551280006766319 + -0.5940240025520325 + 0.1935780048370361 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0648230016231537 + -1.0783840417861938 + -0.0407340005040169 + <_> + + <_> + + + + <_>7 3 10 4 -1. + <_>7 5 10 2 2. + 0 + -0.0227690003812313 + 0.7790020108222961 + 3.4960000775754452e-003 + <_> + + <_> + + + + <_>9 15 6 8 -1. + <_>9 19 6 4 2. + 0 + 0.0547560006380081 + -0.0656839981675148 + -1.8188409805297852 + <_> + + <_> + + + + <_>17 0 7 10 -1. + <_>17 5 7 5 2. + 0 + -8.9000001025851816e-005 + -0.0178919993340969 + 0.2076829969882965 + <_> + + <_> + + + + <_>0 0 7 10 -1. + <_>0 5 7 5 2. + 0 + 0.0983619987964630 + -0.0559469982981682 + -1.4153920412063599 + <_> + + <_> + + + + <_>16 1 6 12 -1. + <_>19 1 3 6 2. + <_>16 7 3 6 2. + 0 + -7.0930002257227898e-003 + 0.3413529992103577 + -0.1208989992737770 + <_> + + <_> + + + + <_>1 0 19 8 -1. + <_>1 4 19 4 2. + 0 + 0.0502780005335808 + -0.2628670036792755 + 0.2579729855060577 + <_> + + <_> + + + + <_>12 2 9 4 -1. + <_>12 4 9 2 2. + 0 + -5.7870000600814819e-003 + -0.1317860037088394 + 0.1735019981861115 + <_> + + <_> + + + + <_>3 2 9 4 -1. + <_>3 4 9 2 2. + 0 + 0.0139739997684956 + 0.0285180006176233 + -0.6115220189094544 + <_> + + <_> + + + + <_>12 2 10 6 -1. 
+ <_>12 4 10 2 3. + 0 + 0.0214499998837709 + 0.0261819995939732 + 0.3030659854412079 + <_> + + <_> + + + + <_>3 4 18 2 -1. + <_>12 4 9 2 2. + 0 + -0.0292140003293753 + 0.4494059979915619 + -0.2280309945344925 + <_> + + <_> + + + + <_>12 1 4 9 -1. + <_>12 1 2 9 2. + 0 + 4.8099999548867345e-004 + -0.1987999975681305 + 0.2074449956417084 + <_> + + <_> + + + + <_>8 1 4 9 -1. + <_>10 1 2 9 2. + 0 + 1.7109999898821115e-003 + -0.5403720140457153 + 0.0678659975528717 + <_> + + <_> + + + + <_>10 5 8 10 -1. + <_>14 5 4 5 2. + <_>10 10 4 5 2. + 0 + 8.6660003289580345e-003 + -0.0131280003115535 + 0.5229790210723877 + <_> + + <_> + + + + <_>6 4 12 13 -1. + <_>10 4 4 13 3. + 0 + 0.0636579990386963 + 0.0682990029454231 + -0.4923509955406189 + <_> + + <_> + + + + <_>13 5 6 6 -1. + <_>13 5 3 6 2. + 0 + -0.0279680006206036 + 0.6818389892578125 + 0.0787810012698174 + <_> + + <_> + + + + <_>1 5 12 3 -1. + <_>7 5 6 3 2. + 0 + 0.0489539988338947 + -0.2062239944934845 + 0.5038809776306152 + -3.3933560848236084 + 16 + -1 + <_> + + + <_> + + <_> + + + + <_>7 5 10 6 -1. + <_>7 7 10 2 3. + 0 + -0.0293129999190569 + 0.7128469944000244 + -0.5823069810867310 + <_> + + <_> + + + + <_>2 0 21 5 -1. + <_>9 0 7 5 3. + 0 + 0.1241509988903999 + -0.3686349987983704 + 0.6006720066070557 + <_> + + <_> + + + + <_>0 8 9 9 -1. + <_>0 11 9 3 3. + 0 + 7.9349996522068977e-003 + -0.8600829839706421 + 0.2172469943761826 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0303659997880459 + -0.2718699872493744 + 0.6124789714813232 + <_> + + <_> + + + + <_>0 3 6 7 -1. + <_>3 3 3 7 2. + 0 + 0.0252180006355047 + -0.3474830090999603 + 0.5042769908905029 + <_> + + <_> + + + + <_>9 18 12 6 -1. + <_>15 18 6 3 2. + <_>9 21 6 3 2. + 0 + 0.0100140003487468 + -0.3189899921417236 + 0.4137679934501648 + <_> + + <_> + + + + <_>2 8 20 6 -1. + <_>2 8 10 3 2. + <_>12 11 10 3 2. + 0 + -0.0167750008404255 + -0.6904810070991516 + 0.0948309972882271 + <_> + + <_> + + + + <_>13 2 10 4 -1. + <_>13 4 10 2 2. + 0 + -2.6950000319629908e-003 + -0.2082979977130890 + 0.2373719960451126 + <_> + + <_> + + + + <_>4 5 5 18 -1. + <_>4 11 5 6 3. + 0 + 0.0422579981386662 + -0.4936670064926148 + 0.1817059963941574 + <_> + + <_> + + + + <_>20 4 4 9 -1. + <_>20 4 2 9 2. + 0 + -0.0485050007700920 + 1.3429640531539917 + 0.0397690013051033 + <_> + + <_> + + + + <_>8 6 8 14 -1. + <_>8 13 8 7 2. + 0 + 0.0289929993450642 + 0.0464960001409054 + -0.8164349794387817 + <_> + + <_> + + + + <_>0 1 24 6 -1. + <_>12 1 12 3 2. + <_>0 4 12 3 2. + 0 + -0.0400890000164509 + -0.7119780182838440 + 0.2255389988422394 + <_> + + <_> + + + + <_>0 4 4 9 -1. + <_>2 4 2 9 2. + 0 + -0.0410219989717007 + 1.0057929754257202 + -0.1969020068645477 + <_> + + <_> + + + + <_>3 6 18 3 -1. + <_>3 7 18 1 3. + 0 + 0.0118380002677441 + -0.0126000000163913 + 0.8076710104942322 + <_> + + <_> + + + + <_>3 17 16 6 -1. + <_>3 19 16 2 3. + 0 + -0.0213280003517866 + -0.8202390074729919 + 0.0205249991267920 + <_> + + <_> + + + + <_>13 6 6 9 -1. + <_>13 9 6 3 3. + 0 + -0.0239049997180700 + 0.5421050190925598 + -0.0747670009732246 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>5 6 7 3 2. + <_>12 9 7 3 2. + 0 + 0.0180089995265007 + -0.3382770121097565 + 0.4235860109329224 + <_> + + <_> + + + + <_>13 5 8 10 -1. + <_>17 5 4 5 2. + <_>13 10 4 5 2. + 0 + -0.0436140000820160 + -1.1983489990234375 + 0.1556620001792908 + <_> + + <_> + + + + <_>2 2 20 3 -1. + <_>2 3 20 1 3. + 0 + -9.2449998483061790e-003 + -0.8902999758720398 + 0.0110039999708533 + <_> + + <_> + + + + <_>9 2 9 6 -1. + <_>12 2 3 6 3. 
+ 0 + 0.0474850013852119 + 0.1666409969329834 + -0.9076449871063232 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + -0.0142339998856187 + 0.6269519925117493 + -0.2579120099544525 + <_> + + <_> + + + + <_>12 3 4 11 -1. + <_>12 3 2 11 2. + 0 + 3.8010000716894865e-003 + -0.2822999954223633 + 0.2662459909915924 + <_> + + <_> + + + + <_>8 3 4 11 -1. + <_>10 3 2 11 2. + 0 + 3.4330000635236502e-003 + -0.6377199888229370 + 0.0984229966998100 + <_> + + <_> + + + + <_>8 3 8 10 -1. + <_>12 3 4 5 2. + <_>8 8 4 5 2. + 0 + -0.0292210001498461 + -0.7676990032196045 + 0.2263450026512146 + <_> + + <_> + + + + <_>11 1 2 18 -1. + <_>12 1 1 18 2. + 0 + -6.4949998632073402e-003 + 0.4560010135173798 + -0.2652890086174011 + <_> + + <_> + + + + <_>9 2 9 6 -1. + <_>12 2 3 6 3. + 0 + -0.0300340000540018 + -0.7655109763145447 + 0.1400929987430573 + <_> + + <_> + + + + <_>0 2 19 3 -1. + <_>0 3 19 1 3. + 0 + 7.8360000625252724e-003 + 0.0467559993267059 + -0.7235620021820068 + <_> + + <_> + + + + <_>9 14 9 6 -1. + <_>9 16 9 2 3. + 0 + 8.8550001382827759e-003 + -0.0491419993340969 + 0.5147269964218140 + <_> + + <_> + + + + <_>1 8 18 5 -1. + <_>7 8 6 5 3. + 0 + 0.0959739983081818 + -0.0200689993798733 + -1.0850950479507446 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0328769981861115 + -0.9587529897689819 + 0.1454360038042069 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + -0.0133840003982186 + -0.7001360058784485 + 0.0291579999029636 + <_> + + <_> + + + + <_>13 6 4 15 -1. + <_>13 11 4 5 3. + 0 + 0.0152359995990992 + -0.2823570072650909 + 0.2536799907684326 + <_> + + <_> + + + + <_>1 5 18 3 -1. + <_>1 6 18 1 3. + 0 + 0.0120540000498295 + -0.2530339956283569 + 0.4652670025825501 + <_> + + <_> + + + + <_>9 7 14 6 -1. + <_>9 9 14 2 3. + 0 + -0.0762950032949448 + -0.6991580128669739 + 0.1321720033884049 + <_> + + <_> + + + + <_>2 16 18 3 -1. + <_>2 17 18 1 3. + 0 + -0.0120400004088879 + 0.4589459896087647 + -0.2385649979114533 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + 0.0219160001724958 + 0.1826860010623932 + -0.6162970066070557 + <_> + + <_> + + + + <_>0 8 12 6 -1. + <_>0 8 6 3 2. + <_>6 11 6 3 2. + 0 + -2.7330000884830952e-003 + -0.6325790286064148 + 0.0342190004885197 + <_> + + <_> + + + + <_>9 13 7 8 -1. + <_>9 17 7 4 2. + 0 + -0.0486520007252693 + -1.0297729969024658 + 0.1738650053739548 + <_> + + <_> + + + + <_>2 17 20 3 -1. + <_>2 18 20 1 3. + 0 + -0.0104639995843172 + 0.3475730121135712 + -0.2746410071849823 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + -6.6550001502037048e-003 + -0.2898029983043671 + 0.2403790056705475 + <_> + + <_> + + + + <_>4 0 15 4 -1. + <_>4 2 15 2 2. + 0 + 8.5469996556639671e-003 + -0.4434050023555756 + 0.1426739990711212 + <_> + + <_> + + + + <_>17 2 6 6 -1. + <_>17 5 6 3 2. + 0 + 0.0199139993637800 + 0.1774040013551712 + -0.2409629970788956 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>0 6 6 3 3. + 0 + 0.0220129992812872 + -0.0108120003715158 + -0.9469079971313477 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + -0.0521790012717247 + 1.6547499895095825 + 0.0964870005846024 + <_> + + <_> + + + + <_>0 17 9 6 -1. + <_>0 19 9 2 3. + 0 + 0.0196989998221397 + -6.7560002207756042e-003 + -0.8631150126457214 + <_> + + <_> + + + + <_>9 18 12 6 -1. + <_>15 18 6 3 2. + <_>9 21 6 3 2. + 0 + 0.0230400003492832 + -2.3519999813288450e-003 + 0.3853130042552948 + <_> + + <_> + + + + <_>3 15 6 9 -1. + <_>3 18 6 3 3. 
+ 0 + -0.0150380004197359 + -0.6190569996833801 + 0.0310779996216297 + <_> + + <_> + + + + <_>16 13 8 10 -1. + <_>20 13 4 5 2. + <_>16 18 4 5 2. + 0 + -0.0499560013413429 + 0.7065749764442444 + 0.0478809997439384 + <_> + + <_> + + + + <_>0 14 24 4 -1. + <_>8 14 8 4 3. + 0 + -0.0692699998617172 + 0.3921290040016174 + -0.2384800016880035 + <_> + + <_> + + + + <_>13 18 6 6 -1. + <_>13 18 3 6 2. + 0 + 4.7399997711181641e-003 + -0.0243090000003576 + 0.2538630068302155 + <_> + + <_> + + + + <_>0 13 8 10 -1. + <_>0 13 4 5 2. + <_>4 18 4 5 2. + 0 + -0.0339239984750748 + 0.4693039953708649 + -0.2332189977169037 + <_> + + <_> + + + + <_>0 14 24 6 -1. + <_>0 17 24 3 2. + 0 + -0.0162310004234314 + 0.3231920003890991 + -0.2054560035467148 + <_> + + <_> + + + + <_>5 2 12 8 -1. + <_>5 2 6 4 2. + <_>11 6 6 4 2. + 0 + -0.0501930005848408 + -1.2277870178222656 + -0.0407980009913445 + <_> + + <_> + + + + <_>8 9 9 6 -1. + <_>11 9 3 6 3. + 0 + 0.0569440014660358 + 0.0451840013265610 + 0.6019750237464905 + <_> + + <_> + + + + <_>4 3 16 4 -1. + <_>4 5 16 2 2. + 0 + 0.0409369990229607 + -0.1677280068397522 + 0.8981930017471314 + <_> + + <_> + + + + <_>10 2 4 10 -1. + <_>10 7 4 5 2. + 0 + -3.0839999672025442e-003 + 0.3371619880199432 + -0.2724080085754395 + <_> + + <_> + + + + <_>8 4 5 8 -1. + <_>8 8 5 4 2. + 0 + -0.0326000005006790 + -0.8544650077819824 + 0.0196649990975857 + <_> + + <_> + + + + <_>11 5 9 12 -1. + <_>11 9 9 4 3. + 0 + 0.0984809994697571 + 0.0547420009970665 + 0.6382730007171631 + <_> + + <_> + + + + <_>4 5 9 12 -1. + <_>4 9 9 4 3. + 0 + -0.0381850004196167 + 0.5227469801902771 + -0.2338480055332184 + <_> + + <_> + + + + <_>14 6 6 9 -1. + <_>14 9 6 3 3. + 0 + -0.0459170006215572 + 0.6282920241355896 + 0.0328590013086796 + <_> + + <_> + + + + <_>2 4 20 12 -1. + <_>2 8 20 4 3. + 0 + -0.1195549964904785 + -0.6157270073890686 + 0.0346800014376640 + <_> + + <_> + + + + <_>4 4 17 16 -1. + <_>4 12 17 8 2. + 0 + -0.1204439997673035 + -0.8438000082969666 + 0.1653070002794266 + <_> + + <_> + + + + <_>8 7 7 6 -1. + <_>8 10 7 3 2. + 0 + 0.0706190019845963 + -0.0632610023021698 + -1.9863929748535156 + <_> + + <_> + + + + <_>1 9 23 2 -1. + <_>1 10 23 1 2. + 0 + 8.4889996796846390e-003 + -0.1766339987516403 + 0.3801119923591614 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + 0.0227109994739294 + -0.0276059992611408 + -0.9192140102386475 + <_> + + <_> + + + + <_>13 3 4 9 -1. + <_>13 3 2 9 2. + 0 + 4.9700000090524554e-004 + -0.2429320067167282 + 0.2287890017032623 + <_> + + <_> + + + + <_>8 1 6 13 -1. + <_>10 1 2 13 3. + 0 + 0.0346519984304905 + -0.2370599955320358 + 0.5401099920272827 + <_> + + <_> + + + + <_>4 22 18 2 -1. + <_>4 23 18 1 2. + 0 + -4.4700000435113907e-003 + 0.3907899856567383 + -0.1269380003213882 + <_> + + <_> + + + + <_>3 10 9 6 -1. + <_>6 10 3 6 3. + 0 + 0.0236430000513792 + -0.2666369974613190 + 0.3231259882450104 + <_> + + <_> + + + + <_>14 0 2 24 -1. + <_>14 0 1 24 2. + 0 + 0.0128130000084639 + 0.1754080057144165 + -0.6078799962997437 + <_> + + <_> + + + + <_>8 0 2 24 -1. + <_>9 0 1 24 2. + 0 + -0.0112509997561574 + -1.0852589607238770 + -0.0280460007488728 + <_> + + <_> + + + + <_>3 2 18 10 -1. + <_>9 2 6 10 3. + 0 + -0.0415350012481213 + 0.7188739776611328 + 0.0279820002615452 + <_> + + <_> + + + + <_>4 13 15 6 -1. + <_>9 13 5 6 3. + 0 + -0.0934709981083870 + -1.1906319856643677 + -0.0448109991848469 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>9 21 6 3 3. + 0 + -0.0272499993443489 + 0.6294249892234802 + 9.5039997249841690e-003 + <_> + + <_> + + + + <_>9 1 4 11 -1. 
+ <_>11 1 2 11 2. + 0 + -0.0217599999159575 + 1.3233649730682373 + -0.1502700001001358 + <_> + + <_> + + + + <_>9 7 10 4 -1. + <_>9 7 5 4 2. + 0 + -9.6890004351735115e-003 + -0.3394710123538971 + 0.1708579957485199 + <_> + + <_> + + + + <_>7 0 10 18 -1. + <_>12 0 5 18 2. + 0 + 0.0693959966301918 + -0.2565779983997345 + 0.4765209853649139 + <_> + + <_> + + + + <_>12 1 6 16 -1. + <_>14 1 2 16 3. + 0 + 0.0312089994549751 + 0.1415400058031082 + -0.3494200110435486 + <_> + + <_> + + + + <_>6 1 6 16 -1. + <_>8 1 2 16 3. + 0 + -0.0497270002961159 + -1.1675560474395752 + -0.0407579988241196 + <_> + + <_> + + + + <_>18 2 6 6 -1. + <_>18 5 6 3 2. + 0 + -0.0203019995242357 + -0.3948639929294586 + 0.1581490039825440 + <_> + + <_> + + + + <_>3 5 18 2 -1. + <_>3 6 18 1 2. + 0 + -0.0153670003637671 + 0.4930000007152557 + -0.2009209990501404 + <_> + + <_> + + + + <_>18 2 6 6 -1. + <_>18 5 6 3 2. + 0 + -0.0507350005209446 + 1.8736059665679932 + 0.0867300033569336 + <_> + + <_> + + + + <_>0 2 6 6 -1. + <_>0 5 6 3 2. + 0 + -0.0207260008901358 + -0.8893839716911316 + -7.3199998587369919e-003 + <_> + + <_> + + + + <_>13 11 11 6 -1. + <_>13 13 11 2 3. + 0 + -0.0309939999133348 + -1.1664899587631226 + 0.1427460014820099 + <_> + + <_> + + + + <_>5 7 10 4 -1. + <_>10 7 5 4 2. + 0 + -4.4269999489188194e-003 + -0.6681510210037231 + 4.4120000675320625e-003 + <_> + + <_> + + + + <_>11 9 10 7 -1. + <_>11 9 5 7 2. + 0 + -0.0457439981400967 + -0.4795520007610321 + 0.1512199938297272 + <_> + + <_> + + + + <_>3 9 10 7 -1. + <_>8 9 5 7 2. + 0 + 0.0166989993304014 + 0.1204859986901283 + -0.4523589909076691 + <_> + + <_> + + + + <_>16 4 6 6 -1. + <_>16 4 3 6 2. + 0 + 3.2210000790655613e-003 + -0.0776150003075600 + 0.2784659862518311 + <_> + + <_> + + + + <_>5 6 10 8 -1. + <_>5 6 5 4 2. + <_>10 10 5 4 2. + 0 + 0.0244340002536774 + -0.1998710036277771 + 0.6725370287895203 + <_> + + <_> + + + + <_>7 21 16 3 -1. + <_>7 21 8 3 2. + 0 + -0.0796779990196228 + 0.9222239851951599 + 0.0925579965114594 + <_> + + <_> + + + + <_>1 21 16 3 -1. + <_>9 21 8 3 2. + 0 + 0.0445300005376339 + -0.2669050097465515 + 0.3332050144672394 + <_> + + <_> + + + + <_>2 5 22 14 -1. + <_>13 5 11 7 2. + <_>2 12 11 7 2. + 0 + -0.1252830028533936 + -0.5425310134887695 + 0.1397629976272583 + <_> + + <_> + + + + <_>3 10 8 10 -1. + <_>3 10 4 5 2. + <_>7 15 4 5 2. + 0 + 0.0179719999432564 + 0.0182199999690056 + -0.6804850101470947 + <_> + + <_> + + + + <_>17 0 6 12 -1. + <_>20 0 3 6 2. + <_>17 6 3 6 2. + 0 + 0.0191840007901192 + -0.0125839998945594 + 0.5412669777870178 + <_> + + <_> + + + + <_>5 2 6 18 -1. + <_>7 2 2 18 3. + 0 + 0.0400240011513233 + -0.1763879954814911 + 0.7881039977073669 + <_> + + <_> + + + + <_>13 0 6 9 -1. + <_>15 0 2 9 3. + 0 + 0.0135589996352792 + 0.2073760032653809 + -0.4774430096149445 + <_> + + <_> + + + + <_>0 12 7 9 -1. + <_>0 15 7 3 3. + 0 + 0.0162209998816252 + 0.0230769999325275 + -0.6118209958076477 + <_> + + <_> + + + + <_>15 13 8 10 -1. + <_>19 13 4 5 2. + <_>15 18 4 5 2. + 0 + 0.0112290000542998 + -0.0177280008792877 + 0.4176419973373413 + <_> + + <_> + + + + <_>1 0 6 12 -1. + <_>1 0 3 6 2. + <_>4 6 3 6 2. + 0 + 0.0391930006444454 + -0.1894849985837936 + 0.7401930093765259 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + -9.5539996400475502e-003 + 0.4094710052013397 + -0.1350889950990677 + <_> + + <_> + + + + <_>1 13 8 10 -1. + <_>1 13 4 5 2. + <_>5 18 4 5 2. + 0 + 0.0278789997100830 + -0.2035070061683655 + 0.6162539720535278 + <_> + + <_> + + + + <_>3 21 19 2 -1. + <_>3 22 19 1 2. 
+ 0 + -0.0236009992659092 + -1.6967060565948486 + 0.1463319957256317 + <_> + + <_> + + + + <_>6 3 4 13 -1. + <_>8 3 2 13 2. + 0 + 0.0269300006330013 + -0.0304019991308451 + -1.0909470319747925 + <_> + + <_> + + + + <_>5 10 18 3 -1. + <_>5 11 18 1 3. + 0 + 2.8999999631196260e-004 + -0.2007600069046021 + 0.2231409996747971 + <_> + + <_> + + + + <_>9 3 5 12 -1. + <_>9 7 5 4 3. + 0 + -0.0411249995231628 + -0.4524219930171967 + 0.0573920011520386 + <_> + + <_> + + + + <_>11 2 4 15 -1. + <_>11 7 4 5 3. + 0 + 6.6789998672902584e-003 + 0.2382490038871765 + -0.2126210033893585 + <_> + + <_> + + + + <_>4 1 16 4 -1. + <_>4 3 16 2 2. + 0 + 0.0478649996221066 + -0.1819480061531067 + 0.6191840171813965 + <_> + + <_> + + + + <_>6 0 18 3 -1. + <_>6 1 18 1 3. + 0 + -3.1679999083280563e-003 + -0.2739320099353790 + 0.2501730024814606 + <_> + + <_> + + + + <_>5 1 10 8 -1. + <_>5 1 5 4 2. + <_>10 5 5 4 2. + 0 + -8.6230002343654633e-003 + -0.4628030061721802 + 0.0423979982733727 + <_> + + <_> + + + + <_>11 18 12 6 -1. + <_>17 18 6 3 2. + <_>11 21 6 3 2. + 0 + -7.4350000359117985e-003 + 0.4179680049419403 + -1.7079999670386314e-003 + <_> + + <_> + + + + <_>5 15 12 3 -1. + <_>11 15 6 3 2. + 0 + -1.8769999733194709e-003 + 0.1460230052471161 + -0.3372110128402710 + <_> + + <_> + + + + <_>1 10 22 4 -1. + <_>1 10 11 4 2. + 0 + -0.0862260013818741 + 0.7514340281486511 + 0.0107119996100664 + <_> + + <_> + + + + <_>7 9 9 6 -1. + <_>10 9 3 6 3. + 0 + 0.0468339994549751 + -0.1911959946155548 + 0.4841490089893341 + <_> + + <_> + + + + <_>6 11 12 5 -1. + <_>10 11 4 5 3. + 0 + -9.2000002041459084e-005 + 0.3522039949893951 + -0.1733330041170120 + <_> + + <_> + + + + <_>6 7 10 7 -1. + <_>11 7 5 7 2. + 0 + -0.0163439996540546 + -0.6439769864082336 + 9.0680001303553581e-003 + <_> + + <_> + + + + <_>11 2 8 10 -1. + <_>11 2 4 10 2. + 0 + 0.0457039996981621 + 0.0182160008698702 + 0.3197079896926880 + <_> + + <_> + + + + <_>5 2 8 10 -1. + <_>9 2 4 10 2. + 0 + -0.0273829996585846 + 1.0564049482345581 + -0.1727640032768250 + <_> + + <_> + + + + <_>6 4 18 6 -1. + <_>15 4 9 3 2. + <_>6 7 9 3 2. + 0 + -0.0276020001620054 + 0.2971549928188324 + -9.4600003212690353e-003 + <_> + + <_> + + + + <_>0 5 10 9 -1. + <_>0 8 10 3 3. + 0 + 7.6939999125897884e-003 + -0.2166029959917069 + 0.4738520085811615 + <_> + + <_> + + + + <_>2 7 21 6 -1. + <_>2 9 21 2 3. + 0 + -7.0500001311302185e-004 + 0.2404879927635193 + -0.2677600085735321 + <_> + + <_> + + + + <_>0 4 22 16 -1. + <_>0 4 11 8 2. + <_>11 12 11 8 2. + 0 + 0.1105419993400574 + -0.0335390008985996 + -1.0233880281448364 + <_> + + <_> + + + + <_>9 0 6 22 -1. + <_>9 11 6 11 2. + 0 + 0.0687659978866577 + -4.3239998631179333e-003 + 0.5715339779853821 + <_> + + <_> + + + + <_>9 1 3 12 -1. + <_>9 7 3 6 2. + 0 + 1.7999999690800905e-003 + 0.0775749981403351 + -0.4209269881248474 + <_> + + <_> + + + + <_>12 0 12 18 -1. + <_>18 0 6 9 2. + <_>12 9 6 9 2. + 0 + 0.1923200041055679 + 0.0820219963788986 + 2.8810169696807861 + <_> + + <_> + + + + <_>0 0 12 18 -1. + <_>0 0 6 9 2. + <_>6 9 6 9 2. + 0 + 0.1574209928512573 + -0.1370819956064224 + 2.0890059471130371 + <_> + + <_> + + + + <_>1 1 22 4 -1. + <_>12 1 11 2 2. + <_>1 3 11 2 2. + 0 + -0.0493870005011559 + -1.8610910177230835 + 0.1433209925889969 + <_> + + <_> + + + + <_>3 0 18 4 -1. + <_>3 2 18 2 2. + 0 + 0.0519290007650852 + -0.1873700022697449 + 0.5423160195350647 + <_> + + <_> + + + + <_>2 5 22 6 -1. + <_>2 7 22 2 3. + 0 + 0.0499650016427040 + 0.1417530030012131 + -1.5625779628753662 + <_> + + <_> + + + + <_>5 0 6 9 -1. + <_>5 3 6 3 3. 
+ 0 + -0.0426330007612705 + 1.6059479713439941 + -0.1471289992332459 + <_> + + <_> + + + + <_>10 14 6 9 -1. + <_>12 14 2 9 3. + 0 + -0.0375539995729923 + -0.8097490072250366 + 0.1325699985027313 + <_> + + <_> + + + + <_>8 14 6 9 -1. + <_>10 14 2 9 3. + 0 + -0.0371749997138977 + -1.3945020437240601 + -0.0570550002157688 + <_> + + <_> + + + + <_>5 18 18 3 -1. + <_>5 19 18 1 3. + 0 + 0.0139459995552897 + 0.0334270000457764 + 0.5747479796409607 + <_> + + <_> + + + + <_>6 0 6 13 -1. + <_>9 0 3 13 2. + 0 + -4.4800000614486635e-004 + -0.5532749891281128 + 0.0219529997557402 + <_> + + <_> + + + + <_>7 4 12 4 -1. + <_>7 4 6 4 2. + 0 + 0.0319930016994476 + 0.0203409995883703 + 0.3745920062065125 + <_> + + <_> + + + + <_>5 2 12 6 -1. + <_>9 2 4 6 3. + 0 + -4.2799999937415123e-003 + 0.4442870020866394 + -0.2299969941377640 + <_> + + <_> + + + + <_>4 1 18 3 -1. + <_>4 2 18 1 3. + 0 + 9.8550003021955490e-003 + 0.1831579953432083 + -0.4096499979496002 + <_> + + <_> + + + + <_>0 8 6 12 -1. + <_>0 12 6 4 3. + 0 + 0.0933569967746735 + -0.0636610016226768 + -1.6929290294647217 + <_> + + <_> + + + + <_>9 15 6 9 -1. + <_>11 15 2 9 3. + 0 + 0.0172099992632866 + 0.2015389949083328 + -0.4606109857559204 + <_> + + <_> + + + + <_>9 10 6 13 -1. + <_>11 10 2 13 3. + 0 + 8.4319999441504478e-003 + -0.3200399875640869 + 0.1531219929456711 + <_> + + <_> + + + + <_>6 17 18 2 -1. + <_>6 18 18 1 2. + 0 + -0.0140549996867776 + 0.8688240051269531 + 0.0325750000774860 + <_> + + <_> + + + + <_>9 4 6 9 -1. + <_>11 4 2 9 3. + 0 + -7.7180000953376293e-003 + 0.6368669867515564 + -0.1842550039291382 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + 0.0280050002038479 + 0.1735749989748001 + -0.4788359999656677 + <_> + + <_> + + + + <_>5 6 10 8 -1. + <_>5 6 5 4 2. + <_>10 10 5 4 2. + 0 + -0.0188849996775389 + 0.2410160005092621 + -0.2654759883880615 + <_> + + <_> + + + + <_>14 9 5 8 -1. + <_>14 13 5 4 2. + 0 + -0.0185850001871586 + 0.5423250198364258 + 0.0536330007016659 + <_> + + <_> + + + + <_>5 9 5 8 -1. + <_>5 13 5 4 2. + 0 + -0.0364370010793209 + 2.3908898830413818 + -0.1363469958305359 + <_> + + <_> + + + + <_>14 11 9 6 -1. + <_>14 13 9 2 3. + 0 + 0.0324550010263920 + 0.1591069996356964 + -0.6758149862289429 + <_> + + <_> + + + + <_>0 2 23 15 -1. + <_>0 7 23 5 3. + 0 + 0.0597819983959198 + -2.3479999508708715e-003 + -0.7305369973182678 + <_> + + <_> + + + + <_>16 0 8 12 -1. + <_>16 6 8 6 2. + 0 + 9.8209995776414871e-003 + -0.1144409999251366 + 0.3057030141353607 + <_> + + <_> + + + + <_>4 15 6 9 -1. + <_>4 18 6 3 3. + 0 + -0.0351639986038208 + -1.0511469841003418 + -0.0331030003726482 + <_> + + <_> + + + + <_>8 18 9 4 -1. + <_>8 20 9 2 2. + 0 + 2.7429999317973852e-003 + -0.2013539969921112 + 0.3275409936904907 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + 8.1059997901320457e-003 + -0.2138350009918213 + 0.4336209893226624 + <_> + + <_> + + + + <_>13 11 11 6 -1. + <_>13 13 11 2 3. + 0 + 0.0889429971575737 + 0.1094089969992638 + -4.7609338760375977 + <_> + + <_> + + + + <_>0 11 11 6 -1. + <_>0 13 11 2 3. + 0 + -0.0300549995154142 + -1.7169300317764282 + -0.0609190016984940 + <_> + + <_> + + + + <_>0 9 24 6 -1. + <_>12 9 12 3 2. + <_>0 12 12 3 2. + 0 + -0.0217349994927645 + 0.6477890014648438 + -0.0328309983015060 + <_> + + <_> + + + + <_>6 16 8 8 -1. + <_>6 20 8 4 2. + 0 + 0.0376489982008934 + -0.0100600002333522 + -0.7656909823417664 + <_> + + <_> + + + + <_>10 16 14 6 -1. + <_>10 18 14 2 3. + 0 + 2.7189999818801880e-003 + 0.1988890022039414 + -0.0824790000915527 + <_> + + <_> + + + + <_>1 1 21 3 -1. 
+ <_>1 2 21 1 3. + 0 + -0.0105480002239347 + -0.8661360144615173 + -0.0259860008955002 + <_> + + <_> + + + + <_>0 2 24 3 -1. + <_>0 2 12 3 2. + 0 + 0.1296630054712296 + 0.1391199976205826 + -2.2271950244903564 + <_> + + <_> + + + + <_>2 15 8 5 -1. + <_>6 15 4 5 2. + 0 + -0.0176769997924566 + 0.3396770060062408 + -0.2398959994316101 + <_> + + <_> + + + + <_>2 11 21 3 -1. + <_>9 11 7 3 3. + 0 + -0.0770519971847534 + -2.5017969608306885 + 0.1284199953079224 + <_> + + <_> + + + + <_>1 18 12 6 -1. + <_>1 18 6 3 2. + <_>7 21 6 3 2. + 0 + -0.0192300006747246 + 0.5064120292663574 + -0.1975159943103790 + <_> + + <_> + + + + <_>10 14 4 10 -1. + <_>10 19 4 5 2. + 0 + -0.0512229986488819 + -2.9333369731903076 + 0.1385850012302399 + <_> + + <_> + + + + <_>7 7 4 10 -1. + <_>7 12 4 5 2. + 0 + 2.0830000285059214e-003 + -0.6004359722137451 + 0.0297180004417896 + <_> + + <_> + + + + <_>9 8 6 12 -1. + <_>9 12 6 4 3. + 0 + 0.0254180002957582 + 0.3391579985618591 + -0.1439200043678284 + <_> + + <_> + + + + <_>7 1 9 6 -1. + <_>10 1 3 6 3. + 0 + -0.0239059999585152 + -1.1082680225372314 + -0.0473770014941692 + <_> + + <_> + + + + <_>3 14 19 2 -1. + <_>3 15 19 1 2. + 0 + -6.3740001060068607e-003 + 0.4453369975090027 + -0.0670529976487160 + <_> + + <_> + + + + <_>7 7 10 10 -1. + <_>7 7 5 5 2. + <_>12 12 5 5 2. + 0 + -0.0376989990472794 + -1.0406579971313477 + -0.0417900010943413 + <_> + + <_> + + + + <_>3 12 18 12 -1. + <_>3 12 9 12 2. + 0 + 0.2165510058403015 + 0.0338630005717278 + 0.8201730251312256 + <_> + + <_> + + + + <_>8 0 6 12 -1. + <_>10 0 2 12 3. + 0 + -0.0134009998291731 + 0.5290349721908569 + -0.1913300007581711 + -3.2396929264068604 + 17 + -1 + <_> + + + <_> + + <_> + + + + <_>3 0 17 9 -1. + <_>3 3 17 3 3. + 0 + 0.0712689980864525 + -0.5363119840621948 + 0.6071529984474182 + <_> + + <_> + + + + <_>6 0 12 11 -1. + <_>10 0 4 11 3. + 0 + 0.0561110004782677 + -0.5014160275459290 + 0.4397610127925873 + <_> + + <_> + + + + <_>1 0 6 13 -1. + <_>4 0 3 13 2. + 0 + 0.0404639989137650 + -0.3292219936847687 + 0.5483469963073731 + <_> + + <_> + + + + <_>5 8 16 6 -1. + <_>5 11 16 3 2. + 0 + 0.0631550028920174 + -0.3170169889926910 + 0.4615299999713898 + <_> + + <_> + + + + <_>8 8 5 12 -1. + <_>8 14 5 6 2. + 0 + 0.0103209996595979 + 0.1069499999284744 + -0.9824389815330505 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>9 21 6 3 3. + 0 + 0.0626069977879524 + -0.1432970017194748 + 0.7109500169754028 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0394160002470016 + 0.9438019990921021 + -0.2157209962606430 + <_> + + <_> + + + + <_>2 0 20 3 -1. + <_>2 1 20 1 3. + 0 + -5.3960001096129417e-003 + -0.5461199879646301 + 0.2530379891395569 + <_> + + <_> + + + + <_>4 6 15 10 -1. + <_>9 6 5 10 3. + 0 + 0.1077319979667664 + 0.0124960001558065 + -1.0809199810028076 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0169820003211498 + -0.3153640031814575 + 0.5123999714851379 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + 0.0312169995158911 + -4.5199999585747719e-003 + -1.2443480491638184 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>16 0 2 9 3. + 0 + -0.0231069996953011 + -0.7649289965629578 + 0.2064059972763062 + <_> + + <_> + + + + <_>7 16 9 6 -1. + <_>7 18 9 2 3. + 0 + -0.0112039996311069 + 0.2409269958734512 + -0.3514209985733032 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>16 0 2 9 3. + 0 + -4.7479998320341110e-003 + -0.0970079973340034 + 0.2063809931278229 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. 
+ 0 + -0.0173589996993542 + -0.7902029752731323 + 0.0218529999256134 + <_> + + <_> + + + + <_>17 1 6 16 -1. + <_>19 1 2 16 3. + 0 + 0.0188519991934299 + -0.1039460003376007 + 0.5484420061111450 + <_> + + <_> + + + + <_>1 1 6 16 -1. + <_>3 1 2 16 3. + 0 + 7.2249998338520527e-003 + -0.4040940105915070 + 0.2676379978656769 + <_> + + <_> + + + + <_>14 13 6 9 -1. + <_>14 16 6 3 3. + 0 + 0.0189159996807575 + 0.2050800025463104 + -1.0206340551376343 + <_> + + <_> + + + + <_>0 0 6 9 -1. + <_>0 3 6 3 3. + 0 + 0.0311569999903440 + 1.2400000123307109e-003 + -0.8729349970817566 + <_> + + <_> + + + + <_>9 5 6 6 -1. + <_>9 5 3 6 2. + 0 + 0.0209519993513823 + -5.5559999309480190e-003 + 0.8035619854927063 + <_> + + <_> + + + + <_>3 10 9 6 -1. + <_>6 10 3 6 3. + 0 + 0.0112910000607371 + -0.3647840023040772 + 0.2276789993047714 + <_> + + <_> + + + + <_>14 7 3 16 -1. + <_>14 15 3 8 2. + 0 + -0.0570110008120537 + -1.4295619726181030 + 0.1432200074195862 + <_> + + <_> + + + + <_>4 10 14 12 -1. + <_>4 10 7 6 2. + <_>11 16 7 6 2. + 0 + 0.0721940025687218 + -0.0418500006198883 + -1.9111829996109009 + <_> + + <_> + + + + <_>7 6 12 6 -1. + <_>7 8 12 2 3. + 0 + -0.0198740009218454 + 0.2642549872398377 + -0.3261770009994507 + <_> + + <_> + + + + <_>7 2 4 20 -1. + <_>9 2 2 20 2. + 0 + -0.0166929997503757 + -0.8390780091285706 + 4.0799999260343611e-004 + <_> + + <_> + + + + <_>14 13 6 9 -1. + <_>14 16 6 3 3. + 0 + -0.0398349985480309 + -0.4885849952697754 + 0.1643610000610352 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>12 6 2 9 2. + 0 + 0.0270099993795156 + -0.1886249929666519 + 0.8341940045356751 + <_> + + <_> + + + + <_>14 13 6 9 -1. + <_>14 16 6 3 3. + 0 + -3.9420002140104771e-003 + 0.2323150038719177 + -0.0723600015044212 + <_> + + <_> + + + + <_>5 20 14 4 -1. + <_>5 22 14 2 2. + 0 + 0.0228330008685589 + -0.0358840003609657 + -1.1549400091171265 + <_> + + <_> + + + + <_>4 4 16 12 -1. + <_>4 10 16 6 2. + 0 + -0.0688880011439323 + -1.7837309837341309 + 0.1515900045633316 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0430970005691051 + -0.2160809934139252 + 0.5062410235404968 + <_> + + <_> + + + + <_>3 0 21 4 -1. + <_>3 2 21 2 2. + 0 + 8.6239995434880257e-003 + -0.1779559999704361 + 0.2895790040493012 + <_> + + <_> + + + + <_>4 13 6 9 -1. + <_>4 16 6 3 3. + 0 + 0.0145610002800822 + -0.0114080002531409 + -0.8940200209617615 + <_> + + <_> + + + + <_>16 16 5 8 -1. + <_>16 20 5 4 2. + 0 + -0.0115010002627969 + 0.3017199933528900 + -0.0436590015888214 + <_> + + <_> + + + + <_>4 0 16 16 -1. + <_>4 0 8 8 2. + <_>12 8 8 8 2. + 0 + -0.1097149997949600 + -0.9514709711074829 + -0.0199730005115271 + <_> + + <_> + + + + <_>6 6 14 6 -1. + <_>13 6 7 3 2. + <_>6 9 7 3 2. + 0 + 0.0452280007302761 + 0.0331109985709190 + 0.9661980271339417 + <_> + + <_> + + + + <_>10 5 4 15 -1. + <_>10 10 4 5 3. + 0 + -0.0270479992032051 + 0.9796360135078430 + -0.1726190000772476 + <_> + + <_> + + + + <_>9 15 12 8 -1. + <_>15 15 6 4 2. + <_>9 19 6 4 2. + 0 + 0.0180309992283583 + -0.0208010002970696 + 0.2738589942455292 + <_> + + <_> + + + + <_>6 7 12 4 -1. + <_>12 7 6 4 2. + 0 + 0.0505249984562397 + -0.0568029992282391 + -1.7775089740753174 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>12 6 7 3 2. + <_>5 9 7 3 2. + 0 + -0.0299239996820688 + 0.6532920002937317 + -0.0235370006412268 + <_> + + <_> + + + + <_>3 6 18 10 -1. + <_>3 6 9 5 2. + <_>12 11 9 5 2. + 0 + 0.0380580015480518 + 0.0263170003890991 + -0.7066569924354553 + <_> + + <_> + + + + <_>6 0 18 21 -1. + <_>12 0 6 21 3. 
+ 0 + 0.1856389939785004 + -5.6039998307824135e-003 + 0.3287369906902313 + <_> + + <_> + + + + <_>0 0 24 21 -1. + <_>8 0 8 21 3. + 0 + -4.0670000016689301e-003 + 0.3420479893684387 + -0.3017159998416901 + <_> + + <_> + + + + <_>6 18 18 3 -1. + <_>6 19 18 1 3. + 0 + 0.0101089999079704 + -7.3600001633167267e-003 + 0.5798159837722778 + <_> + + <_> + + + + <_>0 15 9 6 -1. + <_>0 17 9 2 3. + 0 + -0.0115670002996922 + -0.5272219777107239 + 0.0464479997754097 + <_> + + <_> + + + + <_>4 3 19 2 -1. + <_>4 4 19 1 2. + 0 + -6.5649999305605888e-003 + -0.5852910280227661 + 0.1910189986228943 + <_> + + <_> + + + + <_>0 3 24 2 -1. + <_>0 4 24 1 2. + 0 + 0.0105820000171661 + 0.0210730005055666 + -0.6889259815216065 + <_> + + <_> + + + + <_>15 14 9 4 -1. + <_>15 16 9 2 2. + 0 + -0.0203040000051260 + -0.3640069961547852 + 0.1533879935741425 + <_> + + <_> + + + + <_>0 14 9 4 -1. + <_>0 16 9 2 2. + 0 + 2.3529999889433384e-003 + 0.0361640006303787 + -0.5982509851455689 + <_> + + <_> + + + + <_>6 15 18 2 -1. + <_>6 16 18 1 2. + 0 + -1.4690000098198652e-003 + -0.1470769941806793 + 0.3750799894332886 + <_> + + <_> + + + + <_>3 17 18 3 -1. + <_>3 18 18 1 3. + 0 + 8.6449999362230301e-003 + -0.2170850038528442 + 0.5193679928779602 + <_> + + <_> + + + + <_>12 0 3 23 -1. + <_>13 0 1 23 3. + 0 + -0.0243260003626347 + -1.0846769809722900 + 0.1408479958772659 + <_> + + <_> + + + + <_>6 0 8 6 -1. + <_>6 3 8 3 2. + 0 + 0.0744189992547035 + -0.1551380008459091 + 1.1822769641876221 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + 0.0170779991894960 + 0.0442310012876987 + 0.9156110286712647 + <_> + + <_> + + + + <_>9 0 3 23 -1. + <_>10 0 1 23 3. + 0 + -0.0245779994875193 + -1.5504100322723389 + -0.0547459982335567 + <_> + + <_> + + + + <_>10 7 4 10 -1. + <_>10 12 4 5 2. + 0 + 0.0302050001919270 + 0.1666280031204224 + -1.0001239776611328 + <_> + + <_> + + + + <_>7 8 10 12 -1. + <_>7 12 10 4 3. + 0 + 0.0121360002085567 + -0.7707909941673279 + -4.8639997839927673e-003 + <_> + + <_> + + + + <_>14 9 6 14 -1. + <_>17 9 3 7 2. + <_>14 16 3 7 2. + 0 + 0.0867170020937920 + 0.1106169968843460 + -1.6857999563217163 + <_> + + <_> + + + + <_>2 0 10 9 -1. + <_>2 3 10 3 3. + 0 + -0.0423090010881424 + 1.1075930595397949 + -0.1543859988451004 + <_> + + <_> + + + + <_>11 1 5 12 -1. + <_>11 7 5 6 2. + 0 + -2.6420000940561295e-003 + 0.2745189964771271 + -0.1845619976520538 + <_> + + <_> + + + + <_>1 4 12 10 -1. + <_>1 4 6 5 2. + <_>7 9 6 5 2. + 0 + -0.0566620007157326 + -0.8062559962272644 + -0.0169280003756285 + <_> + + <_> + + + + <_>15 1 9 4 -1. + <_>15 3 9 2 2. + 0 + 0.0234750006347895 + 0.1418769955635071 + -0.2550089955329895 + <_> + + <_> + + + + <_>1 2 8 10 -1. + <_>1 2 4 5 2. + <_>5 7 4 5 2. + 0 + -0.0208030007779598 + 0.1982630044221878 + -0.3117119967937470 + <_> + + <_> + + + + <_>10 1 5 12 -1. + <_>10 5 5 4 3. + 0 + 7.2599998675286770e-003 + -0.0505909994244576 + 0.4192380011081696 + <_> + + <_> + + + + <_>4 0 14 24 -1. + <_>11 0 7 24 2. + 0 + 0.3416000008583069 + -0.1667490005493164 + 0.9274860024452210 + <_> + + <_> + + + + <_>7 17 10 4 -1. + <_>7 19 10 2 2. + 0 + 6.2029999680817127e-003 + -0.1262589991092682 + 0.4044530093669891 + <_> + + <_> + + + + <_>10 14 4 10 -1. + <_>10 19 4 5 2. + 0 + 0.0326920002698898 + -0.0326349996030331 + -0.9893980026245117 + <_> + + <_> + + + + <_>13 15 6 9 -1. + <_>15 15 2 9 3. + 0 + 2.1100000594742596e-004 + -0.0645340010523796 + 0.2547369897365570 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>3 22 18 1 3. 
+ 0 + 7.2100001852959394e-004 + -0.3661859929561615 + 0.1197310015559197 + <_> + + <_> + + + + <_>13 15 6 9 -1. + <_>15 15 2 9 3. + 0 + 0.0544909983873367 + 0.1207349970936775 + -1.0291390419006348 + <_> + + <_> + + + + <_>5 15 6 9 -1. + <_>7 15 2 9 3. + 0 + -0.0101410001516342 + -0.5217720270156860 + 0.0337349995970726 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>12 6 2 9 2. + <_>10 15 2 9 2. + 0 + -0.0188159998506308 + 0.6518179774284363 + 1.3399999588727951e-003 + <_> + + <_> + + + + <_>7 3 6 11 -1. + <_>9 3 2 11 3. + 0 + -5.3480002097785473e-003 + 0.1737069934606552 + -0.3413200080394745 + <_> + + <_> + + + + <_>15 1 9 4 -1. + <_>15 3 9 2 2. + 0 + -0.0108470004051924 + -0.1969989985227585 + 0.1504549980163574 + <_> + + <_> + + + + <_>5 4 14 8 -1. + <_>5 8 14 4 2. + 0 + -0.0499260015785694 + -0.5088850259780884 + 0.0307620000094175 + <_> + + <_> + + + + <_>8 1 15 9 -1. + <_>8 4 15 3 3. + 0 + 0.0121600003913045 + -0.0692519992589951 + 0.1874549984931946 + <_> + + <_> + + + + <_>7 2 8 10 -1. + <_>7 2 4 5 2. + <_>11 7 4 5 2. + 0 + -2.2189998999238014e-003 + -0.4084909856319428 + 0.0799549967050552 + <_> + + <_> + + + + <_>12 2 6 12 -1. + <_>12 2 3 12 2. + 0 + 3.1580000650137663e-003 + -0.2112459987401962 + 0.2236640006303787 + <_> + + <_> + + + + <_>6 2 6 12 -1. + <_>9 2 3 12 2. + 0 + 4.1439998894929886e-003 + -0.4990029931068420 + 0.0629170015454292 + <_> + + <_> + + + + <_>7 7 12 4 -1. + <_>7 7 6 4 2. + 0 + -7.3730000294744968e-003 + -0.2055329978466034 + 0.2209669947624207 + <_> + + <_> + + + + <_>6 3 12 10 -1. + <_>10 3 4 10 3. + 0 + 0.0518120005726814 + 0.1809680014848709 + -0.4349580109119415 + <_> + + <_> + + + + <_>5 6 16 6 -1. + <_>13 6 8 3 2. + <_>5 9 8 3 2. + 0 + 0.0183400008827448 + 0.0152000002563000 + 0.3799169957637787 + <_> + + <_> + + + + <_>3 1 18 9 -1. + <_>9 1 6 9 3. + 0 + 0.1749079972505570 + -0.2092079967260361 + 0.4001300036907196 + <_> + + <_> + + + + <_>3 8 18 5 -1. + <_>9 8 6 5 3. + 0 + 0.0539939999580383 + 0.2475160062313080 + -0.2671290040016174 + <_> + + <_> + + + + <_>0 0 24 22 -1. + <_>0 0 12 11 2. + <_>12 11 12 11 2. + 0 + -0.3203319907188416 + -1.9094380140304565 + -0.0669609978795052 + <_> + + <_> + + + + <_>14 16 9 6 -1. + <_>14 18 9 2 3. + 0 + -0.0270600002259016 + -0.7137129902839661 + 0.1590459942817688 + <_> + + <_> + + + + <_>0 16 24 8 -1. + <_>0 20 24 4 2. + 0 + 0.0774639993906021 + -0.1697019934654236 + 0.7755299806594849 + <_> + + <_> + + + + <_>1 19 22 4 -1. + <_>12 19 11 2 2. + <_>1 21 11 2 2. + 0 + 0.0237719994038343 + 0.1902189999818802 + -0.6016209721565247 + <_> + + <_> + + + + <_>1 16 9 6 -1. + <_>1 18 9 2 3. + 0 + 0.0115010002627969 + 7.7039999887347221e-003 + -0.6173030138015747 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 8 5 4 2. + 0 + 0.0326160006225109 + 0.1715919971466065 + -0.7097820043563843 + <_> + + <_> + + + + <_>9 15 6 9 -1. + <_>11 15 2 9 3. + 0 + -0.0443830005824566 + -2.2606229782104492 + -0.0732769966125488 + <_> + + <_> + + + + <_>10 18 12 6 -1. + <_>16 18 6 3 2. + <_>10 21 6 3 2. + 0 + -0.0584760010242462 + 2.4087750911712646 + 0.0830919966101646 + <_> + + <_> + + + + <_>2 18 12 6 -1. + <_>2 18 6 3 2. + <_>8 21 6 3 2. + 0 + 0.0193039998412132 + -0.2708230018615723 + 0.2736999988555908 + <_> + + <_> + + + + <_>8 3 16 9 -1. + <_>8 6 16 3 3. + 0 + -0.0447059981524944 + 0.3135559856891632 + -0.0624920018017292 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -0.0603349991142750 + -1.4515119791030884 + -0.0587610006332397 + <_> + + <_> + + + + <_>5 5 18 3 -1. + <_>5 6 18 1 3. 
+ 0 + 0.0116670001298189 + -0.0180849991738796 + 0.5047969818115234 + <_> + + <_> + + + + <_>2 6 9 6 -1. + <_>2 9 9 3 2. + 0 + 0.0280099995434284 + -0.2330289930105209 + 0.3070870041847229 + <_> + + <_> + + + + <_>14 2 10 9 -1. + <_>14 5 10 3 3. + 0 + 0.0653970018029213 + 0.1413590013980866 + -0.5001090168952942 + <_> + + <_> + + + + <_>3 6 18 3 -1. + <_>3 7 18 1 3. + 0 + 9.6239997074007988e-003 + -0.2205460071563721 + 0.3919120132923126 + <_> + + <_> + + + + <_>9 2 15 6 -1. + <_>9 4 15 2 3. + 0 + 2.5510000996291637e-003 + -0.1138150021433830 + 0.2003230005502701 + <_> + + <_> + + + + <_>4 8 15 6 -1. + <_>4 10 15 2 3. + 0 + 0.0318470001220703 + 0.0254769995808601 + -0.5332639813423157 + <_> + + <_> + + + + <_>0 5 24 4 -1. + <_>12 5 12 2 2. + <_>0 7 12 2 2. + 0 + 0.0330550000071526 + 0.1780769973993301 + -0.6279389858245850 + <_> + + <_> + + + + <_>7 8 6 12 -1. + <_>9 8 2 12 3. + 0 + 0.0476009994745255 + -0.1474789977073669 + 1.4204180240631104 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -0.0195719990879297 + -0.5269349813461304 + 0.1583860069513321 + <_> + + <_> + + + + <_>0 12 6 12 -1. + <_>0 12 3 6 2. + <_>3 18 3 6 2. + 0 + -0.0547300018370152 + 0.8823159933090210 + -0.1662780046463013 + <_> + + <_> + + + + <_>14 12 10 6 -1. + <_>14 14 10 2 3. + 0 + -0.0226860009133816 + -0.4838689863681793 + 0.1500010043382645 + <_> + + <_> + + + + <_>2 7 18 9 -1. + <_>2 10 18 3 3. + 0 + 0.1071320027112961 + -0.2133619934320450 + 0.4233390092849731 + <_> + + <_> + + + + <_>11 14 10 9 -1. + <_>11 17 10 3 3. + 0 + -0.0363800004124641 + -0.0741980001330376 + 0.1458940058946610 + <_> + + <_> + + + + <_>7 6 10 8 -1. + <_>7 6 5 4 2. + <_>12 10 5 4 2. + 0 + 0.0139359999448061 + -0.2491160035133362 + 0.2677119970321655 + <_> + + <_> + + + + <_>6 6 14 6 -1. + <_>13 6 7 3 2. + <_>6 9 7 3 2. + 0 + 0.0209919996559620 + 8.7959999218583107e-003 + 0.4306499958038330 + <_> + + <_> + + + + <_>4 13 9 7 -1. + <_>7 13 3 7 3. + 0 + 0.0491189993917942 + -0.1759199947118759 + 0.6928290128707886 + <_> + + <_> + + + + <_>14 10 6 12 -1. + <_>17 10 3 6 2. + <_>14 16 3 6 2. + 0 + 0.0363159999251366 + 0.1314529925584793 + -0.3359729945659638 + <_> + + <_> + + + + <_>4 10 6 12 -1. + <_>4 10 3 6 2. + <_>7 16 3 6 2. + 0 + 0.0412280000746250 + -0.0456920005381107 + -1.3515930175781250 + <_> + + <_> + + + + <_>13 9 8 6 -1. + <_>13 9 4 6 2. + 0 + 0.0156720001250505 + 0.1754409968852997 + -0.0605500005185604 + <_> + + <_> + + + + <_>8 3 4 14 -1. + <_>10 3 2 14 2. + 0 + -0.0162860006093979 + -1.1308189630508423 + -0.0395330004394054 + <_> + + <_> + + + + <_>17 0 3 18 -1. + <_>18 0 1 18 3. + 0 + -3.0229999683797359e-003 + -0.2245430052280426 + 0.2362809926271439 + <_> + + <_> + + + + <_>4 12 16 12 -1. + <_>12 12 8 12 2. + 0 + -0.1378629952669144 + 0.4537689983844757 + -0.2109870016574860 + <_> + + <_> + + + + <_>15 0 6 14 -1. + <_>17 0 2 14 3. + 0 + -9.6760001033544540e-003 + -0.1510509997606278 + 0.2078170031309128 + <_> + + <_> + + + + <_>3 0 6 14 -1. + <_>5 0 2 14 3. + 0 + -0.0248399991542101 + -0.6835029721260071 + -8.0040004104375839e-003 + <_> + + <_> + + + + <_>12 2 12 20 -1. + <_>16 2 4 20 3. + 0 + -0.1396439969539642 + 0.6501129865646362 + 0.0465440005064011 + <_> + + <_> + + + + <_>0 2 12 20 -1. + <_>4 2 4 20 3. + 0 + -0.0821539983153343 + 0.4488719999790192 + -0.2359199970960617 + <_> + + <_> + + + + <_>16 0 6 17 -1. + <_>18 0 2 17 3. + 0 + 3.8449999410659075e-003 + -0.0881730020046234 + 0.2734679877758026 + <_> + + <_> + + + + <_>2 0 6 17 -1. + <_>4 0 2 17 3. 
+ 0 + -6.6579999402165413e-003 + -0.4686659872531891 + 0.0770019963383675 + <_> + + <_> + + + + <_>15 6 9 6 -1. + <_>15 8 9 2 3. + 0 + -0.0158980004489422 + 0.2926839888095856 + -0.0219410005956888 + <_> + + <_> + + + + <_>0 6 9 6 -1. + <_>0 8 9 2 3. + 0 + -0.0509460009634495 + -1.2093789577484131 + -0.0421099998056889 + <_> + + <_> + + + + <_>18 1 6 13 -1. + <_>20 1 2 13 3. + 0 + 0.0168379992246628 + -0.0455959998071194 + 0.5018069744110107 + <_> + + <_> + + + + <_>0 1 6 13 -1. + <_>2 1 2 13 3. + 0 + 0.0159189999103546 + -0.2690429985523224 + 0.2651630043983460 + <_> + + <_> + + + + <_>16 0 4 9 -1. + <_>16 0 2 9 2. + 0 + 3.6309999413788319e-003 + -0.1304610073566437 + 0.3180710077285767 + <_> + + <_> + + + + <_>5 10 12 7 -1. + <_>9 10 4 7 3. + 0 + -0.0861449986696243 + 1.9443659782409668 + -0.1397829949855804 + <_> + + <_> + + + + <_>12 9 12 6 -1. + <_>12 11 12 2 3. + 0 + 0.0331409983336926 + 0.1526679992675781 + -0.0308660008013248 + <_> + + <_> + + + + <_>0 9 12 6 -1. + <_>0 11 12 2 3. + 0 + -3.9679999463260174e-003 + -0.7120230197906494 + -0.0138440001755953 + <_> + + <_> + + + + <_>5 7 14 9 -1. + <_>5 10 14 3 3. + 0 + -0.0240080002695322 + 0.9200779795646668 + 0.0467239990830421 + <_> + + <_> + + + + <_>0 15 20 3 -1. + <_>0 16 20 1 3. + 0 + 8.7320003658533096e-003 + -0.2256730049848557 + 0.3193179965019226 + <_> + + <_> + + + + <_>8 10 8 10 -1. + <_>12 10 4 5 2. + <_>8 15 4 5 2. + 0 + -0.0277869999408722 + -0.7233710289001465 + 0.1701859980821610 + <_> + + <_> + + + + <_>5 4 13 9 -1. + <_>5 7 13 3 3. + 0 + -0.1945530027151108 + 1.2461860179901123 + -0.1473619937896729 + <_> + + <_> + + + + <_>10 2 6 18 -1. + <_>10 8 6 6 3. + 0 + -0.1086969971656799 + -1.4465179443359375 + 0.1214530020952225 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + -0.0194949992001057 + -0.7815309762954712 + -0.0237329993396997 + <_> + + <_> + + + + <_>6 9 12 4 -1. + <_>6 11 12 2 2. + 0 + 3.0650000553578138e-003 + -0.8547139763832092 + 0.1668699979782105 + <_> + + <_> + + + + <_>3 2 15 12 -1. + <_>3 6 15 4 3. + 0 + 0.0591939985752106 + -0.1485369950532913 + 1.1273469924926758 + <_> + + <_> + + + + <_>12 0 12 5 -1. + <_>16 0 4 5 3. + 0 + -0.0542079992592335 + 0.5472699999809265 + 0.0355239994823933 + <_> + + <_> + + + + <_>0 15 18 3 -1. + <_>6 15 6 3 3. + 0 + -0.0393249988555908 + 0.3664259910583496 + -0.2054399996995926 + <_> + + <_> + + + + <_>0 14 24 5 -1. + <_>8 14 8 5 3. + 0 + 0.0822789967060089 + -0.0350079983472824 + 0.5399420261383057 + <_> + + <_> + + + + <_>5 1 3 18 -1. + <_>6 1 1 18 3. + 0 + -7.4479999020695686e-003 + -0.6153749823570252 + -3.5319998860359192e-003 + <_> + + <_> + + + + <_>10 0 4 14 -1. + <_>10 0 2 14 2. + 0 + 7.3770000599324703e-003 + -0.0655910000205040 + 0.4196139872074127 + <_> + + <_> + + + + <_>9 3 4 9 -1. + <_>11 3 2 9 2. + 0 + 7.0779998786747456e-003 + -0.3412950038909912 + 0.1253679990768433 + <_> + + <_> + + + + <_>8 2 12 6 -1. + <_>14 2 6 3 2. + <_>8 5 6 3 2. + 0 + -0.0155819999054074 + -0.3024039864540100 + 0.2151100039482117 + <_> + + <_> + + + + <_>0 4 17 4 -1. + <_>0 6 17 2 2. + 0 + -2.7399999089539051e-003 + 0.0765530019998550 + -0.4106050133705139 + <_> + + <_> + + + + <_>16 16 5 8 -1. + <_>16 20 5 4 2. + 0 + -0.0706000030040741 + -0.9735620021820068 + 0.1124180033802986 + <_> + + <_> + + + + <_>3 16 5 8 -1. + <_>3 20 5 4 2. + 0 + -0.0117060001939535 + 0.1856070011854172 + -0.2975519895553589 + <_> + + <_> + + + + <_>6 18 18 2 -1. + <_>6 19 18 1 2. + 0 + 7.1499997284263372e-004 + -0.0596500001847744 + 0.2482469975948334 + <_> + + <_> + + + + <_>0 0 12 5 -1. 
+ <_>4 0 4 5 3. + 0 + -0.0368660017848015 + 0.3275170028209686 + -0.2305960059165955 + <_> + + <_> + + + + <_>14 3 6 12 -1. + <_>17 3 3 6 2. + <_>14 9 3 6 2. + 0 + -0.0325269997119904 + -0.2932029962539673 + 0.1542769968509674 + <_> + + <_> + + + + <_>0 12 6 12 -1. + <_>2 12 2 12 3. + 0 + -0.0748139992356300 + -1.2143570184707642 + -0.0522440001368523 + <_> + + <_> + + + + <_>2 3 21 3 -1. + <_>2 4 21 1 3. + 0 + 0.0414699986577034 + 0.1306249946355820 + -2.3274369239807129 + <_> + + <_> + + + + <_>4 3 6 12 -1. + <_>4 3 3 6 2. + <_>7 9 3 6 2. + 0 + -0.0288800001144409 + -0.6607459783554077 + -9.0960003435611725e-003 + <_> + + <_> + + + + <_>12 8 12 6 -1. + <_>18 8 6 3 2. + <_>12 11 6 3 2. + 0 + 0.0463819988071918 + 0.1663019955158234 + -0.6694949865341187 + <_> + + <_> + + + + <_>0 15 16 9 -1. + <_>8 15 8 9 2. + 0 + 0.2542499899864197 + -0.0546419993042946 + -1.2676080465316772 + <_> + + <_> + + + + <_>6 13 18 5 -1. + <_>6 13 9 5 2. + 0 + 2.4000001139938831e-003 + 0.2027679979801178 + 0.0146679999306798 + <_> + + <_> + + + + <_>1 6 15 6 -1. + <_>6 6 5 6 3. + 0 + -0.0828059986233711 + -0.7871360182762146 + -0.0244689993560314 + <_> + + <_> + + + + <_>11 9 9 6 -1. + <_>14 9 3 6 3. + 0 + -0.0114380000159144 + 0.2862339913845062 + -0.0308940000832081 + <_> + + <_> + + + + <_>3 0 15 11 -1. + <_>8 0 5 11 3. + 0 + -0.1291339993476868 + 1.7292929887771606 + -0.1429390013217926 + <_> + + <_> + + + + <_>15 3 3 18 -1. + <_>15 9 3 6 3. + 0 + 0.0385529994964600 + 0.0192329995334148 + 0.3773260116577148 + <_> + + <_> + + + + <_>6 3 3 18 -1. + <_>6 9 3 6 3. + 0 + 0.1019140034914017 + -0.0745339989662170 + -3.3868899345397949 + <_> + + <_> + + + + <_>9 5 10 8 -1. + <_>14 5 5 4 2. + <_>9 9 5 4 2. + 0 + -0.0190680008381605 + 0.3181410133838654 + 0.0192610006779432 + <_> + + <_> + + + + <_>4 4 16 8 -1. + <_>4 4 8 4 2. + <_>12 8 8 4 2. + 0 + -0.0607750006020069 + 0.7693629860877991 + -0.1764400005340576 + <_> + + <_> + + + + <_>7 7 12 3 -1. + <_>7 7 6 3 2. + 0 + 0.0246799997985363 + 0.1839649975299835 + -0.3086880147457123 + <_> + + <_> + + + + <_>5 0 9 13 -1. + <_>8 0 3 13 3. + 0 + 0.0267590004950762 + -0.2345490008592606 + 0.3305659890174866 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + 0.0149699999019504 + 0.1721359938383102 + -0.1824889928102493 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + 0.0261429995298386 + -0.0464639998972416 + -1.1318379640579224 + <_> + + <_> + + + + <_>8 1 10 9 -1. + <_>8 4 10 3 3. + 0 + -0.0375120006501675 + 0.8040400147438049 + 0.0696600005030632 + <_> + + <_> + + + + <_>0 2 18 2 -1. + <_>0 3 18 1 2. + 0 + -5.3229997865855694e-003 + -0.8188440203666687 + -0.0182249993085861 + <_> + + <_> + + + + <_>10 13 14 6 -1. + <_>17 13 7 3 2. + <_>10 16 7 3 2. + 0 + 0.0178130008280277 + 0.1495780050754547 + -0.1866720020771027 + <_> + + <_> + + + + <_>0 13 14 6 -1. + <_>0 13 7 3 2. + <_>7 16 7 3 2. + 0 + -0.0340100005269051 + -0.7285230159759522 + -0.0166159998625517 + <_> + + <_> + + + + <_>20 2 3 21 -1. + <_>21 2 1 21 3. + 0 + -0.0159530006349087 + 0.5694400072097778 + 0.0138320000842214 + <_> + + <_> + + + + <_>0 9 5 12 -1. + <_>0 13 5 4 3. + 0 + 0.0197439994663000 + 0.0405250005424023 + -0.4177339971065521 + <_> + + <_> + + + + <_>12 6 12 6 -1. + <_>12 8 12 2 3. + 0 + -0.1037480011582375 + -1.9825149774551392 + 0.1196020022034645 + <_> + + <_> + + + + <_>1 8 20 3 -1. + <_>1 9 20 1 3. + 0 + -0.0192850008606911 + 0.5023059844970703 + -0.1974589973688126 + <_> + + <_> + + + + <_>5 7 19 3 -1. + <_>5 8 19 1 3. 
+ 0 + -0.0127800004556775 + 0.4019500017166138 + -0.0269579999148846 + <_> + + <_> + + + + <_>1 12 9 6 -1. + <_>1 14 9 2 3. + 0 + -0.0163529999554157 + -0.7660880088806152 + -0.0242090001702309 + <_> + + <_> + + + + <_>6 10 14 12 -1. + <_>6 14 14 4 3. + 0 + -0.1276369988918304 + 0.8657850027084351 + 0.0642059966921806 + <_> + + <_> + + + + <_>5 6 14 18 -1. + <_>5 12 14 6 3. + 0 + 0.0190689992159605 + -0.5592979788780212 + -1.6880000475794077e-003 + <_> + + <_> + + + + <_>11 12 9 7 -1. + <_>14 12 3 7 3. + 0 + 0.0324809998273849 + 0.0407220013439655 + 0.4892509877681732 + <_> + + <_> + + + + <_>1 15 18 4 -1. + <_>1 17 18 2 2. + 0 + 9.4849998131394386e-003 + -0.1923190057277679 + 0.5113970041275024 + <_> + + <_> + + + + <_>11 14 6 9 -1. + <_>11 17 6 3 3. + 0 + 5.0470000132918358e-003 + 0.1870680004358292 + -0.1611360013484955 + <_> + + <_> + + + + <_>0 8 18 4 -1. + <_>0 8 9 2 2. + <_>9 10 9 2 2. + 0 + 0.0412679985165596 + -0.0488179996609688 + -1.1326299905776978 + <_> + + <_> + + + + <_>3 10 20 6 -1. + <_>13 10 10 3 2. + <_>3 13 10 3 2. + 0 + -0.0763589963316917 + 1.4169390201568604 + 0.0873199999332428 + <_> + + <_> + + + + <_>1 10 20 6 -1. + <_>1 10 10 3 2. + <_>11 13 10 3 2. + 0 + -0.0728349983692169 + 1.3189860582351685 + -0.1481910049915314 + <_> + + <_> + + + + <_>0 9 24 2 -1. + <_>0 9 12 2 2. + 0 + 0.0595769993960857 + 0.0483769997954369 + 0.8561180233955383 + <_> + + <_> + + + + <_>1 12 20 8 -1. + <_>1 12 10 4 2. + <_>11 16 10 4 2. + 0 + 0.0202639997005463 + -0.2104409933090210 + 0.3385899960994721 + <_> + + <_> + + + + <_>11 12 9 7 -1. + <_>14 12 3 7 3. + 0 + -0.0803010016679764 + -1.2464400529861450 + 0.1185709983110428 + <_> + + <_> + + + + <_>4 12 9 7 -1. + <_>7 12 3 7 3. + 0 + -0.0178350005298853 + 0.2578229904174805 + -0.2456479966640472 + <_> + + <_> + + + + <_>12 12 8 5 -1. + <_>12 12 4 5 2. + 0 + 0.0114310001954436 + 0.2294979989528656 + -0.2949759960174561 + <_> + + <_> + + + + <_>4 12 8 5 -1. + <_>8 12 4 5 2. + 0 + -0.0255410000681877 + -0.8625299930572510 + -7.0400000549852848e-004 + <_> + + <_> + + + + <_>13 10 4 10 -1. + <_>13 10 2 10 2. + 0 + -7.6899997657164931e-004 + 0.3151139914989471 + -0.1434900015592575 + <_> + + <_> + + + + <_>1 15 20 2 -1. + <_>11 15 10 2 2. + 0 + -0.0144539996981621 + 0.2514849901199341 + -0.2823289930820465 + <_> + + <_> + + + + <_>9 10 6 6 -1. + <_>9 10 3 6 2. + 0 + 8.6730001494288445e-003 + 0.2660140097141266 + -0.2819080054759979 + -3.2103500366210937 + 18 + -1 + <_> + + + <_> + + <_> + + + + <_>0 1 21 3 -1. + <_>7 1 7 3 3. + 0 + 0.0547089986503124 + -0.5414429903030396 + 0.6104300022125244 + <_> + + <_> + + + + <_>6 4 13 9 -1. + <_>6 7 13 3 3. + 0 + -0.1083879992365837 + 0.7173990011215210 + -0.4119609892368317 + <_> + + <_> + + + + <_>6 5 12 5 -1. + <_>10 5 4 5 3. + 0 + 0.0229969993233681 + -0.5826979875564575 + 0.2964560091495514 + <_> + + <_> + + + + <_>10 10 10 6 -1. + <_>10 12 10 2 3. + 0 + 2.7540000155568123e-003 + -0.7424389719963074 + 0.1418330073356628 + <_> + + <_> + + + + <_>6 12 5 8 -1. + <_>6 16 5 4 2. + 0 + -2.1520000882446766e-003 + 0.1787990033626556 + -0.6854860186576843 + <_> + + <_> + + + + <_>13 0 6 9 -1. + <_>15 0 2 9 3. + 0 + -0.0225590001791716 + -1.0775549411773682 + 0.1238899976015091 + <_> + + <_> + + + + <_>2 10 18 6 -1. + <_>8 10 6 6 3. + 0 + 0.0830250009894371 + 0.0245009995996952 + -1.0251879692077637 + <_> + + <_> + + + + <_>11 2 9 4 -1. + <_>11 4 9 2 2. + 0 + -6.6740000620484352e-003 + -0.4528310000896454 + 0.2123019993305206 + <_> + + <_> + + + + <_>1 20 21 3 -1. + <_>8 20 7 3 3. 
+ 0 + 0.0764850005507469 + -0.2697269916534424 + 0.4858019948005676 + <_> + + <_> + + + + <_>1 10 22 2 -1. + <_>1 11 22 1 2. + 0 + 5.4910001344978809e-003 + -0.4887120127677918 + 0.3161639869213104 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -0.0104149999096990 + 0.4151290059089661 + -0.3004480004310608 + <_> + + <_> + + + + <_>13 0 6 9 -1. + <_>15 0 2 9 3. + 0 + 0.0276079997420311 + 0.1620379984378815 + -0.9986850023269653 + <_> + + <_> + + + + <_>5 0 6 9 -1. + <_>7 0 2 9 3. + 0 + -0.0232720002532005 + -1.1024399995803833 + 0.0211249999701977 + <_> + + <_> + + + + <_>18 2 6 20 -1. + <_>20 2 2 20 3. + 0 + -0.0556199997663498 + 0.6503310203552246 + -0.0279380008578300 + <_> + + <_> + + + + <_>0 2 6 20 -1. + <_>2 2 2 20 3. + 0 + -0.0406319983303547 + 0.4211730062961578 + -0.2676379978656769 + <_> + + <_> + + + + <_>11 7 6 14 -1. + <_>14 7 3 7 2. + <_>11 14 3 7 2. + 0 + -7.3560001328587532e-003 + 0.3527779877185822 + -0.3785400092601776 + <_> + + <_> + + + + <_>0 1 4 9 -1. + <_>2 1 2 9 2. + 0 + 0.0170070007443428 + -0.2918950021266937 + 0.4105379879474640 + <_> + + <_> + + + + <_>12 14 9 4 -1. + <_>12 16 9 2 2. + 0 + -0.0370340012013912 + -1.3216309547424316 + 0.1296650022268295 + <_> + + <_> + + + + <_>1 13 9 4 -1. + <_>1 15 9 2 2. + 0 + -0.0196330007165670 + -0.8770229816436768 + 1.0799999581649899e-003 + <_> + + <_> + + + + <_>7 6 15 6 -1. + <_>7 8 15 2 3. + 0 + -0.0235469993203878 + 0.2610610127449036 + -0.2148140072822571 + <_> + + <_> + + + + <_>8 2 3 18 -1. + <_>8 8 3 6 3. + 0 + -0.0433529987931252 + -0.9908969998359680 + -9.9560003727674484e-003 + <_> + + <_> + + + + <_>6 6 12 6 -1. + <_>12 6 6 3 2. + <_>6 9 6 3 2. + 0 + -0.0221839994192123 + 0.6345440149307251 + -0.0565470010042191 + <_> + + <_> + + + + <_>2 19 20 4 -1. + <_>2 19 10 2 2. + <_>12 21 10 2 2. + 0 + 0.0165309999138117 + 0.0246649999171495 + -0.7332680225372315 + <_> + + <_> + + + + <_>14 15 6 9 -1. + <_>14 18 6 3 3. + 0 + -0.0327440015971661 + -0.5629720091819763 + 0.1664029955863953 + <_> + + <_> + + + + <_>3 5 18 14 -1. + <_>3 5 9 7 2. + <_>12 12 9 7 2. + 0 + 0.0714159980416298 + -3.0000001424923539e-004 + -0.9328640103340149 + <_> + + <_> + + + + <_>15 6 4 18 -1. + <_>17 6 2 9 2. + <_>15 15 2 9 2. + 0 + 8.0999999772757292e-004 + -0.0953800007700920 + 0.2518469989299774 + <_> + + <_> + + + + <_>5 6 4 18 -1. + <_>5 6 2 9 2. + <_>7 15 2 9 2. + 0 + -8.4090000018477440e-003 + -0.6549680233001709 + 0.0673009976744652 + <_> + + <_> + + + + <_>11 0 6 9 -1. + <_>13 0 2 9 3. + 0 + -0.0172540005296469 + -0.4649299979209900 + 0.1607089936733246 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0186410006135702 + -1.0594010353088379 + -0.0196170005947351 + <_> + + <_> + + + + <_>11 5 6 9 -1. + <_>13 5 2 9 3. + 0 + -9.1979997232556343e-003 + 0.5071619749069214 + -0.1533920019865036 + <_> + + <_> + + + + <_>9 5 6 6 -1. + <_>12 5 3 6 2. + 0 + 0.0185380000621080 + -0.3049820065498352 + 0.7350620031356812 + <_> + + <_> + + + + <_>4 1 16 6 -1. + <_>12 1 8 3 2. + <_>4 4 8 3 2. + 0 + -0.0503350012004375 + -1.1140480041503906 + 0.1800010055303574 + <_> + + <_> + + + + <_>9 13 6 11 -1. + <_>11 13 2 11 3. + 0 + -0.0235290005803108 + -0.8690789937973023 + -0.0124599998816848 + <_> + + <_> + + + + <_>17 1 6 12 -1. + <_>20 1 3 6 2. + <_>17 7 3 6 2. + 0 + -0.0271000005304813 + 0.6594290137290955 + -0.0353239998221397 + <_> + + <_> + + + + <_>1 17 18 3 -1. + <_>1 18 18 1 3. + 0 + 6.5879998728632927e-003 + -0.2295340001583099 + 0.4242509901523590 + <_> + + <_> + + + + <_>7 13 10 8 -1. + <_>7 17 10 4 2. 
+ 0 + 0.0233600009232759 + 0.1835619956254959 + -0.9858729839324951 + <_> + + <_> + + + + <_>6 18 10 6 -1. + <_>6 20 10 2 3. + 0 + 0.0129469996318221 + -0.3314740061759949 + 0.2132319957017899 + <_> + + <_> + + + + <_>9 14 9 4 -1. + <_>9 16 9 2 2. + 0 + -6.6559999249875546e-003 + -0.1195140033960342 + 0.2975279986858368 + <_> + + <_> + + + + <_>1 1 6 12 -1. + <_>1 1 3 6 2. + <_>4 7 3 6 2. + 0 + -0.0225709993392229 + 0.3849940001964569 + -0.2443449944257736 + <_> + + <_> + + + + <_>19 4 5 12 -1. + <_>19 8 5 4 3. + 0 + -0.0638139992952347 + -0.8938350081443787 + 0.1421750038862228 + <_> + + <_> + + + + <_>0 0 8 8 -1. + <_>4 0 4 8 2. + 0 + -0.0499450005590916 + 0.5386440157890320 + -0.2048529982566834 + <_> + + <_> + + + + <_>3 5 19 3 -1. + <_>3 6 19 1 3. + 0 + 6.8319998681545258e-003 + -0.0566789992153645 + 0.3997099995613098 + <_> + + <_> + + + + <_>1 5 12 6 -1. + <_>1 5 6 3 2. + <_>7 8 6 3 2. + 0 + -0.0558359995484352 + -1.5239470005035400 + -0.0511830002069473 + <_> + + <_> + + + + <_>2 1 21 8 -1. + <_>9 1 7 8 3. + 0 + 0.3195700049400330 + 0.0745740011334419 + 1.2447799444198608 + <_> + + <_> + + + + <_>4 1 16 8 -1. + <_>4 5 16 4 2. + 0 + 0.0809559971094131 + -0.1966550052165985 + 0.5988969802856445 + <_> + + <_> + + + + <_>6 0 18 3 -1. + <_>6 1 18 1 3. + 0 + -0.0149119999259710 + -0.6402059793472290 + 0.1580760031938553 + <_> + + <_> + + + + <_>4 4 10 14 -1. + <_>4 11 10 7 2. + 0 + 0.0467090010643005 + 0.0852390006184578 + -0.4548720121383667 + <_> + + <_> + + + + <_>15 6 4 10 -1. + <_>15 11 4 5 2. + 0 + 6.0539999976754189e-003 + -0.4318400025367737 + 0.2245260030031204 + <_> + + <_> + + + + <_>3 18 18 3 -1. + <_>9 18 6 3 3. + 0 + -0.0343759991228580 + 0.4020250141620636 + -0.2390359938144684 + <_> + + <_> + + + + <_>8 18 12 6 -1. + <_>12 18 4 6 3. + 0 + -0.0349240005016327 + 0.5287010073661804 + 0.0397090017795563 + <_> + + <_> + + + + <_>3 15 6 9 -1. + <_>6 15 3 9 2. + 0 + 3.0030000489205122e-003 + -0.3875429928302765 + 0.1419260054826737 + <_> + + <_> + + + + <_>15 7 6 8 -1. + <_>15 11 6 4 2. + 0 + -0.0141329998150468 + 0.8752840161323547 + 0.0855079963803291 + <_> + + <_> + + + + <_>3 7 6 8 -1. + <_>3 11 6 4 2. + 0 + -6.7940000444650650e-003 + -1.1649219989776611 + -0.0339430011808872 + <_> + + <_> + + + + <_>5 9 18 6 -1. + <_>14 9 9 3 2. + <_>5 12 9 3 2. + 0 + -0.0528860017657280 + 1.0930680036544800 + 0.0511870011687279 + <_> + + <_> + + + + <_>1 13 12 6 -1. + <_>1 15 12 2 3. + 0 + -2.1079999860376120e-003 + 0.1369619965553284 + -0.3384999930858612 + <_> + + <_> + + + + <_>14 15 10 6 -1. + <_>14 17 10 2 3. + 0 + 0.0183530002832413 + 0.1366160064935684 + -0.4077779948711395 + <_> + + <_> + + + + <_>0 15 10 6 -1. + <_>0 17 10 2 3. + 0 + 0.0126719996333122 + -0.0149360001087189 + -0.8170750141143799 + <_> + + <_> + + + + <_>15 13 6 9 -1. + <_>15 16 6 3 3. + 0 + 0.0129249999299645 + 0.1762509942054749 + -0.3249169886112213 + <_> + + <_> + + + + <_>3 13 6 9 -1. + <_>3 16 6 3 3. + 0 + -0.0179210007190704 + -0.5274540185928345 + 0.0444430001080036 + <_> + + <_> + + + + <_>9 5 8 8 -1. + <_>9 5 4 8 2. + 0 + 1.9160000374540687e-003 + -0.1097859963774681 + 0.2206750065088272 + <_> + + <_> + + + + <_>1 18 12 6 -1. + <_>1 18 6 3 2. + <_>7 21 6 3 2. + 0 + -0.0146979996934533 + 0.3906779885292053 + -0.2222499996423721 + <_> + + <_> + + + + <_>13 19 10 4 -1. + <_>13 21 10 2 2. + 0 + -0.0149729996919632 + -0.2545090019702911 + 0.1779000014066696 + <_> + + <_> + + + + <_>1 19 10 4 -1. + <_>1 21 10 2 2. 
+ 0 + 0.0146369999274611 + -0.0251250006258488 + -0.8712130188941956 + <_> + + <_> + + + + <_>6 19 18 3 -1. + <_>6 20 18 1 3. + 0 + -0.0109740002080798 + 0.7908279895782471 + 0.0201210007071495 + <_> + + <_> + + + + <_>8 14 4 10 -1. + <_>8 19 4 5 2. + 0 + -9.1599998995661736e-003 + -0.4790689945220947 + 0.0522320009768009 + <_> + + <_> + + + + <_>0 0 24 6 -1. + <_>0 2 24 2 3. + 0 + 4.6179997734725475e-003 + -0.1724459975957871 + 0.3452779948711395 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>0 4 6 3 3. + 0 + 0.0234769992530346 + 3.7760001141577959e-003 + -0.6533370018005371 + <_> + + <_> + + + + <_>4 9 20 6 -1. + <_>14 9 10 3 2. + <_>4 12 10 3 2. + 0 + 0.0317669995129108 + 0.0163640007376671 + 0.5872370004653931 + <_> + + <_> + + + + <_>1 15 19 8 -1. + <_>1 19 19 4 2. + 0 + -0.0184199996292591 + 0.1999389976263046 + -0.3205649852752686 + <_> + + <_> + + + + <_>14 0 10 6 -1. + <_>14 2 10 2 3. + 0 + 0.0195439998060465 + 0.1845020055770874 + -0.2379360049962997 + <_> + + <_> + + + + <_>1 10 21 14 -1. + <_>8 10 7 14 3. + 0 + 0.4115949869155884 + -0.0603820011019707 + -1.6072119474411011 + <_> + + <_> + + + + <_>10 10 8 8 -1. + <_>10 10 4 8 2. + 0 + -0.0415959991514683 + -0.3275620043277741 + 0.1505800038576126 + <_> + + <_> + + + + <_>6 8 10 4 -1. + <_>11 8 5 4 2. + 0 + -0.0103359995409846 + -0.6239439845085144 + 0.0131120001897216 + <_> + + <_> + + + + <_>10 5 4 9 -1. + <_>10 5 2 9 2. + 0 + 0.0123929996043444 + -0.0331149995326996 + 0.5557990074157715 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>9 5 2 10 3. + 0 + -8.7270000949501991e-003 + 0.1988320052623749 + -0.3763560056686401 + <_> + + <_> + + + + <_>14 4 4 13 -1. + <_>14 4 2 13 2. + 0 + 0.0162950009107590 + 0.2037300020456314 + -0.4280079901218414 + <_> + + <_> + + + + <_>6 4 4 13 -1. + <_>8 4 2 13 2. + 0 + -0.0104839997366071 + -0.5684700012207031 + 0.0441990010440350 + <_> + + <_> + + + + <_>8 7 9 6 -1. + <_>11 7 3 6 3. + 0 + -0.0124319996684790 + 0.7464190125465393 + 0.0436789989471436 + <_> + + <_> + + + + <_>3 6 16 6 -1. + <_>3 6 8 3 2. + <_>11 9 8 3 2. + 0 + -0.0503749996423721 + 0.8509010076522827 + -0.1777379959821701 + <_> + + <_> + + + + <_>5 4 16 14 -1. + <_>13 4 8 7 2. + <_>5 11 8 7 2. + 0 + 0.0495480000972748 + 0.1678490042686462 + -0.2987749874591827 + <_> + + <_> + + + + <_>0 0 24 4 -1. + <_>0 0 12 2 2. + <_>12 2 12 2 2. + 0 + -0.0410850010812283 + -1.3302919864654541 + -0.0491820015013218 + <_> + + <_> + + + + <_>9 1 9 6 -1. + <_>12 1 3 6 3. + 0 + 1.0069999843835831e-003 + -0.0605389997363091 + 0.1848320066928864 + <_> + + <_> + + + + <_>4 1 14 4 -1. + <_>11 1 7 4 2. + 0 + -0.0501429997384548 + 0.7644770145416260 + -0.1835699975490570 + <_> + + <_> + + + + <_>10 14 7 9 -1. + <_>10 17 7 3 3. + 0 + -8.7879998609423637e-003 + 0.2265599966049194 + -0.0631569996476173 + <_> + + <_> + + + + <_>8 3 8 10 -1. + <_>8 3 4 5 2. + <_>12 8 4 5 2. + 0 + -0.0501709990203381 + -1.5899070501327515 + -0.0612550005316734 + <_> + + <_> + + + + <_>7 3 12 5 -1. + <_>11 3 4 5 3. + 0 + 0.1021609976887703 + 0.1207180023193359 + -1.4120110273361206 + <_> + + <_> + + + + <_>8 2 4 13 -1. + <_>10 2 2 13 2. + 0 + -0.0143729997798800 + -1.3116970062255859 + -0.0519360005855560 + <_> + + <_> + + + + <_>11 2 3 19 -1. + <_>12 2 1 19 3. + 0 + 0.0102819995954633 + -2.1639999467879534e-003 + 0.4424720108509064 + <_> + + <_> + + + + <_>7 7 9 6 -1. + <_>10 7 3 6 3. + 0 + -0.0118140000849962 + 0.6537809967994690 + -0.1872369945049286 + <_> + + <_> + + + + <_>4 22 20 2 -1. + <_>4 22 10 2 2. 
+ 0 + 0.0721149966120720 + 0.0718469992280006 + 0.8149629831314087 + <_> + + <_> + + + + <_>0 16 24 4 -1. + <_>0 16 12 2 2. + <_>12 18 12 2 2. + 0 + -0.0190019998699427 + -0.6742720007896423 + -4.3200000072829425e-004 + <_> + + <_> + + + + <_>7 3 12 5 -1. + <_>11 3 4 5 3. + 0 + -4.6990001574158669e-003 + 0.3331150114536285 + 0.0557940006256104 + <_> + + <_> + + + + <_>1 10 8 14 -1. + <_>1 10 4 7 2. + <_>5 17 4 7 2. + 0 + -0.0581570006906986 + 0.4557229876518250 + -0.2030510008335114 + <_> + + <_> + + + + <_>11 16 6 6 -1. + <_>11 19 6 3 2. + 0 + 1.1360000353306532e-003 + -0.0446869991719723 + 0.2268189936876297 + <_> + + <_> + + + + <_>6 0 10 24 -1. + <_>6 0 5 12 2. + <_>11 12 5 12 2. + 0 + -0.0494149997830391 + 0.2669459879398346 + -0.2611699998378754 + <_> + + <_> + + + + <_>7 5 14 14 -1. + <_>14 5 7 7 2. + <_>7 12 7 7 2. + 0 + -0.1191380023956299 + -0.8301799893379211 + 0.1324850022792816 + <_> + + <_> + + + + <_>7 8 10 8 -1. + <_>7 8 5 4 2. + <_>12 12 5 4 2. + 0 + -0.0183039996773005 + -0.6749920248985291 + 0.0170920006930828 + <_> + + <_> + + + + <_>9 1 9 6 -1. + <_>12 1 3 6 3. + 0 + -7.9199997708201408e-003 + -0.0722870007157326 + 0.1442580074071884 + <_> + + <_> + + + + <_>0 6 24 3 -1. + <_>12 6 12 3 2. + 0 + 0.0519259981811047 + 0.0309219993650913 + -0.5586060285568237 + <_> + + <_> + + + + <_>7 3 12 5 -1. + <_>11 3 4 5 3. + 0 + 0.0667240023612976 + 0.1366640031337738 + -0.2941100001335144 + <_> + + <_> + + + + <_>1 13 22 4 -1. + <_>1 13 11 2 2. + <_>12 15 11 2 2. + 0 + -0.0137780001387000 + -0.5944390296936035 + 0.0153000000864267 + <_> + + <_> + + + + <_>9 12 12 6 -1. + <_>9 14 12 2 3. + 0 + -0.0177609995007515 + 0.4049650132656097 + -3.3559999428689480e-003 + <_> + + <_> + + + + <_>0 5 9 6 -1. + <_>0 7 9 2 3. + 0 + -0.0422349981963634 + -1.0897940397262573 + -0.0402249991893768 + <_> + + <_> + + + + <_>1 5 23 6 -1. + <_>1 7 23 2 3. + 0 + -0.0135249998420477 + 0.2892189919948578 + -0.2519479990005493 + <_> + + <_> + + + + <_>1 6 19 12 -1. + <_>1 10 19 4 3. + 0 + -0.0111060002818704 + 0.6531280279159546 + -0.1805370002985001 + <_> + + <_> + + + + <_>9 1 6 21 -1. + <_>9 8 6 7 3. + 0 + -0.1228459998965263 + -1.9570649862289429 + 0.1481540054082871 + <_> + + <_> + + + + <_>3 19 18 3 -1. + <_>9 19 6 3 3. + 0 + 0.0477159991860390 + -0.2287559956312180 + 0.3423370122909546 + <_> + + <_> + + + + <_>9 14 6 9 -1. + <_>11 14 2 9 3. + 0 + 0.0318170003592968 + 0.1597629934549332 + -1.0091969966888428 + <_> + + <_> + + + + <_>9 6 4 12 -1. + <_>11 6 2 12 2. + 0 + 4.2570000514388084e-003 + -0.3888129889965057 + 0.0842100009322166 + <_> + + <_> + + + + <_>16 0 6 9 -1. + <_>18 0 2 9 3. + 0 + -0.0613729991018772 + 1.7152810096740723 + 0.0593249984085560 + <_> + + <_> + + + + <_>2 0 6 9 -1. + <_>4 0 2 9 3. + 0 + -2.7030000928789377e-003 + -0.3816170096397400 + 0.0851270034909248 + <_> + + <_> + + + + <_>13 1 4 22 -1. + <_>15 1 2 11 2. + <_>13 12 2 11 2. + 0 + -0.0685440003871918 + -3.0925889015197754 + 0.1178800016641617 + <_> + + <_> + + + + <_>1 8 8 12 -1. + <_>1 14 8 6 2. + 0 + 0.1037250012159348 + -0.1376930028200150 + 1.9009410142898560 + <_> + + <_> + + + + <_>14 7 7 9 -1. + <_>14 10 7 3 3. + 0 + 0.0157990008592606 + -0.0626600012183189 + 0.2591769993305206 + <_> + + <_> + + + + <_>3 12 18 4 -1. + <_>3 12 9 2 2. + <_>12 14 9 2 2. + 0 + -9.8040001466870308e-003 + -0.5629159808158875 + 0.0439230017364025 + <_> + + <_> + + + + <_>13 1 4 22 -1. + <_>15 1 2 11 2. + <_>13 12 2 11 2. + 0 + -9.0229995548725128e-003 + 0.2528710067272186 + -0.0412259995937347 + <_> + + <_> + + + + <_>7 1 4 22 -1. 
+ <_>7 1 2 11 2. + <_>9 12 2 11 2. + 0 + -0.0637549981474876 + -2.6178569793701172 + -0.0740059986710548 + <_> + + <_> + + + + <_>4 7 20 4 -1. + <_>14 7 10 2 2. + <_>4 9 10 2 2. + 0 + 0.0389549992978573 + 0.0590329989790916 + 0.8594560027122498 + <_> + + <_> + + + + <_>9 10 6 7 -1. + <_>12 10 3 7 2. + 0 + -0.0398029983043671 + 0.9360049962997437 + -0.1563940048217773 + <_> + + <_> + + + + <_>7 7 10 4 -1. + <_>7 7 5 4 2. + 0 + 0.0503019988536835 + 0.1372590065002441 + -2.5549728870391846 + <_> + + <_> + + + + <_>0 3 4 15 -1. + <_>0 8 4 5 3. + 0 + 0.0462500005960464 + -0.0139640001580119 + -0.7102620005607605 + <_> + + <_> + + + + <_>15 0 8 12 -1. + <_>19 0 4 6 2. + <_>15 6 4 6 2. + 0 + 0.0621960014104843 + 0.0595260001718998 + 1.6509100198745728 + <_> + + <_> + + + + <_>1 0 8 12 -1. + <_>1 0 4 6 2. + <_>5 6 4 6 2. + 0 + -0.0647760033607483 + 0.7136899828910828 + -0.1727000027894974 + <_> + + <_> + + + + <_>14 5 6 16 -1. + <_>16 5 2 16 3. + 0 + 0.0275229997932911 + 0.1463160067796707 + -0.0814289972186089 + <_> + + <_> + + + + <_>4 5 6 16 -1. + <_>6 5 2 16 3. + 0 + 3.9900001138448715e-004 + -0.3714450001716614 + 0.1015269979834557 + <_> + + <_> + + + + <_>15 0 6 16 -1. + <_>17 0 2 16 3. + 0 + -4.3299999088048935e-003 + -0.2375629991292954 + 0.2679840028285980 + <_> + + <_> + + + + <_>3 0 6 16 -1. + <_>5 0 2 16 3. + 0 + 0.0472970008850098 + -0.0276820007711649 + -0.8491029739379883 + <_> + + <_> + + + + <_>0 2 24 3 -1. + <_>0 3 24 1 3. + 0 + 0.0125089995563030 + 0.1873019933700562 + -0.5600110292434692 + <_> + + <_> + + + + <_>7 1 10 4 -1. + <_>7 3 10 2 2. + 0 + 0.0458990000188351 + -0.1560119986534119 + 0.9707300066947937 + <_> + + <_> + + + + <_>1 0 23 8 -1. + <_>1 4 23 4 2. + 0 + 0.1985339969396591 + 0.1489550024271011 + -1.1015529632568359 + <_> + + <_> + + + + <_>1 17 19 3 -1. + <_>1 18 19 1 3. + 0 + 0.0166749991476536 + -0.1661529988050461 + 0.8221099972724915 + <_> + + <_> + + + + <_>6 18 18 2 -1. + <_>6 19 18 1 2. + 0 + 1.9829999655485153e-003 + -0.0712499991059303 + 0.2881090044975281 + <_> + + <_> + + + + <_>1 17 9 6 -1. + <_>1 19 9 2 3. + 0 + 0.0224479995667934 + -0.0209810007363558 + -0.7841650247573853 + <_> + + <_> + + + + <_>15 15 6 9 -1. + <_>15 18 6 3 3. + 0 + -0.0139130000025034 + -0.1816579997539520 + 0.2049179971218109 + <_> + + <_> + + + + <_>3 15 6 9 -1. + <_>3 18 6 3 3. + 0 + -7.7659999951720238e-003 + -0.4559589922428131 + 0.0635769963264465 + <_> + + <_> + + + + <_>4 14 20 6 -1. + <_>4 17 20 3 2. + 0 + -0.0132090002298355 + 0.2663230001926422 + -0.1779599934816361 + <_> + + <_> + + + + <_>0 10 6 14 -1. + <_>0 10 3 7 2. + <_>3 17 3 7 2. + 0 + 0.0490529984235764 + -0.1547680050134659 + 1.1069979667663574 + <_> + + <_> + + + + <_>6 18 18 3 -1. + <_>6 19 18 1 3. + 0 + 0.0202639997005463 + 0.0689150020480156 + 0.6986749768257141 + <_> + + <_> + + + + <_>4 12 9 7 -1. + <_>7 12 3 7 3. + 0 + -0.0168280005455017 + 0.2760719954967499 + -0.2513920068740845 + <_> + + <_> + + + + <_>6 10 18 5 -1. + <_>12 10 6 5 3. + 0 + -0.1693949997425079 + -3.0767529010772705 + 0.1161750033497810 + <_> + + <_> + + + + <_>0 10 18 5 -1. + <_>6 10 6 5 3. + 0 + -0.1133610010147095 + -1.4639229774475098 + -0.0514470003545284 + <_> + + <_> + + + + <_>3 2 18 9 -1. + <_>9 2 6 9 3. + 0 + -0.0776859968900681 + 0.8843020200729370 + 0.0433069989085197 + <_> + + <_> + + + + <_>4 6 10 10 -1. + <_>4 6 5 5 2. + <_>9 11 5 5 2. + 0 + -0.0155680002644658 + 0.1367249935865402 + -0.3450550138950348 + <_> + + <_> + + + + <_>20 14 4 9 -1. + <_>20 14 2 9 2. 
+ 0 + -0.0660189986228943 + -1.0300110578536987 + 0.1160139963030815 + <_> + + <_> + + + + <_>0 14 4 9 -1. + <_>2 14 2 9 2. + 0 + 8.3699999377131462e-003 + 0.0764290019869804 + -0.4400250017642975 + <_> + + <_> + + + + <_>11 1 4 20 -1. + <_>13 1 2 10 2. + <_>11 11 2 10 2. + 0 + 0.0354029983282089 + 0.1197950020432472 + -0.7266830205917358 + <_> + + <_> + + + + <_>6 21 12 3 -1. + <_>12 21 6 3 2. + 0 + -0.0390510000288486 + 0.6737530231475830 + -0.1819600015878677 + <_> + + <_> + + + + <_>11 1 4 20 -1. + <_>13 1 2 10 2. + <_>11 11 2 10 2. + 0 + -9.7899995744228363e-003 + 0.2126459926366806 + 0.0367560014128685 + <_> + + <_> + + + + <_>1 16 10 8 -1. + <_>1 16 5 4 2. + <_>6 20 5 4 2. + 0 + -0.0230470001697540 + 0.4474219977855682 + -0.2098670005798340 + <_> + + <_> + + + + <_>11 1 4 20 -1. + <_>13 1 2 10 2. + <_>11 11 2 10 2. + 0 + 3.1169999856501818e-003 + 0.0375440008938313 + 0.2780820131301880 + <_> + + <_> + + + + <_>1 0 3 19 -1. + <_>2 0 1 19 3. + 0 + 0.0131360003724694 + -0.1984239965677261 + 0.5433570146560669 + <_> + + <_> + + + + <_>11 1 4 20 -1. + <_>13 1 2 10 2. + <_>11 11 2 10 2. + 0 + 0.0147820003330708 + 0.1353060007095337 + -0.1115360036492348 + <_> + + <_> + + + + <_>0 1 6 9 -1. + <_>2 1 2 9 3. + 0 + -0.0601390004158020 + 0.8403930068016052 + -0.1671160012483597 + <_> + + <_> + + + + <_>3 7 19 4 -1. + <_>3 9 19 2 2. + 0 + 0.0519989989697933 + 0.1737200021743774 + -0.7854760289192200 + <_> + + <_> + + + + <_>7 14 9 6 -1. + <_>7 16 9 2 3. + 0 + 0.0247920006513596 + -0.1773920059204102 + 0.6675260066986084 + <_> + + <_> + + + + <_>17 1 7 6 -1. + <_>17 4 7 3 2. + 0 + -0.0120149999856949 + -0.1426369994878769 + 0.1607050001621246 + <_> + + <_> + + + + <_>5 0 14 8 -1. + <_>5 4 14 4 2. + 0 + -0.0986559987068176 + 1.0429769754409790 + -0.1577019989490509 + <_> + + <_> + + + + <_>16 1 8 6 -1. + <_>16 4 8 3 2. + 0 + 0.1175829991698265 + 0.1095570027828217 + -4.4920377731323242 + <_> + + <_> + + + + <_>0 1 8 6 -1. + <_>0 4 8 3 2. + 0 + -0.0189229995012283 + -0.7854340076446533 + 0.0129840001463890 + <_> + + <_> + + + + <_>6 0 18 4 -1. + <_>15 0 9 2 2. + <_>6 2 9 2 2. + 0 + -0.0283909998834133 + -0.6056990027427673 + 0.1290349960327148 + <_> + + <_> + + + + <_>0 14 9 6 -1. + <_>0 16 9 2 3. + 0 + 0.0131829995661974 + -0.0144159998744726 + -0.7321050167083740 + <_> + + <_> + + + + <_>3 7 18 8 -1. + <_>9 7 6 8 3. + 0 + -0.1165300011634827 + -2.0442469120025635 + 0.1405310034751892 + <_> + + <_> + + + + <_>2 11 6 9 -1. + <_>4 11 2 9 3. + 0 + -3.8880000356584787e-003 + -0.4186159968376160 + 0.0787049978971481 + <_> + + <_> + + + + <_>10 5 6 9 -1. + <_>12 5 2 9 3. + 0 + 0.0312290005385876 + 0.0246329996734858 + 0.4187040030956268 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>10 6 2 9 2. + <_>12 15 2 9 2. + 0 + 0.0251989997923374 + -0.1755779981613159 + 0.6471059918403626 + <_> + + <_> + + + + <_>11 1 4 20 -1. + <_>13 1 2 10 2. + <_>11 11 2 10 2. + 0 + -0.0281240008771420 + -0.2200559973716736 + 0.1412100046873093 + <_> + + <_> + + + + <_>9 1 4 20 -1. + <_>9 1 2 10 2. + <_>11 11 2 10 2. + 0 + 0.0364990010857582 + -0.0684269964694977 + -2.3410849571228027 + <_> + + <_> + + + + <_>5 9 18 6 -1. + <_>14 9 9 3 2. + <_>5 12 9 3 2. + 0 + -0.0722929984331131 + 1.2898750305175781 + 0.0848750025033951 + <_> + + <_> + + + + <_>6 4 6 9 -1. + <_>8 4 2 9 3. + 0 + -0.0416710004210472 + -1.1630970239639282 + -0.0537529997527599 + <_> + + <_> + + + + <_>10 16 8 6 -1. + <_>10 16 4 6 2. + 0 + 0.0477030016481876 + 0.0701010003685951 + 0.7367650270462036 + <_> + + <_> + + + + <_>0 0 18 8 -1. + <_>0 0 9 4 2. 
+ <_>9 4 9 4 2. + 0 + 0.0657930001616478 + -0.1775529980659485 + 0.6978049874305725 + <_> + + <_> + + + + <_>6 5 14 12 -1. + <_>13 5 7 6 2. + <_>6 11 7 6 2. + 0 + 0.0139049999415874 + 0.2193679958581924 + -0.2039079964160919 + <_> + + <_> + + + + <_>4 3 15 7 -1. + <_>9 3 5 7 3. + 0 + -0.0277309995144606 + 0.6186789870262146 + -0.1780409961938858 + <_> + + <_> + + + + <_>14 12 10 6 -1. + <_>14 14 10 2 3. + 0 + -0.0158799998462200 + -0.4648410081863403 + 0.1882860064506531 + <_> + + <_> + + + + <_>0 11 4 10 -1. + <_>0 16 4 5 2. + 0 + 0.0741280019283295 + -0.1285810023546219 + 3.2792479991912842 + <_> + + <_> + + + + <_>1 10 22 3 -1. + <_>1 11 22 1 3. + 0 + -8.9000002481043339e-004 + -0.3011760115623474 + 0.2381879985332489 + <_> + + <_> + + + + <_>8 9 6 10 -1. + <_>10 9 2 10 3. + 0 + 0.0179650001227856 + -0.2228499948978424 + 0.2995400130748749 + <_> + + <_> + + + + <_>13 2 6 12 -1. + <_>16 2 3 6 2. + <_>13 8 3 6 2. + 0 + -2.5380000006407499e-003 + 0.2506439983844757 + -0.1366560012102127 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>10 6 2 9 2. + <_>12 15 2 9 2. + 0 + -9.0680001303553581e-003 + 0.2901749908924103 + -0.2892970144748688 + <_> + + <_> + + + + <_>7 8 10 16 -1. + <_>12 8 5 8 2. + <_>7 16 5 8 2. + 0 + 0.0491699986159801 + 0.1915639936923981 + -0.6832870244979858 + <_> + + <_> + + + + <_>8 1 8 12 -1. + <_>8 1 4 6 2. + <_>12 7 4 6 2. + 0 + -0.0306809991598129 + -0.7567700147628784 + -0.0132799996063113 + <_> + + <_> + + + + <_>7 1 12 14 -1. + <_>13 1 6 7 2. + <_>7 8 6 7 2. + 0 + 0.1001740023493767 + 0.0844539999961853 + 1.0888710021972656 + <_> + + <_> + + + + <_>2 14 12 6 -1. + <_>2 16 12 2 3. + 0 + 3.1950001139193773e-003 + -0.2691940069198608 + 0.1953790038824081 + <_> + + <_> + + + + <_>11 16 6 6 -1. + <_>11 19 6 3 2. + 0 + 0.0355030000209808 + 0.1363230049610138 + -0.5691720247268677 + <_> + + <_> + + + + <_>7 16 6 6 -1. + <_>7 19 6 3 2. + 0 + 4.5900000259280205e-004 + -0.4044399857521057 + 0.1407479941844940 + <_> + + <_> + + + + <_>13 4 4 10 -1. + <_>13 4 2 10 2. + 0 + 0.0252589993178844 + 0.1624320000410080 + -0.5574179887771606 + <_> + + <_> + + + + <_>0 19 19 3 -1. + <_>0 20 19 1 3. + 0 + -5.1549999043345451e-003 + 0.3113259971141815 + -0.2275609970092773 + <_> + + <_> + + + + <_>12 8 6 8 -1. + <_>12 12 6 4 2. + 0 + 1.5869999770075083e-003 + -0.2686769962310791 + 0.1956540048122406 + <_> + + <_> + + + + <_>8 1 8 22 -1. + <_>8 12 8 11 2. + 0 + -0.0162049997597933 + 0.1548649966716766 + -0.3405779898166657 + <_> + + <_> + + + + <_>12 8 6 8 -1. + <_>12 12 6 4 2. + 0 + -0.0296240001916885 + 1.1466799974441528 + 0.0905579999089241 + <_> + + <_> + + + + <_>6 8 6 8 -1. + <_>6 12 6 4 2. + 0 + -1.5930000226944685e-003 + -0.7125750184059143 + -7.0400000549852848e-004 + <_> + + <_> + + + + <_>14 5 6 9 -1. + <_>14 8 6 3 3. + 0 + -0.0540190003812313 + 0.4153749942779541 + 0.0272460002452135 + <_> + + <_> + + + + <_>0 6 24 4 -1. + <_>0 8 24 2 2. + 0 + -0.0662110000848770 + -1.3340090513229370 + -0.0473529994487762 + <_> + + <_> + + + + <_>14 12 10 6 -1. + <_>14 14 10 2 3. + 0 + 0.0279409997165203 + 0.1444630026817322 + -0.5151839852333069 + <_> + + <_> + + + + <_>0 12 10 6 -1. + <_>0 14 10 2 3. + 0 + 0.0289570000022650 + -0.0499660000205040 + -1.1929039955139160 + <_> + + <_> + + + + <_>4 6 19 3 -1. + <_>4 7 19 1 3. + 0 + -0.0204249992966652 + 0.6388130187988281 + 0.0381410010159016 + <_> + + <_> + + + + <_>1 6 19 3 -1. + <_>1 7 19 1 3. + 0 + 0.0124169997870922 + -0.2154700011014938 + 0.4947769939899445 + -3.2772979736328125 + 19 + -1 + <_> + + + <_> + + <_> + + + + <_>4 0 16 9 -1. 
+ <_>4 3 16 3 3. + 0 + 0.0432740002870560 + -0.8049439787864685 + 0.3989729881286621 + <_> + + <_> + + + + <_>0 1 24 5 -1. + <_>8 1 8 5 3. + 0 + 0.1861550062894821 + -0.3165529966354370 + 0.6887729763984680 + <_> + + <_> + + + + <_>3 6 6 15 -1. + <_>3 11 6 5 3. + 0 + 0.0318609997630119 + -0.6426619887351990 + 0.2555089890956879 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0140220001339912 + -0.4592660069465637 + 0.3117119967937470 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -6.3029997982084751e-003 + 0.4602690041065216 + -0.2743850052356720 + <_> + + <_> + + + + <_>6 22 18 2 -1. + <_>6 23 18 1 2. + 0 + -5.4310001432895660e-003 + 0.3660860061645508 + -0.2720580101013184 + <_> + + <_> + + + + <_>2 12 6 9 -1. + <_>2 15 6 3 3. + 0 + 0.0168229993432760 + 0.0234769992530346 + -0.8844379782676697 + <_> + + <_> + + + + <_>18 12 6 9 -1. + <_>18 15 6 3 3. + 0 + 0.0260390006005764 + 0.1748879998922348 + -0.5456470251083374 + <_> + + <_> + + + + <_>0 12 6 9 -1. + <_>0 15 6 3 3. + 0 + -0.0267200004309416 + -0.9639649987220764 + 0.0235249996185303 + <_> + + <_> + + + + <_>11 14 4 10 -1. + <_>11 19 4 5 2. + 0 + -0.0170419998466969 + -0.7084879875183106 + 0.2146809995174408 + <_> + + <_> + + + + <_>9 6 6 16 -1. + <_>9 14 6 8 2. + 0 + 5.9569999575614929e-003 + 0.0736010000109673 + -0.6822559833526611 + <_> + + <_> + + + + <_>7 7 10 10 -1. + <_>7 12 10 5 2. + 0 + -2.8679999522864819e-003 + -0.7493500113487244 + 0.2380339950323105 + <_> + + <_> + + + + <_>1 3 6 13 -1. + <_>3 3 2 13 3. + 0 + -0.0437749996781349 + 0.6832330226898193 + -0.2138029932975769 + <_> + + <_> + + + + <_>18 1 6 13 -1. + <_>18 1 3 13 2. + 0 + 0.0516330003738403 + -0.1256649941205978 + 0.6752380132675171 + <_> + + <_> + + + + <_>5 1 6 9 -1. + <_>7 1 2 9 3. + 0 + 8.1780003383755684e-003 + 0.0706899985671043 + -0.8066589832305908 + <_> + + <_> + + + + <_>18 2 6 11 -1. + <_>18 2 3 11 2. + 0 + -0.0528419986367226 + 0.9543390274047852 + 0.0165480002760887 + <_> + + <_> + + + + <_>0 2 6 11 -1. + <_>3 2 3 11 2. + 0 + 0.0525839999318123 + -0.2841440141201019 + 0.4712980091571808 + <_> + + <_> + + + + <_>9 12 15 6 -1. + <_>9 14 15 2 3. + 0 + -0.0126590002328157 + 0.3844540119171143 + -0.0622880011796951 + <_> + + <_> + + + + <_>2 2 20 3 -1. + <_>2 3 20 1 3. + 0 + 0.0116940001025796 + 5.6000000768108293e-005 + -1.0173139572143555 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>10 6 2 9 2. + 0 + -0.0239189993590117 + 0.8492130041122437 + 5.7399999350309372e-003 + <_> + + <_> + + + + <_>5 6 12 14 -1. + <_>5 6 6 7 2. + <_>11 13 6 7 2. + 0 + -0.0616739988327026 + -0.9257140159606934 + -1.7679999582469463e-003 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + -1.8279999494552612e-003 + -0.5437229871749878 + 0.2493239939212799 + <_> + + <_> + + + + <_>7 0 9 6 -1. + <_>10 0 3 6 3. + 0 + 0.0352579988539219 + -7.3719997890293598e-003 + -0.9396399855613709 + <_> + + <_> + + + + <_>10 6 6 9 -1. + <_>12 6 2 9 3. + 0 + -0.0184380002319813 + 0.7213670015335083 + 0.0104919997975230 + <_> + + <_> + + + + <_>4 1 12 20 -1. + <_>4 1 6 10 2. + <_>10 11 6 10 2. + 0 + -0.0383890010416508 + 0.1927260011434555 + -0.3583210110664368 + <_> + + <_> + + + + <_>6 7 18 3 -1. + <_>6 7 9 3 2. + 0 + 0.0997209995985031 + 0.1135419979691505 + -1.6304190158843994 + <_> + + <_> + + + + <_>0 7 18 3 -1. + <_>9 7 9 3 2. + 0 + 0.0844620019197464 + -0.0534209981560707 + -1.6981120109558105 + <_> + + <_> + + + + <_>3 20 18 3 -1. + <_>9 20 6 3 3. + 0 + 0.0402700006961823 + -0.1078319996595383 + 0.5192660093307495 + <_> + + <_> + + + + <_>9 6 6 9 -1. 
+ <_>11 6 2 9 3. + 0 + 0.0589359998703003 + -0.1805370002985001 + 0.9511979818344116 + <_> + + <_> + + + + <_>6 2 12 15 -1. + <_>10 2 4 15 3. + 0 + 0.1495700031518936 + 0.1678529977798462 + -1.1591869592666626 + <_> + + <_> + + + + <_>2 3 18 3 -1. + <_>2 4 18 1 3. + 0 + 6.9399998756125569e-004 + 0.2049140036106110 + -0.3311820030212402 + <_> + + <_> + + + + <_>19 4 4 18 -1. + <_>21 4 2 9 2. + <_>19 13 2 9 2. + 0 + -0.0333690010011196 + 0.9346809983253479 + -2.9639999847859144e-003 + <_> + + <_> + + + + <_>0 1 19 3 -1. + <_>0 2 19 1 3. + 0 + 9.3759996816515923e-003 + 3.7000000011175871e-003 + -0.7754979729652405 + <_> + + <_> + + + + <_>5 0 15 4 -1. + <_>5 2 15 2 2. + 0 + 0.0431939996778965 + -2.2040000185370445e-003 + 0.7458969950675964 + <_> + + <_> + + + + <_>5 2 14 5 -1. + <_>12 2 7 5 2. + 0 + -0.0675550028681755 + 0.7229210138320923 + -0.1840420067310333 + <_> + + <_> + + + + <_>1 2 22 14 -1. + <_>1 2 11 14 2. + 0 + -0.3116860091686249 + 1.0014270544052124 + 0.0340030007064343 + <_> + + <_> + + + + <_>8 15 6 9 -1. + <_>10 15 2 9 3. + 0 + 0.0297439992427826 + -0.0463560000061989 + -1.2781809568405151 + <_> + + <_> + + + + <_>6 17 18 3 -1. + <_>6 18 18 1 3. + 0 + 0.0107370000332594 + 0.0148120000958443 + 0.6664999723434448 + <_> + + <_> + + + + <_>9 6 3 18 -1. + <_>9 12 3 6 3. + 0 + -0.0288410000503063 + -0.9422259926795960 + -0.0207969993352890 + <_> + + <_> + + + + <_>2 0 20 3 -1. + <_>2 1 20 1 3. + 0 + -5.7649998925626278e-003 + -0.4354189932346344 + 0.2338600009679794 + <_> + + <_> + + + + <_>5 4 5 12 -1. + <_>5 8 5 4 3. + 0 + 0.0284109991043806 + -0.1761579960584641 + 0.8576530218124390 + <_> + + <_> + + + + <_>8 6 12 5 -1. + <_>12 6 4 5 3. + 0 + -0.0290079992264509 + 0.5797809958457947 + 0.0285659991204739 + <_> + + <_> + + + + <_>9 12 6 12 -1. + <_>9 12 3 6 2. + <_>12 18 3 6 2. + 0 + 0.0249659996479750 + -0.0227290000766516 + -0.9677309989929199 + <_> + + <_> + + + + <_>14 14 8 10 -1. + <_>18 14 4 5 2. + <_>14 19 4 5 2. + 0 + 0.0120360003784299 + -0.1421470046043396 + 0.5168799757957459 + <_> + + <_> + + + + <_>2 14 8 10 -1. + <_>2 14 4 5 2. + <_>6 19 4 5 2. + 0 + -0.0425140000879765 + 0.9727380275726318 + -0.1811980009078980 + <_> + + <_> + + + + <_>10 18 12 6 -1. + <_>16 18 6 3 2. + <_>10 21 6 3 2. + 0 + 0.0102760000154376 + -0.0830999985337257 + 0.3176279962062836 + <_> + + <_> + + + + <_>1 3 6 9 -1. + <_>1 6 6 3 3. + 0 + -0.0691919997334480 + -2.0668580532073975 + -0.0601739995181561 + <_> + + <_> + + + + <_>11 3 3 20 -1. + <_>12 3 1 20 3. + 0 + -4.6769999898970127e-003 + 0.4413180053234100 + 0.0232090000063181 + <_> + + <_> + + + + <_>4 6 14 6 -1. + <_>4 6 7 3 2. + <_>11 9 7 3 2. + 0 + -0.0139239998534322 + 0.2860670089721680 + -0.2915270030498505 + <_> + + <_> + + + + <_>6 5 12 13 -1. + <_>10 5 4 13 3. + 0 + -0.0153339998796582 + -0.5741450190544128 + 0.2306330054998398 + <_> + + <_> + + + + <_>5 4 4 15 -1. + <_>5 9 4 5 3. + 0 + -0.0102390004321933 + 0.3447920083999634 + -0.2608039975166321 + <_> + + <_> + + + + <_>9 16 15 4 -1. + <_>14 16 5 4 3. + 0 + -0.0509889982640743 + 0.5615410208702087 + 0.0612189993262291 + <_> + + <_> + + + + <_>7 8 6 14 -1. + <_>7 8 3 7 2. + <_>10 15 3 7 2. + 0 + 0.0306899994611740 + -0.1477279961109161 + 1.6378489732742310 + <_> + + <_> + + + + <_>7 6 10 6 -1. + <_>7 8 10 2 3. + 0 + -0.0112239997833967 + 0.2400619983673096 + -0.4486489892005920 + <_> + + <_> + + + + <_>2 5 18 3 -1. + <_>2 6 18 1 3. + 0 + -6.2899999320507050e-003 + 0.4311949908733368 + -0.2380899935960770 + <_> + + <_> + + + + <_>5 1 15 8 -1. + <_>5 5 15 4 2. 
+ 0 + 0.0785909965634346 + 0.0198650006204844 + 0.8085380196571350 + <_> + + <_> + + + + <_>7 1 8 18 -1. + <_>7 10 8 9 2. + 0 + -0.0101789999753237 + 0.1819320023059845 + -0.3287779986858368 + <_> + + <_> + + + + <_>0 10 24 3 -1. + <_>0 11 24 1 3. + 0 + 0.0312270000576973 + 0.1497389972209930 + -1.4180339574813843 + <_> + + <_> + + + + <_>0 2 6 13 -1. + <_>2 2 2 13 3. + 0 + 0.0401969999074936 + -0.1976049989461899 + 0.5850819945335388 + <_> + + <_> + + + + <_>16 0 8 10 -1. + <_>20 0 4 5 2. + <_>16 5 4 5 2. + 0 + 0.0161380004137754 + 5.0000002374872565e-004 + 0.3905000090599060 + <_> + + <_> + + + + <_>5 1 10 9 -1. + <_>5 4 10 3 3. + 0 + -0.0455190017819405 + 1.2646820545196533 + -0.1563259959220886 + <_> + + <_> + + + + <_>5 6 18 3 -1. + <_>5 7 18 1 3. + 0 + -0.0181300006806850 + 0.6514850258827210 + 0.0102359997108579 + <_> + + <_> + + + + <_>0 1 24 3 -1. + <_>0 2 24 1 3. + 0 + -0.0140019999817014 + -1.0344820022583008 + -0.0321829989552498 + <_> + + <_> + + + + <_>11 4 6 11 -1. + <_>13 4 2 11 3. + 0 + -0.0388160012662411 + -0.4787429869174957 + 0.1629070043563843 + <_> + + <_> + + + + <_>0 0 8 10 -1. + <_>0 0 4 5 2. + <_>4 5 4 5 2. + 0 + 0.0316560007631779 + -0.2098339945077896 + 0.5457590222358704 + <_> + + <_> + + + + <_>4 16 18 3 -1. + <_>4 17 18 1 3. + 0 + -0.0108399996533990 + 0.5189880132675171 + -0.0150800002738833 + <_> + + <_> + + + + <_>2 16 18 3 -1. + <_>2 17 18 1 3. + 0 + 0.0120329996570945 + -0.2110760062932968 + 0.7593700289726257 + <_> + + <_> + + + + <_>3 0 18 10 -1. + <_>12 0 9 5 2. + <_>3 5 9 5 2. + 0 + 0.0707729980349541 + 0.1804880052804947 + -0.7404850125312805 + <_> + + <_> + + + + <_>2 3 20 21 -1. + <_>12 3 10 21 2. + 0 + 0.5313979983329773 + -0.1449169963598251 + 1.5360039472579956 + <_> + + <_> + + + + <_>6 7 14 3 -1. + <_>6 7 7 3 2. + 0 + -0.0147740002721548 + -0.2815369963645935 + 0.2040729969739914 + <_> + + <_> + + + + <_>0 9 12 6 -1. + <_>0 9 6 3 2. + <_>6 12 6 3 2. + 0 + -2.2410000674426556e-003 + -0.4487630128860474 + 0.0539890006184578 + <_> + + <_> + + + + <_>3 14 21 4 -1. + <_>10 14 7 4 3. + 0 + 0.0499680005013943 + 0.0415140017867088 + 0.2941710054874420 + <_> + + <_> + + + + <_>0 14 21 4 -1. + <_>7 14 7 4 3. + 0 + -0.0477019995450974 + 0.3967429995536804 + -0.2830179929733276 + <_> + + <_> + + + + <_>5 21 18 3 -1. + <_>11 21 6 3 3. + 0 + -0.0913110002875328 + 2.1994259357452393 + 0.0879649966955185 + <_> + + <_> + + + + <_>1 21 18 3 -1. + <_>7 21 6 3 3. + 0 + 0.0380700007081032 + -0.2802560031414032 + 0.2515619993209839 + <_> + + <_> + + + + <_>19 4 4 18 -1. + <_>21 4 2 9 2. + <_>19 13 2 9 2. + 0 + -0.0155389998108149 + 0.3415749967098236 + 0.0179249998182058 + <_> + + <_> + + + + <_>3 7 18 3 -1. + <_>3 8 18 1 3. + 0 + -0.0154459998011589 + 0.2868019938468933 + -0.2513589859008789 + <_> + + <_> + + + + <_>19 4 4 18 -1. + <_>21 4 2 9 2. + <_>19 13 2 9 2. + 0 + -0.0573880001902580 + 0.6383000016212463 + 0.0885979980230331 + <_> + + <_> + + + + <_>7 15 10 6 -1. + <_>7 17 10 2 3. + 0 + -5.9440000914037228e-003 + 0.0790169984102249 + -0.4077489972114563 + <_> + + <_> + + + + <_>9 13 11 9 -1. + <_>9 16 11 3 3. + 0 + -0.0699689984321594 + -0.4464420080184937 + 0.1721960008144379 + <_> + + <_> + + + + <_>0 6 4 10 -1. + <_>0 11 4 5 2. + 0 + -0.0250649992376566 + -0.9827020168304443 + -0.0353880003094673 + <_> + + <_> + + + + <_>15 16 9 6 -1. + <_>15 18 9 2 3. + 0 + 0.0172160007059574 + 0.2270590066909790 + -0.8055009841918945 + <_> + + <_> + + + + <_>1 5 4 18 -1. + <_>1 5 2 9 2. + <_>3 14 2 9 2. 
+ 0 + -0.0442790016531944 + 0.8395199775695801 + -0.1742960065603256 + <_> + + <_> + + + + <_>9 8 8 10 -1. + <_>13 8 4 5 2. + <_>9 13 4 5 2. + 0 + 0.0439889989793301 + 0.1155719980597496 + -1.9666889905929565 + <_> + + <_> + + + + <_>7 8 8 10 -1. + <_>7 8 4 5 2. + <_>11 13 4 5 2. + 0 + 0.0159070007503033 + -0.0375760011374950 + -1.0311100482940674 + <_> + + <_> + + + + <_>9 8 12 5 -1. + <_>13 8 4 5 3. + 0 + -0.0927549973130226 + -1.3530019521713257 + 0.1214129999279976 + <_> + + <_> + + + + <_>7 8 9 7 -1. + <_>10 8 3 7 3. + 0 + 0.0710370019078255 + -0.1768430024385452 + 0.7448520064353943 + <_> + + <_> + + + + <_>9 8 12 5 -1. + <_>13 8 4 5 3. + 0 + 0.0577620007097721 + 0.1283559948205948 + -0.4444420039653778 + <_> + + <_> + + + + <_>7 6 9 7 -1. + <_>10 6 3 7 3. + 0 + -0.0164320003241301 + 0.8015270233154297 + -0.1749169975519180 + <_> + + <_> + + + + <_>9 8 12 5 -1. + <_>13 8 4 5 3. + 0 + 0.0239390004426241 + 0.1614499986171722 + -0.1236450001597405 + <_> + + <_> + + + + <_>10 5 4 18 -1. + <_>10 11 4 6 3. + 0 + 0.0126360002905130 + 0.1541199982166290 + -0.3329379856586456 + <_> + + <_> + + + + <_>5 5 14 12 -1. + <_>5 11 14 6 2. + 0 + -0.0543479993939400 + -1.8400700092315674 + 0.1483599990606308 + <_> + + <_> + + + + <_>0 1 11 4 -1. + <_>0 3 11 2 2. + 0 + -0.0132619999349117 + -0.8083879947662354 + -0.0277260001748800 + <_> + + <_> + + + + <_>9 10 6 10 -1. + <_>11 10 2 10 3. + 0 + 6.1340001411736012e-003 + -0.1378500014543533 + 0.3285849988460541 + <_> + + <_> + + + + <_>2 17 11 6 -1. + <_>2 19 11 2 3. + 0 + 0.0289910007268190 + -0.0255169998854399 + -0.8338720202445984 + <_> + + <_> + + + + <_>15 16 9 6 -1. + <_>15 18 9 2 3. + 0 + -0.0219860002398491 + -0.7373999953269959 + 0.1788710057735443 + <_> + + <_> + + + + <_>1 10 18 2 -1. + <_>1 11 18 1 2. + 0 + 5.3269998170435429e-003 + -0.4544929862022400 + 0.0687910020351410 + <_> + + <_> + + + + <_>6 4 12 13 -1. + <_>10 4 4 13 3. + 0 + 0.0860479995608330 + 0.2100850045681000 + -0.3780890107154846 + <_> + + <_> + + + + <_>0 18 18 3 -1. + <_>0 19 18 1 3. + 0 + -8.5549997165799141e-003 + 0.4013499915599823 + -0.2107409983873367 + <_> + + <_> + + + + <_>6 18 18 3 -1. + <_>6 19 18 1 3. + 0 + 6.7790001630783081e-003 + -0.0216489993035793 + 0.4542149901390076 + <_> + + <_> + + + + <_>0 16 9 6 -1. + <_>0 18 9 2 3. + 0 + -6.3959998078644276e-003 + -0.4981859922409058 + 0.0759079977869987 + <_> + + <_> + + + + <_>13 15 9 6 -1. + <_>13 17 9 2 3. + 0 + 8.9469999074935913e-003 + 0.1785770058631897 + -0.2845489978790283 + <_> + + <_> + + + + <_>2 15 9 6 -1. + <_>2 17 9 2 3. + 0 + 3.2589999027550220e-003 + 0.0466249994933605 + -0.5520629882812500 + <_> + + <_> + + + + <_>13 1 6 16 -1. + <_>13 1 3 16 2. + 0 + 0.0414769984781742 + 0.1755049973726273 + -0.2070399969816208 + <_> + + <_> + + + + <_>5 1 6 16 -1. + <_>8 1 3 16 2. + 0 + -6.7449999041855335e-003 + -0.4639259874820709 + 0.0693039968609810 + <_> + + <_> + + + + <_>11 5 6 10 -1. + <_>13 5 2 10 3. + 0 + 0.0305649992078543 + 0.0517349988222122 + 0.7555050253868103 + <_> + + <_> + + + + <_>7 5 6 10 -1. + <_>9 5 2 10 3. + 0 + -7.4780001305043697e-003 + 0.1489389985799789 + -0.3190680146217346 + <_> + + <_> + + + + <_>10 0 6 24 -1. + <_>12 0 2 24 3. + 0 + 0.0890889987349510 + 0.1373880058526993 + -1.1379710435867310 + <_> + + <_> + + + + <_>3 4 4 20 -1. + <_>3 4 2 10 2. + <_>5 14 2 10 2. + 0 + 7.3230001144111156e-003 + -0.2882919907569885 + 0.1908860057592392 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>16 0 2 9 3. 
+ 0 + -0.0182050000876188 + -0.3017860054969788 + 0.1679580062627792 + <_> + + <_> + + + + <_>4 0 6 9 -1. + <_>6 0 2 9 3. + 0 + -0.0258280001580715 + -0.9813799858093262 + -0.0198609996587038 + <_> + + <_> + + + + <_>4 5 18 5 -1. + <_>10 5 6 5 3. + 0 + 0.1093619987368584 + 0.0487900003790855 + 0.5311830043792725 + <_> + + <_> + + + + <_>5 6 6 9 -1. + <_>7 6 2 9 3. + 0 + -0.0114249996840954 + 0.2370599955320358 + -0.2792530059814453 + <_> + + <_> + + + + <_>7 2 15 8 -1. + <_>12 2 5 8 3. + 0 + -0.0575659982860088 + 0.4725539982318878 + 0.0651710033416748 + <_> + + <_> + + + + <_>2 2 15 8 -1. + <_>7 2 5 8 3. + 0 + 0.1027830019593239 + -0.2076510041952133 + 0.5094770193099976 + <_> + + <_> + + + + <_>10 0 4 9 -1. + <_>10 0 2 9 2. + 0 + 0.0270419996231794 + 0.1642120033502579 + -1.4508620500564575 + <_> + + <_> + + + + <_>3 4 6 12 -1. + <_>3 4 3 6 2. + <_>6 10 3 6 2. + 0 + -0.0136350002139807 + -0.5654389858245850 + 0.0237889997661114 + <_> + + <_> + + + + <_>16 0 8 18 -1. + <_>16 0 4 18 2. + 0 + -0.3215819895267487 + -3.5602829456329346 + 0.1180130019783974 + <_> + + <_> + + + + <_>0 0 8 18 -1. + <_>4 0 4 18 2. + 0 + 0.2045810073614121 + -0.0370160005986691 + -1.0225499868392944 + <_> + + <_> + + + + <_>0 7 24 6 -1. + <_>0 9 24 2 3. + 0 + -0.0703470036387444 + -0.5649189949035645 + 0.1852519959211350 + <_> + + <_> + + + + <_>4 7 14 3 -1. + <_>11 7 7 3 2. + 0 + 0.0378310009837151 + -0.0299019999802113 + -0.8292149901390076 + <_> + + <_> + + + + <_>10 8 8 15 -1. + <_>10 8 4 15 2. + 0 + -0.0702980011701584 + -0.5317230224609375 + 0.1443019956350327 + <_> + + <_> + + + + <_>7 0 10 14 -1. + <_>12 0 5 14 2. + 0 + 0.0632210001349449 + -0.2204120010137558 + 0.4795219898223877 + <_> + + <_> + + + + <_>13 10 8 10 -1. + <_>17 10 4 5 2. + <_>13 15 4 5 2. + 0 + 0.0363930016756058 + 0.1422269940376282 + -0.6119390130043030 + <_> + + <_> + + + + <_>3 0 4 9 -1. + <_>5 0 2 9 2. + 0 + 4.0099998004734516e-003 + -0.3456079959869385 + 0.1173869967460632 + <_> + + <_> + + + + <_>16 1 6 8 -1. + <_>16 1 3 8 2. + 0 + -0.0491060018539429 + 0.9598410129547119 + 0.0649349987506866 + <_> + + <_> + + + + <_>2 1 6 8 -1. + <_>5 1 3 8 2. + 0 + -0.0715830028057098 + 1.7385669946670532 + -0.1425289958715439 + <_> + + <_> + + + + <_>3 6 18 12 -1. + <_>3 10 18 4 3. + 0 + -0.0380089990794659 + 1.3872820138931274 + 0.0661880001425743 + <_> + + <_> + + + + <_>4 12 16 4 -1. + <_>4 14 16 2 2. + 0 + -3.1570000573992729e-003 + 0.0536770001053810 + -0.5404800176620483 + <_> + + <_> + + + + <_>4 9 16 15 -1. + <_>4 14 16 5 3. + 0 + 0.0194589998573065 + -0.0936200022697449 + 0.3913100063800812 + <_> + + <_> + + + + <_>3 10 8 10 -1. + <_>3 10 4 5 2. + <_>7 15 4 5 2. + 0 + 0.0112939998507500 + 0.0372239984571934 + -0.5425180196762085 + <_> + + <_> + + + + <_>8 18 16 6 -1. + <_>16 18 8 3 2. + <_>8 21 8 3 2. + 0 + -0.0334950014948845 + 0.9530789852142334 + 0.0376969985663891 + <_> + + <_> + + + + <_>2 16 12 5 -1. + <_>6 16 4 5 3. + 0 + 0.0920350030064583 + -0.1348839998245239 + 2.2897069454193115 + <_> + + <_> + + + + <_>14 14 9 4 -1. + <_>14 16 9 2 2. + 0 + 3.7529999390244484e-003 + 0.2282419949769974 + -0.5998370051383972 + <_> + + <_> + + + + <_>7 14 9 6 -1. + <_>7 16 9 2 3. + 0 + 0.0128480000421405 + -0.2200520038604736 + 0.3722189962863922 + <_> + + <_> + + + + <_>4 10 16 12 -1. + <_>4 14 16 4 3. + 0 + -0.1431619971990585 + 1.2855789661407471 + 0.0472370013594627 + <_> + + <_> + + + + <_>0 13 19 6 -1. + <_>0 15 19 2 3. + 0 + -0.0968799963593483 + -3.9550929069519043 + -0.0729039981961250 + <_> + + <_> + + + + <_>10 13 9 6 -1. 
+ <_>10 15 9 2 3. + 0 + -8.8459998369216919e-003 + 0.3767499923706055 + -0.0464840009808540 + <_> + + <_> + + + + <_>5 0 3 23 -1. + <_>6 0 1 23 3. + 0 + 0.0159000009298325 + -0.0244570001959801 + -0.8003479838371277 + <_> + + <_> + + + + <_>0 8 24 6 -1. + <_>0 10 24 2 3. + 0 + 0.0703720003366470 + 0.1701900064945221 + -0.6306899785995483 + <_> + + <_> + + + + <_>0 5 5 12 -1. + <_>0 9 5 4 3. + 0 + -0.0379539988934994 + -0.9366719722747803 + -0.0412140004336834 + <_> + + <_> + + + + <_>3 0 19 18 -1. + <_>3 9 19 9 2. + 0 + 0.5159789919853210 + 0.1308059990406036 + -1.5802290439605713 + <_> + + <_> + + + + <_>9 11 6 12 -1. + <_>9 11 3 6 2. + <_>12 17 3 6 2. + 0 + -0.0328430011868477 + -1.1441620588302612 + -0.0491739995777607 + <_> + + <_> + + + + <_>0 5 24 8 -1. + <_>12 5 12 4 2. + <_>0 9 12 4 2. + 0 + -0.0363570004701614 + 0.4960640072822571 + -0.0344589985907078 + <_> + + <_> + + + + <_>6 18 9 4 -1. + <_>6 20 9 2 2. + 0 + 6.8080001510679722e-003 + -0.3099780082702637 + 0.1705480068922043 + <_> + + <_> + + + + <_>8 8 10 6 -1. + <_>8 10 10 2 3. + 0 + -0.0161140002310276 + -0.3790459930896759 + 0.1607899963855743 + <_> + + <_> + + + + <_>2 7 20 3 -1. + <_>2 8 20 1 3. + 0 + 8.4530003368854523e-003 + -0.1865549981594086 + 0.5636770129203796 + <_> + + <_> + + + + <_>12 0 7 20 -1. + <_>12 10 7 10 2. + 0 + -0.1375239938497543 + -0.5898990035057068 + 0.1174950003623962 + <_> + + <_> + + + + <_>5 0 7 20 -1. + <_>5 10 7 10 2. + 0 + 0.1768800020217896 + -0.1542489975690842 + 0.9291110038757324 + <_> + + <_> + + + + <_>14 2 2 18 -1. + <_>14 11 2 9 2. + 0 + 7.9309996217489243e-003 + 0.3219070136547089 + -0.1639260053634644 + <_> + + <_> + + + + <_>5 8 10 12 -1. + <_>10 8 5 12 2. + 0 + 0.1097180023789406 + -0.1587650030851364 + 1.0186259746551514 + <_> + + <_> + + + + <_>6 9 12 8 -1. + <_>12 9 6 4 2. + <_>6 13 6 4 2. + 0 + -0.0302930008620024 + 0.7558730244636536 + 0.0317949987947941 + <_> + + <_> + + + + <_>7 7 3 14 -1. + <_>7 14 3 7 2. + 0 + -0.0231180004775524 + -0.8845149874687195 + -9.5039997249841690e-003 + <_> + + <_> + + + + <_>11 2 12 16 -1. + <_>17 2 6 8 2. + <_>11 10 6 8 2. + 0 + -3.0900000128895044e-003 + 0.2383829951286316 + -0.1160620003938675 + <_> + + <_> + + + + <_>7 0 6 9 -1. + <_>9 0 2 9 3. + 0 + -0.0333920009434223 + -1.8738139867782593 + -0.0685029998421669 + <_> + + <_> + + + + <_>13 14 9 4 -1. + <_>13 16 9 2 2. + 0 + 0.0131900003179908 + 0.1291989982128143 + -0.6751220226287842 + <_> + + <_> + + + + <_>0 12 22 4 -1. + <_>0 12 11 2 2. + <_>11 14 11 2 2. + 0 + 0.0146610001102090 + -0.0248290002346039 + -0.7439680099487305 + <_> + + <_> + + + + <_>1 12 22 6 -1. + <_>12 12 11 3 2. + <_>1 15 11 3 2. + 0 + -0.0132480002939701 + 0.4682019948959351 + -0.0241650007665157 + <_> + + <_> + + + + <_>6 6 9 6 -1. + <_>9 6 3 6 3. + 0 + -0.0162189994007349 + 0.4008379876613617 + -0.2125570029020309 + <_> + + <_> + + + + <_>10 0 4 9 -1. + <_>10 0 2 9 2. + 0 + -0.0290520004928112 + -1.5650019645690918 + 0.1437589973211289 + <_> + + <_> + + + + <_>3 8 18 7 -1. + <_>9 8 6 7 3. + 0 + -0.1015319973230362 + -1.9220689535140991 + -0.0695599988102913 + <_> + + <_> + + + + <_>0 6 24 6 -1. + <_>0 8 24 2 3. + 0 + 0.0377539992332459 + 0.1339679956436157 + -2.2639141082763672 + <_> + + <_> + + + + <_>0 11 24 10 -1. + <_>8 11 8 10 3. + 0 + -0.2855559885501862 + 1.0215270519256592 + -0.1523219943046570 + <_> + + <_> + + + + <_>3 3 18 21 -1. + <_>9 3 6 21 3. + 0 + 0.1536069959402084 + -0.0974090024828911 + 0.4166240096092224 + <_> + + <_> + + + + <_>7 12 4 10 -1. + <_>9 12 2 10 2. 
+ 0 + -2.1199999901000410e-004 + 0.1127189993858337 + -0.4165399968624115 + <_> + + <_> + + + + <_>10 16 10 8 -1. + <_>15 16 5 4 2. + <_>10 20 5 4 2. + 0 + -0.0205979999154806 + 0.6054049730300903 + 0.0624679997563362 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + 0.0373539999127388 + -0.1891900002956390 + 0.4646469950675964 + <_> + + <_> + + + + <_>12 10 6 12 -1. + <_>15 10 3 6 2. + <_>12 16 3 6 2. + 0 + 0.0572750009596348 + 0.1156530007719994 + -1.3213009834289551 + <_> + + <_> + + + + <_>6 10 6 12 -1. + <_>6 10 3 6 2. + <_>9 16 3 6 2. + 0 + 5.1029999740421772e-003 + -0.2806150019168854 + 0.1931339949369431 + <_> + + <_> + + + + <_>16 12 6 12 -1. + <_>19 12 3 6 2. + <_>16 18 3 6 2. + 0 + -0.0546449981629848 + 0.7242850065231323 + 0.0754479989409447 + <_> + + <_> + + + + <_>2 12 6 12 -1. + <_>2 12 3 6 2. + <_>5 18 3 6 2. + 0 + 0.0253490004688501 + -0.1948180049657822 + 0.4603280127048492 + <_> + + <_> + + + + <_>10 15 6 9 -1. + <_>12 15 2 9 3. + 0 + 0.0243110004812479 + 0.1556410044431686 + -0.4991390109062195 + <_> + + <_> + + + + <_>8 15 6 9 -1. + <_>10 15 2 9 3. + 0 + 0.0359620004892349 + -0.0585730001330376 + -1.5418399572372437 + <_> + + <_> + + + + <_>14 20 10 4 -1. + <_>14 20 5 4 2. + 0 + -0.1000069975852966 + -1.6100039482116699 + 0.1145050004124641 + <_> + + <_> + + + + <_>0 20 10 4 -1. + <_>5 20 5 4 2. + 0 + 0.0844359993934631 + -0.0614069998264313 + -1.4673349857330322 + <_> + + <_> + + + + <_>11 17 9 6 -1. + <_>11 19 9 2 3. + 0 + 0.0159479994326830 + 0.1628790050745010 + -0.1102640032768250 + <_> + + <_> + + + + <_>3 2 14 4 -1. + <_>3 4 14 2 2. + 0 + 0.0338240005075932 + -0.1793269962072372 + 0.5721840262413025 + <_> + + <_> + + + + <_>10 1 10 4 -1. + <_>10 3 10 2 2. + 0 + -0.0619960017502308 + 4.6511812210083008 + 0.0945340022444725 + <_> + + <_> + + + + <_>0 15 10 4 -1. + <_>5 15 5 4 2. + 0 + 0.0698769986629486 + -0.1698590070009232 + 0.8702899813652039 + <_> + + <_> + + + + <_>19 2 3 19 -1. + <_>20 2 1 19 3. + 0 + -0.0279169995337725 + 0.9104250073432922 + 0.0568270012736321 + <_> + + <_> + + + + <_>4 12 9 8 -1. + <_>7 12 3 8 3. + 0 + -0.0127640003338456 + 0.2206670045852661 + -0.2776910066604614 + -3.3196411132812500 + 20 + -1 + <_> + + + <_> + + <_> + + + + <_>4 7 5 12 -1. + <_>4 11 5 4 3. + 0 + 0.0216620005667210 + -0.8986889719963074 + 0.2943629920482636 + <_> + + <_> + + + + <_>0 1 24 3 -1. + <_>8 1 8 3 3. + 0 + 0.1004450023174286 + -0.3765920102596283 + 0.6089100241661072 + <_> + + <_> + + + + <_>6 8 12 4 -1. + <_>6 10 12 2 2. + 0 + 0.0260039996355772 + -0.3812850117683411 + 0.3921740055084229 + <_> + + <_> + + + + <_>19 3 4 10 -1. + <_>19 3 2 10 2. + 0 + 0.0284410007297993 + -0.1818230003118515 + 0.5892720222473145 + <_> + + <_> + + + + <_>0 6 9 6 -1. + <_>3 6 3 6 3. + 0 + 0.0386120006442070 + -0.2239959985017777 + 0.6377999782562256 + <_> + + <_> + + + + <_>18 0 6 22 -1. + <_>20 0 2 22 3. + 0 + -0.0465949997305870 + 0.7081220149993897 + -0.1466619968414307 + <_> + + <_> + + + + <_>0 0 6 22 -1. + <_>2 0 2 22 3. + 0 + -0.0427919998764992 + 0.4768039882183075 + -0.2923319935798645 + <_> + + <_> + + + + <_>5 15 19 3 -1. + <_>5 16 19 1 3. + 0 + 3.7960000336170197e-003 + -0.1851029992103577 + 0.5262669920921326 + <_> + + <_> + + + + <_>10 7 4 15 -1. + <_>10 12 4 5 3. + 0 + 0.0423489995300770 + 0.0392449982464314 + -0.8919770121574402 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0195989999920130 + -0.2335840016603470 + 0.4414649903774262 + <_> + + <_> + + + + <_>0 21 18 3 -1. + <_>0 22 18 1 3. 
+ 0 + 8.7400001939386129e-004 + -0.4606359899044037 + 0.1768960058689117 + <_> + + <_> + + + + <_>7 3 10 15 -1. + <_>7 8 10 5 3. + 0 + -4.3629999272525311e-003 + 0.3349319994449616 + -0.2989340126514435 + <_> + + <_> + + + + <_>1 7 18 3 -1. + <_>1 8 18 1 3. + 0 + 0.0169730000197887 + -0.1640869975090027 + 1.5993679761886597 + <_> + + <_> + + + + <_>8 2 9 6 -1. + <_>11 2 3 6 3. + 0 + 0.0360639989376068 + 0.2260169982910156 + -0.5318610072135925 + <_> + + <_> + + + + <_>0 10 24 14 -1. + <_>0 17 24 7 2. + 0 + -0.0708649978041649 + 0.1522050052881241 + -0.4191460013389587 + <_> + + <_> + + + + <_>13 9 8 10 -1. + <_>17 9 4 5 2. + <_>13 14 4 5 2. + 0 + -0.0630759969353676 + -1.4874019622802734 + 0.1295370012521744 + <_> + + <_> + + + + <_>10 5 4 9 -1. + <_>12 5 2 9 2. + 0 + 0.0296700000762939 + -0.1914590001106262 + 0.9818490147590637 + <_> + + <_> + + + + <_>13 9 8 10 -1. + <_>17 9 4 5 2. + <_>13 14 4 5 2. + 0 + 0.0378739982843399 + 0.1345950067043304 + -0.5631629824638367 + <_> + + <_> + + + + <_>7 11 10 10 -1. + <_>7 11 5 5 2. + <_>12 16 5 5 2. + 0 + -0.0332890003919601 + -1.0828030109405518 + -0.0115040000528097 + <_> + + <_> + + + + <_>4 13 18 4 -1. + <_>13 13 9 2 2. + <_>4 15 9 2 2. + 0 + -0.0316089987754822 + -0.5922449827194214 + 0.1339479982852936 + <_> + + <_> + + + + <_>0 0 19 2 -1. + <_>0 1 19 1 2. + 0 + 1.0740000288933516e-003 + -0.4918580055236816 + 0.0944460034370422 + <_> + + <_> + + + + <_>0 18 24 6 -1. + <_>8 18 8 6 3. + 0 + -0.0715560019016266 + 0.5971019864082336 + -0.0395530015230179 + <_> + + <_> + + + + <_>6 4 8 16 -1. + <_>6 12 8 8 2. + 0 + -0.0811700001358986 + -1.1817820072174072 + -0.0282540004700422 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 10 10 2 2. + 0 + 4.4860001653432846e-003 + -0.6102809906005859 + 0.2261909991502762 + <_> + + <_> + + + + <_>0 3 6 9 -1. + <_>0 6 6 3 3. + 0 + -0.0421760007739067 + -1.1435619592666626 + -0.0290019996464252 + <_> + + <_> + + + + <_>13 15 7 9 -1. + <_>13 18 7 3 3. + 0 + -0.0656400024890900 + -1.6470279693603516 + 0.1281030029058456 + <_> + + <_> + + + + <_>3 18 12 6 -1. + <_>3 18 6 3 2. + <_>9 21 6 3 2. + 0 + 0.0181889999657869 + -0.3114939928054810 + 0.2573960125446320 + <_> + + <_> + + + + <_>12 14 6 9 -1. + <_>12 17 6 3 3. + 0 + -0.0515200011432171 + -0.6920689940452576 + 0.1527079939842224 + <_> + + <_> + + + + <_>2 15 15 8 -1. + <_>2 19 15 4 2. + 0 + -0.0471509993076324 + -0.7186830043792725 + 2.6879999786615372e-003 + <_> + + <_> + + + + <_>9 6 6 16 -1. + <_>9 14 6 8 2. + 0 + 0.0174889992922544 + 0.2237119972705841 + -0.5538179874420166 + <_> + + <_> + + + + <_>6 6 7 12 -1. + <_>6 10 7 4 3. + 0 + -0.0252640005201101 + 1.0319819450378418 + -0.1749649941921234 + <_> + + <_> + + + + <_>14 6 6 9 -1. + <_>14 9 6 3 3. + 0 + -0.0407450012862682 + 0.4496159851551056 + 0.0393490009009838 + <_> + + <_> + + + + <_>5 14 6 9 -1. + <_>5 17 6 3 3. + 0 + -0.0376669988036156 + -0.8547570109367371 + -0.0124639999121428 + <_> + + <_> + + + + <_>10 8 6 9 -1. + <_>12 8 2 9 3. + 0 + -0.0134110003709793 + 0.5784559845924377 + -0.0174679998308420 + <_> + + <_> + + + + <_>6 6 4 18 -1. + <_>6 6 2 9 2. + <_>8 15 2 9 2. + 0 + -7.8999997640494257e-005 + -0.3774920105934143 + 0.1396179944276810 + <_> + + <_> + + + + <_>14 9 6 12 -1. + <_>17 9 3 6 2. + <_>14 15 3 6 2. + 0 + -0.0114150000736117 + -0.2618660032749176 + 0.2371249943971634 + <_> + + <_> + + + + <_>4 9 6 12 -1. + <_>4 9 3 6 2. + <_>7 15 3 6 2. + 0 + 0.0372000001370907 + -0.0286260005086660 + -1.2945239543914795 + <_> + + <_> + + + + <_>14 15 9 6 -1. + <_>14 17 9 2 3. 
+ 0 + 3.4050000831484795e-003 + 0.2053139954805374 + -0.1874749958515167 + <_> + + <_> + + + + <_>0 20 18 4 -1. + <_>0 20 9 2 2. + <_>9 22 9 2 2. + 0 + -0.0224830005317926 + 0.6702719926834106 + -0.1959400027990341 + <_> + + <_> + + + + <_>13 18 9 6 -1. + <_>13 20 9 2 3. + 0 + 0.0232749991118908 + 0.1740539968013763 + -0.3274630010128021 + <_> + + <_> + + + + <_>2 18 9 6 -1. + <_>2 20 9 2 3. + 0 + -0.0139170000329614 + -0.8395429849624634 + -6.3760001212358475e-003 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + 7.5429999269545078e-003 + -0.0341949984431267 + 0.5899819731712341 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + -0.0115390000864863 + 0.4214279949665070 + -0.2351049929857254 + <_> + + <_> + + + + <_>19 2 4 22 -1. + <_>21 2 2 11 2. + <_>19 13 2 11 2. + 0 + 0.0525019988417625 + 0.0693039968609810 + 0.7322649955749512 + <_> + + <_> + + + + <_>1 2 4 22 -1. + <_>1 2 2 11 2. + <_>3 13 2 11 2. + 0 + 0.0527159981429577 + -0.1568810045719147 + 1.0907289981842041 + <_> + + <_> + + + + <_>15 0 2 24 -1. + <_>15 0 1 24 2. + 0 + -0.0117260003462434 + -0.7093430161476135 + 0.1682880073785782 + <_> + + <_> + + + + <_>3 20 16 4 -1. + <_>11 20 8 4 2. + 0 + 0.0959459990262985 + -0.1619289964437485 + 1.0072519779205322 + <_> + + <_> + + + + <_>11 6 4 18 -1. + <_>13 6 2 9 2. + <_>11 15 2 9 2. + 0 + -0.0158719997853041 + 0.3900839984416962 + -0.0537770017981529 + <_> + + <_> + + + + <_>7 9 10 14 -1. + <_>7 9 5 7 2. + <_>12 16 5 7 2. + 0 + 0.0348180010914803 + 0.0171799995005131 + -0.9394180178642273 + <_> + + <_> + + + + <_>14 6 6 9 -1. + <_>14 9 6 3 3. + 0 + 0.0347919985651970 + 0.0504629984498024 + 0.5446569919586182 + <_> + + <_> + + + + <_>3 6 7 9 -1. + <_>3 9 7 3 3. + 0 + 0.0162840001285076 + -0.2698130011558533 + 0.4036529958248138 + <_> + + <_> + + + + <_>20 4 4 20 -1. + <_>22 4 2 10 2. + <_>20 14 2 10 2. + 0 + -0.0443190000951290 + 0.8439999818801880 + 0.0328829996287823 + <_> + + <_> + + + + <_>7 6 6 9 -1. + <_>7 9 6 3 3. + 0 + -5.5689997971057892e-003 + 0.1530939936637878 + -0.3495979905128479 + <_> + + <_> + + + + <_>7 0 10 14 -1. + <_>12 0 5 7 2. + <_>7 7 5 7 2. + 0 + -0.0658420026302338 + -0.9271119832992554 + 0.1680099964141846 + <_> + + <_> + + + + <_>2 1 18 6 -1. + <_>11 1 9 6 2. + 0 + -0.0733370035886765 + 0.5161449909210205 + -0.2023600041866303 + <_> + + <_> + + + + <_>15 0 2 24 -1. + <_>15 0 1 24 2. + 0 + 0.0164500009268522 + 0.1395059973001480 + -0.4930129945278168 + <_> + + <_> + + + + <_>7 0 2 24 -1. + <_>8 0 1 24 2. + 0 + -9.2630004510283470e-003 + -0.9010199904441834 + -0.0161160007119179 + <_> + + <_> + + + + <_>13 12 6 7 -1. + <_>13 12 3 7 2. + 0 + 5.9139998629689217e-003 + 0.1985819935798645 + -0.1673129945993424 + <_> + + <_> + + + + <_>5 12 6 7 -1. + <_>8 12 3 7 2. + 0 + -8.4699998842552304e-004 + 0.0940050035715103 + -0.4157089889049530 + <_> + + <_> + + + + <_>3 5 18 19 -1. + <_>9 5 6 19 3. + 0 + 0.2053290009498596 + -0.0600220002233982 + 0.7099360227584839 + <_> + + <_> + + + + <_>5 6 9 6 -1. + <_>8 6 3 6 3. + 0 + -0.0168830007314682 + 0.2439219951629639 + -0.3055180013179779 + <_> + + <_> + + + + <_>9 5 9 6 -1. + <_>12 5 3 6 3. + 0 + -0.0191110000014305 + 0.6122990250587463 + 0.0242529995739460 + <_> + + <_> + + + + <_>3 16 10 8 -1. + <_>3 16 5 4 2. + <_>8 20 5 4 2. + 0 + -0.0259629990905523 + 0.9076499938964844 + -0.1672209948301315 + <_> + + <_> + + + + <_>19 8 5 15 -1. + <_>19 13 5 5 3. + 0 + -0.0217620003968477 + -0.3138470053672791 + 0.2013459950685501 + <_> + + <_> + + + + <_>0 8 5 15 -1. + <_>0 13 5 5 3. 
+ 0 + -0.0241199992597103 + -0.6658840179443359 + 7.4559999629855156e-003 + <_> + + <_> + + + + <_>20 4 4 20 -1. + <_>22 4 2 10 2. + <_>20 14 2 10 2. + 0 + 0.0471299998462200 + 0.0595339983701706 + 0.8780450224876404 + <_> + + <_> + + + + <_>0 4 4 20 -1. + <_>0 4 2 10 2. + <_>2 14 2 10 2. + 0 + -0.0459849983453751 + 0.8006799817085266 + -0.1725230067968369 + <_> + + <_> + + + + <_>7 7 10 4 -1. + <_>7 7 5 4 2. + 0 + 0.0265079997479916 + 0.1877409964799881 + -0.6085060238838196 + <_> + + <_> + + + + <_>4 19 14 4 -1. + <_>11 19 7 4 2. + 0 + -0.0486150011420250 + 0.5864409804344177 + -0.1942770034074783 + <_> + + <_> + + + + <_>10 11 12 3 -1. + <_>10 11 6 3 2. + 0 + -0.0185620002448559 + -0.2558790147304535 + 0.1632619947195053 + <_> + + <_> + + + + <_>0 1 24 3 -1. + <_>0 2 24 1 3. + 0 + 0.0126780001446605 + -0.0142280003055930 + -0.7673810124397278 + <_> + + <_> + + + + <_>7 2 14 20 -1. + <_>14 2 7 10 2. + <_>7 12 7 10 2. + 0 + -1.1919999960809946e-003 + 0.2049500048160553 + -0.1140429973602295 + <_> + + <_> + + + + <_>0 13 6 9 -1. + <_>2 13 2 9 3. + 0 + -0.0490889996290207 + -1.0740849971771240 + -0.0389409996569157 + <_> + + <_> + + + + <_>13 0 4 19 -1. + <_>13 0 2 19 2. + 0 + -0.0174369998276234 + -0.5797380208969116 + 0.1858450025320053 + <_> + + <_> + + + + <_>1 11 14 3 -1. + <_>8 11 7 3 2. + 0 + -0.0147700002416968 + -0.6615030169487000 + 5.3119999356567860e-003 + <_> + + <_> + + + + <_>7 1 16 20 -1. + <_>15 1 8 10 2. + <_>7 11 8 10 2. + 0 + -0.2290520071983337 + -0.4830510020256043 + 0.1232639998197556 + <_> + + <_> + + + + <_>0 10 21 9 -1. + <_>7 10 7 9 3. + 0 + -0.1270709931850433 + 0.5745260119438171 + -0.1942040026187897 + <_> + + <_> + + + + <_>6 19 15 5 -1. + <_>11 19 5 5 3. + 0 + 0.0103390002623200 + -0.0546419993042946 + 0.2450180053710938 + <_> + + <_> + + + + <_>8 10 6 6 -1. + <_>11 10 3 6 2. + 0 + 6.9010001607239246e-003 + 0.1218060031533241 + -0.3879739940166473 + <_> + + <_> + + + + <_>7 1 16 20 -1. + <_>15 1 8 10 2. + <_>7 11 8 10 2. + 0 + 0.2902539968490601 + 0.1096619963645935 + -30. + <_> + + <_> + + + + <_>1 1 16 20 -1. + <_>1 1 8 10 2. + <_>9 11 8 10 2. + 0 + -0.2380499988794327 + -1.7352679967880249 + -0.0638099983334541 + <_> + + <_> + + + + <_>16 4 3 12 -1. + <_>16 10 3 6 2. + 0 + 0.0624810010194778 + 0.1352300047874451 + -0.7030109763145447 + <_> + + <_> + + + + <_>5 4 3 12 -1. + <_>5 10 3 6 2. + 0 + 4.7109997831285000e-003 + -0.4698410034179688 + 0.0603419989347458 + <_> + + <_> + + + + <_>7 6 10 8 -1. + <_>12 6 5 4 2. + <_>7 10 5 4 2. + 0 + -0.0278159994632006 + 0.6980760097503662 + 1.3719999697059393e-003 + <_> + + <_> + + + + <_>4 9 6 6 -1. + <_>4 12 6 3 2. + 0 + -0.0170200001448393 + 1.6870440244674683 + -0.1431480050086975 + <_> + + <_> + + + + <_>6 5 12 4 -1. + <_>6 7 12 2 2. + 0 + -0.0497549995779991 + 0.7949770092964172 + 7.7199999941512942e-004 + <_> + + <_> + + + + <_>9 2 5 15 -1. + <_>9 7 5 5 3. + 0 + -0.0747329965233803 + -1.0132360458374023 + -0.0193889997899532 + <_> + + <_> + + + + <_>15 0 9 6 -1. + <_>15 2 9 2 3. + 0 + 0.0320090018212795 + 0.1441210061311722 + -0.4213910102844238 + <_> + + <_> + + + + <_>6 0 11 10 -1. + <_>6 5 11 5 2. + 0 + -0.0944639965891838 + 0.5068259835243225 + -0.2047889977693558 + <_> + + <_> + + + + <_>12 7 4 12 -1. + <_>12 13 4 6 2. + 0 + -0.0154269998893142 + -0.1581130027770996 + 0.1780689954757690 + <_> + + <_> + + + + <_>7 2 9 4 -1. + <_>7 4 9 2 2. + 0 + -4.0540001355111599e-003 + -0.5436670184135437 + 0.0312350001186132 + <_> + + <_> + + + + <_>6 0 13 6 -1. + <_>6 2 13 2 3. 
+ 0 + 3.0080000869929790e-003 + -0.1737679988145828 + 0.3044170141220093 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>10 6 2 9 2. + <_>12 15 2 9 2. + 0 + -0.0100919995456934 + 0.2510380148887634 + -0.2622410058975220 + <_> + + <_> + + + + <_>10 8 6 9 -1. + <_>12 8 2 9 3. + 0 + -0.0388180017471313 + 0.9322670102119446 + 0.0726599991321564 + <_> + + <_> + + + + <_>3 18 10 6 -1. + <_>3 20 10 2 3. + 0 + 0.0346519984304905 + -0.0339349992573261 + -0.8570790290832520 + <_> + + <_> + + + + <_>4 14 20 3 -1. + <_>4 15 20 1 3. + 0 + -4.6729999594390392e-003 + 0.3496930003166199 + -0.0485179983079433 + <_> + + <_> + + + + <_>2 15 9 6 -1. + <_>2 17 9 2 3. + 0 + 6.8499997723847628e-004 + 0.0665730014443398 + -0.4497379958629608 + <_> + + <_> + + + + <_>13 0 4 19 -1. + <_>13 0 2 19 2. + 0 + 0.0353170000016689 + 0.1427579969167709 + -0.4672639966011047 + <_> + + <_> + + + + <_>7 0 4 19 -1. + <_>9 0 2 19 2. + 0 + -0.0235699992626905 + -1.0286079645156860 + -0.0452880002558231 + <_> + + <_> + + + + <_>1 4 22 2 -1. + <_>1 5 22 1 2. + 0 + -1.9109999993816018e-003 + -0.1965219974517822 + 0.2866100072860718 + <_> + + <_> + + + + <_>0 0 9 6 -1. + <_>0 2 9 2 3. + 0 + -0.0166590008884668 + -0.7753220200538635 + -8.3280000835657120e-003 + <_> + + <_> + + + + <_>0 0 24 18 -1. + <_>0 9 24 9 2. + 0 + 0.6606220006942749 + 0.1323249936103821 + -3.5266680717468262 + <_> + + <_> + + + + <_>3 2 16 8 -1. + <_>3 6 16 4 2. + 0 + 0.1097059994935989 + -0.1554719954729080 + 1.4674140214920044 + <_> + + <_> + + + + <_>3 6 18 6 -1. + <_>3 8 18 2 3. + 0 + 0.0135009996592999 + 0.1523340046405792 + -1.3020930290222168 + <_> + + <_> + + + + <_>3 1 6 10 -1. + <_>5 1 2 10 3. + 0 + -0.0228719990700483 + -0.7132599949836731 + -8.7040001526474953e-003 + <_> + + <_> + + + + <_>13 0 9 6 -1. + <_>16 0 3 6 3. + 0 + -0.0818210020661354 + 1.1127580404281616 + 0.0832199975848198 + <_> + + <_> + + + + <_>2 0 9 6 -1. + <_>5 0 3 6 3. + 0 + -0.0527280010282993 + 0.9316509962081909 + -0.1710399985313416 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0252420008182526 + -0.1973379999399185 + 0.2535940110683441 + <_> + + <_> + + + + <_>6 0 7 10 -1. + <_>6 5 7 5 2. + 0 + -0.0438189990818501 + 0.4181520044803619 + -0.2458550035953522 + <_> + + <_> + + + + <_>2 2 20 4 -1. + <_>12 2 10 2 2. + <_>2 4 10 2 2. + 0 + -0.0181889999657869 + -0.5174319744110107 + 0.2017419934272766 + <_> + + <_> + + + + <_>2 11 19 3 -1. + <_>2 12 19 1 3. + 0 + 0.0234660003334284 + -0.0430710017681122 + -1.0636579990386963 + <_> + + <_> + + + + <_>10 8 6 9 -1. + <_>12 8 2 9 3. + 0 + 0.0342160016298294 + 0.0537809990346432 + 0.4970720112323761 + <_> + + <_> + + + + <_>8 8 6 9 -1. + <_>10 8 2 9 3. + 0 + 0.0256929993629456 + -0.2380010038614273 + 0.4165149927139282 + <_> + + <_> + + + + <_>13 8 4 9 -1. + <_>13 8 2 9 2. + 0 + -0.0265650004148483 + -0.8857480287551880 + 0.1336590051651001 + <_> + + <_> + + + + <_>3 11 9 9 -1. + <_>6 11 3 9 3. + 0 + 0.0609420016407967 + -0.2066970020532608 + 0.5830900073051453 + <_> + + <_> + + + + <_>3 9 18 5 -1. + <_>9 9 6 5 3. + 0 + 0.1447450071573257 + 0.1328230053186417 + -3.1449348926544189 + <_> + + <_> + + + + <_>2 4 2 20 -1. + <_>2 14 2 10 2. + 0 + 0.0534109994769096 + -0.1732520014047623 + 0.6919069886207581 + <_> + + <_> + + + + <_>14 17 8 6 -1. + <_>14 20 8 3 2. + 0 + 0.0114080002531409 + 0.0548220016062260 + 0.3024039864540100 + <_> + + <_> + + + + <_>3 21 18 2 -1. + <_>3 22 18 1 2. + 0 + -2.3179999552667141e-003 + 0.1582089960575104 + -0.3197320103645325 + <_> + + <_> + + + + <_>5 4 15 6 -1. + <_>10 4 5 6 3. 
+ 0 + -0.0296950004994869 + 0.7127479910850525 + 0.0581360012292862 + <_> + + <_> + + + + <_>2 15 12 6 -1. + <_>2 17 12 2 3. + 0 + 0.0272499993443489 + -0.1575410068035126 + 0.9214379787445068 + <_> + + <_> + + + + <_>17 8 6 9 -1. + <_>17 11 6 3 3. + 0 + -3.6200000904500484e-003 + -0.3454839885234833 + 0.2022099941968918 + <_> + + <_> + + + + <_>2 12 20 4 -1. + <_>2 12 10 2 2. + <_>12 14 10 2 2. + 0 + -0.0125789996236563 + -0.5565029978752136 + 0.0203889999538660 + <_> + + <_> + + + + <_>0 17 24 6 -1. + <_>0 19 24 2 3. + 0 + -0.0888490006327629 + -3.6100010871887207 + 0.1316419988870621 + <_> + + <_> + + + + <_>7 16 9 4 -1. + <_>7 18 9 2 2. + 0 + -0.0192569997161627 + 0.5190899968147278 + -0.1928430050611496 + <_> + + <_> + + + + <_>15 1 4 22 -1. + <_>17 1 2 11 2. + <_>15 12 2 11 2. + 0 + -0.0166669990867376 + -0.0874999985098839 + 0.1581249982118607 + <_> + + <_> + + + + <_>5 1 4 22 -1. + <_>5 1 2 11 2. + <_>7 12 2 11 2. + 0 + 0.0129319997504354 + 0.0274059996008873 + -0.5512390136718750 + <_> + + <_> + + + + <_>11 13 8 9 -1. + <_>11 16 8 3 3. + 0 + -0.0134319998323917 + 0.2345779985189438 + -0.0432350002229214 + <_> + + <_> + + + + <_>6 1 6 9 -1. + <_>8 1 2 9 3. + 0 + 0.0188100002706051 + -0.0396809987723827 + -0.9437329769134522 + <_> + + <_> + + + + <_>11 4 3 18 -1. + <_>11 10 3 6 3. + 0 + -6.4349998719990253e-003 + 0.4570370018482208 + -4.0520001202821732e-003 + <_> + + <_> + + + + <_>5 8 12 6 -1. + <_>5 8 6 3 2. + <_>11 11 6 3 2. + 0 + -0.0242490004748106 + -0.7624800205230713 + -0.0198570005595684 + <_> + + <_> + + + + <_>15 7 5 8 -1. + <_>15 11 5 4 2. + 0 + -0.0296679995954037 + -3.7412509918212891 + 0.1125060021877289 + <_> + + <_> + + + + <_>4 7 5 8 -1. + <_>4 11 5 4 2. + 0 + 5.1150000654160976e-003 + -0.6378179788589478 + 0.0112239997833967 + <_> + + <_> + + + + <_>12 6 6 12 -1. + <_>15 6 3 6 2. + <_>12 12 3 6 2. + 0 + -5.7819997891783714e-003 + 0.1937440037727356 + -0.0820420011878014 + <_> + + <_> + + + + <_>6 6 6 12 -1. + <_>6 6 3 6 2. + <_>9 12 3 6 2. + 0 + 0.0166069995611906 + -0.1619209945201874 + 1.1334990262985229 + <_> + + <_> + + + + <_>5 9 14 8 -1. + <_>12 9 7 4 2. + <_>5 13 7 4 2. + 0 + 0.0382280014455318 + 0.0211050007492304 + 0.7626420259475708 + <_> + + <_> + + + + <_>9 1 3 14 -1. + <_>9 8 3 7 2. + 0 + -0.0570940002799034 + -1.6974929571151733 + -0.0597620010375977 + <_> + + <_> + + + + <_>12 6 6 12 -1. + <_>12 10 6 4 3. + 0 + -0.0538830012083054 + 1.1850190162658691 + 0.0909669995307922 + <_> + + <_> + + + + <_>4 5 4 18 -1. + <_>4 5 2 9 2. + <_>6 14 2 9 2. + 0 + -2.6110000908374786e-003 + -0.4094119966030121 + 0.0838209986686707 + <_> + + <_> + + + + <_>4 6 16 18 -1. + <_>4 12 16 6 3. + 0 + 0.2971439957618713 + 0.1552989929914475 + -1.0995409488677979 + <_> + + <_> + + + + <_>5 4 7 20 -1. + <_>5 14 7 10 2. + 0 + -0.0890630036592484 + 0.4894720017910004 + -0.2004120051860809 + <_> + + <_> + + + + <_>14 8 8 12 -1. + <_>14 14 8 6 2. + 0 + -0.0561930015683174 + -0.2458139955997467 + 0.1436550021171570 + <_> + + <_> + + + + <_>9 10 6 14 -1. + <_>9 10 3 7 2. + <_>12 17 3 7 2. + 0 + 0.0370049998164177 + -0.0481689982116222 + -1.2310709953308105 + <_> + + <_> + + + + <_>9 5 9 6 -1. + <_>12 5 3 6 3. + 0 + -8.4840003401041031e-003 + 0.4337260127067566 + 0.0137799996882677 + <_> + + <_> + + + + <_>9 4 3 18 -1. + <_>10 4 1 18 3. + 0 + -2.4379999376833439e-003 + 0.1894969940185547 + -0.3229419887065888 + <_> + + <_> + + + + <_>1 4 22 14 -1. + <_>12 4 11 7 2. + <_>1 11 11 7 2. 
+ 0 + -0.0716399997472763 + -0.4397900104522705 + 0.2273019999265671 + <_> + + <_> + + + + <_>2 7 18 2 -1. + <_>2 8 18 1 2. + 0 + 5.2260002121329308e-003 + -0.2054840028285980 + 0.5093330144882202 + <_> + + <_> + + + + <_>12 6 6 12 -1. + <_>12 10 6 4 3. + 0 + -6.1360001564025879e-003 + 0.3115719854831696 + 0.0706809982657433 + <_> + + <_> + + + + <_>6 5 9 7 -1. + <_>9 5 3 7 3. + 0 + 0.0155950002372265 + -0.3093479871749878 + 0.1562770009040833 + <_> + + <_> + + + + <_>12 7 4 12 -1. + <_>12 13 4 6 2. + 0 + 0.0259959995746613 + 0.1382160037755966 + -0.1761659979820252 + <_> + + <_> + + + + <_>8 7 4 12 -1. + <_>8 13 4 6 2. + 0 + -0.0120850000530481 + -0.5107020139694214 + 0.0584409981966019 + <_> + + <_> + + + + <_>7 2 10 22 -1. + <_>7 13 10 11 2. + 0 + -0.0678360015153885 + 0.4775710105895996 + -0.0714460015296936 + <_> + + <_> + + + + <_>0 1 3 20 -1. + <_>1 1 1 20 3. + 0 + -0.0147150000557303 + 0.4523890018463135 + -0.1986140012741089 + <_> + + <_> + + + + <_>4 13 18 4 -1. + <_>13 13 9 2 2. + <_>4 15 9 2 2. + 0 + 0.0251189991831779 + 0.1295489966869354 + -0.8626639842987061 + <_> + + <_> + + + + <_>2 13 18 4 -1. + <_>2 13 9 2 2. + <_>11 15 9 2 2. + 0 + 0.0188260003924370 + -0.0415700003504753 + -1.1354700326919556 + <_> + + <_> + + + + <_>15 15 9 6 -1. + <_>15 17 9 2 3. + 0 + -0.0212639998644590 + -0.3473800122737885 + 0.1577949970960617 + <_> + + <_> + + + + <_>0 15 9 6 -1. + <_>0 17 9 2 3. + 0 + 9.4609996303915977e-003 + 4.8639997839927673e-003 + -0.6165480017662048 + <_> + + <_> + + + + <_>6 0 18 24 -1. + <_>15 0 9 12 2. + <_>6 12 9 12 2. + 0 + 0.2295770049095154 + 0.0813729986548424 + 0.6984140276908875 + <_> + + <_> + + + + <_>6 6 6 12 -1. + <_>6 10 6 4 3. + 0 + -0.0380619987845421 + 1.1616369485855103 + -0.1497669965028763 + <_> + + <_> + + + + <_>8 7 10 4 -1. + <_>8 9 10 2 2. + 0 + -0.0134849995374680 + -0.3203639984130859 + 0.1736509948968887 + <_> + + <_> + + + + <_>1 9 18 6 -1. + <_>1 9 9 3 2. + <_>10 12 9 3 2. + 0 + 0.0362389981746674 + -0.1815849989652634 + 0.6195669770240784 + <_> + + <_> + + + + <_>6 6 18 3 -1. + <_>6 7 18 1 3. + 0 + 6.7210001870989799e-003 + 7.9600000753998756e-004 + 0.4244140088558197 + <_> + + <_> + + + + <_>7 7 9 8 -1. + <_>10 7 3 8 3. + 0 + 0.0965259969234467 + -0.1469680070877075 + 1.2525680065155029 + <_> + + <_> + + + + <_>10 12 6 12 -1. + <_>12 12 2 12 3. + 0 + -0.0356569997966290 + -0.3978169858455658 + 0.1419139951467514 + <_> + + <_> + + + + <_>3 14 18 3 -1. + <_>3 15 18 1 3. + 0 + 0.0107720000669360 + -0.1819400042295456 + 0.5976219773292542 + <_> + + <_> + + + + <_>15 17 9 7 -1. + <_>18 17 3 7 3. + 0 + 0.0792799964547157 + 0.1464249938726425 + -0.7883689999580383 + <_> + + <_> + + + + <_>1 12 10 6 -1. + <_>1 14 10 2 3. + 0 + 0.0328410007059574 + -0.0624080002307892 + -1.4227490425109863 + <_> + + <_> + + + + <_>15 17 9 7 -1. + <_>18 17 3 7 3. + 0 + -0.0277810003608465 + 0.3403309881687164 + 0.0306700002402067 + <_> + + <_> + + + + <_>10 3 3 19 -1. + <_>11 3 1 19 3. + 0 + -4.0339999832212925e-003 + 0.3108470141887665 + -0.2259570062160492 + <_> + + <_> + + + + <_>15 17 9 7 -1. + <_>18 17 3 7 3. + 0 + 7.4260002002120018e-003 + -0.0389369986951351 + 0.3170210123062134 + <_> + + <_> + + + + <_>6 1 11 9 -1. + <_>6 4 11 3 3. + 0 + 0.1121399998664856 + -0.1757829934358597 + 0.6505659818649292 + <_> + + <_> + + + + <_>15 17 9 7 -1. + <_>18 17 3 7 3. + 0 + -0.1187810003757477 + -1.0092990398406982 + 0.1106970012187958 + <_> + + <_> + + + + <_>6 5 11 6 -1. + <_>6 8 11 3 2. 
+ 0 + -0.0415849983692169 + -0.5380640029907227 + 0.0199050009250641 + <_> + + <_> + + + + <_>16 7 8 5 -1. + <_>16 7 4 5 2. + 0 + -0.0279660001397133 + 0.4814319908618927 + 0.0335909985005856 + <_> + + <_> + + + + <_>2 4 20 19 -1. + <_>12 4 10 19 2. + 0 + -0.1250640004873276 + 0.2635219991207123 + -0.2573789954185486 + <_> + + <_> + + + + <_>2 1 21 6 -1. + <_>9 1 7 6 3. + 0 + 0.2366690039634705 + 0.0365080013871193 + 0.9065560102462769 + <_> + + <_> + + + + <_>6 5 12 14 -1. + <_>6 5 6 7 2. + <_>12 12 6 7 2. + 0 + -0.0294759999960661 + -0.6004880070686340 + 9.5880003646016121e-003 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + 0.0377929992973804 + 0.1550620049238205 + -0.9573349952697754 + <_> + + <_> + + + + <_>2 11 8 5 -1. + <_>6 11 4 5 2. + 0 + 0.0720440000295639 + -0.1452589929103851 + 1.3676730394363403 + <_> + + <_> + + + + <_>16 7 8 5 -1. + <_>16 7 4 5 2. + 0 + 9.7759999334812164e-003 + 0.0129159996286035 + 0.2164089977741242 + <_> + + <_> + + + + <_>0 7 8 5 -1. + <_>4 7 4 5 2. + 0 + 0.0521540008485317 + -0.0163599997758865 + -0.8835629820823669 + <_> + + <_> + + + + <_>15 17 9 7 -1. + <_>18 17 3 7 3. + 0 + -0.0437909997999668 + 0.3582960069179535 + 0.0651310011744499 + <_> + + <_> + + + + <_>8 6 8 10 -1. + <_>8 6 4 5 2. + <_>12 11 4 5 2. + 0 + -0.0383789986371994 + 1.1961040496826172 + -0.1497150063514710 + <_> + + <_> + + + + <_>15 15 9 9 -1. + <_>18 15 3 9 3. + 0 + -0.0988389998674393 + -0.6183400154113770 + 0.1278620064258575 + <_> + + <_> + + + + <_>0 15 9 9 -1. + <_>3 15 3 9 3. + 0 + -0.1219070032238960 + -1.8276120424270630 + -0.0648629963397980 + <_> + + <_> + + + + <_>12 10 9 7 -1. + <_>15 10 3 7 3. + 0 + -0.1198170036077499 + -30. + 0.1132330000400543 + <_> + + <_> + + + + <_>3 10 9 7 -1. + <_>6 10 3 7 3. + 0 + 0.0309100002050400 + -0.2393400073051453 + 0.3633289933204651 + <_> + + <_> + + + + <_>13 15 10 8 -1. + <_>18 15 5 4 2. + <_>13 19 5 4 2. + 0 + 0.0108009995892644 + -0.0351400002837181 + 0.2770789861679077 + <_> + + <_> + + + + <_>0 1 6 12 -1. + <_>0 1 3 6 2. + <_>3 7 3 6 2. + 0 + 0.0568449981510639 + -0.1552429944276810 + 1.0802700519561768 + <_> + + <_> + + + + <_>10 0 6 12 -1. + <_>13 0 3 6 2. + <_>10 6 3 6 2. + 0 + 1.0280000278726220e-003 + -0.0612029992043972 + 0.2050800025463104 + <_> + + <_> + + + + <_>7 0 10 12 -1. + <_>7 0 5 6 2. + <_>12 6 5 6 2. + 0 + -0.0282739996910095 + -0.6477800011634827 + 0.0239170007407665 + <_> + + <_> + + + + <_>4 1 16 8 -1. + <_>4 1 8 8 2. + 0 + -0.1601359993219376 + 1.0892050266265869 + 0.0583890005946159 + <_> + + <_> + + + + <_>0 21 19 3 -1. + <_>0 22 19 1 3. + 0 + 4.9629998393356800e-003 + -0.2580629885196686 + 0.2083459943532944 + <_> + + <_> + + + + <_>6 9 18 4 -1. + <_>15 9 9 2 2. + <_>6 11 9 2 2. + 0 + 0.0469370000064373 + 0.1388629972934723 + -1.5662620067596436 + <_> + + <_> + + + + <_>3 4 9 6 -1. + <_>3 6 9 2 3. + 0 + 0.0242860000580549 + -0.2072830051183701 + 0.5243099927902222 + <_> + + <_> + + + + <_>9 1 6 15 -1. + <_>9 6 6 5 3. + 0 + 0.0702020004391670 + 0.1479689925909042 + -1.3095090389251709 + <_> + + <_> + + + + <_>5 9 6 6 -1. + <_>8 9 3 6 2. + 0 + 9.8120002076029778e-003 + 0.0279060006141663 + -0.5086460113525391 + <_> + + <_> + + + + <_>5 1 14 9 -1. + <_>5 4 14 3 3. + 0 + -0.0562009997665882 + 1.2618130445480347 + 0.0638019964098930 + <_> + + <_> + + + + <_>3 0 8 20 -1. + <_>3 0 4 10 2. + <_>7 10 4 10 2. + 0 + 0.1098280027508736 + -0.1285009980201721 + 3.0776169300079346 + -3.2573320865631104 + 21 + -1 + <_> + + + <_> + + <_> + + + + <_>5 0 7 9 -1. + <_>5 3 7 3 3. 
+ 0 + 0.0209100004285574 + -0.6855940222740173 + 0.3898429870605469 + <_> + + <_> + + + + <_>6 6 12 5 -1. + <_>10 6 4 5 3. + 0 + 0.0350320003926754 + -0.4772439897060394 + 0.4502719938755035 + <_> + + <_> + + + + <_>0 1 8 14 -1. + <_>4 1 4 14 2. + 0 + 0.0397990010678768 + -0.4701110124588013 + 0.4270249903202057 + <_> + + <_> + + + + <_>2 12 22 4 -1. + <_>2 14 22 2 2. + 0 + -4.8409998416900635e-003 + 0.2561430037021637 + -0.6655629873275757 + <_> + + <_> + + + + <_>8 17 6 6 -1. + <_>8 20 6 3 2. + 0 + 2.3439999204128981e-003 + -0.4808349907398224 + 0.2801379859447479 + <_> + + <_> + + + + <_>18 1 6 7 -1. + <_>18 1 3 7 2. + 0 + 0.0253129992634058 + -0.2394820004701614 + 0.4419179856777191 + <_> + + <_> + + + + <_>0 0 6 6 -1. + <_>3 0 3 6 2. + 0 + -0.0321930013597012 + 0.7608669996261597 + -0.2505910098552704 + <_> + + <_> + + + + <_>4 6 17 18 -1. + <_>4 12 17 6 3. + 0 + 0.0754090026021004 + -0.3497459888458252 + 0.3438029885292053 + <_> + + <_> + + + + <_>6 0 12 6 -1. + <_>6 0 6 3 2. + <_>12 3 6 3 2. + 0 + -0.0184690002351999 + -0.7908560037612915 + 0.0347880013287067 + <_> + + <_> + + + + <_>4 7 18 4 -1. + <_>13 7 9 2 2. + <_>4 9 9 2 2. + 0 + -0.0128020001575351 + 0.4710780084133148 + -0.0600060001015663 + <_> + + <_> + + + + <_>4 12 10 6 -1. + <_>4 14 10 2 3. + 0 + -0.0265980008989573 + 0.6711609959602356 + -0.2425750046968460 + <_> + + <_> + + + + <_>7 9 10 12 -1. + <_>12 9 5 6 2. + <_>7 15 5 6 2. + 0 + 0.0219889990985394 + 0.2471749931573868 + -0.4830169975757599 + <_> + + <_> + + + + <_>0 1 24 3 -1. + <_>8 1 8 3 3. + 0 + 0.1465409994125366 + -0.2150409966707230 + 0.7205590009689331 + <_> + + <_> + + + + <_>13 11 6 6 -1. + <_>13 11 3 6 2. + 0 + 3.5310001112520695e-003 + 0.2793099880218506 + -0.3433989882469177 + <_> + + <_> + + + + <_>5 11 6 6 -1. + <_>8 11 3 6 2. + 0 + 9.4010001048445702e-003 + 0.0558619983494282 + -0.8214359879493713 + <_> + + <_> + + + + <_>3 10 19 3 -1. + <_>3 11 19 1 3. + 0 + -8.6390003561973572e-003 + -0.9962059855461121 + 0.1887499988079071 + <_> + + <_> + + + + <_>0 2 6 9 -1. + <_>0 5 6 3 3. + 0 + -0.0391930006444454 + -1.1945559978485107 + -0.0291980002075434 + <_> + + <_> + + + + <_>14 16 10 6 -1. + <_>14 18 10 2 3. + 0 + 0.0248550008982420 + 0.1498759984970093 + -0.5413780212402344 + <_> + + <_> + + + + <_>0 16 10 6 -1. + <_>0 18 10 2 3. + 0 + -0.0349950008094311 + -1.4210180044174194 + -0.0423140004277229 + <_> + + <_> + + + + <_>14 13 9 6 -1. + <_>14 15 9 2 3. + 0 + -0.0183789990842342 + -0.2824259996414185 + 0.1558180004358292 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + -0.0135920001193881 + 0.4731709957122803 + -0.2193720042705536 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + 6.2629999592900276e-003 + -0.0597140006721020 + 0.6062589883804321 + <_> + + <_> + + + + <_>0 18 9 6 -1. + <_>0 20 9 2 3. + 0 + -0.0184780005365610 + -0.8564720153808594 + -0.0137839997187257 + <_> + + <_> + + + + <_>14 13 9 6 -1. + <_>14 15 9 2 3. + 0 + 0.0142360003665090 + 0.1665479987859726 + -0.2771399915218353 + <_> + + <_> + + + + <_>6 2 6 9 -1. + <_>8 2 2 9 3. + 0 + -0.0325470007956028 + -1.1728240251541138 + -0.0401850007474422 + <_> + + <_> + + + + <_>15 8 4 12 -1. + <_>15 8 2 12 2. + 0 + -2.6410000864416361e-003 + 0.2651430070400238 + -0.0563430003821850 + <_> + + <_> + + + + <_>8 13 8 8 -1. + <_>8 17 8 4 2. + 0 + -8.7799999164417386e-004 + 0.0365560017526150 + -0.5507519841194153 + <_> + + <_> + + + + <_>4 20 18 3 -1. + <_>10 20 6 3 3. 
+ 0 + 0.0473719984292984 + -0.0426140017807484 + 0.4819490015506744 + <_> + + <_> + + + + <_>5 8 4 12 -1. + <_>7 8 2 12 2. + 0 + -7.0790001191198826e-003 + 0.2869899868965149 + -0.3292300105094910 + <_> + + <_> + + + + <_>7 7 12 3 -1. + <_>7 7 6 3 2. + 0 + -0.0431459993124008 + -1.4065419435501099 + 0.1283639967441559 + <_> + + <_> + + + + <_>10 6 4 9 -1. + <_>12 6 2 9 2. + 0 + 0.0205920003354549 + -0.2143529951572418 + 0.5398179888725281 + <_> + + <_> + + + + <_>5 20 18 3 -1. + <_>11 20 6 3 3. + 0 + -0.0223670005798340 + 0.3371829986572266 + 0.0452120006084442 + <_> + + <_> + + + + <_>1 20 18 3 -1. + <_>7 20 6 3 3. + 0 + 0.0500399991869926 + -0.2512170076370239 + 0.4175049960613251 + <_> + + <_> + + + + <_>18 1 6 20 -1. + <_>21 1 3 10 2. + <_>18 11 3 10 2. + 0 + 0.0617949999868870 + 0.0400849990546703 + 0.6877980232238770 + <_> + + <_> + + + + <_>0 1 6 20 -1. + <_>0 1 3 10 2. + <_>3 11 3 10 2. + 0 + -0.0418619997799397 + 0.5302739739418030 + -0.2290199995040894 + <_> + + <_> + + + + <_>13 3 4 18 -1. + <_>15 3 2 9 2. + <_>13 12 2 9 2. + 0 + -3.1959998887032270e-003 + 0.2516149878501892 + -0.2151460051536560 + <_> + + <_> + + + + <_>0 2 6 12 -1. + <_>0 6 6 4 3. + 0 + 0.0242550000548363 + 7.2320001199841499e-003 + -0.7251909971237183 + <_> + + <_> + + + + <_>12 9 12 6 -1. + <_>18 9 6 3 2. + <_>12 12 6 3 2. + 0 + -0.0173039995133877 + -0.4995819926261902 + 0.1839450001716614 + <_> + + <_> + + + + <_>7 3 4 18 -1. + <_>7 3 2 9 2. + <_>9 12 2 9 2. + 0 + -4.1470001451671124e-003 + 0.0852119997143745 + -0.4636470079421997 + <_> + + <_> + + + + <_>14 0 6 9 -1. + <_>16 0 2 9 3. + 0 + -0.0143699999898672 + -0.5225890278816223 + 0.2389259934425354 + <_> + + <_> + + + + <_>0 9 12 6 -1. + <_>0 9 6 3 2. + <_>6 12 6 3 2. + 0 + -9.0399999171495438e-003 + -0.6325039863586426 + 0.0325510017573833 + <_> + + <_> + + + + <_>14 4 8 20 -1. + <_>18 4 4 10 2. + <_>14 14 4 10 2. + 0 + -0.1237310022115707 + 1.2856210470199585 + 0.0765450000762939 + <_> + + <_> + + + + <_>2 4 8 20 -1. + <_>2 4 4 10 2. + <_>6 14 4 10 2. + 0 + -0.0822219997644424 + 0.8320819735527039 + -0.1859059929847717 + <_> + + <_> + + + + <_>14 13 9 6 -1. + <_>14 15 9 2 3. + 0 + 0.0656590014696121 + 0.1129880025982857 + -30. + <_> + + <_> + + + + <_>1 13 9 6 -1. + <_>1 15 9 2 3. + 0 + -0.0315829999744892 + -1.3485900163650513 + -0.0470970012247562 + <_> + + <_> + + + + <_>3 15 18 3 -1. + <_>9 15 6 3 3. + 0 + -0.0796360000967979 + -1.3533639907836914 + 0.1566880047321320 + <_> + + <_> + + + + <_>5 13 9 6 -1. + <_>5 15 9 2 3. + 0 + -0.0188800003379583 + 0.4030030071735382 + -0.2514890134334564 + <_> + + <_> + + + + <_>5 0 18 3 -1. + <_>5 1 18 1 3. + 0 + -5.0149997696280479e-003 + -0.2628709971904755 + 0.1858250051736832 + <_> + + <_> + + + + <_>8 2 6 7 -1. + <_>11 2 3 7 2. + 0 + -0.0122180003672838 + 0.5869240164756775 + -0.1942770034074783 + <_> + + <_> + + + + <_>9 1 9 6 -1. + <_>12 1 3 6 3. + 0 + 1.2710000155493617e-003 + -0.1668899953365326 + 0.2300689965486527 + <_> + + <_> + + + + <_>6 1 9 6 -1. + <_>9 1 3 6 3. + 0 + 0.0297439992427826 + 0.0125200003385544 + -0.6672359704971314 + <_> + + <_> + + + + <_>5 6 14 6 -1. + <_>12 6 7 3 2. + <_>5 9 7 3 2. + 0 + 0.0281750001013279 + -0.0170600004494190 + 0.6457939743995667 + <_> + + <_> + + + + <_>8 2 6 13 -1. + <_>10 2 2 13 3. + 0 + 0.0303450003266335 + -0.2417870014905930 + 0.3487890064716339 + <_> + + <_> + + + + <_>6 11 12 6 -1. + <_>12 11 6 3 2. + <_>6 14 6 3 2. + 0 + -0.0173259992152452 + -0.5359939932823181 + 0.2099599987268448 + <_> + + <_> + + + + <_>3 1 18 15 -1. + <_>9 1 6 15 3. 
+ 0 + -0.0841780006885529 + 0.7509329915046692 + -0.1759320050477982 + <_> + + <_> + + + + <_>13 0 6 7 -1. + <_>13 0 3 7 2. + 0 + 7.4950000271201134e-003 + -0.1618809998035431 + 0.3065750002861023 + <_> + + <_> + + + + <_>3 3 16 6 -1. + <_>3 6 16 3 2. + 0 + 0.0564949996769428 + -0.1731880009174347 + 1.0016150474548340 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + -5.2939997985959053e-003 + 0.2341759949922562 + -0.0653470009565353 + <_> + + <_> + + + + <_>7 7 6 9 -1. + <_>9 7 2 9 3. + 0 + -0.0149450004100800 + 0.2501890063285828 + -0.3059119880199432 + <_> + + <_> + + + + <_>13 0 4 24 -1. + <_>13 0 2 24 2. + 0 + 0.0549190007150173 + 0.1312199980020523 + -0.9376509785652161 + <_> + + <_> + + + + <_>7 0 4 24 -1. + <_>9 0 2 24 2. + 0 + -0.0197219997644424 + -0.8397849798202515 + -0.0234730001538992 + <_> + + <_> + + + + <_>11 9 5 12 -1. + <_>11 13 5 4 3. + 0 + -0.0671589970588684 + 2.3586840629577637 + 0.0829709991812706 + <_> + + <_> + + + + <_>7 15 9 6 -1. + <_>7 17 9 2 3. + 0 + -0.0143259996548295 + 0.1881449967622757 + -0.3122160136699677 + <_> + + <_> + + + + <_>5 7 18 6 -1. + <_>5 9 18 2 3. + 0 + 0.0298410002142191 + 0.1482509970664978 + -0.8468170166015625 + <_> + + <_> + + + + <_>8 9 5 12 -1. + <_>8 13 5 4 3. + 0 + 0.0518830008804798 + -0.0437310002744198 + -1.3366169929504395 + <_> + + <_> + + + + <_>4 17 17 6 -1. + <_>4 19 17 2 3. + 0 + 0.0411270000040531 + 0.1766009926795960 + -0.6090409755706787 + <_> + + <_> + + + + <_>0 3 18 14 -1. + <_>0 3 9 7 2. + <_>9 10 9 7 2. + 0 + -0.1286509931087494 + -0.9870100021362305 + -0.0377850010991097 + <_> + + <_> + + + + <_>0 1 24 2 -1. + <_>0 2 24 1 2. + 0 + 2.4170000106096268e-003 + -0.1611959934234619 + 0.3267570137977600 + <_> + + <_> + + + + <_>0 15 18 3 -1. + <_>0 16 18 1 3. + 0 + 7.7030002139508724e-003 + -0.2384150028228760 + 0.2931939959526062 + <_> + + <_> + + + + <_>9 0 6 9 -1. + <_>11 0 2 9 3. + 0 + 0.0455200001597404 + 0.1442459970712662 + -1.5010160207748413 + <_> + + <_> + + + + <_>3 3 14 12 -1. + <_>3 9 14 6 2. + 0 + -0.0787009969353676 + -1.0394560098648071 + -0.0453759990632534 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + 7.8619997948408127e-003 + 0.1963360011577606 + -0.1447239965200424 + <_> + + <_> + + + + <_>8 0 6 9 -1. + <_>10 0 2 9 3. + 0 + -0.0134589998051524 + -0.9063469767570496 + -0.0380490012466908 + <_> + + <_> + + + + <_>10 6 6 10 -1. + <_>12 6 2 10 3. + 0 + 0.0288270004093647 + -0.0294739995151758 + 0.6005839705467224 + <_> + + <_> + + + + <_>5 0 6 9 -1. + <_>7 0 2 9 3. + 0 + -0.0273659992963076 + -0.9980400204658508 + -0.0386530011892319 + <_> + + <_> + + + + <_>2 0 21 7 -1. + <_>9 0 7 7 3. + 0 + -0.0729179978370667 + 0.7336149811744690 + 0.0574400015175343 + <_> + + <_> + + + + <_>6 11 12 5 -1. + <_>10 11 4 5 3. + 0 + -0.0139889996498823 + 0.2789260149002075 + -0.2651630043983460 + <_> + + <_> + + + + <_>8 7 9 8 -1. + <_>11 7 3 8 3. + 0 + 0.0432429984211922 + 4.7760000452399254e-003 + 0.3592590093612671 + <_> + + <_> + + + + <_>9 6 6 18 -1. + <_>9 6 3 9 2. + <_>12 15 3 9 2. + 0 + 0.0295330006629229 + -0.2008399963378906 + 0.5120289921760559 + <_> + + <_> + + + + <_>15 14 8 10 -1. + <_>19 14 4 5 2. + <_>15 19 4 5 2. + 0 + -0.0318970009684563 + 0.6472169756889343 + -1.3760000001639128e-003 + <_> + + <_> + + + + <_>1 14 8 10 -1. + <_>1 14 4 5 2. + <_>5 19 4 5 2. + 0 + 0.0378689989447594 + -0.1836380064487457 + 0.6134309768676758 + <_> + + <_> + + + + <_>11 0 8 10 -1. + <_>15 0 4 5 2. + <_>11 5 4 5 2. 
+ 0 + -0.0224179998040199 + -0.2918789982795715 + 0.1819480061531067 + <_> + + <_> + + + + <_>5 0 8 10 -1. + <_>5 0 4 5 2. + <_>9 5 4 5 2. + 0 + 0.0589589998126030 + -0.0664519965648651 + -1.9290030002593994 + <_> + + <_> + + + + <_>6 1 12 5 -1. + <_>6 1 6 5 2. + 0 + 0.0312229990959167 + -0.0127320000901818 + 0.6156079769134522 + <_> + + <_> + + + + <_>1 12 18 2 -1. + <_>10 12 9 2 2. + 0 + 0.0374849997460842 + -0.2085690051317215 + 0.4436399936676025 + <_> + + <_> + + + + <_>2 8 20 6 -1. + <_>12 8 10 3 2. + <_>2 11 10 3 2. + 0 + -0.0209660008549690 + -0.3571279942989349 + 0.2425220012664795 + <_> + + <_> + + + + <_>7 6 9 7 -1. + <_>10 6 3 7 3. + 0 + -0.0254779998213053 + 1.0846560001373291 + -0.1505440026521683 + <_> + + <_> + + + + <_>10 5 8 16 -1. + <_>14 5 4 8 2. + <_>10 13 4 8 2. + 0 + -7.2570000775158405e-003 + 0.2130260020494461 + -0.1830819994211197 + <_> + + <_> + + + + <_>3 9 16 8 -1. + <_>3 9 8 4 2. + <_>11 13 8 4 2. + 0 + -0.0509830005466938 + 0.5173680186271668 + -0.1883309930562973 + <_> + + <_> + + + + <_>7 8 10 4 -1. + <_>7 8 5 4 2. + 0 + -0.0206400007009506 + -0.4403020143508911 + 0.2274599969387054 + <_> + + <_> + + + + <_>7 12 10 8 -1. + <_>7 12 5 4 2. + <_>12 16 5 4 2. + 0 + 0.0106729995459318 + 0.0350599996745586 + -0.5166500210762024 + <_> + + <_> + + + + <_>9 19 15 4 -1. + <_>14 19 5 4 3. + 0 + 0.0318959988653660 + 0.0132280001416802 + 0.3491519987583160 + <_> + + <_> + + + + <_>1 0 18 9 -1. + <_>7 0 6 9 3. + 0 + -0.0238249991089106 + 0.3411880135536194 + -0.2151020020246506 + <_> + + <_> + + + + <_>13 4 10 8 -1. + <_>18 4 5 4 2. + <_>13 8 5 4 2. + 0 + -6.0680001042783260e-003 + 0.3293739855289459 + -0.2852379977703095 + <_> + + <_> + + + + <_>3 16 18 4 -1. + <_>9 16 6 4 3. + 0 + 0.0238819997757673 + -0.2533380091190338 + 0.2629610002040863 + <_> + + <_> + + + + <_>8 7 10 12 -1. + <_>13 7 5 6 2. + <_>8 13 5 6 2. + 0 + 0.0279660001397133 + 0.1404909938573837 + -0.4988709986209869 + <_> + + <_> + + + + <_>6 7 10 12 -1. + <_>6 7 5 6 2. + <_>11 13 5 6 2. + 0 + 0.0146030001342297 + -0.0153959998860955 + -0.7695800065994263 + <_> + + <_> + + + + <_>4 6 18 7 -1. + <_>10 6 6 7 3. + 0 + 0.1087239980697632 + 0.1906960010528565 + -0.3239310085773468 + <_> + + <_> + + + + <_>0 17 18 3 -1. + <_>0 18 18 1 3. + 0 + -0.0140380002558231 + 0.3492470085620880 + -0.2235870063304901 + <_> + + <_> + + + + <_>3 17 18 3 -1. + <_>3 18 18 1 3. + 0 + 4.0440000593662262e-003 + -0.0383290015161037 + 0.5117729902267456 + <_> + + <_> + + + + <_>2 4 6 10 -1. + <_>4 4 2 10 3. + 0 + -4.9769999459385872e-003 + -0.4288829863071442 + 0.0491739995777607 + <_> + + <_> + + + + <_>16 0 8 24 -1. + <_>16 0 4 24 2. + 0 + -0.0851830020546913 + 0.6662459969520569 + 7.8079998493194580e-003 + <_> + + <_> + + + + <_>4 0 8 15 -1. + <_>8 0 4 15 2. + 0 + 2.1559998858720064e-003 + -0.4913519918918610 + 0.0695559978485107 + <_> + + <_> + + + + <_>16 0 8 24 -1. + <_>16 0 4 24 2. + 0 + 0.3638449907302856 + 0.1299709975719452 + -1.8949509859085083 + <_> + + <_> + + + + <_>1 4 18 9 -1. + <_>7 4 6 9 3. + 0 + 0.2208250015974045 + -0.0572119988501072 + -1.4281120300292969 + <_> + + <_> + + + + <_>15 12 9 6 -1. + <_>15 14 9 2 3. + 0 + -0.0161400008946657 + -0.5758939981460571 + 0.1806250065565109 + <_> + + <_> + + + + <_>3 9 18 6 -1. + <_>3 9 9 3 2. + <_>12 12 9 3 2. + 0 + -0.0483300015330315 + 0.9730849862098694 + -0.1651300042867661 + <_> + + <_> + + + + <_>18 5 6 9 -1. + <_>18 8 6 3 3. + 0 + 0.0175299998372793 + 0.1793269962072372 + -0.2794890105724335 + <_> + + <_> + + + + <_>0 5 6 9 -1. + <_>0 8 6 3 3. 
+ 0 + -0.0343099981546402 + -0.8107249736785889 + -0.0165960006415844 + <_> + + <_> + + + + <_>4 7 18 4 -1. + <_>13 7 9 2 2. + <_>4 9 9 2 2. + 0 + -4.5830002054572105e-003 + 0.2790899872779846 + -7.4519999325275421e-003 + <_> + + <_> + + + + <_>2 1 12 20 -1. + <_>2 1 6 10 2. + <_>8 11 6 10 2. + 0 + 0.1289640069007874 + -0.1350850015878677 + 2.5411539077758789 + <_> + + <_> + + + + <_>17 0 6 23 -1. + <_>17 0 3 23 2. + 0 + 0.0303610004484653 + -0.0684190019965172 + 0.2873409986495972 + <_> + + <_> + + + + <_>1 6 2 18 -1. + <_>1 15 2 9 2. + 0 + 0.0440860018134117 + -0.1813589930534363 + 0.6541320085525513 + <_> + + <_> + + + + <_>8 8 10 6 -1. + <_>8 10 10 2 3. + 0 + 3.0159999150782824e-003 + -0.1569049954414368 + 0.2696380019187927 + <_> + + <_> + + + + <_>0 6 20 6 -1. + <_>0 6 10 3 2. + <_>10 9 10 3 2. + 0 + -0.0263369996100664 + 0.2917560040950775 + -0.2527410089969635 + <_> + + <_> + + + + <_>11 12 12 5 -1. + <_>15 12 4 5 3. + 0 + -0.0278660003095865 + 0.4438750147819519 + 0.0550380013883114 + <_> + + <_> + + + + <_>0 4 3 19 -1. + <_>1 4 1 19 3. + 0 + 0.0117250001057982 + -0.1934649944305420 + 0.4665670096874237 + <_> + + <_> + + + + <_>19 1 3 18 -1. + <_>20 1 1 18 3. + 0 + 1.5689999563619494e-003 + -8.2360003143548965e-003 + 0.2570089995861054 + <_> + + <_> + + + + <_>2 1 3 18 -1. + <_>3 1 1 18 3. + 0 + -3.5550000611692667e-003 + -0.4243089854717255 + 0.0711740031838417 + <_> + + <_> + + + + <_>3 10 18 3 -1. + <_>9 10 6 3 3. + 0 + -0.0316950008273125 + -0.8539350032806397 + 0.1691620051860809 + <_> + + <_> + + + + <_>4 4 10 9 -1. + <_>9 4 5 9 2. + 0 + -0.0320970006287098 + 0.8378490209579468 + -0.1759729981422424 + <_> + + <_> + + + + <_>7 13 14 7 -1. + <_>7 13 7 7 2. + 0 + 0.1554419994354248 + 0.0995500013232231 + 2.3873300552368164 + <_> + + <_> + + + + <_>3 13 14 7 -1. + <_>10 13 7 7 2. + 0 + 0.0880459994077683 + -0.1872529983520508 + 0.6238430142402649 + <_> + + <_> + + + + <_>8 15 9 6 -1. + <_>11 15 3 6 3. + 0 + -1.6720000421628356e-003 + 0.2500869929790497 + -0.0651189982891083 + <_> + + <_> + + + + <_>4 14 8 10 -1. + <_>4 14 4 5 2. + <_>8 19 4 5 2. + 0 + 9.3409996479749680e-003 + -0.3537890017032623 + 0.1071500033140183 + <_> + + <_> + + + + <_>10 14 4 10 -1. + <_>10 19 4 5 2. + 0 + 0.0371380001306534 + 0.1638700067996979 + -0.9171839952468872 + <_> + + <_> + + + + <_>3 8 5 16 -1. + <_>3 16 5 8 2. + 0 + 0.0801839977502823 + -0.1481299996376038 + 1.4895190000534058 + <_> + + <_> + + + + <_>15 10 9 6 -1. + <_>15 12 9 2 3. + 0 + -7.9100002767518163e-004 + -0.2132689952850342 + 0.1967640072107315 + <_> + + <_> + + + + <_>0 10 9 6 -1. + <_>0 12 9 2 3. + 0 + -5.0400001928210258e-003 + -0.7131869792938232 + 1.8240000354126096e-003 + <_> + + <_> + + + + <_>6 7 12 9 -1. + <_>6 10 12 3 3. + 0 + 0.1196239963173866 + 0.0330989994108677 + 1.0441709756851196 + <_> + + <_> + + + + <_>9 10 5 8 -1. + <_>9 14 5 4 2. + 0 + -4.5280000194907188e-003 + -0.2730849981307983 + 0.2722980082035065 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + -0.0296390000730753 + 0.3622579872608185 + 0.0567950010299683 + <_> + + <_> + + + + <_>8 15 6 9 -1. + <_>10 15 2 9 3. + 0 + 0.0266500003635883 + -0.0480410009622574 + -0.9672350287437439 + <_> + + <_> + + + + <_>16 6 7 6 -1. + <_>16 9 7 3 2. + 0 + 0.0444220006465912 + 0.1305290013551712 + -0.3507730066776276 + <_> + + <_> + + + + <_>8 1 4 22 -1. + <_>10 1 2 22 2. + 0 + -0.0243599992245436 + -1.0766899585723877 + -0.0512229986488819 + <_> + + <_> + + + + <_>6 6 14 3 -1. + <_>6 6 7 3 2. 
+ 0 + 0.0197349991649389 + 0.0262380000203848 + 0.2807050049304962 + <_> + + <_> + + + + <_>0 18 19 3 -1. + <_>0 19 19 1 3. + 0 + 5.4930001497268677e-003 + -0.2611129879951477 + 0.2101140022277832 + <_> + + <_> + + + + <_>17 0 6 24 -1. + <_>17 0 3 24 2. + 0 + -0.2320030033588409 + -1.7748440504074097 + 0.1148260012269020 + <_> + + <_> + + + + <_>0 13 15 6 -1. + <_>5 13 5 6 3. + 0 + -0.0256140008568764 + 0.2990080118179321 + -0.2250249981880188 + <_> + + <_> + + + + <_>9 6 10 14 -1. + <_>14 6 5 7 2. + <_>9 13 5 7 2. + 0 + -6.4949998632073402e-003 + 0.1956380009651184 + -0.0997629985213280 + <_> + + <_> + + + + <_>1 6 8 10 -1. + <_>1 6 4 5 2. + <_>5 11 4 5 2. + 0 + 3.9840000681579113e-003 + -0.4302150011062622 + 0.0812610015273094 + <_> + + <_> + + + + <_>7 6 12 5 -1. + <_>7 6 6 5 2. + 0 + -0.0358130000531673 + -0.5098739862442017 + 0.1634590029716492 + <_> + + <_> + + + + <_>7 7 9 6 -1. + <_>10 7 3 6 3. + 0 + -0.0141690000891685 + 0.7797809839248657 + -0.1747629940509796 + <_> + + <_> + + + + <_>7 8 14 14 -1. + <_>14 8 7 7 2. + <_>7 15 7 7 2. + 0 + -0.1264210045337677 + -0.6304789781570435 + 0.1272830069065094 + <_> + + <_> + + + + <_>3 8 14 14 -1. + <_>3 8 7 7 2. + <_>10 15 7 7 2. + 0 + 0.0686779990792274 + -0.0464479997754097 + -1.1128979921340942 + <_> + + <_> + + + + <_>9 8 13 4 -1. + <_>9 10 13 2 2. + 0 + 0.0858649984002113 + 0.1183540001511574 + -4.8235158920288086 + <_> + + <_> + + + + <_>3 2 6 12 -1. + <_>3 2 3 6 2. + <_>6 8 3 6 2. + 0 + 0.0155119998380542 + -0.0174679998308420 + -0.6369339823722839 + <_> + + <_> + + + + <_>6 10 17 6 -1. + <_>6 13 17 3 2. + 0 + 0.0810910016298294 + 0.0861330032348633 + 2.4559431076049805 + <_> + + <_> + + + + <_>1 10 17 6 -1. + <_>1 13 17 3 2. + 0 + 0.0184950008988380 + 0.0402290001511574 + -0.5085819959640503 + <_> + + <_> + + + + <_>16 7 8 9 -1. + <_>16 10 8 3 3. + 0 + -0.0863209962844849 + -1.9006760120391846 + 0.1101910024881363 + <_> + + <_> + + + + <_>0 7 8 9 -1. + <_>0 10 8 3 3. + 0 + 0.0723550021648407 + -0.0621119998395443 + -1.4165179729461670 + <_> + + <_> + + + + <_>0 9 24 10 -1. + <_>12 9 12 5 2. + <_>0 14 12 5 2. + 0 + -0.0781790018081665 + 0.8884930014610291 + 0.0423699989914894 + <_> + + <_> + + + + <_>3 2 15 8 -1. + <_>8 2 5 8 3. + 0 + 0.0966819971799850 + -0.2209420055150986 + 0.3357509970664978 + <_> + + <_> + + + + <_>4 2 18 8 -1. + <_>10 2 6 8 3. + 0 + -0.0398759990930557 + 0.5780479907989502 + 0.0453479997813702 + <_> + + <_> + + + + <_>0 1 18 4 -1. + <_>0 1 9 2 2. + <_>9 3 9 2 2. + 0 + -9.5349997282028198e-003 + -0.5417569875717163 + 3.2399999909102917e-003 + <_> + + <_> + + + + <_>20 2 3 18 -1. + <_>21 2 1 18 3. + 0 + 4.0600000647827983e-004 + -0.0815490037202835 + 0.3583790063858032 + <_> + + <_> + + + + <_>1 3 3 19 -1. + <_>2 3 1 19 3. + 0 + 0.0121079999953508 + -0.2028039991855621 + 0.4376800060272217 + <_> + + <_> + + + + <_>18 8 6 16 -1. + <_>20 8 2 16 3. + 0 + -0.0208739992231131 + 0.4146989881992340 + -0.0455680005252361 + <_> + + <_> + + + + <_>0 8 6 16 -1. + <_>2 8 2 16 3. + 0 + 0.0578880012035370 + -0.0290099997073412 + -0.9182230234146118 + <_> + + <_> + + + + <_>8 18 11 6 -1. + <_>8 20 11 2 3. + 0 + 1.3200000103097409e-004 + -0.1177240014076233 + 0.2000000029802322 + <_> + + <_> + + + + <_>4 6 12 5 -1. + <_>8 6 4 5 3. + 0 + -0.0171370003372431 + 0.3300479948520660 + -0.2305520027875900 + <_> + + <_> + + + + <_>7 6 12 5 -1. + <_>11 6 4 5 3. + 0 + 0.0306550003588200 + -0.0215450003743172 + 0.2687819898128510 + <_> + + <_> + + + + <_>6 3 9 6 -1. + <_>9 3 3 6 3. 
+ 0 + -7.8699999721720815e-004 + -0.4410069882869721 + 0.0491579994559288 + <_> + + <_> + + + + <_>7 6 12 5 -1. + <_>7 6 6 5 2. + 0 + 0.0880369991064072 + 0.1178200021386147 + -2.8293309211730957 + <_> + + <_> + + + + <_>9 8 6 7 -1. + <_>12 8 3 7 2. + 0 + -0.0390289984643459 + 0.9177719950675964 + -0.1582739949226379 + <_> + + <_> + + + + <_>8 2 9 6 -1. + <_>11 2 3 6 3. + 0 + 0.0801059976220131 + 0.1128920018672943 + -1.9937280416488647 + <_> + + <_> + + + + <_>8 14 6 9 -1. + <_>8 17 6 3 3. + 0 + 0.0395389981567860 + -0.1435739994049072 + 1.3085240125656128 + <_> + + <_> + + + + <_>8 2 9 6 -1. + <_>11 2 3 6 3. + 0 + 0.0206840001046658 + 0.2004809975624085 + -0.0441869981586933 + <_> + + <_> + + + + <_>4 3 16 20 -1. + <_>4 3 8 10 2. + <_>12 13 8 10 2. + 0 + -0.0670379996299744 + 0.3261860013008118 + -0.2055040001869202 + <_> + + <_> + + + + <_>7 6 10 12 -1. + <_>12 6 5 6 2. + <_>7 12 5 6 2. + 0 + 0.0468150004744530 + 0.1582529991865158 + -0.9553509950637817 + <_> + + <_> + + + + <_>0 2 7 12 -1. + <_>0 6 7 4 3. + 0 + 0.0784439966082573 + -0.0746510028839111 + -2.1161499023437500 + <_> + + <_> + + + + <_>12 17 11 6 -1. + <_>12 19 11 2 3. + 0 + 0.0663800016045570 + 0.1164190024137497 + -1.6113519668579102 + <_> + + <_> + + + + <_>4 7 12 8 -1. + <_>4 7 6 4 2. + <_>10 11 6 4 2. + 0 + 0.0300539992749691 + -0.1656260043382645 + 0.7002540230751038 + <_> + + <_> + + + + <_>8 11 8 10 -1. + <_>12 11 4 5 2. + <_>8 16 4 5 2. + 0 + 0.0171199999749660 + 0.2262769937515259 + -0.4011499881744385 + <_> + + <_> + + + + <_>9 1 4 9 -1. + <_>11 1 2 9 2. + 0 + 0.0200730003416538 + -0.1938969939947128 + 0.4442029893398285 + <_> + + <_> + + + + <_>14 0 3 22 -1. + <_>15 0 1 22 3. + 0 + 0.0331019982695580 + 0.1163749992847443 + -1.5771679878234863 + <_> + + <_> + + + + <_>7 0 3 22 -1. + <_>8 0 1 22 3. + 0 + -0.0148820001631975 + -0.8968030214309692 + -0.0420100018382072 + <_> + + <_> + + + + <_>4 7 18 4 -1. + <_>13 7 9 2 2. + <_>4 9 9 2 2. + 0 + -0.0102810002863407 + 0.3560299873352051 + -0.0131240002810955 + <_> + + <_> + + + + <_>10 2 4 15 -1. + <_>10 7 4 5 3. + 0 + -0.0286950003355742 + -0.4603959918022156 + 0.0268019996583462 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + -4.7189998440444469e-003 + 0.2378879934549332 + -0.0655189976096153 + <_> + + <_> + + + + <_>0 0 18 13 -1. + <_>9 0 9 13 2. + 0 + 0.3220160007476807 + -0.0284899994730949 + -0.8423460125923157 + <_> + + <_> + + + + <_>16 0 3 24 -1. + <_>17 0 1 24 3. + 0 + -0.0170450005680323 + -0.5093880295753479 + 0.1605760008096695 + <_> + + <_> + + + + <_>5 0 3 24 -1. + <_>6 0 1 24 3. + 0 + -7.3469998314976692e-003 + -0.5415499806404114 + 4.7320001758635044e-003 + <_> + + <_> + + + + <_>10 15 5 8 -1. + <_>10 19 5 4 2. + 0 + -0.0300019998103380 + -0.8878579735755920 + 0.1362179964780808 + <_> + + <_> + + + + <_>2 18 18 2 -1. + <_>2 19 18 1 2. + 0 + -0.0112929996103048 + 0.8061519861221314 + -0.1615950018167496 + <_> + + <_> + + + + <_>2 8 20 3 -1. + <_>2 9 20 1 3. + 0 + 4.7749998047947884e-003 + 0.0129680000245571 + 0.5507990121841431 + <_> + + <_> + + + + <_>7 6 9 6 -1. + <_>7 8 9 2 3. + 0 + 5.0710001960396767e-003 + -0.0457280017435551 + -1.0766259431838989 + <_> + + <_> + + + + <_>3 2 19 10 -1. + <_>3 7 19 5 2. + 0 + 0.1934410035610199 + 0.0712620019912720 + 1.1694519519805908 + <_> + + <_> + + + + <_>2 7 19 3 -1. + <_>2 8 19 1 3. + 0 + 5.3750001825392246e-003 + -0.1973620057106018 + 0.3820689916610718 + <_> + + <_> + + + + <_>15 6 9 4 -1. + <_>15 8 9 2 2. 
+ 0 + -0.0682760030031204 + -5.4372339248657227 + 0.1115190014243126 + <_> + + <_> + + + + <_>2 2 18 8 -1. + <_>8 2 6 8 3. + 0 + -0.0349330008029938 + 0.4479340016841888 + -0.1865790039300919 + <_> + + <_> + + + + <_>10 9 14 4 -1. + <_>10 9 7 4 2. + 0 + 5.1219998858869076e-003 + -0.0148719996213913 + 0.1841389983892441 + <_> + + <_> + + + + <_>4 4 6 16 -1. + <_>7 4 3 16 2. + 0 + 0.0953119993209839 + -0.1511709988117218 + 0.9499149918556213 + <_> + + <_> + + + + <_>15 8 9 16 -1. + <_>18 8 3 16 3. + 0 + -0.0628490000963211 + 0.4647360146045685 + 0.0384050011634827 + <_> + + <_> + + + + <_>0 8 9 16 -1. + <_>3 8 3 16 3. + 0 + -0.1704069972038269 + -1.6499999761581421 + -0.0632369965314865 + <_> + + <_> + + + + <_>18 0 6 14 -1. + <_>20 0 2 14 3. + 0 + 0.0105839995667338 + -0.0383489988744259 + 0.4191380143165588 + <_> + + <_> + + + + <_>0 0 6 14 -1. + <_>2 0 2 14 3. + 0 + -0.0415790006518364 + 0.3446190059185028 + -0.2118770033121109 + <_> + + <_> + + + + <_>15 0 6 22 -1. + <_>17 0 2 22 3. + 0 + 0.1271860003471375 + 0.1239819973707199 + -2.1254889965057373 + <_> + + <_> + + + + <_>3 0 6 22 -1. + <_>5 0 2 22 3. + 0 + 0.0825570002198219 + -0.0620240010321140 + -1.4875819683074951 + <_> + + <_> + + + + <_>12 2 12 20 -1. + <_>16 2 4 20 3. + 0 + 0.0852930024266243 + 0.0170879997313023 + 0.3207660019397736 + <_> + + <_> + + + + <_>0 2 12 20 -1. + <_>4 2 4 20 3. + 0 + 0.0555440001189709 + -0.2741400003433228 + 0.1897639930248261 + <_> + + <_> + + + + <_>11 6 4 9 -1. + <_>11 6 2 9 2. + 0 + 4.5650000683963299e-003 + -0.1792020052671433 + 0.2796730101108551 + <_> + + <_> + + + + <_>9 0 6 16 -1. + <_>12 0 3 16 2. + 0 + 0.0129979997873306 + -0.3229750096797943 + 0.2694180011749268 + <_> + + <_> + + + + <_>12 1 3 12 -1. + <_>12 7 3 6 2. + 0 + 0.0578919984400272 + 0.1264439970254898 + -0.6071349978446960 + <_> + + <_> + + + + <_>3 4 18 6 -1. + <_>3 4 9 3 2. + <_>12 7 9 3 2. + 0 + -0.0228240005671978 + -0.4968209862709045 + 0.0223769992589951 + <_> + + <_> + + + + <_>5 5 16 8 -1. + <_>13 5 8 4 2. + <_>5 9 8 4 2. + 0 + 0.0483120009303093 + 0.0436070002615452 + 0.4853779971599579 + <_> + + <_> + + + + <_>0 13 10 6 -1. + <_>0 15 10 2 3. + 0 + 0.0257140006870031 + -0.0429509989917278 + -0.9302350282669067 + <_> + + <_> + + + + <_>8 14 9 6 -1. + <_>8 16 9 2 3. + 0 + 6.9269998930394650e-003 + -2.9680000152438879e-003 + 0.3429630100727081 + <_> + + <_> + + + + <_>6 2 9 6 -1. + <_>9 2 3 6 3. + 0 + -0.0344469994306564 + -1.5299769639968872 + -0.0610149987041950 + <_> + + <_> + + + + <_>14 1 10 8 -1. + <_>19 1 5 4 2. + <_>14 5 5 4 2. + 0 + 0.0293879993259907 + 0.0375959984958172 + 0.6417239904403687 + <_> + + <_> + + + + <_>9 1 3 12 -1. + <_>9 7 3 6 2. + 0 + -2.4319998919963837e-003 + 0.0990889966487885 + -0.3968810141086578 + -3.3703000545501709 + 22 + -1 + <_> + + + <_> + + <_> + + + + <_>6 4 12 9 -1. + <_>6 7 12 3 3. + 0 + -0.0959440022706985 + 0.6241909861564636 + -0.4587520062923431 + <_> + + <_> + + + + <_>6 5 12 6 -1. + <_>10 5 4 6 3. + 0 + 0.0168340001255274 + -0.9307280182838440 + 0.2156360000371933 + <_> + + <_> + + + + <_>1 1 8 5 -1. + <_>5 1 4 5 2. + 0 + 0.0260499995201826 + -0.4053229987621307 + 0.4225659966468811 + <_> + + <_> + + + + <_>12 12 6 8 -1. + <_>12 16 6 4 2. + 0 + 3.6500001442618668e-004 + 0.0952880010008812 + -0.6329810023307800 + <_> + + <_> + + + + <_>3 12 12 6 -1. + <_>3 14 12 2 3. + 0 + -6.6940002143383026e-003 + 0.3724380135536194 + -0.3033240139484406 + <_> + + <_> + + + + <_>9 18 12 6 -1. + <_>15 18 6 3 2. + <_>9 21 6 3 2. 
+ 0 + 0.0188740007579327 + -0.2335720062255859 + 0.4033069908618927 + <_> + + <_> + + + + <_>4 13 6 6 -1. + <_>4 16 6 3 2. + 0 + -1.6300000424962491e-004 + 0.0428869985044003 + -0.7779679894447327 + <_> + + <_> + + + + <_>11 3 7 18 -1. + <_>11 12 7 9 2. + 0 + -0.0762590020895004 + -0.4962849915027618 + 0.1633539944887161 + <_> + + <_> + + + + <_>3 9 18 3 -1. + <_>9 9 6 3 3. + 0 + 0.0501490011811256 + 0.0327470004558563 + -0.8004789948463440 + <_> + + <_> + + + + <_>5 3 19 2 -1. + <_>5 4 19 1 2. + 0 + -2.9239999130368233e-003 + -0.5000280141830444 + 0.2548060119152069 + <_> + + <_> + + + + <_>4 2 12 6 -1. + <_>4 2 6 3 2. + <_>10 5 6 3 2. + 0 + 0.0162439998239279 + 0.0389130003750324 + -0.7072489857673645 + <_> + + <_> + + + + <_>9 6 6 9 -1. + <_>11 6 2 9 3. + 0 + 0.0378119982779026 + -0.0662679970264435 + 0.7386879920959473 + <_> + + <_> + + + + <_>8 6 6 9 -1. + <_>10 6 2 9 3. + 0 + -0.0123199997469783 + 0.4869639873504639 + -0.2448559999465942 + <_> + + <_> + + + + <_>16 9 5 15 -1. + <_>16 14 5 5 3. + 0 + 0.0580039992928505 + 0.1345909982919693 + -0.1323210000991821 + <_> + + <_> + + + + <_>3 9 5 15 -1. + <_>3 14 5 5 3. + 0 + 4.8630000092089176e-003 + -0.4417290091514587 + 0.1400559991598129 + <_> + + <_> + + + + <_>6 6 14 6 -1. + <_>13 6 7 3 2. + <_>6 9 7 3 2. + 0 + 0.0456909984350204 + 0.0312179997563362 + 0.8981829881668091 + <_> + + <_> + + + + <_>8 6 3 14 -1. + <_>8 13 3 7 2. + 0 + 0.0213210005313158 + 0.0120080001652241 + -0.8606619834899902 + <_> + + <_> + + + + <_>0 16 24 5 -1. + <_>8 16 8 5 3. + 0 + 0.1567910015583038 + 0.0140559999272227 + 0.8533290028572083 + <_> + + <_> + + + + <_>0 20 20 3 -1. + <_>10 20 10 3 2. + 0 + -0.0103289997205138 + 0.2902280092239380 + -0.2947880029678345 + <_> + + <_> + + + + <_>5 10 18 2 -1. + <_>5 11 18 1 2. + 0 + 2.4290001019835472e-003 + -0.4043990075588226 + 0.1940020024776459 + <_> + + <_> + + + + <_>0 6 6 10 -1. + <_>2 6 2 10 3. + 0 + -0.0233389995992184 + 0.3294520080089569 + -0.2571269869804382 + <_> + + <_> + + + + <_>2 1 20 3 -1. + <_>2 2 20 1 3. + 0 + -6.8970001302659512e-003 + -0.5335299968719482 + 0.2163520008325577 + <_> + + <_> + + + + <_>9 13 6 11 -1. + <_>11 13 2 11 3. + 0 + -0.0344030000269413 + -1.4425489902496338 + -0.0446829982101917 + <_> + + <_> + + + + <_>9 15 6 8 -1. + <_>9 19 6 4 2. + 0 + -0.0212350003421307 + -0.7901750206947327 + 0.1908410042524338 + <_> + + <_> + + + + <_>9 12 6 9 -1. + <_>9 15 6 3 3. + 0 + 2.0620001014322042e-003 + -0.2693119943141937 + 0.3148800134658814 + <_> + + <_> + + + + <_>5 11 18 2 -1. + <_>5 12 18 1 2. + 0 + -4.2190002277493477e-003 + -0.5446439981460571 + 0.1657460033893585 + <_> + + <_> + + + + <_>2 6 15 6 -1. + <_>2 8 15 2 3. + 0 + -0.0143349999561906 + 0.0221050009131432 + -0.6234250068664551 + <_> + + <_> + + + + <_>6 0 18 3 -1. + <_>6 1 18 1 3. + 0 + -8.2120001316070557e-003 + -0.4988499879837036 + 0.1923709958791733 + <_> + + <_> + + + + <_>5 0 3 18 -1. + <_>6 0 1 18 3. + 0 + -9.3350000679492950e-003 + -0.7913119792938232 + -0.0141439996659756 + <_> + + <_> + + + + <_>18 3 6 10 -1. + <_>20 3 2 10 3. + 0 + -0.0379379987716675 + 0.7984129786491394 + -0.0337990000844002 + <_> + + <_> + + + + <_>0 3 6 10 -1. + <_>2 3 2 10 3. + 0 + 4.7059999778866768e-003 + -0.3316340148448944 + 0.2072629928588867 + <_> + + <_> + + + + <_>10 5 8 9 -1. + <_>10 5 4 9 2. + 0 + -4.4499998912215233e-003 + -0.2725630104541779 + 0.1840219944715500 + <_> + + <_> + + + + <_>6 5 8 9 -1. + <_>10 5 4 9 2. + 0 + 5.2189999260008335e-003 + -0.5309600234031677 + 0.0526079982519150 + <_> + + <_> + + + + <_>3 2 20 3 -1. 
+ <_>3 3 20 1 3. + 0 + -9.5399999991059303e-003 + -0.5648540258407593 + 0.1926939934492111 + <_> + + <_> + + + + <_>5 2 13 4 -1. + <_>5 4 13 2 2. + 0 + 0.0449699983000755 + -0.1741150021553040 + 0.9538260102272034 + <_> + + <_> + + + + <_>17 0 7 14 -1. + <_>17 7 7 7 2. + 0 + 0.0142090003937483 + -0.0919490009546280 + 0.2483610063791275 + <_> + + <_> + + + + <_>0 0 7 14 -1. + <_>0 7 7 7 2. + 0 + 0.1638019979000092 + -0.0584970004856586 + -1.6404409408569336 + <_> + + <_> + + + + <_>9 11 10 6 -1. + <_>9 11 5 6 2. + 0 + 2.5579999200999737e-003 + 0.2344799935817719 + -0.0927340015769005 + <_> + + <_> + + + + <_>5 11 10 6 -1. + <_>10 11 5 6 2. + 0 + -3.8499999791383743e-003 + 0.1788070052862167 + -0.3584409952163696 + <_> + + <_> + + + + <_>11 6 3 18 -1. + <_>11 12 3 6 3. + 0 + -0.0252219997346401 + -0.4290300011634827 + 0.2024450004100800 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + -0.0194150004535913 + 0.5801630020141602 + -0.1880639940500259 + <_> + + <_> + + + + <_>6 16 18 3 -1. + <_>6 17 18 1 3. + 0 + 0.0144199999049306 + 0.0328469984233379 + 0.8198050260543823 + <_> + + <_> + + + + <_>4 6 9 10 -1. + <_>4 11 9 5 2. + 0 + 0.0515829995274544 + 0.0691760033369064 + -0.4586629867553711 + <_> + + <_> + + + + <_>9 7 15 4 -1. + <_>9 9 15 2 2. + 0 + -0.0379600003361702 + -1.2553000450134277 + 0.1433289945125580 + <_> + + <_> + + + + <_>5 6 12 6 -1. + <_>5 6 6 3 2. + <_>11 9 6 3 2. + 0 + -0.0295609999448061 + 0.5315179824829102 + -0.2059649974107742 + <_> + + <_> + + + + <_>6 1 12 9 -1. + <_>6 4 12 3 3. + 0 + -0.0391109995543957 + 1.1658719778060913 + 0.0538970008492470 + <_> + + <_> + + + + <_>7 9 6 12 -1. + <_>7 9 3 6 2. + <_>10 15 3 6 2. + 0 + -0.0291590001434088 + 0.3930760025978088 + -0.2218450009822846 + <_> + + <_> + + + + <_>11 5 13 6 -1. + <_>11 7 13 2 3. + 0 + -0.0836170017719269 + -0.7374449968338013 + 0.1426820009946823 + <_> + + <_> + + + + <_>1 11 22 13 -1. + <_>12 11 11 13 2. + 0 + 0.4200400114059448 + -0.1427740007638931 + 1.7894840240478516 + <_> + + <_> + + + + <_>18 8 6 6 -1. + <_>18 11 6 3 2. + 0 + 0.0600050017237663 + 0.1197670027613640 + -1.8886189460754395 + <_> + + <_> + + + + <_>0 8 6 6 -1. + <_>0 11 6 3 2. + 0 + -0.0189810004085302 + -1.4148449897766113 + -0.0565229989588261 + <_> + + <_> + + + + <_>0 6 24 3 -1. + <_>0 7 24 1 3. + 0 + -6.0049998573958874e-003 + 0.4417079985141754 + -0.1020080000162125 + <_> + + <_> + + + + <_>0 5 10 6 -1. + <_>0 7 10 2 3. + 0 + -0.0582140013575554 + -1.3918470144271851 + -0.0482689999043942 + <_> + + <_> + + + + <_>6 7 18 3 -1. + <_>6 8 18 1 3. + 0 + -0.0122710000723600 + 0.5131769776344299 + -0.0936969965696335 + <_> + + <_> + + + + <_>0 0 10 6 -1. + <_>0 2 10 2 3. + 0 + 0.0465859994292259 + -0.0574840009212494 + -1.4283169507980347 + <_> + + <_> + + + + <_>19 0 3 19 -1. + <_>20 0 1 19 3. + 0 + 1.2110000243410468e-003 + -0.0808919966220856 + 0.3233320116996765 + <_> + + <_> + + + + <_>4 6 12 16 -1. + <_>4 6 6 8 2. + <_>10 14 6 8 2. + 0 + -0.0886420011520386 + -0.8644909858703613 + -0.0331469997763634 + <_> + + <_> + + + + <_>19 6 4 18 -1. + <_>21 6 2 9 2. + <_>19 15 2 9 2. + 0 + -0.0231849998235703 + 0.5216220021247864 + -0.0161680001765490 + <_> + + <_> + + + + <_>1 6 4 18 -1. + <_>1 6 2 9 2. + <_>3 15 2 9 2. + 0 + 0.0430900007486343 + -0.1615380048751831 + 1.0915000438690186 + <_> + + <_> + + + + <_>3 21 18 3 -1. + <_>3 22 18 1 3. + 0 + 2.0599999697878957e-004 + -0.1709149926900864 + 0.3123669922351837 + <_> + + <_> + + + + <_>0 19 9 4 -1. + <_>0 21 9 2 2. 
+ 0 + 8.9159999042749405e-003 + -6.7039998248219490e-003 + -0.6881039738655090 + <_> + + <_> + + + + <_>12 18 12 6 -1. + <_>18 18 6 3 2. + <_>12 21 6 3 2. + 0 + -0.0177529994398355 + 0.6329280138015747 + -4.2360001243650913e-003 + <_> + + <_> + + + + <_>7 18 9 4 -1. + <_>7 20 9 2 2. + 0 + 6.2299999408423901e-003 + -0.3363719880580902 + 0.1279059946537018 + <_> + + <_> + + + + <_>12 16 10 8 -1. + <_>17 16 5 4 2. + <_>12 20 5 4 2. + 0 + 0.0227700006216764 + -0.0347039997577667 + 0.3914180099964142 + <_> + + <_> + + + + <_>2 16 10 8 -1. + <_>2 16 5 4 2. + <_>7 20 5 4 2. + 0 + -0.0215349998325109 + 0.6476510167121887 + -0.2009779959917069 + <_> + + <_> + + + + <_>14 0 10 12 -1. + <_>19 0 5 6 2. + <_>14 6 5 6 2. + 0 + 0.0617589987814426 + 0.0542970001697540 + 0.9070010185241699 + <_> + + <_> + + + + <_>0 0 10 12 -1. + <_>0 0 5 6 2. + <_>5 6 5 6 2. + 0 + -0.0780699998140335 + 0.6552339792251587 + -0.1975439935922623 + <_> + + <_> + + + + <_>15 14 9 6 -1. + <_>15 16 9 2 3. + 0 + 0.0113150002434850 + 0.1938530057668686 + -0.5170729756355286 + <_> + + <_> + + + + <_>0 14 9 6 -1. + <_>0 16 9 2 3. + 0 + -0.0255900006741285 + -0.9309650063514710 + -0.0315469987690449 + <_> + + <_> + + + + <_>14 14 10 6 -1. + <_>14 16 10 2 3. + 0 + -0.0380589999258518 + -0.6832690238952637 + 0.1270910054445267 + <_> + + <_> + + + + <_>0 14 10 6 -1. + <_>0 16 10 2 3. + 0 + 9.7970003262162209e-003 + 0.0155239999294281 + -0.6334789991378784 + <_> + + <_> + + + + <_>5 18 18 2 -1. + <_>5 19 18 1 2. + 0 + -0.0138419996947050 + 1.0060529708862305 + 0.0628129988908768 + <_> + + <_> + + + + <_>0 18 18 3 -1. + <_>0 19 18 1 3. + 0 + 8.3459997549653053e-003 + -0.2338320016860962 + 0.3098269999027252 + <_> + + <_> + + + + <_>3 5 18 12 -1. + <_>12 5 9 6 2. + <_>3 11 9 6 2. + 0 + -0.0714399963617325 + -0.7250540256500244 + 0.1714829951524735 + <_> + + <_> + + + + <_>5 3 7 9 -1. + <_>5 6 7 3 3. + 0 + 0.0100060002878308 + -0.2207199931144714 + 0.3526619970798492 + <_> + + <_> + + + + <_>4 0 19 15 -1. + <_>4 5 19 5 3. + 0 + 0.1100530028343201 + 0.1666200011968613 + -0.7431899905204773 + <_> + + <_> + + + + <_>3 0 16 4 -1. + <_>3 2 16 2 2. + 0 + 0.0353109985589981 + -0.2398270070552826 + 0.4143599867820740 + <_> + + <_> + + + + <_>4 12 16 12 -1. + <_>4 12 8 12 2. + 0 + -0.1117469966411591 + 0.5104539990425110 + 2.2319999989122152e-003 + <_> + + <_> + + + + <_>4 3 12 15 -1. + <_>10 3 6 15 2. + 0 + -0.1136780008673668 + 0.9047520160675049 + -0.1661529988050461 + <_> + + <_> + + + + <_>16 4 2 19 -1. + <_>16 4 1 19 2. + 0 + 0.0166679993271828 + 0.1402450054883957 + -0.5217850208282471 + <_> + + <_> + + + + <_>6 4 2 19 -1. + <_>7 4 1 19 2. + 0 + -8.0340001732110977e-003 + -0.6617839932441711 + 3.7640000227838755e-003 + <_> + + <_> + + + + <_>13 14 8 10 -1. + <_>17 14 4 5 2. + <_>13 19 4 5 2. + 0 + -0.0330969989299774 + 0.8018590211868286 + 0.0593850016593933 + <_> + + <_> + + + + <_>3 14 8 10 -1. + <_>3 14 4 5 2. + <_>7 19 4 5 2. + 0 + 0.0125479996204376 + -0.3354550004005432 + 0.1457860022783279 + <_> + + <_> + + + + <_>12 6 3 18 -1. + <_>12 12 3 6 3. + 0 + -0.0420739986002445 + -0.5550910234451294 + 0.1326660066843033 + <_> + + <_> + + + + <_>5 11 12 6 -1. + <_>5 11 6 3 2. + <_>11 14 6 3 2. + 0 + 0.0252219997346401 + -0.0616319999098778 + -1.3678770065307617 + <_> + + <_> + + + + <_>10 5 8 10 -1. + <_>14 5 4 5 2. + <_>10 10 4 5 2. + 0 + -0.0242689996957779 + 0.3418509960174561 + -7.4160001240670681e-003 + <_> + + <_> + + + + <_>6 4 12 10 -1. + <_>6 4 6 5 2. + <_>12 9 6 5 2. 
+ 0 + -0.0122800003737211 + 0.2774580121040344 + -0.3103390038013458 + <_> + + <_> + + + + <_>6 8 18 10 -1. + <_>15 8 9 5 2. + <_>6 13 9 5 2. + 0 + -0.1137709990143776 + 1.1719540357589722 + 0.0836810022592545 + <_> + + <_> + + + + <_>0 8 18 10 -1. + <_>0 8 9 5 2. + <_>9 13 9 5 2. + 0 + -0.0847719982266426 + 0.8169479966163635 + -0.1783750057220459 + <_> + + <_> + + + + <_>12 6 3 18 -1. + <_>12 12 3 6 3. + 0 + -0.0245520006865263 + -0.1862729936838150 + 0.1434009969234467 + <_> + + <_> + + + + <_>0 14 18 3 -1. + <_>0 15 18 1 3. + 0 + -9.0269995853304863e-003 + 0.3265919983386993 + -0.2354129999876022 + <_> + + <_> + + + + <_>12 6 3 18 -1. + <_>12 12 3 6 3. + 0 + 0.0111779998987913 + 0.1976120024919510 + -0.0217010006308556 + <_> + + <_> + + + + <_>9 6 3 18 -1. + <_>9 12 3 6 3. + 0 + -0.0293669998645782 + -0.9341480135917664 + -0.0217049997299910 + <_> + + <_> + + + + <_>6 14 18 3 -1. + <_>6 15 18 1 3. + 0 + 6.3640000298619270e-003 + 0.0255730003118515 + 0.4641279876232147 + <_> + + <_> + + + + <_>0 5 18 3 -1. + <_>0 6 18 1 3. + 0 + 0.0140260001644492 + -0.2122859954833984 + 0.4007880091667175 + <_> + + <_> + + + + <_>2 5 22 3 -1. + <_>2 6 22 1 3. + 0 + -0.0133419996127486 + 0.7420269846916199 + 0.0290019996464252 + <_> + + <_> + + + + <_>0 0 21 10 -1. + <_>7 0 7 10 3. + 0 + 0.2842279970645905 + -0.1924359947443008 + 0.4363119900226593 + <_> + + <_> + + + + <_>6 3 18 17 -1. + <_>12 3 6 17 3. + 0 + -0.2372400015592575 + 0.6973639726638794 + 0.0693079978227615 + <_> + + <_> + + + + <_>0 3 18 17 -1. + <_>6 3 6 17 3. + 0 + -0.1116970032453537 + 0.3914720118045807 + -0.2092200070619583 + <_> + + <_> + + + + <_>0 12 24 11 -1. + <_>8 12 8 11 3. + 0 + 0.1278750002384186 + -0.0725559964776039 + 0.3608820140361786 + <_> + + <_> + + + + <_>4 10 16 6 -1. + <_>4 13 16 3 2. + 0 + -0.0629009976983070 + 0.9542499780654907 + -0.1540279984474182 + <_> + + <_> + + + + <_>12 8 6 8 -1. + <_>12 12 6 4 2. + 0 + 0.0174390003085136 + -0.0511349998414516 + 0.2775030136108398 + <_> + + <_> + + + + <_>6 14 8 7 -1. + <_>10 14 4 7 2. + 0 + 1.2319999514147639e-003 + 0.0756279975175858 + -0.3645609915256500 + <_> + + <_> + + + + <_>15 10 6 14 -1. + <_>18 10 3 7 2. + <_>15 17 3 7 2. + 0 + 0.0274950005114079 + 0.0518440008163452 + 0.4156259894371033 + <_> + + <_> + + + + <_>3 10 6 14 -1. + <_>3 10 3 7 2. + <_>6 17 3 7 2. + 0 + -0.0435439981520176 + 0.7196999788284302 + -0.1713220030069351 + <_> + + <_> + + + + <_>6 12 18 2 -1. + <_>6 13 18 1 2. + 0 + 0.0110259996727109 + 0.1435460001230240 + -0.6540300250053406 + <_> + + <_> + + + + <_>5 8 10 6 -1. + <_>5 10 10 2 3. + 0 + 0.0208659991621971 + 0.0400890000164509 + -0.4574329853057861 + <_> + + <_> + + + + <_>12 11 9 4 -1. + <_>12 13 9 2 2. + 0 + -0.0223040003329515 + 0.5385500192642212 + 0.0716629996895790 + <_> + + <_> + + + + <_>0 11 9 6 -1. + <_>0 13 9 2 3. + 0 + 0.0324920006096363 + -0.0459919981658459 + -1.0047069787979126 + <_> + + <_> + + + + <_>11 2 3 18 -1. + <_>12 2 1 18 3. + 0 + 0.0122699998319149 + 0.0343349985778332 + 0.4243179857730866 + <_> + + <_> + + + + <_>10 2 3 18 -1. + <_>11 2 1 18 3. + 0 + 8.3820000290870667e-003 + -0.2585060000419617 + 0.2626349925994873 + <_> + + <_> + + + + <_>9 12 6 10 -1. + <_>11 12 2 10 3. + 0 + 0.0373539999127388 + 0.1569249927997589 + -1.0429090261459351 + <_> + + <_> + + + + <_>1 10 6 9 -1. + <_>1 13 6 3 3. + 0 + -0.0141110001131892 + -0.7317770123481751 + -0.0202769991010427 + <_> + + <_> + + + + <_>6 9 16 6 -1. + <_>14 9 8 3 2. + <_>6 12 8 3 2. 
+ 0 + 0.0570669993758202 + 0.0833600014448166 + 1.5661499500274658 + <_> + + <_> + + + + <_>1 8 9 6 -1. + <_>1 10 9 2 3. + 0 + 4.9680001102387905e-003 + -0.3531819880008698 + 0.1469839960336685 + <_> + + <_> + + + + <_>7 7 16 6 -1. + <_>7 9 16 2 3. + 0 + -0.0244929995387793 + 0.2832590043544769 + -3.4640000667423010e-003 + <_> + + <_> + + + + <_>0 0 18 3 -1. + <_>0 1 18 1 3. + 0 + -0.0112549997866154 + -0.8401749730110169 + -0.0362519994378090 + <_> + + <_> + + + + <_>10 0 6 9 -1. + <_>12 0 2 9 3. + 0 + 0.0345330014824867 + 0.1499850004911423 + -0.8736709952354431 + <_> + + <_> + + + + <_>9 5 6 6 -1. + <_>12 5 3 6 2. + 0 + 0.0243030004203320 + -0.1878750026226044 + 0.5948399901390076 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>12 6 2 9 2. + <_>10 15 2 9 2. + 0 + -7.8790001571178436e-003 + 0.4431569874286652 + -0.0565709993243217 + <_> + + <_> + + + + <_>8 0 6 9 -1. + <_>10 0 2 9 3. + 0 + 0.0351420007646084 + -0.0564949996769428 + -1.3617190122604370 + <_> + + <_> + + + + <_>9 1 6 9 -1. + <_>9 4 6 3 3. + 0 + 4.6259998343884945e-003 + -0.3116169869899750 + 0.2544769942760468 + <_> + + <_> + + + + <_>1 0 18 9 -1. + <_>1 3 18 3 3. + 0 + -0.0831310003995895 + 1.6424349546432495 + -0.1442939937114716 + <_> + + <_> + + + + <_>0 3 24 3 -1. + <_>0 4 24 1 3. + 0 + -0.0140159996226430 + -0.7781950235366821 + 0.1717330068349838 + <_> + + <_> + + + + <_>6 14 9 4 -1. + <_>6 16 9 2 2. + 0 + 1.2450000504031777e-003 + -0.2319139987230301 + 0.2852790057659149 + <_> + + <_> + + + + <_>8 9 8 10 -1. + <_>12 9 4 5 2. + <_>8 14 4 5 2. + 0 + -0.0168030001223087 + -0.3596509993076325 + 0.2041299939155579 + <_> + + <_> + + + + <_>5 2 13 9 -1. + <_>5 5 13 3 3. + 0 + -0.0767479985952377 + 0.7805050015449524 + -0.1561280041933060 + <_> + + <_> + + + + <_>4 4 16 9 -1. + <_>4 7 16 3 3. + 0 + -0.2367199957370758 + 1.1813700199127197 + 0.0781119987368584 + <_> + + <_> + + + + <_>4 4 14 9 -1. + <_>4 7 14 3 3. + 0 + -0.1005740016698837 + -0.4710409939289093 + 0.0791729986667633 + <_> + + <_> + + + + <_>8 5 9 6 -1. + <_>8 7 9 2 3. + 0 + 1.3239999534562230e-003 + 0.2226269990205765 + -0.3709979951381683 + <_> + + <_> + + + + <_>1 7 16 6 -1. + <_>1 9 16 2 3. + 0 + 0.0221529994159937 + -0.0386490002274513 + -0.9227499961853027 + <_> + + <_> + + + + <_>10 5 13 9 -1. + <_>10 8 13 3 3. + 0 + -0.1124619990587235 + 0.4189960062503815 + 0.0804110020399094 + <_> + + <_> + + + + <_>1 5 13 9 -1. + <_>1 8 13 3 3. + 0 + 0.0164810009300709 + -0.1675669997930527 + 0.7184240221977234 + <_> + + <_> + + + + <_>0 4 24 6 -1. + <_>12 4 12 3 2. + <_>0 7 12 3 2. + 0 + 0.0681139975786209 + 0.1571989953517914 + -0.8768110275268555 + <_> + + <_> + + + + <_>1 14 10 9 -1. + <_>1 17 10 3 3. + 0 + 0.0160119999200106 + -4.1600000113248825e-003 + -0.5932779908180237 + <_> + + <_> + + + + <_>5 17 18 3 -1. + <_>5 18 18 1 3. + 0 + 4.6640001237392426e-003 + -0.0301539991050959 + 0.4834530055522919 + <_> + + <_> + + + + <_>0 16 18 3 -1. + <_>0 17 18 1 3. + 0 + 6.7579997703433037e-003 + -0.2266740053892136 + 0.3366230130195618 + <_> + + <_> + + + + <_>9 17 9 6 -1. + <_>9 19 9 2 3. + 0 + 4.7289999201893806e-003 + -0.0603739991784096 + 0.3145810067653656 + <_> + + <_> + + + + <_>1 20 22 4 -1. + <_>1 20 11 2 2. + <_>12 22 11 2 2. + 0 + 2.5869999080896378e-003 + -0.2987259924411774 + 0.1778749972581863 + <_> + + <_> + + + + <_>8 14 8 6 -1. + <_>8 17 8 3 2. + 0 + 2.8989999555051327e-003 + 0.2189020067453384 + -0.2956709861755371 + <_> + + <_> + + + + <_>8 6 8 15 -1. + <_>8 11 8 5 3. 
+ 0 + -0.0300539992749691 + 1.2150429487228394 + -0.1435499936342239 + <_> + + <_> + + + + <_>5 4 18 3 -1. + <_>5 5 18 1 3. + 0 + 0.0141810001805425 + 0.0124519998207688 + 0.5549010038375855 + <_> + + <_> + + + + <_>9 3 5 10 -1. + <_>9 8 5 5 2. + 0 + -0.0605270005762577 + -1.4933999776840210 + -0.0652270019054413 + <_> + + <_> + + + + <_>6 8 12 3 -1. + <_>6 8 6 3 2. + 0 + -0.0198829993605614 + -0.3852640092372894 + 0.1976120024919510 + <_> + + <_> + + + + <_>2 6 18 6 -1. + <_>2 6 9 3 2. + <_>11 9 9 3 2. + 0 + 0.0312189999967813 + -0.2128120064735413 + 0.2944650053977966 + <_> + + <_> + + + + <_>10 6 4 18 -1. + <_>12 6 2 9 2. + <_>10 15 2 9 2. + 0 + 0.0182719994336367 + 9.7200000891461968e-004 + 0.6681420207023621 + <_> + + <_> + + + + <_>7 5 6 6 -1. + <_>10 5 3 6 2. + 0 + 1.1089999461546540e-003 + -0.6246790289878845 + -1.6599999507889152e-003 + <_> + + <_> + + + + <_>14 5 2 18 -1. + <_>14 14 2 9 2. + 0 + -0.0367139987647533 + -0.4233390092849731 + 0.1208470016717911 + <_> + + <_> + + + + <_>8 5 2 18 -1. + <_>8 14 2 9 2. + 0 + 0.0120440004393458 + 0.0258820001035929 + -0.5073239803314209 + <_> + + <_> + + + + <_>9 2 10 6 -1. + <_>9 2 5 6 2. + 0 + 0.0747490003705025 + 0.1318469941616058 + -0.2173960059881210 + <_> + + <_> + + + + <_>3 1 18 12 -1. + <_>12 1 9 12 2. + 0 + -0.2347320020198822 + 1.1775610446929932 + -0.1511469930410385 + <_> + + <_> + + + + <_>5 2 17 22 -1. + <_>5 13 17 11 2. + 0 + 0.1409649997949600 + 0.0339910015463829 + 0.3992309868335724 + <_> + + <_> + + + + <_>4 0 12 6 -1. + <_>4 2 12 2 3. + 0 + 6.1789997853338718e-003 + -0.3180670142173767 + 0.1168169975280762 + <_> + + <_> + + + + <_>6 9 16 6 -1. + <_>14 9 8 3 2. + <_>6 12 8 3 2. + 0 + -0.0572169981896877 + 0.8439909815788269 + 0.0838890001177788 + <_> + + <_> + + + + <_>9 0 5 18 -1. + <_>9 9 5 9 2. + 0 + -0.0552270002663136 + 0.3688830137252808 + -0.1891340017318726 + <_> + + <_> + + + + <_>12 0 6 9 -1. + <_>14 0 2 9 3. + 0 + -0.0215830001980066 + -0.5216180086135864 + 0.1577260047197342 + <_> + + <_> + + + + <_>6 0 6 9 -1. + <_>8 0 2 9 3. + 0 + 0.0257479995489120 + -0.0599219985306263 + -1.0674990415573120 + <_> + + <_> + + + + <_>9 1 6 12 -1. + <_>11 1 2 12 3. + 0 + -0.0130989998579025 + 0.7895839810371399 + 0.0520999990403652 + <_> + + <_> + + + + <_>5 9 13 4 -1. + <_>5 11 13 2 2. + 0 + 2.2799998987466097e-003 + -1.1704430580139160 + -0.0593569986522198 + <_> + + <_> + + + + <_>5 8 19 3 -1. + <_>5 9 19 1 3. + 0 + 8.8060004636645317e-003 + 0.0417179986834526 + 0.6635259985923767 + <_> + + <_> + + + + <_>9 9 6 8 -1. + <_>9 13 6 4 2. + 0 + -8.9699998497962952e-003 + -0.3586269915103912 + 0.0604580007493496 + <_> + + <_> + + + + <_>11 9 4 15 -1. + <_>11 14 4 5 3. + 0 + 4.0230001322925091e-003 + 0.2097939997911453 + -0.2480600029230118 + <_> + + <_> + + + + <_>2 0 6 14 -1. + <_>2 0 3 7 2. + <_>5 7 3 7 2. + 0 + 0.0250170007348061 + -0.1879590004682541 + 0.3954710066318512 + <_> + + <_> + + + + <_>15 1 6 14 -1. + <_>18 1 3 7 2. + <_>15 8 3 7 2. + 0 + -5.9009999968111515e-003 + 0.2566390037536621 + -0.0949190035462379 + <_> + + <_> + + + + <_>3 1 6 14 -1. + <_>3 1 3 7 2. + <_>6 8 3 7 2. + 0 + 4.3850000947713852e-003 + 0.0331390015780926 + -0.4607540071010590 + <_> + + <_> + + + + <_>3 20 18 4 -1. + <_>12 20 9 2 2. + <_>3 22 9 2 2. + 0 + -0.0337719991803169 + -0.9888160228729248 + 0.1463689953088760 + <_> + + <_> + + + + <_>5 0 4 20 -1. + <_>5 0 2 10 2. + <_>7 10 2 10 2. + 0 + 0.0445230007171631 + -0.1328669935464859 + 1.5796790122985840 + <_> + + <_> + + + + <_>16 8 8 12 -1. + <_>20 8 4 6 2. + <_>16 14 4 6 2. 
+ 0 + -0.0409290008246899 + 0.3387709856033325 + 0.0749709978699684 + <_> + + <_> + + + + <_>0 8 8 12 -1. + <_>0 8 4 6 2. + <_>4 14 4 6 2. + 0 + 0.0393519997596741 + -0.1832789927721024 + 0.4698069989681244 + <_> + + <_> + + + + <_>13 13 10 8 -1. + <_>18 13 5 4 2. + <_>13 17 5 4 2. + 0 + -0.0703229978680611 + -0.9832270145416260 + 0.1180810034275055 + <_> + + <_> + + + + <_>1 13 10 8 -1. + <_>1 13 5 4 2. + <_>6 17 5 4 2. + 0 + 0.0357430018484592 + -0.0330509990453720 + -0.8361089825630188 + <_> + + <_> + + + + <_>15 8 4 15 -1. + <_>15 13 4 5 3. + 0 + -0.0429619997739792 + 1.1670809984207153 + 0.0806920006871223 + <_> + + <_> + + + + <_>5 8 4 15 -1. + <_>5 13 4 5 3. + 0 + -0.0210079997777939 + 0.6386979818344116 + -0.1762630045413971 + <_> + + <_> + + + + <_>6 11 16 12 -1. + <_>6 15 16 4 3. + 0 + -0.1574220061302185 + -0.2330249994993210 + 0.1251749992370606 + <_> + + <_> + + + + <_>2 11 16 12 -1. + <_>2 15 16 4 3. + 0 + 7.8659998252987862e-003 + -0.2203799933195114 + 0.2719680070877075 + <_> + + <_> + + + + <_>14 12 7 9 -1. + <_>14 15 7 3 3. + 0 + 0.0236220005899668 + 0.1612730026245117 + -0.4332900047302246 + <_> + + <_> + + + + <_>10 1 3 21 -1. + <_>10 8 3 7 3. + 0 + 0.0746920034289360 + -0.1699199974536896 + 0.5888490080833435 + <_> + + <_> + + + + <_>13 11 9 4 -1. + <_>13 13 9 2 2. + 0 + -6.4799998654052615e-004 + 0.2584289908409119 + -0.0359119996428490 + <_> + + <_> + + + + <_>3 10 17 9 -1. + <_>3 13 17 3 3. + 0 + -0.0162909999489784 + -0.7676439881324768 + -0.0204729996621609 + <_> + + <_> + + + + <_>13 8 8 15 -1. + <_>13 13 8 5 3. + 0 + -0.0331339985132217 + -0.2718009948730469 + 0.1432570070028305 + <_> + + <_> + + + + <_>3 8 8 15 -1. + <_>3 13 8 5 3. + 0 + 0.0487979985773563 + 0.0764089971780777 + -0.4144519865512848 + <_> + + <_> + + + + <_>11 14 10 8 -1. + <_>16 14 5 4 2. + <_>11 18 5 4 2. + 0 + 2.2869999520480633e-003 + -0.0386289991438389 + 0.2075379937887192 + <_> + + <_> + + + + <_>0 18 22 6 -1. + <_>0 18 11 3 2. + <_>11 21 11 3 2. + 0 + 0.0453040003776550 + -0.1777790039777756 + 0.6346139907836914 + <_> + + <_> + + + + <_>0 16 24 4 -1. + <_>0 16 12 4 2. + 0 + 0.1070580035448074 + 0.1897229999303818 + -0.5123620033264160 + <_> + + <_> + + + + <_>6 20 12 3 -1. + <_>12 20 6 3 2. + 0 + -0.0405250005424023 + 0.7061499953269959 + -0.1780329942703247 + <_> + + <_> + + + + <_>18 12 6 12 -1. + <_>21 12 3 6 2. + <_>18 18 3 6 2. + 0 + 0.0319689996540546 + 0.0681499987840652 + 0.6873310208320618 + <_> + + <_> + + + + <_>0 12 6 12 -1. + <_>0 12 3 6 2. + <_>3 18 3 6 2. + 0 + -0.0576170012354851 + 0.7517049908638001 + -0.1576499938964844 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + 0.0135939996689558 + 0.1941190063953400 + -0.2456189990043640 + <_> + + <_> + + + + <_>1 6 22 10 -1. + <_>1 6 11 5 2. + <_>12 11 11 5 2. + 0 + 0.0713960006833076 + -0.0468810014426708 + -0.8819829821586609 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + -0.0148959998041391 + -0.4453240036964417 + 0.1767989993095398 + <_> + + <_> + + + + <_>0 18 18 2 -1. + <_>0 19 18 1 2. + 0 + -0.0100260004401207 + 0.6512269973754883 + -0.1670999974012375 + <_> + + <_> + + + + <_>3 15 19 3 -1. + <_>3 16 19 1 3. + 0 + 3.7589999847114086e-003 + -0.0583010017871857 + 0.3448329865932465 + <_> + + <_> + + + + <_>0 13 18 3 -1. + <_>0 14 18 1 3. + 0 + 0.0162630006670952 + -0.1558150053024292 + 0.8643270134925842 + <_> + + <_> + + + + <_>15 17 9 6 -1. + <_>15 19 9 2 3. + 0 + -0.0401760004460812 + -0.6102859973907471 + 0.1179639995098114 + <_> + + <_> + + + + <_>0 17 9 6 -1. + <_>0 19 9 2 3. 
+ 0 + 0.0270809996873140 + -0.0496019981801510 + -0.8999000191688538 + <_> + + <_> + + + + <_>12 17 9 6 -1. + <_>12 19 9 2 3. + 0 + 0.0524200014770031 + 0.1129719987511635 + -1.0833640098571777 + <_> + + <_> + + + + <_>3 17 9 6 -1. + <_>3 19 9 2 3. + 0 + -0.0191600006073713 + -0.7988010048866272 + -0.0340790003538132 + <_> + + <_> + + + + <_>16 2 3 20 -1. + <_>17 2 1 20 3. + 0 + -3.7730000913143158e-003 + -0.1912409961223602 + 0.2153519988059998 + <_> + + <_> + + + + <_>0 13 24 8 -1. + <_>0 17 24 4 2. + 0 + 0.0757620036602020 + -0.1342169940471649 + 1.6807060241699219 + <_> + + <_> + + + + <_>9 1 6 22 -1. + <_>12 1 3 11 2. + <_>9 12 3 11 2. + 0 + -0.0221730004996061 + 0.4860099852085114 + 3.6160000599920750e-003 + -2.9928278923034668 + 23 + -1 + diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_profileface.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_profileface.xml new file mode 100644 index 0000000000000000000000000000000000000000..486d8e3d83075578c446be96b75a2414efe75ecb --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_profileface.xml @@ -0,0 +1,29690 @@ + + + +BOOST + HAAR + 20 + 20 + + 195 + + 0 + 26 + + <_> + 3 + -1.1856809854507446e+00 + + <_> + + 0 -1 0 1.1384399840608239e-03 + + -8.3771979808807373e-01 7.3413830995559692e-01 + <_> + + 0 -1 1 -1.1342350393533707e-02 + + 6.2702018022537231e-01 -7.2396302223205566e-01 + <_> + + 0 -1 2 -1.1023089755326509e-03 + + 3.7600189447402954e-01 -6.6088408231735229e-01 + <_> + 12 + -1.4913179874420166e+00 + + <_> + + 0 -1 3 -1.9553869962692261e-02 + + 4.9245831370353699e-01 -6.3396167755126953e-01 + <_> + + 0 -1 4 2.2794529795646667e-03 + + -6.4604967832565308e-01 3.5818460583686829e-01 + <_> + + 0 -1 5 2.4270440917462111e-03 + + -4.7253230214118958e-01 2.8494310379028320e-01 + <_> + + 0 -1 6 1.9644061103463173e-03 + + 1.6999539732933044e-01 -7.7868157625198364e-01 + <_> + + 0 -1 7 2.2895270958542824e-03 + + 1.5551710128784180e-01 -6.6725099086761475e-01 + <_> + + 0 -1 8 -3.0143910553306341e-03 + + -6.8721300363540649e-01 1.4604569971561432e-01 + <_> + + 0 -1 9 -1.7399009317159653e-02 + + 7.2524380683898926e-01 -1.6572900116443634e-01 + <_> + + 0 -1 10 9.0722442837432027e-04 + + -4.6388080716133118e-01 2.3604999482631683e-01 + <_> + + 0 -1 11 -1.5043979510664940e-03 + + -7.5959628820419312e-01 1.1436919867992401e-01 + <_> + + 0 -1 12 1.0804689675569534e-01 + + -1.2865519523620605e-01 7.9092341661453247e-01 + <_> + + 0 -1 13 -1.1923050042241812e-03 + + -6.2403547763824463e-01 1.4847490191459656e-01 + <_> + + 0 -1 14 -2.0571390166878700e-02 + + 4.0808489918708801e-01 -2.1287000179290771e-01 + <_> + 27 + -1.9596290588378906e+00 + + <_> + + 0 -1 15 -3.6899209022521973e-02 + + 5.3308618068695068e-01 -4.0872651338577271e-01 + <_> + + 0 -1 16 2.4960909504443407e-03 + + -6.9489312171936035e-01 2.7125179767608643e-01 + <_> + + 0 -1 17 2.4068039783742279e-04 + + -5.6208252906799316e-01 2.1930350363254547e-01 + <_> + + 0 -1 18 -5.8021828532218933e-02 + + 6.9060617685317993e-01 -1.5082140266895294e-01 + <_> + + 0 -1 19 1.1526979506015778e-03 + + 1.3925389945507050e-01 -6.6311657428741455e-01 + <_> + + 0 -1 20 7.4388440698385239e-03 + + -3.3333170413970947e-01 3.1699380278587341e-01 + <_> + + 0 -1 21 -1.4158539706841111e-03 + + -6.8007302284240723e-01 1.3243320584297180e-01 + <_> + + 0 -1 22 8.8562711607664824e-04 + + -3.8672161102294922e-01 1.9732959568500519e-01 + <_> + + 0 -1 23 2.5714060757309198e-03 + + 1.2035659700632095e-01 -7.3177069425582886e-01 + <_> + + 0 -1 24 1.8255549948662519e-03 + + 
7.7979840338230133e-02 -7.7196091413497925e-01 + <_> + + 0 -1 25 -1.1993020307272673e-03 + + 1.6821229457855225e-01 -4.1479128599166870e-01 + <_> + + 0 -1 26 2.3179080337285995e-02 + + 7.5337320566177368e-02 -7.1047067642211914e-01 + <_> + + 0 -1 27 4.6539418399333954e-02 + + -1.0464839637279510e-01 6.6270697116851807e-01 + <_> + + 0 -1 28 -1.7157640540972352e-03 + + -4.9618211388587952e-01 1.6275240480899811e-01 + <_> + + 0 -1 29 -1.2778829783201218e-02 + + 4.6254539489746094e-01 -1.6027900576591492e-01 + <_> + + 0 -1 30 -1.5214820206165314e-01 + + -7.0592701435089111e-01 1.0022509843111038e-01 + <_> + + 0 -1 31 3.1789899803698063e-03 + + 1.2345749884843826e-01 -3.9093419909477234e-01 + <_> + + 0 -1 32 -2.2882770281285048e-03 + + 3.7081500887870789e-01 -1.6210420429706573e-01 + <_> + + 0 -1 33 -2.9806189704686403e-03 + + 1.8087059259414673e-01 -3.3239859342575073e-01 + <_> + + 0 -1 34 -1.5072739915922284e-03 + + -4.9472311139106750e-01 9.8288856446743011e-02 + <_> + + 0 -1 35 1.9225040450692177e-03 + + -1.7791110277175903e-01 3.0773329734802246e-01 + <_> + + 0 -1 36 1.9025449873879552e-03 + + 8.4794998168945312e-02 -5.9020972251892090e-01 + <_> + + 0 -1 37 -3.5421559587121010e-03 + + 3.1175771355628967e-01 -1.4392930269241333e-01 + <_> + + 0 -1 38 -2.9751660767942667e-03 + + -6.3649141788482666e-01 8.2639887928962708e-02 + <_> + + 0 -1 39 1.0003290139138699e-02 + + -1.1699260026216507e-01 4.2387530207633972e-01 + <_> + + 0 -1 40 -1.9193530315533280e-03 + + -4.7115838527679443e-01 1.1038240045309067e-01 + <_> + + 0 -1 41 2.5070620700716972e-02 + + 4.8775929957628250e-02 -8.0351328849792480e-01 + <_> + 28 + -1.9849590063095093e+00 + + <_> + + 0 -1 42 1.4214799739420414e-02 + + -6.3577878475189209e-01 3.3461728692054749e-01 + <_> + + 0 -1 43 -1.2525909580290318e-02 + + 3.2766130566596985e-01 -4.1331529617309570e-01 + <_> + + 0 -1 44 -2.2514370357384905e-05 + + 2.3102630674839020e-01 -5.4282051324844360e-01 + <_> + + 0 -1 45 1.8600060138851404e-03 + + 1.7933349311351776e-01 -6.9131940603256226e-01 + <_> + + 0 -1 46 7.8344792127609253e-03 + + 9.1071300208568573e-02 -7.8126847743988037e-01 + <_> + + 0 -1 47 -4.2322301305830479e-03 + + 2.0658409595489502e-01 -4.2906031012535095e-01 + <_> + + 0 -1 48 -7.5860600918531418e-04 + + 2.0730710029602051e-01 -4.2070311307907104e-01 + <_> + + 0 -1 49 -3.5626380704343319e-03 + + -6.3227087259292603e-01 1.3118620216846466e-01 + <_> + + 0 -1 50 -4.9960161559283733e-03 + + -7.5112378597259521e-01 7.8203327953815460e-02 + <_> + + 0 -1 51 7.3098740540444851e-03 + + 9.3428641557693481e-02 -6.6310107707977295e-01 + <_> + + 0 -1 52 2.2772040392737836e-04 + + -3.4148821234703064e-01 2.0008200407028198e-01 + <_> + + 0 -1 53 8.3124160300940275e-04 + + -2.5448161363601685e-01 2.5857710838317871e-01 + <_> + + 0 -1 54 -7.5492179021239281e-03 + + -6.6138988733291626e-01 8.3004422485828400e-02 + <_> + + 0 -1 55 -3.8039948791265488e-02 + + -8.2163572311401367e-01 5.9231590479612350e-02 + <_> + + 0 -1 56 2.8484580107033253e-03 + + 8.9729957282543182e-02 -5.8333742618560791e-01 + <_> + + 0 -1 57 4.8181698657572269e-03 + + 9.3960560858249664e-02 -5.7619768381118774e-01 + <_> + + 0 -1 58 -1.1190489865839481e-02 + + -6.2544298171997070e-01 7.3608897626399994e-02 + <_> + + 0 -1 59 -6.4537129364907742e-03 + + 5.5123388767242432e-01 -1.0020790249109268e-01 + <_> + + 0 -1 60 3.3225629013031721e-03 + + -1.0797890275716782e-01 5.3664940595626831e-01 + <_> + + 0 -1 61 4.6705761924386024e-03 + + 8.8321126997470856e-02 -6.7683601379394531e-01 + <_> + + 0 -1 62 -1.1613310314714909e-02 + 
+ -5.0711882114410400e-01 7.6556630432605743e-02 + <_> + + 0 -1 63 -3.7515610456466675e-02 + + -7.2936272621154785e-01 5.9448610991239548e-02 + <_> + + 0 -1 64 2.3086030036211014e-02 + + 5.0718959420919418e-02 -7.8459781408309937e-01 + <_> + + 0 -1 65 -7.1651988946541678e-06 + + 1.6686220467090607e-01 -2.5713220238685608e-01 + <_> + + 0 -1 66 7.1611627936363220e-04 + + 1.0636030137538910e-01 -4.2793640494346619e-01 + <_> + + 0 -1 67 4.1476460173726082e-03 + + -1.2069659680128098e-01 4.1993188858032227e-01 + <_> + + 0 -1 68 -2.5815099943429232e-03 + + 4.8718088865280151e-01 -1.0045810043811798e-01 + <_> + + 0 -1 69 -1.7147070029750466e-03 + + -4.6096310019493103e-01 1.0375110059976578e-01 + <_> + 28 + -1.8260079622268677e+00 + + <_> + + 0 -1 70 -6.1202719807624817e-02 + + 3.9079108834266663e-01 -3.9401251077651978e-01 + <_> + + 0 -1 71 -1.4643670292571187e-03 + + -7.3697841167449951e-01 1.5660220384597778e-01 + <_> + + 0 -1 72 7.2080420795828104e-04 + + 2.1675530076026917e-01 -5.8012658357620239e-01 + <_> + + 0 -1 73 6.4895692048594356e-04 + + -7.2308099269866943e-01 1.2785249948501587e-01 + <_> + + 0 -1 74 -1.7158190021291375e-03 + + -7.7100431919097900e-01 1.0210309922695160e-01 + <_> + + 0 -1 75 -2.2490581031888723e-03 + + -6.0623127222061157e-01 1.2427269667387009e-01 + <_> + + 0 -1 76 5.3841978311538696e-02 + + -1.7169749736785889e-01 5.3350567817687988e-01 + <_> + + 0 -1 77 -1.3288970291614532e-01 + + 5.5924367904663086e-01 -1.8954899907112122e-01 + <_> + + 0 -1 78 9.0965389972552657e-04 + + -4.7166430950164795e-01 1.6924260556697845e-01 + <_> + + 0 -1 79 6.0799147468060255e-04 + + 1.1347220093011856e-01 -5.9846878051757812e-01 + <_> + + 0 -1 80 1.6072629392147064e-01 + + -1.0295519977807999e-01 6.6487199068069458e-01 + <_> + + 0 -1 81 -1.7097239615395665e-03 + + -4.7276279330253601e-01 1.3392050564289093e-01 + <_> + + 0 -1 82 1.1734620202332735e-03 + + -2.2795589268207550e-01 2.6135650277137756e-01 + <_> + + 0 -1 83 -1.5138329472392797e-03 + + -5.5395001173019409e-01 1.1028339713811874e-01 + <_> + + 0 -1 84 -2.1774161141365767e-03 + + -6.2228900194168091e-01 7.8486673533916473e-02 + <_> + + 0 -1 85 -2.7727920096367598e-03 + + 4.6141120791435242e-01 -1.3496559858322144e-01 + <_> + + 0 -1 86 9.3199027469381690e-04 + + 1.0162770003080368e-01 -5.1631838083267212e-01 + <_> + + 0 -1 87 2.9746659565716982e-03 + + -1.2999209761619568e-01 4.2117300629615784e-01 + <_> + + 0 -1 88 -5.0399480387568474e-03 + + -6.3706171512603760e-01 7.7624127268791199e-02 + <_> + + 0 -1 89 2.3414850234985352e-02 + + 7.2182796895503998e-02 -5.9831130504608154e-01 + <_> + + 0 -1 90 -1.0927390540018678e-03 + + -4.1664880514144897e-01 1.1829990148544312e-01 + <_> + + 0 -1 91 -1.6441360348835588e-03 + + 1.8583069741725922e-01 -2.7551019191741943e-01 + <_> + + 0 -1 92 -2.5736279785633087e-02 + + -7.5146478414535522e-01 6.3907749950885773e-02 + <_> + + 0 -1 93 -2.8924590442329645e-03 + + -5.6780880689620972e-01 7.3297739028930664e-02 + <_> + + 0 -1 94 -5.2889231592416763e-03 + + -6.3738888502120972e-01 6.8686947226524353e-02 + <_> + + 0 -1 95 3.2964269630610943e-03 + + -2.5062951445579529e-01 1.5989780426025391e-01 + <_> + + 0 -1 96 2.4914439767599106e-02 + + 5.5260978639125824e-02 -7.6208770275115967e-01 + <_> + + 0 -1 97 -1.5088500455021858e-02 + + 3.7033379077911377e-01 -1.2003959715366364e-01 + <_> + 53 + -1.9446740150451660e+00 + + <_> + + 0 -1 98 -1.1857179924845695e-02 + + 2.9421558976173401e-01 -5.1703310012817383e-01 + <_> + + 0 -1 99 2.0991980563849211e-03 + + -6.1471748352050781e-01 2.0648500323295593e-01 + 
<_> + + 0 -1 100 -1.5772449842188507e-04 + + 2.2870740294456482e-01 -5.5258047580718994e-01 + <_> + + 0 -1 101 -2.0669099467340857e-04 + + 1.2070009857416153e-01 -5.4926127195358276e-01 + <_> + + 0 -1 102 2.2675560321658850e-03 + + 1.5354810655117035e-01 -4.6074301004409790e-01 + <_> + + 0 -1 103 1.4469499699771404e-02 + + -1.8976309895515442e-01 4.2071411013603210e-01 + <_> + + 0 -1 104 -1.2127560330554843e-03 + + -4.5139861106872559e-01 9.9425867199897766e-02 + <_> + + 0 -1 105 2.1505509503185749e-03 + + 1.0200879722833633e-01 -6.2064242362976074e-01 + <_> + + 0 -1 106 -1.6638869419693947e-03 + + -7.0367491245269775e-01 7.7214680612087250e-02 + <_> + + 0 -1 107 1.0530210565775633e-03 + + -3.2453960180282593e-01 1.7616109549999237e-01 + <_> + + 0 -1 108 1.1836409568786621e-02 + + -1.3507820665836334e-01 4.2641130089759827e-01 + <_> + + 0 -1 109 9.6512871095910668e-04 + + 9.4502769410610199e-02 -4.8544931411743164e-01 + <_> + + 0 -1 110 7.5651629595085979e-04 + + -2.9959529638290405e-01 1.6867619752883911e-01 + <_> + + 0 -1 111 1.0839150287210941e-02 + + -1.1121030151844025e-01 4.6914410591125488e-01 + <_> + + 0 -1 112 -5.1439419388771057e-02 + + 4.1726920008659363e-01 -1.1776400357484818e-01 + <_> + + 0 -1 113 3.4927250817418098e-03 + + 9.2512279748916626e-02 -5.2599352598190308e-01 + <_> + + 0 -1 114 -1.3926399871706963e-02 + + -6.6633498668670654e-01 5.2386458963155746e-02 + <_> + + 0 -1 115 4.5590959489345551e-03 + + -9.3383841216564178e-02 4.3774750828742981e-01 + <_> + + 0 -1 116 -3.7318699061870575e-02 + + -5.9583687782287598e-01 7.2627849876880646e-02 + <_> + + 0 -1 117 1.2496879789978266e-03 + + 6.9537237286567688e-02 -4.8772460222244263e-01 + <_> + + 0 -1 118 -3.7307639140635729e-03 + + 3.2699251174926758e-01 -1.1739090085029602e-01 + <_> + + 0 -1 119 2.1144179627299309e-03 + + 9.2889092862606049e-02 -4.1788020730018616e-01 + <_> + + 0 -1 120 -6.4239342464134097e-04 + + -2.9332190752029419e-01 1.3107809424400330e-01 + <_> + + 0 -1 121 -3.1379980500787497e-03 + + 3.2445520162582397e-01 -1.1506850272417068e-01 + <_> + + 0 -1 122 -3.9186969399452209e-02 + + -7.9360449314117432e-01 5.0053481012582779e-02 + <_> + + 0 -1 123 4.4646807946264744e-03 + + 5.4776020348072052e-02 -5.6535738706588745e-01 + <_> + + 0 -1 124 8.6451368406414986e-04 + + -1.7471200227737427e-01 1.9758160412311554e-01 + <_> + + 0 -1 125 2.4237011093646288e-03 + + -9.5296189188957214e-02 4.0760260820388794e-01 + <_> + + 0 -1 126 -2.5377490092068911e-03 + + -6.2454742193222046e-01 6.9920547306537628e-02 + <_> + + 0 -1 127 -7.3309220169903710e-06 + + 1.2249249964952469e-01 -2.8157269954681396e-01 + <_> + + 0 -1 128 -1.8882560543715954e-03 + + -6.2670397758483887e-01 6.5820932388305664e-02 + <_> + + 0 -1 129 6.0609861975535750e-04 + + -2.5481408834457397e-01 1.2902240455150604e-01 + <_> + + 0 -1 130 2.3213759995996952e-03 + + -9.7430117428302765e-02 3.2456091046333313e-01 + <_> + + 0 -1 131 -1.8534410046413541e-03 + + -4.4065341353416443e-01 8.2968853414058685e-02 + <_> + + 0 -1 132 2.3999500554054976e-03 + + -1.2041269987821579e-01 2.8288060426712036e-01 + <_> + + 0 -1 133 -8.1356197595596313e-02 + + -7.3972231149673462e-01 4.6568300575017929e-02 + <_> + + 0 -1 134 -2.9865680262446404e-03 + + 1.6334620118141174e-01 -1.9834910333156586e-01 + <_> + + 0 -1 135 2.8128880076110363e-03 + + 1.1837379634380341e-01 -2.9398199915885925e-01 + <_> + + 0 -1 136 -1.0060790181159973e-01 + + -7.3717647790908813e-01 4.2510021477937698e-02 + <_> + + 0 -1 137 1.1854549666168168e-04 + + 1.0471060127019882e-01 -2.9139861464500427e-01 + 
<_> + + 0 -1 138 2.2375308908522129e-03 + + -9.6042059361934662e-02 3.4045928716659546e-01 + <_> + + 0 -1 139 -4.4986992143094540e-03 + + -5.8234661817550659e-01 5.6236840784549713e-02 + <_> + + 0 -1 140 -3.6484538577497005e-04 + + -2.7956131100654602e-01 1.0113990306854248e-01 + <_> + + 0 -1 141 -7.9940296709537506e-03 + + 2.7775949239730835e-01 -1.1941230297088623e-01 + <_> + + 0 -1 142 -5.1547219045460224e-03 + + -6.0229510068893433e-01 4.8917140811681747e-02 + <_> + + 0 -1 143 -8.1772619159892201e-04 + + 1.7660500109195709e-01 -1.6407689452171326e-01 + <_> + + 0 -1 144 6.7434698343276978e-02 + + 4.0761459618806839e-02 -7.1865761280059814e-01 + <_> + + 0 -1 145 -2.4103289470076561e-03 + + 1.7671680450439453e-01 -1.6081850230693817e-01 + <_> + + 0 -1 146 -3.5183799918740988e-03 + + -4.3078601360321045e-01 7.0671632885932922e-02 + <_> + + 0 -1 147 -1.4561560419679154e-05 + + 1.2714700400829315e-01 -2.3387859761714935e-01 + <_> + + 0 -1 148 -4.7951821237802505e-02 + + -7.9085767269134521e-01 3.6803081631660461e-02 + <_> + + 0 -1 149 2.1735159680247307e-03 + + -1.3089279830455780e-01 2.5330349802970886e-01 + <_> + + 0 -1 150 -3.4542270004749298e-03 + + 5.1025247573852539e-01 -7.5337253510951996e-02 + <_> + 54 + -1.8389279842376709e+00 + + <_> + + 0 -1 151 4.5243161730468273e-03 + + -3.0485519766807556e-01 5.1908642053604126e-01 + <_> + + 0 -1 152 2.3372350260615349e-03 + + -4.2904540896415710e-01 2.9052159190177917e-01 + <_> + + 0 -1 153 -4.4243237935006618e-03 + + 2.1068570017814636e-01 -4.5954981446266174e-01 + <_> + + 0 -1 154 -1.2887439690530300e-02 + + 1.9138230383396149e-01 -4.5879068970680237e-01 + <_> + + 0 -1 155 -5.2370920457178727e-05 + + 1.4141489565372467e-01 -5.0267368555068970e-01 + <_> + + 0 -1 156 -4.7738491557538509e-03 + + -4.8760831356048584e-01 1.2341009825468063e-01 + <_> + + 0 -1 157 9.6315861446782947e-04 + + 1.3367399573326111e-01 -4.4793748855590820e-01 + <_> + + 0 -1 158 -8.9140303432941437e-02 + + 5.0387668609619141e-01 -1.5923009812831879e-01 + <_> + + 0 -1 159 1.7201449954882264e-03 + + -2.0535360276699066e-01 2.4340680241584778e-01 + <_> + + 0 -1 160 -2.6712119579315186e-03 + + -6.3319712877273560e-01 5.3035650402307510e-02 + <_> + + 0 -1 161 3.7353280931711197e-02 + + -1.1360249668359756e-01 4.6645331382751465e-01 + <_> + + 0 -1 162 -3.1510960310697556e-02 + + -6.8820482492446899e-01 6.9371856749057770e-02 + <_> + + 0 -1 163 1.5293819829821587e-02 + + -1.0043840110301971e-01 4.6267789602279663e-01 + <_> + + 0 -1 164 5.4966909810900688e-03 + + -9.3514643609523773e-02 4.5127061009407043e-01 + <_> + + 0 -1 165 -4.6311439946293831e-03 + + -6.4314597845077515e-01 8.5003547370433807e-02 + <_> + + 0 -1 166 8.0943357897922397e-04 + + 7.9738967120647430e-02 -4.9320799112319946e-01 + <_> + + 0 -1 167 2.9745940119028091e-02 + + 7.8420467674732208e-02 -5.0482439994812012e-01 + <_> + + 0 -1 168 9.7070122137665749e-04 + + 5.8135438710451126e-02 -5.7035177946090698e-01 + <_> + + 0 -1 169 2.4534659460186958e-03 + + -1.1259060353040695e-01 3.6852970719337463e-01 + <_> + + 0 -1 170 1.9709810148924589e-03 + + 7.7185310423374176e-02 -5.2683860063552856e-01 + <_> + + 0 -1 171 4.8643019981682301e-03 + + -1.0479539632797241e-01 4.1474440693855286e-01 + <_> + + 0 -1 172 1.0143260005861521e-03 + + -1.4731560647487640e-01 2.8671079874038696e-01 + <_> + + 0 -1 173 -9.5099088503047824e-04 + + -3.8070049881935120e-01 8.8108353316783905e-02 + <_> + + 0 -1 174 -5.6730289943516254e-03 + + 2.4818900227546692e-01 -1.3696339726448059e-01 + <_> + + 0 -1 175 1.6987899318337440e-02 + + 
-8.0896042287349701e-02 5.2781671285629272e-01 + <_> + + 0 -1 176 -7.5278789736330509e-03 + + -4.6880009770393372e-01 8.9389666914939880e-02 + <_> + + 0 -1 177 3.3948529511690140e-02 + + 5.0594791769981384e-02 -6.7399561405181885e-01 + <_> + + 0 -1 178 8.3328841719776392e-04 + + -1.8931360542774200e-01 1.9607099890708923e-01 + <_> + + 0 -1 179 -5.9632491320371628e-04 + + -3.6229288578033447e-01 1.0544770210981369e-01 + <_> + + 0 -1 180 3.0905720777809620e-03 + + 5.7209629565477371e-02 -5.5316972732543945e-01 + <_> + + 0 -1 181 3.5152619238942862e-03 + + -1.2211070209741592e-01 2.9369899630546570e-01 + <_> + + 0 -1 182 7.9333729809150100e-04 + + 7.5977906584739685e-02 -4.4539821147918701e-01 + <_> + + 0 -1 183 -1.1189360171556473e-02 + + -5.0596517324447632e-01 5.7438369840383530e-02 + <_> + + 0 -1 184 -1.1787790572270751e-03 + + 3.0799698829650879e-01 -1.0762230306863785e-01 + <_> + + 0 -1 185 5.4418851505033672e-05 + + -2.5997561216354370e-01 1.3138440251350403e-01 + <_> + + 0 -1 186 -7.2562302193546202e-06 + + 1.5439839661121368e-01 -2.1094700694084167e-01 + <_> + + 0 -1 187 -8.3436258137226105e-04 + + 1.3689869642257690e-01 -2.4367660284042358e-01 + <_> + + 0 -1 188 -3.3380609005689621e-02 + + -6.7477357387542725e-01 5.0986740738153458e-02 + <_> + + 0 -1 189 7.4093497823923826e-04 + + 9.1248527169227600e-02 -3.5220760107040405e-01 + <_> + + 0 -1 190 -2.0966369193047285e-03 + + 1.9110049307346344e-01 -1.6380029916763306e-01 + <_> + + 0 -1 191 -6.9339506328105927e-02 + + -8.7700867652893066e-01 3.5726629197597504e-02 + <_> + + 0 -1 192 -5.7089990004897118e-03 + + -6.8067228794097900e-01 3.5545960068702698e-02 + <_> + + 0 -1 193 6.8668760359287262e-03 + + -6.4886868000030518e-02 5.2265900373458862e-01 + <_> + + 0 -1 194 5.4602831369265914e-04 + + 1.0924419760704041e-01 -3.0285251140594482e-01 + <_> + + 0 -1 195 6.4349039457738400e-03 + + -1.6561950743198395e-01 1.9022129476070404e-01 + <_> + + 0 -1 196 -1.0112419724464417e-02 + + 7.4523001909255981e-01 -3.8347329944372177e-02 + <_> + + 0 -1 197 -7.5152877252548933e-04 + + -2.8147280216217041e-01 1.1321689933538437e-01 + <_> + + 0 -1 198 2.8225290589034557e-03 + + -1.2364400178194046e-01 2.5608530640602112e-01 + <_> + + 0 -1 199 2.2058798931539059e-03 + + 5.7334281504154205e-02 -5.6152081489562988e-01 + <_> + + 0 -1 200 2.8164181113243103e-01 + + 4.2092379182577133e-02 -6.4923799037933350e-01 + <_> + + 0 -1 201 -4.2593148536980152e-03 + + -6.4854997396469116e-01 4.3502658605575562e-02 + <_> + + 0 -1 202 2.6586679741740227e-03 + + -9.3526139855384827e-02 3.4158730506896973e-01 + <_> + + 0 -1 203 2.0971989724785089e-03 + + -1.1068929731845856e-01 3.1760269403457642e-01 + <_> + + 0 -1 204 -1.0267860488966107e-03 + + -3.7612101435661316e-01 9.8973110318183899e-02 + <_> + 56 + -1.8807189464569092e+00 + + <_> + + 0 -1 205 2.6354179717600346e-03 + + -5.2496808767318726e-01 2.7711030840873718e-01 + <_> + + 0 -1 206 2.6279650628566742e-03 + + -3.2195448875427246e-01 3.7013629078865051e-01 + <_> + + 0 -1 207 -5.8889109641313553e-03 + + 2.3777529597282410e-01 -4.1800329089164734e-01 + <_> + + 0 -1 208 1.9291159696877003e-03 + + -4.7122061252593994e-01 1.3692170381546021e-01 + <_> + + 0 -1 209 -1.5205480158329010e-02 + + -3.9618429541587830e-01 1.7402400076389313e-01 + <_> + + 0 -1 210 2.3393579758703709e-03 + + -3.8508901000022888e-01 1.5659110248088837e-01 + <_> + + 0 -1 211 4.2395621538162231e-02 + + 1.0478709638118744e-01 -6.2164002656936646e-01 + <_> + + 0 -1 212 -5.6959640234708786e-02 + + 5.1225858926773071e-01 -1.2684780359268188e-01 + <_> + + 
0 -1 213 -7.2845568865886889e-06 + + 1.5136890113353729e-01 -3.1185621023178101e-01 + <_> + + 0 -1 214 -7.9633750021457672e-02 + + -8.4324747323989868e-01 4.4978428632020950e-02 + <_> + + 0 -1 215 5.9168688021600246e-03 + + -1.0745979845523834e-01 4.7434100508689880e-01 + <_> + + 0 -1 216 -1.4736950397491455e-03 + + 3.6067450046539307e-01 -1.4760640263557434e-01 + <_> + + 0 -1 217 -3.9630971848964691e-02 + + -6.5838980674743652e-01 7.4866786599159241e-02 + <_> + + 0 -1 218 6.2401412287726998e-04 + + -2.6195651292800903e-01 1.5652139484882355e-01 + <_> + + 0 -1 219 -2.3399210476782173e-05 + + 1.2157510221004486e-01 -3.0320811271667480e-01 + <_> + + 0 -1 220 3.0802030116319656e-02 + + 4.4408731162548065e-02 -6.6609877347946167e-01 + <_> + + 0 -1 221 1.4787449617870152e-04 + + -2.4449509382247925e-01 1.4723050594329834e-01 + <_> + + 0 -1 222 4.8630568198859692e-03 + + -1.1267810314893723e-01 3.2596799731254578e-01 + <_> + + 0 -1 223 6.2191881239414215e-02 + + 5.7439960539340973e-02 -6.4031070470809937e-01 + <_> + + 0 -1 224 1.4668420189991593e-03 + + 9.5356643199920654e-02 -3.3727881312370300e-01 + <_> + + 0 -1 225 -1.4742349776497576e-05 + + 1.9759610295295715e-01 -1.7083899676799774e-01 + <_> + + 0 -1 226 -3.2495670020580292e-02 + + -3.6848729848861694e-01 9.0363331139087677e-02 + <_> + + 0 -1 227 -1.5333830378949642e-03 + + 3.2256379723548889e-01 -1.0416819900274277e-01 + <_> + + 0 -1 228 -2.7998909354209900e-02 + + -4.9097910523414612e-01 8.2653783261775970e-02 + <_> + + 0 -1 229 4.9783890135586262e-03 + + 7.3238030076026917e-02 -4.4057780504226685e-01 + <_> + + 0 -1 230 6.8226028233766556e-03 + + 7.6766029000282288e-02 -4.1460910439491272e-01 + <_> + + 0 -1 231 1.1497880332171917e-02 + + -9.1440111398696899e-02 4.0099748969078064e-01 + <_> + + 0 -1 232 -1.1003069579601288e-02 + + -5.7417541742324829e-01 7.2776727378368378e-02 + <_> + + 0 -1 233 4.9345887964591384e-04 + + -1.3353590667247772e-01 2.4575209617614746e-01 + <_> + + 0 -1 234 2.2130589932203293e-03 + + -1.0753840208053589e-01 3.1632119417190552e-01 + <_> + + 0 -1 235 5.1011620089411736e-03 + + 7.8985318541526794e-02 -4.2948201298713684e-01 + <_> + + 0 -1 236 -3.7305638194084167e-02 + + -6.7921191453933716e-01 4.5049939304590225e-02 + <_> + + 0 -1 237 -6.1271698214113712e-03 + + 2.3062059283256531e-01 -1.4559289813041687e-01 + <_> + + 0 -1 238 7.6517700217664242e-03 + + -9.0355172753334045e-02 4.3072968721389771e-01 + <_> + + 0 -1 239 -1.1280870065093040e-02 + + -4.7850719094276428e-01 7.4674449861049652e-02 + <_> + + 0 -1 240 -1.4724049833603203e-05 + + 1.4459890127182007e-01 -2.2535640001296997e-01 + <_> + + 0 -1 241 -1.9895960576832294e-03 + + -6.1527568101882935e-01 5.4905921220779419e-02 + <_> + + 0 -1 242 1.6876959707587957e-03 + + -9.7619786858558655e-02 3.3004701137542725e-01 + <_> + + 0 -1 243 9.8390737548470497e-03 + + 4.0972411632537842e-02 -7.5515109300613403e-01 + <_> + + 0 -1 244 1.3243829598650336e-03 + + -1.0046280175447464e-01 3.0665108561515808e-01 + <_> + + 0 -1 245 3.1150300055742264e-03 + + 8.9804470539093018e-02 -3.3524599671363831e-01 + <_> + + 0 -1 246 7.3907422120100819e-06 + + -2.2410400211811066e-01 1.3288240134716034e-01 + <_> + + 0 -1 247 3.2559569925069809e-02 + + 5.0113398581743240e-02 -5.4240328073501587e-01 + <_> + + 0 -1 248 -2.9865119140595198e-03 + + 2.8385341167449951e-01 -1.1164219677448273e-01 + <_> + + 0 -1 249 1.6058710170909762e-03 + + -1.2024080008268356e-01 2.9032671451568604e-01 + <_> + + 0 -1 250 2.2018649615347385e-03 + + 7.8110128641128540e-02 -4.3846049904823303e-01 + <_> + + 0 -1 
251 -5.7107508182525635e-03 + + -3.2608801126480103e-01 9.2941299080848694e-02 + <_> + + 0 -1 252 8.9503038907423615e-04 + + -1.3504159450531006e-01 2.2331899404525757e-01 + <_> + + 0 -1 253 7.7259249985218048e-02 + + 7.3221340775489807e-02 -4.1714018583297729e-01 + <_> + + 0 -1 254 -1.0145610198378563e-02 + + -2.7330970764160156e-01 1.4099189639091492e-01 + <_> + + 0 -1 255 -7.0878718361200299e-06 + + 1.2602959573268890e-01 -2.3253719508647919e-01 + <_> + + 0 -1 256 -8.0232005566358566e-03 + + -6.2682849168777466e-01 4.4199578464031219e-02 + <_> + + 0 -1 257 -1.5409339684993029e-03 + + 3.2154878973960876e-01 -9.5819726586341858e-02 + <_> + + 0 -1 258 -1.3815560378134251e-03 + + 2.3909060657024384e-01 -1.0845059901475906e-01 + <_> + + 0 -1 259 -8.5559524595737457e-03 + + -6.2880992889404297e-01 4.6904459595680237e-02 + <_> + + 0 -1 260 1.4967939932830632e-05 + + -1.7331050336360931e-01 1.6265609860420227e-01 + <_> + 68 + -1.7268099784851074e+00 + + <_> + + 0 -1 261 -9.2911375686526299e-03 + + 2.6676508784294128e-01 -4.8681628704071045e-01 + <_> + + 0 -1 262 -1.0201609693467617e-03 + + 2.1469169855117798e-01 -4.2971470952033997e-01 + <_> + + 0 -1 263 1.8099240260198712e-03 + + -4.7085261344909668e-01 1.7293150722980499e-01 + <_> + + 0 -1 264 -6.3195452094078064e-02 + + 5.5868512392044067e-01 -1.1922080069780350e-01 + <_> + + 0 -1 265 1.5157799934968352e-03 + + -3.3087429404258728e-01 1.4256539940834045e-01 + <_> + + 0 -1 266 -3.1134260352700949e-03 + + 3.1897360086441040e-01 -1.5563400089740753e-01 + <_> + + 0 -1 267 6.7187240347266197e-03 + + 1.1308009922504425e-01 -4.6142110228538513e-01 + <_> + + 0 -1 268 -1.4929190001566894e-05 + + 1.1303120106458664e-01 -3.8268089294433594e-01 + <_> + + 0 -1 269 -1.9974811002612114e-03 + + -6.7833811044692993e-01 5.5562671273946762e-02 + <_> + + 0 -1 270 4.4361899199429899e-05 + + -2.1478720009326935e-01 1.7524589598178864e-01 + <_> + + 0 -1 271 -9.4379335641860962e-03 + + -2.9008820652961731e-01 1.0494410246610641e-01 + <_> + + 0 -1 272 1.0263459989801049e-04 + + -3.6809450387954712e-01 1.1580110341310501e-01 + <_> + + 0 -1 273 -4.3512079864740372e-02 + + -5.7967478036880493e-01 4.5160628855228424e-02 + <_> + + 0 -1 274 2.3894330952316523e-03 + + -1.2443830072879791e-01 2.5726899504661560e-01 + <_> + + 0 -1 275 3.6203579511493444e-03 + + 4.8385269939899445e-02 -6.4456540346145630e-01 + <_> + + 0 -1 276 -4.2086638859473169e-04 + + -2.9963639378547668e-01 9.7508132457733154e-02 + <_> + + 0 -1 277 -3.6320161074399948e-02 + + 3.2499030232429504e-01 -1.0373180359601974e-01 + <_> + + 0 -1 278 5.5678240023553371e-03 + + -1.2865519523620605e-01 2.7721390128135681e-01 + <_> + + 0 -1 279 1.4324679505079985e-03 + + 6.3044667243957520e-02 -5.0411659479141235e-01 + <_> + + 0 -1 280 1.2268769787624478e-03 + + -1.7073589563369751e-01 1.7944329977035522e-01 + <_> + + 0 -1 281 4.0125530213117599e-03 + + 7.2100132703781128e-02 -4.1321611404418945e-01 + <_> + + 0 -1 282 4.7377590090036392e-03 + + -9.0100876986980438e-02 3.4303799271583557e-01 + <_> + + 0 -1 283 4.3965759687125683e-03 + + 5.4753091186285019e-02 -5.9175938367843628e-01 + <_> + + 0 -1 284 1.8952810205519199e-03 + + 4.0120709687471390e-02 -6.4907258749008179e-01 + <_> + + 0 -1 285 -1.3425230281427503e-03 + + 3.0321699380874634e-01 -1.1009240150451660e-01 + <_> + + 0 -1 286 -4.6405740082263947e-02 + + -4.6026471257209778e-01 7.0307031273841858e-02 + <_> + + 0 -1 287 2.5875549763441086e-02 + + 3.8987319916486740e-02 -6.4847522974014282e-01 + <_> + + 0 -1 288 1.0986380511894822e-03 + + -1.6458760201931000e-01 
1.8133540451526642e-01 + <_> + + 0 -1 289 -3.9583959733135998e-04 + + 9.7805656492710114e-02 -2.7554351091384888e-01 + <_> + + 0 -1 290 -4.5633990317583084e-02 + + -5.4276019334793091e-01 5.4855771362781525e-02 + <_> + + 0 -1 291 -4.7068470157682896e-03 + + 4.0961420536041260e-01 -6.9687090814113617e-02 + <_> + + 0 -1 292 2.0004810357932001e-04 + + 1.2908969819545746e-01 -2.1091359853744507e-01 + <_> + + 0 -1 293 1.1126570170745254e-03 + + -2.2213070094585419e-01 1.2458589673042297e-01 + <_> + + 0 -1 294 -1.4747029636055231e-03 + + 2.9185178875923157e-01 -9.0756237506866455e-02 + <_> + + 0 -1 295 4.3162931688129902e-03 + + 6.1542909592390060e-02 -5.1068651676177979e-01 + <_> + + 0 -1 296 2.0302709890529513e-04 + + -1.5639910101890564e-01 1.6466440260410309e-01 + <_> + + 0 -1 297 3.4639390651136637e-04 + + 1.0773540288209915e-01 -2.5532799959182739e-01 + <_> + + 0 -1 298 1.5631220303475857e-03 + + -9.5428019762039185e-02 2.5450360774993896e-01 + <_> + + 0 -1 299 5.5476918350905180e-04 + + 7.9774253070354462e-02 -3.0791428685188293e-01 + <_> + + 0 -1 300 2.7690480928868055e-03 + + -9.1900892555713654e-02 3.0198639631271362e-01 + <_> + + 0 -1 301 1.1085179867222905e-03 + + 6.2624886631965637e-02 -4.1680490970611572e-01 + <_> + + 0 -1 302 3.4288389142602682e-03 + + -5.7473558932542801e-02 4.7293519973754883e-01 + <_> + + 0 -1 303 -2.0233790855854750e-03 + + -2.4128660559654236e-01 1.0806660354137421e-01 + <_> + + 0 -1 304 -9.1446418082341552e-04 + + 1.7990960180759430e-01 -1.6031919419765472e-01 + <_> + + 0 -1 305 3.8880690932273865e-02 + + 3.9132621139287949e-02 -6.4085322618484497e-01 + <_> + + 0 -1 306 1.2836069799959660e-03 + + 5.2912048995494843e-02 -4.3914559483528137e-01 + <_> + + 0 -1 307 3.5828219261020422e-03 + + -9.7462162375450134e-02 3.0772930383682251e-01 + <_> + + 0 -1 308 2.3203529417514801e-03 + + -1.0929799824953079e-01 2.6735728979110718e-01 + <_> + + 0 -1 309 1.1978139809798449e-04 + + 1.1623129993677139e-01 -2.3586340248584747e-01 + <_> + + 0 -1 310 -2.8259279206395149e-03 + + -4.1935729980468750e-01 5.7008400559425354e-02 + <_> + + 0 -1 311 2.4410230107605457e-03 + + 4.2706880718469620e-02 -5.3362858295440674e-01 + <_> + + 0 -1 312 2.6899650692939758e-03 + + -1.1351829767227173e-01 2.4779020249843597e-01 + <_> + + 0 -1 313 -3.1081750057637691e-03 + + -2.9488921165466309e-01 8.2543209195137024e-02 + <_> + + 0 -1 314 -6.6210748627781868e-03 + + 2.2958689928054810e-01 -1.1443620175123215e-01 + <_> + + 0 -1 315 4.6786409802734852e-03 + + -1.2875209748744965e-01 2.6777699589729309e-01 + <_> + + 0 -1 316 -1.2973829871043563e-03 + + -2.7280429005622864e-01 9.6471726894378662e-02 + <_> + + 0 -1 317 2.9523740522563457e-03 + + -8.7040692567825317e-02 2.9207450151443481e-01 + <_> + + 0 -1 318 -1.6173559706658125e-03 + + -4.0207850933074951e-01 6.5386466681957245e-02 + <_> + + 0 -1 319 -7.5417757034301758e-02 + + -8.9723330736160278e-01 2.4602690711617470e-02 + <_> + + 0 -1 320 -2.5402200408279896e-03 + + 1.5258650481700897e-01 -1.5025460720062256e-01 + <_> + + 0 -1 321 3.7864660844206810e-03 + + 7.6477207243442535e-02 -3.3881941437721252e-01 + <_> + + 0 -1 322 -1.4005510136485100e-02 + + 4.4426390528678894e-01 -5.9003930538892746e-02 + <_> + + 0 -1 323 5.5956508731469512e-04 + + 7.4007123708724976e-02 -3.5604709386825562e-01 + <_> + + 0 -1 324 2.5946850655600429e-04 + + -2.8126189112663269e-01 8.7399207055568695e-02 + <_> + + 0 -1 325 4.4409232214093208e-03 + + 2.8623659163713455e-02 -7.7284187078475952e-01 + <_> + + 0 -1 326 -2.3343560751527548e-03 + + 3.5460600256919861e-01 
-7.1207538247108459e-02 + <_> + + 0 -1 327 9.7654951969161630e-04 + + -1.0138420015573502e-01 2.2545370459556580e-01 + <_> + + 0 -1 328 -4.3227209243923426e-04 + + -2.1095879375934601e-01 1.2273149937391281e-01 + <_> + 70 + -1.6056820154190063e+00 + + <_> + + 0 -1 329 -1.2480209581553936e-02 + + 2.6112109422683716e-01 -4.7001519799232483e-01 + <_> + + 0 -1 330 3.5450961440801620e-02 + + -2.0008459687232971e-01 4.7718611359596252e-01 + <_> + + 0 -1 331 2.0369330886751413e-03 + + -4.7703158855438232e-01 1.5132640302181244e-01 + <_> + + 0 -1 332 -4.3946420191787183e-05 + + 1.2288480252027512e-01 -5.1796287298202515e-01 + <_> + + 0 -1 333 -3.8480788934975863e-03 + + 4.1113680601119995e-01 -1.4595329761505127e-01 + <_> + + 0 -1 334 -2.8316550888121128e-03 + + 2.8710970282554626e-01 -1.7629599571228027e-01 + <_> + + 0 -1 335 2.5026081129908562e-03 + + 7.9668842256069183e-02 -5.7808011770248413e-01 + <_> + + 0 -1 336 3.0812958721071482e-04 + + 8.2838706672191620e-02 -4.2540180683135986e-01 + <_> + + 0 -1 337 6.1186961829662323e-04 + + 1.3641810417175293e-01 -3.0591419339179993e-01 + <_> + + 0 -1 338 -1.4354350241774227e-05 + + 1.4197489619255066e-01 -2.5681999325752258e-01 + <_> + + 0 -1 339 1.6148330178111792e-03 + + -2.6239329576492310e-01 1.3288390636444092e-01 + <_> + + 0 -1 340 2.0318101160228252e-03 + + 7.5749568641185760e-02 -4.3141460418701172e-01 + <_> + + 0 -1 341 9.5563679933547974e-03 + + -9.1424480080604553e-02 4.0004569292068481e-01 + <_> + + 0 -1 342 -7.8439561184495687e-04 + + -3.6619931459426880e-01 9.1777816414833069e-02 + <_> + + 0 -1 343 -3.9661130867898464e-03 + + 2.3698210716247559e-01 -1.4281649887561798e-01 + <_> + + 0 -1 344 -2.3194469977170229e-03 + + -4.2245340347290039e-01 7.8684106469154358e-02 + <_> + + 0 -1 345 -7.3490202426910400e-02 + + -6.2218552827835083e-01 4.0496870875358582e-02 + <_> + + 0 -1 346 -3.6803178954869509e-03 + + 1.2612029910087585e-01 -2.0990429818630219e-01 + <_> + + 0 -1 347 -4.1019290685653687e-02 + + -8.0316942930221558e-01 2.7993949130177498e-02 + <_> + + 0 -1 348 -4.8213129048235714e-04 + + 1.4825980365276337e-01 -1.7869630455970764e-01 + <_> + + 0 -1 349 -1.6598250716924667e-02 + + 4.1442281007766724e-01 -6.4051687717437744e-02 + <_> + + 0 -1 350 -1.0631670011207461e-03 + + -3.3466520905494690e-01 8.2425996661186218e-02 + <_> + + 0 -1 351 1.8658409826457500e-03 + + -1.3119789958000183e-01 2.3183380067348480e-01 + <_> + + 0 -1 352 -2.5827190838754177e-03 + + 3.8415950536727905e-01 -8.4121666848659515e-02 + <_> + + 0 -1 353 1.7159619601443410e-03 + + 7.6971538364887238e-02 -4.1098991036415100e-01 + <_> + + 0 -1 354 -3.9140181615948677e-03 + + -6.2508618831634521e-01 3.8418460637331009e-02 + <_> + + 0 -1 355 4.2724498780444264e-04 + + 8.6016573011875153e-02 -2.6975229382514954e-01 + <_> + + 0 -1 356 3.3992920070886612e-03 + + -1.0176510363817215e-01 2.7030828595161438e-01 + <_> + + 0 -1 357 -3.6457281559705734e-02 + + -4.9261981248855591e-01 5.5854249745607376e-02 + <_> + + 0 -1 358 1.6230379696935415e-03 + + 5.7567078620195389e-02 -4.2053499817848206e-01 + <_> + + 0 -1 359 4.6655549667775631e-03 + + -9.1158397495746613e-02 3.2095280289649963e-01 + <_> + + 0 -1 360 3.1331549398601055e-03 + + -9.6932657063007355e-02 3.4073451161384583e-01 + <_> + + 0 -1 361 -1.6835830174386501e-03 + + -3.6766248941421509e-01 8.2226082682609558e-02 + <_> + + 0 -1 362 2.7728650718927383e-02 + + 4.0117498487234116e-02 -6.5198391675949097e-01 + <_> + + 0 -1 363 9.5015309751033783e-02 + + 2.3065119981765747e-02 -8.8881981372833252e-01 + <_> + + 0 -1 364 
7.4755616486072540e-02 + + -6.3946872949600220e-02 4.7399708628654480e-01 + <_> + + 0 -1 365 1.6693340614438057e-02 + + 4.6477258205413818e-02 -7.1152418851852417e-01 + <_> + + 0 -1 366 1.2088769581168890e-03 + + -1.1359269917011261e-01 2.2424149513244629e-01 + <_> + + 0 -1 367 -6.1751517932862043e-04 + + -3.1268230080604553e-01 8.5018932819366455e-02 + <_> + + 0 -1 368 8.5786692798137665e-03 + + -1.5559460222721100e-01 1.5640939772129059e-01 + <_> + + 0 -1 369 6.1184767400845885e-04 + + 9.4403937458992004e-02 -2.6520138978958130e-01 + <_> + + 0 -1 370 -3.4570440184324980e-03 + + 1.5146060287952423e-01 -1.6220529377460480e-01 + <_> + + 0 -1 371 1.3953070156276226e-03 + + -9.9996216595172882e-02 2.4998310208320618e-01 + <_> + + 0 -1 372 3.5910680890083313e-03 + + 8.1011682748794556e-02 -3.0081549286842346e-01 + <_> + + 0 -1 373 5.4192831739783287e-03 + + 6.7650042474269867e-02 -3.2355660200119019e-01 + <_> + + 0 -1 374 -1.1379310162737966e-03 + + 1.8887449800968170e-01 -1.2729729712009430e-01 + <_> + + 0 -1 375 9.1047259047627449e-03 + + 1.0160540044307709e-01 -2.2280150651931763e-01 + <_> + + 0 -1 376 6.5050171688199043e-03 + + -7.2986416518688202e-02 3.5770270228385925e-01 + <_> + + 0 -1 377 -1.4676549653813709e-05 + + 1.4693109691143036e-01 -1.7403540015220642e-01 + <_> + + 0 -1 378 -9.4403158873319626e-03 + + -2.6536750793457031e-01 9.6619546413421631e-02 + <_> + + 0 -1 379 -4.2933300137519836e-03 + + 2.5656831264495850e-01 -1.0550209879875183e-01 + <_> + + 0 -1 380 4.3133171275258064e-03 + + 6.5936572849750519e-02 -4.5719939470291138e-01 + <_> + + 0 -1 381 5.8854468166828156e-02 + + 6.7918263375759125e-02 -3.3078071475028992e-01 + <_> + + 0 -1 382 -2.8407620266079903e-03 + + 2.3953500390052795e-01 -9.2092156410217285e-02 + <_> + + 0 -1 383 9.6359942108392715e-04 + + -1.0982380062341690e-01 2.6462998986244202e-01 + <_> + + 0 -1 384 -1.4724590073456056e-05 + + 1.1111160367727280e-01 -2.2704580426216125e-01 + <_> + + 0 -1 385 -8.0675468780100346e-04 + + -3.6335140466690063e-01 7.8122653067111969e-02 + <_> + + 0 -1 386 7.3296198388561606e-04 + + -1.5605129301548004e-01 1.5184900164604187e-01 + <_> + + 0 -1 387 6.3753738068044186e-03 + + -7.1957953274250031e-02 2.9723879694938660e-01 + <_> + + 0 -1 388 4.6390579082071781e-03 + + 3.5969600081443787e-02 -6.1132347583770752e-01 + <_> + + 0 -1 389 -7.1079272311180830e-04 + + -2.8806841373443604e-01 6.9314628839492798e-02 + <_> + + 0 -1 390 2.9162289574742317e-03 + + -7.5968459248542786e-02 3.2681688666343689e-01 + <_> + + 0 -1 391 -1.7853140830993652e-02 + + 4.4206309318542480e-01 -4.8174031078815460e-02 + <_> + + 0 -1 392 8.3874985575675964e-03 + + 4.8913899809122086e-02 -5.4415327310562134e-01 + <_> + + 0 -1 393 2.9458320568664931e-05 + + -2.1131239831447601e-01 1.0629370063543320e-01 + <_> + + 0 -1 394 -9.8192706704139709e-02 + + 3.5318240523338318e-01 -6.9296866655349731e-02 + <_> + + 0 -1 395 4.6140368795022368e-04 + + 9.6270777285099030e-02 -2.5811928510665894e-01 + <_> + + 0 -1 396 -2.4016610404942185e-04 + + -2.2976429760456085e-01 9.9984891712665558e-02 + <_> + + 0 -1 397 3.7882480770349503e-02 + + -1.0365439951419830e-01 2.3164770007133484e-01 + <_> + + 0 -1 398 3.2621581340208650e-04 + + 9.7933940589427948e-02 -2.3689700663089752e-01 + <_> + 85 + -1.5173089504241943e+00 + + <_> + + 0 -1 399 -3.6744121462106705e-02 + + 3.4079340100288391e-01 -3.1779891252517700e-01 + <_> + + 0 -1 400 2.1955010015517473e-03 + + -2.8729590773582458e-01 2.5869798660278320e-01 + <_> + + 0 -1 401 8.3034839481115341e-03 + + -2.1800449490547180e-01 
2.6759269833564758e-01 + <_> + + 0 -1 402 2.6289420202374458e-03 + + -3.6006081104278564e-01 1.4639839529991150e-01 + <_> + + 0 -1 403 1.9458869937807322e-03 + + 1.3677720725536346e-01 -4.2058759927749634e-01 + <_> + + 0 -1 404 -2.1704390645027161e-02 + + 4.8903319239616394e-01 -9.8091572523117065e-02 + <_> + + 0 -1 405 4.2956420220434666e-03 + + -2.7825561165809631e-01 1.5712629258632660e-01 + <_> + + 0 -1 406 4.9894629046320915e-04 + + 1.1003810167312622e-01 -3.3779421448707581e-01 + <_> + + 0 -1 407 2.4652799591422081e-02 + + 4.5820660889148712e-02 -5.4710537195205688e-01 + <_> + + 0 -1 408 -2.3075740784406662e-02 + + -4.9801421165466309e-01 6.7044779658317566e-02 + <_> + + 0 -1 409 1.1991280131042004e-02 + + -7.0877023041248322e-02 4.8294249176979065e-01 + <_> + + 0 -1 410 1.5430679544806480e-02 + + -6.5949738025665283e-02 4.5236849784851074e-01 + <_> + + 0 -1 411 -4.5555769465863705e-03 + + -4.4665691256523132e-01 6.7877657711505890e-02 + <_> + + 0 -1 412 -4.4582979753613472e-03 + + 3.3656919002532959e-01 -9.4792358577251434e-02 + <_> + + 0 -1 413 1.3494009908754379e-04 + + -3.0288851261138916e-01 1.0293830186128616e-01 + <_> + + 0 -1 414 -4.2500188574194908e-03 + + 4.2550128698348999e-01 -7.2956383228302002e-02 + <_> + + 0 -1 415 -1.4293759595602751e-03 + + -3.0116760730743408e-01 9.0039253234863281e-02 + <_> + + 0 -1 416 -6.3978550024330616e-03 + + 4.1943550109863281e-01 -7.9320870339870453e-02 + <_> + + 0 -1 417 2.6083870325237513e-03 + + 8.3598926663398743e-02 -4.1897168755531311e-01 + <_> + + 0 -1 418 8.6870808154344559e-03 + + -6.3015699386596680e-02 5.2644741535186768e-01 + <_> + + 0 -1 419 -1.0380990570411086e-03 + + -3.6220151185989380e-01 8.0301038920879364e-02 + <_> + + 0 -1 420 4.4070050120353699e-01 + + 3.4913059324026108e-02 -7.2764492034912109e-01 + <_> + + 0 -1 421 3.3689520787447691e-03 + + 5.7332780212163925e-02 -4.8633271455764771e-01 + <_> + + 0 -1 422 1.7443710239604115e-03 + + -1.0994660109281540e-01 2.7023580670356750e-01 + <_> + + 0 -1 423 5.3788698278367519e-04 + + -2.7439421415328979e-01 1.0063380002975464e-01 + <_> + + 0 -1 424 1.0072899749502540e-03 + + 1.0756769776344299e-01 -2.3221600055694580e-01 + <_> + + 0 -1 425 -8.2518812268972397e-03 + + -6.5216302871704102e-01 3.5704229027032852e-02 + <_> + + 0 -1 426 3.5490558948367834e-03 + + -8.4254868328571320e-02 3.1767430901527405e-01 + <_> + + 0 -1 427 -1.1033359915018082e-02 + + 4.1271620988845825e-01 -6.2587052583694458e-02 + <_> + + 0 -1 428 3.2278439030051231e-03 + + 7.1266986429691315e-02 -4.1172251105308533e-01 + <_> + + 0 -1 429 1.7540389299392700e-01 + + 3.4958980977535248e-02 -6.3775068521499634e-01 + <_> + + 0 -1 430 -4.8067080206237733e-04 + + -2.4503110349178314e-01 9.8930649459362030e-02 + <_> + + 0 -1 431 -1.8284550169482827e-03 + + 1.3486519455909729e-01 -1.9799900054931641e-01 + <_> + + 0 -1 432 1.7096720403060317e-03 + + -1.0525950044393539e-01 2.1005709469318390e-01 + <_> + + 0 -1 433 3.9468301110900939e-04 + + 8.0952547490596771e-02 -2.7405399084091187e-01 + <_> + + 0 -1 434 2.3097719531506300e-03 + + 1.2338220328092575e-01 -1.9958800077438354e-01 + <_> + + 0 -1 435 3.1529190018773079e-03 + + -1.0612549632787704e-01 2.2089600563049316e-01 + <_> + + 0 -1 436 -1.9097010372206569e-03 + + -2.5094708800315857e-01 8.7022580206394196e-02 + <_> + + 0 -1 437 -1.2370609911158681e-03 + + 3.0760520696640015e-01 -7.5937293469905853e-02 + <_> + + 0 -1 438 3.7081091431900859e-04 + + -1.6065080463886261e-01 1.3480199873447418e-01 + <_> + + 0 -1 439 3.4268848598003387e-02 + + 3.5260949283838272e-02 
-6.3547158241271973e-01 + <_> + + 0 -1 440 4.6664681285619736e-03 + + -5.2494861185550690e-02 4.3242320418357849e-01 + <_> + + 0 -1 441 1.0423569940030575e-02 + + 5.1612429320812225e-02 -5.0745230913162231e-01 + <_> + + 0 -1 442 1.1215180158615112e-02 + + -3.8614250719547272e-02 5.7645928859710693e-01 + <_> + + 0 -1 443 -7.3029109444178175e-06 + + 1.2052319943904877e-01 -1.7274369299411774e-01 + <_> + + 0 -1 444 -4.9072802066802979e-03 + + -3.4818550944328308e-01 5.9116441756486893e-02 + <_> + + 0 -1 445 1.9488829420879483e-03 + + -8.8861227035522461e-02 2.4020899832248688e-01 + <_> + + 0 -1 446 1.3313010276760906e-04 + + -1.4657719433307648e-01 1.9929920136928558e-01 + <_> + + 0 -1 447 -1.4298240421339869e-03 + + -3.9005228877067566e-01 5.9909418225288391e-02 + <_> + + 0 -1 448 -6.4831459894776344e-03 + + 1.8141369521617889e-01 -1.1655449867248535e-01 + <_> + + 0 -1 449 7.2958500823006034e-06 + + -1.8219240009784698e-01 1.1812780052423477e-01 + <_> + + 0 -1 450 4.1690681246109307e-04 + + 1.0591679811477661e-01 -2.0353710651397705e-01 + <_> + + 0 -1 451 5.1982058212161064e-03 + + -3.5962641239166260e-02 6.0264211893081665e-01 + <_> + + 0 -1 452 -4.0649957954883575e-03 + + 2.0696419477462769e-01 -9.8599843680858612e-02 + <_> + + 0 -1 453 -4.7734950203448534e-04 + + -2.4629549682140350e-01 9.3174271285533905e-02 + <_> + + 0 -1 454 5.2415160462260246e-03 + + 3.6528520286083221e-02 -5.4934787750244141e-01 + <_> + + 0 -1 455 3.7873629480600357e-03 + + -5.7597089558839798e-02 3.8733980059623718e-01 + <_> + + 0 -1 456 -1.4434250260819681e-05 + + 1.1292859911918640e-01 -1.7447079718112946e-01 + <_> + + 0 -1 457 4.2011599987745285e-02 + + -4.6556860208511353e-02 4.5454800128936768e-01 + <_> + + 0 -1 458 7.9663433134555817e-03 + + 4.2258739471435547e-02 -5.3702521324157715e-01 + <_> + + 0 -1 459 5.3092982852831483e-04 + + -9.7918719053268433e-02 2.1795919537544250e-01 + <_> + + 0 -1 460 5.2906107157468796e-04 + + 7.7961057424545288e-02 -2.8867539763450623e-01 + <_> + + 0 -1 461 -1.9556249678134918e-01 + + -7.6475739479064941e-01 2.7276000007987022e-02 + <_> + + 0 -1 462 -1.1559950187802315e-02 + + 3.3526000380516052e-01 -6.3614986836910248e-02 + <_> + + 0 -1 463 -1.4005659520626068e-01 + + -7.6232051849365234e-01 2.8024470433592796e-02 + <_> + + 0 -1 464 4.4643289584200829e-05 + + -2.0320929586887360e-01 9.9391698837280273e-02 + <_> + + 0 -1 465 3.9411801844835281e-03 + + 4.9936279654502869e-02 -3.7584540247917175e-01 + <_> + + 0 -1 466 -4.5965691097080708e-03 + + 3.3031210303306580e-01 -6.3809931278228760e-02 + <_> + + 0 -1 467 -6.9790292764082551e-04 + + 1.6093710064888000e-01 -1.3192920386791229e-01 + <_> + + 0 -1 468 6.1886821640655398e-04 + + 7.4621193110942841e-02 -3.3021458983421326e-01 + <_> + + 0 -1 469 -3.2755140215158463e-02 + + -4.0643560886383057e-01 4.9308661371469498e-02 + <_> + + 0 -1 470 3.3697509206831455e-03 + + 4.0627099573612213e-02 -4.9757328629493713e-01 + <_> + + 0 -1 471 3.7391821388155222e-04 + + -1.4931799471378326e-01 1.6517969965934753e-01 + <_> + + 0 -1 472 -4.0217190980911255e-03 + + 2.9531970620155334e-01 -7.6642103493213654e-02 + <_> + + 0 -1 473 -7.2943832492455840e-04 + + -2.7355810999870300e-01 7.9243987798690796e-02 + <_> + + 0 -1 474 -5.7726111263036728e-03 + + 3.4741240739822388e-01 -7.6087206602096558e-02 + <_> + + 0 -1 475 -2.1122458856552839e-03 + + 1.7290510237216949e-01 -1.2444470077753067e-01 + <_> + + 0 -1 476 4.4956691563129425e-03 + + 3.0218729749321938e-02 -7.4003338813781738e-01 + <_> + + 0 -1 477 -1.1419389629736543e-03 + + -2.3494489490985870e-01 
7.6911546289920807e-02 + <_> + + 0 -1 478 2.7658098842948675e-03 + + -9.1666661202907562e-02 2.1009710431098938e-01 + <_> + + 0 -1 479 -7.2281848406419158e-04 + + -2.5587469339370728e-01 7.5378142297267914e-02 + <_> + + 0 -1 480 1.8604539800435305e-03 + + -9.4511069357395172e-02 1.9726920127868652e-01 + <_> + + 0 -1 481 -2.8568008565343916e-04 + + -2.1073310077190399e-01 9.7290039062500000e-02 + <_> + + 0 -1 482 -3.8796100765466690e-02 + + -7.8724592924118042e-01 2.4410309270024300e-02 + <_> + + 0 -1 483 -1.2119869701564312e-02 + + 3.6466810107231140e-01 -5.7907499372959137e-02 + <_> + 93 + -1.6563049554824829e+00 + + <_> + + 0 -1 484 5.6008538231253624e-03 + + -3.8491588830947876e-01 3.3817461133003235e-01 + <_> + + 0 -1 485 -3.7205789703875780e-03 + + 2.4614119529724121e-01 -3.0673781037330627e-01 + <_> + + 0 -1 486 -2.5333440862596035e-03 + + 1.2531200051307678e-01 -4.2720189690589905e-01 + <_> + + 0 -1 487 -7.3425087612122297e-04 + + 1.3314330577850342e-01 -3.5111570358276367e-01 + <_> + + 0 -1 488 -1.4792960428167135e-04 + + 1.2545309960842133e-01 -3.8591191172599792e-01 + <_> + + 0 -1 489 -4.8976339399814606e-02 + + 3.6456748843193054e-01 -1.1494780331850052e-01 + <_> + + 0 -1 490 1.0917349718511105e-03 + + 7.9005338251590729e-02 -4.1399830579757690e-01 + <_> + + 0 -1 491 5.4457997903227806e-03 + + -1.1921840161085129e-01 3.3085560798645020e-01 + <_> + + 0 -1 492 1.5979419695213437e-03 + + 4.1181199252605438e-02 -5.5028229951858521e-01 + <_> + + 0 -1 493 -1.3023250503465533e-03 + + 8.2839436829090118e-02 -3.5719320178031921e-01 + <_> + + 0 -1 494 4.8810569569468498e-04 + + -2.0928630232810974e-01 1.4972810447216034e-01 + <_> + + 0 -1 495 2.1033850498497486e-03 + + 5.1839418709278107e-02 -6.1099958419799805e-01 + <_> + + 0 -1 496 1.1984360404312611e-02 + + 4.1022349148988724e-02 -5.8985722064971924e-01 + <_> + + 0 -1 497 -1.1898590251803398e-02 + + 4.5844998955726624e-01 -6.4714707434177399e-02 + <_> + + 0 -1 498 5.3713661618530750e-03 + + -6.1560470610857010e-02 4.1204369068145752e-01 + <_> + + 0 -1 499 4.3421140871942043e-03 + + 6.0501661151647568e-02 -4.8703390359878540e-01 + <_> + + 0 -1 500 6.6142519935965538e-03 + + 4.6873189508914948e-02 -5.0346171855926514e-01 + <_> + + 0 -1 501 1.2339729582890868e-03 + + -8.1538438796997070e-02 3.0428299307823181e-01 + <_> + + 0 -1 502 -1.2975660152733326e-02 + + -4.7834330797195435e-01 4.8681490123271942e-02 + <_> + + 0 -1 503 -1.7806360265240073e-03 + + 3.7698730826377869e-01 -6.8126037716865540e-02 + <_> + + 0 -1 504 7.8339744359254837e-03 + + 5.4501280188560486e-02 -4.6738588809967041e-01 + <_> + + 0 -1 505 -6.0113701038062572e-03 + + 5.4870051145553589e-01 -4.4434640556573868e-02 + <_> + + 0 -1 506 -2.0694560371339321e-03 + + -3.7755548954010010e-01 6.4383402466773987e-02 + <_> + + 0 -1 507 4.7843591310083866e-03 + + 4.6252150088548660e-02 -5.2633982896804810e-01 + <_> + + 0 -1 508 -6.2808818183839321e-03 + + 3.9451861381530762e-01 -6.9051302969455719e-02 + <_> + + 0 -1 509 1.6099009662866592e-03 + + -1.0316190123558044e-01 2.7321669459342957e-01 + <_> + + 0 -1 510 -8.2392559852451086e-04 + + -2.8039410710334778e-01 8.4601573646068573e-02 + <_> + + 0 -1 511 -1.0123319923877716e-02 + + 3.3635950088500977e-01 -6.1322949826717377e-02 + <_> + + 0 -1 512 1.0525720193982124e-02 + + 4.6165600419044495e-02 -5.1672130823135376e-01 + <_> + + 0 -1 513 -2.6774499565362930e-02 + + -5.0325971841812134e-01 3.9857819676399231e-02 + <_> + + 0 -1 514 4.0248301811516285e-03 + + -6.1501380056142807e-02 3.6659809947013855e-01 + <_> + + 0 -1 515 
-4.6271650353446603e-04 + + -2.6439830660820007e-01 8.1311263144016266e-02 + <_> + + 0 -1 516 -5.1834900659741834e-05 + + 1.1154399812221527e-01 -2.0269370079040527e-01 + <_> + + 0 -1 517 4.8874281346797943e-03 + + -6.9644987583160400e-02 3.3612030744552612e-01 + <_> + + 0 -1 518 1.2638230621814728e-01 + + 3.6813639104366302e-02 -6.5849918127059937e-01 + <_> + + 0 -1 519 -8.0248164013028145e-03 + + 4.6601921319961548e-01 -4.8885859549045563e-02 + <_> + + 0 -1 520 -1.1518909595906734e-03 + + -4.0466758608818054e-01 5.8572851121425629e-02 + <_> + + 0 -1 521 9.8190037533640862e-04 + + -1.3197229802608490e-01 1.7744350433349609e-01 + <_> + + 0 -1 522 -1.9447980448603630e-02 + + -6.8489527702331543e-01 3.3834591507911682e-02 + <_> + + 0 -1 523 -7.2442039709130768e-06 + + 1.1553110182285309e-01 -1.8726129829883575e-01 + <_> + + 0 -1 524 -1.7039060592651367e-02 + + -3.5105291008949280e-01 6.7737713456153870e-02 + <_> + + 0 -1 525 1.1186580173671246e-02 + + -9.3420043587684631e-02 2.1077099442481995e-01 + <_> + + 0 -1 526 7.6585268834605813e-04 + + 6.5965756773948669e-02 -3.2127881050109863e-01 + <_> + + 0 -1 527 1.4231950626708567e-04 + + -1.5460130572319031e-01 1.3757640123367310e-01 + <_> + + 0 -1 528 -5.5553209967911243e-03 + + 3.1319350004196167e-01 -6.4753532409667969e-02 + <_> + + 0 -1 529 1.2308239820413291e-04 + + 9.7666621208190918e-02 -2.2251069545745850e-01 + <_> + + 0 -1 530 -1.6092039877548814e-03 + + -3.6215591430664062e-01 6.4452558755874634e-02 + <_> + + 0 -1 531 -1.5626100357621908e-03 + + 2.2588780522346497e-01 -9.5551103353500366e-02 + <_> + + 0 -1 532 -5.0116342026740313e-04 + + -2.2289219498634338e-01 8.9174531400203705e-02 + <_> + + 0 -1 533 3.7322030402719975e-04 + + 9.1969013214111328e-02 -2.1129919588565826e-01 + <_> + + 0 -1 534 -2.2882660850882530e-03 + + 3.8989049196243286e-01 -5.3455859422683716e-02 + <_> + + 0 -1 535 -4.6884030103683472e-02 + + -6.2357091903686523e-01 3.2194521278142929e-02 + <_> + + 0 -1 536 1.8901260336861014e-03 + + -7.2615146636962891e-02 2.7420088648796082e-01 + <_> + + 0 -1 537 1.5805330127477646e-02 + + 2.8601830825209618e-02 -6.9608169794082642e-01 + <_> + + 0 -1 538 3.2644178718328476e-02 + + -4.0772251784801483e-02 5.0873398780822754e-01 + <_> + + 0 -1 539 6.5482832724228501e-04 + + 8.5724912583827972e-02 -2.7580630779266357e-01 + <_> + + 0 -1 540 -1.1142930015921593e-02 + + 8.7326012551784515e-02 -2.0914819836616516e-01 + <_> + + 0 -1 541 -5.8072229148820043e-04 + + -2.9471421241760254e-01 6.6337890923023224e-02 + <_> + + 0 -1 542 -7.4414577102288604e-04 + + 1.8017959594726562e-01 -1.0654629766941071e-01 + <_> + + 0 -1 543 7.6460661366581917e-03 + + -6.3608147203922272e-02 3.1582340598106384e-01 + <_> + + 0 -1 544 3.2617211341857910e-02 + + 3.2606441527605057e-02 -6.0541188716888428e-01 + <_> + + 0 -1 545 -3.4527231007814407e-02 + + -5.9770858287811279e-01 2.7888769283890724e-02 + <_> + + 0 -1 546 3.2211719080805779e-03 + + -4.9183920025825500e-02 4.0305620431900024e-01 + <_> + + 0 -1 547 -4.1549839079380035e-04 + + 1.3533140718936920e-01 -1.5845330059528351e-01 + <_> + + 0 -1 548 2.5140501093119383e-03 + + 6.3218571245670319e-02 -3.0768528580665588e-01 + <_> + + 0 -1 549 -2.0818209648132324e-01 + + -7.5750261545181274e-01 2.2695960476994514e-02 + <_> + + 0 -1 550 -2.6067279279232025e-02 + + -7.4959957599639893e-01 1.9375480711460114e-02 + <_> + + 0 -1 551 -5.8264029212296009e-04 + + 9.4658233225345612e-02 -1.9919820129871368e-01 + <_> + + 0 -1 552 -3.2769259996712208e-03 + + 1.6214330494403839e-01 -1.2322030216455460e-01 + <_> + + 0 -1 
553 1.3998829526826739e-03 + + -1.0849200189113617e-01 2.3151659965515137e-01 + <_> + + 0 -1 554 -1.2055980041623116e-02 + + -2.4002850055694580e-01 9.3272961676120758e-02 + <_> + + 0 -1 555 3.1805539038032293e-03 + + 7.6264120638370514e-02 -2.5435069203376770e-01 + <_> + + 0 -1 556 -1.0693799704313278e-03 + + 2.2258889675140381e-01 -9.0730242431163788e-02 + <_> + + 0 -1 557 -2.9467688873410225e-03 + + -3.4242698550224304e-01 6.0581039637327194e-02 + <_> + + 0 -1 558 8.8108901400119066e-04 + + -7.8326202929019928e-02 2.6911988854408264e-01 + <_> + + 0 -1 559 2.8118939371779561e-04 + + 9.8370827734470367e-02 -2.1947909891605377e-01 + <_> + + 0 -1 560 -1.8574869260191917e-02 + + 2.6729720830917358e-01 -7.1240752935409546e-02 + <_> + + 0 -1 561 -2.4810349568724632e-02 + + -6.8322032690048218e-01 2.9446309432387352e-02 + <_> + + 0 -1 562 2.8904930222779512e-03 + + 7.6161012053489685e-02 -2.4025200307369232e-01 + <_> + + 0 -1 563 3.5410430282354355e-03 + + -1.0742089897394180e-01 1.8509419262409210e-01 + <_> + + 0 -1 564 -8.4244477329775691e-04 + + 1.8727229535579681e-01 -1.1407770216464996e-01 + <_> + + 0 -1 565 -2.5338360574096441e-03 + + -3.5870191454887390e-01 5.1251661032438278e-02 + <_> + + 0 -1 566 1.9654980860650539e-03 + + -1.4064720273017883e-01 1.3041019439697266e-01 + <_> + + 0 -1 567 3.1574100255966187e-01 + + 2.9550969600677490e-02 -6.3157892227172852e-01 + <_> + + 0 -1 568 -2.9846638790331781e-04 + + -2.2911080718040466e-01 7.8875422477722168e-02 + <_> + + 0 -1 569 -1.1545480042695999e-01 + + -8.1895941495895386e-01 2.2261450067162514e-02 + <_> + + 0 -1 570 -3.5817299038171768e-02 + + -3.0612939596176147e-01 6.0644190758466721e-02 + <_> + + 0 -1 571 1.7071690410375595e-02 + + -6.1134841293096542e-02 3.2152679562568665e-01 + <_> + + 0 -1 572 -2.1385080181062222e-03 + + -5.4798161983489990e-01 3.8667369633913040e-02 + <_> + + 0 -1 573 6.5424457192420959e-02 + + 1.7884260043501854e-02 -8.5628831386566162e-01 + <_> + + 0 -1 574 -1.3419929891824722e-02 + + 3.0995100736618042e-01 -6.7559666931629181e-02 + <_> + + 0 -1 575 1.8939709290862083e-02 + + 2.8729729354381561e-02 -7.5338190793991089e-01 + <_> + + 0 -1 576 -2.9120460152626038e-02 + + -7.3594617843627930e-01 2.0359549671411514e-02 + <_> + 79 + -1.5920439958572388e+00 + + <_> + + 0 -1 577 -1.3419030234217644e-02 + + 3.0538010597229004e-01 -4.1782331466674805e-01 + <_> + + 0 -1 578 1.7404999816790223e-03 + + -2.7101579308509827e-01 3.5409560799598694e-01 + <_> + + 0 -1 579 7.7174860052764416e-03 + + -3.1271371245384216e-01 2.1189980208873749e-01 + <_> + + 0 -1 580 -1.4514879694615956e-05 + + 1.6157090663909912e-01 -3.3522731065750122e-01 + <_> + + 0 -1 581 -1.4871519852022175e-05 + + 1.4571620523929596e-01 -2.9369521141052246e-01 + <_> + + 0 -1 582 1.5004149463493377e-04 + + -4.0149879455566406e-01 1.0407949984073639e-01 + <_> + + 0 -1 583 1.8634879961609840e-03 + + 4.9062840640544891e-02 -6.5208268165588379e-01 + <_> + + 0 -1 584 -2.9590800404548645e-03 + + 2.8804430365562439e-01 -1.3293409347534180e-01 + <_> + + 0 -1 585 3.3067780896089971e-04 + + 3.9615370333194733e-02 -4.1540861129760742e-01 + <_> + + 0 -1 586 -1.6816710121929646e-03 + + 1.3032579421997070e-01 -2.3237510025501251e-01 + <_> + + 0 -1 587 3.4896740689873695e-03 + + 6.8852916359901428e-02 -4.7176009416580200e-01 + <_> + + 0 -1 588 1.6204500570893288e-03 + + -1.0996960103511810e-01 3.4887188673019409e-01 + <_> + + 0 -1 589 1.9125849939882755e-04 + + -2.0317320525646210e-01 1.4775620400905609e-01 + <_> + + 0 -1 590 2.2485259920358658e-02 + + 5.1929730921983719e-02 
-5.4815691709518433e-01 + <_> + + 0 -1 591 1.0035949759185314e-02 + + -1.0943319648504257e-01 2.6000571250915527e-01 + <_> + + 0 -1 592 4.0091630071401596e-02 + + 3.8657050579786301e-02 -7.4724602699279785e-01 + <_> + + 0 -1 593 1.5319019556045532e-02 + + 2.8579369187355042e-02 -7.7717798948287964e-01 + <_> + + 0 -1 594 9.0913427993655205e-04 + + -1.5049549937248230e-01 1.7363379895687103e-01 + <_> + + 0 -1 595 -6.0226190835237503e-03 + + -4.7704491019248962e-01 5.8185670524835587e-02 + <_> + + 0 -1 596 7.8066787682473660e-04 + + -1.6349339485168457e-01 1.6236920654773712e-01 + <_> + + 0 -1 597 -1.1492020450532436e-02 + + -5.6185477972030640e-01 4.6009611338376999e-02 + <_> + + 0 -1 598 8.9691327884793282e-03 + + 6.6570483148097992e-02 -3.3824840188026428e-01 + <_> + + 0 -1 599 7.2241941234096885e-04 + + -1.2882669270038605e-01 1.9002969563007355e-01 + <_> + + 0 -1 600 1.4879239643050823e-05 + + -2.1765929460525513e-01 1.3151009380817413e-01 + <_> + + 0 -1 601 8.7159732356667519e-03 + + 4.8188239336013794e-02 -5.2367717027664185e-01 + <_> + + 0 -1 602 -1.3809900265187025e-03 + + -3.1734630465507507e-01 6.7012362182140350e-02 + <_> + + 0 -1 603 1.4004110358655453e-02 + + -7.2155177593231201e-02 3.4900391101837158e-01 + <_> + + 0 -1 604 -1.2883460149168968e-02 + + -5.9674298763275146e-01 3.9219990372657776e-02 + <_> + + 0 -1 605 9.9220760166645050e-03 + + -7.3617048561573029e-02 3.5491651296615601e-01 + <_> + + 0 -1 606 -1.0360360145568848e-02 + + -4.9655780196189880e-01 5.4516721516847610e-02 + <_> + + 0 -1 607 5.9103948296979070e-04 + + -9.1649092733860016e-02 2.3738409578800201e-01 + <_> + + 0 -1 608 1.4986419955675956e-05 + + -1.5624360740184784e-01 1.4216689765453339e-01 + <_> + + 0 -1 609 6.2526292167603970e-03 + + 4.6570941805839539e-02 -4.3861261010169983e-01 + <_> + + 0 -1 610 9.0722978115081787e-02 + + 2.3544119670987129e-02 -7.5557678937911987e-01 + <_> + + 0 -1 611 1.2880839640274644e-03 + + -1.0999819636344910e-01 1.9954189658164978e-01 + <_> + + 0 -1 612 -5.3202832350507379e-04 + + -2.3681020736694336e-01 9.4349831342697144e-02 + <_> + + 0 -1 613 1.4669039519503713e-03 + + -6.0417938977479935e-02 3.5437929630279541e-01 + <_> + + 0 -1 614 2.5929270312190056e-02 + + 3.0205380171537399e-02 -7.1175122261047363e-01 + <_> + + 0 -1 615 -7.2257839143276215e-02 + + -7.6830059289932251e-01 2.2078540176153183e-02 + <_> + + 0 -1 616 -2.5999830104410648e-03 + + 2.2878250479698181e-01 -9.2575646936893463e-02 + <_> + + 0 -1 617 4.2036110162734985e-01 + + 3.4129150211811066e-02 -6.3944667577743530e-01 + <_> + + 0 -1 618 -2.1722039673477411e-03 + + -2.0458799600601196e-01 9.6727348864078522e-02 + <_> + + 0 -1 619 -1.8573250621557236e-02 + + -7.2321742773056030e-01 2.6587400585412979e-02 + <_> + + 0 -1 620 2.1321140229701996e-03 + + -7.9263173043727875e-02 2.9004418849945068e-01 + <_> + + 0 -1 621 1.4585970347980037e-05 + + -1.5812200307846069e-01 1.2857919931411743e-01 + <_> + + 0 -1 622 -2.5919941067695618e-01 + + -8.3206391334533691e-01 2.1327629685401917e-02 + <_> + + 0 -1 623 -1.2713880278170109e-02 + + -4.8670661449432373e-01 3.5282909870147705e-02 + <_> + + 0 -1 624 2.1182969212532043e-03 + + -4.8141859471797943e-02 4.3498820066452026e-01 + <_> + + 0 -1 625 4.9225408583879471e-03 + + 5.9389010071754456e-02 -3.5719910264015198e-01 + <_> + + 0 -1 626 7.1720690466463566e-03 + + -7.2721220552921295e-02 3.1716778874397278e-01 + <_> + + 0 -1 627 1.5319329686462879e-03 + + 7.6105281710624695e-02 -2.9826408624649048e-01 + <_> + + 0 -1 628 -2.6141680777072906e-02 + + -4.8129829764366150e-01 
4.1991200298070908e-02 + <_> + + 0 -1 629 -7.1861818469187710e-06 + + 1.0385909676551819e-01 -2.5540891289710999e-01 + <_> + + 0 -1 630 -5.8513309340924025e-04 + + 2.1552430093288422e-01 -1.0446780174970627e-01 + <_> + + 0 -1 631 7.3564669582992792e-04 + + 8.2850307226181030e-02 -2.3229689896106720e-01 + <_> + + 0 -1 632 -4.4216000242158771e-04 + + 1.9849689304828644e-01 -1.1084359884262085e-01 + <_> + + 0 -1 633 6.6545000299811363e-03 + + 2.9844839125871658e-02 -6.3819402456283569e-01 + <_> + + 0 -1 634 -1.4856060261081439e-05 + + 1.0647810250520706e-01 -1.6304740309715271e-01 + <_> + + 0 -1 635 4.4933347962796688e-03 + + -5.8312181383371353e-02 3.2200211286544800e-01 + <_> + + 0 -1 636 3.8110970053821802e-03 + + 7.1237437427043915e-02 -2.7149480581283569e-01 + <_> + + 0 -1 637 -3.8309019058942795e-02 + + -6.2387478351593018e-01 2.9790399596095085e-02 + <_> + + 0 -1 638 -2.5534629821777344e-03 + + 2.0947620272636414e-01 -9.3472570180892944e-02 + <_> + + 0 -1 639 -2.9908109354437329e-05 + + 1.4771899580955505e-01 -1.2858720123767853e-01 + <_> + + 0 -1 640 2.0549520850181580e-03 + + -9.3603983521461487e-02 2.1911169588565826e-01 + <_> + + 0 -1 641 3.3064800663851202e-04 + + -1.4430660009384155e-01 1.6905060410499573e-01 + <_> + + 0 -1 642 4.0969369001686573e-04 + + 8.9844956994056702e-02 -2.1793210506439209e-01 + <_> + + 0 -1 643 -5.1680381875485182e-04 + + -2.7330860495567322e-01 7.2490707039833069e-02 + <_> + + 0 -1 644 -1.2285299599170685e-02 + + -5.7899951934814453e-01 2.8828129172325134e-02 + <_> + + 0 -1 645 1.4923219569027424e-03 + + -8.9748427271842957e-02 2.1315790712833405e-01 + <_> + + 0 -1 646 3.7809570785611868e-03 + + 5.6869130581617355e-02 -3.2580479979515076e-01 + <_> + + 0 -1 647 -1.3630799949169159e-01 + + -5.1958292722702026e-01 3.4014869481325150e-02 + <_> + + 0 -1 648 2.1192250773310661e-02 + + -5.9815749526023865e-02 4.3134000897407532e-01 + <_> + + 0 -1 649 -2.2501780185848475e-03 + + -3.2725110650062561e-01 6.9494038820266724e-02 + <_> + + 0 -1 650 -1.3309439644217491e-02 + + 5.5684721469879150e-01 -3.8055110722780228e-02 + <_> + + 0 -1 651 -4.8674400895833969e-02 + + 3.7503889203071594e-01 -4.8045299947261810e-02 + <_> + + 0 -1 652 -1.4651560377387796e-05 + + 9.3043543398380280e-02 -2.2984559834003448e-01 + <_> + + 0 -1 653 -7.7605661936104298e-03 + + 3.8858211040496826e-01 -5.4669309407472610e-02 + <_> + + 0 -1 654 2.4429330602288246e-02 + + 4.5898649841547012e-02 -5.1061111688613892e-01 + <_> + + 0 -1 655 -2.1317049686331302e-04 + + -2.0513610541820526e-01 1.0507310181856155e-01 + <_> + 105 + -1.6632529497146606e+00 + + <_> + + 0 -1 656 -5.7014292106032372e-03 + + 2.7576211094856262e-01 -3.3123719692230225e-01 + <_> + + 0 -1 657 -4.4359369203448296e-03 + + 1.5587480366230011e-01 -5.0288617610931396e-01 + <_> + + 0 -1 658 -5.0388257950544357e-03 + + 1.6109010577201843e-01 -3.5196068882942200e-01 + <_> + + 0 -1 659 8.0847437493503094e-04 + + -3.3315700292587280e-01 1.4446459710597992e-01 + <_> + + 0 -1 660 2.1605329588055611e-02 + + -8.6723573505878448e-02 5.9101939201354980e-01 + <_> + + 0 -1 661 -1.8266839906573296e-02 + + 5.0261861085891724e-01 -8.4620863199234009e-02 + <_> + + 0 -1 662 -8.3384668687358499e-04 + + -3.0832511186599731e-01 1.1352760344743729e-01 + <_> + + 0 -1 663 -1.5336600132286549e-02 + + -6.8610608577728271e-01 3.3057838678359985e-02 + <_> + + 0 -1 664 -5.0607877783477306e-03 + + 3.4399279952049255e-01 -9.2118233442306519e-02 + <_> + + 0 -1 665 -1.4741700397280511e-05 + + 1.1778169870376587e-01 -2.5235179066658020e-01 + <_> + + 0 -1 666 
-1.1485730065032840e-03 + + -2.9050019383430481e-01 8.3533048629760742e-02 + <_> + + 0 -1 667 2.8824089094996452e-03 + + -9.0674236416816711e-02 3.1274148821830750e-01 + <_> + + 0 -1 668 -2.9224360361695290e-02 + + -6.9156378507614136e-01 3.3279780298471451e-02 + <_> + + 0 -1 669 2.1423520520329475e-03 + + -1.0087729990482330e-01 2.4603089690208435e-01 + <_> + + 0 -1 670 -3.3471059054136276e-02 + + -5.0953942537307739e-01 5.5052071809768677e-02 + <_> + + 0 -1 671 1.4763450053578708e-05 + + -1.7823149263858795e-01 1.2816399335861206e-01 + <_> + + 0 -1 672 1.6341559588909149e-02 + + -1.3254739344120026e-01 1.9663499295711517e-01 + <_> + + 0 -1 673 2.2475779987871647e-03 + + 7.9048447310924530e-02 -2.9476320743560791e-01 + <_> + + 0 -1 674 4.6113221906125546e-03 + + -7.6338447630405426e-02 3.2394409179687500e-01 + <_> + + 0 -1 675 2.8979079797863960e-03 + + -1.0839050263166428e-01 2.6353389024734497e-01 + <_> + + 0 -1 676 1.3482819776982069e-03 + + 7.9134561121463776e-02 -3.4839859604835510e-01 + <_> + + 0 -1 677 4.6576592139899731e-03 + + 7.6356090605258942e-02 -3.1110540032386780e-01 + <_> + + 0 -1 678 -3.9915097877383232e-03 + + -3.4151628613471985e-01 8.2623466849327087e-02 + <_> + + 0 -1 679 6.0268798843026161e-03 + + -9.6277832984924316e-02 2.6347661018371582e-01 + <_> + + 0 -1 680 -4.1388701647520065e-03 + + 2.3571729660034180e-01 -9.4335287809371948e-02 + <_> + + 0 -1 681 -1.0371750220656395e-02 + + -7.2972798347473145e-01 3.3645220100879669e-02 + <_> + + 0 -1 682 1.0373629629611969e-01 + + 3.1347069889307022e-02 -5.8245128393173218e-01 + <_> + + 0 -1 683 -1.8832299974747002e-04 + + 1.6663299500942230e-01 -1.3723160326480865e-01 + <_> + + 0 -1 684 -6.0749921249225736e-04 + + -2.7257540822029114e-01 8.1483371555805206e-02 + <_> + + 0 -1 685 2.3499270901083946e-03 + + -1.0285440087318420e-01 2.1854889392852783e-01 + <_> + + 0 -1 686 -3.1354159582406282e-03 + + -4.9246039986610413e-01 4.4747360050678253e-02 + <_> + + 0 -1 687 1.5564589994028211e-03 + + 5.3096260875463486e-02 -4.0526211261749268e-01 + <_> + + 0 -1 688 6.3236099667847157e-03 + + -7.9116806387901306e-02 2.8413718938827515e-01 + <_> + + 0 -1 689 -4.8074051737785339e-03 + + 2.9990258812904358e-01 -8.2824081182479858e-02 + <_> + + 0 -1 690 7.6432302594184875e-02 + + 3.9146371185779572e-02 -5.7314342260360718e-01 + <_> + + 0 -1 691 7.0249952841550112e-04 + + 5.2832871675491333e-02 -3.3245471119880676e-01 + <_> + + 0 -1 692 -8.2157138967886567e-04 + + -2.1230019629001617e-01 8.8145829737186432e-02 + <_> + + 0 -1 693 -1.0148280300199986e-02 + + -2.2071610391139984e-01 9.6597403287887573e-02 + <_> + + 0 -1 694 -1.7348809540271759e-01 + + -5.9822201728820801e-01 3.2547060400247574e-02 + <_> + + 0 -1 695 4.3031540699303150e-03 + + -6.8253546953201294e-02 2.8981029987335205e-01 + <_> + + 0 -1 696 -7.3378678280278109e-06 + + 7.5155563652515411e-02 -2.5863590836524963e-01 + <_> + + 0 -1 697 1.9277239916846156e-03 + + 1.0856460034847260e-01 -1.6595140099525452e-01 + <_> + + 0 -1 698 -4.2054480873048306e-03 + + 1.9811309874057770e-01 -9.1941706836223602e-02 + <_> + + 0 -1 699 1.1466189753264189e-03 + + 4.2078729718923569e-02 -4.3991029262542725e-01 + <_> + + 0 -1 700 -6.7244949750602245e-03 + + 3.4456861019134521e-01 -5.7096958160400391e-02 + <_> + + 0 -1 701 -1.4554189874615986e-05 + + 1.1632560193538666e-01 -1.6252210736274719e-01 + <_> + + 0 -1 702 -2.6114559732377529e-03 + + 2.8084969520568848e-01 -6.8243041634559631e-02 + <_> + + 0 -1 703 -1.9477460591588169e-04 + + -1.9368860125541687e-01 9.3413226306438446e-02 + <_> + + 0 -1 704 
2.6438338682055473e-04 + + 9.9354371428489685e-02 -2.1586629748344421e-01 + <_> + + 0 -1 705 2.0134719088673592e-03 + + -6.1209201812744141e-02 2.9120978713035583e-01 + <_> + + 0 -1 706 -2.6024359464645386e-01 + + -8.3802181482315063e-01 2.1150760352611542e-02 + <_> + + 0 -1 707 -1.5944700688123703e-02 + + -6.3974797725677490e-01 2.2144839167594910e-02 + <_> + + 0 -1 708 6.7249889252707362e-04 + + -1.4014090597629547e-01 1.2326350063085556e-01 + <_> + + 0 -1 709 1.3042770326137543e-02 + + 2.4306889623403549e-02 -6.6303068399429321e-01 + <_> + + 0 -1 710 -1.4540290067088790e-05 + + 9.0137362480163574e-02 -1.7409169673919678e-01 + <_> + + 0 -1 711 1.7920829355716705e-02 + + 2.5644620880484581e-02 -6.5067142248153687e-01 + <_> + + 0 -1 712 1.6542300581932068e-03 + + -1.0385700315237045e-01 1.6688160598278046e-01 + <_> + + 0 -1 713 3.5362090915441513e-02 + + 2.3093009367585182e-02 -6.9009417295455933e-01 + <_> + + 0 -1 714 3.3049840567400679e-05 + + -1.7408940196037292e-01 9.3873098492622375e-02 + <_> + + 0 -1 715 3.3775588963180780e-03 + + -5.8522459119558334e-02 3.0490559339523315e-01 + <_> + + 0 -1 716 7.3239738121628761e-03 + + 4.0999408811330795e-02 -4.6160981059074402e-01 + <_> + + 0 -1 717 -2.9797051101922989e-03 + + 5.1136761903762817e-01 -3.6246869713068008e-02 + <_> + + 0 -1 718 2.0306499209254980e-03 + + 6.5309353172779083e-02 -2.6698499917984009e-01 + <_> + + 0 -1 719 -6.8856950383633375e-04 + + -1.7604120075702667e-01 9.9361896514892578e-02 + <_> + + 0 -1 720 1.5746579738333821e-03 + + -1.0312269628047943e-01 1.6940550506114960e-01 + <_> + + 0 -1 721 1.5011089853942394e-03 + + -8.8128447532653809e-02 1.8899090588092804e-01 + <_> + + 0 -1 722 1.3503979425877333e-04 + + 9.4145476818084717e-02 -1.8483440577983856e-01 + <_> + + 0 -1 723 5.5570588447153568e-03 + + 2.9959060251712799e-02 -5.5482620000839233e-01 + <_> + + 0 -1 724 9.4529995694756508e-03 + + -5.3136389702558517e-02 4.0138289332389832e-01 + <_> + + 0 -1 725 -6.1030662618577480e-04 + + -2.7060449123382568e-01 6.6881351172924042e-02 + <_> + + 0 -1 726 -1.1329240351915359e-01 + + -6.5178507566452026e-01 2.5042990222573280e-02 + <_> + + 0 -1 727 -2.0354389562271535e-04 + + 1.0892420262098312e-01 -1.5174369513988495e-01 + <_> + + 0 -1 728 -1.4983189757913351e-03 + + 2.7388730645179749e-01 -5.8467049151659012e-02 + <_> + + 0 -1 729 7.5277159921824932e-03 + + 4.0991529822349548e-02 -4.2739889025688171e-01 + <_> + + 0 -1 730 3.6209179088473320e-03 + + -6.7309238016605377e-02 2.6064750552177429e-01 + <_> + + 0 -1 731 1.2153049930930138e-02 + + 5.0768271088600159e-02 -3.8319081068038940e-01 + <_> + + 0 -1 732 4.6126339584589005e-02 + + 2.4232989177107811e-02 -6.5039527416229248e-01 + <_> + + 0 -1 733 7.1408541407436132e-04 + + -1.3476370275020599e-01 1.2208549678325653e-01 + <_> + + 0 -1 734 -4.4331620447337627e-03 + + 1.9939610362052917e-01 -1.0218709707260132e-01 + <_> + + 0 -1 735 1.3099729549139738e-03 + + 7.4517026543617249e-02 -2.4503719806671143e-01 + <_> + + 0 -1 736 2.6161450659856200e-04 + + -8.4287956357002258e-02 1.9924600422382355e-01 + <_> + + 0 -1 737 -2.7577539440244436e-03 + + -6.8734467029571533e-01 2.4851109832525253e-02 + <_> + + 0 -1 738 6.9469690322875977e-02 + + 3.8438729941844940e-02 -3.9717179536819458e-01 + <_> + + 0 -1 739 -1.3031469425186515e-03 + + 2.0089949667453766e-01 -9.1723307967185974e-02 + <_> + + 0 -1 740 1.3012000126764178e-03 + + -9.5305852591991425e-02 1.9248190522193909e-01 + <_> + + 0 -1 741 -3.9377259090542793e-03 + + -3.9224091172218323e-01 4.3738011270761490e-02 + <_> + + 0 -1 742 
9.6125707030296326e-02 + + -4.3269440531730652e-02 3.7441849708557129e-01 + <_> + + 0 -1 743 -1.9181859493255615e-01 + + -6.1320561170578003e-01 2.8775539249181747e-02 + <_> + + 0 -1 744 -3.2945619896054268e-03 + + -2.2446820139884949e-01 7.7655017375946045e-02 + <_> + + 0 -1 745 -8.5190916433930397e-03 + + 4.4720551371574402e-01 -4.1310388594865799e-02 + <_> + + 0 -1 746 -4.9431469291448593e-02 + + -5.1819682121276855e-01 3.6863740533590317e-02 + <_> + + 0 -1 747 2.3110879585146904e-02 + + -3.3078420907258987e-02 5.9146630764007568e-01 + <_> + + 0 -1 748 -4.3400399590609595e-05 + + 1.1395029723644257e-01 -1.9526299834251404e-01 + <_> + + 0 -1 749 5.4926839657127857e-03 + + 6.1616070568561554e-02 -2.5591990351676941e-01 + <_> + + 0 -1 750 1.1886029969900846e-03 + + -6.8509116768836975e-02 2.4291250109672546e-01 + <_> + + 0 -1 751 8.8473428040742874e-03 + + 7.6467283070087433e-02 -2.3176389932632446e-01 + <_> + + 0 -1 752 2.3952820338308811e-03 + + -4.4620860368013382e-02 4.5811769366264343e-01 + <_> + + 0 -1 753 -1.5011220239102840e-04 + + -1.6560749709606171e-01 1.0622239857912064e-01 + <_> + + 0 -1 754 -2.3465899750590324e-02 + + -2.4931310117244720e-01 6.6179357469081879e-02 + <_> + + 0 -1 755 -6.6368370316922665e-03 + + 1.4358420670032501e-01 -1.1510509997606277e-01 + <_> + + 0 -1 756 1.1986029567196965e-03 + + -9.8347522318363190e-02 1.7605540156364441e-01 + <_> + + 0 -1 757 7.9502072185277939e-03 + + 3.5481378436088562e-02 -5.0176638364791870e-01 + <_> + + 0 -1 758 -4.5950649655424058e-04 + + -1.6928760707378387e-01 9.3400083482265472e-02 + <_> + + 0 -1 759 -1.9301069900393486e-02 + + 4.1836661100387573e-01 -5.1140110939741135e-02 + <_> + + 0 -1 760 4.0163499116897583e-01 + + 2.9358919709920883e-02 -6.4768058061599731e-01 + <_> + 114 + -1.5384509563446045e+00 + + <_> + + 0 -1 761 -3.6284290254116058e-02 + + 4.2841899394989014e-01 -2.5840431451797485e-01 + <_> + + 0 -1 762 3.0520830303430557e-02 + + -2.9715040326118469e-01 2.1756610274314880e-01 + <_> + + 0 -1 763 3.3444820437580347e-03 + + -2.1734359860420227e-01 1.9754439592361450e-01 + <_> + + 0 -1 764 -1.3315919786691666e-03 + + 1.5535929799079895e-01 -2.3133680224418640e-01 + <_> + + 0 -1 765 -1.9773480016738176e-03 + + -4.2001301050186157e-01 8.8554427027702332e-02 + <_> + + 0 -1 766 -3.7038238951936364e-04 + + 1.2769789993762970e-01 -2.3879130184650421e-01 + <_> + + 0 -1 767 -7.3736459016799927e-03 + + -4.0720060467720032e-01 2.9765319079160690e-02 + <_> + + 0 -1 768 -2.1873020159546286e-05 + + 1.2338209897279739e-01 -2.2237089276313782e-01 + <_> + + 0 -1 769 4.5575048716273159e-05 + + -2.3092910647392273e-01 1.2953619658946991e-01 + <_> + + 0 -1 770 -1.1247170157730579e-02 + + -5.4762738943099976e-01 4.1907660663127899e-02 + <_> + + 0 -1 771 -8.9430268853902817e-03 + + 2.7945289015769958e-01 -9.0801216661930084e-02 + <_> + + 0 -1 772 1.4646670024376363e-05 + + -1.6777880489826202e-01 1.4968040585517883e-01 + <_> + + 0 -1 773 -6.5398351289331913e-03 + + 3.3654621243476868e-01 -7.1987256407737732e-02 + <_> + + 0 -1 774 3.3825531136244535e-03 + + 4.9931880086660385e-02 -4.5806300640106201e-01 + <_> + + 0 -1 775 2.7450500056147575e-03 + + 3.6119509488344193e-02 -5.7113862037658691e-01 + <_> + + 0 -1 776 1.0356379672884941e-02 + + -5.3049158304929733e-02 4.2121198773384094e-01 + <_> + + 0 -1 777 3.1687319278717041e-03 + + 6.2849938869476318e-02 -3.4674918651580811e-01 + <_> + + 0 -1 778 1.3616570504382253e-03 + + -9.0661056339740753e-02 2.5257480144500732e-01 + <_> + + 0 -1 779 -2.2238260135054588e-03 + + 2.6595190167427063e-01 
-9.6649080514907837e-02 + <_> + + 0 -1 780 1.1090899817645550e-02 + + 8.6638063192367554e-02 -3.0103358626365662e-01 + <_> + + 0 -1 781 -6.7766150459647179e-04 + + 9.4277828931808472e-02 -2.1464149653911591e-01 + <_> + + 0 -1 782 -3.3104580361396074e-03 + + -5.9162640571594238e-01 3.2738488167524338e-02 + <_> + + 0 -1 783 2.3221869487315416e-03 + + -9.5557250082492828e-02 2.0546199381351471e-01 + <_> + + 0 -1 784 3.0947118648327887e-04 + + -1.2992270290851593e-01 1.7704719305038452e-01 + <_> + + 0 -1 785 -3.2214168459177017e-02 + + -6.4662492275238037e-01 3.1749259680509567e-02 + <_> + + 0 -1 786 -8.3192758029326797e-04 + + -3.0666750669479370e-01 6.1040591448545456e-02 + <_> + + 0 -1 787 3.9188290247693658e-04 + + -1.5795469284057617e-01 1.1830350011587143e-01 + <_> + + 0 -1 788 -3.6203738301992416e-02 + + -2.2731229662895203e-01 8.3183012902736664e-02 + <_> + + 0 -1 789 2.6437509804964066e-03 + + -7.6691061258316040e-02 2.3545509576797485e-01 + <_> + + 0 -1 790 -3.4368310589343309e-03 + + 3.6057031154632568e-01 -7.3672987520694733e-02 + <_> + + 0 -1 791 -5.5921601597219706e-04 + + -2.5343179702758789e-01 7.8275643289089203e-02 + <_> + + 0 -1 792 4.3010139052057639e-05 + + -1.8223099410533905e-01 9.7539380192756653e-02 + <_> + + 0 -1 793 5.3192679770290852e-03 + + -7.6901949942111969e-02 2.4221810698509216e-01 + <_> + + 0 -1 794 -6.9484501145780087e-03 + + -5.8275872468948364e-01 3.4601949155330658e-02 + <_> + + 0 -1 795 1.2447779998183250e-02 + + 2.3883659392595291e-02 -6.1712497472763062e-01 + <_> + + 0 -1 796 1.0083100060001016e-03 + + -7.5152181088924408e-02 2.4744270741939545e-01 + <_> + + 0 -1 797 -2.3544009309262037e-03 + + 3.1459400057792664e-01 -6.5026231110095978e-02 + <_> + + 0 -1 798 4.5676861191168427e-04 + + 7.9758197069168091e-02 -2.3777219653129578e-01 + <_> + + 0 -1 799 6.6723190248012543e-03 + + 3.8779199123382568e-02 -4.6045419573783875e-01 + <_> + + 0 -1 800 7.1861818469187710e-06 + + -1.3110539317131042e-01 1.2532530725002289e-01 + <_> + + 0 -1 801 3.0392590910196304e-02 + + 2.9670530930161476e-02 -5.3870928287506104e-01 + <_> + + 0 -1 802 1.4835850379313342e-05 + + -1.5778580307960510e-01 1.0566859692335129e-01 + <_> + + 0 -1 803 1.4415860176086426e-02 + + -7.6271347701549530e-02 3.0597710609436035e-01 + <_> + + 0 -1 804 3.2787520904093981e-03 + + 4.4464308768510818e-02 -3.8928028941154480e-01 + <_> + + 0 -1 805 1.0770520195364952e-02 + + -3.9324011653661728e-02 4.1493979096412659e-01 + <_> + + 0 -1 806 5.4678268497809768e-04 + + 5.8721691370010376e-02 -2.7546930313110352e-01 + <_> + + 0 -1 807 -1.8106499919667840e-03 + + 1.8281750380992889e-01 -9.3675427138805389e-02 + <_> + + 0 -1 808 1.1771249771118164e-01 + + 2.3175759240984917e-02 -7.0696681737899780e-01 + <_> + + 0 -1 809 -3.1166549888439476e-04 + + -2.0585930347442627e-01 7.6573841273784637e-02 + <_> + + 0 -1 810 -9.7939418628811836e-03 + + 4.8732680082321167e-01 -3.4746028482913971e-02 + <_> + + 0 -1 811 1.0002780472859740e-03 + + -1.1003620177507401e-01 1.5490560233592987e-01 + <_> + + 0 -1 812 6.9929230958223343e-03 + + 3.2923609018325806e-02 -5.4326117038726807e-01 + <_> + + 0 -1 813 3.4163020551204681e-02 + + 1.8062820658087730e-02 -7.0809149742126465e-01 + <_> + + 0 -1 814 -2.0808410644531250e-01 + + -6.7879611253738403e-01 2.0255820825695992e-02 + <_> + + 0 -1 815 2.4889659835025668e-04 + + -1.7719520628452301e-01 8.8152356445789337e-02 + <_> + + 0 -1 816 -9.3355607241392136e-03 + + 1.7948059737682343e-01 -9.4474621117115021e-02 + <_> + + 0 -1 817 2.9192469082772732e-04 + + -1.3786169886589050e-01 
1.3819259405136108e-01 + <_> + + 0 -1 818 9.1989226639270782e-03 + + -1.0269109904766083e-01 1.7618100345134735e-01 + <_> + + 0 -1 819 6.8165437551215291e-04 + + 7.4821308255195618e-02 -2.3621830344200134e-01 + <_> + + 0 -1 820 -1.4507620107906405e-05 + + 9.5861770212650299e-02 -1.7785739898681641e-01 + <_> + + 0 -1 821 1.7662490427028388e-04 + + -1.3805359601974487e-01 1.3394320011138916e-01 + <_> + + 0 -1 822 -1.7513500060886145e-03 + + 7.7623583376407623e-02 -2.3174029588699341e-01 + <_> + + 0 -1 823 5.1342020742595196e-03 + + 3.0363969504833221e-02 -5.2420848608016968e-01 + <_> + + 0 -1 824 9.4114318490028381e-03 + + -5.8994568884372711e-02 3.0291381478309631e-01 + <_> + + 0 -1 825 -1.0448819957673550e-03 + + -1.7124690115451813e-01 1.0156030207872391e-01 + <_> + + 0 -1 826 -6.3579198904335499e-03 + + 3.1986710429191589e-01 -5.0694450736045837e-02 + <_> + + 0 -1 827 -6.3502117991447449e-03 + + -5.2413272857666016e-01 3.1800068914890289e-02 + <_> + + 0 -1 828 1.2251759879291058e-02 + + 1.6559680923819542e-02 -7.9422187805175781e-01 + <_> + + 0 -1 829 -1.4000720344483852e-02 + + -5.4444402456283569e-01 2.4652559310197830e-02 + <_> + + 0 -1 830 1.9229920580983162e-03 + + -7.6944977045059204e-02 2.1888209879398346e-01 + <_> + + 0 -1 831 -3.4030789975076914e-03 + + 3.0143401026725769e-01 -5.8023329824209213e-02 + <_> + + 0 -1 832 -2.7728609740734100e-02 + + -5.6704998016357422e-01 3.0071720480918884e-02 + <_> + + 0 -1 833 1.4990579802542925e-04 + + 9.1404616832733154e-02 -1.6989429295063019e-01 + <_> + + 0 -1 834 -1.4532960449287202e-05 + + 1.0442660003900528e-01 -1.3983349502086639e-01 + <_> + + 0 -1 835 2.8315950185060501e-02 + + 1.7812129110097885e-02 -8.1201279163360596e-01 + <_> + + 0 -1 836 -1.7363600200042129e-03 + + 1.9688630104064941e-01 -7.6398819684982300e-02 + <_> + + 0 -1 837 -2.2081490606069565e-02 + + 4.4497510790824890e-01 -3.3445868641138077e-02 + <_> + + 0 -1 838 1.2189210392534733e-03 + + 4.9154780805110931e-02 -3.7790310382843018e-01 + <_> + + 0 -1 839 -5.4838892538100481e-04 + + -2.2823029756546021e-01 8.0446496605873108e-02 + <_> + + 0 -1 840 -9.3702552840113640e-04 + + 2.5258961319923401e-01 -6.5389201045036316e-02 + <_> + + 0 -1 841 1.2496720068156719e-02 + + 3.8215879350900650e-02 -4.0465530753135681e-01 + <_> + + 0 -1 842 -1.6764370724558830e-02 + + -1.4508719742298126e-01 1.2119810283184052e-01 + <_> + + 0 -1 843 5.6504327803850174e-03 + + -8.7139137089252472e-02 2.2194419801235199e-01 + <_> + + 0 -1 844 5.2610319107770920e-04 + + 8.7222076952457428e-02 -2.0502470433712006e-01 + <_> + + 0 -1 845 1.5574200078845024e-03 + + -1.7036689817905426e-01 9.4435282051563263e-02 + <_> + + 0 -1 846 2.5609090924263000e-01 + + 1.7790110781788826e-02 -7.4050921201705933e-01 + <_> + + 0 -1 847 3.3561999443918467e-03 + + -4.2667269706726074e-02 3.7573391199111938e-01 + <_> + + 0 -1 848 4.7072928398847580e-02 + + 3.2015219330787659e-02 -6.4522278308868408e-01 + <_> + + 0 -1 849 -2.2168930154293776e-03 + + 2.0757040381431580e-01 -7.7372692525386810e-02 + <_> + + 0 -1 850 5.0796428695321083e-03 + + 4.1829328984022141e-02 -3.7722969055175781e-01 + <_> + + 0 -1 851 7.0120906457304955e-05 + + 8.1031888723373413e-02 -1.8506260216236115e-01 + <_> + + 0 -1 852 -5.2204862004145980e-04 + + 1.2528459727764130e-01 -1.3090319931507111e-01 + <_> + + 0 -1 853 -6.1609707772731781e-03 + + 3.1177788972854614e-01 -5.1252178847789764e-02 + <_> + + 0 -1 854 -2.8424879908561707e-01 + + -7.0340508222579956e-01 2.2811079397797585e-02 + <_> + + 0 -1 855 -4.1746720671653748e-02 + + -7.8914260864257812e-01 
1.6686350107192993e-02 + <_> + + 0 -1 856 -1.0051350109279156e-03 + + -2.2181299328804016e-01 6.1887398362159729e-02 + <_> + + 0 -1 857 -1.3900640187785029e-03 + + 1.8797479569911957e-01 -7.6582401990890503e-02 + <_> + + 0 -1 858 -4.0118378819897771e-04 + + -1.7291170358657837e-01 8.6806759238243103e-02 + <_> + + 0 -1 859 -2.9202610676293261e-05 + + 9.2319779098033905e-02 -1.7136460542678833e-01 + <_> + + 0 -1 860 -2.6532830670475960e-03 + + 3.9422848820686340e-01 -3.9826449006795883e-02 + <_> + + 0 -1 861 -7.8933471813797951e-03 + + -4.3326890468597412e-01 3.6603361368179321e-02 + <_> + + 0 -1 862 8.7933447211980820e-03 + + -3.3205948770046234e-02 4.8740789294242859e-01 + <_> + + 0 -1 863 1.2014759704470634e-02 + + 2.2244220599532127e-02 -8.1597268581390381e-01 + <_> + + 0 -1 864 2.1147020161151886e-03 + + 6.4942933619022369e-02 -2.0959229767322540e-01 + <_> + + 0 -1 865 -9.9916034378111362e-04 + + 1.5402349829673767e-01 -1.0149469971656799e-01 + <_> + + 0 -1 866 -7.6499581336975098e-04 + + 2.0236450433731079e-01 -7.1199662983417511e-02 + <_> + + 0 -1 867 -4.2193511035293341e-04 + + 1.1521430313587189e-01 -1.2845459580421448e-01 + <_> + + 0 -1 868 -4.1548791341483593e-04 + + -2.1168529987335205e-01 7.0376142859458923e-02 + <_> + + 0 -1 869 1.5300279483199120e-03 + + 6.1263758689165115e-02 -2.2269320487976074e-01 + <_> + + 0 -1 870 -2.6573969516903162e-03 + + 3.8462328910827637e-01 -3.8276020437479019e-02 + <_> + + 0 -1 871 -2.1988600492477417e-01 + + -5.1546782255172729e-01 2.8099389746785164e-02 + <_> + + 0 -1 872 -8.7377207819372416e-04 + + 1.0149329900741577e-01 -1.3990689814090729e-01 + <_> + + 0 -1 873 7.5169820338487625e-03 + + -6.1671640723943710e-02 2.5486430525779724e-01 + <_> + + 0 -1 874 -1.3438290625344962e-04 + + -1.6618040204048157e-01 8.8938876986503601e-02 + <_> + 117 + -1.5079799890518188e+00 + + <_> + + 0 -1 875 3.5007519181817770e-03 + + -2.8256690502166748e-01 3.3628109097480774e-01 + <_> + + 0 -1 876 4.1042729280889034e-03 + + -1.5877629816532135e-01 3.4091961383819580e-01 + <_> + + 0 -1 877 9.8724407143890858e-04 + + -4.6094760298728943e-01 1.1771719902753830e-01 + <_> + + 0 -1 878 -4.0168981067836285e-03 + + 1.3994920253753662e-01 -3.8476601243019104e-01 + <_> + + 0 -1 879 -4.2784500867128372e-02 + + 3.1519949436187744e-01 -1.1673810333013535e-01 + <_> + + 0 -1 880 -5.6273501832038164e-04 + + 8.2315109670162201e-02 -3.3594700694084167e-01 + <_> + + 0 -1 881 -4.3416650441940874e-05 + + 1.0691779851913452e-01 -2.5068029761314392e-01 + <_> + + 0 -1 882 1.5347570180892944e-02 + + 9.7383828833699226e-03 -6.4612430334091187e-01 + <_> + + 0 -1 883 1.8295480404049158e-03 + + 8.9164443314075470e-02 -2.9637640714645386e-01 + <_> + + 0 -1 884 3.2098879455588758e-04 + + -2.3136790096759796e-01 1.1478479951620102e-01 + <_> + + 0 -1 885 1.0728760389611125e-03 + + -1.2982189655303955e-01 1.9653689861297607e-01 + <_> + + 0 -1 886 -4.9566011875867844e-03 + + 3.5313999652862549e-01 -7.6989777386188507e-02 + <_> + + 0 -1 887 -1.6319400165230036e-03 + + -2.3701989650726318e-01 1.0319659858942032e-01 + <_> + + 0 -1 888 1.9862050190567970e-02 + + 5.9187598526477814e-02 -4.0955111384391785e-01 + <_> + + 0 -1 889 -9.5205483958125114e-03 + + 3.9061769843101501e-01 -5.7647578418254852e-02 + <_> + + 0 -1 890 -1.0885810479521751e-03 + + -5.2902680635452271e-01 4.4961001724004745e-02 + <_> + + 0 -1 891 3.5348529927432537e-03 + + -9.2707537114620209e-02 2.4449980258941650e-01 + <_> + + 0 -1 892 5.7174800895154476e-03 + + 5.7306189090013504e-02 -3.9878991246223450e-01 + <_> + + 0 -1 893 
+        [... cascade-classifier stage data elided: weak-classifier nodes 894-1679,
+        each encoding a feature index, a split threshold, and two leaf values;
+        the boosted stages in this span contain 121, 137, 153, 161, and 159 trees,
+        with stage thresholds -1.4499470e+00, -1.4971180e+00, -1.5120370e+00,
+        -1.4741110e+00, and -1.3943890e+00; the data continues beyond this point ...]
1.1394330300390720e-02 + <_> + + 0 -1 1680 -1.4764709630981088e-03 + + 1.3547790050506592e-01 -8.5470907390117645e-02 + <_> + + 0 -1 1681 9.9324379116296768e-03 + + -4.8758801072835922e-02 2.4582690000534058e-01 + <_> + + 0 -1 1682 -2.6857290416955948e-02 + + -4.3975710868835449e-01 2.5082239881157875e-02 + <_> + + 0 -1 1683 -7.3618912138044834e-03 + + 1.2384700030088425e-01 -9.7226209938526154e-02 + <_> + + 0 -1 1684 -1.9785730168223381e-02 + + -5.0932317972183228e-01 2.3481979966163635e-02 + <_> + + 0 -1 1685 -1.4635100342275109e-05 + + 9.4043917953968048e-02 -1.2145669758319855e-01 + <_> + + 0 -1 1686 -5.4067030549049377e-02 + + -5.4586207866668701e-01 1.9500140100717545e-02 + <_> + + 0 -1 1687 1.1532169766724110e-02 + + -7.6409153640270233e-02 1.3763970136642456e-01 + <_> + + 0 -1 1688 -4.4358540326356888e-03 + + 1.2359759956598282e-01 -9.1719299554824829e-02 + <_> + + 0 -1 1689 8.3216017810627818e-04 + + 6.3659071922302246e-02 -2.0440760254859924e-01 + <_> + + 0 -1 1690 -1.2503969669342041e-01 + + -4.1524758934974670e-01 2.7199100703001022e-02 + <_> + + 0 -1 1691 4.9618318676948547e-02 + + 1.5955109149217606e-02 -6.1666852235794067e-01 + <_> + + 0 -1 1692 -3.0613599810749292e-03 + + 3.6662209033966064e-01 -3.3449448645114899e-02 + <_> + + 0 -1 1693 3.5273379180580378e-03 + + 3.1757980585098267e-02 -3.8478809595108032e-01 + <_> + + 0 -1 1694 -6.6726570948958397e-03 + + 3.2095840573310852e-01 -3.4408681094646454e-02 + <_> + + 0 -1 1695 -2.5795500259846449e-03 + + -3.7870529294013977e-01 2.8562130406498909e-02 + <_> + + 0 -1 1696 7.8417789191007614e-03 + + -2.0479770377278328e-02 5.1704108715057373e-01 + <_> + + 0 -1 1697 3.1101319473236799e-04 + + -1.0809139907360077e-01 9.7204521298408508e-02 + <_> + + 0 -1 1698 2.6113479398190975e-03 + + -8.1770427525043488e-02 1.4691209793090820e-01 + <_> + + 0 -1 1699 7.3472261428833008e-03 + + 2.5131259113550186e-02 -4.3025061488151550e-01 + <_> + + 0 -1 1700 1.3528259296435863e-04 + + -1.4751060307025909e-01 6.7584678530693054e-02 + <_> + + 0 -1 1701 -5.1026898290729150e-05 + + -1.2161359935998917e-01 8.4333047270774841e-02 + <_> + + 0 -1 1702 1.1552199721336365e-03 + + -5.4663829505443573e-02 1.9773660600185394e-01 + <_> + + 0 -1 1703 -8.2931712269783020e-02 + + -5.1923328638076782e-01 2.0582359284162521e-02 + <_> + + 0 -1 1704 -4.6260739327408373e-04 + + 8.5588268935680389e-02 -1.1725299805402756e-01 + <_> + + 0 -1 1705 6.7906372714787722e-04 + + 4.5980118215084076e-02 -2.2628420591354370e-01 + <_> + + 0 -1 1706 1.4090019976720214e-03 + + -4.7628920525312424e-02 2.2722719609737396e-01 + <_> + + 0 -1 1707 2.8954911231994629e-01 + + 1.6701240092515945e-02 -6.3967019319534302e-01 + <_> + + 0 -1 1708 1.9376130774617195e-02 + + -2.2569410502910614e-02 5.0590497255325317e-01 + <_> + + 0 -1 1709 4.2641081381589174e-04 + + 6.6041722893714905e-02 -1.6666300594806671e-01 + <_> + + 0 -1 1710 1.7502580303698778e-03 + + -5.8077909052371979e-02 1.9512599706649780e-01 + <_> + + 0 -1 1711 -3.2605750020593405e-03 + + -2.9101881384849548e-01 3.8328718394041061e-02 + <_> + + 0 -1 1712 1.9519040361046791e-03 + + 5.9565968811511993e-02 -1.6910600662231445e-01 + <_> + + 0 -1 1713 -3.2053990289568901e-03 + + 1.9927769899368286e-01 -5.6053258478641510e-02 + <_> + + 0 -1 1714 1.7617279663681984e-03 + + 5.0697531551122665e-02 -2.1276649832725525e-01 + <_> + + 0 -1 1715 -6.0043102130293846e-03 + + -1.3699269294738770e-01 8.2275278866291046e-02 + <_> + + 0 -1 1716 2.4830829352140427e-03 + + -5.1561661064624786e-02 2.1684220433235168e-01 + <_> + + 0 -1 1717 
-1.0821930319070816e-01 + + -7.8375291824340820e-01 1.4433650299906731e-02 + <_> + + 0 -1 1718 -7.5229378417134285e-03 + + 1.3453729450702667e-01 -9.0582698583602905e-02 + <_> + + 0 -1 1719 3.0750989913940430e-02 + + 1.1081690341234207e-01 -9.9475599825382233e-02 + <_> + + 0 -1 1720 -2.8948320541530848e-03 + + 1.9005739688873291e-01 -5.2639260888099670e-02 + <_> + + 0 -1 1721 2.7011099737137556e-03 + + 5.8573558926582336e-02 -1.9851949810981750e-01 + <_> + + 0 -1 1722 1.2562989722937346e-03 + + -7.3565311729907990e-02 1.5436840057373047e-01 + <_> + 173 + -1.4785599708557129e+00 + + <_> + + 0 -1 1723 -2.1460579708218575e-02 + + 3.2505050301551819e-01 -2.0890380442142487e-01 + <_> + + 0 -1 1724 7.6785432174801826e-03 + + -1.3231310248374939e-01 3.0525839328765869e-01 + <_> + + 0 -1 1725 3.4118059556931257e-03 + + -3.0793079733848572e-01 1.1010979861021042e-01 + <_> + + 0 -1 1726 -1.4710490177094471e-05 + + 9.5858857035636902e-02 -2.9641860723495483e-01 + <_> + + 0 -1 1727 1.0538049973547459e-02 + + -7.9252541065216064e-02 3.7234848737716675e-01 + <_> + + 0 -1 1728 -2.5260078837163746e-04 + + 6.7121110856533051e-02 -3.0784338712692261e-01 + <_> + + 0 -1 1729 -3.5665810573846102e-03 + + 1.4667609333992004e-01 -1.7083789408206940e-01 + <_> + + 0 -1 1730 -1.2677359627559781e-03 + + -4.9063721299171448e-01 2.0374119281768799e-02 + <_> + + 0 -1 1731 -6.7669381387531757e-03 + + 2.5767329335212708e-01 -7.4175901710987091e-02 + <_> + + 0 -1 1732 -6.0447258874773979e-04 + + -1.9196410477161407e-01 9.1349847614765167e-02 + <_> + + 0 -1 1733 -2.5375590194016695e-03 + + -3.5663878917694092e-01 5.1547251641750336e-02 + <_> + + 0 -1 1734 -7.0200557820498943e-03 + + 3.9719080924987793e-01 -4.3967988342046738e-02 + <_> + + 0 -1 1735 -5.7049379684031010e-03 + + -5.0015491247177124e-01 2.9825929552316666e-02 + <_> + + 0 -1 1736 1.4744909713044763e-03 + + 5.8546211570501328e-02 -2.6139810681343079e-01 + <_> + + 0 -1 1737 9.2834811657667160e-03 + + -4.2836759239435196e-02 3.3443170785903931e-01 + <_> + + 0 -1 1738 9.9660153500735760e-04 + + -1.0425110161304474e-01 1.6191780567169189e-01 + <_> + + 0 -1 1739 -7.5932733714580536e-02 + + -3.7356320023536682e-01 4.3075688183307648e-02 + <_> + + 0 -1 1740 5.5370710470015183e-05 + + -1.4570540189743042e-01 1.1560150235891342e-01 + <_> + + 0 -1 1741 1.4746849956281949e-05 + + -1.2972679734230042e-01 1.1747740209102631e-01 + <_> + + 0 -1 1742 -1.4875919441692531e-04 + + -1.8002930283546448e-01 7.8782692551612854e-02 + <_> + + 0 -1 1743 3.3751460723578930e-03 + + -7.7242009341716766e-02 1.8596859276294708e-01 + <_> + + 0 -1 1744 3.4271259210072458e-04 + + -1.5393340587615967e-01 1.0472580045461655e-01 + <_> + + 0 -1 1745 -4.5711229904554784e-04 + + -2.2300529479980469e-01 6.1818670481443405e-02 + <_> + + 0 -1 1746 3.2788628595881164e-04 + + 7.9448707401752472e-02 -1.8889829516410828e-01 + <_> + + 0 -1 1747 -9.6754019614309072e-04 + + 1.3137130439281464e-01 -1.0801070183515549e-01 + <_> + + 0 -1 1748 1.0537009686231613e-02 + + 2.2138269618153572e-02 -5.7479751110076904e-01 + <_> + + 0 -1 1749 5.6796409189701080e-03 + + -5.6034579873085022e-02 2.4849580228328705e-01 + <_> + + 0 -1 1750 -8.8083967566490173e-03 + + -3.7167680263519287e-01 4.2726948857307434e-02 + <_> + + 0 -1 1751 -2.8319710865616798e-02 + + -6.2387847900390625e-01 2.0844049751758575e-02 + <_> + + 0 -1 1752 1.3637860305607319e-02 + + 1.4434239827096462e-02 -7.1537137031555176e-01 + <_> + + 0 -1 1753 1.1822770349681377e-02 + + -4.3181091547012329e-02 3.0682548880577087e-01 + <_> + + 0 -1 1754 
-6.1035697581246495e-04 + + -2.0418339967727661e-01 6.2115620821714401e-02 + <_> + + 0 -1 1755 -5.6125568225979805e-03 + + 3.6485010385513306e-01 -3.5448960959911346e-02 + <_> + + 0 -1 1756 1.4603640011046082e-05 + + -9.6096910536289215e-02 1.2142290174961090e-01 + <_> + + 0 -1 1757 1.9061230123043060e-03 + + 5.3135868161916733e-02 -2.2978909313678741e-01 + <_> + + 0 -1 1758 -3.6644220817834139e-03 + + 1.9614529609680176e-01 -6.8556912243366241e-02 + <_> + + 0 -1 1759 1.2336249928921461e-03 + + -8.7000347673892975e-02 1.3920229673385620e-01 + <_> + + 0 -1 1760 5.4660569876432419e-03 + + 2.2660890594124794e-02 -4.8329529166221619e-01 + <_> + + 0 -1 1761 -6.1730947345495224e-04 + + -2.1959540247917175e-01 5.5258519947528839e-02 + <_> + + 0 -1 1762 2.9604700393974781e-03 + + -5.0548229366540909e-02 2.7476710081100464e-01 + <_> + + 0 -1 1763 2.8015000745654106e-02 + + 1.8874650821089745e-02 -6.0498368740081787e-01 + <_> + + 0 -1 1764 -7.1651988946541678e-06 + + 1.0836219787597656e-01 -1.0606969892978668e-01 + <_> + + 0 -1 1765 -1.6367150470614433e-02 + + 2.8645038604736328e-01 -3.7137690931558609e-02 + <_> + + 0 -1 1766 1.0280719725415111e-03 + + 5.6318141520023346e-02 -2.1795029938220978e-01 + <_> + + 0 -1 1767 1.3662660494446754e-03 + + -4.6803500503301620e-02 2.3804000020027161e-01 + <_> + + 0 -1 1768 7.6626739464700222e-03 + + 2.1595260128378868e-02 -5.6847488880157471e-01 + <_> + + 0 -1 1769 -4.5117521658539772e-03 + + -3.5794979333877563e-01 3.0485490337014198e-02 + <_> + + 0 -1 1770 -4.3773967772722244e-03 + + 2.3192660510540009e-01 -5.3999818861484528e-02 + <_> + + 0 -1 1771 -7.2474628686904907e-03 + + -4.3440380692481995e-01 2.6374189183115959e-02 + <_> + + 0 -1 1772 7.9146260395646095e-04 + + -9.9924586713314056e-02 1.1088500171899796e-01 + <_> + + 0 -1 1773 6.4166806638240814e-02 + + 1.8938669934868813e-02 -5.7849419116973877e-01 + <_> + + 0 -1 1774 -1.1797840124927461e-04 + + -1.4889569580554962e-01 6.8777203559875488e-02 + <_> + + 0 -1 1775 1.2801289558410645e-02 + + 5.6179329752922058e-02 -2.0865969359874725e-01 + <_> + + 0 -1 1776 -2.7018740773200989e-02 + + 4.5356890559196472e-01 -2.5054579600691795e-02 + <_> + + 0 -1 1777 -6.9431727752089500e-03 + + -5.2916550636291504e-01 2.1800139918923378e-02 + <_> + + 0 -1 1778 3.3396780490875244e-03 + + -3.7295959889888763e-02 3.1198439002037048e-01 + <_> + + 0 -1 1779 -3.8888349081389606e-04 + + -1.5630130469799042e-01 7.0981830358505249e-02 + <_> + + 0 -1 1780 -7.1400677552446723e-04 + + 2.1799430251121521e-01 -5.4069280624389648e-02 + <_> + + 0 -1 1781 1.2549630366265774e-02 + + 1.7357179895043373e-02 -7.8320449590682983e-01 + <_> + + 0 -1 1782 -1.4623020433646161e-05 + + 7.8640103340148926e-02 -1.4212970435619354e-01 + <_> + + 0 -1 1783 -1.2133170384913683e-03 + + -3.1371229887008667e-01 3.4287638962268829e-02 + <_> + + 0 -1 1784 3.6882720887660980e-03 + + -3.8382381200790405e-02 3.0124679207801819e-01 + <_> + + 0 -1 1785 -1.4818239833402913e-05 + + 1.2561169266700745e-01 -9.1703377664089203e-02 + <_> + + 0 -1 1786 3.0302109662443399e-03 + + -2.9543070122599602e-02 3.7889540195465088e-01 + <_> + + 0 -1 1787 5.9340851294109598e-05 + + -1.7745719850063324e-01 7.0102430880069733e-02 + <_> + + 0 -1 1788 -2.9449560315697454e-05 + + 1.2052319943904877e-01 -1.1128979921340942e-01 + <_> + + 0 -1 1789 -1.7771139740943909e-02 + + -4.7108310461044312e-01 2.5600789114832878e-02 + <_> + + 0 -1 1790 7.6775359921157360e-03 + + -4.0757879614830017e-02 2.7021768689155579e-01 + <_> + + 0 -1 1791 -1.8513019382953644e-01 + + -3.0238750576972961e-01 
3.8790911436080933e-02 + <_> + + 0 -1 1792 2.7697190642356873e-02 + + 2.6712810620665550e-02 -4.4166600704193115e-01 + <_> + + 0 -1 1793 -2.0427649840712547e-02 + + 2.5086608529090881e-01 -5.5672701448202133e-02 + <_> + + 0 -1 1794 9.0200370177626610e-03 + + 4.7344069927930832e-02 -2.7445980906486511e-01 + <_> + + 0 -1 1795 -1.2504979968070984e-03 + + -1.4971190690994263e-01 7.9667650163173676e-02 + <_> + + 0 -1 1796 -1.0021160356700420e-02 + + 2.4248859286308289e-01 -4.9217909574508667e-02 + <_> + + 0 -1 1797 2.6042328681796789e-04 + + 6.3192427158355713e-02 -1.8544280529022217e-01 + <_> + + 0 -1 1798 1.1920549441128969e-03 + + -8.6547911167144775e-02 1.3552339375019073e-01 + <_> + + 0 -1 1799 3.0391330365091562e-03 + + -7.2965219616889954e-02 1.6479800641536713e-01 + <_> + + 0 -1 1800 -2.9615699531859718e-05 + + 8.2047976553440094e-02 -1.4502969384193420e-01 + <_> + + 0 -1 1801 -1.2226340360939503e-02 + + -5.3014177083969116e-01 2.0405799150466919e-02 + <_> + + 0 -1 1802 -2.8124889358878136e-02 + + -5.5148762464523315e-01 1.7688119783997536e-02 + <_> + + 0 -1 1803 -4.8307109624147415e-02 + + -8.2579791545867920e-01 1.1020540259778500e-02 + <_> + + 0 -1 1804 4.6184109523892403e-03 + + 3.2069969922304153e-02 -3.0115368962287903e-01 + <_> + + 0 -1 1805 -8.4275740664452314e-04 + + 1.7034439742565155e-01 -6.3009433448314667e-02 + <_> + + 0 -1 1806 6.3863280229270458e-03 + + 1.6307299956679344e-02 -7.1346491575241089e-01 + <_> + + 0 -1 1807 -7.7203067485243082e-04 + + 1.6715280711650848e-01 -6.6192783415317535e-02 + <_> + + 0 -1 1808 -2.2645338904112577e-03 + + -3.5107091069221497e-01 2.8168670833110809e-02 + <_> + + 0 -1 1809 -3.7738790269941092e-03 + + 5.2762818336486816e-01 -2.0222609862685204e-02 + <_> + + 0 -1 1810 5.8204168453812599e-03 + + 7.0864066481590271e-02 -1.4675390720367432e-01 + <_> + + 0 -1 1811 -1.2069250456988811e-02 + + 2.3928099870681763e-01 -4.4312968850135803e-02 + <_> + + 0 -1 1812 3.3203759230673313e-03 + + -6.5749533474445343e-02 2.0277680456638336e-01 + <_> + + 0 -1 1813 2.1621929481625557e-03 + + 6.7407980561256409e-02 -1.8125349283218384e-01 + <_> + + 0 -1 1814 1.2229150161147118e-02 + + 2.2559309378266335e-02 -4.9180999398231506e-01 + <_> + + 0 -1 1815 -6.7253508605062962e-03 + + -1.5290050208568573e-01 6.9786652922630310e-02 + <_> + + 0 -1 1816 2.3579499684274197e-03 + + 4.9212101846933365e-02 -2.0838280022144318e-01 + <_> + + 0 -1 1817 -2.2950689308345318e-03 + + 1.2400440126657486e-01 -9.6624918282032013e-02 + <_> + + 0 -1 1818 1.0958530474454165e-03 + + -7.3270753026008606e-02 1.5208619832992554e-01 + <_> + + 0 -1 1819 -1.3427219819277525e-03 + + 1.2233039736747742e-01 -9.5689877867698669e-02 + <_> + + 0 -1 1820 5.4691417608410120e-04 + + -1.3924160599708557e-01 8.4381736814975739e-02 + <_> + + 0 -1 1821 8.4598818793892860e-03 + + 8.9689873158931732e-02 -1.3318899273872375e-01 + <_> + + 0 -1 1822 -9.1597117483615875e-02 + + -6.1854732036590576e-01 2.2867869585752487e-02 + <_> + + 0 -1 1823 -1.1090439511463046e-03 + + 5.8513749390840530e-02 -1.8806450068950653e-01 + <_> + + 0 -1 1824 2.2256910597207025e-05 + + -8.4488280117511749e-02 1.2780910730361938e-01 + <_> + + 0 -1 1825 -1.5437819820363075e-04 + + -1.2228029966354370e-01 8.6046978831291199e-02 + <_> + + 0 -1 1826 -2.6862788945436478e-03 + + -2.4487000703811646e-01 4.4255960732698441e-02 + <_> + + 0 -1 1827 -4.0478641167283058e-03 + + 2.7030688524246216e-01 -4.2200870811939240e-02 + <_> + + 0 -1 1828 -5.3340241312980652e-02 + + -7.6232349872589111e-01 1.4388039708137512e-02 + <_> + + 0 -1 1829 
2.8256059158593416e-03 + + -2.9877070337533951e-02 3.9692971110343933e-01 + <_> + + 0 -1 1830 1.4443730004131794e-02 + + 3.0186710879206657e-02 -3.6606648564338684e-01 + <_> + + 0 -1 1831 1.3111650478094816e-03 + + -4.8140369355678558e-02 2.2434459626674652e-01 + <_> + + 0 -1 1832 1.6730680363252759e-03 + + -5.9983398765325546e-02 1.6394190490245819e-01 + <_> + + 0 -1 1833 2.3517120629549026e-02 + + 2.4109700694680214e-02 -4.0492439270019531e-01 + <_> + + 0 -1 1834 -3.5689130891114473e-03 + + 3.1903558969497681e-01 -3.4295879304409027e-02 + <_> + + 0 -1 1835 -2.8193008620291948e-04 + + -1.4874160289764404e-01 7.0669896900653839e-02 + <_> + + 0 -1 1836 1.0215859860181808e-01 + + 1.2840500101447105e-02 -7.7848541736602783e-01 + <_> + + 0 -1 1837 -1.9175480306148529e-01 + + -7.5706577301025391e-01 1.0587760247290134e-02 + <_> + + 0 -1 1838 5.3162658587098122e-03 + + -4.0066570043563843e-02 2.6050180196762085e-01 + <_> + + 0 -1 1839 -1.1487120063975453e-03 + + -1.8017220497131348e-01 6.1610430479049683e-02 + <_> + + 0 -1 1840 -2.8316730260848999e-01 + + -8.4913408756256104e-01 1.1647139675915241e-02 + <_> + + 0 -1 1841 3.3731758594512939e-02 + + 1.2357609719038010e-01 -7.7482230961322784e-02 + <_> + + 0 -1 1842 9.8635945469141006e-03 + + 4.3958030641078949e-02 -2.5541779398918152e-01 + <_> + + 0 -1 1843 -3.1564768869429827e-03 + + 1.8942989408969879e-01 -5.8221038430929184e-02 + <_> + + 0 -1 1844 1.5572150005027652e-03 + + -1.0376139730215073e-01 1.4107349514961243e-01 + <_> + + 0 -1 1845 6.2360420823097229e-02 + + 9.6462322399020195e-03 -8.5804969072341919e-01 + <_> + + 0 -1 1846 1.1480550165288150e-04 + + -8.4419928491115570e-02 1.1312700062990189e-01 + <_> + + 0 -1 1847 -5.9252730570733547e-03 + + -3.1650778651237488e-01 3.2079849392175674e-02 + <_> + + 0 -1 1848 -2.4660851340740919e-04 + + 8.8697679340839386e-02 -1.1085110157728195e-01 + <_> + + 0 -1 1849 1.6946049872785807e-03 + + -5.9657149016857147e-02 2.0904210209846497e-01 + <_> + + 0 -1 1850 9.0623252617660910e-05 + + 7.7441960573196411e-02 -1.2806339561939240e-01 + <_> + + 0 -1 1851 1.1666920036077499e-03 + + -6.1748579144477844e-02 1.5702450275421143e-01 + <_> + + 0 -1 1852 1.2541549513116479e-03 + + 4.4608380645513535e-02 -2.3140360414981842e-01 + <_> + + 0 -1 1853 -6.0275900177657604e-03 + + 9.5281846821308136e-02 -1.0283090174198151e-01 + <_> + + 0 -1 1854 -2.0472849905490875e-01 + + -4.1114759445190430e-01 2.3537550121545792e-02 + <_> + + 0 -1 1855 1.7691280692815781e-02 + + -3.9257150143384933e-02 2.8564441204071045e-01 + <_> + + 0 -1 1856 -1.2875649333000183e-01 + + -8.2030779123306274e-01 1.1735290288925171e-02 + <_> + + 0 -1 1857 1.2868089834228158e-03 + + 5.0858870148658752e-02 -1.7848010361194611e-01 + <_> + + 0 -1 1858 -4.5859832316637039e-03 + + 1.6802109777927399e-01 -6.1582598835229874e-02 + <_> + + 0 -1 1859 4.6391240903176367e-04 + + 6.6747047007083893e-02 -1.4237800240516663e-01 + <_> + + 0 -1 1860 -4.4439961202442646e-03 + + 4.5714980363845825e-01 -2.1746810525655746e-02 + <_> + + 0 -1 1861 3.8220020942389965e-03 + + 1.8094329163432121e-02 -6.0244542360305786e-01 + <_> + + 0 -1 1862 1.3894500443711877e-03 + + 3.4007851034402847e-02 -2.7153480052947998e-01 + <_> + + 0 -1 1863 -7.2111929766833782e-03 + + 2.7312570810317993e-01 -3.6855131387710571e-02 + <_> + + 0 -1 1864 1.6509749693796039e-03 + + -8.4407016634941101e-02 1.3134449720382690e-01 + <_> + + 0 -1 1865 -5.0506892148405313e-04 + + -1.4193339645862579e-01 7.3628053069114685e-02 + <_> + + 0 -1 1866 -1.1205329559743404e-02 + + 3.0093750357627869e-01 
-3.4171391278505325e-02 + <_> + + 0 -1 1867 -3.4860160667449236e-04 + + -2.4538309872150421e-01 5.9823978692293167e-02 + <_> + + 0 -1 1868 7.3347258148714900e-04 + + -6.1770260334014893e-02 1.6367949545383453e-01 + <_> + + 0 -1 1869 -9.2969406396150589e-03 + + -3.0236640572547913e-01 3.9257898926734924e-02 + <_> + + 0 -1 1870 2.3957120254635811e-02 + + -2.3900719359517097e-02 4.8340830206871033e-01 + <_> + + 0 -1 1871 3.6422210541786626e-05 + + -1.2283039838075638e-01 9.1258950531482697e-02 + <_> + + 0 -1 1872 5.0458200275897980e-02 + + 1.3529149815440178e-02 -7.7827727794647217e-01 + <_> + + 0 -1 1873 -9.8683983087539673e-03 + + -4.4060459733009338e-01 2.0404359325766563e-02 + <_> + + 0 -1 1874 -1.0851239785552025e-02 + + 2.0165500044822693e-01 -5.2248589694499969e-02 + <_> + + 0 -1 1875 1.7670930537860841e-04 + + -1.3691440224647522e-01 8.3170592784881592e-02 + <_> + + 0 -1 1876 1.2582179624587297e-04 + + 6.1275351792573929e-02 -1.6542710363864899e-01 + <_> + + 0 -1 1877 -7.0588971721008420e-04 + + 1.5219129621982574e-01 -6.6164620220661163e-02 + <_> + + 0 -1 1878 1.1355109745636582e-03 + + -5.4115369915962219e-02 2.1311099827289581e-01 + <_> + + 0 -1 1879 -3.7436310667544603e-03 + + -2.3469850420951843e-01 4.9591001123189926e-02 + <_> + + 0 -1 1880 1.2309269513934851e-03 + + -7.5196012854576111e-02 1.4646540582180023e-01 + <_> + + 0 -1 1881 3.6228948738425970e-04 + + -9.7789406776428223e-02 1.2091729789972305e-01 + <_> + + 0 -1 1882 7.5996189843863249e-04 + + 6.9713920354843140e-02 -1.6278789937496185e-01 + <_> + + 0 -1 1883 -1.8509250367060304e-03 + + -1.8382890522480011e-01 5.7501520961523056e-02 + <_> + + 0 -1 1884 7.9539678990840912e-03 + + -5.8848708868026733e-02 1.8846440315246582e-01 + <_> + + 0 -1 1885 -3.1013600528240204e-04 + + -1.4575460553169250e-01 7.2403199970722198e-02 + <_> + + 0 -1 1886 1.6956350300461054e-03 + + 7.0550262928009033e-02 -1.6740930080413818e-01 + <_> + + 0 -1 1887 2.9058079235255718e-05 + + -1.0341589897871017e-01 9.5376282930374146e-02 + <_> + + 0 -1 1888 1.4466919936239719e-02 + + -1.7532069236040115e-02 5.4767167568206787e-01 + <_> + + 0 -1 1889 -5.7156499475240707e-02 + + -7.4789309501647949e-01 1.6394419595599174e-02 + <_> + + 0 -1 1890 3.0681469943374395e-03 + + 3.8702819496393204e-02 -2.4164369702339172e-01 + <_> + + 0 -1 1891 3.7490210961550474e-03 + + -5.6555431336164474e-02 2.0308320224285126e-01 + <_> + + 0 -1 1892 -1.0643450077623129e-03 + + -2.8211921453475952e-01 3.5207509994506836e-02 + <_> + + 0 -1 1893 -8.9807435870170593e-03 + + 2.1754769980907440e-01 -5.0628181546926498e-02 + <_> + + 0 -1 1894 2.4643479264341295e-04 + + 7.2727531194686890e-02 -1.4768819510936737e-01 + <_> + + 0 -1 1895 2.2197801154106855e-03 + + -3.6754861474037170e-02 2.6939278841018677e-01 + <_> + 169 + -1.3372850418090820e+00 + + <_> + + 0 -1 1896 -3.5328421741724014e-02 + + 2.4123990535736084e-01 -2.7961900830268860e-01 + <_> + + 0 -1 1897 2.6829841081053019e-03 + + -1.6362559795379639e-01 2.3433500528335571e-01 + <_> + + 0 -1 1898 2.1330378949642181e-03 + + -2.0100639760494232e-01 1.5679529309272766e-01 + <_> + + 0 -1 1899 4.2972870869562030e-04 + + -3.7790980935096741e-01 7.4083693325519562e-02 + <_> + + 0 -1 1900 -3.4645918756723404e-02 + + 3.0556240677833557e-01 -8.3546526730060577e-02 + <_> + + 0 -1 1901 -1.4237920368032064e-05 + + 8.2699142396450043e-02 -2.3583950102329254e-01 + <_> + + 0 -1 1902 4.9165110103785992e-03 + + -1.9556050002574921e-01 9.6965387463569641e-02 + <_> + + 0 -1 1903 6.0989488847553730e-03 + + 7.8470550477504730e-02 
-2.3209640383720398e-01 + <_> + + 0 -1 1904 7.4874181300401688e-03 + + 7.1725919842720032e-03 -5.1566261053085327e-01 + <_> + + 0 -1 1905 4.2871991172432899e-03 + + 4.0530510246753693e-02 -4.1086289286613464e-01 + <_> + + 0 -1 1906 1.6856180503964424e-02 + + -7.7506266534328461e-02 2.3657779395580292e-01 + <_> + + 0 -1 1907 -1.0347689967602491e-03 + + -4.6704441308975220e-01 3.4468568861484528e-02 + <_> + + 0 -1 1908 1.6820980235934258e-03 + + -6.7206740379333496e-02 2.3671430349349976e-01 + <_> + + 0 -1 1909 -1.2018240056931973e-02 + + -2.2372600436210632e-01 7.4281953275203705e-02 + <_> + + 0 -1 1910 1.3802549801766872e-03 + + -9.9990189075469971e-02 1.5270860493183136e-01 + <_> + + 0 -1 1911 -1.4281070232391357e-01 + + -2.8344118595123291e-01 6.2299348413944244e-02 + <_> + + 0 -1 1912 -1.5463490039110184e-02 + + 2.9084190726280212e-01 -5.3395688533782959e-02 + <_> + + 0 -1 1913 -9.9617196246981621e-04 + + -3.6011821031570435e-01 4.1922971606254578e-02 + <_> + + 0 -1 1914 -2.6956679299473763e-02 + + -4.3736729025840759e-01 3.1731128692626953e-02 + <_> + + 0 -1 1915 -8.7780617177486420e-03 + + -5.0374472141265869e-01 2.5146849453449249e-02 + <_> + + 0 -1 1916 4.2969950300175697e-05 + + -1.5406499803066254e-01 8.8478356599807739e-02 + <_> + + 0 -1 1917 -6.2619051896035671e-03 + + 2.2435919940471649e-01 -5.9849821031093597e-02 + <_> + + 0 -1 1918 -6.4296770142391324e-04 + + -2.4377089738845825e-01 5.9389740228652954e-02 + <_> + + 0 -1 1919 -1.5573870041407645e-04 + + -1.6867999732494354e-01 7.8476317226886749e-02 + <_> + + 0 -1 1920 4.1139780660159886e-04 + + -8.9017570018768311e-02 1.4019380509853363e-01 + <_> + + 0 -1 1921 1.8635790329426527e-03 + + 3.8603689521551132e-02 -3.2118970155715942e-01 + <_> + + 0 -1 1922 1.6059159534052014e-03 + + -7.8801520168781281e-02 1.5801469981670380e-01 + <_> + + 0 -1 1923 8.6740078404545784e-04 + + 5.4134480655193329e-02 -2.3538430035114288e-01 + <_> + + 0 -1 1924 -7.9801032552495599e-04 + + 1.3330009579658508e-01 -9.5731817185878754e-02 + <_> + + 0 -1 1925 -4.8548211343586445e-03 + + -2.0736059546470642e-01 6.1038620769977570e-02 + <_> + + 0 -1 1926 -1.1426740325987339e-02 + + 1.7201809585094452e-01 -7.1152277290821075e-02 + <_> + + 0 -1 1927 8.7062492966651917e-03 + + -7.2185672819614410e-02 1.9082969427108765e-01 + <_> + + 0 -1 1928 -1.1634400580078363e-03 + + -1.3751690089702606e-01 9.1818131506443024e-02 + <_> + + 0 -1 1929 6.8914610892534256e-03 + + 9.6225969493389130e-02 -1.3246159255504608e-01 + <_> + + 0 -1 1930 -2.2426620125770569e-03 + + 3.5683241486549377e-01 -3.6280050873756409e-02 + <_> + + 0 -1 1931 1.2301520444452763e-02 + + 4.6940989792346954e-02 -3.0623328685760498e-01 + <_> + + 0 -1 1932 3.9963610470294952e-03 + + -8.2999393343925476e-02 1.5486459434032440e-01 + <_> + + 0 -1 1933 -2.2026189981261268e-05 + + 1.1778099834918976e-01 -1.1899650096893311e-01 + <_> + + 0 -1 1934 5.8708270080387592e-04 + + 5.6864660233259201e-02 -2.2509899735450745e-01 + <_> + + 0 -1 1935 -5.8760121464729309e-03 + + 2.6625269651412964e-01 -4.4570129364728928e-02 + <_> + + 0 -1 1936 4.3262130930088460e-04 + + 5.8049838989973068e-02 -2.1173800528049469e-01 + <_> + + 0 -1 1937 4.7852578572928905e-03 + + -4.0710568428039551e-02 2.9509121179580688e-01 + <_> + + 0 -1 1938 4.5480159315047786e-05 + + -1.8201610445976257e-01 6.0179539024829865e-02 + <_> + + 0 -1 1939 2.5633929762989283e-03 + + -8.7039761245250702e-02 1.2692840397357941e-01 + <_> + + 0 -1 1940 -4.7383471392095089e-03 + + 2.3961830139160156e-01 -4.9914900213479996e-02 + <_> + + 0 -1 1941 
4.4647231698036194e-03 + + 4.0540020912885666e-02 -3.2467570900917053e-01 + <_> + + 0 -1 1942 -6.7061209119856358e-03 + + -3.2789680361747742e-01 3.2299648970365524e-02 + <_> + + 0 -1 1943 7.1761049330234528e-02 + + -2.3713670670986176e-02 4.7772058844566345e-01 + <_> + + 0 -1 1944 3.0584860593080521e-02 + + 1.6793910413980484e-02 -7.8061228990554810e-01 + <_> + + 0 -1 1945 3.8672669325023890e-03 + + -2.4876890704035759e-02 5.1260662078857422e-01 + <_> + + 0 -1 1946 -5.2802208811044693e-02 + + -5.0759661197662354e-01 2.3873040452599525e-02 + <_> + + 0 -1 1947 -6.5651582553982735e-04 + + -2.0122329890727997e-01 4.9672801047563553e-02 + <_> + + 0 -1 1948 8.5785267874598503e-03 + + -4.5007020235061646e-02 2.3518909513950348e-01 + <_> + + 0 -1 1949 -1.2620680499821901e-03 + + -1.9962050020694733e-01 5.5564209818840027e-02 + <_> + + 0 -1 1950 1.4215289615094662e-02 + + -4.6983979642391205e-02 2.0781150460243225e-01 + <_> + + 0 -1 1951 1.6393810510635376e-01 + + 1.4973269775509834e-02 -6.5025687217712402e-01 + <_> + + 0 -1 1952 1.4837640523910522e-01 + + 8.1885885447263718e-03 -9.4296187162399292e-01 + <_> + + 0 -1 1953 1.4631190424552187e-05 + + -1.2383759766817093e-01 8.2489579916000366e-02 + <_> + + 0 -1 1954 -3.3909391611814499e-02 + + -2.2818760573863983e-01 4.3302498757839203e-02 + <_> + + 0 -1 1955 3.8288589566946030e-03 + + -3.7276919931173325e-02 2.7613049745559692e-01 + <_> + + 0 -1 1956 8.0947913229465485e-03 + + 2.8445359319448471e-02 -3.9388808608055115e-01 + <_> + + 0 -1 1957 -7.0019601844251156e-04 + + 1.2199380248785019e-01 -9.2714257538318634e-02 + <_> + + 0 -1 1958 3.4412490203976631e-03 + + -4.8972681164741516e-02 2.0617230236530304e-01 + <_> + + 0 -1 1959 -1.6337490081787109e-01 + + -6.1850237846374512e-01 1.6467820852994919e-02 + <_> + + 0 -1 1960 6.5640709362924099e-03 + + 1.1007189750671387e-01 -9.2340007424354553e-02 + <_> + + 0 -1 1961 4.4708838686347008e-04 + + -1.3933309912681580e-01 7.7039696276187897e-02 + <_> + + 0 -1 1962 1.7568700015544891e-02 + + 9.7569692879915237e-03 -8.0032902956008911e-01 + <_> + + 0 -1 1963 -1.9571769516915083e-03 + + 2.8000330924987793e-01 -3.6428239196538925e-02 + <_> + + 0 -1 1964 5.1913037896156311e-04 + + 5.3515341132879257e-02 -1.9425579905509949e-01 + <_> + + 0 -1 1965 9.6273031085729599e-03 + + 3.1317751854658127e-02 -3.1802541017532349e-01 + <_> + + 0 -1 1966 -5.0332810729742050e-02 + + 5.6659060716629028e-01 -1.8494980409741402e-02 + <_> + + 0 -1 1967 -6.4624901860952377e-03 + + -4.1894671320915222e-01 2.7350850403308868e-02 + <_> + + 0 -1 1968 -5.2857249975204468e-03 + + 1.7756509780883789e-01 -5.8377739042043686e-02 + <_> + + 0 -1 1969 9.9454462528228760e-02 + + 1.6487719491124153e-02 -5.8526170253753662e-01 + <_> + + 0 -1 1970 2.1917840058449656e-04 + + -1.0714250057935715e-01 9.1884173452854156e-02 + <_> + + 0 -1 1971 -4.3873358663404360e-05 + + 7.8036926686763763e-02 -1.2723919749259949e-01 + <_> + + 0 -1 1972 -6.7227642284706235e-04 + + -2.5709420442581177e-01 3.8843378424644470e-02 + <_> + + 0 -1 1973 1.1754270235542208e-04 + + -7.9695962369441986e-02 1.2093970179557800e-01 + <_> + + 0 -1 1974 4.6061190962791443e-01 + + 1.3886069878935814e-02 -6.5241271257400513e-01 + <_> + + 0 -1 1975 1.1115600354969501e-02 + + 1.3871660456061363e-02 -6.0222518444061279e-01 + <_> + + 0 -1 1976 9.0776477009057999e-03 + + -3.6118660122156143e-02 2.5702419877052307e-01 + <_> + + 0 -1 1977 -4.9597548786550760e-04 + + 1.1017049849033356e-01 -8.9249506592750549e-02 + <_> + + 0 -1 1978 1.5807070303708315e-03 + + 4.8131279647350311e-02 
-2.0215910673141479e-01 + <_> + + 0 -1 1979 -6.9012932479381561e-02 + + -8.1536060571670532e-01 1.0660010389983654e-02 + <_> + + 0 -1 1980 1.9330780196469277e-04 + + -1.1231829971075058e-01 8.5046432912349701e-02 + <_> + + 0 -1 1981 7.8813207801431417e-04 + + -5.5200818926095963e-02 1.7654439806938171e-01 + <_> + + 0 -1 1982 9.5367128960788250e-04 + + 5.4411198943853378e-02 -1.8674199283123016e-01 + <_> + + 0 -1 1983 -2.3191540967673063e-03 + + -2.7544409036636353e-01 3.8513321429491043e-02 + <_> + + 0 -1 1984 9.5087959198281169e-04 + + -6.8218901753425598e-02 1.6082139313220978e-01 + <_> + + 0 -1 1985 9.5385108143091202e-03 + + -3.8826879113912582e-02 3.0370831489562988e-01 + <_> + + 0 -1 1986 -1.4489189721643925e-02 + + -4.6989730000495911e-01 2.3550020530819893e-02 + <_> + + 0 -1 1987 1.0756050236523151e-02 + + 2.0565100014209747e-02 -4.7243130207061768e-01 + <_> + + 0 -1 1988 -2.0074830390512943e-03 + + -2.7946698665618896e-01 3.6021549254655838e-02 + <_> + + 0 -1 1989 -1.7316909506917000e-03 + + 2.0902790129184723e-01 -4.6300981193780899e-02 + <_> + + 0 -1 1990 1.5234799683094025e-01 + + 1.4934250153601170e-02 -6.0461127758026123e-01 + <_> + + 0 -1 1991 6.3340878114104271e-04 + + 5.0307150930166245e-02 -1.8277199566364288e-01 + <_> + + 0 -1 1992 -8.2793915644288063e-03 + + 3.6463031172752380e-01 -2.6474289596080780e-02 + <_> + + 0 -1 1993 1.3667670078575611e-02 + + 1.2511620298027992e-02 -8.9023828506469727e-01 + <_> + + 0 -1 1994 2.0979309920221567e-03 + + -8.0247193574905396e-02 1.2989950180053711e-01 + <_> + + 0 -1 1995 -8.9776562526822090e-03 + + 1.7411080002784729e-01 -6.1771109700202942e-02 + <_> + + 0 -1 1996 1.2094390112906694e-03 + + 6.8711720407009125e-02 -1.6561290621757507e-01 + <_> + + 0 -1 1997 6.8200258538126945e-03 + + 5.7795759290456772e-02 -1.8231619894504547e-01 + <_> + + 0 -1 1998 -1.8268059939146042e-03 + + 1.3340330123901367e-01 -7.5343966484069824e-02 + <_> + + 0 -1 1999 7.9908408224582672e-03 + + -4.5094471424818039e-02 2.4594159424304962e-01 + <_> + + 0 -1 2000 -2.5262041017413139e-03 + + -2.0763960480690002e-01 5.2334129810333252e-02 + <_> + + 0 -1 2001 -7.4825510382652283e-02 + + -5.4688757658004761e-01 1.7803389579057693e-02 + <_> + + 0 -1 2002 -3.3099399879574776e-03 + + 3.3455818891525269e-01 -2.8966419398784637e-02 + <_> + + 0 -1 2003 8.2276277244091034e-03 + + 4.1579861193895340e-02 -2.6652270555496216e-01 + <_> + + 0 -1 2004 3.1686299480497837e-03 + + -4.1817110031843185e-02 2.9769781231880188e-01 + <_> + + 0 -1 2005 1.5170290134847164e-02 + + 4.3392360210418701e-02 -2.4617969989776611e-01 + <_> + + 0 -1 2006 -1.5946379862725735e-03 + + 1.5057189762592316e-01 -7.3017738759517670e-02 + <_> + + 0 -1 2007 -8.5226353257894516e-03 + + -1.5050080418586731e-01 6.9656036794185638e-02 + <_> + + 0 -1 2008 -1.1418120004236698e-02 + + 1.2974749505519867e-01 -9.5122329890727997e-02 + <_> + + 0 -1 2009 -2.8856399655342102e-01 + + -2.1124540269374847e-01 4.7410819679498672e-02 + <_> + + 0 -1 2010 -3.9014229550957680e-03 + + -2.6843780279159546e-01 3.8698658347129822e-02 + <_> + + 0 -1 2011 -3.5567739978432655e-03 + + 2.3385030031204224e-01 -4.5723881572484970e-02 + <_> + + 0 -1 2012 -6.4394129440188408e-03 + + -6.0463881492614746e-01 1.6156049445271492e-02 + <_> + + 0 -1 2013 -7.4861319735646248e-03 + + 1.6867969930171967e-01 -5.5975880473852158e-02 + <_> + + 0 -1 2014 2.3621210129931569e-04 + + 5.3596749901771545e-02 -2.1872919797897339e-01 + <_> + + 0 -1 2015 2.6099249720573425e-02 + + -5.3937491029500961e-02 2.2728930413722992e-01 + <_> + + 0 -1 2016 
-1.7809759592637420e-03 + + 8.6759522557258606e-02 -1.2009979784488678e-01 + <_> + + 0 -1 2017 -1.1987469770247117e-04 + + -1.5347549319267273e-01 7.0707783102989197e-02 + <_> + + 0 -1 2018 -6.8248361349105835e-03 + + -3.7341019511222839e-01 2.6779960840940475e-02 + <_> + + 0 -1 2019 -1.3119089999236166e-04 + + -1.1640869826078415e-01 8.7211161851882935e-02 + <_> + + 0 -1 2020 -1.8228540429845452e-03 + + 1.5664499998092651e-01 -6.8006090819835663e-02 + <_> + + 0 -1 2021 2.6267999783158302e-03 + + -3.6987219005823135e-02 2.6393121480941772e-01 + <_> + + 0 -1 2022 -7.0677183568477631e-02 + + -2.8295999765396118e-01 3.5035520792007446e-02 + <_> + + 0 -1 2023 1.8061319366097450e-02 + + -2.8041649609804153e-02 3.5313779115676880e-01 + <_> + + 0 -1 2024 9.2649407451972365e-04 + + 4.4600278139114380e-02 -2.2788539528846741e-01 + <_> + + 0 -1 2025 -5.3023721557110548e-04 + + -2.0866680145263672e-01 6.2718503177165985e-02 + <_> + + 0 -1 2026 3.6058931145817041e-03 + + -6.7796908318996429e-02 1.4900009334087372e-01 + <_> + + 0 -1 2027 8.5915643721818924e-03 + + -4.5626759529113770e-02 2.3078480362892151e-01 + <_> + + 0 -1 2028 -8.8329352438449860e-03 + + -4.1117089986801147e-01 2.8230689465999603e-02 + <_> + + 0 -1 2029 4.0959479520097375e-04 + + 5.3656630218029022e-02 -1.8243549764156342e-01 + <_> + + 0 -1 2030 -2.5011589750647545e-03 + + 1.6313549876213074e-01 -6.0954701155424118e-02 + <_> + + 0 -1 2031 -1.4622169546782970e-02 + + -4.9988400936126709e-01 1.8572760745882988e-02 + <_> + + 0 -1 2032 -6.3790678977966309e-02 + + -4.8329600691795349e-01 1.7903389409184456e-02 + <_> + + 0 -1 2033 -1.6671139746904373e-02 + + -2.6661589741706848e-01 3.4886009991168976e-02 + <_> + + 0 -1 2034 -1.2526069767773151e-02 + + 3.4061339497566223e-01 -2.8094800189137459e-02 + <_> + + 0 -1 2035 4.8325158655643463e-02 + + -3.3176191151142120e-02 2.9025658965110779e-01 + <_> + + 0 -1 2036 1.3246550224721432e-03 + + 3.7181440740823746e-02 -2.6850658655166626e-01 + <_> + + 0 -1 2037 -2.2221319377422333e-01 + + -8.9892768859863281e-01 1.0064439848065376e-02 + <_> + + 0 -1 2038 1.2954319827258587e-03 + + -1.0161759704351425e-01 9.0588621795177460e-02 + <_> + + 0 -1 2039 1.3794669881463051e-02 + + -7.4244648218154907e-02 1.4314259588718414e-01 + <_> + + 0 -1 2040 8.5643801139667630e-04 + + 5.9753969311714172e-02 -1.8660190701484680e-01 + <_> + + 0 -1 2041 -2.3317540064454079e-02 + + -6.9259917736053467e-01 1.3667319901287556e-02 + <_> + + 0 -1 2042 1.6281680436804891e-03 + + -6.1060748994350433e-02 1.5505290031433105e-01 + <_> + + 0 -1 2043 -1.2380329892039299e-02 + + -1.5146850049495697e-01 6.1767600476741791e-02 + <_> + + 0 -1 2044 1.8393599893897772e-03 + + -3.7167988717556000e-02 2.4822179973125458e-01 + <_> + + 0 -1 2045 3.5529870074242353e-03 + + -2.9200790449976921e-02 3.3592289686203003e-01 + <_> + + 0 -1 2046 1.0305979521945119e-03 + + 3.7694081664085388e-02 -2.9085698723793030e-01 + <_> + + 0 -1 2047 2.9916960556874983e-05 + + -8.8014192879199982e-02 1.0515210032463074e-01 + <_> + + 0 -1 2048 -4.1505339322611690e-04 + + 6.5726242959499359e-02 -1.5021100640296936e-01 + <_> + + 0 -1 2049 -1.4631619706051424e-05 + + 7.8170351684093475e-02 -1.1962439864873886e-01 + <_> + + 0 -1 2050 -4.3779090046882629e-03 + + 2.0752459764480591e-01 -5.2089329808950424e-02 + <_> + + 0 -1 2051 4.7036199248395860e-04 + + 6.3348479568958282e-02 -1.8767729401588440e-01 + <_> + + 0 -1 2052 1.4788640328333713e-05 + + -9.5828853547573090e-02 1.1213099956512451e-01 + <_> + + 0 -1 2053 3.7048431113362312e-04 + + -9.8723009228706360e-02 
9.8647676408290863e-02 + <_> + + 0 -1 2054 -1.8590339459478855e-03 + + -2.6873630285263062e-01 3.8352578878402710e-02 + <_> + + 0 -1 2055 -7.0764529518783092e-03 + + -1.5984000265598297e-01 5.7841330766677856e-02 + <_> + + 0 -1 2056 1.4920010231435299e-02 + + -5.1178149878978729e-02 1.9242909550666809e-01 + <_> + + 0 -1 2057 -5.0713191740214825e-03 + + 1.3863259553909302e-01 -1.1121229827404022e-01 + <_> + + 0 -1 2058 -1.5005500055849552e-02 + + 4.8583930730819702e-01 -1.8811760470271111e-02 + <_> + + 0 -1 2059 -2.0439480431377888e-03 + + -3.2754859328269958e-01 2.7816310524940491e-02 + <_> + + 0 -1 2060 -1.3060690253041685e-04 + + 9.8868042230606079e-02 -8.4957577288150787e-02 + <_> + + 0 -1 2061 8.8742617517709732e-03 + + -2.5235600769519806e-02 3.2389879226684570e-01 + <_> + + 0 -1 2062 7.0397509261965752e-04 + + 5.6327521800994873e-02 -1.7392079532146454e-01 + <_> + + 0 -1 2063 -2.5402469560503960e-02 + + 1.9675390422344208e-01 -4.7362301498651505e-02 + <_> + + 0 -1 2064 -9.3743661418557167e-03 + + -1.5204219520092010e-01 5.9932630509138107e-02 + <_> + 178 + -1.3418790102005005e+00 + + <_> + + 0 -1 2065 4.0453020483255386e-02 + + -2.3637829720973969e-01 2.8865531086921692e-01 + <_> + + 0 -1 2066 -1.1056049726903439e-02 + + 1.6062900424003601e-01 -2.6259741187095642e-01 + <_> + + 0 -1 2067 -3.9778949576430023e-04 + + 1.1591099947690964e-01 -2.7081018686294556e-01 + <_> + + 0 -1 2068 1.0191530454903841e-03 + + -2.0969370007514954e-01 1.3642899692058563e-01 + <_> + + 0 -1 2069 3.6101979203522205e-03 + + -2.1725459396839142e-01 1.2617790699005127e-01 + <_> + + 0 -1 2070 4.4545531272888184e-04 + + -1.5974539518356323e-01 1.2596489489078522e-01 + <_> + + 0 -1 2071 5.8226222172379494e-03 + + -1.5484449267387390e-01 9.7783811390399933e-02 + <_> + + 0 -1 2072 -2.1416260860860348e-03 + + -3.6377671360969543e-01 4.0103349834680557e-02 + <_> + + 0 -1 2073 -2.6691620587371290e-04 + + 8.4470756351947784e-02 -1.7496100068092346e-01 + <_> + + 0 -1 2074 -5.4352330043911934e-03 + + -3.1830930709838867e-01 4.9786038696765900e-02 + <_> + + 0 -1 2075 -1.5426309546455741e-03 + + -2.1333709359169006e-01 6.4884513616561890e-02 + <_> + + 0 -1 2076 -2.7932289522141218e-03 + + 2.5483250617980957e-01 -6.5170928835868835e-02 + <_> + + 0 -1 2077 1.3845940120518208e-03 + + 3.9304580539464951e-02 -3.7404829263687134e-01 + <_> + + 0 -1 2078 -3.2193479128181934e-03 + + 2.6290428638458252e-01 -5.6396361440420151e-02 + <_> + + 0 -1 2079 -9.7977351397275925e-03 + + 3.2044389843940735e-01 -4.6382289379835129e-02 + <_> + + 0 -1 2080 -1.7625789623707533e-03 + + 1.5050819516181946e-01 -8.8892437517642975e-02 + <_> + + 0 -1 2081 -3.6096889525651932e-02 + + -4.3137839436531067e-01 3.1785801053047180e-02 + <_> + + 0 -1 2082 2.0813369192183018e-03 + + -6.5957918763160706e-02 1.9275289773941040e-01 + <_> + + 0 -1 2083 -6.0533690266311169e-03 + + -3.1374609470367432e-01 5.1007431000471115e-02 + <_> + + 0 -1 2084 3.7253410555422306e-03 + + -6.1402589082717896e-02 2.5631371140480042e-01 + <_> + + 0 -1 2085 5.0668260082602501e-03 + + 5.7962730526924133e-02 -2.4340160191059113e-01 + <_> + + 0 -1 2086 2.8038739692419767e-03 + + -7.0329703390598297e-02 2.1375860273838043e-01 + <_> + + 0 -1 2087 1.5925259795039892e-03 + + 2.6637760922312737e-02 -5.1129138469696045e-01 + <_> + + 0 -1 2088 2.9422679290291853e-05 + + -2.1710200607776642e-01 6.4985051751136780e-02 + <_> + + 0 -1 2089 -2.2399190129362978e-05 + + 8.1582568585872650e-02 -1.5135610103607178e-01 + <_> + + 0 -1 2090 6.7072827368974686e-04 + + 1.0502190142869949e-01 
-1.1787360161542892e-01 + <_> + + 0 -1 2091 -1.5262300148606300e-03 + + -3.4620371460914612e-01 3.9244089275598526e-02 + <_> + + 0 -1 2092 1.8151829717680812e-03 + + -7.4669457972049713e-02 1.6847759485244751e-01 + <_> + + 0 -1 2093 5.8078771689906716e-04 + + -9.7952410578727722e-02 1.4192749559879303e-01 + <_> + + 0 -1 2094 -8.9623313397169113e-03 + + -1.9601620733737946e-01 6.6268041729927063e-02 + <_> + + 0 -1 2095 1.1146809905767441e-01 + + 1.7000140622258186e-02 -6.4917707443237305e-01 + <_> + + 0 -1 2096 -1.7872039461508393e-04 + + -1.4053599536418915e-01 8.0108702182769775e-02 + <_> + + 0 -1 2097 -4.6587768010795116e-03 + + 1.9530229270458221e-01 -5.8602340519428253e-02 + <_> + + 0 -1 2098 3.4576000180095434e-03 + + 5.9805799275636673e-02 -2.1990789473056793e-01 + <_> + + 0 -1 2099 -1.9979270291514695e-04 + + -1.3726149499416351e-01 8.3430230617523193e-02 + <_> + + 0 -1 2100 -4.8079751431941986e-03 + + 5.5041921138763428e-01 -2.0715299993753433e-02 + <_> + + 0 -1 2101 -7.3389292083447799e-06 + + 7.5302027165889740e-02 -1.4486590027809143e-01 + <_> + + 0 -1 2102 -3.5799799952656031e-03 + + 2.6277220249176025e-01 -4.2550459504127502e-02 + <_> + + 0 -1 2103 1.1689850362017751e-03 + + -1.0984169691801071e-01 1.2971849739551544e-01 + <_> + + 0 -1 2104 3.2639548182487488e-02 + + 3.1038379296660423e-02 -3.9474260807037354e-01 + <_> + + 0 -1 2105 1.1596709955483675e-03 + + 5.2021898329257965e-02 -2.2035829722881317e-01 + <_> + + 0 -1 2106 -1.4262240147218108e-03 + + 1.0745699703693390e-01 -1.0067079961299896e-01 + <_> + + 0 -1 2107 -2.3668329417705536e-01 + + -7.3174351453781128e-01 1.6999609768390656e-02 + <_> + + 0 -1 2108 1.9279429398011416e-04 + + -1.3248440623283386e-01 7.8186027705669403e-02 + <_> + + 0 -1 2109 -1.7292149364948273e-02 + + -9.7199842333793640e-02 1.1069560050964355e-01 + <_> + + 0 -1 2110 -1.2431619688868523e-03 + + 1.7741470038890839e-01 -7.2548337280750275e-02 + <_> + + 0 -1 2111 2.1754560293629766e-05 + + -9.6952050924301147e-02 1.0899409651756287e-01 + <_> + + 0 -1 2112 3.0975879053585231e-04 + + 6.2249891459941864e-02 -1.7384719848632812e-01 + <_> + + 0 -1 2113 -1.1590570211410522e-02 + + 2.6162809133529663e-01 -4.1994079947471619e-02 + <_> + + 0 -1 2114 1.8150920048356056e-02 + + 2.6353549212217331e-02 -4.4685411453247070e-01 + <_> + + 0 -1 2115 8.0223509576171637e-04 + + -1.2143869698047638e-01 8.7092787027359009e-02 + <_> + + 0 -1 2116 -1.4258639421314001e-03 + + 1.9236080348491669e-01 -5.2987430244684219e-02 + <_> + + 0 -1 2117 -2.4536260752938688e-04 + + -1.6683700680732727e-01 6.5604820847511292e-02 + <_> + + 0 -1 2118 2.2050029656384140e-05 + + -9.3477472662925720e-02 1.0711719840764999e-01 + <_> + + 0 -1 2119 4.7658861149102449e-04 + + -8.0596633255481720e-02 1.2512689828872681e-01 + <_> + + 0 -1 2120 4.0533850551582873e-04 + + 6.8990617990493774e-02 -1.5740759670734406e-01 + <_> + + 0 -1 2121 -1.6471749171614647e-02 + + -5.9667861461639404e-01 1.8876109272241592e-02 + <_> + + 0 -1 2122 2.2267159074544907e-03 + + -4.5803830027580261e-02 2.3071089386940002e-01 + <_> + + 0 -1 2123 4.9383189529180527e-02 + + 1.9837729632854462e-02 -5.9306108951568604e-01 + <_> + + 0 -1 2124 8.6411498486995697e-03 + + 2.8697369620203972e-02 -3.5161119699478149e-01 + <_> + + 0 -1 2125 -4.8241391777992249e-03 + + 2.2474339604377747e-01 -4.8463210463523865e-02 + <_> + + 0 -1 2126 -8.6174849420785904e-03 + + -5.7088959217071533e-01 1.9183190539479256e-02 + <_> + + 0 -1 2127 -5.7220697635784745e-04 + + 1.1697269976139069e-01 -8.8938057422637939e-02 + <_> + + 0 -1 2128 
1.1997730471193790e-03 + + 8.4181122481822968e-02 -1.2565499544143677e-01 + <_> + + 0 -1 2129 2.6049909647554159e-03 + + 5.9500031173229218e-02 -2.0638149976730347e-01 + <_> + + 0 -1 2130 -1.4789920533075929e-03 + + 2.5114980340003967e-01 -4.7535050660371780e-02 + <_> + + 0 -1 2131 -2.5746721029281616e-01 + + -7.3038768768310547e-01 1.5440680086612701e-02 + <_> + + 0 -1 2132 -1.2104290071874857e-03 + + 1.8646970391273499e-01 -5.5789809674024582e-02 + <_> + + 0 -1 2133 3.4140399657189846e-04 + + 6.7707672715187073e-02 -1.5597160160541534e-01 + <_> + + 0 -1 2134 3.1749058980494738e-03 + + 3.5003460943698883e-02 -2.9529309272766113e-01 + <_> + + 0 -1 2135 4.4338819384574890e-01 + + 1.4550019986927509e-02 -6.1034661531448364e-01 + <_> + + 0 -1 2136 3.9458259940147400e-02 + + -4.5779328793287277e-02 2.2927519679069519e-01 + <_> + + 0 -1 2137 3.0410829931497574e-03 + + 1.6304129734635353e-02 -5.7491117715835571e-01 + <_> + + 0 -1 2138 -1.4853020012378693e-01 + + -5.6220901012420654e-01 1.5771050006151199e-02 + <_> + + 0 -1 2139 4.4339009036775678e-05 + + -9.1284371912479401e-02 1.0920979827642441e-01 + <_> + + 0 -1 2140 2.2139810025691986e-03 + + -4.7668289393186569e-02 2.2291789948940277e-01 + <_> + + 0 -1 2141 8.7831966578960419e-02 + + 2.6718059554696083e-02 -4.0396329760551453e-01 + <_> + + 0 -1 2142 -2.2798930294811726e-03 + + -1.6160930693149567e-01 6.6071107983589172e-02 + <_> + + 0 -1 2143 -1.4653969628852792e-05 + + 8.5298359394073486e-02 -1.2724019587039948e-01 + <_> + + 0 -1 2144 1.2313240440562367e-03 + + -6.5917477011680603e-02 1.6606420278549194e-01 + <_> + + 0 -1 2145 4.5110988616943359e-01 + + 1.3457960449159145e-02 -7.1525502204895020e-01 + <_> + + 0 -1 2146 -2.4518640711903572e-02 + + -4.3282639980316162e-01 2.0400719717144966e-02 + <_> + + 0 -1 2147 -1.1901959805982187e-04 + + 8.9420333504676819e-02 -1.1834760010242462e-01 + <_> + + 0 -1 2148 -1.3584910193458200e-03 + + 2.4722290039062500e-01 -4.3907400220632553e-02 + <_> + + 0 -1 2149 6.9289728999137878e-03 + + -5.6832619011402130e-02 1.6665740311145782e-01 + <_> + + 0 -1 2150 -6.9041848182678223e-03 + + -1.2742209434509277e-01 7.9310603439807892e-02 + <_> + + 0 -1 2151 1.2964820489287376e-03 + + 7.2462439537048340e-02 -1.6863870620727539e-01 + <_> + + 0 -1 2152 2.3060059174895287e-02 + + -5.0913080573081970e-02 2.1664789319038391e-01 + <_> + + 0 -1 2153 -4.0960568934679031e-02 + + -5.6479138135910034e-01 1.9609550014138222e-02 + <_> + + 0 -1 2154 7.4867479270324111e-05 + + -6.9450333714485168e-02 1.4615139365196228e-01 + <_> + + 0 -1 2155 -6.8458272144198418e-03 + + 6.6049978137016296e-02 -2.0840729773044586e-01 + <_> + + 0 -1 2156 1.9395649433135986e-02 + + 1.6168899834156036e-02 -5.6396162509918213e-01 + <_> + + 0 -1 2157 -1.6121419321279973e-04 + + -1.3194569945335388e-01 7.4094116687774658e-02 + <_> + + 0 -1 2158 6.6511691547930241e-03 + + -5.5261820554733276e-02 1.9894389808177948e-01 + <_> + + 0 -1 2159 4.5172171667218208e-03 + + 3.2863661646842957e-02 -3.0980890989303589e-01 + <_> + + 0 -1 2160 -4.0247041732072830e-02 + + -6.8980348110198975e-01 1.2438739649951458e-02 + <_> + + 0 -1 2161 7.2544030444987584e-06 + + -9.5949873328208923e-02 9.7919799387454987e-02 + <_> + + 0 -1 2162 -1.6025650501251221e-01 + + 4.9472638964653015e-01 -1.8643429502844810e-02 + <_> + + 0 -1 2163 5.0598900998011231e-04 + + -1.2216579914093018e-01 8.6699098348617554e-02 + <_> + + 0 -1 2164 -1.0506899654865265e-01 + + -8.5855627059936523e-01 8.2870386540889740e-03 + <_> + + 0 -1 2165 -1.8218380212783813e-01 + + -5.8477312326431274e-01 
1.3160600326955318e-02 + <_> + + 0 -1 2166 1.6435410827398300e-02 + + 1.6296360641717911e-02 -5.5137562751770020e-01 + <_> + + 0 -1 2167 1.9282519817352295e-02 + + -2.5027479976415634e-02 4.3645161390304565e-01 + <_> + + 0 -1 2168 3.4772949293255806e-03 + + 3.1632781028747559e-02 -2.9246759414672852e-01 + <_> + + 0 -1 2169 2.2620869800448418e-02 + + -2.3985739797353745e-02 4.3105301260948181e-01 + <_> + + 0 -1 2170 -1.8172320723533630e-01 + + -1.8037860095500946e-01 5.1903489977121353e-02 + <_> + + 0 -1 2171 -4.3819830752909184e-03 + + -2.8302851319313049e-01 3.3024039119482040e-02 + <_> + + 0 -1 2172 -1.5246120281517506e-02 + + 2.3519919812679291e-01 -4.1242249310016632e-02 + <_> + + 0 -1 2173 3.9043289422988892e-01 + + 2.8530629351735115e-02 -3.5845771431922913e-01 + <_> + + 0 -1 2174 3.9103450253605843e-03 + + -5.1523748785257339e-02 1.7829769849777222e-01 + <_> + + 0 -1 2175 -1.0847560130059719e-02 + + -4.8355281352996826e-01 1.8765790387988091e-02 + <_> + + 0 -1 2176 5.7015339843928814e-03 + + 1.2250830419361591e-02 -7.0457488298416138e-01 + <_> + + 0 -1 2177 -1.1917110532522202e-03 + + 1.8404430150985718e-01 -5.0144620239734650e-02 + <_> + + 0 -1 2178 4.0988530963659286e-04 + + -9.7399666905403137e-02 1.0874579846858978e-01 + <_> + + 0 -1 2179 4.5295488089323044e-03 + + 4.5356839895248413e-02 -2.1069140732288361e-01 + <_> + + 0 -1 2180 -5.4893731139600277e-03 + + 2.9642790555953979e-01 -3.5870831459760666e-02 + <_> + + 0 -1 2181 1.9906361121684313e-03 + + 3.4332871437072754e-02 -3.1506469845771790e-01 + <_> + + 0 -1 2182 8.3358466625213623e-02 + + 1.9684519618749619e-02 -4.4279980659484863e-01 + <_> + + 0 -1 2183 3.0363420955836773e-03 + + -3.3693831413984299e-02 2.6669681072235107e-01 + <_> + + 0 -1 2184 5.7799968868494034e-02 + + 8.5875885561108589e-03 -9.8965817689895630e-01 + <_> + + 0 -1 2185 -7.8585641458630562e-03 + + 2.0088459551334381e-01 -4.6583641320466995e-02 + <_> + + 0 -1 2186 1.9253200152888894e-03 + + 4.7922369092702866e-02 -2.2640110552310944e-01 + <_> + + 0 -1 2187 1.0996909812092781e-02 + + 1.6258660703897476e-02 -5.4048168659210205e-01 + <_> + + 0 -1 2188 1.6405170026700944e-04 + + -1.1542510241270065e-01 7.6001413166522980e-02 + <_> + + 0 -1 2189 5.3780381567776203e-03 + + 1.1179029941558838e-01 -8.4179848432540894e-02 + <_> + + 0 -1 2190 2.2905960213392973e-03 + + -5.7969480752944946e-02 1.6899429261684418e-01 + <_> + + 0 -1 2191 6.3102580606937408e-03 + + 4.1471399366855621e-02 -2.0478209853172302e-01 + <_> + + 0 -1 2192 -1.4342570304870605e-01 + + -7.8573477268218994e-01 1.1634309776127338e-02 + <_> + + 0 -1 2193 1.2364640133455396e-03 + + -5.1800731569528580e-02 1.7734350264072418e-01 + <_> + + 0 -1 2194 -2.0046550780534744e-02 + + -3.1420910358428955e-01 2.8849070891737938e-02 + <_> + + 0 -1 2195 1.0868109762668610e-01 + + 1.6183530911803246e-02 -5.1956307888031006e-01 + <_> + + 0 -1 2196 5.1173489540815353e-02 + + -3.2460309565067291e-02 3.1230181455612183e-01 + <_> + + 0 -1 2197 1.3251069933176041e-02 + + 2.3655060678720474e-02 -4.4210249185562134e-01 + <_> + + 0 -1 2198 -2.0110961049795151e-03 + + 1.0359399765729904e-01 -9.3961462378501892e-02 + <_> + + 0 -1 2199 -3.2843051012605429e-03 + + 3.3196929097175598e-01 -2.9921280220150948e-02 + <_> + + 0 -1 2200 8.8341237278655171e-04 + + 5.9891819953918457e-02 -1.6192750632762909e-01 + <_> + + 0 -1 2201 8.4265992045402527e-03 + + -3.6928750574588776e-02 2.3691199719905853e-01 + <_> + + 0 -1 2202 -1.4503750207950361e-05 + + 7.7373847365379333e-02 -1.3290609419345856e-01 + <_> + + 0 -1 2203 
8.0891689285635948e-03 + + 2.8817569836974144e-02 -3.0961230397224426e-01 + <_> + + 0 -1 2204 1.0339939966797829e-02 + + -2.4850569665431976e-02 3.7060049176216125e-01 + <_> + + 0 -1 2205 -2.2790539078414440e-03 + + -2.2051370143890381e-01 4.1877530515193939e-02 + <_> + + 0 -1 2206 -1.7716860165819526e-03 + + 1.4205080270767212e-01 -6.5252363681793213e-02 + <_> + + 0 -1 2207 -6.9317207671701908e-03 + + -3.3556079864501953e-01 2.7605969458818436e-02 + <_> + + 0 -1 2208 -4.2506060563027859e-03 + + 2.3591980338096619e-01 -3.7345319986343384e-02 + <_> + + 0 -1 2209 1.5317599754780531e-03 + + 3.9657011628150940e-02 -2.3438200354576111e-01 + <_> + + 0 -1 2210 1.4941049739718437e-03 + + -6.0311999171972275e-02 1.4468440413475037e-01 + <_> + + 0 -1 2211 -5.2249869331717491e-03 + + -4.0660250186920166e-01 2.3257270455360413e-02 + <_> + + 0 -1 2212 6.4759532688185573e-04 + + 6.4828239381313324e-02 -1.2987309694290161e-01 + <_> + + 0 -1 2213 3.2836120226420462e-04 + + 6.1917629092931747e-02 -1.4835810661315918e-01 + <_> + + 0 -1 2214 -3.4691279288381338e-03 + + 1.5662840008735657e-01 -5.7200349867343903e-02 + <_> + + 0 -1 2215 4.5903379213996232e-04 + + 5.2517898380756378e-02 -1.9093179702758789e-01 + <_> + + 0 -1 2216 -2.6641879230737686e-03 + + 1.5235909819602966e-01 -6.8154700100421906e-02 + <_> + + 0 -1 2217 -8.2513149827718735e-03 + + 3.6680310964584351e-01 -2.8480609878897667e-02 + <_> + + 0 -1 2218 7.1076201274991035e-03 + + 1.5445350110530853e-01 -6.7992970347404480e-02 + <_> + + 0 -1 2219 -4.3808001279830933e-01 + + -2.8871530294418335e-01 3.6639489233493805e-02 + <_> + + 0 -1 2220 6.3719082390889525e-04 + + -1.5995030105113983e-01 5.9860341250896454e-02 + <_> + + 0 -1 2221 -1.9303169392514974e-04 + + 8.6703971028327942e-02 -1.0924819856882095e-01 + <_> + + 0 -1 2222 3.0723758973181248e-03 + + 4.8543959856033325e-02 -1.7700059711933136e-01 + <_> + + 0 -1 2223 1.8341860268265009e-03 + + -5.1901239901781082e-02 1.8232129514217377e-01 + <_> + + 0 -1 2224 6.3172310590744019e-02 + + 2.3308899253606796e-02 -4.2870610952377319e-01 + <_> + + 0 -1 2225 2.4458649568259716e-03 + + -8.6425289511680603e-02 1.1974500119686127e-01 + <_> + + 0 -1 2226 1.1953969951719046e-03 + + 1.1685889959335327e-01 -1.0430490225553513e-01 + <_> + + 0 -1 2227 3.1024610507301986e-04 + + 6.2281988561153412e-02 -1.9196020066738129e-01 + <_> + + 0 -1 2228 -3.1970158219337463e-02 + + -6.4184898138046265e-01 1.3087569735944271e-02 + <_> + + 0 -1 2229 -1.0163170518353581e-03 + + -2.5210660696029663e-01 3.4096211194992065e-02 + <_> + + 0 -1 2230 -5.1776540931314230e-04 + + 1.1874090135097504e-01 -8.2813777029514313e-02 + <_> + + 0 -1 2231 -4.0794219821691513e-03 + + -1.6135309636592865e-01 6.5708972513675690e-02 + <_> + + 0 -1 2232 9.9409874528646469e-03 + + -3.0160220339894295e-02 3.5104531049728394e-01 + <_> + + 0 -1 2233 1.9788760691881180e-03 + + -4.4945359230041504e-02 2.3295649886131287e-01 + <_> + + 0 -1 2234 1.0975249856710434e-01 + + 1.6620220616459846e-02 -6.0423362255096436e-01 + <_> + + 0 -1 2235 -9.2024728655815125e-03 + + -5.6000357866287231e-01 1.4122909866273403e-02 + <_> + + 0 -1 2236 5.8626191457733512e-04 + + -1.0622119903564453e-01 8.4198087453842163e-02 + <_> + + 0 -1 2237 3.3601750619709492e-03 + + -2.1583529189229012e-02 4.1820129752159119e-01 + <_> + + 0 -1 2238 -4.8143669962882996e-02 + + -7.2092157602310181e-01 1.4954459853470325e-02 + <_> + + 0 -1 2239 1.2209859676659107e-02 + + 2.1544290706515312e-02 -3.5482150316238403e-01 + <_> + + 0 -1 2240 -3.9961449801921844e-02 + + -8.8848268985748291e-01 
9.4328429549932480e-03 + <_> + + 0 -1 2241 1.5312479808926582e-03 + + -6.4070880413055420e-02 1.3569630682468414e-01 + <_> + + 0 -1 2242 8.9791123173199594e-05 + + 5.0932768732309341e-02 -1.8393670022487640e-01 + <_> + 195 + -1.3934370279312134e+00 + + <_> + + 0 -1 2243 -3.8741368800401688e-02 + + 2.8778830170631409e-01 -2.3312190175056458e-01 + <_> + + 0 -1 2244 -2.5511500425636768e-03 + + 2.5108599662780762e-01 -2.1116070449352264e-01 + <_> + + 0 -1 2245 -2.7973129181191325e-04 + + 8.9916922152042389e-02 -3.4069269895553589e-01 + <_> + + 0 -1 2246 1.1981100542470813e-03 + + -2.2542229294776917e-01 1.3602660596370697e-01 + <_> + + 0 -1 2247 -5.6686070747673512e-03 + + 8.2847259938716888e-02 -2.8080710768699646e-01 + <_> + + 0 -1 2248 -2.7642669738270342e-04 + + 1.0485479980707169e-01 -1.8848650157451630e-01 + <_> + + 0 -1 2249 2.0516710355877876e-03 + + 3.4714280627667904e-03 -4.8608478903770447e-01 + <_> + + 0 -1 2250 -1.4435249795496929e-05 + + 8.4275819361209869e-02 -1.9356100261211395e-01 + <_> + + 0 -1 2251 7.4418791336938739e-04 + + -1.2526750564575195e-01 1.1769519746303558e-01 + <_> + + 0 -1 2252 -4.9923241138458252e-02 + + -4.0080299973487854e-01 2.7910390868782997e-02 + <_> + + 0 -1 2253 9.2694535851478577e-03 + + -9.1088913381099701e-02 1.7550450563430786e-01 + <_> + + 0 -1 2254 -7.4646030552685261e-03 + + 1.6380469501018524e-01 -1.0385499894618988e-01 + <_> + + 0 -1 2255 -8.1985909491777420e-03 + + -1.9168980419635773e-01 8.5415020585060120e-02 + <_> + + 0 -1 2256 -8.1690691877156496e-04 + + -3.0793309211730957e-01 4.0833581238985062e-02 + <_> + + 0 -1 2257 2.8902110643684864e-03 + + -5.0324201583862305e-02 2.9259419441223145e-01 + <_> + + 0 -1 2258 8.0008199438452721e-03 + + -4.6863578259944916e-02 3.1964871287345886e-01 + <_> + + 0 -1 2259 -5.8349180035293102e-03 + + -1.5489180386066437e-01 8.8137261569499969e-02 + <_> + + 0 -1 2260 -1.2492289533838630e-03 + + -3.6294621229171753e-01 3.6120988428592682e-02 + <_> + + 0 -1 2261 2.2950479760766029e-02 + + -4.7119770199060440e-02 2.8532719612121582e-01 + <_> + + 0 -1 2262 -6.9193239323794842e-03 + + 1.7873649299144745e-01 -7.3547556996345520e-02 + <_> + + 0 -1 2263 -1.9392240210436285e-04 + + 1.3911420106887817e-01 -9.2489100992679596e-02 + <_> + + 0 -1 2264 1.9811228848993778e-03 + + 4.3448008596897125e-02 -3.0942690372467041e-01 + <_> + + 0 -1 2265 1.6018489375710487e-02 + + -3.9718918502330780e-02 3.4248939156532288e-01 + <_> + + 0 -1 2266 9.3541406095027924e-03 + + 3.2482650130987167e-02 -4.4502100348472595e-01 + <_> + + 0 -1 2267 -1.3822780456393957e-03 + + 2.1627070009708405e-01 -5.6410200893878937e-02 + <_> + + 0 -1 2268 2.5065820664167404e-02 + + 2.3123230785131454e-02 -5.3954011201858521e-01 + <_> + + 0 -1 2269 5.9798579663038254e-02 + + 2.8747579082846642e-02 -3.6572590470314026e-01 + <_> + + 0 -1 2270 -2.7519159484654665e-03 + + 1.7491349577903748e-01 -6.3990972936153412e-02 + <_> + + 0 -1 2271 -3.2093640416860580e-02 + + -2.5695550441741943e-01 4.0945108979940414e-02 + <_> + + 0 -1 2272 -2.3349749390035868e-03 + + 1.5433880686759949e-01 -7.2836689651012421e-02 + <_> + + 0 -1 2273 2.6897678617388010e-04 + + 7.2721242904663086e-02 -1.5513220429420471e-01 + <_> + + 0 -1 2274 -8.9813407976180315e-04 + + -2.0699620246887207e-01 5.3738221526145935e-02 + <_> + + 0 -1 2275 3.8521869573742151e-03 + + 3.6562010645866394e-02 -2.8075969219207764e-01 + <_> + + 0 -1 2276 1.3440090231597424e-02 + + -3.6046478897333145e-02 3.1876960396766663e-01 + <_> + + 0 -1 2277 7.7129118144512177e-03 + + 9.5960013568401337e-02 
-1.1787489801645279e-01 + <_> + + 0 -1 2278 2.1991880203131586e-04 + + -1.3249869644641876e-01 8.4939576685428619e-02 + <_> + + 0 -1 2279 -7.4781170114874840e-03 + + -2.3073039948940277e-01 5.0310928374528885e-02 + <_> + + 0 -1 2280 8.9175272732973099e-03 + + -5.3924769163131714e-02 2.0320640504360199e-01 + <_> + + 0 -1 2281 2.2819850128144026e-03 + + 3.5264909267425537e-02 -3.0841338634490967e-01 + <_> + + 0 -1 2282 2.6413009036332369e-03 + + -3.2939229160547256e-02 3.1721460819244385e-01 + <_> + + 0 -1 2283 -1.4605689793825150e-03 + + -1.7154279351234436e-01 6.3374556601047516e-02 + <_> + + 0 -1 2284 -3.1993410084396601e-03 + + 3.4501680731773376e-01 -3.0717490240931511e-02 + <_> + + 0 -1 2285 2.3919229861348867e-03 + + 2.0887520164251328e-02 -4.8564168810844421e-01 + <_> + + 0 -1 2286 -3.5997610539197922e-03 + + 2.8900530934333801e-01 -3.5605821758508682e-02 + <_> + + 0 -1 2287 -1.4754279618500732e-05 + + 7.2744622826576233e-02 -1.4580619335174561e-01 + <_> + + 0 -1 2288 1.5968360006809235e-02 + + 1.2548550032079220e-02 -6.7445451021194458e-01 + <_> + + 0 -1 2289 -4.0752082131803036e-03 + + 3.1447470188140869e-01 -3.2155450433492661e-02 + <_> + + 0 -1 2290 7.5432872108649462e-05 + + -9.9738657474517822e-02 8.9665092527866364e-02 + <_> + + 0 -1 2291 -3.9632249623537064e-02 + + 2.7617400884628296e-01 -3.4800730645656586e-02 + <_> + + 0 -1 2292 2.9354610887821764e-05 + + -1.4023000001907349e-01 8.8519610464572906e-02 + <_> + + 0 -1 2293 3.1818989664316177e-02 + + 2.9925649985671043e-02 -3.3958339691162109e-01 + <_> + + 0 -1 2294 1.2690100073814392e-01 + + 1.1263390071690083e-02 -8.9932328462600708e-01 + <_> + + 0 -1 2295 -3.5952320322394371e-03 + + 1.7751759290695190e-01 -5.8113489300012589e-02 + <_> + + 0 -1 2296 -1.9231259822845459e-02 + + -3.3173981308937073e-01 4.0587101131677628e-02 + <_> + + 0 -1 2297 2.2836721036583185e-03 + + 3.7206009030342102e-02 -2.8370648622512817e-01 + <_> + + 0 -1 2298 -1.6381660243496299e-03 + + 1.4629170298576355e-01 -6.7781522870063782e-02 + <_> + + 0 -1 2299 2.1173330023884773e-03 + + 2.0773969590663910e-02 -4.3928679823875427e-01 + <_> + + 0 -1 2300 6.4710620790719986e-03 + + -7.2133928537368774e-02 1.3981610536575317e-01 + <_> + + 0 -1 2301 -3.1431620009243488e-03 + + -1.9903449714183807e-01 4.7544669359922409e-02 + <_> + + 0 -1 2302 1.6056640306487679e-03 + + -3.9751898497343063e-02 2.5931739807128906e-01 + <_> + + 0 -1 2303 4.8740832135081291e-03 + + 3.4082379192113876e-02 -2.7611988782882690e-01 + <_> + + 0 -1 2304 -9.6354109700769186e-05 + + -1.0709609836339951e-01 8.3503186702728271e-02 + <_> + + 0 -1 2305 7.7706458978354931e-03 + + -3.0095349997282028e-02 2.9493871331214905e-01 + <_> + + 0 -1 2306 1.3028859393671155e-04 + + -1.1232890188694000e-01 9.4578683376312256e-02 + <_> + + 0 -1 2307 1.2239719508215785e-03 + + 5.1999621093273163e-02 -1.8106269836425781e-01 + <_> + + 0 -1 2308 -8.7549741147086024e-04 + + 1.4276699721813202e-01 -7.5098946690559387e-02 + <_> + + 0 -1 2309 -8.8081993162631989e-02 + + -7.0848828554153442e-01 1.4353640377521515e-02 + <_> + + 0 -1 2310 -3.2854160666465759e-01 + + -4.9687421321868896e-01 1.6604600474238396e-02 + <_> + + 0 -1 2311 9.8696127533912659e-03 + + 1.9364370033144951e-02 -4.9978300929069519e-01 + <_> + + 0 -1 2312 -2.7273639570921659e-03 + + 2.9612520337104797e-01 -3.2831400632858276e-02 + <_> + + 0 -1 2313 9.9100142717361450e-02 + + 1.9799079746007919e-02 -4.7344958782196045e-01 + <_> + + 0 -1 2314 -6.3501899130642414e-03 + + -5.1504719257354736e-01 1.6986010596156120e-02 + <_> + + 0 -1 2315 
2.9596920285257511e-05 + + -1.0923019796609879e-01 8.9656107127666473e-02 + <_> + + 0 -1 2316 2.1247670054435730e-02 + + -4.1462190449237823e-02 2.2684270143508911e-01 + <_> + + 0 -1 2317 -7.2977989912033081e-02 + + -6.3227838277816772e-01 1.6678869724273682e-02 + <_> + + 0 -1 2318 1.6230919957160950e-01 + + -2.5661909952759743e-02 3.7533140182495117e-01 + <_> + + 0 -1 2319 -1.4590819773729891e-05 + + 8.5613600909709930e-02 -1.1900989711284637e-01 + <_> + + 0 -1 2320 2.7719149366021156e-03 + + -5.4649248719215393e-02 2.0311379432678223e-01 + <_> + + 0 -1 2321 -8.7484354153275490e-03 + + -7.3674517869949341e-01 1.5571890398859978e-02 + <_> + + 0 -1 2322 1.3679199852049351e-02 + + 7.8902930021286011e-02 -1.1590500175952911e-01 + <_> + + 0 -1 2323 -1.1001150123775005e-02 + + 3.1690821051597595e-01 -3.2384991645812988e-02 + <_> + + 0 -1 2324 3.2964799902401865e-04 + + 5.0016529858112335e-02 -2.0451450347900391e-01 + <_> + + 0 -1 2325 2.7753270696848631e-03 + + -6.7407429218292236e-02 1.5935909748077393e-01 + <_> + + 0 -1 2326 -2.8740249108523130e-03 + + 2.2455960512161255e-01 -5.1031488925218582e-02 + <_> + + 0 -1 2327 8.1631669308990240e-04 + + 6.9849550724029541e-02 -1.4791619777679443e-01 + <_> + + 0 -1 2328 3.7573580630123615e-03 + + 3.1594600528478622e-02 -3.1387978792190552e-01 + <_> + + 0 -1 2329 -3.4902389161288738e-03 + + 1.1638429760932922e-01 -8.5947930812835693e-02 + <_> + + 0 -1 2330 -2.9415320605039597e-02 + + 6.8403428792953491e-01 -1.6140609979629517e-02 + <_> + + 0 -1 2331 -8.8095385581254959e-03 + + -2.0775319635868073e-01 4.9950890243053436e-02 + <_> + + 0 -1 2332 -1.5459939837455750e-02 + + -4.8748460412025452e-01 2.0065559074282646e-02 + <_> + + 0 -1 2333 -3.6481369286775589e-02 + + -5.2395141124725342e-01 1.5850989148020744e-02 + <_> + + 0 -1 2334 -8.8937362306751311e-05 + + -1.3299320638179779e-01 6.6926807165145874e-02 + <_> + + 0 -1 2335 1.4536709932144731e-04 + + 8.7170369923114777e-02 -1.0435820370912552e-01 + <_> + + 0 -1 2336 1.5216879546642303e-01 + + 1.6140580177307129e-02 -6.4970171451568604e-01 + <_> + + 0 -1 2337 -4.2344830580987036e-04 + + 1.8045839667320251e-01 -5.2974540740251541e-02 + <_> + + 0 -1 2338 1.0672640055418015e-03 + + 2.0548380911350250e-02 -4.8242041468620300e-01 + <_> + + 0 -1 2339 1.5491680242121220e-02 + + -5.1540851593017578e-02 1.8363960087299347e-01 + <_> + + 0 -1 2340 6.1393307987600565e-04 + + 2.9983729124069214e-02 -3.1031700968742371e-01 + <_> + + 0 -1 2341 -1.4619939975091256e-05 + + 1.0368499904870987e-01 -9.1634131968021393e-02 + <_> + + 0 -1 2342 6.9900648668408394e-03 + + 1.4683909714221954e-02 -5.9485381841659546e-01 + <_> + + 0 -1 2343 -5.3000110201537609e-03 + + -1.2457770109176636e-01 7.0542782545089722e-02 + <_> + + 0 -1 2344 5.0289987120777369e-04 + + -7.7135689556598663e-02 1.2228710204362869e-01 + <_> + + 0 -1 2345 1.1190979741513729e-02 + + 5.0308059900999069e-02 -1.8091809749603271e-01 + <_> + + 0 -1 2346 1.7019819468259811e-02 + + -3.8816768676042557e-02 3.0851981043815613e-01 + <_> + + 0 -1 2347 -5.8241572696715593e-04 + + 1.2537799775600433e-01 -7.6115481555461884e-02 + <_> + + 0 -1 2348 2.0036669448018074e-02 + + 4.9899481236934662e-02 -1.8082989752292633e-01 + <_> + + 0 -1 2349 -5.4328818805515766e-03 + + 2.3409770429134369e-01 -4.2385410517454147e-02 + <_> + + 0 -1 2350 -2.9535360226873308e-05 + + 5.7630240917205811e-02 -1.5753529965877533e-01 + <_> + + 0 -1 2351 -1.0352370142936707e-01 + + 7.1587741374969482e-01 -1.2989929877221584e-02 + <_> + + 0 -1 2352 -1.2122269719839096e-02 + + -1.4788970351219177e-01 
6.6566437482833862e-02 + <_> + + 0 -1 2353 3.0254870653152466e-03 + + -5.4378628730773926e-02 1.7140829563140869e-01 + <_> + + 0 -1 2354 -5.8111078105866909e-03 + + 2.4422149360179901e-01 -5.7652641087770462e-02 + <_> + + 0 -1 2355 8.2830740138888359e-03 + + 2.2720400243997574e-02 -4.2961999773979187e-01 + <_> + + 0 -1 2356 1.2375120073556900e-02 + + 2.2810289636254311e-02 -3.7505629658699036e-01 + <_> + + 0 -1 2357 1.9211210310459137e-02 + + 1.1791059747338295e-02 -6.5529459714889526e-01 + <_> + + 0 -1 2358 3.1843129545450211e-04 + + 6.4130060374736786e-02 -1.3995569944381714e-01 + <_> + + 0 -1 2359 8.4224628517404199e-04 + + -5.4134279489517212e-02 1.7525580525398254e-01 + <_> + + 0 -1 2360 -1.6085049510002136e-01 + + -9.4571417570114136e-01 7.8549478203058243e-03 + <_> + + 0 -1 2361 -1.6774870455265045e-03 + + -1.9166129827499390e-01 4.5787028968334198e-02 + <_> + + 0 -1 2362 -1.8989649834111333e-03 + + 1.5783150494098663e-01 -6.5896913409233093e-02 + <_> + + 0 -1 2363 4.0205760160461068e-04 + + -7.3599092662334442e-02 1.3118380308151245e-01 + <_> + + 0 -1 2364 2.4369959719479084e-03 + + 2.3522870615124702e-02 -4.2745968699455261e-01 + <_> + + 0 -1 2365 -2.8488409952842630e-05 + + 6.3280619680881500e-02 -1.3599009811878204e-01 + <_> + + 0 -1 2366 1.9538639113306999e-02 + + -2.1458270028233528e-02 4.7534748911857605e-01 + <_> + + 0 -1 2367 -1.6530340071767569e-03 + + -1.5323260426521301e-01 5.9455979615449905e-02 + <_> + + 0 -1 2368 -2.1052840165793896e-03 + + 1.1017639935016632e-01 -8.3118103444576263e-02 + <_> + + 0 -1 2369 -4.5266482047736645e-03 + + 2.5815379619598389e-01 -3.5743940621614456e-02 + <_> + + 0 -1 2370 -1.6275560483336449e-04 + + -1.3548290729522705e-01 6.9295726716518402e-02 + <_> + + 0 -1 2371 -3.3048219047486782e-03 + + 1.7806029319763184e-01 -5.2156440913677216e-02 + <_> + + 0 -1 2372 -5.1905210129916668e-03 + + -3.4897321462631226e-01 2.5990990921854973e-02 + <_> + + 0 -1 2373 1.1190810054540634e-01 + + 2.9962029308080673e-02 -2.9597550630569458e-01 + <_> + + 0 -1 2374 -5.2873138338327408e-03 + + 1.8564499914646149e-01 -5.0216298550367355e-02 + <_> + + 0 -1 2375 2.6098049711436033e-03 + + -7.3559276759624481e-02 1.4365130662918091e-01 + <_> + + 0 -1 2376 -2.8581928927451372e-03 + + -1.2605139613151550e-01 7.5433082878589630e-02 + <_> + + 0 -1 2377 -2.9555680157500319e-05 + + 1.0733310133218765e-01 -1.0386200249195099e-01 + <_> + + 0 -1 2378 5.9023561334470287e-05 + + -1.3029119372367859e-01 7.6478391885757446e-02 + <_> + + 0 -1 2379 -4.3344721198081970e-02 + + -6.9299221038818359e-01 1.4173300005495548e-02 + <_> + + 0 -1 2380 -4.6946998685598373e-02 + + -5.5803751945495605e-01 1.2422920204699039e-02 + <_> + + 0 -1 2381 -1.5189060010015965e-02 + + 3.7049770355224609e-01 -2.5564119219779968e-02 + <_> + + 0 -1 2382 1.6361879184842110e-02 + + 2.7049979194998741e-02 -3.4278920292854309e-01 + <_> + + 0 -1 2383 4.0752839297056198e-02 + + 9.3995258212089539e-03 -8.8683712482452393e-01 + <_> + + 0 -1 2384 -1.0879869572818279e-02 + + 5.3260582685470581e-01 -1.9450860098004341e-02 + <_> + + 0 -1 2385 -7.7538257755804807e-05 + + -1.1696249991655350e-01 7.7288232743740082e-02 + <_> + + 0 -1 2386 -4.0953079587779939e-04 + + 1.6214360296726227e-01 -5.3711488842964172e-02 + <_> + + 0 -1 2387 -1.8464239314198494e-02 + + -5.0844788551330566e-01 1.9838189706206322e-02 + <_> + + 0 -1 2388 -5.6788129732012749e-03 + + 3.0203920602798462e-01 -3.0203990638256073e-02 + <_> + + 0 -1 2389 3.8324110209941864e-04 + + -1.6841089725494385e-01 5.4902028292417526e-02 + <_> + + 0 -1 2390 
6.4761550165712833e-03 + + 9.5140263438224792e-02 -1.0746160149574280e-01 + <_> + + 0 -1 2391 -2.4377859663218260e-03 + + -1.5647719800472260e-01 6.3407607376575470e-02 + <_> + + 0 -1 2392 5.4156291298568249e-04 + + -6.5962299704551697e-02 1.8441629409790039e-01 + <_> + + 0 -1 2393 2.7917029336094856e-02 + + -2.7590230107307434e-02 3.5032740235328674e-01 + <_> + + 0 -1 2394 4.6622849185951054e-04 + + 4.9628820270299911e-02 -2.2624179720878601e-01 + <_> + + 0 -1 2395 -3.7316799163818359e-02 + + -4.2978170514106750e-01 2.1337680518627167e-02 + <_> + + 0 -1 2396 -2.6047111023217440e-03 + + 3.6650991439819336e-01 -2.5405049324035645e-02 + <_> + + 0 -1 2397 5.1927138119935989e-03 + + 2.6877930387854576e-02 -3.3478578925132751e-01 + <_> + + 0 -1 2398 3.0462879221886396e-03 + + -3.0848290771245956e-02 2.9788359999656677e-01 + <_> + + 0 -1 2399 -4.1325599886476994e-04 + + 7.2986789047718048e-02 -1.2147530168294907e-01 + <_> + + 0 -1 2400 -1.1456120014190674e-01 + + 3.1955468654632568e-01 -3.3379800617694855e-02 + <_> + + 0 -1 2401 -1.3044059742242098e-03 + + -2.0625290274620056e-01 5.4634369909763336e-02 + <_> + + 0 -1 2402 4.5045089791528881e-05 + + -1.1376550048589706e-01 7.8123383224010468e-02 + <_> + + 0 -1 2403 1.8890319624915719e-03 + + -6.5578728914260864e-02 1.7001299560070038e-01 + <_> + + 0 -1 2404 -5.4107961477711797e-04 + + -1.8184140324592590e-01 5.1611810922622681e-02 + <_> + + 0 -1 2405 4.4150161556899548e-03 + + -3.6324780434370041e-02 2.4938449263572693e-01 + <_> + + 0 -1 2406 -2.1878050640225410e-02 + + -1.7643679678440094e-01 5.4811108857393265e-02 + <_> + + 0 -1 2407 -2.0328219980001450e-03 + + 9.4266183674335480e-02 -9.7129411995410919e-02 + <_> + + 0 -1 2408 2.6754371356219053e-04 + + 5.7487931102514267e-02 -1.5442019701004028e-01 + <_> + + 0 -1 2409 1.4061420224606991e-03 + + -5.0268959254026413e-02 1.8814170360565186e-01 + <_> + + 0 -1 2410 2.0725419744849205e-04 + + 7.7659189701080322e-02 -1.2538130581378937e-01 + <_> + + 0 -1 2411 1.8001600401476026e-03 + + -4.2675640434026718e-02 2.2430649399757385e-01 + <_> + + 0 -1 2412 -4.6744230203330517e-03 + + -3.3480471372604370e-01 2.9364420101046562e-02 + <_> + + 0 -1 2413 7.2110369801521301e-03 + + -5.2441328763961792e-02 1.8891569972038269e-01 + <_> + + 0 -1 2414 2.3627521004527807e-03 + + 3.4400060772895813e-02 -2.7200448513031006e-01 + <_> + + 0 -1 2415 -1.3181479880586267e-03 + + 1.7767719924449921e-01 -5.6363631039857864e-02 + <_> + + 0 -1 2416 -1.7586319881957024e-04 + + 9.1534242033958435e-02 -1.0412310063838959e-01 + <_> + + 0 -1 2417 -2.5801590527407825e-04 + + -1.1226779967546463e-01 8.1381812691688538e-02 + <_> + + 0 -1 2418 9.6790950919967145e-05 + + -1.1881929636001587e-01 7.1883186697959900e-02 + <_> + + 0 -1 2419 8.2001117989420891e-03 + + -4.0254529565572739e-02 2.2790899872779846e-01 + <_> + + 0 -1 2420 -6.7277951166033745e-04 + + -7.0979103446006775e-02 1.2775769829750061e-01 + <_> + + 0 -1 2421 3.7424470065161586e-04 + + 6.7096449434757233e-02 -1.3645760715007782e-01 + <_> + + 0 -1 2422 2.5741120334714651e-03 + + -5.4319828748703003e-02 1.6720260679721832e-01 + <_> + + 0 -1 2423 4.3884690967388451e-04 + + 8.2114033401012421e-02 -1.1024679988622665e-01 + <_> + + 0 -1 2424 -4.8180628567934036e-02 + + -7.2217732667922974e-01 1.2223210185766220e-02 + <_> + + 0 -1 2425 9.9836904555559158e-03 + + 1.2195640243589878e-02 -6.7448061704635620e-01 + <_> + + 0 -1 2426 -1.2344559654593468e-03 + + 1.7145380377769470e-01 -5.5381339043378830e-02 + <_> + + 0 -1 2427 -2.7302911039441824e-03 + + -1.3044339418411255e-01 
7.4266709387302399e-02 + <_> + + 0 -1 2428 5.5562541820108891e-04 + + -1.0187319666147232e-01 1.0454159975051880e-01 + <_> + + 0 -1 2429 1.5140359755605459e-03 + + 8.2843840122222900e-02 -1.1898560076951981e-01 + <_> + + 0 -1 2430 -7.2555973019916564e-05 + + -1.2512299418449402e-01 7.1132406592369080e-02 + <_> + + 0 -1 2431 -2.4981278693303466e-04 + + -1.3125610351562500e-01 6.8963102996349335e-02 + <_> + + 0 -1 2432 -6.0206428170204163e-03 + + 2.1284450590610504e-01 -4.7603111714124680e-02 + <_> + + 0 -1 2433 -7.2469102451577783e-04 + + 1.0499659925699234e-01 -8.5549630224704742e-02 + <_> + + 0 -1 2434 6.3740357290953398e-04 + + 5.4655481129884720e-02 -1.7353290319442749e-01 + <_> + + 0 -1 2435 1.0901190340518951e-02 + + -5.2832279354333878e-02 1.8752649426460266e-01 + <_> + + 0 -1 2436 7.0734010078012943e-03 + + 6.2958806753158569e-02 -1.6468439996242523e-01 + <_> + + 0 -1 2437 1.3333789538592100e-03 + + -1.2590870261192322e-01 9.4716809689998627e-02 + <_> + 171 + -1.2739679813385010e+00 + + <_> + + 0 -1 2438 6.2053989619016647e-02 + + -2.5427028536796570e-01 2.3591099679470062e-01 + <_> + + 0 -1 2439 5.9534627944231033e-03 + + -2.2544360160827637e-01 1.7751939594745636e-01 + <_> + + 0 -1 2440 7.2477371431887150e-03 + + -1.1398050189018250e-01 2.7556711435317993e-01 + <_> + + 0 -1 2441 -2.2824530024081469e-03 + + 8.6277678608894348e-02 -3.1412398815155029e-01 + <_> + + 0 -1 2442 1.1776019819080830e-02 + + -6.2360338866710663e-02 3.4443479776382446e-01 + <_> + + 0 -1 2443 4.3855342082679272e-03 + + 1.8105769529938698e-02 -5.0128728151321411e-01 + <_> + + 0 -1 2444 1.5859069302678108e-02 + + -7.8765146434307098e-02 2.6402598619461060e-01 + <_> + + 0 -1 2445 3.0654110014438629e-03 + + 3.3250238746404648e-02 -4.3427819013595581e-01 + <_> + + 0 -1 2446 2.5912460405379534e-03 + + 4.0578570216894150e-02 -4.9658200144767761e-01 + <_> + + 0 -1 2447 3.0834769131615758e-04 + + -1.4615769684314728e-01 1.2339019775390625e-01 + <_> + + 0 -1 2448 -2.4314899928867817e-03 + + 7.2739332914352417e-02 -1.9999310374259949e-01 + <_> + + 0 -1 2449 -1.8934230320155621e-03 + + -2.3373599350452423e-01 5.6464370340108871e-02 + <_> + + 0 -1 2450 4.4724289327859879e-03 + + 4.7042880207300186e-02 -3.1258741021156311e-01 + <_> + + 0 -1 2451 1.5810050535947084e-04 + + -1.3098309934139252e-01 1.0137090086936951e-01 + <_> + + 0 -1 2452 1.8755989149212837e-02 + + -3.8183789700269699e-02 3.7149110436439514e-01 + <_> + + 0 -1 2453 -7.4876967119053006e-04 + + 1.9981959462165833e-01 -6.0278389602899551e-02 + <_> + + 0 -1 2454 -9.3861011555418372e-04 + + 8.7467707693576813e-02 -1.6001270711421967e-01 + <_> + + 0 -1 2455 -1.3442989438772202e-03 + + -3.3072051405906677e-01 3.6564111709594727e-02 + <_> + + 0 -1 2456 -1.1384190293028951e-03 + + -2.0630060136318207e-01 5.6614480912685394e-02 + <_> + + 0 -1 2457 2.5966269895434380e-03 + + -6.2676019966602325e-02 1.9195850193500519e-01 + <_> + + 0 -1 2458 1.2499650474637747e-03 + + 5.7390280067920685e-02 -1.9605259597301483e-01 + <_> + + 0 -1 2459 1.1832700110971928e-03 + + -8.5788756608963013e-02 1.3682979345321655e-01 + <_> + + 0 -1 2460 -5.1836138591170311e-03 + + 3.1635698676109314e-01 -4.6736460179090500e-02 + <_> + + 0 -1 2461 -1.3185790181159973e-01 + + -6.2279629707336426e-01 1.8798090517520905e-02 + <_> + + 0 -1 2462 1.8653980223461986e-03 + + 3.8837268948554993e-02 -3.0104321241378784e-01 + <_> + + 0 -1 2463 7.3482480365782976e-04 + + -7.6612047851085663e-02 1.5002079308032990e-01 + <_> + + 0 -1 2464 -1.5738410002086312e-04 + + -1.6588360071182251e-01 7.0020452141761780e-02 
+ <_> + + 0 -1 2465 5.1779212662950158e-04 + + 7.4801079928874969e-02 -1.6358199715614319e-01 + <_> + + 0 -1 2466 7.5904270634055138e-03 + + -5.1050990819931030e-02 2.4487720429897308e-01 + <_> + + 0 -1 2467 -1.1010250076651573e-02 + + -5.8380401134490967e-01 2.0622009411454201e-02 + <_> + + 0 -1 2468 1.1621849983930588e-01 + + 2.5175059214234352e-02 -4.1262671351432800e-01 + <_> + + 0 -1 2469 -7.4468040838837624e-04 + + 1.2729789316654205e-01 -8.9675500988960266e-02 + <_> + + 0 -1 2470 1.1765309609472752e-02 + + 2.0906679332256317e-02 -5.3172761201858521e-01 + <_> + + 0 -1 2471 -4.4441698119044304e-03 + + 1.4282639324665070e-01 -7.8762412071228027e-02 + <_> + + 0 -1 2472 -4.3369788909330964e-04 + + -2.2131459414958954e-01 5.4554950445890427e-02 + <_> + + 0 -1 2473 -1.9204010022804141e-03 + + -2.5610721111297607e-01 4.0600918233394623e-02 + <_> + + 0 -1 2474 -2.9081690590828657e-03 + + 2.0206320285797119e-01 -5.6222829967737198e-02 + <_> + + 0 -1 2475 -1.4549949810316321e-05 + + 9.0000502765178680e-02 -1.1770520359277725e-01 + <_> + + 0 -1 2476 -5.3217669483274221e-04 + + -1.5299430489540100e-01 6.8925492465496063e-02 + <_> + + 0 -1 2477 -1.4590179547667503e-02 + + 2.1776519715785980e-01 -5.1850430667400360e-02 + <_> + + 0 -1 2478 -4.0213059401139617e-04 + + 9.4017893075942993e-02 -1.1027640104293823e-01 + <_> + + 0 -1 2479 -2.3089889436960220e-03 + + 2.4792349338531494e-01 -5.7857040315866470e-02 + <_> + + 0 -1 2480 3.1196139752864838e-04 + + -1.4021940529346466e-01 7.7247492969036102e-02 + <_> + + 0 -1 2481 -9.1317007318139076e-03 + + 4.0242809057235718e-01 -2.8953509405255318e-02 + <_> + + 0 -1 2482 4.2655199649743736e-04 + + 5.3114388138055801e-02 -2.1355339884757996e-01 + <_> + + 0 -1 2483 3.9956220425665379e-03 + + 4.4066920876502991e-02 -2.2994419932365417e-01 + <_> + + 0 -1 2484 -1.4012040337547660e-03 + + 2.7106899023056030e-01 -4.5171830803155899e-02 + <_> + + 0 -1 2485 3.6064770072698593e-02 + + 3.3628080040216446e-02 -3.2830131053924561e-01 + <_> + + 0 -1 2486 -1.3408949598670006e-04 + + -1.3888040184974670e-01 8.0078050494194031e-02 + <_> + + 0 -1 2487 -6.9480319507420063e-03 + + -3.9315450191497803e-01 2.7302930131554604e-02 + <_> + + 0 -1 2488 -1.4855440240353346e-03 + + 1.9761669635772705e-01 -5.1562070846557617e-02 + <_> + + 0 -1 2489 -1.3757539913058281e-02 + + -5.5620980262756348e-01 1.8301570788025856e-02 + <_> + + 0 -1 2490 8.4021147340536118e-03 + + 1.3690480031073093e-02 -6.3171321153640747e-01 + <_> + + 0 -1 2491 -1.7845979891717434e-04 + + -1.4535990357398987e-01 6.3921131193637848e-02 + <_> + + 0 -1 2492 -1.1326850391924381e-02 + + 6.5870612859725952e-01 -1.6460629180073738e-02 + <_> + + 0 -1 2493 1.5268150018528104e-03 + + -6.0389541089534760e-02 1.5454010665416718e-01 + <_> + + 0 -1 2494 -6.0069989413022995e-03 + + 2.5859731435775757e-01 -4.9466971307992935e-02 + <_> + + 0 -1 2495 -7.4241221882402897e-03 + + -3.8806110620498657e-01 2.9393190518021584e-02 + <_> + + 0 -1 2496 -3.9992430247366428e-03 + + -1.3788199424743652e-01 7.7991880476474762e-02 + <_> + + 0 -1 2497 1.0202969860984012e-04 + + 7.2710737586021423e-02 -1.7032580077648163e-01 + <_> + + 0 -1 2498 4.0135599556379020e-04 + + -9.2788018286228180e-02 1.2305440008640289e-01 + <_> + + 0 -1 2499 -9.7611807286739349e-03 + + -3.6630520224571228e-01 2.9748899862170219e-02 + <_> + + 0 -1 2500 -3.0745539069175720e-01 + + -7.8651821613311768e-01 1.3058690354228020e-02 + <_> + + 0 -1 2501 -6.0231718234717846e-03 + + -5.0900238752365112e-01 1.8171619623899460e-02 + <_> + + 0 -1 2502 -2.3784159566275775e-04 + + 
-9.9822521209716797e-02 1.0530869662761688e-01 + <_> + + 0 -1 2503 1.3516229810193181e-03 + + -6.6444016993045807e-02 1.5425109863281250e-01 + <_> + + 0 -1 2504 -1.6924949595704675e-03 + + -4.4133850932121277e-01 2.5100700557231903e-02 + <_> + + 0 -1 2505 1.0610929457470775e-03 + + -6.0577899217605591e-02 1.7217910289764404e-01 + <_> + + 0 -1 2506 5.6644581491127610e-04 + + -7.8687779605388641e-02 1.6784669458866119e-01 + <_> + + 0 -1 2507 -1.3955390080809593e-02 + + -5.7841098308563232e-01 1.9087139517068863e-02 + <_> + + 0 -1 2508 -1.8862909637391567e-03 + + 6.2118150293827057e-02 -1.6523399949073792e-01 + <_> + + 0 -1 2509 1.6784170642495155e-02 + + -3.0380919575691223e-02 3.6105319857597351e-01 + <_> + + 0 -1 2510 -1.4158519661577884e-05 + + 7.2182632982730865e-02 -1.4407490193843842e-01 + <_> + + 0 -1 2511 7.3750452138483524e-03 + + 2.9791580513119698e-02 -2.9277870059013367e-01 + <_> + + 0 -1 2512 8.0517530441284180e-03 + + -4.4681299477815628e-02 2.1760399639606476e-01 + <_> + + 0 -1 2513 -7.9519696533679962e-02 + + -6.5208691358566284e-01 1.4618909917771816e-02 + <_> + + 0 -1 2514 1.2065700255334377e-02 + + 2.9202880337834358e-02 -2.9454120993614197e-01 + <_> + + 0 -1 2515 -1.0122699663043022e-02 + + 2.7746239304542542e-01 -4.3713569641113281e-02 + <_> + + 0 -1 2516 -1.8515810370445251e-01 + + -4.6136859059333801e-01 2.4093240499496460e-02 + <_> + + 0 -1 2517 -8.0726131796836853e-02 + + -4.4673430919647217e-01 2.0845459774136543e-02 + <_> + + 0 -1 2518 1.5173270367085934e-03 + + -5.1575969904661179e-02 1.8063379824161530e-01 + <_> + + 0 -1 2519 -1.1184819974005222e-02 + + -3.5373958945274353e-01 2.7059540152549744e-02 + <_> + + 0 -1 2520 -3.5008399281650782e-03 + + 2.0548710227012634e-01 -4.6032059937715530e-02 + <_> + + 0 -1 2521 1.4720410108566284e-03 + + -6.3871711492538452e-02 1.8168300390243530e-01 + <_> + + 0 -1 2522 -4.5021830010227859e-04 + + -1.6353920102119446e-01 5.9327740222215652e-02 + <_> + + 0 -1 2523 6.1653478769585490e-04 + + 6.9089323282241821e-02 -1.9156040251255035e-01 + <_> + + 0 -1 2524 1.4797239564359188e-03 + + -5.2241999655961990e-02 1.8631340563297272e-01 + <_> + + 0 -1 2525 -1.4754989933862817e-05 + + 7.3586143553256989e-02 -1.5092320740222931e-01 + <_> + + 0 -1 2526 8.6423632455989718e-04 + + 6.6930077970027924e-02 -1.3976100087165833e-01 + <_> + + 0 -1 2527 -4.1005611419677734e-03 + + 2.0946699380874634e-01 -4.7175008803606033e-02 + <_> + + 0 -1 2528 -2.1505339536815882e-03 + + -5.2753841876983643e-01 1.7665250226855278e-02 + <_> + + 0 -1 2529 7.8334724530577660e-03 + + -4.5125011354684830e-02 2.0374919474124908e-01 + <_> + + 0 -1 2530 -3.2690390944480896e-03 + + -1.3836699724197388e-01 7.0653162896633148e-02 + <_> + + 0 -1 2531 3.9274748414754868e-03 + + 6.8428598344326019e-02 -1.6210170090198517e-01 + <_> + + 0 -1 2532 7.6534547843039036e-03 + + -9.3162156641483307e-02 9.9912680685520172e-02 + <_> + + 0 -1 2533 -3.2620150595903397e-02 + + 3.5453549027442932e-01 -3.0765339732170105e-02 + <_> + + 0 -1 2534 -1.8247209489345551e-02 + + -3.8171181082725525e-01 2.7764180675148964e-02 + <_> + + 0 -1 2535 -8.0104079097509384e-04 + + -1.4329099655151367e-01 6.4936630427837372e-02 + <_> + + 0 -1 2536 -1.0993109643459320e-01 + + 8.7319427728652954e-01 -1.1242670007050037e-02 + <_> + + 0 -1 2537 -3.0508199706673622e-02 + + -6.1269849538803101e-01 1.9372699782252312e-02 + <_> + + 0 -1 2538 -1.9187819212675095e-02 + + 2.8533020615577698e-01 -3.6832328885793686e-02 + <_> + + 0 -1 2539 2.3266570642590523e-03 + + 4.7289360314607620e-02 -2.1252959966659546e-01 + <_> + 
+ 0 -1 2540 -1.4535760274156928e-03 + + 1.3778920471668243e-01 -7.4501492083072662e-02 + <_> + + 0 -1 2541 -1.0573640465736389e-03 + + -2.2186830639839172e-01 4.2039170861244202e-02 + <_> + + 0 -1 2542 1.7203199677169323e-03 + + -6.9299750030040741e-02 1.3794890046119690e-01 + <_> + + 0 -1 2543 -1.4716150471940637e-03 + + 2.4296709895133972e-01 -4.0795009583234787e-02 + <_> + + 0 -1 2544 -5.2822660654783249e-03 + + -3.1959480047225952e-01 3.4215260297060013e-02 + <_> + + 0 -1 2545 -4.7165742143988609e-03 + + 3.0581191182136536e-01 -3.1772918999195099e-02 + <_> + + 0 -1 2546 7.3668370023369789e-03 + + 6.1085078865289688e-02 -1.6390019655227661e-01 + <_> + + 0 -1 2547 -7.6594999991357327e-03 + + -4.6472349762916565e-01 1.8869750201702118e-02 + <_> + + 0 -1 2548 7.6969028450548649e-03 + + -1.8191590905189514e-02 5.5395811796188354e-01 + <_> + + 0 -1 2549 -5.6195858633145690e-04 + + 9.7618483006954193e-02 -1.0844089835882187e-01 + <_> + + 0 -1 2550 -1.4587530131393578e-05 + + 7.4585132300853729e-02 -1.2353610247373581e-01 + <_> + + 0 -1 2551 -9.5779378898441792e-04 + + 1.6370140016078949e-01 -5.8610081672668457e-02 + <_> + + 0 -1 2552 8.0253500491380692e-03 + + 2.6857670396566391e-02 -4.1507768630981445e-01 + <_> + + 0 -1 2553 1.6938529442995787e-03 + + 4.8536270856857300e-02 -1.7888469994068146e-01 + <_> + + 0 -1 2554 -4.3334178626537323e-03 + + 1.9798220694065094e-01 -4.8085059970617294e-02 + <_> + + 0 -1 2555 -2.2440029715653509e-04 + + -1.5113249421119690e-01 6.0428649187088013e-02 + <_> + + 0 -1 2556 -1.1392509564757347e-02 + + 3.2737928628921509e-01 -2.9751259833574295e-02 + <_> + + 0 -1 2557 -9.3984175473451614e-03 + + -1.2912990152835846e-01 7.6302282512187958e-02 + <_> + + 0 -1 2558 8.7430170970037580e-04 + + -9.7556166350841522e-02 9.7808010876178741e-02 + <_> + + 0 -1 2559 7.5171617791056633e-03 + + 6.5084353089332581e-02 -1.5419410169124603e-01 + <_> + + 0 -1 2560 -2.7937069535255432e-03 + + 1.5009529888629913e-01 -6.3355393707752228e-02 + <_> + + 0 -1 2561 -3.4385098842903972e-04 + + 1.2404289841651917e-01 -7.5780630111694336e-02 + <_> + + 0 -1 2562 8.7557926774024963e-02 + + -1.5905940905213356e-02 5.6607347726821899e-01 + <_> + + 0 -1 2563 -9.3594435602426529e-03 + + -3.3039200305938721e-01 3.0874710530042648e-02 + <_> + + 0 -1 2564 -6.7703737877309322e-03 + + 1.7960870265960693e-01 -5.1310319453477859e-02 + <_> + + 0 -1 2565 -6.2513751909136772e-03 + + -5.7952338457107544e-01 1.5425769612193108e-02 + <_> + + 0 -1 2566 -2.5206409394741058e-02 + + -6.3777071237564087e-01 1.3051119633018970e-02 + <_> + + 0 -1 2567 -1.1819769861176610e-03 + + -2.0478110015392303e-01 4.0494531393051147e-02 + <_> + + 0 -1 2568 -1.0458839824423194e-03 + + 1.4812879264354706e-01 -6.2631592154502869e-02 + <_> + + 0 -1 2569 -2.5445020291954279e-03 + + 1.3021010160446167e-01 -6.9430023431777954e-02 + <_> + + 0 -1 2570 -8.0673627555370331e-02 + + -2.8054219484329224e-01 3.8956280797719955e-02 + <_> + + 0 -1 2571 -1.4390920114237815e-04 + + 1.0780519992113113e-01 -9.6550762653350830e-02 + <_> + + 0 -1 2572 7.6481432188302279e-04 + + 6.0667239129543304e-02 -1.5742610394954681e-01 + <_> + + 0 -1 2573 -3.4516688901931047e-04 + + 1.1415769904851913e-01 -8.8832370936870575e-02 + <_> + + 0 -1 2574 -2.2118249908089638e-03 + + 2.2988039255142212e-01 -5.0498738884925842e-02 + <_> + + 0 -1 2575 9.4616543501615524e-03 + + 1.9827060401439667e-02 -5.0633531808853149e-01 + <_> + + 0 -1 2576 1.0567939607426524e-03 + + 3.8744639605283737e-02 -2.3509359359741211e-01 + <_> + + 0 -1 2577 2.9194469098001719e-03 + + 
-6.1895478516817093e-02 1.5313319861888885e-01 + <_> + + 0 -1 2578 -1.0768010281026363e-02 + + -5.5298101902008057e-01 1.7847239971160889e-02 + <_> + + 0 -1 2579 -1.0197740048170090e-03 + + 1.1559300124645233e-01 -8.0185852944850922e-02 + <_> + + 0 -1 2580 1.8127029761672020e-04 + + 5.6652870029211044e-02 -1.6549369692802429e-01 + <_> + + 0 -1 2581 7.1620188464294188e-06 + + -9.1480091214179993e-02 9.7915090620517731e-02 + <_> + + 0 -1 2582 5.2910070866346359e-02 + + -1.3591200113296509e-02 6.6090220212936401e-01 + <_> + + 0 -1 2583 4.0185371041297913e-01 + + 1.9574489444494247e-02 -4.9015858769416809e-01 + <_> + + 0 -1 2584 -1.7914770171046257e-02 + + -8.8317036628723145e-02 1.0532960295677185e-01 + <_> + + 0 -1 2585 -1.4578569789591711e-05 + + 7.8513152897357941e-02 -1.2300349771976471e-01 + <_> + + 0 -1 2586 6.4994548447430134e-03 + + -4.0843468159437180e-02 2.9337158799171448e-01 + <_> + + 0 -1 2587 9.5762982964515686e-02 + + 1.9332479685544968e-02 -5.3444057703018188e-01 + <_> + + 0 -1 2588 1.4263469893194269e-05 + + -8.8897533714771271e-02 1.0632789880037308e-01 + <_> + + 0 -1 2589 2.2215039934962988e-03 + + -4.0777951478958130e-02 2.6405128836631775e-01 + <_> + + 0 -1 2590 3.1875250861048698e-03 + + 5.9725038707256317e-02 -1.6202959418296814e-01 + <_> + + 0 -1 2591 9.6069589257240295e-02 + + 1.1318460106849670e-02 -7.9110687971115112e-01 + <_> + + 0 -1 2592 1.9584870897233486e-03 + + -3.9252020418643951e-02 2.3639929294586182e-01 + <_> + + 0 -1 2593 -1.8468469381332397e-01 + + -5.8974397182464600e-01 1.5758410096168518e-02 + <_> + + 0 -1 2594 2.1685050160158426e-04 + + 4.6320449560880661e-02 -1.8274679780006409e-01 + <_> + + 0 -1 2595 1.8809709697961807e-02 + + -4.3357118964195251e-02 2.7832600474357605e-01 + <_> + + 0 -1 2596 -6.2639699317514896e-03 + + -1.3891190290451050e-01 7.7115900814533234e-02 + <_> + + 0 -1 2597 3.2622940489090979e-04 + + -9.1803021728992462e-02 1.0588289797306061e-01 + <_> + + 0 -1 2598 5.3745559416711330e-03 + + 1.0803489945828915e-02 -7.6716458797454834e-01 + <_> + + 0 -1 2599 2.8126770630478859e-03 + + -5.9618860483169556e-02 1.6133050620555878e-01 + <_> + + 0 -1 2600 -6.5314618404954672e-04 + + -8.5690811276435852e-02 1.1540769785642624e-01 + <_> + + 0 -1 2601 -1.7845110269263387e-03 + + 8.1831991672515869e-02 -1.2700800597667694e-01 + <_> + + 0 -1 2602 3.0969830695539713e-03 + + 6.8366639316082001e-02 -1.4475439488887787e-01 + <_> + + 0 -1 2603 -4.1442047804594040e-03 + + 1.8632030487060547e-01 -5.4030310362577438e-02 + <_> + + 0 -1 2604 -4.9972519278526306e-02 + + -1.2800359725952148e-01 8.5049159824848175e-02 + <_> + + 0 -1 2605 -1.0743910446763039e-02 + + 1.3701729476451874e-01 -7.7366456389427185e-02 + <_> + + 0 -1 2606 -3.0474149389192462e-04 + + -1.6938340663909912e-01 5.7971168309450150e-02 + <_> + + 0 -1 2607 3.6023318767547607e-02 + + 1.3561300002038479e-02 -6.3279747962951660e-01 + <_> + + 0 -1 2608 2.5479190517216921e-03 + + -4.3824359774589539e-02 2.2150419652462006e-01 + + <_> + + <_> + 8 7 2 6 -1. + <_> + 8 10 2 3 2. + <_> + + <_> + 8 3 10 7 -1. + <_> + 13 3 5 7 2. + <_> + + <_> + 10 11 3 6 -1. + <_> + 10 14 3 3 2. + <_> + + <_> + 10 4 8 8 -1. + <_> + 14 4 4 8 2. + <_> + + <_> + 5 7 5 4 -1. + <_> + 5 9 5 2 2. + <_> + + <_> + 8 4 6 6 -1. + <_> + 8 4 3 3 2. + <_> + 11 7 3 3 2. + <_> + + <_> + 10 14 5 2 -1. + <_> + 10 15 5 1 2. + <_> + + <_> + 7 11 8 4 -1. + <_> + 7 13 8 2 2. + <_> + + <_> + 11 14 3 3 -1. + <_> + 11 15 3 1 3. + <_> + + <_> + 3 5 3 11 -1. + <_> + 4 5 1 11 3. + <_> + + <_> + 8 7 9 6 -1. + <_> + 8 10 9 3 2. 
+ <_> + + <_> + 13 12 1 2 -1. + <_> + 13 13 1 1 2. + <_> + + <_> + 1 3 6 17 -1. + <_> + 4 3 3 17 2. + <_> + + <_> + 11 12 1 3 -1. + <_> + 11 13 1 1 3. + <_> + + <_> + 1 9 6 9 -1. + <_> + 4 9 3 9 2. + <_> + + <_> + 10 5 8 6 -1. + <_> + 14 5 4 6 2. + <_> + + <_> + 7 8 9 6 -1. + <_> + 7 10 9 2 3. + <_> + + <_> + 5 8 6 6 -1. + <_> + 5 8 3 3 2. + <_> + 8 11 3 3 2. + <_> + + <_> + 2 0 4 18 -1. + <_> + 4 0 2 18 2. + <_> + + <_> + 10 12 3 4 -1. + <_> + 10 14 3 2 2. + <_> + + <_> + 7 0 3 9 -1. + <_> + 7 3 3 3 3. + <_> + + <_> + 11 13 1 3 -1. + <_> + 11 14 1 1 3. + <_> + + <_> + 4 8 5 2 -1. + <_> + 4 9 5 1 2. + <_> + + <_> + 11 13 2 3 -1. + <_> + 11 14 2 1 3. + <_> + + <_> + 12 12 1 3 -1. + <_> + 12 13 1 1 3. + <_> + + <_> + 9 12 2 8 -1. + <_> + 9 16 2 4 2. + <_> + + <_> + 6 3 4 13 -1. + <_> + 8 3 2 13 2. + <_> + + <_> + 2 6 4 12 -1. + <_> + 4 6 2 12 2. + <_> + + <_> + 11 13 3 2 -1. + <_> + 12 13 1 2 3. + <_> + + <_> + 3 5 3 11 -1. + <_> + 4 5 1 11 3. + <_> + + <_> + 3 6 13 12 -1. + <_> + 3 12 13 6 2. + <_> + + <_> + 7 7 6 6 -1. + <_> + 7 7 3 3 2. + <_> + 10 10 3 3 2. + <_> + + <_> + 4 7 3 2 -1. + <_> + 5 7 1 2 3. + <_> + + <_> + 5 4 14 3 -1. + <_> + 12 4 7 3 2. + <_> + + <_> + 10 12 3 2 -1. + <_> + 11 12 1 2 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 12 14 1 3 -1. + <_> + 12 15 1 1 3. + <_> + + <_> + 3 6 3 3 -1. + <_> + 4 6 1 3 3. + <_> + + <_> + 8 4 3 2 -1. + <_> + 9 4 1 2 3. + <_> + + <_> + 3 3 3 13 -1. + <_> + 4 3 1 13 3. + <_> + + <_> + 15 4 2 3 -1. + <_> + 15 5 2 1 3. + <_> + + <_> + 12 8 4 4 -1. + <_> + 12 10 4 2 2. + <_> + + <_> + 8 7 8 9 -1. + <_> + 8 10 8 3 3. + <_> + + <_> + 8 0 12 6 -1. + <_> + 8 0 6 3 2. + <_> + 14 3 6 3 2. + <_> + + <_> + 5 9 3 6 -1. + <_> + 5 12 3 3 2. + <_> + + <_> + 11 12 2 4 -1. + <_> + 12 12 1 4 2. + <_> + + <_> + 10 11 3 8 -1. + <_> + 11 11 1 8 3. + <_> + + <_> + 5 5 5 6 -1. + <_> + 5 7 5 2 3. + <_> + + <_> + 10 13 2 6 -1. + <_> + 10 16 2 3 2. + <_> + + <_> + 10 15 3 4 -1. + <_> + 11 15 1 4 3. + <_> + + <_> + 7 3 3 3 -1. + <_> + 8 3 1 3 3. + <_> + + <_> + 5 8 6 2 -1. + <_> + 8 8 3 2 2. + <_> + + <_> + 8 7 4 2 -1. + <_> + 10 7 2 2 2. + <_> + + <_> + 5 6 2 3 -1. + <_> + 6 6 1 3 2. + <_> + + <_> + 8 0 3 8 -1. + <_> + 9 0 1 8 3. + <_> + + <_> + 5 10 3 8 -1. + <_> + 5 14 3 4 2. + <_> + + <_> + 12 3 3 2 -1. + <_> + 13 3 1 2 3. + <_> + + <_> + 8 2 3 4 -1. + <_> + 9 2 1 4 3. + <_> + + <_> + 14 10 1 8 -1. + <_> + 14 14 1 4 2. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 9 12 3 2 -1. + <_> + 10 12 1 2 3. + <_> + + <_> + 12 2 1 12 -1. + <_> + 12 6 1 4 3. + <_> + + <_> + 2 8 14 6 -1. + <_> + 2 8 7 3 2. + <_> + 9 11 7 3 2. + <_> + + <_> + 11 3 3 17 -1. + <_> + 12 3 1 17 3. + <_> + + <_> + 12 12 1 2 -1. + <_> + 12 13 1 1 2. + <_> + + <_> + 13 1 2 1 -1. + <_> + 14 1 1 1 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 12 12 2 3 -1. + <_> + 12 13 2 1 3. + <_> + + <_> + 8 2 10 10 -1. + <_> + 13 2 5 10 2. + <_> + + <_> + 11 13 3 1 -1. + <_> + 12 13 1 1 3. + <_> + + <_> + 12 10 1 4 -1. + <_> + 12 12 1 2 2. + <_> + + <_> + 8 7 2 6 -1. + <_> + 8 10 2 3 2. + <_> + + <_> + 12 11 1 3 -1. + <_> + 12 12 1 1 3. + <_> + + <_> + 9 12 3 3 -1. + <_> + 10 12 1 3 3. + <_> + + <_> + 6 0 8 6 -1. + <_> + 6 3 8 3 2. + <_> + + <_> + 0 0 8 19 -1. + <_> + 4 0 4 19 2. + <_> + + <_> + 5 6 4 9 -1. + <_> + 5 9 4 3 3. + <_> + + <_> + 13 14 1 2 -1. + <_> + 13 15 1 1 2. + <_> + + <_> + 1 3 8 15 -1. + <_> + 5 3 4 15 2. + <_> + + <_> + 13 14 2 3 -1. 
+ <_> + 13 15 2 1 3. + <_> + + <_> + 5 7 3 2 -1. + <_> + 6 7 1 2 3. + <_> + + <_> + 8 5 3 1 -1. + <_> + 9 5 1 1 3. + <_> + + <_> + 9 5 3 1 -1. + <_> + 10 5 1 1 3. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 18 4 1 2 -1. + <_> + 18 5 1 1 2. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 10 10 3 4 -1. + <_> + 11 10 1 4 3. + <_> + + <_> + 6 5 2 14 -1. + <_> + 6 12 2 7 2. + <_> + + <_> + 14 8 3 4 -1. + <_> + 14 10 3 2 2. + <_> + + <_> + 4 5 3 6 -1. + <_> + 4 7 3 2 3. + <_> + + <_> + 5 10 2 8 -1. + <_> + 5 14 2 4 2. + <_> + + <_> + 9 1 3 2 -1. + <_> + 10 1 1 2 3. + <_> + + <_> + 10 1 3 3 -1. + <_> + 11 1 1 3 3. + <_> + + <_> + 9 12 8 8 -1. + <_> + 9 12 4 4 2. + <_> + 13 16 4 4 2. + <_> + + <_> + 8 13 6 4 -1. + <_> + 10 13 2 4 3. + <_> + + <_> + 3 6 3 12 -1. + <_> + 4 6 1 12 3. + <_> + + <_> + 9 3 8 5 -1. + <_> + 13 3 4 5 2. + <_> + + <_> + 7 7 3 6 -1. + <_> + 7 10 3 3 2. + <_> + + <_> + 5 10 10 4 -1. + <_> + 5 12 10 2 2. + <_> + + <_> + 11 12 1 6 -1. + <_> + 11 15 1 3 2. + <_> + + <_> + 5 8 6 2 -1. + <_> + 8 8 3 2 2. + <_> + + <_> + 2 0 8 4 -1. + <_> + 2 0 4 2 2. + <_> + 6 2 4 2 2. + <_> + + <_> + 11 7 3 5 -1. + <_> + 12 7 1 5 3. + <_> + + <_> + 12 13 2 3 -1. + <_> + 12 14 2 1 3. + <_> + + <_> + 12 12 1 2 -1. + <_> + 12 13 1 1 2. + <_> + + <_> + 5 11 6 3 -1. + <_> + 8 11 3 3 2. + <_> + + <_> + 2 6 3 9 -1. + <_> + 3 6 1 9 3. + <_> + + <_> + 12 12 1 3 -1. + <_> + 12 13 1 1 3. + <_> + + <_> + 5 8 4 2 -1. + <_> + 5 9 4 1 2. + <_> + + <_> + 3 8 3 7 -1. + <_> + 4 8 1 7 3. + <_> + + <_> + 1 3 6 15 -1. + <_> + 3 3 2 15 3. + <_> + + <_> + 12 14 4 3 -1. + <_> + 12 15 4 1 3. + <_> + + <_> + 9 0 2 20 -1. + <_> + 9 0 1 10 2. + <_> + 10 10 1 10 2. + <_> + + <_> + 6 12 3 3 -1. + <_> + 6 13 3 1 3. + <_> + + <_> + 5 7 3 10 -1. + <_> + 5 12 3 5 2. + <_> + + <_> + 8 5 2 1 -1. + <_> + 9 5 1 1 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 5 13 3 1 3. + <_> + + <_> + 15 5 4 2 -1. + <_> + 15 6 4 1 2. + <_> + + <_> + 15 5 3 2 -1. + <_> + 15 6 3 1 2. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 6 5 4 12 -1. + <_> + 8 5 2 12 2. + <_> + + <_> + 7 4 3 3 -1. + <_> + 8 4 1 3 3. + <_> + + <_> + 5 6 2 3 -1. + <_> + 6 6 1 3 2. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 12 10 2 1 -1. + <_> + 13 10 1 1 2. + <_> + + <_> + 10 13 5 2 -1. + <_> + 10 14 5 1 2. + <_> + + <_> + 11 13 1 3 -1. + <_> + 11 14 1 1 3. + <_> + + <_> + 7 2 3 6 -1. + <_> + 7 4 3 2 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 12 14 2 3 -1. + <_> + 12 15 2 1 3. + <_> + + <_> + 8 5 3 3 -1. + <_> + 8 6 3 1 3. + <_> + + <_> + 7 6 9 10 -1. + <_> + 7 11 9 5 2. + <_> + + <_> + 0 18 18 2 -1. + <_> + 6 18 6 2 3. + <_> + + <_> + 0 5 1 8 -1. + <_> + 0 9 1 4 2. + <_> + + <_> + 1 3 8 10 -1. + <_> + 1 8 8 5 2. + <_> + + <_> + 9 12 6 2 -1. + <_> + 9 13 6 1 2. + <_> + + <_> + 9 6 2 3 -1. + <_> + 9 7 2 1 3. + <_> + + <_> + 9 4 3 3 -1. + <_> + 10 4 1 3 3. + <_> + + <_> + 13 13 1 3 -1. + <_> + 13 14 1 1 3. + <_> + + <_> + 2 6 13 3 -1. + <_> + 2 7 13 1 3. + <_> + + <_> + 10 15 2 4 -1. + <_> + 11 15 1 4 2. + <_> + + <_> + 7 7 2 3 -1. + <_> + 8 7 1 3 2. + <_> + + <_> + 3 6 12 8 -1. + <_> + 3 6 6 4 2. + <_> + 9 10 6 4 2. + <_> + + <_> + 12 0 8 4 -1. + <_> + 12 0 4 2 2. + <_> + 16 2 4 2 2. + <_> + + <_> + 9 15 3 3 -1. + <_> + 10 15 1 3 3. + <_> + + <_> + 10 14 1 2 -1. + <_> + 10 15 1 1 2. + <_> + + <_> + 6 11 5 6 -1. + <_> + 6 14 5 3 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 5 6 3 4 -1. + <_> + 6 6 1 4 3. 
+ <_> + + <_> + 9 6 6 4 -1. + <_> + 11 6 2 4 3. + <_> + + <_> + 6 5 12 6 -1. + <_> + 6 7 12 2 3. + <_> + + <_> + 3 1 16 7 -1. + <_> + 11 1 8 7 2. + <_> + + <_> + 12 11 1 6 -1. + <_> + 12 14 1 3 2. + <_> + + <_> + 6 6 9 8 -1. + <_> + 6 10 9 4 2. + <_> + + <_> + 5 9 4 6 -1. + <_> + 5 12 4 3 2. + <_> + + <_> + 1 0 6 14 -1. + <_> + 4 0 3 14 2. + <_> + + <_> + 8 1 1 9 -1. + <_> + 8 4 1 3 3. + <_> + + <_> + 11 13 2 2 -1. + <_> + 11 14 2 1 2. + <_> + + <_> + 2 7 4 13 -1. + <_> + 4 7 2 13 2. + <_> + + <_> + 5 8 6 6 -1. + <_> + 8 8 3 6 2. + <_> + + <_> + 18 0 2 20 -1. + <_> + 19 0 1 20 2. + <_> + + <_> + 6 7 3 3 -1. + <_> + 7 7 1 3 3. + <_> + + <_> + 13 10 1 4 -1. + <_> + 13 12 1 2 2. + <_> + + <_> + 12 11 2 2 -1. + <_> + 12 12 2 1 2. + <_> + + <_> + 3 6 12 6 -1. + <_> + 3 6 6 3 2. + <_> + 9 9 6 3 2. + <_> + + <_> + 10 13 2 2 -1. + <_> + 10 14 2 1 2. + <_> + + <_> + 6 13 2 3 -1. + <_> + 6 14 2 1 3. + <_> + + <_> + 13 5 1 3 -1. + <_> + 13 6 1 1 3. + <_> + + <_> + 6 14 3 3 -1. + <_> + 6 15 3 1 3. + <_> + + <_> + 5 15 3 3 -1. + <_> + 5 16 3 1 3. + <_> + + <_> + 15 3 1 3 -1. + <_> + 15 4 1 1 3. + <_> + + <_> + 3 8 3 12 -1. + <_> + 4 8 1 12 3. + <_> + + <_> + 3 4 3 14 -1. + <_> + 4 4 1 14 3. + <_> + + <_> + 6 11 6 2 -1. + <_> + 9 11 3 2 2. + <_> + + <_> + 4 8 8 4 -1. + <_> + 8 8 4 4 2. + <_> + + <_> + 4 5 2 4 -1. + <_> + 5 5 1 4 2. + <_> + + <_> + 7 3 2 1 -1. + <_> + 8 3 1 1 2. + <_> + + <_> + 12 16 2 3 -1. + <_> + 12 17 2 1 3. + <_> + + <_> + 3 16 6 3 -1. + <_> + 3 17 6 1 3. + <_> + + <_> + 13 4 2 1 -1. + <_> + 14 4 1 1 2. + <_> + + <_> + 9 16 4 4 -1. + <_> + 11 16 2 4 2. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 6 8 2 2 -1. + <_> + 6 9 2 1 2. + <_> + + <_> + 12 13 2 1 -1. + <_> + 13 13 1 1 2. + <_> + + <_> + 6 7 6 3 -1. + <_> + 8 7 2 3 3. + <_> + + <_> + 5 8 2 10 -1. + <_> + 5 13 2 5 2. + <_> + + <_> + 0 8 1 2 -1. + <_> + 0 9 1 1 2. + <_> + + <_> + 2 11 4 4 -1. + <_> + 4 11 2 4 2. + <_> + + <_> + 1 9 12 3 -1. + <_> + 5 9 4 3 3. + <_> + + <_> + 8 15 2 3 -1. + <_> + 9 15 1 3 2. + <_> + + <_> + 8 6 3 3 -1. + <_> + 8 7 3 1 3. + <_> + + <_> + 1 2 1 2 -1. + <_> + 1 3 1 1 2. + <_> + + <_> + 5 1 7 6 -1. + <_> + 5 3 7 2 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 13 7 3 4 -1. + <_> + 13 9 3 2 2. + <_> + + <_> + 5 10 3 3 -1. + <_> + 5 11 3 1 3. + <_> + + <_> + 7 5 3 1 -1. + <_> + 8 5 1 1 3. + <_> + + <_> + 0 0 11 16 -1. + <_> + 0 8 11 8 2. + <_> + + <_> + 7 4 3 2 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 13 5 2 2 -1. + <_> + 13 6 2 1 2. + <_> + + <_> + 8 8 2 6 -1. + <_> + 8 10 2 2 3. + <_> + + <_> + 5 6 3 4 -1. + <_> + 6 6 1 4 3. + <_> + + <_> + 10 0 10 8 -1. + <_> + 10 0 5 4 2. + <_> + 15 4 5 4 2. + <_> + + <_> + 9 7 2 12 -1. + <_> + 9 11 2 4 3. + <_> + + <_> + 6 3 12 12 -1. + <_> + 6 3 6 6 2. + <_> + 12 9 6 6 2. + <_> + + <_> + 5 7 4 6 -1. + <_> + 5 9 4 2 3. + <_> + + <_> + 5 7 10 10 -1. + <_> + 5 7 5 5 2. + <_> + 10 12 5 5 2. + <_> + + <_> + 2 1 4 15 -1. + <_> + 4 1 2 15 2. + <_> + + <_> + 12 11 2 2 -1. + <_> + 13 11 1 2 2. + <_> + + <_> + 6 11 10 6 -1. + <_> + 6 14 10 3 2. + <_> + + <_> + 5 12 4 3 -1. + <_> + 5 13 4 1 3. + <_> + + <_> + 6 12 1 3 -1. + <_> + 6 13 1 1 3. + <_> + + <_> + 3 7 12 8 -1. + <_> + 3 7 6 4 2. + <_> + 9 11 6 4 2. + <_> + + <_> + 6 2 2 6 -1. + <_> + 6 4 2 2 3. + <_> + + <_> + 11 11 5 4 -1. + <_> + 11 13 5 2 2. + <_> + + <_> + 5 8 6 6 -1. + <_> + 8 8 3 6 2. + <_> + + <_> + 5 12 4 2 -1. + <_> + 7 12 2 2 2. + <_> + + <_> + 3 13 3 7 -1. 
+ <_> + 4 13 1 7 3. + <_> + + <_> + 11 7 5 9 -1. + <_> + 11 10 5 3 3. + <_> + + <_> + 4 3 15 9 -1. + <_> + 4 6 15 3 3. + <_> + + <_> + 15 13 2 2 -1. + <_> + 15 13 1 1 2. + <_> + 16 14 1 1 2. + <_> + + <_> + 6 5 6 13 -1. + <_> + 9 5 3 13 2. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 6 1 2 15 -1. + <_> + 6 6 2 5 3. + <_> + + <_> + 11 0 4 3 -1. + <_> + 13 0 2 3 2. + <_> + + <_> + 0 0 2 4 -1. + <_> + 0 2 2 2 2. + <_> + + <_> + 4 8 9 3 -1. + <_> + 4 9 9 1 3. + <_> + + <_> + 6 5 6 2 -1. + <_> + 8 5 2 2 3. + <_> + + <_> + 4 15 2 2 -1. + <_> + 4 15 1 1 2. + <_> + 5 16 1 1 2. + <_> + + <_> + 6 14 2 3 -1. + <_> + 6 15 2 1 3. + <_> + + <_> + 6 12 1 6 -1. + <_> + 6 15 1 3 2. + <_> + + <_> + 5 9 2 10 -1. + <_> + 5 14 2 5 2. + <_> + + <_> + 3 6 3 10 -1. + <_> + 4 6 1 10 3. + <_> + + <_> + 3 7 3 5 -1. + <_> + 4 7 1 5 3. + <_> + + <_> + 11 0 6 2 -1. + <_> + 13 0 2 2 3. + <_> + + <_> + 11 12 2 1 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 11 12 2 1 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 6 16 1 3 -1. + <_> + 6 17 1 1 3. + <_> + + <_> + 10 16 5 3 -1. + <_> + 10 17 5 1 3. + <_> + + <_> + 7 13 1 3 -1. + <_> + 7 14 1 1 3. + <_> + + <_> + 12 4 8 2 -1. + <_> + 12 5 8 1 2. + <_> + + <_> + 8 7 4 3 -1. + <_> + 10 7 2 3 2. + <_> + + <_> + 12 10 5 9 -1. + <_> + 12 13 5 3 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 11 0 2 4 -1. + <_> + 12 0 1 4 2. + <_> + + <_> + 5 9 10 6 -1. + <_> + 5 9 5 3 2. + <_> + 10 12 5 3 2. + <_> + + <_> + 6 12 3 3 -1. + <_> + 6 13 3 1 3. + <_> + + <_> + 1 3 6 12 -1. + <_> + 1 9 6 6 2. + <_> + + <_> + 1 5 5 10 -1. + <_> + 1 10 5 5 2. + <_> + + <_> + 10 14 1 2 -1. + <_> + 10 15 1 1 2. + <_> + + <_> + 9 5 2 8 -1. + <_> + 9 5 1 4 2. + <_> + 10 9 1 4 2. + <_> + + <_> + 17 12 3 1 -1. + <_> + 18 12 1 1 3. + <_> + + <_> + 5 16 2 3 -1. + <_> + 5 17 2 1 3. + <_> + + <_> + 11 18 7 2 -1. + <_> + 11 19 7 1 2. + <_> + + <_> + 12 6 3 8 -1. + <_> + 13 6 1 8 3. + <_> + + <_> + 11 6 6 5 -1. + <_> + 14 6 3 5 2. + <_> + + <_> + 9 7 4 6 -1. + <_> + 9 7 2 3 2. + <_> + 11 10 2 3 2. + <_> + + <_> + 10 8 6 6 -1. + <_> + 10 10 6 2 3. + <_> + + <_> + 2 1 4 17 -1. + <_> + 4 1 2 17 2. + <_> + + <_> + 7 1 9 4 -1. + <_> + 7 3 9 2 2. + <_> + + <_> + 7 6 3 4 -1. + <_> + 8 6 1 4 3. + <_> + + <_> + 5 9 8 2 -1. + <_> + 9 9 4 2 2. + <_> + + <_> + 11 12 1 4 -1. + <_> + 11 14 1 2 2. + <_> + + <_> + 13 11 1 3 -1. + <_> + 13 12 1 1 3. + <_> + + <_> + 10 19 4 1 -1. + <_> + 12 19 2 1 2. + <_> + + <_> + 5 4 10 12 -1. + <_> + 5 4 5 6 2. + <_> + 10 10 5 6 2. + <_> + + <_> + 4 6 5 6 -1. + <_> + 4 9 5 3 2. + <_> + + <_> + 5 10 4 8 -1. + <_> + 5 14 4 4 2. + <_> + + <_> + 7 5 3 3 -1. + <_> + 7 6 3 1 3. + <_> + + <_> + 7 4 2 2 -1. + <_> + 8 4 1 2 2. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 0 3 6 16 -1. + <_> + 2 3 2 16 3. + <_> + + <_> + 2 6 3 12 -1. + <_> + 3 6 1 12 3. + <_> + + <_> + 12 11 2 2 -1. + <_> + 12 12 2 1 2. + <_> + + <_> + 18 0 2 13 -1. + <_> + 19 0 1 13 2. + <_> + + <_> + 9 14 5 4 -1. + <_> + 9 16 5 2 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 10 14 4 3 -1. + <_> + 10 15 4 1 3. + <_> + + <_> + 12 13 1 3 -1. + <_> + 12 14 1 1 3. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 6 6 6 14 -1. + <_> + 9 6 3 14 2. + <_> + + <_> + 5 11 6 3 -1. + <_> + 8 11 3 3 2. + <_> + + <_> + 5 7 2 4 -1. + <_> + 6 7 1 4 2. + <_> + + <_> + 7 3 11 9 -1. + <_> + 7 6 11 3 3. + <_> + + <_> + 10 4 9 6 -1. + <_> + 10 6 9 2 3. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 0 0 3 1 -1. 
+ <_> + 1 0 1 1 3. + <_> + + <_> + 9 4 4 6 -1. + <_> + 9 4 2 3 2. + <_> + 11 7 2 3 2. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 6 4 3 2 -1. + <_> + 7 4 1 2 3. + <_> + + <_> + 7 14 1 3 -1. + <_> + 7 15 1 1 3. + <_> + + <_> + 1 3 1 2 -1. + <_> + 1 4 1 1 2. + <_> + + <_> + 7 16 2 3 -1. + <_> + 7 17 2 1 3. + <_> + + <_> + 19 6 1 2 -1. + <_> + 19 7 1 1 2. + <_> + + <_> + 6 15 2 3 -1. + <_> + 6 16 2 1 3. + <_> + + <_> + 11 13 1 3 -1. + <_> + 11 14 1 1 3. + <_> + + <_> + 17 10 3 1 -1. + <_> + 18 10 1 1 3. + <_> + + <_> + 10 0 6 1 -1. + <_> + 13 0 3 1 2. + <_> + + <_> + 14 0 6 4 -1. + <_> + 14 0 3 2 2. + <_> + 17 2 3 2 2. + <_> + + <_> + 12 7 4 6 -1. + <_> + 12 10 4 3 2. + <_> + + <_> + 14 5 1 2 -1. + <_> + 14 6 1 1 2. + <_> + + <_> + 6 13 4 3 -1. + <_> + 6 14 4 1 3. + <_> + + <_> + 5 12 4 3 -1. + <_> + 5 13 4 1 3. + <_> + + <_> + 9 3 2 1 -1. + <_> + 10 3 1 1 2. + <_> + + <_> + 9 3 3 3 -1. + <_> + 10 3 1 3 3. + <_> + + <_> + 9 5 3 1 -1. + <_> + 10 5 1 1 3. + <_> + + <_> + 7 8 4 3 -1. + <_> + 7 9 4 1 3. + <_> + + <_> + 1 4 1 6 -1. + <_> + 1 6 1 2 3. + <_> + + <_> + 3 2 3 11 -1. + <_> + 4 2 1 11 3. + <_> + + <_> + 3 2 3 18 -1. + <_> + 4 2 1 18 3. + <_> + + <_> + 5 12 6 2 -1. + <_> + 8 12 3 2 2. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 7 17 3 1 -1. + <_> + 8 17 1 1 3. + <_> + + <_> + 3 10 8 6 -1. + <_> + 3 13 8 3 2. + <_> + + <_> + 3 2 3 17 -1. + <_> + 4 2 1 17 3. + <_> + + <_> + 4 9 8 1 -1. + <_> + 8 9 4 1 2. + <_> + + <_> + 2 7 3 6 -1. + <_> + 3 7 1 6 3. + <_> + + <_> + 18 4 1 2 -1. + <_> + 18 5 1 1 2. + <_> + + <_> + 7 8 2 6 -1. + <_> + 7 10 2 2 3. + <_> + + <_> + 11 12 2 3 -1. + <_> + 11 13 2 1 3. + <_> + + <_> + 16 11 3 1 -1. + <_> + 17 11 1 1 3. + <_> + + <_> + 16 11 3 2 -1. + <_> + 17 11 1 2 3. + <_> + + <_> + 15 3 1 4 -1. + <_> + 15 5 1 2 2. + <_> + + <_> + 11 0 9 11 -1. + <_> + 14 0 3 11 3. + <_> + + <_> + 7 0 5 6 -1. + <_> + 7 3 5 3 2. + <_> + + <_> + 8 7 2 6 -1. + <_> + 8 10 2 3 2. + <_> + + <_> + 11 11 4 6 -1. + <_> + 11 14 4 3 2. + <_> + + <_> + 4 7 3 2 -1. + <_> + 5 7 1 2 3. + <_> + + <_> + 3 7 3 2 -1. + <_> + 4 7 1 2 3. + <_> + + <_> + 11 11 2 3 -1. + <_> + 11 12 2 1 3. + <_> + + <_> + 5 9 4 6 -1. + <_> + 5 12 4 3 2. + <_> + + <_> + 16 4 2 3 -1. + <_> + 17 4 1 3 2. + <_> + + <_> + 12 12 2 1 -1. + <_> + 13 12 1 1 2. + <_> + + <_> + 8 5 6 4 -1. + <_> + 8 5 3 2 2. + <_> + 11 7 3 2 2. + <_> + + <_> + 10 15 3 3 -1. + <_> + 11 15 1 3 3. + <_> + + <_> + 3 7 3 7 -1. + <_> + 4 7 1 7 3. + <_> + + <_> + 11 4 1 2 -1. + <_> + 11 5 1 1 2. + <_> + + <_> + 3 9 3 5 -1. + <_> + 4 9 1 5 3. + <_> + + <_> + 10 15 3 3 -1. + <_> + 11 15 1 3 3. + <_> + + <_> + 3 3 6 12 -1. + <_> + 3 9 6 6 2. + <_> + + <_> + 3 5 5 6 -1. + <_> + 3 7 5 2 3. + <_> + + <_> + 6 6 4 11 -1. + <_> + 8 6 2 11 2. + <_> + + <_> + 6 5 2 6 -1. + <_> + 7 5 1 6 2. + <_> + + <_> + 2 6 3 8 -1. + <_> + 3 6 1 8 3. + <_> + + <_> + 6 4 3 1 -1. + <_> + 7 4 1 1 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 13 14 2 3 -1. + <_> + 13 15 2 1 3. + <_> + + <_> + 10 11 2 3 -1. + <_> + 10 12 2 1 3. + <_> + + <_> + 19 5 1 3 -1. + <_> + 19 6 1 1 3. + <_> + + <_> + 5 14 5 3 -1. + <_> + 5 15 5 1 3. + <_> + + <_> + 4 10 10 4 -1. + <_> + 9 10 5 4 2. + <_> + + <_> + 12 12 2 3 -1. + <_> + 12 13 2 1 3. + <_> + + <_> + 5 13 4 3 -1. + <_> + 5 14 4 1 3. + <_> + + <_> + 6 12 3 3 -1. + <_> + 6 13 3 1 3. + <_> + + <_> + 6 15 3 2 -1. + <_> + 7 15 1 2 3. + <_> + + <_> + 4 11 8 2 -1. + <_> + 8 11 4 2 2. + <_> + + <_> + 14 3 6 8 -1. + <_> + 14 7 6 4 2. 
+ <_> + + <_> + 8 5 12 5 -1. + <_> + 12 5 4 5 3. + <_> + + <_> + 5 14 6 2 -1. + <_> + 7 14 2 2 3. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 13 12 1 3 -1. + <_> + 13 13 1 1 3. + <_> + + <_> + 6 3 14 12 -1. + <_> + 6 3 7 6 2. + <_> + 13 9 7 6 2. + <_> + + <_> + 18 6 2 2 -1. + <_> + 18 7 2 1 2. + <_> + + <_> + 14 7 6 10 -1. + <_> + 16 7 2 10 3. + <_> + + <_> + 9 8 2 3 -1. + <_> + 9 9 2 1 3. + <_> + + <_> + 0 6 2 4 -1. + <_> + 0 8 2 2 2. + <_> + + <_> + 9 0 6 2 -1. + <_> + 11 0 2 2 3. + <_> + + <_> + 12 0 8 2 -1. + <_> + 12 0 4 1 2. + <_> + 16 1 4 1 2. + <_> + + <_> + 3 10 14 6 -1. + <_> + 3 12 14 2 3. + <_> + + <_> + 6 7 3 4 -1. + <_> + 7 7 1 4 3. + <_> + + <_> + 10 13 2 1 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 11 6 5 10 -1. + <_> + 11 11 5 5 2. + <_> + + <_> + 3 16 4 4 -1. + <_> + 3 16 2 2 2. + <_> + 5 18 2 2 2. + <_> + + <_> + 6 2 3 3 -1. + <_> + 7 2 1 3 3. + <_> + + <_> + 4 0 8 20 -1. + <_> + 4 0 4 10 2. + <_> + 8 10 4 10 2. + <_> + + <_> + 3 16 3 4 -1. + <_> + 4 16 1 4 3. + <_> + + <_> + 3 16 3 1 -1. + <_> + 4 16 1 1 3. + <_> + + <_> + 11 13 1 2 -1. + <_> + 11 14 1 1 2. + <_> + + <_> + 11 13 1 3 -1. + <_> + 11 14 1 1 3. + <_> + + <_> + 6 19 14 1 -1. + <_> + 13 19 7 1 2. + <_> + + <_> + 5 7 3 3 -1. + <_> + 6 7 1 3 3. + <_> + + <_> + 7 4 3 2 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 9 18 2 1 -1. + <_> + 10 18 1 1 2. + <_> + + <_> + 6 17 2 3 -1. + <_> + 6 18 2 1 3. + <_> + + <_> + 9 7 3 6 -1. + <_> + 9 9 3 2 3. + <_> + + <_> + 9 12 3 7 -1. + <_> + 10 12 1 7 3. + <_> + + <_> + 8 9 1 3 -1. + <_> + 8 10 1 1 3. + <_> + + <_> + 8 5 12 11 -1. + <_> + 12 5 4 11 3. + <_> + + <_> + 2 0 1 2 -1. + <_> + 2 1 1 1 2. + <_> + + <_> + 0 0 1 2 -1. + <_> + 0 1 1 1 2. + <_> + + <_> + 8 0 12 16 -1. + <_> + 12 0 4 16 3. + <_> + + <_> + 0 0 1 2 -1. + <_> + 0 1 1 1 2. + <_> + + <_> + 11 0 9 11 -1. + <_> + 14 0 3 11 3. + <_> + + <_> + 5 5 3 6 -1. + <_> + 6 5 1 6 3. + <_> + + <_> + 8 8 3 4 -1. + <_> + 8 10 3 2 2. + <_> + + <_> + 13 2 6 12 -1. + <_> + 13 8 6 6 2. + <_> + + <_> + 10 6 4 14 -1. + <_> + 10 13 4 7 2. + <_> + + <_> + 1 1 10 1 -1. + <_> + 6 1 5 1 2. + <_> + + <_> + 4 2 13 6 -1. + <_> + 4 4 13 2 3. + <_> + + <_> + 11 13 2 3 -1. + <_> + 12 13 1 3 2. + <_> + + <_> + 6 9 4 9 -1. + <_> + 6 12 4 3 3. + <_> + + <_> + 6 6 3 10 -1. + <_> + 6 11 3 5 2. + <_> + + <_> + 2 10 3 4 -1. + <_> + 3 10 1 4 3. + <_> + + <_> + 3 8 3 6 -1. + <_> + 4 8 1 6 3. + <_> + + <_> + 11 12 3 6 -1. + <_> + 12 12 1 6 3. + <_> + + <_> + 8 6 2 3 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 5 8 6 6 -1. + <_> + 5 8 3 3 2. + <_> + 8 11 3 3 2. + <_> + + <_> + 3 7 3 1 -1. + <_> + 4 7 1 1 3. + <_> + + <_> + 10 12 3 3 -1. + <_> + 10 13 3 1 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 10 12 4 3 -1. + <_> + 10 13 4 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 9 2 3 1 -1. + <_> + 10 2 1 1 3. + <_> + + <_> + 2 0 18 14 -1. + <_> + 2 7 18 7 2. + <_> + + <_> + 9 2 3 2 -1. + <_> + 10 2 1 2 3. + <_> + + <_> + 8 6 4 3 -1. + <_> + 8 7 4 1 3. + <_> + + <_> + 4 8 5 2 -1. + <_> + 4 9 5 1 2. + <_> + + <_> + 0 3 1 6 -1. + <_> + 0 5 1 2 3. + <_> + + <_> + 13 9 1 6 -1. + <_> + 13 12 1 3 2. + <_> + + <_> + 6 16 3 3 -1. + <_> + 6 17 3 1 3. + <_> + + <_> + 3 16 7 3 -1. + <_> + 3 17 7 1 3. + <_> + + <_> + 10 15 5 3 -1. + <_> + 10 16 5 1 3. + <_> + + <_> + 4 0 5 20 -1. + <_> + 4 10 5 10 2. + <_> + + <_> + 6 2 2 2 -1. + <_> + 7 2 1 2 2. + <_> + + <_> + 18 0 2 15 -1. + <_> + 18 5 2 5 3. + <_> + + <_> + 6 15 7 3 -1. + <_> + 6 16 7 1 3. + <_> + + <_> + 10 13 6 2 -1. + <_> + 10 14 6 1 2. 
+ <_> + + <_> + 13 8 1 9 -1. + <_> + 13 11 1 3 3. + <_> + + <_> + 3 0 4 4 -1. + <_> + 3 0 2 2 2. + <_> + 5 2 2 2 2. + <_> + + <_> + 0 3 1 6 -1. + <_> + 0 5 1 2 3. + <_> + + <_> + 5 8 3 1 -1. + <_> + 6 8 1 1 3. + <_> + + <_> + 5 6 2 3 -1. + <_> + 6 6 1 3 2. + <_> + + <_> + 6 11 6 7 -1. + <_> + 8 11 2 7 3. + <_> + + <_> + 8 7 4 3 -1. + <_> + 8 8 4 1 3. + <_> + + <_> + 3 8 8 1 -1. + <_> + 7 8 4 1 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 5 13 3 1 3. + <_> + + <_> + 9 7 2 8 -1. + <_> + 9 7 1 4 2. + <_> + 10 11 1 4 2. + <_> + + <_> + 14 2 3 5 -1. + <_> + 15 2 1 5 3. + <_> + + <_> + 6 13 2 3 -1. + <_> + 6 14 2 1 3. + <_> + + <_> + 6 14 1 2 -1. + <_> + 6 15 1 1 2. + <_> + + <_> + 12 10 2 3 -1. + <_> + 12 11 2 1 3. + <_> + + <_> + 1 14 12 3 -1. + <_> + 5 14 4 3 3. + <_> + + <_> + 11 8 3 1 -1. + <_> + 12 8 1 1 3. + <_> + + <_> + 14 4 2 3 -1. + <_> + 14 5 2 1 3. + <_> + + <_> + 7 8 3 2 -1. + <_> + 8 8 1 2 3. + <_> + + <_> + 2 7 3 11 -1. + <_> + 3 7 1 11 3. + <_> + + <_> + 0 14 2 1 -1. + <_> + 1 14 1 1 2. + <_> + + <_> + 6 15 3 2 -1. + <_> + 7 15 1 2 3. + <_> + + <_> + 18 10 2 4 -1. + <_> + 18 10 1 2 2. + <_> + 19 12 1 2 2. + <_> + + <_> + 13 12 2 2 -1. + <_> + 14 12 1 2 2. + <_> + + <_> + 9 5 8 12 -1. + <_> + 13 5 4 12 2. + <_> + + <_> + 11 5 3 3 -1. + <_> + 12 5 1 3 3. + <_> + + <_> + 16 11 2 2 -1. + <_> + 16 11 1 1 2. + <_> + 17 12 1 1 2. + <_> + + <_> + 14 5 1 2 -1. + <_> + 14 6 1 1 2. + <_> + + <_> + 3 0 8 16 -1. + <_> + 3 8 8 8 2. + <_> + + <_> + 3 11 3 5 -1. + <_> + 4 11 1 5 3. + <_> + + <_> + 0 8 12 6 -1. + <_> + 4 8 4 6 3. + <_> + + <_> + 6 9 4 2 -1. + <_> + 6 9 2 1 2. + <_> + 8 10 2 1 2. + <_> + + <_> + 11 15 3 5 -1. + <_> + 12 15 1 5 3. + <_> + + <_> + 18 10 2 6 -1. + <_> + 18 10 1 3 2. + <_> + 19 13 1 3 2. + <_> + + <_> + 13 15 6 1 -1. + <_> + 16 15 3 1 2. + <_> + + <_> + 5 10 7 6 -1. + <_> + 5 13 7 3 2. + <_> + + <_> + 2 11 6 6 -1. + <_> + 2 14 6 3 2. + <_> + + <_> + 11 14 3 3 -1. + <_> + 11 15 3 1 3. + <_> + + <_> + 7 14 6 3 -1. + <_> + 7 15 6 1 3. + <_> + + <_> + 5 14 5 3 -1. + <_> + 5 15 5 1 3. + <_> + + <_> + 6 16 3 1 -1. + <_> + 7 16 1 1 3. + <_> + + <_> + 4 15 4 3 -1. + <_> + 4 16 4 1 3. + <_> + + <_> + 2 2 4 8 -1. + <_> + 2 2 2 4 2. + <_> + 4 6 2 4 2. + <_> + + <_> + 12 13 2 3 -1. + <_> + 12 14 2 1 3. + <_> + + <_> + 9 13 4 3 -1. + <_> + 9 14 4 1 3. + <_> + + <_> + 8 8 5 3 -1. + <_> + 8 9 5 1 3. + <_> + + <_> + 9 12 3 2 -1. + <_> + 10 12 1 2 3. + <_> + + <_> + 4 0 8 2 -1. + <_> + 4 0 4 1 2. + <_> + 8 1 4 1 2. + <_> + + <_> + 0 12 1 2 -1. + <_> + 0 13 1 1 2. + <_> + + <_> + 8 14 8 4 -1. + <_> + 8 16 8 2 2. + <_> + + <_> + 4 17 9 3 -1. + <_> + 4 18 9 1 3. + <_> + + <_> + 10 0 2 8 -1. + <_> + 10 4 2 4 2. + <_> + + <_> + 10 13 2 6 -1. + <_> + 10 16 2 3 2. + <_> + + <_> + 7 2 10 5 -1. + <_> + 12 2 5 5 2. + <_> + + <_> + 9 7 4 6 -1. + <_> + 9 7 2 3 2. + <_> + 11 10 2 3 2. + <_> + + <_> + 12 10 1 6 -1. + <_> + 12 13 1 3 2. + <_> + + <_> + 1 2 6 8 -1. + <_> + 4 2 3 8 2. + <_> + + <_> + 10 12 1 3 -1. + <_> + 10 13 1 1 3. + <_> + + <_> + 5 7 3 2 -1. + <_> + 6 7 1 2 3. + <_> + + <_> + 10 13 1 3 -1. + <_> + 10 14 1 1 3. + <_> + + <_> + 4 3 16 9 -1. + <_> + 4 6 16 3 3. + <_> + + <_> + 5 12 4 3 -1. + <_> + 7 12 2 3 2. + <_> + + <_> + 10 14 1 3 -1. + <_> + 10 15 1 1 3. + <_> + + <_> + 10 6 3 8 -1. + <_> + 11 6 1 8 3. + <_> + + <_> + 1 8 3 5 -1. + <_> + 2 8 1 5 3. + <_> + + <_> + 6 7 3 2 -1. + <_> + 7 7 1 2 3. + <_> + + <_> + 9 10 3 3 -1. + <_> + 10 10 1 3 3. + <_> + + <_> + 11 4 4 3 -1. + <_> + 11 5 4 1 3. + <_> + + <_> + 16 11 3 1 -1. + <_> + 17 11 1 1 3. + <_> + + <_> + 8 0 6 3 -1. 
+ <_> + 10 0 2 3 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 11 3 7 3 -1. + <_> + 11 4 7 1 3. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 7 4 3 2 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 7 3 3 3 -1. + <_> + 8 3 1 3 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 10 12 2 3 -1. + <_> + 10 13 2 1 3. + <_> + + <_> + 5 0 12 2 -1. + <_> + 5 1 12 1 2. + <_> + + <_> + 4 11 8 4 -1. + <_> + 4 13 8 2 2. + <_> + + <_> + 6 12 8 4 -1. + <_> + 6 14 8 2 2. + <_> + + <_> + 4 0 4 2 -1. + <_> + 4 0 2 1 2. + <_> + 6 1 2 1 2. + <_> + + <_> + 13 9 4 2 -1. + <_> + 13 10 4 1 2. + <_> + + <_> + 12 10 2 2 -1. + <_> + 13 10 1 2 2. + <_> + + <_> + 9 9 6 1 -1. + <_> + 12 9 3 1 2. + <_> + + <_> + 6 6 14 6 -1. + <_> + 6 9 14 3 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 11 11 1 3 -1. + <_> + 11 12 1 1 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 12 11 6 2 -1. + <_> + 14 11 2 2 3. + <_> + + <_> + 11 11 2 1 -1. + <_> + 12 11 1 1 2. + <_> + + <_> + 3 11 14 1 -1. + <_> + 10 11 7 1 2. + <_> + + <_> + 1 13 6 5 -1. + <_> + 3 13 2 5 3. + <_> + + <_> + 14 0 2 1 -1. + <_> + 15 0 1 1 2. + <_> + + <_> + 10 0 10 1 -1. + <_> + 15 0 5 1 2. + <_> + + <_> + 5 15 3 3 -1. + <_> + 5 16 3 1 3. + <_> + + <_> + 12 14 2 2 -1. + <_> + 12 15 2 1 2. + <_> + + <_> + 12 14 2 3 -1. + <_> + 12 15 2 1 3. + <_> + + <_> + 8 6 1 3 -1. + <_> + 8 7 1 1 3. + <_> + + <_> + 0 2 1 3 -1. + <_> + 0 3 1 1 3. + <_> + + <_> + 0 2 1 3 -1. + <_> + 0 3 1 1 3. + <_> + + <_> + 4 8 2 2 -1. + <_> + 4 8 1 1 2. + <_> + 5 9 1 1 2. + <_> + + <_> + 3 6 8 10 -1. + <_> + 3 6 4 5 2. + <_> + 7 11 4 5 2. + <_> + + <_> + 6 15 1 3 -1. + <_> + 6 16 1 1 3. + <_> + + <_> + 12 0 3 8 -1. + <_> + 13 0 1 8 3. + <_> + + <_> + 10 0 10 6 -1. + <_> + 10 0 5 3 2. + <_> + 15 3 5 3 2. + <_> + + <_> + 17 2 2 2 -1. + <_> + 17 3 2 1 2. + <_> + + <_> + 8 0 12 14 -1. + <_> + 14 0 6 14 2. + <_> + + <_> + 10 18 2 1 -1. + <_> + 11 18 1 1 2. + <_> + + <_> + 18 9 2 6 -1. + <_> + 18 9 1 3 2. + <_> + 19 12 1 3 2. + <_> + + <_> + 18 4 2 16 -1. + <_> + 18 4 1 8 2. + <_> + 19 12 1 8 2. + <_> + + <_> + 5 8 6 6 -1. + <_> + 8 8 3 6 2. + <_> + + <_> + 6 5 4 11 -1. + <_> + 8 5 2 11 2. + <_> + + <_> + 6 8 2 2 -1. + <_> + 7 8 1 2 2. + <_> + + <_> + 6 5 2 5 -1. + <_> + 7 5 1 5 2. + <_> + + <_> + 10 16 3 4 -1. + <_> + 11 16 1 4 3. + <_> + + <_> + 3 0 8 18 -1. + <_> + 3 9 8 9 2. + <_> + + <_> + 1 7 7 3 -1. + <_> + 1 8 7 1 3. + <_> + + <_> + 5 5 2 6 -1. + <_> + 5 7 2 2 3. + <_> + + <_> + 3 8 3 10 -1. + <_> + 4 8 1 10 3. + <_> + + <_> + 3 12 3 2 -1. + <_> + 4 12 1 2 3. + <_> + + <_> + 3 9 10 3 -1. + <_> + 8 9 5 3 2. + <_> + + <_> + 6 15 6 2 -1. + <_> + 8 15 2 2 3. + <_> + + <_> + 5 9 3 2 -1. + <_> + 6 9 1 2 3. + <_> + + <_> + 17 5 3 3 -1. + <_> + 17 6 3 1 3. + <_> + + <_> + 8 6 1 3 -1. + <_> + 8 7 1 1 3. + <_> + + <_> + 18 5 1 3 -1. + <_> + 18 6 1 1 3. + <_> + + <_> + 5 2 5 6 -1. + <_> + 5 5 5 3 2. + <_> + + <_> + 11 1 6 3 -1. + <_> + 13 1 2 3 3. + <_> + + <_> + 6 7 2 10 -1. + <_> + 6 12 2 5 2. + <_> + + <_> + 3 14 4 4 -1. + <_> + 5 14 2 4 2. + <_> + + <_> + 2 11 4 1 -1. + <_> + 4 11 2 1 2. + <_> + + <_> + 6 4 3 2 -1. + <_> + 7 4 1 2 3. + <_> + + <_> + 8 3 2 6 -1. + <_> + 8 5 2 2 3. + <_> + + <_> + 0 10 20 10 -1. + <_> + 10 10 10 10 2. + <_> + + <_> + 13 7 2 2 -1. + <_> + 13 8 2 1 2. + <_> + + <_> + 10 8 10 4 -1. + <_> + 15 8 5 4 2. + <_> + + <_> + 0 10 16 2 -1. + <_> + 8 10 8 2 2. + <_> + + <_> + 10 14 6 6 -1. + <_> + 10 14 3 3 2. 
+ <_> + 13 17 3 3 2. + <_> + + <_> + 13 10 1 3 -1. + <_> + 13 11 1 1 3. + <_> + + <_> + 4 4 10 8 -1. + <_> + 4 4 5 4 2. + <_> + 9 8 5 4 2. + <_> + + <_> + 5 1 6 6 -1. + <_> + 5 1 3 3 2. + <_> + 8 4 3 3 2. + <_> + + <_> + 11 10 8 3 -1. + <_> + 11 11 8 1 3. + <_> + + <_> + 3 11 3 6 -1. + <_> + 3 13 3 2 3. + <_> + + <_> + 8 0 12 6 -1. + <_> + 8 0 6 3 2. + <_> + 14 3 6 3 2. + <_> + + <_> + 7 8 2 4 -1. + <_> + 7 8 1 2 2. + <_> + 8 10 1 2 2. + <_> + + <_> + 11 1 7 10 -1. + <_> + 11 6 7 5 2. + <_> + + <_> + 10 15 3 2 -1. + <_> + 10 16 3 1 2. + <_> + + <_> + 11 11 2 3 -1. + <_> + 12 11 1 3 2. + <_> + + <_> + 6 8 3 2 -1. + <_> + 6 9 3 1 2. + <_> + + <_> + 11 14 1 3 -1. + <_> + 11 15 1 1 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 12 12 2 2 -1. + <_> + 12 13 2 1 2. + <_> + + <_> + 11 3 8 9 -1. + <_> + 11 6 8 3 3. + <_> + + <_> + 10 11 3 3 -1. + <_> + 11 11 1 3 3. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 9 6 2 3 -1. + <_> + 10 6 1 3 2. + <_> + + <_> + 7 8 2 6 -1. + <_> + 7 10 2 2 3. + <_> + + <_> + 3 0 4 6 -1. + <_> + 3 0 2 3 2. + <_> + 5 3 2 3 2. + <_> + + <_> + 5 0 3 17 -1. + <_> + 6 0 1 17 3. + <_> + + <_> + 12 9 6 3 -1. + <_> + 12 10 6 1 3. + <_> + + <_> + 10 19 8 1 -1. + <_> + 14 19 4 1 2. + <_> + + <_> + 13 3 5 3 -1. + <_> + 13 4 5 1 3. + <_> + + <_> + 5 7 2 2 -1. + <_> + 6 7 1 2 2. + <_> + + <_> + 12 10 3 10 -1. + <_> + 13 10 1 10 3. + <_> + + <_> + 4 7 6 3 -1. + <_> + 7 7 3 3 2. + <_> + + <_> + 6 10 1 3 -1. + <_> + 6 11 1 1 3. + <_> + + <_> + 6 9 2 3 -1. + <_> + 6 10 2 1 3. + <_> + + <_> + 11 3 6 3 -1. + <_> + 11 4 6 1 3. + <_> + + <_> + 13 14 2 3 -1. + <_> + 13 15 2 1 3. + <_> + + <_> + 6 16 8 4 -1. + <_> + 6 16 4 2 2. + <_> + 10 18 4 2 2. + <_> + + <_> + 10 5 3 15 -1. + <_> + 11 5 1 15 3. + <_> + + <_> + 10 0 10 6 -1. + <_> + 10 0 5 3 2. + <_> + 15 3 5 3 2. + <_> + + <_> + 11 2 3 16 -1. + <_> + 12 2 1 16 3. + <_> + + <_> + 7 12 2 2 -1. + <_> + 7 12 1 1 2. + <_> + 8 13 1 1 2. + <_> + + <_> + 6 4 2 1 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 6 3 3 4 -1. + <_> + 7 3 1 4 3. + <_> + + <_> + 0 13 16 6 -1. + <_> + 0 15 16 2 3. + <_> + + <_> + 7 14 2 3 -1. + <_> + 7 15 2 1 3. + <_> + + <_> + 15 17 2 2 -1. + <_> + 15 18 2 1 2. + <_> + + <_> + 17 12 2 2 -1. + <_> + 17 12 1 1 2. + <_> + 18 13 1 1 2. + <_> + + <_> + 11 1 3 19 -1. + <_> + 12 1 1 19 3. + <_> + + <_> + 1 11 19 4 -1. + <_> + 1 13 19 2 2. + <_> + + <_> + 17 8 2 10 -1. + <_> + 17 8 1 5 2. + <_> + 18 13 1 5 2. + <_> + + <_> + 9 0 11 20 -1. + <_> + 9 10 11 10 2. + <_> + + <_> + 4 1 12 12 -1. + <_> + 4 1 6 6 2. + <_> + 10 7 6 6 2. + <_> + + <_> + 5 11 3 6 -1. + <_> + 6 11 1 6 3. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 18 1 2 4 -1. + <_> + 19 1 1 4 2. + <_> + + <_> + 11 0 8 15 -1. + <_> + 15 0 4 15 2. + <_> + + <_> + 5 5 6 2 -1. + <_> + 7 5 2 2 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 6 8 2 8 -1. + <_> + 6 12 2 4 2. + <_> + + <_> + 9 9 2 4 -1. + <_> + 9 11 2 2 2. + <_> + + <_> + 0 8 2 2 -1. + <_> + 0 9 2 1 2. + <_> + + <_> + 7 12 8 4 -1. + <_> + 7 14 8 2 2. + <_> + + <_> + 11 13 3 2 -1. + <_> + 11 14 3 1 2. + <_> + + <_> + 5 8 2 2 -1. + <_> + 5 8 1 1 2. + <_> + 6 9 1 1 2. + <_> + + <_> + 12 11 2 3 -1. + <_> + 12 12 2 1 3. + <_> + + <_> + 10 8 2 2 -1. + <_> + 10 8 1 1 2. + <_> + 11 9 1 1 2. + <_> + + <_> + 6 16 3 2 -1. + <_> + 7 16 1 2 3. + <_> + + <_> + 13 12 2 1 -1. + <_> + 14 12 1 1 2. + <_> + + <_> + 16 9 2 6 -1. + <_> + 16 9 1 3 2. + <_> + 17 12 1 3 2. + <_> + + <_> + 17 2 2 6 -1. + <_> + 17 4 2 2 3. 
+ <_> + + <_> + 13 2 7 6 -1. + <_> + 13 4 7 2 3. + <_> + + <_> + 16 10 4 4 -1. + <_> + 16 10 2 2 2. + <_> + 18 12 2 2 2. + <_> + + <_> + 11 10 2 2 -1. + <_> + 11 11 2 1 2. + <_> + + <_> + 6 13 3 3 -1. + <_> + 6 14 3 1 3. + <_> + + <_> + 4 14 4 2 -1. + <_> + 4 15 4 1 2. + <_> + + <_> + 0 9 2 1 -1. + <_> + 1 9 1 1 2. + <_> + + <_> + 7 6 4 8 -1. + <_> + 7 10 4 4 2. + <_> + + <_> + 9 17 7 3 -1. + <_> + 9 18 7 1 3. + <_> + + <_> + 7 12 2 3 -1. + <_> + 7 13 2 1 3. + <_> + + <_> + 12 17 4 3 -1. + <_> + 12 18 4 1 3. + <_> + + <_> + 11 7 9 11 -1. + <_> + 14 7 3 11 3. + <_> + + <_> + 16 14 4 5 -1. + <_> + 18 14 2 5 2. + <_> + + <_> + 9 2 3 4 -1. + <_> + 10 2 1 4 3. + <_> + + <_> + 3 11 2 8 -1. + <_> + 3 11 1 4 2. + <_> + 4 15 1 4 2. + <_> + + <_> + 13 2 6 18 -1. + <_> + 13 2 3 9 2. + <_> + 16 11 3 9 2. + <_> + + <_> + 9 12 5 2 -1. + <_> + 9 13 5 1 2. + <_> + + <_> + 11 8 4 10 -1. + <_> + 11 8 2 5 2. + <_> + 13 13 2 5 2. + <_> + + <_> + 0 11 20 1 -1. + <_> + 10 11 10 1 2. + <_> + + <_> + 1 12 1 2 -1. + <_> + 1 13 1 1 2. + <_> + + <_> + 6 7 6 3 -1. + <_> + 8 7 2 3 3. + <_> + + <_> + 8 5 10 3 -1. + <_> + 13 5 5 3 2. + <_> + + <_> + 5 5 4 6 -1. + <_> + 5 7 4 2 3. + <_> + + <_> + 5 11 6 3 -1. + <_> + 8 11 3 3 2. + <_> + + <_> + 2 8 3 7 -1. + <_> + 3 8 1 7 3. + <_> + + <_> + 2 10 3 6 -1. + <_> + 3 10 1 6 3. + <_> + + <_> + 14 0 2 2 -1. + <_> + 15 0 1 2 2. + <_> + + <_> + 8 7 4 4 -1. + <_> + 8 7 2 2 2. + <_> + 10 9 2 2 2. + <_> + + <_> + 4 13 4 3 -1. + <_> + 4 14 4 1 3. + <_> + + <_> + 8 11 6 2 -1. + <_> + 8 12 6 1 2. + <_> + + <_> + 17 3 1 4 -1. + <_> + 17 5 1 2 2. + <_> + + <_> + 6 13 2 3 -1. + <_> + 6 14 2 1 3. + <_> + + <_> + 7 9 6 8 -1. + <_> + 7 9 3 4 2. + <_> + 10 13 3 4 2. + <_> + + <_> + 5 15 2 3 -1. + <_> + 5 16 2 1 3. + <_> + + <_> + 7 10 4 9 -1. + <_> + 7 13 4 3 3. + <_> + + <_> + 5 4 2 1 -1. + <_> + 6 4 1 1 2. + <_> + + <_> + 0 1 6 19 -1. + <_> + 2 1 2 19 3. + <_> + + <_> + 5 8 6 2 -1. + <_> + 8 8 3 2 2. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 9 12 2 4 -1. + <_> + 9 12 1 2 2. + <_> + 10 14 1 2 2. + <_> + + <_> + 12 7 2 10 -1. + <_> + 12 12 2 5 2. + <_> + + <_> + 10 6 6 8 -1. + <_> + 10 10 6 4 2. + <_> + + <_> + 4 3 2 6 -1. + <_> + 5 3 1 6 2. + <_> + + <_> + 4 6 3 3 -1. + <_> + 5 6 1 3 3. + <_> + + <_> + 10 7 2 8 -1. + <_> + 10 7 1 4 2. + <_> + 11 11 1 4 2. + <_> + + <_> + 2 0 6 10 -1. + <_> + 2 5 6 5 2. + <_> + + <_> + 8 10 6 2 -1. + <_> + 8 11 6 1 2. + <_> + + <_> + 10 0 2 1 -1. + <_> + 11 0 1 1 2. + <_> + + <_> + 4 16 4 3 -1. + <_> + 4 17 4 1 3. + <_> + + <_> + 7 4 3 2 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 7 5 3 1 -1. + <_> + 8 5 1 1 3. + <_> + + <_> + 5 5 6 3 -1. + <_> + 5 6 6 1 3. + <_> + + <_> + 5 5 5 3 -1. + <_> + 5 6 5 1 3. + <_> + + <_> + 10 7 6 9 -1. + <_> + 10 10 6 3 3. + <_> + + <_> + 17 4 1 2 -1. + <_> + 17 5 1 1 2. + <_> + + <_> + 4 9 10 4 -1. + <_> + 4 9 5 2 2. + <_> + 9 11 5 2 2. + <_> + + <_> + 5 6 3 10 -1. + <_> + 5 11 3 5 2. + <_> + + <_> + 2 13 18 5 -1. + <_> + 11 13 9 5 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 5 13 3 1 3. + <_> + + <_> + 9 12 2 4 -1. + <_> + 9 14 2 2 2. + <_> + + <_> + 5 11 15 6 -1. + <_> + 5 13 15 2 3. + <_> + + <_> + 16 0 4 6 -1. + <_> + 16 0 2 3 2. + <_> + 18 3 2 3 2. + <_> + + <_> + 11 12 2 2 -1. + <_> + 11 12 1 1 2. + <_> + 12 13 1 1 2. + <_> + + <_> + 6 6 3 5 -1. + <_> + 7 6 1 5 3. + <_> + + <_> + 13 13 2 1 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 5 8 3 2 -1. + <_> + 6 8 1 2 3. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. 
+ <_> + + <_> + 16 12 3 1 -1. + <_> + 17 12 1 1 3. + <_> + + <_> + 8 5 12 8 -1. + <_> + 14 5 6 8 2. + <_> + + <_> + 5 13 4 4 -1. + <_> + 5 13 2 2 2. + <_> + 7 15 2 2 2. + <_> + + <_> + 5 7 2 3 -1. + <_> + 6 7 1 3 2. + <_> + + <_> + 9 2 2 10 -1. + <_> + 9 2 1 5 2. + <_> + 10 7 1 5 2. + <_> + + <_> + 9 14 1 2 -1. + <_> + 9 15 1 1 2. + <_> + + <_> + 15 7 2 4 -1. + <_> + 15 9 2 2 2. + <_> + + <_> + 7 5 4 3 -1. + <_> + 7 6 4 1 3. + <_> + + <_> + 3 10 8 2 -1. + <_> + 7 10 4 2 2. + <_> + + <_> + 13 8 2 2 -1. + <_> + 13 9 2 1 2. + <_> + + <_> + 9 6 2 3 -1. + <_> + 9 7 2 1 3. + <_> + + <_> + 13 10 5 2 -1. + <_> + 13 11 5 1 2. + <_> + + <_> + 16 11 2 2 -1. + <_> + 16 11 1 1 2. + <_> + 17 12 1 1 2. + <_> + + <_> + 0 10 2 4 -1. + <_> + 0 10 1 2 2. + <_> + 1 12 1 2 2. + <_> + + <_> + 0 8 2 8 -1. + <_> + 0 8 1 4 2. + <_> + 1 12 1 4 2. + <_> + + <_> + 6 14 5 3 -1. + <_> + 6 15 5 1 3. + <_> + + <_> + 18 8 2 4 -1. + <_> + 19 8 1 4 2. + <_> + + <_> + 14 2 3 1 -1. + <_> + 15 2 1 1 3. + <_> + + <_> + 9 13 3 3 -1. + <_> + 9 14 3 1 3. + <_> + + <_> + 5 13 6 3 -1. + <_> + 5 14 6 1 3. + <_> + + <_> + 12 12 1 3 -1. + <_> + 12 13 1 1 3. + <_> + + <_> + 2 14 14 6 -1. + <_> + 2 17 14 3 2. + <_> + + <_> + 7 5 2 4 -1. + <_> + 7 5 1 2 2. + <_> + 8 7 1 2 2. + <_> + + <_> + 5 17 2 2 -1. + <_> + 5 17 1 1 2. + <_> + 6 18 1 1 2. + <_> + + <_> + 9 3 3 5 -1. + <_> + 10 3 1 5 3. + <_> + + <_> + 6 17 4 3 -1. + <_> + 6 18 4 1 3. + <_> + + <_> + 10 0 6 4 -1. + <_> + 12 0 2 4 3. + <_> + + <_> + 4 8 6 10 -1. + <_> + 4 8 3 5 2. + <_> + 7 13 3 5 2. + <_> + + <_> + 4 3 2 6 -1. + <_> + 5 3 1 6 2. + <_> + + <_> + 3 4 6 6 -1. + <_> + 5 4 2 6 3. + <_> + + <_> + 5 8 2 8 -1. + <_> + 5 12 2 4 2. + <_> + + <_> + 5 11 2 2 -1. + <_> + 5 12 2 1 2. + <_> + + <_> + 12 13 1 3 -1. + <_> + 12 14 1 1 3. + <_> + + <_> + 5 1 4 15 -1. + <_> + 5 6 4 5 3. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 6 11 3 3 -1. + <_> + 6 12 3 1 3. + <_> + + <_> + 11 0 3 3 -1. + <_> + 12 0 1 3 3. + <_> + + <_> + 2 2 15 3 -1. + <_> + 7 2 5 3 3. + <_> + + <_> + 4 0 16 5 -1. + <_> + 12 0 8 5 2. + <_> + + <_> + 13 7 6 8 -1. + <_> + 13 11 6 4 2. + <_> + + <_> + 9 9 3 4 -1. + <_> + 9 11 3 2 2. + <_> + + <_> + 5 2 6 16 -1. + <_> + 5 2 3 8 2. + <_> + 8 10 3 8 2. + <_> + + <_> + 10 7 6 3 -1. + <_> + 13 7 3 3 2. + <_> + + <_> + 12 11 2 1 -1. + <_> + 13 11 1 1 2. + <_> + + <_> + 0 0 1 8 -1. + <_> + 0 4 1 4 2. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 6 5 4 15 -1. + <_> + 8 5 2 15 2. + <_> + + <_> + 7 7 2 2 -1. + <_> + 8 7 1 2 2. + <_> + + <_> + 1 3 1 2 -1. + <_> + 1 4 1 1 2. + <_> + + <_> + 6 2 6 11 -1. + <_> + 9 2 3 11 2. + <_> + + <_> + 9 6 9 6 -1. + <_> + 9 8 9 2 3. + <_> + + <_> + 9 8 3 3 -1. + <_> + 9 9 3 1 3. + <_> + + <_> + 6 8 2 3 -1. + <_> + 6 9 2 1 3. + <_> + + <_> + 13 1 2 8 -1. + <_> + 13 5 2 4 2. + <_> + + <_> + 6 0 6 4 -1. + <_> + 6 2 6 2 2. + <_> + + <_> + 0 6 20 14 -1. + <_> + 10 6 10 14 2. + <_> + + <_> + 8 0 12 6 -1. + <_> + 8 0 6 3 2. + <_> + 14 3 6 3 2. + <_> + + <_> + 8 7 9 9 -1. + <_> + 8 10 9 3 3. + <_> + + <_> + 10 14 6 6 -1. + <_> + 10 14 3 3 2. + <_> + 13 17 3 3 2. + <_> + + <_> + 8 7 4 10 -1. + <_> + 8 7 2 5 2. + <_> + 10 12 2 5 2. + <_> + + <_> + 15 4 3 3 -1. + <_> + 15 5 3 1 3. + <_> + + <_> + 14 0 6 6 -1. + <_> + 16 0 2 6 3. + <_> + + <_> + 5 9 10 6 -1. + <_> + 5 9 5 3 2. + <_> + 10 12 5 3 2. + <_> + + <_> + 11 12 2 1 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 11 7 3 7 -1. + <_> + 12 7 1 7 3. + <_> + + <_> + 9 0 2 18 -1. + <_> + 9 0 1 9 2. + <_> + 10 9 1 9 2. + <_> + + <_> + 3 6 3 4 -1. + <_> + 4 6 1 4 3. 
+ <_> + + <_> + 14 10 2 2 -1. + <_> + 14 10 1 1 2. + <_> + 15 11 1 1 2. + <_> + + <_> + 4 7 3 2 -1. + <_> + 5 7 1 2 3. + <_> + + <_> + 10 14 4 3 -1. + <_> + 10 15 4 1 3. + <_> + + <_> + 12 12 2 3 -1. + <_> + 12 13 2 1 3. + <_> + + <_> + 3 0 2 8 -1. + <_> + 3 0 1 4 2. + <_> + 4 4 1 4 2. + <_> + + <_> + 14 4 5 3 -1. + <_> + 14 5 5 1 3. + <_> + + <_> + 6 16 1 3 -1. + <_> + 6 17 1 1 3. + <_> + + <_> + 5 16 2 3 -1. + <_> + 5 17 2 1 3. + <_> + + <_> + 4 6 10 6 -1. + <_> + 4 6 5 3 2. + <_> + 9 9 5 3 2. + <_> + + <_> + 9 14 7 4 -1. + <_> + 9 16 7 2 2. + <_> + + <_> + 10 11 2 4 -1. + <_> + 10 11 1 2 2. + <_> + 11 13 1 2 2. + <_> + + <_> + 5 12 4 3 -1. + <_> + 5 13 4 1 3. + <_> + + <_> + 5 13 3 2 -1. + <_> + 5 14 3 1 2. + <_> + + <_> + 7 13 8 4 -1. + <_> + 7 15 8 2 2. + <_> + + <_> + 8 4 3 1 -1. + <_> + 9 4 1 1 3. + <_> + + <_> + 6 1 1 4 -1. + <_> + 6 3 1 2 2. + <_> + + <_> + 8 0 12 6 -1. + <_> + 8 0 6 3 2. + <_> + 14 3 6 3 2. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 7 4 3 1 -1. + <_> + 8 4 1 1 3. + <_> + + <_> + 7 9 2 2 -1. + <_> + 7 9 1 1 2. + <_> + 8 10 1 1 2. + <_> + + <_> + 15 14 4 6 -1. + <_> + 15 14 2 3 2. + <_> + 17 17 2 3 2. + <_> + + <_> + 7 9 1 4 -1. + <_> + 7 11 1 2 2. + <_> + + <_> + 10 11 3 9 -1. + <_> + 11 11 1 9 3. + <_> + + <_> + 17 11 3 1 -1. + <_> + 18 11 1 1 3. + <_> + + <_> + 17 11 3 1 -1. + <_> + 18 11 1 1 3. + <_> + + <_> + 0 1 1 2 -1. + <_> + 0 2 1 1 2. + <_> + + <_> + 9 15 7 3 -1. + <_> + 9 16 7 1 3. + <_> + + <_> + 15 0 2 2 -1. + <_> + 16 0 1 2 2. + <_> + + <_> + 5 0 1 14 -1. + <_> + 5 7 1 7 2. + <_> + + <_> + 7 3 1 2 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 7 0 4 6 -1. + <_> + 7 2 4 2 3. + <_> + + <_> + 7 2 3 2 -1. + <_> + 8 2 1 2 3. + <_> + + <_> + 5 12 4 3 -1. + <_> + 5 13 4 1 3. + <_> + + <_> + 18 5 1 2 -1. + <_> + 18 6 1 1 2. + <_> + + <_> + 18 0 2 10 -1. + <_> + 18 0 1 5 2. + <_> + 19 5 1 5 2. + <_> + + <_> + 0 2 13 6 -1. + <_> + 0 4 13 2 3. + <_> + + <_> + 0 0 2 2 -1. + <_> + 0 0 1 1 2. + <_> + 1 1 1 1 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 6 12 2 4 -1. + <_> + 7 12 1 4 2. + <_> + + <_> + 7 9 4 10 -1. + <_> + 9 9 2 10 2. + <_> + + <_> + 2 0 9 16 -1. + <_> + 2 8 9 8 2. + <_> + + <_> + 10 3 2 8 -1. + <_> + 10 3 1 4 2. + <_> + 11 7 1 4 2. + <_> + + <_> + 1 2 12 3 -1. + <_> + 5 2 4 3 3. + <_> + + <_> + 4 6 2 3 -1. + <_> + 5 6 1 3 2. + <_> + + <_> + 1 7 6 10 -1. + <_> + 3 7 2 10 3. + <_> + + <_> + 1 14 2 1 -1. + <_> + 2 14 1 1 2. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 12 8 3 5 -1. + <_> + 13 8 1 5 3. + <_> + + <_> + 6 5 9 6 -1. + <_> + 6 7 9 2 3. + <_> + + <_> + 13 8 2 3 -1. + <_> + 13 9 2 1 3. + <_> + + <_> + 7 15 6 4 -1. + <_> + 7 15 3 2 2. + <_> + 10 17 3 2 2. + <_> + + <_> + 10 15 6 3 -1. + <_> + 10 16 6 1 3. + <_> + + <_> + 3 2 2 6 -1. + <_> + 3 2 1 3 2. + <_> + 4 5 1 3 2. + <_> + + <_> + 10 15 3 5 -1. + <_> + 11 15 1 5 3. + <_> + + <_> + 12 9 5 2 -1. + <_> + 12 10 5 1 2. + <_> + + <_> + 4 11 10 1 -1. + <_> + 9 11 5 1 2. + <_> + + <_> + 6 12 6 2 -1. + <_> + 6 12 3 1 2. + <_> + 9 13 3 1 2. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 3 12 8 4 -1. + <_> + 3 12 4 2 2. + <_> + 7 14 4 2 2. + <_> + + <_> + 0 3 1 3 -1. + <_> + 0 4 1 1 3. + <_> + + <_> + 10 12 2 1 -1. + <_> + 11 12 1 1 2. + <_> + + <_> + 3 10 3 6 -1. + <_> + 3 12 3 2 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 8 7 4 6 -1. + <_> + 8 9 4 2 3. + <_> + + <_> + 12 11 1 3 -1. + <_> + 12 12 1 1 3. 
+ <_> + + <_> + 12 11 2 3 -1. + <_> + 12 12 2 1 3. + <_> + + <_> + 6 10 2 2 -1. + <_> + 6 10 1 1 2. + <_> + 7 11 1 1 2. + <_> + + <_> + 3 10 9 6 -1. + <_> + 3 13 9 3 2. + <_> + + <_> + 4 8 7 10 -1. + <_> + 4 13 7 5 2. + <_> + + <_> + 6 8 11 3 -1. + <_> + 6 9 11 1 3. + <_> + + <_> + 6 5 1 14 -1. + <_> + 6 12 1 7 2. + <_> + + <_> + 13 6 5 10 -1. + <_> + 13 11 5 5 2. + <_> + + <_> + 2 0 13 15 -1. + <_> + 2 5 13 5 3. + <_> + + <_> + 6 7 2 2 -1. + <_> + 7 7 1 2 2. + <_> + + <_> + 4 5 9 4 -1. + <_> + 7 5 3 4 3. + <_> + + <_> + 6 7 3 3 -1. + <_> + 7 7 1 3 3. + <_> + + <_> + 8 1 3 4 -1. + <_> + 9 1 1 4 3. + <_> + + <_> + 8 11 7 2 -1. + <_> + 8 12 7 1 2. + <_> + + <_> + 4 7 3 2 -1. + <_> + 5 7 1 2 3. + <_> + + <_> + 4 14 2 6 -1. + <_> + 4 14 1 3 2. + <_> + 5 17 1 3 2. + <_> + + <_> + 0 7 8 13 -1. + <_> + 4 7 4 13 2. + <_> + + <_> + 6 3 4 9 -1. + <_> + 8 3 2 9 2. + <_> + + <_> + 9 12 2 3 -1. + <_> + 9 13 2 1 3. + <_> + + <_> + 16 14 2 6 -1. + <_> + 16 14 1 3 2. + <_> + 17 17 1 3 2. + <_> + + <_> + 11 14 2 3 -1. + <_> + 11 15 2 1 3. + <_> + + <_> + 11 14 1 2 -1. + <_> + 11 15 1 1 2. + <_> + + <_> + 8 8 3 2 -1. + <_> + 8 9 3 1 2. + <_> + + <_> + 13 1 3 5 -1. + <_> + 14 1 1 5 3. + <_> + + <_> + 6 15 8 2 -1. + <_> + 6 15 4 1 2. + <_> + 10 16 4 1 2. + <_> + + <_> + 13 2 3 4 -1. + <_> + 14 2 1 4 3. + <_> + + <_> + 1 8 1 6 -1. + <_> + 1 10 1 2 3. + <_> + + <_> + 12 0 8 2 -1. + <_> + 12 0 4 1 2. + <_> + 16 1 4 1 2. + <_> + + <_> + 5 8 3 1 -1. + <_> + 6 8 1 1 3. + <_> + + <_> + 7 5 2 4 -1. + <_> + 8 5 1 4 2. + <_> + + <_> + 7 2 2 1 -1. + <_> + 8 2 1 1 2. + <_> + + <_> + 0 4 2 3 -1. + <_> + 0 5 2 1 3. + <_> + + <_> + 3 17 2 2 -1. + <_> + 3 17 1 1 2. + <_> + 4 18 1 1 2. + <_> + + <_> + 6 0 12 9 -1. + <_> + 12 0 6 9 2. + <_> + + <_> + 7 0 12 3 -1. + <_> + 11 0 4 3 3. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 0 3 3 2. + <_> + 17 3 3 3 2. + <_> + + <_> + 15 2 1 2 -1. + <_> + 15 3 1 1 2. + <_> + + <_> + 8 2 1 6 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 5 7 3 2 -1. + <_> + 6 7 1 2 3. + <_> + + <_> + 6 7 4 6 -1. + <_> + 6 10 4 3 2. + <_> + + <_> + 8 6 10 2 -1. + <_> + 13 6 5 2 2. + <_> + + <_> + 2 1 4 15 -1. + <_> + 4 1 2 15 2. + <_> + + <_> + 5 9 3 6 -1. + <_> + 5 12 3 3 2. + <_> + + <_> + 12 11 2 1 -1. + <_> + 13 11 1 1 2. + <_> + + <_> + 6 4 6 2 -1. + <_> + 8 4 2 2 3. + <_> + + <_> + 12 9 4 8 -1. + <_> + 12 13 4 4 2. + <_> + + <_> + 15 8 2 4 -1. + <_> + 15 10 2 2 2. + <_> + + <_> + 6 12 3 3 -1. + <_> + 6 13 3 1 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 5 10 4 6 -1. + <_> + 7 10 2 6 2. + <_> + + <_> + 7 8 2 9 -1. + <_> + 7 11 2 3 3. + <_> + + <_> + 5 13 4 3 -1. + <_> + 5 14 4 1 3. + <_> + + <_> + 11 12 2 2 -1. + <_> + 11 12 1 1 2. + <_> + 12 13 1 1 2. + <_> + + <_> + 5 13 5 3 -1. + <_> + 5 14 5 1 3. + <_> + + <_> + 4 9 8 1 -1. + <_> + 8 9 4 1 2. + <_> + + <_> + 12 0 8 6 -1. + <_> + 12 0 4 3 2. + <_> + 16 3 4 3 2. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 8 4 3 3 -1. + <_> + 9 4 1 3 3. + <_> + + <_> + 8 0 7 15 -1. + <_> + 8 5 7 5 3. + <_> + + <_> + 3 0 8 4 -1. + <_> + 3 0 4 2 2. + <_> + 7 2 4 2 2. + <_> + + <_> + 0 11 20 1 -1. + <_> + 10 11 10 1 2. + <_> + + <_> + 3 14 3 2 -1. + <_> + 4 14 1 2 3. + <_> + + <_> + 3 11 3 8 -1. + <_> + 4 11 1 8 3. + <_> + + <_> + 7 13 2 5 -1. + <_> + 8 13 1 5 2. + <_> + + <_> + 14 4 3 3 -1. + <_> + 14 5 3 1 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 6 12 1 2 -1. + <_> + 6 13 1 1 2. + <_> + + <_> + 5 13 3 1 -1. + <_> + 6 13 1 1 3. + <_> + + <_> + 12 11 1 3 -1. + <_> + 12 12 1 1 3. + <_> + + <_> + 5 10 2 3 -1. 
+ <_> + 5 11 2 1 3. + <_> + + <_> + 5 9 1 3 -1. + <_> + 5 10 1 1 3. + <_> + + <_> + 1 9 12 9 -1. + <_> + 1 12 12 3 3. + <_> + + <_> + 12 14 3 3 -1. + <_> + 12 15 3 1 3. + <_> + + <_> + 10 14 5 3 -1. + <_> + 10 15 5 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 5 11 2 6 -1. + <_> + 5 14 2 3 2. + <_> + + <_> + 6 5 2 14 -1. + <_> + 6 12 2 7 2. + <_> + + <_> + 2 8 5 2 -1. + <_> + 2 9 5 1 2. + <_> + + <_> + 10 14 1 2 -1. + <_> + 10 15 1 1 2. + <_> + + <_> + 7 14 4 6 -1. + <_> + 7 16 4 2 3. + <_> + + <_> + 8 12 3 1 -1. + <_> + 9 12 1 1 3. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 3 6 3 4 -1. + <_> + 4 6 1 4 3. + <_> + + <_> + 4 4 3 8 -1. + <_> + 4 8 3 4 2. + <_> + + <_> + 12 5 2 2 -1. + <_> + 12 6 2 1 2. + <_> + + <_> + 16 10 2 2 -1. + <_> + 16 10 1 1 2. + <_> + 17 11 1 1 2. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 7 0 5 8 -1. + <_> + 7 4 5 4 2. + <_> + + <_> + 4 5 8 10 -1. + <_> + 4 5 4 5 2. + <_> + 8 10 4 5 2. + <_> + + <_> + 7 5 3 3 -1. + <_> + 7 6 3 1 3. + <_> + + <_> + 10 6 10 14 -1. + <_> + 10 13 10 7 2. + <_> + + <_> + 8 6 2 3 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 13 10 1 4 -1. + <_> + 13 12 1 2 2. + <_> + + <_> + 3 9 12 4 -1. + <_> + 3 9 6 2 2. + <_> + 9 11 6 2 2. + <_> + + <_> + 7 14 3 6 -1. + <_> + 7 16 3 2 3. + <_> + + <_> + 10 10 3 2 -1. + <_> + 11 10 1 2 3. + <_> + + <_> + 3 4 10 4 -1. + <_> + 3 4 5 2 2. + <_> + 8 6 5 2 2. + <_> + + <_> + 4 10 4 3 -1. + <_> + 4 11 4 1 3. + <_> + + <_> + 5 3 6 4 -1. + <_> + 5 3 3 2 2. + <_> + 8 5 3 2 2. + <_> + + <_> + 6 8 6 10 -1. + <_> + 9 8 3 10 2. + <_> + + <_> + 10 15 6 3 -1. + <_> + 10 16 6 1 3. + <_> + + <_> + 3 4 3 7 -1. + <_> + 4 4 1 7 3. + <_> + + <_> + 3 3 3 11 -1. + <_> + 4 3 1 11 3. + <_> + + <_> + 7 14 5 3 -1. + <_> + 7 15 5 1 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 9 0 3 4 -1. + <_> + 10 0 1 4 3. + <_> + + <_> + 11 1 3 1 -1. + <_> + 12 1 1 1 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 0 10 2 1 -1. + <_> + 1 10 1 1 2. + <_> + + <_> + 17 0 2 8 -1. + <_> + 17 0 1 4 2. + <_> + 18 4 1 4 2. + <_> + + <_> + 6 7 6 2 -1. + <_> + 8 7 2 2 3. + <_> + + <_> + 5 7 6 9 -1. + <_> + 8 7 3 9 2. + <_> + + <_> + 6 8 9 3 -1. + <_> + 9 8 3 3 3. + <_> + + <_> + 11 7 6 4 -1. + <_> + 13 7 2 4 3. + <_> + + <_> + 8 5 2 2 -1. + <_> + 9 5 1 2 2. + <_> + + <_> + 15 3 4 10 -1. + <_> + 15 8 4 5 2. + <_> + + <_> + 9 2 1 2 -1. + <_> + 9 3 1 1 2. + <_> + + <_> + 7 15 8 2 -1. + <_> + 7 15 4 1 2. + <_> + 11 16 4 1 2. + <_> + + <_> + 6 5 2 9 -1. + <_> + 7 5 1 9 2. + <_> + + <_> + 6 6 2 4 -1. + <_> + 7 6 1 4 2. + <_> + + <_> + 10 15 2 4 -1. + <_> + 11 15 1 4 2. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 12 9 7 4 -1. + <_> + 12 11 7 2 2. + <_> + + <_> + 5 9 9 3 -1. + <_> + 8 9 3 3 3. + <_> + + <_> + 5 8 6 5 -1. + <_> + 8 8 3 5 2. + <_> + + <_> + 7 16 4 3 -1. + <_> + 7 17 4 1 3. + <_> + + <_> + 15 4 4 3 -1. + <_> + 15 5 4 1 3. + <_> + + <_> + 16 10 2 2 -1. + <_> + 16 10 1 1 2. + <_> + 17 11 1 1 2. + <_> + + <_> + 5 6 6 9 -1. + <_> + 8 6 3 9 2. + <_> + + <_> + 10 0 10 6 -1. + <_> + 10 0 5 3 2. + <_> + 15 3 5 3 2. + <_> + + <_> + 13 14 1 2 -1. + <_> + 13 15 1 1 2. + <_> + + <_> + 10 4 3 1 -1. + <_> + 11 4 1 1 3. + <_> + + <_> + 6 16 1 3 -1. + <_> + 6 17 1 1 3. + <_> + + <_> + 11 13 4 3 -1. + <_> + 11 14 4 1 3. + <_> + + <_> + 14 10 6 6 -1. + <_> + 14 10 3 3 2. + <_> + 17 13 3 3 2. + <_> + + <_> + 1 1 1 2 -1. + <_> + 1 2 1 1 2. + <_> + + <_> + 6 15 1 3 -1. + <_> + 6 16 1 1 3. 
+ <_> + + <_> + 7 15 1 3 -1. + <_> + 7 16 1 1 3. + <_> + + <_> + 8 16 3 2 -1. + <_> + 9 16 1 2 3. + <_> + + <_> + 5 8 3 9 -1. + <_> + 6 8 1 9 3. + <_> + + <_> + 3 3 2 10 -1. + <_> + 3 3 1 5 2. + <_> + 4 8 1 5 2. + <_> + + <_> + 3 6 3 1 -1. + <_> + 4 6 1 1 3. + <_> + + <_> + 2 0 2 1 -1. + <_> + 3 0 1 1 2. + <_> + + <_> + 7 13 2 3 -1. + <_> + 7 14 2 1 3. + <_> + + <_> + 7 9 1 9 -1. + <_> + 7 12 1 3 3. + <_> + + <_> + 7 8 1 9 -1. + <_> + 7 11 1 3 3. + <_> + + <_> + 15 7 3 10 -1. + <_> + 16 7 1 10 3. + <_> + + <_> + 14 7 6 10 -1. + <_> + 16 7 2 10 3. + <_> + + <_> + 2 12 18 6 -1. + <_> + 2 14 18 2 3. + <_> + + <_> + 0 9 12 1 -1. + <_> + 4 9 4 1 3. + <_> + + <_> + 1 7 3 6 -1. + <_> + 2 7 1 6 3. + <_> + + <_> + 5 6 8 1 -1. + <_> + 9 6 4 1 2. + <_> + + <_> + 10 14 2 1 -1. + <_> + 11 14 1 1 2. + <_> + + <_> + 14 8 6 10 -1. + <_> + 16 8 2 10 3. + <_> + + <_> + 10 5 8 7 -1. + <_> + 14 5 4 7 2. + <_> + + <_> + 8 5 8 4 -1. + <_> + 8 5 4 2 2. + <_> + 12 7 4 2 2. + <_> + + <_> + 11 11 1 8 -1. + <_> + 11 15 1 4 2. + <_> + + <_> + 5 6 2 4 -1. + <_> + 6 6 1 4 2. + <_> + + <_> + 7 8 2 2 -1. + <_> + 7 9 2 1 2. + <_> + + <_> + 0 2 8 11 -1. + <_> + 4 2 4 11 2. + <_> + + <_> + 8 6 8 8 -1. + <_> + 8 10 8 4 2. + <_> + + <_> + 4 4 2 6 -1. + <_> + 5 4 1 6 2. + <_> + + <_> + 13 12 1 2 -1. + <_> + 13 13 1 1 2. + <_> + + <_> + 3 8 3 2 -1. + <_> + 4 8 1 2 3. + <_> + + <_> + 13 12 1 3 -1. + <_> + 13 13 1 1 3. + <_> + + <_> + 9 19 4 1 -1. + <_> + 11 19 2 1 2. + <_> + + <_> + 15 4 2 3 -1. + <_> + 15 5 2 1 3. + <_> + + <_> + 5 11 11 4 -1. + <_> + 5 13 11 2 2. + <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 6 12 4 4 -1. + <_> + 6 14 4 2 2. + <_> + + <_> + 7 11 1 3 -1. + <_> + 7 12 1 1 3. + <_> + + <_> + 9 10 3 3 -1. + <_> + 10 10 1 3 3. + <_> + + <_> + 10 12 2 1 -1. + <_> + 11 12 1 1 2. + <_> + + <_> + 7 1 12 16 -1. + <_> + 7 1 6 8 2. + <_> + 13 9 6 8 2. + <_> + + <_> + 10 5 8 7 -1. + <_> + 14 5 4 7 2. + <_> + + <_> + 18 8 2 10 -1. + <_> + 18 8 1 5 2. + <_> + 19 13 1 5 2. + <_> + + <_> + 12 11 2 2 -1. + <_> + 13 11 1 2 2. + <_> + + <_> + 3 15 3 1 -1. + <_> + 4 15 1 1 3. + <_> + + <_> + 5 14 2 1 -1. + <_> + 6 14 1 1 2. + <_> + + <_> + 11 9 1 2 -1. + <_> + 11 10 1 1 2. + <_> + + <_> + 10 12 3 1 -1. + <_> + 11 12 1 1 3. + <_> + + <_> + 5 9 7 2 -1. + <_> + 5 10 7 1 2. + <_> + + <_> + 11 0 2 1 -1. + <_> + 12 0 1 1 2. + <_> + + <_> + 11 0 2 2 -1. + <_> + 12 0 1 2 2. + <_> + + <_> + 5 0 2 2 -1. + <_> + 5 0 1 1 2. + <_> + 6 1 1 1 2. + <_> + + <_> + 8 3 12 6 -1. + <_> + 8 5 12 2 3. + <_> + + <_> + 17 0 3 12 -1. + <_> + 18 0 1 12 3. + <_> + + <_> + 11 1 2 1 -1. + <_> + 12 1 1 1 2. + <_> + + <_> + 5 5 2 1 -1. + <_> + 6 5 1 1 2. + <_> + + <_> + 7 14 6 6 -1. + <_> + 7 14 3 3 2. + <_> + 10 17 3 3 2. + <_> + + <_> + 11 10 1 2 -1. + <_> + 11 11 1 1 2. + <_> + + <_> + 3 9 12 4 -1. + <_> + 3 9 6 2 2. + <_> + 9 11 6 2 2. + <_> + + <_> + 5 10 1 2 -1. + <_> + 5 11 1 1 2. + <_> + + <_> + 6 10 2 1 -1. + <_> + 7 10 1 1 2. + <_> + + <_> + 8 16 3 2 -1. + <_> + 9 16 1 2 3. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 7 15 3 2 -1. + <_> + 8 15 1 2 3. + <_> + + <_> + 8 15 2 1 -1. + <_> + 9 15 1 1 2. + <_> + + <_> + 5 10 4 3 -1. + <_> + 5 11 4 1 3. + <_> + + <_> + 6 7 4 12 -1. + <_> + 8 7 2 12 2. + <_> + + <_> + 5 6 6 7 -1. + <_> + 8 6 3 7 2. + <_> + + <_> + 8 4 6 11 -1. + <_> + 11 4 3 11 2. + <_> + + <_> + 7 9 6 3 -1. + <_> + 9 9 2 3 3. + <_> + + <_> + 0 5 1 2 -1. + <_> + 0 6 1 1 2. + <_> + + <_> + 6 8 3 1 -1. + <_> + 7 8 1 1 3. + <_> + + <_> + 12 1 2 2 -1. + <_> + 13 1 1 2 2. + <_> + + <_> + 4 4 10 12 -1. 
+ <_> + 4 4 5 6 2. + <_> + 9 10 5 6 2. + <_> + + <_> + 5 18 2 2 -1. + <_> + 5 18 1 1 2. + <_> + 6 19 1 1 2. + <_> + + <_> + 6 3 3 3 -1. + <_> + 7 3 1 3 3. + <_> + + <_> + 5 12 2 3 -1. + <_> + 5 13 2 1 3. + <_> + + <_> + 11 15 2 3 -1. + <_> + 11 16 2 1 3. + <_> + + <_> + 11 15 1 3 -1. + <_> + 11 16 1 1 3. + <_> + + <_> + 6 7 3 2 -1. + <_> + 7 7 1 2 3. + <_> + + <_> + 3 11 14 1 -1. + <_> + 10 11 7 1 2. + <_> + + <_> + 5 7 3 1 -1. + <_> + 6 7 1 1 3. + <_> + + <_> + 14 9 3 3 -1. + <_> + 14 10 3 1 3. + <_> + + <_> + 4 17 2 2 -1. + <_> + 4 17 1 1 2. + <_> + 5 18 1 1 2. + <_> + + <_> + 15 16 2 2 -1. + <_> + 15 17 2 1 2. + <_> + + <_> + 18 12 2 2 -1. + <_> + 18 12 1 1 2. + <_> + 19 13 1 1 2. + <_> + + <_> + 5 11 4 3 -1. + <_> + 7 11 2 3 2. + <_> + + <_> + 9 7 2 3 -1. + <_> + 9 8 2 1 3. + <_> + + <_> + 18 6 2 2 -1. + <_> + 18 7 2 1 2. + <_> + + <_> + 18 6 2 2 -1. + <_> + 18 7 2 1 2. + <_> + + <_> + 4 5 2 6 -1. + <_> + 4 7 2 2 3. + <_> + + <_> + 3 11 6 4 -1. + <_> + 3 11 3 2 2. + <_> + 6 13 3 2 2. + <_> + + <_> + 1 10 3 3 -1. + <_> + 2 10 1 3 3. + <_> + + <_> + 15 0 4 4 -1. + <_> + 15 0 2 2 2. + <_> + 17 2 2 2 2. + <_> + + <_> + 5 6 4 10 -1. + <_> + 5 11 4 5 2. + <_> + + <_> + 7 13 1 3 -1. + <_> + 7 14 1 1 3. + <_> + + <_> + 3 10 16 4 -1. + <_> + 3 10 8 2 2. + <_> + 11 12 8 2 2. + <_> + + <_> + 7 14 1 3 -1. + <_> + 7 15 1 1 3. + <_> + + <_> + 5 14 3 3 -1. + <_> + 5 15 3 1 3. + <_> + + <_> + 9 9 3 8 -1. + <_> + 10 9 1 8 3. + <_> + + <_> + 6 0 7 4 -1. + <_> + 6 2 7 2 2. + <_> + + <_> + 8 0 1 4 -1. + <_> + 8 2 1 2 2. + <_> + + <_> + 1 4 1 6 -1. + <_> + 1 6 1 2 3. + <_> + + <_> + 0 2 15 3 -1. + <_> + 5 2 5 3 3. + <_> + + <_> + 0 8 2 2 -1. + <_> + 0 9 2 1 2. + <_> + + <_> + 3 10 6 4 -1. + <_> + 5 10 2 4 3. + <_> + + <_> + 8 5 3 1 -1. + <_> + 9 5 1 1 3. + <_> + + <_> + 15 11 2 2 -1. + <_> + 15 11 1 1 2. + <_> + 16 12 1 1 2. + <_> + + <_> + 4 11 6 2 -1. + <_> + 7 11 3 2 2. + <_> + + <_> + 6 8 6 4 -1. + <_> + 8 8 2 4 3. + <_> + + <_> + 6 5 6 6 -1. + <_> + 8 5 2 6 3. + <_> + + <_> + 14 12 2 3 -1. + <_> + 15 12 1 3 2. + <_> + + <_> + 11 5 3 7 -1. + <_> + 12 5 1 7 3. + <_> + + <_> + 7 16 8 4 -1. + <_> + 7 16 4 2 2. + <_> + 11 18 4 2 2. + <_> + + <_> + 5 16 12 4 -1. + <_> + 5 16 6 2 2. + <_> + 11 18 6 2 2. + <_> + + <_> + 10 17 6 3 -1. + <_> + 10 18 6 1 3. + <_> + + <_> + 6 8 2 3 -1. + <_> + 6 9 2 1 3. + <_> + + <_> + 0 0 20 18 -1. + <_> + 10 0 10 18 2. + <_> + + <_> + 8 0 6 5 -1. + <_> + 11 0 3 5 2. + <_> + + <_> + 13 5 4 2 -1. + <_> + 13 5 2 1 2. + <_> + 15 6 2 1 2. + <_> + + <_> + 10 4 4 11 -1. + <_> + 12 4 2 11 2. + <_> + + <_> + 5 10 3 1 -1. + <_> + 6 10 1 1 3. + <_> + + <_> + 17 4 2 3 -1. + <_> + 17 5 2 1 3. + <_> + + <_> + 6 13 8 6 -1. + <_> + 6 13 4 3 2. + <_> + 10 16 4 3 2. + <_> + + <_> + 17 5 3 10 -1. + <_> + 18 5 1 10 3. + <_> + + <_> + 13 11 2 2 -1. + <_> + 14 11 1 2 2. + <_> + + <_> + 5 9 4 9 -1. + <_> + 5 12 4 3 3. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 15 15 2 2 -1. + <_> + 15 15 1 1 2. + <_> + 16 16 1 1 2. + <_> + + <_> + 6 13 6 5 -1. + <_> + 8 13 2 5 3. + <_> + + <_> + 9 7 2 8 -1. + <_> + 9 7 1 4 2. + <_> + 10 11 1 4 2. + <_> + + <_> + 4 12 2 2 -1. + <_> + 4 12 1 1 2. + <_> + 5 13 1 1 2. + <_> + + <_> + 7 4 3 1 -1. + <_> + 8 4 1 1 3. + <_> + + <_> + 12 3 3 4 -1. + <_> + 13 3 1 4 3. + <_> + + <_> + 2 0 18 20 -1. + <_> + 2 10 18 10 2. + <_> + + <_> + 11 2 7 12 -1. + <_> + 11 8 7 6 2. + <_> + + <_> + 13 5 2 2 -1. + <_> + 14 5 1 2 2. + <_> + + <_> + 4 17 4 1 -1. + <_> + 6 17 2 1 2. + <_> + + <_> + 3 14 4 4 -1. + <_> + 5 14 2 4 2. + <_> + + <_> + 0 2 8 18 -1. 
+ <_> + 0 11 8 9 2. + <_> + + <_> + 5 7 3 3 -1. + <_> + 5 8 3 1 3. + <_> + + <_> + 8 2 3 2 -1. + <_> + 9 2 1 2 3. + <_> + + <_> + 5 7 15 4 -1. + <_> + 5 9 15 2 2. + <_> + + <_> + 10 0 10 8 -1. + <_> + 10 0 5 4 2. + <_> + 15 4 5 4 2. + <_> + + <_> + 10 8 4 4 -1. + <_> + 10 8 2 2 2. + <_> + 12 10 2 2 2. + <_> + + <_> + 5 6 3 10 -1. + <_> + 5 11 3 5 2. + <_> + + <_> + 7 6 3 4 -1. + <_> + 8 6 1 4 3. + <_> + + <_> + 12 13 2 2 -1. + <_> + 12 14 2 1 2. + <_> + + <_> + 7 8 4 12 -1. + <_> + 7 12 4 4 3. + <_> + + <_> + 0 0 6 18 -1. + <_> + 2 0 2 18 3. + <_> + + <_> + 6 1 10 6 -1. + <_> + 6 3 10 2 3. + <_> + + <_> + 13 9 3 2 -1. + <_> + 13 10 3 1 2. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 6 10 1 10 -1. + <_> + 6 15 1 5 2. + <_> + + <_> + 9 9 3 4 -1. + <_> + 9 11 3 2 2. + <_> + + <_> + 7 4 2 2 -1. + <_> + 7 5 2 1 2. + <_> + + <_> + 12 12 2 1 -1. + <_> + 13 12 1 1 2. + <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 1 0 1 2 -1. + <_> + 1 1 1 1 2. + <_> + + <_> + 10 16 6 3 -1. + <_> + 10 17 6 1 3. + <_> + + <_> + 9 4 4 6 -1. + <_> + 9 4 2 3 2. + <_> + 11 7 2 3 2. + <_> + + <_> + 10 9 10 1 -1. + <_> + 15 9 5 1 2. + <_> + + <_> + 9 11 1 2 -1. + <_> + 9 12 1 1 2. + <_> + + <_> + 7 8 3 6 -1. + <_> + 7 10 3 2 3. + <_> + + <_> + 1 18 8 2 -1. + <_> + 1 18 4 1 2. + <_> + 5 19 4 1 2. + <_> + + <_> + 5 13 3 3 -1. + <_> + 5 14 3 1 3. + <_> + + <_> + 4 6 5 6 -1. + <_> + 4 9 5 3 2. + <_> + + <_> + 6 5 2 1 -1. + <_> + 7 5 1 1 2. + <_> + + <_> + 11 6 1 6 -1. + <_> + 11 9 1 3 2. + <_> + + <_> + 6 17 4 3 -1. + <_> + 6 18 4 1 3. + <_> + + <_> + 10 4 2 10 -1. + <_> + 10 4 1 5 2. + <_> + 11 9 1 5 2. + <_> + + <_> + 8 4 9 13 -1. + <_> + 11 4 3 13 3. + <_> + + <_> + 10 11 2 2 -1. + <_> + 11 11 1 2 2. + <_> + + <_> + 13 15 1 2 -1. + <_> + 13 16 1 1 2. + <_> + + <_> + 17 0 3 13 -1. + <_> + 18 0 1 13 3. + <_> + + <_> + 0 0 14 10 -1. + <_> + 0 5 14 5 2. + <_> + + <_> + 12 5 6 15 -1. + <_> + 14 5 2 15 3. + <_> + + <_> + 11 10 2 3 -1. + <_> + 11 11 2 1 3. + <_> + + <_> + 5 14 3 3 -1. + <_> + 5 15 3 1 3. + <_> + + <_> + 5 15 3 2 -1. + <_> + 5 16 3 1 2. + <_> + + <_> + 11 14 3 6 -1. + <_> + 12 14 1 6 3. + <_> + + <_> + 12 18 2 1 -1. + <_> + 13 18 1 1 2. + <_> + + <_> + 16 5 1 2 -1. + <_> + 16 6 1 1 2. + <_> + + <_> + 17 8 3 4 -1. + <_> + 18 8 1 4 3. + <_> + + <_> + 8 15 2 3 -1. + <_> + 9 15 1 3 2. + <_> + + <_> + 6 7 2 4 -1. + <_> + 6 7 1 2 2. + <_> + 7 9 1 2 2. + <_> + + <_> + 3 7 12 2 -1. + <_> + 7 7 4 2 3. + <_> + + <_> + 4 7 3 3 -1. + <_> + 5 7 1 3 3. + <_> + + <_> + 1 10 2 1 -1. + <_> + 2 10 1 1 2. + <_> + + <_> + 4 4 2 5 -1. + <_> + 5 4 1 5 2. + <_> + + <_> + 6 7 14 2 -1. + <_> + 13 7 7 2 2. + <_> + + <_> + 14 17 2 3 -1. + <_> + 14 18 2 1 3. + <_> + + <_> + 6 11 1 3 -1. + <_> + 6 12 1 1 3. + <_> + + <_> + 11 3 8 16 -1. + <_> + 11 11 8 8 2. + <_> + + <_> + 9 12 5 3 -1. + <_> + 9 13 5 1 3. + <_> + + <_> + 5 9 1 3 -1. + <_> + 5 10 1 1 3. + <_> + + <_> + 3 8 8 4 -1. + <_> + 3 8 4 2 2. + <_> + 7 10 4 2 2. + <_> + + <_> + 10 15 2 3 -1. + <_> + 10 16 2 1 3. + <_> + + <_> + 14 9 1 6 -1. + <_> + 14 12 1 3 2. + <_> + + <_> + 13 11 1 3 -1. + <_> + 13 12 1 1 3. + <_> + + <_> + 8 7 6 6 -1. + <_> + 8 9 6 2 3. + <_> + + <_> + 9 8 4 3 -1. + <_> + 9 9 4 1 3. + <_> + + <_> + 8 2 2 5 -1. + <_> + 9 2 1 5 2. + <_> + + <_> + 13 6 3 3 -1. + <_> + 13 7 3 1 3. + <_> + + <_> + 12 0 5 14 -1. + <_> + 12 7 5 7 2. + <_> + + <_> + 2 2 7 10 -1. + <_> + 2 7 7 5 2. + <_> + + <_> + 5 5 6 11 -1. + <_> + 8 5 3 11 2. + <_> + + <_> + 6 17 3 3 -1. + <_> + 6 18 3 1 3. 
+ <_> + + <_> + 9 5 2 8 -1. + <_> + 9 5 1 4 2. + <_> + 10 9 1 4 2. + <_> + + <_> + 14 0 4 16 -1. + <_> + 14 8 4 8 2. + <_> + + <_> + 10 7 1 3 -1. + <_> + 10 8 1 1 3. + <_> + + <_> + 7 16 3 2 -1. + <_> + 8 16 1 2 3. + <_> + + <_> + 10 6 1 3 -1. + <_> + 10 7 1 1 3. + <_> + + <_> + 5 11 14 6 -1. + <_> + 5 14 14 3 2. + <_> + + <_> + 9 6 1 3 -1. + <_> + 9 7 1 1 3. + <_> + + <_> + 6 11 5 4 -1. + <_> + 6 13 5 2 2. + <_> + + <_> + 6 9 10 8 -1. + <_> + 6 9 5 4 2. + <_> + 11 13 5 4 2. + <_> + + <_> + 18 9 2 6 -1. + <_> + 18 9 1 3 2. + <_> + 19 12 1 3 2. + <_> + + <_> + 5 12 8 2 -1. + <_> + 9 12 4 2 2. + <_> + + <_> + 8 8 6 12 -1. + <_> + 8 8 3 6 2. + <_> + 11 14 3 6 2. + <_> + + <_> + 12 7 3 5 -1. + <_> + 13 7 1 5 3. + <_> + + <_> + 10 13 4 3 -1. + <_> + 10 14 4 1 3. + <_> + + <_> + 12 4 3 15 -1. + <_> + 13 4 1 15 3. + <_> + + <_> + 4 12 4 2 -1. + <_> + 6 12 2 2 2. + <_> + + <_> + 14 1 6 1 -1. + <_> + 16 1 2 1 3. + <_> + + <_> + 15 3 2 8 -1. + <_> + 16 3 1 8 2. + <_> + + <_> + 13 16 6 4 -1. + <_> + 13 16 3 2 2. + <_> + 16 18 3 2 2. + <_> + + <_> + 9 5 6 7 -1. + <_> + 12 5 3 7 2. + <_> + + <_> + 18 3 2 2 -1. + <_> + 18 4 2 1 2. + <_> + + <_> + 2 0 18 4 -1. + <_> + 11 0 9 4 2. + <_> + + <_> + 0 8 2 2 -1. + <_> + 1 8 1 2 2. + <_> + + <_> + 4 12 3 6 -1. + <_> + 5 12 1 6 3. + <_> + + <_> + 3 13 4 2 -1. + <_> + 5 13 2 2 2. + <_> + + <_> + 4 14 11 2 -1. + <_> + 4 15 11 1 2. + <_> + + <_> + 4 13 8 3 -1. + <_> + 4 14 8 1 3. + <_> + + <_> + 3 7 6 10 -1. + <_> + 3 7 3 5 2. + <_> + 6 12 3 5 2. + <_> + + <_> + 5 7 6 4 -1. + <_> + 7 7 2 4 3. + <_> + + <_> + 2 11 10 6 -1. + <_> + 2 14 10 3 2. + <_> + + <_> + 5 7 9 12 -1. + <_> + 5 13 9 6 2. + <_> + + <_> + 9 12 7 4 -1. + <_> + 9 14 7 2 2. + <_> + + <_> + 2 0 8 4 -1. + <_> + 2 0 4 2 2. + <_> + 6 2 4 2 2. + <_> + + <_> + 4 0 4 4 -1. + <_> + 4 0 2 2 2. + <_> + 6 2 2 2 2. + <_> + + <_> + 6 2 3 2 -1. + <_> + 7 2 1 2 3. + <_> + + <_> + 2 11 3 4 -1. + <_> + 3 11 1 4 3. + <_> + + <_> + 1 17 2 1 -1. + <_> + 2 17 1 1 2. + <_> + + <_> + 15 12 4 3 -1. + <_> + 15 13 4 1 3. + <_> + + <_> + 9 15 7 3 -1. + <_> + 9 16 7 1 3. + <_> + + <_> + 6 7 3 2 -1. + <_> + 7 7 1 2 3. + <_> + + <_> + 3 5 12 10 -1. + <_> + 3 5 6 5 2. + <_> + 9 10 6 5 2. + <_> + + <_> + 4 2 12 5 -1. + <_> + 10 2 6 5 2. + <_> + + <_> + 9 5 3 1 -1. + <_> + 10 5 1 1 3. + <_> + + <_> + 2 10 3 4 -1. + <_> + 3 10 1 4 3. + <_> + + <_> + 11 5 2 10 -1. + <_> + 11 10 2 5 2. + <_> + + <_> + 8 6 7 8 -1. + <_> + 8 10 7 4 2. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 2 8 8 4 -1. + <_> + 6 8 4 4 2. + <_> + + <_> + 0 9 2 2 -1. + <_> + 1 9 1 2 2. + <_> + + <_> + 13 11 4 2 -1. + <_> + 15 11 2 2 2. + <_> + + <_> + 8 6 12 5 -1. + <_> + 12 6 4 5 3. + <_> + + <_> + 11 11 9 1 -1. + <_> + 14 11 3 1 3. + <_> + + <_> + 15 10 2 4 -1. + <_> + 15 10 1 2 2. + <_> + 16 12 1 2 2. + <_> + + <_> + 18 5 1 3 -1. + <_> + 18 6 1 1 3. + <_> + + <_> + 4 10 7 3 -1. + <_> + 4 11 7 1 3. + <_> + + <_> + 8 5 3 1 -1. + <_> + 9 5 1 1 3. + <_> + + <_> + 7 13 2 3 -1. + <_> + 7 14 2 1 3. + <_> + + <_> + 7 14 3 3 -1. + <_> + 7 15 3 1 3. + <_> + + <_> + 7 15 3 3 -1. + <_> + 7 16 3 1 3. + <_> + + <_> + 14 15 1 3 -1. + <_> + 14 16 1 1 3. + <_> + + <_> + 2 14 10 6 -1. + <_> + 2 17 10 3 2. + <_> + + <_> + 5 12 5 3 -1. + <_> + 5 13 5 1 3. + <_> + + <_> + 7 9 1 6 -1. + <_> + 7 11 1 2 3. + <_> + + <_> + 0 6 5 6 -1. + <_> + 0 8 5 2 3. + <_> + + <_> + 6 10 3 4 -1. + <_> + 6 12 3 2 2. + <_> + + <_> + 4 9 9 2 -1. + <_> + 4 10 9 1 2. + <_> + + <_> + 7 3 1 2 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 8 9 4 4 -1. + <_> + 8 11 4 2 2. 
+ <_> + + <_> + 11 10 3 1 -1. + <_> + 12 10 1 1 3. + <_> + + <_> + 5 7 3 2 -1. + <_> + 5 8 3 1 2. + <_> + + <_> + 7 0 6 6 -1. + <_> + 7 3 6 3 2. + <_> + + <_> + 5 6 3 4 -1. + <_> + 6 6 1 4 3. + <_> + + <_> + 11 1 9 12 -1. + <_> + 14 1 3 12 3. + <_> + + <_> + 6 7 4 9 -1. + <_> + 6 10 4 3 3. + <_> + + <_> + 11 7 8 6 -1. + <_> + 11 7 4 3 2. + <_> + 15 10 4 3 2. + <_> + + <_> + 8 9 7 3 -1. + <_> + 8 10 7 1 3. + <_> + + <_> + 3 2 4 18 -1. + <_> + 5 2 2 18 2. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 6 11 8 6 -1. + <_> + 6 11 4 3 2. + <_> + 10 14 4 3 2. + <_> + + <_> + 5 9 4 7 -1. + <_> + 7 9 2 7 2. + <_> + + <_> + 5 8 6 5 -1. + <_> + 8 8 3 5 2. + <_> + + <_> + 7 11 1 3 -1. + <_> + 7 12 1 1 3. + <_> + + <_> + 15 10 3 1 -1. + <_> + 16 10 1 1 3. + <_> + + <_> + 10 12 2 2 -1. + <_> + 10 13 2 1 2. + <_> + + <_> + 11 13 2 1 -1. + <_> + 12 13 1 1 2. + <_> + + <_> + 6 12 2 2 -1. + <_> + 6 13 2 1 2. + <_> + + <_> + 11 2 2 12 -1. + <_> + 11 2 1 6 2. + <_> + 12 8 1 6 2. + <_> + + <_> + 7 0 6 6 -1. + <_> + 7 3 6 3 2. + <_> + + <_> + 4 8 4 2 -1. + <_> + 4 9 4 1 2. + <_> + + <_> + 14 12 1 2 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 4 0 2 4 -1. + <_> + 4 0 1 2 2. + <_> + 5 2 1 2 2. + <_> + + <_> + 15 2 2 1 -1. + <_> + 16 2 1 1 2. + <_> + + <_> + 3 14 3 1 -1. + <_> + 4 14 1 1 3. + <_> + + <_> + 5 11 10 4 -1. + <_> + 5 11 5 2 2. + <_> + 10 13 5 2 2. + <_> + + <_> + 4 10 12 3 -1. + <_> + 4 11 12 1 3. + <_> + + <_> + 15 2 4 6 -1. + <_> + 15 2 2 3 2. + <_> + 17 5 2 3 2. + <_> + + <_> + 5 8 1 4 -1. + <_> + 5 10 1 2 2. + <_> + + <_> + 6 15 3 2 -1. + <_> + 7 15 1 2 3. + <_> + + <_> + 11 19 2 1 -1. + <_> + 12 19 1 1 2. + <_> + + <_> + 6 7 3 2 -1. + <_> + 7 7 1 2 3. + <_> + + <_> + 6 4 2 1 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 6 4 3 2 -1. + <_> + 7 4 1 2 3. + <_> + + <_> + 6 8 2 2 -1. + <_> + 6 8 1 1 2. + <_> + 7 9 1 1 2. + <_> + + <_> + 6 15 3 2 -1. + <_> + 7 15 1 2 3. + <_> + + <_> + 4 8 2 4 -1. + <_> + 4 8 1 2 2. + <_> + 5 10 1 2 2. + <_> + + <_> + 10 4 7 3 -1. + <_> + 10 5 7 1 3. + <_> + + <_> + 4 5 2 6 -1. + <_> + 5 5 1 6 2. + <_> + + <_> + 10 13 1 3 -1. + <_> + 10 14 1 1 3. + <_> + + <_> + 6 11 6 3 -1. + <_> + 9 11 3 3 2. + <_> + + <_> + 10 14 3 2 -1. + <_> + 10 15 3 1 2. + <_> + + <_> + 8 8 4 2 -1. + <_> + 10 8 2 2 2. + <_> + + <_> + 17 12 3 1 -1. + <_> + 18 12 1 1 3. + <_> + + <_> + 9 0 11 16 -1. + <_> + 9 8 11 8 2. + <_> + + <_> + 17 0 3 6 -1. + <_> + 17 2 3 2 3. + <_> + + <_> + 0 0 1 2 -1. + <_> + 0 1 1 1 2. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 4 10 10 9 -1. + <_> + 4 13 10 3 3. + <_> + + <_> + 3 3 3 5 -1. + <_> + 4 3 1 5 3. + <_> + + <_> + 6 1 2 6 -1. + <_> + 6 3 2 2 3. + <_> + + <_> + 5 0 8 6 -1. + <_> + 5 2 8 2 3. + <_> + + <_> + 0 0 1 2 -1. + <_> + 0 1 1 1 2. + <_> + + <_> + 6 3 6 4 -1. + <_> + 8 3 2 4 3. + <_> + + <_> + 8 6 3 3 -1. + <_> + 8 7 3 1 3. + <_> + + <_> + 9 6 3 6 -1. + <_> + 9 8 3 2 3. + <_> + + <_> + 4 3 12 12 -1. + <_> + 4 3 6 6 2. + <_> + 10 9 6 6 2. + <_> + + <_> + 13 8 3 2 -1. + <_> + 13 9 3 1 2. + <_> + + <_> + 4 3 10 2 -1. + <_> + 9 3 5 2 2. + <_> + + <_> + 18 14 2 2 -1. + <_> + 18 14 1 1 2. + <_> + 19 15 1 1 2. + <_> + + <_> + 5 6 6 2 -1. + <_> + 8 6 3 2 2. + <_> + + <_> + 0 14 20 5 -1. + <_> + 10 14 10 5 2. + <_> + + <_> + 9 17 2 1 -1. + <_> + 10 17 1 1 2. + <_> + + <_> + 5 16 5 3 -1. + <_> + 5 17 5 1 3. + <_> + + <_> + 9 16 3 2 -1. + <_> + 10 16 1 2 3. + <_> + + <_> + 6 5 5 3 -1. + <_> + 6 6 5 1 3. + <_> + + <_> + 11 12 3 8 -1. + <_> + 12 12 1 8 3. + <_> + + <_> + 4 3 3 9 -1. + <_> + 4 6 3 3 3. + <_> + + <_> + 11 0 3 3 -1. 
+ <_> + 12 0 1 3 3. + <_> + + <_> + 5 17 10 2 -1. + <_> + 5 17 5 1 2. + <_> + 10 18 5 1 2. + <_> + + <_> + 5 15 2 3 -1. + <_> + 5 16 2 1 3. + <_> + + <_> + 6 14 2 4 -1. + <_> + 6 14 1 2 2. + <_> + 7 16 1 2 2. + <_> + + <_> + 10 17 6 3 -1. + <_> + 10 18 6 1 3. + <_> + + <_> + 19 5 1 3 -1. + <_> + 19 6 1 1 3. + <_> + + <_> + 16 13 2 2 -1. + <_> + 16 13 1 1 2. + <_> + 17 14 1 1 2. + <_> + + <_> + 0 11 2 1 -1. + <_> + 1 11 1 1 2. + <_> + + <_> + 4 12 6 6 -1. + <_> + 4 12 3 3 2. + <_> + 7 15 3 3 2. + <_> + + <_> + 5 15 4 3 -1. + <_> + 5 16 4 1 3. + <_> + + <_> + 10 16 3 2 -1. + <_> + 11 16 1 2 3. + <_> + + <_> + 1 0 10 2 -1. + <_> + 1 0 5 1 2. + <_> + 6 1 5 1 2. + <_> + + <_> + 2 0 18 14 -1. + <_> + 11 0 9 14 2. + <_> + + <_> + 15 7 4 7 -1. + <_> + 17 7 2 7 2. + <_> + + <_> + 5 10 2 4 -1. + <_> + 6 10 1 4 2. + <_> + + <_> + 15 16 3 1 -1. + <_> + 16 16 1 1 3. + <_> + + <_> + 7 15 5 3 -1. + <_> + 7 16 5 1 3. + <_> + + <_> + 12 1 6 3 -1. + <_> + 14 1 2 3 3. + <_> + + <_> + 16 2 2 1 -1. + <_> + 17 2 1 1 2. + <_> + + <_> + 17 0 2 2 -1. + <_> + 17 0 1 1 2. + <_> + 18 1 1 1 2. + <_> + + <_> + 1 0 4 6 -1. + <_> + 1 2 4 2 3. + <_> + + <_> + 3 1 6 18 -1. + <_> + 3 7 6 6 3. + <_> + + <_> + 5 1 1 12 -1. + <_> + 5 7 1 6 2. + <_> + + <_> + 16 9 2 2 -1. + <_> + 16 9 1 1 2. + <_> + 17 10 1 1 2. + <_> + + <_> + 4 2 2 11 -1. + <_> + 5 2 1 11 2. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 14 18 2 2 -1. + <_> + 14 19 2 1 2. + <_> + + <_> + 10 0 10 10 -1. + <_> + 10 0 5 5 2. + <_> + 15 5 5 5 2. + <_> + + <_> + 19 6 1 2 -1. + <_> + 19 7 1 1 2. + <_> + + <_> + 11 0 6 8 -1. + <_> + 11 0 3 4 2. + <_> + 14 4 3 4 2. + <_> + + <_> + 5 0 2 2 -1. + <_> + 5 0 1 1 2. + <_> + 6 1 1 1 2. + <_> + + <_> + 3 1 9 11 -1. + <_> + 6 1 3 11 3. + <_> + + <_> + 10 11 3 2 -1. + <_> + 10 12 3 1 2. + <_> + + <_> + 10 9 4 2 -1. + <_> + 12 9 2 2 2. + <_> + + <_> + 13 7 1 6 -1. + <_> + 13 9 1 2 3. + <_> + + <_> + 8 10 6 2 -1. + <_> + 8 10 3 1 2. + <_> + 11 11 3 1 2. + <_> + + <_> + 4 11 4 6 -1. + <_> + 4 14 4 3 2. + <_> + + <_> + 17 4 2 3 -1. + <_> + 17 5 2 1 3. + <_> + + <_> + 10 2 8 14 -1. + <_> + 10 2 4 7 2. + <_> + 14 9 4 7 2. + <_> + + <_> + 12 8 8 7 -1. + <_> + 16 8 4 7 2. + <_> + + <_> + 1 2 18 1 -1. + <_> + 7 2 6 1 3. + <_> + + <_> + 0 1 8 19 -1. + <_> + 4 1 4 19 2. + <_> + + <_> + 0 0 8 12 -1. + <_> + 4 0 4 12 2. + <_> + + <_> + 13 5 5 12 -1. + <_> + 13 11 5 6 2. + <_> + + <_> + 7 9 1 4 -1. + <_> + 7 11 1 2 2. + <_> + + <_> + 0 13 10 3 -1. + <_> + 5 13 5 3 2. + <_> + + <_> + 2 7 12 4 -1. + <_> + 6 7 4 4 3. + <_> + + <_> + 9 1 2 6 -1. + <_> + 9 1 1 3 2. + <_> + 10 4 1 3 2. + <_> + + <_> + 6 8 3 3 -1. + <_> + 7 8 1 3 3. + <_> + + <_> + 4 11 3 1 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 5 10 1 2 -1. + <_> + 5 11 1 1 2. + <_> + + <_> + 0 17 4 1 -1. + <_> + 2 17 2 1 2. + <_> + + <_> + 1 16 2 1 -1. + <_> + 2 16 1 1 2. + <_> + + <_> + 7 14 2 3 -1. + <_> + 7 15 2 1 3. + <_> + + <_> + 10 13 2 2 -1. + <_> + 10 14 2 1 2. + <_> + + <_> + 16 11 3 1 -1. + <_> + 17 11 1 1 3. + <_> + + <_> + 16 10 3 2 -1. + <_> + 17 10 1 2 3. + <_> + + <_> + 7 2 3 1 -1. + <_> + 8 2 1 1 3. + <_> + + <_> + 14 4 5 3 -1. + <_> + 14 5 5 1 3. + <_> + + <_> + 7 7 2 3 -1. + <_> + 8 7 1 3 2. + <_> + + <_> + 5 7 6 7 -1. + <_> + 8 7 3 7 2. + <_> + + <_> + 4 2 2 6 -1. + <_> + 4 2 1 3 2. + <_> + 5 5 1 3 2. + <_> + + <_> + 4 9 2 3 -1. + <_> + 4 10 2 1 3. + <_> + + <_> + 8 6 7 12 -1. + <_> + 8 10 7 4 3. + <_> + + <_> + 8 5 2 10 -1. + <_> + 8 10 2 5 2. + <_> + + <_> + 4 3 3 5 -1. + <_> + 5 3 1 5 3. + <_> + + <_> + 9 12 2 1 -1. + <_> + 10 12 1 1 2. 
+ <_> + + <_> + 3 8 3 4 -1. + <_> + 4 8 1 4 3. + <_> + + <_> + 13 14 3 3 -1. + <_> + 13 15 3 1 3. + <_> + + <_> + 1 14 2 3 -1. + <_> + 2 14 1 3 2. + <_> + + <_> + 5 0 2 4 -1. + <_> + 5 0 1 2 2. + <_> + 6 2 1 2 2. + <_> + + <_> + 5 14 4 3 -1. + <_> + 5 15 4 1 3. + <_> + + <_> + 6 12 2 6 -1. + <_> + 6 12 1 3 2. + <_> + 7 15 1 3 2. + <_> + + <_> + 6 13 2 2 -1. + <_> + 7 13 1 2 2. + <_> + + <_> + 9 10 4 5 -1. + <_> + 11 10 2 5 2. + <_> + + <_> + 11 3 2 1 -1. + <_> + 12 3 1 1 2. + <_> + + <_> + 6 7 2 2 -1. + <_> + 6 7 1 1 2. + <_> + 7 8 1 1 2. + <_> + + <_> + 5 3 6 5 -1. + <_> + 7 3 2 5 3. + <_> + + <_> + 5 6 4 8 -1. + <_> + 7 6 2 8 2. + <_> + + <_> + 5 7 6 3 -1. + <_> + 7 7 2 3 3. + <_> + + <_> + 9 12 3 4 -1. + <_> + 10 12 1 4 3. + <_> + + <_> + 16 9 3 1 -1. + <_> + 17 9 1 1 3. + <_> + + <_> + 13 14 3 3 -1. + <_> + 13 15 3 1 3. + <_> + + <_> + 7 13 4 2 -1. + <_> + 7 13 2 1 2. + <_> + 9 14 2 1 2. + <_> + + <_> + 10 13 1 2 -1. + <_> + 10 14 1 1 2. + <_> + + <_> + 9 13 2 3 -1. + <_> + 9 14 2 1 3. + <_> + + <_> + 9 14 2 3 -1. + <_> + 9 15 2 1 3. + <_> + + <_> + 9 6 8 1 -1. + <_> + 13 6 4 1 2. + <_> + + <_> + 6 8 3 2 -1. + <_> + 6 9 3 1 2. + <_> + + <_> + 5 6 2 3 -1. + <_> + 6 6 1 3 2. + <_> + + <_> + 12 10 2 6 -1. + <_> + 12 13 2 3 2. + <_> + + <_> + 1 0 18 2 -1. + <_> + 7 0 6 2 3. + <_> + + <_> + 9 7 4 6 -1. + <_> + 9 7 2 3 2. + <_> + 11 10 2 3 2. + <_> + + <_> + 12 10 2 4 -1. + <_> + 13 10 1 4 2. + <_> + + <_> + 13 12 1 2 -1. + <_> + 13 13 1 1 2. + <_> + + <_> + 13 18 2 2 -1. + <_> + 14 18 1 2 2. + <_> + + <_> + 15 4 2 1 -1. + <_> + 16 4 1 1 2. + <_> + + <_> + 5 7 6 3 -1. + <_> + 7 7 2 3 3. + <_> + + <_> + 5 8 8 3 -1. + <_> + 9 8 4 3 2. + <_> + + <_> + 6 12 6 3 -1. + <_> + 9 12 3 3 2. + <_> + + <_> + 12 14 3 6 -1. + <_> + 13 14 1 6 3. + <_> + + <_> + 18 9 2 8 -1. + <_> + 18 9 1 4 2. + <_> + 19 13 1 4 2. + <_> + + <_> + 5 5 7 3 -1. + <_> + 5 6 7 1 3. + <_> + + <_> + 10 13 2 2 -1. + <_> + 10 13 1 1 2. + <_> + 11 14 1 1 2. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 9 13 4 2 -1. + <_> + 9 13 2 1 2. + <_> + 11 14 2 1 2. + <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 7 10 3 6 -1. + <_> + 7 12 3 2 3. + <_> + + <_> + 13 8 4 4 -1. + <_> + 13 10 4 2 2. + <_> + + <_> + 8 0 12 18 -1. + <_> + 8 9 12 9 2. + <_> + + <_> + 18 9 2 10 -1. + <_> + 18 9 1 5 2. + <_> + 19 14 1 5 2. + <_> + + <_> + 14 2 3 6 -1. + <_> + 14 5 3 3 2. + <_> + + <_> + 10 0 3 14 -1. + <_> + 11 0 1 14 3. + <_> + + <_> + 6 16 8 4 -1. + <_> + 6 16 4 2 2. + <_> + 10 18 4 2 2. + <_> + + <_> + 5 3 5 12 -1. + <_> + 5 7 5 4 3. + <_> + + <_> + 4 15 6 3 -1. + <_> + 4 16 6 1 3. + <_> + + <_> + 6 15 1 3 -1. + <_> + 6 16 1 1 3. + <_> + + <_> + 13 1 2 1 -1. + <_> + 14 1 1 1 2. + <_> + + <_> + 2 2 18 9 -1. + <_> + 11 2 9 9 2. + <_> + + <_> + 4 16 2 4 -1. + <_> + 4 16 1 2 2. + <_> + 5 18 1 2 2. + <_> + + <_> + 15 1 3 8 -1. + <_> + 16 1 1 8 3. + <_> + + <_> + 11 11 2 3 -1. + <_> + 11 12 2 1 3. + <_> + + <_> + 9 9 2 4 -1. + <_> + 9 11 2 2 2. + <_> + + <_> + 5 9 8 4 -1. + <_> + 5 9 4 2 2. + <_> + 9 11 4 2 2. + <_> + + <_> + 9 6 2 3 -1. + <_> + 9 7 2 1 3. + <_> + + <_> + 7 9 2 3 -1. + <_> + 7 10 2 1 3. + <_> + + <_> + 11 15 4 3 -1. + <_> + 11 16 4 1 3. + <_> + + <_> + 8 6 2 3 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 6 8 2 3 -1. + <_> + 6 9 2 1 3. + <_> + + <_> + 6 9 6 3 -1. + <_> + 8 9 2 3 3. + <_> + + <_> + 6 9 4 2 -1. + <_> + 6 9 2 1 2. + <_> + 8 10 2 1 2. + <_> + + <_> + 4 7 9 1 -1. + <_> + 7 7 3 1 3. + <_> + + <_> + 5 7 2 6 -1. + <_> + 5 7 1 3 2. + <_> + 6 10 1 3 2. 
+ <_> + + <_> + 4 8 4 8 -1. + <_> + 4 12 4 4 2. + <_> + + <_> + 7 0 2 19 -1. + <_> + 8 0 1 19 2. + <_> + + <_> + 5 9 1 3 -1. + <_> + 5 10 1 1 3. + <_> + + <_> + 9 5 3 1 -1. + <_> + 10 5 1 1 3. + <_> + + <_> + 16 4 3 6 -1. + <_> + 16 6 3 2 3. + <_> + + <_> + 10 15 5 3 -1. + <_> + 10 16 5 1 3. + <_> + + <_> + 13 1 5 14 -1. + <_> + 13 8 5 7 2. + <_> + + <_> + 3 0 4 4 -1. + <_> + 3 0 2 2 2. + <_> + 5 2 2 2 2. + <_> + + <_> + 6 5 4 13 -1. + <_> + 8 5 2 13 2. + <_> + + <_> + 4 2 2 16 -1. + <_> + 4 2 1 8 2. + <_> + 5 10 1 8 2. + <_> + + <_> + 4 8 8 3 -1. + <_> + 8 8 4 3 2. + <_> + + <_> + 5 6 2 12 -1. + <_> + 5 12 2 6 2. + <_> + + <_> + 8 7 2 4 -1. + <_> + 9 7 1 4 2. + <_> + + <_> + 13 9 5 4 -1. + <_> + 13 11 5 2 2. + <_> + + <_> + 12 0 8 2 -1. + <_> + 12 0 4 1 2. + <_> + 16 1 4 1 2. + <_> + + <_> + 14 0 6 4 -1. + <_> + 14 0 3 2 2. + <_> + 17 2 3 2 2. + <_> + + <_> + 4 9 6 2 -1. + <_> + 6 9 2 2 3. + <_> + + <_> + 13 1 2 1 -1. + <_> + 14 1 1 1 2. + <_> + + <_> + 0 0 12 3 -1. + <_> + 6 0 6 3 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 6 12 1 3 3. + <_> + + <_> + 5 11 4 3 -1. + <_> + 5 12 4 1 3. + <_> + + <_> + 5 13 2 4 -1. + <_> + 5 13 1 2 2. + <_> + 6 15 1 2 2. + <_> + + <_> + 4 11 3 3 -1. + <_> + 4 12 3 1 3. + <_> + + <_> + 1 8 6 2 -1. + <_> + 1 9 6 1 2. + <_> + + <_> + 6 8 4 12 -1. + <_> + 6 12 4 4 3. + <_> + + <_> + 7 14 6 4 -1. + <_> + 7 14 3 2 2. + <_> + 10 16 3 2 2. + <_> + + <_> + 8 16 8 4 -1. + <_> + 8 16 4 2 2. + <_> + 12 18 4 2 2. + <_> + + <_> + 5 10 10 6 -1. + <_> + 5 12 10 2 3. + <_> + + <_> + 6 13 1 3 -1. + <_> + 6 14 1 1 3. + <_> + + <_> + 3 11 4 6 -1. + <_> + 3 13 4 2 3. + <_> + + <_> + 10 14 6 3 -1. + <_> + 10 15 6 1 3. + <_> + + <_> + 3 15 4 2 -1. + <_> + 5 15 2 2 2. + <_> + + <_> + 3 14 4 3 -1. + <_> + 5 14 2 3 2. + <_> + + <_> + 1 2 1 2 -1. + <_> + 1 3 1 1 2. + <_> + + <_> + 0 12 8 4 -1. + <_> + 4 12 4 4 2. + <_> + + <_> + 1 2 1 2 -1. + <_> + 1 3 1 1 2. + <_> + + <_> + 5 11 1 3 -1. + <_> + 5 12 1 1 3. + <_> + + <_> + 10 19 2 1 -1. + <_> + 11 19 1 1 2. + <_> + + <_> + 6 6 4 4 -1. + <_> + 6 6 2 2 2. + <_> + 8 8 2 2 2. + <_> + + <_> + 6 3 1 2 -1. + <_> + 6 4 1 1 2. + <_> + + <_> + 0 4 10 2 -1. + <_> + 5 4 5 2 2. + <_> + + <_> + 4 5 2 1 -1. + <_> + 5 5 1 1 2. + <_> + + <_> + 0 12 2 1 -1. + <_> + 1 12 1 1 2. + <_> + + <_> + 1 4 6 11 -1. + <_> + 3 4 2 11 3. + <_> + + <_> + 6 4 2 1 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 7 0 1 6 -1. + <_> + 7 2 1 2 3. + <_> + + <_> + 7 0 8 4 -1. + <_> + 7 2 8 2 2. + <_> + + <_> + 13 6 2 2 -1. + <_> + 13 7 2 1 2. + <_> + + <_> + 16 15 2 2 -1. + <_> + 16 15 1 1 2. + <_> + 17 16 1 1 2. + <_> + + <_> + 11 12 1 2 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 4 4 5 2 -1. + <_> + 4 5 5 1 2. + <_> + + <_> + 4 3 3 9 -1. + <_> + 4 6 3 3 3. + <_> + + <_> + 6 7 2 3 -1. + <_> + 7 7 1 3 2. + <_> + + <_> + 4 8 6 1 -1. + <_> + 7 8 3 1 2. + <_> + + <_> + 3 8 12 5 -1. + <_> + 9 8 6 5 2. + <_> + + <_> + 9 8 1 3 -1. + <_> + 9 9 1 1 3. + <_> + + <_> + 9 9 6 1 -1. + <_> + 12 9 3 1 2. + <_> + + <_> + 13 7 7 6 -1. + <_> + 13 9 7 2 3. + <_> + + <_> + 0 2 20 18 -1. + <_> + 10 2 10 18 2. + <_> + + <_> + 12 5 6 3 -1. + <_> + 12 6 6 1 3. + <_> + + <_> + 8 8 3 2 -1. + <_> + 8 9 3 1 2. + <_> + + <_> + 4 9 11 6 -1. + <_> + 4 11 11 2 3. + <_> + + <_> + 7 7 7 6 -1. + <_> + 7 10 7 3 2. + <_> + + <_> + 15 7 2 8 -1. + <_> + 15 7 1 4 2. + <_> + 16 11 1 4 2. + <_> + + <_> + 4 10 2 6 -1. + <_> + 4 12 2 2 3. + <_> + + <_> + 7 13 2 2 -1. + <_> + 7 13 1 1 2. + <_> + 8 14 1 1 2. + <_> + + <_> + 7 2 3 4 -1. + <_> + 8 2 1 4 3. + <_> + + <_> + 7 3 2 3 -1. + <_> + 8 3 1 3 2. + <_> + + <_> + 5 9 3 1 -1. 
+ <_> + 6 9 1 1 3. + <_> + + <_> + 14 6 3 8 -1. + <_> + 15 6 1 8 3. + <_> + + <_> + 4 10 2 6 -1. + <_> + 4 13 2 3 2. + <_> + + <_> + 0 17 10 3 -1. + <_> + 0 18 10 1 3. + <_> + + <_> + 5 18 7 2 -1. + <_> + 5 19 7 1 2. + <_> + + <_> + 13 12 1 3 -1. + <_> + 13 13 1 1 3. + <_> + + <_> + 9 2 4 16 -1. + <_> + 9 2 2 8 2. + <_> + 11 10 2 8 2. + <_> + + <_> + 6 7 2 3 -1. + <_> + 6 8 2 1 3. + <_> + + <_> + 9 8 2 4 -1. + <_> + 9 10 2 2 2. + <_> + + <_> + 18 4 2 3 -1. + <_> + 18 5 2 1 3. + <_> + + <_> + 16 10 2 2 -1. + <_> + 16 10 1 1 2. + <_> + 17 11 1 1 2. + <_> + + <_> + 14 2 6 6 -1. + <_> + 14 4 6 2 3. + <_> + + <_> + 16 11 3 1 -1. + <_> + 17 11 1 1 3. + <_> + + <_> + 17 10 2 1 -1. + <_> + 18 10 1 1 2. + <_> + + <_> + 16 8 2 4 -1. + <_> + 17 8 1 4 2. + <_> + + <_> + 11 15 6 3 -1. + <_> + 11 16 6 1 3. + <_> + + <_> + 3 7 3 4 -1. + <_> + 4 7 1 4 3. + <_> + + <_> + 3 5 3 5 -1. + <_> + 4 5 1 5 3. + <_> + + <_> + 2 10 6 1 -1. + <_> + 5 10 3 1 2. + <_> + + <_> + 12 0 4 2 -1. + <_> + 14 0 2 2 2. + <_> + + <_> + 9 14 1 2 -1. + <_> + 9 15 1 1 2. + <_> + + <_> + 15 12 5 6 -1. + <_> + 15 14 5 2 3. + <_> + + <_> + 4 13 10 4 -1. + <_> + 4 15 10 2 2. + <_> + + <_> + 7 16 6 4 -1. + <_> + 7 16 3 2 2. + <_> + 10 18 3 2 2. + <_> + + <_> + 9 16 7 3 -1. + <_> + 9 17 7 1 3. + <_> + + <_> + 4 8 2 2 -1. + <_> + 4 8 1 1 2. + <_> + 5 9 1 1 2. + <_> + + <_> + 0 17 20 2 -1. + <_> + 10 17 10 2 2. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 4 7 2 6 -1. + <_> + 4 7 1 3 2. + <_> + 5 10 1 3 2. + <_> + + <_> + 11 11 1 2 -1. + <_> + 11 12 1 1 2. + <_> + + <_> + 10 13 5 2 -1. + <_> + 10 14 5 1 2. + <_> + + <_> + 8 16 3 3 -1. + <_> + 8 17 3 1 3. + <_> + + <_> + 9 18 3 1 -1. + <_> + 10 18 1 1 3. + <_> + + <_> + 8 6 11 12 -1. + <_> + 8 10 11 4 3. + <_> + + <_> + 2 4 13 12 -1. + <_> + 2 10 13 6 2. + <_> + + <_> + 0 15 10 4 -1. + <_> + 0 15 5 2 2. + <_> + 5 17 5 2 2. + <_> + + <_> + 4 8 6 2 -1. + <_> + 7 8 3 2 2. + <_> + + <_> + 10 1 6 2 -1. + <_> + 12 1 2 2 3. + <_> + + <_> + 7 8 6 7 -1. + <_> + 9 8 2 7 3. + <_> + + <_> + 9 9 6 2 -1. + <_> + 11 9 2 2 3. + <_> + + <_> + 3 14 15 4 -1. + <_> + 8 14 5 4 3. + <_> + + <_> + 7 3 2 14 -1. + <_> + 7 10 2 7 2. + <_> + + <_> + 11 14 1 2 -1. + <_> + 11 15 1 1 2. + <_> + + <_> + 5 11 1 3 -1. + <_> + 5 12 1 1 3. + <_> + + <_> + 11 14 3 3 -1. + <_> + 11 15 3 1 3. + <_> + + <_> + 10 7 9 4 -1. + <_> + 13 7 3 4 3. + <_> + + <_> + 11 6 6 5 -1. + <_> + 14 6 3 5 2. + <_> + + <_> + 8 9 1 2 -1. + <_> + 8 10 1 1 2. + <_> + + <_> + 16 3 1 10 -1. + <_> + 16 8 1 5 2. + <_> + + <_> + 6 11 10 4 -1. + <_> + 6 13 10 2 2. + <_> + + <_> + 5 7 2 2 -1. + <_> + 6 7 1 2 2. + <_> + + <_> + 1 6 6 11 -1. + <_> + 4 6 3 11 2. + <_> + + <_> + 6 8 3 2 -1. + <_> + 6 9 3 1 2. + <_> + + <_> + 10 15 1 2 -1. + <_> + 10 16 1 1 2. + <_> + + <_> + 8 0 12 1 -1. + <_> + 14 0 6 1 2. + <_> + + <_> + 5 3 2 2 -1. + <_> + 6 3 1 2 2. + <_> + + <_> + 11 6 6 5 -1. + <_> + 14 6 3 5 2. + <_> + + <_> + 6 12 3 3 -1. + <_> + 6 13 3 1 3. + <_> + + <_> + 10 10 3 3 -1. + <_> + 11 10 1 3 3. + <_> + + <_> + 6 13 2 2 -1. + <_> + 6 14 2 1 2. + <_> + + <_> + 4 2 16 8 -1. + <_> + 12 2 8 8 2. + <_> + + <_> + 10 12 2 2 -1. + <_> + 10 12 1 1 2. + <_> + 11 13 1 1 2. + <_> + + <_> + 10 7 2 2 -1. + <_> + 11 7 1 2 2. + <_> + + <_> + 13 13 1 3 -1. + <_> + 13 14 1 1 3. + <_> + + <_> + 13 13 2 3 -1. + <_> + 13 14 2 1 3. + <_> + + <_> + 1 13 6 4 -1. + <_> + 4 13 3 4 2. + <_> + + <_> + 10 13 2 1 -1. + <_> + 11 13 1 1 2. + <_> + + <_> + 10 6 2 10 -1. + <_> + 10 6 1 5 2. + <_> + 11 11 1 5 2. + <_> + + <_> + 16 11 2 2 -1. + <_> + 16 11 1 1 2. 
+ <_> + 17 12 1 1 2. + <_> + + <_> + 16 12 3 1 -1. + <_> + 17 12 1 1 3. + <_> + + <_> + 9 5 7 12 -1. + <_> + 9 9 7 4 3. + <_> + + <_> + 4 1 10 18 -1. + <_> + 4 1 5 9 2. + <_> + 9 10 5 9 2. + <_> + + <_> + 17 12 2 2 -1. + <_> + 17 12 1 1 2. + <_> + 18 13 1 1 2. + <_> + + <_> + 12 5 6 2 -1. + <_> + 12 6 6 1 2. + <_> + + <_> + 4 7 5 2 -1. + <_> + 4 8 5 1 2. + <_> + + <_> + 7 3 1 2 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 6 0 7 6 -1. + <_> + 6 3 7 3 2. + <_> + + <_> + 13 11 2 8 -1. + <_> + 13 11 1 4 2. + <_> + 14 15 1 4 2. + <_> + + <_> + 8 7 4 2 -1. + <_> + 10 7 2 2 2. + <_> + + <_> + 4 1 2 4 -1. + <_> + 4 1 1 2 2. + <_> + 5 3 1 2 2. + <_> + + <_> + 4 0 2 8 -1. + <_> + 4 0 1 4 2. + <_> + 5 4 1 4 2. + <_> + + <_> + 6 3 2 1 -1. + <_> + 7 3 1 1 2. + <_> + + <_> + 14 12 1 3 -1. + <_> + 14 13 1 1 3. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 11 2 2 -1. + <_> + 5 12 2 1 2. + <_> + + <_> + 5 1 4 15 -1. + <_> + 5 6 4 5 3. + <_> + + <_> + 11 5 4 14 -1. + <_> + 11 5 2 7 2. + <_> + 13 12 2 7 2. + <_> + + <_> + 9 18 3 1 -1. + <_> + 10 18 1 1 3. + <_> + + <_> + 4 10 5 6 -1. + <_> + 4 12 5 2 3. + <_> + + <_> + 5 13 3 3 -1. + <_> + 5 14 3 1 3. + <_> + + <_> + 8 1 3 5 -1. + <_> + 9 1 1 5 3. + <_> + + <_> + 4 7 3 2 -1. + <_> + 5 7 1 2 3. + <_> + + <_> + 6 14 3 3 -1. + <_> + 7 14 1 3 3. + <_> + + <_> + 7 13 2 3 -1. + <_> + 7 14 2 1 3. + <_> + + <_> + 4 3 2 9 -1. + <_> + 4 6 2 3 3. + <_> + + <_> + 4 8 3 2 -1. + <_> + 4 9 3 1 2. + <_> + + <_> + 10 10 2 2 -1. + <_> + 10 11 2 1 2. + <_> + + <_> + 7 8 12 6 -1. + <_> + 7 8 6 3 2. + <_> + 13 11 6 3 2. + <_> + + <_> + 14 10 3 2 -1. + <_> + 14 11 3 1 2. + <_> + + <_> + 5 16 6 2 -1. + <_> + 5 17 6 1 2. + <_> + + <_> + 8 15 4 3 -1. + <_> + 8 16 4 1 3. + <_> + + <_> + 14 9 2 2 -1. + <_> + 14 10 2 1 2. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 8 5 3 3 -1. + <_> + 8 6 3 1 3. + <_> + + <_> + 1 7 17 9 -1. + <_> + 1 10 17 3 3. + <_> + + <_> + 5 10 6 8 -1. + <_> + 5 14 6 4 2. + <_> + + <_> + 18 1 2 2 -1. + <_> + 18 1 1 1 2. + <_> + 19 2 1 1 2. + <_> + + <_> + 0 0 11 6 -1. + <_> + 0 3 11 3 2. + <_> + + <_> + 3 0 16 3 -1. + <_> + 3 1 16 1 3. + <_> + + <_> + 10 10 10 3 -1. + <_> + 10 11 10 1 3. + <_> + + <_> + 0 0 15 18 -1. + <_> + 0 9 15 9 2. + <_> + + <_> + 15 11 2 2 -1. + <_> + 15 11 1 1 2. + <_> + 16 12 1 1 2. + <_> + + <_> + 14 12 6 3 -1. + <_> + 17 12 3 3 2. + <_> + + <_> + 8 4 3 4 -1. + <_> + 9 4 1 4 3. + <_> + + <_> + 8 6 12 4 -1. + <_> + 12 6 4 4 3. + <_> + + <_> + 9 12 2 2 -1. + <_> + 9 13 2 1 2. + <_> + + <_> + 6 3 1 2 -1. + <_> + 6 4 1 1 2. + <_> + + <_> + 4 7 2 8 -1. + <_> + 4 7 1 4 2. + <_> + 5 11 1 4 2. + <_> + + <_> + 9 17 3 2 -1. + <_> + 10 17 1 2 3. + <_> + + <_> + 9 6 1 3 -1. + <_> + 9 7 1 1 3. + <_> + + <_> + 6 4 1 6 -1. + <_> + 6 7 1 3 2. + <_> + + <_> + 5 6 13 6 -1. + <_> + 5 8 13 2 3. + <_> + + <_> + 6 7 4 12 -1. + <_> + 8 7 2 12 2. + <_> + + <_> + 6 12 2 4 -1. + <_> + 7 12 1 4 2. + <_> + + <_> + 5 14 4 3 -1. + <_> + 5 15 4 1 3. + <_> + + <_> + 10 5 3 1 -1. + <_> + 11 5 1 1 3. + <_> + + <_> + 4 15 4 3 -1. + <_> + 4 16 4 1 3. + <_> + + <_> + 11 12 3 2 -1. + <_> + 12 12 1 2 3. + <_> + + <_> + 11 10 8 2 -1. + <_> + 15 10 4 2 2. + <_> + + <_> + 14 18 6 2 -1. + <_> + 17 18 3 2 2. + <_> + + <_> + 7 5 3 2 -1. + <_> + 8 5 1 2 3. + <_> + + <_> + 11 8 2 1 -1. + <_> + 12 8 1 1 2. + <_> + + <_> + 12 6 3 6 -1. + <_> + 12 8 3 2 3. + <_> + + <_> + 11 9 1 2 -1. + <_> + 11 10 1 1 2. + <_> + + <_> + 12 9 3 9 -1. + <_> + 13 9 1 9 3. + <_> + + <_> + 0 8 1 3 -1. + <_> + 0 9 1 1 3. + <_> + + <_> + 0 8 1 3 -1. + <_> + 0 9 1 1 3. 
+ <_> + + <_> + 3 8 2 2 -1. + <_> + 3 8 1 1 2. + <_> + 4 9 1 1 2. + <_> + + <_> + 4 6 2 6 -1. + <_> + 4 9 2 3 2. + <_> + + <_> + 4 9 2 9 -1. + <_> + 4 12 2 3 3. + <_> + + <_> + 7 13 2 2 -1. + <_> + 7 13 1 1 2. + <_> + 8 14 1 1 2. + <_> + + <_> + 3 6 10 6 -1. + <_> + 3 6 5 3 2. + <_> + 8 9 5 3 2. + <_> + + <_> + 9 9 4 6 -1. + <_> + 11 9 2 6 2. + <_> + + <_> + 2 12 14 3 -1. + <_> + 9 12 7 3 2. + <_> + + <_> + 0 0 11 18 -1. + <_> + 0 9 11 9 2. + <_> + + <_> + 4 18 4 2 -1. + <_> + 4 18 2 1 2. + <_> + 6 19 2 1 2. + <_> + + <_> + 7 13 4 6 -1. + <_> + 7 13 2 3 2. + <_> + 9 16 2 3 2. + <_> + + <_> + 8 17 3 1 -1. + <_> + 9 17 1 1 3. + <_> + + <_> + 5 14 8 6 -1. + <_> + 5 14 4 3 2. + <_> + 9 17 4 3 2. + <_> + + <_> + 7 12 2 3 -1. + <_> + 7 13 2 1 3. + <_> + + <_> + 14 4 4 2 -1. + <_> + 14 4 2 1 2. + <_> + 16 5 2 1 2. + <_> + + <_> + 7 13 2 3 -1. + <_> + 7 14 2 1 3. + <_> + + <_> + 7 14 4 2 -1. + <_> + 7 14 2 1 2. + <_> + 9 15 2 1 2. + <_> + + <_> + 10 14 2 6 -1. + <_> + 10 16 2 2 3. + <_> + + <_> + 9 6 9 1 -1. + <_> + 12 6 3 1 3. + <_> + + <_> + 2 5 18 7 -1. + <_> + 11 5 9 7 2. + <_> + + <_> + 18 6 1 2 -1. + <_> + 18 7 1 1 2. + <_> + + <_> + 4 14 14 6 -1. + <_> + 4 17 14 3 2. + <_> + + <_> + 8 0 6 20 -1. + <_> + 10 0 2 20 3. + <_> + + <_> + 12 0 8 18 -1. + <_> + 12 9 8 9 2. + <_> + + <_> + 12 5 2 1 -1. + <_> + 13 5 1 1 2. + <_> + + <_> + 0 6 6 13 -1. + <_> + 3 6 3 13 2. + <_> + + <_> + 3 15 3 4 -1. + <_> + 4 15 1 4 3. + <_> + + <_> + 3 13 3 6 -1. + <_> + 4 13 1 6 3. + <_> + + <_> + 3 11 9 2 -1. + <_> + 6 11 3 2 3. + <_> + + <_> + 0 11 6 8 -1. + <_> + 3 11 3 8 2. + <_> + + <_> + 16 0 3 7 -1. + <_> + 17 0 1 7 3. + <_> + + <_> + 16 1 2 6 -1. + <_> + 16 1 1 3 2. + <_> + 17 4 1 3 2. + <_> + + <_> + 3 7 6 10 -1. + <_> + 3 7 3 5 2. + <_> + 6 12 3 5 2. + <_> + + <_> + 2 0 6 7 -1. + <_> + 5 0 3 7 2. + <_> + + <_> + 1 2 12 2 -1. + <_> + 5 2 4 2 3. + <_> + + <_> + 6 4 1 2 -1. + <_> + 6 5 1 1 2. + <_> + + <_> + 0 14 8 6 -1. + <_> + 4 14 4 6 2. + <_> + + <_> + 3 11 9 3 -1. + <_> + 6 11 3 3 3. + <_> + + <_> + 4 14 2 2 -1. + <_> + 4 14 1 1 2. + <_> + 5 15 1 1 2. + <_> + + <_> + 11 2 3 2 -1. + <_> + 12 2 1 2 3. + <_> + + <_> + 18 5 2 6 -1. + <_> + 18 5 1 3 2. + <_> + 19 8 1 3 2. + <_> + + <_> + 0 5 1 2 -1. + <_> + 0 6 1 1 2. + <_> + + <_> + 8 4 6 1 -1. + <_> + 11 4 3 1 2. + <_> + + <_> + 4 5 2 3 -1. + <_> + 5 5 1 3 2. + <_> + + <_> + 1 3 6 4 -1. + <_> + 3 3 2 4 3. + <_> + + <_> + 12 5 6 1 -1. + <_> + 14 5 2 1 3. + <_> + + <_> + 6 9 3 3 -1. + <_> + 6 10 3 1 3. + <_> + + <_> + 4 3 2 2 -1. + <_> + 4 4 2 1 2. + <_> + + <_> + 8 7 3 3 -1. + <_> + 8 8 3 1 3. + <_> + + <_> + 5 5 10 14 -1. + <_> + 5 5 5 7 2. + <_> + 10 12 5 7 2. + <_> + + <_> + 16 5 2 6 -1. + <_> + 16 7 2 2 3. + <_> + + <_> + 19 5 1 3 -1. + <_> + 19 6 1 1 3. + <_> + + <_> + 3 6 2 2 -1. + <_> + 3 6 1 1 2. + <_> + 4 7 1 1 2. + <_> + + <_> + 0 1 10 10 -1. + <_> + 5 1 5 10 2. + <_> + + <_> + 3 0 8 1 -1. + <_> + 7 0 4 1 2. + <_> + + <_> + 14 5 6 1 -1. + <_> + 16 5 2 1 3. + <_> + + <_> + 6 16 1 3 -1. + <_> + 6 17 1 1 3. + <_> + + <_> + 6 14 2 4 -1. + <_> + 6 14 1 2 2. + <_> + 7 16 1 2 2. + <_> + + <_> + 0 7 2 5 -1. + <_> + 1 7 1 5 2. + <_> + + <_> + 18 0 2 8 -1. + <_> + 18 0 1 4 2. + <_> + 19 4 1 4 2. + <_> + + <_> + 5 8 6 2 -1. + <_> + 8 8 3 2 2. + <_> + + <_> + 4 8 8 3 -1. + <_> + 8 8 4 3 2. + <_> + + <_> + 8 0 2 2 -1. + <_> + 8 1 2 1 2. + <_> + + <_> + 13 8 6 11 -1. + <_> + 15 8 2 11 3. + <_> + + <_> + 11 15 9 5 -1. + <_> + 14 15 3 5 3. + <_> + + <_> + 5 4 12 15 -1. + <_> + 9 4 4 15 3. + <_> + + <_> + 16 12 2 8 -1. + <_> + 16 12 1 4 2. + <_> + 17 16 1 4 2. 
+ <_> + + <_> + 7 13 10 6 -1. + <_> + 7 16 10 3 2. + <_> + + <_> + 6 15 3 4 -1. + <_> + 6 17 3 2 2. + <_> + + <_> + 9 5 8 2 -1. + <_> + 13 5 4 2 2. + <_> + + <_> + 5 6 3 4 -1. + <_> + 6 6 1 4 3. + <_> + + <_> + 10 8 7 6 -1. + <_> + 10 10 7 2 3. + <_> + + <_> + 12 13 1 4 -1. + <_> + 12 15 1 2 2. + <_> + + <_> + 2 10 3 4 -1. + <_> + 3 10 1 4 3. + <_> + + <_> + 8 7 6 6 -1. + <_> + 8 7 3 3 2. + <_> + 11 10 3 3 2. + <_> + + <_> + 2 0 15 2 -1. + <_> + 7 0 5 2 3. + <_> + + <_> + 13 10 1 3 -1. + <_> + 13 11 1 1 3. + <_> + + <_> + 2 9 3 4 -1. + <_> + 3 9 1 4 3. + <_> + + <_> + 6 4 3 2 -1. + <_> + 6 5 3 1 2. + <_> + + <_> + 10 16 2 3 -1. + <_> + 11 16 1 3 2. + <_> + + <_> + 7 13 2 3 -1. + <_> + 7 14 2 1 3. + <_> + + <_> + 6 12 2 4 -1. + <_> + 6 12 1 2 2. + <_> + 7 14 1 2 2. + <_> + + <_> + 9 1 6 1 -1. + <_> + 12 1 3 1 2. + <_> + + <_> + 6 6 3 4 -1. + <_> + 7 6 1 4 3. + <_> + + <_> + 9 8 3 3 -1. + <_> + 9 9 3 1 3. + <_> + + <_> + 8 7 12 3 -1. + <_> + 14 7 6 3 2. + <_> + + <_> + 12 10 4 2 -1. + <_> + 12 10 2 1 2. + <_> + 14 11 2 1 2. + <_> + + <_> + 16 11 1 2 -1. + <_> + 16 12 1 1 2. + <_> + + <_> + 6 2 1 2 -1. + <_> + 6 3 1 1 2. + <_> + + <_> + 5 10 2 3 -1. + <_> + 5 11 2 1 3. + <_> + + <_> + 5 9 2 3 -1. + <_> + 5 10 2 1 3. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 12 0 8 2 -1. + <_> + 12 0 4 1 2. + <_> + 16 1 4 1 2. + <_> + + <_> + 10 11 3 8 -1. + <_> + 11 11 1 8 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 5 10 1 6 -1. + <_> + 5 13 1 3 2. + <_> + + <_> + 6 2 6 6 -1. + <_> + 6 2 3 3 2. + <_> + 9 5 3 3 2. + <_> + + <_> + 11 4 1 6 -1. + <_> + 11 6 1 2 3. + <_> + + <_> + 18 3 2 16 -1. + <_> + 18 3 1 8 2. + <_> + 19 11 1 8 2. + <_> + + <_> + 10 12 3 2 -1. + <_> + 11 12 1 2 3. + <_> + + <_> + 7 14 2 3 -1. + <_> + 7 15 2 1 3. + <_> + + <_> + 16 12 2 1 -1. + <_> + 17 12 1 1 2. + <_> + + <_> + 15 6 4 2 -1. + <_> + 15 7 4 1 2. + <_> + + <_> + 4 6 2 3 -1. + <_> + 4 7 2 1 3. + <_> + + <_> + 8 19 6 1 -1. + <_> + 11 19 3 1 2. + <_> + + <_> + 9 12 3 3 -1. + <_> + 9 13 3 1 3. + <_> + + <_> + 10 12 1 3 -1. + <_> + 10 13 1 1 3. + <_> + + <_> + 8 6 2 3 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 5 7 6 5 -1. + <_> + 8 7 3 5 2. + <_> + + <_> + 14 10 1 2 -1. + <_> + 14 11 1 1 2. + <_> + + <_> + 11 7 6 3 -1. + <_> + 13 7 2 3 3. + <_> + + <_> + 14 6 6 1 -1. + <_> + 16 6 2 1 3. + <_> + + <_> + 9 7 1 3 -1. + <_> + 9 8 1 1 3. + <_> + + <_> + 9 5 2 8 -1. + <_> + 9 5 1 4 2. + <_> + 10 9 1 4 2. + <_> + + <_> + 6 12 1 4 -1. + <_> + 6 14 1 2 2. + <_> + + <_> + 5 13 4 2 -1. + <_> + 5 14 4 1 2. + <_> + + <_> + 12 9 2 4 -1. + <_> + 12 11 2 2 2. + <_> + + <_> + 12 7 3 6 -1. + <_> + 13 7 1 6 3. + <_> + + <_> + 5 0 2 14 -1. + <_> + 5 7 2 7 2. + <_> + + <_> + 9 3 1 2 -1. + <_> + 9 4 1 1 2. + <_> + + <_> + 6 1 14 12 -1. + <_> + 6 5 14 4 3. + <_> + + <_> + 13 6 7 6 -1. + <_> + 13 9 7 3 2. + <_> + + <_> + 14 9 3 3 -1. + <_> + 14 10 3 1 3. + <_> + + <_> + 17 12 3 1 -1. + <_> + 18 12 1 1 3. + <_> + + <_> + 8 2 3 2 -1. + <_> + 9 2 1 2 3. + <_> + + <_> + 7 11 2 1 -1. + <_> + 8 11 1 1 2. + <_> + + <_> + 5 7 3 2 -1. + <_> + 5 8 3 1 2. + <_> + + <_> + 11 11 2 1 -1. + <_> + 12 11 1 1 2. + <_> + + <_> + 11 11 3 1 -1. + <_> + 12 11 1 1 3. + <_> + + <_> + 9 5 1 3 -1. + <_> + 9 6 1 1 3. + <_> + + <_> + 12 9 1 2 -1. + <_> + 12 10 1 1 2. + <_> + + <_> + 12 7 2 3 -1. + <_> + 13 7 1 3 2. + <_> + + <_> + 5 11 6 3 -1. + <_> + 8 11 3 3 2. + <_> + + <_> + 6 10 2 2 -1. + <_> + 6 10 1 1 2. + <_> + 7 11 1 1 2. + <_> + + <_> + 17 2 1 9 -1. + <_> + 17 5 1 3 3. 
+ <_> + + <_> + 4 7 2 6 -1. + <_> + 4 7 1 3 2. + <_> + 5 10 1 3 2. + <_> + + <_> + 0 1 11 18 -1. + <_> + 0 10 11 9 2. + <_> + + <_> + 7 6 2 8 -1. + <_> + 7 10 2 4 2. + <_> + + <_> + 6 2 4 6 -1. + <_> + 6 5 4 3 2. + <_> + + <_> + 2 12 12 4 -1. + <_> + 2 14 12 2 2. + <_> + + <_> + 9 0 6 1 -1. + <_> + 12 0 3 1 2. + <_> + + <_> + 5 0 12 2 -1. + <_> + 5 1 12 1 2. + <_> + + <_> + 10 0 2 1 -1. + <_> + 11 0 1 1 2. + <_> + + <_> + 7 14 3 3 -1. + <_> + 7 15 3 1 3. + <_> + + <_> + 4 13 5 3 -1. + <_> + 4 14 5 1 3. + <_> + + <_> + 9 16 6 2 -1. + <_> + 9 17 6 1 2. + <_> + + <_> + 11 16 5 3 -1. + <_> + 11 17 5 1 3. + <_> + + <_> + 5 0 3 15 -1. + <_> + 6 0 1 15 3. + <_> + + <_> + 9 16 8 4 -1. + <_> + 9 18 8 2 2. + <_> + + <_> + 0 6 3 2 -1. + <_> + 0 7 3 1 2. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 9 11 4 2 -1. + <_> + 9 11 2 1 2. + <_> + 11 12 2 1 2. + <_> + + <_> + 4 13 2 2 -1. + <_> + 4 13 1 1 2. + <_> + 5 14 1 1 2. + <_> + + <_> + 6 4 1 2 -1. + <_> + 6 5 1 1 2. + <_> + + <_> + 14 18 2 2 -1. + <_> + 14 18 1 1 2. + <_> + 15 19 1 1 2. + <_> + + <_> + 7 10 5 6 -1. + <_> + 7 12 5 2 3. + <_> + + <_> + 8 7 4 6 -1. + <_> + 8 9 4 2 3. + <_> + + <_> + 7 9 6 2 -1. + <_> + 9 9 2 2 3. + <_> + + <_> + 6 6 6 4 -1. + <_> + 6 6 3 2 2. + <_> + 9 8 3 2 2. + <_> + + <_> + 10 3 1 6 -1. + <_> + 10 5 1 2 3. + <_> + + <_> + 5 2 12 14 -1. + <_> + 5 2 6 7 2. + <_> + 11 9 6 7 2. + <_> + + <_> + 13 5 6 2 -1. + <_> + 13 6 6 1 2. + <_> + + <_> + 16 0 4 8 -1. + <_> + 16 0 2 4 2. + <_> + 18 4 2 4 2. + <_> + + <_> + 3 12 3 1 -1. + <_> + 4 12 1 1 3. + <_> + + <_> + 3 10 3 4 -1. + <_> + 4 10 1 4 3. + <_> + + <_> + 4 6 1 6 -1. + <_> + 4 9 1 3 2. + <_> + + <_> + 3 7 15 1 -1. + <_> + 8 7 5 1 3. + <_> + + <_> + 1 15 6 5 -1. + <_> + 4 15 3 5 2. + <_> + + <_> + 11 9 8 4 -1. + <_> + 15 9 4 4 2. + <_> + + <_> + 15 7 2 4 -1. + <_> + 16 7 1 4 2. + <_> + + <_> + 19 1 1 2 -1. + <_> + 19 2 1 1 2. + <_> + + <_> + 6 15 3 3 -1. + <_> + 7 15 1 3 3. + <_> + + <_> + 3 16 3 1 -1. + <_> + 4 16 1 1 3. + <_> + + <_> + 3 10 3 10 -1. + <_> + 4 10 1 10 3. + <_> + + <_> + 18 17 2 2 -1. + <_> + 18 17 1 1 2. + <_> + 19 18 1 1 2. + <_> + + <_> + 3 12 6 4 -1. + <_> + 3 12 3 2 2. + <_> + 6 14 3 2 2. + <_> + + <_> + 5 17 2 2 -1. + <_> + 5 17 1 1 2. + <_> + 6 18 1 1 2. + <_> + + <_> + 7 16 2 3 -1. + <_> + 7 17 2 1 3. + <_> + + <_> + 5 11 6 3 -1. + <_> + 8 11 3 3 2. + <_> + + <_> + 7 16 1 3 -1. + <_> + 7 17 1 1 3. + <_> + + <_> + 0 16 2 1 -1. + <_> + 1 16 1 1 2. + <_> + + <_> + 11 7 9 6 -1. + <_> + 11 10 9 3 2. + <_> + + <_> + 9 4 9 16 -1. + <_> + 12 4 3 16 3. + <_> + + <_> + 14 12 5 3 -1. + <_> + 14 13 5 1 3. + <_> + + <_> + 8 18 3 2 -1. + <_> + 9 18 1 2 3. + <_> + + <_> + 4 0 11 16 -1. + <_> + 4 8 11 8 2. + <_> + + <_> + 2 4 12 15 -1. + <_> + 2 9 12 5 3. + <_> + + <_> + 3 13 11 4 -1. + <_> + 3 15 11 2 2. + <_> + + <_> + 7 5 4 3 -1. + <_> + 7 6 4 1 3. + <_> + + <_> + 6 5 4 3 -1. + <_> + 6 6 4 1 3. + <_> + + <_> + 5 0 2 9 -1. + <_> + 5 3 2 3 3. + <_> + + <_> + 16 8 2 2 -1. + <_> + 16 8 1 1 2. + <_> + 17 9 1 1 2. + <_> + + <_> + 12 10 8 2 -1. + <_> + 12 10 4 1 2. + <_> + 16 11 4 1 2. + <_> + + <_> + 6 2 2 8 -1. + <_> + 7 2 1 8 2. + <_> + + <_> + 6 6 2 3 -1. + <_> + 7 6 1 3 2. + <_> + + <_> + 17 4 1 3 -1. + <_> + 17 5 1 1 3. + <_> + + <_> + 15 13 3 2 -1. + <_> + 16 13 1 2 3. + <_> + + <_> + 11 13 2 3 -1. + <_> + 11 14 2 1 3. + <_> + + <_> + 14 5 6 11 -1. + <_> + 16 5 2 11 3. + <_> + + <_> + 6 0 12 8 -1. + <_> + 12 0 6 8 2. + <_> + + <_> + 7 15 8 4 -1. + <_> + 7 15 4 2 2. + <_> + 11 17 4 2 2. + <_> + + <_> + 4 14 16 6 -1. + <_> + 4 16 16 2 3. 
+ <_> + + <_> + 6 12 2 6 -1. + <_> + 6 12 1 3 2. + <_> + 7 15 1 3 2. + <_> + + <_> + 7 14 6 4 -1. + <_> + 7 14 3 2 2. + <_> + 10 16 3 2 2. + <_> + + <_> + 0 0 2 4 -1. + <_> + 0 0 1 2 2. + <_> + 1 2 1 2 2. + <_> + + <_> + 15 12 1 3 -1. + <_> + 15 13 1 1 3. + <_> + + <_> + 7 16 3 1 -1. + <_> + 8 16 1 1 3. + <_> + + <_> + 1 8 1 2 -1. + <_> + 1 9 1 1 2. + <_> + + <_> + 3 14 3 2 -1. + <_> + 4 14 1 2 3. + <_> + + <_> + 3 13 3 5 -1. + <_> + 4 13 1 5 3. + <_> + + <_> + 7 2 3 4 -1. + <_> + 8 2 1 4 3. + <_> + + <_> + 10 1 4 4 -1. + <_> + 10 3 4 2 2. + <_> + + <_> + 9 2 1 2 -1. + <_> + 9 3 1 1 2. + <_> + + <_> + 7 12 2 2 -1. + <_> + 7 12 1 1 2. + <_> + 8 13 1 1 2. + <_> + + <_> + 4 11 4 4 -1. + <_> + 4 11 2 2 2. + <_> + 6 13 2 2 2. + <_> + + <_> + 9 10 6 4 -1. + <_> + 12 10 3 4 2. + <_> + + <_> + 8 12 3 2 -1. + <_> + 9 12 1 2 3. + <_> + + <_> + 13 9 6 6 -1. + <_> + 13 9 3 3 2. + <_> + 16 12 3 3 2. + <_> + + <_> + 14 0 3 5 -1. + <_> + 15 0 1 5 3. + <_> + + <_> + 9 8 6 4 -1. + <_> + 9 8 3 2 2. + <_> + 12 10 3 2 2. + <_> + + <_> + 10 6 3 3 -1. + <_> + 11 6 1 3 3. + <_> + + <_> + 13 3 2 1 -1. + <_> + 14 3 1 1 2. + <_> + + <_> + 4 5 2 2 -1. + <_> + 4 5 1 1 2. + <_> + 5 6 1 1 2. + <_> + + <_> + 4 5 2 2 -1. + <_> + 4 5 1 1 2. + <_> + 5 6 1 1 2. + <_> + + <_> + 7 9 2 6 -1. + <_> + 7 11 2 2 3. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 6 13 2 3 -1. + <_> + 6 14 2 1 3. + <_> + + <_> + 7 4 3 2 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 13 1 3 4 -1. + <_> + 14 1 1 4 3. + <_> + + <_> + 6 8 11 3 -1. + <_> + 6 9 11 1 3. + <_> + + <_> + 13 10 5 2 -1. + <_> + 13 11 5 1 2. + <_> + + <_> + 13 9 3 6 -1. + <_> + 13 12 3 3 2. + <_> + + <_> + 3 14 5 2 -1. + <_> + 3 15 5 1 2. + <_> + + <_> + 11 0 8 2 -1. + <_> + 11 0 4 1 2. + <_> + 15 1 4 1 2. + <_> + + <_> + 13 1 7 6 -1. + <_> + 13 3 7 2 3. + <_> + + <_> + 11 0 6 1 -1. + <_> + 13 0 2 1 3. + <_> + + <_> + 8 1 5 3 -1. + <_> + 8 2 5 1 3. + <_> + + <_> + 12 11 1 3 -1. + <_> + 12 12 1 1 3. + <_> + + <_> + 17 13 3 6 -1. + <_> + 17 15 3 2 3. + <_> + + <_> + 12 11 1 3 -1. + <_> + 12 12 1 1 3. + <_> + + <_> + 15 9 3 1 -1. + <_> + 16 9 1 1 3. + <_> + + <_> + 10 4 6 11 -1. + <_> + 13 4 3 11 2. + <_> + + <_> + 13 9 4 4 -1. + <_> + 13 9 2 2 2. + <_> + 15 11 2 2 2. + <_> + + <_> + 8 2 1 6 -1. + <_> + 8 4 1 2 3. + <_> + + <_> + 5 6 4 6 -1. + <_> + 5 9 4 3 2. + <_> + + <_> + 2 6 4 8 -1. + <_> + 4 6 2 8 2. + <_> + + <_> + 11 15 1 2 -1. + <_> + 11 16 1 1 2. + <_> + + <_> + 11 1 7 10 -1. + <_> + 11 6 7 5 2. + <_> + + <_> + 7 11 9 6 -1. + <_> + 7 13 9 2 3. + <_> + + <_> + 4 9 8 1 -1. + <_> + 8 9 4 1 2. + <_> + + <_> + 10 10 3 3 -1. + <_> + 11 10 1 3 3. + <_> + + <_> + 8 0 7 6 -1. + <_> + 8 2 7 2 3. + <_> + + <_> + 11 13 2 2 -1. + <_> + 11 13 1 1 2. + <_> + 12 14 1 1 2. + <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 7 10 3 9 -1. + <_> + 7 13 3 3 3. + <_> + + <_> + 5 9 1 3 -1. + <_> + 5 10 1 1 3. + <_> + + <_> + 2 8 18 6 -1. + <_> + 11 8 9 6 2. + <_> + + <_> + 11 7 6 4 -1. + <_> + 13 7 2 4 3. + <_> + + <_> + 7 8 4 6 -1. + <_> + 7 10 4 2 3. + <_> + + <_> + 10 4 4 6 -1. + <_> + 10 6 4 2 3. + <_> + + <_> + 11 12 6 1 -1. + <_> + 13 12 2 1 3. + <_> + + <_> + 5 7 2 1 -1. + <_> + 6 7 1 1 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 5 13 3 1 3. + <_> + + <_> + 16 17 1 2 -1. + <_> + 16 18 1 1 2. + <_> + + <_> + 1 0 2 1 -1. + <_> + 2 0 1 1 2. + <_> + + <_> + 5 12 2 2 -1. + <_> + 5 13 2 1 2. + <_> + + <_> + 12 13 2 3 -1. + <_> + 12 14 2 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 1 0 2 1 -1. + <_> + 2 0 1 1 2. + <_> + + <_> + 16 0 4 4 -1. 
+ <_> + 16 0 2 2 2. + <_> + 18 2 2 2 2. + <_> + + <_> + 4 5 8 10 -1. + <_> + 4 5 4 5 2. + <_> + 8 10 4 5 2. + <_> + + <_> + 3 14 4 5 -1. + <_> + 5 14 2 5 2. + <_> + + <_> + 2 16 6 2 -1. + <_> + 5 16 3 2 2. + <_> + + <_> + 8 0 8 1 -1. + <_> + 12 0 4 1 2. + <_> + + <_> + 0 4 15 6 -1. + <_> + 0 7 15 3 2. + <_> + + <_> + 9 9 3 2 -1. + <_> + 9 10 3 1 2. + <_> + + <_> + 7 9 2 6 -1. + <_> + 7 11 2 2 3. + <_> + + <_> + 5 10 4 3 -1. + <_> + 5 11 4 1 3. + <_> + + <_> + 12 10 1 2 -1. + <_> + 12 11 1 1 2. + <_> + + <_> + 17 3 1 3 -1. + <_> + 17 4 1 1 3. + <_> + + <_> + 11 9 4 4 -1. + <_> + 11 9 2 2 2. + <_> + 13 11 2 2 2. + <_> + + <_> + 10 14 6 2 -1. + <_> + 10 15 6 1 2. + <_> + + <_> + 11 12 2 8 -1. + <_> + 11 16 2 4 2. + <_> + + <_> + 11 7 5 6 -1. + <_> + 11 10 5 3 2. + <_> + + <_> + 4 2 2 6 -1. + <_> + 5 2 1 6 2. + <_> + + <_> + 6 0 5 2 -1. + <_> + 6 1 5 1 2. + <_> + + <_> + 10 17 4 3 -1. + <_> + 10 18 4 1 3. + <_> + + <_> + 12 3 7 3 -1. + <_> + 12 4 7 1 3. + <_> + + <_> + 8 1 12 8 -1. + <_> + 8 1 6 4 2. + <_> + 14 5 6 4 2. + <_> + + <_> + 11 0 3 20 -1. + <_> + 12 0 1 20 3. + <_> + + <_> + 17 1 2 2 -1. + <_> + 17 1 1 1 2. + <_> + 18 2 1 1 2. + <_> + + <_> + 2 10 7 6 -1. + <_> + 2 12 7 2 3. + <_> + + <_> + 7 3 3 1 -1. + <_> + 8 3 1 1 3. + <_> + + <_> + 4 17 11 3 -1. + <_> + 4 18 11 1 3. + <_> + + <_> + 7 15 3 2 -1. + <_> + 8 15 1 2 3. + <_> + + <_> + 3 4 3 13 -1. + <_> + 4 4 1 13 3. + <_> + + <_> + 5 2 12 14 -1. + <_> + 5 2 6 7 2. + <_> + 11 9 6 7 2. + <_> + + <_> + 0 0 10 6 -1. + <_> + 0 3 10 3 2. + <_> + + <_> + 5 4 2 1 -1. + <_> + 6 4 1 1 2. + <_> + + <_> + 7 7 6 13 -1. + <_> + 10 7 3 13 2. + <_> + + <_> + 7 2 2 8 -1. + <_> + 7 2 1 4 2. + <_> + 8 6 1 4 2. + <_> + + <_> + 6 1 3 4 -1. + <_> + 7 1 1 4 3. + <_> + + <_> + 7 8 2 1 -1. + <_> + 8 8 1 1 2. + <_> + + <_> + 4 0 4 2 -1. + <_> + 4 0 2 1 2. + <_> + 6 1 2 1 2. + <_> + + <_> + 3 10 16 8 -1. + <_> + 3 14 16 4 2. + <_> + + <_> + 10 5 5 10 -1. + <_> + 10 10 5 5 2. + <_> + + <_> + 13 6 3 4 -1. + <_> + 13 8 3 2 2. + <_> + + <_> + 13 10 5 3 -1. + <_> + 13 11 5 1 3. + <_> + + <_> + 16 12 2 2 -1. + <_> + 16 12 1 1 2. + <_> + 17 13 1 1 2. + <_> + + <_> + 16 3 2 1 -1. + <_> + 17 3 1 1 2. + <_> + + <_> + 5 1 3 5 -1. + <_> + 6 1 1 5 3. + <_> + + <_> + 5 7 8 6 -1. + <_> + 5 9 8 2 3. + <_> + + <_> + 6 10 8 2 -1. + <_> + 6 10 4 1 2. + <_> + 10 11 4 1 2. + <_> + + <_> + 6 9 4 8 -1. + <_> + 6 9 2 4 2. + <_> + 8 13 2 4 2. + <_> + + <_> + 0 7 8 4 -1. + <_> + 4 7 4 4 2. + <_> + + <_> + 14 13 2 6 -1. + <_> + 14 13 1 3 2. + <_> + 15 16 1 3 2. + <_> + + <_> + 12 13 2 1 -1. + <_> + 13 13 1 1 2. + <_> + + <_> + 6 8 2 2 -1. + <_> + 6 9 2 1 2. + <_> + + <_> + 15 12 2 1 -1. + <_> + 16 12 1 1 2. + <_> + + <_> + 0 0 18 14 -1. + <_> + 0 7 18 7 2. + <_> + + <_> + 11 5 3 3 -1. + <_> + 12 5 1 3 3. + <_> + + <_> + 4 7 3 3 -1. + <_> + 5 7 1 3 3. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 5 9 1 8 -1. + <_> + 5 13 1 4 2. + <_> + + <_> + 4 2 3 15 -1. + <_> + 5 2 1 15 3. + <_> + + <_> + 15 0 4 4 -1. + <_> + 17 0 2 4 2. + <_> + + <_> + 10 7 1 3 -1. + <_> + 10 8 1 1 3. + <_> + + <_> + 8 17 3 2 -1. + <_> + 9 17 1 2 3. + <_> + + <_> + 8 17 3 2 -1. + <_> + 9 17 1 2 3. + <_> + + <_> + 10 6 1 3 -1. + <_> + 10 7 1 1 3. + <_> + + <_> + 6 7 4 4 -1. + <_> + 6 7 2 2 2. + <_> + 8 9 2 2 2. + <_> + + <_> + 8 7 4 4 -1. + <_> + 8 7 2 2 2. + <_> + 10 9 2 2 2. + <_> + + <_> + 15 8 2 7 -1. + <_> + 16 8 1 7 2. + <_> + + <_> + 8 3 3 2 -1. + <_> + 9 3 1 2 3. + <_> + + <_> + 16 17 3 1 -1. + <_> + 17 17 1 1 3. + <_> + + <_> + 3 2 12 14 -1. + <_> + 3 2 6 7 2. + <_> + 9 9 6 7 2. 
+ <_> + + <_> + 16 16 1 2 -1. + <_> + 16 17 1 1 2. + <_> + + <_> + 7 12 2 3 -1. + <_> + 7 13 2 1 3. + <_> + + <_> + 7 13 2 6 -1. + <_> + 8 13 1 6 2. + <_> + + <_> + 8 14 2 6 -1. + <_> + 8 16 2 2 3. + <_> + + <_> + 6 14 4 6 -1. + <_> + 6 16 4 2 3. + <_> + + <_> + 11 12 3 6 -1. + <_> + 12 12 1 6 3. + <_> + + <_> + 0 6 1 12 -1. + <_> + 0 10 1 4 3. + <_> + + <_> + 3 3 2 10 -1. + <_> + 3 3 1 5 2. + <_> + 4 8 1 5 2. + <_> + + <_> + 3 3 2 8 -1. + <_> + 3 3 1 4 2. + <_> + 4 7 1 4 2. + <_> + + <_> + 9 4 1 12 -1. + <_> + 9 10 1 6 2. + <_> + + <_> + 0 5 6 4 -1. + <_> + 3 5 3 4 2. + <_> + + <_> + 9 9 1 4 -1. + <_> + 9 11 1 2 2. + <_> + + <_> + 4 6 6 4 -1. + <_> + 4 6 3 2 2. + <_> + 7 8 3 2 2. + <_> + + <_> + 6 8 2 2 -1. + <_> + 7 8 1 2 2. + <_> + + <_> + 6 4 4 14 -1. + <_> + 8 4 2 14 2. + <_> + + <_> + 6 7 3 3 -1. + <_> + 7 7 1 3 3. + <_> + + <_> + 4 7 6 5 -1. + <_> + 7 7 3 5 2. + <_> + + <_> + 0 4 8 10 -1. + <_> + 4 4 4 10 2. + <_> + + <_> + 0 6 18 14 -1. + <_> + 9 6 9 14 2. + <_> + + <_> + 11 15 3 5 -1. + <_> + 12 15 1 5 3. + <_> + + <_> + 3 18 4 2 -1. + <_> + 3 18 2 1 2. + <_> + 5 19 2 1 2. + <_> + + <_> + 7 10 2 2 -1. + <_> + 7 11 2 1 2. + <_> + + <_> + 10 1 3 10 -1. + <_> + 10 6 3 5 2. + <_> + + <_> + 9 0 8 10 -1. + <_> + 13 0 4 10 2. + <_> + + <_> + 7 2 8 13 -1. + <_> + 11 2 4 13 2. + <_> + + <_> + 3 3 12 7 -1. + <_> + 9 3 6 7 2. + <_> + + <_> + 11 8 3 2 -1. + <_> + 12 8 1 2 3. + <_> + + <_> + 11 7 2 8 -1. + <_> + 11 7 1 4 2. + <_> + 12 11 1 4 2. + <_> + + <_> + 0 6 3 2 -1. + <_> + 0 7 3 1 2. + <_> + + <_> + 6 17 2 3 -1. + <_> + 6 18 2 1 3. + <_> + + <_> + 4 7 2 2 -1. + <_> + 4 7 1 1 2. + <_> + 5 8 1 1 2. + <_> + + <_> + 9 2 10 9 -1. + <_> + 9 5 10 3 3. + <_> + + <_> + 9 0 10 4 -1. + <_> + 9 0 5 2 2. + <_> + 14 2 5 2 2. + <_> + + <_> + 7 5 2 1 -1. + <_> + 8 5 1 1 2. + <_> + + <_> + 7 5 2 1 -1. + <_> + 8 5 1 1 2. + <_> + + <_> + 4 9 3 3 -1. + <_> + 4 10 3 1 3. + <_> + + <_> + 4 10 4 3 -1. + <_> + 4 11 4 1 3. + <_> + + <_> + 6 7 2 3 -1. + <_> + 6 8 2 1 3. + <_> + + <_> + 18 4 1 3 -1. + <_> + 18 5 1 1 3. + <_> + + <_> + 17 0 3 5 -1. + <_> + 18 0 1 5 3. + <_> + + <_> + 11 2 8 3 -1. + <_> + 11 3 8 1 3. + <_> + + <_> + 14 9 6 5 -1. + <_> + 17 9 3 5 2. + <_> + + <_> + 0 7 4 6 -1. + <_> + 0 9 4 2 3. + <_> + + <_> + 12 7 4 12 -1. + <_> + 12 7 2 6 2. + <_> + 14 13 2 6 2. + <_> + + <_> + 8 7 9 3 -1. + <_> + 11 7 3 3 3. + <_> + + <_> + 12 12 2 3 -1. + <_> + 12 13 2 1 3. + <_> + + <_> + 11 0 6 20 -1. + <_> + 14 0 3 20 2. + <_> + + <_> + 4 5 2 6 -1. + <_> + 5 5 1 6 2. + <_> + + <_> + 1 7 6 11 -1. + <_> + 3 7 2 11 3. + <_> + + <_> + 2 15 2 1 -1. + <_> + 3 15 1 1 2. + <_> + + <_> + 5 11 2 6 -1. + <_> + 5 14 2 3 2. + <_> + + <_> + 6 17 2 3 -1. + <_> + 6 18 2 1 3. + <_> + + <_> + 5 8 11 12 -1. + <_> + 5 12 11 4 3. + <_> + + <_> + 16 10 2 2 -1. + <_> + 16 10 1 1 2. + <_> + 17 11 1 1 2. + <_> + + <_> + 15 11 3 1 -1. + <_> + 16 11 1 1 3. + <_> + + <_> + 13 14 1 3 -1. + <_> + 13 15 1 1 3. + <_> + + <_> + 6 14 3 4 -1. + <_> + 6 16 3 2 2. + <_> + + <_> + 6 6 2 14 -1. + <_> + 6 13 2 7 2. + <_> + + <_> + 11 14 2 1 -1. + <_> + 12 14 1 1 2. + <_> + + <_> + 9 13 6 6 -1. + <_> + 9 13 3 3 2. + <_> + 12 16 3 3 2. + <_> + + <_> + 10 17 3 1 -1. + <_> + 11 17 1 1 3. + <_> + + <_> + 9 13 2 6 -1. + <_> + 9 13 1 3 2. + <_> + 10 16 1 3 2. + <_> + + <_> + 11 18 4 2 -1. + <_> + 13 18 2 2 2. + <_> + + <_> + 9 12 3 3 -1. + <_> + 10 12 1 3 3. + <_> + + <_> + 5 6 1 12 -1. + <_> + 5 12 1 6 2. + <_> + + <_> + 2 4 6 6 -1. + <_> + 4 4 2 6 3. + <_> + + <_> + 1 4 9 3 -1. + <_> + 4 4 3 3 3. + <_> + + <_> + 5 10 3 3 -1. + <_> + 5 11 3 1 3. 
+ <_> + + <_> + 8 9 1 3 -1. + <_> + 8 10 1 1 3. + <_> + + <_> + 11 19 6 1 -1. + <_> + 13 19 2 1 3. + <_> + + <_> + 18 4 2 8 -1. + <_> + 18 4 1 4 2. + <_> + 19 8 1 4 2. + <_> + + <_> + 17 5 2 3 -1. + <_> + 17 6 2 1 3. + <_> + + <_> + 12 15 8 4 -1. + <_> + 16 15 4 4 2. + <_> + + <_> + 14 8 4 10 -1. + <_> + 14 13 4 5 2. + <_> + + <_> + 11 0 3 18 -1. + <_> + 11 6 3 6 3. + <_> + + <_> + 8 5 12 6 -1. + <_> + 8 7 12 2 3. + <_> + + <_> + 10 11 4 2 -1. + <_> + 12 11 2 2 2. + <_> + + <_> + 5 7 2 8 -1. + <_> + 6 7 1 8 2. + <_> + + <_> + 6 3 12 12 -1. + <_> + 6 3 6 6 2. + <_> + 12 9 6 6 2. + <_> + + <_> + 6 10 4 2 -1. + <_> + 6 10 2 1 2. + <_> + 8 11 2 1 2. + <_> + + <_> + 0 2 6 10 -1. + <_> + 2 2 2 10 3. + <_> + + <_> + 10 15 3 2 -1. + <_> + 11 15 1 2 3. + <_> + + <_> + 6 8 10 2 -1. + <_> + 6 8 5 1 2. + <_> + 11 9 5 1 2. + <_> + + <_> + 6 12 1 6 -1. + <_> + 6 15 1 3 2. + <_> + + <_> + 9 0 4 1 -1. + <_> + 11 0 2 1 2. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 7 4 2 1 -1. + <_> + 8 4 1 1 2. + <_> + + <_> + 2 11 3 1 -1. + <_> + 3 11 1 1 3. + <_> + + <_> + 1 10 3 3 -1. + <_> + 2 10 1 3 3. + <_> + + <_> + 12 0 8 2 -1. + <_> + 12 0 4 1 2. + <_> + 16 1 4 1 2. + <_> + + <_> + 6 6 6 8 -1. + <_> + 9 6 3 8 2. + <_> + + <_> + 6 10 1 3 -1. + <_> + 6 11 1 1 3. + <_> + + <_> + 8 12 7 2 -1. + <_> + 8 13 7 1 2. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 6 6 2 12 -1. + <_> + 6 12 2 6 2. + <_> + + <_> + 6 12 2 3 -1. + <_> + 6 13 2 1 3. + <_> + + <_> + 12 12 1 3 -1. + <_> + 12 13 1 1 3. + <_> + + <_> + 8 9 1 2 -1. + <_> + 8 10 1 1 2. + <_> + + <_> + 7 11 4 6 -1. + <_> + 7 11 2 3 2. + <_> + 9 14 2 3 2. + <_> + + <_> + 10 10 4 3 -1. + <_> + 10 11 4 1 3. + <_> + + <_> + 12 10 2 3 -1. + <_> + 12 11 2 1 3. + <_> + + <_> + 6 13 2 3 -1. + <_> + 6 14 2 1 3. + <_> + + <_> + 7 14 1 3 -1. + <_> + 7 15 1 1 3. + <_> + + <_> + 6 10 8 6 -1. + <_> + 6 12 8 2 3. + <_> + + <_> + 5 8 6 12 -1. + <_> + 5 12 6 4 3. + <_> + + <_> + 1 14 2 1 -1. + <_> + 2 14 1 1 2. + <_> + + <_> + 8 6 2 3 -1. + <_> + 8 7 2 1 3. + <_> + + <_> + 4 6 8 4 -1. + <_> + 4 6 4 2 2. + <_> + 8 8 4 2 2. + <_> + + <_> + 0 14 3 1 -1. + <_> + 1 14 1 1 3. + <_> + + <_> + 4 1 2 2 -1. + <_> + 4 1 1 1 2. + <_> + 5 2 1 1 2. + <_> + + <_> + 14 10 1 6 -1. + <_> + 14 13 1 3 2. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 5 10 3 3 -1. + <_> + 5 11 3 1 3. + <_> + + <_> + 2 3 12 4 -1. + <_> + 2 3 6 2 2. + <_> + 8 5 6 2 2. + <_> + + <_> + 10 15 3 2 -1. + <_> + 11 15 1 2 3. + <_> + + <_> + 12 14 8 1 -1. + <_> + 16 14 4 1 2. + <_> + + <_> + 11 0 8 13 -1. + <_> + 15 0 4 13 2. + <_> + + <_> + 12 12 2 8 -1. + <_> + 12 12 1 4 2. + <_> + 13 16 1 4 2. + <_> + + <_> + 4 7 8 12 -1. + <_> + 4 13 8 6 2. + <_> + + <_> + 10 9 2 4 -1. + <_> + 10 11 2 2 2. + <_> + + <_> + 4 4 3 1 -1. + <_> + 5 4 1 1 3. + <_> + + <_> + 18 5 1 3 -1. + <_> + 18 6 1 1 3. + <_> + + <_> + 6 9 9 1 -1. + <_> + 9 9 3 1 3. + <_> + + <_> + 12 5 4 6 -1. + <_> + 12 7 4 2 3. + <_> + + <_> + 16 0 4 4 -1. + <_> + 18 0 2 4 2. + <_> + + <_> + 3 10 2 2 -1. + <_> + 3 10 1 1 2. + <_> + 4 11 1 1 2. + <_> + + <_> + 0 0 2 1 -1. + <_> + 1 0 1 1 2. + <_> + + <_> + 17 4 2 8 -1. + <_> + 17 4 1 4 2. + <_> + 18 8 1 4 2. + <_> + + <_> + 7 15 1 3 -1. + <_> + 7 16 1 1 3. + <_> + + <_> + 0 15 2 1 -1. + <_> + 1 15 1 1 2. + <_> + + <_> + 7 10 2 4 -1. + <_> + 7 12 2 2 2. + <_> + + <_> + 4 19 3 1 -1. + <_> + 5 19 1 1 3. + <_> + + <_> + 2 14 4 5 -1. + <_> + 4 14 2 5 2. + <_> + + <_> + 4 11 4 4 -1. + <_> + 4 11 2 2 2. + <_> + 6 13 2 2 2. + <_> + + <_> + 4 13 2 6 -1. + <_> + 4 13 1 3 2. 
+ <_> + 5 16 1 3 2. + <_> + + <_> + 7 3 3 4 -1. + <_> + 8 3 1 4 3. + <_> + + <_> + 17 11 3 2 -1. + <_> + 18 11 1 2 3. + <_> + + <_> + 10 4 6 2 -1. + <_> + 10 5 6 1 2. + <_> + + <_> + 12 3 6 3 -1. + <_> + 12 4 6 1 3. + <_> + + <_> + 17 12 2 2 -1. + <_> + 17 12 1 1 2. + <_> + 18 13 1 1 2. + <_> + + <_> + 5 12 15 8 -1. + <_> + 10 12 5 8 3. + <_> + + <_> + 4 18 2 2 -1. + <_> + 4 18 1 1 2. + <_> + 5 19 1 1 2. + <_> + + <_> + 0 15 2 2 -1. + <_> + 0 15 1 1 2. + <_> + 1 16 1 1 2. + <_> + + <_> + 5 9 1 6 -1. + <_> + 5 12 1 3 2. + <_> + + <_> + 1 0 18 14 -1. + <_> + 1 7 18 7 2. + <_> + + <_> + 6 2 7 6 -1. + <_> + 6 5 7 3 2. + <_> + + <_> + 6 16 2 1 -1. + <_> + 7 16 1 1 2. + <_> + + <_> + 4 11 16 9 -1. + <_> + 4 14 16 3 3. + <_> + + <_> + 16 9 2 2 -1. + <_> + 17 9 1 2 2. + <_> + + <_> + 6 8 2 2 -1. + <_> + 7 8 1 2 2. + <_> + + <_> + 0 14 12 3 -1. + <_> + 6 14 6 3 2. + <_> + + <_> + 7 6 3 10 -1. + <_> + 7 11 3 5 2. + <_> + + <_> + 10 11 1 2 -1. + <_> + 10 12 1 1 2. + <_> + + <_> + 5 17 2 2 -1. + <_> + 6 17 1 2 2. + <_> + + <_> + 2 0 18 18 -1. + <_> + 11 0 9 18 2. + <_> + + <_> + 12 11 6 3 -1. + <_> + 14 11 2 3 3. + <_> + + <_> + 12 12 6 1 -1. + <_> + 14 12 2 1 3. + <_> + + <_> + 15 10 2 2 -1. + <_> + 15 10 1 1 2. + <_> + 16 11 1 1 2. + <_> + + <_> + 3 11 3 8 -1. + <_> + 4 11 1 8 3. + <_> + + <_> + 6 1 4 12 -1. + <_> + 8 1 2 12 2. + <_> + + <_> + 6 3 4 8 -1. + <_> + 8 3 2 8 2. + <_> + + <_> + 8 4 6 12 -1. + <_> + 11 4 3 12 2. + <_> + + <_> + 16 12 4 5 -1. + <_> + 18 12 2 5 2. + <_> + + <_> + 14 9 2 3 -1. + <_> + 15 9 1 3 2. + <_> + + <_> + 9 7 10 6 -1. + <_> + 14 7 5 6 2. + <_> + + <_> + 12 7 3 11 -1. + <_> + 13 7 1 11 3. + <_> + + <_> + 19 16 1 2 -1. + <_> + 19 17 1 1 2. + <_> + + <_> + 8 15 12 1 -1. + <_> + 14 15 6 1 2. + <_> + + <_> + 10 15 6 3 -1. + <_> + 10 16 6 1 3. + <_> + + <_> + 6 8 10 4 -1. + <_> + 6 8 5 2 2. + <_> + 11 10 5 2 2. + <_> + + <_> + 10 15 1 3 -1. + <_> + 10 16 1 1 3. + <_> + + <_> + 10 1 9 12 -1. + <_> + 10 7 9 6 2. + <_> + + <_> + 10 1 1 4 -1. + <_> + 10 3 1 2 2. + <_> + + <_> + 1 5 18 4 -1. + <_> + 1 7 18 2 2. + <_> + + <_> + 6 4 12 6 -1. + <_> + 12 4 6 6 2. + <_> + + <_> + 13 1 7 3 -1. + <_> + 13 2 7 1 3. + <_> + + <_> + 14 0 6 4 -1. + <_> + 14 0 3 2 2. + <_> + 17 2 3 2 2. + <_> + + <_> + 9 12 3 3 -1. + <_> + 9 13 3 1 3. + <_> + + <_> + 5 14 8 4 -1. + <_> + 5 14 4 2 2. + <_> + 9 16 4 2 2. + <_> + + <_> + 1 6 14 14 -1. + <_> + 8 6 7 14 2. + <_> + + <_> + 13 4 6 2 -1. + <_> + 13 4 3 1 2. + <_> + 16 5 3 1 2. + <_> + + <_> + 8 7 6 6 -1. + <_> + 8 9 6 2 3. + <_> + + <_> + 8 0 12 20 -1. + <_> + 8 10 12 10 2. + <_> + + <_> + 9 8 4 3 -1. + <_> + 9 9 4 1 3. + <_> + + <_> + 10 18 8 2 -1. + <_> + 10 19 8 1 2. + <_> + + <_> + 9 12 4 2 -1. + <_> + 9 12 2 1 2. + <_> + 11 13 2 1 2. + <_> + + <_> + 4 14 2 2 -1. + <_> + 4 14 1 1 2. + <_> + 5 15 1 1 2. + <_> + + <_> + 5 14 3 2 -1. + <_> + 5 15 3 1 2. + <_> + + <_> + 11 1 6 3 -1. + <_> + 13 1 2 3 3. + <_> + + <_> + 6 14 2 3 -1. + <_> + 6 15 2 1 3. + <_> + + <_> + 14 1 2 2 -1. + <_> + 15 1 1 2 2. + <_> + + <_> + 0 13 6 7 -1. + <_> + 3 13 3 7 2. + <_> + + <_> + 17 11 3 1 -1. + <_> + 18 11 1 1 3. + <_> + + <_> + 5 10 8 4 -1. + <_> + 9 10 4 4 2. + <_> + + <_> + 7 16 8 4 -1. + <_> + 7 16 4 2 2. + <_> + 11 18 4 2 2. + <_> + + <_> + 11 16 4 3 -1. + <_> + 11 17 4 1 3. + <_> + + <_> + 3 10 6 2 -1. + <_> + 3 10 3 1 2. + <_> + 6 11 3 1 2. + <_> + + <_> + 11 7 3 2 -1. + <_> + 12 7 1 2 3. + <_> + + <_> + 8 7 9 2 -1. + <_> + 11 7 3 2 3. + <_> + + <_> + 13 6 3 10 -1. + <_> + 14 6 1 10 3. + <_> + + <_> + 15 10 4 3 -1. + <_> + 17 10 2 3 2. 
+ <_> + + <_> + 1 10 6 10 -1. + <_> + 3 10 2 10 3. + <_> + + <_> + 5 0 2 2 -1. + <_> + 5 0 1 1 2. + <_> + 6 1 1 1 2. + <_> + + <_> + 3 11 3 6 -1. + <_> + 3 13 3 2 3. + <_> + + <_> + 4 6 9 10 -1. + <_> + 7 6 3 10 3. + <_> + + <_> + 6 10 9 5 -1. + <_> + 9 10 3 5 3. + <_> + + <_> + 10 5 3 9 -1. + <_> + 11 5 1 9 3. + <_> + + <_> + 3 7 3 4 -1. + <_> + 4 7 1 4 3. + <_> + + <_> + 4 6 2 2 -1. + <_> + 4 6 1 1 2. + <_> + 5 7 1 1 2. + <_> + + <_> + 0 2 2 3 -1. + <_> + 0 3 2 1 3. + <_> + + <_> + 12 0 8 4 -1. + <_> + 12 0 4 2 2. + <_> + 16 2 4 2 2. + <_> + + <_> + 11 1 8 2 -1. + <_> + 11 1 4 1 2. + <_> + 15 2 4 1 2. + <_> + + <_> + 12 2 7 3 -1. + <_> + 12 3 7 1 3. + <_> + + <_> + 3 6 3 2 -1. + <_> + 4 6 1 2 3. + <_> + + <_> + 4 6 4 6 -1. + <_> + 4 9 4 3 2. + <_> + + <_> + 13 12 6 4 -1. + <_> + 13 12 3 2 2. + <_> + 16 14 3 2 2. + <_> + + <_> + 13 10 2 4 -1. + <_> + 13 12 2 2 2. + <_> + + <_> + 15 12 3 3 -1. + <_> + 15 13 3 1 3. + <_> + + <_> + 14 14 2 3 -1. + <_> + 14 15 2 1 3. + <_> + + <_> + 18 4 2 8 -1. + <_> + 18 4 1 4 2. + <_> + 19 8 1 4 2. + <_> + + <_> + 7 14 2 4 -1. + <_> + 7 14 1 2 2. + <_> + 8 16 1 2 2. + <_> + + <_> + 14 3 6 6 -1. + <_> + 14 5 6 2 3. + <_> + + <_> + 19 7 1 2 -1. + <_> + 19 8 1 1 2. + <_> + + <_> + 8 8 6 2 -1. + <_> + 8 8 3 1 2. + <_> + 11 9 3 1 2. + <_> + + <_> + 19 6 1 3 -1. + <_> + 19 7 1 1 3. + <_> + + <_> + 7 8 7 3 -1. + <_> + 7 9 7 1 3. + <_> + + <_> + 18 6 2 6 -1. + <_> + 18 6 1 3 2. + <_> + 19 9 1 3 2. + <_> + + <_> + 5 8 8 6 -1. + <_> + 5 10 8 2 3. + <_> + + <_> + 1 1 18 15 -1. + <_> + 10 1 9 15 2. + <_> + + <_> + 11 7 5 4 -1. + <_> + 11 9 5 2 2. + <_> + + <_> + 10 12 2 3 -1. + <_> + 11 12 1 3 2. + <_> + + <_> + 0 7 2 4 -1. + <_> + 0 9 2 2 2. + <_> + + <_> + 6 12 4 2 -1. + <_> + 6 12 2 1 2. + <_> + 8 13 2 1 2. + <_> + + <_> + 7 7 6 8 -1. + <_> + 7 11 6 4 2. + <_> + + <_> + 9 9 2 4 -1. + <_> + 9 11 2 2 2. + <_> + + <_> + 9 10 6 6 -1. + <_> + 9 12 6 2 3. + <_> + + <_> + 12 13 4 2 -1. + <_> + 12 14 4 1 2. + <_> + + <_> + 0 4 8 1 -1. + <_> + 4 4 4 1 2. + <_> + + <_> + 14 13 1 2 -1. + <_> + 14 14 1 1 2. + <_> + + <_> + 8 7 2 6 -1. + <_> + 8 7 1 3 2. + <_> + 9 10 1 3 2. + <_> + + <_> + 5 8 10 6 -1. + <_> + 5 8 5 3 2. + <_> + 10 11 5 3 2. + <_> + + <_> + 5 12 3 3 -1. + <_> + 5 13 3 1 3. + <_> + + <_> + 5 10 2 2 -1. + <_> + 5 11 2 1 2. + <_> + + <_> + 6 2 4 15 -1. + <_> + 6 7 4 5 3. + <_> + + <_> + 7 6 2 4 -1. + <_> + 7 6 1 2 2. + <_> + 8 8 1 2 2. + <_> + + <_> + 5 9 2 3 -1. + <_> + 5 10 2 1 3. + <_> + + <_> + 15 16 2 2 -1. + <_> + 15 16 1 1 2. + <_> + 16 17 1 1 2. + <_> + + <_> + 4 11 4 6 -1. + <_> + 4 13 4 2 3. + <_> + + <_> + 5 0 3 6 -1. + <_> + 6 0 1 6 3. + <_> + + <_> + 4 11 12 4 -1. + <_> + 4 11 6 2 2. + <_> + 10 13 6 2 2. + <_> + + <_> + 7 13 3 3 -1. + <_> + 7 14 3 1 3. + <_> + + <_> + 9 12 6 2 -1. + <_> + 9 13 6 1 2. + <_> + + <_> + 8 0 12 8 -1. + <_> + 8 0 6 4 2. + <_> + 14 4 6 4 2. + <_> + + <_> + 10 8 4 4 -1. + <_> + 10 8 2 2 2. + <_> + 12 10 2 2 2. + <_> + + <_> + 12 10 1 6 -1. + <_> + 12 13 1 3 2. + <_> + + <_> + 5 5 3 10 -1. + <_> + 6 5 1 10 3. + <_> + + <_> + 4 0 14 6 -1. + <_> + 11 0 7 6 2. + <_> + + <_> + 9 7 2 6 -1. + <_> + 9 7 1 3 2. + <_> + 10 10 1 3 2. + <_> + + <_> + 8 4 3 1 -1. + <_> + 9 4 1 1 3. + <_> + + <_> + 11 14 2 2 -1. + <_> + 11 15 2 1 2. + <_> + + <_> + 9 18 6 2 -1. + <_> + 12 18 3 2 2. + <_> + + <_> + 8 12 8 6 -1. + <_> + 8 15 8 3 2. + <_> + + <_> + 7 0 8 6 -1. + <_> + 7 2 8 2 3. + <_> + + <_> + 1 2 12 3 -1. + <_> + 5 2 4 3 3. + <_> + + <_> + 5 4 10 12 -1. + <_> + 5 4 5 6 2. + <_> + 10 10 5 6 2. + <_> + + <_> + 5 8 3 2 -1. + <_> + 5 9 3 1 2. 
+ <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 5 11 3 3 -1. + <_> + 5 12 3 1 3. + <_> + + <_> + 8 10 6 9 -1. + <_> + 8 13 6 3 3. + <_> + + <_> + 7 8 3 6 -1. + <_> + 7 10 3 2 3. + <_> + + <_> + 3 4 3 14 -1. + <_> + 4 4 1 14 3. + <_> + + <_> + 3 10 3 6 -1. + <_> + 4 10 1 6 3. + <_> + + <_> + 4 8 2 2 -1. + <_> + 4 8 1 1 2. + <_> + 5 9 1 1 2. + <_> + + <_> + 10 13 2 3 -1. + <_> + 10 14 2 1 3. + <_> + + <_> + 6 14 8 4 -1. + <_> + 6 14 4 2 2. + <_> + 10 16 4 2 2. + <_> + + <_> + 5 12 3 4 -1. + <_> + 6 12 1 4 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 15 6 1 10 -1. + <_> + 15 11 1 5 2. + <_> + + <_> + 7 1 12 6 -1. + <_> + 7 3 12 2 3. + <_> + + <_> + 4 9 2 4 -1. + <_> + 4 9 1 2 2. + <_> + 5 11 1 2 2. + <_> + + <_> + 6 7 6 12 -1. + <_> + 9 7 3 12 2. + <_> + + <_> + 7 6 2 3 -1. + <_> + 8 6 1 3 2. + <_> + + <_> + 0 1 1 3 -1. + <_> + 0 2 1 1 3. + <_> + + <_> + 0 1 1 3 -1. + <_> + 0 2 1 1 3. + <_> + + <_> + 11 15 3 5 -1. + <_> + 12 15 1 5 3. + <_> + + <_> + 8 6 4 6 -1. + <_> + 8 8 4 2 3. + <_> + + <_> + 5 3 3 12 -1. + <_> + 5 7 3 4 3. + <_> + + <_> + 7 9 2 2 -1. + <_> + 7 9 1 1 2. + <_> + 8 10 1 1 2. + <_> + + <_> + 4 4 2 12 -1. + <_> + 4 8 2 4 3. + <_> + + <_> + 4 5 7 3 -1. + <_> + 4 6 7 1 3. + <_> + + <_> + 13 5 2 3 -1. + <_> + 13 6 2 1 3. + <_> + + <_> + 4 0 2 2 -1. + <_> + 4 0 1 1 2. + <_> + 5 1 1 1 2. + <_> + + <_> + 11 8 3 11 -1. + <_> + 12 8 1 11 3. + <_> + + <_> + 4 0 2 2 -1. + <_> + 4 0 1 1 2. + <_> + 5 1 1 1 2. + <_> + + <_> + 9 3 2 2 -1. + <_> + 9 3 1 1 2. + <_> + 10 4 1 1 2. + <_> + + <_> + 7 11 3 2 -1. + <_> + 8 11 1 2 3. + <_> + + <_> + 11 12 2 1 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 8 8 4 2 -1. + <_> + 10 8 2 2 2. + <_> + + <_> + 17 15 3 1 -1. + <_> + 18 15 1 1 3. + <_> + + <_> + 12 6 2 4 -1. + <_> + 12 6 1 2 2. + <_> + 13 8 1 2 2. + <_> + + <_> + 8 3 9 11 -1. + <_> + 11 3 3 11 3. + <_> + + <_> + 10 8 2 2 -1. + <_> + 11 8 1 2 2. + <_> + + <_> + 12 5 3 9 -1. + <_> + 12 8 3 3 3. + <_> + + <_> + 13 0 6 17 -1. + <_> + 15 0 2 17 3. + <_> + + <_> + 6 6 3 4 -1. + <_> + 7 6 1 4 3. + <_> + + <_> + 5 6 4 7 -1. + <_> + 7 6 2 7 2. + <_> + + <_> + 7 5 3 2 -1. + <_> + 8 5 1 2 3. + <_> + + <_> + 7 15 6 2 -1. + <_> + 7 15 3 1 2. + <_> + 10 16 3 1 2. + <_> + + <_> + 11 14 1 3 -1. + <_> + 11 15 1 1 3. + <_> + + <_> + 2 12 6 7 -1. + <_> + 4 12 2 7 3. + <_> + + <_> + 11 17 5 3 -1. + <_> + 11 18 5 1 3. + <_> + + <_> + 17 11 2 2 -1. + <_> + 17 11 1 1 2. + <_> + 18 12 1 1 2. + <_> + + <_> + 10 17 6 3 -1. + <_> + 10 18 6 1 3. + <_> + + <_> + 2 15 1 2 -1. + <_> + 2 16 1 1 2. + <_> + + <_> + 8 6 3 3 -1. + <_> + 8 7 3 1 3. + <_> + + <_> + 7 7 1 2 -1. + <_> + 7 8 1 1 2. + <_> + + <_> + 2 15 2 2 -1. + <_> + 2 16 2 1 2. + <_> + + <_> + 3 16 3 1 -1. + <_> + 4 16 1 1 3. + <_> + + <_> + 3 0 3 20 -1. + <_> + 4 0 1 20 3. + <_> + + <_> + 8 2 12 12 -1. + <_> + 14 2 6 12 2. + <_> + + <_> + 5 3 2 3 -1. + <_> + 5 4 2 1 3. + <_> + + <_> + 3 4 2 2 -1. + <_> + 3 4 1 1 2. + <_> + 4 5 1 1 2. + <_> + + <_> + 0 15 20 3 -1. + <_> + 10 15 10 3 2. + <_> + + <_> + 6 13 2 4 -1. + <_> + 6 13 1 2 2. + <_> + 7 15 1 2 2. + <_> + + <_> + 12 8 3 7 -1. + <_> + 13 8 1 7 3. + <_> + + <_> + 8 9 6 10 -1. + <_> + 8 9 3 5 2. + <_> + 11 14 3 5 2. + <_> + + <_> + 2 10 16 2 -1. + <_> + 10 10 8 2 2. + <_> + + <_> + 5 3 15 6 -1. + <_> + 10 3 5 6 3. + <_> + + <_> + 10 14 2 1 -1. + <_> + 11 14 1 1 2. + <_> + + <_> + 9 11 4 4 -1. + <_> + 11 11 2 4 2. + <_> + + <_> + 12 8 2 4 -1. + <_> + 12 10 2 2 2. + <_> + + <_> + 1 3 10 14 -1. + <_> + 1 3 5 7 2. + <_> + 6 10 5 7 2. 
+ <_> + + <_> + 8 0 3 4 -1. + <_> + 8 2 3 2 2. + <_> + + <_> + 10 2 2 1 -1. + <_> + 11 2 1 1 2. + <_> + + <_> + 5 12 5 3 -1. + <_> + 5 13 5 1 3. + <_> + + <_> + 7 12 1 3 -1. + <_> + 7 13 1 1 3. + <_> + + <_> + 10 12 6 3 -1. + <_> + 10 13 6 1 3. + <_> + + <_> + 6 4 1 3 -1. + <_> + 6 5 1 1 3. + <_> + + <_> + 2 0 18 3 -1. + <_> + 2 1 18 1 3. + <_> + + <_> + 8 8 11 6 -1. + <_> + 8 10 11 2 3. + <_> + + <_> + 2 6 10 8 -1. + <_> + 2 6 5 4 2. + <_> + 7 10 5 4 2. + <_> + + <_> + 9 2 6 2 -1. + <_> + 11 2 2 2 3. + <_> + + <_> + 13 9 6 3 -1. + <_> + 15 9 2 3 3. + <_> + + <_> + 5 3 1 2 -1. + <_> + 5 4 1 1 2. + <_> + + <_> + 1 7 3 1 -1. + <_> + 2 7 1 1 3. + <_> + + <_> + 0 6 8 6 -1. + <_> + 4 6 4 6 2. + <_> + + <_> + 11 9 1 2 -1. + <_> + 11 10 1 1 2. + <_> + + <_> + 12 13 1 2 -1. + <_> + 12 14 1 1 2. + <_> + + <_> + 10 15 10 4 -1. + <_> + 10 15 5 2 2. + <_> + 15 17 5 2 2. + <_> + + <_> + 12 11 1 2 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 6 11 2 1 -1. + <_> + 7 11 1 1 2. + <_> + + <_> + 11 3 3 2 -1. + <_> + 12 3 1 2 3. + <_> + + <_> + 4 7 6 5 -1. + <_> + 7 7 3 5 2. + <_> + + <_> + 3 16 3 1 -1. + <_> + 4 16 1 1 3. + <_> + + <_> + 4 7 6 5 -1. + <_> + 7 7 3 5 2. + <_> + + <_> + 5 7 6 3 -1. + <_> + 7 7 2 3 3. + <_> + + <_> + 7 8 4 8 -1. + <_> + 7 8 2 4 2. + <_> + 9 12 2 4 2. + <_> + + <_> + 4 2 14 12 -1. + <_> + 4 6 14 4 3. + <_> + + <_> + 4 14 2 6 -1. + <_> + 4 14 1 3 2. + <_> + 5 17 1 3 2. + <_> + + <_> + 7 11 2 4 -1. + <_> + 7 13 2 2 2. + <_> + + <_> + 6 4 10 15 -1. + <_> + 6 9 10 5 3. + <_> + + <_> + 6 11 12 6 -1. + <_> + 6 13 12 2 3. + <_> + + <_> + 6 17 4 3 -1. + <_> + 6 18 4 1 3. + <_> + + <_> + 6 17 4 3 -1. + <_> + 6 18 4 1 3. + <_> + + <_> + 9 13 3 7 -1. + <_> + 10 13 1 7 3. + <_> + + <_> + 2 8 5 2 -1. + <_> + 2 9 5 1 2. + <_> + + <_> + 14 1 3 8 -1. + <_> + 15 1 1 8 3. + <_> + + <_> + 2 12 1 2 -1. + <_> + 2 13 1 1 2. + <_> + + <_> + 8 6 2 2 -1. + <_> + 8 6 1 1 2. + <_> + 9 7 1 1 2. + <_> + + <_> + 4 3 10 12 -1. + <_> + 4 9 10 6 2. + <_> + + <_> + 5 9 8 4 -1. + <_> + 5 9 4 2 2. + <_> + 9 11 4 2 2. + <_> + + <_> + 9 9 4 4 -1. + <_> + 9 11 4 2 2. + <_> + + <_> + 5 10 4 2 -1. + <_> + 5 11 4 1 2. + <_> + + <_> + 6 17 2 1 -1. + <_> + 7 17 1 1 2. + <_> + + <_> + 12 12 2 1 -1. + <_> + 13 12 1 1 2. + <_> + + <_> + 11 6 4 8 -1. + <_> + 13 6 2 8 2. + <_> + + <_> + 9 4 3 10 -1. + <_> + 10 4 1 10 3. + <_> + + <_> + 0 18 9 2 -1. + <_> + 3 18 3 2 3. + <_> + + <_> + 15 13 3 3 -1. + <_> + 15 14 3 1 3. + <_> + + <_> + 9 12 2 2 -1. + <_> + 9 12 1 1 2. + <_> + 10 13 1 1 2. + <_> + + <_> + 13 12 7 3 -1. + <_> + 13 13 7 1 3. + <_> + + <_> + 14 10 6 2 -1. + <_> + 14 11 6 1 2. + <_> + + <_> + 14 5 5 14 -1. + <_> + 14 12 5 7 2. + <_> + + <_> + 4 16 5 3 -1. + <_> + 4 17 5 1 3. + <_> + + <_> + 5 16 5 3 -1. + <_> + 5 17 5 1 3. + <_> + + <_> + 8 14 4 5 -1. + <_> + 10 14 2 5 2. + <_> + + <_> + 9 14 2 1 -1. + <_> + 10 14 1 1 2. + <_> + + <_> + 6 10 6 2 -1. + <_> + 6 10 3 1 2. + <_> + 9 11 3 1 2. + <_> + + <_> + 5 8 6 6 -1. + <_> + 8 8 3 6 2. + <_> + + <_> + 10 13 7 6 -1. + <_> + 10 15 7 2 3. + <_> + + <_> + 4 1 2 8 -1. + <_> + 4 1 1 4 2. + <_> + 5 5 1 4 2. + <_> + + <_> + 3 6 6 4 -1. + <_> + 3 6 3 2 2. + <_> + 6 8 3 2 2. + <_> + + <_> + 15 2 3 13 -1. + <_> + 16 2 1 13 3. + <_> + + <_> + 16 10 2 6 -1. + <_> + 16 10 1 3 2. + <_> + 17 13 1 3 2. + <_> + + <_> + 13 19 2 1 -1. + <_> + 14 19 1 1 2. + <_> + + <_> + 7 11 2 1 -1. + <_> + 8 11 1 1 2. + <_> + + <_> + 4 10 3 4 -1. + <_> + 5 10 1 4 3. + <_> + + <_> + 4 7 2 4 -1. + <_> + 4 7 1 2 2. + <_> + 5 9 1 2 2. + <_> + + <_> + 10 7 5 4 -1. + <_> + 10 9 5 2 2. + <_> + + <_> + 7 4 8 16 -1. 
+ <_> + 7 4 4 8 2. + <_> + 11 12 4 8 2. + <_> + + <_> + 5 9 10 6 -1. + <_> + 5 9 5 3 2. + <_> + 10 12 5 3 2. + <_> + + <_> + 5 11 3 2 -1. + <_> + 5 12 3 1 2. + <_> + + <_> + 12 12 4 8 -1. + <_> + 12 16 4 4 2. + <_> + + <_> + 8 13 6 2 -1. + <_> + 8 14 6 1 2. + <_> + + <_> + 3 12 5 6 -1. + <_> + 3 14 5 2 3. + <_> + + <_> + 16 0 2 2 -1. + <_> + 16 0 1 1 2. + <_> + 17 1 1 1 2. + <_> + + <_> + 13 3 3 4 -1. + <_> + 14 3 1 4 3. + <_> + + <_> + 15 11 3 1 -1. + <_> + 16 11 1 1 3. + <_> + + <_> + 14 0 6 5 -1. + <_> + 16 0 2 5 3. + <_> + + <_> + 10 1 8 18 -1. + <_> + 10 10 8 9 2. + <_> + + <_> + 11 5 3 2 -1. + <_> + 11 6 3 1 2. + <_> + + <_> + 5 5 2 1 -1. + <_> + 6 5 1 1 2. + <_> + + <_> + 3 4 3 3 -1. + <_> + 4 4 1 3 3. + <_> + + <_> + 11 14 1 3 -1. + <_> + 11 15 1 1 3. + <_> + + <_> + 16 13 3 3 -1. + <_> + 16 14 3 1 3. + <_> + + <_> + 15 8 5 12 -1. + <_> + 15 14 5 6 2. + <_> + + <_> + 3 0 3 10 -1. + <_> + 4 0 1 10 3. + <_> + + <_> + 15 15 1 2 -1. + <_> + 15 16 1 1 2. + <_> + + <_> + 15 0 4 2 -1. + <_> + 15 0 2 1 2. + <_> + 17 1 2 1 2. + <_> + + <_> + 17 2 2 1 -1. + <_> + 18 2 1 1 2. + <_> + + <_> + 8 13 1 3 -1. + <_> + 8 14 1 1 3. + <_> + + <_> + 9 1 2 6 -1. + <_> + 9 1 1 3 2. + <_> + 10 4 1 3 2. + <_> + + <_> + 1 12 9 3 -1. + <_> + 1 13 9 1 3. + <_> + + <_> + 12 14 3 3 -1. + <_> + 12 15 3 1 3. + <_> + + <_> + 15 10 3 1 -1. + <_> + 16 10 1 1 3. + <_> + + <_> + 9 6 9 1 -1. + <_> + 12 6 3 1 3. + <_> + + <_> + 12 5 3 7 -1. + <_> + 13 5 1 7 3. + <_> + + <_> + 8 2 2 2 -1. + <_> + 8 3 2 1 2. + <_> + + <_> + 7 0 9 2 -1. + <_> + 7 1 9 1 2. + <_> + + <_> + 13 5 2 5 -1. + <_> + 14 5 1 5 2. + <_> + + <_> + 14 2 3 6 -1. + <_> + 15 2 1 6 3. + <_> + + <_> + 8 6 4 3 -1. + <_> + 8 7 4 1 3. + <_> + + <_> + 6 8 1 9 -1. + <_> + 6 11 1 3 3. + <_> + + <_> + 3 9 7 6 -1. + <_> + 3 11 7 2 3. + <_> + + <_> + 6 6 2 3 -1. + <_> + 6 7 2 1 3. + <_> + + <_> + 5 9 3 1 -1. + <_> + 6 9 1 1 3. + <_> + + <_> + 4 5 4 4 -1. + <_> + 4 5 2 2 2. + <_> + 6 7 2 2 2. + <_> + + <_> + 8 5 2 3 -1. + <_> + 8 6 2 1 3. + <_> + + <_> + 5 6 4 7 -1. + <_> + 7 6 2 7 2. + <_> + + <_> + 10 8 3 5 -1. + <_> + 11 8 1 5 3. + <_> + + <_> + 11 4 3 13 -1. + <_> + 12 4 1 13 3. + <_> + + <_> + 2 13 3 3 -1. + <_> + 3 13 1 3 3. + <_> + + <_> + 4 8 3 2 -1. + <_> + 5 8 1 2 3. + <_> + + <_> + 0 4 1 3 -1. + <_> + 0 5 1 1 3. + <_> + + <_> + 7 6 6 6 -1. + <_> + 9 6 2 6 3. + <_> + + <_> + 7 7 4 12 -1. + <_> + 9 7 2 12 2. + <_> + + <_> + 6 12 6 3 -1. + <_> + 9 12 3 3 2. + <_> + + <_> + 8 6 9 12 -1. + <_> + 8 10 9 4 3. + <_> + + <_> + 11 0 3 15 -1. + <_> + 11 5 3 5 3. + <_> + + <_> + 8 16 6 4 -1. + <_> + 8 16 3 2 2. + <_> + 11 18 3 2 2. + <_> + + <_> + 6 5 10 6 -1. + <_> + 6 7 10 2 3. + <_> + + <_> + 2 12 3 4 -1. + <_> + 3 12 1 4 3. + <_> + + <_> + 9 13 4 3 -1. + <_> + 9 14 4 1 3. + <_> + + <_> + 3 0 4 6 -1. + <_> + 3 0 2 3 2. + <_> + 5 3 2 3 2. + <_> + + <_> + 5 9 6 1 -1. + <_> + 8 9 3 1 2. + <_> + + <_> + 11 14 2 3 -1. + <_> + 11 15 2 1 3. + <_> + + <_> + 5 8 2 1 -1. + <_> + 6 8 1 1 2. + <_> + + <_> + 17 0 3 12 -1. + <_> + 17 4 3 4 3. + <_> + + <_> + 10 13 3 6 -1. + <_> + 11 13 1 6 3. + <_> + + <_> + 10 13 3 7 -1. + <_> + 11 13 1 7 3. + <_> + + <_> + 6 5 6 1 -1. + <_> + 8 5 2 1 3. + <_> + + <_> + 18 2 2 8 -1. + <_> + 19 2 1 8 2. + <_> + + <_> + 5 8 3 1 -1. + <_> + 6 8 1 1 3. + <_> + + <_> + 8 7 4 6 -1. + <_> + 8 7 2 3 2. + <_> + 10 10 2 3 2. + <_> + + <_> + 8 3 2 2 -1. + <_> + 8 3 1 1 2. + <_> + 9 4 1 1 2. + <_> + + <_> + 18 5 2 3 -1. + <_> + 18 6 2 1 3. + <_> + + <_> + 17 7 3 4 -1. + <_> + 18 7 1 4 3. + <_> + + <_> + 8 2 2 4 -1. + <_> + 8 2 1 2 2. + <_> + 9 4 1 2 2. 
+ <_> + + <_> + 4 6 2 2 -1. + <_> + 5 6 1 2 2. + <_> + + <_> + 4 8 3 1 -1. + <_> + 5 8 1 1 3. + <_> + + <_> + 10 9 9 10 -1. + <_> + 10 14 9 5 2. + <_> + + <_> + 6 4 3 1 -1. + <_> + 7 4 1 1 3. + <_> + + <_> + 8 14 1 3 -1. + <_> + 8 15 1 1 3. + <_> + + <_> + 6 4 2 1 -1. + <_> + 7 4 1 1 2. + <_> + + <_> + 5 9 3 9 -1. + <_> + 5 12 3 3 3. + <_> + + <_> + 5 13 7 3 -1. + <_> + 5 14 7 1 3. + <_> + + <_> + 9 6 2 10 -1. + <_> + 9 6 1 5 2. + <_> + 10 11 1 5 2. + <_> + + <_> + 13 1 3 18 -1. + <_> + 13 10 3 9 2. + <_> + + <_> + 5 13 2 3 -1. + <_> + 5 14 2 1 3. + <_> + + <_> + 9 10 3 7 -1. + <_> + 10 10 1 7 3. + <_> + + <_> + 17 0 3 13 -1. + <_> + 18 0 1 13 3. + <_> + + <_> + 13 6 1 2 -1. + <_> + 13 7 1 1 2. + <_> + + <_> + 6 15 3 2 -1. + <_> + 7 15 1 2 3. + <_> + + <_> + 5 14 2 3 -1. + <_> + 5 15 2 1 3. + <_> + + <_> + 16 6 1 6 -1. + <_> + 16 8 1 2 3. + <_> + + <_> + 0 6 2 2 -1. + <_> + 1 6 1 2 2. + <_> + + <_> + 3 12 4 8 -1. + <_> + 3 12 2 4 2. + <_> + 5 16 2 4 2. + <_> + + <_> + 6 2 2 8 -1. + <_> + 7 2 1 8 2. + <_> + + <_> + 6 7 2 6 -1. + <_> + 6 7 1 3 2. + <_> + 7 10 1 3 2. + <_> + + <_> + 5 12 4 2 -1. + <_> + 7 12 2 2 2. + <_> + + <_> + 4 9 13 2 -1. + <_> + 4 10 13 1 2. + <_> + + <_> + 19 5 1 2 -1. + <_> + 19 6 1 1 2. + <_> + + <_> + 4 8 9 1 -1. + <_> + 7 8 3 1 3. + <_> + + <_> + 8 8 2 1 -1. + <_> + 9 8 1 1 2. + <_> + + <_> + 3 0 2 10 -1. + <_> + 3 5 2 5 2. + <_> + + <_> + 6 2 2 1 -1. + <_> + 7 2 1 1 2. + <_> + + <_> + 14 5 3 3 -1. + <_> + 15 5 1 3 3. + <_> + + <_> + 4 8 2 2 -1. + <_> + 4 8 1 1 2. + <_> + 5 9 1 1 2. + <_> + + <_> + 8 16 9 2 -1. + <_> + 8 17 9 1 2. + <_> + + <_> + 6 7 2 3 -1. + <_> + 6 8 2 1 3. + <_> + + <_> + 12 11 2 2 -1. + <_> + 12 11 1 1 2. + <_> + 13 12 1 1 2. + <_> + + <_> + 15 9 2 4 -1. + <_> + 15 11 2 2 2. + <_> + + <_> + 5 11 2 3 -1. + <_> + 5 12 2 1 3. + <_> + + <_> + 6 11 2 3 -1. + <_> + 6 12 2 1 3. + <_> + + <_> + 6 12 1 6 -1. + <_> + 6 15 1 3 2. + <_> + + <_> + 6 9 5 9 -1. + <_> + 6 12 5 3 3. + <_> + + <_> + 8 11 2 2 -1. + <_> + 8 12 2 1 2. + <_> + + <_> + 8 10 4 2 -1. + <_> + 10 10 2 2 2. + <_> + + <_> + 8 10 4 6 -1. + <_> + 8 10 2 3 2. + <_> + 10 13 2 3 2. + <_> + + <_> + 2 0 9 20 -1. + <_> + 5 0 3 20 3. + <_> + + <_> + 12 3 2 4 -1. + <_> + 12 3 1 2 2. + <_> + 13 5 1 2 2. + <_> + + <_> + 15 0 2 10 -1. + <_> + 16 0 1 10 2. + <_> + + <_> + 13 7 3 4 -1. + <_> + 14 7 1 4 3. + <_> + + <_> + 14 10 1 2 -1. + <_> + 14 11 1 1 2. + <_> + + <_> + 16 11 3 1 -1. + <_> + 17 11 1 1 3. + <_> + + <_> + 16 11 2 2 -1. + <_> + 16 11 1 1 2. + <_> + 17 12 1 1 2. + <_> + + <_> + 13 12 6 1 -1. + <_> + 15 12 2 1 3. + <_> + + <_> + 3 2 14 9 -1. + <_> + 10 2 7 9 2. + <_> + + <_> + 5 4 12 2 -1. + <_> + 11 4 6 2 2. + <_> + + <_> + 13 6 2 1 -1. + <_> + 14 6 1 1 2. + <_> + + <_> + 7 10 3 3 -1. + <_> + 7 11 3 1 3. + <_> + + <_> + 16 17 4 2 -1. + <_> + 18 17 2 2 2. + <_> + + <_> + 4 12 8 8 -1. + <_> + 4 12 4 4 2. + <_> + 8 16 4 4 2. + <_> + + <_> + 14 8 4 5 -1. + <_> + 16 8 2 5 2. + <_> + + <_> + 11 8 6 2 -1. + <_> + 13 8 2 2 3. + <_> + + <_> + 4 5 16 5 -1. + <_> + 12 5 8 5 2. + <_> + + <_> + 14 9 6 10 -1. + <_> + 16 9 2 10 3. + <_> + + <_> + 4 18 3 1 -1. + <_> + 5 18 1 1 3. + <_> + + <_> + 4 13 4 4 -1. + <_> + 4 13 2 2 2. + <_> + 6 15 2 2 2. + <_> + + <_> + 6 15 2 3 -1. + <_> + 6 16 2 1 3. + <_> + + <_> + 6 15 1 3 -1. + <_> + 6 16 1 1 3. + <_> + + <_> + 7 17 3 1 -1. + <_> + 8 17 1 1 3. + <_> + + <_> + 7 17 3 1 -1. + <_> + 8 17 1 1 3. + <_> + + <_> + 9 10 4 1 -1. + <_> + 11 10 2 1 2. + <_> + + <_> + 11 12 2 1 -1. + <_> + 12 12 1 1 2. + <_> + + <_> + 7 8 1 6 -1. + <_> + 7 11 1 3 2. 
+ <_> + + <_> + 6 7 3 3 -1. + <_> + 7 7 1 3 3. + <_> + + <_> + 13 10 1 3 -1. + <_> + 13 11 1 1 3. + <_> + + <_> + 5 8 2 4 -1. + <_> + 5 10 2 2 2. + <_> + + <_> + 5 8 6 6 -1. + <_> + 8 8 3 6 2. + <_> + + <_> + 6 5 4 13 -1. + <_> + 8 5 2 13 2. + <_> + + <_> + 8 4 10 8 -1. + <_> + 8 4 5 4 2. + <_> + 13 8 5 4 2. + <_> + + <_> + 8 3 9 6 -1. + <_> + 11 3 3 6 3. + <_> + + <_> + 11 0 6 3 -1. + <_> + 13 0 2 3 3. + <_> + + <_> + 11 1 3 15 -1. + <_> + 12 1 1 15 3. + <_> + + <_> + 4 8 14 9 -1. + <_> + 4 11 14 3 3. + <_> + + <_> + 11 2 1 16 -1. + <_> + 11 10 1 8 2. + <_> + + <_> + 12 1 2 14 -1. + <_> + 12 8 2 7 2. + <_> + + <_> + 11 1 3 4 -1. + <_> + 12 1 1 4 3. + <_> + + <_> + 9 8 4 2 -1. + <_> + 9 8 2 1 2. + <_> + 11 9 2 1 2. + <_> + + <_> + 17 3 2 2 -1. + <_> + 18 3 1 2 2. + <_> + + <_> + 2 6 3 2 -1. + <_> + 3 6 1 2 3. + <_> + + <_> + 9 8 2 2 -1. + <_> + 9 9 2 1 2. + <_> + + <_> + 6 15 6 1 -1. + <_> + 8 15 2 1 3. + <_> + + <_> + 16 10 2 4 -1. + <_> + 16 10 1 2 2. + <_> + 17 12 1 2 2. + <_> + + <_> + 6 6 10 6 -1. + <_> + 6 6 5 3 2. + <_> + 11 9 5 3 2. + <_> + + <_> + 13 8 3 3 -1. + <_> + 13 9 3 1 3. + <_> + + <_> + 13 0 4 2 -1. + <_> + 13 0 2 1 2. + <_> + 15 1 2 1 2. + <_> + + <_> + 10 0 10 2 -1. + <_> + 10 0 5 1 2. + <_> + 15 1 5 1 2. + <_> + + <_> + 13 13 2 1 -1. + <_> + 14 13 1 1 2. + <_> + + <_> + 4 9 2 2 -1. + <_> + 4 9 1 1 2. + <_> + 5 10 1 1 2. + <_> + + <_> + 6 8 2 3 -1. + <_> + 6 9 2 1 3. + <_> + + <_> + 2 12 2 3 -1. + <_> + 2 13 2 1 3. + <_> + + <_> + 2 0 10 2 -1. + <_> + 2 0 5 1 2. + <_> + 7 1 5 1 2. + <_> + + <_> + 6 2 2 2 -1. + <_> + 6 3 2 1 2. + <_> + + <_> + 5 10 8 2 -1. + <_> + 5 11 8 1 2. + <_> + + <_> + 11 7 5 10 -1. + <_> + 11 12 5 5 2. + <_> + + <_> + 5 10 4 3 -1. + <_> + 5 11 4 1 3. + <_> + + <_> + 9 6 6 12 -1. + <_> + 9 12 6 6 2. + <_> + + <_> + 16 10 3 5 -1. + <_> + 17 10 1 5 3. + <_> + + <_> + 15 12 2 4 -1. + <_> + 15 12 1 2 2. + <_> + 16 14 1 2 2. + <_> + + <_> + 8 0 12 8 -1. + <_> + 8 0 6 4 2. + <_> + 14 4 6 4 2. + <_> + + <_> + 14 1 5 3 -1. + <_> + 14 2 5 1 3. + <_> + + <_> + 2 2 3 6 -1. + <_> + 3 2 1 6 3. + <_> + + <_> + 6 5 2 2 -1. + <_> + 7 5 1 2 2. + <_> + + <_> + 7 12 12 1 -1. + <_> + 11 12 4 1 3. + <_> + + <_> + 13 9 7 2 -1. + <_> + 13 10 7 1 2. + <_> + + <_> + 5 10 1 3 -1. + <_> + 5 11 1 1 3. + <_> + + <_> + 0 4 15 2 -1. + <_> + 5 4 5 2 3. + <_> + + <_> + 3 0 9 13 -1. + <_> + 6 0 3 13 3. + <_> + + <_> + 5 10 6 2 -1. + <_> + 7 10 2 2 3. + <_> + + <_> + 8 3 4 2 -1. + <_> + 8 3 2 1 2. + <_> + 10 4 2 1 2. + <_> + + <_> + 8 7 2 6 -1. + <_> + 8 7 1 3 2. + <_> + 9 10 1 3 2. + <_> + + <_> + 8 7 2 3 -1. + <_> + 9 7 1 3 2. + <_> + + <_> + 5 11 3 3 -1. + <_> + 6 11 1 3 3. + <_> + + <_> + 0 1 1 2 -1. + <_> + 0 2 1 1 2. + <_> + + <_> + 7 0 1 6 -1. + <_> + 7 2 1 2 3. + <_> + + <_> + 14 0 2 5 -1. + <_> + 15 0 1 5 2. + <_> + + <_> + 3 2 12 1 -1. + <_> + 7 2 4 1 3. + <_> + + <_> + 11 13 5 2 -1. + <_> + 11 14 5 1 2. + <_> + + <_> + 13 14 1 3 -1. + <_> + 13 15 1 1 3. + <_> + + <_> + 7 17 12 2 -1. + <_> + 11 17 4 2 3. + <_> + + <_> + 0 0 13 20 -1. + <_> + 0 10 13 10 2. + <_> + + <_> + 4 7 10 12 -1. + <_> + 4 13 10 6 2. + <_> + + <_> + 10 12 2 2 -1. + <_> + 11 12 1 2 2. + <_> + + <_> + 9 11 4 4 -1. + <_> + 11 11 2 4 2. + <_> + + <_> + 4 9 16 5 -1. + <_> + 12 9 8 5 2. + <_> + + <_> + 16 9 2 4 -1. + <_> + 17 9 1 4 2. + <_> + + <_> + 15 9 3 1 -1. + <_> + 16 9 1 1 3. + <_> + + <_> + 14 3 4 11 -1. + <_> + 16 3 2 11 2. + <_> + + <_> + 4 3 10 10 -1. + <_> + 4 3 5 5 2. + <_> + 9 8 5 5 2. + <_> + + <_> + 16 9 3 1 -1. + <_> + 17 9 1 1 3. + <_> + + <_> + 6 4 14 9 -1. + <_> + 6 7 14 3 3. 
+ <_> + + <_> + 8 11 2 4 -1. + <_> + 8 13 2 2 2. + <_> + + <_> + 5 9 6 8 -1. + <_> + 5 9 3 4 2. + <_> + 8 13 3 4 2. + <_> + + <_> + 5 11 4 4 -1. + <_> + 5 13 4 2 2. + <_> + + <_> + 7 14 1 3 -1. + <_> + 7 15 1 1 3. + <_> + + <_> + 9 10 3 1 -1. + <_> + 10 10 1 1 3. + <_> + + <_> + 4 8 2 4 -1. + <_> + 4 8 1 2 2. + <_> + 5 10 1 2 2. + <_> + + <_> + 14 6 2 5 -1. + <_> + 15 6 1 5 2. + <_> + + <_> + 13 7 6 7 -1. + <_> + 15 7 2 7 3. + <_> + + <_> + 15 6 4 7 -1. + <_> + 17 6 2 7 2. + <_> + + <_> + 9 11 6 5 -1. + <_> + 11 11 2 5 3. + <_> + + <_> + 0 8 20 4 -1. + <_> + 10 8 10 4 2. + <_> + + <_> + 1 2 8 14 -1. + <_> + 1 2 4 7 2. + <_> + 5 9 4 7 2. + <_> + + <_> + 10 13 3 1 -1. + <_> + 11 13 1 1 3. + <_> + + <_> + 7 0 6 4 -1. + <_> + 9 0 2 4 3. + <_> + + <_> + 7 14 6 2 -1. + <_> + 7 14 3 1 2. + <_> + 10 15 3 1 2. + diff --git a/custom_nodes/was-node-suite-comfyui/res/haarcascade_upperbody.xml b/custom_nodes/was-node-suite-comfyui/res/haarcascade_upperbody.xml new file mode 100644 index 0000000000000000000000000000000000000000..3c75aa6927762b1a54a104c9926cd0a8d4891e17 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/haarcascade_upperbody.xml @@ -0,0 +1,28134 @@ + + + +BOOST + HAAR + 18 + 22 + + 152 + + 0 + 30 + + <_> + 20 + -1.1264339685440063e+00 + + <_> + + 0 -1 0 -1.3696029782295227e-02 + + 4.5076468586921692e-01 -4.2179030179977417e-01 + <_> + + 0 -1 1 1.2441449798643589e-02 + + 1.6493250429630280e-01 -7.4793487787246704e-01 + <_> + + 0 -1 2 -2.7094660326838493e-03 + + 3.1004700064659119e-01 -3.7617141008377075e-01 + <_> + + 0 -1 3 -1.0008010268211365e-01 + + 7.6182198524475098e-01 -7.4556976556777954e-02 + <_> + + 0 -1 4 -2.5114119052886963e-01 + + -6.4154028892517090e-01 1.5139220654964447e-01 + <_> + + 0 -1 5 -1.0510650277137756e-01 + + 7.1459370851516724e-01 -1.4498579502105713e-01 + <_> + + 0 -1 6 -8.8448017835617065e-02 + + 7.5773179531097412e-01 -6.8586893379688263e-02 + <_> + + 0 -1 7 1.0874910280108452e-02 + + 1.4610609412193298e-01 -5.4263710975646973e-01 + <_> + + 0 -1 8 1.2690570205450058e-02 + + 1.1674589663743973e-01 -4.9649459123611450e-01 + <_> + + 0 -1 9 -3.2198399305343628e-02 + + -3.8529390096664429e-01 9.8437972366809845e-02 + <_> + + 0 -1 10 -3.4077179152518511e-03 + + 2.5200870633125305e-01 -2.2382549941539764e-01 + <_> + + 0 -1 11 3.0324390158057213e-02 + + -1.0534449666738510e-01 6.5735417604446411e-01 + <_> + + 0 -1 12 4.1930507868528366e-03 + + 1.2872399389743805e-01 -5.3160661458969116e-01 + <_> + + 0 -1 13 8.0501407384872437e-02 + + 4.1696660220623016e-02 -7.2123032808303833e-01 + <_> + + 0 -1 14 -3.4822080284357071e-02 + + -4.9751108884811401e-01 1.3959939777851105e-01 + <_> + + 0 -1 15 7.5519368983805180e-03 + + -9.2147678136825562e-02 1.1294340342283249e-01 + <_> + + 0 -1 16 -1.7572140321135521e-02 + + -5.6784427165985107e-01 9.3572810292243958e-02 + <_> + + 0 -1 17 5.2012042142450809e-03 + + -7.9238079488277435e-02 6.1878960579633713e-02 + <_> + + 0 -1 18 -3.0798919498920441e-02 + + -5.6658512353897095e-01 9.5271490514278412e-02 + <_> + + 0 -1 19 -1.3465429656207561e-03 + + 2.4011470377445221e-01 -2.6026639342308044e-01 + <_> + 33 + -1.1226719617843628e+00 + + <_> + + 0 -1 20 1.9108939450234175e-03 + + -4.6240958571434021e-01 3.0612170696258545e-01 + <_> + + 0 -1 21 9.5464065670967102e-03 + + 9.1956138610839844e-02 -5.3501170873641968e-01 + <_> + + 0 -1 22 -4.3402809649705887e-02 + + 5.6817841529846191e-01 -1.1284930258989334e-01 + <_> + + 0 -1 23 5.0386030226945877e-02 + + -8.0316931009292603e-02 7.3521858453750610e-01 + <_> + + 0 -1 24 
-6.8480317713692784e-04 + + 2.5798648595809937e-01 -2.8049409389495850e-01 + <_> + + 0 -1 25 1.1548049747943878e-01 + + 9.2065572738647461e-02 -7.5556892156600952e-01 + <_> + + 0 -1 26 -1.9348369678482413e-03 + + 2.9440790414810181e-01 -2.4102710187435150e-01 + <_> + + 0 -1 27 -4.3528810143470764e-02 + + 4.9202969670295715e-01 -3.9650101214647293e-02 + <_> + + 0 -1 28 -3.0218150466680527e-02 + + 7.7227920293807983e-01 -8.6786523461341858e-02 + <_> + + 0 -1 29 2.4536589160561562e-02 + + 9.5944821834564209e-02 -4.8642969131469727e-01 + <_> + + 0 -1 30 2.3958990350365639e-02 + + 1.0437840223312378e-01 -5.1219838857650757e-01 + <_> + + 0 -1 31 -2.5370830669999123e-02 + + -3.1981548666954041e-01 9.1486573219299316e-02 + <_> + + 0 -1 32 -1.8606419907882810e-03 + + 2.2783969342708588e-01 -2.4307970702648163e-01 + <_> + + 0 -1 33 2.2550800815224648e-02 + + 6.9207556545734406e-02 -3.0054280161857605e-01 + <_> + + 0 -1 34 -4.9752090126276016e-02 + + -6.1078047752380371e-01 9.4472773373126984e-02 + <_> + + 0 -1 35 -2.6602389290928841e-02 + + 5.9581768512725830e-01 -9.2046052217483521e-02 + <_> + + 0 -1 36 1.0760000348091125e-01 + + 1.0278519988059998e-01 -5.4303371906280518e-01 + <_> + + 0 -1 37 1.7690699547529221e-02 + + 6.6057138144969940e-02 -6.3213908672332764e-01 + <_> + + 0 -1 38 -6.2409918755292892e-02 + + 6.8724197149276733e-01 -6.7070558667182922e-02 + <_> + + 0 -1 39 -1.9801619928330183e-03 + + 9.4411551952362061e-02 -8.7819486856460571e-02 + <_> + + 0 -1 40 6.3668429851531982e-02 + + 1.1531739681959152e-01 -4.8129761219024658e-01 + <_> + + 0 -1 41 -3.0797829851508141e-02 + + 3.5854768753051758e-01 -1.2593799829483032e-01 + <_> + + 0 -1 42 -1.8353419727645814e-04 + + 1.4788399636745453e-01 -2.8546810150146484e-01 + <_> + + 0 -1 43 1.7074620118364692e-03 + + 7.9929657280445099e-02 -2.5233370065689087e-01 + <_> + + 0 -1 44 -1.5325199812650681e-02 + + -5.7711857557296753e-01 9.8908327519893646e-02 + <_> + + 0 -1 45 4.1389189660549164e-02 + + -6.5550796687602997e-02 5.7363802194595337e-01 + <_> + + 0 -1 46 -4.5577771379612386e-04 + + 2.2593089938163757e-01 -1.9105580449104309e-01 + <_> + + 0 -1 47 -1.3455689884722233e-02 + + -4.0233930945396423e-01 8.6477622389793396e-02 + <_> + + 0 -1 48 -3.7978399544954300e-02 + + 5.5257588624954224e-01 -8.1541016697883606e-02 + <_> + + 0 -1 49 -1.7197500914335251e-02 + + -1.8363009393215179e-01 5.1999870687723160e-02 + <_> + + 0 -1 50 -1.2581580085679889e-03 + + 1.8830040097236633e-01 -2.5726661086082458e-01 + <_> + + 0 -1 51 6.7725107073783875e-02 + + -8.0956451594829559e-02 7.1803241968154907e-01 + <_> + + 0 -1 52 3.5489428788423538e-02 + + 1.0068070143461227e-01 -5.3774142265319824e-01 + <_> + 29 + -1.0127470493316650e+00 + + <_> + + 0 -1 53 -5.3695798851549625e-03 + + 2.7479499578475952e-01 -3.4178960323333740e-01 + <_> + + 0 -1 54 6.2695867381989956e-04 + + -9.8646633327007294e-02 1.0728420317173004e-01 + <_> + + 0 -1 55 -1.6484269872307777e-02 + + -6.4972907304763794e-01 9.6037752926349640e-02 + <_> + + 0 -1 56 -2.2104099392890930e-02 + + -4.5984488725662231e-01 1.6304630041122437e-01 + <_> + + 0 -1 57 1.1904139816761017e-01 + + -9.9600397050380707e-02 7.3729759454727173e-01 + <_> + + 0 -1 58 -2.0222070161253214e-03 + + 2.1029269695281982e-01 -2.4577130377292633e-01 + <_> + + 0 -1 59 6.7500352859497070e-02 + + -1.2467789649963379e-01 5.7654231786727905e-01 + <_> + + 0 -1 60 -1.9655939936637878e-01 + + -6.0891747474670410e-01 9.9672056734561920e-02 + <_> + + 0 -1 61 4.9431171268224716e-02 + + 1.3752749562263489e-01 -4.5580869913101196e-01 + <_> + + 0 -1 
62 2.3380089551210403e-02 + + 4.7141890972852707e-02 -3.5027709603309631e-01 + <_> + + 0 -1 63 1.3998650247231126e-03 + + -2.0643049478530884e-01 2.4322299659252167e-01 + <_> + + 0 -1 64 1.1432689614593983e-02 + + 5.5187370628118515e-02 -3.2619899511337280e-01 + <_> + + 0 -1 65 4.8775069415569305e-02 + + -6.8992510437965393e-02 7.1171808242797852e-01 + <_> + + 0 -1 66 6.5284021198749542e-02 + + 3.7155740428715944e-03 5.9318971633911133e-01 + <_> + + 0 -1 67 6.1603228095918894e-04 + + -2.3272520303726196e-01 2.0441530644893646e-01 + <_> + + 0 -1 68 -1.0527499951422215e-02 + + -3.1773790717124939e-01 1.0171309858560562e-01 + <_> + + 0 -1 69 1.6231339424848557e-02 + + 9.1734193265438080e-02 -4.7143009305000305e-01 + <_> + + 0 -1 70 3.8958500954322517e-04 + + -1.2997549772262573e-01 1.3475489616394043e-01 + <_> + + 0 -1 71 -4.4165689498186111e-02 + + -6.0331028699874878e-01 6.4766876399517059e-02 + <_> + + 0 -1 72 -1.3663209974765778e-02 + + -5.2762842178344727e-01 6.3485741615295410e-02 + <_> + + 0 -1 73 -8.8231859263032675e-04 + + 1.4510250091552734e-01 -2.7845200896263123e-01 + <_> + + 0 -1 74 -2.7819190174341202e-02 + + 4.3640869855880737e-01 -8.5191860795021057e-02 + <_> + + 0 -1 75 6.2560990452766418e-02 + + 1.0027889907360077e-01 -4.2235919833183289e-01 + <_> + + 0 -1 76 -4.4808178790844977e-04 + + 1.4851489663124084e-01 -1.7731289565563202e-01 + <_> + + 0 -1 77 -2.1363180130720139e-02 + + -6.1334460973739624e-01 6.0539398342370987e-02 + <_> + + 0 -1 78 -6.9122329354286194e-02 + + -8.6845761537551880e-01 3.9347749203443527e-02 + <_> + + 0 -1 79 -3.0542839318513870e-02 + + -6.4021718502044678e-01 4.9593821167945862e-02 + <_> + + 0 -1 80 -1.0101160034537315e-02 + + -1.6199150681495667e-01 5.7256899774074554e-02 + <_> + + 0 -1 81 -2.2010109387338161e-04 + + 2.1350930631160736e-01 -2.0198999345302582e-01 + <_> + 42 + -1.0684469938278198e+00 + + <_> + + 0 -1 82 5.7967850007116795e-03 + + -3.3844178915023804e-01 2.5066271424293518e-01 + <_> + + 0 -1 83 6.3795179128646851e-02 + + -4.2111620306968689e-02 3.5746571421623230e-01 + <_> + + 0 -1 84 -6.4332038164138794e-02 + + -5.0660789012908936e-01 1.1717739701271057e-01 + <_> + + 0 -1 85 -1.1574289947748184e-01 + + -5.6678497791290283e-01 9.5880903303623199e-02 + <_> + + 0 -1 86 -3.9005130529403687e-03 + + -4.1498228907585144e-01 1.4858320355415344e-01 + <_> + + 0 -1 87 1.2512929737567902e-02 + + 5.3696669638156891e-02 -1.4163960516452789e-01 + <_> + + 0 -1 88 1.5871099894866347e-03 + + -2.5962340831756592e-01 1.9418330490589142e-01 + <_> + + 0 -1 89 1.6291120648384094e-01 + + -6.1243768781423569e-02 7.8567212820053101e-01 + <_> + + 0 -1 90 -3.3258220553398132e-01 + + 7.8020131587982178e-01 -4.4036459177732468e-02 + <_> + + 0 -1 91 -1.0288899764418602e-02 + + -1.5289680659770966e-01 6.2096230685710907e-02 + <_> + + 0 -1 92 2.8956029564142227e-02 + + 8.4707796573638916e-02 -4.7820711135864258e-01 + <_> + + 0 -1 93 -3.2221511355601251e-04 + + 1.3951259851455688e-01 -1.8819390237331390e-01 + <_> + + 0 -1 94 1.5835289657115936e-01 + + 6.6667810082435608e-02 -5.4572361707687378e-01 + <_> + + 0 -1 95 -4.2584311217069626e-02 + + 2.7040338516235352e-01 -5.6654509156942368e-02 + <_> + + 0 -1 96 2.7505140751600266e-02 + + 4.9271158874034882e-02 -7.3157638311386108e-01 + <_> + + 0 -1 97 8.6879700422286987e-02 + + -1.7532400786876678e-02 8.6782652139663696e-01 + <_> + + 0 -1 98 -2.0130439661443233e-03 + + 1.6593940556049347e-01 -2.5266230106353760e-01 + <_> + + 0 -1 99 4.2330170981585979e-04 + + 9.4223551452159882e-02 -2.4629700183868408e-01 + <_> + + 0 -1 
100 1.5194499865174294e-02 + + 7.3695637285709381e-02 -5.0068622827529907e-01 + <_> + + 0 -1 101 -6.1203669756650925e-03 + + 2.1381899714469910e-01 -1.6738100349903107e-01 + <_> + + 0 -1 102 2.0660240203142166e-02 + + -8.0636158585548401e-02 5.7828348875045776e-01 + <_> + + 0 -1 103 -6.0398250818252563e-02 + + -6.3411772251129150e-01 5.0899010151624680e-02 + <_> + + 0 -1 104 3.5386480391025543e-02 + + 7.3191151022911072e-02 -5.6426662206649780e-01 + <_> + + 0 -1 105 -6.5997838973999023e-02 + + 3.2833808660507202e-01 -2.6310259476304054e-02 + <_> + + 0 -1 106 1.1004590196534991e-03 + + -2.3114609718322754e-01 2.0206519961357117e-01 + <_> + + 0 -1 107 8.4488153457641602e-02 + + 7.4589841067790985e-02 -4.3710339069366455e-01 + <_> + + 0 -1 108 -2.9235990718007088e-02 + + 6.5064769983291626e-01 -5.4531838744878769e-02 + <_> + + 0 -1 109 -3.3916950225830078e-02 + + -2.8804349899291992e-01 3.2172881066799164e-02 + <_> + + 0 -1 110 -7.9108700156211853e-03 + + -3.3660379052162170e-01 1.0100690275430679e-01 + <_> + + 0 -1 111 5.1930431276559830e-02 + + 3.2920960336923599e-02 -1.3176530599594116e-01 + <_> + + 0 -1 112 -6.8586103618144989e-02 + + 5.2153557538986206e-01 -6.6718578338623047e-02 + <_> + + 0 -1 113 -1.9451669650152326e-03 + + 1.5396790206432343e-01 -1.9895760715007782e-01 + <_> + + 0 -1 114 7.1366228163242340e-02 + + -8.2927159965038300e-02 4.5292338728904724e-01 + <_> + + 0 -1 115 -2.6624239981174469e-02 + + -4.4009739160537720e-01 1.0267119854688644e-01 + <_> + + 0 -1 116 2.5266060605645180e-02 + + 5.5799201130867004e-02 -5.5569338798522949e-01 + <_> + + 0 -1 117 5.5255689658224583e-03 + + -1.3640299439430237e-01 2.8255200386047363e-01 + <_> + + 0 -1 118 -2.9929999727755785e-03 + + -3.2421571016311646e-01 1.2122060358524323e-01 + <_> + + 0 -1 119 2.2192109376192093e-02 + + -6.0741018503904343e-02 4.3473160266876221e-01 + <_> + + 0 -1 120 -9.4268741086125374e-03 + + -3.3458408713340759e-01 1.0029699653387070e-01 + <_> + + 0 -1 121 3.4395330585539341e-03 + + -8.3829909563064575e-02 1.7925940454006195e-01 + <_> + + 0 -1 122 -3.2996390946209431e-03 + + 1.9990429282188416e-01 -2.1068470180034637e-01 + <_> + + 0 -1 123 2.6152150705456734e-02 + + -8.0667406320571899e-02 3.5581269860267639e-01 + <_> + 45 + -1.1520069837570190e+00 + + <_> + + 0 -1 124 -2.2792650386691093e-02 + + 4.0725260972976685e-01 -3.3609920740127563e-01 + <_> + + 0 -1 125 -5.7334620505571365e-03 + + 2.6882189512252808e-01 -2.2775350511074066e-01 + <_> + + 0 -1 126 9.6941202878952026e-02 + + -8.0905012786388397e-02 7.4328738451004028e-01 + <_> + + 0 -1 127 -2.8288999572396278e-02 + + 4.5610108971595764e-01 -6.1096340417861938e-02 + <_> + + 0 -1 128 3.8522849790751934e-03 + + -2.5241801142692566e-01 2.0907109975814819e-01 + <_> + + 0 -1 129 2.3100129328668118e-03 + + -1.4713400602340698e-01 1.5460389852523804e-01 + <_> + + 0 -1 130 1.1361920041963458e-03 + + 1.7680479586124420e-01 -3.0537289381027222e-01 + <_> + + 0 -1 131 2.4962890893220901e-02 + + -1.2652909755706787e-01 3.7442651391029358e-01 + <_> + + 0 -1 132 -5.8984099887311459e-03 + + 2.6738989353179932e-01 -1.7762570083141327e-01 + <_> + + 0 -1 133 1.1804900132119656e-02 + + 6.6077977418899536e-02 -3.3482131361961365e-01 + <_> + + 0 -1 134 6.4400159753859043e-03 + + 1.0994800180196762e-01 -3.6303481459617615e-01 + <_> + + 0 -1 135 -8.9407369494438171e-02 + + -4.3580460548400879e-01 1.4944310300052166e-02 + <_> + + 0 -1 136 -3.1404230743646622e-02 + + 6.9523447751998901e-01 -5.4854288697242737e-02 + <_> + + 0 -1 137 -1.4607949554920197e-01 + + -2.5650060176849365e-01 
5.6956540793180466e-02 + <_> + + 0 -1 138 2.1142649929970503e-03 + + -2.4987550079822540e-01 1.6792559623718262e-01 + <_> + + 0 -1 139 -1.5119359828531742e-02 + + -3.0179870128631592e-01 1.0393589735031128e-01 + <_> + + 0 -1 140 2.5620959699153900e-02 + + -7.4821300804615021e-02 5.3600782155990601e-01 + <_> + + 0 -1 141 -1.4417800307273865e-01 + + -2.0490899682044983e-01 7.4457786977291107e-02 + <_> + + 0 -1 142 2.5954779237508774e-02 + + -9.0574868023395538e-02 4.8442208766937256e-01 + <_> + + 0 -1 143 -2.1130720153450966e-02 + + -2.2689810395240784e-01 6.4876057207584381e-02 + <_> + + 0 -1 144 1.6474459320306778e-02 + + 1.0768000036478043e-01 -3.6570599675178528e-01 + <_> + + 0 -1 145 1.0922150313854218e-01 + + 5.6827351450920105e-02 -3.4728559851646423e-01 + <_> + + 0 -1 146 -7.4581061198841780e-05 + + 1.3904270529747009e-01 -2.5942608714103699e-01 + <_> + + 0 -1 147 -2.7753600850701332e-02 + + 3.8111299276351929e-01 -4.2896129190921783e-02 + <_> + + 0 -1 148 3.2721430063247681e-02 + + -9.0872153639793396e-02 3.9289179444313049e-01 + <_> + + 0 -1 149 5.5606258101761341e-03 + + 8.4002248942852020e-02 -1.9396039843559265e-01 + <_> + + 0 -1 150 -1.0710290074348450e-01 + + -5.8981472253799438e-01 5.6862760335206985e-02 + <_> + + 0 -1 151 -8.0517623573541641e-03 + + 1.1790599673986435e-01 -1.1595659703016281e-01 + <_> + + 0 -1 152 -1.3850019872188568e-01 + + -9.0805321931838989e-01 4.1411358863115311e-02 + <_> + + 0 -1 153 2.8620919212698936e-02 + + 1.9928589463233948e-02 -7.3697662353515625e-01 + <_> + + 0 -1 154 2.6208970695734024e-02 + + -6.1577551066875458e-02 6.0899931192398071e-01 + <_> + + 0 -1 155 2.6527039706707001e-02 + + 5.7193860411643982e-02 -6.2992326915264130e-02 + <_> + + 0 -1 156 -4.4622488319873810e-02 + + -3.3318150043487549e-01 9.3214571475982666e-02 + <_> + + 0 -1 157 -1.4283119700849056e-02 + + 1.9125230610370636e-01 -1.1530569940805435e-01 + <_> + + 0 -1 158 -1.9681209232658148e-03 + + -3.1295120716094971e-01 9.9682807922363281e-02 + <_> + + 0 -1 159 5.2851080894470215e-02 + + -5.8919548988342285e-02 5.7887911796569824e-01 + <_> + + 0 -1 160 -6.3711861148476601e-03 + + 1.9182190299034119e-01 -1.9094540178775787e-01 + <_> + + 0 -1 161 -6.4727910794317722e-03 + + -2.4721039831638336e-01 1.2252929806709290e-01 + <_> + + 0 -1 162 -1.6690989956259727e-02 + + -4.9174660444259644e-01 5.0315100699663162e-02 + <_> + + 0 -1 163 -1.4882409945130348e-02 + + 1.9646610319614410e-01 -5.8250389993190765e-02 + <_> + + 0 -1 164 1.7529709264636040e-02 + + 7.6357498764991760e-02 -3.6559268832206726e-01 + <_> + + 0 -1 165 4.2221389710903168e-02 + + -3.1560491770505905e-02 3.6011269688606262e-01 + <_> + + 0 -1 166 -6.5581746399402618e-02 + + 3.4334710240364075e-01 -8.8556960225105286e-02 + <_> + + 0 -1 167 1.6703210771083832e-02 + + 4.8210039734840393e-02 -1.5273620188236237e-01 + <_> + + 0 -1 168 -6.9328742101788521e-03 + + -3.0573639273643494e-01 1.1821140348911285e-01 + <_> + 46 + -1.0648390054702759e+00 + + <_> + + 0 -1 169 -6.3434438779950142e-03 + + 3.3840280771255493e-01 -3.3474850654602051e-01 + <_> + + 0 -1 170 5.2472548559308052e-03 + + -9.3596532940864563e-02 1.6791179776191711e-01 + <_> + + 0 -1 171 -3.6585088819265366e-02 + + 5.3676098585128784e-01 -8.5433527827262878e-02 + <_> + + 0 -1 172 5.3153699263930321e-03 + + -1.2804119288921356e-01 1.4443910121917725e-01 + <_> + + 0 -1 173 -3.9569609798491001e-03 + + 1.8605449795722961e-01 -2.2311410307884216e-01 + <_> + + 0 -1 174 3.3965419977903366e-02 + + 2.7835709974169731e-02 -5.1203387975692749e-01 + <_> + + 0 -1 175 
-1.4852879568934441e-02 + + -4.6814951300621033e-01 1.1351560056209564e-01 + <_> + + 0 -1 176 -2.9641329310834408e-03 + + 2.6591798663139343e-01 -2.8183770179748535e-01 + <_> + + 0 -1 177 -1.0795590281486511e-01 + + -5.7527697086334229e-01 1.0991639643907547e-01 + <_> + + 0 -1 178 2.1237600594758987e-02 + + -1.0451590269804001e-01 4.6613770723342896e-01 + <_> + + 0 -1 179 -2.6189640164375305e-02 + + 4.2544820904731750e-01 -9.2278912663459778e-02 + <_> + + 0 -1 180 -3.5010561347007751e-02 + + -7.1801197528839111e-01 7.2877250611782074e-02 + <_> + + 0 -1 181 1.5026619621494319e-05 + + -2.7199760079383850e-01 1.0682159662246704e-01 + <_> + + 0 -1 182 -2.7760250493884087e-02 + + -5.0185692310333252e-01 1.0118210315704346e-01 + <_> + + 0 -1 183 -3.7439178675413132e-02 + + -3.7141519784927368e-01 8.3709038794040680e-02 + <_> + + 0 -1 184 -1.4152259565889835e-02 + + 3.0982801318168640e-01 -7.3767662048339844e-02 + <_> + + 0 -1 185 -1.2331079691648483e-02 + + -3.9507681131362915e-01 8.3215177059173584e-02 + <_> + + 0 -1 186 2.6666349731385708e-03 + + -1.3776129484176636e-01 2.4245689809322357e-01 + <_> + + 0 -1 187 -2.9443199746310711e-03 + + 2.4460780620574951e-01 -1.3937890529632568e-01 + <_> + + 0 -1 188 -1.5788920223712921e-01 + + -5.6832242012023926e-01 3.6140721291303635e-02 + <_> + + 0 -1 189 2.1553030237555504e-03 + + 8.3660557866096497e-02 -4.1380259394645691e-01 + <_> + + 0 -1 190 -8.5367091000080109e-02 + + -5.7053291797637939e-01 5.2995659410953522e-02 + <_> + + 0 -1 191 3.4761740826070309e-03 + + -1.2189819663763046e-01 2.6553291082382202e-01 + <_> + + 0 -1 192 -2.4104220792651176e-02 + + -5.2315437793731689e-01 2.5505660101771355e-02 + <_> + + 0 -1 193 -3.0729150399565697e-02 + + -4.6735408902168274e-01 7.0844426751136780e-02 + <_> + + 0 -1 194 -1.1937420349568129e-03 + + 1.4596860110759735e-01 -2.3086270689964294e-01 + <_> + + 0 -1 195 3.2304100692272186e-02 + + -6.5350927412509918e-02 5.5091381072998047e-01 + <_> + + 0 -1 196 1.4955499768257141e-01 + + 1.5002089552581310e-02 -8.9400452375411987e-01 + <_> + + 0 -1 197 -4.7254669480025768e-03 + + 1.4857460558414459e-01 -2.1019940078258514e-01 + <_> + + 0 -1 198 3.6360718309879303e-02 + + 2.8547950088977814e-02 -6.3668930530548096e-01 + <_> + + 0 -1 199 -2.7109999209642410e-02 + + 4.9661910533905029e-01 -7.3661573231220245e-02 + <_> + + 0 -1 200 -9.5398407429456711e-03 + + -1.9384680688381195e-01 5.8507081121206284e-02 + <_> + + 0 -1 201 1.0541989654302597e-01 + + -7.4785731732845306e-02 4.3781110644340515e-01 + <_> + + 0 -1 202 6.3801761716604233e-03 + + 5.3971529006958008e-02 -3.3829790353775024e-01 + <_> + + 0 -1 203 -2.2759849205613136e-02 + + -5.9374898672103882e-01 4.8046529293060303e-02 + <_> + + 0 -1 204 -1.7323749139904976e-02 + + -1.6034699976444244e-01 1.5187160111963749e-02 + <_> + + 0 -1 205 2.9854409396648407e-02 + + -6.5698243677616119e-02 4.5057341456413269e-01 + <_> + + 0 -1 206 2.3269839584827423e-02 + + 3.8805499672889709e-02 -3.5354879498481750e-01 + <_> + + 0 -1 207 4.0833871811628342e-02 + + 4.9404840916395187e-02 -5.6222450733184814e-01 + <_> + + 0 -1 208 -1.2498889863491058e-01 + + 6.7763668298721313e-01 -1.5484940260648727e-02 + <_> + + 0 -1 209 -6.5579377114772797e-02 + + 6.7363232374191284e-01 -4.5269690454006195e-02 + <_> + + 0 -1 210 -3.7901759147644043e-01 + + -4.9853721261024475e-01 2.3955229669809341e-02 + <_> + + 0 -1 211 2.9792459681630135e-03 + + -1.8436419963836670e-01 1.6265830397605896e-01 + <_> + + 0 -1 212 1.3803659938275814e-02 + + 6.3698217272758484e-02 -4.3389800190925598e-01 + <_> + + 0 -1 
213 3.5606899764388800e-03 + + -1.1455070227384567e-01 2.3618610203266144e-01 + <_> + + 0 -1 214 8.8772783055901527e-03 + + 8.6416840553283691e-02 -1.7590980231761932e-01 + <_> + 45 + -9.5069932937622070e-01 + + <_> + + 0 -1 215 -6.7344820126891136e-03 + + 3.0758589506149292e-01 -2.9761791229248047e-01 + <_> + + 0 -1 216 -1.3902880251407623e-02 + + 2.0400699973106384e-01 -2.2967250645160675e-01 + <_> + + 0 -1 217 -4.1963551193475723e-02 + + -5.6575411558151245e-01 8.6745493113994598e-02 + <_> + + 0 -1 218 -5.9794791013700888e-05 + + 1.5832610428333282e-01 -2.3109050095081329e-01 + <_> + + 0 -1 219 8.4739532321691513e-03 + + -1.1501230299472809e-01 3.9758589863777161e-01 + <_> + + 0 -1 220 -6.5317057073116302e-02 + + -2.3887279629707336e-01 1.1391709744930267e-01 + <_> + + 0 -1 221 -4.2358501814305782e-03 + + 2.2337220609188080e-01 -2.4218839406967163e-01 + <_> + + 0 -1 222 4.6229299157857895e-02 + + 9.6837401390075684e-02 -5.3427702188491821e-01 + <_> + + 0 -1 223 5.2246701670810580e-05 + + -2.4189360439777374e-01 1.5932360291481018e-01 + <_> + + 0 -1 224 -4.1420090943574905e-02 + + -3.4044981002807617e-01 4.3712481856346130e-02 + <_> + + 0 -1 225 -1.0224279947578907e-02 + + -2.4752390384674072e-01 1.5512530505657196e-01 + <_> + + 0 -1 226 6.8581208586692810e-02 + + 9.7173796966671944e-03 -6.1821222305297852e-01 + <_> + + 0 -1 227 -4.0700301527976990e-02 + + -6.0284787416458130e-01 7.0963069796562195e-02 + <_> + + 0 -1 228 -8.9998699724674225e-02 + + 4.6664720773696899e-01 -4.8549890518188477e-02 + <_> + + 0 -1 229 1.5307360328733921e-02 + + 1.4783670008182526e-01 -2.7114608883857727e-01 + <_> + + 0 -1 230 3.7016849964857101e-03 + + -1.5153409540653229e-01 2.0931409299373627e-01 + <_> + + 0 -1 231 -3.1937099993228912e-02 + + -7.2332257032394409e-01 3.7420161068439484e-02 + <_> + + 0 -1 232 4.7493908554315567e-02 + + 4.9000091850757599e-02 -4.8303189873695374e-01 + <_> + + 0 -1 233 4.4620381668210030e-03 + + -1.7698319256305695e-01 1.9820910692214966e-01 + <_> + + 0 -1 234 -8.1284176558256149e-03 + + 1.1222189664840698e-01 -5.0805520266294479e-02 + <_> + + 0 -1 235 -1.2596019543707371e-02 + + 4.3889060616493225e-01 -8.2898952066898346e-02 + <_> + + 0 -1 236 -1.0689930059015751e-03 + + 6.8766087293624878e-02 -8.2667008042335510e-02 + <_> + + 0 -1 237 -4.8213090747594833e-02 + + -4.6671348810195923e-01 7.4310712516307831e-02 + <_> + + 0 -1 238 -2.3418650380335748e-04 + + 8.8725142180919647e-02 -1.0919640213251114e-01 + <_> + + 0 -1 239 1.0095000267028809e-01 + + 5.5444270372390747e-02 -5.5205368995666504e-01 + <_> + + 0 -1 240 3.2340411096811295e-02 + + 4.9762740731239319e-02 -3.6636400222778320e-01 + <_> + + 0 -1 241 1.7699210345745087e-01 + + -7.3765642940998077e-02 5.4300791025161743e-01 + <_> + + 0 -1 242 -1.8634319712873548e-04 + + 9.5718666911125183e-02 -1.8214109539985657e-01 + <_> + + 0 -1 243 6.6473139449954033e-03 + + -1.2173130363225937e-01 3.0331039428710938e-01 + <_> + + 0 -1 244 -9.9276658147573471e-03 + + 3.2638520002365112e-01 -8.8533706963062286e-02 + <_> + + 0 -1 245 5.2587099373340607e-02 + + 1.1303950101137161e-01 -3.3436870574951172e-01 + <_> + + 0 -1 246 4.9553681164979935e-03 + + -1.3183289766311646e-01 9.7614809870719910e-02 + <_> + + 0 -1 247 -2.3817660287022591e-02 + + -4.1027650237083435e-01 8.4849812090396881e-02 + <_> + + 0 -1 248 -1.1363780125975609e-02 + + 1.8874420225620270e-01 -8.3536416292190552e-02 + <_> + + 0 -1 249 -1.9515539752319455e-03 + + 1.8985089659690857e-01 -1.7776779830455780e-01 + <_> + + 0 -1 250 -1.3576669618487358e-02 + + 
2.0975759625434875e-01 -3.7115450948476791e-02 + <_> + + 0 -1 251 1.6466820612549782e-02 + + -8.2349412143230438e-02 3.8047221302986145e-01 + <_> + + 0 -1 252 -1.0136260092258453e-01 + + -1.1633230000734329e-01 6.7804910242557526e-02 + <_> + + 0 -1 253 -1.0248430073261261e-01 + + -2.8850209712982178e-01 1.2139680236577988e-01 + <_> + + 0 -1 254 -2.8717568516731262e-01 + + 4.6935141086578369e-01 -8.2954309880733490e-02 + <_> + + 0 -1 255 5.0812978297472000e-02 + + 5.5393878370523453e-02 -6.2383282184600830e-01 + <_> + + 0 -1 256 9.1063417494297028e-02 + + -2.3379560559988022e-02 4.7155299782752991e-01 + <_> + + 0 -1 257 -5.1845338195562363e-02 + + -6.9031542539596558e-01 4.5454118400812149e-02 + <_> + + 0 -1 258 1.5031239390373230e-01 + + 4.5906711369752884e-02 -5.2067738771438599e-01 + <_> + + 0 -1 259 4.1596319526433945e-02 + + 5.3706299513578415e-02 -4.8782169818878174e-01 + <_> + 43 + -8.5045951604843140e-01 + + <_> + + 0 -1 260 -5.9847710654139519e-03 + + 2.7858960628509521e-01 -3.0923390388488770e-01 + <_> + + 0 -1 261 -3.9032639469951391e-03 + + 2.2257049381732941e-01 -2.8928229212760925e-01 + <_> + + 0 -1 262 -2.2362179151969030e-05 + + 1.4084370434284210e-01 -3.0143168568611145e-01 + <_> + + 0 -1 263 -9.1167002916336060e-02 + + -6.7608010768890381e-01 5.6040819734334946e-02 + <_> + + 0 -1 264 5.2755638957023621e-02 + + 7.4688747525215149e-02 -6.3256257772445679e-01 + <_> + + 0 -1 265 6.9458536803722382e-02 + + -1.1754920333623886e-01 6.3863641023635864e-01 + <_> + + 0 -1 266 -4.8209438100457191e-03 + + 2.9225930571556091e-01 -1.3872410356998444e-01 + <_> + + 0 -1 267 3.2156750559806824e-02 + + 7.5575239956378937e-02 -5.7927912473678589e-01 + <_> + + 0 -1 268 -4.4298470020294189e-02 + + 4.0226811170578003e-01 -1.0264609754085541e-01 + <_> + + 0 -1 269 -7.0452108047902584e-03 + + 1.5128499269485474e-01 -5.6725870817899704e-02 + <_> + + 0 -1 270 5.1606830675154924e-04 + + -2.3022100329399109e-01 1.6343879699707031e-01 + <_> + + 0 -1 271 -6.1528358608484268e-02 + + 2.5559040904045105e-01 -4.6751510351896286e-02 + <_> + + 0 -1 272 -5.1367811858654022e-02 + + -2.4755829572677612e-01 1.4305450022220612e-01 + <_> + + 0 -1 273 9.0107098221778870e-03 + + -1.0648769885301590e-01 3.1271860003471375e-01 + <_> + + 0 -1 274 2.2352259606122971e-02 + + 1.5494219958782196e-01 -3.1736290454864502e-01 + <_> + + 0 -1 275 3.1493891030550003e-02 + + 7.2037532925605774e-02 -2.8946670889854431e-01 + <_> + + 0 -1 276 -5.2064459770917892e-02 + + -2.7082020044326782e-01 1.2260189652442932e-01 + <_> + + 0 -1 277 -6.1549381352961063e-03 + + 1.6442950069904327e-01 -1.0657779872417450e-01 + <_> + + 0 -1 278 3.0305041000247002e-03 + + -1.5234139561653137e-01 2.0446379482746124e-01 + <_> + + 0 -1 279 -6.8027540110051632e-03 + + 7.1448147296905518e-02 -4.1458301246166229e-02 + <_> + + 0 -1 280 6.8647533655166626e-02 + + -5.2833538502454758e-02 5.7638901472091675e-01 + <_> + + 0 -1 281 -9.2883080244064331e-02 + + -2.6236709952354431e-01 8.2425810396671295e-02 + <_> + + 0 -1 282 -5.2907038480043411e-03 + + 1.4090450108051300e-01 -2.2050650417804718e-01 + <_> + + 0 -1 283 1.5640209894627333e-03 + + -1.0143549740314484e-01 1.3026970624923706e-01 + <_> + + 0 -1 284 1.0752620175480843e-02 + + 9.1515362262725830e-02 -3.2133978605270386e-01 + <_> + + 0 -1 285 -2.1106360480189323e-02 + + -2.7410230040550232e-01 9.1773197054862976e-03 + <_> + + 0 -1 286 4.8663117922842503e-03 + + -1.5258720517158508e-01 1.9711069762706757e-01 + <_> + + 0 -1 287 6.5396472811698914e-02 + + 6.5921088680624962e-03 -6.4343088865280151e-01 + <_> + 
+ 0 -1 288 4.4902609661221504e-03 + + -1.0377249866724014e-01 2.8005209565162659e-01 + <_> + + 0 -1 289 4.6614840626716614e-02 + + 5.4715849459171295e-02 -5.2179151773452759e-01 + <_> + + 0 -1 290 1.1597450077533722e-01 + + 3.9613999426364899e-02 -6.4784902334213257e-01 + <_> + + 0 -1 291 5.7222661562263966e-03 + + -5.4838169366121292e-02 1.2828019261360168e-01 + <_> + + 0 -1 292 -4.1633259505033493e-02 + + -8.0665838718414307e-01 3.5942289978265762e-02 + <_> + + 0 -1 293 -4.7252390533685684e-02 + + -7.9193192720413208e-01 1.2737370096147060e-02 + <_> + + 0 -1 294 -1.6451090341433883e-03 + + 2.0376729965209961e-01 -1.3230639696121216e-01 + <_> + + 0 -1 295 2.5758889969438314e-03 + + -6.3503406941890717e-02 1.3530080020427704e-01 + <_> + + 0 -1 296 2.0758589729666710e-02 + + 4.7286979854106903e-02 -5.8212000131607056e-01 + <_> + + 0 -1 297 -2.8601480647921562e-02 + + -4.1221970319747925e-01 2.4210980162024498e-02 + <_> + + 0 -1 298 -2.8691580519080162e-02 + + -5.5404680967330933e-01 4.5068629086017609e-02 + <_> + + 0 -1 299 -2.6637869887053967e-03 + + 1.2570230662822723e-01 -1.6319499909877777e-01 + <_> + + 0 -1 300 -4.4750720262527466e-03 + + -2.7138069272041321e-01 1.0293100029230118e-01 + <_> + + 0 -1 301 4.0937099605798721e-02 + + -3.2065469771623611e-02 1.3092640042304993e-01 + <_> + + 0 -1 302 7.5827181339263916e-02 + + -5.1221519708633423e-02 5.6596297025680542e-01 + <_> + 58 + -9.1252201795578003e-01 + + <_> + + 0 -1 303 -4.2669968679547310e-03 + + 1.7704419791698456e-01 -2.8265419602394104e-01 + <_> + + 0 -1 304 -2.2577939555048943e-02 + + 2.3657959699630737e-01 -4.2326368391513824e-02 + <_> + + 0 -1 305 -9.8107997328042984e-03 + + -3.8568308949470520e-01 9.0982303023338318e-02 + <_> + + 0 -1 306 3.8510379381477833e-03 + + -1.0270400345325470e-01 1.9267590343952179e-01 + <_> + + 0 -1 307 -2.0688450895249844e-03 + + 1.6656570136547089e-01 -2.1394389867782593e-01 + <_> + + 0 -1 308 -5.8368500322103500e-02 + + 3.4833571314811707e-01 -8.0605462193489075e-02 + <_> + + 0 -1 309 5.6290920823812485e-02 + + -6.1617989093065262e-02 6.9421827793121338e-01 + <_> + + 0 -1 310 5.5776340886950493e-03 + + 7.8374862670898438e-02 -4.0764930844306946e-01 + <_> + + 0 -1 311 5.0974669866263866e-03 + + 1.5001790225505829e-01 -2.7620849013328552e-01 + <_> + + 0 -1 312 2.4134019389748573e-02 + + -3.7685971707105637e-02 4.0111309289932251e-01 + <_> + + 0 -1 313 2.6251180097460747e-03 + + -1.8986889719963074e-01 1.6666570305824280e-01 + <_> + + 0 -1 314 -2.3179719224572182e-02 + + -6.0807460546493530e-01 3.3016931265592575e-02 + <_> + + 0 -1 315 -1.7960369586944580e-03 + + 1.8328389525413513e-01 -1.6300560534000397e-01 + <_> + + 0 -1 316 1.1327250301837921e-01 + + 1.6392359510064125e-02 -3.8521450757980347e-01 + <_> + + 0 -1 317 -1.1120930314064026e-02 + + -2.6789391040802002e-01 1.2030880153179169e-01 + <_> + + 0 -1 318 8.9298561215400696e-03 + + -6.4766243100166321e-02 5.2446700632572174e-02 + <_> + + 0 -1 319 3.0264519155025482e-02 + + -5.3343709558248520e-02 4.9170601367950439e-01 + <_> + + 0 -1 320 1.3036240637302399e-01 + + 9.9123492836952209e-03 -8.0775249004364014e-01 + <_> + + 0 -1 321 -4.8941900022327900e-03 + + 1.4153289794921875e-01 -2.4222679436206818e-01 + <_> + + 0 -1 322 -1.8009349703788757e-02 + + -1.8352709710597992e-01 5.3784269839525223e-02 + <_> + + 0 -1 323 6.3028637669049203e-05 + + -2.0836220681667328e-01 1.3861179351806641e-01 + <_> + + 0 -1 324 -3.8127291202545166e-01 + + -7.6527822017669678e-01 3.4578099846839905e-02 + <_> + + 0 -1 325 1.6168570145964622e-02 + + 
-7.8577049076557159e-02 3.6086350679397583e-01 + <_> + + 0 -1 326 -2.0725380629301071e-02 + + -3.2905191183090210e-01 8.1693336367607117e-02 + <_> + + 0 -1 327 -1.4763489889446646e-04 + + 1.0449170321226120e-01 -2.7624139189720154e-01 + <_> + + 0 -1 328 -1.6959169879555702e-02 + + -2.4150790274143219e-01 5.4569680243730545e-02 + <_> + + 0 -1 329 -1.5221100300550461e-02 + + 4.1033148765563965e-01 -6.8333253264427185e-02 + <_> + + 0 -1 330 -9.6041243523359299e-03 + + -3.3569648861885071e-01 8.6250491440296173e-02 + <_> + + 0 -1 331 -1.6476860037073493e-03 + + 1.6236330568790436e-01 -1.9044490158557892e-01 + <_> + + 0 -1 332 -1.0705839842557907e-01 + + -8.6767107248306274e-01 7.3941340669989586e-03 + <_> + + 0 -1 333 -1.8818160519003868e-02 + + -3.6879110336303711e-01 6.8846642971038818e-02 + <_> + + 0 -1 334 -5.6142187677323818e-03 + + 1.7322039604187012e-01 -1.2514470517635345e-01 + <_> + + 0 -1 335 7.3969298973679543e-03 + + -8.5467368364334106e-02 3.2027161121368408e-01 + <_> + + 0 -1 336 9.4870915636420250e-03 + + 6.3168406486511230e-02 -2.0918910205364227e-01 + <_> + + 0 -1 337 1.8458140548318624e-03 + + -1.5436279773712158e-01 1.8517020344734192e-01 + <_> + + 0 -1 338 -1.9747359678149223e-02 + + 3.3071118593215942e-01 -7.6775848865509033e-02 + <_> + + 0 -1 339 3.2421160489320755e-02 + + 8.2021132111549377e-02 -4.0147501230239868e-01 + <_> + + 0 -1 340 2.9075390193611383e-03 + + -7.7174037694931030e-02 1.0620699822902679e-01 + <_> + + 0 -1 341 1.5189359895884991e-02 + + 6.0363899916410446e-02 -4.1365239024162292e-01 + <_> + + 0 -1 342 -3.0683739110827446e-02 + + 4.3470621109008789e-01 -5.9381321072578430e-02 + <_> + + 0 -1 343 -1.0973449796438217e-02 + + -2.9535230994224548e-01 8.5516467690467834e-02 + <_> + + 0 -1 344 -3.9540361613035202e-02 + + -2.8765881061553955e-01 3.4472968429327011e-02 + <_> + + 0 -1 345 -3.7935871630907059e-02 + + 3.8199868798255920e-01 -8.5364766418933868e-02 + <_> + + 0 -1 346 3.0669810250401497e-02 + + 4.4738098978996277e-02 -1.7703640460968018e-01 + <_> + + 0 -1 347 1.7194509506225586e-01 + + -5.9214178472757339e-02 4.9291038513183594e-01 + <_> + + 0 -1 348 -6.7055500112473965e-03 + + 1.6410259902477264e-01 -2.1826469898223877e-01 + <_> + + 0 -1 349 -3.8577869534492493e-01 + + -6.7176771163940430e-01 4.2349591851234436e-02 + <_> + + 0 -1 350 2.7213040739297867e-02 + + 1.2266149744391441e-02 -2.2954210638999939e-01 + <_> + + 0 -1 351 -1.9294980913400650e-02 + + -5.8373439311981201e-01 3.8380999118089676e-02 + <_> + + 0 -1 352 7.6792249456048012e-03 + + -4.7490350902080536e-02 1.5964460372924805e-01 + <_> + + 0 -1 353 6.0242269682930782e-05 + + -1.1734239757061005e-01 1.8236650526523590e-01 + <_> + + 0 -1 354 -6.6498141677584499e-05 + + 7.4745140969753265e-02 -1.6989439725875854e-01 + <_> + + 0 -1 355 4.3275849893689156e-03 + + 7.3789797723293304e-02 -2.8444349765777588e-01 + <_> + + 0 -1 356 -3.3140469342470169e-02 + + -4.0606608986854553e-01 1.0028730146586895e-02 + <_> + + 0 -1 357 9.9181402474641800e-03 + + -7.9339787364006042e-02 2.8190010786056519e-01 + <_> + + 0 -1 358 -2.3577339015901089e-03 + + 1.5301220118999481e-01 -1.0475979745388031e-01 + <_> + + 0 -1 359 -2.6200819760560989e-02 + + -5.4185032844543457e-01 4.4369250535964966e-02 + <_> + + 0 -1 360 4.7328658401966095e-02 + + 1.8897749483585358e-02 -8.2665932178497314e-01 + <_> + 44 + -1.1653599739074707e+00 + + <_> + + 0 -1 361 2.9921719804406166e-02 + + -3.2315000891685486e-01 5.1092821359634399e-01 + <_> + + 0 -1 362 5.6147608906030655e-02 + + -1.2574400007724762e-01 6.6749179363250732e-01 + <_> 
+ + 0 -1 363 -1.3759849593043327e-02 + + 4.0691190958023071e-01 -2.1075299382209778e-01 + <_> + + 0 -1 364 -4.3788701295852661e-03 + + 2.7940139174461365e-01 -2.0955459773540497e-01 + <_> + + 0 -1 365 1.9208889454603195e-02 + + -8.9800693094730377e-02 5.0936561822891235e-01 + <_> + + 0 -1 366 -8.9393591042608023e-04 + + 1.0703620314598083e-01 -1.2294200062751770e-01 + <_> + + 0 -1 367 -6.2918022740632296e-04 + + -3.7847930192947388e-01 1.3008819520473480e-01 + <_> + + 0 -1 368 -1.6248769825324416e-03 + + 1.7750020325183868e-01 -2.7811211347579956e-01 + <_> + + 0 -1 369 -4.6151960268616676e-03 + + 2.4071510136127472e-01 -1.4269010722637177e-01 + <_> + + 0 -1 370 5.7162828743457794e-02 + + -1.8474869430065155e-02 4.5086058974266052e-01 + <_> + + 0 -1 371 -3.8265369366854429e-03 + + 2.5951761007308960e-01 -1.1455159634351730e-01 + <_> + + 0 -1 372 -4.5235190540552139e-02 + + -3.3849009871482849e-01 3.4538950771093369e-02 + <_> + + 0 -1 373 3.8135750219225883e-03 + + 1.1333999782800674e-01 -2.7620390057563782e-01 + <_> + + 0 -1 374 4.5108258724212646e-02 + + 2.8602050617337227e-02 -1.5837669372558594e-01 + <_> + + 0 -1 375 -2.7794970665127039e-03 + + 2.8897428512573242e-01 -1.0822720080614090e-01 + <_> + + 0 -1 376 5.6366869248449802e-03 + + -1.0184790194034576e-01 7.8787103295326233e-02 + <_> + + 0 -1 377 -5.2986819297075272e-02 + + 5.2964997291564941e-01 -6.5543353557586670e-02 + <_> + + 0 -1 378 7.4737891554832458e-02 + + 2.6320660486817360e-02 -3.0487209558486938e-01 + <_> + + 0 -1 379 4.1559520177543163e-03 + + -2.2977170348167419e-01 1.5662179887294769e-01 + <_> + + 0 -1 380 -2.9388200491666794e-03 + + -1.6916410624980927e-01 9.6996672451496124e-02 + <_> + + 0 -1 381 -1.3065510429441929e-02 + + 4.0258568525314331e-01 -7.1614369750022888e-02 + <_> + + 0 -1 382 -3.4928251057863235e-02 + + -4.9449989199638367e-01 2.2547820582985878e-02 + <_> + + 0 -1 383 2.1728971041738987e-03 + + -1.5552569925785065e-01 2.0136219263076782e-01 + <_> + + 0 -1 384 1.4387349598109722e-02 + + 3.6348100751638412e-02 -2.9468619823455811e-01 + <_> + + 0 -1 385 6.7830132320523262e-03 + + -8.2248352468013763e-02 3.3857500553131104e-01 + <_> + + 0 -1 386 -7.2883836925029755e-02 + + -3.4577670693397522e-01 1.9601320847868919e-02 + <_> + + 0 -1 387 -4.5158518478274345e-03 + + 1.7059490084648132e-01 -1.9742819666862488e-01 + <_> + + 0 -1 388 -1.3742079958319664e-02 + + -2.1214349567890167e-01 3.3953689038753510e-02 + <_> + + 0 -1 389 7.8056701458990574e-03 + + 7.1426697075366974e-02 -3.4223988652229309e-01 + <_> + + 0 -1 390 2.1649990230798721e-02 + + -6.1925049871206284e-02 3.7267661094665527e-01 + <_> + + 0 -1 391 -6.7706637084484100e-02 + + -3.0304160714149475e-01 9.4357587397098541e-02 + <_> + + 0 -1 392 -2.1855749655514956e-03 + + 1.0831770300865173e-01 -1.5530540049076080e-01 + <_> + + 0 -1 393 -2.5483060162514448e-03 + + -2.4103440344333649e-01 9.2916287481784821e-02 + <_> + + 0 -1 394 -6.7207813262939453e-02 + + -6.6259348392486572e-01 1.6074649989604950e-02 + <_> + + 0 -1 395 4.7799371182918549e-02 + + -4.4412638992071152e-02 6.0569787025451660e-01 + <_> + + 0 -1 396 -9.1178417205810547e-02 + + 2.4761490523815155e-01 -3.4762401133775711e-02 + <_> + + 0 -1 397 -3.8592480123043060e-03 + + -2.5366741418838501e-01 1.0194999724626541e-01 + <_> + + 0 -1 398 2.4100970476865768e-03 + + -1.2133970111608505e-01 1.9767910242080688e-01 + <_> + + 0 -1 399 -5.3831469267606735e-03 + + 1.7103940248489380e-01 -1.6189830005168915e-01 + <_> + + 0 -1 400 9.1004222631454468e-03 + + -6.0921549797058105e-02 1.7695249617099762e-01 + 
<_> + + 0 -1 401 2.2724110167473555e-03 + + -9.0476967394351959e-02 2.7440631389617920e-01 + <_> + + 0 -1 402 -8.0621562898159027e-02 + + -8.8045567274093628e-01 1.7193239182233810e-02 + <_> + + 0 -1 403 3.8965709973126650e-03 + + -1.7037920653820038e-01 1.7979580163955688e-01 + <_> + + 0 -1 404 -4.3093641288578510e-03 + + -2.9382050037384033e-01 8.6317472159862518e-02 + <_> + 44 + -9.4284927845001221e-01 + + <_> + + 0 -1 405 -6.3116192817687988e-02 + + 5.5512517690658569e-01 -3.5997709631919861e-01 + <_> + + 0 -1 406 8.4350287914276123e-02 + + -1.2531270086765289e-01 5.3567689657211304e-01 + <_> + + 0 -1 407 -2.1390730142593384e-01 + + 7.5156861543655396e-01 -8.8270872831344604e-02 + <_> + + 0 -1 408 -2.9744980856776237e-02 + + 2.0106209814548492e-01 -1.2106689810752869e-01 + <_> + + 0 -1 409 -1.1987680196762085e-01 + + 6.4692199230194092e-01 -7.7747613191604614e-02 + <_> + + 0 -1 410 3.0843529384583235e-03 + + -6.3067637383937836e-02 7.7889077365398407e-02 + <_> + + 0 -1 411 -4.5560211874544621e-03 + + 1.8972270190715790e-01 -1.9929079711437225e-01 + <_> + + 0 -1 412 4.4629329931922257e-04 + + 1.4051589369773865e-01 -3.0292418599128723e-01 + <_> + + 0 -1 413 -6.4954371191561222e-03 + + 3.1942290067672729e-01 -1.1072000116109848e-01 + <_> + + 0 -1 414 -2.1751760505139828e-03 + + 1.6477259993553162e-01 -8.0424778163433075e-02 + <_> + + 0 -1 415 6.5875840373337269e-03 + + 1.4716550707817078e-01 -3.0198150873184204e-01 + <_> + + 0 -1 416 2.0701209083199501e-02 + + -4.2996689677238464e-02 4.0123820304870605e-01 + <_> + + 0 -1 417 2.5877119041979313e-03 + + 1.2630540132522583e-01 -2.7518120408058167e-01 + <_> + + 0 -1 418 -1.0545079596340656e-02 + + 1.9637629389762878e-01 -3.9772778749465942e-02 + <_> + + 0 -1 419 6.2396968714892864e-03 + + -8.3563409745693207e-02 3.6655488610267639e-01 + <_> + + 0 -1 420 1.4458670280873775e-02 + + 6.3301697373390198e-02 -5.8498907089233398e-01 + <_> + + 0 -1 421 3.1263440847396851e-02 + + -1.0675270110368729e-01 3.4852859377861023e-01 + <_> + + 0 -1 422 1.4865349512547255e-03 + + 1.3709670305252075e-01 -1.3731659948825836e-01 + <_> + + 0 -1 423 -1.7898039368446916e-04 + + 1.7839649319648743e-01 -2.5751718878746033e-01 + <_> + + 0 -1 424 7.7714473009109497e-02 + + 5.7081848382949829e-02 -2.4273400008678436e-01 + <_> + + 0 -1 425 2.2228270769119263e-02 + + 1.4593790471553802e-01 -2.0994609594345093e-01 + <_> + + 0 -1 426 1.6969949938356876e-03 + + -1.4418889582157135e-01 2.7375409007072449e-01 + <_> + + 0 -1 427 -2.0023470744490623e-02 + + -3.7556248903274536e-01 8.1627696752548218e-02 + <_> + + 0 -1 428 3.8644319865852594e-03 + + -6.4490430057048798e-02 1.5921689569950104e-01 + <_> + + 0 -1 429 -3.0527650378644466e-03 + + 2.6751521229743958e-01 -1.0531850159168243e-01 + <_> + + 0 -1 430 5.6112320162355900e-03 + + -6.8567730486392975e-02 2.1234990656375885e-01 + <_> + + 0 -1 431 4.6622268855571747e-03 + + 1.4254149794578552e-01 -2.0892719924449921e-01 + <_> + + 0 -1 432 2.4710448924452066e-03 + + 7.2614386677742004e-02 -1.8833909928798676e-01 + <_> + + 0 -1 433 1.2655000202357769e-02 + + -8.3605259656906128e-02 4.3262240290641785e-01 + <_> + + 0 -1 434 -1.7724519595503807e-02 + + 1.7432230710983276e-01 -2.8479820117354393e-02 + <_> + + 0 -1 435 -7.2321272455155849e-04 + + 1.5343970060348511e-01 -2.4012179672718048e-01 + <_> + + 0 -1 436 -6.2155709601938725e-03 + + 2.5166681408882141e-01 -8.5519887506961823e-02 + <_> + + 0 -1 437 4.1632771492004395e-02 + + 5.0593800842761993e-02 -6.0965442657470703e-01 + <_> + + 0 -1 438 2.3918300867080688e-02 + + 
-3.6809660494327545e-02 3.9055478572845459e-01 + <_> + + 0 -1 439 -7.4353138916194439e-03 + + 1.5018579363822937e-01 -1.8627819418907166e-01 + <_> + + 0 -1 440 -2.0571449771523476e-02 + + -2.8574559092521667e-01 4.8302378505468369e-02 + <_> + + 0 -1 441 -7.3831980116665363e-03 + + 3.6680561304092407e-01 -9.6067756414413452e-02 + <_> + + 0 -1 442 9.7222924232482910e-03 + + 6.3898019492626190e-02 -1.7262579500675201e-01 + <_> + + 0 -1 443 -2.1807629615068436e-02 + + 1.8027269840240479e-01 -1.9109119474887848e-01 + <_> + + 0 -1 444 5.8147668838500977e-02 + + 8.5709961131215096e-03 -4.6250829100608826e-01 + <_> + + 0 -1 445 -9.4539504498243332e-03 + + -2.8908729553222656e-01 1.1421570181846619e-01 + <_> + + 0 -1 446 -2.1080709993839264e-02 + + 3.7570050358772278e-01 -2.5591030716896057e-02 + <_> + + 0 -1 447 -4.0629571303725243e-03 + + 2.7146670222282410e-01 -1.0845380276441574e-01 + <_> + + 0 -1 448 -1.2826620042324066e-01 + + 1. -1.0962430387735367e-03 + <_> + 61 + -9.5620310306549072e-01 + + <_> + + 0 -1 449 -1.2662290036678314e-01 + + 6.2268221378326416e-01 -1.4810459315776825e-01 + <_> + + 0 -1 450 -7.0846290327608585e-03 + + 2.0133779942989349e-01 -1.7728950083255768e-01 + <_> + + 0 -1 451 1.1459200084209442e-01 + + -8.8975846767425537e-02 5.7395541667938232e-01 + <_> + + 0 -1 452 3.3472150098532438e-03 + + 7.5708203017711639e-02 -2.8222179412841797e-01 + <_> + + 0 -1 453 5.1924228668212891e-02 + + -1.3948489725589752e-01 2.5681090354919434e-01 + <_> + + 0 -1 454 -4.1343908756971359e-02 + + 2.2414180636405945e-01 -4.3653670698404312e-02 + <_> + + 0 -1 455 -3.2056469470262527e-02 + + -5.9409761428833008e-01 5.1891159266233444e-02 + <_> + + 0 -1 456 -4.0590870194137096e-03 + + 1.6402080655097961e-01 -1.5528389811515808e-01 + <_> + + 0 -1 457 -9.1876718215644360e-05 + + 1.0587870329618454e-01 -2.8261598944664001e-01 + <_> + + 0 -1 458 2.8358219191431999e-02 + + 5.7384029030799866e-02 -6.7094147205352783e-02 + <_> + + 0 -1 459 -7.4662521481513977e-02 + + 5.6916707754135132e-01 -4.8785641789436340e-02 + <_> + + 0 -1 460 -3.6556490231305361e-03 + + 2.2369490563869476e-01 -1.2202149629592896e-01 + <_> + + 0 -1 461 3.1778779812157154e-03 + + 1.2240319699048996e-01 -2.7681729197502136e-01 + <_> + + 0 -1 462 3.8044340908527374e-02 + + 2.3216400295495987e-02 -5.3732901811599731e-01 + <_> + + 0 -1 463 8.7831392884254456e-03 + + -7.4337556958198547e-02 3.2851231098175049e-01 + <_> + + 0 -1 464 -5.9818099252879620e-03 + + -1.9504779577255249e-01 6.6976852715015411e-02 + <_> + + 0 -1 465 -1.6369449440389872e-03 + + 1.4674800634384155e-01 -1.8024149537086487e-01 + <_> + + 0 -1 466 -9.9193133413791656e-02 + + 6.8363517522811890e-01 -2.9652720317244530e-02 + <_> + + 0 -1 467 -1.0352009907364845e-02 + + 3.4225308895111084e-01 -8.1141538918018341e-02 + <_> + + 0 -1 468 2.5637909770011902e-02 + + 5.1416900008916855e-02 -1.6697999835014343e-01 + <_> + + 0 -1 469 -1.2416959507390857e-03 + + 1.2488900125026703e-01 -2.1346220374107361e-01 + <_> + + 0 -1 470 1.5018839621916413e-03 + + 9.7934387624263763e-02 -2.6385021209716797e-01 + <_> + + 0 -1 471 -3.2703679054975510e-02 + + 5.7504880428314209e-01 -4.5875400304794312e-02 + <_> + + 0 -1 472 2.1297169849276543e-02 + + 6.1069380491971970e-02 -2.2480219602584839e-01 + <_> + + 0 -1 473 -8.8358018547296524e-04 + + 9.5625787973403931e-02 -2.7564591169357300e-01 + <_> + + 0 -1 474 -3.6556860432028770e-03 + + 2.4107089638710022e-01 -1.0359519720077515e-01 + <_> + + 0 -1 475 3.4300461411476135e-02 + + 3.9062701165676117e-02 -6.2445348501205444e-01 + <_> + + 0 -1 476 
1.1492350138723850e-02 + + -6.9246053695678711e-02 3.8258171081542969e-01 + <_> + + 0 -1 477 -3.1294790096580982e-03 + + 1.1273369938135147e-01 -2.3122510313987732e-01 + <_> + + 0 -1 478 -4.0945871733129025e-03 + + -1.7195980250835419e-01 1.3112659752368927e-01 + <_> + + 0 -1 479 -3.0921408906579018e-03 + + -2.5460389256477356e-01 9.6659161150455475e-02 + <_> + + 0 -1 480 -4.1672129184007645e-02 + + 2.7327769994735718e-01 -6.3094623386859894e-02 + <_> + + 0 -1 481 1.1384460143744946e-02 + + -7.1872517466545105e-02 4.1160398721694946e-01 + <_> + + 0 -1 482 -2.3934150114655495e-02 + + 1.3192340731620789e-01 -1.7954839766025543e-01 + <_> + + 0 -1 483 -3.1554169952869415e-02 + + -5.8792132139205933e-01 4.1782889515161514e-02 + <_> + + 0 -1 484 -2.4033859372138977e-02 + + -1.5534760057926178e-01 2.7700260281562805e-02 + <_> + + 0 -1 485 3.1589470803737640e-02 + + -3.9150279015302658e-02 6.0951721668243408e-01 + <_> + + 0 -1 486 -2.4214860051870346e-02 + + -2.4587619304656982e-01 9.1133296489715576e-02 + <_> + + 0 -1 487 1.9322870066389441e-03 + + -1.1647839844226837e-01 1.8819290399551392e-01 + <_> + + 0 -1 488 -3.6017759703099728e-03 + + 9.7600512206554413e-02 -4.8918090760707855e-02 + <_> + + 0 -1 489 3.1516118906438351e-03 + + 6.5808869898319244e-02 -3.1577658653259277e-01 + <_> + + 0 -1 490 -6.3677072525024414e-02 + + -8.6415481567382812e-01 -9.9097320344299078e-04 + <_> + + 0 -1 491 -3.9085028693079948e-03 + + 2.0826210081577301e-01 -1.0560230165719986e-01 + <_> + + 0 -1 492 -2.6837719604372978e-02 + + -1.8375129997730255e-01 2.9545329511165619e-02 + <_> + + 0 -1 493 3.1312298960983753e-03 + + -1.2626689672470093e-01 1.6888590157032013e-01 + <_> + + 0 -1 494 -7.3491871356964111e-02 + + -1. 5.6774187833070755e-03 + <_> + + 0 -1 495 1.8034819513559341e-02 + + -6.8617410957813263e-02 3.3438131213188171e-01 + <_> + + 0 -1 496 6.8655997514724731e-02 + + 4.6462309546768665e-03 -8.0664628744125366e-01 + <_> + + 0 -1 497 -4.6970890834927559e-03 + + -2.0121769607067108e-01 1.1580040305852890e-01 + <_> + + 0 -1 498 4.6783890575170517e-02 + + -3.5802699625492096e-02 4.1625639796257019e-01 + <_> + + 0 -1 499 4.5946058817207813e-03 + + 8.8457576930522919e-02 -2.6894488930702209e-01 + <_> + + 0 -1 500 -1.3852829579263926e-03 + + 8.1391222774982452e-02 -1.4880420267581940e-01 + <_> + + 0 -1 501 2.1788759157061577e-02 + + -9.1640457510948181e-02 2.1261249482631683e-01 + <_> + + 0 -1 502 -1.3380090240389109e-04 + + 9.6424743533134460e-02 -1.4717370271682739e-01 + <_> + + 0 -1 503 -4.7990411520004272e-02 + + -6.1987131834030151e-01 3.8760710507631302e-02 + <_> + + 0 -1 504 2.0026009529829025e-02 + + -3.5972420126199722e-02 1.9393420219421387e-01 + <_> + + 0 -1 505 1.0723130544647574e-03 + + -1.9447499513626099e-01 1.2064950168132782e-01 + <_> + + 0 -1 506 2.2665090858936310e-02 + + 4.8719439655542374e-02 -2.3640799522399902e-01 + <_> + + 0 -1 507 -1.1042109690606594e-02 + + -2.6107341051101685e-01 1.0075490176677704e-01 + <_> + + 0 -1 508 -1.2811049818992615e-02 + + 1.5199629962444305e-01 -8.8552959263324738e-02 + <_> + + 0 -1 509 -3.6628648638725281e-02 + + 3.8858860731124878e-01 -7.7304549515247345e-02 + <_> + 72 + -8.7708407640457153e-01 + + <_> + + 0 -1 510 -5.4606638848781586e-02 + + 5.5801349878311157e-01 -1.4168889820575714e-01 + <_> + + 0 -1 511 3.3533740788698196e-02 + + -2.7386279776692390e-02 4.4381770491600037e-01 + <_> + + 0 -1 512 -9.9635301157832146e-03 + + 2.5193908810615540e-01 -1.4647540450096130e-01 + <_> + + 0 -1 513 1.8188880058005452e-03 + + -1.1264120042324066e-01 
1.1523260176181793e-01 + <_> + + 0 -1 514 -4.8793829977512360e-02 + + 5.1317107677459717e-01 -7.8665018081665039e-02 + <_> + + 0 -1 515 -1.3357769697904587e-02 + + -1.4197979867458344e-01 1.1862599849700928e-01 + <_> + + 0 -1 516 1.1562240542843938e-03 + + -2.0949220657348633e-01 1.5693040192127228e-01 + <_> + + 0 -1 517 -6.2384512275457382e-03 + + -1.4336450397968292e-01 1.1303550004959106e-01 + <_> + + 0 -1 518 4.4234818778932095e-03 + + -1.0358580201864243e-01 2.4589489400386810e-01 + <_> + + 0 -1 519 5.2964448928833008e-02 + + 1.2561550363898277e-02 -6.2551808357238770e-01 + <_> + + 0 -1 520 5.5844681337475777e-03 + + 8.3967886865139008e-02 -2.4653799831867218e-01 + <_> + + 0 -1 521 -4.1809541289694607e-04 + + 6.9588072597980499e-02 -1.3558819890022278e-01 + <_> + + 0 -1 522 -8.9637134224176407e-03 + + -3.0442738533020020e-01 6.9894723594188690e-02 + <_> + + 0 -1 523 2.4479050189256668e-02 + + -3.1651828438043594e-02 2.0308789610862732e-01 + <_> + + 0 -1 524 -2.5842329487204552e-02 + + 5.0401061773300171e-01 -6.3922062516212463e-02 + <_> + + 0 -1 525 -2.0785620436072350e-03 + + 1.0980220139026642e-01 -1.1839559674263000e-01 + <_> + + 0 -1 526 6.8030342459678650e-02 + + 4.2290739715099335e-02 -5.1855510473251343e-01 + <_> + + 0 -1 527 -7.0639760233461857e-03 + + -2.0031100511550903e-01 2.4955609813332558e-02 + <_> + + 0 -1 528 -3.4848200157284737e-03 + + 2.3135329782962799e-01 -9.6989557147026062e-02 + <_> + + 0 -1 529 1.3147160410881042e-02 + + -3.7450950592756271e-02 2.5842788815498352e-01 + <_> + + 0 -1 530 -1.4271659776568413e-02 + + -3.0110171437263489e-01 7.9672336578369141e-02 + <_> + + 0 -1 531 1.2653480283915997e-02 + + 4.9039140343666077e-02 -1.4988109469413757e-01 + <_> + + 0 -1 532 -4.4893440790474415e-03 + + 1.7208859324455261e-01 -1.5355649590492249e-01 + <_> + + 0 -1 533 3.2365400344133377e-02 + + -9.0493112802505493e-02 3.5779160261154175e-01 + <_> + + 0 -1 534 4.6125808730721474e-03 + + 1.1445190012454987e-01 -2.6519489288330078e-01 + <_> + + 0 -1 535 2.8645930811762810e-02 + + -3.5988539457321167e-02 3.0025520920753479e-01 + <_> + + 0 -1 536 -2.3571979254484177e-02 + + -2.4872820079326630e-01 9.1967120766639709e-02 + <_> + + 0 -1 537 -1.0739799588918686e-02 + + -2.1367760002613068e-01 9.6477411687374115e-02 + <_> + + 0 -1 538 2.3728659376502037e-02 + + -7.0916198194026947e-02 4.3828758597373962e-01 + <_> + + 0 -1 539 -3.2800701260566711e-01 + + 5.8840030431747437e-01 -3.1756788492202759e-02 + <_> + + 0 -1 540 7.5008560997957829e-06 + + -1.8288560211658478e-01 1.2022940069437027e-01 + <_> + + 0 -1 541 3.0071409419178963e-02 + + 2.7802020311355591e-02 -4.3224281072616577e-01 + <_> + + 0 -1 542 -2.1936609409749508e-03 + + 1.3592420518398285e-01 -1.4038629829883575e-01 + <_> + + 0 -1 543 2.0174339413642883e-02 + + -6.1628919094800949e-02 3.1579768657684326e-01 + <_> + + 0 -1 544 9.7460206598043442e-03 + + 8.8958032429218292e-02 -2.2594009339809418e-01 + <_> + + 0 -1 545 -1.2958340346813202e-02 + + -1.2200850248336792e-01 8.6518086493015289e-02 + <_> + + 0 -1 546 1.1445499956607819e-02 + + -6.4182333648204803e-02 3.0279749631881714e-01 + <_> + + 0 -1 547 -3.3802569378167391e-03 + + 1.1177670210599899e-01 -1.2922379374504089e-01 + <_> + + 0 -1 548 2.0366210490465164e-02 + + 1.0104539990425110e-01 -2.5991159677505493e-01 + <_> + + 0 -1 549 3.8058649748563766e-02 + + 1.3168349862098694e-02 -7.5580632686614990e-01 + <_> + + 0 -1 550 2.3050000891089439e-03 + + -1.0766649991273880e-01 1.8757669627666473e-01 + <_> + + 0 -1 551 5.1847118884325027e-02 + + -2.2320529446005821e-02 
1.8795830011367798e-01 + <_> + + 0 -1 552 1.1383029632270336e-02 + + 6.0226161032915115e-02 -3.5961788892745972e-01 + <_> + + 0 -1 553 8.2553178071975708e-03 + + -8.5131391882896423e-02 2.3493440449237823e-01 + <_> + + 0 -1 554 -2.6984339579939842e-02 + + -2.1479399502277374e-01 9.3656733632087708e-02 + <_> + + 0 -1 555 -1.0289980098605156e-02 + + 5.8254890143871307e-02 -8.3950929343700409e-02 + <_> + + 0 -1 556 -1.4419780200114474e-05 + + 1.0392870008945465e-01 -1.7317299544811249e-01 + <_> + + 0 -1 557 1.0065140202641487e-02 + + -4.1311118751764297e-02 1.7616020143032074e-01 + <_> + + 0 -1 558 -1.4870229642838240e-04 + + 1.5657539665699005e-01 -1.2030059844255447e-01 + <_> + + 0 -1 559 -3.1059589236974716e-03 + + 1.1674880236387253e-01 -9.1372460126876831e-02 + <_> + + 0 -1 560 1.0708030313253403e-02 + + -7.7608227729797363e-02 2.7916100621223450e-01 + <_> + + 0 -1 561 -9.7792129963636398e-03 + + -2.9060921072959900e-01 7.1562640368938446e-02 + <_> + + 0 -1 562 2.0121980458498001e-02 + + 4.3994959443807602e-02 -4.2539501190185547e-01 + <_> + + 0 -1 563 -6.3295163214206696e-02 + + 3.7034231424331665e-01 -5.2549809217453003e-02 + <_> + + 0 -1 564 -8.7289556860923767e-02 + + -6.4299279451370239e-01 3.1952869147062302e-02 + <_> + + 0 -1 565 2.0398540422320366e-02 + + -4.5955598354339600e-02 4.6266159415245056e-01 + <_> + + 0 -1 566 -4.0313000790774822e-03 + + 1.3840849697589874e-01 -1.7980839312076569e-01 + <_> + + 0 -1 567 -1.5734519809484482e-02 + + -1.8477180600166321e-01 6.9983080029487610e-02 + <_> + + 0 -1 568 3.3332880120724440e-03 + + 1.1277650296688080e-01 -1.9513790309429169e-01 + <_> + + 0 -1 569 4.3689161539077759e-02 + + 5.9510939754545689e-03 -5.5423438549041748e-01 + <_> + + 0 -1 570 -2.0920610986649990e-03 + + 1.9163469970226288e-01 -9.7136110067367554e-02 + <_> + + 0 -1 571 2.0574270747601986e-03 + + -1.0197430104017258e-01 1.4083810150623322e-01 + <_> + + 0 -1 572 8.8018123060464859e-03 + + 1.1987809836864471e-01 -1.5638549625873566e-01 + <_> + + 0 -1 573 -1.6882529482245445e-02 + + -1.8438099324703217e-01 1.9492870196700096e-02 + <_> + + 0 -1 574 -6.1647890834137797e-04 + + 1.0665109753608704e-01 -2.2164009511470795e-01 + <_> + + 0 -1 575 1.0317339911125600e-04 + + -1.1228899657726288e-01 1.3858650624752045e-01 + <_> + + 0 -1 576 1.5316329896450043e-02 + + -5.0639409571886063e-02 4.1119828820228577e-01 + <_> + + 0 -1 577 1.0660690255463123e-02 + + 5.8820810168981552e-02 -1.6454669833183289e-01 + <_> + + 0 -1 578 -1.9296869635581970e-02 + + 3.9260959625244141e-01 -5.2761189639568329e-02 + <_> + + 0 -1 579 1.0018110275268555e-02 + + 1.0068470239639282e-01 -1.9756269454956055e-01 + <_> + + 0 -1 580 -2.7263790369033813e-02 + + 3.5332089662551880e-01 -5.5305551737546921e-02 + <_> + + 0 -1 581 5.4494310170412064e-03 + + 6.7253768444061279e-02 -1.8384470045566559e-01 + <_> + 75 + -8.5267168283462524e-01 + + <_> + + 0 -1 582 -5.7434860616922379e-02 + + 5.0582551956176758e-01 -1.2274570018053055e-01 + <_> + + 0 -1 583 -1.2750659883022308e-01 + + 5.7605969905853271e-01 -4.3710928410291672e-02 + <_> + + 0 -1 584 -6.3675642013549805e-02 + + 5.7122522592544556e-01 -4.9968320876359940e-02 + <_> + + 0 -1 585 -1.1928480118513107e-02 + + 2.1641939878463745e-01 -1.8480269610881805e-01 + <_> + + 0 -1 586 1.3247699826024473e-04 + + -2.2685679793357849e-01 1.0648279637098312e-01 + <_> + + 0 -1 587 6.4140267204493284e-04 + + 9.4751678407192230e-02 -2.6892009377479553e-01 + <_> + + 0 -1 588 -2.9463530518114567e-03 + + 1.3910910487174988e-01 -1.7091070115566254e-01 + <_> + + 0 -1 589 
5.3384741768240929e-03 + + 8.3969242870807648e-02 -9.5441989600658417e-02 + <_> + + 0 -1 590 5.8703150600194931e-02 + + -6.9647520780563354e-02 3.3629441261291504e-01 + <_> + + 0 -1 591 -2.5406300555914640e-03 + + 9.6176013350486755e-02 -1.5758140385150909e-01 + <_> + + 0 -1 592 -3.1899519264698029e-02 + + -2.7956488728523254e-01 7.0359513163566589e-02 + <_> + + 0 -1 593 -3.2022708654403687e-01 + + -9.0805047750473022e-01 7.5922380201518536e-03 + <_> + + 0 -1 594 3.5796251147985458e-02 + + -5.0070770084857941e-02 4.2101579904556274e-01 + <_> + + 0 -1 595 -1.9079160690307617e-01 + + -2.2061030566692352e-01 6.5184786915779114e-02 + <_> + + 0 -1 596 -1.2181829661130905e-02 + + 1.3479439914226532e-01 -1.6667750477790833e-01 + <_> + + 0 -1 597 -3.2165799289941788e-02 + + -2.5105410814285278e-01 1.9344560801982880e-02 + <_> + + 0 -1 598 3.6299630999565125e-02 + + -5.9490781277418137e-02 4.0007731318473816e-01 + <_> + + 0 -1 599 2.0224580541253090e-02 + + 5.6489799171686172e-02 -1.3418239355087280e-01 + <_> + + 0 -1 600 -2.5393130257725716e-02 + + 3.6507838964462280e-01 -6.6002182662487030e-02 + <_> + + 0 -1 601 -1.2022369541227818e-02 + + -1.7655059695243835e-01 7.3997639119625092e-02 + <_> + + 0 -1 602 4.7965139150619507e-02 + + 4.4668558984994888e-02 -4.4584980607032776e-01 + <_> + + 0 -1 603 -2.0564019680023193e-01 + + -7.3254501819610596e-01 1.9955230876803398e-02 + <_> + + 0 -1 604 -1.6601709648966789e-03 + + 1.1633270233869553e-01 -1.5488509833812714e-01 + <_> + + 0 -1 605 8.6899623274803162e-02 + + -5.4107550531625748e-02 2.6952400803565979e-01 + <_> + + 0 -1 606 -1.1374129680916667e-03 + + -1.4314429461956024e-01 1.2444330006837845e-01 + <_> + + 0 -1 607 3.0976340174674988e-02 + + 2.9864860698580742e-02 -3.2607930898666382e-01 + <_> + + 0 -1 608 2.6978010311722755e-02 + + -4.5098248869180679e-02 3.6128848791122437e-01 + <_> + + 0 -1 609 1.9421820342540741e-01 + + 3.2255191355943680e-02 -6.8981701135635376e-01 + <_> + + 0 -1 610 -2.0443359389901161e-02 + + 2.9300108551979065e-01 -6.4483217895030975e-02 + <_> + + 0 -1 611 -4.0420450270175934e-02 + + -7.6823359727859497e-01 1.2281980365514755e-02 + <_> + + 0 -1 612 -1.2641429901123047e-02 + + -2.7573791146278381e-01 6.1901118606328964e-02 + <_> + + 0 -1 613 -3.9670299738645554e-02 + + 3.2828390598297119e-01 -2.0364999771118164e-02 + <_> + + 0 -1 614 2.0246729254722595e-02 + + -5.8393601328134537e-02 3.3060538768768311e-01 + <_> + + 0 -1 615 8.9611168950796127e-03 + + 9.0096317231655121e-02 -2.2343009710311890e-01 + <_> + + 0 -1 616 -8.3055719733238220e-03 + + 1.4175349473953247e-01 -1.2607260048389435e-01 + <_> + + 0 -1 617 -2.8248139642528258e-05 + + 9.4516962766647339e-02 -2.1810370683670044e-01 + <_> + + 0 -1 618 -5.1939398981630802e-03 + + 1.3304319977760315e-01 -1.3341580331325531e-01 + <_> + + 0 -1 619 1.1773110181093216e-01 + + 2.9586199671030045e-02 -2.4020829796791077e-01 + <_> + + 0 -1 620 6.7896701395511627e-02 + + 8.0913707613945007e-02 -2.3454460501670837e-01 + <_> + + 0 -1 621 -2.6683699339628220e-02 + + 3.0590981245040894e-01 -6.4152047038078308e-02 + <_> + + 0 -1 622 3.5058211069554090e-03 + + 8.9341968297958374e-02 -2.2773680090904236e-01 + <_> + + 0 -1 623 -6.5844372147694230e-04 + + 1.2458139657974243e-01 -9.1352440416812897e-02 + <_> + + 0 -1 624 7.2530400939285755e-03 + + -6.9285176694393158e-02 2.5482881069183350e-01 + <_> + + 0 -1 625 -2.8056129813194275e-02 + + -2.0867039263248444e-01 3.3539578318595886e-02 + <_> + + 0 -1 626 -5.1205180585384369e-02 + + -2.4107429385185242e-01 6.4439408481121063e-02 + <_> + + 0 -1 
627 2.9234649613499641e-02 + + -5.0803840160369873e-02 3.6485049128532410e-01 + <_> + + 0 -1 628 -1.0219520330429077e-01 + + 4.0123480558395386e-01 -4.2902119457721710e-02 + <_> + + 0 -1 629 1.5104969963431358e-02 + + 1.0481490194797516e-01 -1.8472430109977722e-01 + <_> + + 0 -1 630 -1.2570650316774845e-02 + + -2.0540939271450043e-01 9.3013197183609009e-02 + <_> + + 0 -1 631 1.2253070250153542e-02 + + -5.9285100549459457e-02 2.3927310109138489e-01 + <_> + + 0 -1 632 -2.6166990399360657e-02 + + -6.9966787099838257e-01 2.4906709790229797e-02 + <_> + + 0 -1 633 7.0817661471664906e-03 + + 2.4173120036721230e-02 -5.5144792795181274e-01 + <_> + + 0 -1 634 2.1426850929856300e-02 + + 6.4168840646743774e-02 -2.5997900962829590e-01 + <_> + + 0 -1 635 1.8189709633588791e-02 + + 3.5838250070810318e-02 -1.8020580708980560e-01 + <_> + + 0 -1 636 1.7415799200534821e-02 + + -8.3862036466598511e-02 3.3338528871536255e-01 + <_> + + 0 -1 637 -1.4878029469400644e-03 + + 1.2078859657049179e-01 -1.2769320607185364e-01 + <_> + + 0 -1 638 7.5296638533473015e-03 + + -7.0014707744121552e-02 3.2181090116500854e-01 + <_> + + 0 -1 639 -6.1499018222093582e-02 + + 4.6469798684120178e-01 -1.0073710232973099e-02 + <_> + + 0 -1 640 -1.9133290334139019e-04 + + -1.4094290137290955e-01 1.3830110430717468e-01 + <_> + + 0 -1 641 -2.4422289803624153e-02 + + -2.5292310118675232e-01 6.7684173583984375e-02 + <_> + + 0 -1 642 -2.6136320829391479e-01 + + 3.4003540873527527e-01 -5.8462549000978470e-02 + <_> + + 0 -1 643 -7.6046779751777649e-02 + + -7.8514158725738525e-01 5.2708541043102741e-03 + <_> + + 0 -1 644 -3.0279329512268305e-03 + + 1.8527059257030487e-01 -9.0691961348056793e-02 + <_> + + 0 -1 645 -8.0219199880957603e-03 + + -1.2540580332279205e-01 3.0594889074563980e-02 + <_> + + 0 -1 646 -2.0705960690975189e-01 + + -7.5411921739578247e-01 2.1201130002737045e-02 + <_> + + 0 -1 647 -9.5322817564010620e-02 + + -2.9623070359230042e-01 1.3138709589838982e-02 + <_> + + 0 -1 648 9.5921624451875687e-03 + + 8.4324322640895844e-02 -2.1746580302715302e-01 + <_> + + 0 -1 649 -1.3089469633996487e-02 + + 9.3607500195503235e-02 -6.5754130482673645e-02 + <_> + + 0 -1 650 1.1732880026102066e-02 + + -8.0039046704769135e-02 2.3291939496994019e-01 + <_> + + 0 -1 651 1.5239049494266510e-01 + + 9.9299130961298943e-03 -6.5196067094802856e-01 + <_> + + 0 -1 652 -6.4591512084007263e-02 + + 2.8372219204902649e-01 -6.0058828443288803e-02 + <_> + + 0 -1 653 -5.5493030697107315e-02 + + 2.6659101247787476e-01 -1.0336419567465782e-02 + <_> + + 0 -1 654 -5.0287410616874695e-02 + + -6.9501471519470215e-01 2.7849879115819931e-02 + <_> + + 0 -1 655 -4.7794249653816223e-01 + + -9.2871952056884766e-01 5.9050112031400204e-03 + <_> + + 0 -1 656 -1.4398519881069660e-02 + + -4.5541068911552429e-01 3.6409981548786163e-02 + <_> + 67 + -7.4186658859252930e-01 + + <_> + + 0 -1 657 1.9511899445205927e-03 + + -2.4936990439891815e-01 1.4111639559268951e-01 + <_> + + 0 -1 658 -4.6634670346975327e-02 + + 3.7840589880943298e-01 -7.8401736915111542e-02 + <_> + + 0 -1 659 1.6193749383091927e-02 + + 7.5213313102722168e-02 -4.1991469264030457e-01 + <_> + + 0 -1 660 -1.2459639401640743e-04 + + 6.8576186895370483e-02 -1.7935420572757721e-01 + <_> + + 0 -1 661 7.3257791809737682e-03 + + 1.0322099924087524e-01 -2.6099279522895813e-01 + <_> + + 0 -1 662 -1.5020779756014235e-05 + + 7.3122598230838776e-02 -1.6718889772891998e-01 + <_> + + 0 -1 663 -3.4522008150815964e-02 + + -3.9326989650726318e-01 7.6727166771888733e-02 + <_> + + 0 -1 664 -8.2679510116577148e-02 + + 
-7.4677819013595581e-01 1.5530600212514400e-02 + <_> + + 0 -1 665 8.2162402570247650e-02 + + -6.9249503314495087e-02 3.7914600968360901e-01 + <_> + + 0 -1 666 3.4187830984592438e-02 + + 4.2608659714460373e-02 -1.5429890155792236e-01 + <_> + + 0 -1 667 -1.7891369760036469e-02 + + -3.0639570951461792e-01 7.8118398785591125e-02 + <_> + + 0 -1 668 3.3130999654531479e-02 + + -5.6183800101280212e-02 3.7405240535736084e-01 + <_> + + 0 -1 669 -5.7486710138618946e-03 + + 1.2490350008010864e-01 -2.0527860522270203e-01 + <_> + + 0 -1 670 3.3536829054355621e-02 + + -4.8344220966100693e-02 2.6724401116371155e-01 + <_> + + 0 -1 671 2.4723829701542854e-02 + + 8.3678968250751495e-02 -3.3730649948120117e-01 + <_> + + 0 -1 672 2.2355809342116117e-03 + + 1.0374590009450912e-01 -1.3071919977664948e-01 + <_> + + 0 -1 673 -2.4322168901562691e-03 + + 1.5645089745521545e-01 -1.3284459710121155e-01 + <_> + + 0 -1 674 2.5999119505286217e-02 + + -8.0343127250671387e-02 2.1610119938850403e-01 + <_> + + 0 -1 675 3.6965688195778057e-05 + + -1.7871010303497314e-01 1.0563120245933533e-01 + <_> + + 0 -1 676 -1.6291500627994537e-01 + + -6.9141697883605957e-01 2.2374730557203293e-02 + <_> + + 0 -1 677 1.3008140027523041e-01 + + -4.2769040912389755e-02 4.6373569965362549e-01 + <_> + + 0 -1 678 2.7658540755510330e-02 + + -3.7108600139617920e-02 3.8386580348014832e-01 + <_> + + 0 -1 679 -1.0020419955253601e-02 + + -2.6328051090240479e-01 7.4858680367469788e-02 + <_> + + 0 -1 680 -3.0459940433502197e-02 + + 3.2300901412963867e-01 -2.5858370587229729e-02 + <_> + + 0 -1 681 1.3251040363684297e-03 + + 1.4447669684886932e-01 -2.1082170307636261e-01 + <_> + + 0 -1 682 -2.7931010350584984e-02 + + 1.4374519884586334e-01 -1.6162300109863281e-01 + <_> + + 0 -1 683 -8.8642723858356476e-03 + + 2.3000620305538177e-01 -9.5095098018646240e-02 + <_> + + 0 -1 684 -1.2213969603180885e-02 + + -2.4646399915218353e-01 6.5522022545337677e-02 + <_> + + 0 -1 685 -4.8737529665231705e-02 + + -7.9127711057662964e-01 2.5416409596800804e-02 + <_> + + 0 -1 686 6.1185289174318314e-02 + + -1.2226430408190936e-04 -9.0545868873596191e-01 + <_> + + 0 -1 687 2.6453679427504539e-02 + + 2.6562800630927086e-02 -6.3954341411590576e-01 + <_> + + 0 -1 688 8.8589917868375778e-03 + + 5.4145850241184235e-02 -2.1601280570030212e-01 + <_> + + 0 -1 689 3.4847941249608994e-02 + + -4.5749358832836151e-02 4.3935400247573853e-01 + <_> + + 0 -1 690 -1.4598210155963898e-01 + + -5.5561769008636475e-01 9.5249973237514496e-03 + <_> + + 0 -1 691 -5.0456568598747253e-02 + + -7.5287848711013794e-01 2.0214710384607315e-02 + <_> + + 0 -1 692 -8.5443779826164246e-02 + + -1. -1.3681349810212851e-03 + <_> + + 0 -1 693 1.3248980045318604e-02 + + 6.3400700688362122e-02 -2.5411811470985413e-01 + <_> + + 0 -1 694 -6.5935611724853516e-01 + + -1. 
7.7378489077091217e-03 + <_> + + 0 -1 695 5.0879311747848988e-03 + + -8.3207741379737854e-02 1.8876290321350098e-01 + <_> + + 0 -1 696 -3.4071630798280239e-03 + + 1.4578290283679962e-01 -9.1960333287715912e-02 + <_> + + 0 -1 697 -2.1656269207596779e-02 + + -6.5364891290664673e-01 2.7129750698804855e-02 + <_> + + 0 -1 698 9.4357347115874290e-03 + + 6.4360111951828003e-02 -2.3885479569435120e-01 + <_> + + 0 -1 699 -7.5177568942308426e-03 + + 2.4519060552120209e-01 -6.8221837282180786e-02 + <_> + + 0 -1 700 1.6067629680037498e-02 + + 7.6069780625402927e-03 -3.1668719649314880e-01 + <_> + + 0 -1 701 -1.8057749839499593e-03 + + 1.2710370123386383e-01 -1.2145719677209854e-01 + <_> + + 0 -1 702 -4.4154901057481766e-02 + + -4.8579609394073486e-01 2.3444859310984612e-02 + <_> + + 0 -1 703 7.5462698005139828e-03 + + 6.8430766463279724e-02 -2.3316520452499390e-01 + <_> + + 0 -1 704 1.0868260264396667e-01 + + -4.1663911193609238e-02 3.9452219009399414e-01 + <_> + + 0 -1 705 6.1248701810836792e-01 + + 2.0702170208096504e-02 -9.8494791984558105e-01 + <_> + + 0 -1 706 4.9828290939331055e-02 + + 2.7304550167173147e-03 -4.0181699395179749e-01 + <_> + + 0 -1 707 -7.2768718004226685e-02 + + 3.2676479220390320e-01 -4.9144338816404343e-02 + <_> + + 0 -1 708 2.4314310401678085e-02 + + -7.8135710209608078e-03 5.8223301172256470e-01 + <_> + + 0 -1 709 -1.7177179688587785e-04 + + 8.1669911742210388e-02 -2.0376220345497131e-01 + <_> + + 0 -1 710 -4.0095269680023193e-02 + + 5.4681521654129028e-01 -1.7179539427161217e-02 + <_> + + 0 -1 711 -8.9634567499160767e-02 + + -8.1614011526107788e-01 2.1283889189362526e-02 + <_> + + 0 -1 712 1.8692140281200409e-01 + + 8.3980746567249298e-03 -6.0185301303863525e-01 + <_> + + 0 -1 713 -4.3038379400968552e-02 + + -8.7898987531661987e-01 1.4930729754269123e-02 + <_> + + 0 -1 714 -1.8602630007080734e-04 + + 4.0156241506338120e-02 -8.2604438066482544e-02 + <_> + + 0 -1 715 -1.4392189914360642e-03 + + -1.7102399468421936e-01 9.1203540563583374e-02 + <_> + + 0 -1 716 4.2160619050264359e-02 + + -3.5861019045114517e-02 1.5174309909343719e-01 + <_> + + 0 -1 717 7.5991409830749035e-03 + + 1.0874529927968979e-01 -1.6147160530090332e-01 + <_> + + 0 -1 718 -5.7539329864084721e-03 + + -2.5677061080932617e-01 5.8457151055335999e-02 + <_> + + 0 -1 719 -2.7736749500036240e-02 + + 2.2325170040130615e-01 -7.4071511626243591e-02 + <_> + + 0 -1 720 -2.5676110759377480e-02 + + 1.8831080198287964e-01 -5.3860381245613098e-02 + <_> + + 0 -1 721 1.5890730544924736e-02 + + 5.1709540188312531e-02 -3.8476571440696716e-01 + <_> + + 0 -1 722 -8.6374267935752869e-02 + + -5.5680698156356812e-01 9.4922119751572609e-03 + <_> + + 0 -1 723 1.9480630289763212e-03 + + -1.0807219892740250e-01 1.4771680533885956e-01 + <_> + 88 + -8.3640968799591064e-01 + + <_> + + 0 -1 724 -6.8531660363078117e-03 + + 2.8935509920120239e-01 -2.7689141035079956e-01 + <_> + + 0 -1 725 -6.9217637181282043e-02 + + 3.4909790754318237e-01 -4.9741089344024658e-02 + <_> + + 0 -1 726 -1.3092979788780212e-01 + + 4.2791560292243958e-01 -9.6156008541584015e-02 + <_> + + 0 -1 727 -2.9759139579255134e-05 + + 1.1675780266523361e-01 -2.4678389728069305e-01 + <_> + + 0 -1 728 -4.7100789844989777e-02 + + 3.7259110808372498e-01 -5.9072919189929962e-02 + <_> + + 0 -1 729 4.4124510139226913e-02 + + 7.8904099762439728e-02 -2.5528541207313538e-01 + <_> + + 0 -1 730 4.2540309950709343e-03 + + -2.3612380027770996e-01 1.2856779992580414e-01 + <_> + + 0 -1 731 -1.0833570268005133e-03 + + 1.4347310364246368e-01 -1.4203630387783051e-01 + <_> + + 0 -1 732 
5.9925230743829161e-05 + + -1.9927270710468292e-01 8.8502913713455200e-02 + <_> + + 0 -1 733 -7.3021486401557922e-02 + + -8.0666261911392212e-01 3.2041858881711960e-02 + <_> + + 0 -1 734 7.9495050013065338e-03 + + -6.5878443419933319e-02 2.7071261405944824e-01 + <_> + + 0 -1 735 -3.3911041100509465e-04 + + 1.3490739464759827e-01 -1.3354760408401489e-01 + <_> + + 0 -1 736 -2.6010179892182350e-02 + + -2.8074580430984497e-01 7.7902659773826599e-02 + <_> + + 0 -1 737 -3.1153090298175812e-02 + + 2.7022659778594971e-01 -2.6994340121746063e-02 + <_> + + 0 -1 738 1.0946249589323997e-02 + + -1.5993720293045044e-01 1.0350699722766876e-01 + <_> + + 0 -1 739 7.3101207613945007e-02 + + -4.1365791112184525e-03 5.2339828014373779e-01 + <_> + + 0 -1 740 3.0207149684429169e-02 + + -4.9229420721530914e-02 4.2848989367485046e-01 + <_> + + 0 -1 741 6.4985260367393494e-02 + + 3.9118612185120583e-03 -1.0003379583358765e+00 + <_> + + 0 -1 742 -2.9119249433279037e-02 + + -7.7025991678237915e-01 2.3930810391902924e-02 + <_> + + 0 -1 743 5.0458308309316635e-02 + + 6.9283558987081051e-03 -5.1854777336120605e-01 + <_> + + 0 -1 744 -3.8890179246664047e-02 + + -4.8176848888397217e-01 3.0270289629697800e-02 + <_> + + 0 -1 745 5.8319371193647385e-02 + + -2.2101389244198799e-02 2.8393501043319702e-01 + <_> + + 0 -1 746 -1.0803690180182457e-02 + + 1.2842060625553131e-01 -1.3849779963493347e-01 + <_> + + 0 -1 747 9.4525264576077461e-03 + + -5.7194419205188751e-02 1.7759050428867340e-01 + <_> + + 0 -1 748 1.5229170210659504e-02 + + 1.0501170158386230e-01 -2.0518389344215393e-01 + <_> + + 0 -1 749 -8.9435698464512825e-04 + + 6.8668253719806671e-02 -1.4666010439395905e-01 + <_> + + 0 -1 750 -1.8322499468922615e-02 + + -2.3613719642162323e-01 8.3538331091403961e-02 + <_> + + 0 -1 751 2.5474189314991236e-03 + + -8.4731526672840118e-02 1.7211520671844482e-01 + <_> + + 0 -1 752 -1.4951790217310190e-03 + + 1.8642990291118622e-01 -1.2753330171108246e-01 + <_> + + 0 -1 753 2.4796150624752045e-02 + + 3.2923560589551926e-02 -4.0954729914665222e-01 + <_> + + 0 -1 754 -2.8976860921829939e-03 + + 1.4480039477348328e-01 -1.0404679924249649e-01 + <_> + + 0 -1 755 7.0361169055104256e-03 + + -6.7916557192802429e-02 2.1544350683689117e-01 + <_> + + 0 -1 756 -1.1870389804244041e-02 + + -2.5537449121475220e-01 7.4443407356739044e-02 + <_> + + 0 -1 757 2.4765899870544672e-03 + + 6.8313367664813995e-02 -1.6111320257186890e-01 + <_> + + 0 -1 758 2.1284550428390503e-02 + + 3.7090871483087540e-02 -4.6916520595550537e-01 + <_> + + 0 -1 759 -1.0369479656219482e-02 + + 1.0807839781045914e-01 -6.0489870607852936e-02 + <_> + + 0 -1 760 1.0732480324804783e-02 + + -5.8582380414009094e-02 3.1958609819412231e-01 + <_> + + 0 -1 761 -2.3235160112380981e-01 + + -1. 
8.2511743530631065e-03 + <_> + + 0 -1 762 -6.0572529037017375e-05 + + 8.0201767385005951e-02 -2.3583050072193146e-01 + <_> + + 0 -1 763 -2.7367009315639734e-03 + + 1.5369090437889099e-01 -7.8800879418849945e-02 + <_> + + 0 -1 764 3.1168010085821152e-02 + + -4.1852951049804688e-02 3.7374469637870789e-01 + <_> + + 0 -1 765 4.5415129512548447e-02 + + 6.6594500094652176e-03 -9.9975287914276123e-01 + <_> + + 0 -1 766 -1.3742819428443909e-03 + + 1.0587850213050842e-01 -1.9234779477119446e-01 + <_> + + 0 -1 767 3.0089360661804676e-03 + + 9.4038642942905426e-02 -1.5442730486392975e-01 + <_> + + 0 -1 768 -7.1071386337280273e-02 + + -5.4955267906188965e-01 2.5523129850625992e-02 + <_> + + 0 -1 769 1.0958979837596416e-03 + + -6.1327658593654633e-02 5.7677619159221649e-02 + <_> + + 0 -1 770 -2.3706799373030663e-02 + + 2.9486098885536194e-01 -6.6553473472595215e-02 + <_> + + 0 -1 771 6.8882037885487080e-03 + + 7.3861703276634216e-02 -2.5727730989456177e-01 + <_> + + 0 -1 772 -4.9158040434122086e-02 + + 3.2406309247016907e-01 -5.2785839885473251e-02 + <_> + + 0 -1 773 7.1369417011737823e-02 + + 1.3209920376539230e-02 -7.4821132421493530e-01 + <_> + + 0 -1 774 -8.4517486393451691e-03 + + -2.0652799308300018e-01 9.3139596283435822e-02 + <_> + + 0 -1 775 -1.5554410219192505e-01 + + -5.0736141204833984e-01 1.1575420387089252e-02 + <_> + + 0 -1 776 -4.5976821333169937e-02 + + 3.3433321118354797e-01 -5.6558281183242798e-02 + <_> + + 0 -1 777 1.7900219187140465e-02 + + 3.4091990441083908e-02 -2.8565031290054321e-01 + <_> + + 0 -1 778 6.7351139150559902e-03 + + -6.6538818180561066e-02 2.3322120308876038e-01 + <_> + + 0 -1 779 6.4544100314378738e-03 + + 4.7224499285221100e-02 -1.4422370493412018e-01 + <_> + + 0 -1 780 -1.1029049754142761e-02 + + -2.6442399621009827e-01 6.2542691826820374e-02 + <_> + + 0 -1 781 -3.3727919217199087e-03 + + 1.2575919926166534e-01 -6.8357646465301514e-02 + <_> + + 0 -1 782 -2.2960419300943613e-03 + + -1.5573309361934662e-01 9.4681970775127411e-02 + <_> + + 0 -1 783 -7.9503163695335388e-02 + + -3.8246139883995056e-01 1.7201259732246399e-02 + <_> + + 0 -1 784 -2.5240880250930786e-01 + + 3.0139809846878052e-01 -5.8942809700965881e-02 + <_> + + 0 -1 785 3.6313079297542572e-02 + + 2.1105870604515076e-02 -2.0811690390110016e-01 + <_> + + 0 -1 786 6.8737521767616272e-02 + + -3.2400298863649368e-02 5.1345300674438477e-01 + <_> + + 0 -1 787 -2.1814550459384918e-01 + + -7.0093291997909546e-01 1.6260979697108269e-02 + <_> + + 0 -1 788 -1.9770899415016174e-01 + + -6.7817360162734985e-01 1.7937550321221352e-02 + <_> + + 0 -1 789 -1.0131119936704636e-01 + + 3.6470630764961243e-01 -4.9969438463449478e-02 + <_> + + 0 -1 790 5.4146698676049709e-03 + + 6.6086590290069580e-02 -2.3327399790287018e-01 + <_> + + 0 -1 791 -4.0590178221464157e-02 + + 2.1464720368385315e-01 -4.3033309280872345e-02 + <_> + + 0 -1 792 -1.3324919855222106e-03 + + 1.2975679337978363e-01 -1.2794280052185059e-01 + <_> + + 0 -1 793 5.7570589706301689e-03 + + 4.3469998985528946e-02 -1.1977300047874451e-01 + <_> + + 0 -1 794 -4.0872758254408836e-03 + + -2.0180100202560425e-01 9.2624872922897339e-02 + <_> + + 0 -1 795 2.1345280110836029e-02 + + -2.6310870423913002e-02 2.9142528772354126e-01 + <_> + + 0 -1 796 -2.4241849314421415e-03 + + 1.7131569981575012e-01 -1.1723010241985321e-01 + <_> + + 0 -1 797 6.0677550733089447e-02 + + -4.8347217962145805e-03 5.6577122211456299e-01 + <_> + + 0 -1 798 3.1573011074215174e-04 + + -1.1499550193548203e-01 1.3094860315322876e-01 + <_> + + 0 -1 799 -1.4639530563727021e-03 + + 1.0708429664373398e-01 
-8.2188747823238373e-02 + <_> + + 0 -1 800 -8.1629276275634766e-02 + + -7.0090162754058838e-01 2.1318640559911728e-02 + <_> + + 0 -1 801 -2.2923630604054779e-04 + + 5.2449010312557220e-02 -5.7273399084806442e-02 + <_> + + 0 -1 802 8.6732655763626099e-03 + + -1.0944409668445587e-01 1.4530800282955170e-01 + <_> + + 0 -1 803 -9.5603411318734288e-04 + + 5.4728660732507706e-02 -7.6677009463310242e-02 + <_> + + 0 -1 804 -5.6814689189195633e-02 + + -7.2493737936019897e-01 1.7791330814361572e-02 + <_> + + 0 -1 805 6.4268838614225388e-03 + + -3.7768699228763580e-02 8.3454750478267670e-02 + <_> + + 0 -1 806 5.2451258525252342e-03 + + -7.5806751847267151e-02 2.1549069881439209e-01 + <_> + + 0 -1 807 6.7577441222965717e-03 + + 7.7163867652416229e-02 -2.4957199394702911e-01 + <_> + + 0 -1 808 -5.7494179345667362e-03 + + 1.4245559275150299e-01 -1.2740920484066010e-01 + <_> + + 0 -1 809 -6.7760650999844074e-03 + + -2.3316009342670441e-01 3.9975211024284363e-02 + <_> + + 0 -1 810 3.5247279447503388e-04 + + -1.3083159923553467e-01 1.1577410250902176e-01 + <_> + + 0 -1 811 1.4523849822580814e-03 + + -9.2724457383155823e-02 6.5486960113048553e-02 + <_> + 80 + -7.2322398424148560e-01 + + <_> + + 0 -1 812 -3.1163799762725830e-01 + + 3.8062000274658203e-01 -1.1115840077400208e-01 + <_> + + 0 -1 813 -3.0338248610496521e-01 + + 5.1236808300018311e-01 -5.0459731370210648e-02 + <_> + + 0 -1 814 -1.0945170186460018e-02 + + -2.2292029857635498e-01 1.0548099875450134e-01 + <_> + + 0 -1 815 -2.8011079877614975e-02 + + 7.0687793195247650e-02 -8.6478509008884430e-02 + <_> + + 0 -1 816 -5.2256159484386444e-02 + + 5.7856267690658569e-01 -8.7944902479648590e-03 + <_> + + 0 -1 817 -5.9455442242324352e-03 + + -2.5641980767250061e-01 9.4584532082080841e-02 + <_> + + 0 -1 818 2.5594399776309729e-03 + + -2.5718480348587036e-01 1.2882429361343384e-01 + <_> + + 0 -1 819 -1.2099260091781616e-01 + + -1.2293220311403275e-01 2.5829430669546127e-02 + <_> + + 0 -1 820 -4.4208219647407532e-01 + + -7.4546551704406738e-01 4.2586710304021835e-02 + <_> + + 0 -1 821 -6.6842641681432724e-03 + + 1.3515649735927582e-01 -1.6409300267696381e-01 + <_> + + 0 -1 822 9.8270708695054054e-03 + + -8.0305352807044983e-02 2.9853299260139465e-01 + <_> + + 0 -1 823 5.8638598769903183e-02 + + 2.7556419372558594e-02 -8.2242500782012939e-01 + <_> + + 0 -1 824 -3.0546959023922682e-03 + + -1.9292749464511871e-01 1.1082729697227478e-01 + <_> + + 0 -1 825 -7.3340102098882198e-03 + + -2.4307939410209656e-01 6.6744603216648102e-02 + <_> + + 0 -1 826 -1.0526229627430439e-02 + + -3.1136021018028259e-01 6.2850847840309143e-02 + <_> + + 0 -1 827 1.0481160134077072e-01 + + 1.2621720321476460e-02 -6.7376089096069336e-01 + <_> + + 0 -1 828 9.4269379042088985e-04 + + -1.7071670293807983e-01 1.0280650109052658e-01 + <_> + + 0 -1 829 8.4397383034229279e-03 + + -5.3014568984508514e-02 8.8599078357219696e-02 + <_> + + 0 -1 830 -3.0551670119166374e-02 + + 3.5264891386032104e-01 -6.9148473441600800e-02 + <_> + + 0 -1 831 -4.9112379550933838e-02 + + -5.8219379186630249e-01 1.4043220318853855e-02 + <_> + + 0 -1 832 5.8098030276596546e-03 + + 7.0872433483600616e-02 -2.5362819433212280e-01 + <_> + + 0 -1 833 2.5541070848703384e-02 + + -4.5136939734220505e-02 4.0674450993537903e-01 + <_> + + 0 -1 834 -4.8711288720369339e-02 + + -7.0240157842636108e-01 2.4317869916558266e-02 + <_> + + 0 -1 835 -3.2624390721321106e-01 + + -5.0619047880172729e-01 5.5445302277803421e-03 + <_> + + 0 -1 836 -1.8120040476787835e-04 + + 1.3132590055465698e-01 -1.2139549851417542e-01 + <_> + + 0 -1 837 
-1.2980769574642181e-01 + + -6.8208992481231689e-01 1.6414549201726913e-02 + <_> + + 0 -1 838 8.3528067916631699e-03 + + 3.0040390789508820e-02 -5.0909137725830078e-01 + <_> + + 0 -1 839 5.4547088220715523e-03 + + -8.2402072846889496e-02 1.8007980287075043e-01 + <_> + + 0 -1 840 -3.1699541211128235e-01 + + -8.6613011360168457e-01 1.8229139968752861e-02 + <_> + + 0 -1 841 5.8424862800166011e-04 + + 4.2409729212522507e-02 -1.3118089735507965e-01 + <_> + + 0 -1 842 -9.7046848386526108e-03 + + -2.7432689070701599e-01 5.5920429527759552e-02 + <_> + + 0 -1 843 1.6834320500493050e-02 + + -8.3306416869163513e-02 6.7792758345603943e-02 + <_> + + 0 -1 844 -3.0685380101203918e-02 + + 4.2126908898353577e-01 -4.5339331030845642e-02 + <_> + + 0 -1 845 4.1394919157028198e-02 + + 1.9971750676631927e-02 -1.9722190499305725e-01 + <_> + + 0 -1 846 3.4910149872303009e-02 + + -5.3826879709959030e-02 3.5040271282196045e-01 + <_> + + 0 -1 847 -5.2495039999485016e-03 + + -1.1363890022039413e-01 5.5080570280551910e-02 + <_> + + 0 -1 848 1.2045619636774063e-01 + + 1.7451599240303040e-02 -9.3958032131195068e-01 + <_> + + 0 -1 849 4.2130421847105026e-02 + + -1.4343280345201492e-02 6.0059851408004761e-01 + <_> + + 0 -1 850 1.9120849668979645e-02 + + 8.5864506661891937e-02 -1.8586499989032745e-01 + <_> + + 0 -1 851 8.4470612928271294e-03 + + -6.9452181458473206e-02 7.3461420834064484e-02 + <_> + + 0 -1 852 1.7696130089461803e-03 + + -7.9996660351753235e-02 1.9479809701442719e-01 + <_> + + 0 -1 853 5.7995948940515518e-02 + + 2.7633000165224075e-02 -5.4097008705139160e-01 + <_> + + 0 -1 854 -7.9884022474288940e-02 + + -5.4307681322097778e-01 2.3219829425215721e-02 + <_> + + 0 -1 855 6.6576242446899414e-02 + + 6.8416809663176537e-03 -8.1224560737609863e-01 + <_> + + 0 -1 856 6.4169943332672119e-02 + + -2.4846689775586128e-02 6.0798132419586182e-01 + <_> + + 0 -1 857 -2.9404780268669128e-01 + + -1. 
4.6440181322395802e-03 + <_> + + 0 -1 858 -9.5727723091840744e-03 + + -1.4157359302043915e-01 1.0121650248765945e-01 + <_> + + 0 -1 859 -2.3574449121952057e-02 + + 1.1715450137853622e-01 -1.3184690475463867e-01 + <_> + + 0 -1 860 -5.1256217993795872e-03 + + -1.7623250186443329e-01 1.0177359730005264e-01 + <_> + + 0 -1 861 9.7663059830665588e-02 + + 4.4896239414811134e-03 -8.0415552854537964e-01 + <_> + + 0 -1 862 3.2088689506053925e-02 + + -5.8048430830240250e-02 3.0194890499114990e-01 + <_> + + 0 -1 863 -8.6517207324504852e-02 + + -7.5529891252517700e-01 2.8089359402656555e-03 + <_> + + 0 -1 864 -2.8540970757603645e-02 + + -3.5085019469261169e-01 4.4081591069698334e-02 + <_> + + 0 -1 865 -5.3844689391553402e-03 + + 9.2348903417587280e-02 -7.0033848285675049e-02 + <_> + + 0 -1 866 -2.2280439734458923e-02 + + 2.4949419498443604e-01 -7.0658676326274872e-02 + <_> + + 0 -1 867 5.1025422289967537e-03 + + 6.0899689793586731e-02 -1.5473949909210205e-01 + <_> + + 0 -1 868 3.7133800797164440e-03 + + -8.7124302983283997e-02 1.7195260524749756e-01 + <_> + + 0 -1 869 -4.0405280888080597e-03 + + 1.5054519474506378e-01 -9.9685050547122955e-02 + <_> + + 0 -1 870 4.8944901674985886e-02 + + 2.0637780427932739e-02 -7.1113997697830200e-01 + <_> + + 0 -1 871 -4.0832208469510078e-03 + + -1.6104909777641296e-01 8.8675007224082947e-02 + <_> + + 0 -1 872 -2.2145630791783333e-03 + + -2.1901540458202362e-01 1.0045240074396133e-01 + <_> + + 0 -1 873 -6.4257450401782990e-02 + + -5.7694709300994873e-01 1.0253880172967911e-02 + <_> + + 0 -1 874 1.1895420029759407e-02 + + -7.0560596883296967e-02 2.6147291064262390e-01 + <_> + + 0 -1 875 -4.4988259673118591e-02 + + -6.8440282344818115e-01 9.9674779921770096e-03 + <_> + + 0 -1 876 6.3484339043498039e-03 + + 8.4738656878471375e-02 -1.6299989819526672e-01 + <_> + + 0 -1 877 -5.6587439030408859e-02 + + 4.8960050940513611e-01 -1.9641140475869179e-02 + <_> + + 0 -1 878 3.5853400826454163e-02 + + 1.9695440307259560e-02 -6.8108338117599487e-01 + <_> + + 0 -1 879 -4.5450981706380844e-03 + + 6.9072656333446503e-02 -9.1276638209819794e-02 + <_> + + 0 -1 880 1.0608570277690887e-01 + + -4.9993991851806641e-02 3.2139471173286438e-01 + <_> + + 0 -1 881 -4.5924410223960876e-02 + + -8.2744181156158447e-01 1.2149419635534286e-02 + <_> + + 0 -1 882 -1.2273239903151989e-02 + + -3.0669289827346802e-01 5.1693398505449295e-02 + <_> + + 0 -1 883 8.0667391419410706e-02 + + 2.1730009466409683e-03 -1.0002529621124268e+00 + <_> + + 0 -1 884 -2.3044859990477562e-02 + + 4.5085349678993225e-01 -3.6273978650569916e-02 + <_> + + 0 -1 885 1.8702909350395203e-02 + + 4.6945460140705109e-02 -2.1796269714832306e-01 + <_> + + 0 -1 886 -9.6820026636123657e-02 + + 4.0398910641670227e-01 -3.7819091230630875e-02 + <_> + + 0 -1 887 6.0525789856910706e-02 + + 1.5727160498499870e-02 -4.5661678910255432e-01 + <_> + + 0 -1 888 1.0418569669127464e-02 + + 6.2726646661758423e-02 -2.4441179633140564e-01 + <_> + + 0 -1 889 1.0726209729909897e-02 + + -7.1968853473663330e-02 2.2099970281124115e-01 + <_> + + 0 -1 890 -2.7160700410604477e-03 + + 1.2882749736309052e-01 -1.4629630744457245e-01 + <_> + + 0 -1 891 8.5867568850517273e-03 + + -6.8645663559436798e-02 2.5840589404106140e-01 + <_> + 103 + -7.6886308193206787e-01 + + <_> + + 0 -1 892 -2.5851670652627945e-02 + + 1.8011799454689026e-01 -2.4745930731296539e-01 + <_> + + 0 -1 893 1.4054620265960693e-01 + + -5.1319289952516556e-02 4.0766909718513489e-01 + <_> + + 0 -1 894 -2.7255079150199890e-01 + + 4.9941259622573853e-01 -4.5033931732177734e-02 + <_> + + 0 -1 895 
1.3978329952806234e-03 + + 5.3600508719682693e-02 -2.1793389320373535e-01 + <_> + + 0 -1 896 -3.5059880465269089e-02 + + -2.9943290352821350e-01 8.9991323649883270e-02 + <_> + + 0 -1 897 -3.2894399482756853e-03 + + 1.0264199972152710e-01 -9.4711251556873322e-02 + <_> + + 0 -1 898 1.8242290616035461e-01 + + 2.5626670569181442e-02 -6.8765729665756226e-01 + <_> + + 0 -1 899 -7.8741081058979034e-02 + + 1.0810419917106628e-01 -1.4497520029544830e-01 + <_> + + 0 -1 900 1.3945129700005054e-02 + + -7.1371912956237793e-02 3.1315749883651733e-01 + <_> + + 0 -1 901 4.4680278748273849e-02 + + -3.0446149408817291e-02 3.9263629913330078e-01 + <_> + + 0 -1 902 -2.6441770605742931e-03 + + 1.1596699804067612e-01 -1.7800450325012207e-01 + <_> + + 0 -1 903 -5.1071979105472565e-03 + + -1.1739940196275711e-01 6.7823447287082672e-02 + <_> + + 0 -1 904 -3.2582178711891174e-02 + + -5.9129017591476440e-01 3.3352021127939224e-02 + <_> + + 0 -1 905 -2.7755839750170708e-02 + + -7.0649361610412598e-01 1.6761489212512970e-02 + <_> + + 0 -1 906 -6.0038521041860804e-05 + + 7.3832668364048004e-02 -2.2933359444141388e-01 + <_> + + 0 -1 907 3.0506180599331856e-02 + + -3.8056060671806335e-02 4.4115358591079712e-01 + <_> + + 0 -1 908 -6.2056961469352245e-03 + + -1.7757239937782288e-01 9.3707472085952759e-02 + <_> + + 0 -1 909 -8.0766230821609497e-03 + + -2.0256699621677399e-01 7.4059642851352692e-02 + <_> + + 0 -1 910 -3.3209908753633499e-02 + + 4.6372228860855103e-01 -3.4903008490800858e-02 + <_> + + 0 -1 911 3.5530608147382736e-02 + + -3.1679518520832062e-02 4.5202499628067017e-01 + <_> + + 0 -1 912 1.6297640278935432e-02 + + 4.4189039617776871e-02 -3.4845370054244995e-01 + <_> + + 0 -1 913 9.9985357373952866e-03 + + -4.8255320638418198e-02 1.6078050434589386e-01 + <_> + + 0 -1 914 -5.2390778437256813e-03 + + 2.3236599564552307e-01 -7.6032742857933044e-02 + <_> + + 0 -1 915 -3.2508899457752705e-03 + + 5.4369390010833740e-02 -9.1040253639221191e-02 + <_> + + 0 -1 916 5.5640790611505508e-02 + + -3.8811128586530685e-02 4.2034021019935608e-01 + <_> + + 0 -1 917 3.3998981118202209e-02 + + 2.2251330316066742e-02 -3.5615360736846924e-01 + <_> + + 0 -1 918 -4.3103890493512154e-03 + + 1.1287429928779602e-01 -1.7630730569362640e-01 + <_> + + 0 -1 919 -7.9246461391448975e-03 + + -1.0992339998483658e-01 3.5099629312753677e-02 + <_> + + 0 -1 920 4.4273380190134048e-02 + + 2.8094569221138954e-02 -6.0921418666839600e-01 + <_> + + 0 -1 921 5.9907328337430954e-02 + + 9.7544339951127768e-04 -9.0523207187652588e-01 + <_> + + 0 -1 922 3.3378869295120239e-02 + + 1.7723279073834419e-02 -8.5254609584808350e-01 + <_> + + 0 -1 923 1.4694170095026493e-02 + + -4.9031510949134827e-02 2.7998331189155579e-01 + <_> + + 0 -1 924 -5.3877499885857105e-03 + + 1.8219049274921417e-01 -8.2382522523403168e-02 + <_> + + 0 -1 925 -1.7976889386773109e-02 + + -1.9384689629077911e-01 8.4984757006168365e-02 + <_> + + 0 -1 926 -4.4651641510426998e-03 + + 1.7632910609245300e-01 -9.5075771212577820e-02 + <_> + + 0 -1 927 6.9372296333312988e-02 + + 3.1770321074873209e-03 -6.7554402351379395e-01 + <_> + + 0 -1 928 -1.7002269625663757e-02 + + -3.3827948570251465e-01 4.4731728732585907e-02 + <_> + + 0 -1 929 1.7274240031838417e-02 + + -2.4769710376858711e-02 1.1852029711008072e-01 + <_> + + 0 -1 930 4.0388729423284531e-02 + + -3.2967679202556610e-02 4.7323140501976013e-01 + <_> + + 0 -1 931 1.4215400442481041e-02 + + 2.9846860095858574e-02 -4.4157060980796814e-01 + <_> + + 0 -1 932 4.1627719998359680e-02 + + -4.5953918248414993e-02 3.2978388667106628e-01 + <_> + + 0 -1 933 
-1.7416840419173241e-03 + + 8.7286308407783508e-02 -8.8862203061580658e-02 + <_> + + 0 -1 934 -9.8077040165662766e-03 + + -2.1026679873466492e-01 7.7401876449584961e-02 + <_> + + 0 -1 935 2.1836649626493454e-02 + + 4.3211769312620163e-02 -1.5330420434474945e-01 + <_> + + 0 -1 936 -7.0743098855018616e-02 + + 3.3019039034843445e-01 -5.2747949957847595e-02 + <_> + + 0 -1 937 -1.1181020177900791e-02 + + -1.1493939906358719e-01 2.7858460322022438e-02 + <_> + + 0 -1 938 -1.4623560011386871e-02 + + 3.2327070832252502e-01 -4.4166058301925659e-02 + <_> + + 0 -1 939 -9.6702557057142258e-03 + + -1.8157319724559784e-01 3.6154530942440033e-02 + <_> + + 0 -1 940 8.3439601585268974e-03 + + -5.2473910152912140e-02 2.7444839477539062e-01 + <_> + + 0 -1 941 2.2970559075474739e-02 + + 3.4930050373077393e-02 -1.5773670375347137e-01 + <_> + + 0 -1 942 -8.2734245806932449e-03 + + 1.1612790077924728e-01 -1.1965770274400711e-01 + <_> + + 0 -1 943 8.7074404582381248e-03 + + -4.0829788893461227e-02 1.0481330007314682e-01 + <_> + + 0 -1 944 -1.8825819715857506e-02 + + -3.8794550299644470e-01 4.7350700944662094e-02 + <_> + + 0 -1 945 -7.2092940099537373e-03 + + -1.9886960089206696e-01 7.5952850282192230e-02 + <_> + + 0 -1 946 1.6543369565624744e-04 + + -1.0674829781055450e-01 1.5510599315166473e-01 + <_> + + 0 -1 947 8.9294537901878357e-03 + + -6.7059643566608429e-02 9.0206786990165710e-02 + <_> + + 0 -1 948 3.1991640571504831e-03 + + 7.4445746839046478e-02 -1.9682839512825012e-01 + <_> + + 0 -1 949 -1.1280879698460922e-04 + + 7.9703390598297119e-02 -1.3661189377307892e-01 + <_> + + 0 -1 950 -6.9613799452781677e-02 + + -2.1010529994964600e-01 6.5771616995334625e-02 + <_> + + 0 -1 951 -2.6066679507493973e-02 + + 2.8696510195732117e-01 -5.7495791465044022e-02 + <_> + + 0 -1 952 1.2050740420818329e-02 + + -4.6820510178804398e-02 2.7994769811630249e-01 + <_> + + 0 -1 953 -3.9625849574804306e-02 + + -3.7054508924484253e-01 1.1476139537990093e-02 + <_> + + 0 -1 954 -2.7379901148378849e-03 + + 9.4371132552623749e-02 -1.6203230619430542e-01 + <_> + + 0 -1 955 -6.5262563526630402e-02 + + -6.7808389663696289e-01 1.9430469721555710e-02 + <_> + + 0 -1 956 2.3191619664430618e-02 + + 2.6134310290217400e-02 -4.6664249897003174e-01 + <_> + + 0 -1 957 4.7741930931806564e-02 + + -2.5291189551353455e-02 2.9092490673065186e-01 + <_> + + 0 -1 958 -1.2830020487308502e-01 + + -8.7187117338180542e-01 1.3883540406823158e-02 + <_> + + 0 -1 959 -4.2689260095357895e-02 + + -6.7644822597503662e-01 6.8771280348300934e-03 + <_> + + 0 -1 960 6.2811248935759068e-03 + + -6.4803749322891235e-02 2.0994420349597931e-01 + <_> + + 0 -1 961 2.7532080188393593e-02 + + 1.5366540290415287e-02 -2.1457369625568390e-01 + <_> + + 0 -1 962 -3.4494648571126163e-04 + + 1.1829499900341034e-01 -1.0641119629144669e-01 + <_> + + 0 -1 963 -3.2187011092901230e-02 + + 2.0676319301128387e-01 -2.7804749086499214e-02 + <_> + + 0 -1 964 -2.4451729841530323e-03 + + -1.8970219790935516e-01 7.6612837612628937e-02 + <_> + + 0 -1 965 3.9631120860576630e-02 + + 1.1457280255854130e-02 -4.4112280011177063e-01 + <_> + + 0 -1 966 -9.0082110837101936e-03 + + -2.0329099893569946e-01 7.1997888386249542e-02 + <_> + + 0 -1 967 -6.0594908893108368e-02 + + 2.5831830501556396e-01 -3.2274000346660614e-02 + <_> + + 0 -1 968 3.3678639680147171e-02 + + 3.6565639078617096e-02 -3.3233150839805603e-01 + <_> + + 0 -1 969 1.4565410092473030e-02 + + -4.9269210547208786e-02 1.8280670046806335e-01 + <_> + + 0 -1 970 4.0103439241647720e-03 + + -1.2435600161552429e-01 1.1247640103101730e-01 + <_> + + 0 -1 
971 1.7989509506151080e-03 + + -5.4675988852977753e-02 1.0701840370893478e-01 + <_> + + 0 -1 972 -1.6359580331481993e-04 + + 8.1755228340625763e-02 -1.6235500574111938e-01 + <_> + + 0 -1 973 -3.1993899494409561e-02 + + 1.8631230294704437e-01 -1.7350630834698677e-02 + <_> + + 0 -1 974 -8.1737667322158813e-02 + + -7.5961482524871826e-01 1.4419900253415108e-02 + <_> + + 0 -1 975 -8.8262550532817841e-02 + + -1. 5.3146481513977051e-04 + <_> + + 0 -1 976 -5.7997900992631912e-02 + + -8.9391511678695679e-01 1.2495099566876888e-02 + <_> + + 0 -1 977 2.0691409707069397e-02 + + -3.7167508155107498e-02 9.7208552062511444e-02 + <_> + + 0 -1 978 -6.0336058959364891e-03 + + 1.7547790706157684e-01 -8.6916856467723846e-02 + <_> + + 0 -1 979 1.5789760649204254e-01 + + 3.0604960396885872e-02 -2.2199299931526184e-01 + <_> + + 0 -1 980 3.3271119464188814e-03 + + 1.1201520264148712e-01 -1.6384710371494293e-01 + <_> + + 0 -1 981 1.1383239924907684e-01 + + 1.8078039865940809e-03 -9.9981439113616943e-01 + <_> + + 0 -1 982 3.9188969880342484e-02 + + -3.9494428783655167e-02 3.4139481186866760e-01 + <_> + + 0 -1 983 -4.7382968477904797e-03 + + -8.1601403653621674e-02 3.5498451441526413e-02 + <_> + + 0 -1 984 2.3458160459995270e-02 + + -4.0767479687929153e-02 3.4792768955230713e-01 + <_> + + 0 -1 985 1.6505220904946327e-02 + + 2.0170280709862709e-02 -1.5532009303569794e-01 + <_> + + 0 -1 986 2.0262949168682098e-02 + + 2.1292379125952721e-02 -6.2611502408981323e-01 + <_> + + 0 -1 987 -9.1393236070871353e-03 + + -1.3637480139732361e-01 6.3891842961311340e-02 + <_> + + 0 -1 988 -5.6207980960607529e-02 + + 4.0671119093894958e-01 -3.3258218318223953e-02 + <_> + + 0 -1 989 6.6868839785456657e-03 + + 6.4174309372901917e-02 -9.3966238200664520e-02 + <_> + + 0 -1 990 5.8862278237938881e-03 + + -6.5789960324764252e-02 2.0181339979171753e-01 + <_> + + 0 -1 991 -1.1517380177974701e-01 + + -1. 
2.5347759947180748e-03 + <_> + + 0 -1 992 5.5793710052967072e-03 + + 7.0642203092575073e-02 -1.9637429714202881e-01 + <_> + + 0 -1 993 3.2180000096559525e-02 + + -1.4737719669938087e-02 2.2420160472393036e-01 + <_> + + 0 -1 994 -9.1598782455548644e-04 + + 1.1478749662637711e-01 -1.1767079681158066e-01 + <_> + 83 + -7.7573090791702271e-01 + + <_> + + 0 -1 995 9.1346232220530510e-03 + + 8.8698662817478180e-02 -3.8595649600028992e-01 + <_> + + 0 -1 996 -2.4696369655430317e-03 + + 1.6772060096263885e-01 -1.4649170637130737e-01 + <_> + + 0 -1 997 5.8935020118951797e-02 + + -1.3394000008702278e-02 6.1832672357559204e-01 + <_> + + 0 -1 998 -8.9100059121847153e-03 + + -2.6950231194496155e-01 7.2939813137054443e-02 + <_> + + 0 -1 999 1.7743879929184914e-02 + + -5.0217188894748688e-02 4.3166020512580872e-01 + <_> + + 0 -1 1000 1.1056650429964066e-02 + + 3.9155859500169754e-02 -5.2860772609710693e-01 + <_> + + 0 -1 1001 1.6161320731043816e-02 + + 6.9581039249897003e-02 -3.7610140442848206e-01 + <_> + + 0 -1 1002 -2.7879089117050171e-02 + + 2.3220659792423248e-01 -5.5979579687118530e-02 + <_> + + 0 -1 1003 -1.1556839570403099e-02 + + -3.1231081485748291e-01 7.4339963495731354e-02 + <_> + + 0 -1 1004 -6.9651477038860321e-02 + + -4.1905689239501953e-01 6.9694789126515388e-03 + <_> + + 0 -1 1005 -5.0344727933406830e-03 + + 1.3183620572090149e-01 -1.9702030718326569e-01 + <_> + + 0 -1 1006 -8.6098119616508484e-02 + + 6.5727752447128296e-01 -9.5664570108056068e-03 + <_> + + 0 -1 1007 2.5546319782733917e-02 + + -4.0136341005563736e-02 5.4847037792205811e-01 + <_> + + 0 -1 1008 -2.6870880275964737e-02 + + -2.5306650996208191e-01 4.4181719422340393e-02 + <_> + + 0 -1 1009 9.5859682187438011e-03 + + -8.1882461905479431e-02 2.6894670724868774e-01 + <_> + + 0 -1 1010 2.6683809235692024e-02 + + 2.6593349874019623e-02 -4.4127041101455688e-01 + <_> + + 0 -1 1011 -1.4490840025246143e-02 + + -3.5697469115257263e-01 7.0072941482067108e-02 + <_> + + 0 -1 1012 -2.2448399104177952e-03 + + 2.0088230073451996e-01 -1.2228170037269592e-01 + <_> + + 0 -1 1013 4.8795710317790508e-03 + + 4.5820981264114380e-02 -3.9498189091682434e-01 + <_> + + 0 -1 1014 -6.1262990348041058e-03 + + -1.8826089799404144e-01 7.8812077641487122e-02 + <_> + + 0 -1 1015 1.6952969133853912e-02 + + -6.1684221029281616e-02 3.3603700995445251e-01 + <_> + + 0 -1 1016 -4.5547191984951496e-03 + + -1.9471390545368195e-01 5.3147189319133759e-02 + <_> + + 0 -1 1017 -1.2753040064126253e-03 + + 1.4800879359245300e-01 -1.4244349300861359e-01 + <_> + + 0 -1 1018 2.2060280665755272e-02 + + -3.5406738519668579e-02 3.3775308728218079e-01 + <_> + + 0 -1 1019 2.1050389856100082e-02 + + 4.2289130389690399e-02 -4.5886451005935669e-01 + <_> + + 0 -1 1020 9.5637209713459015e-02 + + -1.3171649537980556e-02 5.5534982681274414e-01 + <_> + + 0 -1 1021 -3.6728319246321917e-03 + + -1.8842899799346924e-01 9.5458142459392548e-02 + <_> + + 0 -1 1022 1.6345079347956926e-04 + + -6.0444809496402740e-02 1.0536730289459229e-01 + <_> + + 0 -1 1023 2.5338289141654968e-01 + + 1.6026260331273079e-02 -9.9994468688964844e-01 + <_> + + 0 -1 1024 -4.6113330870866776e-02 + + 5.4247987270355225e-01 -2.7890209108591080e-02 + <_> + + 0 -1 1025 5.2588270045816898e-03 + + 7.9867303371429443e-02 -2.0700709521770477e-01 + <_> + + 0 -1 1026 -1.3449570536613464e-01 + + -4.1270101070404053e-01 8.1500215455889702e-03 + <_> + + 0 -1 1027 1.6953679732978344e-03 + + 1.1035349965095520e-01 -1.6802120208740234e-01 + <_> + + 0 -1 1028 3.9492141455411911e-02 + + -1.3410010375082493e-02 3.8447639346122742e-01 + <_> 
+ + 0 -1 1029 -9.3634781660512090e-04 + + 1.0986819863319397e-01 -1.7310489714145660e-01 + <_> + + 0 -1 1030 -4.4495709240436554e-02 + + 1.9471199810504913e-01 -4.0768899023532867e-02 + <_> + + 0 -1 1031 6.0630109161138535e-02 + + -4.2252369225025177e-02 5.1412987709045410e-01 + <_> + + 0 -1 1032 7.5067640282213688e-03 + + 4.2086970061063766e-02 -1.6080400347709656e-01 + <_> + + 0 -1 1033 9.9260415881872177e-03 + + 6.4119532704353333e-02 -2.6215308904647827e-01 + <_> + + 0 -1 1034 6.0528520494699478e-02 + + 2.4189969524741173e-02 -3.6608389019966125e-01 + <_> + + 0 -1 1035 -6.8054231815040112e-03 + + 1.2508389353752136e-01 -1.3889710605144501e-01 + <_> + + 0 -1 1036 -2.0940289832651615e-03 + + 1.3996599614620209e-01 -8.2706399261951447e-02 + <_> + + 0 -1 1037 -9.6904346719384193e-03 + + 2.6681360602378845e-01 -7.1576990187168121e-02 + <_> + + 0 -1 1038 1.8320349976420403e-02 + + 3.1321980059146881e-02 -2.3460610210895538e-01 + <_> + + 0 -1 1039 5.0429959082975984e-04 + + -1.1669719964265823e-01 1.6514649987220764e-01 + <_> + + 0 -1 1040 -4.7016288153827190e-03 + + -1.2006150186061859e-01 5.9200428426265717e-02 + <_> + + 0 -1 1041 -1.9926870241761208e-02 + + -3.9485099911689758e-01 4.1143018752336502e-02 + <_> + + 0 -1 1042 7.4013080447912216e-03 + + -7.6331257820129395e-02 2.1065360307693481e-01 + <_> + + 0 -1 1043 1.4879629947245121e-02 + + 4.7979071736335754e-02 -3.4014761447906494e-01 + <_> + + 0 -1 1044 1.5527559816837311e-01 + + 3.2225880771875381e-02 -4.6938079595565796e-01 + <_> + + 0 -1 1045 -7.0786331780254841e-03 + + 1.2199480086565018e-01 -1.2004940211772919e-01 + <_> + + 0 -1 1046 2.9872169718146324e-02 + + -4.3677508831024170e-02 2.3529820144176483e-01 + <_> + + 0 -1 1047 3.0555170029401779e-02 + + 3.1775880604982376e-02 -5.7825452089309692e-01 + <_> + + 0 -1 1048 1.0284570045769215e-02 + + 4.7202810645103455e-02 -2.9566499590873718e-01 + <_> + + 0 -1 1049 1.9808709621429443e-02 + + -4.5775938779115677e-02 3.3231019973754883e-01 + <_> + + 0 -1 1050 2.7218880131840706e-02 + + 2.5577219203114510e-02 -3.3180880546569824e-01 + <_> + + 0 -1 1051 1.4097680337727070e-02 + + 5.2157420665025711e-02 -2.9358381032943726e-01 + <_> + + 0 -1 1052 2.4286569654941559e-01 + + 1.4692460186779499e-02 -6.9854879379272461e-01 + <_> + + 0 -1 1053 1.2419570237398148e-02 + + -4.7105878591537476e-02 3.6695051193237305e-01 + <_> + + 0 -1 1054 1.3503880472853780e-03 + + 5.3791359066963196e-02 -2.0953659713268280e-01 + <_> + + 0 -1 1055 -1.5626290813088417e-02 + + 2.7888458967208862e-01 -6.0053750872612000e-02 + <_> + + 0 -1 1056 1.5850139781832695e-02 + + -3.0324909836053848e-02 1.0287520289421082e-01 + <_> + + 0 -1 1057 -4.0868919342756271e-02 + + -8.0402207374572754e-01 1.7601499333977699e-02 + <_> + + 0 -1 1058 6.4108639955520630e-02 + + 2.5845379568636417e-03 -5.3854942321777344e-01 + <_> + + 0 -1 1059 4.9927100539207458e-02 + + 2.1863300353288651e-02 -6.1780720949172974e-01 + <_> + + 0 -1 1060 1.4655419625341892e-02 + + 1.9663369283080101e-02 -2.0426170527935028e-01 + <_> + + 0 -1 1061 -2.4094810709357262e-02 + + 3.7609130144119263e-01 -4.0954101830720901e-02 + <_> + + 0 -1 1062 2.9417769983410835e-02 + + -8.6903842166066170e-03 4.0447419881820679e-01 + <_> + + 0 -1 1063 -1.4158640056848526e-02 + + 3.7811711430549622e-01 -4.0321640670299530e-02 + <_> + + 0 -1 1064 -4.6754989773035049e-02 + + 2.2104309499263763e-01 -2.8996109962463379e-02 + <_> + + 0 -1 1065 -1.1437949724495411e-02 + + -2.5033089518547058e-01 5.8214288204908371e-02 + <_> + + 0 -1 1066 -4.2598780244588852e-02 + + 
3.7562200427055359e-01 -1.6349090263247490e-02 + <_> + + 0 -1 1067 -1.5201159752905369e-02 + + -3.5637819766998291e-01 3.8690369576215744e-02 + <_> + + 0 -1 1068 4.3378848582506180e-02 + + 3.3045639283955097e-03 -4.6729469299316406e-01 + <_> + + 0 -1 1069 5.5153011344373226e-03 + + -8.3583608269691467e-02 1.8793170154094696e-01 + <_> + + 0 -1 1070 -7.8126927837729454e-03 + + -1.6586859524250031e-01 4.3801128864288330e-02 + <_> + + 0 -1 1071 4.1652601212263107e-02 + + -3.1804520636796951e-02 4.3517521023750305e-01 + <_> + + 0 -1 1072 3.4417589195072651e-03 + + 4.2282279580831528e-02 -1.3088959455490112e-01 + <_> + + 0 -1 1073 1.3004569336771965e-04 + + -1.1260010302066803e-01 1.3964599370956421e-01 + <_> + + 0 -1 1074 -7.7347733080387115e-02 + + 7.0750647783279419e-01 -5.4134069941937923e-03 + <_> + + 0 -1 1075 -1.6143550164997578e-03 + + 1.1920420080423355e-01 -1.1884269863367081e-01 + <_> + + 0 -1 1076 -9.8279246594756842e-04 + + 6.3156276941299438e-02 -5.2781101316213608e-02 + <_> + + 0 -1 1077 -4.5667469501495361e-02 + + -3.4500870108604431e-01 4.4600728899240494e-02 + <_> + 101 + -6.9763368368148804e-01 + + <_> + + 0 -1 1078 7.3315978050231934e-02 + + -1.1410109698772430e-01 4.0035811066627502e-01 + <_> + + 0 -1 1079 2.5275669991970062e-02 + + -7.2013877332210541e-02 3.6095780134201050e-01 + <_> + + 0 -1 1080 1.8873859196901321e-02 + + -1.7234370112419128e-01 1.8223220109939575e-01 + <_> + + 0 -1 1081 7.4607720307540148e-05 + + -8.1627286970615387e-02 8.8888503611087799e-02 + <_> + + 0 -1 1082 4.2250280966982245e-04 + + -1.2840239703655243e-01 1.1791419982910156e-01 + <_> + + 0 -1 1083 1.4402460306882858e-02 + + 2.0960340276360512e-02 1.9024699926376343e-01 + <_> + + 0 -1 1084 -2.0460959058254957e-03 + + 9.5712497830390930e-02 -2.1517060697078705e-01 + <_> + + 0 -1 1085 7.1128448471426964e-03 + + -5.6100480258464813e-02 2.0984320342540741e-01 + <_> + + 0 -1 1086 -6.5832170657813549e-03 + + -2.1138189733028412e-01 7.6094150543212891e-02 + <_> + + 0 -1 1087 -4.1252959636040032e-04 + + 1.3107340037822723e-01 -1.5670859813690186e-01 + <_> + + 0 -1 1088 -4.4330831617116928e-02 + + 5.4048037528991699e-01 -1.9059479236602783e-02 + <_> + + 0 -1 1089 1.1700130067765713e-02 + + 5.1712401211261749e-02 -1.7216169834136963e-01 + <_> + + 0 -1 1090 3.5091140307486057e-03 + + -7.6767951250076294e-02 1.7776259779930115e-01 + <_> + + 0 -1 1091 1.5597569756209850e-02 + + 3.8307890295982361e-02 -1.4730019867420197e-01 + <_> + + 0 -1 1092 -3.6285370588302612e-02 + + 3.5347661375999451e-01 -4.5018490403890610e-02 + <_> + + 0 -1 1093 -4.5118298381567001e-02 + + -5.7074141502380371e-01 1.0646710172295570e-02 + <_> + + 0 -1 1094 1.3734580017626286e-02 + + 6.6018357872962952e-02 -2.0480890572071075e-01 + <_> + + 0 -1 1095 -2.7120979502797127e-02 + + 4.8094209283590317e-02 -5.1394961774349213e-02 + <_> + + 0 -1 1096 -1.5354059869423509e-03 + + -2.3548009991645813e-01 5.3074609488248825e-02 + <_> + + 0 -1 1097 3.6000818945467472e-03 + + -5.8944340795278549e-02 1.1825410276651382e-01 + <_> + + 0 -1 1098 6.8916529417037964e-03 + + -5.0014488399028778e-02 2.6909399032592773e-01 + <_> + + 0 -1 1099 3.5373449791222811e-03 + + -1.2947039306163788e-01 8.8697038590908051e-02 + <_> + + 0 -1 1100 -4.1431561112403870e-03 + + -1.7883630096912384e-01 6.9098107516765594e-02 + <_> + + 0 -1 1101 -1.0762579739093781e-01 + + -1. 
4.7263409942388535e-03 + <_> + + 0 -1 1102 9.7946207970380783e-03 + + -5.4038770496845245e-02 2.4115470051765442e-01 + <_> + + 0 -1 1103 1.0054280050098896e-02 + + -8.0624893307685852e-02 1.1627560108900070e-01 + <_> + + 0 -1 1104 -8.7350717512890697e-04 + + -1.8193979561328888e-01 7.7468506991863251e-02 + <_> + + 0 -1 1105 9.4283261569216847e-04 + + 4.6265050768852234e-02 -2.2732029855251312e-01 + <_> + + 0 -1 1106 3.5424059024080634e-04 + + -1.1824289709329605e-01 1.1095699667930603e-01 + <_> + + 0 -1 1107 -3.8587789982557297e-02 + + -3.0286869406700134e-01 3.1856179703027010e-03 + <_> + + 0 -1 1108 -4.9504679627716541e-03 + + 1.3758100569248199e-01 -9.1690346598625183e-02 + <_> + + 0 -1 1109 -2.5453630834817886e-02 + + -2.3013520240783691e-01 1.9747929647564888e-02 + <_> + + 0 -1 1110 1.5836700797080994e-02 + + -4.5252159237861633e-02 2.9337081313133240e-01 + <_> + + 0 -1 1111 1.0379879735410213e-02 + + 5.9706691652536392e-02 -1.6415530443191528e-01 + <_> + + 0 -1 1112 4.3178450316190720e-02 + + 6.3460536301136017e-02 -2.1360489726066589e-01 + <_> + + 0 -1 1113 -2.2508678957819939e-03 + + 1.0645110160112381e-01 -5.9539180248975754e-02 + <_> + + 0 -1 1114 5.0743711180984974e-03 + + -9.4377033412456512e-02 2.2999720275402069e-01 + <_> + + 0 -1 1115 -3.0670650303363800e-02 + + 2.5975760817527771e-01 -2.3188209161162376e-02 + <_> + + 0 -1 1116 2.4162670597434044e-03 + + 8.7919056415557861e-02 -1.9287380576133728e-01 + <_> + + 0 -1 1117 -9.3405842781066895e-03 + + -1.0935559868812561e-01 2.9358500614762306e-02 + <_> + + 0 -1 1118 2.0513730123639107e-02 + + -5.2511349320411682e-02 3.0545449256896973e-01 + <_> + + 0 -1 1119 -4.3630380183458328e-02 + + -4.5310449600219727e-01 1.8261570483446121e-02 + <_> + + 0 -1 1120 3.4857920836657286e-03 + + -9.7093120217323303e-02 1.4877100288867950e-01 + <_> + + 0 -1 1121 1.0411609895527363e-02 + + 4.2915731668472290e-02 -2.4849639832973480e-01 + <_> + + 0 -1 1122 -7.5155291706323624e-03 + + -2.6623341441154480e-01 5.1602318882942200e-02 + <_> + + 0 -1 1123 7.2157550603151321e-03 + + -6.1878159642219543e-02 1.8314969539642334e-01 + <_> + + 0 -1 1124 9.1090862406417727e-04 + + -9.7420282661914825e-02 1.2223699688911438e-01 + <_> + + 0 -1 1125 -4.0069910883903503e-01 + + -8.1831091642379761e-01 4.7453590668737888e-03 + <_> + + 0 -1 1126 -4.8033627681434155e-03 + + 9.4193987548351288e-02 -1.4436510205268860e-01 + <_> + + 0 -1 1127 -2.1147429943084717e-02 + + 2.9532408714294434e-01 -4.4751271605491638e-02 + <_> + + 0 -1 1128 1.8602259457111359e-02 + + -4.2993780225515366e-02 2.9706719517707825e-01 + <_> + + 0 -1 1129 -8.1051718443632126e-03 + + 1.2369229644536972e-01 -1.3246449828147888e-01 + <_> + + 0 -1 1130 -8.3215925842523575e-03 + + -1.9022589921951294e-01 8.9151017367839813e-02 + <_> + + 0 -1 1131 3.1376329716295004e-03 + + 4.1584819555282593e-02 -7.9552896320819855e-02 + <_> + + 0 -1 1132 1.6556069254875183e-02 + + 4.4908858835697174e-02 -3.6947301030158997e-01 + <_> + + 0 -1 1133 2.9919730499386787e-02 + + -3.7720259279012680e-02 2.4280619621276855e-01 + <_> + + 0 -1 1134 -5.1988288760185242e-02 + + -6.9372260570526123e-01 1.8926780670881271e-02 + <_> + + 0 -1 1135 7.5528107583522797e-02 + + -1.2611350044608116e-02 2.5732690095901489e-01 + <_> + + 0 -1 1136 -2.5031189434230328e-03 + + 1.3807280361652374e-01 -9.1662466526031494e-02 + <_> + + 0 -1 1137 -5.9646938461810350e-04 + + -6.3654616475105286e-02 2.5937270373106003e-02 + <_> + + 0 -1 1138 1.0319340042769909e-02 + + 8.3791837096214294e-02 -1.7408309876918793e-01 + <_> + + 0 -1 1139 
9.3816686421632767e-03 + + 2.7871530503034592e-02 -1.1141580343246460e-01 + <_> + + 0 -1 1140 1.0023410432040691e-02 + + -6.9966249167919159e-02 2.1900640428066254e-01 + <_> + + 0 -1 1141 -8.3700200775638223e-04 + + 1.0097689926624298e-01 -1.4261360466480255e-01 + <_> + + 0 -1 1142 2.2468710318207741e-02 + + 9.4028212130069733e-02 -1.3807420432567596e-01 + <_> + + 0 -1 1143 3.9115209132432938e-02 + + -5.3969398140907288e-03 6.5187507867813110e-01 + <_> + + 0 -1 1144 -1.5670569846406579e-03 + + 7.0886030793190002e-02 -2.0010609924793243e-01 + <_> + + 0 -1 1145 6.0749892145395279e-03 + + 3.5395938903093338e-02 -4.3918590992689133e-02 + <_> + + 0 -1 1146 -4.3166890740394592e-02 + + 5.9881848096847534e-01 -2.3480180650949478e-02 + <_> + + 0 -1 1147 2.3302088957279921e-03 + + -7.2818689048290253e-02 4.3940208852291107e-02 + <_> + + 0 -1 1148 5.5236589163541794e-02 + + -3.5117920488119125e-02 3.6355149745941162e-01 + <_> + + 0 -1 1149 2.7774399146437645e-02 + + 3.0074290931224823e-02 -1.0026770085096359e-01 + <_> + + 0 -1 1150 8.4784086793661118e-03 + + -5.6243300437927246e-02 2.1711349487304688e-01 + <_> + + 0 -1 1151 1.3269360177218914e-02 + + 4.3138369917869568e-02 -1.6429780423641205e-01 + <_> + + 0 -1 1152 -3.4072279930114746e-02 + + 3.9418798685073853e-01 -3.2914638519287109e-02 + <_> + + 0 -1 1153 -5.9365970082581043e-03 + + 6.4854122698307037e-02 -8.6971588432788849e-02 + <_> + + 0 -1 1154 -5.1997308619320393e-03 + + -2.1710740029811859e-01 6.5441012382507324e-02 + <_> + + 0 -1 1155 3.0441130511462688e-03 + + -4.7171641141176224e-02 9.4662867486476898e-02 + <_> + + 0 -1 1156 -2.2375459957402200e-04 + + 1.1739899963140488e-01 -1.0451590269804001e-01 + <_> + + 0 -1 1157 4.9494139850139618e-02 + + 9.9552040919661522e-03 -8.8205021619796753e-01 + <_> + + 0 -1 1158 7.7127031981945038e-02 + + -3.6638759076595306e-02 3.7156999111175537e-01 + <_> + + 0 -1 1159 -3.7054829299449921e-03 + + 4.6213079243898392e-02 -7.9498499631881714e-02 + <_> + + 0 -1 1160 1.3655430078506470e-01 + + 2.0802579820156097e-02 -6.4692282676696777e-01 + <_> + + 0 -1 1161 -1.6919399797916412e-01 + + -9.0144991874694824e-01 4.3158119660802186e-04 + <_> + + 0 -1 1162 5.2525149658322334e-03 + + 8.6686216294765472e-02 -1.5751640498638153e-01 + <_> + + 0 -1 1163 5.7952258735895157e-02 + + 1.3485850067809224e-03 -1.0001620054244995e+00 + <_> + + 0 -1 1164 -3.0681459233164787e-02 + + -6.7346888780593872e-01 1.7730809748172760e-02 + <_> + + 0 -1 1165 -2.8556400910019875e-02 + + 2.4913530051708221e-01 -2.1807359531521797e-02 + <_> + + 0 -1 1166 5.8311191387474537e-03 + + 1.0109650343656540e-01 -1.2586539983749390e-01 + <_> + + 0 -1 1167 2.8870739042758942e-03 + + -4.5462280511856079e-02 1.4794190227985382e-01 + <_> + + 0 -1 1168 -5.3575891070067883e-03 + + 1.0845459997653961e-01 -2.0636059343814850e-01 + <_> + + 0 -1 1169 2.0851830020546913e-02 + + -2.5641430169343948e-02 1.2000799924135208e-01 + <_> + + 0 -1 1170 2.9372319113463163e-03 + + -5.8832980692386627e-02 2.3967139422893524e-01 + <_> + + 0 -1 1171 1.0109069757163525e-02 + + 4.4724740087985992e-02 -2.5024959444999695e-01 + <_> + + 0 -1 1172 6.2002640217542648e-02 + + 3.1236680224537849e-02 -3.8775479793548584e-01 + <_> + + 0 -1 1173 1.7331680282950401e-03 + + -7.6642520725727081e-02 5.8738309890031815e-02 + <_> + + 0 -1 1174 -4.6648900955915451e-02 + + 4.7800371050834656e-01 -2.8223259374499321e-02 + <_> + + 0 -1 1175 -4.0585011243820190e-02 + + 1.9591329991817474e-01 -2.9608549550175667e-02 + <_> + + 0 -1 1176 1.4297359623014927e-02 + + 8.0422781407833099e-02 
-2.0024399459362030e-01 + <_> + + 0 -1 1177 -1.4215649571269751e-03 + + 9.7693942487239838e-02 -1.3090120255947113e-01 + <_> + + 0 -1 1178 5.2683628164231777e-03 + + -5.8376371860504150e-02 2.4378040432929993e-01 + <_> + 104 + -6.8976742029190063e-01 + + <_> + + 0 -1 1179 -2.6198190171271563e-03 + + 1.8673700094223022e-01 -1.9126529991626740e-01 + <_> + + 0 -1 1180 -2.8629099950194359e-02 + + 1.2887109816074371e-01 -2.6186849921941757e-02 + <_> + + 0 -1 1181 7.1718869730830193e-03 + + 8.8158592581748962e-02 -2.0327340066432953e-01 + <_> + + 0 -1 1182 1.1641040444374084e-02 + + -2.1058250218629837e-02 1.7591789364814758e-01 + <_> + + 0 -1 1183 5.6764329783618450e-03 + + 4.9941159784793854e-02 -2.7329298853874207e-01 + <_> + + 0 -1 1184 -4.4392690062522888e-02 + + 5.6766128540039062e-01 -1.8674779683351517e-02 + <_> + + 0 -1 1185 1.3367610517889261e-04 + + -1.2990309298038483e-01 1.3542290031909943e-01 + <_> + + 0 -1 1186 -4.4111948460340500e-02 + + 2.2684830427169800e-01 -1.3318399898707867e-02 + <_> + + 0 -1 1187 2.9443150851875544e-03 + + 4.3161459267139435e-02 -2.9311171174049377e-01 + <_> + + 0 -1 1188 3.5300010349601507e-03 + + 7.7193722128868103e-02 -2.6324981451034546e-01 + <_> + + 0 -1 1189 1.0119210183620453e-01 + + -5.4924260824918747e-02 3.2430219650268555e-01 + <_> + + 0 -1 1190 -2.2348569706082344e-02 + + 3.0803111195564270e-01 -2.2518489509820938e-02 + <_> + + 0 -1 1191 6.4755380153656006e-03 + + -1.2045770138502121e-01 1.3186110556125641e-01 + <_> + + 0 -1 1192 1.0904319584369659e-02 + + 1.0217989981174469e-01 -1.8308849632740021e-01 + <_> + + 0 -1 1193 -1.1256629601120949e-02 + + -2.9186639189720154e-01 5.5491220206022263e-02 + <_> + + 0 -1 1194 3.6791800521314144e-03 + + -5.0614688545465469e-02 8.2663312554359436e-02 + <_> + + 0 -1 1195 -9.1721288859844208e-02 + + -7.7127552032470703e-01 1.9312959164381027e-02 + <_> + + 0 -1 1196 4.0099889039993286e-02 + + 7.8663527965545654e-03 -8.1302827596664429e-01 + <_> + + 0 -1 1197 -5.4956428706645966e-02 + + 2.9059520363807678e-01 -5.9825580567121506e-02 + <_> + + 0 -1 1198 2.4804650247097015e-01 + + 1.1665189638733864e-02 -6.9121950864791870e-01 + <_> + + 0 -1 1199 -3.4284800291061401e-02 + + 4.5358398556709290e-01 -3.2071251422166824e-02 + <_> + + 0 -1 1200 2.5439230725169182e-02 + + 1.9467150792479515e-02 -3.7927991151809692e-01 + <_> + + 0 -1 1201 -1.2720660306513309e-02 + + -2.1211430430412292e-01 6.1533831059932709e-02 + <_> + + 0 -1 1202 1.0831000283360481e-02 + + -5.1443681120872498e-02 1.6947689652442932e-01 + <_> + + 0 -1 1203 -2.1931570023298264e-02 + + 2.4839389324188232e-01 -5.6636359542608261e-02 + <_> + + 0 -1 1204 2.9397898912429810e-01 + + 1.1411529965698719e-02 -9.3696069717407227e-01 + <_> + + 0 -1 1205 -1.6342259943485260e-02 + + -3.1589549779891968e-01 4.4371981173753738e-02 + <_> + + 0 -1 1206 -4.4280499219894409e-02 + + 2.0337340235710144e-01 -2.1462319418787956e-02 + <_> + + 0 -1 1207 2.6503309607505798e-01 + + 1.1633150279521942e-02 -9.1220170259475708e-01 + <_> + + 0 -1 1208 -7.6378479599952698e-02 + + 1.8688270449638367e-01 -1.9672080874443054e-02 + <_> + + 0 -1 1209 -1.0061570443212986e-02 + + -2.6462039351463318e-01 4.6620260924100876e-02 + <_> + + 0 -1 1210 2.4921730160713196e-02 + + -1.9131390377879143e-02 2.0154500007629395e-01 + <_> + + 0 -1 1211 1.5098409676284064e-05 + + -1.6241690516471863e-01 7.6183967292308807e-02 + <_> + + 0 -1 1212 -1.0081910341978073e-01 + + -1. 
7.4751500505954027e-04 + <_> + + 0 -1 1213 6.5058596432209015e-02 + + -4.0468640625476837e-02 3.5160079598426819e-01 + <_> + + 0 -1 1214 -1.2190239876508713e-01 + + -5.3624558448791504e-01 1.8637020140886307e-02 + <_> + + 0 -1 1215 -9.8520738538354635e-04 + + 1.1398199945688248e-01 -1.1298830062150955e-01 + <_> + + 0 -1 1216 -2.5300619006156921e-01 + + -4.3375909328460693e-01 1.2367400340735912e-02 + <_> + + 0 -1 1217 7.5246659107506275e-03 + + 6.7355476319789886e-02 -1.8583969771862030e-01 + <_> + + 0 -1 1218 4.8102210275828838e-03 + + -6.5870061516761780e-02 1.2848910689353943e-01 + <_> + + 0 -1 1219 -1.4562129508703947e-03 + + 1.8110689520835876e-01 -1.1248459666967392e-01 + <_> + + 0 -1 1220 -5.6546321138739586e-03 + + 1.0369840264320374e-01 -1.4115570485591888e-01 + <_> + + 0 -1 1221 -3.1951289623975754e-02 + + -3.2971608638763428e-01 4.8281811177730560e-02 + <_> + + 0 -1 1222 4.2190380394458771e-02 + + -1.1644810438156128e-02 1.3701300323009491e-01 + <_> + + 0 -1 1223 1.2606659904122353e-02 + + -6.0395881533622742e-02 2.4210059642791748e-01 + <_> + + 0 -1 1224 -6.0083861462771893e-03 + + 9.5677606761455536e-02 -2.0248259603977203e-01 + <_> + + 0 -1 1225 4.0676388889551163e-02 + + -3.8506429642438889e-02 3.9824029803276062e-01 + <_> + + 0 -1 1226 -1.3010219670832157e-02 + + -7.7870443463325500e-02 3.2533310353755951e-02 + <_> + + 0 -1 1227 -5.6646969169378281e-02 + + -9.5293551683425903e-01 1.7375659197568893e-02 + <_> + + 0 -1 1228 3.7307970225811005e-02 + + -3.3261440694332123e-02 4.6856319904327393e-01 + <_> + + 0 -1 1229 -2.7986379340291023e-02 + + -4.6356698870658875e-01 2.8524029999971390e-02 + <_> + + 0 -1 1230 -7.5014896690845490e-02 + + 2.4519899487495422e-01 -1.5830159187316895e-02 + <_> + + 0 -1 1231 2.7673080563545227e-02 + + -3.6458358168601990e-02 3.7215578556060791e-01 + <_> + + 0 -1 1232 -1.7312960699200630e-02 + + -2.2117659449577332e-01 4.3232619762420654e-02 + <_> + + 0 -1 1233 -5.8893948793411255e-02 + + 3.9726749062538147e-01 -3.7632528692483902e-02 + <_> + + 0 -1 1234 1.3193679973483086e-02 + + 2.4857729673385620e-02 -1.7514359951019287e-01 + <_> + + 0 -1 1235 3.8230679929256439e-02 + + 2.9635110870003700e-02 -4.3452748656272888e-01 + <_> + + 0 -1 1236 1.6845399513840675e-02 + + 3.9338748902082443e-02 -2.3765720427036285e-01 + <_> + + 0 -1 1237 -1.1559460312128067e-01 + + -4.0006878972053528e-01 3.2390538603067398e-02 + <_> + + 0 -1 1238 -1.7385910032317042e-03 + + 4.8545818775892258e-02 -6.1474680900573730e-02 + <_> + + 0 -1 1239 -3.3697668462991714e-02 + + 2.4345000088214874e-01 -6.5504603087902069e-02 + <_> + + 0 -1 1240 -3.4722799062728882e-01 + + -3.3612060546875000e-01 1.5501200221478939e-02 + <_> + + 0 -1 1241 5.8668039739131927e-02 + + 6.8068057298660278e-02 -2.2104929387569427e-01 + <_> + + 0 -1 1242 2.3718189448118210e-02 + + -1.4779569581151009e-02 4.7328341007232666e-01 + <_> + + 0 -1 1243 2.8812700882554054e-02 + + 3.3309880644083023e-02 -4.6797698736190796e-01 + <_> + + 0 -1 1244 4.1023749858140945e-02 + + -2.8293000534176826e-02 4.9427551031112671e-01 + <_> + + 0 -1 1245 -1.2017590051982552e-04 + + 1.0363650321960449e-01 -1.2107490003108978e-01 + <_> + + 0 -1 1246 -1.0908070206642151e-01 + + -1. 
3.2971999607980251e-03 + <_> + + 0 -1 1247 -4.5967359095811844e-02 + + 6.4819461107254028e-01 -1.9233519211411476e-02 + <_> + + 0 -1 1248 -1.9345719367265701e-02 + + -3.3145549893379211e-01 3.9008539170026779e-02 + <_> + + 0 -1 1249 1.2312790378928185e-02 + + 4.1029628366231918e-02 -2.7943921089172363e-01 + <_> + + 0 -1 1250 2.1535221021622419e-03 + + -6.7545056343078613e-02 1.1647740006446838e-01 + <_> + + 0 -1 1251 -3.2158788293600082e-02 + + 5.4741638898849487e-01 -2.3730229586362839e-02 + <_> + + 0 -1 1252 -2.7592359110713005e-02 + + -7.5319421291351318e-01 8.4066214039921761e-03 + <_> + + 0 -1 1253 2.2264510393142700e-02 + + 1.2146740220487118e-02 -9.0291297435760498e-01 + <_> + + 0 -1 1254 1.5361379832029343e-02 + + -3.1641189008951187e-02 3.2132801413536072e-01 + <_> + + 0 -1 1255 -1.2360660359263420e-02 + + 2.9248631000518799e-01 -4.5303758233785629e-02 + <_> + + 0 -1 1256 2.2978749126195908e-02 + + -1.2054479680955410e-02 1.9060949981212616e-01 + <_> + + 0 -1 1257 2.3296380415558815e-02 + + 3.1409051269292831e-02 -5.1856082677841187e-01 + <_> + + 0 -1 1258 5.7384249521419406e-04 + + -1.0293489694595337e-01 8.1548452377319336e-02 + <_> + + 0 -1 1259 -3.3020470291376114e-02 + + 4.2470559477806091e-01 -4.4794678688049316e-02 + <_> + + 0 -1 1260 -2.1713029593229294e-02 + + -1.4825260639190674e-01 1.2959879823029041e-02 + <_> + + 0 -1 1261 -9.7430922323837876e-05 + + 1.1899639666080475e-01 -1.4753970503807068e-01 + <_> + + 0 -1 1262 -9.2907734215259552e-03 + + -1.1635430157184601e-01 5.4104641079902649e-02 + <_> + + 0 -1 1263 3.7244848906993866e-02 + + -3.4421201795339584e-02 3.7943929433822632e-01 + <_> + + 0 -1 1264 1.5277029573917389e-01 + + 7.2725401259958744e-03 -3.4155088663101196e-01 + <_> + + 0 -1 1265 -1.2663450092077255e-02 + + -3.0596670508384705e-01 3.8231261074542999e-02 + <_> + + 0 -1 1266 -7.4888423085212708e-02 + + -3.4658950567245483e-01 1.5501650050282478e-02 + <_> + + 0 -1 1267 -4.0114589035511017e-02 + + 3.2629820704460144e-01 -4.1313670575618744e-02 + <_> + + 0 -1 1268 -9.6492111682891846e-02 + + 1.0172849893569946e-01 -1.7156010493636131e-02 + <_> + + 0 -1 1269 -1.6712839901447296e-01 + + -7.7655118703842163e-01 1.8029559403657913e-02 + <_> + + 0 -1 1270 -8.2981940358877182e-03 + + -1.4397139847278595e-01 5.8948140591382980e-02 + <_> + + 0 -1 1271 -3.7844169419258833e-03 + + 1.7095179855823517e-01 -7.8256443142890930e-02 + <_> + + 0 -1 1272 -1.6076420247554779e-01 + + 2.3138229548931122e-01 -1.3428050093352795e-02 + <_> + + 0 -1 1273 6.4544437918812037e-04 + + -1.4424400031566620e-01 8.3287820219993591e-02 + <_> + + 0 -1 1274 2.2737309336662292e-02 + + -3.4155819565057755e-02 3.5519808530807495e-01 + <_> + + 0 -1 1275 -3.9030050393193960e-03 + + -1.8736769258975983e-01 6.4628012478351593e-02 + <_> + + 0 -1 1276 -5.1145430654287338e-02 + + 6.6892707347869873e-01 -1.1180049739778042e-02 + <_> + + 0 -1 1277 -6.0482369735836983e-03 + + 1.8622750043869019e-01 -6.3018701970577240e-02 + <_> + + 0 -1 1278 1.1743569746613503e-02 + + 2.5449279695749283e-02 -1.3331249356269836e-01 + <_> + + 0 -1 1279 8.4120890824124217e-04 + + -9.3333467841148376e-02 1.3315880298614502e-01 + <_> + + 0 -1 1280 -3.7756171077489853e-02 + + -2.3138800263404846e-01 4.0569789707660675e-02 + <_> + + 0 -1 1281 -2.0867560058832169e-02 + + 1.0056090354919434e-01 -1.1744190007448196e-01 + <_> + + 0 -1 1282 -3.9802178740501404e-02 + + -1.1585719883441925e-01 1.2668189406394958e-01 + <_> + 111 + -6.8169009685516357e-01 + + <_> + + 0 -1 1283 8.4546189755201340e-03 + + -1.6289660334587097e-01 
[OpenCV cascade-classifier XML payload continues (added verbatim by this diff; the raw numeric lines are elided here): weak-classifier entries 1284-2060, each consisting of a feature index, a split threshold, and left/right leaf values. Six stage headers fall inside this span, declaring 107, 107, 123, 127, 152, and 135 weak classifiers with stage thresholds -6.0689e-01, -5.6881e-01, -6.5824e-01, -3.0621e+01, -3.0692e+01, and -3.0609e+01 respectively.]
-2.0257910713553429e-02 + + 2.3568239808082581e-01 -4.3432798236608505e-02 + <_> + + 0 -1 2061 -8.6421817541122437e-02 + + -3.4541681408882141e-01 1.1248850263655186e-02 + <_> + + 0 -1 2062 -6.7245952785015106e-02 + + -6.8752902746200562e-01 1.1868669651448727e-02 + <_> + + 0 -1 2063 -1.2990389764308929e-01 + + -7.9069268703460693e-01 2.5537670589983463e-03 + <_> + + 0 -1 2064 -3.0394670367240906e-01 + + -8.9989352226257324e-01 8.1501724198460579e-03 + <_> + + 0 -1 2065 -4.1988548636436462e-01 + + -7.7303320169448853e-01 1.3665149454027414e-03 + <_> + + 0 -1 2066 -1.6851289570331573e-01 + + 2.4319399893283844e-01 -4.1280739009380341e-02 + <_> + + 0 -1 2067 2.8788880445063114e-03 + + 2.0577169954776764e-02 -1.8590900301933289e-01 + <_> + + 0 -1 2068 -4.0223840624094009e-02 + + 4.3099269270896912e-01 -2.3104710504412651e-02 + <_> + + 0 -1 2069 3.9687040261924267e-03 + + 4.3601520359516144e-02 -9.2233568429946899e-02 + <_> + + 0 -1 2070 -2.7650719508528709e-02 + + -6.1707872152328491e-01 1.4680569991469383e-02 + <_> + + 0 -1 2071 -2.3034301120787859e-03 + + 9.0349592268466949e-02 -6.1664551496505737e-02 + <_> + + 0 -1 2072 -2.9040789231657982e-02 + + 2.7737939357757568e-01 -3.9218869060277939e-02 + <_> + + 0 -1 2073 1.3288260437548161e-02 + + 3.1138259917497635e-02 -1.3558749854564667e-01 + <_> + + 0 -1 2074 3.3968928619287908e-05 + + -1.3562929630279541e-01 7.6467581093311310e-02 + <_> + + 0 -1 2075 -6.8583860993385315e-03 + + -1.0365810245275497e-01 2.5939159095287323e-02 + <_> + + 0 -1 2076 -1.4360919594764709e-02 + + -2.1136499941349030e-01 5.2973140031099319e-02 + <_> + + 0 -1 2077 -1.7468679696321487e-02 + + -1.0518109798431396e-01 1.7715079709887505e-02 + <_> + + 0 -1 2078 -9.8544567823410034e-02 + + 2.5649461150169373e-01 -4.4229641556739807e-02 + <_> + + 0 -1 2079 -2.8123459778726101e-03 + + -7.3800362646579742e-02 1.5400940179824829e-01 + <_> + + 0 -1 2080 2.1941340528428555e-03 + + -1.4216299355030060e-01 8.9139223098754883e-02 + <_> + + 0 -1 2081 4.6820759773254395e-02 + + 2.9364090412855148e-02 -6.2754891812801361e-02 + <_> + + 0 -1 2082 3.2891759276390076e-01 + + 1.3015690259635448e-02 -7.8347128629684448e-01 + <_> + + 0 -1 2083 -2.0470520481467247e-02 + + -7.6814353466033936e-02 3.9800468832254410e-02 + <_> + + 0 -1 2084 8.8677026331424713e-02 + + -4.0312368422746658e-02 2.8453868627548218e-01 + <_> + + 0 -1 2085 -1.1557979742065072e-03 + + 4.2199321091175079e-02 -4.1446208953857422e-02 + <_> + + 0 -1 2086 6.0524538159370422e-02 + + -1.6918700188398361e-02 6.7237138748168945e-01 + <_> + + 0 -1 2087 4.0830459445714951e-02 + + 1.3364840298891068e-02 -3.1113299727439880e-01 + <_> + + 0 -1 2088 -3.1132870353758335e-03 + + -1.7262780666351318e-01 5.9382218867540359e-02 + <_> + + 0 -1 2089 -4.3638627976179123e-03 + + 1.7265330255031586e-01 -6.2423970550298691e-02 + <_> + + 0 -1 2090 -3.2834090292453766e-02 + + 4.0275371074676514e-01 -2.5799039751291275e-02 + <_> + + 0 -1 2091 6.4377002418041229e-02 + + -4.7380630858242512e-03 7.5221067667007446e-01 + <_> + + 0 -1 2092 2.7642730623483658e-02 + + 3.7644479423761368e-02 -2.9220271110534668e-01 + <_> + + 0 -1 2093 2.2171199321746826e-02 + + -2.4654069915413857e-02 2.0533810555934906e-01 + <_> + + 0 -1 2094 1.5859310515224934e-03 + + 8.9463792741298676e-02 -1.2611730396747589e-01 + <_> + + 0 -1 2095 -1.8872050568461418e-02 + + 1.3072650134563446e-01 -3.6953710019588470e-02 + <_> + + 0 -1 2096 -1.3306169770658016e-02 + + -2.2963209450244904e-01 4.2687188833951950e-02 + <_> + + 0 -1 2097 -7.0407122373580933e-02 + + -7.1117508411407471e-01 
6.6957580856978893e-03 + <_> + + 0 -1 2098 4.1748929768800735e-02 + + -3.2927870750427246e-02 3.0035281181335449e-01 + <_> + + 0 -1 2099 5.3282231092453003e-03 + + 5.1811750978231430e-02 -1.9069090485572815e-01 + <_> + + 0 -1 2100 2.4094989057630301e-03 + + -8.0687969923019409e-02 1.2510129809379578e-01 + <_> + + 0 -1 2101 -6.2405979260802269e-03 + + 1.0740630328655243e-01 -3.9979010820388794e-02 + <_> + + 0 -1 2102 -6.7312467098236084e-01 + + -1. 1.0070810094475746e-02 + <_> + + 0 -1 2103 -9.2983558773994446e-02 + + -1. -2.4261360522359610e-03 + <_> + + 0 -1 2104 3.3629760146141052e-02 + + 2.4122869595885277e-02 -4.1387900710105896e-01 + <_> + + 0 -1 2105 2.3880619555711746e-02 + + 9.6614202484488487e-03 -2.1973779797554016e-01 + <_> + + 0 -1 2106 1.2738780351355672e-03 + + -8.3555117249488831e-02 1.2269689887762070e-01 + <_> + + 0 -1 2107 1.8414139747619629e-02 + + 3.0798140913248062e-02 -3.5609170794487000e-01 + <_> + + 0 -1 2108 -5.6469578295946121e-02 + + 8.8631778955459595e-01 -1.2698300182819366e-02 + <_> + + 0 -1 2109 -4.6219761134125292e-04 + + 3.4681901335716248e-02 -8.2850828766822815e-02 + <_> + + 0 -1 2110 -1.9060859456658363e-02 + + 3.5369411110877991e-01 -2.7611760422587395e-02 + <_> + + 0 -1 2111 1.5762279508635402e-03 + + 4.0939908474683762e-02 -2.2517409920692444e-01 + <_> + + 0 -1 2112 2.0101880654692650e-02 + + -2.3995550349354744e-02 4.1091251373291016e-01 + <_> + + 0 -1 2113 2.7211669366806746e-03 + + 2.8122449293732643e-02 -1.4200119674205780e-01 + <_> + + 0 -1 2114 -1.0944429785013199e-01 + + 9.5085740089416504e-01 -9.4355372712016106e-03 + <_> + + 0 -1 2115 -1.2755279894918203e-03 + + 5.6902900338172913e-02 -8.3429783582687378e-02 + <_> + + 0 -1 2116 -8.0578401684761047e-02 + + -9.5139288902282715e-01 8.2268668338656425e-03 + <_> + + 0 -1 2117 -1.2047989666461945e-01 + + -3.0273869633674622e-01 2.8489340096712112e-02 + <_> + + 0 -1 2118 -1.8294970691204071e-01 + + 2.3866130411624908e-01 -6.2773942947387695e-02 + <_> + + 0 -1 2119 -1.7106409370899200e-01 + + -5.9394681453704834e-01 3.1515269074589014e-03 + <_> + + 0 -1 2120 -7.3414877057075500e-02 + + -8.6933082342147827e-01 1.0084389708936214e-02 + <_> + + 0 -1 2121 2.4238299578428268e-02 + + -2.1756110712885857e-02 1.6218559443950653e-01 + <_> + + 0 -1 2122 -7.1713668294250965e-03 + + -9.7345590591430664e-02 9.2148497700691223e-02 + <_> + + 0 -1 2123 -3.3344399183988571e-02 + + 7.4645392596721649e-02 -2.2160679101943970e-02 + <_> + + 0 -1 2124 7.2907900903373957e-04 + + -9.4971813261508942e-02 1.1826740205287933e-01 + <_> + + 0 -1 2125 -1.0217289673164487e-03 + + 5.6426230818033218e-02 -3.7573829293251038e-02 + <_> + + 0 -1 2126 -8.4900937508791685e-04 + + -1.3883149623870850e-01 7.0047326385974884e-02 + <_> + + 0 -1 2127 9.9850513041019440e-02 + + -1.4011589810252190e-02 2.6115679740905762e-01 + <_> + + 0 -1 2128 -1.3090069591999054e-01 + + 7.1379351615905762e-01 -1.1643799953162670e-02 + <_> + + 0 -1 2129 9.1210529208183289e-03 + + 4.5402809977531433e-02 -2.1830010414123535e-01 + <_> + + 0 -1 2130 2.0106479525566101e-01 + + -2.0753270015120506e-02 5.1230221986770630e-01 + <_> + + 0 -1 2131 4.7389309853315353e-02 + + 9.4779124483466148e-03 -4.7942391037940979e-01 + <_> + + 0 -1 2132 -5.7118538767099380e-02 + + 3.9166051149368286e-01 -2.6703910902142525e-02 + <_> + + 0 -1 2133 -8.3700623363256454e-03 + + -1.3399459421634674e-01 4.8460900783538818e-02 + <_> + + 0 -1 2134 4.0913890115916729e-03 + + -5.9489779174327850e-02 1.7438539862632751e-01 + <_> + + 0 -1 2135 7.1899488568305969e-02 + + 1.1723180301487446e-02 
-3.6274778842926025e-01 + <_> + + 0 -1 2136 -3.6888250615447760e-03 + + 7.5763627886772156e-02 -1.5033599734306335e-01 + <_> + + 0 -1 2137 -7.4795219115912914e-03 + + 1.5027859807014465e-01 -4.5870490372180939e-02 + <_> + + 0 -1 2138 -1.2582589872181416e-02 + + -1.9915549457073212e-01 6.3917450606822968e-02 + <_> + + 0 -1 2139 3.5687079653143883e-03 + + -1.2117239832878113e-01 1.0956080257892609e-01 + <_> + + 0 -1 2140 1.7363800434395671e-03 + + 1.2258529663085938e-01 -9.3556262552738190e-02 + <_> + + 0 -1 2141 -1.4523629797622561e-03 + + 9.6722528338432312e-02 -8.0739699304103851e-02 + <_> + + 0 -1 2142 3.1017749570310116e-03 + + -6.9076471030712128e-02 1.5396459400653839e-01 + <_> + + 0 -1 2143 -8.5509587079286575e-03 + + -1.5186290442943573e-01 4.0346920490264893e-02 + <_> + + 0 -1 2144 -1.8966189818456769e-03 + + 1.2172549962997437e-01 -9.8543442785739899e-02 + <_> + 135 + -3.0601499557495117e+01 + + <_> + + 0 -1 2145 -2.3754740133881569e-02 + + 1.7095300555229187e-01 -1.1534280329942703e-01 + <_> + + 0 -1 2146 -7.3806629516184330e-03 + + 8.8067196309566498e-02 -4.0317770093679428e-02 + <_> + + 0 -1 2147 1.1198900174349546e-03 + + -7.9895302653312683e-02 1.3448899984359741e-01 + <_> + + 0 -1 2148 3.3718731254339218e-02 + + -1.5220030210912228e-02 2.9914170503616333e-01 + <_> + + 0 -1 2149 -2.8022660990245640e-04 + + 6.3599728047847748e-02 -1.5619190037250519e-01 + <_> + + 0 -1 2150 -3.9523928426206112e-03 + + -9.7961323335766792e-03 1.0571649670600891e-01 + <_> + + 0 -1 2151 2.1397129166871309e-03 + + 8.9953586459159851e-02 -1.4483779668807983e-01 + <_> + + 0 -1 2152 -6.7521296441555023e-02 + + 2.0932430028915405e-01 -5.3923811763525009e-02 + <_> + + 0 -1 2153 1.0378950275480747e-02 + + -6.4177162945270538e-02 2.7814629673957825e-01 + <_> + + 0 -1 2154 6.2903137877583504e-03 + + -4.9253720790147781e-02 8.2168422639369965e-02 + <_> + + 0 -1 2155 9.3974275514483452e-03 + + 8.4537737071514130e-02 -2.2885300219058990e-01 + <_> + + 0 -1 2156 1.0120930150151253e-02 + + 3.3337119966745377e-02 -8.1664256751537323e-02 + <_> + + 0 -1 2157 3.1531939748674631e-03 + + -1.0220990329980850e-01 1.1837360262870789e-01 + <_> + + 0 -1 2158 7.5137287378311157e-02 + + 2.7504051104187965e-03 -1.0000959634780884e+00 + <_> + + 0 -1 2159 -2.3692219983786345e-03 + + 9.9092483520507812e-02 -1.1425189673900604e-01 + <_> + + 0 -1 2160 -2.4510379880666733e-02 + + 2.8708320856094360e-01 -1.6148800030350685e-02 + <_> + + 0 -1 2161 -1.9670750480145216e-03 + + -1.1531370133161545e-01 8.6816556751728058e-02 + <_> + + 0 -1 2162 3.0845379456877708e-02 + + -2.4090610444545746e-02 1.9607549905776978e-01 + <_> + + 0 -1 2163 2.3816309869289398e-02 + + 3.2824039459228516e-02 -3.5710439085960388e-01 + <_> + + 0 -1 2164 -4.0199130773544312e-02 + + -5.2850788831710815e-01 6.0749719850718975e-03 + <_> + + 0 -1 2165 -6.8876100704073906e-03 + + 2.2058850526809692e-01 -5.9151489287614822e-02 + <_> + + 0 -1 2166 -2.5466730585321784e-04 + + 7.1897879242897034e-02 -8.4962032735347748e-02 + <_> + + 0 -1 2167 9.8468195647001266e-03 + + 4.1366759687662125e-02 -2.3984520137310028e-01 + <_> + + 0 -1 2168 2.7934400364756584e-02 + + -2.3647159337997437e-02 2.4738009274005890e-01 + <_> + + 0 -1 2169 -2.2960390895605087e-02 + + -4.5187929272651672e-01 2.2305779159069061e-02 + <_> + + 0 -1 2170 3.2323438790626824e-04 + + -8.7536007165908813e-02 7.8490957617759705e-02 + <_> + + 0 -1 2171 3.1954899430274963e-02 + + -2.6202389970421791e-02 3.9204901456832886e-01 + <_> + + 0 -1 2172 1.9027979578822851e-03 + + 6.2762781977653503e-02 
-1.6107350587844849e-01 + <_> + + 0 -1 2173 -3.2691629603505135e-03 + + 1.0168000310659409e-01 -1.0432480275630951e-01 + <_> + + 0 -1 2174 1.0040200315415859e-02 + + -2.8046580031514168e-02 1.2117899954319000e-01 + <_> + + 0 -1 2175 -3.4158680588006973e-02 + + -2.8974449634552002e-01 3.5282660275697708e-02 + <_> + + 0 -1 2176 1.7615250544622540e-03 + + -5.5583070963621140e-02 7.4158452451229095e-02 + <_> + + 0 -1 2177 -2.1134650334715843e-02 + + 2.5130590796470642e-01 -4.0354639291763306e-02 + <_> + + 0 -1 2178 2.9759369790554047e-02 + + 3.8029540330171585e-02 -1.4226369559764862e-01 + <_> + + 0 -1 2179 1.4866080135107040e-02 + + -3.9721690118312836e-02 2.7522540092468262e-01 + <_> + + 0 -1 2180 -3.5829428583383560e-02 + + -3.3451971411705017e-01 9.6839247271418571e-03 + <_> + + 0 -1 2181 -3.2887340057641268e-03 + + -1.4258219301700592e-01 6.8576209247112274e-02 + <_> + + 0 -1 2182 4.2714878916740417e-02 + + -1.4240439981222153e-02 3.8765299320220947e-01 + <_> + + 0 -1 2183 1.2328879674896598e-03 + + 7.8623853623867035e-02 -1.1869420111179352e-01 + <_> + + 0 -1 2184 -1.0447620414197445e-02 + + -1.4882990717887878e-01 3.1571168452501297e-02 + <_> + + 0 -1 2185 1.2656359933316708e-02 + + -4.6572461724281311e-02 2.6212608814239502e-01 + <_> + + 0 -1 2186 4.9849718809127808e-02 + + 1.7015339806675911e-02 -1.4268730580806732e-01 + <_> + + 0 -1 2187 -1.8607240170240402e-02 + + 2.3338650166988373e-01 -4.7094941139221191e-02 + <_> + + 0 -1 2188 -5.4397370666265488e-02 + + -4.0511301159858704e-01 8.1606470048427582e-03 + <_> + + 0 -1 2189 2.9153900686651468e-03 + + -8.9313946664333344e-02 1.3335379958152771e-01 + <_> + + 0 -1 2190 -5.9154080227017403e-03 + + -2.0414529740810394e-01 4.8475701361894608e-02 + <_> + + 0 -1 2191 -1.9841329194605350e-03 + + 1.3428109884262085e-01 -7.5892791152000427e-02 + <_> + + 0 -1 2192 -4.4047520495951176e-03 + + 4.1852138936519623e-02 -1.0119090229272842e-01 + <_> + + 0 -1 2193 1.7982879653573036e-02 + + 4.3978679925203323e-02 -2.5054019689559937e-01 + <_> + + 0 -1 2194 -7.8059501945972443e-02 + + -3.3025071024894714e-01 6.3089421018958092e-03 + <_> + + 0 -1 2195 7.2548650205135345e-03 + + -1.0872170329093933e-01 9.9411018192768097e-02 + <_> + + 0 -1 2196 -2.7871869970113039e-03 + + 1.3659299910068512e-01 -8.4799639880657196e-02 + <_> + + 0 -1 2197 -9.3798413872718811e-03 + + -1.1872450262308121e-01 7.9108059406280518e-02 + <_> + + 0 -1 2198 -5.4926410317420959e-02 + + 1.4382070302963257e-01 -3.0072269961237907e-02 + <_> + + 0 -1 2199 -4.4219079427421093e-03 + + 1.0666429996490479e-01 -1.0838100314140320e-01 + <_> + + 0 -1 2200 1.0763059835880995e-03 + + 2.7380989864468575e-02 -5.5446051061153412e-02 + <_> + + 0 -1 2201 -7.2514012455940247e-02 + + -1.0893449932336807e-01 1.0097540169954300e-01 + <_> + + 0 -1 2202 -1.6472190618515015e-01 + + 3.0365368723869324e-01 -4.3666210025548935e-02 + <_> + + 0 -1 2203 7.9837806522846222e-02 + + -1.0828680358827114e-02 8.9977437257766724e-01 + <_> + + 0 -1 2204 -5.2413612138479948e-04 + + 8.5230633616447449e-02 -1.2053979933261871e-01 + <_> + + 0 -1 2205 -2.1632270887494087e-02 + + -2.1092039346694946e-01 6.5582543611526489e-02 + <_> + + 0 -1 2206 1.2691530585289001e-01 + + -4.5935749076306820e-03 4.5089641213417053e-01 + <_> + + 0 -1 2207 9.5472350716590881e-02 + + -2.0798899233341217e-02 5.2474659681320190e-01 + <_> + + 0 -1 2208 -8.2936078310012817e-02 + + 8.4976738691329956e-01 -5.0510508008301258e-03 + <_> + + 0 -1 2209 7.7482969500124454e-03 + + -5.5318288505077362e-02 1.7145830392837524e-01 + <_> + + 0 -1 2210 
-2.1768439561128616e-02 + + -1.5947930514812469e-01 6.0873799026012421e-02 + <_> + + 0 -1 2211 -1.1072609777329490e-04 + + 7.8877292573451996e-02 -1.3177630305290222e-01 + <_> + + 0 -1 2212 3.1122909858822823e-03 + + -4.3046839535236359e-02 6.2392581254243851e-02 + <_> + + 0 -1 2213 -2.8692940250039101e-03 + + 1.3746979832649231e-01 -8.0494217574596405e-02 + <_> + + 0 -1 2214 1.0575760155916214e-01 + + 1.0569440200924873e-03 -9.9993818998336792e-01 + <_> + + 0 -1 2215 4.6192679554224014e-02 + + 1.7228020355105400e-02 -5.2604919672012329e-01 + <_> + + 0 -1 2216 -2.5476190447807312e-01 + + -6.2927299737930298e-01 1.3698619790375233e-02 + <_> + + 0 -1 2217 -2.7374029159545898e-03 + + 1.2747539579868317e-01 -6.9591522216796875e-02 + <_> + + 0 -1 2218 2.1854760125279427e-03 + + 4.1854761540889740e-02 -2.6481458544731140e-01 + <_> + + 0 -1 2219 -2.4050710722804070e-02 + + -2.6191109418869019e-01 3.4489940851926804e-02 + <_> + + 0 -1 2220 1.0211429744958878e-01 + + -1.5302860178053379e-02 3.9992758631706238e-01 + <_> + + 0 -1 2221 1.0281659662723541e-01 + + -2.9020670801401138e-02 3.6887159943580627e-01 + <_> + + 0 -1 2222 3.9206489920616150e-02 + + 8.9045017957687378e-03 -4.3242999911308289e-01 + <_> + + 0 -1 2223 -3.7830859422683716e-02 + + -6.2731212377548218e-01 1.4882829971611500e-02 + <_> + + 0 -1 2224 1.2507890351116657e-02 + + -1.7865059897303581e-02 1.4156140387058258e-01 + <_> + + 0 -1 2225 -1.5477590262889862e-02 + + 3.1676650047302246e-01 -3.3510830253362656e-02 + <_> + + 0 -1 2226 -4.5885699801146984e-03 + + -1.5222150087356567e-01 7.3211863636970520e-02 + <_> + + 0 -1 2227 -2.0505970343947411e-02 + + 1.1725380271673203e-01 -9.7457922995090485e-02 + <_> + + 0 -1 2228 -1.3098320364952087e-01 + + 5.4338067770004272e-01 -5.8803129941225052e-03 + <_> + + 0 -1 2229 4.7888278961181641e-02 + + -2.7120810002088547e-02 3.5723638534545898e-01 + <_> + + 0 -1 2230 2.5441530346870422e-01 + + 2.5680949911475182e-03 -9.9988257884979248e-01 + <_> + + 0 -1 2231 2.0652529783546925e-03 + + -9.4255000352859497e-02 1.0068359971046448e-01 + <_> + + 0 -1 2232 3.0141780152916908e-02 + + -1.5984520316123962e-02 2.4209509789943695e-01 + <_> + + 0 -1 2233 1.2305500358343124e-01 + + 4.3902460485696793e-02 -2.9046860337257385e-01 + <_> + + 0 -1 2234 1.1436889879405499e-02 + + 3.1826701015233994e-02 -1.0569609701633453e-01 + <_> + + 0 -1 2235 1.4229659922420979e-02 + + -6.4518727362155914e-02 1.6178989410400391e-01 + <_> + + 0 -1 2236 -1.9808039069175720e-02 + + 2.0909899473190308e-01 -2.7245460078120232e-02 + <_> + + 0 -1 2237 -3.2634709030389786e-02 + + -4.6265149116516113e-01 2.3877989500761032e-02 + <_> + + 0 -1 2238 8.1568211317062378e-02 + + -1.0983820073306561e-02 7.4517530202865601e-01 + <_> + + 0 -1 2239 1.7331159906461835e-03 + + 6.2832579016685486e-02 -1.5800160169601440e-01 + <_> + + 0 -1 2240 4.1524558328092098e-03 + + 2.8520949184894562e-02 -8.3923816680908203e-02 + <_> + + 0 -1 2241 2.0917340589221567e-04 + + -1.6536650061607361e-01 8.3170376718044281e-02 + <_> + + 0 -1 2242 -6.9550168700516224e-04 + + 5.7298898696899414e-02 -9.8668128252029419e-02 + <_> + + 0 -1 2243 1.0114730149507523e-01 + + -2.7031859382987022e-02 5.0937288999557495e-01 + <_> + + 0 -1 2244 2.0371530205011368e-02 + + -1.5991339460015297e-02 2.1110190451145172e-01 + <_> + + 0 -1 2245 1.9490359723567963e-01 + + 1.1169149540364742e-02 -8.0626577138900757e-01 + <_> + + 0 -1 2246 -1.5187750104814768e-03 + + 8.8670432567596436e-02 -6.5779693424701691e-02 + <_> + + 0 -1 2247 -2.2300280761555769e-05 + + 7.0237100124359131e-02 
-1.3656799495220184e-01 + <_> + + 0 -1 2248 7.0241810753941536e-03 + + 4.5264270156621933e-02 -1.2246630340814590e-01 + <_> + + 0 -1 2249 -5.8513730764389038e-03 + + 1.4548699557781219e-01 -7.7512867748737335e-02 + <_> + + 0 -1 2250 -1.2228869833052158e-02 + + -1.5762320160865784e-01 3.3091600984334946e-02 + <_> + + 0 -1 2251 -2.7475339174270630e-01 + + 4.1415899991989136e-01 -2.3306179791688919e-02 + <_> + + 0 -1 2252 -8.3073312416672707e-03 + + -6.6158972680568695e-02 4.5423369854688644e-02 + <_> + + 0 -1 2253 1.4967099763453007e-02 + + 3.9580021053552628e-02 -2.4474979937076569e-01 + <_> + + 0 -1 2254 3.5121920518577099e-03 + + -3.2608591020107269e-02 7.2080552577972412e-02 + <_> + + 0 -1 2255 6.0676191933453083e-03 + + -6.6284246742725372e-02 1.6455779969692230e-01 + <_> + + 0 -1 2256 -6.0948841273784637e-03 + + -1.6784119606018066e-01 6.8097747862339020e-02 + <_> + + 0 -1 2257 -4.4710501097142696e-03 + + 1.4348860085010529e-01 -7.5286053121089935e-02 + <_> + + 0 -1 2258 2.7629999443888664e-02 + + -6.0715568251907825e-03 4.6235299110412598e-01 + <_> + + 0 -1 2259 -4.1778348386287689e-03 + + -9.4480186700820923e-02 1.0268689692020416e-01 + <_> + + 0 -1 2260 -1.4997010293882340e-04 + + 4.5903969556093216e-02 -1.2689989805221558e-01 + <_> + + 0 -1 2261 9.3421656638383865e-03 + + -4.7851350158452988e-02 2.3776920139789581e-01 + <_> + + 0 -1 2262 -9.0454798191785812e-03 + + -1.4881759881973267e-01 2.5717660784721375e-02 + <_> + + 0 -1 2263 -1.0563050163909793e-03 + + -1.2465219944715500e-01 8.2118943333625793e-02 + <_> + + 0 -1 2264 -1.5602169558405876e-02 + + 3.0471551418304443e-01 -2.4503290653228760e-02 + <_> + + 0 -1 2265 -8.9588612318038940e-03 + + -2.3624059557914734e-01 4.6290140599012375e-02 + <_> + + 0 -1 2266 -7.6452922075986862e-03 + + 1.1393140256404877e-01 -2.6573060080409050e-02 + <_> + + 0 -1 2267 -1.9294900819659233e-02 + + 2.8820019960403442e-01 -3.5906881093978882e-02 + <_> + + 0 -1 2268 8.6250286549329758e-03 + + 6.1006020754575729e-02 -1.6832630336284637e-01 + <_> + + 0 -1 2269 2.5883490219712257e-02 + + -4.0142849087715149e-02 2.3263120651245117e-01 + <_> + + 0 -1 2270 -7.4946112930774689e-02 + + 7.1168798208236694e-01 -6.0237408615648746e-03 + <_> + + 0 -1 2271 -2.6808120310306549e-04 + + 7.7717900276184082e-02 -1.5358750522136688e-01 + <_> + + 0 -1 2272 6.1041440814733505e-02 + + -3.4070160239934921e-02 2.5833290815353394e-01 + <_> + + 0 -1 2273 -4.7920648939907551e-03 + + -1.5077829360961914e-01 8.4577240049839020e-02 + <_> + + 0 -1 2274 -1.2610630691051483e-01 + + -4.8404538631439209e-01 8.6965439841151237e-03 + <_> + + 0 -1 2275 -2.2879270836710930e-02 + + 6.7734187841415405e-01 -1.4856100082397461e-02 + <_> + + 0 -1 2276 -6.2760512810200453e-04 + + 5.0910349935293198e-02 -1.4076440036296844e-01 + <_> + + 0 -1 2277 -1.0543179698288441e-02 + + -9.0707249939441681e-02 1.1281900107860565e-01 + <_> + + 0 -1 2278 -2.4953829124569893e-03 + + 8.9523762464523315e-02 -7.5541287660598755e-02 + <_> + + 0 -1 2279 6.0986150056123734e-02 + + -3.2006978988647461e-02 3.3000910282135010e-01 + <_> + 143 + -3.0555000305175781e+01 + + <_> + + 0 -1 2280 -4.1241809725761414e-02 + + 2.4841840565204620e-01 -6.9879129528999329e-02 + <_> + + 0 -1 2281 -7.4663497507572174e-02 + + -7.5433689355850220e-01 4.0493709966540337e-03 + <_> + + 0 -1 2282 -2.3803679272532463e-02 + + 2.4313099682331085e-01 -4.5283928513526917e-02 + <_> + + 0 -1 2283 3.2028619199991226e-02 + + -1.2230539694428444e-02 3.9811220765113831e-01 + <_> + + 0 -1 2284 3.8454410969279706e-04 + + 6.9244839251041412e-02 
-1.7288799583911896e-01 + <_> + + 0 -1 2285 -2.0599530544131994e-03 + + 4.5083250850439072e-02 -6.3824482262134552e-02 + <_> + + 0 -1 2286 5.9174500405788422e-02 + + 1.3756089843809605e-02 5.8063977956771851e-01 + <_> + + 0 -1 2287 -8.1204501911997795e-03 + + -7.9060196876525879e-02 3.2097879797220230e-02 + <_> + + 0 -1 2288 -5.4362448863685131e-03 + + 8.0285012722015381e-02 -1.3880789279937744e-01 + <_> + + 0 -1 2289 4.0768779814243317e-02 + + 3.5265129059553146e-02 -1.6821040213108063e-01 + <_> + + 0 -1 2290 -1.0705769993364811e-02 + + -1.3227799534797668e-01 9.7147703170776367e-02 + <_> + + 0 -1 2291 -2.1374409552663565e-03 + + -1.1135129630565643e-01 1.0501199960708618e-01 + <_> + + 0 -1 2292 -6.0069030150771141e-03 + + 7.9701423645019531e-02 -1.4503550529479980e-01 + <_> + + 0 -1 2293 6.8584359250962734e-03 + + -2.8629170730710030e-02 1.5494349598884583e-01 + <_> + + 0 -1 2294 8.4308702498674393e-03 + + -6.8725876510143280e-02 1.3571439683437347e-01 + <_> + + 0 -1 2295 -3.1918209046125412e-02 + + -9.0021647512912750e-02 7.0172756910324097e-02 + <_> + + 0 -1 2296 1.4346960186958313e-01 + + 3.7936199456453323e-02 -3.3849731087684631e-01 + <_> + + 0 -1 2297 -5.3501531481742859e-02 + + -1. -1.3069049455225468e-03 + <_> + + 0 -1 2298 -4.3198501225560904e-04 + + 6.3140459358692169e-02 -1.4891080558300018e-01 + <_> + + 0 -1 2299 -3.6825511604547501e-02 + + 1.6418960690498352e-01 -3.6547198891639709e-02 + <_> + + 0 -1 2300 -9.3230612576007843e-02 + + -8.1855481863021851e-01 1.0488729923963547e-02 + <_> + + 0 -1 2301 -7.5886500999331474e-03 + + 9.6189923584461212e-02 -3.2392729073762894e-02 + <_> + + 0 -1 2302 1.9316580146551132e-03 + + -9.7133457660675049e-02 9.6836537122726440e-02 + <_> + + 0 -1 2303 -1.7610849440097809e-01 + + -1. 3.9064860902726650e-04 + <_> + + 0 -1 2304 -4.5753358863294125e-03 + + -1.4245940744876862e-01 7.2629533708095551e-02 + <_> + + 0 -1 2305 -7.1555696427822113e-02 + + 7.0124769210815430e-01 -8.1192785874009132e-03 + <_> + + 0 -1 2306 -5.1939189434051514e-03 + + -1.7593400180339813e-01 6.6920258104801178e-02 + <_> + + 0 -1 2307 9.7410175949335098e-03 + + -4.0632858872413635e-02 1.5366269648075104e-01 + <_> + + 0 -1 2308 -1.9197730347514153e-02 + + 8.8404722511768341e-02 -1.1119589954614639e-01 + <_> + + 0 -1 2309 7.7713979408144951e-03 + + -5.1531080156564713e-02 2.3341870307922363e-01 + <_> + + 0 -1 2310 4.6741779893636703e-02 + + 5.8658950030803680e-02 -2.1825340390205383e-01 + <_> + + 0 -1 2311 -6.7051820456981659e-02 + + -7.6968950033187866e-01 2.2733330260962248e-03 + <_> + + 0 -1 2312 1.0403609834611416e-02 + + -5.7208269834518433e-02 1.9874769449234009e-01 + <_> + + 0 -1 2313 6.8136617541313171e-02 + + 1.0924750007688999e-02 -2.3514769971370697e-01 + <_> + + 0 -1 2314 5.5462731979787350e-03 + + 7.6430208981037140e-02 -1.5048150718212128e-01 + <_> + + 0 -1 2315 3.5827890038490295e-02 + + 5.2330200560390949e-03 -9.0509557723999023e-01 + <_> + + 0 -1 2316 1.0099080391228199e-02 + + -4.9438349902629852e-02 1.9236649572849274e-01 + <_> + + 0 -1 2317 -7.3000352131202817e-04 + + 8.0038689076900482e-02 -5.9875860810279846e-02 + <_> + + 0 -1 2318 -6.2627308070659637e-02 + + -6.8771952390670776e-01 1.4409339986741543e-02 + <_> + + 0 -1 2319 4.1463607922196388e-03 + + 6.2068879604339600e-02 -1.4138600230216980e-01 + <_> + + 0 -1 2320 -1.4136059582233429e-01 + + 5.9439867734909058e-01 -1.6910530626773834e-02 + <_> + + 0 -1 2321 7.0147067308425903e-02 + + 3.5781029146164656e-03 -8.4541380405426025e-01 + <_> + + 0 -1 2322 1.8181180348619819e-03 + + -5.9031128883361816e-02 
1.7709979414939880e-01 + <_> + + 0 -1 2323 6.3149541616439819e-02 + + -7.9691512510180473e-03 2.4575470387935638e-01 + <_> + + 0 -1 2324 1.7065559513866901e-03 + + -1.3776679337024689e-01 7.2286598384380341e-02 + <_> + + 0 -1 2325 -4.1844159364700317e-02 + + -1.0204549878835678e-01 1.9412880763411522e-02 + <_> + + 0 -1 2326 6.1876028776168823e-02 + + 1.7572570592164993e-02 -5.9611201286315918e-01 + <_> + + 0 -1 2327 8.6206607520580292e-02 + + -8.3246696740388870e-03 5.9274739027023315e-01 + <_> + + 0 -1 2328 1.5561250038444996e-02 + + 5.5908791720867157e-02 -2.0174680650234222e-01 + <_> + + 0 -1 2329 1.9683360587805510e-03 + + 8.4109783172607422e-02 -9.5114283263683319e-02 + <_> + + 0 -1 2330 -3.2295130658894777e-03 + + 1.9859789311885834e-01 -6.0371041297912598e-02 + <_> + + 0 -1 2331 4.3861459940671921e-02 + + -7.5495638884603977e-03 2.7785310149192810e-01 + <_> + + 0 -1 2332 -7.1588042192161083e-04 + + 1.0671679675579071e-01 -1.1605340242385864e-01 + <_> + + 0 -1 2333 -1.1585080064833164e-02 + + 1.3923209905624390e-01 -7.2681717574596405e-02 + <_> + + 0 -1 2334 -2.4132030084729195e-02 + + -3.4343299269676208e-01 2.8587639331817627e-02 + <_> + + 0 -1 2335 -5.9670167975127697e-03 + + 6.2854968011379242e-02 -6.3237912952899933e-02 + <_> + + 0 -1 2336 -5.7298261672258377e-02 + + 3.3512100577354431e-01 -3.4425679594278336e-02 + <_> + + 0 -1 2337 -1.4440530538558960e-01 + + -1. -2.0486500579863787e-04 + <_> + + 0 -1 2338 -1.6152009367942810e-02 + + -1.8017260730266571e-01 6.0698080807924271e-02 + <_> + + 0 -1 2339 3.1132341246120632e-04 + + -8.7393969297409058e-02 1.0814479738473892e-01 + <_> + + 0 -1 2340 -3.4905138891190290e-03 + + 1.3089099526405334e-01 -8.2502506673336029e-02 + <_> + + 0 -1 2341 -5.1078200340270996e-02 + + -6.6744989156723022e-01 9.7670806571841240e-03 + <_> + + 0 -1 2342 2.3027899861335754e-01 + + 8.9318687096238136e-03 -8.8892549276351929e-01 + <_> + + 0 -1 2343 3.3260289579629898e-02 + + -3.8846820592880249e-02 1.1871550232172012e-01 + <_> + + 0 -1 2344 3.6332090385258198e-03 + + -8.1865288317203522e-02 1.2006369978189468e-01 + <_> + + 0 -1 2345 -1.3659459364134818e-04 + + 2.9094040393829346e-02 -8.6412712931632996e-02 + <_> + + 0 -1 2346 4.2663831263780594e-03 + + 5.9642590582370758e-02 -1.6777870059013367e-01 + <_> + + 0 -1 2347 -3.7726368755102158e-02 + + 2.5201418995857239e-01 -1.1480459943413734e-02 + <_> + + 0 -1 2348 -3.7723951041698456e-02 + + 3.6150801181793213e-01 -2.5164980441331863e-02 + <_> + + 0 -1 2349 -3.5217531025409698e-02 + + -2.0768259465694427e-01 1.5659499913454056e-02 + <_> + + 0 -1 2350 -2.6250150054693222e-02 + + 6.4363038539886475e-01 -1.3971080072224140e-02 + <_> + + 0 -1 2351 7.1132831275463104e-02 + + 5.0701410509645939e-03 -8.1053668260574341e-01 + <_> + + 0 -1 2352 2.8358760755509138e-03 + + 8.0034732818603516e-02 -1.1766050010919571e-01 + <_> + + 0 -1 2353 3.4837881103157997e-03 + + 6.9709457457065582e-02 -1.2136720120906830e-01 + <_> + + 0 -1 2354 2.9538539820350707e-05 + + -1.7090520262718201e-01 7.0092067122459412e-02 + <_> + + 0 -1 2355 2.6345230638980865e-02 + + -1.1046449653804302e-02 3.5467839241027832e-01 + <_> + + 0 -1 2356 3.3180779428221285e-04 + + -8.9763849973678589e-02 1.0402739793062210e-01 + <_> + + 0 -1 2357 9.9607985466718674e-03 + + -1.0574670135974884e-01 8.7481163442134857e-02 + <_> + + 0 -1 2358 6.9068476557731628e-02 + + -2.3135760799050331e-02 3.7765979766845703e-01 + <_> + + 0 -1 2359 -3.3804871141910553e-02 + + -8.0052927136421204e-02 6.6171988844871521e-02 + <_> + + 0 -1 2360 -2.1103899925947189e-03 + + 
7.2913236916065216e-02 -1.6986669600009918e-01 + <_> + + 0 -1 2361 7.1675583720207214e-02 + + -2.2668020799756050e-02 4.3757459521293640e-01 + <_> + + 0 -1 2362 -1.7637129873037338e-02 + + 1.4710550010204315e-01 -7.7648147940635681e-02 + <_> + + 0 -1 2363 2.1559430751949549e-03 + + -4.4561479240655899e-02 8.0616250634193420e-02 + <_> + + 0 -1 2364 -2.9923371039330959e-03 + + 1.6013230383396149e-01 -7.2628170251846313e-02 + <_> + + 0 -1 2365 -2.8351619839668274e-02 + + -2.4835529923439026e-01 7.8493626788258553e-03 + <_> + + 0 -1 2366 -5.3842412307858467e-03 + + -1.3290390372276306e-01 7.8615352511405945e-02 + <_> + + 0 -1 2367 1.6513720154762268e-02 + + -3.0867580324411392e-02 2.2910499572753906e-01 + <_> + + 0 -1 2368 -2.3480059579014778e-02 + + -3.4656900167465210e-01 2.8477910906076431e-02 + <_> + + 0 -1 2369 6.4804457128047943e-02 + + 3.2681180164217949e-03 -8.1848317384719849e-01 + <_> + + 0 -1 2370 2.9363438952714205e-03 + + 6.8371996283531189e-02 -1.6038259863853455e-01 + <_> + + 0 -1 2371 1.9352639093995094e-02 + + 1.2330809608101845e-02 -1.7751510441303253e-01 + <_> + + 0 -1 2372 -1.4157049590721726e-03 + + 1.6248740255832672e-01 -8.4821969270706177e-02 + <_> + + 0 -1 2373 -3.2165680080652237e-02 + + 2.5495579838752747e-01 -1.5387820079922676e-02 + <_> + + 0 -1 2374 9.9883928894996643e-02 + + 1.1630980297923088e-02 -8.6939221620559692e-01 + <_> + + 0 -1 2375 -8.5509859491139650e-04 + + 3.7509139627218246e-02 -4.1315130889415741e-02 + <_> + + 0 -1 2376 1.9948679953813553e-02 + + -3.3211439847946167e-02 2.6546698808670044e-01 + <_> + + 0 -1 2377 -1.6821360215544701e-02 + + -1.9504530727863312e-01 4.5578271150588989e-02 + <_> + + 0 -1 2378 -8.1685081124305725e-02 + + 8.0823719501495361e-01 -1.0028379969298840e-02 + <_> + + 0 -1 2379 -3.9467110764235258e-04 + + 3.7868868559598923e-02 -7.4321702122688293e-02 + <_> + + 0 -1 2380 -4.1939578950405121e-02 + + -7.5310271978378296e-01 1.2494780123233795e-02 + <_> + + 0 -1 2381 1.2319780141115189e-01 + + 1.5212129801511765e-03 -8.7456828355789185e-01 + <_> + + 0 -1 2382 4.3162349611520767e-03 + + 9.5917366445064545e-02 -9.8286882042884827e-02 + <_> + + 0 -1 2383 1.7064419807866216e-03 + + -6.7283846437931061e-02 5.8372668921947479e-02 + <_> + + 0 -1 2384 6.8853497505187988e-02 + + 3.9853271096944809e-02 -2.7014040946960449e-01 + <_> + + 0 -1 2385 1.5133110573515296e-03 + + 3.6803830415010452e-02 -7.8638777136802673e-02 + <_> + + 0 -1 2386 1.6671700403094292e-02 + + -5.2208479493856430e-02 2.5476139783859253e-01 + <_> + + 0 -1 2387 -2.4927379563450813e-03 + + -6.8352922797203064e-02 3.9182528853416443e-02 + <_> + + 0 -1 2388 1.7946650041267276e-03 + + 7.5641617178916931e-02 -1.8443019688129425e-01 + <_> + + 0 -1 2389 6.5764516592025757e-02 + + -2.7957379817962646e-02 1.3770729303359985e-01 + <_> + + 0 -1 2390 -3.2415628433227539e-02 + + 2.4957719445228577e-01 -3.8401741534471512e-02 + <_> + + 0 -1 2391 1.5985220670700073e-01 + + 2.3139530792832375e-02 -4.5876979827880859e-01 + <_> + + 0 -1 2392 3.3003050833940506e-02 + + -2.8549650683999062e-02 3.6482268571853638e-01 + <_> + + 0 -1 2393 8.3292415365576744e-03 + + 2.3422110825777054e-02 -1.2992739677429199e-01 + <_> + + 0 -1 2394 -1.4707380533218384e-01 + + -1. 
1.0342770256102085e-02 + <_> + + 0 -1 2395 1.0625930130481720e-01 + + 2.8901589103043079e-03 -6.2105101346969604e-01 + <_> + + 0 -1 2396 4.7905001789331436e-02 + + -2.5437310338020325e-02 3.8595038652420044e-01 + <_> + + 0 -1 2397 4.3562948703765869e-02 + + 1.2963670305907726e-02 -3.1574508547782898e-01 + <_> + + 0 -1 2398 -6.6401511430740356e-02 + + 3.7184339761734009e-01 -2.4248229339718819e-02 + <_> + + 0 -1 2399 1.0357169667258859e-03 + + -3.3857159316539764e-02 7.2818137705326080e-02 + <_> + + 0 -1 2400 -1.0010260343551636e-01 + + -2.6162430644035339e-01 4.0561348199844360e-02 + <_> + + 0 -1 2401 -1.4029429852962494e-01 + + 1.6186380386352539e-01 -3.7463869899511337e-02 + <_> + + 0 -1 2402 -3.6629181355237961e-02 + + -3.7988689541816711e-01 2.2493759170174599e-02 + <_> + + 0 -1 2403 1.8527939915657043e-01 + + -3.4648380242288113e-03 9.9972921609878540e-01 + <_> + + 0 -1 2404 1.3452930375933647e-02 + + 6.6191017627716064e-02 -1.5208050608634949e-01 + <_> + + 0 -1 2405 8.4628060460090637e-02 + + -3.2134260982275009e-02 2.2877800464630127e-01 + <_> + + 0 -1 2406 -8.7568372488021851e-02 + + 4.3229681253433228e-01 -2.4735029786825180e-02 + <_> + + 0 -1 2407 2.6502339169383049e-02 + + 2.3526629433035851e-02 -2.9849499464035034e-01 + <_> + + 0 -1 2408 -1.8273059278726578e-02 + + 5.0878030061721802e-01 -1.9735949113965034e-02 + <_> + + 0 -1 2409 -1.1995369568467140e-03 + + 7.4867762625217438e-02 -7.3861390352249146e-02 + <_> + + 0 -1 2410 3.1381230801343918e-02 + + -2.6280479505658150e-02 3.6583951115608215e-01 + <_> + + 0 -1 2411 2.3178670555353165e-02 + + 3.7155259400606155e-02 -2.5468569993972778e-01 + <_> + + 0 -1 2412 -1.3644699938595295e-02 + + 2.0717699825763702e-01 -4.2792771011590958e-02 + <_> + + 0 -1 2413 7.8315278515219688e-03 + + 3.6028519272804260e-02 -8.0337040126323700e-02 + <_> + + 0 -1 2414 -1.0035780258476734e-02 + + -2.2253769636154175e-01 4.2950030416250229e-02 + <_> + + 0 -1 2415 -5.1132131367921829e-02 + + 3.0586650967597961e-01 -2.7054589241743088e-02 + <_> + + 0 -1 2416 -6.9544702768325806e-02 + + 3.4688460826873779e-01 -3.1736221164464951e-02 + <_> + + 0 -1 2417 -2.4079360067844391e-02 + + 1.3291560113430023e-01 -3.0277779325842857e-02 + <_> + + 0 -1 2418 -6.6630518995225430e-03 + + -1.8473480641841888e-01 7.8750252723693848e-02 + <_> + + 0 -1 2419 4.3147690594196320e-02 + + -9.1566536575555801e-03 2.9485818743705750e-01 + <_> + + 0 -1 2420 -1.3808339834213257e-02 + + -2.8479158878326416e-01 3.2622188329696655e-02 + <_> + + 0 -1 2421 1.6351899504661560e-01 + + -3.7377059925347567e-03 5.6042182445526123e-01 + <_> + + 0 -1 2422 -2.4086149409413338e-02 + + 1.5841430425643921e-01 -6.6294513642787933e-02 + + <_> + + <_> + 5 5 12 6 -1. + <_> + 9 5 4 6 3. + <_> + + <_> + 7 13 10 4 -1. + <_> + 7 15 10 2 2. + <_> + + <_> + 3 14 9 4 -1. + <_> + 6 14 3 4 3. + <_> + + <_> + 15 6 5 6 -1. + <_> + 15 6 5 3 2. + 1 + <_> + + <_> + 0 1 22 14 -1. + <_> + 11 1 11 14 2. + <_> + + <_> + 1 11 20 4 -1. + <_> + 6 11 10 4 2. + <_> + + <_> + 7 6 6 5 -1. + <_> + 7 6 3 5 2. + 1 + <_> + + <_> + 5 13 12 4 -1. + <_> + 11 13 6 2 2. + <_> + 5 15 6 2 2. + <_> + + <_> + 7 12 8 6 -1. + <_> + 7 12 4 3 2. + <_> + 11 15 4 3 2. + <_> + + <_> + 20 0 2 18 -1. + <_> + 20 9 2 9 2. + <_> + + <_> + 8 6 6 12 -1. + <_> + 10 6 2 12 3. + <_> + + <_> + 8 5 6 6 -1. + <_> + 10 5 2 6 3. + <_> + + <_> + 5 15 12 2 -1. + <_> + 5 16 12 1 2. + <_> + + <_> + 20 0 2 18 -1. + <_> + 20 9 2 9 2. + <_> + + <_> + 0 0 2 18 -1. + <_> + 0 9 2 9 2. + <_> + + <_> + 13 7 6 4 -1. + <_> + 13 7 6 2 2. + 1 + <_> + + <_> + 2 14 7 4 -1. 
+ <_> + 2 16 7 2 2. + <_> + + <_> + 13 7 7 4 -1. + <_> + 13 7 7 2 2. + 1 + <_> + + <_> + 4 6 4 12 -1. + <_> + 4 10 4 4 3. + <_> + + <_> + 8 4 6 10 -1. + <_> + 11 4 3 5 2. + <_> + 8 9 3 5 2. + <_> + + <_> + 6 8 6 10 -1. + <_> + 6 8 3 5 2. + <_> + 9 13 3 5 2. + <_> + + <_> + 11 12 6 6 -1. + <_> + 11 15 6 3 2. + <_> + + <_> + 1 15 8 3 -1. + <_> + 5 15 4 3 2. + <_> + + <_> + 6 9 10 4 -1. + <_> + 6 11 10 2 2. + <_> + + <_> + 11 5 8 3 -1. + <_> + 10 6 8 1 3. + 1 + <_> + + <_> + 0 13 22 5 -1. + <_> + 0 13 11 5 2. + <_> + + <_> + 2 13 14 3 -1. + <_> + 9 13 7 3 2. + <_> + + <_> + 11 5 2 10 -1. + <_> + 11 5 1 10 2. + 1 + <_> + + <_> + 11 5 10 2 -1. + <_> + 11 5 10 1 2. + 1 + <_> + + <_> + 14 0 8 8 -1. + <_> + 18 0 4 4 2. + <_> + 14 4 4 4 2. + <_> + + <_> + 5 0 3 10 -1. + <_> + 5 5 3 5 2. + <_> + + <_> + 16 0 3 12 -1. + <_> + 16 6 3 6 2. + <_> + + <_> + 3 3 12 4 -1. + <_> + 3 3 6 2 2. + <_> + 9 5 6 2 2. + <_> + + <_> + 2 2 20 3 -1. + <_> + 7 2 10 3 2. + <_> + + <_> + 11 7 3 8 -1. + <_> + 11 7 3 4 2. + 1 + <_> + + <_> + 4 9 18 3 -1. + <_> + 4 10 18 1 3. + <_> + + <_> + 3 3 16 14 -1. + <_> + 3 3 8 7 2. + <_> + 11 10 8 7 2. + <_> + + <_> + 7 14 8 4 -1. + <_> + 7 14 4 4 2. + <_> + + <_> + 10 7 4 7 -1. + <_> + 10 7 2 7 2. + 1 + <_> + + <_> + 11 9 6 5 -1. + <_> + 11 9 3 5 2. + <_> + + <_> + 0 6 22 4 -1. + <_> + 11 6 11 4 2. + <_> + + <_> + 14 6 6 12 -1. + <_> + 17 6 3 6 2. + <_> + 14 12 3 6 2. + <_> + + <_> + 4 14 6 4 -1. + <_> + 4 16 6 2 2. + <_> + + <_> + 12 14 6 4 -1. + <_> + 12 16 6 2 2. + <_> + + <_> + 4 14 6 4 -1. + <_> + 4 16 6 2 2. + <_> + + <_> + 10 6 6 6 -1. + <_> + 12 6 2 6 3. + <_> + + <_> + 9 0 11 3 -1. + <_> + 8 1 11 1 3. + 1 + <_> + + <_> + 7 0 12 4 -1. + <_> + 13 0 6 2 2. + <_> + 7 2 6 2 2. + <_> + + <_> + 6 6 6 6 -1. + <_> + 8 6 2 6 3. + <_> + + <_> + 15 5 3 8 -1. + <_> + 15 9 3 4 2. + <_> + + <_> + 5 2 12 7 -1. + <_> + 9 2 4 7 3. + <_> + + <_> + 5 5 12 4 -1. + <_> + 9 5 4 4 3. + <_> + + <_> + 7 3 4 7 -1. + <_> + 7 3 2 7 2. + 1 + <_> + + <_> + 2 14 6 4 -1. + <_> + 5 14 3 4 2. + <_> + + <_> + 11 4 6 6 -1. + <_> + 13 4 2 6 3. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 14 6 2 2. + <_> + 11 16 6 2 2. + <_> + + <_> + 3 12 16 6 -1. + <_> + 11 12 8 3 2. + <_> + 3 15 8 3 2. + <_> + + <_> + 1 11 20 4 -1. + <_> + 6 11 10 4 2. + <_> + + <_> + 9 0 10 10 -1. + <_> + 14 0 5 5 2. + <_> + 9 5 5 5 2. + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 2 6 2. + 1 + <_> + + <_> + 1 7 20 11 -1. + <_> + 1 7 10 11 2. + <_> + + <_> + 9 0 12 3 -1. + <_> + 9 0 6 3 2. + 1 + <_> + + <_> + 13 0 6 6 -1. + <_> + 13 0 3 6 2. + <_> + + <_> + 5 0 12 8 -1. + <_> + 5 2 12 4 2. + <_> + + <_> + 14 0 8 6 -1. + <_> + 18 0 4 3 2. + <_> + 14 3 4 3 2. + <_> + + <_> + 7 6 8 6 -1. + <_> + 9 6 4 6 2. + <_> + + <_> + 11 3 6 6 -1. + <_> + 13 3 2 6 3. + <_> + + <_> + 5 3 6 6 -1. + <_> + 7 3 2 6 3. + <_> + + <_> + 13 0 8 6 -1. + <_> + 17 0 4 3 2. + <_> + 13 3 4 3 2. + <_> + + <_> + 0 0 8 6 -1. + <_> + 0 0 4 3 2. + <_> + 4 3 4 3 2. + <_> + + <_> + 7 0 10 6 -1. + <_> + 12 0 5 3 2. + <_> + 7 3 5 3 2. + <_> + + <_> + 0 15 22 2 -1. + <_> + 11 15 11 2 2. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 15 12 2 2. + <_> + + <_> + 5 13 6 4 -1. + <_> + 5 15 6 2 2. + <_> + + <_> + 3 9 17 3 -1. + <_> + 3 10 17 1 3. + <_> + + <_> + 3 8 16 10 -1. + <_> + 3 8 8 5 2. + <_> + 11 13 8 5 2. + <_> + + <_> + 9 0 10 6 -1. + <_> + 14 0 5 3 2. + <_> + 9 3 5 3 2. + <_> + + <_> + 3 0 12 4 -1. + <_> + 3 0 6 2 2. + <_> + 9 2 6 2 2. + <_> + + <_> + 4 10 14 3 -1. + <_> + 4 10 7 3 2. + <_> + + <_> + 1 14 11 4 -1. + <_> + 1 16 11 2 2. + <_> + + <_> + 7 0 12 6 -1. + <_> + 13 0 6 3 2. 
+ <_> + 7 3 6 3 2. + <_> + + <_> + 3 0 10 6 -1. + <_> + 3 0 5 3 2. + <_> + 8 3 5 3 2. + <_> + + <_> + 6 0 10 3 -1. + <_> + 6 0 5 3 2. + 1 + <_> + + <_> + 14 8 6 4 -1. + <_> + 14 8 6 2 2. + 1 + <_> + + <_> + 0 2 5 16 -1. + <_> + 0 10 5 8 2. + <_> + + <_> + 0 3 22 5 -1. + <_> + 0 3 11 5 2. + <_> + + <_> + 6 15 8 3 -1. + <_> + 10 15 4 3 2. + <_> + + <_> + 15 0 2 14 -1. + <_> + 15 0 1 14 2. + 1 + <_> + + <_> + 7 0 14 2 -1. + <_> + 7 0 14 1 2. + 1 + <_> + + <_> + 1 11 20 5 -1. + <_> + 6 11 10 5 2. + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 10 1 12 3 -1. + <_> + 14 1 4 3 3. + <_> + + <_> + 0 1 12 3 -1. + <_> + 4 1 4 3 3. + <_> + + <_> + 14 12 4 6 -1. + <_> + 14 12 2 6 2. + <_> + + <_> + 0 10 22 7 -1. + <_> + 11 10 11 7 2. + <_> + + <_> + 11 2 4 11 -1. + <_> + 11 2 2 11 2. + 1 + <_> + + <_> + 3 14 16 4 -1. + <_> + 3 14 8 2 2. + <_> + 11 16 8 2 2. + <_> + + <_> + 12 12 6 6 -1. + <_> + 14 12 2 6 3. + <_> + + <_> + 4 12 6 6 -1. + <_> + 6 12 2 6 3. + <_> + + <_> + 11 14 6 4 -1. + <_> + 11 16 6 2 2. + <_> + + <_> + 0 0 12 4 -1. + <_> + 0 0 6 2 2. + <_> + 6 2 6 2 2. + <_> + + <_> + 15 11 4 6 -1. + <_> + 15 11 2 6 2. + <_> + + <_> + 3 11 4 6 -1. + <_> + 5 11 2 6 2. + <_> + + <_> + 18 5 4 7 -1. + <_> + 18 5 2 7 2. + 1 + <_> + + <_> + 4 5 7 4 -1. + <_> + 4 5 7 2 2. + 1 + <_> + + <_> + 9 6 12 3 -1. + <_> + 13 6 4 3 3. + <_> + + <_> + 1 6 12 3 -1. + <_> + 5 6 4 3 3. + <_> + + <_> + 0 0 22 10 -1. + <_> + 11 0 11 5 2. + <_> + 0 5 11 5 2. + <_> + + <_> + 2 4 14 3 -1. + <_> + 2 5 14 1 3. + <_> + + <_> + 13 3 8 6 -1. + <_> + 17 3 4 3 2. + <_> + 13 6 4 3 2. + <_> + + <_> + 4 14 14 4 -1. + <_> + 4 14 7 2 2. + <_> + 11 16 7 2 2. + <_> + + <_> + 11 2 4 11 -1. + <_> + 11 2 2 11 2. + 1 + <_> + + <_> + 11 2 11 4 -1. + <_> + 11 2 11 2 2. + 1 + <_> + + <_> + 10 7 12 3 -1. + <_> + 10 7 6 3 2. + <_> + + <_> + 9 7 4 6 -1. + <_> + 9 7 2 6 2. + 1 + <_> + + <_> + 3 11 16 6 -1. + <_> + 11 11 8 3 2. + <_> + 3 14 8 3 2. + <_> + + <_> + 1 3 8 6 -1. + <_> + 1 3 4 3 2. + <_> + 5 6 4 3 2. + <_> + + <_> + 5 4 12 3 -1. + <_> + 5 5 12 1 3. + <_> + + <_> + 7 14 8 4 -1. + <_> + 11 14 4 4 2. + <_> + + <_> + 7 3 15 3 -1. + <_> + 7 4 15 1 3. + <_> + + <_> + 6 8 6 4 -1. + <_> + 6 8 6 2 2. + 1 + <_> + + <_> + 10 7 12 3 -1. + <_> + 10 7 6 3 2. + <_> + + <_> + 0 7 12 3 -1. + <_> + 6 7 6 3 2. + <_> + + <_> + 7 7 9 4 -1. + <_> + 10 7 3 4 3. + <_> + + <_> + 6 2 4 16 -1. + <_> + 6 10 4 8 2. + <_> + + <_> + 8 4 6 6 -1. + <_> + 10 4 2 6 3. + <_> + + <_> + 1 11 20 3 -1. + <_> + 6 11 10 3 2. + <_> + + <_> + 14 9 6 8 -1. + <_> + 17 9 3 4 2. + <_> + 14 13 3 4 2. + <_> + + <_> + 11 0 9 4 -1. + <_> + 11 0 9 2 2. + 1 + <_> + + <_> + 11 10 6 8 -1. + <_> + 14 10 3 4 2. + <_> + 11 14 3 4 2. + <_> + + <_> + 5 16 12 2 -1. + <_> + 5 17 12 1 2. + <_> + + <_> + 5 9 14 4 -1. + <_> + 5 11 14 2 2. + <_> + + <_> + 2 9 6 8 -1. + <_> + 2 9 3 4 2. + <_> + 5 13 3 4 2. + <_> + + <_> + 15 8 6 4 -1. + <_> + 15 8 3 4 2. + <_> + + <_> + 1 8 6 4 -1. + <_> + 4 8 3 4 2. + <_> + + <_> + 13 5 8 5 -1. + <_> + 13 5 4 5 2. + 1 + <_> + + <_> + 11 5 9 2 -1. + <_> + 11 5 9 1 2. + 1 + <_> + + <_> + 12 6 9 12 -1. + <_> + 15 10 3 4 9. + <_> + + <_> + 5 10 6 8 -1. + <_> + 5 10 3 4 2. + <_> + 8 14 3 4 2. + <_> + + <_> + 9 5 5 12 -1. + <_> + 9 8 5 6 2. + <_> + + <_> + 11 5 9 2 -1. + <_> + 11 5 9 1 2. + 1 + <_> + + <_> + 5 0 15 12 -1. + <_> + 10 4 5 4 9. + <_> + + <_> + 1 13 8 5 -1. + <_> + 5 13 4 5 2. + <_> + + <_> + 14 8 6 4 -1. + <_> + 14 8 3 4 2. + 1 + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 4 3 2. + 1 + <_> + + <_> + 7 0 12 9 -1. + <_> + 11 3 4 3 9. 
+ <_> + + <_> + 7 13 6 4 -1. + <_> + 7 15 6 2 2. + <_> + + <_> + 10 7 6 10 -1. + <_> + 13 7 3 5 2. + <_> + 10 12 3 5 2. + <_> + + <_> + 6 7 6 10 -1. + <_> + 6 7 3 5 2. + <_> + 9 12 3 5 2. + <_> + + <_> + 7 0 12 2 -1. + <_> + 7 0 6 2 2. + <_> + + <_> + 2 0 18 9 -1. + <_> + 2 3 18 3 3. + <_> + + <_> + 12 2 6 15 -1. + <_> + 12 2 3 15 2. + <_> + + <_> + 4 2 6 15 -1. + <_> + 7 2 3 15 2. + <_> + + <_> + 7 12 12 4 -1. + <_> + 7 13 12 2 2. + <_> + + <_> + 4 4 4 14 -1. + <_> + 4 4 2 7 2. + <_> + 6 11 2 7 2. + <_> + + <_> + 12 6 9 12 -1. + <_> + 15 10 3 4 9. + <_> + + <_> + 1 6 9 12 -1. + <_> + 4 10 3 4 9. + <_> + + <_> + 13 6 8 12 -1. + <_> + 17 6 4 6 2. + <_> + 13 12 4 6 2. + <_> + + <_> + 7 14 8 3 -1. + <_> + 11 14 4 3 2. + <_> + + <_> + 5 5 12 3 -1. + <_> + 9 5 4 3 3. + <_> + + <_> + 10 0 2 18 -1. + <_> + 10 6 2 6 3. + <_> + + <_> + 4 14 14 2 -1. + <_> + 4 14 7 2 2. + <_> + + <_> + 3 0 6 4 -1. + <_> + 6 0 3 4 2. + <_> + + <_> + 13 12 6 4 -1. + <_> + 13 12 3 4 2. + <_> + + <_> + 1 0 8 4 -1. + <_> + 5 0 4 4 2. + <_> + + <_> + 7 9 14 4 -1. + <_> + 14 9 7 2 2. + <_> + 7 11 7 2 2. + <_> + + <_> + 1 0 8 18 -1. + <_> + 1 0 4 9 2. + <_> + 5 9 4 9 2. + <_> + + <_> + 13 8 6 4 -1. + <_> + 13 8 3 4 2. + 1 + <_> + + <_> + 9 8 4 6 -1. + <_> + 9 8 4 3 2. + 1 + <_> + + <_> + 3 13 6 4 -1. + <_> + 6 13 3 4 2. + <_> + + <_> + 11 4 6 7 -1. + <_> + 13 4 2 7 3. + <_> + + <_> + 6 8 6 4 -1. + <_> + 6 8 3 4 2. + 1 + <_> + + <_> + 10 7 12 5 -1. + <_> + 13 7 6 5 2. + <_> + + <_> + 3 5 12 3 -1. + <_> + 9 5 6 3 2. + <_> + + <_> + 13 5 4 6 -1. + <_> + 13 8 4 3 2. + <_> + + <_> + 5 5 4 6 -1. + <_> + 5 8 4 3 2. + <_> + + <_> + 13 12 6 6 -1. + <_> + 15 12 2 6 3. + <_> + + <_> + 10 2 4 10 -1. + <_> + 10 2 4 5 2. + 1 + <_> + + <_> + 13 12 6 6 -1. + <_> + 15 12 2 6 3. + <_> + + <_> + 3 12 6 6 -1. + <_> + 5 12 2 6 3. + <_> + + <_> + 11 12 6 6 -1. + <_> + 11 14 6 2 3. + <_> + + <_> + 5 12 8 6 -1. + <_> + 5 12 4 3 2. + <_> + 9 15 4 3 2. + <_> + + <_> + 5 11 12 6 -1. + <_> + 11 11 6 3 2. + <_> + 5 14 6 3 2. + <_> + + <_> + 0 9 22 8 -1. + <_> + 0 9 11 4 2. + <_> + 11 13 11 4 2. + <_> + + <_> + 6 9 13 3 -1. + <_> + 6 10 13 1 3. + <_> + + <_> + 0 2 8 6 -1. + <_> + 0 2 4 3 2. + <_> + 4 5 4 3 2. + <_> + + <_> + 4 9 16 3 -1. + <_> + 4 10 16 1 3. + <_> + + <_> + 4 9 12 3 -1. + <_> + 4 10 12 1 3. + <_> + + <_> + 16 2 5 16 -1. + <_> + 16 10 5 8 2. + <_> + + <_> + 6 13 7 4 -1. + <_> + 6 15 7 2 2. + <_> + + <_> + 1 7 20 8 -1. + <_> + 11 7 10 4 2. + <_> + 1 11 10 4 2. + <_> + + <_> + 5 2 12 3 -1. + <_> + 5 3 12 1 3. + <_> + + <_> + 13 13 6 4 -1. + <_> + 13 15 6 2 2. + <_> + + <_> + 1 0 5 8 -1. + <_> + 1 4 5 4 2. + <_> + + <_> + 5 0 13 8 -1. + <_> + 5 4 13 4 2. + <_> + + <_> + 9 1 4 8 -1. + <_> + 9 5 4 4 2. + <_> + + <_> + 11 2 8 8 -1. + <_> + 9 4 8 4 2. + 1 + <_> + + <_> + 11 2 8 8 -1. + <_> + 13 4 4 8 2. + 1 + <_> + + <_> + 8 0 14 4 -1. + <_> + 15 0 7 2 2. + <_> + 8 2 7 2 2. + <_> + + <_> + 0 10 12 4 -1. + <_> + 0 10 6 2 2. + <_> + 6 12 6 2 2. + <_> + + <_> + 8 0 14 4 -1. + <_> + 15 0 7 2 2. + <_> + 8 2 7 2 2. + <_> + + <_> + 3 4 16 14 -1. + <_> + 7 4 8 14 2. + <_> + + <_> + 13 13 6 4 -1. + <_> + 13 15 6 2 2. + <_> + + <_> + 3 13 6 4 -1. + <_> + 3 15 6 2 2. + <_> + + <_> + 11 5 2 10 -1. + <_> + 11 5 1 10 2. + 1 + <_> + + <_> + 11 5 10 2 -1. + <_> + 11 5 10 1 2. + 1 + <_> + + <_> + 4 0 18 4 -1. + <_> + 13 0 9 2 2. + <_> + 4 2 9 2 2. + <_> + + <_> + 6 5 4 6 -1. + <_> + 6 5 2 6 2. + 1 + <_> + + <_> + 16 6 6 6 -1. + <_> + 14 8 6 2 3. + 1 + <_> + + <_> + 6 6 6 6 -1. + <_> + 8 8 2 6 3. + 1 + <_> + + <_> + 4 0 18 12 -1. + <_> + 4 0 9 12 2. 
+ <_> + + <_> + 0 12 8 6 -1. + <_> + 2 12 4 6 2. + <_> + + <_> + 7 12 8 6 -1. + <_> + 7 12 4 6 2. + <_> + + <_> + 7 6 3 12 -1. + <_> + 8 6 1 12 3. + <_> + + <_> + 15 5 6 6 -1. + <_> + 15 5 3 6 2. + 1 + <_> + + <_> + 2 12 8 3 -1. + <_> + 6 12 4 3 2. + <_> + + <_> + 2 6 18 3 -1. + <_> + 8 6 6 3 3. + <_> + + <_> + 0 11 22 2 -1. + <_> + 11 11 11 2 2. + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 16 6 2 2. + <_> + + <_> + 3 12 6 4 -1. + <_> + 6 12 3 4 2. + <_> + + <_> + 14 0 4 12 -1. + <_> + 14 0 4 6 2. + 1 + <_> + + <_> + 5 10 6 4 -1. + <_> + 8 10 3 4 2. + <_> + + <_> + 1 12 20 6 -1. + <_> + 11 12 10 3 2. + <_> + 1 15 10 3 2. + <_> + + <_> + 5 15 12 3 -1. + <_> + 9 15 4 3 3. + <_> + + <_> + 13 1 3 10 -1. + <_> + 13 6 3 5 2. + <_> + + <_> + 9 0 10 4 -1. + <_> + 9 0 5 4 2. + 1 + <_> + + <_> + 13 1 3 10 -1. + <_> + 13 6 3 5 2. + <_> + + <_> + 6 1 3 10 -1. + <_> + 6 6 3 5 2. + <_> + + <_> + 11 4 10 4 -1. + <_> + 11 4 10 2 2. + 1 + <_> + + <_> + 0 10 20 8 -1. + <_> + 0 10 10 4 2. + <_> + 10 14 10 4 2. + <_> + + <_> + 15 11 6 7 -1. + <_> + 17 11 2 7 3. + <_> + + <_> + 4 14 9 4 -1. + <_> + 4 16 9 2 2. + <_> + + <_> + 15 0 6 8 -1. + <_> + 15 4 6 4 2. + <_> + + <_> + 1 11 6 7 -1. + <_> + 3 11 2 7 3. + <_> + + <_> + 12 6 8 4 -1. + <_> + 12 6 8 2 2. + 1 + <_> + + <_> + 11 2 6 2 -1. + <_> + 11 2 6 1 2. + 1 + <_> + + <_> + 11 0 11 8 -1. + <_> + 11 4 11 4 2. + <_> + + <_> + 0 1 22 6 -1. + <_> + 0 1 11 3 2. + <_> + 11 4 11 3 2. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 0 1 14 7 -1. + <_> + 7 1 7 7 2. + <_> + + <_> + 16 8 4 6 -1. + <_> + 16 8 2 6 2. + 1 + <_> + + <_> + 1 11 20 7 -1. + <_> + 6 11 10 7 2. + <_> + + <_> + 13 12 4 6 -1. + <_> + 13 15 4 3 2. + <_> + + <_> + 0 3 13 3 -1. + <_> + 0 4 13 1 3. + <_> + + <_> + 6 3 12 3 -1. + <_> + 6 4 12 1 3. + <_> + + <_> + 0 4 22 10 -1. + <_> + 0 4 11 5 2. + <_> + 11 9 11 5 2. + <_> + + <_> + 14 3 8 4 -1. + <_> + 14 3 8 2 2. + 1 + <_> + + <_> + 5 5 12 6 -1. + <_> + 5 5 6 3 2. + <_> + 11 8 6 3 2. + <_> + + <_> + 11 6 6 6 -1. + <_> + 13 6 2 6 3. + <_> + + <_> + 9 4 4 13 -1. + <_> + 10 4 2 13 2. + <_> + + <_> + 11 3 3 13 -1. + <_> + 12 3 1 13 3. + <_> + + <_> + 9 5 4 6 -1. + <_> + 11 5 2 6 2. + <_> + + <_> + 7 2 12 15 -1. + <_> + 11 7 4 5 9. + <_> + + <_> + 3 2 12 15 -1. + <_> + 7 7 4 5 9. + <_> + + <_> + 5 2 12 12 -1. + <_> + 9 6 4 4 9. + <_> + + <_> + 8 5 4 12 -1. + <_> + 8 8 4 6 2. + <_> + + <_> + 8 9 8 7 -1. + <_> + 10 9 4 7 2. + <_> + + <_> + 6 9 8 7 -1. + <_> + 8 9 4 7 2. + <_> + + <_> + 0 4 22 14 -1. + <_> + 11 4 11 7 2. + <_> + 0 11 11 7 2. + <_> + + <_> + 2 12 18 6 -1. + <_> + 2 14 18 2 3. + <_> + + <_> + 6 5 6 5 -1. + <_> + 9 5 3 5 2. + <_> + + <_> + 11 14 9 4 -1. + <_> + 14 14 3 4 3. + <_> + + <_> + 6 14 6 4 -1. + <_> + 6 16 6 2 2. + <_> + + <_> + 15 6 6 5 -1. + <_> + 15 6 3 5 2. + 1 + <_> + + <_> + 7 6 5 6 -1. + <_> + 7 6 5 3 2. + 1 + <_> + + <_> + 13 12 8 6 -1. + <_> + 13 12 4 6 2. + <_> + + <_> + 6 10 10 8 -1. + <_> + 6 12 10 4 2. + <_> + + <_> + 2 13 18 2 -1. + <_> + 2 13 9 2 2. + <_> + + <_> + 1 15 8 3 -1. + <_> + 5 15 4 3 2. + <_> + + <_> + 14 7 6 4 -1. + <_> + 14 7 6 2 2. + 1 + <_> + + <_> + 10 0 7 2 -1. + <_> + 10 0 7 1 2. + 1 + <_> + + <_> + 17 8 4 6 -1. + <_> + 17 8 4 3 2. + 1 + <_> + + <_> + 2 0 15 9 -1. + <_> + 7 3 5 3 9. + <_> + + <_> + 9 3 4 6 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 3 0 16 12 -1. + <_> + 3 6 16 6 2. + <_> + + <_> + 11 0 3 10 -1. + <_> + 11 0 3 5 2. + 1 + <_> + + <_> + 0 3 22 14 -1. + <_> + 11 3 11 14 2. + <_> + + <_> + 10 3 6 7 -1. + <_> + 12 3 2 7 3. + <_> + + <_> + 11 1 11 4 -1. + <_> + 10 2 11 2 2. 
+ 1 + <_> + + <_> + 14 7 6 4 -1. + <_> + 14 7 6 2 2. + 1 + <_> + + <_> + 5 5 4 12 -1. + <_> + 5 11 4 6 2. + <_> + + <_> + 2 6 20 9 -1. + <_> + 2 6 10 9 2. + <_> + + <_> + 1 9 18 3 -1. + <_> + 7 9 6 3 3. + <_> + + <_> + 11 6 6 6 -1. + <_> + 13 6 2 6 3. + <_> + + <_> + 8 13 6 4 -1. + <_> + 11 13 3 4 2. + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 5 6 6 6 -1. + <_> + 7 6 2 6 3. + <_> + + <_> + 15 0 3 8 -1. + <_> + 16 1 1 8 3. + 1 + <_> + + <_> + 5 8 12 3 -1. + <_> + 9 8 4 3 3. + <_> + + <_> + 2 7 18 4 -1. + <_> + 2 9 18 2 2. + <_> + + <_> + 11 1 10 4 -1. + <_> + 11 1 5 4 2. + 1 + <_> + + <_> + 15 0 3 8 -1. + <_> + 16 1 1 8 3. + 1 + <_> + + <_> + 7 0 8 3 -1. + <_> + 6 1 8 1 3. + 1 + <_> + + <_> + 10 0 12 4 -1. + <_> + 16 0 6 2 2. + <_> + 10 2 6 2 2. + <_> + + <_> + 5 2 12 3 -1. + <_> + 5 3 12 1 3. + <_> + + <_> + 8 2 14 3 -1. + <_> + 8 3 14 1 3. + <_> + + <_> + 0 0 12 4 -1. + <_> + 0 0 6 2 2. + <_> + 6 2 6 2 2. + <_> + + <_> + 8 0 14 4 -1. + <_> + 15 0 7 2 2. + <_> + 8 2 7 2 2. + <_> + + <_> + 0 5 8 6 -1. + <_> + 0 5 4 3 2. + <_> + 4 8 4 3 2. + <_> + + <_> + 14 14 6 4 -1. + <_> + 14 14 3 4 2. + <_> + + <_> + 6 12 10 4 -1. + <_> + 11 12 5 4 2. + <_> + + <_> + 14 6 6 6 -1. + <_> + 12 8 6 2 3. + 1 + <_> + + <_> + 8 6 6 6 -1. + <_> + 10 8 2 6 3. + 1 + <_> + + <_> + 2 8 6 10 -1. + <_> + 2 8 3 5 2. + <_> + 5 13 3 5 2. + <_> + + <_> + 11 3 4 9 -1. + <_> + 12 4 2 9 2. + 1 + <_> + + <_> + 2 0 12 4 -1. + <_> + 2 0 6 2 2. + <_> + 8 2 6 2 2. + <_> + + <_> + 11 5 3 9 -1. + <_> + 12 6 1 9 3. + 1 + <_> + + <_> + 11 3 9 4 -1. + <_> + 10 4 9 2 2. + 1 + <_> + + <_> + 13 13 8 5 -1. + <_> + 13 13 4 5 2. + <_> + + <_> + 1 13 8 5 -1. + <_> + 5 13 4 5 2. + <_> + + <_> + 7 13 8 3 -1. + <_> + 7 13 4 3 2. + <_> + + <_> + 8 13 6 4 -1. + <_> + 11 13 3 4 2. + <_> + + <_> + 11 7 3 8 -1. + <_> + 12 8 1 8 3. + 1 + <_> + + <_> + 5 1 6 8 -1. + <_> + 7 1 2 8 3. + <_> + + <_> + 14 14 6 4 -1. + <_> + 14 16 6 2 2. + <_> + + <_> + 11 7 8 3 -1. + <_> + 10 8 8 1 3. + 1 + <_> + + <_> + 12 3 3 12 -1. + <_> + 8 7 3 4 3. + 1 + <_> + + <_> + 8 5 5 6 -1. + <_> + 8 8 5 3 2. + <_> + + <_> + 11 3 8 4 -1. + <_> + 11 3 8 2 2. + 1 + <_> + + <_> + 7 5 8 6 -1. + <_> + 9 5 4 6 2. + <_> + + <_> + 11 4 6 6 -1. + <_> + 9 6 6 2 3. + 1 + <_> + + <_> + 11 4 6 6 -1. + <_> + 13 6 2 6 3. + 1 + <_> + + <_> + 12 8 6 4 -1. + <_> + 12 8 3 4 2. + 1 + <_> + + <_> + 5 15 8 3 -1. + <_> + 9 15 4 3 2. + <_> + + <_> + 0 5 22 13 -1. + <_> + 0 5 11 13 2. + <_> + + <_> + 2 12 9 6 -1. + <_> + 5 12 3 6 3. + <_> + + <_> + 19 1 3 10 -1. + <_> + 19 6 3 5 2. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 16 12 2 2. + <_> + + <_> + 10 14 10 4 -1. + <_> + 10 16 10 2 2. + <_> + + <_> + 1 3 14 3 -1. + <_> + 1 4 14 1 3. + <_> + + <_> + 3 14 16 4 -1. + <_> + 11 14 8 2 2. + <_> + 3 16 8 2 2. + <_> + + <_> + 0 14 6 4 -1. + <_> + 3 14 3 4 2. + <_> + + <_> + 10 1 11 4 -1. + <_> + 10 3 11 2 2. + <_> + + <_> + 1 1 11 4 -1. + <_> + 1 3 11 2 2. + <_> + + <_> + 9 3 6 6 -1. + <_> + 9 5 6 2 3. + <_> + + <_> + 4 5 12 3 -1. + <_> + 4 6 12 1 3. + <_> + + <_> + 12 0 7 6 -1. + <_> + 12 3 7 3 2. + <_> + + <_> + 1 3 16 4 -1. + <_> + 1 4 16 2 2. + <_> + + <_> + 4 9 15 3 -1. + <_> + 4 10 15 1 3. + <_> + + <_> + 2 4 18 6 -1. + <_> + 2 4 9 3 2. + <_> + 11 7 9 3 2. + <_> + + <_> + 13 5 4 13 -1. + <_> + 14 5 2 13 2. + <_> + + <_> + 4 6 6 4 -1. + <_> + 4 8 6 2 2. + <_> + + <_> + 8 7 6 5 -1. + <_> + 8 7 3 5 2. + <_> + + <_> + 10 8 4 6 -1. + <_> + 10 8 4 3 2. + 1 + <_> + + <_> + 6 12 12 4 -1. + <_> + 6 12 6 4 2. + <_> + + <_> + 3 11 10 3 -1. + <_> + 8 11 5 3 2. + <_> + + <_> + 12 2 3 12 -1. 
+ <_> + 12 2 3 6 2. + 1 + <_> + + <_> + 0 2 14 16 -1. + <_> + 7 2 7 16 2. + <_> + + <_> + 1 5 20 4 -1. + <_> + 6 5 10 4 2. + <_> + + <_> + 0 1 18 15 -1. + <_> + 9 1 9 15 2. + <_> + + <_> + 15 2 6 8 -1. + <_> + 15 4 6 4 2. + <_> + + <_> + 4 14 13 4 -1. + <_> + 4 15 13 2 2. + <_> + + <_> + 11 2 3 12 -1. + <_> + 12 2 1 12 3. + <_> + + <_> + 0 16 15 2 -1. + <_> + 0 17 15 1 2. + <_> + + <_> + 12 14 6 4 -1. + <_> + 12 16 6 2 2. + <_> + + <_> + 5 13 12 4 -1. + <_> + 5 14 12 2 2. + <_> + + <_> + 12 12 6 6 -1. + <_> + 12 14 6 2 3. + <_> + + <_> + 0 9 15 3 -1. + <_> + 0 10 15 1 3. + <_> + + <_> + 6 9 14 3 -1. + <_> + 6 10 14 1 3. + <_> + + <_> + 4 12 7 6 -1. + <_> + 4 14 7 2 3. + <_> + + <_> + 6 6 10 6 -1. + <_> + 11 6 5 3 2. + <_> + 6 9 5 3 2. + <_> + + <_> + 3 0 16 2 -1. + <_> + 3 0 8 2 2. + 1 + <_> + + <_> + 5 9 12 9 -1. + <_> + 5 12 12 3 3. + <_> + + <_> + 6 9 10 6 -1. + <_> + 6 12 10 3 2. + <_> + + <_> + 7 4 8 6 -1. + <_> + 7 6 8 2 3. + <_> + + <_> + 6 5 3 12 -1. + <_> + 6 11 3 6 2. + <_> + + <_> + 12 12 6 6 -1. + <_> + 14 12 2 6 3. + <_> + + <_> + 6 15 8 3 -1. + <_> + 10 15 4 3 2. + <_> + + <_> + 4 13 14 4 -1. + <_> + 4 15 14 2 2. + <_> + + <_> + 10 4 11 3 -1. + <_> + 9 5 11 1 3. + 1 + <_> + + <_> + 11 4 4 9 -1. + <_> + 12 5 2 9 2. + 1 + <_> + + <_> + 0 8 13 3 -1. + <_> + 0 9 13 1 3. + <_> + + <_> + 13 2 6 10 -1. + <_> + 16 2 3 5 2. + <_> + 13 7 3 5 2. + <_> + + <_> + 3 2 6 10 -1. + <_> + 3 2 3 5 2. + <_> + 6 7 3 5 2. + <_> + + <_> + 11 2 4 11 -1. + <_> + 11 2 2 11 2. + 1 + <_> + + <_> + 4 2 12 3 -1. + <_> + 4 3 12 1 3. + <_> + + <_> + 12 1 4 12 -1. + <_> + 12 1 2 12 2. + 1 + <_> + + <_> + 11 2 11 4 -1. + <_> + 11 2 11 2 2. + 1 + <_> + + <_> + 11 0 4 9 -1. + <_> + 11 0 2 9 2. + 1 + <_> + + <_> + 11 0 9 4 -1. + <_> + 11 0 9 2 2. + 1 + <_> + + <_> + 16 2 6 10 -1. + <_> + 19 2 3 5 2. + <_> + 16 7 3 5 2. + <_> + + <_> + 11 0 6 3 -1. + <_> + 10 1 6 1 3. + 1 + <_> + + <_> + 11 0 3 8 -1. + <_> + 12 1 1 8 3. + 1 + <_> + + <_> + 11 0 8 3 -1. + <_> + 10 1 8 1 3. + 1 + <_> + + <_> + 17 1 4 12 -1. + <_> + 19 1 2 6 2. + <_> + 17 7 2 6 2. + <_> + + <_> + 8 4 6 4 -1. + <_> + 8 6 6 2 2. + <_> + + <_> + 8 5 8 5 -1. + <_> + 8 5 4 5 2. + <_> + + <_> + 8 4 6 13 -1. + <_> + 10 4 2 13 3. + <_> + + <_> + 16 3 6 8 -1. + <_> + 19 3 3 4 2. + <_> + 16 7 3 4 2. + <_> + + <_> + 0 3 6 8 -1. + <_> + 0 3 3 4 2. + <_> + 3 7 3 4 2. + <_> + + <_> + 10 9 12 4 -1. + <_> + 16 9 6 2 2. + <_> + 10 11 6 2 2. + <_> + + <_> + 1 2 9 12 -1. + <_> + 4 6 3 4 9. + <_> + + <_> + 15 12 4 6 -1. + <_> + 15 12 2 6 2. + <_> + + <_> + 5 15 12 3 -1. + <_> + 11 15 6 3 2. + <_> + + <_> + 2 16 20 2 -1. + <_> + 2 16 10 2 2. + <_> + + <_> + 1 8 10 6 -1. + <_> + 1 8 5 3 2. + <_> + 6 11 5 3 2. + <_> + + <_> + 6 3 16 14 -1. + <_> + 14 3 8 7 2. + <_> + 6 10 8 7 2. + <_> + + <_> + 1 4 6 8 -1. + <_> + 1 4 3 4 2. + <_> + 4 8 3 4 2. + <_> + + <_> + 7 2 12 4 -1. + <_> + 7 3 12 2 2. + <_> + + <_> + 1 9 6 9 -1. + <_> + 4 9 3 9 2. + <_> + + <_> + 12 14 10 4 -1. + <_> + 12 14 5 4 2. + <_> + + <_> + 2 12 12 5 -1. + <_> + 5 12 6 5 2. + <_> + + <_> + 15 12 6 6 -1. + <_> + 17 12 2 6 3. + <_> + + <_> + 1 12 6 6 -1. + <_> + 3 12 2 6 3. + <_> + + <_> + 8 12 6 6 -1. + <_> + 10 12 2 6 3. + <_> + + <_> + 5 2 12 16 -1. + <_> + 5 10 12 8 2. + <_> + + <_> + 4 2 18 14 -1. + <_> + 4 9 18 7 2. + <_> + + <_> + 5 4 12 14 -1. + <_> + 5 11 12 7 2. + <_> + + <_> + 2 5 20 8 -1. + <_> + 7 5 10 8 2. + <_> + + <_> + 8 0 10 7 -1. + <_> + 8 0 5 7 2. + 1 + <_> + + <_> + 12 0 5 8 -1. + <_> + 12 0 5 4 2. + 1 + <_> + + <_> + 7 4 6 13 -1. + <_> + 10 4 3 13 2. + <_> + + <_> + 7 14 8 4 -1. 
+ <_> + 7 16 8 2 2. + <_> + + <_> + 8 0 3 12 -1. + <_> + 9 0 1 12 3. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 4 0 3 12 -1. + <_> + 4 4 3 4 3. + <_> + + <_> + 11 3 3 15 -1. + <_> + 12 3 1 15 3. + <_> + + <_> + 5 12 7 6 -1. + <_> + 5 14 7 2 3. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 8 6 3 12 -1. + <_> + 9 6 1 12 3. + <_> + + <_> + 5 16 12 2 -1. + <_> + 5 16 6 2 2. + <_> + + <_> + 1 12 20 6 -1. + <_> + 6 12 10 6 2. + <_> + + <_> + 8 11 9 4 -1. + <_> + 11 11 3 4 3. + <_> + + <_> + 5 11 9 4 -1. + <_> + 8 11 3 4 3. + <_> + + <_> + 11 6 9 12 -1. + <_> + 14 10 3 4 9. + <_> + + <_> + 2 6 9 12 -1. + <_> + 5 10 3 4 9. + <_> + + <_> + 5 9 12 2 -1. + <_> + 5 10 12 1 2. + <_> + + <_> + 0 3 16 3 -1. + <_> + 4 3 8 3 2. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 0 2 14 3 -1. + <_> + 0 3 14 1 3. + <_> + + <_> + 10 2 12 3 -1. + <_> + 10 3 12 1 3. + <_> + + <_> + 5 14 12 3 -1. + <_> + 11 14 6 3 2. + <_> + + <_> + 8 13 8 3 -1. + <_> + 8 13 4 3 2. + <_> + + <_> + 9 2 4 8 -1. + <_> + 9 6 4 4 2. + <_> + + <_> + 15 1 3 11 -1. + <_> + 16 2 1 11 3. + 1 + <_> + + <_> + 8 1 10 4 -1. + <_> + 7 2 10 2 2. + 1 + <_> + + <_> + 5 5 15 3 -1. + <_> + 5 6 15 1 3. + <_> + + <_> + 5 1 9 5 -1. + <_> + 8 1 3 5 3. + <_> + + <_> + 14 0 4 18 -1. + <_> + 15 0 2 18 2. + <_> + + <_> + 6 0 5 16 -1. + <_> + 6 8 5 8 2. + <_> + + <_> + 12 4 4 8 -1. + <_> + 12 8 4 4 2. + <_> + + <_> + 11 4 10 2 -1. + <_> + 11 4 10 1 2. + 1 + <_> + + <_> + 10 0 12 3 -1. + <_> + 14 0 4 3 3. + <_> + + <_> + 0 2 20 13 -1. + <_> + 5 2 10 13 2. + <_> + + <_> + 12 4 4 8 -1. + <_> + 12 8 4 4 2. + <_> + + <_> + 6 4 4 8 -1. + <_> + 6 8 4 4 2. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 8 6 3 12 -1. + <_> + 9 6 1 12 3. + <_> + + <_> + 7 1 14 2 -1. + <_> + 7 1 7 2 2. + <_> + + <_> + 4 8 14 10 -1. + <_> + 4 13 14 5 2. + <_> + + <_> + 11 14 9 4 -1. + <_> + 14 14 3 4 3. + <_> + + <_> + 1 7 17 8 -1. + <_> + 1 11 17 4 2. + <_> + + <_> + 10 12 7 6 -1. + <_> + 10 15 7 3 2. + <_> + + <_> + 10 1 8 9 -1. + <_> + 10 1 4 9 2. + 1 + <_> + + <_> + 11 2 4 11 -1. + <_> + 11 2 2 11 2. + 1 + <_> + + <_> + 6 9 4 9 -1. + <_> + 8 9 2 9 2. + <_> + + <_> + 8 3 12 4 -1. + <_> + 14 3 6 2 2. + <_> + 8 5 6 2 2. + <_> + + <_> + 5 14 7 4 -1. + <_> + 5 16 7 2 2. + <_> + + <_> + 13 0 4 13 -1. + <_> + 13 0 2 13 2. + 1 + <_> + + <_> + 9 0 13 4 -1. + <_> + 9 0 13 2 2. + 1 + <_> + + <_> + 12 9 4 9 -1. + <_> + 12 12 4 3 3. + <_> + + <_> + 7 4 12 2 -1. + <_> + 7 4 12 1 2. + 1 + <_> + + <_> + 12 5 10 6 -1. + <_> + 17 5 5 3 2. + <_> + 12 8 5 3 2. + <_> + + <_> + 1 0 17 3 -1. + <_> + 1 1 17 1 3. + <_> + + <_> + 15 4 6 8 -1. + <_> + 18 4 3 4 2. + <_> + 15 8 3 4 2. + <_> + + <_> + 3 2 4 14 -1. + <_> + 3 2 2 7 2. + <_> + 5 9 2 7 2. + <_> + + <_> + 14 8 6 4 -1. + <_> + 14 8 6 2 2. + 1 + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 2 6 2. + 1 + <_> + + <_> + 12 1 4 16 -1. + <_> + 14 1 2 8 2. + <_> + 12 9 2 8 2. + <_> + + <_> + 7 0 6 8 -1. + <_> + 7 0 3 4 2. + <_> + 10 4 3 4 2. + <_> + + <_> + 8 12 6 5 -1. + <_> + 8 12 3 5 2. + <_> + + <_> + 7 5 6 12 -1. + <_> + 7 5 3 6 2. + <_> + 10 11 3 6 2. + <_> + + <_> + 15 5 6 6 -1. + <_> + 15 5 3 6 2. + 1 + <_> + + <_> + 6 10 3 8 -1. + <_> + 6 14 3 4 2. + <_> + + <_> + 4 0 14 3 -1. + <_> + 4 1 14 1 3. + <_> + + <_> + 0 9 8 3 -1. + <_> + 4 9 4 3 2. + <_> + + <_> + 9 3 4 6 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 3 0 10 10 -1. + <_> + 3 0 5 5 2. + <_> + 8 5 5 5 2. + <_> + + <_> + 5 13 12 4 -1. + <_> + 5 13 6 4 2. + <_> + + <_> + 6 12 10 3 -1. + <_> + 11 12 5 3 2. 
+ <_> + + <_> + 12 15 10 3 -1. + <_> + 12 15 5 3 2. + <_> + + <_> + 0 15 10 3 -1. + <_> + 5 15 5 3 2. + <_> + + <_> + 3 0 17 14 -1. + <_> + 3 7 17 7 2. + <_> + + <_> + 9 0 4 16 -1. + <_> + 9 0 2 8 2. + <_> + 11 8 2 8 2. + <_> + + <_> + 11 4 6 8 -1. + <_> + 11 8 6 4 2. + <_> + + <_> + 0 9 12 3 -1. + <_> + 0 10 12 1 3. + <_> + + <_> + 1 5 20 8 -1. + <_> + 11 5 10 4 2. + <_> + 1 9 10 4 2. + <_> + + <_> + 1 8 13 3 -1. + <_> + 1 9 13 1 3. + <_> + + <_> + 8 8 14 3 -1. + <_> + 8 9 14 1 3. + <_> + + <_> + 4 16 14 2 -1. + <_> + 4 17 14 1 2. + <_> + + <_> + 11 1 3 6 -1. + <_> + 12 2 1 6 3. + 1 + <_> + + <_> + 11 1 6 3 -1. + <_> + 10 2 6 1 3. + 1 + <_> + + <_> + 13 1 6 10 -1. + <_> + 16 1 3 5 2. + <_> + 13 6 3 5 2. + <_> + + <_> + 11 0 10 3 -1. + <_> + 10 1 10 1 3. + 1 + <_> + + <_> + 12 1 3 12 -1. + <_> + 13 2 1 12 3. + 1 + <_> + + <_> + 10 1 12 3 -1. + <_> + 9 2 12 1 3. + 1 + <_> + + <_> + 13 1 6 10 -1. + <_> + 16 1 3 5 2. + <_> + 13 6 3 5 2. + <_> + + <_> + 3 1 6 10 -1. + <_> + 3 1 3 5 2. + <_> + 6 6 3 5 2. + <_> + + <_> + 14 7 6 10 -1. + <_> + 17 7 3 5 2. + <_> + 14 12 3 5 2. + <_> + + <_> + 3 2 6 8 -1. + <_> + 3 2 3 4 2. + <_> + 6 6 3 4 2. + <_> + + <_> + 11 14 9 4 -1. + <_> + 14 14 3 4 3. + <_> + + <_> + 1 8 15 8 -1. + <_> + 1 12 15 4 2. + <_> + + <_> + 9 12 8 4 -1. + <_> + 9 14 8 2 2. + <_> + + <_> + 6 5 7 6 -1. + <_> + 6 7 7 2 3. + <_> + + <_> + 9 5 6 5 -1. + <_> + 9 5 3 5 2. + <_> + + <_> + 0 12 8 6 -1. + <_> + 2 12 4 6 2. + <_> + + <_> + 14 8 6 4 -1. + <_> + 14 8 3 4 2. + 1 + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 4 3 2. + 1 + <_> + + <_> + 9 4 6 8 -1. + <_> + 11 4 2 8 3. + <_> + + <_> + 7 4 6 8 -1. + <_> + 9 4 2 8 3. + <_> + + <_> + 0 15 10 3 -1. + <_> + 5 15 5 3 2. + <_> + + <_> + 11 5 3 9 -1. + <_> + 12 6 1 9 3. + 1 + <_> + + <_> + 11 5 9 3 -1. + <_> + 10 6 9 1 3. + 1 + <_> + + <_> + 12 6 8 4 -1. + <_> + 12 6 8 2 2. + 1 + <_> + + <_> + 10 6 4 8 -1. + <_> + 10 6 2 8 2. + 1 + <_> + + <_> + 13 0 5 12 -1. + <_> + 13 0 5 6 2. + 1 + <_> + + <_> + 1 3 12 4 -1. + <_> + 4 3 6 4 2. + <_> + + <_> + 15 7 6 5 -1. + <_> + 15 7 3 5 2. + <_> + + <_> + 1 7 12 3 -1. + <_> + 1 8 12 1 3. + <_> + + <_> + 15 7 6 5 -1. + <_> + 15 7 3 5 2. + <_> + + <_> + 1 7 6 5 -1. + <_> + 4 7 3 5 2. + <_> + + <_> + 12 13 6 4 -1. + <_> + 12 15 6 2 2. + <_> + + <_> + 5 12 12 6 -1. + <_> + 5 12 6 3 2. + <_> + 11 15 6 3 2. + <_> + + <_> + 11 5 2 9 -1. + <_> + 11 5 1 9 2. + 1 + <_> + + <_> + 11 5 9 2 -1. + <_> + 11 5 9 1 2. + 1 + <_> + + <_> + 10 12 9 4 -1. + <_> + 13 12 3 4 3. + <_> + + <_> + 8 6 6 6 -1. + <_> + 8 6 6 3 2. + 1 + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 0 2 14 3 -1. + <_> + 0 3 14 1 3. + <_> + + <_> + 8 2 12 3 -1. + <_> + 8 3 12 1 3. + <_> + + <_> + 8 7 5 6 -1. + <_> + 8 7 5 3 2. + 1 + <_> + + <_> + 12 6 8 3 -1. + <_> + 12 6 4 3 2. + 1 + <_> + + <_> + 4 10 4 6 -1. + <_> + 6 10 2 6 2. + <_> + + <_> + 1 11 20 4 -1. + <_> + 6 11 10 4 2. + <_> + + <_> + 6 10 8 7 -1. + <_> + 8 10 4 7 2. + <_> + + <_> + 11 3 3 9 -1. + <_> + 12 4 1 9 3. + 1 + <_> + + <_> + 0 8 22 4 -1. + <_> + 11 8 11 4 2. + <_> + + <_> + 3 10 16 3 -1. + <_> + 3 10 8 3 2. + <_> + + <_> + 11 3 9 3 -1. + <_> + 10 4 9 1 3. + 1 + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 7 12 4 6 -1. + <_> + 9 12 2 6 2. + <_> + + <_> + 9 12 6 6 -1. + <_> + 9 12 3 6 2. + <_> + + <_> + 2 13 16 5 -1. + <_> + 10 13 8 5 2. + <_> + + <_> + 12 12 8 3 -1. + <_> + 12 12 4 3 2. + <_> + + <_> + 10 4 12 2 -1. + <_> + 10 4 6 2 2. + 1 + <_> + + <_> + 11 3 8 4 -1. + <_> + 11 3 4 4 2. + 1 + <_> + + <_> + 4 6 10 3 -1. + <_> + 9 6 5 3 2. 
+ <_> + + <_> + 10 1 6 8 -1. + <_> + 13 1 3 4 2. + <_> + 10 5 3 4 2. + <_> + + <_> + 11 1 6 6 -1. + <_> + 11 1 6 3 2. + 1 + <_> + + <_> + 11 6 6 4 -1. + <_> + 11 8 6 2 2. + <_> + + <_> + 2 2 12 3 -1. + <_> + 2 3 12 1 3. + <_> + + <_> + 11 3 8 4 -1. + <_> + 11 3 4 4 2. + 1 + <_> + + <_> + 1 0 8 6 -1. + <_> + 1 0 4 3 2. + <_> + 5 3 4 3 2. + <_> + + <_> + 8 3 14 3 -1. + <_> + 8 4 14 1 3. + <_> + + <_> + 11 3 4 8 -1. + <_> + 11 3 4 4 2. + 1 + <_> + + <_> + 6 0 12 10 -1. + <_> + 9 0 6 10 2. + <_> + + <_> + 4 16 14 2 -1. + <_> + 4 17 14 1 2. + <_> + + <_> + 10 11 12 3 -1. + <_> + 10 12 12 1 3. + <_> + + <_> + 3 0 4 6 -1. + <_> + 5 0 2 6 2. + <_> + + <_> + 16 12 6 4 -1. + <_> + 16 12 3 4 2. + <_> + + <_> + 0 13 10 4 -1. + <_> + 5 13 5 4 2. + <_> + + <_> + 3 1 16 4 -1. + <_> + 11 1 8 2 2. + <_> + 3 3 8 2 2. + <_> + + <_> + 0 1 11 4 -1. + <_> + 0 3 11 2 2. + <_> + + <_> + 6 8 11 6 -1. + <_> + 6 11 11 3 2. + <_> + + <_> + 8 5 5 10 -1. + <_> + 8 10 5 5 2. + <_> + + <_> + 9 2 4 6 -1. + <_> + 9 5 4 3 2. + <_> + + <_> + 2 3 12 6 -1. + <_> + 2 3 6 3 2. + <_> + 8 6 6 3 2. + <_> + + <_> + 13 3 7 9 -1. + <_> + 13 6 7 3 3. + <_> + + <_> + 2 3 7 9 -1. + <_> + 2 6 7 3 3. + <_> + + <_> + 11 0 3 6 -1. + <_> + 12 1 1 6 3. + 1 + <_> + + <_> + 3 3 13 3 -1. + <_> + 3 4 13 1 3. + <_> + + <_> + 8 3 14 3 -1. + <_> + 8 4 14 1 3. + <_> + + <_> + 3 6 7 12 -1. + <_> + 3 9 7 6 2. + <_> + + <_> + 12 13 6 4 -1. + <_> + 12 15 6 2 2. + <_> + + <_> + 4 13 6 4 -1. + <_> + 4 15 6 2 2. + <_> + + <_> + 6 1 15 2 -1. + <_> + 6 2 15 1 2. + <_> + + <_> + 4 3 3 12 -1. + <_> + 5 3 1 12 3. + <_> + + <_> + 14 4 2 12 -1. + <_> + 14 4 2 6 2. + 1 + <_> + + <_> + 11 0 6 3 -1. + <_> + 10 1 6 1 3. + 1 + <_> + + <_> + 4 9 14 5 -1. + <_> + 4 9 7 5 2. + <_> + + <_> + 11 2 10 3 -1. + <_> + 10 3 10 1 3. + 1 + <_> + + <_> + 9 12 7 6 -1. + <_> + 9 14 7 2 3. + <_> + + <_> + 1 8 8 10 -1. + <_> + 1 8 4 5 2. + <_> + 5 13 4 5 2. + <_> + + <_> + 5 5 12 5 -1. + <_> + 9 5 4 5 3. + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 2 6 2. + 1 + <_> + + <_> + 7 6 8 10 -1. + <_> + 7 11 8 5 2. + <_> + + <_> + 6 14 6 4 -1. + <_> + 9 14 3 4 2. + <_> + + <_> + 5 15 12 2 -1. + <_> + 5 16 12 1 2. + <_> + + <_> + 6 4 10 6 -1. + <_> + 6 6 10 2 3. + <_> + + <_> + 9 12 8 6 -1. + <_> + 9 14 8 2 3. + <_> + + <_> + 1 11 20 5 -1. + <_> + 6 11 10 5 2. + <_> + + <_> + 10 8 8 4 -1. + <_> + 10 8 4 4 2. + <_> + + <_> + 2 4 18 6 -1. + <_> + 2 6 18 2 3. + <_> + + <_> + 8 4 12 11 -1. + <_> + 8 4 6 11 2. + <_> + + <_> + 11 5 11 2 -1. + <_> + 11 5 11 1 2. + 1 + <_> + + <_> + 3 6 18 9 -1. + <_> + 9 9 6 3 9. + <_> + + <_> + 3 2 10 9 -1. + <_> + 8 2 5 9 2. + <_> + + <_> + 14 5 6 6 -1. + <_> + 16 5 2 6 3. + <_> + + <_> + 5 5 12 6 -1. + <_> + 8 5 6 6 2. + <_> + + <_> + 11 3 10 4 -1. + <_> + 11 3 5 4 2. + 1 + <_> + + <_> + 6 3 8 6 -1. + <_> + 6 3 4 3 2. + <_> + 10 6 4 3 2. + <_> + + <_> + 16 0 3 15 -1. + <_> + 16 5 3 5 3. + <_> + + <_> + 3 0 3 15 -1. + <_> + 3 5 3 5 3. + <_> + + <_> + 5 2 12 16 -1. + <_> + 8 2 6 16 2. + <_> + + <_> + 6 8 4 6 -1. + <_> + 8 8 2 6 2. + <_> + + <_> + 5 9 13 9 -1. + <_> + 5 12 13 3 3. + <_> + + <_> + 11 7 8 3 -1. + <_> + 11 7 4 3 2. + 1 + <_> + + <_> + 7 0 9 4 -1. + <_> + 10 0 3 4 3. + <_> + + <_> + 7 6 6 5 -1. + <_> + 10 6 3 5 2. + <_> + + <_> + 2 7 18 6 -1. + <_> + 8 9 6 2 9. + <_> + + <_> + 11 4 10 3 -1. + <_> + 10 5 10 1 3. + 1 + <_> + + <_> + 13 14 8 4 -1. + <_> + 13 16 8 2 2. + <_> + + <_> + 1 14 8 4 -1. + <_> + 1 16 8 2 2. + <_> + + <_> + 11 4 3 10 -1. + <_> + 12 5 1 10 3. + 1 + <_> + + <_> + 11 4 10 3 -1. + <_> + 10 5 10 1 3. + 1 + <_> + + <_> + 2 12 18 6 -1. 
+ <_> + 11 12 9 3 2. + <_> + 2 15 9 3 2. + <_> + + <_> + 5 2 8 6 -1. + <_> + 5 2 4 3 2. + <_> + 9 5 4 3 2. + <_> + + <_> + 8 14 6 4 -1. + <_> + 8 16 6 2 2. + <_> + + <_> + 1 10 6 8 -1. + <_> + 1 10 3 4 2. + <_> + 4 14 3 4 2. + <_> + + <_> + 7 2 15 9 -1. + <_> + 12 5 5 3 9. + <_> + + <_> + 0 2 15 9 -1. + <_> + 5 5 5 3 9. + <_> + + <_> + 10 5 6 7 -1. + <_> + 12 5 2 7 3. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 14 6 2 2. + <_> + 11 16 6 2 2. + <_> + + <_> + 10 1 12 3 -1. + <_> + 10 2 12 1 3. + <_> + + <_> + 8 1 3 12 -1. + <_> + 9 1 1 12 3. + <_> + + <_> + 14 2 6 7 -1. + <_> + 14 2 3 7 2. + <_> + + <_> + 1 0 12 9 -1. + <_> + 5 3 4 3 9. + <_> + + <_> + 8 3 7 6 -1. + <_> + 8 6 7 3 2. + <_> + + <_> + 1 12 20 3 -1. + <_> + 6 12 10 3 2. + <_> + + <_> + 5 2 12 16 -1. + <_> + 5 6 12 8 2. + <_> + + <_> + 4 3 7 6 -1. + <_> + 4 6 7 3 2. + <_> + + <_> + 9 5 6 6 -1. + <_> + 11 5 2 6 3. + <_> + + <_> + 7 0 8 2 -1. + <_> + 7 0 8 1 2. + 1 + <_> + + <_> + 5 14 12 2 -1. + <_> + 5 15 12 1 2. + <_> + + <_> + 3 11 16 6 -1. + <_> + 3 13 16 2 3. + <_> + + <_> + 11 5 3 8 -1. + <_> + 11 5 3 4 2. + 1 + <_> + + <_> + 2 15 12 3 -1. + <_> + 8 15 6 3 2. + <_> + + <_> + 4 13 15 3 -1. + <_> + 9 13 5 3 3. + <_> + + <_> + 2 3 12 4 -1. + <_> + 2 3 6 2 2. + <_> + 8 5 6 2 2. + <_> + + <_> + 17 5 4 7 -1. + <_> + 17 5 2 7 2. + 1 + <_> + + <_> + 5 4 7 4 -1. + <_> + 5 4 7 2 2. + 1 + <_> + + <_> + 2 2 18 3 -1. + <_> + 8 2 6 3 3. + <_> + + <_> + 2 2 18 9 -1. + <_> + 8 5 6 3 9. + <_> + + <_> + 15 6 6 4 -1. + <_> + 15 6 3 4 2. + <_> + + <_> + 0 1 12 3 -1. + <_> + 0 2 12 1 3. + <_> + + <_> + 16 2 6 4 -1. + <_> + 16 2 6 2 2. + 1 + <_> + + <_> + 0 9 14 6 -1. + <_> + 7 9 7 6 2. + <_> + + <_> + 13 5 8 4 -1. + <_> + 13 5 4 4 2. + 1 + <_> + + <_> + 9 5 4 8 -1. + <_> + 9 5 4 4 2. + 1 + <_> + + <_> + 12 4 3 14 -1. + <_> + 12 11 3 7 2. + <_> + + <_> + 1 13 20 5 -1. + <_> + 6 13 10 5 2. + <_> + + <_> + 12 4 3 14 -1. + <_> + 12 11 3 7 2. + <_> + + <_> + 7 4 3 14 -1. + <_> + 7 11 3 7 2. + <_> + + <_> + 16 2 6 4 -1. + <_> + 16 2 6 2 2. + 1 + <_> + + <_> + 6 2 4 6 -1. + <_> + 6 2 2 6 2. + 1 + <_> + + <_> + 7 4 15 14 -1. + <_> + 7 11 15 7 2. + <_> + + <_> + 1 16 16 2 -1. + <_> + 1 17 16 1 2. + <_> + + <_> + 0 6 12 4 -1. + <_> + 3 6 6 4 2. + <_> + + <_> + 6 9 10 9 -1. + <_> + 6 12 10 3 3. + <_> + + <_> + 0 6 6 5 -1. + <_> + 3 6 3 5 2. + <_> + + <_> + 11 14 7 4 -1. + <_> + 11 16 7 2 2. + <_> + + <_> + 7 8 8 2 -1. + <_> + 7 8 8 1 2. + 1 + <_> + + <_> + 10 13 7 4 -1. + <_> + 10 15 7 2 2. + <_> + + <_> + 1 16 20 2 -1. + <_> + 11 16 10 2 2. + <_> + + <_> + 5 12 14 4 -1. + <_> + 5 12 7 4 2. + <_> + + <_> + 8 8 4 6 -1. + <_> + 8 8 2 6 2. + 1 + <_> + + <_> + 17 2 2 14 -1. + <_> + 17 2 2 7 2. + 1 + <_> + + <_> + 7 1 8 4 -1. + <_> + 11 1 4 4 2. + <_> + + <_> + 5 7 12 3 -1. + <_> + 9 7 4 3 3. + <_> + + <_> + 2 14 6 4 -1. + <_> + 5 14 3 4 2. + <_> + + <_> + 10 9 12 4 -1. + <_> + 16 9 6 2 2. + <_> + 10 11 6 2 2. + <_> + + <_> + 6 14 9 4 -1. + <_> + 9 14 3 4 3. + <_> + + <_> + 11 9 2 6 -1. + <_> + 11 9 1 6 2. + 1 + <_> + + <_> + 3 9 14 9 -1. + <_> + 3 12 14 3 3. + <_> + + <_> + 5 10 16 6 -1. + <_> + 5 12 16 2 3. + <_> + + <_> + 5 12 10 6 -1. + <_> + 5 12 5 3 2. + <_> + 10 15 5 3 2. + <_> + + <_> + 4 13 18 5 -1. + <_> + 4 13 9 5 2. + <_> + + <_> + 0 13 18 5 -1. + <_> + 9 13 9 5 2. + <_> + + <_> + 4 9 16 3 -1. + <_> + 4 10 16 1 3. + <_> + + <_> + 5 1 15 2 -1. + <_> + 5 1 15 1 2. + 1 + <_> + + <_> + 13 5 2 9 -1. + <_> + 13 5 1 9 2. + 1 + <_> + + <_> + 9 5 9 2 -1. + <_> + 9 5 9 1 2. + 1 + <_> + + <_> + 1 11 20 5 -1. + <_> + 6 11 10 5 2. + <_> + + <_> + 3 9 13 3 -1. 
+ <_> + 3 10 13 1 3. + <_> + + <_> + 18 5 4 12 -1. + <_> + 20 5 2 6 2. + <_> + 18 11 2 6 2. + <_> + + <_> + 4 12 5 6 -1. + <_> + 4 15 5 3 2. + <_> + + <_> + 15 1 2 8 -1. + <_> + 15 1 1 8 2. + 1 + <_> + + <_> + 7 1 8 2 -1. + <_> + 7 1 8 1 2. + 1 + <_> + + <_> + 18 5 4 12 -1. + <_> + 20 5 2 6 2. + <_> + 18 11 2 6 2. + <_> + + <_> + 10 4 10 2 -1. + <_> + 10 4 10 1 2. + 1 + <_> + + <_> + 2 4 20 4 -1. + <_> + 7 4 10 4 2. + <_> + + <_> + 1 9 8 3 -1. + <_> + 5 9 4 3 2. + <_> + + <_> + 18 5 4 12 -1. + <_> + 20 5 2 6 2. + <_> + 18 11 2 6 2. + <_> + + <_> + 0 5 4 12 -1. + <_> + 0 5 2 6 2. + <_> + 2 11 2 6 2. + <_> + + <_> + 6 0 14 18 -1. + <_> + 6 9 14 9 2. + <_> + + <_> + 4 4 12 3 -1. + <_> + 4 5 12 1 3. + <_> + + <_> + 8 4 14 3 -1. + <_> + 8 5 14 1 3. + <_> + + <_> + 4 13 14 3 -1. + <_> + 4 14 14 1 3. + <_> + + <_> + 8 2 6 14 -1. + <_> + 11 2 3 7 2. + <_> + 8 9 3 7 2. + <_> + + <_> + 0 13 15 4 -1. + <_> + 0 14 15 2 2. + <_> + + <_> + 11 14 7 4 -1. + <_> + 11 16 7 2 2. + <_> + + <_> + 11 7 7 3 -1. + <_> + 10 8 7 1 3. + 1 + <_> + + <_> + 10 6 6 6 -1. + <_> + 10 9 6 3 2. + <_> + + <_> + 2 0 4 14 -1. + <_> + 2 0 2 7 2. + <_> + 4 7 2 7 2. + <_> + + <_> + 2 6 18 5 -1. + <_> + 8 6 6 5 3. + <_> + + <_> + 2 0 18 18 -1. + <_> + 8 0 6 18 3. + <_> + + <_> + 13 1 4 8 -1. + <_> + 14 2 2 8 2. + 1 + <_> + + <_> + 4 0 12 18 -1. + <_> + 4 0 6 9 2. + <_> + 10 9 6 9 2. + <_> + + <_> + 12 14 6 4 -1. + <_> + 12 16 6 2 2. + <_> + + <_> + 4 14 6 4 -1. + <_> + 4 16 6 2 2. + <_> + + <_> + 11 8 2 6 -1. + <_> + 11 8 1 6 2. + 1 + <_> + + <_> + 1 10 20 6 -1. + <_> + 1 10 10 3 2. + <_> + 11 13 10 3 2. + <_> + + <_> + 13 1 7 9 -1. + <_> + 10 4 7 3 3. + 1 + <_> + + <_> + 5 3 4 6 -1. + <_> + 5 6 4 3 2. + <_> + + <_> + 13 0 2 12 -1. + <_> + 13 6 2 6 2. + <_> + + <_> + 7 11 8 3 -1. + <_> + 11 11 4 3 2. + <_> + + <_> + 9 6 12 11 -1. + <_> + 12 6 6 11 2. + <_> + + <_> + 6 8 10 9 -1. + <_> + 11 8 5 9 2. + <_> + + <_> + 11 14 6 4 -1. + <_> + 11 14 3 4 2. + <_> + + <_> + 3 6 12 4 -1. + <_> + 7 6 4 4 3. + <_> + + <_> + 10 5 6 7 -1. + <_> + 12 5 2 7 3. + <_> + + <_> + 8 0 6 4 -1. + <_> + 11 0 3 4 2. + <_> + + <_> + 10 6 6 12 -1. + <_> + 12 6 2 12 3. + <_> + + <_> + 6 6 6 12 -1. + <_> + 8 6 2 12 3. + <_> + + <_> + 6 9 9 6 -1. + <_> + 6 12 9 3 2. + <_> + + <_> + 14 6 6 6 -1. + <_> + 14 6 6 3 2. + 1 + <_> + + <_> + 1 13 20 5 -1. + <_> + 6 13 10 5 2. + <_> + + <_> + 8 14 6 4 -1. + <_> + 8 16 6 2 2. + <_> + + <_> + 4 7 8 3 -1. + <_> + 4 7 4 3 2. + 1 + <_> + + <_> + 16 0 2 15 -1. + <_> + 16 0 1 15 2. + 1 + <_> + + <_> + 9 3 12 2 -1. + <_> + 9 3 12 1 2. + 1 + <_> + + <_> + 7 1 8 6 -1. + <_> + 9 1 4 6 2. + <_> + + <_> + 6 15 8 3 -1. + <_> + 10 15 4 3 2. + <_> + + <_> + 8 3 6 6 -1. + <_> + 10 3 2 6 3. + <_> + + <_> + 1 1 16 3 -1. + <_> + 1 2 16 1 3. + <_> + + <_> + 9 1 12 3 -1. + <_> + 9 2 12 1 3. + <_> + + <_> + 0 0 22 6 -1. + <_> + 0 0 11 3 2. + <_> + 11 3 11 3 2. + <_> + + <_> + 10 5 4 6 -1. + <_> + 10 5 2 6 2. + <_> + + <_> + 10 0 8 5 -1. + <_> + 10 0 4 5 2. + 1 + <_> + + <_> + 12 4 4 10 -1. + <_> + 13 5 2 10 2. + 1 + <_> + + <_> + 10 4 10 4 -1. + <_> + 9 5 10 2 2. + 1 + <_> + + <_> + 15 1 2 8 -1. + <_> + 15 1 1 8 2. + 1 + <_> + + <_> + 7 1 8 2 -1. + <_> + 7 1 8 1 2. + 1 + <_> + + <_> + 17 0 3 11 -1. + <_> + 18 1 1 11 3. + 1 + <_> + + <_> + 9 8 4 6 -1. + <_> + 9 8 4 3 2. + 1 + <_> + + <_> + 14 6 6 12 -1. + <_> + 17 6 3 6 2. + <_> + 14 12 3 6 2. + <_> + + <_> + 2 12 18 6 -1. + <_> + 8 14 6 2 9. + <_> + + <_> + 14 7 3 10 -1. + <_> + 14 12 3 5 2. + <_> + + <_> + 3 8 16 10 -1. + <_> + 3 8 8 5 2. + <_> + 11 13 8 5 2. + <_> + + <_> + 15 12 4 6 -1. 
+ <_> + 15 15 4 3 2. + <_> + + <_> + 2 8 18 10 -1. + <_> + 2 8 9 5 2. + <_> + 11 13 9 5 2. + <_> + + <_> + 10 1 12 3 -1. + <_> + 10 2 12 1 3. + <_> + + <_> + 1 1 12 3 -1. + <_> + 1 2 12 1 3. + <_> + + <_> + 8 0 14 4 -1. + <_> + 15 0 7 2 2. + <_> + 8 2 7 2 2. + <_> + + <_> + 2 4 14 4 -1. + <_> + 2 5 14 2 2. + <_> + + <_> + 8 4 12 3 -1. + <_> + 8 5 12 1 3. + <_> + + <_> + 1 0 8 8 -1. + <_> + 1 0 4 4 2. + <_> + 5 4 4 4 2. + <_> + + <_> + 13 0 8 6 -1. + <_> + 17 0 4 3 2. + <_> + 13 3 4 3 2. + <_> + + <_> + 1 0 8 6 -1. + <_> + 1 0 4 3 2. + <_> + 5 3 4 3 2. + <_> + + <_> + 9 6 6 5 -1. + <_> + 9 6 3 5 2. + <_> + + <_> + 5 6 8 3 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 13 3 6 9 -1. + <_> + 10 6 6 3 3. + 1 + <_> + + <_> + 9 3 9 6 -1. + <_> + 12 6 3 6 3. + 1 + <_> + + <_> + 4 11 18 3 -1. + <_> + 4 12 18 1 3. + <_> + + <_> + 0 13 15 4 -1. + <_> + 5 13 5 4 3. + <_> + + <_> + 15 12 4 6 -1. + <_> + 15 15 4 3 2. + <_> + + <_> + 3 12 4 6 -1. + <_> + 3 15 4 3 2. + <_> + + <_> + 9 12 6 6 -1. + <_> + 11 12 2 6 3. + <_> + + <_> + 6 9 9 7 -1. + <_> + 9 9 3 7 3. + <_> + + <_> + 13 10 6 8 -1. + <_> + 16 10 3 4 2. + <_> + 13 14 3 4 2. + <_> + + <_> + 3 10 6 8 -1. + <_> + 3 10 3 4 2. + <_> + 6 14 3 4 2. + <_> + + <_> + 7 10 8 4 -1. + <_> + 7 10 4 4 2. + <_> + + <_> + 7 5 6 11 -1. + <_> + 10 5 3 11 2. + <_> + + <_> + 10 6 6 6 -1. + <_> + 10 9 6 3 2. + <_> + + <_> + 6 6 6 6 -1. + <_> + 6 9 6 3 2. + <_> + + <_> + 8 6 12 8 -1. + <_> + 12 6 4 8 3. + <_> + + <_> + 2 11 12 3 -1. + <_> + 6 11 4 3 3. + <_> + + <_> + 14 3 6 8 -1. + <_> + 17 3 3 4 2. + <_> + 14 7 3 4 2. + <_> + + <_> + 0 5 13 3 -1. + <_> + 0 6 13 1 3. + <_> + + <_> + 14 0 6 6 -1. + <_> + 14 2 6 2 3. + <_> + + <_> + 3 0 6 6 -1. + <_> + 3 2 6 2 3. + <_> + + <_> + 8 8 14 3 -1. + <_> + 8 9 14 1 3. + <_> + + <_> + 7 2 2 15 -1. + <_> + 8 2 1 15 2. + <_> + + <_> + 4 14 16 4 -1. + <_> + 4 14 8 4 2. + <_> + + <_> + 1 6 20 12 -1. + <_> + 6 6 10 12 2. + <_> + + <_> + 5 10 16 6 -1. + <_> + 13 10 8 3 2. + <_> + 5 13 8 3 2. + <_> + + <_> + 1 10 16 6 -1. + <_> + 1 10 8 3 2. + <_> + 9 13 8 3 2. + <_> + + <_> + 8 8 14 6 -1. + <_> + 8 8 7 6 2. + <_> + + <_> + 0 8 14 6 -1. + <_> + 7 8 7 6 2. + <_> + + <_> + 5 6 12 11 -1. + <_> + 8 6 6 11 2. + <_> + + <_> + 1 3 8 6 -1. + <_> + 1 3 4 3 2. + <_> + 5 6 4 3 2. + <_> + + <_> + 13 1 7 6 -1. + <_> + 13 1 7 3 2. + 1 + <_> + + <_> + 1 4 5 10 -1. + <_> + 1 9 5 5 2. + <_> + + <_> + 18 6 3 8 -1. + <_> + 18 10 3 4 2. + <_> + + <_> + 1 6 3 8 -1. + <_> + 1 10 3 4 2. + <_> + + <_> + 8 5 13 3 -1. + <_> + 8 6 13 1 3. + <_> + + <_> + 1 5 13 3 -1. + <_> + 1 6 13 1 3. + <_> + + <_> + 18 0 3 12 -1. + <_> + 19 0 1 12 3. + <_> + + <_> + 1 0 3 12 -1. + <_> + 2 0 1 12 3. + <_> + + <_> + 4 2 18 2 -1. + <_> + 4 2 9 2 2. + <_> + + <_> + 6 3 6 6 -1. + <_> + 9 3 3 6 2. + <_> + + <_> + 9 5 12 11 -1. + <_> + 12 5 6 11 2. + <_> + + <_> + 1 5 12 11 -1. + <_> + 4 5 6 11 2. + <_> + + <_> + 8 4 8 8 -1. + <_> + 8 4 4 8 2. + <_> + + <_> + 0 8 22 4 -1. + <_> + 0 8 11 2 2. + <_> + 11 10 11 2 2. + <_> + + <_> + 8 6 8 4 -1. + <_> + 8 6 4 4 2. + <_> + + <_> + 6 3 8 8 -1. + <_> + 10 3 4 8 2. + <_> + + <_> + 3 6 16 4 -1. + <_> + 11 6 8 2 2. + <_> + 3 8 8 2 2. + <_> + + <_> + 2 14 16 4 -1. + <_> + 10 14 8 4 2. + <_> + + <_> + 11 13 6 5 -1. + <_> + 11 13 3 5 2. + <_> + + <_> + 5 13 6 5 -1. + <_> + 8 13 3 5 2. + <_> + + <_> + 12 2 2 7 -1. + <_> + 12 2 1 7 2. + 1 + <_> + + <_> + 0 9 21 9 -1. + <_> + 7 12 7 3 9. + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 3 9 16 8 -1. + <_> + 3 9 8 4 2. + <_> + 11 13 8 4 2. + <_> + + <_> + 7 0 14 18 -1. + <_> + 7 0 7 18 2. 
+ <_> + + <_> + 5 8 6 4 -1. + <_> + 5 8 3 4 2. + 1 + <_> + + <_> + 3 11 16 4 -1. + <_> + 11 11 8 2 2. + <_> + 3 13 8 2 2. + <_> + + <_> + 6 9 6 8 -1. + <_> + 6 9 3 4 2. + <_> + 9 13 3 4 2. + <_> + + <_> + 7 0 14 18 -1. + <_> + 7 0 7 18 2. + <_> + + <_> + 1 0 14 18 -1. + <_> + 8 0 7 18 2. + <_> + + <_> + 13 14 8 3 -1. + <_> + 13 14 4 3 2. + <_> + + <_> + 8 4 6 4 -1. + <_> + 8 6 6 2 2. + <_> + + <_> + 6 6 14 4 -1. + <_> + 13 6 7 2 2. + <_> + 6 8 7 2 2. + <_> + + <_> + 7 3 11 4 -1. + <_> + 6 4 11 2 2. + 1 + <_> + + <_> + 7 0 12 4 -1. + <_> + 13 0 6 2 2. + <_> + 7 2 6 2 2. + <_> + + <_> + 4 0 14 4 -1. + <_> + 4 0 7 2 2. + <_> + 11 2 7 2 2. + <_> + + <_> + 15 8 6 9 -1. + <_> + 17 8 2 9 3. + <_> + + <_> + 1 8 6 9 -1. + <_> + 3 8 2 9 3. + <_> + + <_> + 12 5 5 9 -1. + <_> + 12 8 5 3 3. + <_> + + <_> + 5 5 5 9 -1. + <_> + 5 8 5 3 3. + <_> + + <_> + 17 9 4 6 -1. + <_> + 17 9 2 6 2. + <_> + + <_> + 1 9 4 6 -1. + <_> + 3 9 2 6 2. + <_> + + <_> + 4 3 14 3 -1. + <_> + 4 4 14 1 3. + <_> + + <_> + 6 0 10 3 -1. + <_> + 5 1 10 1 3. + 1 + <_> + + <_> + 10 4 11 14 -1. + <_> + 10 11 11 7 2. + <_> + + <_> + 2 5 6 6 -1. + <_> + 2 7 6 2 3. + <_> + + <_> + 12 2 5 12 -1. + <_> + 12 6 5 4 3. + <_> + + <_> + 5 16 12 2 -1. + <_> + 5 17 12 1 2. + <_> + + <_> + 3 4 18 3 -1. + <_> + 3 5 18 1 3. + <_> + + <_> + 1 4 11 14 -1. + <_> + 1 11 11 7 2. + <_> + + <_> + 8 12 11 4 -1. + <_> + 8 14 11 2 2. + <_> + + <_> + 7 11 8 7 -1. + <_> + 11 11 4 7 2. + <_> + + <_> + 12 2 4 11 -1. + <_> + 12 2 2 11 2. + 1 + <_> + + <_> + 10 4 11 2 -1. + <_> + 10 4 11 1 2. + 1 + <_> + + <_> + 16 0 2 14 -1. + <_> + 16 0 1 14 2. + 1 + <_> + + <_> + 6 0 14 2 -1. + <_> + 6 0 14 1 2. + 1 + <_> + + <_> + 19 4 2 12 -1. + <_> + 19 4 1 12 2. + 1 + <_> + + <_> + 8 2 6 10 -1. + <_> + 8 7 6 5 2. + <_> + + <_> + 19 4 2 12 -1. + <_> + 19 4 1 12 2. + 1 + <_> + + <_> + 11 3 6 8 -1. + <_> + 11 3 6 4 2. + 1 + <_> + + <_> + 11 2 10 6 -1. + <_> + 11 2 5 6 2. + 1 + <_> + + <_> + 3 5 13 2 -1. + <_> + 3 6 13 1 2. + <_> + + <_> + 5 4 12 6 -1. + <_> + 5 6 12 2 3. + <_> + + <_> + 6 9 9 9 -1. + <_> + 9 9 3 9 3. + <_> + + <_> + 19 1 3 12 -1. + <_> + 20 2 1 12 3. + 1 + <_> + + <_> + 2 13 9 5 -1. + <_> + 5 13 3 5 3. + <_> + + <_> + 11 2 10 6 -1. + <_> + 11 2 5 6 2. + 1 + <_> + + <_> + 11 2 6 10 -1. + <_> + 11 2 6 5 2. + 1 + <_> + + <_> + 1 6 21 3 -1. + <_> + 8 6 7 3 3. + <_> + + <_> + 5 5 3 8 -1. + <_> + 5 9 3 4 2. + <_> + + <_> + 10 5 7 6 -1. + <_> + 10 7 7 2 3. + <_> + + <_> + 10 0 7 6 -1. + <_> + 8 2 7 2 3. + 1 + <_> + + <_> + 13 5 6 6 -1. + <_> + 13 7 6 2 3. + <_> + + <_> + 5 5 7 6 -1. + <_> + 5 7 7 2 3. + <_> + + <_> + 9 1 6 8 -1. + <_> + 12 1 3 4 2. + <_> + 9 5 3 4 2. + <_> + + <_> + 7 1 6 8 -1. + <_> + 7 1 3 4 2. + <_> + 10 5 3 4 2. + <_> + + <_> + 7 0 9 4 -1. + <_> + 10 0 3 4 3. + <_> + + <_> + 1 9 14 3 -1. + <_> + 1 10 14 1 3. + <_> + + <_> + 5 9 15 3 -1. + <_> + 5 10 15 1 3. + <_> + + <_> + 3 1 12 3 -1. + <_> + 2 2 12 1 3. + 1 + <_> + + <_> + 5 12 12 6 -1. + <_> + 11 12 6 3 2. + <_> + 5 15 6 3 2. + <_> + + <_> + 5 12 12 4 -1. + <_> + 5 12 6 2 2. + <_> + 11 14 6 2 2. + <_> + + <_> + 15 4 3 9 -1. + <_> + 16 5 1 9 3. + 1 + <_> + + <_> + 7 4 9 3 -1. + <_> + 6 5 9 1 3. + 1 + <_> + + <_> + 13 3 7 4 -1. + <_> + 13 5 7 2 2. + <_> + + <_> + 4 0 9 5 -1. + <_> + 7 0 3 5 3. + <_> + + <_> + 10 6 6 6 -1. + <_> + 12 6 2 6 3. + <_> + + <_> + 0 6 12 4 -1. + <_> + 0 6 6 2 2. + <_> + 6 8 6 2 2. + <_> + + <_> + 10 11 9 6 -1. + <_> + 13 11 3 6 3. + <_> + + <_> + 2 6 16 8 -1. + <_> + 2 10 16 4 2. + <_> + + <_> + 17 0 2 10 -1. + <_> + 17 0 1 10 2. + 1 + <_> + + <_> + 5 0 10 2 -1. 
+ <_> + 5 0 10 1 2. + 1 + <_> + + <_> + 9 11 13 3 -1. + <_> + 9 12 13 1 3. + <_> + + <_> + 0 11 13 3 -1. + <_> + 0 12 13 1 3. + <_> + + <_> + 18 6 4 12 -1. + <_> + 18 9 4 6 2. + <_> + + <_> + 6 4 9 7 -1. + <_> + 9 4 3 7 3. + <_> + + <_> + 11 9 6 7 -1. + <_> + 13 9 2 7 3. + <_> + + <_> + 5 9 6 7 -1. + <_> + 7 9 2 7 3. + <_> + + <_> + 1 13 20 5 -1. + <_> + 6 13 10 5 2. + <_> + + <_> + 7 9 8 6 -1. + <_> + 9 9 4 6 2. + <_> + + <_> + 5 5 12 4 -1. + <_> + 8 5 6 4 2. + <_> + + <_> + 1 11 20 6 -1. + <_> + 6 11 10 6 2. + <_> + + <_> + 1 8 20 7 -1. + <_> + 6 8 10 7 2. + <_> + + <_> + 2 9 18 6 -1. + <_> + 8 11 6 2 9. + <_> + + <_> + 8 13 9 4 -1. + <_> + 8 15 9 2 2. + <_> + + <_> + 1 12 9 6 -1. + <_> + 1 15 9 3 2. + <_> + + <_> + 9 2 8 6 -1. + <_> + 13 2 4 3 2. + <_> + 9 5 4 3 2. + <_> + + <_> + 0 5 22 5 -1. + <_> + 11 5 11 5 2. + <_> + + <_> + 2 0 18 18 -1. + <_> + 2 9 18 9 2. + <_> + + <_> + 6 7 3 8 -1. + <_> + 6 11 3 4 2. + <_> + + <_> + 11 12 8 6 -1. + <_> + 13 12 4 6 2. + <_> + + <_> + 3 8 6 8 -1. + <_> + 3 8 3 4 2. + <_> + 6 12 3 4 2. + <_> + + <_> + 11 6 7 4 -1. + <_> + 11 8 7 2 2. + <_> + + <_> + 9 2 4 6 -1. + <_> + 11 2 2 6 2. + <_> + + <_> + 3 14 16 4 -1. + <_> + 11 14 8 2 2. + <_> + 3 16 8 2 2. + <_> + + <_> + 5 14 6 4 -1. + <_> + 5 16 6 2 2. + <_> + + <_> + 9 5 4 6 -1. + <_> + 9 5 2 6 2. + <_> + + <_> + 5 12 12 6 -1. + <_> + 8 12 6 6 2. + <_> + + <_> + 7 14 8 4 -1. + <_> + 7 16 8 2 2. + <_> + + <_> + 1 3 18 3 -1. + <_> + 1 4 18 1 3. + <_> + + <_> + 8 3 14 3 -1. + <_> + 8 4 14 1 3. + <_> + + <_> + 1 0 14 4 -1. + <_> + 1 0 7 2 2. + <_> + 8 2 7 2 2. + <_> + + <_> + 10 10 12 3 -1. + <_> + 10 11 12 1 3. + <_> + + <_> + 1 10 12 3 -1. + <_> + 1 11 12 1 3. + <_> + + <_> + 10 7 8 3 -1. + <_> + 10 7 4 3 2. + <_> + + <_> + 11 0 6 6 -1. + <_> + 9 2 6 2 3. + 1 + <_> + + <_> + 17 0 2 10 -1. + <_> + 17 0 1 10 2. + 1 + <_> + + <_> + 4 7 8 3 -1. + <_> + 8 7 4 3 2. + <_> + + <_> + 13 0 8 6 -1. + <_> + 13 2 8 2 3. + <_> + + <_> + 1 0 8 6 -1. + <_> + 1 2 8 2 3. + <_> + + <_> + 17 0 2 10 -1. + <_> + 17 0 1 10 2. + 1 + <_> + + <_> + 5 0 10 2 -1. + <_> + 5 0 10 1 2. + 1 + <_> + + <_> + 10 6 6 4 -1. + <_> + 10 6 3 4 2. + <_> + + <_> + 0 4 14 3 -1. + <_> + 0 5 14 1 3. + <_> + + <_> + 3 3 16 10 -1. + <_> + 11 3 8 5 2. + <_> + 3 8 8 5 2. + <_> + + <_> + 1 5 12 3 -1. + <_> + 1 6 12 1 3. + <_> + + <_> + 9 6 13 4 -1. + <_> + 9 8 13 2 2. + <_> + + <_> + 7 5 8 6 -1. + <_> + 7 5 4 3 2. + <_> + 11 8 4 3 2. + <_> + + <_> + 13 3 4 11 -1. + <_> + 14 4 2 11 2. + 1 + <_> + + <_> + 9 2 11 2 -1. + <_> + 9 2 11 1 2. + 1 + <_> + + <_> + 5 13 12 4 -1. + <_> + 5 14 12 2 2. + <_> + + <_> + 0 9 16 4 -1. + <_> + 0 9 8 2 2. + <_> + 8 11 8 2 2. + <_> + + <_> + 7 10 9 7 -1. + <_> + 10 10 3 7 3. + <_> + + <_> + 10 7 5 6 -1. + <_> + 10 7 5 3 2. + 1 + <_> + + <_> + 11 5 10 3 -1. + <_> + 11 5 5 3 2. + 1 + <_> + + <_> + 2 13 12 5 -1. + <_> + 5 13 6 5 2. + <_> + + <_> + 17 9 4 7 -1. + <_> + 17 9 2 7 2. + <_> + + <_> + 0 6 12 3 -1. + <_> + 0 7 12 1 3. + <_> + + <_> + 18 6 2 10 -1. + <_> + 18 6 1 10 2. + 1 + <_> + + <_> + 1 14 8 3 -1. + <_> + 5 14 4 3 2. + <_> + + <_> + 6 11 12 3 -1. + <_> + 10 11 4 3 3. + <_> + + <_> + 0 14 8 3 -1. + <_> + 4 14 4 3 2. + <_> + + <_> + 5 11 16 3 -1. + <_> + 9 11 8 3 2. + <_> + + <_> + 1 9 4 7 -1. + <_> + 3 9 2 7 2. + <_> + + <_> + 6 12 10 6 -1. + <_> + 6 14 10 2 3. + <_> + + <_> + 0 16 12 2 -1. + <_> + 0 17 12 1 2. + <_> + + <_> + 12 5 4 12 -1. + <_> + 14 5 2 6 2. + <_> + 12 11 2 6 2. + <_> + + <_> + 6 11 6 6 -1. + <_> + 8 11 2 6 3. + <_> + + <_> + 4 16 15 2 -1. + <_> + 4 17 15 1 2. + <_> + + <_> + 5 0 12 9 -1. 
+ <_> + 9 3 4 3 9. + <_> + + <_> + 8 0 6 9 -1. + <_> + 8 3 6 3 3. + <_> + + <_> + 1 0 3 13 -1. + <_> + 2 0 1 13 3. + <_> + + <_> + 10 1 6 4 -1. + <_> + 10 1 3 4 2. + <_> + + <_> + 8 1 6 9 -1. + <_> + 10 1 2 9 3. + <_> + + <_> + 8 3 6 6 -1. + <_> + 10 3 2 6 3. + <_> + + <_> + 3 5 11 2 -1. + <_> + 3 5 11 1 2. + 1 + <_> + + <_> + 9 5 6 6 -1. + <_> + 11 5 2 6 3. + <_> + + <_> + 6 4 6 10 -1. + <_> + 6 9 6 5 2. + <_> + + <_> + 11 2 3 12 -1. + <_> + 12 2 1 12 3. + <_> + + <_> + 8 2 3 12 -1. + <_> + 9 2 1 12 3. + <_> + + <_> + 18 9 4 9 -1. + <_> + 18 9 2 9 2. + <_> + + <_> + 1 5 6 6 -1. + <_> + 1 8 6 3 2. + <_> + + <_> + 10 6 6 6 -1. + <_> + 12 6 2 6 3. + <_> + + <_> + 10 2 2 12 -1. + <_> + 11 2 1 12 2. + <_> + + <_> + 11 0 5 6 -1. + <_> + 11 3 5 3 2. + <_> + + <_> + 6 0 5 6 -1. + <_> + 6 3 5 3 2. + <_> + + <_> + 13 9 5 8 -1. + <_> + 13 13 5 4 2. + <_> + + <_> + 0 9 20 2 -1. + <_> + 10 9 10 2 2. + <_> + + <_> + 14 7 3 10 -1. + <_> + 14 12 3 5 2. + <_> + + <_> + 11 5 11 2 -1. + <_> + 11 5 11 1 2. + 1 + <_> + + <_> + 14 7 3 10 -1. + <_> + 14 12 3 5 2. + <_> + + <_> + 5 13 12 2 -1. + <_> + 5 14 12 1 2. + <_> + + <_> + 11 8 4 9 -1. + <_> + 11 11 4 3 3. + <_> + + <_> + 1 8 12 6 -1. + <_> + 1 10 12 2 3. + <_> + + <_> + 16 8 3 8 -1. + <_> + 16 12 3 4 2. + <_> + + <_> + 3 8 3 8 -1. + <_> + 3 12 3 4 2. + <_> + + <_> + 11 8 4 9 -1. + <_> + 11 11 4 3 3. + <_> + + <_> + 7 8 4 9 -1. + <_> + 7 11 4 3 3. + <_> + + <_> + 7 3 15 12 -1. + <_> + 12 7 5 4 9. + <_> + + <_> + 4 10 14 4 -1. + <_> + 4 10 7 2 2. + <_> + 11 12 7 2 2. + <_> + + <_> + 9 10 10 6 -1. + <_> + 14 10 5 3 2. + <_> + 9 13 5 3 2. + <_> + + <_> + 3 10 10 6 -1. + <_> + 3 10 5 3 2. + <_> + 8 13 5 3 2. + <_> + + <_> + 16 7 6 6 -1. + <_> + 18 7 2 6 3. + <_> + + <_> + 3 5 14 2 -1. + <_> + 10 5 7 2 2. + <_> + + <_> + 18 2 4 12 -1. + <_> + 20 2 2 6 2. + <_> + 18 8 2 6 2. + <_> + + <_> + 3 14 12 4 -1. + <_> + 3 15 12 2 2. + <_> + + <_> + 7 6 9 6 -1. + <_> + 7 9 9 3 2. + <_> + + <_> + 1 14 6 4 -1. + <_> + 4 14 3 4 2. + <_> + + <_> + 12 5 5 12 -1. + <_> + 12 8 5 6 2. + <_> + + <_> + 5 0 3 17 -1. + <_> + 6 0 1 17 3. + <_> + + <_> + 16 7 6 6 -1. + <_> + 18 7 2 6 3. + <_> + + <_> + 0 7 6 6 -1. + <_> + 2 7 2 6 3. + <_> + + <_> + 14 0 3 18 -1. + <_> + 15 0 1 18 3. + <_> + + <_> + 0 5 5 10 -1. + <_> + 0 10 5 5 2. + <_> + + <_> + 5 12 12 4 -1. + <_> + 5 13 12 2 2. + <_> + + <_> + 7 9 8 6 -1. + <_> + 7 11 8 2 3. + <_> + + <_> + 2 10 15 4 -1. + <_> + 2 12 15 2 2. + <_> + + <_> + 5 15 12 3 -1. + <_> + 5 15 6 3 2. + <_> + + <_> + 7 4 3 14 -1. + <_> + 8 4 1 14 3. + <_> + + <_> + 7 15 8 3 -1. + <_> + 7 15 4 3 2. + <_> + + <_> + 1 2 8 6 -1. + <_> + 1 2 4 3 2. + <_> + 5 5 4 3 2. + <_> + + <_> + 14 9 6 8 -1. + <_> + 17 9 3 4 2. + <_> + 14 13 3 4 2. + <_> + + <_> + 0 0 6 8 -1. + <_> + 0 0 3 4 2. + <_> + 3 4 3 4 2. + <_> + + <_> + 14 9 6 8 -1. + <_> + 17 9 3 4 2. + <_> + 14 13 3 4 2. + <_> + + <_> + 2 9 6 8 -1. + <_> + 2 9 3 4 2. + <_> + 5 13 3 4 2. + <_> + + <_> + 14 10 6 8 -1. + <_> + 17 10 3 4 2. + <_> + 14 14 3 4 2. + <_> + + <_> + 2 10 6 8 -1. + <_> + 2 10 3 4 2. + <_> + 5 14 3 4 2. + <_> + + <_> + 13 1 6 8 -1. + <_> + 16 1 3 4 2. + <_> + 13 5 3 4 2. + <_> + + <_> + 3 3 12 3 -1. + <_> + 3 4 12 1 3. + <_> + + <_> + 13 1 6 8 -1. + <_> + 16 1 3 4 2. + <_> + 13 5 3 4 2. + <_> + + <_> + 3 1 6 8 -1. + <_> + 3 1 3 4 2. + <_> + 6 5 3 4 2. + <_> + + <_> + 3 3 16 3 -1. + <_> + 3 4 16 1 3. + <_> + + <_> + 7 13 6 4 -1. + <_> + 7 15 6 2 2. + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 16 6 2 2. + <_> + + <_> + 2 10 15 3 -1. + <_> + 2 11 15 1 3. + <_> + + <_> + 8 12 8 6 -1. 
+ <_> + 10 12 4 6 2. + <_> + + <_> + 2 4 13 4 -1. + <_> + 2 5 13 2 2. + <_> + + <_> + 9 9 12 3 -1. + <_> + 9 10 12 1 3. + <_> + + <_> + 3 13 16 4 -1. + <_> + 3 13 8 2 2. + <_> + 11 15 8 2 2. + <_> + + <_> + 8 12 8 6 -1. + <_> + 10 12 4 6 2. + <_> + + <_> + 6 12 8 6 -1. + <_> + 8 12 4 6 2. + <_> + + <_> + 9 4 13 2 -1. + <_> + 9 5 13 1 2. + <_> + + <_> + 7 3 8 12 -1. + <_> + 7 9 8 6 2. + <_> + + <_> + 3 6 17 3 -1. + <_> + 3 7 17 1 3. + <_> + + <_> + 3 0 14 4 -1. + <_> + 3 0 7 2 2. + <_> + 10 2 7 2 2. + <_> + + <_> + 11 4 6 5 -1. + <_> + 11 4 3 5 2. + 1 + <_> + + <_> + 11 4 5 6 -1. + <_> + 11 4 5 3 2. + 1 + <_> + + <_> + 10 5 4 6 -1. + <_> + 10 5 2 6 2. + <_> + + <_> + 4 12 12 3 -1. + <_> + 8 12 4 3 3. + <_> + + <_> + 8 6 8 7 -1. + <_> + 8 6 4 7 2. + <_> + + <_> + 5 0 8 12 -1. + <_> + 5 0 4 6 2. + <_> + 9 6 4 6 2. + <_> + + <_> + 7 0 12 4 -1. + <_> + 13 0 6 2 2. + <_> + 7 2 6 2 2. + <_> + + <_> + 1 4 6 5 -1. + <_> + 4 4 3 5 2. + <_> + + <_> + 15 0 7 4 -1. + <_> + 15 0 7 2 2. + 1 + <_> + + <_> + 5 2 8 6 -1. + <_> + 5 2 4 3 2. + <_> + 9 5 4 3 2. + <_> + + <_> + 4 2 15 3 -1. + <_> + 4 3 15 1 3. + <_> + + <_> + 4 1 14 3 -1. + <_> + 4 2 14 1 3. + <_> + + <_> + 15 5 4 6 -1. + <_> + 15 8 4 3 2. + <_> + + <_> + 0 1 17 2 -1. + <_> + 0 2 17 1 2. + <_> + + <_> + 15 5 4 6 -1. + <_> + 15 8 4 3 2. + <_> + + <_> + 3 5 4 6 -1. + <_> + 3 8 4 3 2. + <_> + + <_> + 3 0 18 3 -1. + <_> + 3 1 18 1 3. + <_> + + <_> + 7 1 6 4 -1. + <_> + 10 1 3 4 2. + <_> + + <_> + 0 11 22 7 -1. + <_> + 0 11 11 7 2. + <_> + + <_> + 3 5 4 12 -1. + <_> + 3 5 2 6 2. + <_> + 5 11 2 6 2. + <_> + + <_> + 14 7 3 10 -1. + <_> + 14 12 3 5 2. + <_> + + <_> + 4 11 14 4 -1. + <_> + 4 11 7 2 2. + <_> + 11 13 7 2 2. + <_> + + <_> + 7 11 8 6 -1. + <_> + 11 11 4 3 2. + <_> + 7 14 4 3 2. + <_> + + <_> + 3 5 3 13 -1. + <_> + 4 5 1 13 3. + <_> + + <_> + 17 1 4 12 -1. + <_> + 19 1 2 6 2. + <_> + 17 7 2 6 2. + <_> + + <_> + 1 1 4 12 -1. + <_> + 1 1 2 6 2. + <_> + 3 7 2 6 2. + <_> + + <_> + 7 0 13 16 -1. + <_> + 7 4 13 8 2. + <_> + + <_> + 1 4 13 2 -1. + <_> + 1 5 13 1 2. + <_> + + <_> + 9 14 6 4 -1. + <_> + 9 16 6 2 2. + <_> + + <_> + 2 4 17 3 -1. + <_> + 2 5 17 1 3. + <_> + + <_> + 14 0 3 10 -1. + <_> + 15 1 1 10 3. + 1 + <_> + + <_> + 7 0 8 3 -1. + <_> + 6 1 8 1 3. + 1 + <_> + + <_> + 14 0 3 10 -1. + <_> + 15 1 1 10 3. + 1 + <_> + + <_> + 8 0 10 3 -1. + <_> + 7 1 10 1 3. + 1 + <_> + + <_> + 11 1 2 7 -1. + <_> + 11 1 1 7 2. + 1 + <_> + + <_> + 8 0 3 14 -1. + <_> + 9 0 1 14 3. + <_> + + <_> + 11 1 2 7 -1. + <_> + 11 1 1 7 2. + 1 + <_> + + <_> + 11 1 7 2 -1. + <_> + 11 1 7 1 2. + 1 + <_> + + <_> + 7 9 9 8 -1. + <_> + 10 9 3 8 3. + <_> + + <_> + 1 7 4 8 -1. + <_> + 3 7 2 8 2. + <_> + + <_> + 17 11 4 6 -1. + <_> + 17 11 2 6 2. + <_> + + <_> + 8 12 6 6 -1. + <_> + 10 12 2 6 3. + <_> + + <_> + 11 0 3 6 -1. + <_> + 12 1 1 6 3. + 1 + <_> + + <_> + 11 0 6 3 -1. + <_> + 10 1 6 1 3. + 1 + <_> + + <_> + 9 14 9 4 -1. + <_> + 12 14 3 4 3. + <_> + + <_> + 8 2 6 4 -1. + <_> + 8 2 6 2 2. + 1 + <_> + + <_> + 10 10 4 6 -1. + <_> + 10 10 2 6 2. + <_> + + <_> + 1 8 18 2 -1. + <_> + 1 9 18 1 2. + <_> + + <_> + 8 8 14 3 -1. + <_> + 8 9 14 1 3. + <_> + + <_> + 3 15 14 3 -1. + <_> + 10 15 7 3 2. + <_> + + <_> + 8 8 14 3 -1. + <_> + 8 9 14 1 3. + <_> + + <_> + 4 14 9 4 -1. + <_> + 7 14 3 4 3. + <_> + + <_> + 10 6 4 8 -1. + <_> + 10 6 2 8 2. + 1 + <_> + + <_> + 2 11 18 3 -1. + <_> + 8 11 6 3 3. + <_> + + <_> + 10 0 12 4 -1. + <_> + 10 0 12 2 2. + 1 + <_> + + <_> + 6 6 16 4 -1. + <_> + 14 6 8 2 2. + <_> + 6 8 8 2 2. + <_> + + <_> + 6 3 4 14 -1. + <_> + 7 3 2 14 2. 
+ <_> + + <_> + 12 12 6 6 -1. + <_> + 14 12 2 6 3. + <_> + + <_> + 4 12 6 6 -1. + <_> + 6 12 2 6 3. + <_> + + <_> + 14 8 3 8 -1. + <_> + 14 12 3 4 2. + <_> + + <_> + 0 6 16 4 -1. + <_> + 0 6 8 2 2. + <_> + 8 8 8 2 2. + <_> + + <_> + 9 10 5 6 -1. + <_> + 9 13 5 3 2. + <_> + + <_> + 7 5 6 12 -1. + <_> + 7 5 3 6 2. + <_> + 10 11 3 6 2. + <_> + + <_> + 1 5 21 9 -1. + <_> + 8 8 7 3 9. + <_> + + <_> + 8 6 3 12 -1. + <_> + 9 6 1 12 3. + <_> + + <_> + 11 3 3 11 -1. + <_> + 12 4 1 11 3. + 1 + <_> + + <_> + 11 5 9 3 -1. + <_> + 10 6 9 1 3. + 1 + <_> + + <_> + 12 11 6 6 -1. + <_> + 12 13 6 2 3. + <_> + + <_> + 0 1 9 9 -1. + <_> + 3 1 3 9 3. + <_> + + <_> + 6 0 12 12 -1. + <_> + 9 0 6 12 2. + <_> + + <_> + 7 14 6 4 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 8 7 13 3 -1. + <_> + 8 8 13 1 3. + <_> + + <_> + 2 13 12 4 -1. + <_> + 5 13 6 4 2. + <_> + + <_> + 15 3 2 13 -1. + <_> + 15 3 1 13 2. + 1 + <_> + + <_> + 9 5 11 2 -1. + <_> + 9 5 11 1 2. + 1 + <_> + + <_> + 13 2 2 16 -1. + <_> + 13 10 2 8 2. + <_> + + <_> + 7 2 2 16 -1. + <_> + 7 10 2 8 2. + <_> + + <_> + 14 0 7 6 -1. + <_> + 12 2 7 2 3. + 1 + <_> + + <_> + 7 3 6 12 -1. + <_> + 7 3 3 6 2. + <_> + 10 9 3 6 2. + <_> + + <_> + 9 14 8 4 -1. + <_> + 9 16 8 2 2. + <_> + + <_> + 11 3 11 3 -1. + <_> + 10 4 11 1 3. + 1 + <_> + + <_> + 11 1 4 6 -1. + <_> + 12 2 2 6 2. + 1 + <_> + + <_> + 11 1 6 4 -1. + <_> + 10 2 6 2 2. + 1 + <_> + + <_> + 10 10 6 8 -1. + <_> + 12 10 2 8 3. + <_> + + <_> + 2 4 12 4 -1. + <_> + 2 4 6 2 2. + <_> + 8 6 6 2 2. + <_> + + <_> + 14 1 3 10 -1. + <_> + 15 2 1 10 3. + 1 + <_> + + <_> + 0 7 22 7 -1. + <_> + 11 7 11 7 2. + <_> + + <_> + 8 2 14 3 -1. + <_> + 8 3 14 1 3. + <_> + + <_> + 0 2 14 3 -1. + <_> + 0 3 14 1 3. + <_> + + <_> + 14 1 3 10 -1. + <_> + 15 2 1 10 3. + 1 + <_> + + <_> + 8 1 10 3 -1. + <_> + 7 2 10 1 3. + 1 + <_> + + <_> + 12 3 3 10 -1. + <_> + 13 4 1 10 3. + 1 + <_> + + <_> + 11 4 10 3 -1. + <_> + 10 5 10 1 3. + 1 + <_> + + <_> + 12 1 7 6 -1. + <_> + 12 3 7 2 3. + <_> + + <_> + 0 3 14 3 -1. + <_> + 0 4 14 1 3. + <_> + + <_> + 8 0 12 4 -1. + <_> + 14 0 6 2 2. + <_> + 8 2 6 2 2. + <_> + + <_> + 2 0 12 4 -1. + <_> + 2 0 6 2 2. + <_> + 8 2 6 2 2. + <_> + + <_> + 8 4 12 3 -1. + <_> + 8 5 12 1 3. + <_> + + <_> + 0 1 14 2 -1. + <_> + 7 1 7 2 2. + <_> + + <_> + 5 0 15 11 -1. + <_> + 10 0 5 11 3. + <_> + + <_> + 2 0 15 11 -1. + <_> + 7 0 5 11 3. + <_> + + <_> + 11 6 6 12 -1. + <_> + 14 6 3 6 2. + <_> + 11 12 3 6 2. + <_> + + <_> + 7 5 6 6 -1. + <_> + 9 5 2 6 3. + <_> + + <_> + 14 13 6 5 -1. + <_> + 14 13 3 5 2. + <_> + + <_> + 6 10 6 8 -1. + <_> + 8 10 2 8 3. + <_> + + <_> + 10 10 6 6 -1. + <_> + 12 10 2 6 3. + <_> + + <_> + 6 10 6 6 -1. + <_> + 8 10 2 6 3. + <_> + + <_> + 6 11 14 3 -1. + <_> + 6 11 7 3 2. + <_> + + <_> + 3 1 7 6 -1. + <_> + 3 3 7 2 3. + <_> + + <_> + 11 8 6 10 -1. + <_> + 14 8 3 5 2. + <_> + 11 13 3 5 2. + <_> + + <_> + 8 5 3 13 -1. + <_> + 9 5 1 13 3. + <_> + + <_> + 11 0 6 4 -1. + <_> + 11 0 3 4 2. + 1 + <_> + + <_> + 11 0 4 6 -1. + <_> + 11 0 4 3 2. + 1 + <_> + + <_> + 14 3 2 12 -1. + <_> + 14 3 2 6 2. + 1 + <_> + + <_> + 5 4 10 7 -1. + <_> + 10 4 5 7 2. + <_> + + <_> + 8 9 6 6 -1. + <_> + 10 9 2 6 3. + <_> + + <_> + 0 8 12 9 -1. + <_> + 4 11 4 3 9. + <_> + + <_> + 13 12 4 6 -1. + <_> + 13 15 4 3 2. + <_> + + <_> + 5 12 5 6 -1. + <_> + 5 15 5 3 2. + <_> + + <_> + 12 4 2 11 -1. + <_> + 12 4 1 11 2. + 1 + <_> + + <_> + 9 4 11 2 -1. + <_> + 9 4 11 1 2. + 1 + <_> + + <_> + 11 8 6 10 -1. + <_> + 14 8 3 5 2. + <_> + 11 13 3 5 2. + <_> + + <_> + 5 8 6 10 -1. + <_> + 5 8 3 5 2. + <_> + 8 13 3 5 2. 
+ <_> + + <_> + 11 7 6 10 -1. + <_> + 14 7 3 5 2. + <_> + 11 12 3 5 2. + <_> + + <_> + 2 1 18 3 -1. + <_> + 2 2 18 1 3. + <_> + + <_> + 16 4 6 7 -1. + <_> + 16 4 3 7 2. + <_> + + <_> + 5 7 6 10 -1. + <_> + 5 7 3 5 2. + <_> + 8 12 3 5 2. + <_> + + <_> + 12 0 3 14 -1. + <_> + 12 7 3 7 2. + <_> + + <_> + 7 10 8 7 -1. + <_> + 11 10 4 7 2. + <_> + + <_> + 8 0 12 3 -1. + <_> + 8 1 12 1 3. + <_> + + <_> + 3 0 13 4 -1. + <_> + 3 1 13 2 2. + <_> + + <_> + 7 11 12 4 -1. + <_> + 7 12 12 2 2. + <_> + + <_> + 0 0 8 18 -1. + <_> + 4 0 4 18 2. + <_> + + <_> + 14 13 6 5 -1. + <_> + 14 13 3 5 2. + <_> + + <_> + 0 5 22 4 -1. + <_> + 11 5 11 4 2. + <_> + + <_> + 11 2 10 9 -1. + <_> + 11 5 10 3 3. + <_> + + <_> + 1 2 10 9 -1. + <_> + 1 5 10 3 3. + <_> + + <_> + 18 6 2 12 -1. + <_> + 18 6 1 12 2. + <_> + + <_> + 2 6 2 12 -1. + <_> + 3 6 1 12 2. + <_> + + <_> + 15 6 4 12 -1. + <_> + 15 9 4 6 2. + <_> + + <_> + 3 6 4 12 -1. + <_> + 3 9 4 6 2. + <_> + + <_> + 14 13 6 5 -1. + <_> + 14 13 3 5 2. + <_> + + <_> + 2 13 6 5 -1. + <_> + 5 13 3 5 2. + <_> + + <_> + 8 12 12 5 -1. + <_> + 11 12 6 5 2. + <_> + + <_> + 2 12 12 5 -1. + <_> + 5 12 6 5 2. + <_> + + <_> + 12 12 6 6 -1. + <_> + 12 14 6 2 3. + <_> + + <_> + 0 10 16 8 -1. + <_> + 4 10 8 8 2. + <_> + + <_> + 13 1 8 8 -1. + <_> + 15 1 4 8 2. + <_> + + <_> + 1 1 8 8 -1. + <_> + 3 1 4 8 2. + <_> + + <_> + 14 8 3 8 -1. + <_> + 14 12 3 4 2. + <_> + + <_> + 10 4 7 6 -1. + <_> + 10 4 7 3 2. + 1 + <_> + + <_> + 9 10 4 8 -1. + <_> + 9 14 4 4 2. + <_> + + <_> + 5 8 3 8 -1. + <_> + 5 12 3 4 2. + <_> + + <_> + 6 9 4 9 -1. + <_> + 6 12 4 3 3. + <_> + + <_> + 6 3 16 4 -1. + <_> + 14 3 8 2 2. + <_> + 6 5 8 2 2. + <_> + + <_> + 1 3 20 4 -1. + <_> + 1 3 10 2 2. + <_> + 11 5 10 2 2. + <_> + + <_> + 9 5 6 12 -1. + <_> + 12 5 3 6 2. + <_> + 9 11 3 6 2. + <_> + + <_> + 1 6 2 12 -1. + <_> + 2 6 1 12 2. + <_> + + <_> + 19 0 2 16 -1. + <_> + 19 0 1 16 2. + <_> + + <_> + 1 0 2 16 -1. + <_> + 2 0 1 16 2. + <_> + + <_> + 13 5 5 9 -1. + <_> + 13 8 5 3 3. + <_> + + <_> + 5 16 12 2 -1. + <_> + 5 17 12 1 2. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 15 12 2 2. + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 7 5 13 2 -1. + <_> + 7 6 13 1 2. + <_> + + <_> + 8 1 12 2 -1. + <_> + 8 1 12 1 2. + 1 + <_> + + <_> + 0 4 22 8 -1. + <_> + 11 4 11 4 2. + <_> + 0 8 11 4 2. + <_> + + <_> + 2 3 6 4 -1. + <_> + 5 3 3 4 2. + <_> + + <_> + 7 11 15 3 -1. + <_> + 7 12 15 1 3. + <_> + + <_> + 5 7 6 7 -1. + <_> + 8 7 3 7 2. + <_> + + <_> + 7 12 12 4 -1. + <_> + 13 12 6 2 2. + <_> + 7 14 6 2 2. + <_> + + <_> + 0 11 16 2 -1. + <_> + 8 11 8 2 2. + <_> + + <_> + 18 3 4 10 -1. + <_> + 18 3 4 5 2. + 1 + <_> + + <_> + 2 2 17 3 -1. + <_> + 2 3 17 1 3. + <_> + + <_> + 10 14 12 4 -1. + <_> + 16 14 6 2 2. + <_> + 10 16 6 2 2. + <_> + + <_> + 1 9 11 6 -1. + <_> + 1 11 11 2 3. + <_> + + <_> + 4 9 18 3 -1. + <_> + 4 10 18 1 3. + <_> + + <_> + 0 9 18 3 -1. + <_> + 0 10 18 1 3. + <_> + + <_> + 11 5 11 12 -1. + <_> + 11 11 11 6 2. + <_> + + <_> + 5 12 6 6 -1. + <_> + 5 14 6 2 3. + <_> + + <_> + 14 10 6 8 -1. + <_> + 17 10 3 4 2. + <_> + 14 14 3 4 2. + <_> + + <_> + 0 5 11 12 -1. + <_> + 0 11 11 6 2. + <_> + + <_> + 15 3 2 12 -1. + <_> + 15 3 2 6 2. + 1 + <_> + + <_> + 3 0 12 4 -1. + <_> + 3 0 6 2 2. + <_> + 9 2 6 2 2. + <_> + + <_> + 14 10 6 8 -1. + <_> + 17 10 3 4 2. + <_> + 14 14 3 4 2. + <_> + + <_> + 5 12 8 6 -1. + <_> + 5 12 4 3 2. + <_> + 9 15 4 3 2. + <_> + + <_> + 8 11 10 5 -1. + <_> + 8 11 5 5 2. + <_> + + <_> + 4 11 10 5 -1. + <_> + 9 11 5 5 2. + <_> + + <_> + 6 6 12 12 -1. + <_> + 12 6 6 6 2. 
+ <_> + 6 12 6 6 2. + <_> + + <_> + 7 10 6 8 -1. + <_> + 7 12 6 4 2. + <_> + + <_> + 7 8 15 10 -1. + <_> + 7 13 15 5 2. + <_> + + <_> + 0 0 22 4 -1. + <_> + 0 0 11 2 2. + <_> + 11 2 11 2 2. + <_> + + <_> + 10 3 12 3 -1. + <_> + 10 4 12 1 3. + <_> + + <_> + 0 3 13 3 -1. + <_> + 0 4 13 1 3. + <_> + + <_> + 9 3 4 12 -1. + <_> + 9 6 4 6 2. + <_> + + <_> + 4 5 9 6 -1. + <_> + 4 8 9 3 2. + <_> + + <_> + 11 6 2 9 -1. + <_> + 11 6 1 9 2. + 1 + <_> + + <_> + 9 2 4 8 -1. + <_> + 9 6 4 4 2. + <_> + + <_> + 7 0 8 10 -1. + <_> + 7 5 8 5 2. + <_> + + <_> + 11 5 9 2 -1. + <_> + 11 5 9 1 2. + 1 + <_> + + <_> + 17 0 3 11 -1. + <_> + 18 1 1 11 3. + 1 + <_> + + <_> + 5 0 11 3 -1. + <_> + 4 1 11 1 3. + 1 + <_> + + <_> + 9 6 4 7 -1. + <_> + 9 6 2 7 2. + <_> + + <_> + 3 11 6 6 -1. + <_> + 3 13 6 2 3. + <_> + + <_> + 6 10 16 8 -1. + <_> + 6 12 16 4 2. + <_> + + <_> + 11 6 9 3 -1. + <_> + 10 7 9 1 3. + 1 + <_> + + <_> + 12 11 8 6 -1. + <_> + 12 13 8 2 3. + <_> + + <_> + 0 10 16 8 -1. + <_> + 0 12 16 4 2. + <_> + + <_> + 10 14 12 4 -1. + <_> + 16 14 6 2 2. + <_> + 10 16 6 2 2. + <_> + + <_> + 2 11 8 6 -1. + <_> + 2 13 8 2 3. + <_> + + <_> + 6 11 16 4 -1. + <_> + 14 11 8 2 2. + <_> + 6 13 8 2 2. + <_> + + <_> + 0 11 22 6 -1. + <_> + 11 11 11 6 2. + <_> + + <_> + 14 10 6 8 -1. + <_> + 17 10 3 4 2. + <_> + 14 14 3 4 2. + <_> + + <_> + 2 10 6 8 -1. + <_> + 2 10 3 4 2. + <_> + 5 14 3 4 2. + <_> + + <_> + 6 4 15 12 -1. + <_> + 11 8 5 4 9. + <_> + + <_> + 0 4 18 12 -1. + <_> + 6 8 6 4 9. + <_> + + <_> + 15 7 2 8 -1. + <_> + 15 7 1 8 2. + 1 + <_> + + <_> + 3 3 10 3 -1. + <_> + 2 4 10 1 3. + 1 + <_> + + <_> + 4 2 14 3 -1. + <_> + 4 3 14 1 3. + <_> + + <_> + 10 8 8 2 -1. + <_> + 10 8 8 1 2. + 1 + <_> + + <_> + 15 5 4 7 -1. + <_> + 15 5 2 7 2. + 1 + <_> + + <_> + 3 6 5 6 -1. + <_> + 3 9 5 3 2. + <_> + + <_> + 14 1 8 6 -1. + <_> + 18 1 4 3 2. + <_> + 14 4 4 3 2. + <_> + + <_> + 0 1 8 6 -1. + <_> + 0 1 4 3 2. + <_> + 4 4 4 3 2. + <_> + + <_> + 17 0 4 12 -1. + <_> + 18 0 2 12 2. + <_> + + <_> + 1 0 4 12 -1. + <_> + 2 0 2 12 2. + <_> + + <_> + 9 16 12 2 -1. + <_> + 9 17 12 1 2. + <_> + + <_> + 1 16 12 2 -1. + <_> + 1 17 12 1 2. + <_> + + <_> + 10 15 12 3 -1. + <_> + 10 16 12 1 3. + <_> + + <_> + 0 15 12 3 -1. + <_> + 0 16 12 1 3. + <_> + + <_> + 10 14 12 4 -1. + <_> + 16 14 6 2 2. + <_> + 10 16 6 2 2. + <_> + + <_> + 0 14 12 4 -1. + <_> + 0 14 6 2 2. + <_> + 6 16 6 2 2. + <_> + + <_> + 9 11 12 4 -1. + <_> + 15 11 6 2 2. + <_> + 9 13 6 2 2. + <_> + + <_> + 0 11 16 4 -1. + <_> + 0 11 8 2 2. + <_> + 8 13 8 2 2. + <_> + + <_> + 8 12 9 6 -1. + <_> + 8 14 9 2 3. + <_> + + <_> + 5 12 9 6 -1. + <_> + 5 14 9 2 3. + <_> + + <_> + 4 5 16 2 -1. + <_> + 4 5 8 2 2. + <_> + + <_> + 1 10 10 8 -1. + <_> + 1 10 5 4 2. + <_> + 6 14 5 4 2. + <_> + + <_> + 16 2 5 9 -1. + <_> + 13 5 5 3 3. + 1 + <_> + + <_> + 4 4 4 6 -1. + <_> + 6 4 2 6 2. + <_> + + <_> + 9 2 9 7 -1. + <_> + 12 2 3 7 3. + <_> + + <_> + 4 2 9 7 -1. + <_> + 7 2 3 7 3. + <_> + + <_> + 16 2 5 9 -1. + <_> + 13 5 5 3 3. + 1 + <_> + + <_> + 6 2 9 5 -1. + <_> + 9 5 3 5 3. + 1 + <_> + + <_> + 5 12 14 6 -1. + <_> + 5 14 14 2 3. + <_> + + <_> + 6 4 4 12 -1. + <_> + 6 4 2 6 2. + <_> + 8 10 2 6 2. + <_> + + <_> + 9 4 10 8 -1. + <_> + 9 4 5 8 2. + <_> + + <_> + 7 5 6 8 -1. + <_> + 7 5 3 4 2. + <_> + 10 9 3 4 2. + <_> + + <_> + 8 7 6 8 -1. + <_> + 11 7 3 4 2. + <_> + 8 11 3 4 2. + <_> + + <_> + 2 4 11 2 -1. + <_> + 2 4 11 1 2. + 1 + <_> + + <_> + 16 0 3 13 -1. + <_> + 17 0 1 13 3. + <_> + + <_> + 2 0 18 3 -1. + <_> + 2 1 18 1 3. + <_> + + <_> + 15 8 6 4 -1. + <_> + 15 8 3 4 2. 
+ <_> + + <_> + 2 0 13 3 -1. + <_> + 2 1 13 1 3. + <_> + + <_> + 4 4 18 4 -1. + <_> + 4 6 18 2 2. + <_> + + <_> + 3 3 10 9 -1. + <_> + 8 3 5 9 2. + <_> + + <_> + 2 7 18 6 -1. + <_> + 8 9 6 2 9. + <_> + + <_> + 10 4 11 2 -1. + <_> + 10 4 11 1 2. + 1 + <_> + + <_> + 14 6 6 12 -1. + <_> + 17 6 3 6 2. + <_> + 14 12 3 6 2. + <_> + + <_> + 2 6 6 12 -1. + <_> + 2 6 3 6 2. + <_> + 5 12 3 6 2. + <_> + + <_> + 3 4 16 6 -1. + <_> + 3 6 16 2 3. + <_> + + <_> + 1 11 16 3 -1. + <_> + 5 11 8 3 2. + <_> + + <_> + 12 10 8 3 -1. + <_> + 12 10 4 3 2. + <_> + + <_> + 0 9 17 9 -1. + <_> + 0 12 17 3 3. + <_> + + <_> + 8 4 6 10 -1. + <_> + 11 4 3 5 2. + <_> + 8 9 3 5 2. + <_> + + <_> + 2 4 16 8 -1. + <_> + 2 4 8 4 2. + <_> + 10 8 8 4 2. + <_> + + <_> + 9 6 12 4 -1. + <_> + 15 6 6 2 2. + <_> + 9 8 6 2 2. + <_> + + <_> + 9 3 4 6 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 15 5 7 4 -1. + <_> + 15 5 7 2 2. + 1 + <_> + + <_> + 0 6 18 6 -1. + <_> + 0 6 9 3 2. + <_> + 9 9 9 3 2. + <_> + + <_> + 4 2 15 3 -1. + <_> + 4 3 15 1 3. + <_> + + <_> + 2 0 6 6 -1. + <_> + 5 0 3 6 2. + <_> + + <_> + 13 4 8 6 -1. + <_> + 17 4 4 3 2. + <_> + 13 7 4 3 2. + <_> + + <_> + 4 2 13 6 -1. + <_> + 4 4 13 2 3. + <_> + + <_> + 9 8 12 3 -1. + <_> + 9 9 12 1 3. + <_> + + <_> + 1 8 16 3 -1. + <_> + 1 9 16 1 3. + <_> + + <_> + 11 4 5 8 -1. + <_> + 11 8 5 4 2. + <_> + + <_> + 3 4 11 2 -1. + <_> + 3 4 11 1 2. + 1 + <_> + + <_> + 10 7 12 3 -1. + <_> + 10 8 12 1 3. + <_> + + <_> + 9 3 7 8 -1. + <_> + 9 3 7 4 2. + 1 + <_> + + <_> + 13 2 2 12 -1. + <_> + 13 2 2 6 2. + 1 + <_> + + <_> + 0 9 12 4 -1. + <_> + 0 9 6 2 2. + <_> + 6 11 6 2 2. + <_> + + <_> + 11 7 8 6 -1. + <_> + 13 7 4 6 2. + <_> + + <_> + 0 8 6 6 -1. + <_> + 2 8 2 6 3. + <_> + + <_> + 11 7 8 6 -1. + <_> + 13 7 4 6 2. + <_> + + <_> + 3 7 8 6 -1. + <_> + 5 7 4 6 2. + <_> + + <_> + 10 6 6 4 -1. + <_> + 10 6 3 4 2. + <_> + + <_> + 4 8 12 10 -1. + <_> + 4 8 6 5 2. + <_> + 10 13 6 5 2. + <_> + + <_> + 15 7 6 10 -1. + <_> + 17 7 2 10 3. + <_> + + <_> + 6 14 6 4 -1. + <_> + 9 14 3 4 2. + <_> + + <_> + 8 13 10 4 -1. + <_> + 8 13 5 4 2. + <_> + + <_> + 2 0 4 18 -1. + <_> + 4 0 2 18 2. + <_> + + <_> + 11 0 8 10 -1. + <_> + 11 0 8 5 2. + 1 + <_> + + <_> + 0 7 12 3 -1. + <_> + 0 8 12 1 3. + <_> + + <_> + 17 0 2 10 -1. + <_> + 17 0 1 10 2. + 1 + <_> + + <_> + 5 6 6 4 -1. + <_> + 5 8 6 2 2. + <_> + + <_> + 15 10 7 6 -1. + <_> + 15 12 7 2 3. + <_> + + <_> + 0 10 7 6 -1. + <_> + 0 12 7 2 3. + <_> + + <_> + 13 12 6 6 -1. + <_> + 15 12 2 6 3. + <_> + + <_> + 1 11 20 7 -1. + <_> + 11 11 10 7 2. + <_> + + <_> + 13 5 4 9 -1. + <_> + 13 8 4 3 3. + <_> + + <_> + 2 12 8 6 -1. + <_> + 2 12 4 3 2. + <_> + 6 15 4 3 2. + <_> + + <_> + 9 14 6 4 -1. + <_> + 9 16 6 2 2. + <_> + + <_> + 7 12 8 6 -1. + <_> + 7 12 4 3 2. + <_> + 11 15 4 3 2. + <_> + + <_> + 6 1 12 14 -1. + <_> + 12 1 6 7 2. + <_> + 6 8 6 7 2. + <_> + + <_> + 5 5 4 9 -1. + <_> + 5 8 4 3 3. + <_> + + <_> + 5 13 12 4 -1. + <_> + 11 13 6 2 2. + <_> + 5 15 6 2 2. + <_> + + <_> + 9 7 8 3 -1. + <_> + 8 8 8 1 3. + 1 + <_> + + <_> + 7 5 8 10 -1. + <_> + 7 10 8 5 2. + <_> + + <_> + 7 1 8 3 -1. + <_> + 6 2 8 1 3. + 1 + <_> + + <_> + 10 14 12 3 -1. + <_> + 10 15 12 1 3. + <_> + + <_> + 0 6 18 12 -1. + <_> + 0 12 18 6 2. + <_> + + <_> + 9 8 6 6 -1. + <_> + 9 11 6 3 2. + <_> + + <_> + 3 2 4 12 -1. + <_> + 3 2 2 6 2. + <_> + 5 8 2 6 2. + <_> + + <_> + 13 2 2 12 -1. + <_> + 13 2 2 6 2. + 1 + <_> + + <_> + 2 4 6 8 -1. + <_> + 2 4 3 4 2. + <_> + 5 8 3 4 2. + <_> + + <_> + 14 10 4 6 -1. + <_> + 14 10 2 6 2. + <_> + + <_> + 0 0 2 12 -1. + <_> + 0 6 2 6 2. 
+ <_> + + <_> + 13 2 2 12 -1. + <_> + 13 2 2 6 2. + 1 + <_> + + <_> + 9 2 12 2 -1. + <_> + 9 2 6 2 2. + 1 + <_> + + <_> + 10 9 12 4 -1. + <_> + 16 9 6 2 2. + <_> + 10 11 6 2 2. + <_> + + <_> + 0 9 12 4 -1. + <_> + 0 9 6 2 2. + <_> + 6 11 6 2 2. + <_> + + <_> + 17 9 4 9 -1. + <_> + 17 12 4 3 3. + <_> + + <_> + 1 9 10 6 -1. + <_> + 1 9 5 3 2. + <_> + 6 12 5 3 2. + <_> + + <_> + 8 12 9 4 -1. + <_> + 8 14 9 2 2. + <_> + + <_> + 2 8 6 10 -1. + <_> + 2 8 3 5 2. + <_> + 5 13 3 5 2. + <_> + + <_> + 7 10 12 6 -1. + <_> + 10 10 6 6 2. + <_> + + <_> + 3 10 12 6 -1. + <_> + 6 10 6 6 2. + <_> + + <_> + 20 0 2 12 -1. + <_> + 20 6 2 6 2. + <_> + + <_> + 0 0 2 12 -1. + <_> + 0 6 2 6 2. + <_> + + <_> + 14 3 4 15 -1. + <_> + 14 3 2 15 2. + <_> + + <_> + 0 1 16 14 -1. + <_> + 0 1 8 7 2. + <_> + 8 8 8 7 2. + <_> + + <_> + 11 0 8 10 -1. + <_> + 11 0 8 5 2. + 1 + <_> + + <_> + 0 3 16 4 -1. + <_> + 0 3 8 2 2. + <_> + 8 5 8 2 2. + <_> + + <_> + 13 0 7 12 -1. + <_> + 13 4 7 4 3. + <_> + + <_> + 5 3 11 15 -1. + <_> + 5 8 11 5 3. + <_> + + <_> + 13 0 7 12 -1. + <_> + 13 4 7 4 3. + <_> + + <_> + 2 0 7 12 -1. + <_> + 2 4 7 4 3. + <_> + + <_> + 4 5 18 12 -1. + <_> + 10 9 6 4 9. + <_> + + <_> + 4 7 14 6 -1. + <_> + 4 7 7 3 2. + <_> + 11 10 7 3 2. + <_> + + <_> + 7 9 13 3 -1. + <_> + 7 10 13 1 3. + <_> + + <_> + 2 9 13 3 -1. + <_> + 2 10 13 1 3. + <_> + + <_> + 5 9 17 3 -1. + <_> + 5 10 17 1 3. + <_> + + <_> + 1 1 10 9 -1. + <_> + 1 4 10 3 3. + <_> + + <_> + 4 1 16 8 -1. + <_> + 4 3 16 4 2. + <_> + + <_> + 6 5 6 12 -1. + <_> + 8 5 2 12 3. + <_> + + <_> + 11 7 6 5 -1. + <_> + 11 7 3 5 2. + 1 + <_> + + <_> + 5 4 9 5 -1. + <_> + 8 4 3 5 3. + <_> + + <_> + 2 12 18 4 -1. + <_> + 11 12 9 2 2. + <_> + 2 14 9 2 2. + <_> + + <_> + 11 4 9 3 -1. + <_> + 10 5 9 1 3. + 1 + <_> + + <_> + 15 0 2 10 -1. + <_> + 15 0 1 10 2. + 1 + <_> + + <_> + 0 5 18 12 -1. + <_> + 6 9 6 4 9. + <_> + + <_> + 14 9 4 6 -1. + <_> + 14 9 2 6 2. + <_> + + <_> + 5 6 3 12 -1. + <_> + 5 10 3 4 3. + <_> + + <_> + 11 0 3 9 -1. + <_> + 12 1 1 9 3. + 1 + <_> + + <_> + 1 9 4 9 -1. + <_> + 1 12 4 3 3. + <_> + + <_> + 18 9 4 9 -1. + <_> + 18 12 4 3 3. + <_> + + <_> + 6 9 6 4 -1. + <_> + 9 9 3 4 2. + <_> + + <_> + 11 0 3 9 -1. + <_> + 12 1 1 9 3. + 1 + <_> + + <_> + 11 0 9 3 -1. + <_> + 10 1 9 1 3. + 1 + <_> + + <_> + 5 15 12 2 -1. + <_> + 5 16 12 1 2. + <_> + + <_> + 0 0 22 2 -1. + <_> + 11 0 11 2 2. + <_> + + <_> + 20 0 2 13 -1. + <_> + 20 0 1 13 2. + <_> + + <_> + 0 0 2 13 -1. + <_> + 1 0 1 13 2. + <_> + + <_> + 10 1 6 6 -1. + <_> + 12 1 2 6 3. + <_> + + <_> + 6 1 6 6 -1. + <_> + 8 1 2 6 3. + <_> + + <_> + 10 7 12 3 -1. + <_> + 10 8 12 1 3. + <_> + + <_> + 0 7 12 3 -1. + <_> + 0 8 12 1 3. + <_> + + <_> + 1 9 8 6 -1. + <_> + 1 9 4 3 2. + <_> + 5 12 4 3 2. + <_> + + <_> + 10 10 7 4 -1. + <_> + 10 12 7 2 2. + <_> + + <_> + 8 10 4 6 -1. + <_> + 10 10 2 6 2. + <_> + + <_> + 13 6 8 4 -1. + <_> + 13 6 4 4 2. + 1 + <_> + + <_> + 10 1 8 7 -1. + <_> + 12 3 4 7 2. + 1 + <_> + + <_> + 8 5 8 7 -1. + <_> + 8 5 4 7 2. + <_> + + <_> + 6 5 8 7 -1. + <_> + 10 5 4 7 2. + <_> + + <_> + 6 3 16 12 -1. + <_> + 14 3 8 6 2. + <_> + 6 9 8 6 2. + <_> + + <_> + 4 11 6 6 -1. + <_> + 4 13 6 2 3. + <_> + + <_> + 4 2 18 14 -1. + <_> + 13 2 9 7 2. + <_> + 4 9 9 7 2. + <_> + + <_> + 5 0 11 12 -1. + <_> + 5 3 11 6 2. + <_> + + <_> + 4 7 16 9 -1. + <_> + 4 10 16 3 3. + <_> + + <_> + 0 1 18 3 -1. + <_> + 0 2 18 1 3. + <_> + + <_> + 12 13 6 4 -1. + <_> + 12 15 6 2 2. + <_> + + <_> + 1 10 6 8 -1. + <_> + 1 10 3 4 2. + <_> + 4 14 3 4 2. + <_> + + <_> + 14 12 8 6 -1. + <_> + 18 12 4 3 2. 
+ <_> + 14 15 4 3 2. + <_> + + <_> + 9 3 12 3 -1. + <_> + 13 7 4 3 3. + 1 + <_> + + <_> + 8 12 6 6 -1. + <_> + 8 12 3 6 2. + <_> + + <_> + 4 8 14 10 -1. + <_> + 4 13 14 5 2. + <_> + + <_> + 11 2 8 8 -1. + <_> + 11 2 4 8 2. + 1 + <_> + + <_> + 9 6 4 8 -1. + <_> + 9 6 4 4 2. + 1 + <_> + + <_> + 18 3 4 10 -1. + <_> + 18 3 4 5 2. + 1 + <_> + + <_> + 5 15 12 3 -1. + <_> + 9 15 4 3 3. + <_> + + <_> + 11 8 4 6 -1. + <_> + 11 8 4 3 2. + 1 + <_> + + <_> + 11 8 6 4 -1. + <_> + 11 8 3 4 2. + 1 + <_> + + <_> + 3 13 16 5 -1. + <_> + 7 13 8 5 2. + <_> + + <_> + 6 2 4 12 -1. + <_> + 6 2 2 6 2. + <_> + 8 8 2 6 2. + <_> + + <_> + 2 14 18 4 -1. + <_> + 11 14 9 2 2. + <_> + 2 16 9 2 2. + <_> + + <_> + 3 1 12 3 -1. + <_> + 3 2 12 1 3. + <_> + + <_> + 6 1 16 3 -1. + <_> + 6 2 16 1 3. + <_> + + <_> + 5 3 8 3 -1. + <_> + 9 3 4 3 2. + <_> + + <_> + 16 3 4 6 -1. + <_> + 16 3 4 3 2. + 1 + <_> + + <_> + 4 3 10 4 -1. + <_> + 4 3 5 4 2. + 1 + <_> + + <_> + 14 5 6 8 -1. + <_> + 17 5 3 4 2. + <_> + 14 9 3 4 2. + <_> + + <_> + 1 2 14 12 -1. + <_> + 1 5 14 6 2. + <_> + + <_> + 11 2 6 12 -1. + <_> + 11 5 6 6 2. + <_> + + <_> + 5 2 6 12 -1. + <_> + 5 5 6 6 2. + <_> + + <_> + 11 5 8 5 -1. + <_> + 11 5 4 5 2. + 1 + <_> + + <_> + 4 0 9 18 -1. + <_> + 7 0 3 18 3. + <_> + + <_> + 11 14 6 4 -1. + <_> + 11 16 6 2 2. + <_> + + <_> + 5 14 6 4 -1. + <_> + 5 16 6 2 2. + <_> + + <_> + 12 13 6 4 -1. + <_> + 12 15 6 2 2. + <_> + + <_> + 1 6 13 3 -1. + <_> + 1 7 13 1 3. + <_> + + <_> + 10 6 12 3 -1. + <_> + 10 7 12 1 3. + <_> + + <_> + 1 8 6 4 -1. + <_> + 4 8 3 4 2. + <_> + + <_> + 14 12 6 6 -1. + <_> + 16 12 2 6 3. + <_> + + <_> + 2 12 6 6 -1. + <_> + 4 12 2 6 3. + <_> + + <_> + 7 15 12 3 -1. + <_> + 11 15 4 3 3. + <_> + + <_> + 1 12 8 5 -1. + <_> + 5 12 4 5 2. + <_> + + <_> + 14 5 6 8 -1. + <_> + 17 5 3 4 2. + <_> + 14 9 3 4 2. + <_> + + <_> + 2 5 6 8 -1. + <_> + 2 5 3 4 2. + <_> + 5 9 3 4 2. + <_> + + <_> + 14 11 8 6 -1. + <_> + 18 11 4 3 2. + <_> + 14 14 4 3 2. + <_> + + <_> + 4 0 8 6 -1. + <_> + 4 0 4 3 2. + <_> + 8 3 4 3 2. + <_> + + <_> + 14 3 7 4 -1. + <_> + 14 3 7 2 2. + 1 + <_> + + <_> + 0 11 8 6 -1. + <_> + 0 11 4 3 2. + <_> + 4 14 4 3 2. + <_> + + <_> + 4 13 14 4 -1. + <_> + 4 15 14 2 2. + <_> + + <_> + 5 3 9 8 -1. + <_> + 8 3 3 8 3. + <_> + + <_> + 5 0 15 8 -1. + <_> + 10 0 5 8 3. + <_> + + <_> + 2 0 15 8 -1. + <_> + 7 0 5 8 3. + <_> + + <_> + 14 0 6 11 -1. + <_> + 16 0 2 11 3. + <_> + + <_> + 0 16 18 2 -1. + <_> + 6 16 6 2 3. + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 8 3 4 7 -1. + <_> + 8 3 2 7 2. + 1 + <_> + + <_> + 10 3 6 8 -1. + <_> + 12 3 2 8 3. + <_> + + <_> + 6 3 6 8 -1. + <_> + 8 3 2 8 3. + <_> + + <_> + 7 13 12 4 -1. + <_> + 7 15 12 2 2. + <_> + + <_> + 3 9 16 8 -1. + <_> + 3 9 8 4 2. + <_> + 11 13 8 4 2. + <_> + + <_> + 9 0 13 3 -1. + <_> + 9 1 13 1 3. + <_> + + <_> + 4 0 4 12 -1. + <_> + 4 0 2 6 2. + <_> + 6 6 2 6 2. + <_> + + <_> + 1 11 20 4 -1. + <_> + 6 11 10 4 2. + <_> + + <_> + 3 14 6 4 -1. + <_> + 6 14 3 4 2. + <_> + + <_> + 10 6 12 3 -1. + <_> + 10 7 12 1 3. + <_> + + <_> + 0 6 12 3 -1. + <_> + 0 7 12 1 3. + <_> + + <_> + 6 2 14 6 -1. + <_> + 6 4 14 2 3. + <_> + + <_> + 4 1 6 4 -1. + <_> + 4 1 6 2 2. + 1 + <_> + + <_> + 1 0 21 18 -1. + <_> + 8 0 7 18 3. + <_> + + <_> + 5 0 14 2 -1. + <_> + 5 0 7 2 2. + 1 + <_> + + <_> + 14 8 4 9 -1. + <_> + 14 11 4 3 3. + <_> + + <_> + 2 0 6 10 -1. + <_> + 4 0 2 10 3. + <_> + + <_> + 5 11 12 4 -1. + <_> + 11 11 6 2 2. + <_> + 5 13 6 2 2. + <_> + + <_> + 8 5 4 6 -1. + <_> + 10 5 2 6 2. + <_> + + <_> + 7 1 15 9 -1. + <_> + 12 4 5 3 9. 
+ <_> + + <_> + 0 1 15 9 -1. + <_> + 5 4 5 3 9. + <_> + + <_> + 5 0 12 16 -1. + <_> + 11 0 6 8 2. + <_> + 5 8 6 8 2. + <_> + + <_> + 8 10 6 5 -1. + <_> + 11 10 3 5 2. + <_> + + <_> + 10 4 8 9 -1. + <_> + 10 7 8 3 3. + <_> + + <_> + 4 4 8 9 -1. + <_> + 4 7 8 3 3. + <_> + + <_> + 8 3 12 3 -1. + <_> + 8 4 12 1 3. + <_> + + <_> + 0 3 13 3 -1. + <_> + 0 4 13 1 3. + <_> + + <_> + 10 1 12 3 -1. + <_> + 14 1 4 3 3. + <_> + + <_> + 0 1 12 3 -1. + <_> + 4 1 4 3 3. + <_> + + <_> + 8 3 12 3 -1. + <_> + 8 4 12 1 3. + <_> + + <_> + 8 4 6 4 -1. + <_> + 8 4 3 4 2. + 1 + <_> + + <_> + 13 2 2 11 -1. + <_> + 13 2 1 11 2. + 1 + <_> + + <_> + 9 2 11 2 -1. + <_> + 9 2 11 1 2. + 1 + <_> + + <_> + 11 1 3 16 -1. + <_> + 11 9 3 8 2. + <_> + + <_> + 7 1 4 9 -1. + <_> + 7 4 4 3 3. + <_> + + <_> + 12 4 4 8 -1. + <_> + 12 8 4 4 2. + <_> + + <_> + 1 7 6 4 -1. + <_> + 1 9 6 2 2. + <_> + + <_> + 12 4 4 8 -1. + <_> + 12 8 4 4 2. + <_> + + <_> + 6 4 4 8 -1. + <_> + 6 8 4 4 2. + <_> + + <_> + 19 3 3 12 -1. + <_> + 20 4 1 12 3. + 1 + <_> + + <_> + 3 3 12 3 -1. + <_> + 2 4 12 1 3. + 1 + <_> + + <_> + 13 6 3 7 -1. + <_> + 14 7 1 7 3. + 1 + <_> + + <_> + 8 12 6 4 -1. + <_> + 11 12 3 4 2. + <_> + + <_> + 10 8 10 10 -1. + <_> + 15 8 5 5 2. + <_> + 10 13 5 5 2. + <_> + + <_> + 2 8 10 10 -1. + <_> + 2 8 5 5 2. + <_> + 7 13 5 5 2. + <_> + + <_> + 1 11 20 3 -1. + <_> + 6 11 10 3 2. + <_> + + <_> + 13 8 6 4 -1. + <_> + 13 8 3 4 2. + 1 + <_> + + <_> + 4 11 8 4 -1. + <_> + 8 11 4 4 2. + <_> + + <_> + 9 5 10 6 -1. + <_> + 9 5 5 6 2. + <_> + + <_> + 4 8 6 9 -1. + <_> + 7 8 3 9 2. + <_> + + <_> + 4 5 16 4 -1. + <_> + 4 5 8 4 2. + <_> + + <_> + 2 4 18 6 -1. + <_> + 8 6 6 2 9. + <_> + + <_> + 11 1 2 11 -1. + <_> + 11 1 1 11 2. + 1 + <_> + + <_> + 7 1 6 8 -1. + <_> + 7 1 3 4 2. + <_> + 10 5 3 4 2. + <_> + + <_> + 7 10 8 6 -1. + <_> + 9 10 4 6 2. + <_> + + <_> + 6 12 9 4 -1. + <_> + 9 12 3 4 3. + <_> + + <_> + 10 12 9 4 -1. + <_> + 13 12 3 4 3. + <_> + + <_> + 8 0 10 8 -1. + <_> + 8 0 5 8 2. + 1 + <_> + + <_> + 9 6 12 4 -1. + <_> + 15 6 6 2 2. + <_> + 9 8 6 2 2. + <_> + + <_> + 4 9 14 5 -1. + <_> + 11 9 7 5 2. + <_> + + <_> + 14 6 6 6 -1. + <_> + 12 8 6 2 3. + 1 + <_> + + <_> + 6 4 6 7 -1. + <_> + 8 4 2 7 3. + <_> + + <_> + 14 9 6 6 -1. + <_> + 14 12 6 3 2. + <_> + + <_> + 2 9 6 6 -1. + <_> + 2 12 6 3 2. + <_> + + <_> + 13 8 4 8 -1. + <_> + 13 8 2 8 2. + <_> + + <_> + 5 8 4 9 -1. + <_> + 7 8 2 9 2. + <_> + + <_> + 2 4 18 12 -1. + <_> + 8 8 6 4 9. + <_> + + <_> + 3 5 10 6 -1. + <_> + 8 5 5 6 2. + <_> + + <_> + 6 0 12 8 -1. + <_> + 6 0 6 8 2. + <_> + + <_> + 0 11 8 7 -1. + <_> + 2 11 4 7 2. + <_> + + <_> + 15 11 6 7 -1. + <_> + 17 11 2 7 3. + <_> + + <_> + 3 16 14 2 -1. + <_> + 3 17 14 1 2. + <_> + + <_> + 9 15 13 3 -1. + <_> + 9 16 13 1 3. + <_> + + <_> + 0 15 13 3 -1. + <_> + 0 16 13 1 3. + <_> + + <_> + 5 13 12 3 -1. + <_> + 5 14 12 1 3. + <_> + + <_> + 0 14 14 3 -1. + <_> + 0 15 14 1 3. + <_> + + <_> + 13 5 6 6 -1. + <_> + 15 5 2 6 3. + <_> + + <_> + 3 5 6 6 -1. + <_> + 5 5 2 6 3. + <_> + + <_> + 2 3 20 4 -1. + <_> + 7 3 10 4 2. + <_> + + <_> + 4 13 12 2 -1. + <_> + 4 14 12 1 2. + <_> + + <_> + 9 6 9 6 -1. + <_> + 12 6 3 6 3. + <_> + + <_> + 8 5 6 7 -1. + <_> + 10 5 2 7 3. + <_> + + <_> + 15 0 3 10 -1. + <_> + 16 1 1 10 3. + 1 + <_> + + <_> + 7 0 10 3 -1. + <_> + 6 1 10 1 3. + 1 + <_> + + <_> + 11 4 8 6 -1. + <_> + 15 4 4 3 2. + <_> + 11 7 4 3 2. + <_> + + <_> + 7 0 12 3 -1. + <_> + 6 1 12 1 3. + 1 + <_> + + <_> + 19 4 3 11 -1. + <_> + 20 5 1 11 3. + 1 + <_> + + <_> + 1 11 6 7 -1. + <_> + 3 11 2 7 3. + <_> + + <_> + 7 4 15 14 -1. 
+ <_> + 7 11 15 7 2. + <_> + + <_> + 3 4 11 3 -1. + <_> + 2 5 11 1 3. + 1 + <_> + + <_> + 14 6 3 8 -1. + <_> + 15 7 1 8 3. + 1 + <_> + + <_> + 3 0 3 18 -1. + <_> + 4 0 1 18 3. + <_> + + <_> + 14 3 8 4 -1. + <_> + 14 3 8 2 2. + 1 + <_> + + <_> + 8 3 4 8 -1. + <_> + 8 3 2 8 2. + 1 + <_> + + <_> + 18 2 4 12 -1. + <_> + 15 5 4 6 2. + 1 + <_> + + <_> + 2 9 17 3 -1. + <_> + 2 10 17 1 3. + <_> + + <_> + 7 9 14 3 -1. + <_> + 7 10 14 1 3. + <_> + + <_> + 8 2 6 8 -1. + <_> + 8 2 3 4 2. + <_> + 11 6 3 4 2. + <_> + + <_> + 11 4 8 6 -1. + <_> + 15 4 4 3 2. + <_> + 11 7 4 3 2. + <_> + + <_> + 3 4 8 6 -1. + <_> + 3 4 4 3 2. + <_> + 7 7 4 3 2. + <_> + + <_> + 3 1 18 3 -1. + <_> + 3 2 18 1 3. + <_> + + <_> + 0 9 8 3 -1. + <_> + 4 9 4 3 2. + <_> + + <_> + 13 2 9 10 -1. + <_> + 13 7 9 5 2. + <_> + + <_> + 1 2 8 12 -1. + <_> + 1 2 4 6 2. + <_> + 5 8 4 6 2. + <_> + + <_> + 12 5 8 6 -1. + <_> + 16 5 4 3 2. + <_> + 12 8 4 3 2. + <_> + + <_> + 1 0 17 3 -1. + <_> + 1 1 17 1 3. + <_> + + <_> + 4 0 15 2 -1. + <_> + 4 1 15 1 2. + <_> + + <_> + 5 0 12 4 -1. + <_> + 5 2 12 2 2. + <_> + + <_> + 7 4 15 14 -1. + <_> + 7 11 15 7 2. + <_> + + <_> + 8 2 9 2 -1. + <_> + 8 2 9 1 2. + 1 + <_> + + <_> + 16 0 2 13 -1. + <_> + 16 0 1 13 2. + 1 + <_> + + <_> + 6 0 13 2 -1. + <_> + 6 0 13 1 2. + 1 + <_> + + <_> + 12 7 2 9 -1. + <_> + 12 7 1 9 2. + 1 + <_> + + <_> + 10 7 9 2 -1. + <_> + 10 7 9 1 2. + 1 + <_> + + <_> + 9 0 11 10 -1. + <_> + 9 5 11 5 2. + <_> + + <_> + 8 5 9 2 -1. + <_> + 8 5 9 1 2. + 1 + <_> + + <_> + 13 2 9 10 -1. + <_> + 13 7 9 5 2. + <_> + + <_> + 0 2 9 10 -1. + <_> + 0 7 9 5 2. + <_> + + <_> + 17 2 3 8 -1. + <_> + 17 6 3 4 2. + <_> + + <_> + 2 2 3 8 -1. + <_> + 2 6 3 4 2. + <_> + + <_> + 4 4 18 4 -1. + <_> + 13 4 9 2 2. + <_> + 4 6 9 2 2. + <_> + + <_> + 0 4 18 4 -1. + <_> + 0 4 9 2 2. + <_> + 9 6 9 2 2. + <_> + + <_> + 4 1 14 4 -1. + <_> + 11 1 7 2 2. + <_> + 4 3 7 2 2. + <_> + + <_> + 0 0 21 8 -1. + <_> + 7 0 7 8 3. + <_> + + <_> + 5 0 14 18 -1. + <_> + 12 0 7 9 2. + <_> + 5 9 7 9 2. + <_> + + <_> + 1 11 16 4 -1. + <_> + 5 11 8 4 2. + <_> + + <_> + 6 9 10 6 -1. + <_> + 6 11 10 2 3. + <_> + + <_> + 5 10 12 4 -1. + <_> + 5 11 12 2 2. + <_> + + <_> + 15 4 6 6 -1. + <_> + 15 4 3 6 2. + 1 + <_> + + <_> + 7 4 6 6 -1. + <_> + 7 4 6 3 2. + 1 + <_> + + <_> + 12 5 8 6 -1. + <_> + 16 5 4 3 2. + <_> + 12 8 4 3 2. + <_> + + <_> + 5 5 8 4 -1. + <_> + 5 5 8 2 2. + 1 + <_> + + <_> + 17 6 3 12 -1. + <_> + 17 10 3 4 3. + <_> + + <_> + 5 7 9 2 -1. + <_> + 5 7 9 1 2. + 1 + <_> + + <_> + 14 6 3 8 -1. + <_> + 15 7 1 8 3. + 1 + <_> + + <_> + 5 7 12 2 -1. + <_> + 5 8 12 1 2. + <_> + + <_> + 4 5 18 3 -1. + <_> + 4 6 18 1 3. + <_> + + <_> + 1 6 15 9 -1. + <_> + 6 6 5 9 3. + <_> + + <_> + 19 4 3 10 -1. + <_> + 19 4 3 5 2. + 1 + <_> + + <_> + 0 12 18 6 -1. + <_> + 0 15 18 3 2. + <_> + + <_> + 6 13 13 4 -1. + <_> + 6 15 13 2 2. + <_> + + <_> + 3 5 8 9 -1. + <_> + 3 8 8 3 3. + <_> + + <_> + 6 8 10 8 -1. + <_> + 6 10 10 4 2. + <_> + + <_> + 4 6 13 6 -1. + <_> + 4 9 13 3 2. + <_> + + <_> + 14 3 2 12 -1. + <_> + 14 3 2 6 2. + 1 + <_> + + <_> + 8 3 12 2 -1. + <_> + 8 3 6 2 2. + 1 + <_> + + <_> + 13 1 5 12 -1. + <_> + 13 1 5 6 2. + 1 + <_> + + <_> + 9 1 12 5 -1. + <_> + 9 1 6 5 2. + 1 + <_> + + <_> + 8 12 8 3 -1. + <_> + 8 12 4 3 2. + <_> + + <_> + 5 12 12 4 -1. + <_> + 8 12 6 4 2. + <_> + + <_> + 13 8 6 4 -1. + <_> + 13 8 3 4 2. + 1 + <_> + + <_> + 9 8 4 6 -1. + <_> + 9 8 4 3 2. + 1 + <_> + + <_> + 1 7 20 11 -1. + <_> + 6 7 10 11 2. + <_> + + <_> + 10 13 12 3 -1. + <_> + 10 14 12 1 3. + <_> + + <_> + 1 10 6 4 -1. + <_> + 4 10 3 4 2. 
+ <_> + + <_> + 15 10 6 4 -1. + <_> + 15 10 3 4 2. + <_> + + <_> + 0 13 12 3 -1. + <_> + 0 14 12 1 3. + <_> + + <_> + 4 10 14 8 -1. + <_> + 4 14 14 4 2. + <_> + + <_> + 5 14 12 4 -1. + <_> + 5 15 12 2 2. + <_> + + <_> + 5 16 12 2 -1. + <_> + 5 17 12 1 2. + <_> + + <_> + 1 0 20 12 -1. + <_> + 6 0 10 12 2. + <_> + + <_> + 7 12 15 5 -1. + <_> + 12 12 5 5 3. + <_> + + <_> + 6 0 15 2 -1. + <_> + 6 0 15 1 2. + 1 + <_> + + <_> + 6 5 12 8 -1. + <_> + 12 5 6 4 2. + <_> + 6 9 6 4 2. + <_> + + <_> + 4 5 12 8 -1. + <_> + 4 5 6 4 2. + <_> + 10 9 6 4 2. + <_> + + <_> + 6 2 16 6 -1. + <_> + 14 2 8 3 2. + <_> + 6 5 8 3 2. + <_> + + <_> + 1 2 16 14 -1. + <_> + 1 2 8 7 2. + <_> + 9 9 8 7 2. + <_> + + <_> + 11 14 6 4 -1. + <_> + 11 14 3 4 2. + <_> + + <_> + 3 8 12 9 -1. + <_> + 7 11 4 3 9. + <_> + + <_> + 8 3 14 4 -1. + <_> + 15 3 7 2 2. + <_> + 8 5 7 2 2. + <_> + + <_> + 9 0 6 8 -1. + <_> + 11 2 2 8 3. + 1 + <_> + + <_> + 12 13 6 4 -1. + <_> + 12 15 6 2 2. + <_> + + <_> + 4 13 6 4 -1. + <_> + 4 15 6 2 2. + <_> + + <_> + 6 16 16 2 -1. + <_> + 6 17 16 1 2. + <_> + + <_> + 0 3 12 3 -1. + <_> + 0 4 12 1 3. + <_> + + <_> + 8 3 14 3 -1. + <_> + 8 4 14 1 3. + <_> + + <_> + 6 2 3 16 -1. + <_> + 6 6 3 8 2. + <_> + + <_> + 5 2 14 14 -1. + <_> + 12 2 7 7 2. + <_> + 5 9 7 7 2. + <_> + + <_> + 5 8 3 8 -1. + <_> + 5 12 3 4 2. + <_> + + <_> + 14 7 7 4 -1. + <_> + 14 7 7 2 2. + 1 + <_> + + <_> + 4 6 12 9 -1. + <_> + 8 9 4 3 9. + <_> + + <_> + 7 11 15 6 -1. + <_> + 12 11 5 6 3. + <_> + + <_> + 0 11 15 6 -1. + <_> + 5 11 5 6 3. + <_> + + <_> + 15 7 6 8 -1. + <_> + 18 7 3 4 2. + <_> + 15 11 3 4 2. + <_> + + <_> + 0 7 22 10 -1. + <_> + 0 7 11 5 2. + <_> + 11 12 11 5 2. + <_> + + <_> + 1 8 20 8 -1. + <_> + 6 8 10 8 2. + <_> + + <_> + 2 5 7 6 -1. + <_> + 2 7 7 2 3. + <_> + + <_> + 7 2 15 8 -1. + <_> + 7 4 15 4 2. + <_> + + <_> + 3 1 14 8 -1. + <_> + 3 3 14 4 2. + <_> + + <_> + 9 2 13 2 -1. + <_> + 9 3 13 1 2. + <_> + + <_> + 8 3 6 8 -1. + <_> + 10 3 2 8 3. + <_> + + <_> + 7 1 15 2 -1. + <_> + 7 2 15 1 2. + <_> + + <_> + 0 1 15 2 -1. + <_> + 0 2 15 1 2. + <_> + + <_> + 6 0 12 3 -1. + <_> + 6 1 12 1 3. + <_> + + <_> + 4 0 9 4 -1. + <_> + 7 0 3 4 3. + <_> + + <_> + 12 3 8 3 -1. + <_> + 12 3 4 3 2. + 1 + <_> + + <_> + 8 12 6 4 -1. + <_> + 11 12 3 4 2. + <_> + + <_> + 12 1 10 4 -1. + <_> + 12 1 5 4 2. + <_> + + <_> + 0 1 10 4 -1. + <_> + 5 1 5 4 2. + <_> + + <_> + 16 13 6 5 -1. + <_> + 16 13 3 5 2. + <_> + + <_> + 0 13 6 5 -1. + <_> + 3 13 3 5 2. + <_> + + <_> + 18 11 4 7 -1. + <_> + 18 11 2 7 2. + <_> + + <_> + 0 11 4 7 -1. + <_> + 2 11 2 7 2. + <_> + + <_> + 15 0 6 14 -1. + <_> + 17 0 2 14 3. + <_> + + <_> + 1 0 6 14 -1. + <_> + 3 0 2 14 3. + <_> + + <_> + 13 0 4 14 -1. + <_> + 15 0 2 7 2. + <_> + 13 7 2 7 2. + <_> + + <_> + 5 0 4 14 -1. + <_> + 5 0 2 7 2. + <_> + 7 7 2 7 2. + <_> + + <_> + 13 2 6 4 -1. + <_> + 13 2 3 4 2. + <_> + + <_> + 1 7 12 4 -1. + <_> + 1 7 6 2 2. + <_> + 7 9 6 2 2. + <_> + + <_> + 4 13 18 3 -1. + <_> + 4 14 18 1 3. + <_> + + <_> + 2 6 2 12 -1. + <_> + 2 12 2 6 2. + <_> + + <_> + 4 11 16 4 -1. + <_> + 12 11 8 2 2. + <_> + 4 13 8 2 2. + <_> + + <_> + 2 11 16 4 -1. + <_> + 2 11 8 2 2. + <_> + 10 13 8 2 2. + <_> + + <_> + 10 12 12 4 -1. + <_> + 16 12 6 2 2. + <_> + 10 14 6 2 2. + <_> + + <_> + 0 12 12 4 -1. + <_> + 0 12 6 2 2. + <_> + 6 14 6 2 2. + <_> + + <_> + 12 12 10 6 -1. + <_> + 17 12 5 3 2. + <_> + 12 15 5 3 2. + <_> + + <_> + 0 10 10 8 -1. + <_> + 0 10 5 4 2. + <_> + 5 14 5 4 2. + <_> + + <_> + 8 0 7 4 -1. + <_> + 8 2 7 2 2. + <_> + + <_> + 0 3 14 3 -1. + <_> + 0 4 14 1 3. + <_> + + <_> + 15 1 6 8 -1. 
+ <_> + 18 1 3 4 2. + <_> + 15 5 3 4 2. + <_> + + <_> + 2 3 7 4 -1. + <_> + 2 5 7 2 2. + <_> + + <_> + 13 2 6 4 -1. + <_> + 13 2 3 4 2. + <_> + + <_> + 3 2 6 4 -1. + <_> + 6 2 3 4 2. + <_> + + <_> + 5 1 16 4 -1. + <_> + 5 2 16 2 2. + <_> + + <_> + 4 15 13 3 -1. + <_> + 4 16 13 1 3. + <_> + + <_> + 12 6 3 12 -1. + <_> + 13 6 1 12 3. + <_> + + <_> + 0 16 16 2 -1. + <_> + 8 16 8 2 2. + <_> + + <_> + 3 2 16 10 -1. + <_> + 3 7 16 5 2. + <_> + + <_> + 7 1 12 4 -1. + <_> + 10 4 6 4 2. + 1 + <_> + + <_> + 14 1 2 9 -1. + <_> + 14 1 1 9 2. + 1 + <_> + + <_> + 4 10 3 8 -1. + <_> + 4 14 3 4 2. + <_> + + <_> + 11 12 6 6 -1. + <_> + 11 14 6 2 3. + <_> + + <_> + 5 12 6 6 -1. + <_> + 5 14 6 2 3. + <_> + + <_> + 12 6 3 12 -1. + <_> + 13 6 1 12 3. + <_> + + <_> + 10 6 8 3 -1. + <_> + 9 7 8 1 3. + 1 + <_> + + <_> + 12 6 3 12 -1. + <_> + 13 6 1 12 3. + <_> + + <_> + 7 6 3 12 -1. + <_> + 8 6 1 12 3. + <_> + + <_> + 14 1 2 9 -1. + <_> + 14 1 1 9 2. + 1 + <_> + + <_> + 11 4 10 3 -1. + <_> + 10 5 10 1 3. + 1 + <_> + + <_> + 8 11 9 4 -1. + <_> + 11 11 3 4 3. + <_> + + <_> + 7 5 2 12 -1. + <_> + 8 5 1 12 2. + <_> + + <_> + 13 1 3 16 -1. + <_> + 14 1 1 16 3. + <_> + + <_> + 7 4 6 6 -1. + <_> + 9 4 2 6 3. + <_> + + <_> + 10 4 2 12 -1. + <_> + 10 4 1 12 2. + <_> + + <_> + 0 0 18 5 -1. + <_> + 9 0 9 5 2. + <_> + + <_> + 16 3 2 12 -1. + <_> + 16 3 1 12 2. + 1 + <_> + + <_> + 6 3 12 2 -1. + <_> + 6 3 12 1 2. + 1 + <_> + + <_> + 13 6 4 7 -1. + <_> + 14 7 2 7 2. + 1 + <_> + + <_> + 7 3 13 2 -1. + <_> + 7 3 13 1 2. + 1 + <_> + + <_> + 5 14 17 4 -1. + <_> + 5 15 17 2 2. + <_> + + <_> + 0 13 18 3 -1. + <_> + 0 14 18 1 3. + <_> + + <_> + 6 13 14 3 -1. + <_> + 6 14 14 1 3. + <_> + + <_> + 2 13 14 3 -1. + <_> + 2 14 14 1 3. + <_> + + <_> + 5 13 12 2 -1. + <_> + 5 14 12 1 2. + <_> + + <_> + 0 5 4 8 -1. + <_> + 0 9 4 4 2. + <_> + + <_> + 15 7 6 8 -1. + <_> + 18 7 3 4 2. + <_> + 15 11 3 4 2. + <_> + + <_> + 9 2 4 7 -1. + <_> + 11 2 2 7 2. + <_> + + <_> + 8 4 14 3 -1. + <_> + 8 5 14 1 3. + <_> + + <_> + 0 4 12 3 -1. + <_> + 0 5 12 1 3. + <_> + + <_> + 13 2 4 9 -1. + <_> + 13 5 4 3 3. + <_> + + <_> + 5 2 4 9 -1. + <_> + 5 5 4 3 3. + <_> + + <_> + 12 6 6 4 -1. + <_> + 12 8 6 2 2. + <_> + + <_> + 5 5 12 3 -1. + <_> + 11 5 6 3 2. + <_> + + <_> + 7 1 8 12 -1. + <_> + 7 4 8 6 2. + <_> + + <_> + 9 3 6 7 -1. + <_> + 11 5 2 7 3. + 1 + <_> + + <_> + 12 1 9 6 -1. + <_> + 10 3 9 2 3. + 1 + <_> + + <_> + 11 7 8 3 -1. + <_> + 11 7 4 3 2. + 1 + <_> + + <_> + 14 1 2 9 -1. + <_> + 14 1 1 9 2. + 1 + <_> + + <_> + 1 7 6 8 -1. + <_> + 1 7 3 4 2. + <_> + 4 11 3 4 2. + <_> + + <_> + 11 0 4 6 -1. + <_> + 11 0 2 6 2. + <_> + + <_> + 7 0 4 6 -1. + <_> + 9 0 2 6 2. + <_> + + <_> + 0 7 22 4 -1. + <_> + 11 7 11 2 2. + <_> + 0 9 11 2 2. + <_> + + <_> + 3 5 4 8 -1. + <_> + 3 9 4 4 2. + <_> + + <_> + 5 4 12 3 -1. + <_> + 9 4 4 3 3. + <_> + + <_> + 10 2 12 3 -1. + <_> + 10 2 6 3 2. + 1 + <_> + + <_> + 5 2 6 16 -1. + <_> + 5 10 6 8 2. + <_> + + <_> + 12 6 8 4 -1. + <_> + 12 6 8 2 2. + 1 + <_> + + <_> + 3 12 6 6 -1. + <_> + 5 12 2 6 3. + <_> + + <_> + 12 1 3 12 -1. + <_> + 12 1 3 6 2. + 1 + <_> + + <_> + 10 1 12 3 -1. + <_> + 10 1 6 3 2. + 1 + <_> + + <_> + 4 8 16 4 -1. + <_> + 8 8 8 4 2. + <_> + + <_> + 6 10 4 6 -1. + <_> + 8 10 2 6 2. + <_> + + <_> + 7 14 9 4 -1. + <_> + 10 14 3 4 3. + <_> + + <_> + 8 10 4 7 -1. + <_> + 10 10 2 7 2. + <_> + + <_> + 12 12 4 6 -1. + <_> + 12 12 2 6 2. + <_> + + <_> + 6 12 4 6 -1. + <_> + 8 12 2 6 2. + <_> + + <_> + 9 12 4 6 -1. + <_> + 9 15 4 3 2. + <_> + + <_> + 5 12 6 6 -1. + <_> + 7 12 2 6 3. + <_> + + <_> + 6 2 11 16 -1. 
+ <_> + 6 6 11 8 2. + <_> + + <_> + 11 2 6 2 -1. + <_> + 11 2 6 1 2. + 1 + <_> + + <_> + 10 1 6 8 -1. + <_> + 13 1 3 4 2. + <_> + 10 5 3 4 2. + <_> + + <_> + 5 2 12 2 -1. + <_> + 11 2 6 2 2. + <_> + + <_> + 10 13 8 3 -1. + <_> + 10 13 4 3 2. + <_> + + <_> + 5 0 12 6 -1. + <_> + 11 0 6 6 2. + <_> + + <_> + 10 7 12 3 -1. + <_> + 10 8 12 1 3. + <_> + + <_> + 0 7 12 3 -1. + <_> + 0 8 12 1 3. + <_> + + <_> + 20 0 2 18 -1. + <_> + 20 9 2 9 2. + <_> + + <_> + 0 0 2 18 -1. + <_> + 0 9 2 9 2. + <_> + + <_> + 14 6 6 12 -1. + <_> + 17 6 3 6 2. + <_> + 14 12 3 6 2. + <_> + + <_> + 1 5 6 10 -1. + <_> + 1 10 6 5 2. + <_> + + <_> + 16 1 4 12 -1. + <_> + 16 5 4 4 3. + <_> + + <_> + 2 1 4 12 -1. + <_> + 2 5 4 4 3. + <_> + + <_> + 3 12 16 4 -1. + <_> + 11 12 8 2 2. + <_> + 3 14 8 2 2. + <_> + + <_> + 0 2 12 2 -1. + <_> + 0 3 12 1 2. + <_> + + <_> + 6 2 13 3 -1. + <_> + 6 3 13 1 3. + <_> + + <_> + 1 0 10 6 -1. + <_> + 1 0 5 3 2. + <_> + 6 3 5 3 2. + <_> + + <_> + 9 11 12 5 -1. + <_> + 13 11 4 5 3. + <_> + + <_> + 2 6 6 12 -1. + <_> + 2 6 3 6 2. + <_> + 5 12 3 6 2. + <_> + + <_> + 9 12 8 6 -1. + <_> + 13 12 4 3 2. + <_> + 9 15 4 3 2. + <_> + + <_> + 1 7 6 8 -1. + <_> + 1 7 3 4 2. + <_> + 4 11 3 4 2. + <_> + + <_> + 14 6 3 8 -1. + <_> + 15 7 1 8 3. + 1 + <_> + + <_> + 2 14 12 4 -1. + <_> + 6 14 4 4 3. + <_> + + <_> + 14 4 2 11 -1. + <_> + 14 4 1 11 2. + 1 + <_> + + <_> + 8 6 8 3 -1. + <_> + 7 7 8 1 3. + 1 + <_> + + <_> + 6 12 12 3 -1. + <_> + 6 13 12 1 3. + <_> + + <_> + 2 3 18 3 -1. + <_> + 2 4 18 1 3. + <_> + + <_> + 11 6 9 9 -1. + <_> + 14 6 3 9 3. + <_> + + <_> + 3 13 11 4 -1. + <_> + 3 15 11 2 2. + <_> + + <_> + 17 5 4 6 -1. + <_> + 17 5 2 6 2. + <_> + + <_> + 1 5 4 6 -1. + <_> + 3 5 2 6 2. + <_> + + <_> + 6 0 16 3 -1. + <_> + 10 0 8 3 2. + <_> + + <_> + 8 6 3 12 -1. + <_> + 9 6 1 12 3. + <_> + + <_> + 14 2 2 8 -1. + <_> + 14 2 1 8 2. + 1 + <_> + + <_> + 9 0 12 3 -1. + <_> + 9 0 6 3 2. + 1 + <_> + + <_> + 6 0 16 3 -1. + <_> + 10 0 8 3 2. + <_> + + <_> + 0 0 16 3 -1. + <_> + 4 0 8 3 2. + <_> + + <_> + 8 12 14 3 -1. + <_> + 8 13 14 1 3. + <_> + + <_> + 8 4 11 2 -1. + <_> + 8 4 11 1 2. + 1 + <_> + + <_> + 2 5 20 13 -1. + <_> + 2 5 10 13 2. + <_> + + <_> + 0 2 18 9 -1. + <_> + 6 5 6 3 9. + <_> + + <_> + 10 13 12 3 -1. + <_> + 10 14 12 1 3. + <_> + + <_> + 8 11 6 7 -1. + <_> + 10 11 2 7 3. + <_> + + <_> + 5 6 12 11 -1. + <_> + 9 6 4 11 3. + <_> + + <_> + 3 6 6 6 -1. + <_> + 5 6 2 6 3. + <_> + + <_> + 13 4 6 13 -1. + <_> + 15 4 2 13 3. + <_> + + <_> + 3 4 6 13 -1. + <_> + 5 4 2 13 3. + <_> + + <_> + 5 10 12 3 -1. + <_> + 9 10 4 3 3. + <_> + + <_> + 5 8 12 6 -1. + <_> + 8 8 6 6 2. + <_> + + <_> + 14 2 2 8 -1. + <_> + 14 2 1 8 2. + 1 + <_> + + <_> + 8 2 8 2 -1. + <_> + 8 2 8 1 2. + 1 + <_> + + <_> + 8 6 9 5 -1. + <_> + 11 6 3 5 3. + <_> + + <_> + 0 3 14 4 -1. + <_> + 0 3 7 2 2. + <_> + 7 5 7 2 2. + <_> + + <_> + 12 1 3 8 -1. + <_> + 13 2 1 8 3. + 1 + <_> + + <_> + 10 1 8 3 -1. + <_> + 9 2 8 1 3. + 1 + <_> + + <_> + 14 3 6 6 -1. + <_> + 14 5 6 2 3. + <_> + + <_> + 4 1 6 10 -1. + <_> + 4 1 3 5 2. + <_> + 7 6 3 5 2. + <_> + + <_> + 18 1 3 13 -1. + <_> + 19 1 1 13 3. + <_> + + <_> + 1 1 3 13 -1. + <_> + 2 1 1 13 3. + <_> + + <_> + 11 1 2 8 -1. + <_> + 11 1 1 8 2. + 1 + <_> + + <_> + 11 1 8 2 -1. + <_> + 11 1 8 1 2. + 1 + <_> + + <_> + 8 4 6 6 -1. + <_> + 8 6 6 2 3. + <_> + + <_> + 5 4 7 6 -1. + <_> + 5 6 7 2 3. + <_> + + <_> + 9 11 13 3 -1. + <_> + 9 12 13 1 3. + <_> + + <_> + 0 11 13 3 -1. + <_> + 0 12 13 1 3. + <_> + + <_> + 12 10 9 8 -1. + <_> + 12 14 9 4 2. + <_> + + <_> + 1 10 9 8 -1. + <_> + 1 14 9 4 2. 
+ <_> + + <_> + 4 10 18 8 -1. + <_> + 13 10 9 4 2. + <_> + 4 14 9 4 2. + <_> + + <_> + 0 10 18 8 -1. + <_> + 0 10 9 4 2. + <_> + 9 14 9 4 2. + <_> + + <_> + 12 2 4 12 -1. + <_> + 12 2 2 12 2. + 1 + <_> + + <_> + 0 5 20 13 -1. + <_> + 10 5 10 13 2. + <_> + + <_> + 10 6 9 6 -1. + <_> + 10 8 9 2 3. + <_> + + <_> + 3 6 9 6 -1. + <_> + 3 8 9 2 3. + <_> + + <_> + 7 4 15 8 -1. + <_> + 7 6 15 4 2. + <_> + + <_> + 9 2 12 2 -1. + <_> + 9 2 12 1 2. + 1 + <_> + + <_> + 12 6 6 4 -1. + <_> + 12 6 6 2 2. + 1 + <_> + + <_> + 7 0 13 3 -1. + <_> + 6 1 13 1 3. + 1 + <_> + + <_> + 3 0 18 2 -1. + <_> + 3 0 9 2 2. + <_> + + <_> + 4 5 13 12 -1. + <_> + 4 9 13 4 3. + <_> + + <_> + 4 6 18 9 -1. + <_> + 10 9 6 3 9. + <_> + + <_> + 8 5 6 11 -1. + <_> + 10 5 2 11 3. + <_> + + <_> + 6 2 16 16 -1. + <_> + 6 6 16 8 2. + <_> + + <_> + 0 2 16 16 -1. + <_> + 0 6 16 8 2. + <_> + + <_> + 18 1 2 12 -1. + <_> + 18 7 2 6 2. + <_> + + <_> + 2 1 2 12 -1. + <_> + 2 7 2 6 2. + <_> + + <_> + 8 3 14 9 -1. + <_> + 8 6 14 3 3. + <_> + + <_> + 0 3 14 9 -1. + <_> + 0 6 14 3 3. + <_> + + <_> + 10 6 4 9 -1. + <_> + 10 9 4 3 3. + <_> + + <_> + 0 6 3 12 -1. + <_> + 0 12 3 6 2. + <_> + + <_> + 16 2 6 9 -1. + <_> + 13 5 6 3 3. + 1 + <_> + + <_> + 10 0 12 4 -1. + <_> + 9 1 12 2 2. + 1 + <_> + + <_> + 11 0 10 18 -1. + <_> + 16 0 5 9 2. + <_> + 11 9 5 9 2. + <_> + + <_> + 1 0 10 18 -1. + <_> + 1 0 5 9 2. + <_> + 6 9 5 9 2. + <_> + + <_> + 7 12 14 3 -1. + <_> + 7 12 7 3 2. + <_> + + <_> + 7 11 8 3 -1. + <_> + 11 11 4 3 2. + <_> + + <_> + 2 13 18 4 -1. + <_> + 2 13 9 4 2. + <_> + + <_> + 10 6 4 6 -1. + <_> + 10 6 2 6 2. + 1 + <_> + + <_> + 8 9 6 9 -1. + <_> + 10 9 2 9 3. + <_> + + <_> + 3 11 13 3 -1. + <_> + 3 12 13 1 3. + <_> + + <_> + 18 10 4 6 -1. + <_> + 18 10 2 6 2. + <_> + + <_> + 5 5 9 5 -1. + <_> + 8 5 3 5 3. + <_> + + <_> + 13 0 2 14 -1. + <_> + 13 0 1 14 2. + <_> + + <_> + 2 0 18 7 -1. + <_> + 8 0 6 7 3. + <_> + + <_> + 13 4 6 8 -1. + <_> + 16 4 3 4 2. + <_> + 13 8 3 4 2. + <_> + + <_> + 3 4 6 8 -1. + <_> + 3 4 3 4 2. + <_> + 6 8 3 4 2. + <_> + + <_> + 8 5 12 2 -1. + <_> + 8 6 12 1 2. + <_> + + <_> + 7 0 3 12 -1. + <_> + 8 0 1 12 3. + <_> + + <_> + 15 0 3 10 -1. + <_> + 16 1 1 10 3. + 1 + <_> + + <_> + 2 4 12 12 -1. + <_> + 6 8 4 4 9. + <_> + + <_> + 5 10 13 3 -1. + <_> + 5 11 13 1 3. + <_> + + <_> + 5 15 12 2 -1. + <_> + 5 16 12 1 2. + <_> + + <_> + 17 8 5 6 -1. + <_> + 17 11 5 3 2. + <_> + + <_> + 5 12 6 6 -1. + <_> + 5 14 6 2 3. + <_> + + <_> + 10 6 4 7 -1. + <_> + 10 6 2 7 2. + 1 + <_> + + <_> + 12 3 4 10 -1. + <_> + 13 4 2 10 2. + 1 + <_> + + <_> + 10 3 10 4 -1. + <_> + 9 4 10 2 2. + 1 + <_> + + <_> + 12 4 2 12 -1. + <_> + 12 4 1 12 2. + 1 + <_> + + <_> + 1 11 15 3 -1. + <_> + 6 11 5 3 3. + <_> + + <_> + 11 6 6 9 -1. + <_> + 13 6 2 9 3. + <_> + + <_> + 5 6 6 9 -1. + <_> + 7 6 2 9 3. + <_> + + <_> + 8 5 6 6 -1. + <_> + 10 5 2 6 3. + <_> + + <_> + 1 2 6 8 -1. + <_> + 1 2 3 4 2. + <_> + 4 6 3 4 2. + <_> + + <_> + 14 0 4 9 -1. + <_> + 14 3 4 3 3. + <_> + + <_> + 0 0 18 9 -1. + <_> + 0 3 18 3 3. + <_> + + <_> + 9 5 5 12 -1. + <_> + 9 8 5 6 2. + <_> + + <_> + 3 5 16 3 -1. + <_> + 3 6 16 1 3. + <_> + + <_> + 16 2 6 8 -1. + <_> + 19 2 3 4 2. + <_> + 16 6 3 4 2. + <_> + + <_> + 0 2 6 8 -1. + <_> + 0 2 3 4 2. + <_> + 3 6 3 4 2. + <_> + + <_> + 5 2 12 16 -1. + <_> + 5 10 12 8 2. + <_> + + <_> + 5 11 8 6 -1. + <_> + 5 11 4 3 2. + <_> + 9 14 4 3 2. + <_> + + <_> + 8 2 6 8 -1. + <_> + 11 2 3 4 2. + <_> + 8 6 3 4 2. + <_> + + <_> + 0 6 7 12 -1. + <_> + 0 10 7 4 3. + <_> + + <_> + 16 8 6 8 -1. + <_> + 16 10 6 4 2. + <_> + + <_> + 0 8 6 8 -1. 
+ <_> + 0 10 6 4 2. + <_> + + <_> + 4 0 17 3 -1. + <_> + 4 1 17 1 3. + <_> + + <_> + 7 4 4 14 -1. + <_> + 8 4 2 14 2. + <_> + + <_> + 9 5 5 12 -1. + <_> + 9 8 5 6 2. + <_> + + <_> + 10 4 10 4 -1. + <_> + 9 5 10 2 2. + 1 + <_> + + <_> + 13 1 3 13 -1. + <_> + 14 2 1 13 3. + 1 + <_> + + <_> + 9 1 13 3 -1. + <_> + 8 2 13 1 3. + 1 + <_> + + <_> + 4 16 14 2 -1. + <_> + 4 17 14 1 2. + <_> + + <_> + 0 16 15 2 -1. + <_> + 0 17 15 1 2. + <_> + + <_> + 11 4 2 6 -1. + <_> + 11 4 1 6 2. + 1 + <_> + + <_> + 0 6 4 9 -1. + <_> + 0 9 4 3 3. + <_> + + <_> + 14 0 7 6 -1. + <_> + 12 2 7 2 3. + 1 + <_> + + <_> + 8 4 6 10 -1. + <_> + 8 4 3 5 2. + <_> + 11 9 3 5 2. + <_> + + <_> + 7 7 8 10 -1. + <_> + 11 7 4 5 2. + <_> + 7 12 4 5 2. + <_> + + <_> + 5 6 12 8 -1. + <_> + 5 6 6 4 2. + <_> + 11 10 6 4 2. + <_> + + <_> + 8 6 8 8 -1. + <_> + 12 6 4 4 2. + <_> + 8 10 4 4 2. + <_> + + <_> + 6 6 8 8 -1. + <_> + 6 6 4 4 2. + <_> + 10 10 4 4 2. + <_> + + <_> + 12 4 6 6 -1. + <_> + 10 6 6 2 3. + 1 + <_> + + <_> + 5 7 10 8 -1. + <_> + 5 7 5 4 2. + <_> + 10 11 5 4 2. + <_> + + <_> + 4 5 18 3 -1. + <_> + 4 6 18 1 3. + <_> + + <_> + 3 16 15 2 -1. + <_> + 3 17 15 1 2. + <_> + + <_> + 3 10 16 2 -1. + <_> + 3 11 16 1 2. + <_> + + <_> + 3 12 6 6 -1. + <_> + 5 12 2 6 3. + <_> + + <_> + 18 2 3 13 -1. + <_> + 19 2 1 13 3. + <_> + + <_> + 4 10 12 4 -1. + <_> + 8 10 4 4 3. + <_> + + <_> + 7 7 14 7 -1. + <_> + 7 7 7 7 2. + <_> + + <_> + 1 7 14 7 -1. + <_> + 8 7 7 7 2. + <_> + + <_> + 11 0 8 13 -1. + <_> + 11 0 4 13 2. + <_> + + <_> + 0 6 4 12 -1. + <_> + 0 6 2 6 2. + <_> + 2 12 2 6 2. + <_> + + <_> + 14 2 2 12 -1. + <_> + 14 2 1 12 2. + 1 + <_> + + <_> + 2 2 8 12 -1. + <_> + 2 2 4 6 2. + <_> + 6 8 4 6 2. + <_> + + <_> + 17 0 4 16 -1. + <_> + 17 8 4 8 2. + <_> + + <_> + 1 0 4 16 -1. + <_> + 1 8 4 8 2. + <_> + + <_> + 6 1 16 16 -1. + <_> + 6 9 16 8 2. + <_> + + <_> + 8 0 6 7 -1. + <_> + 10 2 2 7 3. + 1 + <_> + + <_> + 15 1 6 6 -1. + <_> + 13 3 6 2 3. + 1 + <_> + + <_> + 7 1 6 6 -1. + <_> + 9 3 2 6 3. + 1 + <_> + + <_> + 14 2 2 12 -1. + <_> + 14 2 1 12 2. + 1 + <_> + + <_> + 5 11 12 6 -1. + <_> + 5 14 12 3 2. + <_> + + <_> + 5 13 12 4 -1. + <_> + 5 14 12 2 2. + <_> + + <_> + 2 15 18 2 -1. + <_> + 2 16 18 1 2. + <_> + + <_> + 18 4 4 14 -1. + <_> + 20 4 2 7 2. + <_> + 18 11 2 7 2. + <_> + + <_> + 0 4 4 14 -1. + <_> + 0 4 2 7 2. + <_> + 2 11 2 7 2. + <_> + + <_> + 11 0 3 12 -1. + <_> + 12 0 1 12 3. + <_> + + <_> + 9 3 4 6 -1. + <_> + 9 6 4 3 2. + <_> + + <_> + 7 4 15 10 -1. + <_> + 7 9 15 5 2. + <_> + + <_> + 4 2 9 12 -1. + <_> + 4 6 9 4 3. + <_> + + <_> + 3 1 17 3 -1. + <_> + 3 2 17 1 3. + <_> + + <_> + 0 1 16 3 -1. + <_> + 0 2 16 1 3. + <_> + + <_> + 7 4 15 10 -1. + <_> + 7 9 15 5 2. + <_> + + <_> + 0 4 15 10 -1. + <_> + 0 9 15 5 2. + <_> + + <_> + 15 0 6 18 -1. + <_> + 15 9 6 9 2. + <_> + + <_> + 3 14 12 4 -1. + <_> + 3 14 6 2 2. + <_> + 9 16 6 2 2. + <_> + + <_> + 13 0 9 5 -1. + <_> + 16 3 3 5 3. + 1 + <_> + + <_> + 9 7 9 2 -1. + <_> + 9 7 9 1 2. + 1 + <_> + + <_> + 12 6 3 7 -1. + <_> + 13 7 1 7 3. + 1 + <_> + + <_> + 3 4 8 8 -1. + <_> + 7 4 4 8 2. + <_> + + <_> + 7 8 12 3 -1. + <_> + 11 8 4 3 3. + <_> + + <_> + 8 6 5 6 -1. + <_> + 8 6 5 3 2. + 1 + <_> + + <_> + 10 7 10 6 -1. + <_> + 10 10 10 3 2. + <_> + + <_> + 0 9 16 3 -1. + <_> + 0 10 16 1 3. + <_> + + <_> + 7 9 12 3 -1. + <_> + 7 10 12 1 3. + <_> + + <_> + 2 10 8 6 -1. + <_> + 2 13 8 3 2. + <_> + + <_> + 16 6 4 12 -1. + <_> + 16 9 4 6 2. + <_> + + <_> + 3 11 8 6 -1. + <_> + 3 11 4 3 2. + <_> + 7 14 4 3 2. + <_> + + <_> + 4 5 16 10 -1. + <_> + 12 5 8 5 2. + <_> + 4 10 8 5 2. 
+ <_> + + <_> + 7 10 3 8 -1. + <_> + 7 14 3 4 2. + <_> + + <_> + 9 14 6 4 -1. + <_> + 9 16 6 2 2. + <_> + + <_> + 2 9 15 9 -1. + <_> + 2 12 15 3 3. + <_> + + <_> + 11 2 8 6 -1. + <_> + 15 2 4 3 2. + <_> + 11 5 4 3 2. + <_> + + <_> + 4 11 8 6 -1. + <_> + 4 13 8 2 3. + <_> + + <_> + 16 0 2 14 -1. + <_> + 16 0 1 14 2. + 1 + <_> + + <_> + 6 0 14 2 -1. + <_> + 6 0 14 1 2. + 1 + <_> + + <_> + 13 9 7 6 -1. + <_> + 13 11 7 2 3. + <_> + + <_> + 10 6 7 3 -1. + <_> + 9 7 7 1 3. + 1 + <_> + + <_> + 18 2 3 13 -1. + <_> + 19 2 1 13 3. + <_> + + <_> + 1 2 3 13 -1. + <_> + 2 2 1 13 3. + <_> + + <_> + 5 1 12 4 -1. + <_> + 11 1 6 2 2. + <_> + 5 3 6 2 2. + <_> + + <_> + 7 8 6 6 -1. + <_> + 7 10 6 2 3. + <_> + + <_> + 8 13 14 3 -1. + <_> + 8 14 14 1 3. + <_> + + <_> + 10 5 6 6 -1. + <_> + 12 7 2 6 3. + 1 + <_> + + <_> + 15 6 4 8 -1. + <_> + 16 7 2 8 2. + 1 + <_> + + <_> + 0 13 14 4 -1. + <_> + 0 13 7 2 2. + <_> + 7 15 7 2 2. + <_> + + <_> + 1 7 21 6 -1. + <_> + 8 9 7 2 9. + <_> + + <_> + 7 4 6 8 -1. + <_> + 7 4 3 4 2. + <_> + 10 8 3 4 2. + <_> + + <_> + 7 4 8 8 -1. + <_> + 11 4 4 4 2. + <_> + 7 8 4 4 2. + <_> + + <_> + 10 6 7 4 -1. + <_> + 9 7 7 2 2. + 1 + <_> + + <_> + 11 2 6 7 -1. + <_> + 11 2 3 7 2. + 1 + <_> + + <_> + 11 2 7 6 -1. + <_> + 11 2 7 3 2. + 1 + <_> + + <_> + 11 4 8 6 -1. + <_> + 11 4 4 6 2. + 1 + <_> + + <_> + 11 4 6 8 -1. + <_> + 11 4 6 4 2. + 1 + <_> + + <_> + 12 3 8 5 -1. + <_> + 12 3 4 5 2. + 1 + <_> + + <_> + 10 3 5 8 -1. + <_> + 10 3 5 4 2. + 1 + <_> + + <_> + 13 0 9 5 -1. + <_> + 16 3 3 5 3. + 1 + <_> + + <_> + 2 6 10 12 -1. + <_> + 2 9 10 6 2. + <_> + + <_> + 15 6 5 12 -1. + <_> + 15 9 5 6 2. + <_> + + <_> + 3 7 13 3 -1. + <_> + 3 8 13 1 3. + <_> + + <_> + 4 7 17 3 -1. + <_> + 4 8 17 1 3. + <_> + + <_> + 2 9 7 6 -1. + <_> + 2 11 7 2 3. + <_> + + <_> + 13 9 9 4 -1. + <_> + 13 11 9 2 2. + <_> + + <_> + 9 0 5 9 -1. + <_> + 6 3 5 3 3. + 1 + <_> + + <_> + 9 3 8 3 -1. + <_> + 9 3 4 3 2. + <_> + + <_> + 3 0 4 13 -1. + <_> + 4 0 2 13 2. + <_> + + <_> + 13 0 8 6 -1. + <_> + 15 0 4 6 2. + <_> + + <_> + 3 0 6 5 -1. + <_> + 6 0 3 5 2. + <_> + + <_> + 9 0 12 5 -1. + <_> + 9 0 6 5 2. + <_> + + <_> + 1 2 6 8 -1. + <_> + 3 2 2 8 3. + <_> + + <_> + 18 2 4 6 -1. + <_> + 18 2 2 6 2. + <_> + + <_> + 0 2 4 6 -1. + <_> + 2 2 2 6 2. + <_> + + <_> + 16 9 6 6 -1. + <_> + 16 11 6 2 3. + <_> + + <_> + 10 0 12 6 -1. + <_> + 13 3 6 6 2. + 1 + <_> + + <_> + 14 2 3 12 -1. + <_> + 10 6 3 4 3. + 1 + <_> + + <_> + 8 3 6 7 -1. + <_> + 11 3 3 7 2. + <_> + + <_> + 16 1 3 15 -1. + <_> + 17 1 1 15 3. + <_> + + <_> + 0 1 6 8 -1. + <_> + 2 1 2 8 3. + <_> + + <_> + 13 0 3 14 -1. + <_> + 14 0 1 14 3. + <_> + + <_> + 6 0 3 14 -1. + <_> + 7 0 1 14 3. + <_> + + <_> + 4 13 18 2 -1. + <_> + 4 13 9 2 2. + <_> + + <_> + 2 9 15 3 -1. + <_> + 7 9 5 3 3. + <_> + + <_> + 9 5 10 6 -1. + <_> + 14 5 5 3 2. + <_> + 9 8 5 3 2. + <_> + + <_> + 3 5 10 6 -1. + <_> + 3 5 5 3 2. + <_> + 8 8 5 3 2. + <_> + + <_> + 14 3 2 12 -1. + <_> + 14 3 1 12 2. + 1 + <_> + + <_> + 8 3 12 2 -1. + <_> + 8 3 12 1 2. + 1 + <_> + + <_> + 12 7 6 6 -1. + <_> + 14 7 2 6 3. + <_> + + <_> + 4 7 6 6 -1. + <_> + 6 7 2 6 3. + <_> + + <_> + 7 0 8 3 -1. + <_> + 7 0 4 3 2. + <_> + + <_> + 9 0 4 6 -1. + <_> + 11 0 2 6 2. + <_> + + <_> + 10 0 12 12 -1. + <_> + 13 0 6 12 2. + <_> + + <_> + 0 0 12 12 -1. + <_> + 3 0 6 12 2. + <_> + + <_> + 16 5 6 4 -1. + <_> + 16 5 3 4 2. + <_> + + <_> + 0 5 6 4 -1. + <_> + 3 5 3 4 2. + <_> + + <_> + 9 0 12 5 -1. + <_> + 9 0 6 5 2. + <_> + + <_> + 1 8 8 10 -1. + <_> + 1 8 4 5 2. + <_> + 5 13 4 5 2. + <_> + + <_> + 8 16 14 2 -1. + <_> + 8 16 7 2 2. 
+ <_> + + <_> + 0 11 16 3 -1. + <_> + 8 11 8 3 2. + <_> + + <_> + 10 16 12 2 -1. + <_> + 10 16 6 2 2. + <_> + + <_> + 0 16 12 2 -1. + <_> + 6 16 6 2 2. + <_> + + <_> + 3 11 18 6 -1. + <_> + 12 11 9 3 2. + <_> + 3 14 9 3 2. + <_> + + <_> + 7 13 6 4 -1. + <_> + 7 15 6 2 2. + <_> + + <_> + 10 11 6 6 -1. + <_> + 10 13 6 2 3. + <_> + + <_> + 6 14 9 4 -1. + <_> + 9 14 3 4 3. + <_> + + <_> + 5 4 16 10 -1. + <_> + 5 9 16 5 2. + <_> + + <_> + 11 7 3 8 -1. + <_> + 11 7 3 4 2. + 1 + <_> + + <_> + 13 10 6 6 -1. + <_> + 13 12 6 2 3. + <_> + + <_> + 0 6 22 12 -1. + <_> + 0 6 11 6 2. + <_> + 11 12 11 6 2. + <_> + + <_> + 9 5 6 12 -1. + <_> + 12 5 3 6 2. + <_> + 9 11 3 6 2. + <_> + + <_> + 7 5 6 12 -1. + <_> + 7 5 3 6 2. + <_> + 10 11 3 6 2. + <_> + + <_> + 14 1 6 9 -1. + <_> + 14 4 6 3 3. + <_> + + <_> + 2 1 6 9 -1. + <_> + 2 4 6 3 3. + <_> + + <_> + 13 4 4 6 -1. + <_> + 13 7 4 3 2. + <_> + + <_> + 5 4 4 6 -1. + <_> + 5 7 4 3 2. + <_> + + <_> + 10 13 12 3 -1. + <_> + 10 14 12 1 3. + <_> + + <_> + 3 3 15 3 -1. + <_> + 3 4 15 1 3. + <_> + + <_> + 13 5 2 9 -1. + <_> + 13 5 1 9 2. + 1 + <_> + + <_> + 9 5 9 2 -1. + <_> + 9 5 9 1 2. + 1 + <_> + + <_> + 6 2 14 10 -1. + <_> + 6 2 7 10 2. + <_> + + <_> + 8 2 12 2 -1. + <_> + 8 2 12 1 2. + 1 + <_> + + <_> + 17 0 2 13 -1. + <_> + 17 0 1 13 2. + 1 + <_> + + <_> + 5 0 13 2 -1. + <_> + 5 0 13 1 2. + 1 + <_> + + <_> + 12 4 3 10 -1. + <_> + 12 4 3 5 2. + 1 + <_> + + <_> + 0 6 12 3 -1. + <_> + 0 7 12 1 3. + <_> + + <_> + 6 6 15 3 -1. + <_> + 6 7 15 1 3. + <_> + + <_> + 8 8 5 9 -1. + <_> + 8 11 5 3 3. + <_> + + <_> + 10 11 7 6 -1. + <_> + 10 13 7 2 3. + <_> + + <_> + 5 11 7 6 -1. + <_> + 5 13 7 2 3. + <_> + + <_> + 5 12 13 4 -1. + <_> + 5 13 13 2 2. + <_> + + <_> + 9 4 4 6 -1. + <_> + 9 7 4 3 2. + <_> + + <_> + 13 1 2 9 -1. + <_> + 13 1 1 9 2. + 1 + <_> + + <_> + 5 2 8 6 -1. + <_> + 5 2 4 3 2. + <_> + 9 5 4 3 2. + <_> + + <_> + 11 0 4 8 -1. + <_> + 12 1 2 8 2. + 1 + <_> + + <_> + 11 0 8 4 -1. + <_> + 10 1 8 2 2. + 1 + <_> + + <_> + 7 9 15 3 -1. + <_> + 7 10 15 1 3. + <_> + + <_> + 5 10 12 3 -1. + <_> + 5 11 12 1 3. + <_> + + <_> + 15 2 7 6 -1. + <_> + 15 4 7 2 3. + <_> + + <_> + 0 2 7 6 -1. + <_> + 0 4 7 2 3. + <_> + + <_> + 12 3 2 7 -1. + <_> + 12 3 1 7 2. + 1 + <_> + + <_> + 10 3 7 2 -1. + <_> + 10 3 7 1 2. + 1 + <_> + + <_> + 2 3 20 14 -1. + <_> + 12 3 10 7 2. + <_> + 2 10 10 7 2. + <_> + + <_> + 5 2 12 8 -1. + <_> + 11 2 6 8 2. + <_> + + <_> + 18 4 4 8 -1. + <_> + 18 8 4 4 2. + <_> + + <_> + 6 4 6 8 -1. + <_> + 6 4 3 4 2. + <_> + 9 8 3 4 2. + <_> + + <_> + 12 2 4 6 -1. + <_> + 12 2 2 6 2. + 1 + <_> + + <_> + 10 2 6 4 -1. + <_> + 10 2 6 2 2. + 1 + <_> + + <_> + 9 3 8 15 -1. + <_> + 11 3 4 15 2. + <_> + + <_> + 1 11 8 7 -1. + <_> + 3 11 4 7 2. + <_> + + <_> + 13 7 6 10 -1. + <_> + 15 7 2 10 3. + <_> + + <_> + 2 3 10 14 -1. + <_> + 7 3 5 14 2. + <_> + + <_> + 6 5 15 12 -1. + <_> + 11 5 5 12 3. + <_> + + <_> + 1 5 15 12 -1. + <_> + 6 5 5 12 3. + <_> + + <_> + 9 14 8 4 -1. + <_> + 9 16 8 2 2. + <_> + + <_> + 9 6 4 10 -1. + <_> + 11 6 2 10 2. + <_> + + <_> + 8 6 10 4 -1. + <_> + 8 8 10 2 2. + <_> + + <_> + 2 14 7 4 -1. + <_> + 2 16 7 2 2. + <_> + + <_> + 7 9 15 3 -1. + <_> + 7 10 15 1 3. + <_> + + <_> + 0 10 16 4 -1. + <_> + 0 10 8 2 2. + <_> + 8 12 8 2 2. + <_> + + <_> + 10 11 6 7 -1. + <_> + 12 11 2 7 3. + <_> + + <_> + 8 13 6 5 -1. + <_> + 11 13 3 5 2. + <_> + + <_> + 10 11 6 7 -1. + <_> + 12 11 2 7 3. + <_> + + <_> + 6 11 6 7 -1. + <_> + 8 11 2 7 3. + <_> + + <_> + 18 4 4 8 -1. + <_> + 18 8 4 4 2. + <_> + + <_> + 4 6 8 11 -1. + <_> + 8 6 4 11 2. 
+ <_> + + <_> + 7 5 8 12 -1. + <_> + 9 5 4 12 2. + <_> + + <_> + 5 3 6 6 -1. + <_> + 7 3 2 6 3. + <_> + + <_> + 11 2 10 6 -1. + <_> + 11 2 10 3 2. + 1 + <_> + + <_> + 11 1 8 9 -1. + <_> + 11 1 4 9 2. + 1 + <_> + + <_> + 12 4 3 10 -1. + <_> + 12 4 3 5 2. + 1 + <_> + + <_> + 11 1 11 4 -1. + <_> + 11 1 11 2 2. + 1 + <_> + + <_> + 18 4 4 8 -1. + <_> + 18 8 4 4 2. + <_> + + <_> + 0 4 4 8 -1. + <_> + 0 8 4 4 2. + <_> + + <_> + 12 2 2 12 -1. + <_> + 12 2 1 12 2. + 1 + <_> + + <_> + 4 12 12 3 -1. + <_> + 4 13 12 1 3. + <_> + + <_> + 2 12 18 3 -1. + <_> + 2 13 18 1 3. + <_> + + <_> + 0 0 16 3 -1. + <_> + 0 1 16 1 3. + <_> + + <_> + 12 2 2 12 -1. + <_> + 12 2 1 12 2. + 1 + <_> + + <_> + 10 2 12 2 -1. + <_> + 10 2 12 1 2. + 1 + <_> + + <_> + 13 10 6 7 -1. + <_> + 15 10 2 7 3. + <_> + + <_> + 5 13 12 2 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 16 8 6 8 -1. + <_> + 19 8 3 4 2. + <_> + 16 12 3 4 2. + <_> + + <_> + 4 1 8 6 -1. + <_> + 4 3 8 2 3. + <_> + + <_> + 18 0 4 9 -1. + <_> + 18 3 4 3 3. + <_> + + <_> + 8 2 6 8 -1. + <_> + 8 6 6 4 2. + <_> + + <_> + 8 1 6 4 -1. + <_> + 8 3 6 2 2. + <_> + + <_> + 1 2 12 3 -1. + <_> + 1 3 12 1 3. + <_> + + <_> + 7 2 12 3 -1. + <_> + 7 3 12 1 3. + <_> + + <_> + 1 0 16 18 -1. + <_> + 1 9 16 9 2. + <_> + + <_> + 16 8 6 8 -1. + <_> + 19 8 3 4 2. + <_> + 16 12 3 4 2. + <_> + + <_> + 0 8 6 8 -1. + <_> + 0 8 3 4 2. + <_> + 3 12 3 4 2. + <_> + + <_> + 18 4 4 6 -1. + <_> + 18 7 4 3 2. + <_> + + <_> + 0 12 14 3 -1. + <_> + 0 13 14 1 3. + <_> + + <_> + 3 12 16 3 -1. + <_> + 3 13 16 1 3. + <_> + + <_> + 0 4 4 6 -1. + <_> + 0 7 4 3 2. + <_> + + <_> + 9 14 8 4 -1. + <_> + 9 16 8 2 2. + <_> + + <_> + 0 13 14 3 -1. + <_> + 0 14 14 1 3. + <_> + + <_> + 4 14 14 2 -1. + <_> + 4 15 14 1 2. + <_> + + <_> + 3 12 15 6 -1. + <_> + 3 15 15 3 2. + <_> + + <_> + 7 12 14 6 -1. + <_> + 7 15 14 3 2. + <_> + + <_> + 0 0 14 4 -1. + <_> + 0 2 14 2 2. + <_> + + <_> + 13 10 6 7 -1. + <_> + 15 10 2 7 3. + <_> + + <_> + 3 10 6 7 -1. + <_> + 5 10 2 7 3. + <_> + + <_> + 2 4 18 4 -1. + <_> + 8 4 6 4 3. + <_> + + <_> + 5 3 12 9 -1. + <_> + 9 6 4 3 9. + <_> + + <_> + 10 8 10 7 -1. + <_> + 10 8 5 7 2. + <_> + + <_> + 5 2 4 16 -1. + <_> + 5 6 4 8 2. + <_> + + <_> + 16 8 6 8 -1. + <_> + 19 8 3 4 2. + <_> + 16 12 3 4 2. + <_> + + <_> + 0 12 17 4 -1. + <_> + 0 14 17 2 2. + <_> + + <_> + 7 12 14 6 -1. + <_> + 7 15 14 3 2. + <_> + + <_> + 0 13 12 4 -1. + <_> + 0 13 6 2 2. + <_> + 6 15 6 2 2. + <_> + + <_> + 10 13 12 3 -1. + <_> + 10 14 12 1 3. + <_> + + <_> + 7 11 8 6 -1. + <_> + 7 11 4 3 2. + <_> + 11 14 4 3 2. + <_> + + <_> + 9 6 12 9 -1. + <_> + 12 6 6 9 2. + <_> + + <_> + 1 6 12 8 -1. + <_> + 4 6 6 8 2. + <_> + + <_> + 8 12 6 6 -1. + <_> + 8 14 6 2 3. + <_> + + <_> + 1 4 20 14 -1. + <_> + 1 4 10 7 2. + <_> + 11 11 10 7 2. + <_> + + <_> + 18 0 4 10 -1. + <_> + 19 1 2 10 2. + 1 + <_> + + <_> + 2 2 6 12 -1. + <_> + 2 5 6 6 2. + <_> + + <_> + 16 5 4 9 -1. + <_> + 16 8 4 3 3. + <_> + + <_> + 6 9 8 4 -1. + <_> + 10 9 4 4 2. + <_> + + <_> + 7 8 14 3 -1. + <_> + 7 8 7 3 2. + <_> + + <_> + 0 8 18 3 -1. + <_> + 9 8 9 3 2. + <_> + + <_> + 14 6 8 4 -1. + <_> + 14 6 8 2 2. + 1 + <_> + + <_> + 0 3 18 2 -1. + <_> + 9 3 9 2 2. + <_> + + <_> + 6 6 10 8 -1. + <_> + 6 8 10 4 2. + <_> + + <_> + 1 5 10 12 -1. + <_> + 1 8 10 6 2. + <_> + + <_> + 11 6 3 12 -1. + <_> + 12 6 1 12 3. + <_> + + <_> + 8 6 3 12 -1. + <_> + 9 6 1 12 3. + <_> + + <_> + 11 1 3 13 -1. + <_> + 12 1 1 13 3. + <_> + + <_> + 8 2 3 13 -1. + <_> + 9 2 1 13 3. + <_> + + <_> + 6 6 2 12 -1. + <_> + 6 12 2 6 2. + <_> + + <_> + 17 4 2 9 -1. + <_> + 17 4 1 9 2. 
+ 1 + <_> + + <_> + 0 0 12 4 -1. + <_> + 0 1 12 2 2. + <_> + + <_> + 8 4 12 4 -1. + <_> + 14 4 6 2 2. + <_> + 8 6 6 2 2. + <_> + + <_> + 6 13 6 4 -1. + <_> + 6 15 6 2 2. + <_> + + <_> + 7 13 12 4 -1. + <_> + 7 15 12 2 2. + <_> + + <_> + 1 8 6 4 -1. + <_> + 4 8 3 4 2. + <_> + + <_> + 15 8 6 10 -1. + <_> + 15 8 3 10 2. + <_> + + <_> + 1 8 6 10 -1. + <_> + 4 8 3 10 2. + <_> + + <_> + 16 12 6 4 -1. + <_> + 16 12 3 4 2. + <_> + + <_> + 1 6 6 8 -1. + <_> + 1 6 3 4 2. + <_> + 4 10 3 4 2. + <_> + + <_> + 11 1 4 11 -1. + <_> + 12 2 2 11 2. + 1 + <_> + + <_> + 11 1 11 4 -1. + <_> + 10 2 11 2 2. + 1 + <_> + + <_> + 12 0 4 7 -1. + <_> + 13 1 2 7 2. + 1 + <_> + + <_> + 10 0 7 4 -1. + <_> + 9 1 7 2 2. + 1 + <_> + + <_> + 13 5 2 12 -1. + <_> + 13 5 1 12 2. + <_> + + <_> + 7 5 2 12 -1. + <_> + 8 5 1 12 2. + <_> + + <_> + 8 5 9 4 -1. + <_> + 11 5 3 4 3. + <_> + + <_> + 7 0 10 3 -1. + <_> + 6 1 10 1 3. + 1 + <_> + + <_> + 17 4 2 9 -1. + <_> + 17 4 1 9 2. + 1 + <_> + + <_> + 5 4 9 2 -1. + <_> + 5 4 9 1 2. + 1 + <_> + + <_> + 12 10 4 8 -1. + <_> + 12 10 2 8 2. + <_> + + <_> + 2 0 12 4 -1. + <_> + 2 0 6 2 2. + <_> + 8 2 6 2 2. + <_> + + <_> + 7 7 15 3 -1. + <_> + 7 8 15 1 3. + <_> + + <_> + 2 0 12 4 -1. + <_> + 2 0 6 2 2. + <_> + 8 2 6 2 2. + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 0 8 17 3 -1. + <_> + 0 9 17 1 3. + <_> + + <_> + 6 13 10 5 -1. + <_> + 6 13 5 5 2. + <_> + + <_> + 5 11 8 5 -1. + <_> + 9 11 4 5 2. + <_> + + <_> + 14 8 4 6 -1. + <_> + 14 8 2 6 2. + <_> + + <_> + 0 10 5 8 -1. + <_> + 0 14 5 4 2. + <_> + + <_> + 7 7 15 3 -1. + <_> + 7 8 15 1 3. + <_> + + <_> + 2 11 7 4 -1. + <_> + 2 13 7 2 2. + <_> + + <_> + 8 3 11 12 -1. + <_> + 8 6 11 6 2. + <_> + + <_> + 2 4 12 4 -1. + <_> + 2 4 6 2 2. + <_> + 8 6 6 2 2. + <_> + + <_> + 19 2 3 12 -1. + <_> + 20 3 1 12 3. + 1 + <_> + + <_> + 1 6 12 4 -1. + <_> + 1 6 6 2 2. + <_> + 7 8 6 2 2. + <_> + + <_> + 9 9 13 3 -1. + <_> + 9 10 13 1 3. + <_> + + <_> + 0 5 12 6 -1. + <_> + 0 5 6 3 2. + <_> + 6 8 6 3 2. + <_> + + <_> + 11 0 3 13 -1. + <_> + 12 0 1 13 3. + <_> + + <_> + 8 0 3 13 -1. + <_> + 9 0 1 13 3. + <_> + + <_> + 14 6 8 8 -1. + <_> + 14 10 8 4 2. + <_> + + <_> + 0 8 8 6 -1. + <_> + 0 10 8 2 3. + <_> + + <_> + 9 9 13 3 -1. + <_> + 9 10 13 1 3. + <_> + + <_> + 0 9 13 3 -1. + <_> + 0 10 13 1 3. + <_> + + <_> + 4 14 14 4 -1. + <_> + 11 14 7 2 2. + <_> + 4 16 7 2 2. + <_> + + <_> + 0 3 6 6 -1. + <_> + 2 3 2 6 3. + <_> + + <_> + 2 6 20 4 -1. + <_> + 7 6 10 4 2. + <_> + + <_> + 2 7 6 6 -1. + <_> + 4 7 2 6 3. + <_> + + <_> + 15 8 6 10 -1. + <_> + 17 8 2 10 3. + <_> + + <_> + 1 8 6 10 -1. + <_> + 3 8 2 10 3. + <_> + + <_> + 9 9 13 3 -1. + <_> + 9 10 13 1 3. + <_> + + <_> + 6 8 4 6 -1. + <_> + 6 8 4 3 2. + 1 + <_> + + <_> + 16 5 6 13 -1. + <_> + 16 5 3 13 2. + <_> + + <_> + 0 5 6 13 -1. + <_> + 3 5 3 13 2. + <_> + + <_> + 4 10 18 2 -1. + <_> + 4 10 9 2 2. + <_> + + <_> + 0 7 21 7 -1. + <_> + 7 7 7 7 3. + <_> + + <_> + 5 6 12 12 -1. + <_> + 9 6 4 12 3. + <_> + + <_> + 10 4 10 3 -1. + <_> + 9 5 10 1 3. + 1 + <_> + + <_> + 9 9 9 7 -1. + <_> + 12 9 3 7 3. + <_> + + <_> + 11 5 9 4 -1. + <_> + 14 8 3 4 3. + 1 + <_> + + <_> + 12 3 3 10 -1. + <_> + 12 3 3 5 2. + 1 + <_> + + <_> + 8 3 12 2 -1. + <_> + 8 3 6 2 2. + 1 + <_> + + <_> + 14 6 4 8 -1. + <_> + 14 10 4 4 2. + <_> + + <_> + 4 6 4 8 -1. + <_> + 4 10 4 4 2. + <_> + + <_> + 6 0 11 12 -1. + <_> + 6 3 11 6 2. + <_> + + <_> + 8 0 6 6 -1. + <_> + 8 3 6 3 2. + <_> + + <_> + 10 0 10 4 -1. + <_> + 10 0 5 4 2. + <_> + + <_> + 2 0 10 4 -1. + <_> + 7 0 5 4 2. + <_> + + <_> + 10 3 8 8 -1. + <_> + 14 3 4 4 2. 
+ <_> + 10 7 4 4 2. + <_> + + <_> + 4 3 8 8 -1. + <_> + 4 3 4 4 2. + <_> + 8 7 4 4 2. + <_> + + <_> + 2 9 18 5 -1. + <_> + 8 9 6 5 3. + <_> + + <_> + 0 15 16 3 -1. + <_> + 0 16 16 1 3. + <_> + + <_> + 6 16 12 2 -1. + <_> + 6 17 12 1 2. + <_> + + <_> + 3 0 4 8 -1. + <_> + 3 4 4 4 2. + <_> + + <_> + 15 6 6 6 -1. + <_> + 13 8 6 2 3. + 1 + <_> + + <_> + 7 6 6 6 -1. + <_> + 9 8 2 6 3. + 1 + <_> + + <_> + 13 12 6 6 -1. + <_> + 13 14 6 2 3. + <_> + + <_> + 3 12 6 6 -1. + <_> + 3 14 6 2 3. + <_> + + <_> + 8 13 14 4 -1. + <_> + 8 14 14 2 2. + <_> + + <_> + 0 13 14 4 -1. + <_> + 0 14 14 2 2. + <_> + + <_> + 3 13 17 2 -1. + <_> + 3 14 17 1 2. + <_> + + <_> + 4 6 12 4 -1. + <_> + 8 6 4 4 3. + <_> + + <_> + 8 7 9 4 -1. + <_> + 11 7 3 4 3. + <_> + + <_> + 10 0 6 8 -1. + <_> + 8 2 6 4 2. + 1 + <_> + + <_> + 9 2 12 12 -1. + <_> + 9 6 12 4 3. + <_> + + <_> + 11 0 6 3 -1. + <_> + 10 1 6 1 3. + 1 + <_> + + <_> + 13 1 3 7 -1. + <_> + 14 2 1 7 3. + 1 + <_> + + <_> + 2 3 12 9 -1. + <_> + 6 6 4 3 9. + <_> + + <_> + 19 2 3 12 -1. + <_> + 20 3 1 12 3. + 1 + <_> + + <_> + 3 5 12 5 -1. + <_> + 7 5 4 5 3. + <_> + + <_> + 13 1 3 7 -1. + <_> + 14 2 1 7 3. + 1 + <_> + + <_> + 9 1 7 3 -1. + <_> + 8 2 7 1 3. + 1 + <_> + + <_> + 9 7 8 6 -1. + <_> + 13 7 4 3 2. + <_> + 9 10 4 3 2. + <_> + + <_> + 4 14 14 4 -1. + <_> + 4 15 14 2 2. + <_> + + <_> + 10 14 6 4 -1. + <_> + 10 14 3 4 2. + <_> + + <_> + 6 14 6 4 -1. + <_> + 9 14 3 4 2. + <_> + + <_> + 14 0 4 16 -1. + <_> + 16 0 2 8 2. + <_> + 14 8 2 8 2. + <_> + + <_> + 0 15 20 3 -1. + <_> + 5 15 10 3 2. + <_> + + <_> + 16 5 3 13 -1. + <_> + 17 5 1 13 3. + <_> + + <_> + 2 6 13 8 -1. + <_> + 2 10 13 4 2. + <_> + + <_> + 16 5 3 13 -1. + <_> + 17 5 1 13 3. + <_> + + <_> + 7 12 7 4 -1. + <_> + 7 14 7 2 2. + <_> + + <_> + 15 1 4 9 -1. + <_> + 15 4 4 3 3. + <_> + + <_> + 0 4 16 2 -1. + <_> + 0 5 16 1 2. + <_> + + <_> + 8 4 12 2 -1. + <_> + 8 5 12 1 2. + <_> + + <_> + 6 3 9 15 -1. + <_> + 9 8 3 5 9. + <_> + + <_> + 12 3 3 8 -1. + <_> + 12 7 3 4 2. + <_> + + <_> + 5 6 12 4 -1. + <_> + 5 6 6 2 2. + <_> + 11 8 6 2 2. + <_> + + <_> + 16 3 3 14 -1. + <_> + 17 3 1 14 3. + <_> + + <_> + 3 3 3 14 -1. + <_> + 4 3 1 14 3. + <_> + + <_> + 0 4 22 4 -1. + <_> + 11 4 11 2 2. + <_> + 0 6 11 2 2. + <_> + + <_> + 1 4 4 9 -1. + <_> + 1 7 4 3 3. + <_> + + <_> + 7 13 12 4 -1. + <_> + 7 15 12 2 2. + <_> + + <_> + 3 13 12 4 -1. + <_> + 3 15 12 2 2. + <_> + + <_> + 11 14 6 4 -1. + <_> + 11 16 6 2 2. + <_> + + <_> + 1 0 13 3 -1. + <_> + 1 1 13 1 3. + <_> + + <_> + 11 0 6 4 -1. + <_> + 11 2 6 2 2. + <_> + + <_> + 4 14 14 4 -1. + <_> + 4 14 7 2 2. + <_> + 11 16 7 2 2. + <_> + + <_> + 6 0 12 2 -1. + <_> + 6 1 12 1 2. + <_> + + <_> + 5 0 6 4 -1. + <_> + 5 2 6 2 2. + <_> + + <_> + 11 0 3 6 -1. + <_> + 12 1 1 6 3. + 1 + <_> + + <_> + 11 0 6 3 -1. + <_> + 10 1 6 1 3. + 1 + <_> + + <_> + 7 12 8 6 -1. + <_> + 9 12 4 6 2. + <_> + + <_> + 1 1 5 10 -1. + <_> + 1 6 5 5 2. + <_> + + <_> + 13 0 2 12 -1. + <_> + 13 6 2 6 2. + <_> + + <_> + 7 0 2 12 -1. + <_> + 7 6 2 6 2. + <_> + + <_> + 12 1 8 14 -1. + <_> + 16 1 4 7 2. + <_> + 12 8 4 7 2. + <_> + + <_> + 1 0 8 10 -1. + <_> + 1 0 4 5 2. + <_> + 5 5 4 5 2. + <_> + + <_> + 6 6 16 4 -1. + <_> + 10 6 8 4 2. + <_> + + <_> + 1 14 13 2 -1. + <_> + 1 15 13 1 2. + <_> + + <_> + 2 7 20 3 -1. + <_> + 7 7 10 3 2. + <_> + + <_> + 11 2 9 4 -1. + <_> + 14 5 3 4 3. + 1 + <_> + + <_> + 6 5 13 2 -1. + <_> + 6 6 13 1 2. + <_> + + <_> + 3 0 6 15 -1. + <_> + 6 0 3 15 2. + <_> + + <_> + 3 12 8 6 -1. + <_> + 5 12 4 6 2. + <_> + + <_> + 13 1 4 7 -1. + <_> + 14 2 2 7 2. + 1 + <_> + + <_> + 9 1 7 4 -1. 
+ <_> + 8 2 7 2 2. + 1 + <_> + + <_> + 11 11 6 4 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 0 12 8 6 -1. + <_> + 0 12 4 3 2. + <_> + 4 15 4 3 2. + <_> + + <_> + 11 11 6 4 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 2 6 6 12 -1. + <_> + 2 6 3 6 2. + <_> + 5 12 3 6 2. + <_> + + <_> + 11 11 6 4 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 5 11 9 4 -1. + <_> + 8 11 3 4 3. + <_> + + <_> + 8 13 9 5 -1. + <_> + 11 13 3 5 3. + <_> + + <_> + 3 15 8 3 -1. + <_> + 7 15 4 3 2. + <_> + + <_> + 4 12 14 6 -1. + <_> + 11 12 7 3 2. + <_> + 4 15 7 3 2. + <_> + + <_> + 2 15 8 3 -1. + <_> + 6 15 4 3 2. + <_> + + <_> + 11 11 6 4 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 6 5 6 7 -1. + <_> + 8 5 2 7 3. + <_> + + <_> + 8 4 9 12 -1. + <_> + 11 8 3 4 9. + <_> + + <_> + 5 4 9 12 -1. + <_> + 8 8 3 4 9. + <_> + + <_> + 14 12 6 4 -1. + <_> + 14 14 6 2 2. + <_> + + <_> + 2 12 6 4 -1. + <_> + 2 14 6 2 2. + <_> + + <_> + 9 6 6 8 -1. + <_> + 11 6 2 8 3. + <_> + + <_> + 7 4 8 6 -1. + <_> + 7 6 8 2 3. + <_> + + <_> + 13 7 6 4 -1. + <_> + 13 7 6 2 2. + 1 + <_> + + <_> + 10 2 12 3 -1. + <_> + 9 3 12 1 3. + 1 + <_> + + <_> + 12 4 6 6 -1. + <_> + 14 6 2 6 3. + 1 + <_> + + <_> + 10 4 6 6 -1. + <_> + 8 6 6 2 3. + 1 + <_> + + <_> + 11 5 3 9 -1. + <_> + 12 6 1 9 3. + 1 + <_> + + <_> + 4 0 16 2 -1. + <_> + 4 0 16 1 2. + 1 + <_> + + <_> + 12 12 8 3 -1. + <_> + 12 12 4 3 2. + <_> + + <_> + 10 0 12 6 -1. + <_> + 13 3 6 6 2. + 1 + <_> + + <_> + 9 2 4 6 -1. + <_> + 9 5 4 3 2. + <_> + + <_> + 0 2 18 9 -1. + <_> + 6 5 6 3 9. + <_> + + <_> + 16 2 3 9 -1. + <_> + 17 3 1 9 3. + 1 + <_> + + <_> + 6 2 9 3 -1. + <_> + 5 3 9 1 3. + 1 + <_> + + <_> + 10 1 12 4 -1. + <_> + 14 1 4 4 3. + <_> + + <_> + 0 1 12 4 -1. + <_> + 4 1 4 4 3. + <_> + + <_> + 6 14 12 4 -1. + <_> + 12 14 6 2 2. + <_> + 6 16 6 2 2. + <_> + + <_> + 4 2 13 3 -1. + <_> + 4 3 13 1 3. + <_> + + <_> + 7 2 13 3 -1. + <_> + 7 3 13 1 3. + <_> + + <_> + 1 12 20 2 -1. + <_> + 11 12 10 2 2. + <_> + + <_> + 5 2 12 3 -1. + <_> + 9 2 4 3 3. + <_> + + <_> + 4 8 14 9 -1. + <_> + 11 8 7 9 2. + <_> + + <_> + 10 2 4 8 -1. + <_> + 10 2 2 8 2. + <_> + + <_> + 8 2 4 8 -1. + <_> + 10 2 2 8 2. + <_> + + <_> + 16 1 2 16 -1. + <_> + 16 9 2 8 2. + <_> + + <_> + 2 8 9 4 -1. + <_> + 5 8 3 4 3. + <_> + + <_> + 16 1 2 16 -1. + <_> + 16 9 2 8 2. + <_> + + <_> + 4 1 2 16 -1. + <_> + 4 9 2 8 2. + <_> + + <_> + 10 7 8 6 -1. + <_> + 14 7 4 3 2. + <_> + 10 10 4 3 2. + <_> + + <_> + 4 7 8 6 -1. + <_> + 4 7 4 3 2. + <_> + 8 10 4 3 2. + <_> + + <_> + 12 8 2 7 -1. + <_> + 12 8 1 7 2. + 1 + <_> + + <_> + 5 8 6 8 -1. + <_> + 5 8 3 4 2. + <_> + 8 12 3 4 2. + <_> + + <_> + 12 8 2 7 -1. + <_> + 12 8 1 7 2. + 1 + <_> + + <_> + 10 8 7 2 -1. + <_> + 10 8 7 1 2. + 1 + <_> + + <_> + 5 9 13 8 -1. + <_> + 5 11 13 4 2. + <_> + + <_> + 7 9 4 9 -1. + <_> + 9 9 2 9 2. + <_> + + <_> + 9 6 6 10 -1. + <_> + 11 6 2 10 3. + <_> + + <_> + 7 6 6 10 -1. + <_> + 9 6 2 10 3. + <_> + + <_> + 6 0 14 6 -1. + <_> + 13 0 7 3 2. + <_> + 6 3 7 3 2. + <_> + + <_> + 2 0 14 6 -1. + <_> + 2 0 7 3 2. + <_> + 9 3 7 3 2. + <_> + + <_> + 3 6 16 3 -1. + <_> + 3 7 16 1 3. + <_> + + <_> + 1 6 15 3 -1. + <_> + 1 7 15 1 3. + <_> + + <_> + 8 5 8 4 -1. + <_> + 8 7 8 2 2. + <_> + + <_> + 2 4 12 10 -1. + <_> + 8 4 6 10 2. + <_> + + <_> + 7 0 14 16 -1. + <_> + 7 0 7 16 2. + <_> + + <_> + 1 1 18 3 -1. + <_> + 10 1 9 3 2. + <_> + + <_> + 8 8 12 2 -1. + <_> + 8 8 6 2 2. + <_> + + <_> + 8 1 6 4 -1. + <_> + 11 1 3 4 2. + <_> + + <_> + 11 0 4 10 -1. + <_> + 12 1 2 10 2. + 1 + <_> + + <_> + 11 0 10 4 -1. + <_> + 10 1 10 2 2. + 1 + <_> + + <_> + 13 7 9 4 -1. + <_> + 16 7 3 4 3. 
+ <_> + + <_> + 11 1 6 2 -1. + <_> + 11 1 6 1 2. + 1 + <_> + + <_> + 8 8 12 2 -1. + <_> + 8 8 6 2 2. + <_> + + <_> + 7 12 6 5 -1. + <_> + 10 12 3 5 2. + <_> + + <_> + 10 7 9 11 -1. + <_> + 13 7 3 11 3. + <_> + + <_> + 6 15 8 3 -1. + <_> + 10 15 4 3 2. + <_> + + <_> + 19 3 2 12 -1. + <_> + 19 3 1 12 2. + <_> + + <_> + 1 3 2 12 -1. + <_> + 2 3 1 12 2. + <_> + + <_> + 11 1 9 10 -1. + <_> + 14 1 3 10 3. + <_> + + <_> + 1 3 16 6 -1. + <_> + 5 3 8 6 2. + <_> + + <_> + 7 1 12 12 -1. + <_> + 11 1 4 12 3. + <_> + + <_> + 2 8 12 2 -1. + <_> + 8 8 6 2 2. + <_> + + <_> + 14 7 3 10 -1. + <_> + 14 12 3 5 2. + <_> + + <_> + 1 15 18 3 -1. + <_> + 10 15 9 3 2. + <_> + + <_> + 9 0 13 3 -1. + <_> + 9 1 13 1 3. + <_> + + <_> + 5 0 12 3 -1. + <_> + 5 1 12 1 3. + <_> + + <_> + 12 1 2 15 -1. + <_> + 12 1 1 15 2. + <_> + + <_> + 8 1 2 15 -1. + <_> + 9 1 1 15 2. + <_> + + <_> + 12 2 3 13 -1. + <_> + 13 2 1 13 3. + <_> + + <_> + 1 6 4 8 -1. + <_> + 3 6 2 8 2. + <_> + + <_> + 17 1 4 12 -1. + <_> + 19 1 2 6 2. + <_> + 17 7 2 6 2. + <_> + + <_> + 1 1 4 12 -1. + <_> + 1 1 2 6 2. + <_> + 3 7 2 6 2. + <_> + + <_> + 17 0 4 7 -1. + <_> + 17 0 2 7 2. + <_> + + <_> + 1 0 4 7 -1. + <_> + 3 0 2 7 2. + <_> + + <_> + 12 2 3 13 -1. + <_> + 13 2 1 13 3. + <_> + + <_> + 7 4 5 9 -1. + <_> + 7 7 5 3 3. + <_> + + <_> + 12 2 3 13 -1. + <_> + 13 2 1 13 3. + <_> + + <_> + 7 2 3 13 -1. + <_> + 8 2 1 13 3. + <_> + + <_> + 3 5 17 4 -1. + <_> + 3 6 17 2 2. + <_> + + <_> + 2 3 18 3 -1. + <_> + 2 4 18 1 3. + <_> + + <_> + 11 11 6 4 -1. + <_> + 11 13 6 2 2. + <_> + + <_> + 5 11 6 4 -1. + <_> + 5 13 6 2 2. + <_> + + <_> + 15 5 6 4 -1. + <_> + 15 5 6 2 2. + 1 + <_> + + <_> + 7 5 4 6 -1. + <_> + 7 5 2 6 2. + 1 + <_> + + <_> + 13 1 8 8 -1. + <_> + 15 1 4 8 2. + <_> + + <_> + 3 1 12 12 -1. + <_> + 7 1 4 12 3. + <_> + + <_> + 14 2 4 12 -1. + <_> + 14 2 2 12 2. + <_> + + <_> + 4 2 4 12 -1. + <_> + 6 2 2 12 2. + <_> + + <_> + 15 0 2 14 -1. + <_> + 15 0 1 14 2. + <_> + + <_> + 5 0 2 14 -1. + <_> + 6 0 1 14 2. + <_> + + <_> + 15 1 7 15 -1. + <_> + 15 6 7 5 3. + <_> + + <_> + 6 1 7 6 -1. + <_> + 4 3 7 2 3. + 1 + <_> + + <_> + 1 4 20 14 -1. + <_> + 11 4 10 7 2. + <_> + 1 11 10 7 2. + <_> + + <_> + 1 2 6 8 -1. + <_> + 3 2 2 8 3. + <_> + + <_> + 15 0 2 13 -1. + <_> + 15 0 1 13 2. + <_> + + <_> + 2 1 9 10 -1. + <_> + 5 1 3 10 3. + <_> + + <_> + 9 9 6 6 -1. + <_> + 11 9 2 6 3. + <_> + + <_> + 5 5 8 4 -1. + <_> + 5 5 8 2 2. + 1 + <_> + + <_> + 5 8 14 4 -1. + <_> + 5 9 14 2 2. + <_> + + <_> + 0 7 20 2 -1. + <_> + 10 7 10 2 2. + <_> + + <_> + 8 0 10 10 -1. + <_> + 8 0 5 10 2. + <_> + + <_> + 4 0 10 10 -1. + <_> + 9 0 5 10 2. + <_> + + <_> + 5 1 15 10 -1. + <_> + 10 1 5 10 3. + <_> + + <_> + 0 9 18 4 -1. + <_> + 0 10 18 2 2. + <_> + + <_> + 8 8 10 6 -1. + <_> + 8 10 10 2 3. + <_> + + <_> + 4 8 10 6 -1. + <_> + 4 10 10 2 3. + <_> + + <_> + 11 6 10 12 -1. + <_> + 11 10 10 4 3. + <_> + + <_> + 8 5 4 8 -1. + <_> + 8 5 4 4 2. + 1 + <_> + + <_> + 17 8 5 6 -1. + <_> + 17 11 5 3 2. + <_> + + <_> + 8 11 4 7 -1. + <_> + 10 11 2 7 2. + <_> + + <_> + 9 5 12 3 -1. + <_> + 9 6 12 1 3. + <_> + + <_> + 2 9 13 3 -1. + <_> + 2 10 13 1 3. + <_> + + <_> + 3 13 16 3 -1. + <_> + 3 13 8 3 2. + <_> + + <_> + 5 12 8 4 -1. + <_> + 9 12 4 4 2. + <_> + + <_> + 14 8 6 9 -1. + <_> + 14 11 6 3 3. + <_> + + <_> + 4 10 12 3 -1. + <_> + 4 11 12 1 3. + <_> + + <_> + 6 7 11 9 -1. + <_> + 6 10 11 3 3. + <_> + + <_> + 4 1 9 4 -1. + <_> + 7 4 3 4 3. + 1 + <_> + + <_> + 12 1 9 9 -1. + <_> + 15 1 3 9 3. + <_> + + <_> + 1 1 9 9 -1. + <_> + 4 1 3 9 3. + <_> + + <_> + 14 1 6 6 -1. + <_> + 16 1 2 6 3. 
+ <_> + + <_> + 4 6 4 6 -1. + <_> + 6 6 2 6 2. + <_> + + <_> + 7 5 12 7 -1. + <_> + 10 5 6 7 2. + <_> + + <_> + 3 5 12 7 -1. + <_> + 6 5 6 7 2. + diff --git a/custom_nodes/was-node-suite-comfyui/res/lbpcascade_animeface.xml b/custom_nodes/was-node-suite-comfyui/res/lbpcascade_animeface.xml new file mode 100644 index 0000000000000000000000000000000000000000..5a973ef6a1d2a77c23adafbfbf7fbf9de00e95c6 --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/res/lbpcascade_animeface.xml @@ -0,0 +1,6693 @@ + + + + + BOOST + LBP + 24 + 24 + + GAB + 9.9500000476837158e-001 + 5.0000000000000000e-001 + 9.4999999999999996e-001 + 1 + 100 + + 256 + 20 + + + <_> + 3 + -1.2636742591857910e+000 + + <_> + + 0 -1 103 -1302 -1 -2097218 -1 -1 -1 -1 -1 + + -8.6284315586090088e-001 8.2884031534194946e-001 + <_> + + 0 -1 111 -270553558 -16593 -1342197841 -24657 -1069073 + -28753 -268456017 -28753 + + -8.3015900850296021e-001 5.0244796276092529e-001 + <_> + + 0 -1 274 -791883568 -570557057 -10521123 -2261129 -168835596 + -14680065 -536870913 -577 + + -6.8020063638687134e-001 4.2932784557342529e-001 + + <_> + 6 + -1.6361999511718750e+000 + + <_> + + 0 -1 17 -67110150 -1 -1 -1 -1 -1 -1 -1 + + -7.5649648904800415e-001 6.5665715932846069e-001 + <_> + + 0 -1 226 -21846 -18501 -546118 -17745 -67130689 -20481 + -21589 -5185 + + -6.6228806972503662e-001 4.5048093795776367e-001 + <_> + + 0 -1 167 -17104902 269658770 -5046274 -14539742 -1 -67897614 + -1 -11301 + + -6.0636126995086670e-001 4.1438210010528564e-001 + <_> + + 0 -1 410 -67109392 -5 -537133576 -1282 1154814128 -4638 + -1448359800 -71304194 + + -5.9243917465209961e-001 3.5992857813835144e-001 + <_> + + 0 -1 329 -422583808 2030231298 -445326064 -35408702 + 1313755528 222323776 -589330296 -3146022 + + -5.1273822784423828e-001 3.9016643166542053e-001 + <_> + + 0 -1 196 -927858550 1525075679 149029012 -71367005 + 1365708031 -386200322 -419477761 -1110321 + + -4.2613434791564941e-001 3.8135424256324768e-001 + + <_> + 8 + -1.6592464447021484e+000 + + <_> + + 0 -1 112 -18 -1 -17 -1 -1 -1 -1 -1 + + -7.1868747472763062e-001 5.0393396615982056e-001 + <_> + + 0 -1 76 -98500654 -327681 -81592321 -1 -67641345 -1 -525313 + -1 + + -6.5284144878387451e-001 4.0119534730911255e-001 + <_> + + 0 -1 405 -571474700 -4097 -1409548289 -1 -1939013504 -65537 + -269484808 -1 + + -5.9361612796783447e-001 3.1780999898910522e-001 + <_> + + 0 -1 121 -486563073 -218104129 553639679 -88085761 + 1082066687 -151040033 -37889 -32769 + + -3.8881537318229675e-001 4.6969282627105713e-001 + <_> + + 0 -1 91 -28754 -4741217 -5787766 -4272213 -268464465 + -71364690 -340815953 -1616049394 + + -5.3805375099182129e-001 3.0860918760299683e-001 + <_> + + 0 -1 446 419443088 285235408 254456305 -16204592 -1391444552 + -1359433799 -1424163143 -1079260968 + + -4.2636507749557495e-001 3.5725688934326172e-001 + <_> + + 0 -1 327 -597814534 -555989829 -69009699 -13080945 + 1523056856 -23444034 -222757126 -72695094 + + -3.2992404699325562e-001 4.1319400072097778e-001 + <_> + + 0 -1 564 253993232 855774768 261231921 1605348720 1061134545 + -1109396681 -8408880 -1073744225 + + -4.3008872866630554e-001 2.9400870203971863e-001 + + <_> + 11 + -1.5440381765365601e+000 + + <_> + + 0 -1 288 -65537 -331809 -5586945 -5595969 -4524038 -5578822 + -72683526 -1431651670 + + -4.9188971519470215e-001 4.8762881755828857e-001 + <_> + + 0 -1 65 -168317009 -167792753 -101341265 -677969 -201347153 + -675921 -67260497 -1077966929 + + -5.7650732994079590e-001 3.7729337811470032e-001 + <_> + + 0 -1 505 -264195 -4677924 -67109379 -7370536 
+        [... machine-generated cascade-classifier stage data elided: each `<_>`
+        block is one boosted stage, giving its weak-classifier count (14 to 66
+        in this span), a stage threshold (roughly -1.62 to -1.35), and one
+        internal node per weak classifier of the form `0 -1 <feature-index>`
+        followed by eight 32-bit lookup masks and a pair of leaf values — a
+        layout consistent with OpenCV's LBP cascade XML serialization ...]
-134679350 -168300550 + + -1.2344046682119370e-001 2.3487685620784760e-001 + <_> + + 0 -1 69 -135790657 -101712898 1790961276 -1117268 -167790693 + -1092435916 -134748966 -171444278 + + -9.3660876154899597e-002 2.9284912347793579e-001 + <_> + + 0 -1 334 1426527010 -134922701 -283812105 -2623147 + -1643650329 -789597 -285199645 -2130690814 + + -1.9195295870304108e-001 1.3882079720497131e-001 + <_> + + 0 -1 128 -188748058 -604825097 228284687 -1059465 -744525073 + -134502345 -74199121 -2098289 + + -1.7025393247604370e-001 1.5861026942729950e-001 + <_> + + 0 -1 504 1444369751 1571127165 1068923765 -418381862 + 1832345573 -1136660769 1164394880 -1928344087 + + -1.7193807661533356e-001 1.6115176677703857e-001 + <_> + + 0 -1 212 577742851 -69269885 687362130 939513855 1131934720 + 1456416085 930627893 -2100289 + + -1.3450787961483002e-001 1.9277665019035339e-001 + <_> + + 0 -1 371 -799212524 -209768513 -76223492 -117260294 + -830701092 -357900157 -1262178576 -68422742 + + -3.8245826959609985e-001 6.8483658134937286e-002 + <_> + + 0 -1 278 207857047 271759280 -286880937 783085486 242614261 + -1090523169 -287801388 -1158721947 + + -1.9277557730674744e-001 1.3489215075969696e-001 + <_> + + 0 -1 220 -509434961 -78710066 -1275726622 -753404420 + -203423798 -1895375284 -673909042 -716971826 + + -1.1536061018705368e-001 2.2317345440387726e-001 + <_> + + 0 -1 433 -966602846 464451514 1072605484 -1080755764 + 2001878519 2011854582 -759073 2145875767 + + -1.7061035335063934e-001 1.4551682770252228e-001 + <_> + + 0 -1 570 -813715661 2085594419 2080225111 -1085276791 + -2024283721 456261102 1944045462 -537989201 + + -1.5524835884571075e-001 1.6316364705562592e-001 + <_> + + 0 -1 0 -4194317 -437584658 -34446 -8391184 -1048677 + -892667004 2009592769 -145255149 + + -1.1445097625255585e-001 2.1161417663097382e-001 + <_> + + 0 -1 160 -354096414 1341058784 -21551621 805293574 + 1727467967 2145870579 -954962 1608997489 + + -2.4929216504096985e-001 9.5367997884750366e-002 + <_> + + 0 -1 175 1133488646 19079215 1923684923 531607301 1643598741 + 16213639 1617959234 1911899943 + + -2.0606075227260590e-001 1.1344346404075623e-001 + <_> + + 0 -1 638 -201331985 785378799 -89140512 -506473810 + -117440562 4699124 -137366572 -237519977 + + -1.3178630173206329e-001 1.7929145693778992e-001 + <_> + + 0 -1 333 1500975106 -591757505 -758294281 -159531458 + 1694185913 -1025496001 1422394875 2130083615 + + -3.1970807909965515e-001 7.3186144232749939e-002 + <_> + + 0 -1 186 -252197833 -222679057 -691545539 2146925322 + -6971745 -20010264 -23089430 809597526 + + -1.5578804910182953e-001 1.5471589565277100e-001 + <_> + + 0 -1 472 528507857 220264592 61472209 -1121194504 + -1442796359 -325000515 254280095 -1913940771 + + -1.6838937997817993e-001 1.4214324951171875e-001 + <_> + + 0 -1 113 -475271441 -85279086 -969873294 -134745356 + -251673889 1917934182 2004345816 -238553377 + + -1.1203543841838837e-001 2.0810666680335999e-001 + <_> + + 0 -1 222 191466152 330415091 1572941818 995482546 -438844995 + 1062600569 -1777930 803654826 + + -2.9350510239601135e-001 7.9699665307998657e-002 + <_> + + 0 -1 149 -357896534 -759148596 -2131974922 -70545720 + -2069117009 1145131456 1431040222 -250146674 + + -1.4853553473949432e-001 1.5207415819168091e-001 + <_> + + 0 -1 100 -1045909650 -485611602 -1652363393 -544464370 + -210833459 -205732025 -412155925 1161839428 + + -1.8374817073345184e-001 1.2407581508159637e-001 + <_> + + 0 -1 484 -2095020544 657773984 -1117496013 -272004556 + -2094691888 -1119555728 770155170 618987575 + + 
-2.2434021532535553e-001 1.0049981623888016e-001 + <_> + + 0 -1 551 -604093873 -680840318 -1293353250 1356493274 + -100805812 -1358042356 -255656308 -800923830 + + -1.5413197875022888e-001 1.5199714899063110e-001 + <_> + + 0 -1 422 -434654828 -1852916328 1623711061 -39002980 + -1050953440 -356992779 -853603320 -909122417 + + -1.5186285972595215e-001 1.5066848695278168e-001 + <_> + + 0 -1 362 -547512961 -614724543 -5289121 1609498725 + -593979176 -30505107 -4653062 -5570290 + + -9.3973837792873383e-002 2.5007000565528870e-001 + <_> + + 0 -1 502 421738261 512647543 -369466298 644660618 2014315985 + -122826753 -1422579952 144522760 + + -2.2758260369300842e-001 1.0476879030466080e-001 + <_> + + 0 -1 414 -1743450761 -1667229731 -1111617731 -1918337240 + -925364899 -1401618467 -396751348 149751812 + + -2.0826348662376404e-001 1.1135675758123398e-001 + <_> + + 0 -1 327 -589833067 1358228811 -1174324513 1295585037 + 1415054460 -677683843 -333918502 -81110961 + + -1.5275685489177704e-001 1.4786334335803986e-001 + <_> + + 0 -1 513 268447386 -768359989 971741463 -746591329 527673597 + -1371129041 267492031 976958107 + + -2.1398673951625824e-001 1.0998487472534180e-001 + <_> + + 0 -1 287 458687 74623383 -2110418338 547384028 1146347479 + -300476649 1564515406 -549791922 + + -1.9065324962139130e-001 1.2086330354213715e-001 + <_> + + 0 -1 321 -1292446785 -1832472107 -874707144 -1316029507 + 221482943 -54635093 545718286 1952442355 + + -1.8228583037853241e-001 1.2629486620426178e-001 + <_> + + 0 -1 613 -270555509 -2024964282 -340115456 -225707460 + -214435894 140869335 -755499270 -514854962 + + -1.2966547906398773e-001 1.7814037203788757e-001 + <_> + + 0 -1 237 1082294452 279297776 739665110 -1112548388 + 1558433004 2084562652 -285242720 822082519 + + -2.1644125878810883e-001 1.0754965990781784e-001 + <_> + + 0 -1 436 -15204353 1555857143 -370262113 250109974 + -813863457 -118130955 1282485260 -1069443322 + + -1.1899302154779434e-001 1.9272921979427338e-001 + <_> + + 0 -1 195 -458694430 -18865688 -1560262314 -174816220 + 1940735675 2090354613 1843399327 -619769841 + + -1.4177682995796204e-001 1.5781848132610321e-001 + <_> + + 0 -1 396 -281556317 1261252513 -1844107376 -1199966654 + 1342875535 19047986 2048844537 -968363117 + + -1.2672142684459686e-001 1.7686100304126740e-001 + <_> + + 0 -1 349 1148552471 346904839 -953859756 -2069582840 + 808277389 -1544759304 278153766 4483088 + + -2.1089854836463928e-001 1.0583920031785965e-001 + <_> + + 0 -1 26 -7511510 -490213457 -1597284352 -261998934 + 1392468480 403119942 1463169024 1346631127 + + -2.8440985083580017e-001 7.8383870422840118e-002 + <_> + + 0 -1 235 -190183264 1490329067 -1474001346 -1131666523 + 1952776616 -1196417886 -93570934 -1442369763 + + -1.5732380747795105e-001 1.4436712861061096e-001 + <_> + + 0 -1 77 -234098945 1368758007 -117547043 -4245393 1380304763 + -1137653377 -214434069 -302694186 + + -1.0450263321399689e-001 2.1435043215751648e-001 + <_> + + 0 -1 617 -71305489 -102079985 -2135231236 -1835277350 + -268448192 1963334918 -1485652036 1347612190 + + -1.6540440917015076e-001 1.3486905395984650e-001 + <_> + + 0 -1 359 -643869705 -1701290179 -88374529 -4249741 + -615216080 -17244690 -248849941 -1146418449 + + -8.9679509401321411e-002 2.5216883420944214e-001 + <_> + + 0 -1 259 -2018016477 -135365668 -1220542478 -176295708 + -1145529441 -809304950 -718277394 -716994988 + + -1.1881193518638611e-001 1.8229863047599792e-001 + <_> + + 0 -1 521 -659116544 -39007946 -1632034575 -1109530378 + -507923208 215233620 -38434133 -1994050161 + + 
-1.3277813792228699e-001 1.6686922311782837e-001 + <_> + + 0 -1 250 139447519 -1707244049 543030878 -1876755446 + 418206416 -1344311399 1070800532 -549216572 + + -1.7134600877761841e-001 1.2920229136943817e-001 + <_> + + 0 -1 563 1065351043 -1747387425 1440546065 -1130366210 + 1655662591 -1481415173 -1230520517 -1409895749 + + -1.2196241319179535e-001 1.8406112492084503e-001 + <_> + + 0 -1 590 -1489788317 -215482932 1167516208 -1243059476 + 924747395 731626188 494917253 -1008733729 + + -1.4610977470874786e-001 1.5349069237709045e-001 + <_> + + 0 -1 132 -404231235 -154083334 -386960532 -102894914 + -1494941761 -523327360 -341382230 -993681342 + + -1.1227646470069885e-001 1.9230186939239502e-001 + <_> + + 0 -1 189 461571246 898745267 1045718764 1067757070 838700687 + 523352002 752994271 746684614 + + -3.1202149391174316e-001 6.9881774485111237e-002 + <_> + + 0 -1 319 34848759 77858295 1755294463 -1612362699 279183197 + -1091412483 74513294 -583830953 + + -1.1771897226572037e-001 1.8433926999568939e-001 + <_> + + 0 -1 412 -251804257 -1722448993 2138034394 1294782986 + -571552448 -178921642 -290600310 604546090 + + -1.5075103938579559e-001 1.4529505372047424e-001 + <_> + + 0 -1 286 1276237310 1447458454 -934498184 1623452708 + -1280217356 1357512592 552402944 -528812416 + + -3.4949412941932678e-001 6.3179224729537964e-002 + <_> + + 0 -1 449 1663467552 416069478 -1587070595 -1216236355 + 394667813 694428982 203361851 -1578764654 + + -1.8098776042461395e-001 1.2132294476032257e-001 + <_> + + 0 -1 554 -54546545 -142747138 -1662192658 -117332434 + -558788200 1595934520 -1372737862 -1603959254 + + -1.1871227622032166e-001 1.8538504838943481e-001 + <_> + + 0 -1 580 -136315383 1373438347 2030042960 1935331222 + -1732324440 -1866576357 -104547704 -1603561958 + + -1.2392864376306534e-001 1.7591291666030884e-001 + <_> + + 0 -1 37 1179711253 932391543 -159717504 1907212206 + 1953330597 1588556119 1323654868 -59937033 + + -1.2764546275138855e-001 1.7045098543167114e-001 + + <_> + 71 + -1.3531562089920044e+000 + + <_> + + 0 -1 539 503783185 488612115 -1644183017 -6291490 1583288237 + -8273 -67147553 -50467906 + + -2.3604758083820343e-001 1.4624665677547455e-001 + <_> + + 0 -1 125 -754980097 -92799137 586973151 -1206650282 + -1015162113 -524401 1993957087 -172492917 + + -2.0661883056163788e-001 1.6887924075126648e-001 + <_> + + 0 -1 384 -669517935 16839696 -584633040 2098167213 17062277 + 404341136 716110059 789549483 + + -1.9015172123908997e-001 1.6193783283233643e-001 + <_> + + 0 -1 94 -232805713 -20973057 -34603081 -42607105 -356519937 + -118109025 -387977505 -2134520828 + + -1.5412615239620209e-001 1.9334699213504791e-001 + <_> + + 0 -1 466 528479223 1904195575 -55119905 -10575890 -947918857 + -288358401 1944539762 -135803137 + + -2.0460499823093414e-001 1.4255046844482422e-001 + <_> + + 0 -1 47 -201327709 -19927066 1384109026 -739317020 -12587089 + 1179121028 -241174704 -168296506 + + -1.5054537355899811e-001 1.8594944477081299e-001 + <_> + + 0 -1 339 -581632048 -606277136 -368021128 -176383780 + -956433298 -268467029 1551686188 -36312104 + + -3.0771997570991516e-001 9.1459386050701141e-002 + <_> + + 0 -1 229 1595362495 532541183 -1879371521 1474776767 + 2138187743 1066097155 -537382949 1073678919 + + -2.1706582605838776e-001 1.5264432132244110e-001 + <_> + + 0 -1 254 -864133153 -692189161 -1099006309 304102399 + 1492012696 -93634736 1580914378 1929543391 + + -2.5388464331626892e-001 1.0271452367305756e-001 + <_> + + 0 -1 457 -64904797 -305660509 1468164915 -565 1059143671 + 2107965041 2079816191 
-18385129 + + -1.3010272383689880e-001 1.9871400296688080e-001 + <_> + + 0 -1 511 -220210673 -536887410 -757597954 -235406646 + -67108980 -867369216 -31934210 -251404342 + + -1.0590624809265137e-001 2.3959128558635712e-001 + <_> + + 0 -1 497 -740299264 146795517 -50352379 -1912628641 + 1053777812 901722868 771235244 546303999 + + -2.2008468210697174e-001 1.1258465051651001e-001 + <_> + + 0 -1 355 343358231 213430181 -1364197793 1073696095 + 1515411668 -1059425 -13050290 -1078259937 + + -1.1953093856573105e-001 2.0282387733459473e-001 + <_> + + 0 -1 373 1245651467 183707959 582887578 31777007 1691296546 + 237724247 1348920347 -203426945 + + -2.0973107218742371e-001 1.1584123969078064e-001 + <_> + + 0 -1 35 2043933695 -144771167 19989255 1072001427 -506069031 + -1929904385 592718063 -169511337 + + -1.2918440997600555e-001 1.8772880733013153e-001 + <_> + + 0 -1 606 -8388705 -6177 1207429886 -170921106 -34868280 + -53228224 -120789286 -263979270 + + -9.4212375581264496e-002 2.5828978419303894e-001 + <_> + + 0 -1 179 1023876484 -256822903 -1178663971 -236886881 + -236217863 -52522546 833036237 -8385148 + + -1.9691897928714752e-001 1.1818353086709976e-001 + <_> + + 0 -1 124 -85998033 1383065263 1880502784 1516235723 + 2004838208 1346401792 1406554770 -214434290 + + -1.7252604663372040e-001 1.3341437280178070e-001 + <_> + + 0 -1 508 -215485453 -1146159840 -1684308559 -551551504 + -1448107335 -1364266068 197057439 -536879651 + + -1.1676036566495895e-001 1.9348676502704620e-001 + <_> + + 0 -1 388 1867466672 -654132904 -1678940751 -1073748483 + -1814283784 982153656 163645464 -1409292229 + + -1.3684672117233276e-001 1.7416830360889435e-001 + <_> + + 0 -1 241 809177088 255278841 2052280087 2064460633 + -357326549 -1147540547 1220038219 2137729130 + + -3.8880673050880432e-001 5.8297563344240189e-002 + <_> + + 0 -1 463 2113404919 2119573369 -1216467140 686062828 + 1599431925 -35250219 -379253932 -588874972 + + -1.5329168736934662e-001 1.5723362565040588e-001 + <_> + + 0 -1 74 -142872849 -1123585 -1226054942 -168821790 -16843777 + -682478139 -148908590 -170396193 + + -1.0189045220613480e-001 2.2314058244228363e-001 + <_> + + 0 -1 491 666353457 301069589 -134324227 -1850148995 + -163618376 -1430878019 -272720724 747437600 + + -1.5450522303581238e-001 1.4515057206153870e-001 + <_> + + 0 -1 265 -1520456817 -672619806 -1650134352 -739249448 + -410644 1850609408 -671883070 -716704562 + + -1.1736624687910080e-001 1.9088935852050781e-001 + <_> + + 0 -1 640 -268439637 1743224491 -1062209824 2075129070 + -100680209 17194724 -503319132 -705694017 + + -1.1885214596986771e-001 1.8879969418048859e-001 + <_> + + 0 -1 351 -235865865 -778321560 -773813480 -977205055 + -671809621 -2037042305 -1373573570 33902919 + + -1.6477152705192566e-001 1.3532666862010956e-001 + <_> + + 0 -1 281 1288503263 496561871 -41996993 -44831381 513794520 + -6296133 -11544504 -545596144 + + -1.3791570067405701e-001 1.6125756502151489e-001 + <_> + + 0 -1 592 -536990993 -545437826 -526919430 1927770592 + -1149453240 -12157692 -795681042 -263995441 + + -1.2841881811618805e-001 1.8196146190166473e-001 + <_> + + 0 -1 200 -998586714 1923629704 -1567936970 -106682636 + 1246039486 1391923172 2128445439 -1161220571 + + -1.5602493286132813e-001 1.4026483893394470e-001 + <_> + + 0 -1 574 -902847741 1122672495 1740485216 -252907012 + 716157611 40020694 -1354763301 -705694561 + + -1.2058999389410019e-001 1.8453432619571686e-001 + <_> + + 0 -1 411 43190589 237000119 -1479864545 218107700 1727349849 + -84927193 168837124 201328132 + + 
-2.7450370788574219e-001 8.0660879611968994e-002 + <_> + + 0 -1 503 1145396501 -596306061 -1883250830 604912112 + 1244529921 1003257717 2066631424 -2012515040 + + -2.2889545559883118e-001 9.6403092145919800e-002 + <_> + + 0 -1 337 -704658769 -746617960 1373614706 -235613442 + -75520946 1213059150 -705694758 -783549270 + + -1.0090714693069458e-001 2.1733656525611877e-001 + <_> + + 0 -1 239 -555432832 -1192997968 -286225699 -35864850 + 798508188 -1108076048 -365142037 -1475542263 + + -1.2968091666698456e-001 1.6475941240787506e-001 + <_> + + 0 -1 288 -573549447 -676853411 -1052201605 118622855 + 2113410296 -336957660 -618632262 -1442645118 + + -2.5901097059249878e-001 8.1189118325710297e-002 + <_> + + 0 -1 368 59193218 2011775753 1576662974 -4195534 1435621351 + -1702058057 -2017202266 1977972599 + + -1.8126857280731201e-001 1.1822484433650970e-001 + <_> + + 0 -1 499 -988850988 1016641345 160914500 -474084890 + 154236052 -1078989845 1441076224 1040163634 + + -3.1549975275993347e-001 6.6674441099166870e-002 + <_> + + 0 -1 364 4349984 639681328 129645151 668510722 1148195120 + 2062963956 153885736 1647779843 + + -2.2751913964748383e-001 9.2987477779388428e-002 + <_> + + 0 -1 602 -276828158 1130870371 -1531186142 1894639586 + 1969214592 21205461 -621813270 -792462373 + + -1.4819277822971344e-001 1.4046442508697510e-001 + <_> + + 0 -1 353 118533695 270567044 1288523533 -1113072698 + 827708669 1605729269 -1091248161 -1141436093 + + -1.1226476728916168e-001 1.8895176053047180e-001 + <_> + + 0 -1 340 -85094773 1487850663 721041258 1374657596 + 1581250843 1112227395 -132434392 -362545185 + + -1.1850622296333313e-001 1.7904981970787048e-001 + <_> + + 0 -1 424 -1044577783 -3170331 -210830594 -539528456 + -1410339954 -1907435776 -201789206 -213651366 + + -1.0257221013307571e-001 2.0294868946075439e-001 + <_> + + 0 -1 632 -176173273 -63971382 -92540894 549647982 -1338560 + 1476888389 -78600722 1081266138 + + -1.8841557204723358e-001 1.1295656859874725e-001 + <_> + + 0 -1 520 -671890176 -181237993 -1548798927 2005171473 + 319759748 -51300395 -1366522344 71618053 + + -2.0921583473682404e-001 1.0211694240570068e-001 + <_> + + 0 -1 88 -1602114652 -1100433156 -2115529 -1184520048 + -1984574213 -297169991 366750175 88360004 + + -1.9252884387969971e-001 1.0750143229961395e-001 + <_> + + 0 -1 216 39586 2101407957 -35376942 -644184357 -1586894610 + -971554785 -5808574 -784335945 + + -3.8588806986808777e-001 5.4346900433301926e-002 + <_> + + 0 -1 496 -1706958944 451947443 -277301988 -1936926482 + 1031093973 -1141080111 -100907 -5619417 + + -1.4155553281307220e-001 1.5182578563690186e-001 + <_> + + 0 -1 385 135519103 -1701046404 -476620169 532383797 + -110098979 -17150466 789376991 -1153760421 + + -9.7210779786109924e-002 2.2095255553722382e-001 + <_> + + 0 -1 119 -68163926 -758185242 -457882974 1943200494 + 1903550542 1144214864 892323778 -178276493 + + -1.4763863384723663e-001 1.4450113475322723e-001 + <_> + + 0 -1 163 -894505938 1726040062 -1434959774 1382606572 + 877626085 912263760 854318029 -235154495 + + -1.9948244094848633e-001 1.1108753830194473e-001 + <_> + + 0 -1 114 -391748914 -87495070 1790763645 -34746582 + -1894316363 -151775424 1299316735 -380410251 + + -1.3808603584766388e-001 1.5448948740959167e-001 + <_> + + 0 -1 514 -222361681 -142684161 -1011944610 -254805175 + -178667764 -968192512 -267197752 -251479346 + + -1.2034154683351517e-001 1.7198434472084045e-001 + <_> + + 0 -1 377 202861343 507076415 721758013 687907104 1979993013 + -907174864 1142178076 1145104400 + + -2.1315360069274902e-001 
9.7528629004955292e-002 + <_> + + 0 -1 510 -928024797 453054 -218994892 -185220464 -2126798169 + 577432266 713554911 -1090527317 + + -1.1146946996450424e-001 1.8797753751277924e-001 + <_> + + 0 -1 611 -4213 -1009925217 -1562674624 -1151402642 + -234885490 -590488821 -269837110 -774636550 + + -1.0050378739833832e-001 2.0443147420883179e-001 + <_> + + 0 -1 442 -797631013 362314737 -1347420291 -1079419975 + 1280903925 -1262749233 -1377484823 -2001796897 + + -1.0836073011159897e-001 1.9475993514060974e-001 + <_> + + 0 -1 432 -316169693 -1344106541 942691362 -202380558 + 930163489 240608166 1442141879 -470334095 + + -1.7380046844482422e-001 1.1888035386800766e-001 + <_> + + 0 -1 252 148589439 -598544709 1720098523 302055418 + 1690358359 -537721785 1465335308 -581808818 + + -1.8943664431571960e-001 1.0867591947317123e-001 + <_> + + 0 -1 272 -227276285 417733055 141878230 -1930563657 + 443500552 -1197990268 -1091601874 -1428702701 + + -1.5042302012443542e-001 1.3850454986095428e-001 + <_> + + 0 -1 365 -295443825 1881192242 602866173 -1583502408 + 217335229 1880585519 771686574 74185330 + + -1.5697461366653442e-001 1.3160589337348938e-001 + <_> + + 0 -1 240 -266098716 966942003 -1407291626 -1109396138 + 1947725557 517720022 -2137792561 822083471 + + -1.4924865961074829e-001 1.4093182981014252e-001 + <_> + + 0 -1 467 1147473171 1279326997 -821293199 17582980 + 1561349141 1035756917 1693723716 134813760 + + -2.6860836148262024e-001 7.9147100448608398e-002 + <_> + + 0 -1 480 -745413331 -1188194 1267976940 -244057892 + -117834344 -1182421888 -1244286022 -675048758 + + -9.6772380173206329e-002 2.2355352342128754e-001 + <_> + + 0 -1 378 -602815049 2117625195 -1582168171 -443273818 + -810235495 -1412593179 -906457953 -301186044 + + -1.2496618181467056e-001 1.6549517214298248e-001 + <_> + + 0 -1 223 784214531 -1481197765 -1175381145 -142769749 + -1364414680 -1560957006 1999583123 705541055 + + -2.1282529830932617e-001 1.0288743674755096e-001 + <_> + + 0 -1 528 -182344572 446549361 -1443603113 -1212176388 + -554397288 -1963900929 -84743208 -1364216305 + + -1.1019920557737350e-001 1.8950794637203217e-001 + <_> + + 0 -1 616 -1303385209 -896561650 -523800238 -151652106 + -548431989 1168430244 -503320351 -2064649010 + + -1.3371227681636810e-001 1.5766866505146027e-001 + <_> + + 0 -1 22 -218105953 -679114691 -1267388358 -73099594 + 1743867279 2020921186 -6432370 -179836046 + + -9.1669127345085144e-002 2.2812496125698090e-001 + <_> + + 0 -1 332 1790410753 1194205785 712913755 1475242910 + 1659961600 360013679 1966797212 -10529238 + + -1.3938346505165100e-001 1.4807218313217163e-001 + <_> + + 0 -1 488 -56065920 -160826232 1866034740 -2282635 + -1612249423 -2037227788 -1350786837 -1091042443 + + -1.2180531024932861e-001 1.6849491000175476e-001 + + <_> + 78 + -1.3695704936981201e+000 + + <_> + + 0 -1 120 -893410561 -536892673 -353374337 -2103706 + -203480129 -21509 -167809029 -1881225 + + -1.6063986718654633e-001 2.0273709297180176e-001 + <_> + + 0 -1 505 -33816579 2073344700 -1558201345 -4208492 + -1141113363 -1076259588 1055902713 -575711916 + + -2.6204735040664673e-001 1.2330465018749237e-001 + <_> + + 0 -1 335 -293634177 1400863359 -389223329 -201985 -834109825 + -285447041 1851805055 1870364287 + + -2.8927606344223022e-001 1.1206240952014923e-001 + <_> + + 0 -1 357 1515124725 369927936 -1610666831 925855537 + 272631836 1531994580 528108223 455021023 + + -1.9164532423019409e-001 1.4835953712463379e-001 + <_> + + 0 -1 560 1060087328 398437823 79655216 -1914176017 + 1558052245 520000392 -1268336765 212632751 + + 
-2.6242268085479736e-001 1.0239273309707642e-001 + <_> + + 0 -1 623 -2629905 -78655505 -891554078 -1058015250 -264250 + 13096772 -168561212 -235405345 + + -1.3851559162139893e-001 1.9113600254058838e-001 + <_> + + 0 -1 162 -202375721 -2111636702 -721301001 -1574913280 + 56560078 -18677777 1087758426 -353913415 + + -2.4820300936698914e-001 1.0685873031616211e-001 + <_> + + 0 -1 50 -8917009 -103854097 -225450462 -1575174 -8377 + 608239190 -209718336 -171445561 + + -1.3929376006126404e-001 1.7934627830982208e-001 + <_> + + 0 -1 588 -543165517 533196199 -1426074862 -544347146 + 1130218371 8697232 2010559954 -741343554 + + -1.5528613328933716e-001 1.5392582118511200e-001 + <_> + + 0 -1 527 -411054193 -1281884233 -1115833348 -1079673394 + -1581284984 -1148589108 -289498744 -2000102976 + + -1.0468549281358719e-001 2.2998848557472229e-001 + <_> + + 0 -1 172 -25174417 2117286413 -804523330 539753215 + -1568838361 2136045431 -87049377 2002249599 + + -2.4891264736652374e-001 9.3971431255340576e-002 + <_> + + 0 -1 366 1210754048 537666304 161348605 627257024 1158955796 + 1668769269 2008953695 1744796787 + + -2.2588098049163818e-001 1.0365047305822372e-001 + <_> + + 0 -1 180 -281288273 -70327314 -796921740 -738730264 + -76562513 -808792948 -1745881398 -136841842 + + -9.5508776605129242e-002 2.4650630354881287e-001 + <_> + + 0 -1 341 -886059513 168764323 -267976118 -973090298 + 2097352643 176495367 1802990159 -385882317 + + -1.8350002169609070e-001 1.2640644609928131e-001 + <_> + + 0 -1 595 -1 -134221825 -218431816 -235405578 -573046902 + -35956864 -177472566 -170394389 + + -8.0099381506443024e-002 2.8082871437072754e-001 + <_> + + 0 -1 314 -14155881 -1111314545 -1353609437 -1358649856 + -20250627 -75628649 2146789220 -69994733 + + -1.0310708731412888e-001 2.1459576487541199e-001 + <_> + + 0 -1 404 85077823 206351119 984419861 20769365 1278506495 + 509705715 1276407516 -599458444 + + -1.8984027206897736e-001 1.1972358822822571e-001 + <_> + + 0 -1 206 -665271074 1464887091 -2145223045 83944575 + -664089410 1012828430 -1092945194 -12125474 + + -2.0856955647468567e-001 1.0800682753324509e-001 + <_> + + 0 -1 49 -138414161 -152571922 -234883034 -218173190 + -69748337 -796834004 -51382306 -235410218 + + -1.0277570784091949e-001 2.0767506957054138e-001 + <_> + + 0 -1 420 -75500559 -1089602040 -645940751 -3162657 + -1738551119 -1692862817 1308311003 -1993363541 + + -9.3405932188034058e-002 2.3085142672061920e-001 + <_> + + 0 -1 455 -853368159 -1054814 389048702 -1710123 -1154186313 + 249649875 -10095637 -606120681 + + -1.0593209415674210e-001 2.0079787075519562e-001 + <_> + + 0 -1 11 -2094007297 -442069004 -605586698 -34146080 + -538208337 -1964457677 1912461024 -168302604 + + -1.3107544183731079e-001 1.6108058393001556e-001 + <_> + + 0 -1 198 -158677344 -181113360 -1528008969 -571176396 + 1048269475 1988456372 -27492353 -11978479 + + -1.2844026088714600e-001 1.6143999993801117e-001 + <_> + + 0 -1 544 1596188085 -987304491 452284863 -5645158 -497029639 + -48137 1575704025 -806361704 + + -1.5479210019111633e-001 1.3679373264312744e-001 + <_> + + 0 -1 416 -803355391 269612059 -103964896 -584190505 + -1253042176 -1287846874 -1364704630 70252047 + + -1.7222206294536591e-001 1.2346468865871429e-001 + <_> + + 0 -1 486 -474377216 339541264 1577057677 -1916842185 + -281745276 -1666877764 1774055176 135393823 + + -1.7964713275432587e-001 1.1774364113807678e-001 + <_> + + 0 -1 381 -169103377 -138944407 -4966664 -1117990427 + -67251458 -75752779 -34803588 -1920930339 + + -1.5931025147438049e-001 1.3285309076309204e-001 + 
<_> + + 0 -1 1 575365347 953914139 -1078241825 530438005 8352735 + -1140861665 -2360355 100616191 + + -2.0287837088108063e-001 1.1286151409149170e-001 + <_> + + 0 -1 301 -2087738451 -236978972 1945051832 -204474216 + -4213090 1812100654 -163122454 -721178166 + + -1.0795794427394867e-001 1.8853248655796051e-001 + <_> + + 0 -1 406 -763363841 1161592123 1218843669 589341989 + 1147453433 878107283 1673862468 71320641 + + -1.4658728241920471e-001 1.4068178832530975e-001 + <_> + + 0 -1 448 338702321 -955723536 223548671 -1618687592 + 640728233 -1892106840 253306637 160038936 + + -2.5049364566802979e-001 8.2616418600082397e-002 + <_> + + 0 -1 309 -1069741553 285037405 549372488 157368060 + 1549206792 476787267 -223284936 -1460015186 + + -1.7551939189434052e-001 1.1551206558942795e-001 + <_> + + 0 -1 16 -559645958 -82033479 -315888674 990559098 -363475207 + -1143281825 -279609123 2137035924 + + -1.1419827491044998e-001 1.8112944066524506e-001 + <_> + + 0 -1 641 -1880100957 172207746 -2042666496 -1609982232 + -877665398 84329482 1961357506 1153890247 + + -2.0891024172306061e-001 9.8337367177009583e-002 + <_> + + 0 -1 19 139296 2079280460 -1035450949 -69253746 155985087 + 2113766731 135764411 1608486923 + + -4.1375562548637390e-001 4.9512255936861038e-002 + <_> + + 0 -1 312 -732977643 277903282 -1368217772 -459228376 + 813142006 837885431 267818252 -1528500380 + + -1.5581859648227692e-001 1.3339102268218994e-001 + <_> + + 0 -1 517 -83917399 -173673311 1626321594 -237716638 + -42225124 1728340229 -1232225590 -217329398 + + -1.1577171832323074e-001 1.8340143561363220e-001 + <_> + + 0 -1 316 771960639 134409859 258363165 -1394364928 + 1598414807 2012884973 1584664128 708838210 + + -2.1626208722591400e-001 9.5477148890495300e-002 + <_> + + 0 -1 572 666873121 -1893565126 -805613104 2079585760 + 774873761 571125668 387929937 -1046488867 + + -1.9903813302516937e-001 1.0453511774539948e-001 + <_> + + 0 -1 28 1635451153 957686021 -451272973 1032273426 + 1958556740 1036989745 177547808 720439818 + + -2.3054862022399902e-001 8.9634105563163757e-002 + <_> + + 0 -1 70 -12625 -404502550 -612371614 -540029722 -8650866 + -1469579796 -67108900 -171445794 + + -9.8644316196441650e-002 2.1435028314590454e-001 + <_> + + 0 -1 451 1576224017 1500983061 -1166946313 1564704015 + -289709104 -1073863490 -828084534 469780763 + + -1.6094450652599335e-001 1.2683431804180145e-001 + <_> + + 0 -1 245 1216020163 491947673 -499451301 -71647474 170564572 + -37852258 -346584676 -75029248 + + -1.5302763879299164e-001 1.3355283439159393e-001 + <_> + + 0 -1 524 77630976 505053924 -1913377232 -1381980602 + 569648564 1022552432 -1514830304 12870692 + + -2.9714506864547729e-001 6.9695785641670227e-002 + <_> + + 0 -1 550 -496000253 -894677310 586380658 -1071684144 + -1104764373 640414632 1062711983 -537927685 + + -1.1207269877195358e-001 1.8443004786968231e-001 + <_> + + 0 -1 317 -805261899 832624929 -898801809 -1107465875 + -413863428 -1242559753 -17457656 783226135 + + -1.1341021209955215e-001 1.7869591712951660e-001 + <_> + + 0 -1 578 -672676857 -671259169 -1426431184 -255886694 + 2067195272 1640486343 -139008630 -247225717 + + -1.1504149436950684e-001 1.7776061594486237e-001 + <_> + + 0 -1 144 -257186129 -55922845 -1077084195 -144891130 + -20726341 -119362033 -410783813 -850035177 + + -1.0251285135746002e-001 1.9268713891506195e-001 + <_> + + 0 -1 600 -143655125 1979709255 1894602946 -754483258 + -161491838 1154539319 -192615542 -237504533 + + -1.0122653841972351e-001 1.9483508169651031e-001 + <_> + + 0 -1 356 2135702864 -1079215788 
-36702019 800889112 + -1154973384 -1120359512 -340087880 -1574789070 + + -2.6105710864067078e-001 7.4920117855072021e-002 + <_> + + 0 -1 367 -1064320849 -888408663 -1962966286 1885579722 + -23076338 773293122 -85339410 -481827590 + + -1.1406119912862778e-001 1.7420279979705811e-001 + <_> + + 0 -1 374 -213751904 1605143820 -154679544 -16761 1735620549 + 1320605166 -504922689 2011657827 + + -1.6869510710239410e-001 1.1749855428934097e-001 + <_> + + 0 -1 503 341128023 1349080691 -1529909789 1250525156 + 1550284273 -72450689 1588033856 -2012225976 + + -1.6349551081657410e-001 1.1994868516921997e-001 + <_> + + 0 -1 256 -32515877 -176855289 -261031297 1057333783 + -1032058104 -9935818 -5099570 -13137826 + + -7.9706199467182159e-002 2.4850650131702423e-001 + <_> + + 0 -1 61 -2068804128 -614601995 1600116177 -71247792 + 2029357231 -1082179864 -2010903395 205586689 + + -1.6402688622474670e-001 1.1898551881313324e-001 + <_> + + 0 -1 213 -1435261405 1677689511 645850208 -236093449 + 657455952 121542592 2002383380 -210522417 + + -1.2863418459892273e-001 1.5204958617687225e-001 + <_> + + 0 -1 439 157636991 -102735875 1049370419 1031056562 + 208487861 712996735 -44814592 -1994037727 + + -1.5739968419075012e-001 1.2730987370014191e-001 + <_> + + 0 -1 435 -1060257076 2111306357 -1702557168 -590175618 + 2013791500 2051870375 -52680182 -1454021666 + + -1.9914233684539795e-001 1.0423366725444794e-001 + <_> + + 0 -1 358 -625293571 -19622942 -8397475 -314591297 27992299 + 2121821739 136847918 1146475566 + + -1.9054318964481354e-001 1.0671291500329971e-001 + <_> + + 0 -1 492 1789983778 1018022897 1861236018 -1611852202 + 876642213 975183222 1055947774 -1343409645 + + -1.5487708151340485e-001 1.2867331504821777e-001 + <_> + + 0 -1 279 327569 218144993 -1778603565 -1353503484 1175427833 + -1882729345 -1546330360 -1154386112 + + -1.7322303354740143e-001 1.1569347232580185e-001 + <_> + + 0 -1 601 -349703245 1433019314 -818911184 -1010310506 + 1433778070 920808880 1642167992 -1328024681 + + -1.2355220317840576e-001 1.6419984400272369e-001 + <_> + + 0 -1 93 -14441821 -609294870 -757607712 2138420300 60657067 + -893238130 -182328108 -706218300 + + -1.2934589385986328e-001 1.5674130618572235e-001 + <_> + + 0 -1 474 -1928604261 -1929346844 -581011079 -541065844 + -1962793025 -1959006544 563584939 -1880104977 + + -8.1186413764953613e-002 2.4935099482536316e-001 + <_> + + 0 -1 597 -420879597 -589318153 -893880704 -1313297248 + 2046665348 -1124139690 -1345913272 -264117294 + + -1.1719886213541031e-001 1.6577914357185364e-001 + <_> + + 0 -1 382 405429943 35157617 -142668179 -1390625436 + 1507609804 -621125793 -1973510450 -1165812617 + + -1.3024367392063141e-001 1.4812141656875610e-001 + <_> + + 0 -1 147 -3217685 -30740518 1938426620 -538214752 -18219057 + -1255846052 -201922374 -237023073 + + -8.8876977562904358e-002 2.1971827745437622e-001 + <_> + + 0 -1 639 -738726997 187692013 2011134000 -1955661828 + -210119423 38790407 -755108400 -987308314 + + -1.4444746077060699e-001 1.3702638447284698e-001 + <_> + + 0 -1 452 -206995293 -545767542 1907093886 1935741694 + -335563268 -280903864 -675039554 -211558710 + + -9.5131643116474152e-002 2.1070742607116699e-001 + <_> + + 0 -1 271 -1002116576 1325996544 -665522936 -597824890 + 1711699965 -73301301 -20448286 -110104046 + + -4.1298919916152954e-001 4.8998169600963593e-002 + <_> + + 0 -1 407 207949127 547888959 694611503 671116356 81544535 + -709767 4261201 4220420 + + -2.8662547469139099e-001 6.9952331483364105e-002 + <_> + + 0 -1 127 -1060119505 1644687048 -1964847366 582987502 + 
-784636060 827683077 1339008607 1415048063 + + -3.4356600046157837e-001 5.6598760187625885e-002 + <_> + + 0 -1 266 -803222897 -61161545 1635244831 -1845510438 + 2098171271 -116434698 -21512229 -513563541 + + -9.9761679768562317e-002 2.0598660409450531e-001 + <_> + + 0 -1 51 -347603025 -1870744068 679009562 -113708376 + 1464161920 -312465308 -41992630 -246943142 + + -1.1211391538381577e-001 1.7646948993206024e-001 + <_> + + 0 -1 190 -2013321968 -742767341 1337165114 -72328646 + -1787329601 -841026207 512443531 314072475 + + -1.8131621181964874e-001 1.1133272945880890e-001 + <_> + + 0 -1 319 236183479 76657666 176155709 908181400 409361495 + -1141645905 -36647350 -1153869993 + + -1.2898425757884979e-001 1.5491998195648193e-001 + <_> + + 0 -1 586 1266667810 1511123775 2094253840 -44707245 + 333808663 628498310 -402685040 -962095185 + + -1.7281804978847504e-001 1.1423593014478683e-001 + <_> + + 0 -1 192 -236396348 -727146278 1815085813 -38007520 + -171378486 -37738304 -206180609 -107261665 + + -1.0720640420913696e-001 1.8341147899627686e-001 + + <_> + 80 + -1.3276469707489014e+000 + + <_> + + 0 -1 434 1206384503 2004349367 -587202625 -570458153 + 1962898935 -135266313 -1091309570 -20973573 + + -1.7421774566173553e-001 1.7884284257888794e-001 + <_> + + 0 -1 165 -134742022 -627902534 1968427302 -560480 806424439 + -67650565 51447330 -2133022 + + -2.1476578712463379e-001 1.3359096646308899e-001 + <_> + + 0 -1 515 1057161146 -6307941 496502567 -1617216785 391903915 + 959252225 772505566 974076347 + + -2.3192755877971649e-001 1.2110722810029984e-001 + <_> + + 0 -1 257 -1700555934 -500504126 370126658 -1079154897 + -284626360 -1924137013 387672847 -3156277 + + -2.5262397527694702e-001 1.0572800785303116e-001 + <_> + + 0 -1 159 -1000152458 1895493330 -281178273 2146355560 + 2093311095 855405813 -543404209 821946229 + + -2.0605906844139099e-001 1.2582625448703766e-001 + <_> + + 0 -1 87 -34608193 -201856262 -352407873 -1461520 -1057490945 + -307438354 -52433718 -1060773758 + + -1.1796126514673233e-001 2.2431872785091400e-001 + <_> + + 0 -1 106 1362202543 -776621899 -193898077 -652093965 + -805254933 -1070494539 -755921969 -553071219 + + -2.5051230192184448e-001 1.0189625620841980e-001 + <_> + + 0 -1 168 -897588481 1378039343 586731134 849323774 + -615806066 1984956738 -10032198 -212598805 + + -1.7378865182399750e-001 1.3972662389278412e-001 + <_> + + 0 -1 620 -1329 -67371793 -1023412510 -219219134 -146872510 + -950677692 -170852800 -263718034 + + -1.4755988121032715e-001 1.5936012566089630e-001 + <_> + + 0 -1 409 -807993858 -444659489 -222269284 -806825748 + -620958468 -268492551 -286463746 203485430 + + -2.2656925022602081e-001 1.0586404800415039e-001 + <_> + + 0 -1 14 -33558609 -65083682 -811175422 -123558262 2138007431 + 436495750 2139054674 -263724334 + + -1.3516700267791748e-001 1.6823019087314606e-001 + <_> + + 0 -1 327 -749235619 -740780991 1904177247 1594623829 + 1529354448 -172286859 -15541176 -1358771366 + + -1.8130488693714142e-001 1.2141185253858566e-001 + <_> + + 0 -1 182 1584990352 -473857834 1605700853 -100717318 + -212604370 2125138082 -883446021 1534527706 + + -1.5860173106193542e-001 1.4470864832401276e-001 + <_> + + 0 -1 573 -1507874397 113426414 310100625 -621282947 + 573547171 583934606 1300686811 -908069413 + + -1.1836400628089905e-001 1.8674106895923615e-001 + <_> + + 0 -1 429 1929570099 1923122619 -1622715780 -1577842 + -213749257 -303632667 -168461162 1626695507 + + -1.7876005172729492e-001 1.2363989651203156e-001 + <_> + + 0 -1 391 -73430485 -9438562 2060940976 -201347366 + 
-1144276274 -2029070526 -134228252 -146546470 + + -1.0172406584024429e-001 2.1338729560375214e-001 + <_> + + 0 -1 376 240453683 156000191 695730801 151579604 1499214964 + 243072615 1282672708 1080359008 + + -2.8052809834480286e-001 7.7434003353118896e-002 + <_> + + 0 -1 470 962023287 34183056 -852552331 -1347425040 + 1042303461 -1413613637 -2073063769 -1884301079 + + -1.3582970201969147e-001 1.5512573719024658e-001 + <_> + + 0 -1 140 -816847897 -235995450 1886384818 -707134232 + -335615089 1147431968 -136072004 -171444858 + + -1.1215072125196457e-001 1.8390375375747681e-001 + <_> + + 0 -1 564 1059584305 -1155622093 766342448 -1098140737 + 337323539 -289461769 222106113 -1917846854 + + -2.0957885682582855e-001 9.9119037389755249e-002 + <_> + + 0 -1 78 -2102645 -249598262 318267466 -345408 -622854336 + 1577202951 -246163506 -247012394 + + -1.2023200839757919e-001 1.6939368844032288e-001 + <_> + + 0 -1 343 1291652149 416690045 162229011 199289850 16481324 + 2128737110 2062154440 -287838248 + + -1.6363579034805298e-001 1.2322180718183517e-001 + <_> + + 0 -1 135 167774464 -153547823 2081759095 -34798827 + 1856352957 -1309945394 -343857497 -1476130752 + + -1.9108144938945770e-001 1.0656961053609848e-001 + <_> + + 0 -1 5 -207883269 -337118226 -220200976 -672139828 + -205541893 1284500906 1402970448 -984099979 + + -1.2751424312591553e-001 1.5983362495899200e-001 + <_> + + 0 -1 361 -46205443 -77802884 1811618781 -675364873 -61313858 + -1149589828 -824262757 -487644637 + + -1.0761560499668121e-001 1.8673804402351379e-001 + <_> + + 0 -1 289 -1308578941 545317504 -1228750258 -546977248 + 1313629625 1084548279 -448115765 1312899649 + + -1.7796906828880310e-001 1.1297041922807693e-001 + <_> + + 0 -1 236 2124418214 497585588 -273412583 -36860403 410545912 + -21749827 -57253938 977282818 + + -2.0535370707511902e-001 9.8657287657260895e-002 + <_> + + 0 -1 634 -2628625 1397615486 -222948678 1760951400 + -707857434 -108282272 -255668840 -246153218 + + -9.8915778100490570e-002 2.0372001826763153e-001 + <_> + + 0 -1 441 1349520221 349187989 1306762509 220739316 + 1551189437 -1786970795 -860861440 137176337 + + -2.1401090919971466e-001 9.5383636653423309e-002 + <_> + + 0 -1 483 -127866240 -1243498252 2075148792 -656172 + -1077434979 1089506804 -1075988545 -1614085205 + + -1.0469564050436020e-001 1.9067412614822388e-001 + <_> + + 0 -1 253 -573002017 446301319 -1152229985 51053115 + -585457444 -897361 -2109064294 -1442650241 + + -1.8012882769107819e-001 1.1004448682069778e-001 + <_> + + 0 -1 494 -252189692 66845267 -974148203 -1923221545 + 1325215904 951641541 2060832424 677760795 + + -1.8193097412586212e-001 1.0960992425680161e-001 + <_> + + 0 -1 583 -738863711 1434287531 1197897784 1438902146 + 1641133962 -66261760 -37880920 -2004551028 + + -1.0881505161523819e-001 1.8144680559635162e-001 + <_> + + 0 -1 174 573541027 1676608015 2242082 -206569477 997981440 + 1449923588 911537988 -212864065 + + -1.3321767747402191e-001 1.4928372204303741e-001 + <_> + + 0 -1 421 1746154929 -570884612 1069104059 1601896941 + -1187998728 -1080239892 196885913 -1414887399 + + -1.5190990269184113e-001 1.3106991350650787e-001 + <_> + + 0 -1 428 -821319005 -814945359 1779284706 -1712448 + -210261089 1179740053 -1214778225 -136349857 + + -9.9395051598548889e-002 2.0520499348640442e-001 + <_> + + 0 -1 380 -145235969 -541098113 -1368522945 -1478098961 + -177733697 -13142085 -1287296305 -1073370811 + + -8.0196149647235870e-002 2.4394263327121735e-001 + <_> + + 0 -1 7 -1846704337 -178936003 -1015991060 -772726649 + -1444167761 -1145590493 
-772690101 1503989547 + + -2.0097000896930695e-001 1.0192957520484924e-001 + <_> + + 0 -1 202 1316145887 271851495 572705407 273870363 2131758335 + -246658261 -14692406 -143193986 + + -1.7980761826038361e-001 1.0900650173425674e-001 + <_> + + 0 -1 347 -103681070 2069215860 1035229628 -688071392 + 596173394 -72609495 -80709704 -669718415 + + -2.1817618608474731e-001 9.0754456818103790e-002 + <_> + + 0 -1 417 495269373 487127029 1072919743 2108013936 + 2055212316 -1615987457 175799945 -1359017709 + + -1.2845875322818756e-001 1.5887255966663361e-001 + <_> + + 0 -1 296 585867779 -2097176059 -2098537921 -539000365 + 1583727364 1442265348 -1814891014 -204483334 + + -1.0467831045389175e-001 1.8451270461082458e-001 + <_> + + 0 -1 303 -607653333 -9692435 -247750602 -747325454 -13637809 + 38165774 -204802086 -775686706 + + -1.0023513436317444e-001 1.9359007477760315e-001 + <_> + + 0 -1 629 -545306625 2064440055 -352337922 318168918 + -74269108 -143238655 -828194850 -1069094165 + + -1.1952475458383560e-001 1.6566169261932373e-001 + <_> + + 0 -1 97 -1610421825 2086727596 2091054521 -43774102 + 1956117671 -1079985741 529529502 -855371243 + + -1.4069811999797821e-001 1.3648596405982971e-001 + <_> + + 0 -1 604 581936675 583427210 717332896 1614603900 1779402659 + 71308546 930838418 1172302558 + + -2.0652648806571960e-001 9.3512579798698425e-002 + <_> + + 0 -1 541 404419349 932903717 1877421366 755818324 743838869 + -1145519202 2007639040 -1363402412 + + -1.7568394541740417e-001 1.0853296518325806e-001 + <_> + + 0 -1 603 -67115261 1783467842 62649872 -177358022 1674508167 + 1627845012 2071982800 -1061706326 + + -1.4163950085639954e-001 1.3505090773105621e-001 + <_> + + 0 -1 464 -725234227 -65396777 -1603801204 -2002121734 + -574092835 -559480843 -664465910 -928072645 + + -1.1832091212272644e-001 1.6329441964626312e-001 + <_> + + 0 -1 9 1740090031 -119056410 -48862274 -36986092 1905130415 + -458821676 -1686142136 -789056533 + + -9.1347850859165192e-002 2.0984113216400146e-001 + <_> + + 0 -1 389 1163198464 -642500693 2032893884 -68817014 + 486915972 -364436958 2111264989 1551847259 + + -3.2341209053993225e-001 5.9208732098340988e-002 + <_> + + 0 -1 454 -89996128 2143001535 -319868938 -303249964 + -941319771 -17356811 -1376506227 12609509 + + -1.6184104979038239e-001 1.2333205342292786e-001 + <_> + + 0 -1 407 224468919 20399931 275107615 218193753 359924797 + 357845277 1715740495 67410948 + + -2.0427259802818298e-001 9.3962967395782471e-002 + <_> + + 0 -1 116 2093999747 2076877547 -1536818480 -68260112 + 342087583 977675846 -2054892578 -143178538 + + -1.1508999764919281e-001 1.6942168772220612e-001 + <_> + + 0 -1 262 61902593 -1083933765 -53051661 -19728780 -758080769 + -274773161 -2065401704 -2140078764 + + -1.3256002962589264e-001 1.4487990736961365e-001 + <_> + + 0 -1 532 -357248640 -1176763950 -1380155585 -537027259 + 844380056 805091765 787808170 -287169169 + + -1.1479937285184860e-001 1.6612561047077179e-001 + <_> + + 0 -1 34 1897919481 934478619 1607954095 1062273795 + -568662347 2092388823 -471626574 1140325367 + + -1.3866116106510162e-001 1.3873730599880219e-001 + <_> + + 0 -1 372 -1037057913 -1657025808 46547560 273413291 + 1868532619 1378320161 -743476278 -347081181 + + -1.1563215404748917e-001 1.6552960872650146e-001 + <_> + + 0 -1 546 -1199314443 -408265787 -913588740 -1655009092 + -1372935763 1873526476 -914524407 -979905444 + + -1.9302512705326080e-001 1.0073655843734741e-001 + <_> + + 0 -1 392 1034106418 -711186286 -1664267794 -171720173 + 606214007 -1128035247 564213287 1934636578 + + 
-3.2928413152694702e-001 5.9415474534034729e-002 + <_> + + 0 -1 126 -687891445 -614004341 -433115782 2129635058 + 385995416 1494300484 1184757738 -237518882 + + -1.1321404576301575e-001 1.7364130914211273e-001 + <_> + + 0 -1 558 -50542464 -203846926 -1517043500 2113515223 + -1091188084 -1864583803 -387006582 -880564821 + + -1.0755964368581772e-001 1.7839278280735016e-001 + <_> + + 0 -1 475 -357780685 -2045314380 429770709 -139071500 + -1492218965 61726170 728218027 -339771255 + + -1.0421722382307053e-001 1.8218158185482025e-001 + <_> + + 0 -1 556 -302139391 1593041108 735992730 1353726386 + -1293624936 -1196135168 -340731990 -1591537478 + + -1.2337599694728851e-001 1.5013375878334045e-001 + <_> + + 0 -1 275 1547535029 278984577 -1224785328 -1934989504 + 788137925 955118580 -1444508712 -1463819671 + + -1.5866357088088989e-001 1.1958929151296616e-001 + <_> + + 0 -1 176 -1439519837 -336661820 -1812236650 -712007984 + -1960207458 -868173878 857715686 -973873982 + + -1.2497423589229584e-001 1.5143482387065887e-001 + <_> + + 0 -1 359 -761946313 1449128951 2079574803 1577003953 + 1515347100 -537167905 -67450178 -4500465 + + -8.1773772835731506e-002 2.2836156189441681e-001 + <_> + + 0 -1 152 -496441618 -750877463 -389118725 -37689332 + 1436594609 -578584890 -706221607 1893151375 + + -1.3894660770893097e-001 1.3676303625106812e-001 + <_> + + 0 -1 504 68506993 604858146 237580290 -2121958112 1638929 + -1097225097 340092224 34511200 + + -2.7272871136665344e-001 6.8898513913154602e-002 + <_> + + 0 -1 415 -2147155497 1560733239 -303387241 -42356291 + -598665995 -1162842499 -295060500 -1410399929 + + -9.9354021251201630e-002 1.9108591973781586e-001 + <_> + + 0 -1 228 -202202453 -590590115 1756643743 2109944543 + 871348042 -786157488 1542764527 -1693726949 + + -9.3221530318260193e-002 2.0627823472023010e-001 + <_> + + 0 -1 460 -1500254282 1435658607 582487508 484240674 + 2099271125 -46970395 1709452864 -467317454 + + -1.5570642054080963e-001 1.1965727806091309e-001 + <_> + + 0 -1 46 -919902696 -123093042 398152976 -135141176 + 1743172232 -1396378320 2083515430 1413872256 + + -1.8301638960838318e-001 1.0124789923429489e-001 + <_> + + 0 -1 36 -751855101 -145897929 2019538276 817985198 803892992 + 71726960 2134193492 1932970373 + + -1.8066957592964172e-001 9.9701888859272003e-002 + <_> + + 0 -1 20 268485266 -899463709 586345979 2106381807 541077489 + -84983361 1246258371 -1474337 + + -3.0872756242752075e-001 5.9459727257490158e-002 + <_> + + 0 -1 322 1213715019 -2076172757 615109711 232265406 + 1473517724 -1362446289 -1316954598 -1153808357 + + -1.5449893474578857e-001 1.2041906267404556e-001 + <_> + + 0 -1 628 -357568517 323871454 788404112 300650740 1538176507 + -1983070516 -707831379 -1002326334 + + -1.0190636664628983e-001 1.8244536221027374e-001 + <_> + + 0 -1 459 1757579808 985458211 -1912341890 329521286 + 922752757 2094473459 132997598 -336317625 + + -1.6428998112678528e-001 1.1182511597871780e-001 + <_> + + 0 -1 345 -771571504 -9274104 969443530 77684746 -1178752308 + 1794067453 -1562083078 -604642545 + + -1.9640970230102539e-001 9.4069905579090118e-002 + <_> + + 0 -1 284 583589 -1811895121 -1462990311 -1594353 1953890287 + -420466765 -104632354 -1678170365 + + -1.0462117195129395e-001 1.8044032156467438e-001 + + <_> + + 0 0 1 1 + <_> + + 0 0 5 7 + <_> + + 0 0 6 5 + <_> + + 0 0 6 6 + <_> + + 0 0 7 6 + <_> + + 0 1 1 1 + <_> + + 0 1 1 3 + <_> + + 0 1 8 6 + <_> + + 0 2 1 1 + <_> + + 0 2 1 2 + <_> + + 0 2 1 4 + <_> + + 0 2 2 1 + <_> + + 0 3 2 7 + <_> + + 0 4 1 1 + <_> + + 0 4 1 2 + <_> + + 0 5 1 1 + 
<_> + + 0 5 6 2 + <_> + + 0 5 8 6 + <_> + + 0 6 1 1 + <_> + + 0 6 6 5 + <_> + + 0 6 6 6 + <_> + + 0 7 1 1 + <_> + + 0 7 1 2 + <_> + + 0 7 3 2 + <_> + + 0 7 6 3 + <_> + + 0 8 1 1 + <_> + + 0 9 1 1 + <_> + + 0 9 1 2 + <_> + + 0 9 2 5 + <_> + + 0 9 4 3 + <_> + + 0 9 5 2 + <_> + + 0 11 4 2 + <_> + + 0 13 4 3 + <_> + + 0 14 5 3 + <_> + + 0 15 5 3 + <_> + + 0 15 8 3 + <_> + + 0 16 1 1 + <_> + + 0 16 3 2 + <_> + + 0 19 8 1 + <_> + + 1 0 1 1 + <_> + + 1 0 4 4 + <_> + + 1 0 5 6 + <_> + + 1 0 7 6 + <_> + + 1 1 1 1 + <_> + + 1 1 1 4 + <_> + + 1 2 1 1 + <_> + + 1 2 1 2 + <_> + + 1 3 1 1 + <_> + + 1 4 1 1 + <_> + + 1 5 1 1 + <_> + + 1 6 1 1 + <_> + + 1 6 1 2 + <_> + + 1 7 1 1 + <_> + + 1 7 3 2 + <_> + + 1 8 1 1 + <_> + + 1 9 1 1 + <_> + + 1 12 4 2 + <_> + + 1 14 7 2 + <_> + + 1 18 7 2 + <_> + + 2 0 1 1 + <_> + + 2 0 4 3 + <_> + + 2 0 5 2 + <_> + + 2 0 5 6 + <_> + + 2 0 6 4 + <_> + + 2 0 6 7 + <_> + + 2 0 7 6 + <_> + + 2 1 1 1 + <_> + + 2 2 1 1 + <_> + + 2 3 1 1 + <_> + + 2 3 1 2 + <_> + + 2 4 1 1 + <_> + + 2 4 2 4 + <_> + + 2 5 1 1 + <_> + + 2 5 1 2 + <_> + + 2 6 1 1 + <_> + + 2 7 1 1 + <_> + + 2 7 5 5 + <_> + + 2 8 2 3 + <_> + + 2 9 1 1 + <_> + + 2 9 1 2 + <_> + + 2 10 1 1 + <_> + + 2 11 1 2 + <_> + + 2 11 2 2 + <_> + + 2 12 4 3 + <_> + + 2 14 7 1 + <_> + + 2 15 5 3 + <_> + + 2 15 7 3 + <_> + + 3 0 1 4 + <_> + + 3 0 3 2 + <_> + + 3 0 4 4 + <_> + + 3 0 4 6 + <_> + + 3 0 6 5 + <_> + + 3 0 7 6 + <_> + + 3 1 1 1 + <_> + + 3 1 3 3 + <_> + + 3 1 6 5 + <_> + + 3 2 1 1 + <_> + + 3 2 2 3 + <_> + + 3 2 6 4 + <_> + + 3 3 1 1 + <_> + + 3 3 2 4 + <_> + + 3 3 2 5 + <_> + + 3 3 4 5 + <_> + + 3 3 6 7 + <_> + + 3 4 1 1 + <_> + + 3 4 2 3 + <_> + + 3 4 6 3 + <_> + + 3 5 1 1 + <_> + + 3 5 6 3 + <_> + + 3 6 1 1 + <_> + + 3 6 2 2 + <_> + + 3 6 6 3 + <_> + + 3 6 6 4 + <_> + + 3 7 1 1 + <_> + + 3 7 2 1 + <_> + + 3 7 2 2 + <_> + + 3 8 1 1 + <_> + + 3 8 2 2 + <_> + + 3 8 2 3 + <_> + + 3 9 1 1 + <_> + + 3 9 2 2 + <_> + + 3 10 2 2 + <_> + + 3 10 3 3 + <_> + + 3 10 6 1 + <_> + + 3 11 1 1 + <_> + + 3 11 2 2 + <_> + + 3 14 1 1 + <_> + + 3 14 2 1 + <_> + + 3 15 6 1 + <_> + + 3 15 7 3 + <_> + + 3 19 7 1 + <_> + + 4 0 1 1 + <_> + + 4 0 1 2 + <_> + + 4 0 1 3 + <_> + + 4 0 5 2 + <_> + + 4 0 5 3 + <_> + + 4 0 5 4 + <_> + + 4 1 1 1 + <_> + + 4 2 1 1 + <_> + + 4 2 2 3 + <_> + + 4 3 1 1 + <_> + + 4 3 2 3 + <_> + + 4 4 1 1 + <_> + + 4 4 1 4 + <_> + + 4 4 2 2 + <_> + + 4 4 5 3 + <_> + + 4 4 5 5 + <_> + + 4 5 1 1 + <_> + + 4 5 2 2 + <_> + + 4 6 1 1 + <_> + + 4 6 5 2 + <_> + + 4 7 1 1 + <_> + + 4 7 1 2 + <_> + + 4 7 2 3 + <_> + + 4 8 1 1 + <_> + + 4 8 2 1 + <_> + + 4 8 2 2 + <_> + + 4 9 1 1 + <_> + + 4 9 1 2 + <_> + + 4 9 2 1 + <_> + + 4 9 2 2 + <_> + + 4 9 3 4 + <_> + + 4 9 3 5 + <_> + + 4 10 1 1 + <_> + + 4 10 1 2 + <_> + + 4 10 3 3 + <_> + + 4 11 1 1 + <_> + + 4 11 3 3 + <_> + + 4 12 1 1 + <_> + + 4 13 1 1 + <_> + + 4 14 1 1 + <_> + + 4 14 2 1 + <_> + + 4 15 2 1 + <_> + + 4 15 5 1 + <_> + + 4 16 1 1 + <_> + + 4 18 3 2 + <_> + + 5 0 1 1 + <_> + + 5 0 3 1 + <_> + + 5 0 5 2 + <_> + + 5 0 5 4 + <_> + + 5 1 1 1 + <_> + + 5 1 1 6 + <_> + + 5 1 1 7 + <_> + + 5 2 1 1 + <_> + + 5 2 5 6 + <_> + + 5 3 1 5 + <_> + + 5 3 2 3 + <_> + + 5 3 3 4 + <_> + + 5 4 1 4 + <_> + + 5 4 4 3 + <_> + + 5 4 6 3 + <_> + + 5 5 2 2 + <_> + + 5 6 1 1 + <_> + + 5 6 1 3 + <_> + + 5 6 2 2 + <_> + + 5 7 1 1 + <_> + + 5 7 1 3 + <_> + + 5 7 2 2 + <_> + + 5 8 1 1 + <_> + + 5 8 2 2 + <_> + + 5 9 1 1 + <_> + + 5 9 1 2 + <_> + + 5 10 1 2 + <_> + + 5 10 2 3 + <_> + + 5 11 1 2 + <_> + + 5 12 1 1 + <_> + + 5 12 1 2 + <_> + + 5 13 1 1 + <_> + + 5 14 1 1 + <_> + + 5 15 2 1 + <_> + + 5 15 
5 1 + <_> + + 5 16 3 1 + <_> + + 5 17 1 1 + <_> + + 5 18 1 1 + <_> + + 5 18 2 1 + <_> + + 6 0 1 7 + <_> + + 6 0 3 8 + <_> + + 6 0 4 7 + <_> + + 6 0 6 5 + <_> + + 6 0 6 6 + <_> + + 6 1 1 1 + <_> + + 6 1 1 6 + <_> + + 6 1 3 5 + <_> + + 6 1 6 6 + <_> + + 6 3 4 3 + <_> + + 6 4 2 3 + <_> + + 6 4 4 4 + <_> + + 6 4 4 5 + <_> + + 6 5 1 1 + <_> + + 6 5 1 4 + <_> + + 6 6 1 1 + <_> + + 6 6 1 2 + <_> + + 6 6 1 3 + <_> + + 6 6 2 2 + <_> + + 6 6 6 5 + <_> + + 6 7 1 1 + <_> + + 6 7 1 2 + <_> + + 6 7 2 2 + <_> + + 6 7 6 4 + <_> + + 6 8 1 1 + <_> + + 6 8 2 2 + <_> + + 6 8 6 3 + <_> + + 6 8 6 4 + <_> + + 6 10 1 1 + <_> + + 6 11 1 1 + <_> + + 6 11 1 2 + <_> + + 6 11 2 3 + <_> + + 6 11 2 4 + <_> + + 6 12 1 1 + <_> + + 6 12 1 2 + <_> + + 6 13 1 1 + <_> + + 6 13 1 2 + <_> + + 6 14 1 1 + <_> + + 6 14 1 2 + <_> + + 6 15 1 1 + <_> + + 6 15 4 1 + <_> + + 6 16 2 1 + <_> + + 6 16 4 1 + <_> + + 6 18 1 1 + <_> + + 7 0 1 1 + <_> + + 7 0 3 7 + <_> + + 7 0 3 8 + <_> + + 7 0 4 1 + <_> + + 7 0 5 5 + <_> + + 7 0 5 6 + <_> + + 7 1 1 1 + <_> + + 7 3 2 2 + <_> + + 7 5 3 5 + <_> + + 7 6 1 2 + <_> + + 7 7 1 1 + <_> + + 7 7 3 2 + <_> + + 7 7 3 4 + <_> + + 7 8 1 1 + <_> + + 7 8 3 4 + <_> + + 7 8 5 4 + <_> + + 7 10 1 1 + <_> + + 7 10 1 2 + <_> + + 7 10 3 3 + <_> + + 7 11 1 1 + <_> + + 7 11 1 2 + <_> + + 7 12 1 1 + <_> + + 7 12 1 2 + <_> + + 7 13 1 1 + <_> + + 7 13 1 2 + <_> + + 7 13 2 1 + <_> + + 7 13 2 2 + <_> + + 7 13 4 3 + <_> + + 7 14 1 1 + <_> + + 7 14 1 2 + <_> + + 7 14 2 1 + <_> + + 7 15 1 1 + <_> + + 7 15 2 1 + <_> + + 7 16 1 1 + <_> + + 7 16 2 1 + <_> + + 7 16 3 1 + <_> + + 7 16 4 1 + <_> + + 7 18 1 1 + <_> + + 7 19 3 1 + <_> + + 8 0 2 3 + <_> + + 8 0 5 4 + <_> + + 8 0 5 6 + <_> + + 8 1 1 1 + <_> + + 8 1 3 7 + <_> + + 8 2 1 1 + <_> + + 8 4 1 1 + <_> + + 8 5 2 5 + <_> + + 8 6 1 1 + <_> + + 8 6 2 1 + <_> + + 8 6 2 5 + <_> + + 8 7 1 1 + <_> + + 8 8 1 1 + <_> + + 8 9 1 5 + <_> + + 8 10 1 1 + <_> + + 8 10 1 2 + <_> + + 8 11 1 2 + <_> + + 8 12 2 2 + <_> + + 8 13 1 1 + <_> + + 8 13 2 1 + <_> + + 8 13 3 2 + <_> + + 8 14 1 1 + <_> + + 8 14 2 1 + <_> + + 8 14 5 3 + <_> + + 8 15 1 1 + <_> + + 8 15 1 2 + <_> + + 8 15 2 1 + <_> + + 8 15 3 3 + <_> + + 8 15 5 3 + <_> + + 8 16 1 1 + <_> + + 8 16 2 1 + <_> + + 8 16 3 1 + <_> + + 8 17 1 1 + <_> + + 8 17 2 1 + <_> + + 8 19 1 1 + <_> + + 9 0 3 6 + <_> + + 9 0 4 3 + <_> + + 9 0 5 6 + <_> + + 9 1 4 6 + <_> + + 9 2 1 1 + <_> + + 9 3 1 1 + <_> + + 9 5 1 5 + <_> + + 9 6 1 1 + <_> + + 9 6 2 1 + <_> + + 9 7 1 1 + <_> + + 9 8 1 1 + <_> + + 9 8 2 1 + <_> + + 9 8 2 3 + <_> + + 9 9 1 1 + <_> + + 9 9 2 4 + <_> + + 9 10 2 3 + <_> + + 9 11 1 1 + <_> + + 9 11 2 3 + <_> + + 9 12 2 2 + <_> + + 9 13 1 1 + <_> + + 9 14 1 1 + <_> + + 9 14 2 1 + <_> + + 9 15 1 1 + <_> + + 9 15 1 2 + <_> + + 9 15 2 1 + <_> + + 9 15 5 3 + <_> + + 9 16 1 1 + <_> + + 9 16 2 1 + <_> + + 9 16 2 2 + <_> + + 9 17 1 1 + <_> + + 9 17 2 1 + <_> + + 9 18 2 1 + <_> + + 9 18 5 2 + <_> + + 9 19 2 1 + <_> + + 10 4 1 1 + <_> + + 10 4 3 2 + <_> + + 10 5 1 1 + <_> + + 10 5 1 5 + <_> + + 10 5 2 5 + <_> + + 10 6 1 1 + <_> + + 10 7 1 1 + <_> + + 10 7 3 1 + <_> + + 10 8 2 4 + <_> + + 10 9 1 1 + <_> + + 10 10 1 1 + <_> + + 10 10 1 2 + <_> + + 10 10 4 3 + <_> + + 10 11 1 1 + <_> + + 10 11 2 3 + <_> + + 10 15 1 1 + <_> + + 10 15 1 2 + <_> + + 10 15 2 1 + <_> + + 10 16 1 1 + <_> + + 10 16 2 1 + <_> + + 10 17 1 1 + <_> + + 10 17 2 1 + <_> + + 11 0 2 7 + <_> + + 11 0 3 3 + <_> + + 11 1 1 1 + <_> + + 11 1 3 4 + <_> + + 11 4 1 1 + <_> + + 11 4 2 2 + <_> + + 11 4 3 2 + <_> + + 11 5 1 1 + <_> + + 11 5 1 5 + <_> + + 11 5 2 2 + <_> + + 11 6 1 1 + <_> + + 11 7 1 1 + 
<_> + + 11 8 1 1 + <_> + + 11 8 2 1 + <_> + + 11 8 4 3 + <_> + + 11 9 1 1 + <_> + + 11 9 3 4 + <_> + + 11 10 1 1 + <_> + + 11 11 1 1 + <_> + + 11 11 1 2 + <_> + + 11 11 2 3 + <_> + + 11 11 3 3 + <_> + + 11 12 1 1 + <_> + + 11 12 2 2 + <_> + + 11 12 4 2 + <_> + + 11 13 1 1 + <_> + + 11 14 1 1 + <_> + + 11 14 2 1 + <_> + + 11 15 1 1 + <_> + + 11 15 2 1 + <_> + + 11 16 1 1 + <_> + + 11 16 2 1 + <_> + + 11 17 1 1 + <_> + + 11 17 2 1 + <_> + + 11 18 2 1 + <_> + + 12 0 1 1 + <_> + + 12 1 1 1 + <_> + + 12 3 2 3 + <_> + + 12 4 3 2 + <_> + + 12 5 1 1 + <_> + + 12 5 2 2 + <_> + + 12 6 1 1 + <_> + + 12 6 1 5 + <_> + + 12 7 1 1 + <_> + + 12 8 2 1 + <_> + + 12 9 4 3 + <_> + + 12 10 2 2 + <_> + + 12 11 1 1 + <_> + + 12 11 1 2 + <_> + + 12 11 2 3 + <_> + + 12 12 1 1 + <_> + + 12 12 3 2 + <_> + + 12 13 1 1 + <_> + + 12 14 1 1 + <_> + + 12 14 2 1 + <_> + + 12 14 4 3 + <_> + + 12 15 1 1 + <_> + + 12 16 1 1 + <_> + + 12 16 2 1 + <_> + + 12 17 1 1 + <_> + + 12 18 2 2 + <_> + + 13 0 1 1 + <_> + + 13 0 3 3 + <_> + + 13 2 1 1 + <_> + + 13 4 2 3 + <_> + + 13 5 2 2 + <_> + + 13 6 1 1 + <_> + + 13 6 2 2 + <_> + + 13 7 1 1 + <_> + + 13 7 2 2 + <_> + + 13 8 1 1 + <_> + + 13 10 1 1 + <_> + + 13 10 1 2 + <_> + + 13 10 2 3 + <_> + + 13 11 1 1 + <_> + + 13 11 1 2 + <_> + + 13 12 1 1 + <_> + + 13 12 3 2 + <_> + + 13 13 1 1 + <_> + + 13 14 1 1 + <_> + + 13 14 2 1 + <_> + + 13 15 1 1 + <_> + + 13 15 2 1 + <_> + + 13 16 1 1 + <_> + + 13 16 2 1 + <_> + + 13 17 1 1 + <_> + + 13 18 1 1 + <_> + + 13 18 2 1 + <_> + + 13 19 1 1 + <_> + + 14 0 1 1 + <_> + + 14 1 2 3 + <_> + + 14 2 1 1 + <_> + + 14 4 2 2 + <_> + + 14 4 2 3 + <_> + + 14 5 1 1 + <_> + + 14 5 1 2 + <_> + + 14 5 2 2 + <_> + + 14 6 2 1 + <_> + + 14 6 2 2 + <_> + + 14 7 1 1 + <_> + + 14 7 2 2 + <_> + + 14 7 2 3 + <_> + + 14 7 3 2 + <_> + + 14 8 1 1 + <_> + + 14 8 1 2 + <_> + + 14 8 2 1 + <_> + + 14 8 2 2 + <_> + + 14 9 1 1 + <_> + + 14 9 2 1 + <_> + + 14 9 2 2 + <_> + + 14 9 2 4 + <_> + + 14 11 1 1 + <_> + + 14 11 1 2 + <_> + + 14 12 1 1 + <_> + + 14 13 1 1 + <_> + + 14 14 1 1 + <_> + + 14 14 1 2 + <_> + + 14 14 2 1 + <_> + + 14 15 1 1 + <_> + + 14 16 1 1 + <_> + + 14 17 1 1 + <_> + + 14 18 1 1 + <_> + + 15 0 1 1 + <_> + + 15 0 1 6 + <_> + + 15 0 1 7 + <_> + + 15 1 1 1 + <_> + + 15 1 1 6 + <_> + + 15 2 1 1 + <_> + + 15 3 1 1 + <_> + + 15 3 1 5 + <_> + + 15 4 1 5 + <_> + + 15 4 2 3 + <_> + + 15 5 1 1 + <_> + + 15 5 2 3 + <_> + + 15 6 1 1 + <_> + + 15 6 1 2 + <_> + + 15 6 1 3 + <_> + + 15 6 2 2 + <_> + + 15 6 3 2 + <_> + + 15 7 1 1 + <_> + + 15 7 1 2 + <_> + + 15 7 2 2 + <_> + + 15 7 2 3 + <_> + + 15 8 1 1 + <_> + + 15 8 1 2 + <_> + + 15 8 1 5 + <_> + + 15 8 2 2 + <_> + + 15 8 2 3 + <_> + + 15 9 1 2 + <_> + + 15 9 2 2 + <_> + + 15 10 1 2 + <_> + + 15 10 2 2 + <_> + + 15 11 1 1 + <_> + + 15 11 2 2 + <_> + + 15 12 1 1 + <_> + + 15 12 1 2 + <_> + + 15 13 1 1 + <_> + + 15 13 1 2 + <_> + + 15 14 1 1 + <_> + + 15 15 1 1 + <_> + + 15 17 1 1 + <_> + + 15 18 1 1 + <_> + + 16 0 1 1 + <_> + + 16 2 1 1 + <_> + + 16 3 1 1 + <_> + + 16 3 1 2 + <_> + + 16 4 1 1 + <_> + + 16 5 1 1 + <_> + + 16 6 1 1 + <_> + + 16 7 1 1 + <_> + + 16 8 1 1 + <_> + + 16 8 1 2 + <_> + + 16 8 2 3 + <_> + + 16 9 1 1 + <_> + + 16 9 1 2 + <_> + + 16 10 1 2 + <_> + + 16 11 1 1 + <_> + + 16 11 1 2 + <_> + + 16 11 2 2 + <_> + + 16 12 1 1 + <_> + + 16 12 1 2 + <_> + + 16 13 1 1 + <_> + + 16 14 1 1 + <_> + + 16 15 1 1 + <_> + + 16 16 1 1 + <_> + + 16 17 1 1 + <_> + + 17 0 1 1 + <_> + + 17 0 1 3 + <_> + + 17 2 1 1 + <_> + + 17 3 1 1 + <_> + + 17 5 1 1 + <_> + + 17 6 1 1 + <_> + + 17 6 1 3 + <_> + + 17 7 1 1 + <_> + + 17 
7 1 2 + <_> + + 17 7 1 3 + <_> + + 17 8 1 1 + <_> + + 17 10 1 1 + <_> + + 17 11 1 1 + <_> + + 17 12 1 1 + <_> + + 17 13 1 1 + <_> + + 17 14 1 1 + <_> + + 17 16 1 1 + <_> + + 18 0 1 1 + <_> + + 18 1 1 1 + <_> + + 18 2 1 1 + <_> + + 18 3 1 1 + <_> + + 18 4 1 1 + <_> + + 18 4 1 2 + <_> + + 18 5 1 1 + <_> + + 18 6 1 1 + <_> + + 18 7 1 1 + <_> + + 18 8 1 2 + <_> + + 18 9 1 1 + <_> + + 18 10 1 1 + <_> + + 18 16 1 1 + <_> + + 19 0 1 1 + <_> + + 19 1 1 2 + <_> + + 19 2 1 1 + <_> + + 19 3 1 1 + <_> + + 19 4 1 1 + <_> + + 19 5 1 1 + <_> + + 19 6 1 1 + <_> + + 19 8 1 1 + <_> + + 19 9 1 1 + <_> + + 19 10 1 1 + <_> + + 19 11 1 1 + <_> + + 19 12 1 1 + <_> + + 20 0 1 1 + <_> + + 20 1 1 1 + <_> + + 20 2 1 1 + <_> + + 20 3 1 1 + <_> + + 20 3 1 3 + <_> + + 20 4 1 1 + <_> + + 20 5 1 1 + <_> + + 20 7 1 1 + <_> + + 20 8 1 1 + <_> + + 20 8 1 2 + <_> + + 20 9 1 1 + <_> + + 20 11 1 2 + <_> + + 21 0 1 1 + <_> + + 21 0 1 4 + <_> + + 21 1 1 1 + <_> + + 21 2 1 1 + <_> + + 21 3 1 1 + <_> + + 21 3 1 2 + <_> + + 21 4 1 1 + <_> + + 21 5 1 1 + <_> + + 21 6 1 1 + <_> + + 21 7 1 1 + <_> + + 21 8 1 1 + <_> + + 21 9 1 1 + <_> + + 21 10 1 1 +
diff --git a/custom_nodes/was-node-suite-comfyui/was_history.json b/custom_nodes/was-node-suite-comfyui/was_history.json
new file mode 100644
index 0000000000000000000000000000000000000000..5b7d1bbce6335296f56c8105096798c7818fb01f
--- /dev/null
+++ b/custom_nodes/was-node-suite-comfyui/was_history.json
@@ -0,0 +1,225 @@
+{
+    "History": {
+        "Output_Images": [
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0002.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0003.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0004.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0008.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0009.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0010.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0011.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0012.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0013.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0014.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0015.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0016.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0017.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0018.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0019.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0020.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0021.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0022.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0023.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0024.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0025.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0026.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0027.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0028.png",
+            "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0029.png",
"C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0030.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0031.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0032.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0033.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0034.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0035.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0036.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0038.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0039.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0040.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0041.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0042.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0043.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0044.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0045.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0046.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0047.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0048.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0049.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0050.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0051.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0052.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0053.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0054.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0055.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0056.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0060.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0061.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0062.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0064.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0065.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0066.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0067.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0068.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0069.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0070.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0071.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0072.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0073.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0074.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0075.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0076.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0077.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0078.png", + 
"C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0079.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0080.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0081.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0119.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0120.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0121.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0122.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0123.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0124.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0125.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0126.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0127.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0128.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0129.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0130.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0131.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0132.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0133.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0135.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0136.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0137.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0138.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0139.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0140.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0141.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0142.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0143.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0144.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0145.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0146.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0147.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0148.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0149.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0150.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0151.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0152.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0153.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0154.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0155.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0156.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0157.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0158.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0159.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0160.png", + 
"C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0161.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0162.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0163.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0164.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0165.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0166.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0167.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0168.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0169.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0170.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0171.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0172.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0173.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0176.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0177.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0178.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0179.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0180.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0181.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0182.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0183.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0184.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0185.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0186.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0187.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0188.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0189.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0190.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0191.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0192.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0193.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0194.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0195.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0196.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0197.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0198.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0199.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0200.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0201.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0202.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0203.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0204.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0205.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0206.png", + 
"C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0207.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0208.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0209.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0210.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0211.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0212.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0213.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0214.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0215.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0216.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0217.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0218.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0219.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0220.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0221.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0222.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0223.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0258.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0259.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0260.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0261.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0262.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0263.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0264.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0265.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0266.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0267.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0268.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0269.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0270.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0271.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0272.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0273.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0274.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0275.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0276.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0277.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0278.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0279.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0280.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0281.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0282.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0283.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0284.png", + 
"C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0285.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0286.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0287.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0288.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0289.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0290.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0291.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0292.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0293.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0294.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0295.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0296.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0297.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0298.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0299.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0300.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0301.png", + "C:\\Users\\matsu\\OneDrive\\\u753b\u50cf\\AIART\\Private\\ComfyUI_0302.png" + ] + } +} \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/was_suite_config.json b/custom_nodes/was-node-suite-comfyui/was_suite_config.json new file mode 100644 index 0000000000000000000000000000000000000000..62df012f3d273bbca9d757ef706db898d06f7d8b --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/was_suite_config.json @@ -0,0 +1,23 @@ +{ + "run_requirements": true, + "suppress_uncomfy_warnings": true, + "show_startup_junk": true, + "show_inspiration_quote": true, + "text_nodes_type": "STRING", + "webui_styles": null, + "webui_styles_persistent_update": true, + "blip_model_url": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth", + "blip_model_vqa_url": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth", + "sam_model_vith_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth", + "sam_model_vitl_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth", + "sam_model_vitb_url": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth", + "history_display_limit": 36, + "use_legacy_ascii_text": false, + "ffmpeg_bin_path": "/path/to/ffmpeg", + "ffmpeg_extra_codecs": { + "avc1": ".mp4", + "h264": ".mkv" + }, + "wildcards_path": "C:\\Users\\matsu\\Documents\\ComfyUI\\custom_nodes\\was-node-suite-comfyui\\wildcards", + "wildcard_api": true +} \ No newline at end of file diff --git a/custom_nodes/was-node-suite-comfyui/was_suite_settings.json b/custom_nodes/was-node-suite-comfyui/was_suite_settings.json new file mode 100644 index 0000000000000000000000000000000000000000..ef0f033b7c08c1a58ddd07327bd9f633a0ada49c --- /dev/null +++ b/custom_nodes/was-node-suite-comfyui/was_suite_settings.json @@ -0,0 +1,3 @@ +{ + "custom_tokens": {} +} \ No newline at end of file